From 89a7dd8596eed0bb07bf1ad08d2f937b31f4275f Mon Sep 17 00:00:00 2001 From: Michael Date: Fri, 15 Nov 2024 07:50:07 +0000 Subject: [PATCH] Deployed 6e4499823 with MkDocs version: 1.6.0 --- admin/kpanda/backup/index.html | 2 +- en/admin/ghippo/report-billing/index.html | 2 +- en/admin/kpanda/gpu/Iluvatar_usage.html | 2 +- en/admin/kpanda/gpu/ascend/Ascend_usage.html | 2 +- en/admin/kpanda/gpu/index.html | 2 +- en/admin/kpanda/gpu/metax/usemetax.html | 6 +++--- en/admin/kpanda/gpu/mlu/use-mlu.html | 16 ++++++++-------- .../kpanda/gpu/nvidia/full_gpu_userguide.html | 8 ++++---- en/admin/kpanda/gpu/nvidia/index.html | 4 ++-- .../install_nvidia_driver_of_operator.html | 2 +- en/admin/kpanda/gpu/nvidia/mig/create_mig.html | 2 +- en/admin/kpanda/gpu/nvidia/mig/index.html | 4 ++-- en/admin/kpanda/gpu/nvidia/mig/mig_usage.html | 6 +++--- en/admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html | 4 ++-- en/admin/kpanda/namespaces/podsecurity.html | 2 +- en/admin/kpanda/network/create-ingress.html | 2 +- en/admin/kpanda/network/create-services.html | 2 +- .../kpanda/permissions/custom-kpanda-role.html | 2 +- en/admin/kpanda/workloads/create-cronjob.html | 4 ++-- en/admin/kpanda/workloads/create-daemonset.html | 4 ++-- en/admin/kpanda/workloads/create-deployment.html | 6 +++--- en/admin/kpanda/workloads/create-job.html | 4 ++-- .../kpanda/workloads/create-statefulset.html | 4 ++-- en/end-user/kpanda/backup/etcd-backup.html | 5 ++--- en/end-user/kpanda/backup/index.html | 4 ++-- en/end-user/kpanda/clusters/create-cluster.html | 2 +- en/end-user/kpanda/gpu/Iluvatar_usage.html | 2 +- en/end-user/kpanda/gpu/ascend/Ascend_usage.html | 2 +- en/end-user/kpanda/gpu/index.html | 2 +- .../kpanda/gpu/nvidia/full_gpu_userguide.html | 12 ++++++------ en/end-user/kpanda/gpu/nvidia/index.html | 8 ++++---- .../install_nvidia_driver_of_operator.html | 2 +- .../kpanda/gpu/nvidia/mig/create_mig.html | 2 +- en/end-user/kpanda/gpu/nvidia/mig/index.html | 4 ++-- en/end-user/kpanda/gpu/nvidia/mig/mig_usage.html | 6 +++--- .../kpanda/gpu/nvidia/vgpu/vgpu_user.html | 4 ++-- en/end-user/kpanda/namespaces/podsecurity.html | 2 +- en/end-user/kpanda/network/create-ingress.html | 2 +- en/end-user/kpanda/network/create-services.html | 2 +- en/end-user/kpanda/nodes/add-node.html | 2 +- .../kpanda/permissions/custom-kpanda-role.html | 2 +- en/end-user/kpanda/workloads/create-cronjob.html | 4 ++-- .../kpanda/workloads/create-daemonset.html | 4 ++-- .../kpanda/workloads/create-deployment.html | 6 +++--- en/end-user/kpanda/workloads/create-job.html | 4 ++-- .../kpanda/workloads/create-statefulset.html | 4 ++-- en/openapi/baize/v0.107.4.json | 6 +++--- en/openapi/kpanda/v0.32.2.json | 6 +++--- end-user/kpanda/backup/index.html | 2 +- openapi/baize/v0.107.4.json | 6 +++--- openapi/kpanda/v0.32.2.json | 6 +++--- search/search_index.json | 2 +- 52 files changed, 103 insertions(+), 104 deletions(-) diff --git a/admin/kpanda/backup/index.html b/admin/kpanda/backup/index.html index afb423e538..cf151371c4 100644 --- a/admin/kpanda/backup/index.html +++ b/admin/kpanda/backup/index.html @@ -532,7 +532,7 @@

Backup and Restore
  • Application Backup

    Application backup means backing up the data of a given workload in the cluster and then restoring that data to this cluster or another cluster. It supports backing up all resources under an entire namespace, and it also supports filtering by label selector so that only resources carrying specific labels are backed up (a hedged manifest sketch follows this list).

    -

    Application backup supports cross-cluster backup of stateful applications; for detailed steps, refer to Cross-Cluster Backup and Restore of MySQL Applications and Data.

    +

    Application backup supports cross-cluster backup of stateful applications; for detailed steps, refer to Cross-Cluster Backup and Restore of MySQL Applications and Data.

  • ETCD Backup
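    A hedged manifest sketch of the label-selector mode, assuming a Velero-based backup engine (the engine choice, namespace, and label values below are illustrative assumptions, not the platform's confirmed API):

        apiVersion: velero.io/v1
        kind: Backup
        metadata:
          name: mysql-backup-demo        # hypothetical backup name
          namespace: velero              # assumes the default Velero install namespace
        spec:
          includedNamespaces:
            - my-namespace               # scope: all resources in this namespace...
          labelSelector:
            matchLabels:
              app: mysql                 # ...narrowed to resources carrying this label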

    diff --git a/en/admin/ghippo/report-billing/index.html b/en/admin/ghippo/report-billing/index.html index 52ab9ec643..86addc01de 100644 --- a/en/admin/ghippo/report-billing/index.html +++ b/en/admin/ghippo/report-billing/index.html @@ -483,7 +483,7 @@

    Operation Management: install or upgrade the Operations Management module first, and then you can use report management and billing metering.

    Report Management

    Report Management provides data statistics for cluster, node, pod, workspace, and namespace across -five dimensions: CPU Utilization, Memory Utilization, Storage Utilization, GPU Computing Power Utilization, +five dimensions: CPU Utilization, Memory Utilization, Storage Utilization, GPU Utilization, and GPU Memory Utilization. It also integrates with the audit and alert modules to support statistical management of audit and alert data, covering a total of seven types of reports.

    Accounting & Billing

    diff --git a/en/admin/kpanda/gpu/Iluvatar_usage.html b/en/admin/kpanda/gpu/Iluvatar_usage.html index b8a1e1d5a3..1ee0a5fd0a 100644 --- a/en/admin/kpanda/gpu/Iluvatar_usage.html +++ b/en/admin/kpanda/gpu/Iluvatar_usage.html @@ -646,7 +646,7 @@

    Procedure: Configuration via User Interface

    1. -

      Check if the GPU card in the cluster has been detected. Click Clusters -> Cluster Settings -> Addon Plugins , and check if the proper GPU type has been automatically enabled and detected. +

      Check if the GPU in the cluster has been detected. Click Clusters -> Cluster Settings -> Addon Plugins, and check if the proper GPU type has been automatically enabled and detected. Currently, the cluster will automatically enable GPU and set the GPU type as Iluvatar.

    2. diff --git a/en/admin/kpanda/gpu/ascend/Ascend_usage.html b/en/admin/kpanda/gpu/ascend/Ascend_usage.html index 17f6217849..02ced5ed27 100644 --- a/en/admin/kpanda/gpu/ascend/Ascend_usage.html +++ b/en/admin/kpanda/gpu/ascend/Ascend_usage.html @@ -422,7 +422,7 @@

      Quick Start: UI Usage

      1. -

        Confirm whether the cluster has detected the GPU card. Click Clusters -> Cluster Settings -> Addon Plugins , +

        Confirm whether the cluster has detected the GPU. Click Clusters -> Cluster Settings -> Addon Plugins, and check whether the proper GPU type is automatically enabled and detected. Currently, the cluster will automatically enable GPU and set the GPU type to Ascend.

        Cluster Settings

        diff --git a/en/admin/kpanda/gpu/index.html b/en/admin/kpanda/gpu/index.html index 705e0f40b7..3434a09f2f 100644 --- a/en/admin/kpanda/gpu/index.html +++ b/en/admin/kpanda/gpu/index.html @@ -591,7 +591,7 @@

        Introduction to GPU Capabilities: Compatibility with various training frameworks such as TensorFlow and PyTorch.

      2. Introduction to GPU Operator

        -

        Similar to regular computer hardware, NVIDIA GPUs, as physical devices, need to have the NVIDIA GPU driver installed in order to be used. To reduce the cost of using GPUs on Kubernetes, NVIDIA provides the NVIDIA GPU Operator component to manage various components required for using NVIDIA GPUs. These components include the NVIDIA driver (for enabling CUDA), NVIDIA container runtime, GPU node labeling, DCGM-based monitoring, and more. In theory, users only need to plug the GPU card into a compute device managed by Kubernetes, and they can use all the capabilities of NVIDIA GPUs through the GPU Operator. For more information about NVIDIA GPU Operator, refer to the NVIDIA official documentation. For deployment instructions, refer to Offline Installation of GPU Operator.

        +

        Similar to regular computer hardware, NVIDIA GPUs, as physical devices, need to have the NVIDIA GPU driver installed in order to be used. To reduce the cost of using GPUs on Kubernetes, NVIDIA provides the NVIDIA GPU Operator component to manage various components required for using NVIDIA GPUs. These components include the NVIDIA driver (for enabling CUDA), NVIDIA container runtime, GPU node labeling, DCGM-based monitoring, and more. In theory, users only need to plug the GPU into a compute device managed by Kubernetes, and they can use all the capabilities of NVIDIA GPUs through the GPU Operator. For more information about NVIDIA GPU Operator, refer to the NVIDIA official documentation. For deployment instructions, refer to Offline Installation of GPU Operator.
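        The components the operator manages are typically toggled through its Helm chart values. A minimal sketch, assuming the upstream gpu-operator chart's value names:

            # values.yaml sketch for the NVIDIA gpu-operator Helm chart (assumed value names)
            driver:
              enabled: true        # let the operator install the NVIDIA driver (enables CUDA)
            toolkit:
              enabled: true        # install the NVIDIA container toolkit (container runtime hooks)
            dcgmExporter:
              enabled: true        # DCGM-based monitoring
            gfd:
              enabled: true        # GPU node labeling via GPU Feature Discovery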

        Architecture diagram of NVIDIA GPU Operator:

        diff --git a/en/admin/kpanda/gpu/metax/usemetax.html b/en/admin/kpanda/gpu/metax/usemetax.html index 437239083d..d301413df4 100644 --- a/en/admin/kpanda/gpu/metax/usemetax.html +++ b/en/admin/kpanda/gpu/metax/usemetax.html @@ -662,7 +662,7 @@

        MetaX GPU Component Installation and Usage

        -

        This chapter provides installation guidance for MetaX's gpu-extensions, gpu-operator, and other components, as well as usage methods for both the full GPU card and vGPU modes.

        +

        This chapter provides installation guidance for MetaX's gpu-extensions, gpu-operator, and other components, as well as usage methods for both the full GPU and vGPU modes.

        Prerequisites

        1. The required tar package has been downloaded and installed from the MetaX Software Center. This article uses metax-gpu-k8s-package.0.7.10.tar.gz as an example.
        2. @@ -671,8 +671,8 @@

          Prerequisites / Component Introduction

          Metax provides two helm-chart packages: metax-extensions and gpu-operator. Depending on the usage scenario, different components can be selected for installation.

            -
          1. Metax-extensions: Includes two components, gpu-device and gpu-label. When using the Metax-extensions solution, the user's application container image needs to be built based on the MXMACA® base image. Moreover, Metax-extensions is only suitable for scenarios using the full GPU card.
          2. -
          3. gpu-operator: Includes components such as gpu-device, gpu-label, driver-manager, container-runtime, and operator-controller. When using the gpu-operator solution, users can choose to create application container images that do not include the MXMACA® SDK. The gpu-operator is suitable for both full GPU card and vGPU scenarios.
          4. +
          5. Metax-extensions: Includes two components, gpu-device and gpu-label. When using the Metax-extensions solution, the user's application container image needs to be built based on the MXMACA® base image. Moreover, Metax-extensions is only suitable for scenarios using the full GPU.
          6. +
          7. gpu-operator: Includes components such as gpu-device, gpu-label, driver-manager, container-runtime, and operator-controller. When using the gpu-operator solution, users can choose to create application container images that do not include the MXMACA® SDK. The gpu-operator is suitable for both full GPU and vGPU scenarios.

          Operation Steps

            diff --git a/en/admin/kpanda/gpu/mlu/use-mlu.html b/en/admin/kpanda/gpu/mlu/use-mlu.html index d56486827a..00131ab922 100644 --- a/en/admin/kpanda/gpu/mlu/use-mlu.html +++ b/en/admin/kpanda/gpu/mlu/use-mlu.html @@ -484,9 +484,9 @@
          1. - + - Using Cambricon in SuanFeng AI Computing Platform + Using Cambricon in Suanova AI Computing Platform
          2. @@ -605,9 +605,9 @@
          3. - + - Using Cambricon in SuanFeng AI Computing Platform + Using Cambricon in Suanova AI Computing Platform
          4. @@ -626,10 +626,10 @@

            Using Cambricon GPU

            -

            This article introduces how to use Cambricon GPU in the SuanFeng AI computing platform.

            +

            This article introduces how to use Cambricon GPU in the Suanova AI computing platform.

            Prerequisites

            -

            Using the Whole NVIDIA GPU Card for an Application

            -

            This section describes how to allocate the entire NVIDIA GPU card to a single application on the AI platform platform.

            +

            Using the Whole NVIDIA GPU for an Application

            +

            This section describes how to allocate the entire NVIDIA GPU to a single application on the AI platform.

            Prerequisites

            • AI platform container management platform has been deployed and is running properly.
            • The container management module has been connected to a Kubernetes cluster or a Kubernetes cluster has been created, and you can access the UI interface of the cluster.
            • GPU Operator has been offline installed and NVIDIA DevicePlugin has been enabled on the current cluster. Refer to Offline Installation of GPU Operator for instructions.
            • -
            • The GPU card in the current cluster has not undergone any virtualization operations or been occupied by other applications.
            • +
            • The GPU in the current cluster has not undergone any virtualization operations or been occupied by other applications.

            Procedure

            Configuring via the User Interface

            diff --git a/en/admin/kpanda/gpu/nvidia/index.html b/en/admin/kpanda/gpu/nvidia/index.html index f227828753..3a03b73ea5 100644 --- a/en/admin/kpanda/gpu/nvidia/index.html +++ b/en/admin/kpanda/gpu/nvidia/index.html @@ -26,7 +26,7 @@ @@ -642,7 +642,7 @@
            -

            NVIDIA GPU Card Usage Modes

            +

            NVIDIA GPU Usage Modes

            NVIDIA, as a well-known graphics computing provider, offers various software and hardware solutions to enhance computational power. Among them, NVIDIA provides the following three solutions for GPU usage:

            Full GPU

            Full GPU refers to allocating the entire NVIDIA GPU to a single user or application. In this configuration, the application can fully occupy all the resources of the GPU and achieve maximum computational performance. Full GPU is suitable for workloads that require a large amount of computational resources and memory, such as deep learning training, scientific computing, etc.
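            In YAML terms, a minimal sketch of this mode is a pod that requests the nvidia.com/gpu resource exposed by the NVIDIA device plugin (the pod name and image below are illustrative assumptions):

                apiVersion: v1
                kind: Pod
                metadata:
                  name: full-gpu-demo                              # hypothetical name
                spec:
                  restartPolicy: Never
                  containers:
                    - name: cuda-test
                      image: nvidia/cuda:12.2.0-base-ubuntu22.04   # assumed CUDA base image
                      command: ["nvidia-smi"]                      # prints the allocated GPU, then exits
                      resources:
                        limits:
                          nvidia.com/gpu: 1                        # one entire physical GPU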

            diff --git a/en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html b/en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html index 118b82f545..6aafceb0ca 100644 --- a/en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html +++ b/en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html @@ -803,7 +803,7 @@

            Offline Install gpu-operator / Prerequisites

            • The kernel version of the cluster nodes where the gpu-operator is to be deployed must be - completely consistent. The distribution and GPU card model of the nodes must fall within + completely consistent. The distribution and GPU model of the nodes must fall within the scope specified in the GPU Support Matrix.
            • When installing the gpu-operator, select v23.9.0+2 or above.
            diff --git a/en/admin/kpanda/gpu/nvidia/mig/create_mig.html b/en/admin/kpanda/gpu/nvidia/mig/create_mig.html index c50ff5c496..f805432159 100644 --- a/en/admin/kpanda/gpu/nvidia/mig/create_mig.html +++ b/en/admin/kpanda/gpu/nvidia/mig/create_mig.html @@ -768,7 +768,7 @@

            Enabling MIG Features / NVIDIA GPU Card Usage Modes.

            +

            For more details, refer to the NVIDIA GPU Usage Modes.

            Prerequisites

            • Check the system requirements for the GPU driver installation on the target node: GPU Support Matrix
            • diff --git a/en/admin/kpanda/gpu/nvidia/mig/index.html b/en/admin/kpanda/gpu/nvidia/mig/index.html index a98bc77d62..b47f97c948 100644 --- a/en/admin/kpanda/gpu/nvidia/mig/index.html +++ b/en/admin/kpanda/gpu/nvidia/mig/index.html @@ -724,13 +724,13 @@

              MIG Scenarios: MIG offers increased compute power and memory capacity for training large-scale deep learning models. By partitioning the physical GPU into multiple MIG instances, each instance can independently carry out model training, improving training efficiency and throughput.

              In general, NVIDIA MIG is suitable for scenarios that require finer-grained allocation and management of GPU resources. It enables resource isolation, improved performance utilization, and meets the GPU computing needs of multiple users or applications.

              Overview of MIG

              -

              NVIDIA Multi-Instance GPU (MIG) is a new feature introduced by NVIDIA on H100, A100, and A30 series GPUs. Its purpose is to divide a physical GPU into multiple GPU instances to provide finer-grained resource sharing and isolation. MIG can split a GPU into up to seven GPU instances, allowing a single physical GPU card to provide separate GPU resources to multiple users, maximizing GPU utilization.

              +

              NVIDIA Multi-Instance GPU (MIG) is a new feature introduced by NVIDIA on H100, A100, and A30 series GPUs. Its purpose is to divide a physical GPU into multiple GPU instances to provide finer-grained resource sharing and isolation. MIG can split a GPU into up to seven GPU instances, allowing a single physical GPU to provide separate GPU resources to multiple users, maximizing GPU utilization.

              This feature enables multiple applications or users to share GPU resources simultaneously, improving the utilization of computational resources and increasing system scalability.

              With MIG, each GPU instance's processor has an independent and isolated path throughout the entire memory system, including cross-switch ports on the chip, L2 cache groups, memory controllers, and DRAM address buses, all uniquely allocated to a single instance.

              This ensures that the workload of individual users can run with predictable throughput and latency, along with identical L2 cache allocation and DRAM bandwidth. MIG can partition available GPU compute resources (such as streaming multiprocessors or SMs and GPU engines like copy engines or decoders) to provide defined quality of service (QoS) and fault isolation for different clients such as virtual machines, containers, or processes. MIG enables multiple GPU instances to run in parallel on a single physical GPU.

              MIG allows multiple vGPUs (and virtual machines) to run in parallel on a single GPU instance while retaining the isolation guarantees provided by vGPU. For more details on using vGPU and MIG for GPU partitioning, refer to NVIDIA Multi-Instance GPU and NVIDIA Virtual Compute Server.

              MIG Architecture

              -

              The following diagram provides an overview of MIG, illustrating how it virtualizes one physical GPU card into seven GPU instances that can be used by multiple users.

              +

              The following diagram provides an overview of MIG, illustrating how it virtualizes one physical GPU into seven GPU instances that can be used by multiple users.

              Important Concepts

              • SM (Streaming Multiprocessor): The core computational unit of a GPU responsible for executing graphics rendering and general-purpose computing tasks. Each SM contains a group of CUDA cores, as well as shared memory, register files, and other resources, capable of executing multiple threads concurrently. Each MIG instance has a certain number of SMs and other related resources, along with the allocated memory slices.
              • diff --git a/en/admin/kpanda/gpu/nvidia/mig/mig_usage.html b/en/admin/kpanda/gpu/nvidia/mig/mig_usage.html index 05115e1640..fc3909149b 100644 --- a/en/admin/kpanda/gpu/nvidia/mig/mig_usage.html +++ b/en/admin/kpanda/gpu/nvidia/mig/mig_usage.html @@ -738,17 +738,17 @@

                Prerequisites / Using MIG GPU through the UI

                1. -

                  Confirm if the cluster has recognized the GPU card type.

                  +

                  Confirm if the cluster has recognized the GPU type.

                  Go to Cluster Details -> Nodes and check if it has been correctly recognized as MIG.

                2. When deploying an application using an image, you can select and use NVIDIA MIG resources.

                3. -

                  Example of MIG Single Mode (used in the same way as a full GPU card):

                  +

                  Example of MIG Single Mode (used in the same way as a full GPU):

                  Note

                  -

                  The MIG single policy allows users to request and use GPU resources in the same way as a full GPU card (nvidia.com/gpu). The difference is that these resources can be a portion of the GPU (MIG device) rather than the entire GPU. Learn more from the GPU MIG Mode Design.

                  +

                  The MIG single policy allows users to request and use GPU resources in the same way as a full GPU (nvidia.com/gpu). The difference is that these resources can be a portion of the GPU (MIG device) rather than the entire GPU. Learn more from the GPU MIG Mode Design.
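                  A minimal sketch under the single strategy (the pod name and image are illustrative assumptions); note that the request uses the ordinary nvidia.com/gpu name even though a MIG slice, not the whole card, is granted:

                      apiVersion: v1
                      kind: Pod
                      metadata:
                        name: mig-single-demo                            # hypothetical name
                      spec:
                        restartPolicy: Never
                        containers:
                          - name: cuda-test
                            image: nvidia/cuda:12.2.0-base-ubuntu22.04   # assumed CUDA base image
                            command: ["nvidia-smi", "-L"]                # lists the MIG device that was granted
                            resources:
                              limits:
                                nvidia.com/gpu: 1                        # one MIG instance under the single strategy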

                4. diff --git a/en/admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html b/en/admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html index 88d3ab73d7..22a30eb491 100644 --- a/en/admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html +++ b/en/admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html @@ -796,11 +796,11 @@

                  Using vGPU through YAML Configuration

                      limits:
                        nvidia.com/gpucores: '20'   # Request 20% of GPU cores for each card
                        nvidia.com/gpumem: '200'    # Request 200MB of GPU memory for each card
                  -     nvidia.com/vgpu: '1'        # Request 1 GPU card
                  +     nvidia.com/vgpu: '1'        # Request 1 GPU
                      imagePullPolicy: Always
                      restartPolicy: Always

            -

            This YAML configuration requests the application to use vGPU resources. It specifies that each card should utilize 20% of GPU cores, 200MB of GPU memory, and requests 1 GPU card.

            +

            This YAML configuration requests the application to use vGPU resources. It specifies that each card should utilize 20% of GPU cores, 200MB of GPU memory, and requests 1 GPU.
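            Assembled into a complete, self-contained manifest, the fragment above might look like this (the pod name and image are illustrative assumptions; the resource names are the ones shown above):

                apiVersion: v1
                kind: Pod
                metadata:
                  name: vgpu-demo                                  # hypothetical name
                spec:
                  restartPolicy: Always
                  containers:
                    - name: cuda-app
                      image: nvidia/cuda:12.2.0-base-ubuntu22.04   # assumed CUDA base image
                      command: ["sleep", "infinity"]               # placeholder workload
                      imagePullPolicy: Always
                      resources:
                        limits:
                          nvidia.com/gpucores: '20'                # 20% of GPU cores per card
                          nvidia.com/gpumem: '200'                 # 200 MB of GPU memory per card
                          nvidia.com/vgpu: '1'                     # 1 vGPU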

        diff --git a/en/admin/kpanda/namespaces/podsecurity.html b/en/admin/kpanda/namespaces/podsecurity.html index 58a25aeb41..1fab1d9d1f 100644 --- a/en/admin/kpanda/namespaces/podsecurity.html +++ b/en/admin/kpanda/namespaces/podsecurity.html @@ -576,7 +576,7 @@

        Prerequisites: The container management module has integrated a Kubernetes cluster or created one. The cluster version must be v1.22 or above, and you should be able to access the cluster's UI.

      3. -

        A namespace has been created, a user has been created, and the user has been granted NS Admin or higher permissions. For details, refer to Namespace Authorization.

        +

        A namespace has been created, a user has been created, and the user has been granted NS Admin or higher permissions. For details, refer to Namespace Authorization.

      4. Configure Pod Security Policies for a Namespace
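        If these policies map onto the standard Kubernetes Pod Security Admission labels (an assumption about the underlying mechanism; PSA is available in the cluster versions this page requires), configuring a namespace amounts to labeling it:

            apiVersion: v1
            kind: Namespace
            metadata:
              name: demo-ns                                    # hypothetical namespace
              labels:
                pod-security.kubernetes.io/enforce: baseline   # reject pods that violate the baseline policy
                pod-security.kubernetes.io/warn: restricted    # warn about pods that violate the stricter policy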

        diff --git a/en/admin/kpanda/network/create-ingress.html b/en/admin/kpanda/network/create-ingress.html index e7edb0b964..686816540a 100644 --- a/en/admin/kpanda/network/create-ingress.html +++ b/en/admin/kpanda/network/create-ingress.html @@ -621,7 +621,7 @@

        Create an Ingress / Prerequisites

        diff --git a/en/admin/kpanda/network/create-services.html b/en/admin/kpanda/network/create-services.html index 5154bfceeb..195bd0d4e2 100644 --- a/en/admin/kpanda/network/create-services.html +++ b/en/admin/kpanda/network/create-services.html @@ -636,7 +636,7 @@

        Prerequisites: The container management module has been connected to a Kubernetes cluster or a Kubernetes cluster has been created, and the cluster UI is accessible.

      5. -

        Completed a namespace creation, user creation, and authorize the user as NS Editor role, for details, refer to Namespace Authorization.

        +

        Completed namespace creation and user creation, and authorized the user with the NS Editor role; for details, refer to Namespace Authorization.

      6. When there are multiple containers in a single instance, make sure that the ports used by the containers do not conflict; otherwise, the deployment will fail (see the sketch below).
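        For instance, containers in one pod share the pod's network namespace, so their listening ports must be unique. A minimal sketch (names and images are illustrative assumptions):

            apiVersion: v1
            kind: Pod
            metadata:
              name: multi-container-demo           # hypothetical name
            spec:
              containers:
                - name: web
                  image: nginx:1.25                # assumed image, listens on 80
                  ports:
                    - containerPort: 80
                - name: metrics
                  image: prom/prometheus:v2.48.0   # assumed image, listens on 9090
                  ports:
                    - containerPort: 9090          # distinct from 80, so no conflict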

        diff --git a/en/admin/kpanda/permissions/custom-kpanda-role.html b/en/admin/kpanda/permissions/custom-kpanda-role.html index bbbd1f9de4..760572d949 100644 --- a/en/admin/kpanda/permissions/custom-kpanda-role.html +++ b/en/admin/kpanda/permissions/custom-kpanda-role.html @@ -576,7 +576,7 @@

        Prerequisites: Container management v0.27.0 and above.

      7. Integrated Kubernetes cluster or created Kubernetes cluster, and able to access the cluster's UI interface.
      8. -
      9. Completed creation of a namespace and user account, +
      10. Completed creation of a namespace and a user account, and granted the user the NS Viewer role. For details, refer to namespace authorization.
      11. diff --git a/en/admin/kpanda/workloads/create-cronjob.html b/en/admin/kpanda/workloads/create-cronjob.html index 852597f494..8a3d73ad9e 100644 --- a/en/admin/kpanda/workloads/create-cronjob.html +++ b/en/admin/kpanda/workloads/create-cronjob.html @@ -379,9 +379,9 @@

        Container settings / Cluster Settings.

        +

        Before setting an exclusive GPU, the administrator needs to install the GPU and the driver plugin on the cluster nodes in advance and enable the GPU feature in Cluster Settings.

        diff --git a/en/admin/kpanda/workloads/create-daemonset.html b/en/admin/kpanda/workloads/create-daemonset.html index ca332284f9..a100d85c2f 100644 --- a/en/admin/kpanda/workloads/create-daemonset.html +++ b/en/admin/kpanda/workloads/create-daemonset.html @@ -379,9 +379,9 @@

        Container settings / Cluster Settings.

        +

        Before setting an exclusive GPU, the administrator needs to install the GPU and the driver plugin on the cluster nodes in advance and enable the GPU feature in Cluster Settings.

        diff --git a/en/admin/kpanda/workloads/create-deployment.html b/en/admin/kpanda/workloads/create-deployment.html index 90b6e846f9..72804e1899 100644 --- a/en/admin/kpanda/workloads/create-deployment.html +++ b/en/admin/kpanda/workloads/create-deployment.html @@ -390,12 +390,12 @@

        Container settings / Cluster Settings.

        diff --git a/en/admin/kpanda/workloads/create-job.html b/en/admin/kpanda/workloads/create-job.html index edb802b912..c285a1085e 100644 --- a/en/admin/kpanda/workloads/create-job.html +++ b/en/admin/kpanda/workloads/create-job.html @@ -364,10 +364,10 @@

        Container settings / Image Pull Policy.
      12. Privileged container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and enjoy all the permissions of the running process on the host.
      13. CPU/Memory Quota: Requested value (minimum resource to be used) and limit value (maximum resource allowed to be used) of CPU/Memory resource. Please configure resources for containers as needed to avoid resource waste and system failures caused by excessive container resources. The default value is shown in the figure.
      14. -
      15. GPU Exclusive: Configure the GPU usage for the container, only positive integers are supported. The GPU quota setting supports setting exclusive use of the entire GPU card or part of the vGPU for the container. For example, for an 8-core GPU card, enter the number 8 to let the container exclusively use the entire length of the card, and enter the number 1 to configure a 1-core vGPU for the container.
      16. +
      17. GPU Exclusive: Configure the GPU usage for the container; only positive integers are supported. The GPU quota setting supports exclusive use of the entire GPU or a vGPU slice by the container. For example, for an 8-core GPU, enter 8 to let the container use the whole card exclusively, or enter 1 to configure a 1-core vGPU for the container.
      18. -

        Before setting exclusive GPU, the administrator needs to install the GPU card and driver plug-in on the cluster nodes in advance, and enable the GPU feature in Cluster Settings.

        +

        Before setting an exclusive GPU, the administrator needs to install the GPU and the driver plugin on the cluster nodes in advance and enable the GPU feature in Cluster Settings.

        diff --git a/en/admin/kpanda/workloads/create-statefulset.html b/en/admin/kpanda/workloads/create-statefulset.html index 6d814430b1..f4eca465b7 100644 --- a/en/admin/kpanda/workloads/create-statefulset.html +++ b/en/admin/kpanda/workloads/create-statefulset.html @@ -372,10 +372,10 @@

        Container settings / Image Pull Policy.
      19. Privileged container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and enjoy all the permissions of the running process on the host.
      20. CPU/Memory Quota: Requested value (minimum resource to be used) and limit value (maximum resource allowed to be used) of CPU/Memory resource. Please configure resources for containers as needed to avoid resource waste and system failures caused by excessive container resources. The default value is shown in the figure.
      21. -
      22. GPU Exclusive: Configure the GPU usage for the container, only positive integers are supported. The GPU quota setting supports setting exclusive use of the entire GPU card or part of the vGPU for the container. For example, for an 8-core GPU card, enter the number 8 to let the container exclusively use the entire length of the card, and enter the number 1 to configure a 1-core vGPU for the container.
      23. +
      24. GPU Exclusive: Configure the GPU usage for the container; only positive integers are supported. The GPU quota setting supports exclusive use of the entire GPU or a vGPU slice by the container. For example, for an 8-core GPU, enter 8 to let the container use the whole card exclusively, or enter 1 to configure a 1-core vGPU for the container.
      25. -

        Before setting exclusive GPU, the administrator needs to install the GPU card and driver plug-in on the cluster nodes in advance, and enable the GPU feature in Cluster Settings.

        +

        Before setting an exclusive GPU, the administrator needs to install the GPU and the driver plugin on the cluster nodes in advance and enable the GPU feature in Cluster Settings.

        Basic Info

        diff --git a/en/end-user/kpanda/backup/etcd-backup.html b/en/end-user/kpanda/backup/etcd-backup.html index 0960a85e4f..1c4eb069f1 100644 --- a/en/end-user/kpanda/backup/etcd-backup.html +++ b/en/end-user/kpanda/backup/etcd-backup.html @@ -648,7 +648,7 @@

        etcd backup

        etcd backup takes cluster data as its core backup target. In scenarios such as hardware damage or misconfiguration in development and test environments, cluster data can be restored from an etcd backup.

        This section introduces how to perform etcd backup for clusters. -Also see etcd Backup and Restore Best Practices.

        +Also see etcd Backup and Restore Best Practices.

        Prerequisites

        • @@ -663,8 +663,7 @@

          Prerequisites / Namespace Authorization.

        • -

          Prepared a MinIO instance. It is recommended to create it through AI platform's MinIO middleware. - For specific steps, refer to MinIO Object Storage.

          +

          Prepared a MinIO instance. It is recommended to create it through AI platform's MinIO middleware.

        Create etcd backup

        diff --git a/en/end-user/kpanda/backup/index.html b/en/end-user/kpanda/backup/index.html index ae43e719c8..06c35aaff5 100644 --- a/en/end-user/kpanda/backup/index.html +++ b/en/end-user/kpanda/backup/index.html @@ -586,7 +586,7 @@

        Backup and Restore / Backup and Restore MySQL Applications and Data Across Clusters guide.

        +For detailed steps, refer to the Backup and Restore MySQL Applications and Data Across Clusters guide.

      26. etcd Backup

        @@ -595,7 +595,7 @@

        Backup and Restore / ETCD Backup and Restore guide.

        +ETCD Backup and Restore guide.

      27. diff --git a/en/end-user/kpanda/clusters/create-cluster.html b/en/end-user/kpanda/clusters/create-cluster.html index e6203a5794..295cb05e6c 100644 --- a/en/end-user/kpanda/clusters/create-cluster.html +++ b/en/end-user/kpanda/clusters/create-cluster.html @@ -277,7 +277,7 @@

        Create Worker Clusters / Integrate Cluster.

        This page explains how to create a Worker Cluster. By default, when creating a new Worker Cluster, the operating system type and CPU architecture of the worker nodes should be consistent with the Global Service Cluster. If you want to create a cluster with a different operating system or architecture than the Global Management Cluster, refer to Creating an Ubuntu Worker Cluster on a CentOS Management Platform for instructions.

        -

        It is recommended to use the supported operating systems in AI platform to create the cluster. If your local nodes are not within the supported range, you can refer to Creating a Cluster on Non-Mainstream Operating Systems for instructions.

        +

        It is recommended to use the supported operating systems in AI platform to create the cluster. If your local nodes are not within the supported range, you can refer to Creating a Cluster on Non-Mainstream Operating Systems for instructions.

        Prerequisites

        Certain prerequisites must be met before creating a cluster:

          diff --git a/en/end-user/kpanda/gpu/Iluvatar_usage.html b/en/end-user/kpanda/gpu/Iluvatar_usage.html index b3caad7643..8f9b494f49 100644 --- a/en/end-user/kpanda/gpu/Iluvatar_usage.html +++ b/en/end-user/kpanda/gpu/Iluvatar_usage.html @@ -303,7 +303,7 @@

          Procedure: Configuration via User Interface

          1. -

            Check if the GPU card in the cluster has been detected. Click Clusters -> Cluster Settings -> Addon Plugins , and check if the proper GPU type has been automatically enabled and detected. +

            Check if the GPU in the cluster has been detected. Click Clusters -> Cluster Settings -> Addon Plugins, and check if the proper GPU type has been automatically enabled and detected. Currently, the cluster will automatically enable GPU and set the GPU type as Iluvatar.

          2. diff --git a/en/end-user/kpanda/gpu/ascend/Ascend_usage.html b/en/end-user/kpanda/gpu/ascend/Ascend_usage.html index 0587ef16bb..90f8bebab8 100644 --- a/en/end-user/kpanda/gpu/ascend/Ascend_usage.html +++ b/en/end-user/kpanda/gpu/ascend/Ascend_usage.html @@ -422,7 +422,7 @@

            Quick Start: UI Usage

            1. -

              Confirm whether the cluster has detected the GPU card. Click Clusters -> Cluster Settings -> Addon Plugins , +

              Confirm whether the cluster has detected the GPU. Click Clusters -> Cluster Settings -> Addon Plugins, and check whether the proper GPU type is automatically enabled and detected. Currently, the cluster will automatically enable GPU and set the GPU type to Ascend.

              Cluster Settings

              diff --git a/en/end-user/kpanda/gpu/index.html b/en/end-user/kpanda/gpu/index.html index 8dc1ce0c18..6bdd70fd62 100644 --- a/en/end-user/kpanda/gpu/index.html +++ b/en/end-user/kpanda/gpu/index.html @@ -295,7 +295,7 @@

              Introduction to GPU Capabilities: Compatibility with various training frameworks such as TensorFlow and PyTorch.

        Introduction to GPU Operator

        -

        Similar to regular computer hardware, NVIDIA GPUs, as physical devices, need to have the NVIDIA GPU driver installed in order to be used. To reduce the cost of using GPUs on Kubernetes, NVIDIA provides the NVIDIA GPU Operator component to manage various components required for using NVIDIA GPUs. These components include the NVIDIA driver (for enabling CUDA), NVIDIA container runtime, GPU node labeling, DCGM-based monitoring, and more. In theory, users only need to plug the GPU card into a compute device managed by Kubernetes, and they can use all the capabilities of NVIDIA GPUs through the GPU Operator. For more information about NVIDIA GPU Operator, refer to the NVIDIA official documentation. For deployment instructions, refer to Offline Installation of GPU Operator.

        +

        Similar to regular computer hardware, NVIDIA GPUs, as physical devices, need to have the NVIDIA GPU driver installed in order to be used. To reduce the cost of using GPUs on Kubernetes, NVIDIA provides the NVIDIA GPU Operator component to manage various components required for using NVIDIA GPUs. These components include the NVIDIA driver (for enabling CUDA), NVIDIA container runtime, GPU node labeling, DCGM-based monitoring, and more. In theory, users only need to plug the GPU into a compute device managed by Kubernetes, and they can use all the capabilities of NVIDIA GPUs through the GPU Operator. For more information about NVIDIA GPU Operator, refer to the NVIDIA official documentation. For deployment instructions, refer to Offline Installation of GPU Operator.

        Architecture diagram of NVIDIA GPU Operator:

        diff --git a/en/end-user/kpanda/gpu/nvidia/full_gpu_userguide.html b/en/end-user/kpanda/gpu/nvidia/full_gpu_userguide.html index 88a2cb79d1..9635269082 100644 --- a/en/end-user/kpanda/gpu/nvidia/full_gpu_userguide.html +++ b/en/end-user/kpanda/gpu/nvidia/full_gpu_userguide.html @@ -10,7 +10,7 @@ -Using the Whole NVIDIA GPU Card for an Application - 豐收二號檔案站 +Using the Whole NVIDIA GPU for an Application - 豐收二號檔案站 @@ -24,7 +24,7 @@ @@ -48,7 +48,7 @@
        - Using the Whole NVIDIA GPU Card for an Application + Using the Whole NVIDIA GPU for an Application
        @@ -290,14 +290,14 @@
        -

        Using the Whole NVIDIA GPU Card for an Application

        -

        This section describes how to allocate the entire NVIDIA GPU card to a single application on the AI platform platform.

        +

        Using the Whole NVIDIA GPU for an Application

        +

        This section describes how to allocate the entire NVIDIA GPU to a single application on the AI platform.

        Prerequisites

        • AI platform container management platform has been deployed and is running properly.
        • The container management module has been connected to a Kubernetes cluster or a Kubernetes cluster has been created, and you can access the UI interface of the cluster.
        • GPU Operator has been offline installed and NVIDIA DevicePlugin has been enabled on the current cluster. Refer to Offline Installation of GPU Operator for instructions.
        • -
        • The GPU card in the current cluster has not undergone any virtualization operations or been occupied by other applications.
        • +
        • The GPU in the current cluster has not undergone any virtualization operations or been occupied by other applications.

        Procedure

        Configuring via the User Interface

        diff --git a/en/end-user/kpanda/gpu/nvidia/index.html b/en/end-user/kpanda/gpu/nvidia/index.html index ca5f5afcb8..1fb687c1e8 100644 --- a/en/end-user/kpanda/gpu/nvidia/index.html +++ b/en/end-user/kpanda/gpu/nvidia/index.html @@ -10,7 +10,7 @@ -NVIDIA GPU Card Usage Modes - 豐收二號檔案站 +NVIDIA GPU Usage Modes - 豐收二號檔案站 @@ -24,7 +24,7 @@ @@ -48,7 +48,7 @@
        - NVIDIA GPU Card Usage Modes + NVIDIA GPU Usage Modes
        @@ -265,7 +265,7 @@
        -

        NVIDIA GPU Card Usage Modes

        +

        NVIDIA GPU Usage Modes

        NVIDIA, as a well-known graphics computing provider, offers various software and hardware solutions to enhance computational power. Among them, NVIDIA provides the following three solutions for GPU usage:

        Full GPU

        Full GPU refers to allocating the entire NVIDIA GPU to a single user or application. In this configuration, the application can fully occupy all the resources of the GPU and achieve maximum computational performance. Full GPU is suitable for workloads that require a large amount of computational resources and memory, such as deep learning training, scientific computing, etc.

        diff --git a/en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html b/en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html index 3831cb9630..f6dcdb3de7 100644 --- a/en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html +++ b/en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html @@ -314,7 +314,7 @@

        Offline Install gpu-operator / Prerequisites

        • The kernel version of the cluster nodes where the gpu-operator is to be deployed must be - completely consistent. The distribution and GPU card model of the nodes must fall within + completely consistent. The distribution and GPU model of the nodes must fall within the scope specified in the GPU Support Matrix.
        • When installing the gpu-operator, select v23.9.0+2 or above.
        diff --git a/en/end-user/kpanda/gpu/nvidia/mig/create_mig.html b/en/end-user/kpanda/gpu/nvidia/mig/create_mig.html index e17a200f92..0f84dc57ab 100644 --- a/en/end-user/kpanda/gpu/nvidia/mig/create_mig.html +++ b/en/end-user/kpanda/gpu/nvidia/mig/create_mig.html @@ -303,7 +303,7 @@

        Enabling MIG Features / NVIDIA GPU Card Usage Modes.

        +

        For more details, refer to the NVIDIA GPU Usage Modes.

        Prerequisites

        • Check the system requirements for the GPU driver installation on the target node: GPU Support Matrix
        • diff --git a/en/end-user/kpanda/gpu/nvidia/mig/index.html b/en/end-user/kpanda/gpu/nvidia/mig/index.html index edf262fc28..35b82ab88a 100644 --- a/en/end-user/kpanda/gpu/nvidia/mig/index.html +++ b/en/end-user/kpanda/gpu/nvidia/mig/index.html @@ -313,13 +313,13 @@

          MIG Scenarios: MIG offers increased compute power and memory capacity for training large-scale deep learning models. By partitioning the physical GPU into multiple MIG instances, each instance can independently carry out model training, improving training efficiency and throughput.

          In general, NVIDIA MIG is suitable for scenarios that require finer-grained allocation and management of GPU resources. It enables resource isolation, improved performance utilization, and meets the GPU computing needs of multiple users or applications.

          Overview of MIG

          -

          NVIDIA Multi-Instance GPU (MIG) is a new feature introduced by NVIDIA on H100, A100, and A30 series GPUs. Its purpose is to divide a physical GPU into multiple GPU instances to provide finer-grained resource sharing and isolation. MIG can split a GPU into up to seven GPU instances, allowing a single physical GPU card to provide separate GPU resources to multiple users, maximizing GPU utilization.

          +

          NVIDIA Multi-Instance GPU (MIG) is a new feature introduced by NVIDIA on H100, A100, and A30 series GPUs. Its purpose is to divide a physical GPU into multiple GPU instances to provide finer-grained resource sharing and isolation. MIG can split a GPU into up to seven GPU instances, allowing a single physical GPU to provide separate GPU resources to multiple users, maximizing GPU utilization.

          This feature enables multiple applications or users to share GPU resources simultaneously, improving the utilization of computational resources and increasing system scalability.

          With MIG, each GPU instance's processor has an independent and isolated path throughout the entire memory system, including cross-switch ports on the chip, L2 cache groups, memory controllers, and DRAM address buses, all uniquely allocated to a single instance.

          This ensures that the workload of individual users can run with predictable throughput and latency, along with identical L2 cache allocation and DRAM bandwidth. MIG can partition available GPU compute resources (such as streaming multiprocessors or SMs and GPU engines like copy engines or decoders) to provide defined quality of service (QoS) and fault isolation for different clients such as virtual machines, containers, or processes. MIG enables multiple GPU instances to run in parallel on a single physical GPU.

          MIG allows multiple vGPUs (and virtual machines) to run in parallel on a single GPU instance while retaining the isolation guarantees provided by vGPU. For more details on using vGPU and MIG for GPU partitioning, refer to NVIDIA Multi-Instance GPU and NVIDIA Virtual Compute Server.

          MIG Architecture

          -

          The following diagram provides an overview of MIG, illustrating how it virtualizes one physical GPU card into seven GPU instances that can be used by multiple users.

          +

          The following diagram provides an overview of MIG, illustrating how it virtualizes one physical GPU into seven GPU instances that can be used by multiple users.

          Important Concepts

          • SM (Streaming Multiprocessor): The core computational unit of a GPU responsible for executing graphics rendering and general-purpose computing tasks. Each SM contains a group of CUDA cores, as well as shared memory, register files, and other resources, capable of executing multiple threads concurrently. Each MIG instance has a certain number of SMs and other related resources, along with the allocated memory slices.
          • diff --git a/en/end-user/kpanda/gpu/nvidia/mig/mig_usage.html b/en/end-user/kpanda/gpu/nvidia/mig/mig_usage.html index 6ff20181e0..96457133a7 100644 --- a/en/end-user/kpanda/gpu/nvidia/mig/mig_usage.html +++ b/en/end-user/kpanda/gpu/nvidia/mig/mig_usage.html @@ -291,17 +291,17 @@

            Prerequisites / Using MIG GPU through the UI

            1. -

              Confirm if the cluster has recognized the GPU card type.

              +

              Confirm if the cluster has recognized the GPU type.

              Go to Cluster Details -> Nodes and check if it has been correctly recognized as MIG.

            2. When deploying an application using an image, you can select and use NVIDIA MIG resources.

            3. -

              Example of MIG Single Mode (used in the same way as a full GPU card):

              +

              Example of MIG Single Mode (used in the same way as a full GPU):

              Note

              -

              The MIG single policy allows users to request and use GPU resources in the same way as a full GPU card (nvidia.com/gpu). The difference is that these resources can be a portion of the GPU (MIG device) rather than the entire GPU. Learn more from the GPU MIG Mode Design.

              +

              The MIG single policy allows users to request and use GPU resources in the same way as a full GPU (nvidia.com/gpu). The difference is that these resources can be a portion of the GPU (MIG device) rather than the entire GPU. Learn more from the GPU MIG Mode Design.

            4. diff --git a/en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html b/en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html index f126c67f4e..cad0b17360 100644 --- a/en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html +++ b/en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html @@ -342,11 +342,11 @@

              Using vGPU through YAML Configuration

                  limits:
                    nvidia.com/gpucores: '20'   # Request 20% of GPU cores for each card
                    nvidia.com/gpumem: '200'    # Request 200MB of GPU memory for each card
              -     nvidia.com/vgpu: '1'        # Request 1 GPU card
              +     nvidia.com/vgpu: '1'        # Request 1 GPU
                  imagePullPolicy: Always
                  restartPolicy: Always

        -

        This YAML configuration requests the application to use vGPU resources. It specifies that each card should utilize 20% of GPU cores, 200MB of GPU memory, and requests 1 GPU card.

        +

        This YAML configuration requests the application to use vGPU resources. It specifies that each card should utilize 20% of GPU cores, 200MB of GPU memory, and requests 1 GPU.

        diff --git a/en/end-user/kpanda/namespaces/podsecurity.html b/en/end-user/kpanda/namespaces/podsecurity.html index b9c4f0040a..91a7a1f848 100644 --- a/en/end-user/kpanda/namespaces/podsecurity.html +++ b/en/end-user/kpanda/namespaces/podsecurity.html @@ -612,7 +612,7 @@

        Prerequisites: The container management module has integrated a Kubernetes cluster or created one. The cluster version must be v1.22 or above, and you should be able to access the cluster's UI.

      28. -

        A namespace has been created, a user has been created, and the user has been granted NS Admin or higher permissions. For details, refer to Namespace Authorization.

        +

        A namespace has been created, a user has been created, and the user has been granted NS Admin or higher permissions. For details, refer to Namespace Authorization.

      29. Configure Pod Security Policies for Namespace

        diff --git a/en/end-user/kpanda/network/create-ingress.html b/en/end-user/kpanda/network/create-ingress.html index 691a70bcd4..ebb4e82e3c 100644 --- a/en/end-user/kpanda/network/create-ingress.html +++ b/en/end-user/kpanda/network/create-ingress.html @@ -657,7 +657,7 @@

        Create an Ingress / Prerequisites

        diff --git a/en/end-user/kpanda/network/create-services.html b/en/end-user/kpanda/network/create-services.html index 1436c8931d..623468b922 100644 --- a/en/end-user/kpanda/network/create-services.html +++ b/en/end-user/kpanda/network/create-services.html @@ -672,7 +672,7 @@

        Prerequisites: The container management module has been connected to a Kubernetes cluster or a Kubernetes cluster has been created, and the cluster UI is accessible.

      30. -

        Completed a namespace creation, user creation, and authorize the user as NS Editor role, for details, refer to Namespace Authorization.

        +

        Completed namespace creation and user creation, and authorized the user with the NS Editor role; for details, refer to Namespace Authorization.

      31. When there are multiple containers in a single instance, make sure that the ports used by the containers do not conflict; otherwise, the deployment will fail.

        diff --git a/en/end-user/kpanda/nodes/add-node.html b/en/end-user/kpanda/nodes/add-node.html index 98d8714192..a7e8f87132 100644 --- a/en/end-user/kpanda/nodes/add-node.html +++ b/en/end-user/kpanda/nodes/add-node.html @@ -254,7 +254,7 @@

        Cluster Node Expansion

        As the number of business applications continues to grow, the resources of the cluster become increasingly tight. At this point, you can expand the cluster nodes based on kubean. After the expansion, applications can run on the newly added nodes, alleviating resource pressure.

        -

        Only clusters created through the container management module support node autoscaling. Clusters accessed from the outside do not support this operation. This article mainly introduces the expansion of worker nodes in the same architecture work cluster. If you need to add control nodes or heterogeneous work nodes to the cluster, refer to: Expanding the control node of the work cluster, Adding heterogeneous nodes to the work cluster, Expanding the worker node of the global service cluster.

        +

        Only clusters created through the container management module support node autoscaling; clusters integrated from outside do not support this operation. This article mainly introduces how to expand worker nodes in a worker cluster of the same architecture. If you need to add control nodes or heterogeneous worker nodes to the cluster, refer to: Expanding the control nodes of a worker cluster, Adding heterogeneous nodes to a worker cluster, Expanding the worker nodes of the global service cluster.

        1. On the Clusters page, click the name of the target cluster.

          diff --git a/en/end-user/kpanda/permissions/custom-kpanda-role.html b/en/end-user/kpanda/permissions/custom-kpanda-role.html index 0eeb51b990..e5447231c5 100644 --- a/en/end-user/kpanda/permissions/custom-kpanda-role.html +++ b/en/end-user/kpanda/permissions/custom-kpanda-role.html @@ -612,7 +612,7 @@

          Prerequisites: Container management v0.27.0 and above.

        2. Integrated Kubernetes cluster or created Kubernetes cluster, and able to access the cluster's UI interface.
        3. -
        4. Completed creation of a namespace and user account, +
        5. Completed creation of a namespace and a user account, and granted the user the NS Viewer role. For details, refer to namespace authorization.
        6. diff --git a/en/end-user/kpanda/workloads/create-cronjob.html b/en/end-user/kpanda/workloads/create-cronjob.html index 77fb89d308..b1879bf8c6 100644 --- a/en/end-user/kpanda/workloads/create-cronjob.html +++ b/en/end-user/kpanda/workloads/create-cronjob.html @@ -774,9 +774,9 @@

          Container settings / Cluster Settings.

          +

          Before setting an exclusive GPU, the administrator needs to install the GPU and the driver plugin on the cluster nodes in advance and enable the GPU feature in Cluster Settings.

          diff --git a/en/end-user/kpanda/workloads/create-daemonset.html b/en/end-user/kpanda/workloads/create-daemonset.html index 825c266a9f..9a0ec976f3 100644 --- a/en/end-user/kpanda/workloads/create-daemonset.html +++ b/en/end-user/kpanda/workloads/create-daemonset.html @@ -767,9 +767,9 @@

          Container settings / Cluster Settings.

          +

          Before setting an exclusive GPU, the administrator needs to install the GPU and the driver plugin on the cluster nodes in advance and enable the GPU feature in Cluster Settings.

          diff --git a/en/end-user/kpanda/workloads/create-deployment.html b/en/end-user/kpanda/workloads/create-deployment.html index 4fac8b9cb1..42eae8aad1 100644 --- a/en/end-user/kpanda/workloads/create-deployment.html +++ b/en/end-user/kpanda/workloads/create-deployment.html @@ -778,12 +778,12 @@

          Container settings / Cluster Settings.

          diff --git a/en/end-user/kpanda/workloads/create-job.html b/en/end-user/kpanda/workloads/create-job.html index 18006d21b3..2c481263ac 100644 --- a/en/end-user/kpanda/workloads/create-job.html +++ b/en/end-user/kpanda/workloads/create-job.html @@ -745,10 +745,10 @@

          Container settings / Image Pull Policy.
        7. Privileged container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and enjoy all the permissions of the running process on the host.
        8. CPU/Memory Quota: Requested value (minimum resource to be used) and limit value (maximum resource allowed to be used) of CPU/Memory resource. Please configure resources for containers as needed to avoid resource waste and system failures caused by excessive container resources. The default value is shown in the figure.
        9. -
        10. GPU Exclusive: Configure the GPU usage for the container, only positive integers are supported. The GPU quota setting supports setting exclusive use of the entire GPU card or part of the vGPU for the container. For example, for an 8-core GPU card, enter the number 8 to let the container exclusively use the entire length of the card, and enter the number 1 to configure a 1-core vGPU for the container.
        11. +
        12. GPU Exclusive: Configure the GPU usage for the container; only positive integers are supported. The GPU quota setting supports exclusive use of the entire GPU or a vGPU slice by the container. For example, for an 8-core GPU, enter 8 to let the container use the whole card exclusively, or enter 1 to configure a 1-core vGPU for the container.
        13. -

          Before setting exclusive GPU, the administrator needs to install the GPU card and driver plug-in on the cluster nodes in advance, and enable the GPU feature in Cluster Settings.

          +

          Before setting an exclusive GPU, the administrator needs to install the GPU and the driver plugin on the cluster nodes in advance and enable the GPU feature in Cluster Settings.

          diff --git a/en/end-user/kpanda/workloads/create-statefulset.html b/en/end-user/kpanda/workloads/create-statefulset.html index 165a186091..e0606ab7a5 100644 --- a/en/end-user/kpanda/workloads/create-statefulset.html +++ b/en/end-user/kpanda/workloads/create-statefulset.html @@ -760,10 +760,10 @@

          Container settings / Image Pull Policy.
        14. Privileged container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and has all the permissions of processes running on the host.
        15. CPU/Memory Quota: Requested value (the minimum resource needed) and limit value (the maximum resource allowed) of the CPU/Memory resource. Please configure resources for containers as needed to avoid resource waste and system failures caused by excessive container resources. The default value is shown in the figure.
        - 16. GPU Exclusive: Configure the GPU usage for the container; only positive integers are supported. The GPU quota setting supports setting exclusive use of an entire GPU card or part of a vGPU for the container. For example, for an 8-core GPU card, enter the number 8 to let the container exclusively use the entire card, or enter the number 1 to configure a 1-core vGPU for the container.
        + 16. GPU Exclusive: Configure the GPU usage for the container; only positive integers are supported. The GPU quota setting supports setting exclusive use of an entire GPU or part of a vGPU for the container. For example, for an 8-core GPU, enter the number 8 to let the container exclusively use the entire GPU, or enter the number 1 to configure a 1-core vGPU for the container.

          - Before setting exclusive GPU, the administrator needs to install the GPU card and driver plug-in on the cluster nodes in advance, and enable the GPU feature in Cluster Settings.
          + Before setting exclusive GPU, the administrator needs to install the GPU and driver plug-in on the cluster nodes in advance, and enable the GPU feature in Cluster Settings.

          Basic Info

diff --git a/en/openapi/baize/v0.107.4.json b/en/openapi/baize/v0.107.4.json
index b0676f6a74..228d33f2c8 100644
--- a/en/openapi/baize/v0.107.4.json
+++ b/en/openapi/baize/v0.107.4.json
@@ -5213,18 +5213,18 @@
       "properties": {
         "alias": {
           "type": "string",
-          "title": "alias is gpu card alias"
+          "title": "alias is GPU alias"
         },
         "resource": {
           "type": "array",
-          "title": "gpu card resource setting",
+          "title": "GPU resource setting",
           "items": {
             "$ref": "#/definitions/v1alpha1GPUResourceSetting"
           }
         },
         "type": {
           "type": "string",
-          "title": "type is gpu card type"
+          "title": "type is GPU type"
         }
       }
     },
diff --git a/en/openapi/kpanda/v0.32.2.json b/en/openapi/kpanda/v0.32.2.json
index e36c672080..f4202870f5 100644
--- a/en/openapi/kpanda/v0.32.2.json
+++ b/en/openapi/kpanda/v0.32.2.json
@@ -26468,18 +26468,18 @@
       "properties": {
         "type": {
           "type": "string",
-          "title": "type is gpu card type"
+          "title": "type is GPU type"
         },
         "alias": {
           "type": "string",
-          "title": "alias is gpu card alias"
+          "title": "alias is GPU alias"
         },
         "resource": {
           "type": "array",
           "items": {
             "$ref": "#/definitions/v1alpha1GPUResourceSetting"
           },
-          "title": "gpu card resource setting"
+          "title": "GPU resource setting"
         },
         "resourceTemplate": {
           "$ref": "#/definitions/v1alpha1GPUResourceSetting",
diff --git a/end-user/kpanda/backup/index.html b/end-user/kpanda/backup/index.html
index ca6e9ecfe3..7bd025a023 100644
--- a/end-user/kpanda/backup/index.html
+++ b/end-user/kpanda/backup/index.html
@@ -568,7 +568,7 @@

          Backup and Restore
        21. Application backup

          Application backup means backing up the data of a workload in the cluster and then restoring that data into the same cluster or another cluster. It supports backing up all resources under an entire namespace, as well as filtering by label selector to back up only resources that carry specific labels.

          - Application backup supports cross-cluster backup of stateful applications; for detailed steps, refer to Cross-Cluster Backup and Restore of MySQL Applications and Data.

          + Application backup supports cross-cluster backup of stateful applications; for detailed steps, refer to Cross-Cluster Backup and Restore of MySQL Applications and Data.

        22. ETCD backup

diff --git a/openapi/baize/v0.107.4.json b/openapi/baize/v0.107.4.json
index 10ab175872..025552d21e 100644
--- a/openapi/baize/v0.107.4.json
+++ b/openapi/baize/v0.107.4.json
@@ -5213,18 +5213,18 @@
       "properties": {
         "alias": {
           "type": "string",
-          "title": "alias is gpu card alias"
+          "title": "alias is GPU alias"
         },
         "resource": {
           "type": "array",
-          "title": "gpu card resource setting",
+          "title": "GPU resource setting",
           "items": {
             "$ref": "#/definitions/v1alpha1GPUResourceSetting"
           }
         },
         "type": {
           "type": "string",
-          "title": "type is gpu card type"
+          "title": "type is GPU type"
         }
       }
     },
diff --git a/openapi/kpanda/v0.32.2.json b/openapi/kpanda/v0.32.2.json
index 5440a0411f..70edcccb2f 100644
--- a/openapi/kpanda/v0.32.2.json
+++ b/openapi/kpanda/v0.32.2.json
@@ -26468,18 +26468,18 @@
       "properties": {
         "type": {
           "type": "string",
-          "title": "type is gpu card type"
+          "title": "type is GPU type"
        },
         "alias": {
           "type": "string",
-          "title": "alias is gpu card alias"
+          "title": "alias is GPU alias"
         },
         "resource": {
           "type": "array",
           "items": {
             "$ref": "#/definitions/v1alpha1GPUResourceSetting"
           },
-          "title": "gpu card resource setting"
+          "title": "GPU resource setting"
         },
         "resourceTemplate": {
           "$ref": "#/definitions/v1alpha1GPUResourceSetting",
diff --git a/search/search_index.json b/search/search_index.json
index 254db82c77..5b5d2a83ae 100644
--- a/search/search_index.json
+++ b/search/search_index.json
@@ -1 +1 @@
-{"config":{"lang":["en","zh"],"separator":"[\\s\\u200b\\u3000\\-、。,.?!;]+","pipeline":["stemmer"]},"docs":[{"location":"index.html","title":"Harvest No. 2 (豐收二號) Archive Site","text":"

          This is the archive site for the Harvest No. 2 (豐收二號) AI computing center.

          • End User Handbook: work in containerized environments with cloud hosts, develop AI algorithms, and build training and inference jobs
          • Administrator Handbook: handle O&M for containerized end users and keep the platform running stably and efficiently
          • Developer Handbook: a compilation of the OpenAPI manuals for the 5 modules

          "},{"location":"admin/index.html","title":"\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 - \u7ba1\u7406\u5458","text":"

          \u8fd9\u662f\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9762\u5411\u7ba1\u7406\u5458\u7684\u8fd0\u7ef4\u6587\u6863\u3002

          • \u4e91\u4e3b\u673a

            \u4e91\u4e3b\u673a\u662f\u90e8\u7f72\u5728\u4e91\u7aef\u7684\u865a\u62df\u673a\u3002

            • \u7ba1\u7406\u4e91\u4e3b\u673a
            • \u4e91\u4e3b\u673a vGPU
            • \u4e91\u4e3b\u673a\u6a21\u677f
            • \u4ece VMWare \u5bfc\u5165\u4e91\u4e3b\u673a
          • \u5bb9\u5668\u7ba1\u7406

            \u7ba1\u7406 K8s \u96c6\u7fa4\u3001\u8282\u70b9\u3001\u5e94\u7528\u3001\u8d44\u6e90\u548c\u6743\u9650\u3002

            • \u521b\u5efa\u96c6\u7fa4
            • \u6dfb\u52a0\u5de5\u4f5c\u8282\u70b9
            • \u7ba1\u7406 Helm \u5e94\u7528
            • HPA \u6c34\u5e73\u6269\u7f29\u5bb9
          • \u7b97\u6cd5\u5f00\u53d1

            \u7ba1\u7406 AI \u8d44\u6e90\u548c\u961f\u5217\u3002

            • \u7ba1\u7406\u8d44\u6e90
            • \u7ba1\u7406\u961f\u5217
            • AI \u8bad\u63a8\u6700\u4f73\u5b9e\u8df5
            • \u7b97\u6cd5\u5f00\u53d1\u6545\u969c\u6392\u67e5
          • \u53ef\u89c2\u6d4b\u6027

            \u4e86\u89e3\u53ef\u89c2\u6d4b\u6027\u8d44\u6e90\uff0c\u914d\u7f6e\u548c\u6545\u969c\u6392\u67e5\u3002

            • \u90e8\u7f72\u8d44\u6e90\u89c4\u5212
            • \u5b89\u88c5\u4e0e\u5347\u7ea7
            • \u517c\u5bb9\u6027\u6d4b\u8bd5
            • \u5e38\u89c1\u95ee\u9898
          • \u5168\u5c40\u7ba1\u7406

            \u7ba1\u63a7\u7528\u6237\u3001\u7528\u6237\u7ec4\u3001\u5de5\u4f5c\u7a7a\u95f4\u3001\u8d44\u6e90\u7b49\u8bbf\u95ee\u6743\u9650\u3002

            • \u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4
            • \u4e3a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u8d44\u6e90
            • \u5ba1\u8ba1\u65e5\u5fd7
            • \u5e73\u53f0\u8bbe\u7f6e

          "},{"location":"admin/baize/best-practice/add-scheduler.html","title":"\u589e\u52a0\u4efb\u52a1\u8c03\u5ea6\u5668","text":"

          5.0 AI Lab \u63d0\u4f9b\u4e86\u4efb\u52a1\u8c03\u5ea6\u5668\uff0c\u53ef\u4ee5\u5e2e\u52a9\u60a8\u66f4\u597d\u5730\u7ba1\u7406\u4efb\u52a1\uff0c\u9664\u4e86\u63d0\u4f9b\u57fa\u7840\u7684\u8c03\u5ea6\u5668\u4e4b\u5916\uff0c\u76ee\u524d\u4e5f\u652f\u6301\u7528\u6237\u81ea\u5b9a\u4e49\u8c03\u5ea6\u5668\u3002

          "},{"location":"admin/baize/best-practice/add-scheduler.html#_2","title":"\u4efb\u52a1\u8c03\u5ea6\u5668\u4ecb\u7ecd","text":"

          \u5728 Kubernetes \u4e2d\uff0c\u4efb\u52a1\u8c03\u5ea6\u5668\u8d1f\u8d23\u51b3\u5b9a\u5c06 Pod \u5206\u914d\u5230\u54ea\u4e2a\u8282\u70b9\u4e0a\u8fd0\u884c\u3002\u5b83\u8003\u8651\u591a\u79cd\u56e0\u7d20\uff0c\u5982\u8d44\u6e90\u9700\u6c42\u3001\u786c\u4ef6/\u8f6f\u4ef6\u7ea6\u675f\u3001\u4eb2\u548c\u6027/\u53cd\u4eb2\u548c\u6027\u89c4\u5219\u3001\u6570\u636e\u5c40\u90e8\u6027\u7b49\u3002

          \u9ed8\u8ba4\u8c03\u5ea6\u5668\u662f Kubernetes \u96c6\u7fa4\u4e2d\u7684\u4e00\u4e2a\u6838\u5fc3\u7ec4\u4ef6\uff0c\u8d1f\u8d23\u51b3\u5b9a\u5c06 Pod \u5206\u914d\u5230\u54ea\u4e2a\u8282\u70b9\u4e0a\u8fd0\u884c\u3002\u8ba9\u6211\u4eec\u6df1\u5165\u4e86\u89e3\u5b83\u7684\u5de5\u4f5c\u539f\u7406\u3001\u7279\u6027\u548c\u914d\u7f6e\u65b9\u6cd5\u3002

          "},{"location":"admin/baize/best-practice/add-scheduler.html#_3","title":"\u8c03\u5ea6\u5668\u7684\u5de5\u4f5c\u6d41\u7a0b","text":"

          \u9ed8\u8ba4\u8c03\u5ea6\u5668\u7684\u5de5\u4f5c\u6d41\u7a0b\u53ef\u4ee5\u5206\u4e3a\u4e24\u4e2a\u4e3b\u8981\u9636\u6bb5\uff1a\u8fc7\u6ee4\uff08Filtering\uff09\u548c\u8bc4\u5206\uff08Scoring\uff09\u3002

          "},{"location":"admin/baize/best-practice/add-scheduler.html#_4","title":"\u8fc7\u6ee4\u9636\u6bb5","text":"

          \u8c03\u5ea6\u5668\u4f1a\u904d\u5386\u6240\u6709\u8282\u70b9\uff0c\u6392\u9664\u4e0d\u6ee1\u8db3 Pod \u8981\u6c42\u7684\u8282\u70b9\uff0c\u8003\u8651\u7684\u56e0\u7d20\u5305\u62ec\uff1a

          • \u8d44\u6e90\u9700\u6c42
          • \u8282\u70b9\u9009\u62e9\u5668
          • \u8282\u70b9\u4eb2\u548c\u6027
          • \u6c61\u70b9\u548c\u5bb9\u5fcd

          \u4ee5\u4e0a\u53c2\u6570\uff0c\u6211\u4eec\u53ef\u4ee5\u901a\u8fc7\u521b\u5efa\u4efb\u52a1\u65f6\u7684\u9ad8\u7ea7\u914d\u7f6e\u6765\u8bbe\u7f6e\uff0c\u5982\u4e0b\u56fe\u6240\u793a\uff1a

          "},{"location":"admin/baize/best-practice/add-scheduler.html#_5","title":"\u8bc4\u5206\u9636\u6bb5","text":"

          \u5bf9\u901a\u8fc7\u8fc7\u6ee4\u7684\u8282\u70b9\u8fdb\u884c\u6253\u5206\uff0c\u9009\u62e9\u5f97\u5206\u6700\u9ad8\u7684\u8282\u70b9\u6765\u8fd0\u884c Pod\uff0c\u8003\u8651\u56e0\u7d20\u5305\u62ec\uff1a

          • \u8d44\u6e90\u4f7f\u7528\u7387
          • Pod \u4eb2\u548c\u6027/\u53cd\u4eb2\u548c\u6027
          • \u8282\u70b9\u4eb2\u548c\u6027\u7b49\u3002
          "},{"location":"admin/baize/best-practice/add-scheduler.html#_6","title":"\u8c03\u5ea6\u5668\u63d2\u4ef6","text":"

          \u9664\u4e86\u57fa\u7840\u7684\u4e00\u4e9b\u4efb\u52a1\u8c03\u5ea6\u80fd\u529b\u4e4b\u5916\uff0c\u6211\u4eec\u8fd8\u652f\u6301\u4f7f\u7528 Scheduler Plugins\uff1aKubernetes SIG Scheduling \u7ef4\u62a4\u7684\u4e00\u7ec4\u8c03\u5ea6\u5668\u63d2\u4ef6\uff0c\u5305\u62ec Coscheduling (Gang Scheduling) \u7b49\u529f\u80fd\u3002

          "},{"location":"admin/baize/best-practice/add-scheduler.html#_7","title":"\u90e8\u7f72\u8c03\u5ea6\u5668\u63d2\u4ef6","text":"

          \u5728\u5de5\u4f5c\u96c6\u7fa4\u4e2d\u90e8\u7f72\u7b2c\u4e8c\u8c03\u5ea6\u5668\u63d2\u4ef6\uff0c\u8bf7\u53c2\u8003\u90e8\u7f72\u7b2c\u4e8c\u8c03\u5ea6\u5668\u63d2\u4ef6\u3002

          "},{"location":"admin/baize/best-practice/add-scheduler.html#ai-lab","title":"\u5728 AI Lab \u4e2d\u542f\u7528\u8c03\u5ea6\u5668\u63d2\u4ef6","text":"

          Danger

          \u589e\u52a0\u8c03\u5ea6\u5668\u63d2\u4ef6\u82e5\u64cd\u4f5c\u4e0d\u5f53\uff0c\u53ef\u80fd\u4f1a\u5f71\u54cd\u5230\u6574\u4e2a\u96c6\u7fa4\u7684\u7a33\u5b9a\u6027\uff0c\u5efa\u8bae\u5728\u6d4b\u8bd5\u73af\u5883\u4e2d\u8fdb\u884c\u6d4b\u8bd5\uff1b\u6216\u8005\u8054\u7cfb\u6211\u4eec\u7684\u6280\u672f\u652f\u6301\u56e2\u961f\u3002

          \u6ce8\u610f\uff0c\u5982\u679c\u5e0c\u671b\u5728\u8bad\u7ec3\u4efb\u52a1\u4e2d\u4f7f\u7528\u66f4\u591a\u7684\u8c03\u5ea6\u5668\u63d2\u4ef6\uff0c\u9700\u8981\u4e8b\u5148\u624b\u5de5\u5728\u5de5\u4f5c\u96c6\u7fa4\u4e2d\u6210\u529f\u5b89\u88c5\uff0c\u7136\u540e\u5728\u96c6\u7fa4\u4e2d\u90e8\u7f72 baize-agent \u65f6\uff0c\u589e\u52a0\u5bf9\u5e94\u7684\u8c03\u5ea6\u5668\u63d2\u4ef6\u914d\u7f6e\u3002

          \u901a\u8fc7\u5bb9\u5668\u7ba1\u7406\u63d0\u4f9b\u7684\u754c\u9762 Helm \u5e94\u7528 \u7ba1\u7406\u80fd\u529b\uff0c\u53ef\u4ee5\u65b9\u4fbf\u5730\u5728\u96c6\u7fa4\u4e2d\u90e8\u7f72\u8c03\u5ea6\u5668\u63d2\u4ef6\uff0c\u5982\u4e0b\u56fe\u6240\u793a\uff1a

          \u7136\u540e\uff0c\u5728\u53f3\u4e0a\u89d2\u70b9\u51fb \u5b89\u88c5 \uff0c\uff08\u82e5\u5df2\u90e8\u7f72\u4e86 baize-agent\uff0c\u53ef\u4ee5\u5230 Helm \u5e94\u7528\u5217\u8868\u53bb\u66f4\u65b0\uff09\uff0c\u6839\u636e\u5982\u4e0b\u56fe\u6240\u793a\u7684\u914d\u7f6e\uff0c\u589e\u52a0\u8c03\u5ea6\u5668\u3002

          \u6ce8\u610f\u8c03\u5ea6\u5668\u7684\u53c2\u6570\u5c42\u7ea7\uff0c\u6dfb\u52a0\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002

          \u6ce8\u610f\u4ee5\u540e\u5728\u66f4\u65b0 baize-agent \u65f6\uff0c\u4e0d\u8981\u9057\u6f0f\u8fd9\u4e2a\u914d\u7f6e\u3002

          "},{"location":"admin/baize/best-practice/add-scheduler.html#_8","title":"\u5728\u521b\u5efa\u4efb\u52a1\u65f6\u6307\u5b9a\u8c03\u5ea6\u5668","text":"

          \u5f53\u60a8\u5728\u96c6\u7fa4\u4e2d\u6210\u529f\u90e8\u7f72\u4e86\u5bf9\u5e94\u7684\u8c03\u5ea6\u5668\u63d2\u4ef6\uff0c\u5e76\u4e14\u5728 baize-agent \u4e5f\u6b63\u786e\u589e\u52a0\u4e86\u5bf9\u5e94\u7684\u8c03\u5ea6\u5668\u914d\u7f6e\u540e\uff0c\u53ef\u4ee5\u5728\u521b\u5efa\u4efb\u52a1\u65f6\uff0c\u6307\u5b9a\u8c03\u5ea6\u5668\u3002

          \u4e00\u5207\u6b63\u5e38\u7684\u60c5\u51b5\u4e0b\uff0c\u60a8\u53ef\u4ee5\u5728\u8c03\u5ea6\u5668\u4e0b\u62c9\u6846\u4e2d\u770b\u5230\u60a8\u90e8\u7f72\u7684\u8c03\u5ea6\u5668\u63d2\u4ef6\u3002

          \u4ee5\u4e0a\uff0c\u5c31\u662f\u6211\u4eec\u5728 AI Lab \u4e2d\uff0c\u4e3a\u4efb\u52a1\u589e\u52a0\u8c03\u5ea6\u5668\u9009\u9879\u7684\u914d\u7f6e\u4f7f\u7528\u8bf4\u660e\u3002
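
          For orientation beyond the UI flow: in Kubernetes terms, picking a scheduler for a workload comes down to setting spec.schedulerName on the Pod. The sketch below is our own illustration (not part of the original walkthrough) using the official kubernetes Python client; the scheduler name my-scheduler-plugin is a placeholder and must match a scheduler plugin actually deployed in your cluster.

# Minimal sketch, assuming the `kubernetes` Python client is installed and a
# scheduler named "my-scheduler-plugin" (placeholder) is running in the cluster.
from kubernetes import client, config

config.load_kube_config()  # load credentials from the local kubeconfig

pod = client.V1Pod(
    metadata=client.V1ObjectMeta(name="demo-custom-scheduled"),
    spec=client.V1PodSpec(
        scheduler_name="my-scheduler-plugin",  # rendered as spec.schedulerName
        containers=[client.V1Container(
            name="demo",
            image="busybox",
            command=["sleep", "3600"],
        )],
    ),
)
client.CoreV1Api().create_namespaced_pod(namespace="default", body=pod)

          If the named scheduler is not actually running, the Pod simply stays Pending, which is a quick way to verify that the plugin was picked up.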

          "},{"location":"admin/baize/best-practice/change-notebook-image.html","title":"\u66f4\u65b0 Notebook \u5185\u7f6e\u955c\u50cf","text":"

          \u5728 Notebook \u4e2d\uff0c\u9ed8\u8ba4\u63d0\u4f9b\u4e86\u591a\u4e2a\u53ef\u7528\u7684\u57fa\u7840\u955c\u50cf\uff0c\u4f9b\u5f00\u53d1\u8005\u9009\u62e9\uff1b\u5927\u90e8\u5206\u60c5\u51b5\u4e0b\uff0c\u8fd9\u4f1a\u6ee1\u8db3\u5f00\u53d1\u8005\u7684\u4f7f\u7528\u3002

          \u7b97\u4e30\u63d0\u4f9b\u4e86\u4e00\u4e2a\u9ed8\u8ba4\u7684 Notebook \u955c\u50cf\uff0c\u5305\u542b\u4e86\u6240\u9700\u7684\u4efb\u4f55\u5f00\u53d1\u5de5\u5177\u548c\u8d44\u6599\u3002

          baize/baize-notebook\n

          \u8fd9\u4e2a Notebook \u91cc\u9762\u5305\u542b\u4e86\u57fa\u7840\u7684\u5f00\u53d1\u5de5\u5177\uff0c\u4ee5 baize-notebook:v0.5.0 \uff082024 \u5e74 5 \u6708 30 \u65e5\uff09\u4e3a\u4f8b\uff0c\u76f8\u5173\u4f9d\u8d56\u53ca\u7248\u672c\u5982\u4e0b\uff1a

          \u4f9d\u8d56 \u7248\u672c\u7f16\u53f7 \u4ecb\u7ecd Ubuntu 22.04.3 \u9ed8\u8ba4 OS Python 3.11.6 \u9ed8\u8ba4 Python \u7248\u672c pip 23.3.1 conda(mamba) 23.3.1 jupyterlab 3.6.6 JupyterLab \u955c\u50cf\uff0c\u63d0\u4f9b\u5b8c\u6574\u7684 Notebook \u5f00\u53d1\u4f53\u9a8c codeserver v4.89.1 \u4e3b\u6d41 Code \u5f00\u53d1\u5de5\u5177\uff0c\u65b9\u4fbf\u7528\u6237\u4f7f\u7528\u719f\u6089\u7684\u5de5\u5177\u8fdb\u884c\u5f00\u53d1\u4f53\u9a8c *baizectl v0.5.0 \u7b97\u4e30\u5185\u7f6e CLI \u4efb\u52a1\u7ba1\u7406\u5de5\u5177 *SSH - \u652f\u6301\u672c\u5730 SSH \u76f4\u63a5\u8bbf\u95ee\u5230 Notebook \u5bb9\u5668\u5185 *kubectl v1.27 Kubernetes CLI\uff0c\u53ef\u4ee5\u4f7f\u7528 kubectl \u5728 Notebook \u5185 \u7ba1\u7406\u5bb9\u5668\u8d44\u6e90

          Sometimes, however, users need a custom image. This article describes how to update an image and add it to the Notebook creation UI for selection.

          "},{"location":"admin/baize/best-practice/change-notebook-image.html#_1","title":"Build a Custom Image (For Reference Only)","text":"

          Note

          A new image must be built on top of baize-notebook as the base image to guarantee that the Notebook runs properly.

          When building a custom image, it is recommended to first study the Dockerfile of the baize-notebook image to better understand how to build your own.

          "},{"location":"admin/baize/best-practice/change-notebook-image.html#baize-noteboook-dockerfile","title":"The Dockerfile of baize-notebook","text":"
ARG BASE_IMG=docker.m.daocloud.io/kubeflownotebookswg/jupyter:v1.8.0

FROM $BASE_IMG

USER root

# install - useful linux packages
RUN export DEBIAN_FRONTEND=noninteractive \
 && apt-get -yq update \
 && apt-get -yq install --no-install-recommends \
    openssh-server git git-lfs bash-completion \
 && apt-get clean \
 && rm -rf /var/lib/apt/lists/*

# remove default s6 jupyterlab run script
RUN rm -rf /etc/services.d/jupyterlab

# install - useful jupyter plugins
RUN mamba install -n base -y jupyterlab-language-pack-zh-cn \
  && mamba clean --all -y

ARG CODESERVER_VERSION=4.89.1
ARG TARGETARCH

RUN curl -fsSL "https://github.com/coder/code-server/releases/download/v$CODESERVER_VERSION/code-server_${CODESERVER_VERSION}_$TARGETARCH.deb" -o /tmp/code-server.deb \
  && dpkg -i /tmp/code-server.deb \
  && rm -f /tmp/code-server.deb

ARG CODESERVER_PYTHON_VERSION=2024.4.1
ARG CODESERVER_JUPYTER_VERSION=2024.3.1
ARG CODESERVER_LANGUAGE_PACK_ZH_CN=1.89.0
ARG CODESERVER_YAML=1.14.0
ARG CODESERVER_DOTENV=1.0.1
ARG CODESERVER_EDITORCONFIG=0.16.6
ARG CODESERVER_TOML=0.19.1
ARG CODESERVER_GITLENS=15.0.4

# configure for code-server extensions
# https://github.com/kubeflow/kubeflow/blob/709254159986d2cc99e675d0fad5a128ddeb0917/components/example-notebook-servers/codeserver-python/Dockerfile
# and
# https://github.com/kubeflow/kubeflow/blob/709254159986d2cc99e675d0fad5a128ddeb0917/components/example-notebook-servers/codeserver/Dockerfile
RUN code-server --list-extensions --show-versions \
  && code-server --list-extensions --show-versions \
  && code-server \
    --install-extension MS-CEINTL.vscode-language-pack-zh-hans@$CODESERVER_LANGUAGE_PACK_ZH_CN \
    --install-extension ms-python.python@$CODESERVER_PYTHON_VERSION \
    --install-extension ms-toolsai.jupyter@$CODESERVER_JUPYTER_VERSION \
    --install-extension redhat.vscode-yaml@$CODESERVER_YAML \
    --install-extension mikestead.dotenv@$CODESERVER_DOTENV \
    --install-extension EditorConfig.EditorConfig@$CODESERVER_EDITORCONFIG \
    --install-extension tamasfe.even-better-toml@$CODESERVER_TOML \
    --install-extension eamodio.gitlens@$CODESERVER_GITLENS \
    --install-extension catppuccin.catppuccin-vsc-pack \
    --force \
  && code-server --list-extensions --show-versions

# configure for code-server
RUN mkdir -p /home/${NB_USER}/.local/share/code-server/User \
  && chown -R ${NB_USER}:users /home/${NB_USER} \
  && cat <<EOF > /home/${NB_USER}/.local/share/code-server/User/settings.json
{
  "gitlens.showWelcomeOnInstall": false,
  "workbench.colorTheme": "Catppuccin Mocha",
}
EOF

RUN mkdir -p /tmp_home/${NB_USER}/.local/share \
  && mv /home/${NB_USER}/.local/share/code-server /tmp_home/${NB_USER}/.local/share

# set ssh configuration
RUN mkdir -p /run/sshd \
 && chown -R ${NB_USER}:users /etc/ssh \
 && chown -R ${NB_USER}:users /run/sshd \
 && sed -i "/#\?Port/s/^.*$/Port 2222/g" /etc/ssh/sshd_config \
 && sed -i "/#\?PasswordAuthentication/s/^.*$/PasswordAuthentication no/g" /etc/ssh/sshd_config \
 && sed -i "/#\?PubkeyAuthentication/s/^.*$/PubkeyAuthentication yes/g" /etc/ssh/sshd_config \
 && rclone_version=v1.65.0 && \
       arch=$(uname -m | sed -E 's/x86_64/amd64/g;s/aarch64/arm64/g') && \
       filename=rclone-${rclone_version}-linux-${arch} && \
       curl -fsSL https://github.com/rclone/rclone/releases/download/${rclone_version}/${filename}.zip -o ${filename}.zip && \
       unzip ${filename}.zip && mv ${filename}/rclone /usr/local/bin && rm -rf ${filename} ${filename}.zip

# Init mamba
RUN mamba init --system

# init baize-base environment for essential python packages
RUN mamba create -n baize-base -y python \
  && /opt/conda/envs/baize-base/bin/pip install tensorboard \
  && mamba clean --all -y \
  && ln -s /opt/conda/envs/baize-base/bin/tensorboard /usr/local/bin/tensorboard

# prepare baize-runtime-env directory
RUN mkdir -p /opt/baize-runtime-env \
  && chown -R ${NB_USER}:users /opt/baize-runtime-env

ARG APP
ARG PROD_NAME
ARG TARGETOS

COPY out/$TARGETOS/$TARGETARCH/data-loader /usr/local/bin/
COPY out/$TARGETOS/$TARGETARCH/baizectl /usr/local/bin/

RUN chmod +x /usr/local/bin/baizectl /usr/local/bin/data-loader && \
    echo "source /etc/bash_completion" >> /opt/conda/etc/profile.d/conda.sh && \
    echo "source <(baizectl completion bash)" >> /opt/conda/etc/profile.d/conda.sh && \
    echo "source <(kubectl completion bash)" >> /opt/conda/etc/profile.d/conda.sh && \
    echo '[ -f /run/baize-env ] && export $(cat /run/baize-env | xargs)' >> /opt/conda/etc/profile.d/conda.sh && \
    echo 'alias conda="mamba"' >> /opt/conda/etc/profile.d/conda.sh

USER ${NB_UID}
          "},{"location":"admin/baize/best-practice/change-notebook-image.html#_2","title":"\u6784\u5efa\u4f60\u7684\u955c\u50cf","text":"
          ARG BASE_IMG=release.daocloud.io/baize/baize-notebook:v0.5.0\n\nFROM $BASE_IMG\nUSER root\n\n# Do Customization\nRUN mamba install -n baize-base -y pytorch torchvision torchaudio cpuonly -c pytorch \\\n && mamba install -n baize-base -y tensorflow \\\n && mamba clean --all -y\n\nUSER ${NB_UID}\n
          "},{"location":"admin/baize/best-practice/change-notebook-image.html#notebook-helm","title":"\u589e\u52a0\u5230 Notebook \u955c\u50cf\u5217\u8868\uff08Helm\uff09","text":"

          Warning

          \u6ce8\u610f\uff0c\u5fc5\u987b\u7531\u5e73\u53f0\u7ba1\u7406\u5458\u64cd\u4f5c\uff0c\u8c28\u614e\u53d8\u66f4\u3002

          \u76ee\u524d\uff0c\u955c\u50cf\u9009\u62e9\u5668\u9700\u8981\u901a\u8fc7\u66f4\u65b0 baize \u7684 Helm \u53c2\u6570\u6765\u4fee\u6539\uff0c\u5177\u4f53\u6b65\u9aa4\u5982\u4e0b\uff1a

          \u5728 kpanda-global-cluster \u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684 Helm \u5e94\u7528\u5217\u8868\uff0c\u627e\u5230 baize\uff0c\u8fdb\u5165\u66f4\u65b0\u9875\u9762\uff0c\u5728 YAML \u53c2\u6570\u4e2d\u4fee\u6539 Notebook \u955c\u50cf\uff1a

          \u6ce8\u610f\u53c2\u6570\u4fee\u6539\u7684\u8def\u5f84\u5982\u4e0b global.config.notebook_images\uff1a

          ...\nglobal:\n  ...\n  config:\n    notebook_images:\n      ...\n      names: release.daocloud.io/baize/baize-notebook:v0.5.0\n      # \u5728\u8fd9\u91cc\u589e\u52a0\u4f60\u7684\u955c\u50cf\u4fe1\u606f\n

          \u66f4\u65b0\u5b8c\u6210\u4e4b\u540e\uff0c\u5f85 Helm \u5e94\u7528\u91cd\u542f\u6210\u529f\u4e4b\u540e\uff0c\u53ef\u4ee5\u5728 Notebook \u521b\u5efa\u754c\u9762\u4e2d\u7684\u9009\u62e9\u955c\u50cf\u770b\u5230\u65b0\u7684\u955c\u50cf\u3002

          "},{"location":"admin/baize/best-practice/checkpoint.html","title":"Checkpoint \u673a\u5236\u53ca\u4f7f\u7528\u4ecb\u7ecd","text":"

          \u5728\u6df1\u5ea6\u5b66\u4e60\u7684\u5b9e\u9645\u573a\u666f\u4e2d\uff0c\u6a21\u578b\u8bad\u7ec3\u4e00\u822c\u90fd\u4f1a\u6301\u7eed\u4e00\u6bb5\u65f6\u95f4\uff0c\u8fd9\u5bf9\u5206\u5e03\u5f0f\u8bad\u7ec3\u4efb\u52a1\u7684\u7a33\u5b9a\u6027\u548c\u6548\u7387\u63d0\u51fa\u4e86\u66f4\u9ad8\u7684\u8981\u6c42\u3002 \u800c\u4e14\uff0c\u5728\u5b9e\u9645\u8bad\u7ec3\u7684\u8fc7\u7a0b\u4e2d\uff0c\u5f02\u5e38\u4e2d\u65ad\u4f1a\u5bfc\u81f4\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u7684\u6a21\u578b\u72b6\u6001\u4e22\u5931\uff0c\u9700\u8981\u91cd\u65b0\u5f00\u59cb\u8bad\u7ec3\uff0c \u8fd9\u4e0d\u4ec5\u6d6a\u8d39\u4e86\u65f6\u95f4\u548c\u8d44\u6e90\uff0c\u8fd9\u5728 LLM \u8bad\u7ec3\u4e2d\u5c24\u4e3a\u660e\u663e\uff0c\u800c\u4e14\u4e5f\u4f1a\u5f71\u54cd\u6a21\u578b\u7684\u8bad\u7ec3\u6548\u679c\u3002

          \u80fd\u591f\u5728\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u4fdd\u5b58\u6a21\u578b\u7684\u72b6\u6001\uff0c\u4ee5\u4fbf\u5728\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u5f02\u5e38\u65f6\u80fd\u591f\u6062\u590d\u6a21\u578b\u72b6\u6001\uff0c\u53d8\u5f97\u81f3\u5173\u91cd\u8981\u3002 Checkpoint \u5c31\u662f\u76ee\u524d\u4e3b\u6d41\u7684\u89e3\u51b3\u65b9\u6848\uff0c\u672c\u6587\u5c06\u4ecb\u7ecd Checkpoint \u673a\u5236\u7684\u57fa\u672c\u6982\u5ff5\u548c\u5728 PyTorch \u548c TensorFlow \u4e2d\u7684\u4f7f\u7528\u65b9\u6cd5\u3002

          "},{"location":"admin/baize/best-practice/checkpoint.html#checkpoint_1","title":"\u4ec0\u4e48\u662f Checkpoint\uff1f","text":"

          Checkpoint \u662f\u5728\u6a21\u578b\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u4fdd\u5b58\u6a21\u578b\u72b6\u6001\u7684\u673a\u5236\u3002\u901a\u8fc7\u5b9a\u671f\u4fdd\u5b58 Checkpoint\uff0c\u53ef\u4ee5\u5728\u4ee5\u4e0b\u60c5\u51b5\u4e0b\u6062\u590d\u6a21\u578b\uff1a

          • \u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u65ad\uff08\u5982\u7cfb\u7edf\u5d29\u6e83\u6216\u624b\u52a8\u4e2d\u65ad\uff09
          • \u9700\u8981\u5728\u67d0\u4e2a\u8bad\u7ec3\u9636\u6bb5\u8fdb\u884c\u8bc4\u4f30
          • \u5e0c\u671b\u5728\u4e0d\u540c\u7684\u5b9e\u9a8c\u4e2d\u590d\u7528\u6a21\u578b
          "},{"location":"admin/baize/best-practice/checkpoint.html#pytorch","title":"PyTorch","text":"

          \u5728 PyTorch \u4e2d\uff0ctorch.save \u548c torch.load \u662f\u7528\u4e8e\u4fdd\u5b58\u548c\u52a0\u8f7d\u6a21\u578b\u7684\u57fa\u672c\u51fd\u6570\u3002

          "},{"location":"admin/baize/best-practice/checkpoint.html#pytorch-checkpoint","title":"PyTorch \u4fdd\u5b58 Checkpoint","text":"

          \u5728 PyTorch \u4e2d\uff0c\u901a\u5e38\u4f7f\u7528 state_dict \u4fdd\u5b58\u6a21\u578b\u7684\u53c2\u6570\u3002\u4ee5\u4e0b\u662f\u4e00\u4e2a\u7b80\u5355\u7684\u793a\u4f8b\uff1a

import torch
import torch.nn as nn

# Suppose we have a simple neural network
class SimpleModel(nn.Module):
    def __init__(self):
        super(SimpleModel, self).__init__()
        self.fc = nn.Linear(10, 2)

    def forward(self, x):
        return self.fc(x)

# Initialize the model and optimizer
model = SimpleModel()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# Train the model...
# Save a checkpoint
checkpoint_path = 'model_checkpoint.pth'
torch.save({
    'epoch': 10,
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
    'loss': 0.02,
}, checkpoint_path)
          "},{"location":"admin/baize/best-practice/checkpoint.html#pytorch-checkpoint_1","title":"PyTorch \u6062\u590d Checkpoint","text":"

          \u52a0\u8f7d\u6a21\u578b\u65f6\uff0c\u9700\u8981\u6062\u590d\u6a21\u578b\u53c2\u6570\u548c\u4f18\u5316\u5668\u72b6\u6001\uff0c\u5e76\u7ee7\u7eed\u8bad\u7ec3\u6216\u63a8\u7406\uff1a

          # \u6062\u590d Checkpoint\ncheckpoint = torch.load('model_checkpoint.pth')\nmodel.load_state_dict(checkpoint['model_state_dict'])\noptimizer.load_state_dict(checkpoint['optimizer_state_dict'])\nepoch = checkpoint['epoch']\nloss = checkpoint['loss']\n\n# \u7ee7\u7eed\u8bad\u7ec3\u6216\u63a8\u7406...\n
          • model_state_dict: \u6a21\u578b\u53c2\u6570
          • optimizer_state_dict: \u4f18\u5316\u5668\u72b6\u6001
          • epoch: \u5f53\u524d\u8bad\u7ec3\u8f6e\u6570
          • loss: \u635f\u5931\u503c
          • learning_rate: \u5b66\u4e60\u7387
          • best_accuracy: \u6700\u4f73\u51c6\u786e\u7387
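
          Putting save and restore together: a training loop typically probes for an existing checkpoint at startup and resumes from epoch + 1. The following is an illustrative, self-contained sketch (not from the original text); the tiny model and train_one_epoch below stand in for the real SimpleModel and training step above.

import os
import torch
import torch.nn as nn

model = nn.Linear(10, 2)  # stand-in for the SimpleModel above
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
checkpoint_path = 'model_checkpoint.pth'

def train_one_epoch(model, optimizer):
    # Dummy step on random data; replace with your real training loop.
    x, y = torch.randn(32, 10), torch.randn(32, 2)
    loss = nn.functional.mse_loss(model(x), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()

start_epoch = 0
if os.path.exists(checkpoint_path):
    ckpt = torch.load(checkpoint_path)
    model.load_state_dict(ckpt['model_state_dict'])
    optimizer.load_state_dict(ckpt['optimizer_state_dict'])
    start_epoch = ckpt['epoch'] + 1  # resume after the last saved epoch

for epoch in range(start_epoch, 20):
    loss = train_one_epoch(model, optimizer)
    torch.save({
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'loss': loss,
    }, checkpoint_path)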
          "},{"location":"admin/baize/best-practice/checkpoint.html#tensorflow","title":"TensorFlow","text":"

          TensorFlow \u63d0\u4f9b\u4e86 tf.train.Checkpoint \u7c7b\u6765\u7ba1\u7406\u6a21\u578b\u548c\u4f18\u5316\u5668\u7684\u4fdd\u5b58\u548c\u6062\u590d\u3002

          "},{"location":"admin/baize/best-practice/checkpoint.html#tensorflow-checkpoint","title":"TensorFlow \u4fdd\u5b58 Checkpoint","text":"

          \u4ee5\u4e0b\u662f\u4e00\u4e2a\u5728 TensorFlow \u4e2d\u4fdd\u5b58 Checkpoint \u7684\u793a\u4f8b\uff1a

          import tensorflow as tf\n\n# \u5047\u8bbe\u6211\u4eec\u6709\u4e00\u4e2a\u7b80\u5355\u7684\u6a21\u578b\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Dense(2, input_shape=(10,))\n])\noptimizer = tf.keras.optimizers.Adam(learning_rate=0.001)\n\n# \u5b9a\u4e49 Checkpoint\ncheckpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\ncheckpoint_dir = './checkpoints'\ncheckpoint_prefix = f'{checkpoint_dir}/ckpt'\n\n# \u8bad\u7ec3\u6a21\u578b...\n# \u4fdd\u5b58 Checkpoint\ncheckpoint.save(file_prefix=checkpoint_prefix)\n

          Note

          AI Lab users can mount high-performance storage as the checkpoint directory to speed up saving and restoring checkpoints.

          "},{"location":"admin/baize/best-practice/checkpoint.html#tensorflow-checkpoint_1","title":"Restoring a Checkpoint in TensorFlow","text":"

          Load the checkpoint and restore the model and optimizer state:

# Restore the checkpoint
latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
checkpoint.restore(latest_checkpoint)

# Continue training or inference...
          "},{"location":"admin/baize/best-practice/checkpoint.html#tensorflow-checkpoint_2","title":"TensorFlow \u5728\u5206\u5e03\u5f0f\u8bad\u7ec3\u7684 Checkpoint \u7ba1\u7406","text":"

          TensorFlow \u5728\u5206\u5e03\u5f0f\u8bad\u7ec3\u4e2d\u7ba1\u7406 Checkpoint \u7684\u4e3b\u8981\u65b9\u6cd5\u5982\u4e0b\uff1a

          • \u4f7f\u7528 tf.train.Checkpoint \u548c tf.train.CheckpointManager

            checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)\nmanager = tf.train.CheckpointManager(checkpoint, directory='/tmp/model', max_to_keep=3)\n
          • \u5728\u5206\u5e03\u5f0f\u7b56\u7565\u4e2d\u4fdd\u5b58 Checkpoint

            strategy = tf.distribute.MirroredStrategy()\nwith strategy.scope():\n    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)\n    manager = tf.train.CheckpointManager(checkpoint, directory='/tmp/model', max_to_keep=3)\n
          • \u53ea\u5728\u4e3b\u8282\u70b9 (chief worker) \u4fdd\u5b58 Checkpoint

            if strategy.cluster_resolver.task_type == 'chief':\n    manager.save()\n
          • \u4f7f\u7528 MultiWorkerMirroredStrategy \u65f6\u7684\u7279\u6b8a\u5904\u7406

            strategy = tf.distribute.MultiWorkerMirroredStrategy()\nwith strategy.scope():\n    # \u6a21\u578b\u5b9a\u4e49\n    ...\n    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)\n    manager = tf.train.CheckpointManager(checkpoint, '/tmp/model', max_to_keep=3)\n\ndef _chief_worker(task_type, task_id):\n    return task_type is None or task_type == 'chief' or (task_type == 'worker' and task_id == 0)\n\nif _chief_worker(strategy.cluster_resolver.task_type, strategy.cluster_resolver.task_id):\n    manager.save()\n
          • \u4f7f\u7528\u5206\u5e03\u5f0f\u6587\u4ef6\u7cfb\u7edf

            \u786e\u4fdd\u6240\u6709\u5de5\u4f5c\u8282\u70b9\u90fd\u80fd\u8bbf\u95ee\u5230\u540c\u4e00\u4e2a Checkpoint \u76ee\u5f55\uff0c\u901a\u5e38\u4f7f\u7528\u5206\u5e03\u5f0f\u6587\u4ef6\u7cfb\u7edf\u5982 HDFS \u6216 GCS\u3002

          • \u5f02\u6b65\u4fdd\u5b58

            \u4f7f\u7528 tf.keras.callbacks.ModelCheckpoint \u5e76\u8bbe\u7f6e save_freq \u53c2\u6570\u53ef\u4ee5\u5728\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u5f02\u6b65\u4fdd\u5b58 Checkpoint\u3002

          • Checkpoint \u6062\u590d

            status = checkpoint.restore(manager.latest_checkpoint)\nstatus.assert_consumed()  # (1)!\n
            1. \u786e\u4fdd\u6240\u6709\u53d8\u91cf\u90fd\u88ab\u6062\u590d
          • \u6027\u80fd\u4f18\u5316

            • \u4f7f\u7528 tf.train.experimental.enable_mixed_precision_graph_rewrite() \u542f\u7528\u6df7\u5408\u7cbe\u5ea6\u8bad\u7ec3
            • \u8c03\u6574\u4fdd\u5b58\u9891\u7387\uff0c\u907f\u514d\u8fc7\u4e8e\u9891\u7e41\u7684 I/O \u64cd\u4f5c
            • \u8003\u8651\u4f7f\u7528 tf.saved_model.save() \u4fdd\u5b58\u6574\u4e2a\u6a21\u578b\uff0c\u800c\u4e0d\u4ec5\u4ec5\u662f\u6743\u91cd
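
          To make the asynchronous/periodic saving point concrete, here is a minimal Keras sketch of our own (random stand-in data; not from the original text): an integer save_freq makes ModelCheckpoint write weights every that many training batches instead of every epoch. The filepath pattern is an assumption; older TF 2.x versions accept any path, while recent Keras expects a .weights.h5 suffix when save_weights_only=True.

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(10,))])
model.compile(optimizer='adam', loss='mse')

# Save weights every 100 training batches; {epoch} is filled in by the callback.
ckpt_cb = tf.keras.callbacks.ModelCheckpoint(
    filepath='./checkpoints/ckpt-{epoch:02d}.weights.h5',
    save_weights_only=True,
    save_freq=100,
)

x = tf.random.normal((1000, 10))
y = tf.random.normal((1000, 2))
model.fit(x, y, epochs=5, batch_size=8, callbacks=[ckpt_cb])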
          "},{"location":"admin/baize/best-practice/checkpoint.html#_1","title":"\u6ce8\u610f\u4e8b\u9879","text":"
          1. \u5b9a\u671f\u4fdd\u5b58\uff1a\u6839\u636e\u8bad\u7ec3\u65f6\u95f4\u548c\u8d44\u6e90\u6d88\u8017\uff0c\u51b3\u5b9a\u5408\u9002\u7684\u4fdd\u5b58\u9891\u7387\u3002\u5982\u6bcf\u4e2a epoch \u6216\u6bcf\u9694\u4e00\u5b9a\u7684\u8bad\u7ec3\u6b65\u6570\u3002

          2. \u4fdd\u5b58\u591a\u4e2a Checkpoint\uff1a\u4fdd\u7559\u6700\u65b0\u7684\u51e0\u4e2a Checkpoint \u4ee5\u9632\u6b62\u6587\u4ef6\u635f\u574f\u6216\u4e0d\u9002\u7528\u7684\u60c5\u51b5\u3002

          3. \u8bb0\u5f55\u5143\u6570\u636e\uff1a\u5728 Checkpoint \u4e2d\u4fdd\u5b58\u989d\u5916\u7684\u4fe1\u606f\uff0c\u5982 epoch \u6570\u3001\u635f\u5931\u503c\u7b49\uff0c\u4ee5\u4fbf\u66f4\u597d\u5730\u6062\u590d\u8bad\u7ec3\u72b6\u6001\u3002

          4. \u4f7f\u7528\u7248\u672c\u63a7\u5236\uff1a\u4fdd\u5b58\u4e0d\u540c\u5b9e\u9a8c\u7684 Checkpoint\uff0c\u4fbf\u4e8e\u5bf9\u6bd4\u548c\u590d\u7528\u3002

          5. \u9a8c\u8bc1\u548c\u6d4b\u8bd5\uff1a\u5728\u8bad\u7ec3\u7684\u4e0d\u540c\u9636\u6bb5\u4f7f\u7528 Checkpoint \u8fdb\u884c\u9a8c\u8bc1\u548c\u6d4b\u8bd5\uff0c\u786e\u4fdd\u6a21\u578b\u6027\u80fd\u548c\u7a33\u5b9a\u6027\u3002
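
          Points 1 and 2 can be combined in a few lines of PyTorch. The helper below is an illustrative sketch (the directory layout and file naming are our assumptions): it saves every save_every epochs and prunes everything but the newest keep_last files.

import glob
import os
import torch

def save_and_prune(state, ckpt_dir, epoch, save_every=1, keep_last=3):
    """Save a checkpoint every `save_every` epochs and keep only the newest `keep_last`."""
    if epoch % save_every != 0:
        return
    os.makedirs(ckpt_dir, exist_ok=True)
    torch.save(state, os.path.join(ckpt_dir, f'ckpt-{epoch:06d}.pth'))
    # Zero-padded epoch numbers make a lexical sort chronological.
    ckpts = sorted(glob.glob(os.path.join(ckpt_dir, 'ckpt-*.pth')))
    for old in ckpts[:-keep_last]:
        os.remove(old)

          Call it at the end of each epoch with state = {'epoch': epoch, 'model_state_dict': model.state_dict(), ...}, as in the earlier examples.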

          "},{"location":"admin/baize/best-practice/checkpoint.html#_2","title":"\u7ed3\u8bba","text":"

          Checkpoint \u673a\u5236\u5728\u6df1\u5ea6\u5b66\u4e60\u8bad\u7ec3\u4e2d\u8d77\u5230\u4e86\u5173\u952e\u4f5c\u7528\u3002\u901a\u8fc7\u5408\u7406\u4f7f\u7528 PyTorch \u548c TensorFlow \u4e2d\u7684 Checkpoint \u529f\u80fd\uff0c \u53ef\u4ee5\u6709\u6548\u63d0\u9ad8\u8bad\u7ec3\u7684\u53ef\u9760\u6027\u548c\u6548\u7387\u3002\u5e0c\u671b\u672c\u6587\u6240\u8ff0\u7684\u65b9\u6cd5\u548c\u6700\u4f73\u5b9e\u8df5\u80fd\u5e2e\u52a9\u4f60\u66f4\u597d\u5730\u7ba1\u7406\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\u7684\u8bad\u7ec3\u8fc7\u7a0b\u3002

          "},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html","title":"\u90e8\u7f72 NFS \u505a\u6570\u636e\u96c6\u9884\u70ed","text":"

          \u7f51\u7edc\u6587\u4ef6\u7cfb\u7edf (NFS) \u5141\u8bb8\u8fdc\u7a0b\u4e3b\u673a\u901a\u8fc7\u7f51\u7edc\u6302\u8f7d\u6587\u4ef6\uff0c\u5e76\u50cf\u672c\u5730\u6587\u4ef6\u7cfb\u7edf\u4e00\u6837\u8fdb\u884c\u4ea4\u4e92\u3002 \u8fd9\u4f7f\u7cfb\u7edf\u7ba1\u7406\u5458\u80fd\u591f\u5c06\u8d44\u6e90\u96c6\u4e2d\u5230\u7f51\u7edc\u670d\u52a1\u5668\u4e0a\u8fdb\u884c\u7ba1\u7406\u3002

          \u6570\u636e\u96c6 \u662f AI Lab \u4e2d\u7684\u6838\u5fc3\u6570\u636e\u7ba1\u7406\u529f\u80fd\uff0c\u5c06 MLOps \u751f\u547d\u5468\u671f\u4e2d\u5bf9\u4e8e\u6570\u636e\u7684\u4f9d\u8d56\u7edf\u4e00\u62bd\u8c61\u4e3a\u6570\u636e\u96c6\uff1b \u652f\u6301\u7528\u6237\u5c06\u5404\u7c7b\u6570\u636e\u7eb3\u7ba1\u5230\u6570\u636e\u96c6\u5185\uff0c\u4ee5\u4fbf\u8bad\u7ec3\u4efb\u52a1\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u6570\u636e\u96c6\u4e2d\u7684\u6570\u636e\u3002

          \u5f53\u8fdc\u7aef\u6570\u636e\u4e0d\u5728\u5de5\u4f5c\u96c6\u7fa4\u5185\u65f6\uff0c\u6570\u636e\u96c6\u63d0\u4f9b\u4e86\u81ea\u52a8\u8fdb\u884c\u9884\u70ed\u7684\u80fd\u529b\uff0c\u652f\u6301 Git\u3001S3\u3001HTTP \u7b49\u6570\u636e\u63d0\u524d\u9884\u70ed\u5230\u96c6\u7fa4\u672c\u5730\u3002

          \u6570\u636e\u96c6\u9700\u8981\u4e00\u4e2a\u652f\u6301 ReadWriteMany \u6a21\u5f0f\u7684\u5b58\u50a8\u670d\u52a1\u5bf9\u8fdc\u7aef\u6570\u636e\u8fdb\u884c\u9884\u70ed\uff0c\u63a8\u8350\u5728\u96c6\u7fa4\u5185\u90e8\u7f72 NFS\u3002

          \u672c\u6587\u4e3b\u8981\u4ecb\u7ecd\u4e86\u5982\u4f55\u5feb\u901f\u90e8\u7f72\u4e00\u4e2a NFS \u670d\u52a1\uff0c\u5e76\u5c06\u5176\u6dfb\u52a0\u4e3a\u96c6\u7fa4\u7684\u5b58\u50a8\u7c7b\u3002

          "},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html#_1","title":"\u51c6\u5907\u5de5\u4f5c","text":"
          • NFS \u9ed8\u8ba4\u4f7f\u7528\u8282\u70b9\u7684\u5b58\u50a8\u4f5c\u4e3a\u6570\u636e\u7f13\u5b58\u70b9\uff0c\u56e0\u6b64\u9700\u8981\u786e\u8ba4\u78c1\u76d8\u672c\u8eab\u6709\u8db3\u591f\u7684\u78c1\u76d8\u7a7a\u95f4\u3002
          • \u5b89\u88c5\u65b9\u5f0f\u4f7f\u7528 Helm \u4e0e Kubectl\uff0c\u8bf7\u786e\u4fdd\u5df2\u7ecf\u5b89\u88c5\u597d\u3002
          "},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html#_2","title":"\u90e8\u7f72\u8fc7\u7a0b","text":"

          \u4e00\u5171\u9700\u8981\u5b89\u88c5\u51e0\u4e2a\u7ec4\u4ef6\uff1a

          • NFS Server
          • csi-driver-nfs
          • StorageClass
          "},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html#_3","title":"\u521d\u59cb\u5316\u547d\u540d\u7a7a\u95f4","text":"

          \u6240\u6709\u7cfb\u7edf\u7ec4\u4ef6\u4f1a\u5b89\u88c5\u5230 nfs \u547d\u540d\u7a7a\u95f4\u5185\uff0c\u56e0\u6b64\u9700\u8981\u5148\u521b\u5efa\u6b64\u547d\u540d\u7a7a\u95f4\u3002

          kubectl create namespace nfs\n
          "},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html#nfs-server","title":"\u5b89\u88c5 NFS Server","text":"

          \u8fd9\u91cc\u662f\u4e00\u4e2a\u7b80\u5355\u7684 YAML \u90e8\u7f72\u6587\u4ef6\uff0c\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u3002

          Note

          \u6ce8\u610f\u68c0\u67e5 image:\uff0c\u6839\u636e\u96c6\u7fa4\u6240\u5728\u4f4d\u7f6e\u60c5\u51b5\uff0c\u53ef\u80fd\u9700\u8981\u4fee\u6539\u4e3a\u56fd\u5185\u955c\u50cf\u3002

          nfs-server.yaml
kind: Service
apiVersion: v1
metadata:
  name: nfs-server
  namespace: nfs
  labels:
    app: nfs-server
spec:
  type: ClusterIP
  selector:
    app: nfs-server
  ports:
    - name: tcp-2049
      port: 2049
      protocol: TCP
    - name: udp-111
      port: 111
      protocol: UDP
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-server
  namespace: nfs
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-server
  template:
    metadata:
      name: nfs-server
      labels:
        app: nfs-server
    spec:
      nodeSelector:
        "kubernetes.io/os": linux
      containers:
        - name: nfs-server
          image: itsthenetwork/nfs-server-alpine:latest
          env:
            - name: SHARED_DIRECTORY
              value: "/exports"
          volumeMounts:
            - mountPath: /exports
              name: nfs-vol
          securityContext:
            privileged: true
          ports:
            - name: tcp-2049
              containerPort: 2049
              protocol: TCP
            - name: udp-111
              containerPort: 111
              protocol: UDP
      volumes:
        - name: nfs-vol
          hostPath:
            path: /nfsdata  # (1)!
            type: DirectoryOrCreate
          1. Change this to store the NFS shared data at a different path

          Save the YAML above as nfs-server.yaml, then deploy it with the following commands:

kubectl -n nfs apply -f nfs-server.yaml

# Check the deployment result
kubectl -n nfs get pod,svc
          "},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html#csi-driver-nfs","title":"\u5b89\u88c5 csi-driver-nfs","text":"

          \u5b89\u88c5 csi-driver-nfs \u9700\u8981\u4f7f\u7528 Helm\uff0c\u8bf7\u6ce8\u610f\u63d0\u524d\u5b89\u88c5\u3002

          # \u6dfb\u52a0 Helm \u4ed3\u5e93\nhelm repo add csi-driver-nfs https://mirror.ghproxy.com/https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts\nhelm repo update csi-driver-nfs\n\n# \u90e8\u7f72 csi-driver-nfs\n# \u8fd9\u91cc\u53c2\u6570\u4e3b\u8981\u4f18\u5316\u4e86\u955c\u50cf\u5730\u5740\uff0c\u52a0\u901f\u56fd\u5185\u4e0b\u8f7d\nhelm upgrade --install csi-driver-nfs csi-driver-nfs/csi-driver-nfs \\\n    --set image.nfs.repository=k8s.m.daocloud.io/sig-storage/nfsplugin \\\n    --set image.csiProvisioner.repository=k8s.m.daocloud.io/sig-storage/csi-provisioner \\\n    --set image.livenessProbe.repository=k8s.m.daocloud.io/sig-storage/livenessprobe \\\n    --set image.nodeDriverRegistrar.repository=k8s.m.daocloud.io/sig-storage/csi-node-driver-registrar \\\n    --namespace nfs \\\n    --version v4.5.0\n

          Warning

          Not all images used by csi-nfs-controller can be set through Helm parameters; the image fields of its Deployment must be edited by hand. Change image: registry.k8s.io to image: k8s.dockerproxy.com to speed up downloads in China.

          "},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html#storageclass","title":"Create a StorageClass","text":"

          Save the following YAML as nfs-sc.yaml:

          nfs-sc.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-csi
provisioner: nfs.csi.k8s.io
parameters:
  server: nfs-server.nfs.svc.cluster.local
  share: /
  # csi.storage.k8s.io/provisioner-secret is only needed for providing mountOptions in DeleteVolume
  # csi.storage.k8s.io/provisioner-secret-name: "mount-options"
  # csi.storage.k8s.io/provisioner-secret-namespace: "default"
reclaimPolicy: Retain
volumeBindingMode: Immediate
mountOptions:
  - nfsvers=4.1

          Then deploy it with the following command:

kubectl apply -f nfs-sc.yaml
          "},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html#_4","title":"\u6d4b\u8bd5","text":"

          \u521b\u5efa\u6570\u636e\u96c6\uff0c\u5e76\u5c06\u6570\u636e\u96c6\u7684 \u5173\u8054\u5b58\u50a8\u7c7b \uff0c\u9884\u70ed\u65b9\u5f0f \u8bbe\u7f6e\u4e3a NFS\uff0c\u5373\u53ef\u5c06\u8fdc\u7aef\u6570\u636e\u9884\u70ed\u5230\u96c6\u7fa4\u5185\u3002

          \u6570\u636e\u96c6\u521b\u5efa\u6210\u529f\u540e\uff0c\u53ef\u4ee5\u770b\u5230\u6570\u636e\u96c6\u7684\u72b6\u6001\u4e3a \u9884\u70ed\u4e2d\uff0c\u7b49\u5f85\u9884\u70ed\u5b8c\u6210\u540e\u5373\u53ef\u4f7f\u7528\u3002

          "},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html#_5","title":"\u5e38\u89c1\u95ee\u9898","text":""},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html#nfs-sbinmount","title":"\u7f3a\u5c11\u5fc5\u8981\u7684 NFS \u5ba2\u6237\u7aef\u8f6f\u4ef6 /sbin/mount","text":"
          bad option; for several filesystems (e.g. nfs, cifs) you might need a /sbin/mount.<type> helper program.\n

          \u5728\u8fd0\u884c Kubernetes \u7684\u8282\u70b9\u673a\u5668\u4e0a\uff0c\u786e\u4fdd\u5df2\u5b89\u88c5 NFS \u5ba2\u6237\u7aef\uff1a

          Ubuntu/DebianCentOS/RHEL

          \u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\u5b89\u88c5 NFS \u5ba2\u6237\u7aef\uff1a

          sudo apt-get update\nsudo apt-get install nfs-common\n

          \u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\u5b89\u88c5 NFS \u5ba2\u6237\u7aef\uff1a

          sudo yum install nfs-utils\n

          \u68c0\u67e5 NFS \u670d\u52a1\u5668\u914d\u7f6e\uff0c\u786e\u4fdd NFS \u670d\u52a1\u5668\u6b63\u5728\u8fd0\u884c\u4e14\u914d\u7f6e\u6b63\u786e\u3002\u4f60\u53ef\u4ee5\u5c1d\u8bd5\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\u624b\u52a8\u6302\u8f7d\u6765\u6d4b\u8bd5\uff1a

          sudo mkdir -p /mnt/test\nsudo mount -t nfs <nfs-server>:/nfsdata /mnt/test\n
          "},{"location":"admin/baize/best-practice/finetunel-llm.html","title":"\u4f7f\u7528 AI Lab \u5fae\u8c03 ChatGLM3 \u6a21\u578b","text":"

          \u672c\u6587\u4ee5 ChatGLM3 \u6a21\u578b\u4e3a\u4f8b\uff0c\u6f14\u793a\u5982\u4f55\u5728 AI Lab \u4e2d\u4f7f\u7528 LoRA\uff08Low-Rank Adaptation\uff0c\u4f4e\u79e9\u81ea\u9002\u5e94\uff09\u5fae\u8c03 ChatGLM3 \u6a21\u578b\u3002 Demo \u7a0b\u5e8f\u6765\u81ea ChatGLM3 \u5b98\u65b9\u6848\u4f8b\u3002

          \u5fae\u8c03\u7684\u5927\u81f4\u6d41\u7a0b\u4e3a\uff1a

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#_1","title":"\u73af\u5883\u4f9d\u8d56","text":"
          • GPU \u663e\u5b58\u81f3\u5c11 20GB\uff0c\u63a8\u8350\u4f7f\u7528 RTX4090\u3001NVIDIA A/H \u7cfb\u5217\u663e\u5361
          • \u53ef\u7528\u78c1\u76d8\u7a7a\u95f4\u81f3\u5c11 200GB
          • CPU \u81f3\u5c11 8 \u6838\uff0c\u63a8\u8350 16 \u6838
          • \u5185\u5b58 64GB\uff0c\u63a8\u8350 128GB

          Info

          \u5728\u5f00\u59cb\u4f53\u9a8c\u4e4b\u524d\uff0c\u8bf7\u68c0\u67e5 AI \u7b97\u529b\u5e73\u53f0\u4ee5\u53ca AI Lab \u90e8\u7f72\u6b63\u786e\uff0cGPU \u961f\u5217\u8d44\u6e90\u521d\u59cb\u5316\u6210\u529f\uff0c\u4e14\u7b97\u529b\u8d44\u6e90\u5145\u8db3\u3002

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#_2","title":"\u6570\u636e\u51c6\u5907","text":"

          \u5229\u7528 AI Lab \u63d0\u4f9b\u7684\u6570\u636e\u96c6\u7ba1\u7406\u529f\u80fd\uff0c\u5feb\u901f\u5c06\u5fae\u8c03\u5927\u6a21\u578b\u6240\u9700\u7684\u6570\u636e\u8fdb\u884c\u9884\u70ed\u53ca\u6301\u4e45\u5316\uff0c\u51cf\u5c11\u56e0\u4e3a\u51c6\u5907\u6570\u636e\u5bfc\u81f4\u7684 GPU \u8d44\u6e90\u5360\u7528\uff0c\u63d0\u9ad8\u8d44\u6e90\u5229\u7528\u6548\u7387\u3002

          \u5728\u6570\u636e\u96c6\u5217\u8868\u9875\u9762\uff0c\u521b\u5efa\u9700\u8981\u7684\u6570\u636e\u8d44\u6e90\uff0c\u8fd9\u4e9b\u8d44\u6e90\u5305\u542b\u4e86 ChatGLM3 \u4ee3\u7801\uff0c\u4e5f\u53ef\u4ee5\u662f\u6570\u636e\u6587\u4ef6\uff0c\u6240\u6709\u8fd9\u4e9b\u6570\u636e\u90fd\u53ef\u4ee5\u901a\u8fc7\u6570\u636e\u96c6\u5217\u8868\u6765\u7edf\u4e00\u7ba1\u7406\u3002

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#_3","title":"\u4ee3\u7801\u53ca\u6a21\u578b\u6587\u4ef6","text":"

          ChatGLM3 \u662f\u667a\u8c31 AI \u548c\u6e05\u534e\u5927\u5b66 KEG \u5b9e\u9a8c\u5ba4\u8054\u5408\u53d1\u5e03\u7684\u5bf9\u8bdd\u9884\u8bad\u7ec3\u6a21\u578b\u3002

          \u5148\u62c9\u53d6 ChatGLM3 \u4ee3\u7801\u4ed3\u5e93\uff0c\u4e0b\u8f7d\u9884\u8bad\u7ec3\u6a21\u578b\uff0c\u7528\u4e8e\u540e\u7eed\u7684\u5fae\u8c03\u4efb\u52a1\u3002

          AI Lab \u4f1a\u5728\u540e\u53f0\u8fdb\u884c\u5168\u81ea\u52a8\u6570\u636e\u9884\u70ed\uff0c\u4ee5\u4fbf\u540e\u7eed\u7684\u4efb\u52a1\u80fd\u591f\u5feb\u901f\u8bbf\u95ee\u6570\u636e\u3002

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#advertisegen","title":"AdvertiseGen \u6570\u636e\u96c6","text":"

          \u56fd\u5185\u6570\u636e\u53ef\u4ee5\u4ece Tsinghua Cloud \u76f4\u63a5\u83b7\u53d6\uff0c\u8fd9\u91cc\u4f7f\u7528 HTTP \u7684\u6570\u636e\u6e90\u65b9\u5f0f\u3002

          \u6ce8\u610f\u521b\u5efa\u5b8c\u6210\u540e\uff0c\u9700\u8981\u7b49\u5f85\u6570\u636e\u96c6\u9884\u70ed\u5b8c\u6210\uff0c\u4e00\u822c\u5f88\u5feb\uff0c\u6839\u636e\u60a8\u7684\u7f51\u7edc\u60c5\u51b5\u800c\u5b9a\u3002

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#_4","title":"\u5fae\u8c03\u8f93\u51fa\u6570\u636e","text":"

          \u540c\u65f6\uff0c\u60a8\u9700\u8981\u51c6\u5907\u4e00\u4e2a\u7a7a\u7684\u6570\u636e\u96c6\uff0c\u7528\u4e8e\u5b58\u653e\u5fae\u8c03\u4efb\u52a1\u5b8c\u6210\u540e\u8f93\u51fa\u7684\u6a21\u578b\u6587\u4ef6\uff0c\u8fd9\u91cc\u521b\u5efa\u4e00\u4e2a\u7a7a\u7684\u6570\u636e\u96c6\uff0c\u4ee5 PVC \u4e3a\u4f8b\u3002

          Warning

          \u6ce8\u610f\u9700\u8981\u4f7f\u7528\u652f\u6301 ReadWriteMany \u7684\u5b58\u50a8\u7c7b\u578b\uff0c\u4ee5\u4fbf\u540e\u7eed\u7684\u4efb\u52a1\u80fd\u591f\u5feb\u901f\u8bbf\u95ee\u6570\u636e\u3002

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#_5","title":"\u73af\u5883\u51c6\u5907","text":"

          \u5bf9\u4e8e\u6a21\u578b\u5f00\u53d1\u8005\u6765\u8bf4\uff0c\u51c6\u5907\u6a21\u578b\u5f00\u53d1\u9700\u8981\u7684 Python \u73af\u5883\u4f9d\u8d56\u662f\u975e\u5e38\u91cd\u8981\u7684\uff0c\u4f20\u7edf\u505a\u6cd5\u5c06\u73af\u5883\u4f9d\u8d56\u76f4\u63a5\u6253\u5305\u5230\u5f00\u53d1\u5de5\u5177\u7684\u955c\u50cf\u4e2d\uff0c \u6216\u8005\u76f4\u63a5\u5728\u672c\u5730\u73af\u5883\u4e2d\u5b89\u88c5\uff0c\u4f46\u662f\u8fd9\u6837\u505a\u4f1a\u5bfc\u81f4\u73af\u5883\u4f9d\u8d56\u7684\u4e0d\u4e00\u81f4\uff0c\u800c\u4e14\u4e0d\u5229\u4e8e\u73af\u5883\u7684\u7ba1\u7406\u548c\u4f9d\u8d56\u66f4\u65b0\u53ca\u540c\u6b65\u3002

          AI Lab \u63d0\u4f9b\u4e86\u73af\u5883\u7ba1\u7406\u7684\u80fd\u529b\uff0c\u5c06 Python \u73af\u5883\u4f9d\u8d56\u5305\u7ba1\u7406\u548c\u5f00\u53d1\u5de5\u5177\u3001\u4efb\u52a1\u955c\u50cf\u7b49\u8fdb\u884c\u89e3\u8026\uff0c\u89e3\u51b3\u4e86\u4f9d\u8d56\u7ba1\u7406\u6df7\u4e71\uff0c\u73af\u5883\u4e0d\u4e00\u81f4\u7b49\u95ee\u9898\u3002

          \u8fd9\u91cc\u4f7f\u7528 AI Lab \u63d0\u4f9b\u7684\u73af\u5883\u7ba1\u7406\u529f\u80fd\uff0c\u521b\u5efa ChatGLM3 \u5fae\u8c03\u6240\u9700\u7684\u73af\u5883\uff0c\u4ee5\u5907\u540e\u7eed\u4f7f\u7528\u3002

          Warning

          1. ChatGLM \u4ed3\u5e93\u5185\u6709 requirements.txt \u6587\u4ef6\uff0c\u91cc\u9762\u5305\u542b\u4e86 ChatGLM3 \u5fae\u8c03\u6240\u9700\u7684\u73af\u5883\u4f9d\u8d56
          2. \u672c\u6b21\u5fae\u8c03\u6ca1\u6709\u7528\u5230 deepspeed \u548c mpi4py \u5305\uff0c\u5efa\u8bae\u4ece requirements.txt \u6587\u4ef6\u4e2d\u5c06\u5176\u6ce8\u91ca\u6389\uff0c\u5426\u5219\u53ef\u80fd\u51fa\u73b0\u5305\u7f16\u8bd1\u4e0d\u901a\u8fc7\u7684\u60c5\u51b5
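
          Point 2 can also be scripted. A minimal sketch of our own (not from the original doc) that comments out those two packages in requirements.txt before the environment is created:

from pathlib import Path

req = Path('requirements.txt')  # path inside the ChatGLM3 repository
skip = ('deepspeed', 'mpi4py')  # packages this fine-tune does not need

lines = req.read_text(encoding='utf-8').splitlines()
req.write_text(
    '\n'.join(f'# {l}' if l.strip().startswith(skip) else l for l in lines) + '\n',
    encoding='utf-8',
)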

          In the environment management list you can quickly create a Python environment through a simple form. A Python 3.11.x environment is required here.

          Because this experiment uses CUDA, GPU resources are configured here so the dependency libraries that need them can be preloaded.

          Creating the environment downloads a series of Python dependencies, and download speeds vary by location; a China-based mirror is used here to speed things up.

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#notebook-ide","title":"\u4f7f\u7528 Notebook \u4f5c\u4e3a IDE","text":"

          AI Lab \u63d0\u4f9b\u4e86 Notebook \u4f5c\u4e3a IDE \u7684\u529f\u80fd\uff0c\u53ef\u4ee5\u8ba9\u7528\u6237\u5728\u6d4f\u89c8\u5668\u4e2d\u76f4\u63a5\u7f16\u5199\u4ee3\u7801\uff0c\u8fd0\u884c\u4ee3\u7801\uff0c\u67e5\u770b\u4ee3\u7801\u8fd0\u884c\u7ed3\u679c\uff0c\u975e\u5e38\u9002\u5408\u4e8e\u6570\u636e\u5206\u6790\u3001\u673a\u5668\u5b66\u4e60\u3001\u6df1\u5ea6\u5b66\u4e60\u7b49\u9886\u57df\u7684\u5f00\u53d1\u3002

          \u60a8\u53ef\u4ee5\u4f7f\u7528 AI Lab \u63d0\u4f9b\u7684 JupyterLab Notebook \u6765\u8fdb\u884c ChatGLM3 \u7684\u5fae\u8c03\u4efb\u52a1\u3002

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#jupyterlab-notebook","title":"\u521b\u5efa JupyterLab Notebook","text":"

          \u5728 Notebook \u5217\u8868\u4e2d\uff0c\u53ef\u4ee5\u6839\u636e\u9875\u9762\u64cd\u4f5c\u6307\u5f15\uff0c\u521b\u5efa\u4e00\u4e2a Notebook\u3002\u6ce8\u610f\u60a8\u9700\u8981\u6839\u636e\u524d\u6587\u63d0\u5230\u7684\u8d44\u6e90\u8981\u6c42\u6765\u914d\u7f6e\u5bf9\u5e94\u7684 Notebook \u8d44\u6e90\u53c2\u6570\uff0c \u907f\u514d\u540e\u7eed\u56e0\u4e3a\u8d44\u6e90\u95ee\u9898\uff0c\u5f71\u54cd\u5fae\u8c03\u8fc7\u7a0b\u3002

          Note

          \u5728\u521b\u5efa Notebook \u65f6\uff0c\u53ef\u4ee5\u5c06\u4e4b\u524d\u9884\u52a0\u8f7d\u7684\u6a21\u578b\u4ee3\u7801\u6570\u636e\u96c6\u548c\u73af\u5883\uff0c\u76f4\u63a5\u6302\u8f7d\u5230 Notebook \u4e2d\uff0c\u6781\u5927\u8282\u7701\u4e86\u6570\u636e\u51c6\u5907\u7684\u65f6\u95f4\u3002

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#_6","title":"\u6302\u8f7d\u6570\u636e\u96c6\u548c\u4ee3\u7801","text":"

          \u6ce8\u610f\uff1aChatGLM3 \u7684\u4ee3\u7801\u6587\u4ef6\u6302\u8f7d\u5230\u4e86 /home/jovyan/ChatGLM3 \u76ee\u5f55\u4e0b\uff0c\u540c\u65f6\u60a8\u4e5f\u9700\u8981\u5c06 AdvertiseGen \u6570\u636e\u96c6\u6302\u8f7d\u5230 /home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen \u76ee\u5f55\u4e0b\uff0c\u4ee5\u4fbf\u540e\u7eed\u7684\u5fae\u8c03\u4efb\u52a1\u80fd\u591f\u8bbf\u95ee\u6570\u636e\u3002

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#pvc","title":"\u6302\u8f7d PVC \u5230\u6a21\u578b\u8f93\u51fa\u6587\u4ef6\u5939","text":"

          \u672c\u6b21\u4f7f\u7528\u7684\u6a21\u578b\u8f93\u51fa\u4f4d\u7f6e\u5728 /home/jovyan/ChatGLM3/finetune_demo/output \u76ee\u5f55\u4e0b\uff0c\u53ef\u4ee5\u5c06\u4e4b\u524d\u521b\u5efa\u7684 PVC \u6570\u636e\u96c6\u6302\u8f7d\u5230\u8fd9\u4e2a\u76ee\u5f55\u4e0b\uff0c \u8fd9\u6837\u8bad\u7ec3\u8f93\u51fa\u7684\u6a21\u578b\u5c31\u53ef\u4ee5\u4fdd\u5b58\u5230\u6570\u636e\u96c6\u4e2d\uff0c\u540e\u7eed\u6a21\u578b\u63a8\u7406\u7b49\u4efb\u52a1\u53ef\u4ee5\u76f4\u63a5\u8bbf\u95ee\u3002

          \u521b\u5efa\u5b8c\u6210\u540e\uff0c\u53ef\u4ee5\u770b\u5230 Notebook \u7684\u754c\u9762\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u5728 Notebook \u4e2d\u7f16\u5199\u4ee3\u7801\uff0c\u8fd0\u884c\u4ee3\u7801\uff0c\u67e5\u770b\u4ee3\u7801\u8fd0\u884c\u7ed3\u679c\u3002

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#chatglm3","title":"\u5fae\u8c03 ChatGLM3","text":"

          \u5f53\u60a8\u8fdb\u5165\u5230 Notebook \u4e2d\u540e\uff0c\u53ef\u4ee5\u5728 Notebook \u4fa7\u8fb9\u680f\u4f1a\u53d1\u73b0\u6709\u4e00\u4e2a File Browser \u7684\u9009\u9879\uff0c\u53ef\u4ee5\u770b\u5230\u4e4b\u524d\u6302\u8f7d\u7684\u6570\u636e\u96c6\u548c\u4ee3\u7801\uff0c\u5728\u8fd9\u91cc\u627e\u5230 ChatGLM3 \u7684\u6587\u4ef6\u5939\u3002

          \u60a8\u53ef\u4ee5\u770b\u5230 ChatGLM3 \u7684\u5fae\u8c03\u4ee3\u7801\u5728 finetune_demo \u6587\u4ef6\u5939\u4e2d\uff0c\u8fd9\u91cc\u53ef\u4ee5\u76f4\u63a5\u6253\u5f00 lora_finetune.ipynb \u6587\u4ef6\uff0c\u8fd9\u662f ChatGLM3 \u7684\u5fae\u8c03\u4ee3\u7801\u3002

          \u9996\u5148\uff0c\u6839\u636e README.md \u7684\u8bf4\u660e\uff0c\u60a8\u53ef\u4ee5\u4e86\u89e3\u5230\u6574\u4e2a\u5fae\u8c03\u7684\u8fc7\u7a0b\uff0c\u5efa\u8bae\u5148\u9605\u8bfb\u4e00\u904d\uff0c\u786e\u4fdd\u57fa\u7840\u7684\u73af\u5883\u4f9d\u8d56\u548c\u6570\u636e\u51c6\u5907\u5de5\u4f5c\u90fd\u5df2\u7ecf\u5b8c\u6210\u3002

          \u6253\u5f00\u7ec8\u7aef\uff0c\u5e76\u4f7f\u7528 conda \u5207\u6362\u5230\u60a8\u63d0\u524d\u9884\u70ed\u7684\u73af\u5883\u4e2d\uff0c\u6b64\u73af\u5883\u4e0e JupyterLab Kernel \u4fdd\u6301\u4e00\u81f4\uff0c\u4ee5\u4fbf\u540e\u7eed\u7684\u4ee3\u7801\u8fd0\u884c\u3002

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#_7","title":"\u6570\u636e\u9884\u5904\u7406","text":"

          \u9996\u5148\uff0c\u60a8\u9700\u8981\u5c06 AdvertiseGen \u6570\u636e\u96c6\u8fdb\u884c\u9884\u5904\u7406\uff0c\u5bf9\u6570\u636e\u8fdb\u884c\u6807\u51c6\u5316\u5904\u7406\uff0c\u4f7f\u5176\u7b26\u5408 Lora \u9884\u8bad\u7ec3\u7684\u6807\u51c6\u683c\u5f0f\u8981\u6c42\uff1b \u8fd9\u91cc\u5c06\u5904\u7406\u540e\u7684\u6570\u636e\u4fdd\u5b58\u5230 AdvertiseGen_fix \u6587\u4ef6\u5939\u4e2d\u3002

import json
from typing import Union
from pathlib import Path

def _resolve_path(path: Union[str, Path]) -> Path:
    return Path(path).expanduser().resolve()

def _mkdir(dir_name: Union[str, Path]):
    dir_name = _resolve_path(dir_name)
    if not dir_name.is_dir():
        dir_name.mkdir(parents=True, exist_ok=False)

def convert_adgen(data_dir: Union[str, Path], save_dir: Union[str, Path]):
    def _convert(in_file: Path, out_file: Path):
        _mkdir(out_file.parent)
        with open(in_file, encoding='utf-8') as fin:
            with open(out_file, 'wt', encoding='utf-8') as fout:
                for line in fin:
                    dct = json.loads(line)
                    sample = {'conversations': [{'role': 'user', 'content': dct['content']},
                                                {'role': 'assistant', 'content': dct['summary']}]}
                    fout.write(json.dumps(sample, ensure_ascii=False) + '\n')

    data_dir = _resolve_path(data_dir)
    save_dir = _resolve_path(save_dir)

    train_file = data_dir / 'train.json'
    if train_file.is_file():
        out_file = save_dir / train_file.relative_to(data_dir)
        _convert(train_file, out_file)

    dev_file = data_dir / 'dev.json'
    if dev_file.is_file():
        out_file = save_dir / dev_file.relative_to(data_dir)
        _convert(dev_file, out_file)

convert_adgen('data/AdvertiseGen', 'data/AdvertiseGen_fix')

          To save debugging time, you can trim the data in /home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen_fix/dev.json down to 50 entries. The data is in JSON-lines format, so it is easy to process (see the sketch below).
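
          A minimal sketch of that trimming step (our illustration, not from the original text); it assumes the converted dev.json holds one JSON record per line, which is what convert_adgen above produces:

from pathlib import Path

dev_path = Path('/home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen_fix/dev.json')

# Keep only the first 50 JSON-lines records for faster debugging runs.
lines = dev_path.read_text(encoding='utf-8').splitlines(keepends=True)[:50]
dev_path.write_text(''.join(lines), encoding='utf-8')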

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#lora","title":"\u672c\u5730 LoRA \u5fae\u8c03\u6d4b\u8bd5","text":"

          \u5b8c\u6210\u6570\u636e\u7684\u9884\u5904\u7406\u4e4b\u540e\uff0c\u57fa\u672c\u4e0a\u60a8\u5c31\u53ef\u4ee5\u76f4\u63a5\u5fae\u8c03\u6d4b\u8bd5\u4e86\uff0c\u53ef\u4ee5\u5728 /home/jovyan/ChatGLM3/finetune_demo/configs/lora.yaml \u6587\u4ef6\u4e2d\u914d\u7f6e\u5fae\u8c03\u7684\u53c2\u6570\uff0c\u4e00\u822c\u9700\u8981\u5173\u6ce8\u7684\u53c2\u6570\u57fa\u672c\u5982\u4e0b\uff1a

          \u65b0\u5f00\u4e00\u4e2a\u7ec8\u7aef\u7a97\u53e3\uff0c\u4f7f\u7528\u5982\u4e0b\u547d\u4ee4\u5373\u53ef\u8fdb\u884c\u672c\u5730\u5fae\u8c03\u6d4b\u8bd5\uff0c\u8bf7\u786e\u4fdd\u53c2\u6570\u914d\u7f6e\u548c\u8def\u5f84\u6b63\u786e\uff1a

          CUDA_VISIBLE_DEVICES=0 NCCL_P2P_DISABLE=\"1\" NCCL_IB_DISABLE=\"1\" python finetune_hf.py  data/AdvertiseGen_fix  ./chatglm3-6b  configs/lora.yaml\n

          In this command:

          • finetune_hf.py is the fine-tuning script in the ChatGLM3 code
          • data/AdvertiseGen_fix is your preprocessed dataset
          • ./chatglm3-6b is the path to your pre-trained model
          • configs/lora.yaml is the fine-tuning configuration file

          During fine-tuning you can use the nvidia-smi command to check GPU memory usage:

          After fine-tuning completes, an output directory is generated under finetune_demo containing the fine-tuned model files, so they are saved directly into the PVC dataset you created earlier.

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#_8","title":"Submitting the Fine-Tuning Job","text":"

          Once the local fine-tuning test passes and you are sure the code and data are sound, submit the fine-tuning job to AI Lab for large-scale training and fine-tuning.

          This local-first workflow is the recommended way to develop and fine-tune models: validate the code and data locally, then scale up.

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#_9","title":"\u4f7f\u7528\u754c\u9762\u63d0\u4ea4\u5fae\u8c03\u4efb\u52a1","text":"

          \u8fd9\u91cc\u4f7f\u7528 Pytorch \u6765\u521b\u5efa\u5fae\u8c03\u4efb\u52a1\uff0c\u6839\u636e\u60a8\u7684\u5b9e\u9645\u60c5\u51b5\uff0c\u9009\u62e9\u9700\u8981\u4f7f\u7528\u54ea\u4e2a\u96c6\u7fa4\u7684\u8d44\u6e90\uff0c\u6ce8\u610f\u9700\u8981\u6ee1\u8db3\u524d\u9762\u8d44\u6e90\u51c6\u5907\u4e2d\u63d0\u53ca\u7684\u8d44\u6e90\u8981\u6c42\u3002

          • \u955c\u50cf\uff1a\u53ef\u76f4\u63a5\u4f7f\u7528 baizectl \u63d0\u4f9b\u7684\u6a21\u578b\u955c\u50cf
          • \u542f\u52a8\u547d\u4ee4\uff0c\u6839\u636e\u60a8\u5728 Notebook \u4e2d\u4f7f\u7528 LoRA \u5fae\u8c03\u7684\u7ecf\u9a8c\uff0c\u4ee3\u7801\u6587\u4ef6\u548c\u6570\u636e\u5728 /home/jovyan/ChatGLM3/finetune_demo \u76ee\u5f55\u4e0b\uff0c\u6240\u4ee5\u60a8\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u8fd9\u4e2a\u8def\u5f84\uff1a

            bash -c \"cd /home/jovyan/ChatGLM3/finetune_demo && CUDA_VISIBLE_DEVICES=0 NCCL_P2P_DISABLE=\"1\" NCCL_IB_DISABLE=\"1\" python finetune_hf.py  data/AdvertiseGen_fix  ./chatglm3-6b  configs/lora.yaml\"\n
          • Mount the environment, so that the preloaded environment dependencies can be used not only in the Notebook but also in the job

          • Datasets: reuse the datasets warmed up earlier
            • Set the model output path to the PVC dataset created earlier
            • Mount the AdvertiseGen dataset to /home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen
          • Configure enough GPU resources to ensure the fine-tuning job runs normally

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#_10","title":"View the Job Status","text":"

          After the job is submitted successfully, you can follow its training progress in real time in the job list, including the job status, resource usage, and logs.

          View the job logs

          After the job finishes, you can find the fine-tuned model files in the output dataset and use them for subsequent inference jobs.

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#baizectl","title":"Submit the Job with baizectl","text":"

          AI Lab Notebooks can use the baizectl command-line tool directly, without additional authentication. If you prefer a CLI, you can submit the job with baizectl:

          baizectl job submit --name finetune-chatglm3 -t PYTORCH \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --resources cpu=8,memory=16Gi,nvidia.com/gpu=1 \\\n    --workers 1 \\\n    --queue default \\\n    --working-dir /home/jovyan/ChatGLM3 \\\n    --datasets AdvertiseGen:/home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen  \\\n    --datasets output:/home/jovyan/ChatGLM3/finetune_demo/output  \\\n    --labels job_type=pytorch \\\n    --restart-policy on-failure \\\n    -- bash -c 'cd /home/jovyan/ChatGLM3/finetune_demo && CUDA_VISIBLE_DEVICES=0 NCCL_P2P_DISABLE=\"1\" NCCL_IB_DISABLE=\"1\" python finetune_hf.py  data/AdvertiseGen_fix  ./chatglm3-6b  configs/lora.yaml'\n

          To learn more about baizectl, see the baizectl usage documentation.

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#_11","title":"Model Inference","text":"

          After the fine-tuning job completes, you can run inference with the fine-tuned model. Use the inference service provided by AI Lab to turn the output model into an inference service.

          In the inference service list, create a new inference service. When selecting the model, pick the dataset that holds the fine-tuning output and configure the model path.

          The GPU resource requirements of the inference service depend on the model size and the inference concurrency; you can start from the resource configuration of the earlier fine-tuning job.

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#_12","title":"Configure the Model Runtime","text":"

          Configuring the model runtime is especially important. AI Lab already supports vLLM as the runtime for model inference services, so you can simply select vLLM.

          vLLM supports a very wide range of large language models; visit vLLM to learn more. All of these models are easy to use in AI Lab.

          After creation, you can see the new inference service in the inference service list; in the model service list you can obtain the model's access address directly.

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#_13","title":"Test the Model Service","text":"

          Try it in a terminal: use a curl command to test the model service. When you see the returned result, the model service is ready for inference tasks:

          curl -X POST http://10.20.100.210:31118/v2/models/chatglm3-6b/generate \\\n  -d '{\"text_input\": \"hello\", \"stream\": false, \"sampling_parameters\": \"{\\\"temperature\\\": 0.7, \\\"top_p\\\": 0.95, \\\"max_tokens\\\": 1024}\"}'\n
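          The same request can be sent from Python. Below is a minimal sketch using the requests library; the host, port, model name, and payload mirror the curl example above and must be adjusted to your own service:

          import json\nimport requests\n\n# Endpoint of the deployed inference service (same as in the curl example above)\nurl = 'http://10.20.100.210:31118/v2/models/chatglm3-6b/generate'\n\npayload = {\n    'text_input': 'hello',\n    'stream': False,\n    # sampling_parameters is passed as a JSON-encoded string\n    'sampling_parameters': json.dumps({'temperature': 0.7, 'top_p': 0.95, 'max_tokens': 1024}),\n}\n\nresp = requests.post(url, json=payload, timeout=60)\nresp.raise_for_status()\nprint(resp.json())\n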

          "},{"location":"admin/baize/best-practice/finetunel-llm.html#_14","title":"\u7ed3\u8bed","text":"

          \u672c\u6587\u4ee5 ChatGLM3 \u4e3a\u4f8b\uff0c\u5e26\u60a8\u5feb\u901f\u4e86\u89e3\u548c\u4e0a\u624b AI Lab \u7684\u6a21\u578b\u5fae\u8c03\uff0c\u4f7f\u7528 LoRA \u5fae\u8c03\u4e86 ChatGLM3 \u6a21\u578b\u3002

          AI Lab \u63d0\u4f9b\u4e86\u975e\u5e38\u4e30\u5bcc\u7684\u529f\u80fd\uff0c\u53ef\u4ee5\u5e2e\u52a9\u6a21\u578b\u5f00\u53d1\u8005\u5feb\u901f\u8fdb\u884c\u6a21\u578b\u5f00\u53d1\u3001\u5fae\u8c03\u3001\u63a8\u7406\u7b49\u4efb\u52a1\uff0c\u540c\u65f6\u4e5f\u63d0\u4f9b\u4e86\u4e30\u5bcc\u7684 OpenAPI \u63a5\u53e3\uff0c\u53ef\u4ee5\u65b9\u4fbf\u5730\u4e0e\u7b2c\u4e09\u65b9\u5e94\u7528\u751f\u6001\u8fdb\u884c\u7ed3\u5408\u3002

          "},{"location":"admin/baize/best-practice/label-studio.html","title":"\u90e8\u7f72 Label Studio","text":"

          Label Studio \u662f\u4e00\u4e2a\u5f00\u6e90\u7684\u6570\u636e\u6807\u6ce8\u5de5\u5177\uff0c\u7528\u4e8e\u5404\u79cd\u673a\u5668\u5b66\u4e60\u548c\u4eba\u5de5\u667a\u80fd\u4efb\u52a1\u3002 \u4ee5\u4e0b\u662f Label Studio \u7684\u7b80\u8981\u4ecb\u7ecd\uff1a

          • \u652f\u6301\u56fe\u50cf\u3001\u97f3\u9891\u3001\u89c6\u9891\u3001\u6587\u672c\u7b49\u591a\u79cd\u6570\u636e\u7c7b\u578b\u7684\u6807\u6ce8
          • \u53ef\u7528\u4e8e\u76ee\u6807\u68c0\u6d4b\u3001\u56fe\u50cf\u5206\u7c7b\u3001\u8bed\u97f3\u8f6c\u5f55\u3001\u547d\u540d\u5b9e\u4f53\u8bc6\u522b\u7b49\u591a\u79cd\u4efb\u52a1
          • \u63d0\u4f9b\u53ef\u5b9a\u5236\u7684\u6807\u6ce8\u754c\u9762
          • \u652f\u6301\u591a\u79cd\u6807\u6ce8\u683c\u5f0f\u548c\u5bfc\u51fa\u9009\u9879

          Label Studio \u901a\u8fc7\u5176\u7075\u6d3b\u6027\u548c\u529f\u80fd\u4e30\u5bcc\u6027\uff0c\u4e3a\u6570\u636e\u79d1\u5b66\u5bb6\u548c\u673a\u5668\u5b66\u4e60\u5de5\u7a0b\u5e08\u63d0\u4f9b\u4e86\u5f3a\u5927\u7684\u6570\u636e\u6807\u6ce8\u89e3\u51b3\u65b9\u6848\u3002

          "},{"location":"admin/baize/best-practice/label-studio.html#ai","title":"\u90e8\u7f72\u5230 AI \u7b97\u529b\u5e73\u53f0","text":"

          \u8981\u60f3\u5728 AI Lab \u4e2d\u4f7f\u7528 Label Studio\uff0c\u9700\u5c06\u5176\u90e8\u7f72\u5230\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\uff0c \u4f60\u53ef\u4ee5\u901a\u8fc7 Helm \u7684\u65b9\u5f0f\u5feb\u901f\u90e8\u7f72\u3002

          Note

          \u66f4\u591a\u90e8\u7f72\u8be6\u60c5\uff0c\u8bf7\u53c2\u9605 Deploy Label Studio on Kubernetes\u3002

          1. \u6253\u5f00\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u754c\u9762\uff0c\u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u627e\u5230 Helm \u5e94\u7528 -> Helm \u4ed3\u5e93 \uff0c\u9009\u62e9 \u521b\u5efa\u4ed3\u5e93 \u6309\u94ae\uff0c\u586b\u5199\u5982\u4e0b\u53c2\u6570\uff1a

          2. \u6dfb\u52a0\u6210\u529f\u540e\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u9009\u62e9 \u540c\u6b65\u4ed3\u5e93 \uff0c\u7a0d\u7b49\u7247\u523b\u540e\u5b8c\u6210\u540c\u6b65\u3002\uff08\u540e\u7eed\u66f4\u65b0 Label Studio \u4e5f\u4f1a\u7528\u5230\u8fd9\u4e2a\u540c\u6b65\u64cd\u4f5c\uff09\u3002

          3. \u7136\u540e\u8df3\u8f6c\u5230 Helm \u6a21\u677f \u9875\u9762\uff0c\u4f60\u53ef\u4ee5\u641c\u7d22\u627e\u5230 label-studio\uff0c\u70b9\u51fb\u5361\u7247\u3002

          4. \u9009\u62e9\u6700\u65b0\u7684\u7248\u672c\uff0c\u5982\u4e0b\u56fe\u914d\u7f6e\u5b89\u88c5\u53c2\u6570\uff0c\u540d\u79f0\u4e3a label-stuio\uff0c\u5efa\u8bae\u521b\u5efa\u65b0\u7684\u547d\u4ee4\u7a7a\u95f4\uff0c\u914d\u7f6e\u53c2\u6570\u5207\u6362\u5230 YAML \uff0c\u6839\u636e\u8bf4\u660e\u4fee\u6539\u5176\u4e2d\u914d\u7f6e\u3002

            global:\n  image:\n    repository: heartexlabs/label-studio   # \u5982\u679c\u65e0\u6cd5\u8bbf\u95ee docker.io\uff0c\u5728\u6b64\u5904\u914d\u7f6e\u4ee3\u7406\u5730\u5740\n  extraEnvironmentVars:\n    LABEL_STUDIO_HOST: https://{\u8bbf\u95ee\u5730\u5740}/label-studio    # \u4f7f\u7528\u7684\u767b\u5f55\u5730\u5740\uff0c\u8bf7\u53c2\u9605\u5f53\u524d\u7f51\u9875 URL\n    LABEL_STUDIO_USERNAME: {\u7528\u6237\u90ae\u7bb1}    # \u5fc5\u987b\u662f\u90ae\u7bb1\uff0c\u66ff\u6362\u4e3a\u81ea\u5df1\u7684\n    LABEL_STUDIO_PASSWORD: {\u7528\u6237\u5bc6\u7801}    \napp:\n  nginx:\n    livenessProbe:\n      path: /label-studio/nginx_health\n    readinessProbe:\n      path: /label-studio/version\n

          \u81f3\u6b64\uff0c\u5b8c\u6210\u4e86 Label studio \u7684\u5b89\u88c5\u3002

          Warning

          By default, PostgreSQL is installed as the data-service middleware. If the image pull fails, docker.io is probably unreachable; switching to an available proxy should fix it.

          If you have your own PostgreSQL middleware, you can configure it with the following parameters:

          global:\n  image:\n    repository: heartexlabs/label-studio   # Configure a proxy address here if docker.io is unreachable\n  extraEnvironmentVars:\n    LABEL_STUDIO_HOST: https://{access_address}/label-studio    # Login address to use; refer to the current page URL\n    LABEL_STUDIO_USERNAME: {user_email}    # Must be an email address; replace with your own\n    LABEL_STUDIO_PASSWORD: {user_password}    \napp:\n  nginx:\n    livenessProbe:\n      path: /label-studio/nginx_health\n    readinessProbe:\n      path: /label-studio/version\npostgresql:\n  enabled: false  # Disable the built-in PostgreSQL\nexternalPostgresql:\n  host: \"postgres-postgresql\"  # PostgreSQL address\n  port: 5432\n  username: \"label_studio\"  # PostgreSQL username\n  password: \"your_label_studio_password\"  # PostgreSQL password\n  database: \"label_studio\"  # PostgreSQL database name\n
          "},{"location":"admin/baize/best-practice/label-studio.html#gproduct","title":"\u6dfb\u52a0 GProduct \u5230\u5bfc\u822a\u680f","text":"

          \u5982\u679c\u8981\u6dfb\u52a0 Label Studio \u5230\u5bfc\u822a\u680f\uff0c\u53ef\u4ee5\u53c2\u8003\u5168\u5c40\u7ba1\u7406 OEM IN \u7684\u65b9\u5f0f\u3002 \u4ee5\u4e0b\u6848\u4f8b\u662f\u589e\u52a0\u5230 AI Lab \u4e8c\u7ea7\u5bfc\u822a\u7684\u6dfb\u52a0\u65b9\u5f0f\u3002

          "},{"location":"admin/baize/best-practice/label-studio.html#_1","title":"\u6dfb\u52a0\u4ee3\u7406\u8bbf\u95ee","text":"
          apiVersion: ghippo.io/v1alpha1\nkind: GProductProxy\nmetadata:\n  name: label-studio\nspec:\n  gproduct: label-studio\n  proxies:\n  - authnCheck: false\n    destination:\n      host: label-studio-ls-app.label-studio.svc.cluster.local\n      port: 80\n    match:\n      uri:\n        prefix: /label-studio\n
          "},{"location":"admin/baize/best-practice/label-studio.html#ai-lab","title":"\u6dfb\u52a0\u5230 AI Lab","text":"

          \u4fee\u6539 CRD \u4e3a GProductNavigator \u7684 CR baize \uff0c\u7136\u540e\u5728\u73b0\u6709\u914d\u7f6e\u4e2d\u8fdb\u884c\u5982\u4e0b\u53d8\u66f4\uff1a

          apiVersion: ghippo.io/v1alpha1\nkind: GProductNavigator\nmetadata:\n  annotations:\n    meta.helm.sh/release-name: baize\n    meta.helm.sh/release-namespace: baize-system\n  labels:\n    app.kubernetes.io/managed-by: Helm\n    gProductName: baize\n  name: baize\nspec:\n  category: cloudnativeai\n  gproduct: baize\n  iconUrl: ./ui/baize/logo.svg\n  isCustom: false\n  localizedName:\n    en-US: AI Lab\n    zh-CN: AI Lab\n  menus:\n    - iconUrl: ''\n      isCustom: false\n      localizedName:\n        en-US: AI Lab\n        zh-CN: AI Lab\n      name: workspace-view\n      order: 1\n      url: ./baize\n      visible: true\n    - iconUrl: ''\n      isCustom: false\n      localizedName:\n        en-US: Operator\n        zh-CN: \u8fd0\u7ef4\u7ba1\u7406\n      name: admin-view\n      order: 1\n      url: ./baize/admin\n      visible: true\n    # Addition starts here\n    - iconUrl: ''\n      localizedName:\n        en-US: Data Annotation\n        zh-CN: \u6570\u636e\u6807\u6ce8\n      name: label-studio\n      order: 1\n      target: blank    # Open in a new tab\n      url: https://{access_address}/label-studio    # Access address\n      visible: true\n    # Addition ends here\n  name: AI Lab\n  order: 10\n  url: ./baize\n  visible: true\n
          "},{"location":"admin/baize/best-practice/label-studio.html#_2","title":"\u6dfb\u52a0\u6548\u679c","text":""},{"location":"admin/baize/best-practice/label-studio.html#_3","title":"\u7ed3\u8bed","text":"

          \u4ee5\u4e0a\uff0c\u5c31\u662f\u5982\u4f55\u6dfb\u52a0 Label Studio \u5e76\u5c06\u5176\u4f5c\u4e3a AI Lab \u7684\u6807\u6ce8\u7ec4\u4ef6\uff0c\u901a\u8fc7\u5c06\u6807\u6ce8\u540e\u7684\u6570\u636e\u6dfb\u52a0\u5230 AI Lab \u7684\u6570\u636e\u96c6\u4e2d\uff0c \u8054\u52a8\u7b97\u6cd5\u5f00\u53d1\uff0c\u5b8c\u5584\u7b97\u6cd5\u5f00\u53d1\u6d41\u7a0b\uff0c\u540e\u7eed\u5982\u4f55\u4f7f\u7528\u8bf7\u5173\u6ce8\u5176\u4ed6\u6587\u6863\u53c2\u8003\u3002

          "},{"location":"admin/baize/best-practice/train-with-deepspeed.html","title":"\u5982\u4f55\u63d0\u4ea4 DeepSpeed \u8bad\u7ec3\u4efb\u52a1","text":"

          \u6839\u636e DeepSpeed \u5b98\u65b9\u6587\u6863\uff0c\u6211\u4eec\u63a8\u8350\u4f7f\u7528\u4fee\u6539\u4ee3\u7801\u7684\u65b9\u5f0f\u5b9e\u73b0\u3002

          \u5373\u4f7f\u7528 deepspeed.init_distributed() \u4ee3\u66ff torch.distributed.init_process_group(...)\u3002 \u7136\u540e\u8fd0\u884c\u547d\u4ee4\u4f7f\u7528 torchrun\uff0c\u63d0\u4ea4\u4e3a Pytorch \u5206\u5e03\u5f0f\u4efb\u52a1\uff0c\u65e2\u53ef\u8fd0\u884c DeepSpeed \u4efb\u52a1\u3002

          \u662f\u7684\uff0c\u4f60\u53ef\u4ee5\u4f7f\u7528 torchrun \u8fd0\u884c\u4f60\u7684 DeepSpeed \u8bad\u7ec3\u811a\u672c\u3002 torchrun \u662f PyTorch \u63d0\u4f9b\u7684\u4e00\u4e2a\u5b9e\u7528\u5de5\u5177\uff0c\u7528\u4e8e\u5206\u5e03\u5f0f\u8bad\u7ec3\u3002\u4f60\u53ef\u4ee5\u7ed3\u5408 torchrun \u548c DeepSpeed API \u6765\u542f\u52a8\u4f60\u7684\u8bad\u7ec3\u4efb\u52a1\u3002

          \u4ee5\u4e0b\u662f\u4e00\u4e2a\u4f7f\u7528 torchrun \u8fd0\u884c DeepSpeed \u8bad\u7ec3\u811a\u672c\u7684\u793a\u4f8b\uff1a

          1. \u7f16\u5199\u8bad\u7ec3\u811a\u672c\uff1a

            train.py
            import torch\nimport deepspeed\nfrom torch.utils.data import DataLoader\n\n# Load the model and data (YourModel and YourDataset are placeholders for your own classes)\nmodel = YourModel()\ntrain_dataset = YourDataset()\ntrain_dataloader = DataLoader(train_dataset, batch_size=32)\n\n# Path to the DeepSpeed configuration file\ndeepspeed_config = \"deepspeed_config.json\"\n\n# Create the DeepSpeed training engine\nmodel_engine, optimizer, _, _ = deepspeed.initialize(\n    model=model,\n    model_parameters=model.parameters(),\n    config_params=deepspeed_config\n)\n\n# Training loop (assumes the forward pass returns the loss)\nfor batch in train_dataloader:\n    loss = model_engine(batch)\n    model_engine.backward(loss)\n    model_engine.step()\n
          2. Create the DeepSpeed configuration file:

            deepspeed_config.json
            {\n  \"train_batch_size\": 32,\n  \"gradient_accumulation_steps\": 1,\n  \"fp16\": {\n    \"enabled\": true,\n    \"loss_scale\": 0\n  },\n  \"optimizer\": {\n    \"type\": \"Adam\",\n    \"params\": {\n      \"lr\": 0.00015,\n      \"betas\": [0.9, 0.999],\n      \"eps\": 1e-08,\n      \"weight_decay\": 0\n    }\n  }\n}\n
          3. Run the training script with torchrun or baizectl:

            torchrun train.py\n

            This way you combine PyTorch's distributed training capabilities with DeepSpeed's optimization techniques for more efficient training. In a Notebook, you can submit the command with baizectl:

            baizectl job submit --pytorch --workers 2 -- torchrun train.py\n
          "},{"location":"admin/baize/developer/index.html","title":"\u5f00\u53d1\u63a7\u5236\u53f0","text":"

          \u5f00\u53d1\u63a7\u5236\u53f0\u662f\u5f00\u53d1\u8005\u65e5\u5e38\u6267\u884c AI \u63a8\u7406\u3001\u5927\u6a21\u578b\u8bad\u7ec3\u7b49\u4efb\u52a1\u7684\u63a7\u5236\u53f0\u3002

          \u65b9\u4fbf\u7528\u6237\u901a\u8fc7\u6982\u89c8\u5feb\u901f\u4e86\u89e3\uff0c\u5f53\u524d\u5de5\u4f5c\u7a7a\u95f4\u7684\u8d44\u6e90\u53ca\u7528\u91cf\u60c5\u51b5\uff0c\u5305\u542b\u4e86GPU\u8d44\u6e90\u3001Notebook\u3001\u4efb\u52a1\u4ee5\u53ca\u6570\u636e\u96c6\u7684\u6570\u91cf\u4fe1\u606f\u3002

          "},{"location":"admin/baize/developer/quick-start.html","title":"\u5feb\u901f\u5165\u95e8","text":"

          \u672c\u6587\u63d0\u4f9b\u4e86\u7b80\u5355\u7684\u64cd\u4f5c\u624b\u518c\u4ee5\u4fbf\u7528\u6237\u4f7f\u7528 AI Lab \u8fdb\u884c\u6570\u636e\u96c6\u3001Notebook\u3001\u4efb\u52a1\u8bad\u7ec3\u7684\u6574\u4e2a\u5f00\u53d1\u3001\u8bad\u7ec3\u6d41\u7a0b\u3002

          "},{"location":"admin/baize/developer/quick-start.html#_2","title":"\u51c6\u5907\u6570\u636e\u96c6","text":"

          \u70b9\u51fb \u6570\u636e\u7ba1\u7406 -> \u6570\u636e\u96c6 \uff0c\u9009\u62e9 \u521b\u5efa \u6309\u94ae\uff0c\u5206\u522b\u521b\u5efa\u4ee5\u4e0b\u4e09\u4e2a\u6570\u636e\u96c6\u3002

          "},{"location":"admin/baize/developer/quick-start.html#_3","title":"\u6570\u636e\u96c6\uff1a\u8bad\u7ec3\u4ee3\u7801","text":"
          • \u4ee3\u7801\u6570\u636e\u6e90\uff1ahttps://github.com/samzong/training-sample-code.git\uff0c\u4e3b\u8981\u662f\u4e00\u4e2a\u7b80\u5355\u7684 Tensorflow \u4ee3\u7801\u3002
          • \u5982\u679c\u662f\u4e2d\u56fd\u5883\u5185\u7684\u7528\u6237\uff0c\u53ef\u4ee5\u4f7f\u7528 Gitee \u52a0\u901f\uff1ahttps://gitee.com/samzong_lu/training-sample-code.git
          • \u4ee3\u7801\u8def\u5f84\u4e3a tensorflow/tf-fashion-mnist-sample

          Note

          \u76ee\u524d\u4ec5\u652f\u6301\u8bfb\u5199\u6a21\u5f0f\u4e3a ReadWriteMany \u7684 StorageClass\uff0c\u8bf7\u4f7f\u7528 NFS \u6216\u8005\u63a8\u8350\u7684 JuiceFS\u3002

          "},{"location":"admin/baize/developer/quick-start.html#_4","title":"\u6570\u636e\u96c6\uff1a\u8bad\u7ec3\u6570\u636e","text":"

          \u672c\u6b21\u8bad\u7ec3\u4f7f\u7528\u7684\u6570\u636e\u4e3a https://github.com/zalandoresearch/fashion-mnist.git\uff0c \u8fd9\u662f Fashion-MNIST \u6570\u636e\u96c6\u3002

          \u5982\u679c\u662f\u4e2d\u56fd\u5883\u5185\u7684\u7528\u6237\uff0c\u53ef\u4ee5\u4f7f\u7528 Gitee \u52a0\u901f\uff1ahttps://gitee.com/samzong_lu/fashion-mnist.git

          Note

          \u5982\u679c\u672a\u521b\u5efa\u8bad\u7ec3\u6570\u636e\u7684\u6570\u636e\u96c6\uff0c\u901a\u8fc7\u8bad\u7ec3\u811a\u672c\u4e5f\u4f1a\u81ea\u52a8\u4e0b\u8f7d\uff1b\u63d0\u524d\u51c6\u5907\u8bad\u7ec3\u6570\u636e\u53ef\u4ee5\u63d0\u9ad8\u8bad\u7ec3\u901f\u5ea6\u3002

          "},{"location":"admin/baize/developer/quick-start.html#_5","title":"\u6570\u636e\u96c6\uff1a\u7a7a\u6570\u636e\u96c6","text":"

          AI Lab \u652f\u6301\u5c06 PVC \u4f5c\u4e3a\u6570\u636e\u96c6\u7684\u6570\u636e\u6e90\u7c7b\u578b\uff0c\u6240\u4ee5\u521b\u5efa\u4e00\u4e2a\u7a7a PVC \u7ed1\u5b9a\u5230\u6570\u636e\u96c6\u540e\uff0c\u53ef\u5c06\u7a7a\u6570\u636e\u96c6\u4f5c\u4e3a\u5b58\u653e\u540e\u7eed\u8bad\u7ec3\u4efb\u52a1\u7684\u8f93\u51fa\u6570\u636e\u96c6\uff0c\u5b58\u653e\u6a21\u578b\u548c\u65e5\u5fd7\u3002

          "},{"location":"admin/baize/developer/quick-start.html#tensorflow","title":"\u73af\u5883\u4f9d\u8d56: tensorflow","text":"

          \u811a\u672c\u5728\u8fd0\u884c\u65f6\uff0c\u9700\u8981\u4f9d\u8d56 Tensorflow \u7684 Python \u5e93\uff0c\u53ef\u4ee5\u4f7f\u7528 AI Lab \u7684\u73af\u5883\u4f9d\u8d56\u7ba1\u7406\u529f\u80fd\uff0c\u63d0\u524d\u5c06\u9700\u8981\u7684 Python \u5e93\u4e0b\u8f7d\u548c\u51c6\u5907\u5b8c\u6210\uff0c\u65e0\u9700\u4f9d\u8d56\u955c\u50cf\u6784\u5efa

          \u53c2\u8003 \u73af\u5883\u4f9d\u8d56 \u7684\u64cd\u4f5c\u65b9\u5f0f\uff0c\u6dfb\u52a0\u4e00\u4e2a CONDA \u73af\u5883.

          name: tensorflow\nchannels:\n  - defaults\n  - conda-forge\ndependencies:\n  - python=3.12\n  - tensorflow\nprefix: /opt/conda/envs/tensorflow\n

          Note

          Once the environment warm-up succeeds, you only need to mount this environment into Notebooks and training jobs; with the base image provided by AI Lab, that is all you need.

          "},{"location":"admin/baize/developer/quick-start.html#notebook","title":"Debug the Script in a Notebook","text":"

          Prepare the development environment: click Notebooks in the navigation bar, then click Create.

          • Associate the three prepared datasets; fill in the mount paths as shown in the figure below, and be sure to configure the empty dataset under the output dataset location

          • Select and bind the environment dependency package

            Wait for the Notebook to be created successfully, click the access address in the list to enter the Notebook, and run the following command in the Notebook terminal to start training.

            Note

            The script uses TensorFlow; if you forgot to associate the dependency library, you can install it temporarily with pip install tensorflow.

            python /home/jovyan/code/tensorflow/tf-fashion-mnist-sample/train.py\n
          "},{"location":"admin/baize/developer/quick-start.html#_6","title":"\u521b\u5efa\u8bad\u7ec3\u4efb\u52a1","text":"
          1. \u70b9\u51fb\u5bfc\u822a\u680f\u7684 \u4efb\u52a1\u4e2d\u5fc3 -> \u8bad\u7ec3\u4efb\u52a1 \uff0c\u521b\u5efa\u4e00\u4e2a Tensorflow \u5355\u673a\u4efb\u52a1
          2. \u5148\u586b\u5199\u57fa\u672c\u53c2\u6570\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65
          3. \u5728\u4efb\u52a1\u8d44\u6e90\u914d\u7f6e\u4e2d\uff0c\u6b63\u786e\u914d\u7f6e\u4efb\u52a1\u8d44\u6e90\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65

            • \u955c\u50cf\uff1a\u5982\u679c\u524d\u5e8f\u73af\u5883\u4f9d\u8d56\u5305\u51c6\u5907\u597d\u4e86\uff0c\u4f7f\u7528\u9ed8\u8ba4\u955c\u50cf\u5373\u53ef\uff1b \u5982\u679c\u672a\u51c6\u5907\uff0c\u8981\u786e\u8ba4\u955c\u50cf\u5185\u6709 tensorflow \u7684 Python \u5e93
            • shell\uff1a\u4f7f\u7528 bash \u5373\u53ef
            • \u542f\u7528\u547d\u4ee4\uff1a

              python /home/jovyan/code/tensorflow/tf-fashion-mnist-sample/train.py\n
          4. In the advanced configuration, enable Job Analysis (Tensorboard) and click OK.

            Note

            The logs live at /home/jovyan/model/train/logs/ in the output dataset; see the sketch after these steps for one way a script can write there.

          5. Return to the training job list and wait for the status to change to Succeeded. Click \u2507 on the right of the list entry to view details, clone the job, update its priority, view logs, delete it, and more.

          6. After the job is created successfully, click Job Analysis in the left navigation bar to view the job status and tune the training.
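          As a minimal sketch, here is how a TensorFlow 2.x training script could emit TensorBoard logs to that location; the model and data are illustrative placeholders, and only the log_dir path comes from the note above:

          import tensorflow as tf\n\n# Write TensorBoard logs where Job Analysis expects them (see the note in step 4)\ntensorboard_cb = tf.keras.callbacks.TensorBoard(log_dir='/home/jovyan/model/train/logs/')\n\n# Illustrative model and data; replace with your own training code\n(x_train, y_train), _ = tf.keras.datasets.fashion_mnist.load_data()\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Flatten(input_shape=(28, 28)),\n    tf.keras.layers.Dense(128, activation='relu'),\n    tf.keras.layers.Dense(10),\n])\nmodel.compile(optimizer='adam',\n              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n              metrics=['accuracy'])\nmodel.fit(x_train, y_train, epochs=1, callbacks=[tensorboard_cb])\n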

          "},{"location":"admin/baize/developer/dataset/create-use-delete.html","title":"\u6570\u636e\u96c6\u5217\u8868","text":"

          AI Lab \u63d0\u4f9b\u6a21\u578b\u5f00\u53d1\u3001\u8bad\u7ec3\u4ee5\u53ca\u63a8\u7406\u8fc7\u7a0b\u6240\u6709\u9700\u8981\u7684\u6570\u636e\u96c6\u7ba1\u7406\u529f\u80fd\u3002\u76ee\u524d\u652f\u6301\u5c06\u591a\u79cd\u6570\u636e\u6e90\u7edf\u4e00\u63a5\u5165\u80fd\u529b\u3002

          \u901a\u8fc7\u7b80\u5355\u914d\u7f6e\u5373\u53ef\u5c06\u6570\u636e\u6e90\u63a5\u5165\u5230 AI Lab \u4e2d\uff0c\u5b9e\u73b0\u6570\u636e\u7684\u7edf\u4e00\u7eb3\u7ba1\u3001\u9884\u70ed\u3001\u6570\u636e\u96c6\u7ba1\u7406\u7b49\u529f\u80fd\u3002

          "},{"location":"admin/baize/developer/dataset/create-use-delete.html#_2","title":"\u521b\u5efa\u6570\u636e\u96c6","text":"
          1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u6570\u636e\u7ba1\u7406 -> \u6570\u636e\u96c6\u5217\u8868 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae\u3002

          2. \u9009\u62e9\u6570\u636e\u96c6\u5f52\u5c5e\u7684\u5de5\u4f5c\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4 \u4e0b\u4e00\u6b65 \u3002

          3. \u914d\u7f6e\u76ee\u6807\u6570\u636e\u7684\u6570\u636e\u6e90\u7c7b\u578b\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a \u3002

            \u76ee\u524d\u652f\u6301\u8fd9\u51e0\u79cd\u6570\u636e\u6e90\uff1a

            • GIT\uff1a\u652f\u6301 GitHub\u3001GitLab\u3001Gitee \u7b49\u4ed3\u5e93
            • S3\uff1a\u652f\u6301 Amazon \u4e91\u7b49\u5bf9\u8c61\u5b58\u50a8
            • HTTP\uff1a\u76f4\u63a5\u8f93\u5165\u4e00\u4e2a\u6709\u6548\u7684 HTTP \u7f51\u5740
            • PVC\uff1a\u652f\u6301\u9884\u5148\u521b\u5efa\u7684 Kubernetes PersistentVolumeClaim
            • NFS\uff1a\u652f\u6301 NFS \u5171\u4eab\u5b58\u50a8
          4. \u6570\u636e\u96c6\u521b\u5efa\u6210\u529f\u5c06\u8fd4\u56de\u6570\u636e\u96c6\u5217\u8868\u3002\u4f60\u53ef\u4ee5\u901a\u8fc7\u53f3\u4fa7\u7684 \u2507 \u6267\u884c\u66f4\u591a\u64cd\u4f5c\u3002

          Info

          Right after a dataset is created, the system automatically performs a one-time data preload; the dataset cannot be used until the preload completes.

          "},{"location":"admin/baize/developer/dataset/create-use-delete.html#_3","title":"\u6570\u636e\u96c6\u4f7f\u7528","text":"

          \u6570\u636e\u96c6\u521b\u5efa\u6210\u529f\u540e\uff0c\u53ef\u4ee5\u5728\u6a21\u578b\u8bad\u7ec3\u3001\u63a8\u7406\u7b49\u4efb\u52a1\u4e2d\u4f7f\u7528\u3002

          "},{"location":"admin/baize/developer/dataset/create-use-delete.html#notebook","title":"\u5728 Notebook \u4e2d\u4f7f\u7528","text":"

          \u5728\u521b\u5efa Notebook \u4e2d\uff0c\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u6570\u636e\u96c6\uff1b\u4f7f\u7528\u65b9\u5f0f\u5982\u4e0b\uff1a

          • \u4f7f\u7528\u6570\u636e\u96c6\u505a\u8bad\u7ec3\u6570\u636e\u6302\u8f7d
          • \u4f7f\u7528\u6570\u636e\u96c6\u505a\u4ee3\u7801\u6302\u8f7d

          "},{"location":"admin/baize/developer/dataset/create-use-delete.html#_4","title":"\u5728 \u8bad\u7ec3\u4efb\u52a1 \u4e2d\u4f7f\u7528","text":"
          • \u4f7f\u7528\u6570\u636e\u96c6\u6307\u5b9a\u4efb\u52a1\u8f93\u51fa
          • \u4f7f\u7528\u6570\u636e\u96c6\u6307\u5b9a\u4efb\u52a1\u8f93\u5165
          • \u4f7f\u7528\u6570\u636e\u96c6\u6307\u5b9a TensorBoard \u8f93\u51fa
          "},{"location":"admin/baize/developer/dataset/create-use-delete.html#_5","title":"\u5728\u63a8\u7406\u670d\u52a1 \u4e2d\u4f7f\u7528","text":"
          • \u4f7f\u7528\u6570\u636e\u96c6\u6302\u8f7d\u6a21\u578b
          "},{"location":"admin/baize/developer/dataset/create-use-delete.html#_6","title":"\u5220\u9664\u6570\u636e\u96c6","text":"

          \u5982\u679c\u53d1\u73b0\u6570\u636e\u96c6\u5197\u4f59\u3001\u8fc7\u671f\u6216\u56e0\u5176\u4ed6\u7f18\u6545\u4e0d\u518d\u9700\u8981\uff0c\u53ef\u4ee5\u4ece\u6570\u636e\u96c6\u5217\u8868\u4e2d\u5220\u9664\u3002

          1. \u5728\u6570\u636e\u96c6\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u5220\u9664 \u3002

          2. \u5728\u5f39\u7a97\u4e2d\u786e\u8ba4\u8981\u5220\u9664\u7684\u6570\u636e\u96c6\uff0c\u8f93\u5165\u6570\u636e\u96c6\u540d\u79f0\u540e\u70b9\u51fb \u5220\u9664 \u3002

          3. \u5c4f\u5e55\u63d0\u793a\u5220\u9664\u6210\u529f\uff0c\u8be5\u6570\u636e\u96c6\u4ece\u5217\u8868\u4e2d\u6d88\u5931\u3002

          Caution

          \u6570\u636e\u96c6\u4e00\u65e6\u5220\u9664\u5c06\u4e0d\u53ef\u6062\u590d\uff0c\u8bf7\u8c28\u614e\u64cd\u4f5c\u3002

          "},{"location":"admin/baize/developer/dataset/environments.html","title":"\u7ba1\u7406\u73af\u5883","text":"

          \u672c\u6587\u8bf4\u660e\u5982\u4f55\u5728 AI Lab \u4e2d\u7ba1\u7406\u4f60\u7684\u73af\u5883\u4f9d\u8d56\u5e93\uff0c\u4ee5\u4e0b\u662f\u5177\u4f53\u64cd\u4f5c\u6b65\u9aa4\u548c\u6ce8\u610f\u4e8b\u9879\u3002

          1. \u73af\u5883\u7ba1\u7406\u6982\u8ff0
          2. \u521b\u5efa\u65b0\u73af\u5883
          3. \u914d\u7f6e\u73af\u5883
          4. \u6545\u969c\u6392\u9664
          "},{"location":"admin/baize/developer/dataset/environments.html#_2","title":"\u73af\u5883\u7ba1\u7406\u6982\u8ff0","text":"

          \u4f20\u7edf\u65b9\u5f0f\uff0c\u4e00\u822c\u4f1a\u5c06 Python \u73af\u5883\u4f9d\u8d56\u5728\u955c\u50cf\u4e2d\u6784\u5efa\uff0c\u955c\u50cf\u5e26\u6709 Python \u7248\u672c\u548c\u4f9d\u8d56\u5305\u7684\u955c\u50cf\uff0c\u7ef4\u62a4\u6210\u672c\u8f83\u9ad8\u4e14\u66f4\u65b0\u4e0d\u65b9\u4fbf\uff0c\u5f80\u5f80\u9700\u8981\u91cd\u65b0\u6784\u5efa\u955c\u50cf\u3002

          \u800c\u5728 AI Lab \u4e2d\uff0c\u7528\u6237\u53ef\u4ee5\u901a\u8fc7 \u73af\u5883\u7ba1\u7406 \u6a21\u5757\u6765\u7ba1\u7406\u7eaf\u7cb9\u7684\u73af\u5883\u4f9d\u8d56\uff0c\u5c06\u8fd9\u90e8\u5206\u4ece\u955c\u50cf\u4e2d\u89e3\u8026\uff0c\u5e26\u6765\u7684\u4f18\u52bf\u6709\uff1a

          • \u4e00\u4efd\u73af\u5883\u591a\u5904\u4f7f\u7528\uff0c\u540c\u65f6\u53ef\u4ee5\u5728 Notebook\u3001\u5206\u5e03\u5f0f\u8bad\u7ec3\u4efb\u52a1\u3001\u4e43\u81f3\u63a8\u7406\u670d\u52a1\u4e2d\u4f7f\u7528\u3002
          • \u66f4\u65b0\u4f9d\u8d56\u5305\u66f4\u52a0\u65b9\u4fbf\uff0c\u53ea\u9700\u8981\u66f4\u65b0\u73af\u5883\u4f9d\u8d56\u5373\u53ef\uff0c\u65e0\u9700\u91cd\u65b0\u6784\u5efa\u955c\u50cf\u3002

          \u4ee5\u4e0b\u4e3a\u73af\u5883\u7ba1\u7406\u7684\u4e3b\u8981\u7ec4\u6210\u90e8\u5206\uff1a

          • \u96c6\u7fa4 \uff1a\u9009\u62e9\u9700\u8981\u64cd\u4f5c\u7684\u96c6\u7fa4\u3002
          • \u547d\u540d\u7a7a\u95f4 \uff1a\u9009\u62e9\u547d\u540d\u7a7a\u95f4\u4ee5\u9650\u5b9a\u64cd\u4f5c\u8303\u56f4\u3002
          • \u73af\u5883\u5217\u8868 \uff1a\u5c55\u793a\u5f53\u524d\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u6240\u6709\u73af\u5883\u53ca\u5176\u72b6\u6001\u3002

          | Field | Description | Example |
          | --- | --- | --- |
          | Name | The environment's name | my-environment |
          | Status | The environment's current status (normal or failed); a newly created environment goes through a warm-up, and once it succeeds the environment can be used in other tasks | Normal |
          | Created At | When the environment was created | 2023-10-01 10:00:00 |
          "},{"location":"admin/baize/developer/dataset/environments.html#_3","title":"Create a New Environment","text":"

          On the Environment Management page, click the Create button in the upper-right corner to start the environment creation flow.

          | Field | Description | Example |
          | --- | --- | --- |
          | Name | Enter the environment's name, 2-63 characters; it must start and end with a lowercase letter or digit. | my-environment |
          | Deployment Location | Cluster: select the cluster to deploy to; Namespace: select the namespace | gpu-cluster, default |
          | Remarks | Fill in remark information. | This is a test environment |
          | Labels | Add labels to the environment. | env:test |
          | Annotations | Add annotations to the environment. | Annotation example |

          After filling everything in, click Next to proceed to the environment configuration.
          "},{"location":"admin/baize/developer/dataset/environments.html#_4","title":"Configure the Environment","text":"

          In the environment configuration step, configure the Python version and the dependency package manager.

          | Field | Description | Example |
          | --- | --- | --- |
          | Python Version | Select the required Python version | 3.12.3 |
          | Package Manager | Select the package management tool, either PIP or CONDA | PIP |
          | Environment Data | If PIP is selected: enter a dependency list in requirements.txt format in the editor below. If CONDA is selected: enter a dependency list in environment.yaml format in the editor below. | numpy==1.21.0 |
          | Other Options | pip extra index URL: an extra pip index URL, for enterprises with an internal private repository or a PIP mirror. | https://pypi.example.com |
          | | GPU configuration: enable or disable GPU configuration; some GPU-related dependency packages need GPU resources configured during preloading. | Enabled |
          | | Associated storage: select the associated storage configuration; environment dependency packages are stored there. Note: the storage must support ReadWriteMany. | my-storage-config |

          After configuration, click the Create button and the system automatically creates and configures the new Python environment.

          "},{"location":"admin/baize/developer/dataset/environments.html#_5","title":"Troubleshooting","text":"
          • If environment creation fails:

            • Check that the network connection is normal.
            • Confirm that the Python version and package manager configuration are correct.
            • Make sure the selected cluster and namespace are available.
          • If dependency warm-up fails:

            • Check that the requirements.txt or environment.yaml file is correctly formatted; see the sketch after this list for a quick local check.
            • Confirm that the dependency package names and versions are correct.
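          As a minimal sketch of such a format check, assuming PyYAML is installed and the file names match the examples above:

          import yaml\n\n# Confirm environment.yaml is parseable YAML before uploading it\nwith open('environment.yaml', encoding='utf-8') as f:\n    env = yaml.safe_load(f)\nprint('dependencies:', env.get('dependencies'))\n\n# requirements.txt should hold one requirement per line, e.g. numpy==1.21.0\nwith open('requirements.txt', encoding='utf-8') as f:\n    for lineno, line in enumerate(f, 1):\n        entry = line.strip()\n        if entry and not entry.startswith('#') and ' ' in entry:\n            print(f'suspicious entry on line {lineno}: {entry!r}')\n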

          Those are the basic steps and notes for managing Python dependency libraries in AI Lab.

          "},{"location":"admin/baize/developer/inference/models.html","title":"\u4e86\u89e3\u6a21\u578b\u652f\u6301\u60c5\u51b5","text":"

          \u968f\u7740 AI Lab \u7684\u5feb\u901f\u8fed\u4ee3\uff0c\u6211\u4eec\u5df2\u7ecf\u652f\u6301\u4e86\u591a\u79cd\u6a21\u578b\u7684\u63a8\u7406\u670d\u52a1\uff0c\u60a8\u53ef\u4ee5\u5728\u8fd9\u91cc\u770b\u5230\u6240\u652f\u6301\u7684\u6a21\u578b\u4fe1\u606f\u3002

          • AI Lab v0.3.0 \u4e0a\u7ebf\u4e86\u6a21\u578b\u63a8\u7406\u670d\u52a1\uff0c\u9488\u5bf9\u4f20\u7edf\u7684\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\uff0c\u65b9\u4fbf\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528AI Lab \u7684\u63a8\u7406\u670d\u52a1\uff0c\u65e0\u9700\u5173\u5fc3\u6a21\u578b\u7684\u90e8\u7f72\u548c\u7ef4\u62a4\u3002
          • AI Lab v0.6.0 \u652f\u6301\u4e86\u5b8c\u6574\u7248\u672c\u7684 vLLM \u63a8\u7406\u80fd\u529b\uff0c\u652f\u6301\u8bf8\u591a\u5927\u8bed\u8a00\u6a21\u578b\uff0c\u5982 LLama\u3001Qwen\u3001ChatGLM \u7b49\u3002

          \u60a8\u53ef\u4ee5\u5728 AI Lab \u4e2d\u4f7f\u7528\u7ecf\u8fc7\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9a8c\u8bc1\u8fc7\u7684 GPU \u7c7b\u578b\uff1b \u66f4\u591a\u7ec6\u8282\u53c2\u9605 GPU \u652f\u6301\u77e9\u9635\u3002

          "},{"location":"admin/baize/developer/inference/models.html#triton-inference-server","title":"Triton Inference Server","text":"

          Triton Inference Server provides good support for traditional deep learning models; we currently support the following mainstream inference backends:

          | Backend | Supported model formats | Introduction |
          | --- | --- | --- |
          | pytorch | TorchScript, PyTorch 2.0-format models | triton-inference-server/pytorch_backend |
          | tensorflow | TensorFlow 2.x | triton-inference-server/tensorflow_backend |
          | vLLM (Deprecated) | Same as vLLM | Supported models match the vLLM support Model list |

          Danger

          Using vLLM as a Triton backend is deprecated; we recommend deploying your large language models with the latest native vLLM support.

          "},{"location":"admin/baize/developer/inference/models.html#vllm","title":"vLLM","text":"

          With vLLM you can bring up large language models quickly. The list of models you can see here usually matches the vLLM Support Models.

          • HuggingFace models: we support most HuggingFace models; browse the HuggingFace Model Hub for more.
          • The vLLM supported-models list covers the supported large language models and vision-language models.
          • Models fine-tuned from a base model supported by vLLM.
          "},{"location":"admin/baize/developer/inference/models.html#vllm_1","title":"vLLM \u65b0\u7279\u6027","text":"

          \u76ee\u524d\uff0cAI Lab \u8fd8\u652f\u6301\u5728\u4f7f\u7528 vLLM \u4f5c\u4e3a\u63a8\u7406\u5de5\u5177\u65f6\u7684\u4e00\u4e9b\u65b0\u7279\u6027\uff1a

          • \u5728\u63a8\u7406\u6a21\u578b\u65f6\uff0c\u542f\u7528 Lora Adapter \u6765\u4f18\u5316\u6a21\u578b\u63a8\u7406\u670d\u52a1
          • \u63d0\u4f9b\u517c\u5bb9 OpenAI \u7684 OpenAPI \u63a5\u53e3\uff0c\u65b9\u4fbf\u7528\u6237\u5207\u6362\u5230\u672c\u5730\u63a8\u7406\u670d\u52a1\u65f6\uff0c\u53ef\u4ee5\u4f4e\u6210\u672c\u7684\u5feb\u901f\u5207\u6362
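          For example, a minimal sketch of calling such a service through the OpenAI-compatible interface, assuming the official openai Python SDK and placeholder values for the service address and model name:

          from openai import OpenAI\n\n# Point the OpenAI SDK at the local inference service (placeholder address)\nclient = OpenAI(base_url='http://<ip>:<port>/v1', api_key='EMPTY')\n\nresp = client.chat.completions.create(\n    model='chatglm3-6b',  # placeholder model name\n    messages=[{'role': 'user', 'content': 'hello'}],\n    temperature=0.7,\n)\nprint(resp.choices[0].message.content)\n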
          "},{"location":"admin/baize/developer/inference/models.html#_2","title":"\u4e0b\u4e00\u6b65","text":"
          • \u521b\u5efa Triton \u63a8\u7406\u670d\u52a1
          • \u521b\u5efa vLLM \u63a8\u7406\u670d\u52a1
          "},{"location":"admin/baize/developer/inference/triton-inference.html","title":"\u521b\u5efa Triton \u63a8\u7406\u670d\u52a1","text":"

          AI Lab \u76ee\u524d\u63d0\u4f9b\u4ee5 Triton\u3001vLLM \u4f5c\u4e3a\u63a8\u7406\u6846\u67b6\uff0c\u7528\u6237\u53ea\u9700\u7b80\u5355\u914d\u7f6e\u5373\u53ef\u5feb\u901f\u542f\u52a8\u4e00\u4e2a\u9ad8\u6027\u80fd\u7684\u63a8\u7406\u670d\u52a1\u3002

          Danger

          Using vLLM as a Triton backend is deprecated; we recommend deploying your large language models with the latest native vLLM support.

          "},{"location":"admin/baize/developer/inference/triton-inference.html#triton_1","title":"About Triton","text":"

          Triton is an open-source inference server developed by NVIDIA that simplifies the deployment and serving of machine learning models. It supports multiple deep learning frameworks, including TensorFlow and PyTorch, so users can easily manage and deploy different kinds of models.

          "},{"location":"admin/baize/developer/inference/triton-inference.html#_1","title":"Prerequisites","text":"

          Prepare the model data: manage the model code in dataset management and make sure the data is preloaded successfully. The example below uses a PyTorch model for MNIST handwritten digit recognition.

          Note

          The model to be served must follow this directory layout within the dataset:

            <model-repository-name>\n  \u2514\u2500\u2500 <model-name>\n     \u2514\u2500\u2500 <version>\n        \u2514\u2500\u2500 <model-definition-file>\n

          The directory layout in this example is:

              model-repo\n    \u2514\u2500\u2500 mnist-cnn\n        \u2514\u2500\u2500 1\n            \u2514\u2500\u2500 model.pt\n
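          As a minimal sketch, here is one way such a layout could be produced, assuming a TorchScript-serialized model; the SimpleCNN module below is an illustrative placeholder, not the actual sample model:

          import os\n\nimport torch\nimport torch.nn as nn\n\n# Illustrative placeholder model for 28x28 grayscale MNIST digits\nclass SimpleCNN(nn.Module):\n    def __init__(self):\n        super().__init__()\n        self.conv = nn.Conv2d(1, 8, kernel_size=3)\n        self.fc = nn.Linear(8 * 26 * 26, 10)\n\n    def forward(self, x):\n        x = torch.relu(self.conv(x))\n        return self.fc(torch.flatten(x, start_dim=1))\n\n# Trace to TorchScript and save into the layout Triton expects\nmodel = SimpleCNN().eval()\nscripted = torch.jit.trace(model, torch.zeros(1, 1, 28, 28))\nos.makedirs('model-repo/mnist-cnn/1', exist_ok=True)\nscripted.save('model-repo/mnist-cnn/1/model.pt')\n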
          "},{"location":"admin/baize/developer/inference/triton-inference.html#_2","title":"\u521b\u5efa\u63a8\u7406\u670d\u52a1","text":"

          \u76ee\u524d\u5df2\u7ecf\u652f\u6301\u8868\u5355\u521b\u5efa\uff0c\u53ef\u4ee5\u754c\u9762\u5b57\u6bb5\u63d0\u793a\uff0c\u8fdb\u884c\u670d\u52a1\u521b\u5efa\u3002

          "},{"location":"admin/baize/developer/inference/triton-inference.html#_3","title":"\u914d\u7f6e\u6a21\u578b\u8def\u5f84","text":"

          \u6a21\u578b\u8def\u5f84 model-repo/mnist-cnn/1/model.pt \u9700\u8981\u548c\u6570\u636e\u96c6\u4e2d\u7684\u6a21\u578b\u76ee\u5f55\u683c\u5f0f\u4e00\u81f4\u3002

          "},{"location":"admin/baize/developer/inference/triton-inference.html#_4","title":"\u6a21\u578b\u914d\u7f6e","text":""},{"location":"admin/baize/developer/inference/triton-inference.html#_5","title":"\u914d\u7f6e\u8f93\u5165\u548c\u8f93\u51fa\u53c2\u6570","text":"

          Note

          \u8f93\u5165\u548c\u8f93\u51fa\u53c2\u6570\u7684\u7b2c\u4e00\u4e2a\u7ef4\u5ea6\u9ed8\u8ba4\u4e3a batchsize \u7684\u5927\u5c0f\uff0c\u8bbe\u7f6e\u4e3a -1 \u53ef\u4ee5\u6839\u636e\u8f93\u5165\u7684\u63a8\u7406\u6570\u636e\u81ea\u52a8\u8ba1\u7b97 batchsize\u3002\u53c2\u6570\u5176\u4f59\u7ef4\u5ea6\u548c\u6570\u636e\u7c7b\u578b\u9700\u8981\u4e0e\u6a21\u578b\u8f93\u5165\u5339\u914d\u3002

          "},{"location":"admin/baize/developer/inference/triton-inference.html#_6","title":"\u914d\u7f6e\u73af\u5883","text":"

          \u53ef\u4ee5\u5bfc\u5165 \u73af\u5883\u7ba1\u7406 \u4e2d\u521b\u5efa\u7684\u73af\u5883\u4f5c\u4e3a\u63a8\u7406\u65f6\u7684\u8fd0\u884c\u73af\u5883\u3002

          "},{"location":"admin/baize/developer/inference/triton-inference.html#_7","title":"\u9ad8\u7ea7\u914d\u7f6e","text":""},{"location":"admin/baize/developer/inference/triton-inference.html#_8","title":"\u914d\u7f6e\u8ba4\u8bc1\u7b56\u7565","text":"

          \u652f\u6301 API key \u7684\u8bf7\u6c42\u65b9\u5f0f\u8ba4\u8bc1\uff0c\u7528\u6237\u53ef\u4ee5\u81ea\u5b9a\u4e49\u589e\u52a0\u8ba4\u8bc1\u53c2\u6570\u3002

          "},{"location":"admin/baize/developer/inference/triton-inference.html#_9","title":"\u4eb2\u548c\u6027\u8c03\u5ea6","text":"

          \u652f\u6301 \u6839\u636e GPU \u8d44\u6e90\u7b49\u8282\u70b9\u914d\u7f6e\u5b9e\u73b0\u81ea\u52a8\u5316\u7684\u4eb2\u548c\u6027\u8c03\u5ea6\uff0c\u540c\u65f6\u4e5f\u65b9\u4fbf\u7528\u6237\u81ea\u5b9a\u4e49\u8c03\u5ea6\u7b56\u7565\u3002

          "},{"location":"admin/baize/developer/inference/triton-inference.html#_10","title":"\u8bbf\u95ee","text":""},{"location":"admin/baize/developer/inference/triton-inference.html#api","title":"API \u8bbf\u95ee","text":"
          • Triton \u63d0\u4f9b\u4e86\u4e00\u4e2a\u57fa\u4e8e REST \u7684 API\uff0c\u5141\u8bb8\u5ba2\u6237\u7aef\u901a\u8fc7 HTTP POST \u8bf7\u6c42\u8fdb\u884c\u6a21\u578b\u63a8\u7406\u3002
          • \u5ba2\u6237\u7aef\u53ef\u4ee5\u53d1\u9001 JSON \u683c\u5f0f\u7684\u8bf7\u6c42\u4f53\uff0c\u5176\u4e2d\u5305\u542b\u8f93\u5165\u6570\u636e\u548c\u76f8\u5173\u7684\u5143\u6570\u636e\u3002
          "},{"location":"admin/baize/developer/inference/triton-inference.html#http","title":"HTTP \u8bbf\u95ee","text":"
          1. \u53d1\u9001 HTTP POST \u8bf7\u6c42\uff1a\u4f7f\u7528\u5de5\u5177\u5982 curl \u6216 HTTP \u5ba2\u6237\u7aef\u5e93\uff08\u5982 Python \u7684 requests \u5e93\uff09\u5411 Triton Server \u53d1\u9001 POST \u8bf7\u6c42\u3002

          2. \u8bbe\u7f6e HTTP \u5934\uff1a\u6839\u636e\u7528\u6237\u914d\u7f6e\u9879\u81ea\u52a8\u751f\u6210\u7684\u914d\u7f6e\uff0c\u5305\u542b\u6a21\u578b\u8f93\u5165\u548c\u8f93\u51fa\u7684\u5143\u6570\u636e\u3002

          3. \u6784\u5efa\u8bf7\u6c42\u4f53\uff1a\u8bf7\u6c42\u4f53\u901a\u5e38\u5305\u542b\u8981\u8fdb\u884c\u63a8\u7406\u7684\u8f93\u5165\u6570\u636e\uff0c\u4ee5\u53ca\u6a21\u578b\u7279\u5b9a\u7684\u5143\u6570\u636e\u3002

          "},{"location":"admin/baize/developer/inference/triton-inference.html#curl","title":"\u793a\u4f8b curl \u547d\u4ee4","text":"
            curl -X POST \"http://<ip>:<port>/v2/models/<inference-name>/infer\" \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\n    \"inputs\": [\n      {\n        \"name\": \"model_input\",            \n        \"shape\": [1, 1, 32, 32],          \n        \"datatype\": \"FP32\",               \n        \"data\": [\n          [0.1234, 0.5678, 0.9101, ... ]  \n        ]\n      }\n    ]\n  }'\n
          • <ip> \u662f Triton Inference Server \u8fd0\u884c\u7684\u4e3b\u673a\u5730\u5740\u3002
          • <port> \u662f Triton Inference Server \u8fd0\u884c\u7684\u4e3b\u673a\u7aef\u53e3\u53f7\u3002
          • <inference-name> \u662f\u6240\u521b\u5efa\u7684\u63a8\u7406\u670d\u52a1\u7684\u540d\u79f0\u3002
          • \"name\" \u8981\u4e0e\u6a21\u578b\u914d\u7f6e\u4e2d\u7684\u8f93\u5165\u53c2\u6570\u7684 name \u4e00\u81f4\u3002
          • \"shape\" \u8981\u4e0e\u6a21\u578b\u914d\u7f6e\u4e2d\u7684\u8f93\u5165\u53c2\u6570\u7684 dims \u4e00\u81f4\u3002
          • \"datatype\" \u8981\u4e0e\u6a21\u578b\u914d\u7f6e\u4e2d\u7684\u8f93\u5165\u53c2\u6570\u7684 Data Type \u4e00\u81f4\u3002
          • \"data\" \u66ff\u6362\u4e3a\u5b9e\u9645\u7684\u63a8\u7406\u6570\u636e\u3002

          \u8bf7\u6ce8\u610f\uff0c\u4e0a\u8ff0\u793a\u4f8b\u4ee3\u7801\u9700\u8981\u6839\u636e\u4f60\u7684\u5177\u4f53\u6a21\u578b\u548c\u73af\u5883\u8fdb\u884c\u8c03\u6574\uff0c\u8f93\u5165\u6570\u636e\u7684\u683c\u5f0f\u548c\u5185\u5bb9\u4e5f\u9700\u8981\u7b26\u5408\u6a21\u578b\u7684\u8981\u6c42\u3002
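          The same call can be made from Python. Below is a minimal sketch with the requests library; the placeholders mirror the curl example above and must be adapted to your model:

          import requests\n\n# Placeholders as in the curl example: adjust host, port, and service name\nurl = 'http://<ip>:<port>/v2/models/<inference-name>/infer'\n\npayload = {\n    'inputs': [\n        {\n            'name': 'model_input',       # must match the input name in the model configuration\n            'shape': [1, 1, 32, 32],     # must match the configured dims\n            'datatype': 'FP32',          # must match the configured data type\n            'data': [],                  # fill with real inference data of the declared shape\n        }\n    ]\n}\n\nresp = requests.post(url, json=payload, timeout=60)\nresp.raise_for_status()\nprint(resp.json())\n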

          "},{"location":"admin/baize/developer/inference/vllm-inference.html","title":"\u521b\u5efa vLLM \u63a8\u7406\u670d\u52a1","text":"

          AI Lab \u652f\u6301\u4ee5 vLLM \u4f5c\u4e3a\u63a8\u7406\u670d\u52a1\uff0c\u63d0\u4f9b\u5168\u90e8 vLLM \u7684\u80fd\u529b\uff0c\u540c\u65f6\u63d0\u4f9b\u4e86\u5b8c\u5168\u9002\u914d OpenAI \u63a5\u53e3\u5b9a\u4e49\u3002

          "},{"location":"admin/baize/developer/inference/vllm-inference.html#vllm_1","title":"vLLM \u4ecb\u7ecd","text":"

          vLLM \u662f\u4e00\u4e2a\u5feb\u901f\u4e14\u6613\u4e8e\u4f7f\u7528\u7684\u7528\u4e8e\u63a8\u7406\u548c\u670d\u52a1\u7684\u5e93\uff0cvLLM \u65e8\u5728\u6781\u5927\u5730\u63d0\u5347\u5b9e\u65f6\u573a\u666f\u4e0b\u7684\u8bed\u8a00\u6a21\u578b\u670d\u52a1\u7684\u541e\u5410\u4e0e\u5185\u5b58\u4f7f\u7528\u6548\u7387\u3002vLLM \u5728\u901f\u5ea6\u3001\u7075\u6d3b\u6027\u65b9\u9762\u5177\u6709\u4ee5\u4e0b\u90e8\u5206\u7279\u70b9\uff1a

          • \u8fde\u7eed\u6279\u5904\u7406\u4f20\u5165\u8bf7\u6c42\uff1b
          • \u4f7f\u7528 PagedAttention \u9ad8\u6548\u7ba1\u7406\u6ce8\u610f\u529b\u952e\u548c\u503c\u5185\u5b58\uff1b
          • \u4e0e\u6d41\u884c\u7684 HuggingFace \u578b\u53f7\u65e0\u7f1d\u96c6\u6210\uff1b
          • \u517c\u5bb9 OpenAI \u7684 API \u670d\u52a1\u5668\u3002
          "},{"location":"admin/baize/developer/inference/vllm-inference.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

          \u51c6\u5907\u6a21\u578b\u6570\u636e\uff1a\u5728\u6570\u636e\u96c6\u7ba1\u7406\u4e2d\u7eb3\u7ba1\u6a21\u578b\u4ee3\u7801\uff0c\u5e76\u4fdd\u8bc1\u6570\u636e\u6210\u529f\u9884\u52a0\u8f7d\u3002

          "},{"location":"admin/baize/developer/inference/vllm-inference.html#_2","title":"\u521b\u5efa\u63a8\u7406\u670d\u52a1","text":"
          1. \u9009\u62e9 vLLM \u63a8\u7406\u6846\u67b6\uff0c\u5e76\u5728\u9009\u62e9\u6a21\u578b\u6a21\u5757\u9009\u62e9\u63d0\u524d\u521b\u5efa\u597d\u7684\u6a21\u578b\u6570\u636e\u96c6 hdd-models \u5e76\u586b\u5199\u6570\u636e\u96c6\u4e2d\u6a21\u578b\u6240\u5728\u7684\u8def\u5f84\u4fe1\u606f\u3002

            \u672c\u6587\u63a8\u7406\u670d\u52a1\u7684\u521b\u5efa\u4f7f\u7528 ChatGLM3 \u6a21\u578b\u3002

          2. \u914d\u7f6e\u63a8\u7406\u670d\u52a1\u7684\u8d44\u6e90\uff0c\u5e76\u8c03\u6574\u63a8\u7406\u670d\u52a1\u8fd0\u884c\u7684\u53c2\u6570\u3002

            | Parameter | Description |
            | --- | --- |
            | GPU Resources | Configure GPU resources for inference based on the model size and the cluster resources. |
            | Allow Remote Code | Controls whether vLLM trusts and executes code from remote sources. |
            | LoRA | LoRA is a parameter-efficient fine-tuning technique for deep learning models that decomposes the original parameter matrices into low-rank matrices, reducing the parameter count and computational complexity. 1. --lora-modules: specifies particular modules or layers for low-rank approximation. 2. max_loras_rank: the maximum rank of each adapter layer in the LoRA model; simple tasks can use a small rank, while complex tasks may need a larger rank to preserve performance. 3. max_loras: the maximum number of LoRA layers the model can contain, tuned to model size, inference complexity, and similar factors. 4. max_cpu_loras: the maximum number of LoRA layers that can be handled in a CPU environment. |
            | Associated Environment | Selects the environment that predefines the dependencies required for inference. |

            Info

            \u652f\u6301\u914d\u7f6e LoRA \u53c2\u6570\u7684\u6a21\u578b\u53ef\u53c2\u8003 vLLM \u652f\u6301\u7684\u6a21\u578b\u3002

          3. \u5728 \u9ad8\u7ea7\u914d\u7f6e \u4e2d\uff0c\u652f\u6301\u6839\u636e GPU \u8d44\u6e90\u7b49\u8282\u70b9\u914d\u7f6e\u5b9e\u73b0\u81ea\u52a8\u5316\u7684\u4eb2\u548c\u6027\u8c03\u5ea6\uff0c\u540c\u65f6\u4e5f\u65b9\u4fbf\u7528\u6237\u81ea\u5b9a\u4e49\u8c03\u5ea6\u7b56\u7565\u3002

          "},{"location":"admin/baize/developer/inference/vllm-inference.html#_3","title":"\u9a8c\u8bc1\u63a8\u7406\u670d\u52a1","text":"

          \u63a8\u7406\u670d\u52a1\u521b\u5efa\u5b8c\u6210\u4e4b\u540e\uff0c\u70b9\u51fb\u63a8\u7406\u670d\u52a1\u540d\u79f0\u8fdb\u5165\u8be6\u60c5\uff0c\u67e5\u770b API \u8c03\u7528\u65b9\u6cd5\u3002\u901a\u8fc7\u4f7f\u7528 Curl\u3001Python\u3001Nodejs \u7b49\u65b9\u5f0f\u9a8c\u8bc1\u6267\u884c\u7ed3\u679c\u3002

          \u62f7\u8d1d\u8be6\u60c5\u4e2d\u7684 curl \u547d\u4ee4\uff0c\u5e76\u5728\u7ec8\u7aef\u4e2d\u6267\u884c\u547d\u4ee4\u53d1\u9001\u4e00\u6761\u6a21\u578b\u63a8\u7406\u8bf7\u6c42\uff0c\u9884\u671f\u8f93\u51fa\uff1a
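          The exact command is generated on the details page. As a rough sketch only, assuming the service exposes vLLM's OpenAI-compatible chat endpoint (the address and model name below are placeholders, not values from this platform), an equivalent request can be sent from Python:

          import requests

          # Placeholder endpoint and model name -- copy the real values from the service details page.
          url = "http://<inference-service-address>/v1/chat/completions"
          payload = {
              "model": "chatglm3-6b",  # assumed model identifier
              "messages": [{"role": "user", "content": "Hello!"}],
          }
          resp = requests.post(url, json=payload, timeout=60)
          print(resp.json())  # the reply text appears under choices[0].message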

          "},{"location":"admin/baize/developer/jobs/create.html","title":"\u521b\u5efa\u4efb\u52a1\uff08Job\uff09","text":"

          \u4efb\u52a1\u7ba1\u7406\u662f\u6307\u901a\u8fc7\u4f5c\u4e1a\u8c03\u5ea6\u548c\u7ba1\u63a7\u7ec4\u4ef6\u6765\u521b\u5efa\u548c\u7ba1\u7406\u4efb\u52a1\u751f\u547d\u5468\u671f\u7684\u529f\u80fd\u3002

          AI Lab \u91c7\u7528 Kubernetes \u7684 Job \u673a\u5236\u6765\u8c03\u5ea6\u5404\u9879 AI \u63a8\u7406\u3001\u8bad\u7ec3\u4efb\u52a1\u3002

          "},{"location":"admin/baize/developer/jobs/create.html#_1","title":"\u901a\u7528\u6b65\u9aa4","text":"
          1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u4efb\u52a1\u4e2d\u5fc3 -> \u8bad\u7ec3\u4efb\u52a1 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae\u3002

          2. \u7cfb\u7edf\u4f1a\u9884\u5148\u586b\u5145\u57fa\u7840\u914d\u7f6e\u6570\u636e\uff0c\u5305\u62ec\u8981\u90e8\u7f72\u7684\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u3001\u4efb\u52a1\u7c7b\u578b\u3001\u961f\u5217\u3001\u4f18\u5148\u7ea7\u7b49\u3002 \u8c03\u6574\u8fd9\u4e9b\u53c2\u6570\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

          3. \u914d\u7f6e\u955c\u50cf\u5730\u5740\u3001\u8fd0\u884c\u53c2\u6570\u4ee5\u53ca\u5173\u8054\u7684\u6570\u636e\u96c6\u3001\u73af\u5883\u548c\u8d44\u6e90\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

          4. \u6309\u9700\u6dfb\u52a0\u6807\u7b7e\u3001\u6ce8\u89e3\u3001\u73af\u5883\u53d8\u91cf\u7b49\u4efb\u52a1\u53c2\u6570\uff0c\u9009\u62e9\u8c03\u5ea6\u7b56\u7565\u540e\u70b9\u51fb \u786e\u5b9a \u3002

          5. \u4efb\u52a1\u521b\u5efa\u6210\u529f\u540e\uff0c\u4f1a\u6709\u51e0\u79cd\u8fd0\u884c\u72b6\u6001\uff1a

            • \u8fd0\u884c\u4e2d
            • \u6392\u961f\u4e2d
            • \u63d0\u4ea4\u6210\u529f\u3001\u63d0\u4ea4\u5931\u8d25
            • \u4efb\u52a1\u6210\u529f\u3001\u4efb\u52a1\u5931\u8d25
          "},{"location":"admin/baize/developer/jobs/create.html#_2","title":"\u521b\u5efa\u7279\u5b9a\u4efb\u52a1","text":"
          • \u521b\u5efa Pytorch \u4efb\u52a1
          • \u521b\u5efa TensorFlow \u4efb\u52a1
          • \u521b\u5efa MPI \u4efb\u52a1
          • \u521b\u5efa MXNet \u4efb\u52a1
          • \u521b\u5efa PaddlePaddle \u4efb\u52a1
          "},{"location":"admin/baize/developer/jobs/delete.html","title":"\u5220\u9664\u4efb\u52a1\uff08Job\uff09","text":"

          \u5982\u679c\u53d1\u73b0\u4efb\u52a1\u5197\u4f59\u3001\u8fc7\u671f\u6216\u56e0\u5176\u4ed6\u7f18\u6545\u4e0d\u518d\u9700\u8981\uff0c\u53ef\u4ee5\u4ece\u8bad\u7ec3\u4efb\u52a1\u5217\u8868\u4e2d\u5220\u9664\u3002

          1. \u5728\u8bad\u7ec3\u4efb\u52a1\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u5220\u9664 \u3002

          2. \u5728\u5f39\u7a97\u4e2d\u786e\u8ba4\u8981\u5220\u9664\u7684\u4efb\u52a1\uff0c\u8f93\u5165\u4efb\u52a1\u540d\u79f0\u540e\u70b9\u51fb \u5220\u9664 \u3002

          3. \u5c4f\u5e55\u63d0\u793a\u5220\u9664\u6210\u529f\uff0c\u8be5\u4efb\u52a1\u4ece\u5217\u8868\u4e2d\u6d88\u5931\u3002

          Caution

          \u4efb\u52a1\u4e00\u65e6\u5220\u9664\u5c06\u4e0d\u53ef\u6062\u590d\uff0c\u8bf7\u8c28\u614e\u64cd\u4f5c\u3002

          "},{"location":"admin/baize/developer/jobs/mpi.html","title":"MPI \u4efb\u52a1","text":"

          MPI\uff08Message Passing Interface\uff09\u662f\u4e00\u79cd\u7528\u4e8e\u5e76\u884c\u8ba1\u7b97\u7684\u901a\u4fe1\u534f\u8bae\uff0c\u5b83\u5141\u8bb8\u591a\u4e2a\u8ba1\u7b97\u8282\u70b9\u4e4b\u95f4\u8fdb\u884c\u6d88\u606f\u4f20\u9012\u548c\u534f\u4f5c\u3002 MPI \u4efb\u52a1\u662f\u4f7f\u7528 MPI \u534f\u8bae\u8fdb\u884c\u5e76\u884c\u8ba1\u7b97\u7684\u4efb\u52a1\uff0c\u9002\u7528\u4e8e\u9700\u8981\u5927\u89c4\u6a21\u5e76\u884c\u5904\u7406\u7684\u5e94\u7528\u573a\u666f\uff0c\u4f8b\u5982\u5206\u5e03\u5f0f\u8bad\u7ec3\u3001\u79d1\u5b66\u8ba1\u7b97\u7b49\u3002

          \u5728 AI Lab \u4e2d\uff0c\u6211\u4eec\u63d0\u4f9b\u4e86 MPI \u4efb\u52a1\u7684\u652f\u6301\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u754c\u9762\u5316\u64cd\u4f5c\uff0c\u5feb\u901f\u521b\u5efa MPI \u4efb\u52a1\uff0c\u8fdb\u884c\u9ad8\u6027\u80fd\u7684\u5e76\u884c\u8ba1\u7b97\u3002 \u672c\u6559\u7a0b\u5c06\u6307\u5bfc\u60a8\u5982\u4f55\u5728 AI Lab \u4e2d\u521b\u5efa\u548c\u8fd0\u884c\u4e00\u4e2a MPI \u4efb\u52a1\u3002

          "},{"location":"admin/baize/developer/jobs/mpi.html#_1","title":"\u4efb\u52a1\u914d\u7f6e\u4ecb\u7ecd","text":"
          • \u4efb\u52a1\u7c7b\u578b \uff1aMPI\uff0c\u7528\u4e8e\u8fd0\u884c\u5e76\u884c\u8ba1\u7b97\u4efb\u52a1\u3002
          • \u8fd0\u884c\u73af\u5883 \uff1a\u9009\u7528\u9884\u88c5\u4e86 MPI \u73af\u5883\u7684\u955c\u50cf\uff0c\u6216\u8005\u5728\u4efb\u52a1\u4e2d\u6307\u5b9a\u5b89\u88c5\u5fc5\u8981\u7684\u4f9d\u8d56\u3002
          • MPIJob \u914d\u7f6e \uff1a\u7406\u89e3\u5e76\u914d\u7f6e MPIJob \u7684\u5404\u9879\u53c2\u6570\uff0c\u5982\u526f\u672c\u6570\u3001\u8d44\u6e90\u8bf7\u6c42\u7b49\u3002
          "},{"location":"admin/baize/developer/jobs/mpi.html#_2","title":"\u4efb\u52a1\u8fd0\u884c\u73af\u5883","text":"

          \u5728\u8fd9\u91cc\u6211\u4eec\u4f7f\u7528 baize-notebook \u57fa\u7840\u955c\u50cf\u548c \u5173\u8054\u73af\u5883 \u7684\u65b9\u5f0f\u6765\u4f5c\u4e3a\u4efb\u52a1\u7684\u57fa\u7840\u8fd0\u884c\u73af\u5883\u3002 \u786e\u4fdd\u8fd0\u884c\u73af\u5883\u4e2d\u5305\u542b MPI \u53ca\u76f8\u5173\u5e93\uff0c\u5982 OpenMPI\u3001mpi4py \u7b49\u3002

          \u6ce8\u610f \uff1a\u4e86\u89e3\u5982\u4f55\u521b\u5efa\u73af\u5883\uff0c\u8bf7\u53c2\u8003\u73af\u5883\u5217\u8868\u3002
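          As a quick sanity check that MPI and mpi4py are usable in the environment (the script name and process count here are illustrative, not part of the platform workflow), a minimal script like the following can be launched with mpirun -np 2 python hello_mpi.py:

          # hello_mpi.py -- minimal mpi4py sanity check (illustrative)
          from mpi4py import MPI

          comm = MPI.COMM_WORLD  # communicator spanning all launched ranks
          print(f"rank {comm.Get_rank()} of {comm.Get_size()}")

          Each rank should print its own rank number; if the import or the launch fails, the environment is missing MPI or mpi4py.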

          "},{"location":"admin/baize/developer/jobs/mpi.html#mpi_1","title":"\u521b\u5efa MPI \u4efb\u52a1","text":""},{"location":"admin/baize/developer/jobs/mpi.html#mpi_2","title":"MPI \u4efb\u52a1\u521b\u5efa\u6b65\u9aa4","text":"
          1. \u767b\u5f55\u5e73\u53f0 \uff1a\u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3\uff0c\u8fdb\u5165 \u8bad\u7ec3\u4efb\u52a1 \u9875\u9762\u3002
          2. \u521b\u5efa\u4efb\u52a1 \uff1a\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
          3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b \uff1a\u5728\u5f39\u51fa\u7684\u7a97\u53e3\u4e2d\uff0c\u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a MPI\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002
          4. \u586b\u5199\u4efb\u52a1\u4fe1\u606f \uff1a\u586b\u5199\u4efb\u52a1\u540d\u79f0\u548c\u63cf\u8ff0\uff0c\u4f8b\u5982 \u201cbenchmarks-mpi\u201d\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002
          5. \u914d\u7f6e\u4efb\u52a1\u53c2\u6570 \uff1a\u6839\u636e\u60a8\u7684\u9700\u6c42\uff0c\u914d\u7f6e\u4efb\u52a1\u7684\u8fd0\u884c\u53c2\u6570\u3001\u955c\u50cf\u3001\u8d44\u6e90\u7b49\u4fe1\u606f\u3002
          "},{"location":"admin/baize/developer/jobs/mpi.html#_3","title":"\u8fd0\u884c\u53c2\u6570","text":"
          • \u542f\u52a8\u547d\u4ee4 \uff1a\u4f7f\u7528 mpirun\uff0c\u8fd9\u662f\u8fd0\u884c MPI \u7a0b\u5e8f\u7684\u547d\u4ee4\u3002
          • \u547d\u4ee4\u53c2\u6570 \uff1a\u8f93\u5165\u60a8\u8981\u8fd0\u884c\u7684 MPI \u7a0b\u5e8f\u7684\u53c2\u6570\u3002

          \u793a\u4f8b\uff1a\u8fd0\u884c TensorFlow Benchmarks

          \u5728\u672c\u793a\u4f8b\u4e2d\uff0c\u6211\u4eec\u5c06\u8fd0\u884c\u4e00\u4e2a TensorFlow \u7684\u57fa\u51c6\u6d4b\u8bd5\u7a0b\u5e8f\uff0c\u4f7f\u7528 Horovod \u8fdb\u884c\u5206\u5e03\u5f0f\u8bad\u7ec3\u3002 \u9996\u5148\uff0c\u786e\u4fdd\u60a8\u4f7f\u7528\u7684\u955c\u50cf\u4e2d\u5305\u542b\u6240\u9700\u7684\u4f9d\u8d56\u9879\uff0c\u4f8b\u5982 TensorFlow\u3001Horovod\u3001Open MPI \u7b49\u3002

          \u955c\u50cf\u9009\u62e9 \uff1a\u4f7f\u7528\u5305\u542b TensorFlow \u548c MPI \u7684\u955c\u50cf\uff0c\u4f8b\u5982 mai.daocloud.io/docker.io/mpioperator/tensorflow-benchmarks:latest\u3002

          \u547d\u4ee4\u53c2\u6570 \uff1a

          mpirun --allow-run-as-root -np 2 -bind-to none -map-by slot \
            -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH -x PATH \
            -mca pml ob1 -mca btl ^openib \
            python scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py \
            --model=resnet101 --batch_size=64 --variable_update=horovod

          Explanation:

          • mpirun: the MPI launch command.
          • --allow-run-as-root: allows running as the root user (containers usually run as root).
          • -np 2: runs 2 processes.
          • -bind-to none, -map-by slot: MPI process binding and mapping settings.
          • -x NCCL_DEBUG=INFO: sets the debug logging level for NCCL (NVIDIA Collective Communication Library).
          • -x LD_LIBRARY_PATH, -x PATH: passes the necessary environment variables into the MPI environment.
          • -mca pml ob1 -mca btl ^openib: MPI configuration parameters that select the message layer and transport protocols.
          • python scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py: runs the TensorFlow benchmark script.
          • --model=resnet101, --batch_size=64, --variable_update=horovod: arguments to the TensorFlow script specifying the model, the batch size, and Horovod-based variable updates.
          "},{"location":"admin/baize/developer/jobs/mpi.html#_4","title":"\u8d44\u6e90\u914d\u7f6e","text":"

          \u5728\u4efb\u52a1\u914d\u7f6e\u4e2d\uff0c\u9700\u8981\u4e3a\u6bcf\u4e2a\u8282\u70b9\uff08Launcher \u548c Worker\uff09\u5206\u914d\u9002\u5f53\u7684\u8d44\u6e90\uff0c\u4f8b\u5982 CPU\u3001\u5185\u5b58\u548c GPU\u3002

          \u8d44\u6e90\u793a\u4f8b \uff1a

          • Launcher\uff08\u542f\u52a8\u5668\uff09 \uff1a

            • \u526f\u672c\u6570 \uff1a1
            • \u8d44\u6e90\u8bf7\u6c42 \uff1a
              • CPU\uff1a2 \u6838
              • \u5185\u5b58\uff1a4 GiB
          • Worker\uff08\u5de5\u4f5c\u8282\u70b9\uff09 \uff1a

            • \u526f\u672c\u6570 \uff1a2
            • \u8d44\u6e90\u8bf7\u6c42 \uff1a
              • CPU\uff1a2 \u6838
              • \u5185\u5b58\uff1a4 GiB
              • GPU\uff1a\u6839\u636e\u9700\u6c42\u5206\u914d
          "},{"location":"admin/baize/developer/jobs/mpi.html#mpijob","title":"\u5b8c\u6574\u7684 MPIJob \u914d\u7f6e\u793a\u4f8b","text":"

          \u4ee5\u4e0b\u662f\u5b8c\u6574\u7684 MPIJob \u914d\u7f6e\u793a\u4f8b\uff0c\u4f9b\u60a8\u53c2\u8003\u3002

          apiVersion: kubeflow.org/v1
          kind: MPIJob
          metadata:
            name: tensorflow-benchmarks
          spec:
            slotsPerWorker: 1
            runPolicy:
              cleanPodPolicy: Running
            mpiReplicaSpecs:
              Launcher:
                replicas: 1
                template:
                  spec:
                    containers:
                      - name: tensorflow-benchmarks
                        image: mai.daocloud.io/docker.io/mpioperator/tensorflow-benchmarks:latest
                        command:
                          - mpirun
                          - --allow-run-as-root
                          - -np
                          - "2"
                          - -bind-to
                          - none
                          - -map-by
                          - slot
                          - -x
                          - NCCL_DEBUG=INFO
                          - -x
                          - LD_LIBRARY_PATH
                          - -x
                          - PATH
                          - -mca
                          - pml
                          - ob1
                          - -mca
                          - btl
                          - ^openib
                          - python
                          - scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py
                          - --model=resnet101
                          - --batch_size=64
                          - --variable_update=horovod
                        resources:
                          limits:
                            cpu: "2"
                            memory: 4Gi
                          requests:
                            cpu: "2"
                            memory: 4Gi
              Worker:
                replicas: 2
                template:
                  spec:
                    containers:
                      - name: tensorflow-benchmarks
                        image: mai.daocloud.io/docker.io/mpioperator/tensorflow-benchmarks:latest
                        resources:
                          limits:
                            cpu: "2"
                            memory: 4Gi
                            nvidia.com/gpumem: 1k
                            nvidia.com/vgpu: "1"
                          requests:
                            cpu: "2"
                            memory: 4Gi

          Configuration breakdown:

          • apiVersion and kind: the API version and resource type; MPIJob is a custom resource defined by Kubeflow for creating MPI jobs.
          • metadata: metadata such as the job name.
          • spec: the detailed job configuration.
            • slotsPerWorker: the number of slots per Worker node, usually set to 1.
            • runPolicy: the run policy, for example whether to clean up Pods when the job finishes.
            • mpiReplicaSpecs: the replica configuration of the MPI job.
              • Launcher: the launcher, responsible for starting the MPI job.
                • replicas: the replica count, usually 1.
                • template: the Pod template, defining the container image, command, resources, and so on.
              • Worker: the worker nodes that actually perform the computation.
                • replicas: the replica count, set according to the desired parallelism; 2 here.
                • template: the Pod template, likewise defining the runtime environment and resources.
          "},{"location":"admin/baize/developer/jobs/mpi.html#_5","title":"Setting the Job Replica Count","text":"

          When creating an MPI job, set the job replica count correctly according to the replica counts configured in mpiReplicaSpecs.

          • Total replicas = Launcher replicas + Worker replicas
          • In this example:

            • Launcher replicas: 1
            • Worker replicas: 2
            • Total replicas: 1 + 2 = 3

          Therefore, set the job replica count to 3 in the job configuration.

          "},{"location":"admin/baize/developer/jobs/mpi.html#_6","title":"Submit the Job","text":"

          After configuration, click the Submit button to start the MPI job.

          "},{"location":"admin/baize/developer/jobs/mpi.html#_7","title":"View the Results","text":"

          Once the job has been submitted, open the job details page to see resource usage and the job's running status. From the upper right corner, open the workload details to view the log output of each node during the run.

          Example output:

          TensorFlow:  1.13
          Model:       resnet101
          Mode:        training
          Batch size:  64
          ...

          Total images/sec: 125.67

          This indicates that the MPI job ran successfully and the TensorFlow benchmark completed its distributed training.

          "},{"location":"admin/baize/developer/jobs/mpi.html#_8","title":"Summary","text":"

          In this tutorial you learned how to create and run an MPI job on the AI Lab platform. We covered how to configure an MPIJob and how to specify the command and resource requirements in a job. If you have any questions, refer to the other documentation provided by the platform or contact technical support.

          Appendix:

          • If your runtime environment lacks the required libraries (such as mpi4py or Horovod), add installation commands to the job or use an image with the dependencies preinstalled.
          • In practice, modify the MPIJob configuration as needed, for example to change the image, command arguments, or resource requests.
          "},{"location":"admin/baize/developer/jobs/mxnet.html","title":"MXNet Job","text":"

          Warning

          Because the Apache MXNet project has been archived, Kubeflow MXJob will be deprecated and removed in the upcoming Training Operator 1.9 release.

          Apache MXNet is a high-performance deep learning framework that supports multiple programming languages. MXNet jobs can be trained in several ways, including single-node and distributed modes. AI Lab supports MXNet jobs: you can quickly create an MXNet job through the UI and train models.

          This tutorial walks you through creating and running single-node and distributed MXNet jobs on the AI Lab platform.

          "},{"location":"admin/baize/developer/jobs/mxnet.html#_1","title":"Job Configuration Overview","text":"
          • Job type: MXNet, supporting both single-node and distributed modes.
          • Runtime environment: choose an image that contains the MXNet framework, or install the necessary dependencies in the job.
          "},{"location":"admin/baize/developer/jobs/mxnet.html#_2","title":"Job Runtime Environment","text":"

          We use the release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest image as the job's base runtime environment. It comes with MXNet and its dependencies preinstalled and supports GPU acceleration; a quick way to confirm this is sketched below.

          Note: to learn how to create and manage environments, refer to the environment list.
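          As a rough sanity check (illustrative only, not part of the platform workflow), you can confirm inside the environment that MXNet imports correctly and sees the GPUs:

          # check_mxnet.py -- illustrative sanity check for the MXNet environment
          import mxnet as mx

          print(mx.__version__)         # installed MXNet version
          print(mx.context.num_gpus())  # number of GPUs visible to MXNet
          a = mx.nd.ones((2, 3))        # small NDArray computation
          print((a * 2).asnumpy())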

          "},{"location":"admin/baize/developer/jobs/mxnet.html#mxnet_1","title":"\u521b\u5efa MXNet \u4efb\u52a1","text":""},{"location":"admin/baize/developer/jobs/mxnet.html#mxnet_2","title":"MXNet \u5355\u673a\u4efb\u52a1","text":""},{"location":"admin/baize/developer/jobs/mxnet.html#_3","title":"\u521b\u5efa\u6b65\u9aa4","text":"
          1. \u767b\u5f55\u5e73\u53f0\uff1a\u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3\uff0c\u8fdb\u5165 \u8bad\u7ec3\u4efb\u52a1 \u9875\u9762\u3002
          2. \u521b\u5efa\u4efb\u52a1\uff1a\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
          3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\uff1a\u5728\u5f39\u51fa\u7684\u7a97\u53e3\u4e2d\uff0c\u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a MXNet\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002
          4. \u586b\u5199\u4efb\u52a1\u4fe1\u606f\uff1a\u586b\u5199\u4efb\u52a1\u540d\u79f0\u548c\u63cf\u8ff0\uff0c\u4f8b\u5982 \u201cMXNet \u5355\u673a\u8bad\u7ec3\u4efb\u52a1\u201d\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a\u3002
          5. \u914d\u7f6e\u4efb\u52a1\u53c2\u6570\uff1a\u6839\u636e\u60a8\u7684\u9700\u6c42\uff0c\u914d\u7f6e\u4efb\u52a1\u7684\u8fd0\u884c\u53c2\u6570\u3001\u955c\u50cf\u3001\u8d44\u6e90\u7b49\u4fe1\u606f\u3002
          "},{"location":"admin/baize/developer/jobs/mxnet.html#_4","title":"\u8fd0\u884c\u53c2\u6570","text":"
          • \u542f\u52a8\u547d\u4ee4\uff1apython3
          • \u547d\u4ee4\u53c2\u6570\uff1a

            /mxnet/mxnet/example/gluon/mnist/mnist.py --epochs 10 --cuda

            Explanation:

            • /mxnet/mxnet/example/gluon/mnist/mnist.py: the MNIST handwritten-digit recognition example script that ships with MXNet.
            • --epochs 10: sets the number of training epochs to 10.
            • --cuda: uses CUDA for GPU acceleration.
          "},{"location":"admin/baize/developer/jobs/mxnet.html#_5","title":"Resource Configuration","text":"
          • Replicas: 1 (single-node job)
          • Resource requests:
            • CPU: 2 cores
            • Memory: 4 GiB
            • GPU: 1
          "},{"location":"admin/baize/developer/jobs/mxnet.html#mxjob","title":"Complete MXJob Configuration Example","text":"

          Below is the YAML configuration of the single-node MXJob:

          apiVersion: \"kubeflow.org/v1\"\nkind: \"MXJob\"\nmetadata:\n  name: \"mxnet-single-job\"\nspec:\n  jobMode: MXTrain\n  mxReplicaSpecs:\n    Worker:\n      replicas: 1\n      restartPolicy: Never\n      template:\n        spec:\n          containers:\n            - name: mxnet\n              image: release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest\n              command: [\"python3\"]\n              args:\n                [\n                  \"/mxnet/mxnet/example/gluon/mnist/mnist.py\",\n                  \"--epochs\",\n                  \"10\",\n                  \"--cuda\",\n                ]\n              ports:\n                - containerPort: 9991\n                  name: mxjob-port\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n

          Configuration breakdown:

          • apiVersion and kind: the API version and resource type, here MXJob.
          • metadata: metadata such as the job name.
          • spec: the detailed job configuration.
            • jobMode: set to MXTrain, indicating a training job.
            • mxReplicaSpecs: the replica configuration of the MXNet job.
              • Worker: the worker-node configuration.
                • replicas: the replica count, 1 here.
                • restartPolicy: the restart policy, set to Never, meaning the job is not restarted on failure.
                • template: the Pod template, defining the container's runtime environment and resources.
                  • containers: the container list.
                    • name: the container name.
                    • image: the image to use.
                    • command and args: the launch command and its arguments.
                    • ports: the container port configuration.
                    • resources: resource requests and limits.
          "},{"location":"admin/baize/developer/jobs/mxnet.html#_6","title":"Submit the Job","text":"

          After configuration, click the Submit button to start the MXNet single-node job.

          "},{"location":"admin/baize/developer/jobs/mxnet.html#_7","title":"View the Results","text":"

          Once the job has been submitted, open the job details page to see resource usage and the job's running status. From the upper right corner, open the workload details to view the log output during the run.

          Example output:

          Epoch 1: accuracy=0.95
          Epoch 2: accuracy=0.97
          ...
          Epoch 10: accuracy=0.98
          Training completed.

          This indicates that the MXNet single-node job ran successfully and model training completed.

          "},{"location":"admin/baize/developer/jobs/mxnet.html#mxnet_3","title":"MXNet Distributed Job","text":"

          In distributed mode, an MXNet job can use multiple compute nodes to train together, which improves training efficiency.

          "},{"location":"admin/baize/developer/jobs/mxnet.html#_8","title":"Creation Steps","text":"
          1. Log in to the platform: same as above.
          2. Create a job: click the Create button in the upper right corner to open the job creation page.
          3. Select the job type: select MXNet as the job type, then click Next.
          4. Fill in the job information: enter a job name and description, for example "MXNet distributed training job", then click OK.
          5. Configure job parameters: set the runtime parameters, image, resources, and so on as needed.
          "},{"location":"admin/baize/developer/jobs/mxnet.html#_9","title":"Runtime Parameters","text":"
          • Launch command: python3
          • Command arguments:

            /mxnet/mxnet/example/image-classification/train_mnist.py --num-epochs 10 --num-layers 2 --kv-store dist_device_sync --gpus 0

            Explanation:

            • /mxnet/mxnet/example/image-classification/train_mnist.py: the image-classification example script that ships with MXNet.
            • --num-epochs 10: trains for 10 epochs.
            • --num-layers 2: the model has 2 layers.
            • --kv-store dist_device_sync: uses the distributed device-synchronous mode.
            • --gpus 0: uses a GPU for acceleration.
          "},{"location":"admin/baize/developer/jobs/mxnet.html#_10","title":"Resource Configuration","text":"
          • Job replicas: 3 (Scheduler, Server, and Worker)
          • Resource requests per role:
            • Scheduler:
              • Replicas: 1
              • Resource requests:
                • CPU: 2 cores
                • Memory: 4 GiB
                • GPU: 1
            • Server (parameter server):
              • Replicas: 1
              • Resource requests:
                • CPU: 2 cores
                • Memory: 4 GiB
                • GPU: 1
            • Worker:
              • Replicas: 1
              • Resource requests:
                • CPU: 2 cores
                • Memory: 4 GiB
                • GPU: 1
          "},{"location":"admin/baize/developer/jobs/mxnet.html#mxjob_1","title":"Complete MXJob Configuration Example","text":"

          Below is the YAML configuration of the distributed MXJob:

          apiVersion: \"kubeflow.org/v1\"\nkind: \"MXJob\"\nmetadata:\n  name: \"mxnet-job\"\nspec:\n  jobMode: MXTrain\n  mxReplicaSpecs:\n    Scheduler:\n      replicas: 1\n      restartPolicy: Never\n      template:\n        spec:\n          containers:\n            - name: mxnet\n              image: release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest\n              ports:\n                - containerPort: 9991\n                  name: mxjob-port\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n    Server:\n      replicas: 1\n      restartPolicy: Never\n      template:\n        spec:\n          containers:\n            - name: mxnet\n              image: release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest\n              ports:\n                - containerPort: 9991\n                  name: mxjob-port\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n    Worker:\n      replicas: 1\n      restartPolicy: Never\n      template:\n        spec:\n          containers:\n            - name: mxnet\n              image: release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest\n              command: [\"python3\"]\n              args:\n                [\n                  \"/mxnet/mxnet/example/image-classification/train_mnist.py\",\n                  \"--num-epochs\",\n                  \"10\",\n                  \"--num-layers\",\n                  \"2\",\n                  \"--kv-store\",\n                  \"dist_device_sync\",\n                  \"--gpus\",\n                  \"0\",\n                ]\n              ports:\n                - containerPort: 9991\n                  name: mxjob-port\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n

          Configuration breakdown:

          • Scheduler: coordinates task scheduling among the nodes in the cluster.
          • Server (parameter server): stores and updates model parameters, providing distributed parameter synchronization.
          • Worker: the nodes that actually perform the training.
          • Resource configuration: allocate appropriate resources to each role so the job runs smoothly.
          "},{"location":"admin/baize/developer/jobs/mxnet.html#_11","title":"Setting the Job Replica Count","text":"

          When creating an MXNet distributed job, set the job replica count correctly according to the replica counts configured in mxReplicaSpecs.

          • Total replicas = Scheduler replicas + Server replicas + Worker replicas
          • In this example:
            • Scheduler replicas: 1
            • Server replicas: 1
            • Worker replicas: 1
            • Total replicas: 1 + 1 + 1 = 3

          Therefore, set the job replica count to 3 in the job configuration.

          "},{"location":"admin/baize/developer/jobs/mxnet.html#_12","title":"Submit the Job","text":"

          After configuration, click the Submit button to start the MXNet distributed job.

          "},{"location":"admin/baize/developer/jobs/mxnet.html#_13","title":"View the Results","text":"

          Open the job details page to see the running status and resource usage. You can view the log output of each role (Scheduler, Server, Worker).

          Example output:

          INFO:root:Epoch[0] Batch [50]     Speed: 1000 samples/sec   accuracy=0.85
          INFO:root:Epoch[0] Batch [100]    Speed: 1200 samples/sec   accuracy=0.87
          ...
          INFO:root:Epoch[9] Batch [100]    Speed: 1300 samples/sec   accuracy=0.98
          Training completed.

          This indicates that the MXNet distributed job ran successfully and model training completed.

          "},{"location":"admin/baize/developer/jobs/mxnet.html#_14","title":"Summary","text":"

          In this tutorial you learned how to create and run single-node and distributed MXNet jobs on the AI Lab platform. We covered how to configure an MXJob and how to specify the command and resource requirements in a job. If you have any questions, refer to the other documentation provided by the platform or contact technical support.

          "},{"location":"admin/baize/developer/jobs/mxnet.html#_15","title":"Appendix","text":"
          • Notes:

            • Make sure the image you use contains the required MXNet version and dependencies.
            • Adjust the resource configuration to your actual needs to avoid under-provisioning or waste.
            • To use a custom training script, change the launch command and arguments.
          • References:

            • MXNet official documentation
            • Kubeflow MXJob guide
          "},{"location":"admin/baize/developer/jobs/paddle.html","title":"PaddlePaddle Job","text":"

          PaddlePaddle is Baidu's open-source deep learning platform, supporting a rich set of neural network models and distributed training approaches. PaddlePaddle jobs can be trained in single-node or distributed mode. AI Lab supports PaddlePaddle jobs: you can quickly create a PaddlePaddle job through the UI and train models.

          This tutorial walks you through creating and running single-node and distributed PaddlePaddle jobs on the AI Lab platform.

          "},{"location":"admin/baize/developer/jobs/paddle.html#_1","title":"Job Configuration Overview","text":"
          • Job type: PaddlePaddle, supporting both single-node and distributed modes.
          • Runtime environment: choose an image that contains the PaddlePaddle framework, or install the necessary dependencies in the job.
          "},{"location":"admin/baize/developer/jobs/paddle.html#_2","title":"Job Runtime Environment","text":"

          We use the registry.baidubce.com/paddlepaddle/paddle:2.4.0rc0-cpu image as the job's base runtime environment. It comes with the PaddlePaddle framework preinstalled and targets CPU computation. To use a GPU, choose the corresponding GPU image.

          Note: to learn how to create and manage environments, refer to the environment list.

          "},{"location":"admin/baize/developer/jobs/paddle.html#paddlepaddle_1","title":"Create a PaddlePaddle Job","text":""},{"location":"admin/baize/developer/jobs/paddle.html#paddlepaddle_2","title":"PaddlePaddle Single-Node Training Job","text":""},{"location":"admin/baize/developer/jobs/paddle.html#_3","title":"Creation Steps","text":"
          1. Log in to the platform: log in to AI Lab, click Job Center in the left navigation bar, and open the Training Jobs page.
          2. Create a job: click the Create button in the upper right corner to open the job creation page.
          3. Select the job type: in the pop-up window, select PaddlePaddle as the job type, then click Next.
          4. Fill in the job information: enter a job name and description, for example "PaddlePaddle single-node training job", then click OK.
          5. Configure job parameters: set the job's runtime parameters, image, resources, and so on according to your needs.
          "},{"location":"admin/baize/developer/jobs/paddle.html#_4","title":"Runtime Parameters","text":"
          • Launch command: python
          • Command arguments:

            -m paddle.distributed.launch run_check

            Explanation:

            • -m paddle.distributed.launch: uses the distributed launch module provided by PaddlePaddle; it also works in single-node mode and eases a later move to distributed training.
            • run_check: a test script provided by PaddlePaddle that checks whether the distributed environment works correctly.
          "},{"location":"admin/baize/developer/jobs/paddle.html#_5","title":"Resource Configuration","text":"
          • Replicas: 1 (single-node job)
          • Resource requests:
            • CPU: as needed; at least 1 core is recommended
            • Memory: as needed; at least 2 GiB is recommended
            • GPU: if a GPU is required, choose the GPU image and allocate GPU resources accordingly
          "},{"location":"admin/baize/developer/jobs/paddle.html#paddlejob","title":"Complete PaddleJob Configuration Example","text":"

          Below is the YAML configuration of the single-node PaddleJob:

          apiVersion: kubeflow.org/v1
          kind: PaddleJob
          metadata:
              name: paddle-simple-cpu
              namespace: kubeflow
          spec:
              paddleReplicaSpecs:
                  Worker:
                      replicas: 1
                      restartPolicy: OnFailure
                      template:
                          spec:
                              containers:
                                  - name: paddle
                                    image: registry.baidubce.com/paddlepaddle/paddle:2.4.0rc0-cpu
                                    command:
                                        [
                                            'python',
                                            '-m',
                                            'paddle.distributed.launch',
                                            'run_check',
                                        ]

          Configuration breakdown:

          • apiVersion and kind: the API version and resource type, here PaddleJob.
          • metadata: metadata, including the job name and namespace.
          • spec: the detailed job configuration.
            • paddleReplicaSpecs: the replica configuration of the PaddlePaddle job.
              • Worker: the worker-node configuration.
                • replicas: the replica count, 1 here, meaning single-node training.
                • restartPolicy: the restart policy, set to OnFailure, meaning the job restarts automatically on failure.
                • template: the Pod template, defining the container's runtime environment and resources.
                  • containers: the container list.
                    • name: the container name.
                    • image: the image to use.
                    • command: the launch command and its arguments.
          "},{"location":"admin/baize/developer/jobs/paddle.html#_6","title":"Submit the Job","text":"

          After configuration, click the Submit button to start the PaddlePaddle single-node job.

          "},{"location":"admin/baize/developer/jobs/paddle.html#_7","title":"View the Results","text":"

          Once the job has been submitted, open the job details page to see resource usage and the job's running status. From the upper right corner, open the workload details to view the log output during the run.

          Example output:

          run check success, PaddlePaddle is installed correctly on this node :)

          This indicates that the PaddlePaddle single-node job ran successfully and the environment is configured correctly.

          "},{"location":"admin/baize/developer/jobs/paddle.html#paddlepaddle_3","title":"PaddlePaddle Distributed Training Job","text":"

          In distributed mode, a PaddlePaddle job can use multiple compute nodes to train together, which improves training efficiency.

          "},{"location":"admin/baize/developer/jobs/paddle.html#_8","title":"Creation Steps","text":"
          1. Log in to the platform: same as above.
          2. Create a job: click the Create button in the upper right corner to open the job creation page.
          3. Select the job type: select PaddlePaddle as the job type, then click Next.
          4. Fill in the job information: enter a job name and description, for example "PaddlePaddle distributed training job", then click OK.
          5. Configure job parameters: set the runtime parameters, image, resources, and so on as needed.
          "},{"location":"admin/baize/developer/jobs/paddle.html#_9","title":"Runtime Parameters","text":"
          • Launch command: python
          • Command arguments:

            -m paddle.distributed.launch train.py --epochs=10

            Explanation:

            • -m paddle.distributed.launch: uses the distributed launch module provided by PaddlePaddle.
            • train.py: your training script, which must be baked into the image or mounted into the container; a minimal sketch of such a script follows this list.
            • --epochs=10: the number of training epochs, 10 here.
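          The platform does not ship a train.py; the script below is only a minimal sketch of what such a script might look like under paddle.distributed.launch (the model, data, and argument handling are all illustrative, not the platform's actual script):

          # train.py -- hypothetical minimal PaddlePaddle training script (illustrative)
          import argparse

          import paddle
          import paddle.distributed as dist

          def main():
              parser = argparse.ArgumentParser()
              parser.add_argument("--epochs", type=int, default=10)
              args = parser.parse_args()

              dist.init_parallel_env()  # join the process group set up by paddle.distributed.launch
              model = paddle.DataParallel(paddle.nn.Linear(10, 1))
              loss_fn = paddle.nn.MSELoss()
              opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())

              x = paddle.randn([100, 10])  # random stand-in data
              y = paddle.randn([100, 1])
              for epoch in range(args.epochs):
                  loss = loss_fn(model(x), y)
                  loss.backward()
                  opt.step()
                  opt.clear_grad()
                  print(f"Epoch {epoch + 1}, Loss {float(loss):.4f}")

          if __name__ == "__main__":
              main()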
          "},{"location":"admin/baize/developer/jobs/paddle.html#_10","title":"\u8d44\u6e90\u914d\u7f6e","text":"
          • \u4efb\u52a1\u526f\u672c\u6570\uff1a\u6839\u636e Worker \u526f\u672c\u6570\u8bbe\u7f6e\uff0c\u8fd9\u91cc\u4e3a 2\u3002
          • \u8d44\u6e90\u8bf7\u6c42\uff1a
            • CPU\uff1a\u6839\u636e\u9700\u6c42\u8bbe\u7f6e\uff0c\u5efa\u8bae\u81f3\u5c11 1 \u6838
            • \u5185\u5b58\uff1a\u6839\u636e\u9700\u6c42\u8bbe\u7f6e\uff0c\u5efa\u8bae\u81f3\u5c11 2 GiB
            • GPU\uff1a\u5982\u679c\u9700\u8981\u4f7f\u7528 GPU\uff0c\u9009\u62e9 GPU \u7248\u672c\u7684\u955c\u50cf\uff0c\u5e76\u5206\u914d\u76f8\u5e94\u7684 GPU \u8d44\u6e90
          "},{"location":"admin/baize/developer/jobs/paddle.html#paddlejob_1","title":"\u5b8c\u6574\u7684 PaddleJob \u914d\u7f6e\u793a\u4f8b","text":"

          \u4ee5\u4e0b\u662f\u5206\u5e03\u5f0f PaddleJob \u7684 YAML \u914d\u7f6e\uff1a

          apiVersion: kubeflow.org/v1
          kind: PaddleJob
          metadata:
              name: paddle-distributed-job
              namespace: kubeflow
          spec:
              paddleReplicaSpecs:
                  Worker:
                      replicas: 2
                      restartPolicy: OnFailure
                      template:
                          spec:
                              containers:
                                  - name: paddle
                                    image: registry.baidubce.com/paddlepaddle/paddle:2.4.0rc0-cpu
                                    command:
                                        [
                                            'python',
                                            '-m',
                                            'paddle.distributed.launch',
                                            'train.py',
                                        ]
                                    args:
                                        - '--epochs=10'

          Configuration breakdown:

          • Worker:
            • replicas: the replica count, set to 2, meaning 2 worker nodes run the distributed training.
            • The other settings are similar to the single-node mode.
          "},{"location":"admin/baize/developer/jobs/paddle.html#_11","title":"Setting the Job Replica Count","text":"

          When creating a PaddlePaddle distributed job, set the job replica count correctly according to the replica counts configured in paddleReplicaSpecs.

          • Total replicas = Worker replicas
          • In this example:
            • Worker replicas: 2
            • Total replicas: 2

          Therefore, set the job replica count to 2 in the job configuration.

          "},{"location":"admin/baize/developer/jobs/paddle.html#_12","title":"Submit the Job","text":"

          After configuration, click the Submit button to start the PaddlePaddle distributed job.

          "},{"location":"admin/baize/developer/jobs/paddle.html#_13","title":"View the Results","text":"

          Open the job details page to see the running status and resource usage. You can view the log output of each worker node to confirm that the distributed training is running properly.

          Example output:

          Worker 0: Epoch 1, Batch 100, Loss 0.5
          Worker 1: Epoch 1, Batch 100, Loss 0.6
          ...
          Training completed.

          This indicates that the PaddlePaddle distributed job ran successfully and model training completed.

          "},{"location":"admin/baize/developer/jobs/paddle.html#_14","title":"Summary","text":"

          In this tutorial you learned how to create and run single-node and distributed PaddlePaddle jobs on the AI Lab platform. We covered how to configure a PaddleJob and how to specify the command and resource requirements in a job. If you have any questions, refer to the other documentation provided by the platform or contact technical support.

          "},{"location":"admin/baize/developer/jobs/paddle.html#_15","title":"Appendix","text":"
          • Notes:

            • Training script: make sure train.py (or another training script) exists inside the container. You can add it via a custom image, by mounting persistent storage, or by similar means.
            • Image selection: choose an image that matches your needs, for example paddle:2.4.0rc0-gpu when using GPUs.
            • Parameter tuning: pass different training parameters by modifying command and args.
          • References:

            • PaddlePaddle official documentation
            • Kubeflow PaddleJob guide
          "},{"location":"admin/baize/developer/jobs/pytorch.html","title":"Pytorch Job","text":"

          Pytorch is an open-source deep learning framework that provides a flexible environment for training and deployment. A Pytorch job is a job that uses the Pytorch framework.

          In AI Lab we provide support and adaptation for Pytorch jobs: you can quickly create a Pytorch job through the UI and train models.

          "},{"location":"admin/baize/developer/jobs/pytorch.html#_1","title":"Job Configuration Overview","text":"
          • The job type supports both Pytorch single-node and Pytorch distributed modes.
          • The runtime image already supports the Pytorch framework by default; no extra installation is required.
          "},{"location":"admin/baize/developer/jobs/pytorch.html#_2","title":"Job Runtime Environment","text":"

          Here we use the baize-notebook base image together with an associated environment as the job's base runtime environment.

          To learn how to create an environment, refer to the environment list.

          "},{"location":"admin/baize/developer/jobs/pytorch.html#_3","title":"Create a Job","text":""},{"location":"admin/baize/developer/jobs/pytorch.html#pytorch_1","title":"Pytorch Single-Node Job","text":"
          1. Log in to AI Lab, click Job Center in the left navigation bar, and open the Training Jobs page.
          2. Click the Create button in the upper right corner to open the job creation page.
          3. Select Pytorch single-node as the job type, then click Next.
          4. Fill in the job name and description, then click OK.
          "},{"location":"admin/baize/developer/jobs/pytorch.html#_4","title":"Runtime Parameters","text":"
          • Launch command: bash
          • Command arguments:
          import torch
          import torch.nn as nn
          import torch.optim as optim

          # Define a simple neural network
          class SimpleNet(nn.Module):
              def __init__(self):
                  super(SimpleNet, self).__init__()
                  self.fc = nn.Linear(10, 1)

              def forward(self, x):
                  return self.fc(x)

          # Create the model, loss function, and optimizer
          model = SimpleNet()
          criterion = nn.MSELoss()
          optimizer = optim.SGD(model.parameters(), lr=0.01)

          # Generate some random data
          x = torch.randn(100, 10)
          y = torch.randn(100, 1)

          # Train the model
          for epoch in range(100):
              # Forward pass
              outputs = model(x)
              loss = criterion(outputs, y)

              # Backward pass and optimization
              optimizer.zero_grad()
              loss.backward()
              optimizer.step()

              if (epoch + 1) % 10 == 0:
                  print(f'Epoch [{epoch+1}/100], Loss: {loss.item():.4f}')

          print('Training finished.')
          "},{"location":"admin/baize/developer/jobs/pytorch.html#_5","title":"\u8fd0\u884c\u7ed3\u679c","text":"

          \u4efb\u52a1\u63d0\u4ea4\u6210\u529f\uff0c\u6211\u4eec\u53ef\u4ee5\u8fdb\u5165\u4efb\u52a1\u8be6\u60c5\u67e5\u770b\u5230\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\uff0c\u4ece\u53f3\u4e0a\u89d2\u53bb\u5f80 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5 \uff0c\u53ef\u4ee5\u67e5\u770b\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u7684\u65e5\u5fd7\u8f93\u51fa

          [HAMI-core Warn(1:140244541377408:utils.c:183)]: get default cuda from (null)
          [HAMI-core Msg(1:140244541377408:libvgpu.c:855)]: Initialized
          Epoch [10/100], Loss: 1.1248
          Epoch [20/100], Loss: 1.0486
          Epoch [30/100], Loss: 0.9969
          Epoch [40/100], Loss: 0.9611
          Epoch [50/100], Loss: 0.9360
          Epoch [60/100], Loss: 0.9182
          Epoch [70/100], Loss: 0.9053
          Epoch [80/100], Loss: 0.8960
          Epoch [90/100], Loss: 0.8891
          Epoch [100/100], Loss: 0.8841
          Training finished.
          [HAMI-core Msg(1:140244541377408:multiprocess_memory_limit.c:468)]: Calling exit handler 1
          "},{"location":"admin/baize/developer/jobs/pytorch.html#pytorch_2","title":"Pytorch \u5206\u5e03\u5f0f\u4efb\u52a1","text":"
          1. \u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3 \uff0c\u8fdb\u5165 \u4efb\u52a1\u5217\u8868 \u9875\u9762\u3002
          2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
          3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a Pytorch \u5206\u5e03\u5f0f\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002
          4. \u586b\u5199\u4efb\u52a1\u540d\u79f0\u3001\u63cf\u8ff0\u540e\u70b9\u51fb \u786e\u5b9a \u3002
          "},{"location":"admin/baize/developer/jobs/pytorch.html#_6","title":"\u8fd0\u884c\u53c2\u6570","text":"
          • Start Command: use bash
          • Command Arguments: use the following script
          import os\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nclass SimpleModel(nn.Module):\n    def __init__(self):\n        super(SimpleModel, self).__init__()\n        self.fc = nn.Linear(10, 1)\n\n    def forward(self, x):\n        return self.fc(x)\n\ndef train():\n    # Print environment info\n    print(f'PyTorch version: {torch.__version__}')\n    print(f'CUDA available: {torch.cuda.is_available()}')\n    if torch.cuda.is_available():\n        print(f'CUDA version: {torch.version.cuda}')\n        print(f'CUDA device count: {torch.cuda.device_count()}')\n\n    rank = int(os.environ.get('RANK', '0'))\n    world_size = int(os.environ.get('WORLD_SIZE', '1'))\n\n    print(f'Rank: {rank}, World Size: {world_size}')\n\n    # Initialize the distributed environment\n    try:\n        if world_size > 1:\n            dist.init_process_group('nccl')\n            print('Distributed process group initialized successfully')\n        else:\n            print('Running in non-distributed mode')\n    except Exception as e:\n        print(f'Error initializing process group: {e}')\n        return\n\n    # Set the device\n    try:\n        if torch.cuda.is_available():\n            device = torch.device(f'cuda:{rank % torch.cuda.device_count()}')\n            print(f'Using CUDA device: {device}')\n        else:\n            device = torch.device('cpu')\n            print('CUDA not available, using CPU')\n    except Exception as e:\n        print(f'Error setting device: {e}')\n        device = torch.device('cpu')\n        print('Falling back to CPU')\n\n    try:\n        model = SimpleModel().to(device)\n        print('Model moved to device successfully')\n    except Exception as e:\n        print(f'Error moving model to device: {e}')\n        return\n\n    try:\n        if world_size > 1:\n            ddp_model = DDP(model, device_ids=[rank % torch.cuda.device_count()] if torch.cuda.is_available() else None)\n            print('DDP model created successfully')\n        else:\n            ddp_model = model\n            print('Using non-distributed model')\n    except Exception as e:\n        print(f'Error creating DDP model: {e}')\n        return\n\n    loss_fn = nn.MSELoss()\n    optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)\n\n    # Generate some random data\n    try:\n        data = torch.randn(100, 10, device=device)\n        labels = torch.randn(100, 1, device=device)\n        print('Data generated and moved to device successfully')\n    except Exception as e:\n        print(f'Error generating or moving data to device: {e}')\n        return\n\n    for epoch in range(10):\n        try:\n            ddp_model.train()\n            outputs = ddp_model(data)\n            loss = loss_fn(outputs, labels)\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n\n            if rank == 0:\n                print(f'Epoch {epoch}, Loss: {loss.item():.4f}')\n        except Exception as e:\n            print(f'Error during training epoch {epoch}: {e}')\n            break\n\n    if world_size > 1:\n        dist.destroy_process_group()\n\nif __name__ == '__main__':\n    train()\n
          "},{"location":"admin/baize/developer/jobs/pytorch.html#_7","title":"\u4efb\u52a1\u526f\u672c\u6570","text":"

          Note that a Pytorch Distributed training job creates a group of Master and Worker training Pods: the Master coordinates the training job, while the Workers perform the actual training work.

          Note

          In this demo, the number of Master replicas is 1 and the number of Worker replicas is 2, so we need to set the replica count to 3 in the Job Configuration, i.e. Master replicas + Worker replicas. Pytorch automatically coordinates the Master and Worker roles.

          "},{"location":"admin/baize/developer/jobs/pytorch.html#_8","title":"\u8fd0\u884c\u7ed3\u679c","text":"

          Likewise, you can open the job details to check resource usage and the log output of each Pod.

          "},{"location":"admin/baize/developer/jobs/tensorboard.html","title":"\u4efb\u52a1\u5206\u6790\u4ecb\u7ecd","text":"

          The AI Lab module provides an important visual analysis tool for the model development process, used to display the training process and results of machine learning models. This article introduces the basic concepts of Job Analysis (Tensorboard), how to use it in the AI Lab system, and how to configure the log content of datasets.

          Note

          Tensorboard is a visualization tool provided by TensorFlow for displaying the training process and results of machine learning models. It helps developers understand training dynamics more intuitively, analyze model performance, debug model issues, and more.

          The role and advantages of Tensorboard in model development:

          • Visualize the training process: display metrics such as training and validation loss and accuracy in charts, helping developers intuitively observe how the model is training.
          • Debug and optimize models: inspect the weights and gradient distributions of different layers to help developers find and fix problems in the model.
          • Compare different experiments: display the results of multiple experiments side by side, making it easy to compare the effects of different models and hyperparameter configurations.
          • Track training data: record the datasets and parameters used during training to ensure the reproducibility of experiments.
          "},{"location":"admin/baize/developer/jobs/tensorboard.html#tensorboard","title":"\u5982\u4f55\u521b\u5efa Tensorboard","text":"

          The AI Lab system provides a convenient way to create and manage Tensorboards. The specific steps are as follows:

          "},{"location":"admin/baize/developer/jobs/tensorboard.html#notebook-tensorboard","title":"\u5728\u521b\u5efa\u65f6 Notebook \u542f\u7528 Tensorboard","text":"
          1. Create a Notebook: create a new Notebook on the AI Lab platform.
          2. Enable Tensorboard: on the Notebook creation page, enable the Tensorboard option and specify the dataset and log path.

          "},{"location":"admin/baize/developer/jobs/tensorboard.html#tensorboard_1","title":"\u5728\u5206\u5e03\u5f0f\u4efb\u52a1\u521b\u5efa\u53ca\u5b8c\u6210\u540e\u542f\u7528 Tensorboard","text":"
          1. Create a distributed job: create a new distributed training job on the AI Lab platform.
          2. Configure Tensorboard: on the job configuration page, enable the Tensorboard option and specify the dataset and log path (see the sketch after this list).
          3. View Tensorboard after the job completes: after the job completes, you can find the Tensorboard link on the job details page; click the link to see the visualized results of the training process.
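          A minimal sketch of the training-script side, assuming TensorFlow and an illustrative log path /home/jovyan/data/log (this path is an assumption for illustration; it must match the dataset mount path and log path you configured for the job):

          import tensorflow as tf\n\n# Illustrative path: it must match the dataset mount path and the log path\n# configured for the job, so the platform can locate the event files\nlog_dir = '/home/jovyan/data/log'\n\nwriter = tf.summary.create_file_writer(log_dir)\nwith writer.as_default():\n    for step in range(100):\n        # Any scalars written here become visible in the job's Tensorboard\n        tf.summary.scalar('demo_metric', 0.99 ** step, step=step)\nwriter.flush()\n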

          "},{"location":"admin/baize/developer/jobs/tensorboard.html#notebook-tensorboard_1","title":"\u5728 Notebook \u4e2d\u76f4\u63a5\u5f15\u7528 Tensorboard","text":"

          In a Notebook, you can start Tensorboard directly through code. The following is an example:

          # Import the necessary libraries\nimport tensorflow as tf\nimport datetime\n\n# Define the log directory\nlog_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n# Create the Tensorboard callback\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n\n# Build and compile the model\nmodel = tf.keras.models.Sequential([\n    tf.keras.layers.Flatten(input_shape=(28, 28)),\n    tf.keras.layers.Dense(512, activation='relu'),\n    tf.keras.layers.Dropout(0.2),\n    tf.keras.layers.Dense(10, activation='softmax')\n])\n\nmodel.compile(optimizer='adam',\n              loss='sparse_categorical_crossentropy',\n              metrics=['accuracy'])\n\n# Load and normalize the MNIST dataset so x_train/y_train/x_test/y_test are defined\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\n# Train the model with the Tensorboard callback enabled\nmodel.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test), callbacks=[tensorboard_callback])\n
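          Once event files exist, the Tensorboard UI itself can be brought up from inside the Notebook. A minimal sketch, assuming a Jupyter-based Notebook with the standard tensorboard package available (logs/fit is the directory defined above):

          # In a Jupyter cell the equivalent magics would be:\n#   %load_ext tensorboard\n#   %tensorboard --logdir logs/fit\nfrom tensorboard import notebook\n\n# Start a Tensorboard instance pointing at the log directory defined above\nnotebook.start('--logdir logs/fit')\n\n# List the Tensorboard instances currently running in this environment\nnotebook.list()\n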
          "},{"location":"admin/baize/developer/jobs/tensorboard.html#_2","title":"\u5982\u4f55\u914d\u7f6e\u6570\u636e\u96c6\u7684\u65e5\u5fd7\u5185\u5bb9","text":"

          When using Tensorboard, you can record and configure different datasets and log content. The following are some common configuration approaches:

          "},{"location":"admin/baize/developer/jobs/tensorboard.html#_3","title":"\u914d\u7f6e\u8bad\u7ec3\u548c\u9a8c\u8bc1\u6570\u636e\u96c6\u7684\u65e5\u5fd7","text":"

          When training a model, you can use TensorFlow's tf.summary API to record logs for the training and validation datasets. The following is an example:

          # Import the necessary libraries\nimport tensorflow as tf\n\n# Create the log directories\ntrain_log_dir = 'logs/gradient_tape/train'\nval_log_dir = 'logs/gradient_tape/val'\ntrain_summary_writer = tf.summary.create_file_writer(train_log_dir)\nval_summary_writer = tf.summary.create_file_writer(val_log_dir)\n\n# Train the model and record logs\n# (EPOCHS, train_dataset, val_dataset, train_step, val_step and the metrics\n# train_loss, train_accuracy, val_loss, val_accuracy are assumed to be defined\n# elsewhere; see the sketch after this block)\nfor epoch in range(EPOCHS):\n    for (x_train, y_train) in train_dataset:\n        # Training step\n        train_step(x_train, y_train)\n        with train_summary_writer.as_default():\n            tf.summary.scalar('loss', train_loss.result(), step=epoch)\n            tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)\n\n    for (x_val, y_val) in val_dataset:\n        # Validation step\n        val_step(x_val, y_val)\n        with val_summary_writer.as_default():\n            tf.summary.scalar('loss', val_loss.result(), step=epoch)\n            tf.summary.scalar('accuracy', val_accuracy.result(), step=epoch)\n
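          The snippet above assumes the metrics and step functions already exist. A minimal sketch of those missing pieces, under the assumption of a small Keras model with an Adam optimizer and sparse categorical cross-entropy (none of which are prescribed by the original example), might look like this:

          import tensorflow as tf\n\n# Assumed model, optimizer and loss; replace with your own\nmodel = tf.keras.Sequential([tf.keras.layers.Dense(10)])\noptimizer = tf.keras.optimizers.Adam()\nloss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n\n# Metrics referenced by the logging loop above\ntrain_loss = tf.keras.metrics.Mean('train_loss')\ntrain_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('train_accuracy')\nval_loss = tf.keras.metrics.Mean('val_loss')\nval_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('val_accuracy')\n\n@tf.function\ndef train_step(x, y):\n    # One optimization step; also updates the training metrics\n    with tf.GradientTape() as tape:\n        predictions = model(x, training=True)\n        loss = loss_object(y, predictions)\n    gradients = tape.gradient(loss, model.trainable_variables)\n    optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n    train_loss(loss)\n    train_accuracy(y, predictions)\n\n@tf.function\ndef val_step(x, y):\n    # Forward pass only; updates the validation metrics\n    predictions = model(x, training=False)\n    val_loss(loss_object(y, predictions))\n    val_accuracy(y, predictions)\n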
          "},{"location":"admin/baize/developer/jobs/tensorboard.html#_4","title":"\u914d\u7f6e\u81ea\u5b9a\u4e49\u65e5\u5fd7","text":"

          Besides the logs for the training and validation datasets, you can also record other custom log content, such as the learning rate and gradient distributions. The following is an example:

          # Record custom logs\nwith train_summary_writer.as_default():\n    tf.summary.scalar('learning_rate', learning_rate, step=epoch)\n    tf.summary.histogram('gradients', gradients, step=epoch)\n
          "},{"location":"admin/baize/developer/jobs/tensorboard.html#tensorboard_2","title":"Tensorboard \u7ba1\u7406","text":"

          In AI Lab, Tensorboards created in any of the above ways are displayed together on the Job Analysis page, making it easy for users to view and manage them.

          On the Job Analysis page, users can see each Tensorboard's link, status, creation time and other information, and access its visualized results directly through the link.

          "},{"location":"admin/baize/developer/jobs/tensorflow.html","title":"Tensorflow \u4efb\u52a1","text":"

          Tensorflow, alongside Pytorch, is another very active open-source deep learning framework, providing a flexible environment for training and deployment.

          AI Lab also provides support and adaptation for the Tensorflow framework: you can quickly create Tensorflow jobs through the UI and train models.

          "},{"location":"admin/baize/developer/jobs/tensorflow.html#_1","title":"\u4efb\u52a1\u914d\u7f6e\u4ecb\u7ecd","text":"
          • The job type supports both Tensorflow Single and Tensorflow Distributed modes.
          • The runtime image already supports the Tensorflow framework by default, so no extra installation is needed.
          "},{"location":"admin/baize/developer/jobs/tensorflow.html#_2","title":"\u4efb\u52a1\u8fd0\u884c\u73af\u5883","text":"

          Here we use the baize-notebook base image together with an associated environment as the base runtime environment for the job.

          To learn how to create an environment, refer to the Environment List.

          "},{"location":"admin/baize/developer/jobs/tensorflow.html#_3","title":"\u521b\u5efa\u4efb\u52a1","text":""},{"location":"admin/baize/developer/jobs/tensorflow.html#tfjob","title":"\u793a\u4f8b TFJob \u5355\u673a\u4efb\u52a1","text":"
          1. Log in to the AI Lab platform, click Job Center in the left navigation bar, and enter the Training Jobs page.
          2. Click the Create button in the upper-right corner to enter the job creation page.
          3. Select the job type Tensorflow Single, then click Next.
          4. Fill in the job name and description, then click OK.
          "},{"location":"admin/baize/developer/jobs/tensorflow.html#_4","title":"\u63d0\u524d\u9884\u70ed\u4ee3\u7801\u4ed3\u5e93","text":"

          Using AI Lab -> Dataset List, create a dataset and pull the remote GitHub code into the dataset. This way, when creating a job, you can directly select the dataset and mount the code into the job.

          Demo code repository address: https://github.com/d-run/training-sample-code/

          "},{"location":"admin/baize/developer/jobs/tensorflow.html#_5","title":"\u8fd0\u884c\u53c2\u6570","text":"
          • Start Command: use bash
          • Command Arguments: use python /code/tensorflow/tf-single.py
          \"\"\"\n  pip install tensorflow numpy\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\n# \u521b\u5efa\u4e00\u4e9b\u968f\u673a\u6570\u636e\nx = np.random.rand(100, 1)\ny = 2 * x + 1 + np.random.rand(100, 1) * 0.1\n\n# \u521b\u5efa\u4e00\u4e2a\u7b80\u5355\u7684\u6a21\u578b\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Dense(1, input_shape=(1,))\n])\n\n# \u7f16\u8bd1\u6a21\u578b\nmodel.compile(optimizer='adam', loss='mse')\n\n# \u8bad\u7ec3\u6a21\u578b\uff0c\u5c06 epochs \u6539\u4e3a 10\nhistory = model.fit(x, y, epochs=10, verbose=1)\n\n# \u6253\u5370\u6700\u7ec8\u635f\u5931\nprint('Final loss: {' + str(history.history['loss'][-1]) +'}')\n\n# \u4f7f\u7528\u6a21\u578b\u8fdb\u884c\u9884\u6d4b\ntest_x = np.array([[0.5]])\nprediction = model.predict(test_x)\nprint(f'Prediction for x=0.5: {prediction[0][0]}')\n
          "},{"location":"admin/baize/developer/jobs/tensorflow.html#_6","title":"\u8fd0\u884c\u7ed3\u679c","text":"

          After the job is submitted successfully, you can open the job details to check resource usage. From the upper-right corner, go to Workload Details to view the log output of the training process.

          "},{"location":"admin/baize/developer/jobs/tensorflow.html#tfjob_1","title":"TFJob \u5206\u5e03\u5f0f\u4efb\u52a1","text":"
          1. Log in to AI Lab, click Job Center in the left navigation bar, and enter the Job List page.
          2. Click the Create button in the upper-right corner to enter the job creation page.
          3. Select the job type Tensorflow Distributed, then click Next.
          4. Fill in the job name and description, then click OK.
          "},{"location":"admin/baize/developer/jobs/tensorflow.html#_7","title":"\u793a\u4f8b\u4efb\u52a1\u4ecb\u7ecd","text":"

          This example involves three roles: Chief, Worker, and Parameter Server (PS).

          • Chief: mainly responsible for coordinating the training process and saving model checkpoints.
          • Worker: performs the actual model training.
          • PS: stores and updates model parameters in asynchronous training.

          Different resources are allocated to the different roles: the Chief and Workers use GPUs, while the PS uses CPU and larger memory. The roles are wired together through the TF_CONFIG environment variable injected into each Pod; a sketch of its shape is shown below.
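          As a sketch only (the hostnames, ports and replica counts are illustrative, not guaranteed platform values), the TF_CONFIG seen by one of the Worker Pods could look like the following, and is parsed exactly as in the sample script below:

          import json\nimport os\n\n# Illustrative TF_CONFIG for a 1-Chief / 2-Worker / 1-PS cluster;\n# in a real job the training operator injects this environment variable\nexample = {\n    'cluster': {\n        'chief': ['demo-chief-0:2222'],\n        'worker': ['demo-worker-0:2222', 'demo-worker-1:2222'],\n        'ps': ['demo-ps-0:2222'],\n    },\n    'task': {'type': 'worker', 'index': 0},\n}\nos.environ.setdefault('TF_CONFIG', json.dumps(example))\n\n# Parse it the same way the sample training script does\ntf_config = json.loads(os.environ['TF_CONFIG'])\nprint(tf_config['task']['type'], tf_config['task']['index'])\n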

          "},{"location":"admin/baize/developer/jobs/tensorflow.html#_8","title":"\u8fd0\u884c\u53c2\u6570","text":"
          • Start Command: use bash
          • Command Arguments: use python /code/tensorflow/tensorflow-distributed.py
          import os\nimport json\nimport tensorflow as tf\n\nclass SimpleModel(tf.keras.Model):\n    def __init__(self):\n        super(SimpleModel, self).__init__()\n        self.fc = tf.keras.layers.Dense(1, input_shape=(10,))\n\n    def call(self, x):\n        return self.fc(x)\n\ndef train():\n    # Print environment info\n    print(f\"TensorFlow version: {tf.__version__}\")\n    print(f\"GPU available: {tf.test.is_gpu_available()}\")\n    if tf.test.is_gpu_available():\n        print(f\"GPU device count: {len(tf.config.list_physical_devices('GPU'))}\")\n\n    # Get distributed training info\n    tf_config = json.loads(os.environ.get('TF_CONFIG') or '{}')\n    task_type = tf_config.get('task', {}).get('type')\n    task_id = tf_config.get('task', {}).get('index')\n\n    print(f\"Task type: {task_type}, Task ID: {task_id}\")\n\n    # Set the distribution strategy\n    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()\n\n    with strategy.scope():\n        model = SimpleModel()\n        loss_fn = tf.keras.losses.MeanSquaredError()\n        optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)\n\n    # Generate some random data\n    data = tf.random.normal((100, 10))\n    labels = tf.random.normal((100, 1))\n\n    @tf.function\n    def train_step(inputs, labels):\n        with tf.GradientTape() as tape:\n            predictions = model(inputs)\n            loss = loss_fn(labels, predictions)\n        gradients = tape.gradient(loss, model.trainable_variables)\n        optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n        return loss\n\n    for epoch in range(10):\n        loss = train_step(data, labels)\n        if task_type == 'chief':\n            print(f'Epoch {epoch}, Loss: {loss.numpy():.4f}')\n\nif __name__ == '__main__':\n    train()\n
          "},{"location":"admin/baize/developer/jobs/tensorflow.html#_9","title":"\u8fd0\u884c\u7ed3\u679c","text":"

          Likewise, you can open the job details to check resource usage and the log output of each Pod.

          "},{"location":"admin/baize/developer/jobs/view.html","title":"\u67e5\u770b\u4efb\u52a1\uff08Job\uff09\u5de5\u4f5c\u8d1f\u8f7d","text":"

          Once a job is created, it appears in the training job list.

          1. In the training job list, click \u2507 -> Job Workload Details on the right side of a job.

          2. In the popup that appears, select the Pod you want to view, then click Enter.

          3. You will be redirected to the container management interface, where you can view the container's working status, its labels and annotations, and the events that occurred.

          4. You can also view the detailed logs of the current Pod for the recent period. 100 lines of logs are shown by default; to view more detailed logs or to download the logs, click the blue Observability text at the top.

          5. You can also use the ... button in the upper-right corner to view the current Pod's YAML and to upload or download files. Below is an example of a Pod's YAML.

          kind: Pod\napiVersion: v1\nmetadata:\n  name: neko-tensorboard-job-test-202404181843-skxivllb-worker-0\n  namespace: default\n  uid: ddedb6ff-c278-47eb-ae1e-0de9b7c62f8c\n  resourceVersion: '41092552'\n  creationTimestamp: '2024-04-18T10:43:36Z'\n  labels:\n    training.kubeflow.org/job-name: neko-tensorboard-job-test-202404181843-skxivllb\n    training.kubeflow.org/operator-name: pytorchjob-controller\n    training.kubeflow.org/replica-index: '0'\n    training.kubeflow.org/replica-type: worker\n  annotations:\n    cni.projectcalico.org/containerID: 0cfbb9af257d5e69027c603c6cb2d3890a17c4ae1a145748d5aef73a10d7fbe1\n    cni.projectcalico.org/podIP: ''\n    cni.projectcalico.org/podIPs: ''\n    hami.io/bind-phase: success\n    hami.io/bind-time: '1713437016'\n    hami.io/vgpu-devices-allocated: GPU-29d5fa0d-935b-2966-aff8-483a174d61d1,NVIDIA,1024,20:;\n    hami.io/vgpu-devices-to-allocate: ;\n    hami.io/vgpu-node: worker-a800-1\n    hami.io/vgpu-time: '1713437016'\n    k8s.v1.cni.cncf.io/network-status: |-\n      [{\n          \"name\": \"kube-system/calico\",\n          \"ips\": [\n              \"10.233.97.184\"\n          ],\n          \"default\": true,\n          \"dns\": {}\n      }]\n    k8s.v1.cni.cncf.io/networks-status: |-\n      [{\n          \"name\": \"kube-system/calico\",\n          \"ips\": [\n              \"10.233.97.184\"\n          ],\n          \"default\": true,\n          \"dns\": {}\n      }]\n  ownerReferences:\n    - apiVersion: kubeflow.org/v1\n      kind: PyTorchJob\n      name: neko-tensorboard-job-test-202404181843-skxivllb\n      uid: e5a8b05d-1f03-4717-8e1c-4ec928014b7b\n      controller: true\n      blockOwnerDeletion: true\nspec:\n  volumes:\n    - name: 0-dataset-pytorch-examples\n      persistentVolumeClaim:\n        claimName: pytorch-examples\n    - name: kube-api-access-wh9rh\n      projected:\n        sources:\n          - serviceAccountToken:\n              expirationSeconds: 3607\n              path: token\n          - configMap:\n              name: kube-root-ca.crt\n              items:\n                - key: ca.crt\n                  path: ca.crt\n          - downwardAPI:\n              items:\n                - path: namespace\n                  fieldRef:\n                    apiVersion: v1\n                    fieldPath: metadata.namespace\n        defaultMode: 420\n  containers:\n    - name: pytorch\n      image: m.daocloud.io/docker.io/pytorch/pytorch\n      command:\n        - bash\n      args:\n        - '-c'\n        - >-\n          ls -la /root && which pip && pip install pytorch_lightning tensorboard\n          && python /root/Git/pytorch/examples/mnist/main.py\n      ports:\n        - name: pytorchjob-port\n          containerPort: 23456\n          protocol: TCP\n      env:\n        - name: PYTHONUNBUFFERED\n          value: '1'\n        - name: PET_NNODES\n          value: '1'\n      resources:\n        limits:\n          cpu: '4'\n          memory: 8Gi\n          nvidia.com/gpucores: '20'\n          nvidia.com/gpumem: '1024'\n          nvidia.com/vgpu: '1'\n        requests:\n          cpu: '4'\n          memory: 8Gi\n          nvidia.com/gpucores: '20'\n          nvidia.com/gpumem: '1024'\n          nvidia.com/vgpu: '1'\n      volumeMounts:\n        - name: 0-dataset-pytorch-examples\n          mountPath: /root/Git/pytorch/examples\n        - name: kube-api-access-wh9rh\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: /dev/termination-log\n      
terminationMessagePolicy: File\n      imagePullPolicy: Always\n  restartPolicy: Never\n  terminationGracePeriodSeconds: 30\n  dnsPolicy: ClusterFirst\n  serviceAccountName: default\n  serviceAccount: default\n  nodeName: worker-a800-1\n  securityContext: {}\n  affinity: {}\n  schedulerName: hami-scheduler\n  tolerations:\n    - key: node.kubernetes.io/not-ready\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n    - key: node.kubernetes.io/unreachable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n  priorityClassName: baize-high-priority\n  priority: 100000\n  enableServiceLinks: true\n  preemptionPolicy: PreemptLowerPriority\nstatus:\n  phase: Succeeded\n  conditions:\n    - type: Initialized\n      status: 'True'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:43:36Z'\n      reason: PodCompleted\n    - type: Ready\n      status: 'False'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:46:34Z'\n      reason: PodCompleted\n    - type: ContainersReady\n      status: 'False'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:46:34Z'\n      reason: PodCompleted\n    - type: PodScheduled\n      status: 'True'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:43:36Z'\n  hostIP: 10.20.100.211\n  podIP: 10.233.97.184\n  podIPs:\n    - ip: 10.233.97.184\n  startTime: '2024-04-18T10:43:36Z'\n  containerStatuses:\n    - name: pytorch\n      state:\n        terminated:\n          exitCode: 0\n          reason: Completed\n          startedAt: '2024-04-18T10:43:39Z'\n          finishedAt: '2024-04-18T10:46:34Z'\n          containerID: >-\n            containerd://09010214bcf3315e81d38fba50de3943c9d2b48f50a6cc2e83f8ef0e5c6eeec1\n      lastState: {}\n      ready: false\n      restartCount: 0\n      image: m.daocloud.io/docker.io/pytorch/pytorch:latest\n      imageID: >-\n        m.daocloud.io/docker.io/pytorch/pytorch@sha256:11691e035a3651d25a87116b4f6adc113a27a29d8f5a6a583f8569e0ee5ff897\n      containerID: >-\n        containerd://09010214bcf3315e81d38fba50de3943c9d2b48f50a6cc2e83f8ef0e5c6eeec1\n      started: false\n  qosClass: Guaranteed\n
          "},{"location":"admin/baize/developer/notebooks/baizectl.html","title":"baizectl \u547d\u4ee4\u884c\u5de5\u5177\u4f7f\u7528\u6307\u5357","text":"

          baizectl is a command-line tool in the AI Lab module dedicated to model developers and data scientists. It provides a series of commands to help users manage distributed training jobs, check job status, manage datasets and more, and it supports connecting to Kubernetes worker clusters and \u7b97\u4e30 AI computing platform workspaces, helping users use and manage Kubernetes platform resources more efficiently.

          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_1","title":"\u5b89\u88c5","text":"

          Currently, baizectl is already integrated into AI Lab. After creating a Notebook, you can use baizectl directly inside it.

          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_2","title":"\u5feb\u901f\u4e0a\u624b","text":""},{"location":"admin/baize/developer/notebooks/baizectl.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"

          The basic information of the baizectl command is as follows:

          jovyan@19d0197587cc:/$ baizectl\nAI platform management tool\n\nUsage:\n  baizectl [command]\n\nAvailable Commands:\n  completion  Generate the autocompletion script for the specified shell\n  data        Management datasets\n  help        Help about any command\n  job         Manage jobs\n  login       Login to the platform\n  version     Show cli version\n\nFlags:\n      --cluster string     Cluster name to operate\n  -h, --help               help for baizectl\n      --mode string        Connection mode: auto, api, notebook (default \"auto\")\n  -n, --namespace string   Namespace to use for the operation. If not set, the default Namespace will be used.\n  -s, --server string      \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 access base url\n      --skip-tls-verify    Skip TLS certificate verification\n      --token string       \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 access token\n  -w, --workspace int32    Workspace ID to use for the operation\n\nUse \"baizectl [command] --help\" for more information about a command.\n

          The above is the basic information of baizectl. Users can view the help information via baizectl --help, or the help for a specific command via baizectl [command] --help.

          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_4","title":"\u67e5\u770b\u7248\u672c\u4fe1\u606f","text":"

          baizectl supports viewing its version information via the version command.

          (base) jovyan@den-0:~$ baizectl version \nbaizectl version: v0.5.0, commit sha: ac0837c4\n
          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_5","title":"\u547d\u4ee4\u683c\u5f0f","text":"

          The basic format of a baizectl command is as follows:

          baizectl [command] [flags]\n

          Here, [command] is the specific operation command, such as data or job, and [flags] are optional parameters that specify the details of the operation.

          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_6","title":"\u5e38\u7528\u9009\u9879","text":"
          • --cluster string: specify the name of the cluster to operate on
          • -h, --help: display help information
          • --mode string: connection mode; possible values are auto, api, notebook (default auto)
          • -n, --namespace string: specify the namespace for the operation; if not set, the default namespace is used
          • -s, --server string: \u7b97\u4e30 AI computing platform access base URL
          • --skip-tls-verify: skip TLS certificate verification
          • --token string: \u7b97\u4e30 AI computing platform access token
          • -w, --workspace int32: specify the workspace ID for the operation (a combined example is sketched below)
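          As a sketch only (the cluster name, namespace and workspace ID below are placeholders, not real values), the global options can be combined with any subcommand:

          # List jobs in a specific cluster, namespace and workspace (placeholder values)\nbaizectl job ls --cluster my-cluster -n my-namespace -w 1\n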
          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_7","title":"\u529f\u80fd\u4ecb\u7ecd","text":""},{"location":"admin/baize/developer/notebooks/baizectl.html#_8","title":"\u4efb\u52a1\u7ba1\u7406","text":"

          baizectl provides a series of commands to manage distributed training jobs, including listing jobs, submitting jobs, viewing logs, restarting jobs, and deleting jobs.

          jovyan@19d0197587cc:/$ baizectl job\nManage jobs\n\nUsage:\n  baizectl job [command]\n\nAvailable Commands:\n  delete      Delete a job\n  logs        Show logs of a job\n  ls          List jobs\n  restart     restart a job\n  submit      Submit a job\n\nFlags:\n  -h, --help            help for job\n  -o, --output string   Output format. One of: table, json, yaml (default \"table\")\n      --page int        Page number (default 1)\n      --page-size int   Page size (default -1)\n      --search string   Search query\n      --sort string     Sort order\n      --truncate int    Truncate output to the given length, 0 means no truncation (default 50)\n\nUse \"baizectl job [command] --help\" for more information about a command.\n
          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_9","title":"\u63d0\u4ea4\u8bad\u7ec3\u4efb\u52a1","text":"

          baizectl supports submitting a job with the submit command. Users can view the details via baizectl job submit --help.

          (base) jovyan@den-0:~$ baizectl job submit --help\nSubmit a job\n\nUsage:\n  baizectl job submit [flags] -- command ...\n\nAliases:\n  submit, create\n\nExamples:\n# Submit a job to run the command \"torchrun python train.py\"\nbaizectl job submit -- torchrun python train.py\n# Submit a job with 2 workers(each pod use 4 gpus) to run the command \"torchrun python train.py\" and use the image \"pytorch/pytorch:1.8.1-cuda11.1-cudnn8-runtime\"\nbaizectl job submit --image pytorch/pytorch:1.8.1-cuda11.1-cudnn8-runtime --workers 2 --resources nvidia.com/gpu=4 -- torchrun python train.py\n# Submit a tensorflow job to run the command \"python train.py\"\nbaizectl job submit --tensorflow -- python train.py\n\n\nFlags:\n      --annotations stringArray                       The annotations of the job, the format is key=value\n      --auto-load-env                                 It only takes effect when executed in Notebook, the environment variables of the current environment will be automatically read and set to the environment variables of the Job, the specific environment variables to be read can be specified using the BAIZE_MAPPING_ENVS environment variable, the default is PATH,CONDA_*,*PYTHON*,NCCL_*, if set to false, the environment variables of the current environment will not be read. (default true)\n      --commands stringArray                          The default command of the job\n  -d, --datasets stringArray                          The dataset bind to the job, the format is datasetName:mountPath, e.g. mnist:/data/mnist\n  -e, --envs stringArray                              The environment variables of the job, the format is key=value\n  -x, --from-notebook string                          Define whether to read the configuration of the current Notebook and directly create tasks, including images, resources, Dataset, etc.\n                                                      auto: Automatically determine the mode according to the current environment. If the current environment is a Notebook, it will be set to notebook mode.\n                                                      false: Do not read the configuration of the current Notebook.\n                                                      true: Read the configuration of the current Notebook. (default \"auto\")\n  -h, --help                                          help for submit\n      --image string                                  The image of the job, it must be specified if fromNotebook is false.\n  -t, --job-type string                               Job type: PYTORCH, TENSORFLOW, PADDLE (default \"PYTORCH\")\n      --labels stringArray                            The labels of the job, the format is key=value\n      --max-retries int32                             number of retries before marking this job failed\n      --max-run-duration int                          Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it\n      --name string                                   The name of the job, if empty, the name will be generated automatically.\n      --paddle                                        PaddlePaddle Job, has higher priority than --job-type\n      --priority string                               The priority of the job, current support baize-medium-priority, baize-low-priority, baize-high-priority\n      --pvcs stringArray                              The pvcs bind to the job, the format is pvcName:mountPath, e.g. 
mnist:/data/mnist\n      --pytorch                                       Pytorch Job, has higher priority than --job-type\n      --queue string                                  The queue to used\n      --requests-resources stringArray                Similar to resources, but sets the resources of requests\n      --resources stringArray                         The resources of the job, it is a string in the format of cpu=1,memory=1Gi,nvidia.com/gpu=1, it will be set to the limits and requests of the container.\n      --restart-policy string                         The job restart policy (default \"on-failure\")\n      --runtime-envs baizectl data ls --runtime-env   The runtime environment to use for the job, you can use baizectl data ls --runtime-env to get the runtime environment\n      --shm-size int32                                The shared memory size of the job, default is 0, which means no shared memory, if set to more than 0, the job will use the shared memory, the unit is MiB\n      --tensorboard-log-dir string                    The tensorboard log directory, if set, the job will automatically start tensorboard, else not. The format is /path/to/log, you can use relative path in notebook.\n      --tensorflow                                    Tensorflow Job, has higher priority than --job-type\n      --workers int                                   The workers of the job, default is 1, which means single worker, if set to more than 1, the job will be distributed. (default 1)\n      --working-dir string                            The working directory of job container, if in notebook mode, the default is the directory of the current file\n

          Note

          Description of the parameters for submitting a job:

          • --name: job name; if empty, a name is generated automatically
          • --image: image name; must be specified
          • --priority: job priority; supports high = baize-high-priority, medium = baize-medium-priority, low = baize-low-priority
          • --resources: job resources, in the format cpu=1,memory=1Gi,nvidia.com/gpu=1
          • --workers: number of job worker nodes; default is 1; when set to more than 1, the job runs distributed
          • --queue: job queue; the queue resource must be created in advance
          • --working-dir: working directory; in Notebook mode, the directory of the current file is used by default
          • --datasets: datasets, in the format datasetName:mountPath, e.g. mnist:/data/mnist
          • --shm-size: shared memory size, in MiB; can be enabled for distributed training jobs to use shared memory
          • --labels: job labels, in the format key=value
          • --max-retries: maximum number of retries; the job is restarted on failure up to this many times; unlimited by default
          • --max-run-duration: maximum run duration; the job is terminated by the system if it runs longer than the specified time; unlimited by default
          • --restart-policy: restart policy; supports on-failure, never, always; default is on-failure
          • --from-notebook: whether to read the configuration from the current Notebook; supports auto, true, false; default is auto
          "},{"location":"admin/baize/developer/notebooks/baizectl.html#pytorch","title":"PyTorch \u5355\u673a\u4efb\u52a1\u793a\u4f8b","text":"

          The following is an example of submitting a PyTorch single-node training job; users can modify the parameters according to their actual needs:

          baizectl job submit --name demojob-v2 -t PYTORCH \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --resources cpu=1,memory=1Gi \\\n    --workers 1 \\\n    --queue default \\\n    --working-dir /data \\\n    --datasets fashion-mnist:/data/mnist \\\n    --labels job_type=pytorch \\\n    --max-retries 3 \\\n    --max-run-duration 60 \\\n    --restart-policy on-failure \\\n    -- sleep 1000\n
          "},{"location":"admin/baize/developer/notebooks/baizectl.html#pytorch_1","title":"PyTorch \u5206\u5e03\u5f0f\u4efb\u52a1\u793a\u4f8b","text":"

          The following is an example of submitting a PyTorch distributed training job; users can modify the parameters according to their actual needs:

          # Setting --workers to more than 1 automatically creates a distributed job\nbaizectl job submit --name demojob-v2 -t PYTORCH \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --resources cpu=1,memory=1Gi \\\n    --workers 2 \\\n    --shm-size 1024 \\\n    --queue default \\\n    --working-dir /data \\\n    --datasets fashion-mnist:/data/mnist \\\n    --labels job_type=pytorch \\\n    --max-retries 3 \\\n    --max-run-duration 60 \\\n    --restart-policy on-failure \\\n    -- sleep 1000\n
          "},{"location":"admin/baize/developer/notebooks/baizectl.html#tensorflow","title":"Tensorflow \u4efb\u52a1\u793a\u4f8b","text":"

          Use the -t parameter to specify the job type. The following is an example of creating a Tensorflow job:

          baizectl job submit --name demojob-v2 -t TENSORFLOW \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --from-notebook auto \\\n    --workers 1 \\\n    --queue default \\\n    --working-dir /data \\\n    --datasets fashion-mnist:/data/mnist \\\n    --labels job_type=tensorflow \\\n    --max-retries 3 \\\n    --max-run-duration 60 \\\n    --restart-policy on-failure \\\n    -- sleep 1000\n

          You can also use the --job-type or --tensorflow parameter to specify the job type; see the example below.
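          For example, taken from the built-in help shown above:

          baizectl job submit --tensorflow -- python train.py\n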

          "},{"location":"admin/baize/developer/notebooks/baizectl.html#paddle","title":"Paddle \u4efb\u52a1\u793a\u4f8b","text":"
          baizectl job submit --name demojob-v2 -t PADDLE \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --queue default \\\n    --working-dir /data \\\n    --datasets fashion-mnist:/data/mnist \\\n    --labels job_type=paddle \\\n    --max-retries 3 \\\n    --max-run-duration 60 \\\n    --restart-policy on-failure \\\n    -- sleep 1000\n
          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_10","title":"\u67e5\u770b\u4efb\u52a1\u5217\u8868","text":"

          baizectl job supports viewing the job list via the ls command. PyTorch jobs are shown by default; users can specify another job type with -t.

          (base) jovyan@den-0:~$ baizectl job ls  # list pytorch jobs by default\n NAME        TYPE     PHASE      DURATION  COMMAND    \n demong      PYTORCH  SUCCEEDED  1m2s      sleep 60   \n demo-sleep  PYTORCH  RUNNING    1h25m28s  sleep 7200 \n(base) jovyan@den-0:~$ baizectl job ls demo-sleep  # view a specific job\n NAME        TYPE     PHASE      DURATION  COMMAND     \n demo-sleep  PYTORCH  RUNNING    1h25m28s  sleep 7200 \n(base) jovyan@den-0:~$ baizectl job ls -t TENSORFLOW   # list tensorflow jobs\n NAME       TYPE        PHASE    DURATION  COMMAND    \n demotfjob  TENSORFLOW  CREATED  0s        sleep 1000 \n

          The job list is displayed as a table by default. To see more information, use the json or yaml format, specified via the -o parameter.

          (base) jovyan@den-0:~$ baizectl job ls -t TENSORFLOW -o yaml\n- baseConfig:\n    args:\n    - sleep\n    - \"1000\"\n    image: release.daocloud.io/baize/baize-notebook:v0.5.0\n    labels:\n      app: den\n    podConfig:\n      affinity: {}\n      kubeEnvs:\n      - name: CONDA_EXE\n        value: /opt/conda/bin/conda\n      - name: CONDA_PREFIX\n        value: /opt/conda\n      - name: CONDA_PROMPT_MODIFIER\n        value: '(base) '\n      - name: CONDA_SHLVL\n        value: \"1\"\n      - name: CONDA_DIR\n        value: /opt/conda\n      - name: CONDA_PYTHON_EXE\n        value: /opt/conda/bin/python\n      - name: CONDA_PYTHON_EXE\n        value: /opt/conda/bin/python\n      - name: CONDA_DEFAULT_ENV\n        value: base\n      - name: PATH\n        value: /opt/conda/bin:/opt/conda/condabin:/command:/opt/conda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n      priorityClass: baize-high-priority\n      queue: default\n  creationTimestamp: \"2024-06-16T07:47:27Z\"\n  jobSpec:\n    runPolicy:\n      suspend: true\n    tfReplicaSpecs:\n      Worker:\n        replicas: 1\n        restartPolicy: OnFailure\n        template:\n          metadata:\n            creationTimestamp: null\n          spec:\n            affinity: {}\n            containers:\n            - args:\n              - sleep\n              - \"1000\"\n              env:\n              - name: CONDA_EXE\n                value: /opt/conda/bin/conda\n              - name: CONDA_PREFIX\n                value: /opt/conda\n              - name: CONDA_PROMPT_MODIFIER\n                value: '(base) '\n              - name: CONDA_SHLVL\n                value: \"1\"\n              - name: CONDA_DIR\n                value: /opt/conda\n              - name: CONDA_PYTHON_EXE\n                value: /opt/conda/bin/python\n              - name: CONDA_PYTHON_EXE\n                value: /opt/conda/bin/python\n              - name: CONDA_DEFAULT_ENV\n                value: base\n              - name: PATH\n                value: /opt/conda/bin:/opt/conda/condabin:/command:/opt/conda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n              image: release.daocloud.io/baize/baize-notebook:v0.5.0\n              name: tensorflow\n              resources:\n                limits:\n                  memory: 1Gi\n                requests:\n                  cpu: \"1\"\n                  memory: 2Gi\n              workingDir: /home/jovyan\n            priorityClassName: baize-high-priority\n  name: demotfjob\n  namespace: ns-chuanjia-ndx\n  phase: CREATED\n  roleConfig:\n    TF_WORKER:\n      replicas: 1\n      resources:\n        limits:\n          memory: 1Gi\n        requests:\n          cpu: \"1\"\n          memory: 2Gi\n  totalResources:\n    limits:\n      memory: \"1073741824\"\n    requests:\n      cpu: \"1\"\n      memory: \"2147483648\"\n  trainingConfig:\n    restartPolicy: RESTART_POLICY_ON_FAILURE\n  trainingMode: SINGLE\n  type: TENSORFLOW\n
          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_11","title":"\u67e5\u770b\u4efb\u52a1\u65e5\u5fd7","text":"

          baizectl job supports viewing job logs with the logs command. Users can view the details via baizectl job logs --help.

          (base) jovyan@den-0:~$ baizectl job logs --help\nShow logs of a job\n\nUsage:\n  baizectl job logs <job-name> [pod-name] [flags]\n\nAliases:\n  logs, log\n\nFlags:\n  -f, --follow            Specify if the logs should be streamed.\n  -h, --help              help for logs\n  -t, --job-type string   Job type: PYTORCH, TENSORFLOW, PADDLE (default \"PYTORCH\")\n      --paddle            PaddlePaddle Job, has higher priority than --job-type\n      --pytorch           Pytorch Job, has higher priority than --job-type\n      --tail int          Lines of recent log file to display.\n      --tensorflow        Tensorflow Job, has higher priority than --job-type\n      --timestamps        Show timestamps\n

          Note

          • The --follow parameter streams logs in real time
          • The --tail parameter specifies the number of log lines to display; the default is 50
          • The --timestamps parameter shows timestamps

          Example of viewing job logs:

          (base) jovyan@den-0:~$ baizectl job log -t TENSORFLOW tf-sample-job-v2-202406161632-evgrbrhn -f\n2024-06-16 08:33:06.083766: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n2024-06-16 08:33:06.086189: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.\n2024-06-16 08:33:06.132416: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.\n2024-06-16 08:33:06.132903: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\nTo enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n2024-06-16 08:33:07.223046: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\nModel: \"sequential\"\n_________________________________________________________________\n Layer (type)                Output Shape              Param #   \n=================================================================\n Conv1 (Conv2D)              (None, 13, 13, 8)         80        \n\n flatten (Flatten)           (None, 1352)              0         \n\n Softmax (Dense)             (None, 10)                13530     \n\n=================================================================\nTotal params: 13610 (53.16 KB)\nTrainable params: 13610 (53.16 KB)\nNon-trainable params: 0 (0.00 Byte)\n...\n
          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_12","title":"\u5220\u9664\u4efb\u52a1","text":"

          baizectl job supports deleting jobs with the delete command, and multiple jobs can be deleted at the same time.

          (base) jovyan@den-0:~$ baizectl job delete --help\nDelete a job\n\nUsage:\n  baizectl job delete [flags]\n\nAliases:\n  delete, del, remove, rm\n\nFlags:\n  -h, --help              help for delete\n  -t, --job-type string   Job type: PYTORCH, TENSORFLOW, PADDLE (default \"PYTORCH\")\n      --paddle            PaddlePaddle Job, has higher priority than --job-type\n      --pytorch           Pytorch Job, has higher priority than --job-type\n      --tensorflow        Tensorflow Job, has higher priority than --job-type\n

          Example of deleting jobs:

          (base) jovyan@den-0:~$ baizectl job ls\n NAME        TYPE     PHASE      DURATION  COMMAND    \n demong      PYTORCH  SUCCEEDED  1m2s      sleep 60   \n demo-sleep  PYTORCH  RUNNING    1h20m51s  sleep 7200 \n demojob     PYTORCH  FAILED     16m46s    sleep 1000 \n demojob-v2  PYTORCH  RUNNING    3m13s     sleep 1000 \n demojob-v3  PYTORCH  CREATED    0s        sleep 1000 \n(base) jovyan@den-0:~$ baizectl job delete demojob      # delete a single job\nDelete job demojob in ns-chuanjia-ndx successfully\n(base) jovyan@den-0:~$ baizectl job delete demojob-v2 demojob-v3     # delete multiple jobs\nDelete job demojob-v2 in ns-chuanjia-ndx successfully\nDelete job demojob-v3 in ns-chuanjia-ndx successfully\n
          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_13","title":"\u91cd\u542f\u4efb\u52a1","text":"

          baizectl job supports restarting jobs with the restart command. Users can view the details via baizectl job restart --help.

          (base) jovyan@den-0:~$ baizectl job restart --help\nrestart a job\n\nUsage:\n  baizectl job restart [flags] job\n\nAliases:\n  restart, rerun\n\nFlags:\n  -h, --help              help for restart\n  -t, --job-type string   Job type: PYTORCH, TENSORFLOW, PADDLE (default \"PYTORCH\")\n      --paddle            PaddlePaddle Job, has higher priority than --job-type\n      --pytorch           Pytorch Job, has higher priority than --job-type\n      --tensorflow        Tensorflow Job, has higher priority than --job-type\n
          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_14","title":"\u6570\u636e\u96c6\u7ba1\u7406","text":"

          baizectl supports managing datasets. Currently it supports listing datasets, making it easy to quickly bind a dataset when training a job.

          (base) jovyan@den-0:~$ baizectl data \nManagement datasets\n\nUsage:\n  baizectl data [flags]\n  baizectl data [command]\n\nAliases:\n  data, dataset, datasets, envs, runtime-envs\n\nAvailable Commands:\n  ls          List datasets\n\nFlags:\n  -h, --help            help for data\n  -o, --output string   Output format. One of: table, json, yaml (default \"table\")\n      --page int        Page number (default 1)\n      --page-size int   Page size (default -1)\n      --search string   Search query\n      --sort string     Sort order\n      --truncate int    Truncate output to the given length, 0 means no truncation (default 50)\n\nUse \"baizectl data [command] --help\" for more information about a command.\n
          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_15","title":"\u67e5\u770b\u6570\u636e\u96c6\u5217\u8868","text":"

          baizectl data \u652f\u6301\u901a\u8fc7 ls \u547d\u4ee4\u67e5\u770b\u6570\u636e\u96c6\u5217\u8868\uff0c\u9ed8\u8ba4\u663e\u793a table \u683c\u5f0f\uff0c\u7528\u6237\u53ef\u4ee5\u901a\u8fc7 -o \u53c2\u6570\u6307\u5b9a\u8f93\u51fa\u683c\u5f0f\u3002

          (base) jovyan@den-0:~$ baizectl data ls\n NAME             TYPE  URI                                                    PHASE \n fashion-mnist    GIT   https://gitee.com/samzong_lu/fashion-mnist.git         READY \n sample-code      GIT   https://gitee.com/samzong_lu/training-sample-code....  READY \n training-output  PVC   pvc://training-output                                  READY \n

          When submitting a training job, use the -d or --datasets flag to specify datasets, for example:

          baizectl job submit --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --datasets sample-code:/home/jovyan/code \\\n    -- sleep 1000\n

          To mount multiple datasets at the same time, use the following format:

          baizectl job submit --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --datasets sample-code:/home/jovyan/code fashion-mnist:/home/jovyan/data \\\n    -- sleep 1000\n
          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_16","title":"\u67e5\u770b\u4f9d\u8d56\u5e93\uff08\u73af\u5883\uff09","text":"

          \u73af\u5883 runtime-env \u662f\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u7279\u8272\u73af\u5883\u7ba1\u7406\u80fd\u529b\uff0c\u901a\u8fc7\u5c06\u6a21\u578b\u5f00\u53d1\u3001\u8bad\u7ec3\u4efb\u52a1\u4ee5\u53ca\u63a8\u7406\u4e2d\u6240\u9700\u7684\u4f9d\u8d56\u5e93\u89e3\u8026\uff0c \u63d0\u4f9b\u4e86\u4e00\u79cd\u66f4\u52a0\u7075\u6d3b\u7684\u4f9d\u8d56\u5e93\u7ba1\u7406\u65b9\u5f0f\uff0c\u65e0\u9700\u91cd\u590d\u6784\u5efa\u590d\u6742\u7684 Docker \u955c\u50cf\uff0c\u53ea\u9700\u9009\u62e9\u5408\u9002\u7684\u73af\u5883\u5373\u53ef\u3002

          \u540c\u65f6 runtime-env \u652f\u6301\u70ed\u66f4\u65b0\uff0c\u52a8\u6001\u5347\u7ea7\uff0c\u65e0\u9700\u91cd\u65b0\u6784\u5efa\u955c\u50cf\uff0c\u5373\u53ef\u66f4\u65b0\u73af\u5883\u4f9d\u8d56\u5e93\u3002

          baizectl data \u652f\u6301\u901a\u8fc7 runtime-env \u547d\u4ee4\u67e5\u770b\u73af\u5883\u5217\u8868\uff0c\u9ed8\u8ba4\u663e\u793a table \u683c\u5f0f\uff0c\u7528\u6237\u53ef\u4ee5\u901a\u8fc7 -o \u53c2\u6570\u6307\u5b9a\u8f93\u51fa\u683c\u5f0f\u3002

          (base) jovyan@den-0:~$ baizectl data ls --runtime-env \n NAME               TYPE   URI                                                    PHASE      \n fashion-mnist      GIT    https://gitee.com/samzong_lu/fashion-mnist.git         READY      \n sample-code        GIT    https://gitee.com/samzong_lu/training-sample-code....  READY      \n training-output    PVC    pvc://training-output                                  READY      \n tensorflow-sample  CONDA  conda://python?version=3.12.3                          PROCESSING \n

          When submitting a training job, use the --runtime-env flag to specify the environment, for example:

          baizectl job submit --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --runtime-env tensorflow-sample \\\n    -- sleep 1000\n
          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_17","title":"\u9ad8\u7ea7\u7528\u6cd5","text":"

          baizectl \u652f\u6301\u66f4\u591a\u9ad8\u7ea7\u7528\u6cd5\uff0c\u4f8b\u5982\u81ea\u52a8\u8865\u5168\u811a\u672c\u751f\u6210\u3001\u4f7f\u7528\u7279\u5b9a\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u3001\u4f7f\u7528\u7279\u5b9a\u5de5\u4f5c\u7a7a\u95f4\u7b49\u3002

          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_18","title":"\u81ea\u52a8\u8865\u5168\u811a\u672c\u751f\u6210","text":"
          baizectl completion bash > /etc/bash_completion.d/baizectl\n

          The command above generates the bash auto-completion script and saves it as /etc/bash_completion.d/baizectl; load it with source /etc/bash_completion.d/baizectl, as sketched below.
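          A minimal sketch of loading the script and making it persist (assuming a bash shell; the second line is an optional convenience):

          source /etc/bash_completion.d/baizectl\necho 'source /etc/bash_completion.d/baizectl' >> ~/.bashrc  # load completion in every new shell\n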

          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_19","title":"\u4f7f\u7528\u7279\u5b9a\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4","text":"
          baizectl job ls --cluster my-cluster --namespace my-namespace\n

          This command lists all jobs in the my-namespace namespace of the my-cluster cluster.

          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_20","title":"\u4f7f\u7528\u7279\u5b9a\u5de5\u4f5c\u7a7a\u95f4","text":"
          baizectl job ls --workspace 123\n
          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_21","title":"\u5e38\u89c1\u95ee\u9898","text":"
          • \u95ee\u9898\uff1a\u4e3a\u4ec0\u4e48\u65e0\u6cd5\u8fde\u63a5\u5230\u670d\u52a1\u5668\uff1f

            \u89e3\u51b3\u65b9\u6cd5\uff1a\u68c0\u67e5 --server \u53c2\u6570\u662f\u5426\u6b63\u786e\u8bbe\u7f6e\uff0c\u5e76\u786e\u4fdd\u7f51\u7edc\u8fde\u63a5\u6b63\u5e38\u3002 \u5982\u679c\u670d\u52a1\u5668\u4f7f\u7528\u81ea\u7b7e\u540d\u8bc1\u4e66\uff0c\u53ef\u4ee5\u4f7f\u7528 --skip-tls-verify \u8df3\u8fc7 TLS \u8bc1\u4e66\u9a8c\u8bc1\u3002

          • \u95ee\u9898\uff1a\u5982\u4f55\u89e3\u51b3\u6743\u9650\u4e0d\u8db3\u7684\u95ee\u9898\uff1f

            \u89e3\u51b3\u65b9\u6cd5\uff1a\u786e\u4fdd\u4f7f\u7528\u6b63\u786e\u7684 --token \u53c2\u6570\u767b\u5f55\uff0c\u5e76\u68c0\u67e5\u5f53\u524d\u7528\u6237\u662f\u5426\u5177\u6709\u76f8\u5e94\u7684\u64cd\u4f5c\u6743\u9650\u3002

          • \u95ee\u9898\uff1a\u4e3a\u4ec0\u4e48\u65e0\u6cd5\u5217\u51fa\u6570\u636e\u96c6\uff1f

            \u89e3\u51b3\u65b9\u6cd5\uff1a\u68c0\u67e5\u547d\u540d\u7a7a\u95f4\u548c\u5de5\u4f5c\u533a\u662f\u5426\u6b63\u786e\u8bbe\u7f6e\uff0c\u786e\u4fdd\u5f53\u524d\u7528\u6237\u6709\u6743\u9650\u8bbf\u95ee\u8fd9\u4e9b\u8d44\u6e90\u3002

          "},{"location":"admin/baize/developer/notebooks/baizectl.html#_22","title":"\u7ed3\u8bed","text":"

          \u901a\u8fc7\u4ee5\u4e0a\u6307\u5357\uff0c\u7528\u6237\u53ef\u4ee5\u5feb\u901f\u4e0a\u624b baizectl \u547d\u4ee4\uff0c\u5e76\u5728\u5b9e\u9645\u5e94\u7528\u4e2d\u9ad8\u6548\u5730\u7ba1\u7406 AI \u5e73\u53f0\u8d44\u6e90\u3002 \u5982\u679c\u6709\u4efb\u4f55\u7591\u95ee\u6216\u95ee\u9898\uff0c\u5efa\u8bae\u53c2\u8003 baizectl [command] --help \u83b7\u53d6\u66f4\u591a\u8be6\u7ec6\u4fe1\u606f\u3002

          "},{"location":"admin/baize/developer/notebooks/baizess.html","title":"baizess \u6362\u6e90\u5de5\u5177\u4f7f\u7528\u6307\u5357","text":"

          baizess \u662f AI Lab \u6a21\u5757\u4e2d Notebook \u5185\u7f6e\u7684\u5f00\u7bb1\u5373\u7528\u7684\u6362\u6e90\u5c0f\u5de5\u5177\u3002\u5b83\u63d0\u4f9b\u4e86\u7b80\u6d01\u7684\u547d\u4ee4\u884c\u754c\u9762\uff0c\u65b9\u4fbf\u7528\u6237\u7ba1\u7406\u5404\u79cd\u7f16\u7a0b\u73af\u5883\u7684\u5305\u7ba1\u7406\u5668\u6e90\u3002 \u901a\u8fc7 baizess\uff0c\u7528\u6237\u53ef\u4ee5\u8f7b\u677e\u5207\u6362\u5e38\u7528\u5305\u7ba1\u7406\u5668\u7684\u6e90\uff0c\u786e\u4fdd\u987a\u5229\u8bbf\u95ee\u6700\u65b0\u7684\u5e93\u548c\u4f9d\u8d56\u9879\u3002\u8be5\u5de5\u5177\u901a\u8fc7\u7b80\u5316\u5305\u6e90\u7ba1\u7406\u6d41\u7a0b\uff0c\u63d0\u5347\u4e86\u5f00\u53d1\u8005\u548c\u6570\u636e\u79d1\u5b66\u5bb6\u7684\u5de5\u4f5c\u6548\u7387\u3002

          "},{"location":"admin/baize/developer/notebooks/baizess.html#_1","title":"\u5b89\u88c5","text":"

          \u76ee\u524d\uff0cbaizess \u5df2\u7ecf\u96c6\u6210\u5728 AI Lab \u4e2d\u3002 \u4f60\u5728\u521b\u5efa Notebook \u540e\uff0c\u5373\u53ef\u5728 Notebook \u4e2d\u76f4\u63a5\u4f7f\u7528 baizess\u3002

          "},{"location":"admin/baize/developer/notebooks/baizess.html#_2","title":"\u5feb\u901f\u4e0a\u624b","text":""},{"location":"admin/baize/developer/notebooks/baizess.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"

          baizess \u547d\u4ee4\u7684\u57fa\u672c\u4fe1\u606f\u5982\u4e0b\uff1a

          jovyan@19d0197587cc:/$ baizess\nsource switch tool\n\nUsage:\n  baizess [command] [package-manager]\n\nAvailable Commands:\n  set     Switch the source of specified package manager to current fastest source\n  reset   Reset the source of specified package manager to default source\n\nAvailable Package-managers:\n  apt     (require root privilege)\n  conda\n  pip\n
          "},{"location":"admin/baize/developer/notebooks/baizess.html#_4","title":"\u547d\u4ee4\u683c\u5f0f","text":"

          baizess \u547d\u4ee4\u7684\u57fa\u672c\u683c\u5f0f\u5982\u4e0b\uff1a

          baizess [command] [package-manager]\n

          Here, [command] is the operation to perform, and [package-manager] specifies the package manager to operate on.
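          For example, to switch pip to the fastest mirror and later restore conda to its default source (using only the commands and package managers documented below):

          baizess set pip\nbaizess reset conda\n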

          "},{"location":"admin/baize/developer/notebooks/baizess.html#command","title":"command","text":"
          • set: back up the current source, run a speed test, and switch the specified package manager's source to the fastest domestic mirror measured.
          • reset: reset the specified package manager to its default source.
          "},{"location":"admin/baize/developer/notebooks/baizess.html#package-manager","title":"Currently Supported package-managers","text":"
          • apt (switching and resetting the source requires root privileges; the original source is backed up to /etc/apt/backup/)
          • conda (the updated source info is written to ~/.condarc)
          • pip
          "},{"location":"admin/baize/developer/notebooks/create.html","title":"\u521b\u5efa Notebook","text":"

          Notebook \u63d0\u4f9b\u4e86\u4e00\u4e2a\u5728\u7ebf\u7684 Web \u4ea4\u4e92\u5f0f\u7f16\u7a0b\u73af\u5883\uff0c\u65b9\u4fbf\u5f00\u53d1\u8005\u5feb\u901f\u8fdb\u884c\u6570\u636e\u79d1\u5b66\u548c\u673a\u5668\u5b66\u4e60\u5b9e\u9a8c\u3002

          \u8fdb\u5165\u5f00\u53d1\u8005\u63a7\u5236\u53f0\u540e\uff0c\u5f00\u53d1\u8005\u53ef\u4ee5\u5728\u4e0d\u540c\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u4e2d\u521b\u5efa\u548c\u7ba1\u7406 Notebook\u3002

          1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb Notebooks \uff0c\u8fdb\u5165 Notebook \u5217\u8868\u3002\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae\u3002

          2. \u7cfb\u7edf\u4f1a\u9884\u5148\u586b\u5145\u57fa\u7840\u914d\u7f6e\u6570\u636e\uff0c\u5305\u62ec\u8981\u90e8\u7f72\u7684\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u3001Notebook \u955c\u50cf\u5730\u5740\u3001\u961f\u5217\u3001\u8d44\u6e90\u3001\u7528\u6237\u76ee\u5f55\u7b49\u3002 \u8c03\u6574\u8fd9\u4e9b\u53c2\u6570\u540e\u70b9\u51fb \u786e\u5b9a \u3002

          3. \u521a\u521b\u5efa\u7684 Notebook \u72b6\u6001\u4e3a \u7b49\u5f85\u4e2d \uff0c\u7247\u523b\u540e\u5c06\u53d8\u4e3a \u8fd0\u884c\u4e2d \uff0c\u9ed8\u8ba4\u6700\u65b0\u7684\u4f4d\u4e8e\u5217\u8868\u9876\u90e8\u3002

          4. \u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u6267\u884c\u66f4\u591a\u64cd\u4f5c\uff1a\u66f4\u65b0\u53c2\u6570\u3001\u542f\u52a8/\u6682\u505c\u3001\u514b\u9686 Notebook \u3001\u67e5\u770b\u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\u548c\u5220\u9664\u3002

          Note

          \u5982\u679c\u9009\u62e9\u7eaf CPU \u8d44\u6e90\u540e\uff0c\u53d1\u73b0\u6302\u8f7d\u4e86\u8282\u70b9\u4e0a\u7684\u6240\u6709 GPU \u5361\uff0c\u53ef\u4ee5\u5c1d\u8bd5\u6dfb\u52a0 container env \u6765\u89e3\u51b3\u6b64\u95ee\u9898\uff1a

          NVIDIA_VISIBLE_DEVICES=\"\"\n
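          For reference, a minimal sketch of how this variable would appear in the resulting container spec (in practice you set it through the Notebook's environment-variable configuration rather than editing the Pod YAML directly):

          env:\n  - name: NVIDIA_VISIBLE_DEVICES\n    value: \"\"   # hide all GPUs from the container\n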
          "},{"location":"admin/baize/developer/notebooks/delete.html","title":"\u5220\u9664 Notebook","text":"

          \u5982\u679c\u53d1\u73b0 Notebook \u5197\u4f59\u3001\u8fc7\u671f\u6216\u56e0\u5176\u4ed6\u7f18\u6545\u4e0d\u518d\u9700\u8981\uff0c\u53ef\u4ee5\u4ece Notebook \u5217\u8868\u4e2d\u5220\u9664\u3002

          1. \u5728 Notebook \u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u5220\u9664 \u3002

          2. \u5728\u5f39\u7a97\u4e2d\u786e\u8ba4\u8981\u5220\u9664\u7684\u4efb\u52a1\uff0c\u8f93\u5165 Notebook \u540d\u79f0\u540e\u70b9\u51fb \u5220\u9664 \u3002

          3. \u5c4f\u5e55\u63d0\u793a\u5220\u9664\u6210\u529f\uff0c\u8be5 Notebook \u4ece\u5217\u8868\u4e2d\u6d88\u5931\u3002

          Caution

          Notebook \u4e00\u65e6\u5220\u9664\u5c06\u4e0d\u53ef\u6062\u590d\uff0c\u8bf7\u8c28\u614e\u64cd\u4f5c\u3002

          "},{"location":"admin/baize/developer/notebooks/notebook-auto-close.html","title":"Notebook \u95f2\u7f6e\u8d85\u65f6\u81ea\u52a8\u5173\u673a","text":"

          \u5728\u9ed8\u8ba4\u60c5\u51b5\u4e0b\uff0c\u4e3a\u4f18\u5316\u8d44\u6e90\u5229\u7528\u7387\uff0cAI Lab \u542f\u7528\u4e86 Notebook \u95f2\u7f6e\u8d85\u65f6\u81ea\u52a8\u5173\u673a\u529f\u80fd\uff1b \u5f53 Notebook \u957f\u65f6\u95f4\u65e0\u64cd\u4f5c\u65f6\uff0c\u7cfb\u7edf\u4f1a\u81ea\u52a8\u5173\u673a Notebook\uff0c\u91ca\u653e\u8d44\u6e90\u3002

          • \u4f18\u70b9\uff1a\u901a\u8fc7\u8fd9\u4e2a\u65b9\u5f0f\uff0c\u53ef\u4ee5\u6781\u5927\u51cf\u5c11\u56e0\u4e3a\u957f\u65f6\u95f4\u65e0\u64cd\u4f5c\u5bfc\u81f4\u7684\u8d44\u6e90\u6d6a\u8d39\uff0c\u63d0\u9ad8\u8d44\u6e90\u5229\u7528\u6548\u7387\u3002
          • \u7f3a\u70b9\uff1a\u5982\u679c Notebook \u672a\u914d\u7f6e\u76f8\u5173\u5907\u4efd\u7b56\u7565\uff0c\u53ef\u80fd\u5bfc\u81f4\u6570\u636e\u4e22\u5931\u3002

          Note

          \u5f53\u524d\uff0c\u6b64\u529f\u80fd\u4e3a\u96c6\u7fa4\u7ea7\u522b\u914d\u7f6e\uff0c\u9ed8\u8ba4\u5f00\u542f\uff0c\u9ed8\u8ba4\u8d85\u65f6\u65f6\u957f\u4e3a 30 \u5206\u949f\u3002

          "},{"location":"admin/baize/developer/notebooks/notebook-auto-close.html#_1","title":"\u914d\u7f6e\u53d8\u66f4","text":"

          \u76ee\u524d\u914d\u7f6e\u4fee\u6539\u65b9\u5f0f\u4e3a\u624b\u52a8\u4fee\u6539\uff0c\u540e\u7eed\u4f1a\u63d0\u4f9b\u66f4\u52a0\u4fbf\u6377\u7684\u914d\u7f6e\u65b9\u5f0f\u3002

          \u4fee\u6539\u5de5\u4f5c\u96c6\u7fa4\u4e2d baize-agent \u7684\u90e8\u7f72\u53c2\u6570\uff0c\u6b63\u786e\u7684\u4fee\u6539\u65b9\u5f0f\u4e3a\u66f4\u65b0 Helm \u5e94\u7528\uff0c

          "},{"location":"admin/baize/developer/notebooks/notebook-auto-close.html#_2","title":"\u754c\u9762\u5316\u4fee\u6539","text":"
          1. \u5728\u96c6\u7fa4\u7ba1\u7406\u754c\u9762\u627e\u5230\u5bf9\u5e94\u7684\u5de5\u4f5c\u96c6\u7fa4\uff0c\u8fdb\u5165\u96c6\u7fa4\u8be6\u60c5\uff0c\u9009\u62e9 Helm \u5e94\u7528 \uff0c\u5728 baize-system \u547d\u540d\u7a7a\u95f4\u4e0b\u627e\u5230 baize-agent\uff0c\u5728\u53f3\u4e0a\u89d2\u70b9\u51fb \u66f4\u65b0 \u6309\u94ae\uff1a

          2. \u5982\u56fe\u4fee\u6539 YAML \u4ee3\u7801\uff1a

            ...\nnotebook-controller:\n  culling_enabled: false\n  cull_idle_time: 120\n  idleness_check_period: 1\n...\n
          3. After confirming that the parameters are modified correctly, click Next and OK.

          "},{"location":"admin/baize/developer/notebooks/notebook-auto-close.html#_3","title":"Modify via the Command Line","text":"

          After entering the console, use the helm upgrade command to change the configuration:

          # Set the version\nexport VERSION=0.8.0\n\n# Update the Helm chart.\n# culling_enabled: enable auto-shutdown (default true)\n# cull_idle_time: idle timeout in minutes (default 30; 120 here)\n# idleness_check_period: check interval in minutes (default 1)\nhelm upgrade --install baize-agent baize/baize-agent \\\n    --namespace baize-system \\\n    --create-namespace \\\n    --set global.imageRegistry=release.daocloud.io \\\n    --set notebook-controller.culling_enabled=true \\\n    --set notebook-controller.cull_idle_time=120 \\\n    --set notebook-controller.idleness_check_period=1 \\\n    --version=$VERSION\n

          Note

          To avoid losing data after auto-shutdown, you can upgrade AI Lab to v0.8.0 or later and enable the auto-save-on-shutdown feature in the Notebook configuration.

          "},{"location":"admin/baize/developer/notebooks/notebook-with-envs.html","title":"\u5728 Notebook \u4e2d\u4f7f\u7528\u73af\u5883","text":"

          \u73af\u5883\u7ba1\u7406\u662f AI Lab \u7684\u91cd\u8981\u529f\u80fd\u4e4b\u4e00\uff0c\u901a\u8fc7\u5728 Notebook \u4e2d\u5173\u8054\u5bf9\u5e94\u7684\u73af\u5883\uff0c\u53ef\u4ee5\u5feb\u901f\u5207\u6362\u4e0d\u540c\u7684\u73af\u5883\uff0c\u65b9\u4fbf\u7528\u6237\u8fdb\u884c\u5f00\u53d1\u548c\u8c03\u8bd5\u3002

          "},{"location":"admin/baize/developer/notebooks/notebook-with-envs.html#notebook_1","title":"\u521b\u5efa Notebook \u65f6\u9009\u62e9\u73af\u5883","text":"

          \u5728\u521b\u5efa Notebook \u65f6\uff0c\u53ef\u4ee5\u9009\u62e9\u4e00\u4e2a\u6216\u591a\u4e2a\u7684\u73af\u5883 Envs \u3002\u5982\u679c\u6ca1\u6709\u5408\u9002\u7684\u73af\u5883\uff0c\u53ef\u4ee5\u53bb \u73af\u5883\u7ba1\u7406 \u4e2d\u521b\u5efa\u4e00\u4e2a\u65b0\u7684\u73af\u5883\u3002

          \u5982\u4f55\u521b\u5efa\u73af\u5883\uff0c\u8bf7\u53c2\u8003\u73af\u5883\u7ba1\u7406\u3002

          "},{"location":"admin/baize/developer/notebooks/notebook-with-envs.html#notebook_2","title":"\u5728 Notebook \u4f7f\u7528\u73af\u5883","text":"

          Note

          \u5728 Notebook \u4e2d\uff0c\u6211\u4eec\u540c\u65f6\u63d0\u4f9b\u4e86 conda \u548c mamba \u4e24\u79cd\uff0c\u7528\u6237\u53ef\u4ee5\u6839\u636e\u81ea\u5df1\u7684\u9700\u6c42\u9009\u62e9\u5408\u9002\u7684\u73af\u5883\u7ba1\u7406\u5de5\u5177\u3002

          AI Lab \u4e2d\uff0c\u6211\u4eec\u91c7\u7528\u4e86 conda \u73af\u5883\u7ba1\u7406\u5de5\u5177\uff0c\u7528\u6237\u53ef\u4ee5\u5728 Notebook \u4e2d\u901a\u8fc7 !conda env list \u547d\u4ee4\u67e5\u770b\u5f53\u524d\u73af\u5883\u5217\u8868\u3002

          (base) jovyan@chuanjia-jupyter-0:~/yolov8$ conda env list\n# conda environments:\n#\ndkj-python312-pure       /opt/baize-runtime-env/dkj-python312-pure/conda/envs/dkj-python312-pure\npython-3.10              /opt/baize-runtime-env/python-3.10/conda/envs/python-3.10\ntorch-smaple             /opt/baize-runtime-env/torch-smaple/conda/envs/torch-smaple\nbase                  *  /opt/conda     # currently active environment\nbaize-base               /opt/conda/envs/baize-base\n

          This command lists all conda environments and marks the currently active one with an asterisk (*).

          "},{"location":"admin/baize/developer/notebooks/notebook-with-envs.html#jupyterlab-kernel","title":"Kernel Environment Management in JupyterLab","text":"

          In JupyterLab, the environments associated with a Notebook are automatically bound to the Kernel list, so you can switch environments quickly via the Kernel.

          In this way, you can write and debug algorithms in different environments within a single Notebook.

          "},{"location":"admin/baize/developer/notebooks/notebook-with-envs.html#terminal","title":"Switching Environments in the Terminal","text":"

          AI Lab Notebooks now also support VSCode.

          If you prefer to manage and switch environments in the Terminal, follow the steps below:

          The first time you start and use a Notebook, run conda init first, then run conda activate <env_name> to switch to the desired environment.

          (base) jovyan@chuanjia-jupyter-0:~/yolov8$ conda init bash  # initialize the bash environment (only needed on first use)\nno change     /opt/conda/condabin/conda\n change     /opt/conda/bin/conda\n change     /opt/conda/bin/conda-env\n change     /opt/conda/bin/activate\n change     /opt/conda/bin/deactivate\n change     /opt/conda/etc/profile.d/conda.sh\n change     /opt/conda/etc/fish/conf.d/conda.fish\n change     /opt/conda/shell/condabin/Conda.psm1\n change     /opt/conda/shell/condabin/conda-hook.ps1\n change     /opt/conda/lib/python3.11/site-packages/xontrib/conda.xsh\n change     /opt/conda/etc/profile.d/conda.csh\n change     /home/jovyan/.bashrc\n action taken.\nAdded mamba to /home/jovyan/.bashrc\n\n==> For changes to take effect, close and re-open your current shell. <==\n\n(base) jovyan@chuanjia-jupyter-0:~/yolov8$ source ~/.bashrc  # reload the bash environment\n(base) jovyan@chuanjia-jupyter-0:~/yolov8$ conda activate python-3.10   # switch to the python-3.10 environment\n(python-3.10) jovyan@chuanjia-jupyter-0:~/yolov8$ conda env list\n\n              mamba version : 1.5.1\n# conda environments:\n#\ndkj-python312-pure       /opt/baize-runtime-env/dkj-python312-pure/conda/envs/dkj-python312-pure\npython-3.10           *  /opt/baize-runtime-env/python-3.10/conda/envs/python-3.10    # currently active environment\ntorch-smaple             /opt/baize-runtime-env/torch-smaple/conda/envs/torch-smaple\nbase                     /opt/conda\nbaize-base               /opt/conda/envs/baize-base\n

          If you prefer mamba, use mamba init and mamba activate <env_name> instead.

          "},{"location":"admin/baize/developer/notebooks/notebook-with-envs.html#_1","title":"\u67e5\u770b\u73af\u5883\u4e2d\u7684\u5305","text":"

          \u901a\u8fc7\u4e0d\u540c\u73af\u5883\u7ba1\u7406\u7684\u4e00\u4e2a\u5f88\u91cd\u8981\u7684\u529f\u80fd\u662f\uff0c\u53ef\u4ee5\u5728\u4e00\u4e2a Notebook \u4e2d\u901a\u8fc7\u5feb\u901f\u5207\u6362\u73af\u5883\uff0c\u4f7f\u7528\u4e0d\u7528\u7684\u5305\u3002

          \u6211\u4eec\u53ef\u4ee5\u901a\u8fc7\u4e0b\u65b9\u7684\u547d\u4ee4\u6765\u4f7f\u7528 conda \u67e5\u770b\u5f53\u524d\u73af\u5883\u4e2d\u7684\u6240\u6709\u5305\u3002

          (python-3.10) jovyan@chuanjia-jupyter-0:~/yolov8$ conda list\n# packages in environment at /opt/baize-runtime-env/python-3.10/conda/envs/python-3.10:\n#\n# Name                    Version                   Build  Channel\n_libgcc_mutex             0.1                        main    defaults\n_openmp_mutex             5.1                       1_gnu    defaults\n... # output truncated\nidna                      3.7             py310h06a4308_0    defaults\nipykernel                 6.28.0          py310h06a4308_0    defaults\nipython                   8.20.0          py310h06a4308_0    defaults\nipython_genutils          0.2.0              pyhd3eb1b0_1    defaults\njedi                      0.18.1          py310h06a4308_1    defaults\njinja2                    3.1.4           py310h06a4308_0    defaults\njsonschema                4.19.2          py310h06a4308_0    defaults\njsonschema-specifications 2023.7.1        py310h06a4308_0    defaults\njupyter_client            7.4.9           py310h06a4308_0    defaults\njupyter_core              5.5.0           py310h06a4308_0    defaults\njupyter_events            0.8.0           py310h06a4308_0    defaults\njupyter_server            2.10.0          py310h06a4308_0    defaults\njupyter_server_terminals  0.4.4           py310h06a4308_1    defaults\njupyterlab_pygments       0.2.2           py310h06a4308_0    defaults\n... # output truncated\nxz                        5.4.6                h5eee18b_1    defaults\nyaml                      0.2.5                h7b6447c_0    defaults\nzeromq                    4.3.5                h6a678d5_0    defaults\nzlib                      1.2.13               h5eee18b_1    defaults\n
          "},{"location":"admin/baize/developer/notebooks/notebook-with-envs.html#_2","title":"\u66f4\u65b0\u73af\u5883\u7684\u5305","text":"

          \u76ee\u524d\uff0c\u53ef\u4ee5\u901a\u8fc7\u5728 AI Lab \u7684\u754c\u9762\u4e2d \u73af\u5883\u7ba1\u7406 \u6765\u66f4\u65b0\u73af\u5883\u4e2d\u7684\u5305\u3002

          "},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html","title":"Notebook SSH \u8bbf\u95ee\u6307\u5357","text":"

          AI Lab \u63d0\u4f9b\u7684 Notebook \u652f\u6301\u5728\u672c\u5730\u901a\u8fc7 SSH \u7684\u65b9\u5f0f\u8bbf\u95ee\uff1b

          \u901a\u8fc7\u7b80\u5355\u7684\u914d\u7f6e\uff0c\u5373\u53ef\u4f7f\u7528 SSH \u8bbf\u95ee Jupyter Notebook \u7684\u529f\u80fd\u3002 \u65e0\u8bba\u60a8\u662f\u4f7f\u7528 Windows\u3001Mac \u8fd8\u662f Linux \u64cd\u4f5c\u7cfb\u7edf\uff0c\u90fd\u53ef\u4ee5\u6309\u7167\u4ee5\u4e0b\u6b65\u9aa4\u8fdb\u884c\u64cd\u4f5c\u3002

          "},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#ssh","title":"\u914d\u7f6e SSH \u8bbf\u95ee\u51ed\u8bc1","text":""},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#ssh_1","title":"\u751f\u6210 SSH \u5bc6\u94a5\u5bf9","text":"

          \u9996\u5148\uff0c\u60a8\u9700\u8981\u5728\u60a8\u7684\u8ba1\u7b97\u673a\u4e0a\u751f\u6210 SSH \u516c\u94a5\u548c\u79c1\u94a5\u5bf9\u3002\u8fd9\u4e2a\u5bc6\u94a5\u5bf9\u5c06\u7528\u4e8e\u8ba4\u8bc1\u8fc7\u7a0b\uff0c\u786e\u4fdd\u5b89\u5168\u8bbf\u95ee\u3002

          Mac/LinuxWindows
          1. \u6253\u5f00\u7ec8\u7aef
          2. \u8f93\u5165\u547d\u4ee4\uff1a

            ssh-keygen -t rsa -b 4096\n
          3. When prompted \u201cEnter a file in which to save the key\u201d, press Enter to use the default path, or specify a new one.

          4. Next, you will be prompted for a passphrase (optional), which adds an extra layer of security. If you set one, remember it, because it will be required every time the key is used.
          1. Install Git Bash (if you have not already)
          2. Open Git Bash
          3. Enter the command:

            ssh-keygen -t rsa -b 4096\n
          4. Follow the same steps as Mac/Linux

          "},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#ssh_2","title":"\u6dfb\u52a0 SSH \u516c\u94a5\u5230\u4e2a\u4eba\u4e2d\u5fc3","text":"

          Note

          \u5177\u4f53\u64cd\u4f5c\u53ef\u4ee5\u53c2\u8003\uff1a\u914d\u7f6e SSH \u516c\u94a5

          1. \u6253\u5f00\u751f\u6210\u7684\u516c\u94a5\u6587\u4ef6\uff0c\u901a\u5e38\u4f4d\u4e8e ~/.ssh/id_rsa.pub\uff08\u5982\u679c\u60a8\u6ca1\u6709\u66f4\u6539\u9ed8\u8ba4\u8def\u5f84\uff09
          2. \u590d\u5236\u516c\u94a5\u5185\u5bb9
          3. \u767b\u5f55\u5230\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0, \u7136\u540e\u53f3\u4e0a\u89d2\u5e10\u53f7\u70b9\u5f00\uff0c\u9009\u62e9\u4e2a\u4eba\u4e2d\u5fc3
          4. \u5728 SSH \u516c\u94a5\u914d\u7f6e\u9875\uff0c\u6dfb\u52a0\u4f60\u672c\u5730\u751f\u6210\u7684\u516c\u94a5\u6587\u4ef6
          5. \u4fdd\u5b58\u66f4\u6539
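          A quick way to print the key for copying (assuming the default path from step 1):

          cat ~/.ssh/id_rsa.pub\n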
          "},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#notebook-ssh_1","title":"\u5728 Notebook \u4e2d\u5f00\u542f SSH \u8bbf\u95ee","text":"
          1. \u767b\u5f55\u5230 Jupyter Notebook \u7684 Web \u754c\u9762\u3002
          2. \u5bfb\u627e\u60a8\u60f3\u8981\u542f\u7528 SSH \u8bbf\u95ee\u7684 Notebook\u3002
          3. \u5728 Notebook \u7684\u8bbe\u7f6e\u6216\u8be6\u60c5\u9875\u9762\uff0c\u627e\u5230 \u5f00\u542f SSH \u8bbf\u95ee \u7684\u9009\u9879\u5e76\u542f\u7528\u5b83\u3002
          4. \u8bb0\u5f55\u6216\u590d\u5236\u663e\u793a\u7684 SSH \u8bbf\u95ee\u547d\u4ee4\u3002\u8fd9\u4e2a\u547d\u4ee4\u5c06\u7528\u4e8e\u540e\u7eed\u6b65\u9aa4\u4e2d\u7684 SSH \u8fde\u63a5\u3002
          "},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#ssh_3","title":"\u4e0d\u540c\u73af\u5883\u4e0b\u7684 SSH \u8bbf\u95ee\u65b9\u5f0f","text":""},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#_1","title":"\u8bbf\u95ee\u793a\u4f8b","text":"

          \u5047\u8bbe\u60a8\u83b7\u5f97\u7684 SSH \u8bbf\u95ee\u547d\u4ee4\u5982\u4e0b\uff1a

              # ssh {USERNAME}@{CLUSTER}.{NAMESPACE}.{NOTEBOOK_NAME}@{UI_LOGIN_IP} -p {UI_LOGIN_PORT}\n    ssh baizeuser01@gpu-cluster.demo.demo-notebook@10.20.100.201 -p 80 -i private_key\n

          Replace USERNAME with your username, UI_LOGIN_IP with the actual host address, and UI_LOGIN_PORT with the actual port number.

          "},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#windows","title":"Windows","text":"

          PuTTY or Git Bash is recommended for the SSH connection.

          PuTTYGit Bash
          1. Open PuTTY
          2. In the Host Name (or IP address) field, enter mockhost (the actual hostname)
          3. Enter the port number 2222 (the actual port)
          4. Click Open to start the connection
          5. On first connection, you may be asked to verify the server's identity; click Yes
          1. Open Git Bash
          2. Enter the access command:

                # ssh {USERNAME}@{CLUSTER}.{NAMESPACE}.{NOTEBOOK_NAME}@{UI_LOGIN_IP} -p {UI_LOGIN_PORT}\n    ssh baizeuser01@gpu-cluster.demo.demo-notebook@10.20.100.201 -p 80 -i private_key\n
          3. Press Enter

          "},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#maclinux","title":"Mac/Linux","text":"
          1. Open a terminal.
          2. Enter the access command:

                # ssh {USERNAME}@{CLUSTER}.{NAMESPACE}.{NOTEBOOK_NAME}@{UI_LOGIN_IP} -p {UI_LOGIN_PORT}\n    ssh baizeuser01@gpu-cluster.demo.demo-notebook@10.20.100.201 -p 80 -i private_key\n
          3. If the system prompts you to accept the host's identity, type yes.

          "},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#ide","title":"\u914d\u5408 IDE \u5b9e\u73b0\u8fdc\u7a0b\u5f00\u53d1","text":"

          \u9664\u4e86\u4f7f\u7528\u547d\u4ee4\u884c\u5de5\u5177\u8fdb\u884c SSH \u8fde\u63a5\uff0c\u60a8\u8fd8\u53ef\u4ee5\u5229\u7528\u73b0\u4ee3 IDE \u5982 Visual Studio Code (VSCode) \u548c PyCharm \u7684 SSH \u8fdc\u7a0b\u8fde\u63a5\u529f\u80fd\uff0c \u76f4\u63a5\u5728\u672c\u5730 IDE \u4e2d\u5f00\u53d1\u5e76\u5229\u7528\u8fdc\u7a0b\u670d\u52a1\u5668\u7684\u8d44\u6e90\u3002

          \u5728 VSCode \u4e2d\u4f7f\u7528 SSH \u8fdc\u7a0b\u8fde\u63a5\u5728 PyCharm \u4e2d\u4f7f\u7528 SSH \u8fdc\u7a0b\u8fde\u63a5

          VSCode \u901a\u8fc7 Remote - SSH \u6269\u5c55\u652f\u6301 SSH \u8fdc\u7a0b\u8fde\u63a5\uff0c\u5141\u8bb8\u60a8\u76f4\u63a5\u5728\u672c\u5730 VSCode \u73af\u5883\u4e2d\u7f16\u8f91\u8fdc\u7a0b\u670d\u52a1\u5668\u4e0a\u7684\u6587\u4ef6\uff0c\u5e76\u8fd0\u884c\u547d\u4ee4\u3002

          \u64cd\u4f5c\u6b65\u9aa4\u4e3a\uff1a

          1. \u786e\u4fdd\u60a8\u5df2\u5b89\u88c5 VSCode \u548c Remote - SSH \u6269\u5c55\u3002
          2. \u6253\u5f00 VSCode\uff0c\u70b9\u51fb\u5de6\u4fa7\u6d3b\u52a8\u680f\u5e95\u90e8\u7684\u8fdc\u7a0b\u8d44\u6e90\u7ba1\u7406\u5668\u56fe\u6807\u3002
          3. \u9009\u62e9 Remote-SSH: Connect to Host... \u9009\u9879\uff0c\u7136\u540e\u70b9\u51fb + Add New SSH Host...
          4. \u8f93\u5165 SSH \u8fde\u63a5\u547d\u4ee4\uff0c\u4f8b\u5982\uff1a

                # ssh {USERNAME}@{CLUSTER}.{NAMESPACE}.{NOTEBOOK_NAME}@{UI_LOGIN_IP} -p {UI_LOGIN_PORT}\n    ssh baizeuser01@gpu-cluster.demo.demo-notebook@10.20.100.201 -p 80 -i private_key\n
          5. Press Enter. Replace username, mockhost, and 2222 with the actual username, hostname, and port number.

          6. Choose a configuration file to save this SSH host; the default is usually fine.

          Once done, your SSH host is added to the SSH targets list. Click the host to connect. If this is the first connection, you may be prompted to verify the host's fingerprint. After accepting, you will be asked for the passphrase (if your SSH key has one). Once connected, you can edit remote files in VSCode and use remote resources just as you would when developing locally.
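          The saved host becomes an entry in your SSH config file. A minimal sketch of such an entry using the example values above (the alias notebook-demo is hypothetical); afterwards, ssh notebook-demo connects directly:

          Host notebook-demo\n    HostName 10.20.100.201\n    Port 80\n    User baizeuser01@gpu-cluster.demo.demo-notebook\n    IdentityFile ~/.ssh/id_rsa\n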

          PyCharm Professional supports connecting to a remote server over SSH and developing directly in your local PyCharm.

          Steps:

          1. Open PyCharm and open or create a project
          2. Select File -> Settings (on Mac, PyCharm -> Preferences)
          3. In the settings window, navigate to Project: YourProjectName -> Python Interpreter
          4. Click the gear icon in the upper-right corner and select Add...

            • In the pop-up window, select SSH Interpreter
            • Enter the remote host information: hostname (mockhost), port (2222), username (username). Replace these placeholders with your actual values.
            • Click Next; PyCharm will try to connect to the remote server. If the connection succeeds, you will be asked for a password or to select a private-key file.
          5. After configuration, click Finish. PyCharm will now use the Python interpreter on the remote server.

          "},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#_2","title":"\u5b89\u5168\u9650\u5236","text":"

          \u5728\u540c\u4e00\u4e2a Workspace \u5185\uff0c\u4efb\u610f\u7528\u6237\u90fd\u53ef\u4ee5\u901a\u8fc7\u81ea\u5df1\u7684 SSH \u8bbf\u95ee\u51ed\u8bc1\u6765\u767b\u5f55\u5230\u542f\u7528\u4e86 SSH \u7684 Notebook\u3002 \u8fd9\u610f\u5473\u7740\uff0c\u53ea\u8981\u7528\u6237\u914d\u7f6e\u4e86\u81ea\u5df1\u7684 SSH \u516c\u94a5\u5230\u4e2a\u4eba\u4e2d\u5fc3\uff0c\u5e76\u4e14 Notebook \u542f\u7528\u4e86 SSH \u8bbf\u95ee\uff0c\u5c31\u53ef\u4ee5\u4f7f\u7528 SSH \u8fdb\u884c\u5b89\u5168\u8fde\u63a5\u3002

          \u8bf7\u6ce8\u610f\uff0c\u4e0d\u540c\u7528\u6237\u7684\u8bbf\u95ee\u6743\u9650\u53ef\u80fd\u4f1a\u6839\u636e Workspace \u7684\u914d\u7f6e\u800c\u6709\u6240\u4e0d\u540c\u3002\u786e\u4fdd\u60a8\u4e86\u89e3\u5e76\u9075\u5b88\u60a8\u6240\u5728\u7ec4\u7ec7\u7684\u5b89\u5168\u548c\u8bbf\u95ee\u7b56\u7565\u3002

          \u901a\u8fc7\u9075\u5faa\u4e0a\u8ff0\u6b65\u9aa4\uff0c\u60a8\u5e94\u8be5\u80fd\u591f\u6210\u529f\u914d\u7f6e\u5e76\u4f7f\u7528 SSH \u8bbf\u95ee Jupyter Notebook\u3002\u5982\u679c\u9047\u5230\u4efb\u4f55\u95ee\u9898\uff0c\u8bf7\u53c2\u8003\u7cfb\u7edf\u5e2e\u52a9\u6587\u6863\u6216\u8054\u7cfb\u7cfb\u7edf\u7ba1\u7406\u5458\u3002

          "},{"location":"admin/baize/developer/notebooks/start-pause.html","title":"\u542f\u52a8\u548c\u6682\u505c Notebook","text":"

          Notebook \u521b\u5efa\u6210\u529f\u540e\uff0c\u901a\u5e38\u4f1a\u6709\u51e0\u4e2a\u72b6\u6001\uff1a

          • \u7b49\u5f85\u4e2d
          • \u8fd0\u884c\u4e2d
          • \u5df2\u505c\u6b62

          \u5982\u679c\u67d0\u4e2a Notebook \u7684\u72b6\u6001\u4e3a \u5df2\u505c\u6b62 \uff0c\u5728\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u542f\u52a8 \u3002

          \u6b64 Notebook \u5c06\u8fdb\u5165\u8fd0\u884c\u961f\u5217\u4e2d\uff0c\u72b6\u6001\u53d8\u4e3a \u7b49\u5f85\u4e2d \uff0c\u5982\u679c\u4e00\u5207\u6b63\u5e38\uff0c\u7247\u523b\u540e\u5176\u72b6\u6001\u5c06\u53d8\u4e3a \u8fd0\u884c\u4e2d \u3002

          \u5982\u679c\u4f7f\u7528\u7ed3\u675f\uff0c\u53ef\u4ee5\u4ece\u83dc\u5355\u4e2d\u9009\u62e9 \u6682\u505c \uff0c\u5c06\u5176\u72b6\u6001\u53d8\u4e3a \u5df2\u505c\u6b62 \u3002

          "},{"location":"admin/baize/developer/notebooks/view.html","title":"Notebook \u5de5\u4f5c\u8d1f\u8f7d","text":"

          \u5982\u679c\u60f3\u8981\u67e5\u770b\u67d0\u4e2a Notebook \u7684\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u53ef\u4ee5\u6267\u884c\u4ee5\u4e0b\u64cd\u4f5c\uff1a

          1. \u5728 Notebook \u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5 \u3002

          2. \u8df3\u8f6c\u5230\u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\uff08StatefulSet\uff09\u5217\u8868\uff0c\u53ef\u4ee5\u67e5\u770b\uff1a

            • \u5bb9\u5668\u7ec4 Pod \u7684\u8fd0\u884c\u72b6\u6001\u3001IP\u3001\u8d44\u6e90\u8bf7\u6c42\u548c\u4f7f\u7528\u60c5\u51b5
            • \u5bb9\u5668\u914d\u7f6e\u4fe1\u606f
            • \u8bbf\u95ee\u65b9\u5f0f\uff1aClusterIP\u3001NodePort
            • \u8c03\u5ea6\u7b56\u7565\uff1a\u8282\u70b9\u548c\u5de5\u4f5c\u8d1f\u8f7d\u7684\u4eb2\u548c\u6027\u3001\u53cd\u4eb2\u548c\u6027
            • \u6807\u7b7e\u4e0e\u6ce8\u89e3\uff1a\u5de5\u4f5c\u8d1f\u8f7d\u3001Pod \u7684\u6807\u7b7e\u4e0e\u6ce8\u89e3\u952e\u503c\u5bf9
            • \u5f39\u6027\u4f38\u7f29\uff1a\u652f\u6301 HPA\u3001CronHPA\u3001VPA \u7b49\u65b9\u5f0f
            • \u4e8b\u4ef6\u5217\u8868\uff1a\u8b66\u544a\u3001\u901a\u77e5\u7b49\u6d88\u606f

          3. \u5728 StatefulSet \u5217\u8868\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507\uff0c\u53ef\u4ee5\u9488\u5bf9 Pod \u6267\u884c\u66f4\u591a\u64cd\u4f5c\u3002

          "},{"location":"admin/baize/oam/index.html","title":"\u8fd0\u7ef4\u7ba1\u7406","text":"

          \u8fd0\u7ef4\u7ba1\u7406\u662f IT \u8fd0\u7ef4\u4eba\u5458\u65e5\u5e38\u7ba1\u7406 IT \u8d44\u6e90\uff0c\u5904\u7406\u5de5\u4f5c\u7684\u7a7a\u95f4\u3002

          \u5728\u8fd9\u91cc\u53ef\u4ee5\u76f4\u89c2\u5730\u4e86\u89e3\u5f53\u524d\u96c6\u7fa4\u3001\u8282\u70b9\u3001CPU\u3001GPU\u3001vGPU \u7b49\u8d44\u6e90\u7684\u4f7f\u7528\u72b6\u51b5\u3002

          "},{"location":"admin/baize/oam/index.html#_2","title":"\u5e38\u89c1\u672f\u8bed","text":"
          • GPU \u5206\u914d\u7387\uff1a\u7edf\u8ba1\u5f53\u524d\u96c6\u7fa4\u5185\u6240\u6709\u672a\u5b8c\u6210\u7684\u4efb\u52a1\u7684 GPU \u5206\u914d\u60c5\u51b5\uff0c\u7edf\u8ba1\u8bf7\u6c42\u7684 GPU\uff08Request\uff09\u4e0e\u603b\u8d44\u6e90\u91cf\uff08Total\uff09\u4e4b\u95f4\u7684\u6bd4\u4f8b\u3002
          • GPU \u5229\u7528\u7387\uff1a\u7edf\u8ba1\u5f53\u524d\u96c6\u7fa4\u4e2d\u6240\u6709\u8fd0\u884c\u4e2d\u7684\u4efb\u52a1\u7684\u5b9e\u9645\u8d44\u6e90\u5229\u7528\u60c5\u51b5\uff0c\u7edf\u8ba1\u5b9e\u9645\u4f7f\u7528\u7684 GPU\uff08Usage\uff09\u4e0e\u603b\u8d44\u6e90\u91cf\uff08Total\uff09\u4e4b\u95f4\u7684\u6bd4\u4f8b\u3002
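          Expressed as formulas (implied by the definitions above; both are typically shown as percentages):

          GPU allocation rate = Request / Total\nGPU utilization     = Usage / Total\n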
          "},{"location":"admin/baize/oam/resource.html","title":"GPU \u5217\u8868","text":"

          \u81ea\u52a8\u5316\u6c47\u603b\u6574\u4e2a\u5e73\u53f0\u4e2d\u7684 GPU \u8d44\u6e90\u4fe1\u606f\uff0c\u63d0\u4f9b\u8be6\u5c3d\u7684 GPU \u8bbe\u5907\u4fe1\u606f\u5c55\u793a\uff0c\u53ef\u67e5\u770b\u5404\u79cd GPU \u5361\u7684\u8d1f\u8f7d\u7edf\u8ba1\u548c\u4efb\u52a1\u8fd0\u884c\u4fe1\u606f\u3002

          \u8fdb\u5165 \u8fd0\u7ef4\u7ba1\u7406 \u540e\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u8d44\u6e90\u7ba1\u7406 -> GPU \u7ba1\u7406 \uff0c\u53ef\u4ee5\u67e5\u770b GPU \u5361\u548c\u4efb\u52a1\u4fe1\u606f\u3002

          "},{"location":"admin/baize/oam/queue/create.html","title":"\u521b\u5efa\u961f\u5217","text":"

          \u5728\u8fd0\u7ef4\u7ba1\u7406\u6a21\u5f0f\u4e2d\uff0c\u961f\u5217\u53ef\u7528\u4e8e\u8c03\u5ea6\u548c\u4f18\u5316\u6279\u5904\u7406\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u5b83\u53ef\u4ee5\u6709\u6548\u5730\u7ba1\u7406\u5728\u96c6\u7fa4\u4e0a\u8fd0\u884c\u7684\u591a\u4e2a\u4efb\u52a1\uff0c\u901a\u8fc7\u961f\u5217\u7cfb\u7edf\u6765\u4f18\u5316\u8d44\u6e90\u5229\u7528\u7387\u3002

          1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u961f\u5217\u7ba1\u7406 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae\u3002

          2. \u7cfb\u7edf\u4f1a\u9884\u5148\u586b\u5145\u57fa\u7840\u8bbe\u7f6e\u6570\u636e\uff0c\u5305\u62ec\u8981\u90e8\u7f72\u7684\u96c6\u7fa4\u3001\u5de5\u4f5c\u7a7a\u95f4\u3001\u6392\u961f\u7b56\u7565\u7b49\u3002 \u8c03\u6574\u8fd9\u4e9b\u53c2\u6570\u540e\u70b9\u51fb \u786e\u5b9a \u3002

          3. \u5c4f\u5e55\u63d0\u793a\u521b\u5efa\uff0c\u8fd4\u56de\u961f\u5217\u7ba1\u7406\u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u7b49\u66f4\u591a\u64cd\u4f5c\u3002

          "},{"location":"admin/baize/oam/queue/delete.html","title":"\u5220\u9664\u961f\u5217","text":"

          \u5728\u8fd0\u7ef4\u7ba1\u7406\u6a21\u5f0f\u4e2d\uff0c\u5982\u679c\u53d1\u73b0\u961f\u5217\u5197\u4f59\u3001\u8fc7\u671f\u6216\u56e0\u5176\u4ed6\u7f18\u6545\u4e0d\u518d\u9700\u8981\uff0c\u53ef\u4ee5\u4ece\u961f\u5217\u5217\u8868\u4e2d\u5220\u9664\u3002

          1. \u5728\u961f\u5217\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u5220\u9664 \u3002

          2. \u5728\u5f39\u7a97\u4e2d\u786e\u8ba4\u8981\u5220\u9664\u7684\u961f\u5217\uff0c\u8f93\u5165\u961f\u5217\u540d\u79f0\u540e\u70b9\u51fb \u5220\u9664 \u3002

          3. \u5c4f\u5e55\u63d0\u793a\u5220\u9664\u6210\u529f\uff0c\u8be5\u961f\u5217\u4ece\u5217\u8868\u4e2d\u6d88\u5931\u3002

          Caution

          A deleted queue cannot be recovered, so proceed with caution.

          "},{"location":"admin/baize/troubleshoot/index.html","title":"\u6545\u969c\u6392\u67e5","text":"

          \u672c\u6587\u5c06\u6301\u7eed\u7edf\u8ba1\u548c\u68b3\u7406 AI Lab \u4f7f\u7528\u8fc7\u7a0b\u53ef\u80fd\u56e0\u73af\u5883\u6216\u64cd\u4f5c\u4e0d\u89c4\u8303\u5f15\u8d77\u7684\u62a5\u9519\uff0c\u4ee5\u53ca\u5728\u4f7f\u7528\u8fc7\u7a0b\u4e2d\u9047\u5230\u67d0\u4e9b\u62a5\u9519\u7684\u95ee\u9898\u5206\u6790\u3001\u89e3\u51b3\u65b9\u6848\u3002

          Warning

          \u672c\u6587\u6863\u4ec5\u9002\u7528\u4e8e AI \u7b97\u529b\u4e2d\u5fc3\u7248\u672c\uff0c\u82e5\u9047\u5230 AI Lab \u7684\u4f7f\u7528\u95ee\u9898\uff0c\u8bf7\u4f18\u5148\u67e5\u770b\u6b64\u6392\u969c\u624b\u518c\u3002

          AI Lab \u5728 AI \u7b97\u529b\u4e2d\u5fc3\u4e2d\u6a21\u5757\u540d\u79f0 baize\uff0c\u63d0\u4f9b\u4e86\u4e00\u7ad9\u5f0f\u7684\u6a21\u578b\u8bad\u7ec3\u3001\u63a8\u7406\u3001\u6a21\u578b\u7ba1\u7406\u7b49\u529f\u80fd\u3002

          "},{"location":"admin/baize/troubleshoot/index.html#_2","title":"\u5e38\u89c1\u6545\u969c\u6848\u4f8b","text":"
          • \u96c6\u7fa4\u4e0b\u62c9\u5217\u8868\u4e2d\u627e\u4e0d\u5230\u96c6\u7fa4
          • Notebook \u4e0d\u53d7\u961f\u5217\u914d\u989d\u63a7\u5236
          • \u961f\u5217\u521d\u59cb\u5316\u5931\u8d25
          "},{"location":"admin/baize/troubleshoot/cluster-not-found.html","title":"\u96c6\u7fa4\u4e0b\u62c9\u5217\u8868\u4e2d\u627e\u4e0d\u5230\u96c6\u7fa4","text":""},{"location":"admin/baize/troubleshoot/cluster-not-found.html#_2","title":"\u95ee\u9898\u73b0\u8c61","text":"

          \u5728 AI Lab \u5f00\u53d1\u63a7\u5236\u53f0\u3001\u8fd0\u7ef4\u63a7\u5236\u53f0\uff0c\u529f\u80fd\u6a21\u5757\u7684\u96c6\u7fa4\u641c\u7d22\u6761\u4ef6\u7684\u4e0b\u62c9\u5217\u8868\u627e\u4e0d\u5230\u60f3\u8981\u7684\u96c6\u7fa4\u3002

          "},{"location":"admin/baize/troubleshoot/cluster-not-found.html#_3","title":"\u95ee\u9898\u5206\u6790","text":"

          \u5728 AI Lab \u4e2d\uff0c\u96c6\u7fa4\u4e0b\u62c9\u5217\u8868\u5982\u679c\u7f3a\u5c11\u4e86\u60f3\u8981\u7684\u96c6\u7fa4\uff0c\u53ef\u80fd\u662f\u7531\u4e8e\u4ee5\u4e0b\u539f\u56e0\u5bfc\u81f4\u7684\uff1a

          • baize-agent \u672a\u5b89\u88c5\u6216\u5b89\u88c5\u4e0d\u6210\u529f\uff0c\u5bfc\u81f4 AI Lab \u65e0\u6cd5\u83b7\u53d6\u96c6\u7fa4\u4fe1\u606f
          • \u5b89\u88c5 baize-agent \u672a\u914d\u7f6e\u96c6\u7fa4\u540d\u79f0\uff0c\u5bfc\u81f4 AI Lab \u65e0\u6cd5\u83b7\u53d6\u96c6\u7fa4\u4fe1\u606f
          • \u5de5\u4f5c\u96c6\u7fa4\u5185\u53ef\u89c2\u6d4b\u7ec4\u4ef6\u5f02\u5e38\uff0c\u5bfc\u81f4\u65e0\u6cd5\u91c7\u96c6\u96c6\u7fa4\u5185\u7684\u6307\u6807\u4fe1\u606f
          "},{"location":"admin/baize/troubleshoot/cluster-not-found.html#_4","title":"\u89e3\u51b3\u529e\u6cd5","text":""},{"location":"admin/baize/troubleshoot/cluster-not-found.html#baize-agent","title":"baize-agent \u672a\u5b89\u88c5\u6216\u5b89\u88c5\u4e0d\u6210\u529f","text":"

          AI Lab \u6709\u4e00\u4e9b\u57fa\u7840\u7ec4\u4ef6\u9700\u8981\u5728\u6bcf\u4e2a\u5de5\u4f5c\u96c6\u7fa4\u5185\u8fdb\u884c\u5b89\u88c5\uff0c\u5982\u679c\u5de5\u4f5c\u96c6\u7fa4\u5185\u672a\u5b89\u88c5 baize-agent \u65f6\uff0c\u53ef\u4ee5\u5728\u754c\u9762\u4e0a\u9009\u62e9\u5b89\u88c5\uff0c\u53ef\u80fd\u4f1a\u5bfc\u81f4\u4e00\u4e9b\u975e\u9884\u671f\u7684\u62a5\u9519\u7b49\u95ee\u9898\u3002

          \u6240\u4ee5\uff0c\u4e3a\u4e86\u4fdd\u969c\u4f7f\u7528\u4f53\u9a8c\uff0c\u53ef\u9009\u62e9\u7684\u96c6\u7fa4\u8303\u56f4\u4ec5\u5305\u542b\u4e86\u5df2\u7ecf\u6210\u529f\u5b89\u88c5\u4e86 baize-agent \u7684\u96c6\u7fa4\u3002

          \u5982\u679c\u662f\u56e0\u4e3a baize-agent \u672a\u5b89\u88c5\u6216\u5b89\u88c5\u5931\u8d25\uff0c\u5219\u4f7f\u7528 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u7ba1\u7406 -> Helm \u5e94\u7528 -> Helm \u6a21\u677f \uff0c\u627e\u5230 baize-agent \u5e76\u5b89\u88c5\u3002

          Note

          \u6b64\u5730\u5740\u5feb\u901f\u8df3\u8f6c https://<ai_host>/kpanda/clusters/<cluster_name>/helm/charts/addon/baize-agent\u3002 \u6ce8\u610f\u5c06 <ai_host> \u66ff\u6362\u4e3a\u5b9e\u9645\u7684 AI \u7b97\u529b\u4e2d\u5fc3\u63a7\u5236\u53f0\u5730\u5740\uff0c<cluster_name> \u66ff\u6362\u4e3a\u5b9e\u9645\u7684\u96c6\u7fa4\u540d\u79f0\u3002

          "},{"location":"admin/baize/troubleshoot/cluster-not-found.html#baize-agent_1","title":"\u5b89\u88c5 baize-agent \u65f6\u672a\u914d\u7f6e\u96c6\u7fa4\u540d\u79f0","text":"

          \u5728\u5b89\u88c5 baize-agent \u65f6\uff0c\u9700\u8981\u6ce8\u610f\u914d\u7f6e\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fd9\u4e2a\u540d\u79f0\u4f1a\u7528\u4e8e\u53ef\u89c2\u6d4b\u6307\u6807\u91c7\u96c6\uff0c \u9ed8\u8ba4\u4e3a\u7a7a\uff0c\u9700\u624b\u5de5\u914d\u7f6e \u3002

          "},{"location":"admin/baize/troubleshoot/cluster-not-found.html#_5","title":"\u5de5\u4f5c\u96c6\u7fa4\u5185\u53ef\u89c2\u6d4b\u7ec4\u4ef6\u5f02\u5e38","text":"

          \u5982\u679c\u96c6\u7fa4\u5185\u53ef\u89c2\u6d4b\u7ec4\u4ef6\u5f02\u5e38\uff0c\u53ef\u80fd\u4f1a\u5bfc\u81f4 AI Lab \u65e0\u6cd5\u83b7\u53d6\u96c6\u7fa4\u4fe1\u606f\uff0c\u8bf7\u68c0\u67e5\u5e73\u53f0\u7684\u53ef\u89c2\u6d4b\u670d\u52a1\u662f\u5426\u6b63\u5e38\u8fd0\u884c\u53ca\u914d\u7f6e\u3002

          • \u68c0\u67e5\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u5185 insight-server \u7ec4\u4ef6\u662f\u5426\u6b63\u5e38\u8fd0\u884c
          • \u68c0\u67e5\u5de5\u4f5c\u96c6\u7fa4\u5185 insight-agent \u7ec4\u4ef6\u662f\u5426\u6b63\u5e38\u8fd0\u884c
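          A quick command-line check (a sketch assuming both components run in the insight-system namespace; adjust the namespace to your installation):

          kubectl get pods -n insight-system | grep insight-server\nkubectl get pods -n insight-system | grep insight-agent\n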
          "},{"location":"admin/baize/troubleshoot/local-queue-initialization-failed.html","title":"\u672c\u5730\u961f\u5217\u521d\u59cb\u5316\u5931\u8d25","text":""},{"location":"admin/baize/troubleshoot/local-queue-initialization-failed.html#_2","title":"\u95ee\u9898\u73b0\u8c61","text":"

          \u5728\u521b\u5efa Notebook\u3001\u8bad\u7ec3\u4efb\u52a1\u6216\u8005\u63a8\u7406\u670d\u52a1\u65f6\uff0c\u5f53\u961f\u5217\u662f\u9996\u6b21\u5728\u8be5\u547d\u540d\u7a7a\u95f4\u4f7f\u7528\u65f6\uff0c\u4f1a\u63d0\u793a\u9700\u8981\u4e00\u952e\u521d\u59cb\u5316\u961f\u5217\uff0c\u4f46\u662f\u521d\u59cb\u5316\u5931\u8d25\u3002

          "},{"location":"admin/baize/troubleshoot/local-queue-initialization-failed.html#_3","title":"\u95ee\u9898\u5206\u6790","text":"

          \u5728 AI Lab \u4e2d\uff0c\u961f\u5217\u7ba1\u7406\u80fd\u529b\u7531 Kueue \u63d0\u4f9b\uff0c \u800c Kueue \u63d0\u4f9b\u4e86 \u4e24\u79cd\u961f\u5217\u7ba1\u7406\u8d44\u6e90\uff1a

          • ClusterQueue \u662f\u96c6\u7fa4\u7ea7\u522b\u7684\u961f\u5217\uff0c\u4e3b\u8981\u7528\u4e8e\u7ba1\u7406\u961f\u5217\u4e2d\u7684\u8d44\u6e90\u914d\u989d\uff0c\u5305\u542b\u4e86 CPU\u3001\u5185\u5b58\u3001GPU \u7b49\u8d44\u6e90
          • LocalQueue \u662f\u547d\u540d\u7a7a\u95f4\u7ea7\u522b\u7684\u961f\u5217\uff0c\u9700\u8981\u6307\u5411\u5230\u4e00\u4e2a ClusterQueue\uff0c\u7528\u4e8e\u4f7f\u7528\u961f\u5217\u4e2d\u7684\u8d44\u6e90\u5206\u914d

          \u5728 AI Lab \u4e2d\uff0c\u5982\u679c\u521b\u5efa\u670d\u52a1\u65f6\uff0c\u53d1\u73b0\u6307\u5b9a\u7684\u547d\u540d\u7a7a\u95f4\u4e0d\u5b58\u5728 LocalQueue\uff0c\u5219\u4f1a\u63d0\u793a\u9700\u8981\u521d\u59cb\u5316\u961f\u5217\u3002

          \u5728\u6781\u5c11\u6570\u60c5\u51b5\u4e0b\uff0c\u53ef\u80fd\u7531\u4e8e\u7279\u6b8a\u539f\u56e0\u4f1a\u5bfc\u81f4 LocalQueue \u521d\u59cb\u5316\u5931\u8d25\u3002

          "},{"location":"admin/baize/troubleshoot/local-queue-initialization-failed.html#_4","title":"\u89e3\u51b3\u529e\u6cd5","text":"

          \u68c0\u67e5 Kueue \u662f\u5426\u6b63\u5e38\u8fd0\u884c\uff0c\u5982\u679c kueue-controller-manager \u672a\u8fd0\u884c\uff0c\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u547d\u4ee4\u67e5\u770b\u3002

          kubectl get deploy kueue-controller-manager -n baize-system\n

          If kueue-controller-manager is not running normally, fix Kueue first.
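          If Kueue itself is healthy, the missing LocalQueue can also be created manually. A minimal sketch using Kueue's public v1beta1 API (the names default-queue, my-namespace, and my-cluster-queue are placeholders):

          apiVersion: kueue.x-k8s.io/v1beta1\nkind: LocalQueue\nmetadata:\n  name: default-queue        # placeholder queue name\n  namespace: my-namespace    # the namespace where initialization failed\nspec:\n  clusterQueue: my-cluster-queue  # an existing ClusterQueue\n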

          "},{"location":"admin/baize/troubleshoot/local-queue-initialization-failed.html#_5","title":"\u53c2\u8003\u8d44\u6599","text":"
          • ClusterQueue
          • LocalQueue
          "},{"location":"admin/baize/troubleshoot/notebook-not-controlled-by-quotas.html","title":"Notebook \u4e0d\u53d7\u961f\u5217\u914d\u989d\u63a7\u5236","text":"

          \u5728 AI Lab \u4e2d\uff0c\u7528\u6237\u5728\u521b\u5efa Notebook \u65f6\uff0c\u53d1\u73b0\u9009\u62e9\u7684\u961f\u5217\u5373\u4f7f\u8d44\u6e90\u4e0d\u8db3\uff0cNotebook \u4f9d\u7136\u53ef\u4ee5\u521b\u5efa\u6210\u529f\u3002

          "},{"location":"admin/baize/troubleshoot/notebook-not-controlled-by-quotas.html#01-kubernetes","title":"\u95ee\u9898 01: Kubernetes \u7248\u672c\u4e0d\u652f\u6301","text":"
          • \u5206\u6790\uff1a

            AI Lab \u4e2d\u7684\u961f\u5217\u7ba1\u7406\u80fd\u529b\u7531 Kueue \u63d0\u4f9b\uff0c Notebook \u670d\u52a1\u662f\u901a\u8fc7 JupyterHub \u63d0\u4f9b\u7684\u3002 JupyterHub \u5bf9 Kubernetes \u7684\u7248\u672c\u8981\u6c42\u8f83\u9ad8\uff0c\u5bf9\u4e8e\u4f4e\u4e8e v1.27 \u7684\u7248\u672c\uff0c\u5373\u4f7f\u5728 AI \u7b97\u529b\u4e2d\u5fc3\u4e2d\u8bbe\u7f6e\u4e86\u961f\u5217\u914d\u989d\uff0c \u7528\u6237\u5728\u521b\u5efa Notebook \u65f6\u4e5f\u9009\u62e9\u4e86\u914d\u989d\uff0c\u4f46 Notebook \u5b9e\u9645\u4e5f\u4e0d\u4f1a\u53d7\u5230\u961f\u5217\u914d\u989d\u7684\u9650\u5236\u3002

          • \u89e3\u51b3\u529e\u6cd5\uff1a\u63d0\u524d\u89c4\u5212\uff0c\u751f\u4ea7\u73af\u5883\u4e2d\u5efa\u8bae\u4f7f\u7528 Kubernetes \u7248\u672c v1.27 \u4ee5\u4e0a\u3002

          • \u53c2\u8003\u8d44\u6599\uff1aJupyter Notebook Documentation

          "},{"location":"admin/baize/troubleshoot/notebook-not-controlled-by-quotas.html#02","title":"\u95ee\u9898 02: \u914d\u7f6e\u672a\u542f\u7528","text":"
          • \u5206\u6790\uff1a

            \u5f53 Kubernetes \u96c6\u7fa4\u7248\u672c \u5927\u4e8e v1.27 \u65f6\uff0cNotebook \u4ecd\u65e0\u6cd5\u53d7\u5230\u961f\u5217\u914d\u989d\u7684\u9650\u5236\u3002

            \u8fd9\u662f\u56e0\u4e3a\uff0cKueue \u9700\u8981\u542f\u7528\u5bf9 enablePlainPod \u652f\u6301\uff0c\u624d\u4f1a\u5bf9 Notebook \u670d\u52a1\u751f\u6548\u3002

          • \u89e3\u51b3\u529e\u6cd5\uff1a\u5728\u5de5\u4f5c\u96c6\u7fa4\u4e2d\u90e8\u7f72 baize-agent \u65f6\uff0c\u542f\u7528 Kueue \u5bf9 enablePlainPod \u7684\u652f\u6301\u3002

          • \u53c2\u8003\u8d44\u6599\uff1aRun Plain Pods as a Kueue-Managed Job
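          A sketch of what this might look like in the baize-agent Helm values (the exact key path is an assumption; consult the chart's values.yaml for the authoritative name):

          # values.yaml (hypothetical key path)\nkueue:\n  enablePlainPod: true\n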

          "},{"location":"admin/ghippo/password.html","title":"\u5bc6\u7801\u91cd\u7f6e","text":"

          \u5982\u679c\u60a8\u5fd8\u8bb0\u5bc6\u7801\uff0c\u53ef\u4ee5\u6309\u672c\u9875\u9762\u8bf4\u660e\u91cd\u7f6e\u5bc6\u7801\u3002

          "},{"location":"admin/ghippo/password.html#_2","title":"\u91cd\u7f6e\u5bc6\u7801\u6b65\u9aa4","text":"

          \u7ba1\u7406\u5458\u6700\u521d\u521b\u5efa\u4e00\u4e2a\u7528\u6237\u65f6\uff0c\u4f1a\u4e3a\u5176\u8bbe\u7f6e\u7528\u6237\u540d\u548c\u5bc6\u7801\u3002 \u8be5\u7528\u6237\u767b\u5f55\u540e\uff0c\u5728 \u4e2a\u4eba\u4e2d\u5fc3 \u586b\u5199\u90ae\u7bb1\u5e76\u4fee\u6539\u5bc6\u7801\u3002 \u82e5\u8be5\u7528\u6237\u672a\u8bbe\u7f6e\u90ae\u7bb1\uff0c\u5219\u53ea\u80fd\u8054\u7cfb\u7ba1\u7406\u5458\u8fdb\u884c\u5bc6\u7801\u91cd\u7f6e\u3002

          1. \u5982\u679c\u7528\u6237\u5fd8\u8bb0\u4e86\u5bc6\u7801\uff0c\u53ef\u4ee5\u5728\u767b\u5f55\u754c\u9762\u70b9\u51fb \u5fd8\u8bb0\u5bc6\u7801 \u3002

          2. \u8f93\u5165\u767b\u5f55\u90ae\u7bb1\uff0c\u70b9\u51fb \u63d0\u4ea4 \u3002

          3. \u5728\u90ae\u7bb1\u4e2d\u627e\u5230\u5bc6\u7801\u91cd\u7f6e\u90ae\u4ef6\uff0c\u70b9\u51fb\u4e0b\u65b9\u94fe\u63a5\u8fdb\u884c\u5bc6\u7801\u91cd\u7f6e\uff0c\u94fe\u63a5\u65f6\u6548 5 \u5206\u949f\u3002

          4. \u5728\u624b\u673a\u7b49\u7ec8\u7aef\u8bbe\u5907\u5b89\u88c5\u652f\u6301 2FA \u52a8\u6001\u53e3\u4ee4\u751f\u6210\u7684\u5e94\u7528\uff08\u5982 Google Authenticator\uff09\uff0c\u6309\u7167\u9875\u9762\u63d0\u793a\u914d\u7f6e\u52a8\u6001\u53e3\u4ee4\u4ee5\u6fc0\u6d3b\u8d26\u6237\uff0c\u70b9\u51fb \u63d0\u4ea4 \u3002

          5. \u8bbe\u7f6e\u65b0\u5bc6\u7801\uff0c\u70b9\u51fb \u63d0\u4ea4 \u3002\u8bbe\u7f6e\u65b0\u5bc6\u7801\u7684\u8981\u6c42\u4e0e\u521b\u5efa\u7528\u6237\u65f6\u7684\u5bc6\u7801\u89c4\u5219\u4e00\u81f4\u3002

          6. \u4fee\u6539\u5bc6\u7801\u6210\u529f\uff0c\u76f4\u63a5\u8df3\u8f6c\u9996\u9875\u3002

          "},{"location":"admin/ghippo/password.html#_3","title":"\u91cd\u7f6e\u5bc6\u7801\u6d41\u7a0b","text":"

          \u6574\u4e2a\u5bc6\u7801\u91cd\u7f6e\u7684\u6d41\u7a0b\u793a\u610f\u56fe\u5982\u4e0b\u3002

          graph TB\n\npass[Forgot password] --> usern[Enter username]\n--> button[Click to send verification email] --> judge1[Is the username correct?]\n\n    judge1 -.correct.-> judge2[Is an email bound?]\n    judge1 -.incorrect.-> tip1[Prompt that the username is incorrect]\n\n        judge2 -.email bound.-> send[Send the reset email]\n        judge2 -.no email bound.-> tip2[Prompt that no email is bound<br>contact the administrator to reset]\n\nsend --> click[Click the link in the email] --> config[Configure the one-time password] --> reset[Reset the password]\n--> success[Password reset successfully]\n\nclassDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;\nclassDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;\nclassDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n\nclass pass,usern,button,tip1,send,tip2,send,click,config,reset,success plain;\nclass judge1,judge2 k8s
          "},{"location":"admin/ghippo/access-control/custom-role.html","title":"\u81ea\u5b9a\u4e49\u89d2\u8272","text":"

          \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u521b\u5efa\u4e09\u79cd\u8303\u56f4\u7684\u81ea\u5b9a\u4e49\u89d2\u8272\uff1a

          • \u5e73\u53f0\u89d2\u8272 \u7684\u6743\u9650\u5bf9\u5e73\u53f0\u6240\u6709\u76f8\u5173\u8d44\u6e90\u751f\u6548
          • \u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272 \u7684\u6743\u9650\u5bf9\u8be5\u7528\u6237\u6240\u5728\u7684\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u7684\u8d44\u6e90\u751f\u6548
          • \u6587\u4ef6\u5939\u89d2\u8272 \u7684\u6743\u9650\u5bf9\u8be5\u7528\u6237\u6240\u5728\u7684\u6587\u4ef6\u5939\u53ca\u5176\u4e0b\u7684\u5b50\u6587\u4ef6\u5939\u548c\u5de5\u4f5c\u7a7a\u95f4\u8d44\u6e90\u751f\u6548
          "},{"location":"admin/ghippo/access-control/custom-role.html#_2","title":"\u521b\u5efa\u5e73\u53f0\u89d2\u8272","text":"

          \u5e73\u53f0\u89d2\u8272\u662f\u7c97\u7c92\u5ea6\u89d2\u8272\uff0c\u80fd\u591f\u5bf9\u6240\u9009\u6743\u9650\u5185\u7684\u6240\u6709\u8d44\u6e90\u751f\u6548\u3002\u5982\u6388\u6743\u540e\u7528\u6237\u53ef\u4ee5\u62e5\u6709\u6240\u6709\u5de5\u4f5c\u7a7a\u95f4\u7684\u67e5\u770b\u6743\u9650\u3001\u6240\u6709\u96c6\u7fa4\u7684\u7f16\u8f91\u6743\u9650\u7b49\uff0c\u800c\u4e0d\u80fd\u9488\u5bf9\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u6216\u67d0\u4e2a\u96c6\u7fa4\u751f\u6548\u3002\u5e73\u53f0\u89d2\u8272\u521b\u5efa\u5b8c\u6210\u540e\u53ef\u4ee5\u5728\u7528\u6237/\u7528\u6237\u7ec4\u5217\u8868\u4e2d\u8fdb\u884c\u6388\u6743\u3002

          1. \u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u89d2\u8272 \uff0c\u70b9\u51fb \u521b\u5efa\u81ea\u5b9a\u4e49\u89d2\u8272 \u3002

          2. \u8f93\u5165\u540d\u79f0\u3001\u63cf\u8ff0\uff0c\u9009\u62e9 \u5e73\u53f0\u89d2\u8272 \uff0c\u52fe\u9009\u89d2\u8272\u6743\u9650\u540e\u70b9\u51fb \u786e\u5b9a \u3002

          3. \u8fd4\u56de\u89d2\u8272\u5217\u8868\uff0c\u641c\u7d22\u521a\u521b\u5efa\u7684\u81ea\u5b9a\u4e49\u89d2\u8272\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u6267\u884c\u590d\u5236\u3001\u7f16\u8f91\u548c\u5220\u9664\u7b49\u64cd\u4f5c\u3002

          4. \u5e73\u53f0\u89d2\u8272\u521b\u5efa\u6210\u529f\u540e\uff0c\u53ef\u4ee5\u53bb\u7528\u6237/\u7528\u6237\u7ec4\u6388\u6743\uff0c\u4e3a\u8fd9\u4e2a\u89d2\u8272\u6dfb\u52a0\u7528\u6237\u548c\u7528\u6237\u7ec4\u3002

          "},{"location":"admin/ghippo/access-control/custom-role.html#_3","title":"\u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272","text":"

          \u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u662f\u7ec6\u7c92\u5ea6\u89d2\u8272\uff0c\u9488\u5bf9\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u751f\u6548\u3002\u5982\u5728\u8be5\u89d2\u8272\u4e2d\u9009\u62e9\u5e94\u7528\u5de5\u4f5c\u53f0\u7684\u5168\u90e8\u6743\u9650\uff0c\u7ed9\u7528\u6237\u5728\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u6388\u4e88\u8be5\u89d2\u8272\u540e\uff0c\u8be5\u7528\u6237\u5c06\u4ec5\u80fd\u5728\u8be5\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u4f7f\u7528\u5e94\u7528\u5de5\u4f5c\u53f0\u76f8\u5173\u7684\u529f\u80fd\uff0c\u800c\u65e0\u6cd5\u4f7f\u7528\u5982\u5fae\u670d\u52a1\u5f15\u64ce\u3001\u4e2d\u95f4\u4ef6\u7b49\u5176\u4ed6\u6a21\u5757\u7684\u80fd\u529b\u3002\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u521b\u5efa\u5b8c\u6210\u540e\uff0c\u53ef\u4ee5\u5728\u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7\u4e2d\u9009\u62e9\u5de5\u4f5c\u7a7a\u95f4\u540e\u8fdb\u884c\u6388\u6743\u3002

          1. \u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u89d2\u8272 \uff0c\u70b9\u51fb \u521b\u5efa\u81ea\u5b9a\u4e49\u89d2\u8272 \u3002

          2. \u8f93\u5165\u540d\u79f0\u3001\u63cf\u8ff0\uff0c\u9009\u62e9 \u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272 \uff0c\u52fe\u9009\u89d2\u8272\u6743\u9650\u540e\u70b9\u51fb \u786e\u5b9a \u3002

          3. \u8fd4\u56de\u89d2\u8272\u5217\u8868\uff0c\u641c\u7d22\u521a\u521b\u5efa\u7684\u81ea\u5b9a\u4e49\u89d2\u8272\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u6267\u884c\u590d\u5236\u3001\u7f16\u8f91\u548c\u5220\u9664\u7b49\u64cd\u4f5c\u3002

          4. \u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u521b\u5efa\u6210\u529f\u540e\uff0c\u53ef\u4ee5\u53bb\u5de5\u4f5c\u7a7a\u95f4\u6388\u6743\uff0c\u8bbe\u5b9a\u8fd9\u4e2a\u89d2\u8272\u53ef\u4ee5\u7ba1\u7406\u54ea\u4e9b\u5de5\u4f5c\u7a7a\u95f4\u3002

          "},{"location":"admin/ghippo/access-control/custom-role.html#_4","title":"\u521b\u5efa\u6587\u4ef6\u5939\u89d2\u8272","text":"

          \u6587\u4ef6\u5939\u89d2\u8272\u9488\u5bf9\u67d0\u4e2a\u6587\u4ef6\u5939\u548c\u8be5\u6587\u4ef6\u5939\u4e0b\u7684\u6240\u6709\u5b50\u6587\u4ef6\u5939\u53ca\u5de5\u4f5c\u7a7a\u95f4\u751f\u6548\u3002\u5982\u5728\u8be5\u89d2\u8272\u4e2d\u9009\u62e9\u5168\u5c40\u7ba1\u7406-\u5de5\u4f5c\u7a7a\u95f4\u548c\u5e94\u7528\u5de5\u4f5c\u53f0\uff0c\u7ed9\u7528\u6237\u5728\u67d0\u4e2a\u6587\u4ef6\u5939\u4e0b\u6388\u4e88\u8be5\u89d2\u8272\u540e\uff0c\u8be5\u7528\u6237\u5c06\u80fd\u591f\u5728\u5176\u4e0b\u7684\u6240\u6709\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u4f7f\u7528\u5e94\u7528\u5de5\u4f5c\u53f0\u7684\u76f8\u5173\u529f\u80fd\uff0c\u800c\u65e0\u6cd5\u4f7f\u7528\u5982\u5fae\u670d\u52a1\u5f15\u64ce\u3001\u4e2d\u95f4\u4ef6\u7b49\u5176\u4ed6\u6a21\u5757\u7684\u80fd\u529b\u3002\u6587\u4ef6\u5939\u89d2\u8272\u521b\u5efa\u5b8c\u6210\u540e\uff0c\u53ef\u4ee5\u5728\u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7\u4e2d\u9009\u62e9\u6587\u4ef6\u5939\u540e\u8fdb\u884c\u6388\u6743\u3002 \u8bf7\u6ce8\u610f\uff1a\u5e94\u7528\u5de5\u4f5c\u53f0\u3001\u591a\u4e91\u7f16\u6392\u3001\u955c\u50cf\u4ed3\u5e93\u3001\u5fae\u670d\u52a1\u5f15\u64ce\u3001\u670d\u52a1\u7f51\u683c\u548c\u4e2d\u95f4\u4ef6\u5747\u4f9d\u8d56\u4e8e\u5de5\u4f5c\u7a7a\u95f4\uff0c\u56e0\u6b64\u5728\u521b\u5efa\u6587\u4ef6\u5939\u89d2\u8272\u65f6\u5927\u90e8\u5206\u573a\u666f\u4e0b\u9700\u8981\u7528\u5230\u5de5\u4f5c\u7a7a\u95f4\uff0c\u8bf7\u6ce8\u610f\u5728\u5168\u5c40\u7ba1\u7406-\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u52fe\u9009\u3002

          1. \u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u89d2\u8272 \uff0c\u70b9\u51fb \u521b\u5efa\u81ea\u5b9a\u4e49\u89d2\u8272 \u3002

          2. \u8f93\u5165\u540d\u79f0\u3001\u63cf\u8ff0\uff0c\u9009\u62e9 \u6587\u4ef6\u5939\u89d2\u8272 \uff0c\u52fe\u9009\u89d2\u8272\u6743\u9650\u540e\u70b9\u51fb \u786e\u5b9a \u3002

          3. \u8fd4\u56de\u89d2\u8272\u5217\u8868\uff0c\u641c\u7d22\u521a\u521b\u5efa\u7684\u81ea\u5b9a\u4e49\u89d2\u8272\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u6267\u884c\u590d\u5236\u3001\u7f16\u8f91\u548c\u5220\u9664\u7b49\u64cd\u4f5c\u3002

          4. \u6587\u4ef6\u5939\u89d2\u8272\u521b\u5efa\u6210\u529f\u540e\uff0c\u53ef\u4ee5\u53bb\u6587\u4ef6\u5939\u6388\u6743\uff0c\u8bbe\u5b9a\u8fd9\u4e2a\u89d2\u8272\u53ef\u4ee5\u7ba1\u7406\u54ea\u4e9b\u6587\u4ef6\u5939\u3002

          "},{"location":"admin/ghippo/access-control/docking.html","title":"\u63a5\u5165\u7ba1\u7406","text":"

          \u5f53\u4e24\u4e2a\u6216\u4e24\u4e2a\u4ee5\u4e0a\u5e73\u53f0\u76f8\u4e92\u5bf9\u63a5\u6216\u5d4c\u5165\u65f6\uff0c\u901a\u5e38\u9700\u8981\u8fdb\u884c\u7528\u6237\u4f53\u7cfb\u6253\u901a\u3002 \u5728\u7528\u6237\u6253\u901a\u8fc7\u7a0b\u4e2d\uff0c \u63a5\u5165\u7ba1\u7406 \u4e3b\u8981\u63d0\u4f9b SSO \u63a5\u5165\u80fd\u529b\uff0c\u5f53\u60a8\u9700\u8981\u5c06\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f5c\u4e3a\u7528\u6237\u6e90\u63a5\u5165\u5ba2\u6237\u7cfb\u7edf\u65f6\uff0c \u60a8\u53ef\u4ee5\u901a\u8fc7 \u63a5\u5165\u7ba1\u7406 \u521b\u5efa SSO \u63a5\u5165\u6765\u5b9e\u73b0\u3002

          "},{"location":"admin/ghippo/access-control/docking.html#sso","title":"\u521b\u5efa SSO \u63a5\u5165","text":"

          \u524d\u63d0\uff1a\u62e5\u6709\u5e73\u53f0\u7ba1\u7406\u5458 Admin \u6743\u9650\u6216\u8005\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u7ba1\u7406\u5458 IAM Owner \u6743\u9650\u3002

          1. \u7ba1\u7406\u5458\u8fdb\u5165 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \uff0c\u9009\u62e9 \u63a5\u5165\u7ba1\u7406 \uff0c\u8fdb\u5165\u63a5\u5165\u7ba1\u7406\u5217\u8868\uff0c\u70b9\u51fb\u53f3\u4e0a\u65b9\u7684 \u521b\u5efa SSO \u63a5\u5165 \u3002

          2. \u5728 \u521b\u5efa SSO \u63a5\u5165 \u9875\u9762\u586b\u5199\u5ba2\u6237\u7aef ID\u3002

            • \u5ba2\u6237\u7aef ID\uff1a\u5bf9\u5e94 client \u540d\u79f0
            • \u5ba2\u6237\u7aef\u8bbf\u95ee\u5730\u5740\uff1a\u7528\u6237\u5b8c\u6210\u767b\u5f55\u5e76\u901a\u8fc7\u8eab\u4efd\u9a8c\u8bc1\u540e\uff0c\u8ba4\u8bc1\u670d\u52a1\u5668\u7528\u6765\u91cd\u5b9a\u5411\u7528\u6237\u7684\u5730\u5740\uff0c\u5373 Callback URL

          3. \u521b\u5efa SSO \u63a5\u5165\u6210\u529f\u540e\uff0c\u5728 \u63a5\u5165\u7ba1\u7406 \u7ba1\u7406\u5217\u8868\uff0c\u70b9\u51fb\u521a\u521b\u5efa\u7684\u5ba2\u6237\u7aef ID \u8fdb\u5165\u8be6\u60c5\uff0c \u590d\u5236\u5ba2\u6237\u7aef ID\u3001\u5bc6\u94a5\u548c\u5355\u70b9\u767b\u5f55 URL \u4fe1\u606f\uff0c\u586b\u5199\u81f3\u5ba2\u6237\u7cfb\u7edf\u5b8c\u6210\u7528\u6237\u4f53\u7cfb\u6253\u901a\u3002

            Note

            realm \u540d\u79f0\u4e3a ghippo\u3002
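
As a rough illustration of how a customer system would use the copied client ID and Callback URL, the sketch below builds a Keycloak-style OIDC authorization request against the ghippo realm. The platform address, client ID, and redirect URI are hypothetical placeholders, and the exact endpoint path should be taken from the single sign-on URL copied above.

```python
# Minimal sketch: building the OIDC authorization request a customer
# system would redirect users to, assuming a Keycloak-style endpoint
# for the ghippo realm. All concrete values below are placeholders.
from urllib.parse import urlencode

base = "https://ai-platform.example.com"   # hypothetical platform address
authorize = f"{base}/auth/realms/ghippo/protocol/openid-connect/auth"

params = {
    "client_id": "my-sso-client",          # the Client ID created above
    "redirect_uri": "https://customer.example.com/callback",  # the Callback URL
    "response_type": "code",               # authorization code flow
    "scope": "openid profile email",
}
print(f"{authorize}?{urlencode(params)}")
```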

          "},{"location":"admin/ghippo/access-control/global.html","title":"\u7cfb\u7edf\u89d2\u8272","text":""},{"location":"admin/ghippo/access-control/global.html#_2","title":"\u9002\u7528\u573a\u666f","text":"

          \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u63d0\u4f9b\u4e86\u9884\u7f6e\u7684\u7cfb\u7edf\u89d2\u8272\uff0c\u5e2e\u52a9\u7528\u6237\u7b80\u5316\u89d2\u8272\u6743\u9650\u7684\u4f7f\u7528\u6b65\u9aa4\u3002

          Note

          \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u63d0\u4f9b\u4e86\u4e09\u79cd\u7c7b\u578b\u7684\u7cfb\u7edf\u89d2\u8272\uff0c\u5206\u522b\u4e3a\u5e73\u53f0\u89d2\u8272\u3001\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u548c\u6587\u4ef6\u5939\u89d2\u8272\u3002

          - \u5e73\u53f0\u89d2\u8272\uff1a\u5bf9\u5e73\u53f0\u4e0a\u6240\u6709\u76f8\u5173\u8d44\u6e90\u5177\u6709\u76f8\u5e94\u6743\u9650\uff0c\u8bf7\u524d\u5f80\u7528\u6237/\u7528\u6237\u7ec4\u6388\u6743\u3002\n- \u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\uff1a\u5bf9\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u5177\u6709\u76f8\u5e94\u6743\u9650\uff0c\u8bf7\u524d\u5f80\u5177\u4f53\u5de5\u4f5c\u7a7a\u95f4\u6388\u6743\u3002\n- \u6587\u4ef6\u5939\u89d2\u8272\uff1a\u5bf9\u67d0\u4e2a\u6587\u4ef6\u5939\u3001\u5b50\u6587\u4ef6\u5939\u53ca\u5176\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u7684\u8d44\u6e90\u5177\u6709\u76f8\u5e94\u6743\u9650\uff0c\u8bf7\u524d\u5f80\u5177\u4f53\u6587\u4ef6\u5939\u6388\u6743\u3002\n
          "},{"location":"admin/ghippo/access-control/global.html#_3","title":"\u5e73\u53f0\u89d2\u8272","text":"

          \u5728\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u4e2d\u9884\u5b9a\u4e49\u4e86 5 \u4e2a\u7cfb\u7edf\u89d2\u8272\uff0c\u5206\u522b\u662f\uff1aAdmin\u3001IAM Owner\u3001Audit Owner\u3001 Kpanda Owner \u548c Workspace and Folder Owner \u3002\u8fd9 5 \u4e2a\u89d2\u8272\u7531\u7cfb\u7edf\u521b\u5efa\uff0c\u7528\u6237\u53ea\u80fd\u4f7f\u7528\u4e0d\u80fd\u4fee\u6539\u3002\u89d2\u8272\u5bf9\u5e94\u7684\u6743\u9650\u5982\u4e0b\uff1a

          \u89d2\u8272\u540d\u79f0 \u89d2\u8272\u7c7b\u578b \u6240\u5c5e\u6a21\u5757 \u89d2\u8272\u6743\u9650 Admin \u7cfb\u7edf\u89d2\u8272 \u5168\u90e8 \u5e73\u53f0\u7ba1\u7406\u5458\uff0c\u7ba1\u7406\u6240\u6709\u5e73\u53f0\u8d44\u6e90\uff0c\u4ee3\u8868\u5e73\u53f0\u7684\u6700\u9ad8\u6743\u9650 IAM Owner \u7cfb\u7edf\u89d2\u8272 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u7684\u7ba1\u7406\u5458\uff0c\u62e5\u6709\u8be5\u670d\u52a1\u4e0b\u7684\u6240\u6709\u6743\u9650\uff0c\u5982\u7ba1\u7406\u7528\u6237/\u7528\u6237\u7ec4\u53ca\u6388\u6743 Audit Owner \u7cfb\u7edf\u89d2\u8272 \u5ba1\u8ba1\u65e5\u5fd7 \u5ba1\u8ba1\u65e5\u5fd7\u7684\u7ba1\u7406\u5458\uff0c\u62e5\u6709\u8be5\u670d\u52a1\u4e0b\u7684\u6240\u6709\u6743\u9650\uff0c\u5982\u8bbe\u7f6e\u5ba1\u8ba1\u65e5\u5fd7\u7b56\u7565\uff0c\u5bfc\u51fa\u5ba1\u8ba1\u65e5\u5fd7 Kpanda Owner \u7cfb\u7edf\u89d2\u8272 \u5bb9\u5668\u7ba1\u7406 \u5bb9\u5668\u7ba1\u7406\u7684\u7ba1\u7406\u5458\uff0c\u62e5\u6709\u8be5\u670d\u52a1\u4e0b\u7684\u6240\u6709\u6743\u9650\uff0c\u5982\u521b\u5efa/\u63a5\u5165\u96c6\u7fa4\uff0c\u90e8\u7f72\u5e94\u7528\uff0c\u7ed9\u7528\u6237/\u7528\u6237\u7ec4\u6388\u4e88\u96c6\u7fa4/\u547d\u540d\u7a7a\u95f4\u76f8\u5173\u7684\u6743\u9650 Workspace and Folder Owner \u7cfb\u7edf\u89d2\u8272 \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7\u7ba1\u7406\u5458\uff0c\u62e5\u6709\u8be5\u670d\u52a1\u4e0b\u7684\u6240\u6709\u6743\u9650\uff0c\u5982\u521b\u5efa\u6587\u4ef6\u5939/\u5de5\u4f5c\u7a7a\u95f4\uff0c\u7ed9\u7528\u6237/\u7528\u6237\u7ec4\u6388\u6743\u6587\u4ef6\u5939/\u5de5\u4f5c\u7a7a\u95f4\u7684\u76f8\u5173\u6743\u9650\uff0c\u5728\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u4f7f\u7528\u5e94\u7528\u5de5\u4f5c\u53f0\u3001\u5fae\u670d\u52a1\u5f15\u64ce\u7b49\u529f\u80fd"},{"location":"admin/ghippo/access-control/global.html#_4","title":"\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272","text":"

Access Control predefines three system roles: Workspace Admin, Workspace Editor, and Workspace Viewer. These three roles are created by the system; users can use them but cannot modify them. Their permissions are as follows:

| Role Name | Role Type | Module | Permissions |
| --- | --- | --- | --- |
| Workspace Admin | System role | Workspace | Management permission on the workspace |
| Workspace Editor | System role | Workspace | Edit permission on the workspace |
| Workspace Viewer | System role | Workspace | Read-only permission on the workspace |
"},{"location":"admin/ghippo/access-control/global.html#_5","title":"Folder Roles","text":"

Access Control predefines three system roles: Folder Admin, Folder Editor, and Folder Viewer. These three roles are created by the system; users can use them but cannot modify them. Their permissions are as follows:

| Role Name | Role Type | Module | Permissions |
| --- | --- | --- | --- |
| Folder Admin | System role | Workspace | Management permission on the folder, its subfolders, and their workspaces |
| Folder Editor | System role | Workspace | Edit permission on the folder, its subfolders, and their workspaces |
| Folder Viewer | System role | Workspace | Read-only permission on the folder, its subfolders, and their workspaces |
"},{"location":"admin/ghippo/access-control/group.html","title":"User Groups","text":"

A user group is a collection of users. By joining a user group, a user inherits the group's role permissions. Granting permissions to users in bulk through user groups makes it easier to manage users and their permissions.

"},{"location":"admin/ghippo/access-control/group.html#_2","title":"Use Cases","text":"

When a user's permissions change, you only need to move the user to the appropriate user group; other users are not affected.

When a user group's permissions change, you only need to modify the group's role permissions for the change to apply to all users in the group.

"},{"location":"admin/ghippo/access-control/group.html#_3","title":"Creating a User Group","text":"

Prerequisite: you hold the platform administrator role (Admin) or the Access Control administrator role (IAM Owner).

1. As an administrator, go to Access Control, select User Groups to open the user group list, and click Create User Group in the upper right.

2. Fill in the user group information on the Create User Group page.

3. Click OK. The user group is created and you return to the user group list; the first row in the list is the newly created group.

"},{"location":"admin/ghippo/access-control/group.html#_4","title":"Granting Permissions to a User Group","text":"

Prerequisite: the user group already exists.

1. As an administrator, go to Access Control, select User Groups to open the user group list, then click ... -> Authorize.

2. On the Authorize page, check the required role permissions (multiple selections allowed).

3. Click OK to finish granting permissions to the user group. You automatically return to the user group list; click a group to view the permissions it has been granted.

"},{"location":"admin/ghippo/access-control/group.html#_5","title":"Adding Users to a User Group","text":"
1. As an administrator, go to Access Control and select User Groups to open the user group list. On the right of a group, click ... -> Add User.

2. On the Add User page, select the users to add (multiple selections allowed). If there are no users to choose from, click Go to Create a New User, create the user first, then return to this page and click Refresh to show the newly created user.

3. Click OK to finish adding users to the user group.

Note

Users in a group inherit the group's permissions; the users who have joined a group can be viewed in the group details.

          "},{"location":"admin/ghippo/access-control/group.html#_6","title":"\u5220\u9664\u7528\u6237\u7ec4","text":"

          \u8bf4\u660e\uff1a\u5220\u9664\u7528\u6237\u7ec4\uff0c\u4e0d\u4f1a\u5220\u9664\u7ec4\u5185\u7684\u7528\u6237\uff0c\u4f46\u7ec4\u5185\u7528\u6237\u5c06\u65e0\u6cd5\u518d\u7ee7\u627f\u8be5\u7ec4\u7684\u6743\u9650

          1. \u7ba1\u7406\u5458\u8fdb\u5165 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \uff0c\u9009\u62e9 \u7528\u6237\u7ec4 \u8fdb\u5165\u7528\u6237\u7ec4\u5217\u8868\uff0c\u5728\u67d0\u4e2a\u7528\u6237\u7ec4\u53f3\u4fa7\uff0c\u70b9\u51fb ... -> \u5220\u9664 \u3002

          2. \u70b9\u51fb \u79fb\u9664 \u5220\u9664\u7528\u6237\u7ec4\u3002

          3. \u8fd4\u56de\u7528\u6237\u7ec4\u5217\u8868\uff0c\u5c4f\u5e55\u4e0a\u65b9\u5c06\u63d0\u793a\u5220\u9664\u6210\u529f\u3002


          "},{"location":"admin/ghippo/access-control/iam.html","title":"\u4ec0\u4e48\u662f\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236","text":"

          IAM\uff08Identity and Access Management\uff0c\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\uff09\u662f\u5168\u5c40\u7ba1\u7406\u7684\u4e00\u4e2a\u91cd\u8981\u6a21\u5757\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u6a21\u5757\u521b\u5efa\u3001\u7ba1\u7406\u548c\u9500\u6bc1\u7528\u6237\uff08\u7528\u6237\u7ec4\uff09\uff0c\u5e76\u4f7f\u7528\u7cfb\u7edf\u89d2\u8272\u548c\u81ea\u5b9a\u4e49\u89d2\u8272\u63a7\u5236\u5176\u4ed6\u7528\u6237\u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u6743\u9650\u3002

          "},{"location":"admin/ghippo/access-control/iam.html#_2","title":"\u4f18\u52bf","text":"
          • \u7b80\u6d01\u6d41\u7545

            \u4f01\u4e1a\u5185\u90e8\u7684\u7ed3\u6784\u548c\u89d2\u8272\u53ef\u80fd\u975e\u5e38\u590d\u6742\uff0c\u9879\u76ee\u3001\u5de5\u4f5c\u5c0f\u7ec4\u53ca\u6388\u6743\u7684\u7ba1\u7406\u90fd\u5728\u4e0d\u65ad\u5730\u53d8\u5316\u3002\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u91c7\u7528\u6e05\u6670\u6574\u6d01\u7684\u9875\u9762\uff0c\u6253\u901a\u7528\u6237\u3001\u7528\u6237\u7ec4\u3001\u89d2\u8272\u4e4b\u95f4\u7684\u6388\u6743\u5173\u7cfb\uff0c\u4ee5\u6700\u77ed\u94fe\u8def\u5b9e\u73b0\u5bf9\u7528\u6237\uff08\u7528\u6237\u7ec4\uff09\u7684\u6388\u6743\u3002

          • \u9002\u5f53\u7684\u89d2\u8272

            \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u4e3a\u6bcf\u4e2a\u5b50\u6a21\u5757\u9884\u5b9a\u4e49\u4e86\u4e00\u4e2a\u7ba1\u7406\u5458\u89d2\u8272\uff0c\u65e0\u9700\u7528\u6237\u7ef4\u62a4\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u5c06\u5e73\u53f0\u9884\u5b9a\u4e49\u7684\u7cfb\u7edf\u89d2\u8272\u6388\u6743\u7ed9\u7528\u6237\uff0c\u5b9e\u73b0\u5e73\u53f0\u7684\u6a21\u5757\u5316\u7ba1\u7406\uff08\u7ec6\u7c92\u5ea6\u6743\u9650\u8bf7\u53c2\u9605\u6743\u9650\u7ba1\u7406\u3002

          • \u4f01\u4e1a\u7ea7\u8bbf\u95ee\u63a7\u5236

            \u5f53\u60a8\u5e0c\u671b\u672c\u4f01\u4e1a\u5458\u5de5\u53ef\u4ee5\u4f7f\u7528\u4f01\u4e1a\u5185\u90e8\u7684\u8ba4\u8bc1\u7cfb\u7edf\u767b\u5f55\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\uff0c\u800c\u4e0d\u9700\u8981\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u521b\u5efa\u5bf9\u5e94\u7684\u7528\u6237\uff0c\u60a8\u53ef\u4ee5\u4f7f\u7528\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u7684\u8eab\u4efd\u63d0\u4f9b\u5546\u529f\u80fd\uff0c\u5efa\u7acb\u60a8\u6240\u5728\u4f01\u4e1a\u4e0e\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u4fe1\u4efb\u5173\u7cfb\uff0c\u901a\u8fc7\u8054\u5408\u8ba4\u8bc1\u4f7f\u5458\u5de5\u4f7f\u7528\u4f01\u4e1a\u5df2\u6709\u8d26\u53f7\u76f4\u63a5\u767b\u5f55\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5b9e\u73b0\u5355\u70b9\u767b\u5f55\u3002

          "},{"location":"admin/ghippo/access-control/iam.html#_3","title":"\u4f7f\u7528\u6d41\u7a0b","text":"

          \u6709\u5173\u8bbf\u95ee\u63a7\u5236\u7684\u5e38\u89c4\u6d41\u7a0b\u4e3a\uff1a

          graph TD\n    login[\u767b\u5f55] --> user[\u521b\u5efa\u7528\u6237]\n    user --> auth[\u4e3a\u7528\u6237\u6388\u6743]\n    auth --> group[\u521b\u5efa\u7528\u6237\u7ec4]\n    group --> role[\u521b\u5efa\u81ea\u5b9a\u4e49\u89d2\u8272]\n    role --> id[\u521b\u5efa\u8eab\u4efd\u63d0\u4f9b\u5546]\n\n classDef plain fill:#ddd,stroke:#fff,stroke-width:4px,color:#000;\n classDef k8s fill:#326ce5,stroke:#fff,stroke-width:4px,color:#fff;\n classDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n class login,user,auth,group,role,id cluster;\n\nclick login \"https://docs.daocloud.io/ghippo/install/login.html\"\nclick user \"https://docs.daocloud.io/ghippo/access-control/user.html\"\nclick auth \"https://docs.daocloud.io/ghippo/access-control/role.html\"\nclick group \"https://docs.daocloud.io/ghippo/access-control/group.html\"\nclick role \"https://docs.daocloud.io/ghippo/access-control/custom-role.html\"\nclick id \"https://docs.daocloud.io/ghippo/access-control/idprovider.html\"
          "},{"location":"admin/ghippo/access-control/idprovider.html","title":"\u8eab\u4efd\u63d0\u4f9b\u5546","text":"

          \u5168\u5c40\u7ba1\u7406\u652f\u6301\u57fa\u4e8e LDAP \u548c OIDC \u534f\u8bae\u7684\u5355\u70b9\u767b\u5f55\uff0c\u5982\u679c\u60a8\u7684\u4f01\u4e1a\u6216\u7ec4\u7ec7\u5df2\u6709\u81ea\u5df1\u7684\u8d26\u53f7\u4f53\u7cfb\uff0c\u540c\u65f6\u5e0c\u671b\u7ba1\u7406\u7ec4\u7ec7\u5185\u7684\u6210\u5458\u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8d44\u6e90\uff0c\u60a8\u53ef\u4ee5\u4f7f\u7528\u5168\u5c40\u7ba1\u7406\u63d0\u4f9b\u7684\u8eab\u4efd\u63d0\u4f9b\u5546\u529f\u80fd\uff0c\u800c\u4e0d\u5fc5\u5728\u60a8\u7684\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4e3a\u6bcf\u4e00\u4f4d\u7ec4\u7ec7\u6210\u5458\u521b\u5efa\u7528\u6237\u540d/\u5bc6\u7801\u3002\u60a8\u53ef\u4ee5\u5411\u8fd9\u4e9b\u5916\u90e8\u7528\u6237\u8eab\u4efd\u6388\u4e88\u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8d44\u6e90\u7684\u6743\u9650\u3002

          "},{"location":"admin/ghippo/access-control/idprovider.html#_2","title":"\u57fa\u672c\u6982\u5ff5","text":"
          • \u8eab\u4efd\u63d0\u4f9b\u5546\uff08Identity Provider\uff0c\u7b80\u79f0 IdP\uff09

            \u8d1f\u8d23\u6536\u96c6\u548c\u5b58\u50a8\u7528\u6237\u8eab\u4efd\u4fe1\u606f\u3001\u7528\u6237\u540d\u3001\u5bc6\u7801\u7b49\uff0c\u5728\u7528\u6237\u767b\u5f55\u65f6\u8d1f\u8d23\u8ba4\u8bc1\u7528\u6237\u7684\u670d\u52a1\u3002\u5728\u4f01\u4e1a\u4e0e\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8fdb\u884c\u8eab\u4efd\u8ba4\u8bc1\u7684\u8fc7\u7a0b\u4e2d\uff0c\u8eab\u4efd\u63d0\u4f9b\u5546\u6307\u4f01\u4e1a\u81ea\u8eab\u7684\u8eab\u4efd\u63d0\u4f9b\u5546\u3002

          • \u670d\u52a1\u63d0\u4f9b\u5546\uff08Service Provider\uff0c\u7b80\u79f0 SP\uff09

            \u670d\u52a1\u63d0\u4f9b\u5546\u901a\u8fc7\u4e0e\u8eab\u4efd\u63d0\u4f9b\u5546 IdP \u5efa\u7acb\u4fe1\u4efb\u5173\u7cfb\uff0c\u4f7f\u7528 IDP \u63d0\u4f9b\u7684\u7528\u6237\u4fe1\u606f\uff0c\u4e3a\u7528\u6237\u63d0\u4f9b\u5177\u4f53\u7684\u670d\u52a1\u3002\u5728\u4f01\u4e1a\u4e0e\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8fdb\u884c\u8eab\u4efd\u8ba4\u8bc1\u7684\u8fc7\u7a0b\u4e2d\uff0c\u670d\u52a1\u63d0\u4f9b\u5546\u6307 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u3002

          • LDAP

            LDAP \u6307\u8f7b\u578b\u76ee\u5f55\u8bbf\u95ee\u534f\u8bae\uff08Lightweight Directory Access Protocol\uff09\uff0c\u5e38\u7528\u4e8e\u5355\u70b9\u767b\u5f55\uff0c\u5373\u7528\u6237\u53ef\u4ee5\u5728\u591a\u4e2a\u670d\u52a1\u4e2d\u4f7f\u7528\u4e00\u4e2a\u8d26\u53f7\u5bc6\u7801\u8fdb\u884c\u767b\u5f55\u3002\u5168\u5c40\u7ba1\u7406\u652f\u6301 LDAP \u8fdb\u884c\u8eab\u4efd\u8ba4\u8bc1\uff0c\u56e0\u6b64\u4e0e\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u901a\u8fc7 LDAP \u534f\u8bae\u5efa\u7acb\u8eab\u4efd\u8ba4\u8bc1\u7684\u4f01\u4e1a IdP \u5fc5\u987b\u652f\u6301 LDAP \u534f\u8bae\u3002\u5173\u4e8e LDAP \u7684\u8be6\u7ec6\u63cf\u8ff0\u8bf7\u53c2\u89c1\uff1a\u6b22\u8fce\u4f7f\u7528 LDAP\u3002

          • OIDC

            OIDC \u662f OpenID Connect \u7684\u7b80\u79f0\uff0c\u662f\u4e00\u4e2a\u57fa\u4e8e OAuth 2.0 \u534f\u8bae\u7684\u8eab\u4efd\u8ba4\u8bc1\u6807\u51c6\u534f\u8bae\u3002\u5168\u5c40\u7ba1\u7406\u652f\u6301\u4f7f\u7528 OIDC \u534f\u8bae\u8fdb\u884c\u8eab\u4efd\u8ba4\u8bc1\uff0c\u56e0\u6b64\u4e0e\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u901a\u8fc7 OIDC \u534f\u8bae\u5efa\u7acb\u8eab\u4efd\u8ba4\u8bc1\u7684\u4f01\u4e1a IdP \u5fc5\u987b\u652f\u6301 OIDC \u534f\u8bae\u3002\u5173\u4e8e OIDC \u7684\u8be6\u7ec6\u63cf\u8ff0\u8bf7\u53c2\u89c1\uff1a\u6b22\u8fce\u4f7f\u7528 OpenID Connect\u3002

          • OAuth 2.0

            OAuth 2.0 \u662f Open Authorization 2.0 \u7684\u7b80\u79f0\uff0c\u662f\u4e00\u79cd\u5f00\u653e\u6388\u6743\u534f\u8bae\uff0c\u6388\u6743\u6846\u67b6\u652f\u6301\u7b2c\u4e09\u65b9\u5e94\u7528\u7a0b\u5e8f\u4ee5\u81ea\u5df1\u7684\u540d\u4e49\u83b7\u53d6\u8bbf\u95ee\u6743\u9650\u3002

          "},{"location":"admin/ghippo/access-control/idprovider.html#_3","title":"\u529f\u80fd\u7279\u6027","text":"
          • \u7ba1\u7406\u5458\u65e0\u9700\u91cd\u65b0\u521b\u5efa\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7528\u6237

            \u4f7f\u7528\u8eab\u4efd\u63d0\u4f9b\u5546\u8fdb\u884c\u8eab\u4efd\u8ba4\u8bc1\u524d\uff0c\u7ba1\u7406\u5458\u9700\u8981\u5728\u4f01\u4e1a\u7ba1\u7406\u7cfb\u7edf\u548c\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u5206\u522b\u4e3a\u7528\u6237\u521b\u5efa\u8d26\u53f7\uff1b\u4f7f\u7528\u8eab\u4efd\u63d0\u4f9b\u5546\u8fdb\u884c\u8eab\u4efd\u8ba4\u8bc1\u540e\uff0c\u4f01\u4e1a\u7ba1\u7406\u5458\u53ea\u9700\u8981\u5728\u4f01\u4e1a\u7ba1\u7406\u7cfb\u7edf\u4e2d\u4e3a\u7528\u6237\u521b\u5efa\u8d26\u53f7\uff0c\u7528\u6237\u5373\u53ef\u540c\u65f6\u8bbf\u95ee\u4e24\u4e2a\u7cfb\u7edf\uff0c\u964d\u4f4e\u4e86\u4eba\u5458\u7ba1\u7406\u6210\u672c\u3002

          • \u7528\u6237\u65e0\u9700\u8bb0\u4f4f\u4e24\u5957\u5e73\u53f0\u8d26\u53f7

            \u4f7f\u7528\u8eab\u4efd\u63d0\u4f9b\u5546\u8fdb\u884c\u8eab\u4efd\u8ba4\u8bc1\u524d\uff0c\u7528\u6237\u8bbf\u95ee\u4f01\u4e1a\u7ba1\u7406\u7cfb\u7edf\u548c\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9700\u8981\u4f7f\u7528\u4e24\u4e2a\u7cfb\u7edf\u7684\u8d26\u53f7\u767b\u5f55\uff1b\u4f7f\u7528\u8eab\u4efd\u63d0\u4f9b\u5546\u8fdb\u884c\u8eab\u4efd\u8ba4\u8bc1\u540e\uff0c\u7528\u6237\u5728\u672c\u4f01\u4e1a\u7ba1\u7406\u7cfb\u7edf\u4e2d\u767b\u5f55\u5373\u53ef\u8bbf\u95ee\u4e24\u4e2a\u7cfb\u7edf\u3002

          "},{"location":"admin/ghippo/access-control/ldap.html","title":"LDAP","text":"

          LDAP \u82f1\u6587\u5168\u79f0\u4e3a Lightweight Directory Access Protocol\uff0c\u5373\u8f7b\u578b\u76ee\u5f55\u8bbf\u95ee\u534f\u8bae\uff0c\u8fd9\u662f\u4e00\u4e2a\u5f00\u653e\u7684\u3001\u4e2d\u7acb\u7684\u5de5\u4e1a\u6807\u51c6\u5e94\u7528\u534f\u8bae\uff0c \u901a\u8fc7 IP \u534f\u8bae\u63d0\u4f9b\u8bbf\u95ee\u63a7\u5236\u548c\u7ef4\u62a4\u5206\u5e03\u5f0f\u4fe1\u606f\u7684\u76ee\u5f55\u4fe1\u606f\u3002

          \u5982\u679c\u60a8\u7684\u4f01\u4e1a\u6216\u7ec4\u7ec7\u5df2\u6709\u81ea\u5df1\u7684\u8d26\u53f7\u4f53\u7cfb\uff0c\u540c\u65f6\u60a8\u7684\u4f01\u4e1a\u7528\u6237\u7ba1\u7406\u7cfb\u7edf\u652f\u6301 LDAP \u534f\u8bae\uff0c\u5c31\u53ef\u4ee5\u4f7f\u7528\u5168\u5c40\u7ba1\u7406\u63d0\u4f9b\u7684\u57fa\u4e8e LDAP \u534f\u8bae\u7684\u8eab\u4efd\u63d0\u4f9b\u5546\u529f\u80fd\uff0c\u800c\u4e0d\u5fc5\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4e3a\u6bcf\u4e00\u4f4d\u7ec4\u7ec7\u6210\u5458\u521b\u5efa\u7528\u6237\u540d/\u5bc6\u7801\u3002 \u60a8\u53ef\u4ee5\u5411\u8fd9\u4e9b\u5916\u90e8\u7528\u6237\u8eab\u4efd\u6388\u4e88\u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8d44\u6e90\u7684\u6743\u9650\u3002

          \u5728\u5168\u5c40\u7ba1\u7406\u4e2d\uff0c\u5176\u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b\uff1a

          1. \u4f7f\u7528\u5177\u6709 admin \u89d2\u8272\u7684\u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\u3002\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u5de6\u4e0b\u89d2\u7684 \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \u3002

          2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u8eab\u4efd\u63d0\u4f9b\u5546 \uff0c\u70b9\u51fb \u521b\u5efa\u8eab\u4efd\u63d0\u4f9b\u5546 \u6309\u94ae\u3002

          3. \u5728 LDAP \u9875\u7b7e\u4e2d\uff0c\u586b\u5199\u4ee5\u4e0b\u5b57\u6bb5\u540e\u70b9\u51fb \u4fdd\u5b58 \uff0c\u5efa\u7acb\u4e0e\u8eab\u4efd\u63d0\u4f9b\u5546\u7684\u4fe1\u4efb\u5173\u7cfb\u53ca\u7528\u6237\u7684\u6620\u5c04\u5173\u7cfb\u3002

            \u5b57\u6bb5 \u63cf\u8ff0 \u7c7b\u578b\uff08Vendor\uff09 \u652f\u6301 LDAP (Lightweight Directory Access Protocol) \u548c AD (Active Directory) \u8eab\u4efd\u63d0\u4f9b\u5546\u540d\u79f0\uff08UI display name\uff09 \u7528\u4e8e\u533a\u5206\u4e0d\u540c\u7684\u8eab\u4efd\u63d0\u4f9b\u5546 \u670d\u52a1\u5668\uff08Connection URL\uff09 LDAP \u670d\u52a1\u7684\u5730\u5740\u548c\u7aef\u53e3\u53f7\uff0c\u5982 ldap://10.6.165.2:30061 \u7528\u6237\u540d\u79f0\uff08Bind DN\uff09 LDAP \u7ba1\u7406\u5458\u7684 DN\uff0cKeycloak \u5c06\u4f7f\u7528\u8be5 DN \u6765\u8bbf\u95ee LDAP \u670d\u52a1\u5668 \u5bc6\u7801\uff08Bind credentials\uff09 LDAP \u7ba1\u7406\u5458\u7684\u5bc6\u7801\u3002\u8be5\u5b57\u6bb5\u53ef\u4ee5\u4ece vault \u4e2d\u83b7\u53d6\u5176\u503c\uff0c\u4f7f\u7528 ${vault.ID} \u683c\u5f0f\u3002 \u7528\u6237 DN\uff08Users DN\uff09 \u60a8\u7684\u7528\u6237\u6240\u5728\u7684 LDAP \u6811\u7684\u5b8c\u6574 DN\u3002\u6b64 DN \u662f LDAP \u7528\u6237\u7684\u7236\u7ea7\u3002\u4f8b\u5982\uff0c\u5047\u8bbe\u60a8\u7684\u5178\u578b\u7528\u6237\u7684 DN \u7c7b\u4f3c\u4e8e\u201cuid='john',ou=users,dc=example,dc=com\u201d\uff0c\u5219\u53ef\u4ee5\u662f\u201cou=users,dc=example,dc=com\u201d\u3002 \u7528\u6237\u5bf9\u8c61\u7c7b\uff08User object classes\uff09 LDAP \u4e2d\u7528\u6237\u7684 LDAP objectClass \u5c5e\u6027\u7684\u6240\u6709\u503c\uff0c\u4ee5\u9017\u53f7\u5206\u9694\u3002\u4f8b\u5982\uff1a\u201cinetOrgPerson\uff0corganizationalPerson\u201d\u3002\u65b0\u521b\u5efa\u7684 Keycloak \u7528\u6237\u5c06\u4e0e\u6240\u6709\u8fd9\u4e9b\u5bf9\u8c61\u7c7b\u4e00\u8d77\u5199\u5165 L\u200b\u200bDAP\uff0c\u5e76\u4e14\u53ea\u8981\u73b0\u6709 LDAP \u7528\u6237\u8bb0\u5f55\u5305\u542b\u6240\u6709\u8fd9\u4e9b\u5bf9\u8c61\u7c7b\uff0c\u5c31\u4f1a\u627e\u5230\u5b83\u4eec\u3002 \u662f\u5426\u542f\u7528TLS\uff08Enable StartTLS\uff09 \u542f\u7528\u540e\u5c06\u52a0\u5bc6\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e0e LDAP \u7684\u8fde\u63a5 \u9884\u8bbe\u6743\u9650\uff08Default permission\uff09 \u540c\u6b65\u540e\u7684\u7528\u6237/\u7528\u6237\u7ec4\u9ed8\u8ba4\u6ca1\u6709\u4efb\u4f55\u6743\u9650 \u5168\u540d\u6620\u5c04\uff08First/Last name mapping\uff09 \u5bf9\u5e94 First name \u548c Last Name \u7528\u6237\u540d\u6620\u5c04\uff08User name mapping\uff09 \u7528\u6237\u552f\u4e00\u7684\u7528\u6237\u540d \u90ae\u7bb1\u6620\u5c04\uff08Mailbox mapping\uff09 \u7528\u6237\u7684\u90ae\u7bb1

            \u9ad8\u7ea7\u914d\u7f6e

            \u5b57\u6bb5 \u63cf\u8ff0 \u662f\u5426\u542f\u7528\uff08Enable or not\uff09 \u9ed8\u8ba4\u542f\u7528\uff0c\u5173\u95ed\u540e\u8be5 LDAP \u914d\u7f6e\u4e0d\u751f\u6548 \u81ea\u52a8\u540c\u6b65\u7528\u6237\uff08Periodic full sync\uff09 \u9ed8\u8ba4\u4e0d\u542f\u7528\uff0c\u542f\u7528\u540e\u53ef\u914d\u7f6e\u540c\u6b65\u5468\u671f\uff0c\u5982\u6bcf\u5c0f\u65f6\u540c\u6b65\u4e00\u6b21 \u6570\u636e\u540c\u6b65\u6a21\u5f0f\uff08Edit mode\uff09 \u53ea\u8bfb\u6a21\u5f0f\u4e0d\u4f1a\u4fee\u6539 LDAP \u7684\u6e90\u6570\u636e\uff1b\u5199\u5165\u6a21\u5f0f\u5728\u5e73\u53f0\u7f16\u8f91\u7528\u6237\u4fe1\u606f\u540e\uff0c\u6570\u636e\u5c06\u540c\u6b65\u56deLDAP \u8bfb\u53d6\u8d85\u65f6\uff08Read timeout\uff09 \u5f53LDAP\u6570\u636e\u91cf\u8f83\u5927\u65f6\uff0c\u8c03\u6574\u8be5\u6570\u503c\u53ef\u4ee5\u6709\u6548\u907f\u514d\u63a5\u53e3\u8d85\u65f6 \u7528\u6237\u5bf9\u8c61\u8fc7\u6ee4\u5668\uff08User LDAP filter\uff09 \u7528\u4e8e\u8fc7\u6ee4\u641c\u7d22\u7528\u6237\u7684\u9644\u52a0 LDAP \u8fc7\u6ee4\u5668\u3002\u5982\u679c\u60a8\u4e0d\u9700\u8981\u989d\u5916\u7684\u8fc7\u6ee4\u5668\uff0c\u8bf7\u5c06\u5176\u7559\u7a7a\u3002\u786e\u4fdd\u5b83\u4ee5\u201c(\u201d\u5f00\u5934\uff0c\u5e76\u4ee5\u201c)\u201d\u7ed3\u5c3e\u3002 \u7528\u6237\u540d\u5c5e\u6027\uff08Username LDAP attribute\uff09 LDAP \u5c5e\u6027\u7684\u540d\u79f0\uff0c\u6620\u5c04\u4e3a Keycloak \u7528\u6237\u540d\u3002\u5bf9\u4e8e\u8bb8\u591a LDAP \u670d\u52a1\u5668\u4f9b\u5e94\u5546\u6765\u8bf4\uff0c\u5b83\u53ef\u4ee5\u662f\u201cuid\u201d\u3002\u5bf9\u4e8e Active Directory\uff0c\u5b83\u53ef\u4ee5\u662f\u201csAMAccountName\u201d\u6216\u201ccn\u201d\u3002\u5e94\u4e3a\u60a8\u60f3\u8981\u4ece LDAP \u5bfc\u5165\u5230 Keycloak \u7684\u6240\u6709 LDAP \u7528\u6237\u8bb0\u5f55\u586b\u5199\u8be5\u5c5e\u6027\u3002 RDN\u5c5e\u6027\uff08RDN LDAP attribute\uff09 LDAP \u5c5e\u6027\u540d\u79f0\uff0c\u4f5c\u4e3a\u5178\u578b\u7528\u6237DN\u7684RDN\uff08\u9876\u7ea7\u5c5e\u6027\uff09\u3002\u901a\u5e38\u5b83\u4e0e\u7528\u6237\u540d LDAP \u5c5e\u6027\u76f8\u540c\uff0c\u4f46\u8fd9\u4e0d\u662f\u5fc5\u9700\u7684\u3002\u4f8b\u5982\uff0c\u5bf9\u4e8e Active Directory\uff0c\u5f53\u7528\u6237\u540d\u5c5e\u6027\u53ef\u80fd\u662f\u201csAMAccountName\u201d\u65f6\uff0c\u901a\u5e38\u4f7f\u7528\u201ccn\u201d\u4f5c\u4e3a RDN \u5c5e\u6027\u3002 UUID\u5c5e\u6027\uff08UUID LDAP attribute\uff09 LDAP \u5c5e\u6027\u7684\u540d\u79f0\uff0c\u7528\u4f5c LDAP \u4e2d\u5bf9\u8c61\u7684\u552f\u4e00\u5bf9\u8c61\u6807\u8bc6\u7b26 (UUID)\u3002\u5bf9\u4e8e\u8bb8\u591a LDAP \u670d\u52a1\u5668\u4f9b\u5e94\u5546\u6765\u8bf4\uff0c\u5b83\u662f\u201centryUUID\u201d\uff1b\u7136\u800c\u6709\u4e9b\u662f\u4e0d\u540c\u7684\u3002\u4f8b\u5982\uff0c\u5bf9\u4e8e Active Directory\uff0c\u5b83\u5e94\u8be5\u662f\u201cobjectGUID\u201d\u3002\u5982\u679c\u60a8\u7684 LDAP \u670d\u52a1\u5668\u4e0d\u652f\u6301 UUID \u6982\u5ff5\uff0c\u60a8\u53ef\u4ee5\u4f7f\u7528\u5728\u6811\u4e2d\u7684 LDAP \u7528\u6237\u4e4b\u95f4\u5e94\u8be5\u552f\u4e00\u7684\u4efb\u4f55\u5176\u4ed6\u5c5e\u6027\u3002\u4f8b\u5982\u201cuid\u201d\u6216\u201centryDN\u201d\u3002
          4. \u5728 \u540c\u6b65\u7528\u6237\u7ec4 \u9875\u7b7e\u4e2d\uff0c\u586b\u5199\u4ee5\u4e0b\u5b57\u6bb5\u914d\u7f6e\u7528\u6237\u7ec4\u7684\u6620\u5c04\u5173\u7cfb\u540e\uff0c\u518d\u6b21\u70b9\u51fb \u4fdd\u5b58 \u3002

            \u5b57\u6bb5 \u63cf\u8ff0 \u4e3e\u4f8b\u503c \u57fa\u51c6 DN \u7528\u6237\u7ec4\u5728 LDAP \u6811\u72b6\u7ed3\u6784\u4e2d\u7684\u4f4d\u7f6e ou=groups,dc=example,dc=org \u7528\u6237\u7ec4\u5bf9\u8c61\u8fc7\u6ee4\u5668 \u7528\u6237\u7ec4\u7684\u5bf9\u8c61\u7c7b\uff0c\u5982\u679c\u9700\u8981\u66f4\u591a\u7c7b\uff0c\u5219\u7528\u9017\u53f7\u5206\u9694\u3002\u5728\u5178\u578b\u7684 LDAP \u90e8\u7f72\u4e2d\uff0c\u901a\u5e38\u662f \u201cgroupOfNames\u201d\uff0c\u7cfb\u7edf\u5df2\u81ea\u52a8\u586b\u5165\uff0c\u5982\u9700\u66f4\u6539\u8bf7\u76f4\u63a5\u7f16\u8f91\u3002* \u8868\u793a\u6240\u6709\u3002 * \u7528\u6237\u7ec4\u540d cn \u4e0d\u53ef\u66f4\u6539
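
To make the Bind DN, Users DN, and object-class fields concrete, here is a minimal connection sketch using the Python ldap3 library and the example values from the tables above. The server address, Bind DN, and password are placeholders, not values from a real deployment.

```python
# Minimal sketch: verifying the LDAP connection settings above with the
# ldap3 library (pip install ldap3). All concrete values are placeholders.
from ldap3 import Server, Connection, ALL, SUBTREE

server = Server("ldap://10.6.165.2:30061", get_info=ALL)  # Connection URL
conn = Connection(
    server,
    user="cn=admin,dc=example,dc=com",  # Bind DN (LDAP administrator)
    password="admin-password",          # Bind credentials
)
if not conn.bind():
    raise SystemExit(f"bind failed: {conn.result}")

# Search under the Users DN for entries matching the user object classes.
conn.search(
    search_base="ou=users,dc=example,dc=com",     # Users DN
    search_filter="(objectClass=inetOrgPerson)",  # User object classes
    search_scope=SUBTREE,
    attributes=["uid", "cn", "mail"],             # username/name/mail mappings
)
for entry in conn.entries:
    print(entry.entry_dn)
```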

Note

1. After you establish a trust relationship between your enterprise user management system and the AI computing platform via the LDAP protocol, you can sync users or user groups from the enterprise user management system to the platform in one batch, either manually or automatically.
2. After syncing, the administrator can grant permissions to users/user groups in bulk, and users can log in to the AI computing platform with their account/password from the enterprise user management system.
          "},{"location":"admin/ghippo/access-control/oauth2.0.html","title":"OAuth 2.0 - \u4f01\u4e1a\u5fae\u4fe1","text":"

          \u5982\u679c\u60a8\u7684\u4f01\u4e1a\u6216\u7ec4\u7ec7\u4e2d\u7684\u6210\u5458\u5747\u7ba1\u7406\u5728\u4f01\u4e1a\u5fae\u4fe1\u4e2d\uff0c\u60a8\u53ef\u4ee5\u4f7f\u7528\u5168\u5c40\u7ba1\u7406\u63d0\u4f9b\u7684\u57fa\u4e8e OAuth 2.0 \u534f\u8bae\u7684\u8eab\u4efd\u63d0\u4f9b\u5546\u529f\u80fd\uff0c \u800c\u4e0d\u5fc5\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4e3a\u6bcf\u4e00\u4f4d\u7ec4\u7ec7\u6210\u5458\u521b\u5efa\u7528\u6237\u540d/\u5bc6\u7801\u3002 \u60a8\u53ef\u4ee5\u5411\u8fd9\u4e9b\u5916\u90e8\u7528\u6237\u8eab\u4efd\u6388\u4e88\u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8d44\u6e90\u7684\u6743\u9650\u3002

          "},{"location":"admin/ghippo/access-control/oauth2.0.html#_1","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
          1. \u4f7f\u7528\u5177\u6709 Admin \u89d2\u8272\u7684\u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\u3002\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u5e95\u90e8\u7684 \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \u3002

          2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u9009\u62e9 \u8eab\u4efd\u63d0\u4f9b\u5546 \uff0c\u70b9\u51fb OAuth2.0 \u9875\u7b7e\u3002\u586b\u5199\u8868\u5355\u5b57\u6bb5\uff0c\u5efa\u7acb\u4e0e\u4f01\u4e1a\u5fae\u4fe1\u7684\u4fe1\u4efb\u5173\u7cfb\u540e\uff0c\u70b9\u51fb \u4fdd\u5b58 \u3002

          "},{"location":"admin/ghippo/access-control/oauth2.0.html#_2","title":"\u4f01\u4e1a\u5fae\u4fe1\u4e2d\u5bf9\u5e94\u7684\u5b57\u6bb5","text":"

          Note

          \u5bf9\u63a5\u524d\u9700\u8981\u5728\u4f01\u4e1a\u5fae\u4fe1\u7ba1\u7406\u540e\u53f0\u4e2d\u521b\u5efa\u81ea\u5efa\u5e94\u7528\uff0c\u53c2\u9605\u5982\u4f55\u521b\u5efa\u81ea\u5efa\u5e94\u7528\u94fe\u63a5\u3002

          \u5b57\u6bb5 \u63cf\u8ff0 \u4f01\u4e1a ID \u4f01\u4e1a\u5fae\u4fe1\u7684 ID Agent ID \u81ea\u5efa\u5e94\u7528\u7684 ID ClientSecret \u81ea\u5efa\u5e94\u7528\u7684 Secret

          \u4f01\u4e1a\u5fae\u4fe1 ID\uff1a

          Agent ID \u548c ClientSecret\uff1a
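
As a rough sketch of how these three values are used, the snippet below exchanges the Corp ID and ClientSecret for an access token via WeCom's public gettoken endpoint; verify the endpoint and parameter names against the current WeCom API documentation, and treat the IDs as placeholders.

```python
# Minimal sketch: exchanging the Corp ID and ClientSecret for a WeCom
# access token (pip install requests). The endpoint and parameter names
# come from WeCom's public API docs; verify them for your deployment.
import requests

CORP_ID = "ww1234567890abcdef"     # hypothetical Corp ID
CLIENT_SECRET = "your-app-secret"  # hypothetical self-built app Secret

resp = requests.get(
    "https://qyapi.weixin.qq.com/cgi-bin/gettoken",
    params={"corpid": CORP_ID, "corpsecret": CLIENT_SECRET},
    timeout=10,
)
data = resp.json()
if data.get("errcode", 0) != 0:
    raise SystemExit(f"WeCom error: {data}")
print("access_token:", data["access_token"])
```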

          "},{"location":"admin/ghippo/access-control/oidc.html","title":"\u521b\u5efa\u548c\u7ba1\u7406 OIDC","text":"

          OIDC\uff08OpenID Connect\uff09\u662f\u5efa\u7acb\u5728 OAuth 2.0 \u57fa\u7840\u4e0a\u7684\u4e00\u4e2a\u8eab\u4efd\u5c42\uff0c\u662f\u57fa\u4e8e OAuth2 \u534f\u8bae\u7684\u8eab\u4efd\u8ba4\u8bc1\u6807\u51c6\u534f\u8bae\u3002

          \u5982\u679c\u60a8\u7684\u4f01\u4e1a\u6216\u7ec4\u7ec7\u5df2\u6709\u81ea\u5df1\u7684\u8d26\u53f7\u4f53\u7cfb\uff0c\u540c\u65f6\u60a8\u7684\u4f01\u4e1a\u7528\u6237\u7ba1\u7406\u7cfb\u7edf\u652f\u6301 OIDC \u534f\u8bae\uff0c \u53ef\u4ee5\u4f7f\u7528\u5168\u5c40\u7ba1\u7406\u63d0\u4f9b\u7684\u57fa\u4e8e OIDC \u534f\u8bae\u7684\u8eab\u4efd\u63d0\u4f9b\u5546\u529f\u80fd\uff0c\u800c\u4e0d\u5fc5\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4e3a\u6bcf\u4e00\u4f4d\u7ec4\u7ec7\u6210\u5458\u521b\u5efa\u7528\u6237\u540d/\u5bc6\u7801\u3002 \u60a8\u53ef\u4ee5\u5411\u8fd9\u4e9b\u5916\u90e8\u7528\u6237\u8eab\u4efd\u6388\u4e88\u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8d44\u6e90\u7684\u6743\u9650\u3002

          \u5177\u4f53\u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b\u3002

          1. \u4f7f\u7528\u5177\u6709 admin \u89d2\u8272\u7684\u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\u3002\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u5e95\u90e8\u7684 \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \u3002

          2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u9009\u62e9 \u8eab\u4efd\u63d0\u4f9b\u5546 \uff0c\u70b9\u51fb OIDC \u9875\u7b7e -> \u521b\u5efa\u8eab\u4efd\u63d0\u4f9b\u5546 \u6309\u94ae\u3002

          3. \u586b\u5199\u8868\u5355\u5b57\u6bb5\uff0c\u5efa\u7acb\u4e0e\u8eab\u4efd\u63d0\u4f9b\u5546\u7684\u4fe1\u4efb\u5173\u7cfb\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

            \u5b57\u6bb5 \u63cf\u8ff0 \u63d0\u4f9b\u5546\u540d\u79f0 \u663e\u793a\u5728\u767b\u5f55\u9875\u4e0a\uff0c\u662f\u8eab\u4efd\u63d0\u4f9b\u5546\u7684\u5165\u53e3 \u8ba4\u8bc1\u65b9\u5f0f \u5ba2\u6237\u7aef\u8eab\u4efd\u9a8c\u8bc1\u65b9\u6cd5\u3002\u5982\u679c JWT \u4f7f\u7528\u79c1\u94a5\u7b7e\u540d\uff0c\u8bf7\u4e0b\u62c9\u9009\u62e9 JWT signed with private key \u3002\u5177\u4f53\u53c2\u9605 Client Authentication\u3002 \u5ba2\u6237\u7aef ID \u5ba2\u6237\u7aef\u7684 ID \u5ba2\u6237\u7aef\u5bc6\u94a5 \u5ba2\u6237\u7aef\u5bc6\u7801 \u5ba2\u6237\u7aef URL \u53ef\u901a\u8fc7\u8eab\u4efd\u63d0\u4f9b\u5546 well-known \u63a5\u53e3\u4e00\u952e\u83b7\u53d6\u767b\u5f55 URL\u3001Token URL\u3001\u7528\u6237\u4fe1\u606f URL \u548c\u767b\u51fa URL \u81ea\u52a8\u5173\u8054 \u5f00\u542f\u540e\u5f53\u8eab\u4efd\u63d0\u4f9b\u5546\u7528\u6237\u540d/\u90ae\u7bb1\u4e0e\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7528\u6237\u540d/\u90ae\u7bb1\u91cd\u590d\u65f6\u5c06\u81ea\u52a8\u4f7f\u4e8c\u8005\u5173\u8054
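
To show what the well-known endpoint provides, here is a minimal sketch that fetches the standard OIDC discovery document; the issuer URL is a hypothetical placeholder, and some IdPs may omit optional fields such as the logout endpoint.

```python
# Minimal sketch: fetching the OIDC discovery document from an issuer's
# well-known endpoint (pip install requests). The issuer is a placeholder.
import requests

issuer = "https://idp.example.com/realms/demo"  # hypothetical enterprise IdP
discovery = requests.get(
    f"{issuer}/.well-known/openid-configuration", timeout=10
).json()

# The URLs the form above asks for are all listed in this document:
print("login URL:     ", discovery["authorization_endpoint"])
print("Token URL:     ", discovery["token_endpoint"])
print("user info URL: ", discovery["userinfo_endpoint"])
print("logout URL:    ", discovery.get("end_session_endpoint"))
```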

Note

1. A user's information is synced to Access Control -> User List on the AI computing platform only after the user logs in to the platform for the first time through the enterprise user management system.
2. A user logging in for the first time is granted no default permissions; an administrator (the platform administrator, a submodule administrator, or a resource administrator) must grant them.
3. For reference, see the Azure OpenID Connect (OIDC) integration process.
          "},{"location":"admin/ghippo/access-control/oidc.html#_1","title":"\u7528\u6237\u8eab\u4efd\u8ba4\u8bc1\u4ea4\u4e92\u6d41\u7a0b","text":"

          \u7528\u6237\u8eab\u4efd\u8ba4\u8bc1\u7684\u4ea4\u4e92\u6d41\u7a0b\u4e3a\uff1a

          1. \u4f7f\u7528\u6d4f\u89c8\u5668\u53d1\u8d77\u5355\u70b9\u767b\u5f55\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u8bf7\u6c42\u3002 1.\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u6839\u636e\u767b\u5f55\u94fe\u63a5\u4e2d\u643a\u5e26\u7684\u4fe1\u606f\uff0c\u67e5\u627e \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u8eab\u4efd\u63d0\u4f9b\u5546 \u4e2d\u5bf9\u5e94\u7684\u914d\u7f6e\u4fe1\u606f\uff0c \u6784\u5efa OIDC \u6388\u6743 Request\uff0c\u53d1\u9001\u7ed9\u6d4f\u89c8\u5668\u3002
          2. \u6d4f\u89c8\u5668\u6536\u5230\u8bf7\u6c42\u540e\uff0c\u8f6c\u53d1 OIDC \u6388\u6743 Request \u7ed9\u4f01\u4e1a IdP\u3002
          3. \u5728\u4f01\u4e1a IdP \u7684\u767b\u5f55\u9875\u9762\u4e2d\u8f93\u5165\u7528\u6237\u540d\u548c\u5bc6\u7801\uff0c\u4f01\u4e1a IdP \u5bf9\u63d0\u4f9b\u7684\u8eab\u4efd\u4fe1\u606f\u8fdb\u884c\u9a8c\u8bc1\uff0c\u5e76\u6784\u5efa\u643a\u5e26\u7528\u6237\u4fe1\u606f\u7684 ID Token\uff0c\u5411\u6d4f\u89c8\u5668\u53d1\u9001 OIDC \u6388\u6743 Response\u3002
          4. \u6d4f\u89c8\u5668\u54cd\u5e94\u540e\u8f6c\u53d1 OIDC \u6388\u6743 Response \u7ed9 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u3002 1.\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4ece OIDC \u6388\u6743 Response \u4e2d\u53d6\u51fa ID Token\uff0c\u5e76\u6839\u636e\u5df2\u914d\u7f6e\u7684\u8eab\u4efd\u8f6c\u6362\u89c4\u5219\u6620\u5c04\u5230\u5177\u4f53\u7684\u7528\u6237\u5217\u8868\uff0c\u9881\u53d1 Token\u3002
          5. \u5b8c\u6210\u5355\u70b9\u767b\u5f55\uff0c\u8bbf\u95ee \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u3002
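
A hypothetical sketch of the token-validation step in this flow, using the PyJWT library. The issuer, client ID, and Keycloak-style JWKS path are illustrative assumptions, not the platform's actual implementation.

```python
# Minimal sketch: validating the ID token returned in the OIDC
# authorization response (pip install "pyjwt[crypto]"). The issuer and
# client ID are placeholders; the platform's real logic may differ.
import jwt
from jwt import PyJWKClient

ISSUER = "https://idp.example.com/realms/demo"  # hypothetical enterprise IdP
CLIENT_ID = "ai-platform"                       # hypothetical client ID

def verify_id_token(id_token: str) -> dict:
    # Fetch the IdP's signing keys (Keycloak-style JWKS path assumed here;
    # the real URL is listed as jwks_uri in the discovery document).
    jwks = PyJWKClient(f"{ISSUER}/protocol/openid-connect/certs")
    signing_key = jwks.get_signing_key_from_jwt(id_token)
    # Check signature, issuer, audience, and expiry in one call.
    return jwt.decode(
        id_token,
        signing_key.key,
        algorithms=["RS256"],
        audience=CLIENT_ID,
        issuer=ISSUER,
    )

# claims = verify_id_token(token_from_response)
# print(claims["preferred_username"], claims.get("email"))
```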
          "},{"location":"admin/ghippo/access-control/role.html","title":"\u89d2\u8272\u548c\u6743\u9650\u7ba1\u7406","text":"

          \u4e00\u4e2a\u89d2\u8272\u5bf9\u5e94\u4e00\u7ec4\u6743\u9650\u3002\u6743\u9650\u51b3\u5b9a\u4e86\u53ef\u4ee5\u5bf9\u8d44\u6e90\u6267\u884c\u7684\u64cd\u4f5c\u3002\u5411\u7528\u6237\u6388\u4e88\u67d0\u89d2\u8272\uff0c\u5373\u6388\u4e88\u8be5\u89d2\u8272\u6240\u5305\u542b\u7684\u6240\u6709\u6743\u9650\u3002

          \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5b58\u5728\u4e09\u79cd\u89d2\u8272\u8303\u56f4\uff0c\u80fd\u591f\u7075\u6d3b\u3001\u6709\u6548\u5730\u89e3\u51b3\u60a8\u5728\u6743\u9650\u4e0a\u7684\u4f7f\u7528\u95ee\u9898\uff1a

          • \u5e73\u53f0\u89d2\u8272
          • \u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272
          • \u6587\u4ef6\u5939\u89d2\u8272
          "},{"location":"admin/ghippo/access-control/role.html#_2","title":"\u5e73\u53f0\u89d2\u8272","text":"

          \u5e73\u53f0\u89d2\u8272\u662f\u7c97\u7c92\u5ea6\u6743\u9650\uff0c\u5bf9\u5e73\u53f0\u4e0a\u6240\u6709\u76f8\u5173\u8d44\u6e90\u5177\u6709\u76f8\u5e94\u6743\u9650\u3002\u901a\u8fc7\u5e73\u53f0\u89d2\u8272\u53ef\u4ee5\u8d4b\u4e88\u7528\u6237\u5bf9\u6240\u6709\u96c6\u7fa4\u3001\u6240\u6709\u5de5\u4f5c\u7a7a\u95f4\u7b49\u7684\u589e\u5220\u6539\u67e5\u6743\u9650\uff0c \u800c\u4e0d\u80fd\u5177\u4f53\u5230\u67d0\u4e00\u4e2a\u96c6\u7fa4\u6216\u67d0\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u3002\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u63d0\u4f9b\u4e86 5 \u4e2a\u9884\u7f6e\u7684\u3001\u7528\u6237\u53ef\u76f4\u63a5\u4f7f\u7528\u7684\u5e73\u53f0\u89d2\u8272\uff1a

          • Admin
          • Kpanda Owner
          • Workspace and Folder Owner
          • IAM Owner
          • Audit Owner

          \u540c\u65f6\uff0c\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8fd8\u652f\u6301\u7528\u6237\u521b\u5efa\u81ea\u5b9a\u4e49\u5e73\u53f0\u89d2\u8272\uff0c\u53ef\u6839\u636e\u9700\u8981\u81ea\u5b9a\u4e49\u89d2\u8272\u5185\u5bb9\u3002 \u5982\u521b\u5efa\u4e00\u4e2a\u5e73\u53f0\u89d2\u8272\uff0c\u5305\u542b\u5e94\u7528\u5de5\u4f5c\u53f0\u7684\u6240\u6709\u529f\u80fd\u6743\u9650\uff0c\u7531\u4e8e\u5e94\u7528\u5de5\u4f5c\u53f0\u4f9d\u8d56\u4e8e\u5de5\u4f5c\u7a7a\u95f4\uff0c \u56e0\u6b64\u5e73\u53f0\u4f1a\u5e2e\u52a9\u7528\u6237\u9ed8\u8ba4\u52fe\u9009\u5de5\u4f5c\u7a7a\u95f4\u7684\u67e5\u770b\u6743\u9650\uff0c\u8bf7\u4e0d\u8981\u624b\u52a8\u53d6\u6d88\u52fe\u9009\u3002 \u82e5\u7528\u6237 A \u88ab\u6388\u4e88\u8be5 Workbench\uff08\u5e94\u7528\u5de5\u4f5c\u53f0\uff09\u89d2\u8272\uff0c\u5c06\u81ea\u52a8\u62e5\u6709\u6240\u6709\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u7684\u5e94\u7528\u5de5\u4f5c\u53f0\u76f8\u5173\u529f\u80fd\u7684\u589e\u5220\u6539\u67e5\u7b49\u6743\u9650\u3002

          "},{"location":"admin/ghippo/access-control/role.html#_3","title":"\u5e73\u53f0\u89d2\u8272\u6388\u6743\u65b9\u5f0f","text":"

          \u7ed9\u5e73\u53f0\u89d2\u8272\u6388\u6743\u5171\u6709\u4e09\u79cd\u65b9\u5f0f\uff1a

          • \u5728 \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u7528\u6237 \u7684\u7528\u6237\u5217\u8868\u4e2d\uff0c\u627e\u5230\u8be5\u7528\u6237\uff0c\u70b9\u51fb ... \uff0c\u9009\u62e9 \u6388\u6743 \uff0c\u4e3a\u8be5\u7528\u6237\u8d4b\u4e88\u5e73\u53f0\u89d2\u8272\u6743\u9650\u3002

          • \u5728 \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u7528\u6237\u7ec4 \u7684\u7528\u6237\u7ec4\u5217\u8868\u4e2d\u521b\u5efa\u7528\u6237\u7ec4\uff0c\u5c06\u8be5\u7528\u6237\u52a0\u5165\u7528\u6237\u7ec4\uff0c\u5e76\u7ed9\u7528\u6237\u7ec4\u6388\u6743 \uff08\u5177\u4f53\u64cd\u4f5c\u4e3a\uff1a\u5728\u7528\u6237\u7ec4\u5217\u8868\u627e\u5230\u8be5\u7528\u6237\u7ec4\uff0c\u70b9\u51fb ... \uff0c\u9009\u62e9 \u6388\u6743 \uff0c\u4e3a\u8be5\u7528\u6237\u7ec4\u8d4b\u4e88\u5e73\u53f0\u89d2\u8272\uff09\u3002

          • \u5728 \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u89d2\u8272 \u7684\u89d2\u8272\u5217\u8868\u4e2d\uff0c\u627e\u5230\u76f8\u5e94\u7684\u5e73\u53f0\u89d2\u8272\uff0c \u70b9\u51fb\u89d2\u8272\u540d\u79f0\u8fdb\u5165\u8be6\u60c5\uff0c\u70b9\u51fb \u5173\u8054\u6210\u5458 \u6309\u94ae\uff0c\u9009\u4e2d\u8be5\u7528\u6237\u6216\u7528\u6237\u6240\u5728\u7684\u7528\u6237\u7ec4\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

          "},{"location":"admin/ghippo/access-control/role.html#_4","title":"\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272","text":"

          \u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u662f\u7ec6\u7c92\u5ea6\u89d2\u8272\uff0c\u901a\u8fc7\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u53ef\u4ee5\u8d4b\u4e88\u7528\u6237\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u7684\u7ba1\u7406\u6743\u9650\u3001\u67e5\u770b\u6743\u9650\u6216\u8be5\u5de5\u4f5c\u7a7a\u95f4\u5e94\u7528\u5de5\u4f5c\u53f0\u76f8\u5173\u7684\u6743\u9650\u7b49\u3002 \u83b7\u5f97\u8be5\u89d2\u8272\u6743\u9650\u7684\u7528\u6237\u53ea\u80fd\u7ba1\u7406\u8be5\u5de5\u4f5c\u7a7a\u95f4\uff0c\u800c\u65e0\u6cd5\u8bbf\u95ee\u5176\u4ed6\u5de5\u4f5c\u7a7a\u95f4\u3002\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u63d0\u4f9b\u4e86 3 \u4e2a\u9884\u7f6e\u7684\u3001\u7528\u6237\u53ef\u76f4\u63a5\u4f7f\u7528\u7684\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\uff1a

          • Workspace Admin
          • Workspace Editor
          • Workspace Viewer

          \u540c\u65f6\uff0c\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8fd8\u652f\u6301\u7528\u6237\u521b\u5efa\u81ea\u5b9a\u4e49\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\uff0c\u53ef\u6839\u636e\u9700\u8981\u81ea\u5b9a\u4e49\u89d2\u8272\u5185\u5bb9\u3002\u5982\u521b\u5efa\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\uff0c \u5305\u542b\u5e94\u7528\u5de5\u4f5c\u53f0\u7684\u6240\u6709\u529f\u80fd\u6743\u9650\uff0c\u7531\u4e8e\u5e94\u7528\u5de5\u4f5c\u53f0\u4f9d\u8d56\u4e8e\u5de5\u4f5c\u7a7a\u95f4\uff0c\u56e0\u6b64\u5e73\u53f0\u4f1a\u5e2e\u52a9\u7528\u6237\u9ed8\u8ba4\u52fe\u9009\u5de5\u4f5c\u7a7a\u95f4\u7684\u67e5\u770b\u6743\u9650\uff0c \u8bf7\u4e0d\u8981\u624b\u52a8\u53d6\u6d88\u52fe\u9009\u3002\u82e5\u7528\u6237 A \u5728\u5de5\u4f5c\u7a7a\u95f4 01 \u4e2d\u88ab\u6388\u4e88\u8be5\u89d2\u8272\uff0c\u5c06\u62e5\u6709\u5de5\u4f5c\u7a7a\u95f4 01 \u4e0b\u7684\u5e94\u7528\u5de5\u4f5c\u53f0\u76f8\u5173\u529f\u80fd\u7684\u589e\u5220\u6539\u67e5\u6743\u9650\u3002

          Note

          \u4e0e\u5e73\u53f0\u89d2\u8272\u4e0d\u540c\uff0c\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u88ab\u521b\u5efa\u540e\u9700\u8981\u524d\u5f80\u5de5\u4f5c\u7a7a\u95f4\u4f7f\u7528\uff0c\u88ab\u6388\u6743\u540e\u7528\u6237\u4ec5\u5728\u8be5\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u62e5\u6709\u8be5\u89d2\u8272\u4e2d\u7684\u529f\u80fd\u6743\u9650\u3002

          "},{"location":"admin/ghippo/access-control/role.html#_5","title":"\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u6388\u6743\u65b9\u5f0f","text":"

          \u5728 \u5168\u5c40\u7ba1\u7406 -> \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \u5217\u8868\u4e2d\uff0c\u627e\u5230\u8be5\u5de5\u4f5c\u7a7a\u95f4\uff0c\u70b9\u51fb \u6dfb\u52a0\u6388\u6743 \uff0c\u4e3a\u8be5\u7528\u6237\u8d4b\u4e88\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u6743\u9650\u3002

          "},{"location":"admin/ghippo/access-control/role.html#_6","title":"\u6587\u4ef6\u5939\u89d2\u8272","text":"

          \u6587\u4ef6\u5939\u89d2\u8272\u7684\u6743\u9650\u7c92\u5ea6\u4ecb\u4e8e\u5e73\u53f0\u89d2\u8272\u4e0e\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u4e4b\u95f4\uff0c\u901a\u8fc7\u6587\u4ef6\u5939\u89d2\u8272\u53ef\u4ee5\u8d4b\u4e88\u7528\u6237\u67d0\u4e2a\u6587\u4ef6\u5939\u53ca\u5176\u5b50\u6587\u4ef6\u5939\u548c\u8be5\u6587\u4ef6\u5939\u4e0b\u6240\u6709\u5de5\u4f5c\u7a7a\u95f4\u7684\u7ba1\u7406\u6743\u9650\u3001\u67e5\u770b\u6743\u9650\u7b49\uff0c \u5e38\u9002\u7528\u4e8e\u4f01\u4e1a\u4e2d\u7684\u90e8\u95e8\u573a\u666f\u3002\u6bd4\u5982\u7528\u6237 B \u662f\u4e00\u7ea7\u90e8\u95e8\u7684 Leader\uff0c\u901a\u5e38\u7528\u6237 B \u80fd\u591f\u7ba1\u7406\u8be5\u4e00\u7ea7\u90e8\u95e8\u3001\u5176\u4e0b\u7684\u6240\u6709\u4e8c\u7ea7\u90e8\u95e8\u548c\u90e8\u95e8\u4e2d\u7684\u9879\u76ee\u7b49\uff0c \u5728\u6b64\u573a\u666f\u4e2d\u7ed9\u7528\u6237 B \u6388\u4e88\u4e00\u7ea7\u6587\u4ef6\u5939\u7684\u7ba1\u7406\u5458\u6743\u9650\uff0c\u7528\u6237 B \u4e5f\u5c06\u62e5\u6709\u5176\u4e0b\u7684\u4e8c\u7ea7\u6587\u4ef6\u5939\u548c\u5de5\u4f5c\u7a7a\u95f4\u7684\u76f8\u5e94\u6743\u9650\u3002 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u63d0\u4f9b\u4e86 3 \u4e2a\u9884\u7f6e\u7684\u3001\u7528\u6237\u53ef\u76f4\u63a5\u4f7f\u7528\u6587\u4ef6\u5939\u89d2\u8272\uff1a

          • Folder Admin
          • Folder Editor
          • Folder Viewer

          \u540c\u65f6\uff0c\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8fd8\u652f\u6301\u7528\u6237\u521b\u5efa\u81ea\u5b9a\u4e49\u6587\u4ef6\u5939\u89d2\u8272\uff0c\u53ef\u6839\u636e\u9700\u8981\u81ea\u5b9a\u4e49\u89d2\u8272\u5185\u5bb9\u3002 \u5982\u521b\u5efa\u4e00\u4e2a\u6587\u4ef6\u5939\u89d2\u8272\uff0c\u5305\u542b\u5e94\u7528\u5de5\u4f5c\u53f0\u7684\u6240\u6709\u529f\u80fd\u6743\u9650\u3002\u82e5\u7528\u6237 A \u5728\u6587\u4ef6\u5939 01 \u4e2d\u88ab\u6388\u4e88\u8be5\u89d2\u8272\uff0c \u5c06\u62e5\u6709\u8be5\u6587\u4ef6\u5939\u4e0b\u6240\u6709\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u5e94\u7528\u5de5\u4f5c\u53f0\u76f8\u5173\u529f\u80fd\u7684\u589e\u5220\u6539\u67e5\u6743\u9650\u3002

          Note

          \u529f\u80fd\u6a21\u5757\u672c\u8eab\u4f9d\u8d56\u7684\u662f\u5de5\u4f5c\u7a7a\u95f4\uff0c\u6587\u4ef6\u5939\u662f\u5de5\u4f5c\u7a7a\u95f4\u4e0a\u7684\u8fdb\u4e00\u6b65\u5206\u7ec4\u673a\u5236\u4e14\u5177\u6709\u6743\u9650\u7ee7\u627f\u80fd\u529b\uff0c \u56e0\u6b64\u6587\u4ef6\u5939\u6743\u9650\u4e0d\u5149\u5305\u542b\u6587\u4ef6\u5939\u672c\u8eab\uff0c\u8fd8\u5305\u62ec\u5176\u4e0b\u7684\u5b50\u6587\u4ef6\u5939\u548c\u5de5\u4f5c\u7a7a\u95f4\u3002

          "},{"location":"admin/ghippo/access-control/role.html#_7","title":"\u6587\u4ef6\u5939\u89d2\u8272\u6388\u6743\u65b9\u5f0f","text":"

          \u5728 \u5168\u5c40\u7ba1\u7406 -> \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \u5217\u8868\u4e2d\uff0c\u627e\u5230\u8be5\u6587\u4ef6\u5939\uff0c\u70b9\u51fb \u6dfb\u52a0\u6388\u6743 \uff0c\u4e3a\u8be5\u7528\u6237\u8d4b\u4e88\u6587\u4ef6\u5939\u89d2\u8272\u6743\u9650\u3002

          "},{"location":"admin/ghippo/access-control/user.html","title":"\u7528\u6237","text":"

          \u7528\u6237\u6307\u7684\u662f\u7531\u5e73\u53f0\u7ba1\u7406\u5458 admin \u6216\u8005\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u7ba1\u7406\u5458 IAM Owner \u5728 \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u7528\u6237 \u9875\u9762\u521b\u5efa\u7684\u7528\u6237\uff0c\u6216\u8005\u901a\u8fc7 LDAP / OIDC \u5bf9\u63a5\u8fc7\u6765\u7684\u7528\u6237\u3002 \u7528\u6237\u540d\u4ee3\u8868\u8d26\u53f7\uff0c\u7528\u6237\u901a\u8fc7\u7528\u6237\u540d\u548c\u5bc6\u7801\u767b\u5f55\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u3002

          \u62e5\u6709\u4e00\u4e2a\u7528\u6237\u8d26\u53f7\u662f\u7528\u6237\u8bbf\u95ee\u5e73\u53f0\u7684\u524d\u63d0\u3002\u65b0\u5efa\u7684\u7528\u6237\u9ed8\u8ba4\u6ca1\u6709\u4efb\u4f55\u6743\u9650\uff0c\u4f8b\u5982\u60a8\u9700\u8981\u7ed9\u7528\u6237\u8d4b\u4e88\u76f8\u5e94\u7684\u89d2\u8272\u6743\u9650\uff0c\u6bd4\u5982\u5728 \u7528\u6237\u5217\u8868 \u6216 \u7528\u6237\u8be6\u60c5 \u6388\u4e88\u5b50\u6a21\u5757\u7684\u7ba1\u7406\u5458\u6743\u9650\u3002 \u5b50\u6a21\u5757\u7ba1\u7406\u5458\u62e5\u6709\u8be5\u5b50\u6a21\u5757\u7684\u6700\u9ad8\u6743\u9650\uff0c\u80fd\u591f\u521b\u5efa\u3001\u7ba1\u7406\u3001\u5220\u9664\u8be5\u6a21\u5757\u7684\u6240\u6709\u8d44\u6e90\u3002 \u5982\u679c\u7528\u6237\u9700\u8981\u88ab\u6388\u4e88\u5177\u4f53\u8d44\u6e90\u7684\u6743\u9650\uff0c\u6bd4\u5982\u67d0\u4e2a\u8d44\u6e90\u7684\u4f7f\u7528\u6743\u9650\uff0c\u8bf7\u67e5\u770b\u8d44\u6e90\u6388\u6743\u8bf4\u660e\u3002

          \u672c\u9875\u4ecb\u7ecd\u7528\u6237\u7684\u521b\u5efa\u3001\u6388\u6743\u3001\u7981\u7528\u3001\u542f\u7528\u3001\u5220\u9664\u7b49\u64cd\u4f5c\u3002

          "},{"location":"admin/ghippo/access-control/user.html#_2","title":"\u521b\u5efa\u7528\u6237","text":"

          \u524d\u63d0\uff1a\u62e5\u6709\u5e73\u53f0\u7ba1\u7406\u5458 Admin \u6743\u9650\u6216\u8005\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u7ba1\u7406\u5458 IAM Owner \u6743\u9650\u3002

          1. \u7ba1\u7406\u5458\u8fdb\u5165 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \uff0c\u9009\u62e9 \u7528\u6237 \uff0c\u8fdb\u5165\u7528\u6237\u5217\u8868\uff0c\u70b9\u51fb\u53f3\u4e0a\u65b9\u7684 \u521b\u5efa\u7528\u6237 \u3002

          2. \u5728 \u521b\u5efa\u7528\u6237 \u9875\u9762\u586b\u5199\u7528\u6237\u540d\u548c\u767b\u5f55\u5bc6\u7801\u3002\u5982\u9700\u4e00\u6b21\u6027\u521b\u5efa\u591a\u4e2a\u7528\u6237\uff0c\u53ef\u4ee5\u70b9\u51fb \u521b\u5efa\u7528\u6237 \u540e\u8fdb\u884c\u6279\u91cf\u521b\u5efa\uff0c\u4e00\u6b21\u6027\u6700\u591a\u521b\u5efa 5 \u4e2a\u7528\u6237\u3002\u6839\u636e\u60a8\u7684\u5b9e\u9645\u60c5\u51b5\u786e\u5b9a\u662f\u5426\u8bbe\u7f6e\u7528\u6237\u5728\u9996\u6b21\u767b\u5f55\u65f6\u91cd\u7f6e\u5bc6\u7801\u3002

          3. \u70b9\u51fb \u786e\u5b9a \uff0c\u521b\u5efa\u7528\u6237\u6210\u529f\uff0c\u8fd4\u56de\u7528\u6237\u5217\u8868\u9875\u3002

          Note

          The username and password set here will be used to log in to the platform.

          "},{"location":"admin/ghippo/access-control/user.html#grant-admin-permissions","title":"\u4e3a\u7528\u6237\u6388\u4e88\u5b50\u6a21\u5757\u7ba1\u7406\u5458\u6743\u9650","text":"

          \u524d\u63d0\uff1a\u8be5\u7528\u6237\u5df2\u5b58\u5728\u3002

          1. \u7ba1\u7406\u5458\u8fdb\u5165 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \uff0c\u9009\u62e9 \u7528\u6237 \uff0c\u8fdb\u5165\u7528\u6237\u5217\u8868\uff0c\u70b9\u51fb \u2507 -> \u6388\u6743 \u3002

          2. \u5728 \u6388\u6743 \u9875\u9762\u52fe\u9009\u9700\u8981\u7684\u89d2\u8272\u6743\u9650\uff08\u53ef\u591a\u9009\uff09\u3002

          3. \u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u4e3a\u7528\u6237\u7684\u6388\u6743\u3002

          Note

          In the user list, click a user to enter the user details page.

          "},{"location":"admin/ghippo/access-control/user.html#_3","title":"\u5c06\u7528\u6237\u52a0\u5165\u7528\u6237\u7ec4","text":"
          1. \u7ba1\u7406\u5458\u8fdb\u5165 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \uff0c\u9009\u62e9 \u7528\u6237 \uff0c\u8fdb\u5165\u7528\u6237\u5217\u8868\uff0c\u70b9\u51fb \u2507 -> \u52a0\u5165\u7528\u6237\u7ec4 \u3002

          2. \u5728 \u52a0\u5165\u7528\u6237\u7ec4 \u9875\u9762\u52fe\u9009\u9700\u8981\u52a0\u5165\u7684\u7528\u6237\u7ec4\uff08\u53ef\u591a\u9009\uff09\u3002\u82e5\u6ca1\u6709\u53ef\u9009\u7684\u7528\u6237\u7ec4\uff0c\u70b9\u51fb \u521b\u5efa\u7528\u6237\u7ec4 \u521b\u5efa\u7528\u6237\u7ec4\uff0c\u518d\u8fd4\u56de\u8be5\u9875\u9762\u70b9\u51fb \u5237\u65b0 \u6309\u94ae\uff0c\u663e\u793a\u521a\u521b\u5efa\u7684\u7528\u6237\u7ec4\u3002

          3. \u70b9\u51fb \u786e\u5b9a \u5c06\u7528\u6237\u52a0\u5165\u7528\u6237\u7ec4\u3002

          Note

          A user inherits the permissions of its user groups; the groups a user has joined can be viewed on the User Details page.

          "},{"location":"admin/ghippo/access-control/user.html#_4","title":"\u542f\u7528/\u7981\u7528\u7528\u6237","text":"

          \u7981\u7528\u7528\u6237\u540e\uff0c\u8be5\u7528\u6237\u5c06\u65e0\u6cd5\u518d\u8bbf\u95ee\u5e73\u53f0\u3002\u4e0e\u5220\u9664\u7528\u6237\u4e0d\u540c\uff0c\u7981\u7528\u7684\u7528\u6237\u53ef\u4ee5\u6839\u636e\u9700\u8981\u518d\u6b21\u542f\u7528\uff0c\u5efa\u8bae\u5220\u9664\u7528\u6237\u524d\u5148\u7981\u7528\uff0c\u4ee5\u786e\u4fdd\u6ca1\u6709\u5173\u952e\u670d\u52a1\u5728\u4f7f\u7528\u8be5\u7528\u6237\u521b\u5efa\u7684\u5bc6\u94a5\u3002

          1. \u7ba1\u7406\u5458\u8fdb\u5165 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \uff0c\u9009\u62e9 \u7528\u6237 \uff0c\u8fdb\u5165\u7528\u6237\u5217\u8868\uff0c\u70b9\u51fb\u4e00\u4e2a\u7528\u6237\u540d\u8fdb\u5165\u7528\u6237\u8be6\u60c5\u3002

          2. \u70b9\u51fb\u53f3\u4e0a\u65b9\u7684 \u7f16\u8f91 \uff0c\u5173\u95ed\u72b6\u6001\u6309\u94ae\uff0c\u4f7f\u6309\u94ae\u7f6e\u7070\u4e14\u5904\u4e8e\u672a\u542f\u7528\u72b6\u6001\u3002

          3. \u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u7981\u7528\u7528\u6237\u7684\u64cd\u4f5c\u3002

          "},{"location":"admin/ghippo/access-control/user.html#_5","title":"\u5fd8\u8bb0\u5bc6\u7801","text":"

          \u524d\u63d0\uff1a\u9700\u8981\u8bbe\u7f6e\u7528\u6237\u90ae\u7bb1\uff0c\u6709\u4e24\u79cd\u65b9\u5f0f\u53ef\u4ee5\u8bbe\u7f6e\u7528\u6237\u90ae\u7bb1\u3002

          • \u7ba1\u7406\u5458\u5728\u8be5\u7528\u6237\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb \u7f16\u8f91 \uff0c\u5728\u5f39\u51fa\u6846\u8f93\u5165\u7528\u6237\u90ae\u7bb1\u5730\u5740\uff0c\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u90ae\u7bb1\u8bbe\u7f6e\u3002

          • \u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u5165 \u4e2a\u4eba\u4e2d\u5fc3 \uff0c\u5728 \u5b89\u5168\u8bbe\u7f6e \u9875\u9762\u8bbe\u7f6e\u90ae\u7bb1\u5730\u5740\u3002

          \u5982\u679c\u7528\u6237\u767b\u5f55\u65f6\u5fd8\u8bb0\u5bc6\u7801\uff0c\u8bf7\u53c2\u8003\u91cd\u7f6e\u5bc6\u7801\u3002

          "},{"location":"admin/ghippo/access-control/user.html#_6","title":"\u5220\u9664\u7528\u6237","text":"

          Warning

          \u5220\u9664\u7528\u6237\u540e\uff0c\u8be5\u7528\u6237\u5c06\u65e0\u6cd5\u518d\u901a\u8fc7\u4efb\u4f55\u65b9\u5f0f\u8bbf\u95ee\u5e73\u53f0\u8d44\u6e90\uff0c\u8bf7\u8c28\u614e\u5220\u9664\u3002 \u5728\u5220\u9664\u7528\u6237\u4e4b\u524d\uff0c\u8bf7\u786e\u4fdd\u60a8\u7684\u5173\u952e\u7a0b\u5e8f\u4e0d\u518d\u4f7f\u7528\u8be5\u7528\u6237\u521b\u5efa\u7684\u5bc6\u94a5\u3002 \u5982\u679c\u60a8\u4e0d\u786e\u5b9a\uff0c\u5efa\u8bae\u5728\u5220\u9664\u524d\u5148\u7981\u7528\u8be5\u7528\u6237\u3002 \u5982\u679c\u60a8\u5220\u9664\u4e86\u4e00\u4e2a\u7528\u6237\uff0c\u7136\u540e\u518d\u521b\u5efa\u4e00\u4e2a\u540c\u540d\u7684\u65b0\u7528\u6237\uff0c\u5219\u65b0\u7528\u6237\u5c06\u88ab\u89c6\u4e3a\u4e00\u4e2a\u65b0\u7684\u72ec\u7acb\u8eab\u4efd\uff0c\u5b83\u4e0d\u4f1a\u7ee7\u627f\u5df2\u5220\u9664\u7528\u6237\u7684\u89d2\u8272\u3002

          1. \u7ba1\u7406\u5458\u8fdb\u5165 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \uff0c\u9009\u62e9 \u7528\u6237 \uff0c\u8fdb\u5165\u7528\u6237\u5217\u8868\uff0c\u70b9\u51fb \u2507 -> \u5220\u9664 \u3002

          2. \u70b9\u51fb \u79fb\u9664 \u5b8c\u6210\u5220\u9664\u7528\u6237\u7684\u64cd\u4f5c\u3002

          "},{"location":"admin/ghippo/access-control/webhook.html","title":"Webhook \u6d88\u606f\u901a\u77e5","text":"

          \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5728\u63a5\u5165\u5ba2\u6237\u7684\u7cfb\u7edf\u540e\uff0c\u53ef\u4ee5\u521b\u5efa Webhook\uff0c\u5728\u7528\u6237\u521b\u5efa/\u66f4\u65b0/\u5220\u9664/\u767b\u5f55/\u767b\u51fa\u4e4b\u65f6\u53d1\u9001\u6d88\u606f\u901a\u77e5\u3002

          Webhook \u662f\u4e00\u79cd\u7528\u4e8e\u5b9e\u73b0\u5b9e\u65f6\u4e8b\u4ef6\u901a\u77e5\u7684\u673a\u5236\u3002\u5b83\u5141\u8bb8\u4e00\u4e2a\u5e94\u7528\u7a0b\u5e8f\u5c06\u6570\u636e\u6216\u4e8b\u4ef6\u63a8\u9001\u5230\u53e6\u4e00\u4e2a\u5e94\u7528\u7a0b\u5e8f\uff0c \u800c\u65e0\u9700\u8f6e\u8be2\u6216\u6301\u7eed\u67e5\u8be2\u3002\u901a\u8fc7\u914d\u7f6e Webhook\uff0c\u60a8\u53ef\u4ee5\u6307\u5b9a\u5728\u67d0\u4e2a\u4e8b\u4ef6\u53d1\u751f\u65f6\uff0c\u7531\u76ee\u6807\u5e94\u7528\u7a0b\u5e8f\u63a5\u6536\u5e76\u5904\u7406\u901a\u77e5\u3002

          Webhook \u7684\u5de5\u4f5c\u539f\u7406\u5982\u4e0b\uff1a

          1. \u6e90\u5e94\u7528\u7a0b\u5e8f\uff08\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\uff09\u6267\u884c\u67d0\u4e2a\u7279\u5b9a\u64cd\u4f5c\u6216\u4e8b\u4ef6\u3002
          2. \u6e90\u5e94\u7528\u7a0b\u5e8f\u5c06\u76f8\u5173\u6570\u636e\u548c\u4fe1\u606f\u6253\u5305\u6210 HTTP \u8bf7\u6c42\uff0c\u5e76\u5c06\u5176\u53d1\u9001\u5230\u76ee\u6807\u5e94\u7528\u7a0b\u5e8f\u6307\u5b9a\u7684 URL\uff08\u4f8b\u5982\u4f01\u4e1a\u5fae\u4fe1\u7fa4\u673a\u5668\u4eba\uff09\u3002
          3. \u76ee\u6807\u5e94\u7528\u7a0b\u5e8f\u63a5\u6536\u5230\u8bf7\u6c42\u540e\uff0c\u6839\u636e\u5176\u4e2d\u7684\u6570\u636e\u548c\u4fe1\u606f\u8fdb\u884c\u76f8\u5e94\u7684\u5904\u7406\u3002
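
          To see what such a request actually contains, you can first point a test Webhook at a throwaway listener. A minimal sketch, assuming a Linux host with netcat installed and TCP port 8080 reachable from the platform (host and port are placeholders):

          # Listen once on TCP 8080 and print the incoming HTTP request (headers plus JSON body).\n# Some netcat variants need 'nc -l -p 8080' instead of 'nc -l 8080'.\nnc -l 8080\n# Then set the Webhook URL to http://<your-host>:8080 and trigger an event, e.g. create a user.\n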

          With Webhooks you can implement the following:

          • Real-time notification: when a specific event occurs, promptly notify other applications through a Webhook.
          • Automated processing: the target application can automatically trigger predefined actions based on the received Webhook request, without manual intervention.
          • Data synchronization: pass data from one application to another through a Webhook to keep it synchronized.

          Common application scenarios include:

          • In version control systems (such as GitHub or GitLab), automatically triggering build and deployment when a code repository changes.
          • In e-commerce platforms, sending update notifications to the logistics system when an order status changes.
          • In chatbot platforms, pushing received user messages to a target server for processing via a Webhook.
          "},{"location":"admin/ghippo/access-control/webhook.html#_1","title":"\u914d\u7f6e\u6b65\u9aa4","text":"

          \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u56fe\u5f62\u5316\u914d\u7f6e Webhook \u7684\u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b\uff1a

          1. \u5728 \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u63a5\u5165\u7ba1\u7406 \uff0c\u521b\u5efa\u4e00\u4e2a\u5ba2\u6237\u7aef ID\u3002

          2. \u70b9\u51fb\u67d0\u4e2a\u5ba2\u6237\u7aef ID\uff0c\u8fdb\u5165\u8be6\u60c5\u9875\uff0c\u70b9\u51fb \u521b\u5efa Webhook \u6309\u94ae\u3002

          3. \u5728\u5f39\u7a97\u4e2d\u586b\u5165\u5b57\u6bb5\u4fe1\u606f\u540e\u70b9\u51fb \u786e\u5b9a \u3002

            • \u5bf9\u8c61\uff1a\u76ee\u524d\u4ec5\u652f\u6301 \u7528\u6237 \u5bf9\u8c61
            • \u884c\u4e3a\uff1a\u7528\u6237\u521b\u5efa/\u66f4\u65b0/\u5220\u9664/\u767b\u5f55/\u767b\u5f55\u65f6\u53d1\u9001 Webhook \u6d88\u606f
            • URL\uff1a\u63a5\u6536\u6d88\u606f\u7684\u5730\u5740
            • Method\uff1a\u89c6\u60c5\u51b5\u9009\u62e9\u9002\u7528\u7684\u65b9\u6cd5\uff0c\u4f8b\u5982\u4f01\u4e1a\u5fae\u4fe1\u63a8\u8350\u4f7f\u7528 POST \u65b9\u6cd5
            • \u9ad8\u7ea7\u914d\u7f6e\uff1a\u53ef\u4ee5\u7528 Json \u7f16\u5199\u6d88\u606f\u4f53\u3002\u5982\u679c\u662f\u4f01\u4e1a\u5fae\u4fe1\u7fa4\uff0c\u8bf7\u53c2\u9605\u7fa4\u673a\u5668\u4eba\u914d\u7f6e\u8bf4\u660e

          4. \u5c4f\u5e55\u63d0\u793a Webhook \u521b\u5efa\u6210\u529f\u3002

          5. \u73b0\u5728\u53bb\u8bd5\u7740\u521b\u5efa\u4e00\u4e2a\u7528\u6237\u3002

          6. \u7528\u6237\u521b\u5efa\u6210\u529f\uff0c\u53ef\u4ee5\u770b\u5230\u4f01\u4e1a\u5fae\u4fe1\u7fa4\u6536\u5230\u4e86\u4e00\u6761\u6d88\u606f\u3002

          "},{"location":"admin/ghippo/access-control/webhook.html#_2","title":"\u9ad8\u7ea7\u914d\u7f6e\u793a\u4f8b","text":"

          \u7cfb\u7edf\u9ed8\u8ba4\u7684\u6d88\u606f\u4f53

          \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9884\u5148\u5b9a\u4e49\u4e86\u4e00\u4e9b\u53d8\u91cf\uff0c\u60a8\u53ef\u4ee5\u6839\u636e\u81ea\u5df1\u60c5\u51b5\u5728\u6d88\u606f\u4f53\u4e2d\u4f7f\u7528\u8fd9\u4e9b\u53d8\u91cf\u3002

          {\n  \"id\": \"{{$$.ID$$}}\",\n  \"email\": \"{{$$.Email$$}}\",\n  \"username\": \"{{$$.Name$$}}\",\n  \"last_name\": \"{{$$.LastName$$}}\",\n  \"first_name\": \"{{$$.FirstName$$}}\",\n  \"created_at\": \"{{$$.CreatedAt$$}}\",\n  \"enabled\": \"{{$$.Enabled$$}}\"\n}\n

          Message body for a WeCom group bot

          {\n    \"msgtype\": \"text\",\n    \"text\": {\n      \"content\": \"{{$$.Name$$}} hello world\"\n    }\n}\n
          "},{"location":"admin/ghippo/access-control/webhook.html#_3","title":"\u53c2\u8003\u6587\u6863","text":"
          • OEM OUT \u6587\u6863
          • OEM IN \u6587\u6863
          "},{"location":"admin/ghippo/audit/audit-log.html","title":"\u5ba1\u8ba1\u65e5\u5fd7","text":"

          \u5ba1\u8ba1\u65e5\u5fd7\u5e2e\u52a9\u60a8\u76d1\u63a7\u5e76\u8bb0\u5f55\u6bcf\u4e2a\u7528\u6237\u7684\u6d3b\u52a8\uff0c\u63d0\u4f9b\u4e86\u4e0e\u5b89\u5168\u76f8\u5173\u7684\u3001\u6309\u65f6\u95f4\u987a\u5e8f\u6392\u5217\u7684\u8bb0\u5f55\u7684\u6536\u96c6\u3001\u5b58\u50a8\u548c\u67e5\u8be2\u529f\u80fd\u3002 \u501f\u52a9\u5ba1\u8ba1\u65e5\u5fd7\u670d\u52a1\uff0c\u60a8\u53ef\u4ee5\u6301\u7eed\u76d1\u63a7\u5e76\u4fdd\u7559\u7528\u6237\u5728\u5168\u5c40\u7ba1\u7406\u6a21\u5757\u7684\u4f7f\u7528\u884c\u4e3a\uff0c\u5305\u62ec\u4f46\u4e0d\u9650\u4e8e\u521b\u5efa\u7528\u6237\u3001\u7528\u6237\u767b\u5f55/\u767b\u51fa\u3001\u7528\u6237\u6388\u6743\u4ee5\u53ca\u4e0e Kubernetes \u76f8\u5173\u7684\u7528\u6237\u64cd\u4f5c\u884c\u4e3a\u3002

          "},{"location":"admin/ghippo/audit/audit-log.html#_2","title":"\u529f\u80fd\u7279\u6027","text":"

          \u5ba1\u8ba1\u65e5\u5fd7\u529f\u80fd\u5177\u6709\u4ee5\u4e0b\u7279\u70b9\uff1a

          • Out of the box: audit logging is enabled by default when the platform is installed, automatically recording user-related behavior such as creating users, authorization, and login/logout. By default, 365 days of user behavior can be viewed within the platform.
          • Security analysis: audit logs record user operations in detail and provide an export function; from these events you can judge whether an account is at risk.
          • Real-time recording: operation events are collected promptly and can be traced in the audit log list right after a user operation, so suspicious behavior can be spotted at any time.
          • Convenient and reliable: audit logs support both manual and automatic cleanup, and the cleanup policy can be configured according to your storage size.
          "},{"location":"admin/ghippo/audit/audit-log.html#_3","title":"View audit logs","text":"
          1. Log in to the AI 算力平台 as a user with the admin or Audit Owner role.

          2. At the bottom of the left navigation bar, click Global Management -> Audit Logs.

          "},{"location":"admin/ghippo/audit/audit-log.html#_4","title":"\u7528\u6237\u64cd\u4f5c","text":"

          \u5728 \u7528\u6237\u64cd\u4f5c \u9875\u7b7e\u4e2d\uff0c\u53ef\u4ee5\u6309\u65f6\u95f4\u8303\u56f4\uff0c\u4e5f\u53ef\u4ee5\u901a\u8fc7\u6a21\u7cca\u641c\u7d22\u3001\u7cbe\u786e\u641c\u7d22\u6765\u67e5\u627e\u7528\u6237\u64cd\u4f5c\u4e8b\u4ef6\u3002

          \u70b9\u51fb\u67d0\u4e2a\u4e8b\u4ef6\u6700\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u67e5\u770b\u4e8b\u4ef6\u8be6\u60c5\u3002

          \u4e8b\u4ef6\u8be6\u60c5\u5982\u4e0b\u56fe\u6240\u793a\u3002

          \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u5bfc\u51fa \u6309\u94ae\uff0c\u53ef\u4ee5\u6309 CSV \u548c Excel \u683c\u5f0f\u5bfc\u51fa\u5f53\u524d\u6240\u9009\u65f6\u95f4\u8303\u56f4\u5185\u7684\u7528\u6237\u64cd\u4f5c\u65e5\u5fd7\u3002

          "},{"location":"admin/ghippo/audit/audit-log.html#_5","title":"\u7cfb\u7edf\u64cd\u4f5c","text":"

          \u5728 \u7cfb\u7edf\u64cd\u4f5c \u9875\u7b7e\u4e2d\uff0c\u53ef\u4ee5\u6309\u65f6\u95f4\u8303\u56f4\uff0c\u4e5f\u53ef\u4ee5\u901a\u8fc7\u6a21\u7cca\u641c\u7d22\u3001\u7cbe\u786e\u641c\u7d22\u6765\u67e5\u627e\u7cfb\u7edf\u64cd\u4f5c\u4e8b\u4ef6\u3002

          \u540c\u6837\u70b9\u51fb\u67d0\u4e2a\u4e8b\u4ef6\u6700\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u67e5\u770b\u4e8b\u4ef6\u8be6\u60c5\u3002

          \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u5bfc\u51fa \u6309\u94ae\uff0c\u53ef\u4ee5\u6309 CSV \u548c Excel \u683c\u5f0f\u5bfc\u51fa\u5f53\u524d\u6240\u9009\u65f6\u95f4\u8303\u56f4\u5185\u7684\u7cfb\u7edf\u64cd\u4f5c\u65e5\u5fd7\u3002

          "},{"location":"admin/ghippo/audit/audit-log.html#_6","title":"\u8bbe\u7f6e","text":"

          \u5728 \u8bbe\u7f6e \u9875\u7b7e\u4e2d\uff0c\u60a8\u53ef\u4ee5\u6e05\u7406\u7528\u6237\u64cd\u4f5c\u548c\u7cfb\u7edf\u64cd\u4f5c\u7684\u5ba1\u8ba1\u65e5\u5fd7\u3002

          \u53ef\u4ee5\u624b\u52a8\u6e05\u7406\uff0c\u5efa\u8bae\u6e05\u7406\u524d\u5148\u5bfc\u51fa\u5e76\u4fdd\u5b58\u3002\u4e5f\u53ef\u4ee5\u8bbe\u7f6e\u65e5\u5fd7\u7684\u6700\u957f\u4fdd\u5b58\u65f6\u95f4\u5b9e\u73b0\u81ea\u52a8\u6e05\u7406\u3002

          Note

          \u5ba1\u8ba1\u65e5\u5fd7\u4e2d\u4e0e Kubernetes \u76f8\u5173\u7684\u65e5\u5fd7\u8bb0\u5f55\u7531\u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u63d0\u4f9b\uff0c\u4e3a\u51cf\u8f7b\u5ba1\u8ba1\u65e5\u5fd7\u7684\u5b58\u50a8\u538b\u529b\uff0c\u5168\u5c40\u7ba1\u7406\u9ed8\u8ba4\u4e0d\u91c7\u96c6 Kubernetes \u76f8\u5173\u65e5\u5fd7\u3002 \u5982\u9700\u8bb0\u5f55\u8bf7\u53c2\u9605\u5f00\u542f K8s \u5ba1\u8ba1\u65e5\u5fd7\u3002\u5f00\u542f\u540e\u7684\u6e05\u7406\u529f\u80fd\u4e0e\u5168\u5c40\u7ba1\u7406\u7684\u6e05\u7406\u529f\u80fd\u4e00\u81f4\uff0c\u4f46\u4e92\u4e0d\u5f71\u54cd\u3002

          "},{"location":"admin/ghippo/audit/open-audit.html","title":"\u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7","text":"
          • \u751f\u6210 K8s \u5ba1\u8ba1\u65e5\u5fd7\uff1aK8s \u672c\u8eab\u751f\u6210\u7684\u5ba1\u8ba1\u65e5\u5fd7\uff0c\u5f00\u542f\u8be5\u529f\u80fd\u540e\uff0c\u4f1a\u5728\u6307\u5b9a\u76ee\u5f55\u4e0b\u751f\u6210 K8s \u5ba1\u8ba1\u65e5\u5fd7\u7684\u65e5\u5fd7\u6587\u4ef6
          • \u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7\uff1a\u901a\u8fc7 insight-agent \u91c7\u96c6\u4e0a\u8ff0 \u2018K8s \u5ba1\u8ba1\u65e5\u5fd7\u2019\u7684\u65e5\u5fd7\u6587\u4ef6\uff0c\u2019\u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7\u2018 \u7684\u524d\u63d0\u6761\u4ef6\u662f\uff1a
            • \u96c6\u7fa4\u751f\u6210\u4e86 \u2018K8s \u5ba1\u8ba1\u65e5\u5fd7\u2018
            • \u65e5\u5fd7\u8f93\u51fa\u5f00\u5173\u5df2\u6253\u5f00
            • \u65e5\u5fd7\u91c7\u96c6\u5f00\u5173\u5df2\u6253\u5f00
          "},{"location":"admin/ghippo/audit/open-audit.html#ai","title":"\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5b89\u88c5\u5b8c\u6210\u65f6\u72b6\u6001","text":"
          • \u7ba1\u7406\u96c6\u7fa4\u7684 K8s \u5ba1\u8ba1\u65e5\u5fd7\u5f00\u5173\u9ed8\u8ba4\u5f00\u542f
          • \u7ba1\u7406\u96c6\u7fa4\u7684\u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7\u5f00\u5173\u9ed8\u8ba4\u5173\u95ed
            • \u9ed8\u8ba4\u8bbe\u7f6e\u4e0d\u652f\u6301\u914d\u7f6e
          "},{"location":"admin/ghippo/audit/open-audit.html#k8s_1","title":"\u7ba1\u7406\u96c6\u7fa4\u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7\u5f00\u5173","text":""},{"location":"admin/ghippo/audit/open-audit.html#k8s_2","title":"\u786e\u8ba4\u662f\u5426\u5f00\u542f\u4e86 K8s \u5ba1\u8ba1\u65e5\u5fd7","text":"

          \u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u67e5\u770b /var/log/kubernetes/audit \u76ee\u5f55\u4e0b\u662f\u5426\u6709\u5ba1\u8ba1\u65e5\u5fd7\u751f\u6210\u3002 \u82e5\u6709\uff0c\u5219\u8868\u793a K8s \u5ba1\u8ba1\u65e5\u5fd7\u6210\u529f\u5f00\u542f\u3002

          ls /var/log/kubernetes/audit\n

          If they are not enabled, see generating K8s audit logs.

          "},{"location":"admin/ghippo/audit/open-audit.html#k8s_3","title":"Procedure for enabling K8s audit log collection","text":"
          1. Add chartmuseum to the helm repo

            helm repo add chartmuseum http://10.5.14.30:8081\n

            Change the IP in this command to the IP address of the seed (bootstrap) node.

            Note

            If you use a self-hosted Harbor registry, change the chart repo address in step one to the insight-agent chart address of your own registry.

          2. Save the current insight-agent helm values

            helm get values insight-agent -n insight-system -o yaml > insight-agent-values-bak.yaml\n
          3. Get the current version number ${insight_version_code}

            insight_version_code=$(helm list -n insight-system | grep insight-agent | awk '{print $10}')\n
          4. Update the helm values configuration

            helm upgrade --install --create-namespace --version ${insight_version_code} --cleanup-on-fail insight-agent chartmuseum/insight-agent -n insight-system -f insight-agent-values-bak.yaml --set global.exporters.auditLog.kubeAudit.enabled=true\n
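
            To confirm the switch took effect, you can read the values back; a minimal check (the grep pattern is only illustrative):

            helm get values insight-agent -n insight-system -o yaml | grep -B 1 -A 2 kubeAudit\n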
          5. Restart all fluent-bit pods under insight-system

            fluent_pod=$(kubectl get pod -n insight-system | grep insight-agent-fluent-bit | awk '{print $1}' | xargs)\nkubectl delete pod ${fluent_pod} -n insight-system\n
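
            After the pods are deleted, you can watch the replacements come back up; a minimal check:

            kubectl get pod -n insight-system | grep insight-agent-fluent-bit\n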
          "},{"location":"admin/ghippo/audit/open-audit.html#k8s_4","title":"\u5173\u95ed\u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7","text":"

          \u5176\u4f59\u6b65\u9aa4\u548c\u5f00\u542f\u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7\u4e00\u81f4\uff0c\u4ec5\u9700\u4fee\u6539\u4e0a\u4e00\u8282\u4e2d\u7b2c 4 \u6b65\uff1a\u66f4\u65b0 helm value \u914d\u7f6e\u3002

          helm upgrade --install --create-namespace --version ${insight_version_code} --cleanup-on-fail insight-agent chartmuseum/insight-agent -n insight-system -f insight-agent-values-bak.yaml --set global.exporters.auditLog.kubeAudit.enabled=false\n
          "},{"location":"admin/ghippo/audit/open-audit.html#_1","title":"\u5de5\u4f5c\u96c6\u7fa4\u5f00\u5173","text":"

          \u5404\u5de5\u4f5c\u96c6\u7fa4\u5f00\u5173\u72ec\u7acb\uff0c\u6309\u9700\u5f00\u542f\u3002

          "},{"location":"admin/ghippo/audit/open-audit.html#_2","title":"\u521b\u5efa\u96c6\u7fa4\u65f6\u6253\u5f00\u91c7\u96c6\u5ba1\u8ba1\u65e5\u5fd7\u6b65\u9aa4","text":"

          \u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7\u529f\u80fd\u9ed8\u8ba4\u4e3a\u5173\u95ed\u72b6\u6001\u3002\u82e5\u9700\u8981\u5f00\u542f\uff0c\u53ef\u4ee5\u6309\u7167\u5982\u4e0b\u6b65\u9aa4\uff1a

          \u5c06\u8be5\u6309\u94ae\u8bbe\u7f6e\u4e3a\u542f\u7528\u72b6\u6001\uff0c\u5f00\u542f\u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7\u529f\u80fd\u3002

          \u901a\u8fc7\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4\u65f6\uff0c\u786e\u8ba4\u8be5\u96c6\u7fa4\u7684 K8s \u5ba1\u8ba1\u65e5\u5fd7\u9009\u62e9 \u2018true'\uff0c\u8fd9\u6837\u521b\u5efa\u51fa\u6765\u7684\u5de5\u4f5c\u96c6\u7fa4 K8s \u5ba1\u8ba1\u65e5\u5fd7\u662f\u5f00\u542f\u7684\u3002

          \u7b49\u5f85\u96c6\u7fa4\u521b\u5efa\u6210\u529f\u540e\uff0c\u8be5\u5de5\u4f5c\u96c6\u7fa4\u7684 K8s \u5ba1\u8ba1\u65e5\u5fd7\u5c06\u88ab\u91c7\u96c6\u3002

          "},{"location":"admin/ghippo/audit/open-audit.html#_3","title":"\u63a5\u5165\u7684\u96c6\u7fa4\u548c\u521b\u5efa\u5b8c\u6210\u540e\u5f00\u5173\u6b65\u9aa4","text":""},{"location":"admin/ghippo/audit/open-audit.html#k8s_5","title":"\u786e\u8ba4\u5f00\u542f K8s \u5ba1\u8ba1\u65e5\u5fd7","text":"

          \u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u67e5\u770b /var/log/kubernetes/audit \u76ee\u5f55\u4e0b\u662f\u5426\u6709\u5ba1\u8ba1\u65e5\u5fd7\u751f\u6210\uff0c\u82e5\u6709\uff0c\u5219\u8868\u793a K8s \u5ba1\u8ba1\u65e5\u5fd7\u6210\u529f\u5f00\u542f\u3002

          ls /var/log/kubernetes/audit\n

          If they are not enabled, refer to the documentation on enabling/disabling K8s audit logs.

          "},{"location":"admin/ghippo/audit/open-audit.html#k8s_6","title":"Enable K8s audit log collection","text":"

          The collect-K8s-audit-logs feature is disabled by default. To enable it, follow these steps:

          1. Select the integrated cluster for which K8s audit log collection should be enabled

          2. Go to the helm application management page and update the insight-agent configuration (if insight-agent is not installed, you can install it)

          3. Toggle the collect-K8s-audit-logs switch on/off

          4. For integrated clusters, after toggling the switch you still need to restart the fluent-bit pods for it to take effect

          "},{"location":"admin/ghippo/audit/open-k8s-audit.html","title":"\u751f\u6210 K8s \u5ba1\u8ba1\u65e5\u5fd7","text":"

          \u9ed8\u8ba4 Kubernetes \u96c6\u7fa4\u4e0d\u4f1a\u751f\u6210\u5ba1\u8ba1\u65e5\u5fd7\u4fe1\u606f\u3002\u901a\u8fc7\u4ee5\u4e0b\u914d\u7f6e\uff0c\u53ef\u4ee5\u5f00\u542f Kubernetes \u7684\u5ba1\u8ba1\u65e5\u5fd7\u529f\u80fd\u3002

          Note

          \u516c\u6709\u4e91\u73af\u5883\u4e2d\u53ef\u80fd\u65e0\u6cd5\u63a7\u5236 Kubernetes \u5ba1\u8ba1\u65e5\u5fd7\u8f93\u51fa\u53ca\u8f93\u51fa\u8def\u5f84\u3002

          1. \u51c6\u5907\u5ba1\u8ba1\u65e5\u5fd7\u7684 Policy \u6587\u4ef6
          2. \u914d\u7f6e API \u670d\u52a1\u5668\uff0c\u5f00\u542f\u5ba1\u8ba1\u65e5\u5fd7
          3. \u91cd\u542f\u5e76\u9a8c\u8bc1
          "},{"location":"admin/ghippo/audit/open-k8s-audit.html#policy","title":"\u51c6\u5907\u5ba1\u8ba1\u65e5\u5fd7 Policy \u6587\u4ef6","text":"\u70b9\u51fb\u67e5\u770b\u5ba1\u8ba1\u65e5\u5fd7 Policy YAML \u6587\u4ef6 policy.yaml
          apiVersion: audit.k8s.io/v1\nkind: Policy\nrules:\n# The following requests were manually identified as high-volume and low-risk,\n# so drop them.\n- level: None\n  users: [\"system:kube-proxy\"]\n  verbs: [\"watch\"]\n  resources:\n  - group: \"\" # core\n    resources: [\"endpoints\", \"services\", \"services/status\"]\n- level: None\n  # Ingress controller reads `configmaps/ingress-uid` through the unsecured port.\n  # TODO(#46983): Change this to the ingress controller service account.\n  users: [\"system:unsecured\"]\n  namespaces: [\"kube-system\"]\n  verbs: [\"get\"]\n  resources:\n  - group: \"\" # core\n    resources: [\"configmaps\"]\n- level: None\n  users: [\"kubelet\"] # legacy kubelet identity\n  verbs: [\"get\"]\n  resources:\n  - group: \"\" # core\n    resources: [\"nodes\", \"nodes/status\"]\n- level: None\n  userGroups: [\"system:nodes\"]\n  verbs: [\"get\"]\n  resources:\n  - group: \"\" # core\n    resources: [\"nodes\", \"nodes/status\"]\n- level: None\n  users:\n  - system:kube-controller-manager\n  - system:kube-scheduler\n  - system:serviceaccount:kube-system:endpoint-controller\n  verbs: [\"get\", \"update\"]\n  namespaces: [\"kube-system\"]\n  resources:\n  - group: \"\" # core\n    resources: [\"endpoints\"]\n- level: None\n  users: [\"system:apiserver\"]\n  verbs: [\"get\"]\n  resources:\n  - group: \"\" # core\n    resources: [\"namespaces\", \"namespaces/status\", \"namespaces/finalize\"]\n# Don't log HPA fetching metrics.\n- level: None\n  users:\n  - system:kube-controller-manager\n  verbs: [\"get\", \"list\"]\n  resources:\n  - group: \"metrics.k8s.io\"\n# Don't log these read-only URLs.\n- level: None\n  nonResourceURLs:\n  - /healthz*\n  - /version\n  - /swagger*\n# Don't log events requests.\n- level: None\n  resources:\n  - group: \"\" # core\n    resources: [\"events\"]\n# Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,\n# so only log at the Metadata level.\n- level: Metadata\n  resources:\n  - group: \"\" # core\n    resources: [\"secrets\", \"configmaps\", \"serviceaccounts/token\"]\n  - group: authentication.k8s.io\n    resources: [\"tokenreviews\"]\n  omitStages:\n  - \"RequestReceived\"\n# Get responses can be large; skip them.\n- level: Request\n  verbs: [\"get\", \"list\", \"watch\"]\n  resources:\n  - group: \"\" # core\n  - group: \"admissionregistration.k8s.io\"\n  - group: \"apiextensions.k8s.io\"\n  - group: \"apiregistration.k8s.io\"\n  - group: \"apps\"\n  - group: \"authentication.k8s.io\"\n  - group: \"authorization.k8s.io\"\n  - group: \"autoscaling\"\n  - group: \"batch\"\n  - group: \"certificates.k8s.io\"\n  - group: \"extensions\"\n  - group: \"metrics.k8s.io\"\n  - group: \"networking.k8s.io\"\n  - group: \"policy\"\n  - group: \"rbac.authorization.k8s.io\"\n  - group: \"settings.k8s.io\"\n  - group: \"storage.k8s.io\"\n  omitStages:\n  - \"RequestReceived\"\n# Default level for known APIs\n- level: RequestResponse\n  resources:\n  - group: \"\" # core\n  - group: \"admissionregistration.k8s.io\"\n  - group: \"apiextensions.k8s.io\"\n  - group: \"apiregistration.k8s.io\"\n  - group: \"apps\"\n  - group: \"authentication.k8s.io\"\n  - group: \"authorization.k8s.io\"\n  - group: \"autoscaling\"\n  - group: \"batch\"\n  - group: \"certificates.k8s.io\"\n  - group: \"extensions\"\n  - group: \"metrics.k8s.io\"\n  - group: \"networking.k8s.io\"\n  - group: \"policy\"\n  - group: \"rbac.authorization.k8s.io\"\n  - group: \"settings.k8s.io\"\n  - group: \"storage.k8s.io\"\n  omitStages:\n  - \"RequestReceived\"\n# Default level for all other requests.\n- level: Metadata\n  omitStages:\n  - \"RequestReceived\"\n

          Place the audit policy file above into the /etc/kubernetes/audit-policy/ folder and name it apiserver-audit-policy.yaml.
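
          For example, assuming the policy above was saved locally as policy.yaml (a placeholder filename), placing it could look like this sketch:

          mkdir -p /etc/kubernetes/audit-policy\ncp policy.yaml /etc/kubernetes/audit-policy/apiserver-audit-policy.yaml\n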

          "},{"location":"admin/ghippo/audit/open-k8s-audit.html#api","title":"\u914d\u7f6e API \u670d\u52a1\u5668","text":"

          \u6253\u5f00 API \u670d\u52a1\u5668\u7684\u914d\u7f6e\u6587\u4ef6 kube-apiserver.yaml \uff0c\u4e00\u822c\u4f1a\u5728 /etc/kubernetes/manifests/ \u6587\u4ef6\u5939\u4e0b\uff0c\u5e76\u6dfb\u52a0\u4ee5\u4e0b\u914d\u7f6e\u4fe1\u606f\uff1a

          \u8fd9\u4e00\u6b65\u64cd\u4f5c\u524d\u8bf7\u5907\u4efd kube-apiserver.yaml \uff0c\u5e76\u4e14\u5907\u4efd\u7684\u6587\u4ef6\u4e0d\u80fd\u653e\u5728 /etc/kubernetes/manifests/ \u4e0b\uff0c\u5efa\u8bae\u653e\u5728 /etc/kubernetes/tmp \u3002
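
          A minimal sketch of that backup step, using the paths given above (the backup filename is arbitrary):

          mkdir -p /etc/kubernetes/tmp\ncp /etc/kubernetes/manifests/kube-apiserver.yaml /etc/kubernetes/tmp/kube-apiserver.yaml.bak\n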

          1. Add the following flags under spec.containers.command:

            --audit-log-maxage=30\n--audit-log-maxbackup=10\n--audit-log-maxsize=100\n--audit-log-path=/var/log/audit/kube-apiserver-audit.log\n--audit-policy-file=/etc/kubernetes/audit-policy/apiserver-audit-policy.yaml\n
          2. Add the following under spec.containers.volumeMounts:

            - mountPath: /var/log/audit\n  name: audit-logs\n- mountPath: /etc/kubernetes/audit-policy\n  name: audit-policy\n
          3. Add the following under spec.volumes:

            - hostPath:\n    path: /var/log/kubernetes/audit\n    type: \"\"\n  name: audit-logs\n- hostPath:\n    path: /etc/kubernetes/audit-policy\n    type: \"\"\n  name: audit-policy\n
          "},{"location":"admin/ghippo/audit/open-k8s-audit.html#_1","title":"\u6d4b\u8bd5\u5e76\u9a8c\u8bc1","text":"

          \u7a0d\u7b49\u4e00\u4f1a\uff0cAPI \u670d\u52a1\u5668\u4f1a\u81ea\u52a8\u91cd\u542f\uff0c\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u67e5\u770b /var/log/kubernetes/audit \u76ee\u5f55\u4e0b\u662f\u5426\u6709\u5ba1\u8ba1\u65e5\u5fd7\u751f\u6210\uff0c\u82e5\u6709\uff0c\u5219\u8868\u793a K8s \u5ba1\u8ba1\u65e5\u5fd7\u6210\u529f\u5f00\u542f\u3002

          ls /var/log/kubernetes/audit\n
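
          Given the --audit-log-path flag and the hostPath mapping configured above, the log file on the host should be /var/log/kubernetes/audit/kube-apiserver-audit.log; you can tail it to watch events arrive in real time:

          tail -f /var/log/kubernetes/audit/kube-apiserver-audit.log\n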

          To turn it off, simply remove the related flags from spec.containers.command.

          "},{"location":"admin/ghippo/audit/source-ip.html","title":"\u5ba1\u8ba1\u65e5\u5fd7\u83b7\u53d6\u6e90 IP","text":"

          \u5ba1\u8ba1\u65e5\u5fd7\u6e90 IP \u5728\u7cfb\u7edf\u548c\u7f51\u7edc\u7ba1\u7406\u4e2d\u626e\u6f14\u7740\u5173\u952e\u89d2\u8272\uff0c\u5b83\u6709\u52a9\u4e8e\u8ffd\u8e2a\u6d3b\u52a8\u3001\u7ef4\u62a4\u5b89\u5168\u3001\u89e3\u51b3\u95ee\u9898\u5e76\u786e\u4fdd\u7cfb\u7edf\u5408\u89c4\u6027\u3002 \u4f46\u662f\u83b7\u53d6\u6e90 IP \u4f1a\u5e26\u6765\u4e00\u5b9a\u7684\u6027\u80fd\u635f\u8017\uff0c\u6240\u4ee5\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u5ba1\u8ba1\u65e5\u5fd7\u5e76\u4e0d\u603b\u662f\u5f00\u542f\u7684\uff0c \u5728\u4e0d\u540c\u7684\u5b89\u88c5\u6a21\u5f0f\u4e0b\uff0c\u5ba1\u8ba1\u65e5\u5fd7\u6e90 IP \u7684\u9ed8\u8ba4\u5f00\u542f\u60c5\u51b5\u4e0d\u540c\uff0c\u5e76\u4e14\u5f00\u542f\u7684\u65b9\u5f0f\u4e0d\u540c\u3002 \u4e0b\u9762\u4f1a\u6839\u636e\u5b89\u88c5\u6a21\u5f0f\u5206\u522b\u4ecb\u7ecd\u5ba1\u8ba1\u65e5\u5fd7\u6e90 IP \u7684\u9ed8\u8ba4\u5f00\u542f\u60c5\u51b5\u4ee5\u53ca\u5982\u4f55\u5f00\u542f\u3002

          Note

          \u5f00\u542f\u5ba1\u8ba1\u65e5\u5fd7\u4f1a\u4fee\u6539 istio-ingressgateway \u7684\u526f\u672c\u6570\uff0c\u5e26\u6765\u4e00\u5b9a\u7684\u6027\u80fd\u635f\u8017\u3002 \u5f00\u542f\u5ba1\u8ba1\u65e5\u5fd7\u9700\u8981\u5173\u95ed kube-proxy \u7684\u8d1f\u8f7d\u5747\u8861\u4ee5\u53ca\u62d3\u6251\u611f\u77e5\u8def\u7531\uff0c\u4f1a\u5bf9\u96c6\u7fa4\u6027\u80fd\u4ea7\u751f\u4e00\u5b9a\u7684\u5f71\u54cd\u3002 \u5f00\u542f\u5ba1\u8ba1\u65e5\u5fd7\u540e\uff0c\u8bbf\u95eeIP\u6240\u5bf9\u5e94\u7684\u8282\u70b9\u4e0a\u5fc5\u987b\u4fdd\u8bc1\u5b58\u5728 istio-ingressgateway \uff0c\u82e5\u56e0\u4e3a\u8282\u70b9\u5065\u5eb7\u6216\u5176\u4ed6\u95ee\u9898\u5bfc\u81f4 istio-ingressgateway \u53d1\u751f\u6f02\u79fb\uff0c\u9700\u8981\u624b\u52a8\u8c03\u5ea6\u56de\u8be5\u8282\u70b9\uff0c\u5426\u5219\u4f1a\u5f71\u54cd\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u6b63\u5e38\u4f7f\u7528\u3002
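
          To check which node istio-ingressgateway is currently running on (useful when verifying it sits on the node behind the access IP), a minimal sketch assuming the standard app=istio-ingressgateway label:

          kubectl get pod -n istio-system -l app=istio-ingressgateway -o wide\n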

          "},{"location":"admin/ghippo/audit/source-ip.html#_1","title":"\u5224\u65ad\u5b89\u88c5\u6a21\u5f0f\u7684\u65b9\u6cd5","text":"
          kubectl get pod -n metallb-system\n

          Run the command above in the cluster. If it returns the following, the cluster is not in MetalLB installation mode:

          No resources found in metallb-system namespace.\n
          "},{"location":"admin/ghippo/audit/source-ip.html#nodeport","title":"NodePort \u5b89\u88c5\u6a21\u5f0f","text":"

          \u8be5\u6a21\u5f0f\u5b89\u88c5\u4e0b\uff0c\u5ba1\u8ba1\u65e5\u5fd7\u6e90 IP \u9ed8\u8ba4\u662f\u5173\u95ed\u7684\uff0c\u5f00\u542f\u6b65\u9aa4\u5982\u4e0b\uff1a

          1. \u8bbe\u7f6e istio-ingressgateway \u7684 HPA \u7684\u6700\u5c0f\u526f\u672c\u6570\u4e3a\u63a7\u5236\u9762\u8282\u70b9\u6570

            # Count control-plane nodes (subtract 1 for the kubectl output header line)\ncount=$(kubectl get nodes --selector=node-role.kubernetes.io/control-plane | wc -l)\ncount=$((count-1))\n\n# Raise the HPA minimum replicas to the control-plane node count\nkubectl patch hpa istio-ingressgateway -n istio-system -p '{\"spec\":{\"minReplicas\":'$count'}}'\n
          2. Set the istio-ingressgateway service's externalTrafficPolicy and internalTrafficPolicy values to \"Local\"

            kubectl patch svc istio-ingressgateway -n istio-system -p '{\"spec\":{\"externalTrafficPolicy\":\"Local\",\"internalTrafficPolicy\":\"Local\"}}'\n
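
            A quick way to confirm both patches took effect, reading back the fields patched above:

            kubectl -n istio-system get hpa istio-ingressgateway -o jsonpath='{.spec.minReplicas}'\nkubectl -n istio-system get svc istio-ingressgateway -o jsonpath='{.spec.externalTrafficPolicy} {.spec.internalTrafficPolicy}'\n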
          "},{"location":"admin/ghippo/audit/source-ip.html#metallb","title":"MetalLB \u5b89\u88c5\u6a21\u5f0f","text":"

          \u8be5\u6a21\u5f0f\u4e0b\u5b89\u88c5\u5b8c\u6210\u540e\uff0c\u4f1a\u9ed8\u8ba4\u83b7\u53d6\u5ba1\u8ba1\u65e5\u5fd7\u6e90 IP\u3002

          "},{"location":"admin/ghippo/audit/gproduct-audit/ghippo.html","title":"\u5168\u5c40\u7ba1\u7406\u5ba1\u8ba1\u9879\u6c47\u603b","text":"\u4e8b\u4ef6\u540d\u79f0 \u8d44\u6e90\u7c7b\u578b \u5907\u6ce8 \u4fee\u6539\u7528\u6237email\uff1aUpdateEmail-Account Account \u4fee\u6539\u7528\u6237\u5bc6\u7801\uff1aUpdatePassword-Account Account \u521b\u5efask\uff1aCreateAccessKeys-Account Account \u4fee\u6539sk\uff1aUpdateAccessKeys-Account Account \u5220\u9664sk\uff1aDeleteAccessKeys-Account Account \u521b\u5efa\u7528\u6237\uff1aCreate-User User \u5220\u9664\u7528\u6237\uff1aDelete-User User \u66f4\u65b0\u7528\u6237\u4fe1\u606f\uff1aUpdate-User User \u66f4\u65b0\u7528\u6237\u89d2\u8272\uff1a UpdateRoles-User User \u8bbe\u7f6e\u7528\u6237\u5bc6\u7801\uff1a UpdatePassword-User User \u521b\u5efa\u7528\u6237\u5bc6\u94a5\uff1a CreateAccessKeys-User User \u66f4\u65b0\u7528\u6237\u5bc6\u94a5\uff1a UpdateAccessKeys-User User \u5220\u9664\u7528\u6237\u5bc6\u94a5\uff1aDeleteAccessKeys-User User \u521b\u5efa\u7528\u6237\u7ec4\uff1aCreate-Group Group \u5220\u9664\u7528\u6237\u7ec4\uff1aDelete-Group Group \u66f4\u65b0\u7528\u6237\u7ec4\uff1aUpdate-Group Group \u6dfb\u52a0\u7528\u6237\u81f3\u7528\u6237\u7ec4\uff1aAddUserTo-Group Group \u4ece\u7528\u6237\u7ec4\u5220\u9664\u7528\u6237\uff1a RemoveUserFrom-Group Group \u66f4\u65b0\u7528\u6237\u7ec4\u89d2\u8272\uff1a UpdateRoles-Group Group \u89d2\u8272\u5173\u8054\u7528\u6237\uff1aUpdateRoles-User User \u521b\u5efaLdap \uff1aCreate-LADP LADP \u66f4\u65b0Ldap\uff1aUpdate-LADP LADP \u5220\u9664Ldap \uff1a Delete-LADP LADP OIDC\u6ca1\u6709\u8d70APIserver\u5ba1\u8ba1\u4e0d\u5230 \u767b\u5f55\uff1aLogin-User User \u767b\u51fa\uff1aLogout-User User \u8bbe\u7f6e\u5bc6\u7801\u7b56\u7565\uff1aUpdatePassword-SecurityPolicy SecurityPolicy \u8bbe\u7f6e\u4f1a\u8bdd\u8d85\u65f6\uff1aUpdateSessionTimeout-SecurityPolicy SecurityPolicy \u8bbe\u7f6e\u8d26\u53f7\u9501\u5b9a\uff1aUpdateAccountLockout-SecurityPolicy SecurityPolicy \u8bbe\u7f6e\u81ea\u52a8\u767b\u51fa\uff1aUpdateLogout-SecurityPolicy SecurityPolicy \u90ae\u4ef6\u670d\u52a1\u5668\u8bbe\u7f6e MailServer-SecurityPolicy SecurityPolicy \u5916\u89c2\u5b9a\u5236 CustomAppearance-SecurityPolicy SecurityPolicy \u6b63\u7248\u6388\u6743 OfficialAuthz-SecurityPolicy SecurityPolicy \u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4\uff1aCreate-Workspace Workspace \u5220\u9664\u5de5\u4f5c\u7a7a\u95f4\uff1aDelete-Workspace Workspace \u7ed1\u5b9a\u8d44\u6e90\uff1aBindResourceTo-Workspace Workspace \u89e3\u7ed1\u8d44\u6e90\uff1aUnBindResource-Workspace Workspace \u7ed1\u5b9a\u5171\u4eab\u8d44\u6e90\uff1aBindShared-Workspace Workspace \u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff1aSetQuota-Workspace Workspace \u5de5\u4f5c\u7a7a\u95f4\u6388\u6743\uff1aAuthorize-Workspace Workspace \u5220\u9664\u6388\u6743 DeAuthorize-Workspace Workspace \u7f16\u8f91\u6388\u6743 UpdateDeAuthorize-Workspace Workspace \u66f4\u65b0\u5de5\u4f5c\u7a7a\u95f4 Update-Workspace Workspace \u521b\u5efa\u6587\u4ef6\u5939\uff1aCreate-Folder Folder \u5220\u9664\u6587\u4ef6\u5939\uff1aDelete-Folder Folder \u7f16\u8f91\u6587\u4ef6\u5939\u6388\u6743\uff1aUpdateAuthorize-Folder Folder \u66f4\u65b0\u6587\u4ef6\u5939\uff1aUpdate-Folder Folder \u65b0\u589e\u6587\u4ef6\u5939\u6388\u6743\uff1aAuthorize-Folder Folder \u5220\u9664\u6587\u4ef6\u5939\u6388\u6743\uff1aDeAuthorize-Folder Folder \u8bbe\u7f6e\u5ba1\u8ba1\u65e5\u5fd7\u81ea\u52a8\u6e05\u7406\uff1aAutoCleanup-Audit Audit \u624b\u52a8\u6e05\u7406\u5ba1\u8ba1\u65e5\u5fd7\uff1aManualCleanup-Audit Audit 
\u5bfc\u51fa\u5ba1\u8ba1\u65e5\u5fd7\uff1aExport-Audit Audit"},{"location":"admin/ghippo/audit/gproduct-audit/insight.html","title":"\u53ef\u89c2\u6d4b\u6027\u5ba1\u8ba1\u9879\u6c47\u603b","text":"\u4e8b\u4ef6\u540d\u79f0 \u8d44\u6e90\u7c7b\u578b \u5907\u6ce8 \u521b\u5efa\u62e8\u6d4b\u4efb\u52a1\uff1aCreate-ProbeJob ProbeJob \u7f16\u8f91\u62e8\u6d4b\u4efb\u52a1\uff1aUpdate-ProbeJob ProbeJob \u5220\u9664\u62e8\u6d4b\u4efb\u52a1\uff1aDelete-ProbeJob ProbeJob \u521b\u5efa\u544a\u8b66\u7b56\u7565\uff1aCreate-AlertPolicy AlertPolicy \u7f16\u8f91\u544a\u8b66\u7b56\u7565\uff1aUpdate-AlertPolicy AlertPolicy \u5220\u9664\u544a\u8b66\u7b56\u7565\uff1aDelete-AlertPolicy AlertPolicy \u5bfc\u5165\u544a\u8b66\u7b56\u7565\uff1aImport-AlertPolicy AlertPolicy \u5728\u544a\u8b66\u7b56\u7565\u4e2d\u6dfb\u52a0\u89c4\u5219\uff1aCreate-AlertRule AlertRule \u5728\u544a\u8b66\u7b56\u7565\u4e2d\u7f16\u8f91\u89c4\u5219\uff1aUpdate-AlertRule AlertRule \u5728\u544a\u8b66\u7b56\u7565\u4e2d\u5220\u9664\u89c4\u5219\uff1aDelete-AlertRule AlertRule \u521b\u5efa\u544a\u8b66\u6a21\u677f\uff1aCreate-RuleTemplate RuleTemplate \u7f16\u8f91\u544a\u8b66\u6a21\u677f\uff1aUpdate-RuleTemplate RuleTemplate \u5220\u9664\u544a\u8b66\u6a21\u677f\uff1aDelete-RuleTemplate RuleTemplate \u521b\u5efa\u90ae\u7bb1\u7ec4\uff1aCreate-email email \u7f16\u8f91\u90ae\u7bb1\u7ec4\uff1aUpdate-email email \u5220\u9664\u90ae\u7bb1\u7ec4\uff1aDelete-Receiver Receiver \u521b\u5efa\u9489\u9489\u673a\u5668\u4eba\uff1aCreate-dingtalk dingtalk \u7f16\u8f91\u9489\u9489\u673a\u5668\u4eba\uff1aUpdate-dingtalk dingtalk \u5220\u9664\u9489\u9489\u673a\u5668\u4eba\uff1aDelete-Receiver Receiver \u521b\u5efa\u4f01\u5fae\u673a\u5668\u4eba\uff1aCreate-wecom wecom \u7f16\u8f91\u4f01\u5fae\u673a\u5668\u4eba\uff1aUpdate-wecom wecom \u5220\u9664\u4f01\u5fae\u673a\u5668\u4eba\uff1aDelete-Receiver Receiver \u521b\u5efa Webhook\uff1aCreate-webhook webhook \u7f16\u8f91 Webhook\uff1aUpdate-webhook webhook \u5220\u9664 Webhook\uff1aDelete-Receiver Receiver \u521b\u5efa SMS\uff1aCreate-sms sms \u7f16\u8f91 SMS\uff1aUpdate-sms sms \u5220\u9664 SMS\uff1aDelete-Receiver Receiver \u521b\u5efa SMS \u670d\u52a1\u5668\uff1aCreate-aliyun(\u6216\u8005\uff1atencent\uff0ccustom) aliyun, tencent, custom \u7f16\u8f91 SMS \u670d\u52a1\u5668\uff1aUpdate-aliyun(\u6216\u8005\uff1atencent\uff0ccustom) aliyun, tencent, custom \u5220\u9664 SMS \u670d\u52a1\u5668\uff1aDelete-SMSserver SMSserver \u521b\u5efa\u6d88\u606f\u6a21\u677f\uff1aCreate-MessageTemplate MessageTemplate \u7f16\u8f91\u6d88\u606f\u6a21\u677f\uff1aUpdate-MessageTemplate MessageTemplate \u5220\u9664\u6d88\u606f\u6a21\u677f\uff1aDelete-MessageTemplate MessageTemplate \u521b\u5efa\u544a\u8b66\u9759\u9ed8\uff1aCreate-AlertSilence AlertSilence \u7f16\u8f91\u544a\u8b66\u9759\u9ed8\uff1aUpdate-AlertSilence AlertSilence \u5220\u9664\u544a\u8b66\u9759\u9ed8\uff1aDelete-AlertSilence AlertSilence \u521b\u5efa\u544a\u8b66\u6291\u5236\u89c4\u5219\uff1aCreate-AlertInhibition AlertInhibition \u7f16\u8f91\u544a\u8b66\u6291\u5236\u89c4\u5219\uff1aUpdate-AlertInhibition AlertInhibition \u5220\u9664\u544a\u8b66\u6291\u5236\u89c4\u5219\uff1aDelete-AlertInhibition AlertInhibition \u66f4\u65b0\u7cfb\u7edf\u914d\u7f6e\uff1aUpdate-SystemSettings SystemSettings"},{"location":"admin/ghippo/audit/gproduct-audit/kpanda.html","title":"\u5bb9\u5668\u7ba1\u7406\u5ba1\u8ba1\u9879\u6c47\u603b","text":"\u4e8b\u4ef6\u540d\u79f0 \u8d44\u6e90\u7c7b\u578b \u521b\u5efa\u96c6\u7fa4\uff1aCreate-Cluster Cluster \u5378\u8f7d\u96c6\u7fa4\uff1aDelete-Cluster Cluster 
\u63a5\u5165\u96c6\u7fa4\uff1aIntegrate-Cluster Cluster \u89e3\u9664\u63a5\u5165\u7684\u96c6\u7fa4\uff1aRemove-Cluster Cluster \u96c6\u7fa4\u5347\u7ea7\uff1aUpgrade-Cluster Cluster \u96c6\u7fa4\u63a5\u5165\u8282\u70b9\uff1aIntegrate-Node Node \u96c6\u7fa4\u8282\u70b9\u79fb\u9664\uff1aRemove-Node Node \u96c6\u7fa4\u8282\u70b9 GPU \u6a21\u5f0f\u5207\u6362\uff1aUpdate-NodeGPUMode NodeGPUMode helm\u4ed3\u5e93\u521b\u5efa\uff1aCreate-HelmRepo HelmRepo helm\u5e94\u7528\u90e8\u7f72\uff1aCreate-HelmApp HelmApp helm\u5e94\u7528\u5220\u9664\uff1aDelete-HelmApp HelmApp \u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\uff1aCreate-Deployment Deployment \u5220\u9664\u65e0\u72b6\u6001\u8d1f\u8f7d\uff1aDelete-Deployment Deployment \u521b\u5efa\u5b88\u62a4\u8fdb\u7a0b\uff1aCreate-DaemonSet DaemonSet \u5220\u9664\u5b88\u62a4\u8fdb\u7a0b\uff1aDelete-DaemonSet DaemonSet \u521b\u5efa\u6709\u72b6\u6001\u8d1f\u8f7d\uff1aCreate-StatefulSet StatefulSet \u5220\u9664\u6709\u72b6\u6001\u8d1f\u8f7d\uff1aDelete-StatefulSet StatefulSet \u521b\u5efa\u4efb\u52a1\uff1aCreate-Job Job \u5220\u9664\u4efb\u52a1\uff1aDelete-Job Job \u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\uff1aCreate-CronJob CronJob \u5220\u9664\u5b9a\u65f6\u4efb\u52a1\uff1aDelete-CronJob CronJob \u5220\u9664\u5bb9\u5668\u7ec4\uff1aDelete-Pod Pod \u521b\u5efa\u670d\u52a1\uff1aCreate-Service Service \u5220\u9664\u670d\u52a1\uff1aDelete-Service Service \u521b\u5efa\u8def\u7531\uff1aCreate-Ingress Ingress \u5220\u9664\u8def\u7531\uff1aDelete-Ingress Ingress \u521b\u5efa\u5b58\u50a8\u6c60\uff1aCreate-StorageClass StorageClass \u5220\u9664\u5b58\u50a8\u6c60\uff1aDelete-StorageClass StorageClass \u521b\u5efa\u6570\u636e\u5377\uff1aCreate-PersistentVolume PersistentVolume \u5220\u9664\u6570\u636e\u5377\uff1aDelete-PersistentVolume PersistentVolume \u521b\u5efa\u6570\u636e\u5377\u58f0\u660e\uff1aCreate-PersistentVolumeClaim PersistentVolumeClaim \u5220\u9664\u6570\u636e\u5377\u58f0\u660e\uff1aDelete-PersistentVolumeClaim PersistentVolumeClaim \u5220\u9664\u526f\u672c\u96c6\uff1aDelete-ReplicaSet ReplicaSet ns\u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4\uff1aBindResourceTo-Workspace Workspace ns\u89e3\u7ed1\u5de5\u4f5c\u7a7a\u95f4 \uff1aUnBindResource-Workspace Workspace \u96c6\u7fa4\u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4\uff1aBindResourceTo-Workspace Workspace \u96c6\u7fa4\u89e3\u7ed1\u5de5\u4f5c\u7a7a\u95f4\uff1aUnBindResource-Workspace Workspace \u6253\u5f00\u63a7\u5236\u53f0\uff1aCreate-CloudShell CloudShell \u5173\u95ed\u63a7\u5236\u53f0\uff1aDelete-CloudShell CloudShell"},{"location":"admin/ghippo/audit/gproduct-audit/virtnest.html","title":"\u4e91\u4e3b\u673a\u5ba1\u8ba1\u9879\u6c47\u603b","text":"\u4e8b\u4ef6\u540d\u79f0 \u8d44\u6e90\u7c7b\u578b \u5907\u6ce8 \u91cd\u542f\u4e91\u4e3b\u673a\uff1aRestart-VMs VM \u4e91\u4e3b\u673a\u8f6c\u6362\u4e3a\u6a21\u677f\uff1aConvertToTemplate-VMs VM \u7f16\u8f91\u4e91\u4e3b\u673a\uff1aEdit-VMs VM \u66f4\u65b0\u4e91\u4e3b\u673a\uff1aUpdate-VMs VM \u5feb\u7167\u6062\u590d\uff1aRestore-VMs VM \u5f00\u673a\u4e91\u4e3b\u673a\uff1aPower on-VMs VM \u5b9e\u65f6\u8fc1\u79fb\uff1aLiveMigrate-VMs VM \u5220\u9664\u4e91\u4e3b\u673a\uff1aDelete-VMs VM \u5220\u9664\u4e91\u4e3b\u673a\u6a21\u677f\uff1aDelete-VM Template VM Template \u521b\u5efa\u4e91\u4e3b\u673a\uff1aCreate-VMs VM \u521b\u5efa\u5feb\u7167\uff1aCreateSnapshot-VMs VM \u5173\u673a\u4e91\u4e3b\u673a\uff1aPower off-VMs VM \u514b\u9686\u4e91\u4e3b\u673a\uff1aClone-VMs VM"},{"location":"admin/ghippo/best-practice/authz-plan.html","title":"\u666e\u901a\u7528\u6237\u6388\u6743\u89c4\u5212","text":"

          A regular user is one who can use most product modules and features of the AI 算力中心 (management features excluded), has certain operation permissions on resources within their permission scope, and can independently use resources to deploy applications.

          The authorization and resource planning process for such users is shown in the figure below.

          graph TB\n\n    start([Start]) --> user[1. Create a user]\n    user --> ns[2. Prepare a Kubernetes namespace]\n    ns --> ws[3. Prepare a workspace]\n    ws --> ws-to-ns[4. Bind the namespace to the workspace]\n    ws-to-ns --> authu[5. Grant the user Workspace Editor]\n    authu --> complete([End])\n\nclick user \"https://docs.daocloud.io/ghippo/user-guide/access-control/user/\"\nclick ns \"https://docs.daocloud.io/kpanda/user-guide/namespaces/createns/\"\nclick ws \"https://docs.daocloud.io/ghippo/user-guide/workspace/workspace/\"\nclick ws-to-ns \"https://docs.daocloud.io/ghippo/user-guide/workspace/ws-to-ns/\"\nclick authu \"https://docs.daocloud.io/ghippo/user-guide/workspace/ws-permission/\"\n\n classDef plain fill:#ddd,stroke:#fff,stroke-width:4px,color:#000;\n classDef k8s fill:#326ce5,stroke:#fff,stroke-width:4px,color:#fff;\n classDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n class user,ns,ws,ws-to-ns,authu cluster;\n class start,complete plain;
          "},{"location":"admin/ghippo/best-practice/cluster-for-multiws.html","title":"\u5c06\u96c6\u7fa4\u5206\u914d\u7ed9\u591a\u4e2a\u5de5\u4f5c\u7a7a\u95f4\uff08\u79df\u6237\uff09","text":"

          \u96c6\u7fa4\u8d44\u6e90\u901a\u5e38\u7531\u8fd0\u7ef4\u4eba\u5458\u8fdb\u884c\u7ba1\u7406\u3002\u5728\u5206\u914d\u8d44\u6e90\u5206\u914d\u65f6\uff0c\u4ed6\u4eec\u9700\u8981\u521b\u5efa\u547d\u540d\u7a7a\u95f4\u6765\u9694\u79bb\u8d44\u6e90\uff0c\u5e76\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\u3002 \u8fd9\u79cd\u65b9\u5f0f\u6709\u4e2a\u5f0a\u7aef\uff0c\u5982\u679c\u4f01\u4e1a\u7684\u4e1a\u52a1\u91cf\u5f88\u5927\uff0c\u624b\u52a8\u5206\u914d\u8d44\u6e90\u9700\u8981\u8f83\u5927\u7684\u5de5\u4f5c\u91cf\uff0c\u800c\u60f3\u8981\u7075\u6d3b\u8c03\u914d\u8d44\u6e90\u989d\u5ea6\u4e5f\u6709\u4e0d\u5c0f\u96be\u5ea6\u3002

          AI \u7b97\u529b\u4e2d\u5fc3\u4e3a\u6b64\u5f15\u5165\u4e86\u5de5\u4f5c\u7a7a\u95f4\u7684\u6982\u5ff5\u3002\u5de5\u4f5c\u7a7a\u95f4\u901a\u8fc7\u5171\u4eab\u8d44\u6e90\u53ef\u4ee5\u63d0\u4f9b\u66f4\u9ad8\u7ef4\u5ea6\u7684\u8d44\u6e90\u9650\u989d\u80fd\u529b\uff0c\u5b9e\u73b0\u5de5\u4f5c\u7a7a\u95f4\uff08\u79df\u6237\uff09\u5728\u8d44\u6e90\u9650\u989d\u4e0b\u81ea\u52a9\u5f0f\u521b\u5efa Kubernetes \u547d\u540d\u7a7a\u95f4\u7684\u80fd\u529b\u3002

          \u4e3e\u4f8b\u800c\u8a00\uff0c\u5982\u679c\u60f3\u8981\u8ba9\u51e0\u4e2a\u90e8\u95e8\u5171\u4eab\u4e0d\u540c\u7684\u96c6\u7fa4\u3002

          Cluster01\uff08\u666e\u901a\uff09 Cluster02\uff08\u9ad8\u53ef\u7528\uff09 \u90e8\u95e8\uff08\u5de5\u4f5c\u7a7a\u95f4\uff09A 50 quota 10 quota \u90e8\u95e8\uff08\u5de5\u4f5c\u7a7a\u95f4\uff09B 100 quota 20 quota

          \u53ef\u4ee5\u53c2\u7167\u4ee5\u4e0b\u6d41\u7a0b\u5c06\u96c6\u7fa4\u5206\u4eab\u7ed9\u591a\u4e2a\u90e8\u95e8/\u5de5\u4f5c\u7a7a\u95f4/\u79df\u6237\uff1a

          graph TB\n\npreparews[Prepare a workspace] --> preparecs[Prepare a cluster]\n--> share[Share the cluster with the workspace]\n--> judge([Check the workspace's remaining quota])\njudge -.exceeds the remaining quota.->modifyns[Adjust the namespace quota]\njudge -.within the remaining quota.->createns[Create a namespace]\n\nclassDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;\nclassDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;\nclassDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n\nclass preparews,preparecs,share, cluster;\nclass judge plain\nclass modifyns,createns k8s\n\nclick preparews \"https://docs.daocloud.io/ghippo/user-guide/workspace/cluster-for-multiws/#_2\"\nclick preparecs \"https://docs.daocloud.io/ghippo/user-guide/workspace/cluster-for-multiws/#_3\"\nclick share \"https://docs.daocloud.io/ghippo/user-guide/workspace/cluster-for-multiws/#_4\"\nclick createns \"https://docs.daocloud.io/amamba/user-guide/namespace/namespace/#_3\"\nclick modifyns \"https://docs.daocloud.io/amamba/user-guide/namespace/namespace/#_4\"
          "},{"location":"admin/ghippo/best-practice/cluster-for-multiws.html#_2","title":"\u51c6\u5907\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4","text":"

          \u5de5\u4f5c\u7a7a\u95f4\u662f\u4e3a\u4e86\u6ee1\u8db3\u591a\u79df\u6237\u7684\u4f7f\u7528\u573a\u666f\uff0c\u57fa\u4e8e\u96c6\u7fa4\u3001\u96c6\u7fa4\u547d\u540d\u7a7a\u95f4\u3001\u7f51\u683c\u3001\u7f51\u683c\u547d\u540d\u7a7a\u95f4\u3001\u591a\u4e91\u3001\u591a\u4e91\u547d\u540d\u7a7a\u95f4\u7b49\u591a\u79cd\u8d44\u6e90\u5f62\u6210\u76f8\u4e92\u9694\u79bb\u7684\u8d44\u6e90\u73af\u5883\uff0c \u5de5\u4f5c\u7a7a\u95f4\u53ef\u4ee5\u6620\u5c04\u4e3a\u9879\u76ee\u3001\u79df\u6237\u3001\u4f01\u4e1a\u3001\u4f9b\u5e94\u5546\u7b49\u591a\u79cd\u6982\u5ff5\u3002

          1. \u4f7f\u7528 admin/folder admin \u89d2\u8272\u7684\u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u5e95\u90e8\u7684 \u5168\u5c40\u7ba1\u7406 -> \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \u3002

          2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4 \u6309\u94ae\u3002

          3. \u586b\u5199\u5de5\u4f5c\u7a7a\u95f4\u540d\u79f0\u3001\u6240\u5c5e\u6587\u4ef6\u5939\u7b49\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \uff0c\u5b8c\u6210\u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4\u3002

          "},{"location":"admin/ghippo/best-practice/cluster-for-multiws.html#_3","title":"\u51c6\u5907\u4e00\u4e2a\u96c6\u7fa4","text":"

          \u5de5\u4f5c\u7a7a\u95f4\u662f\u4e3a\u4e86\u6ee1\u8db3\u591a\u79df\u6237\u7684\u4f7f\u7528\u573a\u666f\uff0c\u57fa\u4e8e\u96c6\u7fa4\u3001\u96c6\u7fa4\u547d\u540d\u7a7a\u95f4\u3001\u7f51\u683c\u3001\u7f51\u683c\u547d\u540d\u7a7a\u95f4\u3001\u591a\u4e91\u3001\u591a\u4e91\u547d\u540d\u7a7a\u95f4\u7b49\u591a\u79cd\u8d44\u6e90\u5f62\u6210\u76f8\u4e92\u9694\u79bb\u7684\u8d44\u6e90\u73af\u5883\uff0c\u5de5\u4f5c\u7a7a\u95f4\u53ef\u4ee5\u6620\u5c04\u4e3a\u9879\u76ee\u3001\u79df\u6237\u3001\u4f01\u4e1a\u3001\u4f9b\u5e94\u5546\u7b49\u591a\u79cd\u6982\u5ff5\u3002

          \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u51c6\u5907\u4e00\u4e2a\u96c6\u7fa4\u3002

          1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u5e95\u90e8\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u9009\u62e9 \u96c6\u7fa4\u5217\u8868 \u3002

          2. \u70b9\u51fb \u521b\u5efa\u96c6\u7fa4 \u521b\u5efa\u4e00\u4e2a\u96c6\u7fa4\uff0c\u6216\u70b9\u51fb \u63a5\u5165\u96c6\u7fa4 \u63a5\u5165\u4e00\u4e2a\u96c6\u7fa4\u3002

          "},{"location":"admin/ghippo/best-practice/cluster-for-multiws.html#_4","title":"\u5728\u5de5\u4f5c\u7a7a\u95f4\u6dfb\u52a0\u96c6\u7fa4","text":"

          \u8fd4\u56de \u5168\u5c40\u7ba1\u7406 \uff0c\u4e3a\u5de5\u4f5c\u7a7a\u95f4\u6dfb\u52a0\u96c6\u7fa4\u3002

          1. \u4f9d\u6b21\u70b9\u51fb \u5168\u5c40\u7ba1\u7406 -> \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 -> \u5171\u4eab\u8d44\u6e90 \uff0c\u70b9\u51fb\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u540d\u79f0\u540e\uff0c\u70b9\u51fb \u65b0\u589e\u5171\u4eab\u8d44\u6e90 \u6309\u94ae\u3002

          2. \u9009\u62e9\u96c6\u7fa4\uff0c\u586b\u5199\u8d44\u6e90\u9650\u989d\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

          \u4e0b\u4e00\u6b65\uff1a\u5c06\u96c6\u7fa4\u8d44\u6e90\u5206\u914d\u7ed9\u591a\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u540e\uff0c\u7528\u6237\u53ef\u4ee5\u524d\u5f80 \u5e94\u7528\u5de5\u4f5c\u53f0 \u5728\u8fd9\u4e9b\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u521b\u5efa\u547d\u540d\u7a7a\u95f4\u5e76\u90e8\u7f72\u5e94\u7528\u3002

          "},{"location":"admin/ghippo/best-practice/folder-practice.html","title":"\u6587\u4ef6\u5939\u6700\u4f73\u5b9e\u8df5","text":"

          \u6587\u4ef6\u5939\u4ee3\u8868\u4e00\u4e2a\u7ec4\u7ec7\u673a\u6784\uff08\u4f8b\u5982\u4e00\u4e2a\u90e8\u95e8\uff09\uff0c\u662f\u8d44\u6e90\u5c42\u6b21\u7ed3\u6784\u4e2d\u7684\u4e00\u4e2a\u8282\u70b9\u3002

          \u4e00\u4e2a\u6587\u4ef6\u5939\u53ef\u4ee5\u5305\u542b\u5de5\u4f5c\u7a7a\u95f4\u3001\u5b50\u6587\u4ef6\u5939\u6216\u4e24\u8005\u7684\u7ec4\u5408\u3002 \u5b83\u63d0\u4f9b\u4e86\u8eab\u4efd\u7ba1\u7406\u3001\u591a\u5c42\u7ea7\u548c\u6743\u9650\u6620\u5c04\u80fd\u529b\uff0c\u80fd\u591f\u5c06\u7528\u6237/\u7528\u6237\u7ec4\u5728\u6587\u4ef6\u5939\u4e2d\u7684\u89d2\u8272\u6620\u5c04\u5230\u5176\u4e0b\u7684\u5b50\u6587\u4ef6\u5939\u3001\u5de5\u4f5c\u7a7a\u95f4\u548c\u8d44\u6e90\u4e0a\u3002 \u56e0\u6b64\u501f\u52a9\u4e8e\u6587\u4ef6\u5939\uff0c\u4f01\u4e1a\u7ba1\u7406\u8005\u80fd\u591f\u96c6\u4e2d\u7ba1\u63a7\u6240\u6709\u8d44\u6e90\u3002

          1. \u6784\u5efa\u4f01\u4e1a\u5c42\u7ea7\u5173\u7cfb

            \u9996\u5148\u8981\u6309\u7167\u73b0\u6709\u7684\u4f01\u4e1a\u5c42\u7ea7\u7ed3\u6784\uff0c\u6784\u5efa\u4e0e\u4f01\u4e1a\u76f8\u540c\u7684\u6587\u4ef6\u5939\u5c42\u7ea7\u3002 AI \u7b97\u529b\u4e2d\u5fc3\u652f\u6301 5 \u7ea7\u6587\u4ef6\u5939\uff0c\u53ef\u4ee5\u6839\u636e\u4f01\u4e1a\u5b9e\u9645\u60c5\u51b5\u81ea\u7531\u7ec4\u5408\uff0c\u5c06\u6587\u4ef6\u5939\u548c\u5de5\u4f5c\u7a7a\u95f4\u6620\u5c04\u4e3a\u4f01\u4e1a\u4e2d\u7684\u90e8\u95e8\u3001\u9879\u76ee\u3001\u4f9b\u5e94\u5546\u7b49\u5b9e\u4f53\u3002

            \u6587\u4ef6\u5939\u4e0d\u76f4\u63a5\u4e0e\u8d44\u6e90\u6302\u94a9\uff0c\u800c\u662f\u901a\u8fc7\u5de5\u4f5c\u7a7a\u95f4\u95f4\u63a5\u5b9e\u73b0\u8d44\u6e90\u5206\u7ec4\u3002

          2. \u7528\u6237\u8eab\u4efd\u7ba1\u7406

            \u6587\u4ef6\u5939\u63d0\u4f9b\u4e86 Folder Admin\u3001Folder Editor\u3001Folder Viewer \u4e09\u79cd\u89d2\u8272\u3002 \u67e5\u770b\u89d2\u8272\u6743\u9650\uff0c\u53ef\u901a\u8fc7\u6388\u6743\u7ed9\u540c\u4e00\u6587\u4ef6\u5939\u4e2d\u7684\u7528\u6237/\u7528\u6237\u7ec4\u6388\u4e88\u4e0d\u540c\u7684\u89d2\u8272\u3002

          3. \u89d2\u8272\u6743\u9650\u6620\u5c04

            \u4f01\u4e1a\u7ba1\u7406\u8005\uff1a\u5728\u6839\u6587\u4ef6\u5939\u6388\u4e88 Folder Admin \u89d2\u8272\u3002\u4ed6\u5c06\u62e5\u6709\u6240\u6709\u90e8\u95e8\u3001\u9879\u76ee\u53ca\u5176\u8d44\u6e90\u7684\u7ba1\u7406\u6743\u9650\u3002

            \u90e8\u95e8\u7ba1\u7406\u8005\uff1a\u5728\u5404\u4e2a\u5b50\u6587\u4ef6\u5939\u3001\u5de5\u4f5c\u7a7a\u95f4\u5355\u72ec\u6388\u4e88\u7ba1\u7406\u6743\u9650\u3002

            \u9879\u76ee\u6210\u5458\uff1a\u5728\u5de5\u4f5c\u7a7a\u95f4\u3001\u8d44\u6e90\u5c42\u7ea7\u5355\u72ec\u6388\u4e88\u7ba1\u7406\u6743\u9650\u3002

          "},{"location":"admin/ghippo/best-practice/super-group.html","title":"\u8d85\u5927\u578b\u4f01\u4e1a\u7684\u67b6\u6784\u7ba1\u7406","text":"

          \u4f34\u968f\u4e1a\u52a1\u7684\u6301\u7eed\u6269\u5f20\uff0c\u516c\u53f8\u89c4\u6a21\u4e0d\u65ad\u58ee\u5927\uff0c\u5b50\u516c\u53f8\u3001\u5206\u516c\u53f8\u7eb7\u7eb7\u8bbe\u7acb\uff0c\u6709\u7684\u5b50\u516c\u53f8\u8fd8\u8fdb\u4e00\u6b65\u8bbe\u7acb\u5b59\u516c\u53f8\uff0c \u539f\u5148\u7684\u5927\u90e8\u95e8\u4e5f\u9010\u6e10\u7ec6\u5206\u6210\u591a\u4e2a\u5c0f\u90e8\u95e8\uff0c\u4ece\u800c\u4f7f\u5f97\u7ec4\u7ec7\u7ed3\u6784\u7684\u5c42\u7ea7\u65e5\u76ca\u589e\u591a\u3002\u8fd9\u79cd\u7ec4\u7ec7\u7ed3\u6784\u7684\u53d8\u5316\uff0c\u4e5f\u5bf9 IT \u6cbb\u7406\u67b6\u6784\u4ea7\u751f\u4e86\u5f71\u54cd\u3002

          \u5177\u4f53\u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b\uff1a

          1. \u5f00\u542f Folder/WS \u4e4b\u95f4\u7684\u9694\u79bb\u6a21\u5f0f

            \u8bf7\u53c2\u8003\u5f00\u542f Folder/WS \u4e4b\u95f4\u7684\u9694\u79bb\u6a21\u5f0f\u3002

          2. \u6309\u7167\u5b9e\u9645\u60c5\u51b5\u89c4\u5212\u4f01\u4e1a\u67b6\u6784

            \u5728\u591a\u5c42\u7ea7\u7ec4\u7ec7\u67b6\u6784\u4e0b\uff0c\u5efa\u8bae\u5c06\u4e8c\u7ea7\u6587\u4ef6\u5939\u4f5c\u4e3a\u9694\u79bb\u5355\u5143\uff0c\u8fdb\u884c\u201c\u5b50\u516c\u53f8\u201d\u4e4b\u95f4\u7684\u7528\u6237/\u7528\u6237\u7ec4/\u8d44\u6e90\u4e4b\u95f4\u7684\u9694\u79bb\u3002 \u9694\u79bb\u540e\u201c\u5b50\u516c\u53f8\u201d\u4e4b\u95f4\u7684\u7528\u6237/\u7528\u6237\u7ec4/\u8d44\u6e90\u4e92\u4e0d\u53ef\u89c1\u3002

          3. \u521b\u5efa\u7528\u6237/\u6253\u901a\u7528\u6237\u4f53\u7cfb

            \u7531\u4e3b\u5e73\u53f0\u7ba1\u7406\u5458 Admin \u5728\u5e73\u53f0\u7edf\u4e00\u521b\u5efa\u7528\u6237\u6216\u901a\u8fc7 LDAP/OIDC/OAuth2.0 \u7b49\u8eab\u4efd\u63d0\u4f9b\u5546\u80fd\u529b\u5c06\u7528\u6237\u7edf\u4e00\u5bf9\u63a5\u5230 AI \u7b97\u529b\u4e2d\u5fc3\u3002

          4. \u521b\u5efa\u6587\u4ef6\u5939\u89d2\u8272

            \u5728 Folder/WS \u7684\u9694\u79bb\u6a21\u5f0f\u4e0b\uff0c\u9700\u8981\u5e73\u53f0\u7ba1\u7406\u5458 Admin \u901a\u8fc7 \u6388\u6743 \u9996\u5148\u5c06\u7528\u6237\u9080\u8bf7\u5230\u5404\u4e2a\u5b50\u516c\u53f8\uff0c\u201c\u5b50\u516c\u53f8\u7ba1\u7406\u5458\uff08Folder Admin\uff09\u201d\u624d\u80fd\u591f\u5bf9\u8fd9\u4e9b\u7528\u6237\u8fdb\u884c\u7ba1\u7406\uff0c \u5982\u4e8c\u6b21\u6388\u6743\u6216\u8005\u7f16\u8f91\u6743\u9650\u3002\u5efa\u8bae\u7b80\u5316\u5e73\u53f0\u7ba1\u7406\u5458 Admin \u7684\u7ba1\u7406\u5de5\u4f5c\uff0c\u521b\u5efa\u4e00\u4e2a\u65e0\u5b9e\u9645\u6743\u9650\u7684\u89d2\u8272\u6765\u8f85\u52a9\u5e73\u53f0\u7ba1\u7406\u5458 Admin \u5b9e\u73b0\u901a\u8fc7\u201c\u6388\u6743\u201d\u5c06\u7528\u6237\u9080\u8bf7\u5230\u5b50\u516c\u53f8\u7684\u64cd\u4f5c\u3002 \u800c\u5b50\u516c\u53f8\u7528\u6237\u7684\u5b9e\u9645\u6743\u9650\u4e0b\u653e\u5230\u5404\u4e2a\u5b50\u516c\u53f8\u7ba1\u7406\u5458\uff08Folder Admin\uff09\u81ea\u884c\u7ba1\u7406\u3002

            Note

            \u8d44\u6e90\u7ed1\u5b9a\u6743\u9650\u70b9\u5355\u72ec\u4f7f\u7528\u4e0d\u751f\u6548\uff0c\u56e0\u6b64\u7b26\u5408\u4e0a\u8ff0\u901a\u8fc7\u201c\u6388\u6743\u201d\u5c06\u7528\u6237\u9080\u8bf7\u5230\u5b50\u516c\u53f8\u7684\u64cd\u4f5c\uff0c\u518d\u7531\u5b50\u516c\u53f8\u7ba1\u7406\u5458 Folder Admin \u81ea\u884c\u7ba1\u7406\u7684\u8981\u6c42\u3002

            \u4ee5\u4e0b\u6f14\u793a\u5982\u4f55\u521b\u5efa\u8d44\u6e90\u7ed1\u5b9a \u65e0\u5b9e\u9645\u6743\u9650\u7684\u89d2\u8272 \uff0c\u5373 minirole\u3002

          5. \u7ed9\u7528\u6237\u6388\u6743

            \u5e73\u53f0\u7ba1\u7406\u5458\u901a\u8fc7\u201c\u6388\u6743\u201d\u5c06\u7528\u6237\u6309\u7167\u5b9e\u9645\u60c5\u51b5\u9080\u8bf7\u5230\u5404\u4e2a\u5b50\u516c\u53f8\uff0c\u5e76\u4efb\u547d\u5b50\u516c\u53f8\u7ba1\u7406\u5458\u3002

            \u5c06\u5b50\u516c\u53f8\u666e\u901a\u7528\u6237\u6388\u6743\u4e3a \u201cminirole\u201d (1)\uff0c\u5c06\u5b50\u516c\u53f8\u7ba1\u7406\u5458\u6388\u6743\u4e3a Floder Admin\u3002

            1. \u5373\u7b2c 4 \u6b65\uff08\u4e0a\u4e00\u6b65\uff09\u4e2d\u521b\u5efa\u7684 \u65e0\u5b9e\u9645\u6743\u9650\u7684\u89d2\u8272

          6. \u5b50\u516c\u53f8\u7ba1\u7406\u5458\u81ea\u884c\u7ba1\u7406\u7528\u6237/\u7528\u6237\u7ec4

            \u5b50\u516c\u53f8\u7ba1\u7406\u5458 Folder Admin \u767b\u5f55\u5e73\u53f0\u540e\u53ea\u80fd\u770b\u5230\u81ea\u5df1\u6240\u5728\u7684\u201c\u5b50\u516c\u53f8 2\u201d\uff0c \u5e76\u80fd\u591f\u901a\u8fc7\u521b\u5efa\u6587\u4ef6\u5939\u3001\u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4\u8c03\u6574\u67b6\u6784\uff0c\u901a\u8fc7\u6dfb\u52a0\u6388\u6743/\u7f16\u8f91\u6743\u9650\u4e3a\u5b50\u516c\u53f8 2 \u4e2d\u7684\u7528\u6237\u8d4b\u4e88\u5176\u4ed6\u6743\u9650\u3002

            \u5728\u6dfb\u52a0\u6388\u6743\u65f6\uff0c\u5b50\u516c\u53f8\u7ba1\u7406\u5458 Folder Admin \u53ea\u80fd\u770b\u5230\u88ab\u5e73\u53f0\u7ba1\u7406\u5458\u901a\u8fc7\u201c\u6388\u6743\u201d\u9080\u8bf7\u8fdb\u6765\u7684\u7528\u6237\uff0c\u800c\u4e0d\u80fd\u770b\u5230\u5e73\u53f0\u4e0a\u7684\u6240\u6709\u7528\u6237\uff0c \u4ece\u800c\u5b9e\u73b0 Folder/WS \u4e4b\u95f4\u7684\u7528\u6237\u9694\u79bb\uff0c\u7528\u6237\u7ec4\u540c\u7406\uff08\u5e73\u53f0\u7ba1\u7406\u5458\u89c6\u89d2\u80fd\u591f\u770b\u5230\u5e76\u6388\u6743\u5e73\u53f0\u4e0a\u6240\u6709\u7684\u7528\u6237\u548c\u7528\u6237\u7ec4\uff09\u3002

          Note

          \u8d85\u5927\u578b\u4f01\u4e1a\u4e0e\u5927/\u4e2d/\u5c0f\u578b\u4f01\u4e1a\u7684\u4e3b\u8981\u533a\u522b\u5728\u4e8e Folder \u548c\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u7528\u6237/\u7528\u6237\u7ec4\u4e4b\u95f4\u662f\u5426\u53ef\u89c1\u3002 \u8d85\u5927\u578b\u4f01\u4e1a\u91cc\u5b50\u516c\u53f8\u4e0e\u5b50\u516c\u53f8\u4e4b\u95f4\u7528\u6237/\u7528\u6237\u7ec4\u4e0d\u53ef\u89c1 + \u6743\u9650\u9694\u79bb\uff1b \u5927/\u4e2d/\u5c0f\u578b\u4f01\u4e1a\u90e8\u95e8\u4e4b\u95f4\u7684\u7528\u6237\u76f8\u4e92\u53ef\u89c1 + \u6743\u9650\u9694\u79bb\u3002

          "},{"location":"admin/ghippo/best-practice/system-message.html","title":"\u7cfb\u7edf\u6d88\u606f","text":"

          \u7cfb\u7edf\u6d88\u606f\u7528\u4e8e\u901a\u77e5\u6240\u6709\u7528\u6237\uff0c\u7c7b\u4f3c\u4e8e\u7cfb\u7edf\u516c\u544a\uff0c\u4f1a\u5728\u7279\u5b9a\u65f6\u95f4\u663e\u793a\u5728 AI \u7b97\u529b\u4e2d\u5fc3UI \u7684\u9876\u90e8\u680f\u3002

          "},{"location":"admin/ghippo/best-practice/system-message.html#_2","title":"\u914d\u7f6e\u7cfb\u7edf\u6d88\u606f","text":"

          \u901a\u8fc7\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4 apply \u7cfb\u7edf\u6d88\u606f\u7684 YAML \u5373\u53ef\u521b\u5efa\u4e00\u6761\u7cfb\u7edf\u6d88\u606f\uff0c\u6d88\u606f\u7684\u663e\u793a\u65f6\u95f4\u7531 YAML \u4e2d\u7684\u65f6\u95f4\u5b57\u6bb5\u51b3\u5b9a\u3002 \u7cfb\u7edf\u6d88\u606f\u4ec5\u5728 start\u3001end \u5b57\u6bb5\u914d\u7f6e\u7684\u65f6\u95f4\u8303\u56f4\u4e4b\u5185\u624d\u4f1a\u663e\u793a\u3002

          1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\uff0c\u70b9\u51fb\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u3002

          2. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u81ea\u5b9a\u4e49\u8d44\u6e90 \uff0c\u641c\u7d22 ghippoconfig\uff0c\u70b9\u51fb\u641c\u7d22\u51fa\u6765\u7684 ghippoconfigs.ghippo.io

          3. \u70b9\u51fb YAML \u521b\u5efa \uff0c\u6216\u4fee\u6539\u5df2\u5b58\u5728\u7684 YAML

          4. \u6700\u7ec8\u6548\u679c\u5982\u4e0b

          \u4ee5\u4e0b\u662f\u4e00\u4e2a YAML \u793a\u4f8b\uff1a

          apiVersion: ghippo.io/v1alpha1\nkind: GhippoConfig\nmetadata:\n  name: system-message\nspec:\n  message: \"this is a message\"\n  start: 2024-01-02T15:04:05+08:00\n  end: 2024-07-24T17:26:05+08:00\n
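
          For example, assuming the YAML above is saved as system-message.yaml (a placeholder filename), it can be applied in the global service cluster with:

          kubectl apply -f system-message.yaml\n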
          "},{"location":"admin/ghippo/best-practice/ws-best-practice.html","title":"\u5de5\u4f5c\u7a7a\u95f4\u6700\u4f73\u5b9e\u8df5","text":"

          \u5de5\u4f5c\u7a7a\u95f4\u662f\u4e00\u79cd\u8d44\u6e90\u5206\u7ec4\u5355\u5143\uff0c\u5927\u591a\u6570\u8d44\u6e90\u90fd\u53ef\u4ee5\u5728\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u521b\u5efa\u6216\u624b\u52a8\u7ed1\u5b9a\u5230\u67d0\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u3002 \u800c\u5de5\u4f5c\u7a7a\u95f4\u901a\u8fc7\u6388\u6743\u548c\u8d44\u6e90\u7ed1\u5b9a\uff0c\u80fd\u591f\u5b9e\u73b0\u7528\u6237\u4e0e\u89d2\u8272\u7684\u7ed1\u5b9a\u5173\u7cfb\uff0c\u5e76\u4e00\u6b21\u6027\u5e94\u7528\u5230\u5de5\u4f5c\u7a7a\u95f4\u7684\u6240\u6709\u8d44\u6e90\u4e0a\u3002

          \u901a\u8fc7\u5de5\u4f5c\u7a7a\u95f4\uff0c\u53ef\u4ee5\u8f7b\u677e\u7ba1\u7406\u56e2\u961f\u4e0e\u8d44\u6e90\uff0c\u89e3\u51b3\u8de8\u6a21\u5757\u3001\u8de8\u96c6\u7fa4\u7684\u8d44\u6e90\u6388\u6743\u95ee\u9898\u3002

          "},{"location":"admin/ghippo/best-practice/ws-best-practice.html#_2","title":"\u5de5\u4f5c\u7a7a\u95f4\u7684\u529f\u80fd","text":"

          \u5de5\u4f5c\u7a7a\u95f4\u5305\u542b\u4e09\u4e2a\u529f\u80fd\uff1a\u6388\u6743\u3001\u8d44\u6e90\u7ec4\u548c\u5171\u4eab\u8d44\u6e90\u3002\u4e3b\u8981\u89e3\u51b3\u8d44\u6e90\u7edf\u4e00\u6388\u6743\u3001\u8d44\u6e90\u5206\u7ec4\u53ca\u8d44\u6e90\u914d\u989d\u95ee\u9898\u3002

          1. \u6388\u6743\uff1a\u4e3a\u7528\u6237/\u7528\u6237\u7ec4\u6388\u4e88\u8be5\u5de5\u4f5c\u7a7a\u95f4\u7684\u4e0d\u540c\u89d2\u8272\uff0c\u5e76\u5c06\u89d2\u8272\u5e94\u7528\u5230\u5de5\u4f5c\u7a7a\u95f4\u7684\u8d44\u6e90\u4e0a\u3002

            \u6700\u4f73\u5b9e\u8df5\uff1a\u666e\u901a\u7528\u6237\u60f3\u8981\u4f7f\u7528\u5e94\u7528\u5de5\u4f5c\u53f0\u3001\u5fae\u670d\u52a1\u5f15\u64ce\u3001\u670d\u52a1\u7f51\u683c\u3001\u4e2d\u95f4\u4ef6\u6a21\u5757\u529f\u80fd\uff0c\u6216\u8005\u9700\u8981\u62e5\u6709\u5bb9\u5668\u7ba1\u7406\u3001\u670d\u52a1\u7f51\u683c\u4e2d\u90e8\u5206\u8d44\u6e90\u7684\u4f7f\u7528\u6743\u9650\u65f6\uff0c\u9700\u8981\u7ba1\u7406\u5458\u6388\u4e88\u8be5\u5de5\u4f5c\u7a7a\u95f4\u7684\u4f7f\u7528\u6743\u9650\uff08Workspace Admin\u3001Workspace Edit\u3001Workspace View\uff09\u3002 \u8fd9\u91cc\u7684\u7ba1\u7406\u5458\u53ef\u4ee5\u662f Admin \u89d2\u8272\u3001\u8be5\u5de5\u4f5c\u7a7a\u95f4\u7684 Workspace Admin \u89d2\u8272\u6216\u8be5\u5de5\u4f5c\u7a7a\u95f4\u4e0a\u5c42\u7684 Folder Admin \u89d2\u8272\u3002 \u67e5\u770b Folder \u4e0e Workspace \u7684\u5173\u7cfb\u3002

          2. \u8d44\u6e90\u7ec4\uff1a\u8d44\u6e90\u7ec4\u652f\u6301 Cluster\u3001Cluster-Namespace (\u8de8\u96c6\u7fa4)\u3001Mesh\u3001Mesh-Namespace\u3001Kairship\u3001Kairship-Namespace \u516d\u79cd\u8d44\u6e90\u7c7b\u578b\u3002 \u4e00\u4e2a\u8d44\u6e90\u53ea\u80fd\u7ed1\u5b9a\u4e00\u4e2a\u8d44\u6e90\u7ec4\uff0c\u8d44\u6e90\u88ab\u7ed1\u5b9a\u5230\u8d44\u6e90\u7ec4\u540e\uff0c\u5de5\u4f5c\u7a7a\u95f4\u7684\u6240\u6709\u8005\u5c06\u62e5\u6709\u8be5\u8d44\u6e90\u7684\u6240\u6709\u7ba1\u7406\u6743\u9650\uff0c\u76f8\u5f53\u4e8e\u8be5\u8d44\u6e90\u7684\u6240\u6709\u8005\uff0c\u56e0\u6b64\u4e0d\u53d7\u8d44\u6e90\u914d\u989d\u7684\u9650\u5236\u3002

            \u6700\u4f73\u5b9e\u8df5\uff1a\u5de5\u4f5c\u7a7a\u95f4\u901a\u8fc7\u201c\u6388\u6743\u201d\u529f\u80fd\u53ef\u4ee5\u7ed9\u90e8\u95e8\u6210\u5458\u6388\u4e88\u4e0d\u540c\u89d2\u8272\u6743\u9650\uff0c\u800c\u5de5\u4f5c\u7a7a\u95f4\u80fd\u591f\u628a\u4eba\u4e0e\u89d2\u8272\u7684\u6388\u6743\u5173\u7cfb\u4e00\u6b21\u6027\u5e94\u7528\u5230\u5de5\u4f5c\u7a7a\u95f4\u7684\u6240\u6709\u8d44\u6e90\u4e0a\u3002\u56e0\u6b64\u8fd0\u7ef4\u4eba\u5458\u53ea\u9700\u5c06\u8d44\u6e90\u7ed1\u5b9a\u5230\u8d44\u6e90\u7ec4\uff0c\u5c06\u90e8\u95e8\u4e2d\u7684\u4e0d\u540c\u89d2\u8272\u52a0\u5165\u4e0d\u540c\u7684\u8d44\u6e90\u7ec4\uff0c\u5c31\u80fd\u786e\u4fdd\u8d44\u6e90\u7684\u6743\u9650\u88ab\u6b63\u786e\u5206\u914d\u3002

            Role | Cluster | Cluster-Namespace (cross-cluster)
            Workspace Admin | Cluster Admin | NS Admin
            Workspace Edit | ✗ | NS Editor
            Workspace View | ✗ | NS Viewer
          3. Shared resources: the shared resource feature mainly targets cluster resources.

            A cluster can be shared with multiple workspaces (this refers to the workspace's shared resource feature), and a workspace can use resources from multiple clusters at the same time. However, sharing a cluster does not mean the recipient (workspace) can use it without limits, so a resource quota is usually set for what the recipient (workspace) may consume.

            Also, unlike resource groups, workspace members are only consumers of the shared resource: they can use the cluster's resources within the quota, for example going to the Workbench to create namespaces and deploy applications, but they have no cluster management permissions. Once the limit is set, the total resource quota of the namespaces created/bound under that workspace cannot exceed the resource usage cap set for that workspace on the cluster.

            Best practice: the operations department owns a highly available cluster 01 and wants to allocate it to department A (workspace A) and department B (workspace B), giving department A 50 CPU cores and department B 100 CPU cores. Using the shared resource concept, cluster 01 is shared with both department A and department B, with department A's CPU quota limited to 50 cores and department B's to 100 cores. Department A's administrator (workspace A Admin) can then create and use namespaces in the Workbench with a total namespace quota of at most 50 cores, and department B's administrator (workspace B Admin) can do the same with at most 100 cores. The namespaces created by the two administrators are automatically bound to their departments, and other department members get the corresponding Namespace Admin, Namespace Edit, or Namespace View roles on those namespaces (a department here is a workspace; workspaces can also map to organizations, suppliers, and other concepts). The whole process is shown in the table below:

            Department | Role | Shared cluster (Cluster) | Resource quota
            Department admin A | Workspace Admin | Cluster 01 | 50 CPU cores
            Department admin B | Workspace Admin | Cluster 01 | 100 CPU cores
          "},{"location":"admin/ghippo/best-practice/ws-best-practice.html#ai","title":"\u5de5\u4f5c\u7a7a\u95f4\u5bf9 AI \u7b97\u529b\u4e2d\u5fc3\u5404\u6a21\u5757\u7684\u4f5c\u7528","text":"

          \u6a21\u5757\u540d\u79f0\uff1a\u5bb9\u5668\u7ba1\u7406

          \u7531\u4e8e\u529f\u80fd\u6a21\u5757\u7684\u7279\u6b8a\u6027\uff0c\u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u521b\u5efa\u7684\u8d44\u6e90\u4e0d\u4f1a\u81ea\u52a8\u88ab\u7ed1\u5b9a\u5230\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u3002

          \u5982\u679c\u60a8\u9700\u8981\u901a\u8fc7\u5de5\u4f5c\u7a7a\u95f4\u5bf9\u4eba\u548c\u8d44\u6e90\u8fdb\u884c\u7edf\u4e00\u6388\u6743\u7ba1\u7406\uff0c\u53ef\u4ee5\u624b\u52a8\u5c06\u9700\u8981\u7684\u8d44\u6e90\u7ed1\u5b9a\u5230\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u4e2d\uff0c\u4ece\u800c\u5c06\u7528\u6237\u5728\u8be5\u5de5\u4f5c\u7a7a\u95f4\u7684\u89d2\u8272\u5e94\u7528\u5230\u8d44\u6e90\u4e0a\uff08\u8fd9\u91cc\u7684\u8d44\u6e90\u662f\u53ef\u4ee5\u8de8\u96c6\u7fa4\u7684\uff09\u3002

          \u53e6\u5916\uff0c\u5728\u8d44\u6e90\u7684\u7ed1\u5b9a\u5165\u53e3\u4e0a\u5bb9\u5668\u7ba1\u7406\u4e0e\u670d\u52a1\u7f51\u683c\u7a0d\u6709\u5dee\u5f02\uff0c\u5de5\u4f5c\u7a7a\u95f4\u63d0\u4f9b\u4e86\u5bb9\u5668\u7ba1\u7406\u4e2d\u7684 Cluster \u3001 Cluster-Namesapce \u548c\u670d\u52a1\u7f51\u683c\u4e2d\u7684 Mesh\u3001Mesh-Namespace \u8d44\u6e90\u7684\u7ed1\u5b9a\u5165\u53e3\uff0c\u4f46\u5c1a\u672a\u5f00\u653e\u5bf9\u670d\u52a1\u7f51\u683c\u7684 kairship \u548c Kairship-Namespace \u8d44\u6e90\u7684\u7ed1\u5b9a\u3002

          \u5bf9\u4e8e kairship \u548c Kairship-Namespace \u8d44\u6e90\uff0c\u53ef\u4ee5\u5728\u670d\u52a1\u7f51\u683c\u7684\u8d44\u6e90\u5217\u8868\u8fdb\u884c\u624b\u52a8\u7ed1\u5b9a\u3002

          "},{"location":"admin/ghippo/best-practice/ws-best-practice.html#_3","title":"\u5de5\u4f5c\u7a7a\u95f4\u7684\u4f7f\u7528\u573a\u666f","text":"
          • \u6620\u5c04\u4e3a\u4e0d\u540c\u7684\u90e8\u95e8\u3001\u9879\u76ee\u3001\u7ec4\u7ec7\u7b49\u6982\u5ff5\uff0c\u540c\u65f6\u53ef\u4ee5\u5c06\u5de5\u4f5c\u7a7a\u95f4\u4e2d Workspace Admin\u3001Workspace Edit \u548c Workspace View \u89d2\u8272\u5bf9\u5e94\u5230\u90e8\u95e8\u3001\u9879\u76ee\u3001\u7ec4\u7ec7\u4e2d\u7684\u4e0d\u540c\u89d2\u8272
          • \u5c06\u4e0d\u540c\u7528\u9014\u7684\u8d44\u6e90\u52a0\u5165\u4e0d\u540c\u7684\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u5206\u5f00\u7ba1\u7406\u548c\u4f7f\u7528
          • \u4e3a\u4e0d\u540c\u5de5\u4f5c\u7a7a\u95f4\u8bbe\u7f6e\u5b8c\u5168\u72ec\u7acb\u7684\u7ba1\u7406\u5458\uff0c\u5b9e\u73b0\u5de5\u4f5c\u7a7a\u95f4\u8303\u56f4\u5185\u7684\u7528\u6237\u4e0e\u6743\u9650\u7ba1\u7406
          • \u5c06\u8d44\u6e90\u5171\u4eab\u7ed9\u4e0d\u540c\u7684\u5de5\u4f5c\u7a7a\u95f4\u4f7f\u7528\uff0c\u5e76\u9650\u5236\u5de5\u4f5c\u7a7a\u95f4\u80fd\u591f\u4f7f\u7528\u7684\u8d44\u6e90\u989d\u5ea6\u4e0a\u9650
          "},{"location":"admin/ghippo/best-practice/ws-to-ns.html","title":"\u5de5\u4f5c\u7a7a\u95f4\uff08\u79df\u6237\uff09\u7ed1\u5b9a\u8de8\u96c6\u7fa4\u7684\u547d\u540d\u7a7a\u95f4","text":"

          \u5de5\u4f5c\u7a7a\u95f4\uff08\u79df\u6237\uff09\u4e0b\u7ed1\u5b9a\u6765\u81ea\u4e0d\u540c\u96c6\u7fa4\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u80fd\u591f\u4f7f\u5de5\u4f5c\u7a7a\u95f4\uff08\u79df\u6237\uff09\u7075\u6d3b\u7eb3\u7ba1\u5e73\u53f0\u4e0a\u4efb\u610f\u96c6\u7fa4\u4e0b\u7684 Kubernetes Namespace\u3002 \u540c\u65f6\u5e73\u53f0\u63d0\u4f9b\u4e86\u6743\u9650\u6620\u5c04\u80fd\u529b\uff0c\u80fd\u591f\u5c06\u7528\u6237\u5728\u5de5\u4f5c\u7a7a\u95f4\u7684\u6743\u9650\u6620\u5c04\u5230\u7ed1\u5b9a\u7684\u547d\u540d\u7a7a\u95f4\u8eab\u4e0a\u3002

          \u5f53\u5de5\u4f5c\u7a7a\u95f4\uff08\u79df\u6237\uff09\u4e0b\u7ed1\u5b9a\u4e00\u4e2a\u6216\u591a\u4e2a\u8de8\u96c6\u7fa4\u7684\u547d\u540d\u7a7a\u95f4\u65f6\uff0c\u7ba1\u7406\u5458\u65e0\u9700\u518d\u6b21\u7ed9\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u7684\u6210\u5458\u6388\u6743\uff0c \u6210\u5458\u4eec\u5728\u5de5\u4f5c\u7a7a\u95f4\u4e0a\u7684\u89d2\u8272\u5c06\u6839\u636e\u4ee5\u4e0b\u6620\u5c04\u5173\u7cfb\u81ea\u52a8\u6620\u5c04\u5b8c\u6210\u6388\u6743\uff0c\u907f\u514d\u4e86\u591a\u6b21\u6388\u6743\u7684\u91cd\u590d\u6027\u64cd\u4f5c\uff1a

          • Workspace Admin \u5bf9\u5e94 Namespace Admin
          • Workspace Editor \u5bf9\u5e94 Namespace Editor
          • Workspace Viewer \u5bf9\u5e94 Namespace Viewer

          \u4ee5\u4e0b\u662f\u4e00\u4e2a\u4f8b\u5b50\uff1a

          \u7528\u6237 \u5de5\u4f5c\u7a7a\u95f4 \u89d2\u8272 \u7528\u6237 A Workspace01 Workspace Admin

          \u5c06\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4\u540e\uff1a

          \u7528\u6237 \u6240\u5c5e\u8303\u7574 \u89d2\u8272 \u7528\u6237 A Workspace01 Workspace Admin Namespace01 Namespace Admin"},{"location":"admin/ghippo/best-practice/ws-to-ns.html#_2","title":"\u5b9e\u73b0\u65b9\u6848","text":"

          The workflow for binding namespaces from different clusters to the same workspace (tenant) and making them available to the workspace's (tenant's) members is shown in the diagram.

          graph TB\n\npreparews[Prepare workspace] --> preparens[Prepare namespace]\n--> judge([Is the namespace bound to another workspace?])\njudge -.Not bound.->nstows[Bind the namespace to the workspace] --> wsperm[Manage workspace access permissions]\njudge -.Bound.->createns[Create a new namespace]\n\nclassDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;\nclassDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;\nclassDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n\nclass preparews,preparens,createns,nstows,wsperm cluster;\nclass judge plain\n\nclick preparews \"https://docs.daocloud.io/ghippo/user-guide/workspace/ws-to-ns/#_3\"\nclick preparens \"https://docs.daocloud.io/ghippo/user-guide/workspace/ws-to-ns/#_4\"\nclick nstows \"https://docs.daocloud.io/ghippo/user-guide/workspace/ws-to-ns/#_5\"\nclick wsperm \"https://docs.daocloud.io/ghippo/user-guide/workspace/ws-to-ns/#_6\"\nclick createns \"https://docs.daocloud.io/ghippo/user-guide/workspace/ws-to-ns/#_4\"

          Tip

          A namespace can be bound to only one workspace.

          "},{"location":"admin/ghippo/best-practice/ws-to-ns.html#_3","title":"Prepare a Workspace","text":"

          Workspaces serve multi-tenant scenarios: they form mutually isolated resource environments based on clusters, cluster namespaces, meshes, mesh namespaces, multicloud, multicloud namespaces, and other resources. A workspace can map to a project, tenant, enterprise, supplier, or other concept.

          1. Log in to the AI Computing Center as a user with the admin/folder admin role, and click Global Management at the bottom of the left navigation bar.

          2. Click Workspace and Folder in the left navigation bar, then click the Create Workspace button in the upper right corner.

          3. Fill in the workspace name, parent folder, and other information, then click OK to finish creating the workspace.

          Tip: if namespaces already exist on the platform, click a workspace, and on the Resource Group tab click Bind Resource to bind a namespace directly.

          "},{"location":"admin/ghippo/best-practice/ws-to-ns.html#_4","title":"\u51c6\u5907\u547d\u540d\u7a7a\u95f4","text":"

          \u547d\u540d\u7a7a\u95f4\u662f\u66f4\u5c0f\u7684\u8d44\u6e90\u9694\u79bb\u5355\u5143\uff0c\u5c06\u5176\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4\u540e\uff0c\u5de5\u4f5c\u7a7a\u95f4\u7684\u6210\u5458\u5c31\u53ef\u4ee5\u8fdb\u884c\u7ba1\u7406\u548c\u4f7f\u7528\u3002

          \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u51c6\u5907\u4e00\u4e2a\u8fd8\u672a\u7ed1\u5b9a\u5230\u4efb\u4f55\u5de5\u4f5c\u7a7a\u95f4\u7684\u547d\u540d\u7a7a\u95f4\u3002

          1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u5e95\u90e8\u7684 \u5bb9\u5668\u7ba1\u7406 \u3002

          2. \u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

          3. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u547d\u540d\u7a7a\u95f4 \uff0c\u8fdb\u5165\u547d\u540d\u7a7a\u95f4\u7ba1\u7406\u9875\u9762\uff0c\u70b9\u51fb\u9875\u9762\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae\u3002

          4. \u586b\u5199\u547d\u540d\u7a7a\u95f4\u7684\u540d\u79f0\uff0c\u914d\u7f6e\u5de5\u4f5c\u7a7a\u95f4\u548c\u6807\u7b7e\uff08\u53ef\u9009\u8bbe\u7f6e\uff09\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a \u3002

            Info

            \u5de5\u4f5c\u7a7a\u95f4\u4e3b\u8981\u7528\u4e8e\u5212\u5206\u8d44\u6e90\u7ec4\u5e76\u4e3a\u7528\u6237\uff08\u7528\u6237\u7ec4\uff09\u6388\u4e88\u5bf9\u8be5\u8d44\u6e90\u7684\u4e0d\u540c\u8bbf\u95ee\u6743\u9650\u3002\u6709\u5173\u5de5\u4f5c\u7a7a\u95f4\u7684\u8be6\u7ec6\u8bf4\u660e\uff0c\u53ef\u53c2\u8003\u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7\u3002

          5. \u70b9\u51fb \u786e\u5b9a \uff0c\u5b8c\u6210\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3002\u5728\u547d\u540d\u7a7a\u95f4\u5217\u8868\u53f3\u4fa7\uff0c\u70b9\u51fb \u2507 \uff0c\u53ef\u4ee5\u4ece\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4 \u3002
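
          If you prefer the CLI for the creation step, a minimal sketch (demo-ns is a placeholder name; the workspace binding itself still happens as described below):

          kubectl create namespace demo-ns\n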

          "},{"location":"admin/ghippo/best-practice/ws-to-ns.html#_5","title":"\u5c06\u547d\u540d\u7a7a\u95f4\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4","text":"

          \u9664\u4e86\u5728\u547d\u540d\u7a7a\u95f4\u5217\u8868\u4e2d\u7ed1\u5b9a\u5916\uff0c\u4e5f\u53ef\u4ee5\u8fd4\u56de \u5168\u5c40\u7ba1\u7406 \uff0c\u6309\u7167\u4ee5\u4e0b\u6b65\u9aa4\u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4\u3002

          1. \u4f9d\u6b21\u70b9\u51fb \u5168\u5c40\u7ba1\u7406 -> \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 -> \u8d44\u6e90\u7ec4 \uff0c\u70b9\u51fb\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u540d\u79f0\u540e\uff0c\u70b9\u51fb \u7ed1\u5b9a\u8d44\u6e90 \u6309\u94ae\u3002

          2. \u9009\u4e2d\u8981\u7ed1\u5b9a\u7684\u5de5\u4f5c\u7a7a\u95f4\uff08\u53ef\u591a\u9009\uff09\uff0c\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u7ed1\u5b9a\u3002

          "},{"location":"admin/ghippo/best-practice/ws-to-ns.html#_6","title":"\u4e3a\u5de5\u4f5c\u7a7a\u95f4\u6dfb\u52a0\u6210\u5458\u5e76\u6388\u6743","text":"
          1. \u5728 \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 -> \u6388\u6743 \u4e2d\uff0c\u70b9\u51fb\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u540d\u79f0\u540e\uff0c\u70b9\u51fb \u6dfb\u52a0\u6388\u6743 \u6309\u94ae\u3002

          2. \u9009\u62e9\u8981\u6388\u6743\u7684 \u7528\u6237/\u7528\u6237\u7ec4 \u3001 \u89d2\u8272 \u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u6388\u6743\u3002

          "},{"location":"admin/ghippo/best-practice/gproduct/intro.html","title":"GProduct \u5982\u4f55\u5bf9\u63a5\u5168\u5c40\u7ba1\u7406","text":"

          GProduct \u662f AI \u7b97\u529b\u4e2d\u5fc3\u4e2d\u9664\u5168\u5c40\u7ba1\u7406\u5916\u7684\u6240\u6709\u5176\u4ed6\u6a21\u5757\u7684\u7edf\u79f0\uff0c\u8fd9\u4e9b\u6a21\u5757\u9700\u8981\u4e0e\u5168\u5c40\u7ba1\u7406\u5bf9\u63a5\u540e\u624d\u80fd\u52a0\u5165\u5230 AI \u7b97\u529b\u4e2d\u5fc3\u4e2d\u3002

          "},{"location":"admin/ghippo/best-practice/gproduct/intro.html#_1","title":"\u5bf9\u63a5\u4ec0\u4e48","text":"
          • \u5bf9\u63a5\u5bfc\u822a\u680f

            \u5165\u53e3\u7edf\u4e00\u653e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u3002

          • \u63a5\u5165\u8def\u7531\u548c AuthN

            \u7edf\u4e00 IP \u6216\u57df\u540d\uff0c\u5c06\u8def\u7531\u5165\u53e3\u7edf\u4e00\u8d70\u5168\u5c40\u7ba1\u7406\u7684 Istio Gateway\u3002

          • \u7edf\u4e00\u767b\u5f55 / \u7edf\u4e00 AuthN \u8ba4\u8bc1

            \u767b\u5f55\u7edf\u4e00\u4f7f\u7528\u5168\u5c40\u7ba1\u7406 (Keycloak) \u767b\u5f55\u9875\uff0cAPI authn token \u9a8c\u8bc1\u4f7f\u7528 Istio Gateway\u3002 GProduct \u5bf9\u63a5\u5168\u5c40\u7ba1\u7406\u540e\u4e0d\u9700\u8981\u5173\u6ce8\u5982\u4f55\u5b9e\u73b0\u767b\u5f55\u548c\u8ba4\u8bc1\u3002

          "},{"location":"admin/ghippo/best-practice/gproduct/intro.html#pdf","title":"\u89c6\u9891\u6f14\u793a\u548c PDF","text":"

          \u5c06 AI \u7b97\u529b\u4e2d\u5fc3\u96c6\u6210\u5230\u5ba2\u6237\u7cfb\u7edf\uff08OEM OUT\uff09\uff0c\u53c2\u9605 OEM OUT \u6587\u6863\u3002

          \u5c06\u5ba2\u6237\u7cfb\u7edf\u96c6\u6210\u5230 AI \u7b97\u529b\u4e2d\u5fc3\uff08OEM IN\uff09\uff0c\u53c2\u9605 OEM IN \u6587\u6863\u3002

          "},{"location":"admin/ghippo/best-practice/gproduct/nav.html","title":"\u5bf9\u63a5\u5bfc\u822a\u680f","text":"

          \u4ee5\u5bb9\u5668\u7ba1\u7406\uff08\u5f00\u53d1\u4ee3\u53f7 kpanda \uff09\u4e3a\u4f8b\uff0c\u5bf9\u63a5\u5230\u5bfc\u822a\u680f\u3002

          \u5bf9\u63a5\u540e\u7684\u9884\u671f\u6548\u679c\u5982\u56fe\uff1a

          "},{"location":"admin/ghippo/best-practice/gproduct/nav.html#_2","title":"\u5bf9\u63a5\u65b9\u6cd5","text":"

          \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u5bf9\u63a5 GProduct\uff1a

          1. \u901a\u8fc7 GProductNavigator CR \u5c06\u5bb9\u5668\u7ba1\u7406\u7684\u5404\u529f\u80fd\u9879\u6ce8\u518c\u5230\u5bfc\u822a\u680f\u83dc\u5355\u3002

            apiVersion: ghippo.io/v1alpha1\nkind: GProductNavigator\nmetadata:\n  name: kpanda\nspec:\n  gproduct: kpanda\n  name: \u5bb9\u5668\u7ba1\u7406\n  localizedName:\n    zh-CN: \u5bb9\u5668\u7ba1\u7406\n    en-US: Container Management\n  url: /kpanda\n  category: \u5bb9\u5668  # (1)\n  iconUrl: /kpanda/nav-icon.png\n  order: 10 # (2)\n  menus:\n  - name: \u5907\u4efd\u7ba1\u7406\n    localizedName:\n      zh-CN: \u5907\u4efd\u7ba1\u7406\n      en-US: Backup Management\n    iconUrl: /kpanda/bkup-icon.png\n    url: /kpanda/backup\n
            1. Currently only one of six categories is supported: Overview, Workbench, Container, Microservice, Data Service, Management
            2. The larger the number, the higher the entry appears

            Global Management's navigation bar categories are configured in a ConfigMap and cannot currently be added by registration; contact the Global Management team to have one added.

          2. The kpanda frontend is integrated into Anakin, the AI Computing Center parent application, as a micro-frontend.

            The frontend uses qiankun to mount the sub-application UI; see the qiankun quick start.

            After the GProductNavigator CR is registered, the API generates the corresponding registration info for the frontend parent application to use. For example, kpanda generates the following registration info:

            {\n  \"id\": \"kpanda\",\n  \"title\": \"\u5bb9\u5668\u7ba1\u7406\",\n  \"url\": \"/kpanda\",\n  \"uiAssetsUrl\": \"/ui/kpanda/\", // \u7ed3\u5c3e\u7684/\u662f\u5fc5\u987b\u7684\n  \"needImportLicense\": false\n},\n

            The registration info above maps to the qiankun sub-application fields as follows:

            {\n    name: id,\n    entry: uiAssetsUrl,\n    container: '#container',\n    activeRule: url, \n    loader,\n    props: globalProps,\n}\n

            container and loader are provided by the frontend parent application; the sub-application does not need to care about them. props provides a pinia store containing basic user info, sub-product registration info, and so on.

            qiankun uses the following parameters at startup:

            start({\n  sandbox: {\n    experimentalStyleIsolation: true,\n  },\n  // strip the favicon from the sub-application to keep it from overriding the parent application's favicon in Firefox\n  getTemplate: (template) => template.replaceAll(/<link\\s* rel=\"[\\w\\s]*icon[\\w\\s]*\"\\s*( href=\".*?\")?\\s*\\/?>/g, ''),\n});\n

          See the GProduct integration demo tarball provided by the frontend team.

          "},{"location":"admin/ghippo/best-practice/gproduct/route-auth.html","title":"Integrate Routing and Login Authentication","text":"

          After integration, login and password verification are unified, with the following effect:

          API bearer token verification for every GProduct module goes through the Istio Gateway.

          The route mapping after integration is as follows:

          "},{"location":"admin/ghippo/best-practice/gproduct/route-auth.html#_2","title":"Integration Method","text":"

          Take kpanda as an example to register a GProductProxy CR.

          # Example GProductProxy CR, including routing and login authentication\n\n# spec.proxies: a route written later must not be a subset of a route written earlier; the reverse is allowed\n# spec.proxies.match.uri.prefix: for a backend api, it is recommended to end the prefix with \"/\" to mark where the path ends (it can be omitted for special needs)\n# spec.proxies.match.uri: supports prefix and exact modes; only one of Prefix and Exact can be used; Prefix takes precedence over Exact\n\napiVersion: ghippo.io/v1alpha1\nkind: GProductProxy\nmetadata:\n  name: kpanda  # (1)\nspec:\n  gproduct: kpanda  # (2)\n  proxies:\n  - labels:\n      kind: UIEntry\n    match:\n      uri:\n        prefix: /kpanda # (3)\n    rewrite:\n      uri: /index.html\n    destination:\n      host: ghippo-anakin.ghippo-system.svc.cluster.local\n      port: 80\n    authnCheck: false  # (4)\n  - labels:\n      kind: UIAssets\n    match:\n      uri:\n        prefix: /ui/kpanda/ # (5)\n    destination:\n      host: kpanda-ui.kpanda-system.svc.cluster.local\n      port: 80\n    authnCheck: false\n  - match:\n      uri:\n        prefix: /apis/kpanda.io/v1/a\n    destination:\n      host: kpanda-service.kpanda-system.svc.cluster.local\n      port: 80\n    authnCheck: false\n  - match:\n      uri:\n        prefix: /apis/kpanda.io/v1 # (6)\n    destination:\n      host: kpanda-service.kpanda-system.svc.cluster.local\n      port: 80\n    authnCheck: true\n
          1. Cluster-scoped CRD
          2. Specify the GProduct name in lowercase
          3. exact is also supported
          4. Whether istio-gateway should perform AuthN token authentication for this route's API; false skips authentication
          5. For UIAssets it is recommended to end with / to mark the end (otherwise the frontend may run into problems)
          6. A route written later must not be a subset of a route written earlier; the reverse is allowed
          "},{"location":"admin/ghippo/best-practice/menu/menu-display-or-hiding.html","title":"\u5bfc\u822a\u680f\u83dc\u5355\u6839\u636e\u6743\u9650\u663e\u793a/\u9690\u85cf","text":"

          \u5728\u73b0\u6709\u7684\u6743\u9650\u4f53\u7cfb\u4e0b, \u5168\u5c40\u7ba1\u7406\u53ef\u4ee5\u6839\u636e\u7528\u6237\u7684\u6743\u9650\u63a7\u5236\u5bfc\u822a\u680f\u7684\u83dc\u5355\u662f\u5426\u5c55\u793a\uff0c \u4f46\u662f\u7531\u4e8e\u5bb9\u5668\u7ba1\u7406\u7684\u6388\u6743\u4fe1\u606f\u672a\u540c\u6b65\u5230\u5168\u5c40\u7ba1\u7406\uff0c\u5bfc\u81f4\u5168\u5c40\u7ba1\u7406\u65e0\u6cd5\u51c6\u786e\u5224\u65ad\u5bb9\u5668\u7ba1\u7406\u83dc\u5355\u662f\u5426\u9700\u8981\u5c55\u793a\u3002

          \u672c\u6587\u901a\u8fc7\u914d\u7f6e\u5b9e\u73b0\u4e86\uff1a \u5c06\u5bb9\u5668\u7ba1\u7406\u53ca\u53ef\u89c2\u6d4b\u6027\u7684\u83dc\u5355\u5728 \u5168\u5c40\u7ba1\u7406\u65e0\u6cd5\u5224\u65ad\u7684\u90e8\u5206, \u9ed8\u8ba4\u4e0d\u663e\u793a \uff0c \u901a\u8fc7 \u767d\u540d\u5355 \u6388\u6743\u7684\u65b9\u5f0f\uff0c\u5b9e\u73b0\u83dc\u5355\u7684\u9690\u85cf\u4e0e\u663e\u793a\uff08\u901a\u8fc7\u5bb9\u5668\u7ba1\u7406\u9875\u9762\u6388\u6743\u7684\u96c6\u7fa4\u6216\u547d\u540d\u7a7a\u95f4\u6743\u9650\uff0c\u5168\u5c40\u7ba1\u7406\u5747\u65e0\u6cd5\u611f\u77e5\u548c\u5224\u65ad\uff09\u3002

          \u4f8b\u5982\uff1aA \u7528\u6237\u5728\u5bb9\u5668\u7ba1\u7406\u662f cluster A \u7684 Cluster Admin \u89d2\u8272\uff0c \u8fd9\u79cd\u60c5\u51b5\u4e0b\u5168\u5c40\u7ba1\u7406\u65e0\u6cd5\u5224\u65ad\u662f\u5426\u6709\u6743\u9650\u5c55\u793a\u5bb9\u5668\u7ba1\u7406\u83dc\u5355\u3002 \u901a\u8fc7\u672c\u6587\u6863\u914d\u7f6e\u540e\uff0c\u7528\u6237 A \u9ed8\u8ba4\u4e0d\u53ef\u89c1\u5bb9\u5668\u7ba1\u7406\u83dc\u5355\uff0c\u9700\u8981 \u663e\u5f0f\u5730\u5728\u5168\u5c40\u7ba1\u7406\u6388\u6743 \u624d\u53ef\u4ee5\u770b\u5230\u5bb9\u5668\u7ba1\u7406\u83dc\u5355\u3002

          "},{"location":"admin/ghippo/best-practice/menu/menu-display-or-hiding.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

          \u5df2\u5f00\u542f\u57fa\u4e8e\u6743\u9650\u663e\u793a/\u9690\u85cf\u83dc\u5355\u7684\u529f\u80fd\uff0c\u5f00\u542f\u65b9\u6cd5\u5982\u4e0b\uff1a

          • \u65b0\u5b89\u88c5\u7684\u73af\u5883, \u4f7f\u7528 helm install \u65f6\u589e\u52a0 --set global.navigatorVisibleDependency=true \u53c2\u6570
          • \u5df2\u6709\u73af\u5883\uff0chelm get values ghippo -n ghippo-system -o yaml \u5907\u4efd values, \u968f\u540e\u4fee\u6539 bak.yaml \u5e76\u6dfb\u52a0 global.navigatorVisibleDependency: true
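
          A minimal sketch of that backup-and-edit step, assuming the release name ghippo in the ghippo-system namespace as above:

          helm get values ghippo -n ghippo-system -o yaml > bak.yaml\n\n# then add to bak.yaml (merging under any existing global block):\n# global:\n#   navigatorVisibleDependency: true\n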

          Then upgrade Global Management with the following command:

          helm upgrade ghippo ghippo-release/ghippo \\  \n  -n ghippo-system \\  \n  -f ./bak.yaml \\  \n  --version ${version}\n
          "},{"location":"admin/ghippo/best-practice/menu/menu-display-or-hiding.html#_3","title":"\u914d\u7f6e\u5bfc\u822a\u680f","text":"

          \u5728 kpanda-global-cluster \u4e2d apply \u5982\u4e0b YAML\uff1a

          apiVersion: ghippo.io/v1alpha1  \nkind: GProductNavigator  \nmetadata:  \n  name: kpanda-menus-custom  \nspec:  \n  category: container  \n  gproduct: kpanda  \n  iconUrl: ./ui/kpanda/kpanda.svg  \n  isCustom: true  \n  localizedName:  \n    en-US: Container Management  \n    zh-CN: \u5bb9\u5668\u7ba1\u7406  \n  menus:  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Clusters  \n        zh-CN: \u96c6\u7fa4\u5217\u8868  \n      name: Clusters  \n      order: 80  \n      url: ./kpanda/clusters  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Namespaces  \n        zh-CN: \u547d\u540d\u7a7a\u95f4  \n      name: Namespaces  \n      order: 70  \n      url: ./kpanda/namespaces  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Workloads  \n        zh-CN: \u5de5\u4f5c\u8d1f\u8f7d  \n      name: Workloads  \n      order: 60  \n      url: ./kpanda/workloads/deployments  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Permissions  \n        zh-CN: \u6743\u9650\u7ba1\u7406  \n      name: Permissions  \n      order: 10  \n      url: ./kpanda/rbac/content/cluster  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n  name: \u5bb9\u5668\u7ba1\u7406  \n  order: 50  \n  url: ./kpanda/clusters  \n  visible: true  \n\n---\napiVersion: ghippo.io/v1alpha1  \nkind: GProductNavigator  \nmetadata:  \n  name: insight-menus-custom  \nspec:  \n  category: microservice  \n  gproduct: insight  \n  iconUrl: ./ui/insight/logo.svg  \n  isCustom: true  \n  localizedName:  \n    en-US: Insight  \n    zh-CN: \u53ef\u89c2\u6d4b\u6027  \n  menus:  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Overview  \n        zh-CN: \u6982\u89c8  \n      name: Overview  \n      order: 9  \n      url: ./insight/overview  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Dashboard  \n        zh-CN: \u4eea\u8868\u76d8  \n      name: Dashboard  \n      order: 8  \n      url: ./insight/dashboard  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Infrastructure  \n        zh-CN: \u57fa\u7840\u8bbe\u65bd  \n      name: Infrastructure  \n      order: 7  \n      url: ./insight/clusters  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Metrics  \n        zh-CN: \u6307\u6807  \n      name: Metrics  \n      order: 6  \n      url: ./insight/metric/basic  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - 
kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Logs  \n        zh-CN: \u65e5\u5fd7  \n      name: Logs  \n      order: 5  \n      url: ./insight/logs  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Trace Tracking  \n        zh-CN: \u94fe\u8def\u8ffd\u8e2a  \n      name: Trace Tracking  \n      order: 4  \n      url: ./insight/topology  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Alerts  \n        zh-CN: \u544a\u8b66  \n      name: Alerts  \n      order: 3  \n      url: ./insight/alerts/active/metrics  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Collect Management  \n        zh-CN: \u91c7\u96c6\u7ba1\u7406  \n      name: Collect Management  \n      order: 2  \n      url: ./insight/agents  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: System Management  \n        zh-CN: \u7cfb\u7edf\u7ba1\u7406  \n      name: System Management  \n      order: 1  \n      url: ./insight/system-components  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n  name: \u53ef\u89c2\u6d4b\u6027  \n  order: 30  \n  url: ./insight  \n  visible: true  \n\n---\napiVersion: ghippo.io/v1alpha1  \nkind: GProductResourcePermissions  \nmetadata:  \n  name: kpanda  \nspec:  \n  actions:  \n    - localizedName:  \n        en-US: Create  \n        zh-CN: \u521b\u5efa  \n      name: create  \n    - localizedName:  \n        en-US: Delete  \n        zh-CN: \u5220\u9664  \n      name: delete  \n    - localizedName:  \n        en-US: Update  \n        zh-CN: \u7f16\u8f91  \n      name: update  \n    - localizedName:  \n        en-US: Get  \n        zh-CN: \u67e5\u770b  \n      name: get  \n    - localizedName:  \n        en-US: Admin  \n        zh-CN: \u7ba1\u7406  \n      name: admin  \n  authScopes:  \n    - resourcePermissions:  \n        - actions:  \n            - name: get  \n            - dependPermissions:  \n                - action: get  \n              name: create  \n            - dependPermissions:  \n                - action: get  \n              name: update  \n            - dependPermissions:  \n                - action: get  \n              name: delete  \n          resourceType: cluster  \n        - actions:  \n            - name: get  \n          resourceType: menu  \n      scope: platform  \n    - resourcePermissions:  \n        - actions:  \n            - name: admin  \n              tips:  \n                - en-US: >-  \n                    If the workspace is bound to a cluster, it will be assigned  \n                    the Cluster Admin role upon authorization.  
\n                  zh-CN: \u82e5\u5de5\u4f5c\u7a7a\u95f4\u7ed1\u5b9a\u4e86\u96c6\u7fa4\uff0c\u6388\u6743\u540e\u8fd8\u5c06\u88ab\u6620\u5c04\u4e3a\u5bf9\u5e94\u96c6\u7fa4\u7684 Cluster Admin \u89d2\u8272  \n          resourceType: cluster  \n        - actions:  \n            - name: get  \n              tips:  \n                - en-US: >-  \n                    If the workspace is bound to a namespace, it will be  \n                    assigned the NS View role upon authorization.  \n                  zh-CN: \u82e5\u5de5\u4f5c\u7a7a\u95f4\u7ed1\u5b9a\u4e86\u547d\u540d\u7a7a\u95f4\uff0c\u6388\u6743\u540e\u8fd8\u5c06\u88ab\u6620\u5c04\u4e3a\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4\u7684 NS View \u89d2\u8272  \n            - name: update  \n              tips:  \n                - en-US: >-  \n                    If the workspace is bound to a namespace, it will be  \n                    assigned the NS Edit role upon authorization.  \n                  zh-CN: \u82e5\u5de5\u4f5c\u7a7a\u95f4\u7ed1\u5b9a\u4e86\u547d\u540d\u7a7a\u95f4\uff0c\u6388\u6743\u540e\u8fd8\u5c06\u88ab\u6620\u5c04\u4e3a\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4\u7684 NS  Edit \u89d2\u8272  \n            - name: admin  \n              tips:  \n                - en-US: >-  \n                    If the workspace is bound to a namespace, it will be  \n                    assigned the NS Admin role upon authorization.  \n                  zh-CN: \u82e5\u5de5\u4f5c\u7a7a\u95f4\u7ed1\u5b9a\u4e86\u547d\u540d\u7a7a\u95f4\uff0c\u6388\u6743\u540e\u8fd8\u5c06\u88ab\u6620\u5c04\u4e3a\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4\u7684 NS Admin \u89d2\u8272  \n          resourceType: namespace  \n      scope: workspace  \n  gproduct: kpanda  \n  resourceTypes:  \n    - localizedName:  \n        en-US: Cluster Management  \n        zh-CN: \u96c6\u7fa4\u7ba1\u7406  \n      name: cluster  \n    - localizedName:  \n        en-US: Menu  \n        zh-CN: \u83dc\u5355  \n      name: menu  \n    - localizedName:  \n        en-US: Namespace Management  \n        zh-CN: \u547d\u540d\u7a7a\u95f4  \n      name: namespace\n
          "},{"location":"admin/ghippo/best-practice/menu/menu-display-or-hiding.html#_4","title":"\u901a\u8fc7\u81ea\u5b9a\u4e49\u89d2\u8272\u5b9e\u73b0\u4e0a\u8ff0\u6548\u679c","text":"

          Note

          \u4ec5\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7684\u83dc\u5355\u9700\u8981\u5355\u72ec\u914d\u7f6e\u83dc\u5355\u6743\u9650\uff0c\u5176\u4ed6\u6a21\u5757\u4f1a\u6839\u636e\u7528\u6237\u7684\u6743\u9650\u81ea\u52a8\u663e\u793a/\u9690\u85cf

          \u521b\u5efa\u4e00\u4e2a\u81ea\u5b9a\u4e49\u89d2\u8272\uff0c\u5305\u542b\u7684\u6743\u9650\u70b9\u4e3a\u5bb9\u5668\u7ba1\u7406\u7684\u83dc\u5355\u67e5\u770b\u6743\u9650\uff0c\u540e\u7eed\u6388\u6743\u7ed9\u9700\u8981\u67e5\u770b\u5bb9\u5668\u7ba1\u7406\u83dc\u5355\u7684\u7528\u6237\u3002

          \u6548\u679c\u5982\u4e0b\uff0c\u53ef\u4ee5\u770b\u5230\u5bb9\u5668\u7ba1\u7406\u548c\u53ef\u89c2\u6d4b\u6027\u7684\u5bfc\u822a\u680f\u83dc\u5355\uff1a

          "},{"location":"admin/ghippo/best-practice/menu/navigator.html","title":"\u81ea\u5b9a\u4e49\u5bfc\u822a\u680f","text":"

          \u5f53\u524d\u81ea\u5b9a\u4e49\u5bfc\u822a\u680f\u9700\u8981\u901a\u8fc7\u624b\u52a8\u521b\u5efa\u5bfc\u822a\u680f\u7684 YAML \uff0c\u5e76 apply \u5230\u96c6\u7fa4\u4e2d\u3002

          "},{"location":"admin/ghippo/best-practice/menu/navigator.html#_2","title":"\u5bfc\u822a\u680f\u5206\u7c7b","text":"

          \u82e5\u9700\u8981\u65b0\u589e\u6216\u91cd\u65b0\u6392\u5e8f\u5bfc\u822a\u680f\u5206\u7c7b\u53ef\u4ee5\u901a\u8fc7\u65b0\u589e\u3001\u4fee\u6539 category YAML \u5b9e\u73b0\u3002

          category \u7684 YAML \u793a\u4f8b\u5982\u4e0b\uff1a

          apiVersion: ghippo.io/v1alpha1\nkind: NavigatorCategory\nmetadata:\n  name: management-custom # (1)!\nspec:\n  name: Management # (2)!\n  isCustom: true # (3)!\n  localizedName: # (4)!\n    zh-CN: \u7ba1\u7406\n    en-US: Management\n  order: 100 # (5)!\n
          1. Naming rule: the lowercase "spec.name" plus "-custom"
          2. Used when modifying an existing category
          3. This field must be true
          4. Defines the category's Chinese and English names
          5. Ordering; the larger the number, the higher the category appears

          After writing the YAML file, run the following command and refresh the page to see the added or modified navigation bar category.

          kubectl apply -f xxx.yaml\n
          "},{"location":"admin/ghippo/best-practice/menu/navigator.html#_3","title":"\u5bfc\u822a\u680f\u83dc\u5355","text":"

          \u82e5\u9700\u8981\u65b0\u589e\u6216\u91cd\u65b0\u6392\u5e8f\u5bfc\u822a\u680f\u83dc\u5355\u53ef\u4ee5\u901a\u8fc7\u65b0\u589e navigator YAML \u5b9e\u73b0\u3002

          Note

          \u82e5\u9700\u8981\u7f16\u8f91\u5df2\u5b58\u5728\u7684\u5bfc\u822a\u680f\u83dc\u5355\uff08\u975e\u7528\u6237\u81ea\u5df1\u65b0\u589e\u7684 custom \u83dc\u5355\uff09\uff0c\u9700\u8981\u4ee4\u65b0\u589e custom \u83dc\u5355 gproduct \u5b57\u6bb5\u4e0e\u9700\u8981\u8986\u76d6\u7684\u83dc\u5355\u7684 gproduct \u76f8\u540c\uff0c \u65b0\u7684\u5bfc\u822a\u680f\u83dc\u5355\u4f1a\u5c06 menus \u4e2d name \u76f8\u540c\u7684\u90e8\u5206\u6267\u884c\u8986\u76d6\uff0cname \u4e0d\u540c\u7684\u5730\u65b9\u505a\u65b0\u589e\u64cd\u4f5c\u3002
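
          A minimal sketch of such an override, assuming you want to re-order the existing Clusters entry of the kpanda menu; it is trimmed to the fields relevant to overriding, and a real CR would carry the remaining fields shown in the first-level menu example below:

          apiVersion: ghippo.io/v1alpha1\nkind: GProductNavigator\nmetadata:\n  name: kpanda-menus-override\nspec:\n  gproduct: kpanda      # same gproduct as the menu being overridden\n  isCustom: true\n  menus:\n    - name: Clusters    # same name: this entry is overridden\n      order: 99\n    - name: MyNewEntry  # new name: this entry is added\n      url: ./kpanda/my-new-entry\n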

          "},{"location":"admin/ghippo/best-practice/menu/navigator.html#_4","title":"\u4e00\u7ea7\u83dc\u5355","text":"

          \u4f5c\u4e3a\u4ea7\u54c1\u63d2\u5165\u5230\u67d0\u4e2a\u5bfc\u822a\u680f\u5206\u7c7b\u4e0b

          apiVersion: ghippo.io/v1alpha1\nkind: GProductNavigator\nmetadata:\n  name: gmagpie-custom # (1)!\nspec:\n  name: Operations Management\n  iconUrl: ./ui/gmagpie/gmagpie.svg\n  localizedName: # (2)!\n    zh-CN: \u8fd0\u8425\u7ba1\u7406\n    en-US: Operations Management\n  url: ./gmagpie\n  category: management # (3)!\n  menus: # (4)!\n    - name: Access Control\n      iconUrl: ./ui/ghippo/menus/access-control.svg\n      localizedName:\n        zh-CN: \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\n        en-US: Access Control\n      url: ./ghippo/users\n      order: 50 # (5)!\n    - name: Workspace\n      iconUrl: ./ui/ghippo/menus/workspace-folder.svg\n      localizedName:\n        zh-CN: \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7\n        en-US: Workspace and Folder\n      url: ./ghippo/workspaces\n      order: 40\n    - name: Audit Log\n      iconUrl: ./ui/ghippo/menus/audit-logs.svg\n      localizedName:\n        zh-CN: \u5ba1\u8ba1\u65e5\u5fd7\n        en-US: Audit Log\n      url: ./ghippo/audit\n      order: 30\n    - name: Settings\n      iconUrl: ./ui/ghippo/menus/setting.svg\n      localizedName:\n        zh-CN: \u5e73\u53f0\u8bbe\u7f6e\n        en-US: Settings\n      url: ./ghippo/settings\n      order: 10\n  gproduct: gmagpie # (6)!\n  visible: true # (7)!\n  isCustom: true # (8)!\n  order: 20 # (9)!\n  target: blank # (10)!\n
          1. Naming rule: the lowercase "spec.gproduct" plus "-custom"
          2. Defines the menu's Chinese and English names
          3. Mutually exclusive with parentGProduct; distinguishes a first-level menu from a second-level menu, matched against the spec.name field of a NavigatorCategory
          4. Second-level menus
          5. Ordering; the smaller the number, the higher the entry appears
          6. Defines the menu's identifier, used together with the parentGProduct field to establish the parent-child relationship
          7. Whether the menu is visible; defaults to true
          8. This field must be true
          9. Ordering; the larger the number, the higher the entry appears
          10. Opens in a new tab
          "},{"location":"admin/ghippo/best-practice/menu/navigator.html#_5","title":"Second-Level Menus","text":"

          Inserted as a sub-product into the second-level menus of a first-level menu.

          apiVersion: ghippo.io/v1alpha1\nkind: GProductNavigator\nmetadata:\n  name: gmagpie-custom # (1)!\nspec:\n  name: Operations Management\n  iconUrl: ./ui/gmagpie/gmagpie.svg\n  localizedName: # (2)!\n    zh-CN: \u8fd0\u8425\u7ba1\u7406\n    en-US: Operations Management\n  url: ./gmagpie\n  parentGProduct: ghippo # (3)!\n  gproduct: gmagpie # (4)!\n  visible: true # (5)!\n  isCustom: true # (6)!\n  order: 20 # (7)!\n
          1. Naming rule: the lowercase "spec.gproduct" plus "-custom"
          2. Defines the menu's Chinese and English names
          3. Mutually exclusive with category; distinguishes a first-level menu from a second-level menu. If this field is present, the menus field is ignored and this menu is inserted as a second-level menu under the first-level menu whose gproduct is ghippo
          4. Defines the menu's identifier, used together with the parentGProduct field to establish the parent-child relationship
          5. Whether the menu is visible; defaults to true
          6. This field must be true
          7. Ordering; the larger the number, the higher the entry appears
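
          Both first- and second-level menu YAMLs take effect the same way as the category YAML above: apply them and refresh the page, for example (navigator.yaml is a placeholder filename):

          kubectl apply -f navigator.yaml\n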
          "},{"location":"admin/ghippo/best-practice/oem/custom-idp.html","title":"\u5b9a\u5236 AI \u7b97\u529b\u4e2d\u5fc3\u5bf9\u63a5\u5916\u90e8\u8eab\u4efd\u63d0\u4f9b\u5546 (IdP)","text":"

          \u8eab\u4efd\u63d0\u4f9b\u5546\uff08IdP, Identity Provider\uff09\uff1a\u5f53 AI \u7b97\u529b\u4e2d\u5fc3\u9700\u8981\u4f7f\u7528\u5ba2\u6237\u7cfb\u7edf\u4f5c\u4e3a\u7528\u6237\u6e90\uff0c \u4f7f\u7528\u5ba2\u6237\u7cfb\u7edf\u767b\u5f55\u754c\u9762\u6765\u8fdb\u884c\u767b\u5f55\u8ba4\u8bc1\u65f6\uff0c\u8be5\u5ba2\u6237\u7cfb\u7edf\u88ab\u79f0\u4e3a AI \u7b97\u529b\u4e2d\u5fc3\u7684\u8eab\u4efd\u63d0\u4f9b\u5546

          "},{"location":"admin/ghippo/best-practice/oem/custom-idp.html#_1","title":"\u9002\u7528\u573a\u666f","text":"

          \u5982\u679c\u5ba2\u6237\u5bf9 Ghippo \u767b\u5f55 IdP \u6709\u9ad8\u5ea6\u5b9a\u5236\u9700\u6c42\uff0c\u4f8b\u5982\u652f\u6301\u4f01\u4e1a\u5fae\u4fe1\u3001\u5fae\u4fe1\u7b49\u5176\u4ed6\u793e\u4f1a\u7ec4\u7ec7\u767b\u5f55\u9700\u6c42\uff0c\u8bf7\u6839\u636e\u672c\u6587\u6863\u5b9e\u65bd\u3002

          "},{"location":"admin/ghippo/best-practice/oem/custom-idp.html#_2","title":"\u652f\u6301\u7248\u672c","text":"

          Ghippo 0.15.0\u53ca\u4ee5\u4e0a\u7248\u672c\u3002
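
          One way to check the currently installed Ghippo release version, assuming the ghippo-system namespace used throughout this document:

          helm list -n ghippo-system\n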

          "},{"location":"admin/ghippo/best-practice/oem/custom-idp.html#_3","title":"\u5177\u4f53\u65b9\u6cd5","text":""},{"location":"admin/ghippo/best-practice/oem/custom-idp.html#ghippo-keycloak-plugin","title":"\u81ea\u5b9a\u4e49 ghippo keycloak plugin","text":"
          1. \u5b9a\u5236 plugin

            \u53c2\u8003 keycloak \u5b98\u65b9\u6587\u6863\u548c keycloak \u81ea\u5b9a\u4e49 IdP \u8fdb\u884c\u5f00\u53d1\u3002

          2. \u6784\u5efa\u955c\u50cf

            # FROM scratch\nFROM scratch\n\n# plugin\nCOPY ./xxx-jar-with-dependencies.jar /plugins/\n

Note

If you need two customized IdPs, you need to copy two jar packages.

"},{"location":"admin/ghippo/best-practice/oem/custom-idp.html#ghippo-keycloak-plugin_1","title":"Steps to deploy the Ghippo keycloak plugin","text":"
1. Upgrade Ghippo to 0.15.0 or later. You can also install Ghippo 0.15.0 directly, but the following information needs to be recorded manually.

            helm -n ghippo-system get values ghippo -o yaml\n
            apiserver:\n  image:\n    repository: release.daocloud.io/ghippo-ci/ghippo-apiserver\n    tag: v0.4.2-test-3-gaba5ec2\ncontrollermanager:\n  image:\n    repository: release.daocloud.io/ghippo-ci/ghippo-apiserver\n    tag: v0.4.2-test-3-gaba5ec2\nglobal:\n  database:\n    builtIn: true\n  reverseProxy: http://192.168.31.10:32628\n
2. After the upgrade succeeds, manually run an install command. The values set via --set are taken from the content saved above, plus a few extra parameters:

  • global.idpPlugin.enabled: whether to enable the custom plugin; disabled by default
  • global.idpPlugin.image.repository: the image address used by the initContainer that initializes the custom plugin
  • global.idpPlugin.image.tag: the image tag used by the initContainer that initializes the custom plugin
  • global.idpPlugin.path: the location of the custom plugin's directory/files inside the image above

  A concrete example follows:

            helm upgrade \\\n    ghippo \\\n    ghippo-release/ghippo \\\n    --version v0.4.2-test-3-gaba5ec2 \\\n    -n ghippo-system \\\n    --set apiserver.image.repository=release.daocloud.io/ghippo-ci/ghippo-apiserver \\\n    --set apiserver.image.tag=v0.4.2-test-3-gaba5ec2 \\\n    --set controllermanager.image.repository=release.daocloud.io/ghippo-ci/ghippo-apiserver \\\n    --set controllermanager.image.tag=v0.4.2-test-3-gaba5ec2 \\\n    --set global.reverseProxy=http://192.168.31.10:32628 \\\n    --set global.database.builtIn=true \\\n    --set global.idpPlugin.enabled=true \\\n    --set global.idpPlugin.image.repository=chenyang-idp \\\n    --set global.idpPlugin.image.tag=v0.0.1 \\\n    --set global.idpPlugin.path=/plugins/.\n
3. Select the plugin to use on the Keycloak admin page. A quick check of the applied plugin values is sketched below.
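As an optional sanity check (a minimal sketch; it assumes the release name ghippo and namespace ghippo-system used above), confirm that the plugin parameters were applied to the release:

helm -n ghippo-system get values ghippo -o yaml | grep -A 3 idpPlugin\n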

          "},{"location":"admin/ghippo/best-practice/oem/demo.html","title":"gproduct-demo","text":"

          \u672c\u9875\u8bf4\u660e\u5982\u4f55\u642d\u5efa GProduct Demo \u73af\u5883\u3002

          "},{"location":"admin/ghippo/best-practice/oem/demo.html#_1","title":"\u642d\u5efa\u73af\u5883","text":"
          npm install\n

          \u7f16\u8bd1\u548c\u70ed\u52a0\u8f7d\u5f00\u53d1\u73af\u5883\uff1a

          npm run serve\n

          \u7f16\u8bd1\u548c\u6784\u5efa\uff1a

          npm run build\n

          \u8865\u5168 Lint \u68c0\u67e5\u6587\u4ef6\uff1a

          npm run lint\n
          "},{"location":"admin/ghippo/best-practice/oem/demo.html#_2","title":"\u81ea\u5b9a\u4e49\u914d\u7f6e","text":"

          \u53c2\u89c1\u914d\u7f6e\u53c2\u8003\u3002

          \u6784\u5efa\u955c\u50cf\uff1a

          docker build -t release.daocloud.io/henry/gproduct-demo .\n

          \u5728 K8s \u4e0a\u8fd0\u884c\uff1a

          kubectl apply -f demo.yaml\n
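As an optional check (a sketch, assuming the demo manifests create the gproduct-demo namespace, as shown in the demo.yaml later in this document), verify that the demo workload came up:

kubectl -n gproduct-demo get deploy,svc,pod\n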
          "},{"location":"admin/ghippo/best-practice/oem/keycloak-idp.html","title":"Keycloak \u81ea\u5b9a\u4e49 IdP","text":"

          \u8981\u6c42\uff1akeycloak >= v20

          \u5df2\u77e5\u95ee\u9898 keycloak >= v21\uff0c\u5220\u9664\u4e86\u65e7\u7248 theme \u7684\u652f\u6301\uff0c\u53ef\u80fd\u4f1a\u5728 v22 \u4fee\u590d\u3002 \u53c2\u89c1 Issue #15344 \u3002

          \u6b64\u6b21 demo \u4f7f\u7528 Keycloak v20.0.5\u3002

          "},{"location":"admin/ghippo/best-practice/oem/keycloak-idp.html#source","title":"\u57fa\u4e8e source \u5f00\u53d1","text":""},{"location":"admin/ghippo/best-practice/oem/keycloak-idp.html#_1","title":"\u914d\u7f6e\u73af\u5883","text":"

          \u53c2\u7167 keycloak/building.md \u914d\u7f6e\u73af\u5883\u3002

          \u53c2\u7167 keycloak/README.md \u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a

          cd quarkus\nmvn -f ../pom.xml clean install -DskipTestsuite -DskipExamples -DskipTests\n
          "},{"location":"admin/ghippo/best-practice/oem/keycloak-idp.html#ide","title":"\u4ece IDE \u8fd0\u884c","text":""},{"location":"admin/ghippo/best-practice/oem/keycloak-idp.html#service","title":"\u6dfb\u52a0 service \u4ee3\u7801","text":""},{"location":"admin/ghippo/best-practice/oem/keycloak-idp.html#keycloak","title":"\u5982\u679c\u53ef\u4ece keycloak \u7ee7\u627f\u90e8\u5206\u529f\u80fd","text":"

          \u5728\u76ee\u5f55 services/src/main/java/org/keycloak/broker \u4e0b\u6dfb\u52a0\u6587\u4ef6\uff1a

          \u6587\u4ef6\u540d\u9700\u8981\u662f xxxProvider.java \u548c xxxProviderFactory.java

          xxxProviderFactory.java \u793a\u4f8b\uff1a

          \u7559\u610f PROVIDER_ID = \"oauth\"; \u8fd9\u4e2a\u53d8\u91cf\uff0c\u540e\u9762\u5b9a\u4e49 html \u4f1a\u7528\u5230\u3002

          xxxProvider.java \u793a\u4f8b

          "},{"location":"admin/ghippo/best-practice/oem/keycloak-idp.html#keycloak_1","title":"\u5982\u679c\u4e0d\u80fd\u4ece keycloak \u7ee7\u627f\u529f\u80fd","text":"

          \u53c2\u8003\u4e0b\u56fe\u4e2d\u7684\u4e09\u4e2a\u6587\u4ef6\u7f16\u5199\u4f60\u7684\u4ee3\u7801\uff1a

          \u6dfb\u52a0 xxxProviderFactory \u5230 resource service

          \u5728 services/src/main/resources/META-INF/services/org.keycloak.broker.provider.IdentityProviderFactory \u6dfb\u52a0 xxxProviderFactory\uff0c\u8fd9\u6837\u521a\u521a\u7f16\u5199\u7684\u80fd\u5de5\u4f5c\u4e86\uff1a
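A minimal sketch of this registration step. The class name below is hypothetical; replace it with the fully qualified name of the factory you actually wrote:

# Hypothetical factory class; substitute your own fully qualified class name\necho \"org.keycloak.broker.oauth.OAuthIdentityProviderFactory\" >> services/src/main/resources/META-INF/services/org.keycloak.broker.provider.IdentityProviderFactory\n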

Add the HTML file

Copy the themes/src/main/resources/theme/base/admin/resources/partials/realm-identity-provider-oidc.html file to themes/src/main/resources/theme/base/admin/resources/partials/realm-identity-provider-oauth.html (renaming it realm-identity-provider-oauth.html — recall the PROVIDER_ID variable noted above).

All files are now in place; you can start debugging.

"},{"location":"admin/ghippo/best-practice/oem/keycloak-idp.html#jar","title":"Package as a jar and run it as a plugin","text":"

Create a new Java project and copy the code above into it, as shown below:

See pom.xml.

Run mvn clean package; when packaging completes you get the xxx-jar-with-dependencies.jar file.

Download the Keycloak Release 20.0.5 zip package and extract it.

Copy xxx-jar-with-dependencies.jar into the keycloak-20.0.5/providers directory.

Run the following command to check whether the functionality is complete:

          bin/kc.sh start-dev\n
          "},{"location":"admin/ghippo/best-practice/oem/oem-in.html","title":"\u5982\u4f55\u5c06\u5ba2\u6237\u7cfb\u7edf\u96c6\u6210\u5230 AI \u7b97\u529b\u4e2d\u5fc3\uff08OEM IN\uff09","text":"

          OEM IN \u662f\u6307\u5408\u4f5c\u4f19\u4f34\u7684\u5e73\u53f0\u4f5c\u4e3a\u5b50\u6a21\u5757\u5d4c\u5165 AI \u7b97\u529b\u4e2d\u5fc3\uff0c\u51fa\u73b0\u5728 AI \u7b97\u529b\u4e2d\u5fc3\u4e00\u7ea7\u5bfc\u822a\u680f\u3002 \u7528\u6237\u901a\u8fc7 AI \u7b97\u529b\u4e2d\u5fc3\u8fdb\u884c\u767b\u5f55\u548c\u7edf\u4e00\u7ba1\u7406\u3002\u5b9e\u73b0 OEM IN \u5171\u5206\u4e3a 5 \u6b65\uff0c\u5206\u522b\u662f\uff1a

          1. \u7edf\u4e00\u57df\u540d
          2. \u6253\u901a\u7528\u6237\u4f53\u7cfb
          3. \u5bf9\u63a5\u5bfc\u822a\u680f
          4. \u5b9a\u5236\u5916\u89c2
          5. \u6253\u901a\u6743\u9650\u4f53\u7cfb\uff08\u53ef\u9009\uff09

          Note

          \u4ee5\u4e0b\u4f7f\u7528\u5f00\u6e90\u8f6f\u4ef6 Label Studio \u6765\u505a\u5d4c\u5957\u6f14\u793a\u3002\u5b9e\u9645\u573a\u666f\u9700\u8981\u81ea\u5df1\u89e3\u51b3\u5ba2\u6237\u7cfb\u7edf\u7684\u95ee\u9898\uff1a

          \u4f8b\u5982\u5ba2\u6237\u7cfb\u7edf\u9700\u8981\u81ea\u5df1\u6dfb\u52a0\u4e00\u4e2a Subpath\uff0c\u7528\u4e8e\u533a\u5206\u54ea\u4e9b\u662f AI \u7b97\u529b\u4e2d\u5fc3\u7684\u670d\u52a1\uff0c\u54ea\u4e9b\u662f\u5ba2\u6237\u7cfb\u7edf\u7684\u670d\u52a1\u3002

          "},{"location":"admin/ghippo/best-practice/oem/oem-in.html#_1","title":"\u73af\u5883\u51c6\u5907","text":"
          1. \u90e8\u7f72 AI \u7b97\u529b\u4e2d\u5fc3\u73af\u5883\uff1a

            https://10.6.202.177:30443 \u4f5c\u4e3a AI \u7b97\u529b\u4e2d\u5fc3\u7684\u73af\u5883\u3002

          2. \u90e8\u7f72\u5ba2\u6237\u7cfb\u7edf\u73af\u5883\uff1a

            http://10.6.202.177:30123 \u4f5c\u4e3a\u5ba2\u6237\u7cfb\u7edf

            \u5e94\u7528\u8fc7\u7a0b\u4e2d\u5bf9\u5ba2\u6237\u7cfb\u7edf\u7684\u64cd\u4f5c\u8bf7\u6839\u636e\u5b9e\u9645\u60c5\u51b5\u8fdb\u884c\u8c03\u6574\u3002

          3. \u89c4\u5212\u5ba2\u6237\u7cfb\u7edf\u7684 Subpath \u8def\u5f84\uff1a http://10.6.202.177:30123/label-studio \uff08\u5efa\u8bae\u4f7f\u7528\u8fa8\u8bc6\u5ea6\u9ad8\u7684\u540d\u79f0\u4f5c\u4e3a Subpath\uff0c\u4e0d\u80fd\u4e0e\u4e3b AI \u7b97\u529b\u4e2d\u5fc3\u7684 HTTP router \u53d1\u751f\u51b2\u7a81\uff09\u3002 \u8bf7\u786e\u4fdd\u7528\u6237\u901a\u8fc7 http://10.6.202.177:30123/label-studio \u80fd\u591f\u6b63\u5e38\u8bbf\u95ee\u5ba2\u6237\u7cfb\u7edf\u3002
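A simple reachability check for the planned Subpath (a sketch using the demo address above):

curl -I http://10.6.202.177:30123/label-studio\n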

          "},{"location":"admin/ghippo/best-practice/oem/oem-in.html#_2","title":"\u7edf\u4e00\u57df\u540d\u548c\u7aef\u53e3","text":"
          1. SSH \u767b\u5f55\u5230 AI \u7b97\u529b\u4e2d\u5fc3\u670d\u52a1\u5668\u3002

            ssh root@10.6.202.177\n
          2. \u4f7f\u7528 vim \u547d\u4ee4\u521b\u5efa\u548c\u4fee\u6539 label-studio.yaml \u6587\u4ef6

            vim label-studio.yaml\n
            label-studio.yaml
apiVersion: networking.istio.io/v1beta1\nkind: ServiceEntry\nmetadata:\n  name: label-studio\n  namespace: ghippo-system\nspec:\n  exportTo:\n  - \"*\"\n  hosts:\n  - label-studio.svc.external\n  ports:\n  # Add a virtual port\n  - number: 80\n    name: http\n    protocol: HTTP\n  location: MESH_EXTERNAL\n  resolution: STATIC\n  endpoints:\n  # Change to the customer system's domain name (or IP)\n  - address: 10.6.202.177\n    ports:\n      # Change to the customer system's port number\n      http: 30123\n---\napiVersion: networking.istio.io/v1alpha3\nkind: VirtualService\nmetadata:\n  # Change to the customer system's name\n  name: label-studio\n  namespace: ghippo-system\nspec:\n  exportTo:\n  - \"*\"\n  hosts:\n  - \"*\"\n  gateways:\n  - ghippo-gateway\n  http:\n  - match:\n      - uri:\n          exact: /label-studio # Change to the customer system's route address in the AI Computing Center Web UI entry\n      - uri:\n          prefix: /label-studio/ # Change to the customer system's route address in the AI Computing Center Web UI entry\n    route:\n    - destination:\n        # Change to the value of spec.hosts in the ServiceEntry above\n        host: label-studio.svc.external\n        port:\n          # Change to the value of spec.ports in the ServiceEntry above\n          number: 80\n---\napiVersion: security.istio.io/v1beta1\nkind: AuthorizationPolicy\nmetadata:\n  # Change to the customer system's name\n  name: label-studio\n  namespace: istio-system\nspec:\n  action: ALLOW\n  selector:\n    matchLabels:\n      app: istio-ingressgateway\n  rules:\n  - from:\n    - source:\n        requestPrincipals:\n        - '*'\n  - to:\n    - operation:\n        paths:\n        - /label-studio # Change to the value of spec.http.match.uri.prefix in the VirtualService\n        - /label-studio/* # Change to the value of spec.http.match.uri.prefix in the VirtualService (note: a trailing \"*\" must be appended)\n
3. Apply label-studio.yaml with kubectl:

  kubectl apply -f label-studio.yaml\n
4. Verify that the IP and port of the Label Studio UI are consistent, for example with the checks sketched below:
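As an optional check (a sketch, assuming the resource names from label-studio.yaml above), confirm the Istio objects were created:

kubectl -n ghippo-system get serviceentry,virtualservice label-studio\nkubectl -n istio-system get authorizationpolicy label-studio\n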

          "},{"location":"admin/ghippo/best-practice/oem/oem-in.html#_3","title":"\u6253\u901a\u7528\u6237\u4f53\u7cfb","text":"

          \u5c06\u5ba2\u6237\u7cfb\u7edf\u4e0e AI \u7b97\u529b\u4e2d\u5fc3\u5e73\u53f0\u901a\u8fc7 OIDC/OAUTH \u7b49\u534f\u8bae\u5bf9\u63a5\uff0c\u4f7f\u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3\u5e73\u53f0\u540e\u8fdb\u5165\u5ba2\u6237\u7cfb\u7edf\u65f6\u65e0\u9700\u518d\u6b21\u767b\u5f55\u3002

          Note

          \u8fd9\u91cc\u4f7f\u7528\u4e24\u5957 AI \u7b97\u529b\u4e2d\u5fc3\u76f8\u4e92\u5bf9\u63a5\u6765\u8fdb\u884c\u6f14\u793a\u3002\u6db5\u76d6\u5c06 AI \u7b97\u529b\u4e2d\u5fc3 \u4f5c\u4e3a\u7528\u6237\u6e90\u767b\u5f55\u5ba2\u6237\u5e73\u53f0\uff0c\u548c\u5c06\u5ba2\u6237\u5e73\u53f0\u4f5c\u4e3a\u7528\u6237\u6e90\u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\u4e24\u79cd\u573a\u666f\u3002

          1. AI \u7b97\u529b\u4e2d\u5fc3\u4f5c\u4e3a\u7528\u6237\u6e90\uff0c\u767b\u5f55\u5ba2\u6237\u5e73\u53f0\uff1a \u9996\u5148\u5c06\u7b2c\u4e00\u5957 AI \u7b97\u529b\u4e2d\u5fc3\u4f5c\u4e3a\u7528\u6237\u6e90\uff0c\u5b9e\u73b0\u5bf9\u63a5\u540e\u7b2c\u4e00\u5957 AI \u7b97\u529b\u4e2d\u5fc3\u4e2d\u7684\u7528\u6237\u53ef\u4ee5\u901a\u8fc7 OIDC \u76f4\u63a5\u767b\u5f55\u7b2c\u4e8c\u5957 AI \u7b97\u529b\u4e2d\u5fc3\uff0c \u800c\u65e0\u9700\u5728\u7b2c\u4e8c\u5957\u4e2d\u518d\u6b21\u521b\u5efa\u7528\u6237\u3002\u5728\u7b2c\u4e00\u5957 AI \u7b97\u529b\u4e2d\u5fc3\u4e2d\u901a\u8fc7 \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u63a5\u5165\u7ba1\u7406 \u521b\u5efa SSO \u63a5\u5165\u3002

          2. \u5ba2\u6237\u5e73\u53f0\u4f5c\u4e3a\u7528\u6237\u6e90\uff0c\u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3\uff1a \u5c06\u7b2c\u4e00\u5957 AI \u7b97\u529b\u4e2d\u5fc3 \u4e2d\u751f\u6210\u7684\u5ba2\u6237\u7aef ID\u3001\u5ba2\u6237\u7aef\u5bc6\u94a5\u3001\u5355\u70b9\u767b\u5f55 URL \u7b49\u586b\u5199\u5230\u7b2c\u4e8c\u5957 AI \u7b97\u529b\u4e2d\u5fc3 \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u8eab\u4efd\u63d0\u4f9b\u5546 -> OIDC \u4e2d\uff0c\u5b8c\u6210\u7528\u6237\u5bf9\u63a5\u3002 \u5bf9\u63a5\u540e\uff0c\u7b2c\u4e00\u5957 AI \u7b97\u529b\u4e2d\u5fc3\u4e2d\u7684\u7528\u6237\u53ef\u4ee5\u901a\u8fc7 OIDC \u76f4\u63a5\u767b\u5f55\u7b2c\u4e8c\u5957 AI \u7b97\u529b\u4e2d\u5fc3\uff0c\u800c\u65e0\u9700\u5728\u7b2c\u4e8c\u5957\u4e2d\u518d\u6b21\u521b\u5efa\u7528\u6237\u3002

          3. \u5bf9\u63a5\u5b8c\u6210\u540e\uff0c\u7b2c\u4e8c\u5957 AI \u7b97\u529b\u4e2d\u5fc3 \u767b\u5f55\u9875\u9762\u5c06\u51fa\u73b0 OIDC \u9009\u9879\uff0c\u9996\u6b21\u767b\u5f55\u65f6\u9009\u62e9\u901a\u8fc7 OIDC \u767b\u5f55\uff08\u81ea\u5b9a\u4e49\u540d\u79f0\uff0c\u8fd9\u91cc\u662f\u540d\u79f0\u662f loginname\uff09\uff0c \u540e\u7eed\u5c06\u76f4\u63a5\u8fdb\u5165\u65e0\u9700\u518d\u6b21\u9009\u62e9\u3002

          Note

          \u4f7f\u7528\u4e24\u5957 AI \u7b97\u529b\u4e2d\u5fc3,\u8868\u660e\u5ba2\u6237\u53ea\u8981\u652f\u6301 OIDC \u534f\u8bae\uff0c\u65e0\u8bba\u662f AI \u7b97\u529b\u4e2d\u5fc3\u4f5c\u4e3a\u7528\u6237\u6e90\uff0c\u8fd8\u662f\u201c\u5ba2\u6237\u5e73\u53f0\u201d\u4f5c\u4e3a\u7528\u6237\u6e90\uff0c\u4e24\u79cd\u573a\u666f\u90fd\u652f\u6301\u3002

          "},{"location":"admin/ghippo/best-practice/oem/oem-in.html#_4","title":"\u5bf9\u63a5\u5bfc\u822a\u680f","text":"

          \u53c2\u8003\u6587\u6863\u4e0b\u65b9\u7684 tar \u5305\u6765\u5b9e\u73b0\u4e00\u4e2a\u7a7a\u58f3\u7684\u524d\u7aef\u5b50\u5e94\u7528\uff0c\u628a\u5ba2\u6237\u7cfb\u7edf\u4ee5 iframe \u7684\u5f62\u5f0f\u653e\u8fdb\u8be5\u7a7a\u58f3\u5e94\u7528\u91cc\u3002

          1. \u4e0b\u8f7d gproduct-demo-main.tar.gz \u6587\u4ef6\uff0c\u6253\u5f00 src/App-iframe.vue \u6587\u4ef6\uff0c\u4fee\u6539\u5176\u4e2d\u7684 src \u5c5e\u6027\u503c\uff08\u5373\u8fdb\u5165\u5ba2\u6237\u7cfb\u7edf\u7684\u5730\u5740\uff09\uff1a

            • \u7edd\u5bf9\u5730\u5740\uff1asrc=\"https://10.6.202.177:30443/label-studio\" (AI \u7b97\u529b\u4e2d\u5fc3\u5730\u5740 + Subpath)
            • \u76f8\u5bf9\u5730\u5740\uff1asrc=\"./external-anyproduct/insight\"
            App-iframe.vue
<template>\n  <iframe\n    src=\"https://daocloud.io\"\n    title=\"demo\"\n    class=\"iframe-container\"\n  />\n</template>\n\n<style lang=\"scss\">\nhtml,\nbody {\n  height: 100%;\n}\n\n#app {\n  display: flex;\n  height: 100%;\n  .iframe-container {\n    border: 0;\n    flex: 1 1 0;\n  }\n}\n</style>\n
2. Delete the App.vue and main.ts files under the src folder, and:

  • Rename App-iframe.vue to App.vue
  • Rename main-iframe.ts to main.ts
3. Build the image following the readme steps (note: before executing the last step, replace the image address in demo.yaml with the address of the image you built; a sketch of the build-and-push commands follows the manifest below)

            demo.yaml
kind: Namespace\napiVersion: v1\nmetadata:\n  name: gproduct-demo\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: gproduct-demo\n  namespace: gproduct-demo\n  labels:\n    app: gproduct-demo\nspec:\n  selector:\n    matchLabels:\n      app: gproduct-demo\n  template:\n    metadata:\n      name: gproduct-demo\n      labels:\n        app: gproduct-demo\n    spec:\n      containers:\n      - name: gproduct-demo\n        image: release.daocloud.io/gproduct-demo # Change this image address\n        ports:\n        - containerPort: 80\n---\napiVersion: v1\nkind: Service\n...\n
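A minimal sketch of the build-and-push step (the tag v0.0.1 is illustrative; use your own registry address, then update demo.yaml accordingly):

docker build -t release.daocloud.io/gproduct-demo:v0.0.1 .\ndocker push release.daocloud.io/gproduct-demo:v0.0.1\n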

After the integration is complete, a Customer System entry appears in the AI Computing Center's primary navigation bar; click it to enter the customer system.

"},{"location":"admin/ghippo/best-practice/oem/oem-in.html#_5","title":"Customize the Appearance","text":"

Note

The AI Computing Center supports appearance customization by writing CSS. How the customer system implements appearance customization in practice must be handled according to the actual situation.

Log in to the customer system. Via Global Management -> Platform Settings -> Appearance Customization, you can customize the platform background color, logo, name, and so on. For details, see Appearance Customization.

          "},{"location":"admin/ghippo/best-practice/oem/oem-in.html#_6","title":"\u6253\u901a\u6743\u9650\u4f53\u7cfb\uff08\u53ef\u9009\uff09","text":"

          \u65b9\u6848\u601d\u8def\u4e00\uff1a

          \u5b9a\u5236\u5316\u56e2\u961f\u53ef\u5b9e\u73b0\u4e00\u5b9a\u5236\u6a21\u5757\uff0cAI \u7b97\u529b\u4e2d\u5fc3\u5c06\u6bcf\u4e00\u6b21\u7684\u7528\u6237\u767b\u5f55\u4e8b\u4ef6\u901a\u8fc7 Webhook \u7684\u65b9\u5f0f\u901a\u77e5\u5230\u5b9a\u5236\u6a21\u5757\uff0c \u5b9a\u5236\u6a21\u5757\u53ef\u81ea\u884c\u8c03\u7528 AnyProduct \u548c AI \u7b97\u529b\u4e2d\u5fc3\u7684 OpenAPI \u5c06\u8be5\u7528\u6237\u7684\u6743\u9650\u4fe1\u606f\u540c\u6b65\u3002

          \u65b9\u6848\u601d\u8def\u4e8c\uff1a

          \u901a\u8fc7 Webhook \u65b9\u5f0f\uff0c\u5c06\u6bcf\u4e00\u6b21\u7684\u6388\u6743\u53d8\u5316\u90fd\u901a\u77e5\u5230 AnyProduct\uff08\u5982\u6709\u9700\u6c42\uff0c\u540e\u7eed\u53ef\u5b9e\u73b0\uff09\u3002

          "},{"location":"admin/ghippo/best-practice/oem/oem-in.html#anyproduct-ai","title":"AnyProduct \u4f7f\u7528 AI \u7b97\u529b\u4e2d\u5fc3\u7684\u5176\u4ed6\u80fd\u529b(\u53ef\u9009)","text":"

          \u64cd\u4f5c\u65b9\u6cd5\u4e3a\u8c03\u7528 AI \u7b97\u529b\u4e2d\u5fc3OpenAPI\u3002

          "},{"location":"admin/ghippo/best-practice/oem/oem-in.html#_7","title":"\u53c2\u8003\u8d44\u6599","text":"
          • \u53c2\u8003 OEM OUT \u6587\u6863
          • \u53c2\u9605 gProduct-demo-main \u5bf9\u63a5 tar \u5305
          "},{"location":"admin/ghippo/best-practice/oem/oem-out.html","title":"\u5982\u4f55\u5c06AI \u7b97\u529b\u4e2d\u5fc3\u96c6\u6210\u5230\u5ba2\u6237\u7cfb\u7edf\uff08OEM OUT\uff09","text":"

          OEM OUT \u662f\u6307\u5c06 AI \u7b97\u529b\u4e2d\u5fc3\u4f5c\u4e3a\u5b50\u6a21\u5757\u63a5\u5165\u5176\u4ed6\u4ea7\u54c1\uff0c\u51fa\u73b0\u5728\u5176\u4ed6\u4ea7\u54c1\u7684\u83dc\u5355\u4e2d\u3002 \u7528\u6237\u767b\u5f55\u5176\u4ed6\u4ea7\u54c1\u540e\u53ef\u76f4\u63a5\u8df3\u8f6c\u81f3 AI \u7b97\u529b\u4e2d\u5fc3\u65e0\u9700\u4e8c\u6b21\u767b\u5f55\u3002\u5b9e\u73b0 OEM OUT \u5171\u5206\u4e3a 5 \u6b65\uff0c\u5206\u522b\u662f\uff1a

          • \u7edf\u4e00\u57df\u540d
          • \u6253\u901a\u7528\u6237\u4f53\u7cfb
          • \u5bf9\u63a5\u5bfc\u822a\u680f
          • \u5b9a\u5236\u5916\u89c2
          • \u6253\u901a\u6743\u9650\u4f53\u7cfb(\u53ef\u9009)
          "},{"location":"admin/ghippo/best-practice/oem/oem-out.html#_1","title":"\u7edf\u4e00\u57df\u540d","text":"
          1. \u90e8\u7f72 AI \u7b97\u529b\u4e2d\u5fc3\uff08\u5047\u8bbe\u90e8\u7f72\u5b8c\u7684\u8bbf\u95ee\u5730\u5740\u4e3a https://10.6.8.2:30343/\uff09

          2. \u5ba2\u6237\u7cfb\u7edf\u548c AI \u7b97\u529b\u4e2d\u5fc3\u524d\u53ef\u4ee5\u653e\u4e00\u4e2a nginx \u53cd\u4ee3\u6765\u5b9e\u73b0\u540c\u57df\u8bbf\u95ee\uff0c / \u8def\u7531\u5230\u5ba2\u6237\u7cfb\u7edf\uff0c /dce5 (subpath) \u8def\u7531\u5230 AI \u7b97\u529b\u4e2d\u5fc3\u7cfb\u7edf\uff0c vi /etc/nginx/conf.d/default.conf \u793a\u4f8b\u5982\u4e0b\uff1a

server {\n    listen       80;\n    server_name  localhost;\n\n    location /dce5/ {\n      proxy_pass https://10.6.8.2:30343/;\n      proxy_http_version 1.1;\n      proxy_read_timeout 300s; # Needed for the kpanda cloudtty feature; otherwise it can be removed\n      proxy_send_timeout 300s; # Needed for the kpanda cloudtty feature; otherwise it can be removed\n\n      proxy_set_header Host $host;\n      proxy_set_header X-Real-IP $remote_addr;\n      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n\n      proxy_set_header Upgrade $http_upgrade; # Needed for the kpanda cloudtty feature; otherwise it can be removed\n      proxy_set_header Connection $connection_upgrade; # Needed for the kpanda cloudtty feature; otherwise it can be removed\n    }\n\n    location / {\n        proxy_pass https://10.6.165.50:30443/; # Assume this is the customer system address (e.g., Ruyi Cloud)\n        proxy_http_version 1.1;\n\n        proxy_set_header Host $host;\n        proxy_set_header X-Real-IP $remote_addr;\n        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n    }\n}\n
3. Assume the nginx entry address is 10.6.165.50. Following Customize the AI Computing Center Reverse Proxy Server Address, set the AI_PROXY reverse proxy to http://10.6.165.50/dce5, and make sure the AI Computing Center can be accessed via http://10.6.165.50/dce5. The customer system also needs its own reverse-proxy settings, which must be handled per platform.
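After editing the configuration, validate and reload nginx with the standard commands:

nginx -t\nnginx -s reload\n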

          "},{"location":"admin/ghippo/best-practice/oem/oem-out.html#_2","title":"\u6253\u901a\u7528\u6237\u4f53\u7cfb","text":"

          \u5c06\u5ba2\u6237\u7cfb\u7edf\u4e0e AI \u7b97\u529b\u4e2d\u5fc3\u5e73\u53f0\u901a\u8fc7 OIDC/OAUTH \u7b49\u534f\u8bae\u5bf9\u63a5\uff0c\u4f7f\u7528\u6237\u767b\u5f55\u5ba2\u6237\u7cfb\u7edf\u540e\u8fdb\u5165 AI \u7b97\u529b\u4e2d\u5fc3\u65f6\u65e0\u9700\u518d\u6b21\u767b\u5f55\u3002 \u5728\u62ff\u5230\u5ba2\u6237\u7cfb\u7edf\u7684 OIDC \u4fe1\u606f\u540e\u586b\u5165 \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u8eab\u4efd\u63d0\u4f9b\u5546 \u4e2d\u3002

          \u5bf9\u63a5\u5b8c\u6210\u540e\uff0cAI \u7b97\u529b\u4e2d\u5fc3\u767b\u5f55\u9875\u9762\u5c06\u51fa\u73b0 OIDC\uff08\u81ea\u5b9a\u4e49\uff09\u9009\u9879\uff0c\u9996\u6b21\u4ece\u5ba2\u6237\u7cfb\u7edf\u8fdb\u5165 AI \u7b97\u529b\u4e2d\u5fc3\u65f6\u9009\u62e9\u901a\u8fc7 OIDC \u767b\u5f55\uff0c \u540e\u7eed\u5c06\u76f4\u63a5\u8fdb\u5165 AI \u7b97\u529b\u4e2d\u5fc3\u65e0\u9700\u518d\u6b21\u9009\u62e9\u3002

          "},{"location":"admin/ghippo/best-practice/oem/oem-out.html#_3","title":"\u5bf9\u63a5\u5bfc\u822a\u680f","text":"

          \u5bf9\u63a5\u5bfc\u822a\u680f\u662f\u6307 AI \u7b97\u529b\u4e2d\u5fc3\u51fa\u73b0\u5728\u5ba2\u6237\u7cfb\u7edf\u7684\u83dc\u5355\u4e2d\uff0c\u7528\u6237\u70b9\u51fb\u76f8\u5e94\u7684\u83dc\u5355\u540d\u79f0\u80fd\u591f\u76f4\u63a5\u8fdb\u5165 AI \u7b97\u529b\u4e2d\u5fc3\u3002 \u56e0\u6b64\u5bf9\u63a5\u5bfc\u822a\u680f\u4f9d\u8d56\u4e8e\u5ba2\u6237\u7cfb\u7edf\uff0c\u4e0d\u540c\u5e73\u53f0\u9700\u8981\u6309\u7167\u5177\u4f53\u60c5\u51b5\u8fdb\u884c\u5904\u7406\u3002

          "},{"location":"admin/ghippo/best-practice/oem/oem-out.html#_4","title":"\u5b9a\u5236\u5916\u89c2","text":"

          \u901a\u8fc7 \u5168\u5c40\u7ba1\u7406 -> \u5e73\u53f0\u8bbe\u7f6e -> \u5916\u89c2\u5b9a\u5236 \u53ef\u4ee5\u81ea\u5b9a\u4e49\u5e73\u53f0\u80cc\u666f\u989c\u8272\u3001logo\u3001\u540d\u79f0\u7b49\uff0c \u5177\u4f53\u64cd\u4f5c\u8bf7\u53c2\u7167\u5916\u89c2\u5b9a\u5236\u3002

          "},{"location":"admin/ghippo/best-practice/oem/oem-out.html#_5","title":"\u6253\u901a\u6743\u9650\u4f53\u7cfb\uff08\u53ef\u9009\uff09","text":"

          \u6253\u901a\u6743\u9650\u8f83\u4e3a\u590d\u6742\uff0c\u5982\u6709\u9700\u6c42\u8bf7\u8054\u7cfb\u5168\u5c40\u7ba1\u7406\u56e2\u961f\u3002

          "},{"location":"admin/ghippo/best-practice/oem/oem-out.html#_6","title":"\u53c2\u8003","text":"
          • OEM IN \u6587\u6863
          "},{"location":"admin/ghippo/install/gm-gateway.html","title":"\u4f7f\u7528\u56fd\u5bc6\u7f51\u5173\u4ee3\u7406 AI \u7b97\u529b\u4e2d\u5fc3","text":"

          \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u4e3a AI \u7b97\u529b\u4e2d\u5fc3\u914d\u7f6e\u56fd\u5bc6\u7f51\u5173\u3002

          "},{"location":"admin/ghippo/install/gm-gateway.html#_1","title":"\u8f6f\u4ef6\u4ecb\u7ecd","text":"

          Tengine: Tengine \u662f\u7531\u6dd8\u5b9d\u7f51\u53d1\u8d77\u7684 Web \u670d\u52a1\u5668\u9879\u76ee\u3002\u5b83\u5728 Nginx \u7684\u57fa\u7840\u4e0a\uff0c \u9488\u5bf9\u5927\u8bbf\u95ee\u91cf\u7f51\u7ad9\u7684\u9700\u6c42\uff0c\u6dfb\u52a0\u4e86\u5f88\u591a\u9ad8\u7ea7\u529f\u80fd\u548c\u7279\u6027\u3002\u6bd4\u5982\u652f\u6301 Tongsuo \u63d2\u4ef6\uff0c\u652f\u6301\u56fd\u5bc6\u8bc1\u4e66\u7b49\u3002

          Tongsuo: \u94dc\u9501/Tongsuo\uff08\u539f BabaSSL\uff09\u662f\u4e00\u4e2a\u63d0\u4f9b\u73b0\u4ee3\u5bc6\u7801\u5b66\u7b97\u6cd5\u548c\u5b89\u5168\u901a\u4fe1\u534f\u8bae\u7684\u5f00\u6e90\u57fa\u7840\u5bc6\u7801\u5e93\uff0c \u4e3a\u5b58\u50a8\u3001\u7f51\u7edc\u3001\u5bc6\u94a5\u7ba1\u7406\u3001\u9690\u79c1\u8ba1\u7b97\u7b49\u8bf8\u591a\u4e1a\u52a1\u573a\u666f\u63d0\u4f9b\u5e95\u5c42\u7684\u5bc6\u7801\u5b66\u57fa\u7840\u80fd\u529b\uff0c\u5b9e\u73b0\u6570\u636e\u5728\u4f20\u8f93\u3001\u4f7f\u7528\u3001\u5b58\u50a8\u7b49\u8fc7\u7a0b\u4e2d\u7684\u79c1\u5bc6\u6027\u3001\u5b8c\u6574\u6027\u548c\u53ef\u8ba4\u8bc1\u6027\uff0c \u4e3a\u6570\u636e\u751f\u547d\u5468\u671f\u4e2d\u7684\u9690\u79c1\u548c\u5b89\u5168\u63d0\u4f9b\u4fdd\u62a4\u80fd\u529b\u3002

          "},{"location":"admin/ghippo/install/gm-gateway.html#_2","title":"\u51c6\u5907\u5de5\u4f5c","text":"

          \u4e00\u53f0\u5b89\u88c5\u4e86 Docker \u7684 Linux \u4e3b\u673a\uff0c\u5e76\u4e14\u786e\u4fdd\u5b83\u80fd\u8bbf\u95ee\u4e92\u8054\u7f51\u3002

          "},{"location":"admin/ghippo/install/gm-gateway.html#_3","title":"\u7f16\u8bd1\u548c\u5b89\u88c5\u56fd\u5bc6\u7f51\u5173","text":"

          \u4e0b\u9762\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528 Tengine \u548c Tongsuo \u6784\u5efa\u56fd\u5bc6\u7f51\u5173\u3002

          Note

          \u6b64\u914d\u7f6e\u4ec5\u4f9b\u53c2\u8003\u3002

          FROM docker.m.daocloud.io/debian:11.3\n\n# Version\nENV TENGINE_VERSION=\"2.3.4\" \\\n    TONGSUO_VERSION=\"8.3.2\"\n\n# Install required system packages and dependencies\nRUN apt update && \\\n    apt -y install \\\n    wget \\\n    gcc \\\n    make \\\n    libpcre3 \\\n    libpcre3-dev \\\n    zlib1g-dev \\\n    perl \\\n    && apt clean\n\n# Build tengine\nRUN mkdir -p /tmp/pkg/cache/ && cd /tmp/pkg/cache/ \\\n    && wget https://github.com/alibaba/tengine/archive/refs/tags/${TENGINE_VERSION}.tar.gz -O tengine-${TENGINE_VERSION}.tar.gz \\\n    && tar zxvf tengine-${TENGINE_VERSION}.tar.gz \\\n    && wget https://github.com/Tongsuo-Project/Tongsuo/archive/refs/tags/${TONGSUO_VERSION}.tar.gz -O Tongsuo-${TONGSUO_VERSION}.tar.gz \\\n    && tar zxvf Tongsuo-${TONGSUO_VERSION}.tar.gz \\\n    && cd tengine-${TENGINE_VERSION} \\\n    && ./configure \\\n        --add-module=modules/ngx_openssl_ntls \\\n        --with-openssl=/tmp/pkg/cache/Tongsuo-${TONGSUO_VERSION} \\\n        --with-openssl-opt=\"--strict-warnings enable-ntls\" \\\n        --with-http_ssl_module --with-stream \\\n        --with-stream_ssl_module --with-stream_sni \\\n    && make \\\n    && make install \\\n    && ln -s /usr/local/nginx/sbin/nginx /usr/sbin/ \\\n    && rm -rf /tmp/pkg/cache\n\nEXPOSE 80 443\nSTOPSIGNAL SIGTERM\nCMD [\"nginx\", \"-g\", \"daemon off;\"]\n
          docker build -t tengine:0.0.1 .\n
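A minimal sketch for starting the gateway image built above. The container name is arbitrary; ports 80/443 match the EXPOSE directives in the Dockerfile:

docker run -d --name gm-gateway -p 80:80 -p 443:443 tengine:0.0.1\n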
          "},{"location":"admin/ghippo/install/gm-gateway.html#sm2-rsa-tls","title":"\u751f\u6210 SM2 \u548c RSA TLS \u8bc1\u4e66","text":"

          \u4e0b\u9762\u4ecb\u7ecd\u5982\u4f55\u751f\u6210 SM2 \u548c RSA TLS \u8bc1\u4e66\uff0c\u5e76\u914d\u7f6e\u56fd\u5bc6\u7f51\u5173\u3002

          "},{"location":"admin/ghippo/install/gm-gateway.html#sm2-tls","title":"SM2 TLS \u8bc1\u4e66","text":"

          Note

          \u6b64\u8bc1\u4e66\u4ec5\u9002\u7528\u4e8e\u6d4b\u8bd5\u73af\u5883\u3002

          \u60a8\u53ef\u4ee5\u53c2\u8003 Tongsuo \u5b98\u65b9\u6587\u6863\u4f7f\u7528 OpenSSL \u751f\u6210 SM2 \u8bc1\u4e66\uff0c \u6216\u8005\u8bbf\u95ee\u56fd\u5bc6 SSL \u5b9e\u9a8c\u5ba4\u7533\u8bf7 SM2 \u8bc1\u4e66\u3002

          \u6700\u7ec8\u6211\u4eec\u4f1a\u5f97\u5230\u4ee5\u4e0b\u6587\u4ef6\uff1a

          -rw-r--r-- 1 root root  749 Dec  8 02:59 sm2.*.enc.crt.pem\n-rw-r--r-- 1 root root  258 Dec  8 02:59 sm2.*.enc.key.pem\n-rw-r--r-- 1 root root  749 Dec  8 02:59 sm2.*.sig.crt.pem\n-rw-r--r-- 1 root root  258 Dec  8 02:59 sm2.*.sig.key.pem\n
          "},{"location":"admin/ghippo/install/gm-gateway.html#rsa-tls","title":"RSA TLS \u8bc1\u4e66","text":"
          -rw-r--r-- 1 root root  216 Dec  8 03:21 rsa.*.crt.pem\n-rw-r--r-- 1 root root 4096 Dec  8 02:59 rsa.*.key.pem\n
          "},{"location":"admin/ghippo/install/gm-gateway.html#sm2-rsa-tls_1","title":"\u7ed9\u56fd\u5bc6\u7f51\u5173\u914d\u7f6e SM2 \u548c RSA TLS \u8bc1\u4e66","text":"

          \u672c\u6587\u4e2d\u4f7f\u7528\u7684\u56fd\u5bc6\u7f51\u5173\uff0c\u652f\u6301 SM2 \u548c RSA \u7b49 TLS \u8bc1\u4e66\u3002\u53cc\u8bc1\u4e66\u7684\u4f18\u70b9\u662f\uff1a\u5f53\u6d4f\u89c8\u5668\u4e0d\u652f\u6301 SM2 TLS \u8bc1\u4e66\u65f6\uff0c\u81ea\u52a8\u5207\u6362\u5230 RSA TLS \u8bc1\u4e66\u3002

          \u66f4\u591a\u8be6\u7ec6\u914d\u7f6e\uff0c\u8bf7\u53c2\u8003Tongsuo \u5b98\u65b9\u6587\u6863\u3002

          \u6211\u4eec\u8fdb\u5165 Tengine \u5bb9\u5668\u5185\u90e8\uff1a

# Enter the nginx configuration directory\ncd /usr/local/nginx/conf\n\n# Create a cert folder to store the TLS certificates\nmkdir cert\n\n# Copy the SM2 and RSA TLS certificates into the `/usr/local/nginx/conf/cert` directory\ncp sm2.*.enc.crt.pem sm2.*.enc.key.pem  sm2.*.sig.crt.pem  sm2.*.sig.key.pem /usr/local/nginx/conf/cert\ncp rsa.*.crt.pem  rsa.*.key.pem /usr/local/nginx/conf/cert\n\n# Edit the nginx.conf configuration\nvim nginx.conf\n...\nserver {\n  listen 443          ssl;\n  proxy_http_version  1.1;\n  # Enable the GM feature so that TLS certificates using the SM2 algorithm are supported\n  enable_ntls         on;\n\n  # RSA certificate\n  # If your browser does not support GM certificates, enable this option; Tengine will automatically detect the end user's browser and fall back to the RSA certificate\n  ssl_certificate                 /usr/local/nginx/conf/cert/rsa.*.crt.pem;\n  ssl_certificate_key             /usr/local/nginx/conf/cert/rsa.*.key.pem;\n\n  # Configure two pairs of SM2 certificates, for encryption and signing\n  # SM2 signing certificate\n  ssl_sign_certificate            /usr/local/nginx/conf/cert/sm2.*.sig.crt.pem;\n  ssl_sign_certificate_key        /usr/local/nginx/conf/cert/sm2.*.sig.key.pem;\n  # SM2 encryption certificate\n  ssl_enc_certificate             /usr/local/nginx/conf/cert/sm2.*.enc.crt.pem;\n  ssl_enc_certificate_key         /usr/local/nginx/conf/cert/sm2.*.enc.key.pem;\n  ssl_protocols                   TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;\n\n  location / {\n    proxy_set_header Host $http_host;\n    proxy_set_header X-Real-IP $remote_addr;\n    proxy_set_header REMOTE-HOST $remote_addr;\n    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n    # Change this address to the address of the Istio ingress gateway\n    # e.g. proxy_pass https://istio-ingressgateway.istio-system.svc.cluster.local\n    # or proxy_pass https://demo-dev.daocloud.io\n    proxy_pass https://istio-ingressgateway.istio-system.svc.cluster.local;\n  }\n}\n
          "},{"location":"admin/ghippo/install/gm-gateway.html#_4","title":"\u91cd\u65b0\u52a0\u8f7d\u56fd\u5bc6\u7f51\u5173\u7684\u914d\u7f6e","text":"
          nginx -s reload\n
          "},{"location":"admin/ghippo/install/gm-gateway.html#_5","title":"\u4e0b\u4e00\u6b65","text":"

          \u56fd\u5bc6\u7f51\u5173\u90e8\u7f72\u6210\u529f\u4e4b\u540e\uff0c\u81ea\u5b9a\u4e49 AI \u7b97\u529b\u4e2d\u5fc3\u53cd\u5411\u4ee3\u7406\u670d\u52a1\u5668\u5730\u5740\u3002

          "},{"location":"admin/ghippo/install/gm-gateway.html#_6","title":"\u9a8c\u8bc1","text":"

          \u60a8\u53ef\u4ee5\u90e8\u7f72\u4e00\u4e2a\u652f\u6301\u56fd\u5bc6\u8bc1\u4e66\u7684 Web \u6d4f\u89c8\u5668\u3002 \u4f8b\u5982 Samarium Browser\uff0c \u7136\u540e\u901a\u8fc7 Tengine \u8bbf\u95ee AI \u7b97\u529b\u4e2d\u5fc3 UI \u754c\u9762\uff0c\u9a8c\u8bc1\u56fd\u5bc6\u8bc1\u4e66\u662f\u5426\u751f\u6548\u3002

          "},{"location":"admin/ghippo/install/login.html","title":"\u767b\u5f55","text":"

          \u7528\u6237\u5728\u4f7f\u7528\u4e00\u4e2a\u65b0\u7cfb\u7edf\u524d\uff0c\u5728\u8fd9\u4e2a\u7cfb\u7edf\u4e2d\u662f\u6ca1\u6709\u4efb\u4f55\u6570\u636e\u7684\uff0c\u7cfb\u7edf\u4e5f\u65e0\u6cd5\u8bc6\u522b\u8fd9\u4e2a\u65b0\u7528\u6237\u3002\u4e3a\u4e86\u6807\u8bc6\u7528\u6237\u8eab\u4efd\u3001\u7ed1\u5b9a\u7528\u6237\u6570\u636e\uff0c\u7528\u6237\u9700\u8981\u4e00\u4e2a\u80fd\u552f\u4e00\u6807\u8bc6\u7528\u6237\u8eab\u4efd\u7684\u5e10\u53f7\u3002

          AI \u7b97\u529b\u4e2d\u5fc3\u5728 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \u4e2d\u901a\u8fc7\u7ba1\u7406\u5458\u521b\u5efa\u65b0\u7528\u6237\u7684\u65b9\u5f0f\u4e3a\u7528\u6237\u5206\u914d\u4e00\u4e2a\u9644\u6709\u4e00\u5b9a\u6743\u9650\u7684\u8d26\u53f7\u3002\u8be5\u7528\u6237\u4ea7\u751f\u7684\u6240\u6709\u884c\u4e3a\u90fd\u5c06\u5173\u8054\u5230\u81ea\u5df1\u7684\u5e10\u53f7\u3002

          \u7528\u6237\u901a\u8fc7\u8d26\u53f7/\u5bc6\u7801\u8fdb\u884c\u767b\u5f55\uff0c\u7cfb\u7edf\u9a8c\u8bc1\u8eab\u4efd\u662f\u5426\u5408\u6cd5\uff0c\u5982\u679c\u9a8c\u8bc1\u5408\u6cd5\uff0c\u5219\u7528\u6237\u6210\u529f\u767b\u5f55\u3002

          Note

          \u5982\u679c\u7528\u6237\u767b\u5f55\u540e 24 \u5c0f\u65f6\u5185\u65e0\u4efb\u4f55\u64cd\u4f5c\uff0c\u5c06\u81ea\u52a8\u9000\u51fa\u767b\u5f55\u72b6\u6001\u3002\u5982\u679c\u767b\u5f55\u7684\u7528\u6237\u59cb\u7ec8\u6d3b\u8dc3\uff0c\u5c06\u6301\u7eed\u5904\u4e8e\u767b\u5f55\u72b6\u6001\u3002

          \u7528\u6237\u767b\u5f55\u7684\u7b80\u5355\u6d41\u7a0b\u5982\u4e0b\u56fe\u3002

          graph TB\n\nuser[\u8f93\u5165\u7528\u6237\u540d] --> pass[\u8f93\u5165\u5bc6\u7801] --> judge([\u70b9\u51fb\u767b\u5f55\u5e76\u6821\u9a8c\u7528\u6237\u540d\u548c\u5bc6\u7801])\njudge -.\u6b63\u786e.->success[\u767b\u5f55\u6210\u529f]\njudge -.\u9519\u8bef.->fail[\u63d0\u793a\u9519\u8bef]\n\nclassDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;\nclassDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;\nclassDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n\nclass user,pass cluster;\nclass judge plain\nclass success,fail k8s

          \u7528\u6237\u767b\u5f55\u754c\u9762\u5982\u4e0b\u56fe\u3002\u5177\u4f53\u767b\u5f55\u753b\u9762\uff0c\u8bf7\u4e0e\u5b9e\u9645\u4ea7\u54c1\u4e3a\u51c6\u3002

          "},{"location":"admin/ghippo/install/offline-install.html","title":"\u79bb\u7ebf\u5347\u7ea7\u5168\u5c40\u7ba1\u7406\u6a21\u5757","text":"

          \u672c\u9875\u8bf4\u660e\u4e0b\u8f7d\u5168\u5c40\u7ba1\u7406\u6a21\u5757\u540e\uff0c\u5e94\u8be5\u5982\u4f55\u5b89\u88c5\u6216\u5347\u7ea7\u3002

          Info

          \u4e0b\u8ff0\u547d\u4ee4\u6216\u811a\u672c\u5185\u51fa\u73b0\u7684 ghippo \u5b57\u6837\u662f\u5168\u5c40\u7ba1\u7406\u6a21\u5757\u7684\u5185\u90e8\u5f00\u53d1\u4ee3\u53f7\u3002

          "},{"location":"admin/ghippo/install/offline-install.html#_2","title":"\u4ece\u5b89\u88c5\u5305\u4e2d\u52a0\u8f7d\u955c\u50cf","text":"

          \u60a8\u53ef\u4ee5\u6839\u636e\u4e0b\u9762\u4e24\u79cd\u65b9\u5f0f\u4e4b\u4e00\u52a0\u8f7d\u955c\u50cf\uff0c\u5f53\u73af\u5883\u4e2d\u5b58\u5728\u955c\u50cf\u4ed3\u5e93\u65f6\uff0c\u5efa\u8bae\u9009\u62e9 chart-syncer \u540c\u6b65\u955c\u50cf\u5230\u955c\u50cf\u4ed3\u5e93\uff0c\u8be5\u65b9\u6cd5\u66f4\u52a0\u9ad8\u6548\u4fbf\u6377\u3002

          "},{"location":"admin/ghippo/install/offline-install.html#chart-syncer","title":"chart-syncer \u540c\u6b65\u955c\u50cf\u5230\u955c\u50cf\u4ed3\u5e93","text":"
          1. \u521b\u5efa load-image.yaml

            Note

            \u8be5 YAML \u6587\u4ef6\u4e2d\u7684\u5404\u9879\u53c2\u6570\u5747\u4e3a\u5fc5\u586b\u9879\u3002\u60a8\u9700\u8981\u4e00\u4e2a\u79c1\u6709\u7684\u955c\u50cf\u4ed3\u5e93\uff0c\u5e76\u4fee\u6539\u76f8\u5173\u914d\u7f6e\u3002

            \u5df2\u5b89\u88c5 chart repo\u672a\u5b89\u88c5 chart repo

            \u82e5\u5f53\u524d\u73af\u5883\u5df2\u5b89\u88c5 chart repo\uff0cchart-syncer \u4e5f\u652f\u6301\u5c06 Chart \u5bfc\u51fa\u4e3a tgz \u6587\u4ef6\u3002

            load-image.yaml
            source:\n  intermediateBundlesPath: ghippo-offline # (1)!\ntarget:\n  containerRegistry: 10.16.10.111 # (2)!\n  containerRepository: release.daocloud.io/ghippo # (3)!\n  repo:\n    kind: HARBOR # (4)!\n    url: http://10.16.10.111/chartrepo/release.daocloud.io # (5)!\n    auth:\n      username: \"admin\" # (6)!\n      password: \"Harbor12345\" # (7)!\n  containers:\n    auth:\n      username: \"admin\" # (8)!\n      password: \"Harbor12345\" # (9)!\n
1. Path relative to where the charts-syncer command is executed, not the relative path between this YAML file and the offline package
2. Change to your registry URL
3. Change to your registry
4. Can also be any other supported Helm chart repository kind
5. Change to the chart repo URL
6. Your registry username
7. Your registry password
8. Your registry username
9. Your registry password

If no chart repo is installed in the current environment, chart-syncer also supports exporting the chart as a tgz file and storing it at a specified path.

            load-image.yaml
            source:\n  intermediateBundlesPath: ghippo-offline # (1)!\ntarget:\n  containerRegistry: 10.16.10.111 # (2)!\n  containerRepository: release.daocloud.io/ghippo # (3)!\n  repo:\n    kind: LOCAL\n    path: ./local-repo # (4)!\n  containers:\n    auth:\n      username: \"admin\" # (5)!\n      password: \"Harbor12345\" # (6)!\n
  1. Path relative to where the charts-syncer command is executed, not the relative path between this YAML file and the offline package
  2. Change to your registry URL
  3. Change to your registry
  4. Local path for the chart
  5. Your registry username
  6. Your registry password
2. Run the image sync command.

            charts-syncer sync --config load-image.yaml\n
          "},{"location":"admin/ghippo/install/offline-install.html#docker-containerd","title":"Docker \u6216 containerd \u76f4\u63a5\u52a0\u8f7d","text":"

          \u89e3\u538b\u5e76\u52a0\u8f7d\u955c\u50cf\u6587\u4ef6\u3002

          1. \u89e3\u538b tar \u538b\u7f29\u5305\u3002

            tar xvf ghippo.bundle.tar\n

            \u89e3\u538b\u6210\u529f\u540e\u4f1a\u5f97\u5230\u51e0\u4e2a\u6587\u4ef6\uff1a

            • hints.yaml
            • images.tar
            • original-chart
          2. \u4ece\u672c\u5730\u52a0\u8f7d\u955c\u50cf\u5230 Docker \u6216 containerd\u3002

            Dockercontainerd
            docker load -i images.tar\n
            ctr -n k8s.io image import images.tar\n

          Note

Every node must perform the Docker or containerd image-load operation above. After loading, the images must be re-tagged so that the Registry and Repository stay consistent with those used at installation time; a sketch of the re-tag step follows.
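A sketch of the re-tag step, assuming a private registry 10.16.10.111 and an image named ghippo-apiserver; adjust the names and tags to match what the loaded images actually show:

# Docker\ndocker tag release.daocloud.io/ghippo/ghippo-apiserver:v0.9.0 10.16.10.111/ghippo/ghippo-apiserver:v0.9.0\n# containerd\nctr -n k8s.io images tag release.daocloud.io/ghippo/ghippo-apiserver:v0.9.0 10.16.10.111/ghippo/ghippo-apiserver:v0.9.0\n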

          "},{"location":"admin/ghippo/install/offline-install.html#_3","title":"\u5347\u7ea7","text":"

          \u5347\u7ea7\u6ce8\u610f\u4e8b\u9879\uff1a

          \u4ece v0.11.x \u5347\u7ea7\u5230 \u2265v0.12.0\u4ece v0.15.x \u5347\u7ea7\u5230 \u2265v0.16.0

          \u5f53\u4ece v0.11.x (\u6216\u66f4\u4f4e\u7248\u672c) \u5347\u7ea7\u5230 v0.12.0 (\u6216\u66f4\u9ad8\u7248\u672c) \u65f6\uff0c\u9700\u8981\u5c06 bak.yaml \u4e2d\u6240\u6709 keycloak key \u4fee\u6539\u4e3a keycloakx \u3002

          \u4fee\u6539\u524d\uff1a

          bak.yaml
          USER-SUPPLIED VALUES:\nkeycloak:\n    ...\n

After:

          bak.yaml
          USER-SUPPLIED VALUES:\nkeycloakx:\n    ...\n

When upgrading from v0.15.x (or lower) to v0.16.0 (or higher), the database connection parameters must be modified.

Before:

          bak.yaml
          USER-SUPPLIED VALUES:\nglobal:\n  database:\n    host: 127.0.0.1\n    port: 3306\n    apiserver:\n      dbname: ghippo\n      password: passowrd\n      user: ghippo\n    keycloakx:\n      dbname: keycloak\n      password: passowrd\n      user: keycloak\n  auditDatabase:\n    auditserver:\n      dbname: audit\n      password: passowrd\n      user: audit\n    host: 127.0.0.1\n    port: 3306\n

After:

          bak.yaml
          USER-SUPPLIED VALUES:\nglobal:\n  storage:\n    ghippo:\n    - driver: mysql\n      accessType: readwrite\n      dsn: {global.database.apiserver.user}:{global.database.apiserver.password}@tcp({global.database.host}:{global.database.port})/{global.database.apiserver.dbname}?charset=utf8mb4&multiStatements=true&parseTime=true\n    audit:\n    - driver: mysql\n      accessType: readwrite\n      dsn: {global.auditDatabase.auditserver.user}:{global.auditDatabase.auditserver.password}@tcp({global.auditDatabase.host}:{global.auditDatabase.port})/{global.auditDatabase.auditserver.dbname}?charset=utf8mb4&multiStatements=true&parseTime=true\n    keycloak:\n    - driver: mysql\n      accessType: readwrite\n      dsn: {global.database.keycloakx.user}:{global.database.keycloakx.password}@tcp({global.database.host}:{global.database.port})/{global.database.keycloakx.dbname}?charset=utf8mb4\n
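For illustration, a hypothetical worked example of the new DSN format: with user ghippo, password pass123, host 127.0.0.1, port 3306, and dbname ghippo, the first ghippo DSN above would expand to:

ghippo:pass123@tcp(127.0.0.1:3306)/ghippo?charset=utf8mb4&multiStatements=true&parseTime=true\n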

There are two upgrade methods. Choose the one that matches your earlier setup:

Upgrade via the Helm repository / Upgrade via the chart package
1. Check whether the Global Management Helm repository exists.

            helm repo list | grep ghippo\n

If the result is empty or shows the following message, proceed to the next step; otherwise skip the next step.

            Error: no repositories to show\n
2. Add the Global Management Helm repository.

            helm repo add ghippo http://{harbor url}/chartrepo/{project}\n
3. Update the Global Management Helm repository.

            helm repo update ghippo # (1)!\n
  1. A too-old Helm version will cause this to fail; if it fails, try running helm repo update without the repository name
4. Choose the Global Management version you want to install (installing the latest version is recommended).

            helm search repo ghippo/ghippo --versions\n
            NAME                   CHART VERSION  APP VERSION  DESCRIPTION\nghippo/ghippo  0.9.0          v0.9.0       A Helm chart for GHippo\n...\n
5. Back up the --set parameters.

  Before upgrading the Global Management version, it is recommended to run the following command to back up the --set parameters of the old version.

            helm get values ghippo -n ghippo-system -o yaml > bak.yaml\n
6. Update the Ghippo CRDs:

            helm pull ghippo/ghippo --version 0.9.0 && tar -zxf ghippo-0.9.0.tgz\nkubectl apply -f ghippo/crds\n
7. Run helm upgrade.

  Before upgrading, it is recommended to override the global.imageRegistry field in bak.yaml with the registry address currently in use.

            export imageRegistry={\u4f60\u7684\u955c\u50cf\u4ed3\u5e93}\n
            helm upgrade ghippo ghippo/ghippo \\\n  -n ghippo-system \\\n  -f ./bak.yaml \\\n  --set global.imageRegistry=$imageRegistry \\\n  --version 0.9.0\n
1. Back up the --set parameters.

  Before upgrading the Global Management version, it is recommended to run the following command to back up the --set parameters of the old version.

  helm get values ghippo -n ghippo-system -o yaml > bak.yaml\n
2. Update the Ghippo CRDs:

  kubectl apply -f ./crds\n
3. Run helm upgrade.

  Before upgrading, it is recommended to override global.imageRegistry in bak.yaml with the registry address currently in use.

            export imageRegistry={\u4f60\u7684\u955c\u50cf\u4ed3\u5e93}\n
            helm upgrade ghippo . \\\n  -n ghippo-system \\\n  -f ./bak.yaml \\\n  --set global.imageRegistry=$imageRegistry\n
          "},{"location":"admin/ghippo/install/reverse-proxy.html","title":"\u81ea\u5b9a\u4e49 AI \u7b97\u529b\u4e2d\u5fc3\u53cd\u5411\u4ee3\u7406\u670d\u52a1\u5668\u5730\u5740","text":"

          \u5177\u4f53\u8bbe\u7f6e\u6b65\u9aa4\u5982\u4e0b\uff1a

          1. \u68c0\u67e5\u5168\u5c40\u7ba1\u7406 helm \u4ed3\u5e93\u662f\u5426\u5b58\u5728\u3002

            helm repo list | grep ghippo\n

            \u82e5\u8fd4\u56de\u7ed3\u679c\u4e3a\u7a7a\u6216\u5982\u4e0b\u63d0\u793a\uff0c\u5219\u8fdb\u884c\u4e0b\u4e00\u6b65\uff1b\u53cd\u4e4b\u5219\u8df3\u8fc7\u4e0b\u4e00\u6b65\u3002

            Error: no repositories to show\n
          2. \u6dfb\u52a0\u5e76\u4e14\u66f4\u65b0\u5168\u5c40\u7ba1\u7406\u7684 helm \u4ed3\u5e93\u3002

            helm repo add ghippo http://{harbor url}/chartrepo/{project}\nhelm repo update ghippo\n
          3. \u8bbe\u7f6e\u73af\u5883\u53d8\u91cf\uff0c\u65b9\u4fbf\u5728\u4e0b\u6587\u4e2d\u4f7f\u7528\u3002

# Your reverse proxy address, e.g. `export AI_PROXY=\"https://demo-alpha.daocloud.io\"` \nexport AI_PROXY=\"https://domain:port\"\n\n# Backup file for the helm --set parameters\nexport GHIPPO_VALUES_BAK=\"ghippo-values-bak.yaml\"\n\n# Get the current ghippo version\nexport GHIPPO_HELM_VERSION=$(helm get notes ghippo -n ghippo-system | grep \"Chart Version\" | awk -F ': ' '{ print $2 }')\n
4. Back up the --set parameters.

  helm get values ghippo -n ghippo-system -o yaml > ${GHIPPO_VALUES_BAK}\n
5. Add your reverse proxy address.

  Note

  • If available, you can use the yq command:

              yq -i \".global.reverseProxy = \\\"${AI_PROXY}\\\"\" ${GHIPPO_VALUES_BAK}\n
• Or you can edit and save with the vim command:

vim ${GHIPPO_VALUES_BAK}\n\nUSER-SUPPLIED VALUES:\n...\nglobal:\n  ...\n  reverseProxy: ${AI_PROXY} # Only this line needs to be changed\n
6. Run helm upgrade to apply the configuration.

            helm upgrade ghippo ghippo/ghippo \\\n  -n ghippo-system \\\n  -f ${GHIPPO_VALUES_BAK} \\\n  --version ${GHIPPO_HELM_VERSION}\n
7. Restart the Global Management pods with kubectl to apply the configuration.

            kubectl rollout restart deploy/ghippo-apiserver -n ghippo-system\nkubectl rollout restart statefulset/ghippo-keycloakx -n ghippo-system\n
          "},{"location":"admin/ghippo/install/user-isolation.html","title":"\u5f00\u542f Folder/WS \u4e4b\u95f4\u7684\u9694\u79bb\u6a21\u5f0f","text":"

          \u5177\u4f53\u8bbe\u7f6e\u6b65\u9aa4\u5982\u4e0b\uff1a

          1. \u68c0\u67e5\u5168\u5c40\u7ba1\u7406 helm \u4ed3\u5e93\u662f\u5426\u5b58\u5728\u3002

            helm repo list | grep ghippo\n

            \u82e5\u8fd4\u56de\u7ed3\u679c\u4e3a\u7a7a\u6216\u5982\u4e0b\u63d0\u793a\uff0c\u5219\u8fdb\u884c\u4e0b\u4e00\u6b65\uff1b\u53cd\u4e4b\u5219\u8df3\u8fc7\u4e0b\u4e00\u6b65\u3002

            Error: no repositories to show\n
          2. \u6dfb\u52a0\u5e76\u4e14\u66f4\u65b0\u5168\u5c40\u7ba1\u7406\u7684 helm \u4ed3\u5e93\u3002

            helm repo add ghippo http://{harbor url}/chartrepo/{project}\nhelm repo update ghippo\n
          3. \u8bbe\u7f6e\u73af\u5883\u53d8\u91cf\uff0c\u65b9\u4fbf\u5728\u4e0b\u6587\u4e2d\u4f7f\u7528\u3002

# Backup file for the helm --set parameters\nexport GHIPPO_VALUES_BAK=\"ghippo-values-bak.yaml\"\n\n# Get the current ghippo version\nexport GHIPPO_HELM_VERSION=$(helm get notes ghippo -n ghippo-system | grep \"Chart Version\" | awk -F ': ' '{ print $2 }')\n
4. Back up the --set parameters.

  helm get values ghippo -n ghippo-system -o yaml > ${GHIPPO_VALUES_BAK}\n
5. Turn on the isolation-mode switch between Folders/Workspaces.

  Note

  • If available, you can use the yq command:

              yq -i \".apiserver.userIsolationMode = \\\"Folder\\\"\" ${GHIPPO_VALUES_BAK}\n
• Or you can edit and save with the vim command:

vim ${GHIPPO_VALUES_BAK}\n\nUSER-SUPPLIED VALUES:\n...\n# Just add the two lines below\napiserver:\n  userIsolationMode: Folder\n
6. Run helm upgrade to apply the configuration.

            helm upgrade ghippo ghippo/ghippo \\\n  -n ghippo-system \\\n  -f ${GHIPPO_VALUES_BAK} \\\n  --version ${GHIPPO_HELM_VERSION}\n
7. Restart the Global Management pods with kubectl to apply the configuration.

            kubectl rollout restart deploy/ghippo-apiserver -n ghippo-system\n
          "},{"location":"admin/ghippo/permissions/baize.html","title":"AI Lab \u6743\u9650\u8bf4\u660e","text":"

          AI Lab \u652f\u6301\u56db\u79cd\u7528\u6237\u89d2\u8272\uff1a

          • Admin / Baize Owner\uff1a\u62e5\u6709 \u5f00\u53d1\u63a7\u5236\u53f0 \u548c \u8fd0\u7ef4\u7ba1\u7406 \u5168\u90e8\u529f\u80fd\u7684\u589e\u5220\u6539\u67e5\u7684\u6743\u9650\u3002
          • Workspace Admin\uff1a\u62e5\u6709\u6388\u6743\u5de5\u4f5c\u7a7a\u95f4\u7684 \u5f00\u53d1\u63a7\u5236\u53f0 \u5168\u90e8\u529f\u80fd\u7684\u589e\u5220\u6539\u67e5\u7684\u6743\u9650\u3002
          • Workspace Editor\uff1a\u62e5\u6709\u6388\u6743\u5de5\u4f5c\u7a7a\u95f4\u7684 \u5f00\u53d1\u63a7\u5236\u53f0 \u5168\u90e8\u529f\u80fd\u7684\u66f4\u65b0\u3001\u67e5\u8be2\u7684\u6743\u9650\u3002
          • Workspace Viewer\uff1a\u62e5\u6709\u6388\u6743\u5de5\u4f5c\u7a7a\u95f4\u7684 \u5f00\u53d1\u63a7\u5236\u53f0 \u5168\u90e8\u529f\u80fd\u7684\u67e5\u8be2\u7684\u6743\u9650\u3002

          \u6bcf\u79cd\u89d2\u8272\u5177\u6709\u4e0d\u540c\u7684\u6743\u9650\uff0c\u5177\u4f53\u8bf4\u660e\u5982\u4e0b\u3002

| Menu | Feature | Operation | Admin / Baize Owner | Workspace Admin | Workspace Editor | Workspace Viewer |
| --- | --- | --- | --- | --- | --- | --- |
| Developer Console | Overview | View overview | ✓ | ✓ | ✓ | ✓ |
| | Notebooks | View Notebooks list | ✓ | ✓ | ✓ | ✓ |
| | | View Notebooks details | ✓ | ✓ | ✓ | ✗ |
| | | Create Notebooks | ✓ | ✓ | ✗ | ✗ |
| | | Update Notebooks | ✓ | ✓ | ✓ | ✗ |
| | | Clone Notebooks | ✓ | ✓ | ✗ | ✗ |
| | | Stop Notebooks | ✓ | ✓ | ✓ | ✗ |
| | | Start Notebooks | ✓ | ✓ | ✓ | ✗ |
| | | Delete Notebooks | ✓ | ✓ | ✗ | ✗ |
| | Job list | View job list | ✓ | ✓ | ✓ | ✓ |
| | | View job details | ✓ | ✓ | ✓ | ✓ |
| | | Create job | ✓ | ✓ | ✗ | ✗ |
| | | Clone job | ✓ | ✓ | ✗ | ✗ |
| | | View job workload details | ✓ | ✓ | ✓ | ✗ |
| | | Delete job | ✓ | ✓ | ✗ | ✗ |
| | Job analysis | View job analysis | ✓ | ✓ | ✓ | ✓ |
| | | View job analysis details | ✓ | ✓ | ✓ | ✓ |
| | | Delete job analysis | ✓ | ✓ | ✗ | ✗ |
| | Dataset list | View dataset list | ✓ | ✓ | ✓ | ✗ |
| | | Create dataset | ✓ | ✓ | ✗ | ✗ |
| | | Resync dataset | ✓ | ✓ | ✓ | ✗ |
| | | Update credentials | ✓ | ✓ | ✓ | ✗ |
| | | Delete dataset | ✓ | ✓ | ✗ | ✗ |
| | Environment management | View environment list | ✓ | ✓ | ✓ | ✓ |
| | | Create environment | ✓ | ✓ | ✗ | ✗ |
| | | Update environment | ✓ | ✓ | ✓ | ✗ |
| | | Delete environment | ✓ | ✓ | ✗ | ✗ |
| | Inference services | View inference service list | ✓ | ✓ | ✓ | ✓ |
| | | View inference service details | ✓ | ✓ | ✓ | ✓ |
| | | Create inference service | ✓ | ✓ | ✗ | ✗ |
| | | Update inference service | ✓ | ✓ | ✓ | ✗ |
| | | Stop inference service | ✓ | ✓ | ✓ | ✗ |
| | | Start inference service | ✓ | ✓ | ✓ | ✗ |
| | | Delete inference service | ✓ | ✓ | ✗ | ✗ |
| Operations Management | Overview | View overview | ✓ | ✗ | ✗ | ✗ |
| | GPU management | View GPU management list | ✓ | ✗ | ✗ | ✗ |
| | Queue management | View queue management list | ✓ | ✗ | ✗ | ✗ |
| | | View queue details | ✓ | ✗ | ✗ | ✗ |
| | | Create queue | ✓ | ✗ | ✗ | ✗ |
| | | Update queue | ✓ | ✗ | ✗ | ✗ |
| | | Delete queue | ✓ | ✗ | ✗ | ✗ |
"},{"location":"admin/ghippo/permissions/kpanda.html","title":"Container Management Permission Description","text":"

The Container Management module uses the following roles:

• Admin / Kpanda Owner
• Cluster Admin
• NS Admin
• NS Editor
• NS Viewer

Note

• For more information about these permissions, see the Container Management permission system documentation.
• For creating, managing, and deleting roles, see Role and Permission Management.
• The permissions of Cluster Admin, NS Admin, NS Editor, and NS Viewer take effect only within the current cluster or namespace.

The permissions of each role are as follows:

In the list below, each action is followed by four marks for Cluster Admin / NS Admin / NS Editor / NS Viewer, in that order.

• Cluster → Cluster list: view cluster list ✓ ✓ ✓ ✓; integrate cluster ✗ ✗ ✗ ✗; create cluster ✗ ✗ ✗ ✗
• Cluster → Cluster operations: enter console ✓ ✓(only from within the list) ✓ ✗; view monitoring ✓ ✗ ✗ ✗; edit basic configuration ✓ ✗ ✗ ✗; download kubeconfig ✓ ✓(ns-scoped kubeconfig) ✓(ns-scoped kubeconfig) ✓(ns-scoped kubeconfig); disconnect cluster ✗ ✗ ✗ ✗; view logs ✓ ✗ ✗ ✗; retry ✗ ✗ ✗ ✗; uninstall cluster ✗ ✗ ✗ ✗
• Cluster → Cluster overview: view cluster overview ✓ ✗ ✗ ✗
• Cluster → Node management: add node ✗ ✗ ✗ ✗; view node list ✓ ✗ ✗ ✗; view node details ✓ ✗ ✗ ✗; view YAML ✓ ✗ ✗ ✗; pause scheduling ✓ ✗ ✗ ✗; modify labels ✓ ✗ ✗ ✗; modify annotations ✓ ✗ ✗ ✗; modify taints ✓ ✗ ✗ ✗; remove node ✗ ✗ ✗ ✗
• Cluster → Deployments (stateless workloads): view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); create from YAML ✓ ✓ ✓ ✗; create from image ✓ ✓ ✓ ✗; select image (from instances in the workspace bound to the namespace) ✓ ✓ ✓ ✗; view IP pool ✓ ✓ ✓ ✗; edit network interface ✓ ✓ ✓ ✗; enter console ✓ ✓ ✓ ✗; view monitoring ✓ ✓ ✓ ✓; view logs ✓ ✓ ✓ ✓; scale workload ✓ ✓ ✓ ✗; edit YAML ✓ ✓ ✓ ✗; update ✓ ✓ ✓ ✗; status: pause upgrade ✓ ✓ ✓ ✗; status: stop ✓ ✓ ✓ ✗; status: restart ✓ ✓ ✓ ✗; delete ✓ ✓ ✓ ✗
• Cluster → StatefulSets (stateful workloads): view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); create from YAML ✓ ✓ ✓ ✗; create from image ✓ ✓ ✓ ✗; select image (as above) ✓ ✓ ✓ ✗; enter console ✓ ✓ ✓ ✗; view monitoring ✓ ✓ ✓ ✓; view logs ✓ ✓ ✓ ✓; scale workload ✓ ✓ ✓ ✗; edit YAML ✓ ✓ ✓ ✗; update ✓ ✓ ✓ ✗; status: stop ✓ ✓ ✓ ✗; status: restart ✓ ✓ ✓ ✗; delete ✓ ✓ ✓ ✗
• Cluster → DaemonSets: view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); create from YAML ✓ ✓ ✓ ✗; create from image ✓ ✓ ✓ ✗; select image (as above) ✓ ✓ ✓ ✗; enter console ✓ ✓ ✓ ✗; view monitoring ✓ ✓ ✓ ✓; view logs ✓ ✓ ✓ ✓; edit YAML ✓ ✓ ✓ ✗; update ✓ ✓ ✓ ✗; status: restart ✓ ✓ ✓ ✗; delete ✓ ✓ ✓ ✗
• Cluster → Jobs: view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); create from YAML ✓ ✓ ✓ ✗; create from image ✓ ✓ ✓ ✗; instance list ✓ ✓ ✓ ✓; select image (as above) ✓ ✓ ✓ ✗; enter console ✓ ✓ ✓ ✗; view logs ✓ ✓ ✓ ✓; view YAML ✓ ✓ ✓ ✓; restart ✓ ✓ ✓ ✗; view events ✓ ✓ ✓ ✓; delete ✓ ✓ ✓ ✗
• Cluster → CronJobs: view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); create from YAML ✓ ✓ ✓ ✗; create from image ✓ ✓ ✓ ✗; select image (as above) ✓ ✓ ✓ ✗; edit YAML ✓ ✓ ✓ ✗; stop ✓ ✓ ✓ ✗; view job list ✓ ✓ ✓ ✓; view events ✓ ✓ ✓ ✓; delete ✓ ✓ ✓ ✗
• Cluster → Pods: view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); enter console ✓ ✓ ✓ ✗; view monitoring ✓ ✓ ✓ ✓; view logs ✓ ✓ ✓ ✓; view YAML ✓ ✓ ✓ ✓; upload files ✓ ✓ ✓ ✗; download files ✓ ✓ ✓ ✗; view container list ✓ ✓ ✓ ✓; view events ✓ ✓ ✓ ✓; delete ✓ ✓ ✓ ✗
• Cluster → ReplicaSets: view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); enter console ✓ ✓ ✓ ✗; view monitoring ✓ ✓ ✓ ✓; view logs ✓ ✓ ✓ ✓; view YAML ✓ ✓ ✓ ✓; delete ✓ ✓ ✓ ✗
• Cluster → Helm apps: view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); update ✓ ✓ ✓ ✗; view YAML ✓ ✓ ✓ ✓; delete ✓ ✓ ✓ ✗
• Cluster → Helm charts: view list ✓ ✓ ✓ ✓; view details ✓ ✓ ✓ ✓; install chart ✓ ✓(ns-level charts only) ✗ ✗; download chart ✓ ✓ ✓(same as the view API) ✓
• Cluster → Helm repositories: view list ✓ ✓ ✓ ✓; create repository ✓ ✗ ✗ ✗; update repository ✓ ✗ ✗ ✗; clone repository ✓ ✗ ✗ ✗; refresh repository ✓ ✗ ✗ ✗; modify labels ✓ ✗ ✗ ✗; modify annotations ✓ ✗ ✗ ✗; delete ✓ ✗ ✗ ✗
• Cluster → Services: view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); create from YAML ✓ ✓ ✓ ✗; create ✓ ✓ ✓ ✗; update ✓ ✓ ✓ ✗; view events ✓ ✓ ✓ ✓; edit YAML ✓ ✓ ✓ ✗; delete ✓ ✓ ✓ ✗
• Cluster → Ingresses: view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); create from YAML ✓ ✓ ✓ ✗; create ✓ ✓ ✓ ✗; update ✓ ✓ ✓ ✗; view events ✓ ✓ ✓ ✓; edit YAML ✓ ✓ ✓ ✗; delete ✓ ✓ ✓ ✗
• Cluster → Network policies: view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✗; create from YAML ✓ ✓ ✓ ✗; create ✓ ✓ ✓ ✗; delete ✓ ✓ ✓ ✗
• Cluster → Network configuration: configure network ✓ ✓ ✓ ✗
• Cluster → Custom resources: view list ✓ ✗ ✗ ✗; view/manage details ✓ ✗ ✗ ✗; create from YAML ✓ ✗ ✗ ✗; edit YAML ✓ ✗ ✗ ✗; delete ✓ ✗ ✗ ✗
• Cluster → PVCs: view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); create ✓ ✓ ✓ ✗; select StorageClass ✓ ✓ ✓ ✗; create from YAML ✓ ✓ ✓ ✗; edit YAML ✓ ✓ ✓ ✗; clone ✓ ✓ ✓ ✗; delete ✓ ✓ ✓ ✗
• Cluster → PVs: view list ✓ ✗ ✗ ✗; view/manage details ✓ ✗ ✗ ✗; create from YAML ✓ ✗ ✗ ✗; create ✓ ✗ ✗ ✗; edit YAML ✓ ✗ ✗ ✗; update ✓ ✗ ✗ ✗; clone ✓ ✗ ✗ ✗; modify labels ✓ ✗ ✗ ✗; modify annotations ✓ ✗ ✗ ✗; delete ✓ ✗ ✗ ✗
• Cluster → StorageClasses: view list ✓ ✗ ✗ ✗; create from YAML ✓ ✗ ✗ ✗; create ✓ ✗ ✗ ✗; view YAML ✓ ✗ ✗ ✗; update ✓ ✗ ✗ ✗; authorize namespace ✓ ✗ ✗ ✗; revoke authorization ✓ ✗ ✗ ✗; delete ✓ ✗ ✗ ✗
• Cluster → ConfigMaps: view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); create from YAML ✓ ✓ ✓ ✗; create ✓ ✓ ✓ ✗; edit YAML ✓ ✓ ✓ ✗; update ✓ ✓ ✓ ✗; export ConfigMap ✓ ✓ ✓ ✗; delete ✓ ✓ ✓ ✗
• Cluster → Secrets: view list ✓ ✓ ✓ ✗; view/manage details ✓ ✓ ✓ ✗; create from YAML ✓ ✓ ✓ ✗; create ✓ ✓ ✓ ✗; edit YAML ✓ ✓ ✓ ✗; update ✓ ✓ ✓ ✗; export Secret ✓ ✓ ✓ ✗; delete ✓ ✓ ✓ ✗
• Cluster → Namespaces: view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); create from YAML ✓ ✗ ✗ ✗; create ✓ ✗ ✗ ✗; view YAML ✓ ✓ ✓ ✗; modify labels ✓ ✓ ✗ ✗; unbind workspace ✗ ✗ ✗ ✗; bind workspace ✗ ✗ ✗ ✗; quota management ✓ ✗ ✗ ✗; delete ✓ ✗ ✗ ✗
• Cluster → Cluster operation records: view list ✓ ✗ ✗ ✗; view YAML ✓ ✗ ✗ ✗; view logs ✓ ✗ ✗ ✗; delete ✓ ✗ ✗ ✗
• Cluster → Helm operation records: set retention count ✓ ✗ ✗ ✗; view YAML ✓ ✓ ✗ ✗; view logs ✓ ✓ ✗ ✗; delete ✓ ✓ ✗ ✗
• Cluster → Cluster upgrade: view details ✓ ✗ ✗ ✗; upgrade ✗ ✗ ✗ ✗
• Cluster → Cluster settings: addon configuration ✓ ✗ ✗ ✗; advanced configuration ✓ ✗ ✗ ✗
• Namespaces: view list ✓ ✓ ✓ ✓; create ✓ ✗ ✗ ✗; view/manage details ✓ ✓ ✓ ✓; view YAML ✓ ✓ ✓ ✗; modify labels ✓ ✓ ✗ ✗; bind workspace ✓ ✗ ✗ ✗; quota management ✓ ✗ ✗ ✗; delete ✓ ✗ ✗ ✗
• Workloads → Deployments: view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); enter console ✓ ✓ ✓ ✗; view monitoring ✓ ✓ ✓ ✓; view logs ✓ ✓ ✓ ✓; scale workload ✓ ✓ ✓ ✗; edit YAML ✓ ✓ ✓ ✗; update ✓ ✓ ✓ ✗; status: pause upgrade ✓ ✓ ✓ ✗; status: stop ✓ ✓ ✓ ✗; status: restart ✓ ✓ ✓ ✗; rollback ✓ ✓ ✓ ✗; modify labels and annotations ✓ ✓ ✓ ✗; delete ✓ ✓ ✓ ✗
• Workloads → StatefulSets: view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); enter console ✓ ✓ ✓ ✗; view monitoring ✓ ✓ ✓ ✓; view logs ✓ ✓ ✓ ✓; scale workload ✓ ✓ ✓ ✗; edit YAML ✓ ✓ ✓ ✗; update ✓ ✓ ✓ ✗; status: stop ✓ ✓ ✓ ✗; status: restart ✓ ✓ ✓ ✗; delete ✓ ✓ ✓ ✗
• Workloads → DaemonSets: view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); enter console ✓ ✓ ✓ ✗; view monitoring ✓ ✓ ✓ ✓; view logs ✓ ✓ ✓ ✓; edit YAML ✓ ✓ ✓ ✗; update ✓ ✓ ✓ ✗; status: restart ✓ ✓ ✓ ✗; delete ✓ ✓ ✓ ✗
• Workloads → Jobs: view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); enter console ✓ ✓ ✓ ✗; view logs ✓ ✓ ✓ ✓; view YAML ✓ ✓ ✓ ✗; restart ✓ ✓ ✓ ✗; view events ✓ ✓ ✓ ✓; delete ✓ ✓ ✓ ✗
• Workloads → CronJobs: view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); view events ✓ ✓ ✓ ✓; delete ✓ ✓ ✓ ✗
• Workloads → Pods: view list ✓ ✓ ✓ ✓; view/manage details ✓ ✓ ✓ ✓(view only); enter console ✓ ✓ ✓ ✗; view monitoring ✓ ✓ ✓ ✓; view logs ✓ ✓ ✓ ✓; view YAML ✓ ✓ ✓ ✓; upload files ✓ ✓ ✓ ✗; download files ✓ ✓ ✓ ✗; view container list ✓ ✓ ✓ ✓; view events ✓ ✓ ✓ ✓; delete ✓ ✓ ✓ ✗
• Backup & restore → Application backup: view list ✓ ✗ ✗ ✗; view/manage details ✓ ✗ ✗ ✗; create backup plan ✓ ✗ ✗ ✗; view YAML ✓ ✗ ✗ ✗; update plan ✓ ✗ ✗ ✗; pause ✓ ✗ ✗ ✗; run now ✓ ✗ ✗ ✗; delete ✓ ✗ ✗ ✗
• Backup & restore → Restore backup: view list ✓ ✗ ✗ ✗; view/manage details ✓ ✗ ✗ ✗; restore backup ✓ ✗ ✗ ✗; delete ✓ ✗ ✗ ✗
• Backup & restore → Backup points: view list ✓ ✗ ✗ ✗; delete ✓ ✗ ✗ ✗
• Backup & restore → Object storage: view list ✓ ✗ ✗ ✗
• Backup & restore → ETCD backup: view backup policy list ✓ ✗ ✗ ✗; create backup policy ✓ ✗ ✗ ✗; view logs ✓ ✗ ✗ ✗; view YAML ✓ ✗ ✗ ✗; update backup policy ✓ ✗ ✗ ✗; stop/start ✓ ✗ ✗ ✗; run now ✓ ✗ ✗ ✗; view/manage details ✓ ✗ ✗ ✗; delete backup records ✓ ✗ ✗ ✗; view backup point list ✓ ✗ ✗ ✗
• Cluster inspection: view list ✓ ✗ ✗ ✗; view/manage details ✓ ✗ ✗ ✗; run inspection ✓ ✗ ✗ ✗; settings ✓ ✗ ✗ ✗
• Permission management → Cluster permissions: view list ✓ ✗ ✗ ✗; grant a user cluster admin ✓ ✗ ✗ ✗; delete ✓ ✗ ✗ ✗
• Permission management → Namespace permissions: view list ✓ ✓ ✗ ✗; grant a user ns admin ✓ ✓ ✗ ✗; grant a user ns editor ✓ ✓ ✗ ✗; grant a user ns viewer ✓ ✓ ✗ ✗; edit permissions ✓ ✓ ✗ ✗; delete ✓ ✓ ✗ ✗
• Security management → Compliance scanning: view scan report list ✓ ✗ ✗ ✗; view scan report details ✓ ✗ ✗ ✗; download scan report ✓ ✗ ✗ ✗; delete scan report ✓ ✗ ✗ ✗; view scan policy list ✓ ✗ ✗ ✗; create scan policy ✓ ✗ ✗ ✗; delete scan policy ✓ ✗ ✗ ✗; view scan configuration list ✓ ✗ ✗ ✗; view scan configuration details ✓ ✗ ✗ ✗; delete scan configuration ✓ ✗ ✗ ✗
• Security management → Permission scanning: view scan report list ✓ ✗ ✗ ✗; view scan report details ✓ ✗ ✗ ✗; delete scan report ✓ ✗ ✗ ✗; view scan policy list ✓ ✗ ✗ ✗; create scan policy ✓ ✗ ✗ ✗; delete scan policy ✓ ✗ ✗ ✗
• Security management → Vulnerability scanning: view scan report list ✓ ✗ ✗ ✗; view scan report details ✓ ✗ ✗ ✗; delete scan report ✓ ✗ ✗ ✗; view scan policy list ✓ ✗ ✗ ✗; create scan policy ✓ ✗ ✗ ✗; delete scan policy ✓ ✗ ✗ ✗
"},{"location":"admin/ghippo/personal-center/accesstoken.html","title":"Access Keys","text":"

An access key (Access Key) can be used to access the open API and for continuous delivery. Following the steps below, users can obtain a key in their Personal Center and use it to access the API.

          "},{"location":"admin/ghippo/personal-center/accesstoken.html#_2","title":"\u83b7\u53d6\u5bc6\u94a5","text":"

Log in to the AI computing platform, find Personal Center in the drop-down menu in the upper right corner, and manage your account's access keys on the Access Keys page.

Info

Access key information is displayed only once. If you forget your access key, you will need to create a new one.

          "},{"location":"admin/ghippo/personal-center/accesstoken.html#api","title":"\u4f7f\u7528\u5bc6\u94a5\u8bbf\u95ee API","text":"

When calling the 算丰 AI computing platform OpenAPI, add the request header Authorization: Bearer ${token} to identify the caller, where ${token} is the key obtained in the previous step. For details on the individual endpoints, see the OpenAPI documentation. (A Python version of the request is sketched after the example below.)

Request example:

    curl -X GET -H 'Authorization:Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IkRKVjlBTHRBLXZ4MmtQUC1TQnVGS0dCSWc1cnBfdkxiQVVqM2U3RVByWnMiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE2NjE0MTU5NjksImlhdCI6MTY2MDgxMTE2OSwiaXNzIjoiZ2hpcHBvLmlvIiwic3ViIjoiZjdjOGIxZjUtMTc2MS00NjYwLTg2MWQtOWI3MmI0MzJmNGViIiwicHJlZmVycmVkX3VzZXJuYW1lIjoiYWRtaW4iLCJncm91cHMiOltdfQ.RsUcrAYkQQ7C6BxMOrdD3qbBRUt0VVxynIGeq4wyIgye6R8Ma4cjxG5CbU1WyiHKpvIKJDJbeFQHro2euQyVde3ygA672ozkwLTnx3Tu-_mB1BubvWCBsDdUjIhCQfT39rk6EQozMjb-1X1sbLwzkfzKMls-oxkjagI_RFrYlTVPwT3Oaw-qOyulRSw7Dxd7jb0vINPq84vmlQIsI3UuTZSNO5BCgHpubcWwBss-Aon_DmYA-Et_-QtmPBA3k8E2hzDSzc7eqK0I68P25r9rwQ3DeKwD1dbRyndqWORRnz8TLEXSiCFXdZT2oiMrcJtO188Ph4eLGut1-4PzKhwgrQ' 'https://demo-dev.daocloud.io/apis/ghippo.io/v1alpha1/users?page=1&pageSize=10' -k

Response:

          {\n    \"items\": [\n        {\n            \"id\": \"a7cfd010-ebbe-4601-987f-d098d9ef766e\",\n            \"name\": \"a\",\n            \"email\": \"\",\n            \"description\": \"\",\n            \"firstname\": \"\",\n            \"lastname\": \"\",\n            \"source\": \"locale\",\n            \"enabled\": true,\n            \"createdAt\": \"1660632794800\",\n            \"updatedAt\": \"0\",\n            \"lastLoginAt\": \"\"\n        }\n    ],\n    \"pagination\": {\n        \"page\": 1,\n        \"pageSize\": 10,\n        \"total\": 1\n    }\n}\n
          "},{"location":"admin/ghippo/personal-center/language.html","title":"\u8bed\u8a00\u8bbe\u7f6e","text":"

This section explains how to set the interface language. Chinese and English are currently supported.

The language setting is the entry point for the platform's multi-language service. The platform is displayed in Chinese by default; users can switch the platform language by choosing English or by letting the platform automatically detect the browser's language preference. Each user's language preference is independent, so switching does not affect other users.

The platform offers three language options: Chinese, English, and automatic detection of your browser's language preference.

Follow these steps:

1. Log in to the AI computing platform with your username and password. Click Global Management at the bottom of the left navigation bar.

2. Click your username in the upper right corner and select Personal Center.

3. Click the Language Settings tab.

4. Switch the language option.

          "},{"location":"admin/ghippo/personal-center/security-setting.html","title":"\u5b89\u5168\u8bbe\u7f6e","text":"

Purpose: used to set an email address and change the login password.

• Email: after the administrator configures the mail server address, users can click the Forgot Password button on the login page and use the email address set here to retrieve their password.
• Password: the password used to log in to the platform; changing it regularly is recommended.

The steps are as follows:

1. Click your username in the upper right corner and select Personal Center.

2. Click the Security Settings tab, then fill in your email address or change your login password.

          "},{"location":"admin/ghippo/personal-center/ssh-key.html","title":"\u914d\u7f6e SSH \u516c\u94a5","text":"

This article explains how to configure an SSH public key.

          "},{"location":"admin/ghippo/personal-center/ssh-key.html#1-ssh","title":"\u6b65\u9aa4 1\uff1a\u67e5\u770b\u5df2\u5b58\u5728\u7684 SSH \u5bc6\u94a5","text":"

Before generating a new SSH key, check whether you can use an existing locally generated key; SSH key pairs are usually stored under the local user's home directory. On Linux or Mac, run the commands below directly to view existing public keys; on Windows, run them in WSL (requires Windows 10 or later) or Git Bash.

• ED25519 algorithm:

    cat ~/.ssh/id_ed25519.pub

• RSA algorithm:

    cat ~/.ssh/id_rsa.pub

If a long string beginning with ssh-ed25519 or ssh-rsa is returned, a local public key already exists; you can skip Step 2 (generating an SSH key) and go straight to Step 3.

          "},{"location":"admin/ghippo/personal-center/ssh-key.html#2-ssh","title":"\u6b65\u9aa4 2\uff1a\u751f\u6210 SSH \u5bc6\u94a5","text":"

If Step 1 did not return the expected string, there is no usable local SSH key and a new one must be generated. Proceed as follows:

1. Open a terminal (on Windows, use WSL or Git Bash) and run ssh-keygen -t.

2. Enter the key algorithm type and an optional comment.

   The comment appears in the .pub file; an email address is commonly used as the comment.

   • To generate a key pair based on the ED25519 algorithm:

        ssh-keygen -t ed25519 -C "<comment>"

   • To generate a key pair based on the RSA algorithm:

        ssh-keygen -t rsa -C "<comment>"
3. Press Enter and choose the path where the SSH key will be saved.

   Taking the ED25519 algorithm as an example, the default path looks like this:

       Generating public/private ed25519 key pair.
       Enter file in which to save the key (/home/user/.ssh/id_ed25519):

   By default the key is generated at /home/user/.ssh/id_ed25519, with the corresponding public key at /home/user/.ssh/id_ed25519.pub.

4. Set a passphrase for the key.

       Enter passphrase (empty for no passphrase):
       Enter same passphrase again:

   The passphrase is empty by default; you can use one to protect the private key file. If you do not want to type the passphrase every time you access a repository over the SSH protocol, leave it empty when creating the key.

5. Press Enter to finish creating the key pair.

          "},{"location":"admin/ghippo/personal-center/ssh-key.html#3","title":"\u6b65\u9aa4 3\uff1a\u62f7\u8d1d\u516c\u94a5","text":"

Besides manually copying the public key printed on the command line, you can copy it to the clipboard with the command appropriate to your operatingating system:

• Windows (in WSL or Git Bash):

    cat ~/.ssh/id_ed25519.pub | clip

• Mac:

    tr -d '\n' < ~/.ssh/id_ed25519.pub | pbcopy

• GNU/Linux (requires xclip):

    xclip -sel clip < ~/.ssh/id_ed25519.pub
          "},{"location":"admin/ghippo/personal-center/ssh-key.html#4-ai","title":"\u6b65\u9aa4 4\uff1a\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e0a\u8bbe\u7f6e\u516c\u94a5","text":"
          1. \u767b\u5f55\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0UI \u9875\u9762\uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u9009\u62e9 \u4e2a\u4eba\u4e2d\u5fc3 -> SSH \u516c\u94a5 \u3002

2. Add the generated SSH public key information (to double-check which key you are pasting, see the fingerprint sketch after this list).

1. SSH public key content.

2. Public key title: a custom name for the key, used to tell keys apart.

3. Expiration time: after this time the public key automatically expires and can no longer be used; if left unset, the key is valid permanently.
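
To double-check which key you are uploading, here is a minimal Python sketch (assuming a standard single-line OpenSSH public key at ~/.ssh/id_ed25519.pub) that prints the key type, SHA256 fingerprint, and comment, similar to ssh-keygen -lf:

    import base64, hashlib, pathlib

    # An OpenSSH public key is one line: "<type> <base64 blob> [comment]"
    key_line = (pathlib.Path.home() / ".ssh" / "id_ed25519.pub").read_text().strip()
    key_type, blob_b64, *comment = key_line.split()

    # The SHA256 fingerprint is base64(sha256(decoded blob)) without '=' padding
    digest = hashlib.sha256(base64.b64decode(blob_b64)).digest()
    fingerprint = base64.b64encode(digest).decode().rstrip("=")

    print(key_type, f"SHA256:{fingerprint}", " ".join(comment))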

          "},{"location":"admin/ghippo/platform-setting/about.html","title":"\u5173\u4e8e\u5e73\u53f0","text":"

About the Platform mainly presents the currently updated version of each platform sub-module, declares the open source software used by the platform, and thanks the platform's technical team with an animated video.

To view it:

1. Log in to the AI computing platform as a user with the Admin role. Click Global Management at the bottom of the left navigation bar.

2. Click Platform Settings and select About the Platform to view the product version, the open source software declaration, and the technical team.

   License declaration

   Technical team

          "},{"location":"admin/ghippo/platform-setting/appearance.html","title":"\u5916\u89c2\u5b9a\u5236","text":"

On the 算丰 AI computing platform, Appearance Customization lets you change the login page, the top navigation bar, and the copyright and ICP filing information at the bottom of the page, helping users better identify the product.

          "},{"location":"admin/ghippo/platform-setting/appearance.html#_2","title":"\u5b9a\u5236\u8bf4\u660e","text":"
          1. \u4f7f\u7528\u5177\u6709 admin \u89d2\u8272\u7684\u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\u3002\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u5e95\u90e8\u7684 \u5168\u5c40\u7ba1\u7406 -> \u5e73\u53f0\u8bbe\u7f6e \u3002

2. Select Appearance Customization. On the Login Page tab, modify the icon and text of the login page, then click Save.

3. Log out, then refresh the login page to see the configured effect.

4. Click the Top Navigation Bar tab, modify the icon and text of the navigation bar, then click Save.

5. Click Advanced Customization to style the login page, navigation bar, and bottom copyright and filing information with CSS.

          "},{"location":"admin/ghippo/platform-setting/appearance.html#_3","title":"\u9ad8\u7ea7\u5b9a\u5236","text":"

Advanced customization can change the colors, font spacing, font sizes, and so on across the entire container platform through CSS styles, so you need to be familiar with CSS syntax. Delete the contents of the black input box to restore the default state, or simply click the Reset button.

Sample CSS for the login page:

    .test {
      width: 12px;
    }

    #kc-login {
     /* color: red!important; */
    }

Sample CSS for pages after login:

    .dao-icon.dao-iconfont.icon-service-global.dao-nav__head-icon {
       color: red!important;
    }
    .ghippo-header-logo {
      background-color: green!important;
    }
    .ghippo-header {
      background-color: rgb(128, 115, 0)!important;
    }
    .ghippo-header-nav-main {
      background-color: rgb(0, 19, 128)!important;
    }
    .ghippo-header-sub-nav-main .dao-popper-inner {
      background-color: rgb(231, 82, 13) !important;
    }

Sample customization of the footer (the copyright, filing, and similar information at the bottom of the page):

          <div class=\"footer-content\">\n  <span class=\"footer-item\">Copyright \u00a9 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4fdd\u7559\u6240\u6709\u6743\u5229</span>\n  <a class=\"footer-item\" href=\"https://beian.miit.gov.cn/\" target=\"_blank\" rel=\"noopener noreferrer\">\u6caa ICP \u5907 xxxxxx \u53f7 - 1</a>\n  <a class=\"footer-item\" href=\"https://beian.miit.gov.cn/\" target=\"_blank\" rel=\"noopener noreferrer\">\u6caa ICP \u5907 xxxxxx \u53f7 - 2</a>\n</div>\n<div class=\"footer-content\">\n  <img class=\"gongan-icon\" src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABIAAAASCAYAAABWzo5XAAAACXBIWXMAAAsTAAALEwEAmpwYAAAKTWlDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVN3WJP3Fj7f92UPVkLY8LGXbIEAIiOsCMgQWaIQkgBhhBASQMWFiApWFBURnEhVxILVCkidiOKgKLhnQYqIWotVXDjuH9yntX167+3t+9f7vOec5/zOec8PgBESJpHmomoAOVKFPDrYH49PSMTJvYACFUjgBCAQ5svCZwXFAADwA3l4fnSwP/wBr28AAgBw1S4kEsfh/4O6UCZXACCRAOAiEucLAZBSAMguVMgUAMgYALBTs2QKAJQAAGx5fEIiAKoNAOz0ST4FANipk9wXANiiHKkIAI0BAJkoRyQCQLsAYFWBUiwCwMIAoKxAIi4EwK4BgFm2MkcCgL0FAHaOWJAPQGAAgJlCLMwAIDgCAEMeE80DIEwDoDDSv+CpX3CFuEgBAMDLlc2XS9IzFLiV0Bp38vDg4iHiwmyxQmEXKRBmCeQinJebIxNI5wNMzgwAABr50cH+OD+Q5+bk4eZm52zv9MWi/mvwbyI+IfHf/ryMAgQAEE7P79pf5eXWA3DHAbB1v2upWwDaVgBo3/ldM9sJoFoK0Hr5i3k4/EAenqFQyDwdHAoLC+0lYqG9MOOLPv8z4W/gi372/EAe/tt68ABxmkCZrcCjg/1xYW52rlKO58sEQjFu9+cj/seFf/2OKdHiNLFcLBWK8ViJuFAiTcd5uVKRRCHJleIS6X8y8R+W/QmTdw0ArIZPwE62B7XLbMB+7gECiw5Y0nYAQH7zLYwaC5EAEGc0Mnn3AACTv/mPQCsBAM2XpOMAALzoGFyolBdMxggAAESggSqwQQcMwRSswA6cwR28wBcCYQZEQAwkwDwQQgbkgBwKoRiWQRlUwDrYBLWwAxqgEZrhELTBMTgN5+ASXIHrcBcGYBiewhi8hgkEQcgIE2EhOogRYo7YIs4IF5mOBCJhSDSSgKQg6YgUUSLFyHKkAqlCapFdSCPyLXIUOY1cQPqQ28ggMor8irxHMZSBslED1AJ1QLmoHxqKxqBz0XQ0D12AlqJr0Rq0Hj2AtqKn0UvodXQAfYqOY4DRMQ5mjNlhXIyHRWCJWBomxxZj5Vg1Vo81Yx1YN3YVG8CeYe8IJAKLgBPsCF6EEMJsgpCQR1hMWEOoJewjtBK6CFcJg4Qxwicik6hPtCV6EvnEeGI6sZBYRqwm7iEeIZ4lXicOE1+TSCQOyZLkTgohJZAySQtJa0jbSC2kU6Q+0hBpnEwm65Btyd7kCLKArCCXkbeQD5BPkvvJw+S3FDrFiOJMCaIkUqSUEko1ZT/lBKWfMkKZoKpRzame1AiqiDqfWkltoHZQL1OHqRM0dZolzZsWQ8ukLaPV0JppZ2n3aC/pdLoJ3YMeRZfQl9Jr6Afp5+mD9HcMDYYNg8dIYigZaxl7GacYtxkvmUymBdOXmchUMNcyG5lnmA+Yb1VYKvYqfBWRyhKVOpVWlX6V56pUVXNVP9V5qgtUq1UPq15WfaZGVbNQ46kJ1Bar1akdVbupNq7OUndSj1DPUV+jvl/9gvpjDbKGhUaghkijVGO3xhmNIRbGMmXxWELWclYD6yxrmE1iW7L57Ex2Bfsbdi97TFNDc6pmrGaRZp3mcc0BDsax4PA52ZxKziHODc57LQMtPy2x1mqtZq1+rTfaetq+2mLtcu0W7eva73VwnUCdLJ31Om0693UJuja6UbqFutt1z+o+02PreekJ9cr1Dund0Uf1bfSj9Rfq79bv0R83MDQINpAZbDE4Y/DMkGPoa5hpuNHwhOGoEctoupHEaKPRSaMnuCbuh2fjNXgXPmasbxxirDTeZdxrPGFiaTLbpMSkxeS+Kc2Ua5pmutG003TMzMgs3KzYrMnsjjnVnGueYb7ZvNv8jYWlRZzFSos2i8eW2pZ8ywWWTZb3rJhWPlZ5VvVW16xJ1lzrLOtt1ldsUBtXmwybOpvLtqitm63Edptt3xTiFI8p0in1U27aMez87ArsmuwG7Tn2YfYl9m32zx3MHBId1jt0O3xydHXMdmxwvOuk4TTDqcSpw+lXZxtnoXOd8zUXpkuQyxKXdpcXU22niqdun3rLleUa7rrStdP1o5u7m9yt2W3U3cw9xX2r+00umxvJXcM970H08PdY4nHM452nm6fC85DnL152Xlle+70eT7OcJp7WMG3I28Rb4L3Le2A6Pj1l+s7pAz7GPgKfep+Hvqa+It89viN+1n6Zfgf8nvs7+sv9j/i/4XnyFvFOBWABwQHlAb2BGoGzA2sDHwSZBKUHNQWNBbsGLww+FUIMCQ1ZH3KTb8AX8hv5YzPcZyya0RXKCJ0VWhv6MMwmTB7WEY6GzwjfEH5vpvlM6cy2CIjgR2yIuB9pGZkX+X0UKSoyqi7qUbRTdHF09yzWrORZ+2e9jvGPqYy5O9tqtnJ2Z6xqbFJsY+ybuIC4qriBeIf4RfGXEnQTJAntieTE2MQ9ieNzAudsmjOc5JpUlnRjruXcorkX5unOy553PFk1WZB8OIWYEpeyP+WDIEJQLxhP5aduTR0T8oSbhU9FvqKNolGxt7hKPJLmnVaV9jjdO31D+miGT0Z1xjMJT1IreZEZkrkj801WRNberM/ZcdktOZSclJyjUg1plrQr1zC3KLdPZisrkw3keeZtyhuTh8r35CP5c/PbFWyFTNGjtFKuUA4WTC+oK3hbGFt4uEi9SFrUM99m/ur5IwuCFny9kLBQuLCz2Lh4WfHgIr9FuxYji1MXdy4xXVK6ZHhp8NJ9y2jLspb9UOJYUlXyannc8o5Sg9KlpUMrglc0lamUycturvRauWMVYZVkVe9ql9VbVn8qF5VfrHCsqK74sEa45uJXTl/VfPV5bdra3kq3yu3rSOuk626s91m/r0q9akHV0IbwDa0b8Y3lG19tSt50oXpq
9Y7NtM3KzQM1YTXtW8y2rNvyoTaj9nqdf13LVv2tq7e+2Sba1r/dd3vzDoMdFTve75TsvLUreFdrvUV99W7S7oLdjxpiG7q/5n7duEd3T8Wej3ulewf2Re/ranRvbNyvv7+yCW1SNo0eSDpw5ZuAb9qb7Zp3tXBaKg7CQeXBJ9+mfHvjUOihzsPcw83fmX+39QjrSHkr0jq/dawto22gPaG97+iMo50dXh1Hvrf/fu8x42N1xzWPV56gnSg98fnkgpPjp2Snnp1OPz3Umdx590z8mWtdUV29Z0PPnj8XdO5Mt1/3yfPe549d8Lxw9CL3Ytslt0utPa49R35w/eFIr1tv62X3y+1XPK509E3rO9Hv03/6asDVc9f41y5dn3m978bsG7duJt0cuCW69fh29u0XdwruTNxdeo94r/y+2v3qB/oP6n+0/rFlwG3g+GDAYM/DWQ/vDgmHnv6U/9OH4dJHzEfVI0YjjY+dHx8bDRq98mTOk+GnsqcTz8p+Vv9563Or59/94vtLz1j82PAL+YvPv655qfNy76uprzrHI8cfvM55PfGm/K3O233vuO+638e9H5ko/ED+UPPR+mPHp9BP9z7nfP78L/eE8/sl0p8zAAAAIGNIUk0AAHolAACAgwAA+f8AAIDpAAB1MAAA6mAAADqYAAAXb5JfxUYAAAQjSURBVHjaVNNZbFRlGIDh95w525zpdGa6TVtbykBbyiICxQY0AhYTJUCiiYqGqEEiJhKQmBg0ESPeeCGRENEYb4jhBr0gNQrRlCBiSgyLaSlSaKEs3Wemy+xnzuqFYdD/6rt6ku/N9wue55EcPwWArCgIgkx5ZRuYVxsnJ801Z05f3jY1MRnb/HxHV+uSph9RKq4mhkdwbZVgdQ2SHkPTwgj/h1QUWWi8/tfg/hM/XN/Y2zfaZnkSnuRDtLMsXhBOvrJtya/LlrcdMs1Qb1lVRQmSAEDAsU1kxpgamXp3y+azu1esreK9dyRqs9PIjkW6OsLx7lTV1ld/237s8HRV57MbnvO8CA+e9GCQFTk6Mza+4/0P+t9a9VSEI3uyTH/eR27aB2Ed31Q/Hx1sI6BHOPT13c5Frd0HW9p3HPUQEwAigJW9RDp+bstrOy981nVGLN/7RpHUV70YfXnEAtjxFPasxPDBQXatjzNTdOQXtg983H/51AFFy1KCIg2bNIdC+8270NwmUmelsXqSqHkDK5PDl8iCW0QcnEW+lqCjvcjQuMZ4YnQRTkotQUZu4GkjcfZNv19G011kXw4vayNYNvqCCvSVTciOgABgeuhBGwhgz5zbkI2ff7HUqJiNR2QktbbSYnBYYqbMT/ilKI4SIbT/GcRylbnvLmJ2X8N7tJ7rR8OE/BbliqEYea81WIotmOs02WFpc55Lf0f5/mSI3dsamOgxSX7ZjaALuBmB6M6FnB+S+POCwmOLk1QFFAqZyQWl1YrpiRZJLvDkygyC5NJ1XCax7xYNiTQVEYVIuUulayIcGeLkpw6WK7GuPY/fb2CkhleXIFFe8XPGaKBj9QxLW1Ik0bg8EuT2zRCJYZvZIYepe0EGbvi4bQUJVZhs2phADFYj+df0lBqJUnaekS4SUHXe3jrOnoE2PhSewHfRpfZGgcryIvfHdQruQlLo7Ns6QizqkJ31CIUlqwQJXuWUpDXj6qOsW32HT3YNImll9FwJsb4jyaLmWQ4fa6a+2sQw0ry8YZSiHcPxxXBtMfCv4XkUCrfliWs/fTE31rtTVfv9vsIorvQIniMhqXM4popVcJFVMHMpfMEaLPdxR1Tnna1b1vl6tGntpAjgCTNWONZyIFBR8Ydtr6EgrCI3VySfzZPLBDHyIq5gkpmzcOUmTGMF+bh7M9LYulfWzMmHBzk7Fpq9deWEYxjrtaCMXjWfstp6BCGNXZzBdYqYhogWqkMum4+oBVD0YnP63u/fFqbv1D+M7VSlBbmmK5uYaLYLYwslfwFVAyXQiOfcx3XyyGIM8DDn0lgWyGokHogu/0UJxpL/+f2e569s/CZQZ53OpzJr0+NXludUfb5jVdf7VUGXJUPIZast1S9PeII6jFDT5xMjFwO1S4c8zwTgnwEAxufYSzA67PMAAAAASUVORK5CYII=\" >\n  <a class=\"footer-item\" href=\"http://www.beian.gov.cn/portal/registerSystemInfo\">\u6caa\u516c\u7f51\u5b89\u5907 12345678912345\u53f7</a>\n</div>\n<style>\n.footer-content {\n  display: flex;\n  flex-wrap: wrap;\n  align-items: center;\n  justify-content: center;\n}\n.footer-content + .footer-content {\n  margin-top: 8px;\n}\n.login-pf .footer-item {\n  color: white;\n}\n.footer-item {\n  color: var(--dao-gray-010);\n  text-decoration: none;\n}\n.footer-item + .footer-item {\n  margin-left: 8px;\n}\n.gongan-icon {\n  width: 18px;\n  height: 18px;\n  margin-right: 4px;\n}\n</style>\n

Note

To restore the default settings, click Reset. Note that resetting discards all custom settings.

          "},{"location":"admin/ghippo/platform-setting/mail-server.html","title":"\u90ae\u4ef6\u670d\u52a1\u5668","text":"

When a user forgets their password, the 算丰 AI computing platform sends an email to verify the email address and make sure the request comes from the user themselves. Before the platform can send email, you must first provide your mail server address.

The steps are as follows:

1. Log in to the AI computing platform as a user with the admin role. Click Global Management at the bottom of the left navigation bar.

2. Click Platform Settings and select Mail Server Settings.

   Fill in the following fields to configure the mail server:

   | Field | Description | Example |
   | --- | --- | --- |
   | SMTP server address | Address of an SMTP server that can provide mail service | smtp.163.com |
   | SMTP server port | Port used for sending mail | 25 |
   | Username | Name of the SMTP user | test@163.com |
   | Password | Password of the SMTP account | 123456 |
   | Sender email address | Email address of the sender | test@163.com |
   | Use SSL secure connection | SSL encrypts mail in transit, improving the security of the information transmitted by email; it usually requires configuring a certificate for the mail server | Disabled |
3. After configuring, click Save, then click Test Mail Server.

4. A message in the upper right corner of the screen saying the mail was sent successfully indicates that the mail server was set up successfully (a way to verify the same parameters outside the platform is sketched below).
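
If the test fails, it can help to verify the same parameters outside the platform first. A minimal Python sketch using smtplib and the hypothetical values from the table above (replace them with your own):

    import smtplib
    from email.message import EmailMessage

    # Hypothetical values from the table above -- replace with your own.
    HOST, PORT = "smtp.163.com", 25
    USER, PASSWORD = "test@163.com", "123456"

    msg = EmailMessage()
    msg["From"] = USER
    msg["To"] = USER                    # send the test mail to yourself
    msg["Subject"] = "SMTP server test"
    msg.set_content("Mail server settings look correct.")

    with smtplib.SMTP(HOST, PORT, timeout=10) as smtp:
        # Some servers require smtp.starttls() before login; with "Use SSL"
        # enabled, smtplib.SMTP_SSL would be the counterpart.
        smtp.login(USER, PASSWORD)      # fails here if the credentials are wrong
        smtp.send_message(msg)
    print("test mail sent")
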

          "},{"location":"admin/ghippo/platform-setting/mail-server.html#_2","title":"\u5e38\u89c1\u95ee\u9898","text":"

Q: Why can a user still not retrieve their password after the mail server has been set up?

A: The user may not have set an email address, or may have set a wrong one. In that case, a user with the admin role can find the user by username under Global Management -> Users and Access Control and set a new login password in the user's details.

If the mail server cannot be reached, check whether the mail server address, username, and password are correct.

          "},{"location":"admin/ghippo/platform-setting/security.html","title":"\u5b89\u5168\u7b56\u7565","text":"

The 算丰 AI computing platform provides password-based and access-control-based security policies in the graphical interface.

Password policy

• A new password must not be the same as the most recent historical passwords.
• When a password expires, the system forces the user to change it.
• The password must not be the same as the username.
• The password must not be the same as the user's email address.
• Custom password rules.
• Custom minimum password length (a code sketch of these checks follows below).

Access control policy

• Session timeout policy: the user is logged out of the current account after x hours of inactivity.
• Account lockout policy: the account is locked after multiple failed logins within a limited period.
• Login/logout policy: the user is logged out when the browser is closed.

After entering Global Management, click Platform Settings -> Security Policy in the left navigation bar to set the password policy and the access control policy.
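
As a concrete illustration, here is a minimal Python sketch of the password checks listed above (the rule set, history size, and minimum length are hypothetical, not the platform's actual implementation):

    def validate_password(password: str, username: str, email: str,
                          history: list[str], min_length: int = 8) -> list[str]:
        """Return the list of violated rules; an empty list means the password is accepted."""
        violations = []
        if password in history:
            violations.append("must differ from recent historical passwords")
        if password == username:
            violations.append("must not equal the username")
        if password == email:
            violations.append("must not equal the email address")
        if len(password) < min_length:
            violations.append(f"must be at least {min_length} characters long")
        return violations

    # Example: a password equal to the username is rejected.
    print(validate_password("alice", "alice", "alice@example.com", history=[]))
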

          "},{"location":"admin/ghippo/report-billing/index.html","title":"\u8fd0\u8425\u7ba1\u7406","text":"

Operations Management visually presents, over a statistical time range, the usage totals and utilization rates of CPU, memory, storage, and GPU across dimensions such as clusters, nodes, namespaces, pods, and workspaces, as well as platform consumption information automatically calculated from usage, usage duration, and unit price. The module enables all report statistics by default, and the platform administrator can manually enable or disable individual reports; after enabling or disabling, the platform starts or stops collecting report data within at most 20 minutes, and previously collected data is still displayed normally. Operations data is retained on the platform for at most 365 days; statistics older than that are deleted automatically. You can also download reports in CSV or Excel format for further statistics and analysis.

Report management produces statistics across five metrics (CPU utilization, memory utilization, storage utilization, GPU compute utilization, and GPU memory utilization) for five resource types (clusters, nodes, pods, workspaces, and namespaces). It also integrates with the audit and alerting modules to support statistical management of audit and alert data. In total, seven report types are supported.

Accounting and billing performs billing statistics for five resource types on the platform: clusters, nodes, pods, namespaces, and workspaces. Based on the usage of CPU, memory, storage, and GPU in each resource, together with the prices and currency unit configured manually by the user, it automatically calculates each resource's consumption over the statistical period; depending on the selected time span, it can quickly produce the actual consumption for that span, for example monthly, quarterly, or yearly.

          "},{"location":"admin/ghippo/report-billing/billing.html","title":"\u8ba1\u91cf\u8ba1\u8d39","text":"

Building on the reports, accounting and billing further processes resource usage data for billing. Users can manually set the unit prices of CPU, memory, storage, and GPU as well as the currency unit; the system then automatically calculates the costs of clusters, nodes, pods, namespaces, and workspaces over a period of time. The period can be adjusted freely and filtered by week, month, quarter, or year, and the billing report can then be exported in Excel or CSV format.

          "},{"location":"admin/ghippo/report-billing/billing.html#_2","title":"\u8ba1\u8d39\u89c4\u5219\u53ca\u751f\u6548\u65f6\u95f4","text":"
          • \u8ba1\u8d39\u89c4\u5219\uff1a\u9ed8\u8ba4\u6309\u7167\u8bf7\u6c42\u503c\u548c\u4f7f\u7528\u91cf\u7684\u6700\u5927\u503c\u8ba1\u8d39\u3002
          • \u751f\u6548\u65f6\u95f4\uff1a\u6b21\u65e5\u751f\u6548\uff0c\u4ee5\u6b21\u65e5\u51cc\u6668\u65f6\u83b7\u53d6\u7684\u5355\u4ef7\u548c\u6570\u91cf\u8ba1\u7b97\u5f53\u5929\u4ea7\u751f\u7684\u8d39\u7528\u3002
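
For example, the sketch below applies the max(request, usage) rule to one day of CPU for a single workload (the numbers and unit price are hypothetical, not platform defaults):

    # Hypothetical one-day CPU cost under the max(request, usage) billing rule.
    cpu_request = 4.0    # cores requested by the workload
    cpu_usage = 2.5      # cores actually used that day
    unit_price = 0.12    # currency units per core per day (user-configured)

    billed_cores = max(cpu_request, cpu_usage)   # 4.0 cores
    daily_cost = billed_cores * unit_price       # 4.0 * 0.12 = 0.48
    print(f"billed {billed_cores} cores -> {daily_cost:.2f} per day")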
          "},{"location":"admin/ghippo/report-billing/billing.html#_3","title":"\u529f\u80fd\u7279\u6027","text":"
          • \u652f\u6301\u81ea\u5b9a\u4e49\u8bbe\u7f6e CPU \u3001\u5185\u5b58\u3001\u5b58\u50a8\u4ee5\u53ca GPU \u7684\u8ba1\u8d39\u5355\u4f4d\uff0c\u4ee5\u53ca\u8d27\u5e01\u5355\u4f4d\u3002
          • \u652f\u6301\u67e5\u8be2\u81ea\u5b9a\u4e49\u65f6\u95f4\u8303\u56f4\u7684\u7edf\u8ba1\u6570\u636e\uff0c\u6839\u636e\u6240\u9009\u65f6\u95f4\u6bb5\u81ea\u52a8\u8ba1\u7b97\u51fa\u8be5\u65f6\u95f4\u6bb5\u5185\u7684\u8ba1\u8d39\u60c5\u51b5\u3002
          • \u652f\u6301\u4ee5 CSV \u548c Excel \u4e24\u79cd\u683c\u5f0f\u5bfc\u51fa\u8ba1\u8d39\u62a5\u8868\u3002
          • \u652f\u6301\u5f00\u542f/\u5173\u95ed\u5355\u4e2a\u8ba1\u8d39\u62a5\u8868\uff0c\u5f00\u542f/\u5173\u95ed\u540e\uff0c\u5e73\u53f0\u5c06\u5728 20 \u5206\u949f\u5185\u5f00\u59cb/\u505c\u6b62\u91c7\u96c6\u6570\u636e\uff0c\u5f80\u671f\u5df2\u7ecf\u91c7\u96c6\u5230\u7684\u6570\u636e\u8fd8\u5c06\u6b63\u5e38\u663e\u793a\u3002
          • \u652f\u6301\u5bf9 CPU\u3001\u5185\u5b58\u603b\u91cf\u3001\u5b58\u50a8\u3001GPU\u3001\u603b\u8ba1\u7b49\u8ba1\u8d39\u6570\u636e\u7684\u9009\u62e9\u6027\u5c55\u793a\u3002
          "},{"location":"admin/ghippo/report-billing/billing.html#_4","title":"\u62a5\u8868\u7ef4\u5ea6","text":"

The following reports are currently supported:

• Cluster billing report: shows the billing of CPU, total memory, storage, GPU, and the total for all clusters over a period, plus the number of nodes in each cluster during that period; clicking the node count jumps to the node billing report to view the billing of that cluster's nodes during the period.
• Node billing report: shows the billing of CPU, total memory, storage, GPU, and the total for all nodes over a period, plus each node's IP, type, and owning cluster.
• Pod billing report: shows the billing of CPU, total memory, storage, GPU, and the total for all pods over a period, plus each pod's namespace, cluster, and workspace.
• Workspace billing report: shows the billing of CPU, total memory, storage, GPU, and the total for all workspaces over a period, plus the number of namespaces and pods; clicking the namespace count jumps to the namespace billing report to view the billing of that workspace's namespaces during the period, and the pod count works the same way for pod billing.
• Namespace billing report: shows the billing of CPU, total memory, storage, GPU, and the total for all namespaces over a period, plus the number of pods and the owning cluster and workspace; clicking the pod count jumps to the pod billing report to view the billing of that namespace's pods during the period.
          "},{"location":"admin/ghippo/report-billing/billing.html#_5","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
          1. \u4f7f\u7528\u5177\u6709 admin \u89d2\u8272\u7684\u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\u3002\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u5e95\u90e8\u7684 \u5168\u5c40\u7ba1\u7406 -> \u8fd0\u8425\u7ba1\u7406 \u3002

          2. \u8fdb\u5165 \u8fd0\u8425\u7ba1\u7406 \u540e\u5207\u6362\u4e0d\u540c\u83dc\u5355\u53ef\u67e5\u770b\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u5bb9\u5668\u7ec4\u7b49\u8ba1\u8d39\u62a5\u8868\u3002

          "},{"location":"admin/ghippo/report-billing/report.html","title":"\u62a5\u8868\u7ba1\u7406","text":"

          Report Management presents, in a visual way, statistics across the cluster, node, pod, workspace, namespace, audit, and alert dimensions, providing reliable base data for billing and for tuning how the platform is used.

          "},{"location":"admin/ghippo/report-billing/report.html#_2","title":"\u529f\u80fd\u7279\u6027","text":"
          • \u652f\u6301\u67e5\u8be2\u81ea\u5b9a\u4e49\u65f6\u95f4\u8303\u56f4\u7684\u7edf\u8ba1\u6570\u636e
          • \u652f\u6301\u4ee5 CSV \u548c Excel \u4e24\u79cd\u683c\u5f0f\u5bfc\u51fa\u62a5\u8868
          • \u652f\u6301\u5f00\u542f/\u5173\u95ed\u5355\u4e2a\u62a5\u8868\uff0c\u5f00\u542f/\u5173\u95ed\u540e\uff0c\u5e73\u53f0\u5c06\u5728 20 \u5206\u949f\u5185\u5f00\u59cb/\u505c\u6b62\u91c7\u96c6\u6570\u636e\uff0c\u5f80\u671f\u5df2\u7ecf\u91c7\u96c6\u5230\u7684\u6570\u636e\u8fd8\u5c06\u6b63\u5e38\u663e\u793a
          • \u652f\u6301\u5c55\u793a CPU \u4f7f\u7528\u7387\u3001\u5185\u5b58\u4f7f\u7528\u7387\u3001\u5b58\u50a8\u4f7f\u7528\u7387\u548c GPU \u663e\u5b58\u4f7f\u7528\u7387\u7684\u6700\u5927\u3001\u6700\u5c0f\u548c\u5e73\u5747\u503c
          "},{"location":"admin/ghippo/report-billing/report.html#_3","title":"\u62a5\u8868\u7ef4\u5ea6","text":"

          \u76ee\u524d\u652f\u6301\u4ee5\u4e0b\u51e0\u79cd\u62a5\u8868\uff1a

          • Cluster report: shows the maximum, minimum, and average values of CPU usage, memory usage, storage usage, and GPU memory usage across all clusters over a period of time, as well as the number of nodes in each cluster during that period. Clicking the node count jumps straight to the node report to view the usage of that cluster's nodes during the same period.
          • Node report: shows the maximum, minimum, and average values of CPU usage, memory usage, storage usage, and GPU memory usage of all nodes over a period of time, as well as each node's IP, type, and owning cluster.
          • Pod report: shows the maximum, minimum, and average values of CPU usage, memory usage, storage usage, and GPU memory usage of all pods over a period of time, as well as each pod's owning namespace, cluster, and workspace.
          • Workspace report: shows the maximum, minimum, and average values of CPU usage, memory usage, storage usage, and GPU memory usage of all workspaces over a period of time, as well as their namespace and pod counts. Clicking the namespace count jumps straight to the namespace report to view the usage of that workspace's namespaces during the same period; the usage of the workspace's pods can be viewed the same way.
          • Namespace report: shows the maximum, minimum, and average values of CPU usage, memory usage, storage usage, and GPU memory usage of all namespaces over a period of time, as well as their pod counts, owning clusters, and owning workspaces. Clicking the pod count jumps straight to the pod report to view the usage of that namespace's pods during the same period.
          • Audit report: split into two reports, user operations and resource operations. The user operations report counts a single user's operations over a period of time, including how many succeeded and failed; the resource operations report counts all users' operations on a given type of resource.
          • Alert report: shows the number of alerts on all nodes over a period of time, broken down into how many critical, severe, and warning alerts were produced.
          "},{"location":"admin/ghippo/report-billing/report.html#_4","title":"Steps","text":"
          1. Log in to the AI computing platform as a user with the Admin role. Click Global Management -> Operations Management at the bottom of the left navigation bar.

          2. In Operations Management, switch between the menus to view the cluster, node, pod, and other reports.

          "},{"location":"admin/ghippo/troubleshooting/ghippo01.html","title":"istio-ingressgateway fails to start after restarting the cluster (cloud hosts)?","text":"

          The error message is shown in the figure below:

          Possible cause: the jwksUri address in the RequestAuthentication CR is unreachable, so istiod cannot distribute configuration to istio-ingressgateway (Istio 1.15 works around this bug: https://github.com/istio/istio/pull/39341/).

          Solution:

          1. Back up the RequestAuthentication ghippo CR.

            kubectl get RequestAuthentication ghippo -n istio-system -o yaml > ghippo-ra.yaml
          2. Delete the RequestAuthentication ghippo CR.

            kubectl delete RequestAuthentication ghippo -n istio-system
          3. Restart Istio.

            kubectl rollout restart deploy/istiod -n istio-system
            kubectl rollout restart deploy/istio-ingressgateway -n istio-system
          4. Re-apply the RequestAuthentication ghippo CR.

            kubectl apply -f ghippo-ra.yaml

            Note

            Before applying the RequestAuthentication ghippo CR, make sure that ghippo-apiserver and ghippo-keycloak have started normally.
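
            A quick way to verify both components are up before step 4 is sketched below; the deployment name and the grep-based pod check are assumptions about how ghippo is deployed, so adjust them to your environment.

            # Check that ghippo-apiserver has finished rolling out (deployment name assumed)
            kubectl -n ghippo-system rollout status deploy/ghippo-apiserver
            # Check that the keycloak pods are Running and Ready (filtered by name, no label assumed)
            kubectl -n ghippo-system get pods | grep keycloak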

          "},{"location":"admin/ghippo/troubleshooting/ghippo02.html","title":"\u767b\u5f55\u65e0\u9650\u5faa\u73af\uff0c\u62a5\u9519 401 \u6216 403","text":"

          \u51fa\u73b0\u8fd9\u4e2a\u95ee\u9898\u539f\u56e0\u4e3a\uff1aghippo-keycloak \u8fde\u63a5\u7684 Mysql \u6570\u636e\u5e93\u51fa\u73b0\u6545\u969c, \u5bfc\u81f4 OIDC Public keys \u88ab\u91cd\u7f6e

          \u5728\u5168\u5c40\u7ba1\u7406 0.11.1 \u53ca\u4ee5\u4e0a\u7248\u672c\uff0c\u60a8\u53ef\u4ee5\u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528 helm \u66f4\u65b0\u5168\u5c40\u7ba1\u7406\u914d\u7f6e\u6587\u4ef6\u5373\u53ef\u6062\u590d\u6b63\u5e38\u3002

          # \u66f4\u65b0 helm \u4ed3\u5e93\nhelm repo update ghippo\n\n# \u5907\u4efd ghippo \u53c2\u6570\nhelm get values ghippo -n ghippo-system -o yaml > ghippo-values-bak.yaml\n\n# \u83b7\u53d6\u5f53\u524d\u90e8\u7f72\u7684 ghippo \u7248\u672c\u53f7\nversion=$(helm get notes ghippo -n ghippo-system | grep \"Chart Version\" | awk -F ': ' '{ print $2 }')\n\n# \u6267\u884c\u66f4\u65b0\u64cd\u4f5c, \u4f7f\u914d\u7f6e\u6587\u4ef6\u751f\u6548\nhelm upgrade ghippo ghippo/ghippo \\\n-n ghippo-system \\\n-f ./ghippo-values-bak.yaml \\\n--version ${version}\n
          "},{"location":"admin/ghippo/troubleshooting/ghippo03.html","title":"Keycloak \u65e0\u6cd5\u542f\u52a8","text":""},{"location":"admin/ghippo/troubleshooting/ghippo03.html#_1","title":"\u5e38\u89c1\u6545\u969c","text":""},{"location":"admin/ghippo/troubleshooting/ghippo03.html#_2","title":"\u6545\u969c\u8868\u73b0","text":"

          MySQL \u5df2\u5c31\u7eea\uff0c\u65e0\u62a5\u9519\u3002\u5728\u5b89\u88c5\u5168\u5c40\u7ba1\u7406\u540e keycloak \u65e0\u6cd5\u542f\u52a8\uff08> 10 \u6b21\uff09\u3002

          "},{"location":"admin/ghippo/troubleshooting/ghippo03.html#_3","title":"\u68c0\u67e5\u9879","text":"
          • \u5982\u679c\u6570\u636e\u5e93\u662f MySQL\uff0c\u68c0\u67e5 keycloak database \u7f16\u7801\u662f\u5426\u662f UTF8\u3002
          • \u68c0\u67e5\u4ece keycloak \u5230\u6570\u636e\u5e93\u7684\u7f51\u7edc\uff0c\u68c0\u67e5\u6570\u636e\u5e93\u8d44\u6e90\u662f\u5426\u5145\u8db3\uff0c\u5305\u62ec\u4f46\u4e0d\u9650\u4e8e\u8d44\u6e90\u9650\u5236\u3001\u5b58\u50a8\u7a7a\u95f4\u3001\u7269\u7406\u673a\u8d44\u6e90\u3002
          "},{"location":"admin/ghippo/troubleshooting/ghippo03.html#_4","title":"\u89e3\u51b3\u6b65\u9aa4","text":"
          1. \u68c0\u67e5 MySQL \u8d44\u6e90\u5360\u7528\u662f\u5426\u5230\u8fbe limit \u9650\u5236
          2. \u68c0\u67e5 MySQL \u4e2d database keycloak table \u7684\u6570\u91cf\u662f\u4e0d\u662f 95 \uff08Keycloak \u4e0d\u540c\u7248\u672c\u6570\u636e\u5e93\u6570\u91cf\u53ef\u80fd\u4f1a\u4e0d\u4e00\u6837\uff0c\u53ef\u4ee5\u4e0e\u540c\u7248\u672c\u7684\u5f00\u53d1\u6216\u6d4b\u8bd5\u73af\u5883\u7684 Keycloak \u6570\u636e\u5e93\u6570\u91cf\u8fdb\u884c\u6bd4\u8f83\uff09\uff0c \u5982\u6570\u91cf\u5c11\u4e86\uff0c\u5219\u8bf4\u660e\u6570\u636e\u5e93\u8868\u521d\u59cb\u5316\u6709\u95ee\u9898\uff08\u67e5\u8be2\u8868\u6570\u91cf\u547d\u4ee4\u63d0\u793a\u4e3a\uff1ashow tables;\uff09
          3. \u5220\u9664 keycloak database \u5e76\u521b\u5efa\uff0c\u63d0\u793a CREATE DATABASE IF NOT EXISTS keycloak CHARACTER SET utf8
          4. \u91cd\u542f Keycloak Pod \u89e3\u51b3\u95ee\u9898
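
          A minimal sketch of steps 2-4, assuming root access to the MySQL instance backing Keycloak; the connection parameters and the Keycloak pod label are placeholders, not documented product values.

          # Count the tables and compare against a healthy environment (host/credentials are placeholders)
          mysql -h <mysql-host> -u root -p -e "SHOW TABLES FROM keycloak;"
          # Recreate the keycloak database with the UTF8 encoding required above
          mysql -h <mysql-host> -u root -p -e "DROP DATABASE IF EXISTS keycloak; CREATE DATABASE IF NOT EXISTS keycloak CHARACTER SET utf8;"
          # Restart the Keycloak pods so Keycloak re-initializes its tables (label selector assumed)
          kubectl -n ghippo-system delete pod -l app=keycloak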
          "},{"location":"admin/ghippo/troubleshooting/ghippo03.html#cpu-does-not-support-86-64-v2","title":"CPU does not support \u00d786-64-v2","text":""},{"location":"admin/ghippo/troubleshooting/ghippo03.html#_5","title":"\u6545\u969c\u8868\u73b0","text":"

          keycloak \u65e0\u6cd5\u6b63\u5e38\u542f\u52a8\uff0ckeycloak pod \u8fd0\u884c\u72b6\u6001\u4e3a CrashLoopBackOff \u5e76\u4e14 keycloak \u7684 log \u51fa\u73b0\u5982\u4e0b\u56fe\u6240\u793a\u7684\u4fe1\u606f

          "},{"location":"admin/ghippo/troubleshooting/ghippo03.html#_6","title":"\u68c0\u67e5\u9879","text":"

          \u8fd0\u884c\u4e0b\u9762\u7684\u68c0\u67e5\u811a\u672c\uff0c\u67e5\u8be2\u5f53\u524d\u8282\u70b9 cpu \u7684 x86-64\u67b6\u6784\u7684\u7279\u5f81\u7ea7\u522b

          cat <<\"EOF\" > detect-cpu.sh\n#!/bin/sh -eu\n\nflags=$(cat /proc/cpuinfo | grep flags | head -n 1 | cut -d: -f2)\n\nsupports_v2='awk \"/cx16/&&/lahf/&&/popcnt/&&/sse4_1/&&/sse4_2/&&/ssse3/ {found=1} END {exit !found}\"'\nsupports_v3='awk \"/avx/&&/avx2/&&/bmi1/&&/bmi2/&&/f16c/&&/fma/&&/abm/&&/movbe/&&/xsave/ {found=1} END {exit !found}\"'\nsupports_v4='awk \"/avx512f/&&/avx512bw/&&/avx512cd/&&/avx512dq/&&/avx512vl/ {found=1} END {exit !found}\"'\n\necho \"$flags\" | eval $supports_v2 || exit 2 && echo \"CPU supports x86-64-v2\"\necho \"$flags\" | eval $supports_v3 || exit 3 && echo \"CPU supports x86-64-v3\"\necho \"$flags\" | eval $supports_v4 || exit 4 && echo \"CPU supports x86-64-v4\"\nEOF\n\nchmod +x detect-cpu.sh\nsh detect-cpu.sh\n

          \u6267\u884c\u4e0b\u9762\u547d\u4ee4\u67e5\u770b\u5f53\u524d cpu \u7684\u7279\u6027\uff0c\u5982\u679c\u8f93\u51fa\u4e2d\u5305\u542b sse4_2\uff0c\u5219\u8868\u793a\u4f60\u7684\u5904\u7406\u5668\u652f\u6301SSE 4.2\u3002

          lscpu | grep sse4_2\n

          "},{"location":"admin/ghippo/troubleshooting/ghippo03.html#_7","title":"\u89e3\u51b3\u65b9\u6cd5","text":"

          \u9700\u8981\u5347\u7ea7\u4f60\u7684\u4e91\u4e3b\u673a\u6216\u7269\u7406\u673a CPU \u4ee5\u652f\u6301 x86-64-v2 \u53ca\u4ee5\u4e0a\uff0c\u786e\u4fddx86 CPU \u6307\u4ee4\u96c6\u652f\u6301 sse4.2\uff0c\u5982\u4f55\u5347\u7ea7\u9700\u8981\u4f60\u54a8\u8be2\u4e91\u4e3b\u673a\u5e73\u53f0\u63d0\u4f9b\u5546\u6216\u7740\u7269\u7406\u673a\u63d0\u4f9b\u5546\u3002

          \u8be6\u89c1\uff1ahttps://github.com/keycloak/keycloak/issues/17290

          "},{"location":"admin/ghippo/troubleshooting/ghippo04.html","title":"\u5355\u72ec\u5347\u7ea7\u5168\u5c40\u7ba1\u7406\u65f6\u5347\u7ea7\u5931\u8d25","text":"

          \u82e5\u5347\u7ea7\u5931\u8d25\u65f6\u5305\u542b\u5982\u4e0b\u4fe1\u606f\uff0c\u53ef\u4ee5\u53c2\u8003\u79bb\u7ebf\u5347\u7ea7\u4e2d\u7684\u66f4\u65b0 ghippo crd \u6b65\u9aa4\u5b8c\u6210 crd \u5b89\u88c5

          ensure CRDs are installed first\n
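
          A hedged sketch of installing the CRDs from the chart before retrying the upgrade; pulling the chart and applying a crds/ directory follows the common Helm chart layout and is an assumption, not a documented ghippo command.

          # Download and unpack the currently deployed chart version (set ${version} as in the steps above)
          helm pull ghippo/ghippo --version ${version} --untar
          # Install the CRDs shipped with the chart, then retry the upgrade
          kubectl apply -f ghippo/crds/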
          "},{"location":"admin/ghippo/workspace/folder-permission.html","title":"\u6587\u4ef6\u5939\u6743\u9650\u8bf4\u660e","text":"

          \u6587\u4ef6\u5939\u5177\u6709\u6743\u9650\u6620\u5c04\u80fd\u529b\uff0c\u80fd\u591f\u5c06\u7528\u6237/\u7528\u6237\u7ec4\u5728\u672c\u6587\u4ef6\u5939\u7684\u6743\u9650\u6620\u5c04\u5230\u5176\u4e0b\u7684\u5b50\u6587\u4ef6\u5939\u3001\u5de5\u4f5c\u7a7a\u95f4\u4ee5\u53ca\u8d44\u6e90\u4e0a\u3002

          \u82e5\u7528\u6237/\u7528\u6237\u7ec4\u5728\u672c\u6587\u4ef6\u5939\u662f Folder Admin \u89d2\u8272\uff0c\u6620\u5c04\u5230\u5b50\u6587\u4ef6\u5939\u4ecd\u4e3a Folder Admin \u89d2\u8272\uff0c\u6620\u5c04\u5230\u5176\u4e0b\u7684\u5de5\u4f5c\u7a7a\u95f4\u5219\u4e3a Workspace Admin\uff1b \u82e5\u5728 \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 -> \u8d44\u6e90\u7ec4 \u4e2d\u7ed1\u5b9a\u4e86 Namespace\uff0c\u5219\u6620\u5c04\u540e\u8be5\u7528\u6237/\u7528\u6237\u7ec4\u540c\u65f6\u8fd8\u662f Namespace Admin\u3002

          Note

          \u6587\u4ef6\u5939\u7684\u6743\u9650\u6620\u5c04\u80fd\u529b\u4e0d\u4f1a\u4f5c\u7528\u5230\u5171\u4eab\u8d44\u6e90\u4e0a\uff0c\u56e0\u4e3a\u5171\u4eab\u662f\u5c06\u96c6\u7fa4\u7684\u4f7f\u7528\u6743\u9650\u5171\u4eab\u7ed9\u591a\u4e2a\u5de5\u4f5c\u7a7a\u95f4\uff0c\u800c\u4e0d\u662f\u5c06\u7ba1\u7406\u6743\u9650\u53d7\u8ba9\u7ed9\u5de5\u4f5c\u7a7a\u95f4\uff0c\u56e0\u6b64\u4e0d\u4f1a\u5b9e\u73b0\u6743\u9650\u7ee7\u627f\u548c\u89d2\u8272\u6620\u5c04\u3002

          "},{"location":"admin/ghippo/workspace/folder-permission.html#_2","title":"\u5e94\u7528\u573a\u666f","text":"

          \u6587\u4ef6\u5939\u5177\u6709\u5c42\u7ea7\u80fd\u529b\uff0c\u56e0\u6b64\u5c06\u6587\u4ef6\u5939\u5bf9\u5e94\u4e8e\u4f01\u4e1a\u4e2d\u7684\u90e8\u95e8/\u4f9b\u5e94\u5546/\u9879\u76ee\u7b49\u5c42\u7ea7\u65f6\uff0c

          • \u82e5\u7528\u6237/\u7528\u6237\u7ec4\u5728\u4e00\u7ea7\u90e8\u95e8\u5177\u6709\u7ba1\u7406\u6743\u9650\uff08Admin\uff09\uff0c\u5176\u4e0b\u7684\u4e8c\u7ea7\u3001\u4e09\u7ea7\u3001\u56db\u7ea7\u90e8\u95e8\u6216\u9879\u76ee\u540c\u6837\u5177\u6709\u7ba1\u7406\u6743\u9650\uff1b
          • \u82e5\u7528\u6237/\u7528\u6237\u7ec4\u5728\u4e00\u7ea7\u90e8\u95e8\u5177\u6709\u4f7f\u7528\u6743\u9650\uff08Editor\uff09\uff0c\u5176\u4e0b\u7684\u4e8c\u7ea7\u3001\u4e09\u7ea7\u3001\u56db\u7ea7\u90e8\u95e8\u6216\u9879\u76ee\u540c\u6837\u5177\u6709\u4f7f\u7528\u6743\u9650\uff1b
          • \u82e5\u7528\u6237/\u7528\u6237\u7ec4\u5728\u4e00\u7ea7\u90e8\u95e8\u5177\u6709\u53ea\u8bfb\u6743\u9650\uff08Viewer\uff09\uff0c\u5176\u4e0b\u7684\u4e8c\u7ea7\u3001\u4e09\u7ea7\u3001\u56db\u7ea7\u90e8\u95e8\u6216\u9879\u76ee\u540c\u6837\u5177\u6709\u53ea\u8bfb\u6743\u9650\u3002
          \u5bf9\u8c61 \u64cd\u4f5c Folder Admin Folder Editor Folder Viewer \u5bf9\u6587\u4ef6\u5939\u672c\u8eab \u67e5\u770b \u2713 \u2713 \u2713 \u6388\u6743 \u2713 \u2717 \u2717 \u4fee\u6539\u522b\u540d \u2713 \u2717 \u2717 \u5bf9\u5b50\u6587\u4ef6\u5939 \u521b\u5efa \u2713 \u2717 \u2717 \u67e5\u770b \u2713 \u2713 \u2713 \u6388\u6743 \u2713 \u2717 \u2717 \u4fee\u6539\u522b\u540d \u2713 \u2717 \u2717 \u5bf9\u5176\u4e0b\u7684\u5de5\u4f5c\u7a7a\u95f4 \u521b\u5efa \u2713 \u2717 \u2717 \u67e5\u770b \u2713 \u2713 \u2713 \u6388\u6743 \u2713 \u2717 \u2717 \u4fee\u6539\u522b\u540d \u2713 \u2717 \u2717 \u5bf9\u5176\u4e0b\u7684\u5de5\u4f5c\u7a7a\u95f4 - \u8d44\u6e90\u7ec4 \u67e5\u770b \u2713 \u2713 \u2713 \u8d44\u6e90\u7ed1\u5b9a \u2713 \u2717 \u2717 \u89e3\u9664\u7ed1\u5b9a \u2713 \u2717 \u2717 \u5bf9\u5176\u4e0b\u7684\u5de5\u4f5c\u7a7a\u95f4 - \u5171\u4eab\u8d44\u6e90 \u67e5\u770b \u2713 \u2713 \u2713 \u65b0\u589e\u5171\u4eab \u2713 \u2717 \u2717 \u89e3\u9664\u5171\u4eab \u2713 \u2717 \u2717 \u8d44\u6e90\u9650\u989d \u2713 \u2717 \u2717"},{"location":"admin/ghippo/workspace/folders.html","title":"\u521b\u5efa/\u5220\u9664\u6587\u4ef6\u5939","text":"

          Folders have a permission mapping capability: a user's/group's permissions on a folder can be mapped onto its subfolders, workspaces, and resources.

          Follow the steps below to create a folder.

          1. Log in to the AI computing platform as a user with the admin/folder admin role, and click Global Management -> Workspaces and Hierarchy at the bottom of the left navigation bar.

          2. Click the Create Folder button in the upper-right corner.

          3. Fill in the folder name, parent folder, and other information, then click OK to finish creating the folder.

          Tip

          After creation, the folder name appears in the tree structure on the left, with different icons distinguishing workspaces from folders.

          Note

          Select a folder and click ┇ on its right to edit or delete it.

          • When resources exist in the resource groups or shared resources under a folder, the folder cannot be deleted; unbind all the resources first, then delete it.

          • When the microservice engine module has integrated registries under a folder, the folder cannot be deleted; remove all the integrated registries first, then delete the folder.

          "},{"location":"admin/ghippo/workspace/quota.html","title":"\u8d44\u6e90\u914d\u989d\uff08Quota\uff09","text":"

          \u5171\u4eab\u8d44\u6e90\u5e76\u975e\u610f\u5473\u7740\u88ab\u5171\u4eab\u8005\u53ef\u4ee5\u65e0\u9650\u5236\u5730\u4f7f\u7528\u88ab\u5171\u4eab\u7684\u8d44\u6e90\u3002 Admin\u3001Kpanda Owner \u548c Workspace Admin \u53ef\u4ee5\u901a\u8fc7\u5171\u4eab\u8d44\u6e90\u4e2d\u7684 \u8d44\u6e90\u914d\u989d \u529f\u80fd\u9650\u5236\u67d0\u4e2a\u7528\u6237\u7684\u6700\u5927\u4f7f\u7528\u989d\u5ea6\u3002 \u82e5\u4e0d\u9650\u5236\uff0c\u5219\u8868\u793a\u53ef\u4ee5\u65e0\u9650\u5236\u4f7f\u7528\u3002

          • CPU \u8bf7\u6c42\uff08Core\uff09
          • CPU \u9650\u5236\uff08Core\uff09
          • \u5185\u5b58\u8bf7\u6c42\uff08MB\uff09
          • \u5185\u5b58\u9650\u5236\uff08MB\uff09
          • \u5b58\u50a8\u8bf7\u6c42\u603b\u91cf\uff08GB\uff09
          • \u5b58\u50a8\u5377\u58f0\u660e\uff08\u4e2a\uff09
          • GPU \u7c7b\u578b\u3001\u89c4\u683c\u3001\u6570\u91cf\uff08\u5305\u62ec\u4f46\u4e0d\u9650\u4e8e Nvidia\u3001Ascend\u3001lluvatar\u7b49GPU\u5361\u7c7b\u578b\uff09

          \u4e00\u4e2a\u8d44\u6e90\uff08\u96c6\u7fa4\uff09\u53ef\u4ee5\u88ab\u591a\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u5171\u4eab\uff0c\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u4e5f\u53ef\u4ee5\u540c\u65f6\u4f7f\u7528\u591a\u4e2a\u5171\u4eab\u96c6\u7fa4\u4e2d\u7684\u8d44\u6e90\u3002

          "},{"location":"admin/ghippo/workspace/quota.html#_1","title":"\u8d44\u6e90\u7ec4\u548c\u5171\u4eab\u8d44\u6e90","text":"

          \u5171\u4eab\u8d44\u6e90\u548c\u8d44\u6e90\u7ec4\u4e2d\u7684\u96c6\u7fa4\u8d44\u6e90\u5747\u6765\u81ea\u5bb9\u5668\u7ba1\u7406\uff0c\u4f46\u662f\u96c6\u7fa4\u7ed1\u5b9a\u548c\u5171\u4eab\u7ed9\u540c\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u5c06\u4f1a\u4ea7\u751f\u4e24\u79cd\u622a\u7136\u4e0d\u540c\u7684\u6548\u679c\u3002

          1. \u7ed1\u5b9a\u8d44\u6e90

            \u4f7f\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u7684\u7528\u6237/\u7528\u6237\u7ec4\u5177\u6709\u8be5\u96c6\u7fa4\u7684\u5168\u90e8\u7ba1\u7406\u548c\u4f7f\u7528\u6743\u9650\uff0cWorkspace Admin \u5c06\u88ab\u6620\u5c04\u4e3a Cluster Admin\u3002 Workspace Admin \u80fd\u591f\u8fdb\u5165\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7ba1\u7406\u8be5\u96c6\u7fa4\u3002

            Note

            \u5f53\u524d\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u6682\u65e0 Cluster Editor \u548c Cluster Viewer \u89d2\u8272\uff0c\u56e0\u6b64 Workspace Editor\u3001Workspace Viewer \u8fd8\u65e0\u6cd5\u6620\u5c04\u3002

          2. \u65b0\u589e\u5171\u4eab\u8d44\u6e90

            \u4f7f\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u7684\u7528\u6237/\u7528\u6237\u7ec4\u5177\u6709\u8be5\u96c6\u7fa4\u8d44\u6e90\u7684\u4f7f\u7528\u6743\u9650\uff0c\u8fd9\u4e9b\u8d44\u6e90\u53ef\u4ee5\u5728\u521b\u5efa\u547d\u540d\u7a7a\u95f4\uff08Namespace\uff09\u65f6\u4f7f\u7528\u3002

            \u4e0e\u8d44\u6e90\u7ec4\u4e0d\u540c\uff0c\u5c06\u96c6\u7fa4\u5171\u4eab\u5230\u5de5\u4f5c\u7a7a\u95f4\u65f6\uff0c\u7528\u6237\u5728\u5de5\u4f5c\u7a7a\u95f4\u7684\u89d2\u8272\u4e0d\u4f1a\u6620\u5c04\u5230\u8d44\u6e90\u4e0a\uff0c\u56e0\u6b64 Workspace Admin \u4e0d\u4f1a\u88ab\u6620\u5c04\u4e3a Cluster admin\u3002

          \u672c\u8282\u5c55\u793a 3 \u4e2a\u4e0e\u8d44\u6e90\u914d\u989d\u6709\u5173\u7684\u573a\u666f\u3002

          "},{"location":"admin/ghippo/workspace/quota.html#_2","title":"\u521b\u5efa\u547d\u540d\u7a7a\u95f4","text":"

          \u521b\u5efa\u547d\u540d\u7a7a\u95f4\u65f6\u4f1a\u6d89\u53ca\u5230\u8d44\u6e90\u914d\u989d\u3002

          1. \u5728\u5de5\u4f5c\u7a7a\u95f4 ws01 \u65b0\u589e\u4e00\u4e2a\u5171\u4eab\u96c6\u7fa4\u3002

          2. \u5728\u5e94\u7528\u5de5\u4f5c\u53f0\u9009\u62e9\u5de5\u4f5c\u7a7a\u95f4 ws01 \u548c\u5171\u4eab\u96c6\u7fa4\uff0c\u521b\u5efa\u547d\u540d\u7a7a\u95f4 ns01\u3002

            • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u4e2d\u672a\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5219\u521b\u5efa\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4e0d\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\u3002
            • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u4e2d\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff08\u4f8b\u5982 CPU \u8bf7\u6c42 = 100 core\uff09\uff0c\u5219\u521b\u5efa\u547d\u540d\u7a7a\u95f4\u65f6 CPU \u8bf7\u6c42 \u2264 100 core \u3002
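
          A minimal sketch of what such a cap looks like at the Kubernetes level, assuming direct kubectl access to the shared cluster; the quota name is illustrative, and the platform normally manages these objects for you.

          # Create the namespace, then cap its total CPU requests with a ResourceQuota
          kubectl create namespace ns01
          kubectl create quota ns01-quota --hard=requests.cpu=100 --namespace=ns01
          # Verify the quota (used vs. hard limits)
          kubectl describe quota ns01-quota --namespace=ns01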
          "},{"location":"admin/ghippo/workspace/quota.html#_3","title":"\u547d\u540d\u7a7a\u95f4\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4","text":"

          \u524d\u63d0\uff1a\u5de5\u4f5c\u7a7a\u95f4 ws01 \u5df2\u65b0\u589e\u5171\u4eab\u96c6\u7fa4\uff0c\u64cd\u4f5c\u8005\u4e3a Workspace Admin + Kpanda Owner \u6216 Admin \u89d2\u8272\u3002

          \u4ee5\u4e0b\u4e24\u79cd\u7ed1\u5b9a\u65b9\u5f0f\u7684\u6548\u679c\u76f8\u540c\u3002

          • \u5728\u5bb9\u5668\u7ba1\u7406\u4e2d\u5c06\u521b\u5efa\u7684\u547d\u540d\u7a7a\u95f4 ns01 \u7ed1\u5b9a\u5230 ws01

            • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u672a\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5219\u547d\u540d\u7a7a\u95f4 ns01 \u65e0\u8bba\u662f\u5426\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5747\u53ef\u6210\u529f\u7ed1\u5b9a\u3002
            • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d CPU \u8bf7\u6c42 = 100 core \uff0c\u5219\u547d\u540d\u7a7a\u95f4 ns01 \u5fc5\u987b\u6ee1\u8db3 CPU \u8bf7\u6c42 \u2264 100 core \u624d\u80fd\u7ed1\u5b9a\u6210\u529f\u3002
          • \u5728\u5168\u5c40\u7ba1\u7406\u4e2d\uff0c\u5c06\u547d\u540d\u7a7a\u95f4 ns01 \u7ed1\u5b9a\u5230 ws01

            • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u672a\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5219\u547d\u540d\u7a7a\u95f4 ns01 \u65e0\u8bba\u662f\u5426\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5747\u53ef\u6210\u529f\u7ed1\u5b9a\u3002
            • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d CPU \u8bf7\u6c42 = 100 core \uff0c\u5219\u547d\u540d\u7a7a\u95f4 ns01 \u5fc5\u987b\u6ee1\u8db3 CPU \u8bf7\u6c42 \u2264 100 core \u624d\u80fd\u7ed1\u5b9a\u6210\u529f\u3002
          "},{"location":"admin/ghippo/workspace/quota.html#_4","title":"\u4ece\u5de5\u4f5c\u7a7a\u95f4\u89e3\u7ed1\u547d\u540d\u7a7a\u95f4","text":"

          \u4ee5\u4e0b\u4e24\u79cd\u89e3\u7ed1\u65b9\u5f0f\u7684\u6548\u679c\u76f8\u540c\u3002

          • \u5728\u5bb9\u5668\u7ba1\u7406\u4e2d\u5c06\u547d\u540d\u7a7a\u95f4 ns01 \u4ece\u5de5\u4f5c\u7a7a\u95f4 ws01 \u89e3\u7ed1

            • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u4e2d\u672a\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5219\u547d\u540d\u7a7a\u95f4 ns01 \u65e0\u8bba\u662f\u5426\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u89e3\u7ed1\u540e\u5747\u4e0d\u4f1a\u5bf9\u8d44\u6e90\u914d\u989d\u4ea7\u751f\u5f71\u54cd\u3002
            • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d CPU \u8bf7\u6c42 = 100 core \uff0c\u547d\u540d\u7a7a\u95f4 ns01 \u4e5f\u8bbe\u7f6e\u4e86\u8d44\u6e90\u914d\u989d\uff0c\u5219\u89e3\u7ed1\u540e\u5c06\u91ca\u653e\u76f8\u5e94\u7684\u8d44\u6e90\u989d\u5ea6\u3002
          • \u5728\u5168\u5c40\u7ba1\u7406\u4e2d\u5c06\u547d\u540d\u7a7a\u95f4 ns01 \u4ece\u5de5\u4f5c\u7a7a\u95f4 ws01 \u89e3\u7ed1

            • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u672a\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5219\u547d\u540d\u7a7a\u95f4 ns01 \u65e0\u8bba\u662f\u5426\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u89e3\u7ed1\u540e\u5747\u4e0d\u4f1a\u5bf9\u8d44\u6e90\u914d\u989d\u4ea7\u751f\u5f71\u54cd\u3002
            • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d CPU \u8bf7\u6c42 = 100 core \uff0c\u547d\u540d\u7a7a\u95f4 ns01 \u4e5f\u8bbe\u7f6e\u4e86\u8d44\u6e90\u914d\u989d\uff0c\u5219\u89e3\u7ed1\u540e\u5c06\u91ca\u653e\u76f8\u5e94\u7684\u8d44\u6e90\u989d\u5ea6\u3002
          "},{"location":"admin/ghippo/workspace/res-gp-and-shared-res.html","title":"\u8d44\u6e90\u7ec4\u4e0e\u5171\u4eab\u8d44\u6e90\u7684\u533a\u522b","text":"

          \u8d44\u6e90\u7ec4\u4e0e\u5171\u4eab\u8d44\u6e90\u5747\u652f\u6301\u7ed1\u5b9a\u96c6\u7fa4\uff0c\u4f46\u4f7f\u7528\u4e0a\u5b58\u5728\u5f88\u5927\u533a\u522b\u3002

          "},{"location":"admin/ghippo/workspace/res-gp-and-shared-res.html#_2","title":"\u4f7f\u7528\u573a\u666f\u533a\u522b","text":"
          • \u8d44\u6e90\u7ec4\u7ed1\u5b9a\u96c6\u7fa4\uff1a\u8d44\u6e90\u7ec4\u7ed1\u5b9a\u96c6\u7fa4\u901a\u5e38\u88ab\u7528\u6765\u6279\u91cf\u6388\u6743\u3002\u8d44\u6e90\u7ec4\u7ed1\u5b9a\u96c6\u7fa4\u540e\uff0c \u5de5\u4f5c\u7a7a\u95f4\u7ba1\u7406\u5458\u5c06\u88ab\u6620\u5c04\u4e3a\u96c6\u7fa4\u7ba1\u7406\u5458\uff0c\u80fd\u591f\u7ba1\u7406\u5e76\u4f7f\u7528\u96c6\u7fa4\u8d44\u6e90\u3002
          • \u5171\u4eab\u8d44\u6e90\u7ed1\u5b9a\u96c6\u7fa4\uff1a\u8d44\u6e90\u5171\u4eab\u7ed1\u5b9a\u96c6\u7fa4\u901a\u5e38\u88ab\u7528\u6765\u505a\u8d44\u6e90\u9650\u989d\u3002 \u5178\u578b\u7684\u573a\u666f\u662f\u5e73\u53f0\u7ba1\u7406\u5458\u5c06\u96c6\u7fa4\u5206\u914d\u7ed9\u4e00\u7ea7\u4f9b\u5e94\u5546\u540e\uff0c\u518d\u7531\u4e00\u7ea7\u4f9b\u5e94\u5546\u5206\u914d\u7ed9\u4e8c\u7ea7\u4f9b\u5e94\u5546\u5e76\u5bf9\u4e8c\u7ea7\u4f9b\u5e94\u5546\u8fdb\u884c\u8d44\u6e90\u9650\u989d\u3002

          \u8bf4\u660e\uff1a\u5728\u8be5\u573a\u666f\u4e2d\uff0c\u9700\u8981\u5e73\u53f0\u7ba1\u7406\u5458\u5bf9\u4e8c\u7ea7\u4f9b\u5e94\u5546\u8fdb\u884c\u8d44\u6e90\u9650\u5236\uff0c\u6682\u65f6\u8fd8\u4e0d\u652f\u6301\u4e00\u7ea7\u4f9b\u5e94\u5546\u9650\u5236\u4e8c\u7ea7\u4f9b\u5e94\u5546\u7684\u96c6\u7fa4\u989d\u5ea6\u3002

          "},{"location":"admin/ghippo/workspace/res-gp-and-shared-res.html#_3","title":"\u96c6\u7fa4\u989d\u5ea6\u7684\u4f7f\u7528\u533a\u522b","text":"
          • \u8d44\u6e90\u7ec4\u7ed1\u5b9a\u96c6\u7fa4\uff1a\u5de5\u4f5c\u7a7a\u95f4\u7684\u7ba1\u7406\u5458\u5c06\u88ab\u6620\u5c04\u4e3a\u8be5\u96c6\u7fa4\u7684\u7ba1\u7406\u5458\uff0c\u76f8\u5f53\u4e8e\u5728\u5bb9\u5668\u7ba1\u7406-\u6743\u9650\u7ba1\u7406\u4e2d\u88ab\u6388\u4e88 Cluster Admin \u89d2\u8272\uff0c \u80fd\u591f\u65e0\u9650\u5236\u652f\u914d\u8be5\u96c6\u7fa4\u8d44\u6e90\uff0c\u7ba1\u7406\u8282\u70b9\u7b49\u91cd\u8981\u5185\u5bb9\uff0c\u4e14\u8d44\u6e90\u7ec4\u4e0d\u80fd\u591f\u88ab\u8d44\u6e90\u9650\u989d\u3002
          • \u5171\u4eab\u8d44\u6e90\u7ed1\u5b9a\u8d44\u6e90\uff1a\u5de5\u4f5c\u7a7a\u95f4\u7ba1\u7406\u5458\u4ec5\u80fd\u591f\u4f7f\u7528\u96c6\u7fa4\u4e2d\u7684\u989d\u5ea6\u5728\u5e94\u7528\u5de5\u4f5c\u53f0\u521b\u5efa\u547d\u540d\u7a7a\u95f4\uff0c\u4e0d\u5177\u5907\u96c6\u7fa4\u7684\u7ba1\u7406\u6743\u9650\u3002 \u82e5\u5bf9\u8be5\u5de5\u4f5c\u7a7a\u95f4\u9650\u5236\u989d\u5ea6\uff0c\u5219\u5de5\u4f5c\u7a7a\u95f4\u7ba1\u7406\u4ec5\u80fd\u591f\u5728\u989d\u5ea6\u8303\u56f4\u5185\u521b\u5efa\u5e76\u4f7f\u7528\u547d\u540d\u7a7a\u95f4\u3002
          "},{"location":"admin/ghippo/workspace/res-gp-and-shared-res.html#_4","title":"\u8d44\u6e90\u7c7b\u578b\u7684\u533a\u522b","text":"
          • \u8d44\u6e90\u7ec4\uff1a\u80fd\u591f\u7ed1\u5b9a\u96c6\u7fa4\u3001\u96c6\u7fa4-\u547d\u540d\u7a7a\u95f4\u3001\u591a\u4e91\u3001\u591a\u4e91-\u547d\u540d\u7a7a\u95f4\u3001\u7f51\u683c\u3001\u7f51\u683c-\u547d\u540d\u7a7a\u95f4
          • \u5171\u4eab\u8d44\u6e90\uff1a\u4ec5\u80fd\u591f\u7ed1\u5b9a\u96c6\u7fa4
          "},{"location":"admin/ghippo/workspace/res-gp-and-shared-res.html#_5","title":"\u8d44\u6e90\u7ec4\u4e0e\u5171\u4eab\u8d44\u6e90\u7684\u76f8\u540c\u70b9","text":"

          \u5728\u8d44\u6e90\u7ec4/\u5171\u4eab\u8d44\u6e90\u7ed1\u5b9a\u96c6\u7fa4\u540e\u90fd\u53ef\u4ee5\u524d\u5f80\u5e94\u7528\u5de5\u4f5c\u53f0\u521b\u5efa\u547d\u540d\u7a7a\u95f4\uff0c\u521b\u5efa\u540e\u547d\u540d\u7a7a\u95f4\u5c06\u81ea\u52a8\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4\u3002

          "},{"location":"admin/ghippo/workspace/workspace.html","title":"\u521b\u5efa/\u5220\u9664\u5de5\u4f5c\u7a7a\u95f4","text":"

          \u5de5\u4f5c\u7a7a\u95f4\u662f\u4e00\u79cd\u8d44\u6e90\u8303\u7574\uff0c\u4ee3\u8868\u4e00\u79cd\u8d44\u6e90\u5c42\u7ea7\u5173\u7cfb\u3002 \u5de5\u4f5c\u7a7a\u95f4\u53ef\u4ee5\u5305\u542b\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u3001\u6ce8\u518c\u4e2d\u5fc3\u7b49\u8d44\u6e90\u3002 \u901a\u5e38\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u5bf9\u5e94\u4e00\u4e2a\u9879\u76ee\uff0c\u53ef\u4ee5\u4e3a\u6bcf\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u4e0d\u540c\u7684\u8d44\u6e90\uff0c\u6307\u6d3e\u4e0d\u540c\u7684\u7528\u6237\u548c\u7528\u6237\u7ec4\u3002

          \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u521b\u5efa\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u3002

          1. \u4f7f\u7528 admin/folder admin \u89d2\u8272\u7684\u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u5e95\u90e8\u7684 \u5168\u5c40\u7ba1\u7406 -> \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \u3002

          2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4 \u6309\u94ae\u3002

          3. \u586b\u5199\u5de5\u4f5c\u7a7a\u95f4\u540d\u79f0\u3001\u6240\u5c5e\u6587\u4ef6\u5939\u7b49\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \uff0c\u5b8c\u6210\u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4\u3002

          Tip

          \u521b\u5efa\u6210\u529f\u540e\u5de5\u4f5c\u7a7a\u95f4\u540d\u79f0\u5c06\u663e\u793a\u5728\u5de6\u4fa7\u7684\u6811\u72b6\u7ed3\u6784\u4e2d\uff0c\u4ee5\u4e0d\u540c\u7684\u56fe\u6807\u8868\u793a\u6587\u4ef6\u5939\u548c\u5de5\u4f5c\u7a7a\u95f4\u3002

          Note

          \u9009\u4e2d\u67d0\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u6216\u6587\u4ef6\u5939\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 ... \u53ef\u4ee5\u8fdb\u884c\u7f16\u8f91\u6216\u5220\u9664\u3002

          • \u5f53\u8be5\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u8d44\u6e90\u7ec4\u3001\u5171\u4eab\u8d44\u6e90\u4e2d\u5b58\u5728\u8d44\u6e90\u65f6\uff0c\u8be5\u5de5\u4f5c\u7a7a\u95f4\u65e0\u6cd5\u88ab\u5220\u9664\uff0c\u9700\u8981\u5c06\u6240\u6709\u8d44\u6e90\u89e3\u7ed1\u540e\u518d\u5220\u9664\u3002
          • \u5f53\u5fae\u670d\u52a1\u5f15\u64ce\u6a21\u5757\u5728\u8be5\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u5b58\u5728\u63a5\u5165\u6ce8\u518c\u4e2d\u5fc3\u8d44\u6e90\u65f6\uff0c\u8be5\u5de5\u4f5c\u7a7a\u95f4\u65e0\u6cd5\u88ab\u5220\u9664\uff0c\u9700\u8981\u5c06\u6240\u6709\u63a5\u5165\u6ce8\u518c\u4e2d\u5fc3\u79fb\u9664\u540e\u518d\u5220\u9664\u5de5\u4f5c\u7a7a\u95f4\u3002
          • \u5f53\u955c\u50cf\u4ed3\u5e93\u6a21\u5757\u5728\u8be5\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u5b58\u5728\u955c\u50cf\u7a7a\u95f4\u6216\u96c6\u6210\u4ed3\u5e93\u65f6\uff0c\u8be5\u5de5\u4f5c\u7a7a\u95f4\u65e0\u6cd5\u88ab\u5220\u9664\uff0c\u9700\u8981\u5c06\u955c\u50cf\u7a7a\u95f4\u89e3\u7ed1\uff0c\u5c06\u4ed3\u5e93\u96c6\u6210\u5220\u9664\u540e\u518d\u5220\u9664\u5de5\u4f5c\u7a7a\u95f4\u3002
          "},{"location":"admin/ghippo/workspace/ws-folder.html","title":"\u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7","text":"

          \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \u662f\u4e00\u4e2a\u5177\u6709\u5c42\u7ea7\u7684\u8d44\u6e90\u9694\u79bb\u548c\u8d44\u6e90\u5206\u7ec4\u7279\u6027\uff0c\u4e3b\u8981\u89e3\u51b3\u8d44\u6e90\u7edf\u4e00\u6388\u6743\u3001\u8d44\u6e90\u5206\u7ec4\u4ee5\u53ca\u8d44\u6e90\u9650\u989d\u95ee\u9898\u3002

          \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \u6709\u4e24\u4e2a\u6982\u5ff5\uff1a\u5de5\u4f5c\u7a7a\u95f4\u548c\u6587\u4ef6\u5939\u3002

          "},{"location":"admin/ghippo/workspace/ws-folder.html#_2","title":"\u5de5\u4f5c\u7a7a\u95f4","text":"

          \u5de5\u4f5c\u7a7a\u95f4\u53ef\u901a\u8fc7 \u6388\u6743 \u3001 \u8d44\u6e90\u7ec4 \u548c \u5171\u4eab\u8d44\u6e90 \u6765\u7ba1\u7406\u8d44\u6e90\uff0c\u4f7f\u7528\u6237\uff08\u7528\u6237\u7ec4\uff09\u4e4b\u95f4\u80fd\u591f\u5171\u4eab\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u7684\u8d44\u6e90\u3002

          • \u8d44\u6e90

            \u8d44\u6e90\u5904\u4e8e\u8d44\u6e90\u7ba1\u7406\u6a21\u5757\u5c42\u7ea7\u7ed3\u6784\u7684\u6700\u4f4e\u5c42\u7ea7\uff0c\u8d44\u6e90\u5305\u62ec Cluster\u3001Namespace\u3001Pipeline\u3001\u7f51\u5173\u7b49\u3002 \u6240\u6709\u8fd9\u4e9b\u8d44\u6e90\u7684\u7236\u7ea7\u53ea\u80fd\u662f\u5de5\u4f5c\u7a7a\u95f4\uff0c\u800c\u5de5\u4f5c\u7a7a\u95f4\u4f5c\u4e3a\u8d44\u6e90\u5bb9\u5668\u662f\u4e00\u79cd\u8d44\u6e90\u5206\u7ec4\u5355\u4f4d\u3002

          • \u5de5\u4f5c\u7a7a\u95f4

            \u5de5\u4f5c\u7a7a\u95f4\u901a\u5e38\u4ee3\u6307\u4e00\u4e2a\u9879\u76ee\u6216\u73af\u5883\uff0c\u6bcf\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u7684\u8d44\u6e90\u76f8\u5bf9\u4e8e\u5176\u4ed6\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u7684\u8d44\u6e90\u65f6\u903b\u8f91\u9694\u79bb\u7684\u3002 \u60a8\u53ef\u4ee5\u901a\u8fc7\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u7684\u6388\u6743\uff0c\u6388\u4e88\u7528\u6237\uff08\u7528\u6237\u7ec4\uff09\u540c\u4e00\u7ec4\u8d44\u6e90\u7684\u4e0d\u540c\u8bbf\u95ee\u6743\u9650\u3002

            \u4ece\u5c42\u6b21\u7ed3\u6784\u7684\u5e95\u5c42\u7b97\u8d77\uff0c\u5de5\u4f5c\u7a7a\u95f4\u4f4d\u4e8e\u7b2c\u4e00\u5c42\uff0c\u4e14\u5305\u542b\u8d44\u6e90\u3002 \u9664\u5171\u4eab\u8d44\u6e90\u5916\uff0c\u6240\u6709\u8d44\u6e90\u6709\u4e14\u4ec5\u6709\u4e00\u4e2a\u7236\u9879\u3002\u6240\u6709\u5de5\u4f5c\u7a7a\u95f4\u4e5f\u6709\u4e14\u4ec5\u6709\u4e00\u4e2a\u7236\u7ea7\u6587\u4ef6\u5939\u3002

            \u8d44\u6e90\u901a\u8fc7\u5de5\u4f5c\u7a7a\u95f4\u8fdb\u884c\u5206\u7ec4\uff0c\u800c\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u5b58\u5728\u4e24\u79cd\u5206\u7ec4\u6a21\u5f0f\uff0c\u5206\u522b\u662f \u8d44\u6e90\u7ec4 \u548c \u5171\u4eab\u8d44\u6e90 \u3002

          • \u8d44\u6e90\u7ec4

            \u4e00\u4e2a\u8d44\u6e90\u53ea\u80fd\u52a0\u5165\u4e00\u4e2a\u8d44\u6e90\u7ec4\uff0c\u8d44\u6e90\u7ec4\u4e0e\u5de5\u4f5c\u7a7a\u95f4\u4e00\u4e00\u5bf9\u5e94\u3002 \u8d44\u6e90\u88ab\u52a0\u5165\u5230\u8d44\u6e90\u7ec4\u540e\uff0cWorkspace Admin \u5c06\u83b7\u5f97\u8d44\u6e90\u7684\u7ba1\u7406\u6743\u9650\uff0c\u76f8\u5f53\u4e8e\u8be5\u8d44\u6e90\u7684\u6240\u6709\u8005\u3002

          • \u5171\u4eab\u8d44\u6e90

            \u800c\u5bf9\u4e8e\u5171\u4eab\u8d44\u6e90\u6765\u8bf4\uff0c\u591a\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u53ef\u4ee5\u5171\u4eab\u540c\u4e00\u4e2a\u6216\u8005\u591a\u4e2a\u8d44\u6e90\u3002 \u8d44\u6e90\u7684\u6240\u6709\u8005\uff0c\u53ef\u4ee5\u9009\u62e9\u5c06\u81ea\u5df1\u62e5\u6709\u7684\u8d44\u6e90\u5171\u4eab\u7ed9\u5de5\u4f5c\u7a7a\u95f4\u4f7f\u7528\uff0c\u4e00\u822c\u5171\u4eab\u65f6\u8d44\u6e90\u6240\u6709\u8005\u4f1a\u9650\u5236\u88ab\u5171\u4eab\u5de5\u4f5c\u7a7a\u95f4\u80fd\u591f\u4f7f\u7528\u7684\u8d44\u6e90\u989d\u5ea6\u3002 \u8d44\u6e90\u88ab\u5171\u4eab\u540e\uff0cWorkspace Admin \u4ec5\u5177\u6709\u8d44\u6e90\u9650\u989d\u4e0b\u7684\u8d44\u6e90\u4f7f\u7528\u6743\u9650\uff0c\u65e0\u6cd5\u7ba1\u7406\u8d44\u6e90\u6216\u8005\u8c03\u6574\u5de5\u4f5c\u7a7a\u95f4\u80fd\u591f\u4f7f\u7528\u7684\u8d44\u6e90\u91cf\u3002

            \u540c\u65f6\u5171\u4eab\u8d44\u6e90\u5bf9\u4e8e\u8d44\u6e90\u672c\u8eab\u4e5f\u5177\u6709\u4e00\u5b9a\u7684\u8981\u6c42\uff0c\u53ea\u6709 Cluster\uff08\u96c6\u7fa4\uff09\u8d44\u6e90\u53ef\u4ee5\u88ab\u5171\u4eab\u3002 Cluster Admin \u80fd\u591f\u5c06 Cluster \u8d44\u6e90\u5206\u4eab\u7ed9\u4e0d\u540c\u7684\u5de5\u4f5c\u7a7a\u95f4\u4f7f\u7528\uff0c\u5e76\u4e14\u9650\u5236\u5de5\u4f5c\u7a7a\u95f4\u5728\u6b64 Cluster \u4e0a\u7684\u4f7f\u7528\u989d\u5ea6\u3002

            Workspace Admin \u5728\u8d44\u6e90\u9650\u989d\u5185\u80fd\u591f\u521b\u5efa\u591a\u4e2a Namespace\uff0c\u4f46\u662f Namespace \u7684\u8d44\u6e90\u989d\u5ea6\u603b\u548c\u4e0d\u80fd\u8d85\u8fc7 Cluster \u5728\u8be5\u5de5\u4f5c\u7a7a\u95f4\u7684\u8d44\u6e90\u9650\u989d\u3002 \u5bf9\u4e8e Kubernetes \u8d44\u6e90\uff0c\u5f53\u524d\u80fd\u591f\u5206\u4eab\u7684\u8d44\u6e90\u7c7b\u578b\u4ec5\u6709 Cluster\u3002

          "},{"location":"admin/ghippo/workspace/ws-folder.html#_3","title":"\u6587\u4ef6\u5939","text":"

          \u6587\u4ef6\u5939\u53ef\u7528\u4e8e\u6784\u5efa\u4f01\u4e1a\u4e1a\u52a1\u5c42\u7ea7\u5173\u7cfb\u3002

          • \u6587\u4ef6\u5939\u662f\u5728\u5de5\u4f5c\u7a7a\u95f4\u57fa\u7840\u4e4b\u4e0a\u7684\u8fdb\u4e00\u6b65\u5206\u7ec4\u673a\u5236\uff0c\u5177\u6709\u5c42\u7ea7\u7ed3\u6784\u3002 \u4e00\u4e2a\u6587\u4ef6\u5939\u53ef\u4ee5\u5305\u542b\u5de5\u4f5c\u7a7a\u95f4\u3001\u5176\u4ed6\u6587\u4ef6\u5939\u6216\u4e24\u8005\u7684\u7ec4\u5408\uff0c\u80fd\u591f\u5f62\u6210\u6811\u72b6\u7684\u7ec4\u7ec7\u5173\u7cfb\u3002

          • \u501f\u52a9\u6587\u4ef6\u5939\u60a8\u53ef\u4ee5\u6620\u5c04\u4f01\u4e1a\u4e1a\u52a1\u5c42\u7ea7\u5173\u7cfb\uff0c\u6309\u7167\u90e8\u95e8\u5bf9\u5de5\u4f5c\u7a7a\u95f4\u8fdb\u884c\u5206\u7ec4\u3002 \u6587\u4ef6\u5939\u4e0d\u76f4\u63a5\u4e0e\u8d44\u6e90\u6302\u94a9\uff0c\u800c\u662f\u901a\u8fc7\u5de5\u4f5c\u7a7a\u95f4\u95f4\u63a5\u5b9e\u73b0\u8d44\u6e90\u5206\u7ec4\u3002

          • \u6587\u4ef6\u5939\u6709\u4e14\u4ec5\u6709\u4e00\u4e2a\u7236\u7ea7\u6587\u4ef6\u5939\uff0c\u800c\u6839\u6587\u4ef6\u5939\u662f\u5c42\u6b21\u7ed3\u6784\u7684\u6700\u9ad8\u5c42\u7ea7\u3002 \u6839\u6587\u4ef6\u5939\u6ca1\u6709\u7236\u7ea7\uff0c\u6587\u4ef6\u5939\u548c\u5de5\u4f5c\u7a7a\u95f4\u5747\u6302\u9760\u5230\u6839\u6587\u4ef6\u5939\u4e0b\u3002

          \u53e6\u5916\uff0c\u7528\u6237\uff08\u7528\u6237\u7ec4\uff09\u5728\u6587\u4ef6\u5939\u4e2d\u80fd\u591f\u901a\u8fc7\u5c42\u7ea7\u7ed3\u6784\u7ee7\u627f\u6765\u81ea\u7236\u9879\u7684\u6743\u9650\u3002 \u7528\u6237\u5728\u5c42\u6b21\u7ed3\u6784\u4e2d\u7684\u6743\u9650\u6765\u81ea\u5f53\u524d\u5c42\u7ea7\u7684\u6743\u9650\u4ee5\u53ca\u7ee7\u627f\u5176\u7236\u9879\u6743\u9650\u7684\u7ec4\u5408\u7ed3\u679c\uff0c\u6743\u9650\u4e4b\u95f4\u662f\u52a0\u5408\u5173\u7cfb\u4e0d\u5b58\u5728\u4e92\u65a5\u3002

          "},{"location":"admin/ghippo/workspace/ws-permission.html","title":"\u5de5\u4f5c\u7a7a\u95f4\u6743\u9650\u8bf4\u660e","text":"

          \u5de5\u4f5c\u7a7a\u95f4\u5177\u6709\u6743\u9650\u6620\u5c04\u548c\u8d44\u6e90\u9694\u79bb\u80fd\u529b\uff0c\u80fd\u591f\u5c06\u7528\u6237/\u7528\u6237\u7ec4\u5728\u5de5\u4f5c\u7a7a\u95f4\u7684\u6743\u9650\u6620\u5c04\u5230\u5176\u4e0b\u7684\u8d44\u6e90\u4e0a\u3002 \u82e5\u7528\u6237/\u7528\u6237\u7ec4\u5728\u5de5\u4f5c\u7a7a\u95f4\u662f Workspace Admin \u89d2\u8272\uff0c\u540c\u65f6\u5de5\u4f5c\u7a7a\u95f4-\u8d44\u6e90\u7ec4\u4e2d\u7ed1\u5b9a\u4e86\u8d44\u6e90 Namespace\uff0c\u5219\u6620\u5c04\u540e\u8be5\u7528\u6237/\u7528\u6237\u7ec4\u5c06\u6210\u4e3a Namespace Admin\u3002

          Note

          \u5de5\u4f5c\u7a7a\u95f4\u7684\u6743\u9650\u6620\u5c04\u80fd\u529b\u4e0d\u4f1a\u4f5c\u7528\u5230\u5171\u4eab\u8d44\u6e90\u4e0a\uff0c\u56e0\u4e3a\u5171\u4eab\u662f\u5c06\u96c6\u7fa4\u7684\u4f7f\u7528\u6743\u9650\u5171\u4eab\u7ed9\u591a\u4e2a\u5de5\u4f5c\u7a7a\u95f4\uff0c\u800c\u4e0d\u662f\u5c06\u7ba1\u7406\u6743\u9650\u53d7\u8ba9\u7ed9\u5de5\u4f5c\u7a7a\u95f4\uff0c\u56e0\u6b64\u4e0d\u4f1a\u5b9e\u73b0\u6743\u9650\u7ee7\u627f\u548c\u89d2\u8272\u6620\u5c04\u3002

          "},{"location":"admin/ghippo/workspace/ws-permission.html#_2","title":"\u5e94\u7528\u573a\u666f","text":"

          \u901a\u8fc7\u5c06\u8d44\u6e90\u7ed1\u5b9a\u5230\u4e0d\u540c\u7684\u5de5\u4f5c\u7a7a\u95f4\u80fd\u591f\u5b9e\u73b0\u8d44\u6e90\u9694\u79bb\u3002 \u56e0\u6b64\u501f\u52a9\u6743\u9650\u6620\u5c04\u3001\u8d44\u6e90\u9694\u79bb\u548c\u5171\u4eab\u8d44\u6e90\u80fd\u529b\u80fd\u591f\u5c06\u8d44\u6e90\u7075\u6d3b\u5206\u914d\u7ed9\u5404\u4e2a\u5de5\u4f5c\u7a7a\u95f4\uff08\u79df\u6237\uff09\u3002

          \u901a\u5e38\u9002\u7528\u4e8e\u4ee5\u4e0b\u4e24\u4e2a\u573a\u666f\uff1a

          • \u96c6\u7fa4\u4e00\u5bf9\u4e00

            \u666e\u901a\u96c6\u7fa4 \u90e8\u95e8/\u79df\u6237\uff08\u5de5\u4f5c\u7a7a\u95f4\uff09 \u7528\u9014 \u96c6\u7fa4 01 A \u7ba1\u7406\u548c\u4f7f\u7528 \u96c6\u7fa4 02 B \u7ba1\u7406\u548c\u4f7f\u7528
          • \u96c6\u7fa4\u4e00\u5bf9\u591a

            \u96c6\u7fa4 \u90e8\u95e8/\u79df\u6237\uff08\u5de5\u4f5c\u7a7a\u95f4\uff09 \u8d44\u6e90\u9650\u989d \u96c6\u7fa4 01 A 100 \u6838 CPU B 50 \u6838 CPU
          "},{"location":"admin/ghippo/workspace/ws-permission.html#_3","title":"\u6743\u9650\u8bf4\u660e","text":"\u64cd\u4f5c\u5bf9\u8c61 \u64cd\u4f5c Workspace Admin Workspace Editor Workspace Viewer \u672c\u8eab \u67e5\u770b \u2713 \u2713 \u2713 - \u6388\u6743 \u2713 \u2717 \u2717 - \u4fee\u6539\u522b\u540d \u2713 \u2713 \u2717 \u8d44\u6e90\u7ec4 \u67e5\u770b \u2713 \u2713 \u2713 - \u8d44\u6e90\u7ed1\u5b9a \u2713 \u2717 \u2717 - \u89e3\u9664\u7ed1\u5b9a \u2713 \u2717 \u2717 \u5171\u4eab\u8d44\u6e90 \u67e5\u770b \u2713 \u2713 \u2713 - \u65b0\u589e\u5171\u4eab \u2713 \u2717 \u2717 - \u89e3\u9664\u5171\u4eab \u2713 \u2717 \u2717 - \u8d44\u6e90\u9650\u989d \u2713 \u2717 \u2717 - \u4f7f\u7528\u5171\u4eab\u8d44\u6e90 [^1] \u2713 \u2717 \u2717"},{"location":"admin/ghippo/workspace/wsbind-permission.html","title":"\u8d44\u6e90\u7ed1\u5b9a\u6743\u9650\u8bf4\u660e","text":"

          Suppose user Xiao Ming ("Xiao Ming" stands for any user who needs to bind resources) already has the Workspace Admin role, or has been granted a custom role that includes the workspace's "Resource binding" permission, and wants to bind a cluster or a namespace to his workspace.

          Binding cluster/namespace resources to a workspace requires not only the workspace's "Resource binding" permission but also the Cluster Admin resource permission.

          "},{"location":"admin/ghippo/workspace/wsbind-permission.html#_2","title":"Granting Permissions to Xiao Ming","text":"
          1. As the platform Admin role, grant Xiao Ming the Workspace Admin role on the Workspace -> Authorization page.

          2. Then, on the Container Management -> Permissions page, grant Xiao Ming Cluster Admin via Add Authorization.

          "},{"location":"admin/ghippo/workspace/wsbind-permission.html#_3","title":"Binding to a Workspace","text":"

          Log in to the AI computing platform with Xiao Ming's account. On the Container Management -> Clusters page, Xiao Ming can use the Bind Workspace feature to bind the specified cluster to his own workspace.

          Note

          Xiao Ming can bind a cluster, or the namespaces under it, to a workspace only in the Container Management module; this cannot be done in the Global Management module.

          Binding a namespace to a workspace likewise requires at least the Workspace Admin + Cluster Admin permissions.

          "},{"location":"admin/host/createhost.html","title":"\u521b\u5efa\u548c\u542f\u52a8\u4e91\u4e3b\u673a","text":"

          \u7528\u6237\u5b8c\u6210\u6ce8\u518c\uff0c\u4e3a\u5176\u5206\u914d\u4e86\u5de5\u4f5c\u7a7a\u95f4\u3001\u547d\u540d\u7a7a\u95f4\u548c\u8d44\u6e90\u540e\uff0c\u5373\u53ef\u4ee5\u521b\u5efa\u5e76\u542f\u52a8\u4e91\u4e3b\u673a\u3002

          "},{"location":"admin/host/createhost.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
          • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
          • \u7528\u6237\u5df2\u6210\u529f\u6ce8\u518c
          • \u4e3a\u7528\u6237\u7ed1\u5b9a\u4e86\u5de5\u4f5c\u7a7a\u95f4
          • \u4e3a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u4e86\u8d44\u6e90
          "},{"location":"admin/host/createhost.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
          1. \u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
          2. \u70b9\u51fb \u521b\u5efa\u4e91\u4e3b\u673a -> \u901a\u8fc7\u6a21\u677f\u521b\u5efa

          3. \u5b9a\u4e49\u7684\u4e91\u4e3b\u673a\u5404\u9879\u914d\u7f6e\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65

            \u57fa\u672c\u914d\u7f6e\u6a21\u677f\u914d\u7f6e\u5b58\u50a8\u4e0e\u7f51\u7edc

          4. \u914d\u7f6e root \u5bc6\u7801\u6216 ssh \u5bc6\u94a5\u540e\u70b9\u51fb \u786e\u5b9a

          5. \u8fd4\u56de\u4e3b\u673a\u5217\u8868\uff0c\u7b49\u5f85\u72b6\u6001\u53d8\u4e3a \u8fd0\u884c\u4e2d \u4e4b\u540e\uff0c\u53ef\u4ee5\u901a\u8fc7\u53f3\u4fa7\u7684 \u2507 \u542f\u52a8\u4e3b\u673a\u3002

          \u4e0b\u4e00\u6b65\uff1a\u4f7f\u7528\u4e91\u4e3b\u673a

          "},{"location":"admin/host/usehost.html","title":"\u4f7f\u7528\u4e91\u4e3b\u673a","text":"

          \u521b\u5efa\u5e76\u542f\u52a8\u4e91\u4e3b\u673a\u4e4b\u540e\uff0c\u7528\u6237\u5c31\u53ef\u4ee5\u5f00\u59cb\u4f7f\u7528\u4e91\u4e3b\u673a\u3002

          "},{"location":"admin/host/usehost.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
          • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
          • \u7528\u6237\u5df2\u521b\u5efa\u5e76\u542f\u52a8\u4e91\u4e3b\u673a
          "},{"location":"admin/host/usehost.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
          1. \u4ee5\u7ba1\u7406\u5458\u8eab\u4efd\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
          2. \u5bfc\u822a\u5230 \u5bb9\u5668\u7ba1\u7406 -> \u5bb9\u5668\u7f51\u7edc -> \u670d\u52a1 \uff0c\u70b9\u51fb\u670d\u52a1\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u670d\u52a1\u8be6\u60c5\u9875\uff0c\u5728\u53f3\u4e0a\u89d2\u70b9\u51fb \u66f4\u65b0

          3. \u66f4\u6539\u7aef\u53e3\u8303\u56f4\u4e3a 30900-30999\uff0c\u4f46\u4e0d\u80fd\u51b2\u7a81\u3002

          4. \u4ee5\u7ec8\u7aef\u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5bfc\u822a\u5230\u5bf9\u5e94\u7684\u670d\u52a1\uff0c\u67e5\u770b\u8bbf\u95ee\u7aef\u53e3\u3002

          5. \u5728\u5916\u7f51\u4f7f\u7528 SSH \u5ba2\u6237\u7aef\u767b\u5f55\u4e91\u4e3b\u673a

          6. \u81f3\u6b64\uff0c\u4f60\u53ef\u4ee5\u5728\u4e91\u4e3b\u673a\u4e0a\u6267\u884c\u5404\u9879\u64cd\u4f5c\u3002
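
          A hedged sketch of step 3 done with kubectl instead of the UI, assuming the service exposing SSH is a NodePort service; the service name, namespace, and port index are placeholders.

          # Point the service's first port at a nodePort inside the 30900-30999 range
          kubectl -n <namespace> patch service <vm-ssh-service> --type='json' \
            -p='[{"op":"replace","path":"/spec/ports/0/nodePort","value":30950}]'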

          Next step: Cloud Resource Sharing: Quota Management

          "},{"location":"admin/insight/alert-center/index.html","title":"\u544a\u8b66\u4e2d\u5fc3","text":"

          \u544a\u8b66\u4e2d\u5fc3\u662f AI \u7b97\u529b\u5e73\u53f0 \u63d0\u4f9b\u7684\u4e00\u4e2a\u91cd\u8981\u529f\u80fd\uff0c\u5b83\u8ba9\u7528\u6237\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u754c\u9762\u65b9\u4fbf\u5730\u6309\u7167\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u67e5\u770b\u6240\u6709\u6d3b\u52a8\u548c\u5386\u53f2\u544a\u8b66\uff0c \u5e76\u6839\u636e\u544a\u8b66\u7ea7\u522b\uff08\u7d27\u6025\u3001\u8b66\u544a\u3001\u63d0\u793a\uff09\u6765\u641c\u7d22\u544a\u8b66\u3002

          \u6240\u6709\u544a\u8b66\u90fd\u662f\u57fa\u4e8e\u9884\u8bbe\u7684\u544a\u8b66\u89c4\u5219\u8bbe\u5b9a\u7684\u9608\u503c\u6761\u4ef6\u89e6\u53d1\u7684\u3002\u5728 AI \u7b97\u529b\u5e73\u53f0\u4e2d\uff0c\u5185\u7f6e\u4e86\u4e00\u4e9b\u5168\u5c40\u544a\u8b66\u7b56\u7565\uff0c\u540c\u65f6\u60a8\u4e5f\u53ef\u4ee5\u968f\u65f6\u521b\u5efa\u3001\u5220\u9664\u544a\u8b66\u7b56\u7565\uff0c\u5bf9\u4ee5\u4e0b\u6307\u6807\u8fdb\u884c\u8bbe\u7f6e\uff1a

          • CPU \u4f7f\u7528\u91cf
          • \u5185\u5b58\u4f7f\u7528\u91cf
          • \u78c1\u76d8\u4f7f\u7528\u91cf
          • \u78c1\u76d8\u6bcf\u79d2\u8bfb\u6b21\u6570
          • \u78c1\u76d8\u6bcf\u79d2\u5199\u6b21\u6570
          • \u96c6\u7fa4\u78c1\u76d8\u8bfb\u53d6\u541e\u5410\u91cf
          • \u96c6\u7fa4\u78c1\u76d8\u5199\u5165\u541e\u5410\u91cf
          • \u7f51\u7edc\u53d1\u9001\u901f\u7387
          • \u7f51\u7edc\u63a5\u6536\u901f\u7387

          \u8fd8\u53ef\u4ee5\u4e3a\u544a\u8b66\u89c4\u5219\u6dfb\u52a0\u6807\u7b7e\u548c\u6ce8\u89e3\u3002\u544a\u8b66\u89c4\u5219\u5206\u4e3a\u6d3b\u8dc3\u548c\u8fc7\u671f\u89c4\u5219\uff0c\u652f\u6301\u542f\u7528/\u7981\u7528\u67d0\u4e9b\u89c4\u5219\u6765\u5b9e\u73b0\u544a\u8b66\u9759\u9ed8\u3002

          \u5f53\u8fbe\u5230\u9608\u503c\u6761\u4ef6\u540e\uff0c\u53ef\u4ee5\u914d\u7f6e\u544a\u8b66\u901a\u77e5\u65b9\u5f0f\uff0c\u5305\u62ec\u90ae\u4ef6\u3001\u9489\u9489\u3001\u4f01\u4e1a\u5fae\u4fe1\u3001Webhook \u548c\u77ed\u4fe1\u901a\u77e5\u3002 \u6240\u6709\u901a\u77e5\u7684\u6d88\u606f\u6a21\u677f\u90fd\u53ef\u4ee5\u81ea\u5b9a\u4e49\uff0c\u540c\u65f6\u8fd8\u652f\u6301\u6309\u8bbe\u5b9a\u7684\u95f4\u9694\u65f6\u95f4\u53d1\u9001\u901a\u77e5\u3002

          \u6b64\u5916\uff0c\u544a\u8b66\u4e2d\u5fc3\u8fd8\u652f\u6301\u901a\u8fc7\u963f\u91cc\u4e91\u3001\u817e\u8baf\u4e91\u7b49\u63d0\u4f9b\u7684\u77ed\u4fe1\u670d\u52a1\u5c06\u544a\u8b66\u6d88\u606f\u53d1\u9001\u7ed9\u6307\u5b9a\u7528\u6237\uff0c\u5b9e\u73b0\u591a\u79cd\u65b9\u5f0f\u7684\u544a\u8b66\u901a\u77e5\u3002

          AI \u7b97\u529b\u5e73\u53f0 \u544a\u8b66\u4e2d\u5fc3\u662f\u4e00\u4e2a\u529f\u80fd\u5f3a\u5927\u7684\u544a\u8b66\u7ba1\u7406\u5e73\u53f0\uff0c\u53ef\u5e2e\u52a9\u7528\u6237\u53ca\u65f6\u53d1\u73b0\u548c\u89e3\u51b3\u96c6\u7fa4\u4e2d\u51fa\u73b0\u7684\u95ee\u9898\uff0c \u63d0\u9ad8\u4e1a\u52a1\u7a33\u5b9a\u6027\u548c\u53ef\u7528\u6027\uff0c\u4fbf\u4e8e\u96c6\u7fa4\u5de1\u68c0\u548c\u6545\u969c\u6392\u67e5\u3002

          "},{"location":"admin/insight/alert-center/alert-policy.html","title":"\u544a\u8b66\u7b56\u7565","text":"

          \u544a\u8b66\u7b56\u7565\u662f\u5728\u53ef\u89c2\u6d4b\u6027\u7cfb\u7edf\u4e2d\u5b9a\u4e49\u7684\u4e00\u7ec4\u89c4\u5219\u548c\u6761\u4ef6\uff0c\u7528\u4e8e\u68c0\u6d4b\u548c\u89e6\u53d1\u8b66\u62a5\uff0c\u4ee5\u4fbf\u5728\u7cfb\u7edf\u51fa\u73b0\u5f02\u5e38\u6216\u8fbe\u5230\u9884\u5b9a\u7684\u9608\u503c\u65f6\u53ca\u65f6\u901a\u77e5\u76f8\u5173\u4eba\u5458\u6216\u7cfb\u7edf\u3002

          \u6bcf\u6761\u544a\u8b66\u7b56\u7565\u662f\u4e00\u7ec4\u544a\u8b66\u89c4\u5219\u7684\u96c6\u5408\uff0c\u652f\u6301\u5bf9\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u5de5\u4f5c\u8d1f\u8f7d\u7b49\u8d44\u6e90\u3001\u65e5\u5fd7\u3001\u4e8b\u4ef6\u8bbe\u7f6e\u544a\u8b66\u89c4\u5219\u3002\u5f53\u544a\u8b66\u5bf9\u8c61\u8fbe\u5230\u7b56\u7565\u4e0b\u4efb\u4e00\u89c4\u5219\u8bbe\u5b9a\u7684\u9608\u503c\uff0c\u5219\u4f1a\u81ea\u52a8\u89e6\u53d1\u544a\u8b66\u5e76\u53d1\u9001\u901a\u77e5\u3002

          "},{"location":"admin/insight/alert-center/alert-policy.html#_2","title":"\u67e5\u770b\u544a\u8b66\u7b56\u7565","text":"
          1. \u70b9\u51fb\u4e00\u7ea7\u5bfc\u822a\u680f\u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027\u3002
          2. \u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9 \u544a\u8b66\u4e2d\u5fc3 -> \u544a\u8b66\u7b56\u7565\u3002

            • \u96c6\u7fa4\uff1a\u5355\u51fb\u96c6\u7fa4\u4e0b\u62c9\u6846\u53ef\u5207\u6362\u96c6\u7fa4\uff1b
            • \u547d\u540d\u7a7a\u95f4\uff1a\u5355\u51fb\u547d\u540d\u7a7a\u95f4\u5207\u6362\u4e0b\u62c9\u6846\u3002

          3. \u70b9\u51fb\u544a\u8b66\u7b56\u7565\u540d\u79f0\u53ef\u67e5\u770b\u7b56\u7565\u7684\u57fa\u672c\u4fe1\u606f\u3001\u89c4\u5219\u4ee5\u53ca\u901a\u77e5\u914d\u7f6e\u3002

            1. \u5728\u89c4\u5219\u5217\u8868\u4e2d\u53ef\u67e5\u770b\u89c4\u5219\u7c7b\u578b\u3001\u89c4\u5219\u7684\u8868\u8fbe\u5f0f\u3001\u7ea7\u522b\u3001\u72b6\u6001\u7b49\u4fe1\u606f\u3002
            2. \u8fdb\u5165\u7b56\u7565\u8be6\u60c5\uff0c\u53ef\u4ee5\u6dfb\u52a0\u3001\u7f16\u8f91\u3001\u5220\u9664\u5176\u4e0b\u7684\u544a\u8b66\u89c4\u5219\u3002

          "},{"location":"admin/insight/alert-center/alert-policy.html#_3","title":"\u521b\u5efa\u544a\u8b66\u7b56\u7565","text":"
          1. \u586b\u5199\u57fa\u672c\u4fe1\u606f\uff0c\u9009\u62e9\u4e00\u4e2a\u6216\u591a\u4e2a\u96c6\u7fa4\u3001\u8282\u70b9\u6216\u5de5\u4f5c\u8d1f\u8f7d\u4e3a\u544a\u8b66\u5bf9\u8c61\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002

            Note

            • \u9009\u62e9\u5168\u90e8\u96c6\u7fa4\u3001\u8282\u70b9\u6216\u5de5\u4f5c\u8d1f\u8f7d\uff1a\u521b\u5efa\u7684\u544a\u8b66\u89c4\u5219\u5bf9\u6240\u6709\u5df2\u5b89\u88c5 insight-agent \u7684\u96c6\u7fa4\u751f\u6548\u3002
            • \u9009\u62e9\u5355\u4e2a\u6216\u591a\u4e2a\u96c6\u7fa4\u96c6\u7fa4\u3001\u8282\u70b9\u6216\u5de5\u4f5c\u8d1f\u8f7d\uff1a\u521b\u5efa\u7684\u544a\u8b66\u89c4\u5219\u4ec5\u5bf9\u6240\u9009\u7684\u8d44\u6e90\u5bf9\u8c61\u751f\u6548\u3002
            • \u540c\u65f6\uff0c\u7528\u6237\u53ea\u80fd\u5bf9\u5df2\u6743\u9650\u7684\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u8bbe\u7f6e\u544a\u8b66\u89c4\u5219\u3002
          "},{"location":"admin/insight/alert-center/alert-policy.html#_4","title":"\u624b\u52a8\u6dfb\u52a0\u89c4\u5219","text":"
          1. \u5728\u521b\u5efa\u544a\u8b66\u7b56\u7565\u7684\u7b2c\u4e8c\u90e8\u4e2d\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4e0a\u89d2\u7684\u6dfb\u52a0\u89c4\u5219\u3002

          2. \u5728\u5f39\u7a97\u4e2d\u521b\u5efa\u544a\u8b66\u89c4\u5219\uff0c\u586b\u5199\u5404\u9879\u53c2\u6570\u540e\u70b9\u51fb \u786e\u5b9a\u3002

            • \u6a21\u677f\u89c4\u5219\uff1a\u9884\u5b9a\u4e49\u4e86\u57fa\u7840\u6307\u6807\uff0c\u53ef\u4ee5\u6309 CPU\u3001\u5185\u5b58\u3001\u78c1\u76d8\u3001\u7f51\u7edc\u8bbe\u5b9a\u8981\u76d1\u63a7\u7684\u6307\u6807\u3002
            • PromQL \u89c4\u5219\uff1a\u8f93\u5165\u4e00\u4e2a PromQL \u8868\u8fbe\u5f0f\uff0c\u5177\u4f53\u8bf7\u67e5\u8be2 Prometheus \u8868\u8fbe\u5f0f\u3002
            • \u6301\u7eed\u65f6\u957f\uff1a\u544a\u8b66\u88ab\u89e6\u53d1\u4e14\u6301\u7eed\u65f6\u95f4\u8fbe\u5230\u8be5\u8bbe\u5b9a\u503c\u540e\uff0c\u544a\u8b66\u7b56\u7565\u5c06\u53d8\u4e3a\u89e6\u53d1\u4e2d\u72b6\u6001\u3002
            • \u544a\u8b66\u7ea7\u522b\uff1a\u5305\u542b\u7d27\u6025\u3001\u8b66\u544a\u3001\u4fe1\u606f\u4e09\u79cd\u7ea7\u522b\u3002
            • \u9ad8\u7ea7\u8bbe\u7f6e\uff1a\u53ef\u4ee5\u81ea\u5b9a\u4e49\u6807\u7b7e\u548c\u6ce8\u89e3\u3002

            Info

The system defines built-in labels. If a custom label has the same key as a built-in label, the custom label does not take effect. The built-in labels are: severity, rule_id, source, cluster_name, group_id, target_type, and target.
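To make the PromQL option concrete, here is a minimal sketch of a single PromQL-based rule, assuming standard node-exporter metrics are available; the expression, threshold, and names are illustrative assumptions, not values required by the platform:

    # Hypothetical rule: fire when a node's CPU usage stays above 80% for 5 minutes.
    - alert: node-cpu-high                  # rule name: letters, digits, _ and -
      expr: (1 - avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[5m]))) * 100 > 80
      for: 5m                               # the "duration" before the policy turns Firing
      labels:
        severity: warning                   # critical, warning, or info
      annotations:
        description: "CPU usage on {{ $labels.instance }} has stayed above 80% for 5 minutes."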

          "},{"location":"admin/insight/alert-center/alert-policy.html#_5","title":"\u521b\u5efa\u65e5\u5fd7\u89c4\u5219","text":"

          \u5b8c\u6210\u57fa\u672c\u4fe1\u606f\u7684\u586b\u5199\u540e\uff0c\u70b9\u51fb \u6dfb\u52a0\u89c4\u5219\uff0c\u89c4\u5219\u7c7b\u578b\u9009\u62e9 \u65e5\u5fd7\u89c4\u5219\u3002

Note

Log rules can only be created when the selected resource object is a node or a workload.

Field descriptions:

• Filter: the field used to query log content; four filter types are supported: AND, OR, regular-expression match, and fuzzy match.
• Condition: enter the keyword or match condition according to the filter.
• Time range: the time range of the log query.
• Threshold: enter the alert threshold in the input box; the alert fires when the threshold is reached. Supported comparison operators: >, ≥, =, ≤, <.
• Alert severity: select the severity level, indicating how serious the alert is.
          "},{"location":"admin/insight/alert-center/alert-policy.html#_6","title":"\u521b\u5efa\u4e8b\u4ef6\u89c4\u5219","text":"

          \u5b8c\u6210\u57fa\u672c\u4fe1\u606f\u7684\u586b\u5199\u540e\uff0c\u70b9\u51fb \u6dfb\u52a0\u89c4\u5219\uff0c\u89c4\u5219\u7c7b\u578b\u9009\u62e9 \u4e8b\u4ef6\u89c4\u5219\u3002

          Note

          \u4ec5\u5f53\u8d44\u6e90\u5bf9\u8c61\u9009\u62e9\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u652f\u6301\u521b\u5efa\u4e8b\u4ef6\u89c4\u5219\u3002

          \u5b57\u6bb5\u8bf4\u660e\uff1a

          • \u4e8b\u4ef6\u89c4\u5219\uff1a\u4ec5\u652f\u6301\u8d44\u6e90\u5bf9\u8c61\u9009\u62e9\u5de5\u4f5c\u8d1f\u8f7d
          • \u4e8b\u4ef6\u539f\u56e0\uff1a\u4e0d\u540c\u7684\u5de5\u4f5c\u8d1f\u8f7d\u7c7b\u578b\u7684\u4e8b\u4ef6\u539f\u56e0\u4e0d\u540c\uff0c\u4e8b\u4ef6\u539f\u56e0\u4e4b\u95f4\u662f\u201c\u548c\u201d\u7684\u5173\u7cfb\u3002
          • \u65f6\u95f4\u8303\u56f4\uff1a\u68c0\u6d4b\u8be5\u65f6\u95f4\u8303\u56f4\u5185\u4ea7\u751f\u6570\u636e\uff0c\u82e5\u8fbe\u5230\u8bbe\u7f6e\u7684\u9608\u503c\u6761\u4ef6\uff0c\u5219\u89e6\u53d1\u544a\u8b66\u4e8b\u4ef6\u3002
          • \u9608\u503c\u6761\u4ef6\uff1a\u5f53\u4ea7\u751f\u7684\u4e8b\u4ef6\u8fbe\u5230\u8bbe\u7f6e\u7684\u9608\u503c\u65f6\uff0c\u5219\u89e6\u53d1\u544a\u8b66\u4e8b\u4ef6\u3002
          • \u8d8b\u52bf\u56fe\uff1a\u9ed8\u8ba4\u67e5\u8be2 10 \u5206\u949f\u5185\u7684\u4e8b\u4ef6\u53d8\u5316\u8d8b\u52bf\uff0c\u6bcf\u4e2a\u70b9\u7684\u6570\u503c\u7edf\u8ba1\u7684\u662f\u5f53\u524d\u65f6\u95f4\u70b9\u5230\u4e4b\u524d\u7684\u67d0\u6bb5\u65f6\u95f4\uff08\u65f6\u95f4\u8303\u56f4\uff09\u5185\u53d1\u751f\u7684\u603b\u6b21\u6570\u3002
          "},{"location":"admin/insight/alert-center/alert-policy.html#_7","title":"\u5bfc\u5165\u89c4\u5219\u6a21\u677f","text":"
          1. \u53ef\u70b9\u51fb \u6a21\u677f\u5bfc\u5165\uff0c\u9009\u62e9\u5e73\u53f0\u7ba1\u7406\u5458\u5df2\u521b\u5efa\u597d\u7684\u544a\u8b66\u6a21\u677f\u6279\u91cf\u5bfc\u5165\u544a\u8b66\u89c4\u5219\u3002

          2. \u70b9\u51fb \u4e0b\u4e00\u6b65 \u540e\u914d\u7f6e\u901a\u77e5\u3002

          3. \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u6309\u94ae\uff0c\u8fd4\u56de\u544a\u8b66\u7b56\u7565\u5217\u8868\u3002

Tip

A newly created alert policy starts in the Not Firing state. Once the threshold condition and duration of a rule are met, it switches to the Firing state.

Warning

A deleted alert policy is gone for good, so proceed with caution.

          "},{"location":"admin/insight/alert-center/alert-policy.html#yaml","title":"\u901a\u8fc7 YAML \u5bfc\u5165\u544a\u8b66\u7b56\u7565","text":"
          1. \u8fdb\u5165\u544a\u8b66\u7b56\u7565\u5217\u8868\uff0c\u70b9\u51fb YAML \u521b\u5efa\u3002

          2. \u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u7684\u9009\u62e9\u662f\u4e3a\u4e86\u544a\u8b66\u7b56\u7565\u7684\u7ba1\u7406\u6743\u9650\u3002

          3. YAML \u7f16\u8f91\u5668\u4e2d\u8bf7\u586b\u5199 spec \u53ca\u5176\u4e2d\u7684\u5185\u5bb9\uff0c\u4ec5\u652f\u6301\u5bfc\u5165\u4e00\u4e2a group\u3002
          4. \u544a\u8b66\u89c4\u5219\u540d\u79f0 \u9700\u8981\u7b26\u5408\u89c4\u8303\uff1a\u540d\u79f0\u53ea\u80fd\u5305\u542b\u5927\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u3001\u4e0b\u5212\u7ebf\uff08_\uff09\u548c\u8fde\u5b57\u7b26\uff08-\uff09\uff0c\u5fc5\u987b\u4ee5\u5b57\u6bcd\u5f00\u5934\uff0c\u6700\u957f 63 \u4e2a\u5b57\u7b26\u3002
          5. \u5fc5\u586b severity \u4e14\u7b26\u5408\u89c4\u8303\uff1acritical\u3001warning\u3001info\u3002
          6. \u5fc5\u586b\u8868\u8fbe\u5f0f expr\u3002

7. After importing the YAML file, click Preview to validate the imported YAML format and quickly review the imported alert rules.
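As a reference, here is a minimal sketch of a spec that satisfies the constraints above: exactly one group, a rule name matching the naming rule, a required severity, and a required expr. The group layout follows the common Prometheus rule-group shape, which is an assumption; use the Preview step to confirm the exact nesting your platform expects. All concrete names and values are illustrative:

    spec:
      groups:                                  # only one group may be imported
        - name: example-group
          rules:
            - alert: pod-restart-frequent      # starts with a letter, <= 63 chars
              expr: increase(kube_pod_container_status_restarts_total[10m]) > 3
              for: 5m
              labels:
                severity: warning              # must be critical, warning, or info
              annotations:
                description: "Containers are restarting frequently."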

          "},{"location":"admin/insight/alert-center/alert-template.html","title":"\u544a\u8b66\u6a21\u677f","text":"

          \u544a\u8b66\u6a21\u677f\u53ef\u652f\u6301\u5e73\u53f0\u7ba1\u7406\u5458\u521b\u5efa\u544a\u8b66\u6a21\u677f\u53ca\u89c4\u5219\uff0c\u4e1a\u52a1\u4fa7\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u544a\u8b66\u6a21\u677f\u521b\u5efa\u544a\u8b66\u7b56\u7565\u3002 \u8fd9\u4e2a\u529f\u80fd\u53ef\u4ee5\u51cf\u5c11\u4e1a\u52a1\u4eba\u5458\u5bf9\u544a\u8b66\u89c4\u5219\u7684\u7ba1\u7406\uff0c\u4e14\u53ef\u4ee5\u6839\u636e\u73af\u5883\u5b9e\u9645\u60c5\u51b5\u81ea\u884c\u4fee\u6539\u544a\u8b66\u9608\u503c\u3002

          "},{"location":"admin/insight/alert-center/alert-template.html#_2","title":"\u521b\u5efa\u544a\u8b66\u6a21\u677f","text":"
          1. \u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9\u00a0\u544a\u8b66\u4e2d\u5fc3\u00a0->\u00a0\u544a\u8b66\u7b56\u7565\uff0c\u5355\u51fb\u9876\u90e8\u7684 \u544a\u8b66\u6a21\u677f \u3002

          2. \u70b9\u51fb \u521b\u5efa\u544a\u8b66\u6a21\u677f \uff0c\u8bbe\u7f6e\u544a\u8b66\u6a21\u677f\u7684\u540d\u79f0\u3001\u63cf\u8ff0\u7b49\u4fe1\u606f\u3002

            \u53c2\u6570 \u8bf4\u660e \u6a21\u677f\u540d\u79f0 \u540d\u79f0\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u548c\u8fde\u5b57\u7b26\uff08-\uff09\uff0c\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u548c\u7ed3\u5c3e\uff0c\u6700\u957f 63 \u4e2a\u5b57\u7b26\u3002 \u63cf\u8ff0 \u63cf\u8ff0\u53ef\u5305\u542b\u4efb\u610f\u5b57\u7b26\uff0c\u6700\u957f 256 \u4e2a\u5b57\u7b26\u3002 \u8d44\u6e90\u7c7b\u578b \u7528\u4e8e\u6307\u5b9a\u544a\u8b66\u6a21\u677f\u7684\u5339\u914d\u7c7b\u578b\u3002 \u544a\u8b66\u89c4\u5219 \u652f\u6301\u9884\u5b9a\u4e49\u591a\u4e2a\u544a\u8b66\u89c4\u5219\uff0c\u53ef\u6dfb\u52a0\u6a21\u677f\u89c4\u5219\u3001PromQL \u89c4\u5219\u3002
          3. \u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u540e\u8fd4\u56de\u544a\u8b66\u6a21\u677f\u5217\u8868\uff0c\u70b9\u51fb\u6a21\u677f\u540d\u79f0\u540e\u53ef\u67e5\u770b\u6a21\u677f\u8be6\u60c5\u3002

          "},{"location":"admin/insight/alert-center/alert-template.html#_3","title":"\u7f16\u8f91\u544a\u8b66\u6a21\u677f","text":"

          \u70b9\u51fb\u76ee\u6807\u89c4\u5219\u540e\u7684 \u2507 \uff0c\u70b9\u51fb \u7f16\u8f91\uff0c\u8fdb\u5165\u6291\u5236\u89c4\u5219\u7684\u7f16\u8f91\u9875\u3002

          "},{"location":"admin/insight/alert-center/alert-template.html#_4","title":"\u5220\u9664\u544a\u8b66\u6a21\u677f","text":"

          \u70b9\u51fb\u76ee\u6807\u6a21\u677f\u540e\u4fa7\u7684 \u2507 \uff0c\u70b9\u51fb \u5220\u9664\uff0c\u5728\u8f93\u5165\u6846\u4e2d\u8f93\u5165\u544a\u8b66\u6a21\u677f\u7684\u540d\u79f0\u5373\u53ef\u5220\u9664\u3002

          "},{"location":"admin/insight/alert-center/inhibition.html","title":"\u544a\u8b66\u6291\u5236","text":"

          \u544a\u8b66\u6291\u5236\u4e3b\u8981\u662f\u5bf9\u4e8e\u67d0\u4e9b\u4e0d\u9700\u8981\u7acb\u5373\u5173\u6ce8\u7684\u544a\u8b66\u8fdb\u884c\u4e34\u65f6\u9690\u85cf\u6216\u8005\u964d\u4f4e\u5176\u4f18\u5148\u7ea7\u7684\u4e00\u79cd\u673a\u5236\u3002\u8fd9\u4e2a\u529f\u80fd\u7684\u76ee\u7684\u662f\u4e3a\u4e86\u51cf\u5c11\u4e0d\u5fc5\u8981\u7684\u544a\u8b66\u4fe1\u606f\u5bf9\u8fd0\u7ef4\u4eba\u5458\u7684\u5e72\u6270\uff0c\u4f7f\u4ed6\u4eec\u80fd\u591f\u96c6\u4e2d\u7cbe\u529b\u5904\u7406\u66f4\u91cd\u8981\u7684\u95ee\u9898\u3002

          \u544a\u8b66\u6291\u5236\u901a\u8fc7\u5b9a\u4e49\u4e00\u7ec4\u89c4\u5219\u6765\u8bc6\u522b\u548c\u5ffd\u7565\u67d0\u4e9b\u544a\u8b66\uff0c\u5f53\u5b83\u4eec\u5728\u7279\u5b9a\u6761\u4ef6\u4e0b\u53d1\u751f\u65f6\u3002\u4e3b\u8981\u6709\u4ee5\u4e0b\u51e0\u79cd\u60c5\u51b5\uff1a

          • \u7236\u5b50\u5173\u7cfb\u6291\u5236\uff1a\u5f53\u4e00\u4e2a\u7236\u544a\u8b66\uff08\u4f8b\u5982\u67d0\u4e2a\u8282\u70b9\u7684\u5d29\u6e83\uff09\u89e6\u53d1\u65f6\uff0c\u53ef\u4ee5\u6291\u5236\u6240\u6709\u7531\u6b64\u5f15\u8d77\u7684\u5b50\u544a\u8b66\uff08\u4f8b\u5982\u8be5\u8282\u70b9\u4e0a\u8fd0\u884c\u7684\u5bb9\u5668\u5d29\u6e83\uff09\u3002
          • \u76f8\u4f3c\u544a\u8b66\u6291\u5236\uff1a\u5f53\u591a\u4e2a\u544a\u8b66\u5177\u6709\u76f8\u540c\u7684\u7279\u5f81\uff08\u4f8b\u5982\u540c\u4e00\u5b9e\u4f8b\u4e0a\u7684\u76f8\u540c\u95ee\u9898\uff09\u65f6\uff0c\u53ef\u4ee5\u6291\u5236\u91cd\u590d\u7684\u544a\u8b66\u901a\u77e5\u3002
          "},{"location":"admin/insight/alert-center/inhibition.html#_2","title":"\u521b\u5efa\u6291\u5236\u89c4\u5219","text":"
          1. \u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9\u00a0\u544a\u8b66\u4e2d\u5fc3\u00a0->\u00a0\u544a\u8b66\u964d\u566a\uff0c\u5355\u51fb\u9876\u90e8\u7684 \u544a\u8b66\u6291\u5236 \u3002

          2. \u70b9\u51fb \u65b0\u5efa\u6291\u5236\u89c4\u5219 \uff0c\u8bbe\u7f6e\u6291\u5236\u89c4\u5219\u7684\u540d\u79f0\u3001\u89c4\u5219\u7b49\u3002

            Note

            \u901a\u8fc7\u89c4\u5219\u6807\u7b7e\u548c\u544a\u8b66\u6807\u7b7e\u5b9a\u4e49\u4e00\u7ec4\u89c4\u5219\u6765\u8bc6\u522b\u548c\u5ffd\u7565\u67d0\u4e9b\u544a\u8b66\uff0c\u8fbe\u5230\u907f\u514d\u540c\u4e00\u95ee\u9898\u53ef\u80fd\u4f1a\u89e6\u53d1\u591a\u4e2a\u76f8\u4f3c\u6216\u76f8\u5173\u7684\u544a\u8b66\u7684\u95ee\u9898\u3002

            \u53c2\u6570\u65f6\u95f4 \u8bf4\u660e \u6291\u5236\u89c4\u5219\u540d\u79f0 \u6291\u5236\u89c4\u5219\u540d\u79f0\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u548c\u8fde\u5b57\u7b26\uff08-\uff09\uff0c\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u548c\u7ed3\u5c3e\uff0c\u6700\u957f 63 \u4e2a\u5b57\u7b26\u3002 \u63cf\u8ff0 \u63cf\u8ff0\u53ef\u5305\u542b\u4efb\u610f\u5b57\u7b26\uff0c\u6700\u957f 256 \u4e2a\u5b57\u7b26\u3002 \u96c6\u7fa4 \u8be5\u6291\u5236\u89c4\u5219\u4f5c\u7528\u7684\u96c6\u7fa4\u3002 \u547d\u540d\u7a7a\u95f4 \u8be5\u6291\u5236\u89c4\u5219\u4f5c\u7528\u7684\u547d\u540d\u7a7a\u95f4\u3002 \u6839\u6e90\u544a\u8b66 \u901a\u8fc7\u586b\u5199\u7684\u6807\u7b7e\u6761\u4ef6\u5339\u914d\u544a\u8b66\uff0c\u4f1a\u5c06\u7b26\u5408\u6240\u6709\u6807\u7b7e\u6761\u4ef6\u7684\u544a\u8b66\u4e0e\u7b26\u5408\u6291\u5236\u6761\u4ef6\u7684\u8fdb\u884c\u5bf9\u6bd4\uff0c\u4e0d\u7b26\u5408\u6291\u5236\u6761\u4ef6\u7684\u544a\u8b66\u5c06\u7167\u5e38\u53d1\u9001\u6d88\u606f\u7ed9\u7528\u6237\u3002 \u53d6\u503c\u8303\u56f4\u8bf4\u660e\uff1a - \u544a\u8b66\u7ea7\u522b\uff1a\u6307\u6807\u6216\u4e8b\u4ef6\u544a\u8b66\u7684\u7ea7\u522b\uff0c\u53ef\u4ee5\u8bbe\u7f6e\u4e3a\uff1a\u7d27\u6025\u3001\u91cd\u8981\u3001\u63d0\u793a\u3002 - \u8d44\u6e90\u7c7b\u578b\uff1a\u544a\u8b66\u5bf9\u8c61\u6240\u5bf9\u5e94\u7684\u8d44\u6e90\u7c7b\u578b\uff0c\u53ef\u4ee5\u8bbe\u7f6e\u4e3a\uff1a\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u65e0\u72b6\u6001\u8d1f\u8f7d\u3001\u6709\u72b6\u5bb9\u8d1f\u8f7d\u3001\u5b88\u62a4\u8fdb\u7a0b\u3001\u5bb9\u5668\u7ec4\u3002 - \u6807\u7b7e\uff1a\u544a\u8b66\u6807\u8bc6\u5c5e\u6027\uff0c\u7531\u6807\u7b7e\u540d\u548c\u6807\u7b7e\u503c\u6784\u6210\uff0c\u652f\u6301\u7528\u6237\u81ea\u5b9a\u4e49\u3002 \u6291\u5236\u544a\u8b66 \u7528\u4e8e\u6307\u5b9a\u76ee\u6807\u8b66\u62a5\uff08\u5c06\u88ab\u6291\u5236\u7684\u8b66\u62a5\uff09\u7684\u5339\u914d\u6761\u4ef6\uff0c\u7b26\u5408\u6240\u6709\u6807\u7b7e\u6761\u4ef6\u7684\u544a\u8b66\u5c06\u4e0d\u4f1a\u518d\u53d1\u9001\u6d88\u606f\u7ed9\u7528\u6237\u3002 \u5339\u914d\u6807\u7b7e \u7528\u4e8e\u6307\u5b9a\u5e94\u8be5\u6bd4\u8f83\u7684\u6807\u7b7e\u5217\u8868\uff0c\u4ee5\u786e\u5b9a\u6e90\u8b66\u62a5\u548c\u76ee\u6807\u8b66\u62a5\u662f\u5426\u5339\u914d\u3002\u53ea\u6709\u5728\u00a0equal\u00a0\u4e2d\u6307\u5b9a\u7684\u6807\u7b7e\u5728\u6e90\u548c\u76ee\u6807\u8b66\u62a5\u4e2d\u7684\u503c\u5b8c\u5168\u76f8\u540c\u7684\u60c5\u51b5\u4e0b\uff0c\u624d\u4f1a\u89e6\u53d1\u6291\u5236\u3002equal\u00a0\u5b57\u6bb5\u662f\u53ef\u9009\u7684\u3002\u5982\u679c\u7701\u7565\u00a0equal\u00a0\u5b57\u6bb5\uff0c\u5219\u4f1a\u5c06\u6240\u6709\u6807\u7b7e\u7528\u4e8e\u5339\u914d
          3. \u70b9\u51fb**\u786e\u5b9a**\u5b8c\u6210\u521b\u5efa\u540e\u8fd4\u56de\u544a\u8b66\u6291\u5236\u5217\u8868\uff0c\u70b9\u51fb\u544a\u8b66\u6291\u5236\u540d\u79f0\u540e\u53ef\u67e5\u770b\u6291\u5236\u89c4\u5219\u8be6\u60c5\u3002
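Conceptually these fields map onto an Alertmanager-style inhibition rule: the source alert, the inhibited (target) alert, and the equal labels. A minimal sketch under that assumption, with illustrative label names and values:

    inhibit_rules:
      - source_matchers:            # the root-cause alert
          - alertname = NodeDown
          - severity = critical
        target_matchers:            # the alerts to be suppressed
          - severity = warning
        equal:                      # suppress only when these labels match on both alerts
          - cluster
          - node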

          "},{"location":"admin/insight/alert-center/inhibition.html#_3","title":"\u67e5\u770b\u89c4\u5219\u6807\u7b7e","text":"
          1. \u70b9\u51fb\u53f3\u4fa7\u5bfc\u822a\u680f\u9009\u62e9\u00a0\u544a\u8b66\u4e2d\u5fc3\u00a0->\u00a0\u544a\u8b66\u7b56\u7565 \uff0c\u70b9\u51fb\u89c4\u5219\u6240\u5728\u7684\u7b56\u7565\u8be6\u60c5\u3002
          2. \u70b9\u51fb\u76ee\u6807\u89c4\u5219\u540d\u79f0\uff0c\u67e5\u770b\u89c4\u5219\u8be6\u60c5\uff0c\u67e5\u770b\u5bf9\u5e94\u544a\u8b66\u89c4\u5219\u7684\u6807\u7b7e\u3002

            Note

            \u5728\u6dfb\u52a0\u89c4\u5219\u65f6\u53ef\u6dfb\u52a0\u81ea\u5b9a\u4e49\u6807\u7b7e\u3002

          "},{"location":"admin/insight/alert-center/inhibition.html#_4","title":"\u67e5\u770b\u544a\u8b66\u6807\u7b7e","text":"
          1. \u70b9\u51fb\u53f3\u4fa7\u5bfc\u822a\u680f\u9009\u62e9\u00a0\u544a\u8b66\u4e2d\u5fc3\u00a0->\u00a0\u544a\u8b66\u5217\u8868 \uff0c\u70b9\u51fb\u544a\u8b66\u6240\u5728\u884c\u67e5\u770b\u544a\u8b66\u8be6\u60c5\u3002

            Note

            \u544a\u8b66\u6807\u7b7e\u7528\u4e8e\u63cf\u8ff0\u544a\u8b66\u7684\u8be6\u7ec6\u4fe1\u606f\u548c\u5c5e\u6027\uff0c\u53ef\u4ee5\u7528\u6765\u521b\u5efa\u6291\u5236\u89c4\u5219\u3002

          "},{"location":"admin/insight/alert-center/inhibition.html#_5","title":"\u7f16\u8f91\u6291\u5236\u89c4\u5219","text":"
          1. \u70b9\u51fb\u76ee\u6807\u89c4\u5219\u540e\u4fa7\u7684 \u2507 \uff0c\u70b9\u51fb \u7f16\u8f91\uff0c\u8fdb\u5165\u6291\u5236\u89c4\u5219\u7684\u7f16\u8f91\u9875\u3002

          "},{"location":"admin/insight/alert-center/inhibition.html#_6","title":"\u5220\u9664\u6291\u5236\u89c4\u5219","text":"

          \u70b9\u51fb\u76ee\u6807\u89c4\u5219\u540e\u4fa7\u7684 \u2507 \uff0c\u70b9\u51fb \u5220\u9664\uff0c\u5728\u8f93\u5165\u6846\u4e2d\u8f93\u5165\u6291\u5236\u89c4\u5219\u7684\u540d\u79f0\u5373\u53ef\u5220\u9664\u3002

          "},{"location":"admin/insight/alert-center/message.html","title":"\u901a\u77e5\u914d\u7f6e","text":"

          \u5728 \u901a\u77e5\u914d\u7f6e \u9875\u9762\uff0c\u53ef\u4ee5\u914d\u7f6e\u901a\u8fc7\u90ae\u4ef6\u3001\u4f01\u4e1a\u5fae\u4fe1\u3001\u9489\u9489\u3001Webhook \u548c\u77ed\u4fe1\u7b49\u65b9\u5f0f\u5411\u7528\u6237\u53d1\u9001\u6d88\u606f\u3002

          "},{"location":"admin/insight/alert-center/message.html#_2","title":"\u90ae\u4ef6\u7ec4","text":"
          1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u540e\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e\uff0c\u9ed8\u8ba4\u4f4d\u4e8e\u90ae\u4ef6\u901a\u77e5\u5bf9\u8c61\u3002

          2. \u70b9\u51fb \u6dfb\u52a0\u90ae\u7bb1\u7ec4\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u90ae\u4ef6\u5730\u5740\u3002

          3. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u90ae\u7bb1\u7ec4\u3002

          "},{"location":"admin/insight/alert-center/message.html#_3","title":"\u4f01\u4e1a\u5fae\u4fe1","text":"
          1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u4f01\u4e1a\u5fae\u4fe1\u3002

            \u6709\u5173\u4f01\u4e1a\u5fae\u4fe1\u7fa4\u673a\u5668\u4eba\u7684 URL\uff0c\u8bf7\u53c2\u9605\u4f01\u4e1a\u5fae\u4fe1\u5b98\u65b9\u6587\u6863\uff1a\u5982\u4f55\u4f7f\u7528\u7fa4\u673a\u5668\u4eba\u3002

          2. \u70b9\u51fb \u6dfb\u52a0\u7fa4\u673a\u5668\u4eba\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u7fa4\u673a\u5668\u4eba\u3002

          3. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u9009\u62e9 \u53d1\u9001\u6d4b\u8bd5\u4fe1\u606f\uff0c\u8fd8\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u7fa4\u673a\u5668\u4eba\u3002

          "},{"location":"admin/insight/alert-center/message.html#_4","title":"\u9489\u9489","text":"
          1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u9489\u9489\uff0c\u70b9\u51fb \u6dfb\u52a0\u7fa4\u673a\u5668\u4eba\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u7fa4\u673a\u5668\u4eba\u3002

            \u6709\u5173\u9489\u9489\u7fa4\u673a\u5668\u4eba\u7684 URL\uff0c\u8bf7\u53c2\u9605\u9489\u9489\u5b98\u65b9\u6587\u6863\uff1a\u81ea\u5b9a\u4e49\u673a\u5668\u4eba\u63a5\u5165\u3002

            Note

            \u52a0\u7b7e\u7684\u65b9\u5f0f\u662f\u9489\u9489\u673a\u5668\u4eba\u4e0e\u5f00\u53d1\u8005\u53cc\u5411\u8fdb\u884c\u5b89\u5168\u8ba4\u8bc1\uff0c\u82e5\u5728\u521b\u5efa\u9489\u9489\u673a\u5668\u4eba\u65f6\u5f00\u542f\u4e86\u52a0\u7b7e\uff0c\u5219\u9700\u8981\u5728\u6b64\u5904\u8f93\u5165\u9489\u9489\u751f\u6210\u7684\u5bc6\u94a5\u3002 \u53ef\u53c2\u8003\u9489\u9489\u81ea\u5b9a\u4e49\u673a\u5668\u4eba\u5b89\u5168\u8bbe\u7f6e\u3002

          2. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u9009\u62e9 \u53d1\u9001\u6d4b\u8bd5\u4fe1\u606f\uff0c\u8fd8\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u7fa4\u673a\u5668\u4eba\u3002

          "},{"location":"admin/insight/alert-center/message.html#_5","title":"\u98de\u4e66","text":"
          1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u98de\u4e66\uff0c\u70b9\u51fb \u6dfb\u52a0\u7fa4\u673a\u5668\u4eba\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u7fa4\u673a\u5668\u4eba\u3002

            Note

            \u5f53\u98de\u4e66\u7684\u7fa4\u673a\u5668\u4eba\u5f00\u542f\u7b7e\u540d\u6821\u9a8c\u65f6\uff0c\u6dfb\u52a0\u98de\u4e66\u901a\u77e5\u65f6\u9700\u8981\u586b\u5199\u5bf9\u5e94\u7684\u7b7e\u540d\u5bc6\u94a5\u3002\u8bf7\u67e5\u9605 \u81ea\u5b9a\u4e49\u673a\u5668\u4eba\u4f7f\u7528\u6307\u5357\u3002

          2. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u9009\u62e9 \u53d1\u9001\u6d4b\u8bd5\u4fe1\u606f\uff0c\u8fd8\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u7fa4\u673a\u5668\u4eba\u3002

          "},{"location":"admin/insight/alert-center/message.html#webhook","title":"Webhook","text":"
          1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> Webhook\u3002

            \u6709\u5173 Webhook URL \u53ca\u66f4\u591a\u914d\u7f6e\u65b9\u5f0f\uff0c\u8bf7\u53c2\u9605 webhook \u6587\u6863\u3002

          2. \u70b9\u51fb \u65b0\u5efa Webhook\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a Webhook\u3002

            HTTP Headers\uff1a\u975e\u5fc5\u586b\uff0c\u8bbe\u7f6e\u8bf7\u6c42\u5934\u3002\u53ef\u4ee5\u6dfb\u52a0\u591a\u4e2a Headers\u3002

            Note

            \u6709\u5173 Webhook URL \u53ca\u66f4\u591a\u914d\u7f6e\u65b9\u5f0f\uff0c\u8bf7\u53c2\u9605 webhook \u6587\u6863\u3002

          3. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u9009\u62e9 \u53d1\u9001\u6d4b\u8bd5\u4fe1\u606f\uff0c\u8fd8\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664 Webhook\u3002

          "},{"location":"admin/insight/alert-center/message.html#_6","title":"\u7ad9\u5185\u4fe1","text":"

          Note

          \u544a\u8b66\u6d88\u606f\u53d1\u9001\u81f3\u7528\u6237\u4e2a\u4eba\u7684\u7ad9\u5185\u4fe1\uff0c\u70b9\u51fb\u9876\u90e8\u7684 \ud83d\udd14 \u7b26\u53f7\u53ef\u4ee5\u67e5\u770b\u901a\u77e5\u6d88\u606f\u3002

          1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u7ad9\u5185\u4fe1\uff0c\u70b9\u51fb\u521b\u5efa\u3002

            • \u7ad9\u5185\u4fe1\u901a\u77e5\u5141\u8bb8\u6dfb\u52a0\u591a\u4e2a\u7528\u6237\u3002

          2. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de \u7ad9\u5185\u4fe1\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u9009\u62e9 \u53d1\u9001\u6d4b\u8bd5\u4fe1\u606f\u3002

          "},{"location":"admin/insight/alert-center/message.html#_7","title":"\u77ed\u4fe1\u7ec4","text":"
          1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u77ed\u4fe1\uff0c\u70b9\u51fb \u6dfb\u52a0\u77ed\u4fe1\u7ec4\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u77ed\u4fe1\u7ec4\u3002

          2. \u5728\u5f39\u7a97\u4e2d\u8f93\u5165\u540d\u79f0\u3001\u63a5\u6536\u77ed\u4fe1\u7684\u5bf9\u8c61\u3001\u624b\u673a\u53f7\u4ee5\u53ca\u901a\u77e5\u670d\u52a1\u5668\u3002

            \u901a\u77e5\u670d\u52a1\u5668\u9700\u8981\u9884\u5148\u5728 \u901a\u77e5\u914d\u7f6e -> \u901a\u77e5\u670d\u52a1\u5668 \u4e2d\u6dfb\u52a0\u521b\u5efa\u3002\u76ee\u524d\u652f\u6301\u963f\u91cc\u4e91\u3001\u817e\u8baf\u4e91\u4e24\u79cd\u4e91\u670d\u52a1\u5668\uff0c\u5177\u4f53\u914d\u7f6e\u7684\u53c2\u6570\u8bf7\u53c2\u9605\u81ea\u5df1\u7684\u4e91\u670d\u52a1\u5668\u4fe1\u606f\u3002

          3. \u77ed\u4fe1\u7ec4\u6dfb\u52a0\u6210\u529f\u540e\uff0c\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u77ed\u4fe1\u7ec4\u3002

          "},{"location":"admin/insight/alert-center/msg-template.html","title":"\u6d88\u606f\u6a21\u677f","text":"

          \u53ef\u89c2\u6d4b\u6027\u63d0\u4f9b\u81ea\u5b9a\u4e49\u6d88\u606f\u6a21\u677f\u5185\u5bb9\u7684\u80fd\u529b\uff0c\u652f\u6301\u90ae\u4ef6\u3001\u4f01\u4e1a\u5fae\u4fe1\u3001\u9489\u9489\u3001Webhook\u3001\u98de\u4e66\u3001\u7ad9\u5185\u4fe1\u7b49\u4e0d\u540c\u7684\u901a\u77e5\u5bf9\u8c61\u5b9a\u4e49\u4e0d\u540c\u7684\u6d88\u606f\u901a\u77e5\u5185\u5bb9\u3002

          "},{"location":"admin/insight/alert-center/msg-template.html#_2","title":"\u521b\u5efa\u6d88\u606f\u6a21\u677f","text":"
          1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9 \u544a\u8b66\u4e2d\u5fc3 -> \u6d88\u606f\u6a21\u677f\u3002

            Insight \u9ed8\u8ba4\u5185\u7f6e\u4e2d\u82f1\u6587\u4e24\u4e2a\u6a21\u677f\uff0c\u4ee5\u4fbf\u7528\u6237\u4f7f\u7528\u3002

          2. \u70b9\u51fb \u65b0\u5efa\u6d88\u606f\u6a21\u677f \u6309\u94ae\uff0c\u586b\u5199\u6a21\u677f\u5185\u5bb9\u3002

          Info

          \u53ef\u89c2\u6d4b\u6027\u9884\u7f6e\u4e86\u6d88\u606f\u6a21\u677f\u3002\u82e5\u9700\u8981\u5b9a\u4e49\u6a21\u677f\u7684\u5185\u5bb9\uff0c\u8bf7\u53c2\u8003\u914d\u7f6e\u901a\u77e5\u6a21\u677f\u3002

          "},{"location":"admin/insight/alert-center/msg-template.html#_3","title":"\u6d88\u606f\u6a21\u677f\u8be6\u60c5","text":"

          \u70b9\u51fb\u67d0\u4e00\u6d88\u606f\u6a21\u677f\u7684\u540d\u79f0\uff0c\u53f3\u4fa7\u6ed1\u5757\u53ef\u67e5\u770b\u6d88\u606f\u6a21\u677f\u7684\u8be6\u60c5\u3002
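For illustration, here is a hedged sketch of a template body assembled from the variables listed in the table below; the layout and wording are assumptions, not one of the built-in templates:

    [{{ .Labels.severity }}] {{ .Labels.alertname }} (policy: {{ .Labels.alertgroup }})
    Cluster: {{ .Labels.cluster }}  Namespace: {{ .Labels.namespace }}  Node: {{ .Labels.node }}
    Target: {{ .Labels.target_type }}/{{ .Labels.target }}
    Value: {{ .Annotations.value }}  Started: {{ .StartsAt }}
    {{ .Annotations.description }}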

Each template can reference the following parameters (variable and description):

• Rule name ({{ .Labels.alertname }}): name of the rule that fired the alert.
• Policy name ({{ .Labels.alertgroup }}): name of the alert policy the firing rule belongs to.
• Alert severity ({{ .Labels.severity }}): severity of the firing alert.
• Cluster ({{ .Labels.cluster }}): cluster of the resource that fired the alert.
• Namespace ({{ .Labels.namespace }}): namespace of the resource that fired the alert.
• Node ({{ .Labels.node }}): node of the resource that fired the alert.
• Resource type ({{ .Labels.target_type }}): resource type of the alert target.
• Resource name ({{ .Labels.target }}): name of the object that fired the alert.
• Trigger value ({{ .Annotations.value }}): metric value at the moment the alert notification fired.
• Start time ({{ .StartsAt }}): time the alert started.
• End time ({{ .EndsAT }}): time the alert ended.
• Description ({{ .Annotations.description }}): detailed description of the alert.
• Labels ({{ for .labels}} {{end}}): all labels of the alert; the for function iterates over the labels list to retrieve every label.
"},{"location":"admin/insight/alert-center/msg-template.html#_4","title":"Edit or Delete Message Templates","text":"

Click the ┇ on the right of the list and choose Edit or Delete from the pop-up menu to modify or delete a message template.

Warning

Note that a deleted template cannot be recovered, so proceed with caution.

          "},{"location":"admin/insight/alert-center/silent.html","title":"\u544a\u8b66\u9759\u9ed8","text":"

          \u544a\u8b66\u9759\u9ed8\u662f\u6307\u5728\u7279\u5b9a\u7684\u65f6\u95f4\u8303\u56f4\u5185\uff0c\u6839\u636e\u5b9a\u4e49\u597d\u7684\u89c4\u5219\u5bf9\u7b26\u5408\u6761\u4ef6\u7684\u544a\u8b66\u4e0d\u518d\u53d1\u9001\u544a\u8b66\u901a\u77e5\u3002\u8be5\u529f\u80fd\u53ef\u4ee5\u5e2e\u52a9\u8fd0\u7ef4\u4eba\u5458\u907f\u514d\u5728\u67d0\u4e9b\u64cd\u4f5c\u6216\u4e8b\u4ef6\u671f\u95f4\u63a5\u6536\u5230\u8fc7\u591a\u7684\u566a\u58f0\u544a\u8b66\uff0c\u540c\u65f6\u4fbf\u4e8e\u66f4\u52a0\u7cbe\u786e\u5730\u5904\u7406\u771f\u6b63\u9700\u8981\u89e3\u51b3\u7684\u95ee\u9898\u3002

          \u5728\u544a\u8b66\u9759\u9ed8\u9875\u9762\u4e0a\uff0c\u7528\u6237\u53ef\u4ee5\u770b\u5230\u4e24\u4e2a\u9875\u7b7e\uff1a\u6d3b\u8dc3\u89c4\u5219\u548c\u8fc7\u671f\u89c4\u5219\u3002 \u5176\u4e2d\uff0c\u6d3b\u8dc3\u89c4\u5219\u8868\u793a\u76ee\u524d\u6b63\u5728\u751f\u6548\u7684\u89c4\u5219\uff0c\u800c\u8fc7\u671f\u89c4\u5219\u5219\u662f\u4ee5\u524d\u5b9a\u4e49\u8fc7\u4f46\u5df2\u7ecf\u8fc7\u671f\uff08\u6216\u8005\u7528\u6237\u4e3b\u52a8\u5220\u9664\uff09\u7684\u89c4\u5219\u3002

          "},{"location":"admin/insight/alert-center/silent.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
          1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9 \u544a\u8b66\u4e2d\u5fc3 -> \u544a\u8b66\u9759\u9ed8 ,\u70b9\u51fb \u65b0\u5efa\u9759\u9ed8\u89c4\u5219 \u6309\u94ae\u3002

          2. \u586b\u5199\u9759\u9ed8\u89c4\u5219\u7684\u5404\u9879\u53c2\u6570\uff0c\u5982\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u3001\u6807\u7b7e\u3001\u65f6\u95f4\u7b49\uff0c\u4ee5\u5b9a\u4e49\u8fd9\u6761\u89c4\u5219\u7684\u4f5c\u7528\u8303\u56f4\u548c\u751f\u6548\u65f6\u95f4\u3002

          3. \u8fd4\u56de\u89c4\u5219\u5217\u8868\uff0c\u5728\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u9759\u9ed8\u89c4\u5219\u3002

          \u901a\u8fc7\u544a\u8b66\u9759\u9ed8\u529f\u80fd\uff0c\u60a8\u53ef\u4ee5\u7075\u6d3b\u5730\u63a7\u5236\u54ea\u4e9b\u544a\u8b66\u9700\u8981\u88ab\u5ffd\u7565\uff0c\u5728\u4ec0\u4e48\u65f6\u95f4\u6bb5\u5185\u751f\u6548\uff0c\u4ece\u800c\u63d0\u9ad8\u8fd0\u7ef4\u6548\u7387\uff0c\u51cf\u5c11\u8bef\u62a5\u7684\u53ef\u80fd\u6027\u3002
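Conceptually a silence rule is a set of label matchers plus a validity window, much like an Alertmanager silence. A minimal sketch under that assumption (all matcher values are illustrative):

    matchers:                          # alerts matching all of these are silenced
      - name: cluster
        value: prod-cluster
        isRegex: false
      - name: namespace
        value: payment
        isRegex: false
    startsAt: "2024-11-15T00:00:00Z"   # when the silence takes effect
    endsAt: "2024-11-15T06:00:00Z"     # after this it moves to the expired rules tab
    comment: "planned maintenance window"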

          "},{"location":"admin/insight/alert-center/sms-provider.html","title":"\u914d\u7f6e\u901a\u77e5\u670d\u52a1\u5668","text":"

          \u53ef\u89c2\u6d4b\u6027 Insight \u652f\u6301\u77ed\u4fe1\u901a\u77e5\uff0c\u76ee\u524d\u901a\u8fc7\u96c6\u6210\u963f\u91cc\u4e91\u3001\u817e\u8baf\u4e91\u7684\u77ed\u4fe1\u670d\u52a1\u53d1\u9001\u544a\u8b66\u6d88\u606f\u3002\u672c\u6587\u4ecb\u7ecd\u4e86\u5982\u4f55\u5728 insight \u4e2d\u914d\u7f6e\u77ed\u4fe1\u901a\u77e5\u7684\u670d\u52a1\u5668\u3002\u77ed\u4fe1\u7b7e\u540d\u4e2d\u652f\u6301\u7684\u53d8\u91cf\u4e3a\u6d88\u606f\u6a21\u677f\u4e2d\u7684\u9ed8\u8ba4\u53d8\u91cf\uff0c\u540c\u65f6\u7531\u4e8e\u77ed\u4fe1\u5b57\u6570\u6709\u9650\uff0c\u5efa\u8bae\u9009\u62e9\u8f83\u4e3a\u660e\u786e\u7684\u53d8\u91cf\u3002

          \u5982\u4f55\u914d\u7f6e\u77ed\u4fe1\u63a5\u6536\u4eba\u53ef\u53c2\u8003\u6587\u6863\uff1a\u914d\u7f6e\u77ed\u4fe1\u901a\u77e5\u7ec4\u3002

          "},{"location":"admin/insight/alert-center/sms-provider.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
          1. \u8fdb\u5165 \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u901a\u77e5\u670d\u52a1\u5668 \u3002

          2. \u70b9\u51fb \u6dfb\u52a0\u901a\u77e5\u670d\u52a1\u5668 \u3002

            1. \u914d\u7f6e\u963f\u91cc\u4e91\u670d\u52a1\u5668\u3002

              \u7533\u8bf7\u963f\u91cc\u4e91\u77ed\u4fe1\u670d\u52a1\uff0c\u8bf7\u53c2\u8003\u963f\u91cc\u4e91\u77ed\u4fe1\u670d\u52a1\u3002

              \u5b57\u6bb5\u8bf4\u660e\uff1a

              • AccessKey ID \uff1a\u963f\u91cc\u4e91\u7528\u4e8e\u6807\u8bc6\u7528\u6237\u7684\u53c2\u6570\u3002
              • AccessKey Secret \uff1a\u963f\u91cc\u4e91\u7528\u4e8e\u9a8c\u8bc1\u7528\u6237\u7684\u5bc6\u94a5\u3002AccessKey Secret \u5fc5\u987b\u4fdd\u5bc6\u3002
              • \u77ed\u4fe1\u7b7e\u540d \uff1a\u77ed\u4fe1\u670d\u52a1\u652f\u6301\u6839\u636e\u7528\u6237\u9700\u6c42\u521b\u5efa\u7b26\u5408\u8981\u6c42\u7684\u7b7e\u540d\u3002\u53d1\u9001\u77ed\u4fe1\u65f6\uff0c\u77ed\u4fe1\u5e73\u53f0\u4f1a\u5c06\u5df2\u5ba1\u6838\u901a\u8fc7\u7684\u77ed\u4fe1\u7b7e\u540d\u6dfb\u52a0\u5230\u77ed\u4fe1\u5185\u5bb9\u4e2d\uff0c\u518d\u53d1\u9001\u7ed9\u77ed\u4fe1\u63a5\u6536\u65b9\u3002
              • \u6a21\u677f CODE \uff1a\u77ed\u4fe1\u6a21\u677f\u662f\u53d1\u9001\u77ed\u4fe1\u7684\u5177\u4f53\u5185\u5bb9\u3002
              • \u53c2\u6570\u6a21\u677f \uff1a\u77ed\u4fe1\u6b63\u6587\u6a21\u677f\u53ef\u4ee5\u5305\u542b\u53d8\u91cf\uff0c\u7528\u6237\u53ef\u901a\u8fc7\u53d8\u91cf\u5b9e\u73b0\u81ea\u5b9a\u4e49\u77ed\u4fe1\u5185\u5bb9\u3002

              \u8bf7\u53c2\u8003\u963f\u91cc\u4e91\u53d8\u91cf\u89c4\u8303\u3002

              Note

              \u4e3e\u4f8b\uff1a\u5728\u963f\u91cc\u4e91\u5b9a\u4e49\u7684\u6a21\u677f\u5185\u5bb9\u4e3a\uff1a\\({severity}\uff1a\\) \u88ab\u89e6\u53d1\u3002\u53c2\u6570\u6a21\u677f\u4e2d\u7684\u914d\u7f6e\u53c2\u8003\u4e0a\u56fe\u3002} \u5728 ${startat

  2. Configure a Tencent Cloud server.

    To apply for the Tencent Cloud SMS service, refer to Tencent Cloud SMS.

    Field descriptions:

    • Secret ID: the Tencent Cloud parameter that identifies the API caller.
    • SecretKey: the Tencent Cloud parameter that authenticates the API caller's identity.
    • SMS template ID: the SMS template ID, generated automatically by the Tencent Cloud system.
    • Signature content: the SMS signature content, i.e. the full or short name of the actual website defined in the Tencent Cloud SMS signature.
    • SdkAppId: the SMS SdkAppId, generated after adding an application in the Tencent Cloud SMS console.
    • Parameter template: the SMS body template may contain variables, through which users can customize the message content. Refer to the Tencent Cloud variable specification.

    Note

    Example: the template content defined in Tencent Cloud is: {1}: {2} was triggered at {3}. See the figure above for the parameter template configuration.

          "},{"location":"admin/insight/best-practice/debug-log.html","title":"\u65e5\u5fd7\u91c7\u96c6\u6392\u969c\u6307\u5357","text":"

          \u5728\u96c6\u7fa4\u4e2d\u5b89\u88c5 insight-agent \u540e\uff0c insight-agent \u4e2d\u7684 Fluent Bit \u4f1a\u9ed8\u8ba4\u91c7\u96c6\u96c6\u7fa4\u4e2d\u7684\u65e5\u5fd7\uff0c\u5305\u62ec Kubernetes \u4e8b\u4ef6\u65e5\u5fd7\u3001\u8282\u70b9\u65e5\u5fd7\u3001\u5bb9\u5668\u65e5\u5fd7\u7b49\u3002 Fluent Bit \u5df2\u914d\u7f6e\u597d\u5404\u79cd\u65e5\u5fd7\u91c7\u96c6\u63d2\u4ef6\u3001\u76f8\u5173\u7684\u8fc7\u6ee4\u5668\u63d2\u4ef6\u53ca\u65e5\u5fd7\u8f93\u51fa\u63d2\u4ef6\u3002 \u8fd9\u4e9b\u63d2\u4ef6\u7684\u5de5\u4f5c\u72b6\u6001\u51b3\u5b9a\u4e86\u65e5\u5fd7\u91c7\u96c6\u662f\u5426\u6b63\u5e38\u3002 \u4e0b\u9762\u662f\u4e00\u4e2a\u9488\u5bf9 Fluent Bit \u7684\u4eea\u8868\u76d8\uff0c\u5b83\u7528\u6765\u76d1\u63a7\u5404\u4e2a\u96c6\u7fa4\u4e2d Fluent Bit \u7684\u5de5\u4f5c\u60c5\u51b5\u548c\u63d2\u4ef6\u7684\u91c7\u96c6\u3001\u5904\u7406\u3001\u5bfc\u51fa\u65e5\u5fd7\u7684\u60c5\u51b5\u3002

          1. \u4f7f\u7528 AI \u7b97\u529b\u4e2d\u5fc3\u5e73\u53f0\uff0c\u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \uff0c\u9009\u62e9\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u4eea\u8868\u76d8 \u3002

          2. \u70b9\u51fb\u4eea\u8868\u76d8\u6807\u9898 \u6982\u89c8 \u3002

          3. \u5207\u6362\u5230 insight-system -> Fluent Bit \u4eea\u8868\u76d8\u3002

          4. Fluent Bit \u4eea\u8868\u76d8\u4e0a\u65b9\u6709\u51e0\u4e2a\u9009\u9879\u6846\uff0c\u53ef\u4ee5\u9009\u62e9\u65e5\u5fd7\u91c7\u96c6\u63d2\u4ef6\u3001\u65e5\u5fd7\u8fc7\u6ee4\u63d2\u4ef6\u3001\u65e5\u5fd7\u8f93\u51fa\u63d2\u4ef6\u53ca\u6240\u5728\u96c6\u7fa4\u540d\u3002

          "},{"location":"admin/insight/best-practice/debug-log.html#_2","title":"\u63d2\u4ef6\u8bf4\u660e","text":"

          \u6b64\u5904\u8bf4\u660e Fluent Bit \u7684\u51e0\u4e2a\u63d2\u4ef6\u3002

          \u65e5\u5fd7\u91c7\u96c6\u63d2\u4ef6

          input plugin \u63d2\u4ef6\u4ecb\u7ecd \u91c7\u96c6\u76ee\u5f55 tail.kube \u91c7\u96c6\u5bb9\u5668\u65e5\u5fd7 /var/log/containers/*.log tail.kubeevent \u91c7\u96c6 Kubernetes \u4e8b\u4ef6\u65e5\u5fd7 /var/log/containers/-kubernetes-event-exporter.log tail.syslog.dmesg \u91c7\u96c6\u4e3b\u673a dmesg \u65e5\u5fd7 /var/log/dmesg tail.syslog.messages \u91c7\u96c6\u4e3b\u673a\u5e38\u7528\u65e5\u5fd7 /var/log/secure, /var/log/messages, /var/log/syslog,/var/log/auth.log syslog.syslog.RSyslog \u91c7\u96c6 RSyslog \u65e5\u5fd7 systemd.syslog.systemd \u91c7\u96c6 Journald daemon \u65e5\u5fd7 tail.audit_log.k8s \u91c7\u96c6 Kubernetes \u5ba1\u8ba1\u65e5\u5fd7 /var/log//audit/.log tail.audit_log.ghippo \u91c7\u96c6\u5168\u5c40\u7ba1\u7406\u5ba1\u8ba1\u65e5\u5fd7 /var/log/containers/_ghippo-system_audit-log.log tail.skoala-gw \u91c7\u96c6\u5fae\u670d\u52a1\u7f51\u5173\u65e5\u5fd7 /var/log/containers/_skoala-gw.log

          \u65e5\u5fd7\u8fc7\u6ee4\u63d2\u4ef6

          filter plugin \u63d2\u4ef6\u4ecb\u7ecd Lua.audit_log.k8s \u4f7f\u7528 lua \u8fc7\u6ee4\u7b26\u5408\u6761\u4ef6\u7684 Kubernetes \u5ba1\u8ba1\u65e5\u5fd7

          Note

          \u8fc7\u6ee4\u5668\u63d2\u4ef6\u4e0d\u6b62 Lua.audit_log.k8s\uff0c\u8fd9\u91cc\u53ea\u4ecb\u7ecd\u4f1a\u4e22\u5f03\u65e5\u5fd7\u7684\u8fc7\u6ee4\u5668\u3002

          \u65e5\u5fd7\u8f93\u51fa\u63d2\u4ef6

          output plugin \u63d2\u4ef6\u4ecb\u7ecd es.kube.kubeevent.syslog \u628a Kubernetes \u5ba1\u8ba1\u65e5\u5fd7\u3001\u4e8b\u4ef6\u65e5\u5fd7\uff0csyslog \u65e5\u5fd7\u5199\u5165 ElasticSearch \u96c6\u7fa4 forward.audit_log \u628a Kubernetes \u5ba1\u8ba1\u65e5\u5fd7\u548c\u5168\u5c40\u7ba1\u7406\u7684\u5ba1\u8ba1\u65e5\u5fd7\u53d1\u9001\u5230 \u5168\u5c40\u7ba1\u7406"},{"location":"admin/insight/best-practice/debug-trace.html","title":"\u94fe\u8def\u91c7\u96c6\u6392\u969c\u6307\u5357","text":"

Before trying to troubleshoot problems with trace data collection, you first need to understand the transmission path of trace data. The diagram below sketches how trace data flows:

graph TB

sdk[Language probe / SDK] --> workload[Workload cluster otel collector]
--> otel[Global cluster otel collector]
--> jaeger[Global cluster jaeger collector]
--> es[Elasticsearch cluster]

classDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;
classDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;
classDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;

class sdk,workload,otel,jaeger,es cluster

As the diagram shows, a transmission failure at any step makes the trace data unqueryable. If you find no trace data after instrumenting your application for tracing, take the following steps:

1. On the AI Computing Center platform, enter Observability and select Dashboards in the left navigation bar.

2. Click the dashboard title Overview.

3. Switch to the insight-system -> insight tracing debug dashboard.

4. The dashboard consists of three blocks, each responsible for monitoring the data flow of a different cluster or component along the trace pipeline. Use the generated time-series charts to check whether trace data transmission has problems:

            • workload opentelemetry collector
            • global opentelemetry collector
            • global jaeger collector

          "},{"location":"admin/insight/best-practice/debug-trace.html#_2","title":"\u533a\u5757\u4ecb\u7ecd","text":"
          1. workload opentelemetry collector

            \u5c55\u793a\u4e0d\u540c\u5de5\u4f5c\u96c6\u7fa4\u7684 opentelemetry collector \u5728\u63a5\u53d7 language probe/SDK \u94fe\u8def\u6570\u636e\uff0c\u53d1\u9001\u805a\u5408\u94fe\u8def\u6570\u636e\u60c5\u51b5\u3002\u53ef\u4ee5\u901a\u8fc7\u5de6\u4e0a\u89d2\u7684 Cluster \u9009\u62e9\u6846\u9009\u62e9\u6240\u5728\u7684\u96c6\u7fa4\u3002

  Note

  These four time-series charts tell you whether that cluster's opentelemetry collector is running normally.

2. global opentelemetry collector

  Shows how the global service cluster's opentelemetry collector receives trace data from the otel collectors in the workload clusters and sends aggregated trace data onward.

  Note

  The global service cluster's opentelemetry collector is also responsible for sending all workload clusters' Global Management audit logs, as well as Kubernetes audit logs (not collected by default), to the audit server component of the Global Management module.

3. global jaeger collector

  Shows how the global service cluster's jaeger collector receives data from the otel collector in the global service cluster and sends the trace data on to the ElasticSearch cluster.

          "},{"location":"admin/insight/best-practice/find_root_cause.html","title":"\u4f7f\u7528 Insight \u5b9a\u4f4d\u5e94\u7528\u5f02\u5e38","text":"

          \u672c\u6587\u5c06\u4ee5 AI \u7b97\u529b\u4e2d\u5fc3\u4e2d\u4e3e\u4f8b\uff0c\u8bb2\u89e3\u5982\u4f55\u901a\u8fc7 Insight \u53d1\u73b0 AI \u7b97\u529b\u4e2d\u5fc3\u4e2d\u5f02\u5e38\u7684\u7ec4\u4ef6\u5e76\u5206\u6790\u51fa\u7ec4\u4ef6\u5f02\u5e38\u7684\u6839\u56e0\u3002

          \u672c\u6587\u5047\u8bbe\u4f60\u5df2\u7ecf\u4e86\u89e3 Insight \u7684\u4ea7\u54c1\u529f\u80fd\u6216\u613f\u666f\u3002

          "},{"location":"admin/insight/best-practice/find_root_cause.html#_1","title":"\u62d3\u6251\u56fe \u2014 \u4ece\u5b8f\u89c2\u5bdf\u89c9\u5f02\u5e38","text":"

          \u968f\u7740\u4f01\u4e1a\u5bf9\u5fae\u670d\u52a1\u67b6\u6784\u7684\u5b9e\u8df5\uff0c\u4f01\u4e1a\u4e2d\u7684\u670d\u52a1\u6570\u91cf\u53ef\u80fd\u4f1a\u9762\u4e34\u7740\u6570\u91cf\u591a\u3001\u8c03\u7528\u590d\u6742\u7684\u60c5\u51b5\uff0c\u5f00\u53d1\u6216\u8fd0\u7ef4\u4eba\u5458\u5f88\u96be\u7406\u6e05\u670d\u52a1\u4e4b\u95f4\u7684\u5173\u7cfb\uff0c \u56e0\u6b64\uff0c\u6211\u4eec\u63d0\u4f9b\u4e86\u62d3\u6251\u56fe\u76d1\u63a7\u7684\u529f\u80fd\uff0c\u6211\u4eec\u53ef\u4ee5\u901a\u8fc7\u62d3\u6251\u56fe\u5bf9\u5f53\u524d\u7cfb\u7edf\u4e2d\u8fd0\u884c\u7684\u5fae\u670d\u52a1\u72b6\u51b5\u8fdb\u884c\u521d\u6b65\u8bca\u65ad\u3002

          \u5982\u4e0b\u56fe\u6240\u793a\uff0c\u6211\u4eec\u901a\u8fc7\u62d3\u6251\u56fe\u53d1\u73b0\u5176\u4e2d Insight-Server \u8fd9\u4e2a\u8282\u70b9\u7684\u989c\u8272\u4e3a \u7ea2\u8272 \uff0c\u5e76\u5c06\u9f20\u6807\u79fb\u5230\u8be5\u8282\u70b9\u4e0a\uff0c \u53d1\u73b0\u8be5\u8282\u70b9\u7684\u9519\u8bef\u7387\u4e3a 2.11% \u3002\u56e0\u6b64\uff0c\u6211\u4eec\u5e0c\u671b\u67e5\u770b\u66f4\u591a\u7ec6\u8282\u53bb\u627e\u5230\u9020\u6210\u8be5\u670d\u52a1\u9519\u8bef\u7387\u4e0d\u4e3a 0 \u7684\u539f\u56e0:

          \u5f53\u7136\uff0c\u6211\u4eec\u4e5f\u53ef\u4ee5\u70b9\u51fb\u6700\u9876\u90e8\u7684\u670d\u52a1\u540d\uff0c\u8fdb\u5165\u5230\u8be5\u670d\u52a1\u7684\u603b\u89c8\u754c\u9762\uff1a

          "},{"location":"admin/insight/best-practice/find_root_cause.html#_2","title":"\u670d\u52a1\u603b\u89c8 \u2014 \u5177\u4f53\u5206\u6790\u7684\u5f00\u59cb","text":"

          \u5f53\u4f60\u9700\u8981\u6839\u636e\u670d\u52a1\u7684\u5165\u53e3\u548c\u51fa\u53e3\u6d41\u91cf\u5206\u522b\u5206\u6790\u7684\u65f6\u5019\uff0c\u4f60\u53ef\u4ee5\u5728\u53f3\u4e0a\u89d2\u8fdb\u884c\u7b5b\u9009\u5207\u6362\uff0c\u7b5b\u9009\u6570\u636e\u4e4b\u540e\uff0c\u6211\u4eec\u53d1\u73b0\u8be5\u670d\u52a1\u6709\u5f88\u591a \u64cd\u4f5c \u5bf9\u5e94\u7684\u9519\u8bef\u7387\u90fd\u4e0d\u4e3a 0. \u6b64\u65f6\uff0c\u6211\u4eec\u53ef\u4ee5\u901a\u8fc7\u70b9\u51fb \u67e5\u770b\u94fe\u8def \u5bf9\u8be5 \u64cd\u4f5c \u5728\u8fd9\u6bb5\u65f6\u95f4\u4ea7\u751f\u7684\u5e76\u8bb0\u5f55\u4e0b\u6765\u7684\u94fe\u8def\u8fdb\u884c\u5206\u6790\uff1a

          "},{"location":"admin/insight/best-practice/find_root_cause.html#_3","title":"\u94fe\u8def\u8be6\u60c5 \u2014 \u627e\u5230\u9519\u8bef\u6839\u56e0\uff0c\u6d88\u706d\u5b83\u4eec","text":"

          \u5728\u94fe\u8def\u5217\u8868\u4e2d\uff0c\u6211\u4eec\u53ef\u4ee5\u901a\u8fc7\u754c\u9762\u76f4\u89c2\u5730\u53d1\u73b0\u94fe\u8def\u5217\u8868\u4e2d\u5b58\u5728\u7740 \u9519\u8bef \u7684\u94fe\u8def\uff08\u4e0a\u56fe\u4e2d\u7ea2\u6846\u5708\u8d77\u6765\u7684\uff09\uff0c\u6211\u4eec\u53ef\u4ee5\u70b9\u51fb\u9519\u8bef\u7684\u94fe\u8def\u67e5\u770b\u94fe\u8def\u8be6\u60c5\uff0c\u5982\u4e0b\u56fe\u6240\u793a\uff1a

          \u5728\u94fe\u8def\u56fe\u4e2d\u6211\u4eec\u4e5f\u53ef\u4ee5\u4e00\u773c\u5c31\u53d1\u73b0\u94fe\u8def\u7684\u6700\u540e\u4e00\u6761\u6570\u636e\u662f\u5904\u4e8e \u9519\u8bef \u72b6\u6001\uff0c\u5c06\u5176\u53f3\u8fb9 Logs \u5c55\u5f00\uff0c\u6211\u4eec\u5b9a\u4f4d\u5230\u4e86\u9020\u6210\u8fd9\u6b21\u8bf7\u6c42\u9519\u8bef\u7684\u539f\u56e0\uff1a

          \u6839\u636e\u4e0a\u9762\u7684\u5206\u6790\u65b9\u6cd5\uff0c\u6211\u4eec\u4e5f\u53ef\u4ee5\u5b9a\u4f4d\u5230\u5176\u4ed6 \u64cd\u4f5c \u9519\u8bef\u7684\u94fe\u8def\uff1a

          "},{"location":"admin/insight/best-practice/find_root_cause.html#_4","title":"\u63a5\u4e0b\u6765 \u2014 \u4f60\u6765\u5206\u6790\uff01","text":""},{"location":"admin/insight/best-practice/grafana-use-db.html","title":"Insight Grafana \u6301\u4e45\u5316\u5230\u6570\u636e\u5e93","text":"

          Insight \u4f7f\u7528\u4e91\u539f\u751f\u7684 GrafanaOperator + CRD \u7684\u65b9\u5f0f\u6765\u4f7f\u7528 Grafana\u3002\u6211\u4eec\u63a8\u8350\u4f7f\u7528 GrafanaDashboard(CRD) \u6765\u63cf\u8ff0\u4eea\u8868\u76d8\u7684 JSON \u6570\u636e\uff0c\u5373\u901a\u8fc7 GrafanaDashboard \u6765\u589e\u52a0\u3001\u5220\u9664\u3001\u4fee\u6539\u4eea\u8868\u76d8\u3002

          \u56e0\u4e3a Grafana \u9ed8\u8ba4\u4f7f\u7528 SQLite3 \u4f5c\u4e3a\u672c\u5730\u6570\u636e\u5e93\u6765\u5b58\u50a8\u914d\u7f6e\u4fe1\u606f\uff0c\u4f8b\u5982\u7528\u6237\u3001\u4eea\u8868\u76d8\u3001\u544a\u8b66\u7b49\u3002 \u5f53\u7528\u6237\u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd\uff0c\u901a\u8fc7 UI \u521b\u5efa\u6216\u8005\u5bfc\u5165\u4eea\u8868\u76d8\u4e4b\u540e\uff0c\u6570\u636e\u5c06\u4e34\u65f6\u5b58\u50a8\u5728 SQLite3 \u4e2d\u3002 \u5f53 Grafana \u91cd\u542f\u4e4b\u540e\uff0c\u5c06\u91cd\u7f6e\u6240\u6709\u7684\u4eea\u8868\u76d8\u7684\u6570\u636e\uff0c\u5c06\u53ea\u5c55\u793a\u901a\u8fc7 GrafanaDashboard CR \u63cf\u8ff0\u7684\u4eea\u8868\u76d8\u6570\u636e\uff0c\u800c\u901a\u8fc7 UI \u521b\u5efa\uff0c\u5220\u9664\uff0c\u4fee\u6539\u4e5f\u90fd\u5c06\u88ab\u5168\u90e8\u91cd\u7f6e\u3002

          Grafana \u652f\u6301\u4f7f\u7528\u5916\u90e8\u7684 MySQL\u3001PostgreSQL \u7b49\u6570\u636e\u5e93\u66ff\u4ee3\u5185\u7f6e\u7684 SQLite3 \u4f5c\u4e3a\u5185\u90e8\u5b58\u50a8\u3002\u672c\u6587\u63cf\u8ff0\u4e86\u5982\u679c\u7ed9 Insight \u63d0\u4f9b\u7684 Grafana \u914d\u7f6e\u5916\u7f6e\u7684\u6570\u636e\u5e93\u3002

          "},{"location":"admin/insight/best-practice/grafana-use-db.html#_1","title":"\u4f7f\u7528\u5916\u90e8\u6570\u636e\u5e93","text":"

          \u7ed3\u5408 Grafana\uff08\u5f53\u524d\u955c\u50cf\u7248\u672c 9.3.14\uff09\u7684\u5b98\u65b9\u6587\u6863\u3002\u6839\u636e\u5982\u4e0b\u6b65\u9aa4\u914d\u7f6e\u4f7f\u7528\u5916\u90e8\u7684\u6570\u636e\u5e93\uff0c\u793a\u4f8b\u4ee5 MySQL \u4e3a\u4f8b\uff1a

          1. \u5728\u5916\u90e8\u6570\u636e\u5e93\uff08MySQL /PostgreSQL\uff09\u4e2d\u521b\u5efa\u4e00\u4e2a\u6570\u636e\u5e93\uff08DB\uff09\u3002
          2. \u914d\u7f6e Grafana \u4f7f\u7528\u8fd9\u4e2a\u6570\u636e\u5e93\uff08MySQL \u7684 MGR \u6a21\u5f0f\u9700\u8981\u989d\u5916\u5904\u7406\uff09\u3002
          "},{"location":"admin/insight/best-practice/grafana-use-db.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
          1. \u521d\u59cb\u5316\u6570\u636e\u5e93

            \u5728\u6570\u636e\u5e93\u4e2d\u521b\u5efa\u4e00\u4e2a\u65b0\u7684 database \u7ed9 Grafana \u4f7f\u7528\uff0c\u5efa\u8bae\u540d\u79f0\u4e3a grafana

          2. \u914d\u7f6e Grafana \u4f7f\u7528 DB

            \u5728 insight-system \u4e0b\uff0c\u540d\u4e3a insight-grafana-operator-grafana \u7684 Grafana \u7684 CR \u91cc\u7684\u914d\u7f6e\uff1a

    apiVersion: integreatly.org/v1alpha1
    kind: Grafana
    metadata:
      name: insight-grafana-operator-grafana
      namespace: insight-system
    spec:
      baseImage: 10.64.40.50/docker.m.daocloud.io/grafana/grafana:9.3.14
      config:
        // append at the end of config
    +   database:
    +     type: mysql # supports mysql, postgres
    +     host: "10.6.216.101:30782" # endpoint of the database
    +     name: "grafana"  # the database created beforehand
    +     user: "grafana"
    +     password: "grafana_password"
3. After the configuration is done, the Grafana configuration file grafana-config contains the following settings.

    [database]
      host = 10.6.216.101:30782
      name = grafana
      password = grafana_password
      type = mysql
      user = grafana

  1. Add the following configuration to insight.yaml:

    grafana-operator:
      grafana:
        config:
          database:
            type: mysql
            host: "10.6.216.101:30782"
            name: "grafana"
            user: "grafana"
            password: "grafana_password"

  2. Upgrade insight server, preferably via Helm.

    helm upgrade insight insight/insight \
      -n insight-system \
      -f ./insight.yaml \
      --version ${version}

4. Upgrade via the command line.

  1. Get the existing configuration from the insight Helm release.

    helm get values insight -n insight-system -o yaml > insight.yaml

  2. Point at the existing configuration file and save the Grafana database connection information.

    helm upgrade --install \
        --version ${version} \
        insight insight/insight -n insight-system \
        -f ./insight.yaml \
        --set grafana-operator.grafana.config.database.type=mysql \
        --set grafana-operator.grafana.config.database.host=10.6.216.101:30782 \
        --set grafana-operator.grafana.config.database.name=grafana \
        --set grafana-operator.grafana.config.database.user=grafana \
        --set grafana-operator.grafana.config.database.password=grafana_password
          "},{"location":"admin/insight/best-practice/grafana-use-db.html#_3","title":"\u6ce8\u610f\u4e8b\u9879","text":"
          1. \u7528\u6237\u662f\u5426\u4f1a\u8986\u76d6\u5185\u7f6e\u4eea\u8868\u76d8\uff0c\u5bfc\u81f4\u5347\u7ea7\u5931\u8d25\uff1f

            \u56de\u590d\uff1a\u4f1a\u3002\u5f53\u7528\u6237\u7f16\u8f91\u4e86 Dashbaord A\uff08v1.1\uff09\uff0c\u4e14 Insight \u4e5f\u5347\u7ea7\u4e86 Dashboard A\uff08v2.0\uff09\uff0c \u5347\u7ea7\u4e4b\u540e\uff08\u5347\u7ea7\u955c\u50cf\uff09\uff1b\u7528\u6237\u770b\u5230\u5185\u5bb9\u8fd8\u662f v1.1\uff0c\u800c v2.0 \u662f\u4e0d\u4f1a\u66f4\u65b0\u5230\u73af\u5883\u91cc\u3002

2. With MySQL in MGR mode there is a problem that prevents grafana-deployment from starting normally.

  Cause: the tables alert_rule_tag_v1 and annotation_tag_v2 have no primary key, while MySQL MGR requires one.

  Workaround: add primary keys to the alert_rule_tag_v1 and annotation_tag_v2 temporary tables:

    alter table alert_rule_tag_v1
        add constraint alert_rule_tag_v1_pk
            primary key (tag_id, alert_id);

    alter table annotation_tag_v2
        add constraint annotation_tag_v2_pk
            primary key (tag_id, annotation_id);
          "},{"location":"admin/insight/best-practice/insight-kafka.html","title":"Kafka + Elasticsearch \u6d41\u5f0f\u67b6\u6784\u5e94\u5bf9\u8d85\u5927\u89c4\u6a21\u65e5\u5fd7\u65b9\u6848","text":"

          \u968f\u7740\u4e1a\u52a1\u53d1\u5c55\uff0c\u8d8a\u6765\u8d8a\u591a\u7684\u5e94\u7528\u4ea7\u751f\u7684\u65e5\u5fd7\u6570\u636e\u4f1a\u8d8a\u6765\u8d8a\u591a\uff0c\u4e3a\u4e86\u4fdd\u8bc1\u7cfb\u7edf\u80fd\u591f\u6b63\u5e38\u91c7\u96c6\u5e76\u5206\u6790\u5e9e\u6742\u7684\u65e5\u5fd7\u6570\u636e\u65f6\uff0c \u4e00\u822c\u505a\u6cd5\u662f\u5f15\u5165 Kafka \u7684\u6d41\u5f0f\u67b6\u6784\u6765\u89e3\u51b3\u5927\u91cf\u6570\u636e\u5f02\u6b65\u91c7\u96c6\u7684\u65b9\u6848\u3002\u91c7\u96c6\u5230\u7684\u65e5\u5fd7\u6570\u636e\u4f1a\u7ecf\u8fc7 Kafka \u6d41\u8f6c\uff0c \u7531\u76f8\u5e94\u7684\u6570\u636e\u6d88\u8d39\u7ec4\u4ef6\u5c06\u6570\u636e\u4ece Kafka \u6d88\u8d39\u5b58\u5165\u5230 Elasticsearch \u4e2d\uff0c\u5e76\u901a\u8fc7 Insight \u8fdb\u884c\u53ef\u89c6\u5316\u5c55\u793a\u4e0e\u5206\u6790\u3002

This article introduces the following two options:

          • Fluentbit + Kafka + Logstash + Elasticsearch
          • Fluentbit + Kafka + Vector + Elasticsearch

Once Kafka is introduced into the logging system, the data flow looks like this:

The two options share most of their pipeline and differ in the component that consumes the Kafka data. Also, to avoid affecting Insight's data analysis, the data consumed from Kafka and written to ES must have the same format as the data Fluentbit previously wrote directly to ES.

First, let's look at how Fluentbit writes logs to Kafka:

          "},{"location":"admin/insight/best-practice/insight-kafka.html#fluentbit-output","title":"\u4fee\u6539 Fluentbit Output \u914d\u7f6e","text":"

          \u5f53 Kafka \u96c6\u7fa4\u51c6\u5907\u5c31\u7eea\u4e4b\u540e\uff0c\u6211\u4eec\u9700\u8981\u4fee\u6539 insight-system \u547d\u540d\u7a7a\u95f4\u4e0b ConfigMap \u7684\u5185\u5bb9\uff0c \u65b0\u589e\u4ee5\u4e0b\u4e09\u4e2a Kafka Output \u5e76\u6ce8\u91ca\u539f\u6765\u4e09\u4e2a Elasticsearch Output\uff1a

          \u5047\u8bbe Kafka Brokers \u5730\u5740\u4e3a\uff1a insight-kafka.insight-system.svc.cluster.local:9092

    [OUTPUT]
        Name        kafka
        Match_Regex (?:kube|syslog)\.(.*)
        Brokers     insight-kafka.insight-system.svc.cluster.local:9092
        Topics      insight-logs
        format      json
        timestamp_key @timestamp
        rdkafka.batch.size 65536
        rdkafka.compression.level 6
        rdkafka.compression.type lz4
        rdkafka.linger.ms 0
        rdkafka.log.connection.close false
        rdkafka.message.max.bytes 2.097152e+06
        rdkafka.request.required.acks 1
    [OUTPUT]
        Name        kafka
        Match_Regex (?:skoala-gw)\.(.*)
        Brokers     insight-kafka.insight-system.svc.cluster.local:9092
        Topics      insight-gw-skoala
        format      json
        timestamp_key @timestamp
        rdkafka.batch.size 65536
        rdkafka.compression.level 6
        rdkafka.compression.type lz4
        rdkafka.linger.ms 0
        rdkafka.log.connection.close false
        rdkafka.message.max.bytes 2.097152e+06
        rdkafka.request.required.acks 1
    [OUTPUT]
        Name        kafka
        Match_Regex (?:kubeevent)\.(.*)
        Brokers     insight-kafka.insight-system.svc.cluster.local:9092
        Topics      insight-event
        format      json
        timestamp_key @timestamp
        rdkafka.batch.size 65536
        rdkafka.compression.level 6
        rdkafka.compression.type lz4
        rdkafka.linger.ms 0
        rdkafka.log.connection.close false
        rdkafka.message.max.bytes 2.097152e+06
        rdkafka.request.required.acks 1

Next comes the subtle difference in writing the consumed Kafka data to ES. As described at the start of this article, we cover Logstash and Vector as the two ways of consuming from Kafka.

          "},{"location":"admin/insight/best-practice/insight-kafka.html#kafka-elasticsearch_1","title":"\u6d88\u8d39 Kafka \u5e76\u5199\u5165 Elasticsearch","text":"

          Assume the Elasticsearch address is: https://mcamel-common-es-cluster-es-http.mcamel-system:9200

          "},{"location":"admin/insight/best-practice/insight-kafka.html#logstash","title":"\u901a\u8fc7 Logstash \u6d88\u8d39","text":"

          If you are familiar with the Logstash stack, you can keep using this approach.

          When deploying Logstash via Helm, add the following pipelines under logstashPipeline:

          replicas: 3
          resources:
            requests:
              cpu: 100m
              memory: 1536Mi
            limits:
              cpu: 1000m
              memory: 1536Mi
          logstashConfig:
            logstash.yml: |
              http.host: 0.0.0.0
              xpack.monitoring.enabled: false
          logstashPipeline:
            insight-event.conf: |
              input {
                kafka {
                  add_field => {"kafka_topic" => "insight-event"}
                  topics => ["insight-event"]
                  bootstrap_servers => "172.30.120.189:32082" # Kafka IP and port
                  enable_auto_commit => true
                  consumer_threads => 1                       # matches the number of partitions
                  decorate_events => true
                  codec => "plain"
                }
              }

              filter {
                mutate { gsub => [ "message", "@timestamp", "_@timestamp"] }
                json {source => "message"}
                date {
                  match => [ "_@timestamp", "UNIX" ]
                  remove_field => "_@timestamp"
                  remove_tag => "_timestampparsefailure"
                }
                mutate {
                  remove_field => ["event", "message"]
                }
              }

              output {
                if [kafka_topic] == "insight-event" {
                  elasticsearch {
                    hosts => ["https://172.30.120.201:32427"] # Elasticsearch address
                    user => 'elastic'                         # Elasticsearch username
                    ssl => 'true'
                    password => '0OWj4D54GTH3xK06f9Gg01Zk'    # Elasticsearch password
                    ssl_certificate_verification => 'false'
                    action => "create"
                    index => "insight-es-k8s-event-logs-alias"
                    data_stream => "false"
                  }
                }
              }
            insight-gw-skoala.conf: |
              input {
                kafka {
                  add_field => {"kafka_topic" => "insight-gw-skoala"}
                  topics => ["insight-gw-skoala"]
                  bootstrap_servers => "172.30.120.189:32082"
                  enable_auto_commit => true
                  consumer_threads => 1
                  decorate_events => true
                  codec => "plain"
                }
              }

              filter {
                mutate { gsub => [ "message", "@timestamp", "_@timestamp"] }
                json {source => "message"}
                date {
                  match => [ "_@timestamp", "UNIX" ]
                  remove_field => "_@timestamp"
                  remove_tag => "_timestampparsefailure"
                }
                mutate {
                  remove_field => ["event", "message"]
                }
              }

              output {
                if [kafka_topic] == "insight-gw-skoala" {
                  elasticsearch {
                    hosts => ["https://172.30.120.201:32427"]
                    user => 'elastic'
                    ssl => 'true'
                    password => '0OWj4D54GTH3xK06f9Gg01Zk'
                    ssl_certificate_verification => 'false'
                    action => "create"
                    index => "skoala-gw-alias"
                    data_stream => "false"
                  }
                }
              }
            insight-logs.conf: |
              input {
                kafka {
                  add_field => {"kafka_topic" => "insight-logs"}
                  topics => ["insight-logs"]
                  bootstrap_servers => "172.30.120.189:32082"
                  enable_auto_commit => true
                  consumer_threads => 1
                  decorate_events => true
                  codec => "plain"
                }
              }

              filter {
                mutate { gsub => [ "message", "@timestamp", "_@timestamp"] }
                json {source => "message"}
                date {
                  match => [ "_@timestamp", "UNIX" ]
                  remove_field => "_@timestamp"
                  remove_tag => "_timestampparsefailure"
                }
                mutate {
                  remove_field => ["event", "message"]
                }
              }

              output {
                if [kafka_topic] == "insight-logs" {
                  elasticsearch {
                    hosts => ["https://172.30.120.201:32427"]
                    user => 'elastic'
                    ssl => 'true'
                    password => '0OWj4D54GTH3xK06f9Gg01Zk'
                    ssl_certificate_verification => 'false'
                    action => "create"
                    index => "insight-es-k8s-logs-alias"
                    data_stream => "false"
                  }
                }
              }
          "},{"location":"admin/insight/best-practice/insight-kafka.html#vector","title":"\u901a\u8fc7 Vector \u6d88\u8d39","text":"

          If you are familiar with the Vector stack, you can keep using this approach.

          When deploying Vector via Helm, reference a ConfigMap configuration file with the following rules:

          kind: ConfigMap
          apiVersion: v1
          metadata:
            name: vector
          data:
            aggregator.yaml: |
              api:
                enabled: true
                address: '0.0.0.0:8686'
              sources:
                insight_logs_kafka:
                  type: kafka
                  bootstrap_servers: 'insight-kafka.insight-system.svc.cluster.local:9092'
                  group_id: consumer-group-insight
                  topics:
                    - insight-logs
                insight_event_kafka:
                  type: kafka
                  bootstrap_servers: 'insight-kafka.insight-system.svc.cluster.local:9092'
                  group_id: consumer-group-insight
                  topics:
                    - insight-event
                insight_gw_skoala_kafka:
                  type: kafka
                  bootstrap_servers: 'insight-kafka.insight-system.svc.cluster.local:9092'
                  group_id: consumer-group-insight
                  topics:
                    - insight-gw-skoala
              transforms:
                insight_logs_remap:
                  type: remap
                  inputs:
                    - insight_logs_kafka
                  source: |2
                        . = parse_json!(string!(.message))
                        .@timestamp = now()
                insight_event_kafka_remap:
                  type: remap
                  inputs:
                    - insight_event_kafka
                  source: |2
                        . = parse_json!(string!(.message))
                        .@timestamp = now()
                insight_gw_skoala_kafka_remap:
                  type: remap
                  inputs:
                    - insight_gw_skoala_kafka
                  source: |2
                        . = parse_json!(string!(.message))
                        .@timestamp = now()
              sinks:
                insight_es_logs:
                  type: elasticsearch
                  inputs:
                    - insight_logs_remap
                  api_version: auto
                  auth:
                    strategy: basic
                    user: elastic
                    password: 8QZJ656ax3TXZqQh205l3Ee0
                  bulk:
                    index: insight-es-k8s-logs-alias-1418
                  endpoints:
                    - 'https://mcamel-common-es-cluster-es-http.mcamel-system:9200'
                  tls:
                    verify_certificate: false
                    verify_hostname: false
                insight_es_event:
                  type: elasticsearch
                  inputs:
                    - insight_event_kafka_remap
                  api_version: auto
                  auth:
                    strategy: basic
                    user: elastic
                    password: 8QZJ656ax3TXZqQh205l3Ee0
                  bulk:
                    index: insight-es-k8s-event-logs-alias-1418
                  endpoints:
                    - 'https://mcamel-common-es-cluster-es-http.mcamel-system:9200'
                  tls:
                    verify_certificate: false
                    verify_hostname: false
                insight_es_gw_skoala:
                  type: elasticsearch
                  inputs:
                    - insight_gw_skoala_kafka_remap
                  api_version: auto
                  auth:
                    strategy: basic
                    user: elastic
                    password: 8QZJ656ax3TXZqQh205l3Ee0
                  bulk:
                    index: skoala-gw-alias-1418
                  endpoints:
                    - 'https://mcamel-common-es-cluster-es-http.mcamel-system:9200'
                  tls:
                    verify_certificate: false
                    verify_hostname: false
          "},{"location":"admin/insight/best-practice/insight-kafka.html#_1","title":"\u68c0\u67e5\u662f\u5426\u6b63\u5e38\u5de5\u4f5c","text":"

          You can verify the setup by checking whether the Insight log query UI shows fresh data, or whether the document count of the original Elasticsearch indices keeps growing; growth means the configuration works.

          "},{"location":"admin/insight/best-practice/insight-kafka.html#_2","title":"\u53c2\u8003","text":"
          • Logstash Helm Chart
          • Vector Helm Chart
          • Vector in practice
          • Vector Performance
          "},{"location":"admin/insight/best-practice/integration_deepflow.html","title":"\u96c6\u6210 DeepFlow","text":"

          DeepFlow is an eBPF-based observability product. Its community edition has been integrated into Insight; the integration steps follow.

          "},{"location":"admin/insight/best-practice/integration_deepflow.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
          • Insight is installed in the global service cluster
          • Insight version v0.23.0 or later
          • Understand and meet DeepFlow's runtime permission and kernel requirements
          • Storage volumes are ready
          "},{"location":"admin/insight/best-practice/integration_deepflow.html#deepflow-insight","title":"\u5b89\u88c5 DeepFlow \u548c\u914d\u7f6e Insight","text":"

          Installing the DeepFlow components requires two charts:

          • deepflow: contains deepflow-app, deepflow-server, deepflow-clickhouse, deepflow-agent, and other components. deepflow is normally deployed in the global service cluster, so it installs deepflow-agent along with it
          • deepflow-agent: contains only the deepflow-agent component, which collects eBPF data and sends it to deepflow-server
          "},{"location":"admin/insight/best-practice/integration_deepflow.html#deepflow_1","title":"\u5b89\u88c5 DeepFlow","text":"

          DeepFlow must be installed in the global service cluster.

          1. Go to the kpanda-global-cluster cluster, click Helm Apps -> Helm Templates in the left navigation bar, select the community repository, and find deepflow in the search box:

          2. Click the deepflow card to enter the details page:

          3. Click Install to enter the installation page:

          4. Most values have defaults. Clickhouse and Mysql both need to request storage volumes, each defaulting to 10Gi; search for the persistence keyword to find and modify the related settings.

          5. Once configured, click OK to run the installation.

          "},{"location":"admin/insight/best-practice/integration_deepflow.html#insight","title":"\u4fee\u6539 Insight \u914d\u7f6e","text":"

          After installing DeepFlow, you still need to turn on the related feature switch in Insight.

          1. Click ConfigMaps & Secrets -> ConfigMaps in the left navigation bar, find insight-server-config via the search box, and edit it:

          2. In the YAML configuration, find the eBPF Flow feature switch and enable it:

          3. Save the change and restart insight-server; Network Observability then appears in the main Insight UI:

          "},{"location":"admin/insight/best-practice/integration_deepflow.html#deepflow-agent","title":"\u5b89\u88c5 DeepFlow Agent","text":"

          The DeepFlow agent is installed in worker clusters via the deepflow-agent chart; it collects each worker cluster's eBPF observability data and reports it to the global service cluster. As with installing deepflow, go to Helm Apps -> Helm Templates, select the community repository, search for deepflow-agent, and follow the flow into the installation page.

          Parameter descriptions (a values sketch follows the list):

          • DeployComponent: deployment mode, defaults to daemonset
          • timezone: time zone, defaults to Asia/Shanghai
          • DeepflowServerNodeIPS: node addresses of the cluster where the deepflow server is installed
          • deepflowK8sClusterID: cluster UUID
          • agentGroupID: agent group ID
          • controllerPort: data reporting port of the deepflow server; optional, defaults to 30035
          • clusterNAME: cluster name
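
          Putting these parameters together, a minimal values sketch for the deepflow-agent chart might look like the following (every value below is an illustrative placeholder; check the chart's own values.yaml for the authoritative keys and formats):

          DeployComponent: daemonset
          timezone: Asia/Shanghai
          DeepflowServerNodeIPS:
            - 10.0.0.10                                                 # placeholder: a node address of the cluster running deepflow-server
          deepflowK8sClusterID: "d3f1c0de-1111-2222-3333-444455556666"  # placeholder cluster UUID
          agentGroupID: "g-example"                                     # placeholder agent group ID
          controllerPort: 30035                                         # default data reporting port of deepflow-server
          clusterNAME: worker-cluster-01                                # placeholder cluster name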

          Once configured, click OK to complete the installation.

          "},{"location":"admin/insight/best-practice/integration_deepflow.html#_2","title":"\u4f7f\u7528","text":"

          With DeepFlow correctly installed, click Network Observability to enter the DeepFlow Grafana UI. It ships with a large set of dashboards for inspection and troubleshooting; click DeepFlow Templates to browse all available dashboards:

          "},{"location":"admin/insight/best-practice/sw-to-otel.html","title":"\u4f7f\u7528 OpenTelemetry \u96f6\u4ee3\u7801\u63a5\u6536 SkyWalking \u94fe\u8def\u6570\u636e","text":"

          Insight reports application data via OpenTelemetry. If your application already uses SkyWalking to collect traces, you can follow this article to route that trace data into Insight with zero code changes.

          "},{"location":"admin/insight/best-practice/sw-to-otel.html#_1","title":"\u4ee3\u7801\u89e3\u8bfb","text":"

          To stay compatible with different distributed-tracing implementations, OpenTelemetry supports pluggable components, letting different vendors standardize their data through OpenTelemetry and export it to different backends. The community implemented JaegerReceiver and ZipkinReceiver for Jaeger and Zipkin. We also contributed the SkyWalkingReceiver to the community and have kept polishing it; it is now ready for production use, without modifying a single line of business code.

          OpenTelemetry and SkyWalking have things in common: both use a trace to define one end-to-end request and spans to mark the smallest units within it. But they differ in several details and implementations:

          |                       | SkyWalking               | OpenTelemetry |
          | --------------------- | ------------------------ | ------------- |
          | Data structure        | Span -> Segment -> Trace | Span -> Trace |
          | Attribute information | Tags                     | Attributes    |
          | Application events    | Logs                     | Events        |
          | Reference relations   | References               | Links         |

          With these differences clear, we can start implementing the conversion of SkyWalking traces into OpenTelemetry traces. The main work includes:

          1. How to construct OpenTelemetry's TraceId and SpanId

          2. How to construct OpenTelemetry's ParentSpanId

          3. How to preserve SkyWalking's original TraceId, SegmentId, and SpanId in the OpenTelemetry span

          First, let's look at constructing OpenTelemetry's TraceId and SpanId. Both SkyWalking and OpenTelemetry use a TraceId to stitch together the calls across distributed services and a SpanId to mark each span, but their formats differ significantly:

          Info

          See the implementation on GitHub:

          1. Skywalking Receiver
          2. PR: Create skywalking component folder/structure
          3. PR: add Skywalking tracing receiver impl

          Specifically, all possible formats of SkyWalking TraceId and SegmentId are as follows:

          In the OpenTelemetry protocol, a span is unique across all traces, whereas in SkyWalking a span is unique only within its segment. This means the SegmentId and SpanId must be combined to uniquely identify a span in SkyWalking and convert it into an OpenTelemetry SpanId.

          Info

          See the implementation on GitHub:

          1. Skywalking Receiver
          2. PR: Fix skywalking traceid and spanid convertion

          Next, constructing OpenTelemetry's ParentSpanId. Within a single segment, SkyWalking's ParentSpanId field can be used directly to build OpenTelemetry's ParentSpanId field. But when a trace spans multiple segments, SkyWalking expresses the relationship via the ParentTraceSegmentId and ParentSpanId in a Reference, so in that case OpenTelemetry's ParentSpanId must be built from the information in the Reference.

          See the implementation on GitHub: Skywalking Receiver

          Finally, preserving SkyWalking's original TraceId, SegmentId, and SpanId in the OpenTelemetry span. We carry this original information so that the OpenTelemetry TraceId and SpanId displayed by the distributed-tracing backend can be correlated with the SkyWalking TraceId, SegmentId, and SpanId in application logs, linking traces and logs. We chose to carry SkyWalking's original TraceId, SegmentId, and ParentSegmentId in the OpenTelemetry Attributes.

          Info

          See the implementation on GitHub:

          1. Skywalking Receiver
          2. Add extra link attributes from skywalking ref
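
          To illustrate, a converted span might carry the original SkyWalking identifiers roughly like this (a sketch only; the attribute keys and ID values here are illustrative, not the receiver's exact output):

          # Illustrative attributes on a converted OpenTelemetry span
          attributes:
            sw8.trace_id: "de5980b8-fce3-4a37-aab9-b4ac3af7eedd.33.16560607369950066"          # original SkyWalking TraceId
            sw8.segment_id: "de5980b8-fce3-4a37-aab9-b4ac3af7eedd.33.16560607369950066"        # original SegmentId
            sw8.parent_segment_id: "de5980b8-fce3-4a37-aab9-b4ac3af7eedd.33.16560607369950065" # ParentSegmentId from the Reference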

          After this series of conversions, the SkyWalking Segment Object is fully converted into an OpenTelemetry trace, as shown below:

          "},{"location":"admin/insight/best-practice/sw-to-otel.html#demo","title":"\u90e8\u7f72 Demo","text":"

          The following demo shows the full process of using OpenTelemetry to collect and display SkyWalking trace data.

          First, after deploying the OpenTelemetry agent, enable the following configuration to make OpenTelemetry compatible with the SkyWalking protocol:

          # otel-agent config
          receivers:
            # add the following config
            skywalking:
              protocols:
                grpc:
                  endpoint: 0.0.0.0:11800 # receives trace data reported by the SkyWalking agent
                http:
                  endpoint: 0.0.0.0:12800 # receives trace data reported over HTTP from frontends, nginx, etc.
          service:
            pipelines:
              traces:
                # add receiver __skywalking__
                receivers: [skywalking]

          # otel-agent service yaml
          spec:
            ports:
              - name: sw-http
                port: 12800
                protocol: TCP
                targetPort: 12800
              - name: sw-grpc
                port: 11800
                protocol: TCP
                targetPort: 11800

          Next, repoint the business applications from the SkyWalking OAP service (e.g. oap:11800) to the OpenTelemetry agent service (e.g. otel-agent:11800); OpenTelemetry then starts receiving trace data from the SkyWalking probes.
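
          For a workload instrumented with the SkyWalking Java agent, that switch is usually a single environment variable; a minimal Deployment excerpt as a sketch (the container name and the otel-agent service address are illustrative assumptions):

          spec:
            template:
              spec:
                containers:
                  - name: app   # illustrative container name
                    env:
                      # previously oap:11800 (SkyWalking OAP); now points at the OpenTelemetry agent service
                      - name: SW_AGENT_COLLECTOR_BACKEND_SERVICES
                        value: otel-agent:11800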

          We use the SkyWalking-showcase demo to show the end-to-end effect. It uses the SkyWalking agent for tracing, standardizes the data through OpenTelemetry, and renders the final result with Jaeger:

          From the SkyWalking Showcase architecture diagram, we can see that SkyWalking's data remains intact after OpenTelemetry standardization. In this trace, the request starts from app/homepage; app then issues two requests, /rcmd/ and /songs/top, which fan out to the recommendation and songs services and finally reach the database for queries, completing the whole request chain.

          We can also see the original SkyWalking Id information on the Jaeger page, which makes correlation with application logs easy:

          "},{"location":"admin/insight/best-practice/tail-based-sampling.html","title":"\u94fe\u8def\u6570\u636e\u91c7\u6837\u4ecb\u7ecd\u4e0e\u914d\u7f6e","text":"

          Distributed tracing lets you observe how a request flows through the components of a distributed system. It is undeniably useful, for example for understanding how your services connect and for diagnosing latency issues, among many other benefits.

          But if most of your requests succeed, with no unacceptable latency or errors, do you really need all of that data? You do not always need huge or complete data to reach the right insights; appropriate sampling is enough.

          The idea behind sampling is to control which traces are sent to the observability collector, lowering collection cost. Different organizations have different reasons for sampling and different data they want to sample, so we need customizable sampling strategies:

          • Manage cost: storing large volumes of telemetry data costs more compute and storage.
          • Focus on interesting traces: different organizations care about different data.
          • Filter out noise: for example, you may want to filter out health checks.

          It is important to use consistent terminology when discussing sampling. A trace or span is considered sampled or not sampled:

          • Sampled: the trace or span is processed and stored. Because the sampler chose it as a representative of the population, it is considered sampled.
          • Not sampled: the trace or span is not processed or stored. Because the sampler did not choose it, it is considered not sampled.
          "},{"location":"admin/insight/best-practice/tail-based-sampling.html#_2","title":"\u91c7\u6837\u7684\u65b9\u5f0f\u6709\u54ea\u4e9b\uff1f","text":""},{"location":"admin/insight/best-practice/tail-based-sampling.html#head-sampling","title":"\u5934\u90e8\u91c7\u6837\uff08Head Sampling\uff09","text":"

          Head sampling is a technique that makes the sampling decision as early as possible. The decision to sample or drop a trace/span is not made by inspecting the whole trace.

          For example, the most common form of head sampling is consistent probability sampling, also called deterministic sampling. Here the sampling decision is made from the TraceID and the desired percentage of traces to sample. This ensures whole traces are sampled at a consistent rate (e.g. 5% of all traces) without missing spans.
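
          As a minimal sketch, this kind of consistent probability head sampling can be expressed with the probabilistic_sampler processor from opentelemetry-collector-contrib (the 5% rate is illustrative):

          processors:
            probabilistic_sampler:
              sampling_percentage: 5   # keep about 5% of traces, decided consistently from the TraceID

          service:
            pipelines:
              traces:
                receivers: [otlp]
                processors: [probabilistic_sampler, batch]
                exporters: [otlp]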

          The benefits of head sampling:

          • Easy to understand
          • Easy to configure
          • Efficient
          • Can be done at any point in the trace collection pipeline

          The main drawback of head sampling is that it cannot make sampling decisions based on data from the whole trace. This means head sampling is effective as a blunt instrument, but entirely insufficient for sampling strategies that must consider whole-system information. For example, head sampling cannot guarantee that all traces containing errors are sampled. For that you need tail sampling.

          "},{"location":"admin/insight/best-practice/tail-based-sampling.html#tail-sampling","title":"\u5c3e\u90e8\u91c7\u6837\uff08Tail Sampling\uff09\u2014\u2014 \u63a8\u8350\u65b9\u6848","text":"

          Tail sampling decides whether to sample a trace by considering all or most of the spans within it. It lets you sample traces based on specific conditions drawn from different parts of the trace, an option head sampling does not have.

          Some examples of how tail sampling can be used:

          • Always sample traces that contain errors
          • Sample based on overall latency
          • Sample traces based on the presence or value of specific attributes on one or more spans in the trace; for example, sample more traces originating from a newly deployed service
          • Apply different sampling rates to traces based on specific conditions

          As you can see, tail sampling carries a higher degree of complexity. For large systems that must sample their telemetry data, tail sampling is almost always needed to balance data volume against data usefulness.

          Today, tail sampling has three main drawbacks:

          • Tail sampling can be hard to operate. The components implementing it must be stateful systems capable of receiving and storing large volumes of data. Depending on traffic patterns, this can require dozens or even hundreds of nodes, each using resources differently. Moreover, if a tail sampler cannot keep up with the incoming data volume, it may need to fall back to less compute-intensive sampling techniques. For these reasons, it is critical to monitor tail sampling components to ensure they have the resources needed to make correct sampling decisions.
          • Tail sampling can be hard to implement. Depending on the sampling techniques available to you, it is not always a set-and-forget affair. As your system changes, your sampling strategies change too. For a large, complex distributed system, the rules implementing those strategies can be equally large and complex.
          • Today, tail samplers often end up in vendor-specific territory. If you use a paid vendor for observability, the most effective tail sampling options available may be limited to what that vendor offers.

          Finally, for some systems tail sampling can be combined with head sampling. For example, a set of services generating heavy trace volume might first use head sampling to keep only a small fraction of traces, and then, later in the telemetry pipeline, use tail sampling to make more sophisticated sampling decisions before exporting to the backend. This is typically done to protect the telemetry pipeline from overload.
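
          A minimal sketch of that combination, assuming a two-tier collector layout (an agent tier applying head sampling, a gateway tier applying tail sampling; the rates and policies are illustrative):

          # Agent tier: cheap head sampling close to the workloads
          processors:
            probabilistic_sampler:
              sampling_percentage: 25   # keep ~25% of traces up front
          ---
          # Gateway tier: tail sampling on the already-reduced stream
          processors:
            tail_sampling:
              decision_wait: 10s
              policies:
                - name: errors
                  type: status_code
                  status_code: {status_codes: [ERROR]}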

          Insight in the AI Computing Center currently recommends tail sampling and supports it first.

          The tail sampling processor samples traces according to a set of defined policies. However, all spans of a trace must be received by the same collector instance to make a valid sampling decision.

          Therefore, the architecture of Insight's global OpenTelemetry Collector must be adjusted to implement a tail sampling strategy.

          "},{"location":"admin/insight/best-practice/tail-based-sampling.html#insight","title":"Insight \u5177\u4f53\u6539\u52a8","text":"

          Introduce an OpenTelemetry Collector Gateway component with load-balancing capability in front of the insight-opentelemetry-collector in the global service cluster, so that the spans of one trace are routed by TraceID to the same OpenTelemetry Collector instance.

          1. Deploy the load-balancing OTEL COL Gateway component

            If you are on Insight 0.25.x, you can enable this quickly with the Helm upgrade parameter --set opentelemetry-collector-gateway.enabled=true and skip the deployment steps below.

            Deploy by referring to the following YAML configuration.

            The full deployment configuration:
            kind: ClusterRole
            apiVersion: rbac.authorization.k8s.io/v1
            metadata:
              name: insight-otel-collector-gateway
            rules:
            - apiGroups: [""]
              resources: ["endpoints"]
              verbs: ["get", "watch", "list"]
            ---
            apiVersion: v1
            kind: ServiceAccount
            metadata:
              name: insight-otel-collector-gateway
              namespace: insight-system
            ---
            apiVersion: rbac.authorization.k8s.io/v1
            kind: ClusterRoleBinding
            metadata:
              name: insight-otel-collector-gateway
            roleRef:
              apiGroup: rbac.authorization.k8s.io
              kind: ClusterRole
              name: insight-otel-collector-gateway
            subjects:
            - kind: ServiceAccount
              name: insight-otel-collector-gateway
              namespace: insight-system
            ---
            kind: ConfigMap
            metadata:
              labels:
                app.kubernetes.io/component: opentelemetry-collector
                app.kubernetes.io/instance: insight-otel-collector-gateway
                app.kubernetes.io/name: insight-otel-collector-gateway
              name: insight-otel-collector-gateway-collector
              namespace: insight-system
            apiVersion: v1
            data:
              collector.yaml: |
                receivers:
                  otlp:
                    protocols:
                      grpc:
                      http:
                  jaeger:
                    protocols:
                      grpc:
                processors:

                extensions:
                  health_check:
                  pprof:
                    endpoint: :1888
                  zpages:
                    endpoint: :55679
                exporters:
                  logging:
                  loadbalancing:
                    routing_key: "traceID"
                    protocol:
                      otlp:
                        # all options from the OTLP exporter are supported
                        # except the endpoint
                        timeout: 1s
                        tls:
                          insecure: true
                    resolver:
                      k8s:
                        service: insight-opentelemetry-collector
                        ports:
                          - 4317
                service:
                  extensions: [pprof, zpages, health_check]
                  pipelines:
                    traces:
                      receivers: [otlp, jaeger]
                      exporters: [loadbalancing]
            ---
            apiVersion: apps/v1
            kind: Deployment
            metadata:
              labels:
                app.kubernetes.io/component: opentelemetry-collector
                app.kubernetes.io/instance: insight-otel-collector-gateway
                app.kubernetes.io/name: insight-otel-collector-gateway
              name: insight-otel-collector-gateway
              namespace: insight-system
            spec:
              replicas: 2
              selector:
                matchLabels:
                  app.kubernetes.io/component: opentelemetry-collector
                  app.kubernetes.io/instance: insight-otel-collector-gateway
                  app.kubernetes.io/name: insight-otel-collector-gateway
              template:
                metadata:
                  labels:
                    app.kubernetes.io/component: opentelemetry-collector
                    app.kubernetes.io/instance: insight-otel-collector-gateway
                    app.kubernetes.io/name: insight-otel-collector-gateway
                spec:
                  containers:
                  - args:
                    - --config=/conf/collector.yaml
                    env:
                    - name: POD_NAME
                      valueFrom:
                        fieldRef:
                          apiVersion: v1
                          fieldPath: metadata.name
                    image: ghcr.m.daocloud.io/openinsight-proj/opentelemetry-collector-contrib:5baef686672cfe5551e03b5c19d3072c432b6f33
                    imagePullPolicy: IfNotPresent
                    livenessProbe:
                      failureThreshold: 3
                      httpGet:
                        path: /
                        port: 13133
                        scheme: HTTP
                      periodSeconds: 10
                      successThreshold: 1
                      timeoutSeconds: 1
                    name: otc-container
                    resources:
                      limits:
                        cpu: '1'
                        memory: 2Gi
                      requests:
                        cpu: 100m
                        memory: 400Mi
                    ports:
                    - containerPort: 14250
                      name: jaeger-grpc
                      protocol: TCP
                    - containerPort: 8888
                      name: metrics
                      protocol: TCP
                    - containerPort: 4317
                      name: otlp-grpc
                      protocol: TCP
                    - containerPort: 4318
                      name: otlp-http
                      protocol: TCP
                    - containerPort: 55679
                      name: zpages
                      protocol: TCP
                    volumeMounts:
                    - mountPath: /conf
                      name: otc-internal
                  serviceAccount: insight-otel-collector-gateway
                  serviceAccountName: insight-otel-collector-gateway
                  volumes:
                  - configMap:
                      defaultMode: 420
                      items:
                      - key: collector.yaml
                        path: collector.yaml
                      name: insight-otel-collector-gateway-collector
                    name: otc-internal
            ---
            kind: Service
            apiVersion: v1
            metadata:
              name: insight-opentelemetry-collector-gateway
              namespace: insight-system
              labels:
                app.kubernetes.io/component: opentelemetry-collector
                app.kubernetes.io/instance: insight-otel-collector-gateway
                app.kubernetes.io/name: insight-otel-collector-gateway
            spec:
              ports:
                - name: fluentforward
                  protocol: TCP
                  port: 8006
                  targetPort: 8006
                - name: jaeger-compact
                  protocol: UDP
                  port: 6831
                  targetPort: 6831
                - name: jaeger-grpc
                  protocol: TCP
                  port: 14250
                  targetPort: 14250
                - name: jaeger-thrift
                  protocol: TCP
                  port: 14268
                  targetPort: 14268
                - name: metrics
                  protocol: TCP
                  port: 8888
                  targetPort: 8888
                - name: otlp
                  protocol: TCP
                  appProtocol: grpc
                  port: 4317
                  targetPort: 4317
                - name: otlp-http
                  protocol: TCP
                  port: 4318
                  targetPort: 4318
                - name: zipkin
                  protocol: TCP
                  port: 9411
                  targetPort: 9411
                - name: zpages
                  protocol: TCP
                  port: 55679
                  targetPort: 55679
              selector:
                app.kubernetes.io/component: opentelemetry-collector
                app.kubernetes.io/instance: insight-otel-collector-gateway
                app.kubernetes.io/name: insight-otel-collector-gateway
          2. Configure the tail sampling rules

            Note

            The tail sampling (tail_sampling processor) rules must be added to the existing insight-otel-collector-config ConfigMap.

          3. Add the following to the processors section; the specific rules can be adjusted as needed. See the official OTel examples.

            ........
            tail_sampling:
              decision_wait: 10s # wait 10 seconds; trace IDs beyond 10 seconds are no longer processed
              num_traces: 1500000  # number of traces held in memory; assuming 1000 traces per second, keep it no lower than 1000 * decision_wait * 2;
                                   # too large a value wastes memory, too small causes some traces to be dropped
              expected_new_traces_per_sec: 10
              policies: # reporting policies
                [
                    {
                      name: latency-policy,
                      type: latency,  # report traces slower than 500ms
                      latency: {threshold_ms: 500}
                    },
                    {
                      name: status_code-policy,
                      type: status_code,  # report traces whose status code is ERROR
                      status_code: {status_codes: [ ERROR ]}
                    }
                ]
            ......
            tail_sampling: # combined sampling
              decision_wait: 10s # wait 10 seconds; trace IDs beyond 10 seconds are no longer processed
              num_traces: 1500000  # number of traces held in memory; assuming 1000 traces per second, keep it no lower than 1000 * decision_wait * 2;
                                   # too large a value wastes memory, too small causes some traces to be dropped
              expected_new_traces_per_sec: 10
              policies: [
                  {
                    name: debug-worker-cluster-sample-policy,
                    type: and,
                    and:
                      {
                        and_sub_policy:
                          [
                            {
                              name: service-name-policy,
                              type: string_attribute,
                              string_attribute:
                                { key: k8s.cluster.id, values: [xxxxxxx] },
                            },
                            {
                              name: trace-status-policy,
                              type: status_code,
                              status_code: { status_codes: [ERROR] },
                            },
                            {
                              name: probabilistic-policy,
                              type: probabilistic,
                              probabilistic: { sampling_percentage: 1 },
                            }
                          ]
                      }
                  }
                ]
          4. Activate the processor in the otel col pipeline within the insight-otel-collector-config ConfigMap:

            traces:
              exporters:
                - servicegraph
                - otlp/jaeger
              processors:
                - memory_limiter
                - tail_sampling # 👈
                - batch
              receivers:
                - otlp
          5. Restart the insight-opentelemetry-collector component.

          6. Deploy or update insight-agent, changing the trace reporting address to port 4317 of the opentelemetry-collector-gateway LB.

            ....
                exporters:
                  otlp/global:
                    endpoint: insight-opentelemetry-collector-gateway.insight-system.svc.cluster.local:4317  # 👈 change to the gateway/LB address
          "},{"location":"admin/insight/best-practice/tail-based-sampling.html#_3","title":"\u53c2\u8003","text":"
          • sampling
          "},{"location":"admin/insight/collection-manag/agent-status.html","title":"insight-agent \u7ec4\u4ef6\u72b6\u6001\u8bf4\u660e","text":"

          In the AI computing platform, the observability product Insight observes multiple clusters. To collect observability data uniformly across clusters, users must install the Helm application insight-agent (installed in the insight-system namespace by default). See How to install insight-agent.

          "},{"location":"admin/insight/collection-manag/agent-status.html#_1","title":"\u72b6\u6001\u8bf4\u660e","text":"

          Under Observability -> Collection Management you can view the insight-agent installation status of each cluster.

          • Not installed: insight-agent is not installed in the insight-system namespace of the cluster
          • Running: insight-agent is installed successfully in the cluster and all deployed components are in the Running state
          • Abnormal: if insight-agent is in this state, the Helm deployment failed or some deployed component is not in the Running state

          You can troubleshoot as follows:

          1. Run the following command. If the status is deployed, go to the next step. If it is failed, since that affects application upgrades, it is recommended to uninstall and reinstall via Container Management -> Helm Apps:

            helm list -n insight-system
          2. Run the following command, or check the status of the components deployed in the cluster under Observability -> Collection Management. If any pods are not in the Running state, restart the abnormal pods.

            kubectl get pods -n insight-system
          "},{"location":"admin/insight/collection-manag/agent-status.html#_2","title":"\u8865\u5145\u8bf4\u660e","text":"
          1. The resource consumption of Prometheus, the metric collection component in insight-agent, is proportional to the number of pods running in the cluster. Adjust Prometheus's resources according to the cluster size (a values sketch follows this list); see Prometheus resource planning.

          2. The storage capacity of vmstorage, the metric storage component in the global service cluster, is proportional to the total number of pods across all clusters.

            • Contact the platform administrator to adjust the vmstorage disk capacity according to cluster size; see vmstorage disk capacity planning
            • Adjust the vmstorage disk according to the multi-cluster scale; see vmstorage disk expansion
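
          For reference, the Prometheus adjustment from item 1 is typically a resource override in the insight-agent Helm values; a sketch under the assumption that the chart exposes a standard resources block (the key path and the numbers are illustrative, not the chart's confirmed schema):

          # Hypothetical insight-agent values excerpt; verify key names against the chart's values.yaml
          prometheus:
            resources:
              requests:
                cpu: "1"
                memory: 4Gi
              limits:
                cpu: "2"
                memory: 8Gi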
          "},{"location":"admin/insight/collection-manag/collection-manag.html","title":"\u91c7\u96c6\u7ba1\u7406","text":"

          Collection Management is the central place to manage and display each cluster's installation of the collection plugin insight-agent. It helps users quickly check the health of the clusters' collection plugins and provides a shortcut for configuring collection rules.

          The steps are as follows:

          1. Click the upper-left corner and select Observability.

          2. Select Collection Management in the left navigation bar to view the collection plugin status of all clusters.

          3. When a cluster has insight-agent connected and in the Running state, click the cluster name to enter its details.

          4. On the Service Monitoring tab, click the shortcut link to jump to Container Management -> Custom Resources to add service discovery rules.

          "},{"location":"admin/insight/collection-manag/metric-collect.html","title":"\u6307\u6807\u6293\u53d6\u65b9\u5f0f","text":"

          Prometheus mainly scrapes the monitoring endpoints exposed by target services in a pull fashion, so scrape jobs must be configured to request the monitoring data and write it into the storage Prometheus provides. The Prometheus service currently offers the following job configurations:

          • Native Job configuration: the native Prometheus scrape job configuration.
          • Pod Monitor: in the Kubernetes ecosystem, scrapes monitoring data from pods based on the Prometheus Operator.
          • Service Monitor: in the Kubernetes ecosystem, scrapes monitoring data from a Service's Endpoints based on the Prometheus Operator.

          Note

          Configuration items in [ ] are optional.

          "},{"location":"admin/insight/collection-manag/metric-collect.html#job","title":"\u539f\u751f Job \u914d\u7f6e","text":"

          The configuration items are described below:

          # Scrape job name; a label (job=job_name) is also added to the scraped metrics
          job_name: <job_name>

          # Scrape interval
          [ scrape_interval: <duration> | default = <global_config.scrape_interval> ]

          # Scrape request timeout
          [ scrape_timeout: <duration> | default = <global_config.scrape_timeout> ]

          # Scrape request URI path
          [ metrics_path: <path> | default = /metrics ]

          # Handles conflicts between scraped labels and labels added by the backend Prometheus.
          # true: keep the scraped labels and ignore the conflicting backend Prometheus labels;
          # false: rename conflicting scraped labels to exported_<original-label>, then add the backend Prometheus labels;
          [ honor_labels: <boolean> | default = false ]

          # Whether to use the timestamps generated on the scraped target.
          # true: use the target's timestamp if present;
          # false: ignore the target's timestamp;
          [ honor_timestamps: <boolean> | default = true ]

          # Scrape protocol: http or https
          [ scheme: <scheme> | default = http ]

          # URL parameters of the scrape request
          params:
            [ <string>: [<string>, ...] ]

          # Sets the `Authorization` header of the scrape request via basic auth. password/password_file are mutually exclusive, with password_file taking precedence.
          basic_auth:
            [ username: <string> ]
            [ password: <secret> ]
            [ password_file: <string> ]

          # Sets the `Authorization` header of the scrape request via a bearer token. bearer_token/bearer_token_file are mutually exclusive, with bearer_token taking precedence.
          [ bearer_token: <secret> ]

          # Sets the `Authorization` header of the scrape request via a bearer token file. bearer_token/bearer_token_file are mutually exclusive, with bearer_token taking precedence.
          [ bearer_token_file: <filename> ]

          # Whether the scrape connection goes through a TLS secure channel; configure the corresponding TLS parameters
          tls_config:
            [ <tls_config> ]

          # Scrape the target's metrics through a proxy server; fill in the proxy address.
          [ proxy_url: <string> ]

          # Specify targets via static configuration; see the description below.
          static_configs:
            [ - <static_config> ... ]

          # CVM service discovery configuration; see the description below.
          cvm_sd_configs:
            [ - <cvm_sd_config> ... ]

          # After scraping, rewrite the target's labels via the relabel mechanism, applying multiple relabel rules in order.
          # See below for relabel_config.
          relabel_configs:
            [ - <relabel_config> ... ]

          # Before the scraped data is written, rewrite label values via the relabel mechanism, applying multiple relabel rules in order.
          # See below for relabel_config.
          metric_relabel_configs:
            [ - <relabel_config> ... ]

          # Limit on data points per scrape; 0: no limit, default 0
          [ sample_limit: <int> | default = 0 ]

          # Limit on targets per scrape; 0: no limit, default 0
          [ target_limit: <int> | default = 0 ]
          "},{"location":"admin/insight/collection-manag/metric-collect.html#pod-monitor","title":"Pod Monitor","text":"

          The configuration items are described below:

          # Prometheus Operator CRD version
          apiVersion: monitoring.coreos.com/v1
          # The Kubernetes resource type; here, PodMonitor
          kind: PodMonitor
          # The Kubernetes metadata; only name matters here. If jobLabel is not specified, the job label of the scraped metrics is <namespace>/<name>
          metadata:
            name: redis-exporter # fill in a unique name
            namespace: cm-prometheus  # fixed namespace, no need to change
          # Describes the selection of target pods and the scrape job configuration
            label:
              operator.insight.io/managed-by: insight # label identifying management by Insight
          spec:
            # Fill in the target pod's label; pod monitor uses its value as the job label value.
            # If looking at the Pod YAML, take the value from pod.metadata.labels.
            # If looking at a Deployment/Daemonset/Statefulset, take it from spec.template.metadata.labels.
            [ jobLabel: string ]
            # Adds the labels on the pod to the target's labels
            [ podTargetLabels: []string ]
            # Limit on data points per scrape; 0: no limit, default 0
            [ sampleLimit: uint64 ]
            # Limit on targets per scrape; 0: no limit, default 0
            [ targetLimit: uint64 ]
            # Configure the exposed Prometheus HTTP endpoints to scrape; multiple endpoints can be configured
            podMetricsEndpoints:
            [ - <endpoint_config> ... ] # see the endpoint description below
            # Select the namespaces of the pods to monitor; if omitted, all namespaces are selected
            [ namespaceSelector: ]
              # Whether to select all namespaces
              [ any: bool ]
              # List of namespaces to select
              [ matchNames: []string ]
            # Fill in the label values of the pods to monitor, to locate the target pods [K8S metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)
            selector:
              [ matchExpressions: array ]
                [ example: - {key: tier, operator: In, values: [cache]} ]
              [ matchLabels: object ]
                [ example: k8s-app: redis-exporter ]
          "},{"location":"admin/insight/collection-manag/metric-collect.html#1","title":"\u4e3e\u4f8b 1","text":"
apiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: redis-exporter # fill in a unique name\n  namespace: cm-prometheus # the namespace is fixed, do not modify\n  labels:\n    operator.insight.io/managed-by: insight  # label identifying management by Insight, required\nspec:\n  podMetricsEndpoints:\n    - interval: 30s\n      port: metric-port # fill in the name of the Prometheus exporter port in the Pod YAML\n      path: /metrics # fill in the path of the Prometheus exporter; defaults to /metrics if empty\n      relabelings:\n        - action: replace\n          sourceLabels:\n            - instance\n          regex: (.*)\n          targetLabel: instance\n          replacement: \"crs-xxxxxx\" # adjust to the corresponding Redis instance ID\n        - action: replace\n          sourceLabels:\n            - instance\n          regex: (.*)\n          targetLabel: ip\n          replacement: \"1.x.x.x\" # adjust to the corresponding Redis instance IP\n  namespaceSelector: # select the namespaces the monitored Pods live in\n    matchNames:\n      - redis-test\n  selector: # fill in the label values of the Pods to monitor, to locate the target Pods\n    matchLabels:\n      k8s-app: redis-exporter\n
          "},{"location":"admin/insight/collection-manag/metric-collect.html#2","title":"\u4e3e\u4f8b 2","text":"
          job_name: prometheus\nscrape_interval: 30s\nstatic_configs:\n- targets:\n  - 127.0.0.1:9090\n
          "},{"location":"admin/insight/collection-manag/metric-collect.html#service-monitor","title":"Service Monitor","text":"

The corresponding configuration items are described below:

# Prometheus Operator CRD version\napiVersion: monitoring.coreos.com/v1\n# Corresponding K8S resource type, here Service Monitor\nkind: ServiceMonitor\n# Corresponding K8S metadata; only name matters here. If jobLabel is not specified, the job label on the scraped metrics takes the Service name as its value.\nmetadata:\n  name: redis-exporter # fill in a unique name\n  namespace: cm-prometheus  # the namespace is fixed, do not modify\n  # Configuration for selecting the target Pods and for the scrape job\n  labels:\n    operator.insight.io/managed-by: insight  # label identifying management by Insight, required\nspec:\n  # Fill in the corresponding Pod label (metadata/labels); the service monitor uses its value as the job label value\n  [ jobLabel: string ]\n  # Add the corresponding Service labels to the target's labels\n  [ targetLabels: []string ]\n  # Add the corresponding Pod labels to the target's labels\n  [ podTargetLabels: []string ]\n  # Limit on data points per scrape; 0: no limit, default 0\n  [ sampleLimit: uint64 ]\n  # Limit on targets per scrape; 0: no limit, default 0\n  [ targetLimit: uint64 ]\n  # Configure the exposed Prometheus HTTP endpoints to scrape; multiple endpoints can be configured\n  endpoints:\n  [ - <endpoint_config> ... ] # see the endpoint explanation below\n  # Select the namespaces the monitored Pods live in; leave empty to select all namespaces\n  [ namespaceSelector: ]\n    # Whether to select all namespaces\n    [ any: bool ]\n    # List of namespaces to select\n    [ matchNames: []string ]\n  # Fill in the label values of the Pods to monitor, to locate the target Pods [K8S metav1.LabelSelector](https://v1-17.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#labelselector-v1-meta)\n  selector:\n    [ matchExpressions: array ]\n      [ example: - {key: tier, operator: In, values: [cache]} ]\n    [ matchLabels: object ]\n      [ example: k8s-app: redis-exporter ]\n
          "},{"location":"admin/insight/collection-manag/metric-collect.html#_2","title":"\u4e3e\u4f8b","text":"
apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: go-demo # fill in a unique name\n  namespace: cm-prometheus # the namespace is fixed, do not modify\n  labels:\n    operator.insight.io/managed-by: insight  # label identifying management by Insight, required\nspec:\n  endpoints:\n    - interval: 30s\n      # fill in the name of the Prometheus exporter port in the Service YAML\n      port: 8080-8080-tcp\n      # fill in the path of the Prometheus exporter; defaults to /metrics if empty\n      path: /metrics\n      relabelings:\n        # ** There must be a label named application. Assuming k8s has a label named app,\n        # we replace it with application via the relabel replace action\n        - action: replace\n          sourceLabels: [__meta_kubernetes_pod_label_app]\n          targetLabel: application\n  # select the namespaces the monitored Services live in\n  namespaceSelector:\n    matchNames:\n      - golang-demo\n  # fill in the label values of the Services to monitor, to locate the target Services\n  selector:\n    matchLabels:\n      app: golang-app-demo\n
          "},{"location":"admin/insight/collection-manag/metric-collect.html#endpoint_config","title":"endpoint_config","text":"

The corresponding configuration items are described below:

# Name of the corresponding port. Note that this is not the port number itself. Default: 80. The value comes from:\n# ServiceMonitor: Service>spec/ports/name;\n# PodMonitor:\n#   If looking at the Pod YAML, take the value in pod.spec.containers.ports.name.\n#   If looking at a Deployment/DaemonSet/StatefulSet, take spec.template.spec.containers.ports.name\n[ port: string | default = 80]\n# URI path of the scrape request\n[ path: string | default = /metrics ]\n# Scrape protocol: http or https\n[ scheme: string | default = http]\n# URL parameters of the scrape request\n[ params: map[string][]string]\n# Scrape interval\n[ interval: string | default = 30s ]\n# Scrape timeout\n[ scrapeTimeout: string | default = 30s]\n# Whether the scrape connection uses a TLS secure channel; configure the corresponding TLS parameters\n[ tlsConfig: TLSConfig ]\n# Read the bearer token from the given file and put it into the headers of the scrape request\n[ bearerTokenFile: string ]\n# Read the bearer token from the given K8S secret key; note the secret namespace must match the PodMonitor/ServiceMonitor\n[ bearerTokenSecret: string ]\n# How to handle conflicts between scraped labels and labels added by the backend Prometheus.\n# true: keep the scraped labels and ignore the conflicting backend labels;\n# false: rename conflicting scraped labels to exported_<original-label>, then attach the backend labels;\n[ honorLabels: bool | default = false ]\n# Whether to use the timestamps generated on the scraped target.\n# true: use the target's timestamp if it provides one;\n# false: ignore the target's timestamp;\n[ honorTimestamps: bool | default = true ]\n# Basic auth credentials; fill username/password with the corresponding K8S secret key values; note the secret namespace must match the PodMonitor/ServiceMonitor.\n[ basicAuth: BasicAuth ]\n# Scrape the target's metrics through a proxy service; fill in the proxy service address\n[ proxyUrl: string ]\n# After scraping, rewrite the target's labels via the relabel mechanism; multiple relabel rules are applied in order.\n# See the relabel_config section below\nrelabelings:\n[ - <relabel_config> ...]\n# Before the scraped data is written, rewrite label values via the relabel mechanism; multiple relabel rules are applied in order.\n# See the relabel_config section below\nmetricRelabelings:\n[ - <relabel_config> ...]\n
          "},{"location":"admin/insight/collection-manag/metric-collect.html#relabel_config","title":"relabel_config","text":"

The corresponding configuration items are described below:

# Which labels to take from the original labels for relabeling; the extracted values are concatenated using the separator defined below.\n# In PodMonitor/ServiceMonitor the corresponding field is sourceLabels\n[ source_labels: '[' <labelname> [, ...] ']' ]\n# Character used to concatenate the label values to be relabeled; default ';'\n[ separator: <string> | default = ; ]\n\n# When action is replace/hashmod, target_label specifies the corresponding label name.\n# In PodMonitor/ServiceMonitor the corresponding field is targetLabel\n[ target_label: <labelname> ]\n\n# Regular expression to match the source label values against\n[ regex: <regex> | default = (.*) ]\n\n# Used when action is hashmod: the MD5 of the source label values is taken modulo this value\n[ modulus: <int> ]\n\n# When action is replace, replacement defines the expression substituted after regex matches; it can reference regex capture groups\n[ replacement: <string> | default = $1 ]\n\n# The operation performed based on the regex match; the actions are as follows, default replace:\n# replace: if regex matches, substitute the value defined in replacement and add the corresponding label, named via target_label\n# keep: drop the target if regex does not match\n# drop: drop the target if regex matches\n# hashmod: take the MD5 of the source label values modulo the value specified in modulus\n# and add a new label whose name is specified by target_label\n# labelmap: if regex matches, replace the corresponding label name using replacement\n# labeldrop: if regex matches, delete the corresponding label\n# labelkeep: if regex does not match, delete the corresponding label\n[ action: <relabel_action> | default = replace ]\n
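As a concrete illustration, here is a minimal relabel sketch (the app label and the nginx value are hypothetical): it keeps only targets whose app label is nginx and copies that value into an application label.
relabel_configs:\n  # keep only targets whose app label is nginx (hypothetical value)\n  - source_labels: [__meta_kubernetes_pod_label_app]\n    regex: nginx\n    action: keep\n  # copy the app label value into a new application label\n  - source_labels: [__meta_kubernetes_pod_label_app]\n    target_label: application\n    action: replace\n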
          "},{"location":"admin/insight/collection-manag/probe-module.html","title":"\u81ea\u5b9a\u4e49\u63a2\u6d4b\u65b9\u5f0f","text":"

Insight uses the Blackbox Exporter provided by the Prometheus community as its blackbox monitoring solution. It can probe target instances over HTTP, HTTPS, DNS, ICMP, TCP, and gRPC, and suits the following scenarios:

• HTTP/HTTPS: URL/API availability checks
• ICMP: host liveness checks
• TCP: port liveness checks
• DNS: domain name resolution

This document describes how to configure custom probe modules in the existing Blackbox ConfigMap.

Insight does not enable the ICMP probe by default, because ICMP requires elevated privileges. Below we use the ICMP and HTTP probes as examples to show how to modify the ConfigMap for custom ICMP and HTTP probing.

          "},{"location":"admin/insight/collection-manag/probe-module.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
1. Go to the Cluster List in Container Management and click the target cluster to enter its details;
2. In the left navigation, select ConfigMaps & Secrets -> ConfigMaps;
3. Find the ConfigMap named insight-agent-prometheus-blackbox-exporter and click Edit YAML;

Add custom probe modules under modules:

HTTP probe | ICMP probe
modules:\n  http_2xx:\n    prober: http\n    timeout: 5s\n    http:\n      valid_http_versions: [HTTP/1.1, HTTP/2]\n      valid_status_codes: []  # Defaults to 2xx\n      method: GET\n

modules:\n  ICMP: # example ICMP probe configuration\n    prober: icmp\n    timeout: 5s\n    icmp:\n      preferred_ip_protocol: ip4\n  icmp_example: # second example ICMP probe configuration\n    prober: icmp\n    timeout: 5s\n    icmp:\n      preferred_ip_protocol: \"ip4\"\n      source_ip_address: \"127.0.0.1\"\n
Because ICMP requires elevated privileges, the Pod's privileges must also be raised; otherwise an operation not permitted error occurs. There are two ways to elevate the privileges:

• Method 1: Edit the BlackBox Exporter deployment file directly to enable it

apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: insight-agent-prometheus-blackbox-exporter\n  namespace: insight-system\nspec:\n  template:\n    spec:\n      containers:\n        - name: blackbox-exporter\n          image: # ... (image, args, ports, etc. remain unchanged)\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            allowPrivilegeEscalation: false\n            capabilities:\n              add:\n              - NET_RAW\n              drop:\n              - ALL\n            readOnlyRootFilesystem: true\n            runAsGroup: 0\n            runAsNonRoot: false\n            runAsUser: 0\n
• Method 2: Elevate privileges via a Helm upgrade

            prometheus-blackbox-exporter:\n  enabled: true\n  securityContext:\n    runAsUser: 0\n    runAsGroup: 0\n    readOnlyRootFilesystem: true\n    runAsNonRoot: false\n    allowPrivilegeEscalation: false\n    capabilities:\n      add: [\"NET_RAW\"]\n
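These values can then be applied with a regular Helm upgrade. A minimal sketch, assuming the values above are saved as blackbox-values.yaml and the release and chart names match your installation:
helm upgrade insight-agent <your-insight-chart>/insight-agent -n insight-system --reuse-values -f blackbox-values.yaml\n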

          Info

For more probe modules, see the blackbox_exporter Configuration.
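After a module is added, you can verify it manually by calling the exporter's /probe endpoint with the module and target as URL parameters. A minimal sketch (the in-cluster service name is an assumption derived from the ConfigMap name; 9115 is the exporter's default port):
curl \"http://insight-agent-prometheus-blackbox-exporter.insight-system:9115/probe?module=http_2xx&target=https://example.com\"\n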

          "},{"location":"admin/insight/collection-manag/probe-module.html#_3","title":"\u5176\u4ed6\u53c2\u8003","text":"

The following YAML file contains multiple probe modules covering HTTP, TCP, SMTP, ICMP, DNS, and more. You can modify the configuration file of insight-agent-prometheus-blackbox-exporter as needed.

Click to view the full YAML file
kind: ConfigMap\napiVersion: v1\nmetadata:\n  name: insight-agent-prometheus-blackbox-exporter\n  namespace: insight-system\n  labels:\n    app.kubernetes.io/instance: insight-agent\n    app.kubernetes.io/managed-by: Helm\n    app.kubernetes.io/name: prometheus-blackbox-exporter\n    app.kubernetes.io/version: v0.24.0\n    helm.sh/chart: prometheus-blackbox-exporter-8.8.0\n  annotations:\n    meta.helm.sh/release-name: insight-agent\n    meta.helm.sh/release-namespace: insight-system\ndata:\n  blackbox.yaml: |\n    modules:\n      HTTP_GET:\n        prober: http\n        timeout: 5s\n        http:\n          method: GET\n          valid_http_versions: [\"HTTP/1.1\", \"HTTP/2.0\"]\n          follow_redirects: true\n          preferred_ip_protocol: \"ip4\"\n      HTTP_POST:\n        prober: http\n        timeout: 5s\n        http:\n          method: POST\n          body_size_limit: 1MB\n      TCP:\n        prober: tcp\n        timeout: 5s\n      # Disabled by default:\n      # ICMP:\n      #   prober: icmp\n      #   timeout: 5s\n      #   icmp:\n      #     preferred_ip_protocol: ip4\n      SSH:\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n          - expect: \"^SSH-2.0-\"\n      POP3S:\n        prober: tcp\n        tcp:\n          query_response:\n          - expect: \"^+OK\"\n          tls: true\n          tls_config:\n            insecure_skip_verify: false\n      http_2xx_example:               # example HTTP probe\n        prober: http\n        timeout: 5s                   # probe timeout\n        http:\n          valid_http_versions: [\"HTTP/1.1\", \"HTTP/2.0\"]                   # version in the response; usually keep the default\n          valid_status_codes: []  # Defaults to 2xx                       # valid status code range; the probe succeeds if the response code falls within it\n          method: GET                 # request method\n          headers:                    # request headers\n            Host: vhost.example.com\n            Accept-Language: en-US\n            Origin: example.com\n          no_follow_redirects: false  # whether redirects are allowed\n          fail_if_ssl: false   \n          fail_if_not_ssl: false\n          fail_if_body_matches_regexp:\n            - \"Could not connect to database\"\n          fail_if_body_not_matches_regexp:\n            - \"Download the latest version here\"\n          fail_if_header_matches: # Verifies that no cookies are set\n            - header: Set-Cookie\n              allow_missing: true\n              regexp: '.*'\n          fail_if_header_not_matches:\n            - header: Access-Control-Allow-Origin\n              regexp: '(\\*|example\\.com)'\n          tls_config:                  # TLS configuration for https requests\n            insecure_skip_verify: false\n          preferred_ip_protocol: \"ip4\" # defaults to \"ip6\"                 # preferred IP protocol version\n          ip_protocol_fallback: false  # no fallback to \"ip6\"            \n      http_post_2xx:                   # example HTTP probe with a body\n        prober: http\n        timeout: 5s\n        http:\n          method: POST                 # request method of the probe\n          headers:\n            Content-Type: application/json\n          body: '{\"username\":\"admin\",\"password\":\"123456\"}'                   # body carried by the probe\n      http_basic_auth_example:         # example probe with username and password\n        prober: http\n        timeout: 5s\n        http:\n          method: POST\n          headers:\n            Host: \"login.example.com\"\n          basic_auth:                  # username and password to use for the probe\n            username: \"username\"\n            password: \"mysecret\"\n      http_custom_ca_example:\n        prober: http\n        http:\n          method: GET\n          tls_config:                  # root certificate used for the probe\n            ca_file: \"/certs/my_cert.crt\"\n      http_gzip:\n        prober: http\n        http:\n          method: GET\n          compression: gzip            # compression method used for the probe\n      http_gzip_with_accept_encoding:\n        prober: http\n        http:\n          method: GET\n          compression: gzip\n          headers:\n            Accept-Encoding: gzip\n      tls_connect:                     # example TCP probe\n        prober: tcp\n        timeout: 5s\n        tcp:\n          tls: true                    # whether to use TLS\n      tcp_connect_example:\n        prober: tcp\n        timeout: 5s\n      imap_starttls:                   # example configuration for probing IMAP mail servers\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - expect: \"OK.*STARTTLS\"\n            - send: \". STARTTLS\"\n            - expect: \"OK\"\n            - starttls: true\n            - send: \". capability\"\n            - expect: \"CAPABILITY IMAP4rev1\"\n      smtp_starttls:                   # example configuration for probing SMTP mail servers\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - expect: \"^220 ([^ ]+) ESMTP (.+)$\"\n            - send: \"EHLO prober\\r\"\n            - expect: \"^250-STARTTLS\"\n            - send: \"STARTTLS\\r\"\n            - expect: \"^220\"\n            - starttls: true\n            - send: \"EHLO prober\\r\"\n            - expect: \"^250-AUTH\"\n            - send: \"QUIT\\r\"\n      irc_banner_example:\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - send: \"NICK prober\"\n            - send: \"USER prober prober prober :prober\"\n            - expect: \"PING :([^ ]+)\"\n              send: \"PONG ${1}\"\n            - expect: \"^:[^ ]+ 001\"\n      # icmp_example:                    # example ICMP probe configuration\n      #   prober: icmp\n      #   timeout: 5s\n      #   icmp:\n      #     preferred_ip_protocol: \"ip4\"\n      #     source_ip_address: \"127.0.0.1\"\n      dns_udp_example:                 # example DNS query over UDP\n        prober: dns\n        timeout: 5s\n        dns:\n          query_name: \"www.prometheus.io\"                 # domain name to resolve\n          query_type: \"A\"              # record type of that domain\n          valid_rcodes:\n          - NOERROR\n          validate_answer_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n            fail_if_all_match_regexp:\n            - \".*127.0.0.1\"\n            fail_if_not_matches_regexp:\n            - \"www.prometheus.io.\\t300\\tIN\\tA\\t127.0.0.1\"\n            fail_if_none_matches_regexp:\n            - \"127.0.0.1\"\n          validate_authority_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n          validate_additional_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n      dns_soa:\n        prober: dns\n        dns:\n          query_name: \"prometheus.io\"\n          query_type: \"SOA\"\n      dns_tcp_example:               # example DNS query over TCP\n        prober: dns\n        dns:\n          transport_protocol: \"tcp\" # defaults to \"udp\"\n          preferred_ip_protocol: \"ip4\" # defaults to \"ip6\"\n          query_name: \"www.prometheus.io\"\n
          "},{"location":"admin/insight/collection-manag/service-monitor.html","title":"\u914d\u7f6e\u670d\u52a1\u53d1\u73b0\u89c4\u5219","text":"

Observability Insight supports creating the ServiceMonitor CRD via Container Management to satisfy custom service-discovery collection requirements. Using a ServiceMonitor, you can define the namespace scope for Pod discovery and select the monitored Services via matchLabel.

          "},{"location":"admin/insight/collection-manag/service-monitor.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

The cluster has the Helm application insight-agent installed and in the Running state.

          "},{"location":"admin/insight/collection-manag/service-monitor.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
1. Select Collection Management in the left navigation to view the status of the collection plugins across all clusters.

2. Click a cluster name in the list to enter the collection configuration details.

3. Click the link to jump to Container Management and create a Service Monitor.

apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: micrometer-demo # (1)\n  namespace: insight-system # (2)\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  endpoints: # (3)\n    - honorLabels: true\n      interval: 15s\n      path: /actuator/prometheus\n      port: http\n  namespaceSelector: # (4)\n    matchNames:\n      - insight-system  # (5)\n  selector: # (6)\n    matchLabels:\n      micrometer-prometheus-discovery: \"true\"\n
  1. Specify the name of the ServiceMonitor
  2. Specify the namespace of the ServiceMonitor
  3. This is the service endpoint, i.e. the address from which Prometheus collects metrics. endpoints is an array, so multiple endpoints can be created at once. Each endpoint contains three fields, with the following meanings:

    • interval: the period at which Prometheus scrapes this endpoint, in seconds; set to 15s in this example.
    • path: the scrape path for Prometheus; /actuator/prometheus in this example.
    • port: the port through which the data is scraped; it is the name set on the port of the scraped Service.
  4. This is the discovery scope of the Services. namespaceSelector contains two mutually exclusive fields:

    • any: has only one possible value, true; when set, it watches changes of all Services matching the Selector filter.
    • matchNames: an array specifying the namespaces to watch. For example, to watch only Services in the default and insight-system namespaces, set matchNames as follows:

                namespaceSelector:\n  matchNames:\n    - default\n    - insight-system\n
  5. The namespace matched here is the namespace of the application that needs to expose metrics

  6. Used to select the Service
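For the ServiceMonitor above to discover anything, the target Service must carry the selector label and expose a port whose name matches the endpoint's port field. A minimal sketch of such a Service (the workload selector and port number are assumptions):
apiVersion: v1\nkind: Service\nmetadata:\n  name: micrometer-demo\n  namespace: insight-system\n  labels:\n    micrometer-prometheus-discovery: \"true\" # matched by the ServiceMonitor selector\nspec:\n  selector:\n    app: micrometer-demo # assumption: your workload's label\n  ports:\n    - name: http # must match the endpoint port name above\n      port: 8080\n      targetPort: 8080\n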
          "},{"location":"admin/insight/compati-test/k8s-compatibility.html","title":"Kubernetes \u96c6\u7fa4\u517c\u5bb9\u6027\u6d4b\u8bd5","text":"

\u2705: test passed; \u274c: test failed; blank: not tested.

          "},{"location":"admin/insight/compati-test/k8s-compatibility.html#insight-server-kubernetes","title":"Insight Server \u7684 Kubernetes \u517c\u5bb9\u6027\u6d4b\u8bd5","text":"\u573a\u666f \u6d4b\u8bd5\u65b9\u5f0f K8s 1.31 K8s 1.30 K8s 1.29 K8s 1.28 K8s 1.27 K8s 1.26 k8s 1.25.0 k8s 1.24 k8s 1.23 k8s 1.22 \u57fa\u7ebf\u573a\u666f E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u6307\u6807\u67e5\u8be2 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u65e5\u5fd7\u67e5\u8be2 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u94fe\u8def\u67e5\u8be2 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u544a\u8b66\u4e2d\u5fc3 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u62d3\u6251\u67e5\u8be2 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705"},{"location":"admin/insight/compati-test/k8s-compatibility.html#insight-agent-kubernetes","title":"Insight Agent \u7684 Kubernetes \u517c\u5bb9\u6027\u6d4b\u8bd5","text":"\u573a\u666f \u6d4b\u8bd5\u65b9\u5f0f K8s 1.31 K8s 1.30 K8s 1.29 K8s 1.28 K8s 1.27 K8s 1.26 k8s 1.25 k8s 1.24 k8s 1.23 k8s 1.22 k8s 1.21 k8s 1.20 k8s 1.19 k8s 1.18 k8s 1.17 k8s 1.16 \u57fa\u7ebf\u573a\u666f E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c \u6307\u6807\u67e5\u8be2 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c \u65e5\u5fd7\u67e5\u8be2 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c \u94fe\u8def\u67e5\u8be2 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c \u544a\u8b66\u4e2d\u5fc3 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c \u62d3\u6251\u67e5\u8be2 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c

          Note

Insight Agent version compatibility history:

1. Starting from v0.16.x, Insight Agent is incompatible with k8s v1.16.15
2. Insight Agent v0.20.0 is compatible with k8s v1.18.20
3. Insight Agent v0.19.2/v0.18.2/v0.17.x are incompatible with k8s v1.18.20
4. Insight Agent v0.30.1 is incompatible with k8s v1.18.x and earlier
          "},{"location":"admin/insight/compati-test/ocp-compatibility.html","title":"Openshift 4.x \u96c6\u7fa4\u517c\u5bb9\u6027\u6d4b\u8bd5","text":"

\u2705: test passed; \u274c: test failed.

          Note

The tested features in the table are not exhaustive.

case Test method ocp4.10(k8s 1.23.0) Remarks Collect and query metrics of a web application Manual \u2705 Add custom metric collection Manual \u2705 Query real-time metrics Manual \u2705 Instant metric query Manual \u2705 Instant metric API field validation Manual \u2705 Query metrics over a time range Manual \u2705 Time-range metric API field validation Manual \u2705 Batch query of cluster CPU and memory usage, total cluster CPU, cluster memory usage, and total node count Manual \u2705 Batch query of node CPU and memory usage, total node CPU, and node memory usage Manual \u2705 Batch query of cluster CPU and memory usage, total cluster CPU, cluster memory usage, and total node count over a time range Manual \u2705 Batch time-range metric API field validation Manual \u2705 Query Pod logs Manual \u2705 Query SVC logs Manual \u2705 Query statefulset logs Manual \u2705 Query Deployment logs Manual \u2705 Query NPD logs Manual \u2705 Log filtering Manual \u2705 Fuzzy log query - workloadSearch Manual \u2705 Fuzzy log query - podSearch Manual \u2705 Fuzzy log query - containerSearch Manual \u2705 Exact log query - cluster Manual \u2705 Exact log query - namespace Manual \u2705 Log query API field validation Manual \u2705 Alert rules - create/delete/update/query Manual \u2705 Alert templates - create/delete/update/query Manual \u2705 Notification methods - create/delete/update/query Manual \u2705 Trace query Manual \u2705 Topology query Manual \u2705"},{"location":"admin/insight/compati-test/rancher-compatibility.html","title":"Rancher Cluster Compatibility Test","text":"

\u2705: test passed; \u274c: test failed.

          Note

The tested features in the table are not exhaustive.

case Test method Rancher rke2c1(k8s 1.24.11) Remarks Collect and query metrics of a web application Manual \u2705 Add custom metric collection Manual \u2705 Query real-time metrics Manual \u2705 Query instant metrics Manual \u2705 Validate the instant metric query API Manual \u2705 Query metrics over a time range Manual \u2705 Validate the time-range metric query API Manual \u2705 Batch query of cluster CPU and memory usage, total cluster CPU, cluster memory usage, and total node count Manual \u2705 Batch query of node CPU and memory usage, total node CPU, and node memory usage Manual \u2705 Batch query of cluster CPU and memory usage, total cluster CPU, cluster memory usage, and total node count over a time range Manual \u2705 Validate the batch time-range metric query API Manual \u2705 Query Pod logs Manual \u2705 Query SVC logs Manual \u2705 Query statefulset logs Manual \u2705 Query Deployment logs Manual \u2705 Query NPD logs Manual \u2705 Filter logs Manual \u2705 Fuzzy log query - workloadSearch Manual \u2705 Fuzzy log query - podSearch Manual \u2705 Fuzzy log query - containerSearch Manual \u2705 Exact log query - cluster Manual \u2705 Exact log query - namespace Manual \u2705 Validate the log query API Manual \u2705 Alert rules - create/delete/update/query Manual \u2705 Alert templates - create/delete/update/query Manual \u2705 Notification methods - create/delete/update/query Manual \u2705 Trace query Manual \u2705 Topology query Manual \u2705"},{"location":"admin/insight/dashboard/dashboard.html","title":"Dashboard","text":"

Grafana is an open-source data visualization and monitoring platform that provides rich charts and panels for real-time monitoring, analysis, and visualization of metrics and logs from various data sources. Observability Insight uses open-source Grafana to provide monitoring services and supports viewing resource consumption from multiple dimensions such as cluster, node, and namespace.

For details about open-source Grafana, see the Grafana official documentation.

          "},{"location":"admin/insight/dashboard/dashboard.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
1. Select Dashboard in the left navigation.

  • In the Insight / Overview dashboard, you can view the resource usage of multiple clusters and analyze resource usage, network, and storage by namespace, pod, and other dimensions.

  • Click the dropdown at the top left of the dashboard to switch clusters.

  • Click the bottom right of the dashboard to switch the query time range.

2. Insight curates several community-recommended dashboards, which let you monitor from multiple dimensions such as node, namespace, and workload. Click the insight-system / Insight / Overview area to switch dashboards.

          Note

1. To access the Grafana UI, see Log in to Grafana as an administrator.

2. To import custom dashboards, see Import Custom Dashboards.

          "},{"location":"admin/insight/dashboard/import-dashboard.html","title":"\u5bfc\u5165\u81ea\u5b9a\u4e49\u4eea\u8868\u76d8","text":"

Using Grafana CRDs, the management and deployment of dashboards can be brought into Kubernetes lifecycle management, enabling version control, automated deployment, and cluster-level management of dashboards. This page describes how to import custom dashboards via CRD and the UI.

          "},{"location":"admin/insight/dashboard/import-dashboard.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
1. Log in to the AI computing platform, go to Container Management, and select kpanda-global-cluster in the cluster list.

2. Select Custom Resources in the left navigation, find grafanadashboards.integreatly.org in the list, and enter its details.

3. Click Create from YAML and use the following template, replacing the dashboard JSON in the json field.

  • namespace: fill in the target namespace;
  • name: fill in the name of the dashboard.
  • label: required, operator.insight.io/managed-by: insight.
            apiVersion: integreatly.org/v1alpha1\nkind: GrafanaDashboard\nmetadata:\n  labels:\n    app: insight-grafana-operator\n    operator.insight.io/managed-by: insight\n  name: sample-dashboard\n  namespace: insight-system\nspec:\n  json: >\n    {\n      \"id\": null,\n      \"title\": \"Simple Dashboard\",\n      \"tags\": [],\n      \"style\": \"dark\",\n      \"timezone\": \"browser\",\n      \"editable\": true,\n      \"hideControls\": false,\n      \"graphTooltip\": 1,\n      \"panels\": [],\n      \"time\": {\n        \"from\": \"now-6h\",\n        \"to\": \"now\"\n      },\n      \"timepicker\": {\n        \"time_options\": [],\n        \"refresh_intervals\": []\n      },\n      \"templating\": {\n        \"list\": []\n      },\n      \"annotations\": {\n        \"list\": []\n      },\n      \"refresh\": \"5s\",\n      \"schemaVersion\": 17,\n      \"version\": 0,\n      \"links\": []\n    }\n
4. After clicking OK, wait a moment and the newly imported dashboard appears in Dashboard.
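Equivalently, the manifest can be applied from the command line instead of the YAML editor; a minimal sketch, assuming it is saved as sample-dashboard.yaml:
kubectl apply -f sample-dashboard.yaml\nkubectl get grafanadashboards.integreatly.org -n insight-system\n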

          Info

To design dashboards yourself, see Add a dashboard panel.

          "},{"location":"admin/insight/dashboard/login-grafana.html","title":"\u8bbf\u95ee\u539f\u751f Grafana","text":"

Insight provides rich visualization capabilities through Grafana while retaining the entry point to native Grafana.

          "},{"location":"admin/insight/dashboard/login-grafana.html#_1","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
1. Open a browser and enter the Grafana address.

  Access address: http://ip:access-port/ui/insight-grafana/login

  For example: http://10.6.10.233:30209/ui/insight-grafana/login

2. Click Log in at the bottom right and use the default username and password (admin/admin).

3. Click Log in to complete the login.

          "},{"location":"admin/insight/dashboard/overview.html","title":"\u6982\u89c8","text":"

Overview only counts data from clusters where insight-agent is installed and in a normal running state. In the overview you can view the resource summary of multiple clusters:

• Alert statistics: view statistics on active alerts across all clusters.
• Resource consumption: view the resource trends of the top 5 clusters and nodes over the last hour, by CPU usage, memory usage, and disk usage.
• Sorted by CPU usage by default. You can switch the metric to change how clusters and nodes are sorted.
• Resource trends: view the node count trend over the last 15 days and the pod running trend over one hour.
• Service request ranking: view the top 5 services by request latency and error rate across multiple clusters, with their cluster and namespace.
          "},{"location":"admin/insight/dashboard/overview.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

Select Overview in the left navigation.

          "},{"location":"admin/insight/data-query/log.html","title":"\u65e5\u5fd7\u67e5\u8be2","text":"

Insight collects node logs, container logs, and Kubernetes audit logs by default. On the log query page, you can query the standard output (stdout) logs within your login account's permissions, including node logs, product logs, and Kubernetes audit logs, quickly locate the logs you need among large volumes, and use the log source information and contextual raw data to help pinpoint problems.

          "},{"location":"admin/insight/data-query/log.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
1. Click Observability in the primary navigation.
2. Select Logs in the left navigation.

  • The last 24 hours are queried by default;
  • On first entry, the container logs of the clusters or namespaces your login account has permission for are queried by default;

3. The top tab defaults to Standard Query.

  1. Click Filter to expand the filter panel, where you can switch log search criteria and types.
  2. Log types:

    • Container logs: record activities and events inside the containers in the cluster, including application output, error messages, warnings, and debug information. Logs can be filtered by cluster, namespace, pod, and container.
    • Node logs: record system-level logs of each node in the cluster, containing information about the node's operating system, kernel, services, and components. Logs can be filtered by cluster, node, and file path.
  3. Fuzzy search on a single keyword is supported.

4. Switch the top tab to Lucene Syntax Query.

  On first entry, the container logs of the clusters or namespaces your login account has permission for are selected by default.

  Lucene syntax notes:

  1. Use logical operators (AND, OR, NOT, "") to query multiple keywords, for example: keyword1 AND (keyword2 OR keyword3) NOT keyword4.
  2. Use a tilde (~) for fuzzy queries; an optional parameter after "~" controls the similarity of the fuzzy query, defaulting to 0.5 if unspecified. For example: error~.
  3. Use wildcards (*, ?): ? matches any single character, * matches zero or more characters.
  4. Use square brackets [ ] or curly brackets { } for range queries: square brackets [ ] denote a closed interval that includes the boundary values; curly brackets { } denote an open interval that excludes them. Range queries apply only to sortable field types such as numbers and dates. For example: timestamp:[2022-01-01 TO 2022-01-31].
  5. For more usage, see the Lucene syntax description. A combined example is sketched below.
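Putting these rules together, a combined query might look like the following sketch (the keywords are illustrative; the timestamp field follows the range-query example above):
(error~ OR timeout) AND NOT debug AND timestamp:[2022-01-01 TO 2022-01-31]\n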
          "},{"location":"admin/insight/data-query/log.html#_3","title":"\u5176\u4ed6\u64cd\u4f5c","text":""},{"location":"admin/insight/data-query/log.html#_4","title":"\u67e5\u770b\u65e5\u5fd7\u4e0a\u4e0b\u6587","text":"

Click the button after a log entry to view, in the panel that slides out on the right, the default 100 lines of context for that entry. Switch Display Lines to view more context.

          "},{"location":"admin/insight/data-query/log.html#_5","title":"\u5bfc\u51fa\u65e5\u5fd7\u6570\u636e","text":"

Click the download button at the top right of the list.

• You can configure the exported log fields; the configurable fields depend on the log type, and the Log Content field is required.
• Log query results can be exported in .txt or .csv format.

          "},{"location":"admin/insight/data-query/metric.html","title":"\u6307\u6807\u67e5\u8be2","text":"

Metric query supports querying the metric data of container resources, letting you view the trend of monitoring metrics. Advanced query also supports native PromQL statements for metric queries.

          "},{"location":"admin/insight/data-query/metric.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
• insight-agent is installed in the cluster and the application is in the Running state.
          "},{"location":"admin/insight/data-query/metric.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
1. Click Observability in the primary navigation.

2. Select Metrics in the left navigation.

3. After selecting the cluster, type, node, and metric name as query criteria, click Search; the corresponding metric chart and data details are shown on the right of the screen.

4. Custom time ranges are supported. You can click the Refresh icon manually or select a default refresh interval.

5. Click the Advanced Query tab to query with native PromQL.

          Note

See the PromQL syntax.
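For example, an advanced query charting per-pod CPU usage might look like the sketch below (the metric and label names follow common cAdvisor/Kubernetes conventions and may differ in your environment):
sum(rate(container_cpu_usage_seconds_total{namespace=\"insight-system\"}[5m])) by (pod)\n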

          "},{"location":"admin/insight/faq/expand-once-es-full.html","title":"ElasticSearch \u6570\u636e\u585e\u6ee1\u5982\u4f55\u64cd\u4f5c\uff1f","text":"

When ElasticSearch storage fills up, you can either expand capacity or delete data to resolve it:

You can run the following command to check the resource usage of the ES nodes.

          kubectl get pod -n mcamel-system | grep common-es-cluster-masters-es | awk '{print $1}' | xargs -I {} kubectl exec {} -n mcamel-system -c elasticsearch -- df -h | grep /usr/share/elasticsearch/data\n
          "},{"location":"admin/insight/faq/expand-once-es-full.html#_1","title":"\u6269\u5bb9","text":"

When the host nodes still have resources available, expansion is a common option, i.e. increasing the PVC capacity.

1. First run the following command to get the PVC configuration of the es-data-0 node; use the actual PVC of your environment as the reference.

            kubectl edit -n mcamel-system pvc elasticsearch-data-mcamel-common-es-cluster-masters-es-data-0\n
2. Then modify the following storage field (the storage class SC in use must support expansion)

            spec:\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 35Gi # (1)!\n
1. Adjust this value as needed
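Note that PVC expansion only works when the storage class allows it; a quick check (sketch, with a placeholder storage class name):
kubectl get sc <storage-class-name> -o jsonpath='{.allowVolumeExpansion}'\n# should print true; otherwise the storage class must be changed or the data migrated\n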
          "},{"location":"admin/insight/faq/expand-once-es-full.html#_2","title":"\u5220\u9664\u6570\u636e","text":"

When ElasticSearch storage fills up, you can also delete index data to free resources.

You can follow the steps below to reach the Kibana page and perform the deletion manually.

1. First confirm that the Kibana Pod exists and is running normally:

            kubectl get po -n mcamel-system |grep mcamel-common-es-cluster-masters-kb\n
2. If it does not exist, manually set replicas to 1 and wait for the service to run normally; if it exists, skip this step

            kubectl scale -n mcamel-system deployment mcamel-common-es-cluster-masters-kb --replicas 1\n
3. Change Kibana's Service to NodePort to expose access

kubectl patch svc -n mcamel-system mcamel-common-es-cluster-masters-kb-http -p '{\"spec\":{\"type\":\"NodePort\"}}'\n\n# After the change, check the NodePort. In this example the port is 30128, so the access URL is https://{node IP in the cluster}:30128\n[root@insight-master1 ~]# kubectl get svc -n mcamel-system |grep mcamel-common-es-cluster-masters-kb-http\nmcamel-common-es-cluster-masters-kb-http   NodePort    10.233.51.174   <none>   5601:30128/TCP    108m\n
4. Get the ElasticSearch Secret used to log in to Kibana (the username is elastic)

            kubectl get secrets -n mcamel-system mcamel-common-es-cluster-masters-es-elastic-user -o jsonpath=\"{.data.elastic}\" |base64 -d\n
5. Go to Kibana -> Stack Management -> Index Management and enable the Include hidden indices option to see all indices. Based on the index sequence numbers, keep the indices with larger numbers and delete those with smaller numbers.
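Alternatively, indices can be deleted without exposing Kibana by calling the Elasticsearch REST API directly. A minimal sketch reusing the secret from step 4 (the pod name and the HTTPS endpoint on port 9200 are assumptions based on this deployment; the index name is a placeholder):
ES_PASSWORD=$(kubectl get secrets -n mcamel-system mcamel-common-es-cluster-masters-es-elastic-user -o jsonpath=\"{.data.elastic}\" | base64 -d)\n# delete one old index via the in-pod Elasticsearch API (placeholder index name)\nkubectl exec -n mcamel-system mcamel-common-es-cluster-masters-es-data-0 -c elasticsearch -- curl -sk -u \"elastic:${ES_PASSWORD}\" -X DELETE \"https://localhost:9200/<old-index-name>\"\n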

          "},{"location":"admin/insight/faq/ignore-pod-log-collect.html","title":"\u5bb9\u5668\u65e5\u5fd7\u9ed1\u540d\u5355","text":""},{"location":"admin/insight/faq/ignore-pod-log-collect.html#_2","title":"\u914d\u7f6e\u65b9\u5f0f","text":"
1. For any Pod whose container logs should not be collected, add the annotation insight.opentelemetry.io/log-ignore: \"true\" to the Pod to mark the container logs to skip, for example:

            apiVersion: apps/v1\nkind: Pod\nmetadata:\n  name: log-generator\nspec:\n  selector:\n    matchLabels:\n      app.kubernetes.io/name: log-generator\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: log-generator\n      annotations:\n        insight.opentelemetry.io/log-ignore: \"true\"\n    spec:\n      containers:\n        - name: nginx\n          image: banzaicloud/log-generator:0.3.2\n
          2. \u91cd\u542f Pod\uff0c\u7b49\u5f85 Pod \u6062\u590d\u8fd0\u884c\u72b6\u6001\u4e4b\u540e\uff0cFluenbit \u5c06\u4e0d\u518d\u91c7\u96c6\u8fd9\u4e2a Pod \u5185\u7684\u5bb9\u5668\u7684\u65e5\u5fd7\u3002
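For an existing workload, the annotation can also be added to the Pod template in place, which triggers the restart mentioned above; a sketch assuming the log-generator Deployment lives in the default namespace:

  kubectl patch deployment log-generator -n default --type merge \
    -p '{"spec":{"template":{"metadata":{"annotations":{"insight.opentelemetry.io/log-ignore":"true"}}}}}'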

          "},{"location":"admin/insight/faq/traceclockskew.html","title":"\u94fe\u8def\u6570\u636e\u4e2d\u7684\u65f6\u949f\u504f\u79fb","text":"

          \u5728\u4e00\u4e2a\u5206\u5e03\u5f0f\u7cfb\u7edf\u4e2d\uff0c\u7531\u4e8e Clock Skew\uff08\u65f6\u949f\u504f\u659c\u8c03\u6574\uff09\u5f71\u54cd\uff0c \u4e0d\u540c\u4e3b\u673a\u95f4\u5b58\u5728\u65f6\u95f4\u6f02\u79fb\u73b0\u8c61\u3002\u901a\u4fd7\u6765\u8bf4\uff0c\u4e0d\u540c\u4e3b\u673a\u5728\u540c\u4e00\u65f6\u523b\u7684\u7cfb\u7edf\u65f6\u95f4\u662f\u6709\u5fae\u5c0f\u7684\u504f\u5dee\u7684\u3002

          \u94fe\u8def\u8ffd\u8e2a\u7cfb\u7edf\u662f\u4e00\u4e2a\u5178\u578b\u7684\u5206\u5e03\u5f0f\u7cfb\u7edf\uff0c\u5b83\u5728\u6d89\u53ca\u65f6\u95f4\u6570\u636e\u91c7\u96c6\u4e0a\u4e5f\u53d7\u8fd9\u79cd\u73b0\u8c61\u5f71\u54cd\uff0c\u6bd4\u5982\u5728\u4e00\u6761\u94fe\u8def\u4e2d\u670d\u52a1\u7aef span \u7684\u5f00\u59cb\u65f6\u95f4\u65e9\u4e8e\u5ba2\u6237\u7aef span\uff0c \u8fd9\u79cd\u73b0\u8c61\u903b\u8f91\u4e0a\u662f\u4e0d\u5b58\u5728\u7684\uff0c\u4f46\u662f\u7531\u4e8e\u65f6\u949f\u504f\u79fb\u5f71\u54cd\uff0c\u94fe\u8def\u6570\u636e\u5728\u5404\u4e2a\u670d\u52a1\u4e2d\u88ab\u91c7\u96c6\u5230\u7684\u90a3\u4e00\u523b\u4e3b\u673a\u95f4\u7684\u7cfb\u7edf\u65f6\u5b58\u5728\u504f\u5dee\uff0c\u6700\u7ec8\u9020\u6210\u5982\u4e0b\u56fe\u6240\u793a\u7684\u73b0\u8c61\uff1a

          \u4e0a\u56fe\u4e2d\u51fa\u73b0\u7684\u73b0\u8c61\u7406\u8bba\u4e0a\u65e0\u6cd5\u6d88\u9664\u3002\u4f46\u8be5\u73b0\u8c61\u8f83\u5c11\uff0c\u5373\u4f7f\u51fa\u73b0\u4e5f\u4e0d\u4f1a\u5f71\u54cd\u670d\u52a1\u95f4\u7684\u8c03\u7528\u5173\u7cfb\u3002

          \u76ee\u524d Insight \u4f7f\u7528 Jaeger UI \u6765\u5c55\u793a\u94fe\u8def\u6570\u636e\uff0cUI \u5728\u9047\u5230\u8fd9\u79cd\u94fe\u8def\u65f6\u4f1a\u63d0\u9192\uff1a

          \u76ee\u524d Jaeger \u7684\u793e\u533a\u6b63\u5728\u5c1d\u8bd5\u901a\u8fc7 UI \u5c42\u9762\u6765\u4f18\u5316\u8fd9\u4e2a\u95ee\u9898\u3002

          \u66f4\u591a\u7684\u76f8\u5173\u8d44\u6599\uff0c\u8bf7\u53c2\u8003\uff1a

          • Clock Skew Adjuster considered harmful
          • Add ability to display unadjusted trace in the UI
          • Clock Skew Adjustment
          "},{"location":"admin/insight/infra/cluster.html","title":"\u96c6\u7fa4\u76d1\u63a7","text":"

          \u901a\u8fc7\u96c6\u7fa4\u76d1\u63a7\uff0c\u4f60\u53ef\u4ee5\u67e5\u770b\u96c6\u7fa4\u7684\u57fa\u672c\u4fe1\u606f\u3001\u8be5\u96c6\u7fa4\u4e2d\u7684\u8d44\u6e90\u6d88\u8017\u4ee5\u53ca\u4e00\u6bb5\u65f6\u95f4\u7684\u8d44\u6e90\u6d88\u8017\u53d8\u5316\u8d8b\u52bf\u7b49\u3002

          "},{"location":"admin/insight/infra/cluster.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

          \u96c6\u7fa4\u4e2d\u5df2\u5b89\u88c5 insight-agent \u4e14\u5e94\u7528\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u3002

          "},{"location":"admin/insight/infra/cluster.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
          1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u4ea7\u54c1\u6a21\u5757\u3002

          2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u57fa\u7840\u8bbe\u65bd -> \u96c6\u7fa4 \u3002\u5728\u8be5\u9875\u9762\u53ef\u67e5\u770b\u4ee5\u4e0b\u4fe1\u606f\uff1a

            • \u8d44\u6e90\u6982\u89c8 \uff1a\u591a\u9009\u96c6\u7fa4\u4e2d\u7684\u8282\u70b9\u3001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u6b63\u5e38\u548c\u5168\u90e8\u7684\u6570\u91cf\u7edf\u8ba1\uff1b
            • \u6545\u969c \uff1a\u7edf\u8ba1\u5f53\u524d\u96c6\u7fa4\u4ea7\u751f\u7684\u544a\u8b66\u6570\u91cf\uff1b
            • \u8d44\u6e90\u6d88\u8017 \uff1a\u6240\u9009\u96c6\u7fa4\u7684 CPU\u3001\u5185\u5b58\u3001\u78c1\u76d8\u7684\u5b9e\u9645\u4f7f\u7528\u91cf\u548c\u603b\u91cf\uff1b
            • \u6307\u6807\u8bf4\u660e \uff1a\u6240\u9009\u96c6\u7fa4\u7684 CPU\u3001\u5185\u5b58\u3001\u78c1\u76d8\u8bfb\u5199\u3001\u7f51\u7edc\u63a5\u6536\u53d1\u9001\u7684\u53d8\u5316\u8d8b\u52bf\u3002

          3. \u5207\u6362\u5230 \u8d44\u6e90\u6c34\u4f4d\u7ebf\u76d1\u63a7 \u9875\u7b7e\uff0c\u53ef\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u7684\u66f4\u591a\u76d1\u63a7\u6570\u636e\u3002

          "},{"location":"admin/insight/infra/cluster.html#_4","title":"\u53c2\u8003\u6307\u6807\u8bf4\u660e","text":"\u6307\u6807\u540d \u8bf4\u660e CPU \u4f7f\u7528\u7387 \u8be5\u6307\u6807\u662f\u6307\u96c6\u7fa4\u4e2d\u6240\u6709 Pod \u8d44\u6e90\u7684\u5b9e\u9645 CPU \u7528\u91cf\u4e0e\u6240\u6709\u8282\u70b9\u7684 CPU \u603b\u91cf\u7684\u6bd4\u7387\u3002 CPU \u5206\u914d\u7387 \u8be5\u6307\u6807\u662f\u6307\u96c6\u7fa4\u4e2d\u6240\u6709 Pod \u7684 CPU \u8bf7\u6c42\u91cf\u7684\u603b\u548c\u4e0e\u6240\u6709\u8282\u70b9\u7684 CPU \u603b\u91cf\u7684\u6bd4\u7387\u3002 \u5185\u5b58\u4f7f\u7528\u7387 \u8be5\u6307\u6807\u662f\u6307\u96c6\u7fa4\u4e2d\u6240\u6709 Pod \u8d44\u6e90\u7684\u5b9e\u9645\u5185\u5b58\u7528\u91cf\u4e0e\u6240\u6709\u8282\u70b9\u7684\u5185\u5b58\u603b\u91cf\u7684\u6bd4\u7387\u3002 \u5185\u5b58\u5206\u914d\u7387 \u8be5\u6307\u6807\u662f\u6307\u96c6\u7fa4\u4e2d\u6240\u6709 Pod \u7684\u5185\u5b58\u8bf7\u6c42\u91cf\u7684\u603b\u548c\u4e0e\u6240\u6709\u8282\u70b9\u7684\u5185\u5b58\u603b\u91cf\u7684\u6bd4\u7387\u3002"},{"location":"admin/insight/infra/container.html","title":"\u5bb9\u5668\u76d1\u63a7","text":"

Container monitoring is the monitoring of workloads in cluster management. In the list you can view the basic information and status of each workload. On the workload details page, you can view the number of active alerts and the trends of CPU, memory, and other resource consumption.

"},{"location":"admin/insight/infra/container.html#_2","title":"Prerequisites","text":"

insight-agent is installed in the cluster, and all pods are in the Running state.

• To install insight-agent, refer to Install insight-agent online or Upgrade insight-agent offline.
"},{"location":"admin/insight/infra/container.html#_3","title":"Steps","text":"

Follow the steps below to view service monitoring metrics:

1. Go to the Observability product module.

2. In the left navigation bar, select Infrastructure -> Workloads.

3. Switch the tabs at the top to view data for different types of workloads.

4. Click the name of the target workload to view its details.

  1. Faults: the fault card shows the total number of alerts currently raised for this workload.
  2. Resource Consumption: this card shows the workload's CPU, memory, and network usage.
  3. Monitoring Metrics: trends of the workload's CPU, memory, network, and disk over the default period of 1 hour.

5. Switch to the Pod List tab to view the status, node, restart count, and other information of each pod of the workload.

6. Switch to the JVM Monitoring tab to view the JVM metrics of each pod.

  Note

  1. JVM monitoring only supports the Java language.
  2. To enable JVM monitoring, refer to Start Monitoring Java Applications.
          "},{"location":"admin/insight/infra/container.html#_4","title":"\u6307\u6807\u53c2\u8003\u8bf4\u660e","text":"\u6307\u6807\u540d\u79f0 \u8bf4\u660e CPU \u4f7f\u7528\u91cf \u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u5bb9\u5668\u7ec4\u7684 CPU \u4f7f\u7528\u91cf\u4e4b\u548c\u3002 CPU \u8bf7\u6c42\u91cf \u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u5bb9\u5668\u7ec4\u7684 CPU \u8bf7\u6c42\u91cf\u4e4b\u548c\u3002 CPU \u9650\u5236\u91cf \u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u5bb9\u5668\u7ec4\u7684 CPU \u9650\u5236\u91cf\u4e4b\u548c\u3002 \u5185\u5b58\u4f7f\u7528\u91cf \u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u5bb9\u5668\u7ec4\u7684\u5185\u5b58\u4f7f\u7528\u91cf\u4e4b\u548c\u3002 \u5185\u5b58\u8bf7\u6c42\u91cf \u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u5bb9\u5668\u7ec4\u7684\u5185\u5b58\u4f7f\u7528\u91cf\u4e4b\u548c\u3002 \u5185\u5b58\u9650\u5236\u91cf \u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u5bb9\u5668\u7ec4\u7684\u5185\u5b58\u9650\u5236\u91cf\u4e4b\u548c\u3002 \u78c1\u76d8\u8bfb\u5199\u901f\u7387 \u6307\u5b9a\u65f6\u95f4\u8303\u56f4\u5185\u78c1\u76d8\u6bcf\u79d2\u8fde\u7eed\u8bfb\u53d6\u548c\u5199\u5165\u7684\u603b\u548c\uff0c\u8868\u793a\u78c1\u76d8\u6bcf\u79d2\u8bfb\u53d6\u548c\u5199\u5165\u64cd\u4f5c\u6570\u7684\u6027\u80fd\u5ea6\u91cf\u3002 \u7f51\u7edc\u53d1\u9001\u63a5\u6536\u901f\u7387 \u6307\u5b9a\u65f6\u95f4\u8303\u56f4\u5185\uff0c\u6309\u5de5\u4f5c\u8d1f\u8f7d\u7edf\u8ba1\u7684\u7f51\u7edc\u6d41\u91cf\u7684\u6d41\u5165\u3001\u6d41\u51fa\u901f\u7387\u3002"},{"location":"admin/insight/infra/event.html","title":"\u4e8b\u4ef6\u67e5\u8be2","text":"

Insight in the AI platform supports querying events by cluster and namespace, and provides an event status distribution chart with statistics on important events.

"},{"location":"admin/insight/infra/event.html#_2","title":"Steps","text":"
1. Click Observability in the top-level navigation bar.
2. In the left navigation bar, select Infrastructure > Events.

"},{"location":"admin/insight/infra/event.html#_3","title":"Event Status Distribution","text":"

By default, events from the last 12 hours are shown. You can select a different time range in the upper right corner to view a longer or shorter period, and you can customize the sampling interval from 1 minute to 5 hours.

The event status distribution chart gives you an intuitive view of how dense or scattered events are, which helps you assess upcoming cluster operations and prepare and schedule work accordingly. If events cluster in a particular period, you may need to allocate more resources or take corresponding measures to ensure cluster stability and high availability. If events are more scattered, that time can reasonably be used for other maintenance work such as system optimization, upgrades, or other tasks.

By considering the event status distribution chart together with the time range, you can better plan and manage cluster operations and ensure system stability and reliability.

"},{"location":"admin/insight/infra/event.html#_4","title":"Event Totals and Statistics","text":"

The important event statistics give you a convenient view of the number of image pull failures, health check failures, Pod run failures, Pod scheduling failures, container OOM (out-of-memory) kills, volume mount failures, and the total count of all events. These events are usually classified as either Warning or Normal.
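These statistics are derived from standard Kubernetes events; if you want to cross-check them against the raw data, a sketch with kubectl:

  # List recent Warning events across all namespaces, newest last
  kubectl get events -A --field-selector type=Warning --sort-by=.lastTimestamp

  # Count one example category, OOM kills (the grep pattern is illustrative)
  kubectl get events -A --field-selector type=Warning | grep -c -i oom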

          "},{"location":"admin/insight/infra/event.html#_5","title":"\u4e8b\u4ef6\u5217\u8868","text":"

          \u4e8b\u4ef6\u5217\u8868\u4ee5\u65f6\u95f4\u4e3a\u8f74\uff0c\u4ee5\u6d41\u6c34\u7684\u5f62\u5f0f\u5c55\u793a\u53d1\u751f\u7684\u4e8b\u4ef6\u3002\u60a8\u53ef\u4ee5\u6839\u636e\u300c\u6700\u8fd1\u53d1\u751f\u65f6\u95f4\u300d\u548c\u300c\u7ea7\u522b\u300d\u8fdb\u884c\u6392\u5e8f\u3002

          \u70b9\u51fb\u53f3\u4fa7\u7684 \u2699\ufe0f \u56fe\u6807\uff0c\u60a8\u53ef\u4ee5\u6839\u636e\u81ea\u5df1\u7684\u559c\u597d\u548c\u9700\u6c42\u6765\u81ea\u5b9a\u4e49\u663e\u793a\u7684\u5217\u3002

          \u5728\u9700\u8981\u7684\u65f6\u5019\uff0c\u60a8\u8fd8\u53ef\u4ee5\u70b9\u51fb\u5237\u65b0\u56fe\u6807\u6765\u66f4\u65b0\u5f53\u524d\u7684\u4e8b\u4ef6\u5217\u8868\u3002

          "},{"location":"admin/insight/infra/event.html#_6","title":"\u5176\u4ed6\u64cd\u4f5c","text":"
          1. \u5728\u4e8b\u4ef6\u5217\u8868\u4e2d\u64cd\u4f5c\u5217\u7684\u56fe\u6807\uff0c\u53ef\u67e5\u770b\u67d0\u4e00\u4e8b\u4ef6\u7684\u5143\u6570\u636e\u4fe1\u606f\u3002

          2. \u70b9\u51fb\u9876\u90e8\u9875\u7b7e\u7684 \u4e0a\u4e0b\u6587 \u53ef\u67e5\u770b\u8be5\u4e8b\u4ef6\u5bf9\u5e94\u8d44\u6e90\u7684\u5386\u53f2\u4e8b\u4ef6\u8bb0\u5f55\u3002

          "},{"location":"admin/insight/infra/event.html#_7","title":"\u53c2\u8003","text":"

          \u6709\u5173\u7cfb\u7edf\u81ea\u5e26\u7684 Event \u4e8b\u4ef6\u7684\u8be6\u7ec6\u542b\u4e49\uff0c\u8bf7\u53c2\u9605 Kubenetest API \u4e8b\u4ef6\u5217\u8868\u3002

          "},{"location":"admin/insight/infra/namespace.html","title":"\u547d\u540d\u7a7a\u95f4\u76d1\u63a7","text":"

          \u4ee5\u547d\u540d\u7a7a\u95f4\u4e3a\u7ef4\u5ea6\uff0c\u5feb\u901f\u67e5\u8be2\u547d\u540d\u7a7a\u95f4\u5185\u7684\u8d44\u6e90\u6d88\u8017\u548c\u53d8\u5316\u8d8b\u52bf\u3002

          "},{"location":"admin/insight/infra/namespace.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

          \u96c6\u7fa4\u4e2d\u5df2\u5b89\u88c5 insight-agent \u4e14\u5e94\u7528\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u3002

          "},{"location":"admin/insight/infra/namespace.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
          1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u4ea7\u54c1\u6a21\u5757\u3002

          2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u57fa\u7840\u8bbe\u65bd > \u547d\u540d\u7a7a\u95f4 \u3002\u5728\u8be5\u9875\u9762\u53ef\u67e5\u770b\u4ee5\u4e0b\u4fe1\u606f\uff1a

            1. \u5207\u6362\u547d\u540d\u7a7a\u95f4\uff1a\u5728\u9876\u90e8\u5207\u6362\u96c6\u7fa4\u6216\u547d\u540d\u7a7a\u95f4\uff1b
            2. \u8d44\u6e90\u6982\u89c8\uff1a\u7edf\u8ba1\u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u6b63\u5e38\u548c\u5168\u90e8\u5de5\u4f5c\u8d1f\u8f7d\u7684\u6570\u91cf\uff1b
            3. \u6545\u969c\uff1a\u7edf\u8ba1\u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e0b\u4ea7\u751f\u7684\u544a\u8b66\u6570\u91cf\uff1b
            4. \u4e8b\u4ef6\uff1a\u7edf\u8ba1\u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e0b 24 \u5c0f\u65f6\u5185 Warning \u7ea7\u522b\u7684\u4e8b\u4ef6\u6570\u91cf\uff1b
            5. \u8d44\u6e90\u6d88\u8017\uff1a\u7edf\u8ba1\u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e0b\u5bb9\u5668\u7ec4\u7684 CPU\u3001\u5185\u5b58\u4f7f\u7528\u91cf\u4e4b\u548c \u53ca CPU\u3001\u5185\u5b58\u914d\u989d\u60c5\u51b5\u3002

          "},{"location":"admin/insight/infra/namespace.html#_4","title":"\u6307\u6807\u8bf4\u660e","text":"\u6307\u6807\u540d \u8bf4\u660e CPU \u4f7f\u7528\u91cf \u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e2d\u5bb9\u5668\u7ec4\u7684 CPU \u4f7f\u7528\u91cf\u4e4b\u548c \u5185\u5b58\u4f7f\u7528\u91cf \u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e2d\u5bb9\u5668\u7ec4\u7684\u5185\u5b58\u4f7f\u7528\u91cf\u4e4b\u548c \u5bb9\u5668\u7ec4 CPU \u4f7f\u7528\u91cf \u547d\u540d\u7a7a\u95f4\u4e2d\u5404\u5bb9\u5668\u7ec4\u7684 CPU \u4f7f\u7528\u91cf \u5bb9\u5668\u7ec4\u5185\u5b58\u4f7f\u7528\u91cf \u547d\u540d\u7a7a\u95f4\u4e2d\u5404\u5bb9\u5668\u7ec4\u7684\u5185\u5b58\u4f7f\u7528\u91cf"},{"location":"admin/insight/infra/node.html","title":"\u8282\u70b9\u76d1\u63a7","text":"

With node monitoring, you can get an overview of the current health of the nodes in the selected cluster and the number of abnormal pods on them. On the node details page, you can view the number of active alerts as well as trend charts of CPU, memory, disk, and other resource consumption.

"},{"location":"admin/insight/infra/node.html#_2","title":"Prerequisites","text":"

insight-agent is installed in the cluster and the application is in the Running state.

"},{"location":"admin/insight/infra/node.html#_3","title":"Steps","text":"
1. Go to the Observability product module.

2. In the left navigation bar, select Infrastructure -> Nodes. On this page you can view the following information:

  • Switch cluster: use the drop-down at the top to switch clusters;
  • Node list: the list of nodes in the selected cluster; click to switch nodes.
  • Faults: the number of alerts raised in the current cluster;
  • Resource Consumption: actual usage and totals of CPU, memory, and disk for the selected node;
  • Metric Trends: trends of CPU, memory, disk read/write, and network receive/send for the selected node.

3. Switch to the Resource Watermark Monitoring tab to view more monitoring data for the current node.

          "},{"location":"admin/insight/infra/probe.html","title":"\u62e8\u6d4b","text":"

          \u62e8\u6d4b\uff08Probe\uff09\u6307\u7684\u662f\u57fa\u4e8e\u9ed1\u76d2\u76d1\u63a7\uff0c\u5b9a\u671f\u901a\u8fc7 HTTP\u3001TCP \u7b49\u65b9\u5f0f\u5bf9\u76ee\u6807\u8fdb\u884c\u8fde\u901a\u6027\u6d4b\u8bd5\uff0c\u5feb\u901f\u53d1\u73b0\u6b63\u5728\u53d1\u751f\u7684\u6545\u969c\u3002

          Insight \u57fa\u4e8e Prometheus Blackbox Exporter \u5de5\u5177\u901a\u8fc7 HTTP\u3001HTTPS\u3001DNS\u3001TCP \u548c ICMP \u7b49\u534f\u8bae\uff0c\u5bf9\u7f51\u7edc\u8fdb\u884c\u63a2\u6d4b\u5e76\u8fd4\u56de\u63a2\u6d4b\u7ed3\u679c\u4ee5\u4fbf\u4e86\u89e3\u7f51\u7edc\u72b6\u6001\u3002

          "},{"location":"admin/insight/infra/probe.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

          \u76ee\u6807\u96c6\u7fa4\u4e2d\u5df2\u6210\u529f\u90e8\u7f72 insight-agent\uff0c\u4e14\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u3002

          "},{"location":"admin/insight/infra/probe.html#_3","title":"\u67e5\u770b\u62e8\u6d4b\u4efb\u52a1","text":"
          1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u4ea7\u54c1\u6a21\u5757\uff1b
          2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u57fa\u7840\u8bbe\u65bd -> \u62e8\u6d4b\u3002

            • \u70b9\u51fb\u8868\u683c\u4e2d\u7684\u96c6\u7fa4\u6216\u547d\u540d\u7a7a\u95f4\u4e0b\u62c9\u6846\uff0c\u53ef\u5207\u6362\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4
            • \u4f60\u53ef\u4ee5\u70b9\u51fb\u53f3\u4fa7\u7684 \u2699\ufe0f \u4fee\u6539\u663e\u793a\u7684\u5217\uff0c\u9ed8\u8ba4\u4e3a\u62e8\u6d4b\u540d\u79f0\u3001\u63a2\u6d4b\u65b9\u5f0f\u3001\u63a2\u6d4b\u76ee\u6807\u3001\u8fde\u901a\u72b6\u6001\u3001\u521b\u5efa\u65f6\u95f4
            • \u8fde\u901a\u72b6\u6001\u6709 3 \u79cd\uff1a
              • \u6b63\u5e38\uff1aProbe \u6210\u529f\u8fde\u63a5\u5230\u4e86\u76ee\u6807\uff0c\u76ee\u6807\u8fd4\u56de\u4e86\u9884\u671f\u7684\u54cd\u5e94
              • \u5f02\u5e38\uff1aProbe \u65e0\u6cd5\u8fde\u63a5\u5230\u76ee\u6807\uff0c\u6216\u76ee\u6807\u6ca1\u6709\u8fd4\u56de\u9884\u671f\u7684\u54cd\u5e94
              • Pending\uff1aProbe \u6b63\u5728\u5c1d\u8bd5\u8fde\u63a5\u76ee\u6807
            • \u4f60\u53ef\u4ee5\u5728 \ud83d\udd0d \u641c\u7d22\u6846\u4e2d\u952e\u5165\u540d\u79f0\uff0c\u6a21\u7cca\u641c\u7d22\u67d0\u4e9b\u62e8\u6d4b\u4efb\u52a1

          "},{"location":"admin/insight/infra/probe.html#_4","title":"\u521b\u5efa\u62e8\u6d4b\u4efb\u52a1","text":"
          1. \u70b9\u51fb \u521b\u5efa\u62e8\u6d4b\u4efb\u52a1\u3002
          2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65

            • \u96c6\u7fa4\uff1a\u9009\u62e9\u9700\u8981\u62e8\u6d4b\u7684\u96c6\u7fa4
            • \u547d\u540d\u7a7a\u95f4\uff1a\u62e8\u6d4b\u6240\u5728\u7684\u547d\u540d\u7a7a\u95f4

          3. \u914d\u7f6e\u63a2\u6d4b\u53c2\u6570\u3002

            • Blackbox \u5b9e\u4f8b\uff1a\u9009\u62e9\u8d1f\u8d23\u63a2\u6d4b\u7684 blackbox \u5b9e\u4f8b
            • \u63a2\u6d4b\u65b9\u5f0f\uff1a
              • HTTP\uff1a\u901a\u8fc7\u53d1\u9001 HTTP \u6216 HTTPS \u8bf7\u6c42\u5230\u76ee\u6807 URL\uff0c\u68c0\u6d4b\u5176\u8fde\u901a\u6027\u548c\u54cd\u5e94\u65f6\u95f4\uff0c\u8fd9\u53ef\u4ee5\u7528\u4e8e\u76d1\u6d4b\u7f51\u7ad9\u6216 Web \u5e94\u7528\u7684\u53ef\u7528\u6027\u548c\u6027\u80fd
              • TCP\uff1a\u901a\u8fc7\u5efa\u7acb\u5230\u76ee\u6807\u4e3b\u673a\u548c\u7aef\u53e3\u7684 TCP \u8fde\u63a5\uff0c\u68c0\u6d4b\u5176\u8fde\u901a\u6027\u548c\u54cd\u5e94\u65f6\u95f4\u3002\u8fd9\u53ef\u4ee5\u7528\u4e8e\u76d1\u6d4b\u57fa\u4e8e TCP \u7684\u670d\u52a1\uff0c\u5982 Web \u670d\u52a1\u5668\u3001\u6570\u636e\u5e93\u670d\u52a1\u5668\u7b49
              • \u5176\u4ed6\uff1a\u652f\u6301\u901a\u8fc7\u914d\u7f6e ConfigMap \u81ea\u5b9a\u4e49\u63a2\u6d4b\u65b9\u5f0f\uff0c\u53ef\u53c2\u8003\u81ea\u5b9a\u4e49\u62e8\u6d4b\u65b9\u5f0f
            • \u63a2\u6d4b\u76ee\u6807\uff1a\u63a2\u6d4b\u7684\u76ee\u6807\u5730\u5740\uff0c\u652f\u6301\u57df\u540d\u6216 IP \u5730\u5740\u7b49
            • \u6807\u7b7e\uff1a\u81ea\u5b9a\u4e49\u6807\u7b7e\uff0c\u8be5\u6807\u7b7e\u4f1a\u81ea\u52a8\u6dfb\u52a0\u5230 Prometheus \u7684 Label \u4e2d
            • \u63a2\u6d4b\u95f4\u9694\uff1a\u63a2\u6d4b\u95f4\u9694\u65f6\u95f4
            • \u63a2\u6d4b\u8d85\u65f6\uff1a\u63a2\u6d4b\u76ee\u6807\u65f6\u7684\u6700\u957f\u7b49\u5f85\u65f6\u95f4

          4. \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u5b8c\u6210\u521b\u5efa\u3002

          Warning

          \u62e8\u6d4b\u4efb\u52a1\u521b\u5efa\u5b8c\u6210\u540e\uff0c\u9700\u8981\u5927\u6982 3 \u5206\u949f\u7684\u65f6\u95f4\u6765\u540c\u6b65\u914d\u7f6e\u3002\u5728\u6b64\u671f\u95f4\uff0c\u4e0d\u4f1a\u8fdb\u884c\u63a2\u6d4b\uff0c\u65e0\u6cd5\u67e5\u770b\u63a2\u6d4b\u7ed3\u679c\u3002
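Under the hood, probe tasks of this kind map to Blackbox-Exporter-backed probe definitions. Purely as an illustration, a minimal sketch using the standard prometheus-operator Probe CRD; the resource name and the blackbox service address are assumptions, not values prescribed by Insight:

  apiVersion: monitoring.coreos.com/v1
  kind: Probe
  metadata:
    name: probe-example           # hypothetical task name
    namespace: insight-system
  spec:
    jobName: probe-example
    prober:
      url: insight-agent-prometheus-blackbox-exporter:9115  # assumed blackbox instance address
    module: http_2xx              # HTTP probe method
    interval: 30s                 # probe interval
    scrapeTimeout: 10s            # probe timeout
    targets:
      staticConfig:
        static:
          - https://example.com   # probe target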

          "},{"location":"admin/insight/infra/probe.html#_5","title":"\u7f16\u8f91\u62e8\u6d4b\u4efb\u52a1","text":"

          \u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 -> \u7f16\u8f91\uff0c\u5b8c\u6210\u7f16\u8f91\u540e\u70b9\u51fb \u786e\u5b9a\u3002

          "},{"location":"admin/insight/infra/probe.html#_6","title":"\u67e5\u770b\u76d1\u63a7\u9762\u677f","text":"

          \u70b9\u51fb\u62e8\u6d4b\u540d\u79f0 \u67e5\u770b\u62e8\u6d4b\u4efb\u52a1\u4e2d\u6bcf\u4e2a\u76ee\u6807\u7684\u76d1\u63a7\u72b6\u6001\uff0c\u4ee5\u56fe\u8868\u65b9\u5f0f\u663e\u793a\u9488\u5bf9\u7f51\u7edc\u72b6\u51b5\u7684\u63a2\u6d4b\u7ed3\u679c\u3002

          \u6307\u6807\u540d\u79f0 \u63cf\u8ff0 Current Status Response \u8868\u793a HTTP \u63a2\u6d4b\u8bf7\u6c42\u7684\u54cd\u5e94\u72b6\u6001\u7801\u3002 Ping Status \u8868\u793a\u63a2\u6d4b\u8bf7\u6c42\u662f\u5426\u6210\u529f\u30021 \u8868\u793a\u63a2\u6d4b\u8bf7\u6c42\u6210\u529f\uff0c0 \u8868\u793a\u63a2\u6d4b\u8bf7\u6c42\u5931\u8d25\u3002 IP Protocol \u8868\u793a\u63a2\u6d4b\u8bf7\u6c42\u4f7f\u7528\u7684 IP \u534f\u8bae\u7248\u672c\u3002 SSL Expiry \u8868\u793a SSL/TLS \u8bc1\u4e66\u7684\u6700\u65e9\u5230\u671f\u65f6\u95f4\u3002 DNS Response (Latency) \u8868\u793a\u6574\u4e2a\u63a2\u6d4b\u8fc7\u7a0b\u7684\u6301\u7eed\u65f6\u95f4\uff0c\u5355\u4f4d\u662f\u79d2\u3002 HTTP Duration \u8868\u793a\u4ece\u53d1\u9001\u8bf7\u6c42\u5230\u63a5\u6536\u5230\u5b8c\u6574\u54cd\u5e94\u7684\u6574\u4e2a\u8fc7\u7a0b\u7684\u65f6\u95f4\u3002"},{"location":"admin/insight/infra/probe.html#_7","title":"\u5220\u9664\u62e8\u6d4b\u4efb\u52a1","text":"

          \u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 -> \u5220\u9664\uff0c\u786e\u8ba4\u65e0\u8bef\u540e\u70b9\u51fb \u786e\u5b9a\u3002

          Caution

          \u5220\u9664\u64cd\u4f5c\u4e0d\u53ef\u6062\u590d\uff0c\u8bf7\u8c28\u614e\u64cd\u4f5c\u3002

          "},{"location":"admin/insight/quickstart/install/index.html","title":"\u5f00\u59cb\u89c2\u6d4b","text":"

          AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\u5b9e\u73b0\u4e86\u5bf9\u591a\u4e91\u591a\u96c6\u7fa4\u7684\u7eb3\u7ba1\uff0c\u5e76\u652f\u6301\u521b\u5efa\u96c6\u7fa4\u3002\u5728\u6b64\u57fa\u7840\u4e0a\uff0c\u53ef\u89c2\u6d4b\u6027 Insight \u4f5c\u4e3a\u591a\u96c6\u7fa4\u7edf\u4e00\u89c2\u6d4b\u65b9\u6848\uff0c\u901a\u8fc7\u90e8\u7f72 insight-agent \u63d2\u4ef6\u5b9e\u73b0\u5bf9\u591a\u96c6\u7fa4\u89c2\u6d4b\u6570\u636e\u7684\u91c7\u96c6\uff0c\u5e76\u652f\u6301\u901a\u8fc7 AI \u7b97\u529b\u4e2d\u5fc3 \u53ef\u89c2\u6d4b\u6027\u4ea7\u54c1\u5b9e\u73b0\u5bf9\u6307\u6807\u3001\u65e5\u5fd7\u3001\u94fe\u8def\u6570\u636e\u7684\u67e5\u8be2\u3002

          insight-agent \u662f\u53ef\u89c2\u6d4b\u6027\u5b9e\u73b0\u5bf9\u591a\u96c6\u7fa4\u6570\u636e\u91c7\u96c6\u7684\u5de5\u5177\uff0c\u5b89\u88c5\u540e\u65e0\u9700\u4efb\u4f55\u4fee\u6539\uff0c\u5373\u53ef\u5b9e\u73b0\u5bf9\u6307\u6807\u3001\u65e5\u5fd7\u4ee5\u53ca\u94fe\u8def\u6570\u636e\u7684\u81ea\u52a8\u5316\u91c7\u96c6\u3002

          \u901a\u8fc7 \u5bb9\u5668\u7ba1\u7406 \u521b\u5efa\u7684\u96c6\u7fa4\u9ed8\u8ba4\u4f1a\u5b89\u88c5 insight-agent\uff0c\u6545\u5728\u6b64\u4ec5\u9488\u5bf9\u63a5\u5165\u7684\u96c6\u7fa4\u5982\u4f55\u5f00\u542f\u89c2\u6d4b\u80fd\u529b\u63d0\u4f9b\u6307\u5bfc\u3002

          • \u5728\u7ebf\u5b89\u88c5 insight-agent

          \u53ef\u89c2\u6d4b\u6027 Insight \u4f5c\u4e3a\u591a\u96c6\u7fa4\u7684\u7edf\u4e00\u89c2\u6d4b\u5e73\u53f0\uff0c\u5176\u90e8\u5206\u7ec4\u4ef6\u7684\u8d44\u6e90\u6d88\u8017\u4e0e\u521b\u5efa\u96c6\u7fa4\u7684\u6570\u636e\u3001\u63a5\u5165\u96c6\u7fa4\u7684\u6570\u91cf\u606f\u606f\u76f8\u5173\uff0c\u5728\u5b89\u88c5 insight-agent \u65f6\uff0c\u9700\u8981\u6839\u636e\u96c6\u7fa4\u89c4\u6a21\u5bf9\u76f8\u5e94\u7ec4\u4ef6\u7684\u8d44\u6e90\u8fdb\u884c\u8c03\u6574\u3002

          1. \u6839\u636e\u521b\u5efa\u96c6\u7fa4\u7684\u89c4\u6a21\u6216\u63a5\u5165\u96c6\u7fa4\u7684\u89c4\u6a21\uff0c\u8c03\u6574 insight-agent \u4e2d\u91c7\u96c6\u7ec4\u4ef6 Prometheus \u7684 CPU \u548c\u5185\u5b58\uff0c\u8bf7\u53c2\u8003: Prometheus \u8d44\u6e90\u89c4\u5212

          2. \u7531\u4e8e\u591a\u96c6\u7fa4\u7684\u6307\u6807\u6570\u636e\u4f1a\u7edf\u4e00\u5b58\u50a8\uff0c\u5219\u9700\u8981 AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\u7ba1\u7406\u5458\u6839\u636e\u521b\u5efa\u96c6\u7fa4\u7684\u89c4\u6a21\u3001\u63a5\u5165\u96c6\u7fa4\u7684\u89c4\u6a21\u5bf9\u5e94\u8c03\u6574 vmstorage \u7684\u78c1\u76d8\uff0c\u8bf7\u53c2\u8003\uff1avmstorage \u78c1\u76d8\u5bb9\u91cf\u89c4\u5212\u3002

          3. \u5982\u4f55\u8c03\u6574 vmstorage \u7684\u78c1\u76d8\uff0c\u8bf7\u53c2\u8003\uff1avmstorge \u78c1\u76d8\u6269\u5bb9\u3002

          \u7531\u4e8e AI \u7b97\u529b\u4e2d\u5fc3 \u652f\u6301\u5bf9\u591a\u4e91\u591a\u96c6\u7fa4\u7684\u7eb3\u7ba1\uff0cinsight-agent \u76ee\u524d\u4e5f\u5b8c\u6210\u4e86\u90e8\u5206\u9a8c\u8bc1\uff0c\u7531\u4e8e\u76d1\u63a7\u7ec4\u4ef6\u51b2\u7a81\u95ee\u9898\u5bfc\u81f4\u5728 Openshift 4.x \u96c6\u7fa4\u4e2d\u5b89\u88c5 insight-agent \u4f1a\u51fa\u73b0\u95ee\u9898\uff0c\u82e5\u60a8\u9047\u5230\u540c\u6837\u95ee\u9898\uff0c\u8bf7\u53c2\u8003\u4ee5\u4e0b\u6587\u6863\uff1a

          • \u5728 Openshift 4.x \u5b89\u88c5 insight-agent
          "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html","title":"\u5f00\u542f\u5927\u65e5\u5fd7\u548c\u5927\u94fe\u8def\u6a21\u5f0f","text":"

          \u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u4e3a\u4e86\u63d0\u9ad8\u5927\u89c4\u6a21\u73af\u5883\u4e0b\u7684\u6570\u636e\u5199\u5165\u80fd\u529b\uff0c\u652f\u6301\u5c06\u65e5\u5fd7\u5207\u6362\u4e3a \u5927\u65e5\u5fd7 \u6a21\u5f0f\u3001\u5c06\u94fe\u8def\u5207\u6362\u4e3a \u5927\u94fe\u8def \u6a21\u5f0f\u3002\u672c\u6587\u5c06\u4ecb\u7ecd\u4ee5\u4e0b\u51e0\u79cd\u5f00\u542f\u65b9\u5f0f\uff1a

          • \u901a\u8fc7\u5b89\u88c5\u5668\u5f00\u542f\u6216\u5347\u7ea7\u81f3\u5927\u65e5\u5fd7\u548c\u5927\u94fe\u8def\u6a21\u5f0f\uff08\u901a\u8fc7 manifest.yaml \u4e2d\u540c\u4e00\u4e2a\u53c2\u6570\u503c\u63a7\u5236\uff09
          • \u901a\u8fc7 Helm \u547d\u4ee4\u624b\u52a8\u5f00\u542f\u5927\u65e5\u5fd7\u548c\u5927\u94fe\u8def\u6a21\u5f0f
          "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_2","title":"\u65e5\u5fd7","text":"

          \u672c\u8282\u8bf4\u660e\u666e\u901a\u65e5\u5fd7\u6a21\u5f0f\u548c\u5927\u65e5\u5fd7\u6a21\u5f0f\u7684\u533a\u522b\u3002

          "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_3","title":"\u65e5\u5fd7\u6a21\u5f0f","text":"

          \u7ec4\u4ef6\uff1aFluentbit + Elasticsearch

          \u8be5\u6a21\u5f0f\u7b80\u79f0\u4e3a ES \u6a21\u5f0f\uff0c\u6570\u636e\u6d41\u56fe\u5982\u4e0b\u6240\u793a\uff1a

          "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_4","title":"\u5927\u65e5\u5fd7\u6a21\u5f0f","text":"

          \u7ec4\u4ef6\uff1aFluentbit + Kafka + Vector + Elasticsearch

          \u8be5\u6a21\u5f0f\u7b80\u79f0\u4e3a Kafka \u6a21\u5f0f\uff0c\u6570\u636e\u6d41\u56fe\u5982\u4e0b\u6240\u793a\uff1a

          "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_5","title":"\u94fe\u8def","text":"

          \u672c\u8282\u8bf4\u660e\u666e\u901a\u94fe\u8def\u6a21\u5f0f\u548c\u5927\u94fe\u8def\u6a21\u5f0f\u7684\u533a\u522b\u3002

          "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_6","title":"\u94fe\u8def\u6a21\u5f0f","text":"

          \u7ec4\u4ef6\uff1aAgent opentelemetry-collector + Global opentelemetry-collector + Jaeger-collector + Elasticsearch

          \u8be5\u6a21\u5f0f\u7b80\u79f0\u4e3a OTlp \u6a21\u5f0f\uff0c\u6570\u636e\u6d41\u56fe\u5982\u4e0b\u6240\u793a\uff1a

          "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_7","title":"\u5927\u94fe\u8def\u6a21\u5f0f","text":"

          \u7ec4\u4ef6\uff1aAgent opentelemetry-collector + Kafka + Global opentelemetry-collector + Jaeger-collector + Elasticsearch

          \u8be5\u6a21\u5f0f\u7b80\u79f0\u4e3a Kafka \u6a21\u5f0f\uff0c\u6570\u636e\u6d41\u56fe\u5982\u4e0b\u6240\u793a\uff1a

          "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_8","title":"\u901a\u8fc7\u5b89\u88c5\u5668\u5f00\u542f","text":"

          \u901a\u8fc7\u5b89\u88c5\u5668\u90e8\u7f72/\u5347\u7ea7 AI \u7b97\u529b\u4e2d\u5fc3 \u65f6\u4f7f\u7528\u7684 manifest.yaml \u4e2d\u5b58\u5728 infrastructures.kafka \u5b57\u6bb5\uff0c \u5982\u679c\u60f3\u5f00\u542f\u53ef\u89c2\u6d4b\u7684\u5927\u65e5\u5fd7\u548c\u5927\u94fe\u8def\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u542f\u7528 kafka\uff1a

          manifest.yaml
          apiVersion: manifest.daocloud.io/v1alpha1\nkind: DCEManifest\n...\ninfrastructures:\n  ...\n  kafka:\n    enable: true # \u9ed8\u8ba4\u4e3a false\n    cpuLimit: 1\n    memLimit: 2Gi\n    pvcSize: 15Gi\n
          "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_9","title":"\u5f00\u542f","text":"

          \u5b89\u88c5\u65f6\u4f7f\u7528\u542f\u7528 kafka \u7684 manifest.yaml\uff0c\u5219\u4f1a\u9ed8\u8ba4\u5b89\u88c5 kafka \u4e2d\u95f4\u4ef6\uff0c \u5e76\u5728\u5b89\u88c5 Insight \u65f6\u9ed8\u8ba4\u5f00\u542f\u5927\u65e5\u5fd7\u548c\u5927\u94fe\u8def\u6a21\u5f0f\u3002\u5b89\u88c5\u547d\u4ee4\u4e3a\uff1a

          ./dce5-installer cluster-create -c clusterConfig.yaml -m manifest.yaml\n
          "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_10","title":"\u5347\u7ea7","text":"

          \u5347\u7ea7\u540c\u6837\u662f\u4fee\u6539 kafka \u5b57\u6bb5\u3002\u4f46\u9700\u8981\u6ce8\u610f\u7684\u662f\uff0c\u56e0\u4e3a\u8001\u73af\u5883\u5b89\u88c5\u65f6\u4f7f\u7528\u7684\u662f kafka: false\uff0c \u6240\u4ee5\u73af\u5883\u4e2d\u65e0 kafka\u3002\u6b64\u65f6\u5347\u7ea7\u9700\u8981\u6307\u5b9a\u5347\u7ea7 middleware\uff0c\u624d\u4f1a\u540c\u65f6\u5b89\u88c5 kafka \u4e2d\u95f4\u4ef6\u3002\u5347\u7ea7\u547d\u4ee4\u4e3a\uff1a

          ./dce5-installer cluster-create -c clusterConfig.yaml -m manifest.yaml -u gproduct,middleware\n

          Note

          \u5728\u5347\u7ea7\u5b8c\u6210\u540e\uff0c\u9700\u8981\u624b\u52a8\u91cd\u542f\u4ee5\u4e0b\u7ec4\u4ef6\uff1a

          • insight-agent-fluent-bit
          • insight-agent-opentelemetry-collector
          • insight-opentelemetry-collector
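A sketch of those restarts with kubectl rollout, assuming fluent-bit runs as a DaemonSet and the collectors as Deployments in insight-system (verify the actual resource kinds in your environment first):

  kubectl -n insight-system rollout restart daemonset insight-agent-fluent-bit
  kubectl -n insight-system rollout restart deployment insight-agent-opentelemetry-collector
  kubectl -n insight-system rollout restart deployment insight-opentelemetry-collector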
          "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#helm","title":"\u901a\u8fc7 Helm \u547d\u4ee4\u5f00\u542f","text":"

          \u524d\u63d0\u6761\u4ef6\uff1a\u9700\u8981\u4fdd\u8bc1\u5b58\u5728 \u53ef\u7528\u7684 kafka \u4e14\u5730\u5740\u53ef\u6b63\u5e38\u8bbf\u95ee\u3002
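A quick reachability sketch for that prerequisite, assuming netcat is available and using the example broker address that appears in the commands below:

  nc -vz 10.6.216.111 30592   # succeeds if the Kafka broker port is reachable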

Run the following commands to get the values of the old insight and insight-agent releases (a backup is recommended):

helm get values insight -n insight-system -o yaml > insight.yaml
helm get values insight-agent -n insight-system -o yaml > insight-agent.yaml
          "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_11","title":"\u5f00\u542f\u5927\u65e5\u5fd7","text":"

          \u6709\u4ee5\u4e0b\u51e0\u79cd\u65b9\u5f0f\u5f00\u542f\u6216\u5347\u7ea7\u81f3\u5927\u65e5\u5fd7\u6a21\u5f0f\uff1a

          \u5728 helm upgrade \u547d\u4ee4\u4e2d\u4f7f\u7528 --set\u4fee\u6539 YAML \u540e\u8fd0\u884c helm upgrade\u5bb9\u5668\u7ba1\u7406 UI \u5347\u7ea7

          \u5148\u8fd0\u884c\u4ee5\u4e0b insight \u5347\u7ea7\u547d\u4ee4\uff0c\u6ce8\u610f kafka brokers \u5730\u5740\u9700\u6b63\u786e\uff1a

          helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --set global.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.kafka.enabled=true \\\n  --set vector.enabled=true \\\n  --version 0.30.1\n

          \u7136\u540e\u8fd0\u884c\u4ee5\u4e0b insight-agent \u5347\u7ea7\u547d\u4ee4\uff0c\u6ce8\u610f kafka brokers \u5730\u5740\u9700\u6b63\u786e\uff1a

          helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --set global.exporters.logging.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.exporters.logging.output=kafka \\\n  --version 0.30.1\n

          \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u4fee\u6539 YAMl \u540e\u8fd0\u884c helm upgrade \u547d\u4ee4\uff1a

          1. \u4fee\u6539 insight.yaml

            insight.yaml
            global:\n  ...\n  kafka:\n    brokers: 10.6.216.111:30592\n    enabled: true\n...\nvector:\n  enabled: true\n
          2. \u5347\u7ea7 insight \u7ec4\u4ef6\uff1a

            helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --version 0.30.1\n
          3. \u4fee\u6539 insight-agent.yaml

            insight-agent.yaml
            global:\n  ...\n  exporters:\n    ...\n    logging:\n      ...\n      kafka:\n        brokers: 10.6.216.111:30592\n      output: kafka\n
          4. \u5347\u7ea7 insight-agent\uff1a

            helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --version 0.30.1\n

          \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\uff0c\u627e\u5230\u5bf9\u5e94\u7684\u96c6\u7fa4\uff0c\u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u9009\u62e9 Helm \u5e94\u7528 \uff0c\u627e\u5230\u5e76\u66f4\u65b0 insight-agent\u3002

          \u5728 Logging Settings \u4e2d\uff0c\u4e3a output \u9009\u62e9 kafka\uff0c\u5e76\u586b\u5199\u6b63\u786e\u7684 brokers \u5730\u5740\u3002

          \u9700\u8981\u6ce8\u610f\u7684\u662f\uff0c\u5728\u5347\u7ea7\u5b8c\u6210\u540e\uff0c\u9700\u624b\u52a8\u91cd\u542f insight-agent-fluent-bit \u7ec4\u4ef6\u3002

          "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_12","title":"\u5f00\u542f\u5927\u94fe\u8def","text":"

          \u6709\u4ee5\u4e0b\u51e0\u79cd\u65b9\u5f0f\u5f00\u542f\u6216\u5347\u7ea7\u81f3\u5927\u94fe\u8def\u6a21\u5f0f\uff1a

          \u5728 helm upgrade \u547d\u4ee4\u4e2d\u4f7f\u7528 --set\u4fee\u6539 YAML \u540e\u8fd0\u884c helm upgrade\u5bb9\u5668\u7ba1\u7406 UI \u5347\u7ea7

          \u5148\u8fd0\u884c\u4ee5\u4e0b insight \u5347\u7ea7\u547d\u4ee4\uff0c\u6ce8\u610f kafka brokers \u5730\u5740\u9700\u6b63\u786e\uff1a

          helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --set global.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.kafka.enabled=true \\\n  --set global.tracing.kafkaReceiver.enabled=true \\\n  --version 0.30.1\n

          \u7136\u540e\u8fd0\u884c\u4ee5\u4e0b insight-agent \u5347\u7ea7\u547d\u4ee4\uff0c\u6ce8\u610f kafka brokers \u5730\u5740\u9700\u6b63\u786e\uff1a

          helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --set global.exporters.trace.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.exporters.trace.output=kafka \\\n  --version 0.30.1\n

          \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u4fee\u6539 YAMl \u540e\u8fd0\u884c helm upgrade \u547d\u4ee4\uff1a

          1. \u4fee\u6539 insight.yaml

            insight.yaml
            global:\n  ...\n  kafka:\n    brokers: 10.6.216.111:30592\n    enabled: true\n...\ntracing:\n  ...\n  kafkaReceiver:\n    enabled: true\n
          2. \u5347\u7ea7 insight \u7ec4\u4ef6\uff1a

            helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --version 0.30.1\n
          3. \u4fee\u6539 insight-agent.yaml

            insight-agent.yaml
            global:\n  ...\n  exporters:\n    ...\n    trace:\n      ...\n      kafka:\n        brokers: 10.6.216.111:30592\n      output: kafka\n
          4. \u5347\u7ea7 insight-agent\uff1a

            helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --version 0.30.1\n

          \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\uff0c\u627e\u5230\u5bf9\u5e94\u7684\u96c6\u7fa4\uff0c\u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u9009\u62e9 Helm \u5e94\u7528 \uff0c\u627e\u5230\u5e76\u66f4\u65b0 insight-agent\u3002

          \u5728 Trace Settings \u4e2d\uff0c\u4e3a output \u9009\u62e9 kafka\uff0c\u5e76\u586b\u5199\u6b63\u786e\u7684 brokers \u5730\u5740\u3002

          \u9700\u8981\u6ce8\u610f\u7684\u662f\uff0c\u5728\u5347\u7ea7\u5b8c\u6210\u540e\uff0c\u9700\u624b\u52a8 \u91cd\u542f insight-agent-opentelemetry-collector \u548c insight-opentelemetry-collector \u7ec4\u4ef6\u3002

          "},{"location":"admin/insight/quickstart/install/component-scheduling.html","title":"\u81ea\u5b9a\u4e49 Insight \u7ec4\u4ef6\u8c03\u5ea6\u7b56\u7565","text":"

          \u5f53\u90e8\u7f72\u53ef\u89c2\u6d4b\u5e73\u53f0 Insight \u5230 Kubernetes \u73af\u5883\u65f6\uff0c\u6b63\u786e\u7684\u8d44\u6e90\u7ba1\u7406\u548c\u4f18\u5316\u81f3\u5173\u91cd\u8981\u3002 Insight \u5305\u542b\u591a\u4e2a\u6838\u5fc3\u7ec4\u4ef6\uff0c\u5982 Prometheus\u3001OpenTelemetry\u3001FluentBit\u3001Vector\u3001Elasticsearch \u7b49\uff0c \u8fd9\u4e9b\u7ec4\u4ef6\u5728\u8fd0\u884c\u8fc7\u7a0b\u4e2d\u53ef\u80fd\u56e0\u4e3a\u8d44\u6e90\u5360\u7528\u95ee\u9898\u5bf9\u96c6\u7fa4\u5185\u5176\u4ed6 Pod \u7684\u6027\u80fd\u4ea7\u751f\u8d1f\u9762\u5f71\u54cd\u3002 \u4e3a\u4e86\u6709\u6548\u5730\u7ba1\u7406\u8d44\u6e90\u5e76\u4f18\u5316\u96c6\u7fa4\u7684\u8fd0\u884c\uff0c\u8282\u70b9\u4eb2\u548c\u6027\u6210\u4e3a\u4e00\u9879\u91cd\u8981\u7684\u914d\u7f6e\u9009\u9879\u3002

          \u672c\u6587\u5c06\u91cd\u70b9\u63a2\u8ba8\u5982\u4f55\u901a\u8fc7\u6c61\u70b9\u548c\u8282\u70b9\u4eb2\u548c\u6027\u7684\u914d\u7f6e\u7b56\u7565\uff0c\u4f7f\u5f97\u6bcf\u4e2a\u7ec4\u4ef6\u80fd\u591f\u5728\u9002\u5f53\u7684\u8282\u70b9\u4e0a\u8fd0\u884c\uff0c \u5e76\u907f\u514d\u8d44\u6e90\u7ade\u4e89\u6216\u4e89\u7528\uff0c\u4ece\u800c\u786e\u4fdd\u6574\u4e2a Kubernetes \u96c6\u7fa4\u7684\u7a33\u5b9a\u6027\u548c\u9ad8\u6548\u6027\u3002

          "},{"location":"admin/insight/quickstart/install/component-scheduling.html#insight_1","title":"\u901a\u8fc7\u6c61\u70b9\u4e3a Insight \u914d\u7f6e\u4e13\u6709\u8282\u70b9","text":"

          \u7531\u4e8e Insight Agent \u5305\u542b\u4e86 DaemonSet \u7ec4\u4ef6\uff0c\u6240\u4ee5\u672c\u8282\u6240\u8ff0\u7684\u914d\u7f6e\u65b9\u5f0f\u662f\u8ba9\u9664\u4e86 Insight DameonSet \u4e4b\u5916\u7684\u5176\u4f59\u7ec4\u4ef6\u5747\u8fd0\u884c\u5728\u4e13\u6709\u8282\u70b9\u4e0a\u3002

          \u8be5\u65b9\u5f0f\u662f\u901a\u8fc7\u4e3a\u4e13\u6709\u8282\u70b9\u6dfb\u52a0\u6c61\u70b9\uff08taint\uff09\uff0c\u5e76\u914d\u5408\u6c61\u70b9\u5bb9\u5fcd\u5ea6\uff08tolerations\uff09\u6765\u5b9e\u73b0\u7684\u3002 \u66f4\u591a\u7ec6\u8282\u53ef\u4ee5\u53c2\u8003 Kubernetes \u5b98\u65b9\u6587\u6863\u3002

          \u53ef\u4ee5\u53c2\u8003\u5982\u4e0b\u547d\u4ee4\u4e3a\u8282\u70b9\u6dfb\u52a0\u53ca\u79fb\u9664\u6c61\u70b9\uff1a

          # \u6dfb\u52a0\u6c61\u70b9\nkubectl taint nodes worker1 node.daocloud.io=insight-only:NoSchedule\n\n# \u79fb\u9664\u6c61\u70b9\nkubectl taint nodes worker1 node.daocloud.io:NoSchedule-\n
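To verify the taint took effect (worker1 is the example node above):

  kubectl describe node worker1 | grep -A2 Taints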

There are two ways to get Insight components scheduled onto the dedicated nodes:

"},{"location":"admin/insight/quickstart/install/component-scheduling.html#1","title":"1. Add taint tolerations for each component","text":"

Configure the insight-server and insight-agent Charts separately:

insight-server Chart configuration:

server:
  tolerations:
    - key: "node.daocloud.io"
      operator: "Equal"
      value: "insight-only"
      effect: "NoSchedule"

ui:
  tolerations:
    - key: "node.daocloud.io"
      operator: "Equal"
      value: "insight-only"
      effect: "NoSchedule"

runbook:
  tolerations:
    - key: "node.daocloud.io"
      operator: "Equal"
      value: "insight-only"
      effect: "NoSchedule"

# mysql:
victoria-metrics-k8s-stack:
  victoria-metrics-operator:
    tolerations:
      - key: "node.daocloud.io"
        operator: "Equal"
        value: "insight-only"
        effect: "NoSchedule"
  vmcluster:
    spec:
      vmstorage:
        tolerations:
          - key: "node.daocloud.io"
            operator: "Equal"
            value: "insight-only"
            effect: "NoSchedule"
      vmselect:
        tolerations:
          - key: "node.daocloud.io"
            operator: "Equal"
            value: "insight-only"
            effect: "NoSchedule"
      vminsert:
        tolerations:
          - key: "node.daocloud.io"
            operator: "Equal"
            value: "insight-only"
            effect: "NoSchedule"
  vmalert:
    spec:
      tolerations:
        - key: "node.daocloud.io"
          operator: "Equal"
          value: "insight-only"
          effect: "NoSchedule"
  alertmanager:
    spec:
      tolerations:
        - key: "node.daocloud.io"
          operator: "Equal"
          value: "insight-only"
          effect: "NoSchedule"

jaeger:
  collector:
    tolerations:
      - key: "node.daocloud.io"
        operator: "Equal"
        value: "insight-only"
        effect: "NoSchedule"
  query:
    tolerations:
      - key: "node.daocloud.io"
        operator: "Equal"
        value: "insight-only"
        effect: "NoSchedule"

opentelemetry-collector-aggregator:
  tolerations:
    - key: "node.daocloud.io"
      operator: "Equal"
      value: "insight-only"
      effect: "NoSchedule"

opentelemetry-collector:
  tolerations:
    - key: "node.daocloud.io"
      operator: "Equal"
      value: "insight-only"
      effect: "NoSchedule"

grafana-operator:
  operator:
    tolerations:
      - key: "node.daocloud.io"
        operator: "Equal"
        value: "insight-only"
        effect: "NoSchedule"
  grafana:
    tolerations:
      - key: "node.daocloud.io"
        operator: "Equal"
        value: "insight-only"
        effect: "NoSchedule"
kibana:
  tolerations:
    - key: "node.daocloud.io"
      operator: "Equal"
      value: "insight-only"
      effect: "NoSchedule"

elastic-alert:
  tolerations:
    - key: "node.daocloud.io"
      operator: "Equal"
      value: "insight-only"
      effect: "NoSchedule"

vector:
  tolerations:
    - key: "node.daocloud.io"
      operator: "Equal"
      value: "insight-only"
      effect: "NoSchedule"

insight-agent Chart configuration:

kube-prometheus-stack:
  prometheus:
    prometheusSpec:
      tolerations:
        - key: "node.daocloud.io"
          operator: "Equal"
          value: "insight-only"
          effect: "NoSchedule"
  prometheus-node-exporter:
    tolerations:
      - effect: NoSchedule
        operator: Exists
  prometheusOperator:
    tolerations:
      - key: "node.daocloud.io"
        operator: "Equal"
        value: "insight-only"
        effect: "NoSchedule"

kube-state-metrics:
  tolerations:
    - key: "node.daocloud.io"
      operator: "Equal"
      value: "insight-only"
      effect: "NoSchedule"
opentelemetry-operator:
  tolerations:
    - key: "node.daocloud.io"
      operator: "Equal"
      value: "insight-only"
      effect: "NoSchedule"
opentelemetry-collector:
  tolerations:
    - key: "node.daocloud.io"
      operator: "Equal"
      value: "insight-only"
      effect: "NoSchedule"
tailing-sidecar-operator:
  operator:
    tolerations:
    - key: "node.daocloud.io"
      operator: "Equal"
      value: "insight-only"
      effect: "NoSchedule"
opentelemetry-kubernetes-collector:
  tolerations:
    - key: "node.daocloud.io"
      operator: "Equal"
      value: "insight-only"
      effect: "NoSchedule"
prometheus-blackbox-exporter:
  tolerations:
    - key: "node.daocloud.io"
      operator: "Equal"
      value: "insight-only"
      effect: "NoSchedule"
etcd-exporter:
  tolerations:
    - key: "node.daocloud.io"
      operator: "Equal"
      value: "insight-only"
      effect: "NoSchedule"
          "},{"location":"admin/insight/quickstart/install/component-scheduling.html#2","title":"2. \u901a\u8fc7\u547d\u540d\u7a7a\u95f4\u7ea7\u522b\u914d\u7f6e","text":"

          \u8ba9 insight-system \u547d\u540d\u7a7a\u95f4\u7684 Pod \u90fd\u5bb9\u5fcd node.daocloud.io=insight-only \u6c61\u70b9\u3002

          1. \u8c03\u6574 apiserver \u7684\u914d\u7f6e\u6587\u4ef6 /etc/kubernetes/manifests/kube-apiserver.yaml\uff0c\u653e\u5f00 PodTolerationRestriction,PodNodeSelector, \u53c2\u8003\u4e0b\u56fe\uff1a

          2. \u7ed9 insight-system \u547d\u540d\u7a7a\u95f4\u589e\u52a0\u6ce8\u89e3\uff1a

            apiVersion: v1\nkind: Namespace\nmetadata:\n  name: insight-system\n  annotations:\n    scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Equal\", \"effect\": \"NoSchedule\", \"key\": \"node.daocloud.io\", \"value\": \"insight-only\"}]'\n
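The original figure for step 1 is not reproduced here; a minimal sketch of the relevant kube-apiserver flag (the surrounding flag set is an assumption about your environment; append the two plugins rather than replacing what is already listed):

  # /etc/kubernetes/manifests/kube-apiserver.yaml (excerpt)
  spec:
    containers:
      - command:
          - kube-apiserver
          - --enable-admission-plugins=NodeRestriction,PodTolerationRestriction,PodNodeSelector
          # ...other existing flags unchanged...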

Restart the components under the insight-system namespace; the Pods in insight-system will then pick up the default toleration and schedule normally.

          "},{"location":"admin/insight/quickstart/install/component-scheduling.html#label","title":"\u4e3a\u8282\u70b9\u6dfb\u52a0 Label \u548c\u8282\u70b9\u4eb2\u548c\u6027\u6765\u7ba1\u7406\u7ec4\u4ef6\u8c03\u5ea6","text":"

          Info

          \u8282\u70b9\u4eb2\u548c\u6027\u6982\u5ff5\u4e0a\u7c7b\u4f3c\u4e8e nodeSelector\uff0c\u5b83\u4f7f\u4f60\u53ef\u4ee5\u6839\u636e\u8282\u70b9\u4e0a\u7684 \u6807\u7b7e(label) \u6765\u7ea6\u675f Pod \u53ef\u4ee5\u8c03\u5ea6\u5230\u54ea\u4e9b\u8282\u70b9\u4e0a\u3002 \u8282\u70b9\u4eb2\u548c\u6027\u6709\u4e24\u79cd\uff1a

          1. requiredDuringSchedulingIgnoredDuringExecution\uff1a\u8c03\u5ea6\u5668\u53ea\u6709\u5728\u89c4\u5219\u88ab\u6ee1\u8db3\u7684\u65f6\u5019\u624d\u80fd\u6267\u884c\u8c03\u5ea6\u3002\u6b64\u529f\u80fd\u7c7b\u4f3c\u4e8e nodeSelector\uff0c \u4f46\u5176\u8bed\u6cd5\u8868\u8fbe\u80fd\u529b\u66f4\u5f3a\u3002
          2. preferredDuringSchedulingIgnoredDuringExecution\uff1a\u8c03\u5ea6\u5668\u4f1a\u5c1d\u8bd5\u5bfb\u627e\u6ee1\u8db3\u5bf9\u5e94\u89c4\u5219\u7684\u8282\u70b9\u3002\u5982\u679c\u627e\u4e0d\u5230\u5339\u914d\u7684\u8282\u70b9\uff0c\u8c03\u5ea6\u5668\u4ecd\u7136\u4f1a\u8c03\u5ea6\u8be5 Pod\u3002

          \u66f4\u8fc7\u7ec6\u8282\u8bf7\u53c2\u8003 kubernetes \u5b98\u65b9\u6587\u6863\u3002

          \u4e3a\u4e86\u5b9e\u73b0\u4e0d\u540c\u7528\u6237\u5bf9 Insight \u7ec4\u4ef6\u8c03\u5ea6\u7684\u7075\u6d3b\u9700\u6c42\uff0cInsight \u5206\u522b\u63d0\u4f9b\u4e86\u8f83\u4e3a\u7ec6\u7c92\u5ea6\u7684 Label \u6765\u5b9e\u73b0\u4e0d\u540c\u7ec4\u4ef6\u7684\u8c03\u5ea6\u7b56\u7565\uff0c\u4ee5\u4e0b\u662f\u6807\u7b7e\u4e0e\u7ec4\u4ef6\u7684\u5173\u7cfb\u8bf4\u660e\uff1a

          \u6807\u7b7e Key \u6807\u7b7e Value \u8bf4\u660e node.daocloud.io/insight-any \u4efb\u610f\u503c\uff0c\u63a8\u8350\u7528 true \u4ee3\u8868 Insight \u6240\u6709\u7ec4\u4ef6\u4f18\u5148\u8003\u8651\u5e26\u4e86\u8be5\u6807\u7b7e\u7684\u8282\u70b9 node.daocloud.io/insight-prometheus \u4efb\u610f\u503c\uff0c\u63a8\u8350\u7528 true \u7279\u6307 Prometheus \u7ec4\u4ef6 node.daocloud.io/insight-vmstorage \u4efb\u610f\u503c\uff0c\u63a8\u8350\u7528 true \u7279\u6307 VictoriaMetrics vmstorage \u7ec4\u4ef6 node.daocloud.io/insight-vector \u4efb\u610f\u503c\uff0c\u63a8\u8350\u7528 true \u7279\u6307 Vector \u7ec4\u4ef6 node.daocloud.io/insight-otel-col \u4efb\u610f\u503c\uff0c\u63a8\u8350\u7528 true \u7279\u6307 OpenTelemetry \u7ec4\u4ef6

          \u53ef\u4ee5\u53c2\u8003\u5982\u4e0b\u547d\u4ee4\u4e3a\u8282\u70b9\u6dfb\u52a0\u53ca\u79fb\u9664\u6807\u7b7e\uff1a

          # \u4e3a node8 \u6dfb\u52a0\u6807\u7b7e\uff0c\u5148\u5c06 insight-prometheus \u8c03\u5ea6\u5230 node8 \nkubectl label nodes node8 node.daocloud.io/insight-prometheus=true\n\n# \u79fb\u9664 node8 \u7684 node.daocloud.io/insight-prometheus \u6807\u7b7e\nkubectl label nodes node8 node.daocloud.io/insight-prometheus-\n
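To check which nodes currently carry a given label (using a key from the table above):

  kubectl get nodes -l node.daocloud.io/insight-prometheus=true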

Below is the default affinity preference of the insight-prometheus component at deploy time:

affinity:
  nodeAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
    - preference:
        matchExpressions:
        - key: node-role.kubernetes.io/control-plane
          operator: DoesNotExist
      weight: 1
    - preference:
        matchExpressions:
        - key: node.daocloud.io/insight-prometheus # (1)!
          operator: Exists
      weight: 2
    - preference:
        matchExpressions:
        - key: node.daocloud.io/insight-any
          operator: Exists
      weight: 3
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        podAffinityTerm:
          topologyKey: kubernetes.io/hostname
          labelSelector:
            matchExpressions:
              - key: app.kubernetes.io/instance
                operator: In
                values:
                  - insight-agent-kube-prometh-prometheus

1. Schedule insight-prometheus first onto nodes carrying the node.daocloud.io/insight-prometheus label
          "},{"location":"admin/insight/quickstart/install/gethosturl.html","title":"\u83b7\u53d6\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u6570\u636e\u5b58\u50a8\u5730\u5740","text":"

          \u53ef\u89c2\u6d4b\u6027\u662f\u591a\u96c6\u7fa4\u7edf\u4e00\u89c2\u6d4b\u7684\u4ea7\u54c1\uff0c\u4e3a\u5b9e\u73b0\u5bf9\u591a\u96c6\u7fa4\u89c2\u6d4b\u6570\u636e\u7684\u7edf\u4e00\u5b58\u50a8\u3001\u67e5\u8be2\uff0c \u5b50\u96c6\u7fa4\u9700\u8981\u5c06\u91c7\u96c6\u7684\u89c2\u6d4b\u6570\u636e\u4e0a\u62a5\u7ed9\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u8fdb\u884c\u7edf\u4e00\u5b58\u50a8\u3002 \u672c\u6587\u63d0\u4f9b\u4e86\u5728\u5b89\u88c5\u91c7\u96c6\u7ec4\u4ef6 insight-agent \u65f6\u5fc5\u586b\u7684\u5b58\u50a8\u7ec4\u4ef6\u7684\u5730\u5740\u3002

          "},{"location":"admin/insight/quickstart/install/gethosturl.html#insight-agent","title":"\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u5b89\u88c5 insight-agent","text":"

          \u5982\u679c\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u5b89\u88c5 insight-agent\uff0c\u63a8\u8350\u901a\u8fc7\u57df\u540d\u6765\u8bbf\u95ee\u96c6\u7fa4\uff1a

          export vminsert_host=\"vminsert-insight-victoria-metrics-k8s-stack.insight-system.svc.cluster.local\" # (1)!\nexport es_host=\"insight-es-master.insight-system.svc.cluster.local\" # (2)!\nexport otel_col_host=\"insight-opentelemetry-collector.insight-system.svc.cluster.local\" # (3)!\n
          "},{"location":"admin/insight/quickstart/install/gethosturl.html#insight-agent_1","title":"\u5728\u5176\u4ed6\u96c6\u7fa4\u5b89\u88c5 insight-agent","text":""},{"location":"admin/insight/quickstart/install/gethosturl.html#insight-server","title":"\u901a\u8fc7 Insight Server \u63d0\u4f9b\u7684\u63a5\u53e3\u83b7\u53d6\u5730\u5740","text":"
          1. \u7ba1\u7406\u96c6\u7fa4\u4f7f\u7528\u9ed8\u8ba4\u7684 LoadBalancer \u65b9\u5f0f\u66b4\u9732

            \u767b\u5f55\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u63a7\u5236\u53f0\uff0c\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a

            export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam'\n

            Note

            \u8bf7\u66ff\u6362\u547d\u4ee4\u4e2d\u7684 ${INSIGHT_SERVER_IP} \u53c2\u6570\u3002

            \u83b7\u5f97\u5982\u4e0b\u8fd4\u56de\u503c\uff1a

            {\n  \"values\": {\n    \"global\": {\n      \"exporters\": {\n        \"logging\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"metric\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"auditLog\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"trace\": {\n          \"host\": \"10.6.182.32\"\n        }\n      }\n    },\n    \"opentelemetry-operator\": {\n      \"enabled\": true\n    },\n    \"opentelemetry-collector\": {\n      \"enabled\": true\n    }\n  }\n}\n
            • global.exporters.logging.host \u662f\u65e5\u5fd7\u670d\u52a1\u5730\u5740\uff0c\u4e0d\u9700\u8981\u518d\u8bbe\u7f6e\u5bf9\u5e94\u670d\u52a1\u7684\u7aef\u53e3\uff0c\u90fd\u4f1a\u4f7f\u7528\u76f8\u5e94\u9ed8\u8ba4\u503c
            • global.exporters.metric.host \u662f\u6307\u6807\u670d\u52a1\u5730\u5740
            • global.exporters.trace.host \u662f\u94fe\u8def\u670d\u52a1\u5730\u5740
            • global.exporters.auditLog.host \u662f\u5ba1\u8ba1\u65e5\u5fd7\u670d\u52a1\u5730\u5740\uff08\u548c\u94fe\u8def\u4f7f\u7528\u7684\u540c\u4e00\u4e2a\u670d\u52a1\u4e0d\u540c\u7aef\u53e3\uff09
          2. The management cluster has LoadBalancer disabled

            When calling the API, you must additionally pass any externally reachable node IP of the cluster; that IP is used to assemble the complete access address of each service.

            export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam' --data '{\"extra\": {\"EXPORTER_EXTERNAL_IP\": \"10.5.14.51\"}}'\n

            The following response is returned:

            {\n  \"values\": {\n    \"global\": {\n      \"exporters\": {\n        \"logging\": {\n          \"scheme\": \"https\",\n          \"host\": \"10.5.14.51\",\n          \"port\": 32007,\n          \"user\": \"elastic\",\n          \"password\": \"j8V1oVoM1184HvQ1F3C8Pom2\"\n        },\n        \"metric\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30683\n        },\n        \"auditLog\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30884\n        },\n        \"trace\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30274\n        }\n      }\n    },\n    \"opentelemetry-operator\": {\n      \"enabled\": true\n    },\n    \"opentelemetry-collector\": {\n      \"enabled\": true\n    }\n  }\n}\n
            • global.exporters.logging.host is the logging service address
            • global.exporters.logging.port is the NodePort exposed by the logging service
            • global.exporters.metric.host is the metrics service address
            • global.exporters.metric.port is the NodePort exposed by the metrics service
            • global.exporters.trace.host is the tracing service address
            • global.exporters.trace.port is the NodePort exposed by the tracing service
            • global.exporters.auditLog.host is the audit log service address (the same service as tracing, on a different port)
            • global.exporters.auditLog.port is the NodePort exposed by the audit log service
          "},{"location":"admin/insight/quickstart/install/gethosturl.html#loadbalancer","title":"\u901a\u8fc7 LoadBalancer \u8fde\u63a5","text":"
          1. If LoadBalancer is enabled in the cluster and a VIP is configured for Insight, you can also run the following command manually to get the addresses of vminsert and opentelemetry-collector:

            $ kubectl get service -n insight-system | grep lb\nlb-insight-opentelemetry-collector               LoadBalancer   10.233.23.12    <pending>     4317:31286/TCP,8006:31351/TCP  24d\nlb-vminsert-insight-victoria-metrics-k8s-stack   LoadBalancer   10.233.63.67    <pending>     8480:31629/TCP                 24d\n
            • lb-vminsert-insight-victoria-metrics-k8s-stack is the metrics service address
            • lb-insight-opentelemetry-collector is the tracing service address
          2. Run the following command to get the elasticsearch address:

            $ kubectl get service -n mcamel-system | grep es\nmcamel-common-es-cluster-masters-es-http               NodePort    10.233.16.120   <none>        9200:30465/TCP               47d\n

            mcamel-common-es-cluster-masters-es-http is the logging service address
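
            The NodePort can also be read directly from the Service object instead of parsing the table output. A sketch (service and namespace names as shown above):

            kubectl get service mcamel-common-es-cluster-masters-es-http -n mcamel-system -o jsonpath='{.spec.ports[0].nodePort}'\n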

          "},{"location":"admin/insight/quickstart/install/gethosturl.html#nodeport","title":"\u901a\u8fc7 NodePort \u8fde\u63a5","text":"

          The LB feature is disabled in the global service cluster

          In this case, the LoadBalancer resources described above are not created by default. The corresponding service names are:

          • vminsert-insight-victoria-metrics-k8s-stack (metrics service)
          • common-es (logging service)
          • insight-opentelemetry-collector (tracing service)

          After obtaining the corresponding service ports in either of the two cases above, set the following:

          --set global.exporters.logging.host=  # (1)!\n--set global.exporters.logging.port=  # (2)!\n--set global.exporters.metric.host=   # (3)!\n--set global.exporters.metric.port=   # (4)!\n--set global.exporters.trace.host=    # (5)!\n--set global.exporters.trace.port=    # (6)!\n--set global.exporters.auditLog.host= # (7)!\n
          1. Externally reachable NodeIP of the management cluster
          2. NodePort corresponding to port 9200 of the logging service
          3. Externally reachable NodeIP of the management cluster
          4. NodePort corresponding to port 8480 of the metrics service
          5. Externally reachable NodeIP of the management cluster
          6. NodePort corresponding to port 4317 of the tracing service
          7. Externally reachable NodeIP of the management cluster
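
          For example, with a hypothetical externally reachable node IP of 10.6.182.10 and the NodePorts from the sample outputs above (30465 for Elasticsearch port 9200, 31629 for vminsert port 8480, 31286 for the collector port 4317), the flags would look like:

          --set global.exporters.logging.host=10.6.182.10 \\\n--set global.exporters.logging.port=30465 \\\n--set global.exporters.metric.host=10.6.182.10 \\\n--set global.exporters.metric.port=31629 \\\n--set global.exporters.trace.host=10.6.182.10 \\\n--set global.exporters.trace.port=31286 \\\n--set global.exporters.auditLog.host=10.6.182.10\n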
          "},{"location":"admin/insight/quickstart/install/helm-installagent.html","title":"\u901a\u8fc7 Helm \u90e8\u7f72 Insight Agent","text":"

          This page describes how to install the community edition of Insight Agent from the command line with Helm.

          "},{"location":"admin/insight/quickstart/install/helm-installagent.html#insight-agent","title":"\u5b89\u88c5 Insight Agent","text":"
          1. Add the chart repository with the following commands

            helm repo add insight https://release.daocloud.io/chartrepo/insight\nhelm repo update\nhelm search repo insight/insight-agent --versions\n
          2. Installing Insight Agent requires that the Insight Server in the global service cluster is running properly. Run the following command to install the community edition of Insight Agent (this configuration does not enable tracing):

            helm upgrade --install --create-namespace --cleanup-on-fail \\\n    --version ${version} \\      # Specify the version to deploy\n    insight-agent insight/insight-agent \\\n    --set global.exporters.logging.elasticsearch.host=10.10.10.x \\    # Replace \"10.10.10.x\" with the address of Elasticsearch in the global service cluster (or an external one)\n    --set global.exporters.logging.elasticsearch.port=32517 \\     # Replace \"32517\" with the port exposed by Elasticsearch\n    --set global.exporters.logging.elasticsearch.user=elastic \\     # Replace \"elastic\" with the Elasticsearch username\n    --set global.exporters.logging.elasticsearch.password=dangerous \\  # Replace \"dangerous\" with the Elasticsearch password\n    --set global.exporters.metric.host=${vminsert_address} \\    # Replace with the address of vminsert in the global service cluster\n    --set global.exporters.metric.port=${vminsert_port} \\    # Replace with the port exposed by vminsert\n    --set global.exporters.auditLog.host=${opentelemetry-collector address} \\     # Replace with the address of opentelemetry-collector in the global service cluster\n    --set global.exporters.auditLog.port=${otel_col_auditlog_port}\\   # Replace with the externally reachable port of the service mapping to opentelemetry-collector container port 8006\n    -n insight-system\n

            Info

            See How to get the connection address for the address information.

          3. Run the following commands to confirm the installation status:

            helm list -A\nkubectl get pods -n insight-system\n
          "},{"location":"admin/insight/quickstart/install/helm-installagent.html#_1","title":"\u5982\u4f55\u83b7\u53d6\u8fde\u63a5\u5730\u5740","text":""},{"location":"admin/insight/quickstart/install/helm-installagent.html#insight-agent_1","title":"\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u5b89\u88c5 Insight Agent","text":"

          If the Agent is installed in the management cluster, it is recommended to access the cluster via service domain names:

          export vminsert_host=\"vminsert-insight-victoria-metrics-k8s-stack.insight-system.svc.cluster.local\" # metrics\nexport es_host=\"insight-es-master.insight-system.svc.cluster.local\" # logs\nexport otel_col_host=\"insight-opentelemetry-collector.insight-system.svc.cluster.local\" # traces\n
          "},{"location":"admin/insight/quickstart/install/helm-installagent.html#insight-agent_2","title":"\u5728\u5de5\u4f5c\u96c6\u7fa4\u5b89\u88c5 Insight Agent","text":"\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4f7f\u7528\u9ed8\u8ba4\u7684 LoadBalancer\u767b\u5f55\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u63a7\u5236\u53f0\u64cd\u4f5c\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4f7f\u7528 Nodeport

          When the global service cluster exposes services with the default LoadBalancer method, log in to the console of the global service cluster and run the following command:

          export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam'\n

          The following response is returned:

          {\"global\":{\"exporters\":{\"logging\":{\"output\":\"elasticsearch\",\"elasticsearch\":{\"host\":\"10.6.182.32\"},\"kafka\":{},\"host\":\"10.6.182.32\"},\"metric\":{\"host\":\"10.6.182.32\"},\"auditLog\":    {\"host\":\"10.6.182.32\"}}},\"opentelemetry-operator\":{\"enabled\":true},\"opentelemetry-collector\":{\"enabled\":true}}\n

          Where:

          • global.exporters.logging.elasticsearch.host is the logging service address (no need to set the service ports; the corresponding defaults are used);
          • global.exporters.metric.host is the metrics service address;
          • global.exporters.trace.host is the tracing service address;
          • global.exporters.auditLog.host is the audit log service address (the same service as tracing, on a different port);

          Log in to the console of the global service cluster and run the following commands:

          kubectl get service -n insight-system | grep lb\nkubectl get service -n mcamel-system | grep es\n

          Where:

          • lb-vminsert-insight-victoria-metrics-k8s-stack is the metrics service address;
          • lb-insight-opentelemetry-collector is the tracing service address;
          • mcamel-common-es-cluster-masters-es-http is the logging service address;

          When the global service cluster exposes services with NodePort, log in to the console of the global service cluster and run the following commands:

          kubectl get service -n insight-system\nkubectl get service -n mcamel-system\n

          Where:

          • vminsert-insight-victoria-metrics-k8s-stack is the metrics service address;
          • insight-opentelemetry-collector is the tracing service address;
          • mcamel-common-es-cluster-masters-es-http is the logging service address;
          "},{"location":"admin/insight/quickstart/install/helm-installagent.html#insight-agent_3","title":"\u5347\u7ea7 Insight Agent","text":"
          1. Log in to the console of the target cluster and run the following command to back up the --set parameters.

            helm get values insight-agent -n insight-system -o yaml > insight-agent.yaml\n
          2. Run the following command to update the repository.

            helm repo update\n
          3. Run the following command to upgrade.

            helm upgrade insight-agent insight/insight-agent \\\n-n insight-system \\\n-f ./insight-agent.yaml \\\n--version ${version}   # Specify the version to upgrade to\n
          4. Run the following command to confirm the installation status:

            kubectl get pods -n insight-system\n
          "},{"location":"admin/insight/quickstart/install/helm-installagent.html#insight-agent_4","title":"\u5378\u8f7d Insight Agent","text":"
          helm uninstall insight-agent -n insight-system --timeout 10m\n
          "},{"location":"admin/insight/quickstart/install/install-agent.html","title":"\u5728\u7ebf\u5b89\u88c5 insight-agent","text":"

          insight-agent is the plugin that collects observability data in a cluster, providing unified observability for metrics, traces, and logs. This page describes how to install insight-agent for an integrated cluster in an online environment.

          "},{"location":"admin/insight/quickstart/install/install-agent.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
          • The cluster has been successfully integrated into the Container Management module. For how to integrate a cluster, see: Integrate a Cluster
          "},{"location":"admin/insight/quickstart/install/install-agent.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
          1. Go to the Container Management module and find the name of the cluster where insight-agent is to be installed in the Cluster List.

          2. Select Install Now to jump there, or click the cluster, click Helm Apps -> Helm Charts in the left navigation bar, search for insight-agent, and click the card to open its details.

          3. On the insight-agent installation page, click Install to proceed to the next step.

          4. Select the version to install and fill in the addresses of the corresponding data storage components in the global service cluster in the form below. After verifying that the information is correct, click OK.

            • insight-agent is deployed in the insight-system namespace of the cluster by default.
            • Installing the latest version of insight-agent is recommended.
            • The system pre-fills the addresses of the data reporting components; please still verify them before clicking OK to install. To change the data reporting addresses, see: Get the Data Reporting Address.

          5. The system automatically returns to the Helm Apps list. When the status of insight-agent changes from Not Ready to Deployed and all component statuses are Running, the installation has succeeded. After a while, the cluster's data can be viewed in the Observability module.

          Note

          • Click ⋮ on the far right to perform more operations from the pop-up menu, such as Update, View YAML, and Delete.
          "},{"location":"admin/insight/quickstart/install/knownissues.html","title":"\u5df2\u77e5\u95ee\u9898","text":"

          This page lists issues related to installing and uninstalling Insight Agent, along with their solutions.

          "},{"location":"admin/insight/quickstart/install/knownissues.html#v0230","title":"v0.23.0","text":""},{"location":"admin/insight/quickstart/install/knownissues.html#insight-agent","title":"Insight Agent","text":""},{"location":"admin/insight/quickstart/install/knownissues.html#insight-agent_1","title":"Insight Agent \u5378\u8f7d\u5931\u8d25","text":"

          When you run the following command to uninstall Insight Agent:

          helm uninstall insight-agent -n insight-system\n

          The TLS secret used by otel-operator is not removed.

          In otel-operator's logic for reusing the TLS secret, it checks whether the otel-operator MutationConfiguration exists and reuses the CA cert bound in that MutationConfiguration. However, helm uninstall has already removed the MutationConfiguration, which results in a null value.

          Therefore, delete the corresponding secret manually, using either of the following two methods:

          • Delete via the command line: log in to the console of the target cluster and run the following command:

            kubectl -n insight-system delete secret insight-agent-opentelemetry-operator-controller-manager-service-cert\n
          • Delete via the UI: log in to the AI platform's Container Management, select the target cluster, go to Secrets from the left navigation, search for insight-agent-opentelemetry-operator-controller-manager-service-cert, and select Delete.

          "},{"location":"admin/insight/quickstart/install/knownissues.html#v0220","title":"v0.22.0","text":""},{"location":"admin/insight/quickstart/install/knownissues.html#insight-agent_2","title":"Insight Agent","text":""},{"location":"admin/insight/quickstart/install/knownissues.html#insight-agent_3","title":"\u5347\u7ea7 Insight Agent \u65f6\u66f4\u65b0\u65e5\u5fd7\u6536\u96c6\u7aef\uff0c\u672a\u751f\u6548","text":"

          After changing the insight-agent log configuration from elasticsearch to kafka, or from kafka to elasticsearch, the change does not actually take effect; the configuration from before the update is still used.

          Solution:

          Manually restart fluentbit in the cluster.
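
          A sketch of such a restart, assuming the Fluent Bit workload deployed by insight-agent is a DaemonSet named insight-agent-fluent-bit (list the workloads first to confirm the actual name in your cluster):

          kubectl -n insight-system get daemonsets | grep fluent\nkubectl -n insight-system rollout restart daemonset/insight-agent-fluent-bit\n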

          "},{"location":"admin/insight/quickstart/install/knownissues.html#v0210","title":"v0.21.0","text":""},{"location":"admin/insight/quickstart/install/knownissues.html#insight-agent_4","title":"Insight Agent","text":""},{"location":"admin/insight/quickstart/install/knownissues.html#podmonitor-jvm","title":"PodMonitor \u91c7\u96c6\u591a\u4efd JVM \u6307\u6807\u6570\u636e","text":"
          1. The PodMonitor/insight-kubernetes-pod in this version has a defect: it incorrectly creates scrape jobs for all containers of Pods labeled insight.opentelemetry.io/metric-scrape=true, while in fact only the port of the container corresponding to insight.opentelemetry.io/metric-port should be scraped.

          2. Once a PodMonitor is declared, PrometheusOperator pre-populates some service discovery configuration. Considering CRD compatibility issues as well, the mechanism of configuring annotation-driven scrape jobs through PodMonitor was abandoned.

          3. Instead, use Prometheus's built-in additional scrape config mechanism: configure the service discovery rules in a secret and reference it from Prometheus.

          In summary:

          1. Delete the current insight-kubernetes-pod PodMonitor
          2. Use the new rule

          The new rule uses action: keepequal to compare source_labels against target_label and decide whether to create a scrape job for a given container port. Note that this feature is only available in Prometheus 2.41.0 (2022-12-20) and later.

          +    - source_labels: [__meta_kubernetes_pod_annotation_insight_opentelemetry_io_metric_port]\n+      separator: ;\n+      target_label: __meta_kubernetes_pod_container_port_number\n+      action: keepequal\n
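
          For context, a minimal sketch of a scrape job carrying this relabel rule when delivered through Prometheus's additional scrape config secret; the job name and the initial keep rule here are illustrative, not the exact configuration shipped by Insight:

          - job_name: insight-kubernetes-pod\n  kubernetes_sd_configs:\n    - role: pod\n  relabel_configs:\n    # illustrative: keep only pods annotated insight.opentelemetry.io/metric-scrape=true\n    - source_labels: [__meta_kubernetes_pod_annotation_insight_opentelemetry_io_metric_scrape]\n      regex: \"true\"\n      action: keep\n    # the keepequal rule from above: scrape only the container port matching metric-port\n    - source_labels: [__meta_kubernetes_pod_annotation_insight_opentelemetry_io_metric_port]\n      separator: ;\n      target_label: __meta_kubernetes_pod_container_port_number\n      action: keepequal\n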
          "},{"location":"admin/insight/quickstart/install/upgrade-note.html","title":"\u5347\u7ea7\u6ce8\u610f\u4e8b\u9879","text":"

          This page covers notes for upgrading insight-server and insight-agent.

          "},{"location":"admin/insight/quickstart/install/upgrade-note.html#insight-agent","title":"insight-agent","text":""},{"location":"admin/insight/quickstart/install/upgrade-note.html#v028x-v029x","title":"\u4ece v0.28.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.29.x","text":"

          Because v0.29.0 upgrades the OpenTelemetry community operator chart version, the values supported by featureGates in values have changed. Therefore, before upgrading, set featureGates to empty, i.e.:

          -  --set opentelemetry-operator.manager.featureGates=\"+operator.autoinstrumentation.go,+operator.autoinstrumentation.multi-instrumentation,+operator.autoinstrumentation.nginx\" \\\n+  --set opentelemetry-operator.manager.featureGates=\"\"\n
          "},{"location":"admin/insight/quickstart/install/upgrade-note.html#insight-server","title":"insight-server","text":""},{"location":"admin/insight/quickstart/install/upgrade-note.html#v026x-v027x","title":"\u4ece v0.26.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.27.x \u6216\u66f4\u9ad8\u7248\u672c","text":"

          In v0.27.x, the switch for the vector component was split out separately. If vector was enabled in the existing environment, you need to specify --set vector.enabled=true when upgrading insight-server.
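
          A sketch of the corresponding upgrade command; the release and chart names (insight, insight/insight) are assumptions, so substitute the ones used in your environment:

          helm upgrade insight insight/insight \\\n    -n insight-system \\\n    --reuse-values \\\n    --set vector.enabled=true\n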

          "},{"location":"admin/insight/quickstart/install/upgrade-note.html#v019x-020x","title":"\u4ece v0.19.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 0.20.x","text":"

          Before upgrading Insight, run the following commands to manually delete the jaeger-collector and jaeger-query deployments:

          kubectl -n insight-system delete deployment insight-jaeger-collector\nkubectl -n insight-system delete deployment insight-jaeger-query\n
          "},{"location":"admin/insight/quickstart/install/upgrade-note.html#v017x-v018x","title":"\u4ece v0.17.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.18.x","text":"

          Because 0.18.x updates the Jaeger-related deployment files, run the following commands manually before upgrading insight-server:

          kubectl -n insight-system delete deployment insight-jaeger-collector\nkubectl -n insight-system delete deployment insight-jaeger-query\n

          Because metric names changed in 0.18.x, insight-agent should also be upgraded after upgrading insight-server.

          In addition, the parameters for enabling the tracing module and the Elasticsearch connection settings were adjusted. See the following parameters:

          +  --set global.tracing.enable=true \\\n-  --set jaeger.collector.enabled=true \\\n-  --set jaeger.query.enabled=true \\\n+  --set global.elasticsearch.scheme=${your-external-elasticsearch-scheme} \\\n+  --set global.elasticsearch.host=${your-external-elasticsearch-host} \\\n+  --set global.elasticsearch.port=${your-external-elasticsearch-port} \\\n+  --set global.elasticsearch.user=${your-external-elasticsearch-username} \\\n+  --set global.elasticsearch.password=${your-external-elasticsearch-password} \\\n-  --set jaeger.storage.elasticsearch.scheme=${your-external-elasticsearch-scheme} \\\n-  --set jaeger.storage.elasticsearch.host=${your-external-elasticsearch-host} \\\n-  --set jaeger.storage.elasticsearch.port=${your-external-elasticsearch-port} \\\n-  --set jaeger.storage.elasticsearch.user=${your-external-elasticsearch-username} \\\n-  --set jaeger.storage.elasticsearch.password=${your-external-elasticsearch-password} \\\n
          "},{"location":"admin/insight/quickstart/install/upgrade-note.html#v015x-v016x","title":"\u4ece v0.15.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.16.x","text":"

          Because 0.16.x uses disableRouteContinueEnforce, a new feature parameter of the vmalertmanagers CRD, run the following command manually before upgrading insight-server.

          kubectl apply --server-side -f https://raw.githubusercontent.com/VictoriaMetrics/operator/v0.33.0/config/crd/bases/operator.victoriametrics.com_vmalertmanagers.yaml --force-conflicts\n

          Note

          If you installed offline, after extracting the Insight offline package, run the following command to update the CRDs.

          kubectl apply --server-side -f insight/dependency-crds --force-conflicts \n
          "},{"location":"admin/insight/quickstart/install/upgrade-note.html#insight-agent_1","title":"insight-agent","text":""},{"location":"admin/insight/quickstart/install/upgrade-note.html#v023x-v024x","title":"\u4ece v0.23.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.24.x","text":"

          Because 0.24.x adds a CRD to the OTel operator chart, and Helm upgrade does not update CRDs, run the following command manually:

          kubectl apply -f https://raw.githubusercontent.com/open-telemetry/opentelemetry-helm-charts/main/charts/opentelemetry-operator/crds/crd-opentelemetry.io_opampbridges.yaml\n

          If you installed offline, the YAML for the above CRD can be found after extracting the insight-agent offline package. After extracting the Insight-Agent chart, run the following command manually:

          kubectl apply -f charts/agent/crds/crd-opentelemetry.io_opampbridges.yaml\n
          "},{"location":"admin/insight/quickstart/install/upgrade-note.html#v019x-v020x","title":"\u4ece v0.19.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.20.x","text":"

          Because 0.20.x adds a Kafka log export configuration, the log export configuration was adjusted. Note the parameter changes before upgrading insight-agent: the original logging configuration has moved to logging.elasticsearch:

          -  --set global.exporters.logging.host \\\n-  --set global.exporters.logging.port \\\n+  --set global.exporters.logging.elasticsearch.host \\\n+  --set global.exporters.logging.elasticsearch.port \\\n
          "},{"location":"admin/insight/quickstart/install/upgrade-note.html#v017x-v018x_1","title":"\u4ece v0.17.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.18.x","text":"

          Because 0.18.x updates the Jaeger-related deployment files, note the following parameter changes before upgrading insight-agent.

          +  --set global.exporters.trace.enable=true \\\n-  --set opentelemetry-collector.enabled=true \\\n-  --set opentelemetry-operator.enabled=true \\\n
          "},{"location":"admin/insight/quickstart/install/upgrade-note.html#v016x-v017x","title":"\u4ece v0.16.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.17.x","text":"

          v0.17.x upgrades the kube-prometheus-stack chart from 41.9.1 to 45.28.1. Some fields of the CRDs it uses were also upgraded, such as the attachMetadata field of ServiceMonitor, so run the following command before upgrading insight-agent:

          kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --force-conflicts\n

          If you installed offline, after extracting the insight-agent offline package, the YAML for the above CRD can be found in insight-agent/dependency-crds.

          "},{"location":"admin/insight/quickstart/install/upgrade-note.html#v011x-v012x","title":"\u4ece v0.11.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.12.x","text":"

          v0.12.x upgrades the kube-prometheus-stack chart from 39.6.0 to 41.9.1, which includes upgrading prometheus-operator to v0.60.1 and the prometheus-node-exporter chart to 4.3.0, among others. The upgraded prometheus-node-exporter uses the Kubernetes recommended labels, so the node-exporter DaemonSet must be deleted before upgrading. prometheus-operator updated its CRDs, so run the following commands before upgrading insight-agent:

          kubectl delete daemonset insight-agent-prometheus-node-exporter -n insight-system\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml --force-conflicts\n

          Note

          If you installed offline, after extracting the insight-agent offline package, run the following command to update the CRDs.

          kubectl apply --server-side -f insight-agent/dependency-crds --force-conflicts\n
          "},{"location":"admin/insight/quickstart/otel/operator.html","title":"\u901a\u8fc7 Operator \u5b9e\u73b0\u5e94\u7528\u7a0b\u5e8f\u65e0\u4fb5\u5165\u589e\u5f3a","text":"

          Currently only Java, Node.js, Python, .NET, and Golang support non-intrusive integration via the Operator.

          "},{"location":"admin/insight/quickstart/otel/operator.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

          Make sure insight-agent is ready. If not, see Install insight-agent to collect data and ensure the following three items are ready:

          • The trace feature is enabled for insight-agent
          • The address and port for trace data are filled in correctly
          • The Pods corresponding to deployment/insight-agent-opentelemetry-operator and deployment/insight-agent-opentelemetry-collector are ready (a quick check follows this list)
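
          A minimal readiness check for the last item:

          kubectl -n insight-system get deployment insight-agent-opentelemetry-operator insight-agent-opentelemetry-collector\nkubectl -n insight-system get pods\n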
          "},{"location":"admin/insight/quickstart/otel/operator.html#instrumentation-cr","title":"\u5b89\u88c5 Instrumentation CR","text":"

          Tip

          Starting from Insight v0.22.0, you no longer need to install the Instrumentation CR manually.

          Install it in the insight-system namespace; there are minor differences between versions.

          Insight v0.21.x / Insight v0.20.x / Insight v0.18.x / Insight v0.17.x / Insight v0.16.x
          K8S_CLUSTER_UID=$(kubectl get namespace kube-system -o jsonpath='{.metadata.uid}')\nkubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/openinsight-proj/autoinstrumentation-java:1.31.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n      - name: OTEL_K8S_CLUSTER_UID\n        value: $K8S_CLUSTER_UID\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
          kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.29.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0-rc.2\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
          kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.25.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.37.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.38b0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.1-alpha\nEOF\n
          kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.23.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.34.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.33b0\nEOF\n
          kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.23.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.34.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.33b0\nEOF\n
          "},{"location":"admin/insight/quickstart/otel/operator.html#_2","title":"\u4e0e\u670d\u52a1\u7f51\u683c\u94fe\u8def\u4e32\u8054\u573a\u666f","text":"

          If you have enabled tracing in a service mesh, an additional environment-variable injection configuration is required:

          "},{"location":"admin/insight/quickstart/otel/operator.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b","text":"
          1. Log in to the AI platform, go to Container Management, and select the target cluster.
          2. Click Custom Resources in the left navigation bar, find instrumentations.opentelemetry.io, and open its detail page.
          3. Select the insight-system namespace, edit insight-opentelemetry-autoinstrumentation, and add the following under spec:env::

                - name: OTEL_SERVICE_NAME\n      valueFrom:\n        fieldRef:\n          fieldPath: metadata.labels['app'] \n

            The complete command is as follows (for Insight v0.21.x):

            K8S_CLUSTER_UID=$(kubectl get namespace kube-system -o jsonpath='{.metadata.uid}')\nkubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n    - name: OTEL_SERVICE_NAME\n      valueFrom:\n        fieldRef:\n          fieldPath: metadata.labels['app'] \n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/openinsight-proj/autoinstrumentation-java:1.31.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n      - name: OTEL_K8S_CLUSTER_UID\n        value: $K8S_CLUSTER_UID\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
          "},{"location":"admin/insight/quickstart/otel/operator.html#_4","title":"\u6dfb\u52a0\u6ce8\u89e3\uff0c\u81ea\u52a8\u63a5\u5165\u94fe\u8def","text":"

          Once the above is ready, you can enable trace collection for applications via annotations; OTel currently supports annotation-based integration. Depending on the service language, different pod annotations must be added. Each service can add one of two kinds of annotations:

          • Environment-variable-only injection annotation

            There is only one annotation of this kind. It adds OTel-related environment variables, such as the trace reporting address and the cluster ID and namespace of the container (this annotation is very useful when the application's language has no auto-instrumentation probe):

            instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

            The value is split by / into two parts: the first part (insight-system) is the namespace of the CR installed in the previous step, and the second part (insight-opentelemetry-autoinstrumentation) is the name of that CR. A sketch of adding this annotation with kubectl follows this list.

          • Auto-probe injection plus environment-variable injection annotations

            There are currently four annotations of this kind, corresponding to four programming languages: java, nodejs, python, and dotnet. Using one of them injects an auto-instrumentation probe and the default OTel environment variables into the first container under spec.pod:

            Java application / NodeJs application / Python application / Dotnet application / Golang application
            instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
            instrumentation.opentelemetry.io/inject-nodejs: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
            instrumentation.opentelemetry.io/inject-python: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
            instrumentation.opentelemetry.io/inject-dotnet: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

            Because Go auto-instrumentation requires OTEL_GO_AUTO_TARGET_EXE to be set, you must provide a valid executable path via the annotation or the Instrumentation resource. Not setting this value aborts Go auto-instrumentation injection, causing trace integration to fail.

            instrumentation.opentelemetry.io/inject-go: \"insight-system/insight-opentelemetry-autoinstrumentation\"\ninstrumentation.opentelemetry.io/otel-go-auto-target-exe: \"/path/to/container/executable\"\n

            Go auto-instrumentation also requires elevated permissions. The following permissions are set automatically and are required.

            securityContext:\n  privileged: true\n  runAsUser: 0\n
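
          If you prefer not to edit workload YAML by hand, the SDK-only annotation described earlier can also be added to an existing Deployment's Pod template with kubectl patch. A sketch; my-deployment is a placeholder name:

          kubectl patch deployment my-deployment --type merge -p '{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"instrumentation.opentelemetry.io/inject-sdk\":\"insight-system/insight-opentelemetry-autoinstrumentation\"}}}}}'\n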

          Tip

          When injecting probes, the OpenTelemetry Operator automatically adds some OTel-related environment variables and also supports overriding them. The override precedence of these environment variables is:

          original container env vars -> language specific env vars -> common env vars -> instrument spec configs' vars\n

          However, avoid manually overriding OTEL_RESOURCE_ATTRIBUTES_NODE_NAME. The Operator uses it internally as a marker of whether a Pod has already been injected; if it is added manually, the probe may fail to inject.

          "},{"location":"admin/insight/quickstart/otel/operator.html#demo","title":"\u81ea\u52a8\u6ce8\u5165\u793a\u4f8b Demo","text":"

          Note that these annotations are added to the Pod template's metadata (spec.template.metadata.annotations), not the Deployment's own annotations.

          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-app\n  labels:\n    app: my-app\nspec:\n  selector:\n    matchLabels:\n      app: my-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-app\n      annotations:\n        instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n    spec:\n      containers:\n      - name: myapp\n        image: jaegertracing/vertx-create-span:operator-e2e-tests\n        ports:\n          - containerPort: 8080\n            protocol: TCP\n

          The final generated YAML is as follows:

          apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-deployment-with-sidecar-565bd877dd-nqkk6\n  generateName: my-deployment-with-sidecar-565bd877dd-\n  namespace: default\n  uid: aa89ca0d-620c-4d20-8bc1-37d67bad4ea4\n  resourceVersion: '2668986'\n  creationTimestamp: '2022-04-08T05:58:48Z'\n  labels:\n    app: my-pod-with-sidecar\n    pod-template-hash: 565bd877dd\n  annotations:\n    cni.projectcalico.org/containerID: 234eae5e55ea53db2a4bc2c0384b9a1021ed3908f82a675e4a92a49a7e80dd61\n    cni.projectcalico.org/podIP: 192.168.134.133/32\n    cni.projectcalico.org/podIPs: 192.168.134.133/32\n    instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\nspec:\n  volumes:\n    - name: kube-api-access-sp2mz\n      projected:\n        sources:\n          - serviceAccountToken:\n              expirationSeconds: 3607\n              path: token\n          - configMap:\n              name: kube-root-ca.crt\n              items:\n                - key: ca.crt\n                  path: ca.crt\n          - downwardAPI:\n              items:\n                - path: namespace\n                  fieldRef:\n                    apiVersion: v1\n                    fieldPath: metadata.namespace\n        defaultMode: 420\n    - name: opentelemetry-auto-instrumentation\n      emptyDir: {}\n  initContainers:\n    - name: opentelemetry-auto-instrumentation\n      image: >-\n        ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java\n      command:\n        - cp\n        - /javaagent.jar\n        - /otel-auto-instrumentation/javaagent.jar\n      resources: {}\n      volumeMounts:\n        - name: opentelemetry-auto-instrumentation\n          mountPath: /otel-auto-instrumentation\n        - name: kube-api-access-sp2mz\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  containers:\n    - name: myapp\n      image: ghcr.io/pavolloffay/spring-petclinic:latest\n      env:\n        - name: OTEL_JAVAAGENT_DEBUG\n          value: 'true'\n        - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n          value: 'true'\n        - name: SPLUNK_PROFILER_ENABLED\n          value: 'false'\n        - name: JAVA_TOOL_OPTIONS\n          value: ' -javaagent:/otel-auto-instrumentation/javaagent.jar'\n        - name: OTEL_TRACES_EXPORTER\n          value: otlp\n        - name: OTEL_EXPORTER_OTLP_ENDPOINT\n          value: http://insight-agent-opentelemetry-collector.svc.cluster.local:4317\n        - name: OTEL_EXPORTER_OTLP_TIMEOUT\n          value: '20'\n        - name: OTEL_TRACES_SAMPLER\n          value: parentbased_traceidratio\n        - name: OTEL_TRACES_SAMPLER_ARG\n          value: '0.85'\n        - name: SPLUNK_TRACE_RESPONSE_HEADER_ENABLED\n          value: 'true'\n        - name: OTEL_SERVICE_NAME\n          value: my-deployment-with-sidecar\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.name\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_UID\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.uid\n        - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: spec.nodeName\n        - name: OTEL_RESOURCE_ATTRIBUTES\n         
 value: >-\n            k8s.container.name=myapp,k8s.deployment.name=my-deployment-with-sidecar,k8s.deployment.uid=8de6929d-dda0-436c-bca1-604e9ca7ea4e,k8s.namespace.name=default,k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME),k8s.pod.uid=$(OTEL_RESOURCE_ATTRIBUTES_POD_UID),k8s.replicaset.name=my-deployment-with-sidecar-565bd877dd,k8s.replicaset.uid=190d5f6e-ba7f-4794-b2e6-390b5879a6c4\n        - name: OTEL_PROPAGATORS\n          value: jaeger,b3\n      resources: {}\n      volumeMounts:\n        - name: kube-api-access-sp2mz\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n        - name: opentelemetry-auto-instrumentation\n          mountPath: /otel-auto-instrumentation\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  restartPolicy: Always\n  terminationGracePeriodSeconds: 30\n  dnsPolicy: ClusterFirst\n  serviceAccountName: default\n  serviceAccount: default\n  nodeName: k8s-master3\n  securityContext:\n    runAsUser: 1000\n    runAsGroup: 3000\n    fsGroup: 2000\n  schedulerName: default-scheduler\n  tolerations:\n    - key: node.kubernetes.io/not-ready\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n    - key: node.kubernetes.io/unreachable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n  priority: 0\n  enableServiceLinks: true\n  preemptionPolicy: PreemptLowerPriority\n
          "},{"location":"admin/insight/quickstart/otel/operator.html#_5","title":"\u94fe\u8def\u67e5\u8be2","text":"

          \u5982\u4f55\u67e5\u8be2\u5df2\u7ecf\u63a5\u5165\u7684\u670d\u52a1\uff0c\u53c2\u8003\u94fe\u8def\u67e5\u8be2\u3002

          "},{"location":"admin/insight/quickstart/otel/otel.html","title":"\u4f7f\u7528 OTel \u8d4b\u4e88\u5e94\u7528\u53ef\u89c2\u6d4b\u6027","text":"

          \u589e\u5f3a\u662f\u4f7f\u5e94\u7528\u7a0b\u5e8f\u4ee3\u7801\u80fd\u591f\u751f\u6210\u9065\u6d4b\u6570\u636e\u7684\u8fc7\u7a0b\u3002\u5373\u4e00\u4e9b\u53ef\u4ee5\u5e2e\u52a9\u60a8\u76d1\u89c6\u6216\u6d4b\u91cf\u5e94\u7528\u7a0b\u5e8f\u7684\u6027\u80fd\u548c\u72b6\u6001\u7684\u4e1c\u897f\u3002

          OpenTelemetry \u662f\u9886\u5148\u7684\u5f00\u6e90\u9879\u76ee\uff0c\u4e3a\u4e3b\u8981\u7f16\u7a0b\u8bed\u8a00\u548c\u6d41\u884c\u6846\u67b6\u63d0\u4f9b\u68c0\u6d4b\u5e93\u3002\u5b83\u662f\u4e91\u539f\u751f\u8ba1\u7b97\u57fa\u91d1\u4f1a\u4e0b\u7684\u4e00\u4e2a\u9879\u76ee\uff0c\u5f97\u5230\u4e86\u793e\u533a\u5e9e\u5927\u8d44\u6e90\u7684\u652f\u6301\u3002 \u5b83\u4e3a\u91c7\u96c6\u7684\u6570\u636e\u63d0\u4f9b\u6807\u51c6\u5316\u7684\u6570\u636e\u683c\u5f0f\uff0c\u65e0\u9700\u96c6\u6210\u7279\u5b9a\u7684\u4f9b\u5e94\u5546\u3002

          Insight \u652f\u6301\u7528\u4e8e\u68c0\u6d4b\u5e94\u7528\u7a0b\u5e8f\u7684 OpenTelemetry \u6765\u589e\u5f3a\u60a8\u7684\u5e94\u7528\u7a0b\u5e8f\u3002

          \u672c\u6307\u5357\u4ecb\u7ecd\u4e86\u4f7f\u7528 OpenTelemetry \u8fdb\u884c\u9065\u6d4b\u589e\u5f3a\u7684\u57fa\u672c\u6982\u5ff5\u3002 OpenTelemetry \u8fd8\u6709\u4e00\u4e2a\u7531\u5e93\u3001\u63d2\u4ef6\u3001\u96c6\u6210\u548c\u5176\u4ed6\u6709\u7528\u5de5\u5177\u7ec4\u6210\u7684\u751f\u6001\u7cfb\u7edf\u6765\u6269\u5c55\u5b83\u3002 \u60a8\u53ef\u4ee5\u5728 Otel Registry \u4e2d\u627e\u5230\u8fd9\u4e9b\u8d44\u6e90\u3002

          \u60a8\u53ef\u4ee5\u4f7f\u7528\u4efb\u4f55\u5f00\u653e\u6807\u51c6\u5e93\u8fdb\u884c\u9065\u6d4b\u589e\u5f3a\uff0c\u5e76\u4f7f\u7528 Insight \u4f5c\u4e3a\u53ef\u89c2\u5bdf\u6027\u540e\u7aef\u6765\u6444\u53d6\u3001\u5206\u6790\u548c\u53ef\u89c6\u5316\u6570\u636e\u3002

          \u4e3a\u4e86\u589e\u5f3a\u60a8\u7684\u4ee3\u7801\uff0c\u60a8\u53ef\u4ee5\u4f7f\u7528 OpenTelemetry \u4e3a\u7279\u5b9a\u8bed\u8a00\u63d0\u4f9b\u7684\u589e\u5f3a\u64cd\u4f5c\uff1a

          Insight \u76ee\u524d\u63d0\u4f9b\u4e86\u4f7f\u7528 OpenTelemetry \u589e\u5f3a .Net NodeJS\u3001Java\u3001Python \u548c Golang \u5e94\u7528\u7a0b\u5e8f\u7684\u7b80\u5355\u65b9\u6cd5\u3002\u8bf7\u9075\u5faa\u4ee5\u4e0b\u6307\u5357\u3002

          "},{"location":"admin/insight/quickstart/otel/otel.html#_1","title":"\u94fe\u8def\u589e\u5f3a","text":"
          • \u94fe\u8def\u63a5\u5165\u7684\u6700\u4f73\u5b9e\u8df5\uff1a\u901a\u8fc7 Operator \u5b9e\u73b0\u5e94\u7528\u7a0b\u5e8f\u65e0\u4fb5\u5165\u589e\u5f3a
          • \u4ee5 Go \u8bed\u8a00\u4e3a\u4f8b\u7684\u624b\u52a8\u57cb\u70b9\u63a5\u5165\uff1a\u4f7f\u7528 OpenTelemetry SDK \u589e\u5f3a Go \u5e94\u7528\u7a0b\u5e8f
          • \u5229\u7528 ebpf \u5b9e\u73b0 Go \u8bed\u8a00\u65e0\u4fb5\u5165\u63a2\u9488\uff08\u5b9e\u9a8c\u6027\u529f\u80fd\uff09
          "},{"location":"admin/insight/quickstart/otel/send_tracing_to_insight.html","title":"\u5411 Insight \u53d1\u9001\u94fe\u8def\u6570\u636e","text":"

          \u6b64\u6587\u6863\u4e3b\u8981\u63cf\u8ff0\u5ba2\u6237\u5e94\u7528\u5982\u4f55\u81ea\u884c\u5c06\u94fe\u8def\u6570\u636e\u4e0a\u62a5\u7ed9 Insight\u3002\u4e3b\u8981\u5305\u542b\u5982\u4e0b\u4e24\u79cd\u573a\u666f\uff1a

          1. \u5ba2\u6237\u5e94\u7528\u901a\u8fc7 OTEL Agent/SDK \u4e0a\u62a5\u94fe\u8def\u7ed9 Insight
          2. \u901a\u8fc7 Opentelemtry Collector(\u7b80\u79f0 OTEL COL) \u5c06\u94fe\u8def\u8f6c\u53d1\u7ed9 Insight

          \u5728\u6bcf\u4e2a\u5df2\u5b89\u88c5 Insight Agent \u7684\u96c6\u7fa4\u4e2d\u90fd\u6709 insight-agent-otel-col \u7ec4\u4ef6\u7528\u4e8e\u7edf\u4e00\u63a5\u6536\u8be5\u96c6\u7fa4\u7684\u94fe\u8def\u6570\u636e\u3002 \u56e0\u6b64\uff0c\u8be5\u7ec4\u4ef6\u4f5c\u4e3a\u7528\u6237\u63a5\u5165\u4fa7\u7684\u5165\u53e3\uff0c\u9700\u8981\u5148\u83b7\u53d6\u8be5\u5730\u5740\u3002\u53ef\u4ee5\u901a\u8fc7 AI \u7b97\u529b\u4e2d\u5fc3 \u754c\u9762\u83b7\u53d6\u8be5\u96c6\u7fa4 Opentelemtry Collector \u7684\u5730\u5740\uff0c \u6bd4\u5982 insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317 \uff1a
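          You can also confirm the service from within the cluster with kubectl, for example (a quick check, assuming the default component name and namespace shown above):

          kubectl -n insight-system get svc insight-agent-opentelemetry-collector\n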

          Beyond that, there are some subtle differences between the reporting methods:

          "},{"location":"admin/insight/quickstart/otel/send_tracing_to_insight.html#otel-agentsdk-insight-agent-opentelemtry-collector","title":"\u5ba2\u6237\u5e94\u7528\u901a\u8fc7 OTel Agent/SDK \u4e0a\u62a5\u94fe\u8def\u7ed9 Insight Agent Opentelemtry Collector","text":"

          \u4e3a\u4e86\u80fd\u591f\u5c06\u94fe\u8def\u6570\u636e\u6b63\u5e38\u4e0a\u62a5\u81f3 Insight \u5e76\u80fd\u591f\u5728 Insight \u6b63\u5e38\u5c55\u793a\uff0c\u9700\u8981\u5e76\u5efa\u8bae\u901a\u8fc7\u5982\u4e0b\u73af\u5883\u53d8\u91cf\u63d0\u4f9b OTLP \u6240\u9700\u7684\u5143\u6570\u636e (Resource Attribute)\uff0c\u6709\u4e24\u79cd\u65b9\u5f0f\u53ef\u5b9e\u73b0\uff1a

          • \u5728\u90e8\u7f72\u6587\u4ef6 YAML \u4e2d\u624b\u52a8\u6dfb\u52a0\uff0c\u4f8b\u5982\uff1a

            ...\n- name: OTEL_EXPORTER_OTLP_ENDPOINT\n  value: \"http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\"\n- name: \"OTEL_SERVICE_NAME\"\n  value: my-java-app-name\n- name: \"OTEL_K8S_NAMESPACE\"\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: metadata.namespace\n- name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: spec.nodeName\n- name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: metadata.name\n- name: OTEL_RESOURCE_ATTRIBUTES\n  value: \"k8s.namespace.name=$(OTEL_K8S_NAMESPACE),k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME)\"\n
          • Use Insight Agent's ability to automatically inject the metadata (resource attributes) above

            After making sure that Insight Agent is working properly and the Instrumentation CR is installed, you only need to add the following annotation to the Pod:

            instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

            For example:

            apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-with-auto-instrumentation\nspec:\n  selector:\n    matchLabels:\n      app.kubernetes.io/name: my-deployment-with-auto-instrumentation-kubernetes\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: my-deployment-with-auto-instrumentation-kubernetes\n      annotations:\n        sidecar.opentelemetry.io/inject: \"false\"\n        instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
          "},{"location":"admin/insight/quickstart/otel/send_tracing_to_insight.html#opentelemtry-collector-insight-agent-opentelemtry-collector","title":"\u901a\u8fc7 Opentelemtry Collector \u5c06\u94fe\u8def\u8f6c\u53d1\u7ed9 Insight Agent Opentelemtry Collector","text":"

          \u5728\u4fdd\u8bc1\u5e94\u7528\u6dfb\u52a0\u4e86\u5982\u4e0a\u5143\u6570\u636e\u4e4b\u540e\uff0c\u53ea\u9700\u5728\u5ba2\u6237 Opentelemtry Collector \u91cc\u9762\u65b0\u589e\u4e00\u4e2a OTLP Exporter \u5c06\u94fe\u8def\u6570\u636e\u8f6c\u53d1\u7ed9 Insight Agent Opentelemtry Collector \u5373\u53ef\uff0c\u5982\u4e0b Opentelemtry Collector \u914d\u7f6e\u6587\u4ef6\u6240\u793a\uff1a

          ...\nexporters:\n  otlp/insight:\n    endpoint: insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\nservice:\n  pipelines:\n    traces:\n      exporters:\n        - otlp/insight\n
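          For orientation, a minimal self-contained collector configuration built around this exporter might look as follows. This is a sketch: the OTLP receiver, batch processor, and insecure TLS setting are illustrative assumptions, not requirements of Insight:

          receivers:\n  otlp:\n    protocols:\n      grpc: {}\n      http: {}\nprocessors:\n  batch: {}\nexporters:\n  otlp/insight:\n    endpoint: insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n    tls:\n      insecure: true\nservice:\n  pipelines:\n    traces:\n      receivers: [otlp]\n      processors: [batch]\n      exporters: [otlp/insight]\n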
          "},{"location":"admin/insight/quickstart/otel/send_tracing_to_insight.html#_1","title":"\u53c2\u8003","text":"
          • \u901a\u8fc7 Operator \u5b9e\u73b0\u5e94\u7528\u7a0b\u5e8f\u65e0\u4fb5\u5165\u589e\u5f3a
          • \u4f7f\u7528 OTel \u8d4b\u4e88\u5e94\u7528\u53ef\u89c2\u6d4b\u6027
          "},{"location":"admin/insight/quickstart/otel/golang/golang.html","title":"\u4f7f\u7528 OTel SDK \u589e\u5f3a Go \u5e94\u7528\u7a0b\u5e8f","text":"

          Golang \u65e0\u4fb5\u5165\u5f0f\u63a5\u5165\u94fe\u8def\u8bf7\u53c2\u8003 \u901a\u8fc7 Operator \u5b9e\u73b0\u5e94\u7528\u7a0b\u5e8f\u65e0\u4fb5\u5165\u589e\u5f3a \u6587\u6863\uff0c\u901a\u8fc7\u6ce8\u89e3\u5b9e\u73b0\u81ea\u52a8\u63a5\u5165\u94fe\u8def\u3002

          OpenTelemetry \u4e5f\u7b80\u79f0\u4e3a OTel\uff0c\u662f\u4e00\u4e2a\u5f00\u6e90\u7684\u53ef\u89c2\u6d4b\u6027\u6846\u67b6\uff0c\u53ef\u4ee5\u5e2e\u52a9\u5728 Go \u5e94\u7528\u7a0b\u5e8f\u4e2d\u751f\u6210\u548c\u6536\u96c6\u9065\u6d4b\u6570\u636e\uff1a\u94fe\u8def\u3001\u6307\u6807\u548c\u65e5\u5fd7\u3002

          \u672c\u6587\u4e3b\u8981\u8bb2\u89e3\u5982\u4f55\u5728 Go \u5e94\u7528\u7a0b\u5e8f\u4e2d\u901a\u8fc7 OpenTelemetry Go SDK \u589e\u5f3a\u5e76\u63a5\u5165\u94fe\u8def\u76d1\u63a7\u3002

          "},{"location":"admin/insight/quickstart/otel/golang/golang.html#otel-sdk-go_1","title":"\u4f7f\u7528 OTel SDK \u589e\u5f3a Go \u5e94\u7528","text":""},{"location":"admin/insight/quickstart/otel/golang/golang.html#_1","title":"\u5b89\u88c5\u76f8\u5173\u4f9d\u8d56","text":"

          \u5fc5\u987b\u5148\u5b89\u88c5\u4e0e OpenTelemetry exporter \u548c SDK \u76f8\u5173\u7684\u4f9d\u8d56\u9879\u3002\u5982\u679c\u60a8\u6b63\u5728\u4f7f\u7528\u5176\u4ed6\u8bf7\u6c42\u8def\u7531\u5668\uff0c\u8bf7\u53c2\u8003\u8bf7\u6c42\u8def\u7531\u3002 \u5207\u6362/\u8fdb\u5165\u5230\u5e94\u7528\u7a0b\u5e8f\u6e90\u6587\u4ef6\u5939\u540e\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a

          go get go.opentelemetry.io/otel@v1.19.0 \\\n  go.opentelemetry.io/otel/trace@v1.19.0 \\\n  go.opentelemetry.io/otel/sdk@v1.19.0 \\\n  go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin@v0.46.1 \\\n  go.opentelemetry.io/otel/exporters/otlp/otlptrace@v1.19.0 \\\n  go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc@v1.19.0\n
          "},{"location":"admin/insight/quickstart/otel/golang/golang.html#otel-sdk","title":"\u4f7f\u7528 OTel SDK \u521b\u5efa\u521d\u59cb\u5316\u51fd\u6570","text":"

          \u4e3a\u4e86\u8ba9\u5e94\u7528\u7a0b\u5e8f\u80fd\u591f\u53d1\u9001\u6570\u636e\uff0c\u9700\u8981\u4e00\u4e2a\u51fd\u6570\u6765\u521d\u59cb\u5316 OpenTelemetry\u3002\u5728 main.go \u6587\u4ef6\u4e2d\u6dfb\u52a0\u4ee5\u4e0b\u4ee3\u7801\u7247\u6bb5:

          import (\n    \"context\"\n    \"os\"\n    \"time\"\n\n    \"go.opentelemetry.io/otel\"\n    \"go.opentelemetry.io/otel/exporters/otlp/otlptrace\"\n    \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc\"\n    \"go.opentelemetry.io/otel/propagation\"\n    \"go.opentelemetry.io/otel/sdk/resource\"\n    sdktrace \"go.opentelemetry.io/otel/sdk/trace\"\n    semconv \"go.opentelemetry.io/otel/semconv/v1.7.0\"\n    \"go.uber.org/zap\"\n    \"google.golang.org/grpc\"\n)\n\nvar tracerExp *otlptrace.Exporter\n\nfunc retryInitTracer() func() {\n    var shutdown func()\n    go func() {\n        for {\n            // otel will reconnected and re-send spans when otel col recover. so, we don't need to re-init tracer exporter.\n            if tracerExp == nil {\n                shutdown = initTracer()\n            } else {\n                break\n            }\n            time.Sleep(time.Minute * 5)\n        }\n    }()\n    return shutdown\n}\n\nfunc initTracer() func() {\n    // temporarily set timeout to 10s\n    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n    defer cancel()\n\n    serviceName, ok := os.LookupEnv(\"OTEL_SERVICE_NAME\")\n    if !ok {\n        serviceName = \"server_name\"\n        os.Setenv(\"OTEL_SERVICE_NAME\", serviceName)\n    }\n    otelAgentAddr, ok := os.LookupEnv(\"OTEL_EXPORTER_OTLP_ENDPOINT\")\n    if !ok {\n        otelAgentAddr = \"http://localhost:4317\"\n        os.Setenv(\"OTEL_EXPORTER_OTLP_ENDPOINT\", otelAgentAddr)\n    }\n    zap.S().Infof(\"OTLP Trace connect to: %s with service name: %s\", otelAgentAddr, serviceName)\n\n    traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure(), otlptracegrpc.WithDialOption(grpc.WithBlock()))\n    if err != nil {\n        handleErr(err, \"OTLP Trace gRPC Creation\")\n        return nil\n    }\n\n    tracerProvider := sdktrace.NewTracerProvider(\n        sdktrace.WithBatcher(traceExporter),\n        sdktrace.WithSampler(sdktrace.AlwaysSample()),\n    sdktrace.WithResource(resource.NewWithAttributes(semconv.SchemaURL)))\n\n    otel.SetTracerProvider(tracerProvider)\n    otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))\n\n    tracerExp = traceExporter\n    return func() {\n        // Shutdown will flush any remaining spans and shut down the exporter.\n        handleErr(tracerProvider.Shutdown(ctx), \"failed to shutdown TracerProvider\")\n    }\n}\n\nfunc handleErr(err error, message string) {\n    if err != nil {\n        zap.S().Errorf(\"%s: %v\", message, err)\n    }\n}\n
          "},{"location":"admin/insight/quickstart/otel/golang/golang.html#maingo","title":"\u5728 main.go \u4e2d\u521d\u59cb\u5316\u8ddf\u8e2a\u5668","text":"

          \u4fee\u6539 main \u51fd\u6570\u4ee5\u5728 main.go \u4e2d\u521d\u59cb\u5316\u8ddf\u8e2a\u5668\u3002\u53e6\u5916\u5f53\u60a8\u7684\u670d\u52a1\u5173\u95ed\u65f6\uff0c\u5e94\u8be5\u8c03\u7528 TracerProvider.Shutdown() \u786e\u4fdd\u5bfc\u51fa\u6240\u6709 Span\u3002\u8be5\u670d\u52a1\u5c06\u8be5\u8c03\u7528\u4f5c\u4e3a\u4e3b\u51fd\u6570\u4e2d\u7684\u5ef6\u8fdf\u51fd\u6570\uff1a

          func main() {\n    // start otel tracing\n    if shutdown := retryInitTracer(); shutdown != nil {\n        defer shutdown()\n    }\n    ......\n}\n
          "},{"location":"admin/insight/quickstart/otel/golang/golang.html#otel-gin","title":"\u4e3a\u5e94\u7528\u6dfb\u52a0 OTel Gin \u4e2d\u95f4\u4ef6","text":"

          \u901a\u8fc7\u5728 main.go \u4e2d\u6dfb\u52a0\u4ee5\u4e0b\u884c\u6765\u914d\u7f6e Gin \u4ee5\u4f7f\u7528\u4e2d\u95f4\u4ef6:

          import (\n    ....\n  \"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin\"\n)\n\nfunc main() {\n    ......\n    r := gin.Default()\n    r.Use(otelgin.Middleware(\"my-app\"))\n    ......\n}\n
          "},{"location":"admin/insight/quickstart/otel/golang/golang.html#_2","title":"\u8fd0\u884c\u5e94\u7528\u7a0b\u5e8f","text":"
          • \u672c\u5730\u8c03\u8bd5\u8fd0\u884c

            Note: this step is only for local development and debugging; in production, the Operator injects the following environment variables automatically.

            The steps above complete the SDK initialization. To debug during local development, you first need the address of insight-agent-opentelemetry-collector in the insight-system namespace, assumed here to be insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317.

            You can therefore add the following environment variables when starting the application locally:

            OTEL_SERVICE_NAME=my-golang-app OTEL_EXPORTER_OTLP_ENDPOINT=http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317 go run main.go...\n
          • Run in production

            Refer to the description of the inject-environment-variables-only annotation in Non-Intrusive Application Instrumentation via the Operator, and add the following annotation to the deployment YAML:

            instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

            If annotations cannot be used, you can manually add the following environment variables to the deployment YAML:

          \u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\nenv:\n  - name: OTEL_EXPORTER_OTLP_ENDPOINT\n    value: 'http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317'\n  - name: OTEL_SERVICE_NAME\n    value: \"your deployment name\" # (1)!\n  - name: OTEL_K8S_NAMESPACE\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: metadata.namespace\n  - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: spec.nodeName\n  - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: metadata.name\n  - name: OTEL_RESOURCE_ATTRIBUTES\n    value: 'k8s.namespace.name=$(OTEL_K8S_NAMESPACE),k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME)'\n\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n
          1. Modify this value
          "},{"location":"admin/insight/quickstart/otel/golang/golang.html#_3","title":"Request Routing","text":""},{"location":"admin/insight/quickstart/otel/golang/golang.html#opentelemetry-gingonic","title":"OpenTelemetry gin-gonic Instrumentation","text":"
          // Add one line to your import() stanza depending upon your request router:\nmiddleware \"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin\"\n

          Then inject the OpenTelemetry middleware:

          router.Use(middleware.Middleware(\"my-app\"))\n
          "},{"location":"admin/insight/quickstart/otel/golang/golang.html#opentelemetry-gorillamux","title":"OpenTelemetry gorillamux \u589e\u5f3a","text":"
          # Add one line to your import() stanza depending upon your request router:\nmiddleware \"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux\"\n

          Then inject the OpenTelemetry middleware:

          router.Use(middleware.Middleware(\"my-app\"))\n
          "},{"location":"admin/insight/quickstart/otel/golang/golang.html#grpc","title":"gRPC \u589e\u5f3a","text":"

          \u540c\u6837\uff0cOpenTelemetry \u4e5f\u53ef\u4ee5\u5e2e\u52a9\u60a8\u81ea\u52a8\u68c0\u6d4b gRPC \u8bf7\u6c42\u3002\u8981\u68c0\u6d4b\u60a8\u62e5\u6709\u7684\u4efb\u4f55 gRPC \u670d\u52a1\u5668\uff0c\u8bf7\u5c06\u62e6\u622a\u5668\u6dfb\u52a0\u5230\u670d\u52a1\u5668\u7684\u5b9e\u4f8b\u5316\u4e2d\u3002

          import (\n  grpcotel \"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc\"\n)\nfunc main() {\n  [...]\n\n    s := grpc.NewServer(\n        grpc.UnaryInterceptor(grpcotel.UnaryServerInterceptor()),\n        grpc.StreamInterceptor(grpcotel.StreamServerInterceptor()),\n    )\n}\n

          Note that if your program uses a gRPC client to call third-party services, you also need to add interceptors to the gRPC client:

              [...]\n\n    conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()),\n        grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()),\n        grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()),\n    )\n
          "},{"location":"admin/insight/quickstart/otel/golang/golang.html#_4","title":"\u5982\u679c\u4e0d\u4f7f\u7528\u8bf7\u6c42\u8def\u7531","text":"
          import (\n  \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\"\n)\n

          Everywhere an http.Handler is passed to a ServeMux, wrap the handler function. For example, you would make the following replacements:

          - mux.Handle(\"/path\", h)\n+ mux.Handle(\"/path\", otelhttp.NewHandler(h, \"description of path\"))\n---\n- mux.Handle(\"/path\", http.HandlerFunc(f))\n+ mux.Handle(\"/path\", otelhttp.NewHandler(http.HandlerFunc(f), \"description of path\"))\n

          In this way, every function wrapped with otelhttp automatically collects its metadata and starts the corresponding trace.

          "},{"location":"admin/insight/quickstart/otel/golang/golang.html#_5","title":"\u6570\u636e\u5e93\u8bbf\u95ee\u589e\u5f3a","text":""},{"location":"admin/insight/quickstart/otel/golang/golang.html#golang-gorm","title":"Golang Gorm","text":"

          OpenTelemetry \u793e\u533a\u4e5f\u5f00\u53d1\u4e86\u6570\u636e\u5e93\u8bbf\u95ee\u5e93\u7684\u4e2d\u95f4\u4ef6\uff0c\u6bd4\u5982 Gorm:

          import (\n    \"github.com/uptrace/opentelemetry-go-extra/otelgorm\"\n    \"gorm.io/driver/sqlite\"\n    \"gorm.io/gorm\"\n)\n\ndb, err := gorm.Open(sqlite.Open(\"file::memory:?cache=shared\"), &gorm.Config{})\nif err != nil {\n    panic(err)\n}\n\notelPlugin := otelgorm.NewPlugin(otelgorm.WithDBName(\"mydb\"), # \u7f3a\u5931\u4f1a\u5bfc\u81f4\u6570\u636e\u5e93\u76f8\u5173\u62d3\u6251\u5c55\u793a\u4e0d\u5b8c\u6574\n    otelgorm.WithAttributes(semconv.ServerAddress(\"memory\"))) # \u7f3a\u5931\u4f1a\u5bfc\u81f4\u6570\u636e\u5e93\u76f8\u5173\u62d3\u6251\u5c55\u793a\u4e0d\u5b8c\u6574\nif err := db.Use(otelPlugin); err != nil {\n    panic(err)\n}\n

          "},{"location":"admin/insight/quickstart/otel/golang/golang.html#span","title":"\u81ea\u5b9a\u4e49 Span","text":"

          \u5f88\u591a\u65f6\u5019\uff0cOpenTelemetry \u63d0\u4f9b\u7684\u4e2d\u95f4\u4ef6\u4e0d\u80fd\u5e2e\u52a9\u6211\u4eec\u8bb0\u5f55\u66f4\u591a\u5185\u90e8\u8c03\u7528\u7684\u51fd\u6570\uff0c\u9700\u8981\u6211\u4eec\u81ea\u5b9a\u4e49 Span \u6765\u8bb0\u5f55

           \u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n    _, span := otel.Tracer(\"GetServiceDetail\").Start(ctx,\n        \"spanMetricDao.GetServiceDetail\",\n        trace.WithSpanKind(trace.SpanKindInternal))\n    defer span.End()\n  \u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n
          "},{"location":"admin/insight/quickstart/otel/golang/golang.html#span_1","title":"\u5411 span \u6dfb\u52a0\u81ea\u5b9a\u4e49\u5c5e\u6027\u548c\u4e8b\u4ef6","text":"

          \u4e5f\u53ef\u4ee5\u5c06\u81ea\u5b9a\u4e49\u5c5e\u6027\u6216\u6807\u7b7e\u8bbe\u7f6e\u4e3a Span\u3002\u8981\u6dfb\u52a0\u81ea\u5b9a\u4e49\u5c5e\u6027\u548c\u4e8b\u4ef6\uff0c\u8bf7\u6309\u7167\u4ee5\u4e0b\u6b65\u9aa4\u64cd\u4f5c\uff1a

          "},{"location":"admin/insight/quickstart/otel/golang/golang.html#_6","title":"\u5bfc\u5165\u8ddf\u8e2a\u548c\u5c5e\u6027\u5e93","text":"
          import (\n    ...\n    \"go.opentelemetry.io/otel/attribute\"\n    \"go.opentelemetry.io/otel/trace\"\n)\n
          "},{"location":"admin/insight/quickstart/otel/golang/golang.html#span_2","title":"\u4ece\u4e0a\u4e0b\u6587\u4e2d\u83b7\u53d6\u5f53\u524d Span","text":"
          span := trace.SpanFromContext(c.Request.Context())\n
          "},{"location":"admin/insight/quickstart/otel/golang/golang.html#span_3","title":"\u5728\u5f53\u524d Span \u4e2d\u8bbe\u7f6e\u5c5e\u6027","text":"
          span.SetAttributes(attribute.String(\"controller\", \"books\"))\n
          "},{"location":"admin/insight/quickstart/otel/golang/golang.html#span-event","title":"\u4e3a\u5f53\u524d Span \u6dfb\u52a0 Event","text":"

          \u6dfb\u52a0 span \u4e8b\u4ef6\u662f\u4f7f\u7528 span \u5bf9\u8c61\u4e0a\u7684 AddEvent \u5b8c\u6210\u7684\u3002

          span.AddEvent(msg)\n
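          An event can also carry its own attributes; a small sketch (the event name and attribute key are illustrative), using the trace and attribute packages imported above:

          span.AddEvent(\"cache miss\", trace.WithAttributes(attribute.String(\"cache.key\", key)))\n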
          "},{"location":"admin/insight/quickstart/otel/golang/golang.html#_7","title":"\u8bb0\u5f55\u9519\u8bef\u548c\u5f02\u5e38","text":"
          import \"go.opentelemetry.io/otel/codes\"\n\n// \u83b7\u53d6\u5f53\u524d span\nspan := trace.SpanFromContext(ctx)\n\n// RecordError \u4f1a\u81ea\u52a8\u5c06\u4e00\u4e2a\u9519\u8bef\u8f6c\u6362\u6210 span even\nspan.RecordError(err)\n\n// \u6807\u8bb0\u8fd9\u4e2a span \u9519\u8bef\nspan.SetStatus(codes.Error, \"internal error\")\n
          "},{"location":"admin/insight/quickstart/otel/golang/golang.html#_8","title":"\u53c2\u8003","text":"

          \u6709\u5173 Demo \u6f14\u793a\u8bf7\u53c2\u8003\uff1a - opentelemetry-demo/productcatalogservice - opentelemetry-collector-contrib/demo

          "},{"location":"admin/insight/quickstart/otel/golang/meter.html","title":"\u4f7f\u7528 OTel SDK \u4e3a\u5e94\u7528\u7a0b\u5e8f\u66b4\u9732\u6307\u6807","text":"

          \u672c\u6587\u4ec5\u4f9b\u5e0c\u671b\u8bc4\u4f30\u6216\u63a2\u7d22\u6b63\u5728\u5f00\u53d1\u7684 OTLP \u6307\u6807\u7684\u7528\u6237\u53c2\u8003\u3002

          OpenTelemetry \u9879\u76ee\u8981\u6c42\u4ee5\u5fc5\u987b\u5728 OpenTelemetry \u534f\u8bae (OTLP) \u4e2d\u53d1\u51fa\u6570\u636e\u7684\u8bed\u8a00\u63d0\u4f9b API \u548c SDK\u3002

          "},{"location":"admin/insight/quickstart/otel/golang/meter.html#golang","title":"\u9488\u5bf9 Golang \u5e94\u7528\u7a0b\u5e8f","text":"

          Golang \u53ef\u4ee5\u901a\u8fc7 sdk \u66b4\u9732 runtime \u6307\u6807\uff0c\u5177\u4f53\u6765\u8bf4\uff0c\u5728\u5e94\u7528\u4e2d\u6dfb\u52a0\u4ee5\u4e0b\u65b9\u6cd5\u5f00\u542f metrics \u66b4\u9732\u5668\uff1a

          "},{"location":"admin/insight/quickstart/otel/golang/meter.html#_1","title":"\u5b89\u88c5\u76f8\u5173\u4f9d\u8d56","text":"

          \u5207\u6362/\u8fdb\u5165\u5230\u5e94\u7528\u7a0b\u5e8f\u6e90\u6587\u4ef6\u5939\u540e\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a

          go get go.opentelemetry.io/otel \\\n  go.opentelemetry.io/otel/attribute \\\n  go.opentelemetry.io/otel/exporters/prometheus \\\n  go.opentelemetry.io/otel/metric/global \\\n  go.opentelemetry.io/otel/metric/instrument \\\n  go.opentelemetry.io/otel/sdk/metric\n
          "},{"location":"admin/insight/quickstart/otel/golang/meter.html#otel-sdk_1","title":"\u4f7f\u7528 OTel SDK \u521b\u5efa\u521d\u59cb\u5316\u51fd\u6570","text":"
          import (\n    .....\n\n    \"go.opentelemetry.io/otel/attribute\"\n    otelPrometheus \"go.opentelemetry.io/otel/exporters/prometheus\"\n    \"go.opentelemetry.io/otel/metric/global\"\n    \"go.opentelemetry.io/otel/metric/instrument\"\n    \"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram\"\n    controller \"go.opentelemetry.io/otel/sdk/metric/controller/basic\"\n    \"go.opentelemetry.io/otel/sdk/metric/export/aggregation\"\n    processor \"go.opentelemetry.io/otel/sdk/metric/processor/basic\"\n    selector \"go.opentelemetry.io/otel/sdk/metric/selector/simple\"\n)\nfunc (s *insightServer) initMeter() *otelPrometheus.Exporter {\n    s.meter = global.Meter(\"xxx\")\n\n    config := otelPrometheus.Config{\n        DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50},\n        Gatherer:                   prometheus.DefaultGatherer,\n        Registry:                   prometheus.NewRegistry(),\n        Registerer:                 prometheus.DefaultRegisterer,\n    }\n\n    c := controller.New(\n        processor.NewFactory(\n            selector.NewWithHistogramDistribution(\n                histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),\n            ),\n            aggregation.CumulativeTemporalitySelector(),\n            processor.WithMemory(true),\n        ),\n    )\n\n    exporter, err := otelPrometheus.New(config, c)\n    if err != nil {\n        zap.S().Panicf(\"failed to initialize prometheus exporter %v\", err)\n    }\n\n    global.SetMeterProvider(exporter.MeterProvider())\n\n    http.HandleFunc(\"/metrics\", exporter.ServeHTTP)\n\n    port := 8888\n    go func() {\n        _ = http.ListenAndServe(fmt.Sprintf(\":%d\", port), nil)\n    }()\n\n    zap.S().Info(\"Prometheus server running on \", fmt.Sprintf(\":%d\", port))\n    return exporter\n}\n

          The method above exposes a metrics endpoint for your application: http://localhost:8888/metrics

          Then, initialize it in main.go:

          func main() {\n\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n    tp := initMeter()\n\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n}\n

          In addition, if you want to add custom metrics, you can refer to the following:

          // exposeLoggingMetric exposes a metric like \"insight_logging_count{} 1\"\nfunc (s *insightServer) exposeLoggingMetric(lserver *log.LogService) {\n    s.meter = global.Meter(\"insight.io/basic\")\n\n    var lock sync.Mutex\n    logCounter, err := s.meter.AsyncFloat64().Counter(\"insight_log_total\")\n    if err != nil {\n        zap.S().Panicf(\"failed to initialize instrument: %v\", err)\n    }\n\n    _ = s.meter.RegisterCallback([]instrument.Asynchronous{logCounter}, func(ctx context.Context) {\n        lock.Lock()\n        defer lock.Unlock()\n        count, err := lserver.Count(ctx)\n        // only record the count when it was retrieved successfully\n        if err == nil && count != -1 {\n            logCounter.Observe(ctx, float64(count))\n        }\n    })\n}\n

          Then, call this method in main.go:

          \u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\ns.exposeLoggingMetric(lservice)\n\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n

          You can visit http://localhost:8888/metrics to check whether your metrics are working.

          "},{"location":"admin/insight/quickstart/otel/golang/meter.html#java","title":"\u9488\u5bf9 Java \u5e94\u7528\u7a0b\u5e8f","text":"

          Java \u5728\u4f7f\u7528 otel agent \u5728\u5b8c\u6210\u94fe\u8def\u7684\u81ea\u52a8\u63a5\u5165\u7684\u57fa\u7840\u4e0a\uff0c\u901a\u8fc7\u6dfb\u52a0\u73af\u5883\u53d8\u91cf\uff1a

          OTEL_METRICS_EXPORTER=prometheus\n

          to expose JVM-related metrics directly. You can visit http://localhost:8888/metrics to check whether your metrics are working.
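          In a Kubernetes workload this is typically set as a container environment variable; a minimal sketch (OTEL_EXPORTER_PROMETHEUS_PORT is an assumption used here to align the agent with the 8888 port of these examples; the agent's default port may differ):

          env:\n  - name: OTEL_METRICS_EXPORTER\n    value: \"prometheus\"\n  - name: OTEL_EXPORTER_PROMETHEUS_PORT\n    value: \"8888\"\n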

          Then, combined with a Prometheus ServiceMonitor, the metrics integration is complete. To expose custom metrics, see opentelemetry-java-docs/prometheus.

          This involves two main steps:

          • Create a meter provider and specify Prometheus as the exporter.

            /*\n* Copyright The OpenTelemetry Authors\n* SPDX-License-Identifier: Apache-2.0\n*/\n\npackage io.opentelemetry.example.prometheus;\n\nimport io.opentelemetry.api.metrics.MeterProvider;\nimport io.opentelemetry.exporter.prometheus.PrometheusHttpServer;\nimport io.opentelemetry.sdk.metrics.SdkMeterProvider;\nimport io.opentelemetry.sdk.metrics.export.MetricReader;\n\npublic final class ExampleConfiguration {\n\n  /**\n  * Initializes the Meter SDK and configures the prometheus collector with all default settings.\n  *\n  * @param prometheusPort the port to open up for scraping.\n  * @return A MeterProvider for use in instrumentation.\n  */\n  static MeterProvider initializeOpenTelemetry(int prometheusPort) {\n    MetricReader prometheusReader = PrometheusHttpServer.builder().setPort(prometheusPort).build();\n\n    return SdkMeterProvider.builder().registerMetricReader(prometheusReader).build();\n  }\n}\n
          • Define a custom meter and start an HTTP server

            package io.opentelemetry.example.prometheus;\n\nimport io.opentelemetry.api.common.Attributes;\nimport io.opentelemetry.api.metrics.Meter;\nimport io.opentelemetry.api.metrics.MeterProvider;\nimport java.util.concurrent.ThreadLocalRandom;\n\n/**\n* Example of using the PrometheusHttpServer to convert OTel metrics to Prometheus format and expose\n* these to a Prometheus instance via a HttpServer exporter.\n*\n* <p>A Gauge is used to periodically measure how many incoming messages are awaiting processing.\n* The Gauge callback gets executed every collection interval.\n*/\npublic final class PrometheusExample {\n  private long incomingMessageCount;\n\n  public PrometheusExample(MeterProvider meterProvider) {\n    Meter meter = meterProvider.get(\"PrometheusExample\");\n    meter\n        .gaugeBuilder(\"incoming.messages\")\n        .setDescription(\"No of incoming messages awaiting processing\")\n        .setUnit(\"message\")\n        .buildWithCallback(result -> result.record(incomingMessageCount, Attributes.empty()));\n  }\n\n  void simulate() {\n    for (int i = 500; i > 0; i--) {\n      try {\n        System.out.println(\n            i + \" Iterations to go, current incomingMessageCount is:  \" + incomingMessageCount);\n        incomingMessageCount = ThreadLocalRandom.current().nextLong(100);\n        Thread.sleep(1000);\n      } catch (InterruptedException e) {\n        // ignored here\n      }\n    }\n  }\n\n  public static void main(String[] args) {\n    int prometheusPort = 8888;\n\n    // it is important to initialize the OpenTelemetry SDK as early as possible in your process.\n    MeterProvider meterProvider = ExampleConfiguration.initializeOpenTelemetry(prometheusPort);\n\n    PrometheusExample prometheusExample = new PrometheusExample(meterProvider);\n\n    prometheusExample.simulate();\n\n    System.out.println(\"Exiting\");\n  }\n}\n

          Then, once the Java application is running, you can visit http://localhost:8888/metrics to check whether your metrics are working.

          "},{"location":"admin/insight/quickstart/otel/golang/meter.html#insight","title":"Insight \u91c7\u96c6\u6307\u6807","text":"

          \u6700\u540e\u91cd\u8981\u7684\u662f\uff0c\u60a8\u5df2\u7ecf\u5728\u5e94\u7528\u7a0b\u5e8f\u4e2d\u66b4\u9732\u51fa\u4e86\u6307\u6807\uff0c\u73b0\u5728\u9700\u8981 Insight \u6765\u91c7\u96c6\u6307\u6807\u3002

          \u63a8\u8350\u7684\u6307\u6807\u66b4\u9732\u65b9\u5f0f\u662f\u901a\u8fc7 servicemonitor \u6216\u8005 podmonitor\u3002

          "},{"location":"admin/insight/quickstart/otel/golang/meter.html#servicemonitorpodmonitor","title":"\u521b\u5efa servicemonitor/podmonitor","text":"

          \u6dfb\u52a0\u7684 servicemonitor/podmonitor \u9700\u8981\u6253\u4e0a label\uff1a\"operator.insight.io/managed-by\": \"insight\" \u624d\u4f1a\u88ab Operator \u8bc6\u522b\uff1a

          apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: example-app\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  selector:\n    matchLabels:\n      app: example-app\n  endpoints:\n  - port: web\n  namespaceSelector:\n    any: true\n
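          When there is no Service in front of the Pods, a PodMonitor works the same way; a sketch under the same labeling rule (names and port are illustrative):

          apiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: example-app\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  selector:\n    matchLabels:\n      app: example-app\n  podMetricsEndpoints:\n  - port: web\n  namespaceSelector:\n    any: true\n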
          "},{"location":"admin/insight/quickstart/otel/java/index.html","title":"\u5f00\u59cb\u76d1\u63a7 Java \u5e94\u7528","text":"
          1. Java \u5e94\u7528\u94fe\u8def\u63a5\u5165\u4e0e\u76d1\u63a7\u8bf7\u53c2\u8003 \u901a\u8fc7 Operator \u5b9e\u73b0\u5e94\u7528\u7a0b\u5e8f\u65e0\u4fb5\u5165\u589e\u5f3a \u6587\u6863\uff0c\u901a\u8fc7\u6ce8\u89e3\u5b9e\u73b0\u81ea\u52a8\u63a5\u5165\u94fe\u8def\u3002

          2. Java \u5e94\u7528\u7684 JVM \u8fdb\u884c\u76d1\u63a7\uff1a\u5df2\u7ecf\u66b4\u9732 JVM \u6307\u6807\u548c\u4ecd\u672a\u66b4\u9732 JVM \u6307\u6807\u7684 Java \u5e94\u7528\u5982\u4f55\u4e0e\u53ef\u89c2\u6d4b\u6027 Insight \u5bf9\u63a5\u3002

          3. \u5982\u679c\u60a8\u7684 Java \u5e94\u7528\u672a\u5f00\u59cb\u66b4\u9732 JVM \u6307\u6807\uff0c\u60a8\u53ef\u4ee5\u53c2\u8003\u5982\u4e0b\u6587\u6863\uff1a

            • \u4f7f\u7528 JMX Exporter \u66b4\u9732 JVM \u76d1\u63a7\u6307\u6807
            • \u4f7f\u7528 OpenTelemetry Java Agent \u66b4\u9732 JVM \u76d1\u63a7\u6307\u6807
          4. \u5982\u679c\u60a8\u7684 Java \u5e94\u7528\u5df2\u7ecf\u66b4\u9732 JVM \u6307\u6807\uff0c\u60a8\u53ef\u4ee5\u53c2\u8003\u5982\u4e0b\u6587\u6863\uff1a

            • \u5df2\u6709 JVM \u6307\u6807\u7684 Java \u5e94\u7528\u5bf9\u63a5\u53ef\u89c2\u6d4b\u6027
          5. \u5c06 TraceId \u548c SpanId \u5199\u5165 Java \u5e94\u7528\u65e5\u5fd7, \u5b9e\u73b0\u94fe\u8def\u6570\u636e\u4e0e\u65e5\u5fd7\u6570\u636e\u5173\u8054

          "},{"location":"admin/insight/quickstart/otel/java/mdc.html","title":"\u5c06 TraceId \u548c SpanId \u5199\u5165 Java \u5e94\u7528\u65e5\u5fd7","text":"

          \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528 OpenTelemetry \u5c06 TraceId \u548c SpanId \u81ea\u52a8\u5199\u5165 Java \u5e94\u7528\u65e5\u5fd7\u3002 TraceId \u4e0e SpanId \u5199\u5165\u65e5\u5fd7\u540e\uff0c\u60a8\u53ef\u4ee5\u5c06\u5206\u5e03\u5f0f\u94fe\u8def\u6570\u636e\u4e0e\u65e5\u5fd7\u6570\u636e\u5173\u8054\u8d77\u6765\uff0c\u5b9e\u73b0\u66f4\u9ad8\u6548\u7684\u6545\u969c\u8bca\u65ad\u548c\u6027\u80fd\u5206\u6790\u3002

          "},{"location":"admin/insight/quickstart/otel/java/mdc.html#_1","title":"\u652f\u6301\u7684\u65e5\u5fd7\u5e93","text":"

          \u66f4\u591a\u4fe1\u606f\uff0c\u8bf7\u53c2\u89c1 Logger MDC auto-instrumentation\u3002

          Logging framework   Versions supported by auto-instrumentation   Dependency required for manual instrumentation
          Log4j 1             1.2+                                         none
          Log4j 2             2.7+                                         opentelemetry-log4j-context-data-2.17-autoconfigure
          Logback             1.0+                                         opentelemetry-logback-mdc-1.0
          "},{"location":"admin/insight/quickstart/otel/java/mdc.html#logbackspringboot","title":"Using Logback (Spring Boot Projects)","text":"

          Spring Boot projects come with a built-in logging framework and use Logback as the default logging implementation. If your Java project is a Spring Boot project, only a small amount of configuration is needed to write TraceId into the logs.

          Set logging.pattern.level in application.properties, adding %mdc{trace_id} and %mdc{span_id} to the log pattern.

          logging.pattern.level=trace_id=%mdc{trace_id} span_id=%mdc{span_id} %5p ....omitted...\n

          A sample log:

          2024-06-26 10:56:31.200 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.a.c.c.C.[Tomcat].[localhost].[/]       : Initializing Spring DispatcherServlet 'dispatcherServlet'\n2024-06-26 10:56:31.201 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.s.web.servlet.DispatcherServlet        : Initializing Servlet 'dispatcherServlet'\n2024-06-26 10:56:31.209 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.s.web.servlet.DispatcherServlet        : Completed initialization in 8 ms\n2024-06-26 10:56:31.296 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=5743699405074f4e  INFO 53724 --- [nio-8081-exec-1] com.example.httpserver.ot.OTServer       : hello world\n
          "},{"location":"admin/insight/quickstart/otel/java/mdc.html#log4j2","title":"\u4f7f\u7528 Log4j2","text":"
          1. \u5728 pom.xml \u4e2d\u6dfb\u52a0 OpenTelemetry Log4j2 \u4f9d\u8d56:

            Tip

            Replace OPENTELEMETRY_VERSION with the latest version

            <dependencies>\n  <dependency>\n    <groupId>io.opentelemetry.instrumentation</groupId>\n    <artifactId>opentelemetry-log4j-context-data-2.17-autoconfigure</artifactId>\n    <version>OPENTELEMETRY_VERSION</version>\n    <scope>runtime</scope>\n  </dependency>\n</dependencies>\n
          2. Modify the log4j2.xml configuration, adding %X{trace_id} and %X{span_id} to the pattern, so TraceId and SpanId are written into the logs automatically:

            <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Configuration>\n  <Appenders>\n    <Console name=\"Console\" target=\"SYSTEM_OUT\">\n      <PatternLayout\n          pattern=\"%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} trace_id=%X{trace_id} span_id=%X{span_id} trace_flags=%X{trace_flags} - %msg%n\"/>\n    </Console>\n  </Appenders>\n  <Loggers>\n    <Root>\n      <AppenderRef ref=\"Console\" level=\"All\"/>\n    </Root>\n  </Loggers>\n</Configuration>\n
          3. If using Logback, add the OpenTelemetry Logback dependency in pom.xml.

            Tip

            Replace OPENTELEMETRY_VERSION with the latest version

            <dependencies>\n  <dependency>\n    <groupId>io.opentelemetry.instrumentation</groupId>\n    <artifactId>opentelemetry-logback-mdc-1.0</artifactId>\n    <version>OPENTELEMETRY_VERSION</version>\n  </dependency>\n</dependencies>\n
          4. Modify the logback.xml configuration, adding %X{trace_id} and %X{span_id} to the pattern, so TraceId and SpanId are written into the logs automatically:

            <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<configuration>\n  <appender name=\"CONSOLE\" class=\"ch.qos.logback.core.ConsoleAppender\">\n    <encoder>\n      <pattern>%d{HH:mm:ss.SSS} trace_id=%X{trace_id} span_id=%X{span_id} trace_flags=%X{trace_flags} %msg%n</pattern>\n    </encoder>\n  </appender>\n\n  <!-- Just wrap your logging appender, for example ConsoleAppender, with OpenTelemetryAppender -->\n  <appender name=\"OTEL\" class=\"io.opentelemetry.instrumentation.logback.mdc.v1_0.OpenTelemetryAppender\">\n    <appender-ref ref=\"CONSOLE\"/>\n  </appender>\n\n  <!-- Use the wrapped \"OTEL\" appender instead of the original \"CONSOLE\" one -->\n  <root level=\"INFO\">\n    <appender-ref ref=\"OTEL\"/>\n  </root>\n\n</configuration>\n
          "},{"location":"admin/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html","title":"\u4f7f\u7528 JMX Exporter \u66b4\u9732 JVM \u76d1\u63a7\u6307\u6807","text":"

          JMX-Exporter \u63d0\u4f9b\u4e86\u4e24\u79cd\u7528\u6cd5:

          1. \u542f\u52a8\u72ec\u7acb\u8fdb\u7a0b\u3002JVM \u542f\u52a8\u65f6\u6307\u5b9a\u53c2\u6570\uff0c\u66b4\u9732 JMX \u7684 RMI \u63a5\u53e3\uff0cJMX Exporter \u8c03\u7528 RMI \u83b7\u53d6 JVM \u8fd0\u884c\u65f6\u72b6\u6001\u6570\u636e\uff0c \u8f6c\u6362\u4e3a Prometheus metrics \u683c\u5f0f\uff0c\u5e76\u66b4\u9732\u7aef\u53e3\u8ba9 Prometheus \u91c7\u96c6\u3002
          2. JVM \u8fdb\u7a0b\u5185\u542f\u52a8(in-process)\u3002JVM \u542f\u52a8\u65f6\u6307\u5b9a\u53c2\u6570\uff0c\u901a\u8fc7 javaagent \u7684\u5f62\u5f0f\u8fd0\u884c JMX-Exporter \u7684 jar \u5305\uff0c \u8fdb\u7a0b\u5185\u8bfb\u53d6 JVM \u8fd0\u884c\u65f6\u72b6\u6001\u6570\u636e\uff0c\u8f6c\u6362\u4e3a Prometheus metrics \u683c\u5f0f\uff0c\u5e76\u66b4\u9732\u7aef\u53e3\u8ba9 Prometheus \u91c7\u96c6\u3002

          Note

          The first mode is not officially recommended: its configuration is complex, and it requires a separate process whose own monitoring then becomes a new problem. This article therefore focuses on the second mode, explaining how to use JMX Exporter to expose JVM metrics in a Kubernetes environment.

          With the second mode, the JMX Exporter JAR file and configuration file must be specified when starting the JVM. The JAR is a binary file that does not lend itself to mounting via a ConfigMap, and the configuration file rarely needs modification, so the recommendation is to package both the JMX Exporter JAR and its configuration file directly into the business container image.

          For this mode, you can either place the JMX Exporter JAR file in the business application image, or mount it in at deployment time. Here is an introduction to each approach:

          "},{"location":"admin/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html#jmx-exporter-jar","title":"\u65b9\u5f0f\u4e00\uff1a\u5c06 JMX Exporter JAR \u6587\u4ef6\u6784\u5efa\u81f3\u4e1a\u52a1\u955c\u50cf\u4e2d","text":"

          prometheus-jmx-config.yaml \u5185\u5bb9\u5982\u4e0b\uff1a

          prometheus-jmx-config.yaml
          ...\nssl: false\nlowercaseOutputName: false\nlowercaseOutputLabelNames: false\nrules:\n- pattern: \".*\"\n

          Note

          For more configuration options, see the notes at the bottom or the official Prometheus documentation.

          Then prepare the JAR file. You can find the download link for the latest JAR on the jmx_exporter GitHub page, and refer to the following Dockerfile:

          FROM openjdk:11.0.15-jre\nWORKDIR /app/\nCOPY target/my-app.jar ./\nCOPY prometheus-jmx-config.yaml ./\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\nENV JAVA_TOOL_OPTIONS=-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\nEXPOSE 8081 8999 8080 8888\nENTRYPOINT java $JAVA_OPTS -jar my-app.jar\n

          Notes:

          • Startup parameter format: -javaagent:<jar-path>=<port>:<config-file-path>
          • Port 8088 is used here to expose the JVM metrics; if it conflicts with the Java application, change it as needed
          "},{"location":"admin/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html#init-container","title":"\u65b9\u5f0f\u4e8c\uff1a\u901a\u8fc7 init container \u5bb9\u5668\u6302\u8f7d","text":"

          \u6211\u4eec\u9700\u8981\u5148\u5c06 JMX exporter \u505a\u6210 Docker \u955c\u50cf, \u4ee5\u4e0b Dockerfile \u4ec5\u4f9b\u53c2\u8003\uff1a

          FROM alpine/curl:3.14\nWORKDIR /app/\n# Copy the config file created earlier into the image\nCOPY prometheus-jmx-config.yaml ./\n# Download the jmx prometheus javaagent JAR online\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\n

          Build the image from the Dockerfile above: docker build -t my-jmx-exporter .

          Add the following init container to the Java application's deployment YAML:

          Click to expand the YAML file
          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-demo-app\n  labels:\n    app: my-demo-app\nspec:\n  selector:\n    matchLabels:\n      app: my-demo-app\n  template:\n    metadata:\n      labels:\n        app: my-demo-app\n    spec:\n      imagePullSecrets:\n      - name: registry-pull\n      initContainers:\n      - name: jmx-sidecar\n        image: my-jmx-exporter\n        # copy both the agent JAR and its config into the shared volume\n        command: [\"cp\", \"/app/jmx_prometheus_javaagent-0.17.2.jar\", \"/app/prometheus-jmx-config.yaml\", \"/target/\"]  \u278a\n        volumeMounts:\n        - name: sidecar\n          mountPath: /target\n      containers:\n      - image: my-demo-app-image\n        name: my-demo-app\n        resources:\n          requests:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n          limits:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n        ports:\n        - containerPort: 18083\n        env:\n        - name: JAVA_TOOL_OPTIONS\n          value: \"-javaagent:/sidecar/jmx_prometheus_javaagent-0.17.2.jar=8088:/sidecar/prometheus-jmx-config.yaml\" \u278b\n        volumeMounts:\n        - name: host-time\n          mountPath: /etc/localtime\n          readOnly: true\n        - name: sidecar\n          mountPath: /sidecar\n      volumes:\n      - name: host-time\n        hostPath:\n          path: /etc/localtime\n      - name: sidecar  # shared agent folder\n        emptyDir: {}\n      restartPolicy: Always\n

          After the modifications above, the sample application my-demo-app is able to expose JVM metrics. Once the service is running, the Prometheus-format metrics it exposes can be accessed at http://localhost:8088.
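          To verify from outside the cluster, a quick check might look like this (a sketch, assuming the deployment name and port above):

          kubectl port-forward deploy/my-demo-app 8088:8088\ncurl http://localhost:8088/metrics\n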

          Next, you can refer to Connecting Java Applications with Existing JVM Metrics to Observability.

          "},{"location":"admin/insight/quickstart/otel/java/jvm-monitor/legacy-jvm.html","title":"\u5df2\u6709 JVM \u6307\u6807\u7684 Java \u5e94\u7528\u5bf9\u63a5\u53ef\u89c2\u6d4b\u6027","text":"

          \u5982\u679c\u60a8\u7684 Java \u5e94\u7528\u901a\u8fc7\u5176\u4ed6\u65b9\u5f0f\uff08\u6bd4\u5982 Spring Boot Actuator\uff09\u66b4\u9732\u4e86 JVM \u7684\u76d1\u63a7\u6307\u6807\uff0c \u6211\u4eec\u9700\u8981\u8ba9\u76d1\u63a7\u6570\u636e\u88ab\u91c7\u96c6\u5230\u3002\u60a8\u53ef\u4ee5\u901a\u8fc7\u5728\u5de5\u4f5c\u8d1f\u8f7d\u4e2d\u6dfb\u52a0\u6ce8\u89e3\uff08Kubernetes Annotations\uff09\u7684\u65b9\u5f0f\u8ba9 Insight \u6765\u91c7\u96c6\u5df2\u6709\u7684 JVM \u6307\u6807\uff1a

          annotations:\n  insight.opentelemetry.io/metric-scrape: \"true\" # whether to scrape\n  insight.opentelemetry.io/metric-path: \"/\"      # path to scrape metrics from\n  insight.opentelemetry.io/metric-port: \"9464\"   # port to scrape metrics from\n

          For example, add the annotations to my-deployment-app:

          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-app\nspec:\n  selector:\n    matchLabels:\n      app: my-deployment-app\n      app.kubernetes.io/name: my-deployment-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-deployment-app\n        app.kubernetes.io/name: my-deployment-app\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\" # whether to scrape\n        insight.opentelemetry.io/metric-path: \"/\"      # path to scrape metrics from\n        insight.opentelemetry.io/metric-port: \"9464\"   # port to scrape metrics from\n

          Below is a complete example:

          ---\napiVersion: v1\nkind: Service\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  type: NodePort\n  selector:\n    app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  ports:\n    - name: http\n      port: 8080\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  selector:\n    matchLabels:\n      app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\"               # whether to scrape\n        insight.opentelemetry.io/metric-path: \"/actuator/prometheus\" # path to scrape metrics from\n        insight.opentelemetry.io/metric-port: \"8080\"                 # port to scrape metrics from\n    spec:\n      containers:\n        - name: myapp\n          image: docker.m.daocloud.io/wutang/spring-boot-actuator-prometheus-metrics-demo\n          ports:\n            - name: http\n              containerPort: 8080\n          resources:\n            limits:\n              cpu: 500m\n              memory: 800Mi\n            requests:\n              cpu: 200m\n              memory: 400Mi\n

          In the example above, Insight scrapes the Prometheus metrics exposed via Spring Boot Actuator at :8080/actuator/prometheus.

          "},{"location":"admin/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html","title":"\u4f7f\u7528 OpenTelemetry Java Agent \u66b4\u9732 JVM \u76d1\u63a7\u6307\u6807","text":"

          \u5728 Opentelemetry Agent v1.20.0 \u53ca\u4ee5\u4e0a\u7248\u672c\u4e2d\uff0cOpentelemetry Agent \u65b0\u589e\u4e86 JMX Metric Insight \u6a21\u5757\uff0c\u5982\u679c\u4f60\u7684\u5e94\u7528\u5df2\u7ecf\u96c6\u6210\u4e86 Opentelemetry Agent \u53bb\u91c7\u96c6\u5e94\u7528\u94fe\u8def\uff0c\u90a3\u4e48\u4f60\u4e0d\u518d\u9700\u8981\u53e6\u5916\u5f15\u5165\u5176\u4ed6 Agent \u53bb\u4e3a\u6211\u4eec\u7684\u5e94\u7528\u66b4\u9732 JMX \u6307\u6807\u3002Opentelemetry Agent \u4e5f\u662f\u901a\u8fc7\u68c0\u6d4b\u5e94\u7528\u7a0b\u5e8f\u4e2d\u672c\u5730\u53ef\u7528\u7684 MBean \u516c\u5f00\u7684\u6307\u6807\uff0c\u5bf9\u5176\u8fdb\u884c\u6536\u96c6\u5e76\u66b4\u9732\u6307\u6807\u3002

          Opentelemetry Agent \u4e5f\u9488\u5bf9\u5e38\u89c1\u7684 Java Server \u6216\u6846\u67b6\u5185\u7f6e\u4e86\u4e00\u4e9b\u76d1\u63a7\u7684\u6837\u4f8b\uff0c\u8bf7\u53c2\u8003\u9884\u5b9a\u4e49\u7684\u6307\u6807\u3002

          \u4f7f\u7528 OpenTelemetry Java Agent \u540c\u6837\u9700\u8981\u8003\u8651\u5982\u4f55\u5c06 JAR \u6302\u8f7d\u8fdb\u5bb9\u5668\uff0c\u9664\u4e86\u53ef\u4ee5\u53c2\u8003\u4e0a\u9762 JMX Exporter \u6302\u8f7d JAR \u6587\u4ef6\u7684\u65b9\u5f0f\u5916\uff0c\u6211\u4eec\u8fd8\u53ef\u4ee5\u501f\u52a9 Opentelemetry \u63d0\u4f9b\u7684 Operator \u7684\u80fd\u529b\u6765\u5b9e\u73b0\u81ea\u52a8\u4e3a\u6211\u4eec\u7684\u5e94\u7528\u5f00\u542f JVM \u6307\u6807\u66b4\u9732\uff1a

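          A sketch of the Pod annotation that asks the Operator to inject the Java agent; the Instrumentation reference insight-system/insight-opentelemetry-autoinstrumentation matches the one used earlier in this document:

          annotations:\n  instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n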

          However, as of the current version, you still need to add the corresponding annotations to the application manually before the JVM data is collected by Insight; for the annotation details, see Connecting Java Applications with Existing JVM Metrics to Observability.

          "},{"location":"admin/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html#java","title":"\u4e3a Java \u4e2d\u95f4\u4ef6\u66b4\u9732\u6307\u6807","text":"

          Opentelemetry Agent \u4e5f\u5185\u7f6e\u4e86\u4e00\u4e9b\u4e2d\u95f4\u4ef6\u76d1\u63a7\u7684\u6837\u4f8b\uff0c\u8bf7\u53c2\u8003 \u9884\u5b9a\u4e49\u6307\u6807\u3002

          \u9ed8\u8ba4\u6ca1\u6709\u6307\u5b9a\u4efb\u4f55\u7c7b\u578b\uff0c\u9700\u8981\u901a\u8fc7 -Dotel.jmx.target.system JVM Options \u6307\u5b9a,\u6bd4\u5982 -Dotel.jmx.target.system=jetty,kafka-broker \u3002
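          In a containerized deployment, this option is commonly passed via JAVA_TOOL_OPTIONS together with the agent; a sketch (the agent path and target systems are illustrative, matching the injection path used earlier in this document):

          env:\n  - name: JAVA_TOOL_OPTIONS\n    value: \"-javaagent:/otel-auto-instrumentation/javaagent.jar -Dotel.jmx.target.system=jetty,kafka-broker\"\n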

          "},{"location":"admin/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html#_1","title":"\u53c2\u8003","text":"
          • Gaining JMX Metric Insights with the OpenTelemetry Java Agent

          • Otel jmx metrics

          "},{"location":"admin/insight/quickstart/other/install-agent-on-ocp.html","title":"OpenShift \u5b89\u88c5 Insight Agent","text":"

          \u867d\u7136 OpenShift \u7cfb\u7edf\u81ea\u5e26\u4e86\u4e00\u5957\u76d1\u63a7\u7cfb\u7edf\uff0c\u56e0\u4e3a\u6570\u636e\u91c7\u96c6\u7ea6\u5b9a\u7684\u4e00\u4e9b\u89c4\u5219\uff0c\u6211\u4eec\u8fd8\u662f\u4f1a\u5b89\u88c5 Insight Agent\u3002

          \u5176\u4e2d\uff0c\u5b89\u9664\u4e86\u57fa\u7840\u7684\u5b89\u88c5\u914d\u7f6e\u4e4b\u5916\uff0chelm install \u7684\u65f6\u5019\u8fd8\u9700\u8981\u589e\u52a0\u5982\u4e0b\u7684\u53c2\u6570\uff1a

          ## fluent-bit related parameters\n--set fluent-bit.ocp.enabled=true \\\n--set fluent-bit.serviceAccount.create=false \\\n--set fluent-bit.securityContext.runAsUser=0 \\\n--set fluent-bit.securityContext.seLinuxOptions.type=spc_t \\\n--set fluent-bit.securityContext.readOnlyRootFilesystem=false \\\n--set fluent-bit.securityContext.allowPrivilegeEscalation=false \\\n\n## Enable the Prometheus (CR) adaptation for OpenShift 4.x\n--set compatibility.openshift.prometheus.enabled=true \\\n\n## Disable the higher-version Prometheus instance\n--set kube-prometheus-stack.prometheus.enabled=false \\\n--set kube-prometheus-stack.kubeApiServer.enabled=false \\\n--set kube-prometheus-stack.kubelet.enabled=false \\\n--set kube-prometheus-stack.kubeControllerManager.enabled=false \\\n--set kube-prometheus-stack.coreDns.enabled=false \\\n--set kube-prometheus-stack.kubeDns.enabled=false \\\n--set kube-prometheus-stack.kubeEtcd.enabled=false \\\n--set kube-prometheus-stack.kubeScheduler.enabled=false \\\n--set kube-prometheus-stack.kubeStateMetrics.enabled=false \\\n--set kube-prometheus-stack.nodeExporter.enabled=false \\\n\n## Limit the namespaces handled by PrometheusOperator to avoid competing with OpenShift's built-in PrometheusOperator\n--set kube-prometheus-stack.prometheusOperator.kubeletService.namespace=\"insight-system\" \\\n--set kube-prometheus-stack.prometheusOperator.prometheusInstanceNamespaces=\"insight-system\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[0]=\"openshift-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[1]=\"openshift-user-workload-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[2]=\"openshift-customer-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[3]=\"openshift-route-monitor-operator\" \\\n
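          Put together, the invocation might look as follows. This is a sketch: the release name, chart reference, and namespace are assumptions, so substitute the values from your standard Insight Agent installation:

          helm upgrade --install insight-agent insight/insight-agent \\\n  -n insight-system --create-namespace \\\n  <basic installation parameters> \\\n  <the --set flags listed above>\n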
          "},{"location":"admin/insight/quickstart/other/install-agent-on-ocp.html#openshift-prometheus","title":"\u901a\u8fc7 OpenShift \u81ea\u8eab\u673a\u5236\uff0c\u5c06\u7cfb\u7edf\u76d1\u63a7\u6570\u636e\u5199\u5165 Prometheus \u4e2d","text":"
          apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: cluster-monitoring-config\n  namespace: openshift-monitoring\ndata:\n  config.yaml: |\n    prometheusK8s:\n      remoteWrite:\n        - queueConfig:\n            batchSendDeadline: 60s\n            maxBackoff: 5s\n            minBackoff: 30ms\n            minShards: 1\n            capacity: 5000\n            maxSamplesPerSend: 1000\n            maxShards: 100\n          remoteTimeout: 30s\n          url: http://insight-agent-prometheus.insight-system.svc.cluster.local:9090/api/v1/write\n          writeRelabelConfigs:\n            - action: keep\n              regex: etcd|kubelet|node-exporter|apiserver|kube-state-metrics\n              sourceLabels:\n                - job\n
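A hedged note on applying the ConfigMap above: saving it to a file and applying it with kubectl should be enough for the cluster monitoring stack to pick it up (the file name is arbitrary):

```shell
# Apply the remote-write configuration shown above
kubectl apply -f cluster-monitoring-config.yaml

# Verify the ConfigMap landed in the expected namespace
kubectl -n openshift-monitoring get configmap cluster-monitoring-config
```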
          "},{"location":"admin/insight/quickstart/res-plan/index.html","title":"\u90e8\u7f72\u5bb9\u91cf\u89c4\u5212","text":"

          \u9ed8\u8ba4\u60c5\u51b5\u4e0b\uff0c\u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u4e3a\u4e86\u907f\u514d\u6d88\u8017\u8fc7\u591a\u8d44\u6e90\uff0c\u5df2\u7ecf\u8bbe\u7f6e\u4e86\u8d44\u6e90\u4e0a\u7ebf\uff08resource limit\uff09\uff0c\u53ef\u89c2\u6d4b\u7cfb\u7edf\u9700\u8981\u5904\u7406\u5927\u91cf\u7684\u6570\u636e\uff0c\u5982\u679c\u5bb9\u91cf\u89c4\u5212\u4e0d\u5408\u7406\uff0c\u53ef\u80fd\u4f1a\u5bfc\u81f4\u7cfb\u7edf\u8d1f\u8f7d\u8fc7\u9ad8\uff0c\u5f71\u54cd\u7a33\u5b9a\u6027\u548c\u53ef\u9760\u6027\u3002

          "},{"location":"admin/insight/quickstart/res-plan/index.html#_2","title":"\u89c2\u6d4b\u7ec4\u4ef6\u7684\u8d44\u6e90\u89c4\u5212","text":"

          \u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u5305\u542b Insight \u548c Insight Agent\u3002\u5176\u4e2d\uff0cInsight \u4e3b\u8981\u8d1f\u8d23\u89c2\u6d4b\u6570\u636e\u7684\u5b58\u50a8\uff0c\u5206\u6790\u4e0e\u5c55\u793a\u3002\u800c Insight Agent \u5305\u542b\u4e86\u6570\u636e\u91c7\u96c6\u3001\u6570\u636e\u5904\u7406\u3001\u6570\u636e\u4e0a\u4f20\u7b49\u529f\u80fd\u3002

          "},{"location":"admin/insight/quickstart/res-plan/index.html#_3","title":"\u5b58\u50a8\u7ec4\u4ef6\u7684\u5bb9\u91cf\u89c4\u5212","text":"

          Insight \u7684\u5b58\u50a8\u7ec4\u4ef6\u4e3b\u8981\u5305\u62ec ElasticSearch \u548c VictoriaMetrics. \u5176\u4e2d\uff0cElasticSearch \u4e3b\u8981\u8d1f\u8d23\u5b58\u50a8\u548c\u67e5\u8be2\u65e5\u5fd7\u4e0e\u94fe\u8def\u6570\u636e\uff0cVictoriaMetrics \u4e3b\u8981\u8d1f\u8d23\u5b58\u50a8\u548c\u67e5\u8be2\u6307\u6807\u6570\u636e\u3002

          • VictoriaMetircs: \u5176\u78c1\u76d8\u7528\u91cf\u4e0e\u5b58\u50a8\u7684\u6307\u6807\u6709\u5173\uff0c\u6839\u636e vmstorage \u7684\u78c1\u76d8\u89c4\u5212 \u9884\u4f30\u5bb9\u91cf\u540e \u8c03\u6574 vmstorage \u78c1\u76d8\u3002
          "},{"location":"admin/insight/quickstart/res-plan/index.html#_4","title":"\u91c7\u96c6\u5668\u7684\u8d44\u6e90\u89c4\u5212","text":"

          Insight Agent \u7684\u91c7\u96c6\u5668\u4e2d\u5305\u542b Proemtheus\uff0c\u867d\u7136 Prometheus \u672c\u8eab\u662f\u4e00\u4e2a\u72ec\u7acb\u7684\u7ec4\u4ef6\uff0c\u4f46\u662f\u5728 Insight Agent \u4e2d\uff0cPrometheus \u4f1a\u88ab\u7528\u4e8e\u91c7\u96c6\u6570\u636e\uff0c\u56e0\u6b64\u9700\u8981\u5bf9 Prometheus \u7684\u8d44\u6e90\u8fdb\u884c\u89c4\u5212\u3002

          • Prometheus\uff1a\u5176\u8d44\u6e90\u7528\u91cf\u4e0e\u91c7\u96c6\u7684\u6307\u6807\u91cf\u6709\u5173\uff0c\u53ef\u4ee5\u53c2\u8003 Prometheus \u8d44\u6e90\u89c4\u5212 \u8fdb\u884c\u8c03\u6574\u3002
          "},{"location":"admin/insight/quickstart/res-plan/modify-vms-disk.html","title":"vmstorge \u78c1\u76d8\u6269\u5bb9","text":"

          \u672c\u6587\u63cf\u8ff0\u4e86 vmstorge \u78c1\u76d8\u6269\u5bb9\u7684\u65b9\u6cd5\uff0c vmstorge \u78c1\u76d8\u89c4\u8303\u8bf7\u53c2\u8003 vmstorage \u78c1\u76d8\u5bb9\u91cf\u89c4\u5212\u3002

          "},{"location":"admin/insight/quickstart/res-plan/modify-vms-disk.html#_1","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"admin/insight/quickstart/res-plan/modify-vms-disk.html#_2","title":"\u5f00\u542f\u5b58\u50a8\u6c60\u6269\u5bb9","text":"
          1. \u4ee5\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7ba1\u7406\u5458\u6743\u9650\u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\uff0c\u70b9\u51fb \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \uff0c\u70b9\u51fb kpanda-global-cluster \u96c6\u7fa4\u3002

          2. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e(PVC) \uff0c\u627e\u5230 vmstorage \u7ed1\u5b9a\u7684\u6570\u636e\u5377\u58f0\u660e\u3002

          3. \u70b9\u51fb\u67d0\u4e2a vmstorage PVC\uff0c\u8fdb\u5165 vmstorage \u7684\u6570\u636e\u5377\u58f0\u660e\u8be6\u60c5\uff0c\u786e\u8ba4\u8be5 PVC \u7ed1\u5b9a\u7684\u5b58\u50a8\u6c60\u3002

          4. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a \u5bb9\u5668\u5b58\u50a8 -> \u5b58\u50a8\u6c60(SC) \uff0c\u627e\u5230 local-path \uff0c\u70b9\u51fb\u76ee\u6807\u53f3\u4fa7\u7684 \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u7f16\u8f91 \u3002

          5. \u5f00\u542f \u6269\u5bb9 \u540e\u70b9\u51fb \u786e\u5b9a \u3002
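A hedged CLI equivalent of enabling expansion on the StorageClass; it simply sets allowVolumeExpansion, assuming kubectl access to the global cluster:

```shell
# Allow PVCs created from the local-path StorageClass to be resized
kubectl patch storageclass local-path \
  -p '{"allowVolumeExpansion": true}'
```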

          "},{"location":"admin/insight/quickstart/res-plan/modify-vms-disk.html#vmstorage","title":"\u66f4\u6539 vmstorage \u7684\u78c1\u76d8\u5bb9\u91cf","text":"
          1. \u4ee5\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7ba1\u7406\u5458\u6743\u9650\u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\uff0c\u8fdb\u5165 kpanda-global-cluster \u96c6\u7fa4\u8be6\u60c5\u3002

          2. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a \u81ea\u5b9a\u4e49\u8d44\u6e90 \uff0c\u627e\u5230 vmcluster \u7684\u81ea\u5b9a\u4e49\u8d44\u6e90\u3002

          3. \u70b9\u51fb\u8be5 vmcluster \u81ea\u5b9a\u4e49\u8d44\u6e90\u8fdb\u5165\u8be6\u60c5\u9875\uff0c\u5207\u6362\u5230 insight-system \u547d\u540d\u7a7a\u95f4\u4e0b\uff0c\u4ece insight-victoria-metrics-k8s-stack \u53f3\u4fa7\u83dc\u5355\u9009\u62e9 \u7f16\u8f91 YAML \u3002

          4. \u6839\u636e\u56fe\u4f8b\u4fee\u6539\u540e\u70b9\u51fb \u786e\u5b9a \u3002

          5. \u518d\u6b21\u9009\u62e9\u5de6\u4fa7\u5bfc\u822a \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e(PVC) \uff0c\u627e\u5230 vmstorage \u7ed1\u5b9a\u7684\u6570\u636e\u5377\u58f0\u660e\u786e\u8ba4\u4fee\u6539\u5df2\u751f\u6548\u3002\u5728\u67d0\u4e2a PVC \u8be6\u60c5\u9875\uff0c\u70b9\u51fb\u5173\u8054\u5b58\u50a8\u6e90 (PV)\u3002

          6. \u6253\u5f00\u6570\u636e\u5377\u8be6\u60c5\u9875\uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 \u66f4\u65b0 \u6309\u94ae\u3002

          7. \u4fee\u6539 \u5bb9\u91cf \u540e\u70b9\u51fb \u786e\u5b9a \uff0c\u7a0d\u7b49\u7247\u523b\u7b49\u5230\u6269\u5bb9\u6210\u529f\u3002
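A hedged kubectl equivalent of resizing the claim directly; the PVC name and the 200Gi target below are placeholders, not values from this document:

```shell
# Request a larger size on the vmstorage PVC (name and size are placeholders)
kubectl -n insight-system patch pvc \
  vmstorage-volume-vmstorage-insight-victoria-metrics-k8s-stack-0 \
  -p '{"spec":{"resources":{"requests":{"storage":"200Gi"}}}}'
```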

          "},{"location":"admin/insight/quickstart/res-plan/modify-vms-disk.html#_3","title":"\u514b\u9686\u5b58\u50a8\u5377","text":"

          \u82e5\u5b58\u50a8\u5377\u6269\u5bb9\u5931\u8d25\uff0c\u53ef\u53c2\u8003\u4ee5\u4e0b\u65b9\u6cd5\u514b\u9686\u5b58\u50a8\u5377\u3002

          1. \u4ee5\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7ba1\u7406\u5458\u6743\u9650\u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\uff0c\u8fdb\u5165 kpanda-global-cluster \u96c6\u7fa4\u8be6\u60c5\u3002

          2. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a \u5de5\u4f5c\u8d1f\u8f7d -> \u6709\u72b6\u6001\u8d1f\u8f7d \uff0c\u627e\u5230 vmstorage \u7684\u6709\u72b6\u6001\u8d1f\u8f7d\uff0c\u70b9\u51fb\u76ee\u6807\u53f3\u4fa7\u7684 \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u72b6\u6001 -> \u505c\u6b62 -> \u786e\u5b9a \u3002

          3. \u5728\u547d\u4ee4\u884c\u4e2d\u767b\u5f55 kpanda-global-cluster \u96c6\u7fa4\u7684 master \u8282\u70b9\u540e\uff0c\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u590d\u5236 vmstorage \u5bb9\u5668\u4e2d\u7684 vm-data \u76ee\u5f55\u5c06\u6307\u6807\u4fe1\u606f\u5b58\u50a8\u5728\u672c\u5730\uff1a

            kubectl cp -n insight-system vmstorage-insight-victoria-metrics-k8s-stack-1:vm-data ./vm-data\n
4. Log in to the AI Computing Center platform, open the kpanda-global-cluster cluster details, select Container Storage -> PersistentVolumes (PV) in the left navigation, click Clone in the upper-right corner, and modify the volume's capacity.

5. Delete the previous vmstorage volume.

6. Wait a moment until the PVC is bound to the cloned volume, then run the following command to import the data exported in step 3 into the corresponding container, and start the previously stopped vmstorage again.

            kubectl cp -n insight-system ./vm-data vmstorage-insight-victoria-metrics-k8s-stack-1:vm-data\n
          "},{"location":"admin/insight/quickstart/res-plan/prometheus-res.html","title":"Prometheus \u8d44\u6e90\u89c4\u5212","text":"

          Prometheus \u5728\u5b9e\u9645\u4f7f\u7528\u8fc7\u7a0b\u4e2d\uff0c\u53d7\u5230\u96c6\u7fa4\u5bb9\u5668\u6570\u91cf\u4ee5\u53ca\u5f00\u542f Istio \u7684\u5f71\u54cd\uff0c\u4f1a\u5bfc\u81f4 Prometheus \u7684 CPU\u3001\u5185\u5b58\u7b49\u8d44\u6e90\u4f7f\u7528\u91cf\u8d85\u51fa\u8bbe\u5b9a\u7684\u8d44\u6e90\u3002

          \u4e3a\u4e86\u4fdd\u8bc1\u4e0d\u540c\u89c4\u6a21\u96c6\u7fa4\u4e0b Prometheus \u7684\u6b63\u5e38\u8fd0\u884c\uff0c\u9700\u8981\u6839\u636e\u96c6\u7fa4\u7684\u5b9e\u9645\u89c4\u6a21\u5bf9 Prometheus \u8fdb\u884c\u8d44\u6e90\u8c03\u6574\u3002

          "},{"location":"admin/insight/quickstart/res-plan/prometheus-res.html#_1","title":"\u53c2\u8003\u8d44\u6e90\u89c4\u5212","text":"

          \u5728\u672a\u5f00\u542f\u7f51\u683c\u60c5\u51b5\u4e0b\uff0c\u6d4b\u8bd5\u60c5\u51b5\u7edf\u8ba1\u51fa\u7cfb\u7edf Job \u6307\u6807\u91cf\u4e0e Pod \u7684\u5173\u7cfb\u4e3a Series \u6570\u91cf = 800 * Pod \u6570\u91cf

          \u5728\u5f00\u542f\u670d\u52a1\u7f51\u683c\u65f6\uff0c\u5f00\u542f\u529f\u80fd\u540e Pod \u4ea7\u751f\u7684 Istio \u76f8\u5173\u6307\u6807\u6570\u91cf\u7ea7\u4e3a Series \u6570\u91cf = 768 * Pod \u6570\u91cf
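Putting the two relations together, a hedged formula for the total series count, where N_pod is the Pod count and the second term applies only when the mesh is enabled:

```latex
% Combined series estimate from the two test results above
\mathrm{Series} = 800 \times N_{\mathrm{pod}} \;+\; \underbrace{768 \times N_{\mathrm{pod}}}_{\text{only if service mesh is enabled}}
```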

          "},{"location":"admin/insight/quickstart/res-plan/prometheus-res.html#_2","title":"\u5f53\u672a\u5f00\u542f\u670d\u52a1\u7f51\u683c\u65f6","text":"

          \u4ee5\u4e0b\u8d44\u6e90\u89c4\u5212\u4e3a \u672a\u5f00\u542f\u670d\u52a1\u7f51\u683c \u573a\u666f\u4e0b\uff0cPrometheus \u7684\u8d44\u6e90\u89c4\u5212\u63a8\u8350\uff1a

          \u96c6\u7fa4\u89c4\u6a21(Pod \u6570) \u6307\u6807\u91cf(\u672a\u5f00\u542f\u670d\u52a1\u7f51\u683c) CPU(core) \u5185\u5b58(GB) 100 8w Request: 0.5Limit\uff1a1 Request\uff1a2GBLimit\uff1a4GB 200 16w Request\uff1a1Limit\uff1a1.5 Request\uff1a3GBLimit\uff1a6GB 300 24w Request\uff1a1Limit\uff1a2 Request\uff1a3GBLimit\uff1a6GB 400 32w Request\uff1a1Limit\uff1a2 Request\uff1a4GBLimit\uff1a8GB 500 40w Request\uff1a1.5Limit\uff1a3 Request\uff1a5GBLimit\uff1a10GB 800 64w Request\uff1a2Limit\uff1a4 Request\uff1a8GBLimit\uff1a16GB 1000 80w Request\uff1a2.5Limit\uff1a5 Request\uff1a9GBLimit\uff1a18GB 2000 160w Request\uff1a3.5Limit\uff1a7 Request\uff1a20GBLimit\uff1a40GB 3000 240w Request\uff1a4Limit\uff1a8 Request\uff1a33GBLimit\uff1a66GB"},{"location":"admin/insight/quickstart/res-plan/prometheus-res.html#_3","title":"\u5f53\u5f00\u542f\u670d\u52a1\u7f51\u683c\u529f\u80fd\u65f6","text":"

          \u4ee5\u4e0b\u8d44\u6e90\u89c4\u5212\u4e3a \u5f00\u542f\u670d\u52a1\u7f51\u683c \u573a\u666f\u4e0b\uff0cPrometheus \u7684\u8d44\u6e90\u89c4\u5212\u63a8\u8350\uff1a

          \u96c6\u7fa4\u89c4\u6a21(Pod \u6570) \u6307\u6807\u91cf(\u5df2\u5f00\u542f\u670d\u52a1\u7f51\u683c) CPU(core) \u5185\u5b58(GB) 100 15w Request: 1Limit\uff1a2 Request\uff1a3GBLimit\uff1a6GB 200 31w Request\uff1a2Limit\uff1a3 Request\uff1a5GBLimit\uff1a10GB 300 46w Request\uff1a2Limit\uff1a4 Request\uff1a6GBLimit\uff1a12GB 400 62w Request\uff1a2Limit\uff1a4 Request\uff1a8GBLimit\uff1a16GB 500 78w Request\uff1a3Limit\uff1a6 Request\uff1a10GBLimit\uff1a20GB 800 125w Request\uff1a4Limit\uff1a8 Request\uff1a15GBLimit\uff1a30GB 1000 156w Request\uff1a5Limit\uff1a10 Request\uff1a18GBLimit\uff1a36GB 2000 312w Request\uff1a7Limit\uff1a14 Request\uff1a40GBLimit\uff1a80GB 3000 468w Request\uff1a8Limit\uff1a16 Request\uff1a65GBLimit\uff1a130GB

Note

1. The Pod count in the table refers to Pods running more or less steadily in the cluster. If a large number of Pods restart, the metric volume will spike over a short period, and resources need to be raised accordingly.
2. Prometheus keeps two hours of data in memory by default, and when Remote Write is enabled in the cluster it consumes additional memory; an overcommit ratio of 2 is recommended.
3. The values in the table are recommendations for general cases. If your environment has precise resource requirements, it is advisable to check Prometheus's actual resource usage after the cluster has been running for a while and configure it precisely.
          "},{"location":"admin/insight/quickstart/res-plan/vms-res-plan.html","title":"vmstorage \u78c1\u76d8\u5bb9\u91cf\u89c4\u5212","text":"

          vmstorage \u662f\u8d1f\u8d23\u5b58\u50a8\u53ef\u89c2\u6d4b\u6027\u591a\u96c6\u7fa4\u6307\u6807\u3002 \u4e3a\u4fdd\u8bc1 vmstorage \u7684\u7a33\u5b9a\u6027\uff0c\u9700\u8981\u6839\u636e\u96c6\u7fa4\u6570\u91cf\u53ca\u96c6\u7fa4\u89c4\u6a21\u8c03\u6574 vmstorage \u7684\u78c1\u76d8\u5bb9\u91cf\u3002 \u66f4\u591a\u8d44\u6599\u8bf7\u53c2\u8003\uff1avmstorage \u4fdd\u7559\u671f\u4e0e\u78c1\u76d8\u7a7a\u95f4\u3002

          "},{"location":"admin/insight/quickstart/res-plan/vms-res-plan.html#_1","title":"\u6d4b\u8bd5\u7ed3\u679c","text":"

          \u7ecf\u8fc7 14 \u5929\u5bf9\u4e0d\u540c\u89c4\u6a21\u7684\u96c6\u7fa4\u7684 vmstorage \u7684\u78c1\u76d8\u89c2\u6d4b\uff0c \u6211\u4eec\u53d1\u73b0 vmstorage \u7684\u78c1\u76d8\u7528\u91cf\u4e0e\u5176\u5b58\u50a8\u7684\u6307\u6807\u91cf\u548c\u5355\u4e2a\u6570\u636e\u70b9\u5360\u7528\u78c1\u76d8\u6b63\u76f8\u5173\u3002

          1. \u77ac\u65f6\u5b58\u50a8\u7684\u6307\u6807\u91cf increase(vm_rows{ type != \"indexdb\"}[30s]) \u4ee5\u83b7\u53d6 30s \u5185\u589e\u52a0\u7684\u6307\u6807\u91cf
          2. \u5355\u4e2a\u6570\u636e\u70b9 (datapoint) \u7684\u5360\u7528\u78c1\u76d8\uff1a sum(vm_data_size_bytes{type!=\"indexdb\"}) /\u00a0sum(vm_rows{type\u00a0!=\u00a0\"indexdb\"})
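A hedged sketch of running these two queries against the Prometheus-compatible HTTP API; the vmselect service address below is an assumption about the install layout, not taken from this document:

```shell
# Metrics ingested in the last 30s (the endpoint address is a placeholder)
curl -s 'http://vmselect-insight-victoria-metrics-k8s-stack.insight-system.svc:8481/select/0/prometheus/api/v1/query' \
  --data-urlencode 'query=increase(vm_rows{type != "indexdb"}[30s])'

# Average bytes of disk per stored data point
curl -s 'http://vmselect-insight-victoria-metrics-k8s-stack.insight-system.svc:8481/select/0/prometheus/api/v1/query' \
  --data-urlencode 'query=sum(vm_data_size_bytes{type!="indexdb"}) / sum(vm_rows{type != "indexdb"})'
```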
          "},{"location":"admin/insight/quickstart/res-plan/vms-res-plan.html#_2","title":"\u8ba1\u7b97\u65b9\u6cd5","text":"

          \u78c1\u76d8\u7528\u91cf = \u77ac\u65f6\u6307\u6807\u91cf x 2 x \u5355\u4e2a\u6570\u636e\u70b9\u7684\u5360\u7528\u78c1\u76d8 x 60 x 24 x \u5b58\u50a8\u65f6\u95f4 (\u5929)
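The same formula rendered in LaTeX, with the factor of 2 coming from the 30s scrape interval noted in the parameters below:

```latex
% Disk usage in bytes; 60 x 24 converts days to minutes, and the factor 2
% reflects two datapoints per minute at a 30s scrape interval
\mathrm{DiskUsage} = \mathrm{Series} \times 2 \times \mathrm{BytesPerDatapoint} \times 60 \times 24 \times \mathrm{RetentionDays}
```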

Parameter descriptions:

1. Disk usage is measured in bytes.
2. Retention period (days) x 60 x 24 converts days into minutes so the disk usage can be calculated.
3. Prometheus in Insight Agent scrapes every 30s by default, so twice the metric volume is produced within 1 minute (hence the factor of 2).
4. The default retention period in vmstorage is 1 month; to change it, refer to Modify System Settings.

Warning

This formula is a general-purpose estimate; it is advisable to reserve extra disk capacity on top of the calculated result to keep vmstorage running normally.

"},{"location":"admin/insight/quickstart/res-plan/vms-res-plan.html#_3","title":"Reference Capacity","text":"

The figures in the tables below assume the default retention period of one month (30 days) and 0.9 bytes of disk per data point. In multi-cluster scenarios, the Pod count is the total across all clusters.

          "},{"location":"admin/insight/quickstart/res-plan/vms-res-plan.html#_4","title":"\u5f53\u672a\u5f00\u542f\u670d\u52a1\u7f51\u683c\u65f6","text":"\u96c6\u7fa4\u89c4\u6a21 (Pod \u6570) \u6307\u6807\u91cf \u78c1\u76d8\u5bb9\u91cf 100 8w 6 GiB 200 16w 12 GiB 300 24w 18 GiB 400 32w 24 GiB 500 40w 30 GiB 800 64w 48 GiB 1000 80w 60 GiB 2000 160w 120 GiB 3000 240w 180 GiB"},{"location":"admin/insight/quickstart/res-plan/vms-res-plan.html#_5","title":"\u5f53\u5f00\u542f\u670d\u52a1\u7f51\u683c\u65f6","text":"\u96c6\u7fa4\u89c4\u6a21 (Pod \u6570) \u6307\u6807\u91cf \u78c1\u76d8\u5bb9\u91cf 100 15w 12 GiB 200 31w 24 GiB 300 46w 36 GiB 400 62w 48 GiB 500 78w 60 GiB 800 125w 94 GiB 1000 156w 120 GiB 2000 312w 235 GiB 3000 468w 350 GiB"},{"location":"admin/insight/quickstart/res-plan/vms-res-plan.html#_6","title":"\u4e3e\u4f8b\u8bf4\u660e","text":"

          AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\u4e2d\u6709\u4e24\u4e2a\u96c6\u7fa4\uff0c\u5176\u4e2d\u5168\u5c40\u670d\u52a1\u96c6\u7fa4(\u5f00\u542f\u670d\u52a1\u7f51\u683c)\u4e2d\u8fd0\u884c 500 \u4e2a Pod\uff0c\u5de5\u4f5c\u96c6\u7fa4(\u672a\u5f00\u542f\u670d\u52a1\u7f51\u683c)\u8fd0\u884c\u4e86 1000 \u4e2a Pod\uff0c\u9884\u671f\u6307\u6807\u5b58 30 \u5929\u3002

          • \u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e2d\u6307\u6807\u91cf\u4e3a 800x500 + 768x500 = 784000
          • \u5de5\u4f5c\u96c6\u7fa4\u6307\u6807\u91cf\u4e3a 800x1000 = 800000

          \u5219\u5f53\u524d vmstorage \u78c1\u76d8\u7528\u91cf\u5e94\u8bbe\u7f6e\u4e3a (784000+80000)x2x0.9x60x24x31 = 124384896000 byte = 116 GiB
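A quick sanity check of the arithmetic with bc:

```shell
# Disk usage = (784000 + 800000) series * 2 * 0.9 B/datapoint * 60 * 24 * 30 days
echo '(784000+800000)*2*0.9*60*24*30' | bc
# 123171840000 bytes, roughly 115 GiB
```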

Note

For the relationship between metric volume and Pod count in a cluster, refer to Prometheus Resource Planning.

"},{"location":"admin/insight/reference/alertnotification.html","title":"Alert Notification Flow","text":"

When creating an alert policy, Insight supports configuring different notification intervals for alerts triggered at different severities under the same policy. However, because the native Alertmanager configuration contains the two parameters group_interval and repeat_interval, the actual interval at which alert notifications are sent can deviate from what is configured.

"},{"location":"admin/insight/reference/alertnotification.html#_2","title":"Parameter Configuration","text":"

The Alertmanager configuration is as follows:

          route:  \n  group_by: [\"rulename\"]\n  group_wait: 30s\n  group_interval: 5m\n  repeat_interval: 1h\n
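If amtool (shipped with Alertmanager) is available, a hedged way to validate such a configuration file before applying it; the file name is a placeholder:

```shell
# Validate the Alertmanager configuration syntax
amtool check-config alertmanager.yaml
```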

Parameter descriptions:

• group_wait: sets the wait time before sending an alert notification. When Alertmanager receives a group of alerts, if no further alerts arrive within the time specified by group_wait, Alertmanager waits for this period to collect more alerts with the same labels and content, and adds all matching alerts to the same notification.

• group_interval: sets how long a group of alerts waits before being merged into a single notification. If no more alerts from the same group are received within this time, Alertmanager sends a notification containing all the alerts received so far.

• repeat_interval: sets the interval for re-sending alert notifications. After Alertmanager sends a notification to a receiver, if it keeps receiving alerts with the same labels and content within the time specified by repeat_interval, it re-sends the notification.

When group_wait, group_interval, and repeat_interval are all set, Alertmanager handles notifications for alerts in the same group as follows:

1. When Alertmanager receives matching alerts, it waits at least the time specified by group_wait to collect more alerts with the same labels and content, and adds all matching alerts to the same notification.

2. If no further alerts are received within group_wait, Alertmanager sends all alerts received so far to the receiver after that time. If other matching alerts arrive during this period, Alertmanager keeps waiting until all alerts are collected or a timeout occurs.

3. If more alerts with the same labels and content are received within the time specified by group_interval, the new alerts are added to the previous notification and sent together. If unsent alerts remain after group_interval ends, Alertmanager starts a new timing cycle and waits for more alerts until group_interval elapses again or a new alert arrives.

4. If alerts with the same labels and content keep arriving within the time specified by repeat_interval, Alertmanager re-sends the previously sent notifications. When re-sending, it no longer waits for group_wait or group_interval, but repeats the notification at the interval specified by repeat_interval.

5. If unsent alerts remain after repeat_interval ends, Alertmanager starts a new timing cycle and keeps waiting for new alerts with the same labels and content. This continues until there are no new alerts or Alertmanager is stopped.

"},{"location":"admin/insight/reference/alertnotification.html#_3","title":"Example","text":"

In the following example, Alertmanager assigns all alerts whose CPU usage exceeds the threshold to a policy named critical_alerts.

          groups:\n- name: critical_alerts\n  rules:\n  - alert: HighCPUUsage\n    expr: node_cpu_seconds_total{mode=\"idle\"} < 50\n    for: 5m\n    labels:\n      severity: critical\n    annotations:\n      summary: \"High CPU usage detected on instance {{ $labels.instance }}\"\n  group_by: [rulename]\n  group_wait: 30s\n  group_interval: 5m\n  repeat_interval: 1h\n

In this case:

• When Alertmanager receives an alert, it waits at least 30 seconds to collect more alerts with the same labels and content and adds them to the same notification.
• If more alerts with the same labels and content are received within 5 minutes, the new alerts are added to the previous notification and sent together. If unsent alerts remain after the 5-minute window ends, Alertmanager starts a new timing cycle and waits for more alerts until 5 minutes elapse again or a new alert arrives.
• If alerts with the same labels and content keep arriving within 1 hour, Alertmanager re-sends the previously sent alert notifications.

          "},{"location":"admin/insight/reference/lucene.html","title":"Lucene \u8bed\u6cd5\u4f7f\u7528\u65b9\u6cd5","text":""},{"location":"admin/insight/reference/lucene.html#lucene_1","title":"Lucene \u7b80\u4ecb","text":"

          Lucene \u662f Apache \u8f6f\u4ef6\u57fa\u91d1\u4f1a 4 jakarta \u9879\u76ee\u7ec4\u7684\u4e00\u4e2a\u5b50\u9879\u76ee\uff0c\u662f\u4e00\u4e2a\u5f00\u653e\u6e90\u4ee3\u7801\u7684\u5168\u6587\u68c0\u7d22\u5f15\u64ce\u5de5\u5177\u5305\u3002 Lucene \u7684\u76ee\u7684\u662f\u4e3a\u8f6f\u4ef6\u5f00\u53d1\u4eba\u5458\u63d0\u4f9b\u4e00\u4e2a\u7b80\u5355\u6613\u7528\u7684\u5de5\u5177\u5305\uff0c\u4ee5\u65b9\u4fbf\u7684\u5728\u76ee\u6807\u7cfb\u7edf\u4e2d\u5b9e\u73b0\u5168\u6587\u68c0\u7d22\u7684\u529f\u80fd\u3002

          "},{"location":"admin/insight/reference/lucene.html#lucene_2","title":"Lucene \u8bed\u6cd5","text":"

          Lucene \u7684\u8bed\u6cd5\u641c\u7d22\u683c\u5f0f\u5141\u8bb8\u60a8\u4ee5\u7075\u6d3b\u7684\u65b9\u5f0f\u6784\u5efa\u641c\u7d22\u67e5\u8be2\uff0c\u4ee5\u6ee1\u8db3\u4e0d\u540c\u7684\u641c\u7d22\u9700\u6c42\u3002\u4ee5\u4e0b\u662f Lucene \u7684\u8bed\u6cd5\u641c\u7d22\u683c\u5f0f\u7684\u8be6\u7ec6\u8bf4\u660e\uff1a

          "},{"location":"admin/insight/reference/lucene.html#_1","title":"\u5173\u952e\u5b57\u67e5\u8be2","text":"

          \u8981\u901a\u8fc7 Lucene \u8bed\u6cd5\u5b9e\u73b0\u591a\u4e2a\u5173\u952e\u5b57\u7684\u67e5\u8be2\uff0c\u60a8\u53ef\u4ee5\u4f7f\u7528\u5e03\u5c14\u903b\u8f91\u64cd\u4f5c\u7b26\u6765\u7ec4\u5408\u591a\u4e2a\u5173\u952e\u5b57\u3002Lucene \u652f\u6301\u4ee5\u4e0b\u51e0\u79cd\u64cd\u4f5c\u7b26\uff1a

1. AND operator

  • Use AND or && to express a logical AND.
  • Example: term1 AND term2 or term1 && term2
2. OR operator

  • Use OR or || to express a logical OR.
  • Example: term1 OR term2 or term1 || term2
3. NOT operator

  • Use NOT or - to express a logical NOT.
  • Example: term1 NOT term2 or term1 -term2
4. Quotes

  • You can wrap a phrase in quotes for an exact match.
  • Example: \"exact phrase\"
          "},{"location":"admin/insight/reference/lucene.html#_2","title":"\u4e3e\u4f8b","text":"
          1. \u6307\u5b9a\u5b57\u6bb5

            field1:keyword1 AND (field2:keyword2 OR field3:keyword3) NOT field4:keyword4\n

Explanation:

• The field field1 must contain the keyword keyword1.
• Meanwhile, field2 must contain keyword2, or field3 must contain keyword3.
• Finally, field4 must not contain keyword4.
2. Without a field specified

            keyword1 AND (keyword2 OR keyword3) NOT keyword4\n

Explanation:

• The keyword keyword1 must appear in any searchable field.
• Meanwhile, keyword2 or keyword3 must appear in any searchable field.
• Finally, keyword4 must not appear in any searchable field.
          "},{"location":"admin/insight/reference/lucene.html#_3","title":"\u6a21\u7cca\u67e5\u8be2","text":"

          \u5728 Lucene \u4e2d\uff0c\u6a21\u7cca\u67e5\u8be2\u53ef\u4ee5\u901a\u8fc7\u6ce2\u6d6a\u53f7 ~ \u6765\u5b9e\u73b0\u8fd1\u4f3c\u5339\u914d\u3002\u60a8\u53ef\u4ee5\u6307\u5b9a\u4e00\u4e2a\u7f16\u8f91\u8ddd\u79bb\u6765\u9650\u5236\u5339\u914d\u7684\u76f8\u4f3c\u5ea6\u7a0b\u5ea6\u3002

          term~\n

In the example above, term is the keyword to match approximately.

Note the following:

• An optional parameter can follow the tilde ~ to control the similarity of the fuzzy query.
• The parameter ranges from 0 to 2, where 0 requires an exact match, 1 allows a match within one edit operation (adding, deleting, or replacing a character), and 2 allows a match within two edit operations.
• If no parameter is specified, 0.5 is used as the default similarity threshold.
• Fuzzy queries return documents similar to the given keyword, but incur some performance overhead, especially on larger indexes.
          "},{"location":"admin/insight/reference/lucene.html#_4","title":"\u901a\u914d\u7b26","text":"

          Lucene \u652f\u6301\u4ee5\u4e0b\u4e24\u79cd\u901a\u914d\u7b26\u67e5\u8be2\uff1a

          1. * \u901a\u914d\u7b26\uff1a * \u7528\u4e8e\u5339\u914d\u96f6\u4e2a\u6216\u591a\u4e2a\u5b57\u7b26\u3002

            \u4f8b\u5982\uff0c te*t \u00a0\u53ef\u4ee5\u5339\u914d \"test\"\u3001\"text\"\u3001\"tempest\" \u7b49\u3002

          2. ? \u901a\u914d\u7b26\uff1a ? \u7528\u4e8e\u5339\u914d\u5355\u4e2a\u5b57\u7b26\u3002

            \u4f8b\u5982\uff0c te?t \u00a0\u53ef\u4ee5\u5339\u914d \"test\"\u3001\"text\" \u7b49\u3002

          "},{"location":"admin/insight/reference/lucene.html#_5","title":"\u4e3e\u4f8b\u8bf4\u660e","text":"
          te?t\n

In the example above, te?t matches words that start with \"te\", followed by any single character, and end with \"t\". Such a query can match \"test\", \"text\", \"tent\", and so on.

Note that the question mark stands for exactly one character. To match multiple characters or a variable-length sequence, use the asterisk * for multi-character wildcard matching. Also, the question mark does not match an empty string.

To summarize, the question mark ? in Lucene syntax is a single-character wildcard that matches any one character. Using it in search keywords enables more flexible and specific pattern matching.

"},{"location":"admin/insight/reference/lucene.html#_6","title":"Range Queries","text":"

Lucene syntax supports range queries; you can use square brackets [ ] or curly braces { } to express a range. Examples:

1. Inclusive range query:

  • Square brackets [ ] denote a closed interval that includes the boundary values.
  • Example: field:[value1 TO value2] means field ranges from value1 to value2, inclusive.
2. Exclusive range query:

  • Curly braces { } denote an open interval that excludes the boundary values.
  • Example: field:{value1 TO value2} means field lies strictly between value1 and value2.
3. Range query with omitted boundaries:

  • One or both boundary values can be omitted to specify an unbounded range.
  • Example: field:[value TO ] means field ranges from value to positive infinity, and field:[ TO value] means field ranges from negative infinity to value.

  Note

  Range queries only apply to field types that can be sorted, such as numbers and dates. Also make sure the boundary values are given in the field's actual value type. If you want to run a range query across the whole index without specifying a field, you can use the wildcard * in place of the field name.

"},{"location":"admin/insight/reference/lucene.html#_7","title":"Examples","text":"
1. With a field specified

            timestamp:[2022-01-01 TO 2022-01-31]\n

This retrieves data whose timestamp field falls between January 1, 2022 and January 31, 2022.

2. Without a field specified

            *:[value1 TO value2]\n

This searches the whole index for documents whose values range from value1 to value2.

"},{"location":"admin/insight/reference/lucene.html#insight","title":"Common Keywords in Insight","text":""},{"location":"admin/insight/reference/lucene.html#_8","title":"Container Logs","text":"
• kubernetes.container_image: container image name
• kubernetes.container_name: container name
• kubernetes.namespace_name: namespace name
• kubernetes.pod_name: Pod name
• log: log content
• time: log timestamp
"},{"location":"admin/insight/reference/lucene.html#_9","title":"Host Logs","text":"
• syslog.file: log file path
• syslog.host: host name
• log: log content

If you want to match a specific value exactly, append the .keyword suffix to the field, for example kubernetes.container_name.keyword.

"},{"location":"admin/insight/reference/lucene.html#_10","title":"Examples","text":"
1. Query the logs of a specific container in a specific Pod

            kubernetes.pod_name.keyword:nginx-pod AND kubernetes.container_name.keyword:nginx\n
2. Query container logs whose Pod name contains nginx-pod

            kubernetes.pod_name:nginx-pod\n
          "},{"location":"admin/insight/reference/notify-helper.html","title":"\u901a\u77e5\u6a21\u677f\u4f7f\u7528\u8bf4\u660e","text":""},{"location":"admin/insight/reference/notify-helper.html#go-template","title":"\u6a21\u677f\u8bed\u6cd5\uff08Go Template\uff09\u8bf4\u660e","text":"

          \u544a\u8b66\u901a\u77e5\u6a21\u677f\u91c7\u7528\u4e86 Go Template \u8bed\u6cd5\u6765\u6e32\u67d3\u6a21\u677f\u3002

          \u6a21\u677f\u4f1a\u57fa\u4e8e\u4e0b\u9762\u7684\u6570\u636e\u8fdb\u884c\u6e32\u67d3\u3002

          {\n    \"status\": \"firing\",\n    \"labels\": {\n        \"alertgroup\": \"test-group\",           // \u544a\u8b66\u7b56\u7565\u540d\u79f0\n        \"alertname\": \"test-rule\",          // \u544a\u8b66\u89c4\u5219\u540d\u79f0\n        \"cluster\": \"35b54a48-b66c-467b-a8dc-503c40826330\",\n        \"customlabel1\": \"v1\",\n        \"customlabel2\": \"v2\",\n        \"endpoint\": \"https\",\n        \"group_id\": \"01gypg06fcdf7rmqc4ksv97646\",\n        \"instance\": \"10.6.152.85:6443\",\n        \"job\": \"apiserver\",\n        \"namespace\": \"default\",\n        \"prometheus\": \"insight-system/insight-agent-kube-prometh-prometheus\",\n        \"prometheus_replica\": \"prometheus-insight-agent-kube-prometh-prometheus-0\",\n        \"rule_id\": \"01gypg06fcyn2g9zyehbrvcdfn\",\n        \"service\": \"kubernetes\",\n        \"severity\": \"critical\",\n        \"target\": \"35b54a48-b66c-467b-a8dc-503c40826330\",\n        \"target_type\": \"cluster\"\n   },\n    \"annotations\": {\n        \"customanno1\": \"v1\",\n        \"customanno2\": \"v2\",\n        \"description\": \"\u8fd9\u662f\u4e00\u6761\u6d4b\u8bd5\u89c4\u5219\uff0c10.6.152.85:6443 down\",\n        \"value\": \"1\"\n    },\n    \"startsAt\": \"2023-04-20T07:53:54.637363473Z\",\n    \"endsAt\": \"0001-01-01T00:00:00Z\",\n    \"generatorURL\": \"http://vmalert-insight-victoria-metrics-k8s-stack-df987997b-npsl9:8080/vmalert/alert?group_id=16797738747470868115&alert_id=10071735367745833597\",\n    \"fingerprint\": \"25c8d93d5bf58ac4\"\n}\n
          "},{"location":"admin/insight/reference/notify-helper.html#_2","title":"\u4f7f\u7528\u8bf4\u660e","text":"
          1. . \u5b57\u7b26

            \u5728\u5f53\u524d\u4f5c\u7528\u57df\u4e0b\u6e32\u67d3\u6307\u5b9a\u5bf9\u8c61\u3002

            \u793a\u4f8b 1: \u53d6\u9876\u7ea7\u4f5c\u7528\u57df\u4e0b\u7684\u6240\u6709\u5185\u5bb9\uff0c\u5373\u793a\u4f8b\u4ee3\u7801\u4e2d\u4e0a\u4e0b\u6587\u6570\u636e\u7684\u5168\u90e8\u5185\u5bb9\u3002

            {{ . }}\n
2. Conditionals: if / else

  Use if to test the data; if the condition is not met, else can be executed.

  {{if .Labels.namespace }}Namespace: {{ .Labels.namespace }} \\n{{ end }}\n
3. Iteration: range

  The range action repeats a block of template code over a collection (Go templates iterate with range, not for).

  Example 1: iterate over the labels map to get all of the alert's label contents.

  {{ range .Labels }} \\n {{ end }}\n
          "},{"location":"admin/insight/reference/notify-helper.html#functions","title":"\u51fd\u6570\u8bf4\u660e FUNCTIONS","text":"

          Insight \u7684\u201d\u901a\u77e5\u6a21\u677f\u201c\u548c\u201d\u77ed\u4fe1\u6a21\u677f\u201c\u652f\u6301 70 \u591a\u4e2a sprig \u51fd\u6570\uff0c\u4ee5\u53ca\u81ea\u7814\u7684\u51fd\u6570\u3002

          "},{"location":"admin/insight/reference/notify-helper.html#sprig","title":"Sprig \u51fd\u6570","text":"

          Sprig \u5185\u7f6e\u4e86 70 \u591a\u79cd\u5e38\u89c1\u7684\u6a21\u677f\u51fd\u6570\u5e2e\u52a9\u6e32\u67d3\u6570\u636e\u3002\u4ee5\u4e0b\u5217\u4e3e\u5e38\u89c1\u51fd\u6570\uff1a

          • \u65f6\u95f4\u64cd\u4f5c
          • \u5b57\u7b26\u4e32\u64cd\u4f5c
          • \u7c7b\u578b\u8f6c\u6362\u64cd\u4f5c
          • \u6574\u6570\u7684\u6570\u5b66\u8ba1\u7b97

          \u66f4\u591a\u7ec6\u8282\u53ef\u4ee5\u67e5\u770b\u5b98\u65b9\u6587\u6863\u3002

          "},{"location":"admin/insight/reference/notify-helper.html#_3","title":"\u81ea\u7814\u51fd\u6570","text":""},{"location":"admin/insight/reference/notify-helper.html#toclustername","title":"toClusterName","text":"

          toClusterName \u51fd\u6570\u6839\u636e\u201c\u96c6\u7fa4\u552f\u4e00\u6807\u793a Id\u201d\u67e5\u8be2\u201c\u96c6\u7fa4\u540d\u201d\uff1b\u5982\u679c\u67e5\u8be2\u4e0d\u5230\u5bf9\u5e94\u7684\u96c6\u7fa4\uff0c\u5c06\u76f4\u63a5\u8fd4\u56de\u4f20\u5165\u7684\u96c6\u7fa4\u7684\u552f\u4e00\u6807\u793a\u3002

          func toClusterName(id string) (string, error)\n

Example:

          {{ toClusterName \"clusterId\" }}\n{{ \"clusterId\" | toClusterName }}\n
          "},{"location":"admin/insight/reference/notify-helper.html#toclusterid","title":"toClusterId","text":"

The toClusterId function looks up a cluster's unique ID by cluster name; if no matching cluster is found, it returns the given cluster name as-is.

          func toClusterId(name string) (string, error)\n

Example:

          {{ toClusterId \"clusterName\" }}\n{{ \"clusterName\" | toClusterId }}\n
          "},{"location":"admin/insight/reference/notify-helper.html#todateinzone","title":"toDateInZone","text":"

toDateInZone converts a time string into the desired time zone and formats it.

          func toDateInZone(fmt string, date interface{}, zone string) string\n

Example 1:

          {{ toDateInZone \"2006-01-02T15:04:05\" \"2022-08-15T05:59:08.064449533Z\" \"Asia/Shanghai\" }}\n

This returns 2022-08-15T13:59:08. In addition, the effect of toDateInZone can also be achieved with sprig's built-in functions:

          {{ dateInZone \"2006-01-02T15:04:05\" (toDate \"2006-01-02T15:04:05Z07:00\" .StartsAt) \"Asia/Shanghai\" }}\n

Example 2:

{{ toDateInZone \"2006-01-02T15:04:05\" .StartsAt \"Asia/Shanghai\" }}\n\n## Threshold Template\n\nInsight's built-in webhook alert template is shown below; other channels such as email and WeCom use the same content, with line breaks adjusted accordingly.\n\n```text\nRule name: {{ .Labels.alertname }} \\n\nPolicy name: {{ .Labels.alertgroup }} \\n\nSeverity: {{ .Labels.severity }} \\n\nCluster: {{ .Labels.cluster }} \\n\n{{if .Labels.namespace }}Namespace: {{ .Labels.namespace }} \\n{{ end }}\n{{if .Labels.node }}Node: {{ .Labels.node }} \\n{{ end }}\nResource type: {{ .Labels.target_type }} \\n\n{{if .Labels.target }}Resource name: {{ .Labels.target }} \\n{{ end }}\nTrigger value: {{ .Annotations.value }} \\n\nStarted at: {{ .StartsAt }} \\n\n{{if ne \"0001-01-01T00:00:00Z\" .EndsAt }}Ended at: {{ .EndsAt }} \\n{{ end }}\nDescription: {{ .Annotations.description }} \\n\n
          "},{"location":"admin/insight/reference/notify-helper.html#_4","title":"\u90ae\u7bb1\u4e3b\u9898\u53c2\u6570","text":"

          \u7531\u4e8e Insight \u5728\u53d1\u9001\u544a\u8b66\u6d88\u606f\u65f6\uff0c\u4f1a\u5bf9\u540c\u4e00\u65f6\u95f4\u540c\u4e00\u6761\u89c4\u5219\u4ea7\u751f\u7684\u6d88\u606f\u8fdb\u884c\u5408\u5e76\u53d1\u9001\uff0c \u6240\u4ee5 email \u4e3b\u9898\u4e0d\u540c\u4e8e\u4e0a\u9762\u56db\u79cd\u6a21\u677f\uff0c\u53ea\u4f1a\u4f7f\u7528\u544a\u8b66\u6d88\u606f\u4e2d\u7684 commonLabels \u5185\u5bb9\u5bf9\u6a21\u677f\u8fdb\u884c\u6e32\u67d3\u3002\u9ed8\u8ba4\u6a21\u677f\u5982\u4e0b:

          [{{ .status }}] [{{ .severity }}] \u544a\u8b66\uff1a{{ .alertname }}\n

          \u5176\u4ed6\u53ef\u4f5c\u4e3a\u90ae\u7bb1\u4e3b\u9898\u7684\u5b57\u6bb5\u5982\u4e0b:

          {{ .status }} \u544a\u8b66\u6d88\u606f\u7684\u89e6\u53d1\u72b6\u6001\n{{ .alertgroup }} \u544a\u8b66\u6240\u5c5e\u7684\u7b56\u7565\u540d\u79f0\n{{ .alertname }} \u544a\u8b66\u6240\u5c5e\u7684\u89c4\u5219\u540d\u79f0\n{{ .severity }} \u544a\u8b66\u7ea7\u522b\n{{ .target_type }} \u544a\u8b66\u8d44\u6e90\u7c7b\u578b\n{{ .target }} \u544a\u8b66\u8d44\u6e90\u5bf9\u8c61\n{{ .\u89c4\u5219\u5176\u4ed6\u81ea\u5b9a\u4e49 label key }}\n
          "},{"location":"admin/insight/reference/tailing-sidecar.html","title":"\u901a\u8fc7 Sidecar \u91c7\u96c6\u5bb9\u5668\u65e5\u5fd7","text":"

          Tailing Sidecar \u662f\u4e00\u4e2a\u6d41\u5f0f Sidecar \u5bb9\u5668\uff0c \u662f Kubernetes \u96c6\u7fa4\u7ea7\u7684\u65e5\u5fd7\u4ee3\u7406\u3002Tailing Sidercar \u53ef\u4ee5\u5728\u5bb9\u5668\u65e0\u6cd5\u5199\u5165\u6807\u51c6\u8f93\u51fa\u6216\u6807\u51c6\u9519\u8bef\u6d41\u65f6\uff0c\u65e0\u9700\u66f4\u6539\uff0c\u5373\u53ef\u81ea\u52a8\u6536\u53d6\u548c\u6c47\u603b\u5bb9\u5668\u5185\u65e5\u5fd7\u6587\u4ef6\u3002

          Insight \u652f\u6301\u901a\u8fc7 Sidercar \u6a21\u5f0f\u91c7\u96c6\u65e5\u5fd7\uff0c\u5373\u5728\u6bcf\u4e2a Pod \u4e2d\u8fd0\u884c\u4e00\u4e2a Sidecar \u5bb9\u5668\u5c06\u65e5\u5fd7\u6570\u636e\u8f93\u51fa\u5230\u6807\u51c6\u8f93\u51fa\u6d41\uff0c\u4ee5\u4fbf FluentBit \u6536\u96c6\u5bb9\u5668\u65e5\u5fd7\u3002

          Insight Agent \u4e2d\u9ed8\u8ba4\u5b89\u88c5\u4e86 tailing-sidecar operator \u3002 \u82e5\u60a8\u60f3\u5f00\u542f\u91c7\u96c6\u5bb9\u5668\u5185\u6587\u4ef6\u65e5\u5fd7\uff0c\u8bf7\u901a\u8fc7\u7ed9 Pod \u6dfb\u52a0\u6ce8\u89e3\u8fdb\u884c\u6807\u8bb0\uff0c tailing-sidecar operator \u5c06\u81ea\u52a8\u6ce8\u5165 Tailing Sidecar \u5bb9\u5668\uff0c \u88ab\u6ce8\u5165\u7684 Sidecar \u5bb9\u5668\u8bfb\u53d6\u4e1a\u52a1\u5bb9\u5668\u5185\u7684\u6587\u4ef6\uff0c\u5e76\u8f93\u51fa\u5230\u6807\u51c6\u8f93\u51fa\u6d41\u3002

          \u5177\u4f53\u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b\uff1a

          1. \u4fee\u6539 Pod \u7684 YAML \u6587\u4ef6\uff0c\u5728 annotation \u5b57\u6bb5\u589e\u52a0\u5982\u4e0b\u53c2\u6570\uff1a

            metadata:\n  annotations:\n    tailing-sidecar:  <sidecar-name-0>:<volume-name-0>:<path-to-tail-0>;<sidecar-name-1>:<volume-name-1>:<path-to-tail-1>\n

Field descriptions:

  • sidecar-name-0: name of the tailing sidecar container (optional; if not specified, a name starting with the prefix tailing-sidecar is generated automatically)
  • volume-name-0: name of the volume;
  • path-to-tail-0: file path of the log

  Note

  Each Pod can run multiple sidecar containers, separated by ;, allowing different sidecar containers to collect multiple files into multiple volumes.

2. Restart the Pod. Once its status becomes Running, you can search the Pod's in-container logs on the Log Query page. (A kubectl sketch of step 1 follows.)
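A hedged sketch of applying the annotation from step 1 to an existing Deployment's Pod template with kubectl patch; the deployment name, volume name, and log path are placeholders, not taken from this document:

```shell
# Annotate the Pod template so the operator injects a tailing sidecar
# (my-app, app-logs, and /var/log/app/app.log are placeholders)
kubectl patch deployment my-app --type merge \
  -p '{"spec":{"template":{"metadata":{"annotations":{"tailing-sidecar":"sidecar0:app-logs:/var/log/app/app.log"}}}}}'
```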

          "},{"location":"admin/insight/reference/used-metric-in-insight.html","title":"Insight \u53c2\u8003\u6307\u6807\u8bf4\u660e","text":"

          \u672c\u6587\u4e2d\u7684\u6307\u6807\u662f\u57fa\u4e8e\u793e\u533a\u7684 kube-prometheus \u7684\u57fa\u7840\u4e4b\u4e0a\u6574\u7406\u800c\u6210\u3002 \u76ee\u524d\u6db5\u76d6\u4e86 Cluster\u3001Node\u3001Namespace\u3001Workload \u7b49\u591a\u4e2a\u5c42\u9762\u7684\u6307\u6807\u3002 \u672c\u6587\u679a\u4e3e\u4e86\u4e00\u4e9b\u5e38\u7528\u7684\u6307\u6807\u540d\u3001\u4e2d\u6587\u63cf\u8ff0\u548c\u5355\u4f4d\uff0c\u4ee5\u4fbf\u7d22\u5f15\u3002

          "},{"location":"admin/insight/reference/used-metric-in-insight.html#cluster","title":"\u96c6\u7fa4\uff08Cluster\uff09","text":"\u6307\u6807\u540d \u4e2d\u6587\u63cf\u8ff0 \u5355\u4f4d cluster_cpu_utilization \u96c6\u7fa4 CPU \u4f7f\u7528\u7387 cluster_cpu_total \u96c6\u7fa4 CPU \u603b\u91cf Core cluster_cpu_usage \u96c6\u7fa4 CPU \u7528\u91cf Core cluster_cpu_requests_commitment \u96c6\u7fa4 CPU \u5206\u914d\u7387 cluster_memory_utilization \u96c6\u7fa4\u5185\u5b58\u4f7f\u7528\u7387 cluster_memory_usage \u96c6\u7fa4\u5185\u5b58\u4f7f\u7528\u91cf Byte cluster_memory_available \u96c6\u7fa4\u53ef\u7528\u5185\u5b58 Byte cluster_memory_requests_commitment \u96c6\u7fa4\u5185\u5b58\u5206\u914d\u7387 cluster_memory_total \u96c6\u7fa4\u5185\u5b58\u53ef\u7528\u91cf Byte cluster_net_utilization \u96c6\u7fa4\u7f51\u7edc\u6570\u636e\u4f20\u8f93\u901f\u7387 Byte/s cluster_net_bytes_transmitted \u96c6\u7fa4\u7f51\u7edc\u6570\u636e\u53d1\u9001 (\u4e0a\u884c) \u901f\u7387 Byte/s cluster_net_bytes_received \u96c6\u7fa4\u7f51\u7edc\u6570\u636e\u63a5\u53d7 (\u4e0b\u884c) \u901f\u7387 Byte/s cluster_disk_read_iops \u96c6\u7fa4\u78c1\u76d8\u6bcf\u79d2\u8bfb\u6b21\u6570 \u6b21/s cluster_disk_write_iops \u96c6\u7fa4\u78c1\u76d8\u6bcf\u79d2\u5199\u6b21\u6570 \u6b21/s cluster_disk_read_throughput \u96c6\u7fa4\u78c1\u76d8\u6bcf\u79d2\u8bfb\u53d6\u6570\u636e\u91cf Byte/s cluster_disk_write_throughput \u96c6\u7fa4\u78c1\u76d8\u6bcf\u79d2\u5199\u5165\u6570\u636e\u91cf Byte/s cluster_disk_size_capacity \u96c6\u7fa4\u78c1\u76d8\u603b\u5bb9\u91cf Byte cluster_disk_size_available \u96c6\u7fa4\u78c1\u76d8\u53ef\u7528\u5927\u5c0f Byte cluster_disk_size_usage \u96c6\u7fa4\u78c1\u76d8\u4f7f\u7528\u91cf Byte cluster_disk_size_utilization \u96c6\u7fa4\u78c1\u76d8\u4f7f\u7528\u7387 cluster_node_total \u96c6\u7fa4\u8282\u70b9\u603b\u6570 \u4e2a cluster_node_online \u96c6\u7fa4\u8282\u70b9\u603b\u6570 \u4e2a cluster_node_offline_count \u96c6\u7fa4\u5931\u8054\u7684\u8282\u70b9\u4e2a\u6570 \u4e2a cluster_pod_count \u96c6\u7fa4 Pod \u603b\u6570 \u4e2a cluster_pod_running_count \u96c6\u7fa4\u6b63\u5e38\u8fd0\u884c Pod \u4e2a\u6570 \u4e2a cluster_pod_abnormal_count \u96c6\u7fa4\u5f02\u5e38\u8fd0\u884c Pod \u4e2a\u6570 \u4e2a cluster_deployment_count \u96c6\u7fa4 Deployment \u603b\u6570 \u4e2a cluster_deployment_normal_count \u96c6\u7fa4\u6b63\u5e38\u7684 Deployment \u603b\u6570 \u4e2a cluster_deployment_abnormal_count \u96c6\u7fa4\u5f02\u5e38\u7684 Deployment \u603b\u6570 \u4e2a cluster_statefulset_count \u96c6\u7fa4 StatefulSet \u4e2a\u6570 \u4e2a cluster_statefulset_normal_count \u96c6\u7fa4\u6b63\u5e38\u8fd0\u884c StatefulSet \u4e2a\u6570 \u4e2a cluster_statefulset_abnormal_count \u96c6\u7fa4\u5f02\u5e38\u8fd0\u884c StatefulSet \u4e2a\u6570 \u4e2a cluster_daemonset_count \u96c6\u7fa4 DaemonSet \u4e2a\u6570 \u4e2a cluster_daemonset_normal_count \u96c6\u7fa4\u6b63\u5e38\u8fd0\u884c DaemonSet \u4e2a\u6570 \u4e2a cluster_daemonset_abnormal_count \u96c6\u7fa4\u5f02\u5e38\u8fd0\u884c DaemonSet \u4e2a\u6570 \u4e2a cluster_job_count \u96c6\u7fa4 Job \u603b\u6570 \u4e2a cluster_job_normal_count \u96c6\u7fa4\u6b63\u5e38\u8fd0\u884c Job \u4e2a\u6570 \u4e2a cluster_job_abnormal_count \u96c6\u7fa4\u5f02\u5e38\u8fd0\u884c Job \u4e2a\u6570 \u4e2a

          Tip

          \u4f7f\u7528\u7387\u4e00\u822c\u662f\uff080,1] \u533a\u95f4\u7684\u6570\u5b57\uff08\u4f8b\u5982\uff1a0.21\uff0c\u800c\u4e0d\u662f 21%\uff09

          "},{"location":"admin/insight/reference/used-metric-in-insight.html#node","title":"\u8282\u70b9\uff08Node\uff09","text":"\u6307\u6807\u540d \u4e2d\u6587\u63cf\u8ff0 \u5355\u4f4d node_cpu_utilization \u8282\u70b9 CPU \u4f7f\u7528\u7387 node_cpu_total \u8282\u70b9 CPU \u603b\u91cf Core node_cpu_usage \u8282\u70b9 CPU \u7528\u91cf Core node_cpu_requests_commitment \u8282\u70b9 CPU \u5206\u914d\u7387 node_memory_utilization \u8282\u70b9\u5185\u5b58\u4f7f\u7528\u7387 node_memory_usage \u8282\u70b9\u5185\u5b58\u4f7f\u7528\u91cf Byte node_memory_requests_commitment \u8282\u70b9\u5185\u5b58\u5206\u914d\u7387 node_memory_available \u8282\u70b9\u53ef\u7528\u5185\u5b58 Byte node_memory_total \u8282\u70b9\u5185\u5b58\u53ef\u7528\u91cf Byte node_net_utilization \u8282\u70b9\u7f51\u7edc\u6570\u636e\u4f20\u8f93\u901f\u7387 Byte/s node_net_bytes_transmitted \u8282\u70b9\u7f51\u7edc\u6570\u636e\u53d1\u9001 (\u4e0a\u884c) \u901f\u7387 Byte/s node_net_bytes_received \u8282\u70b9\u7f51\u7edc\u6570\u636e\u63a5\u53d7 (\u4e0b\u884c) \u901f\u7387 Byte/s node_disk_read_iops \u8282\u70b9\u78c1\u76d8\u6bcf\u79d2\u8bfb\u6b21\u6570 \u6b21/s node_disk_write_iops \u8282\u70b9\u78c1\u76d8\u6bcf\u79d2\u5199\u6b21\u6570 \u6b21/s node_disk_read_throughput \u8282\u70b9\u78c1\u76d8\u6bcf\u79d2\u8bfb\u53d6\u6570\u636e\u91cf Byte/s node_disk_write_throughput \u8282\u70b9\u78c1\u76d8\u6bcf\u79d2\u5199\u5165\u6570\u636e\u91cf Byte/s node_disk_size_capacity \u8282\u70b9\u78c1\u76d8\u603b\u5bb9\u91cf Byte node_disk_size_available \u8282\u70b9\u78c1\u76d8\u53ef\u7528\u5927\u5c0f Byte node_disk_size_usage \u8282\u70b9\u78c1\u76d8\u4f7f\u7528\u91cf Byte node_disk_size_utilization \u8282\u70b9\u78c1\u76d8\u4f7f\u7528\u7387"},{"location":"admin/insight/reference/used-metric-in-insight.html#workload","title":"\u5de5\u4f5c\u8d1f\u8f7d\uff08Workload\uff09","text":"

          \u76ee\u524d\u652f\u6301\u7684\u5de5\u4f5c\u8d1f\u8f7d\u7c7b\u578b\u5305\u62ec\uff1aDeployment\u3001StatefulSet\u3001DaemonSet\u3001Job \u548c CronJob\u3002

          Metric Name Description Unit workload_cpu_usage Workload CPU usage Core workload_cpu_limits Workload CPU limits Core workload_cpu_requests Workload CPU requests Core workload_cpu_utilization Workload CPU utilization workload_memory_usage Workload memory usage Byte workload_memory_limits Workload memory limits Byte workload_memory_requests Workload memory requests Byte workload_memory_utilization Workload memory utilization workload_memory_usage_cached Workload memory usage (including cache) Byte workload_net_bytes_transmitted Workload network transmit rate Byte/s workload_net_bytes_received Workload network receive rate Byte/s workload_disk_read_throughput Workload disk read throughput Byte/s workload_disk_write_throughput Workload disk write throughput Byte/s
          1. The metrics here are calculated as totals over the whole workload.
          2. Retrieve metrics in the form workload_cpu_usage{workload_type=\"deployment\", workload=\"prometheus\"}.
          3. workload_pod_utilization calculation rule: workload_pod_usage / workload_pod_request.
          "},{"location":"admin/insight/reference/used-metric-in-insight.html#pod","title":"\u5bb9\u5668\u7ec4\uff08Pod\uff09","text":"\u6307\u6807\u540d \u4e2d\u6587\u63cf\u8ff0 \u5355\u4f4d pod_cpu_usage \u5bb9\u5668\u7ec4 CPU \u7528\u91cf Core pod_cpu_limits \u5bb9\u5668\u7ec4 CPU \u9650\u5236\u91cf Core pod_cpu_requests \u5bb9\u5668\u7ec4 CPU \u8bf7\u6c42\u91cf Core pod_cpu_utilization \u5bb9\u5668\u7ec4 CPU \u4f7f\u7528\u7387 pod_memory_usage \u5bb9\u5668\u7ec4\u5185\u5b58\u4f7f\u7528\u91cf Byte pod_memory_limits \u5bb9\u5668\u7ec4\u5185\u5b58\u9650\u5236\u91cf Byte pod_memory_requests \u5bb9\u5668\u7ec4\u5185\u5b58\u8bf7\u6c42\u91cf Byte pod_memory_utilization \u5bb9\u5668\u7ec4\u5185\u5b58\u4f7f\u7528\u7387 pod_memory_usage_cached \u5bb9\u5668\u7ec4\u5185\u5b58\u4f7f\u7528\u91cf\uff08\u5305\u542b\u7f13\u5b58\uff09 Byte pod_net_bytes_transmitted \u5bb9\u5668\u7ec4\u7f51\u7edc\u6570\u636e\u53d1\u9001\u901f\u7387 Byte/s pod_net_bytes_received \u5bb9\u5668\u7ec4\u7f51\u7edc\u6570\u636e\u63a5\u53d7\u901f\u7387 Byte/s pod_disk_read_throughput \u5bb9\u5668\u7ec4\u78c1\u76d8\u6bcf\u79d2\u8bfb\u53d6\u6570\u636e\u91cf Byte/s pod_disk_write_throughput \u5bb9\u5668\u7ec4\u78c1\u76d8\u6bcf\u79d2\u5199\u5165\u6570\u636e\u91cf Byte/s

          Use pod_cpu_usage{workload_type=\"deployment\", workload=\"prometheus\"} to get the CPU usage of all Pods owned by the Deployment named prometheus. A PromQL aggregation sketch is shown below.
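          A minimal PromQL sketch for aggregating this metric (assuming these recording rules are queryable from the bundled VictoriaMetrics endpoint; the aggregations are illustrative, not product defaults):

            # Total CPU usage across all Pods of the prometheus Deployment\nsum(pod_cpu_usage{workload_type=\"deployment\", workload=\"prometheus\"})\n\n# Top 3 Pods of that Deployment by CPU usage\ntopk(3, pod_cpu_usage{workload_type=\"deployment\", workload=\"prometheus\"})\n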

          "},{"location":"admin/insight/reference/used-metric-in-insight.html#span","title":"Span \u6307\u6807","text":"\u6307\u6807\u540d \u4e2d\u6587\u63cf\u8ff0 \u5355\u4f4d calls_total \u670d\u52a1\u8bf7\u6c42\u603b\u6570 duration_milliseconds_bucket \u670d\u52a1\u5ef6\u65f6\u76f4\u65b9\u56fe duration_milliseconds_sum \u670d\u52a1\u603b\u5ef6\u65f6 ms duration_milliseconds_count \u670d\u52a1\u5ef6\u65f6\u8bb0\u5f55\u6761\u6570 otelcol_processor_groupbytrace_spans_released \u91c7\u96c6\u5230\u7684 span \u6570 otelcol_processor_groupbytrace_traces_released \u91c7\u96c6\u5230\u7684 trace \u6570 traces_service_graph_request_total \u670d\u52a1\u8bf7\u6c42\u603b\u6570 (\u62d3\u6251\u529f\u80fd\u4f7f\u7528) traces_service_graph_request_server_seconds_sum \u670d\u52a1\u603b\u5ef6\u65f6 (\u62d3\u6251\u529f\u80fd\u4f7f\u7528) ms traces_service_graph_request_server_seconds_bucket \u670d\u52a1\u5ef6\u65f6\u76f4\u65b9\u56fe (\u62d3\u6251\u529f\u80fd\u4f7f\u7528) traces_service_graph_request_server_seconds_count \u670d\u52a1\u8bf7\u6c42\u603b\u6570 (\u62d3\u6251\u529f\u80fd\u4f7f\u7528)"},{"location":"admin/insight/system-config/modify-config.html","title":"\u4fee\u6539\u7cfb\u7edf\u914d\u7f6e","text":"

          Insight persists metric, log, and trace data by default; you can refer to this document to modify the system configuration. This document applies only to the built-in Elasticsearch deployment; if you use an external Elasticsearch, adjust the settings yourself.

          "},{"location":"admin/insight/system-config/modify-config.html#_2","title":"\u5982\u4f55\u4fee\u6539\u6307\u6807\u6570\u636e\u4fdd\u7559\u671f\u9650","text":"

          First SSH into the corresponding node, then follow the steps below to modify the metric data retention period.

          1. Run the following command:

            kubectl edit vmcluster insight-victoria-metrics-k8s-stack -n insight-system\n
          2. In the YAML file, the default value of retentionPeriod is 14, in days. Modify the parameter as needed.

            apiVersion: operator.victoriametrics.com/v1beta1\nkind: VMCluster\nmetadata:\n  annotations:\n    meta.helm.sh/release-name: insight\n    meta.helm.sh/release-namespace: insight-system\n  creationTimestamp: \"2022-08-25T04:31:02Z\"\n  finalizers:\n  - apps.victoriametrics.com/finalizer\n  generation: 2\n  labels:\n    app.kubernetes.io/instance: insight\n    app.kubernetes.io/managed-by: Helm\n    app.kubernetes.io/name: victoria-metrics-k8s-stack\n    app.kubernetes.io/version: 1.77.2\n    helm.sh/chart: victoria-metrics-k8s-stack-0.9.3\n  name: insight-victoria-metrics-k8s-stack\n  namespace: insight-system\n  resourceVersion: \"123007381\"\n  uid: 55cee8d6-c651-404b-b2c9-50603b405b54\nspec:\n  replicationFactor: 1\n  retentionPeriod: \"14\"\n  vminsert:\n    extraArgs:\n      maxLabelsPerTimeseries: \"45\"\n    image:\n      repository: docker.m.daocloud.io/victoriametrics/vminsert\n      tag: v1.80.0-cluster\n      replicaCount: 1\n
          3. After saving the change, the Pods of the component responsible for storing metrics restart automatically; just wait a moment. A non-interactive alternative is sketched below.
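          As a minimal non-interactive sketch (assuming the same VMCluster name and namespace as above), the retention can also be set with a single patch, here to 30 days:

            kubectl -n insight-system patch vmcluster insight-victoria-metrics-k8s-stack --type merge -p '{\"spec\":{\"retentionPeriod\":\"30\"}}'\n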

          "},{"location":"admin/insight/system-config/modify-config.html#_3","title":"\u5982\u4f55\u4fee\u6539\u65e5\u5fd7\u6570\u636e\u5b58\u50a8\u65f6\u957f","text":"

          First SSH into the corresponding node, then follow the steps below to modify the log data retention period:

          "},{"location":"admin/insight/system-config/modify-config.html#json","title":"\u65b9\u6cd5\u4e00\uff1a\u4fee\u6539 Json \u6587\u4ef6","text":"
          1. \u4fee\u6539\u4ee5\u4e0b\u6587\u4ef6\u4e2d rollover \u5b57\u6bb5\u4e2d\u7684 max_age \u53c2\u6570\uff0c\u5e76\u8bbe\u7f6e\u4fdd\u7559\u671f\u9650\uff0c\u9ed8\u8ba4\u5b58\u50a8\u65f6\u957f\u4e3a 7d \u3002\u6ce8\u610f\u9700\u8981\u4fee\u6539\u7b2c\u4e00\u884c\u4e2d\u7684 Elastic \u7528\u6237\u540d\u548c\u5bc6\u7801\u3001IP \u5730\u5740\u548c\u7d22\u5f15\u3002

            curl  --insecure --location -u\"elastic:amyVt4o826e322TUVi13Ezw6\" -X PUT \"https://172.30.47.112:30468/_ilm/policy/insight-es-k8s-logs-policy?pretty\" -H 'Content-Type: application/json' -d'\n{\n    \"policy\": {\n        \"phases\": {\n            \"hot\": {\n                \"min_age\": \"0ms\",\n                \"actions\": {\n                    \"set_priority\": {\n                        \"priority\": 100\n                    },\n                    \"rollover\": {\n                        \"max_age\": \"8d\",\n                        \"max_size\": \"10gb\"\n                    }\n                }\n            },\n            \"warm\": {\n                \"min_age\": \"10d\",\n                \"actions\": {\n                    \"forcemerge\": {\n                        \"max_num_segments\": 1\n                    }\n                }\n            },\n            \"delete\": {\n                \"min_age\": \"30d\",\n                \"actions\": {\n                    \"delete\": {}\n                }\n            }\n        }\n    }\n}'\n
          2. After making the changes, run the command above. If it prints the following output, the modification succeeded.

            {\n\"acknowledged\" : true\n}\n
          "},{"location":"admin/insight/system-config/modify-config.html#ui","title":"\u65b9\u6cd5\u4e8c\uff1a\u4ece UI \u4fee\u6539","text":"
          1. \u767b\u5f55 kibana \uff0c\u9009\u62e9\u5de6\u4fa7\u5bfc\u822a\u680f Stack Management \u3002

          2. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a Index Lifecycle Polices \uff0c\u5e76\u627e\u5230\u7d22\u5f15 insight-es-k8s-logs-policy \uff0c\u70b9\u51fb\u8fdb\u5165\u8be6\u60c5\u3002

          3. \u5c55\u5f00 Hot phase \u914d\u7f6e\u9762\u677f\uff0c\u4fee\u6539 Maximum age \u53c2\u6570\uff0c\u5e76\u8bbe\u7f6e\u4fdd\u7559\u671f\u9650\uff0c\u9ed8\u8ba4\u5b58\u50a8\u65f6\u957f\u4e3a 7d \u3002

          4. \u4fee\u6539\u5b8c\u540e\uff0c\u70b9\u51fb\u9875\u9762\u5e95\u90e8\u7684 Save policy \u5373\u4fee\u6539\u6210\u529f\u3002

          "},{"location":"admin/insight/system-config/modify-config.html#_4","title":"\u5982\u4f55\u4fee\u6539\u94fe\u8def\u6570\u636e\u5b58\u50a8\u65f6\u957f","text":"

          First SSH into the corresponding node, then follow the steps below to modify the trace data retention period:

          "},{"location":"admin/insight/system-config/modify-config.html#json_1","title":"\u65b9\u6cd5\u4e00\uff1a\u4fee\u6539 Json \u6587\u4ef6","text":"
          1. \u4fee\u6539\u4ee5\u4e0b\u6587\u4ef6\u4e2d rollover \u5b57\u6bb5\u4e2d\u7684 max_age \u53c2\u6570\uff0c\u5e76\u8bbe\u7f6e\u4fdd\u7559\u671f\u9650\uff0c\u9ed8\u8ba4\u5b58\u50a8\u65f6\u957f\u4e3a 7d \u3002\u6ce8\u610f\u9700\u8981\u4fee\u6539\u7b2c\u4e00\u884c\u4e2d\u7684 Elastic \u7528\u6237\u540d\u548c\u5bc6\u7801\u3001IP \u5730\u5740\u548c\u7d22\u5f15\u3002

            curl --insecure --location -u\"elastic:amyVt4o826e322TUVi13Ezw6\" -X PUT \"https://172.30.47.112:30468/_ilm/policy/jaeger-ilm-policy?pretty\" -H 'Content-Type: application/json' -d'\n{\n    \"policy\": {\n        \"phases\": {\n            \"hot\": {\n                \"min_age\": \"0ms\",\n                \"actions\": {\n                    \"set_priority\": {\n                        \"priority\": 100\n                    },\n                    \"rollover\": {\n                        \"max_age\": \"6d\",\n                        \"max_size\": \"10gb\"\n                    }\n                }\n            },\n            \"warm\": {\n                \"min_age\": \"10d\",\n                \"actions\": {\n                    \"forcemerge\": {\n                        \"max_num_segments\": 1\n                    }\n                }\n            },\n            \"delete\": {\n                \"min_age\": \"30d\",\n                \"actions\": {\n                    \"delete\": {}\n                }\n            }\n        }\n    }\n}'\n
          2. After making the changes, run the command above in the console. If it prints the following output, the modification succeeded.

            {\n\"acknowledged\" : true\n}\n
          "},{"location":"admin/insight/system-config/modify-config.html#ui_1","title":"\u65b9\u6cd5\u4e8c\uff1a\u4ece UI \u4fee\u6539","text":"
          1. \u767b\u5f55 kibana \uff0c\u9009\u62e9\u5de6\u4fa7\u5bfc\u822a\u680f Stack Management \u3002

          2. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a Index Lifecycle Polices \uff0c\u5e76\u627e\u5230\u7d22\u5f15 jaeger-ilm-policy \uff0c\u70b9\u51fb\u8fdb\u5165\u8be6\u60c5\u3002

          3. \u5c55\u5f00 Hot phase \u914d\u7f6e\u9762\u677f\uff0c\u4fee\u6539 Maximum age \u53c2\u6570\uff0c\u5e76\u8bbe\u7f6e\u4fdd\u7559\u671f\u9650\uff0c\u9ed8\u8ba4\u5b58\u50a8\u65f6\u957f\u4e3a 7d \u3002

          4. \u4fee\u6539\u5b8c\u540e\uff0c\u70b9\u51fb\u9875\u9762\u5e95\u90e8\u7684 Save policy \u5373\u4fee\u6539\u6210\u529f\u3002

          "},{"location":"admin/insight/system-config/system-component.html","title":"\u7cfb\u7edf\u7ec4\u4ef6","text":"

          On the System Components page you can quickly check the running status of the system components in the observability module. When a system component fails, some features of the observability module become unavailable.

          1. Enter the Insight (Observability) product module.
          2. In the left navigation bar, select System Management -> System Components.

          "},{"location":"admin/insight/system-config/system-component.html#_2","title":"\u7ec4\u4ef6\u8bf4\u660e","text":"\u6a21\u5757 \u7ec4\u4ef6\u540d\u79f0 \u8bf4\u660e \u6307\u6807 vminsert-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u5c06\u5404\u96c6\u7fa4\u4e2d Prometheus \u91c7\u96c6\u5230\u7684\u6307\u6807\u6570\u636e\u5199\u5165\u5b58\u50a8\u7ec4\u4ef6\u3002\u8be5\u7ec4\u4ef6\u5f02\u5e38\u4f1a\u5bfc\u81f4\u65e0\u6cd5\u5199\u5165\u5de5\u4f5c\u96c6\u7fa4\u7684\u6307\u6807\u6570\u636e\u3002 \u6307\u6807 vmalert-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u751f\u6548 VM Rule \u4e2d\u914d\u7f6e\u7684 recording \u548c Alert \u89c4\u5219\uff0c\u5e76\u5c06\u89e6\u53d1\u7684\u544a\u8b66\u89c4\u5219\u53d1\u9001\u7ed9 alertmanager\u3002 \u6307\u6807 vmalertmanager-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u5728\u544a\u8b66\u89e6\u65f6\u53d1\u9001\u6d88\u606f\u3002\u8be5\u7ec4\u4ef6\u5f02\u5e38\u4f1a\u5bfc\u81f4\u65e0\u6cd5\u53d1\u9001\u544a\u8b66\u4fe1\u606f\u3002 \u6307\u6807 vmselect-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u67e5\u8be2\u6307\u6807\u6570\u636e\u3002\u8be5\u7ec4\u4ef6\u5f02\u5e38\u4f1a\u5bfc\u81f4\u65e0\u6cd5\u67e5\u8be2\u6307\u6807\u3002 \u6307\u6807 vmstorage-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u5b58\u50a8\u591a\u96c6\u7fa4\u7684\u6307\u6807\u6570\u636e\u3002 \u4eea\u8868\u76d8 grafana-deployment \u63d0\u4f9b\u76d1\u63a7\u9762\u677f\u80fd\u529b\u3002\u8be5\u7ec4\u4ef6\u5f02\u5e38\u4f1a\u5bfc\u81f4\u65e0\u6cd5\u67e5\u770b\u5185\u7f6e\u7684\u4eea\u8868\u76d8\u3002 \u94fe\u8def insight-jaeger-collector \u8d1f\u8d23\u63a5\u6536\u00a0opentelemetry-collector\u00a0\u4e2d\u94fe\u8def\u6570\u636e\u5e76\u5c06\u5176\u8fdb\u884c\u5b58\u50a8\u3002 \u94fe\u8def insight-jaeger-query \u8d1f\u8d23\u67e5\u8be2\u5404\u96c6\u7fa4\u4e2d\u91c7\u96c6\u5230\u7684\u94fe\u8def\u6570\u636e\u3002 \u94fe\u8def insight-opentelemetry-collector \u8d1f\u8d23\u63a5\u6536\u5404\u5b50\u96c6\u7fa4\u8f6c\u53d1\u7684\u94fe\u8def\u6570\u636e \u65e5\u5fd7 elasticsearch \u8d1f\u8d23\u5b58\u50a8\u5404\u96c6\u7fa4\u7684\u65e5\u5fd7\u6570\u636e\u3002

          Note

          If you use an external Elasticsearch, some data may not be retrievable, so the Elasticsearch information may appear empty.

          "},{"location":"admin/insight/system-config/system-config.html","title":"\u7cfb\u7edf\u914d\u7f6e","text":"

          System Configuration shows the default retention periods for metrics, logs, and traces, as well as the default Apdex threshold.

          1. Click the navigation bar on the right and select System Configuration.

          2. To modify the retention period for historical alerts, click Edit and enter the target duration.

            When the retention period is set to \"0\", historical alerts are never cleared.

          3. To modify the default topology map rendering configuration, click Edit and define the topology thresholds as needed.

            Thresholds must be greater than 0, and each threshold must be smaller than the one after it; every threshold must also fall between the minimum and the maximum.

          Note

          To modify other configurations, click to see How to modify the system configuration?

          "},{"location":"admin/insight/trace/service.html","title":"\u670d\u52a1\u76d1\u63a7","text":"

          In Insight, a service is an application that reports trace data through the OpenTelemetry SDK. Service monitoring helps you observe the performance and status of applications during operations.

          For how to use OpenTelemetry, refer to Making Applications Observable with OTel.

          "},{"location":"admin/insight/trace/service.html#_2","title":"\u540d\u8bcd\u89e3\u91ca","text":"
          • \u670d\u52a1 \uff1a\u670d\u52a1\u8868\u793a\u4e3a\u4f20\u5165\u8bf7\u6c42\u63d0\u4f9b\u76f8\u540c\u884c\u4e3a\u7684\u4e00\u7ec4\u5de5\u4f5c\u8d1f\u8f7d\u3002\u60a8\u53ef\u4ee5\u5728\u4f7f\u7528 OpenTelemetry SDK \u65f6\u5b9a\u4e49\u670d\u52a1\u540d\u79f0\u6216\u4f7f\u7528 Istio \u4e2d\u5b9a\u4e49\u7684\u540d\u79f0\u3002
          • \u64cd\u4f5c \uff1a\u64cd\u4f5c\u662f\u6307\u4e00\u4e2a\u670d\u52a1\u5904\u7406\u7684\u7279\u5b9a\u8bf7\u6c42\u6216\u64cd\u4f5c\uff0c\u6bcf\u4e2a Span \u90fd\u6709\u4e00\u4e2a\u64cd\u4f5c\u540d\u79f0\u3002
          • \u51fa\u53e3\u6d41\u91cf \uff1a\u51fa\u53e3\u6d41\u91cf\u662f\u6307\u5f53\u524d\u670d\u52a1\u53d1\u8d77\u8bf7\u6c42\u7684\u6240\u6709\u6d41\u91cf\u3002
          • \u5165\u53e3\u6d41\u91cf \uff1a\u5165\u53e3\u6d41\u91cf\u662f\u6307\u4e0a\u6e38\u670d\u52a1\u5bf9\u5f53\u524d\u670d\u52a1\u53d1\u8d77\u8bf7\u6c42\u7684\u6240\u6709\u6d41\u91cf\u3002
          "},{"location":"admin/insight/trace/service.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

          The service list page shows key metrics such as throughput, error rate, and request latency for all services in the cluster that have trace data onboarded. You can filter services by cluster and namespace, and sort the list by throughput, error rate, or request latency. The metric data in the list covers 1 hour by default, and you can customize the time range.

          Follow the steps below to view service monitoring metrics:

          1. Enter the Insight product module.

          2. In the left navigation bar, select Tracing -> Services.

            Attention

            1. If a service's namespace in the list is unknown, the service was not onboarded properly; re-onboarding is recommended.
            2. If onboarded services share the same name and none of them set the namespace environment variable correctly, the monitoring data shown in the list and on the service details page is the aggregate of multiple services.
          3. Click a service name (take insight-server as an example) to enter the service details page and view the service's detailed metrics and its operation metrics.

            1. In the service topology section you can view the service topology one layer upstream and downstream of the selected service; hovering over a node shows its information.
            2. In the traffic metrics section you can view the monitoring metrics for all requests (including inbound and outbound traffic) of the service over the default one-hour window.
            3. Use the time selector in the upper right corner to quickly pick a time range, or define a custom one.
            4. In the Associated Pods section, click a Pod name to jump to the Pod details page.

          4. Click the tab to switch to Operation Metrics, where you can query the aggregated traffic metrics of the same operation across multiple selected services.

            1. The operation metrics support sorting by throughput, error rate, and request latency.
            2. Click the icon after an individual operation to jump to Traces and quickly query the related traces.

          "},{"location":"admin/insight/trace/service.html#_4","title":"\u670d\u52a1\u6307\u6807\u8bf4\u660e","text":"\u53c2\u6570 \u8bf4\u660e \u541e\u5410\u7387 \u5355\u4f4d\u65f6\u95f4\u5185\u5904\u7406\u8bf7\u6c42\u7684\u6570\u91cf\u3002 \u9519\u8bef\u7387 \u67e5\u8be2\u65f6\u95f4\u8303\u56f4\u5185\u9519\u8bef\u8bf7\u6c42\u4e0e\u8bf7\u6c42\u603b\u6570\u7684\u6bd4\u503c\u3002 P50 \u8bf7\u6c42\u5ef6\u65f6 \u5728\u6240\u6709\u7684\u8bf7\u6c42\u4e2d\uff0c\u6709 50% \u7684\u8bf7\u6c42\u54cd\u5e94\u65f6\u95f4\u5c0f\u4e8e\u6216\u7b49\u4e8e\u8be5\u503c\u3002 P95 \u8bf7\u6c42\u5ef6\u65f6 \u5728\u6240\u6709\u7684\u8bf7\u6c42\u4e2d\uff0c\u6709 95% \u7684\u8bf7\u6c42\u54cd\u5e94\u65f6\u95f4\u5c0f\u4e8e\u6216\u7b49\u4e8e\u8be5\u503c\u3002 P99 \u8bf7\u6c42\u5ef6\u65f6 \u5728\u6240\u6709\u7684\u8bf7\u6c42\u4e2d\uff0c\u6709 95% \u7684\u8bf7\u6c42\u54cd\u5e94\u65f6\u95f4\u5c0f\u4e8e\u6216\u7b49\u4e8e\u8be5\u503c\u3002"},{"location":"admin/insight/trace/topology.html","title":"\u670d\u52a1\u62d3\u6251","text":"

          The service topology map is a visual representation of the connections, communication, and dependencies between services. It lets you understand the call relationships between services and view the calls and their performance within a specified time window. An edge between two nodes in the topology means a call relationship existed between those two services within the query time range.

          "},{"location":"admin/insight/trace/topology.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
          1. \u96c6\u7fa4\u4e2d\u5df2\u5b89\u88c5 insight-agent \u4e14\u5e94\u7528\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u3002
          2. \u670d\u52a1\u5df2\u901a\u8fc7 Operator \u6216 Opentelemetry SDK \u7684\u65b9\u5f0f\u63a5\u5165\u94fe\u8def\u3002
          "},{"location":"admin/insight/trace/topology.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
          1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u6a21\u5757
          2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u94fe\u8def\u8ffd\u8e2a -> \u670d\u52a1\u62d3\u6251
          3. \u5728\u62d3\u6251\u56fe\u4e2d\uff0c\u60a8\u53ef\u6309\u9700\u6267\u884c\u4ee5\u4e0b\u64cd\u4f5c\uff1a

            • \u5355\u51fb \u8282\u70b9\uff0c\u4ece\u53f3\u4fa7\u5212\u51fa\u670d\u52a1\u7684\u8be6\u60c5\uff0c\u53ef\u67e5\u770b\u670d\u52a1\u7684\u8bf7\u6c42\u5ef6\u65f6\u3001\u541e\u5410\u7387\u3001\u9519\u8bef\u7387\u7684\u6307\u6807\u3002\u70b9\u51fb\u670d\u52a1\u540d\u79f0\u53ef\u8df3\u8f6c\u81f3\u5bf9\u5e94\u670d\u52a1\u7684\u8be6\u60c5\u9875\u3002
            • \u9f20\u6807\u60ac\u6d6e\u5728\u8fde\u7ebf\u4e0a\u65f6\uff0c\u53ef\u67e5\u770b\u4e24\u4e2a\u670d\u52a1\u4e4b\u95f4\u8bf7\u6c42\u7684\u6d41\u91cf\u6307\u6807\u3002
            • \u5728 \u663e\u793a\u8bbe\u7f6e \u6a21\u5757\uff0c\u53ef\u914d\u7f6e\u62d3\u6251\u56fe\u4e2d\u7684\u663e\u793a\u5143\u7d20\u3002

          4. \u70b9\u51fb\u53f3\u4e0b\u89d2 \u56fe\u4f8b \uff0c\u53ef\u901a\u8fc7 \u4e34\u65f6\u914d\u7f6e \u4fee\u6539\u5f53\u524d\u7684\u62d3\u6251\u56fe\u5b9a\u4e49\u7684\u6e32\u67d3\u9608\u503c\uff0c\u8df3\u51fa\u6216\u5173\u95ed\u8be5\u9875\u9762\u5373\u4f1a\u4e22\u5931\u8be5\u914d\u7f6e\u3002

            Thresholds must be greater than 0, and each threshold must be smaller than the one after it; every threshold must also fall between the minimum and the maximum.

          "},{"location":"admin/insight/trace/topology.html#_4","title":"\u5176\u4ed6\u8282\u70b9","text":"

          The service topology may contain nodes that sit outside the cluster. These external nodes fall into three categories:

          • Database
          • Message queue
          • Virtual node

          • When a service makes requests to a database or a message queue, these two kinds of nodes are shown in the topology map by default. A virtual service, by contrast, means a service inside the cluster requested a node outside the cluster or a service not instrumented for tracing; virtual services are not shown in the topology map by default.

          • When a service makes requests to a MySQL, PostgreSQL, or Oracle Database database, the specific database type of the request can be seen in the topology map.

          "},{"location":"admin/insight/trace/topology.html#_5","title":"\u5f00\u542f\u865a\u62df\u8282\u70b9","text":"
          1. \u66f4\u65b0 insight-server chart \u7684 values\uff0c\u627e\u5230\u4e0b\u56fe\u6240\u793a\u53c2\u6570\uff0c\u5c06 false \u6539\u4e3a true\u3002

          2. \u5728\u670d\u52a1\u62d3\u6251\u7684\u663e\u793a\u8bbe\u7f6e\u4e2d\u52fe\u9009 \u865a\u62df\u670d\u52a1 \u3002

          "},{"location":"admin/insight/trace/trace.html","title":"\u94fe\u8def\u67e5\u8be2","text":"

          On the trace query page, you can look up the details of a call trace precisely by TraceID, or query call traces by combining multiple filter conditions.

          "},{"location":"admin/insight/trace/trace.html#_2","title":"\u540d\u8bcd\u89e3\u91ca","text":"
          • TraceID\uff1a\u7528\u4e8e\u6807\u8bc6\u4e00\u4e2a\u5b8c\u6574\u7684\u8bf7\u6c42\u8c03\u7528\u94fe\u8def\u3002
          • \u64cd\u4f5c\uff1a\u63cf\u8ff0 Span \u6240\u4ee3\u8868\u7684\u5177\u4f53\u64cd\u4f5c\u6216\u4e8b\u4ef6\u3002
          • \u5165\u53e3 Span\uff1a\u5165\u53e3 Span \u4ee3\u8868\u4e86\u6574\u4e2a\u8bf7\u6c42\u7684\u7b2c\u4e00\u4e2a\u8bf7\u6c42\u3002
          • \u5ef6\u65f6\uff1a\u6574\u4e2a\u8c03\u7528\u94fe\u4ece\u5f00\u59cb\u63a5\u6536\u8bf7\u6c42\u5230\u5b8c\u6210\u54cd\u5e94\u7684\u6301\u7eed\u65f6\u95f4\u3002
          • Span\uff1a\u6574\u4e2a\u94fe\u8def\u4e2d\u5305\u542b\u7684 Span \u4e2a\u6570\u3002
          • \u53d1\u751f\u65f6\u95f4\uff1a\u5f53\u524d\u94fe\u8def\u5f00\u59cb\u7684\u65f6\u95f4\u3002
          • Tag\uff1a\u4e00\u7ec4\u952e\u503c\u5bf9\u6784\u6210\u7684 Span \u6807\u7b7e\u96c6\u5408\uff0cTag \u662f\u7528\u6765\u5bf9 Span \u8fdb\u884c\u7b80\u5355\u7684\u6ce8\u89e3\u548c\u8865\u5145\uff0c\u6bcf\u4e2a Span \u53ef\u4ee5\u6709\u591a\u4e2a\u7b80\u76f4\u5bf9\u5f62\u5f0f\u7684 Tag\u3002
          "},{"location":"admin/insight/trace/trace.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

          Follow the steps below to query traces:

          1. Enter the Insight product module.
          2. In the left navigation bar, select Tracing -> Traces.

            Note

            The list supports sorting by the number of Spans, latency, and start time.

          3. Click TraceID Search in the filter bar to switch to searching by TraceID.

          4. When searching by TraceID, enter the complete TraceID.

          "},{"location":"admin/insight/trace/trace.html#_4","title":"\u5176\u4ed6\u64cd\u4f5c","text":""},{"location":"admin/insight/trace/trace.html#_5","title":"\u67e5\u770b\u94fe\u8def\u8be6\u60c5","text":"
          1. \u70b9\u51fb\u94fe\u8def\u5217\u8868\u4e2d\u7684\u67d0\u4e00\u94fe\u8def\u7684 TraceID\uff0c\u53ef\u67e5\u770b\u8be5\u94fe\u8def\u7684\u8be6\u60c5\u8c03\u7528\u60c5\u51b5\u3002

          "},{"location":"admin/insight/trace/trace.html#_6","title":"\u67e5\u770b\u5173\u8054\u65e5\u5fd7","text":"
          1. \u70b9\u51fb\u94fe\u8def\u6570\u636e\u53f3\u4fa7\u7684\u56fe\u6807\uff0c\u53ef\u67e5\u8be2\u8be5\u94fe\u8def\u7684\u5173\u8054\u65e5\u5fd7\u3002

            • \u9ed8\u8ba4\u67e5\u8be2\u8be5\u94fe\u8def\u7684\u6301\u7eed\u65f6\u95f4\u53ca\u5176\u7ed3\u675f\u4e4b\u540e\u4e00\u5206\u949f\u5185\u7684\u65e5\u5fd7\u6570\u636e\u3002
            • \u67e5\u8be2\u7684\u65e5\u5fd7\u5185\u5bb9\u4e3a\u65e5\u5fd7\u6587\u672c\u4e2d\u5305\u542b\u8be5\u94fe\u8def\u7684 TraceID \u7684\u65e5\u5fd7\u548c\u94fe\u8def\u8c03\u7528\u8fc7\u7a0b\u4e2d\u76f8\u5173\u7684\u5bb9\u5668\u65e5\u5fd7\u3002
          2. \u70b9\u51fb \u67e5\u770b\u66f4\u591a \u540e\u53ef\u5e26\u6761\u4ef6\u8df3\u8f6c\u5230 \u65e5\u5fd7\u67e5\u8be2 \u7684\u9875\u9762\u3002

          3. \u9ed8\u8ba4\u641c\u7d22\u5168\u90e8\u65e5\u5fd7\uff0c\u4f46\u53ef\u4e0b\u62c9\u6839\u636e\u94fe\u8def\u7684 TraceID \u6216\u94fe\u8def\u8c03\u7528\u8fc7\u7a0b\u4e2d\u76f8\u5173\u7684\u5bb9\u5668\u65e5\u5fd7\u8fdb\u884c\u8fc7\u6ee4\u3002

            Note

            Because traces can span clusters or namespaces, users with insufficient permissions cannot query a trace's associated logs.

          "},{"location":"admin/k8s/add-node.html","title":"\u6dfb\u52a0\u5de5\u4f5c\u8282\u70b9","text":"

          If you are running short of nodes, you can add more nodes to the cluster.

          "},{"location":"admin/k8s/add-node.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
          • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
          • \u6709\u4e00\u4e2a\u7ba1\u7406\u5458\u5e10\u53f7
          • \u5df2\u521b\u5efa\u5e26 GPU \u8282\u70b9\u7684\u96c6\u7fa4
          • \u51c6\u5907\u4e00\u53f0\u4e91\u4e3b\u673a
          "},{"location":"admin/k8s/add-node.html#_3","title":"\u6dfb\u52a0\u6b65\u9aa4","text":"
          1. \u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
          2. \u5bfc\u822a\u81f3 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0

          3. \u8fdb\u5165\u96c6\u7fa4\u6982\u89c8\u9875\uff0c\u70b9\u51fb \u8282\u70b9\u7ba1\u7406 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u63a5\u5165\u8282\u70b9 \u6309\u94ae

          4. \u6309\u7167\u5411\u5bfc\uff0c\u586b\u5199\u5404\u9879\u53c2\u6570\u540e\u70b9\u51fb \u786e\u5b9a

            \u57fa\u672c\u4fe1\u606f\u53c2\u6570\u914d\u7f6e

          5. \u5728\u5f39\u7a97\u4e2d\u70b9\u51fb \u786e\u5b9a

          6. \u8fd4\u56de\u8282\u70b9\u5217\u8868\uff0c\u65b0\u63a5\u5165\u7684\u8282\u70b9\u72b6\u6001\u4e3a \u63a5\u5165\u4e2d \uff0c\u7b49\u5f85\u51e0\u5206\u949f\u540e\u72b6\u6001\u53d8\u4e3a \u5065\u5eb7 \u5219\u8868\u793a\u63a5\u5165\u6210\u529f\u3002

          Tip

          For a node that has just been integrated, it may take another 2-3 minutes before the GPU is recognized; the sketch below shows one way to check.
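          A minimal sketch for checking whether the GPU resource is visible to Kubernetes (replace <node-name> with the real node; the exact resource name, e.g. nvidia.com/gpu, depends on the installed GPU plugin):

            kubectl get node <node-name> -o jsonpath='{.status.allocatable}'\n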

          "},{"location":"admin/k8s/create-k8s.html","title":"\u521b\u5efa\u4e91\u4e0a Kubernetes \u96c6\u7fa4","text":"

          Deploying a Kubernetes cluster supports efficient AI compute scheduling and management, enables elastic scaling, and provides high availability, thereby optimizing model training and inference.

          "},{"location":"admin/k8s/create-k8s.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
          • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0\u5df2
          • \u6709\u4e00\u4e2a\u7ba1\u7406\u5458\u6743\u9650\u7684\u8d26\u53f7
          • \u51c6\u5907\u4e00\u53f0\u5e26 GPU \u7684\u7269\u7406\u673a
          • \u5206\u914d\u4e24\u6bb5 IP \u5730\u5740\uff08Pod CIDR 18 \u4f4d\u3001SVC CIDR 18 \u4f4d\uff0c\u4e0d\u80fd\u4e0e\u73b0\u6709\u7f51\u6bb5\u51b2\u7a81\uff09
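          For illustration only (hypothetical ranges; verify them against your own network plan), a /18 block provides 16,384 addresses:

            Pod CIDR: 10.233.64.0/18   # 10.233.64.0 - 10.233.127.255\nSVC CIDR: 10.233.0.0/18    # 10.233.0.0 - 10.233.63.255\n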
          "},{"location":"admin/k8s/create-k8s.html#_2","title":"\u521b\u5efa\u6b65\u9aa4","text":"
          1. \u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
          2. \u521b\u5efa\u5e76\u542f\u52a8 3 \u53f0\u4e0d\u5e26 GPU \u7684\u4e91\u4e3b\u673a\u7528\u4f5c\u96c6\u7fa4\u7684 Master \u8282\u70b9

            • \u914d\u7f6e\u8d44\u6e90\uff0cCPU 16 \u6838\uff0c\u5185\u5b58 32 GB\uff0c\u7cfb\u7edf\u76d8 200 GB\uff08ReadWriteOnce\uff09
            • \u7f51\u7edc\u6a21\u5f0f\u9009\u62e9 Bridge\uff08\u6865\u63a5\uff09
            • \u8bbe\u7f6e root \u5bc6\u7801\u6216\u6dfb\u52a0 SSH \u516c\u94a5\uff0c\u65b9\u4fbf\u4ee5 SSH \u8fde\u63a5
            • \u8bb0\u5f55\u597d 3 \u53f0\u4e3b\u673a\u7684 IP
          3. \u5bfc\u822a\u81f3 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa\u96c6\u7fa4 \u6309\u94ae

          4. \u6309\u7167\u5411\u5bfc\uff0c\u914d\u7f6e\u96c6\u7fa4\u7684\u5404\u9879\u53c2\u6570

            \u57fa\u672c\u4fe1\u606f\u8282\u70b9\u914d\u7f6e\u7f51\u7edc\u914d\u7f6eAddon \u914d\u7f6e\u9ad8\u7ea7\u914d\u7f6e

            \u914d\u7f6e\u5b8c\u8282\u70b9\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u5f00\u59cb\u68c0\u67e5 \uff0c

            \u6bcf\u4e2a\u8282\u70b9\u9ed8\u8ba4\u53ef\u8fd0\u884c 110 \u4e2a Pod\uff08\u5bb9\u5668\u7ec4\uff09\uff0c\u5982\u679c\u8282\u70b9\u914d\u7f6e\u6bd4\u8f83\u9ad8\uff0c\u53ef\u4ee5\u8c03\u6574\u5230 200 \u6216 300 \u4e2a Pod\u3002
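          A minimal sketch of the corresponding kubelet setting (assuming the node's kubelet reads a KubeletConfiguration file; the platform wizard normally applies this for you):

            apiVersion: kubelet.config.k8s.io/v1beta1\nkind: KubeletConfiguration\nmaxPods: 200\n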

          5. Wait for the cluster creation to complete.

          6. In the cluster list, find the newly created cluster, click the cluster name, navigate to Helm Apps -> Helm Templates, search for metax-gpu-extensions in the search box, and click the card

          7. Click the Install button on the right to start installing the GPU plugin

            Application Settings / Kubernetes Orchestration / Confirmation

            Enter a name, select a namespace, and modify the image address in the YAML:

          8. You are automatically returned to the Helm app list; wait for the status of metax-gpu-extensions to change to Deployed

          9. The cluster is now created successfully. You can go check the nodes the cluster contains, and create AI workloads that use GPUs.

          Next step: Create an AI Workload

          "},{"location":"admin/k8s/remove-node.html","title":"\u79fb\u9664 GPU \u5de5\u4f5c\u8282\u70b9","text":"

          GPU resources are relatively expensive. If you do not need GPUs for the time being, you can remove the worker nodes that carry them. The following steps also apply to removing ordinary worker nodes.

          "},{"location":"admin/k8s/remove-node.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
          • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
          • \u6709\u4e00\u4e2a\u7ba1\u7406\u5458\u5e10\u53f7
          • \u5df2\u521b\u5efa\u5e26 GPU \u8282\u70b9\u7684\u96c6\u7fa4
          "},{"location":"admin/k8s/remove-node.html#_2","title":"\u79fb\u9664\u6b65\u9aa4","text":"
          1. \u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
          2. \u5bfc\u822a\u81f3 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0

          3. \u8fdb\u5165\u96c6\u7fa4\u6982\u89c8\u9875\uff0c\u70b9\u51fb \u8282\u70b9\u7ba1\u7406 \uff0c\u627e\u5230\u8981\u79fb\u9664\u7684\u8282\u70b9\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u79fb\u9664\u8282\u70b9

          4. \u5728\u5f39\u6846\u4e2d\u8f93\u5165\u8282\u70b9\u540d\u79f0\uff0c\u786e\u8ba4\u65e0\u8bef\u540e\u70b9\u51fb \u5220\u9664

          5. \u81ea\u52a8\u8fd4\u56de\u8282\u70b9\u5217\u8868\uff0c\u72b6\u6001\u4e3a \u79fb\u9664\u4e2d \uff0c\u51e0\u5206\u949f\u540e\u5237\u65b0\u9875\u9762\uff0c\u8282\u70b9\u4e0d\u5728\u4e86\uff0c\u8bf4\u660e\u8282\u70b9\u88ab\u6210\u529f\u79fb\u9664

          6. \u4ece UI \u5217\u8868\u79fb\u9664\u8282\u70b9\u540e\uff0c\u901a\u8fc7 SSH \u767b\u5f55\u5230\u5df2\u79fb\u9664\u7684\u8282\u70b9\u4e3b\u673a\uff0c\u6267\u884c\u5173\u673a\u547d\u4ee4\u3002
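          A minimal sketch of the shutdown step (run as root on the removed host):

            shutdown -h now\n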

          Tip

          After the node is removed in the UI and shut down, the data on the node is not deleted immediately; the node's data is retained for a period of time.

          "},{"location":"admin/kpanda/backup/index.html","title":"\u5907\u4efd\u6062\u590d","text":"

          Backup and restore covers two sides: backing up and restoring. In practice you first back up the system's data at a point in time and store the backup securely. If an incident such as data corruption, loss, or accidental deletion occurs later, the system can be quickly restored from the earlier backup, shortening downtime and reducing losses.

          • In real production environments, services may be deployed across different clouds, regions, or availability zones. If one piece of infrastructure fails, the enterprise needs to restore its applications quickly in another available environment. In such cases, cross-cloud/cross-cluster backup and restore becomes very important.
          • Large-scale systems usually involve many roles and users, complex permission systems, and numerous operators, so it is hard to avoid someone misoperating and causing a system failure. Here too, the system must be able to roll back quickly from earlier backups; otherwise, relying on people to troubleshoot, repair, and recover costs a great deal of time, and the longer the system is unavailable, the greater the enterprise's loss.
          • In addition, factors such as network attacks, natural disasters, and equipment failures can also cause data incidents.

          Backup and restore is therefore very important; it can be regarded as the last line of insurance for system stability and data security.

          Backups are usually divided into three kinds: full, incremental, and differential. The Suanfeng AI computing platform currently supports full and incremental backups.

          The backup and restore capability provided by the Suanfeng AI computing platform falls into two kinds, application backup and ETCD backup, and supports manual backup as well as scheduled automatic backup based on CronJob.

          • Application backup

            Application backup means backing up the data of a particular workload in the cluster and then restoring that workload's data to the same cluster or another cluster. It supports backing up all resources under an entire namespace, and also supports filtering by label selector to back up only the resources that carry specific labels.

            Application backup supports cross-cluster backup of stateful applications; for detailed steps, refer to Cross-cluster Backup and Restore of MySQL Applications and Data.

          • ETCD backup

            etcd is the data storage component of Kubernetes. Kubernetes stores both its own component data and the application data within it in etcd. Backing up etcd is therefore equivalent to backing up the data of the entire cluster, so the cluster can be quickly restored to the state of an earlier point in time when a failure occurs.

            Note that currently etcd backup data can only be restored into the same cluster (the original cluster).

          "},{"location":"admin/kpanda/backup/deployment.html","title":"\u5e94\u7528\u5907\u4efd","text":"

          This document describes how to back up an application on the Suanfeng AI computing platform. The demo application used in this tutorial is named dao-2048 and is a stateless workload.

          "},{"location":"admin/kpanda/backup/deployment.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

          \u5728\u5bf9\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u8fdb\u884c\u5907\u4efd\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

          • A Kubernetes cluster has been integrated into or created in the container management module, and its UI is accessible.

          • A namespace and a user have been created.

          • The current user has NS Editor or higher permissions; for details, see Namespace Authorization.

          • The velero component is installed and running normally.

          • A stateless workload has been created (named dao-2048 in this tutorial) and labeled app: dao-2048; a label check is sketched after this list.
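          A minimal sketch for verifying the label (the default namespace is a hypothetical choice; adjust it to wherever dao-2048 lives):

            kubectl get deployments -n default -l app=dao-2048\n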

          "},{"location":"admin/kpanda/backup/deployment.html#_3","title":"\u5907\u4efd\u5de5\u4f5c\u8d1f\u8f7d","text":"

          Follow the steps below to back up the stateless workload dao-2048.

          1. In the left navigation bar, click Container Management -> Backup &amp; Restore.

          2. On the Application Backup list page, select the cluster where velero and dao-2048 are installed from the cluster drop-down list, then click the Create Backup Plan button on the right.

          3. Fill in the backup configuration according to the instructions below.

          4. Set the backup execution frequency according to the instructions below, then click Next; a sample Cron expression is sketched after this list.

            • Backup frequency: sets the schedule on which the task runs, by minute, hour, day, week, or month. Custom Cron expressions using digits and * are supported; after you enter an expression, its meaning is shown below it. For the detailed expression syntax rules, see Cron Schedule Syntax.
            • Retention period (days): how long backup resources are kept, 30 days by default; they are deleted after expiry.
            • Back up data volumes (PV): whether to back up the data in data volumes (PV); direct copy and CSI snapshots are the two supported methods.
              • Direct copy: copies the data in the data volume (PV) directly for backup;
              • Use CSI snapshots: backs up data volumes (PV) using CSI snapshots. This requires a CSI snapshot class usable for backup to exist in the cluster.
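            An illustrative Cron expression (a hypothetical schedule, not a product default):

              0 2 * * *   # run the backup every day at 02:00\n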

          5. Click OK; the page automatically returns to the application backup plan list. Find the newly created dao-2048 backup plan, click ┇ on its right, and select Run Now to start the backup.

          6. The cluster's Last Run Status changes to Backing Up. After the backup completes, you can click the backup plan's name to view the plan details.

          Note

          Workloads of type Job whose status is Completed cannot be backed up.

          "},{"location":"admin/kpanda/backup/etcd-backup.html","title":"etcd \u5907\u4efd","text":"

          An etcd backup is a backup centered on cluster data. In scenarios such as hardware damage or misconfiguration during development and testing, cluster data can be restored from an etcd backup.

          This document describes how to create etcd backups for a cluster.

          "},{"location":"admin/kpanda/backup/etcd-backup.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
          • \u63a5\u5165\u6216\u8005\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

          • \u521b\u5efa\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 NS Admin \u6216\u66f4\u9ad8\u6743\u9650\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

          • \u51c6\u5907\u4e00\u4e2a MinIO \u5b9e\u4f8b\u3002

          "},{"location":"admin/kpanda/backup/etcd-backup.html#etcd_1","title":"\u521b\u5efa etcd \u5907\u4efd","text":"

          Follow the steps below to create an etcd backup.

          1. Go to Container Management -> Backup &amp; Restore -> etcd Backup, click the Backup Policies tab, then click Create Backup Policy on the right.

          2. Fill in the Basic Information according to the instructions below. After that, click Next; the system will automatically verify connectivity to etcd, and once the check passes you can proceed to the next step. A manual etcdctl equivalent of this check is sketched after the notes below.

            • Backup cluster: select which cluster's etcd data to back up, and log in to that cluster's terminal
            • etcd address: in the format https://${node IP}:${port}

              • In a standard Kubernetes cluster, the default etcd port is 2379
              • In a managed public-cloud cluster, contact the relevant developers to obtain the etcd port. This is because the control-plane components of public-cloud clusters are maintained and managed by the cloud provider: users cannot directly access or view these components, nor obtain control-plane information such as ports through regular commands (e.g. kubectl).
              How to obtain the port number
              1. Find the etcd Pod in the kube-system namespace

                kubectl get po -n kube-system | grep etcd\n
              2. Get the port number from the etcd Pod's listen-client-urls

                kubectl get po -n kube-system ${etcd_pod_name} -oyaml | grep listen-client-urls # (1)!\n
                1. Replace etcd_pod_name with the actual Pod name

                The expected output is as follows; the number after the node IP is the port:

                - --listen-client-urls=https://127.0.0.1:2379,https://10.6.229.191:2379\n
            • CA certificate: you can view the certificate with the following command, then copy and paste its contents into the corresponding field:

              cat /etc/kubernetes/ssl/etcd/ca.crt\n
            • Cert certificate: you can view the certificate with the following command, then copy and paste its contents into the corresponding field:

              cat /etc/kubernetes/ssl/apiserver-etcd-client.crt\n
            • Key: you can view the certificate with the following command, then copy and paste its contents into the corresponding field:

              cat /etc/kubernetes/ssl/apiserver-etcd-client.key\n

            Note

            Click How to obtain below an input box to see, on the UI page, how to obtain the corresponding information.
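            For a manual connectivity check equivalent to what the wizard verifies, a minimal etcdctl sketch (assuming the certificate paths shown above and a placeholder node IP; uses the etcd v3 API):

              ETCDCTL_API=3 etcdctl --endpoints=https://<node-ip>:2379 --cacert=/etc/kubernetes/ssl/etcd/ca.crt --cert=/etc/kubernetes/ssl/apiserver-etcd-client.crt --key=/etc/kubernetes/ssl/apiserver-etcd-client.key endpoint health\n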

          3. Fill in the Backup Policy according to the following information.

            • Backup method: choose manual backup or scheduled backup

              • Manual backup: immediately performs one full backup of the etcd data based on the backup configuration.
              • Scheduled backup: performs periodic full backups of the etcd data according to the configured frequency.
            • Backup chain length: the maximum number of backups to retain, 30 by default.

            • Backup frequency: supports hourly, daily, weekly, and monthly schedules as well as custom ones.

          4. Fill in the Storage Location according to the following information.

            • Storage provider: S3 storage is selected by default
            • Object storage access address: the access address of MinIO
            • Bucket: create a bucket in MinIO and fill in its name; a MinIO client sketch follows below
            • Username: the MinIO login username
            • Password: the MinIO login password
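            A minimal sketch for creating the bucket with the MinIO client (the alias, address, and bucket name are hypothetical):

              mc alias set myminio http://<minio-address>:9000 <username> <password>\nmc mb myminio/etcd-backup\n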

          5. After you click OK, the page automatically jumps to the backup policy list, where you can view all policies created so far.

            • Click the ┇ action button on the right of a policy to view its logs, view its YAML, update it, stop it, run it immediately, and so on.
            • When the backup method is manual, you can click Run Now to perform a backup.
            • When the backup method is scheduled, backups run at the configured times.

          "},{"location":"admin/kpanda/backup/etcd-backup.html#_2","title":"\u67e5\u770b\u5907\u4efd\u7b56\u7565\u65e5\u5fd7","text":"

          Click Logs to view the log content; 100 lines are shown by default. To view more log information or download the logs, follow the prompt above the logs to go to the observability module.

          "},{"location":"admin/kpanda/backup/etcd-backup.html#_3","title":"\u67e5\u770b\u5907\u4efd\u7b56\u7565\u8be6\u60c5","text":"

          Go to Container Management -> Backup &amp; Restore -> etcd Backup, click the Backup Policies tab, then click a policy's name to view the policy details.

          "},{"location":"admin/kpanda/backup/etcd-backup.html#_4","title":"\u67e5\u770b\u5907\u4efd\u70b9","text":"
          1. \u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 -> \u5907\u4efd\u6062\u590d -> etcd \u5907\u4efd \uff0c\u70b9\u51fb \u5907\u4efd\u70b9 \u9875\u7b7e\u3002
          2. \u9009\u62e9\u76ee\u6807\u96c6\u7fa4\u540e\uff0c\u53ef\u4ee5\u67e5\u770b\u8be5\u96c6\u7fa4\u4e0b\u6240\u6709\u5907\u4efd\u4fe1\u606f\u3002

            \u6bcf\u6267\u884c\u4e00\u6b21\u5907\u4efd\uff0c\u5bf9\u5e94\u751f\u6210\u4e00\u4e2a\u5907\u4efd\u70b9\uff0c\u53ef\u901a\u8fc7\u6210\u529f\u72b6\u6001\u7684\u5907\u4efd\u70b9\u5feb\u901f\u6062\u590d\u5e94\u7528\u3002

          "},{"location":"admin/kpanda/backup/install-velero.html","title":"\u5b89\u88c5 velero \u63d2\u4ef6","text":"

          velero is an open source tool for backing up and restoring Kubernetes cluster resources. It can back up the resources of a Kubernetes cluster to cloud storage services, local storage, or other locations, and restore those resources to the same or a different cluster when needed.

          This section describes how to deploy the velero plugin on the Suanfeng AI computing platform using a Helm app.

          "},{"location":"admin/kpanda/backup/install-velero.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

          \u5b89\u88c5 velero \u63d2\u4ef6\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

          • A Kubernetes cluster has been integrated into or created in the container management module, and its UI is accessible.

          • The velero namespace has been created.

          • The current user has NS Editor or higher permissions; for details, see Namespace Authorization.

          "},{"location":"admin/kpanda/backup/install-velero.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

          \u8bf7\u6267\u884c\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 velero \u63d2\u4ef6\u3002

          1. \u5728\u96c6\u7fa4\u5217\u8868\u9875\u9762\u627e\u5230\u9700\u8981\u5b89\u88c5 velero \u63d2\u4ef6\u7684\u76ee\u6807\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4f9d\u6b21\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u6a21\u677f \uff0c\u5728\u641c\u7d22\u680f\u8f93\u5165 velero \u8fdb\u884c\u641c\u7d22\u3002

          2. \u9605\u8bfb velero \u63d2\u4ef6\u76f8\u5173\u4ecb\u7ecd\uff0c\u9009\u62e9\u7248\u672c\u540e\u70b9\u51fb \u5b89\u88c5 \u6309\u94ae\u3002\u672c\u6587\u5c06\u4ee5 4.0.2 \u7248\u672c\u4e3a\u4f8b\u8fdb\u884c\u5b89\u88c5\uff0c\u63a8\u8350\u5b89\u88c5 4.0.2 \u6216\u66f4\u9ad8\u7248\u672c\u3002

          3. \u586b\u5199\u548c\u914d\u7f6e\u53c2\u6570\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65

            Basic Parameters / Parameter Configuration

            • Name: required; enter the plugin name. The name can be at most 63 characters, may only contain lowercase letters, digits, and the separator ("-"), and must start and end with a lowercase letter or digit, e.g. metrics-server-01.
            • Namespace: the namespace where the plugin is installed; defaults to the velero namespace.
            • Version: the plugin version; 4.0.2 is used here as an example.
            • Ready Wait: optional; when enabled, the application is marked as successfully installed only after all of its associated resources are ready.
            • Delete on Failure: optional; when enabled, Ready Wait is also enabled by default. If the installation fails, installation-related resources are deleted.
            • Verbose Logs: optional; when enabled, detailed logs of the installation process are output.

            Note

            After enabling Ready Wait and/or Delete on Failure, it takes a while before the application is marked as Running .

            • S3 Credentials:

              • Use secret : keep the default value true .
              • Secret name : keep the default value velero-s3-credential .
              • SecretContents.aws_access_key_id = : the username for accessing the object storage; replace it with the real value.
              • SecretContents.aws_secret_access_key = : the password for accessing the object storage; replace it with the real value.

                SecretContents example:

                [default]\naws_access_key_id = minio\naws_secret_access_key = minio123\n

              • Velero Configuration:

                • Backupstoragelocation : where velero stores backup data
                • S3 bucket : the name of the bucket used to store backup data (must be a bucket that actually exists in MinIO)
                • Is default BackupStorage : keep the default value true
                • S3 access mode : velero's access mode to the data; one of
                  • ReadWrite : allow velero to read and write backup data
                  • ReadOnly : allow velero to read backup data but not modify it
                  • WriteOnly : allow velero to write backup data only, not read it
                • S3 Configs : detailed configuration of the S3 storage (MinIO)
                • S3 region : the geographic region of the cloud storage. Defaults to us-east-1 , provided by the system administrator
                • S3 force path style : keep the default value true
                • S3 server URL : the console access address of the object storage (MinIO). MinIO generally provides both a UI access service and a console access service; use the console access address here

                Note

                Make sure the clock of the S3 storage service is within 10 minutes of the clocks of the clusters being backed up and restored, and ideally keep them synchronized; otherwise backups will fail.

              • migration plugin configuration: when enabled, the following is added to the YAML snippet in the next step:

                ...\ninitContainers:\n  - image: 'release.daocloud.io/kcoral/velero-plugin-for-migration:v0.3.0'\n    imagePullPolicy: IfNotPresent\n    name: velero-plugin-for-migration\n    volumeMounts:\n      - mountPath: /target\n        name: plugins\n  - image: 'docker.m.daocloud.io/velero/velero-plugin-for-csi:v0.7.0'\n    imagePullPolicy: IfNotPresent\n    name: velero-plugin-for-csi\n    volumeMounts:\n      - mountPath: /target\n        name: plugins\n  - image: 'docker.m.daocloud.io/velero/velero-plugin-for-aws:v1.9.0'\n    imagePullPolicy: IfNotPresent\n    name: velero-plugin-for-aws\n    volumeMounts:\n      - mountPath: /target\n        name: plugins\n...\n
              • After confirming the YAML is correct, click OK to complete the installation of the velero plugin. The system then automatically redirects to the Helm Apps list page; after a few minutes, refresh the page to see the newly installed application.
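
                Once the installation completes, you can optionally verify from a terminal that the velero components are running and that the storage location is reachable; a minimal sketch, assuming the default velero namespace used above:

                kubectl get pods -n velero\nkubectl get backupstoragelocation -n velero\n# the backup storage location should report the Available phase\n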

              • "},{"location":"admin/kpanda/best-practice/add-master-node.html","title":"\u5bf9\u5de5\u4f5c\u96c6\u7fa4\u7684\u63a7\u5236\u8282\u70b9\u6269\u5bb9","text":"

                \u672c\u6587\u5c06\u4ee5\u4e00\u4e2a\u5355\u63a7\u5236\u8282\u70b9\u7684\u5de5\u4f5c\u96c6\u7fa4\u4e3a\u4f8b\uff0c\u4ecb\u7ecd\u5982\u4f55\u624b\u52a8\u4e3a\u5de5\u4f5c\u96c6\u7fa4\u7684\u63a7\u5236\u8282\u70b9\u8fdb\u884c\u6269\u5bb9\uff0c\u4ee5\u5b9e\u73b0\u81ea\u5efa\u5de5\u4f5c\u96c6\u7fa4\u7684\u9ad8\u53ef\u7528\u3002

                Note

                • \u63a8\u8350\u5728\u754c\u9762\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4\u65f6\u5373\u5f00\u542f\u9ad8\u53ef\u7528\u6a21\u5f0f\uff0c\u624b\u52a8\u6269\u5bb9\u5de5\u4f5c\u96c6\u7fa4\u7684\u63a7\u5236\u8282\u70b9\u5b58\u5728\u4e00\u5b9a\u7684\u64cd\u4f5c\u98ce\u9669\uff0c\u8bf7\u8c28\u614e\u64cd\u4f5c\u3002
                • \u5f53\u5de5\u4f5c\u96c6\u7fa4\u7684\u9996\u4e2a\u63a7\u5236\u8282\u70b9\u6545\u969c\u6216\u5f02\u5e38\u65f6\uff0c\u5982\u679c\u60a8\u60f3\u66ff\u6362\u6216\u91cd\u65b0\u63a5\u5165\u9996\u4e2a\u63a7\u5236\u8282\u70b9\uff0c \u8bf7\u53c2\u8003\u66ff\u6362\u5de5\u4f5c\u96c6\u7fa4\u7684\u9996\u4e2a\u63a7\u5236\u8282\u70b9
                "},{"location":"admin/kpanda/best-practice/add-master-node.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                • \u5df2\u7ecf\u901a\u8fc7 AI \u7b97\u529b\u4e2d\u5fc3\u5e73\u53f0\u521b\u5efa\u597d\u4e00\u4e2a\u5de5\u4f5c\u96c6\u7fa4\uff0c\u53ef\u53c2\u8003\u6587\u6863\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4\u3002
                • \u5de5\u4f5c\u96c6\u7fa4\u7684\u88ab\u7eb3\u7ba1\u96c6\u7fa4\u5b58\u5728\u5f53\u524d\u5e73\u53f0\u4e2d\uff0c\u5e76\u4e14\u72b6\u6001\u8fd0\u884c\u6b63\u5e38\u3002

                Note

                Management cluster: the cluster specified when creating a cluster in the UI that manages the current cluster and provides it with capabilities such as Kubernetes version upgrades, node scaling, uninstallation, and operation records.

                "},{"location":"admin/kpanda/best-practice/add-master-node.html#_3","title":"\u4fee\u6539\u4e3b\u673a\u6e05\u5355\u6587\u4ef6","text":"
                1. \u767b\u5f55\u5230\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u8fdb\u5165\u9700\u8981\u8fdb\u884c\u63a7\u5236\u8282\u70b9\u6269\u5bb9\u7684\u96c6\u7fa4\u6982\u89c8\u9875\u9762\uff0c\u5728 \u57fa\u672c\u4fe1\u606f \u5904\uff0c\u627e\u5230\u5f53\u524d\u96c6\u7fa4\u7684 \u88ab\u7eb3\u7ba1\u96c6\u7fa4 \uff0c \u70b9\u51fb\u88ab\u7eb3\u7ba1\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u88ab\u7eb3\u7ba1\u96c6\u7fa4\u7684\u6982\u89c8\u754c\u9762\u3002

                2. \u5728\u88ab\u7eb3\u7ba1\u96c6\u7fa4\u7684\u6982\u89c8\u754c\u9762\uff0c\u70b9\u51fb \u63a7\u5236\u53f0\uff0c\u6253\u5f00\u4e91\u7ec8\u7aef\u63a7\u5236\u53f0\uff0c\u5e76\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c\u627e\u5230\u5f85\u6269\u5bb9\u5de5\u4f5c\u96c6\u7fa4\u7684\u4e3b\u673a\u6e05\u5355\u6587\u4ef6\u3002

                  kubectl get cm -n kubean-system ${ClusterName}-hosts-conf -oyaml\n

                  ${ClusterName}: the name of the worker cluster to be scaled up.

                3. Modify the host inventory file following the example below, adding the new control node information.

                  Before / After
                  apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: tanghai-dev-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      hosts:\n        node1:\n          ip: 10.6.175.10 \n          access_ip: 10.6.175.10\n          ansible_host: 10.6.175.10 \n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: password01\n      children:\n        kube_control_plane:\n          hosts:\n            node1:\n        kube_node:\n          hosts:\n            node1:\n        etcd:\n          hosts:\n            node1:\n        k8s_cluster:\n          children:\n            kube_control_plane:\n            kube_node:\n        calico_rr:\n          hosts: {}\n......\n
                  apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: tanghai-dev-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      hosts:\n        node1: # existing control node in the original cluster\n          ip: 10.6.175.10\n          access_ip: 10.6.175.10 \n          ansible_host: 10.6.175.10\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: password01\n        node2: # new control node to be added in the scale-up\n          ip: 10.6.175.20\n          access_ip: 10.6.175.20\n          ansible_host: 10.6.175.20\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: password01\n        node3: # new control node to be added in the scale-up\n          ip: 10.6.175.30 \n          access_ip: 10.6.175.30\n          ansible_host: 10.6.175.30 \n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: password01\n      children:\n        kube_control_plane:\n          hosts: # control node group of the cluster\n            node1:\n            node2: # newly added control node node2 \n            node3: # newly added control node node3 \n        kube_node:\n          hosts: # worker node group of the cluster\n            node1:\n            node2: # newly added control node node2 \n            node3: # newly added control node node3 \n        etcd:\n          hosts: # etcd node group of the cluster\n            node1:\n            node2: # newly added control node node2 \n            node3: # newly added control node node3 \n        k8s_cluster:\n          children:\n            kube_control_plane:\n            kube_node:\n        calico_rr:\n          hosts: {}\n
                "},{"location":"admin/kpanda/best-practice/add-master-node.html#clusteroperationyml","title":"\u65b0\u589e ClusterOperation.yml \u6269\u5bb9\u4efb\u52a1","text":"

                \u4f7f\u7528\u57fa\u4e8e\u4e0b\u9762\u7684 ClusterOperation.yml \u6a21\u677f\uff0c\u65b0\u589e\u4e00\u4e2a\u96c6\u7fa4\u63a7\u5236\u8282\u70b9\u6269\u5bb9\u4efb\u52a1 scale-master-node-ops.yaml \u3002

                ClusterOperation.yml
                apiVersion: kubean.io/v1alpha1\nkind: ClusterOperation\nmetadata:\n  name: cluster1-online-install-ops\nspec:\n  cluster: ${cluster-name} # (1)!\n  image: ghcr.m.daocloud.io/kubean-io/spray-job:v0.18.0 # (2)!\n  actionType: playbook\n  action: cluster.yml # (3)!\n  extraArgs: --limit=etcd,kube_control_plane -e ignore_assert_errors=yes\n  preHook:\n    - actionType: playbook\n      action: ping.yml\n    - actionType: playbook\n      action: disable-firewalld.yml\n    - actionType: playbook\n      action: enable-repo.yml  # (4)!\n      extraArgs: | # in an offline environment, add enable-repo.yml and set extraArgs to the correct repo_list for the OS\n        -e \"{repo_list: ['http://172.30.41.0:9000/kubean/centos/\\$releasever/os/\\$basearch','http://172.30.41.0:9000/kubean/centos-iso/\\$releasever/os/\\$basearch']}\"\n  postHook:\n    - actionType: playbook\n      action: upgrade-cluster.yml\n      extraArgs: --limit=etcd,kube_control_plane -e ignore_assert_errors=yes\n    - actionType: playbook\n      action: kubeconfig.yml\n    - actionType: playbook\n      action: cluster-info.yml\n
                1. Specify the cluster name
                2. Specify the image used to run the kubean job; the image address must match the image of the job used in the previous deployment
                3. If three or more Master (etcd) nodes are added at once, append the extra parameter -e etcd_retries=10 to cluster.yaml to increase the number of etcd node join retries
                4. In offline environments this yaml must be added, with a correct repo-list configured (for installing OS packages); the parameter values below are for reference only

                Then create and deploy scale-master-node-ops.yaml.

                vi scale-master-node-ops.yaml\nkubectl apply -f scale-master-node-ops.yaml -n kubean-system\n

                After completing the steps above, run the following command to verify:

                kubectl get node\n
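
                For reference, the expected result is sketched below (node names follow the inventory example above; the ages and version numbers are illustrative): once the kubean job completes, the new nodes should be listed with the control-plane role.

                NAME    STATUS   ROLES           AGE   VERSION\nnode1   Ready    control-plane   10d   v1.26.2\nnode2   Ready    control-plane   5m    v1.26.2\nnode3   Ready    control-plane   5m    v1.26.2\n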
                "},{"location":"admin/kpanda/best-practice/add-worker-node-on-global.html","title":"\u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u5de5\u4f5c\u8282\u70b9\u6269\u5bb9","text":"

                \u672c\u6587\u5c06\u4ecb\u7ecd\u79bb\u7ebf\u6a21\u5f0f\u4e0b\uff0c\u5982\u4f55\u624b\u52a8\u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u5de5\u4f5c\u8282\u70b9\u8fdb\u884c\u6269\u5bb9\u3002 \u9ed8\u8ba4\u60c5\u51b5\u4e0b\uff0c\u4e0d\u5efa\u8bae\u5728\u90e8\u7f72 AI \u7b97\u529b\u4e2d\u5fc3\u540e\u5bf9\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u8fdb\u884c\u6269\u5bb9\uff0c\u8bf7\u5728\u90e8\u7f72 AI \u7b97\u529b\u4e2d\u5fc3\u524d\u505a\u597d\u8d44\u6e90\u89c4\u5212\u3002

                Note

                \u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u63a7\u5236\u8282\u70b9\u4e0d\u652f\u6301\u6269\u5bb9\u3002

                "},{"location":"admin/kpanda/best-practice/add-worker-node-on-global.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                • \u5df2\u7ecf\u901a\u8fc7\u706b\u79cd\u8282\u70b9\u5b8c\u6210 AI \u7b97\u529b\u4e2d\u5fc3\u5e73\u53f0\u7684\u90e8\u7f72\uff0c\u5e76\u4e14\u706b\u79cd\u8282\u70b9\u4e0a\u7684 kind \u96c6\u7fa4\u8fd0\u884c\u6b63\u5e38\u3002
                • \u5fc5\u987b\u4f7f\u7528\u5e73\u53f0 Admin \u6743\u9650\u7684\u7528\u6237\u767b\u5f55\u3002
                "},{"location":"admin/kpanda/best-practice/add-worker-node-on-global.html#kind-kubeconfig","title":"\u83b7\u53d6\u706b\u79cd\u8282\u70b9\u4e0a kind \u96c6\u7fa4\u7684 kubeconfig","text":"
                1. \u6267\u884c\u5982\u4e0b\u547d\u4ee4\u767b\u5f55\u706b\u79cd\u8282\u70b9\uff1a

                  ssh root@\u706b\u79cd\u8282\u70b9 IP \u5730\u5740\n
                2. \u5728\u706b\u79cd\u8282\u70b9\u4e0a\u6267\u884c\u5982\u4e0b\u547d\u4ee4\u83b7\u53d6 kind \u96c6\u7fa4\u7684 CONTAINER ID\uff1a

                  [root@localhost ~]# podman ps\n\n# \u9884\u671f\u8f93\u51fa\u5982\u4e0b\uff1a\nCONTAINER ID  IMAGE                                      COMMAND     CREATED      STATUS      PORTS                                                                                                         NAMES\n220d662b1b6a  docker.m.daocloud.io/kindest/node:v1.26.2              2 weeks ago  Up 2 weeks  0.0.0.0:443->30443/tcp, 0.0.0.0:8081->30081/tcp, 0.0.0.0:9000-9001->32000-32001/tcp, 0.0.0.0:36674->6443/tcp  my-cluster-installer-control-plane\n
                3. Run the following command to enter the kind cluster container:

                  podman exec -it {CONTAINER ID} bash\n

                  Replace {CONTAINER ID} with your actual container ID.

                4. Inside the kind cluster container, run the following command to get the kind cluster's kubeconfig:

                  kubectl config view --minify --flatten --raw\n

                After the console prints the output, copy the kind cluster's kubeconfig in preparation for the next step.
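
                To double-check the kubeconfig before moving on, you can save it to a file inside the kind cluster container and query the cluster with it; a minimal sketch (the file path is illustrative):

                # inside the kind cluster container\nkubectl config view --minify --flatten --raw > /tmp/kind-kubeconfig.yaml\nkubectl --kubeconfig /tmp/kind-kubeconfig.yaml get nodes\n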

                "},{"location":"admin/kpanda/best-practice/add-worker-node-on-global.html#kind-clusterkubeanio","title":"\u5728\u706b\u79cd\u8282\u70b9\u4e0a kind \u96c6\u7fa4\u5185\u521b\u5efa cluster.kubean.io \u8d44\u6e90","text":"
                1. \u4f7f\u7528 podman exec -it {CONTAINER ID} bash \u547d\u4ee4\u8fdb\u5165 kind \u96c6\u7fa4\u5bb9\u5668\u5185\u3002

                2. \u5728 kind \u96c6\u7fa4\u5bb9\u5668\u5185\uff0c\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c\u83b7\u53d6 kind \u96c6\u7fa4\u540d\u79f0 \uff1a

                  kubectl get clusters\n
                3. Copy the following command and run it inside the kind cluster to create the cluster.kubean.io resource:

                  kubectl apply -f - <<EOF\napiVersion: kubean.io/v1alpha1\nkind: Cluster\nmetadata:\n  labels:\n    clusterName: kpanda-global-cluster\n  name: kpanda-global-cluster\nspec:\n  hostsConfRef:\n    name: my-cluster-hosts-conf\n    namespace: kubean-system\n  kubeconfRef:\n    name: my-cluster-kubeconf\n    namespace: kubean-system\n  varsConfRef:\n    name: my-cluster-vars-conf\n    namespace: kubean-system\nEOF\n

                  Note

                  The cluster name in spec.hostsConfRef.name, spec.kubeconfRef.name, and spec.varsConfRef.name defaults to my-cluster; replace it with the kind cluster name obtained in the previous step.

                4. Run the following command inside the kind cluster to verify that the cluster.kubean.io resource was created successfully:

                  kubectl get clusters\n

                  The expected output is:

                  NAME                    AGE\nkpanda-global-cluster   3s\nmy-cluster              16d\n
                "},{"location":"admin/kpanda/best-practice/add-worker-node-on-global.html#kind-containerd","title":"\u66f4\u65b0\u706b\u79cd\u8282\u70b9\u4e0a\u7684 kind \u96c6\u7fa4\u91cc\u7684 containerd \u914d\u7f6e","text":"
                1. \u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c\u767b\u5f55\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u5176\u4e2d\u4e00\u4e2a\u63a7\u5236\u8282\u70b9\uff1a

                  ssh root@\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u63a7\u5236\u8282\u70b9 IP \u5730\u5740\n
                2. \u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u63a7\u5236\u8282\u70b9\u4e0a\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c\u5c06\u63a7\u5236\u8282\u70b9\u7684 containerd \u914d\u7f6e\u6587\u4ef6 config.toml \u590d\u5236\u5230\u706b\u79cd\u8282\u70b9\u4e0a\uff1a

                  scp /etc/containerd/config.toml root@{\u706b\u79cd\u8282\u70b9 IP}:/root\n
                3. On the bootstrap node, take the insecure image registry section from the containerd configuration file config.toml copied from the control node and add it to the config.toml inside the kind cluster.

                  An example of the insecure image registry section:

                  [plugins.\"io.containerd.grpc.v1.cri\".registry]\n  [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors]\n    [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors.\"10.6.202.20\"]\n      endpoint = [\"https://10.6.202.20\"]\n    [plugins.\"io.containerd.grpc.v1.cri\".registry.configs.\"10.6.202.20\".tls]\n      insecure_skip_verify = true\n

                  Note

                  Since the config.toml file cannot be modified directly inside the kind cluster, copy the file out, modify it, then copy it back into the kind cluster, as follows:

                  1. Run the following command on the bootstrap node to copy the file out

                    podman cp {CONTAINER ID}:/etc/containerd/config.toml ./config.toml.kind\n
                  2. Run the following command to edit the config.toml file

                    vim ./config.toml.kind\n
                  3. Copy the modified file back into the kind cluster with the following command

                    podman cp ./config.toml.kind {CONTAINER ID}:/etc/containerd/config.toml\n

                    Replace {CONTAINER ID} with your actual container ID.

                4. Run the following command inside the kind cluster to restart the containerd service

                  systemctl restart containerd\n
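
                  To confirm that containerd picked up the new registry entries after the restart, you can inspect its effective CRI configuration from inside the kind cluster container; a sketch, assuming the example registry address 10.6.202.20 above:

                  crictl info | grep -A 5 10.6.202.20\n# the mirror endpoint and insecure_skip_verify setting should appear in the output\n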
                "},{"location":"admin/kpanda/best-practice/add-worker-node-on-global.html#kind-ai","title":"\u5c06 kind \u96c6\u7fa4\u63a5\u5165 AI \u7b97\u529b\u4e2d\u5fc3\u96c6\u7fa4\u5217\u8868","text":"
                1. \u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3\uff0c\u8fdb\u5165\u5bb9\u5668\u7ba1\u7406\uff0c\u5728\u96c6\u7fa4\u5217\u8868\u9875\u53f3\u4fa7\u70b9\u51fb \u63a5\u5165\u96c6\u7fa4 \u6309\u94ae\uff0c\u8fdb\u5165\u63a5\u5165\u96c6\u7fa4\u9875\u9762\u3002

                2. \u5728\u63a5\u5165\u914d\u7f6e\u5904\uff0c\u586b\u5165\u5e76\u7f16\u8f91\u521a\u521a\u590d\u5236\u7684 kind \u96c6\u7fa4\u7684 kubeconfig \u914d\u7f6e\u3002

                  apiVersion: v1\nclusters:\n- cluster:\n    insecure-skip-tls-verify: true # (1)!\n    certificate-authority-data: LS0TLSCFDFWEFEWFEWFGGEWGFWFEWGWEGFEWGEWGSDGFSDSD\n    server: https://my-cluster-installer-control-plane:6443 # (2)!\nname: my-cluster-installer\ncontexts:\n- context:\n    cluster: my-cluster-installer\n    user: kubernetes-admin\nname: kubernetes-admin@my-cluster-installer\ncurrent-context: kubernetes-admin@my-cluster-installer\nkind: Config\npreferences: {}\nusers:\n
                  1. Skip TLS verification; this line must be added manually
                  2. Replace with the bootstrap node's IP, and replace port 6443 with the port mapped on the node (you can run the podman ps|grep 6443 command to check the mapped port)

                3. Click Confirm to finish integrating the kind cluster.

                "},{"location":"admin/kpanda/best-practice/add-worker-node-on-global.html#_3","title":"\u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u6dfb\u52a0\u6807\u7b7e","text":"
                1. \u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3\uff0c\u8fdb\u5165\u5bb9\u5668\u7ba1\u7406\uff0c\u627e\u5230 kapnda-glabal-cluster \u96c6\u7fa4\uff0c\u5728\u53f3\u4fa7\u64cd\u4f5c\u5217\u8868\u627e\u5230 \u57fa\u7840\u914d\u7f6e \u83dc\u5355\u9879\u5e76\u8fdb\u5165\u57fa\u7840\u914d\u7f6e\u754c\u9762\u3002

                2. \u5728\u57fa\u7840\u914d\u7f6e\u9875\u9762\uff0c\u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u6dfb\u52a0\u7684\u6807\u7b7e kpanda.io/managed-by=my-cluster\uff1a

                Note

                \u6807\u7b7e kpanda.io/managed-by=my-cluster \u4e2d\u7684 vaule \u503c\u4e3a\u63a5\u5165\u96c6\u7fa4\u65f6\u6307\u5b9a\u7684\u96c6\u7fa4\u540d\u79f0\uff0c\u9ed8\u8ba4\u4e3a my-cluster\uff0c\u5177\u4f53\u4f9d\u636e\u60a8\u7684\u5b9e\u9645\u60c5\u51b5\u3002

                "},{"location":"admin/kpanda/best-practice/add-worker-node-on-global.html#_4","title":"\u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u6dfb\u52a0\u8282\u70b9","text":"
                1. \u8fdb\u5165\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u8282\u70b9\u5217\u8868\u9875\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u63a5\u5165\u8282\u70b9 \u6309\u94ae\u3002

                2. \u586b\u5165\u5f85\u63a5\u5165\u8282\u70b9\u7684 IP \u548c\u8ba4\u8bc1\u4fe1\u606f\u540e\u70b9\u51fb \u5f00\u59cb\u68c0\u67e5 \uff0c\u901a\u8fc7\u8282\u70b9\u68c0\u67e5\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                3. \u5728 \u81ea\u5b9a\u4e49\u53c2\u6570 \u5904\u6dfb\u52a0\u5982\u4e0b\u81ea\u5b9a\u4e49\u53c2\u6570\uff1a

                  download_run_once: false\ndownload_container: false\ndownload_force_cache: false\ndownload_localhost: false\n

                4. Click OK and wait for the node to be added.
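
                  Once the node has been added, you can confirm from the console of the global service cluster that it joined; a sketch (the node name will be whatever you registered):

                  kubectl get node -o wide\n# the newly added worker should be listed in Ready status\n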

                "},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html","title":"MySQL \u5e94\u7528\u53ca\u6570\u636e\u7684\u8de8\u96c6\u7fa4\u5907\u4efd\u6062\u590d","text":"

                \u672c\u6b21\u6f14\u793a\u5c06\u57fa\u4e8e AI \u7b97\u529b\u4e2d\u5fc3\u7684\u5e94\u7528\u5907\u4efd\u529f\u80fd\uff0c\u5b9e\u73b0\u4e00\u4e2a\u6709\u72b6\u6001\u5e94\u7528\u7684\u8de8\u96c6\u7fa4\u5907\u4efd\u8fc1\u79fb\u3002

                Note

                \u5f53\u524d\u64cd\u4f5c\u8005\u5e94\u5177\u6709 AI \u7b97\u529b\u4e2d\u5fc3\u5e73\u53f0\u7ba1\u7406\u5458\u7684\u6743\u9650\u3002

                "},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html#_1","title":"\u51c6\u5907\u6f14\u793a\u73af\u5883","text":""},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html#_2","title":"\u51c6\u5907\u4e24\u4e2a\u96c6\u7fa4","text":"

                main-cluster \u4f5c\u4e3a\u5907\u4efd\u6570\u636e\u7684\u6e90\u96c6\u7fa4\uff0c recovery-cluster \u96c6\u7fa4\u4f5c\u4e3a\u9700\u8981\u6062\u590d\u6570\u636e\u7684\u76ee\u6807\u96c6\u7fa4\u3002

                \u96c6\u7fa4 IP \u8282\u70b9 main-cluster 10.6.175.100 1 \u8282\u70b9 recovery-cluster 10.6.175.110 1 \u8282\u70b9"},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html#minio","title":"\u642d\u5efa MinIO \u914d\u7f6e","text":"MinIO \u670d\u52a1\u5668\u8bbf\u95ee\u5730\u5740 \u5b58\u50a8\u6876 \u7528\u6237\u540d \u5bc6\u7801 http://10.7.209.110:9000 mysql-demo root dangerous"},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html#nfs","title":"\u5728\u4e24\u4e2a\u96c6\u7fa4\u90e8\u7f72 NFS \u5b58\u50a8\u670d\u52a1","text":"

                Note

                The NFS storage service must be deployed on all nodes of both the source cluster and the target cluster.

                1. Install the dependencies required by NFS on all nodes of both clusters.

                  yum install nfs-utils iscsi-initiator-utils -y\n

                2. Prepare the NFS storage service for the MySQL application.

                  Log in to any control node of the main-cluster and recovery-cluster clusters. Use the vi nfs.yaml command to create a file named nfs.yaml on the node, and copy the YAML content below into it.

                  The complete nfs.yaml:

                  kind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\nname: nfs-provisioner-runner\nnamespace: nfs-system\nrules:\n- apiGroups: [\"\"]\n    resources: [\"persistentvolumes\"]\n    verbs: [\"get\", \"list\", \"watch\", \"create\", \"delete\"]\n- apiGroups: [\"\"]\n    resources: [\"persistentvolumeclaims\"]\n    verbs: [\"get\", \"list\", \"watch\", \"update\"]\n- apiGroups: [\"storage.k8s.io\"]\n    resources: [\"storageclasses\"]\n    verbs: [\"get\", \"list\", \"watch\"]\n- apiGroups: [\"\"]\n    resources: [\"events\"]\n    verbs: [\"create\", \"update\", \"patch\"]\n- apiGroups: [\"\"]\n    resources: [\"services\", \"endpoints\"]\n    verbs: [\"get\"]\n- apiGroups: [\"extensions\"]\n    resources: [\"podsecuritypolicies\"]\n    resourceNames: [\"nfs-provisioner\"]\n    verbs: [\"use\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\nname: run-nfs-provisioner\nsubjects:\n- kind: ServiceAccount\n    name: nfs-provisioner\n    # replace with namespace where provisioner is deployed\n    namespace: default\nroleRef:\nkind: ClusterRole\nname: nfs-provisioner-runner\napiGroup: rbac.authorization.k8s.io\n---\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\nname: leader-locking-nfs-provisioner\nrules:\n- apiGroups: [\"\"]\n    resources: [\"endpoints\"]\n    verbs: [\"get\", \"list\", \"watch\", \"create\", \"update\", \"patch\"]\n---\nkind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\nname: leader-locking-nfs-provisioner\nsubjects:\n- kind: ServiceAccount\n    name: nfs-provisioner\n    # replace with namespace where provisioner is deployed\n    namespace: default\nroleRef:\nkind: Role\nname: leader-locking-nfs-provisioner\napiGroup: rbac.authorization.k8s.io\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\nname: nfs-provisioner\n---\nkind: Service\napiVersion: v1\nmetadata:\nname: nfs-provisioner\nlabels:\n    app: nfs-provisioner\nspec:\nports:\n    - name: nfs\n    port: 2049\n    - name: nfs-udp\n    port: 2049\n    protocol: UDP\n    - name: nlockmgr\n    port: 32803\n    - name: nlockmgr-udp\n    port: 32803\n    protocol: UDP\n    - name: mountd\n    port: 20048\n    - name: mountd-udp\n    port: 20048\n    protocol: UDP\n    - name: rquotad\n    port: 875\n    - name: rquotad-udp\n    port: 875\n    protocol: UDP\n    - name: rpcbind\n    port: 111\n    - name: rpcbind-udp\n    port: 111\n    protocol: UDP\n    - name: statd\n    port: 662\n    - name: statd-udp\n    port: 662\n    protocol: UDP\nselector:\n    app: nfs-provisioner\n---\nkind: Deployment\napiVersion: apps/v1\nmetadata:\nname: nfs-provisioner\nspec:\nselector:\n    matchLabels:\n    app: nfs-provisioner\nreplicas: 1\nstrategy:\n    type: Recreate\ntemplate:\n    metadata:\n    labels:\n        app: nfs-provisioner\n    spec:\n    serviceAccount: nfs-provisioner\n    containers:\n        - name: nfs-provisioner\n        resources:\n            limits:\n            cpu: \"1\"\n            memory: \"4294967296\"\n        image: release.daocloud.io/velero/nfs-provisioner:v3.0.0\n        ports:\n            - name: nfs\n            containerPort: 2049\n            - name: nfs-udp\n            containerPort: 2049\n            protocol: UDP\n            - name: nlockmgr\n            containerPort: 32803\n            - name: nlockmgr-udp\n            containerPort: 32803\n            protocol: UDP\n            - name: mountd\n            containerPort: 20048\n            - name: mountd-udp\n            
containerPort: 20048\n            protocol: UDP\n            - name: rquotad\n            containerPort: 875\n            - name: rquotad-udp\n            containerPort: 875\n            protocol: UDP\n            - name: rpcbind\n            containerPort: 111\n            - name: rpcbind-udp\n            containerPort: 111\n            protocol: UDP\n            - name: statd\n            containerPort: 662\n            - name: statd-udp\n            containerPort: 662\n            protocol: UDP\n        securityContext:\n            capabilities:\n            add:\n                - DAC_READ_SEARCH\n                - SYS_RESOURCE\n        args:\n            - \"-provisioner=example.com/nfs\"\n        env:\n            - name: POD_IP\n            valueFrom:\n                fieldRef:\n                fieldPath: status.podIP\n            - name: SERVICE_NAME\n            value: nfs-provisioner\n            - name: POD_NAMESPACE\n            valueFrom:\n                fieldRef:\n                fieldPath: metadata.namespace\n        imagePullPolicy: \"IfNotPresent\"\n        volumeMounts:\n            - name: export-volume\n            mountPath: /export\n    volumes:\n        - name: export-volume\n        hostPath:\n            path: /data\n---\nkind: StorageClass\napiVersion: storage.k8s.io/v1\nmetadata:\nname: nfs\nprovisioner: example.com/nfs\nmountOptions:\n- vers=4.1\n

                3. Apply the nfs.yaml file on the control nodes of both clusters.

                  kubectl apply -f nfs.yaml\n

                  The expected output is:

                  [root@g-master1 ~]# kubectl apply -f nfs.yaml\nclusterrole.rbac.authorization.k8s.io/nfs-provisioner-runner created\nclusterrolebinding.rbac.authorization.k8s.io/run-nfs-provisioner created\nrole.rbac.authorization.k8s.io/leader-locking-nfs-provisioner created\nrolebinding.rbac.authorization.k8s.io/leader-locking-nfs-provisioner created\nserviceaccount/nfs-provisioner created\nservice/nfs-provisioner created\ndeployment.apps/nfs-provisioner created\nstorageclass.storage.k8s.io/nfs created\n
                4. Check the status of the NFS Pod and wait until it becomes running (about 2 minutes).

                  kubectl get pod -n nfs-system -owide\n

                  The expected output is:

                  [root@g-master1 ~]# kubectl get pod -owide\nNAME                               READY   STATUS    RESTARTS   AGE     IP              NODE        NOMINATED NODE   READINESS GATES\nnfs-provisioner-7dfb9bcc45-74ws2   1/1     Running   0          4m45s   10.6.175.100   g-master1   <none>           <none>\n
                "},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html#mysql_1","title":"\u90e8\u7f72 MySQL \u5e94\u7528","text":"
                1. \u4e3a MySQL \u5e94\u7528\u51c6\u5907\u57fa\u4e8e NFS \u5b58\u50a8\u7684 PVC\uff0c\u7528\u6765\u5b58\u50a8 MySQL \u670d\u52a1\u5185\u7684\u6570\u636e\u3002

                  \u4f7f\u7528 vi pvc.yaml \u547d\u4ee4\u5728\u8282\u70b9\u4e0a\u521b\u5efa\u540d\u4e3a pvc.yaml \u7684\u6587\u4ef6\uff0c\u5c06\u4e0b\u9762\u7684 YAML \u5185\u5bb9\u590d\u5236\u5230 pvc.yaml \u6587\u4ef6\u5185\u3002

                  pvc.yaml

                  apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: mydata\n  namespace: default\nspec:\n  accessModes:\n  - ReadWriteOnce\n  resources:\n    requests:\n      storage: \"1Gi\"\n  storageClassName: nfs\n  volumeMode: Filesystem\n

                2. Apply the pvc.yaml file on the node using the kubectl tool.

                  kubectl apply -f pvc.yaml\n

                  The expected output is:

                  [root@g-master1 ~]# kubectl apply -f pvc.yaml\npersistentvolumeclaim/mydata created\n

                3. Deploy the MySQL application.

                  Use the vi mysql.yaml command to create a file named mysql.yaml on the node, and copy the YAML content below into it.

                  The complete mysql.yaml:

                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: mysql-deploy\n  name: mysql-deploy\n  namespace: default\nspec:\n  progressDeadlineSeconds: 600\n  replicas: 1\n  revisionHistoryLimit: 10\n  selector:\n    matchLabels:\n      app: mysql-deploy\n  strategy:\n    rollingUpdate:\n      maxSurge: 25%\n      maxUnavailable: 25%\n    type: RollingUpdate\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mysql-deploy\n      name: mysql-deploy\n    spec:\n      containers:\n      - args:\n        - --ignore-db-dir=lost+found\n        env:\n        - name: MYSQL_ROOT_PASSWORD\n          value: dangerous\n        image: release.daocloud.io/velero/mysql:5\n        imagePullPolicy: IfNotPresent\n        name: mysql-deploy\n        ports:\n        - containerPort: 3306\n          protocol: TCP\n        resources:\n          limits:\n            cpu: \"1\"\n            memory: \"4294967296\"\n        terminationMessagePath: /dev/termination-log\n        terminationMessagePolicy: File\n        volumeMounts:\n        - mountPath: /var/lib/mysql\n          name: data\n      dnsPolicy: ClusterFirst\n      restartPolicy: Always\n      schedulerName: default-scheduler\n      securityContext:\n        fsGroup: 999\n      terminationGracePeriodSeconds: 30\n      volumes:\n      - name: data\n        persistentVolumeClaim:\n          claimName: mydata\n

                4. Apply the mysql.yaml file on the node using the kubectl tool.

                  kubectl apply -f mysql.yaml\n

                  The expected output is:

                  [root@g-master1 ~]# kubectl apply -f mysql.yaml\ndeployment.apps/mysql-deploy created\n
                5. Check the status of the MySQL Pod.

                  Run kubectl get pod | grep mysql to check the MySQL Pod's status, and wait until it becomes running (about 2 minutes).

                  The expected output is:

                  [root@g-master1 ~]# kubectl get pod |grep mysql\nmysql-deploy-5d6f94cb5c-gkrks      1/1     Running   0          2m53s\n

                  Note

                  • If the MySQL Pod stays in a non-running state for a long time, it is usually because the NFS dependencies were not installed on all nodes of the cluster.
                  • Run kubectl describe pod ${mysql pod name} to view the Pod's details.
                  • If the error contains a message like MountVolume.SetUp failed for volume \"pvc-4ad70cc6-df37-4253-b0c9-8cb86518ccf8\" : mount failed: exit status 32, delete the previous resources with kubectl delete -f nfs.yaml/pvc.yaml/mysql.yaml respectively, then start again from deploying the NFS service.
                6. Write data into the MySQL application.

                  To make it easy to verify later whether the data migration succeeded, you can use a script to write test data into the MySQL application.

                  1. Use the vi insert.sh command to create a script named insert.sh on the node, and copy the script content below into it.

                    insert.sh
                    #!/bin/bash\n\nfunction rand(){\n    min=$1\n    max=$(($2-$min+1))\n    num=$(date +%s%N)\n    echo $(($num%$max+$min))\n}\n\nfunction insert(){\n    user=$(date +%s%N | md5sum | cut -c 1-9)\n    age=$(rand 1 100)\n\n    sql=\"INSERT INTO test.users(user_name, age)VALUES('${user}', ${age});\"\n    echo -e ${sql}\n\n    kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"${sql}\"\n\n}\n\nkubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"CREATE DATABASE IF NOT EXISTS test;\"\nkubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"CREATE TABLE IF NOT EXISTS test.users(user_name VARCHAR(10) NOT NULL,age INT UNSIGNED)ENGINE=InnoDB DEFAULT CHARSET=utf8;\"\n\nwhile true;do\n    insert\n    sleep 1\ndone\n
                  2. Make the insert.sh script executable and run it.

                    [root@g-master1 ~]# chmod +x insert.sh\n[root@g-master1 ~]# ./insert.sh\n

                    The expected output is:

                    mysql: [Warning] Using a password on the command line interface can be insecure.\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('dc09195ba', 10);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('80ab6aa28', 70);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('f488e3d46', 23);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('e6098695c', 93);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('eda563e7d', 63);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('a4d1b8d68', 17);\nmysql: [Warning] Using a password on the command line interface can be insecure.\n
                  3. Press Ctrl+C to stop the script.

                  4. Go to the MySQL Pod and check the data written into MySQL.

                    kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"SELECT * FROM test.users;\"\n

                    The expected output is:

                    mysql: [Warning] Using a password on the command line interface can be insecure.\nuser_name   age\ndc09195ba   10\n80ab6aa28   70\nf488e3d46   23\ne6098695c   93\neda563e7d   63\na4d1b8d68   17\nea47546d9   86\na34311f2e   47\n740cefe17   33\nede85ea28   65\nb6d0d6a0e   46\nf0eb38e50   44\nc9d2f28f5   72\n8ddaafc6f   31\n3ae078d0e   23\n6e041631e   96\n
                "},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html#velero","title":"\u5728\u4e24\u4e2a\u96c6\u7fa4\u5b89\u88c5 velero \u63d2\u4ef6","text":"

                Note

                \u9700\u8981\u5728 \u6e90\u96c6\u7fa4\u548c\u76ee\u6807\u96c6\u7fa4 \u4e0a\u5747\u5b89\u88c5 velero \u63d2\u4ef6\u3002

                \u53c2\u8003\u5b89\u88c5 velero \u63d2\u4ef6\u6587\u6863\u548c\u4e0b\u65b9\u7684 MinIO \u914d\u7f6e\uff0c\u5728 main-cluster \u96c6\u7fa4\u548c recovery-cluster \u96c6\u7fa4\u4e0a\u5b89\u88c5 velero \u63d2\u4ef6\u3002

                minio \u670d\u52a1\u5668\u8bbf\u95ee\u5730\u5740 \u5b58\u50a8\u6876 \u7528\u6237\u540d \u5bc6\u7801 http://10.7.209.110:9000 mysql-demo root dangerous

                Note

                \u5b89\u88c5\u63d2\u4ef6\u65f6\u9700\u8981\u5c06 S3url \u66ff\u6362\u4e3a\u6b64\u6b21\u6f14\u793a\u51c6\u5907\u7684 MinIO \u670d\u52a1\u5668\u8bbf\u95ee\u5730\u5740\uff0c\u5b58\u50a8\u6876\u66ff\u6362\u4e3a MinIO \u4e2d\u771f\u5b9e\u5b58\u5728\u7684\u5b58\u50a8\u6876\u3002

                "},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html#mysql_2","title":"\u5907\u4efd MySQL \u5e94\u7528\u53ca\u6570\u636e","text":"
                1. \u5728\u5907\u4efd\u524d\u6211\u4eec\u9700\u8981\u5148\u4fdd\u8bc1\u6570\u636e\u5e93\u4e0d\u80fd\u6709\u65b0\u6570\u636e\u8fdb\u6765\uff0c\u6240\u4ee5\u8981\u8bbe\u7f6e\u4e3a\u53ea\u8bfb\u6a21\u5f0f\uff1a

                  mysql> set global read_only=1;    #1\u662f\u53ea\u8bfb\uff0c0\u662f\u8bfb\u5199\nmysql> show global variables like \"%read_only%\"; #\u67e5\u8be2\u72b6\u6001\n
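
                  This leaves the source database read-only. If main-cluster should continue serving writes after the backup completes, remember to switch it back:

                  mysql> set global read_only=0;   # restore read-write mode once the backup has finished\n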
                2. Add a dedicated label, backup=mysql , to the MySQL application and its PVC data, to make it easy to select the resources during backup.

                  kubectl label deploy mysql-deploy backup=mysql # label the mysql-deploy workload\nkubectl label pod mysql-deploy-5d6f94cb5c-gkrks backup=mysql # label the mysql pod\nkubectl label pvc mydata backup=mysql # label the mysql pvc\n
                3. Create an application backup following the steps described in Application Backup, with the parameters below.

                  • Name: backup-mysql (customizable)
                  • Source cluster: main-cluster
                  • Namespace: default
                  • Resource filter - resource label: backup:mysql

                4. After the backup plan is created, the page automatically returns to the backup plan list. Find the newly created backup plan backup-mysql , click the more actions button ... , and choose Execute Now to run the new backup plan.

                5. Wait for the backup plan to finish executing before performing subsequent operations.
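
                  For reference only: the platform drives velero underneath, and a roughly equivalent invocation with the upstream velero CLI (assuming it is installed and pointed at main-cluster) would look like the following sketch:

                  velero backup create backup-mysql --include-namespaces default --selector backup=mysql\nvelero backup describe backup-mysql   # wait until the phase is Completed\n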

                "},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html#mysql_3","title":"\u8de8\u96c6\u7fa4\u6062\u590d MySQL \u5e94\u7528\u53ca\u6570\u636e","text":"
                1. \u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3\u5e73\u53f0\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u9009\u62e9 \u5bb9\u5668\u7ba1\u7406 -> \u5907\u4efd\u6062\u590d -> \u5e94\u7528\u5907\u4efd \u3002

                2. \u5728\u5de6\u4fa7\u529f\u80fd\u680f\u9009\u62e9 \u6062\u590d \uff0c\u7136\u540e\u5728\u53f3\u4fa7\u70b9\u51fb \u6062\u590d\u5907\u4efd \u3002

                3. \u67e5\u770b\u4ee5\u4e0b\u8bf4\u660e\u586b\u5199\u53c2\u6570\uff1a

                  • \u540d\u79f0\uff1a restore-mysql \uff08\u53ef\u4ee5\u81ea\u5b9a\u4e49\uff09
                  • \u5907\u4efd\u6e90\u96c6\u7fa4\uff1a main-cluster
                  • \u5907\u4efd\u8ba1\u5212\uff1a backup-mysql
                  • \u5907\u4efd\u70b9\uff1adefault
                  • \u6062\u590d\u76ee\u6807\u96c6\u7fa4\uff1a recovery-cluster

                4. \u5237\u65b0\u5907\u4efd\u8ba1\u5212\u5217\u8868\uff0c\u7b49\u5f85\u5907\u4efd\u8ba1\u5212\u6267\u884c\u5b8c\u6210\u3002
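
                  Again for reference only, the closest upstream velero CLI equivalent, run against recovery-cluster with the same backup storage location, would be a sketch like:

                  velero restore create restore-mysql --from-backup backup-mysql\nvelero restore describe restore-mysql   # wait until the phase is Completed\n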

                "},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html#_3","title":"\u9a8c\u8bc1\u6570\u636e\u662f\u5426\u6210\u529f\u6062\u590d","text":"
                1. \u767b\u5f55 recovery-cluster \u96c6\u7fa4\u7684\u63a7\u5236\u8282\u70b9\uff0c\u67e5\u770b mysql-deploy \u8d1f\u8f7d\u662f\u5426\u5df2\u7ecf\u6210\u529f\u5907\u4efd\u5230\u5f53\u524d\u96c6\u7fa4\u3002

                  kubectl get pod\n

                  The expected output is:

                  NAME                               READY   STATUS    RESTARTS   AGE\nmysql-deploy-5798f5d4b8-62k6c      1/1     Running   0          24h\n
                2. Check whether the data in the MySQL tables was restored successfully.

                  kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"SELECT * FROM test.users;\"\n

                  The expected output is:

                  mysql: [Warning] Using a password on the command line interface can be insecure.\nuser_name   age\ndc09195ba   10\n80ab6aa28   70\nf488e3d46   23\ne6098695c   93\neda563e7d   63\na4d1b8d68   17\nea47546d9   86\na34311f2e   47\n740cefe17   33\nede85ea28   65\nb6d0d6a0e   46\nf0eb38e50   44\nc9d2f28f5   72\n8ddaafc6f   31\n3ae078d0e   23\n6e041631e   96\n

                  Success

                  The data in the Pod matches the data in the Pod in the main-cluster cluster. This shows that the MySQL application and its data have been successfully restored across clusters from main-cluster to the recovery-cluster cluster.

                "},{"location":"admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html","title":"\u5728 CentOS \u7ba1\u7406\u5e73\u53f0\u4e0a\u521b\u5efa RedHat 9.2 \u5de5\u4f5c\u96c6\u7fa4","text":"

                \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5728\u5df2\u6709\u7684 CentOS \u7ba1\u7406\u5e73\u53f0\u4e0a\u521b\u5efa RedHat 9.2 \u5de5\u4f5c\u96c6\u7fa4\u3002

                Note

                \u672c\u6587\u4ec5\u9488\u5bf9\u79bb\u7ebf\u6a21\u5f0f\u4e0b\uff0c\u4f7f\u7528 AI \u7b97\u529b\u4e2d\u5fc3\u5e73\u53f0\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4\uff0c\u7ba1\u7406\u5e73\u53f0\u548c\u5f85\u5efa\u5de5\u4f5c\u96c6\u7fa4\u7684\u67b6\u6784\u5747\u4e3a AMD\u3002 \u521b\u5efa\u96c6\u7fa4\u65f6\u4e0d\u652f\u6301\u5f02\u6784\uff08AMD \u548c ARM \u6df7\u5408\uff09\u90e8\u7f72\uff0c\u60a8\u53ef\u4ee5\u5728\u96c6\u7fa4\u521b\u5efa\u5b8c\u6210\u540e\uff0c\u901a\u8fc7\u63a5\u5165\u5f02\u6784\u8282\u70b9\u7684\u65b9\u5f0f\u8fdb\u884c\u96c6\u7fa4\u6df7\u5408\u90e8\u7f72\u7ba1\u7406\u3002

                "},{"location":"admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                \u5df2\u7ecf\u90e8\u7f72\u597d\u4e00\u4e2a AI \u7b97\u529b\u4e2d\u5fc3\u5168\u6a21\u5f0f\uff0c\u5e76\u4e14\u706b\u79cd\u8282\u70b9\u8fd8\u5b58\u6d3b\uff0c\u90e8\u7f72\u53c2\u8003\u6587\u6863\u79bb\u7ebf\u5b89\u88c5 AI \u7b97\u529b\u4e2d\u5fc3\u5546\u4e1a\u7248

                "},{"location":"admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#redhat","title":"\u4e0b\u8f7d\u5e76\u5bfc\u5165 RedHat \u76f8\u5173\u79bb\u7ebf\u5305","text":"

                \u8bf7\u786e\u4fdd\u5df2\u7ecf\u767b\u5f55\u5230\u706b\u79cd\u8282\u70b9\uff01\u5e76\u4e14\u4e4b\u524d\u90e8\u7f72 AI \u7b97\u529b\u4e2d\u5fc3\u65f6\u4f7f\u7528\u7684 clusterConfig.yaml \u6587\u4ef6\u8fd8\u5728\u3002

                "},{"location":"admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#redhat_1","title":"\u4e0b\u8f7d RedHat \u76f8\u5173\u79bb\u7ebf\u5305","text":"

                \u4e0b\u8f7d\u6240\u9700\u7684 RedHat OS package \u5305\u548c ISO \u79bb\u7ebf\u5305\uff1a

                \u8d44\u6e90\u540d \u8bf4\u660e \u4e0b\u8f7d\u5730\u5740 os-pkgs-redhat9-v0.9.3.tar.gz RedHat9.2 OS-package \u5305 https://github.com/kubean-io/kubean/releases/download/v0.9.3/os-pkgs-redhat9-v0.9.3.tar.gz ISO \u79bb\u7ebf\u5305 ISO \u5305\u5bfc\u5165\u706b\u79cd\u8282\u70b9\u811a\u672c \u524d\u5f80 RedHat \u5b98\u65b9\u5730\u5740\u767b\u5f55\u4e0b\u8f7d import-iso ISO \u5bfc\u5165\u706b\u79cd\u8282\u70b9\u811a\u672c https://github.com/kubean-io/kubean/releases/download/v0.9.3/import_iso.sh"},{"location":"admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#os-pckage-minio","title":"\u5bfc\u5165 os pckage \u79bb\u7ebf\u5305\u81f3\u706b\u79cd\u8282\u70b9\u7684 minio","text":"

                Unpack the RedHat OS package offline package

                Run the following command to unpack the downloaded OS package offline package; here it is the RedHat OS package offline package downloaded above.

                tar -xvf os-pkgs-redhat9-v0.9.3.tar.gz \n

                The contents of the unpacked OS package are as follows:

                    os-pkgs\n    ├── import_ospkgs.sh       # script that imports the os packages into the MinIO file service\n    ├── os-pkgs-amd64.tar.gz   # os packages for the amd64 architecture\n    ├── os-pkgs-arm64.tar.gz   # os packages for the arm64 architecture\n    └── os-pkgs.sha256sum.txt  # sha256sum checksum file of the os packages\n

                Import the OS package into the MinIO on the bootstrap node

                Run the following command to import the os packages into the MinIO file service:

                MINIO_USER=rootuser MINIO_PASS=rootpass123 ./import_ospkgs.sh  http://127.0.0.1:9000 os-pkgs-redhat9-v0.9.3.tar.gz\n

                Note

                The command above only applies to the MinIO service built into the bootstrap node. If an external MinIO is used, replace http://127.0.0.1:9000 with the access address of the external MinIO. "rootuser" and "rootpass123" are the default account and password of the MinIO service built into the bootstrap node. "os-pkgs-redhat9-v0.9.3.tar.gz" is the name of the downloaded OS package offline package.

                "},{"location":"admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#iso-minio","title":"\u5bfc\u5165 ISO \u79bb\u7ebf\u5305\u81f3\u706b\u79cd\u8282\u70b9\u7684 MinIO","text":"

                \u6267\u884c\u5982\u4e0b\u547d\u4ee4, \u5c06 ISO \u5305\u5230 MinIO \u6587\u4ef6\u670d\u52a1\u4e2d:

                MINIO_USER=rootuser MINIO_PASS=rootpass123 ./import_iso.sh http://127.0.0.1:9000 rhel-9.2-x86_64-dvd.iso\n

                Note

                The command above only applies to the MinIO service built into the bootstrap node. If an external MinIO is used, replace http://127.0.0.1:9000 with the access address of the external MinIO. "rootuser" and "rootpass123" are the default account and password of the MinIO service built into the bootstrap node. "rhel-9.2-x86_64-dvd.iso" is the name of the downloaded ISO offline package.

                "},{"location":"admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#ui","title":"\u524d\u5f80 UI \u754c\u9762\u521b\u5efa\u96c6\u7fa4","text":"

                \u53c2\u8003\u6587\u6863\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4\uff0c\u521b\u5efa RedHat 9.2 \u96c6\u7fa4\u3002

                "},{"location":"admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html","title":"\u5728 CentOS \u7ba1\u7406\u5e73\u53f0\u4e0a\u521b\u5efa Ubuntu \u5de5\u4f5c\u96c6\u7fa4","text":"

                \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5728\u5df2\u6709\u7684 CentOS \u7ba1\u7406\u5e73\u53f0\u4e0a\u521b\u5efa Ubuntu \u5de5\u4f5c\u96c6\u7fa4\u3002

                Note

                \u672c\u6587\u4ec5\u9488\u5bf9\u79bb\u7ebf\u6a21\u5f0f\u4e0b\uff0c\u4f7f\u7528 AI \u7b97\u529b\u4e2d\u5fc3\u5e73\u53f0\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4\uff0c\u7ba1\u7406\u5e73\u53f0\u548c\u5f85\u5efa\u5de5\u4f5c\u96c6\u7fa4\u7684\u67b6\u6784\u5747\u4e3a AMD\u3002 \u521b\u5efa\u96c6\u7fa4\u65f6\u4e0d\u652f\u6301\u5f02\u6784\uff08AMD \u548c ARM \u6df7\u5408\uff09\u90e8\u7f72\uff0c\u60a8\u53ef\u4ee5\u5728\u96c6\u7fa4\u521b\u5efa\u5b8c\u6210\u540e\uff0c\u901a\u8fc7\u63a5\u5165\u5f02\u6784\u8282\u70b9\u7684\u65b9\u5f0f\u8fdb\u884c\u96c6\u7fa4\u6df7\u5408\u90e8\u7f72\u7ba1\u7406\u3002

                "},{"location":"admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                • \u5df2\u7ecf\u90e8\u7f72\u597d\u4e00\u4e2a AI \u7b97\u529b\u4e2d\u5fc3\u5168\u6a21\u5f0f\uff0c\u5e76\u4e14\u706b\u79cd\u8282\u70b9\u8fd8\u5b58\u6d3b\uff0c\u90e8\u7f72\u53c2\u8003\u6587\u6863\u79bb\u7ebf\u5b89\u88c5 AI \u7b97\u529b\u4e2d\u5fc3\u5546\u4e1a\u7248
                "},{"location":"admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#ubuntu","title":"\u4e0b\u8f7d\u5e76\u5bfc\u5165 Ubuntu \u76f8\u5173\u79bb\u7ebf\u5305","text":"

                Make sure you are logged in to the bootstrap node, and that the clusterConfig.yaml file used when the AI Computing Center was deployed is still available.

                "},{"location":"admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#ubuntu_1","title":"\u4e0b\u8f7d Ubuntu \u76f8\u5173\u79bb\u7ebf\u5305","text":"

                \u4e0b\u8f7d\u6240\u9700\u7684 Ubuntu OS package \u5305\u548c ISO \u79bb\u7ebf\u5305\uff1a

                \u8d44\u6e90\u540d \u8bf4\u660e \u4e0b\u8f7d\u5730\u5740 os-pkgs-ubuntu2204-v0.18.2.tar.gz Ubuntu1804 OS-package \u5305 https://github.com/kubean-io/kubean/releases/download/v0.18.2/os-pkgs-ubuntu2204-v0.18.2.tar.gz ISO \u79bb\u7ebf\u5305 ISO \u5305 http://mirrors.melbourne.co.uk/ubuntu-releases/"},{"location":"admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#os-package-iso-minio","title":"\u5bfc\u5165 OS Package \u548c ISO \u79bb\u7ebf\u5305\u81f3\u706b\u79cd\u8282\u70b9\u7684 MinIO","text":"

                Refer to the document Importing Offline Resources to import the offline resources into MinIO on the bootstrap node.

                "},{"location":"admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#ui","title":"Go to the UI and create the cluster","text":"

                Refer to the document Create a Worker Cluster to create the Ubuntu cluster.

                "},{"location":"admin/kpanda/best-practice/etcd-backup.html","title":"ETCD \u5907\u4efd\u8fd8\u539f","text":"

                \u4f7f\u7528 ETCD \u5907\u4efd\u529f\u80fd\u521b\u5efa\u5907\u4efd\u7b56\u7565\uff0c\u53ef\u4ee5\u5c06\u6307\u5b9a\u96c6\u7fa4\u7684 etcd \u6570\u636e\u5b9a\u65f6\u5907\u4efd\u5230 S3 \u5b58\u50a8\u4e2d\u3002\u672c\u6587\u4e3b\u8981\u4ecb\u7ecd\u5982\u4f55\u5c06\u5df2\u7ecf\u5907\u4efd\u7684\u6570\u636e\u8fd8\u539f\u5230\u5f53\u524d\u96c6\u7fa4\u4e2d\u3002

                Note

                • The AI Computing Center ETCD backup and restore is limited to backup and restore within the same cluster (no change in node count or IP addresses). For example, after backing up the etcd data of cluster A, the backup can only be restored into cluster A, not into cluster B.
                • For cross-cluster backup and restore, use the application backup and restore feature instead.
                • First create a backup policy to back up the current state; refer to the ETCD Backup feature.

                The following walks through the whole backup and restore process with a concrete example.

                "},{"location":"admin/kpanda/best-practice/etcd-backup.html#_1","title":"\u73af\u5883\u4fe1\u606f","text":"

                \u9996\u5148\u4ecb\u7ecd\u8fd8\u539f\u7684\u76ee\u6807\u96c6\u7fa4\u548c S3 \u5b58\u50a8\u7684\u57fa\u672c\u4fe1\u606f\u3002\u8fd9\u91cc\u4ee5 MinIo \u4f5c\u4e3a S3 \u5b58\u50a8\uff0c\u6574\u4e2a\u96c6\u7fa4\u6709 3 \u4e2a\u63a7\u5236\u9762\uff083 \u4e2a etcd \u526f\u672c\uff09\u3002

                IP \u4e3b\u673a \u89d2\u8272 \u5907\u6ce8 10.6.212.10 host01 k8s-master01 k8s \u8282\u70b9 1 10.6.212.11 host02 k8s-master02 k8s \u8282\u70b9 2 10.6.212.12 host03 k8s-master03 k8s \u8282\u70b9 3 10.6.212.13 host04 minio minio \u670d\u52a1"},{"location":"admin/kpanda/best-practice/etcd-backup.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":""},{"location":"admin/kpanda/best-practice/etcd-backup.html#etcdbrctl","title":"\u5b89\u88c5 etcdbrctl \u5de5\u5177","text":"

                To back up and restore ETCD data, install the etcdbrctl open-source tool on any one of the Kubernetes nodes above. The tool does not yet ship prebuilt binaries, so you must compile it yourself; for build instructions, refer to the Gardener / etcd-backup-restore local development documentation.
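                A minimal build sketch, assuming a Go toolchain and make are installed; the exact build target may differ, so follow the project's local development documentation:

                git clone https://github.com/gardener/etcd-backup-restore.git\ncd etcd-backup-restore\nmake build   # assumed to produce the etcdbrctl binary; copy it into your PATH, e.g. /usr/local/bin\n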

                After installation, check that the tool is available with the following command:

                etcdbrctl -v\n

                The expected output is as follows:

                INFO[0000] etcd-backup-restore Version: v0.23.0-dev\nINFO[0000] Git SHA: b980beec\nINFO[0000] Go Version: go1.19.3\nINFO[0000] Go OS/Arch: linux/amd64\n
                "},{"location":"admin/kpanda/best-practice/etcd-backup.html#_3","title":"\u68c0\u67e5\u5907\u4efd\u6570\u636e","text":"

                \u8fd8\u539f\u4e4b\u524d\u9700\u8981\u68c0\u67e5\u4e0b\u5217\u4e8b\u9879\uff1a

                • Whether the data has already been successfully backed up in the AI Computing Center
                • Whether the backup data exists in the S3 storage (a sketch of this check follows)
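                A quick way to check the second item, sketched with the MinIO client (mc) against the environment above; the alias name is arbitrary, and the bucket must match the one used by your backup policy (the "etcd-backup" bucket is assumed here, matching the restore command later in this article):

                mc alias set etcd-s3 http://10.6.212.13:9000 <access-key> <secret-key>\nmc ls etcd-s3/etcd-backup   # list the backup snapshots in the assumed bucket\n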

                Note

                AI Computing Center backups are full-data backups; a restore recovers the full data of the most recent backup.

                "},{"location":"admin/kpanda/best-practice/etcd-backup.html#_4","title":"\u5173\u95ed\u96c6\u7fa4","text":"

                \u5728\u5907\u4efd\u4e4b\u524d\uff0c\u5fc5\u987b\u8981\u5148\u5173\u95ed\u96c6\u7fa4\u3002\u9ed8\u8ba4\u96c6\u7fa4 etcd \u548c kube-apiserver \u90fd\u662f\u4ee5\u9759\u6001 Pod \u7684\u5f62\u5f0f\u542f\u52a8\u7684\u3002 \u8fd9\u91cc\u7684\u5173\u95ed\u96c6\u7fa4\u662f\u6307\u5c06\u9759\u6001 Pod manifest \u6587\u4ef6\u79fb\u52a8\u5230 /etc/kubernetes/manifest \u76ee\u5f55\u5916\uff0c\u96c6\u7fa4\u5c31\u4f1a\u79fb\u9664\u5bf9\u5e94 Pod\uff0c\u8fbe\u5230\u5173\u95ed\u670d\u52a1\u7684\u4f5c\u7528\u3002

                1. First remove the previous data. Removing the data does not mean deleting the existing etcd data, but renaming the etcd data directory; delete that directory only after the backup has been restored successfully. This way, if the etcd restore fails, you can still try to recover the current cluster. This step must be performed on every node.

                  rm -rf /var/lib/etcd_bak\n
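                  The rename itself can look like the following sketch, assuming the default etcd data directory /var/lib/etcd:

                  mv /var/lib/etcd /var/lib/etcd_bak   # keep the old data around until the restore has succeeded\n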
                2. Then stop the kube-apiserver service so that the etcd data receives no new changes. This step must be performed on every node.

                  mv /etc/kubernetes/manifests/kube-apiserver.yaml /tmp/kube-apiserver.yaml\n
                3. Also stop the etcd service. This step must be performed on every node.

                  mv /etc/kubernetes/manifests/etcd.yaml /tmp/etcd.yaml\n
                4. Make sure the kube-apiserver and etcd services on all control planes are stopped.

                5. After shutting down all the nodes, check the etcd cluster status with the following command. It can be run on any one node.

                  Replace the endpoints values with the actual node names

                  etcdctl endpoint status --endpoints=controller-node-1:2379,controller-node-2:2379,controller-node-3:2379 -w table \\\n  --cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n  --cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n  --key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\"\n

                  The expected output is as follows, indicating that all the etcd services are down:

                  {\"level\":\"warn\",\"ts\":\"2023-03-29T17:51:50.817+0800\",\"logger\":\"etcd-client\",\"caller\":\"v3@v3.5.6/retry_interceptor.go:62\",\"msg\":\"retrying of unary invoker failed\",\"target\":\"etcd-endpoints://0xc0001ba000/controller-node-1:2379\",\"attempt\":0,\"error\":\"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \\\"transport: Error while dialing dial tcp 10.5.14.31:2379: connect: connection refused\\\"\"}\nFailed to get the status of endpoint controller-node-1:2379 (context deadline exceeded)\n{\"level\":\"warn\",\"ts\":\"2023-03-29T17:51:55.818+0800\",\"logger\":\"etcd-client\",\"caller\":\"v3@v3.5.6/retry_interceptor.go:62\",\"msg\":\"retrying of unary invoker failed\",\"target\":\"etcd-endpoints://0xc0001ba000/controller-node-2:2379\",\"attempt\":0,\"error\":\"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \\\"transport: Error while dialing dial tcp 10.5.14.32:2379: connect: connection refused\\\"\"}\nFailed to get the status of endpoint controller-node-2:2379 (context deadline exceeded)\n{\"level\":\"warn\",\"ts\":\"2023-03-29T17:52:00.820+0800\",\"logger\":\"etcd-client\",\"caller\":\"v3@v3.5.6/retry_interceptor.go:62\",\"msg\":\"retrying of unary invoker failed\",\"target\":\"etcd-endpoints://0xc0001ba000/controller-node-1:2379\",\"attempt\":0,\"error\":\"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \\\"transport: Error while dialing dial tcp 10.5.14.33:2379: connect: connection refused\\\"\"}\nFailed to get the status of endpoint controller-node-3:2379 (context deadline exceeded)\n+----------+----+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |\n+----------+----+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n+----------+----+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n
                "},{"location":"admin/kpanda/best-practice/etcd-backup.html#_5","title":"\u8fd8\u539f\u5907\u4efd","text":"

                \u53ea\u9700\u8981\u8fd8\u539f\u4e00\u4e2a\u8282\u70b9\u7684\u6570\u636e\uff0c\u5176\u4ed6\u8282\u70b9\u7684 etcd \u6570\u636e\u5c31\u4f1a\u81ea\u52a8\u8fdb\u884c\u540c\u6b65\u3002

                1. Set the environment variables

                  Before restoring data with etcdbrctl, run the following commands to set the S3 connection credentials as environment variables:

                  export ECS_ENDPOINT=http://10.6.212.13:9000 # (1)!\nexport ECS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE # (2)!\nexport ECS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY # (3)!\n
                  1. The access endpoint of the S3 storage
                  2. The access key (username) of the S3 storage
                  3. The secret key (password) of the S3 storage
                2. Perform the restore

                  Run the etcdbrctl command-line tool to perform the restore. This is the most critical step.

                  etcdbrctl restore --data-dir /var/lib/etcd/ --store-container=\"etcd-backup\" \\\n  --storage-provider=ECS \\\n  --initial-cluster=controller-node-1=https://10.6.212.10:2380 \\\n  --initial-advertise-peer-urls=https://10.6.212.10:2380 \n

                  The parameters are described below:

                  • --data-dir: the etcd data directory. It must match the directory etcd actually uses so that etcd can load the data correctly.
                  • --store-container: the S3 storage location, i.e. the corresponding bucket in MinIO; it must match the bucket used for the data backup.
                  • --initial-cluster: the etcd initialization configuration; the etcd cluster name must be the same as before.
                  • --initial-advertise-peer-urls: the address used for access between etcd members. It must be consistent with the etcd configuration.

                  The expected output is as follows:

                  INFO[0000] Finding latest set of snapshot to recover from...\nINFO[0000] Restoring from base snapshot: Full-00000000-00111147-1679991074  actor=restorer\nINFO[0001] successfully fetched data of base snapshot in 1.241380207 seconds  actor=restorer\n{\"level\":\"info\",\"ts\":1680011221.2511616,\"caller\":\"mvcc/kvstore.go:380\",\"msg\":\"restored last compact revision\",\"meta-bucket-name\":\"meta\",\"meta-bucket-name-key\":\"finishedCompactRev\",\"restored-compact-revision\":110327}\n{\"level\":\"info\",\"ts\":1680011221.3045986,\"caller\":\"membership/cluster.go:392\",\"msg\":\"added member\",\"cluster-id\":\"66638454b9dd7b8a\",\"local-member-id\":\"0\",\"added-peer-id\":\"123c2503a378fc46\",\"added-peer-peer-urls\":[\"https://10.6.212.10:2380\"]}\nINFO[0001] Starting embedded etcd server...              actor=restorer\n....\n\n{\"level\":\"info\",\"ts\":\"2023-03-28T13:47:02.922Z\",\"caller\":\"embed/etcd.go:565\",\"msg\":\"stopped serving peer traffic\",\"address\":\"127.0.0.1:37161\"}\n{\"level\":\"info\",\"ts\":\"2023-03-28T13:47:02.922Z\",\"caller\":\"embed/etcd.go:367\",\"msg\":\"closed etcd server\",\"name\":\"default\",\"data-dir\":\"/var/lib/etcd\",\"advertise-peer-urls\":[\"http://localhost:0\"],\"advertise-client-urls\":[\"http://localhost:0\"]}\nINFO[0003] Successfully restored the etcd data directory.\n

                  You can compare against the etcd YAML file to avoid misconfiguration:

                  cat /tmp/etcd.yaml | grep initial-\n- --experimental-initial-corrupt-check=true\n- --initial-advertise-peer-urls=https://10.6.212.10:2380\n- --initial-cluster=controller-node-1=https://10.6.212.10:2380\n
                3. Run the following commands on node 01 to recover the etcd service of node 01.

                  First move the etcd static Pod manifest file back into the /etc/kubernetes/manifests directory; kubelet will then restart etcd:

                  mv /tmp/etcd.yaml /etc/kubernetes/manifests/etcd.yaml\n

                  Then wait for the etcd service to finish starting and check etcd's status. The default directory for the etcd-related certificates is /etc/kubernetes/ssl; if the cluster certificates are stored elsewhere, specify the corresponding path.

                  • Check the etcd cluster member list:

                    etcdctl member list -w table \\\n--cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n--cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n--key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\" \n

                    The expected output is as follows:

                    +------------------+---------+-------------------+--------------------------+--------------------------+------------+\n|        ID        | STATUS  |       NAME        |        PEER ADDRS        |       CLIENT ADDRS       | IS LEARNER |\n+------------------+---------+-------------------+--------------------------+--------------------------+------------+\n| 123c2503a378fc46 | started | controller-node-1 | https://10.6.212.10:2380 | https://10.6.212.10:2379 |      false |\n+------------------+---------+-------------------+--------------------------+--------------------------+------------+\n
                  • Check the status of controller-node-1:

                    etcdctl endpoint status --endpoints=controller-node-1:2379 -w table \\\n--cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n--cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n--key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\"\n

                    The expected output is as follows:

                    +------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n|        ENDPOINT        |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |\n+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n| controller-node-1:2379 | 123c2503a378fc46 |   3.5.6 |   15 MB |      true |      false |         3 |       1200 |               1199 |        |\n+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n
                4. Restore the data of the other nodes

                  The steps above restored the data of node 01. To restore the other nodes, simply start their etcd Pods and let etcd synchronize the data by itself.

                  • Perform the same operation on node 02 and node 03:

                    mv /tmp/etcd.yaml /etc/kubernetes/manifests/etcd.yaml\n
                  • Data synchronization between etcd members takes some time; check the etcd cluster status to make sure all etcd members are healthy:

                    Check whether the etcd cluster status is normal:

                    etcdctl member list -w table \\\n--cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n--cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n--key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\"\n

                    The expected output is as follows:

                    +------------------+---------+-------------------+-------------------------+-------------------------+------------+\n|        ID        | STATUS  |    NAME           |       PEER ADDRS        |      CLIENT ADDRS       | IS LEARNER |\n+------------------+---------+-------------------+-------------------------+-------------------------+------------+\n| 6ea47110c5a87c03 | started | controller-node-1 | https://10.5.14.31:2380 | https://10.5.14.31:2379 |      false |\n| e222e199f1e318c4 | started | controller-node-2 | https://10.5.14.32:2380 | https://10.5.14.32:2379 |      false |\n| f64eeda321aabe2d | started | controller-node-3 | https://10.5.14.33:2380 | https://10.5.14.33:2379 |      false |\n+------------------+---------+-------------------+-------------------------+-------------------------+------------+\n

                    Check whether the 3 member nodes are healthy:

                    etcdctl endpoint status --endpoints=controller-node-1:2379,controller-node-2:2379,controller-node-3:2379 -w table \\\n--cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n--cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n--key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\"\n

                    The expected output is as follows:

                    +------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n|     ENDPOINT           |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |\n+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n| controller-node-1:2379 | 6ea47110c5a87c03 |   3.5.6 |   88 MB |      true |      false |         6 |     199008 |             199008 |        |\n| controller-node-2:2379 | e222e199f1e318c4 |   3.5.6 |   88 MB |     false |      false |         6 |     199114 |             199114 |        |\n| controller-node-3:2379 | f64eeda321aabe2d |   3.5.6 |   88 MB |     false |      false |         6 |     199316 |             199316 |        |\n+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n
                "},{"location":"admin/kpanda/best-practice/etcd-backup.html#_6","title":"\u6062\u590d\u96c6\u7fa4","text":"

                \u7b49\u6240\u6709\u8282\u70b9\u7684 etcd \u6570\u636e\u540c\u6b65\u5b8c\u6210\u540e\uff0c\u5219\u53ef\u4ee5\u5c06 kube-apiserver \u8fdb\u884c\u91cd\u65b0\u542f\u52a8\uff0c\u5c06\u6574\u4e2a\u96c6\u7fa4\u6062\u590d\u5230\u53ef\u8bbf\u95ee\u72b6\u6001\uff1a

                1. \u91cd\u65b0\u542f\u52a8 node1 \u7684 kube-apiserver \u670d\u52a1

                  mv /tmp/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml\n
                2. Restart the kube-apiserver service of node2

                  mv /tmp/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml\n
                3. Restart the kube-apiserver service of node3

                  mv /tmp/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml\n
                4. After kubelet has started kube-apiserver, check whether the restored k8s data is normal:

                  kubectl get nodes\n

                  The expected output is as follows:

                  NAME                STATUS     ROLES           AGE     VERSION\ncontroller-node-1   Ready      <none>          3h30m   v1.25.4\ncontroller-node-2   Ready      control-plane   3h29m   v1.25.4\ncontroller-node-3   Ready      control-plane   3h28m   v1.25.4\n
                "},{"location":"admin/kpanda/best-practice/hardening-cluster.html","title":"\u5982\u4f55\u52a0\u56fa\u81ea\u5efa\u5de5\u4f5c\u96c6\u7fa4","text":"

                \u5728 AI \u7b97\u529b\u4e2d\u5fc3\u4e2d\uff0c\u4f7f\u7528 CIS Benchmark (CIS) \u626b\u63cf\u4f7f\u7528\u754c\u9762\u521b\u5efa\u7684\u5de5\u4f5c\u96c6\u7fa4\uff0c\u6709\u4e00\u4e9b\u626b\u63cf\u9879\u5e76\u6ca1\u6709\u901a\u8fc7\u626b\u63cf\u3002 \u672c\u6587\u5c06\u57fa\u4e8e\u4e0d\u540c\u7684 CIS Benchmark \u7248\u672c\u8fdb\u884c\u52a0\u56fa\u8bf4\u660e\u3002

                "},{"location":"admin/kpanda/best-practice/hardening-cluster.html#cis-benchmark-127","title":"CIS Benchmark 1.27","text":"

                Scan environment:

                • kubernetes version: 1.25.4
                • containerd: 1.7.0
                • kubean version: 0.4.9
                • kubespray version: v2.22
                "},{"location":"admin/kpanda/best-practice/hardening-cluster.html#_2","title":"\u672a\u901a\u8fc7\u626b\u63cf\u9879","text":"
                1. [FAIL] 1.2.5 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)
                2. [FAIL] 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)
                3. [FAIL] 1.4.1 Ensure that the --profiling argument is set to false (Automated)
                4. [FAIL] 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)
                "},{"location":"admin/kpanda/best-practice/hardening-cluster.html#_3","title":"\u626b\u63cf\u5931\u8d25\u539f\u56e0\u5206\u6790","text":"
                1. [FAIL] 1.2.5 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)

                  Reason: CIS requires that kube-apiserver specify the CA certificate path for the kubelet.

                2. [FAIL] 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)

                  Reason: CIS requires kube-controller-manager to set --bind-address=127.0.0.1

                3. [FAIL] 1.4.1 Ensure that the --profiling argument is set to false (Automated)

                  Reason: CIS requires kube-scheduler to set --profiling=false

                4. [FAIL] 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)

                  Reason: CIS requires kube-scheduler to set --bind-address=127.0.0.1

                "},{"location":"admin/kpanda/best-practice/hardening-cluster.html#cis","title":"\u52a0\u56fa\u914d\u7f6e\u4ee5\u901a\u8fc7 CIS \u626b\u63cf","text":"

                kubespray \u5b98\u65b9\u4e3a\u4e86\u89e3\u51b3\u8fd9\u4e9b\u5b89\u5168\u626b\u63cf\u95ee\u9898\uff0c\u5728 v2.22 \u4e2d\u6dfb\u52a0\u9ed8\u8ba4\u503c\u89e3\u51b3\u4e86\u4e00\u90e8\u5206\u95ee\u9898\uff0c \u66f4\u591a\u7ec6\u8282\u8bf7\u53c2\u8003 kubespray \u52a0\u56fa\u6587\u6863\u3002

                • \u901a\u8fc7\u4fee\u6539 kubean var-config \u914d\u7f6e\u6587\u4ef6\u6765\u6dfb\u52a0\u53c2\u6570\uff1a

                  kubernetes_audit: true\nkube_controller_manager_bind_address: 127.0.0.1\nkube_scheduler_bind_address: 127.0.0.1\nkube_kubeadm_scheduler_extra_args:\n  profiling: false\nkubelet_rotate_server_certificates: true\n
                • The AI Computing Center also provides the ability to configure advanced parameters through the UI; add the custom parameters in the last step of cluster creation:

                • After the custom parameters are set, the following parameters are added to the kubean var-config configmap (one way to verify is sketched below):
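                  A hypothetical check; the ConfigMap name and namespace depend on your installation:

                  kubectl -n kubean-system get configmap <var-config-name> -o yaml | grep -E 'profiling|bind_address|kubernetes_audit'\n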

                • Scan the cluster again after installation:

                After the scan, all scan items pass (WARN and INFO count as PASS). Since the CIS Benchmark is continuously updated, the content of this document applies only to CIS Benchmark 1.27.

                "},{"location":"admin/kpanda/best-practice/k3s-lcm.html","title":"\u8fb9\u7f18\u96c6\u7fa4\u90e8\u7f72\u548c\u7ba1\u7406\u5b9e\u8df5","text":"

                \u5bf9\u4e8e\u8d44\u6e90\u53d7\u9650\u7684\u8fb9\u7f18\u6216\u7269\u8054\u7f51\u573a\u666f\uff0cKubernetes \u65e0\u6cd5\u5f88\u597d\u7684\u6ee1\u8db3\u8d44\u6e90\u8981\u6c42\uff0c\u4e3a\u6b64\u9700\u8981\u4e00\u4e2a\u8f7b\u91cf\u5316 Kubernetes \u65b9\u6848\uff0c \u65e2\u80fd\u5b9e\u73b0\u5bb9\u5668\u7ba1\u7406\u548c\u7f16\u6392\u80fd\u529b\uff0c\u53c8\u80fd\u7ed9\u4e1a\u52a1\u5e94\u7528\u9884\u7559\u66f4\u591a\u8d44\u6e90\u7a7a\u95f4\u3002\u672c\u6587\u4ecb\u7ecd\u8fb9\u7f18\u96c6\u7fa4 k3s \u7684\u90e8\u7f72\u548c\u5168\u751f\u547d\u5468\u671f\u7ba1\u7406\u5b9e\u8df5\u3002

                "},{"location":"admin/kpanda/best-practice/k3s-lcm.html#_2","title":"\u8282\u70b9\u89c4\u5212","text":"

                \u67b6\u6784

                • x86_64
                • armhf
                • arm64/aarch64

                Operating system

                • Works on most modern Linux distributions

                CPU/Memory

                • Single-node K3s cluster

                  Node | Minimum CPU | Recommended CPU | Minimum Memory | Recommended Memory\nK3s cluster | 1 core | 2 cores | 1.5 GB | 2 GB
                • Multi-node K3s cluster

                  Node | Minimum CPU | Recommended CPU | Minimum Memory | Recommended Memory\nK3s server | 1 core | 2 cores | 1 GB | 1.5 GB\nK3s agent | 1 core | 2 cores | 512 MB | 1 GB
                • Node inbound rules

                  • Make sure the following ports are not occupied, as required
                  • If special requirements prevent disabling the firewall, make sure these ports are allowed (see the firewalld sketch after this list)
                  Protocol | Port | Source | Destination | Description\nTCP | 2379-2380 | Servers | Servers | Required for HA with embedded etcd\nTCP | 6443 | Agents | Servers | K3s supervisor and Kubernetes API Server\nUDP | 8472 | All nodes | All nodes | Required only for Flannel VXLAN\nTCP | 10250 | All nodes | All nodes | Kubelet metrics\nUDP | 51820 | All nodes | All nodes | Required only for Flannel WireGuard with IPv4\nUDP | 51821 | All nodes | All nodes | Required only for Flannel WireGuard with IPv6\nTCP | 5001 | All nodes | All nodes | Required only for the embedded distributed registry (Spegel)\nTCP | 6443 | All nodes | All nodes | Required only for the embedded distributed registry (Spegel)
                • Node roles

                  The login user must have root privileges

                  server nodes | agent nodes | Description\n1 | 0 | one server node\n1 | 2 | one server node, two agent nodes\n3 | 0 | three server nodes
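                If the firewall must stay enabled, the ports from the inbound-rules table above can be allowed explicitly. A sketch for a server node using firewalld; adjust the port list to the table and to your networking choices:

                firewall-cmd --permanent --add-port=6443/tcp --add-port=2379-2380/tcp --add-port=10250/tcp --add-port=8472/udp\nfirewall-cmd --reload\n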
                "},{"location":"admin/kpanda/best-practice/k3s-lcm.html#_3","title":"\u524d\u7f6e\u51c6\u5907","text":"
                1. Save the installation script to the installer node (any node that can reach the cluster nodes)

                  $ cat > k3slcm <<'EOF'\n#!/bin/bash\nset -e\n\nairgap_image=${K3S_AIRGAP_IMAGE:-}\nk3s_bin=${K3S_BINARY:-}\ninstall_script=${K3S_INSTALL_SCRIPT:-}\n\nservers=${K3S_SERVERS:-}\nagents=${K3S_AGENTS:-}\nssh_user=${SSH_USER:-root}\nssh_password=${SSH_PASSWORD:-}\nssh_privatekey_path=${SSH_PRIVATEKEY_PATH:-}\nextra_server_args=${EXTRA_SERVER_ARGS:-}\nextra_agent_args=${EXTRA_AGENT_ARGS:-}\nfirst_server=$(cut -d, -f1 <<<\"$servers,\")\nother_servers=$(cut -d, -f2- <<<\"$servers,\")\n\ninstall_script_env=\"INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_SELINUX_WARN=true \"\n[ -n \"$K3S_VERSION\" ] && install_script_env+=\"INSTALL_K3S_VERSION=$K3S_VERSION \"\n\nssh_opts=\"-q -o StrictHostkeyChecking=no -o UserKnownHostsFile=/dev/null -o ControlPath=/tmp/ssh_mux_%h_%p_%r -o ControlMaster=auto -o ControlPersist=10m\"\n\nif [ -n \"$ssh_privatekey_path\" ]; then\n  ssh_opts+=\" -i $ssh_privatekey_path\"\nelif [ -n \"$ssh_password\" ]; then\n  askpass=$(mktemp)\n  echo \"echo -n $ssh_password\" > $askpass\n  chmod 0755 $askpass\n  export SSH_ASKPASS=$askpass SSH_ASKPASS_REQUIRE=force\nelse\n  echo \"SSH_PASSWORD or SSH_PRIVATEKEY_PATH must be provided\" && exit 1\nfi\n\nlog_info() { echo -e \"\\033[36m* $*\\033[0m\"; }\nclean() { rm -f $askpass; }\ntrap clean EXIT\n\nIFS=',' read -ra all_nodes <<< \"$servers,$agents\"\nif [ -n \"$k3s_bin\" ]; then\n  for node in ${all_nodes[@]}; do\n    chmod +x \"$k3s_bin\" \"$install_script\"\n    ssh $ssh_opts \"$ssh_user@$node\" \"mkdir -p /usr/local/bin /var/lib/rancher/k3s/agent/images\"\n    log_info \"Copying $airgap_image to $node\"\n    scp -O $ssh_opts \"$airgap_image\" \"$ssh_user@$node:/var/lib/rancher/k3s/agent/images\"\n    log_info \"Copying $k3s_bin to $node\"\n    scp -O $ssh_opts \"$k3s_bin\" \"$ssh_user@$node:/usr/local/bin/k3s\"\n    log_info \"Copying $install_script to $node\"\n    scp -O $ssh_opts \"$install_script\" \"$ssh_user@$node:/usr/local/bin/k3s-install.sh\"\n  done\n  install_script_env+=\"INSTALL_K3S_SKIP_DOWNLOAD=true \"\nelse\n  for node in ${all_nodes[@]}; do\n    log_info \"Downloading install script for $node\"\n    ssh $ssh_opts \"$ssh_user@$node\" \"curl -sSLo /usr/local/bin/k3s-install.sh https://get.k3s.io/ && chmod +x /usr/local/bin/k3s-install.sh\"\n  done\nfi\n\nrestart_k3s() {\n  local node=$1\n  previous_k3s_version=$(ssh $ssh_opts \"$ssh_user@$first_server\" \"kubectl get no -o wide | awk '\\$6==\\\"$node\\\" {print \\$5}'\")\n  [ -n \"$previous_k3s_version\" -a \"$previous_k3s_version\" != \"$K3S_VERSION\" -a -n \"$k3s_bin\" ] && return 0 || return 1\n}\n\ntoken=mynodetoken\ninstall_script_env+=${K3S_INSTALL_SCRIPT_ENV:-}\nif [ -z \"$other_servers\" ]; then\n  log_info \"Installing on server node [$first_server]\"\n  ssh $ssh_opts \"$ssh_user@$first_server\" \"env $install_script_env /usr/local/bin/k3s-install.sh server --token $token $extra_server_args\"\n  ! restart_k3s \"$first_server\" || ssh $ssh_opts \"$ssh_user@$first_server\" \"systemctl restart k3s.service\"\nelse\n  log_info \"Installing on first server node [$first_server]\"\n  ssh $ssh_opts \"$ssh_user@$first_server\" \"env $install_script_env /usr/local/bin/k3s-install.sh server --cluster-init --token $token $extra_server_args\"\n  ! 
restart_k3s \"$first_server\" || ssh $ssh_opts \"$ssh_user@$first_server\" \"systemctl restart k3s.service\"\n  IFS=',' read -ra other_server_nodes <<< \"$other_servers\"\n  for node in ${other_server_nodes[@]}; do\n    log_info \"Installing on other server node [$node]\"\n    ssh $ssh_opts \"$ssh_user@$node\" \"env $install_script_env /usr/local/bin/k3s-install.sh server --server https://$first_server:6443 --token $token $extra_server_args\"\n    ! restart_k3s \"$node\" || ssh $ssh_opts \"$ssh_user@$node\" \"systemctl restart k3s.service\"\n  done\nfi\n\nif [ -n \"$agents\" ]; then\n  IFS=',' read -ra agent_nodes <<< \"$agents\"\n  for node in ${agent_nodes[@]}; do\n    log_info \"Installing on agent node [$node]\"\n    ssh $ssh_opts \"$ssh_user@$node\" \"env $install_script_env K3S_TOKEN=$token K3S_URL=https://$first_server:6443 /usr/local/bin/k3s-install.sh agent --token $token $extra_agent_args\"\n    ! restart_k3s \"$node\" || ssh $ssh_opts \"$ssh_user@$node\" \"systemctl restart k3s-agent.service\"\n  done\nfi\nEOF\n
                2. (Optional) In an offline environment, download the K3s offline resources on an internet-connected node and copy them to the installer node

                  ## [Run on the internet-connected node]\n\n# Set the K3s version to v1.30.2+k3s1\n$ export k3s_version=v1.30.2+k3s1\n\n# Offline image bundle\n# For arm64 use https://github.com/k3s-io/k3s/releases/download/$k3s_version/k3s-airgap-images-arm64.tar.zst\n$ curl -LO https://github.com/k3s-io/k3s/releases/download/$k3s_version/k3s-airgap-images-amd64.tar.zst\n\n# k3s binary\n# For arm64 use https://github.com/k3s-io/k3s/releases/download/$k3s_version/k3s-arm64\n$ curl -LO https://github.com/k3s-io/k3s/releases/download/$k3s_version/k3s\n\n# Install script\n$ curl -Lo k3s-install.sh https://get.k3s.io/\n\n## Copy the above resources to the installer node's filesystem\n\n## [Run on the installer node]\n$ export K3S_AIRGAP_IMAGE=<resource directory>/k3s-airgap-images-amd64.tar.zst \n$ export K3S_BINARY=<resource directory>/k3s \n$ export K3S_INSTALL_SCRIPT=<resource directory>/k3s-install.sh\n
                3. Disable the firewall and swap (if the firewall cannot be disabled, allow the inbound ports listed above)

                  # Disable the firewall on Ubuntu\n$ sudo ufw disable\n# Disable the firewall on RHEL / CentOS / Fedora / SUSE\n$ systemctl disable firewalld --now\n$ sudo swapoff -a\n$ sudo sed -i '/swap/s/^/#/' /etc/fstab\n
                "},{"location":"admin/kpanda/best-practice/k3s-lcm.html#_4","title":"\u90e8\u7f72\u96c6\u7fa4","text":"

                \u4e0b\u6587\u6d4b\u8bd5\u73af\u5883\u4fe1\u606f\u4e3a Ubuntu 22.04 LTS, amd64\uff0c\u79bb\u7ebf\u5b89\u88c5

                1. \u5728\u5b89\u88c5\u8282\u70b9\u6839\u636e\u90e8\u7f72\u89c4\u5212\u8bbe\u7f6e\u8282\u70b9\u4fe1\u606f\uff0c\u5e76\u5bfc\u51fa\u73af\u5883\u53d8\u91cf\uff0c\u591a\u4e2a\u8282\u70b9\u4ee5\u534a\u89d2\u9017\u53f7 , \u5206\u9694

                  1 server / 0 agent | 1 server / 2 agent | 3 server / 0 agent
                  export K3S_SERVERS=172.30.41.5\nexport SSH_USER=root\n\n# If logging in with a public key, make sure the key has been added to ~/.ssh/authorized_keys on each node\nexport SSH_PRIVATEKEY_PATH=<private key path>\nexport SSH_PASSWORD=<SSH password>\n
                  export K3S_SERVERS=172.30.41.5\nexport K3S_AGENTS=172.30.41.6,172.30.41.7\nexport SSH_USER=root\n\n# If logging in with a public key, make sure the key has been added to ~/.ssh/authorized_keys on each node\nexport SSH_PRIVATEKEY_PATH=<private key path>\nexport SSH_PASSWORD=<SSH password>\n
                  export K3S_SERVERS=172.30.41.5,172.30.41.6,172.30.41.7\nexport SSH_USER=root\n\n# If logging in with a public key, make sure the key has been added to ~/.ssh/authorized_keys on each node\nexport SSH_PRIVATEKEY_PATH=<private key path>\nexport SSH_PASSWORD=<SSH password>\n
                2. Run the deployment

                  Taking the 3 server / 0 agent mode as an example; each machine must have a unique hostname (a sketch for setting hostnames follows)
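                  If any machines still share a default hostname, unique names can be set first; a sketch assuming systemd-based hosts, with names matching the cluster status output below:

                  ssh root@172.30.41.5 'hostnamectl set-hostname server1'\nssh root@172.30.41.6 'hostnamectl set-hostname server2'\nssh root@172.30.41.7 'hostnamectl set-hostname server3'\n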

                  # To set additional K3s install script environment variables, set K3S_INSTALL_SCRIPT_ENV; for its values see https://docs.k3s.io/reference/env-variables\n# For extra configuration of server or agent nodes, set EXTRA_SERVER_ARGS or EXTRA_AGENT_ARGS; for their values see https://docs.k3s.io/cli/server and https://docs.k3s.io/cli/agent\n$ bash k3slcm\n* Copying ./v1.30.2/k3s-airgap-images-amd64.tar.zst to 172.30.41.5\n* Copying ./v1.30.2/k3s to 172.30.41.5\n* Copying ./v1.30.2/k3s-install.sh to 172.30.41.5\n* Copying ./v1.30.2/k3s-airgap-images-amd64.tar.zst to 172.30.41.6\n* Copying ./v1.30.2/k3s to 172.30.41.6\n* Copying ./v1.30.2/k3s-install.sh to 172.30.41.6\n* Copying ./v1.30.2/k3s-airgap-images-amd64.tar.zst to 172.30.41.7\n* Copying ./v1.30.2/k3s to 172.30.41.7\n* Copying ./v1.30.2/k3s-install.sh to 172.30.41.7\n* Installing on first server node [172.30.41.5]\n[INFO]  Skipping k3s download and verify\n[INFO]  Skipping installation of SELinux RPM\n[INFO]  Creating /usr/local/bin/kubectl symlink to k3s\n[INFO]  Creating /usr/local/bin/crictl symlink to k3s\n[INFO]  Creating /usr/local/bin/ctr symlink to k3s\n[INFO]  Creating killall script /usr/local/bin/k3s-killall.sh\n[INFO]  Creating uninstall script /usr/local/bin/k3s-uninstall.sh\n[INFO]  env: Creating environment file /etc/systemd/system/k3s.service.env\n[INFO]  systemd: Creating service file /etc/systemd/system/k3s.service\n[INFO]  systemd: Enabling k3s unit\nCreated symlink /etc/systemd/system/multi-user.target.wants/k3s.service → /etc/systemd/system/k3s.service.\n[INFO]  systemd: Starting k3s\n* Installing on other server node [172.30.41.6]\n......\n
                3. Check the cluster status

                  $ kubectl get no -owide\nNAME      STATUS   ROLES                       AGE     VERSION        INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME\nserver1   Ready    control-plane,etcd,master   3m51s   v1.30.2+k3s1   172.30.41.5   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\nserver2   Ready    control-plane,etcd,master   3m18s   v1.30.2+k3s1   172.30.41.6   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\nserver3   Ready    control-plane,etcd,master   3m7s    v1.30.2+k3s1   172.30.41.7   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\n\n$ kubectl get pod --all-namespaces -owide\nNAMESPACE     NAME                                      READY   STATUS      RESTARTS   AGE     IP          NODE      NOMINATED NODE   READINESS GATES\nkube-system   coredns-576bfc4dc7-z4x2s                  1/1     Running     0          8m31s   10.42.0.3   server1   <none>           <none>\nkube-system   helm-install-traefik-98kh5                0/1     Completed   1          8m31s   10.42.0.4   server1   <none>           <none>\nkube-system   helm-install-traefik-crd-9xtfd            0/1     Completed   0          8m31s   10.42.0.5   server1   <none>           <none>\nkube-system   local-path-provisioner-86f46b7bf7-qt995   1/1     Running     0          8m31s   10.42.0.6   server1   <none>           <none>\nkube-system   metrics-server-557ff575fb-kptsh           1/1     Running     0          8m31s   10.42.0.2   server1   <none>           <none>\nkube-system   svclb-traefik-f95cc81c-mgcjh              2/2     Running     0          6m28s   10.42.1.3   server2   <none>           <none>\nkube-system   svclb-traefik-f95cc81c-xtb8f              2/2     Running     0          6m28s   10.42.2.2   server3   <none>           <none>\nkube-system   svclb-traefik-f95cc81c-zcsxl              2/2     Running     0          6m28s   10.42.0.7   server1   <none>           <none>\nkube-system   traefik-5fb479b77-6pbh5                   1/1     Running     0          6m28s   10.42.1.2   server2   <none>           <none>\n
                "},{"location":"admin/kpanda/best-practice/k3s-lcm.html#_5","title":"\u5347\u7ea7\u96c6\u7fa4","text":"
                1. \u5982\u5347\u7ea7\u5230 v1.30.3+k3s1 \u7248\u672c\uff0c\u6309\u7167 \u524d\u7f6e\u51c6\u5907 \u6b65\u9aa4 2 \u91cd\u65b0\u4e0b\u8f7d\u79bb\u7ebf\u8d44\u6e90\u5e76\u62f7\u8d1d\u5230\u5b89\u88c5\u8282\u70b9\uff0c\u540c\u65f6\u5728\u5b89\u88c5\u8282\u70b9\u5bfc\u51fa\u79bb\u7ebf\u8d44\u6e90\u8def\u5f84\u73af\u5883\u53d8\u91cf\u3002\uff08\u82e5\u4e3a\u8054\u7f51\u5347\u7ea7\uff0c\u5219\u8df3\u8fc7\u6b64\u64cd\u4f5c\uff09
                2. \u6267\u884c\u5347\u7ea7\u64cd\u4f5c

                  $ export K3S_VERSION=v1.30.3+k3s1\n$ bash k3slcm\n* Copying ./v1.30.3/k3s-airgap-images-amd64.tar.zst to 172.30.41.5\n* Copying ./v1.30.3/k3s to 172.30.41.5\n* Copying ./v1.30.3/k3s-install.sh to 172.30.41.5\n* Copying ./v1.30.3/k3s-airgap-images-amd64.tar.zst to 172.30.41.6\n* Copying ./v1.30.3/k3s to 172.30.41.6\n* Copying ./v1.30.3/k3s-install.sh to 172.30.41.6\n* Copying ./v1.30.3/k3s-airgap-images-amd64.tar.zst to 172.30.41.7\n* Copying ./v1.30.3/k3s to 172.30.41.7\n* Copying ./v1.30.3/k3s-install.sh to 172.30.41.7\n* Installing on first server node [172.30.41.5]\n[INFO]  Skipping k3s download and verify\n[INFO]  Skipping installation of SELinux RPM\n[INFO]  Skipping /usr/local/bin/kubectl symlink to k3s, already exists\n[INFO]  Skipping /usr/local/bin/crictl symlink to k3s, already exists\n[INFO]  Skipping /usr/local/bin/ctr symlink to k3s, already exists\n[INFO]  Creating killall script /usr/local/bin/k3s-killall.sh\n[INFO]  Creating uninstall script /usr/local/bin/k3s-uninstall.sh\n[INFO]  env: Creating environment file /etc/systemd/system/k3s.service.env\n[INFO]  systemd: Creating service file /etc/systemd/system/k3s.service\n[INFO]  systemd: Enabling k3s unit\nCreated symlink /etc/systemd/system/multi-user.target.wants/k3s.service \u2192 /etc/systemd/system/k3s.service.\n[INFO]  No change detected so skipping service start\n* Installing on other server node [172.30.41.6]\n......\n
                3. Check the cluster status

                  $ kubectl get node -owide\nNAME      STATUS   ROLES                       AGE   VERSION        INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME\nserver1   Ready    control-plane,etcd,master   18m   v1.30.3+k3s1   172.30.41.5   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\nserver2   Ready    control-plane,etcd,master   17m   v1.30.3+k3s1   172.30.41.6   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\nserver3   Ready    control-plane,etcd,master   17m   v1.30.3+k3s1   172.30.41.7   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\n\n$ kubectl get po --all-namespaces -owide\nNAMESPACE     NAME                                      READY   STATUS      RESTARTS   AGE     IP          NODE      NOMINATED NODE   READINESS GATES\nkube-system   coredns-576bfc4dc7-z4x2s                  1/1     Running     0          18m     10.42.0.3   server1   <none>           <none>\nkube-system   helm-install-traefik-98kh5                0/1     Completed   1          18m     <none>      server1   <none>           <none>\nkube-system   helm-install-traefik-crd-9xtfd            0/1     Completed   0          18m     <none>      server1   <none>           <none>\nkube-system   local-path-provisioner-6795b5f9d8-t4rvm   1/1     Running     0          2m49s   10.42.2.3   server3   <none>           <none>\nkube-system   metrics-server-557ff575fb-kptsh           1/1     Running     0          18m     10.42.0.2   server1   <none>           <none>\nkube-system   svclb-traefik-f95cc81c-mgcjh              2/2     Running     0          16m     10.42.1.3   server2   <none>           <none>\nkube-system   svclb-traefik-f95cc81c-xtb8f              2/2     Running     0          16m     10.42.2.2   server3   <none>           <none>\nkube-system   svclb-traefik-f95cc81c-zcsxl              2/2     Running     0          16m     10.42.0.7   server1   <none>           <none>\nkube-system   traefik-5fb479b77-6pbh5                   1/1     Running     0          16m     10.42.1.2   server2   <none>           <none>\n
                "},{"location":"admin/kpanda/best-practice/k3s-lcm.html#_6","title":"\u6269\u5bb9\u96c6\u7fa4","text":"
                1. \u5982\u6dfb\u52a0\u65b0\u7684 agent \u8282\u70b9\uff1a

                  export K3S_AGENTS=172.30.41.8\n

                  To add new server nodes:

                  < export K3S_SERVERS=172.30.41.5,172.30.41.6,172.30.41.7\n---\n> export K3S_SERVERS=172.30.41.5,172.30.41.6,172.30.41.7,172.30.41.8,172.30.41.9\n
                2. Run the scale-out (taking adding an agent node as an example)

                  $ bash k3slcm\n* Copying ./v1.30.3/k3s-airgap-images-amd64.tar.zst to 172.30.41.5\n* Copying ./v1.30.3/k3s to 172.30.41.5\n* Copying ./v1.30.3/k3s-install.sh to 172.30.41.5\n* Copying ./v1.30.3/k3s-airgap-images-amd64.tar.zst to 172.30.41.6\n* Copying ./v1.30.3/k3s to 172.30.41.6\n* Copying ./v1.30.3/k3s-install.sh to 172.30.41.6\n* Copying ./v1.30.3/k3s-airgap-images-amd64.tar.zst to 172.30.41.7\n* Copying ./v1.30.3/k3s to 172.30.41.7\n* Copying ./v1.30.3/k3s-install.sh to 172.30.41.7\n* Copying ./v1.30.3/k3s-airgap-images-amd64.tar.zst to 172.30.41.8\n* Copying ./v1.30.3/k3s to 172.30.41.8\n* Copying ./v1.30.3/k3s-install.sh to 172.30.41.8\n* Installing on first server node [172.30.41.5]\n[INFO]  Skipping k3s download and verify\n[INFO]  Skipping installation of SELinux RPM\n[INFO]  Skipping /usr/local/bin/kubectl symlink to k3s, already exists\n[INFO]  Skipping /usr/local/bin/crictl symlink to k3s, already exists\n[INFO]  Skipping /usr/local/bin/ctr symlink to k3s, already exists\n[INFO]  Creating killall script /usr/local/bin/k3s-killall.sh\n[INFO]  Creating uninstall script /usr/local/bin/k3s-uninstall.sh\n[INFO]  env: Creating environment file /etc/systemd/system/k3s.service.env\n[INFO]  systemd: Creating service file /etc/systemd/system/k3s.service\n[INFO]  systemd: Enabling k3s unit\nCreated symlink /etc/systemd/system/multi-user.target.wants/k3s.service \u2192 /etc/systemd/system/k3s.service.\n[INFO]  No change detected so skipping service start\n......\n* Installing on agent node [172.30.41.8]\n[INFO]  Skipping k3s download and verify\n[INFO]  Skipping installation of SELinux RPM\n[INFO]  Creating /usr/local/bin/kubectl symlink to k3s\n[INFO]  Creating /usr/local/bin/crictl symlink to k3s\n[INFO]  Creating /usr/local/bin/ctr symlink to k3s\n[INFO]  Creating killall script /usr/local/bin/k3s-killall.sh\n[INFO]  Creating uninstall script /usr/local/bin/k3s-agent-uninstall.sh\n[INFO]  env: Creating environment file /etc/systemd/system/k3s-agent.service.env\n[INFO]  systemd: Creating service file /etc/systemd/system/k3s-agent.service\n[INFO]  systemd: Enabling k3s-agent unit\nCreated symlink /etc/systemd/system/multi-user.target.wants/k3s-agent.service \u2192 /etc/systemd/system/k3s-agent.service.\n[INFO]  systemd: Starting k3s-agent\n
                3. Check the cluster status

                  $ kubectl get node -owide\nNAME      STATUS   ROLES                       AGE   VERSION        INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME\nagent1    Ready    <none>                      57s   v1.30.3+k3s1   172.30.41.8   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\nserver1   Ready    control-plane,etcd,master   12m   v1.30.3+k3s1   172.30.41.5   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\nserver2   Ready    control-plane,etcd,master   11m   v1.30.3+k3s1   172.30.41.6   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\nserver3   Ready    control-plane,etcd,master   11m   v1.30.3+k3s1   172.30.41.7   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\n
                "},{"location":"admin/kpanda/best-practice/k3s-lcm.html#_7","title":"\u7f29\u5bb9\u96c6\u7fa4","text":"
                1. \u4ec5\u5728\u5f85\u5220\u9664\u8282\u70b9\u6267\u884c k3s-uninstall.sh \u6216 k3s-agent-uninstall.sh
                2. \u5728\u4efb\u610f server \u8282\u70b9\u4e0a\u6267\u884c\uff1a

                  kubectl delete node <node name>\n
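                  Before the delete, the node's workloads can optionally be drained off first (not part of the original procedure; the flags below are standard kubectl drain options):

                  kubectl drain <node name> --ignore-daemonsets --delete-emptydir-data\n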
                "},{"location":"admin/kpanda/best-practice/k3s-lcm.html#_8","title":"\u5378\u8f7d\u96c6\u7fa4","text":"
                1. \u5728\u6240\u6709 server \u8282\u70b9\u624b\u52a8\u6267\u884c k3s-uninstall.sh
                2. \u5728\u6240\u6709 agent \u8282\u70b9\u624b\u52a8\u6267\u884c k3s-agent-uninstall.sh
                "},{"location":"admin/kpanda/best-practice/kubean-low-version.html","title":"\u79bb\u7ebf\u573a\u666f Kubean \u5411\u4e0b\u517c\u5bb9\u7248\u672c\u7684\u90e8\u7f72\u4e0e\u5347\u7ea7\u64cd\u4f5c","text":"

                \u4e3a\u4e86\u6ee1\u8db3\u5ba2\u6237\u5bf9\u4f4e\u7248\u672c\u7684 K8s \u96c6\u7fa4\u7684\u642d\u5efa\uff0cKubean \u63d0\u4f9b\u4e86\u5411\u4e0b\u517c\u5bb9\u5e76\u521b\u5efa\u4f4e\u7248\u672c\u7684 K8s \u96c6\u7fa4\u80fd\u529b\uff0c\u7b80\u79f0\u5411\u4e0b\u517c\u5bb9\u7248\u672c\u7684\u80fd\u529b\u3002

                \u76ee\u524d\u652f\u6301\u81ea\u5efa\u5de5\u4f5c\u96c6\u7fa4\u7248\u672c\u8303\u56f4\u5728 v1.26-v1.28\uff0c\u53ef\u4ee5\u53c2\u9605 AI \u7b97\u529b\u4e2d\u5fc3\u96c6\u7fa4\u7248\u672c\u652f\u6301\u4f53\u7cfb\u3002

                \u672c\u6587\u5c06\u6f14\u793a\u5982\u4f55\u90e8\u7f72\u4f4e\u7248\u672c\u7684 K8s \u96c6\u7fa4\u3002

                Note

                The node environment used in this demonstration is:

                • x86 architecture
                • CentOS 7 Linux distribution
                "},{"location":"admin/kpanda/best-practice/kubean-low-version.html#_1","title":"Prerequisites","text":"
                • Prepare a management cluster where Kubean resides, and make sure the current environment already supports the podman, skopeo, and minio client commands. If not, install the dependency components via a script; refer to Installing Prerequisite Dependencies.

                • Go to kubean to view the released artifacts, and choose a specific artifact version according to your actual situation. The currently supported artifact versions and their corresponding cluster version ranges are as follows:

                  Artifact version | Supported cluster range | AI Computing Center support\nrelease-2.21 | v1.23.0 ~ v1.25.6 | Supported since installer v0.14.0+\nrelease-2.22 | v1.24.0 ~ v1.26.13 | Supported since installer v0.15.0+\nrelease-2.23 | v1.25.0 ~ v1.27.10 | Supported since installer v0.16.0+\nrelease-2.24 | v1.26.0 ~ v1.29.1 | Supported since installer v0.17.0+\nrelease-2.25 | v1.27.0 ~ v1.29.5 | Supported since installer v0.20.0+

                Tip

                When choosing an artifact version, consider not only the cluster version range, but also whether the version ranges of the relevant components (such as calico and containerd) in that artifact's manifest resource cover the versions of those components in the current cluster (a sketch of one way to inspect this follows).
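                A sketch assuming the kubean Manifest custom resources are already installed in the cluster; the resource name here is hypothetical:

                kubectl get manifests.kubean.io\nkubectl get manifests.kubean.io <manifest-name> -o yaml   # review the component version ranges\n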

                This article demonstrates the offline deployment of a K8s cluster at version 1.23.0 and an offline upgrade of the K8s cluster from version 1.23.0 to 1.24.0, so the release-2.21 artifact is chosen.

                "},{"location":"admin/kpanda/best-practice/kubean-low-version.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"admin/kpanda/best-practice/kubean-low-version.html#kubespray-release","title":"\u51c6\u5907 Kubespray Release \u4f4e\u7248\u672c\u7684\u76f8\u5173\u5236\u54c1","text":"

                \u5c06 spray-job \u955c\u50cf\u5bfc\u5165\u5230\u79bb\u7ebf\u73af\u5883\u7684 Registry\uff08\u955c\u50cf\u4ed3\u5e93\uff09\u4e2d\u3002

                # Assume the registry address in the bootstrap cluster is 172.30.41.200\nREGISTRY_ADDR=\"172.30.41.200\"\n\n# An accelerator (mirror) address can be used for the spray-job image; the image address depends on the chosen artifact version\nSPRAY_IMG_ADDR=\"ghcr.m.daocloud.io/kubean-io/spray-job:2.21-d6f688f\"\n\n# skopeo parameters\nSKOPEO_PARAMS=\" --insecure-policy -a --dest-tls-verify=false --retry-times=3 \"\n\n# Online environment: export the release-2.21 spray-job image and transfer it to the offline environment\nskopeo copy docker://${SPRAY_IMG_ADDR} docker-archive:spray-job-2.21.tar\n\n# Offline environment: import the release-2.21 spray-job image into the bootstrap registry\nskopeo copy ${SKOPEO_PARAMS} docker-archive:spray-job-2.21.tar docker://${REGISTRY_ADDR}/${SPRAY_IMG_ADDR/.m.daocloud/}\n
                "},{"location":"admin/kpanda/best-practice/kubean-low-version.html#k8s","title":"\u5236\u4f5c\u4f4e\u7248\u672c K8s \u79bb\u7ebf\u8d44\u6e90","text":"
                1. \u51c6\u5907 manifest.yml \u6587\u4ef6\u3002

                  cat > \"manifest.yml\" <<EOF\nimage_arch:\n  - \"amd64\" ## \"arm64\"\nkube_version: ## \u6839\u636e\u5b9e\u9645\u573a\u666f\u586b\u5199\u96c6\u7fa4\u7248\u672c\n  - \"v1.23.0\"\n  - \"v1.24.0\"\nEOF\n
                2. Build the offline incremental package.

                  # Create the data directory\nmkdir data\n# Build the offline package\nAIRGAP_IMG_ADDR=\"ghcr.m.daocloud.io/kubean-io/airgap-patch:2.21-d6f688f\" # (1)!\npodman run --rm -v $(pwd)/manifest.yml:/manifest.yml -v $(pwd)/data:/data -e ZONE=CN -e MODE=FULL ${AIRGAP_IMG_ADDR}\n
                  1. An accelerator (mirror) address can be used for the airgap-patch image; the image address depends on the chosen artifact version
                3. Import the offline images and binary packages for the corresponding k8s version

                  # Import the binary packages from the data directory of the previous step into MinIO on the bootstrap node\ncd ./data/amd64/files/\nMINIO_ADDR="http://127.0.0.1:9000" # (1)!\nMINIO_USER=rootuser MINIO_PASS=rootpass123 ./import_files.sh ${MINIO_ADDR}\n\n# Import the images from the data directory of the previous step into the image registry on the bootstrap node\ncd ./data/amd64/images/\nREGISTRY_ADDR="127.0.0.1"  ./import_images.sh # (2)!\n
                  1. Replace the IP with the actual MinIO service address
                  2. Replace the IP with the actual registry address
                4. Deploy the manifest and localartifactset.cr.yaml custom resources to the management cluster where Kubean resides, or to the global service cluster; this example uses the global service cluster.

                  # Deploy the localArtifactSet resource from the data directory\ncd ./data\nkubectl apply -f localartifactset.cr.yaml\n\n# Download the manifest resource for release-2.21\nwget https://raw.githubusercontent.com/kubean-io/kubean-manifest/main/manifests/manifest-2.21-d6f688f.yml\n\n# Deploy the manifest resource for release-2.21\nkubectl apply -f manifest-2.21-d6f688f.yml\n
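
                  As a quick sanity check you can list the resources just applied; this assumes the kubean CRD plural names localartifactsets.kubean.io and manifests.kubean.io:

                  # list the applied custom resources (CRD plural names assumed)\nkubectl get localartifactsets.kubean.io\nkubectl get manifests.kubean.io\n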
                "},{"location":"admin/kpanda/best-practice/kubean-low-version.html#k8s_1","title":"\u90e8\u7f72\u548c\u5347\u7ea7 K8s \u96c6\u7fa4\u517c\u5bb9\u7248\u672c","text":""},{"location":"admin/kpanda/best-practice/kubean-low-version.html#_3","title":"\u90e8\u7f72","text":"
                1. \u524d\u5f80 \u5bb9\u5668\u7ba1\u7406 \uff0c\u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u4e2d\uff0c\u70b9\u51fb \u521b\u5efa\u96c6\u7fa4 \u6309\u94ae\u3002

                2. \u88ab\u7eb3\u7ba1\u53c2\u6570\u9009\u62e9 manifest\u3001localartifactset.cr.yaml \u81ea\u5b9a\u4e49\u8d44\u6e90\u90e8\u7f72\u7684\u96c6\u7fa4\uff0c\u672c\u4f8b\u4f7f\u7528\u7684\u662f\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u3002

                3. \u5176\u4f59\u53c2\u6570\u53c2\u8003\u521b\u5efa\u96c6\u7fa4\u3002

                "},{"location":"admin/kpanda/best-practice/kubean-low-version.html#_4","title":"\u5347\u7ea7","text":"
                1. \u9009\u62e9\u65b0\u521b\u5efa\u7684\u96c6\u7fa4\uff0c\u8fdb\u53bb\u8be6\u60c5\u754c\u9762\u3002

                2. \u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u96c6\u7fa4\u8fd0\u7ef4 -> \u96c6\u7fa4\u5347\u7ea7 \uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u70b9\u51fb \u7248\u672c\u5347\u7ea7 \u3002

                3. \u9009\u62e9\u53ef\u7528\u7684\u96c6\u7fa4\u8fdb\u884c\u5347\u7ea7\u3002

                "},{"location":"admin/kpanda/best-practice/limit-disk-usage-docker.html","title":"\u9650\u5236 Docker \u5355\u5bb9\u5668\u53ef\u5360\u7528\u7684\u78c1\u76d8\u7a7a\u95f4","text":"

                Docker \u5728 17.07.0-ce \u7248\u672c\u4e2d\u5f15\u5165 overlay2.zize\uff0c\u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528 overlay2.zize \u6765\u9650\u5236 docker \u5355\u5bb9\u5668\u53ef\u5360\u7528\u7684\u78c1\u76d8\u7a7a\u95f4\u3002

                "},{"location":"admin/kpanda/best-practice/limit-disk-usage-docker.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                \u5728\u914d\u7f6e docker overlay2.size \u4e4b\u524d\uff0c\u9700\u8981\u8c03\u6574\u64cd\u4f5c\u7cfb\u7edf\u4e2d\u6587\u4ef6\u7cfb\u7edf\u7c7b\u578b\u4e3a xfs \u5e76\u4f7f\u7528 pquota \u65b9\u5f0f\u8fdb\u884c\u8bbe\u5907\u6302\u8f7d\u3002

                \u683c\u5f0f\u5316\u8bbe\u5907\u4e3a XFS \u6587\u4ef6\u7cfb\u7edf\uff0c\u53ef\u4ee5\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a

                mkfs.xfs -f /dev/xxx\n
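
                The device then also needs to be mounted with project quotas enabled. For a temporary (non-fstab) mount this might look like the following sketch, where the device and mount point are placeholders:

                # mount the freshly formatted XFS device with project quota support\nmount -o rw,pquota /dev/xxx /var/lib/docker\n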

                Note

                pquota enforces per-project disk quotas.

                "},{"location":"admin/kpanda/best-practice/limit-disk-usage-docker.html#_2","title":"\u8bbe\u7f6e\u5355\u5bb9\u5668\u78c1\u76d8\u53ef\u5360\u7528\u7a7a\u95f4","text":"

                \u6ee1\u8db3\u4ee5\u4e0a\u6761\u4ef6\u540e\uff0c\u7528\u6237\u53ef\u4ee5\u901a\u8fc7\u8bbe\u7f6e docker overlay2.size \u6765\u9650\u5236\u5355\u5bb9\u5668\u78c1\u76d8\u5360\u7528\u7a7a\u95f4\u5927\u5c0f\u3002\u547d\u4ee4\u884c\u793a\u4f8b\u5982\u4e0b\uff1a

                sudo dockerd -s overlay2 --storage-opt overlay2.size=1G\n
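
                The same limit can also be made persistent in /etc/docker/daemon.json instead of passing flags to dockerd directly. A minimal sketch (restart the docker service afterwards for it to take effect):

                cat > /etc/docker/daemon.json <<EOF\n{\n  "storage-driver": "overlay2",\n  "storage-opts": ["overlay2.size=1G"]\n}\nEOF\nsystemctl restart docker\n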
                "},{"location":"admin/kpanda/best-practice/limit-disk-usage-docker.html#_3","title":"\u573a\u666f\u6f14\u7ec3","text":"

                \u63a5\u4e0b\u6765\u4ee5\u4e00\u4e2a\u5b9e\u9645\u7684\u4f8b\u5b50\u6765\u6f14\u7ec3\u4e00\u4e0b\u9650\u5236 docker \u5355\u5bb9\u5668\u53ef\u5360\u7528\u7684\u78c1\u76d8\u7a7a\u95f4\u6574\u4f53\u5b9e\u73b0\u6d41\u7a0b\u3002

                "},{"location":"admin/kpanda/best-practice/limit-disk-usage-docker.html#_4","title":"\u76ee\u6807","text":"

                \u90e8\u7f72\u4e00\u4e2a Kubernetes \u96c6\u7fa4\uff0c\u5e76\u9650\u5236 docker \u5355\u5bb9\u5668\u53ef\u5360\u7528\u7684\u78c1\u76d8\u7a7a\u95f4\u5927\u5c0f\u4e3a1G\uff0c\u8d85\u51fa1G\u5c06\u65e0\u6cd5\u4f7f\u7528\u3002

                "},{"location":"admin/kpanda/best-practice/limit-disk-usage-docker.html#_5","title":"\u64cd\u4f5c\u6d41\u7a0b","text":"
                1. \u767b\u5f55\u76ee\u6807\u8282\u70b9\uff0c\u67e5\u770b fstab \u6587\u4ef6\uff0c\u83b7\u53d6\u5f53\u524d\u8bbe\u5907\u7684\u6302\u8f7d\u60c5\u51b5\u3002

                  $ cat /etc/fstab\n\n# /etc/fstab\n# Created by anaconda on Thu Mar 19 11:32:59 2020\n#\n# Accessible filesystems, by reference, are maintained under '/dev/disk'\n# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info\n#\n/dev/mapper/centos-root /                       xfs     defaults        0 0\nUUID=3ed01f0e-67a1-4083-943a-343b7fed1708 /boot                   xfs     defaults        0 0\n/dev/mapper/centos-swap swap                    swap    defaults        0 0\n

                  Taking the node device above as an example, the XFS-formatted device /dev/mapper/centos-root is mounted at the root directory / with the default defaults options.

                2. Configure the xfs file system to be mounted with the pquota option.

                  1. Modify the fstab file to change the mount options from defaults to rw,pquota.

                    # Modify the fstab configuration\n$ vi /etc/fstab\n- /dev/mapper/centos-root /                       xfs     defaults         0 0\n+ /dev/mapper/centos-root /                       xfs     rw,pquota        0 0\n\n# Verify the configuration is correct\n$ mount -a\n
                  2. Check whether pquota has taken effect

                    xfs_quota -x -c print\n

                  Note

                  If pquota has not taken effect, check whether the pquota option is enabled in the operating system. If not, add the rootflags=pquota parameter to the boot configuration /etc/grub2.cfg, then reboot the operating system for the change to take effect.

                3. In Create Cluster -> Advanced Settings -> Custom Parameters, add the docker_storage_options parameter to set the disk space a single container can occupy.

                  Note

                  You can also do this via the kubean manifest by adding the docker_storage_options parameter in the vars conf.

                  apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: sample-vars-conf\n  namespace: kubean-system\ndata:\n  group_vars.yml: |\n    unsafe_show_logs: true\n    container_manager: docker\n+   docker_storage_options: -s overlay2 --storage-opt overlay2.size=1G  # add the docker_storage_options parameter\n    kube_network_plugin: calico\n    kube_network_plugin_multus: false\n    kube_proxy_mode: iptables\n    etcd_deployment_type: kubeadm\n    override_system_hostname: true\n    ...\n
                4. Check the running configuration of the dockerd service to verify that the disk limit was set successfully, as sketched below.
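
                  One possible check, for example: confirm the flag on the dockerd command line, then try to write past the limit inside a throwaway container (busybox is just an example image; the write should fail with "No space left on device"):

                  # confirm the storage option was applied to dockerd\nps -ef | grep dockerd | grep overlay2.size\n\n# try to write 2G inside a 1G-limited container; this is expected to fail\ndocker run --rm busybox dd if=/dev/zero of=/tmp/test.img bs=1M count=2048\n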

                This completes the end-to-end process of limiting the disk space a single Docker container can occupy.

                "},{"location":"admin/kpanda/best-practice/multi-arch.html","title":"Add heterogeneous nodes to a worker cluster","text":"

                This article describes how to add an ARM-architecture worker node running Kylin v10 sp2 to a worker cluster of AMD architecture running CentOS 7.9.

                Note

                This article only covers adding heterogeneous nodes, in offline mode, to worker clusters created by the AI Computing Center platform; attached clusters are not covered.

                "},{"location":"admin/kpanda/best-practice/multi-arch.html#_2","title":"Prerequisites","text":"
                • An AI Computing Center full-mode deployment is in place and the bootstrap node is still alive; for deployment, see the document Offline Installation of the AI Computing Center Commercial Edition
                • A worker cluster of AMD architecture running CentOS 7.9 has been created through the AI Computing Center platform; see the document Create a Worker Cluster
                "},{"location":"admin/kpanda/best-practice/multi-arch.html#_3","title":"Steps","text":""},{"location":"admin/kpanda/best-practice/multi-arch.html#_4","title":"Download and import the offline packages","text":"

                ARM architecture with the Kylin v10 sp2 operating system is used as the example.

                Make sure you are logged in to the bootstrap node, and that the clusterConfig.yaml file used when the AI Computing Center was originally deployed is still available.

                "},{"location":"admin/kpanda/best-practice/multi-arch.html#_5","title":"\u79bb\u7ebf\u955c\u50cf\u5305","text":"

                Note

                \u53ef\u4ee5\u5728\u4e0b\u8f7d\u4e2d\u5fc3\u4e0b\u8f7d\u6700\u65b0\u7248\u672c\u3002\u8bf7\u786e\u4fdd\u5728\u5bb9\u5668\u7ba1\u7406 v0.31 \u53ca\u4ee5\u4e0a\u7248\u672c\u4f7f\u7528\u8be5\u80fd\u529b\uff0c\u5bf9\u5e94\u5b89\u88c5\u5668 v0.21.0 \u53ca\u4ee5\u4e0a\u7248\u672c

                CPU \u67b6\u6784 \u7248\u672c \u4e0b\u8f7d\u5730\u5740 AMD64 v0.21.0 https://qiniu-download-public.daocloud.io/DaoCloud_Enterprise/dce5/offline-v0.21.0-amd64.tar ARM64 v0.21.0 https://qiniu-download-public.daocloud.io/DaoCloud_Enterprise/dce5/offline-v0.21.0-arm64.tar

                \u4e0b\u8f7d\u5b8c\u6bd5\u540e\u89e3\u538b\u79bb\u7ebf\u5305\u3002\u6b64\u5904\u6211\u4eec\u4e0b\u8f7d arm64 \u67b6\u6784\u7684\u79bb\u7ebf\u5305\uff1a

                tar -xvf offline-v0.21.0-arm64.tar\n
                "},{"location":"admin/kpanda/best-practice/multi-arch.html#iso-kylin-v10-sp2","title":"ISO \u79bb\u7ebf\u5305\uff08Kylin v10 sp2\uff09","text":"CPU \u67b6\u6784 \u64cd\u4f5c\u7cfb\u7edf\u7248\u672c \u4e0b\u8f7d\u5730\u5740 ARM64 Kylin Linux Advanced Server release V10 (Sword) SP2 \u7533\u8bf7\u5730\u5740\uff1ahttps://www.kylinos.cn/support/trial.html

                Note

                \u9e92\u9e9f\u64cd\u4f5c\u7cfb\u7edf\u9700\u8981\u63d0\u4f9b\u4e2a\u4eba\u4fe1\u606f\u624d\u80fd\u4e0b\u8f7d\u4f7f\u7528\uff0c\u4e0b\u8f7d\u65f6\u8bf7\u9009\u62e9 V10 (Sword) SP2\u3002

                "},{"location":"admin/kpanda/best-practice/multi-arch.html#ospackage-kylin-v10-sp2","title":"osPackage \u79bb\u7ebf\u5305 \uff08Kylin v10 sp2\uff09","text":"

                \u5176\u4e2d Kubean \u63d0\u4f9b\u4e86\u4e0d\u540c\u64cd\u4f5c\u7cfb\u7edf\u7684osPackage \u79bb\u7ebf\u5305\uff0c\u53ef\u4ee5\u524d\u5f80 https://github.com/kubean-io/kubean/releases \u67e5\u770b\u3002

                \u64cd\u4f5c\u7cfb\u7edf\u7248\u672c \u4e0b\u8f7d\u5730\u5740 Kylin Linux Advanced Server release V10 (Sword) SP2 https://github.com/kubean-io/kubean/releases/download/v0.18.5/os-pkgs-kylin-v10sp2-v0.18.5.tar.gz

                Note

                For the exact osPackage version to use, check the kubean version in offline/sample/clusterConfig.yaml inside the offline image package.

                "},{"location":"admin/kpanda/best-practice/multi-arch.html#_6","title":"Import the offline packages to the bootstrap node","text":"

                Run the import-artifact command:

                ./offline/dce5-installer import-artifact -c clusterConfig.yaml \\\n    --offline-path=/root/offline \\\n    --iso-path=/root/Kylin-Server-10-SP2-aarch64-Release-Build09-20210524.iso \\\n    --os-pkgs-path=/root/os-pkgs-kylin-v10sp2-v0.18.5.tar.gz\n

                Note

                Parameter description:

                • -c clusterConfig.yaml specifies the clusterConfig.yaml file used when the AI Computing Center was previously deployed
                • --offline-path specifies the path of the downloaded offline image package
                • --iso-path specifies the path of the downloaded ISO operating system image file
                • --os-pkgs-path specifies the path of the downloaded osPackage offline package

                After the import command succeeds, the offline packages are uploaded to MinIO on the bootstrap node.

                "},{"location":"admin/kpanda/best-practice/multi-arch.html#_7","title":"\u6dfb\u52a0\u5f02\u6784\u5de5\u4f5c\u8282\u70b9","text":"

                \u8bf7\u786e\u4fdd\u5df2\u7ecf\u767b\u5f55\u5230 AI \u7b97\u529b\u4e2d\u5fc3\u7ba1\u7406\u96c6\u7fa4\u7684\u7ba1\u7406\u8282\u70b9\u4e0a\u3002

                "},{"location":"admin/kpanda/best-practice/multi-arch.html#_8","title":"\u4fee\u6539\u4e3b\u673a\u6e05\u5355\u6587\u4ef6","text":"

                \u4e3b\u673a\u6e05\u5355\u6587\u4ef6\u793a\u4f8b\uff1a

                \u65b0\u589e\u8282\u70b9\u524d\u65b0\u589e\u8282\u70b9\u540e
                apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: ${cluster-name}-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      children:\n        etcd:\n          hosts:\n            centos-master:\n        k8s_cluster:\n          children:\n            kube_control_plane:\n            kube_node:\n        kube_control_plane:\n          hosts:\n            centos-master:\n        kube_node:\n          hosts:\n            centos-master:\n    hosts:\n      centos-master:\n        ip: 10.5.10.183\n        access_ip: 10.5.10.183\n        ansible_host: 10.5.10.183\n        ansible_connection: ssh\n        ansible_user: root\n        ansible_ssh_pass: ******\n        ansible_password: ******\n        ansible_become_password: ******\n
                apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: ${cluster-name}-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      hosts:\n        centos-master:\n          ip: 10.5.10.183\n          access_ip: 10.5.10.183\n          ansible_host: 10.5.10.183\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_ssh_pass: ******\n          ansible_password: ******\n          ansible_become_password: ******\n          # Add the heterogeneous node information\n        kylin-worker:\n          ip: 10.5.10.181\n          access_ip: 10.5.10.181\n          ansible_host: 10.5.10.181\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_ssh_pass: ******\n          ansible_password: ******\n          ansible_become_password: ******\n        children:\n          kube_control_plane:\n            hosts:\n              - centos-master\n          kube_node:\n            hosts:\n              - centos-master\n              - kylin-worker  # Add the name of the newly added heterogeneous node\n          etcd:\n            hosts:\n              - centos-master\n          k8s_cluster:\n            children:\n              - kube_control_plane\n              - kube_node\n

                Following the comments in the configuration above, add the information for the new worker node:

                kubectl edit cm ${cluster-name}-hosts-conf -n kubean-system\n

                cluster-name is the name of the worker cluster, generated by default when the cluster is created through Container Management.

                "},{"location":"admin/kpanda/best-practice/multi-arch.html#clusteroperationyml","title":"\u901a\u8fc7 ClusterOperation.yml \u65b0\u589e\u6269\u5bb9\u4efb\u52a1","text":"

                \u793a\u4f8b\uff1a

                ClusterOperation.yml
                apiVersion: kubean.io/v1alpha1\nkind: ClusterOperation\nmetadata:\n  name: add-worker-node\nspec:\n  cluster: ${cluster-name} # specify the cluster name\n  image: 10.5.14.30/ghcr.m.daocloud.io/kubean-io/spray-job:v0.18.5\n  actionType: playbook\n  action: scale.yml\n  extraArgs: --limit=kylin-worker\n  preHook:\n    - actionType: playbook\n      action: ping.yml\n    - actionType: playbook\n      action: disable-firewalld.yml\n    - actionType: playbook\n      action: enable-repo.yml\n      extraArgs: |\n        -e "{repo_list: ["http://10.5.14.30:9000/kubean/kylin-iso/\$releasever/sp2/os/\$basearch","http://10.5.14.30:9000/kubean/kylin/\$releasever/sp2/os/\$basearch"]}" --limit=kylin-worker\n  postHook:\n    - actionType: playbook\n      action: cluster-info.yml\n

                Note

                • spec.image must use the same image as the job that performed the previous deployment
                • spec.action is set to scale.yml
                • spec.extraArgs is set to --limit=<new-node-name>; in this example, --limit=kylin-worker
                • For the enable-repo.yml playbook in spec.preHook, fill in the correct repo_list for the relevant OS

                Following the configuration above, create and apply join-node-ops.yaml:

                vi join-node-ops.yaml\nkubectl apply -f join-node-ops.yaml -n kubean-system\n
                "},{"location":"admin/kpanda/best-practice/multi-arch.html#_9","title":"\u68c0\u67e5\u4efb\u52a1\u6267\u884c\u72b6\u6001","text":"
                kubectl -n kubean-system get pod | grep add-worker-node\n

                To follow the progress of the scale-out task, check the logs of that Pod, for example as shown below.
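
                A minimal sketch of tailing those logs (the grep pattern follows the ClusterOperation name add-worker-node used above):

                # follow the logs of the scale-out job pod\nkubectl -n kubean-system logs -f $(kubectl -n kubean-system get pod -o name | grep add-worker-node)\n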

                "},{"location":"admin/kpanda/best-practice/multi-arch.html#_10","title":"\u524d\u5f80\u754c\u9762\u9a8c\u8bc1","text":"
                1. \u524d\u5f80 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4 -> \u8282\u70b9\u7ba1\u7406

                2. \u70b9\u51fb\u65b0\u589e\u7684\u8282\u70b9\uff0c\u67e5\u770b\u8be6\u60c5

                "},{"location":"admin/kpanda/best-practice/replace-first-master-node.html","title":"\u66ff\u6362\u5de5\u4f5c\u96c6\u7fa4\u7684\u9996\u4e2a\u63a7\u5236\u8282\u70b9","text":"

                \u672c\u6587\u5c06\u4ee5\u4e00\u4e2a\u9ad8\u53ef\u7528\u4e09\u63a7\u5236\u8282\u70b9\u7684\u5de5\u4f5c\u96c6\u7fa4\u4e3a\u4f8b\u3002 \u5f53\u5de5\u4f5c\u96c6\u7fa4\u7684\u9996\u4e2a\u63a7\u5236\u8282\u70b9\u6545\u969c\u6216\u5f02\u5e38\u65f6\uff0c\u5982\u4f55\u66ff\u6362\u6216\u91cd\u65b0\u63a5\u5165\u9996\u4e2a\u63a7\u5236\u8282\u70b9\u3002

                \u672c\u6587\u7684\u9ad8\u53ef\u7528\u96c6\u7fa4\u6709 3 \u4e2a Master \u8282\u70b9\uff1a

                • node1 (172.30.41.161)
                • node2 (172.30.41.162)
                • node3 (172.30.41.163)

                Assume node1 goes down. The following describes how to re-join the recovered node1 to the worker cluster.

                "},{"location":"admin/kpanda/best-practice/replace-first-master-node.html#_2","title":"\u51c6\u5907\u5de5\u4f5c","text":"

                \u5728\u6267\u884c\u66ff\u6362\u64cd\u4f5c\u4e4b\u524d\uff0c\u5148\u83b7\u53d6\u96c6\u7fa4\u8d44\u6e90\u57fa\u672c\u4fe1\u606f\uff0c\u4fee\u6539\u76f8\u5173\u914d\u7f6e\u65f6\u4f1a\u7528\u5230\u3002

                Note

                \u4ee5\u4e0b\u83b7\u53d6\u96c6\u7fa4\u8d44\u6e90\u4fe1\u606f\u7684\u547d\u4ee4\u5747\u5728\u7ba1\u7406\u96c6\u7fa4\u4e2d\u6267\u884c\u3002

                1. \u83b7\u53d6\u96c6\u7fa4\u540d\u79f0

                  \u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c\u627e\u5230\u96c6\u7fa4\u5bf9\u5e94\u7684 clusters.kubean.io \u8d44\u6e90\uff1a

                  # For example, if the clusters.kubean.io resource is named cluster-mini-1,\n# get the cluster name as follows\nCLUSTER_NAME=$(kubectl get clusters.kubean.io cluster-mini-1 -o=jsonpath="{.metadata.name}{'\n'}")\n
                2. Get the cluster's host inventory configmap

                  kubectl get clusters.kubean.io cluster-mini-1 -o=jsonpath=\"{.spec.hostsConfRef}{'\\n'}\"\n{\"name\":\"mini-1-hosts-conf\",\"namespace\":\"kubean-system\"}\n
                3. Get the cluster's configuration parameters configmap

                  kubectl get clusters.kubean.io cluster-mini-1 -o=jsonpath=\"{.spec.varsConfRef}{'\\n'}\"\n{\"name\":\"mini-1-vars-conf\",\"namespace\":\"kubean-system\"}\n
                "},{"location":"admin/kpanda/best-practice/replace-first-master-node.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                1. \u8c03\u6574\u63a7\u5236\u5e73\u9762\u8282\u70b9\u987a\u5e8f

                  \u91cd\u7f6e node1 \u8282\u70b9\u4f7f\u5176\u6062\u590d\u5230\u5b89\u88c5\u96c6\u7fa4\u4e4b\u524d\u7684\u72b6\u6001\uff08\u6216\u4f7f\u7528\u65b0\u7684\u8282\u70b9\uff09\uff0c\u4fdd\u6301 node1 \u8282\u70b9\u7684\u7f51\u7edc\u8fde\u901a\u6027\u3002

                  \u8c03\u6574\u4e3b\u673a\u6e05\u5355\u4e2d node1 \u8282\u70b9\u5728 kube_control_plane \u3001kube_node\u3001etcd \u4e2d\u7684\u987a\u5e8f \uff08node1/node2/node3 -> node2/node3/node1\uff09\uff1a

                  function change_control_plane_order() {\n  cat << EOF | kubectl apply -f -\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: mini-1-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      hosts:\n        node1:\n          ip: \"172.30.41.161\"\n          access_ip: \"172.30.41.161\"\n          ansible_host: \"172.30.41.161\"\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: dangerous\n        node2:\n          ip: \"172.30.41.162\"\n          access_ip: \"172.30.41.162\"\n          ansible_host: \"172.30.41.162\"\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: dangerous\n        node3:\n          ip: \"172.30.41.163\"\n          access_ip: \"172.30.41.163\"\n          ansible_host: \"172.30.41.163\"\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: dangerous\n      children:\n        kube_control_plane:\n          hosts:\n            node2:\n            node3:\n            node1:\n        kube_node:\n          hosts:\n            node2:\n            node3:\n            node1:\n        etcd:\n          hosts:\n            node2:\n            node3:\n            node1:\n        k8s_cluster:\n          children:\n            kube_control_plane:\n            kube_node:\n        calico_rr:\n          hosts: {}\nEOF\n}\n\nchange_control_plane_order\n
                2. Remove the first master node in abnormal state

                  After adjusting the node order in the host inventory, remove node1, which is in an abnormal state in the K8s control plane.

                  Note

                  If node1 is offline or faulty, the following configuration items must be added to extraArgs; they are not needed when node1 is online.

                  reset_nodes=false # skip the node reset step\nallow_ungraceful_removal=true # allow ungraceful removal\n
                  # The spray-job image can use an accelerator address here\n\nSPRAY_IMG_ADDR="ghcr.m.daocloud.io/kubean-io/spray-job"\nSPRAY_RLS_2_22_TAG="2.22-336b323"\nKUBE_VERSION="v1.24.14"\nCLUSTER_NAME="cluster-mini-1"\nREMOVE_NODE_NAME="node1"\n\ncat << EOF | kubectl apply -f -\n---\napiVersion: kubean.io/v1alpha1\nkind: ClusterOperation\nmetadata:\n  name: cluster-mini-1-remove-node-ops\nspec:\n  cluster: ${CLUSTER_NAME}\n  image: ${SPRAY_IMG_ADDR}:${SPRAY_RLS_2_22_TAG}\n  actionType: playbook\n  action: remove-node.yml\n  extraArgs: -e node=${REMOVE_NODE_NAME} -e reset_nodes=false -e allow_ungraceful_removal=true -e kube_version=${KUBE_VERSION}\n  postHook:\n    - actionType: playbook\n      action: cluster-info.yml\nEOF\n
                3. Manually modify the cluster configuration: edit and update cluster-info

                  # Edit cluster-info\nkubectl -n kube-public edit cm cluster-info\n\n# 1. If the ca.crt certificate has been renewed, update the content of the certificate-authority-data field\n# View the base64 encoding of the ca certificate:\ncat /etc/kubernetes/ssl/ca.crt | base64 | tr -d '\n'\n\n# 2. Change the IP address in the server field to the new first master IP; this scenario uses node2's IP address 172.30.41.162\n
                4. Manually modify the cluster configuration: edit and update kubeadm-config

                  # Edit kubeadm-config\nkubectl -n kube-system edit cm kubeadm-config\n\n# Change controlPlaneEndpoint to the new first master IP; this scenario uses node2's IP address 172.30.41.162\n
                5. Scale the master node back out and update the cluster

                  Note

                  • Use --limit to restrict the update to the etcd and kube_control_plane node groups.
                  • In an offline environment, spec.preHook must include enable-repo.yml, with the extraArgs parameter set to the correct repo_list for the relevant OS.
                  • After the scale-out completes, node2 becomes the first master.
                  cat << EOF | kubectl apply -f -\n---\napiVersion: kubean.io/v1alpha1\nkind: ClusterOperation\nmetadata:\n  name: cluster-mini-1-update-cluster-ops\nspec:\n  cluster: ${CLUSTER_NAME}\n  image: ${SPRAY_IMG_ADDR}:${SPRAY_RLS_2_22_TAG}\n  actionType: playbook\n  action: cluster.yml\n  extraArgs: --limit=etcd,kube_control_plane -e kube_version=${KUBE_VERSION}\n  preHook:\n    - actionType: playbook\n      action: enable-repo.yml  # in offline environments this yaml must be added, with the correct repo-list set (to install OS packages); the parameter values below are for reference only\n      extraArgs: |\n        -e "{repo_list: ['http://172.30.41.0:9000/kubean/centos/\$releasever/os/\$basearch','http://172.30.41.0:9000/kubean/centos-iso/\$releasever/os/\$basearch']}"\n  postHook:\n    - actionType: playbook\n      action: cluster-info.yml\nEOF\n

                With that, the replacement of the first master node is complete, and you can verify the result as sketched below.
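
                As a final check, for example, confirm that all three nodes are Ready and that cluster-info now points at node2:

                # verify node status and the updated control plane endpoint\nkubectl get nodes -o wide\nkubectl -n kube-public get cm cluster-info -o yaml | grep server\n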

                "},{"location":"admin/kpanda/best-practice/update-offline-cluster.html","title":"\u5de5\u4f5c\u96c6\u7fa4\u79bb\u7ebf\u90e8\u7f72/\u5347\u7ea7\u6307\u5357","text":"

                Note

                \u672c\u6587\u4ec5\u9488\u5bf9\u79bb\u7ebf\u6a21\u5f0f\u4e0b\uff0c\u4f7f\u7528 AI \u7b97\u529b\u4e2d\u5fc3\u5e73\u53f0\u6240\u521b\u5efa\u7684\u5de5\u4f5c\u96c6\u7fa4\u7684 kubernetes \u7684\u7248\u672c\u8fdb\u884c\u90e8\u7f72\u6216\u5347\u7ea7\uff0c \u4e0d\u5305\u62ec\u5176\u5b83 kubeneters \u7ec4\u4ef6\u7684\u90e8\u7f72\u6216\u5347\u7ea7\u3002

                \u672c\u6587\u9002\u7528\u4ee5\u4e0b\u79bb\u7ebf\u573a\u666f\uff1a

                • \u7528\u6237\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u64cd\u4f5c\u6307\u5357\uff0c\u90e8\u7f72 AI \u7b97\u529b\u4e2d\u5fc3\u5e73\u53f0\u6240\u521b\u5efa\u7684\u975e\u754c\u9762\u4e2d\u63a8\u8350\u7684 Kubernetes \u7248\u672c\u3002
                • \u7528\u6237\u53ef\u4ee5\u901a\u8fc7\u5236\u4f5c\u589e\u91cf\u79bb\u7ebf\u5305\u7684\u65b9\u5f0f\u5bf9\u4f7f\u7528 AI \u7b97\u529b\u4e2d\u5fc3\u5e73\u53f0\u6240\u521b\u5efa\u7684\u5de5\u4f5c\u96c6\u7fa4\u7684 kubernetes \u7684\u7248\u672c\u8fdb\u884c\u5347\u7ea7\u3002

                \u6574\u4f53\u7684\u601d\u8def\u4e3a\uff1a

                1. \u5728\u8054\u7f51\u8282\u70b9\u6784\u5efa\u79bb\u7ebf\u5305
                2. \u5c06\u79bb\u7ebf\u5305\u5bfc\u5165\u706b\u79cd\u8282\u70b9
                3. \u66f4\u65b0\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684 Kubernetes \u7248\u672c\u6e05\u5355
                4. \u4f7f\u7528\u5e73\u53f0 UI \u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4\u6216\u5347\u7ea7\u5de5\u4f5c\u96c6\u7fa4\u7684 kubernetes \u7248\u672c

                Note

                \u76ee\u524d\u652f\u6301\u6784\u5efa\u7684\u79bb\u7ebf kubernetes \u7248\u672c\uff0c\u8bf7\u53c2\u8003 kubean \u652f\u6301\u7684 kubernetes \u7248\u672c\u5217\u8868\u3002

                "},{"location":"admin/kpanda/best-practice/update-offline-cluster.html#_2","title":"\u5728\u8054\u7f51\u8282\u70b9\u6784\u5efa\u79bb\u7ebf\u5305","text":"

                \u7531\u4e8e\u79bb\u7ebf\u73af\u5883\u65e0\u6cd5\u8054\u7f51\uff0c\u7528\u6237\u9700\u8981\u4e8b\u5148\u51c6\u5907\u4e00\u53f0\u80fd\u591f \u8054\u7f51\u7684\u8282\u70b9 \u6765\u8fdb\u884c\u589e\u91cf\u79bb\u7ebf\u5305\u7684\u6784\u5efa\uff0c\u5e76\u4e14\u5728\u8fd9\u4e2a\u8282\u70b9\u4e0a\u542f\u52a8 Docker \u6216\u8005 podman \u670d\u52a1\u3002 \u53c2\u9605\u5982\u4f55\u5b89\u88c5 Docker\uff1f

                1. \u68c0\u67e5\u8054\u7f51\u8282\u70b9\u7684 Docker \u670d\u52a1\u8fd0\u884c\u72b6\u6001

                  ps aux|grep docker\n

                  Expected output:

                  root     12341  0.5  0.2 654372 26736 ?        Ssl  23:45   0:00 /usr/bin/dockerd\nroot     12351  0.2  0.1 625080 13740 ?        Ssl  23:45   0:00 docker-containerd --config /var/run/docker/containerd/containerd.toml\nroot     13024  0.0  0.0 112824   980 pts/0    S+   23:45   0:00 grep --color=auto docker\n
                2. Create a file named manifest.yaml in the /root directory of the connected node:

                  vi manifest.yaml\n

                  The contents of manifest.yaml are as follows:

                  manifest.yaml
                  image_arch:\n- "amd64"\nkube_version: # fill in the cluster version to upgrade to\n- "v1.28.0"\n
                  • image_arch specifies the CPU architecture type; allowed values are amd64 and arm64.
                  • kube_version specifies the version of the Kubernetes offline package to build; see the supported offline Kubernetes versions mentioned above.
                3. Create a folder named data under the /root directory to store the incremental offline package.

                  mkdir data\n

                  Run the following command to generate the offline package using the kubean airgap-patch image. The airgap-patch image tag matches the Kubean version; make sure the Kubean version covers the Kubernetes versions to be upgraded.

                  # assume the kubean version is v0.13.9\ndocker run --rm -v $(pwd)/manifest.yaml:/manifest.yml -v $(pwd)/data:/data ghcr.m.daocloud.io/kubean-io/airgap-patch:v0.13.9\n

                  After the Docker container finishes running, check the files under the /data folder; the directory layout is as follows:

                  data\n\u251c\u2500\u2500 amd64\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 files\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 import_files.sh\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 offline-files.tar.gz\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 images\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 import_images.sh\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 offline-images.tar.gz\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 os-pkgs\n\u2502\u00a0\u00a0     \u2514\u2500\u2500 import_ospkgs.sh\n\u2514\u2500\u2500 localartifactset.cr.yaml\n
                "},{"location":"admin/kpanda/best-practice/update-offline-cluster.html#_3","title":"\u5c06\u79bb\u7ebf\u5305\u5bfc\u5165\u706b\u79cd\u8282\u70b9","text":"
                1. \u5c06\u8054\u7f51\u8282\u70b9\u7684 /data \u6587\u4ef6\u62f7\u8d1d\u81f3\u706b\u79cd\u8282\u70b9\u7684 /root \u76ee\u5f55\u4e0b\uff0c\u5728 \u8054\u7f51\u8282\u70b9 \u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff1a

                  scp -r data root@x.x.x.x:/root\n

                  x.x.x.x is the bootstrap node's IP address

                2. On the bootstrap node, copy the image files under /data to the bootstrap node's built-in Docker registry. After logging in to the bootstrap node, run the following commands:

                  1. Enter the directory containing the image files

                    cd data/amd64/images\n
                  2. Run the import_images.sh script to import the images into the bootstrap node's built-in Docker registry.

                    REGISTRY_ADDR=\"127.0.0.1\"  ./import_images.sh\n

                  Note

                  The command above only applies to the bootstrap node's built-in Docker registry. If you use an external registry, use the following instead:

                  REGISTRY_SCHEME=https REGISTRY_ADDR=${registry_address} REGISTRY_USER=${username} REGISTRY_PASS=${password} ./import_images.sh\n
                  • REGISTRY_ADDR is the address of the image registry, e.g. 1.2.3.4:5000
                  • If the registry requires username/password authentication, set REGISTRY_USER and REGISTRY_PASS
                3. On the bootstrap node, copy the binary files under /data to the bootstrap node's built-in MinIO service.

                  1. Enter the directory containing the binary files

                    cd data/amd64/files/\n
                  2. Run the import_files.sh script to import the binary files into the bootstrap node's built-in MinIO service.

                    MINIO_USER=rootuser MINIO_PASS=rootpass123 ./import_files.sh http://127.0.0.1:9000\n

                Note

                The command above only applies to the bootstrap node's built-in MinIO service. If you use an external MinIO, replace http://127.0.0.1:9000 with the external MinIO's access address. "rootuser" and "rootpass123" are the default account and password of the bootstrap node's built-in MinIO service.

                "},{"location":"admin/kpanda/best-practice/update-offline-cluster.html#kubernetes","title":"\u66f4\u65b0\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684 kubernetes \u7248\u672c\u6e05\u5355","text":"

                \u706b\u79cd\u8282\u70b9\u4e0a\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c\u5c06 localartifactset \u8d44\u6e90\u90e8\u7f72\u5230\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\uff1a

                kubectl apply -f data/kubeanofflineversion.cr.patch.yaml\n
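
                As a quick follow-up check that the version list landed, you can list the resources (the CRD plural name localartifactsets.kubean.io is assumed):

                kubectl get localartifactsets.kubean.io\n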
                "},{"location":"admin/kpanda/best-practice/update-offline-cluster.html#_4","title":"\u4e0b\u4e00\u6b65","text":"

                \u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3\u7684 UI \u7ba1\u7406\u754c\u9762\uff0c\u60a8\u53ef\u4ee5\u7ee7\u7eed\u6267\u884c\u4ee5\u4e0b\u64cd\u4f5c\uff1a

                1. \u53c2\u7167\u521b\u5efa\u96c6\u7fa4\u7684\u6587\u6863\u8fdb\u884c\u5de5\u4f5c\u96c6\u7fa4\u521b\u5efa\uff0c\u6b64\u65f6\u53ef\u4ee5\u9009\u62e9 Kubernetes \u589e\u91cf\u7248\u672c\u3002

                2. \u53c2\u7167\u5347\u7ea7\u96c6\u7fa4\u7684\u6587\u6863\u5bf9\u81ea\u5efa\u7684\u5de5\u4f5c\u96c6\u7fa4\u8fdb\u884c\u5347\u7ea7\u3002

                "},{"location":"admin/kpanda/best-practice/use-otherlinux-create-custer.html","title":"\u5728\u975e\u4e3b\u6d41\u64cd\u4f5c\u7cfb\u7edf\u4e0a\u521b\u5efa\u96c6\u7fa4","text":"

                \u672c\u6587\u4ecb\u7ecd\u79bb\u7ebf\u6a21\u5f0f\u4e0b\u5982\u4f55\u5728 \u672a\u58f0\u660e\u652f\u6301\u7684 OS \u4e0a\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4\u3002AI \u7b97\u529b\u4e2d\u5fc3\u58f0\u660e\u652f\u6301\u7684 OS \u8303\u56f4\u8bf7\u53c2\u8003 AI \u7b97\u529b\u4e2d\u5fc3\u652f\u6301\u7684\u64cd\u4f5c\u7cfb\u7edf

                \u79bb\u7ebf\u6a21\u5f0f\u4e0b\u5728\u672a\u58f0\u660e\u652f\u6301\u7684 OS \u4e0a\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4\uff0c\u4e3b\u8981\u7684\u6d41\u7a0b\u5982\u4e0b\u56fe\uff1a

                \u63a5\u4e0b\u6765\uff0c\u672c\u6587\u5c06\u4ee5 openAnolis \u64cd\u4f5c\u7cfb\u7edf\u4e3a\u4f8b\uff0c\u4ecb\u7ecd\u5982\u4f55\u5728\u975e\u4e3b\u6d41\u64cd\u4f5c\u7cfb\u7edf\u4e0a\u521b\u5efa\u96c6\u7fa4\u3002

                "},{"location":"admin/kpanda/best-practice/use-otherlinux-create-custer.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                • \u5df2\u7ecf\u90e8\u7f72\u597d\u4e00\u4e2a AI \u7b97\u529b\u4e2d\u5fc3\u5168\u6a21\u5f0f\uff0c\u90e8\u7f72\u53c2\u8003\u6587\u6863\u79bb\u7ebf\u5b89\u88c5 AI \u7b97\u529b\u4e2d\u5fc3\u5546\u4e1a\u7248
                • \u81f3\u5c11\u62e5\u6709\u4e00\u53f0\u53ef\u4ee5\u8054\u7f51\u7684\u540c\u67b6\u6784\u540c\u7248\u672c\u7684\u8282\u70b9\u3002
                "},{"location":"admin/kpanda/best-practice/use-otherlinux-create-custer.html#_3","title":"\u5728\u7ebf\u8282\u70b9\u6784\u5efa\u79bb\u7ebf\u5305","text":"

                \u627e\u5230\u4e00\u4e2a\u548c\u5f85\u5efa\u96c6\u7fa4\u8282\u70b9\u67b6\u6784\u548c OS \u5747\u4e00\u81f4\u7684\u5728\u7ebf\u73af\u5883\uff0c\u672c\u6587\u4ee5 AnolisOS 8.8 GA \u4e3a\u4f8b\u3002\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c\u751f\u6210\u79bb\u7ebf os-pkgs \u5305\u3002

                # Download the scripts and build the os packages bundle\ncurl -Lo ./pkgs.yml https://raw.githubusercontent.com/kubean-io/kubean/main/build/os-packages/others/pkgs.yml\ncurl -Lo ./other_os_pkgs.sh https://raw.githubusercontent.com/kubean-io/kubean/main/build/os-packages/others/other_os_pkgs.sh && chmod +x  other_os_pkgs.sh\n./other_os_pkgs.sh build # build the offline package\n

                After running the commands above, a tarball named os-pkgs-anolis-8.8.tar.gz should be generated in the current directory, whose layout looks roughly like this:

                    .\n    \u251c\u2500\u2500 other_os_pkgs.sh\n    \u251c\u2500\u2500 pkgs.yml\n    \u2514\u2500\u2500 os-pkgs-anolis-8.8.tar.gz\n
                "},{"location":"admin/kpanda/best-practice/use-otherlinux-create-custer.html#_4","title":"\u79bb\u7ebf\u8282\u70b9\u5b89\u88c5\u79bb\u7ebf\u5305","text":"

                \u5c06\u5728\u7ebf\u8282\u70b9\u4e2d\u751f\u6210\u7684 other_os_pkgs.sh \u3001 pkgs.yml \u3001 os-pkgs-anolis-8.8.tar.gz \u4e09\u4e2a\u6587\u4ef6\u62f7\u8d1d\u81f3\u79bb\u7ebf\u73af\u5883\u4e2d\u7684\u5f85\u5efa\u96c6\u7fa4\u7684**\u6240\u6709**\u8282\u70b9\u4e0a\u3002

                \u767b\u5f55\u79bb\u7ebf\u73af\u5883\u4e2d\uff0c\u4efb\u4e00\u5f85\u5efa\u96c6\u7fa4\u7684\u5176\u4e2d\u4e00\u4e2a\u8282\u70b9\u4e0a\uff0c\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c\u4e3a\u8282\u70b9\u5b89\u88c5 os-pkg \u5305\u3002

                # \u914d\u7f6e\u73af\u5883\u53d8\u91cf\nexport PKGS_YML_PATH=/root/workspace/os-pkgs/pkgs.yml # \u5f53\u524d\u79bb\u7ebf\u8282\u70b9 pkgs.yml \u6587\u4ef6\u7684\u8def\u5f84\nexport PKGS_TAR_PATH=/root/workspace/os-pkgs/os-pkgs-anolis-8.8.tar.gz # \u5f53\u524d\u79bb\u7ebf\u8282\u70b9 os-pkgs-anolis-8.8.tar.gz \u7684\u8def\u5f84\nexport SSH_USER=root # \u5f53\u524d\u79bb\u7ebf\u8282\u70b9\u7684\u7528\u6237\u540d\nexport SSH_PASS=dangerous # \u5f53\u524d\u79bb\u7ebf\u8282\u70b9\u7684\u5bc6\u7801\nexport HOST_IPS='172.30.41.168' # \u5f53\u524d\u79bb\u7ebf\u8282\u70b9\u7684 IP\n./other_os_pkgs.sh install #\u5b89\u88c5\u79bb\u7ebf\u5305\n

                \u6267\u884c\u5b8c\u6210\u4e0a\u8ff0\u547d\u4ee4\u540e\uff0c\u7b49\u5f85\u754c\u9762\u63d0\u793a\uff1a All packages for node (X.X.X.X) have been installed \u5373\u8868\u793a\u5b89\u88c5\u5b8c\u6210\u3002

                "},{"location":"admin/kpanda/best-practice/use-otherlinux-create-custer.html#_5","title":"\u4e0b\u4e00\u6b65","text":"

                \u53c2\u8003\u6587\u6863\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4\uff0c\u5728 UI \u754c\u9762\u4e0a\u521b\u5efa openAnolis \u96c6\u7fa4\u3002

                "},{"location":"admin/kpanda/best-practice/co-located/index.html","title":"\u5728\u79bb\u7ebf\u6df7\u90e8","text":"

                \u4f01\u4e1a\u4e2d\u4e00\u822c\u5b58\u5728\u4e24\u79cd\u7c7b\u578b\u7684\u5de5\u4f5c\u8d1f\u8f7d\uff1a\u5728\u7ebf\u670d\u52a1\uff08latency-sensitive service\uff09\u548c\u79bb\u7ebf\u4efb\u52a1\uff08batch job\uff09\u3002 \u5728\u7ebf\u670d\u52a1\u5982\u641c\u7d22/\u652f\u4ed8/\u63a8\u8350\u7b49\uff0c\u5177\u6709\u5904\u7406\u4f18\u5148\u7ea7\u9ad8\u3001\u65f6\u5ef6\u654f\u611f\u6027\u9ad8\u3001\u9519\u8bef\u5bb9\u5fcd\u5ea6\u4f4e\u4ee5\u53ca\u767d\u5929\u8d1f\u8f7d\u9ad8\u665a\u4e0a\u8d1f\u8f7d\u4f4e\u7b49\u7279\u70b9\u3002 \u800c\u79bb\u7ebf\u4efb\u52a1\u5982 AI \u8bad\u7ec3/\u5927\u6570\u636e\u5904\u7406\u7b49\uff0c\u5177\u6709\u5904\u7406\u4f18\u5148\u7ea7\u4f4e\u3001\u65f6\u5ef6\u654f\u611f\u6027\u4f4e\u3001\u9519\u8bef\u5bb9\u5fcd\u5ea6\u9ad8\u4ee5\u53ca\u8fd0\u884c\u65f6\u8d1f\u8f7d\u4e00\u76f4\u8f83\u9ad8\u7b49\u7279\u70b9\u3002 \u7531\u4e8e\u5728\u7ebf\u670d\u52a1\u4e0e\u79bb\u7ebf\u4efb\u52a1\u8fd9\u4e24\u7c7b\u5de5\u4f5c\u8d1f\u8f7d\u5929\u7136\u5b58\u5728\u4e92\u8865\u6027\uff0c\u5c06\u5728/\u79bb\u7ebf\u4e1a\u52a1\u6df7\u5408\u90e8\u7f72\u662f\u63d0\u9ad8\u670d\u52a1\u5668\u8d44\u6e90\u5229\u7528\u7387\u7684\u6709\u6548\u9014\u5f84\u3002

                • \u53ef\u4ee5\u5c06\u79bb\u7ebf\u4e1a\u52a1\u6df7\u90e8\u5230\u5728\u7ebf\u4e1a\u52a1\u7684\u670d\u52a1\u5668\u4e0a\uff0c\u8ba9\u79bb\u7ebf\u4e1a\u52a1\u80fd\u591f\u5145\u5206\u5229\u7528\u5728\u7ebf\u4e1a\u52a1\u670d\u52a1\u5668\u7684\u7a7a\u95f2\u8d44\u6e90\uff0c\u63d0\u9ad8\u5728\u7ebf\u4e1a\u52a1\u670d\u52a1\u5668\u8d44\u6e90\u5229\u7528\u7387\uff0c\u5b9e\u73b0\u964d\u672c\u589e\u6548\u3002

                • \u5f53\u4e1a\u52a1\u4e2d\u4e34\u65f6\u9700\u8981\u5927\u91cf\u7684\u8d44\u6e90\uff0c\u8fd9\u4e2a\u65f6\u5019\u53ef\u4ee5\u5c06\u5728\u7ebf\u4e1a\u52a1\u5f39\u6027\u6df7\u90e8\u5230\u79bb\u7ebf\u4e1a\u52a1\u7684\u670d\u52a1\u5668\u4e0a\uff0c\u4f18\u5148\u4fdd\u8bc1\u5728\u7ebf\u4e1a\u52a1\u7684\u8d44\u6e90\u9700\u6c42\uff0c\u4e34\u65f6\u9700\u6c42\u7ed3\u675f\u540e\u518d\u628a\u8d44\u6e90\u5f52\u8fd8\u7ed9\u79bb\u7ebf\u4e1a\u52a1\u3002

                \u5f53\u524d\u4f7f\u7528\u5f00\u6e90\u9879\u76ee Koordinator \u4f5c\u4e3a\u5728\u79bb\u7ebf\u6df7\u90e8\u7684\u89e3\u51b3\u65b9\u6848\u3002

                Koordinator \u662f\u4e00\u4e2a\u57fa\u4e8e QoS \u7684 Kubernetes \u6df7\u5408\u5de5\u4f5c\u8d1f\u8f7d\u8c03\u5ea6\u7cfb\u7edf\u3002 \u5b83\u65e8\u5728\u63d0\u9ad8\u5bf9\u5ef6\u8fdf\u654f\u611f\u7684\u5de5\u4f5c\u8d1f\u8f7d\u548c\u6279\u5904\u7406\u4f5c\u4e1a\u7684\u8fd0\u884c\u65f6\u6548\u7387\u548c\u53ef\u9760\u6027\uff0c\u7b80\u5316\u4e0e\u8d44\u6e90\u76f8\u5173\u7684\u914d\u7f6e\u8c03\u6574\u7684\u590d\u6742\u6027\uff0c\u5e76\u589e\u52a0 Pod \u90e8\u7f72\u5bc6\u5ea6\u4ee5\u63d0\u9ad8\u8d44\u6e90\u5229\u7528\u7387\u3002

                "},{"location":"admin/kpanda/best-practice/co-located/index.html#koordinator-qos","title":"Koordinator QoS","text":"

                The Koordinator scheduling system supports five QoS types:

                QoS Characteristics Description SYSTEM System processes, resource-limited For system services such as DaemonSets: the latency of these services must be guaranteed, but the resource usage of their containers on the node must also be limited so that they do not occupy too many resources LSE (Latency Sensitive Exclusive) Reserves resources and organizes pods of the same QoS to share resources Rarely used; common for middleware-type applications, generally used in dedicated resource pools LSR (Latency Sensitive Reserved) Reserves resources for better determinism Similar to the community's Guaranteed; CPU cores are bound LS (Latency Sensitive) Shares resources, with better elasticity for burst traffic The typical QoS level for microservice workloads, providing better resource elasticity and more flexible resource adjustment BE (Best Effort) Shares resources excluding LSE's; resource quality is limited, and in extreme cases pods may be killed The typical QoS level for batch jobs: stable computing throughput over a period of time, at low resource cost"},{"location":"admin/kpanda/best-practice/co-located/index.html#koordinator-qos-cpu","title":"Koordinator QoS CPU orchestration principles","text":"
                • The Request and Limit of LSE/LSR pods must be equal, and the CPU value must be an integer multiple of 1000.
                • CPUs allocated to an LSE pod are completely exclusive and must not be shared. If the node uses a hyper-threaded architecture, only isolation at the logical-core level is guaranteed, but better isolation can be obtained with the CPUBindPolicyFullPCPUs policy.
                • CPUs allocated to an LSR pod can only be shared with BE pods.
                • LS pods are bound to the shared CPU pool excluding CPUs exclusively owned by LSE/LSR pods.
                • BE pods are bound to all CPUs on the node except those exclusively owned by LSE pods.
                • If the kubelet CPU manager policy is static, already-running K8s Guaranteed pods are equivalent to Koordinator LSR.
                • If the kubelet CPU manager policy is none, already-running K8s Guaranteed pods are equivalent to Koordinator LS.
                • Newly created K8s Guaranteed pods without a specified Koordinator QoS are equivalent to Koordinator LS.
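
                To see which of the last three rules applies on a given node, you can inspect the kubelet's CPU manager policy. A minimal sketch, assuming the common kubelet config file location (the path may differ per installation):

                # print the CPU manager policy of the local kubelet (file path is environment-dependent)\ngrep cpuManagerPolicy /var/lib/kubelet/config.yaml\n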
                "},{"location":"admin/kpanda/best-practice/co-located/index.html#_2","title":"\u5feb\u901f\u4e0a\u624b","text":""},{"location":"admin/kpanda/best-practice/co-located/index.html#_3","title":"\u524d\u63d0\u6761\u4ef6","text":"
                • \u5df2\u7ecf\u90e8\u7f72 AI \u7b97\u529b\u4e2d\u5fc3\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u4e14\u5e73\u53f0\u8fd0\u884c\u6b63\u5e38\u3002
                • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                • \u5f53\u524d\u96c6\u7fa4\u5df2\u5b89\u88c5 koordinator \u5e76\u6b63\u5e38\u8fd0\u884c\uff0c\u5b89\u88c5\u6b65\u9aa4\u53ef\u53c2\u8003 koordinator \u79bb\u7ebf\u5b89\u88c5\u3002
                "},{"location":"admin/kpanda/best-practice/co-located/index.html#_4","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                \u4ee5\u4e0b\u793a\u4f8b\u4e2d\u521b\u5efa4\u4e2a\u526f\u672c\u6570\u4e3a1\u7684 deployment, \u8bbe\u7f6e QoS \u7c7b\u522b\u4e3a LSE, LSR, LS, BE, \u5f85 pod \u521b\u5efa\u5b8c\u6210\u540e\uff0c\u89c2\u5bdf\u5404 pod \u7684 CPU \u5206\u914d\u60c5\u51b5\u3002

                1. \u521b\u5efa\u540d\u79f0\u4e3a nginx-lse \u7684 deployment\uff0cQoS \u7c7b\u522b\u4e3a LSE, yaml \u6587\u4ef6\u5982\u4e0b\u3002

                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nginx-lse\n  labels:\n    app: nginx-lse\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: nginx-lse\n  template:\n    metadata:\n      name: nginx-lse\n      labels:\n        app: nginx-lse\n        koordinator.sh/qosClass: LSE # set the QoS class to LSE\n        # the scheduler will distribute logical CPUs evenly across physical cores\n      annotations:\n          scheduling.koordinator.sh/resource-spec: '{"preferredCPUBindPolicy": "SpreadByPCPUs"}'\n    spec:\n      schedulerName: koord-scheduler # use the koord-scheduler scheduler\n      containers:\n      - name: nginx\n        image: release.daocloud.io/kpanda/nginx:1.25.3-alpine\n        resources:\n          limits:\n            cpu: '2'\n          requests:\n            cpu: '2'\n      priorityClassName: koord-prod\n
                2. Create a deployment named nginx-lsr with QoS class LSR; the YAML file is as follows.

                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nginx-lsr\n  labels:\n    app: nginx-lsr\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: nginx-lsr\n  template:\n    metadata:\n      name: nginx-lsr\n      labels:\n        app: nginx-lsr\n        koordinator.sh/qosClass: LSR # set the QoS class to LSR\n        # the scheduler will distribute logical CPUs evenly across physical cores\n      annotations:\n          scheduling.koordinator.sh/resource-spec: '{"preferredCPUBindPolicy": "SpreadByPCPUs"}'\n    spec:\n      schedulerName: koord-scheduler # use the koord-scheduler scheduler\n      containers:\n      - name: nginx\n        image: release.daocloud.io/kpanda/nginx:1.25.3-alpine\n        resources:\n          limits:\n            cpu: '2'\n          requests:\n            cpu: '2'\n      priorityClassName: koord-prod\n
                3. Create a deployment named nginx-ls with QoS class LS; the YAML file is as follows.

                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nginx-ls\n  labels:\n    app: nginx-ls\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: nginx-ls\n  template:\n    metadata:\n      name: nginx-ls\n      labels:\n        app: nginx-ls\n        koordinator.sh/qosClass: LS # set the QoS class to LS\n        # the scheduler will distribute logical CPUs evenly across physical cores\n      annotations:\n          scheduling.koordinator.sh/resource-spec: '{"preferredCPUBindPolicy": "SpreadByPCPUs"}'\n    spec:\n      schedulerName: koord-scheduler \n      containers:\n      - name: nginx\n        image: release.daocloud.io/kpanda/nginx:1.25.3-alpine\n        resources:\n          limits:\n            cpu: '2'\n          requests:\n            cpu: '2'\n      priorityClassName: koord-prod\n
                4. Create a deployment named nginx-be with QoS class BE; the YAML file is as follows.

                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nginx-be\n  labels:\n    app: nginx-be\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: nginx-be\n  template:\n    metadata:\n      name: nginx-be\n      labels:\n        app: nginx-be\n        koordinator.sh/qosClass: BE # set the QoS class to BE\n        # the scheduler will distribute logical CPUs evenly across physical cores\n      annotations:\n          scheduling.koordinator.sh/resource-spec: '{"preferredCPUBindPolicy": "SpreadByPCPUs"}'\n    spec:\n      schedulerName: koord-scheduler # use the koord-scheduler scheduler\n      containers:\n      - name: nginx\n        image: release.daocloud.io/kpanda/nginx:1.25.3-alpine\n        resources:\n          limits:\n            kubernetes.io/batch-cpu: 2k\n          requests:\n            kubernetes.io/batch-cpu: 2k\n      priorityClassName: koord-batch\n

                  Check the pod status; once the pods are Running, view each pod's CPU allocation.

                  [root@controller-node-1 ~]# kubectl get pod\nNAME                         READY   STATUS    RESTARTS   AGE\nnginx-be-577c946b89-js2qn    1/1     Running   0          4h41m\nnginx-ls-54746c8cf8-rh4b7    1/1     Running   0          4h51m\nnginx-lse-56c9cd77f5-cdqbd   1/1     Running   0          4h41m\nnginx-lsr-c7fdb97d8-b58h8    1/1     Running   0          4h51m\n

                  This example uses a get_cpuset.sh script to view each Pod's cpuset information; the script is as follows.

                  #!/bin/bash\n\n# Take the pod name and namespace as input parameters\nPOD_NAME=$1\nNAMESPACE=${2-default}\n\n# Ensure a pod name and namespace were provided\nif [ -z "$POD_NAME" ] || [ -z "$NAMESPACE" ]; then\n    echo "Usage: $0 <pod_name> <namespace>"\n    exit 1\nfi\n\n# Use kubectl to get the pod's UID, QoS class and container ID\nPOD_INFO=$(kubectl get pod "$POD_NAME" -n "$NAMESPACE" -o jsonpath="{.metadata.uid} {.status.qosClass} {.status.containerStatuses[0].containerID}")\nread -r POD_UID POD_QOS CONTAINER_ID <<< "$POD_INFO"\n\n# Check that the UID and QoS class were obtained successfully\nif [ -z "$POD_UID" ] || [ -z "$POD_QOS" ]; then\n    echo "Failed to get UID or QoS Class for Pod $POD_NAME in namespace $NAMESPACE."\n    exit 1\nfi\n\nPOD_UID="${POD_UID//-/_}"\nCONTAINER_ID="${CONTAINER_ID//containerd:\/\//cri-containerd-}".scope\n\n# Build the cgroup path based on the QoS class\ncase "$POD_QOS" in\n    Guaranteed)\n        QOS_PATH="kubepods-pod$POD_UID.slice"\n        ;;\n    Burstable)\n        QOS_PATH="kubepods-burstable.slice/kubepods-burstable-pod$POD_UID.slice"\n        ;;\n    BestEffort)\n        QOS_PATH="kubepods-besteffort.slice/kubepods-besteffort-pod$POD_UID.slice"\n        ;;\n    *)\n        echo "Unknown QoS Class: $POD_QOS"\n        exit 1\n        ;;\nesac\n\nCPUGROUP_PATH="/sys/fs/cgroup/kubepods.slice/$QOS_PATH"\n\n# Check whether the cgroup path exists\nif [ ! -d "$CPUGROUP_PATH" ]; then\n    echo "CPUs cgroup path for Pod $POD_NAME does not exist: $CPUGROUP_PATH"\n    exit 1\nfi\n\n# Read and print the cpuset value\nCPUSET=$(cat "$CPUGROUP_PATH/$CONTAINER_ID/cpuset.cpus")\necho "CPU set for Pod $POD_NAME ($POD_QOS QoS): $CPUSET"\n

Check each Pod's cpuset allocation.

1. The Pod with QoS type LSE exclusively occupies cores 0-1 and does not share CPUs with Pods of other types.

    [root@controller-node-1 ~]# ./get_cpuset.sh nginx-lse-56c9cd77f5-cdqbd
    CPU set for Pod nginx-lse-56c9cd77f5-cdqbd (Burstable QoS): 0-1

2. The Pod with QoS type LSR is bound to CPU cores 2-3, which it may share with BE Pods.

    [root@controller-node-1 ~]# ./get_cpuset.sh nginx-lsr-c7fdb97d8-b58h8
    CPU set for Pod nginx-lsr-c7fdb97d8-b58h8 (Burstable QoS): 2-3

3. The Pod with QoS type LS uses CPU cores 4-15, the shared CPU pool outside the cores exclusively held by LSE/LSR Pods.

    [root@controller-node-1 ~]# ./get_cpuset.sh nginx-ls-54746c8cf8-rh4b7
    CPU set for Pod nginx-ls-54746c8cf8-rh4b7 (Burstable QoS): 4-15

4. The Pod with QoS type BE can use any CPUs except those exclusively held by LSE Pods.

    [root@controller-node-1 ~]# ./get_cpuset.sh nginx-be-577c946b89-js2qn
    CPU set for Pod nginx-be-577c946b89-js2qn (BestEffort QoS): 2,4-12
                "},{"location":"admin/kpanda/best-practice/co-located/install.html","title":"Koordinator \u79bb\u7ebf\u5b89\u88c5","text":"

Koordinator is a QoS-based scheduling system for hybrid workloads on Kubernetes. It aims to improve the runtime efficiency and reliability of latency-sensitive workloads and batch jobs, simplify the complexity of resource-related configuration tuning, and increase Pod deployment density to improve resource utilization.

The AI computing center ships a Koordinator v1.5.0 offline package out of the box.

This article describes how to deploy Koordinator offline.

                "},{"location":"admin/kpanda/best-practice/co-located/install.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                1. \u7528\u6237\u5df2\u7ecf\u5728\u5e73\u53f0\u4e0a\u5b89\u88c5\u4e86 v0.20.0 \u53ca\u4ee5\u4e0a\u7248\u672c\u7684 addon \u79bb\u7ebf\u5305\u3002
                2. \u5f85\u5b89\u88c5\u96c6\u7fa4\u7684 Kubernetes version >= 1.18.
                3. \u4e3a\u4e86\u6700\u597d\u7684\u4f53\u9a8c\uff0c\u63a8\u8350\u4f7f\u7528 linux kernel 4.19 \u6216\u8005\u66f4\u9ad8\u7248\u672c\u3002
                "},{"location":"admin/kpanda/best-practice/co-located/install.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                \u53c2\u8003\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 Koordinator \u63d2\u4ef6\u3002

                1. \u767b\u5f55\u5e73\u53f0\uff0c\u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 -> \u5f85\u5b89\u88c5 Koordinator \u7684\u96c6\u7fa4 -> \u8fdb\u5165\u96c6\u7fa4\u8be6\u60c5\u3002

                2. \u5728 Helm \u6a21\u677f \u9875\u9762\uff0c\u9009\u62e9 \u5168\u90e8\u4ed3\u5e93 \uff0c\u641c\u7d22 koordinator \u3002

                3. \u9009\u62e9 koordinator \uff0c\u70b9\u51fb \u5b89\u88c5 \u3002

                4. \u8fdb\u5165 koordinator \u5b89\u88c5\u9875\u9762\uff0c\u70b9\u51fb \u786e\u5b9a\uff0c\u4f7f\u7528\u9ed8\u8ba4\u914d\u7f6e\u5b89\u88c5 koordinator\u3002

                5. \u67e5\u770b koordinator-system \u547d\u540d\u7a7a\u95f4\u4e0b\u7684 Pod \u662f\u5426\u6b63\u5e38\u8fd0\u884c
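
    A minimal sketch of that check, assuming Koordinator was installed with the default settings into the koordinator-system namespace:

    # All Pods should eventually report Running
    kubectl get pods -n koordinator-system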

                "},{"location":"admin/kpanda/clusterops/cluster-oversold.html","title":"\u96c6\u7fa4\u52a8\u6001\u8d44\u6e90\u8d85\u5356","text":"

Many workloads today show peak-and-valley usage patterns. To guarantee service performance and stability, services are usually deployed with resources requested for peak demand. Peaks, however, can be very short, so resources are wasted during off-peak periods. Cluster resource oversubscription puts these requested-but-unused resources (the difference between the requested amount and the actual usage) to work, improving cluster resource utilization and reducing waste.

This article mainly describes how to use the dynamic cluster resource oversubscription feature.

                "},{"location":"admin/kpanda/clusterops/cluster-oversold.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 Cluster Admin \uff0c \u8be6\u60c5\u53ef\u53c2\u8003\u96c6\u7fa4\u6388\u6743\u3002
                "},{"location":"admin/kpanda/clusterops/cluster-oversold.html#_3","title":"\u5f00\u542f\u96c6\u7fa4\u8d85\u5356","text":"
                1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762

                2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u96c6\u7fa4\u8fd0\u7ef4 -> \u96c6\u7fa4\u8bbe\u7f6e \uff0c\u7136\u540e\u9009\u62e9 \u9ad8\u7ea7\u914d\u7f6e \u9875\u7b7e

                3. \u6253\u5f00\u96c6\u7fa4\u8d85\u5356\uff0c\u8bbe\u7f6e\u8d85\u5356\u6bd4

                  • \u82e5\u672a\u5b89\u88c5 cro-operator \u63d2\u4ef6\uff0c\u70b9\u51fb \u7acb\u5373\u5b89\u88c5 \u6309\u94ae\uff0c\u5b89\u88c5\u6d41\u7a0b\u53c2\u8003\u7ba1\u7406 Helm \u5e94\u7528
                  • \u82e5\u5df2\u5b89\u88c5 cro-operator \u63d2\u4ef6\uff0c\u6253\u5f00\u96c6\u7fa4\u8d85\u5356\u5f00\u5173\uff0c\u5219\u53ef\u4ee5\u5f00\u59cb\u4f7f\u7528\u96c6\u7fa4\u8d85\u5356\u529f\u80fd\u3002

                  Note

                  \u9700\u8981\u5728\u96c6\u7fa4\u4e0b\u5bf9\u5e94\u7684 namespace \u6253\u4e0a\u5982\u4e0b\u6807\u7b7e\uff0c\u96c6\u7fa4\u8d85\u5356\u7b56\u7565\u624d\u80fd\u751f\u6548\u3002

                  clusterresourceoverrides.admission.autoscaling.openshift.io/enabled: \"true\"\n
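
    A hedged sketch of applying this label from the command line; <namespace> is a placeholder for the namespace that should participate in oversubscription:

    kubectl label namespace <namespace> clusterresourceoverrides.admission.autoscaling.openshift.io/enabled=true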

                "},{"location":"admin/kpanda/clusterops/cluster-oversold.html#_4","title":"\u4f7f\u7528\u96c6\u7fa4\u8d85\u5356","text":"

                \u8bbe\u7f6e\u597d\u96c6\u7fa4\u52a8\u6001\u8d44\u6e90\u8d85\u5356\u6bd4\u540e\uff0c\u4f1a\u5728\u5de5\u4f5c\u8d1f\u8f7d\u8fd0\u884c\u65f6\u751f\u6548\u3002\u4e0b\u6587\u4ee5 niginx \u4e3a\u4f8b\uff0c\u9a8c\u8bc1\u4f7f\u7528\u8d44\u6e90\u8d85\u5356\u80fd\u529b\u3002

                1. \u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d nginx \u5e76\u8bbe\u7f6e\u5bf9\u5e94\u7684\u8d44\u6e90\u9650\u5236\u503c\uff0c\u521b\u5efa\u6d41\u7a0b\u53c2\u8003\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\uff08Deployment\uff09

                2. \u67e5\u770b\u5de5\u4f5c\u8d1f\u8f7d\u7684 Pod \u8d44\u6e90\u7533\u8bf7\u503c\u4e0e\u9650\u5236\u503c\u7684\u6bd4\u503c\u662f\u5426\u7b26\u5408\u8d85\u552e\u6bd4
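
    A hedged way to perform that check, assuming the workload runs in the default namespace; <nginx-pod-name> is a placeholder for the actual Pod name:

    # Show the requests/limits actually set on the Pod after admission
    kubectl get pod <nginx-pod-name> -o jsonpath='{.spec.containers[0].resources}'

    With oversubscription in effect, the request values should reflect the configured oversell ratio relative to the limits.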

                "},{"location":"admin/kpanda/clusterops/cluster-settings.html","title":"\u96c6\u7fa4\u8bbe\u7f6e","text":"

                \u96c6\u7fa4\u8bbe\u7f6e\u7528\u4e8e\u4e3a\u60a8\u7684\u96c6\u7fa4\u81ea\u5b9a\u4e49\u9ad8\u7ea7\u7279\u6027\u8bbe\u7f6e\uff0c\u5305\u62ec\u662f\u5426\u542f\u7528 GPU\u3001Helm \u4ed3\u5e93\u5237\u65b0\u5468\u671f\u3001Helm \u64cd\u4f5c\u8bb0\u5f55\u4fdd\u7559\u7b49\u3002

                • \u542f\u7528 GPU\uff1a\u9700\u8981\u9884\u5148\u5728\u96c6\u7fa4\u4e0a\u5b89\u88c5 GPU \u5361\u53ca\u5bf9\u5e94\u9a71\u52a8\u63d2\u4ef6\u3002

                  \u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u6700\u8fd1\u64cd\u4f5c -> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \u3002

                • Helm \u64cd\u4f5c\u57fa\u7840\u955c\u50cf\u3001\u4ed3\u5e93\u5237\u65b0\u5468\u671f\u3001\u64cd\u4f5c\u8bb0\u5f55\u4fdd\u7559\u6761\u6570\u3001\u662f\u5426\u5f00\u542f\u96c6\u7fa4\u5220\u9664\u4fdd\u62a4\uff08\u5f00\u542f\u540e\u96c6\u7fa4\u5c06\u4e0d\u80fd\u76f4\u63a5\u5378\u8f7d\uff09

                "},{"location":"admin/kpanda/clusterops/latest-operations.html","title":"\u6700\u8fd1\u64cd\u4f5c","text":"

                \u5728\u8be5\u9875\u9762\u53ef\u4ee5\u67e5\u770b\u6700\u8fd1\u7684\u96c6\u7fa4\u64cd\u4f5c\u8bb0\u5f55\u548c Helm \u64cd\u4f5c\u8bb0\u5f55\uff0c\u4ee5\u53ca\u5404\u9879\u64cd\u4f5c\u7684 YAML \u6587\u4ef6\u548c\u65e5\u5fd7\uff0c\u4e5f\u53ef\u4ee5\u5220\u9664\u67d0\u4e00\u6761\u8bb0\u5f55\u3002

                \u8bbe\u7f6e Helm \u64cd\u4f5c\u7684\u4fdd\u7559\u6761\u6570\uff1a

                \u7cfb\u7edf\u9ed8\u8ba4\u4fdd\u7559\u6700\u8fd1 100 \u6761 Helm \u64cd\u4f5c\u8bb0\u5f55\u3002\u82e5\u4fdd\u7559\u6761\u6570\u592a\u591a\uff0c\u53ef\u80fd\u4f1a\u9020\u6210\u6570\u636e\u5197\u4f59\uff0c\u4fdd\u7559\u6761\u6570\u592a\u5c11\u53ef\u80fd\u4f1a\u9020\u6210\u60a8\u6240\u9700\u8981\u7684\u5173\u952e\u64cd\u4f5c\u8bb0\u5f55\u7684\u7f3a\u5931\u3002\u9700\u8981\u6839\u636e\u5b9e\u9645\u60c5\u51b5\u8bbe\u7f6e\u5408\u7406\u7684\u4fdd\u7559\u6570\u91cf\u3002\u5177\u4f53\u6b65\u9aa4\u5982\u4e0b\uff1a

                1. \u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u6700\u8fd1\u64cd\u4f5c -> Helm \u64cd\u4f5c -> \u8bbe\u7f6e\u4fdd\u7559\u6761\u6570 \u3002

                2. \u8bbe\u7f6e\u9700\u8981\u4fdd\u7559\u591a\u5c11\u6761 Helm \u64cd\u4f5c\u8bb0\u5f55\uff0c\u5e76\u70b9\u51fb \u786e\u5b9a \u3002

                "},{"location":"admin/kpanda/clusters/access-cluster.html","title":"\u8bbf\u95ee\u96c6\u7fa4","text":"

                \u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\u63a5\u5165\u6216\u521b\u5efa\u7684\u96c6\u7fa4\uff0c\u4e0d\u4ec5\u53ef\u4ee5\u901a\u8fc7 UI \u754c\u9762\u76f4\u63a5\u8bbf\u95ee\uff0c\u4e5f\u53ef\u4ee5\u901a\u8fc7\u5176\u4ed6\u4e24\u79cd\u65b9\u5f0f\u8fdb\u884c\u8bbf\u95ee\u63a7\u5236\uff1a

                • \u901a\u8fc7 CloudShell \u5728\u7ebf\u8bbf\u95ee
                • \u4e0b\u8f7d\u96c6\u7fa4\u8bc1\u4e66\u540e\u901a\u8fc7 kubectl \u8fdb\u884c\u8bbf\u95ee

                Note

                \u8bbf\u95ee\u96c6\u7fa4\u65f6\uff0c\u7528\u6237\u5e94\u5177\u6709 Cluster Admin \u6743\u9650\u6216\u66f4\u9ad8\u6743\u9650\u3002

                "},{"location":"admin/kpanda/clusters/access-cluster.html#cloudshell","title":"\u901a\u8fc7 CloudShell \u8bbf\u95ee","text":"
                1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9009\u62e9\u9700\u8981\u901a\u8fc7 CloudShell \u8bbf\u95ee\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u56fe\u6807\u5e76\u5728\u4e0b\u62c9\u5217\u8868\u4e2d\u70b9\u51fb \u63a7\u5236\u53f0 \u3002

                2. \u5728 CloudShell \u63a7\u5236\u53f0\u6267\u884c kubectl get node \u547d\u4ee4\uff0c\u9a8c\u8bc1 CloudShell \u4e0e\u96c6\u7fa4\u7684\u8fde\u901a\u6027\u3002\u5982\u56fe\uff0c\u63a7\u5236\u53f0\u5c06\u8fd4\u56de\u96c6\u7fa4\u4e0b\u7684\u8282\u70b9\u4fe1\u606f\u3002
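
    Illustrative only; the node names, ages, and versions below are placeholders rather than output from a real cluster:

    kubectl get node
    # NAME                STATUS   ROLES           AGE   VERSION
    # controller-node-1   Ready    control-plane   10d   v1.28.4
    # worker-node-1       Ready    <none>          10d   v1.28.4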

Now you can access and manage the cluster through CloudShell.

                "},{"location":"admin/kpanda/clusters/access-cluster.html#kubectl","title":"\u901a\u8fc7 kubectl \u8bbf\u95ee","text":"

                \u901a\u8fc7\u672c\u5730\u8282\u70b9\u8bbf\u95ee\u5e76\u7ba1\u7406\u4e91\u7aef\u96c6\u7fa4\u65f6\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u6761\u4ef6\uff1a

                • \u672c\u5730\u8282\u70b9\u548c\u4e91\u7aef\u96c6\u7fa4\u7684\u7f51\u7edc\u4e92\u8054\u4e92\u901a\u3002
                • \u5df2\u7ecf\u5c06\u96c6\u7fa4\u8bc1\u4e66\u4e0b\u8f7d\u5230\u4e86\u672c\u5730\u8282\u70b9\u3002
                • \u672c\u5730\u8282\u70b9\u5df2\u7ecf\u5b89\u88c5\u4e86 kubectl \u5de5\u5177\u3002\u5173\u4e8e\u8be6\u7ec6\u7684\u5b89\u88c5\u65b9\u5f0f\uff0c\u8bf7\u53c2\u9605\u5b89\u88c5 kubectl\u3002

                \u6ee1\u8db3\u4e0a\u8ff0\u6761\u4ef6\u540e\uff0c\u6309\u7167\u4e0b\u65b9\u6b65\u9aa4\u4ece\u672c\u5730\u8bbf\u95ee\u4e91\u7aef\u96c6\u7fa4\uff1a

                1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9009\u62e9\u9700\u8981\u4e0b\u8f7d\u8bc1\u4e66\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \uff0c\u5e76\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u70b9\u51fb \u8bc1\u4e66\u83b7\u53d6 \u3002

                2. \u9009\u62e9\u8bc1\u4e66\u6709\u6548\u671f\u5e76\u70b9\u51fb \u4e0b\u8f7d\u8bc1\u4e66 \u3002

                3. \u6253\u5f00\u4e0b\u8f7d\u597d\u7684\u96c6\u7fa4\u8bc1\u4e66\uff0c\u5c06\u8bc1\u4e66\u5185\u5bb9\u590d\u5236\u81f3\u672c\u5730\u8282\u70b9\u7684 config \u6587\u4ef6\u3002

                  kubectl \u5de5\u5177\u9ed8\u8ba4\u4f1a\u4ece\u672c\u5730\u8282\u70b9\u7684 $HOME/.kube \u76ee\u5f55\u4e0b\u67e5\u627e\u540d\u4e3a config \u7684\u6587\u4ef6\u3002\u8be5\u6587\u4ef6\u5b58\u50a8\u4e86\u76f8\u5173\u96c6\u7fa4\u7684\u8bbf\u95ee\u51ed\u8bc1\uff0ckubectl \u53ef\u4ee5\u51ed\u8be5\u914d\u7f6e\u6587\u4ef6\u8fde\u63a5\u81f3\u96c6\u7fa4\u3002
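
    A minimal sketch of that step, assuming the downloaded certificate content was saved locally as cluster.config (back up any existing config first):

    mkdir -p $HOME/.kube
    cp cluster.config $HOME/.kube/config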

4. Run the following command on the local node to verify connectivity to the cluster:

    kubectl get pod -n default

    The expected output is similar to:

    NAME                            READY   STATUS      RESTARTS    AGE
    dao-2048-2048-58c7f7fc5-mq7h4   1/1     Running     0           30h

Now you can access and manage the cluster locally through kubectl.

                "},{"location":"admin/kpanda/clusters/cluster-role.html","title":"\u96c6\u7fa4\u89d2\u8272","text":"

                \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u57fa\u4e8e\u96c6\u7fa4\u7684\u4e0d\u540c\u529f\u80fd\u5b9a\u4f4d\u5bf9\u96c6\u7fa4\u8fdb\u884c\u4e86\u89d2\u8272\u5206\u7c7b\uff0c\u5e2e\u52a9\u7528\u6237\u66f4\u597d\u5730\u7ba1\u7406 IT \u57fa\u7840\u8bbe\u65bd\u3002

                "},{"location":"admin/kpanda/clusters/cluster-role.html#_2","title":"\u5168\u5c40\u670d\u52a1\u96c6\u7fa4","text":"

                \u6b64\u96c6\u7fa4\u7528\u4e8e\u8fd0\u884c\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7ec4\u4ef6\uff0c\u4f8b\u5982\u5bb9\u5668\u7ba1\u7406\u3001\u5168\u5c40\u7ba1\u7406\u3001\u53ef\u89c2\u6d4b\u6027\u3001\u955c\u50cf\u4ed3\u5e93\u7b49\u3002 \u4e00\u822c\u4e0d\u627f\u8f7d\u4e1a\u52a1\u8d1f\u8f7d\u3002

Supported Features                  Description
K8s version                         1.22+
Operating system                    RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD
Full cluster lifecycle management   Supported
K8s resource management             Supported
Cloud-native storage                Supported
Cloud-native networking             Calico, Cilium, Multus, and other CNIs
Policy management                   Supports network policies, quota policies, resource limits, disaster recovery policies, and security policies

"},{"location":"admin/kpanda/clusters/cluster-role.html#_3","title":"Management Cluster","text":"

This cluster is used to manage worker clusters and generally does not carry business workloads.

Supported Features                  Description
K8s version                         1.22+
Operating system                    RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD
Full cluster lifecycle management   Supported
K8s resource management             Supported
Cloud-native storage                Supported
Cloud-native networking             Calico, Cilium, Multus, and other CNIs
Policy management                   Supports network policies, quota policies, resource limits, disaster recovery policies, and security policies

"},{"location":"admin/kpanda/clusters/cluster-role.html#_4","title":"Worker Cluster","text":"

This is a cluster created through container management and is mainly used to carry business workloads. It is managed by the management cluster.

Supported Features                  Description
K8s version                         K8s 1.22 and above
Operating system                    RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD
Full cluster lifecycle management   Supported
K8s resource management             Supported
Cloud-native storage                Supported
Cloud-native networking             Calico, Cilium, Multus, and other CNIs
Policy management                   Supports network policies, quota policies, resource limits, disaster recovery policies, and security policies

"},{"location":"admin/kpanda/clusters/cluster-role.html#_5","title":"Integrated Cluster","text":"

This cluster is used to integrate existing standard K8s clusters, including but not limited to self-built clusters in local data centers, clusters provided by public cloud vendors, clusters provided by private cloud vendors, edge clusters, Xinchuang (IT application innovation) clusters, and heterogeneous clusters. It is mainly used to carry business workloads.

Supported Features                  Description
K8s version                         1.18+
Supported vendors                   VMware Tanzu, Amazon EKS, Redhat Openshift, SUSE Rancher, Alibaba ACK, Huawei CCE, Tencent TKE, standard K8s clusters, Suanfeng AI computing platform
Full cluster lifecycle management   Not supported
K8s resource management             Supported
Cloud-native storage                Supported
Cloud-native networking             Depends on the network mode of the integrated cluster's distribution
Policy management                   Supports network policies, quota policies, resource limits, disaster recovery policies, and security policies

Note

A cluster can hold multiple cluster roles; for example, a cluster can be a global service cluster and at the same time a management cluster or worker cluster.

                "},{"location":"admin/kpanda/clusters/cluster-scheduler-plugin.html","title":"\u5982\u4f55\u5728\u96c6\u7fa4\u4e2d\u90e8\u7f72\u7b2c\u4e8c\u8c03\u5ea6\u5668 scheduler-plugins","text":"

                \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5728\u96c6\u7fa4\u4e2d\u90e8\u7f72\u7b2c\u4e8c\u4e2a\u8c03\u5ea6\u5668 scheduler-plugins\u3002

                "},{"location":"admin/kpanda/clusters/cluster-scheduler-plugin.html#scheduler-plugins_1","title":"\u4e3a\u4ec0\u4e48\u9700\u8981 scheduler-plugins\uff1f","text":"

                \u901a\u8fc7\u5e73\u53f0\u521b\u5efa\u7684\u96c6\u7fa4\u4e2d\u4f1a\u5b89\u88c5 K8s \u539f\u751f\u7684\u8c03\u5ea6\u5668\uff0c\u4f46\u662f\u539f\u751f\u7684\u8c03\u5ea6\u5668\u5b58\u5728\u5f88\u591a\u7684\u5c40\u9650\u6027\uff1a

                • \u539f\u751f\u7684\u8c03\u5ea6\u5668\u65e0\u6cd5\u6ee1\u8db3\u8c03\u5ea6\u9700\u6c42\uff0c\u4f60\u53ef\u4ee5\u9009\u62e9\u4f7f\u7528 CoScheduling\u3001 CapacityScheduling \u7b49 scheduler-plugins \u63d2\u4ef6\u3002
                • \u5728\u7279\u6b8a\u7684\u573a\u666f\uff0c\u9700\u8981\u65b0\u7684\u8c03\u5ea6\u5668\u6765\u5b8c\u6210\u8c03\u5ea6\u4efb\u52a1\u800c\u4e0d\u5f71\u54cd\u539f\u751f\u8c03\u5ea6\u5668\u7684\u6d41\u7a0b\u3002
                • \u533a\u5206\u4e0d\u540c\u529f\u80fd\u7684\u8c03\u5ea6\u5668\uff0c\u901a\u8fc7\u5207\u6362\u8c03\u5ea6\u5668\u540d\u79f0\u6765\u5b9e\u73b0\u4e0d\u540c\u7684\u8c03\u5ea6\u573a\u666f\u3002

                \u672c\u6587\u4ee5\u4f7f\u7528 vgpu \u8c03\u5ea6\u5668\u7684\u540c\u65f6\uff0c\u60f3\u7ed3\u5408 scheduler-plugins \u7684 coscheduling \u63d2\u4ef6\u80fd\u529b\u7684\u573a\u666f\u4e3a\u793a\u4f8b\uff0c\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5\u5e76\u4f7f\u7528 scheduler-plugins\u3002

                "},{"location":"admin/kpanda/clusters/cluster-scheduler-plugin.html#scheduler-plugins_2","title":"\u5b89\u88c5 scheduler-plugins","text":""},{"location":"admin/kpanda/clusters/cluster-scheduler-plugin.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                • kubean \u662f\u5728 v0.13.0 \u7248\u672c\u63a8\u51fa\u7684\u65b0\u529f\u80fd\uff0c\u9009\u62e9\u7ba1\u7406\u96c6\u7fa4\u65f6\u8bf7\u786e\u4fdd\u7248\u672c\u4e0d\u4f4e\u4e8e\u6b64\u7248\u672c\u3002
                • \u5b89\u88c5 scheduler-plugins \u7248\u672c\u4e3a v0.27.8\uff0c\u8bf7\u786e\u4fdd\u96c6\u7fa4\u7248\u672c\u662f\u5426\u4e0e\u5b83\u517c\u5bb9\u3002 \u53c2\u8003\u6587\u6863 Compatibility Matrix\u3002
                "},{"location":"admin/kpanda/clusters/cluster-scheduler-plugin.html#_2","title":"\u5b89\u88c5\u6d41\u7a0b","text":"
                1. \u5728 \u521b\u5efa\u96c6\u7fa4 -> \u9ad8\u7ea7\u914d\u7f6e -> \u81ea\u5b9a\u4e49\u53c2\u6570 \u4e2d\u6dfb\u52a0 scheduler-plugins \u53c2\u6570

                  scheduler_plugins_enabled:true\nscheduler_plugins_plugin_config:\n  - name: Coscheduling\n    args:\n      permitWaitingTimeSeconds: 10 # default is 60\n

    Parameter description:

    • When scheduler_plugins_enabled is set to true, the scheduler-plugins capability is enabled.
    • You can enable or disable specific plugins with the scheduler_plugins_enabled_plugins or scheduler_plugins_disabled_plugins options. See the official K8s plugin names.
    • To set parameters for a specific plugin, configure scheduler_plugins_plugin_config, e.g. set the permitWaitingTimeSeconds parameter of coscheduling. See the official K8s plugin configuration, and the sketch after this list.
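
    A hedged sketch of a fuller custom-parameter block combining these options; the plugin selection here is illustrative, not a recommendation:

    scheduler_plugins_enabled: true
    scheduler_plugins_enabled_plugins:   # illustrative plugin list
      - Coscheduling
      - CapacityScheduling
    scheduler_plugins_plugin_config:
      - name: Coscheduling
        args:
          permitWaitingTimeSeconds: 10 # default is 60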
2. After the cluster is created successfully, the system automatically installs the scheduler-plugins and controller component workloads; their status can be viewed among the stateless workloads of the corresponding cluster.

                "},{"location":"admin/kpanda/clusters/cluster-scheduler-plugin.html#scheduler-plugins_3","title":"\u4f7f\u7528 scheduler-plugins","text":"

                \u4ee5\u4e0b\u4ee5\u4f7f\u7528 vgpu \u8c03\u5ea6\u5668\u7684\u540c\u65f6\uff0c\u60f3\u7ed3\u5408 scheduler-plugins \u7684 coscheduling \u63d2\u4ef6\u80fd\u529b\u573a\u666f\u4e3a\u793a\u4f8b\uff0c\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528 scheduler-plugins\u3002

                1. \u5728 Helm \u6a21\u677f\u4e2d\u5b89\u88c5 vgpu\uff0c\u8bbe\u7f6e values.yaml \u53c2\u6570\u3002

                  • schedulerName: scheduler-plugins-scheduler\uff0c\u8fd9\u662f kubean \u9ed8\u8ba4\u5b89\u88c5\u7684 scheduler-plugins \u7684 scheduler \u540d\u79f0\uff0c\u76ee\u524d\u4e0d\u80fd\u4fee\u6539\u3002
                  • scheduler.kubeScheduler.enabled: false\uff0c\u4e0d\u5b89\u88c5 kube-scheduler\uff0c\u5c06 vgpu-scheduler \u4f5c\u4e3a\u5355\u72ec\u7684 extender\u3002
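
    Written out as a values.yaml fragment, under the assumption that the chart exposes these keys exactly as the dotted paths above suggest:

    schedulerName: scheduler-plugins-scheduler  # scheduler name installed by kubean; currently cannot be changed
    scheduler:
      kubeScheduler:
        enabled: false  # skip kube-scheduler; vgpu-scheduler runs as a separate extender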
2. Extend vgpu-scheduler on scheduler-plugins.

    [root@master01 charts]# kubectl get cm -n scheduler-plugins scheduler-config -ojsonpath="{.data.scheduler-config\.yaml}"

    apiVersion: kubescheduler.config.k8s.io/v1
    kind: KubeSchedulerConfiguration
    leaderElection:
      leaderElect: false
    profiles:
      # Compose all plugins in one profile
      - schedulerName: scheduler-plugins-scheduler
        plugins:
          multiPoint:
            enabled:
              - name: Coscheduling
              - name: CapacityScheduling
              - name: NodeResourceTopologyMatch
              - name: NodeResourcesAllocatable
            disabled:
              - name: PrioritySort
    pluginConfig:
      - args:
          permitWaitingTimeSeconds: 10
        name: Coscheduling

    Modify the scheduler-config configmap of scheduler-plugins as follows:

    [root@master01 charts]# kubectl get cm -n scheduler-plugins scheduler-config -ojsonpath="{.data.scheduler-config\.yaml}"

    apiVersion: kubescheduler.config.k8s.io/v1
    kind: KubeSchedulerConfiguration
    leaderElection:
      leaderElect: false
    profiles:
      # Compose all plugins in one profile
      - schedulerName: scheduler-plugins-scheduler
        plugins:
          multiPoint:
            enabled:
              - name: Coscheduling
              - name: CapacityScheduling
              - name: NodeResourceTopologyMatch
              - name: NodeResourcesAllocatable
            disabled:
              - name: PrioritySort
    pluginConfig:
      - args:
          permitWaitingTimeSeconds: 10
        name: Coscheduling
    extenders:
      - urlPrefix: "${urlPrefix}"
        filterVerb: filter
        bindVerb: bind
        nodeCacheCapable: true
        ignorable: true
        httpTimeout: 30s
        weight: 1
        enableHTTPS: true
        tlsConfig:
          insecure: true
        managedResources:
          - name: nvidia.com/vgpu
            ignoredByScheduler: true
          - name: nvidia.com/gpumem
            ignoredByScheduler: true
          - name: nvidia.com/gpucores
            ignoredByScheduler: true
          - name: nvidia.com/gpumem-percentage
            ignoredByScheduler: true
          - name: nvidia.com/priority
            ignoredByScheduler: true
          - name: cambricon.com/mlunum
            ignoredByScheduler: true
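
    One hedged way to make that change interactively, assuming your kubeconfig points at the target cluster:

    kubectl edit configmap scheduler-config -n scheduler-plugins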
3. After vgpu-scheduler is installed, the system automatically creates a svc; urlPrefix specifies the URL of that svc.

    Note

    • svc refers to the Pod service workload. In the namespace where the nvidia-vgpu plugin is installed, you can obtain the external access information for port 443 with the following command.

      kubectl get svc -n ${namespace}

    • The urlPrefix format is https://${ip address}:${port} (a sketch of assembling it follows this note).
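
    A sketch of assembling urlPrefix from the Service, where <namespace> and <vgpu-scheduler-svc> are placeholders you must replace with the actual names:

    # Read the Service's cluster IP and compose the extender URL
    SVC_IP=$(kubectl get svc -n <namespace> <vgpu-scheduler-svc> -o jsonpath='{.spec.clusterIP}')
    echo "urlPrefix: https://${SVC_IP}:443"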

4. Restart the scheduler Pod of scheduler-plugins to load the new configuration file.

    Note

    When creating a vgpu application you do not need to specify a scheduler name; the vgpu-scheduler Webhook automatically changes the Scheduler name to scheduler-plugins-scheduler, so there is no need to set it manually.
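
    A hedged example of the restart, assuming the scheduler runs as a Deployment named scheduler-plugins-scheduler in the scheduler-plugins namespace (verify the real name with kubectl get deploy -n scheduler-plugins first):

    kubectl -n scheduler-plugins rollout restart deployment scheduler-plugins-scheduler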

                "},{"location":"admin/kpanda/clusters/cluster-status.html","title":"\u96c6\u7fa4\u72b6\u6001","text":"

                \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u652f\u6301\u7eb3\u7ba1\u4e24\u79cd\u7c7b\u578b\u7684\u96c6\u7fa4\uff1a\u63a5\u5165\u96c6\u7fa4\u548c\u81ea\u5efa\u96c6\u7fa4\u3002 \u5173\u4e8e\u96c6\u7fa4\u7eb3\u7ba1\u7c7b\u578b\u7684\u66f4\u591a\u4fe1\u606f\uff0c\u8bf7\u53c2\u89c1\u96c6\u7fa4\u89d2\u8272\u3002

                \u8fd9\u4e24\u79cd\u96c6\u7fa4\u7684\u72b6\u6001\u5982\u4e0b\u6240\u8ff0\u3002

                "},{"location":"admin/kpanda/clusters/cluster-status.html#_2","title":"\u63a5\u5165\u96c6\u7fa4","text":"\u72b6\u6001 \u63cf\u8ff0 \u63a5\u5165\u4e2d\uff08Joining\uff09 \u96c6\u7fa4\u6b63\u5728\u63a5\u5165 \u89e3\u9664\u63a5\u5165\u4e2d\uff08Removing\uff09 \u96c6\u7fa4\u6b63\u5728\u89e3\u9664\u63a5\u5165 \u8fd0\u884c\u4e2d\uff08Running\uff09 \u96c6\u7fa4\u6b63\u5e38\u8fd0\u884c \u672a\u77e5\uff08Unknown\uff09 \u96c6\u7fa4\u5df2\u5931\u8054\uff0c\u7cfb\u7edf\u5c55\u793a\u6570\u636e\u4e3a\u5931\u8054\u524d\u7f13\u5b58\u6570\u636e\uff0c\u4e0d\u4ee3\u8868\u771f\u5b9e\u6570\u636e\uff0c\u540c\u65f6\u5931\u8054\u72b6\u6001\u4e0b\u6267\u884c\u7684\u4efb\u4f55\u64cd\u4f5c\u90fd\u5c06\u4e0d\u751f\u6548\uff0c\u8bf7\u68c0\u67e5\u96c6\u7fa4\u7f51\u7edc\u8fde\u901a\u6027\u6216\u4e3b\u673a\u72b6\u6001\u3002"},{"location":"admin/kpanda/clusters/cluster-status.html#_3","title":"\u81ea\u5efa\u96c6\u7fa4","text":"\u72b6\u6001 \u63cf\u8ff0 \u521b\u5efa\u4e2d\uff08Creating\uff09 \u96c6\u7fa4\u6b63\u5728\u521b\u5efa \u66f4\u65b0\u4e2d\uff08Updating\uff09 \u66f4\u65b0\u96c6\u7fa4 Kubernetes \u7248\u672c \u5220\u9664\u4e2d\uff08Deleting\uff09 \u96c6\u7fa4\u6b63\u5728\u5220\u9664 \u8fd0\u884c\u4e2d\uff08Running\uff09 \u96c6\u7fa4\u6b63\u5e38\u8fd0\u884c \u672a\u77e5\uff08Unknown\uff09 \u96c6\u7fa4\u5df2\u5931\u8054\uff0c\u7cfb\u7edf\u5c55\u793a\u6570\u636e\u4e3a\u5931\u8054\u524d\u7f13\u5b58\u6570\u636e\uff0c\u4e0d\u4ee3\u8868\u771f\u5b9e\u6570\u636e\uff0c\u540c\u65f6\u5931\u8054\u72b6\u6001\u4e0b\u6267\u884c\u7684\u4efb\u4f55\u64cd\u4f5c\u90fd\u5c06\u4e0d\u751f\u6548\uff0c\u8bf7\u68c0\u67e5\u96c6\u7fa4\u7f51\u7edc\u8fde\u901a\u6027\u6216\u4e3b\u673a\u72b6\u6001\u3002 \u521b\u5efa\u5931\u8d25\uff08Failed\uff09 \u96c6\u7fa4\u521b\u5efa\u5931\u8d25\uff0c\u8bf7\u67e5\u770b\u65e5\u5fd7\u4ee5\u83b7\u53d6\u8be6\u7ec6\u5931\u8d25\u539f\u56e0"},{"location":"admin/kpanda/clusters/cluster-version.html","title":"\u96c6\u7fa4\u7248\u672c\u652f\u6301\u8303\u56f4","text":"

On the Suanfeng AI computing platform, integrated clusters and self-built clusters follow different version support mechanisms.

This article mainly describes the version support mechanism for self-built clusters.

The Kubernetes community supports a range of 3 versions, such as 1.26, 1.27, 1.28. When the community releases a new version, the supported range moves up by one. For example, once the community's latest 1.29 version is released, the community-supported range becomes 1.27, 1.28, 1.29.

For example, if the community-supported range is 1.25, 1.26, 1.27, then the version range for creating worker clusters through the UI on the Suanfeng AI computing platform is 1.24, 1.25, 1.26, and a stable version, such as 1.24.7, is recommended to users.

In addition, the version range for creating worker clusters through the UI stays closely in sync with the community: when the community range moves up a version, the platform's worker-cluster version range also moves up a version accordingly.

                "},{"location":"admin/kpanda/clusters/cluster-version.html#kubernetes","title":"Kubernetes \u7248\u672c\u652f\u6301\u8303\u56f4","text":"Kubernetes \u793e\u533a\u7248\u672c\u8303\u56f4 \u81ea\u5efa\u5de5\u4f5c\u96c6\u7fa4\u7248\u672c\u8303\u56f4 \u81ea\u5efa\u5de5\u4f5c\u96c6\u7fa4\u63a8\u8350\u7248\u672c \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5b89\u88c5\u5668 \u53d1\u5e03\u65f6\u95f4
                • 1.26
                • 1.27
                • 1.28
                • 1.25
                • 1.26
                • 1.27
                1.27.5 v0.13.0 2023.11.30"},{"location":"admin/kpanda/clusters/create-cluster.html","title":"\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4","text":"

In the Suanfeng AI computing platform container management module, cluster roles fall into four categories: global service cluster, management cluster, worker cluster, and integrated cluster. Integrated clusters can only be brought in from third-party vendors; see Integrate a Cluster.

This page describes how to create a worker cluster. By default, the worker nodes of a new worker cluster must match the global service cluster's OS type and CPU architecture. To create a cluster with nodes whose OS or architecture differs from the global service cluster, see Creating an Ubuntu Worker Cluster on a CentOS Management Platform.

It is recommended to create clusters using an operating system supported by the Suanfeng AI computing platform. If your local nodes are outside that support range, see Creating a Cluster on a Non-Mainstream Operating System.

                "},{"location":"admin/kpanda/clusters/create-cluster.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                \u521b\u5efa\u96c6\u7fa4\u4e4b\u524d\u9700\u8981\u6ee1\u8db3\u4e00\u5b9a\u7684\u524d\u63d0\u6761\u4ef6\uff1a

                • \u6839\u636e\u4e1a\u52a1\u9700\u6c42\u51c6\u5907\u4e00\u5b9a\u6570\u91cf\u7684\u8282\u70b9\uff0c\u4e14\u8282\u70b9 OS \u7c7b\u578b\u548c CPU \u67b6\u6784\u4e00\u81f4\u3002
                • \u63a8\u8350 Kubernetes \u7248\u672c 1.29.5\uff0c\u5177\u4f53\u7248\u672c\u8303\u56f4\uff0c\u53c2\u9605 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u96c6\u7fa4\u7248\u672c\u652f\u6301\u4f53\u7cfb\uff0c \u76ee\u524d\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u81ea\u5efa\u5de5\u4f5c\u96c6\u7fa4\u7248\u672c\u8303\u56f4\u5728 v1.28.0-v1.30.2\u3002\u5982\u9700\u521b\u5efa\u4f4e\u7248\u672c\u7684\u96c6\u7fa4\uff0c\u8bf7\u53c2\u8003\u96c6\u7fa4\u7248\u672c\u652f\u6301\u8303\u56f4\u3001\u90e8\u7f72\u4e0e\u5347\u7ea7 Kubean \u5411\u4e0b\u517c\u5bb9\u7248\u672c\u3002
                • \u76ee\u6807\u4e3b\u673a\u9700\u8981\u5141\u8bb8 IPv4 \u8f6c\u53d1\u3002\u5982\u679c Pod \u548c Service \u4f7f\u7528\u7684\u662f IPv6\uff0c\u5219\u76ee\u6807\u670d\u52a1\u5668\u9700\u8981\u5141\u8bb8 IPv6 \u8f6c\u53d1\u3002
                • \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u6682\u4e0d\u63d0\u4f9b\u5bf9\u9632\u706b\u5899\u7684\u7ba1\u7406\u529f\u80fd\uff0c\u60a8\u9700\u8981\u9884\u5148\u81ea\u884c\u5b9a\u4e49\u76ee\u6807\u4e3b\u673a\u9632\u706b\u5899\u89c4\u5219\u3002\u4e3a\u4e86\u907f\u514d\u521b\u5efa\u96c6\u7fa4\u7684\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u95ee\u9898\uff0c\u5efa\u8bae\u7981\u7528\u76ee\u6807\u4e3b\u673a\u7684\u9632\u706b\u5899\u3002
                • \u53c2\u9605\u8282\u70b9\u53ef\u7528\u6027\u68c0\u67e5\u3002
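
A minimal host-side check for the forwarding prerequisite mentioned above (run on each target host; these are standard Linux sysctl keys):

    sysctl net.ipv4.ip_forward               # expected: net.ipv4.ip_forward = 1
    sysctl net.ipv6.conf.all.forwarding      # only relevant if Pods/Services use IPv6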
                "},{"location":"admin/kpanda/clusters/create-cluster.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u4e2d\uff0c\u70b9\u51fb \u521b\u5efa\u96c6\u7fa4 \u6309\u94ae\u3002

                2. \u53c2\u8003\u4e0b\u5217\u8981\u6c42\u586b\u5199\u96c6\u7fa4\u57fa\u672c\u4fe1\u606f\uff0c\u5e76\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                  • \u96c6\u7fa4\u540d\u79f0\uff1a\u540d\u79f0\u53ea\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u548c\u8fde\u5b57\u7b26\uff08\"-\"\uff09\uff0c\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u8005\u6570\u5b57\u5f00\u5934\u548c\u7ed3\u5c3e\uff0c\u6700\u957f 63 \u4e2a\u5b57\u7b26\u3002
                  • \u88ab\u7eb3\u7ba1\uff1a\u9009\u62e9\u7531\u54ea\u4e2a\u96c6\u7fa4\u6765\u7ba1\u7406\u6b64\u96c6\u7fa4\uff0c\u4f8b\u5982\u5728\u96c6\u7fa4\u751f\u547d\u5468\u671f\u4e2d\u521b\u5efa\u3001\u5347\u7ea7\u3001\u8282\u70b9\u6269\u7f29\u5bb9\u3001\u5220\u9664\u96c6\u7fa4\u7b49\u3002
                  • \u8fd0\u884c\u65f6\uff1a\u9009\u62e9\u96c6\u7fa4\u7684\u8fd0\u884c\u65f6\u73af\u5883\uff0c\u76ee\u524d\u652f\u6301 containerd \u548c docker\uff0c\u5982\u4f55\u9009\u62e9\u5bb9\u5668\u8fd0\u884c\u65f6\u3002
                  • Kubernetes \u7248\u672c\uff1a\u652f\u6301 3 \u4e2a\u7248\u672c\u8de8\u5ea6\uff0c\u5177\u4f53\u53d6\u51b3\u4e8e\u88ab\u7eb3\u7ba1\u96c6\u7fa4\u6240\u652f\u6301\u7684\u7248\u672c\u3002

                3. \u586b\u5199\u8282\u70b9\u914d\u7f6e\u4fe1\u606f\uff0c\u5e76\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                  • \u9ad8\u53ef\u7528\uff1a\u5f00\u542f\u540e\u9700\u8981\u63d0\u4f9b\u81f3\u5c11 3 \u4e2a\u63a7\u5236\u5668\u8282\u70b9\u3002\u5173\u95ed\u540e\uff0c\u53ea\u63d0\u4f9b 1 \u4e2a\u63a7\u5236\u5668\u8282\u70b9\u5373\u53ef\u3002

                    \u751f\u4ea7\u73af\u5883\u4e2d\u5efa\u8bae\u4f7f\u7528\u9ad8\u53ef\u7528\u6a21\u5f0f\u3002

                  • \u8ba4\u8bc1\u65b9\u5f0f\uff1a\u9009\u62e9\u901a\u8fc7\u7528\u6237\u540d/\u5bc6\u7801\u8fd8\u662f\u516c\u79c1\u94a5\u8bbf\u95ee\u8282\u70b9\u3002

                    \u5982\u679c\u4f7f\u7528\u516c\u79c1\u94a5\u65b9\u5f0f\u8bbf\u95ee\u8282\u70b9\uff0c\u9700\u8981\u9884\u5148\u914d\u7f6e\u8282\u70b9\u7684 SSH \u5bc6\u94a5\u3002\u53c2\u9605\u4f7f\u7528 SSH \u5bc6\u94a5\u8ba4\u8bc1\u8282\u70b9\u3002

                  • \u4f7f\u7528\u7edf\u4e00\u7684\u5bc6\u7801\uff1a\u5f00\u542f\u540e\u96c6\u7fa4\u4e2d\u6240\u6709\u8282\u70b9\u7684\u8bbf\u95ee\u5bc6\u7801\u90fd\u76f8\u540c\uff0c\u9700\u8981\u5728\u4e0b\u65b9\u8f93\u5165\u8bbf\u95ee\u6240\u6709\u8282\u70b9\u7684\u7edf\u4e00\u5bc6\u7801\u3002\u5982\u679c\u5173\u95ed\uff0c\u5219\u53ef\u4ee5\u4e3a\u6bcf\u4e2a\u8282\u70b9\u8bbe\u7f6e\u5355\u72ec\u7684\u7528\u6237\u540d\u548c\u5bc6\u7801\u3002

                  • \u8282\u70b9\u4fe1\u606f\uff1a\u586b\u5199\u8282\u70b9\u540d\u79f0\u548c IP \u5730\u5740\u3002

                  • \u81ea\u5b9a\u4e49\u53c2\u6570\uff1a\u8bbe\u7f6e\u53d8\u91cf\u63a7\u5236 Ansible \u4e0e\u8fdc\u7a0b\u4e3b\u673a\u4ea4\u4e92\u3002\u53ef\u8bbe\u7f6e\u53d8\u91cf\u53c2\u8003\u8fde\u63a5\u5230\u4e3b\u673a\uff1a\u884c\u4e3a\u6e05\u5355\u53c2\u6570
                  • NTP \u65f6\u95f4\u540c\u6b65\uff1a\u5f00\u542f\u540e\u4f1a\u81ea\u52a8\u540c\u6b65\u5404\u4e2a\u8282\u70b9\u4e0a\u7684\u65f6\u95f4\uff0c\u9700\u8981\u63d0\u4f9b NTP \u670d\u52a1\u5668\u5730\u5740\u3002

                4. \u5728\u9875\u9762\u5e95\u90e8\u70b9\u51fb\u8282\u70b9\u68c0\u67e5\u3002\u5982\u679c\u68c0\u67e5\u901a\u8fc7\u5219\u7ee7\u7eed\u4e0b\u4e00\u6b65\u64cd\u4f5c\u3002\u5982\u679c\u68c0\u67e5\u672a\u901a\u8fc7\uff0c\u5219\u66f4\u65b0 \u8282\u70b9\u4fe1\u606f \u5e76\u518d\u6b21\u6267\u884c\u68c0\u67e5\u3002

                5. \u586b\u5199\u7f51\u7edc\u914d\u7f6e\u4fe1\u606f\uff0c\u5e76\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                  • \u7f51\u7edc\u63d2\u4ef6\uff1a\u8d1f\u8d23\u4e3a\u96c6\u7fa4\u5185\u7684 Pod \u63d0\u4f9b\u7f51\u7edc\u670d\u52a1\uff0c\u521b\u5efa\u96c6\u7fa4\u540e\u4e0d\u53ef\u66f4\u6539\u7f51\u7edc\u63d2\u4ef6\u3002\u652f\u6301 cilium \u548c calico\u3002\u9009\u62e9 none \u8868\u793a\u6682\u4e0d\u5b89\u88c5\u7f51\u7edc\u63d2\u4ef6\u3002

                  • \u5bb9\u5668\u7f51\u6bb5\uff1a\u96c6\u7fa4\u4e0b\u5bb9\u5668\u4f7f\u7528\u7684\u7f51\u6bb5\uff0c\u51b3\u5b9a\u96c6\u7fa4\u4e0b\u5bb9\u5668\u7684\u6570\u91cf\u4e0a\u9650\u3002\u521b\u5efa\u540e\u4e0d\u53ef\u4fee\u6539\u3002

                  • \u670d\u52a1\u7f51\u6bb5\uff1a\u540c\u4e00\u96c6\u7fa4\u4e0b\u5bb9\u5668\u4e92\u76f8\u8bbf\u95ee\u65f6\u4f7f\u7528\u7684 Service \u8d44\u6e90\u7684\u7f51\u6bb5\uff0c\u51b3\u5b9a Service \u8d44\u6e90\u7684\u4e0a\u9650\u3002\u521b\u5efa\u540e\u4e0d\u53ef\u4fee\u6539\u3002

                6. \u586b\u5199\u63d2\u4ef6\u914d\u7f6e\u4fe1\u606f\uff0c\u5e76\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                7. \u586b\u5199\u9ad8\u7ea7\u914d\u7f6e\u4fe1\u606f\uff0c\u5e76\u70b9\u51fb \u786e\u5b9a \u3002

                  • kubelet_max_pods \uff1a\u8bbe\u7f6e\u6bcf\u4e2a\u8282\u70b9\u7684\u6700\u5927 Pod \u6570\u91cf\uff0c\u9ed8\u8ba4\u4e3a 110 \u4e2a\u3002
                  • hostname_overide \uff1a\u91cd\u7f6e\u4e3b\u673a\u540d\uff0c\u5efa\u8bae\u4f7f\u7528\u9ed8\u8ba4\u503c\uff0c\u91c7\u7528\u7cfb\u7edf\u9ed8\u8ba4\u751f\u6210\u7684\u540d\u79f0\u4f5c\u4e3a\u4e3b\u673a\u540d\u79f0\u3002
                  • kubernetes_audit \uff1aKubernetes \u7684\u5ba1\u8ba1\u65e5\u5fd7\uff0c\u9ed8\u8ba4\u5f00\u542f\u3002
                  • auto_renew_certificate \uff1a\u5728\u6bcf\u6708\u7b2c\u4e00\u4e2a\u661f\u671f\u4e00\u81ea\u52a8\u66f4\u65b0 Kubernetes \u63a7\u5236\u5e73\u9762\u8bc1\u4e66\uff0c\u9ed8\u8ba4\u5f00\u542f\u3002
                  • disable_firewalld&ufw \uff1a\u7981\u7528\u9632\u706b\u5899\uff0c\u907f\u514d\u8282\u70b9\u5728\u5b89\u88c5\u8fc7\u7a0b\u4e2d\u65e0\u6cd5\u88ab\u8bbf\u95ee\u3002
                  • Insecure_registries \uff1a\u79c1\u6709\u955c\u50cf\u4ed3\u5e93\u914d\u7f6e\u3002\u4f7f\u7528\u79c1\u6709\u955c\u50cf\u4ed3\u5e93\u521b\u5efa\u96c6\u7fa4\u65f6\uff0c\u4e3a\u4e86\u907f\u514d\u8bc1\u4e66\u95ee\u9898\u5bfc\u81f4\u5bb9\u5668\u5f15\u64ce\u62d2\u7edd\u8bbf\u95ee\uff0c\u9700\u8981\u5728\u8fd9\u91cc\u586b\u5199\u79c1\u6709\u955c\u50cf\u4ed3\u5e93\u5730\u5740\uff0c\u4ee5\u7ed5\u8fc7\u5bb9\u5668\u5f15\u64ce\u7684\u8bc1\u4e66\u8ba4\u8bc1\u800c\u83b7\u53d6\u955c\u50cf\u3002
                  • yum_repos \uff1a\u586b\u5199 Yum \u6e90\u4ed3\u5e93\u5730\u5740\u3002\u79bb\u7ebf\u73af\u5883\u4e0b\uff0c\u9ed8\u8ba4\u7ed9\u51fa\u7684\u5730\u5740\u9009\u9879\u4ec5\u4f9b\u53c2\u8003\uff0c\u8bf7\u6839\u636e\u5b9e\u9645\u60c5\u51b5\u586b\u5199\u3002

Success

• After filling in the information correctly and completing the steps above, the page will indicate that the cluster is being created.
• Creating a cluster takes a long time, so please be patient. In the meantime, you can click the Back to Cluster List button to let the installation run in the background.
• To check the current status, click Live Logs.

Note

• When a cluster shows Unknown status, the cluster has lost contact.
• The data displayed is cached from before the disconnection and does not represent the real state.
• Any operation performed while disconnected will not take effect; check the cluster's network connectivity or host status.

                "},{"location":"admin/kpanda/clusters/delete-cluster.html","title":"\u5378\u8f7d/\u89e3\u9664\u63a5\u5165\u96c6\u7fa4","text":"

                \u901a\u8fc7\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0 \u521b\u5efa\u7684\u96c6\u7fa4 \u652f\u6301 \u5378\u8f7d\u96c6\u7fa4 \u6216 \u89e3\u9664\u63a5\u5165 \u64cd\u4f5c\uff0c\u4ece\u5176\u4ed6\u73af\u5883\u76f4\u63a5 \u63a5\u5165\u7684\u96c6\u7fa4 \u4ec5\u652f\u6301 \u89e3\u9664\u63a5\u5165 \u64cd\u4f5c\u3002

                Info

                \u5982\u679c\u60f3\u5f7b\u5e95\u5220\u9664\u4e00\u4e2a\u63a5\u5165\u7684\u96c6\u7fa4\uff0c\u9700\u8981\u524d\u5f80\u521b\u5efa\u8be5\u96c6\u7fa4\u7684\u539f\u59cb\u5e73\u53f0\u64cd\u4f5c\u3002\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e0d\u652f\u6301\u5220\u9664\u63a5\u5165\u7684\u96c6\u7fa4\u3002

                \u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\uff0c \u5378\u8f7d\u96c6\u7fa4 \u548c \u89e3\u9664\u63a5\u5165 \u7684\u533a\u522b\u5728\u4e8e\uff1a

                • \u5378\u8f7d\u96c6\u7fa4 \u64cd\u4f5c\u4f1a\u9500\u6bc1\u8be5\u96c6\u7fa4\uff0c\u5e76\u91cd\u7f6e\u96c6\u7fa4\u4e0b\u6240\u6709\u8282\u70b9\u7684\u6570\u636e\u3002\u6240\u6709\u6570\u636e\u90fd\u5c06\u88ab\u9500\u6bc1\uff0c\u5efa\u8bae\u505a\u597d\u5907\u4efd\u3002\u540e\u671f\u9700\u8981\u65f6\u5fc5\u987b\u91cd\u65b0\u521b\u5efa\u4e00\u4e2a\u96c6\u7fa4\u3002
                • \u89e3\u9664\u63a5\u5165 \u64cd\u4f5c\u4f1a\u5c06\u5f53\u524d\u96c6\u7fa4\u4ece\u5e73\u53f0\u4e2d\u79fb\u9664\uff0c\u4e0d\u4f1a\u6467\u6bc1\u96c6\u7fa4\uff0c\u4e5f\u4e0d\u4f1a\u9500\u6bc1\u6570\u636e\u3002
                "},{"location":"admin/kpanda/clusters/delete-cluster.html#_2","title":"\u5378\u8f7d\u96c6\u7fa4","text":"

                Note

                • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u5907 Admin \u6216 Kpanda Owner \u6743\u9650\u624d\u80fd\u6267\u884c\u5378\u8f7d\u96c6\u7fa4\u7684\u64cd\u4f5c\u3002
                • \u5378\u8f7d\u96c6\u7fa4\u4e4b\u524d\uff0c\u5e94\u8be5\u5148\u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u67d0\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u5728 \u96c6\u7fa4\u8fd0\u7ef4 -> \u96c6\u7fa4\u8bbe\u7f6e -> \u9ad8\u7ea7\u914d\u7f6e \u4e2d\u5173\u95ed \u96c6\u7fa4\u5220\u9664\u4fdd\u62a4 \uff0c \u5426\u5219\u4e0d\u663e\u793a \u5378\u8f7d\u96c6\u7fa4 \u7684\u9009\u9879\u3002
                • \u5168\u5c40\u670d\u52a1\u96c6\u7fa4 \u4e0d\u652f\u6301\u5378\u8f7d\u6216\u79fb\u9664\u64cd\u4f5c\u3002
                1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u627e\u5230\u9700\u8981\u5378\u8f7d\u96c6\u7fa4\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \u5e76\u5728\u4e0b\u62c9\u5217\u8868\u4e2d\u70b9\u51fb \u5378\u8f7d\u96c6\u7fa4 \u3002

                2. \u8f93\u5165\u96c6\u7fa4\u540d\u79f0\u8fdb\u884c\u786e\u8ba4\uff0c\u7136\u540e\u70b9\u51fb \u5220\u9664 \u3002

                  \u5982\u679c\u63d0\u793a\u96c6\u7fa4\u4e2d\u8fd8\u6709\u4e00\u4e9b\u6b8b\u7559\u7684\u8d44\u6e90\uff0c\u5219\u9700\u8981\u6309\u63d0\u793a\u5220\u9664\u76f8\u5173\u8d44\u6e90\u540e\u624d\u80fd\u6267\u884c\u5378\u8f7d\u64cd\u4f5c\u3002

                3. \u8fd4\u56de \u96c6\u7fa4\u5217\u8868 \u9875\u53ef\u4ee5\u770b\u5230\u8be5\u96c6\u7fa4\u7684\u72b6\u6001\u5df2\u7ecf\u53d8\u6210 \u5220\u9664\u4e2d \u3002\u5378\u8f7d\u96c6\u7fa4\u53ef\u80fd\u9700\u8981\u4e00\u6bb5\u65f6\u95f4\uff0c\u8bf7\u60a8\u8010\u5fc3\u7b49\u5019\u3002

                "},{"location":"admin/kpanda/clusters/delete-cluster.html#_3","title":"\u89e3\u9664\u63a5\u5165\u96c6\u7fa4","text":"

                Note

                • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u5907 Admin \u6216 Kpanda Owner \u6743\u9650\u624d\u80fd\u6267\u884c\u89e3\u9664\u63a5\u5165\u7684\u64cd\u4f5c\u3002
                • \u5168\u5c40\u670d\u52a1\u96c6\u7fa4 \u4e0d\u652f\u6301\u89e3\u9664\u63a5\u5165\u3002
                1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u627e\u5230\u9700\u8981\u5378\u8f7d\u96c6\u7fa4\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \u5e76\u5728\u4e0b\u62c9\u5217\u8868\u4e2d\u70b9\u51fb \u89e3\u9664\u63a5\u5165 \u3002

                2. \u8f93\u5165\u96c6\u7fa4\u540d\u79f0\u8fdb\u884c\u786e\u8ba4\uff0c\u7136\u540e\u70b9\u51fb \u89e3\u9664\u63a5\u5165 \u3002

                  \u5982\u679c\u63d0\u793a\u96c6\u7fa4\u4e2d\u8fd8\u6709\u4e00\u4e9b\u6b8b\u7559\u7684\u8d44\u6e90\uff0c\u5219\u9700\u8981\u6309\u63d0\u793a\u5220\u9664\u76f8\u5173\u8d44\u6e90\u540e\u624d\u80fd\u89e3\u9664\u63a5\u5165\u3002

                "},{"location":"admin/kpanda/clusters/delete-cluster.html#_4","title":"\u6e05\u7406\u89e3\u9664\u63a5\u5165\u96c6\u7fa4\u914d\u7f6e\u6570\u636e","text":"

                \u96c6\u7fa4\u88ab\u79fb\u9664\u540e\uff0c\u96c6\u7fa4\u4e2d\u539f\u6709\u7684\u7ba1\u7406\u5e73\u53f0\u6570\u636e\u4e0d\u4f1a\u88ab\u81ea\u52a8\u6e05\u9664\uff0c\u5982\u9700\u5c06\u96c6\u7fa4\u63a5\u5165\u81f3\u65b0\u7ba1\u7406\u5e73\u53f0\u5219\u9700\u8981\u624b\u52a8\u6267\u884c\u5982\u4e0b\u64cd\u4f5c\uff1a

                \u5220\u9664 kpanda-system\u3001insight-system \u547d\u540d\u7a7a\u95f4

                kubectl delete ns kpanda-system insight-system\n
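
A quick hedged check that the cleanup finished; once deletion completes, this grep prints nothing:

kubectl get ns | grep -E 'kpanda-system|insight-system'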
                "},{"location":"admin/kpanda/clusters/integrate-cluster.html","title":"\u63a5\u5165\u96c6\u7fa4","text":"

                \u901a\u8fc7\u63a5\u5165\u96c6\u7fa4\u64cd\u4f5c\uff0c\u80fd\u591f\u5bf9\u4f17\u591a\u4e91\u670d\u52a1\u5e73\u53f0\u96c6\u7fa4\u548c\u672c\u5730\u79c1\u6709\u7269\u7406\u96c6\u7fa4\u8fdb\u884c\u7edf\u4e00\u7eb3\u7ba1\uff0c\u5f62\u6210\u7edf\u4e00\u6cbb\u7406\u5e73\u53f0\uff0c\u6709\u6548\u907f\u514d\u4e86\u88ab\u5382\u5546\u9501\u5b9a\u98ce\u9669\uff0c\u52a9\u529b\u4f01\u4e1a\u4e1a\u52a1\u5b89\u5168\u4e0a\u4e91\u3002

                \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u652f\u6301\u63a5\u5165\u591a\u79cd\u4e3b\u6d41\u7684\u5bb9\u5668\u96c6\u7fa4\uff0c\u4f8b\u5982 Redhat Openshift, SUSE Rancher, VMware Tanzu, Amazon EKS, Aliyun ACK, Huawei CCE, Tencent TKE, \u6807\u51c6 Kubernetes \u96c6\u7fa4\u3002

                "},{"location":"admin/kpanda/clusters/integrate-cluster.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                • \u51c6\u5907\u4e00\u4e2a\u5f85\u63a5\u5165\u7684\u96c6\u7fa4\uff0c\u786e\u4fdd\u5bb9\u5668\u7ba1\u7406\u96c6\u7fa4\u548c\u5f85\u63a5\u5165\u96c6\u7fa4\u4e4b\u95f4\u7f51\u7edc\u901a\u7545\uff0c\u5e76\u4e14\u96c6\u7fa4\u7684 Kubernetes \u7248\u672c 1.22+\u3002
                • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 Kpanda Owner \u6216\u66f4\u9ad8\u6743\u9650\u3002
                "},{"location":"admin/kpanda/clusters/integrate-cluster.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                1. \u8fdb\u5165 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u63a5\u5165\u96c6\u7fa4 \u6309\u94ae\u3002

                2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\u3002

                  • \u96c6\u7fa4\u540d\u79f0\uff1a\u540d\u79f0\u5e94\u5177\u6709\u552f\u4e00\u6027\uff0c\u8bbe\u7f6e\u540e\u4e0d\u53ef\u66f4\u6539\u3002\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26(\"-\")\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\u3002
                  • \u96c6\u7fa4\u522b\u540d\uff1a\u53ef\u8f93\u5165\u4efb\u610f\u5b57\u7b26\uff0c\u4e0d\u8d85\u8fc7 60 \u4e2a\u5b57\u7b26\u3002
                  • \u53d1\u884c\u7248\uff1a\u96c6\u7fa4\u7684\u53d1\u884c\u5382\u5546\uff0c\u5305\u62ec\u5e02\u573a\u4e3b\u6d41\u4e91\u5382\u5546\u548c\u672c\u5730\u79c1\u6709\u7269\u7406\u96c6\u7fa4\u3002
                3. \u586b\u5199\u76ee\u6807\u96c6\u7fa4\u7684 KubeConfig\uff0c\u70b9\u51fb \u9a8c\u8bc1 Config \uff0c\u9a8c\u8bc1\u901a\u8fc7\u540e\u624d\u80fd\u6210\u529f\u63a5\u5165\u96c6\u7fa4\u3002

                  \u5982\u679c\u4e0d\u77e5\u9053\u5982\u4f55\u83b7\u53d6\u96c6\u7fa4\u7684 KubeConfig \u6587\u4ef6\uff0c\u53ef\u4ee5\u5728\u8f93\u5165\u6846\u53f3\u4e0a\u89d2\u70b9\u51fb \u5982\u4f55\u83b7\u53d6 kubeConfig \u67e5\u770b\u5bf9\u5e94\u6b65\u9aa4\u3002

                4. \u786e\u8ba4\u6240\u6709\u53c2\u6570\u586b\u5199\u6b63\u786e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u3002

Note

• A newly integrated cluster starts in the Joining state and changes to Running once integration succeeds.
• If the cluster stays in the Joining state, confirm that the integration script ran successfully on the target cluster. For more details about cluster statuses, see Cluster Status.
                "},{"location":"admin/kpanda/clusters/integrate-rancher-cluster.html","title":"\u63a5\u5165 rancher \u96c6\u7fa4","text":"

                \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u63a5\u5165 rancher \u96c6\u7fa4\u3002

                "},{"location":"admin/kpanda/clusters/integrate-rancher-cluster.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                • \u51c6\u5907\u4e00\u4e2a\u5177\u6709\u7ba1\u7406\u5458\u6743\u9650\u7684\u5f85\u63a5\u5165 ranhcer \u96c6\u7fa4\uff0c\u786e\u4fdd\u5bb9\u5668\u7ba1\u7406\u96c6\u7fa4\u548c\u5f85\u63a5\u5165\u96c6\u7fa4\u4e4b\u95f4\u7f51\u7edc\u901a\u7545\u3002
                • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 Kpanda Owner \u6216\u66f4\u9ad8\u6743\u9650\u3002
                "},{"location":"admin/kpanda/clusters/integrate-rancher-cluster.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"admin/kpanda/clusters/integrate-rancher-cluster.html#rancher-serviceaccount","title":"\u6b65\u9aa4\u4e00\uff1a\u5728 rancher \u96c6\u7fa4\u521b\u5efa\u5177\u6709\u7ba1\u7406\u5458\u6743\u9650\u7684 ServiceAccount \u7528\u6237","text":"
                1. \u4f7f\u7528\u5177\u6709\u7ba1\u7406\u5458\u6743\u9650\u7684\u89d2\u8272\u8fdb\u5165 rancher \u96c6\u7fa4\uff0c\u5e76\u4f7f\u7528\u7ec8\u7aef\u65b0\u5efa\u4e00\u4e2a\u540d\u4e3a sa.yaml \u7684\u6587\u4ef6\u3002

                  vi sa.yaml\n

                  \u7136\u540e\u6309\u4e0b i \u952e\u8fdb\u5165\u63d2\u5165\u6a21\u5f0f\uff0c\u8f93\u5165\u4ee5\u4e0b\u5185\u5bb9\uff1a

                  sa.yaml
                  apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: rancher-rke\nrules:\n  - apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n  - nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: rancher-rke\nroleRef:\n    apiGroup: rbac.authorization.k8s.io\n    kind: ClusterRole\n    name: rancher-rke\n  subjects:\n  - kind: ServiceAccount\n    name: rancher-rke\n    namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: rancher-rke\n  namespace: kube-system\n

                  \u6309\u4e0b esc \u952e\u9000\u51fa\u63d2\u5165\u6a21\u5f0f\uff0c\u7136\u540e\u8f93\u5165 __ :wq__ \u4fdd\u5b58\u5e76\u9000\u51fa\u3002

2. Run the following command in the current directory to create the ServiceAccount named rancher-rke (abbreviated below as SA):

    kubectl apply -f sa.yaml

    The expected output is:

    clusterrole.rbac.authorization.k8s.io/rancher-rke created
    clusterrolebinding.rbac.authorization.k8s.io/rancher-rke created
    serviceaccount/rancher-rke created

3. Create a Secret named rancher-rke-secret and bind it to the rancher-rke SA (note that type is a top-level field of the Secret, not part of metadata):

    kubectl apply -f - <<EOF
    apiVersion: v1
    kind: Secret
    metadata:
      name: rancher-rke-secret
      namespace: kube-system
      annotations:
        kubernetes.io/service-account.name: rancher-rke
    type: kubernetes.io/service-account-token
    EOF

    The expected output is:

    secret/rancher-rke-secret created

    Note

    If your cluster version is lower than 1.24, the ServiceAccount token Secret is created automatically; skip this step and go straight to the next one.

4. Look up the Secret of the rancher-rke SA:

    kubectl -n kube-system get secret | grep rancher-rke | awk '{print $1}'

    Expected output:

    rancher-rke-secret

    View the details of the rancher-rke-secret Secret (an alternative one-liner for extracting just the token follows below):

    kubectl -n kube-system describe secret rancher-rke-secret

    Expected output:

    Name:         rancher-rke-secret
    Namespace:    kube-system
    Labels:       <none>
    Annotations:  kubernetes.io/service-account.name: rancher-rke
                  kubernetes.io/service-account.uid: d83df5d9-bd7d-488d-a046-b740618a0174

    Type:  kubernetes.io/service-account-token

    Data
    ====
    ca.crt:     570 bytes
    namespace:  11 bytes
    token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IjUtNE9nUWZLRzVpbEJORkZaNmtCQXhqVzRsZHU4MHhHcDBfb0VCaUo0V1kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJyYW5jaGVyLXJrZS1zZWNyZXQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoicmFuY2hlci1ya2UiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkODNkZjVkOS1iZDdkLTQ4OGQtYTA0Ni1iNzQwNjE4YTAxNzQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06cmFuY2hlci1ya2UifQ.VNsMtPEFOdDDeGt_8VHblcMRvjOwPXMM-79o9UooHx6q-VkHOcIOp3FOT2hnEdNnIsyODZVKCpEdCgyozX-3y5x2cZSZpocnkMcBbQm-qfTyUcUhAY7N5gcYUtHUhvRAsNWJcsDCn6d96gT_qo-ddo_cT8Ri39Lc123FDYOnYG-YGFKSgRQVy7Vyv34HIajZCCjZzy7i--eE_7o4DXeTjNqAFMFstUxxHBOXI3Rdn1zKQKqh5Jhg4ES7X-edSviSUfJUX-QV_LlAw5DuAyGPH7bDH4QaQ5k-p6cIctmpWZE-9wRDlKA4LYRblKE7MJcI6OmM4ldlMM0Jc8N-gCtl4w
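
    An alternative hedged one-liner that extracts just the token from the Secret (base64 -d assumes GNU coreutils; on macOS use base64 -D):

    kubectl -n kube-system get secret rancher-rke-secret -o jsonpath='{.data.token}' | base64 -d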
                "},{"location":"admin/kpanda/clusters/integrate-rancher-cluster.html#rancher-rke-sa-kubeconfig","title":"\u6b65\u9aa4\u4e8c\uff1a\u5728\u672c\u5730\u4f7f\u7528 rancher-rke SA \u7684\u8ba4\u8bc1\u4fe1\u606f\u66f4\u65b0 kubeconfig \u6587\u4ef6","text":"

                Perform the following operations on any local node where kubectl is installed:

                1. Configure the kubectl credentials with the SA token:

                  kubectl config set-credentials rancher-rke --token=<token value from rancher-rke-secret>\n

                  For example:

                  kubectl config set-credentials rancher-rke --token=eyJhbGciOiJSUzI1NiIsImtpZCI6IjUtNE9nUWZLRzVpbEJORkZaNmtCQXhqVzRsZHU4MHhHcDBfb0VCaUo0V1kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJyYW5jaGVyLXJrZS1zZWNyZXQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoicmFuY2hlci1ya2UiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkODNkZjVkOS1iZDdkLTQ4OGQtYTA0Ni1iNzQwNjE4YTAxNzQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06cmFuY2hlci1ya2UifQ.VNsMtPEFOdDDeGt_8VHblcMRvjOwPXMM-79o9UooHx6q-VkHOcIOp3FOT2hnEdNnIsyODZVKCpEdCgyozX-3y5x2cZSZpocnkMcBbQm-qfTyUcUhAY7N5gcYUtHUhvRAsNWJcsDCn6d96gT_qo-ddo_cT8Ri39Lc123FDYOnYG-YGFKSgRQVy7Vyv34HIajZCCjZzy7i--eE_7o4DXeTjNqAFMFstUxxHBOXI3Rdn1zKQKqh5Jhg4ES7X-edSviSUfJUX-QV_LlAw5DuAyGPH7bDH4QaQ5k-p6cIctmpWZE-9wRDlKA4LYRblKE7MJcI6OmM4ldlMM0Jc8N-gCtl4w\n
                2. Configure the cluster's APIServer information for kubectl:

                  kubectl config set-cluster {cluster-name} --insecure-skip-tls-verify=true --server={APIServer}\n
                  • {cluster-name}: the name of the Rancher cluster.
                  • {APIServer}: the access address of the cluster, typically the control plane node IP plus port 6443, for example https://10.X.X.X:6443

                  For example:

                  kubectl config set-cluster rancher-rke --insecure-skip-tls-verify=true --server=https://10.X.X.X:6443\n
                3. Configure the kubectl context:

                  kubectl config set-context {context-name} --cluster={cluster-name} --user={SA-username}\n

                  For example:

                  kubectl config set-context rancher-rke-context --cluster=rancher-rke --user=rancher-rke\n
                4. Switch kubectl to the newly created context rancher-rke-context:

                  kubectl config use-context rancher-rke-context\n
                5. Retrieve the kubeconfig information for the rancher-rke-context context.

                  kubectl config view --minify --flatten --raw\n

                  Expected output:

                  apiVersion: v1\nclusters:\n- cluster:\n    insecure-skip-tls-verify: true\n    server: https://77C321BCF072682C70C8665ED4BFA10D.gr7.ap-southeast-1.eks.amazonaws.com\n  name: joincluster\ncontexts:\n- context:\n    cluster: joincluster\n    user: eks-admin\n  name: ekscontext\ncurrent-context: ekscontext\nkind: Config\npreferences: {}\nusers:\n- name: eks-admin\n  user:\n    token: eyJhbGciOiJSUzI1NiIsImtpZCI6ImcxTjJwNkktWm5IbmRJU1RFRExvdWY1TGFWVUtGQ3VIejFtNlFQcUNFalEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2V\n
                "},{"location":"admin/kpanda/clusters/integrate-rancher-cluster.html#ai","title":"\u6b65\u9aa4\u4e09\uff1a\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u754c\u9762\u63a5\u5165\u96c6\u7fa4","text":"

                Using the kubeconfig file you just obtained, refer to the Integrate Cluster documentation to connect the Rancher cluster to the global service cluster.

                "},{"location":"admin/kpanda/clusters/k8s-cert.html","title":"Kubernetes \u96c6\u7fa4\u8bc1\u4e66\u66f4\u65b0","text":"

                To secure communication between Kubernetes components, calls between components are authenticated via TLS, which requires the cluster PKI certificates to be configured.

                Cluster certificates are valid for one year. To avoid service disruption caused by expired certificates, renew them in time.

                This article describes how to renew certificates manually.

                "},{"location":"admin/kpanda/clusters/k8s-cert.html#_1","title":"\u68c0\u67e5\u8bc1\u4e66\u662f\u5426\u8fc7\u671f","text":"

                Run the following command to check whether the certificates have expired:

                kubeadm certs check-expiration\n

                The output is similar to the following:

                CERTIFICATE                EXPIRES                  RESIDUAL TIME   CERTIFICATE AUTHORITY   EXTERNALLY MANAGED\nadmin.conf                 Dec 14, 2024 07:26 UTC   204d                                    no      \napiserver                  Dec 14, 2024 07:26 UTC   204d            ca                      no      \napiserver-etcd-client      Dec 14, 2024 07:26 UTC   204d            etcd-ca                 no      \napiserver-kubelet-client   Dec 14, 2024 07:26 UTC   204d            ca                      no      \ncontroller-manager.conf    Dec 14, 2024 07:26 UTC   204d                                    no      \netcd-healthcheck-client    Dec 14, 2024 07:26 UTC   204d            etcd-ca                 no      \netcd-peer                  Dec 14, 2024 07:26 UTC   204d            etcd-ca                 no      \netcd-server                Dec 14, 2024 07:26 UTC   204d            etcd-ca                 no      \nfront-proxy-client         Dec 14, 2024 07:26 UTC   204d            front-proxy-ca          no      \nscheduler.conf             Dec 14, 2024 07:26 UTC   204d                                    no      \n\nCERTIFICATE AUTHORITY   EXPIRES                  RESIDUAL TIME   EXTERNALLY MANAGED\nca                      Dec 12, 2033 07:26 UTC   9y              no      \netcd-ca                 Dec 12, 2033 07:26 UTC   9y              no      \nfront-proxy-ca          Dec 12, 2033 07:26 UTC   9y              no      \n
                "},{"location":"admin/kpanda/clusters/k8s-cert.html#_2","title":"\u624b\u52a8\u66f4\u65b0\u8bc1\u4e66","text":"

                You can renew certificates manually with the following commands, supplying the appropriate command-line options. Back up the current certificates before renewing.
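                A minimal backup sketch before renewing (the target directory /root/k8s-cert-backup is an arbitrary choice for illustration):

                mkdir -p /root/k8s-cert-backup\ncp -r /etc/kubernetes/pki /root/k8s-cert-backup/\ncp /etc/kubernetes/*.conf /root/k8s-cert-backup/\n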

                Renew a specific certificate:

                kubeadm certs renew <certificate-name>   # for example: kubeadm certs renew apiserver\n

                Renew all certificates:

                kubeadm certs renew all\n

                The renewed certificates can be found in the /etc/kubernetes/pki directory, with validity extended by one year. The following configuration files are also updated at the same time:

                • /etc/kubernetes/admin.conf
                • /etc/kubernetes/controller-manager.conf
                • /etc/kubernetes/scheduler.conf

                Note

                • If you are running a highly available cluster, this command must be executed on all control plane nodes.
                • This command performs the renewal using the CA (or front-proxy-CA) certificate and the key stored in /etc/kubernetes/pki.
                "},{"location":"admin/kpanda/clusters/k8s-cert.html#_3","title":"\u91cd\u542f\u670d\u52a1","text":"

                After the renewal, you need to restart the control plane Pods. This is required because dynamic certificate reloading is not yet supported by all components and certificates.

                Static Pods are managed by the local kubelet rather than by the API server, so kubectl cannot be used to delete or restart them.

                To restart a static Pod, you can temporarily remove its manifest file from /etc/kubernetes/manifests/ and wait 20 seconds. See the fileCheckFrequency value in the KubeletConfiguration struct.

                If the Pod is not in the manifest directory, the kubelet terminates it. After another fileCheckFrequency period, you can move the file back; the kubelet then re-creates the Pod, and the certificate renewal for the component takes effect.

                mv ./manifests/* ./temp/\nsleep 20   # wait one fileCheckFrequency period so the kubelet notices the removal\nmv ./temp/* ./manifests/\n

                Note

                If the container runtime is Docker, you can restart the services involved in certificate usage with the following command so that the new certificates take effect:

                docker ps | grep -E 'k8s_kube-apiserver|k8s_kube-controller-manager|k8s_kube-scheduler|k8s_etcd_etcd' | awk -F ' ' '{print $1}' | xargs docker restart\n
                "},{"location":"admin/kpanda/clusters/k8s-cert.html#kubeconfig","title":"\u66f4\u65b0 KubeConfig","text":"

                When a cluster is built, admin.conf is usually copied to $HOME/.kube/config. To update the contents of $HOME/.kube/config after admin.conf has been renewed, run the following commands:

                sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\nsudo chown $(id -u):$(id -g) $HOME/.kube/config\n
                "},{"location":"admin/kpanda/clusters/k8s-cert.html#kubelet","title":"\u4e3a kubelet \u914d\u7f6e\u8bc1\u4e66\u8f6e\u6362","text":"

                After the above operations, almost all cluster certificates have been renewed, except for those used by the kubelet.

                Kubernetes includes a kubelet certificate rotation feature: when the current certificate is about to expire, the kubelet automatically generates a new key and requests a new certificate from the Kubernetes API. Once the new certificate is available, it is used to authenticate the connection to the Kubernetes API.

                Note

                This feature is available in Kubernetes 1.8.0 and later.

                To enable client certificate rotation, configure the following parameters:

                • The kubelet process accepts the --rotate-certificates flag, which determines whether the kubelet automatically requests a new certificate when the one currently in use is about to expire.

                • The kube-controller-manager process accepts the --cluster-signing-duration flag (--experimental-cluster-signing-duration before v1.19), which controls the validity period of issued certificates.

                For more details, see Configure Certificate Rotation for the Kubelet.
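                As a hedged sketch (the values are illustrative, not mandated by this platform), rotation is typically enabled through the kubelet's KubeletConfiguration plus a controller-manager flag:

                # /var/lib/kubelet/config.yaml (KubeletConfiguration)\napiVersion: kubelet.config.k8s.io/v1beta1\nkind: KubeletConfiguration\nrotateCertificates: true   # kubelet requests a new certificate before expiry\n\n# kube-controller-manager flag (one-year validity shown as an example)\n# --cluster-signing-duration=8760h\n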

                "},{"location":"admin/kpanda/clusters/k8s-cert.html#_4","title":"\u81ea\u52a8\u66f4\u65b0\u8bc1\u4e66","text":"

                To handle expired or soon-to-expire Kubernetes cluster certificates more efficiently and conveniently, refer to the cluster certificate update documentation for your Kubernetes version.

                "},{"location":"admin/kpanda/clusters/runtime.html","title":"\u5982\u4f55\u9009\u62e9\u5bb9\u5668\u8fd0\u884c\u65f6","text":"

                The container runtime is a key Kubernetes component that manages the lifecycle of containers and container images. Kubernetes made containerd the default container runtime in v1.19 and removed support for the Dockershim component in v1.24.

                Therefore, compared with the Docker runtime, we recommend the lightweight containerd as your container runtime, as it has become the mainstream choice.

                In addition, some operating system vendors offer limited compatibility with the Docker runtime. Runtime support by operating system is shown in the table below:

                "},{"location":"admin/kpanda/clusters/runtime.html#_2","title":"\u4e0d\u540c\u64cd\u4f5c\u7cfb\u7edf\u548c\u63a8\u8350\u7684\u8fd0\u884c\u65f6\u7248\u672c\u5bf9\u5e94\u5173\u7cfb","text":"\u64cd\u4f5c\u7cfb\u7edf \u63a8\u8350\u7684 containerd \u7248\u672c \u63a8\u8350\u7684 Docker \u7248\u672c CentOS 1.7.5 20.10 RedHatOS 1.7.5 20.10 KylinOS 1.7.5 19.03\uff08\u4ec5 ARM \u67b6\u6784\u652f\u6301 \uff0c\u5728 x86 \u67b6\u6784\u4e0b\u4e0d\u652f\u6301\u4f7f\u7528 Docker \u4f5c\u4e3a\u8fd0\u884c\u65f6\uff09

                For more supported runtime versions, see Runtime Versions Supported on RedHatOS and Runtime Versions Supported on KylinOS.

                Note

                In offline installation mode, prepare the runtime offline packages for the relevant operating system in advance.

                "},{"location":"admin/kpanda/clusters/upgrade-cluster.html","title":"\u96c6\u7fa4\u5347\u7ea7","text":"

                The Kubernetes community releases a minor version every quarter, and each version is maintained for only about nine months. Versions that have reached end of maintenance no longer receive fixes for major bugs or security vulnerabilities. Upgrading a cluster manually is tedious and places a heavy workload on administrators.

                This section describes how to upgrade the Kubernetes version of a worker cluster online with one click through the Web UI. To upgrade the Kubernetes version of a worker cluster offline, refer to the Worker Cluster Offline Upgrade Guide.

                Danger

                After the upgrade, you cannot roll back to the previous version. Proceed with caution.

                Note

                • Kubernetes versions are written as x.y.z, where x is the major version, y the minor version, and z the patch version.
                • Upgrading a cluster across minor versions is not allowed; for example, you cannot upgrade directly from 1.23 to 1.25.
                • Integrated clusters do not support version upgrades. If Cluster Upgrade is missing from the left navigation bar, check whether the cluster is an integrated cluster.
                • The global service cluster can only be upgraded through the terminal.
                • When upgrading a worker cluster, its management cluster should already be integrated into the container management module and be running normally.
                • If you need to modify cluster parameters, you can do so by upgrading to the same version; see the steps below.
                1. In the cluster list, click the name of the target cluster.

                2. In the left navigation bar, click Cluster Operations -> Cluster Upgrade, then click Version Upgrade in the upper-right corner of the page.

                3. Select an available target version and enter the cluster name to confirm.

                  Note

                  If you want to modify cluster parameters through an upgrade, follow these steps:

                  1. Find the ConfigMap for the cluster. You can log in to a control plane node and run the following command to find the ConfigMap name referenced in varsConfRef.

                    kubectl get cluster.kubean.io <clustername> -o yaml\n
                  2. Modify the parameters in that ConfigMap as needed (see the command-line sketch at the end of this section).

                  3. Select the same version here and perform the upgrade. Once the upgrade completes, the cluster parameters are updated.

                4. After clicking OK, you can watch the upgrade progress of the cluster.

                5. The cluster upgrade is expected to take about 30 minutes; click the Live Logs button to view the detailed logs of the upgrade.
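                For reference, a hedged command-line sketch of the parameter-modification flow described above (the cluster name my-cluster is a placeholder; read the actual ConfigMap name and namespace from varsConfRef):

                # Locate the ConfigMap referenced in varsConfRef\nkubectl get cluster.kubean.io my-cluster -o yaml | grep -A 2 varsConfRef\n# Edit the parameters in that ConfigMap before re-running the same-version upgrade\nkubectl edit configmap <varsConfRef-name> -n <varsConfRef-namespace>\n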

                "},{"location":"admin/kpanda/configmaps-secrets/configmap-hot-loading.html","title":"configmap/secret \u70ed\u52a0\u8f7d","text":"

                Hot reloading of ConfigMaps/Secrets means that when a ConfigMap/Secret is mounted in a container as a data volume and its configuration changes, the container automatically picks up the updated configuration without restarting the Pod.

                "},{"location":"admin/kpanda/configmaps-secrets/configmap-hot-loading.html#_1","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                1. \u53c2\u8003\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d - \u5bb9\u5668\u914d\u7f6e\uff0c\u914d\u7f6e\u5bb9\u5668\u6570\u636e\u5b58\u50a8\uff0c\u9009\u62e9 Configmap \u3001 Configmap Key \u3001 Secret \u3001 Secret Key \u4f5c\u4e3a\u6570\u636e\u5377\u6302\u8f7d\u81f3\u5bb9\u5668\u3002

                  Note

                  Configuration files mounted via a subpath (SubPath) do not support hot reloading.

                2. Go to the Configurations & Secrets page, open the ConfigMap details page, find the corresponding container resource under Associated Resources, and click the Load Now button to enter the configuration hot-reload page.

                  Note

                  If your application automatically reads the updated ConfigMap/Secret configuration, there is no need to trigger a hot reload manually.

                3. In the hot-reload dialog, enter the command to execute inside the container and click OK to reload the configuration. For example, in an nginx container, run the nginx -s reload command with root privileges to reload the configuration.

                4. Observe the application reload in the web terminal that pops up.
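                As a minimal sketch (all names are illustrative), a ConfigMap mounted as a whole volume, without subPath, is the kind of mount that qualifies for hot reloading:

                apiVersion: v1\nkind: Pod\nmetadata:\n  name: nginx-hot-reload\nspec:\n  containers:\n  - name: nginx\n    image: nginx\n    volumeMounts:\n    - name: conf              # whole-volume mount: updated keys propagate into the container\n      mountPath: /etc/nginx/conf.d\n  volumes:\n  - name: conf\n    configMap:\n      name: nginx-conf        # illustrative ConfigMap name\n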

                "},{"location":"admin/kpanda/configmaps-secrets/create-configmap.html","title":"\u521b\u5efa\u914d\u7f6e\u9879","text":"

                \u914d\u7f6e\u9879\uff08ConfigMap\uff09\u4ee5\u952e\u503c\u5bf9\u7684\u5f62\u5f0f\u5b58\u50a8\u975e\u673a\u5bc6\u6027\u6570\u636e\uff0c\u5b9e\u73b0\u914d\u7f6e\u6570\u636e\u548c\u5e94\u7528\u4ee3\u7801\u76f8\u4e92\u89e3\u8026\u7684\u6548\u679c\u3002\u914d\u7f6e\u9879\u53ef\u7528\u4f5c\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u3001\u547d\u4ee4\u884c\u53c2\u6570\u6216\u8005\u5b58\u50a8\u5377\u4e2d\u7684\u914d\u7f6e\u6587\u4ef6\u3002

                Note

                • The data stored in a ConfigMap cannot exceed 1 MiB. To store larger data, consider mounting a storage volume or using a separate database or file service.

                • ConfigMaps provide no confidentiality or encryption. To store encrypted data, use a Secret or another third-party tool to keep the data private.

                Two creation methods are supported:

                • Graphical form creation
                • YAML creation
                "},{"location":"admin/kpanda/configmaps-secrets/create-configmap.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762

                • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u5c06\u7528\u6237\u6388\u6743\u4e3a NS Editor \u89d2\u8272 \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                "},{"location":"admin/kpanda/configmaps-secrets/create-configmap.html#_3","title":"\u56fe\u5f62\u5316\u8868\u5355\u521b\u5efa","text":"
                1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u67d0\u4e2a\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u914d\u7f6e\u4e0e\u5bc6\u94a5 -> \u914d\u7f6e\u9879 \uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 \u521b\u5efa\u914d\u7f6e\u9879 \u6309\u94ae\u3002

                3. \u5728 \u521b\u5efa\u914d\u7f6e\u9879 \u9875\u9762\u4e2d\u586b\u5199\u914d\u7f6e\u4fe1\u606f\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                  Note

                  Click Upload File to import an existing file from your local machine and quickly create a ConfigMap.

                4. After creation, click More on the right side of the ConfigMap to edit its YAML, update, export, delete, and perform other operations.

                "},{"location":"admin/kpanda/configmaps-secrets/create-configmap.html#yaml","title":"YAML \u521b\u5efa","text":"
                1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u67d0\u4e2a\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u914d\u7f6e\u4e0e\u5bc6\u94a5 -> \u914d\u7f6e\u9879 \uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 YAML \u521b\u5efa \u6309\u94ae\u3002

                3. \u586b\u5199\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684\u914d\u7f6e\u6587\u4ef6\uff0c\u7136\u540e\u5728\u5f39\u6846\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u3002

                  Note

                  • Click Import to import an existing file from your local machine and quickly create a ConfigMap.
                  • After filling in the data, click Download to save the configuration file locally.

                4. After creation, click More on the right side of the ConfigMap to edit its YAML, update, export, delete, and perform other operations.

                "},{"location":"admin/kpanda/configmaps-secrets/create-configmap.html#yaml_1","title":"\u914d\u7f6e\u9879 YAML \u793a\u4f8b","text":"
                ```yaml\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: kube-root-ca.crt\n  namespace: default\ndata:\n  version: '1.0'\n```\n
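                An equivalent ConfigMap can also be created from the command line; a hedged sketch (the name my-config is a placeholder, and the literal mirrors the example above):

                kubectl create configmap my-config --from-literal=version=1.0 -n default\n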

                Next step: Use ConfigMaps

                "},{"location":"admin/kpanda/configmaps-secrets/create-secret.html","title":"\u521b\u5efa\u5bc6\u94a5","text":"

                \u5bc6\u94a5\u662f\u4e00\u79cd\u7528\u4e8e\u5b58\u50a8\u548c\u7ba1\u7406\u5bc6\u7801\u3001OAuth \u4ee4\u724c\u3001SSH\u3001TLS \u51ed\u636e\u7b49\u654f\u611f\u4fe1\u606f\u7684\u8d44\u6e90\u5bf9\u8c61\u3002\u4f7f\u7528\u5bc6\u94a5\u610f\u5473\u7740\u60a8\u4e0d\u9700\u8981\u5728\u5e94\u7528\u7a0b\u5e8f\u4ee3\u7801\u4e2d\u5305\u542b\u654f\u611f\u7684\u673a\u5bc6\u6570\u636e\u3002

                Secret usage scenarios:

                • Used as container environment variables, providing information the container needs at runtime.
                • Used as a data volume for a Pod.
                • Used as image registry authentication credentials when the kubelet pulls container images.

                Two creation methods are supported:

                • Graphical form creation
                • YAML creation
                "},{"location":"admin/kpanda/configmaps-secrets/create-secret.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762

                • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u5c06\u7528\u6237\u6388\u6743\u4e3a NS Editor \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                "},{"location":"admin/kpanda/configmaps-secrets/create-secret.html#_3","title":"\u56fe\u5f62\u5316\u8868\u5355\u521b\u5efa","text":"
                1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u67d0\u4e2a\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u914d\u7f6e\u4e0e\u5bc6\u94a5 -> \u5bc6\u94a5 \uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 \u521b\u5efa\u5bc6\u94a5 \u6309\u94ae\u3002

                3. \u5728 \u521b\u5efa\u5bc6\u94a5 \u9875\u9762\u4e2d\u586b\u5199\u914d\u7f6e\u4fe1\u606f\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                  When filling in the configuration, note the following:

                  • The Secret name must be unique within the same namespace
                  • Secret types:
                    • Default (Opaque): the default Kubernetes Secret type, supporting arbitrary user-defined data.
                    • TLS (kubernetes.io/tls): credentials for TLS client or server data access.
                    • Image registry information (kubernetes.io/dockerconfigjson): credentials for accessing an image registry.
                    • Username and password (kubernetes.io/basic-auth): credentials for basic authentication.
                    • Custom: a type defined by the user according to business needs.
                  • Secret data: the data stored in the Secret; the parameters to fill in differ by type (see the command-line sketch after this list)
                    • For the default (Opaque) or custom type: multiple key-value pairs can be entered.
                    • For TLS (kubernetes.io/tls): the certificate credential and private key data are required. The certificate is a self-signed or CA-signed credential used for authentication; a certificate signing request is a request for a signature and must be signed with the private key.
                    • For image registry information (kubernetes.io/dockerconfigjson): the account and password of the private image registry are required.
                    • For username and password (kubernetes.io/basic-auth): a username and password must be specified.
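                  For reference, a hedged command-line sketch of creating the same Secret types with kubectl (all names, files, and credentials below are placeholders):

                  # Default (Opaque) / custom\nkubectl create secret generic my-opaque --from-literal=username=admin\n\n# TLS\nkubectl create secret tls my-tls --cert=tls.crt --key=tls.key\n\n# Image registry credentials (kubernetes.io/dockerconfigjson)\nkubectl create secret docker-registry my-registry --docker-server=registry.example.com --docker-username=user --docker-password=pass\n\n# Username and password (kubernetes.io/basic-auth)\nkubectl create secret generic my-basic-auth --type=kubernetes.io/basic-auth --from-literal=username=user --from-literal=password=pass\n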
                "},{"location":"admin/kpanda/configmaps-secrets/create-secret.html#yaml","title":"YAML \u521b\u5efa","text":"
                1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u67d0\u4e2a\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u914d\u7f6e\u4e0e\u5bc6\u94a5 -> \u5bc6\u94a5 \uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 YAML \u521b\u5efa \u6309\u94ae\u3002

                3. \u5728 YAML \u521b\u5efa \u9875\u9762\u4e2d\u586b\u5199 YAML \u914d\u7f6e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                  \u652f\u6301\u4ece\u672c\u5730\u5bfc\u5165 YAML \u6587\u4ef6\u6216\u5c06\u586b\u5199\u597d\u7684\u6587\u4ef6\u4e0b\u8f7d\u4fdd\u5b58\u5230\u672c\u5730\u3002

                "},{"location":"admin/kpanda/configmaps-secrets/create-secret.html#yaml_1","title":"\u5bc6\u94a5 YAML \u793a\u4f8b","text":"
                ```yaml\napiVersion: v1\nkind: Secret\nmetadata:\n  name: secretdemo\ntype: Opaque\ndata:\n  username: ******\n  password: ******\n```\n
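                The values under data must be base64-encoded; a hedged sketch of producing them (the credentials are placeholders):

                echo -n 'admin' | base64    # YWRtaW4=\necho -n 's3cret' | base64   # czNjcmV0\n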

                Next step: Use Secrets

                "},{"location":"admin/kpanda/configmaps-secrets/use-configmap.html","title":"\u4f7f\u7528\u914d\u7f6e\u9879","text":"

                \u914d\u7f6e\u9879\uff08ConfigMap\uff09\u662f Kubernetes \u7684\u4e00\u79cd API \u5bf9\u8c61\uff0c\u7528\u6765\u5c06\u975e\u673a\u5bc6\u6027\u7684\u6570\u636e\u4fdd\u5b58\u5230\u952e\u503c\u5bf9\u4e2d\uff0c\u53ef\u4ee5\u5b58\u50a8\u5176\u4ed6\u5bf9\u8c61\u6240\u9700\u8981\u4f7f\u7528\u7684\u914d\u7f6e\u3002 \u4f7f\u7528\u65f6\uff0c \u5bb9\u5668\u53ef\u4ee5\u5c06\u5176\u7528\u4f5c\u73af\u5883\u53d8\u91cf\u3001\u547d\u4ee4\u884c\u53c2\u6570\u6216\u8005\u5b58\u50a8\u5377\u4e2d\u7684\u914d\u7f6e\u6587\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528\u914d\u7f6e\u9879\uff0c\u80fd\u591f\u5c06\u914d\u7f6e\u6570\u636e\u548c\u5e94\u7528\u7a0b\u5e8f\u4ee3\u7801\u5206\u5f00\uff0c\u4e3a\u5e94\u7528\u914d\u7f6e\u7684\u4fee\u6539\u63d0\u4f9b\u66f4\u52a0\u7075\u6d3b\u7684\u9014\u5f84\u3002

                Note

                ConfigMaps provide no confidentiality or encryption. If the data to be stored is confidential, use a Secret or another third-party tool to keep the data private instead of a ConfigMap. Also, when a container uses a ConfigMap, the container and the ConfigMap must be in the same namespace of the same cluster.

                "},{"location":"admin/kpanda/configmaps-secrets/use-configmap.html#_2","title":"\u4f7f\u7528\u573a\u666f","text":"

                \u60a8\u53ef\u4ee5\u5728 Pod \u4e2d\u4f7f\u7528\u914d\u7f6e\u9879\uff0c\u6709\u591a\u79cd\u4f7f\u7528\u573a\u666f\uff0c\u4e3b\u8981\u5305\u62ec\uff1a

                • \u4f7f\u7528\u914d\u7f6e\u9879\u8bbe\u7f6e\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf

                • \u4f7f\u7528\u914d\u7f6e\u9879\u8bbe\u7f6e\u5bb9\u5668\u7684\u547d\u4ee4\u884c\u53c2\u6570

                • \u4f7f\u7528\u914d\u7f6e\u9879\u4f5c\u4e3a\u5bb9\u5668\u7684\u6570\u636e\u5377

                "},{"location":"admin/kpanda/configmaps-secrets/use-configmap.html#_3","title":"\u8bbe\u7f6e\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf","text":"

                \u60a8\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u5316\u754c\u9762\u6216\u8005\u7ec8\u7aef\u547d\u4ee4\u884c\u6765\u4f7f\u7528\u914d\u7f6e\u9879\u4f5c\u4e3a\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u3002

                Note

                ConfigMap import uses the entire ConfigMap as the value of environment variables; ConfigMap key-value import uses a single parameter in the ConfigMap as the value of an environment variable.

                "},{"location":"admin/kpanda/configmaps-secrets/use-configmap.html#_4","title":"\u56fe\u5f62\u5316\u754c\u9762\u64cd\u4f5c","text":"

                \u901a\u8fc7\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u53ef\u4ee5\u5728 \u73af\u5883\u53d8\u91cf \u754c\u9762\u901a\u8fc7\u9009\u62e9 \u914d\u7f6e\u9879\u5bfc\u5165 \u6216 \u914d\u7f6e\u9879\u952e\u503c\u5bfc\u5165 \u4e3a\u5bb9\u5668\u8bbe\u7f6e\u73af\u5883\u53d8\u91cf\u3002

                1. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u9875\u9762\u4e2d\uff0c\u5728 \u5bb9\u5668\u914d\u7f6e \u8fd9\u4e00\u6b65\u4e2d\uff0c\u9009\u62e9 \u73af\u5883\u53d8\u91cf \u914d\u7f6e\uff0c\u70b9\u51fb \u6dfb\u52a0\u73af\u5883\u53d8\u91cf \u6309\u94ae\u3002

                2. \u5728\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u5904\u9009\u62e9 \u914d\u7f6e\u9879\u5bfc\u5165 \u6216 \u914d\u7f6e\u9879\u952e\u503c\u5bfc\u5165 \u3002

                  • \u5f53\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u9009\u62e9\u4e3a \u914d\u7f6e\u9879\u5bfc\u5165 \u65f6\uff0c\u4f9d\u6b21\u8f93\u5165 \u53d8\u91cf\u540d \u3001 \u524d\u7f00 \u540d\u79f0\u3001 \u914d\u7f6e\u9879 \u7684\u540d\u79f0\u3002

                  • \u5f53\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u9009\u62e9\u4e3a \u914d\u7f6e\u9879\u952e\u503c\u5bfc\u5165 \u65f6\uff0c\u4f9d\u6b21\u8f93\u5165 \u53d8\u91cf\u540d \u3001 \u914d\u7f6e\u9879 \u540d\u79f0\u3001 \u952e \u7684\u540d\u79f0\u3002

                "},{"location":"admin/kpanda/configmaps-secrets/use-configmap.html#_5","title":"\u547d\u4ee4\u884c\u64cd\u4f5c","text":"

                \u60a8\u53ef\u4ee5\u5728\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\u5c06\u914d\u7f6e\u9879\u8bbe\u7f6e\u4e3a\u73af\u5883\u53d8\u91cf\uff0c\u4f7f\u7528 valueFrom \u53c2\u6570\u5f15\u7528 ConfigMap \u4e2d\u7684 Key/Value\u3002

                apiVersion: v1\nkind: Pod\nmetadata:\n  name: configmap-pod-1\nspec:\n  containers:\n    - name: test-container\n      image: busybox\n      command: [ \"/bin/sh\", \"-c\", \"env\" ]\n      env:\n        - name: SPECIAL_LEVEL_KEY\n          valueFrom:                  # (1)!\n            configMapKeyRef:\n              name: kpanda-configmap  # (2)!\n              key: SPECIAL_LEVEL      # (3)!\n  restartPolicy: Never\n
                1. Use valueFrom to have the env entry reference a value from the ConfigMap
                2. The name of the referenced ConfigMap
                3. The key referenced in the ConfigMap
                "},{"location":"admin/kpanda/configmaps-secrets/use-configmap.html#_6","title":"\u8bbe\u7f6e\u5bb9\u5668\u7684\u547d\u4ee4\u884c\u53c2\u6570","text":"

                \u60a8\u53ef\u4ee5\u4f7f\u7528\u914d\u7f6e\u9879\u8bbe\u7f6e\u5bb9\u5668\u4e2d\u7684\u547d\u4ee4\u6216\u8005\u53c2\u6570\u503c\uff0c\u4f7f\u7528\u73af\u5883\u53d8\u91cf\u66ff\u6362\u8bed\u6cd5 $(VAR_NAME) \u6765\u8fdb\u884c\u3002\u5982\u4e0b\u6240\u793a\u3002

                apiVersion: v1\nkind: Pod\nmetadata:\n  name: configmap-pod-3\nspec:\n  containers:\n    - name: test-container\n      image: busybox\n      command: [ \"/bin/sh\", \"-c\", \"echo $(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY)\" ]\n      env:\n        - name: SPECIAL_LEVEL_KEY\n          valueFrom:\n            configMapKeyRef:\n              name: kpanda-configmap\n              key: SPECIAL_LEVEL\n        - name: SPECIAL_TYPE_KEY\n          valueFrom:\n            configMapKeyRef:\n              name: kpanda-configmap\n              key: SPECIAL_TYPE\n  restartPolicy: Never\n

                After this Pod runs, it produces the following output.

                Hello Kpanda\n
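                For completeness, a hedged sketch of the referenced kpanda-configmap that would yield this output (the two values are inferred from the output above):

                apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: kpanda-configmap\ndata:\n  SPECIAL_LEVEL: Hello\n  SPECIAL_TYPE: Kpanda\n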
                "},{"location":"admin/kpanda/configmaps-secrets/use-configmap.html#_7","title":"\u7528\u4f5c\u5bb9\u5668\u6570\u636e\u5377","text":"

                \u60a8\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u5316\u754c\u9762\u6216\u8005\u7ec8\u7aef\u547d\u4ee4\u884c\u6765\u4f7f\u7528\u914d\u7f6e\u9879\u4f5c\u4e3a\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u3002

                "},{"location":"admin/kpanda/configmaps-secrets/use-configmap.html#_8","title":"\u56fe\u5f62\u5316\u64cd\u4f5c","text":"

                \u5728\u901a\u8fc7\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u5728 \u6570\u636e\u5b58\u50a8 \u754c\u9762\u9009\u62e9\u5b58\u50a8\u7c7b\u578b\u4e3a \u914d\u7f6e\u9879 \uff0c\u5c06\u914d\u7f6e\u9879\u4f5c\u4e3a\u5bb9\u5668\u7684\u6570\u636e\u5377\u3002

                1. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u9875\u9762\u4e2d\uff0c\u5728 \u5bb9\u5668\u914d\u7f6e \u8fd9\u4e00\u6b65\u4e2d\uff0c\u9009\u62e9 \u6570\u636e\u5b58\u50a8 \u914d\u7f6e\uff0c\u5728 \u8282\u70b9\u8def\u5f84\u6620\u5c04 \u5217\u8868\u70b9\u51fb \u6dfb\u52a0 \u6309\u94ae\u3002

                2. \u5728\u5b58\u50a8\u7c7b\u578b\u5904\u9009\u62e9 \u914d\u7f6e\u9879 \uff0c\u5e76\u4f9d\u6b21\u8f93\u5165 \u5bb9\u5668\u8def\u5f84 \u3001 \u5b50\u8def\u5f84 \u7b49\u4fe1\u606f\u3002

                "},{"location":"admin/kpanda/configmaps-secrets/use-configmap.html#_9","title":"\u547d\u4ee4\u884c\u64cd\u4f5c","text":"

                \u8981\u5728\u4e00\u4e2a Pod \u7684\u5b58\u50a8\u5377\u4e2d\u4f7f\u7528 ConfigMap\u3002

                \u4e0b\u9762\u662f\u4e00\u4e2a\u5c06 ConfigMap \u4ee5\u5377\u7684\u5f62\u5f0f\u8fdb\u884c\u6302\u8f7d\u7684 Pod \u793a\u4f8b\uff1a

                apiVersion: v1\nkind: Pod\nmetadata:\n  name: mypod\nspec:\n  containers:\n  - name: mypod\n    image: redis\n    volumeMounts:\n    - name: foo\n      mountPath: \"/etc/foo\"\n      readOnly: true\n  volumes:\n  - name: foo\n    configMap:\n      name: myconfigmap\n

                If a Pod has multiple containers, each container needs its own volumeMounts block, but you only need one spec.volumes entry per ConfigMap.

                Note

                When a ConfigMap is mounted as a container data volume, it can only be read as read-only files.

                "},{"location":"admin/kpanda/configmaps-secrets/use-secret.html","title":"\u4f7f\u7528\u5bc6\u94a5","text":"

                \u5bc6\u94a5\u662f\u4e00\u79cd\u7528\u4e8e\u5b58\u50a8\u548c\u7ba1\u7406\u5bc6\u7801\u3001OAuth \u4ee4\u724c\u3001SSH\u3001TLS \u51ed\u636e\u7b49\u654f\u611f\u4fe1\u606f\u7684\u8d44\u6e90\u5bf9\u8c61\u3002\u4f7f\u7528\u5bc6\u94a5\u610f\u5473\u7740\u60a8\u4e0d\u9700\u8981\u5728\u5e94\u7528\u7a0b\u5e8f\u4ee3\u7801\u4e2d\u5305\u542b\u654f\u611f\u7684\u673a\u5bc6\u6570\u636e\u3002

                "},{"location":"admin/kpanda/configmaps-secrets/use-secret.html#_2","title":"\u4f7f\u7528\u573a\u666f","text":"

                \u60a8\u53ef\u4ee5\u5728 Pod \u4e2d\u4f7f\u7528\u5bc6\u94a5\uff0c\u6709\u591a\u79cd\u4f7f\u7528\u573a\u666f\uff0c\u4e3b\u8981\u5305\u62ec\uff1a

                • \u4f5c\u4e3a\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u4f7f\u7528\uff0c\u63d0\u4f9b\u5bb9\u5668\u8fd0\u884c\u8fc7\u7a0b\u4e2d\u6240\u9700\u7684\u4e00\u4e9b\u5fc5\u8981\u4fe1\u606f\u3002
                • \u4f7f\u7528\u5bc6\u94a5\u4f5c\u4e3a Pod \u7684\u6570\u636e\u5377\u3002
                • \u5728 kubelet \u62c9\u53d6\u5bb9\u5668\u955c\u50cf\u65f6\u7528\u4f5c\u955c\u50cf\u4ed3\u5e93\u7684\u8eab\u4efd\u8ba4\u8bc1\u51ed\u8bc1\u4f7f\u7528\u3002
                "},{"location":"admin/kpanda/configmaps-secrets/use-secret.html#_3","title":"\u4f7f\u7528\u5bc6\u94a5\u8bbe\u7f6e\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf","text":"

                \u60a8\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u5316\u754c\u9762\u6216\u8005\u7ec8\u7aef\u547d\u4ee4\u884c\u6765\u4f7f\u7528\u5bc6\u94a5\u4f5c\u4e3a\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u3002

                Note

                Secret import uses the entire Secret as the value of environment variables; Secret key-value import uses a single parameter in the Secret as the value of an environment variable.

                "},{"location":"admin/kpanda/configmaps-secrets/use-secret.html#_4","title":"\u56fe\u5f62\u754c\u9762\u64cd\u4f5c","text":"

                \u5728\u901a\u8fc7\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u60a8\u53ef\u4ee5\u5728 \u73af\u5883\u53d8\u91cf \u754c\u9762\u901a\u8fc7\u9009\u62e9 \u5bc6\u94a5\u5bfc\u5165 \u6216 \u5bc6\u94a5\u952e\u503c\u5bfc\u5165 \u4e3a\u5bb9\u5668\u8bbe\u7f6e\u73af\u5883\u53d8\u91cf\u3002

                1. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u9875\u9762\u3002

                2. \u5728 \u5bb9\u5668\u914d\u7f6e \u9009\u62e9 \u73af\u5883\u53d8\u91cf \u914d\u7f6e\uff0c\u70b9\u51fb \u6dfb\u52a0\u73af\u5883\u53d8\u91cf \u6309\u94ae\u3002

                3. \u5728\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u5904\u9009\u62e9 \u5bc6\u94a5\u5bfc\u5165 \u6216 \u5bc6\u94a5\u952e\u503c\u5bfc\u5165 \u3002

                  • \u5f53\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u9009\u62e9\u4e3a \u5bc6\u94a5\u5bfc\u5165 \u65f6\uff0c\u4f9d\u6b21\u8f93\u5165 \u53d8\u91cf\u540d \u3001 \u524d\u7f00 \u3001 \u5bc6\u94a5 \u7684\u540d\u79f0\u3002

                  • \u5f53\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u9009\u62e9\u4e3a \u5bc6\u94a5\u952e\u503c\u5bfc\u5165 \u65f6\uff0c\u4f9d\u6b21\u8f93\u5165 \u53d8\u91cf\u540d \u3001 \u5bc6\u94a5 \u3001 \u952e \u7684\u540d\u79f0\u3002

                "},{"location":"admin/kpanda/configmaps-secrets/use-secret.html#_5","title":"\u547d\u4ee4\u884c\u64cd\u4f5c","text":"

                \u5982\u4e0b\u4f8b\u6240\u793a\uff0c\u60a8\u53ef\u4ee5\u5728\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\u5c06\u5bc6\u94a5\u8bbe\u7f6e\u4e3a\u73af\u5883\u53d8\u91cf\uff0c\u4f7f\u7528 valueFrom \u53c2\u6570\u5f15\u7528 Secret \u4e2d\u7684 Key/Value\u3002

                apiVersion: v1\nkind: Pod\nmetadata:\n  name: secret-env-pod\nspec:\n  containers:\n  - name: mycontainer\n    image: redis\n    env:\n      - name: SECRET_USERNAME\n        valueFrom:\n          secretKeyRef:\n            name: mysecret\n            key: username\n            optional: false # (1)!\n      - name: SECRET_PASSWORD\n        valueFrom:\n          secretKeyRef:\n            name: mysecret\n            key: password\n            optional: false # (2)!\n
                1. This is the default; it means \"mysecret\" must exist and contain a key named \"username\"
                2. This is the default; it means \"mysecret\" must exist and contain a key named \"password\"
                "},{"location":"admin/kpanda/configmaps-secrets/use-secret.html#pod","title":"\u4f7f\u7528\u5bc6\u94a5\u4f5c\u4e3a Pod \u7684\u6570\u636e\u5377","text":""},{"location":"admin/kpanda/configmaps-secrets/use-secret.html#_6","title":"\u56fe\u5f62\u754c\u9762\u64cd\u4f5c","text":"

                \u5728\u901a\u8fc7\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u5728 \u6570\u636e\u5b58\u50a8 \u754c\u9762\u9009\u62e9\u5b58\u50a8\u7c7b\u578b\u4e3a \u5bc6\u94a5 \uff0c\u5c06\u5bc6\u94a5\u4f5c\u4e3a\u5bb9\u5668\u7684\u6570\u636e\u5377\u3002

                1. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u9875\u9762\u3002

                2. \u5728 \u5bb9\u5668\u914d\u7f6e \u9009\u62e9 \u6570\u636e\u5b58\u50a8 \u914d\u7f6e\uff0c\u5728 \u8282\u70b9\u8def\u5f84\u6620\u5c04 \u5217\u8868\u70b9\u51fb \u6dfb\u52a0 \u6309\u94ae\u3002

                3. \u5728\u5b58\u50a8\u7c7b\u578b\u5904\u9009\u62e9 \u5bc6\u94a5 \uff0c\u5e76\u4f9d\u6b21\u8f93\u5165 \u5bb9\u5668\u8def\u5f84 \u3001 \u5b50\u8def\u5f84 \u7b49\u4fe1\u606f\u3002

                "},{"location":"admin/kpanda/configmaps-secrets/use-secret.html#_7","title":"\u547d\u4ee4\u884c\u64cd\u4f5c","text":"

                \u4e0b\u9762\u662f\u4e00\u4e2a\u901a\u8fc7\u6570\u636e\u5377\u6765\u6302\u8f7d\u540d\u4e3a mysecret \u7684 Secret \u7684 Pod \u793a\u4f8b\uff1a

                apiVersion: v1\nkind: Pod\nmetadata:\n  name: mypod\nspec:\n  containers:\n  - name: mypod\n    image: redis\n    volumeMounts:\n    - name: foo\n      mountPath: \"/etc/foo\"\n      readOnly: true\n  volumes:\n  - name: foo\n    secret:\n      secretName: mysecret\n      optional: false # (1)!\n
                1. The default setting; it means \"mysecret\" must already exist

                If the Pod contains multiple containers, each container needs its own volumeMounts block, but only one .spec.volumes entry is needed per Secret.

                "},{"location":"admin/kpanda/configmaps-secrets/use-secret.html#kubelet","title":"\u5728 kubelet \u62c9\u53d6\u5bb9\u5668\u955c\u50cf\u65f6\u7528\u4f5c\u955c\u50cf\u4ed3\u5e93\u7684\u8eab\u4efd\u8ba4\u8bc1\u51ed\u8bc1","text":"

                \u60a8\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u5316\u754c\u9762\u6216\u8005\u7ec8\u7aef\u547d\u4ee4\u884c\u6765\u4f7f\u7528\u5bc6\u94a5\u4f5c\u4e3a\u955c\u50cf\u4ed3\u5e93\u8eab\u4efd\u8ba4\u8bc1\u51ed\u8bc1\u3002

                "},{"location":"admin/kpanda/configmaps-secrets/use-secret.html#_8","title":"\u56fe\u5f62\u5316\u64cd\u4f5c","text":"

                \u5728\u901a\u8fc7\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u5728 \u6570\u636e\u5b58\u50a8 \u754c\u9762\u9009\u62e9\u5b58\u50a8\u7c7b\u578b\u4e3a \u5bc6\u94a5 \uff0c\u5c06\u5bc6\u94a5\u4f5c\u4e3a\u5bb9\u5668\u7684\u6570\u636e\u5377\u3002

                1. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u9875\u9762\u3002

                2. \u5728\u7b2c\u4e8c\u6b65 \u5bb9\u5668\u914d\u7f6e \u65f6\u9009\u62e9 \u57fa\u672c\u4fe1\u606f \u914d\u7f6e\uff0c\u70b9\u51fb \u9009\u62e9\u955c\u50cf \u6309\u94ae\u3002

                3. \u5728\u5f39\u6846\u7684 \u955c\u50cf\u4ed3\u5e93 \u4e0b\u62c9\u9009\u62e9\u79c1\u6709\u955c\u50cf\u4ed3\u5e93\u540d\u79f0\u3002\u5173\u4e8e\u79c1\u6709\u955c\u50cf\u5bc6\u94a5\u521b\u5efa\u8bf7\u67e5\u770b\u521b\u5efa\u5bc6\u94a5\u4e86\u89e3\u8be6\u60c5\u3002

                4. \u8f93\u5165\u79c1\u6709\u4ed3\u5e93\u5185\u7684\u955c\u50cf\u540d\u79f0\uff0c\u70b9\u51fb \u786e\u5b9a \uff0c\u5b8c\u6210\u955c\u50cf\u9009\u62e9\u3002

                Note

                When creating the Secret, make sure to enter the correct registry address, username, and password, and select the correct image name; otherwise the image cannot be pulled from the registry.
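                For reference, a hedged command-line sketch of the same flow (the registry address, credentials, and names are placeholders): create a kubernetes.io/dockerconfigjson Secret and reference it in the Pod via imagePullSecrets.

                kubectl create secret docker-registry my-registry-secret --docker-server=registry.example.com --docker-username=user --docker-password=pass\n

                apiVersion: v1\nkind: Pod\nmetadata:\n  name: private-image-pod\nspec:\n  containers:\n  - name: app\n    image: registry.example.com/team/app:v1   # image in the private registry\n  imagePullSecrets:\n  - name: my-registry-secret                  # the Secret created above\n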

                "},{"location":"admin/kpanda/custom-resources/create.html","title":"\u521b\u5efa\u81ea\u5b9a\u4e49\u8d44\u6e90 (CRD)","text":"

                \u5728 Kubernetes \u4e2d\u4e00\u5207\u5bf9\u8c61\u90fd\u88ab\u62bd\u8c61\u4e3a\u8d44\u6e90\uff0c\u5982 Pod\u3001Deployment\u3001Service\u3001Volume \u7b49\u662f Kubernetes \u63d0\u4f9b\u7684\u9ed8\u8ba4\u8d44\u6e90\uff0c \u8fd9\u4e3a\u6211\u4eec\u7684\u65e5\u5e38\u8fd0\u7ef4\u548c\u7ba1\u7406\u5de5\u4f5c\u63d0\u4f9b\u4e86\u91cd\u8981\u652f\u6491\uff0c\u4f46\u662f\u5728\u4e00\u4e9b\u7279\u6b8a\u7684\u573a\u666f\u4e2d\uff0c\u73b0\u6709\u7684\u9884\u7f6e\u8d44\u6e90\u5e76\u4e0d\u80fd\u6ee1\u8db3\u4e1a\u52a1\u7684\u9700\u8981\uff0c \u56e0\u6b64\u6211\u4eec\u5e0c\u671b\u53bb\u6269\u5c55 Kubernetes API \u7684\u80fd\u529b\uff0c\u81ea\u5b9a\u4e49\u8d44\u6e90\uff08CustomResourceDefinition, CRD\uff09\u6b63\u662f\u57fa\u4e8e\u8fd9\u6837\u7684\u9700\u6c42\u5e94\u8fd0\u800c\u751f\u3002

                \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u652f\u6301\u5bf9\u81ea\u5b9a\u4e49\u8d44\u6e90\u7684\u754c\u9762\u5316\u7ba1\u7406\uff0c\u4e3b\u8981\u529f\u80fd\u5982\u4e0b\uff1a

                • \u83b7\u53d6\u96c6\u7fa4\u4e0b\u81ea\u5b9a\u4e49\u8d44\u6e90\u5217\u8868\u548c\u8be6\u7ec6\u4fe1\u606f
                • \u57fa\u4e8e YAML \u521b\u5efa\u81ea\u5b9a\u8d44\u6e90
                • \u57fa\u4e8e YAML \u521b\u5efa\u81ea\u5b9a\u4e49\u8d44\u6e90\u793a\u4f8b CR\uff08Custom Resource\uff09
                • \u5220\u9664\u81ea\u5b9a\u4e49\u8d44\u6e90
                "},{"location":"admin/kpanda/custom-resources/create.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762

                • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u5c06\u7528\u6237\u6388\u6743\u4e3a Cluster Admin \u89d2\u8272 \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u6388\u6743

                "},{"location":"admin/kpanda/custom-resources/create.html#yaml","title":"\u901a\u8fc7 YAML \u521b\u5efa\u81ea\u5b9a\u4e49\u8d44\u6e90","text":"
                1. \u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u81ea\u5b9a\u4e49\u8d44\u6e90 \uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 YAML \u521b\u5efa \u6309\u94ae\u3002

                3. \u5728 YAML \u521b\u5efa \u9875\u9762\u4e2d\uff0c\u586b\u5199 YAML \u8bed\u53e5\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                4. \u8fd4\u56de\u81ea\u5b9a\u4e49\u8d44\u6e90\u5217\u8868\u9875\uff0c\u5373\u53ef\u67e5\u770b\u521a\u521a\u521b\u5efa\u7684\u540d\u4e3a crontabs.stable.example.com \u7684\u81ea\u5b9a\u4e49\u8d44\u6e90\u3002

                \u81ea\u5b9a\u4e49\u8d44\u6e90\u793a\u4f8b\uff1a

                CRD example
                apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  name: crontabs.stable.example.com\nspec:\n  group: stable.example.com\n  versions:\n    - name: v1\n      served: true\n      storage: true\n      schema:\n        openAPIV3Schema:\n          type: object\n          properties:\n            spec:\n              type: object\n              properties:\n                cronSpec:\n                  type: string\n                image:\n                  type: string\n                replicas:\n                  type: integer\n  scope: Namespaced\n  names:\n    plural: crontabs\n    singular: crontab\n    kind: CronTab\n    shortNames:\n    - ct\n
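                A hedged way to verify the CRD from the command line (the resource and short names come from the example above):

                kubectl get crd crontabs.stable.example.com\nkubectl get crontabs    # or use the short name: kubectl get ct\n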
                "},{"location":"admin/kpanda/custom-resources/create.html#yaml_1","title":"\u901a\u8fc7 YAML \u521b\u5efa\u81ea\u5b9a\u4e49\u8d44\u6e90\u793a\u4f8b","text":"
                1. \u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u81ea\u5b9a\u4e49\u8d44\u6e90 \uff0c\u8fdb\u5165\u81ea\u5b9a\u4e49\u8d44\u6e90\u5217\u8868\u9875\u9762\u3002

                3. \u70b9\u51fb\u540d\u4e3a crontabs.stable.example.com \u7684\u81ea\u5b9a\u4e49\u8d44\u6e90\uff0c\u8fdb\u5165\u8be6\u60c5\uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 YAML \u521b\u5efa \u6309\u94ae\u3002

                4. \u5728 YAML \u521b\u5efa \u9875\u9762\u4e2d\uff0c\u586b\u5199 YAML \u8bed\u53e5\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                5. \u8fd4\u56de crontabs.stable.example.com \u7684\u8be6\u60c5\u9875\u9762\uff0c\u5373\u53ef\u67e5\u770b\u521a\u521a\u521b\u5efa\u7684\u540d\u4e3a my-new-cron-object \u7684\u81ea\u5b9a\u4e49\u8d44\u6e90\u3002

                CR \u793a\u4f8b\uff1a

                CR example
                apiVersion: \"stable.example.com/v1\"\nkind: CronTab\nmetadata:\n  name: my-new-cron-object\nspec:\n  cronSpec: \"* * * * */5\"\n  image: my-awesome-cron-image\n
                "},{"location":"admin/kpanda/gpu/index.html","title":"GPU \u7ba1\u7406\u6982\u8ff0","text":"

                \u672c\u6587\u4ecb\u7ecd \u7b97\u4e30 AI \u7b97\u529b\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\u5bf9 GPU\u4e3a\u4ee3\u8868\u7684\u5f02\u6784\u8d44\u6e90\u7edf\u4e00\u8fd0\u7ef4\u7ba1\u7406\u80fd\u529b\u3002

                "},{"location":"admin/kpanda/gpu/index.html#_1","title":"\u80cc\u666f","text":"

                \u968f\u7740 AI \u5e94\u7528\u3001\u5927\u6a21\u578b\u3001\u4eba\u5de5\u667a\u80fd\u3001\u81ea\u52a8\u9a7e\u9a76\u7b49\u65b0\u5174\u6280\u672f\u7684\u5feb\u901f\u53d1\u5c55\uff0c\u4f01\u4e1a\u9762\u4e34\u7740\u8d8a\u6765\u8d8a\u591a\u7684\u8ba1\u7b97\u5bc6\u96c6\u578b\u4efb\u52a1\u548c\u6570\u636e\u5904\u7406\u9700\u6c42\u3002 \u4ee5 CPU \u4e3a\u4ee3\u8868\u7684\u4f20\u7edf\u8ba1\u7b97\u67b6\u6784\u5df2\u65e0\u6cd5\u6ee1\u8db3\u4f01\u4e1a\u65e5\u76ca\u589e\u957f\u7684\u8ba1\u7b97\u9700\u6c42\u3002\u6b64\u65f6\uff0c\u4ee5 GPU \u4e3a\u4ee3\u8868\u7684\u5f02\u6784\u8ba1\u7b97\u56e0\u5728\u5904\u7406\u5927\u89c4\u6a21\u6570\u636e\u3001\u8fdb\u884c\u590d\u6742\u8ba1\u7b97\u548c\u5b9e\u65f6\u56fe\u5f62\u6e32\u67d3\u65b9\u9762\u5177\u6709\u72ec\u7279\u7684\u4f18\u52bf\u88ab\u5e7f\u6cdb\u5e94\u7528\u3002

                \u4e0e\u6b64\u540c\u65f6\uff0c\u7531\u4e8e\u7f3a\u4e4f\u5f02\u6784\u8d44\u6e90\u8c03\u5ea6\u7ba1\u7406\u7b49\u65b9\u9762\u7684\u7ecf\u9a8c\u548c\u4e13\u4e1a\u7684\u89e3\u51b3\u65b9\u6848\uff0c\u5bfc\u81f4\u4e86 GPU \u8bbe\u5907\u7684\u8d44\u6e90\u5229\u7528\u7387\u6781\u4f4e\uff0c\u7ed9\u4f01\u4e1a\u5e26\u6765\u4e86\u9ad8\u6602\u7684 AI \u751f\u4ea7\u6210\u672c\u3002 \u5982\u4f55\u964d\u672c\u589e\u6548\uff0c\u63d0\u9ad8 GPU \u7b49\u5f02\u6784\u8d44\u6e90\u7684\u5229\u7528\u6548\u7387\uff0c\u6210\u4e3a\u4e86\u5f53\u524d\u4f17\u591a\u4f01\u4e1a\u4e9f\u9700\u8de8\u8d8a\u7684\u4e00\u9053\u96be\u9898\u3002

                "},{"location":"admin/kpanda/gpu/index.html#gpu_1","title":"GPU \u80fd\u529b\u4ecb\u7ecd","text":"

                \u7b97\u4e30 AI \u7b97\u529b\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\u652f\u6301\u5bf9 GPU\u3001NPU \u7b49\u5f02\u6784\u8d44\u6e90\u8fdb\u884c\u7edf\u4e00\u8c03\u5ea6\u548c\u8fd0\u7ef4\u7ba1\u7406\uff0c\u5145\u5206\u91ca\u653e GPU \u8d44\u6e90\u7b97\u529b\uff0c\u52a0\u901f\u4f01\u4e1a AI \u7b49\u65b0\u5174\u5e94\u7528\u53d1\u5c55\u3002GPU \u7ba1\u7406\u80fd\u529b\u5982\u4e0b\uff1a

                • \u652f\u6301\u7edf\u4e00\u7eb3\u7ba1 NVIDIA\u3001\u534e\u4e3a\u6607\u817e\u3001\u5929\u6570\u7b49\u56fd\u5185\u5916\u5382\u5546\u7684\u5f02\u6784\u8ba1\u7b97\u8d44\u6e90\u3002
                • \u652f\u6301\u540c\u4e00\u96c6\u7fa4\u591a\u5361\u5f02\u6784\u8c03\u5ea6\uff0c\u5e76\u652f\u6301\u96c6\u7fa4 GPU \u5361\u81ea\u52a8\u8bc6\u522b\u3002
                • \u652f\u6301 NVIDIA GPU\u3001vGPU\u3001MIG \u7b49 GPU \u539f\u751f\u7ba1\u7406\u65b9\u6848\uff0c\u5e76\u63d0\u4f9b\u4e91\u539f\u751f\u80fd\u529b\u3002
                • \u652f\u6301\u5355\u5757\u7269\u7406\u5361\u5207\u5206\u7ed9\u4e0d\u540c\u7684\u79df\u6237\u4f7f\u7528\uff0c\u5e76\u652f\u6301\u5bf9\u79df\u6237\u548c\u5bb9\u5668\u4f7f\u7528 GPU \u8d44\u6e90\u6309\u7167\u7b97\u529b\u3001\u663e\u5b58\u8fdb\u884c GPU \u8d44\u6e90\u914d\u989d\u3002
                • \u652f\u6301\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u5e94\u7528\u7b49\u591a\u7ef4\u5ea6 GPU \u8d44\u6e90\u76d1\u63a7\uff0c\u5e2e\u52a9\u8fd0\u7ef4\u4eba\u5458\u7ba1\u7406 GPU \u8d44\u6e90\u3002
                • \u517c\u5bb9 TensorFlow\u3001pytorch \u7b49\u591a\u79cd\u8bad\u7ec3\u6846\u67b6\u3002
                "},{"location":"admin/kpanda/gpu/index.html#gpu-operator","title":"GPU Operator \u4ecb\u7ecd","text":"

                \u540c\u666e\u901a\u8ba1\u7b97\u673a\u786c\u4ef6\u4e00\u6837\uff0cNVIDIA GPU \u5361\u4f5c\u4e3a\u7269\u7406\u786c\u4ef6\uff0c\u5fc5\u987b\u5b89\u88c5 NVIDIA GPU \u9a71\u52a8\u540e\u624d\u80fd\u4f7f\u7528\u3002 \u4e3a\u4e86\u964d\u4f4e\u7528\u6237\u5728 kuberneets \u4e0a\u4f7f\u7528 GPU \u7684\u6210\u672c\uff0cNVIDIA \u5b98\u65b9\u63d0\u4f9b\u4e86 NVIDIA GPU Operator \u7ec4\u4ef6\u6765\u7ba1\u7406\u4f7f\u7528 NVIDIA GPU \u6240\u4f9d\u8d56\u7684\u5404\u79cd\u7ec4\u4ef6\u3002 \u8fd9\u4e9b\u7ec4\u4ef6\u5305\u62ec NVIDIA \u9a71\u52a8\u7a0b\u5e8f\uff08\u7528\u4e8e\u542f\u7528 CUDA\uff09\u3001NVIDIA \u5bb9\u5668\u8fd0\u884c\u65f6\u3001GPU \u8282\u70b9\u6807\u8bb0\u3001\u57fa\u4e8e DCGM \u7684\u76d1\u63a7\u7b49\u3002 \u7406\u8bba\u4e0a\u6765\u8bf4\u7528\u6237\u53ea\u9700\u8981\u5c06 GPU \u5361\u63d2\u5728\u5df2\u7ecf\u88ab kubernetes \u6240\u7eb3\u7ba1\u7684\u8ba1\u7b97\u8bbe\u5907\u4e0a\uff0c\u7136\u540e\u901a\u8fc7 GPU Operator \u5c31\u80fd\u4f7f\u7528 NVIDIA GPU \u7684\u6240\u6709\u80fd\u529b\u4e86\u3002 \u4e86\u89e3\u66f4\u591a NVIDIA GPU Operator \u76f8\u5173\u4fe1\u606f\uff0c\u8bf7\u53c2\u8003 NVIDIA \u5b98\u65b9\u6587\u6863\u3002 \u5982\u4f55\u90e8\u7f72\u8bf7\u53c2\u8003 GPU Operator \u79bb\u7ebf\u5b89\u88c5

                NVIDIA GPU Operator \u67b6\u6784\u56fe\uff1a

                "},{"location":"admin/kpanda/gpu/FAQ.html","title":"GPU \u76f8\u5173 FAQ","text":""},{"location":"admin/kpanda/gpu/FAQ.html#pod-nvidia-smi-gpu","title":"Pod \u5185 nvidia-smi \u770b\u4e0d\u5230 GPU \u8fdb\u7a0b","text":"

                Q: \u5728\u4f7f\u7528 GPU \u7684 Pod \u5185\u6267\u884c nvidia-smi \u547d\u4ee4\u770b\u4e0d\u5230\u4f7f\u7528 GPU \u7684\u8fdb\u7a0b\u4fe1\u606f\uff0c\u5305\u62ec\u6574\u5361\u6a21\u5f0f\u3001vGPU \u6a21\u5f0f\u7b49\u3002

                A: \u56e0\u4e3a\u6709 PID namespace \u9694\u79bb\uff0c\u5bfc\u81f4\u5728 Pod \u5185\u67e5\u770b\u4e0d\u5230 GPU \u8fdb\u7a0b\uff0c\u5982\u679c\u8981\u67e5\u770b GPU \u8fdb\u7a0b\u6709\u5982\u4e0b\u51e0\u79cd\u65b9\u6cd5\uff1a

                • \u5728\u4f7f\u7528 GPU \u7684\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e hostPID: true\uff0c\u4f7f\u5176\u53ef\u4ee5\u67e5\u770b\u5230\u5bbf\u4e3b\u673a\u4e0a\u7684 PID
                • \u5728 gpu-operator \u7684 driver Pod \u4e2d\u6267\u884c nvidia-smi \u547d\u4ee4\u67e5\u770b\u8fdb\u7a0b
                • \u5728\u5bbf\u4e3b\u673a\u4e0a\u6267\u884c chroot /run/nvidia/driver nvidia-smi \u547d\u4ee4\u67e5\u770b\u8fdb\u7a0b
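
                A minimal sketch of the first method, assuming a simple GPU workload; the image name is a placeholder:

                apiVersion: v1\nkind: Pod\nmetadata:\n  name: gpu-demo-hostpid\nspec:\n  hostPID: true  # share the host PID namespace so nvidia-smi inside the Pod can see GPU processes\n  containers:\n    - name: cuda\n      image: nvcr.io/nvidia/cuda:12.2.0-base-ubuntu22.04  # placeholder image\n      command: [\"sleep\", \"infinity\"]\n      resources:\n        limits:\n          nvidia.com/gpu: 1\n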
                "},{"location":"admin/kpanda/gpu/Iluvatar_usage.html","title":"App \u4f7f\u7528\u5929\u6570\u667a\u82af\uff08Iluvatar\uff09GPU","text":"

                \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528\u5929\u6570\u667a\u82af\u865a\u62df GPU\u3002

                "},{"location":"admin/kpanda/gpu/Iluvatar_usage.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                • \u5df2\u7ecf\u90e8\u7f72 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 \u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u4e14\u5e73\u53f0\u8fd0\u884c\u6b63\u5e38\u3002
                • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                • \u5f53\u524d\u96c6\u7fa4\u5df2\u5b89\u88c5\u5929\u6570\u667a\u82af GPU \u9a71\u52a8\uff0c\u9a71\u52a8\u5b89\u88c5\u8bf7\u53c2\u8003\u5929\u6570\u667a\u82af\u5b98\u65b9\u6587\u6863\u3002
                • \u5f53\u524d\u96c6\u7fa4\u5185 GPU \u5361\u672a\u8fdb\u884c\u4efb\u4f55\u865a\u62df\u5316\u64cd\u4f5c\u4e14\u672a\u88ab\u5176\u5b83 App \u5360\u7528\u3002
                "},{"location":"admin/kpanda/gpu/Iluvatar_usage.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"admin/kpanda/gpu/Iluvatar_usage.html#_3","title":"\u4f7f\u7528\u754c\u9762\u914d\u7f6e","text":"
                1. \u786e\u8ba4\u96c6\u7fa4\u662f\u5426\u5df2\u68c0\u6d4b GPU \u5361\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u81ea\u52a8\u542f\u7528\u5e76\u81ea\u52a8\u68c0\u6d4b\u5bf9\u5e94 GPU \u7c7b\u578b\u3002 \u76ee\u524d\u96c6\u7fa4\u4f1a\u81ea\u52a8\u542f\u7528 GPU \uff0c\u5e76\u4e14\u8bbe\u7f6e GPU \u7c7b\u578b\u4e3a Iluvatar \u3002

                2. \u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u9009\u62e9\u7c7b\u578b\uff08Iluvatar\uff09\u4e4b\u540e\uff0c\u9700\u8981\u914d\u7f6e App \u4f7f\u7528\u7684 GPU \u8d44\u6e90\uff1a

                  • \u7269\u7406\u5361\u6570\u91cf\uff08iluvatar.ai/vcuda-core\uff09\uff1a\u8868\u793a\u5f53\u524d Pod \u9700\u8981\u6302\u8f7d\u51e0\u5f20\u7269\u7406\u5361\uff0c\u8f93\u5165\u503c\u5fc5\u987b\u4e3a\u6574\u6570\u4e14 \u5c0f\u4e8e\u7b49\u4e8e \u5bbf\u4e3b\u673a\u4e0a\u7684\u5361\u6570\u91cf\u3002
                  • \u663e\u5b58\u4f7f\u7528\u6570\u91cf\uff08iluvatar.ai/vcuda-memory\uff09\uff1a\u8868\u793a\u6bcf\u5f20\u5361\u5360\u7528\u7684 GPU \u663e\u5b58\uff0c\u503c\u5355\u4f4d\u4e3a MB\uff0c\u6700\u5c0f\u503c\u4e3a 1\uff0c\u6700\u5927\u503c\u4e3a\u6574\u5361\u7684\u663e\u5b58\u503c\u3002

                  \u5982\u679c\u4e0a\u8ff0\u503c\u914d\u7f6e\u7684\u6709\u95ee\u9898\u5219\u4f1a\u51fa\u73b0\u8c03\u5ea6\u5931\u8d25\uff0c\u8d44\u6e90\u5206\u914d\u4e0d\u4e86\u7684\u60c5\u51b5\u3002

                "},{"location":"admin/kpanda/gpu/Iluvatar_usage.html#yaml","title":"\u4f7f\u7528 YAML \u914d\u7f6e","text":"

                \u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u7533\u8bf7 GPU \u8d44\u6e90\uff0c\u5728\u8d44\u6e90\u7533\u8bf7\u548c\u9650\u5236\u914d\u7f6e\u4e2d\u589e\u52a0iluvatar.ai/vcuda-core: 1\u3001iluvatar.ai/vcuda-memory: 200 \u53c2\u6570\uff0c\u914d\u7f6e App \u4f7f\u7528\u7269\u7406\u5361\u7684\u8d44\u6e90\u3002

                apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-iluvatar-gpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-iluvatar-gpu-demo\n  template:\n    metadata:\n      labels:\n        app: full-iluvatar-gpu-demo\n    spec:\n      containers:\n      - image: nginx:perl\n        name: container-0\n        resources:\n          limits:\n            cpu: 250m\n            iluvatar.ai/vcuda-core: '1'\n            iluvatar.ai/vcuda-memory: '200'\n            memory: 512Mi\n          requests:\n            cpu: 250m\n            memory: 512Mi\n      imagePullSecrets:\n      - name: default-secret\n
                "},{"location":"admin/kpanda/gpu/dynamic-regulation.html","title":"GPU \u8d44\u6e90\u52a8\u6001\u8c03\u8282","text":"

                \u63d0\u4f9b GPU \u8d44\u6e90\u52a8\u6001\u8c03\u6574\u529f\u80fd\uff0c\u5141\u8bb8\u60a8\u5728\u65e0\u9700\u91cd\u65b0\u52a0\u8f7d\u3001\u91cd\u7f6e\u6216\u91cd\u542f\u6574\u4e2a\u8fd0\u884c\u73af\u5883\u7684\u60c5\u51b5\u4e0b\uff0c\u5bf9\u5df2\u7ecf\u5206\u914d\u7684 vGPU \u8d44\u6e90\u8fdb\u884c\u5b9e\u65f6\u3001\u52a8\u6001\u7684\u8c03\u6574\u3002 \u8fd9\u4e00\u529f\u80fd\u65e8\u5728\u6700\u5927\u7a0b\u5ea6\u5730\u51cf\u5c11\u5bf9\u4e1a\u52a1\u8fd0\u884c\u7684\u5f71\u54cd\uff0c\u786e\u4fdd\u60a8\u7684\u4e1a\u52a1\u80fd\u591f\u6301\u7eed\u7a33\u5b9a\u5730\u8fd0\u884c\uff0c\u540c\u65f6\u6839\u636e\u5b9e\u9645\u9700\u6c42\u7075\u6d3b\u8c03\u6574 GPU \u8d44\u6e90\u3002

                "},{"location":"admin/kpanda/gpu/dynamic-regulation.html#_1","title":"\u4f7f\u7528\u573a\u666f","text":"
                • \u5f39\u6027\u8d44\u6e90\u5206\u914d \uff1a\u5f53\u4e1a\u52a1\u9700\u6c42\u6216\u5de5\u4f5c\u8d1f\u8f7d\u53d1\u751f\u53d8\u5316\u65f6\uff0c\u53ef\u4ee5\u5feb\u901f\u8c03\u6574 GPU \u8d44\u6e90\u4ee5\u6ee1\u8db3\u65b0\u7684\u6027\u80fd\u8981\u6c42\u3002
                • \u5373\u65f6\u54cd\u5e94 \uff1a\u5728\u9762\u5bf9\u7a81\u53d1\u7684\u9ad8\u8d1f\u8f7d\u6216\u4e1a\u52a1\u9700\u6c42\u65f6\uff0c\u53ef\u4ee5\u8fc5\u901f\u589e\u52a0 GPU \u8d44\u6e90\u800c\u65e0\u9700\u4e2d\u65ad\u4e1a\u52a1\u8fd0\u884c\uff0c\u4ee5\u786e\u4fdd\u670d\u52a1\u7684\u7a33\u5b9a\u6027\u548c\u6027\u80fd\u3002
                "},{"location":"admin/kpanda/gpu/dynamic-regulation.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                \u4ee5\u4e0b\u662f\u4e00\u4e2a\u5177\u4f53\u7684\u64cd\u4f5c\u793a\u4f8b\uff0c\u5c55\u793a\u5982\u4f55\u5728\u4e0d\u91cd\u542f vGPU Pod \u7684\u60c5\u51b5\u4e0b\u52a8\u6001\u8c03\u6574 vGPU \u7684\u7b97\u529b\u548c\u663e\u5b58\u8d44\u6e90\uff1a

                "},{"location":"admin/kpanda/gpu/dynamic-regulation.html#vgpu-pod","title":"\u521b\u5efa\u4e00\u4e2a vGPU Pod","text":"

                \u9996\u5148\uff0c\u6211\u4eec\u4f7f\u7528\u4ee5\u4e0b YAML \u521b\u5efa\u4e00\u4e2a vGPU Pod\uff0c\u5176\u7b97\u529b\u521d\u59cb\u4e0d\u9650\u5236\uff0c\u663e\u5b58\u9650\u5236\u4e3a 200Mb\u3002

                kind: Deployment\napiVersion: apps/v1\nmetadata:\n  name: gpu-burn-test\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: gpu-burn-test\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: gpu-burn-test\n    spec:\n      containers:\n        - name: container-1\n          image: docker.io/chrstnhntschl/gpu_burn:latest\n          command:\n            - sleep\n            - '100000'\n          resources:\n            limits:\n              cpu: 1m\n              memory: 1Gi\n              nvidia.com/gpucores: '0'\n              nvidia.com/gpumem: '200'\n              nvidia.com/vgpu: '1'\n

                Before adjusting, view the GPU resources allocated in the Pod:
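
                For example, run nvidia-smi from inside the Pod (a sketch; <pod-name> stands for the Pod created above):

                kubectl exec -it <pod-name> -- nvidia-smi\n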

                "},{"location":"admin/kpanda/gpu/dynamic-regulation.html#_3","title":"\u52a8\u6001\u8c03\u6574\u7b97\u529b","text":"

                \u5982\u679c\u9700\u8981\u4fee\u6539\u7b97\u529b\u4e3a 10%\uff0c\u53ef\u4ee5\u6309\u7167\u4ee5\u4e0b\u6b65\u9aa4\u64cd\u4f5c\uff1a

                1. \u8fdb\u5165\u5bb9\u5668\uff1a

                  kubectl exec -it <pod-name> -- /bin/bash\n
                2. Run:

                  export CUDA_DEVICE_SM_LIMIT=10\n
                3. Run directly in the current terminal:

                  ./gpu_burn 60\n

                  The change takes effect immediately. Note that you must not exit the current Bash terminal.
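
                The three steps above can also be combined into one command from outside the Pod (a sketch under the same assumptions; the limit applies only to processes started in this shell):

                kubectl exec -it <pod-name> -- sh -c 'export CUDA_DEVICE_SM_LIMIT=10 && ./gpu_burn 60'\n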

                "},{"location":"admin/kpanda/gpu/dynamic-regulation.html#_4","title":"\u52a8\u6001\u8c03\u6574\u663e\u5b58","text":"

                \u5982\u679c\u9700\u8981\u4fee\u6539\u663e\u5b58\u4e3a 300 MB\uff0c\u53ef\u4ee5\u6309\u7167\u4ee5\u4e0b\u6b65\u9aa4\u64cd\u4f5c\uff1a

                1. \u8fdb\u5165\u5bb9\u5668\uff1a

                  kubectl exec -it <pod-name> -- /bin/bash\n
                2. Run the following commands to set the GPU memory limit:

                  export CUDA_DEVICE_MEMORY_LIMIT_0=300m\nexport CUDA_DEVICE_MEMORY_SHARED_CACHE=/usr/local/vgpu/d.cache\n

                  Note

                  Each time you change the memory size, the file name d.cache must also be changed, for example to a.cache, 1.cache, etc., to avoid cache conflicts.

                3. Run directly in the current terminal:

                  ./gpu_burn 60\n

                  The change takes effect immediately. Likewise, do not exit the current Bash terminal.

                After adjusting, view the GPU resources allocated in the Pod:

                Through the steps above, you can dynamically adjust a vGPU Pod's compute power and GPU memory without restarting it, meeting business needs more flexibly and improving resource utilization.

                "},{"location":"admin/kpanda/gpu/gpu_matrix.html","title":"GPU \u652f\u6301\u77e9\u9635","text":"

                \u672c\u9875\u8bf4\u660e\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u7684 GPU \u53ca\u64cd\u4f5c\u7cfb\u7edf\u6240\u5bf9\u5e94\u7684\u77e9\u9635\u3002

                "},{"location":"admin/kpanda/gpu/gpu_matrix.html#nvidia-gpu","title":"NVIDIA GPU","text":"GPU \u5382\u5546\u53ca\u7c7b\u578b \u652f\u6301 GPU \u578b\u53f7 \u9002\u914d\u7684\u64cd\u4f5c\u7cfb\u7edf\uff08\u5728\u7ebf\uff09 \u63a8\u8350\u5185\u6838 \u63a8\u8350\u7684\u64cd\u4f5c\u7cfb\u7edf\u53ca\u5185\u6838 \u5b89\u88c5\u6587\u6863 NVIDIA GPU\uff08\u6574\u5361/vGPU\uff09 NVIDIA Fermi (2.1) \u67b6\u6784 CentOS 7 Kernel 3.10.0-123 ~ 3.10.0-1160\u5185\u6838\u53c2\u8003\u6587\u6863\u5efa\u8bae\u4f7f\u7528\u64cd\u4f5c\u7cfb\u7edf\u5bf9\u5e94 Kernel \u7248\u672c \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a 3.10.0-1160 GPU Operator \u79bb\u7ebf\u5b89\u88c5 NVIDIA GeForce 400 \u7cfb\u5217 CentOS 8 Kernel 4.18.0-80 ~ 4.18.0-348 NVIDIA Quadro 4000 \u7cfb\u5217 Ubuntu 20.04 Kernel 5.4 NVIDIA Tesla 20 \u7cfb\u5217 Ubuntu 22.04 Kernel 5.19 NVIDIA Ampere \u67b6\u6784\u7cfb\u5217(A100;A800;H100) RHEL 7 Kernel 3.10.0-123 ~ 3.10.0-1160 RHEL 8 Kernel 4.18.0-80 ~ 4.18.0-348 NVIDIA MIG NVIDIA Ampere \u67b6\u6784\u7cfb\u5217\uff08A100\u3001A800\u3001H100\uff09 CentOS 7 Kernel 3.10.0-123 ~ 3.10.0-1160 \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a3.10.0-1160 GPU Operator \u79bb\u7ebf\u5b89\u88c5 CentOS 8 Kernel 4.18.0-80 ~ 4.18.0-348 Ubuntu 20.04 Kernel 5.4 Ubuntu 22.04 Kernel 5.19 RHEL 7 Kernel 3.10.0-123 ~ 3.10.0-1160 RHEL 8 Kernel 4.18.0-80 ~ 4.18.0-348"},{"location":"admin/kpanda/gpu/gpu_matrix.html#ascendnpu","title":"\u6607\u817e\uff08Ascend\uff09NPU","text":"GPU \u5382\u5546\u53ca\u7c7b\u578b \u652f\u6301 NPU \u578b\u53f7 \u9002\u914d\u7684\u64cd\u4f5c\u7cfb\u7edf\uff08\u5728\u7ebf\uff09 \u63a8\u8350\u5185\u6838 \u63a8\u8350\u7684\u64cd\u4f5c\u7cfb\u7edf\u53ca\u5185\u6838 \u5b89\u88c5\u6587\u6863 \u6607\u817e\uff08Ascend 310\uff09 Ascend 310 Ubuntu 20.04 \u8be6\u60c5\u53c2\u8003\uff1a\u5185\u6838\u7248\u672c\u8981\u6c42 \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a3.10.0-1160 300 \u548c 310P \u9a71\u52a8\u6587\u6863 Ascend 310P\uff1b CentOS 7.6 CentOS 8.2 KylinV10SP1 \u64cd\u4f5c\u7cfb\u7edf openEuler \u64cd\u4f5c\u7cfb\u7edf \u6607\u817e\uff08Ascend 910\uff09 Ascend 910B Ubuntu 20.04 \u8be6\u60c5\u53c2\u8003\u5185\u6838\u7248\u672c\u8981\u6c42 \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a3.10.0-1160 910 \u9a71\u52a8\u6587\u6863 CentOS 7.6 CentOS 8.2 KylinV10SP1 \u64cd\u4f5c\u7cfb\u7edf openEuler \u64cd\u4f5c\u7cfb\u7edf"},{"location":"admin/kpanda/gpu/gpu_matrix.html#iluvatargpu","title":"\u5929\u6570\u667a\u82af\uff08Iluvatar\uff09GPU","text":"GPU \u5382\u5546\u53ca\u7c7b\u578b \u652f\u6301\u7684 GPU \u578b\u53f7 \u9002\u914d\u7684\u64cd\u4f5c\u7cfb\u7edf\uff08\u5728\u7ebf\uff09 \u63a8\u8350\u5185\u6838 \u63a8\u8350\u7684\u64cd\u4f5c\u7cfb\u7edf\u53ca\u5185\u6838 \u5b89\u88c5\u6587\u6863 \u5929\u6570\u667a\u82af(Iluvatar vGPU) BI100 CentOS 7 Kernel 3.10.0-957.el7.x86_64 ~ 3.10.0-1160.42.2.el7.x86_64 \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a 3.10.0-1160 \u8865\u5145\u4e2d MR100\uff1b CentOS 8 Kernel 4.18.0-80.el8.x86_64 ~ 4.18.0-305.19.1.el8_4.x86_64 Ubuntu 20.04 Kernel 4.15.0-20-generic ~ 4.15.0-160-generic Kernel 5.4.0-26-generic ~ 5.4.0-89-generic Kernel 5.8.0-23-generic ~ 5.8.0-63-generic Ubuntu 21.04 Kernel 4.15.0-20-generic ~ 4.15.0-160-generic Kernel 5.4.0-26-generic ~ 5.4.0-89-generic Kernel 5.8.0-23-generic ~ 5.8.0-63-generic openEuler 22.03 LTS Kernel \u7248\u672c\u5927\u4e8e\u7b49\u4e8e 5.1 \u4e14\u5c0f\u4e8e\u7b49\u4e8e 
5.10"},{"location":"admin/kpanda/gpu/gpu_matrix.html#metaxgpu","title":"\u6c90\u66e6\uff08Metax\uff09GPU","text":"GPU \u5382\u5546\u53ca\u7c7b\u578b \u652f\u6301\u7684 GPU \u578b\u53f7 \u9002\u914d\u7684\u64cd\u4f5c\u7cfb\u7edf\uff08\u5728\u7ebf\uff09 \u63a8\u8350\u5185\u6838 \u63a8\u8350\u7684\u64cd\u4f5c\u7cfb\u7edf\u53ca\u5185\u6838 \u5b89\u88c5\u6587\u6863 \u6c90\u66e6Metax\uff08\u6574\u5361/vGPU\uff09 \u66e6\u4e91 C500 \u6c90\u66e6 GPU \u5b89\u88c5\u4f7f\u7528"},{"location":"admin/kpanda/gpu/gpu_scheduler_config.html","title":"GPU \u8c03\u5ea6\u914d\u7f6e\uff08Binpack \u548c Spread \uff09","text":"

                This article describes how to use Binpack and Spread GPU scheduling configurations with NVIDIA vGPU to reduce GPU resource fragmentation, prevent single points of failure, and achieve advanced vGPU scheduling. The 算丰 AI computing platform provides Binpack and Spread scheduling policies at both the cluster and workload levels to meet the needs of different scenarios.

                "},{"location":"admin/kpanda/gpu/gpu_scheduler_config.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                • \u96c6\u7fa4\u8282\u70b9\u4e0a\u5df2\u6b63\u786e\u5b89\u88c5 GPU \u8bbe\u5907\u3002
                • \u96c6\u7fa4\u4e2d\u5df2\u6b63\u786e\u5b89\u88c5 gpu-operator \u7ec4\u4ef6 \u548c Nvidia-vgpu \u7ec4\u4ef6\u3002
                • \u96c6\u7fa4\u8282\u70b9\u5217\u8868\u4e2d\uff0cGPU \u6a21\u5f0f\u4e0b\u5b58\u5728 NVIDIA-vGPU \u7c7b\u578b\u3002
                "},{"location":"admin/kpanda/gpu/gpu_scheduler_config.html#_2","title":"\u4f7f\u7528\u573a\u666f","text":"
                • \u57fa\u4e8e GPU \u5361\u7ef4\u5ea6\u8c03\u5ea6\u7b56\u7565

                  • Binpack\uff1a\u4f18\u5148\u9009\u62e9\u8282\u70b9\u7684\u540c\u4e00\u5f20 GPU \u5361\uff0c\u9002\u7528\u4e8e\u63d0\u9ad8 GPU \u5229\u7528\u7387\uff0c\u51cf\u5c11\u8d44\u6e90\u788e\u7247\u3002
                  • Spread\uff1a\u591a\u4e2a Pod \u4f1a\u5206\u6563\u5728\u8282\u70b9\u7684\u4e0d\u540c GPU \u5361\u4e0a\uff0c\u9002\u7528\u4e8e\u9ad8\u53ef\u7528\u573a\u666f\uff0c\u907f\u514d\u5355\u5361\u6545\u969c\u3002
                • \u57fa\u4e8e\u8282\u70b9\u7ef4\u5ea6\u7684\u8c03\u5ea6\u7b56\u7565

                  • Binpack\uff1a \u591a\u4e2a Pod \u4f1a\u4f18\u5148\u9009\u62e9\u540c\u4e00\u4e2a\u8282\u70b9\uff0c\u9002\u7528\u4e8e\u63d0\u9ad8 GPU \u5229\u7528\u7387\uff0c\u51cf\u5c11\u8d44\u6e90\u788e\u7247\u3002
                  • Spread\uff1a\u591a\u4e2a Pod \u4f1a\u5206\u6563\u5728\u4e0d\u540c\u8282\u70b9\u4e0a\uff0c\u9002\u7528\u4e8e\u9ad8\u53ef\u7528\u573a\u666f\uff0c\u907f\u514d\u5355\u8282\u70b9\u6545\u969c\u3002
                "},{"location":"admin/kpanda/gpu/gpu_scheduler_config.html#binpack-spread","title":"\u96c6\u7fa4\u7ef4\u5ea6\u4f7f\u7528 Binpack \u548c Spread \u8c03\u5ea6\u914d\u7f6e","text":"

                Note

                \u9ed8\u8ba4\u60c5\u51b5\u4e0b\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u4f1a\u9075\u5faa\u96c6\u7fa4\u7ea7\u522b\u7684 Binpack \u548c Spread \u8c03\u5ea6\u914d\u7f6e\u3002 \u82e5\u5de5\u4f5c\u8d1f\u8f7d\u5355\u72ec\u8bbe\u7f6e\u4e86\u4e0e\u96c6\u7fa4\u4e0d\u4e00\u81f4\u7684 Binpack \u548c Spread \u8c03\u5ea6\u7b56\u7565\uff0c\u5219\u8be5\u5de5\u4f5c\u8d1f\u8f7d\u4f18\u5148\u9075\u5faa\u5176\u672c\u8eab\u7684\u8c03\u5ea6\u7b56\u7565\u3002

                1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9009\u62e9\u9700\u8981\u8c03\u6574 Binpack \u548c Spread \u8c03\u5ea6\u7b56\u7565\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u56fe\u6807\u5e76\u5728\u4e0b\u62c9\u5217\u8868\u4e2d\u70b9\u51fb GPU \u8c03\u5ea6\u914d\u7f6e \u3002

                2. \u6839\u636e\u4e1a\u52a1\u573a\u666f\u8c03\u6574 GPU \u8c03\u5ea6\u914d\u7f6e\uff0c\u5e76\u70b9\u51fb \u786e\u5b9a \u540e\u4fdd\u5b58\u3002

                "},{"location":"admin/kpanda/gpu/gpu_scheduler_config.html#binpack-spread_1","title":"\u5de5\u4f5c\u8d1f\u8f7d\u7ef4\u5ea6\u4f7f\u7528 Binpack \u548c Spread \u8c03\u5ea6\u914d\u7f6e","text":"

                Note

                \u5f53\u5de5\u4f5c\u8d1f\u8f7d\u7ef4\u5ea6\u7684 Binpack \u548c Spread \u8c03\u5ea6\u7b56\u7565\u4e0e\u96c6\u7fa4\u7ea7\u522b\u7684\u914d\u7f6e\u51b2\u7a81\u65f6\uff0c\u4f18\u5148\u9075\u5faa\u5de5\u4f5c\u8d1f\u8f7d\u7ef4\u5ea6\u7684\u914d\u7f6e\u3002

                \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u65e0\u72b6\u6001\u8d1f\u8f7d\uff0c\u5e76\u5728\u5de5\u4f5c\u8d1f\u8f7d\u4e2d\u914d\u7f6e Binpack \u548c Spread \u8c03\u5ea6\u7b56\u7565 \u3002

                1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u65e0\u72b6\u6001\u8d1f\u8f7d \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\uff0c\u5e76\u5728 \u5bb9\u5668\u914d\u7f6e \u4e2d\u542f\u7528 GPU \u914d\u7f6e\uff0c\u9009\u62e9 GPU \u7c7b\u578b\u4e3a NVIDIA vGPU\uff0c \u70b9\u51fb \u9ad8\u7ea7\u8bbe\u7f6e \uff0c\u542f\u7528 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\uff0c\u6839\u636e\u4e1a\u52a1\u573a\u666f\u8c03\u6574 GPU \u8c03\u5ea6\u914d\u7f6e\u3002\u914d\u7f6e\u5b8c\u6210\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65 \uff0c \u8fdb\u5165 \u670d\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002
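
                For reference, a workload-level policy is typically recorded on the Pod template as scheduler annotations. A minimal sketch, assuming HAMi-style annotation keys; the exact keys and values here are assumptions and may differ by platform version:

                apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: vgpu-binpack-demo\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: vgpu-binpack-demo\n  template:\n    metadata:\n      labels:\n        app: vgpu-binpack-demo\n      annotations:\n        hami.io/node-scheduler-policy: \"binpack\"  # assumed key: node-level Binpack\n        hami.io/gpu-scheduler-policy: \"spread\"    # assumed key: GPU-card-level Spread\n    spec:\n      containers:\n        - name: cuda\n          image: nvcr.io/nvidia/cuda:12.2.0-base-ubuntu22.04  # placeholder image\n          resources:\n            limits:\n              nvidia.com/vgpu: 1\n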

                "},{"location":"admin/kpanda/gpu/vgpu_quota.html","title":"GPU \u914d\u989d\u7ba1\u7406","text":"

                \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528 vGPU \u80fd\u529b\u3002

                "},{"location":"admin/kpanda/gpu/vgpu_quota.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                \u5f53\u524d\u96c6\u7fa4\u5df2\u901a\u8fc7 Operator \u6216\u624b\u52a8\u65b9\u5f0f\u90e8\u7f72\u5bf9\u5e94\u7c7b\u578b GPU \u9a71\u52a8\uff08NVIDIA GPU\u3001NVIDIA MIG\u3001\u5929\u6570\u3001\u6607\u817e\uff09

                "},{"location":"admin/kpanda/gpu/vgpu_quota.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                1. \u8fdb\u5165 Namespaces \u4e2d\uff0c\u70b9\u51fb \u914d\u989d\u7ba1\u7406 \u53ef\u4ee5\u914d\u7f6e\u5f53\u524d Namespace \u53ef\u4ee5\u4f7f\u7528\u7684 GPU \u8d44\u6e90\u3002

                2. \u5f53\u524d\u547d\u540d\u7a7a\u95f4\u914d\u989d\u7ba1\u7406\u8986\u76d6\u7684\u5361\u7c7b\u578b\u4e3a\uff1aNVIDIA vGPU\u3001NVIDIA MIG\u3001\u5929\u6570\u3001\u6607\u817e\u3002

                  NVIDIA vGPU \u914d\u989d\u7ba1\u7406 \uff1a\u914d\u7f6e\u5177\u4f53\u53ef\u4ee5\u4f7f\u7528\u7684\u914d\u989d\uff0c\u4f1a\u521b\u5efa ResourcesQuota CR\uff1a

                  • \u7269\u7406\u5361\u6570\u91cf\uff08nvidia.com/vgpu\uff09\uff1a\u8868\u793a\u5f53\u524d POD \u9700\u8981\u6302\u8f7d\u51e0\u5f20\u7269\u7406\u5361\uff0c\u5e76\u4e14\u8981 \u5c0f\u4e8e\u7b49\u4e8e \u5bbf\u4e3b\u673a\u4e0a\u7684\u5361\u6570\u91cf\u3002
                  • GPU \u7b97\u529b\uff08nvidia.com/gpucores\uff09\uff1a\u8868\u793a\u6bcf\u5f20\u5361\u5360\u7528\u7684 GPU \u7b97\u529b\uff0c\u503c\u8303\u56f4\u4e3a 0-100\uff1b\u5982\u679c\u914d\u7f6e\u4e3a 0\uff0c\u5219\u8ba4\u4e3a\u4e0d\u5f3a\u5236\u9694\u79bb\uff1b\u914d\u7f6e\u4e3a 100\uff0c\u5219\u8ba4\u4e3a\u72ec\u5360\u6574\u5f20\u5361\u3002
                  • GPU \u663e\u5b58\uff08nvidia.com/gpumem\uff09\uff1a\u8868\u793a\u6bcf\u5f20\u5361\u5360\u7528\u7684 GPU \u663e\u5b58\uff0c\u503c\u5355\u4f4d\u4e3a MB\uff0c\u6700\u5c0f\u503c\u4e3a 1\uff0c\u6700\u5927\u503c\u4e3a\u6574\u5361\u7684\u663e\u5b58\u503c\u3002
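
                A minimal sketch of an equivalent namespace quota, assuming it is expressed as a standard Kubernetes ResourceQuota over the extended resource names above; the platform's ResourcesQuota CR may use a different schema, and the numbers are placeholders:

                apiVersion: v1\nkind: ResourceQuota\nmetadata:\n  name: vgpu-quota\n  namespace: demo-ns\nspec:\n  hard:\n    requests.nvidia.com/vgpu: \"2\"        # total physical cards the namespace may mount\n    requests.nvidia.com/gpucores: \"100\"  # total compute share across all Pods\n    requests.nvidia.com/gpumem: \"8192\"   # total GPU memory in MB\n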

                "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html","title":"\u6607\u817e NPU \u7ec4\u4ef6\u5b89\u88c5","text":"

                \u672c\u7ae0\u8282\u63d0\u4f9b\u6607\u817e NPU \u9a71\u52a8\u3001Device Plugin\u3001NPU-Exporter \u7b49\u7ec4\u4ef6\u7684\u5b89\u88c5\u6307\u5bfc\u3002

                "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                1. \u5b89\u88c5\u524d\u8bf7\u786e\u8ba4\u652f\u6301\u7684 NPU \u578b\u53f7\uff0c\u8be6\u60c5\u8bf7\u53c2\u8003\u6607\u817e NPU \u77e9\u9635
                2. \u8bf7\u786e\u8ba4 \u5bf9\u5e94 NPU \u578b\u53f7\u6240\u8981\u6c42\u7684\u5185\u6838\u7248\u672c\u662f\u5426\u5339\u914d\uff0c\u8be6\u60c5\u8bf7\u53c2\u8003\u6607\u817e NPU \u77e9\u9635
                3. \u51c6\u5907 Kubernetes \u57fa\u7840\u73af\u5883
                "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html#_2","title":"\u5b89\u88c5\u6b65\u9aa4","text":"

                \u4f7f\u7528 NPU \u8d44\u6e90\u4e4b\u524d\uff0c\u9700\u8981\u5b8c\u6210\u56fa\u4ef6\u5b89\u88c5\u3001NPU \u9a71\u52a8\u5b89\u88c5\u3001 Docker Runtime \u5b89\u88c5\u3001\u7528\u6237\u521b\u5efa\u3001\u65e5\u5fd7\u76ee\u5f55\u521b\u5efa\u4ee5\u53ca NPU Device Plugin \u5b89\u88c5\uff0c\u8be6\u60c5\u53c2\u8003\u5982\u4e0b\u6b65\u9aa4\u3002

                "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html#_3","title":"\u5b89\u88c5\u56fa\u4ef6","text":"
                1. \u5b89\u88c5\u524d\u8bf7\u786e\u8ba4\u5185\u6838\u7248\u672c\u5728\u201c\u4e8c\u8fdb\u5236\u5b89\u88c5\u201d\u5b89\u88c5\u65b9\u5f0f\u5bf9\u5e94\u7684\u7248\u672c\u8303\u56f4\u5185\uff0c\u5219\u53ef\u4ee5\u76f4\u63a5\u5b89\u88c5NPU\u9a71\u52a8\u56fa\u4ef6\u3002
                2. \u56fa\u4ef6\u4e0e\u9a71\u52a8\u4e0b\u8f7d\u8bf7\u53c2\u8003\u56fa\u4ef6\u4e0b\u8f7d\u5730\u5740
                3. \u56fa\u4ef6\u5b89\u88c5\u8bf7\u53c2\u8003\u5b89\u88c5 NPU \u9a71\u52a8\u56fa\u4ef6
                "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html#npu_1","title":"\u5b89\u88c5 NPU \u9a71\u52a8","text":"
                1. \u5982\u9a71\u52a8\u672a\u5b89\u88c5\uff0c\u8bf7\u53c2\u8003\u6607\u817e\u5b98\u65b9\u6587\u6863\u8fdb\u884c\u5b89\u88c5\u3002\u4f8b\u5982 Ascend910\uff0c\u53c2\u8003 910 \u9a71\u52a8\u5b89\u88c5\u6587\u6863\u3002
                2. \u8fd0\u884c npu-smi info \u547d\u4ee4\uff0c\u5e76\u4e14\u80fd\u591f\u6b63\u5e38\u8fd4\u56de NPU \u4fe1\u606f\uff0c\u8868\u793a NPU \u9a71\u52a8\u4e0e\u56fa\u4ef6\u5df2\u5c31\u7eea\u3002
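
                For example (a sketch; the command prints the card list on the node when the driver and firmware are ready):

                npu-smi info\n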
                "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html#docker-runtime","title":"\u5b89\u88c5 Docker Runtime","text":"
                1. \u4e0b\u8f7d Ascend Docker Runtime

                  \u793e\u533a\u7248\u4e0b\u8f7d\u5730\u5740\uff1ahttps://www.hiascend.com/zh/software/mindx-dl/community

                  wget -c https://mindx.obs.cn-south-1.myhuaweicloud.com/OpenSource/MindX/MindX%205.0.RC2/MindX%20DL%205.0.RC2/Ascend-docker-runtime_5.0.RC2_linux-x86_64.run\n

                  Install it to a specified path by running the following two commands in turn; the parameter is the specified installation path:

                  chmod u+x Ascend-docker-runtime_5.0.RC2_linux-x86_64.run \n./Ascend-docker-runtime_{version}_linux-{arch}.run --install --install-path=<path>\n
                2. Modify the containerd configuration file

                  If containerd has no default configuration file, run the following three commands in turn to create one:

                  mkdir /etc/containerd \ncontainerd config default > /etc/containerd/config.toml \nvim /etc/containerd/config.toml\n

                  If containerd already has a configuration file:

                  vim /etc/containerd/config.toml\n

                  Modify the runtime installation path according to the actual situation, mainly the runtime field:

                  ... \n[plugins.\"io.containerd.monitor.v1.cgroups\"]\n   no_prometheus = false  \n[plugins.\"io.containerd.runtime.v1.linux\"]\n   shim = \"containerd-shim\"\n   runtime = \"/usr/local/Ascend/Ascend-Docker-Runtime/ascend-docker-runtime\"\n   runtime_root = \"\"\n   no_shim = false\n   shim_debug = false\n [plugins.\"io.containerd.runtime.v2.task\"]\n   platforms = [\"linux/amd64\"]\n...\n

                  Run the following command to restart containerd:

                  systemctl restart containerd\n
                "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html#_4","title":"\u7528\u6237\u521b\u5efa","text":"

                \u5728\u5bf9\u5e94\u7ec4\u4ef6\u5b89\u88c5\u7684\u8282\u70b9\u4e0a\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u521b\u5efa\u7528\u6237\u3002

                # Ubuntu\nuseradd -d /home/hwMindX -u 9000 -m -s /usr/sbin/nologin hwMindX\nusermod -a -G HwHiAiUser hwMindX\n# CentOS\nuseradd -d /home/hwMindX -u 9000 -m -s /sbin/nologin hwMindX\nusermod -a -G HwHiAiUser hwMindX\n
                "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html#_5","title":"\u65e5\u5fd7\u76ee\u5f55\u521b\u5efa","text":"

                \u5728\u5bf9\u5e94\u8282\u70b9\u521b\u5efa\u7ec4\u4ef6\u65e5\u5fd7\u7236\u76ee\u5f55\u548c\u5404\u7ec4\u4ef6\u7684\u65e5\u5fd7\u76ee\u5f55\uff0c\u5e76\u8bbe\u7f6e\u76ee\u5f55\u5bf9\u5e94\u5c5e\u4e3b\u548c\u6743\u9650\u3002\u6267\u884c\u4e0b\u8ff0\u547d\u4ee4\uff0c\u521b\u5efa\u7ec4\u4ef6\u65e5\u5fd7\u7236\u76ee\u5f55\u3002

                mkdir -m 755 /var/log/mindx-dl\nchown root:root /var/log/mindx-dl\n

                Run the following command to create the Device Plugin component log directory.

                mkdir -m 750 /var/log/mindx-dl/devicePlugin\nchown root:root /var/log/mindx-dl/devicePlugin\n

                Note

                Create a corresponding log directory for each required component; in this example, only the Device Plugin component is needed. If other components are required, see the official documentation.

                "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html#label","title":"\u521b\u5efa\u8282\u70b9 Label","text":"

                \u53c2\u8003\u4e0b\u8ff0\u547d\u4ee4\u5728\u5bf9\u5e94\u8282\u70b9\u4e0a\u521b\u5efa Label\uff1a

                # Create these labels on compute nodes where the driver is installed\nkubectl label node {nodename} huawei.com.ascend/Driver=installed\nkubectl label node {nodename} node-role.kubernetes.io/worker=worker\nkubectl label node {nodename} workerselector=dls-worker-node\nkubectl label node {nodename} host-arch=huawei-arm  # or host-arch=huawei-x86, choose according to the actual situation\nkubectl label node {nodename} accelerator=huawei-Ascend910  # choose according to the actual situation\n# Create this label on control-plane nodes\nkubectl label node {nodename} masterselector=dls-master-node\n
                "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html#device-plugin-npuexporter","title":"\u5b89\u88c5 Device Plugin \u548c NpuExporter","text":"

                \u529f\u80fd\u6a21\u5757\u8def\u5f84\uff1a \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u7ba1\u7406 \uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u6a21\u677f -> \u641c\u7d22 ascend-mindxdl \u3002

                • DevicePlugin \uff1a\u901a\u8fc7\u63d0\u4f9b\u901a\u7528\u8bbe\u5907\u63d2\u4ef6\u673a\u5236\u548c\u6807\u51c6\u7684\u8bbe\u5907API\u63a5\u53e3\uff0c\u4f9bKubernetes\u4f7f\u7528\u8bbe\u5907\u3002\u5efa\u8bae\u4f7f\u7528\u9ed8\u8ba4\u7684\u955c\u50cf\u53ca\u7248\u672c\u3002
                • NpuExporter \uff1a\u57fa\u4e8ePrometheus/Telegraf\u751f\u6001\uff0c\u8be5\u7ec4\u4ef6\u63d0\u4f9b\u63a5\u53e3\uff0c\u5e2e\u52a9\u7528\u6237\u80fd\u591f\u5173\u6ce8\u5230\u6607\u817e\u7cfb\u5217AI\u5904\u7406\u5668\u4ee5\u53ca\u5bb9\u5668\u7ea7\u5206\u914d\u72b6\u6001\u3002\u5efa\u8bae\u4f7f\u7528\u9ed8\u8ba4\u7684\u955c\u50cf\u53ca\u7248\u672c\u3002
                • ServiceMonitor \uff1a\u9ed8\u8ba4\u4e0d\u5f00\u542f\uff0c\u5f00\u542f\u540e\u53ef\u524d\u5f80\u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u67e5\u770b NPU \u76f8\u5173\u76d1\u63a7\u3002\u5982\u9700\u5f00\u542f\uff0c\u8bf7\u786e\u4fdd insight-agent \u5df2\u5b89\u88c5\u5e76\u5904\u4e8e\u8fd0\u884c\u72b6\u6001\uff0c\u5426\u5219\u5c06\u5bfc\u81f4 ascend-mindxdl \u5b89\u88c5\u5931\u8d25\u3002
                • isVirtualMachine \uff1a\u9ed8\u8ba4\u4e0d\u5f00\u542f\uff0c\u5982\u679c NPU \u8282\u70b9\u4e3a\u4e91\u4e3b\u673a\u573a\u666f\uff0c\u8bf7\u5f00\u542f\u00a0isVirtualMachine \u53c2\u6570\u3002

                \u5b89\u88c5\u6210\u529f\u540e\uff0c\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4\u4e0b\u4f1a\u51fa\u73b0\u4e24\u4e2a\u7ec4\u4ef6\uff0c\u5982\u4e0b\u56fe\uff1a

                \u540c\u65f6\u8282\u70b9\u4fe1\u606f\u4e0a\u4e5f\u4f1a\u51fa\u73b0\u5bf9\u5e94 NPU \u7684\u4fe1\u606f\uff1a

                \u4e00\u5207\u5c31\u7eea\u540e\uff0c\u6211\u4eec\u901a\u8fc7\u9875\u9762\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u5c31\u80fd\u591f\u9009\u62e9\u5230\u5bf9\u5e94\u7684 NPU \u8bbe\u5907\uff0c\u5982\u4e0b\u56fe\uff1a

                Note

                \u6709\u5173\u8be6\u7ec6\u4f7f\u7528\u6b65\u9aa4\uff0c\u8bf7\u53c2\u7167\u5e94\u7528\u4f7f\u7528\u6607\u817e\uff08Ascend\uff09NPU\u3002

                "},{"location":"admin/kpanda/gpu/ascend/ascend_usage.html","title":"\u5e94\u7528\u4f7f\u7528\u6607\u817e\uff08Ascend\uff09NPU","text":"

                \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528\u6607\u817e GPU\u3002

                "},{"location":"admin/kpanda/gpu/ascend/ascend_usage.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                • \u5f53\u524d NPU \u8282\u70b9\u5df2\u5b89\u88c5\u6607\u817e \uff08Ascend\uff09\u9a71\u52a8\u3002
                • \u5f53\u524d NPU \u8282\u70b9\u5df2\u5b89\u88c5 Ascend-Docker-Runtime \u7ec4\u4ef6\u3002
                • \u5f53\u524d\u96c6\u7fa4\u5df2\u5b89\u88c5 NPU MindX DL \u5957\u4ef6\u3002
                • \u5f53\u524d\u96c6\u7fa4\u5185 NPU \u5361\u672a\u8fdb\u884c\u4efb\u4f55\u865a\u62df\u5316\u64cd\u4f5c\u6216\u88ab\u5176\u5b83\u5e94\u7528\u5360\u7528\u3002

                \u8bf7\u53c2\u8003\u6607\u817e NPU \u7ec4\u4ef6\u5b89\u88c5\u6587\u6863\u5b89\u88c5\u57fa\u7840\u73af\u5883\u3002

                "},{"location":"admin/kpanda/gpu/ascend/ascend_usage.html#_2","title":"\u5feb\u901f\u4f7f\u7528","text":"

                \u672c\u6587\u4f7f\u7528\u6607\u817e\u793a\u4f8b\u5e93\u4e2d\u7684 AscentCL \u56fe\u7247\u5206\u7c7b\u5e94\u7528\u793a\u4f8b\u3002

                1. \u4e0b\u8f7d\u6607\u817e\u4ee3\u7801\u5e93

                  \u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\u4e0b\u8f7d\u6607\u817e Demo \u793a\u4f8b\u4ee3\u7801\u5e93\uff0c\u5e76\u4e14\u8bf7\u8bb0\u4f4f\u4ee3\u7801\u5b58\u653e\u7684\u4f4d\u7f6e\uff0c\u540e\u7eed\u9700\u8981\u4f7f\u7528\u3002

                  git clone https://gitee.com/ascend/samples.git\n
                2. Prepare the base image

                  This example uses the ascend-pytorch base image, which can be obtained from the Ascend image repository.

                3. Prepare the YAML

                  ascend-demo.yaml
                  apiVersion: batch/v1\nkind: Job\nmetadata:\n  name: resnetinfer1-1-1usoc\nspec:\n  template:\n    spec:\n      containers:\n        - image: ascendhub.huawei.com/public-ascendhub/ascend-pytorch:23.0.RC2-ubuntu18.04 # Inference image name\n          imagePullPolicy: IfNotPresent\n          name: resnet50infer\n          securityContext:\n            runAsUser: 0\n          command:\n            - \"/bin/bash\"\n            - \"-c\"\n            - |\n              source /usr/local/Ascend/ascend-toolkit/set_env.sh &&\n              TEMP_DIR=/root/samples_copy_$(date '+%Y%m%d_%H%M%S_%N') &&\n              cp -r /root/samples \"$TEMP_DIR\" &&\n              cd \"$TEMP_DIR\"/inference/modelInference/sampleResnetQuickStart/python/model &&\n              wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/resnet50/resnet50.onnx &&\n              atc --model=resnet50.onnx --framework=5 --output=resnet50 --input_shape=\"actual_input_1:1,3,224,224\"  --soc_version=Ascend910 &&\n              cd ../data &&\n              wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg &&\n              cd ../scripts &&\n              bash sample_run.sh\n          resources:\n            requests:\n              huawei.com/Ascend910: 1 # Number of the Ascend 910 Processors\n            limits:\n              huawei.com/Ascend910: 1 # The value should be the same as that of requests\n          volumeMounts:\n            - name: hiai-driver\n              mountPath: /usr/local/Ascend/driver\n              readOnly: true\n            - name: slog\n              mountPath: /var/log/npu/conf/slog/slog.conf\n            - name: localtime # The container time must be the same as the host time\n              mountPath: /etc/localtime\n            - name: dmp\n              mountPath: /var/dmp_daemon\n            - name: slogd\n              mountPath: /var/slogd\n            - name: hbasic\n              mountPath: /etc/hdcBasic.cfg\n            - name: sys-version\n              mountPath: /etc/sys_version.conf\n            - name: aicpu\n              mountPath: /usr/lib64/aicpu_kernels\n            - name: tfso\n              mountPath: /usr/lib64/libtensorflow.so\n            - name: sample-path\n              mountPath: /root/samples\n      volumes:\n        - name: hiai-driver\n          hostPath:\n            path: /usr/local/Ascend/driver\n        - name: slog\n          hostPath:\n            path: /var/log/npu/conf/slog/slog.conf\n        - name: localtime\n          hostPath:\n            path: /etc/localtime\n        - name: dmp\n          hostPath:\n            path: /var/dmp_daemon\n        - name: slogd\n          hostPath:\n            path: /var/slogd\n        - name: hbasic\n          hostPath:\n            path: /etc/hdcBasic.cfg\n        - name: sys-version\n          hostPath:\n            path: /etc/sys_version.conf\n        - name: aicpu\n          hostPath:\n            path: /usr/lib64/aicpu_kernels\n        - name: tfso\n          hostPath:\n            path: /usr/lib64/libtensorflow.so\n        - name: sample-path\n          hostPath:\n            path: /root/samples\n      restartPolicy: OnFailure\n

                  Some fields in the above YAML need to be modified according to the actual situation:

                  1. atc ... --soc_version=Ascend910 uses Ascend910; adjust this to your actual situation. You can run the npu-smi info command to check the card model, then add the Ascend prefix.
                  2. samples-path: adjust according to the actual situation.
                  3. resources: adjust according to the actual situation.
                4. Deploy the Job and check the result

                  Create the Job with the following command:

                  kubectl apply -f ascend-demo.yaml\n

                  Check the Pod's running status:

                  After the Pod runs successfully, check the log output. An example of the key prompt information on screen is shown in the figure below: Label indicates the category identifier, Conf the maximum confidence of the classification, and Class the category it belongs to. These values may vary with version and environment, so take your actual results as the reference:

                  The result image is shown below:

                "},{"location":"admin/kpanda/gpu/ascend/ascend_usage.html#_3","title":"\u754c\u9762\u4f7f\u7528","text":"
                1. \u786e\u8ba4\u96c6\u7fa4\u662f\u5426\u5df2\u68c0\u6d4b GPU \u5361\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u81ea\u52a8\u542f\u7528\u5e76\u81ea\u52a8\u68c0\u6d4b\u5bf9\u5e94 GPU \u7c7b\u578b\u3002 \u76ee\u524d\u96c6\u7fa4\u4f1a\u81ea\u52a8\u542f\u7528 GPU \uff0c\u5e76\u4e14\u8bbe\u7f6e GPU \u7c7b\u578b\u4e3a Ascend \u3002

                2. \u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u9009\u62e9\u7c7b\u578b\uff08Ascend\uff09\u4e4b\u540e\uff0c\u9700\u8981\u914d\u7f6e\u5e94\u7528\u4f7f\u7528\u7684\u7269\u7406\u5361\u6570\u91cf\uff1a

                  \u7269\u7406\u5361\u6570\u91cf\uff08huawei.com/Ascend910\uff09 \uff1a\u8868\u793a\u5f53\u524d Pod \u9700\u8981\u6302\u8f7d\u51e0\u5f20\u7269\u7406\u5361\uff0c\u8f93\u5165\u503c\u5fc5\u987b\u4e3a\u6574\u6570\u4e14**\u5c0f\u4e8e\u7b49\u4e8e**\u5bbf\u4e3b\u673a\u4e0a\u7684\u5361\u6570\u91cf\u3002

                  \u5982\u679c\u4e0a\u8ff0\u503c\u914d\u7f6e\u7684\u6709\u95ee\u9898\u5219\u4f1a\u51fa\u73b0\u8c03\u5ea6\u5931\u8d25\uff0c\u8d44\u6e90\u5206\u914d\u4e0d\u4e86\u7684\u60c5\u51b5\u3002

                "},{"location":"admin/kpanda/gpu/ascend/vnpu.html","title":"\u542f\u7528\u6607\u817e\u865a\u62df\u5316","text":"

                \u6607\u817e\u865a\u62df\u5316\u5206\u4e3a\u52a8\u6001\u865a\u62df\u5316\u548c\u9759\u6001\u865a\u62df\u5316\uff0c\u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5f00\u542f\u5e76\u4f7f\u7528\u6607\u817e\u9759\u6001\u865a\u62df\u5316\u80fd\u529b\u3002

                "},{"location":"admin/kpanda/gpu/ascend/vnpu.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                • Kubernetes \u96c6\u7fa4\u73af\u5883\u642d\u5efa\u3002
                • \u5f53\u524d NPU \u8282\u70b9\u5df2\u5b89\u88c5\u6607\u817e \uff08Ascend\uff09\u9a71\u52a8\u3002
                • \u5f53\u524d NPU \u8282\u70b9\u5df2\u5b89\u88c5 Ascend-Docker-Runtime \u7ec4\u4ef6\u3002
                • \u5f53\u524d\u96c6\u7fa4\u5df2\u5b89\u88c5 NPU MindX DL \u5957\u4ef6\u3002
                • \u652f\u6301\u7684 NPU \u5361\u578b\u53f7\uff1a

                  • Ascend 310P\uff0c\u5df2\u9a8c\u8bc1
                  • Ascend 910b\uff0820 \u6838\uff09\uff0c\u5df2\u9a8c\u8bc1
                  • Ascend 910\uff0832 \u6838\uff09\uff0c\u5b98\u65b9\u4ecb\u7ecd\u652f\u6301\uff0c\u672a\u5b9e\u9645\u9a8c\u8bc1
                  • Ascend 910\uff0830 \u6838\uff09\uff0c\u5b98\u65b9\u4ecb\u7ecd\u652f\u6301\uff0c\u672a\u5b9e\u9645\u9a8c\u8bc1

                  \u66f4\u591a\u7ec6\u8282\u53c2\u9605\u5b98\u65b9\u865a\u62df\u5316\u786c\u4ef6\u8bf4\u660e\u3002

                \u8bf7\u53c2\u8003\u6607\u817e NPU \u7ec4\u4ef6\u5b89\u88c5\u6587\u6863\u5b89\u88c5\u57fa\u7840\u73af\u5883\u3002

                "},{"location":"admin/kpanda/gpu/ascend/vnpu.html#_3","title":"\u5f00\u542f\u865a\u62df\u5316\u80fd\u529b","text":"

                \u5f00\u542f\u865a\u62df\u5316\u80fd\u529b\u9700\u8981\u624b\u52a8\u4fee\u6539\u00a0ascend-device-plugin-daemonset \u7ec4\u4ef6\u7684\u542f\u52a8\u53c2\u6570\uff0c\u53c2\u8003\u4e0b\u8ff0\u547d\u4ee4\uff1a

                - device-plugin -useAscendDocker=true -volcanoType=false -presetVirtualDevice=true\n- logFile=/var/log/mindx-dl/devicePlugin/devicePlugin.log -logLevel=0\n
                "},{"location":"admin/kpanda/gpu/ascend/vnpu.html#vnpu","title":"\u5207\u5206 VNPU \u5b9e\u4f8b","text":"

                \u9759\u6001\u865a\u62df\u5316\u9700\u8981\u624b\u52a8\u5bf9 VNPU \u5b9e\u4f8b\u7684\u5207\u5206\uff0c\u8bf7\u53c2\u8003\u4e0b\u8ff0\u547d\u4ee4\uff1a

                npu-smi set -t create-vnpu -i 13 -c 0 -f vir02\n
                • i is the card id
                • c is the chip id
                • vir02 is the split spec template

                The card id and chip id can be queried with npu-smi info, and the split specs can be looked up in the Ascend official templates.

                After splitting an instance, query the split result with the following command:

                npu-smi info -t info-vnpu -i 13 -c 0\n

                The query result is as follows:

                "},{"location":"admin/kpanda/gpu/ascend/vnpu.html#ascend-device-plugin-daemonset","title":"\u91cd\u542f\u00a0ascend-device-plugin-daemonset","text":"

                \u5207\u5206\u5b9e\u4f8b\u540e\u624b\u52a8\u91cd\u542f device-plugin pod\uff0c\u7136\u540e\u4f7f\u7528 kubectl describe \u547d\u4ee4\u67e5\u770b\u5df2\u6ce8\u518c node \u7684\u8d44\u6e90\uff1a

                kubectl describe node {{nodename}}\n

                "},{"location":"admin/kpanda/gpu/ascend/vnpu.html#_4","title":"\u5982\u4f55\u4f7f\u7528\u8bbe\u5907","text":"

                \u5728\u521b\u5efa\u5e94\u7528\u65f6\uff0c\u6307\u5b9a\u8d44\u6e90 key\uff0c\u53c2\u8003\u4e0b\u8ff0 YAML\uff1a

                ......\nresources:\n  requests:\n    huawei.com/Ascend310P-2c: 1\n  limits:\n    huawei.com/Ascend310P-2c: 1\n......\n
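
                A minimal complete Pod sketch built around the sliced resource above, assuming an Ascend 310P 2-core slice; the image and command are placeholders:

                apiVersion: v1\nkind: Pod\nmetadata:\n  name: vnpu-demo\nspec:\n  restartPolicy: OnFailure\n  containers:\n    - name: app\n      image: ascendhub.huawei.com/public-ascendhub/ascend-pytorch:23.0.RC2-ubuntu18.04  # placeholder image\n      command: [\"sleep\", \"100000\"]\n      resources:\n        requests:\n          huawei.com/Ascend310P-2c: 1  # request one 2-core VNPU slice\n        limits:\n          huawei.com/Ascend310P-2c: 1\n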
                "},{"location":"admin/kpanda/gpu/metax/usemetax.html","title":"\u6c90\u66e6 GPU \u7ec4\u4ef6\u5b89\u88c5\u4e0e\u4f7f\u7528","text":"

                \u672c\u7ae0\u8282\u63d0\u4f9b\u6c90\u66e6 gpu-extensions\u3001gpu-operator \u7b49\u7ec4\u4ef6\u7684\u5b89\u88c5\u6307\u5bfc\u548c\u6c90\u66e6 GPU \u6574\u5361\u548c vGPU \u4e24\u79cd\u6a21\u5f0f\u7684\u4f7f\u7528\u65b9\u6cd5\u3002

                "},{"location":"admin/kpanda/gpu/metax/usemetax.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                1. \u5df2\u5728\u6c90\u66e6\u8f6f\u4ef6\u4e2d\u5fc3\u4e0b\u8f7d\u5e76\u5b89\u88c5\u6240\u9700\u7684 tar \u5305\uff0c \u672c\u6587\u4ee5 metax-gpu-k8s-package.0.7.10.tar.gz \u4e3a\u4f8b\u3002
                2. \u51c6\u5907 Kubernetes \u57fa\u7840\u73af\u5883
                "},{"location":"admin/kpanda/gpu/metax/usemetax.html#_2","title":"\u7ec4\u4ef6\u4ecb\u7ecd","text":"

                Metax \u63d0\u4f9b\u4e86\u4e24\u4e2a helm-chart \u5305\uff0c\u4e00\u4e2a\u662f metax-extensions\uff0c\u4e00\u4e2a\u662f gpu-operator\uff0c\u6839\u636e\u4f7f\u7528\u573a\u666f\u53ef\u9009\u62e9\u5b89\u88c5\u4e0d\u540c\u7684\u7ec4\u4ef6\u3002

                1. Metax-extensions\uff1a\u5305\u542b gpu-device \u548c gpu-label \u4e24\u4e2a\u7ec4\u4ef6\u3002\u5728\u4f7f\u7528 Metax-extensions \u65b9\u6848\u65f6\uff0c\u7528\u6237\u7684\u5e94\u7528\u5bb9\u5668\u955c\u50cf\u9700\u8981\u57fa\u4e8e MXMACA\u00ae \u57fa\u7840\u955c\u50cf\u6784\u5efa\u3002\u4e14 Metax-extensions \u4ec5\u9002\u7528\u4e8e GPU \u6574\u5361\u4f7f\u7528\u573a\u666f\u3002
                2. gpu-operator\uff1a\u5305\u542b gpu-device\u3001gpu-label\u3001driver-manager\u3001container-runtime\u3001operator-controller \u8fd9\u4e9b\u7ec4\u4ef6\u3002 \u4f7f\u7528 gpu-operator \u65b9\u6848\u65f6\uff0c\u7528\u6237\u53ef\u9009\u62e9\u5236\u4f5c\u4e0d\u5305\u542b MXMACA\u00ae SDK \u7684\u5e94\u7528\u5bb9\u5668\u955c\u50cf\u3002gpu-operator \u9002\u7528\u4e8e GPU \u6574\u5361\u548c vGPU \u573a\u666f\u3002
                "},{"location":"admin/kpanda/gpu/metax/usemetax.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                1. \u4ece /home/metax/metax-docs/k8s/metax-gpu-k8s-package.0.7.10.tar.gz \u6587\u4ef6\u4e2d\u89e3\u538b\u51fa

                  • deploy-gpu-extensions.yaml # \u90e8\u7f72yaml
                  • metax-gpu-extensions-0.7.10.tgz\u3001metax-operator-0.7.10.tgz # helm chart\u6587\u4ef6
                  • metax-k8s-images.0.7.10.run # \u79bb\u7ebf\u955c\u50cf
                2. \u67e5\u770b\u7cfb\u7edf\u662f\u5426\u5b89\u88c5\u9a71\u52a8

                  $ lsmod | grep metax \nmetax 1605632 0 \nttm 86016 3 drm_vram_helper,metax,drm_ttm_helper \ndrm 618496 7 drm_kms_helper,drm_vram_helper,ast,metax,drm_ttm_helper,ttm\n
                  • If nothing is shown, the software package has not been installed; if content is shown, the software package has been installed.
                  • When using metax-operator, pre-installing the MXMACA kernel-mode driver on worker nodes is not recommended; if it is already installed, there is no need to uninstall it.
                3. Install the driver.

                "},{"location":"admin/kpanda/gpu/metax/usemetax.html#gpu-extensions","title":"gpu-extensions","text":"
                1. \u63a8\u9001\u955c\u50cf

                  tar -xf metax-gpu-k8s-package.0.7.10.tar.gz\n./metax-k8s-images.0.7.10.run push {registry}/metax\n
                2. Push the Helm Charts:

                  helm plugin install https://github.com/chartmuseum/helm-push\nhelm repo add  --username rootuser --password rootpass123  metax http://172.16.16.5:8081\nhelm cm-push metax-operator-0.7.10.tgz metax\nhelm cm-push metax-gpu-extensions-0.7.10.tgz metax\n
                3. Install metax-gpu-extensions on the 算丰 AI computing platform.

                  After successful deployment, the resources can be seen on the node.

                4. After the modification succeeds, the Metax GPU label can be seen on the node.

                "},{"location":"admin/kpanda/gpu/metax/usemetax.html#gpu-operator","title":"gpu-operator","text":"

                \u5b89\u88c5 gpu-opeartor \u65f6\u7684\u5df2\u77e5\u95ee\u9898\uff1a

                1. metax-operator\u3001gpu-label\u3001gpu-device \u3001container-runtime \u8fd9\u51e0\u4e2a\u7ec4\u4ef6\u955c\u50cf\u8981\u5e26\u6709 amd64 \u540e\u7f00\u3002

                2. metax-maca \u7ec4\u4ef6\u7684\u955c\u50cf\u4e0d\u5728 metax-k8s-images.0.7.13.run \u5305\u91cc\u9762\uff0c\u9700\u8981\u5355\u72ec\u4e0b\u8f7d maca-mxc500-2.23.0.23-ubuntu20.04-x86_64.tar.xz \u8fd9\u7c7b\u955c\u50cf\uff0cload \u4e4b\u540e\u91cd\u65b0\u4fee\u6539 metax-maca \u7ec4\u4ef6\u7684\u955c\u50cf\u3002

                3. metax-driver \u7ec4\u4ef6\u7684\u955c\u50cf\u9700\u8981\u4ece https://pub-docstore.metax-tech.com:7001 \u8fd9\u4e2a\u7f51\u7ad9\u4e0b\u8f7d k8s-driver-image.2.23.0.25.run \u6587\u4ef6\uff0c\u7136\u540e\u6267\u884c k8s-driver-image.2.23.0.25.run push {registry}/metax \u547d\u4ee4\u628a\u955c\u50cf\u63a8\u9001\u5230\u955c\u50cf\u4ed3\u5e93\u3002\u63a8\u9001\u4e4b\u540e\u4fee\u6539 metax-driver \u7ec4\u4ef6\u7684\u955c\u50cf\u5730\u5740\u3002

                "},{"location":"admin/kpanda/gpu/metax/usemetax.html#gpu_1","title":"\u4f7f\u7528 GPU","text":"

                \u5b89\u88c5\u540e\u53ef\u5728\u5de5\u4f5c\u8d1f\u8f7d\u4e2d\u4f7f\u7528\u6c90\u66e6 GPU\u3002\u6ce8\u610f\u542f\u7528 GPU \u540e\uff0c\u9700\u9009\u62e9GPU\u7c7b\u578b\u4e3a Metax GPU

                \u8fdb\u5165\u5bb9\u5668\uff0c\u6267\u884c mx-smi \u53ef\u67e5\u770b GPU \u7684\u4f7f\u7528\u60c5\u51b5.
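
                For example, from outside the container (a sketch; <pod-name> is a placeholder):

                kubectl exec -it <pod-name> -- mx-smi\n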

                "},{"location":"admin/kpanda/gpu/mlu/use-mlu.html","title":"\u4f7f\u7528\u5bd2\u6b66\u7eaa GPU","text":"

                \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4f7f\u7528\u5bd2\u6b66\u7eaa GPU\u3002

                "},{"location":"admin/kpanda/gpu/mlu/use-mlu.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                • \u5df2\u7ecf\u90e8\u7f72 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 \u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u4e14\u5e73\u53f0\u8fd0\u884c\u6b63\u5e38\u3002
                • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                • \u5f53\u524d\u96c6\u7fa4\u5df2\u5b89\u88c5\u5bd2\u6b66\u7eaa\u56fa\u4ef6\u3001\u9a71\u52a8\u4ee5\u53caDevicePlugin\u7ec4\u4ef6\uff0c\u5b89\u88c5\u8be6\u60c5\u8bf7\u53c2\u8003\u5b98\u65b9\u6587\u6863\uff1a
                  • \u9a71\u52a8\u56fa\u4ef6\u5b89\u88c5
                  • DevicePlugin \u5b89\u88c5

                \u5728\u5b89\u88c5 DevicePlugin \u65f6\u8bf7\u5173\u95ed --enable-device-type \u53c2\u6570\uff0c\u5426\u5219\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5c06\u65e0\u6cd5\u6b63\u786e\u8bc6\u522b\u5bd2\u6b66\u7eaa GPU\u3002

                "},{"location":"admin/kpanda/gpu/mlu/use-mlu.html#gpu_1","title":"\u5bd2\u6b66\u7eaa GPU \u6a21\u5f0f\u4ecb\u7ecd","text":"

                \u5bd2\u6b66\u7eaa GPU \u6709\u4ee5\u4e0b\u51e0\u79cd\u6a21\u5f0f\uff1a

                • \u6574\u5361\u6a21\u5f0f\uff1a\u5c06\u5bd2\u6b66\u7eaaGPU\u4ee5\u6574\u5361\u7684\u65b9\u5f0f\u6ce8\u518c\u5230\u96c6\u7fa4\u5f53\u4e2d\u8fdb\u884c\u4f7f\u7528\u3002
                • Share \u6a21\u5f0f\uff1a\u53ef\u4ee5\u5c06\u4e00\u5f20\u5bd2\u6b66\u7eaaGPU\u5171\u4eab\u7ed9\u591a\u4e2a Pod \u8fdb\u884c\u4f7f\u7528\uff0c\u53ef\u4ee5\u901a\u8fc7 virtualization-num \u53c2\u6570\u8fdb\u884c\u8bbe\u7f6e\u53ef\u5171\u4eab\u5bb9\u5668\u7684\u6570\u91cf\u3002
                • Dynamic smlu \u6a21\u5f0f\uff1a\u8fdb\u4e00\u6b65\u5bf9\u8d44\u6e90\u8fdb\u884c\u4e86\u7ec6\u5316\uff0c\u53ef\u4ee5\u63a7\u5236\u5206\u914d\u7ed9\u5bb9\u5668\u7684\u663e\u5b58\u3001\u7b97\u529b\u7684\u5927\u5c0f\u3002
                • Mim \u6a21\u5f0f\uff1a\u53ef\u4ee5\u5c06\u5bd2\u6b66\u7eaa GPU \u6309\u7167\u56fa\u5b9a\u7684\u89c4\u683c\u5207\u5206\u6210\u591a\u5f20 GPU \u8fdb\u884c\u4f7f\u7528\u3002
                "},{"location":"admin/kpanda/gpu/mlu/use-mlu.html#ai","title":"\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528\u5bd2\u6b66\u7eaa","text":"

                \u8fd9\u91cc\u4ee5 Dynamic smlu \u6a21\u5f0f\u4e3a\u4f8b\uff1a

                1. \u5728\u6b63\u786e\u5b89\u88c5 DevicePlugin \u7b49\u7ec4\u4ef6\u540e\uff0c\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u96c6\u7fa4\u8fd0\u7ef4-> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u81ea\u52a8\u542f\u7528\u5e76\u81ea\u52a8\u68c0\u6d4b\u5bf9\u5e94 GPU \u7c7b\u578b\u3002

                2. \u70b9\u51fb\u8282\u70b9\u7ba1\u7406\u9875\u9762\uff0c\u67e5\u770b\u8282\u70b9\u662f\u5426\u5df2\u7ecf\u6b63\u786e\u8bc6\u522b\u5230\u5bf9\u5e94\u7684GPU\u7c7b\u578b\u3002

                3. \u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u9009\u62e9\u7c7b\u578b\uff08MLU VGPU\uff09\u4e4b\u540e\uff0c\u9700\u8981\u914d\u7f6e App \u4f7f\u7528\u7684 GPU \u8d44\u6e90\uff1a

                  • GPU \u7b97\u529b\uff08cambricon.com/mlu.smlu.vcore\uff09\uff1a\u8868\u793a\u5f53\u524d Pod \u9700\u8981\u4f7f\u7528\u6838\u5fc3\u7684\u767e\u5206\u6bd4\u6570\u91cf\u3002
                  • GPU \u663e\u5b58\uff08cambricon.com/mlu.smlu.vmemory\uff09\uff1a\u8868\u793a\u5f53\u524dPod\u9700\u8981\u4f7f\u7528\u663e\u5b58\u7684\u5927\u5c0f\uff0c\u5355\u4f4d\u662fMB\u3002

                "},{"location":"admin/kpanda/gpu/mlu/use-mlu.html#yaml","title":"\u4f7f\u7528 YAML \u914d\u7f6e","text":"

                \u53c2\u8003 YAML \u6587\u4ef6\u5982\u4e0b\uff1a

                apiVersion: v1  \nkind: Pod  \nmetadata:  \n  name: pod1  \nspec:  \n  restartPolicy: OnFailure  \n  containers:  \n    - image: ubuntu:16.04  \n      name: pod1-ctr  \n      command: [\"sleep\"]  \n      args: [\"100000\"]  \n      resources:  \n        limits:  \n          cambricon.com/mlu: \"1\" # use this when device type is not enabled, else delete this line.  \n          #cambricon.com/mlu: \"1\" #uncomment to use when device type is enabled  \n          #cambricon.com/mlu.share: \"1\" #uncomment to use device with env-share mode  \n          #cambricon.com/mlu.mim-2m.8gb: \"1\" #uncomment to use device with mim mode  \n          #cambricon.com/mlu.smlu.vcore: \"100\" #uncomment to use device with dynamic smlu mode (percentage of cores)  \n          #cambricon.com/mlu.smlu.vmemory: \"1024\" #uncomment to use device with dynamic smlu mode (memory in MB)\n
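
                After applying the manifest, a quick verification with kubectl (assuming cluster access; the grep pattern is only a convenience):

                # Apply the Pod manifest above\nkubectl apply -f pod1.yaml\n# Inspect the Cambricon resources registered on a node\nkubectl describe node <node-name> | grep cambricon.com\n# Confirm the Pod was scheduled and is running\nkubectl get pod pod1 -o wide\n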
                "},{"location":"admin/kpanda/gpu/nvidia/index.html","title":"NVIDIA GPU \u5361\u4f7f\u7528\u6a21\u5f0f","text":"

                NVIDIA \u4f5c\u4e3a\u4e1a\u5185\u77e5\u540d\u7684\u56fe\u5f62\u8ba1\u7b97\u4f9b\u5e94\u5546\uff0c\u4e3a\u7b97\u529b\u7684\u63d0\u5347\u63d0\u4f9b\u4e86\u8bf8\u591a\u8f6f\u786c\u4ef6\u89e3\u51b3\u65b9\u6848\uff0c\u5176\u4e2d NVIDIA \u5728 GPU \u7684\u4f7f\u7528\u65b9\u5f0f\u4e0a\u63d0\u4f9b\u4e86\u5982\u4e0b\u4e09\u79cd\u89e3\u51b3\u65b9\u6848\uff1a

                "},{"location":"admin/kpanda/gpu/nvidia/index.html#full-gpu","title":"\u6574\u5361\uff08Full GPU\uff09","text":"

                \u6574\u5361\u662f\u6307\u5c06\u6574\u4e2a NVIDIA GPU \u5206\u914d\u7ed9\u5355\u4e2a\u7528\u6237\u6216\u5e94\u7528\u7a0b\u5e8f\u3002\u5728\u8fd9\u79cd\u914d\u7f6e\u4e0b\uff0c\u5e94\u7528\u53ef\u4ee5\u5b8c\u5168\u5360\u7528 GPU \u7684\u6240\u6709\u8d44\u6e90\uff0c \u5e76\u83b7\u5f97\u6700\u5927\u7684\u8ba1\u7b97\u6027\u80fd\u3002\u6574\u5361\u9002\u7528\u4e8e\u9700\u8981\u5927\u91cf\u8ba1\u7b97\u8d44\u6e90\u548c\u5185\u5b58\u7684\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u5982\u6df1\u5ea6\u5b66\u4e60\u8bad\u7ec3\u3001\u79d1\u5b66\u8ba1\u7b97\u7b49\u3002

                "},{"location":"admin/kpanda/gpu/nvidia/index.html#vgpuvirtual-gpu","title":"vGPU\uff08Virtual GPU\uff09","text":"

                vGPU \u662f\u4e00\u79cd\u865a\u62df\u5316\u6280\u672f\uff0c\u5141\u8bb8\u5c06\u4e00\u4e2a\u7269\u7406 GPU \u5212\u5206\u4e3a\u591a\u4e2a\u865a\u62df GPU\uff0c\u6bcf\u4e2a\u865a\u62df GPU \u5206\u914d\u7ed9\u4e0d\u540c\u7684\u4e91\u4e3b\u673a\u6216\u7528\u6237\u3002 vGPU \u4f7f\u591a\u4e2a\u7528\u6237\u53ef\u4ee5\u5171\u4eab\u540c\u4e00\u53f0\u7269\u7406 GPU\uff0c\u5e76\u5728\u5404\u81ea\u7684\u865a\u62df\u73af\u5883\u4e2d\u72ec\u7acb\u4f7f\u7528 GPU \u8d44\u6e90\u3002 \u6bcf\u4e2a\u865a\u62df GPU \u53ef\u4ee5\u83b7\u5f97\u4e00\u5b9a\u7684\u8ba1\u7b97\u80fd\u529b\u548c\u663e\u5b58\u5bb9\u91cf\u3002vGPU \u9002\u7528\u4e8e\u865a\u62df\u5316\u73af\u5883\u548c\u4e91\u8ba1\u7b97\u573a\u666f\uff0c\u53ef\u4ee5\u63d0\u4f9b\u66f4\u9ad8\u7684\u8d44\u6e90\u5229\u7528\u7387\u548c\u7075\u6d3b\u6027\u3002

                "},{"location":"admin/kpanda/gpu/nvidia/index.html#migmulti-instance-gpu","title":"MIG\uff08Multi-Instance GPU\uff09","text":"

                MIG \u662f NVIDIA Ampere \u67b6\u6784\u5f15\u5165\u7684\u4e00\u9879\u529f\u80fd\uff0c\u5b83\u5141\u8bb8\u5c06\u4e00\u4e2a\u7269\u7406 GPU \u5212\u5206\u4e3a\u591a\u4e2a\u7269\u7406 GPU \u5b9e\u4f8b\uff0c\u6bcf\u4e2a\u5b9e\u4f8b\u53ef\u4ee5\u72ec\u7acb\u5206\u914d\u7ed9\u4e0d\u540c\u7684\u7528\u6237\u6216\u5de5\u4f5c\u8d1f\u8f7d\u3002 \u6bcf\u4e2a MIG \u5b9e\u4f8b\u5177\u6709\u81ea\u5df1\u7684\u8ba1\u7b97\u8d44\u6e90\u3001\u663e\u5b58\u548c PCIe \u5e26\u5bbd\uff0c\u5c31\u50cf\u4e00\u4e2a\u72ec\u7acb\u7684\u865a\u62df GPU\u3002 MIG \u63d0\u4f9b\u4e86\u66f4\u7ec6\u7c92\u5ea6\u7684 GPU \u8d44\u6e90\u5206\u914d\u548c\u7ba1\u7406\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u6c42\u52a8\u6001\u8c03\u6574\u5b9e\u4f8b\u7684\u6570\u91cf\u548c\u5927\u5c0f\u3002 MIG \u9002\u7528\u4e8e\u591a\u79df\u6237\u73af\u5883\u3001\u5bb9\u5668\u5316\u5e94\u7528\u7a0b\u5e8f\u548c\u6279\u5904\u7406\u4f5c\u4e1a\u7b49\u573a\u666f\u3002

                \u65e0\u8bba\u662f\u5728\u865a\u62df\u5316\u73af\u5883\u4e2d\u4f7f\u7528 vGPU\uff0c\u8fd8\u662f\u5728\u7269\u7406 GPU \u4e0a\u4f7f\u7528 MIG\uff0cNVIDIA \u4e3a\u7528\u6237\u63d0\u4f9b\u4e86\u66f4\u591a\u7684\u9009\u62e9\u548c\u4f18\u5316 GPU \u8d44\u6e90\u7684\u65b9\u5f0f\u3002 \u7b97\u4e30 AI \u7b97\u529b\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\u5168\u9762\u517c\u5bb9\u4e86\u4e0a\u8ff0 NVIDIA \u7684\u80fd\u529b\u7279\u6027\uff0c\u7528\u6237\u53ea\u9700\u901a\u8fc7\u7b80\u5355\u7684\u754c\u9762\u64cd\u4f5c\uff0c\u5c31\u80fd\u591f\u83b7\u5f97\u5168\u90e8 NVIDIA GPU \u7684\u8ba1\u7b97\u80fd\u529b\uff0c\u4ece\u800c\u63d0\u9ad8\u8d44\u6e90\u5229\u7528\u7387\u5e76\u964d\u4f4e\u6210\u672c\u3002

                • Single mode: the node exposes a single type of MIG device across all of its GPUs. All GPUs on the node must:
                  • Be of the same model (for example A100-SXM-40GB); MIG profiles are identical only for GPUs of the same model
                  • Have the MIG configuration enabled, which requires a reboot to take effect
                  • Create identical GIs and CIs so that "fully identical" MIG device types are exposed across all products
                • Mixed mode: the node exposes a mixture of MIG device types across all of its GPUs. Requesting a specific MIG device type requires the number of compute slices and the total memory provided by that device type.
                  • All GPUs on the node must belong to the same product line (for example A100-SXM-40GB)
                  • Each GPU may have MIG enabled or disabled and may be freely configured with any mix of available MIG device types.
                  • The k8s-device-plugin running on the node will:
                    • Expose any GPU not in MIG mode using the traditional nvidia.com/gpu resource type
                    • Expose individual MIG devices using resource types that follow the nvidia.com/mig-<slice>g.<memory>gb naming scheme

                      For details on enabling this configuration, refer to GPU Operator Offline Installation. A sketch of how such resources are consumed follows.
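
                      As a sketch, a Pod requesting one MIG instance might look as follows, assuming a node already partitioned with the 1g.10gb profile (the same profile used in the node label example later in this document); the Pod name and image are illustrative:

                      apiVersion: v1\nkind: Pod\nmetadata:\n  name: mig-demo   # illustrative name\nspec:\n  restartPolicy: OnFailure\n  containers:\n    - name: cuda-ctr\n      image: nvcr.io/nvidia/cuda:12.2.0-base-ubuntu22.04   # illustrative CUDA image\n      command: [\"nvidia-smi\", \"-L\"]\n      resources:\n        limits:\n          nvidia.com/mig-1g.10gb: 1   # one MIG instance of the 1g.10gb profile\n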

                      "},{"location":"admin/kpanda/gpu/nvidia/index.html#_1","title":"\u5982\u4f55\u4f7f\u7528","text":"

                      \u60a8\u53ef\u4ee5\u53c2\u8003\u4ee5\u4e0b\u94fe\u63a5\uff0c\u5feb\u901f\u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5173\u4e8e NVIDIA GPU \u5361\u7684\u7ba1\u7406\u80fd\u529b\u3002

                      • NVIDIA GPU \u6574\u5361\u4f7f\u7528
                      • NVIDIA vGPU \u4f7f\u7528
                      • NVIDIA MIG \u4f7f\u7528
                      "},{"location":"admin/kpanda/gpu/nvidia/full_gpu_userguide.html","title":"\u5e94\u7528\u4f7f\u7528 GPU \u6574\u5361","text":"

                      \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5c06\u6574\u4e2a NVIDIA GPU \u5361\u5206\u914d\u7ed9\u5355\u4e2a\u5e94\u7528\u3002

                      "},{"location":"admin/kpanda/gpu/nvidia/full_gpu_userguide.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u5df2\u7ecf\u90e8\u7f72 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 \u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u4e14\u5e73\u53f0\u8fd0\u884c\u6b63\u5e38\u3002
                      • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                      • \u5f53\u524d\u96c6\u7fa4\u5df2\u79bb\u7ebf\u5b89\u88c5 GPU Operator \u5e76\u5df2\u542f\u7528 NVIDIA DevicePlugin \uff0c\u53ef\u53c2\u8003 GPU Operator \u79bb\u7ebf\u5b89\u88c5\u3002
                      • \u5f53\u524d\u96c6\u7fa4\u5185 GPU \u5361\u672a\u8fdb\u884c\u4efb\u4f55\u865a\u62df\u5316\u64cd\u4f5c\u6216\u88ab\u5176\u5b83\u5e94\u7528\u5360\u7528\u3002
                      "},{"location":"admin/kpanda/gpu/nvidia/full_gpu_userguide.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"admin/kpanda/gpu/nvidia/full_gpu_userguide.html#ui","title":"\u4f7f\u7528 UI \u754c\u9762\u914d\u7f6e","text":"
                      1. \u786e\u8ba4\u96c6\u7fa4\u662f\u5426\u5df2\u68c0\u6d4b GPU \u5361\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u81ea\u52a8\u542f\u7528\u5e76\u81ea\u52a8\u68c0\u6d4b\u5bf9\u5e94 GPU \u7c7b\u578b\u3002 \u76ee\u524d\u96c6\u7fa4\u4f1a\u81ea\u52a8\u542f\u7528 GPU \uff0c\u5e76\u4e14\u8bbe\u7f6e GPU \u7c7b\u578b\u4e3a Nvidia GPU \u3002

                      2. \u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u9009\u62e9\u7c7b\u578b\uff08Nvidia GPU\uff09\u4e4b\u540e\uff0c\u9700\u8981\u914d\u7f6e\u5e94\u7528\u4f7f\u7528\u7684\u7269\u7406\u5361\u6570\u91cf\uff1a

                        \u7269\u7406\u5361\u6570\u91cf\uff08nvidia.com/gpu\uff09 \uff1a\u8868\u793a\u5f53\u524d Pod \u9700\u8981\u6302\u8f7d\u51e0\u5f20\u7269\u7406\u5361\uff0c\u8f93\u5165\u503c\u5fc5\u987b\u4e3a\u6574\u6570\u4e14 \u5c0f\u4e8e\u7b49\u4e8e \u5bbf\u4e3b\u673a\u4e0a\u7684\u5361\u6570\u91cf\u3002

                        \u5982\u679c\u4e0a\u8ff0\u503c\u914d\u7f6e\u7684\u6709\u95ee\u9898\u5219\u4f1a\u51fa\u73b0\u8c03\u5ea6\u5931\u8d25\uff0c\u8d44\u6e90\u5206\u914d\u4e0d\u4e86\u7684\u60c5\u51b5\u3002
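
                        A hedged diagnostic, assuming kubectl access; the Pod name is a placeholder and the event text is what the Kubernetes scheduler typically reports in this situation:

                        # Inspect the scheduling events of the pending Pod (name is a placeholder)\nkubectl describe pod <pod-name>\n# A typical scheduler event when the request exceeds the available GPUs looks like:\n#   Warning  FailedScheduling  ...  0/3 nodes are available: 3 Insufficient nvidia.com/gpu.\n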

                      "},{"location":"admin/kpanda/gpu/nvidia/full_gpu_userguide.html#yaml","title":"\u4f7f\u7528 YAML \u914d\u7f6e","text":"

                      \u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u7533\u8bf7 GPU \u8d44\u6e90\uff0c\u5728\u8d44\u6e90\u7533\u8bf7\u548c\u9650\u5236\u914d\u7f6e\u4e2d\u589e\u52a0 nvidia.com/gpu: 1 \u53c2\u6570\u914d\u7f6e\u5e94\u7528\u4f7f\u7528\u7269\u7406\u5361\u7684\u6570\u91cf\u3002

                      apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-gpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-gpu-demo\n  template:\n    metadata:\n      labels:\n        app: full-gpu-demo\n    spec:\n      containers:\n      - image: chrstnhntschl/gpu_burn\n        name: container-0\n        resources:\n          requests:\n            cpu: 250m\n            memory: 512Mi\n            nvidia.com/gpu: 1   # number of GPUs requested\n          limits:\n            cpu: 250m\n            memory: 512Mi\n            nvidia.com/gpu: 1   # upper limit on the number of GPUs used\n      imagePullSecrets:\n      - name: default-secret\n

                      Note

                      When specifying the GPU count with the nvidia.com/gpu parameter, the requests and limits values must be identical.

                      "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html","title":"GPU Operator \u79bb\u7ebf\u5b89\u88c5","text":"

                      \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9884\u7f6e\u4e86 Ubuntu22.04\u3001Ubuntu20.04\u3001CentOS 7.9 \u8fd9\u4e09\u4e2a\u64cd\u4f5c\u7cfb\u7edf\u7684 Driver \u955c\u50cf\uff0c\u9a71\u52a8\u7248\u672c\u662f 535.104.12\uff1b \u5e76\u4e14\u5185\u7f6e\u4e86\u5404\u64cd\u4f5c\u7cfb\u7edf\u6240\u9700\u7684 Toolkit \u955c\u50cf\uff0c\u7528\u6237\u4e0d\u518d\u9700\u8981\u624b\u52a8\u79bb\u7ebf Toolkit \u955c\u50cf\u3002

                      \u672c\u6587\u4f7f\u7528 AMD \u67b6\u6784\u7684 CentOS 7.9\uff083.10.0-1160\uff09\u8fdb\u884c\u6f14\u793a\u3002\u5982\u9700\u4f7f\u7528 Red Hat 8.4 \u90e8\u7f72\uff0c \u8bf7\u53c2\u8003\u5411\u706b\u79cd\u8282\u70b9\u4ed3\u5e93\u4e0a\u4f20 Red Hat GPU Opreator \u79bb\u7ebf\u955c\u50cf\u548c\u6784\u5efa Red Hat 8.4 \u79bb\u7ebf yum \u6e90\u3002

                      "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u5f85\u90e8\u7f72 gpu-operator \u7684\u96c6\u7fa4\u8282\u70b9\u5185\u6838\u7248\u672c\u5fc5\u987b\u5b8c\u5168\u4e00\u81f4\u3002\u8282\u70b9\u6240\u5728\u7684\u53d1\u884c\u7248\u548c GPU \u5361\u578b\u53f7\u5728 GPU \u652f\u6301\u77e9\u9635\u7684\u8303\u56f4\u5185\u3002
                      • \u5b89\u88c5 gpu-operator \u65f6\u9009\u62e9 v23.9.0+2 \u53ca\u4ee5\u4e0a\u7248\u672c
                      "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                      \u53c2\u8003\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 gpu-operator \u63d2\u4ef6\u3002

                      1. Log in to the platform and go to Container Management -> the cluster where gpu-operator is to be installed -> cluster details.

                      2. On the Helm Templates page, select All Repositories and search for gpu-operator.

                      3. Select gpu-operator and click Install.

                      4. Configure the gpu-operator installation parameters as described below to complete the installation.

                      "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_3","title":"\u53c2\u6570\u914d\u7f6e","text":"
                      • systemOS \uff1a\u9009\u62e9\u673a\u5668\u7684\u64cd\u4f5c\u7cfb\u7edf\uff0c\u5f53\u524d\u5185\u7f6e\u4e86 Ubuntu 22.04\u3001Ubuntu20.04\u3001Centos7.9 \u3001other \u56db\u4e2a\u9009\u9879\uff0c\u8bf7\u6b63\u786e\u7684\u9009\u62e9\u64cd\u4f5c\u7cfb\u7edf\u3002
                      "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_4","title":"\u57fa\u672c\u53c2\u6570\u914d\u7f6e","text":"
                      • \u540d\u79f0 \uff1a\u8f93\u5165\u63d2\u4ef6\u540d\u79f0\u3002
                      • \u547d\u540d\u7a7a\u95f4 \uff1a\u9009\u62e9\u5c06\u63d2\u4ef6\u5b89\u88c5\u7684\u547d\u540d\u7a7a\u95f4\u3002
                      • \u7248\u672c \uff1a\u63d2\u4ef6\u7684\u7248\u672c\uff0c\u6b64\u5904\u4ee5 v23.9.0+2 \u7248\u672c\u4e3a\u4f8b\u3002
                      • \u5931\u8d25\u5220\u9664 \uff1a\u5b89\u88c5\u5931\u8d25\uff0c\u5219\u5220\u9664\u5df2\u7ecf\u5b89\u88c5\u7684\u5173\u8054\u8d44\u6e90\u3002\u5f00\u542f\u540e\uff0c\u5c06\u9ed8\u8ba4\u540c\u6b65\u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u3002
                      • \u5c31\u7eea\u7b49\u5f85 \uff1a\u542f\u7528\u540e\uff0c\u6240\u6709\u5173\u8054\u8d44\u6e90\u90fd\u5904\u4e8e\u5c31\u7eea\u72b6\u6001\uff0c\u624d\u4f1a\u6807\u8bb0\u5e94\u7528\u5b89\u88c5\u6210\u529f\u3002
                      • \u8be6\u60c5\u65e5\u5fd7 \uff1a\u5f00\u542f\u540e\uff0c\u5c06\u8bb0\u5f55\u5b89\u88c5\u8fc7\u7a0b\u7684\u8be6\u7ec6\u65e5\u5fd7\u3002
                      "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_5","title":"\u9ad8\u7ea7\u53c2\u6570\u914d\u7f6e","text":""},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#operator","title":"Operator \u53c2\u6570\u914d\u7f6e","text":"
                      • InitContainer.image \uff1a\u914d\u7f6e CUDA \u955c\u50cf\uff0c\u63a8\u8350\u9ed8\u8ba4\u955c\u50cf\uff1a nvidia/cuda
                      • InitContainer.repository \uff1aCUDA \u955c\u50cf\u6240\u5728\u7684\u955c\u50cf\u4ed3\u5e93\uff0c\u9ed8\u8ba4\u4e3a nvcr.m.daocloud.io \u4ed3\u5e93
                      • InitContainer.version : CUDA \u955c\u50cf\u7684\u7248\u672c\uff0c\u8bf7\u4f7f\u7528\u9ed8\u8ba4\u53c2\u6570
                      "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#driver","title":"Driver \u53c2\u6570\u914d\u7f6e","text":"
                      • Driver.enable \uff1a\u914d\u7f6e\u662f\u5426\u5728\u8282\u70b9\u4e0a\u90e8\u7f72 NVIDIA \u9a71\u52a8\uff0c\u9ed8\u8ba4\u5f00\u542f\uff0c\u5982\u679c\u60a8\u5728\u4f7f\u7528 GPU Operator \u90e8\u7f72\u524d\uff0c\u5df2\u7ecf\u5728\u8282\u70b9\u4e0a\u90e8\u7f72\u4e86 NVIDIA \u9a71\u52a8\u7a0b\u5e8f\uff0c\u8bf7\u5173\u95ed\u3002\uff08\u82e5\u624b\u52a8\u90e8\u7f72\u9a71\u52a8\u7a0b\u5e8f\u9700\u8981\u5173\u6ce8 CUDA Toolkit \u4e0e Toolkit Driver Version \u7684\u9002\u914d\u5173\u7cfb\uff0c\u901a\u8fc7 GPU operator \u5b89\u88c5\u5219\u65e0\u9700\u5173\u6ce8\uff09\u3002
                      • Driver.usePrecompiled \uff1a\u542f\u7528\u9884\u7f16\u8bd1\u7684GPU\u9a71\u52a8
                      • Driver.image \uff1a\u914d\u7f6e GPU \u9a71\u52a8\u955c\u50cf\uff0c\u63a8\u8350\u9ed8\u8ba4\u955c\u50cf\uff1a nvidia/driver \u3002
                      • Driver.repository \uff1aGPU \u9a71\u52a8\u955c\u50cf\u6240\u5728\u7684\u955c\u50cf\u4ed3\u5e93\uff0c\u9ed8\u8ba4\u4e3a nvidia \u7684 nvcr.io \u4ed3\u5e93\u3002
                      • Driver.usePrecompiled \uff1a\u5f00\u542f\u9884\u7f16\u8bd1\u6a21\u5f0f\u5b89\u88c5\u9a71\u52a8\u3002
                      • Driver.version \uff1aGPU \u9a71\u52a8\u955c\u50cf\u7684\u7248\u672c\uff0c\u79bb\u7ebf\u90e8\u7f72\u8bf7\u4f7f\u7528\u9ed8\u8ba4\u53c2\u6570\uff0c\u4ec5\u5728\u7ebf\u5b89\u88c5\u65f6\u9700\u914d\u7f6e\u3002\u4e0d\u540c\u7c7b\u578b\u64cd\u4f5c\u7cfb\u7edf\u7684 Driver \u955c\u50cf\u7684\u7248\u672c\u5b58\u5728\u5982\u4e0b\u5dee\u5f02\uff0c \u8be6\u60c5\u53ef\u53c2\u8003\uff1aNvidia GPU Driver \u7248\u672c\u3002 \u5982\u4e0b\u4e0d\u540c\u64cd\u4f5c\u7cfb\u7edf\u7684 Driver Version \u793a\u4f8b\uff1a

                        Note

                        When using a built-in operating system version there is no need to modify the image version; for other operating system versions, refer to Uploading Images to the Bootstrap Node Repository. Note that the version number must not be followed by an operating system name such as Ubuntu, CentOS, or Red Hat; if the official image carries an OS suffix, remove it manually.

                        • Red Hat systems, for example 525.105.17
                        • Ubuntu systems, for example 535-5.15.0-1043-nvidia
                        • CentOS systems, for example 525.147.05
                      • Driver.RepoConfig.ConfigMapName: records the name of the ConfigMap holding GPU Operator's offline yum source configuration. When using the preset offline packages, refer to the following documents for each operating system type; a sketch of creating such a ConfigMap follows the list.

                        • Building a CentOS 7.9 offline yum source
                        • Building a Red Hat 8.4 offline yum source
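
                      A minimal sketch of creating such a ConfigMap, assuming you have already built an offline repo file per the documents above; the file name extended.repo, the ConfigMap name, and the namespace are illustrative:

                      # Create a ConfigMap from the prepared offline repo file\nkubectl create configmap local-repo-config \\\n  --namespace gpu-operator \\\n  --from-file=./extended.repo\n# Then set Driver.RepoConfig.ConfigMapName=local-repo-config in the installation parameters\n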
                      "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#toolkit","title":"Toolkit \u914d\u7f6e\u53c2\u6570","text":"

                      Toolkit.enable \uff1a\u9ed8\u8ba4\u5f00\u542f\uff0c\u8be5\u7ec4\u4ef6\u8ba9 conatainerd/docker \u652f\u6301\u8fd0\u884c\u9700\u8981 GPU \u7684\u5bb9\u5668\u3002
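
                      A hedged spot check on a GPU node, assuming the toolkit has already run there; the paths follow common containerd defaults:

                      # Verify that the toolkit registered the NVIDIA runtime with containerd\ngrep -A2 'runtimes.nvidia' /etc/containerd/config.toml\n# Verify that the runtime binary the toolkit installs is present\nls -l /usr/bin/nvidia-container-runtime\n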

                      "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#mig","title":"MIG \u914d\u7f6e\u53c2\u6570","text":"

                      \u8be6\u7ec6\u914d\u7f6e\u65b9\u5f0f\u8bf7\u53c2\u8003\u5f00\u542f MIG \u529f\u80fd

                      MigManager.Config.name \uff1aMIG \u7684\u5207\u5206\u914d\u7f6e\u6587\u4ef6\u540d\uff0c\u7528\u4e8e\u5b9a\u4e49 MIG \u7684\uff08GI, CI\uff09\u5207\u5206\u7b56\u7565\u3002 \u9ed8\u8ba4\u4e3a default-mig-parted-config \u3002\u81ea\u5b9a\u4e49\u53c2\u6570\u53c2\u8003\u5f00\u542f MIG \u529f\u80fd\u3002
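
                      For orientation, a minimal sketch of a custom partitioning entry in such a configuration, assuming an A100-80GB-class card where the 1g.10gb profile fits seven times; profile names and counts must match your hardware:

                      version: v1\nmig-configs:\n  all-1g.10gb:               # name referenced when labeling nodes\n    - devices: all           # apply to every GPU on the node\n      mig-enabled: true\n      mig-devices:\n        \"1g.10gb\": 7         # seven 1g.10gb instances per GPU\n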

                      "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_6","title":"\u4e0b\u4e00\u6b65\u64cd\u4f5c","text":"

                      \u5b8c\u6210\u4e0a\u8ff0\u76f8\u5173\u53c2\u6570\u914d\u7f6e\u548c\u521b\u5efa\u540e\uff1a

                      • \u5982\u679c\u4f7f\u7528 \u6574\u5361\u6a21\u5f0f\uff0c\u5e94\u7528\u521b\u5efa\u65f6\u53ef\u4f7f\u7528 GPU \u8d44\u6e90

                      • \u5982\u679c\u4f7f\u7528 vGPU \u6a21\u5f0f \uff0c\u5b8c\u6210\u4e0a\u8ff0\u76f8\u5173\u53c2\u6570\u914d\u7f6e\u548c\u521b\u5efa\u540e\uff0c\u4e0b\u4e00\u6b65\u8bf7\u5b8c\u6210 vGPU Addon \u5b89\u88c5

                      • \u5982\u679c\u4f7f\u7528 MIG \u6a21\u5f0f\uff0c\u5e76\u4e14\u9700\u8981\u7ed9\u4e2a\u522b GPU \u8282\u70b9\u6309\u7167\u67d0\u79cd\u5207\u5206\u89c4\u683c\u8fdb\u884c\u4f7f\u7528\uff0c \u5426\u5219\u6309\u7167 MigManager.Config \u4e2d\u7684 default \u503c\u8fdb\u884c\u5207\u5206\u3002

                        • single \u6a21\u5f0f\u8bf7\u7ed9\u5bf9\u5e94\u8282\u70b9\u6253\u4e0a\u5982\u4e0b Label\uff1a

                          kubectl label nodes {node} nvidia.com/mig.config=\"all-1g.10gb\" --overwrite\n
                        • For mixed mode, apply the following label to the corresponding node:

                          kubectl label nodes {node} nvidia.com/mig.config=\"custom-config\" --overwrite\n

                      After partitioning, applications can use MIG GPU resources.

                      "},{"location":"admin/kpanda/gpu/nvidia/push_image_to_repo.html","title":"\u5411\u706b\u79cd\u8282\u70b9\u4ed3\u5e93\u4e0a\u4f20 Red Hat GPU Opreator \u79bb\u7ebf\u955c\u50cf","text":"

                      \u672c\u6587\u4ee5 Red Hat 8.4 \u7684 nvcr.io/nvidia/driver:525.105.17-rhel8.4 \u79bb\u7ebf\u9a71\u52a8\u955c\u50cf\u4e3a\u4f8b\uff0c\u4ecb\u7ecd\u5982\u4f55\u5411\u706b\u79cd\u8282\u70b9\u4ed3\u5e93\u4e0a\u4f20\u79bb\u7ebf\u955c\u50cf\u3002

                      "},{"location":"admin/kpanda/gpu/nvidia/push_image_to_repo.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      1. \u706b\u79cd\u8282\u70b9\u53ca\u5176\u7ec4\u4ef6\u72b6\u6001\u8fd0\u884c\u6b63\u5e38\u3002
                      2. \u51c6\u5907\u4e00\u4e2a\u80fd\u591f\u8bbf\u95ee\u4e92\u8054\u7f51\u548c\u706b\u79cd\u8282\u70b9\u7684\u8282\u70b9\uff0c\u4e14\u8282\u70b9\u4e0a\u5df2\u7ecf\u5b8c\u6210 Docker \u7684\u5b89\u88c5\u3002
                      "},{"location":"admin/kpanda/gpu/nvidia/push_image_to_repo.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"admin/kpanda/gpu/nvidia/push_image_to_repo.html#_3","title":"\u5728\u8054\u7f51\u8282\u70b9\u83b7\u53d6\u79bb\u7ebf\u955c\u50cf","text":"

                      Perform the following operations on the Internet-connected node.

                      1. Pull the nvcr.io/nvidia/driver:525.105.17-rhel8.4 offline driver image on the connected machine:

                        docker pull nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
                      2. After the pull completes, package the image into a nvidia-driver.tar archive:

                        docker save nvcr.io/nvidia/driver:525.105.17-rhel8.4 > nvidia-driver.tar\n
                      3. Copy the nvidia-driver.tar image archive to the bootstrap node:

                        scp  nvidia-driver.tar user@ip:/root\n

                        For example:

                        scp  nvidia-driver.tar root@10.6.175.10:/root\n
                      "},{"location":"admin/kpanda/gpu/nvidia/push_image_to_repo.html#_4","title":"\u63a8\u9001\u955c\u50cf\u5230\u706b\u79cd\u8282\u70b9\u4ed3\u5e93","text":"

                      \u4ee5\u4e0b\u64cd\u4f5c\u5728\u706b\u79cd\u8282\u70b9\u4e0a\u8fdb\u884c\u3002

                      1. \u767b\u5f55\u706b\u79cd\u8282\u70b9\uff0c\u5c06\u8054\u7f51\u8282\u70b9\u62f7\u8d1d\u7684\u955c\u50cf\u538b\u7f29\u5305 nvidia-driver.tar \u5bfc\u5165\u672c\u5730\uff1a

                        docker load -i nvidia-driver.tar\n
                      2. View the newly imported image:

                        docker images -a |grep nvidia\n

                        Expected output:

                        nvcr.io/nvidia/driver                 e3ed7dee73e9   1 days ago   1.02GB\n
                      3. Retag the image so that it corresponds to the target repository in the remote Registry:

                        docker tag <image-name> <registry-url>/<repository-name>:<tag>\n
                        • <image-name> is the name of the nvidia image from the previous step,
                        • <registry-url> is the address of the Registry service on the bootstrap node,
                        • <repository-name> is the name of the repository you want to push to,
                        • <tag> is the tag you assign to the image.

                        For example:

                        docker tag nvcr.io/nvidia/driver:525.105.17-rhel8.4 10.6.10.5/nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
                      4. Push the image to the bootstrap node image repository:

                        docker push {ip}/nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
                      "},{"location":"admin/kpanda/gpu/nvidia/push_image_to_repo.html#_5","title":"\u63a5\u4e0b\u6765","text":"

                      \u53c2\u8003\u6784\u5efa Red Hat 8.4 \u79bb\u7ebf yum \u6e90\u548c GPU Operator \u79bb\u7ebf\u5b89\u88c5\u6765\u4e3a\u96c6\u7fa4\u90e8\u7f72 GPU Operator\u3002

                      "},{"location":"admin/kpanda/gpu/nvidia/rhel9.2_offline_install_driver.html","title":"RHEL 9.2 \u79bb\u7ebf\u5b89\u88c5 gpu-operator \u9a71\u52a8","text":"

                      \u524d\u63d0\u6761\u4ef6\uff1a\u5df2\u5b89\u88c5 gpu-operator v23.9.0+2 \u53ca\u66f4\u9ad8\u7248\u672c

                      RHEL 9.2 \u9a71\u52a8\u955c\u50cf\u4e0d\u80fd\u76f4\u63a5\u5b89\u88c5\uff0c\u5b98\u65b9\u7684\u9a71\u52a8\u811a\u672c\u5b58\u5728\u4e00\u70b9\u95ee\u9898\uff0c\u5728\u5b98\u65b9\u4fee\u590d\u4e4b\u524d\uff0c\u63d0\u4f9b\u5982\u4e0b\u7684\u6b65\u9aa4\u6765\u5b9e\u73b0\u79bb\u7ebf\u5b89\u88c5\u9a71\u52a8\u3002

                      "},{"location":"admin/kpanda/gpu/nvidia/rhel9.2_offline_install_driver.html#nouveau","title":"\u7981\u7528nouveau\u9a71\u52a8","text":"

                      \u5728 RHEL 9.2 \u4e2d\u5b58\u5728 nouveau \u975e\u5b98\u65b9\u7684 Nvidia \u9a71\u52a8\uff0c\u56e0\u6b64\u9700\u8981\u5148\u7981\u7528\u3002

                      # Create a new file\nsudo vi /etc/modprobe.d/blacklist-nouveau.conf\n# Add the following two lines:\nblacklist nouveau\noptions nouveau modeset=0\n# Disable nouveau\nsudo dracut --force\n# Reboot the VM\nsudo reboot\n# Check whether nouveau was successfully disabled\nlsmod | grep nouveau\n
                      "},{"location":"admin/kpanda/gpu/nvidia/rhel9.2_offline_install_driver.html#_1","title":"\u81ea\u5b9a\u4e49\u9a71\u52a8\u955c\u50cf","text":"

                      \u5148\u5728\u672c\u5730\u521b\u5efa nvidia-driver \u6587\u4ef6\uff1a

                      \u70b9\u51fb\u67e5\u770b\u5b8c\u6574\u7684 nvidia-driver \u6587\u4ef6\u5185\u5bb9
                      #! /bin/bash -x\n# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.\n\nset -eu\n\nRUN_DIR=/run/nvidia\nPID_FILE=${RUN_DIR}/${0##*/}.pid\nDRIVER_VERSION=${DRIVER_VERSION:?\"Missing DRIVER_VERSION env\"}\nKERNEL_UPDATE_HOOK=/run/kernel/postinst.d/update-nvidia-driver\nNUM_VGPU_DEVICES=0\nNVIDIA_MODULE_PARAMS=()\nNVIDIA_UVM_MODULE_PARAMS=()\nNVIDIA_MODESET_MODULE_PARAMS=()\nNVIDIA_PEERMEM_MODULE_PARAMS=()\nTARGETARCH=${TARGETARCH:?\"Missing TARGETARCH env\"}\nUSE_HOST_MOFED=\"${USE_HOST_MOFED:-false}\"\nDNF_RELEASEVER=${DNF_RELEASEVER:-\"\"}\nRHEL_VERSION=${RHEL_VERSION:-\"\"}\nRHEL_MAJOR_VERSION=9\n\nOPEN_KERNEL_MODULES_ENABLED=${OPEN_KERNEL_MODULES_ENABLED:-false}\n[[ \"${OPEN_KERNEL_MODULES_ENABLED}\" == \"true\" ]] && KERNEL_TYPE=kernel-open || KERNEL_TYPE=kernel\n\nDRIVER_ARCH=${TARGETARCH/amd64/x86_64} && DRIVER_ARCH=${DRIVER_ARCH/arm64/aarch64}\necho \"DRIVER_ARCH is $DRIVER_ARCH\"\n\nSCRIPT_DIR=$( cd -- \"$( dirname -- \"${BASH_SOURCE[0]}\" )\" &> /dev/null && pwd )\nsource $SCRIPT_DIR/common.sh\n\n_update_package_cache() {\n    if [ \"${PACKAGE_TAG:-}\" != \"builtin\" ]; then\n        echo \"Updating the package cache...\"\n        if ! yum -q makecache; then\n            echo \"FATAL: failed to reach RHEL package repositories. \"\\\n                 \"Ensure that the cluster can access the proper networks.\"\n            exit 1\n        fi\n    fi\n}\n\n_cleanup_package_cache() {\n    if [ \"${PACKAGE_TAG:-}\" != \"builtin\" ]; then\n        echo \"Cleaning up the package cache...\"\n        rm -rf /var/cache/yum/*\n    fi\n}\n\n_get_rhel_version_from_kernel() {\n    local rhel_version_underscore rhel_version_arr\n    rhel_version_underscore=$(echo \"${KERNEL_VERSION}\" | sed 's/.*el\\([0-9]\\+_[0-9]\\+\\).*/\\1/g')\n    # For e.g. :- from the kernel version 4.18.0-513.9.1.el8_9, we expect to extract the string \"8_9\"\n    if [[ ! 
${rhel_version_underscore} =~ ^[0-9]+_[0-9]+$ ]]; then\n        echo \"Unable to resolve RHEL version from kernel version\" >&2\n        return 1\n    fi\n    IFS='_' read -r -a rhel_version_arr <<< \"$rhel_version_underscore\"\n    if [[ ${#rhel_version_arr[@]} -ne 2 ]]; then\n        echo \"Unable to resolve RHEL version from kernel version\" >&2\n        return 1\n    fi\n    RHEL_VERSION=\"${rhel_version_arr[0]}.${rhel_version_arr[1]}\"\n    echo \"RHEL VERSION successfully resolved from kernel: ${RHEL_VERSION}\"\n    return 0\n}\n\n_resolve_rhel_version() {\n    _get_rhel_version_from_kernel || RHEL_VERSION=\"${RHEL_MAJOR_VERSION}\"\n    # set dnf release version as rhel version by default\n    if [[ -z \"${DNF_RELEASEVER}\" ]]; then\n        DNF_RELEASEVER=\"${RHEL_VERSION}\"\n    fi\n    return 0\n}\n\n# Resolve the kernel version to the form major.minor.patch-revision.\n_resolve_kernel_version() {\n    echo \"Resolving Linux kernel version...\"\n    local version=$(yum -q list available --showduplicates kernel-headers |\n      awk -v arch=$(uname -m) 'NR>1 {print $2\".\"arch}' | tac | grep -E -m1 \"^${KERNEL_VERSION/latest/.*}\")\n\n    if [ -z \"${version}\" ]; then\n        echo \"Could not resolve Linux kernel version\" >&2\n        return 1\n    fi\n    KERNEL_VERSION=\"${version}\"\n    echo \"Proceeding with Linux kernel version ${KERNEL_VERSION}\"\n    return 0\n}\n\n# Install the kernel modules header/builtin/order files and generate the kernel version string.\n_install_prerequisites() (\n    local tmp_dir=$(mktemp -d)\n\n    trap \"rm -rf ${tmp_dir}\" EXIT\n    cd ${tmp_dir}\n\n    echo \"Installing elfutils...\"\n    if ! dnf install -q -y elfutils-libelf.$DRIVER_ARCH; then\n        echo \"FATAL: failed to install elfutils packages. RHEL entitlement may be improperly deployed.\"\n        exit 1\n    fi\n    if ! dnf install -q -y elfutils-libelf-devel.$DRIVER_ARCH; then\n        echo \"FATAL: failed to install elfutils packages. RHEL entitlement may be improperly deployed.\"\n        exit 1\n    fi    \n\n    rm -rf /lib/modules/${KERNEL_VERSION}\n    mkdir -p /lib/modules/${KERNEL_VERSION}/proc\n\n    echo \"Enabling RHOCP and EUS RPM repos...\"\n    if [ -n \"${OPENSHIFT_VERSION:-}\" ]; then\n        dnf config-manager --set-enabled rhocp-${OPENSHIFT_VERSION}-for-rhel-9-$DRIVER_ARCH-rpms || true\n        if ! dnf makecache --releasever=${DNF_RELEASEVER}; then\n            dnf config-manager --set-disabled rhocp-${OPENSHIFT_VERSION}-for-rhel-9-$DRIVER_ARCH-rpms || true\n        fi\n    fi\n\n    dnf config-manager --set-enabled rhel-9-for-$DRIVER_ARCH-baseos-eus-rpms  || true\n    if ! dnf makecache --releasever=${DNF_RELEASEVER}; then\n            dnf config-manager --set-disabled rhel-9-for-$DRIVER_ARCH-baseos-eus-rpms || true\n    fi\n\n    # try with EUS disabled, if it does not work, then try just major version\n    if ! dnf makecache --releasever=${DNF_RELEASEVER}; then\n      # If pointing to DNF_RELEASEVER does not work, we point to the RHEL_MAJOR_VERSION as a last resort\n      if ! 
dnf makecache --releasever=${RHEL_MAJOR_VERSION}; then\n        echo \"FATAL: failed to update the dnf metadata cache after multiple attempts with releasevers ${DNF_RELEASEVER}, ${RHEL_MAJOR_VERSION}\"\n        exit 1\n      else\n        DNF_RELEASEVER=${RHEL_MAJOR_VERSION}\n      fi\n    fi\n\n    echo \"Installing Linux kernel headers...\"\n    dnf -q -y --releasever=${DNF_RELEASEVER} install kernel-headers-${KERNEL_VERSION} kernel-devel-${KERNEL_VERSION} --allowerasing > /dev/null\n    ln -s /usr/src/kernels/${KERNEL_VERSION} /lib/modules/${KERNEL_VERSION}/build\n\n    echo \"Installing Linux kernel module files...\"\n    dnf -q -y --releasever=${DNF_RELEASEVER} install kernel-core-${KERNEL_VERSION} > /dev/null\n\n    # Prevent depmod from giving a WARNING about missing files\n    touch /lib/modules/${KERNEL_VERSION}/modules.order\n    touch /lib/modules/${KERNEL_VERSION}/modules.builtin\n\n    depmod ${KERNEL_VERSION}\n\n    echo \"Generating Linux kernel version string...\"\n    if [ \"$TARGETARCH\" = \"arm64\" ]; then\n        gunzip -c /lib/modules/${KERNEL_VERSION}/vmlinuz | strings | grep -E '^Linux version' | sed 's/^\\(.*\\)\\s\\+(.*)$/\\1/' > version\n    else\n        extract-vmlinux /lib/modules/${KERNEL_VERSION}/vmlinuz | strings | grep -E '^Linux version' | sed 's/^\\(.*\\)\\s\\+(.*)$/\\1/' > version\n    fi\n    if [ -z \"$(<version)\" ]; then\n        echo \"Could not locate Linux kernel version string\" >&2\n        return 1\n    fi\n    mv version /lib/modules/${KERNEL_VERSION}/proc\n\n    # Parse gcc version\n    # gcc_version is expected to match x.y.z\n    # current_gcc is expected to match 'gcc-x.y.z-rel.el8.x86_64\n    local gcc_version=$(cat /lib/modules/${KERNEL_VERSION}/proc/version | grep -Eo \"gcc \\(GCC\\) ([0-9\\.]+)\" | grep -Eo \"([0-9\\.]+)\")\n    local current_gcc=$(rpm -qa gcc)\n    echo \"kernel requires gcc version: 'gcc-${gcc_version}', current gcc version is '${current_gcc}'\"\n\n    if ! [[ \"${current_gcc}\" =~ \"gcc-${gcc_version}\"-.* ]]; then\n        dnf install -q -y --releasever=${DNF_RELEASEVER} \"gcc-${gcc_version}\"\n    fi\n)\n\n# Cleanup the prerequisites installed above.\n_remove_prerequisites() {\n    true\n    if [ \"${PACKAGE_TAG:-}\" != \"builtin\" ]; then\n        dnf -q -y remove kernel-headers-${KERNEL_VERSION} kernel-devel-${KERNEL_VERSION} > /dev/null\n        # TODO remove module files not matching an existing driver package.\n    fi\n}\n\n# Check if the kernel version requires a new precompiled driver packages.\n_kernel_requires_package() {\n    local proc_mount_arg=\"\"\n\n    echo \"Checking NVIDIA driver packages...\"\n\n    [[ ! 
-d /usr/src/nvidia-${DRIVER_VERSION}/${KERNEL_TYPE} ]] && return 0\n    cd /usr/src/nvidia-${DRIVER_VERSION}/${KERNEL_TYPE}\n\n    proc_mount_arg=\"--proc-mount-point /lib/modules/${KERNEL_VERSION}/proc\"\n    for pkg_name in $(ls -d -1 precompiled/** 2> /dev/null); do\n        is_match=$(../mkprecompiled --match ${pkg_name} ${proc_mount_arg})\n        if [ \"${is_match}\" == \"kernel interface matches.\" ]; then\n            echo \"Found NVIDIA driver package ${pkg_name##*/}\"\n            return 1\n        fi\n    done\n    return 0\n}\n\n# Compile the kernel modules, optionally sign them, and generate a precompiled package for use by the nvidia-installer.\n_create_driver_package() (\n    local pkg_name=\"nvidia-modules-${KERNEL_VERSION%%-*}${PACKAGE_TAG:+-${PACKAGE_TAG}}\"\n    local nvidia_sign_args=\"\"\n    local nvidia_modeset_sign_args=\"\"\n    local nvidia_uvm_sign_args=\"\"\n\n    trap \"make -s -j ${MAX_THREADS} SYSSRC=/lib/modules/${KERNEL_VERSION}/build clean > /dev/null\" EXIT\n\n    echo \"Compiling NVIDIA driver kernel modules...\"\n    cd /usr/src/nvidia-${DRIVER_VERSION}/${KERNEL_TYPE}\n\n    if _gpu_direct_rdma_enabled; then\n        ln -s /run/mellanox/drivers/usr/src/ofa_kernel /usr/src/\n        # if arch directory exists(MOFED >=5.5) then create a symlink as expected by GPU driver installer\n        # This is required as currently GPU driver installer doesn't expect headers in x86_64 folder, but only in either default or kernel-version folder.\n        # ls -ltr /usr/src/ofa_kernel/\n        # lrwxrwxrwx 1 root root   36 Dec  8 20:10 default -> /etc/alternatives/ofa_kernel_headers\n        # drwxr-xr-x 4 root root 4096 Dec  8 20:14 x86_64\n        # lrwxrwxrwx 1 root root   44 Dec  9 19:05 5.4.0-90-generic -> /usr/src/ofa_kernel/x86_64/5.4.0-90-generic/\n        if [[ -d \"/run/mellanox/drivers/usr/src/ofa_kernel/$(uname -m)/$(uname -r)\" ]]; then\n            if [[ ! 
-e \"/usr/src/ofa_kernel/$(uname -r)\" ]]; then\n                ln -s \"/run/mellanox/drivers/usr/src/ofa_kernel/$(uname -m)/$(uname -r)\" /usr/src/ofa_kernel/\n            fi\n        fi\n    fi\n\n    make -s -j ${MAX_THREADS} SYSSRC=/lib/modules/${KERNEL_VERSION}/build nv-linux.o nv-modeset-linux.o > /dev/null\n\n    echo \"Relinking NVIDIA driver kernel modules...\"\n    rm -f nvidia.ko nvidia-modeset.ko\n    ld -d -r -o nvidia.ko ./nv-linux.o ./nvidia/nv-kernel.o_binary\n    ld -d -r -o nvidia-modeset.ko ./nv-modeset-linux.o ./nvidia-modeset/nv-modeset-kernel.o_binary\n\n    if [ -n \"${PRIVATE_KEY}\" ]; then\n        echo \"Signing NVIDIA driver kernel modules...\"\n        donkey get ${PRIVATE_KEY} sh -c \"PATH=${PATH}:/usr/src/linux-headers-${KERNEL_VERSION}/scripts && \\\n          sign-file sha512 \\$DONKEY_FILE pubkey.x509 nvidia.ko nvidia.ko.sign &&                          \\\n          sign-file sha512 \\$DONKEY_FILE pubkey.x509 nvidia-modeset.ko nvidia-modeset.ko.sign &&          \\\n          sign-file sha512 \\$DONKEY_FILE pubkey.x509 nvidia-uvm.ko\"\n        nvidia_sign_args=\"--linked-module nvidia.ko --signed-module nvidia.ko.sign\"\n        nvidia_modeset_sign_args=\"--linked-module nvidia-modeset.ko --signed-module nvidia-modeset.ko.sign\"\n        nvidia_uvm_sign_args=\"--signed\"\n    fi\n\n    echo \"Building NVIDIA driver package ${pkg_name}...\"\n    ../mkprecompiled --pack ${pkg_name} --description ${KERNEL_VERSION}                              \\\n                                        --proc-mount-point /lib/modules/${KERNEL_VERSION}/proc       \\\n                                        --driver-version ${DRIVER_VERSION}                           \\\n                                        --kernel-interface nv-linux.o                                \\\n                                        --linked-module-name nvidia.ko                               \\\n                                        --core-object-name nvidia/nv-kernel.o_binary                 \\\n                                        ${nvidia_sign_args}                                          \\\n                                        --target-directory .                                         \\\n                                        --kernel-interface nv-modeset-linux.o                        \\\n                                        --linked-module-name nvidia-modeset.ko                       \\\n                                        --core-object-name nvidia-modeset/nv-modeset-kernel.o_binary \\\n                                        ${nvidia_modeset_sign_args}                                  \\\n                                        --target-directory .                                         
\\\n                                        --kernel-module nvidia-uvm.ko                                \\\n                                        ${nvidia_uvm_sign_args}                                      \\\n                                        --target-directory .\n    mkdir -p precompiled\n    mv ${pkg_name} precompiled\n)\n\n_assert_nvswitch_system() {\n    [ -d /proc/driver/nvidia-nvswitch ] || return 1\n    entries=$(ls -1 /proc/driver/nvidia-nvswitch/devices/*)\n    if [ -z \"${entries}\" ]; then\n        return 1\n    fi\n    return 0\n}\n\n# For each kernel module configuration file mounted into the container,\n# parse the file contents and extract the custom module parameters that\n# are to be passed as input to 'modprobe'.\n#\n# Assumptions:\n# - Configuration files are named <module-name>.conf (i.e. nvidia.conf, nvidia-uvm.conf).\n# - Configuration files are mounted inside the container at /drivers.\n# - Each line in the file contains at least one parameter, where parameters on the same line\n#   are space delimited. It is up to the user to properly format the file to ensure\n#   the correct set of parameters are passed to 'modprobe'.\n_get_module_params() {\n    local base_path=\"/drivers\"\n    # nvidia\n    if [ -f \"${base_path}/nvidia.conf\" ]; then\n       while IFS=\"\" read -r param || [ -n \"$param\" ]; do\n           NVIDIA_MODULE_PARAMS+=(\"$param\")\n       done <\"${base_path}/nvidia.conf\"\n       echo \"Module parameters provided for nvidia: ${NVIDIA_MODULE_PARAMS[@]}\"\n    fi\n    # nvidia-uvm\n    if [ -f \"${base_path}/nvidia-uvm.conf\" ]; then\n       while IFS=\"\" read -r param || [ -n \"$param\" ]; do\n           NVIDIA_UVM_MODULE_PARAMS+=(\"$param\")\n       done <\"${base_path}/nvidia-uvm.conf\"\n       echo \"Module parameters provided for nvidia-uvm: ${NVIDIA_UVM_MODULE_PARAMS[@]}\"\n    fi\n    # nvidia-modeset\n    if [ -f \"${base_path}/nvidia-modeset.conf\" ]; then\n       while IFS=\"\" read -r param || [ -n \"$param\" ]; do\n           NVIDIA_MODESET_MODULE_PARAMS+=(\"$param\")\n       done <\"${base_path}/nvidia-modeset.conf\"\n       echo \"Module parameters provided for nvidia-modeset: ${NVIDIA_MODESET_MODULE_PARAMS[@]}\"\n    fi\n    # nvidia-peermem\n    if [ -f \"${base_path}/nvidia-peermem.conf\" ]; then\n       while IFS=\"\" read -r param || [ -n \"$param\" ]; do\n           NVIDIA_PEERMEM_MODULE_PARAMS+=(\"$param\")\n       done <\"${base_path}/nvidia-peermem.conf\"\n       echo \"Module parameters provided for nvidia-peermem: ${NVIDIA_PEERMEM_MODULE_PARAMS[@]}\"\n    fi\n}\n\n# Load the kernel modules and start persistenced.\n_load_driver() {\n    echo \"Parsing kernel module parameters...\"\n    _get_module_params\n\n    local nv_fw_search_path=\"$RUN_DIR/driver/lib/firmware\"\n    local set_fw_path=\"true\"\n    local fw_path_config_file=\"/sys/module/firmware_class/parameters/path\"\n    for param in \"${NVIDIA_MODULE_PARAMS[@]}\"; do\n        if [[ \"$param\" == \"NVreg_EnableGpuFirmware=0\" ]]; then\n          set_fw_path=\"false\"\n        fi\n    done\n\n    if [[ \"$set_fw_path\" == \"true\" ]]; then\n        echo \"Configuring the following firmware search path in '$fw_path_config_file': $nv_fw_search_path\"\n        if [[ ! 
-z $(grep '[^[:space:]]' $fw_path_config_file) ]]; then\n            echo \"WARNING: A search path is already configured in $fw_path_config_file\"\n            echo \"         Retaining the current configuration\"\n        else\n            echo -n \"$nv_fw_search_path\" > $fw_path_config_file || echo \"WARNING: Failed to configure the firmware search path\"\n        fi\n    fi\n\n    echo \"Loading ipmi and i2c_core kernel modules...\"\n    modprobe -a i2c_core ipmi_msghandler ipmi_devintf\n\n    echo \"Loading NVIDIA driver kernel modules...\"\n    set -o xtrace +o nounset\n    modprobe nvidia \"${NVIDIA_MODULE_PARAMS[@]}\"\n    modprobe nvidia-uvm \"${NVIDIA_UVM_MODULE_PARAMS[@]}\"\n    modprobe nvidia-modeset \"${NVIDIA_MODESET_MODULE_PARAMS[@]}\"\n    set +o xtrace -o nounset\n\n    if _gpu_direct_rdma_enabled; then\n        echo \"Loading NVIDIA Peer Memory kernel module...\"\n        set -o xtrace +o nounset\n        modprobe -a nvidia-peermem \"${NVIDIA_PEERMEM_MODULE_PARAMS[@]}\"\n        set +o xtrace -o nounset\n    fi\n\n    echo \"Starting NVIDIA persistence daemon...\"\n    nvidia-persistenced --persistence-mode\n\n    if [ \"${DRIVER_TYPE}\" = \"vgpu\" ]; then\n        echo \"Copying gridd.conf...\"\n        cp /drivers/gridd.conf /etc/nvidia/gridd.conf\n        if [ \"${VGPU_LICENSE_SERVER_TYPE}\" = \"NLS\" ]; then\n            echo \"Copying ClientConfigToken...\"\n            mkdir -p  /etc/nvidia/ClientConfigToken/\n            cp /drivers/ClientConfigToken/* /etc/nvidia/ClientConfigToken/\n        fi\n\n        echo \"Starting nvidia-gridd..\"\n        LD_LIBRARY_PATH=/usr/lib64/nvidia/gridd nvidia-gridd\n\n        # Start virtual topology daemon\n        _start_vgpu_topology_daemon\n    fi\n\n    if _assert_nvswitch_system; then\n        echo \"Starting NVIDIA fabric manager daemon...\"\n        nv-fabricmanager -c /usr/share/nvidia/nvswitch/fabricmanager.cfg\n    fi\n}\n\n# Stop persistenced and unload the kernel modules if they are currently loaded.\n_unload_driver() {\n    local rmmod_args=()\n    local nvidia_deps=0\n    local nvidia_refs=0\n    local nvidia_uvm_refs=0\n    local nvidia_modeset_refs=0\n    local nvidia_peermem_refs=0\n\n    echo \"Stopping NVIDIA persistence daemon...\"\n    if [ -f /var/run/nvidia-persistenced/nvidia-persistenced.pid ]; then\n        local pid=$(< /var/run/nvidia-persistenced/nvidia-persistenced.pid)\n\n        kill -SIGTERM \"${pid}\"\n        for i in $(seq 1 50); do\n            kill -0 \"${pid}\" 2> /dev/null || break\n            sleep 0.1\n        done\n        if [ $i -eq 50 ]; then\n            echo \"Could not stop NVIDIA persistence daemon\" >&2\n            return 1\n        fi\n    fi\n\n    if [ -f /var/run/nvidia-gridd/nvidia-gridd.pid ]; then\n        echo \"Stopping NVIDIA grid daemon...\"\n        local pid=$(< /var/run/nvidia-gridd/nvidia-gridd.pid)\n\n        kill -SIGTERM \"${pid}\"\n        for i in $(seq 1 10); do\n            kill -0 \"${pid}\" 2> /dev/null || break\n            sleep 0.1\n        done\n        if [ $i -eq 10 ]; then\n            echo \"Could not stop NVIDIA Grid daemon\" >&2\n            return 1\n        fi\n    fi\n\n    if [ -f /var/run/nvidia-fabricmanager/nv-fabricmanager.pid ]; then\n        echo \"Stopping NVIDIA fabric manager daemon...\"\n        local pid=$(< /var/run/nvidia-fabricmanager/nv-fabricmanager.pid)\n\n        kill -SIGTERM \"${pid}\"\n        for i in $(seq 1 50); do\n            kill -0 \"${pid}\" 2> /dev/null || break\n            sleep 0.1\n        done\n        if 
[ $i -eq 50 ]; then\n            echo \"Could not stop NVIDIA fabric manager daemon\" >&2\n            return 1\n        fi\n    fi\n\n    echo \"Unloading NVIDIA driver kernel modules...\"\n    if [ -f /sys/module/nvidia_modeset/refcnt ]; then\n        nvidia_modeset_refs=$(< /sys/module/nvidia_modeset/refcnt)\n        rmmod_args+=(\"nvidia-modeset\")\n        ((++nvidia_deps))\n    fi\n    if [ -f /sys/module/nvidia_uvm/refcnt ]; then\n        nvidia_uvm_refs=$(< /sys/module/nvidia_uvm/refcnt)\n        rmmod_args+=(\"nvidia-uvm\")\n        ((++nvidia_deps))\n    fi\n    if [ -f /sys/module/nvidia/refcnt ]; then\n        nvidia_refs=$(< /sys/module/nvidia/refcnt)\n        rmmod_args+=(\"nvidia\")\n    fi\n    if [ -f /sys/module/nvidia_peermem/refcnt ]; then\n        nvidia_peermem_refs=$(< /sys/module/nvidia_peermem/refcnt)\n        rmmod_args+=(\"nvidia-peermem\")\n        ((++nvidia_deps))\n    fi\n    if [ ${nvidia_refs} -gt ${nvidia_deps} ] || [ ${nvidia_uvm_refs} -gt 0 ] || [ ${nvidia_modeset_refs} -gt 0 ] || [ ${nvidia_peermem_refs} -gt 0 ]; then\n        echo \"Could not unload NVIDIA driver kernel modules, driver is in use\" >&2\n        return 1\n    fi\n\n    if [ ${#rmmod_args[@]} -gt 0 ]; then\n        rmmod ${rmmod_args[@]}\n    fi\n    return 0\n}\n\n# Link and install the kernel modules from a precompiled package using the nvidia-installer.\n_install_driver() {\n    local install_args=()\n\n    echo \"Installing NVIDIA driver kernel modules...\"\n    cd /usr/src/nvidia-${DRIVER_VERSION}\n    rm -rf /lib/modules/${KERNEL_VERSION}/video\n\n    if [ \"${ACCEPT_LICENSE}\" = \"yes\" ]; then\n        install_args+=(\"--accept-license\")\n    fi\n    IGNORE_CC_MISMATCH=1 nvidia-installer --kernel-module-only --no-drm --ui=none --no-nouveau-check -m=${KERNEL_TYPE} ${install_args[@]+\"${install_args[@]}\"}\n    # May need to add no-cc-check for Rhel, otherwise it complains about cc missing in path\n    # /proc/version and lib/modules/KERNEL_VERSION/proc are different, by default installer looks at /proc/ so, added the proc-mount-point\n    # TODO: remove the -a flag. its not needed. in the new driver version, license-acceptance is implicit\n    #nvidia-installer --kernel-module-only --no-drm --ui=none --no-nouveau-check --no-cc-version-check --proc-mount-point /lib/modules/${KERNEL_VERSION}/proc ${install_args[@]+\"${install_args[@]}\"}\n}\n\n# Mount the driver rootfs into the run directory with the exception of sysfs.\n_mount_rootfs() {\n    echo \"Mounting NVIDIA driver rootfs...\"\n    mount --make-runbindable /sys\n    mount --make-private /sys\n    mkdir -p ${RUN_DIR}/driver\n    mount --rbind / ${RUN_DIR}/driver\n\n    echo \"Check SELinux status\"\n    if [ -e /sys/fs/selinux ]; then\n        echo \"SELinux is enabled\"\n        echo \"Change device files security context for selinux compatibility\"\n        chcon -R -t container_file_t ${RUN_DIR}/driver/dev\n    else\n        echo \"SELinux is disabled, skipping...\"\n    fi\n}\n\n# Unmount the driver rootfs from the run directory.\n_unmount_rootfs() {\n    echo \"Unmounting NVIDIA driver rootfs...\"\n    if findmnt -r -o TARGET | grep \"${RUN_DIR}/driver\" > /dev/null; then\n        umount -l -R ${RUN_DIR}/driver\n    fi\n}\n\n# Write a kernel postinst.d script to automatically precompile packages on kernel update (similar to DKMS).\n_write_kernel_update_hook() {\n    if [ ! 
-d ${KERNEL_UPDATE_HOOK%/*} ]; then\n        return\n    fi\n\n    echo \"Writing kernel update hook...\"\n    cat > ${KERNEL_UPDATE_HOOK} <<'EOF'\n#!/bin/bash\n\nset -eu\ntrap 'echo \"ERROR: Failed to update the NVIDIA driver\" >&2; exit 0' ERR\n\nNVIDIA_DRIVER_PID=$(< /run/nvidia/nvidia-driver.pid)\n\nexport \"$(grep -z DRIVER_VERSION /proc/${NVIDIA_DRIVER_PID}/environ)\"\nnsenter -t \"${NVIDIA_DRIVER_PID}\" -m -- nvidia-driver update --kernel \"$1\"\nEOF\n    chmod +x ${KERNEL_UPDATE_HOOK}\n}\n\n_shutdown() {\n    if _unload_driver; then\n        _unmount_rootfs\n        rm -f ${PID_FILE} ${KERNEL_UPDATE_HOOK}\n        return 0\n    fi\n    return 1\n}\n\n_find_vgpu_driver_version() {\n    local count=\"\"\n    local version=\"\"\n    local drivers_path=\"/drivers\"\n\n    if [ \"${DISABLE_VGPU_VERSION_CHECK}\" = \"true\" ]; then\n        echo \"vgpu version compatibility check is disabled\"\n        return 0\n    fi\n    # check if vgpu devices are present\n    count=$(vgpu-util count)\n    if [ $? -ne 0 ]; then\n         echo \"cannot find vgpu devices on host, pleae check /var/log/vgpu-util.log for more details...\"\n         return 0\n    fi\n    NUM_VGPU_DEVICES=$(echo \"$count\" | awk -F= '{print $2}')\n    if [ $NUM_VGPU_DEVICES -eq 0 ]; then\n        # no vgpu devices found, treat as passthrough\n        return 0\n    fi\n    echo \"found $NUM_VGPU_DEVICES vgpu devices on host\"\n\n    # find compatible guest driver using driver catalog\n    if [ -d \"/mnt/shared-nvidia-driver-toolkit/drivers\" ]; then\n        drivers_path=\"/mnt/shared-nvidia-driver-toolkit/drivers\"\n    fi\n    version=$(vgpu-util match -i \"${drivers_path}\" -c \"${drivers_path}/vgpuDriverCatalog.yaml\")\n    if [ $? -ne 0 ]; then\n        echo \"cannot find match for compatible vgpu driver from available list, please check /var/log/vgpu-util.log for more details...\"\n        return 1\n    fi\n    DRIVER_VERSION=$(echo \"$version\" | awk -F= '{print $2}')\n    echo \"vgpu driver version selected: ${DRIVER_VERSION}\"\n    return 0\n}\n\n_start_vgpu_topology_daemon() {\n    type nvidia-topologyd > /dev/null 2>&1 || return 0\n    echo \"Starting nvidia-topologyd..\"\n    nvidia-topologyd\n}\n\n_prepare() {\n    if [ \"${DRIVER_TYPE}\" = \"vgpu\" ]; then\n        _find_vgpu_driver_version || exit 1\n    fi\n\n    # Install the userspace components and copy the kernel module sources.\n    sh NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION.run -x && \\\n        cd NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION && \\\n        sh /tmp/install.sh nvinstall && \\\n        mkdir -p /usr/src/nvidia-$DRIVER_VERSION && \\\n        mv LICENSE mkprecompiled ${KERNEL_TYPE} /usr/src/nvidia-$DRIVER_VERSION && \\\n        sed '9,${/^\\(kernel\\|LICENSE\\)/!d}' .manifest > /usr/src/nvidia-$DRIVER_VERSION/.manifest\n\n    echo -e \"\\n========== NVIDIA Software Installer ==========\\n\"\n    echo -e \"Starting installation of NVIDIA driver version ${DRIVER_VERSION} for Linux kernel version ${KERNEL_VERSION}\\n\"\n}\n\n_prepare_exclusive() {\n    _prepare\n\n    exec 3> ${PID_FILE}\n    if ! 
flock -n 3; then\n        echo \"An instance of the NVIDIA driver is already running, aborting\"\n        exit 1\n    fi\n    echo $$ >&3\n\n    trap \"echo 'Caught signal'; exit 1\" HUP INT QUIT PIPE TERM\n    trap \"_shutdown\" EXIT\n\n    _unload_driver || exit 1\n    _unmount_rootfs\n}\n\n_build() {\n    # Install dependencies\n    if _kernel_requires_package; then\n        _update_package_cache\n        _install_prerequisites\n        _create_driver_package\n        #_remove_prerequisites\n        _cleanup_package_cache\n    fi\n\n    # Build the driver\n    _install_driver\n}\n\n_load() {\n    _load_driver\n    _mount_rootfs\n    _write_kernel_update_hook\n\n    echo \"Done, now waiting for signal\"\n    sleep infinity &\n    trap \"echo 'Caught signal'; _shutdown && { kill $!; exit 0; }\" HUP INT QUIT PIPE TERM\n    trap - EXIT\n    while true; do wait $! || continue; done\n    exit 0\n}\n\ninit() {\n    _prepare_exclusive\n\n    _build\n\n    _load\n}\n\nbuild() {\n    _prepare\n\n    _build\n}\n\nload() {\n    _prepare_exclusive\n\n    _load\n}\n\nupdate() {\n    exec 3>&2\n    if exec 2> /dev/null 4< ${PID_FILE}; then\n        if ! flock -n 4 && read pid <&4 && kill -0 \"${pid}\"; then\n            exec > >(tee -a \"/proc/${pid}/fd/1\")\n            exec 2> >(tee -a \"/proc/${pid}/fd/2\" >&3)\n        else\n            exec 2>&3\n        fi\n        exec 4>&-\n    fi\n    exec 3>&-\n\n    # vgpu driver version is chosen dynamically during runtime, so pre-compile modules for\n    # only non-vgpu driver types\n    if [ \"${DRIVER_TYPE}\" != \"vgpu\" ]; then\n        # Install the userspace components and copy the kernel module sources.\n        if [ ! -e /usr/src/nvidia-${DRIVER_VERSION}/mkprecompiled ]; then\n            sh NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION.run -x && \\\n                cd NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION && \\\n                sh /tmp/install.sh nvinstall && \\\n                mkdir -p /usr/src/nvidia-$DRIVER_VERSION && \\\n                mv LICENSE mkprecompiled ${KERNEL_TYPE} /usr/src/nvidia-$DRIVER_VERSION && \\\n                sed '9,${/^\\(kernel\\|LICENSE\\)/!d}' .manifest > /usr/src/nvidia-$DRIVER_VERSION/.manifest\n        fi\n    fi\n\n    echo -e \"\\n========== NVIDIA Software Updater ==========\\n\"\n    echo -e \"Starting update of NVIDIA driver version ${DRIVER_VERSION} for Linux kernel version ${KERNEL_VERSION}\\n\"\n\n    trap \"echo 'Caught signal'; exit 1\" HUP INT QUIT PIPE TERM\n\n    _update_package_cache\n    _resolve_kernel_version || exit 1\n    _install_prerequisites\n    if _kernel_requires_package; then\n        _create_driver_package\n    fi\n    _remove_prerequisites\n    _cleanup_package_cache\n\n    echo \"Done\"\n    exit 0\n}\n\n# Wait for MOFED drivers to be loaded and load nvidia-peermem whenever it gets unloaded during MOFED driver updates\nreload_nvidia_peermem() {\n    if [ \"$USE_HOST_MOFED\" = \"true\" ]; then\n        until  lsmod | grep mlx5_core > /dev/null 2>&1 && [ -f /run/nvidia/validations/.driver-ctr-ready ];\n        do\n            echo \"waiting for mellanox ofed and nvidia drivers to be installed\"\n            sleep 10\n        done\n    else\n        # use driver readiness flag created by MOFED container\n        until  [ -f /run/mellanox/drivers/.driver-ready ] && [ -f /run/nvidia/validations/.driver-ctr-ready ];\n        do\n            echo \"waiting for mellanox ofed and nvidia drivers to be installed\"\n            sleep 10\n        done\n    fi\n    # get any parameters provided for 
nvidia-peermem\n    _get_module_params && set +o nounset\n    if chroot /run/nvidia/driver modprobe nvidia-peermem \"${NVIDIA_PEERMEM_MODULE_PARAMS[@]}\"; then\n        if [ -f /sys/module/nvidia_peermem/refcnt ]; then\n            echo \"successfully loaded nvidia-peermem module, now waiting for signal\"\n            sleep inf\n            trap \"echo 'Caught signal'; exit 1\" HUP INT QUIT PIPE TERM\n        fi\n    fi\n    echo \"failed to load nvidia-peermem module\"\n    exit 1\n}\n\n# probe by gpu-operator for liveness/startup checks for nvidia-peermem module to be loaded when MOFED drivers are ready\nprobe_nvidia_peermem() {\n    if lsmod | grep mlx5_core > /dev/null 2>&1; then\n        if [ ! -f /sys/module/nvidia_peermem/refcnt ]; then\n            echo \"nvidia-peermem module is not loaded\"\n            return 1\n        fi\n    else\n        echo \"MOFED drivers are not ready, skipping probe to avoid container restarts...\"\n    fi\n    return 0\n}\n\nusage() {\n    cat >&2 <<EOF\nUsage: $0 COMMAND [ARG...]\n\nCommands:\n  init   [-a | --accept-license] [-m | --max-threads MAX_THREADS]\n  build  [-a | --accept-license] [-m | --max-threads MAX_THREADS]\n  load\n  update [-k | --kernel VERSION] [-s | --sign KEYID] [-t | --tag TAG] [-m | --max-threads MAX_THREADS]\nEOF\n    exit 1\n}\n\nif [ $# -eq 0 ]; then\n    usage\nfi\ncommand=$1; shift\ncase \"${command}\" in\n    init) options=$(getopt -l accept-license,max-threads: -o am: -- \"$@\") ;;\n    build) options=$(getopt -l accept-license,tag:,max-threads: -o a:t:m: -- \"$@\") ;;\n    load) options=\"\" ;;\n    update) options=$(getopt -l kernel:,sign:,tag:,max-threads: -o k:s:t:m: -- \"$@\") ;;\n    reload_nvidia_peermem) options=\"\" ;;\n    probe_nvidia_peermem) options=\"\" ;;\n    *) usage ;;\nesac\nif [ $? -ne 0 ]; then\n    usage\nfi\neval set -- \"${options}\"\n\nACCEPT_LICENSE=\"\"\nMAX_THREADS=\"\"\nKERNEL_VERSION=$(uname -r)\nPRIVATE_KEY=\"\"\nPACKAGE_TAG=\"\"\n\nfor opt in ${options}; do\n    case \"$opt\" in\n    -a | --accept-license) ACCEPT_LICENSE=\"yes\"; shift 1 ;;\n    -k | --kernel) KERNEL_VERSION=$2; shift 2 ;;\n    -m | --max-threads) MAX_THREADS=$2; shift 2 ;;\n    -s | --sign) PRIVATE_KEY=$2; shift 2 ;;\n    -t | --tag) PACKAGE_TAG=$2; shift 2 ;;\n    --) shift; break ;;\n    esac\ndone\nif [ $# -ne 0 ]; then\n    usage\nfi\n\n_resolve_rhel_version || exit 1\n\n$command\n

Use the official image as a base to build a custom image. Below is an example Dockerfile:

                      FROM nvcr.io/nvidia/driver:535.183.06-rhel9.2\nCOPY nvidia-driver /usr/local/bin\nRUN chmod +x /usr/local/bin/nvidia-driver\nCMD [\"/bin/bash\", \"-c\"]\n

Build the image and push it to the bootstrap node registry:

docker build -t {bootstrap-node-registry}/nvcr.m.daocloud.io/nvidia/driver:535.183.06-01-rhel9.2 -f Dockerfile .
docker push {bootstrap-node-registry}/nvcr.m.daocloud.io/nvidia/driver:535.183.06-01-rhel9.2
"},{"location":"admin/kpanda/gpu/nvidia/rhel9.2_offline_install_driver.html#_2","title":"Install the Driver","text":"
1. Install the gpu-operator addon
2. Set driver.version=535.183.06-01 (see the sketch below)
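For reference, a minimal sketch of step 2 using the community gpu-operator Helm chart (an assumption; the platform's addon form exposes the same driver fields, and the nvidia repo alias plus the {bootstrap-node-registry} placeholder come from the build step above):

# Sketch: install/upgrade the gpu-operator addon via Helm, pointing the
# driver image at the custom build pushed above.
helm upgrade --install gpu-operator nvidia/gpu-operator \
  -n gpu-operator \
  --set driver.repository={bootstrap-node-registry}/nvcr.m.daocloud.io/nvidia \
  --set driver.version=535.183.06-01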
                      "},{"location":"admin/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html","title":"Ubuntu22.04 \u79bb\u7ebf\u5b89\u88c5 gpu-operator \u9a71\u52a8","text":"

Prerequisite: gpu-operator v23.9.0+2 or later is installed.

"},{"location":"admin/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html#_1","title":"Prepare the Offline Image","text":"
1. Check the kernel version:

   $ uname -r
   5.15.0-78-generic

2. Check the GPU driver image version that matches the kernel at https://catalog.ngc.nvidia.com/orgs/nvidia/containers/driver/tags. Use the kernel version to look up the image tag, then save the image with ctr export:

   ctr i pull nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04
   ctr i export --all-platforms driver.tar.gz nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04

3. Import the image into the registry of the bootstrap cluster:

   ctr i import driver.tar.gz
   ctr i tag nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 {bootstrap-node-registry}/nvcr.m.daocloud.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04
   ctr i push {bootstrap-node-registry}/nvcr.m.daocloud.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 --skip-verify=true
"},{"location":"admin/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html#_2","title":"Install the Driver","text":"
1. Install the gpu-operator addon
2. To use precompiled mode, set driver.usePrecompiled=true and set driver.version=535; note that the value here is 535, not 535.104.12. (In non-precompiled mode, skip this step and install directly; see the sketch below.)
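For reference, a minimal sketch of the precompiled-mode settings using the community gpu-operator Helm chart (an assumption; the platform's addon form exposes the same fields):

# Sketch: enable precompiled driver images; note the major version only.
helm upgrade --install gpu-operator nvidia/gpu-operator \
  -n gpu-operator \
  --set driver.usePrecompiled=true \
  --set driver.version=535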
                      "},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html","title":"\u6784\u5efa CentOS 7.9 \u79bb\u7ebf yum \u6e90","text":""},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#_1","title":"\u4f7f\u7528\u573a\u666f\u4ecb\u7ecd","text":"

When the kernel version or OS type of a worker node differs from that of the global service cluster's control nodes, you need to build an offline yum repository manually.

This article describes how to build an offline yum repository and use it through the RepoConfig.ConfigMapName parameter when installing the GPU Operator.

"},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#_2","title":"Prerequisites","text":"
1. The addon offline package v0.12.0 or later has been installed on the platform.
2. Prepare a file server, such as nginx or minio, that is network-reachable from the cluster where the GPU Operator will be deployed.
3. Prepare a node that can access the internet, the cluster where the GPU Operator will be deployed, and the file server, and on which Docker is already installed.
"},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#_3","title":"Procedure","text":"

This article uses a CentOS 7.9 node with kernel version 3.10.0-1160.95.1.el7.x86_64 as an example to describe how to build a yum repository for the GPU Operator offline package.

"},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#os","title":"Check the OS and Kernel Versions of the Cluster Nodes","text":"

Run the following commands on both a control node of the global service cluster and the node where the GPU Operator will be deployed. If the OS and kernel versions of the two nodes are identical, there is no need to build a yum repository; you can install the GPU Operator directly by following the offline installation documentation. If the OS or kernel versions differ, continue to the next step.

1. Run the following command to check the distribution name and version of the node where the GPU Operator will be deployed.

   cat /etc/redhat-release

   The expected output is:

   CentOS Linux release 7.9 (Core)

   The output shows that the OS version of the current node is CentOS 7.9.

2. Run the following command to check the kernel version of the node where the GPU Operator will be deployed.

   uname -a

   The expected output is:

   Linux localhost.localdomain 3.10.0-1160.95.1.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux

   The output shows that the kernel version of the current node is 3.10.0-1160.95.1.el7.x86_64.

"},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#yum","title":"Create the Offline yum Repository","text":"

Perform the following operations on a node that can access both the internet and the file server.

1. On that node, run the following command to create a script file named yum.sh.

   vi yum.sh

   Press i to enter insert mode, then enter the following content:

export TARGET_KERNEL_VERSION=$1

cat >> run.sh << \EOF
#! /bin/bash
echo "start install kernel repo"
echo ${KERNEL_VERSION}
mkdir centos-base

if [ "$OS" -eq 7 ]; then
    yum install -y --downloadonly --downloaddir=./centos-base perl
    yum install -y --downloadonly --downloaddir=./centos-base elfutils-libelf.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base elfutils-libelf-devel.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base kernel-headers-${KERNEL_VERSION}.el7.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base kernel-devel-${KERNEL_VERSION}.el7.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base kernel-${KERNEL_VERSION}.el7.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base groff-base
elif [ "$OS" -eq 8 ]; then
    yum install -y --downloadonly --downloaddir=./centos-base perl
    yum install -y --downloadonly --downloaddir=./centos-base elfutils-libelf.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base elfutils-libelf-devel.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base kernel-headers-${KERNEL_VERSION}.el8.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base kernel-devel-${KERNEL_VERSION}.el8.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base kernel-${KERNEL_VERSION}.el8.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base groff-base
else
    echo "Error os version"
fi

createrepo centos-base/
ls -lh centos-base/
tar -zcf centos-base.tar.gz centos-base/
echo "end install kernel repo"
EOF

cat >> Dockerfile << EOF
FROM centos:7
ENV KERNEL_VERSION=""
ENV OS=7
RUN yum install -y createrepo
COPY run.sh .
ENTRYPOINT ["/bin/bash","run.sh"]
EOF

docker build -t test:v1 -f Dockerfile .
docker run -e KERNEL_VERSION=$TARGET_KERNEL_VERSION --name centos7.9 test:v1
docker cp centos7.9:/centos-base.tar.gz .
tar -xzf centos-base.tar.gz

Press esc to exit insert mode, then type :wq to save the file and exit.

2. Run the yum.sh script:

   bash -x yum.sh TARGET_KERNEL_VERSION

   The TARGET_KERNEL_VERSION parameter specifies the kernel version of the cluster nodes; note that the distribution suffix (such as .el7.x86_64) must be omitted. For example:

   bash -x yum.sh 3.10.0-1160.95.1

At this point, you have generated an offline yum repository, centos-base, for kernel 3.10.0-1160.95.1.el7.x86_64.
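Before uploading, you can sanity-check the generated repository; a quick sketch (the exact package list varies with the kernel version):

ls centos-base/            # downloaded rpm packages plus a repodata/ directory
ls centos-base/repodata/   # repo index files such as repomd.xml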

"},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#yum_1","title":"Upload the Offline yum Repository to the File Server","text":"

Perform the following operations on a node that can access both the internet and the file server. This step uploads the yum repository generated in the previous step to a file server that can be reached by the cluster where the GPU Operator will be deployed. The file server can be Nginx, MinIO, or any other file server that supports the HTTP protocol.

This example uses the built-in MinIO of the bootstrap node of the Suanova AI computing platform as the file server. The MinIO details are as follows:

• Access address: http://10.5.14.200:9000 (typically {bootstrap node IP} + {port 9000})
• Username: rootuser
• Password: rootpass123

• In the current directory on the node, run the following command to link the local mc command-line tool to the MinIO server.

  mc config host add minio http://10.5.14.200:9000 rootuser rootpass123

  The expected output is:

  Added `minio` successfully.

  The mc command-line tool is the client CLI provided by the MinIO file server. For details, see MinIO Client.

• In the current directory on the node, create a bucket named centos-base.

  mc mb -p minio/centos-base

  The expected output is:

  Bucket created successfully `minio/centos-base`.
• Set the access policy of the centos-base bucket to allow public downloads, so that it can be accessed later when installing the GPU Operator.

  mc anonymous set download minio/centos-base

  The expected output is:

  Access permission for `minio/centos-base` is set to `download`
• In the current directory on the node, copy the offline yum repository directory centos-base generated in step two to the minio/centos-base bucket on the MinIO server.

  mc cp centos-base minio/centos-base --recursive
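You can verify the upload with mc; a minimal sketch:

mc ls --recursive minio/centos-base | head   # should list the mirrored rpm files and repodata/ entries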
                      "},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#yum_2","title":"\u5728\u96c6\u7fa4\u521b\u5efa\u914d\u7f6e\u9879\u7528\u6765\u4fdd\u5b58 yum \u6e90\u4fe1\u606f","text":"

Perform the following operations on a control node of the cluster where the GPU Operator will be deployed.

1. Run the following command to create a file named CentOS-Base.repo that specifies the configuration of the yum repository.

# The file must be named CentOS-Base.repo, otherwise it cannot be recognized when installing gpu-operator
cat > CentOS-Base.repo << EOF
[extension-0]
baseurl = http://10.5.14.200:9000/centos-base/centos-base # address of the file server hosting the yum repository (step three)
gpgcheck = 0
name = kubean extension 0

[extension-1]
baseurl = http://10.5.14.200:9000/centos-base/centos-base # address of the file server hosting the yum repository (step three)
gpgcheck = 0
name = kubean extension 1
EOF
2. Based on the CentOS-Base.repo file, create a ConfigMap named local-repo-config in the gpu-operator namespace:

   kubectl create configmap local-repo-config -n gpu-operator --from-file=CentOS-Base.repo=./CentOS-Base.repo

   The expected output is:

   configmap/local-repo-config created

   The local-repo-config ConfigMap provides the value of the RepoConfig.ConfigMapName parameter when installing gpu-operator; the ConfigMap name can be customized.

3. View the contents of the local-repo-config ConfigMap:

   kubectl get configmap local-repo-config -n gpu-operator -oyaml

   The expected output is:

apiVersion: v1
data:
  CentOS-Base.repo: "[extension-0]\nbaseurl = http://10.6.232.5:32618/centos-base # address of the file server hosting the yum repository (step two)\ngpgcheck = 0\nname = kubean extension 0\n  \n[extension-1]\nbaseurl = http://10.6.232.5:32618/centos-base # address of the file server hosting the yum repository (step two)\ngpgcheck = 0\nname = kubean extension 1\n"
kind: ConfigMap
metadata:
  creationTimestamp: "2023-10-18T01:59:02Z"
  name: local-repo-config
  namespace: gpu-operator
  resourceVersion: "59445080"
  uid: c5f0ebab-046f-442c-b932-f9003e014387

At this point, you have successfully created the offline yum repository ConfigMap for the cluster where the GPU Operator will be deployed. Use it through the RepoConfig.ConfigMapName parameter when installing the GPU Operator offline.
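If you install the GPU Operator with the community Helm chart rather than through the platform form, the chart value corresponding to RepoConfig.ConfigMapName is, as an assumption based on the upstream chart, driver.repoConfig.configMapName; a minimal sketch:

# Sketch: point the driver build at the offline repo ConfigMap created above.
helm upgrade --install gpu-operator nvidia/gpu-operator \
  -n gpu-operator \
  --set driver.repoConfig.configMapName=local-repo-config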

"},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html","title":"Build an Offline yum Repository for Red Hat 8.4","text":"

The Suanova AI computing platform comes with a preinstalled GPU Operator offline package for CentOS 7.9 with kernel 3.10.0-1160. For nodes with other OS types or kernels, you need to build an offline yum repository manually.

This article describes how to build an offline yum repository package for Red Hat 8.4 on any node of the global service cluster and use it through the RepoConfig.ConfigMapName parameter when installing the GPU Operator.

"},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#_1","title":"Prerequisites","text":"
1. The addon offline package v0.12.0 or later has been installed on the platform.
2. The OS of the cluster nodes where the GPU Operator will be deployed must be Red Hat 8.4, and their kernel versions must be identical.
3. Prepare a file server, such as nginx or minio, that is network-reachable from the cluster where the GPU Operator will be deployed.
4. Prepare a node that can access the internet, the cluster where the GPU Operator will be deployed, and the file server, and on which Docker is already installed.
5. The nodes of the global service cluster must be Red Hat 8.4 with kernel 4.18.0-305.el8.x86_64.
"},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#_2","title":"Procedure","text":"

This article uses a Red Hat 8.4 node with kernel 4.18.0-305.el8.x86_64 as an example to describe how to build an offline yum repository package for Red Hat 8.4 on any node of the global service cluster and use it through the RepoConfig.ConfigMapName parameter when installing the GPU Operator.

"},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#yum","title":"Download the yum Repository from the Bootstrap Node","text":"

Perform the following operations on a master node of the global service cluster.

1. Use ssh or another method to log in to any node of the global service cluster and run:

   cat /etc/yum.repos.d/extension.repo # view the contents of extension.repo

   The expected output is:

[extension-0]
baseurl = http://10.5.14.200:9000/kubean/redhat/$releasever/os/$basearch
gpgcheck = 0
name = kubean extension 0

[extension-1]
baseurl = http://10.5.14.200:9000/kubean/redhat-iso/$releasever/os/$basearch/AppStream
gpgcheck = 0
name = kubean extension 1

[extension-2]
baseurl = http://10.5.14.200:9000/kubean/redhat-iso/$releasever/os/$basearch/BaseOS
gpgcheck = 0
name = kubean extension 2
2. Create a folder named redhat-base-repo under the root user's home directory:

   mkdir redhat-base-repo

3. Install yum-utils, which provides the reposync tool used to download the rpm packages from the yum repository:

   yum install yum-utils

4. Download the rpm packages from extension-1:

   reposync -p redhat-base-repo -n --repoid=extension-1

5. Download the rpm packages from extension-2:

   reposync -p redhat-base-repo -n --repoid=extension-2
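As a quick sanity check that the packages actually landed locally, you can count them; a sketch (the counts will differ in your environment):

ls ~/redhat-base-repo/extension-1/Packages | wc -l
ls ~/redhat-base-repo/extension-2/Packages | wc -l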
                      "},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#elfutils-libelf-devel-0187-4el8x86_64rpm","title":"\u4e0b\u8f7d elfutils-libelf-devel-0.187-4.el8.x86_64.rpm \u5305","text":"

Perform the following operations on the internet-connected node. Before you begin, make sure there is network connectivity between the internet-connected node and the master node of the global service cluster.

1. On the internet-connected node, run the following command to download the elfutils-libelf-devel-0.187-4.el8.x86_64.rpm package:

   wget https://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/elfutils-libelf-devel-0.187-4.el8.x86_64.rpm

2. From the current directory, transfer the elfutils-libelf-devel-0.187-4.el8.x86_64.rpm package to the node from step one:

   scp elfutils-libelf-devel-0.187-4.el8.x86_64.rpm user@ip:~/redhat-base-repo/extension-2/Packages/

   For example:

   scp elfutils-libelf-devel-0.187-4.el8.x86_64.rpm root@10.6.175.10:~/redhat-base-repo/extension-2/Packages/
"},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#yum-repo","title":"Generate the Local yum Repo","text":"

Perform the following operations on the global service cluster master node from step one.

1. Install the createrepo tool (skip this step if it is already installed):

   yum install createrepo -y

2. Generate the repo index in each Packages directory:

   cd ~/redhat-base-repo/extension-1/Packages
   createrepo_c ./
   cd ~/redhat-base-repo/extension-2/Packages
   createrepo_c ./

At this point, you have generated an offline yum repository, redhat-base-repo, for kernel 4.18.0-305.el8.x86_64.
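You can confirm that the repo index was generated in each directory; a quick sketch:

ls ~/redhat-base-repo/extension-1/Packages/repodata   # repomd.xml and related index files
ls ~/redhat-base-repo/extension-2/Packages/repodata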

"},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#yum-repo_1","title":"Upload the Locally Generated yum Repo to the File Server","text":"

This example uses the built-in MinIO of the bootstrap node of the Suanova AI computing platform as the file server; you can choose a file server based on your own situation. The MinIO details are as follows:

• Access address: http://10.5.14.200:9000 (typically {bootstrap node IP} + {port 9000})
• Username: rootuser
• Password: rootpass123

• In the current directory on the node, run the following command to link the local mc command-line tool to the MinIO server.

  mc config host add minio <file server access address> <username> <password>

  For example:

  mc config host add minio http://10.5.14.200:9000 rootuser rootpass123

  The expected output is:

  Added `minio` successfully.

  The mc command-line tool is the client CLI provided by the MinIO file server. For details, see MinIO Client.

• In the current directory on the node, create a bucket named redhat-base.

  mc mb -p minio/redhat-base

  The expected output is:

  Bucket created successfully `minio/redhat-base`.
• Set the access policy of the redhat-base bucket to allow public downloads, so that it can be accessed later when installing the GPU Operator.

  mc anonymous set download minio/redhat-base

  The expected output is:

  Access permission for `minio/redhat-base` is set to `download`
• In the current directory on the node, copy the offline yum repository directory redhat-base-repo generated in step two to the minio/redhat-base bucket on the MinIO server.

  mc cp redhat-base-repo minio/redhat-base --recursive
"},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#yum_1","title":"Create a ConfigMap in the Cluster to Store the yum Repository Information","text":"

Perform this step on a control node of the cluster where the GPU Operator will be deployed.

1. Run the following command to create a file named redhat.repo that specifies the configuration of the yum repository.

   # The file must be named redhat.repo, otherwise it cannot be recognized when installing gpu-operator
   cat > redhat.repo << EOF
   [extension-0]
   baseurl = http://10.5.14.200:9000/redhat-base/redhat-base-repo/Packages # address of the file server hosting the yum repository (step one)
   gpgcheck = 0
   name = kubean extension 0

   [extension-1]
   baseurl = http://10.5.14.200:9000/redhat-base/redhat-base-repo/Packages # address of the file server hosting the yum repository (step one)
   gpgcheck = 0
   name = kubean extension 1
   EOF
2. Based on the redhat.repo file, create a ConfigMap named local-repo-config in the gpu-operator namespace:

   kubectl create configmap local-repo-config -n gpu-operator --from-file=./redhat.repo

   The expected output is:

   configmap/local-repo-config created

   The local-repo-config ConfigMap provides the value of the RepoConfig.ConfigMapName parameter when installing gpu-operator; the ConfigMap name can be customized.

3. View the contents of the local-repo-config ConfigMap:

   kubectl get configmap local-repo-config -n gpu-operator -oyaml

At this point, you have successfully created the offline yum repository ConfigMap for the cluster where the GPU Operator will be deployed. Use it through the RepoConfig.ConfigMapName parameter when installing the GPU Operator offline.

"},{"location":"admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html","title":"Build an Offline yum Repository for Red Hat 7.9","text":""},{"location":"admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#_1","title":"Use Cases","text":"

The Suanova AI computing platform comes with a preinstalled GPU Operator offline package for CentOS 7.9 with kernel 3.10.0-1160. For nodes with other OS types or kernels, you need to build an offline yum repository manually.

This article describes how to build an offline yum repository package for Red Hat 7.9 on any node of the global service cluster and use it through the RepoConfig.ConfigMapName parameter when installing the GPU Operator.

"},{"location":"admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#_2","title":"Prerequisites","text":"
1. The OS of the cluster nodes where the GPU Operator will be deployed must be Red Hat 7.9, and their kernel versions must be identical
2. Prepare a file server, such as nginx or minio, that is network-reachable from the cluster where the GPU Operator will be deployed
3. Prepare a node that can access the internet, the cluster where the GPU Operator will be deployed, and the file server, and on which Docker is already installed
4. The nodes of the global service cluster must be Red Hat 7.9
"},{"location":"admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#_3","title":"Procedure","text":""},{"location":"admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#1-yum","title":"1. Build the Offline yum Repository for the Relevant Kernel Version","text":"
1. Download the rhel7.9 ISO

2. Download the rhel7.9 ospackage that matches your Kubean version

   In Container Management, find Helm Apps in the global service cluster, search for kubean, and check its version number.

   Download the rhel7.9 ospackage for that version from the kubean code repository.

3. Import the offline resources via the installer

   See the Import Offline Resources documentation.

"},{"location":"admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#2-red-hat-79-os","title":"2. Download the Offline Driver Image for Red Hat 7.9 OS","text":"

Click to view the download address.

"},{"location":"admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#3-red-hat-gpu-opreator","title":"3. Upload the Red Hat GPU Operator Offline Image to the Bootstrap Node Registry","text":"

See Upload the Red Hat GPU Operator Offline Image to the Bootstrap Node Registry.

                      Note

That reference uses rhel8.4 as an example; be sure to change it to rhel7.9.

"},{"location":"admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#4-yum","title":"4. Create a ConfigMap in the Cluster to Store the yum Repository Information","text":"

Run the following commands on a control node of the cluster where the GPU Operator will be deployed.

1. Run the following command to create a file named CentOS-Base.repo that specifies the configuration of the yum repository.

   # The file must be named CentOS-Base.repo, otherwise it cannot be recognized when installing gpu-operator
   cat > CentOS-Base.repo << EOF
   [extension-0]
   baseurl = http://10.5.14.200:9000/centos-base/centos-base # file server address on the bootstrap node, typically {bootstrap node IP} + {port 9000}
   gpgcheck = 0
   name = kubean extension 0

   [extension-1]
   baseurl = http://10.5.14.200:9000/centos-base/centos-base # file server address on the bootstrap node, typically {bootstrap node IP} + {port 9000}
   gpgcheck = 0
   name = kubean extension 1
   EOF
2. Based on the CentOS-Base.repo file, create a ConfigMap named local-repo-config in the gpu-operator namespace:

   kubectl create configmap local-repo-config -n gpu-operator --from-file=CentOS-Base.repo=./CentOS-Base.repo

   The expected output is:

   configmap/local-repo-config created

   The local-repo-config ConfigMap provides the value of the RepoConfig.ConfigMapName parameter when installing gpu-operator; the ConfigMap name can be customized.

3. View the contents of the local-repo-config ConfigMap:

   kubectl get configmap local-repo-config -n gpu-operator -oyaml

   The expected output is:

   local-repo-config.yaml

   apiVersion: v1
   data:
     CentOS-Base.repo: "[extension-0]\nbaseurl = http://10.6.232.5:32618/centos-base # address of the file server hosting the yum repository (step 2)\ngpgcheck = 0\nname = kubean extension 0\n  \n[extension-1]\nbaseurl = http://10.6.232.5:32618/centos-base # address of the file server hosting the yum repository (step 2)\ngpgcheck = 0\nname = kubean extension 1\n"
   kind: ConfigMap
   metadata:
     creationTimestamp: "2023-10-18T01:59:02Z"
     name: local-repo-config
     namespace: gpu-operator
     resourceVersion: "59445080"
     uid: c5f0ebab-046f-442c-b932-f9003e014387

At this point, you have successfully created the offline yum repository ConfigMap for the cluster where the GPU Operator will be deployed, which is used through the RepoConfig.ConfigMapName parameter during offline installation of the GPU Operator.

"},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html","title":"GPU Alert Rules","text":"

This article describes how to configure GPU-related alert rules on the Suanova AI computing platform.

"},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#_1","title":"Prerequisites","text":"
• GPU devices are correctly installed on the cluster nodes
• The gpu-operator component is correctly installed in the cluster
• If vGPU is used, the Nvidia-vgpu component is also installed in the cluster, with servicemonitor enabled
• The insight-agent component is correctly installed in the cluster
"},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#gpu_1","title":"Common GPU Metrics for Alerts","text":"

This section introduces the metrics commonly used for GPU alerts, in two parts:

• GPU card-level metrics, which mainly reflect the running state of an individual GPU device.
• Application-level metrics, which mainly reflect the running state of Pods on the GPU.
"},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#gpu_2","title":"GPU Card Metrics","text":"

| Metric Name | Unit | Description |
| --- | --- | --- |
| DCGM_FI_DEV_GPU_UTIL | % | GPU utilization |
| DCGM_FI_DEV_MEM_COPY_UTIL | % | GPU memory utilization |
| DCGM_FI_DEV_ENC_UTIL | % | Encoder utilization |
| DCGM_FI_DEV_DEC_UTIL | % | Decoder utilization |
| DCGM_FI_DEV_FB_FREE | MB | Free framebuffer (GPU memory) |
| DCGM_FI_DEV_FB_USED | MB | Used framebuffer (GPU memory) |
| DCGM_FI_DEV_GPU_TEMP | °C | Current temperature of the GPU |
| DCGM_FI_DEV_POWER_USAGE | W | Device power usage |
| DCGM_FI_DEV_XID_ERRORS | - | The most recent XID error code within a period of time. XID provides information such as the error type, location, and error code for errors in GPU hardware, NVIDIA software, or applications; see the XID messages table for more information |

"},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#_2","title":"Application-Level Metrics","text":"

| Metric Name | Unit | Description |
| --- | --- | --- |
| kpanda_gpu_pod_utilization | % | GPU compute utilization of a Pod |
| kpanda_gpu_mem_pod_usage | MB | GPU memory usage of a Pod |
| kpanda_gpu_mem_pod_utilization | % | GPU memory utilization of a Pod |

"},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#_3","title":"Set Alert Rules","text":"

This section describes how to set up a GPU alert rule, using the GPU utilization metric as an example; choose metrics and write PromQL according to your actual business scenario (a sample expression follows the steps below).

Goal: fire an alert when a GPU card's utilization stays at 80% for five consecutive seconds.

1. On the Observability page, click Alerts -> Alert Policies -> Create Alert Policy

2. Fill in the basic information

3. Add rules

4. Select a notification method

5. Once configured, when a GPU stays at 80% utilization for 5s, you will receive an alert message like the following.
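For step 3, a minimal PromQL sketch that matches this goal, assuming the DCGM metrics listed above are scraped under their default names (label filters and the exact duration field depend on your environment and the alert policy form):

# Fires once a card's utilization is at or above 80%, with the rule's duration set to 5s
DCGM_FI_DEV_GPU_UTIL >= 80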

"},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-metrics.html","title":"GPU Monitoring Metrics","text":"

This page lists some commonly used GPU monitoring metrics.

"},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-metrics.html#_1","title":"Cluster Dimension","text":"

| Metric Name | Description |
| --- | --- |
| GPU Card Count | Total number of GPU cards in the cluster |
| Average GPU Utilization | Average compute utilization of all GPU cards in the cluster |
| Average GPU Memory Utilization | Average memory utilization of all GPU cards in the cluster |
| GPU Card Power | Power draw of all GPU cards in the cluster |
| GPU Card Temperature | Temperature of all GPU cards in the cluster |
| GPU Utilization Details | Utilization details (including max, avg, current) of all GPU cards in the cluster over 24 hours |
| GPU Memory Usage Details | Memory usage details (including min, max, avg, current) of all GPU cards in the cluster over 24 hours |
| GPU Memory Bandwidth Utilization | Memory bandwidth utilization. Taking an NVIDIA V100 GPU as an example, its maximum memory bandwidth is 900 GB/sec; if the current memory bandwidth is 450 GB/sec, the utilization is 50% |

"},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-metrics.html#_2","title":"Node Dimension","text":"

| Metric Name | Description |
| --- | --- |
| GPU Mode | Usage mode of the GPU cards on the node, one of full-card mode, MIG mode, or vGPU mode |
| Physical GPU Count | Total number of GPU cards on the node |
| Virtual GPU Count | Number of vGPU devices that have been created on the node |
| GPU MIG Instance Count | Number of MIG instances that have been created on the node |
| GPU Memory Allocation Rate | Memory allocation rate of all GPU cards on the node |
| Average GPU Compute Utilization | Average compute utilization of all GPU cards on the node |
| Average GPU Memory Utilization | Average memory utilization of all GPU cards on the node |
| GPU Driver Version | Driver version information of the GPU cards on the node |
| GPU Utilization Details | Compute utilization details (including max, avg, current) of each GPU card on the node over 24 hours |
| GPU Memory Usage Details | Memory usage details (including min, max, avg, current) of each GPU card on the node over 24 hours |

Troubleshooting GPU Issues Based on XID Status

XID messages are error reports printed by the NVIDIA driver to the operating system's kernel log or event log. XID messages identify GPU error events and provide information such as the error type, location, and error code for errors in GPU hardware, NVIDIA software, or applications. If the XID check item on a GPU node is empty, there are no XID messages; if it is not, you can troubleshoot and resolve the issue yourself using the table below (a quick log-grep sketch precedes it), or look up more XID messages.
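On a suspect node, a quick way to look for XID reports is to grep the kernel log; a sketch (the exact message format varies by driver version):

dmesg -T | grep -i xid   # NVIDIA XID reports typically appear as lines like: NVRM: Xid (...)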

| XID Message | Description |
| --- | --- |
| 13 | Graphics Engine Exception. Usually an array out-of-bounds access or an instruction error; a small chance of a hardware problem. |
| 31 | GPU memory page fault. Usually an illegal address access by the application; a very small chance of a driver or hardware problem. |
| 32 | Invalid or corrupted push buffer stream. Reported by the DMA controller on the PCIe bus that manages communication between the NVIDIA driver and the GPU. Usually caused by PCI quality issues rather than by your program. |
| 38 | Driver firmware error. Usually a driver firmware error rather than a hardware problem. |
| 43 | GPU stopped processing. Usually an error in your own application rather than a hardware problem. |
| 45 | Preemptive cleanup, due to previous errors -- Most likely to see when running multiple cuda applications and hitting a DBE. Usually the GPU application exited because you stopped it manually or because of another fault (hardware, resource limits, etc.). XID 45 only reports the outcome; finding the specific cause usually requires further log analysis. |
| 48 | Double Bit ECC Error (DBE). Reported when the GPU hits an uncorrectable error; the error is also propagated to your application. Resetting the GPU or rebooting the node is usually required to clear it. |
| 61 | Internal micro-controller breakpoint/warning. An internal GPU engine has stopped working and your workload has been affected. |
| 62 | Internal micro-controller halt. Similar trigger scenario to XID 61. |
| 63 | ECC page retirement or row remapping recording event. When an application hits a GPU memory hardware error, NVIDIA's self-correction mechanism retires or remaps the faulty memory region; retirement and remapping information must be recorded in the infoROM to take permanent effect. Volta architecture: an ECC page retirement event was successfully recorded in the infoROM. Ampere architecture: a row remapping event was successfully recorded in the infoROM. |
| 64 | ECC page retirement or row remapper recording failure. Similar trigger scenario to XID 63, but XID 63 means the retirement/remapping information was successfully recorded in the infoROM, while XID 64 means the recording failed. |
| 68 | NVDEC0 Exception. Usually a hardware or driver problem. |
| 74 | NVLINK Error. An XID raised by an NVLink hardware error, indicating a serious GPU hardware fault; the card needs to be taken offline for repair. |
| 79 | GPU has fallen off the bus. The GPU can no longer be detected on the bus, indicating a serious hardware fault; the card needs to be taken offline for repair. |
| 92 | High single-bit ECC error rate. A hardware or driver fault. |
| 94 | Contained ECC error. When an application hits an uncorrectable GPU memory ECC error, NVIDIA's error-containment mechanism tries to contain the error within the application that hit the hardware fault, so that other applications running on the GPU node are not affected. This event is raised when containment succeeds; only the application that hit the uncorrectable ECC error is affected. |
| 95 | Uncontained ECC error. Similar trigger scenario to XID 94, but XID 94 means containment succeeded while XID 95 means it failed, indicating that all applications running on that GPU have been affected. |

"},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-metrics.html#pod","title":"Pod Dimension","text":"

| Category | Metric Name | Description |
| --- | --- | --- |
| Application Overview: GPU Card - Compute & Memory | Pod GPU Compute Utilization | Compute utilization of the GPU cards used by the current Pod |
| | Pod GPU Memory Utilization | Memory utilization of the GPU cards used by the current Pod |
| | Pod GPU Memory Usage | GPU memory usage of the GPU cards used by the current Pod |
| | GPU Memory Allocation | GPU memory allocated to the current Pod |
| | Pod GPU Memory Copy Utilization | Memory copy ratio of the GPU cards used by the current Pod |
| GPU Card - Engine Overview | GPU Graphics Engine Activity | Fraction of time within a monitoring cycle during which the Graphics or Compute engine is active |
| | GPU Memory Bandwidth Utilization | Fraction of cycles spent sending data to or receiving data from device memory (Memory BW Utilization). The value is an average over the interval, not an instantaneous value; higher values indicate heavier use of device memory. A value of 1 (100%) corresponds to one DRAM instruction issued every cycle over the interval (in practice a peak of about 0.8 (80%) is the achievable maximum). A value of 0.2 (20%) means 20% of cycles in the interval were spent reading from or writing to device memory. |
| | Tensor Core Engine Utilization | Fraction of time within a monitoring cycle during which the Tensor Core pipe is active |
| | FP16 Engine Utilization | Fraction of time within a monitoring cycle during which the FP16 pipe is active |
| | FP32 Engine Utilization | Fraction of time within a monitoring cycle during which the FP32 pipe is active |
| | FP64 Engine Utilization | Fraction of time within a monitoring cycle during which the FP64 pipe is active |
| | GPU Decode Utilization | Decoder engine ratio of the GPU card |
| | GPU Encode Utilization | Encoder engine ratio of the GPU card |
| GPU Card - Temperature & Power | GPU Card Temperature | Temperature of all GPU cards in the cluster |
| | GPU Card Power | Power draw of all GPU cards in the cluster |
| GPU Card - Total Energy | - | Total energy consumed by the GPU card |
| GPU Card - Clock | GPU Memory Clock | Memory clock frequency |
| | Application SM Clock | SM clock frequency of the application |
| | Application Memory Clock | Memory clock frequency of the application |
| | Video Engine Clock | Video engine clock frequency |
| | Throttle Reasons | Reasons for clock throttling |
| GPU Card - Other Details | Graphics Engine Activity | Fraction of time during which any part of the graphics or compute engine was active. The graphics engine is active when a graphics/compute context is bound and the graphics/compute pipe is busy. The value is an average over a time interval, not an instantaneous value. |
| | SM Activity | Fraction of time during which at least one warp was active on a multiprocessor, averaged over all multiprocessors. Note that "active" does not necessarily mean the warp is actively computing; a warp waiting on a memory request, for example, counts as active. The value is an average over the interval, not an instantaneous value. A value of 0.8 or greater is necessary but not sufficient for effective GPU use; values below 0.5 likely indicate inefficient use. Given a simplified view of the GPU architecture, if a GPU has N SMs, a kernel that uses N blocks and runs over the whole interval corresponds to an activity of 1 (100%); a kernel using N/5 blocks over the whole interval corresponds to an activity of 0.2 (20%); a kernel using N blocks but running for only one fifth of the interval, with the SMs otherwise idle, also corresponds to an activity of 0.2 (20%). The value is independent of the number of threads per block (see DCGM_FI_PROF_SM_OCCUPANCY). |
| | SM Occupancy | Fraction of resident warps on a multiprocessor relative to the maximum number of concurrent warps supported, averaged over the interval rather than an instantaneous value. Higher occupancy does not necessarily indicate higher GPU utilization. For workloads bound by GPU memory bandwidth (see DCGM_FI_PROF_DRAM_ACTIVE), higher occupancy does indicate heavier GPU use; for compute-bound workloads (not limited by memory bandwidth or latency), higher occupancy does not necessarily correlate with higher utilization. Computing occupancy is not simple: it depends on GPU properties, threads per block, registers per thread, and shared memory per block, among other factors. Use the CUDA Occupancy Calculator to explore occupancy scenarios. |
| | Tensor Activity | Fraction of cycles during which the tensor (HMMA/IMMA) pipe is active, averaged over the interval rather than an instantaneous value. Higher values mean heavier Tensor Core use. An activity of 1 (100%) corresponds to issuing one tensor instruction every other cycle over the whole interval. An activity of 0.2 (20%) could mean 20% of SMs at 100% utilization over the whole interval, 100% of SMs at 20% utilization over the whole interval, 100% of SMs at 100% utilization for 20% of the interval, or any combination in between (see DCGM_FI_PROF_SM_ACTIVE to help disambiguate). |
| | FP64 Engine Activity | Fraction of cycles during which the FP64 (double-precision) pipe is active, averaged over the interval rather than an instantaneous value. Higher values mean heavier FP64 core use. An activity of 1 (100%) corresponds to one FP64 instruction per SM every four cycles on Volta over the whole interval. An activity of 0.2 (20%) could mean 20% of SMs at 100% utilization over the whole interval, 100% of SMs at 20% utilization over the whole interval, 100% of SMs at 100% utilization for 20% of the interval, or any combination in between (see DCGM_FI_PROF_SM_ACTIVE to help disambiguate). |
| | FP32 Engine Activity | Fraction of cycles during which the FMA (FP32 single-precision and integer) pipe is active, averaged over the interval rather than an instantaneous value. Higher values mean heavier FP32 core use. An activity of 1 (100%) corresponds to one FP32 instruction every other cycle over the whole interval. An activity of 0.2 (20%) could mean 20% of SMs at 100% utilization over the whole interval, 100% of SMs at 20% utilization over the whole interval, 100% of SMs at 100% utilization for 20% of the interval, or any combination in between (see DCGM_FI_PROF_SM_ACTIVE to help disambiguate). |
| | FP16 Engine Activity | Fraction of cycles during which the FP16 (half-precision) pipe is active, averaged over the interval rather than an instantaneous value. Higher values mean heavier FP16 core use. An activity of 1 (100%) corresponds to one FP16 instruction every other cycle over the whole interval. An activity of 0.2 (20%) could mean 20% of SMs at 100% utilization over the whole interval, 100% of SMs at 20% utilization over the whole interval, 100% of SMs at 100% utilization for 20% of the interval, or any combination in between (see DCGM_FI_PROF_SM_ACTIVE to help disambiguate). |
| | Memory Bandwidth Utilization | Fraction of cycles spent sending data to or receiving data from device memory, averaged over the interval rather than an instantaneous value. Higher values mean heavier use of device memory. An activity of 1 (100%) corresponds to one DRAM instruction every cycle over the interval (in practice a peak of about 0.8 (80%) is the achievable maximum). A value of 0.2 (20%) means 20% of cycles in the interval were spent reading from or writing to device memory. |
| | NVLink Bandwidth | Over NVLink
\u4f20\u8f93/\u63a5\u6536\u7684\u6570\u636e\u901f\u7387\uff08\u4e0d\u5305\u62ec\u534f\u8bae\u6807\u5934\uff09\uff0c\u4ee5\u6bcf\u79d2\u5b57\u8282\u6570\u4e3a\u5355\u4f4d\u3002\u8be5\u503c\u8868\u793a\u4e00\u6bb5\u65f6\u95f4\u5185\u7684\u5e73\u5747\u503c\uff0c\u800c\u4e0d\u662f\u77ac\u65f6\u503c\u3002\u901f\u7387\u662f\u4e00\u6bb5\u65f6\u95f4\u5185\u7684\u5e73\u5747\u503c\u3002\u4f8b\u5982\uff0c\u5982\u679c 1 \u79d2\u5185\u4f20\u8f93\u4e86 1 GB \u7684\u6570\u636e\uff0c\u5219\u65e0\u8bba\u6570\u636e\u662f\u4ee5\u6052\u5b9a\u901f\u7387\u8fd8\u662f\u7a81\u53d1\u901f\u7387\u4f20\u8f93\uff0c\u901f\u7387\u90fd\u662f 1 GB/s\u3002\u7406\u8bba\u4e0a\uff0c\u6bcf\u4e2a\u94fe\u8def\u6bcf\u4e2a\u65b9\u5411\u7684\u6700\u5927 NVLink Gen2 \u5e26\u5bbd\u4e3a 25 GB/s\u3002 PCIe \u5e26\u5bbd \u901a\u8fc7 PCIe \u603b\u7ebf\u4f20\u8f93/\u63a5\u6536\u7684\u6570\u636e\u901f\u7387\uff0c\u5305\u62ec\u534f\u8bae\u6807\u5934\u548c\u6570\u636e\u6709\u6548\u8d1f\u8f7d\uff0c\u4ee5\u5b57\u8282/\u79d2\u4e3a\u5355\u4f4d\u3002\u8be5\u503c\u8868\u793a\u4e00\u6bb5\u65f6\u95f4\u5185\u7684\u5e73\u5747\u503c\uff0c\u800c\u4e0d\u662f\u77ac\u65f6\u503c\u3002\u8be5\u901f\u7387\u662f\u4e00\u6bb5\u65f6\u95f4\u5185\u7684\u5e73\u5747\u503c\u3002\u4f8b\u5982\uff0c\u5982\u679c 1 \u79d2\u5185\u4f20\u8f93\u4e86 1 GB \u7684\u6570\u636e\uff0c\u5219\u65e0\u8bba\u6570\u636e\u662f\u4ee5\u6052\u5b9a\u901f\u7387\u8fd8\u662f\u7a81\u53d1\u901f\u7387\u4f20\u8f93\uff0c\u901f\u7387\u90fd\u662f 1 GB/s\u3002\u7406\u8bba\u4e0a\u6700\u5927 PCIe Gen3 \u5e26\u5bbd\u4e3a\u6bcf\u901a\u9053 985 MB/s\u3002 PCIe \u4f20\u8f93\u901f\u7387 \u8282\u70b9 GPU \u5361\u901a\u8fc7 PCIe \u603b\u7ebf\u4f20\u8f93\u7684\u6570\u636e\u901f\u7387 PCIe \u63a5\u6536\u901f\u7387 \u8282\u70b9 GPU \u5361\u901a\u8fc7 PCIe \u603b\u7ebf\u63a5\u6536\u7684\u6570\u636e\u901f\u7387"},{"location":"admin/kpanda/gpu/nvidia/mig/index.html","title":"NVIDIA \u591a\u5b9e\u4f8b GPU(MIG) \u6982\u8ff0","text":""},{"location":"admin/kpanda/gpu/nvidia/mig/index.html#mig","title":"MIG \u573a\u666f","text":"
• Multi-tenant cloud environments

  MIG lets a cloud service provider partition one physical GPU into multiple independent GPU instances, each of which can be allocated to a different tenant. This provides resource isolation and independence, satisfying multiple tenants' demands for GPU compute.

• Containerized applications

  MIG enables finer-grained GPU resource management in containerized environments. By partitioning a physical GPU into multiple MIG instances, each container can be allocated its own GPU compute resources, providing better performance isolation and resource utilization.

• Batch jobs

  For batch jobs that require large-scale parallel computation, MIG provides higher compute performance and larger memory capacity. Each MIG instance can use a portion of the physical GPU's compute resources, accelerating large-scale computation tasks.

• AI / machine learning training

  MIG provides greater compute power and memory capacity when training large deep learning models. Partitioning the physical GPU into multiple MIG instances lets each instance run model training independently, improving training efficiency and throughput.

Overall, NVIDIA MIG suits scenarios that need finer-grained GPU resource allocation and management: it isolates resources, improves performance utilization, and satisfies the GPU compute needs of multiple users or applications.

                      "},{"location":"admin/kpanda/gpu/nvidia/mig/index.html#mig_1","title":"MIG \u6982\u8ff0","text":"

                      NVIDIA \u591a\u5b9e\u4f8b GPU\uff08Multi-Instance GPU\uff0c\u7b80\u79f0 MIG\uff09\u662f NVIDIA \u5728 H100\uff0cA100\uff0cA30 \u7cfb\u5217 GPU \u5361\u4e0a\u63a8\u51fa\u7684\u4e00\u9879\u65b0\u7279\u6027\uff0c \u65e8\u5728\u5c06\u4e00\u5757\u7269\u7406 GPU \u5206\u5272\u4e3a\u591a\u4e2a GPU \u5b9e\u4f8b\uff0c\u4ee5\u63d0\u4f9b\u66f4\u7ec6\u7c92\u5ea6\u7684\u8d44\u6e90\u5171\u4eab\u548c\u9694\u79bb\u3002MIG \u6700\u591a\u53ef\u5c06\u4e00\u5757 GPU \u5212\u5206\u6210\u4e03\u4e2a GPU \u5b9e\u4f8b\uff0c \u4f7f\u5f97\u4e00\u4e2a \u7269\u7406 GPU \u5361\u53ef\u4e3a\u591a\u4e2a\u7528\u6237\u63d0\u4f9b\u5355\u72ec\u7684 GPU \u8d44\u6e90\uff0c\u4ee5\u5b9e\u73b0\u6700\u4f73 GPU \u5229\u7528\u7387\u3002

                      \u8fd9\u4e2a\u529f\u80fd\u4f7f\u5f97\u591a\u4e2a\u5e94\u7528\u7a0b\u5e8f\u6216\u7528\u6237\u53ef\u4ee5\u540c\u65f6\u5171\u4eabGPU\u8d44\u6e90\uff0c\u63d0\u9ad8\u4e86\u8ba1\u7b97\u8d44\u6e90\u7684\u5229\u7528\u7387\uff0c\u5e76\u589e\u52a0\u4e86\u7cfb\u7edf\u7684\u53ef\u6269\u5c55\u6027\u3002

                      \u901a\u8fc7 MIG\uff0c\u6bcf\u4e2a GPU \u5b9e\u4f8b\u7684\u5904\u7406\u5668\u5728\u6574\u4e2a\u5185\u5b58\u7cfb\u7edf\u4e2d\u5177\u6709\u72ec\u7acb\u4e14\u9694\u79bb\u7684\u8def\u5f84\u2014\u2014\u82af\u7247\u4e0a\u7684\u4ea4\u53c9\u5f00\u5173\u7aef\u53e3\u3001L2 \u9ad8\u901f\u7f13\u5b58\u7ec4\u3001\u5185\u5b58\u63a7\u5236\u5668\u548c DRAM \u5730\u5740\u603b\u7ebf\u90fd\u552f\u4e00\u5206\u914d\u7ed9\u5355\u4e2a\u5b9e\u4f8b\u3002

                      \u8fd9\u786e\u4fdd\u4e86\u5355\u4e2a\u7528\u6237\u7684\u5de5\u4f5c\u8d1f\u8f7d\u80fd\u591f\u4ee5\u53ef\u9884\u6d4b\u7684\u541e\u5410\u91cf\u548c\u5ef6\u8fdf\u8fd0\u884c\uff0c\u5e76\u5177\u6709\u76f8\u540c\u7684\u4e8c\u7ea7\u7f13\u5b58\u5206\u914d\u548c DRAM \u5e26\u5bbd\u3002 MIG \u53ef\u4ee5\u5212\u5206\u53ef\u7528\u7684 GPU \u8ba1\u7b97\u8d44\u6e90\uff08\u5305\u62ec\u6d41\u591a\u5904\u7406\u5668\u6216 SM \u548c GPU \u5f15\u64ce\uff0c\u5982\u590d\u5236\u5f15\u64ce\u6216\u89e3\u7801\u5668\uff09\u8fdb\u884c\u5206\u533a\uff0c \u4ee5\u4fbf\u4e3a\u4e0d\u540c\u7684\u5ba2\u6237\u7aef\uff08\u5982\u4e91\u4e3b\u673a\u3001\u5bb9\u5668\u6216\u8fdb\u7a0b\uff09\u63d0\u4f9b\u5b9a\u4e49\u7684\u670d\u52a1\u8d28\u91cf\uff08QoS\uff09\u548c\u6545\u969c\u9694\u79bb\uff09\u3002 MIG \u4f7f\u591a\u4e2a GPU \u5b9e\u4f8b\u80fd\u591f\u5728\u5355\u4e2a\u7269\u7406 GPU \u4e0a\u5e76\u884c\u8fd0\u884c\u3002

                      MIG \u5141\u8bb8\u591a\u4e2a vGPU\uff08\u4ee5\u53ca\u4e91\u4e3b\u673a\uff09\u5728\u5355\u4e2a GPU \u5b9e\u4f8b\u4e0a\u5e76\u884c\u8fd0\u884c\uff0c\u540c\u65f6\u4fdd\u7559 vGPU \u63d0\u4f9b\u7684\u9694\u79bb\u4fdd\u8bc1\u3002 \u6709\u5173\u4f7f\u7528 vGPU \u548c MIG \u8fdb\u884c GPU \u5206\u533a\u7684\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u53c2\u9605 NVIDIA Multi-Instance GPU and NVIDIA Virtual Compute Server\u3002

                      "},{"location":"admin/kpanda/gpu/nvidia/mig/index.html#mig_2","title":"MIG \u67b6\u6784","text":"

                      \u5982\u4e0b\u662f\u4e00\u4e2a MIG \u7684\u6982\u8ff0\u56fe\uff0c\u53ef\u4ee5\u770b\u51fa MIG \u5c06\u4e00\u5f20\u7269\u7406 GPU \u5361\u865a\u62df\u5316\u6210\u4e86 7 \u4e2a GPU \u5b9e\u4f8b\uff0c\u8fd9\u4e9b GPU \u5b9e\u4f8b\u80fd\u591f\u53ef\u4ee5\u88ab\u591a\u4e2a User \u4f7f\u7528\u3002

                      "},{"location":"admin/kpanda/gpu/nvidia/mig/index.html#_1","title":"\u91cd\u8981\u6982\u5ff5","text":"
                      • SM \uff1a\u6d41\u5f0f\u591a\u5904\u7406\u5668\uff08Streaming Multiprocessor\uff09\uff0cGPU \u7684\u6838\u5fc3\u8ba1\u7b97\u5355\u5143\uff0c\u8d1f\u8d23\u6267\u884c\u56fe\u5f62\u6e32\u67d3\u548c\u901a\u7528\u8ba1\u7b97\u4efb\u52a1\u3002 \u6bcf\u4e2a SM \u5305\u542b\u4e00\u7ec4 CUDA \u6838\u5fc3\uff0c\u4ee5\u53ca\u5171\u4eab\u5185\u5b58\u3001\u5bc4\u5b58\u5668\u6587\u4ef6\u548c\u5176\u4ed6\u8d44\u6e90\uff0c\u53ef\u4ee5\u540c\u65f6\u6267\u884c\u591a\u4e2a\u7ebf\u7a0b\u3002 \u6bcf\u4e2a MIG \u5b9e\u4f8b\u90fd\u62e5\u6709\u4e00\u5b9a\u6570\u91cf\u7684 SM \u548c\u5176\u4ed6\u76f8\u5173\u8d44\u6e90\uff0c\u4ee5\u53ca\u88ab\u5212\u5206\u51fa\u6765\u7684\u663e\u5b58\u3002
                      • GPU Memory Slice \uff1aGPU \u5185\u5b58\u5207\u7247\uff0cGPU \u5185\u5b58\u5207\u7247\u662f GPU \u5185\u5b58\u7684\u6700\u5c0f\u90e8\u5206\uff0c\u5305\u62ec\u76f8\u5e94\u7684\u5185\u5b58\u63a7\u5236\u5668\u548c\u7f13\u5b58\u3002 GPU \u5185\u5b58\u5207\u7247\u5927\u7ea6\u662f GPU \u5185\u5b58\u8d44\u6e90\u603b\u91cf\u7684\u516b\u5206\u4e4b\u4e00\uff0c\u5305\u62ec\u5bb9\u91cf\u548c\u5e26\u5bbd\u3002
                      • GPU SM Slice \uff1aGPU SM \u5207\u7247\u662f GPU \u4e0a SM \u7684\u6700\u5c0f\u8ba1\u7b97\u5355\u4f4d\u3002\u5728 MIG \u6a21\u5f0f\u4e0b\u914d\u7f6e\u65f6\uff0c GPU SM \u5207\u7247\u5927\u7ea6\u662f GPU \u4e2d\u53ef\u7528 SMS \u603b\u6570\u7684\u4e03\u5206\u4e4b\u4e00\u3002
                      • GPU Slice \uff1aGPU \u5207\u7247\u662f GPU \u4e2d\u7531\u5355\u4e2a GPU \u5185\u5b58\u5207\u7247\u548c\u5355\u4e2a GPU SM \u5207\u7247\u7ec4\u5408\u5728\u4e00\u8d77\u7684\u6700\u5c0f\u90e8\u5206\u3002
                      • GPU Instance \uff1aGPU \u5b9e\u4f8b \uff08GI\uff09 \u662f GPU \u5207\u7247\u548c GPU \u5f15\u64ce\uff08DMA\u3001NVDEC \u7b49\uff09\u7684\u7ec4\u5408\u3002 GPU \u5b9e\u4f8b\u4e2d\u7684\u4efb\u4f55\u5185\u5bb9\u59cb\u7ec8\u5171\u4eab\u6240\u6709 GPU \u5185\u5b58\u5207\u7247\u548c\u5176\u4ed6 GPU \u5f15\u64ce\uff0c\u4f46\u5b83\u7684 SM \u5207\u7247\u53ef\u4ee5\u8fdb\u4e00\u6b65\u7ec6\u5206\u4e3a\u8ba1\u7b97\u5b9e\u4f8b\uff08CI\uff09\u3002 GPU \u5b9e\u4f8b\u63d0\u4f9b\u5185\u5b58 QoS\u3002\u6bcf\u4e2a GPU \u5207\u7247\u90fd\u5305\u542b\u4e13\u7528\u7684 GPU \u5185\u5b58\u8d44\u6e90\uff0c\u8fd9\u4e9b\u8d44\u6e90\u4f1a\u9650\u5236\u53ef\u7528\u5bb9\u91cf\u548c\u5e26\u5bbd\uff0c\u5e76\u63d0\u4f9b\u5185\u5b58 QoS\u3002 \u6bcf\u4e2a GPU \u5185\u5b58\u5207\u7247\u83b7\u5f97\u603b GPU \u5185\u5b58\u8d44\u6e90\u7684\u516b\u5206\u4e4b\u4e00\uff0c\u6bcf\u4e2a GPU SM \u5207\u7247\u83b7\u5f97 SM \u603b\u6570\u7684\u4e03\u5206\u4e4b\u4e00\u3002
                      • Compute Instance \uff1aGPU \u5b9e\u4f8b\u7684\u8ba1\u7b97\u5207\u7247\u53ef\u4ee5\u8fdb\u4e00\u6b65\u7ec6\u5206\u4e3a\u591a\u4e2a\u8ba1\u7b97\u5b9e\u4f8b \uff08CI\uff09\uff0c\u5176\u4e2d CI \u5171\u4eab\u7236 GI \u7684\u5f15\u64ce\u548c\u5185\u5b58\uff0c\u4f46\u6bcf\u4e2a CI \u90fd\u6709\u4e13\u7528\u7684 SM \u8d44\u6e90\u3002
                      "},{"location":"admin/kpanda/gpu/nvidia/mig/index.html#gpu-gi","title":"GPU \u5b9e\u4f8b\uff08GI\uff09","text":"

                      \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728 GPU \u4e0a\u521b\u5efa\u5404\u79cd\u5206\u533a\u3002\u5c06\u4f7f\u7528 A100-40GB \u4f5c\u4e3a\u793a\u4f8b\u6f14\u793a\u5982\u4f55\u5bf9\u5355\u4e2a GPU \u7269\u7406\u5361\u4e0a\u8fdb\u884c\u5206\u533a\u3002

                      GPU \u7684\u5206\u533a\u662f\u4f7f\u7528\u5185\u5b58\u5207\u7247\u8fdb\u884c\u7684\uff0c\u56e0\u6b64\u53ef\u4ee5\u8ba4\u4e3a A100-40GB GPU \u5177\u6709 8x5GB \u5185\u5b58\u5207\u7247\u548c 7 \u4e2a GPU SM \u5207\u7247\uff0c\u5982\u4e0b\u56fe\u6240\u793a\uff0c\u5c55\u793a\u4e86 A100 \u4e0a\u53ef\u7528\u7684\u5185\u5b58\u5207\u7247\u3002

                      \u5982\u4e0a\u6240\u8ff0\uff0c\u521b\u5efa GPU \u5b9e\u4f8b \uff08GI\uff09 \u9700\u8981\u5c06\u4e00\u5b9a\u6570\u91cf\u7684\u5185\u5b58\u5207\u7247\u4e0e\u4e00\u5b9a\u6570\u91cf\u7684\u8ba1\u7b97\u5207\u7247\u76f8\u7ed3\u5408\u3002 \u5728\u4e0b\u56fe\u4e2d\uff0c\u4e00\u4e2a 5GB \u5185\u5b58\u5207\u7247\u4e0e 1 \u4e2a\u8ba1\u7b97\u5207\u7247\u76f8\u7ed3\u5408\uff0c\u4ee5\u521b\u5efa 1g.5gb GI \u914d\u7f6e\u6587\u4ef6\uff1a

                      \u540c\u6837\uff0c4x5GB \u5185\u5b58\u5207\u7247\u53ef\u4ee5\u4e0e 4x1 \u8ba1\u7b97\u5207\u7247\u7ed3\u5408\u4f7f\u7528\u4ee5\u521b\u5efa 4g.20gb \u7684 GI \u914d\u7f6e\u6587\u4ef6\uff1a
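A minimal command-line sketch of creating the two GI profiles above (an illustration, not the platform's own procedure; it assumes MIG mode is already enabled on GPU 0 and that your driver's nvidia-smi accepts profile names as well as profile IDs, which recent drivers do):

    # List the GI profiles that GPU 0 supports
    sudo nvidia-smi mig -i 0 -lgip

    # Create one 1g.5gb GI and one 4g.20gb GI on GPU 0
    sudo nvidia-smi mig -i 0 -cgi 1g.5gb,4g.20gb

    # Confirm the created GPU instances
    sudo nvidia-smi mig -i 0 -lgi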

                      "},{"location":"admin/kpanda/gpu/nvidia/mig/index.html#ci","title":"\u8ba1\u7b97\u5b9e\u4f8b\uff08CI\uff09","text":"

                      GPU \u5b9e\u4f8b\u7684\u8ba1\u7b97\u5207\u7247(GI)\u53ef\u4ee5\u8fdb\u4e00\u6b65\u7ec6\u5206\u4e3a\u591a\u4e2a\u8ba1\u7b97\u5b9e\u4f8b\uff08CI\uff09\uff0c\u5176\u4e2d CI \u5171\u4eab\u7236 GI \u7684\u5f15\u64ce\u548c\u5185\u5b58\uff0c \u4f46\u6bcf\u4e2a CI \u90fd\u6709\u4e13\u7528\u7684 SM \u8d44\u6e90\u3002\u4f7f\u7528\u4e0a\u9762\u7684\u76f8\u540c 4g.20gb \u793a\u4f8b\uff0c\u53ef\u4ee5\u521b\u5efa\u4e00\u4e2a CI \u4ee5\u4ec5\u4f7f\u7528\u7b2c\u4e00\u4e2a\u8ba1\u7b97\u5207\u7247\u7684 1c.4g.20gb \u8ba1\u7b97\u914d\u7f6e\uff0c\u5982\u4e0b\u56fe\u84dd\u8272\u90e8\u5206\u6240\u793a\uff1a

                      \u5728\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u53ef\u4ee5\u901a\u8fc7\u9009\u62e9\u4efb\u4f55\u8ba1\u7b97\u5207\u7247\u6765\u521b\u5efa 4 \u4e2a\u4e0d\u540c\u7684 CI\u3002\u8fd8\u53ef\u4ee5\u5c06\u4e24\u4e2a\u8ba1\u7b97\u5207\u7247\u7ec4\u5408\u5728\u4e00\u8d77\u4ee5\u521b\u5efa 2c.4g.20gb \u7684\u8ba1\u7b97\u914d\u7f6e\uff09\uff1a

                      \u9664\u6b64\u4e4b\u5916\uff0c\u8fd8\u53ef\u4ee5\u7ec4\u5408 3 \u4e2a\u8ba1\u7b97\u5207\u7247\u4ee5\u521b\u5efa\u8ba1\u7b97\u914d\u7f6e\u6587\u4ef6\uff0c\u6216\u8005\u53ef\u4ee5\u7ec4\u5408\u6240\u6709 4 \u4e2a\u8ba1\u7b97\u5207\u7247\u4ee5\u521b\u5efa 3c.4g.20gb \u3001 4c.4g.20gb \u8ba1\u7b97\u914d\u7f6e\u6587\u4ef6\u3002 \u5408\u5e76\u6240\u6709 4 \u4e2a\u8ba1\u7b97\u5207\u7247\u65f6\uff0c\u914d\u7f6e\u6587\u4ef6\u7b80\u79f0\u4e3a 4g.20gb \u3002
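Continuing the sketch above, CIs could then be carved out of the 4g.20gb GI (an assumption for illustration: the GI was created with instance ID 1; check the real ID with -lgi first):

    # List the CI profiles available inside GPU instance 1
    sudo nvidia-smi mig -i 0 -gi 1 -lcip

    # Create a 1c.4g.20gb CI inside that GI
    sudo nvidia-smi mig -cci 1c.4g.20gb -gi 1

    # Confirm the created compute instances
    sudo nvidia-smi mig -lci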

                      "},{"location":"admin/kpanda/gpu/nvidia/mig/create_mig.html","title":"\u5f00\u542f MIG \u529f\u80fd","text":"

                      \u672c\u7ae0\u8282\u4ecb\u7ecd\u5982\u4f55\u5f00\u542f NVIDIA MIG \u529f\u80fd\u65b9\u5f0f\uff0cNVIDIA \u5f53\u524d\u63d0\u4f9b\u4e24\u79cd\u5728 Kubernetes \u8282\u70b9\u4e0a\u516c\u5f00 MIG \u8bbe\u5907\u7684\u7b56\u7565\uff1a

                      • Single \u6a21\u5f0f\uff0c\u8282\u70b9\u4ec5\u5728\u5176\u6240\u6709 GPU \u4e0a\u516c\u5f00\u5355\u4e00\u7c7b\u578b\u7684 MIG \u8bbe\u5907\u3002
                      • Mixed \u6a21\u5f0f\uff0c\u8282\u70b9\u5728\u5176\u6240\u6709 GPU \u4e0a\u516c\u5f00\u6df7\u5408 MIG \u8bbe\u5907\u7c7b\u578b\u3002

                      \u8be6\u60c5\u53c2\u8003 NVIDIA GPU \u5361\u4f7f\u7528\u6a21\u5f0f\u3002

                      "},{"location":"admin/kpanda/gpu/nvidia/mig/create_mig.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u5f85\u5b89\u88c5 GPU \u9a71\u52a8\u8282\u70b9\u7cfb\u7edf\u8981\u6c42\u8bf7\u53c2\u8003\uff1aGPU \u652f\u6301\u77e9\u9635
                      • \u786e\u8ba4\u96c6\u7fa4\u8282\u70b9\u4e0a\u5177\u6709\u5bf9\u5e94\u578b\u53f7\u7684 GPU \u5361\uff08NVIDIA H100\u3001 A100 \u548c A30 Tensor Core GPU\uff09\uff0c \u8be6\u60c5\u53c2\u8003 GPU \u652f\u6301\u77e9\u9635\u3002
                      • \u8282\u70b9\u4e0a\u7684\u6240\u6709 GPU \u5fc5\u987b\uff1a\u5c5e\u4e8e\u540c\u4e00\u4ea7\u54c1\u7ebf\uff08\u4f8b\u5982 A100-SXM-40GB\uff09
                      "},{"location":"admin/kpanda/gpu/nvidia/mig/create_mig.html#gpu-operator-addon","title":"\u5b89\u88c5 gpu-operator Addon","text":""},{"location":"admin/kpanda/gpu/nvidia/mig/create_mig.html#_2","title":"\u53c2\u6570\u914d\u7f6e","text":"

                      \u5b89\u88c5 Operator \u65f6\u9700\u8981\u5bf9\u5e94\u8bbe\u7f6e MigManager Config \u53c2\u6570\uff0c \u9ed8\u8ba4\u4e3a default-mig-parted-config \uff0c\u540c\u65f6\u4e5f\u53ef\u4ee5\u81ea\u5b9a\u4e49\u5207\u5206\u7b56\u7565\u914d\u7f6e\u6587\u4ef6\uff1a

                      "},{"location":"admin/kpanda/gpu/nvidia/mig/create_mig.html#_3","title":"\u81ea\u5b9a\u4e49\u5207\u5206\u7b56\u7565","text":"
                        ## \u81ea\u5b9a\u4e49\u5207\u5206 GI \u5b9e\u4f8b\u914d\u7f6e\n  all-disabled:\n    - devices: all\n      mig-enabled: false\n  all-enabled:\n    - devices: all\n      mig-enabled: true\n      mig-devices: {}\n  all-1g.10gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.5gb: 7\n  all-1g.10gb.me:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb+me: 1\n  all-1g.20gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.20gb: 4\n  all-2g.20gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        2g.20gb: 3\n  all-3g.40gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        3g.40gb: 2\n  all-4g.40gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        4g.40gb: 1\n  all-7g.80gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        7g.80gb: 1\n  all-balanced:\n    - device-filter: [\"0x233110DE\", \"0x232210DE\", \"0x20B210DE\", \"0x20B510DE\", \"0x20F310DE\", \"0x20F510DE\"]\n      devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb: 2\n        2g.20gb: 1\n        3g.40gb: 1\n  # \u8bbe\u7f6e\u540e\u4f1a\u6309\u7167\u8bbe\u7f6e\u89c4\u683c\u5207\u5206 CI \u5b9e\u4f8b\n  custom-config:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        3g.40gb: 2\n

Set custom-config in the YAML above; once it is set, CI instances will be sliced according to the specified profile.

    custom-config:
      - devices: all
        mig-enabled: true
        mig-devices:
          1c.3g.40gb: 6

After the configuration is complete, GPU MIG resources can be used when deploying applications.

                      "},{"location":"admin/kpanda/gpu/nvidia/mig/create_mig.html#gpu","title":"\u5207\u6362\u8282\u70b9 GPU \u6a21\u5f0f","text":"

                      Note

                      \u5207\u6362 GPU \u6a21\u5f0f\u6216\u8005\u4fee\u6539\u5207\u5206\u89c4\u683c\u540e\u9700\u8981\u91cd\u542f nvidia-mig-manager\u3002

                      \u5f53\u6211\u4eec\u6210\u529f\u5b89\u88c5 gpu-operator \u4e4b\u540e\uff0c\u8282\u70b9\u9ed8\u8ba4\u662f\u6574\u5361\u6a21\u5f0f\uff0c\u5728\u8282\u70b9\u7ba1\u7406\u9875\u9762\u4f1a\u6709\u6807\u8bc6\uff0c\u5982\u4e0b\u56fe\u6240\u793a\uff1a

                      \u70b9\u51fb\u8282\u70b9\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u9009\u62e9 GPU \u6a21\u5f0f\u5207\u6362 \uff0c\u7136\u540e\u9009\u62e9\u5bf9\u5e94\u7684 MIG \u6a21\u5f0f\u4ee5\u53ca\u5207\u5206\u7684\u7b56\u7565\uff0c\u8fd9\u91cc\u4ee5 MIXED \u6a21\u5f0f\u4e3a\u4f8b\uff1a

                      \u8fd9\u91cc\u4e00\u5171\u6709\u4e24\u4e2a\u914d\u7f6e\uff1a

                      1. MIg \u7b56\u7565\uff1aMixed \u4ee5\u53ca Single \u3002
                      2. \u5207\u5206\u7b56\u7565\uff1a\u8fd9\u91cc\u7684\u7b56\u7565\u9700\u8981\u4e0e default-mig-parted-config \uff08\u6216\u8005\u7528\u6237\u81ea\u5b9a\u4e49\u7684\u5207\u5206\u7b56\u7565\uff09\u914d\u7f6e\u6587\u4ef6\u4e2d\u7684 key \u4fdd\u6301\u4e00\u81f4\u3002

                      \u70b9\u51fb \u786e\u5b9a \u6309\u94ae\u540e\uff0c\u7b49\u5f85\u7ea6\u4e00\u5206\u949f\u5de6\u53f3\u5237\u65b0\u9875\u9762\uff0cMIG \u6a21\u5f0f\u5207\u6362\u6210\uff1a

                      "},{"location":"admin/kpanda/gpu/nvidia/mig/mig_command.html","title":"MIG \u76f8\u5173\u547d\u4ee4","text":"

                      GI \u76f8\u5173\u547d\u540d\uff1a

                      \u5b50\u547d\u4ee4 \u8bf4\u660e nvidia-smi mig -lgi \u67e5\u770b\u521b\u5efa GI \u5b9e\u4f8b\u5217\u8868 nvidia-smi mig -dgi -gi \u5220\u9664\u6307\u5b9a\u7684 GI \u5b9e\u4f8b nvidia-smi mig -lgip \u67e5\u770b GI \u7684 profile nvidia-smi mig -cgi \u901a\u8fc7\u6307\u5b9a profile \u7684 ID \u521b\u5efa GI

                      CI \u76f8\u5173\u547d\u4ee4\uff1a

                      \u5b50\u547d\u4ee4 \u8bf4\u660e nvidia-smi mig -lcip { -gi {gi Instance ID}} \u67e5\u770b CI \u7684 profile \uff0c\u6307\u5b9a -gi \u53ef\u4ee5\u67e5\u770b\u7279\u5b9a GI \u5b9e\u4f8b\u53ef\u4ee5\u521b\u5efa\u7684 CI nvidia-smi mig -lci \u67e5\u770b\u521b\u5efa\u7684 CI \u5b9e\u4f8b\u5217\u8868 nvidia-smi mig -cci {profile id} -gi {gi instance id} \u6307\u5b9a\u7684 GI \u521b\u5efa CI \u5b9e\u4f8b nvidia-smi mig -dci -ci \u5220\u9664\u6307\u5b9a CI \u5b9e\u4f8b

                      GI+CI \u76f8\u5173\u547d\u4ee4\uff1a

                      \u5b50\u547d\u4ee4 \u8bf4\u660e nvidia-smi mig -i 0 -cgi {gi profile id} -C {ci profile id} \u76f4\u63a5\u521b\u5efa GI + CI \u5b9e\u4f8b"},{"location":"admin/kpanda/gpu/nvidia/mig/mig_usage.html","title":"\u4f7f\u7528 MIG GPU \u8d44\u6e90","text":"

This section describes how applications can use MIG GPU resources.

"},{"location":"admin/kpanda/gpu/nvidia/mig/mig_usage.html#_1","title":"Prerequisites","text":"
• The 算丰 AI computing platform container management platform is deployed and running normally.
• The container management module has a Kubernetes cluster integrated or created, and the cluster's UI is reachable.
• GPU Operator is installed.
• The cluster nodes have GPU cards of the corresponding models.
"},{"location":"admin/kpanda/gpu/nvidia/mig/mig_usage.html#ui-mig-gpu","title":"Using MIG GPUs from the UI","text":"
1. Confirm that the cluster has recognized the GPU card type.

  Go to Cluster Details -> Node Management and check whether the node has been correctly recognized as MIG mode.

2. Deploy an application from an image; NVIDIA MIG resources can be selected and used.

  • MIG Single mode example (used the same way as a whole card):

    Note

    The MIG single strategy lets users request and use GPU resources the same way as whole GPU cards (nvidia.com/gpu); the difference is that these resources can be a portion of a GPU (a MIG device) rather than an entire GPU. Learn more in the GPU MIG mode design.

  • MIG Mixed mode example:

                      "},{"location":"admin/kpanda/gpu/nvidia/mig/mig_usage.html#yaml-mig","title":"YAML \u914d\u7f6e\u4f7f\u7528 MIG","text":"

                      MIG Single \u6a21\u5f0f\uff1a

                      apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mig-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mig-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mig-demo\n    spec:\n      containers:\n        - name: mig-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/gpu: 2 # (1)!\n          imagePullPolicy: Always\n      restartPolicy: Always\n
                      1. \u7533\u8bf7 MIG GPU \u7684\u6570\u91cf

                      MIG Mixed \u6a21\u5f0f\uff1a

                      apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mig-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mig-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mig-demo\n    spec:\n      containers:\n        - name: mig-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/mig-4g.20gb: 1 # (1)!\n          imagePullPolicy: Always\n      restartPolicy: Always\n
                      1. \u901a\u8fc7 nvidia.com/mig-g.gb \u7684\u8d44\u6e90\u7c7b\u578b\u516c\u5f00\u5404\u4e2a MIG \u8bbe\u5907

                      \u8fdb\u5165\u5bb9\u5668\u540e\u53ef\u4ee5\u67e5\u770b\u53ea\u4f7f\u7528\u4e86\u4e00\u4e2a MIG \u8bbe\u5907\u3002
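For example, a quick check from inside the Pod (a sketch; the exact device naming varies by card and profile):

    # List the GPU devices visible to the container
    nvidia-smi -L
    # Expected shape of the output: a single MIG entry, e.g.
    # GPU 0: NVIDIA A100-SXM4-40GB (UUID: GPU-...)
    #   MIG 4g.20gb Device 0: (UUID: MIG-...)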

                      "},{"location":"admin/kpanda/gpu/nvidia/vgpu/hami.html","title":"\u6784\u5efa vGPU \u663e\u5b58\u8d85\u914d\u955c\u50cf","text":"

                      Hami \u9879\u76ee\u4e2d vGPU \u663e\u5b58\u8d85\u914d\u7684\u529f\u80fd\u5df2\u7ecf\u4e0d\u5b58\u5728\uff0c\u76ee\u524d\u4f7f\u7528\u6709\u663e\u5b58\u8d85\u914d\u7684 libvgpu.so \u6587\u4ef6\u91cd\u65b0\u6784\u5efa\u3002

                      Dockerfile
                      FROM docker.m.daocloud.io/projecthami/hami:v2.3.11\nCOPY libvgpu.so /k8s-vgpu/lib/nvidia/\n

                      \u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u6784\u5efa\u955c\u50cf\uff1a

                      docker build -t release.daocloud.io/projecthami/hami:v2.3.11 -f Dockerfile .\n

                      \u7136\u540e\u628a\u955c\u50cf push \u5230 release.daocloud.io \u4e2d\u3002
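For example:

    docker push release.daocloud.io/projecthami/hami:v2.3.11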

                      "},{"location":"admin/kpanda/gpu/nvidia/vgpu/vgpu_addon.html","title":"\u5b89\u88c5 NVIDIA vGPU Addon","text":"

                      \u5982\u9700\u5c06\u4e00\u5f20 NVIDIA \u865a\u62df\u5316\u6210\u591a\u4e2a\u865a\u62df GPU\uff0c\u5e76\u5c06\u5176\u5206\u914d\u7ed9\u4e0d\u540c\u7684\u4e91\u4e3b\u673a\u6216\u7528\u6237\uff0c\u60a8\u53ef\u4ee5\u4f7f\u7528 NVIDIA \u7684 vGPU \u80fd\u529b\u3002 \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u5b89\u88c5 vGPU \u63d2\u4ef6\uff0c\u8fd9\u662f\u4f7f\u7528 NVIDIA vGPU \u80fd\u529b\u7684\u524d\u63d0\u3002

                      "},{"location":"admin/kpanda/gpu/nvidia/vgpu/vgpu_addon.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u53c2\u8003 GPU \u652f\u6301\u77e9\u9635 \u786e\u8ba4\u96c6\u7fa4\u8282\u70b9\u4e0a\u5177\u6709\u5bf9\u5e94\u578b\u53f7\u7684 GPU \u5361\u3002
                      • \u5f53\u524d\u96c6\u7fa4\u5df2\u901a\u8fc7 Operator \u90e8\u7f72 NVIDIA \u9a71\u52a8\uff0c\u5177\u4f53\u53c2\u8003 GPU Operator \u79bb\u7ebf\u5b89\u88c5\u3002
                      "},{"location":"admin/kpanda/gpu/nvidia/vgpu/vgpu_addon.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u529f\u80fd\u6a21\u5757\u8def\u5f84\uff1a \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u7ba1\u7406 \uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u6a21\u677f -> \u641c\u7d22 nvidia-vgpu \u3002

                      2. \u5728\u5b89\u88c5 vGPU \u7684\u8fc7\u7a0b\u4e2d\u63d0\u4f9b\u4e86\u51e0\u4e2a\u57fa\u672c\u4fee\u6539\u7684\u53c2\u6570\uff0c\u5982\u679c\u9700\u8981\u4fee\u6539\u9ad8\u7ea7\u53c2\u6570\u70b9\u51fb YAML \u5217\u8fdb\u884c\u4fee\u6539\uff1a

                        • deviceCoreScaling \uff1aNVIDIA \u88c5\u7f6e\u7b97\u529b\u4f7f\u7528\u6bd4\u4f8b\uff0c\u9884\u8bbe\u503c\u662f 1\u3002\u53ef\u4ee5\u5927\u4e8e 1\uff08\u542f\u7528\u865a\u62df\u7b97\u529b\uff0c\u5b9e\u9a8c\u529f\u80fd\uff09\u3002\u5982\u679c\u6211\u4eec\u914d\u7f6e devicePlugin.deviceCoreScaling \u53c2\u6570\u4e3a S\uff0c\u5728\u90e8\u7f72\u4e86\u6211\u4eec\u88c5\u7f6e\u63d2\u4ef6\u7684 Kubernetes \u96c6\u7fa4\u4e2d\uff0c\u8fd9\u5f20 GPU \u5206\u51fa\u7684 vGPU \u5c06\u603b\u5171\u5305\u542b S * 100% \u7b97\u529b\u3002

                        • deviceMemoryScaling \uff1aNVIDIA \u88c5\u7f6e\u663e\u5b58\u4f7f\u7528\u6bd4\u4f8b\uff0c\u9884\u8bbe\u503c\u662f 1\u3002\u53ef\u4ee5\u5927\u4e8e 1\uff08\u542f\u7528\u865a\u62df\u663e\u5b58\uff0c\u5b9e\u9a8c\u529f\u80fd\uff09\u3002 \u5bf9\u4e8e\u6709 M \u663e\u5b58\u5927\u5c0f\u7684 NVIDIA GPU\uff0c\u5982\u679c\u6211\u4eec\u914d\u7f6e devicePlugin.deviceMemoryScaling \u53c2\u6570\u4e3a S\uff0c \u5728\u90e8\u7f72\u4e86\u6211\u4eec\u88c5\u7f6e\u63d2\u4ef6\u7684 Kubernetes \u96c6\u7fa4\u4e2d\uff0c\u8fd9\u5f20 GPU \u5206\u51fa\u7684 vGPU \u5c06\u603b\u5171\u5305\u542b S * M \u663e\u5b58\u3002

                        • deviceSplitCount \uff1a\u6574\u6570\u7c7b\u578b\uff0c\u9884\u8bbe\u503c\u662f 10\u3002GPU \u7684\u5206\u5272\u6570\uff0c\u6bcf\u4e00\u5f20 GPU \u90fd\u4e0d\u80fd\u5206\u914d\u8d85\u8fc7\u5176\u914d\u7f6e\u6570\u76ee\u7684\u4efb\u52a1\u3002 \u82e5\u5176\u914d\u7f6e\u4e3a N \u7684\u8bdd\uff0c\u6bcf\u4e2a GPU \u4e0a\u6700\u591a\u53ef\u4ee5\u540c\u65f6\u5b58\u5728 N \u4e2a\u4efb\u52a1\u3002

                        • Resources \uff1a\u5c31\u662f\u5bf9\u5e94 vgpu-device-plugin \u548c vgpu-schedule pod \u7684\u8d44\u6e90\u4f7f\u7528\u91cf\u3002

                        • ServiceMonitor \uff1a\u9ed8\u8ba4\u4e0d\u5f00\u542f\uff0c\u5f00\u542f\u540e\u53ef\u524d\u5f80\u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u67e5\u770b vGPU \u76f8\u5173\u76d1\u63a7\u3002\u5982\u9700\u5f00\u542f\uff0c\u8bf7\u786e\u4fdd insight-agent \u5df2\u5b89\u88c5\u5e76\u5904\u4e8e\u8fd0\u884c\u72b6\u6001\uff0c\u5426\u5219\u5c06\u5bfc\u81f4 NVIDIA vGPU Addon \u5b89\u88c5\u5931\u8d25\u3002

                      3. \u5b89\u88c5\u6210\u529f\u4e4b\u540e\u4f1a\u5728\u6307\u5b9a Namespace \u4e0b\u51fa\u73b0\u5982\u4e0b\u4e24\u4e2a\u7c7b\u578b\u7684 Pod\uff0c\u5373\u8868\u793a NVIDIA vGPU \u63d2\u4ef6\u5df2\u5b89\u88c5\u6210\u529f\uff1a

                      \u5b89\u88c5\u6210\u529f\u540e\uff0c\u90e8\u7f72\u5e94\u7528\u53ef\u4f7f\u7528 vGPU \u8d44\u6e90\u3002

                      Note

                      NVIDIA vGPU Addon \u4e0d\u652f\u6301\u4ece\u8001\u7248\u672c v2.0.0 \u76f4\u63a5\u5347\u7ea7\u4e3a\u6700\u65b0\u7248 v2.0.0+1\uff1b \u5982\u9700\u5347\u7ea7\uff0c\u8bf7\u5378\u8f7d\u8001\u7248\u672c\u540e\u91cd\u65b0\u5b89\u88c5\u3002
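A minimal Helm values sketch for the parameters above (an assumption-laden illustration: it assumes the chart nests these keys under devicePlugin, as the devicePlugin.deviceCoreScaling and devicePlugin.deviceMemoryScaling names used above suggest, and the serviceMonitor key name is likewise an assumption):

    devicePlugin:
      deviceSplitCount: 10     # at most 10 concurrent tasks per physical GPU
      deviceMemoryScaling: 2   # experimental: advertise 2 * M total vGPU memory per card
      deviceCoreScaling: 1     # no compute oversubscription
    serviceMonitor:
      enabled: false           # enable only if insight-agent is installed and running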

                      "},{"location":"admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html","title":"\u5e94\u7528\u4f7f\u7528 Nvidia vGPU","text":"

                      \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528 vGPU \u80fd\u529b\u3002

                      "},{"location":"admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u96c6\u7fa4\u8282\u70b9\u4e0a\u5177\u6709\u5bf9\u5e94\u578b\u53f7\u7684 GPU \u5361
                      • \u5df2\u6210\u529f\u5b89\u88c5 vGPU Addon\uff0c\u8be6\u60c5\u53c2\u8003 GPU Addon \u5b89\u88c5
                      • \u5df2\u5b89\u88c5 GPU Operator\uff0c\u5e76\u5df2 \u5173\u95ed Nvidia.DevicePlugin \u80fd\u529b\uff0c\u53ef\u53c2\u8003 GPU Operator \u79bb\u7ebf\u5b89\u88c5
                      "},{"location":"admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html#vgpu","title":"\u754c\u9762\u4f7f\u7528 vGPU","text":"
                      1. \u786e\u8ba4\u96c6\u7fa4\u662f\u5426\u5df2\u68c0\u6d4b GPU \u5361\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u81ea\u52a8\u542f\u7528\u5e76\u81ea\u52a8\u68c0\u6d4b\u5bf9\u5e94 GPU \u7c7b\u578b\u3002 \u76ee\u524d\u96c6\u7fa4\u4f1a\u81ea\u52a8\u542f\u7528 GPU \uff0c\u5e76\u4e14\u8bbe\u7f6e GPU \u7c7b\u578b\u4e3a Nvidia vGPU \u3002

                      2. \u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u9009\u62e9\u7c7b\u578b\uff08Nvidia vGPU\uff09\u4e4b\u540e\uff0c\u4f1a\u81ea\u52a8\u51fa\u73b0\u5982\u4e0b\u51e0\u4e2a\u53c2\u6570\u9700\u8981\u586b\u5199\uff1a

                        • \u7269\u7406\u5361\u6570\u91cf\uff08nvidia.com/vgpu\uff09\uff1a\u8868\u793a\u5f53\u524d Pod \u9700\u8981\u6302\u8f7d\u51e0\u5f20\u7269\u7406\u5361\uff0c\u8f93\u5165\u503c\u5fc5\u987b\u4e3a\u6574\u6570\u4e14 \u5c0f\u4e8e\u7b49\u4e8e \u5bbf\u4e3b\u673a\u4e0a\u7684\u5361\u6570\u91cf\u3002
                        • GPU \u7b97\u529b\uff08nvidia.com/gpucores\uff09: \u8868\u793a\u6bcf\u5f20\u5361\u5360\u7528\u7684 GPU \u7b97\u529b\uff0c\u503c\u8303\u56f4\u4e3a 0-100\uff1b \u5982\u679c\u914d\u7f6e\u4e3a 0\uff0c \u5219\u8ba4\u4e3a\u4e0d\u5f3a\u5236\u9694\u79bb\uff1b\u914d\u7f6e\u4e3a100\uff0c\u5219\u8ba4\u4e3a\u72ec\u5360\u6574\u5f20\u5361\u3002
                        • GPU \u663e\u5b58\uff08nvidia.com/gpumem\uff09: \u8868\u793a\u6bcf\u5f20\u5361\u5360\u7528\u7684 GPU \u663e\u5b58\uff0c\u503c\u5355\u4f4d\u4e3a MB\uff0c\u6700\u5c0f\u503c\u4e3a 1\uff0c\u6700\u5927\u503c\u4e3a\u6574\u5361\u7684\u663e\u5b58\u503c\u3002

                        \u5982\u679c\u4e0a\u8ff0\u503c\u914d\u7f6e\u7684\u6709\u95ee\u9898\u5219\u4f1a\u51fa\u73b0\u8c03\u5ea6\u5931\u8d25\uff0c\u8d44\u6e90\u5206\u914d\u4e0d\u4e86\u7684\u60c5\u51b5\u3002

                      "},{"location":"admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html#yaml-vgpu","title":"YAML \u914d\u7f6e\u4f7f\u7528 vGPU","text":"

                      \u53c2\u8003\u5982\u4e0b\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\uff0c\u5728\u8d44\u6e90\u7533\u8bf7\u548c\u9650\u5236\u914d\u7f6e\u4e2d\u589e\u52a0 nvidia.com/vgpu: '1' \u53c2\u6570\u6765\u914d\u7f6e\u5e94\u7528\u4f7f\u7528\u7269\u7406\u5361\u7684\u6570\u91cf\u3002

                      apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-vgpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-vgpu-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: full-vgpu-demo\n    spec:\n      containers:\n        - name: full-vgpu-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/gpucores: '20'   # \u7533\u8bf7\u6bcf\u5f20\u5361\u5360\u7528 20% \u7684 GPU \u7b97\u529b\n              nvidia.com/gpumem: '200'   # \u7533\u8bf7\u6bcf\u5f20\u5361\u5360\u7528 200MB \u7684\u663e\u5b58\n              nvidia.com/vgpu: '1'   # \u7533\u8bf7GPU\u7684\u6570\u91cf\n          imagePullPolicy: Always\n      restartPolicy: Always\n
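Once the Pod is running, a quick check inside it should reflect the limits above (a sketch; the reported memory is expected to be roughly the 200 MB requested):

    # nvidia-smi inside the container should show the vGPU memory limit
    kubectl exec -it deploy/full-vgpu-demo -- nvidia-smi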
                      "},{"location":"admin/kpanda/gpu/volcano/drf.html","title":"DRF\uff08Dominant Resource Fairness\uff09 \u8c03\u5ea6\u7b56\u7565","text":"

                      DRF \u8c03\u5ea6\u7b56\u7565\u8ba4\u4e3a\u5360\u7528\u8d44\u6e90\u8f83\u5c11\u7684\u4efb\u52a1\u5177\u6709\u66f4\u9ad8\u7684\u4f18\u5148\u7ea7\u3002\u8fd9\u6837\u80fd\u591f\u6ee1\u8db3\u66f4\u591a\u7684\u4f5c\u4e1a\uff0c\u4e0d\u4f1a\u56e0\u4e3a\u4e00\u4e2a\u80d6\u4e1a\u52a1\uff0c \u997f\u6b7b\u5927\u6279\u5c0f\u4e1a\u52a1\u3002DRF \u8c03\u5ea6\u7b97\u6cd5\u80fd\u591f\u786e\u4fdd\u5728\u591a\u79cd\u7c7b\u578b\u8d44\u6e90\u5171\u5b58\u7684\u73af\u5883\u4e0b\uff0c\u5c3d\u53ef\u80fd\u6ee1\u8db3\u5206\u914d\u7684\u516c\u5e73\u539f\u5219\u3002

                      "},{"location":"admin/kpanda/gpu/volcano/drf.html#_1","title":"\u4f7f\u7528\u65b9\u5f0f","text":"

                      DRF \u8c03\u5ea6\u7b56\u7565\u9ed8\u8ba4\u5df2\u542f\u7528\uff0c\u65e0\u9700\u4efb\u4f55\u914d\u7f6e\u3002

                      kubectl -n volcano-system view configmaps volcano-scheduler-configmap\n
                      "},{"location":"admin/kpanda/gpu/volcano/drf.html#_2","title":"\u4f7f\u7528\u6848\u4f8b","text":"

                      \u5728 AI \u8bad\u7ec3\uff0c\u6216\u5927\u6570\u636e\u8ba1\u7b97\u4e2d\uff0c\u901a\u8fc7\u6709\u9650\u8fd0\u884c\u4f7f\u7528\u8d44\u6e90\u5c11\u7684\u4efb\u52a1\uff0c\u8fd9\u6837\u53ef\u4ee5\u8ba9\u96c6\u7fa4\u8d44\u6e90\u4f7f\u7528\u7387\u66f4\u9ad8\uff0c\u800c\u4e14\u8fd8\u80fd\u907f\u514d\u5c0f\u4efb\u52a1\u88ab\u997f\u6b7b\u3002 \u5982\u4e0b\u521b\u5efa\u4e24\u4e2a Job\uff0c\u4e00\u4e2a\u662f\u5c0f\u8d44\u6e90\u9700\u6c42\uff0c\u4e00\u4e2a\u662f\u5927\u8d44\u6e90\u9700\u6c42\uff0c\u53ef\u4ee5\u770b\u51fa\u6765\u5c0f\u8d44\u6e90\u9700\u6c42\u7684 Job \u4f18\u5148\u8fd0\u884c\u8d77\u6765\u3002

                      cat <<EOF | kubectl apply -f -  \napiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: small-resource  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 4  \n  priorityClassName: small-resource  \n  tasks:  \n    - replicas: 4  \n      name: \"test\"  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                requests:  \n                  cpu: \"1\"  \n          restartPolicy: OnFailure  \n---  \napiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: large-resource  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 4  \n  priorityClassName: large-resource  \n  tasks:  \n    - replicas: 4  \n      name: \"test\"  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                requests:  \n                  cpu: \"2\"  \n          restartPolicy: OnFailure  \nEOF\n
                      "},{"location":"admin/kpanda/gpu/volcano/numa.html","title":"NUMA \u4eb2\u548c\u6027\u8c03\u5ea6","text":"

                      NUMA \u8282\u70b9\u662f Non-Uniform Memory Access\uff08\u975e\u7edf\u4e00\u5185\u5b58\u8bbf\u95ee\uff09\u67b6\u6784\u4e2d\u7684\u4e00\u4e2a\u57fa\u672c\u7ec4\u6210\u5355\u5143\uff0c\u4e00\u4e2a Node \u8282\u70b9\u662f\u591a\u4e2a NUMA \u8282\u70b9\u7684\u96c6\u5408\uff0c \u5728\u591a\u4e2a NUMA \u8282\u70b9\u4e4b\u95f4\u8fdb\u884c\u5185\u5b58\u8bbf\u95ee\u65f6\u4f1a\u4ea7\u751f\u5ef6\u8fdf\uff0c\u5f00\u53d1\u8005\u53ef\u4ee5\u901a\u8fc7\u4f18\u5316\u4efb\u52a1\u8c03\u5ea6\u548c\u5185\u5b58\u5206\u914d\u7b56\u7565\uff0c\u6765\u63d0\u9ad8\u5185\u5b58\u8bbf\u95ee\u6548\u7387\u548c\u6574\u4f53\u6027\u80fd\u3002

                      "},{"location":"admin/kpanda/gpu/volcano/numa.html#_1","title":"\u4f7f\u7528\u573a\u666f","text":"

                      Numa \u4eb2\u548c\u6027\u8c03\u5ea6\u7684\u5e38\u89c1\u573a\u666f\u662f\u90a3\u4e9b\u5bf9 CPU \u53c2\u6570\u654f\u611f/\u8c03\u5ea6\u5ef6\u8fdf\u654f\u611f\u7684\u8ba1\u7b97\u5bc6\u96c6\u578b\u4f5c\u4e1a\u3002\u5982\u79d1\u5b66\u8ba1\u7b97\u3001\u89c6\u9891\u89e3\u7801\u3001\u52a8\u6f2b\u52a8\u753b\u6e32\u67d3\u3001\u5927\u6570\u636e\u79bb\u7ebf\u5904\u7406\u7b49\u5177\u4f53\u573a\u666f\u3002

                      "},{"location":"admin/kpanda/gpu/volcano/numa.html#_2","title":"\u8c03\u5ea6\u7b56\u7565","text":"

                      Pod \u8c03\u5ea6\u65f6\u53ef\u4ee5\u91c7\u7528\u7684 NUMA \u653e\u7f6e\u7b56\u7565\uff0c\u5177\u4f53\u7b56\u7565\u5bf9\u5e94\u7684\u8c03\u5ea6\u884c\u4e3a\u8bf7\u53c2\u89c1 Pod \u8c03\u5ea6\u884c\u4e3a\u8bf4\u660e\u3002

                      • single-numa-node\uff1aPod \u8c03\u5ea6\u65f6\u4f1a\u9009\u62e9\u62d3\u6251\u7ba1\u7406\u7b56\u7565\u5df2\u7ecf\u8bbe\u7f6e\u4e3a single-numa-node \u7684\u8282\u70b9\u6c60\u4e2d\u7684\u8282\u70b9\uff0c\u4e14 CPU \u9700\u8981\u653e\u7f6e\u5728\u76f8\u540c NUMA \u4e0b\uff0c\u5982\u679c\u8282\u70b9\u6c60\u4e2d\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u8282\u70b9\uff0cPod \u5c06\u65e0\u6cd5\u88ab\u8c03\u5ea6\u3002
                      • restricted\uff1aPod \u8c03\u5ea6\u65f6\u4f1a\u9009\u62e9\u62d3\u6251\u7ba1\u7406\u7b56\u7565\u5df2\u7ecf\u8bbe\u7f6e\u4e3a restricted \u8282\u70b9\u6c60\u7684\u8282\u70b9\uff0c\u4e14 CPU \u9700\u8981\u653e\u7f6e\u5728\u76f8\u540c\u7684 NUMA \u96c6\u5408\u4e0b\uff0c\u5982\u679c\u8282\u70b9\u6c60\u4e2d\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u8282\u70b9\uff0cPod \u5c06\u65e0\u6cd5\u88ab\u8c03\u5ea6\u3002
                      • best-effort\uff1aPod \u8c03\u5ea6\u65f6\u4f1a\u9009\u62e9\u62d3\u6251\u7ba1\u7406\u7b56\u7565\u5df2\u7ecf\u8bbe\u7f6e\u4e3a best-effort \u8282\u70b9\u6c60\u7684\u8282\u70b9\uff0c\u4e14\u5c3d\u91cf\u5c06 CPU \u653e\u7f6e\u5728\u76f8\u540c NUMA \u4e0b\uff0c\u5982\u679c\u6ca1\u6709\u8282\u70b9\u6ee1\u8db3\u8fd9\u4e00\u6761\u4ef6\uff0c\u5219\u9009\u62e9\u6700\u4f18\u8282\u70b9\u8fdb\u884c\u653e\u7f6e\u3002
                      "},{"location":"admin/kpanda/gpu/volcano/numa.html#_3","title":"\u8c03\u5ea6\u539f\u7406","text":"

                      \u5f53Pod\u8bbe\u7f6e\u4e86\u62d3\u6251\u7b56\u7565\u65f6\uff0cVolcano \u4f1a\u6839\u636e Pod \u8bbe\u7f6e\u7684\u62d3\u6251\u7b56\u7565\u9884\u6d4b\u5339\u914d\u7684\u8282\u70b9\u5217\u8868\u3002 \u8c03\u5ea6\u8fc7\u7a0b\u5982\u4e0b\uff1a

                      1. \u6839\u636e Pod \u8bbe\u7f6e\u7684 Volcano \u62d3\u6251\u7b56\u7565\uff0c\u7b5b\u9009\u5177\u6709\u76f8\u540c\u7b56\u7565\u7684\u8282\u70b9\u3002

                      2. \u5728\u8bbe\u7f6e\u4e86\u76f8\u540c\u7b56\u7565\u7684\u8282\u70b9\u4e2d\uff0c\u7b5b\u9009 CPU \u62d3\u6251\u6ee1\u8db3\u8be5\u7b56\u7565\u8981\u6c42\u7684\u8282\u70b9\u8fdb\u884c\u8c03\u5ea6\u3002

                      Pod \u53ef\u914d\u7f6e\u7684\u62d3\u6251\u7b56\u7565 1. \u6839\u636e Pod \u8bbe\u7f6e\u7684\u62d3\u6251\u7b56\u7565\uff0c\u7b5b\u9009\u53ef\u8c03\u5ea6\u7684\u8282\u70b9 2. \u8fdb\u4e00\u6b65\u7b5b\u9009 CPU \u62d3\u6251\u6ee1\u8db3\u7b56\u7565\u7684\u8282\u70b9\u8fdb\u884c\u8c03\u5ea6 none \u9488\u5bf9\u914d\u7f6e\u4e86\u4ee5\u4e0b\u51e0\u79cd\u62d3\u6251\u7b56\u7565\u7684\u8282\u70b9\uff0c\u8c03\u5ea6\u65f6\u5747\u65e0\u7b5b\u9009\u884c\u4e3a\u3002none\uff1a\u53ef\u8c03\u5ea6\uff1bbest-effort\uff1a\u53ef\u8c03\u5ea6\uff1brestricted\uff1a\u53ef\u8c03\u5ea6\uff1bsingle-numa-node\uff1a\u53ef\u8c03\u5ea6 - best-effort \u7b5b\u9009\u62d3\u6251\u7b56\u7565\u540c\u6837\u4e3a\u201cbest-effort\u201d\u7684\u8282\u70b9\uff1anone\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bbest-effort\uff1a\u53ef\u8c03\u5ea6\uff1brestricted\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bsingle-numa-node\uff1a\u4e0d\u53ef\u8c03\u5ea6 \u5c3d\u53ef\u80fd\u6ee1\u8db3\u7b56\u7565\u8981\u6c42\u8fdb\u884c\u8c03\u5ea6\uff1a\u4f18\u5148\u8c03\u5ea6\u81f3\u5355 NUMA \u8282\u70b9\uff0c\u5982\u679c\u5355 NUMA \u8282\u70b9\u65e0\u6cd5\u6ee1\u8db3 CPU \u7533\u8bf7\u503c\uff0c\u5141\u8bb8\u8c03\u5ea6\u81f3\u591a\u4e2a NUMA \u8282\u70b9\u3002 restricted \u7b5b\u9009\u62d3\u6251\u7b56\u7565\u540c\u6837\u4e3a\u201crestricted\u201d\u7684\u8282\u70b9\uff1anone\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bbest-effort\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1brestricted\uff1a\u53ef\u8c03\u5ea6\uff1bsingle-numa-node\uff1a\u4e0d\u53ef\u8c03\u5ea6 \u4e25\u683c\u9650\u5236\u7684\u8c03\u5ea6\u7b56\u7565\uff1a\u5355 NUMA \u8282\u70b9\u7684CPU\u5bb9\u91cf\u4e0a\u9650\u5927\u4e8e\u7b49\u4e8e CPU \u7684\u7533\u8bf7\u503c\u65f6\uff0c\u4ec5\u5141\u8bb8\u8c03\u5ea6\u81f3\u5355 NUMA \u8282\u70b9\u3002\u6b64\u65f6\u5982\u679c\u5355 NUMA \u8282\u70b9\u5269\u4f59\u7684 CPU \u53ef\u4f7f\u7528\u91cf\u4e0d\u8db3\uff0c\u5219 Pod \u65e0\u6cd5\u8c03\u5ea6\u3002\u5355 NUMA \u8282\u70b9\u7684 CPU \u5bb9\u91cf\u4e0a\u9650\u5c0f\u4e8e CPU \u7684\u7533\u8bf7\u503c\u65f6\uff0c\u53ef\u5141\u8bb8\u8c03\u5ea6\u81f3\u591a\u4e2a NUMA \u8282\u70b9\u3002 single-numa-node \u7b5b\u9009\u62d3\u6251\u7b56\u7565\u540c\u6837\u4e3a\u201csingle-numa-node\u201d\u7684\u8282\u70b9\uff1anone\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bbest-effort\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1brestricted\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bsingle-numa-node\uff1a\u53ef\u8c03\u5ea6 \u4ec5\u5141\u8bb8\u8c03\u5ea6\u81f3\u5355 NUMA \u8282\u70b9\u3002"},{"location":"admin/kpanda/gpu/volcano/numa.html#numa_1","title":"\u914d\u7f6e NUMA \u4eb2\u548c\u8c03\u5ea6\u7b56\u7565","text":"
1. Configure policies in a Job:

    tasks:
      - replicas: 1
        name: "test-1"
        topologyPolicy: single-numa-node
      - replicas: 1
        name: "test-2"
        topologyPolicy: best-effort

2. Modify the kubelet scheduling policy by setting the --topology-manager-policy parameter (see the config sketch after this list). Four policies are supported:

  • none (default)
  • best-effort
  • restricted
  • single-numa-node
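A minimal sketch of step 2, assuming the kubelet is driven by a KubeletConfiguration file (the path below is the common default; flag-based setups pass --topology-manager-policy on the command line instead):

    # /var/lib/kubelet/config.yaml
    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    topologyManagerPolicy: single-numa-node
    cpuManagerPolicy: static   # static CPU manager is required for NUMA-aligned CPU pinning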
                      "},{"location":"admin/kpanda/gpu/volcano/numa.html#_4","title":"\u4f7f\u7528\u6848\u4f8b","text":"
                      1. \u793a\u4f8b\u4e00\uff1a\u5728\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u4e2d\u914d\u7f6e NUMA \u4eb2\u548c\u6027\u3002

                        kind: Deployment  \napiVersion: apps/v1  \nmetadata:  \n  name: numa-tset  \nspec:  \n  replicas: 1  \n  selector:  \n    matchLabels:  \n      app: numa-tset  \n  template:  \n    metadata:  \n      labels:  \n        app: numa-tset  \n      annotations:  \n        volcano.sh/numa-topology-policy: single-numa-node    # set the topology policy  \n    spec:  \n      containers:  \n        - name: container-1  \n          image: nginx:alpine  \n          resources:  \n            requests:  \n              cpu: 2           # \u5fc5\u987b\u4e3a\u6574\u6570\uff0c\u4e14\u9700\u8981\u4e0elimits\u4e2d\u4e00\u81f4  \n              memory: 2048Mi  \n            limits:  \n              cpu: 2           # \u5fc5\u987b\u4e3a\u6574\u6570\uff0c\u4e14\u9700\u8981\u4e0erequests\u4e2d\u4e00\u81f4  \n              memory: 2048Mi  \n      imagePullSecrets:  \n      - name: default-secret\n
                      2. \u793a\u4f8b\u4e8c\uff1a\u521b\u5efa\u4e00\u4e2a Volcano Job\uff0c\u5e76\u4f7f\u7528 NUMA \u4eb2\u548c\u6027\u3002

                        apiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: vj-test  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 1  \n  tasks:  \n    - replicas: 1  \n      name: \"test\"  \n      topologyPolicy: best-effort   # set the topology policy for task  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                limits:  \n                  cpu: 20  \n                  memory: \"100Mi\"  \n          restartPolicy: OnFailure\n
                      "},{"location":"admin/kpanda/gpu/volcano/numa.html#numa_2","title":"NUMA \u8c03\u5ea6\u5206\u6790","text":"

                      \u5047\u8bbe NUMA \u8282\u70b9\u60c5\u51b5\u5982\u4e0b\uff1a

                      \u5de5\u4f5c\u8282\u70b9 \u8282\u70b9\u7b56\u7565\u62d3\u6251\u7ba1\u7406\u5668\u7b56\u7565 NUMA \u8282\u70b9 0 \u4e0a\u7684\u53ef\u5206\u914d CPU NUMA \u8282\u70b9 1 \u4e0a\u7684\u53ef\u5206\u914d CPU node-1 single-numa-node 16U 16U node-2 best-effort 16U 16U node-3 best-effort 20U 20U
                      • \u793a\u4f8b\u4e00\u4e2d\uff0cPod \u7684 CPU \u7533\u8bf7\u503c\u4e3a 2U\uff0c\u8bbe\u7f6e\u62d3\u6251\u7b56\u7565\u4e3a\u201csingle-numa-node\u201d\uff0c\u56e0\u6b64\u4f1a\u88ab\u8c03\u5ea6\u5230\u76f8\u540c\u7b56\u7565\u7684 node-1\u3002
                      • \u793a\u4f8b\u4e8c\u4e2d\uff0cPod \u7684 CPU \u7533\u8bf7\u503c\u4e3a20U\uff0c\u8bbe\u7f6e\u62d3\u6251\u7b56\u7565\u4e3a\u201cbest-effort\u201d\uff0c\u5b83\u5c06\u88ab\u8c03\u5ea6\u5230 node-3\uff0c \u56e0\u4e3a node-3 \u53ef\u4ee5\u5728\u5355\u4e2a NUMA \u8282\u70b9\u4e0a\u5206\u914d Pod \u7684 CPU \u8bf7\u6c42\uff0c\u800c node-2 \u9700\u8981\u5728\u4e24\u4e2a NUMA \u8282\u70b9\u4e0a\u6267\u884c\u6b64\u64cd\u4f5c\u3002
                      "},{"location":"admin/kpanda/gpu/volcano/numa.html#cpu","title":"\u67e5\u770b\u5f53\u524d\u8282\u70b9\u7684 CPU \u6982\u51b5","text":"

                      \u60a8\u53ef\u4ee5\u901a\u8fc7 lscpu \u547d\u4ee4\u67e5\u770b\u5f53\u524d\u8282\u70b9\u7684 CPU \u6982\u51b5\uff1a

                      lscpu \n... \nCPU(s): 32 \nNUMA node(s): 2 \nNUMA node0 CPU(s): 0-15 \nNUMA node1 CPU(s): 16-31\n
                      "},{"location":"admin/kpanda/gpu/volcano/numa.html#cpu_1","title":"\u67e5\u770b\u5f53\u524d\u8282\u70b9\u7684 CPU \u5206\u914d","text":"

                      \u7136\u540e\u67e5\u770b NUMA \u8282\u70b9\u4f7f\u7528\u60c5\u51b5\uff1a

                      # \u67e5\u770b\u5f53\u524d\u8282\u70b9\u7684 CPU \u5206\u914d\ncat /var/lib/kubelet/cpu_manager_state\n{\"policyName\":\"static\",\"defaultCpuSet\":\"0,10-15,25-31\",\"entries\":{\"777870b5-c64f-42f5-9296-688b9dc212ba\":{\"container-1\":\"16-24\"},\"fb15e10a-b6a5-4aaa-8fcd-76c1aa64e6fd\":{\"container-1\":\"1-9\"}},\"checksum\":318470969}\n

                      \u4ee5\u4e0a\u793a\u4f8b\u4e2d\u8868\u793a\uff0c\u8282\u70b9\u4e0a\u8fd0\u884c\u4e86\u4e24\u4e2a\u5bb9\u5668\uff0c\u4e00\u4e2a\u5360\u7528\u4e86 NUMA node0 \u76841-9 \u6838\uff0c\u53e6\u4e00\u4e2a\u5360\u7528\u4e86 NUMA node1 \u7684 16-24 \u6838\u3002

                      "},{"location":"admin/kpanda/gpu/volcano/volcano-gang-scheduler.html","title":"\u4f7f\u7528 Volcano \u7684 Gang Scheduler","text":"

                      Gang \u8c03\u5ea6\u7b56\u7565\u662f volcano-scheduler \u7684\u6838\u5fc3\u8c03\u5ea6\u7b97\u6cd5\u4e4b\u4e00\uff0c\u5b83\u6ee1\u8db3\u4e86\u8c03\u5ea6\u8fc7\u7a0b\u4e2d\u7684 \u201cAll or nothing\u201d \u7684\u8c03\u5ea6\u9700\u6c42\uff0c \u907f\u514d Pod \u7684\u4efb\u610f\u8c03\u5ea6\u5bfc\u81f4\u96c6\u7fa4\u8d44\u6e90\u7684\u6d6a\u8d39\u3002\u5177\u4f53\u7b97\u6cd5\u662f\uff0c\u89c2\u5bdf Job \u4e0b\u7684 Pod \u5df2\u8c03\u5ea6\u6570\u91cf\u662f\u5426\u6ee1\u8db3\u4e86\u6700\u5c0f\u8fd0\u884c\u6570\u91cf\uff0c \u5f53 Job \u7684\u6700\u5c0f\u8fd0\u884c\u6570\u91cf\u5f97\u5230\u6ee1\u8db3\u65f6\uff0c\u4e3a Job \u4e0b\u7684\u6240\u6709 Pod \u6267\u884c\u8c03\u5ea6\u52a8\u4f5c\uff0c\u5426\u5219\uff0c\u4e0d\u6267\u884c\u3002

                      "},{"location":"admin/kpanda/gpu/volcano/volcano-gang-scheduler.html#_1","title":"\u4f7f\u7528\u573a\u666f","text":"

                      \u57fa\u4e8e\u5bb9\u5668\u7ec4\u6982\u5ff5\u7684 Gang \u8c03\u5ea6\u7b97\u6cd5\u5341\u5206\u9002\u5408\u9700\u8981\u591a\u8fdb\u7a0b\u534f\u4f5c\u7684\u573a\u666f\u3002AI \u573a\u666f\u5f80\u5f80\u5305\u542b\u590d\u6742\u7684\u6d41\u7a0b\uff0c Data Ingestion\u3001Data Analysts\u3001Data Splitting\u3001Trainer\u3001Serving\u3001Logging \u7b49\uff0c \u9700\u8981\u4e00\u7ec4\u5bb9\u5668\u8fdb\u884c\u534f\u540c\u5de5\u4f5c\uff0c\u5c31\u5f88\u9002\u5408\u57fa\u4e8e\u5bb9\u5668\u7ec4\u7684 Gang \u8c03\u5ea6\u7b56\u7565\u3002 MPI \u8ba1\u7b97\u6846\u67b6\u4e0b\u7684\u591a\u7ebf\u7a0b\u5e76\u884c\u8ba1\u7b97\u901a\u4fe1\u573a\u666f\uff0c\u7531\u4e8e\u9700\u8981\u4e3b\u4ece\u8fdb\u7a0b\u534f\u540c\u5de5\u4f5c\uff0c\u4e5f\u975e\u5e38\u9002\u5408\u4f7f\u7528 Gang \u8c03\u5ea6\u7b56\u7565\u3002 \u5bb9\u5668\u7ec4\u4e0b\u7684\u5bb9\u5668\u9ad8\u5ea6\u76f8\u5173\u4e5f\u53ef\u80fd\u5b58\u5728\u8d44\u6e90\u4e89\u62a2\uff0c\u6574\u4f53\u8c03\u5ea6\u5206\u914d\uff0c\u80fd\u591f\u6709\u6548\u89e3\u51b3\u6b7b\u9501\u3002

                      \u5728\u96c6\u7fa4\u8d44\u6e90\u4e0d\u8db3\u7684\u573a\u666f\u4e0b\uff0cGang \u7684\u8c03\u5ea6\u7b56\u7565\u5bf9\u4e8e\u96c6\u7fa4\u8d44\u6e90\u7684\u5229\u7528\u7387\u7684\u63d0\u5347\u662f\u975e\u5e38\u660e\u663e\u7684\u3002 \u6bd4\u5982\u96c6\u7fa4\u73b0\u5728\u53ea\u80fd\u5bb9\u7eb3 2 \u4e2a Pod\uff0c\u73b0\u5728\u8981\u6c42\u6700\u5c0f\u8c03\u5ea6\u7684 Pod \u6570\u4e3a 3\u3002 \u90a3\u73b0\u5728\u8fd9\u4e2a Job \u7684\u6240\u6709\u7684 Pod \u90fd\u4f1a pending\uff0c\u76f4\u5230\u96c6\u7fa4\u80fd\u591f\u5bb9\u7eb3 3 \u4e2a Pod\uff0cPod \u624d\u4f1a\u88ab\u8c03\u5ea6\u3002 \u6709\u6548\u9632\u6b62\u8c03\u5ea6\u90e8\u5206 Pod\uff0c\u4e0d\u6ee1\u8db3\u8981\u6c42\u53c8\u5360\u7528\u4e86\u8d44\u6e90\uff0c\u4f7f\u5176\u4ed6 Job \u65e0\u6cd5\u8fd0\u884c\u7684\u60c5\u51b5\u3002

                      "},{"location":"admin/kpanda/gpu/volcano/volcano-gang-scheduler.html#_2","title":"\u6982\u5ff5\u8bf4\u660e","text":"

                      Gang Scheduler \u662f Volcano \u7684\u6838\u5fc3\u7684\u8c03\u5ea6\u63d2\u4ef6\uff0c\u5b89\u88c5 Volcano \u540e\u9ed8\u8ba4\u5c31\u5f00\u542f\u4e86\u3002 \u5728\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\u53ea\u9700\u8981\u6307\u5b9a\u8c03\u5ea6\u5668\u7684\u540d\u79f0\u4e3a Volcano \u5373\u53ef\u3002
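
                      As a minimal sketch (the Deployment name and labels are placeholders for illustration), a plain workload opts into Volcano scheduling like this:

                      apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: gang-demo # hypothetical name\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      app: gang-demo\n  template:\n    metadata:\n      labels:\n        app: gang-demo\n    spec:\n      schedulerName: volcano # hand the Pods over to volcano-scheduler\n      containers:\n        - name: demo\n          image: alpine\n          command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]\n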

                      Volcano schedules in units of PodGroups. You do not need to create PodGroup resources manually when creating a workload; Volcano creates them automatically from the workload's information. Below is a PodGroup example:

                      apiVersion: scheduling.volcano.sh/v1beta1\nkind: PodGroup\nmetadata:\n  name: test\n  namespace: default\nspec:\n  minMember: 1  # (1)!\n  minResources:  # (2)!\n    cpu: \"3\"\n    memory: \"2048Mi\"\n  priorityClassName: high-priority # (3)!\n  queue: default # (4)!\n
                      1. The minimum number of Pods or tasks that must run under this PodGroup. If the cluster resources cannot satisfy running minMember tasks, the scheduler will not schedule any task within this PodGroup.
                      2. The minimum resources required to run this PodGroup. If the cluster's allocatable resources do not satisfy minResources, the scheduler will not schedule any task within this PodGroup.
                      3. The priority of this PodGroup, used by the scheduler to order all PodGroups in the queue during scheduling. system-node-critical and system-cluster-critical are two reserved values denoting the highest priority. When not specified, the default priority or zero priority is used.
                      4. The queue this PodGroup belongs to. The queue must have been created beforehand and be in the open state, for example the Queue sketched below.
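
                      For reference, a minimal Queue can be created as follows (a sketch; the name and weight are illustrative only):

                      apiVersion: scheduling.volcano.sh/v1beta1\nkind: Queue\nmetadata:\n  name: test-queue # hypothetical name\nspec:\n  weight: 1 # relative share of cluster resources among queues\n  reclaimable: true # whether other queues may reclaim resources from this one\n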
                      "},{"location":"admin/kpanda/gpu/volcano/volcano-gang-scheduler.html#_3","title":"\u4f7f\u7528\u6848\u4f8b","text":"

                      \u5728 MPI \u8ba1\u7b97\u6846\u67b6\u4e0b\u7684\u591a\u7ebf\u7a0b\u5e76\u884c\u8ba1\u7b97\u901a\u4fe1\u573a\u666f\u4e2d\uff0c\u6211\u4eec\u8981\u786e\u4fdd\u6240\u6709\u7684 Pod \u90fd\u80fd\u8c03\u5ea6\u6210\u529f\u624d\u80fd\u4fdd\u8bc1\u4efb\u52a1\u6b63\u5e38\u5b8c\u6210\u3002 \u8bbe\u7f6e minAvailable \u4e3a 4\uff0c\u8868\u793a\u8981\u6c42 1 \u4e2a mpimaster \u548c 3 \u4e2a mpiworker \u80fd\u8fd0\u884c\u3002

                      apiVersion: batch.volcano.sh/v1alpha1\nkind: Job\nmetadata:\n  name: lm-mpi-job\n  labels:\n    \"volcano.sh/job-type\": \"MPI\"\nspec:\n  minAvailable: 4\n  schedulerName: volcano\n  plugins:\n    ssh: []\n    svc: []\n  policies:\n    - event: PodEvicted\n      action: RestartJob\n  tasks:\n    - replicas: 1\n      name: mpimaster\n      policies:\n        - event: TaskCompleted\n          action: CompleteJob\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  MPI_HOST=`cat /etc/volcano/mpiworker.host | tr \"\\n\" \",\"`;\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd;\n                  mpiexec --allow-run-as-root --host ${MPI_HOST} -np 3 mpi_hello_world;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpimaster\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"500m\"\n                limits:\n                  cpu: \"500m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n    - replicas: 3\n      name: mpiworker\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd -D;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpiworker\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"1000m\"\n                limits:\n                  cpu: \"1000m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n

                      The generated PodGroup resource:

                      apiVersion: scheduling.volcano.sh/v1beta1\nkind: PodGroup\nmetadata:\n  annotations:\n  creationTimestamp: \"2024-05-28T09:18:50Z\"\n  generation: 5\n  labels:\n    volcano.sh/job-type: MPI\n  name: lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2\n  namespace: default\n  ownerReferences:\n  - apiVersion: batch.volcano.sh/v1alpha1\n    blockOwnerDeletion: true\n    controller: true\n    kind: Job\n    name: lm-mpi-job\n    uid: 9c571015-37c7-4a1a-9604-eaa2248613f2\n  resourceVersion: \"25173454\"\n  uid: 7b04632e-7cff-4884-8e9a-035b7649d33b\nspec:\n  minMember: 4\n  minResources:\n    count/pods: \"4\"\n    cpu: 3500m\n    limits.cpu: 3500m\n    pods: \"4\"\n    requests.cpu: 3500m\n  minTaskMember:\n    mpimaster: 1\n    mpiworker: 3\n  queue: default\nstatus:\n  conditions:\n  - lastTransitionTime: \"2024-05-28T09:19:01Z\"\n    message: '3/4 tasks in gang unschedulable: pod group is not ready, 1 Succeeded,\n      3 Releasing, 4 minAvailable'\n    reason: NotEnoughResources\n    status: \"True\"\n    transitionID: f875efa5-0358-4363-9300-06cebc0e7466\n    type: Unschedulable\n  - lastTransitionTime: \"2024-05-28T09:18:53Z\"\n    reason: tasks in gang are ready to be scheduled\n    status: \"True\"\n    transitionID: 5a7708c8-7d42-4c33-9d97-0581f7c06dab\n    type: Scheduled\n  phase: Pending\n  succeeded: 1\n

                      As the PodGroup shows, it is associated with the workload through ownerReferences, and the minimum number of running Pods is set to 4. It can be inspected with kubectl, as shown below.
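
                      To inspect the automatically created PodGroup (the resource above was obtained this way; the generated name carries the owning Job's UID as a suffix):

                      kubectl get podgroup -n default\nkubectl get podgroup -n default lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2 -o yaml\n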

                      "},{"location":"admin/kpanda/gpu/volcano/volcano_binpack.html","title":"\u4f7f\u7528 Volcano Binpack \u8c03\u5ea6\u7b56\u7565","text":"

                      Binpack \u8c03\u5ea6\u7b97\u6cd5\u7684\u76ee\u6807\u662f\u5c3d\u91cf\u628a\u5df2\u88ab\u5360\u7528\u7684\u8282\u70b9\u586b\u6ee1\uff08\u5c3d\u91cf\u4e0d\u5f80\u7a7a\u767d\u8282\u70b9\u5206\u914d\uff09\u3002\u5177\u4f53\u5b9e\u73b0\u4e0a\uff0cBinpack \u8c03\u5ea6\u7b97\u6cd5\u4f1a\u7ed9\u6295\u9012\u7684\u8282\u70b9\u6253\u5206\uff0c \u5206\u6570\u8d8a\u9ad8\u8868\u793a\u8282\u70b9\u7684\u8d44\u6e90\u5229\u7528\u7387\u8d8a\u9ad8\u3002\u901a\u8fc7\u5c3d\u53ef\u80fd\u586b\u6ee1\u8282\u70b9\uff0c\u5c06\u5e94\u7528\u8d1f\u8f7d\u9760\u62e2\u5728\u90e8\u5206\u8282\u70b9\uff0c\u8fd9\u79cd\u8c03\u5ea6\u7b97\u6cd5\u80fd\u591f\u5c3d\u53ef\u80fd\u51cf\u5c0f\u8282\u70b9\u5185\u7684\u788e\u7247\uff0c \u5728\u7a7a\u95f2\u7684\u673a\u5668\u4e0a\u4e3a\u7533\u8bf7\u4e86\u66f4\u5927\u8d44\u6e90\u8bf7\u6c42\u7684 Pod \u9884\u7559\u8db3\u591f\u7684\u8d44\u6e90\u7a7a\u95f4\uff0c\u4f7f\u96c6\u7fa4\u4e0b\u7a7a\u95f2\u8d44\u6e90\u5f97\u5230\u6700\u5927\u5316\u7684\u5229\u7528\u3002

                      "},{"location":"admin/kpanda/gpu/volcano/volcano_binpack.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"

                      \u9884\u5148\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e0a\u5b89\u88c5 Volcano \u7ec4\u4ef6\u3002

                      "},{"location":"admin/kpanda/gpu/volcano/volcano_binpack.html#binpack","title":"Binpack \u7b97\u6cd5\u539f\u7406","text":"

                      Binpack \u5728\u5bf9\u4e00\u4e2a\u8282\u70b9\u6253\u5206\u65f6\uff0c\u4f1a\u6839\u636e Binpack \u63d2\u4ef6\u81ea\u8eab\u6743\u91cd\u548c\u5404\u8d44\u6e90\u8bbe\u7f6e\u7684\u6743\u91cd\u503c\u7efc\u5408\u6253\u5206\u3002 \u9996\u5148\uff0c\u5bf9 Pod \u8bf7\u6c42\u8d44\u6e90\u4e2d\u7684\u6bcf\u7c7b\u8d44\u6e90\u4f9d\u6b21\u6253\u5206\uff0c\u4ee5 CPU \u4e3a\u4f8b\uff0cCPU \u8d44\u6e90\u5728\u5f85\u8c03\u5ea6\u8282\u70b9\u7684\u5f97\u5206\u4fe1\u606f\u5982\u4e0b\uff1a

                      CPU.weight * (request + used) / allocatable\n

                      That is, the higher the CPU weight, the higher the score, and the fuller the node's resource usage, the higher the score. Memory, GPU, and other resources work analogously. Here:

                      • CPU.weight is the CPU weight configured by the user
                      • request is the amount of CPU requested by the current Pod
                      • used is the amount of CPU already allocated on the current node
                      • allocatable is the total amount of CPU available on the current node

                      The node's total score under the Binpack policy is then:

                      binpack.weight * (CPU.score + Memory.score + GPU.score) / (CPU.weight + Memory.weight + GPU.weight) * 100\n

                      That is, the larger the Binpack plugin's weight, the higher the score, and the larger a resource's weight, the larger that resource's share of the score. Here:

                      • binpack.weight is the bin-packing policy weight configured by the user
                      • CPU.score is the CPU resource score, and CPU.weight is the CPU weight
                      • Memory.score is the Memory resource score, and Memory.weight is the Memory weight
                      • GPU.score is the GPU resource score, and GPU.weight is the GPU weight

                      For example, suppose the cluster has two nodes, Node 1 and Node 2. When scheduling a Pod, the Binpack policy scores each node separately. Assume the cluster configures CPU.weight as 1, Memory.weight as 1, GPU.weight as 2, and binpack.weight as 5.

                      1. Binpack scores the resources of Node 1; the formula for each resource is:

                        • CPU Score:

                          CPU.weight * (request + used) / allocatable = 1 * (2 + 4) / 8 = 0.75

                        • Memory Score:

                          Memory.weight * (request + used) / allocatable = 1 * (4 + 8) / 16 = 0.75

                        • GPU Score:

                          GPU.weight * (request + used) / allocatable = 2 * (4 + 4) / 8 = 2

                      2. The formula for a node's total score is:

                        binpack.weight * (CPU.score + Memory.score + GPU.score) / (CPU.weight + Memory.weight + GPU.weight) * 100\n

                        With binpack.weight configured as 5, Node 1's score under the Binpack policy is:

                        5 * (0.75 + 0.75 + 2) / (1 + 1 + 2) * 100 = 437.5\n
                      3. Binpack scores the resources of Node 2:

                        • CPU Score:

                          CPU.weight * (request + used) / allocatable = 1 * (2 + 6) / 8 = 1

                        • Memory Score:

                          Memory.weight * (request + used) / allocatable = 1 * (4 + 8) / 16 = 0.75

                        • GPU Score:

                          GPU.weight * (request + used) / allocatable = 2 * (4 + 4) / 8 = 2

                      4. Node 2's score under the Binpack policy is:

                        5 * (1 + 0.75 + 2) / (1 + 1 + 2) * 100 = 468.75\n

                      In summary, Node 2 scores higher than Node 1, so under the Binpack policy the Pod is scheduled to Node 2 first.

                      "},{"location":"admin/kpanda/gpu/volcano/volcano_binpack.html#_2","title":"Use Case","text":"

                      The Binpack scheduling plugin is enabled by default when Volcano is installed; if the user has not configured weights, the following default weights are used.

                      - plugins:\n    - name: binpack\n      arguments:\n        binpack.weight: 1\n        binpack.cpu: 1\n        binpack.memory: 1\n

                      The default weight does not make the packing (stacking) behavior pronounced, so it needs to be changed to binpack.weight: 10.

                      kubectl -n volcano-system edit configmaps volcano-scheduler-configmap\n
                      - plugins:\n    - name: binpack\n      arguments:\n        binpack.weight: 10\n        binpack.cpu: 1\n        binpack.memory: 1\n        binpack.resources: nvidia.com/gpu, example.com/foo\n        binpack.resources.nvidia.com/gpu: 2\n        binpack.resources.example.com/foo: 3\n

                      After the change, restart the volcano-scheduler Pod for it to take effect, for example as sketched below.
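
                      Assuming Volcano was installed into the volcano-system namespace with the default Deployment name, one way to restart the scheduler is:

                      kubectl -n volcano-system rollout restart deployment volcano-scheduler\n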

                      Create the following Deployment.

                      apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: binpack-test\n  labels:\n    app: binpack-test\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      app: test\n  template:\n    metadata:\n      labels:\n        app: test\n    spec:\n      schedulerName: volcano\n      containers:\n        - name: test\n          image: busybox\n          imagePullPolicy: IfNotPresent\n          command: [\"sh\", \"-c\", 'echo \"Hello, Kubernetes!\" && sleep 3600']\n          resources:\n            requests:\n              cpu: 500m\n            limits:\n              cpu: 500m\n

                      On a two-node cluster you can observe that the Pods are scheduled onto a single node, as shown below.
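
                      You can verify the placement via the NODE column of kubectl (the output below is illustrative; Pod and node names will differ in your cluster):

                      kubectl get pods -l app=test -o wide\nNAME                            READY   STATUS    RESTARTS   AGE   IP           NODE\nbinpack-test-64f8f9cd95-kmb2k   1/1     Running   0          10s   10.233.1.5   node-1\nbinpack-test-64f8f9cd95-xq7rl   1/1     Running   0          10s   10.233.1.6   node-1\n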

                      "},{"location":"admin/kpanda/gpu/volcano/volcano_priority.html","title":"\u4f18\u5148\u7ea7\u62a2\u5360\uff08Preemption scheduling\uff09\u7b56\u7565","text":"

                      Volcano \u901a\u8fc7 Priority \u63d2\u4ef6\u5b9e\u73b0\u4e86\u4f18\u5148\u7ea7\u62a2\u5360\u7b56\u7565\uff0c\u5373 Preemption scheduling \u7b56\u7565\u3002\u5728\u96c6\u7fa4\u8d44\u6e90\u6709\u9650\u4e14\u591a\u4e2a Job \u7b49\u5f85\u8c03\u5ea6\u65f6\uff0c \u5982\u679c\u4f7f\u7528 Kubernetes \u9ed8\u8ba4\u8c03\u5ea6\u5668\uff0c\u53ef\u80fd\u4f1a\u5bfc\u81f4\u5177\u6709\u66f4\u591a Pod \u6570\u91cf\u7684 Job \u5206\u5f97\u66f4\u591a\u8d44\u6e90\u3002\u800c Volcano-scheduler \u63d0\u4f9b\u4e86\u7b97\u6cd5\uff0c\u652f\u6301\u4e0d\u540c\u7684 Job \u4ee5 fair-share \u7684\u5f62\u5f0f\u5171\u4eab\u96c6\u7fa4\u8d44\u6e90\u3002

                      Priority \u63d2\u4ef6\u5141\u8bb8\u7528\u6237\u81ea\u5b9a\u4e49 Job \u548c Task \u7684\u4f18\u5148\u7ea7\uff0c\u5e76\u6839\u636e\u9700\u6c42\u5728\u4e0d\u540c\u5c42\u6b21\u4e0a\u5b9a\u5236\u8c03\u5ea6\u7b56\u7565\u3002 \u4f8b\u5982\uff0c\u5bf9\u4e8e\u91d1\u878d\u573a\u666f\u3001\u7269\u8054\u7f51\u76d1\u63a7\u573a\u666f\u7b49\u9700\u8981\u8f83\u9ad8\u5b9e\u65f6\u6027\u7684\u5e94\u7528\uff0cPriority \u63d2\u4ef6\u80fd\u591f\u786e\u4fdd\u5176\u4f18\u5148\u5f97\u5230\u8c03\u5ea6\u3002

                      "},{"location":"admin/kpanda/gpu/volcano/volcano_priority.html#_1","title":"\u4f7f\u7528\u65b9\u5f0f","text":"

                      \u4f18\u5148\u7ea7\u7684\u51b3\u5b9a\u57fa\u4e8e\u914d\u7f6e\u7684 PriorityClass \u4e2d\u7684 Value \u503c\uff0c\u503c\u8d8a\u5927\u4f18\u5148\u7ea7\u8d8a\u9ad8\u3002\u9ed8\u8ba4\u5df2\u542f\u7528\uff0c\u65e0\u9700\u4fee\u6539\u3002\u53ef\u901a\u8fc7\u4ee5\u4e0b\u547d\u4ee4\u786e\u8ba4\u6216\u4fee\u6539\u3002

                      kubectl -n volcano-system edit configmaps volcano-scheduler-configmap\n
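
                      Inside that ConfigMap, the priority plugin is listed in the first tier of the scheduler configuration, which typically looks similar to the following (a reference sketch; the exact contents depend on your installed Volcano version):

                      actions: \"enqueue, allocate, backfill\"\ntiers:\n  - plugins:\n      - name: priority\n      - name: gang\n      - name: conformance\n  - plugins:\n      - name: overcommit\n      - name: drf\n      - name: predicates\n      - name: proportion\n      - name: nodeorder\n      - name: binpack\n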
                      "},{"location":"admin/kpanda/gpu/volcano/volcano_priority.html#_2","title":"\u4f7f\u7528\u6848\u4f8b","text":"

                      \u5047\u8bbe\u96c6\u7fa4\u4e2d\u5b58\u5728\u4e24\u4e2a\u7a7a\u95f2\u8282\u70b9\uff0c\u5e76\u6709\u4e09\u4e2a\u4f18\u5148\u7ea7\u4e0d\u540c\u7684\u5de5\u4f5c\u8d1f\u8f7d\uff1ahigh-priority\u3001med-priority \u548c low-priority\u3002 \u5f53 high-priority \u5de5\u4f5c\u8d1f\u8f7d\u8fd0\u884c\u5e76\u5360\u6ee1\u96c6\u7fa4\u8d44\u6e90\u540e\uff0c\u518d\u63d0\u4ea4 med-priority \u548c low-priority \u5de5\u4f5c\u8d1f\u8f7d\u3002 \u7531\u4e8e\u96c6\u7fa4\u8d44\u6e90\u5168\u90e8\u88ab\u66f4\u9ad8\u4f18\u5148\u7ea7\u7684\u5de5\u4f5c\u8d1f\u8f7d\u5360\u7528\uff0cmed-priority \u548c low-priority \u5de5\u4f5c\u8d1f\u8f7d\u5c06\u5904\u4e8e pending \u72b6\u6001\u3002 \u5f53 high-priority \u5de5\u4f5c\u8d1f\u8f7d\u7ed3\u675f\u540e\uff0c\u6839\u636e\u4f18\u5148\u7ea7\u8c03\u5ea6\u539f\u5219\uff0cmed-priority \u5de5\u4f5c\u8d1f\u8f7d\u5c06\u4f18\u5148\u88ab\u8c03\u5ea6\u3002

                      1. \u901a\u8fc7 priority.yaml \u521b\u5efa 3 \u4e2a\u4f18\u5148\u7ea7\u5b9a\u4e49\uff0c\u5206\u522b\u4e3a\uff1ahigh-priority\uff0cmed-priority\uff0clow-priority\u3002

                        View priority.yaml

                        cat <<EOF | kubectl apply -f - \napiVersion: scheduling.k8s.io/v1 \nkind: PriorityClass \nitems: \n  - metadata: \n      name: high-priority \n    value: 100 \n    globalDefault: false \n    description: \"This priority class should be used for volcano job only.\" \n  - metadata: \n      name: med-priority \n    value: 50 \n    globalDefault: false \n    description: \"This priority class should be used for volcano job only.\" \n  - metadata: \n      name: low-priority \n    value: 10 \n    globalDefault: false \n    description: \"This priority class should be used for volcano job only.\" \nEOF\n
                        2. Check the priority definitions.

                        kubectl get PriorityClass\n
                        NAME                      VALUE        GLOBAL-DEFAULT   AGE  \nhigh-priority             100          false            97s  \nlow-priority              10           false            97s  \nmed-priority              50           false            97s  \nsystem-cluster-critical   2000000000   false            6d6h  \nsystem-node-critical      2000001000   false            6d6h\n

                      3. Create the high-priority workload high-priority-job, which occupies all of the cluster's resources.

                        View high-priority-job
                        cat <<EOF | kubectl apply -f -  \napiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: priority-high  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 4  \n  priorityClassName: high-priority  \n  tasks:  \n    - replicas: 4  \n      name: \"test\"  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                requests:  \n                  cpu: \"4\"  \n          restartPolicy: OnFailure  \nEOF\n

                        Check the running Pods with kubectl get pod:

                        kubectl get pods\n
                        NAME                   READY   STATUS    RESTARTS   AGE  \npriority-high-test-0   1/1     Running   0          3s  \npriority-high-test-1   1/1     Running   0          3s  \npriority-high-test-2   1/1     Running   0          3s  \npriority-high-test-3   1/1     Running   0          3s\n

                        At this point, all cluster node resources are occupied.

                      4. Create the medium-priority workload med-priority-job and the low-priority workload low-priority-job.

                        med-priority-job
                        cat <<EOF | kubectl apply -f -  \napiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: priority-medium  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 4  \n  priorityClassName: med-priority  \n  tasks:  \n    - replicas: 4  \n      name: \"test\"  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                requests:  \n                  cpu: \"4\"  \n          restartPolicy: OnFailure  \nEOF\n
                        low-priority-job
                        cat <<EOF | kubectl apply -f -  \napiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: priority-low  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 4  \n  priorityClassName: low-priority  \n  tasks:  \n    - replicas: 4  \n      name: \"test\"  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                requests:  \n                  cpu: \"4\"  \n          restartPolicy: OnFailure  \nEOF\n

                        Check the Pods with kubectl get pod; cluster resources are insufficient, so the Pods are in the Pending state:

                        kubectl get pods\n
                        NAME                     READY   STATUS    RESTARTS   AGE  \npriority-high-test-0     1/1     Running   0          3m29s  \npriority-high-test-1     1/1     Running   0          3m29s  \npriority-high-test-2     1/1     Running   0          3m29s  \npriority-high-test-3     1/1     Running   0          3m29s  \npriority-low-test-0      0/1     Pending   0          2m26s  \npriority-low-test-1      0/1     Pending   0          2m26s  \npriority-low-test-2      0/1     Pending   0          2m26s  \npriority-low-test-3      0/1     Pending   0          2m26s  \npriority-medium-test-0   0/1     Pending   0          2m36s  \npriority-medium-test-1   0/1     Pending   0          2m36s  \npriority-medium-test-2   0/1     Pending   0          2m36s  \npriority-medium-test-3   0/1     Pending   0          2m36s\n

                      5. Delete the high-priority workload to release cluster resources, and the med-priority workload will be scheduled first. Run kubectl delete -f high_priority_job.yaml (assuming the manifest above was saved to that file) to release the cluster resources, then check the Pods' scheduling:

                        kubectl get pods\n
                        NAME                     READY   STATUS    RESTARTS   AGE  \npriority-low-test-0      0/1     Pending   0          5m18s  \npriority-low-test-1      0/1     Pending   0          5m18s  \npriority-low-test-2      0/1     Pending   0          5m18s  \npriority-low-test-3      0/1     Pending   0          5m18s  \npriority-medium-test-0   1/1     Running   0          5m28s  \npriority-medium-test-1   1/1     Running   0          5m28s  \npriority-medium-test-2   1/1     Running   0          5m28s  \npriority-medium-test-3   1/1     Running   0          5m28s\n
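
                        When you are done experimenting, the remaining Jobs can also be deleted by name; vcjob is the short name for Volcano's batch Job resource (assuming the Volcano CRDs are installed as usual):

                        kubectl delete vcjob priority-medium priority-low\n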

                      "},{"location":"admin/kpanda/gpu/volcano/volcano_user_guide.html","title":"\u5b89\u88c5 Volcano","text":"

                      \u968f\u7740 Kubernetes\uff08K8s\uff09\u6210\u4e3a\u4e91\u539f\u751f\u5e94\u7528\u7f16\u6392\u4e0e\u7ba1\u7406\u7684\u9996\u9009\u5e73\u53f0\uff0c\u4f17\u591a\u5e94\u7528\u6b63\u79ef\u6781\u5411 K8s \u8fc1\u79fb\u3002 \u5728\u4eba\u5de5\u667a\u80fd\u4e0e\u673a\u5668\u5b66\u4e60\u9886\u57df\uff0c\u7531\u4e8e\u8fd9\u4e9b\u4efb\u52a1\u901a\u5e38\u6d89\u53ca\u5927\u91cf\u8ba1\u7b97\uff0c\u5f00\u53d1\u8005\u503e\u5411\u4e8e\u5728 Kubernetes \u4e0a\u6784\u5efa AI \u5e73\u53f0\uff0c \u4ee5\u5145\u5206\u5229\u7528\u5176\u5728\u8d44\u6e90\u7ba1\u7406\u3001\u5e94\u7528\u7f16\u6392\u53ca\u8fd0\u7ef4\u76d1\u63a7\u65b9\u9762\u7684\u4f18\u52bf\u3002

                      \u7136\u800c\uff0cKubernetes \u7684\u9ed8\u8ba4\u8c03\u5ea6\u5668\u4e3b\u8981\u9488\u5bf9\u957f\u671f\u8fd0\u884c\u7684\u670d\u52a1\u8bbe\u8ba1\uff0c\u5bf9\u4e8e AI\u3001\u5927\u6570\u636e\u7b49\u9700\u8981\u6279\u91cf\u548c\u5f39\u6027\u8c03\u5ea6\u7684\u4efb\u52a1\u5b58\u5728\u8bf8\u591a\u4e0d\u8db3\u3002 \u4f8b\u5982\uff0c\u5728\u8d44\u6e90\u7ade\u4e89\u6fc0\u70c8\u7684\u60c5\u51b5\u4e0b\uff0c\u9ed8\u8ba4\u8c03\u5ea6\u5668\u53ef\u80fd\u5bfc\u81f4\u8d44\u6e90\u5206\u914d\u4e0d\u5747\uff0c\u8fdb\u800c\u5f71\u54cd\u4efb\u52a1\u7684\u6b63\u5e38\u6267\u884c\u3002

                      \u4ee5 TensorFlow \u4f5c\u4e1a\u4e3a\u4f8b\uff0c\u5176\u5305\u542b PS\uff08\u53c2\u6570\u670d\u52a1\u5668\uff09\u548c Worker \u4e24\u79cd\u89d2\u8272\uff0c\u4e24\u8005\u9700\u534f\u540c\u5de5\u4f5c\u624d\u80fd\u5b8c\u6210\u4efb\u52a1\u3002 \u82e5\u4ec5\u90e8\u7f72\u5355\u4e00\u89d2\u8272\uff0c\u4f5c\u4e1a\u5c06\u65e0\u6cd5\u8fd0\u884c\u3002\u800c\u9ed8\u8ba4\u8c03\u5ea6\u5668\u5bf9 Pod \u7684\u8c03\u5ea6\u662f\u9010\u4e2a\u8fdb\u884c\u7684\uff0c\u65e0\u6cd5\u611f\u77e5 TFJob \u4e2d PS \u548c Worker \u7684\u4f9d\u8d56\u5173\u7cfb\u3002 \u5728\u9ad8\u8d1f\u8f7d\u60c5\u51b5\u4e0b\uff0c\u8fd9\u53ef\u80fd\u5bfc\u81f4\u591a\u4e2a\u4f5c\u4e1a\u5404\u81ea\u5206\u914d\u5230\u90e8\u5206\u8d44\u6e90\uff0c\u4f46\u5747\u65e0\u6cd5\u5b8c\u6210\uff0c\u4ece\u800c\u9020\u6210\u8d44\u6e90\u6d6a\u8d39\u3002

                      "},{"location":"admin/kpanda/gpu/volcano/volcano_user_guide.html#volcano_1","title":"Volcano \u7684\u8c03\u5ea6\u7b56\u7565\u4f18\u52bf","text":"

                      Volcano \u63d0\u4f9b\u4e86\u591a\u79cd\u8c03\u5ea6\u7b56\u7565\uff0c\u4ee5\u5e94\u5bf9\u4e0a\u8ff0\u6311\u6218\u3002\u5176\u4e2d\uff0cGang-scheduling \u7b56\u7565\u80fd\u786e\u4fdd\u5206\u5e03\u5f0f\u673a\u5668\u5b66\u4e60\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u591a\u4e2a\u4efb\u52a1\uff08Pod\uff09\u540c\u65f6\u542f\u52a8\uff0c \u907f\u514d\u6b7b\u9501\uff1bPreemption scheduling \u7b56\u7565\u5219\u5141\u8bb8\u9ad8\u4f18\u5148\u7ea7\u4f5c\u4e1a\u5728\u8d44\u6e90\u4e0d\u8db3\u65f6\u62a2\u5360\u4f4e\u4f18\u5148\u7ea7\u4f5c\u4e1a\u7684\u8d44\u6e90\uff0c\u786e\u4fdd\u5173\u952e\u4efb\u52a1\u4f18\u5148\u5b8c\u6210\u3002

                      \u6b64\u5916\uff0cVolcano \u4e0e Spark\u3001TensorFlow\u3001PyTorch \u7b49\u4e3b\u6d41\u8ba1\u7b97\u6846\u67b6\u65e0\u7f1d\u5bf9\u63a5\uff0c\u5e76\u652f\u6301 CPU \u548c GPU \u7b49\u5f02\u6784\u8bbe\u5907\u7684\u6df7\u5408\u8c03\u5ea6\uff0c\u4e3a AI \u8ba1\u7b97\u4efb\u52a1\u63d0\u4f9b\u4e86\u5168\u9762\u7684\u4f18\u5316\u652f\u6301\u3002

                      \u63a5\u4e0b\u6765\uff0c\u6211\u4eec\u5c06\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5\u548c\u4f7f\u7528 Volcano\uff0c\u4ee5\u4fbf\u60a8\u80fd\u591f\u5145\u5206\u5229\u7528\u5176\u8c03\u5ea6\u7b56\u7565\u4f18\u52bf\uff0c\u4f18\u5316 AI \u8ba1\u7b97\u4efb\u52a1\u3002

                      "},{"location":"admin/kpanda/gpu/volcano/volcano_user_guide.html#volcano_2","title":"\u5b89\u88c5 Volcano","text":"
                      1. \u5728 \u96c6\u7fa4\u8be6\u60c5 -> Helm \u5e94\u7528 -> Helm \u6a21\u677f \u4e2d\u627e\u5230 Volcano \u5e76\u5b89\u88c5\u3002

                      2. \u68c0\u67e5\u5e76\u786e\u8ba4 Volcano \u662f\u5426\u5b89\u88c5\u5b8c\u6210\uff0c\u5373 volcano-admission\u3001volcano-controllers\u3001volcano-scheduler \u7ec4\u4ef6\u662f\u5426\u6b63\u5e38\u8fd0\u884c\u3002
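
                      A quick way to check is to list the Pods in the volcano-system namespace (assuming the default installation namespace; Pod names and hashes will vary):

                      kubectl -n volcano-system get pods\nNAME                                   READY   STATUS    RESTARTS   AGE\nvolcano-admission-76bd985b56-fnpjg     1/1     Running   0          3d\nvolcano-controllers-56cbbb9c66-x8r5k   1/1     Running   0          3d\nvolcano-scheduler-85b5d79957-wtjl6     1/1     Running   0          3d\n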

                      Volcano is usually used together with the AI Lab platform to form an effective closed loop over the whole development and training workflow: datasets, Notebooks, and training jobs.

                      "},{"location":"admin/kpanda/helm/index.html","title":"Helm \u6a21\u677f","text":"

                      Helm \u662f Kubernetes \u7684\u5305\u7ba1\u7406\u5de5\u5177\uff0c\u65b9\u4fbf\u7528\u6237\u5feb\u901f\u53d1\u73b0\u3001\u5171\u4eab\u548c\u4f7f\u7528 Kubernetes \u6784\u5efa\u7684\u5e94\u7528\u3002\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u63d0\u4f9b\u4e86\u4e0a\u767e\u4e2a Helm \u6a21\u677f\uff0c\u6db5\u76d6\u5b58\u50a8\u3001\u7f51\u7edc\u3001\u76d1\u63a7\u3001\u6570\u636e\u5e93\u7b49\u4e3b\u8981\u573a\u666f\u3002\u501f\u52a9\u8fd9\u4e9b\u6a21\u677f\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7 UI \u754c\u9762\u5feb\u901f\u90e8\u7f72\u3001\u4fbf\u6377\u7ba1\u7406 Helm \u5e94\u7528\u3002\u6b64\u5916\uff0c\u652f\u6301\u901a\u8fc7\u6dfb\u52a0 Helm \u4ed3\u5e93 \u6dfb\u52a0\u66f4\u591a\u7684\u4e2a\u6027\u5316\u6a21\u677f\uff0c\u6ee1\u8db3\u591a\u6837\u9700\u6c42\u3002

                      \u5173\u952e\u6982\u5ff5\uff1a

                      \u4f7f\u7528 Helm \u65f6\u9700\u8981\u4e86\u89e3\u4ee5\u4e0b\u51e0\u4e2a\u5173\u952e\u6982\u5ff5\uff1a

                      • Chart\uff1a\u4e00\u4e2a Helm \u5b89\u88c5\u5305\uff0c\u5176\u4e2d\u5305\u542b\u4e86\u8fd0\u884c\u4e00\u4e2a\u5e94\u7528\u6240\u9700\u8981\u7684\u955c\u50cf\u3001\u4f9d\u8d56\u548c\u8d44\u6e90\u5b9a\u4e49\u7b49\uff0c\u8fd8\u53ef\u80fd\u5305\u542b Kubernetes \u96c6\u7fa4\u4e2d\u7684\u670d\u52a1\u5b9a\u4e49\uff0c\u7c7b\u4f3c Homebrew \u4e2d\u7684 formula\u3001APT \u7684 dpkg \u6216\u8005 Yum \u7684 rpm \u6587\u4ef6\u3002Chart \u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u79f0\u4e3a Helm \u6a21\u677f \u3002

                      • Release\uff1a\u5728 Kubernetes \u96c6\u7fa4\u4e0a\u8fd0\u884c\u7684\u4e00\u4e2a Chart \u5b9e\u4f8b\u3002\u4e00\u4e2a Chart \u53ef\u4ee5\u5728\u540c\u4e00\u4e2a\u96c6\u7fa4\u5185\u591a\u6b21\u5b89\u88c5\uff0c\u6bcf\u6b21\u5b89\u88c5\u90fd\u4f1a\u521b\u5efa\u4e00\u4e2a\u65b0\u7684 Release\u3002Release \u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u79f0\u4e3a Helm \u5e94\u7528 \u3002

                      • Repository\uff1a\u7528\u4e8e\u53d1\u5e03\u548c\u5b58\u50a8 Chart \u7684\u5b58\u50a8\u5e93\u3002Repository \u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u79f0\u4e3a Helm \u4ed3\u5e93\u3002

                      \u66f4\u591a\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u524d\u5f80 Helm \u5b98\u7f51\u67e5\u770b\u3002
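
                      For readers who know the Helm CLI, the three concepts map onto everyday commands roughly as follows (a sketch; the repository and release names are examples only):

                      # Repository: register a chart repository\nhelm repo add bitnami https://charts.bitnami.com/bitnami\n# Chart: inspect a package from that repository\nhelm show chart bitnami/nginx\n# Release: install the chart as a named instance in the cluster\nhelm install my-nginx bitnami/nginx --namespace demo\n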

                      Related operations:

                      • Upload a Helm template, describing the upload operation.
                      • Manage Helm apps, including installing, updating, and uninstalling Helm apps, viewing Helm operation records, and more.
                      • Manage Helm repositories, including installing, updating, and deleting Helm repositories.
                      "},{"location":"admin/kpanda/helm/Import-addon.html","title":"Import a Custom Helm App as a Built-in System Addon","text":"

                      This article explains, for both offline and online environments, how to import a Helm app into the system's built-in Addons.

                      "},{"location":"admin/kpanda/helm/Import-addon.html#_1","title":"Offline Environment","text":"

                      An offline environment is a private network that cannot reach the Internet or is otherwise closed off.

                      "},{"location":"admin/kpanda/helm/Import-addon.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u5b58\u5728\u53ef\u4ee5\u8fd0\u884c\u7684\u00a0charts-syncer\u3002 \u82e5\u6ca1\u6709\uff0c\u53ef\u70b9\u51fb\u4e0b\u8f7d\u3002
                      • Helm Chart \u5df2\u7ecf\u5b8c\u6210\u9002\u914d charts-syncer\u3002 \u5373\u5728 Helm Chart \u5185\u6dfb\u52a0\u4e86 .relok8s-images.yaml \u6587\u4ef6\u3002\u8be5\u6587\u4ef6\u9700\u8981\u5305\u542b Chart \u4e2d\u6240\u6709\u4f7f\u7528\u5230\u955c\u50cf\uff0c \u4e5f\u53ef\u4ee5\u5305\u542b Chart \u4e2d\u672a\u76f4\u63a5\u4f7f\u7528\u7684\u955c\u50cf\uff0c\u7c7b\u4f3c Operator \u4e2d\u4f7f\u7528\u7684\u955c\u50cf\u3002

                      Note

                      • For how to write the Chart file, see image-hints-file. The image's registry and repository must be written separately, because the registry/repository needs to be replaced or modified when the images are loaded.
                      • The bootstrap cluster where the installer resides already has charts-syncer installed. If you are importing the custom Helm app into the bootstrap cluster where the installer resides, you can skip the download and go straight to adaptation; if the charts-syncer binary is not installed, you can download it right away.
                      "},{"location":"admin/kpanda/helm/Import-addon.html#helm-chart","title":"Sync the Helm Chart","text":"
                      1. Go to Container Management -> Helm Apps -> Helm Repositories, search for addon, and obtain the built-in repository address and username/password (the default username/password for the system's built-in repository is rootuser/rootpass123).
                      2. Sync the Helm Chart into the container management built-in repository Addon

                        • Write the following configuration file (adjust it to your actual setup) and save it as sync-dao-2048.yaml.

                          source:  # source information for the helm charts\n  repo:\n    kind: HARBOR # can also be any other supported Helm Chart repository kind, e.g. CHARTMUSEUM\n    url: https://release-ci.daocloud.io/chartrepo/community #  change to your chart repo url\n    #auth: # username/password; can be omitted if no password is set\n      #username: \"admin\"\n      #password: \"Harbor12345\"\ncharts:  # charts to sync\n  - name: dao-2048 # helm chart info; if omitted, all charts in the source helm repo are synced\n    versions:\n      - 1.4.1\ntarget:  # target information for the helm charts\n  containerRegistry: 10.5.14.40 # image registry url\n  repo:\n    kind: CHARTMUSEUM # can also be any other supported Helm Chart repository kind, e.g. HARBOR\n    url: http://10.5.14.40:8081 #  change to the correct chart repo url; you can verify the address with helm repo add $HELM-REPO\n    auth: # username/password; can be omitted if no password is set\n      username: \"rootuser\"\n      password: \"rootpass123\"\n  containers:\n    # kind: HARBOR # fill in this field if the image registry is HARBOR and you want charts-syncer to auto-create the image repository  \n    # auth: # username/password; can be omitted if no password is set \n      # username: \"admin\"\n      # password: \"Harbor12345\"\n\n# leverage .relok8s-images.yaml file inside the Charts to move the container images too\nrelocateContainerImages: true\n
                        • Run the charts-syncer command to sync the Chart and the images it contains

                          charts-syncer sync --config sync-dao-2048.yaml --insecure --auto-create-repository\n

                          The expected output is:

                          I1222 15:01:47.119777    8743 sync.go:45] Using config file: \"examples/sync-dao-2048.yaml\"\nW1222 15:01:47.234238    8743 syncer.go:263] Ignoring skipDependencies option as dependency sync is not supported if container image relocation is true or syncing from/to intermediate directory\nI1222 15:01:47.234685    8743 sync.go:58] There is 1 chart out of sync!\nI1222 15:01:47.234706    8743 sync.go:66] Syncing \"dao-2048_1.4.1\" chart...\n.relok8s-images.yaml hints file found\nComputing relocation...\n\nRelocating dao-2048@1.4.1...\nPushing 10.5.14.40/daocloud/dao-2048:v1.4.1...\nDone\nDone moving /var/folders/vm/08vw0t3j68z9z_4lcqyhg8nm0000gn/T/charts-syncer869598676/dao-2048-1.4.1.tgz\n
                      3. Once the previous step completes, go to Container Management -> Helm Apps -> Helm Repositories, find the corresponding Addon, click Sync Repository in the action column, and return to Helm Templates to see the uploaded Helm app

                      4. It can then be installed, upgraded, and uninstalled as usual

                      "},{"location":"admin/kpanda/helm/Import-addon.html#_3","title":"Online Environment","text":"

                      The Helm Repo address for the online environment is release.daocloud.io. If the user has no permission to add a Helm Repo, the custom Helm app cannot be imported as a built-in system Addon. You can add your own self-hosted Helm repository and then integrate it into the platform by following the Sync the Helm Chart steps for the offline environment.

                      "},{"location":"admin/kpanda/helm/helm-app.html","title":"\u7ba1\u7406 Helm \u5e94\u7528","text":"

                      \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u652f\u6301\u5bf9 Helm \u8fdb\u884c\u754c\u9762\u5316\u7ba1\u7406\uff0c\u5305\u62ec\u4f7f\u7528 Helm \u6a21\u677f\u521b\u5efa Helm \u5b9e\u4f8b\u3001\u81ea\u5b9a\u4e49 Helm \u5b9e\u4f8b\u53c2\u6570\u3001\u5bf9 Helm \u5b9e\u4f8b\u8fdb\u884c\u5168\u751f\u547d\u5468\u671f\u7ba1\u7406\u7b49\u529f\u80fd\u3002

                      \u672c\u8282\u5c06\u4ee5 cert-manager \u4e3a\u4f8b\uff0c\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u5bb9\u5668\u7ba1\u7406\u754c\u9762\u521b\u5efa\u5e76\u7ba1\u7406 Helm \u5e94\u7528\u3002

                      "},{"location":"admin/kpanda/helm/helm-app.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                      • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 NS Admin \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                      "},{"location":"admin/kpanda/helm/helm-app.html#helm_1","title":"\u5b89\u88c5 Helm \u5e94\u7528","text":"

                      \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u5b89\u88c5 Helm \u5e94\u7528\u3002

                      1. \u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                      2. In the left navigation bar, click Helm Apps -> Helm Templates to enter the Helm template page.

                        On the Helm template page, select the Helm repository named addon; the UI then shows all Helm chart templates in the addon repository. Click the Chart named cert-manager.

                      3. On the installation page you can see the Chart's details. Select the version to install in the upper-right corner of the UI and click the Install button. Version v1.9.1 is selected for installation here.

                      4. Configure the Name, Namespace, and Version, and optionally customize the parameters by editing the YAML in the Parameter Configuration area below. Click OK.

                      5. The system automatically returns to the Helm app list; the newly created Helm app is in the Installing state, and after a while its status changes to Running.

                      "},{"location":"admin/kpanda/helm/helm-app.html#helm_2","title":"\u66f4\u65b0 Helm \u5e94\u7528","text":"

                      \u5f53\u6211\u4eec\u901a\u8fc7\u754c\u9762\u5b8c\u6210\u4e00\u4e2a Helm \u5e94\u7528\u7684\u5b89\u88c5\u540e\uff0c\u6211\u4eec\u53ef\u4ee5\u5bf9 Helm \u5e94\u7528\u6267\u884c\u66f4\u65b0\u64cd\u4f5c\u3002\u6ce8\u610f\uff1a\u53ea\u6709\u901a\u8fc7\u754c\u9762\u5b89\u88c5\u7684 Helm \u5e94\u7528\u624d\u652f\u6301\u4f7f\u7528\u754c\u9762\u8fdb\u884c\u66f4\u65b0\u64cd\u4f5c\u3002

                      \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u66f4\u65b0 Helm \u5e94\u7528\u3002

                      1. \u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                      2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb Helm \u5e94\u7528 \uff0c\u8fdb\u5165 Helm \u5e94\u7528\u5217\u8868\u9875\u9762\u3002

                        \u5728 Helm \u5e94\u7528\u5217\u8868\u9875\u9009\u62e9\u9700\u8981\u66f4\u65b0\u7684 Helm \u5e94\u7528\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\uff0c\u5728\u4e0b\u62c9\u9009\u62e9\u4e2d\u9009\u62e9 \u66f4\u65b0 \u64cd\u4f5c\u3002

                      3. \u70b9\u51fb \u66f4\u65b0 \u6309\u94ae\u540e\uff0c\u7cfb\u7edf\u5c06\u8df3\u8f6c\u81f3\u66f4\u65b0\u754c\u9762\uff0c\u60a8\u53ef\u4ee5\u6839\u636e\u9700\u8981\u5bf9 Helm \u5e94\u7528\u8fdb\u884c\u66f4\u65b0\uff0c\u6b64\u5904\u6211\u4eec\u4ee5\u66f4\u65b0 dao-2048 \u8fd9\u4e2a\u5e94\u7528\u7684 http \u7aef\u53e3\u4e3a\u4f8b\u3002

                      4. \u4fee\u6539\u5b8c\u76f8\u5e94\u53c2\u6570\u540e\u3002\u60a8\u53ef\u4ee5\u5728\u53c2\u6570\u914d\u7f6e\u4e0b\u70b9\u51fb \u53d8\u5316 \u6309\u94ae\uff0c\u5bf9\u6bd4\u4fee\u6539\u524d\u540e\u7684\u6587\u4ef6\uff0c\u786e\u5b9a\u65e0\u8bef\u540e\uff0c\u70b9\u51fb\u5e95\u90e8 \u786e\u5b9a \u6309\u94ae\uff0c\u5b8c\u6210 Helm \u5e94\u7528\u7684\u66f4\u65b0\u3002

                      5. \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de Helm \u5e94\u7528\u5217\u8868\uff0c\u53f3\u4e0a\u89d2\u5f39\u7a97\u63d0\u793a \u66f4\u65b0\u6210\u529f \u3002

                      "},{"location":"admin/kpanda/helm/helm-app.html#helm_3","title":"\u67e5\u770b Helm \u64cd\u4f5c\u8bb0\u5f55","text":"

                      Helm \u5e94\u7528\u7684\u6bcf\u6b21\u5b89\u88c5\u3001\u66f4\u65b0\u3001\u5220\u9664\u90fd\u6709\u8be6\u7ec6\u7684\u64cd\u4f5c\u8bb0\u5f55\u548c\u65e5\u5fd7\u53ef\u4f9b\u67e5\u770b\u3002

                      1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u4f9d\u6b21\u70b9\u51fb \u96c6\u7fa4\u8fd0\u7ef4 -> \u6700\u8fd1\u64cd\u4f5c \uff0c\u7136\u540e\u5728\u9875\u9762\u4e0a\u65b9\u9009\u62e9 Helm \u64cd\u4f5c \u6807\u7b7e\u9875\u3002\u6bcf\u4e00\u6761\u8bb0\u5f55\u5bf9\u5e94\u4e00\u6b21\u5b89\u88c5/\u66f4\u65b0/\u5220\u9664\u64cd\u4f5c\u3002

                      2. \u5982\u9700\u67e5\u770b\u6bcf\u4e00\u6b21\u64cd\u4f5c\u7684\u8be6\u7ec6\u65e5\u5fd7\uff1a\u5728\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u65e5\u5fd7 \u3002

                      3. \u6b64\u65f6\u9875\u9762\u4e0b\u65b9\u5c06\u4ee5\u63a7\u5236\u53f0\u7684\u5f62\u5f0f\u5c55\u793a\u8be6\u7ec6\u7684\u8fd0\u884c\u65e5\u5fd7\u3002

                      "},{"location":"admin/kpanda/helm/helm-app.html#helm_4","title":"\u5220\u9664 Helm \u5e94\u7528","text":"

                      \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u5220\u9664 Helm \u5e94\u7528\u3002

                      1. \u627e\u5230\u5f85\u5220\u9664\u7684 Helm \u5e94\u7528\u6240\u5728\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                      2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb Helm \u5e94\u7528 \uff0c\u8fdb\u5165 Helm \u5e94\u7528\u5217\u8868\u9875\u9762\u3002

                        \u5728 Helm \u5e94\u7528\u5217\u8868\u9875\u9009\u62e9\u60a8\u9700\u8981\u5220\u9664\u7684 Helm \u5e94\u7528\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\uff0c\u5728\u4e0b\u62c9\u9009\u62e9\u4e2d\u9009\u62e9 \u5220\u9664 \u3002

                      3. \u5728\u5f39\u7a97\u5185\u8f93\u5165 Helm \u5e94\u7528\u7684\u540d\u79f0\u8fdb\u884c\u786e\u8ba4\uff0c\u7136\u540e\u70b9\u51fb \u5220\u9664 \u6309\u94ae\u3002

                      "},{"location":"admin/kpanda/helm/helm-repo.html","title":"\u7ba1\u7406 Helm \u4ed3\u5e93","text":"

                      Helm \u4ed3\u5e93\u662f\u7528\u6765\u5b58\u50a8\u548c\u53d1\u5e03 Chart \u7684\u5b58\u50a8\u5e93\u3002Helm \u5e94\u7528\u6a21\u5757\u652f\u6301\u901a\u8fc7 HTTP(s) \u534f\u8bae\u6765\u8bbf\u95ee\u5b58\u50a8\u5e93\u4e2d\u7684 Chart \u5305\u3002\u7cfb\u7edf\u9ed8\u8ba4\u5185\u7f6e\u4e86\u4e0b\u8868\u6240\u793a\u7684 4 \u4e2a Helm \u4ed3\u5e93\u4ee5\u6ee1\u8db3\u4f01\u4e1a\u751f\u4ea7\u8fc7\u7a0b\u4e2d\u7684\u5e38\u89c1\u9700\u6c42\u3002

                      \u4ed3\u5e93 \u63cf\u8ff0 \u793a\u4f8b partner \u7531\u751f\u6001\u5408\u4f5c\u4f19\u4f34\u6240\u63d0\u4f9b\u7684\u5404\u7c7b\u4f18\u8d28\u7279\u8272 Chart tidb system \u7cfb\u7edf\u6838\u5fc3\u529f\u80fd\u7ec4\u4ef6\u53ca\u90e8\u5206\u9ad8\u7ea7\u529f\u80fd\u6240\u5fc5\u9700\u4f9d\u8d56\u7684 Chart\uff0c\u5982\u5fc5\u9700\u5b89\u88c5 insight-agent \u624d\u80fd\u591f\u83b7\u53d6\u96c6\u7fa4\u7684\u76d1\u63a7\u4fe1\u606f Insight addon \u4e1a\u52a1\u573a\u666f\u4e2d\u5e38\u89c1\u7684 Chart cert-manager community Kubernetes \u793e\u533a\u8f83\u4e3a\u70ed\u95e8\u7684\u5f00\u6e90\u7ec4\u4ef6 Chart Istio

                      \u9664\u4e0a\u8ff0\u9884\u7f6e\u4ed3\u5e93\u5916\uff0c\u60a8\u4e5f\u53ef\u4ee5\u81ea\u884c\u6dfb\u52a0\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93\u3002\u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u6dfb\u52a0\u3001\u66f4\u65b0\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93\u3002

                      "},{"location":"admin/kpanda/helm/helm-repo.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762

                      • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 NS Admin \u6216\u66f4\u9ad8\u6743\u9650 \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                      • \u5982\u679c\u4f7f\u7528\u79c1\u6709\u4ed3\u5e93\uff0c\u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u62e5\u6709\u5bf9\u8be5\u79c1\u6709\u4ed3\u5e93\u7684\u8bfb\u5199\u6743\u9650\u3002

                      "},{"location":"admin/kpanda/helm/helm-repo.html#helm_1","title":"\u5f15\u5165\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93","text":"

                      \u4e0b\u9762\u4ee5 Kubevela \u516c\u5f00\u7684\u955c\u50cf\u4ed3\u5e93\u4e3a\u4f8b\uff0c\u5f15\u5165 Helm \u4ed3\u5e93\u5e76\u7ba1\u7406\u3002

                      1. \u627e\u5230\u9700\u8981\u5f15\u5165\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                      2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u4f9d\u6b21\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u4ed3\u5e93 \uff0c\u8fdb\u5165 Helm \u4ed3\u5e93\u9875\u9762\u3002

                      3. \u5728 Helm \u4ed3\u5e93\u9875\u9762\u70b9\u51fb \u521b\u5efa\u4ed3\u5e93 \u6309\u94ae\uff0c\u8fdb\u5165\u521b\u5efa\u4ed3\u5e93\u9875\u9762\uff0c\u6309\u7167\u4e0b\u8868\u914d\u7f6e\u76f8\u5173\u53c2\u6570\u3002

                        • \u4ed3\u5e93\u540d\u79f0\uff1a\u8bbe\u7f6e\u4ed3\u5e93\u540d\u79f0\u3002\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26 - \uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u5e76\u7ed3\u5c3e\uff0c\u4f8b\u5982 kubevela
                        • \u4ed3\u5e93\u5730\u5740\uff1a\u7528\u6765\u6307\u5411\u76ee\u6807 Helm \u4ed3\u5e93\u7684 http\uff08s\uff09\u5730\u5740\u3002\u4f8b\u5982 https://charts.kubevela.net/core
                        • \u8df3\u8fc7 TLS \u9a8c\u8bc1: \u5982\u679c\u6dfb\u52a0\u7684 Helm \u4ed3\u5e93\u4e3a https \u5730\u5740\u4e14\u9700\u8df3\u8fc7 TLS \u9a8c\u8bc1\uff0c\u53ef\u4ee5\u52fe\u9009\u6b64\u9009\u9879\uff0c\u9ed8\u8ba4\u4e3a\u4e0d\u52fe\u9009
                        • \u8ba4\u8bc1\u65b9\u5f0f\uff1a\u8fde\u63a5\u4ed3\u5e93\u5730\u5740\u540e\u7528\u6765\u8fdb\u884c\u8eab\u4efd\u6821\u9a8c\u7684\u65b9\u5f0f\u3002\u5bf9\u4e8e\u516c\u5f00\u4ed3\u5e93\uff0c\u53ef\u4ee5\u9009\u62e9 None \uff0c\u79c1\u6709\u7684\u4ed3\u5e93\u9700\u8981\u8f93\u5165\u7528\u6237\u540d/\u5bc6\u7801\u4ee5\u8fdb\u884c\u8eab\u4efd\u6821\u9a8c
                        • \u6807\u7b7e\uff1a\u4e3a\u8be5 Helm \u4ed3\u5e93\u6dfb\u52a0\u6807\u7b7e\u3002\u4f8b\u5982 key: repo4\uff1bvalue: Kubevela
                        • \u6ce8\u89e3\uff1a\u4e3a\u8be5 Helm \u4ed3\u5e93\u6dfb\u52a0\u6ce8\u89e3\u3002\u4f8b\u5982 key: repo4\uff1bvalue: Kubevela
                        • \u63cf\u8ff0\uff1a\u4e3a\u8be5 Helm \u4ed3\u5e93\u6dfb\u52a0\u63cf\u8ff0\u3002\u4f8b\u5982\uff1a\u8fd9\u662f\u4e00\u4e2a Kubevela \u516c\u5f00 Helm \u4ed3\u5e93

                      4. \u70b9\u51fb \u786e\u5b9a \uff0c\u5b8c\u6210 Helm \u4ed3\u5e93\u7684\u521b\u5efa\u3002\u9875\u9762\u4f1a\u81ea\u52a8\u8df3\u8f6c\u81f3 Helm \u4ed3\u5e93\u5217\u8868\u3002
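
                      Optionally, you can verify from a terminal that the address serves a valid chart index before or after adding it in the UI (shown with the Kubevela repository used above):

                      helm repo add kubevela https://charts.kubevela.net/core\nhelm search repo kubevela\n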

                      "},{"location":"admin/kpanda/helm/helm-repo.html#helm_2","title":"\u66f4\u65b0 Helm \u4ed3\u5e93","text":"

                      \u5f53 Helm \u4ed3\u5e93\u7684\u5730\u5740\u4fe1\u606f\u53d1\u751f\u53d8\u5316\u65f6\uff0c\u53ef\u4ee5\u66f4\u65b0 Helm \u4ed3\u5e93\u7684\u5730\u5740\u3001\u8ba4\u8bc1\u65b9\u5f0f\u3001\u6807\u7b7e\u3001\u6ce8\u89e3\u53ca\u63cf\u8ff0\u4fe1\u606f\u3002

                      1. \u627e\u5230\u5f85\u66f4\u65b0\u4ed3\u5e93\u6240\u5728\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                      2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u4f9d\u6b21\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u4ed3\u5e93 \uff0c\u8fdb\u5165 Helm \u4ed3\u5e93\u5217\u8868\u9875\u9762\u3002

                      3. \u5728\u4ed3\u5e93\u5217\u8868\u9875\u9762\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684 Helm \u4ed3\u5e93\uff0c\u5728\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \u6309\u94ae\uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u70b9\u51fb \u66f4\u65b0 \u3002

                      4. \u5728 \u7f16\u8f91 Helm \u4ed3\u5e93 \u9875\u9762\u8fdb\u884c\u66f4\u65b0\uff0c\u5b8c\u6210\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                      5. \u8fd4\u56de Helm \u4ed3\u5e93\u5217\u8868\uff0c\u5c4f\u5e55\u63d0\u793a\u66f4\u65b0\u6210\u529f\u3002

                      "},{"location":"admin/kpanda/helm/helm-repo.html#helm_3","title":"\u5220\u9664 Helm \u4ed3\u5e93","text":"

                      \u9664\u4e86\u5f15\u5165\u3001\u66f4\u65b0\u4ed3\u5e93\u5916\uff0c\u60a8\u4e5f\u53ef\u4ee5\u5c06\u4e0d\u9700\u8981\u7684\u4ed3\u5e93\u5220\u9664\uff0c\u5305\u62ec\u7cfb\u7edf\u9884\u7f6e\u4ed3\u5e93\u548c\u7b2c\u4e09\u65b9\u4ed3\u5e93\u3002

                      1. \u627e\u5230\u5f85\u5220\u9664\u4ed3\u5e93\u6240\u5728\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                      2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u4f9d\u6b21\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u4ed3\u5e93 \uff0c\u8fdb\u5165 Helm \u4ed3\u5e93\u5217\u8868\u9875\u9762\u3002

                      3. \u5728\u4ed3\u5e93\u5217\u8868\u9875\u9762\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684 Helm \u4ed3\u5e93\uff0c\u5728\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \u6309\u94ae\uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u70b9\u51fb \u5220\u9664 \u3002

                      4. \u8f93\u5165\u4ed3\u5e93\u540d\u79f0\u8fdb\u884c\u786e\u8ba4\uff0c\u70b9\u51fb \u5220\u9664 \u3002

                      5. \u8fd4\u56de Helm \u4ed3\u5e93\u5217\u8868\uff0c\u5c4f\u5e55\u63d0\u793a\u5220\u9664\u6210\u529f\u3002

                      "},{"location":"admin/kpanda/helm/multi-archi-helm.html","title":"Helm \u5e94\u7528\u591a\u67b6\u6784\u548c\u5347\u7ea7\u5bfc\u5165\u6b65\u9aa4","text":"

                      \u901a\u5e38\u5728\u591a\u67b6\u6784\u96c6\u7fa4\u4e2d\uff0c\u4e5f\u4f1a\u4f7f\u7528\u591a\u67b6\u6784\u7684 Helm \u5305\u6765\u90e8\u7f72\u5e94\u7528\uff0c\u4ee5\u89e3\u51b3\u67b6\u6784\u5dee\u5f02\u5e26\u6765\u7684\u90e8\u7f72\u95ee\u9898\u3002 \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u5c06\u5355\u67b6\u6784 Helm \u5e94\u7528\u878d\u5408\u4e3a\u591a\u67b6\u6784\uff0c\u4ee5\u53ca\u591a\u67b6\u6784\u4e0e\u591a\u67b6\u6784 Helm \u5e94\u7528\u7684\u76f8\u4e92\u878d\u5408\u3002

                      "},{"location":"admin/kpanda/helm/multi-archi-helm.html#_1","title":"\u5bfc\u5165","text":""},{"location":"admin/kpanda/helm/multi-archi-helm.html#_2","title":"\u5355\u67b6\u6784\u5bfc\u5165","text":"

                      \u51c6\u5907\u597d\u5f85\u5bfc\u5165\u7684\u79bb\u7ebf\u5305 addon-offline-full-package-${version}-${arch}.tar.gz \u3002 \u628a\u8def\u5f84\u586b\u5199\u81f3 clusterConfig.yml \u914d\u7f6e\u6587\u4ef6\uff0c\u4f8b\u5982\uff1a

                      addonPackage:\n  path: \"/home/addon-offline-full-package-v0.9.0-amd64.tar.gz\"\n

Then run the import command:

                      ~/dce5-installer cluster-create -c /home/dce5/sample/clusterConfig.yaml -m /home/dce5/sample/manifest.yaml -d -j13\n
                      "},{"location":"admin/kpanda/helm/multi-archi-helm.html#_3","title":"\u591a\u67b6\u6784\u878d\u5408","text":"

                      \u51c6\u5907\u597d\u5f85\u878d\u5408\u7684\u79bb\u7ebf\u5305 addon-offline-full-package-${version}-${arch}.tar.gz\u3002

                      \u4ee5 addon-offline-full-package-v0.9.0-arm64.tar.gz \u4e3a\u4f8b\uff0c\u6267\u884c\u5bfc\u5165\u547d\u4ee4\uff1a

                      ~/dce5-installer import-addon -c /home/dce5/sample/clusterConfig.yaml --addon-path=/home/addon-offline-full-package-v0.9.0-arm64.tar.gz\n
                      "},{"location":"admin/kpanda/helm/multi-archi-helm.html#_4","title":"\u5347\u7ea7","text":""},{"location":"admin/kpanda/helm/multi-archi-helm.html#_5","title":"\u5355\u67b6\u6784\u5347\u7ea7","text":"

                      \u51c6\u5907\u597d\u5f85\u5bfc\u5165\u7684\u79bb\u7ebf\u5305 addon-offline-full-package-${version}-${arch}.tar.gz\u3002

                      \u628a\u8def\u5f84\u586b\u5199\u81f3 clusterConfig.yml \u914d\u7f6e\u6587\u4ef6\uff0c\u4f8b\u5982\uff1a

                      addonPackage:\n  path: \"/home/addon-offline-full-package-v0.11.0-amd64.tar.gz\"\n

Then run the import command:

                      ~/dce5-installer cluster-create -c /home/dce5/sample/clusterConfig.yaml -m /home/dce5/sample/manifest.yaml -d -j13\n
                      "},{"location":"admin/kpanda/helm/multi-archi-helm.html#_6","title":"\u591a\u67b6\u6784\u878d\u5408","text":"

                      \u51c6\u5907\u597d\u5f85\u878d\u5408\u7684\u79bb\u7ebf\u5305 addon-offline-full-package-${version}-${arch}.tar.gz\u3002

                      \u4ee5 addon-offline-full-package-v0.11.0-arm64.tar.gz \u4e3a\u4f8b\uff0c\u6267\u884c\u5bfc\u5165\u547d\u4ee4\uff1a

                      ~/dce5-installer import-addon -c /home/dce5/sample/clusterConfig.yaml --addon-path=/home/addon-offline-full-package-v0.11.0-arm64.tar.gz\n
                      "},{"location":"admin/kpanda/helm/multi-archi-helm.html#_7","title":"\u6ce8\u610f\u4e8b\u9879","text":""},{"location":"admin/kpanda/helm/multi-archi-helm.html#_8","title":"\u78c1\u76d8\u7a7a\u95f4","text":"

                      \u79bb\u7ebf\u5305\u6bd4\u8f83\u5927\uff0c\u4e14\u8fc7\u7a0b\u4e2d\u9700\u8981\u89e3\u538b\u548c load \u955c\u50cf\uff0c\u9700\u8981\u9884\u7559\u5145\u8db3\u7684\u7a7a\u95f4\uff0c\u5426\u5219\u53ef\u80fd\u5728\u8fc7\u7a0b\u4e2d\u62a5 \u201cno space left\u201d \u800c\u4e2d\u65ad\u3002

                      "},{"location":"admin/kpanda/helm/multi-archi-helm.html#_9","title":"\u5931\u8d25\u540e\u91cd\u8bd5","text":"

                      \u5982\u679c\u5728\u591a\u67b6\u6784\u878d\u5408\u6b65\u9aa4\u6267\u884c\u5931\u8d25\uff0c\u91cd\u8bd5\u524d\u9700\u8981\u6e05\u7406\u4e00\u4e0b\u6b8b\u7559\uff1a

                      rm -rf addon-offline-target-package\n
                      "},{"location":"admin/kpanda/helm/multi-archi-helm.html#_10","title":"\u955c\u50cf\u7a7a\u95f4","text":"

                      \u5982\u679c\u878d\u5408\u7684\u79bb\u7ebf\u5305\u4e2d\u5305\u542b\u4e86\u4e0e\u5bfc\u5165\u7684\u79bb\u7ebf\u5305\u4e0d\u4e00\u81f4\u7684\u955c\u50cf\u7a7a\u95f4\uff0c\u53ef\u80fd\u4f1a\u5728\u878d\u5408\u8fc7\u7a0b\u4e2d\u56e0\u4e3a\u955c\u50cf\u7a7a\u95f4\u4e0d\u5b58\u5728\u800c\u62a5\u9519\uff1a

                      \u89e3\u51b3\u529e\u6cd5\uff1a\u53ea\u9700\u8981\u5728\u878d\u5408\u4e4b\u524d\u521b\u5efa\u597d\u8be5\u955c\u50cf\u7a7a\u95f4\u5373\u53ef\uff0c\u4f8b\u5982\u4e0a\u56fe\u62a5\u9519\u53ef\u901a\u8fc7\u521b\u5efa\u955c\u50cf\u7a7a\u95f4 localhost \u63d0\u524d\u907f\u514d\u3002

                      "},{"location":"admin/kpanda/helm/multi-archi-helm.html#_11","title":"\u67b6\u6784\u51b2\u7a81","text":"

                      \u5347\u7ea7\u81f3\u4f4e\u4e8e 0.12.0 \u7248\u672c\u7684 addon \u65f6\uff0c\u7531\u4e8e\u76ee\u6807\u79bb\u7ebf\u5305\u91cc\u7684 charts-syncer \u6ca1\u6709\u68c0\u67e5\u955c\u50cf\u5b58\u5728\u5219\u4e0d\u63a8\u9001\u529f\u80fd\uff0c\u56e0\u6b64\u4f1a\u5728\u5347\u7ea7\u7684\u8fc7\u7a0b\u4e2d\u4f1a\u91cd\u65b0\u628a\u591a\u67b6\u6784\u51b2\u6210\u5355\u67b6\u6784\u3002 \u4f8b\u5982\uff1a\u5728 v0.10 \u7248\u672c\u5c06 addon \u5b9e\u73b0\u4e3a\u591a\u67b6\u6784\uff0c\u6b64\u65f6\u82e5\u5347\u7ea7\u4e3a v0.11 \u7248\u672c\uff0c\u5219\u591a\u67b6\u6784 addon \u4f1a\u88ab\u8986\u76d6\u4e3a\u5355\u67b6\u6784\uff1b\u82e5\u5347\u7ea7\u4e3a 0.12.0 \u53ca\u4ee5\u4e0a\u7248\u672c\u5219\u4ecd\u80fd\u591f\u4fdd\u6301\u591a\u67b6\u6784\u3002

                      "},{"location":"admin/kpanda/helm/upload-helm.html","title":"\u4e0a\u4f20 Helm \u6a21\u677f","text":"

                      \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u4e0a\u4f20 Helm \u6a21\u677f\uff0c\u64cd\u4f5c\u6b65\u9aa4\u89c1\u4e0b\u6587\u3002

                      1. \u5f15\u5165 Helm \u4ed3\u5e93\uff0c\u64cd\u4f5c\u6b65\u9aa4\u53c2\u8003\u5f15\u5165\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93\u3002

                      2. \u4e0a\u4f20 Helm Chart \u5230 Helm \u4ed3\u5e93\u3002

                        \u5ba2\u6237\u7aef\u4e0a\u4f20\u9875\u9762\u4e0a\u4f20

                        Note

                        \u6b64\u65b9\u5f0f\u9002\u7528\u4e8e Harbor\u3001ChartMuseum\u3001JFrog \u7c7b\u578b\u4ed3\u5e93\u3002

                        1. \u767b\u5f55\u4e00\u4e2a\u53ef\u4ee5\u8bbf\u95ee\u5230 Helm \u4ed3\u5e93\u7684\u8282\u70b9\uff0c\u5c06 Helm \u4e8c\u8fdb\u5236\u6587\u4ef6\u4e0a\u4f20\u5230\u8282\u70b9\uff0c\u5e76\u5b89\u88c5 cm-push \u63d2\u4ef6\uff08\u9700\u8981\u8fde\u901a\u5916\u7f51\u5e76\u63d0\u524d\u5b89\u88c5 Git\uff09\u3002

                          \u5b89\u88c5\u63d2\u4ef6\u6d41\u7a0b\u53c2\u8003\u5b89\u88c5 cm-push \u63d2\u4ef6\u3002

                        2. \u63a8\u9001 Helm Chart \u5230 Helm \u4ed3\u5e93\uff0c\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff1b

                          helm cm-push ${charts-dir} ${HELM_REPO_URL} --username ${username} --password ${password}\n

  Field descriptions (a usage example follows this list):

  • charts-dir: the directory of the Helm chart, or a packaged chart (i.e. a .tgz file).
  • HELM_REPO_URL: the URL of the Helm repository.
  • username/password: the username and password of a Helm repository user with push permission.
  • If access is over HTTPS and certificate verification should be skipped, add the --insecure flag.
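  For example, a push to a hypothetical repository could look like this (the chart path, URL, and credentials are placeholders, not values from this platform):

```bash
# Push a packaged chart to a hypothetical repository
helm cm-push ./mychart-1.0.0.tgz https://charts.example.com --username admin --password secret

# Or push an unpacked chart directory, skipping certificate verification over HTTPS
helm cm-push ./mychart https://charts.example.com --username admin --password secret --insecure
```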

  Note

  This method applies only to Harbor type repositories.

  1. Log in to the Harbor web UI and make sure the logged-in user has push permission.

  2. Go to the corresponding project, select the Helm Charts tab, and click the Upload button on the page to complete the Helm chart upload.

3. Sync remote repository data

  Manual sync | Auto sync

  By default, automatic Helm repository refresh is disabled for a cluster, so a manual sync is required. The rough steps are:

  Go to Helm Apps -> Helm Repositories, click the ┇ button on the right side of the repository list, and select Sync Repository to complete the data sync.

  To enable automatic Helm repository sync, go to Cluster Operations -> Cluster Settings -> Advanced Settings and turn on the Helm repository auto-refresh switch.

                      "},{"location":"admin/kpanda/inspect/index.html","title":"\u96c6\u7fa4\u5de1\u68c0","text":"

                      \u96c6\u7fa4\u5de1\u68c0\u53ef\u4ee5\u901a\u8fc7\u81ea\u52a8\u6216\u624b\u52a8\u65b9\u5f0f\uff0c\u5b9a\u671f\u6216\u968f\u65f6\u68c0\u67e5\u96c6\u7fa4\u7684\u6574\u4f53\u5065\u5eb7\u72b6\u6001\uff0c\u8ba9\u7ba1\u7406\u5458\u83b7\u5f97\u4fdd\u969c\u96c6\u7fa4\u5b89\u5168\u7684\u4e3b\u52a8\u6743\u3002 \u57fa\u4e8e\u5408\u7406\u7684\u5de1\u68c0\u8ba1\u5212\uff0c\u8fd9\u79cd\u4e3b\u52a8\u81ea\u53d1\u7684\u96c6\u7fa4\u68c0\u67e5\u53ef\u4ee5\u8ba9\u7ba1\u7406\u5458\u968f\u65f6\u638c\u63e1\u96c6\u7fa4\u72b6\u6001\uff0c\u6446\u8131\u4e4b\u524d\u51fa\u73b0\u6545\u969c\u65f6\u53ea\u80fd\u88ab\u52a8\u6392\u67e5\u95ee\u9898\u7684\u56f0\u5883\uff0c\u505a\u5230\u4e8b\u5148\u76d1\u63a7\u3001\u63d0\u524d\u9632\u8303\u3002

                      \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u63d0\u4f9b\u7684\u96c6\u7fa4\u5de1\u68c0\u529f\u80fd\uff0c\u652f\u6301\u4ece\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u5bb9\u5668\u7ec4\uff08Pod\uff09\u4e09\u4e2a\u7ef4\u5ea6\u8fdb\u884c\u81ea\u5b9a\u4e49\u5de1\u68c0\u9879\uff0c\u5de1\u68c0\u7ed3\u675f\u540e\u4f1a\u81ea\u52a8\u751f\u6210\u53ef\u89c6\u5316\u7684\u5de1\u68c0\u62a5\u544a\u3002

                      • \u96c6\u7fa4\u7ef4\u5ea6\uff1a\u68c0\u67e5\u96c6\u7fa4\u4e2d\u7cfb\u7edf\u7ec4\u4ef6\u7684\u8fd0\u884c\u60c5\u51b5\uff0c\u5305\u62ec\u96c6\u7fa4\u72b6\u6001\u3001\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\u4ee5\u53ca\u63a7\u5236\u8282\u70b9\u7279\u6709\u7684\u5de1\u68c0\u9879\u7b49\uff0c\u4f8b\u5982 kube-apiserver \u548c etcd \u7684\u72b6\u6001\u3002
                      • \u8282\u70b9\u7ef4\u5ea6\uff1a\u5305\u62ec\u63a7\u5236\u8282\u70b9\u548c\u5de5\u4f5c\u8282\u70b9\u901a\u7528\u7684\u68c0\u67e5\u9879\uff0c\u4f8b\u5982\u8282\u70b9\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\u3001\u53e5\u67c4\u6570\u3001PID \u72b6\u6001\u3001\u7f51\u7edc\u72b6\u6001\u3002
                      • \u5bb9\u5668\u7ec4\u7ef4\u5ea6\uff1a\u68c0\u67e5 Pod \u7684 CPU \u548c\u5185\u5b58\u4f7f\u7528\u60c5\u51b5\u3001\u8fd0\u884c\u72b6\u6001\u3001PV \u548c PVC \u7684\u72b6\u6001\u7b49\u3002

                      \u5982\u9700\u4e86\u89e3\u6216\u6267\u884c\u5b89\u5168\u65b9\u9762\u7684\u5de1\u68c0\uff0c\u53ef\u53c2\u8003\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u7684\u5b89\u5168\u626b\u63cf\u7c7b\u578b\u3002

                      "},{"location":"admin/kpanda/inspect/config.html","title":"\u521b\u5efa\u5de1\u68c0\u914d\u7f6e","text":"

                      \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u63d0\u4f9b\u96c6\u7fa4\u5de1\u68c0\u529f\u80fd\uff0c\u652f\u6301\u4ece\u96c6\u7fa4\u7ef4\u5ea6\u3001\u8282\u70b9\u7ef4\u5ea6\u3001\u5bb9\u5668\u7ec4\u7ef4\u5ea6\u8fdb\u884c\u5de1\u68c0\u3002

                      • \u96c6\u7fa4\u7ef4\u5ea6\uff1a\u68c0\u67e5\u96c6\u7fa4\u4e2d\u7cfb\u7edf\u7ec4\u4ef6\u7684\u8fd0\u884c\u60c5\u51b5\uff0c\u5305\u62ec\u96c6\u7fa4\u72b6\u6001\u3001\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\uff0c\u4ee5\u53ca\u63a7\u5236\u8282\u70b9\u7279\u6709\u7684\u5de1\u68c0\u9879\u7b49\uff0c\u4f8b\u5982 kube-apiserver \u548c etcd \u7684\u72b6\u6001\u3002
                      • \u8282\u70b9\u7ef4\u5ea6\uff1a\u5305\u62ec\u63a7\u5236\u8282\u70b9\u548c\u5de5\u4f5c\u8282\u70b9\u901a\u7528\u7684\u68c0\u67e5\u9879\uff0c\u4f8b\u5982\u8282\u70b9\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\u3001\u53e5\u67c4\u6570\u3001PID \u72b6\u6001\u3001\u7f51\u7edc\u72b6\u6001\u3002
                      • \u5bb9\u5668\u7ec4\u7ef4\u5ea6\uff1a\u68c0\u67e5 Pod \u7684 CPU \u548c\u5185\u5b58\u4f7f\u7528\u60c5\u51b5\u3001\u8fd0\u884c\u72b6\u6001\u3001PV \u548c PVC \u7684\u72b6\u6001\u7b49\u3002

                      \u4e0b\u9762\u4ecb\u7ecd\u5982\u4f55\u521b\u5efa\u5de1\u68c0\u914d\u7f6e\u3002

                      "},{"location":"admin/kpanda/inspect/config.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u96c6\u7fa4
                      • \u6240\u9009\u96c6\u7fa4\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u4e14\u5df2\u7ecf\u5728\u96c6\u7fa4\u4e2d\u5b89\u88c5\u4e86 insight \u7ec4\u4ef6
                      "},{"location":"admin/kpanda/inspect/config.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u96c6\u7fa4\u5de1\u68c0 \u3002

                      2. \u5728\u9875\u9762\u53f3\u4fa7\u70b9\u51fb \u5de1\u68c0\u914d\u7f6e \u3002

                      3. \u53c2\u8003\u4ee5\u4e0b\u8bf4\u660e\u586b\u5199\u5de1\u68c0\u914d\u7f6e\uff0c\u7136\u540e\u5728\u9875\u9762\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002

                        • \u96c6\u7fa4\uff1a\u4e0b\u62c9\u9009\u62e9\u8981\u5bf9\u54ea\u4e9b\u96c6\u7fa4\u8fdb\u884c\u5de1\u68c0\u3002\u5982\u679c\u9009\u62e9\u591a\u4e2a\u96c6\u7fa4\uff0c\u5219\u81ea\u52a8\u751f\u6210\u591a\u4e2a\u5de1\u68c0\u914d\u7f6e\uff08\u4ec5\u5de1\u68c0\u7684\u96c6\u7fa4\u4e0d\u4e00\u81f4\uff0c\u5176\u4ed6\u914d\u7f6e\u90fd\u5b8c\u5168\u4e00\u81f4\uff09
                        • \u5b9a\u65f6\u5de1\u68c0\uff1a\u542f\u7528\u540e\u53ef\u6839\u636e\u4e8b\u5148\u8bbe\u7f6e\u7684\u5de1\u68c0\u9891\u7387\u5b9a\u671f\u81ea\u52a8\u6267\u884c\u96c6\u7fa4\u5de1\u68c0
                        • \u5de1\u68c0\u9891\u7387\uff1a\u8bbe\u7f6e\u81ea\u52a8\u5de1\u68c0\u7684\u5468\u671f\uff0c\u4f8b\u5982\u6bcf\u5468\u4e8c\u4e0a\u5348\u5341\u70b9\u3002\u652f\u6301\u81ea\u5b9a\u4e49 CronExpression\uff0c\u53ef\u53c2\u8003 Cron \u65f6\u95f4\u8868\u8bed\u6cd5
                        • \u5de1\u68c0\u8bb0\u5f55\u4fdd\u7559\u6761\u6570\uff1a\u7d2f\u8ba1\u6700\u591a\u4fdd\u7559\u591a\u5c11\u6761\u5de1\u68c0\u8bb0\u5f55\uff0c\u5305\u62ec\u6240\u6709\u96c6\u7fa4\u7684\u5de1\u68c0\u8bb0\u5f55
                        • \u53c2\u6570\u914d\u7f6e\uff1a\u53c2\u6570\u914d\u7f6e\u5206\u4e3a\u96c6\u7fa4\u7ef4\u5ea6\u3001\u8282\u70b9\u7ef4\u5ea6\u3001\u5bb9\u5668\u7ec4\u7ef4\u5ea6\u4e09\u90e8\u5206\uff0c\u53ef\u4ee5\u6839\u636e\u573a\u666f\u9700\u6c42\u542f\u7528\u6216\u7981\u7528\u67d0\u4e9b\u5de1\u68c0\u9879\u3002
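  For reference, in standard five-field cron syntax (minute, hour, day of month, month, day of week), "every Tuesday at 10 AM" would be written as:

```
0 10 * * 2
```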

After the inspection configuration is created, it automatically appears in the inspection configuration list. Click the more-actions button on the right of a configuration to run an inspection immediately, modify the inspection configuration, or delete the configuration and its inspection records.

• Click Inspect to run an inspection immediately based on this configuration.
• Click Inspection Configuration to modify the configuration.
• Click Delete to delete the configuration and its historical inspection records.

Note

• After an inspection configuration is created, if Scheduled inspection is enabled, inspections run automatically at the specified times.
• If Scheduled inspection is not enabled, inspections must be triggered manually.
                      "},{"location":"admin/kpanda/inspect/inspect.html","title":"\u6267\u884c\u96c6\u7fa4\u5de1\u68c0","text":"

                      \u5de1\u68c0\u914d\u7f6e\u521b\u5efa\u5b8c\u6210\u540e\uff0c\u5982\u679c\u542f\u7528\u4e86 \u5b9a\u65f6\u5de1\u68c0 \u914d\u7f6e\uff0c\u5219\u4f1a\u5728\u6307\u5b9a\u65f6\u95f4\u81ea\u52a8\u6267\u884c\u5de1\u68c0\u3002\u5982\u672a\u542f\u7528 \u5b9a\u65f6\u5de1\u68c0 \u914d\u7f6e\uff0c\u5219\u9700\u8981\u624b\u52a8\u89e6\u53d1\u5de1\u68c0\u3002

                      \u6b64\u9875\u4ecb\u7ecd\u5982\u4f55\u624b\u52a8\u6267\u884c\u96c6\u7fa4\u5de1\u68c0\u3002

                      "},{"location":"admin/kpanda/inspect/inspect.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u96c6\u7fa4
                      • \u5df2\u521b\u5efa\u5de1\u68c0\u914d\u7f6e
                      • \u6240\u9009\u96c6\u7fa4\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u4e14\u5df2\u7ecf\u5728\u96c6\u7fa4\u4e2d\u5b89\u88c5\u4e86 insight \u7ec4\u4ef6
                      "},{"location":"admin/kpanda/inspect/inspect.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                      \u6267\u884c\u5de1\u68c0\u65f6\uff0c\u652f\u6301\u52fe\u9009\u591a\u4e2a\u96c6\u7fa4\u8fdb\u884c\u6279\u91cf\u5de1\u68c0\uff0c\u6216\u8005\u4ec5\u5bf9\u67d0\u4e00\u4e2a\u96c6\u7fa4\u8fdb\u884c\u5355\u72ec\u5de1\u68c0\u3002

                      \u6279\u91cf\u5de1\u68c0\u5355\u72ec\u5de1\u68c0
                      1. \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7684\u4e00\u7ea7\u5bfc\u822a\u680f\u70b9\u51fb \u96c6\u7fa4\u5de1\u68c0 \uff0c\u7136\u540e\u5728\u9875\u9762\u53f3\u4fa7\u70b9\u51fb \u5de1\u68c0 \u3002

                      2. \u52fe\u9009\u9700\u8981\u5de1\u68c0\u7684\u96c6\u7fa4\uff0c\u7136\u540e\u5728\u9875\u9762\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002

                        • \u82e5\u9009\u62e9\u591a\u4e2a\u96c6\u7fa4\u8fdb\u884c\u540c\u65f6\u5de1\u68c0\uff0c\u7cfb\u7edf\u5c06\u6839\u636e\u4e0d\u540c\u96c6\u7fa4\u7684\u5de1\u68c0\u914d\u7f6e\u8fdb\u884c\u5de1\u68c0\u3002
                        • \u5982\u672a\u8bbe\u7f6e\u96c6\u7fa4\u5de1\u68c0\u914d\u7f6e\uff0c\u5c06\u4f7f\u7528\u7cfb\u7edf\u9ed8\u8ba4\u914d\u7f6e\u3002

                      1. \u8fdb\u5165\u96c6\u7fa4\u5de1\u68c0\u9875\u9762\u3002
                      2. \u5728\u5bf9\u5e94\u5de1\u68c0\u914d\u7f6e\u7684\u53f3\u4fa7\u70b9\u51fb \u2507 \u66f4\u591a\u64cd\u4f5c\u6309\u94ae\uff0c\u7136\u540e\u5728\u5f39\u51fa\u7684\u83dc\u5355\u4e2d\u9009\u62e9 \u5de1\u68c0 \u5373\u53ef\u3002

                      "},{"location":"admin/kpanda/inspect/report.html","title":"\u67e5\u770b\u5de1\u68c0\u62a5\u544a","text":"

                      \u5de1\u68c0\u6267\u884c\u5b8c\u6210\u540e\uff0c\u53ef\u4ee5\u67e5\u770b\u5de1\u68c0\u8bb0\u5f55\u548c\u8be6\u7ec6\u7684\u5de1\u68c0\u62a5\u544a\u3002

                      "},{"location":"admin/kpanda/inspect/report.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u5df2\u7ecf\u521b\u5efa\u4e86\u5de1\u68c0\u914d\u7f6e
                      • \u5df2\u7ecf\u6267\u884c\u8fc7\u81f3\u5c11\u4e00\u6b21\u5de1\u68c0
                      "},{"location":"admin/kpanda/inspect/report.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u8fdb\u5165\u96c6\u7fa4\u5de1\u68c0\u9875\u9762\uff0c\u70b9\u51fb\u76ee\u6807\u5de1\u68c0\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                      2. \u70b9\u51fb\u60f3\u8981\u67e5\u770b\u7684\u5de1\u68c0\u8bb0\u5f55\u540d\u79f0\u3002

                        • \u6bcf\u6267\u884c\u4e00\u6b21\u5de1\u68c0\uff0c\u5c31\u4f1a\u751f\u6210\u4e00\u6761\u5de1\u68c0\u8bb0\u5f55\u3002
                        • \u5f53\u5de1\u68c0\u8bb0\u5f55\u8d85\u8fc7\u5de1\u68c0\u914d\u7f6e\u4e2d\u8bbe\u7f6e\u7684\u6700\u5927\u4fdd\u7559\u6761\u6570\u65f6\uff0c\u4ece\u6267\u884c\u65f6\u95f4\u6700\u65e9\u7684\u8bb0\u5f55\u5f00\u59cb\u5220\u9664\u3002

                      3. \u67e5\u770b\u5de1\u68c0\u7684\u8be6\u7ec6\u4fe1\u606f\uff0c\u6839\u636e\u5de1\u68c0\u914d\u7f6e\u53ef\u80fd\u5305\u62ec\u96c6\u7fa4\u8d44\u6e90\u6982\u89c8\u3001\u7cfb\u7edf\u7ec4\u4ef6\u7684\u8fd0\u884c\u60c5\u51b5\u7b49\u3002

                        \u5728\u9875\u9762\u53f3\u4e0a\u89d2\u53ef\u4ee5\u4e0b\u8f7d\u5de1\u68c0\u62a5\u544a\u6216\u5220\u9664\u8be5\u9879\u5de1\u68c0\u62a5\u544a\u3002

                      "},{"location":"admin/kpanda/namespaces/createns.html","title":"\u547d\u540d\u7a7a\u95f4","text":"

                      \u547d\u540d\u7a7a\u95f4\u662f Kubernetes \u4e2d\u7528\u6765\u8fdb\u884c\u8d44\u6e90\u9694\u79bb\u7684\u4e00\u79cd\u62bd\u8c61\u3002\u4e00\u4e2a\u96c6\u7fa4\u4e0b\u53ef\u4ee5\u5305\u542b\u591a\u4e2a\u4e0d\u91cd\u540d\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u6bcf\u4e2a\u547d\u540d\u7a7a\u95f4\u4e2d\u7684\u8d44\u6e90\u76f8\u4e92\u9694\u79bb\u3002\u6709\u5173\u547d\u540d\u7a7a\u95f4\u7684\u8be6\u7ec6\u4ecb\u7ecd\uff0c\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u3002

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u547d\u540d\u7a7a\u95f4\u7684\u76f8\u5173\u64cd\u4f5c\u3002

                      "},{"location":"admin/kpanda/namespaces/createns.html#_2","title":"\u521b\u5efa\u547d\u540d\u7a7a\u95f4","text":"

                      \u652f\u6301\u901a\u8fc7\u8868\u5355\u8f7b\u677e\u521b\u5efa\u547d\u540d\u7a7a\u95f4\uff0c\u4e5f\u652f\u6301\u901a\u8fc7\u7f16\u5199\u6216\u5bfc\u5165 YAML \u6587\u4ef6\u5feb\u901f\u521b\u5efa\u547d\u540d\u7a7a\u95f4\u3002

                      Note

                      • \u5728\u521b\u5efa\u547d\u540d\u7a7a\u95f4\u4e4b\u524d\uff0c\u9700\u8981\u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u521b\u5efa Kubernetes \u96c6\u7fa4\u3002
                      • \u96c6\u7fa4\u521d\u59cb\u5316\u540e\u901a\u5e38\u4f1a\u81ea\u52a8\u751f\u6210\u9ed8\u8ba4\u7684\u547d\u540d\u7a7a\u95f4 default \u3002\u4f46\u5bf9\u4e8e\u751f\u4ea7\u96c6\u7fa4\u800c\u8a00\uff0c\u4e3a\u4fbf\u4e8e\u7ba1\u7406\uff0c\u5efa\u8bae\u521b\u5efa\u5176\u4ed6\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u800c\u975e\u76f4\u63a5\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002
                      "},{"location":"admin/kpanda/namespaces/createns.html#_3","title":"\u8868\u5355\u521b\u5efa","text":"
                      1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                      2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u547d\u540d\u7a7a\u95f4 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae\u3002

                      3. \u586b\u5199\u547d\u540d\u7a7a\u95f4\u7684\u540d\u79f0\uff0c\u914d\u7f6e\u5de5\u4f5c\u7a7a\u95f4\u548c\u6807\u7b7e\uff08\u53ef\u9009\u8bbe\u7f6e\uff09\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                        Info

                        • \u547d\u540d\u7a7a\u95f4\u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4\u4e4b\u540e\uff0c\u8be5\u547d\u540d\u7a7a\u95f4\u7684\u8d44\u6e90\u5c31\u4f1a\u5171\u4eab\u7ed9\u6240\u7ed1\u5b9a\u7684\u5de5\u4f5c\u7a7a\u95f4\u3002\u6709\u5173\u5de5\u4f5c\u7a7a\u95f4\u7684\u8be6\u7ec6\u8bf4\u660e\uff0c\u53ef\u53c2\u8003\u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7\u3002

                        • \u547d\u540d\u7a7a\u95f4\u521b\u5efa\u5b8c\u6210\u540e\uff0c\u4ecd\u7136\u53ef\u4ee5\u7ed1\u5b9a/\u89e3\u7ed1\u5de5\u4f5c\u7a7a\u95f4\u3002

                      4. \u70b9\u51fb \u786e\u5b9a \uff0c\u5b8c\u6210\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3002\u5728\u547d\u540d\u7a7a\u95f4\u5217\u8868\u53f3\u4fa7\uff0c\u70b9\u51fb \u2507 \uff0c\u53ef\u4ee5\u4ece\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9\u67e5\u770b YAML\u3001\u4fee\u6539\u6807\u7b7e\u3001\u7ed1\u5b9a/\u89e3\u7ed1\u5de5\u4f5c\u7a7a\u95f4\u3001\u914d\u989d\u7ba1\u7406\u3001\u5220\u9664\u7b49\u66f4\u591a\u64cd\u4f5c\u3002

                      "},{"location":"admin/kpanda/namespaces/createns.html#yaml","title":"YAML \u521b\u5efa","text":"
                      1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                      2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u547d\u540d\u7a7a\u95f4 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4fa7\u7684 YAML \u521b\u5efa \u6309\u94ae\u3002

                      3. \u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u5185\u5bb9\uff0c\u6216\u8005\u4ece\u672c\u5730\u76f4\u63a5\u5bfc\u5165\u5df2\u6709\u7684 YAML \u6587\u4ef6\u3002

                        \u8f93\u5165 YAML \u5185\u5bb9\u540e\uff0c\u70b9\u51fb \u4e0b\u8f7d \u53ef\u4ee5\u5c06\u8be5 YAML \u6587\u4ef6\u4fdd\u5b58\u5230\u672c\u5730\u3002

                      4. \u6700\u540e\u5728\u5f39\u6846\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002
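For reference, a minimal Namespace manifest that could be pasted in step 3 might look like this (the name and label are hypothetical placeholders):

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: demo-ns          # hypothetical namespace name
  labels:
    environment: test    # optional label
```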

                      "},{"location":"admin/kpanda/namespaces/exclusive.html","title":"\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9","text":"

                      \u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\u6307\u5728 kubernetes \u96c6\u7fa4\u4e2d\uff0c\u901a\u8fc7\u6c61\u70b9\u548c\u6c61\u70b9\u5bb9\u5fcd\u7684\u65b9\u5f0f\u5b9e\u73b0\u7279\u5b9a\u547d\u540d\u7a7a\u95f4\u5bf9\u4e00\u4e2a\u6216\u591a\u4e2a\u8282\u70b9 CPU\u3001\u5185\u5b58\u7b49\u8d44\u6e90\u7684\u72ec\u4eab\u3002\u4e3a\u7279\u5b9a\u547d\u540d\u7a7a\u95f4\u914d\u7f6e\u72ec\u4eab\u8282\u70b9\u540e\uff0c\u5176\u5b83\u975e\u6b64\u547d\u540d\u7a7a\u95f4\u7684\u5e94\u7528\u548c\u670d\u52a1\u5747\u4e0d\u80fd\u8fd0\u884c\u5728\u88ab\u72ec\u4eab\u7684\u8282\u70b9\u4e0a\u3002\u4f7f\u7528\u72ec\u4eab\u8282\u70b9\u53ef\u4ee5\u8ba9\u91cd\u8981\u5e94\u7528\u72ec\u4eab\u4e00\u90e8\u5206\u8ba1\u7b97\u8d44\u6e90\uff0c\u4ece\u800c\u548c\u5176\u4ed6\u5e94\u7528\u5b9e\u73b0\u7269\u7406\u9694\u79bb\u3002

                      Note

                      \u5728\u8282\u70b9\u88ab\u8bbe\u7f6e\u4e3a\u72ec\u4eab\u8282\u70b9\u524d\u5df2\u7ecf\u8fd0\u884c\u5728\u6b64\u8282\u70b9\u4e0a\u7684\u5e94\u7528\u548c\u670d\u52a1\u5c06\u4e0d\u4f1a\u53d7\u5f71\u54cd\uff0c\u4f9d\u7136\u4f1a\u6b63\u5e38\u8fd0\u884c\u5728\u8be5\u8282\u70b9\u4e0a\uff0c\u4ec5\u5f53\u8fd9\u4e9b Pod \u88ab\u5220\u9664\u6216\u91cd\u5efa\u65f6\uff0c\u624d\u4f1a\u8c03\u5ea6\u5230\u5176\u5b83\u975e\u72ec\u4eab\u8282\u70b9\u4e0a\u3002

                      "},{"location":"admin/kpanda/namespaces/exclusive.html#_2","title":"\u51c6\u5907\u5de5\u4f5c","text":"

                      \u68c0\u67e5\u5f53\u524d\u96c6\u7fa4\u7684 kube-apiserver \u662f\u5426\u542f\u7528\u4e86 PodNodeSelector \u548c PodTolerationRestriction \u51c6\u5165\u63a7\u5236\u5668\u3002

                      \u4f7f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\u529f\u80fd\u9700\u8981\u7528\u6237\u542f\u7528 kube-apiserver \u4e0a\u7684 PodNodeSelector \u548c PodTolerationRestriction \u4e24\u4e2a\u7279\u6027\u51c6\u5165\u63a7\u5236\u5668\uff08Admission Controllers\uff09\uff0c\u5173\u4e8e\u51c6\u5165\u63a7\u5236\u5668\u66f4\u591a\u8bf4\u660e\u8bf7\u53c2\u9605 kubernetes Admission Controllers Reference\u3002

                      \u60a8\u53ef\u4ee5\u524d\u5f80\u5f53\u524d\u96c6\u7fa4\u4e0b\u4efb\u610f\u4e00\u4e2a Master \u8282\u70b9\u4e0a\u68c0\u67e5 kube-apiserver.yaml \u6587\u4ef6\u5185\u662f\u5426\u542f\u7528\u4e86\u8fd9\u4e24\u4e2a\u7279\u6027\uff0c\u4e5f\u53ef\u4ee5\u5728 Master \u8282\u70b9\u4e0a\u6267\u884c\u6267\u884c\u5982\u4e0b\u547d\u4ee4\u8fdb\u884c\u5feb\u901f\u68c0\u67e5\uff1a

```bash\n[root@g-master1 ~]# cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep enable-admission-plugins\n\n# Expected output:\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction\n```\n
                      "},{"location":"admin/kpanda/namespaces/exclusive.html#_3","title":"\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9","text":"

                      \u7531\u4e8e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u8fd0\u884c\u7740 kpanda\u3001ghippo\u3001insight \u7b49\u5e73\u53f0\u57fa\u7840\u7ec4\u4ef6\uff0c\u5728 Global \u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\u5c06\u53ef\u80fd\u5bfc\u81f4\u5f53\u7cfb\u7edf\u7ec4\u4ef6\u91cd\u542f\u540e\uff0c\u7cfb\u7edf\u7ec4\u4ef6\u65e0\u6cd5\u8c03\u5ea6\u5230\u88ab\u72ec\u4eab\u7684\u8282\u70b9\u4e0a\uff0c\u5f71\u54cd\u7cfb\u7edf\u7684\u6574\u4f53\u9ad8\u53ef\u7528\u80fd\u529b\u3002\u56e0\u6b64\uff0c\u901a\u5e38\u60c5\u51b5\u4e0b\uff0c\u6211\u4eec\u4e0d\u63a8\u8350\u7528\u6237\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\u7279\u6027\u3002

                      \u5982\u679c\u60a8\u786e\u5b9e\u9700\u8981\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\uff0c\u8bf7\u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\u8fdb\u884c\u5f00\u542f\uff1a

                      1. \u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684 kube-apiserver \u542f\u7528\u4e86 PodNodeSelector \u548c PodTolerationRestriction \u51c6\u5165\u63a7\u5236\u5668

                        Note

                        \u5982\u679c\u96c6\u7fa4\u5df2\u542f\u7528\u4e86\u4e0a\u8ff0\u7684\u4e24\u4e2a\u51c6\u5165\u63a7\u5236\u5668\uff0c\u8bf7\u8df3\u8fc7\u6b64\u6b65\uff0c\u76f4\u63a5\u524d\u5f80\u914d\u7f6e\u7cfb\u7edf\u7ec4\u4ef6\u5bb9\u5fcd\u3002

                        \u524d\u5f80\u5f53\u524d\u96c6\u7fa4\u4e0b\u4efb\u610f\u4e00\u4e2a Master \u8282\u70b9\u4e0a\u4fee\u6539 kube-apiserver.yaml \u914d\u7f6e\u6587\u4ef6\uff0c\u4e5f\u53ef\u4ee5\u5728 Master \u8282\u70b9\u4e0a\u6267\u884c\u6267\u884c\u5982\u4e0b\u547d\u4ee4\u8fdb\u884c\u914d\u7f6e\uff1a

[root@g-master1 ~]# vi /etc/kubernetes/manifests/kube-apiserver.yaml\n\n# The file content looks like this:\napiVersion: v1\nkind: Pod\nmetadata:\n    ......\nspec:\ncontainers:\n- command:\n    - kube-apiserver\n    ......\n    - --default-not-ready-toleration-seconds=300\n    - --default-unreachable-toleration-seconds=300\n    - --enable-admission-plugins=NodeRestriction   # list of enabled admission controllers\n    - --enable-aggregator-routing=False\n    - --enable-bootstrap-token-auth=true\n    - --endpoint-reconciler-type=lease\n    - --etcd-cafile=/etc/kubernetes/ssl/etcd/ca.crt\n    ......\n

  Find the --enable-admission-plugins parameter and append the PodNodeSelector and PodTolerationRestriction admission controllers, separated by commas. For example:

# Append ,PodNodeSelector,PodTolerationRestriction to the list\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction\n
2. Add toleration annotations to the namespaces of platform components

  After enabling the admission controllers, you need to add toleration annotations to the namespaces where platform components reside, to keep the platform components highly available.

  The system component namespaces of the Suanfeng AI computing platform are currently as follows:

  | Namespace | System components |
  | --- | --- |
  | kpanda-system | kpanda |
  | hwameiStor-system | hwameiStor |
  | istio-system | istio |
  | metallb-system | metallb |
  | cert-manager-system | cert-manager |
  | contour-system | contour |
  | kubean-system | kubean |
  | ghippo-system | ghippo |
  | kcoral-system | kcoral |
  | kcollie-system | kcollie |
  | insight-system | insight, insight-agent |
  | ipavo-system | ipavo |
  | kairship-system | kairship |
  | karmada-system | karmada |
  | amamba-system | amamba, jenkins |
  | skoala-system | skoala |
  | mspider-system | mspider |
  | mcamel-system | mcamel-rabbitmq, mcamel-elasticsearch, mcamel-mysql, mcamel-redis, mcamel-kafka, mcamel-minio, mcamel-postgresql |
  | spidernet-system | spidernet |
  | kangaroo-system | kangaroo |
  | gmagpie-system | gmagpie |
  | dowl-system | dowl |

  Check whether all of the namespaces above exist in the current cluster, then run the following command to add the annotation scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]' to each of them.

kubectl annotate ns <namespace-name> scheduler.alpha.kubernetes.io/defaultTolerations='[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]'\n
  Make sure to replace <namespace-name> with the name of the platform namespace to annotate.
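  To verify that the annotation was applied, you can inspect the namespace afterwards, for example:

```bash
# Print the annotations of the namespace (replace <namespace-name> accordingly)
kubectl get ns <namespace-name> -o jsonpath='{.metadata.annotations}'
```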

3. Use the UI to set exclusive nodes for the namespace

  Once you have confirmed that the PodNodeSelector and PodTolerationRestriction admission controllers are enabled on the cluster's API server, follow the steps below to set exclusive nodes for a namespace through the UI of the Suanfeng AI computing platform.

  1. Click the cluster name on the Cluster List page, then click Namespaces in the left navigation bar.

  2. Click the namespace name, then click the Exclusive Nodes tab and click Add Node on the lower right.

  3. On the left side of the page, choose which nodes the namespace should own exclusively; on the right you can clear the selection or remove an individual selected node. Finally, click OK at the bottom.

  4. The namespace's existing exclusive nodes are shown in the list; you can select Cancel Exclusivity on the right of a node.

    After exclusivity is cancelled, Pods from other namespaces can again be scheduled onto that node.

                      "},{"location":"admin/kpanda/namespaces/exclusive.html#_4","title":"\u5728 \u975e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9","text":"

                      \u5728 \u975e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\uff0c\u8bf7\u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\u8fdb\u884c\u5f00\u542f\uff1a

                      1. \u4e3a\u5f53\u524d\u96c6\u7fa4\u7684 kube-apiserver \u542f\u7528\u4e86 PodNodeSelector \u548c PodTolerationRestriction \u51c6\u5165\u63a7\u5236\u5668

                        Note

                        \u5982\u679c\u96c6\u7fa4\u5df2\u542f\u7528\u4e86\u4e0a\u8ff0\u7684\u4e24\u4e2a\u51c6\u5165\u63a7\u5236\u5668\uff0c\u8bf7\u8df3\u8fc7\u6b64\u6b65\uff0c\u76f4\u63a5\u524d\u5f80\u754c\u9762\u4e3a\u547d\u540d\u7a7a\u95f4\u8bbe\u7f6e\u72ec\u4eab\u8282\u70b9

                        \u524d\u5f80\u5f53\u524d\u96c6\u7fa4\u4e0b\u4efb\u610f\u4e00\u4e2a Master \u8282\u70b9\u4e0a\u4fee\u6539 kube-apiserver.yaml \u914d\u7f6e\u6587\u4ef6\uff0c\u4e5f\u53ef\u4ee5\u5728 Master \u8282\u70b9\u4e0a\u6267\u884c\u6267\u884c\u5982\u4e0b\u547d\u4ee4\u8fdb\u884c\u914d\u7f6e\uff1a

[root@g-master1 ~]# vi /etc/kubernetes/manifests/kube-apiserver.yaml\n\n# The file content looks like this:\napiVersion: v1\nkind: Pod\nmetadata:\n    ......\nspec:\ncontainers:\n- command:\n    - kube-apiserver\n    ......\n    - --default-not-ready-toleration-seconds=300\n    - --default-unreachable-toleration-seconds=300\n    - --enable-admission-plugins=NodeRestriction   # list of enabled admission controllers\n    - --enable-aggregator-routing=False\n    - --enable-bootstrap-token-auth=true\n    - --endpoint-reconciler-type=lease\n    - --etcd-cafile=/etc/kubernetes/ssl/etcd/ca.crt\n    ......\n

  Find the --enable-admission-plugins parameter and append the PodNodeSelector and PodTolerationRestriction admission controllers, separated by commas. For example:

# Append ,PodNodeSelector,PodTolerationRestriction to the list\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction\n
2. Use the UI to set exclusive nodes for the namespace

  Once you have confirmed that the PodNodeSelector and PodTolerationRestriction admission controllers are enabled on the cluster's API server, follow the steps below to set exclusive nodes for a namespace through the UI of the Suanfeng AI computing platform.

  1. Click the cluster name on the Cluster List page, then click Namespaces in the left navigation bar.

  2. Click the namespace name, then click the Exclusive Nodes tab and click Add Node on the lower right.

  3. On the left side of the page, choose which nodes the namespace should own exclusively; on the right you can clear the selection or remove an individual selected node. Finally, click OK at the bottom.

  4. The namespace's existing exclusive nodes are shown in the list; you can select Cancel Exclusivity on the right of a node.

    After exclusivity is cancelled, Pods from other namespaces can again be scheduled onto that node.

3. Add toleration annotations to the namespaces of components that require high availability (optional)

  Run the following command to add the annotation scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]' to the namespaces of components that require high availability:

kubectl annotate ns <namespace-name> scheduler.alpha.kubernetes.io/defaultTolerations='[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]'\n

  Make sure to replace <namespace-name> with the name of the platform namespace to annotate.

                      "},{"location":"admin/kpanda/namespaces/podsecurity.html","title":"\u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565","text":"

                      \u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565\u6307\u5728 kubernetes \u96c6\u7fa4\u4e2d\uff0c\u901a\u8fc7\u4e3a\u6307\u5b9a\u547d\u540d\u7a7a\u95f4\u914d\u7f6e\u4e0d\u540c\u7684\u7b49\u7ea7\u548c\u6a21\u5f0f\uff0c\u5b9e\u73b0\u5728\u5b89\u5168\u7684\u5404\u4e2a\u65b9\u9762\u63a7\u5236 Pod \u7684\u884c\u4e3a\uff0c\u53ea\u6709\u6ee1\u8db3\u4e00\u5b9a\u7684\u6761\u4ef6\u7684 Pod \u624d\u4f1a\u88ab\u7cfb\u7edf\u63a5\u53d7\u3002\u5b83\u8bbe\u7f6e\u4e09\u4e2a\u7b49\u7ea7\u548c\u4e09\u79cd\u6a21\u5f0f\uff0c\u7528\u6237\u53ef\u4ee5\u6839\u636e\u81ea\u5df1\u7684\u9700\u6c42\u9009\u62e9\u66f4\u52a0\u5408\u9002\u7684\u65b9\u6848\u6765\u8bbe\u7f6e\u9650\u5236\u7b56\u7565\u3002

                      Note

                      \u4e00\u6761\u5b89\u5168\u6a21\u5f0f\u4ec5\u80fd\u914d\u7f6e\u4e00\u6761\u5b89\u5168\u7b56\u7565\u3002\u540c\u65f6\u8bf7\u8c28\u614e\u4e3a\u547d\u540d\u7a7a\u95f4\u914d\u7f6e enforce \u7684\u5b89\u5168\u6a21\u5f0f\uff0c\u8fdd\u53cd\u540e\u5c06\u4f1a\u5bfc\u81f4 Pod \u65e0\u6cd5\u521b\u5efa\u3002

                      \u672c\u8282\u5c06\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u5bb9\u5668\u7ba1\u7406\u754c\u9762\u4e3a\u547d\u540d\u7a7a\u95f4\u914d\u7f6e\u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565\u3002

                      "},{"location":"admin/kpanda/namespaces/podsecurity.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u96c6\u7fa4\u7684\u7248\u672c\u9700\u8981\u5728 v1.22 \u4ee5\u4e0a\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                      • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 NS Admin \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                      "},{"location":"admin/kpanda/namespaces/podsecurity.html#_3","title":"\u4e3a\u547d\u540d\u7a7a\u95f4\u914d\u7f6e\u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565","text":"
                      1. \u9009\u62e9\u9700\u8981\u914d\u7f6e\u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u8fdb\u5165\u8be6\u60c5\u9875\u3002\u5728 \u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565 \u9875\u9762\u70b9\u51fb \u914d\u7f6e\u7b56\u7565 \uff0c\u8fdb\u5165\u914d\u7f6e\u9875\u3002

                      2. \u5728\u914d\u7f6e\u9875\u70b9\u51fb \u6dfb\u52a0\u7b56\u7565 \uff0c\u5219\u4f1a\u51fa\u73b0\u4e00\u6761\u7b56\u7565\uff0c\u5305\u62ec\u5b89\u5168\u7ea7\u522b\u548c\u5b89\u5168\u6a21\u5f0f\uff0c\u4ee5\u4e0b\u662f\u5bf9\u5b89\u5168\u7ea7\u522b\u548c\u5b89\u5168\u7b56\u7565\u7684\u8be6\u7ec6\u4ecb\u7ecd\u3002

                        \u5b89\u5168\u7ea7\u522b \u63cf\u8ff0 Privileged \u4e0d\u53d7\u9650\u5236\u7684\u7b56\u7565\uff0c\u63d0\u4f9b\u6700\u5927\u53ef\u80fd\u8303\u56f4\u7684\u6743\u9650\u8bb8\u53ef\u3002\u6b64\u7b56\u7565\u5141\u8bb8\u5df2\u77e5\u7684\u7279\u6743\u63d0\u5347\u3002 Baseline \u9650\u5236\u6027\u6700\u5f31\u7684\u7b56\u7565\uff0c\u7981\u6b62\u5df2\u77e5\u7684\u7b56\u7565\u63d0\u5347\u3002\u5141\u8bb8\u4f7f\u7528\u9ed8\u8ba4\u7684\uff08\u89c4\u5b9a\u6700\u5c11\uff09Pod \u914d\u7f6e\u3002 Restricted \u9650\u5236\u6027\u975e\u5e38\u5f3a\u7684\u7b56\u7565\uff0c\u9075\u5faa\u5f53\u524d\u7684\u4fdd\u62a4 Pod \u7684\u6700\u4f73\u5b9e\u8df5\u3002 \u5b89\u5168\u6a21\u5f0f \u63cf\u8ff0 Audit \u8fdd\u53cd\u6307\u5b9a\u7b56\u7565\u4f1a\u5728\u5ba1\u8ba1\u65e5\u5fd7\u4e2d\u6dfb\u52a0\u65b0\u7684\u5ba1\u8ba1\u4e8b\u4ef6\uff0cPod \u53ef\u4ee5\u88ab\u521b\u5efa\u3002 Warn \u8fdd\u53cd\u6307\u5b9a\u7b56\u7565\u4f1a\u8fd4\u56de\u7528\u6237\u53ef\u89c1\u7684\u544a\u8b66\u4fe1\u606f\uff0cPod \u53ef\u4ee5\u88ab\u521b\u5efa\u3002 Enforce \u8fdd\u53cd\u6307\u5b9a\u7b56\u7565\u4f1a\u5bfc\u81f4 Pod \u65e0\u6cd5\u521b\u5efa\u3002

3. Different security levels correspond to different check items. If you are unsure how to configure your namespace, click Policy Configuration Explanation in the upper-right corner of the page for details (see also the label sketch after these steps).

4. Click OK; if creation succeeds, the security policy you configured appears on the page.

5. Click ┇ to edit or delete the security policy you configured.
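These levels and modes correspond to the standard Kubernetes Pod Security Admission namespace labels available since v1.22. As a sketch of what such a configuration amounts to on the namespace itself (the namespace name is hypothetical, and the exact labels the UI applies may differ):

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: demo-ns                                   # hypothetical namespace
  labels:
    pod-security.kubernetes.io/enforce: baseline  # Enforce mode at the Baseline level
    pod-security.kubernetes.io/warn: restricted   # Warn mode at the Restricted level
    pod-security.kubernetes.io/audit: restricted  # Audit mode at the Restricted level
```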

                      "},{"location":"admin/kpanda/network/create-ingress.html","title":"\u521b\u5efa\u8def\u7531\uff08Ingress\uff09","text":"

                      \u5728 Kubernetes \u96c6\u7fa4\u4e2d\uff0cIngress \u516c\u5f00\u4ece\u96c6\u7fa4\u5916\u90e8\u5230\u96c6\u7fa4\u5185\u670d\u52a1\u7684 HTTP \u548c HTTPS \u8def\u7531\u3002 \u6d41\u91cf\u8def\u7531\u7531 Ingress \u8d44\u6e90\u4e0a\u5b9a\u4e49\u7684\u89c4\u5219\u63a7\u5236\u3002\u4e0b\u9762\u662f\u4e00\u4e2a\u5c06\u6240\u6709\u6d41\u91cf\u90fd\u53d1\u9001\u5230\u540c\u4e00 Service \u7684\u7b80\u5355 Ingress \u793a\u4f8b\uff1a
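```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-ingress      # hypothetical name
spec:
  defaultBackend:
    service:
      name: my-service       # hypothetical Service receiving all traffic
      port:
        number: 80
```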

An Ingress is an API object that manages external access to Services in a cluster, typically over HTTP. An Ingress can provide load balancing, SSL termination, and name-based virtual hosting.

"},{"location":"admin/kpanda/network/create-ingress.html#_1","title":"Prerequisites","text":"
• The container management module has a Kubernetes cluster integrated or created, and the cluster's UI is accessible.
• A namespace and a user have been created, and the user has been granted the NS Editor role; for details, refer to Namespace Authorization.
• An Ingress instance has been created, an application workload has been deployed, and the corresponding Service has been created.
• When a single instance has multiple containers, make sure the ports used by the containers do not conflict; otherwise the deployment will fail.
"},{"location":"admin/kpanda/network/create-ingress.html#_2","title":"Create an Ingress","text":"
1. After logging in as an NS Editor user, click Cluster List in the upper-left corner to enter the Cluster List page, then click a cluster name in the list.

2. In the left navigation bar, click Container Network -> Ingress to enter the Ingress list, then click the Create Ingress button in the upper right.

  Note

  You can also create an Ingress via YAML.

3. On the Create Ingress page, configure the Ingress. Two protocol types are available; refer to the following two parameter tables.

                      "},{"location":"admin/kpanda/network/create-ingress.html#http","title":"\u521b\u5efa HTTP \u534f\u8bae\u8def\u7531","text":"

                      \u8f93\u5165\u5982\u4e0b\u53c2\u6570\uff1a

                      • \u8def\u7531\u540d\u79f0 \uff1a\u5fc5\u586b\uff0c\u8f93\u5165\u65b0\u5efa\u8def\u7531\u7684\u540d\u79f0\u3002
                      • \u547d\u540d\u7a7a\u95f4 \uff1a\u5fc5\u586b\uff0c\u9009\u62e9\u65b0\u5efa\u670d\u52a1\u6240\u5728\u7684\u547d\u540d\u7a7a\u95f4\u3002\u5173\u4e8e\u547d\u540d\u7a7a\u95f4\u66f4\u591a\u4fe1\u606f\u8bf7\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6982\u8ff0\u3002
                      • \u8bbe\u7f6e\u8def\u7531\u89c4\u5219 \uff1a
                        • \u57df\u540d \uff1a\u5fc5\u586b\uff0c\u4f7f\u7528\u57df\u540d\u5bf9\u5916\u63d0\u4f9b\u8bbf\u95ee\u670d\u52a1\u3002\u9ed8\u8ba4\u4e3a\u96c6\u7fa4\u7684\u57df\u540d\u3002
                        • \u534f\u8bae \uff1a\u5fc5\u586b\uff0c\u6307\u6388\u6743\u5165\u7ad9\u5230\u8fbe\u96c6\u7fa4\u670d\u52a1\u7684\u534f\u8bae\uff0c\u652f\u6301 HTTP \uff08\u4e0d\u9700\u8981\u8eab\u4efd\u8ba4\u8bc1\uff09\u6216 HTTPS\uff08\u9700\u9700\u8981\u914d\u7f6e\u8eab\u4efd\u8ba4\u8bc1\uff09 \u534f\u8bae\u3002 \u8fd9\u91cc\u9009\u62e9 HTTP \u534f\u8bae\u7684\u8def\u7531\u3002
                        • \u8f6c\u53d1\u7b56\u7565 \uff1a\u9009\u586b\uff0c\u6307\u5b9a Ingress \u7684\u8bbf\u95ee\u7b56\u7565
                        • \u8def\u5f84 \uff1a\u6307\u5b9a\u670d\u52a1\u8bbf\u95ee\u7684URL\u8def\u5f84\uff0c\u9ed8\u8ba4\u4e3a\u6839\u8def\u5f84
                        • \u76ee\u6807\u670d\u52a1 \uff1a\u8fdb\u884c\u8def\u7531\u7684\u670d\u52a1\u540d\u79f0
                        • \u76ee\u6807\u670d\u52a1\u7aef\u53e3 \uff1a\u670d\u52a1\u5bf9\u5916\u66b4\u9732\u7684\u7aef\u53e3
                      • \u8d1f\u8f7d\u5747\u8861\u5668\u7c7b\u578b \uff1a\u5fc5\u586b\uff0cIngress \u5b9e\u4f8b\u7684\u4f7f\u7528\u8303\u56f4
                        • \u5e73\u53f0\u7ea7\u8d1f\u8f7d\u5747\u8861\u5668 \uff1a\u540c\u4e00\u4e2a\u96c6\u7fa4\u5185\uff0c\u5171\u4eab\u540c\u4e00\u4e2a Ingress \u5b9e\u4f8b\uff0c\u5176\u4e2d Pod \u90fd\u53ef\u4ee5\u63a5\u6536\u5230\u7531\u8be5\u8d1f\u8f7d\u5747\u8861\u5206\u53d1\u7684\u8bf7\u6c42
                        • \u79df\u6237\u7ea7\u8d1f\u8f7d\u5747\u8861\u5668 \uff1a\u79df\u6237\u8d1f\u8f7d\u5747\u8861\u5668\uff0cIngress \u5b9e\u4f8b\u72ec\u5c5e\u4e8e\u5f53\u524d\u547d\u540d\u7a7a\uff0c\u6216\u8005\u72ec\u5c5e\u4e8e\u67d0\u4e00\u5de5\u4f5c\u7a7a\u95f4\uff0c \u5e76\u4e14\u8bbe\u7f6e\u7684\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u5305\u542b\u5f53\u524d\u547d\u540d\u7a7a\u95f4\uff0c\u5176\u4e2d Pod \u90fd\u53ef\u4ee5\u63a5\u6536\u5230\u7531\u8be5\u8d1f\u8f7d\u5747\u8861\u5206\u53d1\u7684\u8bf7\u6c42
                      • Ingress Class \uff1a\u9009\u586b\uff0c\u9009\u62e9\u5bf9\u5e94\u7684 Ingress \u5b9e\u4f8b\uff0c\u9009\u62e9\u540e\u5c06\u6d41\u91cf\u5bfc\u5165\u5230\u6307\u5b9a\u7684 Ingress \u5b9e\u4f8b\u3002
                        • \u4e3a None \u65f6\u4f7f\u7528\u9ed8\u8ba4\u7684 DefaultClass\uff0c\u8bf7\u5728\u521b\u5efa Ingress \u5b9e\u4f8b\u65f6\u8bbe\u7f6e DefaultClass\uff0c \u66f4\u591a\u4fe1\u606f\u8bf7\u53c2\u8003 Ingress Class
                        • \u82e5\u9009\u62e9\u5176\u4ed6\u5b9e\u4f8b\uff08\u5982 ngnix \uff09\uff0c\u5219\u4f1a\u51fa\u73b0\u9ad8\u7ea7\u914d\u7f6e\uff0c\u53ef\u8bbe\u7f6e \u4f1a\u8bdd\u4fdd\u6301 \u3001 \u8def\u5f84\u91cd\u5199 \u3001 \u91cd\u5b9a\u5411 \u548c \u6d41\u91cf\u5206\u53d1 \u3002
                      • \u4f1a\u8bdd\u4fdd\u6301 \uff1a\u9009\u586b\uff0c\u4f1a\u8bdd\u4fdd\u6301\u5206\u4e3a \u4e09\u79cd\u7c7b\u578b\uff1a L4 \u6e90\u5730\u5740\u54c8\u5e0c \u3001 Cookie Key \u3001 L7 Header Name \uff0c\u5f00\u542f\u540e\u6839\u636e\u5bf9\u5e94\u89c4\u5219\u8fdb\u884c\u4f1a\u8bdd\u4fdd\u6301\u3002
                        • L4 \u6e90\u5730\u5740\u54c8\u5e0c \uff1a\u5f00\u542f\u540e\u9ed8\u8ba4\u5728 Annotation \u4e2d\u52a0\u5165\u5982\u4e0b\u6807\u7b7e\uff1a nginx.ingress.kubernetes.io/upstream-hash-by: \"$binary_remote_addr\"
                        • Cookie Key \uff1a\u5f00\u542f\u540e\u6765\u81ea\u7279\u5b9a\u5ba2\u6237\u7aef\u7684\u8fde\u63a5\u5c06\u4f20\u9012\u81f3\u76f8\u540c Pod\uff0c\u5f00\u542f\u540e \u9ed8\u8ba4\u5728 Annotation \u4e2d\u589e\u52a0\u5982\u4e0b\u53c2\u6570\uff1a nginx.ingress.kubernetes.io/affinity: \"cookie\"\u3002nginx.ingress.kubernetes.io/affinity-mode: persistent
                        • L7 Header Name \uff1a\u5f00\u542f\u540e\u9ed8\u8ba4\u5728 Annotation \u4e2d\u52a0\u5165\u5982\u4e0b\u6807\u7b7e\uff1a nginx.ingress.kubernetes.io/upstream-hash-by: \"$http_x_forwarded_for\"
                      • \u8def\u5f84\u91cd\u5199 \uff1a\u9009\u586b\uff0c rewrite-target \uff0c\u67d0\u4e9b\u573a\u666f\u4e2d\u540e\u7aef\u670d\u52a1\u66b4\u9732\u7684URL\u4e0eIngress\u89c4\u5219\u4e2d\u6307\u5b9a\u7684\u8def\u5f84\u4e0d\u540c\uff0c\u5982\u679c\u4e0d\u8fdb\u884cURL\u91cd\u5199\u914d\u7f6e\uff0c\u8bbf\u95ee\u4f1a\u51fa\u73b0\u9519\u8bef\u3002
                      • \u91cd\u5b9a\u5411 \uff1a\u9009\u586b\uff0c permanent-redirect \uff0c\u6c38\u4e45\u91cd\u5b9a\u5411\uff0c\u8f93\u5165\u91cd\u5199\u8def\u5f84\u540e\uff0c\u8bbf\u95ee\u8def\u5f84\u91cd\u5b9a\u5411\u81f3\u8bbe\u7f6e\u7684\u5730\u5740\u3002
                      • \u6d41\u91cf\u5206\u53d1 \uff1a\u9009\u586b\uff0c\u5f00\u542f\u540e\u5e76\u8bbe\u7f6e\u540e\uff0c\u6839\u636e\u8bbe\u5b9a\u6761\u4ef6\u8fdb\u884c\u6d41\u91cf\u5206\u53d1\u3002
                        • \u57fa\u4e8e\u6743\u91cd \uff1a\u8bbe\u5b9a\u6743\u91cd\u540e\uff0c\u5728\u521b\u5efa\u7684 Ingress \u6dfb\u52a0\u5982\u4e0b Annotation\uff1a nginx.ingress.kubernetes.io/canary-weight: \"10\"
                        • \u57fa\u4e8e Cookie \uff1a\u8bbe\u5b9a Cookie \u89c4\u5219\u540e\uff0c\u6d41\u91cf\u6839\u636e\u8bbe\u5b9a\u7684 Cookie \u6761\u4ef6\u8fdb\u884c\u6d41\u91cf\u5206\u53d1
                        • \u57fa\u4e8e Header \uff1a \u8bbe\u5b9a Header \u89c4\u5219\u540e\uff0c\u6d41\u91cf\u6839\u636e\u8bbe\u5b9a\u7684 Header \u6761\u4ef6\u8fdb\u884c\u6d41\u91cf\u5206\u53d1
                      • \u6807\u7b7e \uff1a\u9009\u586b\uff0c\u4e3a\u8def\u7531\u6dfb\u52a0\u6807\u7b7e
                      • \u6ce8\u89e3 \uff1a\u9009\u586b\uff0c\u4e3a\u8def\u7531\u6dfb\u52a0\u6ce8\u89e3
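As a sketch of how these advanced options end up on the Ingress object, assuming the NGINX Ingress controller (names and values are illustrative only, and with the upstream controller a canary weight only takes effect together with the canary annotation):

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: canary-demo                                         # hypothetical name
  annotations:
    nginx.ingress.kubernetes.io/affinity: "cookie"          # session persistence via Cookie Key
    nginx.ingress.kubernetes.io/affinity-mode: "persistent"
    nginx.ingress.kubernetes.io/rewrite-target: "/"         # path rewriting
    nginx.ingress.kubernetes.io/canary: "true"              # mark this Ingress as a canary
    nginx.ingress.kubernetes.io/canary-weight: "10"         # weight-based traffic distribution
```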
                      "},{"location":"admin/kpanda/network/create-ingress.html#https","title":"\u521b\u5efa HTTPS \u534f\u8bae\u8def\u7531","text":"

                      \u8f93\u5165\u5982\u4e0b\u53c2\u6570\uff1a

                      Note

                      \u6ce8\u610f\uff1a\u4e0e HTTP \u534f\u8bae \u8bbe\u7f6e\u8def\u7531\u89c4\u5219 \u4e0d\u540c\uff0c\u589e\u52a0\u5bc6\u94a5\u9009\u62e9\u8bc1\u4e66\uff0c\u5176\u4ed6\u57fa\u672c\u4e00\u81f4\u3002

                      • \u534f\u8bae \uff1a\u5fc5\u586b\u6307\u6388\u6743\u5165\u7ad9\u5230\u8fbe\u96c6\u7fa4\u670d\u52a1\u7684\u534f\u8bae\uff0c\u652f\u6301 HTTP \uff08\u4e0d\u9700\u8981\u8eab\u4efd\u8ba4\u8bc1\uff09\u6216 HTTPS\uff08\u9700\u9700\u8981\u914d\u7f6e\u8eab\u4efd\u8ba4\u8bc1\uff09 \u534f\u8bae\u3002\u8fd9\u91cc\u9009\u62e9 HTTPS \u534f\u8bae\u7684\u8def\u7531\u3002
                      • \u5bc6\u94a5 \uff1a\u5fc5\u586b\uff0cHttps TLS \u8bc1\u4e66\uff0c\u521b\u5efa\u79d8\u94a5\u3002
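For reference, a TLS secret can be created from an existing certificate and key with kubectl (file names and namespace are placeholders):

```bash
# Create a TLS secret from a certificate/key pair
kubectl create secret tls my-tls-secret --cert=tls.crt --key=tls.key -n default
```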
                      "},{"location":"admin/kpanda/network/create-ingress.html#_3","title":"\u5b8c\u6210\u8def\u7531\u521b\u5efa","text":"

                      \u914d\u7f6e\u5b8c\u6240\u6709\u53c2\u6570\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u6309\u94ae\uff0c\u81ea\u52a8\u8fd4\u56de\u8def\u7531\u5217\u8868\u3002\u5728\u5217\u8868\u53f3\u4fa7\uff0c\u70b9\u51fb \u2507 \uff0c\u53ef\u4ee5\u4fee\u6539\u6216\u5220\u9664\u6240\u9009\u8def\u7531\u3002

                      "},{"location":"admin/kpanda/network/create-services.html","title":"\u521b\u5efa\u670d\u52a1\uff08Service\uff09","text":"

                      \u5728 Kubernetes \u96c6\u7fa4\u4e2d\uff0c\u6bcf\u4e2a Pod \u90fd\u6709\u4e00\u4e2a\u5185\u90e8\u72ec\u7acb\u7684 IP \u5730\u5740\uff0c\u4f46\u662f\u5de5\u4f5c\u8d1f\u8f7d\u4e2d\u7684 Pod \u53ef\u80fd\u4f1a\u88ab\u968f\u65f6\u521b\u5efa\u548c\u5220\u9664\uff0c\u76f4\u63a5\u4f7f\u7528 Pod IP \u5730\u5740\u5e76\u4e0d\u80fd\u5bf9\u5916\u63d0\u4f9b\u670d\u52a1\u3002

                      \u8fd9\u5c31\u9700\u8981\u521b\u5efa\u670d\u52a1\uff0c\u901a\u8fc7\u670d\u52a1\u60a8\u4f1a\u83b7\u5f97\u4e00\u4e2a\u56fa\u5b9a\u7684 IP \u5730\u5740\uff0c\u4ece\u800c\u5b9e\u73b0\u5de5\u4f5c\u8d1f\u8f7d\u524d\u7aef\u548c\u540e\u7aef\u7684\u89e3\u8026\uff0c\u8ba9\u5916\u90e8\u7528\u6237\u80fd\u591f\u8bbf\u95ee\u670d\u52a1\u3002\u540c\u65f6\uff0c\u670d\u52a1\u8fd8\u63d0\u4f9b\u4e86\u8d1f\u8f7d\u5747\u8861\uff08LoadBalancer\uff09\u529f\u80fd\uff0c\u4f7f\u7528\u6237\u80fd\u4ece\u516c\u7f51\u8bbf\u95ee\u5230\u5de5\u4f5c\u8d1f\u8f7d\u3002

                      "},{"location":"admin/kpanda/network/create-services.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                      • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u5c06\u7528\u6237\u6388\u6743\u4e3a NS Editor \u89d2\u8272 \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                      • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                      "},{"location":"admin/kpanda/network/create-services.html#_2","title":"\u521b\u5efa\u670d\u52a1","text":"
                      1. \u4ee5 NS Editor \u7528\u6237\u6210\u529f\u767b\u5f55\u540e\uff0c\u70b9\u51fb\u5de6\u4e0a\u89d2\u7684 \u96c6\u7fa4\u5217\u8868 \u8fdb\u5165 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u3002\u5728\u96c6\u7fa4\u5217\u8868\u4e2d\uff0c\u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\u3002

                      2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u70b9\u51fb \u5bb9\u5668\u7f51\u7edc -> \u670d\u52a1 \u8fdb\u5165\u670d\u52a1\u5217\u8868\uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 \u521b\u5efa\u670d\u52a1 \u6309\u94ae\u3002

                        Tip

                        \u4e5f\u53ef\u4ee5\u901a\u8fc7 YAML \u521b\u5efa \u4e00\u4e2a\u670d\u52a1\u3002

                      3. \u6253\u5f00 \u521b\u5efa\u670d\u52a1 \u9875\u9762\uff0c\u9009\u62e9\u4e00\u79cd\u8bbf\u95ee\u7c7b\u578b\uff0c\u53c2\u8003\u4ee5\u4e0b\u51e0\u4e2a\u53c2\u6570\u8868\u8fdb\u884c\u914d\u7f6e\u3002

                      "},{"location":"admin/kpanda/network/create-services.html#clusterip","title":"\u521b\u5efa ClusterIP \u670d\u52a1","text":"

                      \u70b9\u9009 \u96c6\u7fa4\u5185\u8bbf\u95ee\uff08ClusterIP\uff09 \uff0c\u8fd9\u662f\u6307\u901a\u8fc7\u96c6\u7fa4\u7684\u5185\u90e8 IP \u66b4\u9732\u670d\u52a1\uff0c\u9009\u62e9\u6b64\u9879\u7684\u670d\u52a1\u53ea\u80fd\u5728\u96c6\u7fa4\u5185\u90e8\u8bbf\u95ee\u3002\u8fd9\u662f\u9ed8\u8ba4\u7684\u670d\u52a1\u7c7b\u578b\u3002\u53c2\u8003\u4e0b\u8868\u914d\u7f6e\u53c2\u6570\u3002
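
For reference, a minimal sketch of a ClusterIP Service matching the parameters in the table below; the service name, namespace, selector, and port numbers are illustrative:

kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
  name: svc-01            # service name (4-63 chars: lowercase letters, digits, hyphens)
  namespace: default      # namespace the service lives in
spec:
  type: ClusterIP         # intra-cluster access
  selector:
    app: job01            # label selector for the backend Pods
  ports:
    - name: http          # port name
      protocol: TCP       # TCP or UDP
      port: 80            # service port
      targetPort: 8080    # container port the workload actually listens on
EOF
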

• Access type: required. Specifies how the Pod service is discovered. Select Intra-cluster access (ClusterIP) here. Example: ClusterIP
• Service name: required. Name of the new service. Note: enter a string of 4 to 63 characters containing lowercase letters, digits, and hyphens (-); it must start with a lowercase letter and end with a lowercase letter or digit. Example: svc-01
• Namespace: required. The namespace in which the new service resides. For more about namespaces, see the namespace overview. Example: default
• Label selector: required. Add labels; the Service selects Pods by label. Click "Add" after filling one in. You can also reference the labels of an existing workload: click Reference Workload Label, select a workload in the pop-up window, and the system uses the selected workload's labels as the selector by default. Example: app:job01
• Port configuration: required. Add protocol ports for the service; select the port protocol type first (TCP and UDP are currently supported). Port name: a custom name for the port. Service port (port): the access port the Pod exposes externally. Container port (targetport): the container port the workload actually listens on, used to expose the service within the cluster.
• Session affinity: optional. When enabled, requests from the same client are forwarded to the same Pod. Example: enabled
• Maximum session duration: optional. The maximum hold time after session affinity is enabled; the default is 30 seconds. Example: 30 seconds
• Annotations: optional. Add annotations to the service.
"},{"location":"admin/kpanda/network/create-services.html#nodeport","title":"Create a NodePort service","text":"

Select Node access (NodePort), which exposes the service through the IP and a static port (NodePort) on each node. A NodePort service routes to an automatically created ClusterIP service. By requesting <node IP>:<node port>, you can reach a NodePort service from outside the cluster. Configure the parameters per the table below.
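
A minimal sketch of the corresponding NodePort Service with session affinity enabled; all names and port numbers are illustrative:

kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
  name: svc-01
  namespace: default
spec:
  type: NodePort
  selector:
    app: job01
  ports:
    - name: http
      protocol: TCP
      port: 80            # service port; by default set equal to the container port
      targetPort: 8080    # container port the workload listens on
      nodePort: 30080     # static node port (Kubernetes default range 30000-32767)
  sessionAffinity: ClientIP      # session affinity
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 30         # maximum session duration (the UI default is 30 seconds)
EOF
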

• Access type: required. Specifies how the Pod service is discovered. Select Node access (NodePort) here. Example: NodePort
• Service name: required. Name of the new service. Note: enter a string of 4 to 63 characters containing lowercase letters, digits, and hyphens (-); it must start with a lowercase letter and end with a lowercase letter or digit. Example: svc-01
• Namespace: required. The namespace in which the new service resides. For more about namespaces, see the namespace overview. Example: default
• Label selector: required. Add labels; the Service selects Pods by label. Click "Add" after filling one in. You can also reference the labels of an existing workload: click Reference Workload Label, select a workload in the pop-up window, and the system uses the selected workload's labels as the selector by default.
• Port configuration: required. Add protocol ports for the service; select the port protocol type first (TCP and UDP are currently supported). Port name: a custom name for the port. Service port (port): the access port the Pod exposes externally; by default, for convenience, it is set to the same value as the container port. Container port (targetport): the container port the workload actually listens on. Node port (nodeport): the port on the node that receives traffic forwarded from the ClusterIP; it serves as the entry point for external traffic.
• Session affinity: optional. When enabled, requests from the same client are forwarded to the same Pod, and the Service's .spec.sessionAffinity is set to ClientIP. For details, see session affinity for Services. Example: enabled
• Maximum session duration: optional. The maximum hold time after session affinity is enabled; the default timeout is 30 seconds (.spec.sessionAffinityConfig.clientIP.timeoutSeconds is set to 30 by default). Example: 30 seconds
• Annotations: optional. Add annotations to the service.
"},{"location":"admin/kpanda/network/create-services.html#loadbalancer","title":"Create a LoadBalancer service","text":"

Select Load balancing (LoadBalancer), which exposes the service externally through a cloud provider's load balancer. The external load balancer can route traffic to the automatically created NodePort and ClusterIP services. Configure the parameters per the table below.
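
A minimal sketch of a LoadBalancer Service with an explicit external traffic policy. Names and ports are illustrative, and how the load balancer type and IP pool are wired up depends on the platform (e.g., MetalLB configuration):

kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
  name: svc-01
  namespace: default
spec:
  type: LoadBalancer
  externalTrafficPolicy: Local   # Local: only Pods on the receiving node; Cluster: Pods on any node
  selector:
    app: job01
  ports:
    - name: http
      protocol: TCP
      port: 80
      targetPort: 8080
EOF
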

• Access type: required. Specifies how the Pod service is discovered. Select Load balancing (LoadBalancer) here. Example: LoadBalancer
• Service name: required. Name of the new service. Note: enter a string of 4 to 63 characters containing lowercase letters, digits, and hyphens (-); it must start with a lowercase letter and end with a lowercase letter or digit. Example: svc-01
• Namespace: required. The namespace in which the new service resides. For more about namespaces, see the namespace overview. Example: default
• External traffic policy: required. Sets the external traffic policy. Cluster: traffic can be forwarded to Pods on all nodes in the cluster. Local: traffic is sent only to Pods on the receiving node.
• Label selector: required. Add labels; the Service selects Pods by label. Click "Add" after filling one in. You can also reference the labels of an existing workload: click Reference Workload Label, select a workload in the pop-up window, and the system uses the selected workload's labels as the selector by default.
• Load balancer type: required. The load balancer type to use; MetalLB and others are currently supported. Example: MetalLB
• MetalLB IP pool: required. When the selected load balancer type is MetalLB, the LoadBalancer Service allocates an IP address from this pool by default and announces all IP addresses in the pool via ARP.
• Load balancer address: required. 1. If you are using a public cloud CloudProvider, fill in the load balancer address provided by the cloud vendor. 2. If the load balancer type above is MetalLB, an IP is taken from the IP pool above by default; if left empty, one is obtained automatically.
• Port configuration: required. Add protocol ports for the service; select the port protocol type first (TCP and UDP are currently supported). Port name: a custom name for the port. Service port (port): the access port the Pod exposes externally; by default, for convenience, it is set to the same value as the container port. Container port (targetport): the container port the workload actually listens on. Node port (nodeport): the port on the node that receives traffic forwarded from the ClusterIP; it serves as the entry point for external traffic.
• Annotations: optional. Add annotations to the service.
"},{"location":"admin/kpanda/network/create-services.html#externalname","title":"Create an ExternalName service","text":"

Select External service (ExternalName), which exposes a service by mapping it to an external domain name. A service of this type does not create the typical ClusterIP or NodePort; instead, it redirects requests to an external service address through DNS name resolution. Configure the parameters per the table below.
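
A minimal sketch of an ExternalName Service; the service name and domain are illustrative:

kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
  name: svc-01
  namespace: default
spec:
  type: ExternalName
  externalName: example.com   # external domain the service name resolves to
EOF
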

• Access type: required. Specifies how the Pod service is discovered. Select External service (ExternalName) here. Example: ExternalName
• Service name: required. Name of the new service. Note: enter a string of 4 to 63 characters containing lowercase letters, digits, and hyphens (-); it must start with a lowercase letter and end with a lowercase letter or digit. Example: svc-01
• Namespace: required. The namespace in which the new service resides. For more about namespaces, see the namespace overview. Example: default
• Domain: required.
"},{"location":"admin/kpanda/network/create-services.html#_3","title":"Complete service creation","text":"

After configuring all parameters, click OK; you will be returned to the service list automatically. Click ┇ on the right side of the list to modify or delete the selected service.

                      "},{"location":"admin/kpanda/network/network-policy.html","title":"\u7f51\u7edc\u7b56\u7565","text":"

                      \u7f51\u7edc\u7b56\u7565\uff08NetworkPolicy\uff09\u53ef\u4ee5\u5728 IP \u5730\u5740\u6216\u7aef\u53e3\u5c42\u9762\uff08OSI \u7b2c 3 \u5c42\u6216\u7b2c 4 \u5c42\uff09\u63a7\u5236\u7f51\u7edc\u6d41\u91cf\u3002\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u76ee\u524d\u652f\u6301\u521b\u5efa\u57fa\u4e8e Pod \u6216\u547d\u540d\u7a7a\u95f4\u7684\u7f51\u7edc\u7b56\u7565\uff0c\u652f\u6301\u901a\u8fc7\u6807\u7b7e\u9009\u62e9\u5668\u6765\u8bbe\u5b9a\u54ea\u4e9b\u6d41\u91cf\u53ef\u4ee5\u8fdb\u5165\u6216\u79bb\u5f00\u5e26\u6709\u7279\u5b9a\u6807\u7b7e\u7684 Pod\u3002

                      \u6709\u5173\u7f51\u7edc\u7b56\u7565\u7684\u66f4\u591a\u8be6\u60c5\uff0c\u53ef\u53c2\u8003 Kubernetes \u5b98\u65b9\u6587\u6863\u7f51\u7edc\u7b56\u7565\u3002

                      "},{"location":"admin/kpanda/network/network-policy.html#_2","title":"\u521b\u5efa\u7f51\u7edc\u7b56\u7565","text":"

                      \u76ee\u524d\u652f\u6301\u901a\u8fc7 YAML \u548c\u8868\u5355\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u7f51\u7edc\u7b56\u7565\uff0c\u8fd9\u4e24\u79cd\u65b9\u5f0f\u5404\u6709\u4f18\u52a3\uff0c\u53ef\u4ee5\u6ee1\u8db3\u4e0d\u540c\u7528\u6237\u7684\u4f7f\u7528\u9700\u6c42\u3002

                      \u901a\u8fc7 YAML \u521b\u5efa\u6b65\u9aa4\u66f4\u5c11\u3001\u66f4\u9ad8\u6548\uff0c\u4f46\u95e8\u69db\u8981\u6c42\u8f83\u9ad8\uff0c\u9700\u8981\u719f\u6089\u7f51\u7edc\u7b56\u7565\u7684 YAML \u6587\u4ef6\u914d\u7f6e\u3002

                      \u901a\u8fc7\u8868\u5355\u521b\u5efa\u66f4\u76f4\u89c2\u66f4\u7b80\u5355\uff0c\u6839\u636e\u63d0\u793a\u586b\u5199\u5bf9\u5e94\u7684\u503c\u5373\u53ef\uff0c\u4f46\u6b65\u9aa4\u66f4\u52a0\u7e41\u7410\u3002

                      "},{"location":"admin/kpanda/network/network-policy.html#yaml","title":"YAML \u521b\u5efa","text":"
                      1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u7f51\u7edc -> \u7f51\u7edc\u7b56\u7565 -> YAML \u521b\u5efa \u3002

                      2. \u5728\u5f39\u6846\u4e2d\u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u7136\u540e\u5728\u5f39\u6846\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002
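
For instance, a minimal sketch of a NetworkPolicy that only allows ingress to Pods labeled app: job01 from Pods labeled role: frontend on TCP port 8080; all names, labels, and ports are illustrative:

kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-frontend          # illustrative policy name
  namespace: default
spec:
  podSelector:
    matchLabels:
      app: job01                # Pods this policy applies to
  policyTypes:
    - Ingress
  ingress:
    - from:
        - podSelector:
            matchLabels:
              role: frontend    # only traffic from these Pods is allowed
      ports:
        - protocol: TCP
          port: 8080
EOF
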

                      "},{"location":"admin/kpanda/network/network-policy.html#_3","title":"\u8868\u5355\u521b\u5efa","text":"
                      1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u7f51\u7edc -> \u7f51\u7edc\u7b56\u7565 -> \u521b\u5efa\u7b56\u7565 \u3002

                      2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\u3002

                        \u540d\u79f0\u548c\u547d\u540d\u7a7a\u95f4\u5728\u521b\u5efa\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002

                      3. \u586b\u5199\u7b56\u7565\u914d\u7f6e\u3002

                        \u7b56\u7565\u914d\u7f6e\u5206\u4e3a\u5165\u6d41\u91cf\u7b56\u7565\u548c\u51fa\u6d41\u91cf\u7b56\u7565\u3002\u5982\u679c\u6e90 Pod \u60f3\u8981\u6210\u529f\u8fde\u63a5\u5230\u76ee\u6807 Pod\uff0c\u6e90 Pod \u7684\u51fa\u6d41\u91cf\u7b56\u7565\u548c\u76ee\u6807 Pod \u7684\u5165\u6d41\u91cf\u7b56\u7565\u90fd\u9700\u8981\u5141\u8bb8\u8fde\u63a5\u3002\u5982\u679c\u4efb\u4f55\u4e00\u65b9\u4e0d\u5141\u8bb8\u8fde\u63a5\uff0c\u90fd\u4f1a\u5bfc\u81f4\u8fde\u63a5\u5931\u8d25\u3002

                        • \u5165\u6d41\u91cf\u7b56\u7565\uff1a\u70b9\u51fb \u2795 \u5f00\u59cb\u914d\u7f6e\u7b56\u7565\uff0c\u652f\u6301\u914d\u7f6e\u591a\u6761\u7b56\u7565\u3002\u591a\u6761\u7f51\u7edc\u7b56\u7565\u7684\u6548\u679c\u76f8\u4e92\u53e0\u52a0\uff0c\u53ea\u6709\u540c\u65f6\u6ee1\u8db3\u6240\u6709\u7f51\u7edc\u7b56\u7565\uff0c\u624d\u80fd\u6210\u529f\u5efa\u7acb\u8fde\u63a5\u3002

                        • \u51fa\u6d41\u91cf\u7b56\u7565

                      "},{"location":"admin/kpanda/network/network-policy.html#_4","title":"\u67e5\u770b\u7f51\u7edc\u7b56\u7565","text":"
                      1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u7f51\u7edc -> \u7f51\u7edc\u7b56\u7565 \uff0c\u70b9\u51fb\u7f51\u7edc\u7b56\u7565\u7684\u540d\u79f0\u3002

                      2. \u67e5\u770b\u8be5\u7b56\u7565\u7684\u57fa\u672c\u914d\u7f6e\u3001\u5173\u8054\u5b9e\u4f8b\u4fe1\u606f\u3001\u5165\u6d41\u91cf\u7b56\u7565\u3001\u51fa\u6d41\u91cf\u7b56\u7565\u3002

                      Info

                      \u5728\u5173\u8054\u5b9e\u4f8b\u9875\u7b7e\u4e0b\uff0c\u652f\u6301\u67e5\u770b\u5b9e\u4f8b\u76d1\u63a7\u3001\u65e5\u5fd7\u3001\u5bb9\u5668\u5217\u8868\u3001YAML \u6587\u4ef6\u3001\u4e8b\u4ef6\u7b49\u3002

                      "},{"location":"admin/kpanda/network/network-policy.html#_5","title":"\u66f4\u65b0\u7f51\u7edc\u7b56\u7565","text":"

                      \u6709\u4e24\u79cd\u9014\u5f84\u53ef\u4ee5\u66f4\u65b0\u7f51\u7edc\u7b56\u7565\u3002\u652f\u6301\u901a\u8fc7\u8868\u5355\u6216 YAML \u6587\u4ef6\u66f4\u65b0\u7f51\u7edc\u7b56\u7565\u3002

                      • \u5728\u7f51\u7edc\u7b56\u7565\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684\u7b56\u7565\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                      • \u70b9\u51fb\u7f51\u7edc\u7b56\u7565\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u7f51\u7edc\u7b56\u7565\u7684\u8be6\u60c5\u9875\u9762\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                      "},{"location":"admin/kpanda/network/network-policy.html#_6","title":"\u5220\u9664\u7f51\u7edc\u7b56\u7565","text":"

                      \u6709\u4e24\u79cd\u9014\u5f84\u53ef\u4ee5\u5220\u9664\u7f51\u7edc\u7b56\u7565\u3002\u652f\u6301\u901a\u8fc7\u8868\u5355\u6216 YAML \u6587\u4ef6\u66f4\u65b0\u7f51\u7edc\u7b56\u7565\u3002

                      • \u5728\u7f51\u7edc\u7b56\u7565\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684\u7b56\u7565\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u5220\u9664\u3002

                      • \u70b9\u51fb\u7f51\u7edc\u7b56\u7565\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u7f51\u7edc\u7b56\u7565\u7684\u8be6\u60c5\u9875\u9762\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u5220\u9664\u3002

                      "},{"location":"admin/kpanda/nodes/add-node.html","title":"\u96c6\u7fa4\u8282\u70b9\u6269\u5bb9","text":"

                      \u968f\u7740\u4e1a\u52a1\u5e94\u7528\u4e0d\u65ad\u589e\u957f\uff0c\u96c6\u7fa4\u8d44\u6e90\u65e5\u8d8b\u7d27\u5f20\uff0c\u8fd9\u65f6\u53ef\u4ee5\u57fa\u4e8e kubean \u5bf9\u96c6\u7fa4\u8282\u70b9\u8fdb\u884c\u6269\u5bb9\u3002\u6269\u5bb9\u540e\uff0c\u5e94\u7528\u53ef\u4ee5\u8fd0\u884c\u5728\u65b0\u589e\u7684\u8282\u70b9\u4e0a\uff0c\u7f13\u89e3\u8d44\u6e90\u538b\u529b\u3002

                      \u53ea\u6709\u901a\u8fc7\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u521b\u5efa\u7684\u96c6\u7fa4\u624d\u652f\u6301\u8282\u70b9\u6269\u7f29\u5bb9\uff0c\u4ece\u5916\u90e8\u63a5\u5165\u7684\u96c6\u7fa4\u4e0d\u652f\u6301\u6b64\u64cd\u4f5c\u3002\u672c\u6587\u4e3b\u8981\u4ecb\u7ecd\u540c\u79cd\u67b6\u6784\u4e0b\u5de5\u4f5c\u96c6\u7fa4\u7684 \u5de5\u4f5c\u8282\u70b9 \u6269\u5bb9\u3002

                      1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                        \u82e5 \u96c6\u7fa4\u89d2\u8272 \u4e2d\u5e26\u6709 \u63a5\u5165\u96c6\u7fa4 \u7684\u6807\u7b7e\uff0c\u5219\u8bf4\u660e\u8be5\u96c6\u7fa4\u4e0d\u652f\u6301\u8282\u70b9\u6269\u7f29\u5bb9\u3002

                      2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u8282\u70b9\u7ba1\u7406 \uff0c\u7136\u540e\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u70b9\u51fb \u63a5\u5165\u8282\u70b9 \u3002

                      3. \u8f93\u5165\u4e3b\u673a\u540d\u79f0\u548c\u8282\u70b9 IP \u5e76\u70b9\u51fb \u786e\u5b9a \u3002

                        \u70b9\u51fb \u2795 \u6dfb\u52a0\u5de5\u4f5c\u8282\u70b9 \u53ef\u4ee5\u7ee7\u7eed\u63a5\u5165\u66f4\u591a\u8282\u70b9\u3002

Note

Integrating a node takes about 20 minutes; please be patient.

                      "},{"location":"admin/kpanda/nodes/add-node.html#_2","title":"\u53c2\u8003\u6587\u6863","text":"
                      • \u5bf9\u5de5\u4f5c\u96c6\u7fa4\u7684\u63a7\u5236\u8282\u70b9\u6269\u5bb9
                      • \u4e3a\u5de5\u4f5c\u96c6\u7fa4\u6dfb\u52a0\u5f02\u6784\u8282\u70b9
                      • \u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u5de5\u4f5c\u8282\u70b9\u6269\u5bb9
                      • \u66ff\u6362\u5de5\u4f5c\u96c6\u7fa4\u7684\u9996\u4e2a\u63a7\u5236\u8282\u70b9
                      "},{"location":"admin/kpanda/nodes/delete-node.html","title":"\u96c6\u7fa4\u8282\u70b9\u7f29\u5bb9","text":"

                      \u5f53\u4e1a\u52a1\u9ad8\u5cf0\u671f\u7ed3\u675f\u4e4b\u540e\uff0c\u4e3a\u4e86\u8282\u7701\u8d44\u6e90\u6210\u672c\uff0c\u53ef\u4ee5\u7f29\u5c0f\u96c6\u7fa4\u89c4\u6a21\uff0c\u5378\u8f7d\u5197\u4f59\u7684\u8282\u70b9\uff0c\u5373\u8282\u70b9\u7f29\u5bb9\u3002\u8282\u70b9\u5378\u8f7d\u540e\uff0c\u5e94\u7528\u65e0\u6cd5\u7ee7\u7eed\u8fd0\u884c\u5728\u8be5\u8282\u70b9\u4e0a\u3002

                      "},{"location":"admin/kpanda/nodes/delete-node.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5177\u6709 Cluster Admin \u89d2\u8272\u6388\u6743 \u3002
                      • \u53ea\u6709\u901a\u8fc7\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u521b\u5efa\u7684\u96c6\u7fa4\u624d\u652f\u6301\u8282\u70b9\u6269\u7f29\u5bb9\uff0c\u4ece\u5916\u90e8\u63a5\u5165\u7684\u96c6\u7fa4\u4e0d\u652f\u6301\u6b64\u64cd\u4f5c\u3002
                      • \u5378\u8f7d\u8282\u70b9\u4e4b\u524d\uff0c\u9700\u8981\u6682\u505c\u8c03\u5ea6\u8be5\u8282\u70b9\uff0c\u5e76\u4e14\u5c06\u8be5\u8282\u70b9\u4e0a\u7684\u5e94\u7528\u90fd\u9a71\u9010\u81f3\u5176\u4ed6\u8282\u70b9\u3002
                      • \u9a71\u9010\u65b9\u5f0f\uff1a\u767b\u5f55\u63a7\u5236\u5668\u8282\u70b9\uff0c\u901a\u8fc7 kubectl drain \u547d\u4ee4\u9a71\u9010\u8282\u70b9\u4e0a\u6240\u6709 Pod\u3002\u5b89\u5168\u9a71\u9010\u7684\u65b9\u5f0f\u53ef\u4ee5\u5141\u8bb8\u5bb9\u5668\u7ec4\u91cc\u9762\u7684\u5bb9\u5668\u4f18\u96c5\u5730\u4e2d\u6b62\u3002
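
A minimal sketch of the eviction described above, run from a controller node; the node name is a placeholder:

# Stop new Pods from being scheduled onto the node
kubectl cordon worker-node-1

# Safely evict all Pods, ignoring DaemonSet-managed Pods and
# deleting Pods that use emptyDir local data
kubectl drain worker-node-1 --ignore-daemonsets --delete-emptydir-data
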
                      "},{"location":"admin/kpanda/nodes/delete-node.html#_3","title":"\u6ce8\u610f\u4e8b\u9879","text":"
                      1. \u96c6\u7fa4\u8282\u70b9\u7f29\u5bb9\u65f6\uff0c\u53ea\u80fd\u9010\u4e2a\u8fdb\u884c\u5378\u8f7d\uff0c\u65e0\u6cd5\u6279\u91cf\u5378\u8f7d\u3002

                      2. \u5982\u9700\u5378\u8f7d\u96c6\u7fa4\u63a7\u5236\u5668\u8282\u70b9\uff0c\u9700\u8981\u786e\u4fdd\u6700\u7ec8\u63a7\u5236\u5668\u8282\u70b9\u6570\u4e3a \u5947\u6570\u3002

                      3. \u96c6\u7fa4\u8282\u70b9\u7f29\u5bb9\u65f6\u4e0d\u53ef\u4e0b\u7ebf \u7b2c\u4e00\u4e2a\u63a7\u5236\u5668 \u8282\u70b9\u3002\u5982\u679c\u5fc5\u987b\u6267\u884c\u6b64\u64cd\u4f5c\uff0c\u8bf7\u8054\u7cfb\u552e\u540e\u5de5\u7a0b\u5e08\u3002

                      "},{"location":"admin/kpanda/nodes/delete-node.html#_4","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                        \u82e5 \u96c6\u7fa4\u89d2\u8272 \u4e2d\u5e26\u6709 \u63a5\u5165\u96c6\u7fa4 \u7684\u6807\u7b7e\uff0c\u5219\u8bf4\u660e\u8be5\u96c6\u7fa4\u4e0d\u652f\u6301\u8282\u70b9\u6269\u7f29\u5bb9\u3002

                      2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u8282\u70b9\u7ba1\u7406 \uff0c\u627e\u5230\u9700\u8981\u5378\u8f7d\u7684\u8282\u70b9\uff0c\u70b9\u51fb \u2507 \u9009\u62e9 \u79fb\u9664\u8282\u70b9 \u3002

                      3. \u8f93\u5165\u8282\u70b9\u540d\u79f0\uff0c\u5e76\u70b9\u51fb \u5220\u9664 \u8fdb\u884c\u786e\u8ba4\u3002

                      "},{"location":"admin/kpanda/nodes/labels-annotations.html","title":"\u6807\u7b7e\u4e0e\u6ce8\u89e3","text":"

                      \u6807\u7b7e\uff08Labels\uff09\u662f\u4e3a Pod\u3001\u8282\u70b9\u3001\u96c6\u7fa4\u7b49 Kubernetes \u5bf9\u8c61\u6dfb\u52a0\u7684\u6807\u8bc6\u6027\u952e\u503c\u5bf9\uff0c\u53ef\u7ed3\u5408\u6807\u7b7e\u9009\u62e9\u5668\u67e5\u627e\u5e76\u7b5b\u9009\u6ee1\u8db3\u67d0\u4e9b\u6761\u4ef6\u7684 Kubernetes \u5bf9\u8c61\u3002\u6bcf\u4e2a\u952e\u5bf9\u4e8e\u7ed9\u5b9a\u5bf9\u8c61\u5fc5\u987b\u662f\u552f\u4e00\u7684\u3002

                      \u6ce8\u89e3\uff08Annotations\uff09\u548c\u6807\u7b7e\u4e00\u6837\uff0c\u4e5f\u662f\u952e/\u503c\u5bf9\uff0c\u4f46\u4e0d\u5177\u5907\u6807\u8bc6\u6216\u7b5b\u9009\u529f\u80fd\u3002 \u4f7f\u7528\u6ce8\u89e3\u53ef\u4ee5\u4e3a\u8282\u70b9\u6dfb\u52a0\u4efb\u610f\u7684\u5143\u6570\u636e\u3002 \u6ce8\u89e3\u7684\u952e\u901a\u5e38\u4f7f\u7528\u7684\u683c\u5f0f\u4e3a \u524d\u7f00\uff08\u53ef\u9009\uff09/\u540d\u79f0\uff08\u5fc5\u586b\uff09 \uff0c\u4f8b\u5982 nfd.node.kubernetes.io/extended-resources \u3002 \u5982\u679c\u7701\u7565\u524d\u7f00\uff0c\u8868\u793a\u8be5\u6ce8\u89e3\u952e\u662f\u7528\u6237\u79c1\u6709\u7684\u3002

                      \u6709\u5173\u6807\u7b7e\u548c\u6ce8\u89e3\u7684\u66f4\u591a\u4fe1\u606f\uff0c\u53ef\u53c2\u8003 Kubernetes \u7684\u5b98\u65b9\u6587\u6863\u6807\u7b7e\u548c\u9009\u62e9\u7b97\u7b26\u6216\u6ce8\u89e3\u3002

                      \u6dfb\u52a0/\u5220\u9664\u6807\u7b7e\u4e0e\u6ce8\u89e3\u7684\u6b65\u9aa4\u5982\u4e0b\uff1a

1. On the Cluster List page, click the name of the target cluster.

2. Click Node Management in the left navigation bar, click the ┇ action icon on the right side of a node, and click Modify Labels or Modify Annotations.

3. Click ➕ Add to add a label or annotation, click X to delete one, and finally click OK.
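
The same result can be achieved with kubectl; a minimal sketch, with the node name, keys, and values as placeholders:

# Add (or overwrite) a label and an annotation on a node
kubectl label node worker-node-1 disktype=ssd --overwrite
kubectl annotate node worker-node-1 example.com/notes="maintained by team-a" --overwrite

# Delete them again by appending a trailing dash to the key
kubectl label node worker-node-1 disktype-
kubectl annotate node worker-node-1 example.com/notes-
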

                      "},{"location":"admin/kpanda/nodes/node-authentication.html","title":"\u8282\u70b9\u8ba4\u8bc1","text":""},{"location":"admin/kpanda/nodes/node-authentication.html#ssh","title":"\u4f7f\u7528 SSH \u5bc6\u94a5\u8ba4\u8bc1\u8282\u70b9","text":"

                      \u5982\u679c\u60a8\u9009\u62e9\u4f7f\u7528 SSH \u5bc6\u94a5\u4f5c\u4e3a\u5f85\u521b\u5efa\u96c6\u7fa4\u7684\u8282\u70b9\u8ba4\u8bc1\u65b9\u5f0f\uff0c\u60a8\u9700\u8981\u6309\u7167\u5982\u4e0b\u8bf4\u660e\u914d\u7f6e\u516c\u79c1\u94a5\u3002

1. Run the following commands on any node in the management cluster of the cluster to be created to generate the public/private key pair.

  cd /root/.ssh
  ssh-keygen -t rsa
2. Run the ls command to check whether the keys were created successfully on the management cluster; the correct output looks like this:

  ls
  id_rsa  id_rsa.pub  known_hosts

  The file named id_rsa is the private key; the file named id_rsa.pub is the public key.

3. Run the following command to load the public key file id_rsa.pub onto every node of the cluster to be created.

  ssh-copy-id -i /root/.ssh/id_rsa.pub root@10.0.0.0

  Replace the root@10.0.0.0 user account and node IP in the command above with the username and IP of a node of the cluster to be created. The same operation must be performed on every node of the cluster to be created.

4. Run the following command to view the private key file id_rsa created in step 1.

  cat /root/.ssh/id_rsa

  The output looks like this:

  -----BEGIN RSA PRIVATE KEY-----
  MIIEpQIBAAKCAQEA3UvyKINzY5BFuemQ+uJ6q+GqgfvnWwNC8HzZhpcMSjJy26MM
  UtBEBJxy8fMi57XcjYxPibXW/wnd+32ICCycqCwByUmuXeCC1cjlCQDqjcAvXae7
  Y54IXGF7wm2IsMNwf0kjFEXjuS48FLDA0mGRaN3BG+Up5geXcHckg3K5LD8kXFFx
  dEmSIjdyw55NaUitmEdHzN7cIdfi6Z56jcV8dcFBgWKUx+ebiyPmZBkXToz6GnMF
  rswzzZCl+G6Jb2xTGy7g7ozb4BoZd1IpSD5EhDanRrESVE0C5YuJ5zUAC0CvVd1l
  v67AK8Ko6MXToHp01/bcsvlM6cqgwUFXZKVeOwIDAQABAoIBAQCO36GQlo3BEjxy
  M2HvGJmqrx+unDxafliRe4nVY2AD515Qf4xNSzke4QM1QoyenMOwf446krQkJPK0
  k+9nl6Xszby5gGCbK4BNFk8I6RaGPjZWeRx6zGUJf8avWJiPxx6yjz2esSC9RiR0
  F0nmiiefVMyAfgv2/5++dK2WUFNNRKLgSRRpP5bRaD5wMzzxtSSXrUon6217HO8p
  3RoWsI51MbVzhdVgpHUNABcoa0rpr9svT6XLKZxY8mxpKFYjM0Wv2JIDABg3kBvh
  QbJ7kStCO3naZjKMU9UuSqVJs06cflGYw7Or8/tABR3LErNQKPjkhAQqt0DXw7Iw
  3tKdTAJBAoGBAP687U7JAOqQkcphek2E/A/sbO/d37ix7Z3vNOy065STrA+ZWMZn
  pZ6Ui1B/oJpoZssnfvIoz9sn559X0j67TljFALFd2ZGS0Fqh9KVCqDvfk+Vst1dq
  +3r/yZdTOyswoccxkJiC/GDwZGK0amJWqvob39JCZhDAKIGLbGMmjdAHAoGBAN5k
  m1WGnni1nZ+3dryIwgB6z1hWcnLTamzSET6KhSuo946ET0IRG9xtlheCx6dqICbr
  Vk1Y4NtRZjK/p/YGx59rDWf7E3I8ZMgR7mjieOcUZ4lUlA4l7ZIlW/2WZHW+nUXO
  Ti20fqJ8qSp4BUvOvuth1pz2GLUHe2/Fxjf7HIstAoGBAPHpPr9r+TfIlPsJeRj2
  6lzA3G8qWFRQfGRYjv0fjv0pA+RIb1rzgP/I90g5+63G6Z+R4WdcxI/OJJNY1iuG
  uw9n/pFxm7U4JC990BPE6nj5iLz+clpNGYckNDBF9VG9vFSrSDLdaYkxoVNvG/xJ
  a9Na90H4lm7f3VewrPy310KvAoGAZr+mwNoEh5Kpc6xo8Gxi7aPP/mlaUVD6X7Ki
  gvmu02AqmC7rC4QqEiqTaONkaSXwGusqIWxJ3yp5hELmUBYLzszAEeV/s4zRp1oZ
  g133LBRSTbHFAdBmNdqK6Nu+KGRb92980UMOKvZbliKDl+W6cbfvVu+gtKrzTc3b
  aevb4TUCgYEAnJAxyVYDP1nJf7bjBSHXQu1E/DMwbtrqw7dylRJ8cAzI7IxfSCez
  7BYWq41PqVd9/zrb3Pbh2phiVzKe783igAIMqummcjo/kZyCwFsYBzK77max1jF5
  aPQsLbRS2aDz8kIH6jHPZ/R+15EROmdtLmA7vIJZGerWWQR0dUU+XXA=

  Copy the private key content and paste it into the key input box in the UI.

                      "},{"location":"admin/kpanda/nodes/node-check.html","title":"\u521b\u5efa\u96c6\u7fa4\u8282\u70b9\u53ef\u7528\u6027\u68c0\u67e5","text":"

                      \u5728\u521b\u5efa\u96c6\u7fa4\u6216\u4e3a\u5df2\u6709\u96c6\u7fa4\u6dfb\u52a0\u8282\u70b9\u65f6\uff0c\u8bf7\u53c2\u9605\u4e0b\u8868\uff0c\u68c0\u67e5\u8282\u70b9\u914d\u7f6e\uff0c\u4ee5\u907f\u514d\u56e0\u8282\u70b9\u914d\u7f6e\u9519\u8bef\u5bfc\u81f4\u96c6\u7fa4\u521b\u5efa\u6216\u6269\u5bb9\u5931\u8d25\u3002

• Operating system: see the supported architectures and operating systems below
• SELinux: off
• Firewall: off
• Architecture consistency: consistent CPU architecture across nodes (e.g., all ARM or all x86)
• Host time: synchronization error between all hosts under 10 seconds
• Network connectivity: the node and its SSH port can be accessed normally by the platform
• CPU: more than 4 cores of available CPU resources
• Memory: more than 8 GB of available memory resources
"},{"location":"admin/kpanda/nodes/node-check.html#_2","title":"Supported architectures and operating systems","text":"
• ARM: Kylin Linux Advanced Server release V10 (Sword) SP2 (recommended)
• ARM: UOS Linux
• ARM: openEuler
• x86: CentOS 7.x (recommended)
• x86: Redhat 7.x (recommended)
• x86: Redhat 8.x (recommended)
• x86: Flatcar Container Linux by Kinvolk
• x86: Debian Bullseye, Buster, Jessie, Stretch
• x86: Ubuntu 16.04, 18.04, 20.04, 22.04
• x86: Fedora 35, 36
• x86: Fedora CoreOS
• x86: openSUSE Leap 15.x/Tumbleweed
• x86: Oracle Linux 7, 8, 9
• x86: Alma Linux 8, 9
• x86: Rocky Linux 8, 9
• x86: Amazon Linux 2
• x86: Kylin Linux Advanced Server release V10 (Sword) - SP2 (Hygon)
• x86: UOS Linux
• x86: openEuler
"},{"location":"admin/kpanda/nodes/node-details.html","title":"Node details","text":"

After integrating or creating a cluster, you can view information about each node in the cluster, including node status, labels, resource usage, Pods, and monitoring information.

1. On the Cluster List page, click the name of the target cluster.

2. Click Node Management in the left navigation bar to view node status, roles, labels, CPU/memory usage, IP addresses, and creation times.

3. Click a node name to enter the node details page and view more information, including overview, Pod information, label and annotation information, the event list, status, and more.

  You can also view the node's YAML file, monitoring information, labels, annotations, and more.

                      "},{"location":"admin/kpanda/nodes/schedule.html","title":"\u8282\u70b9\u8c03\u5ea6","text":"

                      \u652f\u6301\u5c06\u8282\u70b9\u6682\u505c\u8c03\u5ea6\u6216\u6062\u590d\u8c03\u5ea6\u3002\u6682\u505c\u8c03\u5ea6\u6307\uff0c\u505c\u6b62\u5c06 Pod \u8c03\u5ea6\u5230\u8be5\u8282\u70b9\u3002\u6062\u590d\u8c03\u5ea6\u6307\uff0c\u53ef\u4ee5\u5c06 Pod \u8c03\u5ea6\u5230\u8be5\u8282\u70b9\u3002

                      1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                      2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u8282\u70b9\u7ba1\u7406 \uff0c\u5728\u8282\u70b9\u53f3\u4fa7\u70b9\u51fb \u2507 \u64cd\u4f5c\u56fe\u6807\uff0c\u70b9\u51fb \u6682\u505c\u8c03\u5ea6 \u6309\u94ae\u5373\u53ef\u6682\u505c\u8c03\u5ea6\u8be5\u8282\u70b9\u3002

                      3. \u5728\u8282\u70b9\u53f3\u4fa7\u70b9\u51fb \u2507 \u64cd\u4f5c\u56fe\u6807\uff0c\u70b9\u51fb \u6062\u590d\u8c03\u5ea6 \u6309\u94ae\u5373\u53ef\u6062\u590d\u8c03\u5ea6\u8be5\u8282\u70b9\u3002

                      \u8282\u70b9\u8c03\u5ea6\u72b6\u6001\u53ef\u80fd\u56e0\u7f51\u7edc\u60c5\u51b5\u6709\u6240\u5ef6\u8fdf\uff0c\u70b9\u51fb\u641c\u7d22\u6846\u53f3\u4fa7\u7684\u5237\u65b0\u56fe\u6807\u53ef\u4ee5\u5237\u65b0\u8282\u70b9\u8c03\u5ea6\u72b6\u6001\u3002
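
These two operations correspond to marking a node unschedulable and schedulable again; a minimal kubectl sketch, with the node name as a placeholder:

# Pause scheduling: no new Pods will be placed on the node
kubectl cordon worker-node-1

# Resume scheduling
kubectl uncordon worker-node-1
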

                      "},{"location":"admin/kpanda/nodes/taints.html","title":"\u8282\u70b9\u6c61\u70b9\u7ba1\u7406","text":"

                      \u6c61\u70b9 (Taint) \u80fd\u591f\u4f7f\u8282\u70b9\u6392\u65a5\u67d0\u4e00\u7c7b Pod\uff0c\u907f\u514d Pod \u88ab\u8c03\u5ea6\u5230\u8be5\u8282\u70b9\u4e0a\u3002 \u6bcf\u4e2a\u8282\u70b9\u4e0a\u53ef\u4ee5\u5e94\u7528\u4e00\u4e2a\u6216\u591a\u4e2a\u6c61\u70b9\uff0c\u4e0d\u80fd\u5bb9\u5fcd\u8fd9\u4e9b\u6c61\u70b9\u7684 Pod \u5219\u4e0d\u4f1a\u88ab\u8c03\u5ea6\u8be5\u8282\u70b9\u4e0a\u3002

                      "},{"location":"admin/kpanda/nodes/taints.html#_2","title":"\u6ce8\u610f\u4e8b\u9879","text":"
                      1. \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u5907 NS Editor \u89d2\u8272\u6388\u6743\u6216\u5176\u4ed6\u66f4\u9ad8\u6743\u9650\u3002
                      2. \u4e3a\u8282\u70b9\u6dfb\u52a0\u6c61\u70b9\u4e4b\u540e\uff0c\u53ea\u6709\u80fd\u5bb9\u5fcd\u8be5\u6c61\u70b9\u7684 Pod \u624d\u80fd\u88ab\u8c03\u5ea6\u5230\u8be5\u8282\u70b9\u3002
                      "},{"location":"admin/kpanda/nodes/taints.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u627e\u5230\u76ee\u6807\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u6982\u89c8 \u9875\u9762\u3002

                      2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u8282\u70b9\u7ba1\u7406 \uff0c\u627e\u5230\u9700\u8981\u4fee\u6539\u6c61\u70b9\u7684\u8282\u70b9\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u56fe\u6807\u5e76\u70b9\u51fb \u4fee\u6539\u6c61\u70b9 \u6309\u94ae\u3002

                      3. \u5728\u5f39\u6846\u5185\u8f93\u5165\u6c61\u70b9\u7684\u952e\u503c\u4fe1\u606f\uff0c\u9009\u62e9\u6c61\u70b9\u6548\u679c\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                        \u70b9\u51fb \u2795 \u6dfb\u52a0 \u53ef\u4ee5\u4e3a\u8282\u70b9\u6dfb\u52a0\u591a\u4e2a\u6c61\u70b9\uff0c\u70b9\u51fb\u6c61\u70b9\u6548\u679c\u53f3\u4fa7\u7684 X \u53ef\u4ee5\u5220\u9664\u6c61\u70b9\u3002

  Three taint effects are currently supported:

  • NoSchedule: new Pods are not scheduled to a node with this taint unless they have a matching toleration. Pods already running on the node are not evicted.
  • NoExecute: this affects Pods already running on the node:
    • Pods that cannot tolerate the taint are evicted immediately.
    • Pods that can tolerate the taint but do not specify tolerationSeconds in their toleration definition stay on the node indefinitely.
    • Pods that can tolerate the taint and specify tolerationSeconds keep running on the node for the specified duration, after which they are evicted.
  • PreferNoSchedule: a "soft" version of NoSchedule. The control plane will try to avoid scheduling Pods that do not tolerate the taint onto the node, but this is not guaranteed, so avoid relying on this taint where possible.

For more details about taints, refer to the official Kubernetes documentation: Taints and Tolerations.
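
As a command-line counterpart to the steps above, a minimal sketch; the node name, key, and value are placeholders:

# Add a NoSchedule taint to the node
kubectl taint node worker-node-1 dedicated=gpu:NoSchedule

# Remove the taint again by appending a trailing dash
kubectl taint node worker-node-1 dedicated=gpu:NoSchedule-
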

                      "},{"location":"admin/kpanda/olm/import-miniooperator.html","title":"\u5bfc\u5165\u79bb\u7ebf MinIo Operator","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5728\u79bb\u7ebf\u73af\u5883\u4e0b\u5982\u4f55\u5bfc\u5165 MinIo Operator\u3002

                      "},{"location":"admin/kpanda/olm/import-miniooperator.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u5f53\u524d\u96c6\u7fa4\u5df2\u63a5\u5165\u5bb9\u5668\u7ba1\u7406\u4e14\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u5df2\u7ecf\u5b89\u88c5 kolm \u7ec4\u4ef6\uff08helm \u6a21\u677f\u641c\u7d22 kolm\uff09
                      • \u5f53\u524d\u96c6\u7fa4\u5df2\u7ecf\u5b89\u88c5 olm \u7ec4\u4ef6\u4e14\u7248\u672c >= 0.2.4 (helm \u6a21\u677f\u641c\u7d22 olm)
                      • \u652f\u6301\u6267\u884c Docker \u547d\u4ee4
                      • \u51c6\u5907\u4e00\u4e2a\u955c\u50cf\u4ed3\u5e93
                      "},{"location":"admin/kpanda/olm/import-miniooperator.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u5728\u6267\u884c\u73af\u5883\u4e2d\u8bbe\u7f6e\u73af\u5883\u53d8\u91cf\u5e76\u5728\u540e\u7eed\u6b65\u9aa4\u4f7f\u7528\uff0c\u6267\u884c\u547d\u4ee4\uff1a

                        export OPM_IMG=10.5.14.200/quay.m.daocloud.io/operator-framework/opm:v1.29.0 \nexport BUNDLE_IMG=10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3 \n

                        \u5982\u4f55\u83b7\u53d6\u4e0a\u8ff0\u955c\u50cf\u5730\u5740\uff1a

                        \u524d\u5f80 \u5bb9\u5668\u7ba1\u7406 -> \u9009\u62e9\u5f53\u524d\u96c6\u7fa4 -> helm \u5e94\u7528 -> \u67e5\u770b olm \u7ec4\u4ef6 -> \u63d2\u4ef6\u8bbe\u7f6e \uff0c\u627e\u5230\u540e\u7eed\u6b65\u9aa4\u6240\u9700 opm\uff0cminio\uff0cminio bundle\uff0cminio operator \u7684\u955c\u50cf\u3002

                        \u4ee5\u4e0a\u8bc9\u622a\u56fe\u4e3a\u4f8b\uff0c\u5219\u56db\u4e2a\u955c\u50cf\u5730\u5740\u5982\u4e0b\n\n# opm \u955c\u50cf \n10.5.14.200/quay.m.daocloud.io/operator-framework/opm:v1.29.0\n\n# minio \u955c\u50cf\n10.5.14.200/quay.m.daocloud.io/minio/minio:RELEASE.2023-03-24T21-41-23Z\n\n# minio bundle \u955c\u50cf\n10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3\n\n# minio operator \u955c\u50cf \n10.5.14.200/quay.m.daocloud.io/minio/operator:v5.0.3\n
2. Run the opm command to extract the operator contained in the offline bundle image.

  # Create a directory to hold the operator
  $ mkdir minio-operator && cd minio-operator

  # Fetch the operator YAML
  $ docker run --user root -v $PWD/minio-operator:/minio-operator ${OPM_IMG} alpha bundle unpack --skip-tls-verify -v -d ${BUNDLE_IMG} -o ./minio-operator

  # Expected result
  .
  └── minio-operator
      ├── manifests
      │   ├── console-env_v1_configmap.yaml
      │   ├── console-sa-secret_v1_secret.yaml
      │   ├── console_v1_service.yaml
      │   ├── minio-operator.clusterserviceversion.yaml
      │   ├── minio.min.io_tenants.yaml
      │   ├── operator_v1_service.yaml
      │   ├── sts.min.io_policybindings.yaml
      │   └── sts_v1_service.yaml
      └── metadata
          └── annotations.yaml

  3 directories, 9 files
3. Replace all image addresses in the minio-operator/manifests/minio-operator.clusterserviceversion.yaml file with the offline registry's image addresses.

  Before replacement:

  After replacement:

4. Generate a Dockerfile for building the bundle image.

  $ docker run --user root -v $PWD:/minio-operator -w /minio-operator ${OPM_IMG} alpha bundle generate --channels stable,beta -d /minio-operator/minio-operator/manifests -e stable -p minio-operator

  # Expected result
  .
  ├── bundle.Dockerfile
  └── minio-operator
      ├── manifests
      │   ├── console-env_v1_configmap.yaml
      │   ├── console-sa-secret_v1_secret.yaml
      │   ├── console_v1_service.yaml
      │   ├── minio-operator.clusterserviceversion.yaml
      │   ├── minio.min.io_tenants.yaml
      │   ├── operator_v1_service.yaml
      │   ├── sts.min.io_policybindings.yaml
      │   └── sts_v1_service.yaml
      └── metadata
          └── annotations.yaml

  3 directories, 10 files
5. Run the build command to build the bundle image and push it to the offline registry.

  # Set the new bundle image
  export OFFLINE_BUNDLE_IMG=10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3-offline

  $ docker build . -f bundle.Dockerfile -t ${OFFLINE_BUNDLE_IMG}

  $ docker push ${OFFLINE_BUNDLE_IMG}
6. Generate a Dockerfile for building the catalog image.

  $ docker run --user root -v $PWD:/minio-operator -w /minio-operator ${OPM_IMG} index add --bundles ${OFFLINE_BUNDLE_IMG} --generate --binary-image ${OPM_IMG} --skip-tls-verify

  # Expected result
  .
  ├── bundle.Dockerfile
  ├── database
  │   └── index.db
  ├── index.Dockerfile
  └── minio-operator
      ├── manifests
      │   ├── console-env_v1_configmap.yaml
      │   ├── console-sa-secret_v1_secret.yaml
      │   ├── console_v1_service.yaml
      │   ├── minio.min.io_tenants.yaml
      │   ├── minio-operator.clusterserviceversion.yaml
      │   ├── operator_v1_service.yaml
      │   ├── sts.min.io_policybindings.yaml
      │   └── sts_v1_service.yaml
      └── metadata
          └── annotations.yaml

  4 directories, 12 files
7. Build the catalog image.

  # Set the new catalog image
  export OFFLINE_CATALOG_IMG=10.5.14.200/release.daocloud.io/operator-framework/system-operator-index:v0.1.0-offline

  $ docker build . -f index.Dockerfile -t ${OFFLINE_CATALOG_IMG}

  $ docker push ${OFFLINE_CATALOG_IMG}
8. Go to Container Management and update the built-in catsrc image of the olm helm app (fill in the ${catalog-image} specified when building the catalog image).

9. After the update succeeds, the minio-operator component appears in the Operator Hub.

                      "},{"location":"admin/kpanda/permissions/cluster-ns-auth.html","title":"\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u6388\u6743","text":"

                      \u5bb9\u5668\u7ba1\u7406\u57fa\u4e8e\u5168\u5c40\u6743\u9650\u7ba1\u7406\u53ca\u5168\u5c40\u7528\u6237/\u7528\u6237\u7ec4\u7ba1\u7406\u5b9e\u73b0\u6388\u6743\uff0c\u5982\u9700\u4e3a\u7528\u6237\u6388\u4e88\u5bb9\u5668\u7ba1\u7406\u7684\u6700\u9ad8\u6743\u9650\uff08\u53ef\u4ee5\u521b\u5efa\u3001\u7ba1\u7406\u3001\u5220\u9664\u6240\u6709\u96c6\u7fa4\uff09\uff0c\u8bf7\u53c2\u89c1\u4ec0\u4e48\u662f\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u3002

                      "},{"location":"admin/kpanda/permissions/cluster-ns-auth.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u7ed9\u7528\u6237/\u7528\u6237\u7ec4\u6388\u6743\u4e4b\u524d\uff0c\u8bf7\u5b8c\u6210\u5982\u4e0b\u51c6\u5907\uff1a

                      • \u5df2\u5728\u5168\u5c40\u7ba1\u7406\u4e2d\u521b\u5efa\u4e86\u5f85\u6388\u6743\u7684\u7528\u6237/\u7528\u6237\u7ec4\uff0c\u8bf7\u53c2\u8003\u7528\u6237\u3002

                      • \u4ec5 Kpanda Owner \u53ca\u5f53\u524d\u96c6\u7fa4\u7684 Cluster Admin \u5177\u5907\u96c6\u7fa4\u6388\u6743\u80fd\u529b\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u6743\u9650\u8bf4\u660e\u3002

                      • \u4ec5 Kpanda Owner\u3001\u5f53\u524d\u96c6\u7fa4\u7684 Cluster Admin\uff0c\u5f53\u524d\u547d\u540d\u7a7a\u95f4\u7684 NS Admin \u5177\u5907\u547d\u540d\u7a7a\u95f4\u6388\u6743\u80fd\u529b\u3002

                      "},{"location":"admin/kpanda/permissions/cluster-ns-auth.html#_3","title":"\u96c6\u7fa4\u6388\u6743","text":"
                      1. \u7528\u6237\u767b\u5f55\u5e73\u53f0\u540e\uff0c\u70b9\u51fb\u5de6\u4fa7\u83dc\u5355\u680f \u5bb9\u5668\u7ba1\u7406 \u4e0b\u7684 \u6743\u9650\u7ba1\u7406 \uff0c\u9ed8\u8ba4\u4f4d\u4e8e \u96c6\u7fa4\u6743\u9650 \u9875\u7b7e\u3002

                      2. \u70b9\u51fb \u6dfb\u52a0\u6388\u6743 \u6309\u94ae\u3002

                      3. \u5728 \u6dfb\u52a0\u96c6\u7fa4\u6743\u9650 \u9875\u9762\u4e2d\uff0c\u9009\u62e9\u76ee\u6807\u96c6\u7fa4\u3001\u5f85\u6388\u6743\u7684\u7528\u6237/\u7528\u6237\u7ec4\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                        \u76ee\u524d\u4ec5\u652f\u6301\u7684\u96c6\u7fa4\u89d2\u8272\u4e3a Cluster Admin \uff0c\u8be6\u60c5\u6743\u9650\u53ef\u53c2\u8003\u6743\u9650\u8bf4\u660e\u3002\u5982\u9700\u8981\u7ed9\u591a\u4e2a\u7528\u6237/\u7528\u6237\u7ec4\u540c\u65f6\u8fdb\u884c\u6388\u6743\uff0c \u53ef\u70b9\u51fb \u6dfb\u52a0\u7528\u6237\u6743\u9650 \u8fdb\u884c\u591a\u6b21\u6dfb\u52a0\u3002

                      4. \u8fd4\u56de\u96c6\u7fa4\u6743\u9650\u7ba1\u7406\u9875\u9762\uff0c\u5c4f\u5e55\u51fa\u73b0\u6d88\u606f\uff1a \u6dfb\u52a0\u96c6\u7fa4\u6743\u9650\u6210\u529f \u3002

                      "},{"location":"admin/kpanda/permissions/cluster-ns-auth.html#_4","title":"\u547d\u540d\u7a7a\u95f4\u6388\u6743","text":"
                      1. \u7528\u6237\u767b\u5f55\u5e73\u53f0\u540e\uff0c\u70b9\u51fb\u5de6\u4fa7\u83dc\u5355\u680f \u5bb9\u5668\u7ba1\u7406 \u4e0b\u7684 \u6743\u9650\u7ba1\u7406 \uff0c\u70b9\u51fb \u547d\u540d\u7a7a\u95f4\u6743\u9650 \u9875\u7b7e\u3002

                      2. \u70b9\u51fb \u6dfb\u52a0\u6388\u6743 \u6309\u94ae\u3002\u5728 \u6dfb\u52a0\u547d\u540d\u7a7a\u95f4\u6743\u9650 \u9875\u9762\u4e2d\uff0c\u9009\u62e9\u76ee\u6807\u96c6\u7fa4\u3001\u76ee\u6807\u547d\u540d\u7a7a\u95f4\uff0c\u4ee5\u53ca\u5f85\u6388\u6743\u7684\u7528\u6237/\u7528\u6237\u7ec4\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                        \u76ee\u524d\u652f\u6301\u7684\u547d\u540d\u7a7a\u95f4\u89d2\u8272\u4e3a NS Admin\u3001NS Editor\u3001NS Viewer\uff0c\u8be6\u60c5\u6743\u9650\u53ef\u53c2\u8003\u6743\u9650\u8bf4\u660e\u3002\u5982\u9700\u7ed9\u591a\u4e2a\u7528\u6237/\u7528\u6237\u7ec4\u540c\u65f6\u8fdb\u884c\u6388\u6743\uff0c\u53ef\u70b9\u51fb \u6dfb\u52a0\u7528\u6237\u6743\u9650 \u8fdb\u884c\u591a\u6b21\u6dfb\u52a0\u3002\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u6743\u9650\u6388\u6743\u3002

                      3. \u8fd4\u56de\u547d\u540d\u7a7a\u95f4\u6743\u9650\u7ba1\u7406\u9875\u9762\uff0c\u5c4f\u5e55\u51fa\u73b0\u6d88\u606f\uff1a \u6dfb\u52a0\u96c6\u7fa4\u6743\u9650\u6210\u529f \u3002

                        Tip

                        \u540e\u7eed\u5982\u9700\u5220\u9664\u6216\u7f16\u8f91\u6743\u9650\uff0c\u53ef\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u9009\u62e9 \u7f16\u8f91 \u6216 \u5220\u9664 \u3002

                      "},{"location":"admin/kpanda/permissions/custom-kpanda-role.html","title":"\u589e\u52a0 Kpanda \u5185\u7f6e\u89d2\u8272\u6743\u9650\u70b9","text":"

                      \u8fc7\u53bb Kpanda \u5185\u7f6e\u89d2\u8272\u7684\u6743\u9650\u70b9\uff08rbac rules\uff09\u90fd\u662f\u63d0\u524d\u9884\u5b9a\u4e49\u597d\u7684\u4e14\u7528\u6237\u65e0\u6cd5\u4fee\u6539\uff0c\u56e0\u4e3a\u4ee5\u524d\u4fee\u6539\u5185\u7f6e\u89d2\u8272\u7684\u6743\u9650\u70b9\u4e4b\u540e\u4e5f\u4f1a\u88ab Kpanda \u63a7\u5236\u5668\u8fd8\u539f\u6210\u9884\u5b9a\u4e49\u7684\u6743\u9650\u70b9\u3002 \u4e3a\u4e86\u652f\u6301\u66f4\u52a0\u7075\u6d3b\u7684\u6743\u9650\u914d\u7f6e\uff0c\u6ee1\u8db3\u5bf9\u7cfb\u7edf\u89d2\u8272\u7684\u81ea\u5b9a\u4e49\u9700\u6c42\uff0c\u76ee\u524d Kpanda \u652f\u6301\u4e3a\u5185\u7f6e\u7cfb\u7edf\u89d2\u8272\uff08cluster admin\u3001ns admin\u3001ns editor\u3001ns viewer\uff09\u4fee\u6539\u6743\u9650\u70b9\u3002 \u4ee5\u4e0b\u793a\u4f8b\u6f14\u793a\u5982\u4f55\u65b0\u589e ns-viewer \u6743\u9650\u70b9\uff0c\u5c1d\u8bd5\u589e\u52a0\u53ef\u4ee5\u5220\u9664 Deployment \u7684\u6743\u9650\u3002\u5176\u4ed6\u6743\u9650\u70b9\u64cd\u4f5c\u7c7b\u4f3c\u3002

                      "},{"location":"admin/kpanda/permissions/custom-kpanda-role.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u9002\u7528\u4e8e\u5bb9\u5668\u7ba1\u7406 v0.27.0 \u53ca\u4ee5\u4e0a\u7248\u672c\u3002
                      • \u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                      • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 NS Viewer \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                      Note

• Permission points only need to be added on the Global Cluster; the Kpanda controller syncs them to all integrated sub-clusters, which takes some time to complete.
• Permission points can only be added on the Global Cluster; points added on a sub-cluster are overwritten by the Global Cluster's built-in role permission points.
• Permissions can only be appended via a ClusterRole carrying a fixed label; replacing or deleting permissions is not supported, nor is appending permissions via a Role. The mapping between built-in roles and the labels of user-created ClusterRoles is as follows:

                        cluster-admin: rbac.kpanda.io/role-template-cluster-admin: \"true\"\ncluster-edit: rbac.kpanda.io/role-template-cluster-edit: \"true\"\ncluster-view: rbac.kpanda.io/role-template-cluster-view: \"true\"\nns-admin: rbac.kpanda.io/role-template-ns-admin: \"true\"\nns-edit: rbac.kpanda.io/role-template-ns-edit: \"true\"\nns-view: rbac.kpanda.io/role-template-ns-view: \"true\"\n
                      "},{"location":"admin/kpanda/permissions/custom-kpanda-role.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u4f7f\u7528 admin \u6216\u8005 cluster admin \u6743\u9650\u7684\u7528\u6237\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d

                      2. \u6388\u6743 ns-viewer\uff0c\u7528\u6237\u6709\u8be5 namespace ns-view \u6743\u9650

                      3. \u5207\u6362\u767b\u5f55\u7528\u6237\u4e3a ns-viewer\uff0c\u6253\u5f00\u63a7\u5236\u53f0\u83b7\u53d6 ns-viewer \u7528\u6237\u5bf9\u5e94\u7684 token\uff0c\u4f7f\u7528 curl \u8bf7\u6c42\u5220\u9664\u4e0a\u8ff0\u7684 deployment nginx\uff0c\u53d1\u73b0\u65e0\u5220\u9664\u6743\u9650

                        [root@master-01 ~]# curl -k -X DELETE  'https://${URL}/apis/kpanda.io/v1alpha1/clusters/cluster-member/namespaces/default/deployments/nginx' -H 'authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJOU044MG9BclBRMzUwZ2VVU2ZyNy1xMEREVWY4MmEtZmJqR05uRE1sd1lFIn0.eyJleHAiOjE3MTU3NjY1NzksImlhdCI6MTcxNTY4MDE3OSwiYXV0aF90aW1lIjoxNzE1NjgwMTc3LCJqdGkiOiIxZjI3MzJlNC1jYjFhLTQ4OTktYjBiZC1iN2IxZWY1MzAxNDEiLCJpc3MiOiJodHRwczovLzEwLjYuMjAxLjIwMTozMDE0Ny9hdXRoL3JlYWxtcy9naGlwcG8iLCJhdWQiOiJfX2ludGVybmFsLWdoaXBwbyIsInN1YiI6ImMxZmMxM2ViLTAwZGUtNDFiYS05ZTllLWE5OGU2OGM0MmVmMCIsInR5cCI6IklEIiwiYXpwIjoiX19pbnRlcm5hbC1naGlwcG8iLCJzZXNzaW9uX3N0YXRlIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiYXRfaGFzaCI6IlJhTHoyQjlKQ2FNc1RrbGVMR3V6blEiLCJhY3IiOiIwIiwic2lkIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJncm91cHMiOltdLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJucy12aWV3ZXIiLCJsb2NhbGUiOiIifQ.As2ipMjfvzvgONAGlc9RnqOd3zMwAj82VXlcqcR74ZK9tAq3Q4ruQ1a6WuIfqiq8Kq4F77ljwwzYUuunfBli2zhU2II8zyxVhLoCEBu4pBVBd_oJyUycXuNa6HfQGnl36E1M7-_QG8b-_T51wFxxVb5b7SEDE1AvIf54NAlAr-rhDmGRdOK1c9CohQcS00ab52MD3IPiFFZ8_Iljnii-RpXKZoTjdcULJVn_uZNk_SzSUK-7MVWmPBK15m6sNktOMSf0pCObKWRqHd15JSe-2aA2PKBo1jBH3tHbOgZyMPdsLI0QdmEnKB5FiiOeMpwn_oHnT6IjT-BZlB18VkW8rA'\n{\"code\":7,\"message\":\"[RBAC] delete resources(deployments: nginx) is forbidden for user(ns-viewer) in cluster(cluster-member)\",\"details\":[]}[root@master-01 ~]#\n[root@master-01 ~]#\n
4. Create the following ClusterRole on the global service cluster:

                        apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: append-ns-view # (1)!\n  labels:\n    rbac.kpanda.io/role-template-ns-view: \"true\" # (2)!\nrules:\n  - apiGroups: [ \"apps\" ]\n    resources: [ \"deployments\" ]\n    verbs: [ \"delete\" ]\n
  1. This field may be set to any value, as long as it is unique and conforms to the Kubernetes resource naming rules.
  2. Note that different labels must be applied when adding permissions to different roles.
5. Wait for the Kpanda controller to merge the user-created permissions into the built-in role ns-viewer, then check whether the corresponding built-in role now contains the permission point added in the previous step:

                        [root@master-01 ~]# kubectl get clusterrole role-template-ns-view -oyaml|grep deployments -C 10|tail -n 6\n
                        - apiGroups:\n  - apps\n  resources:\n  - deployments\n  verbs:\n  - delete\n

6. Use curl again to request deletion of the deployment nginx; this time the deletion succeeds. That is, ns-viewer has successfully gained the permission to delete Deployments.

                        [root@master-01 ~]# curl -k -X DELETE  'https://${URL}/apis/kpanda.io/v1alpha1/clusters/cluster-member/namespaces/default/deployments/nginx' -H 'authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJOU044MG9BclBRMzUwZ2VVU2ZyNy1xMEREVWY4MmEtZmJqR05uRE1sd1lFIn0.eyJleHAiOjE3MTU3NjY1NzksImlhdCI6MTcxNTY4MDE3OSwiYXV0aF90aW1lIjoxNzE1NjgwMTc3LCJqdGkiOiIxZjI3MzJlNC1jYjFhLTQ4OTktYjBiZC1iN2IxZWY1MzAxNDEiLCJpc3MiOiJodHRwczovLzEwLjYuMjAxLjIwMTozMDE0Ny9hdXRoL3JlYWxtcy9naGlwcG8iLCJhdWQiOiJfX2ludGVybmFsLWdoaXBwbyIsInN1YiI6ImMxZmMxM2ViLTAwZGUtNDFiYS05ZTllLWE5OGU2OGM0MmVmMCIsInR5cCI6IklEIiwiYXpwIjoiX19pbnRlcm5hbC1naGlwcG8iLCJzZXNzaW9uX3N0YXRlIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiYXRfaGFzaCI6IlJhTHoyQjlKQ2FNc1RrbGVMR3V6blEiLCJhY3IiOiIwIiwic2lkIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJncm91cHMiOltdLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJucy12aWV3ZXIiLCJsb2NhbGUiOiIifQ.As2ipMjfvzvgONAGlc9RnqOd3zMwAj82VXlcqcR74ZK9tAq3Q4ruQ1a6WuIfqiq8Kq4F77ljwwzYUuunfBli2zhU2II8zyxVhLoCEBu4pBVBd_oJyUycXuNa6HfQGnl36E1M7-_QG8b-_T51wFxxVb5b7SEDE1AvIf54NAlAr-rhDmGRdOK1c9CohQcS00ab52MD3IPiFFZ8_Iljnii-RpXKZoTjdcULJVn_uZNk_SzSUK-7MVWmPBK15m6sNktOMSf0pCObKWRqHd15JSe-2aA2PKBo1jBH3tHbOgZyMPdsLI0QdmEnKB5FiiOeMpwn_oHnT6IjT-BZlB18VkW8rA'\n
                      "},{"location":"admin/kpanda/permissions/permission-brief.html","title":"\u5bb9\u5668\u7ba1\u7406\u6743\u9650\u8bf4\u660e","text":"

                      \u5bb9\u5668\u7ba1\u7406\u6743\u9650\u57fa\u4e8e\u5168\u5c40\u6743\u9650\u7ba1\u7406\u4ee5\u53ca Kubernetes RBAC \u6743\u9650\u7ba1\u7406\u6253\u9020\u7684\u591a\u7ef4\u5ea6\u6743\u9650\u7ba1\u7406\u4f53\u7cfb\u3002 \u652f\u6301\u96c6\u7fa4\u7ea7\u3001\u547d\u540d\u7a7a\u95f4\u7ea7\u7684\u6743\u9650\u63a7\u5236\uff0c\u5e2e\u52a9\u7528\u6237\u4fbf\u6377\u7075\u6d3b\u5730\u5bf9\u79df\u6237\u4e0b\u7684 IAM \u7528\u6237\u3001\u7528\u6237\u7ec4\uff08\u7528\u6237\u7684\u96c6\u5408\uff09\u8bbe\u5b9a\u4e0d\u540c\u7684\u64cd\u4f5c\u6743\u9650\u3002

                      "},{"location":"admin/kpanda/permissions/permission-brief.html#_2","title":"\u96c6\u7fa4\u6743\u9650","text":"

                      \u96c6\u7fa4\u6743\u9650\u57fa\u4e8e Kubernetes RBAC \u7684 ClusterRolebinding \u6388\u6743\uff0c\u96c6\u7fa4\u6743\u9650\u8bbe\u7f6e\u53ef\u8ba9\u7528\u6237/\u7528\u6237\u7ec4\u5177\u5907\u96c6\u7fa4\u76f8\u5173\u6743\u9650\u3002 \u76ee\u524d\u7684\u9ed8\u8ba4\u96c6\u7fa4\u89d2\u8272\u4e3a Cluster Admin \uff08\u4e0d\u5177\u5907\u96c6\u7fa4\u7684\u521b\u5efa\u3001\u5220\u9664\u6743\u9650\uff09\u3002

                      "},{"location":"admin/kpanda/permissions/permission-brief.html#cluster-admin","title":"Cluster Admin","text":"

Cluster Admin has the following permissions:

• Manage, edit, and view the corresponding cluster

• Manage, edit, and view all workloads under its namespaces and all resources within the cluster

• Grant users cluster-level roles (Cluster Admin, NS Admin, NS Editor, NS Viewer)

A YAML example of this cluster role:

                      apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:49Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-cluster-admin\n  resourceVersion: \"15168\"\n  uid: f8f86d42-d5ef-47aa-b284-097615795076\nrules:\n- apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n- nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'\n
                      "},{"location":"admin/kpanda/permissions/permission-brief.html#_3","title":"\u547d\u540d\u7a7a\u95f4\u6743\u9650","text":"

                      \u547d\u540d\u7a7a\u95f4\u6743\u9650\u662f\u57fa\u4e8e Kubernetes RBAC \u80fd\u529b\u7684\u6388\u6743\uff0c\u53ef\u4ee5\u5b9e\u73b0\u4e0d\u540c\u7684\u7528\u6237/\u7528\u6237\u7ec4\u5bf9\u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u8d44\u6e90\u5177\u6709\u4e0d\u540c\u7684\u64cd\u4f5c\u6743\u9650(\u5305\u62ec Kubernetes API \u6743\u9650)\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\uff1aKubernetes RBAC\u3002\u76ee\u524d\u5bb9\u5668\u7ba1\u7406\u7684\u9ed8\u8ba4\u89d2\u8272\u4e3a\uff1aNS Admin\u3001NS Editor\u3001NS Viewer\u3002

                      "},{"location":"admin/kpanda/permissions/permission-brief.html#ns-admin","title":"NS Admin","text":"

NS Admin has the following permissions:

• View the corresponding namespace
• Manage, edit, and view all workloads and custom resources under the namespace
• Grant users the corresponding namespace roles (NS Editor, NS Viewer)

A YAML example of this cluster role:

                      apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:49Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-admin\n  resourceVersion: \"15173\"\n  uid: 69f64c7e-70e7-4c7c-a3e0-053f507f2bc3\nrules:\n- apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n- nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'    \n
                      "},{"location":"admin/kpanda/permissions/permission-brief.html#ns-editor","title":"NS Editor","text":"

NS Editor has the following permissions:

• View the namespaces it has permission on
• Manage, edit, and view all workloads under those namespaces
A YAML example of this cluster role:
                      apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:50Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-edit\n  resourceVersion: \"15175\"\n  uid: ca9e690e-96c0-4978-8915-6e4c00c748fe\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - configmaps\n  - endpoints\n  - persistentvolumeclaims\n  - persistentvolumeclaims/status\n  - pods\n  - replicationcontrollers\n  - replicationcontrollers/scale\n  - serviceaccounts\n  - services\n  - services/status\n  verbs:\n  - '*'\n- apiGroups:\n  - \"\"\n  resources:\n  - bindings\n  - events\n  - limitranges\n  - namespaces/status\n  - pods/log\n  - pods/status\n  - replicationcontrollers/status\n  - resourcequotas\n  - resourcequotas/status\n  verbs:\n  - '*'\n- apiGroups:\n  - \"\"\n  resources:\n  - namespaces\n  verbs:\n  - '*'\n- apiGroups:\n  - apps\n  resources:\n  - controllerrevisions\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - statefulsets\n  - statefulsets/scale\n  - statefulsets/status\n  verbs:\n  - '*'\n- apiGroups:\n  - autoscaling\n  resources:\n  - horizontalpodautoscalers\n  - horizontalpodautoscalers/status\n  verbs:\n  - '*'\n- apiGroups:\n  - batch\n  resources:\n  - cronjobs\n  - cronjobs/status\n  - jobs\n  - jobs/status\n  verbs:\n  - '*'\n- apiGroups:\n  - extensions\n  resources:\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - replicationcontrollers/scale\n  verbs:\n  - '*'\n- apiGroups:\n  - policy\n  resources:\n  - poddisruptionbudgets\n  - poddisruptionbudgets/status\n  verbs:\n  - '*'\n- apiGroups:\n  - networking.k8s.io\n  resources:\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  verbs:\n  - '*'      \n
                      "},{"location":"admin/kpanda/permissions/permission-brief.html#ns-viewer","title":"NS Viewer","text":"

NS Viewer has the following permissions:

• View the corresponding namespace
• View all workloads and custom resources under the corresponding namespace
A YAML example of this cluster role:
                      apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:50Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-view\n  resourceVersion: \"15183\"\n  uid: 853888fd-6ee8-42ac-b91e-63923918baf8\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - configmaps\n  - endpoints\n  - persistentvolumeclaims\n  - persistentvolumeclaims/status\n  - pods\n  - replicationcontrollers\n  - replicationcontrollers/scale\n  - serviceaccounts\n  - services\n  - services/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - \"\"\n  resources:\n  - bindings\n  - events\n  - limitranges\n  - namespaces/status\n  - pods/log\n  - pods/status\n  - replicationcontrollers/status\n  - resourcequotas\n  - resourcequotas/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - \"\"\n  resources:\n  - namespaces\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - apps\n  resources:\n  - controllerrevisions\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - statefulsets\n  - statefulsets/scale\n  - statefulsets/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - autoscaling\n  resources:\n  - horizontalpodautoscalers\n  - horizontalpodautoscalers/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - batch\n  resources:\n  - cronjobs\n  - cronjobs/status\n  - jobs\n  - jobs/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - extensions\n  resources:\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - replicationcontrollers/scale\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - policy\n  resources:\n  - poddisruptionbudgets\n  - poddisruptionbudgets/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - networking.k8s.io\n  resources:\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  verbs:\n  - get\n  - list\n  - watch \n
                      "},{"location":"admin/kpanda/permissions/permission-brief.html#faq","title":"\u6743\u9650 FAQ","text":"
                      1. \u5168\u5c40\u6743\u9650\u548c\u5bb9\u5668\u7ba1\u7406\u6743\u9650\u7ba1\u7406\u7684\u5173\u7cfb\uff1f

                        \u7b54\uff1a\u5168\u5c40\u6743\u9650\u4ec5\u6388\u6743\u4e3a\u7c97\u7c92\u5ea6\u6743\u9650\uff0c\u53ef\u7ba1\u7406\u6240\u6709\u96c6\u7fa4\u7684\u521b\u5efa\u3001\u7f16\u8f91\u3001\u5220\u9664\uff1b\u800c\u5bf9\u4e8e\u7ec6\u7c92\u5ea6\u7684\u6743\u9650\uff0c\u5982\u5355\u4e2a\u96c6\u7fa4\u7684\u7ba1\u7406\u6743\u9650\uff0c\u5355\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u7ba1\u7406\u3001\u7f16\u8f91\u3001\u5220\u9664\u6743\u9650\uff0c\u9700\u8981\u57fa\u4e8e Kubernetes RBAC \u7684\u5bb9\u5668\u7ba1\u7406\u6743\u9650\u8fdb\u884c\u5b9e\u73b0\u3002 \u4e00\u822c\u6743\u9650\u7684\u7528\u6237\u4ec5\u9700\u8981\u5728\u5bb9\u5668\u7ba1\u7406\u4e2d\u8fdb\u884c\u6388\u6743\u5373\u53ef\u3002

                      2. \u76ee\u524d\u4ec5\u652f\u6301\u56db\u4e2a\u9ed8\u8ba4\u89d2\u8272\uff0c\u540e\u53f0\u81ea\u5b9a\u4e49\u89d2\u8272\u7684 RoleBinding \u4ee5\u53ca ClusterRoleBinding \uff08Kubernetes \u7ec6\u7c92\u5ea6\u7684 RBAC\uff09\u662f\u5426\u4e5f\u80fd\u751f\u6548\uff1f

                        \u7b54\uff1a\u76ee\u524d\u81ea\u5b9a\u4e49\u6743\u9650\u6682\u65f6\u65e0\u6cd5\u901a\u8fc7\u56fe\u5f62\u754c\u9762\u8fdb\u884c\u7ba1\u7406\uff0c\u4f46\u662f\u901a\u8fc7 kubectl \u521b\u5efa\u7684\u6743\u9650\u89c4\u5219\u540c\u6837\u80fd\u751f\u6548\u3002
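  For example, a rule created directly with kubectl, such as the following RoleBinding (a minimal sketch; the user name, binding name, and namespace are illustrative), takes effect even though it is not visible in the UI:

  apiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: custom-view-binding   # hypothetical name\n  namespace: default\nsubjects:\n- kind: User\n  name: example-user   # hypothetical user\n  apiGroup: rbac.authorization.k8s.io\nroleRef:\n  kind: ClusterRole\n  name: view   # built-in Kubernetes \"view\" role\n  apiGroup: rbac.authorization.k8s.io\n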

                      "},{"location":"admin/kpanda/scale/create-hpa.html","title":"\u57fa\u4e8e\u5185\u7f6e\u6307\u6807\u521b\u5efa HPA","text":"

                      \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301 Pod \u8d44\u6e90\u57fa\u4e8e\u6307\u6807\u8fdb\u884c\u5f39\u6027\u4f38\u7f29\uff08Horizontal Pod Autoscaling, HPA\uff09\u3002 \u7528\u6237\u53ef\u4ee5\u901a\u8fc7\u8bbe\u7f6e CPU \u5229\u7528\u7387\u3001\u5185\u5b58\u7528\u91cf\u53ca\u81ea\u5b9a\u4e49\u6307\u6807\u6307\u6807\u6765\u52a8\u6001\u8c03\u6574 Pod \u8d44\u6e90\u7684\u526f\u672c\u6570\u91cf\u3002 \u4f8b\u5982\uff0c\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u8bbe\u7f6e\u57fa\u4e8e CPU \u5229\u7528\u7387\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u540e\uff0c\u5f53 Pod \u7684 CPU \u5229\u7528\u7387\u8d85\u8fc7/\u4f4e\u4e8e\u60a8\u8bbe\u7f6e\u7684\u6307\u6807\u9600\u503c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u63a7\u5236\u5668\u5c06\u4f1a\u81ea\u52a8\u589e\u52a0/\u8f83\u5c11 Pod \u526f\u672c\u6570\u3002

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u57fa\u4e8e\u5185\u7f6e\u6307\u6807\u7684\u5f39\u6027\u4f38\u7f29\u3002

                      Note

1. HPA applies only to Deployments and StatefulSets, and each workload can have only one HPA.
2. If the HPA policy is based on CPU utilization, resource limits (Limit) must be configured for the workload in advance; otherwise the CPU utilization cannot be calculated.
3. If built-in metrics and multiple custom metrics are used at the same time, HPA computes the required replica count for each metric separately and scales to the largest result (without exceeding the maximum replica count configured in the HPA policy).
                      "},{"location":"admin/kpanda/scale/create-hpa.html#_1","title":"\u5185\u7f6e\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565","text":"

                      \u7cfb\u7edf\u5185\u7f6e\u4e86 CPU \u548c\u5185\u5b58\u4e24\u79cd\u5f39\u6027\u4f38\u7f29\u6307\u6807\u4ee5\u6ee1\u8db3\u7528\u6237\u7684\u57fa\u7840\u4e1a\u52a1\u4f7f\u7528\u573a\u666f\u3002

                      "},{"location":"admin/kpanda/scale/create-hpa.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u5728\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u5185\u7f6e\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                      • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u521b\u5efa\u6216\u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u521b\u5efa\u3002

                      • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                      • \u5df2\u5b8c\u6210 metrics-server \u63d2\u4ef6\u5b89\u88c5 \u3002

                      "},{"location":"admin/kpanda/scale/create-hpa.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                      \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u5185\u7f6e\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u3002

1. Click Clusters in the left navigation bar to go to the cluster list, then click a cluster name to enter the Cluster Details page.

2. On the cluster details page, click Workloads in the left navigation bar to open the workload list, then click a workload name to enter the Workload Details page.

3. Click the Auto Scaling tab to view the current autoscaling configuration of the cluster.

4. After confirming that the metrics-server plugin is installed in the cluster and running normally, click the Create Scaling button.

5. Configure the built-in metric autoscaling policy parameters (a reference manifest is sketched after these steps).

  • Policy name: enter a name for the autoscaling policy. The name may be at most 63 characters, may contain only lowercase letters, digits, and hyphens ("-"), and must start and end with a lowercase letter or digit, for example hpa-my-dep.
  • Namespace: the namespace the workload belongs to.
  • Workload: the workload object to scale.
  • Target CPU utilization: the CPU usage of the Pods under the workload, computed as the actual usage of all Pods under the workload divided by the workload's requested (request) value. When actual CPU usage is above/below the target value, the system automatically increases/decreases the number of Pod replicas.
  • Target memory usage: the memory usage of the Pods under the workload. When actual memory usage is above/below the target value, the system automatically increases/decreases the number of Pod replicas.
  • Replica range: the autoscaling range for the number of Pod replicas. The default range is 1 - 10.
6. After configuring the parameters, click OK; you are automatically returned to the autoscaling details page. Click ┇ on the right side of the list to edit or delete the policy, or to view related events.
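For reference, the policy configured above corresponds roughly to a standard Kubernetes HorizontalPodAutoscaler object like the following (a minimal sketch; the workload name and target value are illustrative):

apiVersion: autoscaling/v2\nkind: HorizontalPodAutoscaler\nmetadata:\n  name: hpa-my-dep\n  namespace: default\nspec:\n  scaleTargetRef:   # the workload to scale\n    apiVersion: apps/v1\n    kind: Deployment\n    name: my-dep   # hypothetical workload name\n  minReplicas: 1    # replica range 1 - 10\n  maxReplicas: 10\n  metrics:\n  - type: Resource\n    resource:\n      name: cpu\n      target:\n        type: Utilization\n        averageUtilization: 80   # target CPU utilization, in percent\n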

                      "},{"location":"admin/kpanda/scale/create-vpa.html","title":"\u521b\u5efa VPA","text":"

                      \u5bb9\u5668\u5782\u76f4\u6269\u7f29\u5bb9\u7b56\u7565\uff08Vertical Pod Autoscaler, VPA\uff09\u901a\u8fc7\u76d1\u63a7 Pod \u5728\u4e00\u6bb5\u65f6\u95f4\u5185\u7684\u8d44\u6e90\u7533\u8bf7\u548c\u7528\u91cf\uff0c \u8ba1\u7b97\u51fa\u5bf9\u8be5 Pod \u800c\u8a00\u6700\u9002\u5408\u7684 CPU \u548c\u5185\u5b58\u8bf7\u6c42\u503c\u3002\u4f7f\u7528 VPA \u53ef\u4ee5\u66f4\u52a0\u5408\u7406\u5730\u4e3a\u96c6\u7fa4\u4e0b\u6bcf\u4e2a Pod \u5206\u914d\u8d44\u6e90\uff0c\u63d0\u9ad8\u96c6\u7fa4\u7684\u6574\u4f53\u8d44\u6e90\u5229\u7528\u7387\uff0c\u907f\u514d\u96c6\u7fa4\u8d44\u6e90\u6d6a\u8d39\u3002

                      \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u901a\u8fc7\u5bb9\u5668\u5782\u76f4\u6269\u7f29\u5bb9\u7b56\u7565\uff08Vertical Pod Autoscaler, VPA\uff09\uff0c\u57fa\u4e8e\u6b64\u529f\u80fd\u53ef\u4ee5\u6839\u636e\u5bb9\u5668\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\u52a8\u6001\u8c03\u6574 Pod \u8bf7\u6c42\u503c\u3002 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u901a\u8fc7\u624b\u52a8\u548c\u81ea\u52a8\u4e24\u79cd\u65b9\u5f0f\u6765\u4fee\u6539\u8d44\u6e90\u8bf7\u6c42\u503c\uff0c\u60a8\u53ef\u4ee5\u6839\u636e\u5b9e\u9645\u9700\u8981\u8fdb\u884c\u914d\u7f6e\u3002

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e Pod \u5782\u76f4\u4f38\u7f29\u3002

                      Warning

Using VPA to modify a Pod's resource requests triggers a Pod restart. Due to limitations of Kubernetes itself, the Pod may be scheduled onto a different node after restarting.

                      "},{"location":"admin/kpanda/scale/create-vpa.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u5782\u76f4\u4f38\u7f29\u7b56\u7565\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                      • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u3001\u7528\u6237\u3001\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u6216\u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u3002

                      • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                      • \u5f53\u524d\u96c6\u7fa4\u5df2\u7ecf\u5b89\u88c5 metrics-server \u548c VPA \u63d2\u4ef6\u3002

                      "},{"location":"admin/kpanda/scale/create-vpa.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                      \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u5185\u7f6e\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u3002

1. Find the target cluster in the Cluster List and click its name.

2. Click Workloads in the left navigation bar, find the workload for which to create a VPA, and click its name.

3. Click the Auto Scaling tab to view the cluster's current autoscaling configuration, and confirm that the relevant plugins are installed and running normally.

4. Click the Create Scaling button and configure the VPA policy parameters (a reference manifest is sketched after these steps).

  • Policy name: enter a name for the vertical scaling policy. The name may be at most 63 characters, may contain only lowercase letters, digits, and hyphens ("-"), and must start and end with a lowercase letter or digit, for example vpa-my-dep.
  • Scaling mode: how the CPU and memory request values are modified; vertical scaling currently supports Manual and Auto modes.
    • Manual: after the policy computes the recommended resource values, the user must modify the application's resource quota by hand.
    • Auto: the policy automatically computes and applies the application's resource quota.
  • Target container: select the container to scale vertically.
5. After configuring the parameters, click OK; you are automatically returned to the autoscaling details page. Click ┇ on the right side of the list to edit or delete the policy.
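For reference, a manual-mode policy roughly corresponds to a VerticalPodAutoscaler object like the following (a minimal sketch, assuming the VPA CRDs are installed; the workload name is illustrative). updateMode: "Off" only produces recommendations, while "Auto" applies them automatically:

apiVersion: autoscaling.k8s.io/v1\nkind: VerticalPodAutoscaler\nmetadata:\n  name: vpa-my-dep\n  namespace: default\nspec:\n  targetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: my-dep   # hypothetical workload name\n  updatePolicy:\n    updateMode: \"Off\"   # manual mode: recommendations only; \"Auto\" applies them\n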

                      Note

By default, the value of --min-replicas is 2, meaning the VPA only takes effect when the replica count is greater than 1. This default behavior can be changed by modifying the --min-replicas argument of the updater:

spec:\n  containers:\n  - name: updater\n    args:\n    - \"--min-replicas=2\"\n
                      "},{"location":"admin/kpanda/scale/custom-hpa.html","title":"\u57fa\u4e8e\u81ea\u5b9a\u4e49\u6307\u6807\u521b\u5efa HPA","text":"

                      \u5f53\u7cfb\u7edf\u5185\u7f6e\u7684 CPU \u548c\u5185\u5b58\u4e24\u79cd\u6307\u6807\u4e0d\u80fd\u6ee1\u8db3\u60a8\u4e1a\u52a1\u7684\u5b9e\u9645\u9700\u6c42\u65f6\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u914d\u7f6e ServiceMonitoring \u6765\u6dfb\u52a0\u81ea\u5b9a\u4e49\u6307\u6807\uff0c \u5e76\u57fa\u4e8e\u81ea\u5b9a\u4e49\u6307\u6807\u5b9e\u73b0\u5f39\u6027\u4f38\u7f29\u3002\u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u57fa\u4e8e\u81ea\u5b9a\u4e49\u6307\u6807\u8fdb\u884c\u5f39\u6027\u4f38\u7f29\u3002

                      Note

1. HPA applies only to Deployments and StatefulSets, and each workload can have only one HPA.
2. If built-in metrics and multiple custom metrics are used at the same time, HPA computes the required replica count for each metric separately and scales to the largest result (without exceeding the maximum replica count configured in the HPA policy).
                      "},{"location":"admin/kpanda/scale/custom-hpa.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u5728\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u81ea\u5b9a\u4e49\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c \u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762
                      • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u521b\u5efa\u6216\u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u521b\u5efa
                      • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c \u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743
                      • \u5df2\u5b89\u88c5 metrics-server \u63d2\u4ef6
                      • \u5df2\u5b89\u88c5 insight-agent \u63d2\u4ef6
                      • \u5df2\u5b89\u88c5 Prometheus-adapter \u63d2\u4ef6
                      "},{"location":"admin/kpanda/scale/custom-hpa.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                      \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u3002

1. Click Clusters in the left navigation bar to go to the cluster list, then click a cluster name to enter the Cluster Details page.

2. On the cluster details page, click Workloads in the left navigation bar to open the workload list, then click a workload name to enter the Workload Details page.

3. Click the Auto Scaling tab to view the current autoscaling configuration of the cluster.

4. After confirming that the metrics-server, Insight, and Prometheus-adapter plugins are installed in the cluster and running normally, click the Create Scaling button.

                        Note

If the relevant plugins are not installed or are in an abnormal state, the entry for creating a custom metric autoscaling policy will not be visible on the page.

5. Configure the custom metric autoscaling policy parameters (a reference manifest is sketched after this list).

  • Policy name: enter a name for the autoscaling policy. The name may be at most 63 characters, may contain only lowercase letters, digits, and hyphens ("-"), and must start and end with a lowercase letter or digit, for example hpa-my-dep.
  • Namespace: the namespace the workload belongs to.
  • Workload: the workload object to scale.
  • Resource type: the type of custom metric to monitor, either Pod or Service.
  • Metric: the name of a custom metric created with ServiceMonitoring, or the name of a system built-in custom metric.
  • Data type: the method used to compute the metric value, either target value or target average value; when the resource type is Pod, only target average value is supported.
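For reference, such a policy roughly corresponds to an autoscaling/v2 HorizontalPodAutoscaler using a Pods-type metric, like the following sketch for the httpserver example below (the target average value is illustrative):

apiVersion: autoscaling/v2\nkind: HorizontalPodAutoscaler\nmetadata:\n  name: httpserver\n  namespace: httpserver\nspec:\n  scaleTargetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: httpserver\n  minReplicas: 1\n  maxReplicas: 10\n  metrics:\n  - type: Pods   # resource type Pod uses a target average value\n    pods:\n      metric:\n        name: httpserver_requests_qps\n      target:\n        type: AverageValue\n        averageValue: \"50\"   # illustrative target average QPS per Pod\n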
                      "},{"location":"admin/kpanda/scale/custom-hpa.html#_3","title":"\u64cd\u4f5c\u793a\u4f8b","text":"

                      \u672c\u6848\u4f8b\u4ee5 Golang \u4e1a\u52a1\u7a0b\u5e8f\u4e3a\u4f8b\uff0c\u8be5\u793a\u4f8b\u7a0b\u5e8f\u66b4\u9732\u4e86 httpserver_requests_total \u6307\u6807\uff0c\u5e76\u8bb0\u5f55 HTTP \u7684\u8bf7\u6c42\uff0c\u901a\u8fc7\u8be5\u6307\u6807\u53ef\u4ee5\u8ba1\u7b97\u51fa\u4e1a\u52a1\u7a0b\u5e8f\u7684 QPS \u503c\u3002

                      "},{"location":"admin/kpanda/scale/custom-hpa.html#_4","title":"\u90e8\u7f72\u4e1a\u52a1\u7a0b\u5e8f","text":"

                      \u4f7f\u7528 Deployment \u90e8\u7f72\u4e1a\u52a1\u7a0b\u5e8f\uff1a

                      apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: httpserver\n  namespace: httpserver\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: httpserver\n  template:\n    metadata:\n      labels:\n        app: httpserver\n    spec:\n      containers:\n      - name: httpserver\n        image: registry.imroc.cc/test/httpserver:custom-metrics\n        imagePullPolicy: Always\n---\n\napiVersion: v1\nkind: Service\nmetadata:\n  name: httpserver\n  namespace: httpserver\n  labels:\n    app: httpserver\n  annotations:\n    prometheus.io/scrape: \"true\"\n    prometheus.io/path: \"/metrics\"\n    prometheus.io/port: \"http\"\nspec:\n  type: ClusterIP\n  ports:\n  - port: 80\n    protocol: TCP\n    name: http\n  selector:\n    app: httpserver\n
                      "},{"location":"admin/kpanda/scale/custom-hpa.html#prometheus","title":"Prometheus \u91c7\u96c6\u4e1a\u52a1\u76d1\u63a7","text":"

                      \u82e5\u5df2\u5b89\u88c5 insight-agent\uff0c\u53ef\u4ee5\u901a\u8fc7\u521b\u5efa ServiceMonitor \u7684 CRD \u5bf9\u8c61\u914d\u7f6e Prometheus\u3002

                      \u64cd\u4f5c\u6b65\u9aa4\uff1a\u5728 \u96c6\u7fa4\u8be6\u60c5 -> \u81ea\u5b9a\u4e49\u8d44\u6e90 \u641c\u7d22\u201cservicemonitors.monitoring.coreos.com\"\uff0c\u70b9\u51fb\u540d\u79f0\u8fdb\u5165\u8be6\u60c5\u3002 \u901a\u8fc7\u521b\u5efa YAML\uff0c\u5728\u547d\u540d\u7a7a\u95f4 httpserver \u4e0b\u521b\u5efa\u5982\u4e0b\u793a\u4f8b\u7684 CRD\uff1a

                      apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: httpserver\n  namespace: httpserver\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  endpoints:\n  - port: http\n    interval: 5s\n  namespaceSelector:\n    matchNames:\n    - httpserver\n  selector:\n    matchLabels:\n      app: httpserver\n

                      Note

If Prometheus was installed via insight, the ServiceMonitor must carry the label operator.insight.io/managed-by: insight; this label is not required if Prometheus was installed another way.

                      "},{"location":"admin/kpanda/scale/custom-hpa.html#prometheus-adapter","title":"\u5728 prometheus-adapter \u4e2d\u914d\u7f6e\u6307\u6807\u89c4\u5219","text":"

                      \u64cd\u4f5c\u6b65\u9aa4\uff1a\u5728 \u96c6\u7fa4\u8be6\u60c5 -> Helm \u5e94\u7528 \u641c\u7d22 \u201cprometheus-adapter\"\uff0c\u901a\u8fc7\u64cd\u4f5c\u680f\u8fdb\u5165\u66f4\u65b0\u9875\u9762\uff0c\u5728 YAML \u4e2d\u914d\u7f6e\u81ea\u5b9a\u4e49\u6307\u6807\uff0c\u793a\u4f8b\u5982\u4e0b\uff1a

                      rules:\n  custom:\n    - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)\n      name:\n        as: httpserver_requests_qps\n        matches: httpserver_requests_total\n      resources:\n        template: <<.Resource>>\n      seriesQuery: httpserver_requests_total\n
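After the adapter picks up the rule, you can verify that the new metric is exposed through the custom metrics API, for example (a sketch; adjust the namespace to your environment):

# List the exposed custom metric for the Pods in the httpserver namespace\nkubectl get --raw \"/apis/custom.metrics.k8s.io/v1beta1/namespaces/httpserver/pods/*/httpserver_requests_qps\"\n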

                      "},{"location":"admin/kpanda/scale/custom-hpa.html#_5","title":"\u521b\u5efa\u81ea\u5b9a\u4e49\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u53c2\u6570","text":"

                      \u6309\u7167\u4e0a\u8ff0\u6b65\u9aa4\u5728 Deployment \u4e2d\u627e\u5230\u5e94\u7528\u7a0b\u5e8f httpserver \u5e76\u901a\u8fc7\u81ea\u5b9a\u4e49\u6307\u6807\u521b\u5efa\u5f39\u6027\u4f38\u7f29\u3002

                      "},{"location":"admin/kpanda/scale/hpa-cronhpa-compatibility-rules.html","title":"HPA \u548c CronHPA \u517c\u5bb9\u89c4\u5219","text":"

                      HPA \u5168\u79f0\u4e3a HorizontalPodAutoscaler\uff0c\u5373 Pod \u6c34\u5e73\u81ea\u52a8\u4f38\u7f29\u3002

                      CronHPA \u5168\u79f0\u4e3a Cron HorizontalPodAutoscaler\uff0c\u5373 Pod \u5b9a\u65f6\u7684\u6c34\u5e73\u81ea\u52a8\u4f38\u7f29\u3002

                      "},{"location":"admin/kpanda/scale/hpa-cronhpa-compatibility-rules.html#cronhpa-hpa","title":"CronHPA \u548c HPA \u517c\u5bb9\u51b2\u7a81","text":"

                      \u5b9a\u65f6\u4f38\u7f29 CronHPA \u901a\u8fc7\u8bbe\u7f6e\u5b9a\u65f6\u7684\u65b9\u5f0f\u89e6\u53d1\u5bb9\u5668\u7684\u6c34\u5e73\u526f\u672c\u4f38\u7f29\u3002\u4e3a\u4e86\u9632\u6b62\u7a81\u53d1\u7684\u6d41\u91cf\u51b2\u51fb\u7b49\u72b6\u51b5\uff0c \u60a8\u53ef\u80fd\u5df2\u7ecf\u914d\u7f6e HPA \u4fdd\u969c\u5e94\u7528\u7684\u6b63\u5e38\u8fd0\u884c\u3002\u5982\u679c\u540c\u65f6\u68c0\u6d4b\u5230\u4e86 HPA \u548c CronHPA \u7684\u5b58\u5728\uff0c \u7531\u4e8e CronHPA \u548c HPA \u76f8\u4e92\u72ec\u7acb\u65e0\u6cd5\u611f\u77e5\uff0c\u5c31\u4f1a\u51fa\u73b0\u4e24\u4e2a\u63a7\u5236\u5668\u5404\u81ea\u5de5\u4f5c\uff0c\u540e\u6267\u884c\u7684\u64cd\u4f5c\u4f1a\u8986\u76d6\u5148\u6267\u884c\u7684\u64cd\u4f5c\u3002

                      \u5bf9\u6bd4 CronHPA \u548c HPA \u7684\u5b9a\u4e49\u6a21\u677f\uff0c\u53ef\u4ee5\u89c2\u5bdf\u5230\u4ee5\u4e0b\u51e0\u70b9\uff1a

• Both CronHPA and HPA obtain the object to scale via the scaleTargetRef field.
• CronHPA scales the replica count on a schedule defined by the crontab rules in its jobs.
• HPA decides scaling based on resource utilization.
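The conflict is visible from the two spec fragments side by side (a sketch; names and schedule are illustrative). Both point at the same Deployment:

# HPA: scales by resource utilization\nspec:\n  scaleTargetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: my-dep\n---\n# CronHPA: scales by crontab jobs\nspec:\n  scaleTargetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: my-dep\n  jobs:\n  - name: \"scale-up\"\n    schedule: \"0 0 8 * * *\"\n    targetSize: 10\n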

                      Note

If both CronHPA and HPA are configured, the two end up operating on the same scaleTargetRef at the same time.

                      "},{"location":"admin/kpanda/scale/hpa-cronhpa-compatibility-rules.html#cronhpa-hpa_1","title":"CronHPA \u548c HPA \u517c\u5bb9\u65b9\u6848","text":"

                      \u4ece\u4e0a\u6587\u53ef\u77e5\uff0cCronHPA \u548c HPA \u540c\u65f6\u4f7f\u7528\u4f1a\u5bfc\u81f4\u540e\u6267\u884c\u7684\u64cd\u4f5c\u8986\u76d6\u5148\u6267\u884c\u64cd\u4f5c\u7684\u672c\u8d28\u539f\u56e0\u662f\u4e24\u4e2a\u63a7\u5236\u5668\u65e0\u6cd5\u76f8\u4e92\u611f\u77e5\uff0c \u90a3\u4e48\u53ea\u9700\u8981\u8ba9 CronHPA \u611f\u77e5 HPA \u7684\u5f53\u524d\u72b6\u6001\u5c31\u80fd\u89e3\u51b3\u51b2\u7a81\u95ee\u9898\u3002

                      \u7cfb\u7edf\u4f1a\u5c06 HPA \u4f5c\u4e3a\u5b9a\u65f6\u4f38\u7f29 CronHPA \u7684\u6269\u7f29\u5bb9\u5bf9\u8c61\uff0c\u4ece\u800c\u5b9e\u73b0\u5bf9\u8be5 HPA \u5b9a\u4e49\u7684 Deployment \u5bf9\u8c61\u7684\u5b9a\u65f6\u6269\u7f29\u5bb9\u3002

                      HPA \u7684\u5b9a\u4e49\u5c06 Deployment \u914d\u7f6e\u5728 scaleTargetRef \u5b57\u6bb5\u4e0b\uff0c\u7136\u540e Deployment \u901a\u8fc7\u81ea\u8eab\u5b9a\u4e49\u67e5\u627e ReplicaSet\uff0c\u6700\u540e\u901a\u8fc7 ReplicaSet \u8c03\u6574\u771f\u5b9e\u7684\u526f\u672c\u6570\u76ee\u3002

                      \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5c06 CronHPA \u4e2d\u7684 scaleTargetRef \u8bbe\u7f6e\u4e3a HPA \u5bf9\u8c61\uff0c\u7136\u540e\u901a\u8fc7 HPA \u5bf9\u8c61\u6765\u5bfb\u627e\u771f\u5b9e\u7684 scaleTargetRef\uff0c\u4ece\u800c\u8ba9 CronHPA \u611f\u77e5 HPA \u7684\u5f53\u524d\u72b6\u6001\u3002

                      CronHPA \u4f1a\u901a\u8fc7\u8c03\u6574 HPA \u7684\u65b9\u5f0f\u611f\u77e5 HPA\u3002CronHPA \u901a\u8fc7\u8bc6\u522b\u8981\u8fbe\u5230\u7684\u526f\u672c\u6570\u4e0e\u5f53\u524d\u526f\u672c\u6570\u4e24\u8005\u95f4\u7684\u8f83\u5927\u503c\uff0c \u5224\u65ad\u662f\u5426\u9700\u8981\u6269\u7f29\u5bb9\u53ca\u4fee\u6539 HPA \u7684\u4e0a\u9650\uff1bCronHPA \u901a\u8fc7\u8bc6\u522b CronHPA \u8981\u8fbe\u5230\u7684\u526f\u672c\u6570\u4e0e HPA \u7684\u914d\u7f6e\u95f4\u7684\u8f83\u5c0f\u503c\uff0c\u5224\u65ad\u662f\u5426\u9700\u8981\u4fee\u6539 HPA \u7684\u4e0b\u9650\u3002
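A CronHPA configured this way points its scaleTargetRef at the HPA object instead of the Deployment (a minimal sketch, assuming the kubernetes-cronhpa-controller CRD; names and schedule are illustrative):

apiVersion: autoscaling.alibabacloud.com/v1beta1\nkind: CronHorizontalPodAutoscaler\nmetadata:\n  name: cronhpa-sample\n  namespace: default\nspec:\n  scaleTargetRef:   # points at the HPA, which in turn points at the Deployment\n    apiVersion: autoscaling/v2\n    kind: HorizontalPodAutoscaler\n    name: hpa-my-dep\n  jobs:\n  - name: \"scale-up-morning\"\n    schedule: \"0 0 8 * * *\"   # second minute hour day month weekday\n    targetSize: 10\n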

                      "},{"location":"admin/kpanda/scale/install-cronhpa.html","title":"\u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6","text":"

                      \u5bb9\u5668\u526f\u672c\u5b9a\u65f6\u6c34\u5e73\u6269\u7f29\u5bb9\u7b56\u7565\uff08CronHPA\uff09\u80fd\u591f\u4e3a\u5468\u671f\u6027\u9ad8\u5e76\u53d1\u5e94\u7528\u63d0\u4f9b\u7a33\u5b9a\u7684\u8ba1\u7b97\u8d44\u6e90\u4fdd\u969c\uff0c kubernetes-cronhpa-controller \u5219\u662f\u5b9e\u73b0 CronHPA \u7684\u5173\u952e\u7ec4\u4ef6\u3002

                      \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6\u3002

                      Note

To use CronHPA, you must install not only the kubernetes-cronhpa-controller plugin but also the metrics-server plugin.

                      "},{"location":"admin/kpanda/scale/install-cronhpa.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                      • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u3002

                      • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                      "},{"location":"admin/kpanda/scale/install-cronhpa.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                      \u53c2\u8003\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6\u3002

1. On the Cluster List page, find the target cluster for the plugin, click the cluster's name, then click Workloads -> Deployments on the left and click the name of the target workload.

2. On the workload details page, click the Auto Scaling tab, then click Install to the right of CronHPA.

3. Read the plugin's introduction, select a version, and click the Install button. Version 1.3.0 or higher is recommended.

4. Configure the parameters as described below.

  • Name: enter the plugin name. The name may be at most 63 characters, may contain only lowercase letters, digits, and hyphens ("-"), and must start and end with a lowercase letter or digit, for example kubernetes-cronhpa-controller.
  • Namespace: select the namespace to install the plugin into, default in this example.
  • Version: the plugin version, 1.3.0 in this example.
  • Ready wait: when enabled, the application is marked as successfully installed only after all its associated resources are ready.
  • Delete on failure: if the plugin installation fails, delete the associated resources that were already installed. Enabling this also enables Ready wait by default.
  • Verbose log: when enabled, detailed logs of the installation process are recorded.

                        Note

With Ready wait and/or Delete on failure enabled, it may take quite a while before the application is marked as Running.

5. Click OK in the lower-right corner of the page; the system automatically redirects to the Helm Apps list page. Wait a few minutes, then refresh the page to see the newly installed application.

  Warning

  To remove the kubernetes-cronhpa-controller plugin completely, it must be deleted from the Helm Apps list page.

  Deleting the plugin on a workload's Auto Scaling tab only deletes the plugin's workload replica; the plugin itself is not removed, and an error will be reported when the plugin is reinstalled later.

6. Back on the Auto Scaling tab of the workload details page, the interface shows Plugin installed. You can now create CronHPA policies.

                      "},{"location":"admin/kpanda/scale/install-metrics-server.html","title":"\u5b89\u88c5 metrics-server \u63d2\u4ef6","text":"

                      metrics-server \u662f Kubernetes \u5185\u7f6e\u7684\u8d44\u6e90\u4f7f\u7528\u6307\u6807\u91c7\u96c6\u7ec4\u4ef6\u3002 \u60a8\u53ef\u4ee5\u901a\u8fc7\u914d\u7f6e\u5f39\u6027\u4f38\u7f29\uff08HPA\uff09\u7b56\u7565\u6765\u5b9e\u73b0\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u81ea\u52a8\u6c34\u5e73\u4f38\u7f29 Pod \u526f\u672c\u3002

                      \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5 metrics-server \u3002

                      "},{"location":"admin/kpanda/scale/install-metrics-server.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u5b89\u88c5 metrics-server \u63d2\u4ef6\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                      • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3002

                      • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                      "},{"location":"admin/kpanda/scale/install-metrics-server.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                      \u8bf7\u6267\u884c\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 metrics-server \u63d2\u4ef6\u3002

1. On the Auto Scaling page under the workload details, click Install Now to enter the metrics-server plugin installation interface.

2. Read the plugin's introduction, select a version, and click the Install button. This article uses version 3.8.2 as an example; version 3.8.2 or higher is recommended.

3. Configure the basic parameters on the installation configuration page.

  • Name: enter the plugin name. The name may be at most 63 characters, may contain only lowercase letters, digits, and hyphens ("-"), and must start and end with a lowercase letter or digit, for example metrics-server-01.
  • Namespace: select the namespace to install the plugin into, default in this example.
  • Version: the plugin version, 3.8.2 in this example.
  • Ready wait: when enabled, the application is marked as successfully installed only after all its associated resources are ready.
  • Delete on failure: enabling this also enables Ready wait by default. If the installation fails, the installation-related resources are deleted.
  • Verbose log: enables detailed output of the installation logs.

                        Note

                        After enabling Ready Wait and/or Delete on Failure, it takes a relatively long time before the application is marked as Running.

                      4. Advanced parameter configuration

                        • If the cluster network cannot reach the k8s.gcr.io registry, try changing the repository parameter to repository: k8s.m.daocloud.io/metrics-server/metrics-server.

                        • Installing the metrics-server plugin also requires an SSL certificate. To bypass certificate validation, add the - --kubelet-insecure-tls argument under defaultArgs:.

                        Click to view the recommended YAML parameters
                        image:\n  repository: k8s.m.daocloud.io/metrics-server/metrics-server # change the registry source address to k8s.m.daocloud.io\n  tag: ''\n  pullPolicy: IfNotPresent\nimagePullSecrets: []\nnameOverride: ''\nfullnameOverride: ''\nserviceAccount:\n  create: true\n  annotations: {}\n  name: ''\nrbac:\n  create: true\n  pspEnabled: false\napiService:\n  create: true\npodLabels: {}\npodAnnotations: {}\npodSecurityContext: {}\nsecurityContext:\n  allowPrivilegeEscalation: false\n  readOnlyRootFilesystem: true\n  runAsNonRoot: true\n  runAsUser: 1000\npriorityClassName: system-cluster-critical\ncontainerPort: 4443\nhostNetwork:\n  enabled: false\nreplicas: 1\nupdateStrategy: {}\npodDisruptionBudget:\n  enabled: false\n  minAvailable: null\n  maxUnavailable: null\ndefaultArgs:\n  - '--cert-dir=/tmp'\n  - '--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname'\n  - '--kubelet-use-node-status-port'\n  - '--metric-resolution=15s'\n  - --kubelet-insecure-tls # bypass certificate validation\nargs: []\nlivenessProbe:\n  httpGet:\n    path: /livez\n    port: https\n    scheme: HTTPS\n  initialDelaySeconds: 0\n  periodSeconds: 10\n  failureThreshold: 3\nreadinessProbe:\n  httpGet:\n    path: /readyz\n    port: https\n    scheme: HTTPS\n  initialDelaySeconds: 20\n  periodSeconds: 10\n  failureThreshold: 3\nservice:\n  type: ClusterIP\n  port: 443\n  annotations: {}\n  labels: {}\nmetrics:\n  enabled: false\nserviceMonitor:\n  enabled: false\n  additionalLabels: {}\n  interval: 1m\n  scrapeTimeout: 10s\nresources: {}\nextraVolumeMounts: []\nextraVolumes: []\nnodeSelector: {}\ntolerations: []\naffinity: {}\n
                      5. Click the OK button to complete the installation of the metrics-server plugin. The system then automatically redirects to the Helm Apps list page. Wait a few minutes, then refresh the page to see the newly installed application.
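
                      Optionally, you can verify from the command line that metrics are flowing. These are standard kubectl commands rather than a platform-specific feature:

                      kubectl get apiservices v1beta1.metrics.k8s.io # should report Available=True\nkubectl top nodes # CPU/memory usage per node\nkubectl top pods -n default # usage per Pod in a namespace\n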

                      Note

                      The metrics-server plugin can only be removed completely from the Helm Apps list page. Deleting metrics-server on the workload page only removes the application's workload replicas; the application itself remains, and reinstalling the plugin later will report an error.

                      "},{"location":"admin/kpanda/scale/install-vpa.html","title":"\u5b89\u88c5 vpa \u63d2\u4ef6","text":"

                      \u5bb9\u5668\u5782\u76f4\u6269\u7f29\u5bb9\u7b56\u7565\uff08Vertical Pod Autoscaler, VPA\uff09\u80fd\u591f\u8ba9\u96c6\u7fa4\u7684\u8d44\u6e90\u914d\u7f6e\u66f4\u52a0\u5408\u7406\uff0c\u907f\u514d\u96c6\u7fa4\u8d44\u6e90\u6d6a\u8d39\u3002 vpa \u5219\u662f\u5b9e\u73b0\u5bb9\u5668\u5782\u76f4\u6269\u7f29\u5bb9\u7684\u5173\u952e\u7ec4\u4ef6\u3002

                      \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5 vpa \u63d2\u4ef6\u3002

                      \u4e3a\u4e86\u4f7f\u7528 VPA \u7b56\u7565\uff0c\u4e0d\u4ec5\u9700\u8981\u5b89\u88c5 __vpa__ \u63d2\u4ef6\uff0c\u8fd8\u8981[\u5b89\u88c5 __metrics-server__ \u63d2\u4ef6](install-metrics-server.md)\u3002\n
                      "},{"location":"admin/kpanda/scale/install-vpa.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u5b89\u88c5 vpa \u63d2\u4ef6\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                      • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u3002

                      • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                      "},{"location":"admin/kpanda/scale/install-vpa.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                      \u53c2\u8003\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 vpa \u63d2\u4ef6\u3002

                      1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u627e\u5230\u9700\u8981\u5b89\u88c5\u6b64\u63d2\u4ef6\u7684\u76ee\u6807\u96c6\u7fa4\uff0c\u70b9\u51fb\u8be5\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u70b9\u51fb \u5de5\u4f5c\u8d1f\u8f7d -> \u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d \uff0c\u70b9\u51fb\u76ee\u6807\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u3002

                      2. \u5728\u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u5728 VPA \u53f3\u4fa7\u70b9\u51fb \u5b89\u88c5 \u3002

                        3. \u9605\u8bfb\u8be5\u63d2\u4ef6\u7684\u76f8\u5173\u4ecb\u7ecd\uff0c\u9009\u62e9\u7248\u672c\u540e\u70b9\u51fb \u5b89\u88c5 \u6309\u94ae\u3002\u63a8\u8350\u5b89\u88c5 1.5.0 \u6216\u66f4\u9ad8\u7248\u672c\u3002

                        4. \u67e5\u770b\u4ee5\u4e0b\u8bf4\u660e\u914d\u7f6e\u53c2\u6570\u3002

                        - \u540d\u79f0\uff1a\u8f93\u5165\u63d2\u4ef6\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 kubernetes-cronhpa-controller\u3002 - \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u63d2\u4ef6\u5b89\u88c5\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u6b64\u5904\u4ee5 default \u4e3a\u4f8b\u3002 - \u7248\u672c\uff1a\u63d2\u4ef6\u7684\u7248\u672c\uff0c\u6b64\u5904\u4ee5 4.5.0 \u7248\u672c\u4e3a\u4f8b\u3002 - \u5c31\u7eea\u7b49\u5f85\uff1a\u542f\u7528\u540e\uff0c\u5c06\u7b49\u5f85\u5e94\u7528\u4e0b\u7684\u6240\u6709\u5173\u8054\u8d44\u6e90\u90fd\u5904\u4e8e\u5c31\u7eea\u72b6\u6001\uff0c\u624d\u4f1a\u6807\u8bb0\u5e94\u7528\u5b89\u88c5\u6210\u529f\u3002 - \u5931\u8d25\u5220\u9664\uff1a\u5982\u679c\u63d2\u4ef6\u5b89\u88c5\u5931\u8d25\uff0c\u5219\u5220\u9664\u5df2\u7ecf\u5b89\u88c5\u7684\u5173\u8054\u8d44\u6e90\u3002\u5f00\u542f\u540e\uff0c\u5c06\u9ed8\u8ba4\u540c\u6b65\u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u3002 - \u8be6\u60c5\u65e5\u5fd7\uff1a\u5f00\u542f\u540e\uff0c\u5c06\u8bb0\u5f55\u5b89\u88c5\u8fc7\u7a0b\u7684\u8be6\u7ec6\u65e5\u5fd7\u3002

                        Note

                        After enabling Ready Wait and/or Delete on Failure, it takes a relatively long time before the application is marked as Running.

                      5. Click OK in the lower right corner of the page; the system automatically redirects to the Helm Apps list page. Wait a few minutes, then refresh the page to see the newly installed application.

                        Warning

                        The vpa plugin can only be removed completely from the Helm Apps list page.

                        If you delete the plugin on the workload's Auto Scaling tab, this only removes the plugin's workload replica; the plugin itself remains, and reinstalling the plugin later will report an error.

                      6. Return to the Auto Scaling tab on the workload details page; the page now shows Plugin Installed. You can now create VPA policies.
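
                      For reference, a VPA policy ultimately corresponds to a VerticalPodAutoscaler object like the minimal sketch below. The target Deployment name nginx-deployment and the resource bounds are illustrative assumptions, not values from this guide.

                      apiVersion: autoscaling.k8s.io/v1\nkind: VerticalPodAutoscaler\nmetadata:\n  name: nginx-vpa # hypothetical name\n  namespace: default\nspec:\n  targetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: nginx-deployment # placeholder workload\n  updatePolicy:\n    updateMode: \"Auto\" # VPA may evict Pods and recreate them with updated requests\n  resourcePolicy:\n    containerPolicies:\n      - containerName: '*'\n        minAllowed:\n          cpu: 100m\n          memory: 128Mi\n        maxAllowed:\n          cpu: \"1\"\n          memory: 1Gi\n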

                      "},{"location":"admin/kpanda/scale/knative/install.html","title":"\u5b89\u88c5","text":"

                      Knative \u662f\u4e00\u4e2a\u9762\u5411\u65e0\u670d\u52a1\u5668\u90e8\u7f72\u7684\u8de8\u5e73\u53f0\u89e3\u51b3\u65b9\u6848\u3002

                      1. \u767b\u5f55\u96c6\u7fa4\uff0c\u70b9\u51fb\u4fa7\u8fb9\u680f Helm \u5e94\u7528 -> Helm \u6a21\u677f \uff0c\u5728\u53f3\u4fa7\u4e0a\u65b9\u641c\u7d22\u6846\u8f93\u5165 knative \uff0c\u7136\u540e\u6309\u56de\u8f66\u952e\u641c\u7d22\u3002

                      2. \u70b9\u51fb\u641c\u7d22\u51fa\u7684 knative-operator \uff0c\u8fdb\u5165\u5b89\u88c5\u914d\u7f6e\u754c\u9762\u3002\u4f60\u53ef\u4ee5\u5728\u8be5\u754c\u9762\u67e5\u770b\u53ef\u7528\u7248\u672c\u4ee5\u53ca Helm values \u7684 Parameters \u53ef\u9009\u9879\u3002

                      3. \u70b9\u51fb\u5b89\u88c5\u6309\u94ae\u540e\uff0c\u8fdb\u5165\u5b89\u88c5\u914d\u7f6e\u754c\u9762\u3002

                      4. \u8f93\u5165\u540d\u79f0\uff0c\u5b89\u88c5\u79df\u6237\uff0c\u5efa\u8bae\u52fe\u9009 \u5c31\u7eea\u7b49\u5f85 \u548c \u8be6\u7ec6\u65e5\u5fd7 \u3002

                      5. \u5728\u4e0b\u65b9\u8bbe\u7f6e\uff0c\u53ef\u4ee5\u52fe\u9009 Serving \uff0c\u5e76\u8f93\u5165 Knative Serving \u7ec4\u4ef6\u7684\u5b89\u88c5\u79df\u6237\uff0c\u4f1a\u5728\u5b89\u88c5\u540e\u90e8\u7f72 Knative Serving \u7ec4\u4ef6\uff0c\u8be5\u7ec4\u4ef6\u7531 Knative Operator \u7ba1\u7406\u3002
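
                      For reference, the Knative Operator manages the Serving component through a KnativeServing custom resource. Below is a minimal sketch, assuming the knative-serving namespace and the Envoy-based Kourier ingress described later in this document:

                      apiVersion: operator.knative.dev/v1beta1\nkind: KnativeServing\nmetadata:\n  name: knative-serving\n  namespace: knative-serving\nspec:\n  ingress:\n    kourier:\n      enabled: true # use the Envoy-based Kourier ingress\n  config:\n    network:\n      ingress-class: kourier.ingress.networking.knative.dev\n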

                      "},{"location":"admin/kpanda/scale/knative/knative.html","title":"Kantive \u4ecb\u7ecd","text":"

                      Knative \u63d0\u4f9b\u4e86\u4e00\u79cd\u66f4\u9ad8\u5c42\u6b21\u7684\u62bd\u8c61\uff0c\u7b80\u5316\u5e76\u52a0\u901f\u4e86\u5728 Kubernetes \u4e0a\u6784\u5efa\u3001\u90e8\u7f72\u548c\u7ba1\u7406\u5e94\u7528\u7684\u8fc7\u7a0b\u3002\u5b83\u4f7f\u5f97\u5f00\u53d1\u4eba\u5458\u80fd\u591f\u66f4\u4e13\u6ce8\u4e8e\u4e1a\u52a1\u903b\u8f91\u7684\u5b9e\u73b0\uff0c\u800c\u5c06\u5927\u90e8\u5206\u57fa\u7840\u8bbe\u65bd\u548c\u8fd0\u7ef4\u5de5\u4f5c\u4ea4\u7ed9 Knative \u53bb\u5904\u7406\uff0c\u4ece\u800c\u663e\u8457\u63d0\u9ad8\u751f\u4ea7\u529b\u3002

                      "},{"location":"admin/kpanda/scale/knative/knative.html#_1","title":"\u7ec4\u4ef6","text":"

                      knative-operator \u8fd0\u884c\u7ec4\u4ef6\u5982\u4e0b\u3002

                      knative-operator   knative-operator-58f7d7db5c-7f6r5      1/1     Running     0     6m55s\nknative-operator   operator-webhook-667dc67bc-qvrv4       1/1     Running     0     6m55s\n

                      The knative-serving components are as follows.

                      knative-serving        3scale-kourier-gateway-d69fbfbd-bd8d8   1/1     Running     0                 7m13s\nknative-serving        activator-7c6fddd698-wdlng              1/1     Running     0                 7m3s\nknative-serving        autoscaler-8f4b876bb-kd25p              1/1     Running     0                 7m17s\nknative-serving        autoscaler-hpa-5f7f74679c-vkc7p         1/1     Running     0                 7m15s\nknative-serving        controller-789c896c46-tfvsv             1/1     Running     0                 7m17s\nknative-serving        net-kourier-controller-7db578c889-7gd5l 1/1     Running     0                 7m14s\nknative-serving        webhook-5c88b94c5-78x7m                 1/1     Running     0                 7m1s\nknative-serving        storage-version-migration-serving-serving-1.12.2-t7zvd   0/1  Completed   0   7m15s\n
                      • Activator: queues requests when a Knative Service has already scaled to zero; calls the autoscaler, brings the scaled-to-zero service back up, and forwards the queued requests. The Activator can also act as a request buffer to absorb traffic bursts.
                      • Autoscaler: scales Knative services based on configuration, metrics, and incoming requests.
                      • Controller: manages the state of Knative CRs. It watches multiple objects, manages the lifecycle of dependent resources, and updates resource status.
                      • Queue-Proxy: a sidecar container injected into every Knative Service. It collects traffic data and reports it to the Autoscaler, which issues scale-up or scale-down requests based on this data and preset rules.
                      • Webhooks: Knative Serving has several webhooks that validate and mutate Knative resources.

                      "},{"location":"admin/kpanda/scale/knative/knative.html#ingress","title":"Ingress Traffic Entry Solutions","text":"

                      • Istio: if Istio is already in use, it can serve as the traffic entry solution.
                      • Contour: if Contour is already enabled in the cluster, it can serve as the traffic entry solution.
                      • Kourier: if neither of the two ingress components above is available, Knative's Envoy-based Kourier ingress can serve as the traffic entry.

                      "},{"location":"admin/kpanda/scale/knative/knative.html#autoscaler","title":"Autoscaler Comparison","text":"

                      • Knative Pod Autoscaler (KPA): part of the Knative Serving core; enabled by default; supports scale to zero; does not support CPU-based autoscaling.
                      • Horizontal Pod Autoscaler (HPA): not part of the Knative Serving core; must be enabled after installing Knative Serving; does not support scale to zero; supports CPU-based autoscaling.

                      "},{"location":"admin/kpanda/scale/knative/knative.html#crd","title":"CRD","text":"

                      • Services (service.serving.knative.dev): automatically manages the entire lifecycle of a workload and controls the creation of the other objects, ensuring the application has Routes, Configurations, and a new revision on every update.
                      • Routes (route.serving.knative.dev): maps a network endpoint to one or more revisions, supporting traffic splitting and version routing.
                      • Configurations (configuration.serving.knative.dev): maintains the desired state of a deployment, separating code from configuration in line with the Twelve-Factor App methodology; modifying a configuration creates a new revision.
                      • Revisions (revision.serving.knative.dev): a point-in-time snapshot of the workload at each modification; revisions are immutable objects and can scale up and down automatically based on traffic.

                      "},{"location":"admin/kpanda/scale/knative/playground.html","title":"Knative Hands-on Practice","text":"

                      In this section, we will get a deeper understanding of Knative through several hands-on cases.

                      "},{"location":"admin/kpanda/scale/knative/playground.html#case-1-hello-world","title":"case 1 - Hello World","text":"
                      apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n

                      You can use kubectl to check the status of the deployed application, for which Knative has automatically configured an ingress and an autoscaler.

                      ~ kubectl get service.serving.knative.dev/hello\nNAME    URL                                              LATESTCREATED   LATESTREADY   READY   REASON\nhello   http://hello.knative-serving.knative.loulan.me   hello-00001     hello-00001   True\n

                      The YAML of the deployed Pod is as follows; the Pod consists of 2 containers: user-container and queue-proxy.

                      apiVersion: v1\nkind: Pod\nmetadata:\n  name: hello-00003-deployment-5fcb8ccbf-7qjfk\nspec:\n  containers:\n  - name: user-container\n  - name: queue-proxy\n

                      Request flow:

                      1. case 1: at low or zero traffic, traffic is routed to the activator.
                      2. case 2: at high traffic, traffic is routed directly to the Pods only when it exceeds target-burst-capacity.
                        1. Set to 0: the activator is only in the path while scaling from zero.
                        2. Set to -1: the activator always stays in the request path.
                        3. Set to >0: the number of additional concurrent requests the system can absorb before scaling is triggered.
                      3. case 3: when traffic drops again, traffic is routed back to the activator once current_demand + target-burst-capacity > (pods * concurrency-target), that is:

                        total pending requests + the number of requests accepted above the target concurrency > target concurrency per Pod * number of Pods
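
                      target-burst-capacity itself is set as an annotation on the revision template. Below is a minimal sketch; the value 200 is an arbitrary illustrative choice, not a recommendation from this guide.

                      apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target-burst-capacity: \"200\" # extra concurrency absorbed before requests bypass the activator; 0 and -1 behave as described above\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n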

                      "},{"location":"admin/kpanda/scale/knative/playground.html#case-2-","title":"case 2 - \u57fa\u4e8e\u5e76\u53d1\u5f39\u6027\u4f38\u7f29","text":"

                      \u6211\u4eec\u9996\u5148\u5728\u96c6\u7fa4\u5e94\u7528\u4e0b\u9762 YAML \u5b9a\u4e49\u3002

                      apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"1\"\n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n

                      Run the following command to generate test load, and watch the Pods scale up with kubectl get pods -A -w.

                      wrk -t2 -c4 -d6s http://hello.knative-serving.knative.daocloud.io/\n
                      "},{"location":"admin/kpanda/scale/knative/playground.html#case-3-","title":"case 3 - \u57fa\u4e8e\u5e76\u53d1\u5f39\u6027\u4f38\u7f29\uff0c\u8fbe\u5230\u7279\u5b9a\u6bd4\u4f8b\u63d0\u524d\u6269\u5bb9","text":"

                      \u6211\u4eec\u53ef\u4ee5\u5f88\u8f7b\u677e\u7684\u5b9e\u73b0\uff0c\u4f8b\u5982\u9650\u5236\u6bcf\u4e2a\u5bb9\u5668\u5e76\u53d1\u4e3a 10\uff0c\u53ef\u4ee5\u901a\u8fc7 autoscaling.knative.dev/target-utilization-percentage: 70 \u6765\u5b9e\u73b0\uff0c\u8fbe\u5230 70% \u5c31\u5f00\u59cb\u6269\u5bb9 Pod\u3002

                      apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"10\"\n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"\n        autoscaling.knative.dev/target-utilization-percentage: \"70\"\n        autoscaling.knative.dev/metric: \"concurrency\"\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n
                      "},{"location":"admin/kpanda/scale/knative/playground.html#case-4-","title":"case 4 - \u7070\u5ea6\u53d1\u5e03/\u6d41\u91cf\u767e\u5206\u6bd4","text":"

                      \u6211\u4eec\u53ef\u4ee5\u901a\u8fc7 spec.traffic \u5b9e\u73b0\u5230\u6bcf\u4e2a\u7248\u672c\u6d41\u91cf\u7684\u63a7\u5236\u3002

                      apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"1\"  \n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"         \n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n  traffic:\n  - latestRevision: true\n    percent: 50\n  - latestRevision: false\n    percent: 50\n    revisionName: hello-00001\n
                      "},{"location":"admin/kpanda/scale/knative/scene.html","title":"\u4f7f\u7528\u573a\u666f","text":""},{"location":"admin/kpanda/scale/knative/scene.html#_2","title":"\u9002\u5408\u7684\u573a\u666f","text":"
                      • \u77ed\u8fde\u63a5\u9ad8\u5e76\u53d1\u4e1a\u52a1
                      • \u9700\u8981\u5f39\u6027\u4f38\u7f29\u7684\u4e1a\u52a1
                      • \u5927\u91cf\u5e94\u7528\u9700\u8981\u7f29\u5bb9\u5230 0 \u63d0\u9ad8\u8d44\u6e90\u5229\u7528\u7387
                      • AI Serving \u670d\u52a1\uff0c\u57fa\u4e8e\u7279\u5b9a\u6307\u6807\u8fdb\u884c\u6269\u5bb9

                      Tip

                      \u77ed\u8fde\u63a5\u9ad8\u5e76\u53d1\u4e1a\u52a1\u4ee5\u53ca\u9700\u8981\u5f39\u6027\u4f38\u7f29\u7684\u4e1a\u52a1\uff0c\u63a8\u8350\u4f7f\u7528 HPA \u548c VPA \u80fd\u529b\u3002

                      "},{"location":"admin/kpanda/scale/knative/scene.html#_3","title":"\u4e0d\u9002\u5408\u7684\u573a\u666f","text":"
                      • \u957f\u8fde\u63a5\u4e1a\u52a1
                      • \u5ef6\u65f6\u654f\u611f\u4e1a\u52a1
                      • \u57fa\u4e8e cookie \u7684\u6d41\u91cf\u5206\u6d41
                      • \u57fa\u4e8e header \u7684\u6d41\u91cf\u5206\u6d41
                      "},{"location":"admin/kpanda/security/index.html","title":"\u5b89\u5168\u626b\u63cf\u7c7b\u578b","text":"

                      \u5728Kubernetes\uff08\u7b80\u79f0K8s\uff09\u73af\u5883\u4e2d\uff0c\u5b89\u5168\u626b\u63cf\u662f\u786e\u4fdd\u96c6\u7fa4\u5b89\u5168\u6027\u7684\u5173\u952e\u63aa\u65bd\u4e4b\u4e00\u3002\u5176\u4e2d\uff0c\u5408\u89c4\u6027\u626b\u63cf\uff08\u57fa\u4e8eCIS Benchmark\uff09\u3001\u6743\u9650\u626b\u63cf\uff08\u57fa\u4e8ekube-audit\u5ba1\u8ba1\u529f\u80fd\uff09\u3001\u6f0f\u6d1e\u626b\u63cf\uff08\u57fa\u4e8e kube-hunter\uff09\u662f\u4e09\u79cd\u5e38\u89c1\u4e14\u91cd\u8981\u7684\u5b89\u5168\u626b\u63cf\u624b\u6bb5\uff1a

                      • \u5408\u89c4\u6027\u626b\u63cf\uff1a\u57fa\u4e8e CIS Benchmark \u5bf9\u96c6\u7fa4\u8282\u70b9\u8fdb\u884c\u5b89\u5168\u626b\u63cf\u3002CIS Benchmark \u662f\u4e00\u5957\u5168\u7403\u516c\u8ba4\u7684\u6700\u4f73\u5b9e\u8df5\u6807\u51c6\uff0c\u4e3a Kubernetes \u96c6\u7fa4\u63d0\u4f9b\u4e86\u8be6\u7ec6\u7684\u5b89\u5168\u914d\u7f6e\u6307\u5357\u548c\u81ea\u52a8\u5316\u68c0\u67e5\u5de5\u5177\uff08\u5982Kube-Bench\uff09\uff0c\u5e2e\u52a9\u7ec4\u7ec7\u786e\u4fdd\u5176K8s\u96c6\u7fa4\u7b26\u5408\u5b89\u5168\u57fa\u7ebf\u8981\u6c42\uff0c\u4fdd\u62a4\u7cfb\u7edf\u548c\u6570\u636e\u514d\u53d7\u5a01\u80c1\u3002

                      • \u6743\u9650\u626b\u63cf\uff1a\u57fa\u4e8ekube-audit\u5ba1\u8ba1\u529f\u80fd\u3002\u6743\u9650\u626b\u63cf\u4e3b\u8981\u89e3\u51b3\u96c6\u7fa4\u8bbf\u95ee\u63a7\u5236\u548c\u64cd\u4f5c\u900f\u660e\u5ea6\u7684\u95ee\u9898\u3002\u901a\u8fc7\u5ba1\u8ba1\u65e5\u5fd7\uff0c\u96c6\u7fa4\u7ba1\u7406\u5458\u80fd\u591f\u8ffd\u6eaf\u96c6\u7fa4\u8d44\u6e90\u7684\u8bbf\u95ee\u5386\u53f2\uff0c\u8bc6\u522b\u5f02\u5e38\u884c\u4e3a\uff0c\u5982\u672a\u7ecf\u6388\u6743\u7684\u8bbf\u95ee\u3001\u654f\u611f\u6570\u636e\u7684\u6cc4\u9732\u3001\u6709\u5b89\u5168\u6f0f\u6d1e\u7684\u64cd\u4f5c\u8bb0\u5f55\u7b49\u3002\u8fd9\u5bf9\u4e8e\u6545\u969c\u6392\u67e5\u3001\u5b89\u5168\u4e8b\u4ef6\u54cd\u5e94\u4ee5\u53ca\u6ee1\u8db3\u5408\u89c4\u6027\u8981\u6c42\u81f3\u5173\u91cd\u8981\u3002\u6b64\u5916\uff0c\u6743\u9650\u626b\u63cf\u8fd8\u53ef\u4ee5\u5e2e\u52a9\u7ec4\u7ec7\u53d1\u73b0\u6f5c\u5728\u7684\u6743\u9650\u6ee5\u7528\u95ee\u9898\uff0c\u53ca\u65f6\u91c7\u53d6\u63aa\u65bd\u9632\u6b62\u5b89\u5168\u4e8b\u4ef6\u7684\u53d1\u751f\u3002

                      • \u6f0f\u6d1e\u626b\u63cf\uff1a\u57fa\u4e8e kube-hunter\uff0c\u4e3b\u8981\u89e3\u51b3 Kubernetes \u96c6\u7fa4\u4e2d\u5b58\u5728\u7684\u5df2\u77e5\u6f0f\u6d1e\u548c\u914d\u7f6e\u9519\u8bef\u95ee\u9898\u3002kube-hunter \u901a\u8fc7\u6a21\u62df\u653b\u51fb\u884c\u4e3a\uff0c\u80fd\u591f\u8bc6\u522b\u96c6\u7fa4\u4e2d\u53ef\u88ab\u6076\u610f\u5229\u7528\u7684\u6f0f\u6d1e\uff0c\u5982\u672a\u6388\u6743\u8bbf\u95ee\u3001\u66b4\u9732\u7684\u670d\u52a1\u548cAPI\u7aef\u70b9\u3001\u914d\u7f6e\u9519\u8bef\u7684\u89d2\u8272\u548c\u7ed1\u5b9a\u7b56\u7565\u7b49\u3002\u7279\u522b\u5730\uff0ckube-hunter\u80fd\u591f\u8bc6\u522b\u5e76\u62a5\u544a CVE \u6f0f\u6d1e\uff0c\u8fd9\u4e9b\u6f0f\u6d1e\u5982\u679c\u88ab\u6076\u610f\u5229\u7528\uff0c\u53ef\u80fd\u5bfc\u81f4\u6570\u636e\u6cc4\u9732\u3001\u670d\u52a1\u4e2d\u65ad\u7b49\u4e25\u91cd\u540e\u679c\u3002CVE \u6f0f\u6d1e\u662f\u7531\u56fd\u9645\u77e5\u540d\u7684\u5b89\u5168\u7ec4\u7ec7\u5982MITRE\u6240\u5b9a\u4e49\u548c\u7ef4\u62a4\u7684\uff0cCVE\u6570\u636e\u5e93\u4e3a\u8f6f\u4ef6\u548c\u56fa\u4ef6\u4e2d\u7684\u5df2\u77e5\u6f0f\u6d1e\u63d0\u4f9b\u4e86\u552f\u4e00\u6807\u8bc6\u7b26\uff0c\u6210\u4e3a\u5168\u7403\u5b89\u5168\u793e\u533a\u5171\u540c\u9075\u5faa\u7684\u6807\u51c6\u3002kube-hunter \u901a\u8fc7\u5229\u7528 CVE \u6570\u636e\u5e93\u4e2d\u7684\u4fe1\u606f\uff0c\u80fd\u591f\u5e2e\u52a9\u7528\u6237\u5feb\u901f\u8bc6\u522b\u5e76\u54cd\u5e94Kubernetes\u96c6\u7fa4\u4e2d\u7684\u5b89\u5168\u5a01\u80c1\u3002

                      "},{"location":"admin/kpanda/security/index.html#_2","title":"\u5408\u89c4\u6027\u626b\u63cf","text":"

                      \u5408\u89c4\u6027\u626b\u63cf\u7684\u5bf9\u8c61\u662f\u96c6\u7fa4\u8282\u70b9\u3002\u626b\u63cf\u7ed3\u679c\u4e2d\u4f1a\u5217\u51fa\u626b\u63cf\u9879\u4ee5\u53ca\u626b\u63cf\u7ed3\u679c\uff0c\u5e76\u9488\u5bf9\u672a\u901a\u8fc7\u7684\u626b\u63cf\u9879\u7ed9\u51fa\u4fee\u590d\u5efa\u8bae\u3002\u6709\u5173\u626b\u63cf\u65f6\u7528\u5230\u7684\u5177\u4f53\u5b89\u5168\u89c4\u5219\uff0c\u53ef\u53c2\u8003 CIS Kubernetes Benchmark

                      \u68c0\u67e5\u4e0d\u540c\u7c7b\u578b\u7684\u8282\u70b9\u65f6\uff0c\u626b\u63cf\u7684\u4fa7\u91cd\u70b9\u6709\u6240\u4e0d\u540c\u3002

                      • \u626b\u63cf\u63a7\u5236\u5e73\u9762\u8282\u70b9\uff08Controller\uff09

                        • \u5173\u6ce8 API Server \u3001 controller-manager \u3001 scheduler \u3001 kubelet \u7b49\u7cfb\u7edf\u7ec4\u4ef6\u7684\u5b89\u5168\u6027
                        • \u68c0\u67e5 Etcd \u6570\u636e\u5e93\u7684\u5b89\u5168\u914d\u7f6e
                        • \u68c0\u67e5\u96c6\u7fa4\u8eab\u4efd\u9a8c\u8bc1\u673a\u5236\u3001\u6388\u6743\u7b56\u7565\u548c\u7f51\u7edc\u5b89\u5168\u914d\u7f6e\u662f\u5426\u7b26\u5408\u5b89\u5168\u6807\u51c6
                      • \u626b\u63cf\u5de5\u4f5c\u8282\u70b9\uff08Worker\uff09

                        • \u68c0\u67e5 kubelet\u3001Docker\u7b49\u5bb9\u5668\u8fd0\u884c\u65f6\u7684\u914d\u7f6e\u5426\u7b26\u5408\u5b89\u5168\u6807\u51c6
                        • \u68c0\u67e5\u5bb9\u5668\u955c\u50cf\u662f\u5426\u7ecf\u8fc7\u4fe1\u4efb\u9a8c\u8bc1
                        • \u68c0\u67e5\u8282\u70b9\u7684\u7f51\u7edc\u5b89\u5168\u914d\u7f6e\u5426\u7b26\u5408\u5b89\u5168\u6807\u51c6
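
                      As a point of comparison, running the Kube-Bench tool directly reflects the same per-role split. This is a generic kube-bench invocation for illustration, not a platform command:

                      # On a control plane node: API server, controller-manager, scheduler, and etcd checks\nkube-bench run --targets master,etcd\n\n# On a worker node: kubelet and container runtime configuration checks\nkube-bench run --targets node\n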

                      Tip

                      To use compliance scanning, first create a scan configuration, then create a scan policy based on that configuration. After the scan policy runs, you can view the scan report.

                      "},{"location":"admin/kpanda/security/index.html#_3","title":"\u6743\u9650\u626b\u63cf","text":"

                      \u6743\u9650\u626b\u63cf\u4fa7\u91cd\u4e8e\u6743\u9650\u95ee\u9898\u5f15\u53d1\u7684\u5b89\u5168\u6f0f\u6d1e\u3002\u6743\u9650\u626b\u63cf\u53ef\u4ee5\u5e2e\u52a9\u7528\u6237\u8bc6\u522b Kubernetes \u96c6\u7fa4\u4e2d\u7684\u5b89\u5168\u5a01\u80c1\uff0c\u6807\u8bc6\u54ea\u4e9b\u8d44\u6e90\u9700\u8981\u8fdb\u884c\u8fdb\u4e00\u6b65\u7684\u5ba1\u67e5\u548c\u4fdd\u62a4\u63aa\u65bd\u3002\u901a\u8fc7\u6267\u884c\u8fd9\u4e9b\u68c0\u67e5\u9879\uff0c\u7528\u6237\u53ef\u4ee5\u66f4\u6e05\u695a\u3001\u66f4\u5168\u9762\u5730\u4e86\u89e3\u81ea\u5df1\u7684 Kubernetes \u73af\u5883\uff0c\u786e\u4fdd\u96c6\u7fa4\u73af\u5883\u7b26\u5408 Kubernetes \u7684\u6700\u4f73\u5b9e\u8df5\u548c\u5b89\u5168\u6807\u51c6\u3002

                      \u5177\u4f53\u800c\u8a00\uff0c\u6743\u9650\u626b\u63cf\u652f\u6301\u4ee5\u4e0b\u64cd\u4f5c\uff1a

                      • \u626b\u63cf\u96c6\u7fa4\u4e2d\u7684\u6240\u6709\u8282\u70b9\u7684\u5065\u5eb7\u72b6\u6001\u3002

                      • \u626b\u63cf\u96c6\u7fa4\u7ec4\u4ef6\u7684\u8fd0\u884c\u72b6\u51b5\uff0c\u5982 kube-apiserver \u3001 kube-controller-manager \u3001 kube-scheduler \u7b49\u3002

                      • \u626b\u63cf\u5b89\u5168\u914d\u7f6e\uff1a\u68c0\u67e5 Kubernetes \u7684\u5b89\u5168\u914d\u7f6e

                        • API \u5b89\u5168\uff1a\u542f\u7528\u4e86\u4e0d\u5b89\u5168\u7684 API \u7248\u672c\uff0c\u662f\u5426\u8bbe\u7f6e\u4e86\u9002\u5f53\u7684 RBAC \u89d2\u8272\u548c\u6743\u9650\u9650\u5236\u7b49
                        • \u5bb9\u5668\u5b89\u5168\uff1a\u662f\u5426\u4f7f\u7528\u4e86\u4e0d\u5b89\u5168\u7684 Image\u3001\u662f\u5426\u5f00\u653e\u4e86\u7279\u6743\u6a21\u5f0f\uff0c\u662f\u5426\u8bbe\u7f6e\u4e86\u5408\u9002\u7684\u5b89\u5168\u4e0a\u4e0b\u6587\u7b49
                        • \u7f51\u7edc\u5b89\u5168\uff1a\u662f\u5426\u542f\u7528\u4e86\u5408\u9002\u7684\u7f51\u7edc\u7b56\u7565\u6765\u9650\u5236\u6d41\u91cf\uff0c\u662f\u5426\u4f7f\u7528\u4e86 TLS \u52a0\u5bc6\u7b49
                        • \u5b58\u50a8\u5b89\u5168\uff1a\u662f\u5426\u542f\u7528\u4e86\u9002\u5f53\u7684\u52a0\u5bc6\u3001\u8bbf\u95ee\u63a7\u5236\u7b49\u3002
                        • \u5e94\u7528\u7a0b\u5e8f\u5b89\u5168\uff1a\u662f\u5426\u8bbe\u7f6e\u4e86\u5fc5\u8981\u7684\u5b89\u5168\u63aa\u65bd\uff0c\u4f8b\u5982\u5bc6\u7801\u7ba1\u7406\u3001\u8de8\u7ad9\u811a\u672c\u653b\u51fb\u9632\u5fa1\u7b49\u3002
                      • \u63d0\u4f9b\u8b66\u544a\u548c\u5efa\u8bae\uff1a\u5efa\u8bae\u96c6\u7fa4\u7ba1\u7406\u5458\u6267\u884c\u7684\u5b89\u5168\u6700\u4f73\u5b9e\u8df5\uff0c\u4f8b\u5982\u5b9a\u671f\u8f6e\u6362\u8bc1\u4e66\u3001\u4f7f\u7528\u5f3a\u5bc6\u7801\u3001\u9650\u5236\u7f51\u7edc\u8bbf\u95ee\u7b49\u3002

                      Tip

                      To use permission scanning, first create a scan policy. After the scan policy runs, you can view the scan report. For details, see Security Scan.

                      "},{"location":"admin/kpanda/security/index.html#_4","title":"\u6f0f\u6d1e\u626b\u63cf","text":"

                      \u6f0f\u6d1e\u626b\u63cf\u4fa7\u91cd\u4e8e\u626b\u63cf\u6f5c\u5728\u7684\u6076\u610f\u653b\u51fb\u548c\u5b89\u5168\u6f0f\u6d1e\uff0c\u4f8b\u5982\u8fdc\u7a0b\u4ee3\u7801\u6267\u884c\u3001SQL \u6ce8\u5165\u3001XSS \u653b\u51fb\u7b49\uff0c\u4ee5\u53ca\u4e00\u4e9b\u9488\u5bf9 Kubernetes \u7279\u5b9a\u7684\u653b\u51fb\u3002\u6700\u7ec8\u7684\u626b\u63cf\u62a5\u544a\u4f1a\u5217\u51fa\u96c6\u7fa4\u4e2d\u5b58\u5728\u7684\u5b89\u5168\u6f0f\u6d1e\uff0c\u5e76\u63d0\u51fa\u4fee\u590d\u5efa\u8bae\u3002
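
                      For reference, kube-hunter can also be run directly outside the platform. A minimal sketch, assuming the kube-hunter Python package is installed and <node-ip> is replaced with a reachable node address:

                      pip install kube-hunter # install the scanner\nkube-hunter --remote <node-ip> # scan a remote node from outside the cluster\nkube-hunter --pod # run from inside a Pod to assess in-cluster exposure; add --active to attempt real exploits (use with care)\n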

                      Tip

                      To use vulnerability scanning, first create a scan policy. After the scan policy runs, you can view the scan report. For details, see Vulnerability Scan.

                      "},{"location":"admin/kpanda/security/audit.html","title":"\u6743\u9650\u626b\u63cf","text":"

                      \u4e3a\u4e86\u4f7f\u7528\u6743\u9650\u626b\u63cf\u529f\u80fd\uff0c\u9700\u8981\u5148\u521b\u5efa\u626b\u63cf\u7b56\u7565\uff0c\u6267\u884c\u8be5\u7b56\u7565\u4e4b\u540e\u4f1a\u81ea\u52a8\u751f\u6210\u626b\u63cf\u62a5\u544a\u4ee5\u4f9b\u67e5\u770b\u3002

                      "},{"location":"admin/kpanda/security/audit.html#_2","title":"\u521b\u5efa\u626b\u63cf\u7b56\u7565","text":"
                      1. \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7684\u9996\u9875\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5b89\u5168\u7ba1\u7406 \u3002

                      2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u6743\u9650\u626b\u63cf \uff0c\u70b9\u51fb \u626b\u63cf\u7b56\u7565 \u9875\u7b7e\uff0c\u5728\u53f3\u4fa7\u70b9\u51fb \u521b\u5efa\u626b\u63cf\u7b56\u7565 \u3002

                      3. \u53c2\u8003\u4e0b\u5217\u8bf4\u660e\u586b\u5199\u914d\u7f6e\uff0c\u6700\u540e\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002

                        • \u96c6\u7fa4\uff1a\u9009\u62e9\u9700\u8981\u626b\u63cf\u54ea\u4e2a\u96c6\u7fa4\u3002\u53ef\u9009\u7684\u96c6\u7fa4\u5217\u8868\u6765\u81ea\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u7684\u96c6\u7fa4\u3002\u5982\u679c\u6ca1\u6709\u60f3\u9009\u7684\u96c6\u7fa4\uff0c\u53ef\u4ee5\u53bb\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u96c6\u7fa4\u3002
                        • \u626b\u63cf\u7c7b\u578b\uff1a

                          • \u7acb\u5373\u626b\u63cf\uff1a\u5728\u626b\u63cf\u7b56\u7565\u521b\u5efa\u597d\u4e4b\u540e\u7acb\u5373\u6267\u884c\u4e00\u6b21\u626b\u63cf\uff0c\u540e\u7eed\u4e0d\u53ef\u4ee5\u81ea\u52a8/\u624b\u52a8\u518d\u6b21\u6267\u884c\u626b\u63cf\u3002
                          • \u5b9a\u65f6\u626b\u63cf\uff1a\u901a\u8fc7\u8bbe\u7f6e\u626b\u63cf\u5468\u671f\uff0c\u81ea\u52a8\u6309\u65f6\u91cd\u590d\u6267\u884c\u626b\u63cf\u3002
                        • \u626b\u63cf\u62a5\u544a\u4fdd\u7559\u6570\u91cf\uff1a\u8bbe\u7f6e\u6700\u591a\u4fdd\u7559\u591a\u5c11\u626b\u63cf\u62a5\u544a\u3002\u8d85\u8fc7\u6307\u5b9a\u7684\u4fdd\u7559\u6570\u91cf\u65f6\uff0c\u4ece\u6700\u65e9\u7684\u62a5\u544a\u5f00\u59cb\u5220\u9664\u3002

                      "},{"location":"admin/kpanda/security/audit.html#_3","title":"\u66f4\u65b0/\u5220\u9664\u626b\u63cf\u7b56\u7565","text":"

                      \u521b\u5efa\u626b\u63cf\u7b56\u7565\u4e4b\u540e\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u8981\u66f4\u65b0\u6216\u5220\u9664\u626b\u63cf\u7b56\u7565\u3002

                      \u5728 \u626b\u63cf\u7b56\u7565 \u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u914d\u7f6e\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\uff1a

                      • \u5bf9\u4e8e\u5468\u671f\u6027\u7684\u626b\u63cf\u7b56\u7565\uff1a

                        • \u9009\u62e9 \u7acb\u5373\u6267\u884c \u610f\u5473\u7740\uff0c\u5728\u5468\u671f\u8ba1\u5212\u4e4b\u5916\u7acb\u5373\u518d\u626b\u63cf\u4e00\u6b21\u96c6\u7fa4
                        • \u9009\u62e9 \u7981\u7528 \u4f1a\u4e2d\u65ad\u626b\u63cf\u8ba1\u5212\uff0c\u76f4\u5230\u70b9\u51fb \u542f\u7528 \u624d\u53ef\u4ee5\u7ee7\u7eed\u6839\u636e\u5468\u671f\u8ba1\u5212\u6267\u884c\u8be5\u626b\u63cf\u7b56\u7565\u3002
                        • \u9009\u62e9 \u7f16\u8f91 \u53ef\u4ee5\u66f4\u65b0\u914d\u7f6e\uff0c\u652f\u6301\u66f4\u65b0\u626b\u63cf\u914d\u7f6e\u3001\u7c7b\u578b\u3001\u626b\u63cf\u5468\u671f\u3001\u62a5\u544a\u4fdd\u7559\u6570\u91cf\uff0c\u4e0d\u53ef\u66f4\u6539\u914d\u7f6e\u540d\u79f0\u548c\u9700\u8981\u626b\u63cf\u7684\u76ee\u6807\u96c6\u7fa4\u3002
                        • \u9009\u62e9 \u5220\u9664 \u53ef\u4ee5\u5220\u9664\u8be5\u914d\u7f6e
                      • \u5bf9\u4e8e\u4e00\u6b21\u6027\u7684\u626b\u63cf\u7b56\u7565\uff1a\u4ec5\u652f\u6301 \u5220\u9664 \u64cd\u4f5c\u3002

                      "},{"location":"admin/kpanda/security/audit.html#_4","title":"\u67e5\u770b\u626b\u63cf\u62a5\u544a","text":"
                      1. \u5728 \u5b89\u5168\u7ba1\u7406 -> \u6743\u9650\u626b\u63cf -> \u626b\u63cf\u62a5\u544a \u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u62a5\u544a\u540d\u79f0

                        \u5728\u62a5\u544a\u53f3\u4fa7\u70b9\u51fb \u5220\u9664 \u53ef\u4ee5\u624b\u52a8\u5220\u9664\u62a5\u544a\u3002

                      2. \u67e5\u770b\u626b\u63cf\u62a5\u544a\u5185\u5bb9\uff0c\u5305\u62ec\uff1a

                        • \u626b\u63cf\u7684\u76ee\u6807\u96c6\u7fa4
                        • \u4f7f\u7528\u7684\u626b\u63cf\u7b56\u7565
                        • \u626b\u63cf\u9879\u603b\u6570\u3001\u8b66\u544a\u6570\u3001\u9519\u8bef\u6570
                        • \u5728\u5468\u671f\u6027\u626b\u63cf\u7b56\u7565\u751f\u6210\u7684\u626b\u63cf\u62a5\u544a\u4e2d\uff0c\u8fd8\u53ef\u4ee5\u67e5\u770b\u626b\u63cf\u9891\u7387
                        • \u626b\u63cf\u5f00\u59cb\u7684\u65f6\u95f4
                        • \u68c0\u67e5\u8be6\u60c5\uff0c\u4f8b\u5982\u88ab\u68c0\u67e5\u7684\u8d44\u6e90\u3001\u8d44\u6e90\u7c7b\u578b\u3001\u626b\u63cf\u7ed3\u679c\u3001\u9519\u8bef\u7c7b\u578b\u3001\u9519\u8bef\u8be6\u60c5

                      "},{"location":"admin/kpanda/security/hunter.html","title":"\u6f0f\u6d1e\u626b\u63cf","text":"

                      \u4e3a\u4e86\u4f7f\u7528\u6f0f\u6d1e\u626b\u63cf\u529f\u80fd\uff0c\u9700\u8981\u5148\u521b\u5efa\u626b\u63cf\u7b56\u7565\uff0c\u6267\u884c\u8be5\u7b56\u7565\u4e4b\u540e\u4f1a\u81ea\u52a8\u751f\u6210\u626b\u63cf\u62a5\u544a\u4ee5\u4f9b\u67e5\u770b\u3002

                      "},{"location":"admin/kpanda/security/hunter.html#_2","title":"\u521b\u5efa\u626b\u63cf\u7b56\u7565","text":"
                      1. \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7684\u9996\u9875\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5b89\u5168\u7ba1\u7406 \u3002

                      2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u6f0f\u6d1e\u626b\u63cf \uff0c\u70b9\u51fb \u626b\u63cf\u7b56\u7565 \u9875\u7b7e\uff0c\u5728\u53f3\u4fa7\u70b9\u51fb \u521b\u5efa\u626b\u63cf\u7b56\u7565 \u3002

                      3. \u53c2\u8003\u4e0b\u5217\u8bf4\u660e\u586b\u5199\u914d\u7f6e\uff0c\u6700\u540e\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002

                        • \u96c6\u7fa4\uff1a\u9009\u62e9\u9700\u8981\u626b\u63cf\u54ea\u4e2a\u96c6\u7fa4\u3002\u53ef\u9009\u7684\u96c6\u7fa4\u5217\u8868\u6765\u81ea\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u7684\u96c6\u7fa4\u3002\u5982\u679c\u6ca1\u6709\u60f3\u9009\u7684\u96c6\u7fa4\uff0c\u53ef\u4ee5\u53bb\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u96c6\u7fa4\u3002
                        • \u626b\u63cf\u7c7b\u578b\uff1a

                          • \u7acb\u5373\u626b\u63cf\uff1a\u5728\u626b\u63cf\u7b56\u7565\u521b\u5efa\u597d\u4e4b\u540e\u7acb\u5373\u6267\u884c\u4e00\u6b21\u626b\u63cf\uff0c\u540e\u7eed\u4e0d\u53ef\u4ee5\u81ea\u52a8/\u624b\u52a8\u518d\u6b21\u6267\u884c\u626b\u63cf\u3002
                          • \u5b9a\u65f6\u626b\u63cf\uff1a\u901a\u8fc7\u8bbe\u7f6e\u626b\u63cf\u5468\u671f\uff0c\u81ea\u52a8\u6309\u65f6\u91cd\u590d\u6267\u884c\u626b\u63cf\u3002
                        • \u626b\u63cf\u62a5\u544a\u4fdd\u7559\u6570\u91cf\uff1a\u8bbe\u7f6e\u6700\u591a\u4fdd\u7559\u591a\u5c11\u626b\u63cf\u62a5\u544a\u3002\u8d85\u8fc7\u6307\u5b9a\u7684\u4fdd\u7559\u6570\u91cf\u65f6\uff0c\u4ece\u6700\u65e9\u7684\u62a5\u544a\u5f00\u59cb\u5220\u9664\u3002

                      "},{"location":"admin/kpanda/security/hunter.html#_3","title":"\u66f4\u65b0/\u5220\u9664\u626b\u63cf\u7b56\u7565","text":"

                      \u521b\u5efa\u626b\u63cf\u7b56\u7565\u4e4b\u540e\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u8981\u66f4\u65b0\u6216\u5220\u9664\u626b\u63cf\u7b56\u7565\u3002

                      \u5728 \u626b\u63cf\u7b56\u7565 \u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u914d\u7f6e\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\uff1a

                      • \u5bf9\u4e8e\u5468\u671f\u6027\u7684\u626b\u63cf\u7b56\u7565\uff1a

                        • \u9009\u62e9 \u7acb\u5373\u6267\u884c \u610f\u5473\u7740\uff0c\u5728\u5468\u671f\u8ba1\u5212\u4e4b\u5916\u7acb\u5373\u518d\u626b\u63cf\u4e00\u6b21\u96c6\u7fa4
                        • \u9009\u62e9 \u7981\u7528 \u4f1a\u4e2d\u65ad\u626b\u63cf\u8ba1\u5212\uff0c\u76f4\u5230\u70b9\u51fb \u542f\u7528 \u624d\u53ef\u4ee5\u7ee7\u7eed\u6839\u636e\u5468\u671f\u8ba1\u5212\u6267\u884c\u8be5\u626b\u63cf\u7b56\u7565\u3002
                        • \u9009\u62e9 \u7f16\u8f91 \u53ef\u4ee5\u66f4\u65b0\u914d\u7f6e\uff0c\u652f\u6301\u66f4\u65b0\u626b\u63cf\u914d\u7f6e\u3001\u7c7b\u578b\u3001\u626b\u63cf\u5468\u671f\u3001\u62a5\u544a\u4fdd\u7559\u6570\u91cf\uff0c\u4e0d\u53ef\u66f4\u6539\u914d\u7f6e\u540d\u79f0\u548c\u9700\u8981\u626b\u63cf\u7684\u76ee\u6807\u96c6\u7fa4\u3002
                        • \u9009\u62e9 \u5220\u9664 \u53ef\u4ee5\u5220\u9664\u8be5\u914d\u7f6e
                      • \u5bf9\u4e8e\u4e00\u6b21\u6027\u7684\u626b\u63cf\u7b56\u7565\uff1a\u4ec5\u652f\u6301 \u5220\u9664 \u64cd\u4f5c\u3002

                      "},{"location":"admin/kpanda/security/hunter.html#_4","title":"\u67e5\u770b\u626b\u63cf\u62a5\u544a","text":"
                      1. \u5728 \u5b89\u5168\u7ba1\u7406 -> \u6743\u9650\u626b\u63cf -> \u626b\u63cf\u62a5\u544a \u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u62a5\u544a\u540d\u79f0

                        \u5728\u62a5\u544a\u53f3\u4fa7\u70b9\u51fb \u5220\u9664 \u53ef\u4ee5\u624b\u52a8\u5220\u9664\u62a5\u544a\u3002

                      2. \u67e5\u770b\u626b\u63cf\u62a5\u544a\u5185\u5bb9\uff0c\u5305\u62ec\uff1a

                        • \u626b\u63cf\u7684\u76ee\u6807\u96c6\u7fa4
                        • \u4f7f\u7528\u7684\u626b\u63cf\u7b56\u7565
                        • \u626b\u63cf\u9891\u7387
                        • \u98ce\u9669\u603b\u6570\u3001\u9ad8\u98ce\u9669\u6570\u3001\u4e2d\u98ce\u9669\u6570\u3001\u4f4e\u98ce\u9669\u6570
                        • \u626b\u63cf\u65f6\u95f4
                        • \u68c0\u67e5\u8be6\u60c5\uff0c\u4f8b\u5982\u6f0f\u6d1e ID\u3001\u6f0f\u6d1e\u7c7b\u578b\u3001\u6f0f\u6d1e\u540d\u79f0\u3001\u6f0f\u6d1e\u63cf\u8ff0\u7b49

                      "},{"location":"admin/kpanda/security/cis/config.html","title":"\u626b\u63cf\u914d\u7f6e","text":"

                      \u4f7f\u7528\u5408\u89c4\u6027\u626b\u63cf\u7684\u7b2c\u4e00\u6b65\uff0c\u5c31\u662f\u5148\u521b\u5efa\u626b\u63cf\u914d\u7f6e\u3002\u57fa\u4e8e\u626b\u63cf\u914d\u7f6e\u518d\u521b\u5efa\u626b\u63cf\u7b56\u7565\u3001\u6267\u884c\u626b\u63cf\u7b56\u7565\uff0c\u6700\u540e\u67e5\u770b\u626b\u63cf\u7ed3\u679c\u3002

                      "},{"location":"admin/kpanda/security/cis/config.html#_2","title":"\u521b\u5efa\u626b\u63cf\u914d\u7f6e","text":"

                      \u521b\u5efa\u626b\u63cf\u914d\u7f6e\u7684\u6b65\u9aa4\u5982\u4e0b\uff1a

                      1. \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7684\u9996\u9875\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5b89\u5168\u7ba1\u7406 \u3002

                      2. \u9ed8\u8ba4\u8fdb\u5165 \u5408\u89c4\u6027\u626b\u63cf \u9875\u9762\uff0c\u70b9\u51fb \u626b\u63cf\u914d\u7f6e \u9875\u7b7e\uff0c\u7136\u540e\u5728\u53f3\u4e0a\u89d2\u70b9\u51fb \u521b\u5efa\u626b\u63cf\u914d\u7f6e \u3002

                      3. \u586b\u5199\u914d\u7f6e\u540d\u79f0\u3001\u9009\u62e9\u914d\u7f6e\u6a21\u677f\u3001\u6309\u9700\u52fe\u9009\u626b\u63cf\u9879\uff0c\u6700\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                        \u626b\u63cf\u6a21\u677f\uff1a\u76ee\u524d\u63d0\u4f9b\u4e86\u4e24\u4e2a\u6a21\u677f\u3002 kubeadm \u6a21\u677f\u9002\u7528\u4e8e\u4e00\u822c\u60c5\u51b5\u4e0b\u7684 Kubernetes \u96c6\u7fa4\u3002 \u6211\u4eec\u5728 kubeadm \u6a21\u677f\u57fa\u7840\u4e0a\uff0c\u7ed3\u5408\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u5e73\u53f0\u8bbe\u8ba1\u5ffd\u7565\u4e86\u4e0d\u9002\u7528\u4e8e\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u626b\u63cf\u9879\u3002

                      "},{"location":"admin/kpanda/security/cis/config.html#_3","title":"\u67e5\u770b\u626b\u63cf\u914d\u7f6e","text":"

                      \u5728\u626b\u63cf\u914d\u7f6e\u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u626b\u63cf\u914d\u7f6e\u7684\u540d\u79f0\uff0c\u53ef\u4ee5\u67e5\u770b\u8be5\u914d\u7f6e\u7684\u7c7b\u578b\u3001\u626b\u63cf\u9879\u6570\u91cf\u3001\u521b\u5efa\u65f6\u95f4\u3001\u914d\u7f6e\u6a21\u677f\uff0c\u4ee5\u53ca\u8be5\u914d\u7f6e\u542f\u7528\u7684\u5177\u4f53\u626b\u63cf\u9879\u3002

                      "},{"location":"admin/kpanda/security/cis/config.html#_4","title":"\u66f4\u65b0/\u5220\u9664\u626b\u63cf\u914d\u7f6e","text":"

                      \u626b\u63cf\u914d\u7f6e\u521b\u5efa\u6210\u529f\u4e4b\u540e\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u6c42\u66f4\u65b0\u914d\u7f6e\u6216\u5220\u9664\u8be5\u914d\u7f6e\u3002

                      \u5728\u626b\u63cf\u914d\u7f6e\u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u914d\u7f6e\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\uff1a

                      • \u9009\u62e9 \u7f16\u8f91 \u53ef\u4ee5\u66f4\u65b0\u914d\u7f6e\uff0c\u652f\u6301\u66f4\u65b0\u63cf\u8ff0\u3001\u6a21\u677f\u548c\u626b\u63cf\u9879\u3002\u4e0d\u53ef\u66f4\u6539\u914d\u7f6e\u540d\u79f0\u3002
                      • \u9009\u62e9 \u5220\u9664 \u53ef\u4ee5\u5220\u9664\u8be5\u914d\u7f6e\u3002

                      "},{"location":"admin/kpanda/security/cis/policy.html","title":"\u626b\u63cf\u7b56\u7565","text":""},{"location":"admin/kpanda/security/cis/policy.html#_2","title":"\u521b\u5efa\u626b\u63cf\u7b56\u7565","text":"

                      \u521b\u5efa\u626b\u63cf\u914d\u7f6e\u4e4b\u540e\uff0c\u53ef\u4ee5\u57fa\u4e8e\u914d\u7f6e\u521b\u5efa\u626b\u63cf\u7b56\u7565\u3002

                      1. \u5728 \u5b89\u5168\u7ba1\u7406 -> \u5408\u89c4\u6027\u626b\u63cf \u9875\u9762\u7684 \u626b\u63cf\u7b56\u7565 \u9875\u7b7e\u4e0b\uff0c\u5728\u53f3\u4fa7\u70b9\u51fb\u521b\u5efa\u626b\u63cf\u7b56\u7565\u3002

                      2. \u53c2\u8003\u4e0b\u5217\u8bf4\u660e\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                        • \u96c6\u7fa4\uff1a\u9009\u62e9\u9700\u8981\u626b\u63cf\u54ea\u4e2a\u96c6\u7fa4\u3002\u53ef\u9009\u7684\u96c6\u7fa4\u5217\u8868\u6765\u81ea\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u7684\u96c6\u7fa4\u3002\u5982\u679c\u6ca1\u6709\u60f3\u9009\u7684\u96c6\u7fa4\uff0c\u53ef\u4ee5\u53bb\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u96c6\u7fa4\u3002
                        • \u626b\u63cf\u914d\u7f6e\uff1a\u9009\u62e9\u4e8b\u5148\u521b\u5efa\u597d\u7684\u626b\u63cf\u914d\u7f6e\u3002\u626b\u63cf\u914d\u7f6e\u89c4\u5b9a\u4e86\u9700\u8981\u6267\u884c\u54ea\u4e9b\u5177\u4f53\u7684\u626b\u63cf\u9879\u3002
                        • \u626b\u63cf\u7c7b\u578b\uff1a

                          • \u7acb\u5373\u626b\u63cf\uff1a\u5728\u626b\u63cf\u7b56\u7565\u521b\u5efa\u597d\u4e4b\u540e\u7acb\u5373\u6267\u884c\u4e00\u6b21\u626b\u63cf\uff0c\u540e\u7eed\u4e0d\u53ef\u4ee5\u81ea\u52a8/\u624b\u52a8\u518d\u6b21\u6267\u884c\u626b\u63cf\u3002
                          • \u5b9a\u65f6\u626b\u63cf\uff1a\u901a\u8fc7\u8bbe\u7f6e\u626b\u63cf\u5468\u671f\uff0c\u81ea\u52a8\u6309\u65f6\u91cd\u590d\u6267\u884c\u626b\u63cf\u3002
                        • \u626b\u63cf\u62a5\u544a\u4fdd\u7559\u6570\u91cf\uff1a\u8bbe\u7f6e\u6700\u591a\u4fdd\u7559\u591a\u5c11\u626b\u63cf\u62a5\u544a\u3002\u8d85\u8fc7\u6307\u5b9a\u7684\u4fdd\u7559\u6570\u91cf\u65f6\uff0c\u4ece\u6700\u65e9\u7684\u62a5\u544a\u5f00\u59cb\u5220\u9664\u3002

                      "},{"location":"admin/kpanda/security/cis/policy.html#_3","title":"\u66f4\u65b0/\u5220\u9664\u626b\u63cf\u7b56\u7565","text":"

                      \u521b\u5efa\u626b\u63cf\u7b56\u7565\u4e4b\u540e\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u8981\u66f4\u65b0\u6216\u5220\u9664\u626b\u63cf\u7b56\u7565\u3002

                      \u5728 \u626b\u63cf\u7b56\u7565 \u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u914d\u7f6e\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\uff1a

                      • \u5bf9\u4e8e\u5468\u671f\u6027\u7684\u626b\u63cf\u7b56\u7565\uff1a

                        • \u9009\u62e9 \u7acb\u5373\u6267\u884c \u610f\u5473\u7740\uff0c\u5728\u5468\u671f\u8ba1\u5212\u4e4b\u5916\u7acb\u5373\u518d\u626b\u63cf\u4e00\u6b21\u96c6\u7fa4
                        • \u9009\u62e9 \u7981\u7528 \u4f1a\u4e2d\u65ad\u626b\u63cf\u8ba1\u5212\uff0c\u76f4\u5230\u70b9\u51fb \u542f\u7528 \u624d\u53ef\u4ee5\u7ee7\u7eed\u6839\u636e\u5468\u671f\u8ba1\u5212\u6267\u884c\u8be5\u626b\u63cf\u7b56\u7565\u3002
                        • \u9009\u62e9 \u7f16\u8f91 \u53ef\u4ee5\u66f4\u65b0\u914d\u7f6e\uff0c\u652f\u6301\u66f4\u65b0\u626b\u63cf\u914d\u7f6e\u3001\u7c7b\u578b\u3001\u626b\u63cf\u5468\u671f\u3001\u62a5\u544a\u4fdd\u7559\u6570\u91cf\uff0c\u4e0d\u53ef\u66f4\u6539\u914d\u7f6e\u540d\u79f0\u548c\u9700\u8981\u626b\u63cf\u7684\u76ee\u6807\u96c6\u7fa4\u3002
                        • \u9009\u62e9 \u5220\u9664 \u53ef\u4ee5\u5220\u9664\u8be5\u914d\u7f6e
                      • \u5bf9\u4e8e\u4e00\u6b21\u6027\u7684\u626b\u63cf\u7b56\u7565\uff1a\u4ec5\u652f\u6301 \u5220\u9664 \u64cd\u4f5c\u3002

                      "},{"location":"admin/kpanda/security/cis/report.html","title":"\u626b\u63cf\u62a5\u544a","text":"


                      "},{"location":"admin/kpanda/security/cis/report.html#_1","title":"\u626b\u63cf\u62a5\u544a","text":"

                      \u6267\u884c\u626b\u63cf\u7b56\u7565\u4e4b\u540e\u4f1a\u81ea\u52a8\u751f\u6210\u626b\u63cf\u62a5\u544a\u3002\u60a8\u53ef\u4ee5\u5728\u7ebf\u67e5\u770b\u626b\u63cf\u62a5\u544a\u6216\u5c06\u5176\u4e0b\u8f7d\u5230\u672c\u5730\u67e5\u770b\u3002

                      • Download a scan report

                        On the Scan Reports tab of the Security Management -> Compliance Scan page, click the ┇ action button to the right of a report and select Download.

                      • View a scan report online

                        Click the name of a report to view the contents of the CIS compliance scan report online, including:

                        • the target cluster scanned
                        • the scan policy and scan configuration used
                        • the scan start time
                        • the total number of scan items, and the counts of passed and failed items
                        • remediation suggestions for failed scan items
                        • suggestions for even safer operation for passed scan items

                      "},{"location":"admin/kpanda/storage/pv.html","title":"\u6570\u636e\u5377(PV)","text":"

                      \u6570\u636e\u5377\uff08PersistentVolume\uff0cPV\uff09\u662f\u96c6\u7fa4\u4e2d\u7684\u4e00\u5757\u5b58\u50a8\uff0c\u53ef\u7531\u7ba1\u7406\u5458\u4e8b\u5148\u5236\u5907\uff0c\u6216\u4f7f\u7528\u5b58\u50a8\u7c7b\uff08Storage Class\uff09\u6765\u52a8\u6001\u5236\u5907\u3002PV \u662f\u96c6\u7fa4\u8d44\u6e90\uff0c\u4f46\u62e5\u6709\u72ec\u7acb\u7684\u751f\u547d\u5468\u671f\uff0c\u4e0d\u4f1a\u968f\u7740 Pod \u8fdb\u7a0b\u7ed3\u675f\u800c\u88ab\u5220\u9664\u3002\u5c06 PV \u6302\u8f7d\u5230\u5de5\u4f5c\u8d1f\u8f7d\u53ef\u4ee5\u5b9e\u73b0\u5de5\u4f5c\u8d1f\u8f7d\u7684\u6570\u636e\u6301\u4e45\u5316\u3002PV \u4e2d\u4fdd\u5b58\u4e86\u53ef\u88ab Pod \u4e2d\u5bb9\u5668\u8bbf\u95ee\u7684\u6570\u636e\u76ee\u5f55\u3002

                      "},{"location":"admin/kpanda/storage/pv.html#_1","title":"\u521b\u5efa\u6570\u636e\u5377","text":"

                      \u76ee\u524d\u652f\u6301\u901a\u8fc7 YAML \u548c\u8868\u5355\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u6570\u636e\u5377\uff0c\u8fd9\u4e24\u79cd\u65b9\u5f0f\u5404\u6709\u4f18\u52a3\uff0c\u53ef\u4ee5\u6ee1\u8db3\u4e0d\u540c\u7528\u6237\u7684\u4f7f\u7528\u9700\u6c42\u3002

                      • \u901a\u8fc7 YAML \u521b\u5efa\u6b65\u9aa4\u66f4\u5c11\u3001\u66f4\u9ad8\u6548\uff0c\u4f46\u95e8\u69db\u8981\u6c42\u8f83\u9ad8\uff0c\u9700\u8981\u719f\u6089\u6570\u636e\u5377\u7684 YAML \u6587\u4ef6\u914d\u7f6e\u3002

                      • \u901a\u8fc7\u8868\u5355\u521b\u5efa\u66f4\u76f4\u89c2\u66f4\u7b80\u5355\uff0c\u6839\u636e\u63d0\u793a\u586b\u5199\u5bf9\u5e94\u7684\u503c\u5373\u53ef\uff0c\u4f46\u6b65\u9aa4\u66f4\u52a0\u7e41\u7410\u3002

                      "},{"location":"admin/kpanda/storage/pv.html#yaml","title":"YAML \u521b\u5efa","text":"
                      1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377(PV) -> YAML \u521b\u5efa \u3002

                      2. \u5728\u5f39\u6846\u4e2d\u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u7136\u540e\u5728\u5f39\u6846\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                        \u652f\u6301\u4ece\u672c\u5730\u5bfc\u5165 YAML \u6587\u4ef6\u6216\u5c06\u586b\u5199\u597d\u7684\u6587\u4ef6\u4e0b\u8f7d\u4fdd\u5b58\u5230\u672c\u5730\u3002
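
                      For reference, a prepared PV YAML file could look like the minimal sketch below, using HostPath as an example; the name, capacity, and path are illustrative placeholders.

                      apiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: pv-hostpath-demo # hypothetical name\nspec:\n  capacity:\n    storage: 10Gi\n  accessModes:\n    - ReadWriteOnce\n  persistentVolumeReclaimPolicy: Retain\n  hostPath:\n    path: /mnt/data # directory on the node's filesystem\n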

                      "},{"location":"admin/kpanda/storage/pv.html#_2","title":"\u8868\u5355\u521b\u5efa","text":"
                      1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377(PV) -> \u521b\u5efa\u6570\u636e\u5377(PV) \u3002

                      2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\u3002

                        • \u6570\u636e\u5377\u540d\u79f0\u3001\u6570\u636e\u5377\u7c7b\u578b\u3001\u6302\u8f7d\u8def\u5f84\u3001\u5377\u6a21\u5f0f\u3001\u8282\u70b9\u4eb2\u548c\u6027\u5728\u521b\u5efa\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                        • \u6570\u636e\u5377\u7c7b\u578b\uff1a\u6709\u5173\u5377\u7c7b\u578b\u7684\u8be6\u7ec6\u4ecb\u7ecd\uff0c\u53ef\u53c2\u8003 Kubernetes \u5b98\u65b9\u6587\u6863\u5377\u3002

                        • Local\uff1a\u5c06 Node \u8282\u70b9\u7684\u672c\u5730\u5b58\u50a8\u5305\u88c5\u6210 PVC \u63a5\u53e3\uff0c\u5bb9\u5668\u76f4\u63a5\u4f7f\u7528 PVC \u800c\u65e0\u9700\u5173\u6ce8\u5e95\u5c42\u7684\u5b58\u50a8\u7c7b\u578b\u3002Local \u5377\u4e0d\u652f\u6301\u52a8\u6001\u914d\u7f6e\u6570\u636e\u5377\uff0c\u4f46\u652f\u6301\u914d\u7f6e\u8282\u70b9\u4eb2\u548c\u6027\uff0c\u53ef\u4ee5\u9650\u5236\u80fd\u4ece\u54ea\u4e9b\u8282\u70b9\u4e0a\u8bbf\u95ee\u8be5\u6570\u636e\u5377\u3002

                        • HostPath\uff1a\u4f7f\u7528 Node \u8282\u70b9\u7684\u6587\u4ef6\u7cfb\u7edf\u4e0a\u7684\u6587\u4ef6\u6216\u76ee\u5f55\u4f5c\u4e3a\u6570\u636e\u5377\uff0c\u4e0d\u652f\u6301\u57fa\u4e8e\u8282\u70b9\u4eb2\u548c\u6027\u7684 Pod \u8c03\u5ea6\u3002

                        • \u6302\u8f7d\u8def\u5f84\uff1a\u5c06\u6570\u636e\u5377\u6302\u8f7d\u5230\u5bb9\u5668\u4e2d\u7684\u67d0\u4e2a\u5177\u4f53\u76ee\u5f55\u4e0b\u3002

                        • \u8bbf\u95ee\u6a21\u5f0f\uff1a

                          • ReadWriteOnce\uff1a\u6570\u636e\u5377\u53ef\u4ee5\u88ab\u4e00\u4e2a\u8282\u70b9\u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002
                          • ReadWriteMany\uff1a\u6570\u636e\u5377\u53ef\u4ee5\u88ab\u591a\u4e2a\u8282\u70b9\u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002
                          • ReadOnlyMany\uff1a\u6570\u636e\u5377\u53ef\u4ee5\u88ab\u591a\u4e2a\u8282\u70b9\u4ee5\u53ea\u8bfb\u65b9\u5f0f\u6302\u8f7d\u3002
                          • ReadWriteOncePod\uff1a\u6570\u636e\u5377\u53ef\u4ee5\u88ab\u5355\u4e2a Pod \u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002
                        • \u56de\u6536\u7b56\u7565\uff1a

                          • Retain\uff1a\u4e0d\u5220\u9664 PV\uff0c\u4ec5\u5c06\u5176\u72b6\u6001\u53d8\u4e3a released \uff0c\u9700\u8981\u7528\u6237\u624b\u52a8\u56de\u6536\u3002\u6709\u5173\u5982\u4f55\u624b\u52a8\u56de\u6536\uff0c\u53ef\u53c2\u8003\u6301\u4e45\u5377\u3002
                          • Recycle\uff1a\u4fdd\u7559 PV \u4f46\u6e05\u7a7a\u5176\u4e2d\u7684\u6570\u636e\uff0c\u6267\u884c\u57fa\u672c\u7684\u64e6\u9664\u64cd\u4f5c\uff08 rm -rf /thevolume/* \uff09\u3002
                          • Delete\uff1a\u5220\u9664 PV \u65f6\u53ca\u5176\u4e2d\u7684\u6570\u636e\u3002
                        • \u5377\u6a21\u5f0f\uff1a

                          • \u6587\u4ef6\u7cfb\u7edf\uff1a\u6570\u636e\u5377\u5c06\u88ab Pod \u6302\u8f7d\u5230\u67d0\u4e2a\u76ee\u5f55\u3002\u5982\u679c\u6570\u636e\u5377\u7684\u5b58\u50a8\u6765\u81ea\u67d0\u5757\u8bbe\u5907\u800c\u8be5\u8bbe\u5907\u76ee\u524d\u4e3a\u7a7a\uff0c\u7b2c\u4e00\u6b21\u6302\u8f7d\u5377\u4e4b\u524d\u4f1a\u5728\u8bbe\u5907\u4e0a\u521b\u5efa\u6587\u4ef6\u7cfb\u7edf\u3002
                          • \u5757\uff1a\u5c06\u6570\u636e\u5377\u4f5c\u4e3a\u539f\u59cb\u5757\u8bbe\u5907\u6765\u4f7f\u7528\u3002\u8fd9\u7c7b\u5377\u4ee5\u5757\u8bbe\u5907\u7684\u65b9\u5f0f\u4ea4\u7ed9 Pod \u4f7f\u7528\uff0c\u5176\u4e0a\u6ca1\u6709\u4efb\u4f55\u6587\u4ef6\u7cfb\u7edf\uff0c\u53ef\u4ee5\u8ba9 Pod \u66f4\u5feb\u5730\u8bbf\u95ee\u6570\u636e\u5377\u3002
                        • \u8282\u70b9\u4eb2\u548c\u6027\uff1a
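
Because Local volumes rely on node affinity, here is a minimal sketch of a Local PV with a required node-affinity rule; the node name and disk path are illustrative:

apiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: pv-local-demo\nspec:\n  capacity:\n    storage: 10Gi\n  accessModes:\n    - ReadWriteOnce\n  persistentVolumeReclaimPolicy: Retain\n  volumeMode: Filesystem\n  local:\n    path: /mnt/disks/ssd1                # local disk on the node\n  nodeAffinity:\n    required:\n      nodeSelectorTerms:\n        - matchExpressions:\n            - key: kubernetes.io/hostname\n              operator: In\n              values:\n                - node-1                 # illustrative node name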

                      "},{"location":"admin/kpanda/storage/pv.html#_3","title":"\u67e5\u770b\u6570\u636e\u5377","text":"

                      \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377(PV) \u3002

                      • \u8be5\u9875\u9762\u53ef\u4ee5\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u4e2d\u7684\u6240\u6709\u6570\u636e\u5377\uff0c\u4ee5\u53ca\u5404\u4e2a\u6570\u636e\u5377\u7684\u72b6\u6001\u3001\u5bb9\u91cf\u3001\u547d\u540d\u7a7a\u95f4\u7b49\u4fe1\u606f\u3002

                      • \u652f\u6301\u6309\u7167\u6570\u636e\u5377\u7684\u540d\u79f0\u3001\u72b6\u6001\u3001\u547d\u540d\u7a7a\u95f4\u3001\u521b\u5efa\u65f6\u95f4\u8fdb\u884c\u987a\u5e8f\u6216\u9006\u5e8f\u6392\u5e8f\u3002

                      • \u70b9\u51fb\u6570\u636e\u5377\u7684\u540d\u79f0\uff0c\u53ef\u4ee5\u67e5\u770b\u8be5\u6570\u636e\u5377\u7684\u57fa\u672c\u914d\u7f6e\u3001\u5b58\u50a8\u6c60\u4fe1\u606f\u3001\u6807\u7b7e\u3001\u6ce8\u89e3\u7b49\u4fe1\u606f\u3002

                      "},{"location":"admin/kpanda/storage/pv.html#_4","title":"\u514b\u9686\u6570\u636e\u5377","text":"

                      \u901a\u8fc7\u514b\u9686\u6570\u636e\u5377\uff0c\u53ef\u4ee5\u57fa\u4e8e\u88ab\u514b\u9686\u6570\u636e\u5377\u7684\u914d\u7f6e\uff0c\u91cd\u65b0\u521b\u5efa\u4e00\u4e2a\u65b0\u7684\u6570\u636e\u5377\u3002

                      1. \u8fdb\u5165\u514b\u9686\u9875\u9762

                        • \u5728\u6570\u636e\u5377\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u514b\u9686\u7684\u6570\u636e\u5377\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u514b\u9686 \u3002

                          \u4e5f\u53ef\u4ee5\u70b9\u51fb\u6570\u636e\u5377\u7684\u540d\u79f0\uff0c\u5728\u8be6\u60c5\u9875\u9762\u7684\u53f3\u4e0a\u89d2\u70b9\u51fb\u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u514b\u9686 \u3002

                      2. \u76f4\u63a5\u4f7f\u7528\u539f\u914d\u7f6e\uff0c\u6216\u8005\u6309\u9700\u8fdb\u884c\u4fee\u6539\uff0c\u7136\u540e\u5728\u9875\u9762\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                      "},{"location":"admin/kpanda/storage/pv.html#_5","title":"\u66f4\u65b0\u6570\u636e\u5377","text":"

                      \u6709\u4e24\u79cd\u9014\u5f84\u53ef\u4ee5\u66f4\u65b0\u6570\u636e\u5377\u3002\u652f\u6301\u901a\u8fc7\u8868\u5355\u6216 YAML \u6587\u4ef6\u66f4\u65b0\u6570\u636e\u5377\u3002

                      Note

                      \u4ec5\u652f\u6301\u66f4\u65b0\u6570\u636e\u5377\u7684\u522b\u540d\u3001\u5bb9\u91cf\u3001\u8bbf\u95ee\u6a21\u5f0f\u3001\u56de\u6536\u7b56\u7565\u3001\u6807\u7b7e\u548c\u6ce8\u89e3\u3002

                      • \u5728\u6570\u636e\u5377\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684\u6570\u636e\u5377\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                      • \u70b9\u51fb\u6570\u636e\u5377\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u6570\u636e\u5377\u7684\u8be6\u60c5\u9875\u9762\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                      "},{"location":"admin/kpanda/storage/pv.html#_6","title":"\u5220\u9664\u6570\u636e\u5377","text":"

                      \u5728\u6570\u636e\u5377\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u5220\u9664\u7684\u6570\u636e\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u5220\u9664 \u3002

                      \u4e5f\u53ef\u4ee5\u70b9\u51fb\u6570\u636e\u5377\u7684\u540d\u79f0\uff0c\u5728\u8be6\u60c5\u9875\u9762\u7684\u53f3\u4e0a\u89d2\u70b9\u51fb\u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u5220\u9664 \u3002

                      "},{"location":"admin/kpanda/storage/pvc.html","title":"\u6570\u636e\u5377\u58f0\u660e(PVC)","text":"

                      \u6301\u4e45\u5377\u58f0\u660e\uff08PersistentVolumeClaim\uff0cPVC\uff09\u8868\u8fbe\u7684\u662f\u7528\u6237\u5bf9\u5b58\u50a8\u7684\u8bf7\u6c42\u3002PVC \u6d88\u8017 PV \u8d44\u6e90\uff0c\u7533\u9886\u4f7f\u7528\u7279\u5b9a\u5927\u5c0f\u3001\u7279\u5b9a\u8bbf\u95ee\u6a21\u5f0f\u7684\u6570\u636e\u5377\uff0c\u4f8b\u5982\u8981\u6c42 PV \u5377\u4ee5 ReadWriteOnce\u3001ReadOnlyMany \u6216 ReadWriteMany \u7b49\u6a21\u5f0f\u6765\u6302\u8f7d\u3002

                      "},{"location":"admin/kpanda/storage/pvc.html#_1","title":"\u521b\u5efa\u6570\u636e\u5377\u58f0\u660e","text":"

                      \u76ee\u524d\u652f\u6301\u901a\u8fc7 YAML \u548c\u8868\u5355\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u6570\u636e\u5377\u58f0\u660e\uff0c\u8fd9\u4e24\u79cd\u65b9\u5f0f\u5404\u6709\u4f18\u52a3\uff0c\u53ef\u4ee5\u6ee1\u8db3\u4e0d\u540c\u7528\u6237\u7684\u4f7f\u7528\u9700\u6c42\u3002

                      • \u901a\u8fc7 YAML \u521b\u5efa\u6b65\u9aa4\u66f4\u5c11\u3001\u66f4\u9ad8\u6548\uff0c\u4f46\u95e8\u69db\u8981\u6c42\u8f83\u9ad8\uff0c\u9700\u8981\u719f\u6089\u6570\u636e\u5377\u58f0\u660e\u7684 YAML \u6587\u4ef6\u914d\u7f6e\u3002

                      • \u901a\u8fc7\u8868\u5355\u521b\u5efa\u66f4\u76f4\u89c2\u66f4\u7b80\u5355\uff0c\u6839\u636e\u63d0\u793a\u586b\u5199\u5bf9\u5e94\u7684\u503c\u5373\u53ef\uff0c\u4f46\u6b65\u9aa4\u66f4\u52a0\u7e41\u7410\u3002

                      "},{"location":"admin/kpanda/storage/pvc.html#yaml","title":"YAML \u521b\u5efa","text":"
                      1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e (PVC) -> YAML \u521b\u5efa \u3002

                      2. \u5728\u5f39\u6846\u4e2d\u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u7136\u540e\u5728\u5f39\u6846\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                        \u652f\u6301\u4ece\u672c\u5730\u5bfc\u5165 YAML \u6587\u4ef6\u6216\u5c06\u586b\u5199\u597d\u7684\u6587\u4ef6\u4e0b\u8f7d\u4fdd\u5b58\u5230\u672c\u5730\u3002
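
A minimal PVC manifest sketch for this step; the name, requested size, and storage pool name are illustrative:

apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: pvc-demo                         # illustrative name\n  namespace: default\nspec:\n  accessModes:\n    - ReadWriteOnce                      # must be satisfiable by the backing volume\n  resources:\n    requests:\n      storage: 10Gi                      # requested capacity\n  storageClassName: local-path           # illustrative storage pool (SC) name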

                      "},{"location":"admin/kpanda/storage/pvc.html#_2","title":"\u8868\u5355\u521b\u5efa","text":"
                      1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e (PVC) -> \u521b\u5efa\u6570\u636e\u5377\u58f0\u660e (PVC) \u3002

                      2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\u3002

                        • \u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\u3001\u547d\u540d\u7a7a\u95f4\u3001\u521b\u5efa\u65b9\u5f0f\u3001\u6570\u636e\u5377\u3001\u5bb9\u91cf\u3001\u8bbf\u95ee\u6a21\u5f0f\u5728\u521b\u5efa\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                        • \u521b\u5efa\u65b9\u5f0f\uff1a\u5728\u5df2\u6709\u7684\u5b58\u50a8\u6c60\u6216\u8005\u6570\u636e\u5377\u4e2d\u52a8\u6001\u521b\u5efa\u65b0\u7684\u6570\u636e\u5377\u58f0\u660e\uff0c\u6216\u8005\u57fa\u4e8e\u6570\u636e\u5377\u58f0\u660e\u7684\u5feb\u7167\u521b\u5efa\u65b0\u7684\u6570\u636e\u5377\u58f0\u660e\u3002

                          \u57fa\u4e8e\u5feb\u7167\u521b\u5efa\u65f6\u65e0\u6cd5\u4fee\u6539\u6570\u636e\u5377\u58f0\u660e\u7684\u5bb9\u91cf\uff0c\u53ef\u4ee5\u5728\u521b\u5efa\u5b8c\u6210\u540e\u518d\u8fdb\u884c\u4fee\u6539\u3002

                        • \u9009\u62e9\u521b\u5efa\u65b9\u5f0f\u4e4b\u540e\uff0c\u5728\u4e0b\u62c9\u5217\u8868\u4e2d\u9009\u62e9\u60f3\u8981\u4f7f\u7528\u7684\u5b58\u50a8\u6c60/\u6570\u636e\u5377/\u5feb\u7167\u3002

                        • \u8bbf\u95ee\u6a21\u5f0f\uff1a

                        • ReadWriteOnce\uff0c\u6570\u636e\u5377\u58f0\u660e\u53ef\u4ee5\u88ab\u4e00\u4e2a\u8282\u70b9\u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002

                        • ReadWriteMany\uff0c\u6570\u636e\u5377\u58f0\u660e\u53ef\u4ee5\u88ab\u591a\u4e2a\u8282\u70b9\u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002
                        • ReadOnlyMany\uff0c\u6570\u636e\u5377\u58f0\u660e\u53ef\u4ee5\u88ab\u591a\u4e2a\u8282\u70b9\u4ee5\u53ea\u8bfb\u65b9\u5f0f\u6302\u8f7d\u3002
                        • ReadWriteOncePod\uff0c\u6570\u636e\u5377\u58f0\u660e\u53ef\u4ee5\u88ab\u5355\u4e2a Pod \u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002

                      "},{"location":"admin/kpanda/storage/pvc.html#_3","title":"\u67e5\u770b\u6570\u636e\u5377\u58f0\u660e","text":"

                      \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e(PVC) \u3002

                      • \u8be5\u9875\u9762\u53ef\u4ee5\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u4e2d\u7684\u6240\u6709\u6570\u636e\u5377\u58f0\u660e\uff0c\u4ee5\u53ca\u5404\u4e2a\u6570\u636e\u5377\u58f0\u660e\u7684\u72b6\u6001\u3001\u5bb9\u91cf\u3001\u547d\u540d\u7a7a\u95f4\u7b49\u4fe1\u606f\u3002

                      • \u652f\u6301\u6309\u7167\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\u3001\u72b6\u6001\u3001\u547d\u540d\u7a7a\u95f4\u3001\u521b\u5efa\u65f6\u95f4\u8fdb\u884c\u987a\u5e8f\u6216\u9006\u5e8f\u6392\u5e8f\u3002

                      • \u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u53ef\u4ee5\u67e5\u770b\u8be5\u6570\u636e\u5377\u58f0\u660e\u7684\u57fa\u672c\u914d\u7f6e\u3001\u5b58\u50a8\u6c60\u4fe1\u606f\u3001\u6807\u7b7e\u3001\u6ce8\u89e3\u7b49\u4fe1\u606f\u3002

                      "},{"location":"admin/kpanda/storage/pvc.html#_4","title":"\u6269\u5bb9\u6570\u636e\u5377\u58f0\u660e","text":"
                      1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e(PVC) \uff0c\u627e\u5230\u60f3\u8981\u8c03\u6574\u5bb9\u91cf\u7684\u6570\u636e\u5377\u58f0\u660e\u3002

                      2. \u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u70b9\u51fb\u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u6269\u5bb9 \u3002

                      3. \u8f93\u5165\u76ee\u6807\u5bb9\u91cf\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a \u3002
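
Behind the form, expansion raises the storage request in the PVC spec. A minimal sketch of the only field that changes (sizes illustrative):

# before expansion\nspec:\n  resources:\n    requests:\n      storage: 10Gi\n# after entering a target capacity of 20Gi and clicking OK\nspec:\n  resources:\n    requests:\n      storage: 20Gi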

                      "},{"location":"admin/kpanda/storage/pvc.html#_5","title":"\u514b\u9686\u6570\u636e\u5377\u58f0\u660e","text":"

                      \u901a\u8fc7\u514b\u9686\u6570\u636e\u5377\u58f0\u660e\uff0c\u53ef\u4ee5\u57fa\u4e8e\u88ab\u514b\u9686\u6570\u636e\u5377\u58f0\u660e\u7684\u914d\u7f6e\uff0c\u91cd\u65b0\u521b\u5efa\u4e00\u4e2a\u65b0\u7684\u6570\u636e\u5377\u58f0\u660e\u3002

                      1. \u8fdb\u5165\u514b\u9686\u9875\u9762

                        • \u5728\u6570\u636e\u5377\u58f0\u660e\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u514b\u9686\u7684\u6570\u636e\u5377\u58f0\u660e\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u514b\u9686 \u3002

                          \u4e5f\u53ef\u4ee5\u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u5728\u8be6\u60c5\u9875\u9762\u7684\u53f3\u4e0a\u89d2\u70b9\u51fb\u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u514b\u9686 \u3002

                      2. \u76f4\u63a5\u4f7f\u7528\u539f\u914d\u7f6e\uff0c\u6216\u8005\u6309\u9700\u8fdb\u884c\u4fee\u6539\uff0c\u7136\u540e\u5728\u9875\u9762\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                      "},{"location":"admin/kpanda/storage/pvc.html#_6","title":"\u66f4\u65b0\u6570\u636e\u5377\u58f0\u660e","text":"

                      \u6709\u4e24\u79cd\u9014\u5f84\u53ef\u4ee5\u66f4\u65b0\u6570\u636e\u5377\u58f0\u660e\u3002\u652f\u6301\u901a\u8fc7\u8868\u5355\u6216 YAML \u6587\u4ef6\u66f4\u65b0\u6570\u636e\u5377\u58f0\u660e\u3002

                      Note

                      \u4ec5\u652f\u6301\u66f4\u65b0\u6570\u636e\u5377\u58f0\u660e\u7684\u522b\u540d\u3001\u6807\u7b7e\u548c\u6ce8\u89e3\u3002

                      • \u5728\u6570\u636e\u5377\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684\u6570\u636e\u5377\u58f0\u660e\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                      • \u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u6570\u636e\u5377\u58f0\u660e\u7684\u8be6\u60c5\u9875\u9762\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                      "},{"location":"admin/kpanda/storage/pvc.html#_7","title":"\u5220\u9664\u6570\u636e\u5377\u58f0\u660e","text":"

                      \u5728\u6570\u636e\u5377\u58f0\u660e\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u5220\u9664\u7684\u6570\u636e\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u5220\u9664 \u3002

                      \u4e5f\u53ef\u4ee5\u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u5728\u8be6\u60c5\u9875\u9762\u7684\u53f3\u4e0a\u89d2\u70b9\u51fb\u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u5220\u9664 \u3002

                      "},{"location":"admin/kpanda/storage/pvc.html#_8","title":"\u5e38\u89c1\u95ee\u9898","text":"
                      1. \u5982\u679c\u5217\u8868\u4e2d\u6ca1\u6709\u53ef\u9009\u7684\u5b58\u50a8\u6c60\u6216\u6570\u636e\u5377\uff0c\u53ef\u4ee5\u521b\u5efa\u5b58\u50a8\u6c60\u6216\u521b\u5efa\u6570\u636e\u5377\u3002

                      2. \u5982\u679c\u5217\u8868\u4e2d\u6ca1\u6709\u53ef\u9009\u7684\u5feb\u7167\uff0c\u53ef\u4ee5\u8fdb\u5165\u6570\u636e\u5377\u58f0\u660e\u7684\u8be6\u60c5\u9875\uff0c\u5728\u53f3\u4e0a\u89d2\u5236\u4f5c\u5feb\u7167\u3002

                      3. \u5982\u679c\u6570\u636e\u5377\u58f0\u660e\u6240\u4f7f\u7528\u7684\u5b58\u50a8\u6c60 (SC) \u6ca1\u6709\u542f\u7528\u5feb\u7167\uff0c\u5219\u65e0\u6cd5\u5236\u4f5c\u5feb\u7167\uff0c\u9875\u9762\u4e0d\u4f1a\u663e\u793a\u201c\u5236\u4f5c\u5feb\u7167\u201d\u9009\u9879\u3002

                      4. \u5982\u679c\u6570\u636e\u5377\u58f0\u660e\u6240\u4f7f\u7528\u7684\u5b58\u50a8\u6c60 (SC) \u6ca1\u6709\u5f00\u542f\u6269\u5bb9\u529f\u80fd\uff0c\u5219\u8be5\u6570\u636e\u5377\u4e0d\u652f\u6301\u6269\u5bb9\uff0c\u9875\u9762\u4e0d\u4f1a\u663e\u793a\u6269\u5bb9\u9009\u9879\u3002

                      "},{"location":"admin/kpanda/storage/sc-share.html","title":"\u5171\u4eab\u5b58\u50a8\u6c60","text":"

                      \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u652f\u6301\u5c06\u4e00\u4e2a\u5b58\u50a8\u6c60\u5171\u4eab\u7ed9\u591a\u4e2a\u547d\u540d\u7a7a\u95f4\u4f7f\u7528\uff0c\u4ee5\u4fbf\u63d0\u9ad8\u8d44\u6e90\u5229\u7528\u6548\u7387\u3002

                      1. \u5728\u5b58\u50a8\u6c60\u5217\u8868\u4e2d\u627e\u5230\u9700\u8981\u5171\u4eab\u7684\u5b58\u50a8\u6c60\uff0c\u5728\u53f3\u4fa7\u64cd\u4f5c\u680f\u4e0b\u70b9\u51fb \u6388\u6743\u547d\u540d\u7a7a\u95f4 \u3002

                      2. \u70b9\u51fb \u81ea\u5b9a\u4e49\u547d\u540d\u7a7a\u95f4 \u53ef\u4ee5\u9010\u4e00\u9009\u62e9\u9700\u8981\u5c06\u6b64\u5b58\u50a8\u6c60\u5171\u4eab\u5230\u54ea\u4e9b\u547d\u540d\u7a7a\u95f4\u3002

                        • \u70b9\u51fb \u6388\u6743\u6240\u6709\u547d\u540d\u7a7a\u95f4 \u53ef\u4ee5\u4e00\u6b21\u6027\u5c06\u6b64\u5b58\u50a8\u6c60\u5171\u4eab\u5230\u5f53\u524d\u96c6\u7fa4\u4e0b\u7684\u6240\u6709\u547d\u540d\u7a7a\u95f4\u3002
                        • \u5728\u5217\u8868\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u65b9\u70b9\u51fb \u79fb\u9664\u6388\u6743 \uff0c\u53ef\u4ee5\u89e3\u9664\u6388\u6743\uff0c\u505c\u6b62\u5c06\u6b64\u5b58\u50a8\u6c60\u5171\u4eab\u5230\u8be5\u547d\u540d\u7a7a\u95f4\u3002

                      "},{"location":"admin/kpanda/storage/sc.html","title":"\u5b58\u50a8\u6c60(SC)","text":"

                      \u5b58\u50a8\u6c60\u6307\u5c06\u8bb8\u591a\u7269\u7406\u78c1\u76d8\u7ec4\u6210\u4e00\u4e2a\u5927\u578b\u5b58\u50a8\u8d44\u6e90\u6c60\uff0c\u672c\u5e73\u53f0\u652f\u6301\u63a5\u5165\u5404\u7c7b\u5b58\u50a8\u5382\u5546\u540e\u521b\u5efa\u5757\u5b58\u50a8\u6c60\u3001\u672c\u5730\u5b58\u50a8\u6c60\u3001\u81ea\u5b9a\u4e49\u5b58\u50a8\u6c60\uff0c\u7136\u540e\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u52a8\u6001\u914d\u7f6e\u6570\u636e\u5377\u3002

                      "},{"location":"admin/kpanda/storage/sc.html#sc_1","title":"\u521b\u5efa\u5b58\u50a8\u6c60(SC)","text":"

                      \u76ee\u524d\u652f\u6301\u901a\u8fc7 YAML \u548c\u8868\u5355\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u5b58\u50a8\u6c60\uff0c\u8fd9\u4e24\u79cd\u65b9\u5f0f\u5404\u6709\u4f18\u52a3\uff0c\u53ef\u4ee5\u6ee1\u8db3\u4e0d\u540c\u7528\u6237\u7684\u4f7f\u7528\u9700\u6c42\u3002

                      • \u901a\u8fc7 YAML \u521b\u5efa\u6b65\u9aa4\u66f4\u5c11\u3001\u66f4\u9ad8\u6548\uff0c\u4f46\u95e8\u69db\u8981\u6c42\u8f83\u9ad8\uff0c\u9700\u8981\u719f\u6089\u5b58\u50a8\u6c60\u7684 YAML \u6587\u4ef6\u914d\u7f6e\u3002

                      • \u901a\u8fc7\u8868\u5355\u521b\u5efa\u66f4\u76f4\u89c2\u66f4\u7b80\u5355\uff0c\u6839\u636e\u63d0\u793a\u586b\u5199\u5bf9\u5e94\u7684\u503c\u5373\u53ef\uff0c\u4f46\u6b65\u9aa4\u66f4\u52a0\u7e41\u7410\u3002

                      "},{"location":"admin/kpanda/storage/sc.html#yaml","title":"YAML \u521b\u5efa","text":"
                      1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u5b58\u50a8\u6c60(SC) -> YAML \u521b\u5efa \u3002

                      2. \u5728\u5f39\u6846\u4e2d\u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u7136\u540e\u5728\u5f39\u6846\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                        \u652f\u6301\u4ece\u672c\u5730\u5bfc\u5165 YAML \u6587\u4ef6\u6216\u5c06\u586b\u5199\u597d\u7684\u6587\u4ef6\u4e0b\u8f7d\u4fdd\u5b58\u5230\u672c\u5730\u3002
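
A minimal StorageClass manifest sketch; the name is illustrative, and the provisioner reuses the rancher.io/local-path example mentioned in the form section below:

apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: sc-demo                          # illustrative name\nprovisioner: rancher.io/local-path       # driver in the vendor-defined format\nreclaimPolicy: Delete                    # delete the data when the volume is deleted\nvolumeBindingMode: WaitForFirstConsumer\nallowVolumeExpansion: true               # required for the expansion feature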

                      "},{"location":"admin/kpanda/storage/sc.html#_1","title":"\u8868\u5355\u521b\u5efa","text":"
                      1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u5b58\u50a8\u6c60(SC) -> \u521b\u5efa\u5b58\u50a8\u6c60(SC) \u3002

                      2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\uff0c\u7136\u540e\u5728\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                        \u81ea\u5b9a\u4e49\u5b58\u50a8\u7cfb\u7edf

                        • \u5b58\u50a8\u6c60\u540d\u79f0\u3001\u9a71\u52a8\u3001\u56de\u6536\u7b56\u7565\u5728\u521b\u5efa\u540e\u4e0d\u53ef\u4fee\u6539\u3002
                        • CSI \u5b58\u50a8\u9a71\u52a8\uff1a\u57fa\u4e8e\u6807\u51c6 Kubernetes \u7684\u5bb9\u5668\u5b58\u50a8\u63a5\u53e3\u63d2\u4ef6\uff0c\u9700\u9075\u5b88\u5b58\u50a8\u5382\u5546\u89c4\u5b9a\u7684\u683c\u5f0f\uff0c\u4f8b\u5982 rancher.io/local-path \u3002

                          • \u6709\u5173\u5982\u4f55\u586b\u5199\u4e0d\u540c\u5382\u5546\u63d0\u4f9b\u7684 CSI \u9a71\u52a8\uff0c\u53ef\u53c2\u8003 Kubernetes \u5b98\u65b9\u6587\u6863\u5b58\u50a8\u7c7b\u3002
                            • \u56de\u6536\u7b56\u7565\uff1a\u5220\u9664\u6570\u636e\u5377\u65f6\uff0c\u4fdd\u7559\u6570\u636e\u5377\u4e2d\u7684\u6570\u636e\u6216\u8005\u5220\u9664\u5176\u4e2d\u7684\u6570\u636e\u3002
                            • \u5feb\u7167/\u6269\u5bb9\uff1a\u5f00\u542f\u540e\uff0c\u57fa\u4e8e\u8be5\u5b58\u50a8\u6c60\u7684\u6570\u636e\u5377/\u6570\u636e\u5377\u58f0\u660e\u624d\u80fd\u652f\u6301\u6269\u5bb9\u548c\u5feb\u7167\u529f\u80fd\uff0c\u4f46 \u524d\u63d0\u662f\u5e95\u5c42\u4f7f\u7528\u7684\u5b58\u50a8\u9a71\u52a8\u652f\u6301\u5feb\u7167\u548c\u6269\u5bb9\u529f\u80fd\u3002

                        HwameiStor \u5b58\u50a8\u7cfb\u7edf

                        • \u5b58\u50a8\u6c60\u540d\u79f0\u3001\u9a71\u52a8\u3001\u56de\u6536\u7b56\u7565\u5728\u521b\u5efa\u540e\u4e0d\u53ef\u4fee\u6539\u3002
                        • \u5b58\u50a8\u7cfb\u7edf\uff1aHwameiStor \u5b58\u50a8\u7cfb\u7edf\u3002
                        • \u5b58\u50a8\u7c7b\u578b\uff1a\u652f\u6301 LVM\uff0c\u88f8\u78c1\u76d8\u7c7b\u578b
                          • LVM \u7c7b\u578b \uff1aHwameiStor \u63a8\u8350\u4f7f\u7528\u6b64\u65b9\u5f0f\uff0c\u53ef\u4f7f\u7528\u9ad8\u53ef\u7528\u6570\u636e\u5377\uff0c\u5bf9\u5e94\u7684\u7684 CSI \u5b58\u50a8\u9a71\u52a8\u4e3a lvm.hwameistor.io\u3002
                          • \u88f8\u78c1\u76d8\u6570\u636e\u5377 \uff1a \u9002\u7528\u4e8e\u975e\u9ad8\u53ef\u7528\u573a\u666f\uff0c\u65e0\u9ad8\u53ef\u7528\u80fd\u529b\uff0c\u5bf9\u5e94\u7684 CSI \u9a71\u52a8\u4e3a hdd.hwameistor.io
                        • \u9ad8\u53ef\u7528\u6a21\u5f0f\uff1a\u4f7f\u7528\u9ad8\u53ef\u7528\u80fd\u529b\u4e4b\u524d\u8bf7\u786e\u8ba4 DRBD \u7ec4\u4ef6 \u5df2\u5b89\u88c5\u3002\u5f00\u542f\u9ad8\u53ef\u7528\u6a21\u5f0f\u540e\uff0c\u53ef\u5c06\u6570\u636e\u5377\u526f\u672c\u6570\u8bbe\u7f6e\u4e3a 1 \u548c 2\u3002 \u5982\u9700\u8981\u53ef\u5c06\u6570\u636e\u5377\u526f\u672c\u4ece 1 Convert \u6210 1
                        • \u56de\u6536\u7b56\u7565\uff1a\u5220\u9664\u6570\u636e\u5377\u65f6\uff0c\u4fdd\u7559\u6570\u636e\u5377\u4e2d\u7684\u6570\u636e\u6216\u8005\u5220\u9664\u5176\u4e2d\u7684\u6570\u636e\u3002
                        • \u5feb\u7167/\u6269\u5bb9\uff1a\u5f00\u542f\u540e\uff0c\u57fa\u4e8e\u8be5\u5b58\u50a8\u6c60\u7684\u6570\u636e\u5377/\u6570\u636e\u5377\u58f0\u660e\u624d\u80fd\u652f\u6301\u6269\u5bb9\u548c\u5feb\u7167\u529f\u80fd\uff0c\u4f46 \u524d\u63d0\u662f\u5e95\u5c42\u4f7f\u7528\u7684\u5b58\u50a8\u9a71\u52a8\u652f\u6301\u5feb\u7167\u548c\u6269\u5bb9\u529f\u80fd\u3002

                        Note

                        \u76ee\u524d HwameiStor xfs\u3001ext4 \u4e24\u79cd\u6587\u4ef6\u7cfb\u7edf\uff0c\u5176\u4e2d\u9ed8\u8ba4\u4f7f\u7528\u7684\u662f xfs \u6587\u4ef6\u7cfb\u7edf\uff0c\u5982\u679c\u60f3\u8981\u66ff\u6362\u4e3a ext4\uff0c\u53ef\u4ee5\u5728\u81ea\u5b9a\u4e49\u53c2\u6570\u6dfb\u52a0 csi.storage.k8s.io/fstype: ext4
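
A sketch of an HwameiStor LVM storage pool that switches the file system to ext4 via the custom parameter above; apart from the provisioner and the fstype parameter, the fields are illustrative:

apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: hwameistor-lvm-ext4              # illustrative name\nprovisioner: lvm.hwameistor.io           # CSI driver of the LVM type\nreclaimPolicy: Retain\nallowVolumeExpansion: true\nparameters:\n  csi.storage.k8s.io/fstype: ext4        # default is xfs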

                      "},{"location":"admin/kpanda/storage/sc.html#sc_2","title":"\u66f4\u65b0\u5b58\u50a8\u6c60(SC)","text":"

                      \u5728\u5b58\u50a8\u6c60\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684\u5b58\u50a8\u6c60\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u7f16\u8f91 \u5373\u53ef\u901a\u8fc7\u66f4\u65b0\u5b58\u50a8\u6c60\u3002

                      Info

                      \u9009\u62e9 \u67e5\u770b YAML \u53ef\u4ee5\u67e5\u770b\u8be5\u5b58\u50a8\u6c60\u7684 YAML \u6587\u4ef6\uff0c\u4f46\u4e0d\u652f\u6301\u7f16\u8f91\u3002

                      "},{"location":"admin/kpanda/storage/sc.html#sc_3","title":"\u5220\u9664\u5b58\u50a8\u6c60(SC)","text":"

                      \u5728\u5b58\u50a8\u6c60\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u5220\u9664\u7684\u5b58\u50a8\u6c60\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u5220\u9664 \u3002

                      "},{"location":"admin/kpanda/workloads/create-cronjob.html","title":"\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\uff08CronJob\uff09","text":"

                      \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\uff08CronJob\uff09\u3002

                      \u5b9a\u65f6\u4efb\u52a1\uff08CronJob\uff09\u9002\u7528\u4e8e\u4e8e\u6267\u884c\u5468\u671f\u6027\u7684\u64cd\u4f5c\uff0c\u4f8b\u5982\u5907\u4efd\u3001\u62a5\u544a\u751f\u6210\u7b49\u3002\u8fd9\u4e9b\u4efb\u52a1\u53ef\u4ee5\u914d\u7f6e\u4e3a\u5468\u671f\u6027\u91cd\u590d\u7684\uff08\u4f8b\u5982\uff1a\u6bcf\u5929/\u6bcf\u5468/\u6bcf\u6708\u4e00\u6b21\uff09\uff0c\u53ef\u4ee5\u5b9a\u4e49\u4efb\u52a1\u5f00\u59cb\u6267\u884c\u7684\u65f6\u95f4\u95f4\u9694\u3002

                      "},{"location":"admin/kpanda/workloads/create-cronjob.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\uff08CronJob\uff09\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                      • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                      • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                      • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                      "},{"location":"admin/kpanda/workloads/create-cronjob.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

                      \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u5b9a\u65f6\u4efb\u52a1\u3002

                      1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                      2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u5b9a\u65f6\u4efb\u52a1 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                      3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\u3001\u5b9a\u65f6\u4efb\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                        \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de \u5b9a\u65f6\u4efb\u52a1 \u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u5b9a\u65f6\u4efb\u52a1\u6267\u884c\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u3001\u91cd\u542f\u7b49\u64cd\u4f5c\u3002

                      "},{"location":"admin/kpanda/workloads/create-cronjob.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"

                      \u5728 \u521b\u5efa\u5b9a\u65f6\u4efb\u52a1 \u9875\u9762\u4e2d\uff0c\u6839\u636e\u4e0b\u8868\u8f93\u5165\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                      • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                      • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u5b9a\u65f6\u4efb\u52a1\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                      • \u63cf\u8ff0\uff1a\u8f93\u5165\u5de5\u4f5c\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u91cf\u5e94\u4e0d\u8d85\u8fc7 512 \u4e2a\u3002
                      "},{"location":"admin/kpanda/workloads/create-cronjob.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                      \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                      \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

                      \u57fa\u672c\u4fe1\u606f\uff08\u5fc5\u586b\uff09\u751f\u547d\u5468\u671f\uff08\u9009\u586b\uff09\u5065\u5eb7\u68c0\u67e5\uff08\u9009\u586b\uff09\u73af\u5883\u53d8\u91cf\uff08\u9009\u586b\uff09\u6570\u636e\u5b58\u50a8\uff08\u9009\u586b\uff09\u5b89\u5168\u8bbe\u7f6e\uff08\u9009\u586b\uff09

                      \u5728\u914d\u7f6e\u5bb9\u5668\u76f8\u5173\u53c2\u6570\u65f6\uff0c\u5fc5\u987b\u6b63\u786e\u586b\u5199\u5bb9\u5668\u7684\u540d\u79f0\u3001\u955c\u50cf\u53c2\u6570\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u8fdb\u5165\u4e0b\u4e00\u6b65\u3002\u53c2\u8003\u4ee5\u4e0b\u8981\u6c42\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u8ba4 \u3002

                      • \u5bb9\u5668\u7c7b\u578b\uff1a\u9ed8\u8ba4\u4e3a\u5de5\u4f5c\u5bb9\u5668\u3002\u6709\u5173\u521d\u59cb\u5316\u5bb9\u5668\uff0c\u53c2\u89c1 k8s \u5b98\u65b9\u6587\u6863\u3002
                      • \u5bb9\u5668\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u652f\u6301\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\u3002\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 nginx-01\u3002
                      • \u955c\u50cf\uff1a
                        • \u5bb9\u5668\u955c\u50cf\uff1a\u4ece\u5217\u8868\u4e2d\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u955c\u50cf\u3002\u8f93\u5165\u955c\u50cf\u540d\u79f0\u65f6\uff0c\u9ed8\u8ba4\u4ece\u5b98\u65b9\u7684 DockerHub \u62c9\u53d6\u955c\u50cf\u3002 \u63a5\u5165\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u955c\u50cf\u4ed3\u5e93\u6a21\u5757\u540e\uff0c\u53ef\u4ee5\u70b9\u51fb\u53f3\u4fa7\u7684 \u9009\u62e9\u955c\u50cf \u6309\u94ae\u6765\u9009\u62e9\u955c\u50cf\u3002
                        • \u955c\u50cf\u7248\u672c\uff1a\u4ece\u4e0b\u62c9\u5217\u8868\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u7248\u672c\u3002
                        • \u955c\u50cf\u62c9\u53d6\u7b56\u7565\uff1a\u52fe\u9009 \u603b\u662f\u62c9\u53d6\u955c\u50cf \u540e\uff0c\u8d1f\u8f7d\u6bcf\u6b21\u91cd\u542f/\u5347\u7ea7\u65f6\u90fd\u4f1a\u4ece\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u955c\u50cf\u3002 \u5982\u679c\u4e0d\u52fe\u9009\uff0c\u5219\u53ea\u62c9\u53d6\u672c\u5730\u955c\u50cf\uff0c\u53ea\u6709\u5f53\u955c\u50cf\u5728\u672c\u5730\u4e0d\u5b58\u5728\u65f6\u624d\u4ece\u955c\u50cf\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u3002 \u66f4\u591a\u8be6\u60c5\u53ef\u53c2\u8003\u955c\u50cf\u62c9\u53d6\u7b56\u7565\u3002
                        • \u955c\u50cf\u4ed3\u5e93\u5bc6\u94a5\uff1a\u53ef\u9009\u3002\u5982\u679c\u76ee\u6807\u4ed3\u5e93\u9700\u8981 Secret \u624d\u80fd\u8bbf\u95ee\uff0c\u9700\u8981\u5148\u53bb\u521b\u5efa\u4e00\u4e2a\u5bc6\u94a5\u3002
                      • \u7279\u6743\u5bb9\u5668\uff1a\u5bb9\u5668\u9ed8\u8ba4\u4e0d\u53ef\u4ee5\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u4efb\u4f55\u8bbe\u5907\uff0c\u5f00\u542f\u7279\u6743\u5bb9\u5668\u540e\uff0c\u5bb9\u5668\u5373\u53ef\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u6240\u6709\u8bbe\u5907\uff0c\u4eab\u6709\u5bbf\u4e3b\u673a\u4e0a\u7684\u8fd0\u884c\u8fdb\u7a0b\u7684\u6240\u6709\u6743\u9650\u3002
                      • CPU/\u5185\u5b58\u914d\u989d\uff1aCPU/\u5185\u5b58\u8d44\u6e90\u7684\u8bf7\u6c42\u503c\uff08\u9700\u8981\u4f7f\u7528\u7684\u6700\u5c0f\u8d44\u6e90\uff09\u548c\u9650\u5236\u503c\uff08\u5141\u8bb8\u4f7f\u7528\u7684\u6700\u5927\u8d44\u6e90\uff09\u3002\u8bf7\u6839\u636e\u9700\u8981\u4e3a\u5bb9\u5668\u914d\u7f6e\u8d44\u6e90\uff0c\u907f\u514d\u8d44\u6e90\u6d6a\u8d39\u548c\u56e0\u5bb9\u5668\u8d44\u6e90\u8d85\u989d\u5bfc\u81f4\u7cfb\u7edf\u6545\u969c\u3002\u9ed8\u8ba4\u503c\u5982\u56fe\u6240\u793a\u3002
                      • GPU \u914d\u7f6e\uff1a\u4e3a\u5bb9\u5668\u914d\u7f6e GPU \u7528\u91cf\uff0c \u4ec5\u652f\u6301\u8f93\u5165\u6b63\u6574\u6570\u3002
                        • \u6574\u5361\u6a21\u5f0f\uff1a
                          • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\u3002\u914d\u7f6e\u540e\uff0c\u5bb9\u5668\u5c06\u5360\u7528\u6574\u5f20\u7269\u7406 GPU\u5361\u3002\u540c\u65f6\u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                        • \u865a\u62df\u5316\u6a21\u5f0f\uff1a
                          • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\uff0c \u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                          • GPU \u7b97\u529b\uff1a\u6bcf\u5f20\u7269\u7406 GPU \u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u7b97\u529b\u767e\u5206\u6bd4\uff0c\u6700\u591a\u4e3a100%\u3002
                          • \u663e\u5b58\uff1a\u6bcf\u5f20\u7269\u7406\u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u663e\u5b58\u6570\u91cf\u3002
                          • \u8c03\u5ea6\u7b56\u7565\uff08Binpack / Spread\uff09\uff1a\u652f\u6301\u57fa\u4e8e GPU \u5361\u548c\u57fa\u4e8e\u8282\u70b9\u7684\u4e24\u79cd\u7ef4\u5ea6\u7684\u8c03\u5ea6\u7b56\u7565\u3002Binpack \u662f\u96c6\u4e2d\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u540c\u4e00\u4e2a\u8282\u70b9\u7684\u540c\u4e00\u5f20 GPU \u5361\u4e0a\uff1bSpread \u662f\u5206\u6563\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u4e0d\u540c\u8282\u70b9\u7684\u4e0d\u540c GPU \u5361\u4e0a\uff0c\u6839\u636e\u5b9e\u9645\u573a\u666f\u53ef\u7ec4\u5408\u4f7f\u7528\u3002\uff08\u5f53\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u4e0e\u96c6\u7fa4\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u51b2\u7a81\u65f6\uff0c\u7cfb\u7edf\u4f18\u5148\u4f7f\u7528\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684\u8c03\u5ea6\u7b56\u7565\uff09\u3002
                          • \u4efb\u52a1\u4f18\u5148\u7ea7\uff1aGPU \u7b97\u529b\u4f1a\u4f18\u5148\u4f9b\u7ed9\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u4f7f\u7528\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u51cf\u5c11\u751a\u81f3\u6682\u505c\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u76f4\u5230\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u7ed3\u675f\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u91cd\u65b0\u7ee7\u7eed\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u5e38\u7528\u4e8e\u5728\u79bb\u7ebf\u6df7\u90e8\u573a\u666f\u3002
                          • \u6307\u5b9a\u578b\u53f7\uff1a\u5c06\u5de5\u4f5c\u8d1f\u8f7d\u8c03\u5ea6\u5230\u6307\u5b9a\u578b\u53f7\u7684 GPU \u5361\u4e0a\uff0c\u9002\u7528\u4e8e\u5bf9 GPU \u578b\u53f7\u6709\u7279\u6b8a\u8981\u6c42\u7684\u573a\u666f\u3002
                        • Mig \u6a21\u5f0f
                          • \u89c4\u683c\uff1a\u5207\u5206\u540e\u7684\u7269\u7406 GPU \u5361\u89c4\u683c\u3002
                          • \u6570\u91cf\uff1a\u4f7f\u7528\u8be5\u89c4\u683c\u7684\u6570\u91cf\u3002

                      \u8bbe\u7f6e GPU \u4e4b\u524d\uff0c\u9700\u8981\u7ba1\u7406\u5458\u9884\u5148\u5728\u96c6\u7fa4\u4e0a\u5b89\u88c5 GPU Operator \u548c nvidia-vgpu\uff08\u4ec5 vGPU \u6a21\u5f0f\u9700\u8981\u5b89\u88c5\uff09\uff0c\u5e76\u5728\u96c6\u7fa4\u8bbe\u7f6e\u4e2d\u5f00\u542f GPU \u7279\u6027\u3002
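
In whole-card mode, the configuration ultimately maps to an extended resource request on the container. A minimal sketch assuming the standard resource name exposed by the GPU Operator (nvidia.com/gpu); the container name and image are illustrative:

containers:\n  - name: cuda-demo                      # illustrative container\n    image: nvidia/cuda:12.2.0-base-ubuntu22.04\n    resources:\n      limits:\n        nvidia.com/gpu: 1                # number of physical GPU cards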

Set the commands to run when the container starts, after it starts, and before it stops. For details, see Container Lifecycle Configuration.

Health checks determine the health of containers and applications, which helps improve application availability. For details, see Container Health Check Configuration.

Configure container parameters within the Pod, add environment variables to the Pod, pass configuration, and so on. For details, see Container Environment Variable Configuration.

Configure the settings for mounting data volumes into the container and for data persistence. For details, see Container Data Storage Configuration.

Containers are isolated for security through Linux's built-in account permission isolation mechanism. You can restrict a container's permissions by using an account UID (numeric identity) with different permissions; for example, entering 0 means using the permissions of the root account.
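
A sketch of the securityContext these security settings correspond to, assuming a non-root UID of 1000; the container name and image are illustrative:

containers:\n  - name: app-demo                       # illustrative container\n    image: nginx\n    securityContext:\n      runAsUser: 1000                    # non-root UID; 0 would mean root\n      privileged: false                  # matches the privileged-container switch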

                      "},{"location":"admin/kpanda/workloads/create-cronjob.html#_5","title":"\u5b9a\u65f6\u4efb\u52a1\u914d\u7f6e","text":"
                      • \u5e76\u53d1\u7b56\u7565\uff1a\u662f\u5426\u5141\u8bb8\u591a\u4e2a Job \u4efb\u52a1\u5e76\u884c\u6267\u884c\u3002

                        • Allow \uff1a\u53ef\u4ee5\u5728\u524d\u4e00\u4e2a\u4efb\u52a1\u672a\u5b8c\u6210\u65f6\u5c31\u521b\u5efa\u65b0\u7684\u5b9a\u65f6\u4efb\u52a1\uff0c\u800c\u4e14\u591a\u4e2a\u4efb\u52a1\u53ef\u4ee5\u5e76\u884c\u3002\u4efb\u52a1\u592a\u591a\u53ef\u80fd\u62a2\u5360\u96c6\u7fa4\u8d44\u6e90\u3002
                        • Forbid \uff1a\u5728\u524d\u4e00\u4e2a\u4efb\u52a1\u5b8c\u6210\u4e4b\u524d\uff0c\u4e0d\u80fd\u521b\u5efa\u65b0\u4efb\u52a1\uff0c\u5982\u679c\u65b0\u4efb\u52a1\u7684\u6267\u884c\u65f6\u95f4\u5230\u4e86\u800c\u4e4b\u524d\u7684\u4efb\u52a1\u4ecd\u672a\u6267\u884c\u5b8c\uff0cCronJob \u4f1a\u5ffd\u7565\u65b0\u4efb\u52a1\u7684\u6267\u884c\u3002
                        • Replace \uff1a\u5982\u679c\u65b0\u4efb\u52a1\u7684\u6267\u884c\u65f6\u95f4\u5230\u4e86\uff0c\u4f46\u524d\u4e00\u4e2a\u4efb\u52a1\u8fd8\u672a\u5b8c\u6210\uff0c\u65b0\u7684\u4efb\u52a1\u4f1a\u53d6\u4ee3\u524d\u4e00\u4e2a\u4efb\u52a1\u3002

                        \u4e0a\u8ff0\u89c4\u5219\u4ec5\u9002\u7528\u4e8e\u540c\u4e00\u4e2a CronJob \u521b\u5efa\u7684\u591a\u4e2a\u4efb\u52a1\u3002\u591a\u4e2a CronJob \u521b\u5efa\u7684\u591a\u4e2a\u4efb\u52a1\u603b\u662f\u5141\u8bb8\u5e76\u53d1\u6267\u884c\u3002

                      • \u5b9a\u65f6\u89c4\u5219\uff1a\u57fa\u4e8e\u5206\u949f\u3001\u5c0f\u65f6\u3001\u5929\u3001\u5468\u3001\u6708\u8bbe\u7f6e\u4efb\u52a1\u6267\u884c\u7684\u65f6\u95f4\u5468\u671f\u3002\u652f\u6301\u7528\u6570\u5b57\u548c * \u81ea\u5b9a\u4e49 Cron \u8868\u8fbe\u5f0f\uff0c\u8f93\u5165\u8868\u8fbe\u5f0f\u540e\u4e0b\u65b9\u4f1a\u63d0\u793a\u5f53\u524d\u8868\u8fbe\u5f0f\u7684\u542b\u4e49\u3002\u6709\u5173\u8be6\u7ec6\u7684\u8868\u8fbe\u5f0f\u8bed\u6cd5\u89c4\u5219\uff0c\u53ef\u53c2\u8003 Cron \u65f6\u95f4\u8868\u8bed\u6cd5\u3002

                      • \u4efb\u52a1\u8bb0\u5f55\uff1a\u8bbe\u5b9a\u4fdd\u7559\u591a\u5c11\u6761\u4efb\u52a1\u6267\u884c\u6210\u529f\u6216\u5931\u8d25\u7684\u8bb0\u5f55\u3002 0 \u8868\u793a\u4e0d\u4fdd\u7559\u3002
                      • \u8d85\u65f6\u65f6\u95f4\uff1a\u8d85\u51fa\u8be5\u65f6\u95f4\u65f6\uff0c\u4efb\u52a1\u5c31\u4f1a\u88ab\u6807\u8bc6\u4e3a\u6267\u884c\u5931\u8d25\uff0c\u4efb\u52a1\u4e0b\u7684\u6240\u6709 Pod \u90fd\u4f1a\u88ab\u5220\u9664\u3002\u4e3a\u7a7a\u65f6\u8868\u793a\u4e0d\u8bbe\u7f6e\u8d85\u65f6\u65f6\u95f4\u3002\u9ed8\u8ba4\u503c\u4e3a 360 s\u3002
                      • \u91cd\u8bd5\u6b21\u6570\uff1a\u4efb\u52a1\u53ef\u91cd\u8bd5\u6b21\u6570\uff0c\u9ed8\u8ba4\u503c\u4e3a 6\u3002
                      • \u91cd\u542f\u7b56\u7565\uff1a\u8bbe\u7f6e\u4efb\u52a1\u5931\u8d25\u65f6\u662f\u5426\u91cd\u542f Pod\u3002
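
A sketch of how the five Cron fields in the schedule rule read, with an illustrative daily schedule:

# ┌ minute (0-59)\n# │ ┌ hour (0-23)\n# │ │ ┌ day of month (1-31)\n# │ │ │ ┌ month (1-12)\n# │ │ │ │ ┌ day of week (0-6, 0 = Sunday)\nschedule: 0 2 * * *                      # run every day at 02:00
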
                      "},{"location":"admin/kpanda/workloads/create-cronjob.html#_6","title":"\u670d\u52a1\u914d\u7f6e","text":"

                      \u4e3a\u6709\u72b6\u6001\u8d1f\u8f7d\u914d\u7f6e\u670d\u52a1\uff08Service\uff09\uff0c\u4f7f\u6709\u72b6\u6001\u8d1f\u8f7d\u80fd\u591f\u88ab\u5916\u90e8\u8bbf\u95ee\u3002

                      1. \u70b9\u51fb \u521b\u5efa\u670d\u52a1 \u6309\u94ae\u3002

                      2. \u53c2\u8003\u521b\u5efa\u670d\u52a1\uff0c\u914d\u7f6e\u670d\u52a1\u53c2\u6570\u3002

                      3. \u70b9\u51fb \u786e\u5b9a \uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                      "},{"location":"admin/kpanda/workloads/create-cronjob.html#_7","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

                      \u5b9a\u65f6\u4efb\u52a1\u7684\u9ad8\u7ea7\u914d\u7f6e\u4e3b\u8981\u6d89\u53ca\u6807\u7b7e\u4e0e\u6ce8\u89e3\u3002

                      \u53ef\u4ee5\u70b9\u51fb \u6dfb\u52a0 \u6309\u94ae\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u5b9e\u4f8b Pod \u6dfb\u52a0\u6807\u7b7e\u548c\u6ce8\u89e3\u3002

                      "},{"location":"admin/kpanda/workloads/create-cronjob.html#yaml","title":"YAML \u521b\u5efa","text":"

                      \u9664\u4e86\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u5916\uff0c\u8fd8\u53ef\u4ee5\u901a\u8fc7 YAML \u6587\u4ef6\u66f4\u5feb\u901f\u5730\u521b\u5efa\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\u3002

                      1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                      2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u5b9a\u65f6\u4efb\u52a1 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 YAML \u521b\u5efa \u6309\u94ae\u3002

                      3. \u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u5b8c\u6210\u521b\u5efa\u3002

                      \u70b9\u51fb\u67e5\u770b\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\u7684 YAML \u793a\u4f8b
                      apiVersion: batch/v1\nkind: CronJob\nmetadata:\n  creationTimestamp: '2022-12-26T09:45:47Z'\n  generation: 1\n  name: demo\n  namespace: default\n  resourceVersion: '92726617'\n  uid: d030d8d7-a405-4dcd-b09a-176942ef36c9\nspec:\n  concurrencyPolicy: Allow\n  failedJobsHistoryLimit: 1\n  jobTemplate:\n    metadata:\n      creationTimestamp: null\n    spec:\n      activeDeadlineSeconds: 360\n      backoffLimit: 6\n      template:\n        metadata:\n          creationTimestamp: null\n        spec:\n          containers:\n            - image: nginx\n              imagePullPolicy: IfNotPresent\n              lifecycle: {}\n              name: container-3\n              resources:\n                limits:\n                  cpu: 250m\n                  memory: 512Mi\n                requests:\n                  cpu: 250m\n                  memory: 512Mi\n              securityContext:\n                privileged: false\n              terminationMessagePath: /dev/termination-log\n              terminationMessagePolicy: File\n          dnsPolicy: ClusterFirst\n          restartPolicy: Never\n          schedulerName: default-scheduler\n          securityContext: {}\n          terminationGracePeriodSeconds: 30\n  schedule: 0 0 13 * 5\n  successfulJobsHistoryLimit: 3\n  suspend: false\nstatus: {}\n
                      "},{"location":"admin/kpanda/workloads/create-daemonset.html","title":"\u521b\u5efa\u5b88\u62a4\u8fdb\u7a0b(DaemonSet)","text":"

                      \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u5b88\u62a4\u8fdb\u7a0b\uff08DaemonSet\uff09\u3002

                      \u5b88\u62a4\u8fdb\u7a0b\uff08DaemonSet\uff09\u901a\u8fc7\u8282\u70b9\u4eb2\u548c\u6027\u4e0e\u6c61\u70b9\u529f\u80fd\u786e\u4fdd\u5728\u5168\u90e8\u6216\u90e8\u5206\u8282\u70b9\u4e0a\u8fd0\u884c\u4e00\u4e2a Pod \u7684\u526f\u672c\u3002\u5bf9\u4e8e\u65b0\u52a0\u5165\u96c6\u7fa4\u7684\u8282\u70b9\uff0cDaemonSet \u81ea\u52a8\u5728\u65b0\u8282\u70b9\u4e0a\u90e8\u7f72\u76f8\u5e94\u7684 Pod\uff0c\u5e76\u8ddf\u8e2a Pod \u7684\u8fd0\u884c\u72b6\u6001\u3002\u5f53\u8282\u70b9\u88ab\u79fb\u9664\u65f6\uff0cDaemonSet \u5219\u5220\u9664\u5176\u521b\u5efa\u7684\u6240\u6709 Pod\u3002

                      \u5b88\u62a4\u8fdb\u7a0b\u7684\u5e38\u89c1\u7528\u4f8b\u5305\u62ec\uff1a

                      • \u5728\u6bcf\u4e2a\u8282\u70b9\u4e0a\u8fd0\u884c\u96c6\u7fa4\u5b88\u62a4\u8fdb\u7a0b\u3002

                      • \u5728\u6bcf\u4e2a\u8282\u70b9\u4e0a\u8fd0\u884c\u65e5\u5fd7\u6536\u96c6\u5b88\u62a4\u8fdb\u7a0b\u3002

                      • \u5728\u6bcf\u4e2a\u8282\u70b9\u4e0a\u8fd0\u884c\u76d1\u63a7\u5b88\u62a4\u8fdb\u7a0b\u3002

                      \u7b80\u5355\u8d77\u89c1\uff0c\u53ef\u4ee5\u5728\u6bcf\u4e2a\u8282\u70b9\u4e0a\u4e3a\u6bcf\u79cd\u7c7b\u578b\u7684\u5b88\u62a4\u8fdb\u7a0b\u90fd\u542f\u52a8\u4e00\u4e2a DaemonSet\u3002\u5982\u9700\u66f4\u7cbe\u7ec6\u3001\u66f4\u9ad8\u7ea7\u5730\u7ba1\u7406\u5b88\u62a4\u8fdb\u7a0b\uff0c\u4e5f\u53ef\u4ee5\u4e3a\u540c\u4e00\u79cd\u5b88\u62a4\u8fdb\u7a0b\u90e8\u7f72\u591a\u4e2a DaemonSet\u3002\u6bcf\u4e2a DaemonSet \u5177\u6709\u4e0d\u540c\u7684\u6807\u5fd7\uff0c\u5e76\u4e14\u5bf9\u4e0d\u540c\u786c\u4ef6\u7c7b\u578b\u5177\u6709\u4e0d\u540c\u7684\u5185\u5b58\u3001CPU \u8981\u6c42\u3002
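
A minimal DaemonSet manifest sketch for the log-collection use case above; the name, image, and toleration are illustrative assumptions:

apiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: log-agent-demo                   # illustrative log-collection daemon\n  namespace: default\nspec:\n  selector:\n    matchLabels:\n      app: log-agent-demo\n  template:\n    metadata:\n      labels:\n        app: log-agent-demo\n    spec:\n      tolerations:\n        - key: node-role.kubernetes.io/control-plane\n          operator: Exists\n          effect: NoSchedule             # also run on tainted control-plane nodes\n      containers:\n        - name: agent\n          image: fluent/fluentd:v1.16-1  # illustrative image\n          resources:\n            limits:\n              memory: 200Mi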

                      "},{"location":"admin/kpanda/workloads/create-daemonset.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u521b\u5efa DaemonSet \u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                      • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                      • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                      • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                      "},{"location":"admin/kpanda/workloads/create-daemonset.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

                      \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u5b88\u62a4\u8fdb\u7a0b\u3002

                      1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                      2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u5b88\u62a4\u8fdb\u7a0b \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                      3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\u3001\u670d\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                        \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de \u5b88\u62a4\u8fdb\u7a0b \u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u5b88\u62a4\u8fdb\u7a0b\u6267\u884c\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u3001\u91cd\u542f\u7b49\u64cd\u4f5c\u3002

                      "},{"location":"admin/kpanda/workloads/create-daemonset.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"

                      \u5728 \u521b\u5efa\u5b88\u62a4\u8fdb\u7a0b \u9875\u9762\u4e2d\uff0c\u6839\u636e\u4e0b\u8868\u8f93\u5165\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                      • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                      • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u5b88\u62a4\u8fdb\u7a0b\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                      • \u63cf\u8ff0\uff1a\u8f93\u5165\u5de5\u4f5c\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u91cf\u5e94\u4e0d\u8d85\u8fc7 512 \u4e2a\u3002
                      "},{"location":"admin/kpanda/workloads/create-daemonset.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                      \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                      \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

Basic Info (required) · Lifecycle (optional) · Health Check (optional) · Environment Variables (optional) · Data Storage (optional) · Security Settings (optional)

When configuring container parameters, you must fill in the container name and image correctly; otherwise you cannot proceed to the next step. Fill in the configuration according to the requirements below, then click Confirm.

• Container Type: defaults to Work Container. For init containers, see the Kubernetes official documentation.
• Container Name: up to 63 characters; lowercase letters, numbers, and the separator ("-") are supported. It must start and end with a lowercase letter or number, e.g. nginx-01.
• Image:
  • Container Image: select a suitable image from the list. When you enter an image name, the image is pulled from the official DockerHub by default. After integrating the image registry module of 算丰 AI 算力平台, you can click the Select Image button on the right to choose an image.
  • Image Version: select a suitable version from the drop-down list.
  • Image Pull Policy: if Always Pull Image is checked, the workload pulls the image from the registry on every restart/upgrade. If unchecked, only the local image is used, and the image is pulled from the registry only when it does not exist locally. For more details, see Image Pull Policy.
  • Registry Secret: optional. If the target registry requires a Secret for access, create a secret first.
• Privileged Container: by default a container cannot access any device on the host; once privileged mode is enabled, the container can access all devices on the host and has all the permissions of processes running on the host.
• CPU/Memory Quota: the request (minimum resources needed) and limit (maximum resources allowed) for CPU/memory. Configure resources for the container as needed, to avoid wasting resources and to prevent system failures caused by the container exceeding its quota. Default values are shown in the figure.
• GPU Configuration: configure the container's GPU usage; only positive integers are allowed (a reference sketch follows this section).
  • Whole-card mode:
    • Physical card count: the number of physical GPU cards the container can use. Once configured, the container occupies whole physical GPU cards. The physical card count must be ≤ the maximum number of GPU cards installed on a single node.
  • Virtualization mode:
    • Physical card count: the number of physical GPU cards the container can use; it must be ≤ the maximum number of GPU cards installed on a single node.
    • GPU compute: the percentage of compute power used on each physical GPU card, at most 100%.
    • GPU memory: the amount of GPU memory used on each physical card.
    • Scheduling policy (Binpack / Spread): supports scheduling at two levels, per GPU card and per node. Binpack is a consolidation policy that prefers scheduling containers onto the same GPU card of the same node; Spread is a dispersion policy that prefers scheduling containers onto different GPU cards of different nodes. The two can be combined according to the actual scenario. (When the workload-level Binpack / Spread policy conflicts with the cluster-level Binpack / Spread policy, the system gives priority to the workload-level policy.)
    • Task priority: GPU compute is given to high-priority tasks first; ordinary tasks reduce or even pause their use of GPU compute until the high-priority task finishes, then resume. This is commonly used in online/offline colocation scenarios.
    • Specified model: schedule the workload onto GPU cards of the specified model, for scenarios with special requirements on the GPU model.
  • MIG mode:
    • Spec: the specification of a partitioned physical GPU card.
    • Count: the number of instances of that specification to use.

Before configuring GPUs, an administrator must install the GPU Operator and nvidia-vgpu (required only for vGPU mode) on the cluster in advance, and enable the GPU feature in the cluster settings.
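
As a reference, below is a minimal sketch of requesting a whole GPU card in Pod YAML, assuming the GPU Operator advertises the standard nvidia.com/gpu extended resource; the image tag is illustrative, and vGPU/MIG resource names depend on the installed components and their configuration, so the commented line is an assumption.

apiVersion: v1
kind: Pod
metadata:
  name: gpu-demo                       # placeholder name
spec:
  containers:
    - name: cuda-app
      image: nvcr.io/nvidia/cuda:12.2.0-base-ubuntu22.04
      command: ["nvidia-smi"]
      resources:
        limits:
          nvidia.com/gpu: 1            # whole-card mode: occupy one full physical GPU
          # nvidia.com/mig-1g.10gb: 1  # MIG mode resource name (assumption; depends on the MIG strategy)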

Set the commands to run when the container starts, after it starts, and before it stops. For details, see Container Lifecycle Configuration.

Health checks determine the health status of containers and applications, helping to improve application availability. For details, see Container Health Check Configuration.

Configure container parameters within the Pod, add environment variables, pass configuration to the Pod, and so on. For details, see Container Environment Variable Configuration.

Configure the settings for mounting data volumes and persisting data in the container. For details, see Container Data Storage Configuration.

Containers are securely isolated through Linux's built-in account permission isolation mechanism. You can limit a container's permissions by using account UIDs (numeric identity tags) with different permissions. For example, entering 0 means using the permissions of the root account.
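
For reference, a minimal sketch of what this setting maps to in Pod YAML; the UID 1000 and the name are only illustrations.

apiVersion: v1
kind: Pod
metadata:
  name: security-demo          # placeholder name
spec:
  securityContext:
    runAsUser: 1000            # run the Pod's containers as UID 1000; 0 would mean root
  containers:
    - name: app
      image: busybox:1.36
      command: ["sh", "-c", "id && sleep 3600"]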

                      "},{"location":"admin/kpanda/workloads/create-daemonset.html#_5","title":"\u670d\u52a1\u914d\u7f6e","text":"

                      \u4e3a\u5b88\u62a4\u8fdb\u7a0b\u521b\u5efa\u670d\u52a1\uff08Service\uff09\uff0c\u4f7f\u5b88\u62a4\u8fdb\u7a0b\u80fd\u591f\u88ab\u5916\u90e8\u8bbf\u95ee\u3002

                      1. \u70b9\u51fb \u521b\u5efa\u670d\u52a1 \u6309\u94ae\u3002

                      2. \u914d\u7f6e\u670d\u52a1\u53c2\u6570\uff0c\u8be6\u60c5\u8bf7\u53c2\u8003\u521b\u5efa\u670d\u52a1\u3002

                      3. \u70b9\u51fb \u786e\u5b9a \uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002
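
As a reference, a minimal Service sketch that selects the workload's Pods; the name, labels, and ports are placeholders rather than values from this page.

apiVersion: v1
kind: Service
metadata:
  name: ds-demo-svc          # placeholder name
spec:
  type: ClusterIP            # use NodePort or LoadBalancer for access from outside the cluster
  selector:
    app: ds-demo             # must match the Pod labels of the workload
  ports:
    - port: 80               # port exposed by the Service
      targetPort: 8080       # port the container listens on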

                      "},{"location":"admin/kpanda/workloads/create-daemonset.html#_6","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

                      \u9ad8\u7ea7\u914d\u7f6e\u5305\u62ec\u8d1f\u8f7d\u7684\u7f51\u7edc\u914d\u7f6e\u3001\u5347\u7ea7\u7b56\u7565\u3001\u8c03\u5ea6\u7b56\u7565\u3001\u6807\u7b7e\u4e0e\u6ce8\u89e3\u56db\u90e8\u5206\uff0c\u53ef\u70b9\u51fb\u4e0b\u65b9\u7684\u9875\u7b7e\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                      \u7f51\u7edc\u914d\u7f6e\u5347\u7ea7\u7b56\u7565\u8c03\u5ea6\u7b56\u7565\u6807\u7b7e\u4e0e\u6ce8\u89e3

                      \u5e94\u7528\u5728\u67d0\u4e9b\u573a\u666f\u4e0b\u4f1a\u51fa\u73b0\u5197\u4f59\u7684 DNS \u67e5\u8be2\u3002Kubernetes \u4e3a\u5e94\u7528\u63d0\u4f9b\u4e86\u4e0e DNS \u76f8\u5173\u7684\u914d\u7f6e\u9009\u9879\uff0c\u80fd\u591f\u5728\u67d0\u4e9b\u573a\u666f\u4e0b\u6709\u6548\u5730\u51cf\u5c11\u5197\u4f59\u7684 DNS \u67e5\u8be2\uff0c\u63d0\u5347\u4e1a\u52a1\u5e76\u53d1\u91cf\u3002

                      • DNS \u7b56\u7565

                        • Default\uff1a\u4f7f\u5bb9\u5668\u4f7f\u7528 kubelet \u7684 --resolv-conf \u53c2\u6570\u6307\u5411\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u3002\u8be5\u914d\u7f6e\u53ea\u80fd\u89e3\u6790\u6ce8\u518c\u5230\u4e92\u8054\u7f51\u4e0a\u7684\u5916\u90e8\u57df\u540d\uff0c\u65e0\u6cd5\u89e3\u6790\u96c6\u7fa4\u5185\u90e8\u57df\u540d\uff0c\u4e14\u4e0d\u5b58\u5728\u65e0\u6548\u7684 DNS \u67e5\u8be2\u3002
                        • ClusterFirstWithHostNet\uff1a\u5e94\u7528\u5bf9\u63a5\u4e3b\u673a\u7684\u57df\u540d\u6587\u4ef6\u3002
                        • ClusterFirst\uff1a\u5e94\u7528\u5bf9\u63a5 Kube-DNS/CoreDNS\u3002
                        • None\uff1aKubernetes v1.9\uff08Beta in v1.10\uff09\u4e2d\u5f15\u5165\u7684\u65b0\u9009\u9879\u503c\u3002\u8bbe\u7f6e\u4e3a None \u4e4b\u540e\uff0c\u5fc5\u987b\u8bbe\u7f6e dnsConfig\uff0c\u6b64\u65f6\u5bb9\u5668\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u5c06\u5b8c\u5168\u901a\u8fc7 dnsConfig \u7684\u914d\u7f6e\u6765\u751f\u6210\u3002
                      • \u57df\u540d\u670d\u52a1\u5668\uff1a\u586b\u5199\u57df\u540d\u670d\u52a1\u5668\u7684\u5730\u5740\uff0c\u4f8b\u5982 10.6.175.20 \u3002

                      • \u641c\u7d22\u57df\uff1a\u57df\u540d\u67e5\u8be2\u65f6\u7684 DNS \u641c\u7d22\u57df\u5217\u8868\u3002\u6307\u5b9a\u540e\uff0c\u63d0\u4f9b\u7684\u641c\u7d22\u57df\u5217\u8868\u5c06\u5408\u5e76\u5230\u57fa\u4e8e dnsPolicy \u751f\u6210\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u7684 search \u5b57\u6bb5\u4e2d\uff0c\u5e76\u5220\u9664\u91cd\u590d\u7684\u57df\u540d\u3002Kubernetes \u6700\u591a\u5141\u8bb8 6 \u4e2a\u641c\u7d22\u57df\u3002
                      • Options\uff1aDNS \u7684\u914d\u7f6e\u9009\u9879\uff0c\u5176\u4e2d\u6bcf\u4e2a\u5bf9\u8c61\u53ef\u4ee5\u5177\u6709 name \u5c5e\u6027\uff08\u5fc5\u9700\uff09\u548c value \u5c5e\u6027\uff08\u53ef\u9009\uff09\u3002\u8be5\u5b57\u6bb5\u4e2d\u7684\u5185\u5bb9\u5c06\u5408\u5e76\u5230\u57fa\u4e8e dnsPolicy \u751f\u6210\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u7684 options \u5b57\u6bb5\u4e2d\uff0cdnsConfig \u7684 options \u7684\u67d0\u4e9b\u9009\u9879\u5982\u679c\u4e0e\u57fa\u4e8e dnsPolicy \u751f\u6210\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u7684\u9009\u9879\u51b2\u7a81\uff0c\u5219\u4f1a\u88ab dnsConfig \u6240\u8986\u76d6\u3002
                      • \u4e3b\u673a\u522b\u540d\uff1a\u4e3a\u4e3b\u673a\u8bbe\u7f6e\u7684\u522b\u540d\u3002
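
These options map onto the Pod spec's dnsPolicy and dnsConfig fields; a minimal sketch follows, in which the server address, search domain, and ndots value are illustrative.

apiVersion: v1
kind: Pod
metadata:
  name: dns-demo               # placeholder name
spec:
  dnsPolicy: "None"            # the resolution file is generated entirely from dnsConfig below
  dnsConfig:
    nameservers:
      - 10.6.175.20            # example nameserver address
    searches:
      - ns1.svc.cluster.local  # example search domain (at most 6 allowed)
    options:
      - name: ndots
        value: "2"
  containers:
    - name: app
      image: busybox:1.36
      command: ["sleep", "3600"]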

• Upgrade Mode: Rolling Upgrade gradually replaces old-version instances with new-version instances; during the upgrade, traffic is load-balanced across both old and new instances, so the service is not interrupted. Recreate Upgrade deletes the old-version workload instances first and then installs the specified new version; the service is interrupted during the upgrade.
• Max Unavailable Pods: the maximum number or percentage of Pods that may be unavailable during the update, 25% by default. If it equals the number of instances, there is a risk of service interruption.
• Max Surge: the maximum number or percentage by which the total number of Pods may exceed the desired replica count during the update, 25% by default.
• Max Retained Revisions: the number of old revisions retained for version rollback, 10 by default.
• Min Ready Seconds: the minimum time a Pod must be ready before it is considered available; a Pod counts as available only after this time has passed, 0 seconds by default.
• Max Upgrade Duration: if the deployment has still not succeeded after the set time, the workload is marked as failed, 600 seconds by default.
• Graceful Scale-down Window: the execution time window for the pre-stop command before the workload stops (0-9,999 seconds), 30 seconds by default.

• Toleration Time: when the node running a workload instance becomes unavailable, the time after which the instance is rescheduled to another available node, 300 seconds by default.
• Node Affinity: constrains which nodes a Pod can be scheduled onto, based on node labels (a sketch follows below).
• Workload Affinity: constrains which nodes a Pod can be scheduled onto, based on the labels of Pods already running on those nodes.
• Workload Anti-affinity: constrains which nodes a Pod cannot be scheduled onto, based on the labels of Pods already running on those nodes.
• Topology Domain: i.e. topologyKey, which specifies a group of nodes that can be scheduled onto. For example, kubernetes.io/os means that as long as a node running some operating system satisfies the labelSelector conditions, Pods can be scheduled onto that node.

For details, see Scheduling Policy.
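
As a reference, a minimal node-affinity sketch; the label key/value disktype=ssd is illustrative.

apiVersion: v1
kind: Pod
metadata:
  name: affinity-demo          # placeholder name
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:   # hard constraint
        nodeSelectorTerms:
          - matchExpressions:
              - key: disktype          # illustrative node label
                operator: In
                values:
                  - ssd
  containers:
    - name: app
      image: nginx:1.14.2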

You can click the Add button to add labels and annotations for the workload and its Pods.

"},{"location":"admin/kpanda/workloads/create-daemonset.html#yaml","title":"Create from YAML","text":"

Besides creating from an image, you can also create a DaemonSet more quickly from a YAML file.

1. Click Clusters in the left navigation bar, then click the name of the target cluster to enter the Cluster Details page.

2. On the cluster details page, click Workloads -> DaemonSets in the left navigation bar, then click the Create from YAML button in the upper-right corner of the page.

3. Enter or paste the YAML file you prepared in advance, then click OK to complete the creation.

Click to view a YAML example for creating a DaemonSet
                      kind: DaemonSet\napiVersion: apps/v1\nmetadata:\n  name: hwameistor-local-disk-manager\n  namespace: hwameistor\n  uid: ccbdc098-7de3-4a8a-96dd-d1cee159c92b\n  resourceVersion: '90999552'\n  generation: 1\n  creationTimestamp: '2022-12-15T09:03:44Z'\n  labels:\n    app.kubernetes.io/managed-by: Helm\n  annotations:\n    deprecated.daemonset.template.generation: '1'\n    meta.helm.sh/release-name: hwameistor\n    meta.helm.sh/release-namespace: hwameistor\nspec:\n  selector:\n    matchLabels:\n      app: hwameistor-local-disk-manager\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: hwameistor-local-disk-manager\n    spec:\n      volumes:\n        - name: udev\n          hostPath:\n            path: /run/udev\n            type: Directory\n        - name: procmount\n          hostPath:\n            path: /proc\n            type: Directory\n        - name: devmount\n          hostPath:\n            path: /dev\n            type: Directory\n        - name: socket-dir\n          hostPath:\n            path: /var/lib/kubelet/plugins/disk.hwameistor.io\n            type: DirectoryOrCreate\n        - name: registration-dir\n          hostPath:\n            path: /var/lib/kubelet/plugins_registry/\n            type: Directory\n        - name: plugin-dir\n          hostPath:\n            path: /var/lib/kubelet/plugins\n            type: DirectoryOrCreate\n        - name: pods-mount-dir\n          hostPath:\n            path: /var/lib/kubelet/pods\n            type: DirectoryOrCreate\n      containers:\n        - name: registrar\n          image: k8s-gcr.m.daocloud.io/sig-storage/csi-node-driver-registrar:v2.5.0\n          args:\n            - '--v=5'\n            - '--csi-address=/csi/csi.sock'\n            - >-\n              --kubelet-registration-path=/var/lib/kubelet/plugins/disk.hwameistor.io/csi.sock\n          env:\n            - name: KUBE_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: spec.nodeName\n          resources: {}\n          volumeMounts:\n            - name: socket-dir\n              mountPath: /csi\n            - name: registration-dir\n              mountPath: /registration\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /bin/sh\n                  - '-c'\n                  - >-\n                    rm -rf /registration/disk.hwameistor.io \n                    /registration/disk.hwameistor.io-reg.sock\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: manager\n          image: ghcr.m.daocloud.io/hwameistor/local-disk-manager:v0.6.1\n          command:\n            - /local-disk-manager\n          args:\n            - '--endpoint=$(CSI_ENDPOINT)'\n            - '--nodeid=$(NODENAME)'\n            - '--csi-enable=true'\n          env:\n            - name: CSI_ENDPOINT\n              value: unix://var/lib/kubelet/plugins/disk.hwameistor.io/csi.sock\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: WATCH_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n                 
 apiVersion: v1\n                  fieldPath: metadata.name\n            - name: NODENAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: spec.nodeName\n            - name: OPERATOR_NAME\n              value: local-disk-manager\n          resources: {}\n          volumeMounts:\n            - name: udev\n              mountPath: /run/udev\n            - name: procmount\n              readOnly: true\n              mountPath: /host/proc\n            - name: devmount\n              mountPath: /dev\n            - name: registration-dir\n              mountPath: /var/lib/kubelet/plugins_registry\n            - name: plugin-dir\n              mountPath: /var/lib/kubelet/plugins\n              mountPropagation: Bidirectional\n            - name: pods-mount-dir\n              mountPath: /var/lib/kubelet/pods\n              mountPropagation: Bidirectional\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            privileged: true\n      restartPolicy: Always\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      serviceAccountName: hwameistor-admin\n      serviceAccount: hwameistor-admin\n      hostNetwork: true\n      hostPID: true\n      securityContext: {}\n      schedulerName: default-scheduler\n      tolerations:\n        - key: CriticalAddonsOnly\n          operator: Exists\n        - key: node.kubernetes.io/not-ready\n          operator: Exists\n          effect: NoSchedule\n        - key: node-role.kubernetes.io/master\n          operator: Exists\n          effect: NoSchedule\n        - key: node-role.kubernetes.io/control-plane\n          operator: Exists\n          effect: NoSchedule\n        - key: node.cloudprovider.kubernetes.io/uninitialized\n          operator: Exists\n          effect: NoSchedule\n  updateStrategy:\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 1\n      maxSurge: 0\n  revisionHistoryLimit: 10\nstatus:\n  currentNumberScheduled: 4\n  numberMisscheduled: 0\n  desiredNumberScheduled: 4\n  numberReady: 4\n  observedGeneration: 1\n  updatedNumberScheduled: 4\n  numberAvailable: 4\n
                      "},{"location":"admin/kpanda/workloads/create-deployment.html","title":"\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\uff08Deployment\uff09","text":"

                      \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\u3002

                      \u65e0\u72b6\u6001\u8d1f\u8f7d\uff08Deployment\uff09\u662f Kubernetes \u4e2d\u7684\u4e00\u79cd\u5e38\u89c1\u8d44\u6e90\uff0c\u4e3b\u8981\u4e3a Pod \u548c ReplicaSet \u63d0\u4f9b\u58f0\u660e\u5f0f\u66f4\u65b0\uff0c\u652f\u6301\u5f39\u6027\u4f38\u7f29\u3001\u6eda\u52a8\u5347\u7ea7\u3001\u7248\u672c\u56de\u9000\u7b49\u529f\u80fd\u3002\u5728 Deployment \u4e2d\u58f0\u660e\u671f\u671b\u7684 Pod \u72b6\u6001\uff0cDeployment Controller \u4f1a\u901a\u8fc7 ReplicaSet \u4fee\u6539\u5f53\u524d\u72b6\u6001\uff0c\u4f7f\u5176\u8fbe\u5230\u9884\u5148\u58f0\u660e\u7684\u671f\u671b\u72b6\u6001\u3002Deployment \u662f\u65e0\u72b6\u6001\u7684\uff0c\u4e0d\u652f\u6301\u6570\u636e\u6301\u4e45\u5316\uff0c\u9002\u7528\u4e8e\u90e8\u7f72\u65e0\u72b6\u6001\u7684\u3001\u4e0d\u9700\u8981\u4fdd\u5b58\u6570\u636e\u3001\u968f\u65f6\u53ef\u4ee5\u91cd\u542f\u56de\u6eda\u7684\u5e94\u7528\u3002

                      \u901a\u8fc7\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\uff0c\u53ef\u4ee5\u57fa\u4e8e\u76f8\u5e94\u7684\u89d2\u8272\u6743\u9650\u8f7b\u677e\u7ba1\u7406\u591a\u4e91\u591a\u96c6\u7fa4\u4e0a\u7684\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u5305\u62ec\u5bf9\u65e0\u72b6\u6001\u8d1f\u8f7d\u7684\u521b\u5efa\u3001\u66f4\u65b0\u3001\u5220\u9664\u3001\u5f39\u6027\u6269\u7f29\u3001\u91cd\u542f\u3001\u7248\u672c\u56de\u9000\u7b49\u5168\u751f\u547d\u5468\u671f\u7ba1\u7406\u3002

                      "},{"location":"admin/kpanda/workloads/create-deployment.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u5728\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                      • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                      • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                      • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                      "},{"location":"admin/kpanda/workloads/create-deployment.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

                      \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u65e0\u72b6\u6001\u8d1f\u8f7d\u3002

                      1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                      2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u65e0\u72b6\u6001\u8d1f\u8f7d \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                      3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\u3001\u670d\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                        \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de \u65e0\u72b6\u6001\u8d1f\u8f7d \u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u8d1f\u8f7d\u6267\u884c\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u3001\u5f39\u6027\u6269\u7f29\u3001\u91cd\u542f\u3001\u7248\u672c\u56de\u9000\u7b49\u64cd\u4f5c\u3002\u5982\u679c\u8d1f\u8f7d\u72b6\u6001\u51fa\u73b0\u5f02\u5e38\uff0c\u8bf7\u67e5\u770b\u5177\u4f53\u5f02\u5e38\u4fe1\u606f\uff0c\u53ef\u53c2\u8003\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001\u3002

                      "},{"location":"admin/kpanda/workloads/create-deployment.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"
                      • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 deployment-01\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                      • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u8d1f\u8f7d\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                      • \u5b9e\u4f8b\u6570\uff1a\u8f93\u5165\u8d1f\u8f7d\u7684 Pod \u5b9e\u4f8b\u6570\u91cf\uff0c\u9ed8\u8ba4\u521b\u5efa 1 \u4e2a Pod \u5b9e\u4f8b\u3002
                      • \u63cf\u8ff0\uff1a\u8f93\u5165\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u4e0d\u8d85\u8fc7 512\u3002

                      "},{"location":"admin/kpanda/workloads/create-deployment.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                      \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                      \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

                      \u57fa\u672c\u4fe1\u606f\uff08\u5fc5\u586b\uff09\u751f\u547d\u5468\u671f\uff08\u9009\u586b\uff09\u5065\u5eb7\u68c0\u67e5\uff08\u9009\u586b\uff09\u73af\u5883\u53d8\u91cf\uff08\u9009\u586b\uff09\u6570\u636e\u5b58\u50a8\uff08\u9009\u586b\uff09\u5b89\u5168\u8bbe\u7f6e\uff08\u9009\u586b\uff09

                      \u5728\u914d\u7f6e\u5bb9\u5668\u76f8\u5173\u53c2\u6570\u65f6\uff0c\u5fc5\u987b\u6b63\u786e\u586b\u5199\u5bb9\u5668\u7684\u540d\u79f0\u3001\u955c\u50cf\u53c2\u6570\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u8fdb\u5165\u4e0b\u4e00\u6b65\u3002\u53c2\u8003\u4ee5\u4e0b\u8981\u6c42\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u8ba4 \u3002

                      • \u5bb9\u5668\u7c7b\u578b\uff1a\u9ed8\u8ba4\u4e3a\u5de5\u4f5c\u5bb9\u5668\u3002\u6709\u5173\u521d\u59cb\u5316\u5bb9\u5668\uff0c\u53c2\u89c1 k8s \u5b98\u65b9\u6587\u6863\u3002
                      • \u5bb9\u5668\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u652f\u6301\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\u3002\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 nginx-01\u3002
                      • \u955c\u50cf\uff1a
                        • \u5bb9\u5668\u955c\u50cf\uff1a\u4ece\u5217\u8868\u4e2d\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u955c\u50cf\u3002\u8f93\u5165\u955c\u50cf\u540d\u79f0\u65f6\uff0c\u9ed8\u8ba4\u4ece\u5b98\u65b9\u7684 DockerHub \u62c9\u53d6\u955c\u50cf\u3002 \u5b89\u88c5\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u955c\u50cf\u4ed3\u5e93\u6a21\u5757\u540e\uff0c\u53ef\u4ee5\u70b9\u51fb\u53f3\u4fa7\u7684 \u9009\u62e9\u955c\u50cf \u6309\u94ae\u6765\u9009\u62e9\u955c\u50cf\u3002
                        • \u955c\u50cf\u7248\u672c\uff1a\u4ece\u4e0b\u62c9\u5217\u8868\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u7248\u672c\u3002
                        • \u955c\u50cf\u62c9\u53d6\u7b56\u7565\uff1a\u52fe\u9009 \u603b\u662f\u62c9\u53d6\u955c\u50cf \u540e\uff0c\u8d1f\u8f7d\u6bcf\u6b21\u91cd\u542f/\u5347\u7ea7\u65f6\u90fd\u4f1a\u4ece\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u955c\u50cf\u3002 \u5982\u679c\u4e0d\u52fe\u9009\uff0c\u5219\u53ea\u62c9\u53d6\u672c\u5730\u955c\u50cf\uff0c\u53ea\u6709\u5f53\u955c\u50cf\u5728\u672c\u5730\u4e0d\u5b58\u5728\u65f6\u624d\u4ece\u955c\u50cf\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u3002 \u66f4\u591a\u8be6\u60c5\u53ef\u53c2\u8003\u955c\u50cf\u62c9\u53d6\u7b56\u7565\u3002
                        • \u955c\u50cf\u4ed3\u5e93\u5bc6\u94a5\uff1a\u53ef\u9009\u3002\u5982\u679c\u76ee\u6807\u4ed3\u5e93\u9700\u8981 Secret \u624d\u80fd\u8bbf\u95ee\uff0c\u9700\u8981\u5148\u53bb\u521b\u5efa\u4e00\u4e2a\u5bc6\u94a5\u3002
                      • \u7279\u6743\u5bb9\u5668\uff1a\u5bb9\u5668\u9ed8\u8ba4\u4e0d\u53ef\u4ee5\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u4efb\u4f55\u8bbe\u5907\uff0c\u5f00\u542f\u7279\u6743\u5bb9\u5668\u540e\uff0c\u5bb9\u5668\u5373\u53ef\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u6240\u6709\u8bbe\u5907\uff0c\u4eab\u6709\u5bbf\u4e3b\u673a\u4e0a\u7684\u8fd0\u884c\u8fdb\u7a0b\u7684\u6240\u6709\u6743\u9650\u3002
                      • CPU/\u5185\u5b58\u914d\u989d\uff1aCPU/\u5185\u5b58\u8d44\u6e90\u7684\u8bf7\u6c42\u503c\uff08\u9700\u8981\u4f7f\u7528\u7684\u6700\u5c0f\u8d44\u6e90\uff09\u548c\u9650\u5236\u503c\uff08\u5141\u8bb8\u4f7f\u7528\u7684\u6700\u5927\u8d44\u6e90\uff09\u3002\u8bf7\u6839\u636e\u9700\u8981\u4e3a\u5bb9\u5668\u914d\u7f6e\u8d44\u6e90\uff0c\u907f\u514d\u8d44\u6e90\u6d6a\u8d39\u548c\u56e0\u5bb9\u5668\u8d44\u6e90\u8d85\u989d\u5bfc\u81f4\u7cfb\u7edf\u6545\u969c\u3002\u9ed8\u8ba4\u503c\u5982\u56fe\u6240\u793a\u3002
                      • GPU \u914d\u7f6e\uff1a\u4e3a\u5bb9\u5668\u914d\u7f6e GPU \u7528\u91cf\uff0c \u4ec5\u652f\u6301\u8f93\u5165\u6b63\u6574\u6570\u3002
                        • \u6574\u5361\u6a21\u5f0f\uff1a
                          • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\u3002\u914d\u7f6e\u540e\uff0c\u5bb9\u5668\u5c06\u5360\u7528\u6574\u5f20\u7269\u7406 GPU\u5361\u3002\u540c\u65f6\u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                        • \u865a\u62df\u5316\u6a21\u5f0f\uff1a
                          • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\uff0c \u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                          • GPU \u7b97\u529b\uff1a\u6bcf\u5f20\u7269\u7406 GPU \u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u7b97\u529b\u767e\u5206\u6bd4\uff0c\u6700\u591a\u4e3a100%\u3002
                          • \u663e\u5b58\uff1a\u6bcf\u5f20\u7269\u7406\u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u663e\u5b58\u6570\u91cf\u3002
                          • \u8c03\u5ea6\u7b56\u7565\uff08Binpack / Spread\uff09\uff1a\u652f\u6301\u57fa\u4e8e GPU \u5361\u548c\u57fa\u4e8e\u8282\u70b9\u7684\u4e24\u79cd\u7ef4\u5ea6\u7684\u8c03\u5ea6\u7b56\u7565\u3002Binpack \u662f\u96c6\u4e2d\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u540c\u4e00\u4e2a\u8282\u70b9\u7684\u540c\u4e00\u5f20 GPU \u5361\u4e0a\uff1bSpread \u662f\u5206\u6563\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u4e0d\u540c\u8282\u70b9\u7684\u4e0d\u540c GPU \u5361\u4e0a\uff0c\u6839\u636e\u5b9e\u9645\u573a\u666f\u53ef\u7ec4\u5408\u4f7f\u7528\u3002\uff08\u5f53\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u4e0e\u96c6\u7fa4\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u51b2\u7a81\u65f6\uff0c\u7cfb\u7edf\u4f18\u5148\u4f7f\u7528\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684\u8c03\u5ea6\u7b56\u7565\uff09\u3002
                          • \u4efb\u52a1\u4f18\u5148\u7ea7\uff1aGPU \u7b97\u529b\u4f1a\u4f18\u5148\u4f9b\u7ed9\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u4f7f\u7528\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u51cf\u5c11\u751a\u81f3\u6682\u505c\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u76f4\u5230\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u7ed3\u675f\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u91cd\u65b0\u7ee7\u7eed\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u5e38\u7528\u4e8e\u5728\u79bb\u7ebf\u6df7\u90e8\u573a\u666f\u3002
                          • \u6307\u5b9a\u578b\u53f7\uff1a\u5c06\u5de5\u4f5c\u8d1f\u8f7d\u8c03\u5ea6\u5230\u6307\u5b9a\u578b\u53f7\u7684 GPU \u5361\u4e0a\uff0c\u9002\u7528\u4e8e\u5bf9 GPU \u578b\u53f7\u6709\u7279\u6b8a\u8981\u6c42\u7684\u573a\u666f\u3002
                        • Mig \u6a21\u5f0f
                          • \u89c4\u683c\uff1a\u5207\u5206\u540e\u7684\u7269\u7406 GPU \u5361\u89c4\u683c\u3002
                          • \u6570\u91cf\uff1a\u4f7f\u7528\u8be5\u89c4\u683c\u7684\u6570\u91cf\u3002

Before configuring GPUs, an administrator must install the GPU Operator and nvidia-vgpu (required only for vGPU mode) on the cluster in advance, and enable the GPU feature in the cluster settings.

Set the commands to run when the container starts, after it starts, and before it stops. For details, see Container Lifecycle Configuration.

Health checks determine the health status of containers and applications, helping to improve application availability. For details, see Container Health Check Configuration.

Configure container parameters within the Pod, add environment variables, pass configuration to the Pod, and so on. For details, see Container Environment Variable Configuration.

Configure the settings for mounting data volumes and persisting data in the container. For details, see Container Data Storage Configuration.

Containers are securely isolated through Linux's built-in account permission isolation mechanism. You can limit a container's permissions by using account UIDs (numeric identity tags) with different permissions. For example, entering 0 means using the permissions of the root account.

                      "},{"location":"admin/kpanda/workloads/create-deployment.html#_5","title":"\u670d\u52a1\u914d\u7f6e","text":"

                      \u4e3a\u65e0\u72b6\u6001\u8d1f\u8f7d\u914d\u7f6e\u670d\u52a1\uff08Service\uff09\uff0c\u4f7f\u65e0\u72b6\u6001\u8d1f\u8f7d\u80fd\u591f\u88ab\u5916\u90e8\u8bbf\u95ee\u3002

                      1. \u70b9\u51fb \u521b\u5efa\u670d\u52a1 \u6309\u94ae\u3002

                      2. \u53c2\u8003\u521b\u5efa\u670d\u52a1\uff0c\u914d\u7f6e\u670d\u52a1\u53c2\u6570\u3002

                      3. \u70b9\u51fb \u786e\u5b9a \uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                      "},{"location":"admin/kpanda/workloads/create-deployment.html#_6","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

                      \u9ad8\u7ea7\u914d\u7f6e\u5305\u62ec\u8d1f\u8f7d\u7684\u7f51\u7edc\u914d\u7f6e\u3001\u5347\u7ea7\u7b56\u7565\u3001\u8c03\u5ea6\u7b56\u7565\u3001\u6807\u7b7e\u4e0e\u6ce8\u89e3\u56db\u90e8\u5206\uff0c\u53ef\u70b9\u51fb\u4e0b\u65b9\u7684\u9875\u7b7e\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                      \u7f51\u7edc\u914d\u7f6e\u5347\u7ea7\u7b56\u7565\u8c03\u5ea6\u7b56\u7565\u6807\u7b7e\u4e0e\u6ce8\u89e3
                      • \u5982\u5728\u96c6\u7fa4\u4e2d\u90e8\u7f72\u4e86 SpiderPool \u548c Multus \u7ec4\u4ef6\uff0c\u5219\u53ef\u4ee5\u5728\u7f51\u7edc\u914d\u7f6e\u4e2d\u914d\u7f6e\u5bb9\u5668\u7f51\u5361\u3002

                      • DNS \u914d\u7f6e\uff1a\u5e94\u7528\u5728\u67d0\u4e9b\u573a\u666f\u4e0b\u4f1a\u51fa\u73b0\u5197\u4f59\u7684 DNS \u67e5\u8be2\u3002Kubernetes \u4e3a\u5e94\u7528\u63d0\u4f9b\u4e86\u4e0e DNS \u76f8\u5173\u7684\u914d\u7f6e\u9009\u9879\uff0c\u80fd\u591f\u5728\u67d0\u4e9b\u573a\u666f\u4e0b\u6709\u6548\u5730\u51cf\u5c11\u5197\u4f59\u7684 DNS \u67e5\u8be2\uff0c\u63d0\u5347\u4e1a\u52a1\u5e76\u53d1\u91cf\u3002

                      • DNS \u7b56\u7565

                        • Default\uff1a\u4f7f\u5bb9\u5668\u4f7f\u7528 kubelet \u7684 --resolv-conf \u53c2\u6570\u6307\u5411\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u3002\u8be5\u914d\u7f6e\u53ea\u80fd\u89e3\u6790\u6ce8\u518c\u5230\u4e92\u8054\u7f51\u4e0a\u7684\u5916\u90e8\u57df\u540d\uff0c\u65e0\u6cd5\u89e3\u6790\u96c6\u7fa4\u5185\u90e8\u57df\u540d\uff0c\u4e14\u4e0d\u5b58\u5728\u65e0\u6548\u7684 DNS \u67e5\u8be2\u3002
                        • ClusterFirstWithHostNet\uff1a\u5e94\u7528\u5bf9\u63a5\u4e3b\u673a\u7684\u57df\u540d\u6587\u4ef6\u3002
                        • ClusterFirst\uff1a\u5e94\u7528\u5bf9\u63a5 Kube-DNS/CoreDNS\u3002
                        • None\uff1aKubernetes v1.9\uff08Beta in v1.10\uff09\u4e2d\u5f15\u5165\u7684\u65b0\u9009\u9879\u503c\u3002\u8bbe\u7f6e\u4e3a None \u4e4b\u540e\uff0c\u5fc5\u987b\u8bbe\u7f6e dnsConfig\uff0c\u6b64\u65f6\u5bb9\u5668\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u5c06\u5b8c\u5168\u901a\u8fc7 dnsConfig \u7684\u914d\u7f6e\u6765\u751f\u6210\u3002
                      • \u57df\u540d\u670d\u52a1\u5668\uff1a\u586b\u5199\u57df\u540d\u670d\u52a1\u5668\u7684\u5730\u5740\uff0c\u4f8b\u5982 10.6.175.20 \u3002

                      • \u641c\u7d22\u57df\uff1a\u57df\u540d\u67e5\u8be2\u65f6\u7684 DNS \u641c\u7d22\u57df\u5217\u8868\u3002\u6307\u5b9a\u540e\uff0c\u63d0\u4f9b\u7684\u641c\u7d22\u57df\u5217\u8868\u5c06\u5408\u5e76\u5230\u57fa\u4e8e dnsPolicy \u751f\u6210\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u7684 search \u5b57\u6bb5\u4e2d\uff0c\u5e76\u5220\u9664\u91cd\u590d\u7684\u57df\u540d\u3002Kubernetes \u6700\u591a\u5141\u8bb8 6 \u4e2a\u641c\u7d22\u57df\u3002
                      • Options\uff1aDNS \u7684\u914d\u7f6e\u9009\u9879\uff0c\u5176\u4e2d\u6bcf\u4e2a\u5bf9\u8c61\u53ef\u4ee5\u5177\u6709 name \u5c5e\u6027\uff08\u5fc5\u9700\uff09\u548c value \u5c5e\u6027\uff08\u53ef\u9009\uff09\u3002\u8be5\u5b57\u6bb5\u4e2d\u7684\u5185\u5bb9\u5c06\u5408\u5e76\u5230\u57fa\u4e8e dnsPolicy \u751f\u6210\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u7684 options \u5b57\u6bb5\u4e2d\uff0cdnsConfig \u7684 options \u7684\u67d0\u4e9b\u9009\u9879\u5982\u679c\u4e0e\u57fa\u4e8e dnsPolicy \u751f\u6210\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u7684\u9009\u9879\u51b2\u7a81\uff0c\u5219\u4f1a\u88ab dnsConfig \u6240\u8986\u76d6\u3002
                      • \u4e3b\u673a\u522b\u540d\uff1a\u4e3a\u4e3b\u673a\u8bbe\u7f6e\u7684\u522b\u540d\u3002
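
Host aliases are written into the Pod's /etc/hosts through the hostAliases field; a minimal sketch with illustrative IP and hostname values follows.

apiVersion: v1
kind: Pod
metadata:
  name: host-alias-demo          # placeholder name
spec:
  hostAliases:
    - ip: "10.6.175.30"          # illustrative IP address
      hostnames:
        - "registry.internal"    # illustrative alias added to /etc/hosts
  containers:
    - name: app
      image: busybox:1.36
      command: ["sleep", "3600"]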

• Upgrade Mode: Rolling Upgrade gradually replaces old-version instances with new-version instances; during the upgrade, traffic is load-balanced across both old and new instances, so the service is not interrupted. Recreate Upgrade deletes the old-version workload instances first and then installs the specified new version; the service is interrupted during the upgrade (see the sketch after this list).
• Max Unavailable: the maximum number or percentage of Pods that may be unavailable during the update, 25% by default. If it equals the number of instances, there is a risk of service interruption.
• Max Surge: the maximum number or percentage by which the total number of Pods may exceed the desired replica count during the update, 25% by default.
• Max Retained Revisions: the number of old revisions retained for version rollback, 10 by default.
• Min Ready Seconds: the minimum time a Pod must be ready before it is considered available; a Pod counts as available only after this time has passed, 0 seconds by default.
• Max Upgrade Duration: if the deployment has still not succeeded after the set time, the workload is marked as failed, 600 seconds by default.
• Graceful Scale-down Window: the execution time window for the pre-stop command before the workload stops (0-9,999 seconds), 30 seconds by default.
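
These parameters correspond to standard Deployment spec fields; a minimal sketch using the defaults listed above (names and labels are placeholders) follows.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: rollout-demo                    # placeholder name
spec:
  replicas: 3
  revisionHistoryLimit: 10              # max retained revisions
  minReadySeconds: 0                    # min time a Pod must be ready to count as available
  progressDeadlineSeconds: 600          # max upgrade duration before the rollout is marked failed
  strategy:
    type: RollingUpdate                 # or Recreate for a recreate upgrade
    rollingUpdate:
      maxUnavailable: 25%
      maxSurge: 25%
  selector:
    matchLabels:
      app: rollout-demo
  template:
    metadata:
      labels:
        app: rollout-demo
    spec:
      terminationGracePeriodSeconds: 30 # graceful scale-down window
      containers:
        - name: app
          image: nginx:1.14.2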

• Toleration Time: when the node running a workload instance becomes unavailable, the time after which the instance is rescheduled to another available node, 300 seconds by default.
• Node Affinity: constrains which nodes a Pod can be scheduled onto, based on node labels.
• Workload Affinity: constrains which nodes a Pod can be scheduled onto, based on the labels of Pods already running on those nodes.
• Workload Anti-affinity: constrains which nodes a Pod cannot be scheduled onto, based on the labels of Pods already running on those nodes (a sketch follows below).

For details, see Scheduling Policy.
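
To complement the node-affinity sketch in the DaemonSet section, below is a minimal workload anti-affinity sketch that keeps Pods with the same label off the same node; the label app=anti-demo and the Pod name are illustrative.

apiVersion: v1
kind: Pod
metadata:
  name: anti-affinity-demo             # placeholder name
  labels:
    app: anti-demo
spec:
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app: anti-demo                     # avoid nodes already running Pods with this label
          topologyKey: kubernetes.io/hostname    # topology domain: at most one such Pod per node
  containers:
    - name: app
      image: nginx:1.14.2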

You can click the Add button to add labels and annotations for the workload and its Pods.

"},{"location":"admin/kpanda/workloads/create-deployment.html#yaml","title":"Create from YAML","text":"

Besides creating from an image, you can also create a stateless workload more quickly from a YAML file.

1. Click Clusters in the left navigation bar, then click the name of the target cluster to enter the Cluster Details page.

2. On the cluster details page, click Workloads -> Deployments in the left navigation bar, then click the Create from YAML button in the upper-right corner of the page.

3. Enter or paste the YAML file you prepared in advance, then click OK to complete the creation.

Click to view a YAML example for creating a Deployment
apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nginx-deployment\nspec:\n  selector:\n    matchLabels:\n      app: nginx\n  replicas: 2 # Tells the Deployment to run 2 Pods matching this template\n  template:\n    metadata:\n      labels:\n        app: nginx\n    spec:\n      containers:\n      - name: nginx\n        image: nginx:1.14.2\n        ports:\n        - containerPort: 80\n
                      "},{"location":"admin/kpanda/workloads/create-job.html","title":"\u521b\u5efa\u4efb\u52a1\uff08Job\uff09","text":"

                      \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u4efb\u52a1\uff08Job\uff09\u3002

                      \u4efb\u52a1\uff08Job\uff09\u9002\u7528\u4e8e\u6267\u884c\u4e00\u6b21\u6027\u4efb\u52a1\u3002Job \u4f1a\u521b\u5efa\u4e00\u4e2a\u6216\u591a\u4e2a Pod\uff0cJob \u4f1a\u4e00\u76f4\u91cd\u65b0\u5c1d\u8bd5\u6267\u884c Pod\uff0c\u76f4\u5230\u6210\u529f\u7ec8\u6b62\u7684 Pod \u8fbe\u5230\u4e00\u5b9a\u6570\u91cf\u3002\u6210\u529f\u7ec8\u6b62\u7684 Pod \u8fbe\u5230\u6307\u5b9a\u7684\u6570\u91cf\u540e\uff0cJob \u4e5f\u968f\u4e4b\u7ed3\u675f\u3002\u5220\u9664 Job \u65f6\u4f1a\u4e00\u540c\u6e05\u9664\u8be5 Job \u521b\u5efa\u7684\u6240\u6709 Pod\u3002\u6682\u505c Job \u65f6\u5220\u9664\u8be5 Job \u4e2d\u7684\u6240\u6709\u6d3b\u8dc3 Pod\uff0c\u76f4\u5230 Job \u88ab\u7ee7\u7eed\u6267\u884c\u3002\u6709\u5173\u4efb\u52a1\uff08Job\uff09\u7684\u66f4\u591a\u4ecb\u7ecd\uff0c\u53ef\u53c2\u8003Job\u3002

                      "},{"location":"admin/kpanda/workloads/create-job.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                      • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                      • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                      • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                      "},{"location":"admin/kpanda/workloads/create-job.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

                      \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u4efb\u52a1\u3002

                      1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                      2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u4efb\u52a1 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                      3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\u3001\u670d\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                        \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de \u4efb\u52a1 \u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u4efb\u52a1\u6267\u884c\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u3001\u91cd\u542f\u7b49\u64cd\u4f5c\u3002

                      "},{"location":"admin/kpanda/workloads/create-job.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"

                      \u5728 \u521b\u5efa\u4efb\u52a1 \u9875\u9762\u4e2d\uff0c\u6839\u636e\u4e0b\u8868\u8f93\u5165\u57fa\u672c\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                      • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                      • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u4efb\u52a1\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                      • \u5b9e\u4f8b\u6570\uff1a\u8f93\u5165\u5de5\u4f5c\u8d1f\u8f7d\u7684 Pod \u5b9e\u4f8b\u6570\u91cf\u3002\u9ed8\u8ba4\u521b\u5efa 1 \u4e2a Pod \u5b9e\u4f8b\u3002
                      • \u63cf\u8ff0\uff1a\u8f93\u5165\u5de5\u4f5c\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u91cf\u5e94\u4e0d\u8d85\u8fc7 512 \u4e2a\u3002
                      "},{"location":"admin/kpanda/workloads/create-job.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                      \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                      \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

                      \u57fa\u672c\u4fe1\u606f\uff08\u5fc5\u586b\uff09\u751f\u547d\u5468\u671f\uff08\u9009\u586b\uff09\u5065\u5eb7\u68c0\u67e5\uff08\u9009\u586b\uff09\u73af\u5883\u53d8\u91cf\uff08\u9009\u586b\uff09\u6570\u636e\u5b58\u50a8\uff08\u9009\u586b\uff09\u5b89\u5168\u8bbe\u7f6e\uff08\u9009\u586b\uff09

                      \u5728\u914d\u7f6e\u5bb9\u5668\u76f8\u5173\u53c2\u6570\u65f6\uff0c\u5fc5\u987b\u6b63\u786e\u586b\u5199\u5bb9\u5668\u7684\u540d\u79f0\u3001\u955c\u50cf\u53c2\u6570\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u8fdb\u5165\u4e0b\u4e00\u6b65\u3002\u53c2\u8003\u4ee5\u4e0b\u8981\u6c42\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u8ba4 \u3002

                      • \u5bb9\u5668\u7c7b\u578b\uff1a\u9ed8\u8ba4\u4e3a\u5de5\u4f5c\u5bb9\u5668\u3002\u6709\u5173\u521d\u59cb\u5316\u5bb9\u5668\uff0c\u53c2\u89c1 k8s \u5b98\u65b9\u6587\u6863\u3002
                      • \u5bb9\u5668\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u652f\u6301\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\u3002\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 nginx-01\u3002
                      • \u955c\u50cf\uff1a
                        • \u5bb9\u5668\u955c\u50cf\uff1a\u4ece\u5217\u8868\u4e2d\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u955c\u50cf\u3002\u8f93\u5165\u955c\u50cf\u540d\u79f0\u65f6\uff0c\u9ed8\u8ba4\u4ece\u5b98\u65b9\u7684 DockerHub \u62c9\u53d6\u955c\u50cf\u3002 \u63a5\u5165\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u955c\u50cf\u4ed3\u5e93\u6a21\u5757\u540e\uff0c\u53ef\u4ee5\u70b9\u51fb\u53f3\u4fa7\u7684 \u9009\u62e9\u955c\u50cf \u6309\u94ae\u6765\u9009\u62e9\u955c\u50cf\u3002
                        • \u955c\u50cf\u7248\u672c\uff1a\u4ece\u4e0b\u62c9\u5217\u8868\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u7248\u672c\u3002
                        • \u955c\u50cf\u62c9\u53d6\u7b56\u7565\uff1a\u52fe\u9009 \u603b\u662f\u62c9\u53d6\u955c\u50cf \u540e\uff0c\u8d1f\u8f7d\u6bcf\u6b21\u91cd\u542f/\u5347\u7ea7\u65f6\u90fd\u4f1a\u4ece\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u955c\u50cf\u3002 \u5982\u679c\u4e0d\u52fe\u9009\uff0c\u5219\u53ea\u62c9\u53d6\u672c\u5730\u955c\u50cf\uff0c\u53ea\u6709\u5f53\u955c\u50cf\u5728\u672c\u5730\u4e0d\u5b58\u5728\u65f6\u624d\u4ece\u955c\u50cf\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u3002 \u66f4\u591a\u8be6\u60c5\u53ef\u53c2\u8003\u955c\u50cf\u62c9\u53d6\u7b56\u7565\u3002
                        • \u955c\u50cf\u4ed3\u5e93\u5bc6\u94a5\uff1a\u53ef\u9009\u3002\u5982\u679c\u76ee\u6807\u4ed3\u5e93\u9700\u8981 Secret \u624d\u80fd\u8bbf\u95ee\uff0c\u9700\u8981\u5148\u53bb\u521b\u5efa\u4e00\u4e2a\u5bc6\u94a5\u3002
                      • \u7279\u6743\u5bb9\u5668\uff1a\u5bb9\u5668\u9ed8\u8ba4\u4e0d\u53ef\u4ee5\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u4efb\u4f55\u8bbe\u5907\uff0c\u5f00\u542f\u7279\u6743\u5bb9\u5668\u540e\uff0c\u5bb9\u5668\u5373\u53ef\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u6240\u6709\u8bbe\u5907\uff0c\u4eab\u6709\u5bbf\u4e3b\u673a\u4e0a\u7684\u8fd0\u884c\u8fdb\u7a0b\u7684\u6240\u6709\u6743\u9650\u3002
• CPU/Memory quota: the request value (minimum resources needed) and limit value (maximum resources allowed) for CPU/memory. Configure resources for the container as needed to avoid resource waste and system failures caused by the container exceeding its quota. Default values are as shown in the figure.
• GPU configuration: configure GPU usage for the container; only positive integers are accepted.
  • Whole-card mode:
    • Number of physical cards: the number of physical GPU cards the container can use. Once configured, the container occupies whole physical GPU cards. The number of physical cards must be ≤ the maximum number of GPU cards installed on a single node.
  • Virtualization mode:
    • Number of physical cards: the number of physical GPU cards the container can use; it must be ≤ the maximum number of GPU cards installed on a single node.
    • GPU compute power: the percentage of compute power used on each physical GPU card, at most 100%.
    • GPU memory: the amount of GPU memory used on each physical card.
    • Scheduling policy (Binpack / Spread): supports two scheduling dimensions, per GPU card and per node. Binpack is a consolidation policy that prefers scheduling containers onto the same GPU card of the same node; Spread is a dispersion policy that prefers scheduling containers onto different GPU cards of different nodes. The two can be combined according to the actual scenario. (When the workload-level Binpack / Spread policy conflicts with the cluster-level Binpack / Spread policy, the system gives priority to the workload-level policy.)
    • Task priority: GPU compute power is given to high-priority tasks first; ordinary tasks reduce or even suspend their use of GPU compute power until the high-priority task finishes, after which they resume. Commonly used in online/offline colocation scenarios.
    • Specified model: schedule the workload onto GPU cards of a specified model, for scenarios with special requirements on the GPU model.
  • MIG mode:
    • Spec: the spec of the partitioned physical GPU card.
    • Quantity: the number of instances of that spec to use.

Before setting up GPU, an administrator must install GPU Operator and nvidia-vgpu (required only for vGPU mode) on the cluster in advance, and enable the GPU feature in the cluster settings.
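For orientation, below is a minimal sketch of whole-card mode in plain Kubernetes terms, assuming the standard NVIDIA device plugin resource name nvidia.com/gpu exposed by GPU Operator; the resource keys used by the platform's virtualization (vGPU) and MIG modes are platform-specific and are not shown. Names are hypothetical.

# Minimal sketch: whole-card mode, assuming the standard nvidia.com/gpu
# resource name (vGPU/MIG resource keys are platform-specific).
apiVersion: v1
kind: Pod
metadata:
  name: gpu-demo                  # hypothetical name
spec:
  containers:
    - name: cuda-app
      image: nvidia/cuda:12.2.0-base-ubuntu22.04
      command: ["nvidia-smi"]
      resources:
        limits:
          nvidia.com/gpu: 1       # occupy one whole physical GPU card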

Set the commands to run when the container starts, after it starts, and before it stops. For details, see the container lifecycle configuration.
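For reference, a minimal sketch (hypothetical commands) of how the post-start and pre-stop settings map onto a container's lifecycle hooks:

apiVersion: v1
kind: Pod
metadata:
  name: lifecycle-demo            # hypothetical name
spec:
  containers:
    - name: app
      image: nginx
      lifecycle:
        postStart:                # runs right after the container starts
          exec:
            command: ["sh", "-c", "echo started >> /tmp/lifecycle.log"]
        preStop:                  # runs before the container stops
          exec:
            command: ["sh", "-c", "nginx -s quit; sleep 5"]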

Used to determine the health status of containers and applications, which helps improve application availability. For details, see the container health check configuration.

Configure container parameters within the Pod, add environment variables to the Pod, pass configuration, and so on. For details, see the container environment variable configuration.

Configure settings for mounting data volumes and persisting data in the container. For details, see the container data storage configuration.
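For reference, a minimal sketch (assuming a pre-created PVC with the hypothetical name data-pvc) of mounting a persistent volume into a container:

apiVersion: v1
kind: Pod
metadata:
  name: storage-demo              # hypothetical name
spec:
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: data-pvc       # hypothetical, pre-created PVC
  containers:
    - name: app
      image: busybox
      command: ["sleep", "3600"]
      volumeMounts:
        - name: data
          mountPath: /var/lib/data   # persisted data lives here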

Securely isolate the container through Linux's built-in account permission isolation mechanism. You can restrict the container's permissions by using the UID (numeric identity) of an account with different permissions. For example, entering 0 means using the permissions of the root account.
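A minimal sketch of what this setting maps to in the Pod spec; the UID value is hypothetical:

apiVersion: v1
kind: Pod
metadata:
  name: security-demo             # hypothetical name
spec:
  securityContext:
    runAsUser: 1000               # numeric UID; 0 would mean root permissions
  containers:
    - name: app
      image: busybox
      command: ["sh", "-c", "id && sleep 3600"]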

                      "},{"location":"admin/kpanda/workloads/create-job.html#_5","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

Advanced configuration consists of two parts: job settings, and labels & annotations.

Job Settings / Labels & Annotations

• Parallelism: the maximum number of Pods allowed to be created concurrently while the job runs; it should not exceed the total number of Pods. Defaults to 1.
• Timeout: once this time is exceeded, the job is marked as failed and all Pods under the job are deleted. Leave it empty to set no timeout.
• Restart policy: whether to restart the Pod on failure. (A sketch of how these settings map onto Job fields follows below.)
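A minimal sketch (hypothetical names and values) of how these settings map onto Job fields: parallelism for the parallel count, activeDeadlineSeconds for the timeout, and the Pod template's restartPolicy for the restart policy.

apiVersion: batch/v1
kind: Job
metadata:
  name: job-settings-demo         # hypothetical name
spec:
  parallelism: 2                  # at most 2 Pods run at the same time
  completions: 4                  # total number of Pods that must succeed
  activeDeadlineSeconds: 600      # timeout: the Job fails after 600 seconds
  template:
    spec:
      restartPolicy: Never        # restart policy on failure (Never or OnFailure)
      containers:
        - name: worker
          image: busybox
          command: ["sh", "-c", "echo done"]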

You can click the Add button to add labels and annotations to the workload instance Pods.

                      "},{"location":"admin/kpanda/workloads/create-job.html#yaml","title":"YAML \u521b\u5efa","text":"

Besides the image-based method, you can also create a job more quickly from a YAML file.

1. Click Cluster List in the left navigation bar, then click the name of the target cluster to enter the Cluster Details page.

2. On the Cluster Details page, click Workloads -> Jobs in the left navigation bar, then click the Create from YAML button in the upper-right corner of the page.

3. Enter or paste the prepared YAML file, then click OK to finish creating it.

Click to view a YAML example for creating a job
                      kind: Job\napiVersion: batch/v1\nmetadata:\n  name: demo\n  namespace: default\n  uid: a9708239-0358-4aa1-87d3-a092c080836e\n  resourceVersion: '92751876'\n  generation: 1\n  creationTimestamp: '2022-12-26T10:52:22Z'\n  labels:\n    app: demo\n    controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n    job-name: demo\n  annotations:\n    revisions: >-\n      {\"1\":{\"status\":\"running\",\"uid\":\"a9708239-0358-4aa1-87d3-a092c080836e\",\"start-time\":\"2022-12-26T10:52:22Z\",\"completion-time\":\"0001-01-01T00:00:00Z\"}}\nspec:\n  parallelism: 1\n  backoffLimit: 6\n  selector:\n    matchLabels:\n      controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: demo\n        controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n        job-name: demo\n    spec:\n      containers:\n        - name: container-4\n          image: nginx\n          resources:\n            limits:\n              cpu: 250m\n              memory: 512Mi\n            requests:\n              cpu: 250m\n              memory: 512Mi\n          lifecycle: {}\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            privileged: false\n      restartPolicy: Never\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      securityContext: {}\n      schedulerName: default-scheduler\n  completionMode: NonIndexed\n  suspend: false\nstatus:\n  startTime: '2022-12-26T10:52:22Z'\n  active: 1\n
                      "},{"location":"admin/kpanda/workloads/create-statefulset.html","title":"\u521b\u5efa\u6709\u72b6\u6001\u8d1f\u8f7d\uff08StatefulSet\uff09","text":"

This page describes how to create a stateful workload (StatefulSet) in two ways: from an image and from a YAML file.

A StatefulSet is a common resource in Kubernetes. Like a stateless workload (Deployment), it is mainly used to manage the deployment and scaling of a set of Pods. The main difference is that a Deployment is stateless and does not persist data, while a StatefulSet is stateful and is mainly used to manage stateful applications. In addition, Pods in a StatefulSet have permanent, unchanging IDs, which makes it easy to identify the corresponding Pod when matching storage volumes.

Through the container management module of the Suanfeng AI computing platform, workloads on multiple clouds and clusters can be easily managed based on the corresponding role permissions, including full lifecycle management of stateful workloads: creation, update, deletion, elastic scaling, restart, version rollback, and more.

                      "},{"location":"admin/kpanda/workloads/create-statefulset.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

Before creating a StatefulSet from an image, the following prerequisites must be met:

• A Kubernetes cluster has been integrated into or created in the container management module, and the cluster's UI is accessible.

• A namespace and a user have been created.

• The current user has NS Editor or higher permissions. For details, see namespace authorization.

• When a single instance contains multiple containers, make sure the ports used by the containers do not conflict; otherwise the deployment will fail.

                      "},{"location":"admin/kpanda/workloads/create-statefulset.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

Follow the steps below to create a StatefulSet from an image.

1. Click Cluster List in the left navigation bar, then click the name of the target cluster to enter Cluster Details.

2. Click Workloads -> StatefulSets in the left navigation bar, then click the Create from Image button in the upper-right corner.

3. Fill in the basic information, container configuration, service configuration, and advanced configuration in turn, then click OK in the lower-right corner of the page to finish creating it.

The system automatically returns to the StatefulSets list. Wait for the workload's status to become Running. If the workload status shows an exception, check the specific error message; see workload status for reference.

Click ┇ on the right of the newly created workload row to update, delete, elastically scale, restart, or roll back the workload.

                      "},{"location":"admin/kpanda/workloads/create-statefulset.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"
                      • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 deployment-01\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                      • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u8d1f\u8f7d\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                      • \u5b9e\u4f8b\u6570\uff1a\u8f93\u5165\u8d1f\u8f7d\u7684 Pod \u5b9e\u4f8b\u6570\u91cf\uff0c\u9ed8\u8ba4\u521b\u5efa 1 \u4e2a Pod \u5b9e\u4f8b\u3002
                      • \u63cf\u8ff0\uff1a\u8f93\u5165\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u4e0d\u8d85\u8fc7 512\u3002

                      "},{"location":"admin/kpanda/workloads/create-statefulset.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

Container configuration is divided into six parts: basic information, lifecycle, health check, environment variables, data storage, and security settings. Click the corresponding tab below to view the configuration requirements of each part.

Container configuration applies to a single container. To add multiple containers to one pod, click + on the right to add more containers.

Basic Info (required) / Lifecycle (optional) / Health Check (optional) / Environment Variables (optional) / Data Storage (optional) / Security Settings (optional)

When configuring container parameters, the container name and image must be filled in correctly; otherwise you cannot proceed to the next step. Fill in the configuration according to the following requirements, then click Confirm.

• Container type: defaults to work container. For init containers, see the official Kubernetes documentation.
• Container name: up to 63 characters; lowercase letters, digits, and hyphens ("-") are supported. Must start and end with a lowercase letter or digit, e.g. nginx-01.
• Image:
  • Container image: select a suitable image from the list. When an image name is entered, the image is pulled from the official Docker Hub by default. After the image registry module of the Suanfeng AI computing platform is integrated, you can click the Select Image button on the right to choose an image.
  • Image tag: select a suitable tag from the drop-down list.
  • Image pull policy: if Always pull image is checked, the image is re-pulled from the registry each time the workload restarts or upgrades. If unchecked, only the local image is used, and the image is re-pulled from the registry only when it does not exist locally. For more details, see image pull policy.
  • Registry secret: optional. If the target registry requires a Secret for access, create a secret first.
• Privileged container: by default, a container cannot access any devices on the host. When privileged mode is enabled, the container can access all devices on the host and has all the permissions of processes running on the host.
• CPU/Memory quota: the request value (minimum resources needed) and limit value (maximum resources allowed) for CPU/memory. Configure resources for the container as needed to avoid resource waste and system failures caused by the container exceeding its quota. Default values are as shown in the figure.
• GPU configuration: configure GPU usage for the container; only positive integers are accepted.
  • Whole-card mode:
    • Number of physical cards: the number of physical GPU cards the container can use. Once configured, the container occupies whole physical GPU cards. The number of physical cards must be ≤ the maximum number of GPU cards installed on a single node.
  • Virtualization mode:
    • Number of physical cards: the number of physical GPU cards the container can use; it must be ≤ the maximum number of GPU cards installed on a single node.
    • GPU compute power: the percentage of compute power used on each physical GPU card, at most 100%.
    • GPU memory: the amount of GPU memory used on each physical card.
    • Scheduling policy (Binpack / Spread): supports two scheduling dimensions, per GPU card and per node. Binpack is a consolidation policy that prefers scheduling containers onto the same GPU card of the same node; Spread is a dispersion policy that prefers scheduling containers onto different GPU cards of different nodes. The two can be combined according to the actual scenario. (When the workload-level Binpack / Spread policy conflicts with the cluster-level Binpack / Spread policy, the system gives priority to the workload-level policy.)
    • Task priority: GPU compute power is given to high-priority tasks first; ordinary tasks reduce or even suspend their use of GPU compute power until the high-priority task finishes, after which they resume. Commonly used in online/offline colocation scenarios.
    • Specified model: schedule the workload onto GPU cards of a specified model, for scenarios with special requirements on the GPU model.
  • MIG mode:
    • Spec: the spec of the partitioned physical GPU card.
    • Quantity: the number of instances of that spec to use.

Before setting up GPU, an administrator must install GPU Operator and nvidia-vgpu (required only for vGPU mode) on the cluster in advance, and enable the GPU feature in the cluster settings.

Set the commands to run when the container starts, after it starts, and before it stops. For details, see the container lifecycle configuration.

Used to determine the health status of containers and applications, which helps improve application availability. For details, see the container health check configuration.

Configure container parameters within the Pod, add environment variables to the Pod, pass configuration, and so on. For details, see the container environment variable configuration.

Configure settings for mounting data volumes and persisting data in the container. For details, see the container data storage configuration.

Securely isolate the container through Linux's built-in account permission isolation mechanism. You can restrict the container's permissions by using the UID (numeric identity) of an account with different permissions. For example, entering 0 means using the permissions of the root account.

                      "},{"location":"admin/kpanda/workloads/create-statefulset.html#_5","title":"\u670d\u52a1\u914d\u7f6e","text":"

Configure a Service for the StatefulSet so that it can be accessed externally.

1. Click the Create Service button.

2. Configure the service parameters; see Create a Service for reference.

3. Click OK, then click Next.
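As background, a StatefulSet is typically paired with a headless Service, which the StatefulSet references through spec.serviceName to give each Pod a stable DNS record. A minimal sketch with hypothetical names:

apiVersion: v1
kind: Service
metadata:
  name: nginx-headless            # hypothetical name, referenced by spec.serviceName
spec:
  clusterIP: None                 # headless: per-Pod DNS records instead of a virtual IP
  selector:
    app: nginx
  ports:
    - name: web
      port: 80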

                      "},{"location":"admin/kpanda/workloads/create-statefulset.html#_6","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

Advanced configuration includes the workload's network configuration, upgrade policy, container management policy, scheduling policy, and labels & annotations. Click the tabs below to view the configuration requirements of each part.

Network Configuration / Upgrade Policy / Container Management Policy / Scheduling Policy / Labels & Annotations
• If the SpiderPool and Multus components are deployed in the cluster, container NICs can be configured in the network configuration.

• DNS configuration: in some scenarios, applications generate redundant DNS queries. Kubernetes provides DNS-related configuration options for applications that can effectively reduce redundant DNS queries in those scenarios and increase business throughput.

• DNS policy

  • Default: the container uses the domain name resolution file that kubelet's --resolv-conf parameter points to. This configuration can only resolve external domain names registered on the internet and cannot resolve cluster-internal domain names, but it produces no invalid DNS queries.
  • ClusterFirstWithHostNet: for applications running with the host network (hostNetwork: true) that still need to resolve cluster-internal domain names through the cluster DNS.
  • ClusterFirst: the application uses Kube-DNS/CoreDNS.
  • None: a new option value introduced in Kubernetes v1.9 (Beta in v1.10). When set to None, dnsConfig must be set; the container's domain name resolution file is then generated entirely from the dnsConfig configuration.
• Nameservers: enter the addresses of the nameservers, e.g. 10.6.175.20.

• Search domains: the list of DNS search domains used for name lookups. When specified, the provided list is merged into the search field of the resolution file generated from dnsPolicy, and duplicate domains are removed. Kubernetes allows at most 6 search domains.
• Options: DNS configuration options, where each object may have a name attribute (required) and a value attribute (optional). The contents of this field are merged into the options field of the resolution file generated from dnsPolicy; any options in dnsConfig that conflict with the options of that file are overridden by dnsConfig.
• Host aliases: aliases configured for hosts. (A sketch of these DNS fields follows below.)
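For reference, a minimal sketch (hypothetical addresses and names) of how these options map onto a Pod's dnsPolicy, dnsConfig, and hostAliases fields:

apiVersion: v1
kind: Pod
metadata:
  name: dns-demo                  # hypothetical name
spec:
  dnsPolicy: "None"               # resolution file generated entirely from dnsConfig
  dnsConfig:
    nameservers:
      - 10.6.175.20               # nameserver address
    searches:
      - my-ns.svc.cluster.local   # search domains (at most 6)
    options:
      - name: ndots               # option with name (required) and value (optional)
        value: "2"
  hostAliases:
    - ip: "10.6.175.30"           # host alias: entry added to the Pod's hosts file
      hostnames:
        - "internal.example.local"
  containers:
    - name: app
      image: busybox
      command: ["sleep", "3600"]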

• Upgrade method: Rolling upgrade gradually replaces old-version instances with new-version instances; during the upgrade, business traffic is load-balanced across both old and new instances, so the business is not interrupted. Recreate upgrade deletes the old-version workload instances first and then installs the specified new version; the business is interrupted during the upgrade.
• Max reserved revisions: the number of old revisions kept for version rollback. Defaults to 10.
• Scale-down time window: the execution time window for the workload's pre-stop command (0-9,999 seconds); defaults to 30 seconds.

In Kubernetes v1.7 and later, the Pod management policy can be set through .spec.podManagementPolicy, which supports the following two modes:

• Ordered policy (OrderedReady): the default Pod management policy. Pods are deployed in order, and the StatefulSet starts deploying the next Pod only after the previous one has been deployed successfully. Deletion uses the reverse order: the last Pod created is the first to be deleted.

• Parallel policy (Parallel): create or delete containers in parallel, just like Pods of a Deployment. The StatefulSet controller starts or terminates all containers in parallel, without waiting for a Pod to become Running and Ready, or to be fully stopped, before starting or terminating another. This option only affects the behavior of scaling operations, not the ordering during updates. (See the sketch below.)
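For reference, a minimal sketch (hypothetical names) showing where the upgrade and Pod management settings above live in a StatefulSet spec:

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web                       # hypothetical name
spec:
  serviceName: nginx-headless     # headless Service, as sketched earlier
  replicas: 3
  podManagementPolicy: Parallel   # or OrderedReady (the default)
  updateStrategy:
    type: RollingUpdate           # rolling upgrade
  revisionHistoryLimit: 10        # max reserved revisions for rollback
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      terminationGracePeriodSeconds: 30   # scale-down time window
      containers:
        - name: nginx
          image: nginx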

• Toleration time: when the node hosting a workload instance is unavailable, the time within which the instance is rescheduled to another available node; defaults to 300 seconds.
• Node affinity: constrains which nodes a Pod can be scheduled onto, based on node labels.
• Workload affinity: constrains which nodes a Pod can be scheduled onto, based on the labels of Pods already running on those nodes.
• Workload anti-affinity: constrains which nodes a Pod cannot be scheduled onto, based on the labels of Pods already running on those nodes.
• Topology domain: i.e. topologyKey, which designates a group of schedulable nodes. For example, kubernetes.io/os means that any node of a given operating system can be scheduled to as long as it satisfies the labelSelector condition.

For details, see the scheduling policy documentation and the sketch below.

![Scheduling policy](../../../images/deploy15_1.png)
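A minimal sketch (hypothetical labels and values) of how these scheduling options map onto Pod spec fields:

apiVersion: v1
kind: Pod
metadata:
  name: sched-demo                # hypothetical name
spec:
  tolerations:                    # toleration time for unreachable nodes
    - key: node.kubernetes.io/unreachable
      operator: Exists
      effect: NoExecute
      tolerationSeconds: 300
  affinity:
    nodeAffinity:                 # node affinity based on node labels
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values: ["linux"]
    podAntiAffinity:              # workload anti-affinity based on Pod labels
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app: nginx          # hypothetical label
          topologyKey: kubernetes.io/hostname   # topology domain
  containers:
    - name: app
      image: nginx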

You can click the Add button to add labels and annotations to the workload and pods.

                      "},{"location":"admin/kpanda/workloads/create-statefulset.html#yaml","title":"YAML \u521b\u5efa","text":"

Besides the image-based method, you can also create a StatefulSet more quickly from a YAML file.

1. Click Cluster List in the left navigation bar, then click the name of the target cluster to enter the Cluster Details page.

2. On the Cluster Details page, click Workloads -> StatefulSets in the left navigation bar, then click the Create from YAML button in the upper-right corner of the page.

3. Enter or paste the prepared YAML file, then click OK to finish creating it.

Click to view a YAML example for creating a StatefulSet
                      kind: StatefulSet\napiVersion: apps/v1\nmetadata:\n  name: test-mysql-123-mysql\n  namespace: default\n  uid: d3f45527-a0ab-4b22-9013-5842a06f4e0e\n  resourceVersion: '20504385'\n  generation: 1\n  creationTimestamp: '2022-09-22T09:34:10Z'\n  ownerReferences:\n    - apiVersion: mysql.presslabs.org/v1alpha1\n      kind: MysqlCluster\n      name: test-mysql-123\n      uid: 5e877cc3-5167-49da-904e-820940cf1a6d\n      controller: true\n      blockOwnerDeletion: true\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app.kubernetes.io/managed-by: mysql.presslabs.org\n      app.kubernetes.io/name: mysql\n      mysql.presslabs.org/cluster: test-mysql-123\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app.kubernetes.io/component: database\n        app.kubernetes.io/instance: test-mysql-123\n        app.kubernetes.io/managed-by: mysql.presslabs.org\n        app.kubernetes.io/name: mysql\n        app.kubernetes.io/version: 5.7.31\n        mysql.presslabs.org/cluster: test-mysql-123\n      annotations:\n        config_rev: '13941099'\n        prometheus.io/port: '9125'\n        prometheus.io/scrape: 'true'\n        secret_rev: '13941101'\n    spec:\n      volumes:\n        - name: conf\n          emptyDir: {}\n        - name: init-scripts\n          emptyDir: {}\n        - name: config-map\n          configMap:\n            name: test-mysql-123-mysql\n            defaultMode: 420\n        - name: data\n          persistentVolumeClaim:\n            claimName: data\n      initContainers:\n        - name: init\n          image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - clone-and-init\n          envFrom:\n            - secretRef:\n                name: test-mysql-123-mysql-operated\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: BACKUP_USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: BACKUP_USER\n                  optional: true\n            - name: BACKUP_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: BACKUP_PASSWORD\n                  optional: true\n          resources: {}\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: config-map\n              mountPath: /mnt/conf\n            - name: data\n              mountPath: /var/lib/mysql\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n      containers:\n        - name: mysql\n          image: docker.m.daocloud.io/mysql:5.7.31\n 
         ports:\n            - name: mysql\n              containerPort: 3306\n              protocol: TCP\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: ORCH_CLUSTER_ALIAS\n              value: test-mysql-123.default\n            - name: ORCH_HTTP_API\n              value: http://mysql-operator.mcamel-system/api\n            - name: MYSQL_ROOT_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: ROOT_PASSWORD\n                  optional: false\n            - name: MYSQL_USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: USER\n                  optional: true\n            - name: MYSQL_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: PASSWORD\n                  optional: true\n            - name: MYSQL_DATABASE\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: DATABASE\n                  optional: true\n          resources:\n            limits:\n              cpu: '1'\n              memory: 1Gi\n            requests:\n              cpu: 100m\n              memory: 512Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: data\n              mountPath: /var/lib/mysql\n          livenessProbe:\n            exec:\n              command:\n                - mysqladmin\n                - '--defaults-file=/etc/mysql/client.conf'\n                - ping\n            initialDelaySeconds: 60\n            timeoutSeconds: 5\n            periodSeconds: 5\n            successThreshold: 1\n            failureThreshold: 3\n          readinessProbe:\n            exec:\n              command:\n                - /bin/sh\n                - '-c'\n                - >-\n                  test $(mysql --defaults-file=/etc/mysql/client.conf -NB -e\n                  'SELECT COUNT(*) FROM sys_operator.status WHERE\n                  name=\"configured\" AND value=\"1\"') -eq 1\n            initialDelaySeconds: 5\n            timeoutSeconds: 5\n            periodSeconds: 2\n            successThreshold: 1\n            failureThreshold: 3\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - bash\n                  - /etc/mysql/pre-shutdown-ha.sh\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: sidecar\n          image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          
args:\n            - config-and-serve\n          ports:\n            - name: sidecar-http\n              containerPort: 8080\n              protocol: TCP\n          envFrom:\n            - secretRef:\n                name: test-mysql-123-mysql-operated\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: XTRABACKUP_TARGET_DIR\n              value: /tmp/xtrabackup_backupfiles/\n          resources:\n            limits:\n              cpu: '1'\n              memory: 1Gi\n            requests:\n              cpu: 10m\n              memory: 64Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: data\n              mountPath: /var/lib/mysql\n          readinessProbe:\n            httpGet:\n              path: /health\n              port: 8080\n              scheme: HTTP\n            initialDelaySeconds: 30\n            timeoutSeconds: 5\n            periodSeconds: 5\n            successThreshold: 1\n            failureThreshold: 3\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: metrics-exporter\n          image: prom/mysqld-exporter:v0.13.0\n          args:\n            - '--web.listen-address=0.0.0.0:9125'\n            - '--web.telemetry-path=/metrics'\n            - '--collect.heartbeat'\n            - '--collect.heartbeat.database=sys_operator'\n          ports:\n            - name: prometheus\n              containerPort: 9125\n              protocol: TCP\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: METRICS_EXPORTER_USER\n                  optional: false\n            - name: PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: 
METRICS_EXPORTER_PASSWORD\n                  optional: false\n            - name: DATA_SOURCE_NAME\n              value: $(USER):$(PASSWORD)@(127.0.0.1:3306)/\n          resources:\n            limits:\n              cpu: 100m\n              memory: 128Mi\n            requests:\n              cpu: 10m\n              memory: 32Mi\n          livenessProbe:\n            httpGet:\n              path: /metrics\n              port: 9125\n              scheme: HTTP\n            initialDelaySeconds: 30\n            timeoutSeconds: 30\n            periodSeconds: 30\n            successThreshold: 1\n            failureThreshold: 3\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: pt-heartbeat\n          image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - pt-heartbeat\n            - '--update'\n            - '--replace'\n            - '--check-read-only'\n            - '--create-table'\n            - '--database'\n            - sys_operator\n            - '--table'\n            - heartbeat\n            - '--utc'\n            - '--defaults-file'\n            - /etc/mysql/heartbeat.conf\n            - '--fail-successive-errors=20'\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n          resources:\n            limits:\n              cpu: 100m\n              memory: 64Mi\n            requests:\n              cpu: 10m\n              memory: 32Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n      restartPolicy: Always\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      securityContext:\n        runAsUser: 999\n        fsGroup: 999\n      affinity:\n        podAntiAffinity:\n          preferredDuringSchedulingIgnoredDuringExecution:\n            - weight: 100\n              podAffinityTerm:\n                labelSelector:\n                  matchLabels:\n                    app.kubernetes.io/component: database\n                    app.kubernetes.io/instance: test-mysql-123\n                    app.kubernetes.io/managed-by: mysql.presslabs.org\n                    app.kubernetes.io/name: mysql\n                    app.kubernetes.io/version: 5.7.31\n                    mysql.presslabs.org/cluster: test-mysql-123\n                topologyKey: kubernetes.io/hostname\n      schedulerName: default-scheduler\n  volumeClaimTemplates:\n    - kind: PersistentVolumeClaim\n      apiVersion: v1\n      metadata:\n        name: data\n        creationTimestamp: null\n        ownerReferences:\n          - 
apiVersion: mysql.presslabs.org/v1alpha1\n            kind: MysqlCluster\n            name: test-mysql-123\n            uid: 5e877cc3-5167-49da-904e-820940cf1a6d\n            controller: true\n      spec:\n        accessModes:\n          - ReadWriteOnce\n        resources:\n          limits:\n            storage: 1Gi\n          requests:\n            storage: 1Gi\n        storageClassName: local-path\n        volumeMode: Filesystem\n      status:\n        phase: Pending\n  serviceName: mysql\n  podManagementPolicy: OrderedReady\n  updateStrategy:\n    type: RollingUpdate\n    rollingUpdate:\n      partition: 0\n  revisionHistoryLimit: 10\nstatus:\n  observedGeneration: 1\n  replicas: 1\n  readyReplicas: 1\n  currentReplicas: 1\n  updatedReplicas: 1\n  currentRevision: test-mysql-123-mysql-6b8f5577c7\n  updateRevision: test-mysql-123-mysql-6b8f5577c7\n  collisionCount: 0\n  availableReplicas: 1\n
                      "},{"location":"admin/kpanda/workloads/pod-config/env-variables.html","title":"\u914d\u7f6e\u73af\u5883\u53d8\u91cf","text":"

An environment variable is a variable set in the container's runtime environment, used to add environment flags to a Pod or pass configuration, etc. Environment variables can be configured for a Pod in the form of key-value pairs.

On top of native Kubernetes, the container management module of the Suanfeng AI computing platform adds a graphical interface for configuring environment variables for Pods, and supports the following configuration methods:

• Key/value pair (Key/Value Pair): use a custom key-value pair as an environment variable of the container
• Resource reference (Resource): use a field defined by the Container, such as the container's memory limit or the number of replicas, as the value of an environment variable
• Variable/variable reference (Pod Field): use a Pod field, such as the Pod's name, as the value of an environment variable
• ConfigMap key import (ConfigMap key): import the value of a single key in a ConfigMap as the value of an environment variable
• Secret key import (Secret Key): use data from a Secret to define the value of an environment variable
• Secret import (Secret): import all key-value pairs in a Secret as environment variables
• ConfigMap import (ConfigMap): import all key-value pairs in a ConfigMap as environment variables (all of these methods are sketched below)
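For reference, a minimal sketch (with hypothetical names such as app-config and app-secret) showing each of the methods above in a Pod spec:

apiVersion: v1
kind: Pod
metadata:
  name: env-demo                  # hypothetical name
spec:
  containers:
    - name: app
      image: busybox
      command: ["sleep", "3600"]
      env:
        - name: GREETING          # key/value pair
          value: "hello"
        - name: MEM_LIMIT         # resource reference
          valueFrom:
            resourceFieldRef:
              containerName: app
              resource: limits.memory
        - name: POD_NAME          # Pod field
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: APP_MODE          # ConfigMap key import
          valueFrom:
            configMapKeyRef:
              name: app-config    # hypothetical ConfigMap
              key: mode
        - name: DB_PASSWORD       # Secret key import
          valueFrom:
            secretKeyRef:
              name: app-secret    # hypothetical Secret
              key: password
      envFrom:
        - configMapRef:           # ConfigMap import (all keys)
            name: app-config
        - secretRef:              # Secret import (all keys)
            name: app-secret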
                      "},{"location":"admin/kpanda/workloads/pod-config/health-check.html","title":"\u5bb9\u5668\u7684\u5065\u5eb7\u68c0\u67e5","text":"

Container health checks examine the health of a container according to user requirements. Once configured, if the application inside the container becomes abnormal, the container is automatically restarted to recover. Kubernetes provides liveness (Liveness) checks, readiness (Readiness) checks, and startup (Startup) checks.

• Liveness check (LivenessProbe): detects application deadlock (the application is running but cannot proceed with subsequent steps). Restarting a container in this state helps improve application availability, even if the application contains defects.

• Readiness check (ReadinessProbe): detects when a container is ready to accept request traffic. A Pod is considered ready only when all containers in it are ready. One use of this signal is to control which Pods serve as backends for a Service; a Pod that is not yet ready is removed from the Service's load balancer.

• Startup check (StartupProbe): detects when an application container has started. Once configured, liveness and readiness checks are performed only after the container has started successfully, ensuring that those probes do not interfere with application startup. Startup probes can be used for liveness detection of slow-starting containers, preventing them from being killed before they are up and running. (A sketch follows below.)
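A minimal sketch (hypothetical values) of a startup probe protecting a slow-starting container: liveness checks begin only after the startup probe succeeds, allowing up to 30 × 10 = 300 seconds for startup.

apiVersion: v1
kind: Pod
metadata:
  name: startup-demo              # hypothetical name
spec:
  containers:
    - name: app
      image: nginx
      startupProbe:
        httpGet:
          path: /healthz          # hypothetical health endpoint
          port: 8080
        failureThreshold: 30      # up to 30 failures tolerated during startup
        periodSeconds: 10         # probed every 10 seconds
      livenessProbe:              # takes over once the startup probe succeeds
        httpGet:
          path: /healthz
          port: 8080
        periodSeconds: 10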

                      "},{"location":"admin/kpanda/workloads/pod-config/health-check.html#_2","title":"\u5b58\u6d3b\u548c\u5c31\u7eea\u68c0\u67e5","text":"

The configuration of the liveness check (LivenessProbe) and the readiness check (ReadinessProbe) share similar parameters; the only difference is that the readinessProbe field is used instead of the livenessProbe field.

HTTP GET parameter description:

| Parameter | Description |
| --- | --- |
| Path | The request path to access, e.g. the /healthz path in the example. |
| Port | The port the service listens on, e.g. port 8080 in the example. |
| Protocol | The access protocol, HTTP or HTTPS. |
| Initial delay (initialDelaySeconds) | The delay before checks begin, in seconds; this setting relates to the normal startup time of the application. For example, a value of 30 means the health check starts 30 seconds after the container starts, leaving that time for the application to start. |
| Timeout (timeoutSeconds) | The timeout, in seconds. For example, a value of 10 means the health check waits at most 10 seconds; if exceeded, the check is considered failed. If set to 0 or unset, the default timeout is 1 second. |
| Success threshold (successThreshold) | The minimum number of consecutive successes for the probe to be considered successful after having failed. Defaults to 1; the minimum is 1. For liveness and startup probes this value must be 1. |
| Failure threshold (failureThreshold) | The number of retries when the probe fails. For a liveness probe, giving up means restarting the container; for a readiness probe, giving up marks the Pod as not ready. Defaults to 3; the minimum is 1. |
"},{"location":"admin/kpanda/workloads/pod-config/health-check.html#http-get","title":"Check with HTTP GET Requests","text":"

YAML example:

apiVersion: v1\nkind: Pod\nmetadata:\n  labels:\n    test: liveness\n  name: liveness-http\nspec:\n  containers:\n  - name: liveness\n    image: k8s.gcr.io/liveness\n    args:\n    - /server\n    livenessProbe:\n      httpGet:\n        path: /healthz  # the request path to access\n        port: 8080  # the port the service listens on\n        httpHeaders:\n        - name: Custom-Header\n          value: Awesome\n      initialDelaySeconds: 3  # kubelet waits 3 seconds before performing the first probe\n      periodSeconds: 3   # kubelet performs a liveness probe every 3 seconds\n

Following the configured rules, the kubelet sends an HTTP GET request to the service running inside the container (listening on port 8080) to perform the probe. If the handler at the /healthz path on the server returns a success code, the kubelet considers the container healthy and alive. If the handler returns a failure code, the kubelet kills the container and restarts it. Any code greater than or equal to 200 and less than 400 indicates success; any other return code indicates failure. For the first 10 seconds of the container's life, the /healthz handler returns a status code of 200; after that, the handler returns a status code of 500.

                      "},{"location":"admin/kpanda/workloads/pod-config/health-check.html#tcp","title":"\u4f7f\u7528 TCP \u7aef\u53e3\u68c0\u67e5","text":"

TCP port parameter description:

| Parameter | Description |
| --- | --- |
| Port | The port the service listens on, e.g. port 8080 in the example. |
| Initial delay (initialDelaySeconds) | The delay before checks begin, in seconds; this setting relates to the normal startup time of the application. For example, a value of 30 means the health check starts 30 seconds after the container starts, leaving that time for the application to start. |
| Timeout (timeoutSeconds) | The timeout, in seconds. For example, a value of 10 means the health check waits at most 10 seconds; if exceeded, the check is considered failed. If set to 0 or unset, the default timeout is 1 second. |

                      \u5bf9\u4e8e\u63d0\u4f9bTCP\u901a\u4fe1\u670d\u52a1\u7684\u5bb9\u5668\uff0c\u57fa\u4e8e\u6b64\u914d\u7f6e\uff0c\u6309\u7167\u8bbe\u5b9a\u89c4\u5219\u96c6\u7fa4\u5bf9\u8be5\u5bb9\u5668\u5efa\u7acbTCP\u8fde\u63a5\uff0c\u5982\u679c\u8fde\u63a5\u6210\u529f\uff0c\u5219\u8bc1\u660e\u63a2\u6d4b\u6210\u529f\uff0c\u5426\u5219\u63a2\u6d4b\u5931\u8d25\u3002\u9009\u62e9TCP\u7aef\u53e3\u63a2\u6d4b\u65b9\u5f0f\uff0c\u5fc5\u987b\u6307\u5b9a\u5bb9\u5668\u76d1\u542c\u7684\u7aef\u53e3\u3002

                      YAML \u793a\u4f8b\uff1a

                      apiVersion: v1\nkind: Pod\nmetadata:\n  name: goproxy\n  labels:\n    app: goproxy\nspec:\n  containers:\n  - name: goproxy\n    image: k8s.gcr.io/goproxy:0.1\n    ports:\n    - containerPort: 8080\n    readinessProbe:\n      tcpSocket:\n        port: 8080\n      initialDelaySeconds: 5\n      periodSeconds: 10\n    livenessProbe:\n      tcpSocket:\n        port: 8080\n      initialDelaySeconds: 15\n      periodSeconds: 20\n

                      \u6b64\u793a\u4f8b\u540c\u65f6\u4f7f\u7528\u5c31\u7eea\u548c\u5b58\u6d3b\u63a2\u9488\u3002kubelet \u5728\u5bb9\u5668\u542f\u52a8 5 \u79d2\u540e\u53d1\u9001\u7b2c\u4e00\u4e2a\u5c31\u7eea\u63a2\u6d4b\u3002 \u5c1d\u8bd5\u8fde\u63a5 goproxy \u5bb9\u5668\u7684 8080 \u7aef\u53e3\uff0c \u5982\u679c\u63a2\u6d4b\u6210\u529f\uff0c\u8fd9\u4e2a Pod \u4f1a\u88ab\u6807\u8bb0\u4e3a\u5c31\u7eea\u72b6\u6001\uff0ckubelet \u5c06\u7ee7\u7eed\u6bcf\u9694 10 \u79d2\u8fd0\u884c\u4e00\u6b21\u68c0\u6d4b\u3002

                      \u9664\u4e86\u5c31\u7eea\u63a2\u6d4b\uff0c\u8fd9\u4e2a\u914d\u7f6e\u5305\u62ec\u4e86\u4e00\u4e2a\u5b58\u6d3b\u63a2\u6d4b\u3002 kubelet \u4f1a\u5728\u5bb9\u5668\u542f\u52a8 15 \u79d2\u540e\u8fdb\u884c\u7b2c\u4e00\u6b21\u5b58\u6d3b\u63a2\u6d4b\u3002 \u5c31\u7eea\u63a2\u6d4b\u4f1a\u5c1d\u8bd5\u8fde\u63a5 goproxy \u5bb9\u5668\u7684 8080 \u7aef\u53e3\u3002 \u5982\u679c\u5b58\u6d3b\u63a2\u6d4b\u5931\u8d25\uff0c\u5bb9\u5668\u4f1a\u88ab\u91cd\u65b0\u542f\u52a8\u3002

                      "},{"location":"admin/kpanda/workloads/pod-config/health-check.html#_3","title":"\u6267\u884c\u547d\u4ee4\u68c0\u67e5","text":"

                      YAML \u793a\u4f8b:

                      apiVersion: v1\nkind: Pod\nmetadata:\n  labels:\n    test: liveness\n  name: liveness-exec\nspec:\n  containers:\n  - name: liveness\n    image: k8s.gcr.io/busybox\n    args:\n    - /bin/sh\n    - -c\n    - touch /tmp/healthy; sleep 30; rm -f /tmp/healthy; sleep 600\n    livenessProbe:\n      exec:\n        command:\n        - cat\n        - /tmp/healthy\n      initialDelaySeconds: 5 # kubelet \u5728\u6267\u884c\u7b2c\u4e00\u6b21\u63a2\u6d4b\u524d\u7b49\u5f85 5 \u79d2\n      periodSeconds: 5  #kubelet \u6bcf 5 \u79d2\u6267\u884c\u4e00\u6b21\u5b58\u6d3b\u63a2\u6d4b\n

                      periodSeconds \u5b57\u6bb5\u6307\u5b9a\u4e86 kubelet \u6bcf 5 \u79d2\u6267\u884c\u4e00\u6b21\u5b58\u6d3b\u63a2\u6d4b\uff0c initialDelaySeconds \u5b57\u6bb5\u6307\u5b9a kubelet \u5728\u6267\u884c\u7b2c\u4e00\u6b21\u63a2\u6d4b\u524d\u7b49\u5f85 5 \u79d2\u3002\u6309\u7167\u8bbe\u5b9a\u89c4\u5219\uff0c\u96c6\u7fa4\u5468\u671f\u6027\u7684\u901a\u8fc7 kubelet \u5728\u5bb9\u5668\u5185\u6267\u884c\u547d\u4ee4 cat /tmp/healthy \u6765\u8fdb\u884c\u63a2\u6d4b\u3002 \u5982\u679c\u547d\u4ee4\u6267\u884c\u6210\u529f\u5e76\u4e14\u8fd4\u56de\u503c\u4e3a 0\uff0ckubelet \u5c31\u4f1a\u8ba4\u4e3a\u8fd9\u4e2a\u5bb9\u5668\u662f\u5065\u5eb7\u5b58\u6d3b\u7684\u3002 \u5982\u679c\u8fd9\u4e2a\u547d\u4ee4\u8fd4\u56de\u975e 0 \u503c\uff0ckubelet \u4f1a\u6740\u6b7b\u8fd9\u4e2a\u5bb9\u5668\u5e76\u91cd\u65b0\u542f\u52a8\u5b83\u3002

                      "},{"location":"admin/kpanda/workloads/pod-config/health-check.html#_4","title":"\u4f7f\u7528\u542f\u52a8\u524d\u68c0\u67e5\u4fdd\u62a4\u6162\u542f\u52a8\u5bb9\u5668","text":"

                      \u6709\u4e9b\u5e94\u7528\u5728\u542f\u52a8\u65f6\u9700\u8981\u8f83\u957f\u7684\u521d\u59cb\u5316\u65f6\u95f4\uff0c\u9700\u8981\u4f7f\u7528\u76f8\u540c\u7684\u547d\u4ee4\u6765\u8bbe\u7f6e\u542f\u52a8\u63a2\u6d4b\uff0c\u9488\u5bf9 HTTP \u6216 TCP \u68c0\u6d4b\uff0c\u53ef\u4ee5\u901a\u8fc7\u5c06 failureThreshold * periodSeconds \u53c2\u6570\u8bbe\u7f6e\u4e3a\u8db3\u591f\u957f\u7684\u65f6\u95f4\u6765\u5e94\u5bf9\u542f\u52a8\u9700\u8981\u8f83\u957f\u65f6\u95f4\u7684\u573a\u666f\u3002

                      YAML \u793a\u4f8b\uff1a

                      ports:\n- name: liveness-port\n  containerPort: 8080\n  hostPort: 8080\n\nlivenessProbe:\n  httpGet:\n    path: /healthz\n    port: liveness-port\n  failureThreshold: 1\n  periodSeconds: 10\n\nstartupProbe:\n  httpGet:\n    path: /healthz\n    port: liveness-port\n  failureThreshold: 30\n  periodSeconds: 10\n

                      \u5982\u4e0a\u8bbe\u7f6e\uff0c\u5e94\u7528\u5c06\u6709\u6700\u591a 5 \u5206\u949f\uff0830 * 10 = 300s\uff09\u7684\u65f6\u95f4\u6765\u5b8c\u6210\u542f\u52a8\u8fc7\u7a0b\uff0c \u4e00\u65e6\u542f\u52a8\u63a2\u6d4b\u6210\u529f\uff0c\u5b58\u6d3b\u63a2\u6d4b\u4efb\u52a1\u5c31\u4f1a\u63a5\u7ba1\u5bf9\u5bb9\u5668\u7684\u63a2\u6d4b\uff0c\u5bf9\u5bb9\u5668\u6b7b\u9501\u4f5c\u51fa\u5feb\u901f\u54cd\u5e94\u3002 \u5982\u679c\u542f\u52a8\u63a2\u6d4b\u4e00\u76f4\u6ca1\u6709\u6210\u529f\uff0c\u5bb9\u5668\u4f1a\u5728 300 \u79d2\u540e\u88ab\u6740\u6b7b\uff0c\u5e76\u4e14\u6839\u636e restartPolicy \u6765 \u6267\u884c\u8fdb\u4e00\u6b65\u5904\u7f6e\u3002

                      "},{"location":"admin/kpanda/workloads/pod-config/job-parameters.html","title":"\u4efb\u52a1\u53c2\u6570\u8bf4\u660e","text":"

                      \u6839\u636e .spec.completions \u548c .spec.Parallelism \u7684\u8bbe\u7f6e\uff0c\u53ef\u4ee5\u5c06\u4efb\u52a1\uff08Job\uff09\u5212\u5206\u4e3a\u4ee5\u4e0b\u51e0\u79cd\u7c7b\u578b:

                      Job \u7c7b\u578b \u8bf4\u660e \u975e\u5e76\u884c Job \u521b\u5efa\u4e00\u4e2a Pod \u76f4\u81f3\u5176 Job \u6210\u529f\u7ed3\u675f \u5177\u6709\u786e\u5b9a\u5b8c\u6210\u8ba1\u6570\u7684\u5e76\u884c Job \u5f53\u6210\u529f\u7684 Pod \u4e2a\u6570\u8fbe\u5230 .spec.completions \u65f6\uff0cJob \u88ab\u89c6\u4e3a\u5b8c\u6210 \u5e76\u884c Job \u521b\u5efa\u4e00\u4e2a\u6216\u591a\u4e2a Pod \u76f4\u81f3\u6709\u4e00\u4e2a\u6210\u529f\u7ed3\u675f

                      \u53c2\u6570\u8bf4\u660e

| Parameter | Description |
| --- | --- |
| RestartPolicy | The restart policy of the Job's Pods; for a Job, only Never or OnFailure is allowed |
| .spec.completions | The number of Pods that must finish successfully for the Job to complete; defaults to 1 |
| .spec.parallelism | The number of Pods running in parallel; defaults to 1 |
| .spec.backoffLimit | The maximum number of retries for failed Pods; retries stop once this count is exceeded |
| .spec.activeDeadlineSeconds | The Pod running deadline; once reached, the Job and all of its Pods stop. activeDeadlineSeconds has higher priority than backoffLimit: a Job that reaches activeDeadlineSeconds ignores the backoffLimit setting |

The following is an example Job configuration, saved in myjob.yaml, which computes π to 2000 digits and prints the output.

```yaml
apiVersion: batch/v1
kind: Job            # type of the current resource
metadata:
  name: myjob
spec:
  completions: 50    # the Job needs 50 Pods to finish successfully; in this example, π is printed 50 times
  parallelism: 5     # 5 Pods run in parallel
  backoffLimit: 5    # retry at most 5 times
  template:
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl",  "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never  # restart policy
```

Related commands

```shell
kubectl apply -f myjob.yaml   # start the Job
kubectl get job               # check the Job
kubectl logs myjob-1122dswzs  # view the logs of the Job's Pod
```
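
For reference, assuming the Job has already run to completion, the output of kubectl get job looks roughly like this (the values are illustrative):

```shell
kubectl get job
# NAME    COMPLETIONS   DURATION   AGE
# myjob   50/50         2m30s      5m
```
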
                      "},{"location":"admin/kpanda/workloads/pod-config/lifecycle.html","title":"\u914d\u7f6e\u5bb9\u5668\u751f\u547d\u5468\u671f","text":"

                      Pod \u9075\u5faa\u4e00\u4e2a\u9884\u5b9a\u4e49\u7684\u751f\u547d\u5468\u671f\uff0c\u8d77\u59cb\u4e8e Pending \u9636\u6bb5\uff0c\u5982\u679c Pod \u5185\u81f3\u5c11\u6709\u4e00\u4e2a\u5bb9\u5668\u6b63\u5e38\u542f\u52a8\uff0c\u5219\u8fdb\u5165 Running \u72b6\u6001\u3002\u5982\u679c Pod \u4e2d\u6709\u5bb9\u5668\u4ee5\u5931\u8d25\u72b6\u6001\u7ed3\u675f\uff0c\u5219\u72b6\u6001\u53d8\u4e3a Failed \u3002\u4ee5\u4e0b phase \u5b57\u6bb5\u503c\u8868\u660e\u4e86\u4e00\u4e2a Pod \u5904\u4e8e\u751f\u547d\u5468\u671f\u7684\u54ea\u4e2a\u9636\u6bb5\u3002

                      \u503c \u63cf\u8ff0 Pending \uff08\u60ac\u51b3\uff09 Pod \u5df2\u88ab\u7cfb\u7edf\u63a5\u53d7\uff0c\u4f46\u6709\u4e00\u4e2a\u6216\u8005\u591a\u4e2a\u5bb9\u5668\u5c1a\u672a\u521b\u5efa\u4ea6\u672a\u8fd0\u884c\u3002\u8fd9\u4e2a\u9636\u6bb5\u5305\u62ec\u7b49\u5f85 Pod \u88ab\u8c03\u5ea6\u7684\u65f6\u95f4\u548c\u901a\u8fc7\u7f51\u7edc\u4e0b\u8f7d\u955c\u50cf\u7684\u65f6\u95f4\u3002 Running \uff08\u8fd0\u884c\u4e2d\uff09 Pod \u5df2\u7ecf\u7ed1\u5b9a\u5230\u4e86\u67d0\u4e2a\u8282\u70b9\uff0cPod \u4e2d\u7684\u6240\u6709\u5bb9\u5668\u90fd\u5df2\u88ab\u521b\u5efa\u3002\u81f3\u5c11\u6709\u4e00\u4e2a\u5bb9\u5668\u4ecd\u5728\u8fd0\u884c\uff0c\u6216\u8005\u6b63\u5904\u4e8e\u542f\u52a8\u6216\u91cd\u542f\u72b6\u6001\u3002 Succeeded \uff08\u6210\u529f\uff09 Pod \u4e2d\u7684\u6240\u6709\u5bb9\u5668\u90fd\u5df2\u6210\u529f\u7ec8\u6b62\uff0c\u5e76\u4e14\u4e0d\u4f1a\u518d\u91cd\u542f\u3002 Failed \uff08\u5931\u8d25\uff09 Pod \u4e2d\u7684\u6240\u6709\u5bb9\u5668\u90fd\u5df2\u7ec8\u6b62\uff0c\u5e76\u4e14\u81f3\u5c11\u6709\u4e00\u4e2a\u5bb9\u5668\u662f\u56e0\u4e3a\u5931\u8d25\u800c\u7ec8\u6b62\u3002\u4e5f\u5c31\u662f\u8bf4\uff0c\u5bb9\u5668\u4ee5\u975e 0 \u72b6\u6001\u9000\u51fa\u6216\u8005\u88ab\u7cfb\u7edf\u7ec8\u6b62\u3002 Unknown \uff08\u672a\u77e5\uff09 \u56e0\u4e3a\u67d0\u4e9b\u539f\u56e0\u65e0\u6cd5\u53d6\u5f97 Pod \u7684\u72b6\u6001\uff0c\u8fd9\u79cd\u60c5\u51b5\u901a\u5e38\u662f\u56e0\u4e3a\u4e0e Pod \u6240\u5728\u4e3b\u673a\u901a\u4fe1\u5931\u8d25\u6240\u81f4\u3002

When creating a workload in Suanova AI computing platform container management, an image is usually used to specify the runtime environment in the container. By default, when an image is built, the Entrypoint and CMD fields define the command and arguments executed when the container runs. To change the commands and arguments run before container startup, after startup, and before stop, you can override the image defaults by configuring the container's lifecycle event commands and arguments, as sketched below.
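
As a minimal sketch (the image and commands are placeholders), the command and args fields in a Pod spec override the image's Entrypoint and CMD respectively:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: command-demo
spec:
  containers:
  - name: demo
    image: debian             # placeholder image
    command: ["printenv"]     # overrides the image's Entrypoint
    args: ["HOSTNAME"]        # overrides the image's CMD
  restartPolicy: OnFailure
```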

                      "},{"location":"admin/kpanda/workloads/pod-config/lifecycle.html#_2","title":"\u751f\u547d\u5468\u671f\u914d\u7f6e","text":"

                      \u6839\u636e\u4e1a\u52a1\u9700\u8981\u5bf9\u5bb9\u5668\u7684\u542f\u52a8\u547d\u4ee4\u3001\u542f\u52a8\u540e\u547d\u4ee4\u3001\u505c\u6b62\u524d\u547d\u4ee4\u8fdb\u884c\u914d\u7f6e\u3002

                      \u53c2\u6570 \u8bf4\u660e \u4e3e\u4f8b\u503c \u542f\u52a8\u547d\u4ee4 \u3010\u7c7b\u578b\u3011\u9009\u586b\u3010\u542b\u4e49\u3011\u5bb9\u5668\u5c06\u6309\u7167\u542f\u52a8\u547d\u4ee4\u8fdb\u884c\u542f\u52a8\u3002 \u542f\u52a8\u540e\u547d\u4ee4 \u3010\u7c7b\u578b\u3011\u9009\u586b\u3010\u542b\u4e49\u3011\u5bb9\u5668\u542f\u52a8\u540e\u51fa\u53d1\u7684\u547d\u4ee4 \u505c\u6b62\u524d\u547d\u4ee4 \u3010\u7c7b\u578b\u3011\u9009\u586b\u3010\u542b\u4e49\u3011\u5bb9\u5668\u5728\u6536\u5230\u505c\u6b62\u547d\u4ee4\u540e\u6267\u884c\u7684\u547d\u4ee4\u3002\u786e\u4fdd\u5347\u7ea7\u6216\u5b9e\u4f8b\u5220\u9664\u65f6\u53ef\u63d0\u524d\u5c06\u5b9e\u4f8b\u4e2d\u8fd0\u884c\u7684\u4e1a\u52a1\u6392\u6c34\u3002 --"},{"location":"admin/kpanda/workloads/pod-config/lifecycle.html#_3","title":"\u542f\u52a8\u547d\u4ee4","text":"

                      \u6839\u636e\u4e0b\u8868\u5bf9\u542f\u52a8\u547d\u4ee4\u8fdb\u884c\u914d\u7f6e\u3002

                      \u53c2\u6570 \u8bf4\u660e \u4e3e\u4f8b\u503c \u8fd0\u884c\u547d\u4ee4 \u3010\u7c7b\u578b\u3011\u5fc5\u586b\u3010\u542b\u4e49\u3011\u8f93\u5165\u53ef\u6267\u884c\u7684\u547d\u4ee4\uff0c\u591a\u4e2a\u547d\u4ee4\u4e4b\u95f4\u7528\u7a7a\u683c\u8fdb\u884c\u5206\u5272\uff0c\u5982\u547d\u4ee4\u672c\u8eab\u5e26\u7a7a\u683c\uff0c\u5219\u9700\u8981\u52a0\uff08\u201c\u201d\uff09\u3002\u3010\u542b\u4e49\u3011\u591a\u547d\u4ee4\u65f6\uff0c\u8fd0\u884c\u547d\u4ee4\u5efa\u8bae\u7528/bin/sh\u6216\u5176\u4ed6\u7684shell\uff0c\u5176\u4ed6\u5168\u90e8\u547d\u4ee4\u4f5c\u4e3a\u53c2\u6570\u6765\u4f20\u5165\u3002 /run/server \u8fd0\u884c\u53c2\u6570 \u3010\u7c7b\u578b\u3011\u9009\u586b\u3010\u542b\u4e49\u3011\u8f93\u5165\u63a7\u5236\u5bb9\u5668\u8fd0\u884c\u547d\u4ee4\u53c2\u6570\u3002 port=8080"},{"location":"admin/kpanda/workloads/pod-config/lifecycle.html#_4","title":"\u542f\u52a8\u540e\u547d\u4ee4","text":"

Suanova AI computing platform provides two processing types, command-line script and HTTP request, for configuring the post-start command. You can choose the configuration method that suits you according to the table below.

Command-line script configuration

| Parameter | Description | Example value |
| --- | --- | --- |
| Run command | [Type] Optional [Meaning] Enter an executable command, separating multiple commands with spaces; if a command itself contains spaces, wrap it in quotes (""). [Meaning] With multiple commands, it is recommended to use /bin/sh or another shell as the run command and pass all other commands in as arguments. | /run/server |
| Run arguments | [Type] Optional [Meaning] Enter the arguments of the container's run command. | port=8080 |
"},{"location":"admin/kpanda/workloads/pod-config/lifecycle.html#_5","title":"Pre-Stop Command","text":"

Suanova AI computing platform provides two processing types, command-line script and HTTP request, for configuring the pre-stop command. You can choose the configuration method that suits you according to the table below; a minimal Pod-spec sketch of both hook types is shown first.
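
A minimal sketch of what these settings translate to in the Pod spec (the image, command, and URL path are placeholders): a postStart hook of the command-line script type and a preStop hook of the HTTP request type:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: lifecycle-demo
spec:
  containers:
  - name: demo
    image: nginx  # placeholder image
    lifecycle:
      postStart:
        exec:     # command-line script type
          command: ["/bin/sh", "-c", "echo started > /tmp/started"]
      preStop:
        httpGet:  # HTTP request type
          path: /drain   # placeholder URL path
          port: 8080
```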

HTTP request configuration

| Parameter | Description | Example value |
| --- | --- | --- |
| URL path | [Type] Optional [Meaning] The URL path of the request. | /run/server |
| Port | [Type] Required [Meaning] The port of the request. | port=8080 |
| Node address | [Type] Optional [Meaning] The IP address of the request; defaults to the IP of the node where the container runs. | -- |
"},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html","title":"Scheduling Policy","text":"

In a Kubernetes cluster, nodes also have labels. You can add labels manually, and Kubernetes also adds some standard labels to all nodes in a cluster; see Well-Known Labels, Annotations and Taints for common node labels. By adding labels to nodes, you can have Pods scheduled onto specific nodes or node groups, and use this to ensure that particular Pods only run on nodes with certain isolation, security, or regulatory properties.

nodeSelector is the simplest recommended form of node selection constraint. You can add the nodeSelector field to a Pod's spec and set the node labels you want the target node to have; Kubernetes only schedules the Pod onto nodes that carry every specified label (see the nodeSelector sketch after the list below). nodeSelector is the simplest way to constrain Pods to nodes with specific labels, while affinity and anti-affinity expand the types of constraints you can define. Some benefits of affinity and anti-affinity are:

• The affinity and anti-affinity language is more expressive. nodeSelector can only select nodes that carry all of the specified labels, while affinity and anti-affinity give you stronger control over the selection logic.

• You can mark a rule as a "soft requirement" or "preference", so that when no matching node can be found, the scheduler ignores the affinity/anti-affinity rule and still schedules the Pod successfully.

• You can constrain scheduling using the labels of other Pods running on a node (or in another topology domain), rather than only the node's own labels. This lets you define rules for which Pods may be placed together.

You can choose the nodes onto which Pods are deployed by configuring affinity and anti-affinity.
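
A minimal nodeSelector sketch (disktype: ssd is a placeholder; use a label that actually exists on your nodes):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: nginx
spec:
  containers:
  - name: nginx
    image: nginx
  nodeSelector:
    disktype: ssd  # placeholder node label
```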

                      "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_2","title":"\u5bb9\u5fcd\u65f6\u95f4","text":"

                      \u5f53\u5de5\u4f5c\u8d1f\u8f7d\u5b9e\u4f8b\u6240\u5728\u7684\u8282\u70b9\u4e0d\u53ef\u7528\u65f6\uff0c\u7cfb\u7edf\u5c06\u5b9e\u4f8b\u91cd\u65b0\u8c03\u5ea6\u5230\u5176\u5b83\u53ef\u7528\u8282\u70b9\u7684\u65f6\u95f4\u7a97\u3002\u9ed8\u8ba4\u4e3a 300 \u79d2\u3002
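
In the Pod spec, this corresponds to tolerationSeconds on the not-ready/unreachable tolerations; a minimal sketch:

```yaml
tolerations:
- key: node.kubernetes.io/not-ready
  operator: Exists
  effect: NoExecute
  tolerationSeconds: 300  # evict and reschedule after the node has been NotReady for 300s
- key: node.kubernetes.io/unreachable
  operator: Exists
  effect: NoExecute
  tolerationSeconds: 300
```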

                      "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#nodeaffinity","title":"\u8282\u70b9\u4eb2\u548c\u6027\uff08nodeAffinity\uff09","text":"

                      \u8282\u70b9\u4eb2\u548c\u6027\u6982\u5ff5\u4e0a\u7c7b\u4f3c\u4e8e nodeSelector \uff0c \u5b83\u4f7f\u60a8\u53ef\u4ee5\u6839\u636e\u8282\u70b9\u4e0a\u7684\u6807\u7b7e\u6765\u7ea6\u675f Pod \u53ef\u4ee5\u8c03\u5ea6\u5230\u54ea\u4e9b\u8282\u70b9\u4e0a\u3002 \u8282\u70b9\u4eb2\u548c\u6027\u6709\u4e24\u79cd\uff1a

                      • \u5fc5\u987b\u6ee1\u8db3\uff1a\uff08 requiredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u53ea\u6709\u5728\u89c4\u5219\u88ab\u6ee1\u8db3\u7684\u65f6\u5019\u624d\u80fd\u6267\u884c\u8c03\u5ea6\u3002\u6b64\u529f\u80fd\u7c7b\u4f3c\u4e8e nodeSelector \uff0c \u4f46\u5176\u8bed\u6cd5\u8868\u8fbe\u80fd\u529b\u66f4\u5f3a\u3002\u60a8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002

                      • \u5c3d\u91cf\u6ee1\u8db3\uff1a\uff08 preferredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u4f1a\u5c1d\u8bd5\u5bfb\u627e\u6ee1\u8db3\u5bf9\u5e94\u89c4\u5219\u7684\u8282\u70b9\u3002\u5982\u679c\u627e\u4e0d\u5230\u5339\u914d\u7684\u8282\u70b9\uff0c\u8c03\u5ea6\u5668\u4ecd\u7136\u4f1a\u8c03\u5ea6\u8be5 Pod\u3002\u60a8\u8fd8\u53ef\u4e3a\u8f6f\u7ea6\u675f\u89c4\u5219\u8bbe\u5b9a\u6743\u91cd\uff0c\u5177\u4f53\u8c03\u5ea6\u65f6\uff0c\u82e5\u5b58\u5728\u591a\u4e2a\u7b26\u5408\u6761\u4ef6\u7684\u8282\u70b9\uff0c\u6743\u91cd\u6700\u5927\u7684\u8282\u70b9\u4f1a\u88ab\u4f18\u5148\u8c03\u5ea6\u3002\u540c\u65f6\u60a8\u8fd8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002

                      "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_3","title":"\u6807\u7b7e\u540d","text":"

                      \u5bf9\u5e94\u8282\u70b9\u7684\u6807\u7b7e\uff0c\u53ef\u4ee5\u4f7f\u7528\u9ed8\u8ba4\u7684\u6807\u7b7e\u4e5f\u53ef\u4ee5\u7528\u6237\u81ea\u5b9a\u4e49\u6807\u7b7e\u3002

                      "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_4","title":"\u64cd\u4f5c\u7b26","text":"
                      • In\uff1a\u6807\u7b7e\u503c\u9700\u8981\u5728 values \u7684\u5217\u8868\u4e2d
                      • NotIn\uff1a\u6807\u7b7e\u7684\u503c\u4e0d\u5728\u67d0\u4e2a\u5217\u8868\u4e2d
                      • Exists\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                      • DoesNotExist\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u4e0d\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                      • Gt\uff1a\u6807\u7b7e\u7684\u503c\u5927\u4e8e\u67d0\u4e2a\u503c\uff08\u5b57\u7b26\u4e32\u6bd4\u8f83\uff09
                      • Lt\uff1a\u6807\u7b7e\u7684\u503c\u5c0f\u4e8e\u67d0\u4e2a\u503c\uff08\u5b57\u7b26\u4e32\u6bd4\u8f83\uff09
                      "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_5","title":"\u6743\u91cd","text":"

                      \u4ec5\u652f\u6301\u5728\u201c\u5c3d\u91cf\u6ee1\u8db3\u201d\u7b56\u7565\u4e2d\u6dfb\u52a0\uff0c\u53ef\u4ee5\u7406\u89e3\u4e3a\u8c03\u5ea6\u7684\u4f18\u5148\u7ea7\uff0c\u6743\u91cd\u5927\u7684\u4f1a\u88ab\u4f18\u5148\u8c03\u5ea6\u3002\u53d6\u503c\u8303\u56f4\u662f 1 \u5230 100\u3002

                      "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_6","title":"\u5de5\u4f5c\u8d1f\u8f7d\u4eb2\u548c\u6027","text":"

                      \u4e0e\u8282\u70b9\u4eb2\u548c\u6027\u7c7b\u4f3c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u7684\u4eb2\u548c\u6027\u4e5f\u6709\u4e24\u79cd\u7c7b\u578b\uff1a

                      • \u5fc5\u987b\u6ee1\u8db3\uff1a\uff08 requiredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u53ea\u6709\u5728\u89c4\u5219\u88ab\u6ee1\u8db3\u7684\u65f6\u5019\u624d\u80fd\u6267\u884c\u8c03\u5ea6\u3002\u6b64\u529f\u80fd\u7c7b\u4f3c\u4e8e nodeSelector \uff0c \u4f46\u5176\u8bed\u6cd5\u8868\u8fbe\u80fd\u529b\u66f4\u5f3a\u3002\u60a8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002
                      • \u5c3d\u91cf\u6ee1\u8db3\uff1a\uff08 preferredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u4f1a\u5c1d\u8bd5\u5bfb\u627e\u6ee1\u8db3\u5bf9\u5e94\u89c4\u5219\u7684\u8282\u70b9\u3002\u5982\u679c\u627e\u4e0d\u5230\u5339\u914d\u7684\u8282\u70b9\uff0c\u8c03\u5ea6\u5668\u4ecd\u7136\u4f1a\u8c03\u5ea6\u8be5 Pod\u3002\u60a8\u8fd8\u53ef\u4e3a\u8f6f\u7ea6\u675f\u89c4\u5219\u8bbe\u5b9a\u6743\u91cd\uff0c\u5177\u4f53\u8c03\u5ea6\u65f6\uff0c\u82e5\u5b58\u5728\u591a\u4e2a\u7b26\u5408\u6761\u4ef6\u7684\u8282\u70b9\uff0c\u6743\u91cd\u6700\u5927\u7684\u8282\u70b9\u4f1a\u88ab\u4f18\u5148\u8c03\u5ea6\u3002\u540c\u65f6\u60a8\u8fd8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002

                      \u5de5\u4f5c\u8d1f\u8f7d\u7684\u4eb2\u548c\u6027\u4e3b\u8981\u7528\u6765\u51b3\u5b9a\u5de5\u4f5c\u8d1f\u8f7d\u7684 Pod \u53ef\u4ee5\u548c\u54ea\u4e9b Pod\u90e8 \u7f72\u5728\u540c\u4e00\u62d3\u6251\u57df\u3002\u4f8b\u5982\uff0c\u5bf9\u4e8e\u76f8\u4e92\u901a\u4fe1\u7684\u670d\u52a1\uff0c\u53ef\u901a\u8fc7\u5e94\u7528\u4eb2\u548c\u6027\u8c03\u5ea6\uff0c\u5c06\u5176\u90e8\u7f72\u5230\u540c\u4e00\u62d3\u6251\u57df\uff08\u5982\u540c\u4e00\u53ef\u7528\u533a\uff09\u4e2d\uff0c\u51cf\u5c11\u5b83\u4eec\u4e4b\u95f4\u7684\u7f51\u7edc\u5ef6\u8fdf\u3002
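
A minimal podAffinity sketch (the app: backend label is a placeholder for the Pods you want to be co-located with):

```yaml
affinity:
  podAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
    - labelSelector:
        matchExpressions:
        - key: app
          operator: In
          values:
          - backend   # placeholder label of the Pods to co-locate with
      topologyKey: topology.kubernetes.io/zone  # co-locate within the same zone
```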

                      "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_7","title":"\u6807\u7b7e\u540d","text":"

                      \u5bf9\u5e94\u8282\u70b9\u7684\u6807\u7b7e\uff0c\u53ef\u4ee5\u4f7f\u7528\u9ed8\u8ba4\u7684\u6807\u7b7e\u4e5f\u53ef\u4ee5\u7528\u6237\u81ea\u5b9a\u4e49\u6807\u7b7e\u3002

                      "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_8","title":"\u547d\u540d\u7a7a\u95f4","text":"

                      \u6307\u5b9a\u8c03\u5ea6\u7b56\u7565\u751f\u6548\u7684\u547d\u540d\u7a7a\u95f4\u3002

                      "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_9","title":"\u64cd\u4f5c\u7b26","text":"
                      • In\uff1a\u6807\u7b7e\u503c\u9700\u8981\u5728 values \u7684\u5217\u8868\u4e2d
                      • NotIn\uff1a\u6807\u7b7e\u7684\u503c\u4e0d\u5728\u67d0\u4e2a\u5217\u8868\u4e2d
                      • Exists\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                      • DoesNotExist\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u4e0d\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                      "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_10","title":"\u62d3\u6251\u57df","text":"

                      \u6307\u5b9a\u8c03\u5ea6\u65f6\u7684\u5f71\u54cd\u8303\u56f4\u3002\u4f8b\u5982\uff0c\u5982\u679c\u6307\u5b9a\u4e3a kubernetes.io/Clustername \u8868\u793a\u4ee5 Node \u8282\u70b9\u4e3a\u533a\u5206\u8303\u56f4\u3002

                      "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_11","title":"\u5de5\u4f5c\u8d1f\u8f7d\u53cd\u4eb2\u548c\u6027","text":"

                      \u4e0e\u8282\u70b9\u4eb2\u548c\u6027\u7c7b\u4f3c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u7684\u53cd\u4eb2\u548c\u6027\u4e5f\u6709\u4e24\u79cd\u7c7b\u578b\uff1a

                      • \u5fc5\u987b\u6ee1\u8db3\uff1a\uff08 requiredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u53ea\u6709\u5728\u89c4\u5219\u88ab\u6ee1\u8db3\u7684\u65f6\u5019\u624d\u80fd\u6267\u884c\u8c03\u5ea6\u3002\u6b64\u529f\u80fd\u7c7b\u4f3c\u4e8e nodeSelector \uff0c \u4f46\u5176\u8bed\u6cd5\u8868\u8fbe\u80fd\u529b\u66f4\u5f3a\u3002\u60a8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002
                      • \u5c3d\u91cf\u6ee1\u8db3\uff1a\uff08 preferredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u4f1a\u5c1d\u8bd5\u5bfb\u627e\u6ee1\u8db3\u5bf9\u5e94\u89c4\u5219\u7684\u8282\u70b9\u3002\u5982\u679c\u627e\u4e0d\u5230\u5339\u914d\u7684\u8282\u70b9\uff0c\u8c03\u5ea6\u5668\u4ecd\u7136\u4f1a\u8c03\u5ea6\u8be5 Pod\u3002\u60a8\u8fd8\u53ef\u4e3a\u8f6f\u7ea6\u675f\u89c4\u5219\u8bbe\u5b9a\u6743\u91cd\uff0c\u5177\u4f53\u8c03\u5ea6\u65f6\uff0c\u82e5\u5b58\u5728\u591a\u4e2a\u7b26\u5408\u6761\u4ef6\u7684\u8282\u70b9\uff0c\u6743\u91cd\u6700\u5927\u7684\u8282\u70b9\u4f1a\u88ab\u4f18\u5148\u8c03\u5ea6\u3002\u540c\u65f6\u60a8\u8fd8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002

                      \u5de5\u4f5c\u8d1f\u8f7d\u7684\u53cd\u4eb2\u548c\u6027\u4e3b\u8981\u7528\u6765\u51b3\u5b9a\u5de5\u4f5c\u8d1f\u8f7d\u7684 Pod \u4e0d\u53ef\u4ee5\u548c\u54ea\u4e9b Pod \u90e8\u7f72\u5728\u540c\u4e00\u62d3\u6251\u57df\u3002\u4f8b\u5982\uff0c\u5c06\u4e00\u4e2a\u8d1f\u8f7d\u7684\u76f8\u540c Pod \u5206\u6563\u90e8\u7f72\u5230\u4e0d\u540c\u7684\u62d3\u6251\u57df\uff08\u4f8b\u5982\u4e0d\u540c\u4e3b\u673a\uff09\u4e2d\uff0c\u63d0\u9ad8\u8d1f\u8f7d\u672c\u8eab\u7684\u7a33\u5b9a\u6027\u3002
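
A minimal podAntiAffinity sketch (the app: web label is a placeholder for this workload's own Pods):

```yaml
affinity:
  podAntiAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
    - labelSelector:
        matchExpressions:
        - key: app
          operator: In
          values:
          - web     # placeholder label of this workload's own Pods
      topologyKey: kubernetes.io/hostname  # spread replicas across different hosts
```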

                      "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_12","title":"\u6807\u7b7e\u540d","text":"

                      \u5bf9\u5e94\u8282\u70b9\u7684\u6807\u7b7e\uff0c\u53ef\u4ee5\u4f7f\u7528\u9ed8\u8ba4\u7684\u6807\u7b7e\u4e5f\u53ef\u4ee5\u7528\u6237\u81ea\u5b9a\u4e49\u6807\u7b7e\u3002

                      "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_13","title":"\u547d\u540d\u7a7a\u95f4","text":"

                      \u6307\u5b9a\u8c03\u5ea6\u7b56\u7565\u751f\u6548\u7684\u547d\u540d\u7a7a\u95f4\u3002

                      "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_14","title":"\u64cd\u4f5c\u7b26","text":"
                      • In\uff1a\u6807\u7b7e\u503c\u9700\u8981\u5728 values \u7684\u5217\u8868\u4e2d
                      • NotIn\uff1a\u6807\u7b7e\u7684\u503c\u4e0d\u5728\u67d0\u4e2a\u5217\u8868\u4e2d
                      • Exists\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                      • DoesNotExist\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u4e0d\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                      "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_15","title":"\u62d3\u6251\u57df","text":"

                      \u6307\u5b9a\u8c03\u5ea6\u65f6\u7684\u5f71\u54cd\u8303\u56f4\u3002\u4f8b\u5982\uff0c\u5982\u679c\u6307\u5b9a\u4e3a kubernetes.io/Clustername \u8868\u793a\u4ee5 Node \u8282\u70b9\u4e3a\u533a\u5206\u8303\u56f4\u3002

                      "},{"location":"admin/kpanda/workloads/pod-config/workload-status.html","title":"\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001","text":"

                      \u5de5\u4f5c\u8d1f\u8f7d\u662f\u8fd0\u884c\u5728 Kubernetes \u4e0a\u7684\u4e00\u4e2a\u5e94\u7528\u7a0b\u5e8f\uff0c\u5728 Kubernetes \u4e2d\uff0c\u65e0\u8bba\u60a8\u7684\u5e94\u7528\u7a0b\u5e8f\u662f\u7531\u5355\u4e2a\u540c\u4e00\u7ec4\u4ef6\u6216\u662f\u7531\u591a\u4e2a\u4e0d\u540c\u7684\u7ec4\u4ef6\u6784\u6210\uff0c\u90fd\u53ef\u4ee5\u4f7f\u7528\u4e00\u7ec4 Pod \u6765\u8fd0\u884c\u5b83\u3002Kubernetes \u63d0\u4f9b\u4e86\u4e94\u79cd\u5185\u7f6e\u7684\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u6765\u7ba1\u7406 Pod\uff1a

                      • \u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d
                      • \u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d
                      • \u5b88\u62a4\u8fdb\u7a0b
                      • \u4efb\u52a1
                      • \u5b9a\u65f6\u4efb\u52a1

                      \u60a8\u4e5f\u53ef\u4ee5\u901a\u8fc7\u8bbe\u7f6e\u81ea\u5b9a\u4e49\u8d44\u6e90 CRD \u6765\u5b9e\u73b0\u5bf9\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u7684\u6269\u5c55\u3002\u5728\u7b2c\u4e94\u4ee3\u5bb9\u5668\u7ba1\u7406\u4e2d\uff0c\u652f\u6301\u5bf9\u5de5\u4f5c\u8d1f\u8f7d\u8fdb\u884c\u521b\u5efa\u3001\u66f4\u65b0\u3001\u6269\u5bb9\u3001\u76d1\u63a7\u3001\u65e5\u5fd7\u3001\u5220\u9664\u3001\u7248\u672c\u7ba1\u7406\u7b49\u5168\u751f\u547d\u5468\u671f\u7ba1\u7406\u3002

                      "},{"location":"admin/kpanda/workloads/pod-config/workload-status.html#pod","title":"Pod \u72b6\u6001","text":"

                      Pod \u662f Kuberneters \u4e2d\u521b\u5efa\u548c\u7ba1\u7406\u7684\u3001\u6700\u5c0f\u7684\u8ba1\u7b97\u5355\u5143\uff0c\u5373\u4e00\u7ec4\u5bb9\u5668\u7684\u96c6\u5408\u3002\u8fd9\u4e9b\u5bb9\u5668\u5171\u4eab\u5b58\u50a8\u3001\u7f51\u7edc\u4ee5\u53ca\u7ba1\u7406\u63a7\u5236\u5bb9\u5668\u8fd0\u884c\u65b9\u5f0f\u7684\u7b56\u7565\u3002 Pod \u901a\u5e38\u4e0d\u7531\u7528\u6237\u76f4\u63a5\u521b\u5efa\uff0c\u800c\u662f\u901a\u8fc7\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u6765\u521b\u5efa\u3002 Pod \u9075\u5faa\u4e00\u4e2a\u9884\u5b9a\u4e49\u7684\u751f\u547d\u5468\u671f\uff0c\u8d77\u59cb\u4e8e Pending \u9636\u6bb5\uff0c\u5982\u679c\u81f3\u5c11\u5176\u4e2d\u6709\u4e00\u4e2a\u4e3b\u8981\u5bb9\u5668\u6b63\u5e38\u542f\u52a8\uff0c\u5219\u8fdb\u5165 Running \uff0c\u4e4b\u540e\u53d6\u51b3\u4e8e Pod \u4e2d\u662f\u5426\u6709\u5bb9\u5668\u4ee5\u5931\u8d25\u72b6\u6001\u7ed3\u675f\u800c\u8fdb\u5165 Succeeded \u6216\u8005 Failed \u9636\u6bb5\u3002

                      "},{"location":"admin/kpanda/workloads/pod-config/workload-status.html#_2","title":"\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001","text":"

                      \u7b2c\u4e94\u4ee3\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4f9d\u636e Pod \u7684\u72b6\u6001\u3001\u526f\u672c\u6570\u7b49\u56e0\u7d20\uff0c\u8bbe\u8ba1\u4e86\u4e00\u79cd\u5185\u7f6e\u7684\u5de5\u4f5c\u8d1f\u8f7d\u751f\u547d\u5468\u671f\u7684\u72b6\u6001\u96c6\uff0c\u4ee5\u8ba9\u7528\u6237\u80fd\u591f\u66f4\u52a0\u771f\u5b9e\u7684\u611f\u77e5\u5de5\u4f5c\u8d1f\u8f7d\u8fd0\u884c\u60c5\u51b5\u3002 \u7531\u4e8e\u4e0d\u540c\u7684\u5de5\u4f5c\u8d1f\u8f7d\u7c7b\u578b\uff08\u6bd4\u5982\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u548c\u4efb\u52a1\uff09\u5bf9 Pod \u7684\u7ba1\u7406\u673a\u5236\u4e0d\u4e00\u81f4\uff0c\u56e0\u6b64\uff0c\u4e0d\u540c\u7684\u5de5\u4f5c\u8d1f\u8f7d\u5728\u8fd0\u884c\u8fc7\u7a0b\u4e2d\u4f1a\u5448\u73b0\u4e0d\u540c\u7684\u751f\u547d\u5468\u671f\u72b6\u6001\uff0c\u5177\u4f53\u5982\u4e0b\u8868\uff1a

                      "},{"location":"admin/kpanda/workloads/pod-config/workload-status.html#_3","title":"\u65e0\u72b6\u6001\u8d1f\u8f7d\u3001\u6709\u72b6\u6001\u8d1f\u8f7d\u3001\u5b88\u62a4\u8fdb\u7a0b\u72b6\u6001","text":"\u72b6\u6001 \u63cf\u8ff0 \u7b49\u5f85\u4e2d 1. \u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u6b63\u5728\u8fdb\u884c\u4e2d\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u30022. \u89e6\u53d1\u5347\u7ea7\u6216\u8005\u56de\u6eda\u52a8\u4f5c\u540e\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u30023. \u89e6\u53d1\u6682\u505c/\u6269\u7f29\u5bb9\u7b49\u64cd\u4f5c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u5728\u6b64\u72b6\u6001\u3002 \u8fd0\u884c\u4e2d \u8d1f\u8f7d\u4e0b\u7684\u6240\u6709\u5b9e\u4f8b\u90fd\u5728\u8fd0\u884c\u4e2d\u4e14\u526f\u672c\u6570\u4e0e\u7528\u6237\u9884\u5b9a\u4e49\u7684\u6570\u91cf\u4e00\u81f4\u65f6\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5220\u9664\u4e2d \u6267\u884c\u5220\u9664\u64cd\u4f5c\u65f6\uff0c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\uff0c\u76f4\u5230\u5220\u9664\u5b8c\u6210\u3002 \u5f02\u5e38 \u56e0\u4e3a\u67d0\u4e9b\u539f\u56e0\u65e0\u6cd5\u53d6\u5f97\u5de5\u4f5c\u8d1f\u8f7d\u7684\u72b6\u6001\u3002\u8fd9\u79cd\u60c5\u51b5\u901a\u5e38\u662f\u56e0\u4e3a\u4e0e Pod \u6240\u5728\u4e3b\u673a\u901a\u4fe1\u5931\u8d25\u3002 \u672a\u5c31\u7eea \u5bb9\u5668\u5904\u4e8e\u5f02\u5e38\uff0cpending \u72b6\u6001\u65f6\uff0c\u56e0\u672a\u77e5\u9519\u8bef\u5bfc\u81f4\u8d1f\u8f7d\u65e0\u6cd5\u542f\u52a8\u65f6\u663e\u793a\u6b64\u72b6\u6001"},{"location":"admin/kpanda/workloads/pod-config/workload-status.html#_4","title":"\u4efb\u52a1\u72b6\u6001","text":"\u72b6\u6001 \u63cf\u8ff0 \u7b49\u5f85\u4e2d \u4efb\u52a1\u521b\u5efa\u6b63\u5728\u8fdb\u884c\u4e2d\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u6267\u884c\u4e2d \u4efb\u52a1\u6b63\u5728\u6267\u884c\u4e2d\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u6267\u884c\u5b8c\u6210 \u4efb\u52a1\u6267\u884c\u5b8c\u6210\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5220\u9664\u4e2d \u89e6\u53d1\u5220\u9664\u64cd\u4f5c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u5728\u6b64\u72b6\u6001\u3002 \u5f02\u5e38 \u56e0\u4e3a\u67d0\u4e9b\u539f\u56e0\u65e0\u6cd5\u53d6\u5f97 Pod \u7684\u72b6\u6001\u3002\u8fd9\u79cd\u60c5\u51b5\u901a\u5e38\u662f\u56e0\u4e3a\u4e0e Pod \u6240\u5728\u4e3b\u673a\u901a\u4fe1\u5931\u8d25\u3002"},{"location":"admin/kpanda/workloads/pod-config/workload-status.html#_5","title":"\u5b9a\u65f6\u4efb\u52a1\u72b6\u6001","text":"\u72b6\u6001 \u63cf\u8ff0 \u7b49\u5f85\u4e2d \u5b9a\u65f6\u4efb\u52a1\u521b\u5efa\u6b63\u5728\u8fdb\u884c\u4e2d\uff0c\u5b9a\u65f6\u4efb\u52a1\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5df2\u542f\u52a8 \u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\u6210\u529f\u540e\uff0c\u6b63\u5e38\u8fd0\u884c\u6216\u5c06\u5df2\u6682\u505c\u7684\u4efb\u52a1\u542f\u52a8\u65f6\u5b9a\u65f6\u4efb\u52a1\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5df2\u505c\u6b62 \u6267\u884c\u505c\u6b62\u4efb\u52a1\u64cd\u4f5c\u65f6\uff0c\u5b9a\u65f6\u4efb\u52a1\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5220\u9664\u4e2d \u89e6\u53d1\u5220\u9664\u64cd\u4f5c\uff0c\u5b9a\u65f6\u4efb\u52a1\u5904\u5728\u6b64\u72b6\u6001\u3002

                      \u5f53\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u5f02\u5e38\u6216\u672a\u5c31\u7eea\u72b6\u6001\u65f6\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u5c06\u9f20\u6807\u79fb\u52a8\u5230\u8d1f\u8f7d\u7684\u72b6\u6001\u503c\u4e0a\uff0c\u7cfb\u7edf\u5c06\u901a\u8fc7\u63d0\u793a\u6846\u5c55\u793a\u66f4\u52a0\u8be6\u7ec6\u7684\u9519\u8bef\u4fe1\u606f\u3002\u60a8\u4e5f\u53ef\u4ee5\u901a\u8fc7\u67e5\u770b\u65e5\u5fd7\u6216\u4e8b\u4ef6\u6765\u83b7\u53d6\u5de5\u4f5c\u8d1f\u8f7d\u7684\u76f8\u5173\u8fd0\u884c\u4fe1\u606f\u3002
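
Outside the UI, assuming kubectl access to the cluster (the resource names and namespace below are placeholders), equivalent checks might look like this:

```shell
kubectl describe deployment my-deploy -n my-namespace  # rollout conditions and events
kubectl get events -n my-namespace --sort-by=.metadata.creationTimestamp
kubectl logs my-pod-abc123 -n my-namespace             # container logs
```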

                      "},{"location":"admin/register/index.html","title":"\u7528\u6237\u6ce8\u518c","text":"

                      \u65b0\u7528\u6237\u9996\u6b21\u4f7f\u7528 AI \u7b97\u529b\u5e73\u53f0\u9700\u8981\u8fdb\u884c\u6ce8\u518c\u3002

                      "},{"location":"admin/register/index.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                      • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                      • \u5df2\u5f00\u542f\u90ae\u7bb1\u6ce8\u518c\u529f\u80fd
                      • \u6709\u4e00\u4e2a\u53ef\u7528\u7684\u90ae\u7bb1
                      "},{"location":"admin/register/index.html#_3","title":"\u90ae\u7bb1\u6ce8\u518c\u6b65\u9aa4","text":"
                      1. \u6253\u5f00 AI \u7b97\u529b\u5e73\u53f0\u9996\u9875 https://ai.isuanova.com/\uff0c\u70b9\u51fb \u6ce8\u518c

                      2. \u952e\u5165\u7528\u6237\u540d\u3001\u5bc6\u7801\u3001\u90ae\u7bb1\u540e\u70b9\u51fb \u6ce8\u518c

                      3. \u7cfb\u7edf\u63d0\u793a\u53d1\u9001\u4e86\u4e00\u5c01\u90ae\u4ef6\u5230\u60a8\u7684\u90ae\u7bb1\u3002

                      4. \u767b\u5f55\u81ea\u5df1\u7684\u90ae\u7bb1\uff0c\u627e\u5230\u90ae\u4ef6\uff0c\u70b9\u51fb\u94fe\u63a5\u3002

                      5. \u606d\u559c\uff0c\u60a8\u6210\u529f\u8fdb\u5165\u4e86 AI \u7b97\u529b\u5e73\u53f0\uff0c\u73b0\u5728\u53ef\u4ee5\u5f00\u59cb\u60a8\u7684 AI \u4e4b\u65c5\u4e86\u3002

                      \u4e0b\u4e00\u6b65\uff1a\u4e3a\u7528\u6237\u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4

                      "},{"location":"admin/register/bindws.html","title":"\u4e3a\u7528\u6237\u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4","text":"

                      \u7528\u6237\u6210\u529f\u6ce8\u518c\u4e4b\u540e\uff0c\u9700\u8981\u4e3a\u5176\u7ed1\u5b9a\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u3002

                      "},{"location":"admin/register/bindws.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                      • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                      • \u7528\u6237\u5df2\u6210\u529f\u6ce8\u518c
                      • \u6709\u4e00\u4e2a\u53ef\u7528\u7684\u7ba1\u7406\u5458\u8d26\u53f7
                      "},{"location":"admin/register/bindws.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u4ee5\u7ba1\u7406\u5458\u8eab\u4efd\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                      2. \u5bfc\u822a\u5207\u6362\u81f3 \u5168\u5c40\u7ba1\u7406 -> \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \uff0c\u70b9\u51fb \u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4

                      3. \u8f93\u5165\u540d\u79f0\uff0c\u9009\u62e9\u6587\u4ef6\u5939\u540e\u70b9\u51fb \u786e\u5b9a \uff0c\u521b\u5efa\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4

                      4. \u7ed9\u5de5\u4f5c\u7a7a\u95f4\u7ed1\u5b9a\u8d44\u6e90

                        \u53ef\u4ee5\u5728\u8fd9\u4e2a\u754c\u9762\u4e0a\u70b9\u51fb \u521b\u5efa\u96c6\u7fa4-\u547d\u540d\u7a7a\u95f4 \u6765\u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u3002

                      5. \u6dfb\u52a0\u6388\u6743\uff1a\u5c06\u7528\u6237\u5206\u914d\u81f3\u5de5\u4f5c\u7a7a\u95f4

                      6. \u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u67e5\u770b\u662f\u5426\u5177\u6709\u5de5\u4f5c\u7a7a\u95f4\u53ca\u547d\u540d\u7a7a\u95f4\u7684\u6743\u9650\u3002 \u7ba1\u7406\u5458\u53ef\u4ee5\u901a\u8fc7\u53f3\u4fa7\u7684 \u2507 \u6267\u884c\u66f4\u591a\u64cd\u4f5c\u3002

                      \u4e0b\u4e00\u6b65\uff1a\u4e3a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u8d44\u6e90

                      "},{"location":"admin/register/wsres.html","title":"\u4e3a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u8d44\u6e90","text":"

                      \u5c06\u7528\u6237\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4\u540e\uff0c\u9700\u8981\u7ed9\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u5408\u9002\u7684\u8d44\u6e90\u3002

                      "},{"location":"admin/register/wsres.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                      • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                      • \u6709\u4e00\u4e2a\u53ef\u7528\u7684\u7ba1\u7406\u5458\u8d26\u53f7
                      • \u5de5\u4f5c\u7a7a\u95f4\u5df2\u521b\u5efa\u4e14\u7ed1\u5b9a\u4e86\u547d\u540d\u7a7a\u95f4
                      "},{"location":"admin/register/wsres.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u4ee5\u7ba1\u7406\u5458\u8eab\u4efd\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                      2. \u5bfc\u822a\u5230 \u5168\u5c40\u7ba1\u7406 -> \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7\uff0c\u627e\u5230\u8981\u6dfb\u52a0\u8d44\u6e90\u7684\u5de5\u4f5c\u7a7a\u95f4\uff0c\u70b9\u51fb \u65b0\u589e\u5171\u4eab\u8d44\u6e90

                      3. \u9009\u62e9\u96c6\u7fa4\uff0c\u8bbe\u7f6e\u5408\u9002\u7684\u8d44\u6e90\u914d\u989d\u540e\uff0c\u70b9\u51fb \u786e\u5b9a

                      4. \u8fd4\u56de\u5171\u4eab\u8d44\u6e90\u9875\uff0c\u4e3a\u5de5\u4f5c\u7a7a\u95f4\u6210\u529f\u5206\u914d\u4e86\u8d44\u6e90\uff0c\u7ba1\u7406\u5458\u53ef\u4ee5\u901a\u8fc7\u53f3\u4fa7\u7684 \u2507 \u968f\u65f6\u4fee\u6539\u3002

                      \u4e0b\u4e00\u6b65\uff1a\u521b\u5efa\u4e91\u4e3b\u673a

                      "},{"location":"admin/security/index.html","title":"\u4e91\u539f\u751f\u5b89\u5168","text":"

                      \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9488\u5bf9\u5bb9\u5668\u3001Pod\u3001\u955c\u50cf\u3001\u8fd0\u884c\u65f6\u3001\u5fae\u670d\u52a1\u63d0\u4f9b\u4e86\u5168\u9762\u81ea\u52a8\u5316\u7684\u5b89\u5168\u5b9e\u73b0\u3002 \u4e0b\u8868\u5217\u51fa\u4e86\u4e00\u4e9b\u5df2\u5b9e\u73b0\u6216\u6b63\u5728\u5b9e\u73b0\u4e2d\u7684\u5b89\u5168\u7279\u6027\u3002

| Security feature | Item | Description |
| --- | --- | --- |
| Image security | Trusted image distribution | To transmit images securely, a key pair and signing information are required to guarantee transmission security. When transmitting an image, you can select a key to sign it. |
| Runtime security | Event correlation analysis | Supports correlating and risk-analyzing security events detected at runtime, improving attack traceability, consolidating alerts, reducing invalid alerts, and improving incident response efficiency. |
| - | Container honeypot repository | Provides a container honeypot repository. Common honeypots include but are not limited to: unauthorized access vulnerabilities, code execution vulnerabilities, local file read vulnerabilities, and remote command execution (RCE) vulnerabilities. |
| - | Container honeypot deployment | Supports adding custom honeypot containers, with customizable service names, service locations, and so on. |
| - | Container honeypot alerts | Supports alerting on suspicious behavior in container honeypots. |
| - | Drift detection | While scanning an image, learns all binary file information in the image and builds an "allowlist"; after the container goes online, only binaries on the "allowlist" are allowed to run, ensuring that untrusted executables (such as illegal downloads) cannot run inside the container. |
| Micro-isolation | Intelligent isolation policy recommendation | Supports recording the historical access traffic of resources and intelligently recommending policies based on that traffic when configuring isolation policies for a resource. |
| - | Tenant isolation | Supports isolation control of tenants within a Kubernetes cluster, with the ability to set different network security groups for different tenants and tenant-level security policy configuration, implementing inter-tenant network access and isolation through different security groups and configured security policies. |
| Microservice security | Service and API security scanning | Supports automatic, manual, and periodic security scanning of services and APIs in the cluster, covering all traditional web scanning items including XSS vulnerabilities, SQL injection, command/code injection, directory enumeration, path traversal, XML entity injection, PoCs, file uploads, weak passwords, JSONP, SSRF, arbitrary redirects, and CRLF injection, as well as items specific to container environments. For discovered vulnerabilities, supports displaying the vulnerability type, URL, parameters, danger level, and test method. |
"},{"location":"admin/security/falco-exporter.html","title":"Falco-exporter","text":"

Falco-exporter is a Prometheus metrics exporter for Falco output events.

Falco-exporter is deployed as a DaemonSet on a Kubernetes cluster. If Prometheus is installed and running in the cluster, Prometheus automatically discovers the metrics provided by Falco-exporter.
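
As an illustrative check (the port 9376 and the falco_events metric name follow falco-exporter's defaults; verify them against your chart version), you can scrape the metrics endpoint manually:

```shell
kubectl port-forward daemonset/falco-exporter 9376:9376 &
curl -s http://localhost:9376/metrics | grep falco_events
```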

                      "},{"location":"admin/security/falco-exporter.html#falco-exporter_1","title":"\u5b89\u88c5 Falco-exporter","text":"

                      \u672c\u9875\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5 Falco-exporter \u7ec4\u4ef6\u3002

                      Note

                      \u5728\u5b89\u88c5\u4f7f\u7528 Falco-exporter \u4e4b\u524d\uff0c\u9700\u8981\u5b89\u88c5\u5e76\u8fd0\u884c Falco\uff0c\u5e76\u542f\u7528 gRPC \u8f93\u51fa\uff08\u9ed8\u8ba4\u901a\u8fc7 Unix \u5957\u63a5\u5b57\u542f\u7528\uff09\u3002 \u5173\u4e8e\u542f\u7528 gRPC \u8f93\u51fa\u7684\u66f4\u591a\u4fe1\u606f\uff0c\u53ef\u53c2\u9605\u5728 Falco Helm Chart \u4e2d\u542f\u7528 gRPC \u8f93\u51fa\u3002

                      \u8bf7\u786e\u8ba4\u60a8\u7684\u96c6\u7fa4\u5df2\u6210\u529f\u63a5\u5165\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u7136\u540e\u6267\u884c\u4ee5\u4e0b\u6b65\u9aa4\u5b89\u88c5 Falco-exporter\u3002

1. Click Container Management -> Cluster List in the left navigation bar, then find the name of the cluster where Falco-exporter is to be installed.

2. In the left navigation bar, select Helm Apps -> Helm Templates, then find and click falco-exporter.

3. Select the version you want to install in the version selector, and click Install.

4. On the installation page, fill in the required installation parameters.

   In the interface above, fill in the application name, namespace, version, and so on.

   In the interface above, fill in the following parameters:

   • Falco Prometheus Exporter -> Image Settings -> Registry: set the registry address of the falco-exporter image; a usable online registry is filled in by default. For a private environment, change it to the private registry address.
   • Falco Prometheus Exporter -> Prometheus ServiceMonitor Settings -> Repository: set the falco-exporter image name.
   • Falco Prometheus Exporter -> Prometheus ServiceMonitor Settings -> Install ServiceMonitor: install the Prometheus Operator ServiceMonitor; enabled by default.
   • Falco Prometheus Exporter -> Prometheus ServiceMonitor Settings -> Scrape Interval: user-defined scrape interval; if unspecified, the Prometheus default interval is used.
   • Falco Prometheus Exporter -> Prometheus ServiceMonitor Settings -> Scrape Timeout: user-defined scrape timeout; if unspecified, the Prometheus default scrape timeout is used.

   In the interface above, fill in the following parameters:

   • Falco Prometheus Exporter -> Prometheus prometheusRules -> Install prometheusRules: create PrometheusRules that alert on priority events; enabled by default.
   • Falco Prometheus Exporter -> Prometheus prometheusRules -> Alerts settings: alert settings; for log events of different levels, set whether alerts are enabled, the alert interval, and the alert threshold.
5. Click the OK button at the bottom right to complete the installation.

                      "},{"location":"admin/security/falco-install.html","title":"\u5b89\u88c5 Falco","text":"

                      \u8bf7\u786e\u8ba4\u60a8\u7684\u96c6\u7fa4\u5df2\u6210\u529f\u63a5\u5165\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u7136\u540e\u6267\u884c\u4ee5\u4e0b\u6b65\u9aa4\u5b89\u88c5 Falco\u3002

                      1. \u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb\u5bb9\u5668\u7ba1\u7406\u2014>\u96c6\u7fa4\u5217\u8868\uff0c\u7136\u540e\u627e\u5230\u51c6\u5907\u5b89\u88c5 Falco \u7684\u96c6\u7fa4\u540d\u79f0\u3002

                      2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u9009\u62e9 Helm \u5e94\u7528 -> Helm \u6a21\u677f\uff0c\u627e\u5230\u5e76\u70b9\u51fb Falco\u3002

                      3. \u5728\u7248\u672c\u9009\u62e9\u4e2d\u9009\u62e9\u5e0c\u671b\u5b89\u88c5\u7684\u7248\u672c\uff0c\u70b9\u51fb\u5b89\u88c5\u3002

                      4. \u5728\u5b89\u88c5\u754c\u9762\uff0c\u586b\u5199\u6240\u9700\u7684\u5b89\u88c5\u53c2\u6570\u3002

                        \u5728\u5982\u4e0a\u754c\u9762\u4e2d\uff0c\u586b\u5199\u5e94\u7528\u540d\u79f0\u3001\u547d\u540d\u7a7a\u95f4\u3001\u7248\u672c\u7b49\u3002

                        \u5728\u5982\u4e0a\u754c\u9762\u4e2d\uff0c\u586b\u5199\u4ee5\u4e0b\u53c2\u6570\uff1a

                        • Falco -> Image Settings -> Registry\uff1a\u8bbe\u7f6e Falco \u955c\u50cf\u7684\u4ed3\u5e93\u5730\u5740\uff0c\u5df2\u7ecf\u9ed8\u8ba4\u586b\u5199\u53ef\u7528\u7684\u5728\u7ebf\u4ed3\u5e93\u3002\u5982\u679c\u662f\u79c1\u6709\u5316\u73af\u5883\uff0c\u53ef\u4fee\u6539\u4e3a\u79c1\u6709\u4ed3\u5e93\u5730\u5740\u3002

                        • Falco -> Image Settings -> Repository\uff1a\u8bbe\u7f6e Falco \u955c\u50cf\u540d\u3002

                        • Falco -> Falco Driver -> Image Settings -> Registry\uff1a\u8bbe\u7f6e Falco Driver \u955c\u50cf\u7684\u4ed3\u5e93\u5730\u5740\uff0c\u5df2\u7ecf\u9ed8\u8ba4\u586b\u5199\u53ef\u7528\u7684\u5728\u7ebf\u4ed3\u5e93\u3002\u5982\u679c\u662f\u79c1\u6709\u5316\u73af\u5883\uff0c\u53ef\u4fee\u6539\u4e3a\u79c1\u6709\u4ed3\u5e93\u5730\u5740\u3002

                        • Falco -> Falco Driver -> Image Settings -> Repository\uff1a\u8bbe\u7f6e Falco Driver \u955c\u50cf\u540d\u3002

                        • Falco -> Falco Driver -> Image Settings -> Driver Kind\uff1a\u8bbe\u7f6e Driver Kind\uff0c\u63d0\u4f9b\u4ee5\u4e0b\u4e24\u79cd\u9009\u62e9\uff1a

                          1. ebpf\uff1a\u4f7f\u7528 ebpf \u6765\u68c0\u6d4b\u4e8b\u4ef6\uff0c\u8fd9\u9700\u8981 Linux \u5185\u6838\u652f\u6301 ebpf\uff0c\u5e76\u542f\u7528 CONFIG_BPF_JIT \u548c sysctl net.core.bpf_jit_enable=1\u3002

                          2. module\uff1a\u4f7f\u7528\u5185\u6838\u6a21\u5757\u68c0\u6d4b\uff0c\u652f\u6301\u6709\u9650\u7684\u64cd\u4f5c\u7cfb\u7edf\u7248\u672c\uff0c\u53c2\u8003 module \u652f\u6301\u7cfb\u7edf\u7248\u672c\u3002

                        • Falco -> Falco Driver -> Image Settings -> Log Level\uff1a\u8981\u5305\u542b\u5728\u65e5\u5fd7\u4e2d\u7684\u6700\u5c0f\u65e5\u5fd7\u7ea7\u522b\u3002

                          \u53ef\u9009\u62e9\u503c\u4e3a\uff1aemergency, alert, critical, error, warning, notice, info, debug\u3002

                        • Falco -> Falco Driver -> Image Settings -> Registry\uff1a\u8bbe\u7f6e Falco Driver \u955c\u50cf\u7684\u4ed3\u5e93\u5730\u5740\uff0c\u5df2\u7ecf\u9ed8\u8ba4\u586b\u5199\u53ef\u7528\u7684\u5728\u7ebf\u4ed3\u5e93\u3002\u5982\u679c\u662f\u79c1\u6709\u5316\u73af\u5883\uff0c\u53ef\u4fee\u6539\u4e3a\u79c1\u6709\u4ed3\u5e93\u5730\u5740\u3002

                        • Falco -> Falco Driver -> Image Settings -> Repository\uff1a\u8bbe\u7f6e Falco Driver \u955c\u50cf\u540d\u3002

                        • Falco -> Falco Driver -> Image Settings -> Driver Kind\uff1a\u8bbe\u7f6e Driver Kind\uff0c\u63d0\u4f9b\u4ee5\u4e0b\u4e24\u79cd\u9009\u62e9\uff1a

                          1. ebpf\uff1a\u4f7f\u7528 ebpf \u6765\u68c0\u6d4b\u4e8b\u4ef6\uff0c\u8fd9\u9700\u8981 Linux \u5185\u6838\u652f\u6301 ebpf\uff0c\u5e76\u542f\u7528 CONFIG_BPF_JIT \u548c sysctl net.core.bpf_jit_enable=1\u3002
                          2. module\uff1a\u4f7f\u7528\u5185\u6838\u6a21\u5757\u68c0\u6d4b\uff0c\u652f\u6301\u6709\u9650\u7684\u64cd\u4f5c\u7cfb\u7edf\u7248\u672c\uff0c\u53c2\u8003 module \u652f\u6301\u7cfb\u7edf\u7248\u672c\u3002
                        • Falco -> Falco Driver -> Image Settings -> Log Level\uff1a\u8981\u5305\u542b\u5728\u65e5\u5fd7\u4e2d\u7684\u6700\u5c0f\u65e5\u5fd7\u7ea7\u522b\u3002

                          \u53ef\u9009\u62e9\u503c\u4e3a\uff1aemergency\u3001alert\u3001critical\u3001error\u3001warning\u3001notice\u3001info\u3001debug\u3002

5. Click the OK button at the bottom right to complete the installation.

                      "},{"location":"admin/security/falco.html","title":"\u4ec0\u4e48\u662f Falco","text":"

                      Falco \u662f\u4e00\u4e2a\u4e91\u539f\u751f\u8fd0\u884c\u65f6\u5b89\u5168\u5de5\u5177\uff0c\u65e8\u5728\u68c0\u6d4b\u5e94\u7528\u7a0b\u5e8f\u4e2d\u7684\u5f02\u5e38\u6d3b\u52a8\uff0c\u53ef\u7528\u4e8e\u76d1\u63a7 Kubernetes \u5e94\u7528\u7a0b\u5e8f\u548c\u5185\u90e8\u7ec4\u4ef6\u7684\u8fd0\u884c\u65f6\u5b89\u5168\u6027\u3002\u4ec5\u9700\u4e3a Falco \u64b0\u5199\u4e00\u5957\u89c4\u5219\uff0c\u5373\u53ef\u6301\u7eed\u76d1\u6d4b\u5e76\u76d1\u63a7\u5bb9\u5668\u3001\u5e94\u7528\u3001\u4e3b\u673a\u53ca\u7f51\u7edc\u7684\u5f02\u5e38\u6d3b\u52a8\u3002

                      "},{"location":"admin/security/falco.html#falco_1","title":"Falco \u80fd\u68c0\u6d4b\u5230\u4ec0\u4e48\uff1f","text":"

                      Falco \u53ef\u5bf9\u4efb\u4f55\u6d89\u53ca Linux \u7cfb\u7edf\u8c03\u7528\u7684\u884c\u4e3a\u8fdb\u884c\u68c0\u6d4b\u548c\u62a5\u8b66\u3002Falco \u7684\u8b66\u62a5\u53ef\u4ee5\u901a\u8fc7\u4f7f\u7528\u7279\u5b9a\u7684\u7cfb\u7edf\u8c03\u7528\u3001\u53c2\u6570\u4ee5\u53ca\u8c03\u7528\u8fdb\u7a0b\u7684\u5c5e\u6027\u6765\u89e6\u53d1\u3002\u4f8b\u5982\uff0cFalco \u53ef\u4ee5\u8f7b\u677e\u68c0\u6d4b\u5230\u5305\u62ec\u4f46\u4e0d\u9650\u4e8e\u4ee5\u4e0b\u4e8b\u4ef6\uff1a

• A shell is running inside a container or pod in Kubernetes.
• A container is running in privileged mode, or is mounting a sensitive path such as /proc from the host.
• A server process is spawning a child process of an unexpected type.
• A sensitive file such as /etc/shadow is unexpectedly read.
• A non-device file is written to /dev.
• A standard system binary such as ls is making an outbound network connection.
• A privileged Pod is started in the Kubernetes cluster.

For more of the default rules that ship with Falco, refer to the Rules documentation.

"},{"location":"admin/security/falco.html#falco_2","title":"What are Falco rules?","text":"

Falco rules define the behaviors and events that Falco should watch for. Rules can be written in Falco rules files or in the general configuration file; for more information on writing, managing, and deploying rules, see Falco Rules. A minimal example follows.
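
As a concrete illustration, here is a minimal rule in Falco's YAML rule syntax, written in the style of the upstream default rules (not taken from this platform):

  # Alert whenever a shell process starts inside any container
  - rule: Shell Spawned in Container
    desc: Detect a shell process starting inside a container
    condition: container.id != host and proc.name in (bash, sh, zsh)
    output: "Shell spawned in a container (user=%user.name container=%container.id command=%proc.cmdline)"
    priority: WARNING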

                      "},{"location":"admin/security/falco.html#falco_3","title":"\u4ec0\u4e48\u662f Falco \u8b66\u62a5\uff1f","text":"

                      \u8b66\u62a5\u662f\u53ef\u914d\u7f6e\u7684\u4e0b\u6e38\u64cd\u4f5c\uff0c\u53ef\u4ee5\u50cf\u8bb0\u5f55\u65e5\u5fd7\u4e00\u6837\u7b80\u5355\uff0c\u4e5f\u53ef\u4ee5\u50cf STDOUT \u5411\u5ba2\u6237\u7aef\u4f20\u9012 gRPC \u8c03\u7528\u4e00\u6837\u590d\u6742\u3002\u6709\u5173\u914d\u7f6e\u3001\u7406\u89e3\u548c\u5f00\u53d1\u8b66\u62a5\u7684\u66f4\u591a\u4fe1\u606f\uff0c\u8bf7\u53c2\u9605Falco \u8b66\u62a5\u3002Falco \u53ef\u4ee5\u5c06\u8b66\u62a5\u53d1\u9001\u81f3\uff1a

                      • \u6807\u51c6\u8f93\u51fa
                      • \u4e00\u4efd\u6587\u4ef6
                      • \u7cfb\u7edf\u65e5\u5fd7
                      • \u751f\u6210\u7684\u7a0b\u5e8f
                      • \u4e00\u4e2a HTTP[s] \u7aef\u70b9
                      • \u901a\u8fc7 gRPC API \u7684\u5ba2\u6237\u7aef
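
These channels are toggled in falco.yaml. A minimal sketch of the corresponding configuration keys; the file path, program, and URLs are illustrative placeholders:

  # falco.yaml output channels (illustrative values)
  stdout_output:
    enabled: true
  file_output:
    enabled: true
    filename: /var/log/falco_events.log
  syslog_output:
    enabled: true
  program_output:
    enabled: false
    program: "jq '{text: .output}' | curl -d @- -X POST https://hooks.example.com"  # spawned program (placeholder)
  http_output:
    enabled: false
    url: https://alerts.example.com/falco   # HTTP[s] endpoint (placeholder)
  grpc_output:
    enabled: false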
                      "},{"location":"admin/security/falco.html#falco_4","title":"Falco \u7531\u54ea\u4e9b\u90e8\u5206\u7ec4\u6210\uff1f","text":"

                      Falco \u7531\u4ee5\u4e0b\u51e0\u4e2a\u4e3b\u8981\u7ec4\u4ef6\u7ec4\u6210\uff1a

                      • \u7528\u6237\u7a7a\u95f4\u7a0b\u5e8f\uff1aCLI \u5de5\u5177\uff0c\u53ef\u7528\u4e8e\u4e0e Falco \u4ea4\u4e92\u3002\u7528\u6237\u7a7a\u95f4\u7a0b\u5e8f\u5904\u7406\u4fe1\u53f7\uff0c\u89e3\u6790\u6765\u81ea Falco \u9a71\u52a8\u7684\u4fe1\u606f\uff0c\u5e76\u53d1\u9001\u8b66\u62a5\u3002

                      • \u914d\u7f6e\uff1a\u5b9a\u4e49 Falco \u7684\u8fd0\u884c\u65b9\u5f0f\u3001\u8981\u65ad\u8a00\u7684\u89c4\u5219\u4ee5\u53ca\u5982\u4f55\u6267\u884c\u8b66\u62a5\u3002\u6709\u5173\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u53c2\u9605\u914d\u7f6e\u3002

                      • Driver\uff1a\u4e00\u6b3e\u9075\u5faa Falco \u9a71\u52a8\u89c4\u8303\u5e76\u53d1\u9001\u7cfb\u7edf\u8c03\u7528\u4fe1\u606f\u6d41\u7684\u8f6f\u4ef6\u3002\u5982\u679c\u4e0d\u5b89\u88c5\u9a71\u52a8\u7a0b\u5e8f\uff0c\u5c06\u65e0\u6cd5\u8fd0\u884c Falco\u3002\u76ee\u524d\uff0cFalco \u652f\u6301\u4ee5\u4e0b\u9a71\u52a8\u7a0b\u5e8f\uff1a

                        • \u57fa\u4e8e C++ \u5e93\u6784\u5efa libscap \u7684\u5185\u6838\u6a21\u5757 libsinsp\uff08\u9ed8\u8ba4\uff09
                        • \u7531\u76f8\u540c\u6a21\u5757\u6784\u5efa\u7684 BPF \u63a2\u9488
                        • \u7528\u6237\u7a7a\u95f4\u68c0\u6d4b

                          \u6709\u5173\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u53c2\u9605 Falco \u9a71\u52a8\u7a0b\u5e8f\u3002

                      • \u63d2\u4ef6\uff1a\u53ef\u7528\u4e8e\u6269\u5c55 falco libraries/falco \u53ef\u6267\u884c\u6587\u4ef6\u7684\u529f\u80fd\uff0c\u6269\u5c55\u65b9\u5f0f\u662f\u901a\u8fc7\u6dfb\u52a0\u65b0\u7684\u4e8b\u4ef6\u6e90\u548c\u4ece\u4e8b\u4ef6\u4e2d\u63d0\u53d6\u4fe1\u606f\u7684\u65b0\u5b57\u6bb5\u3002 \u6709\u5173\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u53c2\u9605\u63d2\u4ef6\u3002

                      "},{"location":"admin/share/infer.html","title":"\u521b\u5efa\u63a8\u7406\u670d\u52a1","text":""},{"location":"admin/share/job.html","title":"\u521b\u5efa\u8bad\u7ec3\u4efb\u52a1","text":""},{"location":"admin/share/notebook.html","title":"\u4f7f\u7528 Notebook","text":"

                      Notebook \u901a\u5e38\u6307\u7684\u662f Jupyter Notebook \u6216\u7c7b\u4f3c\u7684\u4ea4\u4e92\u5f0f\u8ba1\u7b97\u73af\u5883\u3002 \u8fd9\u662f\u4e00\u79cd\u975e\u5e38\u6d41\u884c\u7684\u5de5\u5177\uff0c\u5e7f\u6cdb\u7528\u4e8e\u6570\u636e\u79d1\u5b66\u3001\u673a\u5668\u5b66\u4e60\u548c\u6df1\u5ea6\u5b66\u4e60\u7b49\u9886\u57df\u3002 \u672c\u9875\u8bf4\u660e\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4f7f\u7528 Notebook\u3002

                      "},{"location":"admin/share/notebook.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                      • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                      • \u7528\u6237\u5df2\u6210\u529f\u6ce8\u518c
                      • \u7ba1\u7406\u5458\u4e3a\u7528\u6237\u5206\u914d\u4e86\u5de5\u4f5c\u7a7a\u95f4
                      • \u5df2\u51c6\u5907\u597d\u6570\u636e\u96c6\uff08\u4ee3\u7801\u3001\u6570\u636e\u7b49\uff09
                      "},{"location":"admin/share/notebook.html#notebook_1","title":"\u521b\u5efa\u548c\u4f7f\u7528 Notebook \u5b9e\u4f8b","text":"
                      1. \u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                      2. \u5bfc\u822a\u81f3 AI Lab -> \u8fd0\u7ef4\u7ba1\u7406 -> \u961f\u5217\u7ba1\u7406 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae

                      3. \u952e\u5165\u540d\u79f0\uff0c\u9009\u62e9\u96c6\u7fa4\u3001\u5de5\u4f5c\u7a7a\u95f4\u548c\u914d\u989d\u540e\uff0c\u70b9\u51fb \u786e\u5b9a

                      4. \u4ee5 \u7528\u6237\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5bfc\u822a\u81f3 AI Lab -> Notebook \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae

                      5. \u914d\u7f6e\u5404\u9879\u53c2\u6570\u540e\u70b9\u51fb \u786e\u5b9a

                        \u57fa\u672c\u4fe1\u606f\u8d44\u6e90\u914d\u7f6e\u9ad8\u7ea7\u914d\u7f6e

                        \u952e\u5165\u540d\u79f0\uff0c\u9009\u62e9\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\uff0c\u9009\u62e9\u521a\u521b\u5efa\u7684\u961f\u5217\uff0c\u70b9\u51fb \u4e00\u952e\u521d\u59cb\u5316

                        \u9009\u62e9 Notebook \u7c7b\u578b\uff0c\u914d\u7f6e\u5185\u5b58\u3001CPU\uff0c\u5f00\u542f GPU\uff0c\u521b\u5efa\u548c\u914d\u7f6e PVC\uff1a

                        \u5f00\u542f SSH \u5916\u7f51\u8bbf\u95ee\uff1a

                      6. \u81ea\u52a8\u8df3\u8f6c\u5230 Notebook \u5b9e\u4f8b\u5217\u8868\uff0c\u70b9\u51fb\u5b9e\u4f8b\u540d\u79f0

                      7. \u8fdb\u5165 Notebook \u5b9e\u4f8b\u8be6\u60c5\u9875\uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u6253\u5f00 \u6309\u94ae

                      8. \u8fdb\u5165\u4e86 Notebook \u5f00\u53d1\u73af\u5883\uff0c\u6bd4\u5982\u5728 /home/jovyan \u76ee\u5f55\u6302\u8f7d\u4e86\u6301\u4e45\u5377\uff0c\u53ef\u4ee5\u901a\u8fc7 git \u514b\u9686\u4ee3\u7801\uff0c\u901a\u8fc7 SSH \u8fde\u63a5\u540e\u4e0a\u4f20\u6570\u636e\u7b49\u3002

                      "},{"location":"admin/share/notebook.html#ssh-notebook","title":"\u901a\u8fc7 SSH \u8bbf\u95ee Notebook \u5b9e\u4f8b","text":"
                      1. \u5728\u81ea\u5df1\u7684\u7535\u8111\u4e0a\u751f\u6210 SSH \u5bc6\u94a5\u5bf9

                        \u5728\u81ea\u5df1\u7535\u8111\u4e0a\u6253\u5f00\u547d\u4ee4\u884c\uff0c\u6bd4\u5982\u5728 Windows \u4e0a\u6253\u5f00 git bash\uff0c\u8f93\u5165 ssh-keygen.exe -t rsa\uff0c\u7136\u540e\u4e00\u8def\u56de\u8f66\u3002

                      2. \u901a\u8fc7 cat ~/.ssh/id_rsa.pub \u7b49\u547d\u4ee4\u67e5\u770b\u5e76\u590d\u5236\u516c\u94a5

                      3. \u4ee5\u7528\u6237\u8eab\u4efd\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5728\u53f3\u4e0a\u89d2\u70b9\u51fb \u4e2a\u4eba\u4e2d\u5fc3 -> SSH \u516c\u94a5 -> \u5bfc\u5165 SSH \u516c\u94a5

                      4. \u8fdb\u5165 Notebook \u5b9e\u4f8b\u7684\u8be6\u60c5\u9875\uff0c\u590d\u5236 SSH \u7684\u94fe\u63a5

                      5. \u5728\u5ba2\u6237\u7aef\u4f7f\u7528 SSH \u8bbf\u95ee Notebook \u5b9e\u4f8b
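
The copied link is a standard SSH command. A sketch of what it typically looks like; the host, port, and user name here are placeholders, so use the values from the instance detail page:

  # Connect with the private key matching the imported public key (illustrative values)
  ssh -i ~/.ssh/id_rsa -p 30022 jovyan@ai-platform.example.com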

Next step: Create a Training Job

"},{"location":"admin/share/quota.html","title":"Quota Management","text":"

Once a user is bound to a workspace, resources can be allocated to the workspace and resource quotas can be managed.

"},{"location":"admin/share/quota.html#_2","title":"Prerequisites","text":"
• The AI computing platform is installed
• An administrator account is available
                      "},{"location":"admin/share/quota.html#_3","title":"\u521b\u5efa\u548c\u7ba1\u7406\u914d\u989d","text":"
                      1. \u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                      2. \u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4\u548c\u547d\u540d\u7a7a\u95f4\uff0c\u5e76\u7ed1\u5b9a\u7528\u6237
                      3. \u4e3a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u8d44\u6e90\u914d\u989d

                      4. \u7ba1\u7406\u547d\u540d\u7a7a\u95f4 test-ns-1 \u7684\u8d44\u6e90\u914d\u989d\uff0c\u5176\u6570\u503c\u4e0d\u80fd\u8d85\u8fc7\u5de5\u4f5c\u7a7a\u95f4\u7684\u914d\u989d\u3002

                      5. \u4ee5 \u7528\u6237\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u67e5\u770b\u5176\u662f\u5426\u88ab\u5206\u914d\u4e86 test-ns-1 \u547d\u540d\u7a7a\u95f4\u3002
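
Under the hood, the namespace quota corresponds to a Kubernetes ResourceQuota object. A minimal sketch with illustrative limits that must stay within the workspace quota; the object name is hypothetical:

  apiVersion: v1
  kind: ResourceQuota
  metadata:
    name: test-ns-1-quota        # hypothetical name
    namespace: test-ns-1
  spec:
    hard:
      requests.cpu: "4"
      requests.memory: 8Gi
      limits.cpu: "8"
      limits.memory: 16Gi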

Next step: Create an AI Workload Using GPU Resources

"},{"location":"admin/share/workload.html","title":"Create an AI Workload Using GPU Resources","text":"

After an administrator allocates a resource quota to a workspace, users can create AI workloads that consume GPU compute resources.

"},{"location":"admin/share/workload.html#_1","title":"Prerequisites","text":"
• The AI computing platform is installed
• The user has registered successfully
• An administrator has assigned a workspace to the user
• A resource quota has been set for the workspace
• A cluster has been created
                      "},{"location":"admin/share/workload.html#ai","title":"\u521b\u5efa AI \u8d1f\u8f7d\u6b65\u9aa4","text":"
                      1. \u4ee5\u7528\u6237\u8eab\u4efd\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                      2. \u5bfc\u822a\u81f3 \u5bb9\u5668\u7ba1\u7406 \uff0c\u9009\u62e9\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u70b9\u51fb \u5de5\u4f5c\u8d1f\u8f7d -> \u65e0\u72b6\u6001\u8d1f\u8f7d \uff0c \u70b9\u51fb\u53f3\u4fa7\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae

                      3. \u914d\u7f6e\u5404\u9879\u53c2\u6570\u540e\u70b9\u51fb \u786e\u5b9a

                        \u57fa\u672c\u4fe1\u606f\u5bb9\u5668\u914d\u7f6e\u5176\u4ed6

                        \u9009\u62e9\u81ea\u5df1\u7684\u547d\u540d\u7a7a\u95f4\u3002

                        \u8bbe\u7f6e\u955c\u50cf\uff0c\u914d\u7f6e CPU\u3001\u5185\u5b58\u3001GPU \u7b49\u8d44\u6e90\uff0c\u8bbe\u7f6e\u542f\u52a8\u547d\u4ee4\u3002

                        \u670d\u52a1\u914d\u7f6e\u548c\u9ad8\u7ea7\u914d\u7f6e\u53ef\u4ee5\u4f7f\u7528\u9ed8\u8ba4\u914d\u7f6e\u3002

                      4. \u81ea\u52a8\u8fd4\u56de\u65e0\u72b6\u6001\u8d1f\u8f7d\u5217\u8868\uff0c\u70b9\u51fb\u8d1f\u8f7d\u540d\u79f0

                      5. \u8fdb\u5165\u8be6\u60c5\u9875\uff0c\u53ef\u4ee5\u770b\u5230 GPU \u914d\u989d

                      6. \u4f60\u8fd8\u53ef\u4ee5\u8fdb\u5165\u63a7\u5236\u53f0\uff0c\u8fd0\u884c mx-smi \u547d\u4ee4\u67e5\u770b GPU \u8d44\u6e90
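
For reference, the GPU request behind this UI flow is an extended resource on the container. A minimal Deployment sketch: the image and the resource name nvidia.com/gpu are illustrative; the actual resource name depends on your GPU vendor and device plugin (the mx-smi command above implies a MetaX GPU, whose resource name differs):

  apiVersion: apps/v1
  kind: Deployment
  metadata:
    name: gpu-demo               # hypothetical name
    namespace: test-ns-1
  spec:
    replicas: 1
    selector:
      matchLabels:
        app: gpu-demo
    template:
      metadata:
        labels:
          app: gpu-demo
      spec:
        containers:
        - name: trainer
          image: pytorch/pytorch:latest   # illustrative image
          resources:
            limits:
              nvidia.com/gpu: 1           # vendor-specific extended resource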

Next step: Using Notebook

"},{"location":"admin/virtnest/best-practice/import-ubuntu.html","title":"How to Import a Traditional Linux VM from VMware into the Cloud-Native VM Platform","text":"

This article explains in detail how to use the command line to import a Linux VM from the external VMware platform into a VM in the AI computing center.

Info

The external virtualization platform in this document is VMware vSphere Client, hereafter abbreviated as vSphere. Technically, the import relies on KubeVirt CDI. Before the operation, the VM to be imported must be powered off on vSphere. A VM running Ubuntu is used as the example.

                      "},{"location":"admin/virtnest/best-practice/import-ubuntu.html#vsphere","title":"\u83b7\u53d6 vSphere \u7684\u4e91\u4e3b\u673a\u57fa\u7840\u4fe1\u606f","text":"
                      • vSphere URL\uff1a\u76ee\u6807\u5e73\u53f0\u7684 URL \u5730\u5740\u4fe1\u606f

                      • vSphere SSL \u8bc1\u4e66\u6307\u7eb9 thumbprint\uff1a\u9700\u8981\u901a\u8fc7 openssl \u83b7\u53d6

                        openssl s_client -connect 10.64.56.11:443 </dev/null | openssl x509 -in /dev/stdin -fingerprint -sha1 -noout\n

  The output looks similar to:

  Can't use SSL_get_servername
  depth=0 CN = vcsa.daocloud.io
  verify error:num=20:unable to get local issuer certificate
  verify return:1
  depth=0 CN = vcsa.daocloud.io
  verify error:num=21:unable to verify the first certificate
  verify return:1
  depth=0 CN = vcsa.daocloud.io
  verify return:1
  DONE
  sha1 Fingerprint=C3:9D:D7:55:6A:43:11:2B:DE:BA:27:EA:3B:C2:13:AF:E4:12:62:4D  # the required value
• vSphere account: obtain the vSphere account; pay attention to its permissions

• vSphere password: obtain the vSphere password

• UUID of the VM to be imported (obtained from the vSphere web page)

  • In the vSphere UI, open the detail page of the VM to be imported and click Edit Settings. With the browser's developer console open, click Network -> Headers and find the URL shown in the figure below.

  • Click Response, navigate to vmConfigContext -> config, and find the target value uuid.

• The vmdk file path of the VM to be imported

                      "},{"location":"admin/virtnest/best-practice/import-ubuntu.html#_1","title":"\u7f51\u7edc\u914d\u7f6e","text":"

                      \u9700\u8981\u6839\u636e\u7f51\u7edc\u6a21\u5f0f\u7684\u4e0d\u540c\u914d\u7f6e\u4e0d\u540c\u7684\u4fe1\u606f\uff0c\u82e5\u6709\u56fa\u5b9a IP \u7684\u9700\u6c42\uff0c\u9700\u8981\u9009\u62e9 Bridge \u7f51\u7edc\u6a21\u5f0f

                      • \u521b\u5efa ovs \u7c7b\u578b\u7684 Multus CR\uff0c\u53ef\u53c2\u8003\u521b\u5efa Multus CR
                      • \u521b\u5efa\u5b50\u7f51\u53ca IP \u6c60\uff0c\u53c2\u8003\u521b\u5efa\u5b50\u7f51\u548c IP \u6c60

  apiVersion: spiderpool.spidernet.io/v2beta1
  kind: SpiderIPPool
  metadata:
    name: test2
  spec:
    ips:
    - 10.20.3.90
    subnet: 10.20.0.0/16
    gateway: 10.20.0.1

  ---
  apiVersion: spiderpool.spidernet.io/v2beta1
  kind: SpiderIPPool
  metadata:
    name: test3
  spec:
    ips:
    - 10.20.240.1
    subnet: 10.20.0.0/16
    gateway: 10.20.0.1

  ---
  apiVersion: spiderpool.spidernet.io/v2beta1
  kind: SpiderMultusConfig
  metadata:
    name: test1
    namespace: kube-system
  spec:
    cniType: ovs
    coordinator:
      detectGateway: false
      detectIPConflict: false
      mode: auto
      tunePodRoutes: true
    disableIPAM: false
    enableCoordinator: true
    ovs:
      bridge: br-1
      ippools:
        ipv4:
        - test1
        - test2
                      "},{"location":"admin/virtnest/best-practice/import-ubuntu.html#vsphere-secret","title":"\u83b7\u53d6 vSphere \u7684\u8d26\u53f7\u5bc6\u7801 secret","text":"
                      apiVersion: v1\nkind: Secret\nmetadata:\n  name: vsphere   # \u53ef\u66f4\u6539\n  labels:\n    app: containerized-data-importer  # \u8bf7\u52ff\u66f4\u6539\ntype: Opaque\ndata:\n  accessKeyId: \"username-base64\"\n  secretKey: \"password-base64\"\n
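
The accessKeyId and secretKey fields hold the base64-encoded vSphere username and password. For example, with illustrative credentials:

  # Encode the credentials for the Secret (illustrative values)
  echo -n 'administrator@vsphere.local' | base64
  echo -n 'your-password' | base64
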
                      "},{"location":"admin/virtnest/best-practice/import-ubuntu.html#kubevirt-vm-yaml-vm","title":"\u7f16\u5199 kubevirt vm yaml \u521b\u5efa vm","text":"

                      Tip

                      \u82e5\u6709\u56fa\u5b9aIP\u9700\u6c42\uff0c\u5219\u8be5 yaml \u4e0e\u4f7f\u7528\u9ed8\u8ba4\u7f51\u7edc\u7684 yaml \u6709\u4e00\u4e9b\u533a\u522b\uff0c\u5df2\u6807\u6ce8\u3002

                      apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n    virtnest.io/alias-name: \"\"\n    virtnest.io/image-secret: \"\"\n  creationTimestamp: \"2024-05-23T06:46:28Z\"\n  finalizers:\n  - kubevirt.io/virtualMachineControllerFinalize\n  generation: 1\n  labels:\n    virtnest.io/os-family: Ubuntu\n    virtnest.io/os-version: \"22.04\"\n  name: export-ubuntu\n  namespace: default\nspec:\n  dataVolumeTemplates:\n  - metadata:\n      creationTimestamp: null\n      name: export-ubuntu-rootdisk\n      namespace: default\n    spec:\n      pvc:\n        accessModes:\n        - ReadWriteOnce\n        resources:\n          requests:\n            storage: 10Gi\n        storageClassName: local-path\n      source:\n        vddk:\n          backingFile: \"[A05-09-ShangPu-Local-DataStore] virtnest-export-ubuntu/virtnest-export-ubuntu.vmdk\"  \n          url: \"https://10.64.56.21\"                                                       \n          uuid: \"421d6135-4edb-df80-ee54-8c5b10cc4e78\"                                     \n          thumbprint: \"D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA\"            \n          secretRef: \"vsphere\"\n          initImageURL: \"release.daocloud.io/virtnest/vddk:v8\"\n  runStrategy: Manual\n  template:\n    metadata:\n      annotations:\n        ipam.spidernet.io/ippools: '[{\"cleangateway\":false,\"ipv4\":[\"test2\"]}]'  // \u8fd9\u91cc\u6dfb\u52a0 spiderpool \u7f51\u7edc\n      creationTimestamp: null\n    spec:\n      architecture: amd64\n      domain:\n        devices:\n          disks:\n          - bootOrder: 1\n            disk:\n              bus: virtio\n            name: rootdisk\n          interfaces:                                                          // \u4fee\u6539\u8fd9\u91cc\u7684\u7f51\u7edc\u914d\u7f6e\n          - bridge: {}\n            name: ovs-bridge0\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 4Gi\n      networks:                                                                // \u4fee\u6539\u8fd9\u91cc\u7684\u7f51\u7edc\u914d\u7f6e\n      - multus:\n          default: true\n          networkName: kube-system/test1\n        name: ovs-bridge0\n      volumes:\n      - dataVolume:\n          name: export-ubuntu-rootdisk\n        name: rootdisk\n
                      "},{"location":"admin/virtnest/best-practice/import-ubuntu.html#vnc","title":"\u8fdb\u5165 VNC \u68c0\u67e5\u662f\u5426\u6210\u529f\u8fd0\u884c","text":"
                      1. \u4fee\u6539\u4e91\u4e3b\u673a\u7684\u7f51\u7edc\u914d\u7f6e

                      2. \u67e5\u770b\u5f53\u524d\u7f51\u7edc

                        \u5728\u5b9e\u9645\u5bfc\u5165\u5b8c\u6210\u65f6\uff0c\u5982\u4e0b\u56fe\u6240\u793a\u7684\u914d\u7f6e\u5df2\u7ecf\u5b8c\u6210\u3002\u7136\u800c\uff0c\u9700\u8981\u6ce8\u610f\u7684\u662f\uff0cenp1s0\u63a5\u53e3\u5e76\u6ca1\u6709\u5305\u542binet\u5b57\u6bb5\uff0c\u56e0\u6b64\u65e0\u6cd5\u8fde\u63a5\u5230\u5916\u90e8\u7f51\u7edc\u3002

                      3. \u914d\u7f6e netplan

                        \u5728\u4e0a\u56fe\u6240\u793a\u7684\u914d\u7f6e\u4e2d\uff0c\u5c06 ethernets \u4e2d\u7684\u5bf9\u8c61\u66f4\u6539\u4e3a enp1s0\uff0c\u5e76\u4f7f\u7528 DHCP \u83b7\u5f97 IP \u5730\u5740\u3002
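
  A minimal netplan sketch for this step; the file name is illustrative, so match whatever already exists under /etc/netplan:

    # /etc/netplan/00-installer-config.yaml (illustrative path)
    network:
      version: 2
      ethernets:
        enp1s0:
          dhcp4: true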

4. Apply the netplan configuration to the system network configuration

  sudo netplan apply

5. Run a ping test against the external network

6. Access the VM on the node via SSH.

                      "},{"location":"admin/virtnest/best-practice/import-windows.html","title":"\u5982\u4f55\u4ece VMWare \u5bfc\u5165\u4f20\u7edf Windows \u4e91\u4e3b\u673a\u5230\u4e91\u539f\u751f\u4e91\u4e3b\u673a\u5e73\u53f0","text":"

                      \u672c\u6587\u5c06\u8be6\u7ec6\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u547d\u4ee4\u884c\u5c06\u5916\u90e8\u5e73\u53f0 VMware \u4e0a\u7684\u4e91\u4e3b\u673a\u5bfc\u5165\u5230 AI \u7b97\u529b\u4e2d\u5fc3\u7684\u4e91\u4e3b\u673a\u4e2d\u3002

                      Info

                      \u672c\u6587\u6863\u5916\u90e8\u865a\u62df\u5e73\u53f0\u662f VMware vSphere Client\uff0c\u540e\u7eed\u7b80\u5199\u4e3a vSphere\u3002 \u6280\u672f\u4e0a\u662f\u4f9d\u9760 kubevirt cdi \u6765\u5b9e\u73b0\u7684\u3002\u64cd\u4f5c\u524d\uff0cvSphere \u4e0a\u88ab\u5bfc\u5165\u7684\u4e91\u4e3b\u673a\u9700\u8981\u5173\u673a\u3002 \u4ee5 Windows \u64cd\u4f5c\u7cfb\u7edf\u7684\u4e91\u4e3b\u673a\u4e3a\u4f8b\u3002

                      "},{"location":"admin/virtnest/best-practice/import-windows.html#_1","title":"\u73af\u5883\u51c6\u5907","text":"

                      \u5bfc\u5165\u524d\uff0c\u9700\u8981\u53c2\u8003\u7f51\u7edc\u914d\u7f6e\u51c6\u5907\u73af\u5883\u3002

                      "},{"location":"admin/virtnest/best-practice/import-windows.html#windows","title":"\u83b7\u53d6 Windows \u4e91\u4e3b\u673a\u7684\u4fe1\u606f","text":"

                      \u4e0e\u5bfc\u5165 Linux \u64cd\u4f5c\u7cfb\u7edf\u7684\u4e91\u4e3b\u673a\u7c7b\u4f3c\uff0c\u53ef\u53c2\u8003\u5982\u4f55\u4ece VMWare \u5bfc\u5165\u4f20\u7edf Linuxs \u4e91\u4e3b\u673a\u5230\u4e91\u539f\u751f\u4e91\u4e3b\u673a\u5e73\u53f0\u83b7\u53d6\u4ee5\u4e0b\u4fe1\u606f\uff1a

                      • \u83b7\u53d6 vSphere \u8d26\u53f7\u5bc6\u7801
                      • \u83b7\u53d6 vSphere \u4e91\u4e3b\u673a\u4fe1\u606f
                      "},{"location":"admin/virtnest/best-practice/import-windows.html#windows_1","title":"\u68c0\u67e5 Windows \u7684\u5f15\u5bfc\u7c7b\u578b","text":"

                      \u5c06\u5916\u90e8\u5e73\u53f0\u7684\u4e91\u4e3b\u673a\u5bfc\u5165\u5230 AI \u7b97\u529b\u4e2d\u5fc3\u7684\u865a\u62df\u5316\u5e73\u53f0\u4e2d\u65f6\uff0c\u9700\u8981\u6839\u636e\u4e91\u4e3b\u673a\u7684\u542f\u52a8\u7c7b\u578b\uff08BIOS \u6216 UEFI\uff09\u8fdb\u884c\u76f8\u5e94\u7684\u914d\u7f6e\uff0c\u4ee5\u786e\u4fdd\u4e91\u4e3b\u673a\u80fd\u591f\u6b63\u786e\u542f\u52a8\u548c\u8fd0\u884c\u3002

                      \u53ef\u4ee5\u901a\u8fc7\"\u7cfb\u7edf\u4fe1\u606f\"\u68c0\u67e5 Windows \u662f BIOS \u8fd8\u662f UEFI \u5f15\u5bfc\u3002\u5982\u679c\u662f UEFI \u5219\u9700\u8981\u5728 YAML \u6587\u4ef6\u4e2d\u6dfb\u52a0\u76f8\u5173\u4fe1\u606f\u3002
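
One quick way to check, from inside the Windows guest:

  # Opens System Information; the "BIOS Mode" field shows "UEFI" or "Legacy" (BIOS)
  msinfo32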

                      "},{"location":"admin/virtnest/best-practice/import-windows.html#_2","title":"\u5bfc\u5165\u8fc7\u7a0b","text":"

                      \u51c6\u5907 window.yaml \u6587\u4ef6\uff0c\u6ce8\u610f\u4ee5\u4e0b\u914d\u7f6e\u9879

                      • \u5f15\u5bfc Virtio \u9a71\u52a8\u7684 PVC
                      • \u78c1\u76d8\u603b\u7ebf\u7c7b\u578b\uff0c\u6839\u636e\u5f15\u5bfc\u7c7b\u578b\u8bbe\u7f6e\u4e3a sata \u6216 virtio
                      • \u5982\u679c\u4f7f\u7528 UEFI\uff0c\u9700\u8981\u6dfb\u52a0 UEFI \u914d\u7f6e
                      \u70b9\u51fb\u67e5\u770b window.yaml \u793a\u4f8b window.yaml
  apiVersion: kubevirt.io/v1
  kind: VirtualMachine
  metadata:
    labels:
      virtnest.io/os-family: windows
      virtnest.io/os-version: "server2019"
    name: export-window-21
    namespace: default
  spec:
    dataVolumeTemplates:
      - metadata:
          name: export-window-21-rootdisk
        spec:
          pvc:
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 22Gi
            storageClassName: local-path
          source:
            vddk:
              backingFile: "[A05-09-ShangPu-Local-DataStore] virtnest-export-window/virtnest-export-window.vmdk"
              url: "https://10.64.56.21"
              uuid: "421d40f2-21a2-cfeb-d5c9-e7f8abfc2faa"
              thumbprint: "D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA"
              secretRef: "vsphere21"
              initImageURL: "release.daocloud.io/virtnest/vddk:v8"
      - metadata:
          name: export-window-21-datadisk
        spec:
          pvc:
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 1Gi
            storageClassName: local-path
          source:
            vddk:
              backingFile: "[A05-09-ShangPu-Local-DataStore] virtnest-export-window/virtnest-export-window_1.vmdk"
              url: "https://10.64.56.21"
              uuid: "421d40f2-21a2-cfeb-d5c9-e7f8abfc2faa"
              thumbprint: "D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA"
              secretRef: "vsphere21"
              initImageURL: "release.daocloud.io/virtnest/vddk:v8"
      # <1>. PVC that carries the virtio drivers
      # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓
      - metadata:
          name: virtio-disk
        spec:
          pvc:
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 10Mi
            storageClassName: local-path
          source:
            blank: {}
            # ↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑
    running: true
    template:
      metadata:
        annotations:
          ipam.spidernet.io/ippools: '[{"cleangateway":false,"ipv4":["test86"]}]'
      spec:
        dnsConfig:
          nameservers:
          - 223.5.5.5
        domain:
          cpu:
            cores: 2
          memory:
            guest: 4Gi
          devices:
            disks:
              - bootOrder: 1
                disk:
                  bus: sata   # <2> disk bus type: sata or virtio, depending on the boot type
                name: rootdisk
              - bootOrder: 2
                disk:
                  bus: sata   # <2> disk bus type: sata or virtio, depending on the boot type
                name: datadisk
              # <1>. disks that carry the virtio drivers
              # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓
              - bootOrder: 3
                disk:
                  bus: virtio
                name: virtdisk
              - bootOrder: 4
                cdrom:
                  bus: sata
                name: virtiocontainerdisk
              # ↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑
            interfaces:
              - bridge: {}
                name: ovs-bridge0
          # <3> See "Check the Windows Boot Type" above:
          # this block must be added if UEFI is used
          # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓
          features:
            smm:
              enabled: true
          firmware:
            bootloader:
              efi:
                secureBoot: false
          # ↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑
          machine:
            type: q35
          resources:
            requests:
              memory: 4Gi
        networks:
          - multus:
              default: true
              networkName: kube-system/test1
            name: ovs-bridge0
        volumes:
          - dataVolume:
              name: export-window-21-rootdisk
            name: rootdisk
          - dataVolume:
              name: export-window-21-datadisk
            name: datadisk
          # <1> volumes that carry the virtio drivers
          # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓
          - dataVolume:
              name: virtio-disk
            name: virtdisk
          - containerDisk:
              image: release-ci.daocloud.io/virtnest/kubevirt/virtio-win:v4.12.12-5
            name: virtiocontainerdisk
          # ↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑
                      "},{"location":"admin/virtnest/best-practice/import-windows.html#vnc-virtio","title":"\u901a\u8fc7 VNC \u5b89\u88c5 VirtIO \u9a71\u52a8","text":"
                      1. \u901a\u8fc7 VNC \u8bbf\u95ee\u548c\u8fde\u63a5\u5230\u4e91\u4e3b\u673a\u3002
                      2. \u6839\u636e Windows \u7248\u672c\u4e0b\u8f7d\u5e76\u5b89\u88c5\u76f8\u5e94\u7684 VirtIO \u9a71\u52a8\u7a0b\u5e8f\u3002
                      3. \u53ef\u4ee5\u5f00\u542f\u8fdc\u7a0b\u684c\u9762\uff08Remote Desktop\uff09\uff0c\u65b9\u4fbf\u5728\u540e\u7eed\u901a\u8fc7\u8fdc\u7a0b\u684c\u9762\u534f\u8bae\uff08RDP\uff09\u8fde\u63a5\u5230\u4e91\u4e3b\u673a\u3002
                      4. \u5b89\u88c5\u5b8c\u6210\u540e\uff0c\u91cd\u542f\u4e91\u4e3b\u673a\u540e\u66f4\u65b0 YAML\u3002
                      "},{"location":"admin/virtnest/best-practice/import-windows.html#yaml","title":"\u91cd\u542f\u540e\u66f4\u65b0 YAML","text":"\u70b9\u51fb\u67e5\u770b\u4fee\u6539\u540e\u7684 window.yaml \u793a\u4f8b window.yaml
  # Delete the fields marked <1>, and change the fields marked <2>: sata becomes virtio
  apiVersion: kubevirt.io/v1
  kind: VirtualMachine
  metadata:
    labels:
      virtnest.io/os-family: windows
      virtnest.io/os-version: "server2019"
    name: export-window-21
    namespace: default
  spec:
    dataVolumeTemplates:
      - metadata:
          name: export-window-21-rootdisk
        spec:
          pvc:
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 22Gi
            storageClassName: local-path
          source:
            vddk:
              backingFile: "[A05-09-ShangPu-Local-DataStore] virtnest-export-window/virtnest-export-window.vmdk"
              url: "https://10.64.56.21"
              uuid: "421d40f2-21a2-cfeb-d5c9-e7f8abfc2faa"
              thumbprint: "D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA"
              secretRef: "vsphere21"
              initImageURL: "release.daocloud.io/virtnest/vddk:v8"
      - metadata:
          name: export-window-21-datadisk
        spec:
          pvc:
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 1Gi
            storageClassName: local-path
          source:
            vddk:
              backingFile: "[A05-09-ShangPu-Local-DataStore] virtnest-export-window/virtnest-export-window_1.vmdk"
              url: "https://10.64.56.21"
              uuid: "421d40f2-21a2-cfeb-d5c9-e7f8abfc2faa"
              thumbprint: "D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA"
              secretRef: "vsphere21"
              initImageURL: "release.daocloud.io/virtnest/vddk:v8"
    running: true
    template:
      metadata:
        annotations:
          ipam.spidernet.io/ippools: '[{"cleangateway":false,"ipv4":["test86"]}]'
      spec:
        dnsConfig:
          nameservers:
          - 223.5.5.5
        domain:
          cpu:
            cores: 2
          memory:
            guest: 4Gi
          devices:
            disks:
              - bootOrder: 1
                disk:
                  bus: virtio  # <2>
                name: rootdisk
              - bootOrder: 2
                disk:
                  bus: virtio  # <2>
                name: datadisk
            interfaces:
              - bridge: {}
                name: ovs-bridge0
          # <3> See "Check the Windows Boot Type" above:
          # this block must be added if UEFI is used
          # ↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓
          features:
            smm:
              enabled: true
          firmware:
            bootloader:
              efi:
                secureBoot: false
          # ↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑↑
          machine:
            type: q35
          resources:
            requests:
              memory: 4Gi
        networks:
          - multus:
              default: true
              networkName: kube-system/test1
            name: ovs-bridge0
        volumes:
          - dataVolume:
              name: export-window-21-rootdisk
            name: rootdisk
          - dataVolume:
              name: export-window-21-datadisk
            name: datadisk
                      "},{"location":"admin/virtnest/best-practice/import-windows.html#rdp","title":"RDP \u8bbf\u95ee\u548c\u9a8c\u8bc1","text":"
                      • \u4f7f\u7528 RDP \u5ba2\u6237\u7aef\u8fde\u63a5\u5230\u4e91\u4e3b\u673a\u3002\u8f93\u5165\u9ed8\u8ba4\u8d26\u53f7 admin \u548c\u5bc6\u7801 dangerous!123 \u8fdb\u884c\u767b\u5f55\u3002

                      • \u9a8c\u8bc1\u7f51\u7edc\u8bbf\u95ee\u548c\u6570\u636e\u76d8\u6570\u636e

                      "},{"location":"admin/virtnest/best-practice/import-windows.html#linux-windows","title":"\u5bf9\u6bd4\u5bfc\u5165 Linux \u548c Windows \u4e91\u4e3b\u673a\u7684\u5dee\u5f02","text":"
                      • Windows \u53ef\u80fd\u9700\u8981 UEFI \u914d\u7f6e\u3002
                      • Windows \u901a\u5e38\u9700\u8981\u5b89\u88c5 VirtIO \u9a71\u52a8\u3002
                      • Windows \u591a\u78c1\u76d8\u5bfc\u5165\u901a\u5e38\u4e0d\u9700\u8981\u91cd\u65b0\u6302\u8f7d\u78c1\u76d8\u3002
                      "},{"location":"admin/virtnest/best-practice/vm-windows.html","title":"\u521b\u5efa Windows \u4e91\u4e3b\u673a","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u547d\u4ee4\u884c\u521b\u5efa Windows \u4e91\u4e3b\u673a\u3002

                      "},{"location":"admin/virtnest/best-practice/vm-windows.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      1. \u521b\u5efa Windows \u4e91\u4e3b\u673a\u4e4b\u524d\uff0c\u9700\u8981\u5148\u53c2\u8003\u5b89\u88c5\u4e91\u4e3b\u673a\u6a21\u5757\u7684\u4f9d\u8d56\u548c\u524d\u63d0\u786e\u5b9a\u60a8\u7684\u73af\u5883\u5df2\u7ecf\u51c6\u5907\u5c31\u7eea\u3002
                      2. \u521b\u5efa\u8fc7\u7a0b\u5efa\u8bae\u53c2\u8003\u5b98\u65b9\u6587\u6863\uff1a\u5b89\u88c5 windows \u7684\u6587\u6863\u3001 \u5b89\u88c5 Windows \u76f8\u5173\u9a71\u52a8\u7a0b\u5e8f\u3002
                      3. Windows \u4e91\u4e3b\u673a\u5efa\u8bae\u4f7f\u7528 VNC \u7684\u8bbf\u95ee\u65b9\u5f0f\u3002
                      "},{"location":"admin/virtnest/best-practice/vm-windows.html#iso","title":"\u5bfc\u5165 ISO \u955c\u50cf","text":"

                      \u200b\u521b\u5efa Windows \u4e91\u4e3b\u673a\u9700\u8981\u5bfc\u5165 ISO \u955c\u50cf\u7684\u4e3b\u8981\u539f\u56e0\u662f\u4e3a\u4e86\u5b89\u88c5 Windows \u64cd\u4f5c\u7cfb\u7edf\u3002 \u4e0e Linux \u64cd\u4f5c\u7cfb\u7edf\u4e0d\u540c\uff0cWindows \u64cd\u4f5c\u7cfb\u7edf\u5b89\u88c5\u8fc7\u7a0b\u901a\u5e38\u9700\u8981\u4ece\u5b89\u88c5\u5149\u76d8\u6216 ISO \u955c\u50cf\u6587\u4ef6\u4e2d\u5f15\u5bfc\u3002 \u56e0\u6b64\uff0c\u5728\u521b\u5efa Windows \u4e91\u4e3b\u673a\u65f6\uff0c\u9700\u8981\u5148\u5bfc\u5165 Windows \u64cd\u4f5c\u7cfb\u7edf\u7684\u5b89\u88c5 ISO \u955c\u50cf\u6587\u4ef6\uff0c\u4ee5\u4fbf\u4e91\u4e3b\u673a\u80fd\u591f\u6b63\u5e38\u5b89\u88c5\u3002

                      \u4ee5\u4e0b\u4ecb\u7ecd\u4e24\u4e2a\u5bfc\u5165 ISO \u955c\u50cf\u7684\u529e\u6cd5\uff1a

                      1. \uff08\u63a8\u8350\uff09\u5236\u4f5c Docker \u955c\u50cf\uff0c\u5efa\u8bae\u53c2\u8003 \u6784\u5efa\u955c\u50cf
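
  A containerDisk image for an ISO follows the standard KubeVirt convention of placing the file under /disk. A minimal sketch; the ISO file name is illustrative:

    # Dockerfile: package a Windows installation ISO as a KubeVirt containerDisk
    FROM scratch
    ADD win10.iso /disk/

  Build and push the image with docker build / docker push, then reference it as a cdrom volume in the VM YAML.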

2. (Not recommended) Use virtctl to import the image into a PVC

  Refer to the following command:

  virtctl image-upload -n <namespace> pvc <PVC name> \
     --image-path=<ISO file path> \
     --access-mode=ReadWriteOnce \
     --size=6G \
     --uploadproxy-url=<https://cdi-uploadproxy ClusterIP and port> \
     --force-bind \
     --insecure \
     --wait-secs=240 \
     --storage-class=<SC>

  For example, with hypothetical values filled in (namespace, PVC name, file path, and upload-proxy address are illustrative):

  virtctl image-upload -n default pvc iso-win10 \
     --image-path=/tmp/win10.iso \
     --access-mode=ReadWriteOnce \
     --size=6G \
     --uploadproxy-url=https://10.96.0.20:443 \
     --force-bind \
     --insecure \
     --wait-secs=240 \
     --storage-class=local-path
                      "},{"location":"admin/virtnest/best-practice/vm-windows.html#yaml-windows","title":"YAML \u521b\u5efa Windows \u4e91\u4e3b\u673a","text":"

                      \u4f7f\u7528 yaml \u521b\u5efa Windows \u4e91\u4e3b\u673a\uff0c\u66f4\u52a0\u7075\u6d3b\u5e76\u4e14\u66f4\u6613\u7f16\u5199\u559d\u7ef4\u62a4\u3002\u4ee5\u4e0b\u4ecb\u7ecd\u4e09\u79cd\u53c2\u8003\u7684 yaml\uff1a

                      1. \u63a8\u8350\u4f7f\u7528 Virtio \u9a71\u52a8 + Docker \u955c\u50cf\u7684\u65b9\u5f0f

                        • \u5982\u679c\u4f60\u9700\u8981\u4f7f\u7528\u5b58\u50a8\u80fd\u529b-\u6302\u8f7d\u78c1\u76d8\uff0c\u8bf7\u5b89\u88c5 viostor \u9a71\u52a8\u7a0b\u5e8f
                        • \u5982\u679c\u4f60\u9700\u8981\u4f7f\u7528\u7f51\u7edc\u80fd\u529b\uff0c\u8bf7\u5b89\u88c5 NetKVM \u9a71\u52a8\u7a0b\u5e8f
                        apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n  labels:\n    virtnest.io/os-family: Windows\n    virtnest.io/os-version: '10'\n  name: windows10-virtio\n  namespace: default\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        name: win10-system-virtio\n        namespace: default\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 32Gi\n          storageClassName: local-path\n        source:\n          blank: {}\n  running: true\n  template:\n    metadata:\n      labels:\n        app: windows10-virtio\n        version: v1\n        kubevirt.io/domain: windows10-virtio\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 8\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio # \u4f7f\u7528 virtio\n              name: win10-system-virtio\n            - bootOrder: 2\n              cdrom:\n                bus: sata # \u5bf9\u4e8e ISO \u955c\u50cf\uff0c\u4f7f\u7528 sata\n              name: iso-win10\n            - bootOrder: 3\n              cdrom:\n                bus: sata # \u5bf9\u4e8e containerdisk\uff0c\u4f7f\u7528 sata\n              name: virtiocontainerdisk\n          interfaces:\n            - name: default\n              masquerade: {}\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 8G\n      networks:\n        - name: default\n          pod: {}\n      volumes:\n        - name: iso-win10\n          persistentVolumeClaim:\n            claimName: iso-win10\n        - name: win10-system-virtio\n          persistentVolumeClaim:\n            claimName: win10-system-virtio\n        - containerDisk:\n            image: kubevirt/virtio-container-disk\n          name: virtiocontainerdisk\n
2. (Not recommended) Combine Virtio drivers with the virtctl tool to import the image into a Persistent Volume Claim (PVC).

  apiVersion: kubevirt.io/v1
  kind: VirtualMachine
  metadata:
    annotations:
      kubevirt.io/latest-observed-api-version: v1
      kubevirt.io/storage-observed-api-version: v1
    labels:
      virtnest.io/os-family: Windows
      virtnest.io/os-version: '10'
    name: windows10-virtio
    namespace: default
  spec:
    dataVolumeTemplates:
      - metadata:
          name: win10-system-virtio
          namespace: default
        spec:
          pvc:
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 32Gi
            storageClassName: local-path
          source:
            blank: {}
    running: true
    template:
      metadata:
        labels:
          app: windows10-virtio
          version: v1
          kubevirt.io/domain: windows10-virtio
      spec:
        architecture: amd64
        domain:
          cpu:
            cores: 8
            sockets: 1
            threads: 1
          devices:
            disks:
              - bootOrder: 1
                # please use virtio
                disk:
                  bus: virtio
                name: win10-system-virtio
                # for the ISO image, please use sata
              - bootOrder: 2
                cdrom:
                  bus: sata
                name: iso-win10
                # for the containerdisk, please use sata
              - bootOrder: 3
                cdrom:
                  bus: sata
                name: virtiocontainerdisk
            interfaces:
              - name: default
                masquerade: {}
          machine:
            type: q35
          resources:
            requests:
              memory: 8G
        networks:
          - name: default
            pod: {}
        volumes:
          - name: iso-win10
            persistentVolumeClaim:
              claimName: iso-win10
          - name: win10-system-virtio
            persistentVolumeClaim:
              claimName: win10-system-virtio
          - containerDisk:
              image: kubevirt/virtio-container-disk
            name: virtiocontainerdisk
3. (Not recommended) Without Virtio drivers, use the virtctl tool to import the image into a Persistent Volume Claim (PVC). The VM may use other driver types or the default drivers to operate disk and network devices.

  apiVersion: kubevirt.io/v1
  kind: VirtualMachine
  metadata:
    annotations:
      kubevirt.io/latest-observed-api-version: v1
      kubevirt.io/storage-observed-api-version: v1
    labels:
      virtnest.io/os-family: Windows
      virtnest.io/os-version: '10'
    name: windows10
    namespace: default
  spec:
    dataVolumeTemplates:
      # create the system disk; you can create multiple PVCs (disks)
      - metadata:
          name: win10-system
          namespace: default
        spec:
          pvc:
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 32Gi
            storageClassName: local-path
          source:
            blank: {}
    running: true
    template:
      metadata:
        labels:
          app: windows10
          version: v1
          kubevirt.io/domain: windows10
      spec:
        architecture: amd64
        domain:
          cpu:
            cores: 8
            sockets: 1
            threads: 1
          devices:
            disks:
              - bootOrder: 1
                # no virtio driver, please use sata
                cdrom:
                  bus: sata
                name: win10-system
                # for the ISO image, please use sata
              - bootOrder: 2
                cdrom:
                  bus: sata
                name: iso-win10
            interfaces:
              - name: default
                masquerade: {}
          machine:
            type: q35
          resources:
            requests:
              memory: 8G
        networks:
          - name: default
            pod: {}
        volumes:
          - name: iso-win10
            persistentVolumeClaim:
              claimName: iso-win10
          - name: win10-system
            persistentVolumeClaim:
              claimName: win10-system
                      "},{"location":"admin/virtnest/best-practice/vm-windows.html#_2","title":"\u4e91\u684c\u9762","text":"

                      Windows \u7248\u672c\u7684\u4e91\u4e3b\u673a\u5927\u591a\u6570\u60c5\u51b5\u662f\u9700\u8981\u8fdc\u7a0b\u684c\u9762\u63a7\u5236\u8bbf\u95ee\u7684\uff0c\u5efa\u8bae\u4f7f\u7528 Microsoft \u8fdc\u7a0b\u684c\u9762\u63a7\u5236\u60a8\u7684\u4e91\u4e3b\u673a\u3002

                      Note

                      • \u4f60\u7684 Windows \u7248\u672c\u9700\u652f\u6301\u8fdc\u7a0b\u684c\u9762\u63a7\u5236\uff0c\u624d\u80fd\u4f7f\u7528 Microsoft \u8fdc\u7a0b\u684c\u9762\u3002
                      • \u9700\u8981\u5173\u95ed Windows \u7684\u9632\u706b\u5899\u3002
                      "},{"location":"admin/virtnest/best-practice/vm-windows.html#_3","title":"\u589e\u52a0\u6570\u636e\u76d8","text":"

                      Windows \u4e91\u4e3b\u673a\u6dfb\u52a0\u6570\u636e\u76d8\u7684\u65b9\u5f0f\u548c Linux \u4e91\u4e3b\u673a\u4e00\u81f4\u3002\u4f60\u53ef\u4ee5\u53c2\u8003\u4e0b\u9762\u7684 YAML \u793a\u4f8b\uff1a

                        apiVersion: kubevirt.io/v1\n  kind: VirtualMachine\n  <...>\n  spec:\n    dataVolumeTemplates:\n      # \u6dfb\u52a0\u4e00\u5757\u6570\u636e\u76d8\n      - metadata:\n        name: win10-disk\n        namespace: default\n        spec:\n          pvc:\n            accessModes:\n              - ReadWriteOnce\n            resources:\n              requests:\n                storage: 16Gi\n            storageClassName: hwameistor-storage-lvm-hdd\n          source:\n            blank: {}\n    template:\n      spec:\n        domain:\n          devices:\n            disks:\n              - bootOrder: 1\n                disk:\n                  bus: virtio\n                name: win10-system\n              # \u6dfb\u52a0\u4e00\u5757\u6570\u636e\u76d8\n              - bootOrder: 2\n                disk:\n                  bus: virtio\n                name: win10-disk\n            <....>\n        volumes:\n          <....>\n          # \u6dfb\u52a0\u4e00\u5757\u6570\u636e\u76d8\n          - name: win10-disk\n            persistentVolumeClaim:\n              claimName: win10-disk\n
                      "},{"location":"admin/virtnest/best-practice/vm-windows.html#_4","title":"\u5feb\u7167\u3001\u514b\u9686\u3001\u5b9e\u65f6\u8fc1\u79fb","text":"

                      \u8fd9\u4e9b\u80fd\u529b\u548c Linux \u4e91\u4e3b\u673a\u4e00\u81f4\uff0c\u53ef\u76f4\u63a5\u53c2\u8003\u914d\u7f6e Linux \u4e91\u4e3b\u673a\u7684\u65b9\u5f0f\u3002

                      "},{"location":"admin/virtnest/best-practice/vm-windows.html#windows_1","title":"\u8bbf\u95ee Windows \u4e91\u4e3b\u673a","text":"
                      1. \u521b\u5efa\u6210\u529f\u540e\uff0c\u8fdb\u5165\u4e91\u4e3b\u673a\u5217\u8868\u9875\u9762\uff0c\u53d1\u73b0\u4e91\u4e3b\u673a\u6b63\u5e38\u8fd0\u884c\u3002

                      2. \u70b9\u51fb\u63a7\u5236\u53f0\u8bbf\u95ee\uff08VNC\uff09\uff0c\u53ef\u4ee5\u6b63\u5e38\u8bbf\u95ee\u3002

                      "},{"location":"admin/virtnest/gpu/vm-gpu.html","title":"\u4e91\u4e3b\u673a\u914d\u7f6e GPU\uff08\u76f4\u901a\u6a21\u5f0f\uff09","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u5728\u521b\u5efa\u4e91\u4e3b\u673a\u65f6\uff0c\u914d\u7f6e GPU \u7684\u524d\u63d0\u6761\u4ef6\u3002

                      \u914d\u7f6e\u4e91\u4e3b\u673a\u7684 GPU \u7684\u91cd\u70b9\u662f\u5bf9 GPU Operator \u8fdb\u884c\u914d\u7f6e\uff0c\u4ee5\u4fbf\u5728\u5de5\u4f5c\u8282\u70b9\u4e0a\u90e8\u7f72\u4e0d\u540c\u7684\u8f6f\u4ef6\u7ec4\u4ef6\uff0c \u5177\u4f53\u53d6\u51b3\u4e8e\u8fd9\u4e9b\u8282\u70b9\u4e0a\u914d\u7f6e\u8fd0\u884c\u7684 GPU \u5de5\u4f5c\u8d1f\u8f7d\u3002\u4ee5\u4e0b\u4e09\u4e2a\u8282\u70b9\u4e3a\u4f8b\uff1a

                      • controller-node-1 \u8282\u70b9\u914d\u7f6e\u4e3a\u8fd0\u884c\u5bb9\u5668\u3002
                      • work-node-1 \u8282\u70b9\u914d\u7f6e\u4e3a\u8fd0\u884c\u5177\u6709\u76f4\u901a GPU \u7684\u4e91\u4e3b\u673a\u3002
                      • work-node-2 \u8282\u70b9\u914d\u7f6e\u4e3a\u8fd0\u884c\u5177\u6709\u865a\u62df vGPU \u7684\u4e91\u4e3b\u673a\u3002
                      "},{"location":"admin/virtnest/gpu/vm-gpu.html#_1","title":"\u5047\u8bbe\u3001\u9650\u5236\u548c\u4f9d\u8d56\u6027","text":"

                      \u5de5\u4f5c\u8282\u70b9\u53ef\u4ee5\u8fd0\u884c GPU \u52a0\u901f\u5bb9\u5668\uff0c\u4e5f\u53ef\u4ee5\u8fd0\u884c\u5177\u6709 GPU \u76f4\u901a\u7684 GPU \u52a0\u901f VM\uff0c\u6216\u8005\u5177\u6709 vGPU \u7684 GPU \u52a0\u901f VM\uff0c\u4f46\u4e0d\u80fd\u8fd0\u884c\u5176\u4e2d\u4efb\u4f55\u4e00\u4e2a\u7684\u7ec4\u5408\u3002

                      1. \u96c6\u7fa4\u7ba1\u7406\u5458\u6216\u5f00\u53d1\u4eba\u5458\u9700\u8981\u63d0\u524d\u4e86\u89e3\u96c6\u7fa4\u60c5\u51b5\uff0c\u5e76\u6b63\u786e\u6807\u8bb0\u8282\u70b9\u4ee5\u6307\u793a\u5b83\u4eec\u5c06\u8fd0\u884c\u7684 GPU \u5de5\u4f5c\u8d1f\u8f7d\u7c7b\u578b\u3002
                      2. \u8fd0\u884c\u5177\u6709 GPU \u76f4\u901a\u6216 vGPU \u7684 GPU \u52a0\u901f VM \u7684\u5de5\u4f5c\u8282\u70b9\u88ab\u5047\u5b9a\u4e3a\u88f8\u673a\uff0c\u5982\u679c\u5de5\u4f5c\u8282\u70b9\u662f\u4e91\u4e3b\u673a\uff0c \u5219\u9700\u8981\u5728\u4e91\u4e3b\u673a\u5e73\u53f0\u4e0a\u542f\u7528 GPU \u76f4\u901a\u529f\u80fd\uff0c\u8bf7\u5411\u4e91\u4e3b\u673a\u5e73\u53f0\u63d0\u4f9b\u5546\u54a8\u8be2\u3002
                      3. \u4e0d\u652f\u6301 Nvidia MIG \u7684 vGPU\u3002
                      4. GPU Operator \u4e0d\u4f1a\u81ea\u52a8\u5728 VM \u4e2d\u5b89\u88c5 GPU \u9a71\u52a8\u7a0b\u5e8f\u3002
                      "},{"location":"admin/virtnest/gpu/vm-gpu.html#iommu","title":"\u542f\u7528 IOMMU","text":"

                      \u4e3a\u4e86\u542f\u7528GPU\u76f4\u901a\u529f\u80fd\uff0c\u96c6\u7fa4\u8282\u70b9\u9700\u8981\u5f00\u542fIOMMU\u3002\u8bf7\u53c2\u8003\u5982\u4f55\u5f00\u542f IOMMU\u3002 \u5982\u679c\u60a8\u7684\u96c6\u7fa4\u662f\u5728\u4e91\u4e3b\u673a\u4e0a\u8fd0\u884c\uff0c\u8bf7\u54a8\u8be2\u60a8\u7684\u4e91\u4e3b\u673a\u5e73\u53f0\u63d0\u4f9b\u5546\u3002
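
On a typical GRUB-based Linux node this means adding the IOMMU flags to the kernel command line and rebooting. A sketch for Intel hosts; use amd_iommu=on on AMD hosts, and note that the exact steps vary by distribution:

  # /etc/default/grub — append to the existing GRUB_CMDLINE_LINUX value:
  #   GRUB_CMDLINE_LINUX="... intel_iommu=on iommu=pt"
  sudo update-grub   # or: grub2-mkconfig -o /boot/grub2/grub.cfg on RHEL-family systems
  sudo reboot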

                      "},{"location":"admin/virtnest/gpu/vm-gpu.html#_2","title":"\u6807\u8bb0\u96c6\u7fa4\u8282\u70b9","text":"

                      \u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 \uff0c\u9009\u53d6\u60a8\u7684\u5de5\u4f5c\u96c6\u7fa4\uff0c\u70b9\u51fb \u8282\u70b9\u7ba1\u7406 \u7684\u64cd\u4f5c\u680f \u4fee\u6539\u6807\u7b7e \uff0c\u4e3a\u8282\u70b9\u6dfb\u52a0\u6807\u7b7e\uff0c\u6bcf\u4e2a\u8282\u70b9\u53ea\u80fd\u6709\u4e00\u79cd\u6807\u7b7e\u3002

                      \u60a8\u53ef\u4ee5\u4e3a\u6807\u7b7e\u5206\u914d\u4ee5\u4e0b\u503c\uff1acontainer\u3001vm-passthrough \u548c vm-vgpu\u3002
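
Equivalently, the labels can be applied with kubectl. The key is the nvidia.com/gpu.workload.config key that the later steps on this page query; the node names are the example nodes above:

  kubectl label node controller-node-1 nvidia.com/gpu.workload.config=container
  kubectl label node work-node-1 nvidia.com/gpu.workload.config=vm-passthrough
  kubectl label node work-node-2 nvidia.com/gpu.workload.config=vm-vgpu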

                      "},{"location":"admin/virtnest/gpu/vm-gpu.html#nvidia-operator","title":"\u5b89\u88c5 Nvidia Operator","text":"
                      1. \u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 \uff0c\u9009\u53d6\u60a8\u7684\u5de5\u4f5c\u96c6\u7fa4\uff0c\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u6a21\u677f \uff0c\u9009\u62e9\u5e76\u5b89\u88c5 gpu-operator\u3002 \u9700\u8981\u4fee\u6539\u4e00\u4e9b yaml \u4e2d\u7684\u76f8\u5173\u5b57\u6bb5\u3002

                        gpu-operator.sandboxWorkloads.enabled=true\ngpu-operator.vfioManager.enabled=true\ngpu-operator.sandboxDevicePlugin.enabled=true\ngpu-operator.sandboxDevicePlugin.version=v1.2.4   # (1)!\ngpu-operator.toolkit.version=v1.14.3-ubuntu20.04\n
                        1. version \u9700\u8981 >= v1.2.4
2. Wait for the installation to succeed, as shown in the figure below:

                      "},{"location":"admin/virtnest/gpu/vm-gpu.html#virtnest-agent-cr","title":"\u5b89\u88c5 virtnest-agent \u5e76\u914d\u7f6e CR","text":"
                      1. \u5b89\u88c5 virtnest-agent\uff0c\u53c2\u8003\u5b89\u88c5 virtnest-agent\u3002

                      2. \u5c06 vGPU \u548c GPU \u76f4\u901a\u52a0\u5165 Virtnest Kubevirt CR\uff0c\u4ee5\u4e0b\u793a\u4f8b\u662f\u6dfb\u52a0 vGPU \u548c GPU \u76f4\u901a\u540e\u7684 \u90e8\u5206\u5173\u952e yaml\uff1a

                        spec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n      - GPU\n      - DisableMDEVConfiguration\n    permittedHostDevices: # (1)!\n      mediatedDevices:            # (2)!\n      - mdevNameSelector: GRID P4-1Q\n        resourceName: nvidia.com /GRID_P4-1Q\n      pciHostDevices:             # (3)!\n      - externalResourceProvider:  true\n        pciVendorSelector: 10DE:1BB3\n        resourceName: nvidia.com /GP104GL_TESLA_P4\n
  1. The information to fill in is described below
  2. vGPU
  3. GPU passthrough
3. In the kubevirt CR YAML, permittedHostDevices is used to import VM devices; for vGPU, add mediatedDevices under it, with the following structure:

                        mediatedDevices:          \n- mdevNameSelector: GRID P4-1Q          # (1)!\n  resourceName: nvidia.com/GRID_P4-1Q   # (2)!\n
  1. The device name
  2. The vGPU information that the GPU Operator registered on the node
4. For GPU passthrough, add pciHostDevices under permittedHostDevices, with the following structure:

                        pciHostDevices:           \n- externalResourceProvider: true            # (1)!\n  pciVendorSelector: 10DE:1BB3              # (2)!\n  resourceName: nvidia.com/GP104GL_TESLA_P4 # (3)!\n
  1. Do not change this default
  2. The vendor ID of the current PCI device
  3. The GPU information that the GPU Operator registered on the node
5. Example of obtaining vGPU information (applies to vGPU only): on a node labeled nvidia.com/gpu.workload.config=vm-vgpu (for example work-node-2), inspect the node information; nvidia.com/GRID_P4-1Q: 8 under Capacity indicates the available vGPUs:

                        kubectl describe node work-node-2\n
  Capacity:\n  cpu:                                 64\n  devices.kubevirt.io/kvm:             1k\n  devices.kubevirt.io/tun:             1k\n  devices.kubevirt.io/vhost-net:       1k\n  ephemeral-storage:                   102626232Ki\n  hugepages-1Gi:                       0\n  hugepages-2Mi:                       0\n  memory:                              264010840Ki\n  nvidia.com/GRID_P4-1Q:               8\n  pods:                                110\nAllocatable:\n  cpu:                                  64\n  devices.kubevirt.io/kvm:              1k\n  devices.kubevirt.io/tun:              1k\n  devices.kubevirt.io/vhost-net:        1k\n  ephemeral-storage:                    94580335255\n  hugepages-1Gi:                        0\n  hugepages-2Mi:                        0\n  memory:                               263908440Ki\n  nvidia.com/GRID_P4-1Q:                8\n  pods:                                 110\n

mdevNameSelector should then be \"GRID P4-1Q\" and resourceName should be \"GRID_P4-1Q\" (a quicker way to query these registered resources is sketched after this list).

6. Obtaining GPU passthrough information: on a node labeled nvidia.com/gpu.workload.config=vm-passthrough (work-node-1 in this example), inspect the node information; nvidia.com/GP104GL_TESLA_P4: 2 under Capacity indicates the available passthrough GPUs:

                        kubectl describe node work-node-1\n
                        Capacity:\n  cpu:                            64\n  devices.kubevirt.io/kvm:        1k\n  devices.kubevirt.io/tun:        1k\n  devices.kubevirt.io/vhost-net:  1k\n  ephemeral-storage:              102626232Ki\n  hugepages-1Gi:                  0\n  hugepages-2Mi:                  0\n  memory:                         264010840Ki\n  nvidia.com/GP104GL_TESLA_P4:    2\n  pods:                           110\nAllocatable:\n  cpu:                            64\n  devices.kubevirt.io/kvm:        1k\n  devices.kubevirt.io/tun:        1k\n  devices.kubevirt.io/vhost-net:  1k\n  ephemeral-storage:              94580335255\n  hugepages-1Gi:                  0\n  hugepages-2Mi:                  0\n  memory:                         263908440Ki\n  nvidia.com/GP104GL_TESLA_P4:    2\n  pods:                           110\n

resourceName should then be \"GP104GL_TESLA_P4\". How do you obtain pciVendorSelector? SSH into the target node work-node-1 and run lspci -nnk -d 10de: to get the Nvidia GPU PCI information; the vendor:device ID pair in the output (10de:1bb3 in this example) is the pciVendorSelector value.

7. A tip for editing the kubevirt CR: if there are multiple GPUs of the same model, you only need to declare one of them in the CR; there is no need to list every GPU.

                        kubectl -n virtnest-system edit kubevirt kubevirt\n
                        spec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n      - GPU\n      - DisableMDEVConfiguration\n    permittedHostDevices: # (1)!\n      mediatedDevices:                    # (2)!\n      - mdevNameSelector: GRID P4-1Q\n        resourceName: nvidia.com/GRID_P4-1Q\n      pciHostDevices:                     # (3)!\n      - externalResourceProvider: true\n        pciVendorSelector: 10DE:1BB3\n        resourceName: nvidia.com/GP104GL_TESLA_P4 \n

  1. The information to fill in is described below
  2. vGPU
  3. GPU passthrough; in the example above there are two TESLA P4 GPUs, and only one needs to be registered here
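As referenced in step 5, here is a short sketch for querying the registered device resources directly instead of scanning the full kubectl describe output (resource names taken from the examples above; dots inside a resource name must be escaped in jsonpath):

kubectl get node work-node-2 -o jsonpath='{.status.allocatable.nvidia\.com/GRID_P4-1Q}'\nkubectl get node work-node-1 -o jsonpath='{.status.allocatable.nvidia\.com/GP104GL_TESLA_P4}'\n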
                      "},{"location":"admin/virtnest/gpu/vm-gpu.html#yaml-vm-gpu","title":"\u901a\u8fc7 YAML \u521b\u5efa VM \u5e76\u4f7f\u7528 GPU \u52a0\u901f","text":"

The only difference from an ordinary VM is adding the GPU-related information under devices.

Click to view the complete YAML
apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  name: testvm-gpu1\n  namespace: default\nspec:\n  dataVolumeTemplates:\n  - metadata:\n      creationTimestamp: null\n      name: systemdisk-testvm-gpu1\n      namespace: default\n    spec:\n      pvc:\n        accessModes:\n        - ReadWriteOnce\n        resources:\n          requests:\n            storage: 10Gi\n        storageClassName: www\n      source:\n        registry:\n          url: docker://release-ci.daocloud.io/virtnest/system-images/debian-12-x86_64:v1\n  runStrategy: Manual\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n          - bootOrder: 1\n            disk:\n              bus: virtio\n            name: systemdisk-testvm-gpu1\n          - disk:\n              bus: virtio\n            name: cloudinitdisk\n          gpus:\n          - deviceName: nvidia.com/GP104GL_TESLA_P4\n            name: gpu-0-0\n          - deviceName: nvidia.com/GP104GL_TESLA_P4\n            name: gpu-0-1\n          interfaces:\n          - masquerade: {}\n            name: default\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n      - name: default\n        pod: {}\n      volumes:\n      - dataVolume:\n          name: systemdisk-testvm-gpu1\n        name: systemdisk-testvm-gpu1\n      - cloudInitNoCloud:\n          userDataBase64: I2Nsb3VkLWNvbmZpZwpzc2hfcHdhdXRoOiB0cnVlCmRpc2FibGVfcm9vdDogZmFsc2UKY2hwYXNzd2Q6IHsibGlzdCI6ICJyb290OmRhbmdlcm91cyIsIGV4cGlyZTogRmFsc2V9CgoKcnVuY21kOgogIC0gc2VkIC1pICIvI1w/UGVybWl0Um9vdExvZ2luL3MvXi4qJC9QZXJtaXRSb290TG9naW4geWVzL2ciIC9ldGMvc3NoL3NzaGRfY29uZmlnCiAgLSBzeXN0ZW1jdGwgcmVzdGFydCBzc2guc2VydmljZQ==\n        name: cloudinitdisk\n
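To create and boot a VM from this YAML, a minimal sketch (the file name is hypothetical; virtctl is the standard KubeVirt client, and an explicit start is needed because runStrategy is Manual):

kubectl apply -f testvm-gpu1.yaml\nvirtctl start testvm-gpu1 -n default\n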
                      "},{"location":"admin/virtnest/gpu/vm-vgpu.html","title":"\u4e91\u4e3b\u673a\u914d\u7f6e GPU\uff08vGPU\uff09","text":"

This page describes the prerequisites for configuring GPU when creating a virtual machine (VM).

The focus of configuring GPU for VMs is configuring the GPU Operator so that it deploys different software components onto the worker nodes, depending on which GPU workload those nodes are configured to run. Take the following three nodes as an example:

• The controller-node-1 node is configured to run containers.
• The work-node-1 node is configured to run VMs with GPU passthrough.
• The work-node-2 node is configured to run VMs with vGPU.
                      "},{"location":"admin/virtnest/gpu/vm-vgpu.html#_1","title":"\u5047\u8bbe\u3001\u9650\u5236\u548c\u4f9d\u8d56\u6027","text":"


1. A worker node can run GPU-accelerated containers, GPU-accelerated VMs with GPU passthrough, or GPU-accelerated VMs with vGPU, but not a combination of any of them.
2. Cluster administrators or developers need to understand the cluster in advance and correctly label the nodes to indicate the type of GPU workload they will run.
3. A worker node that runs GPU-accelerated VMs with GPU passthrough or vGPU is assumed to be bare metal; if the worker node is itself a virtual machine, GPU passthrough must be enabled on the VM platform. Consult your VM platform provider.
4. vGPU on Nvidia MIG is not supported.
5. The GPU Operator does not automatically install GPU drivers inside the VM.
                      "},{"location":"admin/virtnest/gpu/vm-vgpu.html#iommu","title":"\u542f\u7528 IOMMU","text":"

To enable GPU passthrough, the cluster nodes must have IOMMU turned on; refer to How to enable IOMMU. If your cluster runs on virtual machines, consult your VM platform provider.

                      "},{"location":"admin/virtnest/gpu/vm-vgpu.html#vgpu-manager","title":"\u6784\u5efa vGPU Manager \u955c\u50cf","text":"

Note: you only need to build the vGPU Manager image when using NVIDIA vGPU. If you plan to use GPU passthrough only, skip this section.

The steps to build the vGPU Manager image and push it to an image registry are as follows:

1. Download the vGPU software from the NVIDIA Licensing Portal.

  • Log in to the NVIDIA Licensing Portal and go to the Software Downloads page.
  • The NVIDIA vGPU software is located under the Driver downloads tab of the Software Downloads page.
  • In the filters select VGPU + Linux, then click Download to get the Linux KVM package. Unpack the downloaded file (NVIDIA-Linux-x86_64-<version>-vgpu-kvm.run).

2. Open a terminal and clone the container-images/driver repository:

  git clone https://gitlab.com/nvidia/container-images/driver\ncd driver\n
3. Change to the vgpu-manager directory corresponding to your operating system:

                        cd vgpu-manager/<your-os>\n
4. Copy the .run file extracted in step 1 into the current directory:

                        cp <local-driver-download-directory>/*-vgpu-kvm.run ./\n
5. Set the environment variables:

  • PRIVATE_REGISTRY: name of the private registry used to store the driver image.
  • VERSION: version of the NVIDIA vGPU Manager, as downloaded from the NVIDIA software portal.
  • OS_TAG: must match the operating system version of the cluster nodes.
  • CUDA_VERSION: version of the CUDA base image used to build the driver image.
                        export PRIVATE_REGISTRY=my/private/registry VERSION=510.73.06 OS_TAG=ubuntu22.04 CUDA_VERSION=12.2.0\n
6. Build the NVIDIA vGPU Manager image:

                        docker build \\\n  --build-arg DRIVER_VERSION=${VERSION} \\\n  --build-arg CUDA_VERSION=${CUDA_VERSION} \\\n  -t ${PRIVATE_REGISTRY}/vgpu-manager:${VERSION}-${OS_TAG} .\n
7. Push the NVIDIA vGPU Manager image to your registry:

                        docker push ${PRIVATE_REGISTRY}/vgpu-manager:${VERSION}-${OS_TAG}\n
                      "},{"location":"admin/virtnest/gpu/vm-vgpu.html#_2","title":"\u6807\u8bb0\u96c6\u7fa4\u8282\u70b9","text":"

Go to Container Management, select your worker cluster, then click Node Management to open the list page. Click ┇ on the right of the list, select Modify Labels, and add a label to each node. Each node can carry only one of these labels.

You can assign the label one of the following values: container, vm-passthrough, or vm-vgpu.

                      "},{"location":"admin/virtnest/gpu/vm-vgpu.html#nvidia-operator","title":"\u5b89\u88c5 Nvidia Operator","text":"
                      1. \u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 \uff0c\u9009\u53d6\u60a8\u7684\u5de5\u4f5c\u96c6\u7fa4\uff0c\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u6a21\u677f \uff0c\u9009\u62e9\u5e76\u5b89\u88c5 gpu-operator\u3002\u9700\u8981\u4fee\u6539\u4e00\u4e9b yaml \u4e2d\u7684\u76f8\u5173\u5b57\u6bb5\u3002

                        gpu-operator.sandboxWorkloads.enabled=true\ngpu-operator.vgpuManager.enabled=true\ngpu-operator.vgpuManager.repository=<your-register-url>      # (1)!\ngpu-operator.vgpuManager.image=vgpu-manager\ngpu-operator.vgpuManager.version=<your-vgpu-manager-version> # (2)!\ngpu-operator.vgpuDeviceManager.enabled=true\n
  1. The registry address from the \"Build the vGPU Manager image\" step
  2. The VERSION from the \"Build the vGPU Manager image\" step
2. Wait for the installation to complete successfully.

                      "},{"location":"admin/virtnest/gpu/vm-vgpu.html#virtnest-agent-cr","title":"\u5b89\u88c5 virtnest-agent \u5e76\u914d\u7f6e CR","text":"
                      1. \u5b89\u88c5 virtnest-agent\uff0c\u53c2\u8003\u5b89\u88c5 virtnest-agent\u3002

2. Add vGPU and GPU passthrough to the Virtnest KubeVirt CR. The following example shows the key parts of the YAML after adding vGPU and GPU passthrough:

  spec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n      - GPU\n      - DisableMDEVConfiguration\n    permittedHostDevices: # (1)!\n      mediatedDevices:            # (2)!\n      - mdevNameSelector: GRID P4-1Q\n        resourceName: nvidia.com/GRID_P4-1Q\n      pciHostDevices:             # (3)!\n      - externalResourceProvider: true\n        pciVendorSelector: 10DE:1BB3\n        resourceName: nvidia.com/GP104GL_TESLA_P4\n
  1. The information to fill in is described below
  2. vGPU
  3. GPU passthrough
3. In the kubevirt CR YAML, permittedHostDevices is used to import VM devices; for vGPU, add mediatedDevices under it, with the following structure:

                        mediatedDevices:          \n- mdevNameSelector: GRID P4-1Q          # (1)!\n  resourceName: nvidia.com/GRID_P4-1Q   # (2)!\n
  1. The device name
  2. The vGPU information that the GPU Operator registered on the node
4. For GPU passthrough, add pciHostDevices under permittedHostDevices, with the following structure:

                        pciHostDevices:           \n- externalResourceProvider: true            # (1)!\n  pciVendorSelector: 10DE:1BB3              # (2)!\n  resourceName: nvidia.com/GP104GL_TESLA_P4 # (3)!\n
  1. Do not change this default
  2. The vendor ID of the current PCI device
  3. The GPU information that the GPU Operator registered on the node
5. Example of obtaining vGPU information (applies to vGPU only): on a node labeled nvidia.com/gpu.workload.config=vm-vgpu (for example work-node-2), inspect the node information; nvidia.com/GRID_P4-1Q: 8 under Capacity indicates the available vGPUs:

                        kubectl describe node work-node-2\n
  Capacity:\n  cpu:                                 64\n  devices.kubevirt.io/kvm:             1k\n  devices.kubevirt.io/tun:             1k\n  devices.kubevirt.io/vhost-net:       1k\n  ephemeral-storage:                   102626232Ki\n  hugepages-1Gi:                       0\n  hugepages-2Mi:                       0\n  memory:                              264010840Ki\n  nvidia.com/GRID_P4-1Q:               8\n  pods:                                110\nAllocatable:\n  cpu:                                  64\n  devices.kubevirt.io/kvm:              1k\n  devices.kubevirt.io/tun:              1k\n  devices.kubevirt.io/vhost-net:        1k\n  ephemeral-storage:                    94580335255\n  hugepages-1Gi:                        0\n  hugepages-2Mi:                        0\n  memory:                               263908440Ki\n  nvidia.com/GRID_P4-1Q:                8\n  pods:                                 110\n

mdevNameSelector should then be \"GRID P4-1Q\" and resourceName should be \"GRID_P4-1Q\".

6. Obtaining GPU passthrough information: on a node labeled nvidia.com/gpu.workload.config=vm-passthrough (work-node-1 in this example), inspect the node information; nvidia.com/GP104GL_TESLA_P4: 2 under Capacity indicates the available passthrough GPUs:

                        kubectl describe node work-node-1\n
                        Capacity:\n  cpu:                            64\n  devices.kubevirt.io/kvm:        1k\n  devices.kubevirt.io/tun:        1k\n  devices.kubevirt.io/vhost-net:  1k\n  ephemeral-storage:              102626232Ki\n  hugepages-1Gi:                  0\n  hugepages-2Mi:                  0\n  memory:                         264010840Ki\n  nvidia.com/GP104GL_TESLA_P4:    2\n  pods:                           110\nAllocatable:\n  cpu:                            64\n  devices.kubevirt.io/kvm:        1k\n  devices.kubevirt.io/tun:        1k\n  devices.kubevirt.io/vhost-net:  1k\n  ephemeral-storage:              94580335255\n  hugepages-1Gi:                  0\n  hugepages-2Mi:                  0\n  memory:                         263908440Ki\n  nvidia.com/GP104GL_TESLA_P4:    2\n  pods:                           110\n

resourceName should then be \"GP104GL_TESLA_P4\". How do you obtain pciVendorSelector? SSH into the target node work-node-1 and run lspci -nnk -d 10de: to get the Nvidia GPU PCI information; the vendor:device ID pair in the output (10de:1bb3 in this example) is the pciVendorSelector value.

7. A tip for editing the kubevirt CR: if there are multiple GPUs of the same model, you only need to declare one of them in the CR; there is no need to list every GPU.

                        kubectl -n virtnest-system edit kubevirt kubevirt\n
                        spec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n      - GPU\n      - DisableMDEVConfiguration\n    permittedHostDevices: # (1)!\n      mediatedDevices:                    # (2)!\n      - mdevNameSelector: GRID P4-1Q\n        resourceName: nvidia.com/GRID_P4-1Q\n      pciHostDevices:                       # (3)!\n      - externalResourceProvider: true\n        pciVendorSelector: 10DE:1BB3\n        resourceName: nvidia.com/GP104GL_TESLA_P4 \n

  1. The information to fill in is described below
  2. vGPU
  3. GPU passthrough; in the example above there are two TESLA P4 GPUs, and only one needs to be registered here
                      "},{"location":"admin/virtnest/gpu/vm-vgpu.html#yaml-vm-gpu","title":"\u901a\u8fc7 YAML \u521b\u5efa VM \u5e76\u4f7f\u7528 GPU \u52a0\u901f","text":"

The only difference from an ordinary VM is adding the GPU-related information under devices.

Click to view the complete YAML
apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  name: testvm-gpu1\n  namespace: default\nspec:\n  dataVolumeTemplates:\n  - metadata:\n      creationTimestamp: null\n      name: systemdisk-testvm-gpu1\n      namespace: default\n    spec:\n      pvc:\n        accessModes:\n        - ReadWriteOnce\n        resources:\n          requests:\n            storage: 10Gi\n        storageClassName: www\n      source:\n        registry:\n          url: docker://release-ci.daocloud.io/virtnest/system-images/debian-12-x86_64:v1\n  runStrategy: Manual\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n          - bootOrder: 1\n            disk:\n              bus: virtio\n            name: systemdisk-testvm-gpu1\n          - disk:\n              bus: virtio\n            name: cloudinitdisk\n          gpus:\n          - deviceName: nvidia.com/GP104GL_TESLA_P4\n            name: gpu-0-0\n          - deviceName: nvidia.com/GP104GL_TESLA_P4\n            name: gpu-0-1\n          interfaces:\n          - masquerade: {}\n            name: default\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n      - name: default\n        pod: {}\n      volumes:\n      - dataVolume:\n          name: systemdisk-testvm-gpu1\n        name: systemdisk-testvm-gpu1\n      - cloudInitNoCloud:\n          userDataBase64: I2Nsb3VkLWNvbmZpZwpzc2hfcHdhdXRoOiB0cnVlCmRpc2FibGVfcm9vdDogZmFsc2UKY2hwYXNzd2Q6IHsibGlzdCI6ICJyb290OmRhbmdlcm91cyIsIGV4cGlyZTogRmFsc2V9CgoKcnVuY21kOgogIC0gc2VkIC1pICIvI1w/UGVybWl0Um9vdExvZ2luL3MvXi4qJC9QZXJtaXRSb290TG9naW4geWVzL2ciIC9ldGMvc3NoL3NzaGRfY29uZmlnCiAgLSBzeXN0ZW1jdGwgcmVzdGFydCBzc2guc2VydmljZQ==\n        name: cloudinitdisk\n
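Note that the full YAML above consumes the passthrough resource. For a vGPU-backed VM, the gpus stanza would instead reference the vGPU resource name registered earlier; a sketch based on the GRID P4-1Q example above:

gpus:\n- deviceName: nvidia.com/GRID_P4-1Q\n  name: vgpu-0-0\n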
                      "},{"location":"admin/virtnest/install/index.html","title":"\u5b89\u88c5\u4e91\u4e3b\u673a\u6a21\u5757","text":"

                      \u672c\u9875\u8bf4\u660e\u5982\u4f55\u5b89\u88c5\u4e91\u4e3b\u673a\u6a21\u5757\u3002

                      Info

The word virtnest appearing in the commands or scripts below is the internal development codename of the virtual machine module.

                      "},{"location":"admin/virtnest/install/index.html#virtnest-helm","title":"\u914d\u7f6e virtnest helm \u4ed3\u5e93","text":"

                      helm-charts \u4ed3\u5e93\u5730\u5740\uff1ahttps://release.daocloud.io/harbor/projects/10/helm-charts/virtnest/versions

                      helm repo add virtnest-release https://release.daocloud.io/chartrepo/virtnest\nhelm repo update virtnest-release\n

If you want to try the latest development build of virtnest, add the following repository address (development builds of virtnest are extremely unstable):

                      helm repo add virtnest-release-ci https://release-ci.daocloud.io/chartrepo/virtnest\nhelm repo update virtnest-release-ci\n
                      "},{"location":"admin/virtnest/install/index.html#virtnest","title":"\u9009\u62e9\u60a8\u60f3\u5b89\u88c5\u7684 virtnest \u7248\u672c","text":"

                      \u5efa\u8bae\u5b89\u88c5\u6700\u65b0\u7248\u672c\u3002

                      [root@master ~]# helm search repo virtnest-release/virtnest --versions\nNAME                   CHART VERSION  APP VERSION  DESCRIPTION\nvirtnest-release/virtnest  0.6.0          v0.6.0       A Helm chart for virtnest\n
                      "},{"location":"admin/virtnest/install/index.html#namespace","title":"\u521b\u5efa namespace","text":"
                      kubectl create namespace virtnest-system\n
                      "},{"location":"admin/virtnest/install/index.html#_2","title":"\u6267\u884c\u5b89\u88c5\u6b65\u9aa4","text":"
                      helm install virtnest virtnest-release/virtnest -n virtnest-system --version 0.6.0\n
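After the install completes, a quick sanity check that the module's pods are running (the namespace comes from the step above):

kubectl -n virtnest-system get pods\n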
                      "},{"location":"admin/virtnest/install/index.html#_3","title":"\u5347\u7ea7","text":""},{"location":"admin/virtnest/install/index.html#virtnest-helm_1","title":"\u66f4\u65b0 virtnest helm \u4ed3\u5e93","text":"
                      helm repo update virtnest-release\n
                      "},{"location":"admin/virtnest/install/index.html#-set","title":"\u5907\u4efd --set \u53c2\u6570","text":"

                      \u5728\u5347\u7ea7 virtnest \u7248\u672c\u4e4b\u524d\uff0c\u6211\u4eec\u5efa\u8bae\u60a8\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c\u5907\u4efd\u4e0a\u4e00\u4e2a\u7248\u672c\u7684 --set \u53c2\u6570

                      helm get values virtnest -n virtnest-system -o yaml > bak.yaml\n
                      "},{"location":"admin/virtnest/install/index.html#helm-upgrade","title":"\u6267\u884c helm upgrade","text":"
                      helm upgrade virtnest virtnest-release/virtnest \\\n    -n virtnest-system \\\n    -f ./bak.yaml \\\n    --version 0.6.0\n
                      "},{"location":"admin/virtnest/install/index.html#_4","title":"\u5378\u8f7d","text":"
                      helm delete virtnest -n virtnest-system\n
                      "},{"location":"admin/virtnest/install/install-dependency.html","title":"\u5b89\u88c5\u4f9d\u8d56\u548c\u524d\u63d0\u6761\u4ef6","text":"

                      \u672c\u9875\u8bf4\u660e\u5b89\u88c5\u4e91\u4e3b\u673a\u6a21\u5757\u7684\u4f9d\u8d56\u548c\u524d\u63d0\u6761\u4ef6\u3002

                      Info

The word virtnest appearing in the commands or scripts below is the internal development codename of the virtual machine module.

                      "},{"location":"admin/virtnest/install/install-dependency.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":""},{"location":"admin/virtnest/install/install-dependency.html#411","title":"\u64cd\u4f5c\u7cfb\u7edf\u5185\u6838\u7248\u672c\u9700\u8981\u5728 4.11 \u4ee5\u4e0a","text":"

                      \u76ee\u6807\u96c6\u7fa4\u6240\u6709\u8282\u70b9\u7684\u64cd\u4f5c\u7cfb\u7edf\u5185\u6838\u7248\u672c\u9700\u8981\u5927\u4e8e 4.11\uff08\u8be6\u89c1 kubevirt issue\uff09\u3002 \u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\u67e5\u770b\u5185\u6838\u7248\u672c\uff1a

                      uname -a\n

Example output:

                      Linux master 6.5.3-1.el7.elrepo.x86_64 #1 SMP PREEMPT_DYNAMIC Wed Sep 13 11:46:28 EDT 2023 x86_64 x86_64 x86_64 GNU/Linux\n
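To check every node at once instead of running uname on each one, a kubectl sketch using the standard Node status fields:

kubectl get nodes -o custom-columns=NAME:.metadata.name,KERNEL:.status.nodeInfo.kernelVersion\n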
                      "},{"location":"admin/virtnest/install/install-dependency.html#cpu-x86-64-v2","title":"CPU \u9700\u652f\u6301 x86-64-v2 \u53ca\u4ee5\u4e0a\u7684\u6307\u4ee4\u96c6","text":"

                      \u4f7f\u7528\u4ee5\u4e0b\u811a\u672c\u68c0\u67e5\u5f53\u524d\u8282\u70b9\u7684 CPU \u662f\u5426\u652f\u6301\uff1a

                      Note

If an error unrelated to the output appears (as shown below), it can be ignored; it does not affect the final result.

Example
                      $ sh detect-cpu.sh\ndetect-cpu.sh: line 3: fpu: command not found\n
cat <<'EOF' > detect-cpu.sh\n#!/bin/sh -eu\n\nflags=$(cat /proc/cpuinfo | grep flags | head -n 1 | cut -d: -f2)\n\nsupports_v2='awk \"/cx16/&&/lahf/&&/popcnt/&&/sse4_1/&&/sse4_2/&&/ssse3/ {found=1} END {exit !found}\"'\nsupports_v3='awk \"/avx/&&/avx2/&&/bmi1/&&/bmi2/&&/f16c/&&/fma/&&/abm/&&/movbe/&&/xsave/ {found=1} END {exit !found}\"'\nsupports_v4='awk \"/avx512f/&&/avx512bw/&&/avx512cd/&&/avx512dq/&&/avx512vl/ {found=1} END {exit !found}\"'\n\necho \"$flags\" | eval $supports_v2 || exit 2 && echo \"CPU supports x86-64-v2\"\necho \"$flags\" | eval $supports_v3 || exit 3 && echo \"CPU supports x86-64-v3\"\necho \"$flags\" | eval $supports_v4 || exit 4 && echo \"CPU supports x86-64-v4\"\nEOF\nchmod +x detect-cpu.sh\nsh detect-cpu.sh\n
                      "},{"location":"admin/virtnest/install/install-dependency.html#_3","title":"\u6240\u6709\u8282\u70b9\u5fc5\u987b\u542f\u7528\u786c\u4ef6\u865a\u62df\u5316\uff08\u5d4c\u5957\u865a\u62df\u5316\uff09","text":"
                      • \u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\u68c0\u67e5\uff1a

                        virt-host-validate qemu\n
  # Successful case\nQEMU: Checking for hardware virtualization                                 : PASS\nQEMU: Checking if device /dev/kvm exists                                   : PASS\nQEMU: Checking if device /dev/kvm is accessible                            : PASS\nQEMU: Checking if device /dev/vhost-net exists                             : PASS\nQEMU: Checking if device /dev/net/tun exists                               : PASS\nQEMU: Checking for cgroup 'cpu' controller support                         : PASS\nQEMU: Checking for cgroup 'cpuacct' controller support                     : PASS\nQEMU: Checking for cgroup 'cpuset' controller support                      : PASS\nQEMU: Checking for cgroup 'memory' controller support                      : PASS\nQEMU: Checking for cgroup 'devices' controller support                     : PASS\nQEMU: Checking for cgroup 'blkio' controller support                       : PASS\nQEMU: Checking for device assignment IOMMU support                         : PASS\nQEMU: Checking if IOMMU is enabled by kernel                               : PASS\nQEMU: Checking for secure guest support                                    : WARN (Unknown if this platform has Secure Guest support)\n\n# Failing case\nQEMU: Checking for hardware virtualization                                 : FAIL (Only emulated CPUs are available, performance will be significantly limited)\nQEMU: Checking if device /dev/vhost-net exists                             : PASS\nQEMU: Checking if device /dev/net/tun exists                               : PASS\nQEMU: Checking for cgroup 'memory' controller support                      : PASS\nQEMU: Checking for cgroup 'memory' controller mount-point                  : PASS\nQEMU: Checking for cgroup 'cpu' controller support                         : PASS\nQEMU: Checking for cgroup 'cpu' controller mount-point                     : PASS\nQEMU: Checking for cgroup 'cpuacct' controller support                     : PASS\nQEMU: Checking for cgroup 'cpuacct' controller mount-point                 : PASS\nQEMU: Checking for cgroup 'cpuset' controller support                      : PASS\nQEMU: Checking for cgroup 'cpuset' controller mount-point                  : PASS\nQEMU: Checking for cgroup 'devices' controller support                     : PASS\nQEMU: Checking for cgroup 'devices' controller mount-point                 : PASS\nQEMU: Checking for cgroup 'blkio' controller support                       : PASS\nQEMU: Checking for cgroup 'blkio' controller mount-point                   : PASS\nWARN (Unknown if this platform has IOMMU support)\n
• Install virt-host-validate:

  On CentOS:
  yum install -y qemu-kvm libvirt virt-install bridge-utils\n
  On Ubuntu:
  apt install qemu-kvm libvirt-daemon-system libvirt-clients bridge-utils\n
• How to enable hardware virtualization:

  The method differs per platform. Taking vSphere as an example, refer to the official VMware documentation.

                      "},{"location":"admin/virtnest/install/install-dependency.html#docker-engine","title":"\u5982\u679c\u4f7f\u7528 Docker Engine \u4f5c\u4e3a\u5bb9\u5668\u8fd0\u884c\u65f6","text":"

                      \u5982\u679c\u96c6\u7fa4\u4f7f\u7528 Docker Engine \u4f5c\u4e3a\u5bb9\u5668\u8fd0\u884c\u65f6\uff0c\u5219 Docker Engine \u7248\u672c\u9700\u8981\u5927\u4e8e 20.10.10
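A quick way to verify the version on a node with the standard docker CLI:

docker version --format '{{.Server.Version}}'\n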

                      "},{"location":"admin/virtnest/install/install-dependency.html#iommu","title":"\u5efa\u8bae\u5f00\u542f IOMMU","text":"

                      \u4e3a\u4e86\u540e\u7eed\u529f\u80fd\u505a\u51c6\u5907\uff0c\u5efa\u8bae\u5f00\u542f IOMMU\u3002

                      "},{"location":"admin/virtnest/install/offline-install.html","title":"\u79bb\u7ebf\u5347\u7ea7","text":"

                      \u672c\u9875\u8bf4\u660e\u4ece\u4e0b\u8f7d\u4e2d\u5fc3\u4e0b\u8f7d\u4e91\u4e3b\u673a\u6a21\u5757\u540e\uff0c\u5e94\u8be5\u5982\u4f55\u5b89\u88c5\u6216\u5347\u7ea7\u3002

                      Info

The word virtnest appearing in the commands or scripts below is the internal development codename of the virtual machine module.

                      "},{"location":"admin/virtnest/install/offline-install.html#_2","title":"\u4ece\u5b89\u88c5\u5305\u4e2d\u52a0\u8f7d\u955c\u50cf","text":"

                      \u60a8\u53ef\u4ee5\u6839\u636e\u4e0b\u9762\u4e24\u79cd\u65b9\u5f0f\u4e4b\u4e00\u52a0\u8f7d\u955c\u50cf\uff0c\u5f53\u73af\u5883\u4e2d\u5b58\u5728\u955c\u50cf\u4ed3\u5e93\u65f6\uff0c\u5efa\u8bae\u9009\u62e9chart-syncer\u540c\u6b65\u955c\u50cf\u5230\u955c\u50cf\u4ed3\u5e93\uff0c\u8be5\u65b9\u6cd5\u66f4\u52a0\u9ad8\u6548\u4fbf\u6377\u3002

                      "},{"location":"admin/virtnest/install/offline-install.html#chart-syncer","title":"chart-syncer \u540c\u6b65\u955c\u50cf\u5230\u955c\u50cf\u4ed3\u5e93","text":"
                      1. \u521b\u5efa load-image.yaml

                        Note

  Every parameter in this YAML file is required. You need a private image registry; modify the related configuration accordingly.

  If a chart repo is already installed in the current environment, chart-syncer can sync the charts into it (as in the example below); it also supports exporting the charts as tgz files.

                        load-image.yaml
  source:\n  intermediateBundlesPath: virtnest-offline # Relative to the directory where the charts-syncer command is run, not to the path between this YAML file and the offline bundle\ntarget:\n  containerRegistry: 10.16.10.111 # Change to your registry url\n  containerRepository: release.daocloud.io/virtnest # Change to your registry repository\n  repo:\n    kind: HARBOR # Can also be any other supported Helm Chart repository kind\n    url: http://10.16.10.111/chartrepo/release.daocloud.io # Change to your chart repo url\n    auth:\n      username: \"admin\" # Your registry username\n      password: \"Harbor12345\" # Your registry password\n  containers:\n    auth:\n      username: \"admin\" # Your registry username\n      password: \"Harbor12345\" # Your registry password\n

  If no chart repo is installed in the current environment, chart-syncer can also export the charts as tgz files and store them at a specified path.

                        load-image.yaml
  source:\n  intermediateBundlesPath: virtnest-offline # Relative to the directory where the charts-syncer command is run, not to the path between this YAML file and the offline bundle\ntarget:\n  containerRegistry: 10.16.10.111 # Change to your registry url\n  containerRepository: release.daocloud.io/virtnest # Change to your registry repository\n  repo:\n    kind: LOCAL\n    path: ./local-repo # Local path for the charts\n  containers:\n    auth:\n      username: \"admin\" # Your registry username\n      password: \"Harbor12345\" # Your registry password\n
2. Run the image sync command:

                        charts-syncer sync --config load-image.yaml\n
                      "},{"location":"admin/virtnest/install/offline-install.html#docker-containerd","title":"Docker \u6216 containerd \u76f4\u63a5\u52a0\u8f7d","text":"

                      \u89e3\u538b\u5e76\u52a0\u8f7d\u955c\u50cf\u6587\u4ef6\u3002

                      1. \u89e3\u538b tar \u538b\u7f29\u5305\u3002

                        tar xvf virtnest.bundle.tar\n

  After successful extraction you will get 3 files:

                        • hints.yaml
                        • images.tar
                        • original-chart
2. Load the images from the local files into Docker or containerd.

  Docker:
  docker load -i images.tar\n
  containerd:
  ctr -n k8s.io image import images.tar\n

                      Note

Every node must perform the Docker or containerd image-load operation. After loading, the images need to be re-tagged so that the Registry and Repository match those used at installation; a hypothetical example follows.
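A hypothetical example of such re-tagging for a single image (the image name and target registry below are placeholders; repeat for every loaded image):

# Docker\ndocker tag release.daocloud.io/virtnest/example-image:v0.6.0 10.16.10.111/virtnest/example-image:v0.6.0\n# containerd\nctr -n k8s.io image tag release.daocloud.io/virtnest/example-image:v0.6.0 10.16.10.111/virtnest/example-image:v0.6.0\n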

                      "},{"location":"admin/virtnest/install/offline-install.html#_3","title":"\u5347\u7ea7","text":"

                      \u6709\u4e24\u79cd\u5347\u7ea7\u65b9\u5f0f\u3002\u60a8\u53ef\u4ee5\u6839\u636e\u524d\u7f6e\u64cd\u4f5c\uff0c\u9009\u62e9\u5bf9\u5e94\u7684\u5347\u7ea7\u65b9\u6848\uff1a

Upgrade via helm repo:
1. Check whether the VM module's helm repository exists:

                        helm repo list | grep virtnest\n

  If the result is empty or shows the following message, proceed to the next step; otherwise skip the next step:

                        Error: no repositories to show\n
2. Add the VM module's helm repository:

                        helm repo add virtnest http://{harbor url}/chartrepo/{project}\n
3. Update the VM module's helm repository:

                        helm repo update virtnest # (1)\n
  1. A too-old helm version causes this to fail; if it fails, try running helm repo update without the repository name
4. Choose the VM module version you want to install (the latest version is recommended):

                        helm search repo virtnest/virtnest --versions\n
                        [root@master ~]# helm search repo virtnest/virtnest --versions\nNAME                   CHART VERSION  APP VERSION  DESCRIPTION\nvirtnest/virtnest  0.2.0          v0.2.0       A Helm chart for virtnest\n...\n
5. Back up the --set parameters.

  Before upgrading the VM module version, it is recommended to run the following command to back up the --set parameters of the old version:

                        helm get values virtnest -n virtnest-system -o yaml > bak.yaml\n
6. Update the virtnest CRDs:

                        helm pull virtnest/virtnest --version 0.2.0 && tar -zxf virtnest-0.2.0.tgz\nkubectl apply -f virtnest/crds\n
7. Run helm upgrade.

  Before upgrading, it is recommended to overwrite the global.imageRegistry field in bak.yaml with the registry address currently in use:

  export imageRegistry={your-registry-address}\n
                        helm upgrade virtnest virtnest/virtnest \\\n  -n virtnest-system \\\n  -f ./bak.yaml \\\n  --set global.imageRegistry=$imageRegistry \\\n  --version 0.2.0\n
Upgrade via chart package:
1. Back up the --set parameters.

  Before upgrading the VM module version, it is recommended to run the following command to back up the --set parameters of the old version:

                        helm get values virtnest -n virtnest-system -o yaml > bak.yaml\n
2. Update the virtnest CRDs:

                        kubectl apply -f ./crds\n
3. Run helm upgrade.

  Before upgrading, it is recommended to overwrite the global.imageRegistry field in bak.yaml with the registry address currently in use:

  export imageRegistry={your-registry-address}\n
                        helm upgrade virtnest . \\\n  -n virtnest-system \\\n  -f ./bak.yaml \\\n  --set global.imageRegistry=$imageRegistry\n
                      "},{"location":"admin/virtnest/install/virtnest-agent.html","title":"\u5b89\u88c5 virtnest-agent","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u5728\u6307\u5b9a\u96c6\u7fa4\u5185\u5b89\u88c5 virtnest-agent\u3002

                      "},{"location":"admin/virtnest/install/virtnest-agent.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u5b89\u88c5 virtnest-agent \u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u64cd\u4f5c\u7cfb\u7edf\u5185\u6838\u7248\u672c\u9700\u8981\u5728 v4.11 \u4ee5\u4e0a\u3002
                      "},{"location":"admin/virtnest/install/virtnest-agent.html#_2","title":"\u5b89\u88c5\u6b65\u9aa4","text":"

                      \u521d\u59cb\u96c6\u7fa4\u9700\u8981\u5728 Helm \u4e2d\u5b89\u88c5 virtnest-agent \u7ec4\u4ef6\u540e\u65b9\u53ef\u4f7f\u7528\u4e91\u4e3b\u673a\u7684\u76f8\u5173\u80fd\u529b\u3002

1. Click Container Management in the left navigation bar, then click Virtual Machines. If the virtnest-agent component is not installed, the VM capabilities cannot be used, and you will be prompted to install it in the required cluster.

2. Select the required cluster, click Helm Apps in the left navigation bar, then click Helm Charts to view the chart list.

3. Search for the virtnest-agent component, open its details, select a suitable version, and click Install.

4. On the installation form page, fill in the basic information and click OK to finish the installation.

5. Click the Virtual Machines navigation entry again; when the VM list appears, the VM capabilities can be used normally (a command-line spot check is sketched below).
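A command-line spot check that the agent is running (the virtnest-system namespace is an assumption here; adjust to wherever you installed the chart):

kubectl -n virtnest-system get pods | grep virtnest-agent\n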

                      "},{"location":"admin/virtnest/quickstart/index.html","title":"\u521b\u5efa\u4e91\u4e3b\u673a","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u4e91\u4e3b\u673a\u3002

                      \u4e91\u4e3b\u673a\u57fa\u4e8e KubeVirt \u6280\u672f\u5c06\u4e91\u4e3b\u673a\u4f5c\u4e3a\u4e91\u539f\u751f\u5e94\u7528\u8fdb\u884c\u7ba1\u7406\uff0c\u4e0e\u5bb9\u5668\u65e0\u7f1d\u5730\u8854\u63a5\u5728\u4e00\u8d77\uff0c \u4f7f\u7528\u6237\u80fd\u591f\u8f7b\u677e\u5730\u90e8\u7f72\u4e91\u4e3b\u673a\u5e94\u7528\uff0c\u4eab\u53d7\u4e0e\u5bb9\u5668\u5e94\u7528\u4e00\u81f4\u7684\u4e1d\u6ed1\u4f53\u9a8c\u3002

                      "},{"location":"admin/virtnest/quickstart/index.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u521b\u5efa\u4e91\u4e3b\u673a\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u5411\u7528\u6237\u673a\u64cd\u4f5c\u7cfb\u7edf\u516c\u5f00\u786c\u4ef6\u8f85\u52a9\u7684\u865a\u62df\u5316\u3002
                      • \u5728\u6307\u5b9a\u96c6\u7fa4\u5b89\u88c5 virtnest-agent\uff0c\u64cd\u4f5c\u7cfb\u7edf\u5185\u6838\u7248\u672c\u9700\u8981\u5728 3.15 \u4ee5\u4e0a\u3002
                      • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u3002
                      • \u63d0\u524d\u51c6\u5907\u597d\u955c\u50cf\uff0c\u5e73\u53f0\u5185\u7f6e\u4e09\u79cd\u955c\u50cf (\u5982\u4e0b\u6587\u6240\u793a)\uff0c\u5982\u9700\u5236\u4f5c\u955c\u50cf\uff0c\u53ef\u53c2\u8003\u5f00\u6e90\u9879\u76ee\u5236\u4f5c\u955c\u50cf\u3002
                      • \u8fdb\u884c\u7f51\u7edc\u914d\u7f6e\u65f6\uff0c\u82e5\u9009\u62e9\u4f7f\u7528 Passt \u7f51\u7edc\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u5347\u7ea7\u81f3 0.4.0 \u53ca\u4ee5\u4e0a\u7248\u672c\u3002
                      "},{"location":"admin/virtnest/quickstart/index.html#_3","title":"\u955c\u50cf\u521b\u5efa","text":"

                      \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u4e91\u4e3b\u673a\u3002

1. Click Container Management in the left navigation bar, then click Virtual Machines to enter the Virtual Machines page.

2. On the VM list page, click Create VM -> Create from Image.

3. On the creation page, fill in Basic Information, Image Configuration, Storage and Network, and Login Settings in turn, then click OK at the bottom right of the page to complete the creation.

  The system automatically returns to the VM list. By clicking ┇ on the right of the list you can power the VM off/on, restart, clone, update, create a snapshot, access the console (VNC), delete, and so on. The clone and snapshot capabilities depend on the chosen storage pool.

                      "},{"location":"admin/virtnest/quickstart/index.html#_4","title":"\u57fa\u672c\u4fe1\u606f","text":"

                      \u5728 \u521b\u5efa\u4e91\u4e3b\u673a \u9875\u9762\u4e2d\uff0c\u6839\u636e\u4e0b\u8868\u8f93\u5165\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                      • \u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\u3002 \u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u540d\u79f0\u5728\u4e91\u4e3b\u673a\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                      • \u522b\u540d\uff1a\u5141\u8bb8\u4efb\u4f55\u5b57\u7b26\uff0c\u6700\u957f 60 \u4e2a\u5b57\u7b26\u3002
                      • \u96c6\u7fa4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u4e91\u4e3b\u673a\u90e8\u7f72\u5728\u54ea\u4e2a\u96c6\u7fa4\u5185\uff0c\u82e5\u6709\u4f7f\u7528 GPU \u80fd\u529b\u7684\u9700\u6c42\uff0c\u5219\u9700\u8981\u9009\u62e9\u6709 GPU/vGPU \u5361\u7684\u96c6\u7fa4\u3002
                      • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u4e91\u4e3b\u673a\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\u3002 \u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                      • \u6807\u7b7e/\u6ce8\u89e3\uff1a\u9009\u62e9\u4e3a\u4e91\u4e3b\u673a\u6dfb\u52a0\u6240\u9700\u7684\u6807\u7b7e/\u6ce8\u89e3\u4fe1\u606f\u3002
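As referenced in the Name item above, the naming rule is the standard Kubernetes DNS-1123 label; a quick local check of a candidate name (the regex mirrors the rule, including the 63-character limit):

echo my-vm-01 | grep -E '^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?$'   # prints the name only if it is valid\n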
                      "},{"location":"admin/virtnest/quickstart/index.html#_5","title":"\u955c\u50cf\u914d\u7f6e","text":"

                      \u6839\u636e\u4e0b\u8868\u586b\u5199\u955c\u50cf\u76f8\u5173\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65

                      1. \u955c\u50cf\u6765\u6e90\uff1a\u652f\u6301\u4e09\u79cd\u7c7b\u578b\u7684\u6765\u6e90\u3002

  • Registry type: the image is stored in a container registry; images can be selected from the registry on demand;
  • HTTP type: the image is stored on a file server using the HTTP protocol; both HTTPS:// and HTTP:// prefixes are supported;
  • Object storage (S3): VM images obtained via the S3 object-storage protocol; for object-storage files that require no authentication, use the HTTP source instead.
2. The platform's built-in images are listed below, including the operating system, version, and image address. Custom VM images are also supported.

  Operating system | Version | Image address
  CentOS | CentOS 7.9 | release-ci.daocloud.io/virtnest/system-images/centos-7.9-x86_64:v1
  Ubuntu | Ubuntu 22.04 | release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1
  Debian | Debian 12 | release-ci.daocloud.io/virtnest/system-images/debian-12-x86_64:v1
3. Image secret: only the default (Opaque) secret type is supported; for the exact format, see Create Secret.

  The platform's built-in images are stored in the bootstrap cluster, whose registry is not encrypted, so no secret is needed when selecting a built-in image.

                      Note

Requirements for CPU and memory hot-plug: the virtnest version must be at least v0.10.0 and the virtnest-agent version at least v0.7.0; live migration must be supported (make sure the PVC access mode is ReadWriteMany).

1. Resource configuration: integer CPU values are recommended; a decimal value is rounded up. CPU and memory hot-plug is supported.

2. GPU configuration: enabling the GPU feature requires meeting the prerequisites; see Configure GPU (Nvidia) for VMs. VMs support two GPU types, Nvidia GPU and Nvidia vGPU; after choosing the required type, select the corresponding GPU model and the number of cards.

                      "},{"location":"admin/virtnest/quickstart/index.html#_6","title":"\u5b58\u50a8\u4e0e\u7f51\u7edc\u914d\u7f6e","text":"
                      • \u5b58\u50a8\uff1a

                        • \u5b58\u50a8\u548c\u4e91\u4e3b\u673a\u7684\u529f\u80fd\u606f\u606f\u76f8\u5173\uff0c\u4e3b\u8981\u662f\u901a\u8fc7\u4f7f\u7528 Kubernetes \u7684\u6301\u4e45\u5377\u548c\u5b58\u50a8\u7c7b\uff0c\u63d0\u4f9b\u4e86\u7075\u6d3b\u4e14\u53ef\u6269\u5c55\u7684\u4e91\u4e3b\u673a\u5b58\u50a8\u80fd\u529b\u3002\u6bd4\u5982\u4e91\u4e3b\u673a\u955c\u50cf\u5b58\u50a8\u5728 pvc \u91cc\uff0c\u652f\u6301\u548c\u5176\u4ed6\u6570\u636e\u4e00\u8d77\u514b\u9686\u3001\u5feb\u7167\u7b49\u3002

                        • \u7cfb\u7edf\u76d8\uff1a\u7cfb\u7edf\u9ed8\u8ba4\u521b\u5efa\u4e00\u4e2a VirtIO \u7c7b\u578b\u7684 rootfs \u7cfb\u7edf\u76d8\uff0c\u7528\u4e8e\u5b58\u653e\u64cd\u4f5c\u7cfb\u7edf\u548c\u6570\u636e\u3002

                        • \u6570\u636e\u76d8\uff1a\u6570\u636e\u76d8\u662f\u4e91\u4e3b\u673a\u4e2d\u7528\u4e8e\u5b58\u50a8\u7528\u6237\u6570\u636e\u3001\u5e94\u7528\u7a0b\u5e8f\u6570\u636e\u6216\u5176\u4ed6\u975e\u64cd\u4f5c\u7cfb\u7edf\u76f8\u5173\u6587\u4ef6\u7684\u5b58\u50a8\u8bbe\u5907\u3002\u4e0e\u7cfb\u7edf\u76d8\u76f8\u6bd4\uff0c\u6570\u636e\u76d8\u662f\u975e\u5fc5\u9009\u7684\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u8981\u52a8\u6001\u6dfb\u52a0\u6216\u79fb\u9664\u3002\u6570\u636e\u76d8\u7684\u5bb9\u91cf\u4e5f\u53ef\u4ee5\u6839\u636e\u9700\u6c42\u8fdb\u884c\u7075\u6d3b\u914d\u7f6e\u3002

                        • \u9ed8\u8ba4\u4f7f\u7528\u5757\u5b58\u50a8\u3002\u5982\u679c\u9700\u8981\u4f7f\u7528\u514b\u9686\u548c\u5feb\u7167\u529f\u80fd\uff0c\u8bf7\u786e\u4fdd\u60a8\u7684\u5b58\u50a8\u6c60\u5df2\u7ecf\u521b\u5efa\u4e86\u5bf9\u5e94\u7684 VolumeSnapshotClass\uff0c \u53ef\u4ee5\u53c2\u8003\u4ee5\u4e0b\u793a\u4f8b\u3002\u5982\u679c\u9700\u8981\u4f7f\u7528\u5b9e\u65f6\u8fc1\u79fb\u529f\u80fd\uff0c\u8bf7\u786e\u4fdd\u60a8\u7684\u5b58\u50a8\u652f\u6301\u5e76\u9009\u62e9\u4e86 ReadWriteMany \u7684\u8bbf\u95ee\u6a21\u5f0f \u3002

                          \u5927\u591a\u6570\u60c5\u51b5\u4e0b\uff0c\u5b58\u50a8\u5728\u5b89\u88c5\u8fc7\u7a0b\u4e2d\u4e0d\u4f1a\u81ea\u52a8\u521b\u5efa\u8fd9\u6837\u7684 VolumeSnapshotClass\uff0c\u56e0\u6b64\u60a8\u9700\u8981\u624b\u52a8\u521b\u5efa VolumeSnapshotClass\u3002 \u4ee5\u4e0b\u662f\u4e00\u4e2a HwameiStor \u521b\u5efa VolumeSnapshotClass \u7684\u793a\u4f8b\uff1a

                          kind: VolumeSnapshotClass\napiVersion: snapshot.storage.k8s.io/v1\nmetadata:\n  name: hwameistor-storage-lvm-snapshot\n  annotations:\n    snapshot.storage.kubernetes.io/is-default-class: \"true\"\nparameters:\n  snapsize: \"1073741824\"\ndriver: lvm.hwameistor.io\ndeletionPolicy: Delete\n
                        • \u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u68c0\u67e5 VolumeSnapshotClass \u662f\u5426\u521b\u5efa\u6210\u529f\u3002

                          kubectl get VolumeSnapshotClass\n
                        • \u67e5\u770b\u5df2\u521b\u5efa\u7684 Snapshotclass\uff0c\u5e76\u4e14\u786e\u8ba4 provisioner \u5c5e\u6027\u540c\u5b58\u50a8\u6c60\u4e2d\u7684 Driver \u5c5e\u6027\u4e00\u81f4\u3002
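    A minimal check sketch, assuming the VolumeSnapshotClass above and the hwameistor-storage-lvm-hdd StorageClass; replace both names with your own:

    # Driver of the VolumeSnapshotClass created above
    kubectl get volumesnapshotclass hwameistor-storage-lvm-snapshot -o jsonpath='{.driver}{"\n"}'

    # Provisioner of the StorageClass backing the storage pool
    kubectl get storageclass hwameistor-storage-lvm-hdd -o jsonpath='{.provisioner}{"\n"}'

    Both commands should print the same value (here, lvm.hwameistor.io).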

• Network:

  • Network settings can be combined as needed according to the table below.

    | Network Mode | CNI | Spiderpool Installed | NIC Mode | Fixed IP | Live Migration |
    | --- | --- | --- | --- | --- | --- |
    | Masquerade (NAT) | Calico | ❌ | Single NIC | ❌ | ✅ |
    | Masquerade (NAT) | Cilium | ❌ | Single NIC | ❌ | ✅ |
    | Masquerade (NAT) | Flannel | ❌ | Single NIC | ❌ | ✅ |
    | Bridge | OVS | ✅ | Multiple NICs | ✅ | ✅ |

  • Network modes fall into Masquerade (NAT) and Bridge; Bridge mode can only be used after the Spiderpool component has been installed.

    • Masquerade (NAT) is selected by default, using the default eth0 NIC.
    • If the Spiderpool component is installed in the cluster, Bridge mode can be selected; Bridge mode supports multiple NICs.

  • Add NIC

    • Passt (passthrough)/Bridge modes support adding NICs manually. Click Add NIC and configure the NIC's IP pool. Choose a Multus CR that matches the network mode; if none exists, create one yourself (see the sketch after this list).
    • If the Use Default IP Pool switch is on, the default IP pool in the Multus CR configuration is used; if it is off, select an IP pool manually.
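  A minimal NetworkAttachmentDefinition (Multus CR) sketch for Bridge mode with Spiderpool IPAM; the name bridge-net, the OVS bridge br1, and the namespace are assumptions to adapt to your environment:

    apiVersion: k8s.cni.cncf.io/v1
    kind: NetworkAttachmentDefinition
    metadata:
      name: bridge-net            # hypothetical name
      namespace: kube-system
    spec:
      config: |-
        {
          "cniVersion": "0.3.1",
          "type": "ovs",
          "bridge": "br1",
          "ipam": {
            "type": "spiderpool"
          }
        }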

                      "},{"location":"admin/virtnest/quickstart/index.html#_7","title":"\u767b\u5f55\u8bbe\u7f6e","text":"
                      • \u7528\u6237\u540d/\u5bc6\u7801\uff1a\u53ef\u4ee5\u901a\u8fc7\u7528\u6237\u540d\u548c\u5bc6\u7801\u767b\u5f55\u81f3\u4e91\u4e3b\u673a\u3002
                      • SSH\uff1a\u9009\u62e9 SSH \u767b\u5f55\u65b9\u5f0f\u65f6\u53ef\u4e3a\u4e91\u4e3b\u673a\u7ed1\u5b9a SSH \u5bc6\u94a5\uff0c\u7528\u4e8e\u65e5\u540e\u767b\u5f55\u4e91\u4e3b\u673a\u3002
                      "},{"location":"admin/virtnest/quickstart/index.html#yaml","title":"YAML \u521b\u5efa","text":"

                      \u9664\u4e86\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u5916\uff0c\u8fd8\u53ef\u4ee5\u901a\u8fc7 YAML \u6587\u4ef6\u66f4\u5feb\u901f\u5730\u521b\u5efa\u521b\u5efa\u4e91\u4e3b\u673a\u3002

                      \u8fdb\u5165\u4e91\u4e3b\u673a\u5217\u8868\u9875\uff0c\u70b9\u51fb \u901a\u8fc7 YAML \u521b\u5efa \u6309\u94ae\u3002

                      \u70b9\u51fb\u67e5\u770b\u521b\u5efa\u4e91\u4e3b\u673a\u7684 YAML \u793a\u4f8b
                      apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  name: demo\n  namespace: default\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        name: systemdisk-demo\n        namespace: default\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 10Gi\n          storageClassName: hwameistor-storage-lvm-hdd\n        source:\n          registry:\n            url: >-\n              docker://release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  runStrategy: Always\n  template:\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio\n              name: systemdisk-demo\n            - disk:\n                bus: virtio\n              name: cloudinitdisk\n          interfaces:\n            - masquerade: {}\n              name: default\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n        - name: default\n          pod: {}\n      volumes:\n        - dataVolume:\n            name: systemdisk-demo\n          name: systemdisk-demo\n        - cloudInitNoCloud:\n            userDataBase64: >-\n              I2Nsb3VkLWNvbmZpZwpzc2hfcHdhdXRoOiB0cnVlCmRpc2FibGVfcm9vdDogZmFsc2UKY2hwYXNzd2Q6IHsibGlzdCI6ICJyb290OjEyMzQ1NiIsIGV4cGlyZTogRmFsc2V9CgoKcnVuY21kOgogIC0gc2VkIC1pICIvI1w/UGVybWl0Um9vdExvZ2luL3MvXi4qJC9QZXJtaXRSb290TG9naW4geWVzL2ciIC9ldGMvc3NoL3NzaGRfY29uZmlnCiAgLSBzeXN0ZW1jdGwgcmVzdGFydCBzc2guc2VydmljZQ==\n          name: cloudinitdisk\n
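If you prefer the CLI over the Create via YAML button, the example above can also be applied directly with kubectl; vm-demo.yaml is a hypothetical file holding the YAML above:

  kubectl apply -f vm-demo.yaml
  kubectl get vm demo -n default   # wait for the STATUS column to show Running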
                      "},{"location":"admin/virtnest/quickstart/access.html","title":"\u8fde\u63a5\u4e91\u4e3b\u673a","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u4e24\u79cd\u8fde\u63a5\u4e91\u4e3b\u673a\u7684\u65b9\u5f0f\uff0c\u5206\u522b\u4e3a \u63a7\u5236\u53f0\u8bbf\u95ee\uff08VNC\uff09\u548c\u7ec8\u7aef\u65b9\u5f0f\u3002

                      "},{"location":"admin/virtnest/quickstart/access.html#_2","title":"\u7ec8\u7aef","text":"

                      \u901a\u8fc7\u7ec8\u7aef\u8bbf\u95ee\u4e91\u4e3b\u673a\u7684\u65b9\u5f0f\u66f4\u52a0\u7075\u6d3b\u548c\u8f7b\u91cf\uff0c\u4f46\u662f\u65e0\u6cd5\u76f4\u63a5\u5c55\u793a\u56fe\u5f62\u754c\u9762\uff0c\u4ea4\u4e92\u6027\u8f83\u5dee\uff0c\u4e14\u65e0\u6cd5\u591a\u7ec8\u7aef\u540c\u65f6\u5728\u7ebf\u3002

                      \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u4e91\u4e3b\u673a \uff0c\u8fdb\u5165\u5217\u8868\u9875\u9762\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u652f\u6301\u901a\u8fc7\u7ec8\u7aef\u65b9\u5f0f\u8bbf\u95ee\u4e91\u4e3b\u673a\u3002

                      "},{"location":"admin/virtnest/quickstart/access.html#vnc","title":"\u63a7\u5236\u53f0\u8bbf\u95ee\uff08VNC\uff09","text":"

                      \u901a\u8fc7 VNC \u8bbf\u95ee\u4e91\u4e3b\u673a\u7684\u65b9\u5f0f\u53ef\u4ee5\u5b9e\u73b0\u5bf9\u8fdc\u7a0b\u8ba1\u7b97\u673a\u7684\u5b8c\u6574\u56fe\u5f62\u754c\u9762\u7684\u8bbf\u95ee\u548c\u63a7\u5236\uff0c\u80fd\u591f\u76f4\u89c2\u5730\u64cd\u4f5c\u8fdc\u7a0b\u8bbe\u5907\uff0c\u4ea4\u4e92\u6027\u66f4\u52a0\u597d\uff0c\u4f46\u662f\u6027\u80fd\u4f1a\u53d7\u5230\u4e00\u5b9a\u5f71\u54cd\uff0c\u4e14\u65e0\u6cd5\u591a\u7ec8\u7aef\u540c\u65f6\u5728\u7ebf\u3002

                      Windows \u7cfb\u7edf\u9009\u62e9 VNC\u3002

                      \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u4e91\u4e3b\u673a \uff0c\u8fdb\u5165\u5217\u8868\u9875\u9762\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u652f\u6301\u901a\u8fc7\u63a7\u5236\u53f0\u8bbf\u95ee\uff08VNC\uff09\u7684\u65b9\u5f0f\u8bbf\u95ee\u4e91\u4e3b\u673a\u3002
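Both entry points have CLI equivalents in virtctl, KubeVirt's command-line client; a minimal sketch, assuming virtctl is installed and a VM named demo runs in the default namespace:

  # Serial console, the CLI counterpart of terminal access
  virtctl console demo -n default

  # VNC, the CLI counterpart of console access (requires a local VNC viewer)
  virtctl vnc demo -n default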

                      "},{"location":"admin/virtnest/quickstart/detail.html","title":"\u4e91\u4e3b\u673a\u8be6\u60c5","text":"

                      \u6210\u529f\u521b\u5efa\u4e91\u4e3b\u673a\u540e\uff0c\u53ef\u8fdb\u5165\u4e91\u4e3b\u673a\u8be6\u60c5\u9875\u9762\uff0c\u652f\u6301\u67e5\u770b\u57fa\u672c\u4fe1\u606f\u3001\u914d\u7f6e\u4fe1\u606f\u3001GPU \u4fe1\u606f\u3001\u6982\u89c8\u3001\u5b58\u50a8\u3001\u7f51\u7edc\u3001\u5feb\u7167\u3001\u4e8b\u4ef6\u7b49\u3002

                      \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u96c6\u7fa4\u5217\u8868 \uff0c\u8fdb\u5165\u4e91\u4e3b\u673a\u6240\u5728\u96c6\u7fa4\u8be6\u60c5\uff0c\u70b9\u51fb\u4e91\u4e3b\u673a\u540d\u79f0\u67e5\u770b\u4e91\u4e3b\u673a\u8be6\u60c5\u3002

                      "},{"location":"admin/virtnest/quickstart/detail.html#_2","title":"\u57fa\u672c\u4fe1\u606f","text":"

                      \u4e91\u4e3b\u673a\u57fa\u672c\u4fe1\u606f\u5305\u542b\u72b6\u6001\u3001\u522b\u540d\u3001\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u3001IP\u3001\u6807\u7b7e\u3001\u8282\u70b9\u3001\u7528\u6237\u540d\u3001\u5bc6\u7801\u3001\u521b\u5efa\u65f6\u95f4\u7b49\u3002\u5176\u4e2d\uff0c

                      • \u72b6\u6001\uff1a\u4e91\u4e3b\u673a\u5f53\u524d\u7684\u8fd0\u884c\u72b6\u6001\uff08\u8fd0\u884c\u4e2d/\u5904\u7406\u4e2d/\u5173\u673a/\u9519\u8bef\uff09\u3002
                      • IP \uff1a\u4e91\u4e3b\u673a\u7684 IP \u5730\u5740\u3002\u5bf9\u4e8e\u6dfb\u52a0\u591a\u5f20\u7f51\u5361\u7684\u4e91\u4e3b\u673a\uff0c\u4f1a\u4e3a\u5176\u5206\u914d\u591a\u4e2a IP \u5730\u5740\u3002
                      "},{"location":"admin/virtnest/quickstart/detail.html#gpu","title":"\u914d\u7f6e\u4fe1\u606f & GPU \u914d\u7f6e","text":"

                      \u4e91\u4e3b\u673a\u914d\u7f6e\u4fe1\u606f\u5305\u62ec\uff1a

                      • \u64cd\u4f5c\u7cfb\u7edf\uff1a\u5b89\u88c5\u5728\u4e91\u4e3b\u673a\u4e0a\u7528\u4e8e\u6267\u884c\u7a0b\u5e8f\u7684\u64cd\u4f5c\u7cfb\u7edf\u3002
                      • \u955c\u50cf\u5730\u5740\uff1a\u5411\u4e00\u4e2a\u865a\u62df\u786c\u76d8\u6587\u4ef6\u6216\u64cd\u4f5c\u7cfb\u7edf\u5b89\u88c5\u4ecb\u8d28\u7684\u94fe\u63a5\uff0c\u8fd9\u4e2a\u5730\u5740\u7528\u4e8e\u5728\u4e91\u4e3b\u673a\u8f6f\u4ef6\u4e2d\u52a0\u8f7d\u548c\u5b89\u88c5\u64cd\u4f5c\u7cfb\u7edf\u3002
                      • \u7f51\u7edc\u6a21\u5f0f\uff1a\u4e91\u4e3b\u673a\u914d\u7f6e\u7684\u7f51\u7edc\u6a21\u5f0f\uff0cBridge\uff08\u6865\u63a5\uff09\u6216 Masquerade\uff08NAT\uff09\u3002
                      • CPU\u3001\u5185\u5b58\uff1a\u4e3a\u4e91\u4e3b\u673a\u5206\u914d\u7684\u8d44\u6e90\u3002

                      GPU \u914d\u7f6e\u4fe1\u606f\u5305\u542b GPU \u7c7b\u578b\u3001GPU \u578b\u53f7\u4ee5\u53ca\u5361\u6570\u91cf\u3002

                      "},{"location":"admin/virtnest/quickstart/detail.html#_3","title":"\u5176\u4ed6\u4fe1\u606f","text":"\u6982\u89c8\u50a8\u5b58\u7f51\u7edc\u5feb\u7167\u4e8b\u4ef6\u5217\u8868

                      \u4e91\u4e3b\u673a\u6982\u89c8\u9875\u53ef\u67e5\u770b\u4e91\u4e3b\u673a\u7684\u76d1\u63a7\u5185\u5bb9\u3002\u8bf7\u6ce8\u610f\uff0c\u82e5\u672a\u5b89\u88c5 insight-agent \u7ec4\u4ef6\uff0c\u5219\u65e0\u6cd5\u83b7\u53d6\u76d1\u63a7\u4fe1\u606f\u3002

                      \u5c55\u793a\u4e91\u4e3b\u673a\u6240\u7528\u7684\u5b58\u50a8\uff0c\u5305\u62ec\u7cfb\u7edf\u76d8\u548c\u6570\u636e\u76d8\u7684\u4fe1\u606f\u3002

                      \u5c55\u793a\u4e91\u4e3b\u673a\u7684\u7f51\u7edc\u914d\u7f6e\uff0c\u5305\u62ec Multus CR\u3001\u7f51\u5361\u540d\u79f0\u3001IP \u5730\u5740\u7b49\u4fe1\u606f\u3002

                      \u82e5\u5df2\u7ecf\u521b\u5efa\u5feb\u7167\uff0c\u672c\u9875\u5c06\u5c55\u793a\u4e91\u4e3b\u673a\u7684\u5feb\u7167\u4fe1\u606f\uff0c\u652f\u6301\u901a\u8fc7\u5feb\u7167\u6062\u590d\u4e91\u4e3b\u673a\u3002

                      \u4e8b\u4ef6\u5217\u8868\u5305\u542b\u4e91\u4e3b\u673a\u7684\u751f\u547d\u5468\u671f\u4e2d\u53d1\u751f\u7684\u5404\u79cd\u72b6\u6001\u53d8\u5316\u3001\u64cd\u4f5c\u8bb0\u5f55\u548c\u7cfb\u7edf\u6d88\u606f\u7b49\u3002

                      "},{"location":"admin/virtnest/quickstart/nodeport.html","title":"\u901a\u8fc7 NodePort \u8bbf\u95ee\u4e91\u4e3b\u673a","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7 NodePort \u8bbf\u95ee\u4e91\u4e3b\u673a\u3002

                      "},{"location":"admin/virtnest/quickstart/nodeport.html#_1","title":"\u73b0\u6709\u8bbf\u95ee\u65b9\u5f0f\u7684\u7f3a\u9677","text":"
                      1. \u4e91\u4e3b\u673a\u652f\u6301\u901a\u8fc7 VNC \u6216\u8005 console \u8bbf\u95ee\uff0c\u4f46\u8fd9\u4e24\u79cd\u8bbf\u95ee\u65b9\u5f0f\u90fd\u6709\u4e00\u4e2a\u5f0a\u7aef\uff0c\u65e0\u6cd5\u591a\u7ec8\u7aef\u540c\u65f6\u5728\u7ebf\u3002

                      2. \u901a\u8fc7 NodePort \u5f62\u5f0f\u7684 Service\uff0c\u53ef\u4ee5\u5e2e\u52a9\u89e3\u51b3\u8fd9\u4e2a\u95ee\u9898\u3002

                      "},{"location":"admin/virtnest/quickstart/nodeport.html#service","title":"\u521b\u5efa service \u7684\u65b9\u5f0f","text":"
                      1. \u901a\u8fc7\u5bb9\u5668\u7ba1\u7406\u9875\u9762

                        • \u9009\u62e9\u76ee\u6807\u8bbf\u95ee\u7684\u4e91\u4e3b\u673a\u6240\u5728\u96c6\u7fa4\u9875\u9762\u521b\u5efa\u670d\u52a1\uff08Service\uff09
                        • \u9009\u62e9\u8bbf\u95ee\u7c7b\u578b\u4e3a\u8282\u70b9\u8bbf\u95ee\uff08NodePort\uff09
                        • \u9009\u62e9\u547d\u540d\u7a7a\u95f4\uff08\u4e91\u4e3b\u673a\u6240\u5728 namespace\uff09
                        • \u6807\u7b7e\u9009\u62e9\u5668\u586b\u5199 vm.kubevirt.io/name: you-vm-name
                        • \u7aef\u53e3\u914d\u7f6e\uff1a\u534f\u8bae\u9009\u62e9 TCP\uff0c\u7aef\u53e3\u540d\u79f0\u81ea\u5b9a\u4e49\uff0c\u670d\u52a1\u7aef\u53e3\u3001\u5bb9\u5668\u7aef\u53e3\u586b\u5199 22
                      2. \u521b\u5efa\u6210\u529f\u540e\uff0c\u5c31\u53ef\u4ee5\u901a\u8fc7 ssh username@nodeip -p port \u6765\u8bbf\u95ee\u4e91\u4e3b\u673a

                      "},{"location":"admin/virtnest/quickstart/nodeport.html#kubectl-svc","title":"\u901a\u8fc7 kubectl \u521b\u5efa svc","text":"
                      1. \u7f16\u5199 YAML \u6587\u4ef6\uff0c\u793a\u4f8b\u5982\u4e0b\uff1a

                        apiVersion: v1\nkind: Service\n  metadata:\n    name: test-ssh\nspec:\n  ports:\n  - name: tcp-ssh\n    nodePort: 32090\n    protocol: TCP\n    // 22 \u7aef\u53e3\uff0c\u4e0d\u8981\u66f4\u6539\n    port: 22 \n    targetPort: 22\n  selector:\n    // \u4e91\u4e3b\u673a\u7684 name\n\u00a0 \u00a0vm.kubevirt.io/name: test-image-s3\n  type: NodePort\n
                      2. \u6267\u884c\u4ee5\u4e0b\u547d\u4ee4

                        kubectl apply -f you-svc.yaml\n
                      3. \u521b\u5efa\u6210\u529f\u540e\uff0c\u5c31\u53ef\u4ee5\u901a\u8fc7 ssh username@nodeip -p 32090 \u6765\u8bbf\u95ee\u4e91\u4e3b\u673a

                      "},{"location":"admin/virtnest/quickstart/update.html","title":"\u66f4\u65b0\u4e91\u4e3b\u673a","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u8868\u5355\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u66f4\u65b0\u4e91\u4e3b\u673a\u3002

                      "},{"location":"admin/virtnest/quickstart/update.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u5f00\u673a\u72b6\u6001\u4e0b\u66f4\u65b0\u4e91\u4e3b\u673a CPU\u3001\u5185\u5b58\u3001\u6570\u636e\u76d8\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u4e91\u4e3b\u673a\u652f\u6301\u5b9e\u65f6\u8fc1\u79fb\u80fd\u529b\u3002
                      "},{"location":"admin/virtnest/quickstart/update.html#_3","title":"\u8868\u5355\u66f4\u65b0\u4e91\u4e3b\u673a","text":"

                      \u5728\u4e91\u4e3b\u673a\u5217\u8868\u9875\u9762\uff0c\u70b9\u51fb \u66f4\u65b0 \u8fdb\u5165\u4e91\u4e3b\u673a\u66f4\u65b0\u9875\u9762\u3002

                      \u57fa\u672c\u4fe1\u606f\u955c\u50cf\u914d\u7f6e\u5b58\u50a8\u4e0e\u7f51\u7edc\u767b\u5f55\u8bbe\u7f6e

                      \u57fa\u672c\u4fe1\u606f\u9875\u9762\u4e2d\uff0c \u522b\u540d \u4e0e \u6807\u7b7e\u6ce8\u89e3 \u652f\u6301\u66f4\u65b0\uff0c\u5176\u4ed6\u4fe1\u606f\u65e0\u6cd5\u66f4\u6539\u3002\u5b8c\u6210\u66f4\u65b0\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65 \u8fdb\u5165\u955c\u50cf\u914d\u7f6e\u7684\u754c\u9762\u3002

                      \u5728\u955c\u50cf\u914d\u7f6e\u9875\u9762\u4e2d\uff0c\u955c\u50cf\u6765\u6e90\u3001\u64cd\u4f5c\u7cfb\u7edf\u3001\u7248\u672c\u7b49\u53c2\u6570\u4e00\u65e6\u9009\u62e9\u540e\u65e0\u6cd5\u66f4\u6539\uff0c\u5141\u8bb8\u7528\u6237\u66f4\u65b0 GPU \u914d\u7f6e \uff0c \u5305\u62ec\u542f\u7528\u6216\u7981\u7528 GPU \u652f\u6301\uff0c\u9009\u62e9 GPU \u7684\u7c7b\u578b\uff0c\u6307\u5b9a\u6240\u9700\u7684\u578b\u53f7\uff0c\u4ee5\u53ca\u914d\u7f6e GPU \u5361\u7684\u6570\u91cf\uff0c\u66f4\u65b0\u540e\u9700\u8981\u91cd\u542f\u624d\u80fd\u751f\u6548\u3002 \u5b8c\u6210\u66f4\u65b0\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65 \u8fdb\u5165\u5b58\u50a8\u4e0e\u7f51\u7edc\u7684\u754c\u9762\u3002

                      \u5728\u5b58\u50a8\u4e0e\u7f51\u7edc\u9875\u9762\u4e2d\uff0c\u7cfb\u7edf\u76d8\u7684\u5b58\u50a8\u6c60\u548c PVC \u8bbf\u95ee\u6a21\u5f0f\u4e00\u65e6\u9009\u62e9\u540e\u65e0\u6cd5\u66f4\u6539\uff0c\u652f\u6301\u589e\u52a0\u78c1\u76d8\u5bb9\u91cf\uff0c\u4e0d\u53ef\u51cf\u5c11\u3002 \u6b64\u5916\uff0c\u7528\u6237\u53ef\u4ee5\u81ea\u7531\u6dfb\u52a0\u6216\u8005\u79fb\u9664\u6570\u636e\u76d8\u3002\u4e0d\u652f\u6301\u66f4\u65b0\u7f51\u7edc\u914d\u7f6e\u3002\u5b8c\u6210\u66f4\u65b0\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65 \u8fdb\u5165\u767b\u5f55\u8bbe\u7f6e\u7684\u754c\u9762\u3002

                      Note

                      \u5efa\u8bae\u5728\u4fee\u6539\u5b58\u50a8\u5bb9\u91cf\u6216\u589e\u52a0\u6570\u636e\u76d8\u540e\u91cd\u542f\u4e91\u4e3b\u673a\uff0c\u4ee5\u786e\u4fdd\u914d\u7f6e\u751f\u6548\u3002

                      \u5728\u767b\u5f55\u8bbe\u7f6e\u9875\u9762\u4e2d\uff0c\u7528\u6237\u540d\u3001\u5bc6\u7801\u4ee5\u53ca SSH \u5bc6\u94a5\u914d\u7f6e\u4e00\u65e6\u8bbe\u7f6e\uff0c\u4e0d\u5141\u8bb8\u66f4\u6539\u3002\u786e\u8ba4\u60a8\u7684\u767b\u5f55\u4fe1\u606f\u65e0\u8bef\u540e\uff0c\u70b9\u51fb\u786e\u5b9a\u6309\u94ae\u4ee5\u5b8c\u6210\u66f4\u65b0\u6d41\u7a0b\u3002

                      "},{"location":"admin/virtnest/quickstart/update.html#yaml","title":"\u7f16\u8f91 YAML","text":"

                      \u9664\u4e86\u901a\u8fc7\u8868\u5355\u65b9\u5f0f\u66f4\u65b0\u4e91\u4e3b\u673a\u5916\uff0c\u8fd8\u53ef\u4ee5\u901a\u8fc7 YAML \u6587\u4ef6\u66f4\u5feb\u901f\u5730\u66f4\u65b0\u4e91\u4e3b\u673a\u3002

                      \u8fdb\u5165\u4e91\u4e3b\u673a\u5217\u8868\u9875\uff0c\u70b9\u51fb \u7f16\u8f91 YAML \u6309\u94ae\u3002

                      "},{"location":"admin/virtnest/template/index.html","title":"\u901a\u8fc7\u6a21\u677f\u521b\u5efa\u4e91\u4e3b\u673a","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u6a21\u677f\u521b\u5efa\u4e91\u4e3b\u673a\u3002

                      \u901a\u8fc7\u5185\u7f6e\u6a21\u677f\u548c\u81ea\u5b9a\u4e49\u6a21\u677f\uff0c\u7528\u6237\u53ef\u4ee5\u8f7b\u677e\u521b\u5efa\u65b0\u7684\u4e91\u4e3b\u673a\u3002\u6b64\u5916\uff0c\u6211\u4eec\u8fd8\u63d0\u4f9b\u5c06\u73b0\u6709\u4e91\u4e3b\u673a\u8f6c\u6362\u4e3a\u4e91\u4e3b\u673a\u6a21\u677f\u7684\u529f\u80fd\uff0c\u8ba9\u7528\u6237\u80fd\u591f\u66f4\u52a0\u7075\u6d3b\u5730\u7ba1\u7406\u548c\u4f7f\u7528\u8d44\u6e90\u3002

                      "},{"location":"admin/virtnest/template/index.html#_2","title":"\u6a21\u677f\u521b\u5efa","text":"

                      \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u6a21\u677f\u521b\u5efa\u4e00\u4e2a\u4e91\u4e3b\u673a\u3002

                      1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u4e91\u4e3b\u673a \uff0c\u8fdb\u5165 \u4e91\u4e3b\u673a\u7ba1\u7406 \u9875\u9762\u3002\u5728\u4e91\u4e3b\u673a\u5217\u8868\u9875\u9762\uff0c\u70b9\u51fb\u521b\u5efa\u4e91\u4e3b\u673a-\u9009\u62e9\u6a21\u677f\u521b\u5efa\u4e91\u4e3b\u673a\u3002

                      2. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u9875\u9762\uff0c\u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u6a21\u677f\u914d\u7f6e\u3001\u5b58\u50a8\u4e0e\u7f51\u7edc\u3001\u767b\u5f55\u8bbe\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                        \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de\u4e91\u4e3b\u673a\u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u4e91\u4e3b\u673a\u6267\u884c\u5173\u673a/\u5f00\u542f\u3001\u91cd\u542f\u3001\u514b\u9686\u3001\u66f4\u65b0\u3001\u521b\u5efa\u5feb\u7167\u3001\u914d\u7f6e\u8f6c\u6362\u4e3a\u6a21\u677f\u3001\u63a7\u5236\u53f0\u8bbf\u95ee\uff08VNC\uff09\u3001\u5220\u9664\u7b49\u64cd\u4f5c\u3002 \u514b\u9686\u548c\u5feb\u7167\u80fd\u529b\u4f9d\u8d56\u4e8e\u5b58\u50a8\u6c60\u7684\u9009\u62e9\u3002

                      "},{"location":"admin/virtnest/template/index.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"

                      \u5728\u521b\u5efa\u4e91\u4e3b\u673a\u9875\u9762\u4e2d\uff0c\u6839\u636e\u4e0b\u8868\u8f93\u5165\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                      • \u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\u3002 \u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u540d\u79f0\u5728\u4e91\u4e3b\u673a\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                      • \u522b\u540d\uff1a\u5141\u8bb8\u4efb\u4f55\u5b57\u7b26\uff0c\u6700\u957f60\u4e2a\u5b57\u7b26\u3002
                      • \u96c6\u7fa4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u4e91\u4e3b\u673a\u90e8\u7f72\u5728\u54ea\u4e2a\u96c6\u7fa4\u5185\u3002
                      • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u4e91\u4e3b\u673a\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\u3002 \u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                      "},{"location":"admin/virtnest/template/index.html#_4","title":"\u6a21\u677f\u914d\u7f6e","text":"

                      \u51fa\u73b0\u6a21\u677f\u5217\u8868\uff0c\u6309\u9700\u9009\u62e9\u5185\u7f6e\u6a21\u677f/\u81ea\u5b9a\u4e49\u6a21\u677f\u3002

                      • \u9009\u62e9\u5185\u7f6e\u6a21\u677f\uff1a\u5e73\u53f0\u5185\u7f6e\u4e862\u4e2a\u6807\u51c6\u6a21\u677f\uff0c\u4e0d\u5141\u8bb8\u7f16\u8f91\u548c\u5220\u9664\u3002\u9009\u62e9\u5185\u7f6e\u6a21\u677f\u540e\uff0c\u955c\u50cf\u6765\u6e90\u3001\u64cd\u4f5c\u7cfb\u7edf\u3001\u955c\u50cf\u5730\u5740\u7b49\u5c06\u4f7f\u7528\u6a21\u677f\u5185\u7684\u4fe1\u606f\uff0c\u65e0\u6cd5\u4fee\u6539\uff1b\u8d44\u6e90\u914d\u989d\u4e5f\u5c06\u4f7f\u7528\u6a21\u677f\u5185\u7684\u4fe1\u606f\uff0c\u5141\u8bb8\u4fee\u6539\u3002

                      • \u9009\u62e9\u81ea\u5b9a\u4e49\u6a21\u677f\uff1a\u7531\u4e91\u4e3b\u673a\u914d\u7f6e\u8f6c\u5316\u800c\u6765\u7684\u6a21\u677f\uff0c\u652f\u6301\u7f16\u8f91\u548c\u5220\u9664\u3002\u4f7f\u7528\u81ea\u5b9a\u4e49\u6a21\u677f\u5219\u6839\u636e\u5177\u4f53\u60c5\u51b5\u652f\u6301\u4fee\u6539\u955c\u50cf\u6765\u6e90\u7b49\u4fe1\u606f\u3002

                      "},{"location":"admin/virtnest/template/index.html#_5","title":"\u5b58\u50a8\u4e0e\u7f51\u7edc\u914d\u7f6e","text":"
                      • \u5b58\u50a8\uff1a\u7cfb\u7edf\u9ed8\u8ba4\u521b\u5efa\u4e00\u4e2a VirtIO \u7c7b\u578b\u7684 rootfs \u7cfb\u7edf\u76d8\uff0c\u7528\u4e8e\u5b58\u653e\u64cd\u4f5c\u7cfb\u7edf\u548c\u6570\u636e\u3002 \u9ed8\u8ba4\u4f7f\u7528\u5757\u5b58\u50a8\u3002\u5982\u679c\u9700\u8981\u4f7f\u7528\u514b\u9686\u548c\u5feb\u7167\u529f\u80fd\uff0c\u8bf7\u786e\u4fdd\u60a8\u7684\u5b58\u50a8\u6c60\u652f\u6301 VolumeSnapshots \u529f\u80fd\uff0c \u5e76\u5728\u5b58\u50a8\u6c60\uff08SC\uff09\u4e2d\u8fdb\u884c\u521b\u5efa\u3002\u8bf7\u6ce8\u610f\uff0c\u5b58\u50a8\u6c60\uff08SC\uff09\u8fd8\u6709\u5176\u4ed6\u4e00\u4e9b\u5148\u51b3\u6761\u4ef6\u9700\u8981\u6ee1\u8db3\u3002

                        • \u5148\u51b3\u6761\u4ef6\uff1a

                          • KubeVirt \u5229\u7528 Kubernetes CSI \u9a71\u52a8\u7a0b\u5e8f\u7684 VolumeSnapshot\u529f\u80fd\u6765\u6355\u83b7\u6301\u4e45\u5316\u4e91\u4e3b\u673a\u72b6\u6001\u3002 \u56e0\u6b64\uff0c\u60a8\u9700\u8981\u786e\u4fdd\u60a8\u7684\u4e91\u4e3b\u673a\u4f7f\u7528\u7531\u652f\u6301 VolumeSnapshots \u7684 StorageClass \u5e76\u914d\u7f6e\u4e86\u6b63\u786e\u7684 VolumeSnapshotClass\u3002
                          • \u67e5\u770b\u5df2\u521b\u5efa\u7684 Snapshotclass \uff0c\u5e76\u4e14\u786e\u8ba4 provisioner \u5c5e\u6027\u540c\u5b58\u50a8\u6c60\u4e2d\u7684 Driver \u5c5e\u6027\u4e00\u81f4\u3002
                        • \u652f\u6301\u6dfb\u52a0\u4e00\u5757\u7cfb\u7edf\u76d8\u548c\u591a\u5757\u6570\u636e\u76d8\u3002

                      • \u7f51\u7edc\uff1a\u82e5\u60a8\u4e0d\u505a\u4efb\u4f55\u914d\u7f6e\uff0c\u7cfb\u7edf\u5c06\u9ed8\u8ba4\u521b\u5efa\u4e00\u4e2a VirtIO \u7c7b\u578b\u7684\u7f51\u7edc\u3002

                      "},{"location":"admin/virtnest/template/index.html#_6","title":"\u767b\u5f55\u8bbe\u7f6e","text":"
                      • \u7528\u6237\u540d/\u5bc6\u7801\uff1a\u53ef\u4ee5\u901a\u8fc7\u7528\u6237\u540d\u548c\u5bc6\u7801\u767b\u5f55\u81f3\u4e91\u4e3b\u673a\u3002
                      • SSH\uff1a\u9009\u62e9 SSH \u767b\u5f55\u65b9\u5f0f\u65f6\u53ef\u4e3a\u4e91\u4e3b\u673a\u7ed1\u5b9a SSH \u5bc6\u94a5\uff0c\u7528\u4e8e\u65e5\u540e\u767b\u5f55\u4e91\u4e3b\u673a\u3002
                      "},{"location":"admin/virtnest/template/tep.html","title":"\u4e91\u4e3b\u673a\u6a21\u677f","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5185\u7f6e\u4e91\u4e3b\u673a\u6a21\u677f\u548c\u81ea\u5b9a\u4e49\u4e91\u4e3b\u673a\u6a21\u677f\u3002

                      \u901a\u8fc7\u5185\u7f6e\u6a21\u677f\u548c\u81ea\u5b9a\u4e49\u6a21\u677f\uff0c\u7528\u6237\u53ef\u4ee5\u8f7b\u677e\u521b\u5efa\u65b0\u7684\u4e91\u4e3b\u673a\u3002\u6b64\u5916\uff0c\u6211\u4eec\u8fd8\u63d0\u4f9b\u5c06\u73b0\u6709\u4e91\u4e3b\u673a\u8f6c\u6362\u4e3a\u4e91\u4e3b\u673a\u6a21\u677f\u7684\u529f\u80fd\uff0c\u8ba9\u7528\u6237\u80fd\u591f\u66f4\u52a0\u7075\u6d3b\u5730\u7ba1\u7406\u548c\u4f7f\u7528\u8d44\u6e90\u3002

                      "},{"location":"admin/virtnest/template/tep.html#_2","title":"\u4e91\u4e3b\u673a\u6a21\u677f","text":"
                      1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u4e91\u4e3b\u673a\u6a21\u677f \uff0c\u8fdb\u5165 \u4e91\u4e3b\u673a\u6a21\u677f \u9875\u9762\uff0c\u82e5\u8be5\u6a21\u677f\u662f\u7531\u914d\u7f6e\u4e86 GPU \u7684\u4e91\u4e3b\u673a\u8f6c\u6362\u800c\u6765\uff0c\u6a21\u677f\u4e5f\u4f1a\u5e26\u6709 GPU \u7684\u4fe1\u606f\uff0c\u5c06\u5728\u6a21\u677f\u5217\u8868\u4e2d\u5c55\u793a\u3002

                      2. \u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u5185\u7f6e\u6a21\u677f\u6267\u884c\u521b\u5efa\u4e91\u4e3b\u673a\u548c\u67e5\u770b YAML \u64cd\u4f5c\uff1b\u5bf9\u81ea\u5b9a\u4e49\u6a21\u677f\u652f\u6301\u521b\u5efa\u4e91\u4e3b\u673a\u3001\u7f16\u8f91 YAML \u548c\u5220\u9664\u64cd\u4f5c\u3002

                      "},{"location":"admin/virtnest/template/tep.html#_3","title":"\u5185\u7f6e\u6a21\u677f","text":"
                      • \u5e73\u53f0\u5185\u5185\u7f6e\u4e24\u79cd\u6a21\u677f\uff0c\u5206\u522b\u662f CentOS \u548c Ubuntu\u3002

                      "},{"location":"admin/virtnest/template/tep.html#_4","title":"\u81ea\u5b9a\u4e49\u6a21\u677f","text":"

                      \u81ea\u5b9a\u4e49\u6a21\u677f\u662f\u7531\u4e91\u4e3b\u673a\u914d\u7f6e\u8f6c\u5316\u800c\u6765\u7684\u6a21\u677f\u3002\u4ee5\u4e0b\u4ecb\u7ecd\u5982\u4f55\u4ece\u4e91\u4e3b\u673a\u914d\u7f6e\u8f6c\u6362\u4e3a\u6a21\u677f\u3002

                      1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u4e91\u4e3b\u673a \uff0c\u8fdb\u5165\u5217\u8868\u9875\u9762\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \u652f\u6301\u5c06\u914d\u7f6e\u8f6c\u6362\u4e3a\u6a21\u677f\u3002\u53ea\u6709\u8fd0\u884c\u4e2d/\u5173\u95ed\u72b6\u6001\u4e0b\u7684\u4e91\u4e3b\u673a\u652f\u6301\u8f6c\u5316\u3002

                      2. \u586b\u5199\u65b0\u6a21\u677f\u7684\u540d\u79f0\uff0c\u63d0\u793a\u539f\u59cb\u4e91\u4e3b\u673a\u5c06\u4f1a\u4fdd\u7559\u5e76\u4e14\u53ef\u7528\u3002\u8f6c\u6362\u6210\u529f\u540e\uff0c\u5c06\u4f1a\u5728\u6a21\u677f\u5217\u8868\u65b0\u589e\u4e00\u6761\u6570\u636e\u3002

                      "},{"location":"admin/virtnest/template/tep.html#_5","title":"\u6a21\u677f\u8be6\u60c5","text":"

                      \u6210\u529f\u521b\u5efa\u51fa\u6765\u4e00\u4e2a\u6a21\u677f\u540e\uff0c\u70b9\u51fb\u6a21\u677f\u540d\u79f0\uff0c\u53ef\u4ee5\u67e5\u770b\u4e91\u4e3b\u673a\u8be6\u60c5\uff0c\u5305\u62ec\u57fa\u672c\u4fe1\u606f\u3001GPU \u4fe1\u606f\u3001\u5b58\u50a8\u3001\u7f51\u7edc\u7b49\u3002\u5982\u679c\u9700\u8981\u5feb\u901f\u57fa\u4e8e\u8be5\u6a21\u677f\u90e8\u7f72\u65b0\u7684\u4e91\u4e3b\u673a\uff0c\u53ea\u9700\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa\u4e91\u4e3b\u673a \u6309\u94ae\u5373\u53ef\u4fbf\u6377\u64cd\u4f5c\u3002

                      "},{"location":"admin/virtnest/vm/auto-migrate.html","title":"\u4e91\u4e3b\u673a\u81ea\u52a8\u6f02\u79fb","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5f53\u96c6\u7fa4\u5185\u67d0\u4e2a\u8282\u70b9\u56e0\u4e3a\u65ad\u7535\u6216\u7f51\u7edc\u6545\u969c\uff0c\u5bfc\u81f4\u8be5\u8282\u70b9\u4e0a\u7684\u4e91\u4e3b\u673a\u65e0\u6cd5\u8bbf\u95ee\u65f6\uff0c \u5982\u4f55\u5c06\u6b63\u5728\u8fd0\u884c\u7684\u4e91\u4e3b\u673a\u65e0\u7f1d\u8fc1\u79fb\u5230\u5176\u4ed6\u7684\u8282\u70b9\u4e0a\uff0c\u540c\u65f6\u4fdd\u8bc1\u4e1a\u52a1\u7684\u8fde\u7eed\u6027\u548c\u6570\u636e\u7684\u5b89\u5168\u6027\u3002

                      \u4e0e\u5b9e\u65f6\u8fc1\u79fb\u76f8\u6bd4\uff0c\u81ea\u52a8\u6f02\u79fb\u4e0d\u9700\u8981\u60a8\u5728\u754c\u9762\u4e2d\u4e3b\u52a8\u64cd\u4f5c\uff0c\u800c\u662f\u7cfb\u7edf\u81ea\u52a8\u89e6\u53d1\u8fc1\u79fb\u8fc7\u7a0b\u3002

                      "},{"location":"admin/virtnest/vm/auto-migrate.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u5b9e\u73b0\u81ea\u52a8\u6f02\u79fb\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u4e91\u4e3b\u673a\u672a\u8fdb\u884c\u78c1\u76d8\u843d\u76d8\u64cd\u4f5c\uff0c\u6216\u4f7f\u7528 Rook-Ceph\u3001HwameiStor HA \u6a21\u5f0f\u4f5c\u4e3a\u5b58\u50a8\u7cfb\u7edf
                      • \u8282\u70b9\u5931\u8054\u65f6\u95f4\u8d85\u8fc7\u4e94\u5206\u949f
                      • \u786e\u4fdd\u96c6\u7fa4\u5185\u81f3\u5c11\u6709\u4e24\u4e2a\u8282\u70b9\u53ef\u4f9b\u4f7f\u7528\uff0c\u5e76\u4e14\u4e91\u4e3b\u673a\u6ca1\u6709\u6307\u5b9a\u8c03\u5ea6\u8282\u70b9
                      • \u4e91\u4e3b\u673a\u7684 launcher pod \u5df2\u88ab\u5220\u9664
                      "},{"location":"admin/virtnest/vm/auto-migrate.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u68c0\u67e5\u4e91\u4e3b\u673a launcher pod \u72b6\u6001\uff1a

                        kubectl get pod\n

                        \u67e5\u770b launcher pod \u662f\u5426\u5904\u4e8e Terminating \u72b6\u6001\u3002

                      2. \u5f3a\u5236\u5220\u9664 launcher pod\uff1a

                        \u5982\u679c launcher pod \u72b6\u6001\u4e3a Terminating\uff0c\u53ef\u4ee5\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u8fdb\u884c\u5f3a\u5236\u5220\u9664\uff1a

                        kubectl delete <launcher pod> --force\n

                        \u66ff\u6362 <launcher pod> \u4e3a\u4f60\u7684 launcher pod \u540d\u79f0\u3002

                      3. \u7b49\u5f85\u91cd\u65b0\u521b\u5efa\u5e76\u68c0\u67e5\u72b6\u6001\uff1a

                        \u5220\u9664\u540e\uff0c\u7cfb\u7edf\u5c06\u81ea\u52a8\u91cd\u65b0\u521b\u5efa launcher pod\u3002 \u7b49\u5f85\u5176\u72b6\u6001\u53d8\u4e3a running\uff0c\u7136\u540e\u5237\u65b0\u4e91\u4e3b\u673a\u5217\u8868\uff0c\u89c2\u5bdf\u4e91\u4e3b\u673a\u662f\u5426\u6210\u529f\u8fc1\u79fb\u5230\u65b0\u8282\u70b9\u3002
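  A quick way to follow this from the CLI (a sketch; vmi is the resource short name for KubeVirt's VirtualMachineInstance):

    # Watch the launcher pod being recreated
    kubectl get pod -w | grep virt-launcher

    # Confirm which node the VM instance landed on
    kubectl get vmi -o wide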

                      "},{"location":"admin/virtnest/vm/auto-migrate.html#_4","title":"\u6ce8\u610f\u4e8b\u9879","text":"

                      \u5982\u679c\u4f7f\u7528 rook-ceph \u4f5c\u4e3a\u5b58\u50a8\uff0c\u9700\u8981\u914d\u7f6e\u4e3a ReadWriteOnce \u6a21\u5f0f\uff1a

                      1. \u5f3a\u5236\u5220\u9664 pod \u540e\uff0c\u9700\u8981\u7b49\u5f85\u5927\u7ea6\u516d\u5206\u949f\u4ee5\u8ba9 launcher pod \u542f\u52a8\uff0c\u6216\u8005\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u547d\u4ee4\u7acb\u5373\u542f\u52a8 pod\uff1a

                        kubectl get pv | grep <vm name>\nkubectl get VolumeAttachment | grep <pv name>\n

                        \u66ff\u6362 <vm name> \u548c <pv name> \u4e3a\u4f60\u7684\u4e91\u4e3b\u673a\u540d\u79f0\u548c\u6301\u4e45\u5377\u540d\u79f0\u3002

                      2. \u7136\u540e\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u5220\u9664\u5bf9\u5e94\u7684 VolumeAttachment\uff1a

                        kubectl delete VolumeAttachment <vm>\n

                        \u66ff\u6362 <vm> \u4e3a\u4f60\u7684\u4e91\u4e3b\u673a\u540d\u79f0\u3002

                      "},{"location":"admin/virtnest/vm/clone.html","title":"\u514b\u9686\u4e91\u4e3b\u673a","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u514b\u9686\u4e00\u53f0\u65b0\u7684\u4e91\u4e3b\u673a\u3002

                      \u7528\u6237\u53ef\u4ee5\u514b\u9686\u4e00\u53f0\u65b0\u7684\u4e91\u4e3b\u673a\uff0c\u514b\u9686\u540e\u7684\u4e91\u4e3b\u673a\u5c06\u5177\u6709\u4e0e\u539f\u59cb\u4e91\u4e3b\u673a\u76f8\u540c\u7684\u64cd\u4f5c\u7cfb\u7edf\u548c\u7cfb\u7edf\u914d\u7f6e\uff0c\u80fd\u591f\u5b9e\u73b0\u5feb\u901f\u90e8\u7f72\u548c\u6269\u5c55\uff0c\u5feb\u901f\u521b\u5efa\u76f8\u4f3c\u914d\u7f6e\u7684\u65b0\u4e91\u4e3b\u673a\uff0c\u800c\u65e0\u9700\u4ece\u5934\u5f00\u59cb\u5b89\u88c5\u3002

                      "},{"location":"admin/virtnest/vm/clone.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u4f7f\u7528\u514b\u9686\u529f\u80fd\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff08\u548c\u5feb\u7167\u529f\u80fd\u7684\u524d\u63d0\u6761\u4ef6\u4e00\u81f4\uff09\uff1a

                      • \u53ea\u6709\u975e\u9519\u8bef\u72b6\u6001\u4e0b\u7684\u4e91\u4e3b\u673a\u624d\u80fd\u4f7f\u7528\u514b\u9686\u529f\u80fd\u3002
                      • \u5b89\u88c5 Snapshot CRDs\u3001Snapshot Controller\u3001CSI Driver\u3002 \u5177\u4f53\u5b89\u88c5\u6b65\u9aa4\u53ef\u53c2\u8003 CSI Snapshotter\u3002
                      • \u7b49\u5f85 snapshot-controller \u7ec4\u4ef6\u51c6\u5907\u5c31\u7eea, \u8be5\u7ec4\u4ef6\u4f1a\u76d1\u63a7 VolumeSnapshot \u548c VolumeSnapshotContent \u76f8\u5173\u4e8b\u4ef6\uff0c\u5e76\u89e6\u53d1\u76f8\u5173\u64cd\u4f5c\u3002
                      • \u7b49\u5f85 CSI Driver \u51c6\u5907\u5c31\u7eea, \u786e\u4fdd csi-snapshotter sidecar \u8dd1\u5728 CSI Driver \u91cc\uff0ccsi-snapshotter sidecar \u4f1a\u76d1\u63a7 VolumeSnapshotContent \u76f8\u5173\u4e8b\u4ef6\uff0c\u5e76\u89e6\u53d1\u76f8\u5173\u64cd\u4f5c\u3002
                        • \u5982\u5b58\u50a8\u662f Rook-Ceph\uff0c\u53ef\u53c2\u8003 ceph-csi-snapshot
                        • \u5982\u5b58\u50a8\u662f HwameiStor\uff0c\u53ef\u53c2\u8003 huameistor-snapshot
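A minimal readiness-check sketch for these prerequisites (the snapshot-controller namespace depends on how it was installed):

  # Snapshot CRDs installed?
  kubectl get crd volumesnapshots.snapshot.storage.k8s.io \
    volumesnapshotclasses.snapshot.storage.k8s.io \
    volumesnapshotcontents.snapshot.storage.k8s.io

  # snapshot-controller running? (the csi-snapshotter sidecar lives inside the CSI driver pods)
  kubectl get pods -A | grep snapshot-controller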
                      "},{"location":"admin/virtnest/vm/clone.html#_3","title":"\u514b\u9686\u4e91\u4e3b\u673a","text":"
                      1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u4e91\u4e3b\u673a \uff0c\u8fdb\u5165\u5217\u8868\u9875\u9762\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u975e\u9519\u8bef\u72b6\u6001\u4e0b\u7684\u4e91\u4e3b\u673a\u6267\u884c\u5feb\u7167\u64cd\u4f5c\u3002

                      2. \u5f39\u51fa\u5f39\u6846\uff0c\u9700\u8981\u586b\u5199\u514b\u9686\u65b0\u7684\u4e91\u4e3b\u673a\u7684\u540d\u79f0\u548c\u63cf\u8ff0\uff0c\u514b\u9686\u64cd\u4f5c\u53ef\u80fd\u9700\u8981\u4e00\u4e9b\u65f6\u95f4\uff0c\u5177\u4f53\u53d6\u51b3\u4e8e\u4e91\u4e3b\u673a\u7684\u5927\u5c0f\u548c\u5b58\u50a8\u6027\u80fd\u3002

                      3. \u514b\u9686\u6210\u529f\u540e\u53ef\u4ee5\u5728\u4e91\u4e3b\u673a\u5217\u8868\u5185\u67e5\u770b\u5230\u65b0\u7684\u4e91\u4e3b\u673a\uff0c\u65b0\u521b\u5efa\u51fa\u6765\u7684\u4e91\u4e3b\u673a\u5904\u4e8e\u5173\u673a\u72b6\u6001\uff0c\u82e5\u9700\u8981\u5f00\u673a\u9700\u8981\u624b\u52a8\u64cd\u4f5c\u3002

                      4. \u514b\u9686\u524d\u5efa\u8bae\u5bf9\u539f\u6709\u4e91\u4e3b\u673a\u8fdb\u884c\u5feb\u7167\uff0c\u5982\u679c\u514b\u9686\u8fc7\u7a0b\u4e2d\u9047\u5230\u95ee\u9898\uff0c\u8bf7\u68c0\u67e5\u5148\u51b3\u6761\u4ef6\u662f\u5426\u6ee1\u8db3\uff0c\u5e76\u5c1d\u8bd5\u91cd\u65b0\u6267\u884c\u514b\u9686\u64cd\u4f5c\u3002

                      "},{"location":"admin/virtnest/vm/create-secret.html","title":"\u521b\u5efa\u5bc6\u94a5","text":"

                      \u5f53\u521b\u5efa\u4e91\u4e3b\u673a\u4f7f\u7528\u5bf9\u8c61\u5b58\u50a8\uff08S3\uff09\u4f5c\u4e3a\u955c\u50cf\u6765\u6e90\u65f6\uff0c\u6709\u65f6\u5019\u9700\u8981\u586b\u5199\u5bc6\u94a5\u6765\u83b7\u53d6\u901a\u8fc7 S3 \u7684\u9a8c\u8bc1\u3002\u4ee5\u4e0b\u5c06\u4ecb\u7ecd\u5982\u4f55\u521b\u5efa\u7b26\u5408\u4e91\u4e3b\u673a\u8981\u6c42\u7684\u5bc6\u94a5\u3002

                      1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u96c6\u7fa4\u5217\u8868 \uff0c\u8fdb\u5165\u4e91\u4e3b\u673a\u6240\u5728\u96c6\u7fa4\u8be6\u60c5\uff0c\u70b9\u51fb \u914d\u7f6e\u4e0e\u5bc6\u94a5 \uff0c\u9009\u62e9 \u5bc6\u94a5 \uff0c\u70b9\u51fb \u521b\u5efa\u5bc6\u94a5 \u3002

                      2. \u8fdb\u5165\u521b\u5efa\u9875\u9762\uff0c\u586b\u5199\u5bc6\u94a5\u540d\u79f0\uff0c\u9009\u62e9\u548c\u4e91\u4e3b\u673a\u76f8\u540c\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u6ce8\u610f\u9700\u8981\u9009\u62e9 \u9ed8\u8ba4\uff08Opaque\uff09 \u7c7b\u578b\u3002\u5bc6\u94a5\u6570\u636e\u9700\u8981\u9075\u5faa\u4ee5\u4e0b\u539f\u5219

                        • accessKeyId: \u9700\u8981\u4ee5 Base64 \u7f16\u7801\u65b9\u5f0f\u8868\u793a\u7684\u6570\u636e
                        • secretKey: \u9700\u8981\u4ee5 Base64 \u7f16\u7801\u65b9\u5f0f\u8868\u793a\u7684\u6570\u636e
                      3. \u521b\u5efa\u6210\u529f\u540e\u53ef\u4ee5\u5728\u521b\u5efa\u4e91\u4e3b\u673a\u65f6\u4f7f\u7528\u6240\u9700\u5bc6\u94a5\uff0c\u6700\u540e\u901a\u8fc7\u9a8c\u8bc1\u3002
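A minimal sketch of such a secret (s3-image-secret is a hypothetical name; replace the placeholders with Base64 output, e.g. echo -n '<access-key-id>' | base64):

  apiVersion: v1
  kind: Secret
  metadata:
    name: s3-image-secret          # hypothetical name
    namespace: default             # must match the VM's namespace
  type: Opaque
  data:
    accessKeyId: <base64-encoded-access-key-id>
    secretKey: <base64-encoded-secret-key>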

                      "},{"location":"admin/virtnest/vm/cross-cluster-migrate.html","title":"\u4e91\u4e3b\u673a\u8de8\u96c6\u7fa4\u8fc1\u79fb","text":"

                      \u672c\u529f\u80fd\u6682\u672a\u505a UI \u754c\u9762\u80fd\u529b\uff0c\u8bf7\u53c2\u8003\u6587\u6863\u7684\u64cd\u4f5c\u6b65\u9aa4\u6267\u884c\u3002

                      "},{"location":"admin/virtnest/vm/cross-cluster-migrate.html#_2","title":"\u4f7f\u7528\u573a\u666f","text":"
                      • \u5f53\u539f\u96c6\u7fa4\u53d1\u751f\u6545\u969c\u6216\u6027\u80fd\u4e0b\u964d\u5bfc\u81f4\u8be5\u96c6\u7fa4\u4e0a\u7684\u4e91\u4e3b\u673a\u65e0\u6cd5\u8bbf\u95ee\u65f6\uff0c\u5c06\u4e91\u4e3b\u673a\u8fc1\u79fb\u5230\u5176\u4ed6\u7684\u96c6\u7fa4\u4e0a\u3002
                      • \u9700\u8981\u5bf9\u96c6\u7fa4\u8fdb\u884c\u8ba1\u5212\u5185\u7684\u7ef4\u62a4\u6216\u5347\u7ea7\u65f6\uff0c\u5c06\u4e91\u4e3b\u673a\u8fc1\u79fb\u5230\u5176\u4ed6\u7684\u96c6\u7fa4\u4e0a\u3002
                      • \u5f53\u7279\u5b9a\u5e94\u7528\u7684\u6027\u80fd\u9700\u6c42\u53d8\u5316\uff0c\u9700\u8981\u8c03\u6574\u8d44\u6e90\u5206\u914d\u65f6\uff0c\u8fc1\u79fb\u4e91\u4e3b\u673a\u5230\u5176\u4ed6\u7684\u96c6\u7fa4\u4e0a\u4ee5\u5339\u914d\u66f4\u5408\u9002\u7684\u8d44\u6e90\u914d\u7f6e\u3002
                      "},{"location":"admin/virtnest/vm/cross-cluster-migrate.html#_3","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u5b9e\u73b0\u4e91\u4e3b\u673a\u8de8\u96c6\u7fa4\u8fc1\u79fb\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u96c6\u7fa4\u7f51\u7edc\u4e92\u901a\uff1a\u786e\u4fdd\u539f\u6709\u96c6\u7fa4\u4e0e\u76ee\u6807\u8fc1\u79fb\u96c6\u7fa4\u4e4b\u95f4\u7684\u7f51\u7edc\u662f\u4e92\u901a\u7684
                      • \u76f8\u540c\u5b58\u50a8\u7c7b\u578b\uff1a\u76ee\u6807\u8fc1\u79fb\u96c6\u7fa4\u9700\u652f\u6301\u4e0e\u539f\u6709\u96c6\u7fa4\u76f8\u540c\u7684\u5b58\u50a8\u7c7b\u578b\uff08\u4f8b\u5982\uff0c\u5982\u679c\u5bfc\u51fa\u96c6\u7fa4\u4f7f\u7528 rook-ceph-block \u7c7b\u578b\u7684 StorageClass\uff0c\u5219\u5bfc\u5165\u96c6\u7fa4\u4e5f\u5fc5\u987b\u652f\u6301\u6b64\u7c7b\u578b\uff09\u3002
                      • \u5728\u539f\u6709\u96c6\u7fa4\u7684 KubeVirt \u4e2d\u5f00\u542f VMExport Feature Gate\u3002
                      "},{"location":"admin/virtnest/vm/cross-cluster-migrate.html#vmexport-feature-gate","title":"\u5f00\u542f VMExport Feature Gate","text":"

                      \u6fc0\u6d3b VMExport Feature Gate\uff0c\u5728\u539f\u6709\u96c6\u7fa4\u5185\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c \u53ef\u53c2\u8003How to activate a feature gate

                      kubectl edit kubevirt kubevirt -n virtnest-system\n

                      \u8fd9\u6761\u547d\u4ee4\u5c06\u4fee\u6539 featureGates \uff0c\u589e\u52a0 VMExport \u3002

                      apiVersion: kubevirt.io/v1\nkind: KubeVirt\nmetadata:\n  name: kubevirt\n  namespace: virtnest-system\nspec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n        - DataVolumes\n        - LiveMigration\n        - VMExport\n
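To verify that the gate is active after saving (a sketch):

  kubectl get kubevirt kubevirt -n virtnest-system \
    -o jsonpath='{.spec.configuration.developerConfiguration.featureGates}{"\n"}'
  # expected output includes VMExport, e.g. ["DataVolumes","LiveMigration","VMExport"]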
                      "},{"location":"admin/virtnest/vm/cross-cluster-migrate.html#ingress","title":"\u914d\u7f6e\u539f\u6709\u96c6\u7fa4\u7684 Ingress","text":"

                      \u4ee5 Nginx Ingress \u4e3a\u4f8b\uff0c\u914d\u7f6e Ingress \u4ee5\u6307\u5411 virt-exportproxy Service\uff1a

                      apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: ingress-vm-export\n  namespace: virtnest-system\nspec:\n  tls:\n    - hosts:\n        - upgrade-test.com\n      secretName: nginx-tls\n  rules:\n    - host: upgrade-test.com\n      http:\n        paths:\n          - path: /\n            pathType: Prefix\n            backend:\n              service:\n                name: virt-exportproxy\n                port:\n                  number: 8443\n  ingressClassName: nginx\n
                      "},{"location":"admin/virtnest/vm/cross-cluster-migrate.html#_4","title":"\u8fc1\u79fb\u6b65\u9aa4","text":"
                      1. \u521b\u5efa VirtualMachineExport CR

                        • \u5982\u679c \u4e91\u4e3b\u673a\u5173\u673a\u72b6\u6001 \u4e0b\u8fdb\u884c\u8fc1\u79fb\uff08\u51b7\u8fc1\u79fb\uff09\uff1a

                          apiVersion: v1\nkind: Secret\nmetadata:\n  name: example-token # \u5bfc\u51fa\u4e91\u4e3b\u673a\u6240\u7528 token\n  namespace: default # \u4e91\u4e3b\u673a\u6240\u5728\u547d\u540d\u7a7a\u95f4\nstringData:\n  token: 1234567890ab # \u5bfc\u51fa\u4f7f\u7528\u7684 token,\u53ef\u4fee\u6539\n\n---\napiVersion: export.kubevirt.io/v1alpha1\nkind: VirtualMachineExport\nmetadata:\n  name: example-export # \u5bfc\u51fa\u540d\u79f0, \u53ef\u81ea\u884c\u4fee\u6539\n  namespace: default # \u4e91\u4e3b\u673a\u6240\u5728\u547d\u540d\u7a7a\u95f4\nspec:\n  tokenSecretRef: example-token # \u548c\u4e0a\u9762\u521b\u5efa\u7684token\u540d\u79f0\u4fdd\u6301\u4e00\u81f4\n  source:\n    apiGroup: \"kubevirt.io\"\n    kind: VirtualMachine\n    name: testvm # \u4e91\u4e3b\u673a\u540d\u79f0\n
                        • \u5982\u679c\u8981\u5728 \u4e91\u4e3b\u673a\u4e0d\u5173\u673a \u7684\u72b6\u6001\u4e0b\uff0c\u4f7f\u7528\u4e91\u4e3b\u673a\u5feb\u7167\u8fdb\u884c\u8fc1\u79fb\uff08\u70ed\u8fc1\u79fb\uff09\uff1a

                          apiVersion: v1\nkind: Secret\nmetadata:\n  name: example-token # \u5bfc\u51fa\u4e91\u4e3b\u673a\u6240\u7528 token\n  namespace: default # \u4e91\u4e3b\u673a\u6240\u5728\u547d\u540d\u7a7a\u95f4\nstringData:\n  token: 1234567890ab # \u5bfc\u51fa\u4f7f\u7528\u7684 token ,\u53ef\u4fee\u6539\n\n---\napiVersion: export.kubevirt.io/v1alpha1\nkind: VirtualMachineExport\nmetadata:\n  name: export-snapshot # \u5bfc\u51fa\u540d\u79f0, \u53ef\u81ea\u884c\u4fee\u6539\n  namespace: default # \u4e91\u4e3b\u673a\u6240\u5728\u547d\u540d\u7a7a\u95f4\nspec:\n  tokenSecretRef: export-token # \u548c\u4e0a\u9762\u521b\u5efa\u7684token\u540d\u79f0\u4fdd\u6301\u4e00\u81f4\n  source:\n    apiGroup: \"snapshot.kubevirt.io\"\n    kind: VirtualMachineSnapshot\n    name: export-snap-202407191524 # \u5bf9\u5e94\u7684\u4e91\u4e3b\u673a\u5feb\u7167\u540d\u79f0\n
                      2. \u68c0\u67e5 VirtualMachineExport \u662f\u5426\u51c6\u5907\u5c31\u7eea\uff1a

                        # \u8fd9\u91cc\u7684 example-export \u9700\u8981\u66ff\u6362\u4e3a\u521b\u5efa\u7684 VirtualMachineExport \u540d\u79f0\nkubectl get VirtualMachineExport example-export -n default\n\nNAME             SOURCEKIND       SOURCENAME   PHASE\nexample-export   VirtualMachine   testvm       Ready\n
                      3. \u5f53 VirtualMachineExport \u51c6\u5907\u5c31\u7eea\u540e\uff0c\u5bfc\u51fa\u4e91\u4e3b\u673a YAML\u3002

                        • \u5982\u679c\u5df2\u5b89\u88c5 virtctl \uff0c\u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u5bfc\u51fa\u4e91\u4e3b\u673a\u7684 YAML\uff1a

                          # \u81ea\u884c\u5c06 example-export\u66ff\u6362\u4e3a\u521b\u5efa\u7684 VirtualMachineExport \u540d\u79f0\n# \u81ea\u884c\u901a\u8fc7 -n \u6307\u5b9a\u547d\u540d\u7a7a\u95f4\nvirtctl vmexport download example-export --manifest --include-secret --output=manifest.yaml\n
                        • \u5982\u679c\u6ca1\u6709\u5b89\u88c5 virtctl \uff0c\u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u5bfc\u51fa\u4e91\u4e3b\u673a YAML\uff1a

                          # \u81ea\u884c\u66ff\u6362 example-export \u66ff\u6362\u4e3a\u521b\u5efa\u7684 VirtualMachineExport \u540d\u79f0 \u548c\u547d\u540d\u7a7a\u95f4\nmanifesturl=$(kubectl get VirtualMachineExport example-export -n default -o=jsonpath='{.status.links.internal.manifests[0].url}')\nsecreturl=$(kubectl get VirtualMachineExport example-export -n default -o=jsonpath='{.status.links.internal.manifests[1].url}')\n# \u81ea\u884c\u66ff\u6362 secert \u540d\u79f0\u548c\u547d\u540d\u7a7a\u95f4\ntoken=$(kubectl get secret example-token -n default -o=jsonpath='{.data.token}' | base64 -d)\n\ncurl -H \"Accept: application/yaml\" -H \"x-kubevirt-export-token: $token\"  --insecure  $secreturl > manifest.yaml\ncurl -H \"Accept: application/yaml\" -H \"x-kubevirt-export-token: $token\"  --insecure  $manifesturl >> manifest.yaml\n
                      4. \u5bfc\u5165\u4e91\u4e3b\u673a

                        \u5c06\u5bfc\u51fa\u7684 manifest.yaml \u590d\u5236\u5230\u76ee\u6807\u8fc1\u79fb\u96c6\u7fa4\u5e76\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\uff08\u5982\u679c\u547d\u540d\u7a7a\u95f4\u4e0d\u5b58\u5728\u5219\u9700\u8981\u63d0\u524d\u521b\u5efa\uff09\uff1a

                        kubectl apply -f manifest.yaml\n
                        \u521b\u5efa\u6210\u529f\u540e\uff0c\u91cd\u542f\u4e91\u4e3b\u673a\uff0c\u4e91\u4e3b\u673a\u6210\u529f\u8fd0\u884c\u540e\uff0c\u5728\u539f\u6709\u96c6\u7fa4\u5185\u5220\u9664\u539f\u4e91\u4e3b\u673a\uff08\u4e91\u4e3b\u673a\u672a\u542f\u52a8\u6210\u529f\u65f6\uff0c\u8bf7\u52ff\u5220\u9664\u539f\u4e91\u4e3b\u673a\uff09\u3002
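  A post-import sketch for the target cluster (testvm is the VM name from the export example; virtctl start assumes virtctl is installed):

    # Confirm the imported objects exist
    kubectl get vm -n default

    # Start the imported VM
    virtctl start testvm -n default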

                      "},{"location":"admin/virtnest/vm/health-check.html","title":"\u5065\u5eb7\u68c0\u67e5","text":"

                      \u5f53\u914d\u7f6e\u4e91\u4e3b\u673a\u7684\u5b58\u6d3b\uff08Liveness\uff09\u548c\u5c31\u7eea\uff08Readiness\uff09\u63a2\u9488\u65f6\uff0c\u4e0e Kubernetes \u7684\u914d\u7f6e\u8fc7\u7a0b\u76f8\u4f3c\u3002\u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7 YAML \u4e3a\u4e91\u4e3b\u673a\u914d\u7f6e\u5065\u5eb7\u68c0\u67e5\u53c2\u6570\u3002

                      \u4f46\u662f\u9700\u8981\u6ce8\u610f\uff1a\u9700\u8981\u5728\u4e91\u4e3b\u673a\u521b\u5efa\u6210\u529f\u5e76\u4e14\u5904\u4e8e\u5173\u673a\u72b6\u6001\u4e0b\uff0c\u4fee\u6539 YAML \u8fdb\u884c\u914d\u7f6e\u3002

                      "},{"location":"admin/virtnest/vm/health-check.html#http-liveness-probe","title":"\u914d\u7f6e HTTP Liveness Probe","text":"
                      1. \u5728 spec.template.spec \u4e2d\u914d\u7f6e livenessProbe.httpGet\u3002
                      2. \u4fee\u6539 cloudInitNoCloud \u4ee5\u542f\u52a8\u4e00\u4e2a HTTP \u670d\u52a1\u5668\u3002

                        \u70b9\u51fb\u67e5\u770b YAML \u793a\u4f8b\u914d\u7f6e
                        apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n    virtnest.io/alias-name: ''\n    virtnest.io/image-secret: ''\n    virtnest.io/image-source: docker\n    virtnest.io/os-image: release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  creationTimestamp: '2024-10-15T02:39:45Z'\n  finalizers:\n    - kubevirt.io/virtualMachineControllerFinalize\n  generation: 1\n  labels:\n    virtnest.io/os-family: Ubuntu\n    virtnest.io/os-version: '22.04'\n  name: test-probe\n  namespace: amamba-team\n  resourceVersion: '254032135'\n  uid: 6d92779d-7415-4721-8c7b-a2dde163d758\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        creationTimestamp: null\n        name: test-probe-rootdisk\n        namespace: amamba-team\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 10Gi\n          storageClassName: hwameistor-storage-lvm-hdd\n        source:\n          registry:\n            url: >-\n          docker://release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  runStrategy: Halted\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio\n              name: rootdisk\n            - disk:\n                bus: virtio\n              name: cloudinitdisk\n          interfaces:\n            - masquerade: {}\n              name: default\n        machine:\n          type: q35\n        memory:\n          guest: 2Gi\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n        - name: default\n          pod: {}\n      livenessProbe:\n        initialDelaySeconds: 120\n        periodSeconds: 20\n        httpGet:\n          port: 1500\n        timeoutSeconds: 10\n      volumes:\n        - dataVolume:\n            name: test-probe-rootdisk\n          name: rootdisk\n        - cloudInitNoCloud:\n            userData: |\n              #cloud-config\n              ssh_pwauth: true\n              disable_root: false\n              chpasswd: {\"list\": \"root:dangerous\", expire: False}\n              runcmd:\n                - sed -i \"/#\\?PermitRootLogin/s/^.*$/PermitRootLogin yes/g\" /etc/ssh/sshd_config\n                - systemctl restart ssh.service\n                - dhclient -r && dhclient\n                - apt-get update && apt-get install -y ncat\n                - [\"systemd-run\", \"--unit=httpserver\", \"ncat\", \"-klp\", \"1500\", \"-e\", '/usr/bin/echo -e HTTP/1.1 200 OK\\nContent-Length: 12\\n\\nHello World!']\n          name: cloudinitdisk\n
3. Depending on the operating system (for example, Ubuntu/Debian vs. CentOS), the userData configuration may differ. The main differences:

  • Package manager:

    Ubuntu/Debian use apt-get as the package manager. CentOS uses yum as the package manager.

  • SSH service restart command:

    Ubuntu/Debian use systemctl restart ssh.service. CentOS uses systemctl restart sshd.service (note that CentOS 7 and earlier use service sshd restart).

  • Installed package:

    Ubuntu/Debian install ncat. CentOS installs nmap-ncat (because ncat may not be available in CentOS's default repositories).
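  Putting those differences together, a sketch of the CentOS variant of the userData above (assuming a CentOS image; the probe port and commands mirror the Ubuntu example):

    #cloud-config
    ssh_pwauth: true
    disable_root: false
    chpasswd: {"list": "root:dangerous", expire: False}
    runcmd:
      - sed -i "/#\?PermitRootLogin/s/^.*$/PermitRootLogin yes/g" /etc/ssh/sshd_config
      - systemctl restart sshd.service           # sshd.service on CentOS
      - dhclient -r && dhclient
      - yum install -y nmap-ncat                 # nmap-ncat instead of ncat
      - ["systemd-run", "--unit=httpserver", "ncat", "-klp", "1500", "-e", '/usr/bin/echo -e HTTP/1.1 200 OK\nContent-Length: 12\n\nHello World!']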

                      "},{"location":"admin/virtnest/vm/health-check.html#tcp-liveness-probe","title":"\u914d\u7f6e TCP Liveness Probe","text":"

                      \u5728 spec.template.spec \u4e2d\u914d\u7f6e livenessProbe.tcpSocket\u3002

                      \u70b9\u51fb\u67e5\u770b YAML \u793a\u4f8b\u914d\u7f6e
                      apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n    virtnest.io/alias-name: ''\n    virtnest.io/image-secret: ''\n    virtnest.io/image-source: docker\n    virtnest.io/os-image: release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  creationTimestamp: '2024-10-15T02:39:45Z'\n  finalizers:\n    - kubevirt.io/virtualMachineControllerFinalize\n  generation: 1\n  labels:\n    virtnest.io/os-family: Ubuntu\n    virtnest.io/os-version: '22.04'\n  name: test-probe\n  namespace: amamba-team\n  resourceVersion: '254032135'\n  uid: 6d92779d-7415-4721-8c7b-a2dde163d758\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        creationTimestamp: null\n        name: test-probe-rootdisk\n        namespace: amamba-team\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 10Gi\n          storageClassName: hwameistor-storage-lvm-hdd\n        source:\n          registry:\n            url: >-\n          docker://release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  runStrategy: Halted\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio\n              name: rootdisk\n            - disk:\n                bus: virtio\n              name: cloudinitdisk\n          interfaces:\n            - masquerade: {}\n              name: default\n        machine:\n          type: q35\n        memory:\n          guest: 2Gi\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n        - name: default\n          pod: {}\n      livenessProbe:\n        initialDelaySeconds: 120\n        periodSeconds: 20\n        tcpSocket:\n          port: 1500\n        timeoutSeconds: 10\n      volumes:\n        - dataVolume:\n            name: test-probe-rootdisk\n          name: rootdisk\n        - cloudInitNoCloud:\n            userData: |\n              #cloud-config\n              ssh_pwauth: true\n              disable_root: false\n              chpasswd: {\"list\": \"root:dangerous\", expire: False}\n              runcmd:\n                - sed -i \"/#\\?PermitRootLogin/s/^.*$/PermitRootLogin yes/g\" /etc/ssh/sshd_config\n                - systemctl restart ssh.service\n                - dhclient -r && dhclient\n                - apt-get update && apt-get install -y ncat\n                - [\"systemd-run\", \"--unit=httpserver\", \"ncat\", \"-klp\", \"1500\", \"-e\", '/usr/bin/echo -e HTTP/1.1 200 OK\\nContent-Length: 12\\n\\nHello World!']\n          name: cloudinitdisk\n
                      "},{"location":"admin/virtnest/vm/health-check.html#readiness-probes","title":"\u914d\u7f6e Readiness Probes","text":"

                      \u5728 spec.template.spec \u4e2d\u914d\u7f6e readiness\u3002

                      \u70b9\u51fb\u67e5\u770b YAML \u793a\u4f8b\u914d\u7f6e
                      apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n    virtnest.io/alias-name: ''\n    virtnest.io/image-secret: ''\n    virtnest.io/image-source: docker\n    virtnest.io/os-image: release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  creationTimestamp: '2024-10-15T02:39:45Z'\n  finalizers:\n    - kubevirt.io/virtualMachineControllerFinalize\n  generation: 1\n  labels:\n    virtnest.io/os-family: Ubuntu\n    virtnest.io/os-version: '22.04'\n  name: test-probe\n  namespace: amamba-team\n  resourceVersion: '254032135'\n  uid: 6d92779d-7415-4721-8c7b-a2dde163d758\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        creationTimestamp: null\n        name: test-probe-rootdisk\n        namespace: amamba-team\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 10Gi\n          storageClassName: hwameistor-storage-lvm-hdd\n        source:\n          registry:\n            url: >-\n              docker://release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  runStrategy: Halted\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio\n              name: rootdisk\n            - disk:\n                bus: virtio\n              name: cloudinitdisk\n          interfaces:\n            - masquerade: {}\n              name: default\n        machine:\n          type: q35\n        memory:\n          guest: 2Gi\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n        - name: default\n          pod: {}\n      readinessProbe:\n        initialDelaySeconds: 120\n        periodSeconds: 20\n        httpGet:\n          port: 1500\n        timeoutSeconds: 10\n      volumes:\n        - dataVolume:\n            name: test-probe-rootdisk\n          name: rootdisk\n        - cloudInitNoCloud:\n            userData: |\n              #cloud-config\n              ssh_pwauth: true\n              disable_root: false\n              chpasswd: {\"list\": \"root:dangerous\", expire: False}\n              runcmd:\n                - sed -i \"/#\\?PermitRootLogin/s/^.*$/PermitRootLogin yes/g\" /etc/ssh/sshd_config\n                - systemctl restart ssh.service\n                - dhclient -r && dhclient\n                - apt-get update && apt-get install -y ncat\n                - [\"systemd-run\", \"--unit=httpserver\", \"ncat\", \"-klp\", \"1500\", \"-e\", '/usr/bin/echo -e HTTP/1.1 200 OK\\nContent-Length: 12\\n\\nHello World!']\n          name: cloudinitdisk\n
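
                      As a verification sketch (same namespace and VM name as above), the Ready condition reported on the VMI reflects the readiness probe result:

                      kubectl -n amamba-team get vmi test-probe -o jsonpath='{.status.conditions[?(@.type==\"Ready\")].status}'\n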
                      "},{"location":"admin/virtnest/vm/live-migration.html","title":"\u5b9e\u65f6\u8fc1\u79fb","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u5c06\u4e91\u4e3b\u673a\u4ece\u4e00\u4e2a\u8282\u70b9\u79fb\u52a8\u5230\u53e6\u4e00\u4e2a\u8282\u70b9\u3002

                      \u5f53\u8282\u70b9\u7ef4\u62a4\u6216\u8005\u5347\u7ea7\u65f6\uff0c\u7528\u6237\u53ef\u4ee5\u5c06\u6b63\u5728\u8fd0\u884c\u7684\u4e91\u4e3b\u673a\u65e0\u7f1d\u8fc1\u79fb\u5230\u5176\u4ed6\u7684\u8282\u70b9\u4e0a\uff0c\u540c\u65f6\u53ef\u4ee5\u4fdd\u8bc1\u4e1a\u52a1\u7684\u8fde\u7eed\u6027\u548c\u6570\u636e\u7684\u5b89\u5168\u6027\u3002

                      "},{"location":"admin/virtnest/vm/live-migration.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u4f7f\u7528\u5b9e\u65f6\u8fc1\u79fb\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u4e91\u4e3b\u673a\u5fc5\u987b\u5904\u4e8e\u8fd0\u884c\u72b6\u6001\u624d\u80fd\u8fdb\u884c\u5b9e\u65f6\u8fc1\u79fb\u3002
                      • \u786e\u4fdd\u60a8\u7684 PVC \u8bbf\u95ee\u6a21\u5f0f\u4e3a ReadWriteMany\uff0c\u4ee5\u4fbf\u4f7f\u7528\u5b9e\u65f6\u8fc1\u79fb\u529f\u80fd\u3002
                      • \u786e\u4fdd\u96c6\u7fa4\u5185\u81f3\u5c11\u6709\u4e24\u4e2a\u8282\u70b9\u53ef\u4f9b\u4f7f\u7528\u3002
                      "},{"location":"admin/virtnest/vm/live-migration.html#_3","title":"\u5b9e\u65f6\u8fc1\u79fb","text":"
                      1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u4e91\u4e3b\u673a \uff0c\u8fdb\u5165\u5217\u8868\u9875\u9762\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u8fd0\u884c\u72b6\u6001\u4e0b\u7684\u4e91\u4e3b\u673a\u8fdb\u884c\u8fc1\u79fb\u52a8\u4f5c\u3002\u76ee\u524d\u4e91\u4e3b\u673a\u6240\u5728\u8282\u70b9\u4e3a controller-node-3 \u3002

                      2. \u5f39\u51fa\u5f39\u6846\uff0c\u63d0\u793a\u5728\u5b9e\u65f6\u8fc1\u79fb\u671f\u95f4\uff0c\u6b63\u5728\u8fd0\u884c\u7684\u4e91\u4e3b\u673a\u5b9e\u4f8b\u4f1a\u79fb\u52a8\u5230\u53e6\u4e00\u4e2a\u8282\u70b9\uff0c\u53ef\u4ee5\u9009\u62e9\u6307\u5b9a\u8282\u70b9\u8fc1\u79fb\uff0c\u4e5f\u53ef\u4ee5\u968f\u673a\u8fc1\u79fb\uff0c\u8bf7\u786e\u4fdd\u5176\u4ed6\u8282\u70b9\u8d44\u6e90\u5145\u8db3\u3002

                      3. \u8fc1\u79fb\u9700\u8981\u4e00\u6bb5\u65f6\u95f4\uff0c\u8bf7\u8010\u5fc3\u7b49\u5f85\uff0c\u6210\u529f\u540e\u53ef\u4ee5\u5728\u4e91\u4e3b\u673a\u5217\u8868\u5185\u67e5\u770b\u8282\u70b9\u4fe1\u606f\uff0c\u6b64\u65f6\u8282\u70b9\u8fc1\u79fb\u5230 controller-node-1 \u3002
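
                      Besides the UI flow above, live migration can also be triggered declaratively through KubeVirt's VirtualMachineInstanceMigration resource; a minimal sketch, assuming a running VMI named test-probe in the amamba-team namespace:

                      apiVersion: kubevirt.io/v1\nkind: VirtualMachineInstanceMigration\nmetadata:\n  name: migration-job # any name\n  namespace: amamba-team # namespace of the running VMI\nspec:\n  vmiName: test-probe # the VMI to migrate\n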

                      "},{"location":"admin/virtnest/vm/migratiom.html","title":"\u96c6\u7fa4\u5185\u51b7\u8fc1\u79fb","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5728\u5173\u673a\u72b6\u6001\u4e0b\u5982\u4f55\u5c06\u4e91\u4e3b\u673a\u5728\u540c\u4e00\u96c6\u7fa4\u5185\u4ece\u4e00\u4e2a\u8282\u70b9\u79fb\u52a8\u5230\u53e6\u4e00\u4e2a\u8282\u70b9\u3002

                      \u51b7\u8fc1\u79fb\u7684\u4e3b\u8981\u7279\u70b9\u662f\uff0c\u4e91\u4e3b\u673a\u5728\u8fc1\u79fb\u8fc7\u7a0b\u4e2d\u4f1a\u5904\u4e8e\u79bb\u7ebf\u72b6\u6001\uff0c\u8fd9\u53ef\u80fd\u4f1a\u5bf9\u4e1a\u52a1\u8fde\u7eed\u6027\u4ea7\u751f\u5f71\u54cd\u3002\u56e0\u6b64\uff0c \u5728\u5b9e\u65bd\u51b7\u8fc1\u79fb\u65f6\u9700\u8981\u4ed4\u7ec6\u89c4\u5212\u8fc1\u79fb\u65f6\u95f4\u7a97\u53e3\uff0c\u5e76\u8003\u8651\u4e1a\u52a1\u9700\u6c42\u548c\u7cfb\u7edf\u53ef\u7528\u6027\u3002\u901a\u5e38\uff0c\u51b7\u8fc1\u79fb\u9002\u7528\u4e8e\u5bf9\u505c\u673a\u65f6\u95f4\u8981\u6c42\u4e0d\u662f\u975e\u5e38\u4e25\u683c\u7684\u573a\u666f\u3002

                      "},{"location":"admin/virtnest/vm/migratiom.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u4f7f\u7528\u51b7\u8fc1\u79fb\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u4e91\u4e3b\u673a\u5fc5\u987b\u5904\u4e8e\u5173\u673a\u72b6\u6001\u624d\u80fd\u8fdb\u884c\u51b7\u8fc1\u79fb\u3002
                      "},{"location":"admin/virtnest/vm/migratiom.html#_3","title":"\u51b7\u8fc1\u79fb","text":"
                      1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u4e91\u4e3b\u673a \uff0c\u8fdb\u5165\u5217\u8868\u9875\u9762\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c \u53ef\u4ee5\u5bf9\u5173\u673a\u72b6\u6001\u4e0b\u7684\u4e91\u4e3b\u673a\u8fdb\u884c\u8fc1\u79fb\u52a8\u4f5c\u3002\u4e91\u4e3b\u673a\u5728\u5173\u673a\u72b6\u6001\u4e0b\u65f6\u65e0\u6cd5\u67e5\u770b\u6240\u5728\u8282\u70b9\uff0c\u9700\u8981\u63d0\u524d\u89c4\u5212\u6216\u8005\u5f00\u673a\u67e5\u8be2\u3002

                        Note

                        \u5982\u679c\u60a8\u5728\u539f\u59cb\u8282\u70b9\u7684\u5b58\u50a8\u6c60\u4e2d\u4f7f\u7528\u4e86 local-path\uff0c\u8de8\u8282\u70b9\u8fc1\u79fb\u65f6\u53ef\u80fd\u51fa\u73b0\u95ee\u9898\uff0c\u8bf7\u8c28\u614e\u9009\u62e9\u3002

                      2. \u70b9\u51fb\u8fc1\u79fb\u540e\uff0c\u63d0\u793a\u5728\u8fc1\u79fb\u671f\u95f4\uff0c\u53ef\u4ee5\u9009\u62e9\u6307\u5b9a\u8282\u70b9\u8fc1\u79fb\uff0c\u4e5f\u53ef\u4ee5\u968f\u673a\u8fc1\u79fb\uff0c\u82e5\u9700\u8981\u4fee\u6539\u5b58\u50a8\u6c60\uff0c \u9700\u8981\u786e\u4fdd\u76ee\u6807\u8282\u70b9\u5185\u6709\u53ef\u7528\u5b58\u50a8\u6c60\u3002\u540c\u65f6\u9700\u8981\u76ee\u6807\u8282\u70b9\u8d44\u6e90\u5145\u8db3\uff0c\u8fc1\u79fb\u8fc7\u7a0b\u8017\u8d39\u65f6\u95f4\u8f83\u957f\uff0c\u8bf7\u8010\u5fc3\u7b49\u5f85\u3002

                      3. \u8fc1\u79fb\u9700\u8981\u4e00\u6bb5\u65f6\u95f4\uff0c\u8bf7\u8010\u5fc3\u7b49\u5f85\uff0c\u6210\u529f\u540e\u9700\u8981\u91cd\u542f\u67e5\u770b\u662f\u5426\u8fc1\u79fb\u6210\u529f\u3002\u672c\u793a\u4f8b\u5df2\u7ecf\u5f00\u673a\u67e5\u770b\u8fc1\u79fb\u6548\u679c\u3002
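
                      A quick verification sketch: once the VM is powered back on, the NODE column of the VMI shows where it now runs (names are placeholders):

                      kubectl -n <namespace> get vmi <vm-name> -o wide\n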

                      "},{"location":"admin/virtnest/vm/monitor.html","title":"\u4e91\u4e3b\u673a\u76d1\u63a7","text":"

                      \u4e91\u4e3b\u673a\u57fa\u4e8e Kubevirt \u5f00\u6e90\u7684 Grafana Dashboard\uff0c\u4e3a\u4e86\u6bcf\u4e00\u4e2a\u4e91\u4e3b\u673a\u751f\u6210\u4e86\u76d1\u63a7\u770b\u677f

                      \u4e91\u4e3b\u673a\u7684\u76d1\u63a7\u4fe1\u606f\u53ef\u4ee5\u66f4\u597d\u7684\u4e86\u89e3\u4e91\u4e3b\u673a\u7684\u8d44\u6e90\u6d88\u8017\u60c5\u51b5\uff0c\u6bd4\u5982 CPU\u3001\u5185\u5b58\u3001\u5b58\u50a8\u548c\u7f51\u7edc\u7b49\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\uff0c \u4ece\u800c\u8fdb\u884c\u8d44\u6e90\u7684\u4f18\u5316\u548c\u89c4\u5212\uff0c\u63d0\u5347\u6574\u4f53\u7684\u8d44\u6e90\u5229\u7528\u6548\u7387\u3002

                      "},{"location":"admin/virtnest/vm/monitor.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u67e5\u770b\u4e91\u4e3b\u673a\u76d1\u63a7\u7684\u76f8\u5173\u4fe1\u606f\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u5728\u4e91\u4e3b\u673a\u6240\u5728\u7684\u540c\u4e00\u96c6\u7fa4\u5185\u5b89\u88c5 Insight-agent \u7ec4\u4ef6\uff0c\u5e76\u4e14\u4fdd\u8bc1 Insight-agent \u7ec4\u4ef6\u6b63\u5e38\u53ef\u7528\u3002
                      "},{"location":"admin/virtnest/vm/monitor.html#_3","title":"\u4e91\u4e3b\u673a\u76d1\u63a7","text":"

                      \u8fdb\u5165\u4e91\u4e3b\u673a\u7684\u8be6\u7ec6\u4fe1\u606f\u5e76\u70b9\u51fb \u6982\u89c8 \uff0c\u5373\u53ef\u67e5\u770b\u4e91\u4e3b\u673a\u7684\u76d1\u63a7\u5185\u5bb9\u3002\u8bf7\u6ce8\u610f\uff0c\u82e5\u672a\u5b89\u88c5 Insight-agent \u7ec4\u4ef6\uff0c\u5219\u65e0\u6cd5\u83b7\u53d6\u76d1\u63a7\u4fe1\u606f\u3002\u4ee5\u4e0b\u662f\u8be6\u7ec6\u4fe1\u606f\uff1a

                      • CPU \u603b\u91cf\u3001CPU \u4f7f\u7528\u91cf\u3001\u5185\u5b58\u603b\u91cf\u3001\u5185\u5b58\u4f7f\u7528\u91cf\u3002

                      • CPU \u4f7f\u7528\u7387\uff1a\u6307\u5f53\u524d\u4e91\u4e3b\u673a\u6b63\u5728\u4f7f\u7528\u7684 CPU \u8d44\u6e90\u7684\u767e\u5206\u6bd4\uff1b

                      • \u5185\u5b58\u4f7f\u7528\u7387\uff1a\u6307\u5f53\u524d\u4e91\u4e3b\u673a\u6b63\u5728\u4f7f\u7528\u7684\u5185\u5b58\u8d44\u6e90\u5360\u603b\u53ef\u7528\u5185\u5b58\u7684\u767e\u5206\u6bd4\u3002

                      • \u7f51\u7edc\u6d41\u91cf\uff1a\u6307\u4e91\u4e3b\u673a\u5728\u7279\u5b9a\u65f6\u95f4\u6bb5\u5185\u53d1\u9001\u548c\u63a5\u6536\u7684\u7f51\u7edc\u6570\u636e\u91cf\uff1b

                      • \u7f51\u7edc\u4e22\u5305\u7387\uff1a\u6307\u5728\u6570\u636e\u4f20\u8f93\u8fc7\u7a0b\u4e2d\u4e22\u5931\u7684\u6570\u636e\u5305\u5360\u603b\u53d1\u9001\u6570\u636e\u5305\u6570\u91cf\u7684\u6bd4\u4f8b\u3002

                      • \u7f51\u7edc\u9519\u8bef\u7387\uff1a\u6307\u5728\u7f51\u7edc\u4f20\u8f93\u8fc7\u7a0b\u4e2d\u53d1\u751f\u7684\u9519\u8bef\u7684\u6bd4\u7387\uff1b

                      • \u78c1\u76d8\u541e\u5410\uff1a\u6307\u4e91\u4e3b\u673a\u7cfb\u7edf\u5728\u4e00\u5b9a\u65f6\u95f4\u5185\u8bfb\u53d6\u548c\u5199\u5165\u78c1\u76d8\u7684\u901f\u5ea6\u548c\u80fd\u529b\u3002

                      • IOPS\uff1a\u6307\u7684\u662f\u5728\u4e00\u79d2\u949f\u5185\u4e91\u4e3b\u673a\u7cfb\u7edf\u8fdb\u884c\u7684\u8f93\u5165/\u8f93\u51fa\u64cd\u4f5c\u7684\u6b21\u6570\u3002\u78c1\u76d8\u5ef6\u8fdf\uff1a\u6307\u4e91\u4e3b\u673a\u7cfb\u7edf\u5728\u8fdb\u884c\u78c1\u76d8\u8bfb\u5199\u64cd\u4f5c\u65f6\u6240\u7ecf\u5386\u7684\u65f6\u95f4\u5ef6\u8fdf\u3002

                      "},{"location":"admin/virtnest/vm/scheduled-snapshot.html","title":"\u5b9a\u65f6\u5feb\u7167","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u4e3a\u4e91\u4e3b\u673a\u5b9a\u65f6\u521b\u5efa\u5feb\u7167\u3002

                      \u7528\u6237\u53ef\u4ee5\u4e3a\u4e91\u4e3b\u673a\u5b9a\u65f6\u521b\u5efa\u5feb\u7167\uff0c\u80fd\u591f\u4e3a\u6570\u636e\u63d0\u4f9b\u6301\u7eed\u7684\u4fdd\u62a4\uff0c\u786e\u4fdd\u5728\u53d1\u751f\u6570\u636e\u4e22\u5931\u3001\u635f\u574f\u6216\u5220\u9664\u7684\u60c5\u51b5\u4e0b\u53ef\u4ee5\u8fdb\u884c\u6709\u6548\u7684\u6570\u636e\u6062\u590d\u3002

                      "},{"location":"admin/virtnest/vm/scheduled-snapshot.html#_2","title":"\u5b9a\u65f6\u5feb\u7167\u6b65\u9aa4","text":"
                      1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \uff0c\u5728\u5217\u8868\u9875\u9762\uff0c\u9009\u62e9\u76ee\u6807\u4e91\u4e3b\u673a\u6240\u5728\u7684\u96c6\u7fa4\u3002 \u8fdb\u5165\u96c6\u7fa4\u540e\uff0c\u70b9\u51fb \u5de5\u4f5c\u8d1f\u8f7d -> \u5b9a\u65f6\u4efb\u52a1 \uff0c\u9009\u62e9 YAML \u521b\u5efa \u5b9a\u65f6\u4efb\u52a1\uff0c\u53c2\u8003\u4ee5\u4e0b YAML \u793a\u4f8b\u53ef\u4e3a\u6307\u5b9a\u4e91\u4e3b\u673a\u5b9a\u65f6\u521b\u5efa\u5feb\u7167\u3002

                        \u70b9\u51fb\u67e5\u770b\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\u7684 YAML \u793a\u4f8b
                        apiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: xxxxx-xxxxx-cronjob # CronJob name, customizable\n  namespace: virtnest-system # do not modify this namespace\nspec:\n  schedule: \"5 * * * *\" # adjust the schedule interval as needed\n  concurrencyPolicy: Allow\n  suspend: false\n  successfulJobsHistoryLimit: 10\n  failedJobsHistoryLimit: 3\n  startingDeadlineSeconds: 60\n  jobTemplate:\n    spec:\n      template:\n        metadata:\n          labels:\n            virtnest.io/vm: xxxx # change to the name of the VM to snapshot\n            virtnest.io/namespace: xxxx # change to the namespace of the VM\n        spec:\n          serviceAccountName: kubevirt-operator\n          containers:\n            - name: snapshot-job\n              image: release.daocloud.io/virtnest/tools:v0.1.5 # in offline environments, change the registry address to the corresponding seed cluster registry\n              imagePullPolicy: IfNotPresent\n              env:\n                - name: NS\n                  valueFrom:\n                    fieldRef:\n                      fieldPath: metadata.labels['virtnest.io/namespace']\n                - name: VM\n                  valueFrom:\n                    fieldRef:\n                      fieldPath: metadata.labels['virtnest.io/vm']\n              command:\n                - /bin/sh\n                - -c\n                - |\n                  export SUFFIX=$(date +\"%Y%m%d-%H%M%S\")\n                  cat <<EOF | kubectl apply -f -\n                  apiVersion: snapshot.kubevirt.io/v1alpha1\n                  kind: VirtualMachineSnapshot\n                  metadata:\n                    name: $(VM)-snapshot-$SUFFIX\n                    namespace: $(NS)\n                  spec:\n                    source:\n                      apiGroup: kubevirt.io\n                      kind: VirtualMachine\n                      name: $(VM)\n                  EOF\n          restartPolicy: OnFailure\n
                      2. After the CronJob is created and runs successfully, click Virtual Machines, select the target VM on the list page, and view the snapshot list in its details.
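
                      To confirm that the CronJob is producing snapshots, a hedged check sketch (the VM namespace placeholder matches the YAML above):

                      kubectl -n virtnest-system get cronjob\nkubectl -n <vm-namespace> get virtualmachinesnapshot\n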

                      "},{"location":"admin/virtnest/vm/snapshot.html","title":"\u5feb\u7167\u7ba1\u7406","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u4e3a\u4e91\u4e3b\u673a\u521b\u5efa\u5feb\u7167\uff0c\u5e76\u4ece\u5feb\u7167\u4e2d\u6062\u590d\u7684\u3002

                      \u7528\u6237\u53ef\u4ee5\u4e3a\u4e91\u4e3b\u673a\u521b\u5efa\u5feb\u7167\uff0c\u4fdd\u5b58\u4e91\u4e3b\u673a\u5f53\u4e0b\u7684\u72b6\u6001\uff0c\u4e00\u4e2a\u5feb\u7167\u53ef\u4ee5\u652f\u6301\u591a\u6b21\u6062\u590d\uff0c\u6bcf\u6b21\u6062\u590d\u65f6\uff0c \u4e91\u4e3b\u673a\u5c06\u88ab\u8fd8\u539f\u5230\u5feb\u7167\u521b\u5efa\u65f6\u7684\u72b6\u6001\u3002\u901a\u5e38\u53ef\u4ee5\u7528\u4e8e\u5907\u4efd\u3001\u6062\u590d\u3001\u56de\u6eda\u7b49\u573a\u666f\u3002

                      "},{"location":"admin/virtnest/vm/snapshot.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u4f7f\u7528\u5feb\u7167\u529f\u80fd\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u53ea\u6709\u975e\u9519\u8bef\u72b6\u6001\u4e0b\u7684\u4e91\u4e3b\u673a\u624d\u80fd\u4f7f\u7528\u5feb\u7167\u529f\u80fd\u3002
                      • \u5b89\u88c5 Snapshot CRDs\u3001Snapshot Controller\u3001CSI Driver\u3002 \u5177\u4f53\u5b89\u88c5\u6b65\u9aa4\u53ef\u53c2\u8003 CSI Snapshotter\u3002
                      • \u7b49\u5f85 snapshot-controller \u7ec4\u4ef6\u51c6\u5907\u5c31\u7eea, \u8be5\u7ec4\u4ef6\u4f1a\u76d1\u63a7 VolumeSnapshot \u548c VolumeSnapshotContent \u76f8\u5173\u4e8b\u4ef6\uff0c\u5e76\u89e6\u53d1\u76f8\u5173\u64cd\u4f5c\u3002
                      • \u7b49\u5f85 CSI Driver \u51c6\u5907\u5c31\u7eea, \u786e\u4fdd csi-snapshotter sidecar \u8dd1\u5728 CSI Driver \u91cc\uff0ccsi-snapshotter sidecar \u4f1a\u76d1\u63a7 VolumeSnapshotContent \u76f8\u5173\u4e8b\u4ef6\uff0c\u5e76\u89e6\u53d1\u76f8\u5173\u64cd\u4f5c\u3002\u5982 POC \u4f7f\u7528\u7684\u5b58\u50a8\u662f rook-ceph\uff0c\u53ef\u53c2\u8003 ceph-csi-snapshot
                        • \u5982\u5b58\u50a8\u662f Rook-Ceph\uff0c\u53ef\u53c2\u8003 ceph-csi-snapshot
                        • \u5982\u5b58\u50a8\u662f HwameiStor\uff0c\u53ef\u53c2\u8003 huameistor-snapshot
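
                      A minimal verification sketch for these components (the CRD names are the standard external-snapshotter ones; the controller's namespace varies by installation):

                      kubectl get crd volumesnapshotclasses.snapshot.storage.k8s.io volumesnapshots.snapshot.storage.k8s.io volumesnapshotcontents.snapshot.storage.k8s.io\nkubectl get pods -A | grep snapshot-controller\n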
                      "},{"location":"admin/virtnest/vm/snapshot.html#_3","title":"\u521b\u5efa\u5feb\u7167","text":"
                      1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u4e91\u4e3b\u673a \uff0c\u8fdb\u5165\u5217\u8868\u9875\u9762\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u975e\u9519\u8bef\u72b6\u6001\u4e0b\u7684\u4e91\u4e3b\u673a\u6267\u884c\u5feb\u7167\u64cd\u4f5c\u3002

                      2. \u5f39\u51fa\u5f39\u6846\uff0c\u9700\u8981\u586b\u5199\u5feb\u7167\u7684\u540d\u79f0\u548c\u63cf\u8ff0\uff0c\u521b\u5efa\u5feb\u7167\u5927\u6982\u9700\u8981\u51e0\u5206\u949f\u7684\u65f6\u95f4\uff0c\u5728\u6b64\u671f\u95f4\u65e0\u6cd5\u5bf9\u4e91\u4e3b\u673a\u505a\u4efb\u4f55\u64cd\u4f5c\u3002

                      3. \u521b\u5efa\u6210\u529f\u540e\u53ef\u4ee5\u5728\u4e91\u4e3b\u673a\u8be6\u60c5\u5185\u67e5\u770b\u5feb\u7167\u4fe1\u606f\uff0c\u652f\u6301\u7f16\u8f91\u63cf\u8ff0\u3001\u4ece\u5feb\u7167\u4e2d\u6062\u590d\u3001\u5220\u9664\u7b49\u64cd\u4f5c\u3002

                      "},{"location":"admin/virtnest/vm/snapshot.html#_4","title":"\u4ece\u5feb\u7167\u4e2d\u6062\u590d","text":"
                      1. \u70b9\u51fb \u4ece\u5feb\u7167\u6062\u590d \uff0c\u9700\u8981\u586b\u5199\u4e91\u4e3b\u673a\u6062\u590d\u8bb0\u5f55\u7684\u540d\u79f0\uff0c\u540c\u65f6\u6062\u590d\u64cd\u4f5c\u53ef\u80fd\u9700\u8981\u4e00\u4e9b\u65f6\u95f4\u6765\u5b8c\u6210\uff0c\u5177\u4f53\u53d6\u51b3\u4e8e\u5feb\u7167\u7684\u5927\u5c0f\u548c\u5176\u4ed6\u56e0\u7d20\u3002\u6062\u590d\u6210\u529f\u540e\uff0c\u4e91\u4e3b\u673a\u5c06\u56de\u5230\u5feb\u7167\u521b\u5efa\u65f6\u7684\u72b6\u6001\u3002

                      2. \u4e00\u6bb5\u65f6\u95f4\u540e\uff0c\u4e0b\u62c9\u5feb\u7167\u4fe1\u606f\uff0c\u53ef\u4ee5\u67e5\u770b\u5f53\u524d\u5feb\u7167\u7684\u6240\u6709\u6062\u590d\u8bb0\u5f55\uff0c\u5e76\u4e14\u652f\u6301\u5c55\u793a\u5b9a\u4f4d\u6062\u590d\u7684\u4f4d\u7f6e\u3002
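
                      Under the hood each restore record corresponds to a KubeVirt VirtualMachineRestore object; a minimal sketch with placeholder names:

                      apiVersion: snapshot.kubevirt.io/v1alpha1\nkind: VirtualMachineRestore\nmetadata:\n  name: <restore-record-name>\n  namespace: <vm-namespace>\nspec:\n  target:\n    apiGroup: kubevirt.io\n    kind: VirtualMachine\n    name: <vm-name>\n  virtualMachineSnapshotName: <snapshot-name>\n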

                      "},{"location":"admin/virtnest/vm/vm-network.html","title":"\u4e91\u4e3b\u673a\u7f51\u7edc","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u5728\u521b\u5efa\u4e91\u4e3b\u673a\u65f6\uff0c\u914d\u7f6e\u7f51\u7edc\u4fe1\u606f\u3002

                      \u5728\u4e91\u4e3b\u673a\u4e2d\uff0c\u7f51\u7edc\u7ba1\u7406\u662f\u4e00\u4e2a\u5173\u952e\u7684\u90e8\u5206\uff0c\u5b83\u4f7f\u5f97\u6211\u4eec\u80fd\u591f\u5728 Kubernetes \u73af\u5883\u4e2d\u7ba1\u7406\u548c\u914d\u7f6e\u4e91\u4e3b\u673a\u7684\u7f51\u7edc\u8fde\u63a5\uff0c\u53ef\u4ee5\u6839\u636e\u4e0d\u540c\u7684\u9700\u6c42\u548c\u573a\u666f\u6765\u8fdb\u884c\u914d\u7f6e\uff0c\u5b9e\u73b0\u66f4\u7075\u6d3b\u548c\u591a\u6837\u5316\u7684\u7f51\u7edc\u67b6\u6784\u3002

                      1. \u5355\u7f51\u5361\u573a\u666f\uff1a\u5bf9\u4e8e\u4e00\u4e9b\u7b80\u5355\u7684\u53ea\u9700\u8981\u57fa\u672c\u7f51\u7edc\u8fde\u63a5\u7684\u5e94\u7528\uff0c\u6216\u8005\u5b58\u5728\u8d44\u6e90\u9650\u5236\u7684\u65f6\u5019\uff0c\u4f7f\u7528\u5355\u7f51\u5361\u53ef\u4ee5\u8282\u7ea6\u7f51\u7edc\u8d44\u6e90\uff0c\u5e76\u907f\u514d\u8d44\u6e90\u7684\u6d6a\u8d39\u3002
                      2. \u591a\u7f51\u5361\u573a\u666f\uff1a\u5f53\u9700\u8981\u5b9e\u73b0\u4e0d\u540c\u7f51\u7edc\u73af\u5883\u4e4b\u95f4\u7684\u5b89\u5168\u9694\u79bb\u65f6\uff0c\u53ef\u4ee5\u4f7f\u7528\u591a\u7f51\u5361\u6765\u5212\u5206\u4e0d\u540c\u7684\u7f51\u7edc\u533a\u57df\u3002\u540c\u65f6\u4e5f\u53ef\u4ee5\u5bf9\u63a7\u5236\u548c\u6d41\u91cf\u8fdb\u884c\u7ba1\u7406\u3002
                      "},{"location":"admin/virtnest/vm/vm-network.html#_2","title":"\u7f51\u7edc\u914d\u7f6e\u524d\u63d0","text":"

                      \u5728\u4f7f\u7528\u4e91\u4e3b\u673a\u7f51\u7edc\u529f\u80fd\u4e4b\u524d\uff0c\u9700\u8981\u6839\u636e\u7f51\u7edc\u6a21\u5f0f\u7684\u4e0d\u540c\u914d\u7f6e\u4e0d\u540c\u7684\u4fe1\u606f\uff1a

                      • \u9009\u62e9 Bridge \u7f51\u7edc\u6a21\u5f0f\u65f6\u9700\u8981\u63d0\u524d\u914d\u7f6e\u4e00\u4e9b\u4fe1\u606f\uff1a

                        • \u5728\u4e3b\u673a\u8282\u70b9\u4e0a\u5b89\u88c5\u5e76\u8fd0\u884c Open vSwitch, \u53ef\u53c2\u8003\u8fd9\u91cc
                        • \u5728\u4e3b\u673a\u8282\u70b9\u4e0a\u914d\u7f6e Open vSwitch \u7f51\u6865, \u53ef\u53c2\u8003\u8fd9\u91cc
                        • \u5b89\u88c5 Spiderpool\uff0c\u53ef\u53c2\u8003\u5b89\u88c5 Spiderpool, Spiderpool \u9ed8\u8ba4\u4f1a\u628a Multus CNI \u548c Ovs CNI \u90fd\u88c5\u4e0a
                        • \u521b\u5efa ovs \u7c7b\u578b\u7684 Multus CR\uff0c\u53ef\u53c2\u8003\u754c\u9762\u521b\u5efa Multus CR \u6216 YAML \u521b\u5efa Multus CR
                        • \u521b\u5efa\u5b50\u7f51\u53ca IP \u6c60\uff0c\u53c2\u8003\u521b\u5efa\u5b50\u7f51\u548c IP \u6c60
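
                      For reference, an ovs-type Multus CR is a NetworkAttachmentDefinition wrapping the OVS CNI configuration; a minimal sketch, assuming a pre-created bridge named br1 (the CR name is hypothetical):

                      apiVersion: k8s.cni.cncf.io/v1\nkind: NetworkAttachmentDefinition\nmetadata:\n  name: ovs-bridge-net # hypothetical name\n  namespace: kube-system\nspec:\n  config: |\n    {\n      \"cniVersion\": \"0.3.1\",\n      \"type\": \"ovs\",\n      \"bridge\": \"br1\"\n    }\n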
                      "},{"location":"admin/virtnest/vm/vm-network.html#_3","title":"\u7f51\u7edc\u914d\u7f6e","text":"
                      1. \u914d\u7f6e\u4e91\u4e3b\u673a\u7684\u7f51\u7edc\u914d\u7f6e\uff0c\u53ef\u4ee5\u6839\u636e\u8868\u683c\u4fe1\u606f\u6309\u9700\u7ec4\u5408\u3002

                        \u7f51\u7edc\u6a21\u5f0f CNI \u662f\u5426\u5b89\u88c5 Spiderpool \u7f51\u5361\u6a21\u5f0f \u56fa\u5b9a IP \u5b9e\u65f6\u8fc1\u79fb Masquerade\uff08NAT\uff09 Calico \u274c \u5355\u7f51\u5361 \u274c \u2705 Cilium \u274c \u5355\u7f51\u5361 \u274c \u2705 Flannel \u274c \u5355\u7f51\u5361 \u274c \u2705 Bridge\uff08\u6865\u63a5\uff09 OVS \u2705 \u591a\u7f51\u5361 \u2705 \u2705

                      2. Network mode: either Masquerade (NAT) or Bridge; Bridge mode can only be used after the Spiderpool component has been installed.

                        1. Masquerade (NAT) is the default network mode and uses the default eth0 NIC.

                        2. If the Spiderpool component is installed in the cluster, Bridge mode can be selected; it supports multiple NICs.

                          Some prerequisites must be met when selecting Bridge mode; see the prerequisites above.

                      3. Add NICs

                        1. In Bridge mode, NICs can be added manually. Click Add NIC to configure the NIC's IP pool. Select a Multus CR that matches the network mode; if none exists, it must be created first.

                        2. If the Use Default IP Pool switch is on, the default IP pool from the Multus CR configuration is used. If the switch is off, manually select an IP pool.

                      "},{"location":"admin/virtnest/vm/vm-sc.html","title":"\u4e91\u4e3b\u673a\u5b58\u50a8","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u5728\u521b\u5efa\u4e91\u4e3b\u673a\u65f6\uff0c\u914d\u7f6e\u5b58\u50a8\u4fe1\u606f\u3002

                      \u5b58\u50a8\u548c\u4e91\u4e3b\u673a\u7684\u529f\u80fd\u606f\u606f\u76f8\u5173\uff0c\u4e3b\u8981\u662f\u901a\u8fc7\u4f7f\u7528 Kubernetes \u7684\u6301\u4e45\u5377\u548c\u5b58\u50a8\u7c7b\uff0c\u63d0\u4f9b\u4e86\u7075\u6d3b\u4e14\u53ef\u6269\u5c55\u7684\u4e91\u4e3b\u673a\u5b58\u50a8\u80fd\u529b\u3002 \u6bd4\u5982\u4e91\u4e3b\u673a\u955c\u50cf\u5b58\u50a8\u5728 PVC \u91cc\uff0c\u652f\u6301\u548c\u5176\u4ed6\u6570\u636e\u4e00\u8d77\u514b\u9686\u3001\u5feb\u7167\u7b49

                      "},{"location":"admin/virtnest/vm/vm-sc.html#_2","title":"\u90e8\u7f72\u4e0d\u540c\u7684\u5b58\u50a8","text":"

                      \u5728\u4f7f\u7528\u4e91\u4e3b\u673a\u5b58\u50a8\u529f\u80fd\u4e4b\u524d\uff0c\u9700\u8981\u6839\u636e\u9700\u8981\u90e8\u7f72\u4e0d\u540c\u7684\u5b58\u50a8\uff1a

                      1. \u53c2\u8003\u90e8\u7f72 hwameistor\uff0c \u6216\u8005\u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7684 Helm \u6a21\u677f\u4e2d\u5b89\u88c5 hwameistor-operator\u3002
                      2. \u53c2\u8003\u90e8\u7f72 rook-ceph
                      3. \u90e8\u7f72 localpath\uff0c\u4f7f\u7528\u547d\u4ee4 kubectl apply -f \u521b\u5efa\u4ee5\u4e0b YAML\uff1a
                      \u70b9\u51fb\u67e5\u770b\u5b8c\u6574 YAML
                      ---\napiVersion: v1\nkind: Namespace\nmetadata:\n  name: local-path-storage\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: local-path-provisioner-service-account\n  namespace: local-path-storage\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: local-path-provisioner-role\nrules:\n- apiGroups: [\"\"]\n  resources: [\"nodes\", \"persistentvolumeclaims\", \"configmaps\"]\n  verbs: [\"get\", \"list\", \"watch\"]\n- apiGroups: [\"\"]\n  resources: [\"endpoints\", \"persistentvolumes\", \"pods\"]\n  verbs: [\"*\"]\n- apiGroups: [\"\"]\n  resources: [\"events\"]\n  verbs: [\"create\", \"patch\"]\n- apiGroups: [\"storage.k8s.io\"]\n  resources: [\"storageclasses\"]\n  verbs: [\"get\", \"list\", \"watch\"]\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: local-path-provisioner-bind\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: local-path-provisioner-role\nsubjects:\n- kind: ServiceAccount\n  name: local-path-provisioner-service-account\n  namespace: local-path-storage\n\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: local-path-provisioner\n  namespace: local-path-storage\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: local-path-provisioner\n  template:\n    metadata:\n      labels:\n        app: local-path-provisioner\n    spec:\n      serviceAccountName: local-path-provisioner-service-account\n      containers:\n      - name: local-path-provisioner\n        image: rancher/local-path-provisioner:v0.0.22\n        imagePullPolicy: IfNotPresent\n        command:\n        - local-path-provisioner\n        - --debug\n        - start\n        - --config\n        - /etc/config/config.json\n        volumeMounts:\n        - name: config-volume\n          mountPath: /etc/config/\n        env:\n        - name: POD_NAMESPACE\n          valueFrom:\n            fieldRef:\n              fieldPath: metadata.namespace\n      volumes:\n      - name: config-volume\n        configMap:\n          name: local-path-config\n\n---\napiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: local-path\nprovisioner: rancher.io/local-path\nvolumeBindingMode: WaitForFirstConsumer\nreclaimPolicy: Delete\n\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: local-path-config\n  namespace: local-path-storage\ndata:\n  config.json: |-\n    {\n      \"nodePathMap\": [\n        {\n          \"node\": \"DEFAULT_PATH_FOR_NON_LISTED_NODES\",\n          \"paths\": [\"/opt/local-path-provisioner\"]\n        }\n      ]\n    }\n  setup: |-\n    #!/bin/sh\n    set -eu\n    mkdir -m 0777 -p \"$VOL_DIR\"\n  teardown: |-\n    #!/bin/sh\n    set -eu\n    rm -rf \"$VOL_DIR\"\n  helperPod.yaml: |-\n    apiVersion: v1\n    kind: Pod\n    metadata:\n      name: helper-pod\n    spec:\n      containers:\n      - name: helper-pod\n        image: busybox\n        imagePullPolicy: IfNotPresent\n
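
                      After applying the YAML above, a quick sketch to confirm that the provisioner is running and the StorageClass exists:

                      kubectl -n local-path-storage get pods\nkubectl get storageclass local-path\n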
                      "},{"location":"admin/virtnest/vm/vm-sc.html#_3","title":"\u4e91\u4e3b\u673a\u5b58\u50a8","text":"
                      1. \u7cfb\u7edf\u76d8\uff1a\u7cfb\u7edf\u9ed8\u8ba4\u521b\u5efa\u4e00\u4e2a VirtIO \u7c7b\u578b\u7684 rootfs \u7cfb\u7edf\u76d8\uff0c\u7528\u4e8e\u5b58\u653e\u64cd\u4f5c\u7cfb\u7edf\u548c\u6570\u636e\u3002

                      2. \u6570\u636e\u76d8\uff1a\u6570\u636e\u76d8\u662f\u4e91\u4e3b\u673a\u4e2d\u7528\u4e8e\u5b58\u50a8\u7528\u6237\u6570\u636e\u3001\u5e94\u7528\u7a0b\u5e8f\u6570\u636e\u6216\u5176\u4ed6\u975e\u64cd\u4f5c\u7cfb\u7edf\u76f8\u5173\u6587\u4ef6\u7684\u5b58\u50a8\u8bbe\u5907\u3002\u4e0e\u7cfb\u7edf\u76d8\u76f8\u6bd4\uff0c\u6570\u636e\u76d8\u662f\u975e\u5fc5\u9009\u7684\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u8981\u52a8\u6001\u6dfb\u52a0\u6216\u79fb\u9664\u3002\u6570\u636e\u76d8\u7684\u5bb9\u91cf\u4e5f\u53ef\u4ee5\u6839\u636e\u9700\u6c42\u8fdb\u884c\u7075\u6d3b\u914d\u7f6e\u3002

                        \u9ed8\u8ba4\u4f7f\u7528\u5757\u5b58\u50a8\u3002\u5982\u679c\u9700\u8981\u4f7f\u7528\u514b\u9686\u548c\u5feb\u7167\u529f\u80fd\uff0c\u8bf7\u786e\u4fdd\u60a8\u7684\u5b58\u50a8\u6c60\u5df2\u7ecf\u521b\u5efa\u4e86\u5bf9\u5e94\u7684 VolumeSnapshotClass\uff0c \u53ef\u4ee5\u53c2\u8003\u4ee5\u4e0b\u793a\u4f8b\u3002\u5982\u679c\u9700\u8981\u4f7f\u7528\u5b9e\u65f6\u8fc1\u79fb\u529f\u80fd\uff0c\u8bf7\u786e\u4fdd\u60a8\u7684\u5b58\u50a8\u652f\u6301\u5e76\u9009\u62e9\u4e86 ReadWriteMany \u7684\u8bbf\u95ee\u6a21\u5f0f \u3002

                        \u5927\u591a\u6570\u60c5\u51b5\u4e0b\uff0c\u5b58\u50a8\u5728\u5b89\u88c5\u8fc7\u7a0b\u4e2d\u4e0d\u4f1a\u81ea\u52a8\u521b\u5efa\u8fd9\u6837\u7684 VolumeSnapshotClass\uff0c\u56e0\u6b64\u60a8\u9700\u8981\u624b\u52a8\u521b\u5efa VolumeSnapshotClass\u3002 \u4ee5\u4e0b\u662f\u4e00\u4e2a HwameiStor \u521b\u5efa VolumeSnapshotClass \u7684\u793a\u4f8b\uff1a

                        kind: VolumeSnapshotClass\napiVersion: snapshot.storage.k8s.io/v1\nmetadata:\n  name: hwameistor-storage-lvm-snapshot\n  annotations:\n    snapshot.storage.kubernetes.io/is-default-class: \"true\"\nparameters:\n  snapsize: \"1073741824\"\ndriver: lvm.hwameistor.io\ndeletionPolicy: Delete\n
                        • Run the following command to check whether the VolumeSnapshotClass was created successfully.

                          kubectl get VolumeSnapshotClass\n
                        • View the created SnapshotClass and confirm that its Provisioner property matches the Driver property of the storage pool.

                      "},{"location":"admin/virtnest/vm-image/index.html","title":"\u6784\u5efa\u4e91\u4e3b\u673a\u955c\u50cf","text":"

                      \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u6784\u5efa\u9700\u8981\u7684\u4e91\u4e3b\u673a\u955c\u50cf\u3002

                      \u4e91\u4e3b\u673a\u955c\u50cf\u5176\u5b9e\u5c31\u662f\u526f\u672c\u6587\u4ef6\uff0c\u662f\u5b89\u88c5\u6709\u64cd\u4f5c\u7cfb\u7edf\u7684\u4e00\u4e2a\u78c1\u76d8\u5206\u533a\u3002\u5e38\u89c1\u7684\u955c\u50cf\u6587\u4ef6\u683c\u5f0f\u5305\u62ec raw\u3001qcow2\u3001vmdk\u7b49\u3002

                      "},{"location":"admin/virtnest/vm-image/index.html#_2","title":"\u6784\u5efa\u955c\u50cf","text":"

                      \u4e0b\u9762\u662f\u6784\u5efa\u4e91\u4e3b\u673a\u955c\u50cf\u7684\u4e00\u4e9b\u8be6\u7ec6\u6b65\u9aa4\uff1a

                      1. \u4e0b\u8f7d\u7cfb\u7edf\u955c\u50cf

                        \u5728\u6784\u5efa\u4e91\u4e3b\u673a\u955c\u50cf\u4e4b\u524d\uff0c\u60a8\u9700\u8981\u4e0b\u8f7d\u6240\u9700\u7684\u7cfb\u7edf\u955c\u50cf\u3002\u6211\u4eec\u63a8\u8350\u4f7f\u7528 qcow2\u3001raw \u6216 vmdk \u683c\u5f0f\u7684\u955c\u50cf\u3002\u53ef\u4ee5\u8bbf\u95ee\u4ee5\u4e0b\u94fe\u63a5\u83b7\u53d6 CentOS \u548c Fedora \u7684\u955c\u50cf\uff1a

                        • CentOS Cloud Images\uff1a\u652f\u6301\u4ece\u5b98\u65b9 CentOS \u9879\u76ee\u6216\u5176\u4ed6\u8d44\u6e90\u4e2d\u83b7\u53d6 CentOS \u955c\u50cf\u3002\u8bf7\u786e\u4fdd\u9009\u62e9\u4e0e\u60a8\u7684\u865a\u62df\u5316\u5e73\u53f0\u517c\u5bb9\u7684\u7248\u672c\u3002
                        • Fedora Cloud Images\uff1a \u652f\u6301\u4ece\u5b98\u65b9 Fedora \u9879\u76ee\u83b7\u53d6\u955c\u50cf\u3002\u6839\u636e\u60a8\u7684\u9700\u6c42\u9009\u62e9\u5408\u9002\u7684\u7248\u672c\u3002
                      2. \u6784\u5efa Docker \u955c\u50cf\u5e76\u63a8\u9001\u5230\u5bb9\u5668\u955c\u50cf\u4ed3\u5e93

                        \u5728\u6b64\u6b65\u9aa4\u4e2d\uff0c\u6211\u4eec\u5c06\u4f7f\u7528 Docker \u6784\u5efa\u4e00\u4e2a\u955c\u50cf\uff0c\u5e76\u5c06\u5176\u63a8\u9001\u5230\u5bb9\u5668\u955c\u50cf\u4ed3\u5e93\uff0c\u4ee5\u4fbf\u5728\u9700\u8981\u65f6\u80fd\u591f\u65b9\u4fbf\u5730\u90e8\u7f72\u548c\u4f7f\u7528\u3002

                        • \u521b\u5efa Dockerfile \u6587\u4ef6

                          FROM scratch\nADD --chown=107:107 CentOS-7-x86_64-GenericCloud.qcow2 /disk/\n

                          This adds the file CentOS-7-x86_64-GenericCloud.qcow2 to an image built from a blank base image, placing it in the /disk/ directory inside the image. The image then contains this file and can provide a CentOS 7 x86_64 operating system environment when creating a VM.

                        • Build the image

                          docker build -t release-ci.daocloud.io/ghippo/kubevirt-demo/centos7:v1 .\n

                          The command above builds an image named release-ci.daocloud.io/ghippo/kubevirt-demo/centos7:v1 from the instructions in the Dockerfile. The image name can be modified to suit your project.

                        • Push the image to the container registry

                          Run the following command to push the built image to the registry named release-ci.daocloud.io. The registry name and address can be changed as needed.

                          docker push release-ci.daocloud.io/ghippo/kubevirt-demo/centos7:v1\n

                      The above are the detailed steps and notes for building a VM image. By following them, you can successfully build and push an image for VMs that meets your needs.

                      "},{"location":"end-user/index.html","title":"\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 - \u7ec8\u7aef\u7528\u6237","text":"

                      \u8fd9\u662f\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9762\u5411\u7ec8\u7aef\u7528\u6237\u7684\u4f7f\u7528\u6587\u6863\u3002

                      • \u7528\u6237\u6ce8\u518c

                        \u7528\u6237\u6ce8\u518c\u662f\u4f7f\u7528 AI \u7b97\u529b\u5e73\u53f0\u7684\u7b2c\u4e00\u6b65\u3002

                        • \u7528\u6237\u6ce8\u518c
                      • \u4e91\u4e3b\u673a

                        \u4e91\u4e3b\u673a\u662f\u90e8\u7f72\u5728\u4e91\u7aef\u7684\u865a\u62df\u673a\u3002

                        • \u521b\u5efa\u4e91\u4e3b\u673a
                        • \u4f7f\u7528\u4e91\u4e3b\u673a
                      • \u5bb9\u5668\u7ba1\u7406

                        \u5bb9\u5668\u7ba1\u7406\u662f AI \u7b97\u529b\u4e2d\u5fc3\u7684\u6838\u5fc3\u6a21\u5757\u3002

                        • \u4e91\u4e0a K8s \u96c6\u7fa4
                        • \u8282\u70b9\u7ba1\u7406
                        • \u5de5\u4f5c\u8d1f\u8f7d
                        • Helm \u5e94\u7528\u548c\u6a21\u677f
                      • \u7b97\u6cd5\u5f00\u53d1

                        \u7ba1\u7406\u6570\u636e\u96c6\uff0c\u6267\u884c AI \u8bad\u7ec3\u548c\u63a8\u7406\u4efb\u52a1\u3002

                        • \u521b\u5efa AI \u5de5\u4f5c\u8d1f\u8f7d
                        • \u4f7f\u7528 Notebook
                        • \u521b\u5efa\u8bad\u7ec3\u4efb\u52a1
                        • \u521b\u5efa\u63a8\u7406\u670d\u52a1
                      • \u53ef\u89c2\u6d4b\u6027

                        \u901a\u8fc7\u4eea\u8868\u76d8\u76d1\u63a7\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u51b5\u3002

                        • \u76d1\u63a7\u96c6\u7fa4/\u8282\u70b9
                        • \u6307\u6807
                        • \u65e5\u5fd7
                        • \u94fe\u8def\u8ffd\u8e2a
                      • \u4e2a\u4eba\u4e2d\u5fc3

                        \u5728\u4e2a\u4eba\u4e2d\u5fc3\u8bbe\u7f6e\u5bc6\u7801\u3001\u5bc6\u94a5\u548c\u8bed\u8a00\u3002

                        • \u5b89\u5168\u8bbe\u7f6e
                        • \u8bbf\u95ee\u5bc6\u94a5
                        • \u8bed\u8a00\u8bbe\u7f6e
                      "},{"location":"end-user/baize/dataset/create-use-delete.html","title":"\u6570\u636e\u96c6\u5217\u8868","text":"

                      AI Lab \u63d0\u4f9b\u6a21\u578b\u5f00\u53d1\u3001\u8bad\u7ec3\u4ee5\u53ca\u63a8\u7406\u8fc7\u7a0b\u6240\u6709\u9700\u8981\u7684\u6570\u636e\u96c6\u7ba1\u7406\u529f\u80fd\u3002\u76ee\u524d\u652f\u6301\u5c06\u591a\u79cd\u6570\u636e\u6e90\u7edf\u4e00\u63a5\u5165\u80fd\u529b\u3002

                      \u901a\u8fc7\u7b80\u5355\u914d\u7f6e\u5373\u53ef\u5c06\u6570\u636e\u6e90\u63a5\u5165\u5230 AI Lab \u4e2d\uff0c\u5b9e\u73b0\u6570\u636e\u7684\u7edf\u4e00\u7eb3\u7ba1\u3001\u9884\u70ed\u3001\u6570\u636e\u96c6\u7ba1\u7406\u7b49\u529f\u80fd\u3002

                      "},{"location":"end-user/baize/dataset/create-use-delete.html#_2","title":"\u521b\u5efa\u6570\u636e\u96c6","text":"
                      1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u6570\u636e\u7ba1\u7406 -> \u6570\u636e\u96c6\u5217\u8868 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae\u3002

                      2. \u9009\u62e9\u6570\u636e\u96c6\u5f52\u5c5e\u7684\u5de5\u4f5c\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4 \u4e0b\u4e00\u6b65 \u3002

                      3. \u914d\u7f6e\u76ee\u6807\u6570\u636e\u7684\u6570\u636e\u6e90\u7c7b\u578b\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                        \u76ee\u524d\u652f\u6301\u8fd9\u51e0\u79cd\u6570\u636e\u6e90\uff1a

                        • GIT\uff1a\u652f\u6301 GitHub\u3001GitLab\u3001Gitee \u7b49\u4ed3\u5e93
                        • S3\uff1a\u652f\u6301 Amazon \u4e91\u7b49\u5bf9\u8c61\u5b58\u50a8
                        • HTTP\uff1a\u76f4\u63a5\u8f93\u5165\u4e00\u4e2a\u6709\u6548\u7684 HTTP \u7f51\u5740
                        • PVC\uff1a\u652f\u6301\u9884\u5148\u521b\u5efa\u7684 Kubernetes PersistentVolumeClaim
                        • NFS\uff1a\u652f\u6301 NFS \u5171\u4eab\u5b58\u50a8
                      4. \u6570\u636e\u96c6\u521b\u5efa\u6210\u529f\u5c06\u8fd4\u56de\u6570\u636e\u96c6\u5217\u8868\u3002\u4f60\u53ef\u4ee5\u901a\u8fc7\u53f3\u4fa7\u7684 \u2507 \u6267\u884c\u66f4\u591a\u64cd\u4f5c\u3002

                      Info

                      \u7cfb\u7edf\u81ea\u52a8\u4f1a\u5728\u6570\u636e\u96c6\u521b\u5efa\u6210\u529f\u540e\uff0c\u7acb\u5373\u8fdb\u884c\u4e00\u6b21\u6027\u7684\u6570\u636e\u9884\u52a0\u8f7d\uff1b\u5728\u9884\u52a0\u8f7d\u5b8c\u6210\u4e4b\u524d\uff0c\u6570\u636e\u96c6\u4e0d\u53ef\u4ee5\u4f7f\u7528\u3002

                      "},{"location":"end-user/baize/dataset/create-use-delete.html#_3","title":"\u6570\u636e\u96c6\u4f7f\u7528","text":"

                      \u6570\u636e\u96c6\u521b\u5efa\u6210\u529f\u540e\uff0c\u53ef\u4ee5\u5728\u6a21\u578b\u8bad\u7ec3\u3001\u63a8\u7406\u7b49\u4efb\u52a1\u4e2d\u4f7f\u7528\u3002

                      "},{"location":"end-user/baize/dataset/create-use-delete.html#notebook","title":"\u5728 Notebook \u4e2d\u4f7f\u7528","text":"

                      \u5728\u521b\u5efa Notebook \u4e2d\uff0c\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u6570\u636e\u96c6\uff1b\u4f7f\u7528\u65b9\u5f0f\u5982\u4e0b\uff1a

                      • \u4f7f\u7528\u6570\u636e\u96c6\u505a\u8bad\u7ec3\u6570\u636e\u6302\u8f7d
                      • \u4f7f\u7528\u6570\u636e\u96c6\u505a\u4ee3\u7801\u6302\u8f7d

                      "},{"location":"end-user/baize/dataset/create-use-delete.html#_4","title":"\u5728 \u8bad\u7ec3\u4efb\u52a1 \u4e2d\u4f7f\u7528","text":"
                      • \u4f7f\u7528\u6570\u636e\u96c6\u6307\u5b9a\u4efb\u52a1\u8f93\u51fa
                      • \u4f7f\u7528\u6570\u636e\u96c6\u6307\u5b9a\u4efb\u52a1\u8f93\u5165
                      • \u4f7f\u7528\u6570\u636e\u96c6\u6307\u5b9a TensorBoard \u8f93\u51fa
                      "},{"location":"end-user/baize/dataset/create-use-delete.html#_5","title":"\u5728\u63a8\u7406\u670d\u52a1 \u4e2d\u4f7f\u7528","text":"
                      • \u4f7f\u7528\u6570\u636e\u96c6\u6302\u8f7d\u6a21\u578b
                      "},{"location":"end-user/baize/dataset/create-use-delete.html#_6","title":"\u5220\u9664\u6570\u636e\u96c6","text":"

                      \u5982\u679c\u53d1\u73b0\u6570\u636e\u96c6\u5197\u4f59\u3001\u8fc7\u671f\u6216\u56e0\u5176\u4ed6\u7f18\u6545\u4e0d\u518d\u9700\u8981\uff0c\u53ef\u4ee5\u4ece\u6570\u636e\u96c6\u5217\u8868\u4e2d\u5220\u9664\u3002

                      1. \u5728\u6570\u636e\u96c6\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u5220\u9664 \u3002

                      2. \u5728\u5f39\u7a97\u4e2d\u786e\u8ba4\u8981\u5220\u9664\u7684\u6570\u636e\u96c6\uff0c\u8f93\u5165\u6570\u636e\u96c6\u540d\u79f0\u540e\u70b9\u51fb \u5220\u9664 \u3002

                      3. \u5c4f\u5e55\u63d0\u793a\u5220\u9664\u6210\u529f\uff0c\u8be5\u6570\u636e\u96c6\u4ece\u5217\u8868\u4e2d\u6d88\u5931\u3002

                      Caution

                      \u6570\u636e\u96c6\u4e00\u65e6\u5220\u9664\u5c06\u4e0d\u53ef\u6062\u590d\uff0c\u8bf7\u8c28\u614e\u64cd\u4f5c\u3002

                      "},{"location":"end-user/baize/dataset/environments.html","title":"\u7ba1\u7406\u73af\u5883","text":"

                      \u672c\u6587\u8bf4\u660e\u5982\u4f55\u5728 AI Lab \u4e2d\u7ba1\u7406\u4f60\u7684\u73af\u5883\u4f9d\u8d56\u5e93\uff0c\u4ee5\u4e0b\u662f\u5177\u4f53\u64cd\u4f5c\u6b65\u9aa4\u548c\u6ce8\u610f\u4e8b\u9879\u3002

                      1. \u73af\u5883\u7ba1\u7406\u6982\u8ff0
                      2. \u521b\u5efa\u65b0\u73af\u5883
                      3. \u914d\u7f6e\u73af\u5883
                      4. \u6545\u969c\u6392\u9664
                      "},{"location":"end-user/baize/dataset/environments.html#_2","title":"\u73af\u5883\u7ba1\u7406\u6982\u8ff0","text":"

                      \u4f20\u7edf\u65b9\u5f0f\uff0c\u4e00\u822c\u4f1a\u5c06 Python \u73af\u5883\u4f9d\u8d56\u5728\u955c\u50cf\u4e2d\u6784\u5efa\uff0c\u955c\u50cf\u5e26\u6709 Python \u7248\u672c\u548c\u4f9d\u8d56\u5305\u7684\u955c\u50cf\uff0c\u7ef4\u62a4\u6210\u672c\u8f83\u9ad8\u4e14\u66f4\u65b0\u4e0d\u65b9\u4fbf\uff0c\u5f80\u5f80\u9700\u8981\u91cd\u65b0\u6784\u5efa\u955c\u50cf\u3002

                      \u800c\u5728 AI Lab \u4e2d\uff0c\u7528\u6237\u53ef\u4ee5\u901a\u8fc7 \u73af\u5883\u7ba1\u7406 \u6a21\u5757\u6765\u7ba1\u7406\u7eaf\u7cb9\u7684\u73af\u5883\u4f9d\u8d56\uff0c\u5c06\u8fd9\u90e8\u5206\u4ece\u955c\u50cf\u4e2d\u89e3\u8026\uff0c\u5e26\u6765\u7684\u4f18\u52bf\u6709\uff1a

                      • \u4e00\u4efd\u73af\u5883\u591a\u5904\u4f7f\u7528\uff0c\u540c\u65f6\u53ef\u4ee5\u5728 Notebook\u3001\u5206\u5e03\u5f0f\u8bad\u7ec3\u4efb\u52a1\u3001\u4e43\u81f3\u63a8\u7406\u670d\u52a1\u4e2d\u4f7f\u7528\u3002
                      • \u66f4\u65b0\u4f9d\u8d56\u5305\u66f4\u52a0\u65b9\u4fbf\uff0c\u53ea\u9700\u8981\u66f4\u65b0\u73af\u5883\u4f9d\u8d56\u5373\u53ef\uff0c\u65e0\u9700\u91cd\u65b0\u6784\u5efa\u955c\u50cf\u3002

                      \u4ee5\u4e0b\u4e3a\u73af\u5883\u7ba1\u7406\u7684\u4e3b\u8981\u7ec4\u6210\u90e8\u5206\uff1a

                      • \u96c6\u7fa4 \uff1a\u9009\u62e9\u9700\u8981\u64cd\u4f5c\u7684\u96c6\u7fa4\u3002
                      • \u547d\u540d\u7a7a\u95f4 \uff1a\u9009\u62e9\u547d\u540d\u7a7a\u95f4\u4ee5\u9650\u5b9a\u64cd\u4f5c\u8303\u56f4\u3002
                      • \u73af\u5883\u5217\u8868 \uff1a\u5c55\u793a\u5f53\u524d\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u6240\u6709\u73af\u5883\u53ca\u5176\u72b6\u6001\u3002

                      Field | Description | Example
                      Name | The environment's name | my-environment
                      Status | The environment's current status (Normal or Failed). A newly created environment goes through a preheating process; once preheated, it can be used in other tasks | Normal
                      Created At | Time the environment was created | 2023-10-01 10:00:00"},{"location":"end-user/baize/dataset/environments.html#_3","title":"Create a New Environment","text":"

                      On the Environment Management page, click the Create button in the upper right corner to enter the environment creation flow.

                      Field | Description | Example
                      Name | Enter the environment's name, 2-63 characters; it must start and end with a lowercase letter or digit. | my-environment
                      Deployment Location | Cluster: select the cluster to deploy to | gpu-cluster
                       | Namespace: select the namespace | default
                      Remarks | Fill in remark information. | This is a test environment
                      Labels | Add labels to the environment. | env:test
                      Annotations | Add annotations to the environment. After filling in the form, click Next to proceed to environment configuration. | (annotation example)"},{"location":"end-user/baize/dataset/environments.html#_4","title":"Configure the Environment","text":"

                      In the environment configuration step, configure the Python version and the dependency package manager.

                      Field | Description | Example
                      Python Version | Select the required Python version | 3.12.3
                      Package Manager | Select the package management tool: PIP or CONDA | PIP
                      Environment Data | If PIP is selected: enter a requirements.txt-style dependency list in the editor below. | numpy==1.21.0
                       | If CONDA is selected: enter an environment.yaml-style dependency list in the editor below. |
                      Other Options | pip extra index URL: configure an extra pip index URL, for enterprises with their own private repository or a PIP mirror site. | https://pypi.example.com
                       | GPU configuration: enable or disable GPU configuration; some GPU-related dependency packages need GPU resources configured during preloading. | Enabled
                       | Associated storage: select the associated storage configuration; environment dependency packages are stored there. Note: the storage must support ReadWriteMany. | my-storage-config
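
                      For example, a PIP-style dependency list entered in the editor might look like the following (the package pins are illustrative):

                      numpy==1.21.0\npandas==2.0.3\nscikit-learn==1.5.0\n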

                      After the configuration is complete, click the Create button; the system automatically creates and configures the new Python environment.

                      "},{"location":"end-user/baize/dataset/environments.html#_5","title":"\u6545\u969c\u6392\u9664","text":"
                      • \u5982\u679c\u73af\u5883\u521b\u5efa\u5931\u8d25\uff1a

                        • \u68c0\u67e5\u7f51\u7edc\u8fde\u63a5\u662f\u5426\u6b63\u5e38\u3002
                        • \u786e\u8ba4\u586b\u5199\u7684 Python \u7248\u672c\u548c\u5305\u7ba1\u7406\u5668\u914d\u7f6e\u65e0\u8bef\u3002
                        • \u786e\u4fdd\u6240\u9009\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u53ef\u7528\u3002
                      • \u5982\u679c\u4f9d\u8d56\u9884\u70ed\u5931\u8d25\uff1a

                        • \u68c0\u67e5 requirements.txt \u6216 environment.yaml \u6587\u4ef6\u683c\u5f0f\u662f\u5426\u6b63\u786e\u3002
                        • \u786e\u8ba4\u4f9d\u8d56\u5305\u540d\u79f0\u548c\u7248\u672c\u662f\u5426\u6b63\u786e\u65e0\u8bef\u3002\u5982\u9047\u5230\u5176\u4ed6\u95ee\u9898\uff0c\u8bf7\u8054\u7cfb\u5e73\u53f0\u7ba1\u7406\u5458\u6216\u67e5\u770b\u5e73\u53f0\u5e2e\u52a9\u6587\u6863\u83b7\u53d6\u66f4\u591a\u652f\u6301\u3002

                      \u4ee5\u4e0a\u5373\u4e3a\u5728 AI Lab \u4e2d\u7ba1\u7406 Python \u4f9d\u8d56\u5e93\u7684\u57fa\u672c\u64cd\u4f5c\u6b65\u9aa4\u548c\u6ce8\u610f\u4e8b\u9879\u3002

                      "},{"location":"end-user/baize/inference/models.html","title":"\u4e86\u89e3\u6a21\u578b\u652f\u6301\u60c5\u51b5","text":"

                      \u968f\u7740 AI Lab \u7684\u5feb\u901f\u8fed\u4ee3\uff0c\u6211\u4eec\u5df2\u7ecf\u652f\u6301\u4e86\u591a\u79cd\u6a21\u578b\u7684\u63a8\u7406\u670d\u52a1\uff0c\u60a8\u53ef\u4ee5\u5728\u8fd9\u91cc\u770b\u5230\u6240\u652f\u6301\u7684\u6a21\u578b\u4fe1\u606f\u3002

                      • AI Lab v0.3.0 \u4e0a\u7ebf\u4e86\u6a21\u578b\u63a8\u7406\u670d\u52a1\uff0c\u9488\u5bf9\u4f20\u7edf\u7684\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\uff0c\u65b9\u4fbf\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528AI Lab \u7684\u63a8\u7406\u670d\u52a1\uff0c\u65e0\u9700\u5173\u5fc3\u6a21\u578b\u7684\u90e8\u7f72\u548c\u7ef4\u62a4\u3002
                      • AI Lab v0.6.0 \u652f\u6301\u4e86\u5b8c\u6574\u7248\u672c\u7684 vLLM \u63a8\u7406\u80fd\u529b\uff0c\u652f\u6301\u8bf8\u591a\u5927\u8bed\u8a00\u6a21\u578b\uff0c\u5982 LLama\u3001Qwen\u3001ChatGLM \u7b49\u3002

                      \u60a8\u53ef\u4ee5\u5728 AI Lab \u4e2d\u4f7f\u7528\u7ecf\u8fc7\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9a8c\u8bc1\u8fc7\u7684 GPU \u7c7b\u578b\uff1b \u66f4\u591a\u7ec6\u8282\u53c2\u9605 GPU \u652f\u6301\u77e9\u9635\u3002

                      "},{"location":"end-user/baize/inference/models.html#triton-inference-server","title":"Triton Inference Server","text":"

                      Triton Inference Server provides solid support for traditional deep learning models; we currently support the following mainstream inference backends:

                      Backend | Supported Model Formats | Introduction
                      pytorch | TorchScript, PyTorch 2.0 models | triton-inference-server/pytorch_backend
                      tensorflow | TensorFlow 2.x | triton-inference-server/tensorflow_backend
                      vLLM (Deprecated) | Same as vLLM | Supported models match the vLLM supported model list

                      Danger

                      Using vLLM as a Triton backend is deprecated; we recommend deploying your large language models with the latest supported vLLM.

                      "},{"location":"end-user/baize/inference/models.html#vllm","title":"vLLM","text":"

                      With vLLM you can quickly serve large language models. The list of models we support is given here; it is generally consistent with vLLM Supported Models.

                      • HuggingFace models: most HuggingFace models are supported; see the HuggingFace Model Hub for more.
                      • The vLLM Supported Models list covers the supported large language models and vision-language models.
                      • Fine-tuned versions of models supported by the vLLM framework.
                      "},{"location":"end-user/baize/inference/models.html#vllm_1","title":"New vLLM Features","text":"

                      Currently, AI Lab also supports some additional features when using vLLM as the inference tool:

                      • Enable a LoRA adapter when serving a model to optimize the inference service
                      • Provide an OpenAI-compatible OpenAPI interface, so users can switch to the local inference service at low cost
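
                      A hedged sketch of calling the OpenAI-compatible interface (the endpoint URL and model name are placeholders; the path follows the standard OpenAI chat-completions convention):

                      curl http://<inference-endpoint>/v1/chat/completions \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\"model\": \"<model-name>\", \"messages\": [{\"role\": \"user\", \"content\": \"Hello\"}]}'\n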
                      "},{"location":"end-user/baize/inference/models.html#_2","title":"\u4e0b\u4e00\u6b65","text":"
                      • \u521b\u5efa Triton \u63a8\u7406\u670d\u52a1
                      • \u521b\u5efa vLLM \u63a8\u7406\u670d\u52a1
                      "},{"location":"end-user/baize/inference/triton-inference.html","title":"\u521b\u5efa Triton \u63a8\u7406\u670d\u52a1","text":"

                      AI Lab currently provides Triton and vLLM as inference frameworks; users can quickly launch a high-performance inference service with simple configuration.

                      Danger

                      Using vLLM as a Triton backend is deprecated; we recommend deploying your large language models with the latest supported vLLM.

                      "},{"location":"end-user/baize/inference/triton-inference.html#triton_1","title":"Triton\u4ecb\u7ecd","text":"

                      Triton \u662f\u7531 NVIDIA \u5f00\u53d1\u7684\u4e00\u4e2a\u5f00\u6e90\u63a8\u7406\u670d\u52a1\u5668\uff0c\u65e8\u5728\u7b80\u5316\u673a\u5668\u5b66\u4e60\u6a21\u578b\u7684\u90e8\u7f72\u548c\u63a8\u7406\u670d\u52a1\u3002\u5b83\u652f\u6301\u591a\u79cd\u6df1\u5ea6\u5b66\u4e60\u6846\u67b6\uff0c\u5305\u62ec TensorFlow\u3001PyTorch \u7b49\uff0c\u4f7f\u5f97\u7528\u6237\u80fd\u591f\u8f7b\u677e\u7ba1\u7406\u548c\u90e8\u7f72\u4e0d\u540c\u7c7b\u578b\u7684\u6a21\u578b\u3002

                      "},{"location":"end-user/baize/inference/triton-inference.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u51c6\u5907\u6a21\u578b\u6570\u636e\uff1a\u5728\u6570\u636e\u96c6\u7ba1\u7406\u4e2d\u7eb3\u7ba1\u6a21\u578b\u4ee3\u7801\uff0c\u5e76\u4fdd\u8bc1\u6570\u636e\u6210\u529f\u9884\u52a0\u8f7d\uff0c\u4e0b\u9762\u4ee5 mnist \u624b\u5199\u6570\u5b57\u8bc6\u522b\u7684 PyTorch \u6a21\u578b\u4e3a\u4f8b\u3002

Note

The model to be served must follow this directory layout inside the dataset:

                        <model-repository-name>\n  \u2514\u2500\u2500 <model-name>\n     \u2514\u2500\u2500 <version>\n        \u2514\u2500\u2500 <model-definition-file>\n

The directory layout in this example is:

model-repo
└── mnist-cnn
    └── 1
        └── model.pt
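
For reference, a model.pt in this layout can be produced by exporting a trained PyTorch model to TorchScript. The sketch below is a minimal, hypothetical example; the MnistCnn class stands in for your own trained network:

import torch
import torch.nn as nn

class MnistCnn(nn.Module):
    """Stand-in for your trained MNIST network (hypothetical)."""
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(1, 8, 3, padding=1), nn.ReLU(),
            nn.Flatten(), nn.Linear(8 * 32 * 32, 10),
        )

    def forward(self, x):
        return self.net(x)

model = MnistCnn().eval()
# Trace with an example input matching the configured shape [batch, 1, 32, 32].
scripted = torch.jit.trace(model, torch.randn(1, 1, 32, 32))
# Save into the directory layout Triton expects.
scripted.save("model-repo/mnist-cnn/1/model.pt")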
                      "},{"location":"end-user/baize/inference/triton-inference.html#_2","title":"\u521b\u5efa\u63a8\u7406\u670d\u52a1","text":"

                      \u76ee\u524d\u5df2\u7ecf\u652f\u6301\u8868\u5355\u521b\u5efa\uff0c\u53ef\u4ee5\u754c\u9762\u5b57\u6bb5\u63d0\u793a\uff0c\u8fdb\u884c\u670d\u52a1\u521b\u5efa\u3002

                      "},{"location":"end-user/baize/inference/triton-inference.html#_3","title":"\u914d\u7f6e\u6a21\u578b\u8def\u5f84","text":"

                      \u6a21\u578b\u8def\u5f84 model-repo/mnist-cnn/1/model.pt \u9700\u8981\u548c\u6570\u636e\u96c6\u4e2d\u7684\u6a21\u578b\u76ee\u5f55\u683c\u5f0f\u4e00\u81f4\u3002

                      "},{"location":"end-user/baize/inference/triton-inference.html#_4","title":"\u6a21\u578b\u914d\u7f6e","text":""},{"location":"end-user/baize/inference/triton-inference.html#_5","title":"\u914d\u7f6e\u8f93\u5165\u548c\u8f93\u51fa\u53c2\u6570","text":"

                      Note

                      \u8f93\u5165\u548c\u8f93\u51fa\u53c2\u6570\u7684\u7b2c\u4e00\u4e2a\u7ef4\u5ea6\u9ed8\u8ba4\u4e3a batchsize \u7684\u5927\u5c0f\uff0c\u8bbe\u7f6e\u4e3a -1 \u53ef\u4ee5\u6839\u636e\u8f93\u5165\u7684\u63a8\u7406\u6570\u636e\u81ea\u52a8\u8ba1\u7b97 batchsize\u3002\u53c2\u6570\u5176\u4f59\u7ef4\u5ea6\u548c\u6570\u636e\u7c7b\u578b\u9700\u8981\u4e0e\u6a21\u578b\u8f93\u5165\u5339\u914d\u3002
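
To make the dynamic batch dimension concrete, here is a small Python sketch of what it means for the request payload. The input name, dims, and datatype are illustrative values matching the curl example later in this page, not fixed by the platform:

import numpy as np

# If the model config declares dims [-1, 1, 32, 32], the leading -1 is the
# batch axis, so requests with different batch sizes are both valid.
batch_of_one = np.random.rand(1, 1, 32, 32).astype(np.float32)
batch_of_four = np.random.rand(4, 1, 32, 32).astype(np.float32)

def to_infer_input(tensor, name="model_input"):
    """Build one entry of the 'inputs' list in a Triton v2 inference request."""
    return {
        "name": name,
        "shape": list(tensor.shape),  # the actual batch size goes here, e.g. 1 or 4
        "datatype": "FP32",
        "data": tensor.flatten().tolist(),
    }

print(to_infer_input(batch_of_one)["shape"])   # [1, 1, 32, 32]
print(to_infer_input(batch_of_four)["shape"])  # [4, 1, 32, 32]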

                      "},{"location":"end-user/baize/inference/triton-inference.html#_6","title":"\u914d\u7f6e\u73af\u5883","text":"

                      \u53ef\u4ee5\u5bfc\u5165 \u73af\u5883\u7ba1\u7406 \u4e2d\u521b\u5efa\u7684\u73af\u5883\u4f5c\u4e3a\u63a8\u7406\u65f6\u7684\u8fd0\u884c\u73af\u5883\u3002

                      "},{"location":"end-user/baize/inference/triton-inference.html#_7","title":"\u9ad8\u7ea7\u914d\u7f6e","text":""},{"location":"end-user/baize/inference/triton-inference.html#_8","title":"\u914d\u7f6e\u8ba4\u8bc1\u7b56\u7565","text":"

                      \u652f\u6301 API key \u7684\u8bf7\u6c42\u65b9\u5f0f\u8ba4\u8bc1\uff0c\u7528\u6237\u53ef\u4ee5\u81ea\u5b9a\u4e49\u589e\u52a0\u8ba4\u8bc1\u53c2\u6570\u3002
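
As a sketch, a client might pass the key in a request header. The Authorization header name and Bearer token format below are assumptions for illustration; use whatever authentication parameters you actually configured for the service:

import requests

# Hypothetical example: send the configured API key with each request.
url = "http://<ip>:<port>/v2/models/<inference-name>/infer"
headers = {
    "Content-Type": "application/json",
    "Authorization": "Bearer <your-api-key>",  # assumed header; match your service config
}
body = {"inputs": []}  # fill in real inputs as shown in the curl example below
resp = requests.post(url, headers=headers, json=body, timeout=30)
print(resp.status_code)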

                      "},{"location":"end-user/baize/inference/triton-inference.html#_9","title":"\u4eb2\u548c\u6027\u8c03\u5ea6","text":"

                      \u652f\u6301 \u6839\u636e GPU \u8d44\u6e90\u7b49\u8282\u70b9\u914d\u7f6e\u5b9e\u73b0\u81ea\u52a8\u5316\u7684\u4eb2\u548c\u6027\u8c03\u5ea6\uff0c\u540c\u65f6\u4e5f\u65b9\u4fbf\u7528\u6237\u81ea\u5b9a\u4e49\u8c03\u5ea6\u7b56\u7565\u3002

                      "},{"location":"end-user/baize/inference/triton-inference.html#_10","title":"\u8bbf\u95ee","text":""},{"location":"end-user/baize/inference/triton-inference.html#api","title":"API \u8bbf\u95ee","text":"
                      • Triton \u63d0\u4f9b\u4e86\u4e00\u4e2a\u57fa\u4e8e REST \u7684 API\uff0c\u5141\u8bb8\u5ba2\u6237\u7aef\u901a\u8fc7 HTTP POST \u8bf7\u6c42\u8fdb\u884c\u6a21\u578b\u63a8\u7406\u3002
                      • \u5ba2\u6237\u7aef\u53ef\u4ee5\u53d1\u9001 JSON \u683c\u5f0f\u7684\u8bf7\u6c42\u4f53\uff0c\u5176\u4e2d\u5305\u542b\u8f93\u5165\u6570\u636e\u548c\u76f8\u5173\u7684\u5143\u6570\u636e\u3002
                      "},{"location":"end-user/baize/inference/triton-inference.html#http","title":"HTTP \u8bbf\u95ee","text":"
                      1. \u53d1\u9001 HTTP POST \u8bf7\u6c42\uff1a\u4f7f\u7528\u5de5\u5177\u5982 curl \u6216 HTTP \u5ba2\u6237\u7aef\u5e93\uff08\u5982 Python \u7684 requests \u5e93\uff09\u5411 Triton Server \u53d1\u9001 POST \u8bf7\u6c42\u3002

                      2. \u8bbe\u7f6e HTTP \u5934\uff1a\u6839\u636e\u7528\u6237\u914d\u7f6e\u9879\u81ea\u52a8\u751f\u6210\u7684\u914d\u7f6e\uff0c\u5305\u542b\u6a21\u578b\u8f93\u5165\u548c\u8f93\u51fa\u7684\u5143\u6570\u636e\u3002

                      3. \u6784\u5efa\u8bf7\u6c42\u4f53\uff1a\u8bf7\u6c42\u4f53\u901a\u5e38\u5305\u542b\u8981\u8fdb\u884c\u63a8\u7406\u7684\u8f93\u5165\u6570\u636e\uff0c\u4ee5\u53ca\u6a21\u578b\u7279\u5b9a\u7684\u5143\u6570\u636e\u3002

                      "},{"location":"end-user/baize/inference/triton-inference.html#curl","title":"\u793a\u4f8b curl \u547d\u4ee4","text":"
curl -X POST "http://<ip>:<port>/v2/models/<inference-name>/infer" \
  -H "Content-Type: application/json" \
  -d '{
    "inputs": [
      {
        "name": "model_input",
        "shape": [1, 1, 32, 32],
        "datatype": "FP32",
        "data": [
          [0.1234, 0.5678, 0.9101, ... ]
        ]
      }
    ]
  }'
                      • <ip> \u662f Triton Inference Server \u8fd0\u884c\u7684\u4e3b\u673a\u5730\u5740\u3002
                      • <port> \u662f Triton Inference Server \u8fd0\u884c\u7684\u4e3b\u673a\u7aef\u53e3\u53f7\u3002
                      • <inference-name> \u662f\u6240\u521b\u5efa\u7684\u63a8\u7406\u670d\u52a1\u7684\u540d\u79f0\u3002
                      • \"name\" \u8981\u4e0e\u6a21\u578b\u914d\u7f6e\u4e2d\u7684\u8f93\u5165\u53c2\u6570\u7684 name \u4e00\u81f4\u3002
                      • \"shape\" \u8981\u4e0e\u6a21\u578b\u914d\u7f6e\u4e2d\u7684\u8f93\u5165\u53c2\u6570\u7684 dims \u4e00\u81f4\u3002
                      • \"datatype\" \u8981\u4e0e\u6a21\u578b\u914d\u7f6e\u4e2d\u7684\u8f93\u5165\u53c2\u6570\u7684 Data Type \u4e00\u81f4\u3002
                      • \"data\" \u66ff\u6362\u4e3a\u5b9e\u9645\u7684\u63a8\u7406\u6570\u636e\u3002

Note that the example above must be adapted to your specific model and environment; the format and content of the input data must also meet the model's requirements.
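
The same request can be sent from Python with the requests library. This is a minimal sketch mirroring the curl example above, with the same placeholder endpoint and input names:

import requests

url = "http://<ip>:<port>/v2/models/<inference-name>/infer"
payload = {
    "inputs": [
        {
            "name": "model_input",        # must match the input name in the model config
            "shape": [1, 1, 32, 32],      # must match the configured dims
            "datatype": "FP32",           # must match the configured data type
            "data": [[0.0] * (32 * 32)],  # replace with real inference data
        }
    ]
}
response = requests.post(url, json=payload, timeout=30)
response.raise_for_status()
print(response.json())  # contains the model outputs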

                      "},{"location":"end-user/baize/inference/vllm-inference.html","title":"\u521b\u5efa vLLM \u63a8\u7406\u670d\u52a1","text":"

                      AI Lab \u652f\u6301\u4ee5 vLLM \u4f5c\u4e3a\u63a8\u7406\u670d\u52a1\uff0c\u63d0\u4f9b\u5168\u90e8 vLLM \u7684\u80fd\u529b\uff0c\u540c\u65f6\u63d0\u4f9b\u4e86\u5b8c\u5168\u9002\u914d OpenAI \u63a5\u53e3\u5b9a\u4e49\u3002

                      "},{"location":"end-user/baize/inference/vllm-inference.html#vllm_1","title":"vLLM \u4ecb\u7ecd","text":"

                      vLLM \u662f\u4e00\u4e2a\u5feb\u901f\u4e14\u6613\u4e8e\u4f7f\u7528\u7684\u7528\u4e8e\u63a8\u7406\u548c\u670d\u52a1\u7684\u5e93\uff0cvLLM \u65e8\u5728\u6781\u5927\u5730\u63d0\u5347\u5b9e\u65f6\u573a\u666f\u4e0b\u7684\u8bed\u8a00\u6a21\u578b\u670d\u52a1\u7684\u541e\u5410\u4e0e\u5185\u5b58\u4f7f\u7528\u6548\u7387\u3002vLLM \u5728\u901f\u5ea6\u3001\u7075\u6d3b\u6027\u65b9\u9762\u5177\u6709\u4ee5\u4e0b\u90e8\u5206\u7279\u70b9\uff1a

• Continuous batching of incoming requests;
• Efficient management of attention key and value memory with PagedAttention;
• Seamless integration with popular HuggingFace models;
• An OpenAI-compatible API server.
                      "},{"location":"end-user/baize/inference/vllm-inference.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u51c6\u5907\u6a21\u578b\u6570\u636e\uff1a\u5728\u6570\u636e\u96c6\u7ba1\u7406\u4e2d\u7eb3\u7ba1\u6a21\u578b\u4ee3\u7801\uff0c\u5e76\u4fdd\u8bc1\u6570\u636e\u6210\u529f\u9884\u52a0\u8f7d\u3002

                      "},{"location":"end-user/baize/inference/vllm-inference.html#_2","title":"\u521b\u5efa\u63a8\u7406\u670d\u52a1","text":"
                      1. \u9009\u62e9 vLLM \u63a8\u7406\u6846\u67b6\uff0c\u5e76\u5728\u9009\u62e9\u6a21\u578b\u6a21\u5757\u9009\u62e9\u63d0\u524d\u521b\u5efa\u597d\u7684\u6a21\u578b\u6570\u636e\u96c6 hdd-models \u5e76\u586b\u5199\u6570\u636e\u96c6\u4e2d\u6a21\u578b\u6240\u5728\u7684\u8def\u5f84\u4fe1\u606f\u3002

                        \u672c\u6587\u63a8\u7406\u670d\u52a1\u7684\u521b\u5efa\u4f7f\u7528 ChatGLM3 \u6a21\u578b\u3002

                      2. \u914d\u7f6e\u63a8\u7406\u670d\u52a1\u7684\u8d44\u6e90\uff0c\u5e76\u8c03\u6574\u63a8\u7406\u670d\u52a1\u8fd0\u884c\u7684\u53c2\u6570\u3002

                        \u53c2\u6570\u540d \u63cf\u8ff0 GPU \u8d44\u6e90 \u6839\u636e\u6a21\u578b\u89c4\u6a21\u4ee5\u53ca\u96c6\u7fa4\u8d44\u6e90\u53ef\u4ee5\u4e3a\u63a8\u7406\u914d\u7f6e GPU \u8d44\u6e90\u3002 \u5141\u8bb8\u8fdc\u7a0b\u4ee3\u7801 \u63a7\u5236 vLLM \u662f\u5426\u4fe1\u4efb\u5e76\u6267\u884c\u6765\u81ea\u8fdc\u7a0b\u6e90\u7684\u4ee3\u7801 LoRA LoRA \u662f\u4e00\u79cd\u9488\u5bf9\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\u7684\u53c2\u6570\u9ad8\u6548\u8c03\u6574\u6280\u672f\u3002\u5b83\u901a\u8fc7\u5c06\u539f\u59cb\u6a21\u578b\u53c2\u6570\u77e9\u9635\u5206\u89e3\u4e3a\u4f4e\u79e9\u77e9\u9635\uff0c\u4ece\u800c\u51cf\u5c11\u53c2\u6570\u6570\u91cf\u548c\u8ba1\u7b97\u590d\u6742\u5ea6\u3002 1. --lora-modules\uff1a\u7528\u6765\u6307\u5b9a\u7279\u5b9a\u6a21\u5757\u6216\u5c42\u8fdb\u884c\u4f4e\u79e9\u8fd1\u4f3c 2. max_loras_rank\uff1a\u7528\u6765\u6307\u5b9a LoRA \u6a21\u578b\u4e2d\u6bcf\u4e2a\u9002\u914d\u5c42\u7684\u6700\u5927\u79e9\uff0c\u5bf9\u4e8e\u7b80\u5355\u7684\u4efb\u52a1\uff0c\u53ef\u4ee5\u9009\u62e9\u8f83\u5c0f\u7684\u79e9\u503c\uff0c\u800c\u5bf9\u4e8e\u590d\u6742\u4efb\u52a1\uff0c\u53ef\u80fd\u9700\u8981\u8f83\u5927\u7684\u79e9\u503c\u6765\u4fdd\u8bc1\u6a21\u578b\u6027\u80fd\u3002 3. max_loras\uff1a\u8868\u793a\u6a21\u578b\u4e2d\u53ef\u4ee5\u5305\u542b\u7684 LoRA \u5c42\u7684\u6700\u5927\u6570\u91cf\uff0c\u6839\u636e\u6a21\u578b\u5927\u5c0f\u3001\u63a8\u7406\u590d\u6742\u5ea6\u7b49\u56e0\u7d20\u81ea\u5b9a 4. max_cpu_loras\uff1a\u7528\u4e8e\u6307\u5b9a\u5728 CPU \u73af\u5883\u4e2d\u53ef\u4ee5\u5904\u7406\u7684 LoRA \u5c42\u7684\u6700\u5927\u6570\u3002 \u5173\u8054\u73af\u5883 \u901a\u8fc7\u9009\u62e9\u73af\u5883\u9884\u5b9a\u4e49\u63a8\u7406\u65f6\u6240\u9700\u7684\u73af\u5883\u4f9d\u8d56\u3002

                        Info

                        \u652f\u6301\u914d\u7f6e LoRA \u53c2\u6570\u7684\u6a21\u578b\u53ef\u53c2\u8003 vLLM \u652f\u6301\u7684\u6a21\u578b\u3002

                      3. \u5728 \u9ad8\u7ea7\u914d\u7f6e \u4e2d\uff0c\u652f\u6301\u6839\u636e GPU \u8d44\u6e90\u7b49\u8282\u70b9\u914d\u7f6e\u5b9e\u73b0\u81ea\u52a8\u5316\u7684\u4eb2\u548c\u6027\u8c03\u5ea6\uff0c\u540c\u65f6\u4e5f\u65b9\u4fbf\u7528\u6237\u81ea\u5b9a\u4e49\u8c03\u5ea6\u7b56\u7565\u3002
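
To illustrate how the LoRA options above map onto vLLM, below is a minimal offline sketch using vLLM's Python API. The model and adapter paths are placeholders, and the exact values are assumptions for illustration:

from vllm import LLM, SamplingParams
from vllm.lora.request import LoRARequest

# Engine arguments mirroring the LoRA options in the table (paths are placeholders).
llm = LLM(
    model="/data/models/<base-model>",
    enable_lora=True,
    max_loras=1,       # max number of LoRA adapters held concurrently
    max_lora_rank=8,   # max rank of each adapter layer
    max_cpu_loras=2,   # max adapters cached in CPU memory
)

outputs = llm.generate(
    ["Hello, who are you?"],
    SamplingParams(temperature=0.7, max_tokens=64),
    lora_request=LoRARequest("my-adapter", 1, "/data/models/<lora-adapter>"),
)
print(outputs[0].outputs[0].text)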

                      "},{"location":"end-user/baize/inference/vllm-inference.html#_3","title":"\u9a8c\u8bc1\u63a8\u7406\u670d\u52a1","text":"

                      \u63a8\u7406\u670d\u52a1\u521b\u5efa\u5b8c\u6210\u4e4b\u540e\uff0c\u70b9\u51fb\u63a8\u7406\u670d\u52a1\u540d\u79f0\u8fdb\u5165\u8be6\u60c5\uff0c\u67e5\u770b API \u8c03\u7528\u65b9\u6cd5\u3002\u901a\u8fc7\u4f7f\u7528 Curl\u3001Python\u3001Nodejs \u7b49\u65b9\u5f0f\u9a8c\u8bc1\u6267\u884c\u7ed3\u679c\u3002

                      \u62f7\u8d1d\u8be6\u60c5\u4e2d\u7684 curl \u547d\u4ee4\uff0c\u5e76\u5728\u7ec8\u7aef\u4e2d\u6267\u884c\u547d\u4ee4\u53d1\u9001\u4e00\u6761\u6a21\u578b\u63a8\u7406\u8bf7\u6c42\uff0c\u9884\u671f\u8f93\u51fa\uff1a
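
If you prefer Python over curl, here is a minimal sketch of the same verification request using the requests library. The /v1/chat/completions path follows the OpenAI-compatible convention this service exposes; the host, port, and model name are placeholders taken from your service details page:

import requests

resp = requests.post(
    "http://<ip>:<port>/v1/chat/completions",  # OpenAI-compatible route
    json={
        "model": "<model-name>",
        "messages": [{"role": "user", "content": "Hello!"}],
    },
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])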

                      "},{"location":"end-user/baize/jobs/create.html","title":"\u521b\u5efa\u4efb\u52a1\uff08Job\uff09","text":"

                      \u4efb\u52a1\u7ba1\u7406\u662f\u6307\u901a\u8fc7\u4f5c\u4e1a\u8c03\u5ea6\u548c\u7ba1\u63a7\u7ec4\u4ef6\u6765\u521b\u5efa\u548c\u7ba1\u7406\u4efb\u52a1\u751f\u547d\u5468\u671f\u7684\u529f\u80fd\u3002

                      AI Lab \u91c7\u7528 Kubernetes \u7684 Job \u673a\u5236\u6765\u8c03\u5ea6\u5404\u9879 AI \u63a8\u7406\u3001\u8bad\u7ec3\u4efb\u52a1\u3002

                      "},{"location":"end-user/baize/jobs/create.html#_1","title":"\u901a\u7528\u6b65\u9aa4","text":"
                      1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u4efb\u52a1\u4e2d\u5fc3 -> \u8bad\u7ec3\u4efb\u52a1 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae\u3002

                      2. \u7cfb\u7edf\u4f1a\u9884\u5148\u586b\u5145\u57fa\u7840\u914d\u7f6e\u6570\u636e\uff0c\u5305\u62ec\u8981\u90e8\u7f72\u7684\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u3001\u4efb\u52a1\u7c7b\u578b\u3001\u961f\u5217\u3001\u4f18\u5148\u7ea7\u7b49\u3002 \u8c03\u6574\u8fd9\u4e9b\u53c2\u6570\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                      3. \u914d\u7f6e\u955c\u50cf\u5730\u5740\u3001\u8fd0\u884c\u53c2\u6570\u4ee5\u53ca\u5173\u8054\u7684\u6570\u636e\u96c6\u3001\u73af\u5883\u548c\u8d44\u6e90\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                      4. \u6309\u9700\u6dfb\u52a0\u6807\u7b7e\u3001\u6ce8\u89e3\u3001\u73af\u5883\u53d8\u91cf\u7b49\u4efb\u52a1\u53c2\u6570\uff0c\u9009\u62e9\u8c03\u5ea6\u7b56\u7565\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                      5. \u4efb\u52a1\u521b\u5efa\u6210\u529f\u540e\uff0c\u4f1a\u6709\u51e0\u79cd\u8fd0\u884c\u72b6\u6001\uff1a

                        • \u8fd0\u884c\u4e2d
                        • \u6392\u961f\u4e2d
                        • \u63d0\u4ea4\u6210\u529f\u3001\u63d0\u4ea4\u5931\u8d25
                        • \u4efb\u52a1\u6210\u529f\u3001\u4efb\u52a1\u5931\u8d25
                      "},{"location":"end-user/baize/jobs/create.html#_2","title":"\u521b\u5efa\u7279\u5b9a\u4efb\u52a1","text":"
                      • \u521b\u5efa Pytorch \u4efb\u52a1
                      • \u521b\u5efa TensorFlow \u4efb\u52a1
                      • \u521b\u5efa MPI \u4efb\u52a1
                      • \u521b\u5efa MXNet \u4efb\u52a1
                      • \u521b\u5efa PaddlePaddle \u4efb\u52a1
                      "},{"location":"end-user/baize/jobs/delete.html","title":"\u5220\u9664\u4efb\u52a1\uff08Job\uff09","text":"

                      \u5982\u679c\u53d1\u73b0\u4efb\u52a1\u5197\u4f59\u3001\u8fc7\u671f\u6216\u56e0\u5176\u4ed6\u7f18\u6545\u4e0d\u518d\u9700\u8981\uff0c\u53ef\u4ee5\u4ece\u8bad\u7ec3\u4efb\u52a1\u5217\u8868\u4e2d\u5220\u9664\u3002

                      1. \u5728\u8bad\u7ec3\u4efb\u52a1\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u5220\u9664 \u3002

                      2. \u5728\u5f39\u7a97\u4e2d\u786e\u8ba4\u8981\u5220\u9664\u7684\u4efb\u52a1\uff0c\u8f93\u5165\u4efb\u52a1\u540d\u79f0\u540e\u70b9\u51fb \u5220\u9664 \u3002

                      3. \u5c4f\u5e55\u63d0\u793a\u5220\u9664\u6210\u529f\uff0c\u8be5\u4efb\u52a1\u4ece\u5217\u8868\u4e2d\u6d88\u5931\u3002

                      Caution

                      \u4efb\u52a1\u4e00\u65e6\u5220\u9664\u5c06\u4e0d\u53ef\u6062\u590d\uff0c\u8bf7\u8c28\u614e\u64cd\u4f5c\u3002

                      "},{"location":"end-user/baize/jobs/mpi.html","title":"MPI \u4efb\u52a1","text":"

                      MPI\uff08Message Passing Interface\uff09\u662f\u4e00\u79cd\u7528\u4e8e\u5e76\u884c\u8ba1\u7b97\u7684\u901a\u4fe1\u534f\u8bae\uff0c\u5b83\u5141\u8bb8\u591a\u4e2a\u8ba1\u7b97\u8282\u70b9\u4e4b\u95f4\u8fdb\u884c\u6d88\u606f\u4f20\u9012\u548c\u534f\u4f5c\u3002 MPI \u4efb\u52a1\u662f\u4f7f\u7528 MPI \u534f\u8bae\u8fdb\u884c\u5e76\u884c\u8ba1\u7b97\u7684\u4efb\u52a1\uff0c\u9002\u7528\u4e8e\u9700\u8981\u5927\u89c4\u6a21\u5e76\u884c\u5904\u7406\u7684\u5e94\u7528\u573a\u666f\uff0c\u4f8b\u5982\u5206\u5e03\u5f0f\u8bad\u7ec3\u3001\u79d1\u5b66\u8ba1\u7b97\u7b49\u3002

                      \u5728 AI Lab \u4e2d\uff0c\u6211\u4eec\u63d0\u4f9b\u4e86 MPI \u4efb\u52a1\u7684\u652f\u6301\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u754c\u9762\u5316\u64cd\u4f5c\uff0c\u5feb\u901f\u521b\u5efa MPI \u4efb\u52a1\uff0c\u8fdb\u884c\u9ad8\u6027\u80fd\u7684\u5e76\u884c\u8ba1\u7b97\u3002 \u672c\u6559\u7a0b\u5c06\u6307\u5bfc\u60a8\u5982\u4f55\u5728 AI Lab \u4e2d\u521b\u5efa\u548c\u8fd0\u884c\u4e00\u4e2a MPI \u4efb\u52a1\u3002

                      "},{"location":"end-user/baize/jobs/mpi.html#_1","title":"\u4efb\u52a1\u914d\u7f6e\u4ecb\u7ecd","text":"
                      • \u4efb\u52a1\u7c7b\u578b \uff1aMPI\uff0c\u7528\u4e8e\u8fd0\u884c\u5e76\u884c\u8ba1\u7b97\u4efb\u52a1\u3002
                      • \u8fd0\u884c\u73af\u5883 \uff1a\u9009\u7528\u9884\u88c5\u4e86 MPI \u73af\u5883\u7684\u955c\u50cf\uff0c\u6216\u8005\u5728\u4efb\u52a1\u4e2d\u6307\u5b9a\u5b89\u88c5\u5fc5\u8981\u7684\u4f9d\u8d56\u3002
                      • MPIJob \u914d\u7f6e \uff1a\u7406\u89e3\u5e76\u914d\u7f6e MPIJob \u7684\u5404\u9879\u53c2\u6570\uff0c\u5982\u526f\u672c\u6570\u3001\u8d44\u6e90\u8bf7\u6c42\u7b49\u3002
                      "},{"location":"end-user/baize/jobs/mpi.html#_2","title":"\u4efb\u52a1\u8fd0\u884c\u73af\u5883","text":"

                      \u5728\u8fd9\u91cc\u6211\u4eec\u4f7f\u7528 baize-notebook \u57fa\u7840\u955c\u50cf\u548c \u5173\u8054\u73af\u5883 \u7684\u65b9\u5f0f\u6765\u4f5c\u4e3a\u4efb\u52a1\u7684\u57fa\u7840\u8fd0\u884c\u73af\u5883\u3002 \u786e\u4fdd\u8fd0\u884c\u73af\u5883\u4e2d\u5305\u542b MPI \u53ca\u76f8\u5173\u5e93\uff0c\u5982 OpenMPI\u3001mpi4py \u7b49\u3002

                      \u6ce8\u610f \uff1a\u4e86\u89e3\u5982\u4f55\u521b\u5efa\u73af\u5883\uff0c\u8bf7\u53c2\u8003\u73af\u5883\u5217\u8868\u3002
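
A quick way to confirm that MPI and mpi4py work in the environment is a hello-world sketch like the following, launched with mpirun (for example `mpirun -np 2 python hello_mpi.py`; the file name is arbitrary):

from mpi4py import MPI

# Each MPI process reports its rank and the total world size.
comm = MPI.COMM_WORLD
print(f"Hello from rank {comm.Get_rank()} of {comm.Get_size()}")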

                      "},{"location":"end-user/baize/jobs/mpi.html#mpi_1","title":"\u521b\u5efa MPI \u4efb\u52a1","text":""},{"location":"end-user/baize/jobs/mpi.html#mpi_2","title":"MPI \u4efb\u52a1\u521b\u5efa\u6b65\u9aa4","text":"
                      1. \u767b\u5f55\u5e73\u53f0 \uff1a\u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3\uff0c\u8fdb\u5165 \u8bad\u7ec3\u4efb\u52a1 \u9875\u9762\u3002
                      2. \u521b\u5efa\u4efb\u52a1 \uff1a\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                      3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b \uff1a\u5728\u5f39\u51fa\u7684\u7a97\u53e3\u4e2d\uff0c\u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a MPI\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002
                      4. \u586b\u5199\u4efb\u52a1\u4fe1\u606f \uff1a\u586b\u5199\u4efb\u52a1\u540d\u79f0\u548c\u63cf\u8ff0\uff0c\u4f8b\u5982 \u201cbenchmarks-mpi\u201d\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002
                      5. \u914d\u7f6e\u4efb\u52a1\u53c2\u6570 \uff1a\u6839\u636e\u60a8\u7684\u9700\u6c42\uff0c\u914d\u7f6e\u4efb\u52a1\u7684\u8fd0\u884c\u53c2\u6570\u3001\u955c\u50cf\u3001\u8d44\u6e90\u7b49\u4fe1\u606f\u3002
                      "},{"location":"end-user/baize/jobs/mpi.html#_3","title":"\u8fd0\u884c\u53c2\u6570","text":"
                      • \u542f\u52a8\u547d\u4ee4 \uff1a\u4f7f\u7528 mpirun\uff0c\u8fd9\u662f\u8fd0\u884c MPI \u7a0b\u5e8f\u7684\u547d\u4ee4\u3002
                      • \u547d\u4ee4\u53c2\u6570 \uff1a\u8f93\u5165\u60a8\u8981\u8fd0\u884c\u7684 MPI \u7a0b\u5e8f\u7684\u53c2\u6570\u3002

Example: running the TensorFlow Benchmarks

In this example we run a TensorFlow benchmark program that uses Horovod for distributed training. First, make sure the image you use contains the required dependencies, such as TensorFlow, Horovod, and Open MPI.

Image selection: use an image that contains TensorFlow and MPI, for example mai.daocloud.io/docker.io/mpioperator/tensorflow-benchmarks:latest.

Command arguments:

mpirun --allow-run-as-root -np 2 -bind-to none -map-by slot \
  -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH -x PATH \
  -mca pml ob1 -mca btl ^openib \
  python scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py \
  --model=resnet101 --batch_size=64 --variable_update=horovod

Explanation:

• mpirun: the MPI launch command.
• --allow-run-as-root: allow running as the root user (containers usually run as root).
• -np 2: run 2 processes.
• -bind-to none, -map-by slot: MPI process binding and mapping settings.
• -x NCCL_DEBUG=INFO: set the debug verbosity of NCCL (NVIDIA Collective Communication Library).
• -x LD_LIBRARY_PATH, -x PATH: pass the necessary environment variables into the MPI environment.
• -mca pml ob1 -mca btl ^openib: MPI configuration parameters selecting the transport and messaging layer protocols.
• python scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py: run the TensorFlow benchmark script.
• --model=resnet101, --batch_size=64, --variable_update=horovod: TensorFlow script arguments selecting the model, the batch size, and Horovod for parameter updates.
                      "},{"location":"end-user/baize/jobs/mpi.html#_4","title":"\u8d44\u6e90\u914d\u7f6e","text":"

                      \u5728\u4efb\u52a1\u914d\u7f6e\u4e2d\uff0c\u9700\u8981\u4e3a\u6bcf\u4e2a\u8282\u70b9\uff08Launcher \u548c Worker\uff09\u5206\u914d\u9002\u5f53\u7684\u8d44\u6e90\uff0c\u4f8b\u5982 CPU\u3001\u5185\u5b58\u548c GPU\u3002

                      \u8d44\u6e90\u793a\u4f8b \uff1a

• Launcher:

  • Replicas: 1
  • Resource requests:
    • CPU: 2 cores
    • Memory: 4 GiB
• Worker:

  • Replicas: 2
  • Resource requests:
    • CPU: 2 cores
    • Memory: 4 GiB
    • GPU: allocate as needed
                      "},{"location":"end-user/baize/jobs/mpi.html#mpijob","title":"\u5b8c\u6574\u7684 MPIJob \u914d\u7f6e\u793a\u4f8b","text":"

                      \u4ee5\u4e0b\u662f\u5b8c\u6574\u7684 MPIJob \u914d\u7f6e\u793a\u4f8b\uff0c\u4f9b\u60a8\u53c2\u8003\u3002

apiVersion: kubeflow.org/v1
kind: MPIJob
metadata:
  name: tensorflow-benchmarks
spec:
  slotsPerWorker: 1
  runPolicy:
    cleanPodPolicy: Running
  mpiReplicaSpecs:
    Launcher:
      replicas: 1
      template:
        spec:
          containers:
            - name: tensorflow-benchmarks
              image: mai.daocloud.io/docker.io/mpioperator/tensorflow-benchmarks:latest
              command:
                - mpirun
                - --allow-run-as-root
                - -np
                - "2"
                - -bind-to
                - none
                - -map-by
                - slot
                - -x
                - NCCL_DEBUG=INFO
                - -x
                - LD_LIBRARY_PATH
                - -x
                - PATH
                - -mca
                - pml
                - ob1
                - -mca
                - btl
                - ^openib
                - python
                - scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py
                - --model=resnet101
                - --batch_size=64
                - --variable_update=horovod
              resources:
                limits:
                  cpu: "2"
                  memory: 4Gi
                requests:
                  cpu: "2"
                  memory: 4Gi
    Worker:
      replicas: 2
      template:
        spec:
          containers:
            - name: tensorflow-benchmarks
              image: mai.daocloud.io/docker.io/mpioperator/tensorflow-benchmarks:latest
              resources:
                limits:
                  cpu: "2"
                  memory: 4Gi
                  nvidia.com/gpumem: 1k
                  nvidia.com/vgpu: "1"
                requests:
                  cpu: "2"
                  memory: 4Gi

Configuration breakdown:

• apiVersion and kind: the API version and resource type; MPIJob is a custom resource defined by Kubeflow for creating MPI jobs.
• metadata: metadata, including the job name and other information.
• spec: the detailed job configuration.
  • slotsPerWorker: the number of slots on each Worker node, usually set to 1.
  • runPolicy: the run policy, for example whether to clean up Pods after the job completes.
  • mpiReplicaSpecs: the replica configuration of the MPI job.
    • Launcher: the launcher, responsible for starting the MPI job.
      • replicas: the number of replicas, usually 1.
      • template: the Pod template, defining the container image, command, resources, and so on.
    • Worker: the worker nodes that actually perform the computation.
      • replicas: the number of replicas, set according to the required parallelism; 2 in this example.
      • template: the Pod template, likewise defining the container runtime environment and resources.
                      "},{"location":"end-user/baize/jobs/mpi.html#_5","title":"\u8bbe\u7f6e\u4efb\u52a1\u526f\u672c\u6570","text":"

                      \u5728\u521b\u5efa MPI \u4efb\u52a1\u65f6\uff0c\u9700\u8981\u6839\u636e mpiReplicaSpecs \u4e2d\u914d\u7f6e\u7684\u526f\u672c\u6570\uff0c\u6b63\u786e\u8bbe\u7f6e \u4efb\u52a1\u526f\u672c\u6570\u3002

                      • \u603b\u526f\u672c\u6570 = Launcher \u526f\u672c\u6570 + Worker \u526f\u672c\u6570
                      • \u672c\u793a\u4f8b\u4e2d\uff1a

                        • Launcher \u526f\u672c\u6570\uff1a1
                        • Worker \u526f\u672c\u6570\uff1a2
                        • \u603b\u526f\u672c\u6570 \uff1a1 + 2 = 3

                      \u56e0\u6b64\uff0c\u5728\u4efb\u52a1\u914d\u7f6e\u4e2d\uff0c\u60a8\u9700\u8981\u5c06 \u4efb\u52a1\u526f\u672c\u6570 \u8bbe\u7f6e\u4e3a 3\u3002

                      "},{"location":"end-user/baize/jobs/mpi.html#_6","title":"\u63d0\u4ea4\u4efb\u52a1","text":"

                      \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u63d0\u4ea4 \u6309\u94ae\uff0c\u5f00\u59cb\u8fd0\u884c MPI \u4efb\u52a1\u3002

                      "},{"location":"end-user/baize/jobs/mpi.html#_7","title":"\u67e5\u770b\u8fd0\u884c\u7ed3\u679c","text":"

                      \u4efb\u52a1\u63d0\u4ea4\u6210\u529f\u540e\uff0c\u60a8\u53ef\u4ee5\u8fdb\u5165 \u4efb\u52a1\u8be6\u60c5 \u9875\u9762\uff0c\u67e5\u770b\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\u548c\u4efb\u52a1\u7684\u8fd0\u884c\u72b6\u6001\u3002 \u4ece\u53f3\u4e0a\u89d2\u8fdb\u5165 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\uff0c\u53ef\u4ee5\u67e5\u770b\u8fd0\u884c\u8fc7\u7a0b\u4e2d\u6bcf\u4e2a\u8282\u70b9\u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                      \u793a\u4f8b\u8f93\u51fa\uff1a

                      TensorFlow:  1.13\nModel:       resnet101\nMode:        training\nBatch size:  64\n...\n\nTotal images/sec: 125.67\n

                      \u8fd9\u8868\u793a MPI \u4efb\u52a1\u6210\u529f\u8fd0\u884c\uff0cTensorFlow \u57fa\u51c6\u6d4b\u8bd5\u7a0b\u5e8f\u5b8c\u6210\u4e86\u5206\u5e03\u5f0f\u8bad\u7ec3\u3002

                      "},{"location":"end-user/baize/jobs/mpi.html#_8","title":"\u5c0f\u7ed3","text":"

                      \u901a\u8fc7\u672c\u6559\u7a0b\uff0c\u60a8\u5b66\u4e60\u4e86\u5982\u4f55\u5728 AI Lab \u5e73\u53f0\u4e0a\u521b\u5efa\u548c\u8fd0\u884c\u4e00\u4e2a MPI \u4efb\u52a1\u3002\u6211\u4eec\u8be6\u7ec6\u4ecb\u7ecd\u4e86 MPIJob \u7684\u914d\u7f6e\u65b9\u5f0f\uff0c \u4ee5\u53ca\u5982\u4f55\u5728\u4efb\u52a1\u4e2d\u6307\u5b9a\u8fd0\u884c\u7684\u547d\u4ee4\u548c\u8d44\u6e90\u9700\u6c42\u3002\u5e0c\u671b\u672c\u6559\u7a0b\u5bf9\u60a8\u6709\u6240\u5e2e\u52a9\uff0c\u5982\u6709\u4efb\u4f55\u95ee\u9898\uff0c\u8bf7\u53c2\u8003\u5e73\u53f0\u63d0\u4f9b\u7684\u5176\u4ed6\u6587\u6863\u6216\u8054\u7cfb\u6280\u672f\u652f\u6301\u3002

                      \u9644\u5f55 \uff1a

                      • \u5982\u679c\u60a8\u7684\u8fd0\u884c\u73af\u5883\u672a\u9884\u88c5\u6240\u9700\u7684\u5e93\uff08\u5982 mpi4py\u3001Horovod \u7b49\uff09\uff0c\u8bf7\u5728\u4efb\u52a1\u4e2d\u6dfb\u52a0\u5b89\u88c5\u547d\u4ee4\uff0c\u6216\u8005\u4f7f\u7528\u9884\u88c5\u4e86\u76f8\u5173\u4f9d\u8d56\u7684\u955c\u50cf\u3002
                      • \u5728\u5b9e\u9645\u5e94\u7528\u4e2d\uff0c\u60a8\u53ef\u4ee5\u6839\u636e\u9700\u6c42\u4fee\u6539 MPIJob \u7684\u914d\u7f6e\uff0c\u4f8b\u5982\u66f4\u6539\u955c\u50cf\u3001\u547d\u4ee4\u53c2\u6570\u3001\u8d44\u6e90\u8bf7\u6c42\u7b49\u3002
                      "},{"location":"end-user/baize/jobs/mxnet.html","title":"MXNet \u4efb\u52a1","text":"

                      Warning

                      \u7531\u4e8e Apache MXNet \u9879\u76ee\u5df2\u5b58\u6863\uff0c\u56e0\u6b64 Kubeflow MXJob \u5c06\u5728\u672a\u6765\u7684 Training Operator 1.9 \u7248\u672c\u4e2d\u5f03\u7528\u548c\u5220\u9664\u3002

                      Apache MXNet \u662f\u4e00\u4e2a\u9ad8\u6027\u80fd\u7684\u6df1\u5ea6\u5b66\u4e60\u6846\u67b6\uff0c\u652f\u6301\u591a\u79cd\u7f16\u7a0b\u8bed\u8a00\u3002MXNet \u4efb\u52a1\u53ef\u4ee5\u4f7f\u7528\u591a\u79cd\u65b9\u5f0f\u8fdb\u884c\u8bad\u7ec3\uff0c\u5305\u62ec\u5355\u673a\u6a21\u5f0f\u548c\u5206\u5e03\u5f0f\u6a21\u5f0f\u3002\u5728 AI Lab \u4e2d\uff0c\u6211\u4eec\u63d0\u4f9b\u4e86\u5bf9 MXNet \u4efb\u52a1\u7684\u652f\u6301\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u754c\u9762\u5316\u64cd\u4f5c\uff0c\u5feb\u901f\u521b\u5efa MXNet \u4efb\u52a1\uff0c\u8fdb\u884c\u6a21\u578b\u8bad\u7ec3\u3002

                      \u672c\u6559\u7a0b\u5c06\u6307\u5bfc\u60a8\u5982\u4f55\u5728 AI Lab \u5e73\u53f0\u4e0a\u521b\u5efa\u548c\u8fd0\u884c MXNet \u7684\u5355\u673a\u548c\u5206\u5e03\u5f0f\u4efb\u52a1\u3002

                      "},{"location":"end-user/baize/jobs/mxnet.html#_1","title":"\u4efb\u52a1\u914d\u7f6e\u4ecb\u7ecd","text":"
                      • \u4efb\u52a1\u7c7b\u578b\uff1aMXNet\uff0c\u652f\u6301\u5355\u673a\u548c\u5206\u5e03\u5f0f\u4e24\u79cd\u6a21\u5f0f\u3002
                      • \u8fd0\u884c\u73af\u5883\uff1a\u9009\u62e9\u5305\u542b MXNet \u6846\u67b6\u7684\u955c\u50cf\uff0c\u6216\u5728\u4efb\u52a1\u4e2d\u5b89\u88c5\u5fc5\u8981\u7684\u4f9d\u8d56\u3002
                      "},{"location":"end-user/baize/jobs/mxnet.html#_2","title":"\u4efb\u52a1\u8fd0\u884c\u73af\u5883","text":"

                      \u6211\u4eec\u4f7f\u7528 release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest \u955c\u50cf\u4f5c\u4e3a\u4efb\u52a1\u7684\u57fa\u7840\u8fd0\u884c\u73af\u5883\u3002\u8be5\u955c\u50cf\u9884\u88c5\u4e86 MXNet \u53ca\u5176\u76f8\u5173\u4f9d\u8d56\uff0c\u652f\u6301 GPU \u52a0\u901f\u3002

                      \u6ce8\u610f\uff1a\u4e86\u89e3\u5982\u4f55\u521b\u5efa\u548c\u7ba1\u7406\u73af\u5883\uff0c\u8bf7\u53c2\u8003 \u73af\u5883\u5217\u8868\u3002

                      "},{"location":"end-user/baize/jobs/mxnet.html#mxnet_1","title":"\u521b\u5efa MXNet \u4efb\u52a1","text":""},{"location":"end-user/baize/jobs/mxnet.html#mxnet_2","title":"MXNet \u5355\u673a\u4efb\u52a1","text":""},{"location":"end-user/baize/jobs/mxnet.html#_3","title":"\u521b\u5efa\u6b65\u9aa4","text":"
                      1. \u767b\u5f55\u5e73\u53f0\uff1a\u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3\uff0c\u8fdb\u5165 \u8bad\u7ec3\u4efb\u52a1 \u9875\u9762\u3002
                      2. \u521b\u5efa\u4efb\u52a1\uff1a\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                      3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\uff1a\u5728\u5f39\u51fa\u7684\u7a97\u53e3\u4e2d\uff0c\u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a MXNet\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002
                      4. \u586b\u5199\u4efb\u52a1\u4fe1\u606f\uff1a\u586b\u5199\u4efb\u52a1\u540d\u79f0\u548c\u63cf\u8ff0\uff0c\u4f8b\u5982 \u201cMXNet \u5355\u673a\u8bad\u7ec3\u4efb\u52a1\u201d\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a\u3002
                      5. \u914d\u7f6e\u4efb\u52a1\u53c2\u6570\uff1a\u6839\u636e\u60a8\u7684\u9700\u6c42\uff0c\u914d\u7f6e\u4efb\u52a1\u7684\u8fd0\u884c\u53c2\u6570\u3001\u955c\u50cf\u3001\u8d44\u6e90\u7b49\u4fe1\u606f\u3002
                      "},{"location":"end-user/baize/jobs/mxnet.html#_4","title":"\u8fd0\u884c\u53c2\u6570","text":"
                      • \u542f\u52a8\u547d\u4ee4\uff1apython3
                      • \u547d\u4ee4\u53c2\u6570\uff1a

                        /mxnet/mxnet/example/gluon/mnist/mnist.py --epochs 10 --cuda\n

                        \u8bf4\u660e\uff1a

                        • /mxnet/mxnet/example/gluon/mnist/mnist.py\uff1aMXNet \u63d0\u4f9b\u7684 MNIST \u624b\u5199\u6570\u5b57\u8bc6\u522b\u793a\u4f8b\u811a\u672c\u3002
                        • --epochs 10\uff1a\u8bbe\u7f6e\u8bad\u7ec3\u8f6e\u6570\u4e3a 10\u3002
                        • --cuda\uff1a\u4f7f\u7528 CUDA \u8fdb\u884c GPU \u52a0\u901f\u3002
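
Before running the full job, you can sanity-check that MXNet sees the GPU with a short sketch like this, run inside the same image (a minimal illustration, not part of the job itself):

import mxnet as mx

# Report how many GPUs MXNet can see, then allocate a small array on GPU 0.
n = mx.context.num_gpus()
print(f"GPUs visible to MXNet: {n}")
if n > 0:
    x = mx.nd.ones((2, 3), ctx=mx.gpu(0))
    print(x)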
                      "},{"location":"end-user/baize/jobs/mxnet.html#_5","title":"\u8d44\u6e90\u914d\u7f6e","text":"
                      • \u526f\u672c\u6570\uff1a1\uff08\u5355\u673a\u4efb\u52a1\uff09
                      • \u8d44\u6e90\u8bf7\u6c42\uff1a
                        • CPU\uff1a2 \u6838
                        • \u5185\u5b58\uff1a4 GiB
                        • GPU\uff1a1 \u5757
                      "},{"location":"end-user/baize/jobs/mxnet.html#mxjob","title":"\u5b8c\u6574\u7684 MXJob \u914d\u7f6e\u793a\u4f8b","text":"

                      \u4ee5\u4e0b\u662f\u5355\u673a MXJob \u7684 YAML \u914d\u7f6e\uff1a

                      apiVersion: \"kubeflow.org/v1\"\nkind: \"MXJob\"\nmetadata:\n  name: \"mxnet-single-job\"\nspec:\n  jobMode: MXTrain\n  mxReplicaSpecs:\n    Worker:\n      replicas: 1\n      restartPolicy: Never\n      template:\n        spec:\n          containers:\n            - name: mxnet\n              image: release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest\n              command: [\"python3\"]\n              args:\n                [\n                  \"/mxnet/mxnet/example/gluon/mnist/mnist.py\",\n                  \"--epochs\",\n                  \"10\",\n                  \"--cuda\",\n                ]\n              ports:\n                - containerPort: 9991\n                  name: mxjob-port\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n

Configuration breakdown:

• apiVersion and kind: the API version and resource type; here it is MXJob.
• metadata: metadata, including the job name and other information.
• spec: the detailed job configuration.
  • jobMode: set to MXTrain, indicating a training job.
  • mxReplicaSpecs: the replica configuration of the MXNet job.
    • Worker: the worker node configuration.
      • replicas: the number of replicas, 1 here.
      • restartPolicy: the restart policy, set to Never, meaning the job is not restarted on failure.
      • template: the Pod template, defining the container runtime environment and resources.
        • containers: the container list.
          • name: the container name.
          • image: the image to use.
          • command and args: the start command and arguments.
          • ports: the container port configuration.
          • resources: resource requests and limits.
                      "},{"location":"end-user/baize/jobs/mxnet.html#_6","title":"\u63d0\u4ea4\u4efb\u52a1","text":"

                      \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u63d0\u4ea4 \u6309\u94ae\uff0c\u5f00\u59cb\u8fd0\u884c MXNet \u5355\u673a\u4efb\u52a1\u3002

                      "},{"location":"end-user/baize/jobs/mxnet.html#_7","title":"\u67e5\u770b\u8fd0\u884c\u7ed3\u679c","text":"

                      \u4efb\u52a1\u63d0\u4ea4\u6210\u529f\u540e\uff0c\u60a8\u53ef\u4ee5\u8fdb\u5165 \u4efb\u52a1\u8be6\u60c5 \u9875\u9762\uff0c\u67e5\u770b\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\u548c\u4efb\u52a1\u7684\u8fd0\u884c\u72b6\u6001\u3002\u4ece\u53f3\u4e0a\u89d2\u8fdb\u5165 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\uff0c\u53ef\u4ee5\u67e5\u770b\u8fd0\u884c\u8fc7\u7a0b\u4e2d\u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                      \u793a\u4f8b\u8f93\u51fa\uff1a

                      Epoch 1: accuracy=0.95\nEpoch 2: accuracy=0.97\n...\nEpoch 10: accuracy=0.98\nTraining completed.\n

                      \u8fd9\u8868\u793a MXNet \u5355\u673a\u4efb\u52a1\u6210\u529f\u8fd0\u884c\uff0c\u6a21\u578b\u8bad\u7ec3\u5b8c\u6210\u3002

                      "},{"location":"end-user/baize/jobs/mxnet.html#mxnet_3","title":"MXNet \u5206\u5e03\u5f0f\u4efb\u52a1","text":"

                      \u5728\u5206\u5e03\u5f0f\u6a21\u5f0f\u4e0b\uff0cMXNet \u4efb\u52a1\u53ef\u4ee5\u4f7f\u7528\u591a\u53f0\u8ba1\u7b97\u8282\u70b9\u5171\u540c\u5b8c\u6210\u8bad\u7ec3\uff0c\u63d0\u9ad8\u8bad\u7ec3\u6548\u7387\u3002

                      "},{"location":"end-user/baize/jobs/mxnet.html#_8","title":"\u521b\u5efa\u6b65\u9aa4","text":"
                      1. \u767b\u5f55\u5e73\u53f0\uff1a\u540c\u4e0a\u3002
                      2. \u521b\u5efa\u4efb\u52a1\uff1a\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                      3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\uff1a\u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a MXNet\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002
                      4. \u586b\u5199\u4efb\u52a1\u4fe1\u606f\uff1a\u586b\u5199\u4efb\u52a1\u540d\u79f0\u548c\u63cf\u8ff0\uff0c\u4f8b\u5982 \u201cMXNet \u5206\u5e03\u5f0f\u8bad\u7ec3\u4efb\u52a1\u201d\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a\u3002
                      5. \u914d\u7f6e\u4efb\u52a1\u53c2\u6570\uff1a\u6839\u636e\u9700\u6c42\uff0c\u914d\u7f6e\u8fd0\u884c\u53c2\u6570\u3001\u955c\u50cf\u3001\u8d44\u6e90\u7b49\u3002
                      "},{"location":"end-user/baize/jobs/mxnet.html#_9","title":"\u8fd0\u884c\u53c2\u6570","text":"
                      • \u542f\u52a8\u547d\u4ee4\uff1apython3
                      • \u547d\u4ee4\u53c2\u6570\uff1a

                        /mxnet/mxnet/example/image-classification/train_mnist.py --num-epochs 10 --num-layers 2 --kv-store dist_device_sync --gpus 0\n

                        \u8bf4\u660e\uff1a

                        • /mxnet/mxnet/example/image-classification/train_mnist.py\uff1aMXNet \u63d0\u4f9b\u7684\u56fe\u50cf\u5206\u7c7b\u793a\u4f8b\u811a\u672c\u3002
                        • --num-epochs 10\uff1a\u8bad\u7ec3\u8f6e\u6570\u4e3a 10\u3002
                        • --num-layers 2\uff1a\u6a21\u578b\u7684\u5c42\u6570\u4e3a 2\u3002
                        • --kv-store dist_device_sync\uff1a\u4f7f\u7528\u5206\u5e03\u5f0f\u8bbe\u5907\u540c\u6b65\u6a21\u5f0f\u3002
                        • --gpus 0\uff1a\u4f7f\u7528 GPU \u8fdb\u884c\u52a0\u901f\u3002
                      "},{"location":"end-user/baize/jobs/mxnet.html#_10","title":"\u8d44\u6e90\u914d\u7f6e","text":"
                      • \u4efb\u52a1\u526f\u672c\u6570\uff1a3\uff08\u5305\u62ec Scheduler\u3001Server \u548c Worker\uff09
                      • \u5404\u89d2\u8272\u8d44\u6e90\u8bf7\u6c42\uff1a
                        • Scheduler\uff08\u8c03\u5ea6\u5668\uff09\uff1a
                          • \u526f\u672c\u6570\uff1a1
                          • \u8d44\u6e90\u8bf7\u6c42\uff1a
                            • CPU\uff1a2 \u6838
                            • \u5185\u5b58\uff1a4 GiB
                            • GPU\uff1a1 \u5757
                        • Server\uff08\u53c2\u6570\u670d\u52a1\u5668\uff09\uff1a
                          • \u526f\u672c\u6570\uff1a1
                          • \u8d44\u6e90\u8bf7\u6c42\uff1a
                            • CPU\uff1a2 \u6838
                            • \u5185\u5b58\uff1a4 GiB
                            • GPU\uff1a1 \u5757
                        • Worker\uff08\u5de5\u4f5c\u8282\u70b9\uff09\uff1a
                          • \u526f\u672c\u6570\uff1a1
                          • \u8d44\u6e90\u8bf7\u6c42\uff1a
                            • CPU\uff1a2 \u6838
                            • \u5185\u5b58\uff1a4 GiB
                            • GPU\uff1a1 \u5757
                      "},{"location":"end-user/baize/jobs/mxnet.html#mxjob_1","title":"\u5b8c\u6574\u7684 MXJob \u914d\u7f6e\u793a\u4f8b","text":"

                      \u4ee5\u4e0b\u662f\u5206\u5e03\u5f0f MXJob \u7684 YAML \u914d\u7f6e\uff1a

                      apiVersion: \"kubeflow.org/v1\"\nkind: \"MXJob\"\nmetadata:\n  name: \"mxnet-job\"\nspec:\n  jobMode: MXTrain\n  mxReplicaSpecs:\n    Scheduler:\n      replicas: 1\n      restartPolicy: Never\n      template:\n        spec:\n          containers:\n            - name: mxnet\n              image: release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest\n              ports:\n                - containerPort: 9991\n                  name: mxjob-port\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n    Server:\n      replicas: 1\n      restartPolicy: Never\n      template:\n        spec:\n          containers:\n            - name: mxnet\n              image: release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest\n              ports:\n                - containerPort: 9991\n                  name: mxjob-port\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n    Worker:\n      replicas: 1\n      restartPolicy: Never\n      template:\n        spec:\n          containers:\n            - name: mxnet\n              image: release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest\n              command: [\"python3\"]\n              args:\n                [\n                  \"/mxnet/mxnet/example/image-classification/train_mnist.py\",\n                  \"--num-epochs\",\n                  \"10\",\n                  \"--num-layers\",\n                  \"2\",\n                  \"--kv-store\",\n                  \"dist_device_sync\",\n                  \"--gpus\",\n                  \"0\",\n                ]\n              ports:\n                - containerPort: 9991\n                  name: mxjob-port\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n

Configuration breakdown:

• Scheduler: coordinates job scheduling across the nodes in the cluster.
• Server (parameter server): stores and updates model parameters, implementing distributed parameter synchronization.
• Worker: actually performs the training work.
• Resource configuration: allocate appropriate resources to each role so the job runs smoothly.
                      "},{"location":"end-user/baize/jobs/mxnet.html#_11","title":"\u8bbe\u7f6e\u4efb\u52a1\u526f\u672c\u6570","text":"

                      \u5728\u521b\u5efa MXNet \u5206\u5e03\u5f0f\u4efb\u52a1\u65f6\uff0c\u9700\u8981\u6839\u636e mxReplicaSpecs \u4e2d\u914d\u7f6e\u7684\u526f\u672c\u6570\uff0c\u6b63\u786e\u8bbe\u7f6e \u4efb\u52a1\u526f\u672c\u6570\u3002

                      • \u603b\u526f\u672c\u6570 = Scheduler \u526f\u672c\u6570 + Server \u526f\u672c\u6570 + Worker \u526f\u672c\u6570
                      • \u672c\u793a\u4f8b\u4e2d\uff1a
                        • Scheduler \u526f\u672c\u6570\uff1a1
                        • Server \u526f\u672c\u6570\uff1a1
                        • Worker \u526f\u672c\u6570\uff1a1
                        • \u603b\u526f\u672c\u6570\uff1a1 + 1 + 1 = 3

                      \u56e0\u6b64\uff0c\u5728\u4efb\u52a1\u914d\u7f6e\u4e2d\uff0c\u9700\u8981\u5c06 \u4efb\u52a1\u526f\u672c\u6570 \u8bbe\u7f6e\u4e3a 3\u3002

                      "},{"location":"end-user/baize/jobs/mxnet.html#_12","title":"\u63d0\u4ea4\u4efb\u52a1","text":"

                      \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u63d0\u4ea4 \u6309\u94ae\uff0c\u5f00\u59cb\u8fd0\u884c MXNet \u5206\u5e03\u5f0f\u4efb\u52a1\u3002

                      "},{"location":"end-user/baize/jobs/mxnet.html#_13","title":"\u67e5\u770b\u8fd0\u884c\u7ed3\u679c","text":"

                      \u8fdb\u5165 \u4efb\u52a1\u8be6\u60c5 \u9875\u9762\uff0c\u67e5\u770b\u4efb\u52a1\u7684\u8fd0\u884c\u72b6\u6001\u548c\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\u3002\u60a8\u53ef\u4ee5\u67e5\u770b\u6bcf\u4e2a\u89d2\u8272\uff08Scheduler\u3001Server\u3001Worker\uff09\u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                      \u793a\u4f8b\u8f93\u51fa\uff1a

                      INFO:root:Epoch[0] Batch [50]     Speed: 1000 samples/sec   accuracy=0.85\nINFO:root:Epoch[0] Batch [100]    Speed: 1200 samples/sec   accuracy=0.87\n...\nINFO:root:Epoch[9] Batch [100]    Speed: 1300 samples/sec   accuracy=0.98\nTraining completed.\n

                      \u8fd9\u8868\u793a MXNet \u5206\u5e03\u5f0f\u4efb\u52a1\u6210\u529f\u8fd0\u884c\uff0c\u6a21\u578b\u8bad\u7ec3\u5b8c\u6210\u3002

                      "},{"location":"end-user/baize/jobs/mxnet.html#_14","title":"\u5c0f\u7ed3","text":"

                      \u901a\u8fc7\u672c\u6559\u7a0b\uff0c\u60a8\u5b66\u4e60\u4e86\u5982\u4f55\u5728 AI Lab \u5e73\u53f0\u4e0a\u521b\u5efa\u548c\u8fd0\u884c MXNet \u7684\u5355\u673a\u548c\u5206\u5e03\u5f0f\u4efb\u52a1\u3002\u6211\u4eec\u8be6\u7ec6\u4ecb\u7ecd\u4e86 MXJob \u7684\u914d\u7f6e\u65b9\u5f0f\uff0c\u4ee5\u53ca\u5982\u4f55\u5728\u4efb\u52a1\u4e2d\u6307\u5b9a\u8fd0\u884c\u7684\u547d\u4ee4\u548c\u8d44\u6e90\u9700\u6c42\u3002\u5e0c\u671b\u672c\u6559\u7a0b\u5bf9\u60a8\u6709\u6240\u5e2e\u52a9\uff0c\u5982\u6709\u4efb\u4f55\u95ee\u9898\uff0c\u8bf7\u53c2\u8003\u5e73\u53f0\u63d0\u4f9b\u7684\u5176\u4ed6\u6587\u6863\u6216\u8054\u7cfb\u6280\u672f\u652f\u6301\u3002

                      "},{"location":"end-user/baize/jobs/mxnet.html#_15","title":"\u9644\u5f55","text":"
                      • \u6ce8\u610f\u4e8b\u9879\uff1a

                        • \u786e\u4fdd\u60a8\u4f7f\u7528\u7684\u955c\u50cf\u5305\u542b\u6240\u9700\u7684 MXNet \u7248\u672c\u548c\u4f9d\u8d56\u3002
                        • \u6839\u636e\u5b9e\u9645\u9700\u6c42\u8c03\u6574\u8d44\u6e90\u914d\u7f6e\uff0c\u907f\u514d\u8d44\u6e90\u4e0d\u8db3\u6216\u6d6a\u8d39\u3002
                        • \u5982\u9700\u4f7f\u7528\u81ea\u5b9a\u4e49\u7684\u8bad\u7ec3\u811a\u672c\uff0c\u8bf7\u4fee\u6539\u542f\u52a8\u547d\u4ee4\u548c\u53c2\u6570\u3002
                      • \u53c2\u8003\u6587\u6863\uff1a

                        • MXNet \u5b98\u65b9\u6587\u6863
                        • Kubeflow MXJob \u6307\u5357
                      "},{"location":"end-user/baize/jobs/paddle.html","title":"PaddlePaddle \u4efb\u52a1","text":"

                      PaddlePaddle\uff08\u98de\u6868\uff09\u662f\u767e\u5ea6\u5f00\u6e90\u7684\u6df1\u5ea6\u5b66\u4e60\u5e73\u53f0\uff0c\u652f\u6301\u4e30\u5bcc\u7684\u795e\u7ecf\u7f51\u7edc\u6a21\u578b\u548c\u5206\u5e03\u5f0f\u8bad\u7ec3\u65b9\u5f0f\u3002PaddlePaddle \u4efb\u52a1\u53ef\u4ee5\u901a\u8fc7\u5355\u673a\u6216\u5206\u5e03\u5f0f\u6a21\u5f0f\u8fdb\u884c\u8bad\u7ec3\u3002\u5728 AI Lab \u5e73\u53f0\u4e2d\uff0c\u6211\u4eec\u63d0\u4f9b\u4e86\u5bf9 PaddlePaddle \u4efb\u52a1\u7684\u652f\u6301\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u754c\u9762\u5316\u64cd\u4f5c\uff0c\u5feb\u901f\u521b\u5efa PaddlePaddle \u4efb\u52a1\uff0c\u8fdb\u884c\u6a21\u578b\u8bad\u7ec3\u3002

                      \u672c\u6559\u7a0b\u5c06\u6307\u5bfc\u60a8\u5982\u4f55\u5728 AI Lab \u5e73\u53f0\u4e0a\u521b\u5efa\u548c\u8fd0\u884c PaddlePaddle \u7684\u5355\u673a\u548c\u5206\u5e03\u5f0f\u4efb\u52a1\u3002

                      "},{"location":"end-user/baize/jobs/paddle.html#_1","title":"\u4efb\u52a1\u914d\u7f6e\u4ecb\u7ecd","text":"
                      • \u4efb\u52a1\u7c7b\u578b\uff1aPaddlePaddle\uff0c\u652f\u6301\u5355\u673a\u548c\u5206\u5e03\u5f0f\u4e24\u79cd\u6a21\u5f0f\u3002
                      • \u8fd0\u884c\u73af\u5883\uff1a\u9009\u62e9\u5305\u542b PaddlePaddle \u6846\u67b6\u7684\u955c\u50cf\uff0c\u6216\u5728\u4efb\u52a1\u4e2d\u5b89\u88c5\u5fc5\u8981\u7684\u4f9d\u8d56\u3002
                      "},{"location":"end-user/baize/jobs/paddle.html#_2","title":"\u4efb\u52a1\u8fd0\u884c\u73af\u5883","text":"

                      \u6211\u4eec\u4f7f\u7528 registry.baidubce.com/paddlepaddle/paddle:2.4.0rc0-cpu \u955c\u50cf\u4f5c\u4e3a\u4efb\u52a1\u7684\u57fa\u7840\u8fd0\u884c\u73af\u5883\u3002\u8be5\u955c\u50cf\u9884\u88c5\u4e86 PaddlePaddle \u6846\u67b6\uff0c\u9002\u7528\u4e8e CPU \u8ba1\u7b97\u3002\u5982\u679c\u9700\u8981\u4f7f\u7528 GPU\uff0c\u8bf7\u9009\u62e9\u5bf9\u5e94\u7684 GPU \u7248\u672c\u955c\u50cf\u3002

                      \u6ce8\u610f\uff1a\u4e86\u89e3\u5982\u4f55\u521b\u5efa\u548c\u7ba1\u7406\u73af\u5883\uff0c\u8bf7\u53c2\u8003 \u73af\u5883\u5217\u8868\u3002

                      "},{"location":"end-user/baize/jobs/paddle.html#paddlepaddle_1","title":"\u521b\u5efa PaddlePaddle \u4efb\u52a1","text":""},{"location":"end-user/baize/jobs/paddle.html#paddlepaddle_2","title":"PaddlePaddle \u5355\u673a\u8bad\u7ec3\u4efb\u52a1","text":""},{"location":"end-user/baize/jobs/paddle.html#_3","title":"\u521b\u5efa\u6b65\u9aa4","text":"
                      1. \u767b\u5f55\u5e73\u53f0\uff1a\u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3\uff0c\u8fdb\u5165 \u8bad\u7ec3\u4efb\u52a1 \u9875\u9762\u3002
                      2. \u521b\u5efa\u4efb\u52a1\uff1a\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                      3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\uff1a\u5728\u5f39\u51fa\u7684\u7a97\u53e3\u4e2d\uff0c\u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a PaddlePaddle\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002
                      4. \u586b\u5199\u4efb\u52a1\u4fe1\u606f\uff1a\u586b\u5199\u4efb\u52a1\u540d\u79f0\u548c\u63cf\u8ff0\uff0c\u4f8b\u5982 \u201cPaddlePaddle \u5355\u673a\u8bad\u7ec3\u4efb\u52a1\u201d\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a\u3002
                      5. \u914d\u7f6e\u4efb\u52a1\u53c2\u6570\uff1a\u6839\u636e\u60a8\u7684\u9700\u6c42\uff0c\u914d\u7f6e\u4efb\u52a1\u7684\u8fd0\u884c\u53c2\u6570\u3001\u955c\u50cf\u3001\u8d44\u6e90\u7b49\u4fe1\u606f\u3002
                      "},{"location":"end-user/baize/jobs/paddle.html#_4","title":"\u8fd0\u884c\u53c2\u6570","text":"
                      • \u542f\u52a8\u547d\u4ee4\uff1apython
                      • \u547d\u4ee4\u53c2\u6570\uff1a

                        -m paddle.distributed.launch run_check\n

                        \u8bf4\u660e\uff1a

                        • -m paddle.distributed.launch\uff1a\u4f7f\u7528 PaddlePaddle \u63d0\u4f9b\u7684\u5206\u5e03\u5f0f\u542f\u52a8\u6a21\u5757\uff0c\u5373\u4f7f\u5728\u5355\u673a\u6a21\u5f0f\u4e0b\u4e5f\u53ef\u4ee5\u4f7f\u7528\uff0c\u65b9\u4fbf\u5c06\u6765\u8fc1\u79fb\u5230\u5206\u5e03\u5f0f\u3002
                        • run_check\uff1aPaddlePaddle \u63d0\u4f9b\u7684\u6d4b\u8bd5\u811a\u672c\uff0c\u7528\u4e8e\u68c0\u67e5\u5206\u5e03\u5f0f\u73af\u5883\u662f\u5426\u6b63\u5e38\u3002
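
The same kind of check can also be run directly from the Python interpreter; paddle.utils.run_check() performs a similar installation check, shown here as a minimal sketch:

import paddle

# Verifies that PaddlePaddle is installed correctly and, when GPUs are
# available, that single- and multi-GPU communication works.
paddle.utils.run_check()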
                      "},{"location":"end-user/baize/jobs/paddle.html#_5","title":"\u8d44\u6e90\u914d\u7f6e","text":"
                      • \u526f\u672c\u6570\uff1a1\uff08\u5355\u673a\u4efb\u52a1\uff09
                      • \u8d44\u6e90\u8bf7\u6c42\uff1a
                        • CPU\uff1a\u6839\u636e\u9700\u6c42\u8bbe\u7f6e\uff0c\u5efa\u8bae\u81f3\u5c11 1 \u6838
                        • \u5185\u5b58\uff1a\u6839\u636e\u9700\u6c42\u8bbe\u7f6e\uff0c\u5efa\u8bae\u81f3\u5c11 2 GiB
                        • GPU\uff1a\u5982\u679c\u9700\u8981\u4f7f\u7528 GPU\uff0c\u9009\u62e9 GPU \u7248\u672c\u7684\u955c\u50cf\uff0c\u5e76\u5206\u914d\u76f8\u5e94\u7684 GPU \u8d44\u6e90
                      "},{"location":"end-user/baize/jobs/paddle.html#paddlejob","title":"\u5b8c\u6574\u7684 PaddleJob \u914d\u7f6e\u793a\u4f8b","text":"

                      \u4ee5\u4e0b\u662f\u5355\u673a PaddleJob \u7684 YAML \u914d\u7f6e\uff1a

apiVersion: kubeflow.org/v1
kind: PaddleJob
metadata:
    name: paddle-simple-cpu
    namespace: kubeflow
spec:
    paddleReplicaSpecs:
        Worker:
            replicas: 1
            restartPolicy: OnFailure
            template:
                spec:
                    containers:
                        - name: paddle
                          image: registry.baidubce.com/paddlepaddle/paddle:2.4.0rc0-cpu
                          command:
                              [
                                  'python',
                                  '-m',
                                  'paddle.distributed.launch',
                                  'run_check',
                              ]

Configuration breakdown:

• apiVersion and kind: the API version and resource type; here it is PaddleJob.
• metadata: metadata, including the job name and namespace.
• spec: the detailed job configuration.
  • paddleReplicaSpecs: the replica configuration of the PaddlePaddle job.
    • Worker: the worker node configuration.
      • replicas: the number of replicas, 1 here for single-node training.
      • restartPolicy: the restart policy, set to OnFailure, meaning the job restarts automatically on failure.
      • template: the Pod template, defining the container runtime environment and resources.
        • containers: the container list.
          • name: the container name.
          • image: the image to use.
          • command: the start command and arguments.
                      "},{"location":"end-user/baize/jobs/paddle.html#_6","title":"\u63d0\u4ea4\u4efb\u52a1","text":"

                      \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u63d0\u4ea4 \u6309\u94ae\uff0c\u5f00\u59cb\u8fd0\u884c PaddlePaddle \u5355\u673a\u4efb\u52a1\u3002

                      "},{"location":"end-user/baize/jobs/paddle.html#_7","title":"\u67e5\u770b\u8fd0\u884c\u7ed3\u679c","text":"

                      \u4efb\u52a1\u63d0\u4ea4\u6210\u529f\u540e\uff0c\u60a8\u53ef\u4ee5\u8fdb\u5165 \u4efb\u52a1\u8be6\u60c5 \u9875\u9762\uff0c\u67e5\u770b\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\u548c\u4efb\u52a1\u7684\u8fd0\u884c\u72b6\u6001\u3002\u4ece\u53f3\u4e0a\u89d2\u8fdb\u5165 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\uff0c\u53ef\u4ee5\u67e5\u770b\u8fd0\u884c\u8fc7\u7a0b\u4e2d\u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                      \u793a\u4f8b\u8f93\u51fa\uff1a

                      run check success, PaddlePaddle is installed correctly on this node :)\n

                      \u8fd9\u8868\u793a PaddlePaddle \u5355\u673a\u4efb\u52a1\u6210\u529f\u8fd0\u884c\uff0c\u73af\u5883\u914d\u7f6e\u6b63\u5e38\u3002

                      "},{"location":"end-user/baize/jobs/paddle.html#paddlepaddle_3","title":"PaddlePaddle \u5206\u5e03\u5f0f\u8bad\u7ec3\u4efb\u52a1","text":"

                      \u5728\u5206\u5e03\u5f0f\u6a21\u5f0f\u4e0b\uff0cPaddlePaddle \u4efb\u52a1\u53ef\u4ee5\u4f7f\u7528\u591a\u53f0\u8ba1\u7b97\u8282\u70b9\u5171\u540c\u5b8c\u6210\u8bad\u7ec3\uff0c\u63d0\u9ad8\u8bad\u7ec3\u6548\u7387\u3002

                      "},{"location":"end-user/baize/jobs/paddle.html#_8","title":"\u521b\u5efa\u6b65\u9aa4","text":"
                      1. \u767b\u5f55\u5e73\u53f0\uff1a\u540c\u4e0a\u3002
                      2. \u521b\u5efa\u4efb\u52a1\uff1a\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                      3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\uff1a\u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a PaddlePaddle\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002
                      4. \u586b\u5199\u4efb\u52a1\u4fe1\u606f\uff1a\u586b\u5199\u4efb\u52a1\u540d\u79f0\u548c\u63cf\u8ff0\uff0c\u4f8b\u5982 \u201cPaddlePaddle \u5206\u5e03\u5f0f\u8bad\u7ec3\u4efb\u52a1\u201d\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a\u3002
                      5. \u914d\u7f6e\u4efb\u52a1\u53c2\u6570\uff1a\u6839\u636e\u9700\u6c42\uff0c\u914d\u7f6e\u8fd0\u884c\u53c2\u6570\u3001\u955c\u50cf\u3001\u8d44\u6e90\u7b49\u3002
                      "},{"location":"end-user/baize/jobs/paddle.html#_9","title":"\u8fd0\u884c\u53c2\u6570","text":"
                      • \u542f\u52a8\u547d\u4ee4\uff1apython
                      • \u547d\u4ee4\u53c2\u6570\uff1a

                        -m paddle.distributed.launch train.py --epochs=10\n

                        \u8bf4\u660e\uff1a

                        • -m paddle.distributed.launch\uff1a\u4f7f\u7528 PaddlePaddle \u63d0\u4f9b\u7684\u5206\u5e03\u5f0f\u542f\u52a8\u6a21\u5757\u3002
                        • train.py\uff1a\u60a8\u7684\u8bad\u7ec3\u811a\u672c\uff0c\u9700\u8981\u653e\u5728\u955c\u50cf\u4e2d\u6216\u6302\u8f7d\u5230\u5bb9\u5668\u5185\u3002
                        • --epochs=10\uff1a\u8bad\u7ec3\u7684\u8f6e\u6570\uff0c\u8fd9\u91cc\u8bbe\u7f6e\u4e3a 10\u3002
                      "},{"location":"end-user/baize/jobs/paddle.html#_10","title":"\u8d44\u6e90\u914d\u7f6e","text":"
                      • \u4efb\u52a1\u526f\u672c\u6570\uff1a\u6839\u636e Worker \u526f\u672c\u6570\u8bbe\u7f6e\uff0c\u8fd9\u91cc\u4e3a 2\u3002
                      • \u8d44\u6e90\u8bf7\u6c42\uff1a
                        • CPU\uff1a\u6839\u636e\u9700\u6c42\u8bbe\u7f6e\uff0c\u5efa\u8bae\u81f3\u5c11 1 \u6838
                        • \u5185\u5b58\uff1a\u6839\u636e\u9700\u6c42\u8bbe\u7f6e\uff0c\u5efa\u8bae\u81f3\u5c11 2 GiB
                        • GPU\uff1a\u5982\u679c\u9700\u8981\u4f7f\u7528 GPU\uff0c\u9009\u62e9 GPU \u7248\u672c\u7684\u955c\u50cf\uff0c\u5e76\u5206\u914d\u76f8\u5e94\u7684 GPU \u8d44\u6e90
                      "},{"location":"end-user/baize/jobs/paddle.html#paddlejob_1","title":"\u5b8c\u6574\u7684 PaddleJob \u914d\u7f6e\u793a\u4f8b","text":"

                      \u4ee5\u4e0b\u662f\u5206\u5e03\u5f0f PaddleJob \u7684 YAML \u914d\u7f6e\uff1a

                      apiVersion: kubeflow.org/v1\nkind: PaddleJob\nmetadata:\n    name: paddle-distributed-job\n    namespace: kubeflow\nspec:\n    paddleReplicaSpecs:\n        Worker:\n            replicas: 2\n            restartPolicy: OnFailure\n            template:\n                spec:\n                    containers:\n                        - name: paddle\n                          image: registry.baidubce.com/paddlepaddle/paddle:2.4.0rc0-cpu\n                          command:\n                              [\n                                  'python',\n                                  '-m',\n                                  'paddle.distributed.launch',\n                                  'train.py',\n                              ]\n                          args:\n                              - '--epochs=10'\n

                      \u914d\u7f6e\u89e3\u6790\uff1a

                      • Worker\uff1a
                        • replicas\uff1a\u526f\u672c\u6570\uff0c\u8bbe\u7f6e\u4e3a 2\uff0c\u8868\u793a\u4f7f\u7528 2 \u4e2a\u5de5\u4f5c\u8282\u70b9\u8fdb\u884c\u5206\u5e03\u5f0f\u8bad\u7ec3\u3002
                        • \u5176\u4ed6\u914d\u7f6e\u4e0e\u5355\u673a\u6a21\u5f0f\u7c7b\u4f3c\u3002
                      "},{"location":"end-user/baize/jobs/paddle.html#_11","title":"\u8bbe\u7f6e\u4efb\u52a1\u526f\u672c\u6570","text":"

                      \u5728\u521b\u5efa PaddlePaddle \u5206\u5e03\u5f0f\u4efb\u52a1\u65f6\uff0c\u9700\u8981\u6839\u636e paddleReplicaSpecs \u4e2d\u914d\u7f6e\u7684\u526f\u672c\u6570\uff0c\u6b63\u786e\u8bbe\u7f6e \u4efb\u52a1\u526f\u672c\u6570\u3002

                      • \u603b\u526f\u672c\u6570 = Worker \u526f\u672c\u6570
                      • \u672c\u793a\u4f8b\u4e2d\uff1a
                        • Worker \u526f\u672c\u6570\uff1a2
                        • \u603b\u526f\u672c\u6570\uff1a2

                      \u56e0\u6b64\uff0c\u5728\u4efb\u52a1\u914d\u7f6e\u4e2d\uff0c\u9700\u8981\u5c06 \u4efb\u52a1\u526f\u672c\u6570 \u8bbe\u7f6e\u4e3a 2\u3002

                      "},{"location":"end-user/baize/jobs/paddle.html#_12","title":"\u63d0\u4ea4\u4efb\u52a1","text":"

                      \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u63d0\u4ea4 \u6309\u94ae\uff0c\u5f00\u59cb\u8fd0\u884c PaddlePaddle \u5206\u5e03\u5f0f\u4efb\u52a1\u3002

                      "},{"location":"end-user/baize/jobs/paddle.html#_13","title":"\u67e5\u770b\u8fd0\u884c\u7ed3\u679c","text":"

                      \u8fdb\u5165 \u4efb\u52a1\u8be6\u60c5 \u9875\u9762\uff0c\u67e5\u770b\u4efb\u52a1\u7684\u8fd0\u884c\u72b6\u6001\u548c\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\u3002\u60a8\u53ef\u4ee5\u67e5\u770b\u6bcf\u4e2a\u5de5\u4f5c\u8282\u70b9\u7684\u65e5\u5fd7\u8f93\u51fa\uff0c\u786e\u8ba4\u5206\u5e03\u5f0f\u8bad\u7ec3\u662f\u5426\u6b63\u5e38\u8fd0\u884c\u3002

                      \u793a\u4f8b\u8f93\u51fa\uff1a

                      Worker 0: Epoch 1, Batch 100, Loss 0.5\nWorker 1: Epoch 1, Batch 100, Loss 0.6\n...\nTraining completed.\n

                      \u8fd9\u8868\u793a PaddlePaddle \u5206\u5e03\u5f0f\u4efb\u52a1\u6210\u529f\u8fd0\u884c\uff0c\u6a21\u578b\u8bad\u7ec3\u5b8c\u6210\u3002

                      "},{"location":"end-user/baize/jobs/paddle.html#_14","title":"\u5c0f\u7ed3","text":"

                      \u901a\u8fc7\u672c\u6559\u7a0b\uff0c\u60a8\u5b66\u4e60\u4e86\u5982\u4f55\u5728 AI Lab \u5e73\u53f0\u4e0a\u521b\u5efa\u548c\u8fd0\u884c PaddlePaddle \u7684\u5355\u673a\u548c\u5206\u5e03\u5f0f\u4efb\u52a1\u3002\u6211\u4eec\u8be6\u7ec6\u4ecb\u7ecd\u4e86 PaddleJob \u7684\u914d\u7f6e\u65b9\u5f0f\uff0c\u4ee5\u53ca\u5982\u4f55\u5728\u4efb\u52a1\u4e2d\u6307\u5b9a\u8fd0\u884c\u7684\u547d\u4ee4\u548c\u8d44\u6e90\u9700\u6c42\u3002\u5e0c\u671b\u672c\u6559\u7a0b\u5bf9\u60a8\u6709\u6240\u5e2e\u52a9\uff0c\u5982\u6709\u4efb\u4f55\u95ee\u9898\uff0c\u8bf7\u53c2\u8003\u5e73\u53f0\u63d0\u4f9b\u7684\u5176\u4ed6\u6587\u6863\u6216\u8054\u7cfb\u6280\u672f\u652f\u6301\u3002

                      "},{"location":"end-user/baize/jobs/paddle.html#_15","title":"\u9644\u5f55","text":"
                      • \u6ce8\u610f\u4e8b\u9879\uff1a

                        • \u8bad\u7ec3\u811a\u672c\uff1a\u786e\u4fdd train.py\uff08\u6216\u5176\u4ed6\u8bad\u7ec3\u811a\u672c\uff09\u5728\u5bb9\u5668\u5185\u5b58\u5728\u3002\u60a8\u53ef\u4ee5\u901a\u8fc7\u81ea\u5b9a\u4e49\u955c\u50cf\u3001\u6302\u8f7d\u6301\u4e45\u5316\u5b58\u50a8\u7b49\u65b9\u5f0f\u5c06\u811a\u672c\u653e\u5165\u5bb9\u5668\u3002
                        • \u955c\u50cf\u9009\u62e9\uff1a\u6839\u636e\u60a8\u7684\u9700\u6c42\u9009\u62e9\u5408\u9002\u7684\u955c\u50cf\uff0c\u4f8b\u5982\u4f7f\u7528 GPU \u65f6\u9009\u62e9 paddle:2.4.0rc0-gpu \u7b49\u3002
                        • \u53c2\u6570\u8c03\u6574\uff1a\u53ef\u4ee5\u901a\u8fc7\u4fee\u6539 command \u548c args \u6765\u4f20\u9012\u4e0d\u540c\u7684\u8bad\u7ec3\u53c2\u6570\u3002
                      • \u53c2\u8003\u6587\u6863\uff1a

                        • PaddlePaddle \u5b98\u65b9\u6587\u6863
                        • Kubeflow PaddleJob \u6307\u5357
                      "},{"location":"end-user/baize/jobs/pytorch.html","title":"Pytorch \u4efb\u52a1","text":"

                      Pytorch \u662f\u4e00\u4e2a\u5f00\u6e90\u7684\u6df1\u5ea6\u5b66\u4e60\u6846\u67b6\uff0c\u5b83\u63d0\u4f9b\u4e86\u4e00\u4e2a\u7075\u6d3b\u7684\u8bad\u7ec3\u548c\u90e8\u7f72\u73af\u5883\u3002 Pytorch \u4efb\u52a1\u662f\u4e00\u4e2a\u4f7f\u7528 Pytorch \u6846\u67b6\u7684\u4efb\u52a1\u3002

                      \u5728 AI Lab \u4e2d\uff0c\u6211\u4eec\u63d0\u4f9b\u4e86 Pytorch \u4efb\u52a1\u652f\u6301\u548c\u9002\u914d\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u754c\u9762\u5316\u64cd\u4f5c\uff0c \u5feb\u901f\u521b\u5efa Pytorch \u4efb\u52a1\uff0c\u8fdb\u884c\u6a21\u578b\u8bad\u7ec3\u3002

                      "},{"location":"end-user/baize/jobs/pytorch.html#_1","title":"\u4efb\u52a1\u914d\u7f6e\u4ecb\u7ecd","text":"
                      • \u4efb\u52a1\u7c7b\u578b\u540c\u65f6\u652f\u6301 Pytorch \u5355\u673a \u548c Pytorch \u5206\u5e03\u5f0f \u4e24\u79cd\u6a21\u5f0f\u3002
                      • \u8fd0\u884c\u955c\u50cf\u5185\u5df2\u7ecf\u9ed8\u8ba4\u652f\u6301 Pytorch \u6846\u67b6\uff0c\u65e0\u9700\u989d\u5916\u5b89\u88c5\u3002
                      "},{"location":"end-user/baize/jobs/pytorch.html#_2","title":"\u4efb\u52a1\u8fd0\u884c\u73af\u5883","text":"

                      \u5728\u8fd9\u91cc\u6211\u4eec\u4f7f\u7528 baize-notebook \u57fa\u7840\u955c\u50cf \u548c \u5173\u8054\u73af\u5883 \u7684\u65b9\u5f0f\u6765\u4f5c\u4e3a\u4efb\u52a1\u57fa\u7840\u8fd0\u884c\u73af\u5883\u3002

                      \u4e86\u89e3\u5982\u4f55\u521b\u5efa\u73af\u5883\uff0c\u8bf7\u53c2\u8003\u73af\u5883\u5217\u8868\u3002

                      "},{"location":"end-user/baize/jobs/pytorch.html#_3","title":"\u521b\u5efa\u4efb\u52a1","text":""},{"location":"end-user/baize/jobs/pytorch.html#pytorch_1","title":"Pytorch \u5355\u673a\u4efb\u52a1","text":"
                      1. \u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3 \uff0c\u8fdb\u5165 \u8bad\u7ec3\u4efb\u52a1 \u9875\u9762\u3002
                      2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                      3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a Pytorch \u5355\u673a\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002
                      4. \u586b\u5199\u4efb\u52a1\u540d\u79f0\u3001\u63cf\u8ff0\u540e\u70b9\u51fb \u786e\u5b9a \u3002
                      "},{"location":"end-user/baize/jobs/pytorch.html#_4","title":"\u8fd0\u884c\u53c2\u6570","text":"
                      • \u542f\u52a8\u547d\u4ee4 \u4f7f\u7528 bash
                      • \u547d\u4ee4\u53c2\u6570\u4f7f\u7528
                      import torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n# \u5b9a\u4e49\u4e00\u4e2a\u7b80\u5355\u7684\u795e\u7ecf\u7f51\u7edc\nclass SimpleNet(nn.Module):\n    def __init__(self):\n        super(SimpleNet, self).__init__()\n        self.fc = nn.Linear(10, 1)\n\n    def forward(self, x):\n        return self.fc(x)\n\n# \u521b\u5efa\u6a21\u578b\u3001\u635f\u5931\u51fd\u6570\u548c\u4f18\u5316\u5668\nmodel = SimpleNet()\ncriterion = nn.MSELoss()\noptimizer = optim.SGD(model.parameters(), lr=0.01)\n\n# \u751f\u6210\u4e00\u4e9b\u968f\u673a\u6570\u636e\nx = torch.randn(100, 10)\ny = torch.randn(100, 1)\n\n# \u8bad\u7ec3\u6a21\u578b\nfor epoch in range(100):\n    # \u524d\u5411\u4f20\u64ad\n    outputs = model(x)\n    loss = criterion(outputs, y)\n\n    # \u53cd\u5411\u4f20\u64ad\u548c\u4f18\u5316\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n\n    if (epoch + 1) % 10 == 0:\n        print(f'Epoch [{epoch+1}/100], Loss: {loss.item():.4f}')\n\nprint('Training finished.')\n
                      "},{"location":"end-user/baize/jobs/pytorch.html#_5","title":"\u8fd0\u884c\u7ed3\u679c","text":"

                      \u4efb\u52a1\u63d0\u4ea4\u6210\u529f\uff0c\u6211\u4eec\u53ef\u4ee5\u8fdb\u5165\u4efb\u52a1\u8be6\u60c5\u67e5\u770b\u5230\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\uff0c\u4ece\u53f3\u4e0a\u89d2\u53bb\u5f80 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5 \uff0c\u53ef\u4ee5\u67e5\u770b\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u7684\u65e5\u5fd7\u8f93\u51fa

                      [HAMI-core Warn(1:140244541377408:utils.c:183)]: get default cuda from (null)\n[HAMI-core Msg(1:140244541377408:libvgpu.c:855)]: Initialized\nEpoch [10/100], Loss: 1.1248\nEpoch [20/100], Loss: 1.0486\nEpoch [30/100], Loss: 0.9969\nEpoch [40/100], Loss: 0.9611\nEpoch [50/100], Loss: 0.9360\nEpoch [60/100], Loss: 0.9182\nEpoch [70/100], Loss: 0.9053\nEpoch [80/100], Loss: 0.8960\nEpoch [90/100], Loss: 0.8891\nEpoch [100/100], Loss: 0.8841\nTraining finished.\n[HAMI-core Msg(1:140244541377408:multiprocess_memory_limit.c:468)]: Calling exit handler 1\n
                      "},{"location":"end-user/baize/jobs/pytorch.html#pytorch_2","title":"Pytorch \u5206\u5e03\u5f0f\u4efb\u52a1","text":"
                      1. \u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3 \uff0c\u8fdb\u5165 \u4efb\u52a1\u5217\u8868 \u9875\u9762\u3002
                      2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                      3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a Pytorch \u5206\u5e03\u5f0f\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002
                      4. \u586b\u5199\u4efb\u52a1\u540d\u79f0\u3001\u63cf\u8ff0\u540e\u70b9\u51fb \u786e\u5b9a \u3002
                      "},{"location":"end-user/baize/jobs/pytorch.html#_6","title":"\u8fd0\u884c\u53c2\u6570","text":"
                      • \u542f\u52a8\u547d\u4ee4 \u4f7f\u7528 bash
                      • \u547d\u4ee4\u53c2\u6570\u4f7f\u7528
                      import os\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nclass SimpleModel(nn.Module):\n    def __init__(self):\n        super(SimpleModel, self).__init__()\n        self.fc = nn.Linear(10, 1)\n\n    def forward(self, x):\n        return self.fc(x)\n\ndef train():\n    # \u6253\u5370\u73af\u5883\u4fe1\u606f\n    print(f'PyTorch version: {torch.__version__}')\n    print(f'CUDA available: {torch.cuda.is_available()}')\n    if torch.cuda.is_available():\n        print(f'CUDA version: {torch.version.cuda}')\n        print(f'CUDA device count: {torch.cuda.device_count()}')\n\n    rank = int(os.environ.get('RANK', '0'))\n    world_size = int(os.environ.get('WORLD_SIZE', '1'))\n\n    print(f'Rank: {rank}, World Size: {world_size}')\n\n    # \u521d\u59cb\u5316\u5206\u5e03\u5f0f\u73af\u5883\n    try:\n        if world_size > 1:\n            dist.init_process_group('nccl')\n            print('Distributed process group initialized successfully')\n        else:\n            print('Running in non-distributed mode')\n    except Exception as e:\n        print(f'Error initializing process group: {e}')\n        return\n\n    # \u8bbe\u7f6e\u8bbe\u5907\n    try:\n        if torch.cuda.is_available():\n            device = torch.device(f'cuda:{rank % torch.cuda.device_count()}')\n            print(f'Using CUDA device: {device}')\n        else:\n            device = torch.device('cpu')\n            print('CUDA not available, using CPU')\n    except Exception as e:\n        print(f'Error setting device: {e}')\n        device = torch.device('cpu')\n        print('Falling back to CPU')\n\n    try:\n        model = SimpleModel().to(device)\n        print('Model moved to device successfully')\n    except Exception as e:\n        print(f'Error moving model to device: {e}')\n        return\n\n    try:\n        if world_size > 1:\n            ddp_model = DDP(model, device_ids=[rank % torch.cuda.device_count()] if torch.cuda.is_available() else None)\n            print('DDP model created successfully')\n        else:\n            ddp_model = model\n            print('Using non-distributed model')\n    except Exception as e:\n        print(f'Error creating DDP model: {e}')\n        return\n\n    loss_fn = nn.MSELoss()\n    optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)\n\n    # \u751f\u6210\u4e00\u4e9b\u968f\u673a\u6570\u636e\n    try:\n        data = torch.randn(100, 10, device=device)\n        labels = torch.randn(100, 1, device=device)\n        print('Data generated and moved to device successfully')\n    except Exception as e:\n        print(f'Error generating or moving data to device: {e}')\n        return\n\n    for epoch in range(10):\n        try:\n            ddp_model.train()\n            outputs = ddp_model(data)\n            loss = loss_fn(outputs, labels)\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n\n            if rank == 0:\n                print(f'Epoch {epoch}, Loss: {loss.item():.4f}')\n        except Exception as e:\n            print(f'Error during training epoch {epoch}: {e}')\n            break\n\n    if world_size > 1:\n        dist.destroy_process_group()\n\nif __name__ == '__main__':\n    train()\n
                      "},{"location":"end-user/baize/jobs/pytorch.html#_7","title":"\u4efb\u52a1\u526f\u672c\u6570","text":"

                      \u6ce8\u610f Pytorch \u5206\u5e03\u5f0f \u8bad\u7ec3\u4efb\u52a1\u4f1a\u521b\u5efa\u4e00\u7ec4 Master \u548c Worker \u7684\u8bad\u7ec3 Pod\uff0c Master \u8d1f\u8d23\u534f\u8c03\u8bad\u7ec3\u4efb\u52a1\uff0cWorker \u8d1f\u8d23\u5b9e\u9645\u7684\u8bad\u7ec3\u5de5\u4f5c\u3002

                      Note

                      \u672c\u6b21\u6f14\u793a\u4e2d\uff1aMaster \u526f\u672c\u6570\u4e3a 1\uff0cWorker \u526f\u672c\u6570\u4e3a 2\uff1b \u6240\u4ee5\u6211\u4eec\u9700\u8981\u5728 \u4efb\u52a1\u914d\u7f6e \u4e2d\u8bbe\u7f6e\u526f\u672c\u6570\u4e3a 3\uff0c\u5373 Master \u526f\u672c\u6570 + Worker \u526f\u672c\u6570\u3002 Pytorch \u4f1a\u81ea\u52a8\u8c03\u8c10 Master \u548c Worker \u7684\u89d2\u8272\u3002

                      "},{"location":"end-user/baize/jobs/pytorch.html#_8","title":"\u8fd0\u884c\u7ed3\u679c","text":"

                      \u540c\u6837\uff0c\u6211\u4eec\u53ef\u4ee5\u8fdb\u5165\u4efb\u52a1\u8be6\u60c5\uff0c\u67e5\u770b\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\uff0c\u4ee5\u53ca\u6bcf\u4e2a Pod \u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                      "},{"location":"end-user/baize/jobs/tensorboard.html","title":"\u4efb\u52a1\u5206\u6790\u4ecb\u7ecd","text":"

                      \u5728 AI Lab \u6a21\u5757\u4e2d\uff0c\u63d0\u4f9b\u4e86\u6a21\u578b\u5f00\u53d1\u8fc7\u7a0b\u91cd\u8981\u7684\u53ef\u89c6\u5316\u5206\u6790\u5de5\u5177\uff0c\u7528\u4e8e\u5c55\u793a\u673a\u5668\u5b66\u4e60\u6a21\u578b\u7684\u8bad\u7ec3\u8fc7\u7a0b\u548c\u7ed3\u679c\u3002 \u672c\u6587\u5c06\u4ecb\u7ecd \u4efb\u52a1\u5206\u6790\uff08Tensorboard\uff09\u7684\u57fa\u672c\u6982\u5ff5\u3001\u5728 AI Lab \u7cfb\u7edf\u4e2d\u7684\u4f7f\u7528\u65b9\u6cd5\uff0c\u4ee5\u53ca\u5982\u4f55\u914d\u7f6e\u6570\u636e\u96c6\u7684\u65e5\u5fd7\u5185\u5bb9\u3002

                      Note

                      Tensorboard \u662f TensorFlow \u63d0\u4f9b\u7684\u4e00\u4e2a\u53ef\u89c6\u5316\u5de5\u5177\uff0c\u7528\u4e8e\u5c55\u793a\u673a\u5668\u5b66\u4e60\u6a21\u578b\u7684\u8bad\u7ec3\u8fc7\u7a0b\u548c\u7ed3\u679c\u3002 \u5b83\u53ef\u4ee5\u5e2e\u52a9\u5f00\u53d1\u8005\u66f4\u76f4\u89c2\u5730\u7406\u89e3\u6a21\u578b\u7684\u8bad\u7ec3\u52a8\u6001\uff0c\u5206\u6790\u6a21\u578b\u6027\u80fd\uff0c\u8c03\u8bd5\u6a21\u578b\u95ee\u9898\u7b49\u3002

                      Tensorboard \u5728\u6a21\u578b\u5f00\u53d1\u8fc7\u7a0b\u4e2d\u7684\u4f5c\u7528\u53ca\u4f18\u52bf\uff1a

                      • \u53ef\u89c6\u5316\u8bad\u7ec3\u8fc7\u7a0b\uff1a\u901a\u8fc7\u56fe\u8868\u5c55\u793a\u8bad\u7ec3\u548c\u9a8c\u8bc1\u7684\u635f\u5931\u3001\u7cbe\u5ea6\u7b49\u6307\u6807\uff0c\u5e2e\u52a9\u5f00\u53d1\u8005\u76f4\u89c2\u5730\u89c2\u5bdf\u6a21\u578b\u7684\u8bad\u7ec3\u6548\u679c\u3002
                      • \u8c03\u8bd5\u548c\u4f18\u5316\u6a21\u578b\uff1a\u901a\u8fc7\u67e5\u770b\u4e0d\u540c\u5c42\u7684\u6743\u91cd\u3001\u68af\u5ea6\u5206\u5e03\u7b49\uff0c\u5e2e\u52a9\u5f00\u53d1\u8005\u53d1\u73b0\u548c\u4fee\u6b63\u6a21\u578b\u4e2d\u7684\u95ee\u9898\u3002
                      • \u5bf9\u6bd4\u4e0d\u540c\u5b9e\u9a8c\uff1a\u53ef\u4ee5\u540c\u65f6\u5c55\u793a\u591a\u4e2a\u5b9e\u9a8c\u7684\u7ed3\u679c\uff0c\u65b9\u4fbf\u5f00\u53d1\u8005\u5bf9\u6bd4\u4e0d\u540c\u6a21\u578b\u548c\u8d85\u53c2\u6570\u914d\u7f6e\u7684\u6548\u679c\u3002
                      • \u8ffd\u8e2a\u8bad\u7ec3\u6570\u636e\uff1a\u8bb0\u5f55\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u4f7f\u7528\u7684\u6570\u636e\u96c6\u548c\u53c2\u6570\uff0c\u786e\u4fdd\u5b9e\u9a8c\u7684\u53ef\u590d\u73b0\u6027\u3002
                      "},{"location":"end-user/baize/jobs/tensorboard.html#tensorboard","title":"\u5982\u4f55\u521b\u5efa Tensorboard","text":"

                      \u5728 AI Lab \u7cfb\u7edf\u4e2d\uff0c\u6211\u4eec\u63d0\u4f9b\u4e86\u4fbf\u6377\u7684\u65b9\u5f0f\u6765\u521b\u5efa\u548c\u7ba1\u7406 Tensorboard\u3002\u4ee5\u4e0b\u662f\u5177\u4f53\u6b65\u9aa4\uff1a

                      "},{"location":"end-user/baize/jobs/tensorboard.html#notebook-tensorboard","title":"\u5728\u521b\u5efa\u65f6 Notebook \u542f\u7528 Tensorboard","text":"
                      1. \u521b\u5efa Notebook\uff1a\u5728 AI Lab \u5e73\u53f0\u4e0a\u521b\u5efa\u4e00\u4e2a\u65b0\u7684 Notebook\u3002
                      2. \u542f\u7528 Tensorboard\uff1a\u5728\u521b\u5efa Notebook \u7684\u9875\u9762\u4e2d\uff0c\u542f\u7528 Tensorboard \u9009\u9879\uff0c\u5e76\u6307\u5b9a\u6570\u636e\u96c6\u548c\u65e5\u5fd7\u8def\u5f84\u3002

                      "},{"location":"end-user/baize/jobs/tensorboard.html#tensorboard_1","title":"\u5728\u5206\u5e03\u5f0f\u4efb\u52a1\u521b\u5efa\u53ca\u5b8c\u6210\u540e\u542f\u7528 Tensorboard","text":"
                      1. \u521b\u5efa\u5206\u5e03\u5f0f\u4efb\u52a1\uff1a\u5728 AI Lab \u5e73\u53f0\u4e0a\u521b\u5efa\u4e00\u4e2a\u65b0\u7684\u5206\u5e03\u5f0f\u8bad\u7ec3\u4efb\u52a1\u3002
                      2. \u914d\u7f6e Tensorboard\uff1a\u5728\u4efb\u52a1\u914d\u7f6e\u9875\u9762\u4e2d\uff0c\u542f\u7528 Tensorboard \u9009\u9879\uff0c\u5e76\u6307\u5b9a\u6570\u636e\u96c6\u548c\u65e5\u5fd7\u8def\u5f84\u3002
                      3. \u4efb\u52a1\u5b8c\u6210\u540e\u67e5\u770b Tensorboard\uff1a\u4efb\u52a1\u5b8c\u6210\u540e\uff0c\u53ef\u4ee5\u5728\u4efb\u52a1\u8be6\u60c5\u9875\u9762\u4e2d\u67e5\u770b Tensorboard \u7684\u94fe\u63a5\uff0c\u70b9\u51fb\u94fe\u63a5\u5373\u53ef\u67e5\u770b\u8bad\u7ec3\u8fc7\u7a0b\u7684\u53ef\u89c6\u5316\u7ed3\u679c\u3002

                      "},{"location":"end-user/baize/jobs/tensorboard.html#notebook-tensorboard_1","title":"\u5728 Notebook \u4e2d\u76f4\u63a5\u5f15\u7528 Tensorboard","text":"

                      \u5728 Notebook \u4e2d\uff0c\u53ef\u4ee5\u901a\u8fc7\u4ee3\u7801\u76f4\u63a5\u542f\u52a8 Tensorboard\u3002\u4ee5\u4e0b\u662f\u4e00\u4e2a\u793a\u4f8b\u4ee3\u7801\uff1a

                      # \u5bfc\u5165\u5fc5\u8981\u7684\u5e93\nimport tensorflow as tf\nimport datetime\n\n# \u5b9a\u4e49\u65e5\u5fd7\u76ee\u5f55\nlog_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n# \u521b\u5efa Tensorboard \u56de\u8c03\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n\n# \u6784\u5efa\u5e76\u7f16\u8bd1\u6a21\u578b\nmodel = tf.keras.models.Sequential([\n    tf.keras.layers.Flatten(input_shape=(28, 28)),\n    tf.keras.layers.Dense(512, activation='relu'),\n    tf.keras.layers.Dropout(0.2),\n    tf.keras.layers.Dense(10, activation='softmax')\n])\n\nmodel.compile(optimizer='adam',\n              loss='sparse_categorical_crossentropy',\n              metrics=['accuracy'])\n\n# \u8bad\u7ec3\u6a21\u578b\u5e76\u542f\u7528 Tensorboard \u56de\u8c03\nmodel.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test), callbacks=[tensorboard_callback])\n
                      "},{"location":"end-user/baize/jobs/tensorboard.html#_2","title":"\u5982\u4f55\u914d\u7f6e\u6570\u636e\u96c6\u7684\u65e5\u5fd7\u5185\u5bb9","text":"

                      \u5728\u4f7f\u7528 Tensorboard \u65f6\uff0c\u53ef\u4ee5\u8bb0\u5f55\u548c\u914d\u7f6e\u4e0d\u540c\u7684\u6570\u636e\u96c6\u548c\u65e5\u5fd7\u5185\u5bb9\u3002\u4ee5\u4e0b\u662f\u4e00\u4e9b\u5e38\u89c1\u7684\u914d\u7f6e\u65b9\u5f0f\uff1a

                      "},{"location":"end-user/baize/jobs/tensorboard.html#_3","title":"\u914d\u7f6e\u8bad\u7ec3\u548c\u9a8c\u8bc1\u6570\u636e\u96c6\u7684\u65e5\u5fd7","text":"

                      \u5728\u8bad\u7ec3\u6a21\u578b\u65f6\uff0c\u53ef\u4ee5\u901a\u8fc7 TensorFlow \u7684 tf.summary API \u6765\u8bb0\u5f55\u8bad\u7ec3\u548c\u9a8c\u8bc1\u6570\u636e\u96c6\u7684\u65e5\u5fd7\u3002\u4ee5\u4e0b\u662f\u4e00\u4e2a\u793a\u4f8b\u4ee3\u7801\uff1a

                      # \u5bfc\u5165\u5fc5\u8981\u7684\u5e93\nimport tensorflow as tf\n\n# \u521b\u5efa\u65e5\u5fd7\u76ee\u5f55\ntrain_log_dir = 'logs/gradient_tape/train'\nval_log_dir = 'logs/gradient_tape/val'\ntrain_summary_writer = tf.summary.create_file_writer(train_log_dir)\nval_summary_writer = tf.summary.create_file_writer(val_log_dir)\n\n# \u8bad\u7ec3\u6a21\u578b\u5e76\u8bb0\u5f55\u65e5\u5fd7\nfor epoch in range(EPOCHS):\n    for (x_train, y_train) in train_dataset:\n        # \u8bad\u7ec3\u6b65\u9aa4\n        train_step(x_train, y_train)\n        with train_summary_writer.as_default():\n            tf.summary.scalar('loss', train_loss.result(), step=epoch)\n            tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)\n\n    for (x_val, y_val) in val_dataset:\n        # \u9a8c\u8bc1\u6b65\u9aa4\n        val_step(x_val, y_val)\n        with val_summary_writer.as_default():\n            tf.summary.scalar('loss', val_loss.result(), step=epoch)\n            tf.summary.scalar('accuracy', val_accuracy.result(), step=epoch)\n
                      "},{"location":"end-user/baize/jobs/tensorboard.html#_4","title":"\u914d\u7f6e\u81ea\u5b9a\u4e49\u65e5\u5fd7","text":"

                      \u9664\u4e86\u8bad\u7ec3\u548c\u9a8c\u8bc1\u6570\u636e\u96c6\u7684\u65e5\u5fd7\u5916\uff0c\u8fd8\u53ef\u4ee5\u8bb0\u5f55\u5176\u4ed6\u81ea\u5b9a\u4e49\u7684\u65e5\u5fd7\u5185\u5bb9\uff0c\u4f8b\u5982\u5b66\u4e60\u7387\u3001\u68af\u5ea6\u5206\u5e03\u7b49\u3002\u4ee5\u4e0b\u662f\u4e00\u4e2a\u793a\u4f8b\u4ee3\u7801\uff1a

                      # \u8bb0\u5f55\u81ea\u5b9a\u4e49\u65e5\u5fd7\nwith train_summary_writer.as_default():\n    tf.summary.scalar('learning_rate', learning_rate, step=epoch)\n    tf.summary.histogram('gradients', gradients, step=epoch)\n
                      "},{"location":"end-user/baize/jobs/tensorboard.html#tensorboard_2","title":"Tensorboard \u7ba1\u7406","text":"

                      \u5728 AI Lab \u4e2d\uff0c\u901a\u8fc7\u5404\u79cd\u65b9\u5f0f\u521b\u5efa\u51fa\u6765\u7684 Tensorboard \u4f1a\u7edf\u4e00\u5c55\u793a\u5728\u4efb\u52a1\u5206\u6790\u7684\u9875\u9762\u4e2d\uff0c\u65b9\u4fbf\u7528\u6237\u67e5\u770b\u548c\u7ba1\u7406\u3002

                      \u7528\u6237\u53ef\u4ee5\u5728\u4efb\u52a1\u5206\u6790\u9875\u9762\u4e2d\u67e5\u770b Tensorboard \u7684\u94fe\u63a5\u3001\u72b6\u6001\u3001\u521b\u5efa\u65f6\u95f4\u7b49\u4fe1\u606f\uff0c\u5e76\u901a\u8fc7\u94fe\u63a5\u76f4\u63a5\u8bbf\u95ee Tensorboard \u7684\u53ef\u89c6\u5316\u7ed3\u679c\u3002

                      "},{"location":"end-user/baize/jobs/tensorflow.html","title":"Tensorflow \u4efb\u52a1","text":"

                      Tensorflow \u662f\u9664\u4e86 Pytorch \u53e6\u5916\u4e00\u4e2a\u975e\u5e38\u6d3b\u8dc3\u7684\u5f00\u6e90\u7684\u6df1\u5ea6\u5b66\u4e60\u6846\u67b6\uff0c\u5b83\u63d0\u4f9b\u4e86\u4e00\u4e2a\u7075\u6d3b\u7684\u8bad\u7ec3\u548c\u90e8\u7f72\u73af\u5883\u3002

                      \u5728 AI Lab \u4e2d\uff0c\u6211\u4eec\u540c\u6837\u63d0\u4f9b\u4e86 Tensorflow \u6846\u67b6\u7684\u652f\u6301\u548c\u9002\u914d\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u754c\u9762\u5316\u64cd\u4f5c\uff0c\u5feb\u901f\u521b\u5efa Tensorflow \u4efb\u52a1\uff0c\u8fdb\u884c\u6a21\u578b\u8bad\u7ec3\u3002

                      "},{"location":"end-user/baize/jobs/tensorflow.html#_1","title":"\u4efb\u52a1\u914d\u7f6e\u4ecb\u7ecd","text":"
                      • \u4efb\u52a1\u7c7b\u578b\u540c\u65f6\u652f\u6301 Tensorflow \u5355\u673a \u548c Tensorflow \u5206\u5e03\u5f0f \u4e24\u79cd\u6a21\u5f0f\u3002
                      • \u8fd0\u884c\u955c\u50cf\u5185\u5df2\u7ecf\u9ed8\u8ba4\u652f\u6301 Tensorflow \u6846\u67b6\uff0c\u65e0\u9700\u989d\u5916\u5b89\u88c5\u3002
                      "},{"location":"end-user/baize/jobs/tensorflow.html#_2","title":"\u4efb\u52a1\u8fd0\u884c\u73af\u5883","text":"

                      \u5728\u8fd9\u91cc\u6211\u4eec\u4f7f\u7528 baize-notebook \u57fa\u7840\u955c\u50cf \u548c \u5173\u8054\u73af\u5883 \u7684\u65b9\u5f0f\u6765\u4f5c\u4e3a\u4efb\u52a1\u57fa\u7840\u8fd0\u884c\u73af\u5883\u3002

                      \u4e86\u89e3\u5982\u4f55\u521b\u5efa\u73af\u5883\uff0c\u8bf7\u53c2\u8003\u73af\u5883\u5217\u8868\u3002

                      "},{"location":"end-user/baize/jobs/tensorflow.html#_3","title":"\u521b\u5efa\u4efb\u52a1","text":""},{"location":"end-user/baize/jobs/tensorflow.html#tfjob","title":"\u793a\u4f8b TFJob \u5355\u673a\u4efb\u52a1","text":"
                      1. \u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3 \uff0c\u8fdb\u5165 \u8bad\u7ec3\u4efb\u52a1 \u9875\u9762\u3002
                      2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                      3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a Tensorflow \u5355\u673a\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002
                      4. \u586b\u5199\u4efb\u52a1\u540d\u79f0\u3001\u63cf\u8ff0\u540e\u70b9\u51fb \u786e\u5b9a \u3002
                      "},{"location":"end-user/baize/jobs/tensorflow.html#_4","title":"\u63d0\u524d\u9884\u70ed\u4ee3\u7801\u4ed3\u5e93","text":"

                      \u4f7f\u7528 AI Lab -> \u6570\u636e\u96c6\u5217\u8868 \uff0c\u521b\u5efa\u4e00\u4e2a\u6570\u636e\u96c6\uff0c\u5e76\u5c06\u8fdc\u7aef Github \u7684\u4ee3\u7801\u62c9\u53d6\u5230\u6570\u636e\u96c6\u4e2d\uff0c \u8fd9\u6837\u5728\u521b\u5efa\u4efb\u52a1\u65f6\uff0c\u53ef\u4ee5\u76f4\u63a5\u9009\u62e9\u6570\u636e\u96c6\uff0c\u5c06\u4ee3\u7801\u6302\u8f7d\u5230\u4efb\u52a1\u4e2d\u3002

                      \u6f14\u793a\u4ee3\u7801\u4ed3\u5e93\u5730\u5740\uff1ahttps://github.com/d-run/training-sample-code/

                      "},{"location":"end-user/baize/jobs/tensorflow.html#_5","title":"\u8fd0\u884c\u53c2\u6570","text":"
                      • \u542f\u52a8\u547d\u4ee4 \u4f7f\u7528 bash
                      • \u547d\u4ee4\u53c2\u6570\u4f7f\u7528 python /code/tensorflow/tf-single.py
                      \"\"\"\n  pip install tensorflow numpy\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\n# \u521b\u5efa\u4e00\u4e9b\u968f\u673a\u6570\u636e\nx = np.random.rand(100, 1)\ny = 2 * x + 1 + np.random.rand(100, 1) * 0.1\n\n# \u521b\u5efa\u4e00\u4e2a\u7b80\u5355\u7684\u6a21\u578b\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Dense(1, input_shape=(1,))\n])\n\n# \u7f16\u8bd1\u6a21\u578b\nmodel.compile(optimizer='adam', loss='mse')\n\n# \u8bad\u7ec3\u6a21\u578b\uff0c\u5c06 epochs \u6539\u4e3a 10\nhistory = model.fit(x, y, epochs=10, verbose=1)\n\n# \u6253\u5370\u6700\u7ec8\u635f\u5931\nprint('Final loss: {' + str(history.history['loss'][-1]) +'}')\n\n# \u4f7f\u7528\u6a21\u578b\u8fdb\u884c\u9884\u6d4b\ntest_x = np.array([[0.5]])\nprediction = model.predict(test_x)\nprint(f'Prediction for x=0.5: {prediction[0][0]}')\n
                      "},{"location":"end-user/baize/jobs/tensorflow.html#_6","title":"\u8fd0\u884c\u7ed3\u679c","text":"

                      \u4efb\u52a1\u63d0\u4ea4\u6210\u529f\u540e\uff0c\u53ef\u4ee5\u8fdb\u5165\u4efb\u52a1\u8be6\u60c5\u67e5\u770b\u5230\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\uff0c\u4ece\u53f3\u4e0a\u89d2\u53bb\u5f80 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5 \uff0c\u53ef\u4ee5\u67e5\u770b\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                      "},{"location":"end-user/baize/jobs/tensorflow.html#tfjob_1","title":"TFJob \u5206\u5e03\u5f0f\u4efb\u52a1","text":"
                      1. \u767b\u5f55 AI Lab \uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3 \uff0c\u8fdb\u5165 \u4efb\u52a1\u5217\u8868 \u9875\u9762\u3002
                      2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                      3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a Tensorflow \u5206\u5e03\u5f0f\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002
                      4. \u586b\u5199\u4efb\u52a1\u540d\u79f0\u3001\u63cf\u8ff0\u540e\u70b9\u51fb \u786e\u5b9a \u3002
                      "},{"location":"end-user/baize/jobs/tensorflow.html#_7","title":"\u793a\u4f8b\u4efb\u52a1\u4ecb\u7ecd","text":"

                      \u672c\u6b21\u5305\u542b\u4e86\u4e09\u79cd\u89d2\u8272\uff1aChief\u3001Worker \u548c Parameter Server (PS)\u3002

                      • Chief: \u4e3b\u8981\u8d1f\u8d23\u534f\u8c03\u8bad\u7ec3\u8fc7\u7a0b\u548c\u6a21\u578b\u68c0\u67e5\u70b9\u7684\u4fdd\u5b58\u3002
                      • Worker: \u6267\u884c\u5b9e\u9645\u7684\u6a21\u578b\u8bad\u7ec3\u3002
                      • PS: \u5728\u5f02\u6b65\u8bad\u7ec3\u4e2d\u7528\u4e8e\u5b58\u50a8\u548c\u66f4\u65b0\u6a21\u578b\u53c2\u6570\u3002

                      \u4e3a\u4e0d\u540c\u7684\u89d2\u8272\u5206\u914d\u4e86\u4e0d\u540c\u7684\u8d44\u6e90\u3002Chief \u548c Worker \u4f7f\u7528 GPU\uff0c\u800c PS \u4f7f\u7528 CPU \u548c\u8f83\u5927\u7684\u5185\u5b58\u3002

                      "},{"location":"end-user/baize/jobs/tensorflow.html#_8","title":"\u8fd0\u884c\u53c2\u6570","text":"
                      • \u542f\u52a8\u547d\u4ee4 \u4f7f\u7528 bash
                      • \u547d\u4ee4\u53c2\u6570\u4f7f\u7528 python /code/tensorflow/tensorflow-distributed.py
                      import os\nimport json\nimport tensorflow as tf\n\nclass SimpleModel(tf.keras.Model):\n    def __init__(self):\n        super(SimpleModel, self).__init__()\n        self.fc = tf.keras.layers.Dense(1, input_shape=(10,))\n\n    def call(self, x):\n        return self.fc(x)\n\ndef train():\n    # \u6253\u5370\u73af\u5883\u4fe1\u606f\n    print(f\"TensorFlow version: {tf.__version__}\")\n    print(f\"GPU available: {tf.test.is_gpu_available()}\")\n    if tf.test.is_gpu_available():\n        print(f\"GPU device count: {len(tf.config.list_physical_devices('GPU'))}\")\n\n    # \u83b7\u53d6\u5206\u5e03\u5f0f\u8bad\u7ec3\u4fe1\u606f\n    tf_config = json.loads(os.environ.get('TF_CONFIG') or '{}')\n    task_type = tf_config.get('task', {}).get('type')\n    task_id = tf_config.get('task', {}).get('index')\n\n    print(f\"Task type: {task_type}, Task ID: {task_id}\")\n\n    # \u8bbe\u7f6e\u5206\u5e03\u5f0f\u7b56\u7565\n    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()\n\n    with strategy.scope():\n        model = SimpleModel()\n        loss_fn = tf.keras.losses.MeanSquaredError()\n        optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)\n\n    # \u751f\u6210\u4e00\u4e9b\u968f\u673a\u6570\u636e\n    data = tf.random.normal((100, 10))\n    labels = tf.random.normal((100, 1))\n\n    @tf.function\n    def train_step(inputs, labels):\n        with tf.GradientTape() as tape:\n            predictions = model(inputs)\n            loss = loss_fn(labels, predictions)\n        gradients = tape.gradient(loss, model.trainable_variables)\n        optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n        return loss\n\n    for epoch in range(10):\n        loss = train_step(data, labels)\n        if task_type == 'chief':\n            print(f'Epoch {epoch}, Loss: {loss.numpy():.4f}')\n\nif __name__ == '__main__':\n    train()\n
                      "},{"location":"end-user/baize/jobs/tensorflow.html#_9","title":"\u8fd0\u884c\u7ed3\u679c","text":"

                      \u540c\u6837\uff0c\u6211\u4eec\u53ef\u4ee5\u8fdb\u5165\u4efb\u52a1\u8be6\u60c5\uff0c\u67e5\u770b\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\uff0c\u4ee5\u53ca\u6bcf\u4e2a Pod \u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                      "},{"location":"end-user/baize/jobs/view.html","title":"\u67e5\u770b\u4efb\u52a1\uff08Job\uff09\u5de5\u4f5c\u8d1f\u8f7d","text":"

                      \u4efb\u52a1\u521b\u5efa\u597d\u540e\uff0c\u90fd\u4f1a\u663e\u793a\u5728\u8bad\u7ec3\u4efb\u52a1\u5217\u8868\u4e2d\u3002

                      1. \u5728\u8bad\u7ec3\u8bad\u7ec3\u4efb\u52a1\u5217\u8868\u4e2d\uff0c\u70b9\u51fb\u67d0\u4e2a\u4efb\u52a1\u53f3\u4fa7\u7684 \u2507 -> \u4efb\u52a1\u8d1f\u8f7d\u8be6\u60c5 \u3002

                      2. \u51fa\u73b0\u4e00\u4e2a\u5f39\u7a97\u9009\u62e9\u8981\u67e5\u770b\u54ea\u4e2a Pod \u540e\uff0c\u70b9\u51fb \u8fdb\u5165 \u3002

                      3. \u8df3\u8f6c\u5230\u5bb9\u5668\u7ba1\u7406\u754c\u9762\uff0c\u53ef\u4ee5\u67e5\u770b\u5bb9\u5668\u7684\u5de5\u4f5c\u72b6\u6001\u3001\u6807\u7b7e\u4e0e\u6ce8\u89e3\u4ee5\u53ca\u53d1\u751f\u7684\u4e8b\u4ef6\u3002

                      4. \u4f60\u8fd8\u53ef\u4ee5\u67e5\u770b\u5f53\u524d Pod \u6700\u8fd1\u4e00\u6bb5\u65f6\u95f4\u7684\u8be6\u7ec6\u65e5\u5fd7\u3002 \u6b64\u5904\u9ed8\u8ba4\u5c55\u793a 100 \u884c\u65e5\u5fd7\uff0c\u5982\u679c\u8981\u67e5\u770b\u66f4\u8be6\u7ec6\u7684\u65e5\u5fd7\u6d3b\u4e0b\u8f7d\u65e5\u5fd7\uff0c\u8bf7\u70b9\u51fb\u9876\u90e8\u7684\u84dd\u8272 \u53ef\u89c2\u6d4b\u6027 \u6587\u5b57\u3002

                      5. \u5f53\u7136\u4f60\u8fd8\u53ef\u4ee5\u901a\u8fc7\u53f3\u4e0a\u89d2\u7684 ... \uff0c\u67e5\u770b\u5f53\u524d Pod \u7684 YAML\u3001\u4e0a\u4f20\u548c\u4e0b\u8f7d\u6587\u4ef6\u3002 \u4ee5\u4e0b\u662f\u4e00\u4e2a Pod \u7684 YAML \u793a\u4f8b\u3002

                      kind: Pod\napiVersion: v1\nmetadata:\n  name: neko-tensorboard-job-test-202404181843-skxivllb-worker-0\n  namespace: default\n  uid: ddedb6ff-c278-47eb-ae1e-0de9b7c62f8c\n  resourceVersion: '41092552'\n  creationTimestamp: '2024-04-18T10:43:36Z'\n  labels:\n    training.kubeflow.org/job-name: neko-tensorboard-job-test-202404181843-skxivllb\n    training.kubeflow.org/operator-name: pytorchjob-controller\n    training.kubeflow.org/replica-index: '0'\n    training.kubeflow.org/replica-type: worker\n  annotations:\n    cni.projectcalico.org/containerID: 0cfbb9af257d5e69027c603c6cb2d3890a17c4ae1a145748d5aef73a10d7fbe1\n    cni.projectcalico.org/podIP: ''\n    cni.projectcalico.org/podIPs: ''\n    hami.io/bind-phase: success\n    hami.io/bind-time: '1713437016'\n    hami.io/vgpu-devices-allocated: GPU-29d5fa0d-935b-2966-aff8-483a174d61d1,NVIDIA,1024,20:;\n    hami.io/vgpu-devices-to-allocate: ;\n    hami.io/vgpu-node: worker-a800-1\n    hami.io/vgpu-time: '1713437016'\n    k8s.v1.cni.cncf.io/network-status: |-\n      [{\n          \"name\": \"kube-system/calico\",\n          \"ips\": [\n              \"10.233.97.184\"\n          ],\n          \"default\": true,\n          \"dns\": {}\n      }]\n    k8s.v1.cni.cncf.io/networks-status: |-\n      [{\n          \"name\": \"kube-system/calico\",\n          \"ips\": [\n              \"10.233.97.184\"\n          ],\n          \"default\": true,\n          \"dns\": {}\n      }]\n  ownerReferences:\n    - apiVersion: kubeflow.org/v1\n      kind: PyTorchJob\n      name: neko-tensorboard-job-test-202404181843-skxivllb\n      uid: e5a8b05d-1f03-4717-8e1c-4ec928014b7b\n      controller: true\n      blockOwnerDeletion: true\nspec:\n  volumes:\n    - name: 0-dataset-pytorch-examples\n      persistentVolumeClaim:\n        claimName: pytorch-examples\n    - name: kube-api-access-wh9rh\n      projected:\n        sources:\n          - serviceAccountToken:\n              expirationSeconds: 3607\n              path: token\n          - configMap:\n              name: kube-root-ca.crt\n              items:\n                - key: ca.crt\n                  path: ca.crt\n          - downwardAPI:\n              items:\n                - path: namespace\n                  fieldRef:\n                    apiVersion: v1\n                    fieldPath: metadata.namespace\n        defaultMode: 420\n  containers:\n    - name: pytorch\n      image: m.daocloud.io/docker.io/pytorch/pytorch\n      command:\n        - bash\n      args:\n        - '-c'\n        - >-\n          ls -la /root && which pip && pip install pytorch_lightning tensorboard\n          && python /root/Git/pytorch/examples/mnist/main.py\n      ports:\n        - name: pytorchjob-port\n          containerPort: 23456\n          protocol: TCP\n      env:\n        - name: PYTHONUNBUFFERED\n          value: '1'\n        - name: PET_NNODES\n          value: '1'\n      resources:\n        limits:\n          cpu: '4'\n          memory: 8Gi\n          nvidia.com/gpucores: '20'\n          nvidia.com/gpumem: '1024'\n          nvidia.com/vgpu: '1'\n        requests:\n          cpu: '4'\n          memory: 8Gi\n          nvidia.com/gpucores: '20'\n          nvidia.com/gpumem: '1024'\n          nvidia.com/vgpu: '1'\n      volumeMounts:\n        - name: 0-dataset-pytorch-examples\n          mountPath: /root/Git/pytorch/examples\n        - name: kube-api-access-wh9rh\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: 
/dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  restartPolicy: Never\n  terminationGracePeriodSeconds: 30\n  dnsPolicy: ClusterFirst\n  serviceAccountName: default\n  serviceAccount: default\n  nodeName: worker-a800-1\n  securityContext: {}\n  affinity: {}\n  schedulerName: hami-scheduler\n  tolerations:\n    - key: node.kubernetes.io/not-ready\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n    - key: node.kubernetes.io/unreachable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n  priorityClassName: baize-high-priority\n  priority: 100000\n  enableServiceLinks: true\n  preemptionPolicy: PreemptLowerPriority\nstatus:\n  phase: Succeeded\n  conditions:\n    - type: Initialized\n      status: 'True'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:43:36Z'\n      reason: PodCompleted\n    - type: Ready\n      status: 'False'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:46:34Z'\n      reason: PodCompleted\n    - type: ContainersReady\n      status: 'False'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:46:34Z'\n      reason: PodCompleted\n    - type: PodScheduled\n      status: 'True'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:43:36Z'\n  hostIP: 10.20.100.211\n  podIP: 10.233.97.184\n  podIPs:\n    - ip: 10.233.97.184\n  startTime: '2024-04-18T10:43:36Z'\n  containerStatuses:\n    - name: pytorch\n      state:\n        terminated:\n          exitCode: 0\n          reason: Completed\n          startedAt: '2024-04-18T10:43:39Z'\n          finishedAt: '2024-04-18T10:46:34Z'\n          containerID: >-\n            containerd://09010214bcf3315e81d38fba50de3943c9d2b48f50a6cc2e83f8ef0e5c6eeec1\n      lastState: {}\n      ready: false\n      restartCount: 0\n      image: m.daocloud.io/docker.io/pytorch/pytorch:latest\n      imageID: >-\n        m.daocloud.io/docker.io/pytorch/pytorch@sha256:11691e035a3651d25a87116b4f6adc113a27a29d8f5a6a583f8569e0ee5ff897\n      containerID: >-\n        containerd://09010214bcf3315e81d38fba50de3943c9d2b48f50a6cc2e83f8ef0e5c6eeec1\n      started: false\n  qosClass: Guaranteed\n
                      "},{"location":"end-user/ghippo/personal-center/accesstoken.html","title":"\u8bbf\u95ee\u5bc6\u94a5","text":"

                      \u8bbf\u95ee\u5bc6\u94a5\uff08Access Key\uff09\u53ef\u7528\u4e8e\u8bbf\u95ee\u5f00\u653e API \u548c\u6301\u7eed\u53d1\u5e03\uff0c\u7528\u6237\u53ef\u5728\u4e2a\u4eba\u4e2d\u5fc3\u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u83b7\u53d6\u5bc6\u94a5\u5e76\u8bbf\u95ee API\u3002

                      "},{"location":"end-user/ghippo/personal-center/accesstoken.html#_2","title":"\u83b7\u53d6\u5bc6\u94a5","text":"

                      \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5728\u53f3\u4e0a\u89d2\u7684\u4e0b\u62c9\u83dc\u5355\u4e2d\u627e\u5230 \u4e2a\u4eba\u4e2d\u5fc3 \uff0c\u53ef\u4ee5\u5728 \u8bbf\u95ee\u5bc6\u94a5 \u9875\u9762\u7ba1\u7406\u8d26\u53f7\u7684\u8bbf\u95ee\u5bc6\u94a5\u3002

                      Info

                      \u8bbf\u95ee\u5bc6\u94a5\u4fe1\u606f\u4ec5\u663e\u793a\u4e00\u6b21\u3002\u5982\u679c\u60a8\u5fd8\u8bb0\u4e86\u8bbf\u95ee\u5bc6\u94a5\u4fe1\u606f\uff0c\u60a8\u9700\u8981\u91cd\u65b0\u521b\u5efa\u65b0\u7684\u8bbf\u95ee\u5bc6\u94a5\u3002

                      "},{"location":"end-user/ghippo/personal-center/accesstoken.html#api","title":"\u4f7f\u7528\u5bc6\u94a5\u8bbf\u95ee API","text":"

                      \u5728\u8bbf\u95ee\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0openAPI \u65f6\uff0c\u5728\u8bf7\u6c42\u4e2d\u52a0\u4e0a\u8bf7\u6c42\u5934 Authorization:Bearer ${token} \u4ee5\u6807\u8bc6\u8bbf\u95ee\u8005\u7684\u8eab\u4efd\uff0c \u5176\u4e2d ${token} \u662f\u4e0a\u4e00\u6b65\u4e2d\u83b7\u53d6\u5230\u7684\u5bc6\u94a5\uff0c\u5177\u4f53\u63a5\u53e3\u4fe1\u606f\u53c2\u89c1 OpenAPI \u63a5\u53e3\u6587\u6863\u3002

                      \u8bf7\u6c42\u793a\u4f8b

                      curl -X GET -H 'Authorization:Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IkRKVjlBTHRBLXZ4MmtQUC1TQnVGS0dCSWc1cnBfdkxiQVVqM2U3RVByWnMiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE2NjE0MTU5NjksImlhdCI6MTY2MDgxMTE2OSwiaXNzIjoiZ2hpcHBvLmlvIiwic3ViIjoiZjdjOGIxZjUtMTc2MS00NjYwLTg2MWQtOWI3MmI0MzJmNGViIiwicHJlZmVycmVkX3VzZXJuYW1lIjoiYWRtaW4iLCJncm91cHMiOltdfQ.RsUcrAYkQQ7C6BxMOrdD3qbBRUt0VVxynIGeq4wyIgye6R8Ma4cjxG5CbU1WyiHKpvIKJDJbeFQHro2euQyVde3ygA672ozkwLTnx3Tu-_mB1BubvWCBsDdUjIhCQfT39rk6EQozMjb-1X1sbLwzkfzKMls-oxkjagI_RFrYlTVPwT3Oaw-qOyulRSw7Dxd7jb0vINPq84vmlQIsI3UuTZSNO5BCgHpubcWwBss-Aon_DmYA-Et_-QtmPBA3k8E2hzDSzc7eqK0I68P25r9rwQ3DeKwD1dbRyndqWORRnz8TLEXSiCFXdZT2oiMrcJtO188Ph4eLGut1-4PzKhwgrQ' https://demo-dev.daocloud.io/apis/ghippo.io/v1alpha1/users?page=1&pageSize=10 -k\n

                      \u8bf7\u6c42\u7ed3\u679c

                      {\n    \"items\": [\n        {\n            \"id\": \"a7cfd010-ebbe-4601-987f-d098d9ef766e\",\n            \"name\": \"a\",\n            \"email\": \"\",\n            \"description\": \"\",\n            \"firstname\": \"\",\n            \"lastname\": \"\",\n            \"source\": \"locale\",\n            \"enabled\": true,\n            \"createdAt\": \"1660632794800\",\n            \"updatedAt\": \"0\",\n            \"lastLoginAt\": \"\"\n        }\n    ],\n    \"pagination\": {\n        \"page\": 1,\n        \"pageSize\": 10,\n        \"total\": 1\n    }\n}\n
                      "},{"location":"end-user/ghippo/personal-center/language.html","title":"\u8bed\u8a00\u8bbe\u7f6e","text":"

                      \u672c\u8282\u8bf4\u660e\u5982\u4f55\u8bbe\u7f6e\u754c\u9762\u8bed\u8a00\u3002\u76ee\u524d\u652f\u6301\u4e2d\u6587\u3001English \u4e24\u4e2a\u8bed\u8a00\u3002

                      \u8bed\u8a00\u8bbe\u7f6e\u662f\u5e73\u53f0\u63d0\u4f9b\u591a\u8bed\u8a00\u670d\u52a1\u7684\u5165\u53e3\uff0c\u5e73\u53f0\u9ed8\u8ba4\u663e\u793a\u4e3a\u4e2d\u6587\uff0c\u7528\u6237\u53ef\u6839\u636e\u9700\u8981\u9009\u62e9\u82f1\u8bed\u6216\u81ea\u52a8\u68c0\u6d4b\u6d4f\u89c8\u5668\u8bed\u8a00\u9996\u9009\u9879\u7684\u65b9\u5f0f\u6765\u5207\u6362\u5e73\u53f0\u8bed\u8a00\u3002 \u6bcf\u4e2a\u7528\u6237\u7684\u591a\u8bed\u8a00\u670d\u52a1\u662f\u76f8\u4e92\u72ec\u7acb\u7684\uff0c\u5207\u6362\u540e\u4e0d\u4f1a\u5f71\u54cd\u5176\u4ed6\u7528\u6237\u3002

                      \u5e73\u53f0\u63d0\u4f9b\u4e09\u79cd\u5207\u6362\u8bed\u8a00\u65b9\u5f0f\uff1a\u4e2d\u6587\u3001\u82f1\u8bed-English\u3001\u81ea\u52a8\u68c0\u6d4b\u60a8\u7684\u6d4f\u89c8\u5668\u8bed\u8a00\u9996\u9009\u9879\u3002

                      \u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b\u3002

                      1. \u4f7f\u7528\u60a8\u7684\u7528\u6237\u540d/\u5bc6\u7801\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\u3002\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u5e95\u90e8\u7684 \u5168\u5c40\u7ba1\u7406 \u3002

                      2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684\u7528\u6237\u540d\u4f4d\u7f6e\uff0c\u9009\u62e9 \u4e2a\u4eba\u4e2d\u5fc3 \u3002

                      3. \u70b9\u51fb \u8bed\u8a00\u8bbe\u7f6e \u9875\u7b7e\u3002

                      4. \u5207\u6362\u8bed\u8a00\u9009\u9879\u3002

                      "},{"location":"end-user/ghippo/personal-center/security-setting.html","title":"\u5b89\u5168\u8bbe\u7f6e","text":"

                      \u529f\u80fd\u8bf4\u660e\uff1a\u7528\u4e8e\u586b\u5199\u90ae\u7bb1\u5730\u5740\u548c\u4fee\u6539\u767b\u5f55\u5bc6\u7801\u3002

                      • \u90ae\u7bb1\uff1a\u5f53\u7ba1\u7406\u5458\u914d\u7f6e\u90ae\u7bb1\u670d\u52a1\u5668\u5730\u5740\u4e4b\u540e\uff0c\u7528\u6237\u80fd\u591f\u901a\u8fc7\u767b\u5f55\u9875\u7684\u5fd8\u8bb0\u5bc6\u7801\u6309\u94ae\uff0c\u586b\u5199\u8be5\u5904\u7684\u90ae\u7bb1\u5730\u5740\u4ee5\u627e\u56de\u5bc6\u7801\u3002
                      • \u5bc6\u7801\uff1a\u7528\u4e8e\u767b\u5f55\u5e73\u53f0\u7684\u5bc6\u7801\uff0c\u5efa\u8bae\u5b9a\u671f\u4fee\u6539\u5bc6\u7801\u3002

                      \u5177\u4f53\u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b\uff1a

                      1. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684\u7528\u6237\u540d\u4f4d\u7f6e\uff0c\u9009\u62e9 \u4e2a\u4eba\u4e2d\u5fc3 \u3002

                      2. \u70b9\u51fb \u5b89\u5168\u8bbe\u7f6e \u9875\u7b7e\u3002\u586b\u5199\u60a8\u7684\u90ae\u7bb1\u5730\u5740\u6216\u4fee\u6539\u767b\u5f55\u5bc6\u7801\u3002

                      "},{"location":"end-user/ghippo/personal-center/ssh-key.html","title":"\u914d\u7f6e SSH \u516c\u94a5","text":"

                      \u672c\u6587\u8bf4\u660e\u5982\u4f55\u914d\u7f6e SSH \u516c\u94a5\u3002

                      "},{"location":"end-user/ghippo/personal-center/ssh-key.html#1-ssh","title":"\u6b65\u9aa4 1\uff1a\u67e5\u770b\u5df2\u5b58\u5728\u7684 SSH \u5bc6\u94a5","text":"

                      \u5728\u751f\u6210\u65b0\u7684 SSH \u5bc6\u94a5\u524d\uff0c\u8bf7\u5148\u786e\u8ba4\u662f\u5426\u9700\u8981\u4f7f\u7528\u672c\u5730\u5df2\u751f\u6210\u7684 SSH \u5bc6\u94a5\uff0cSSH \u5bc6\u94a5\u5bf9\u4e00\u822c\u5b58\u653e\u5728\u672c\u5730\u7528\u6237\u7684\u6839\u76ee\u5f55\u4e0b\u3002 Linux\u3001Mac \u8bf7\u76f4\u63a5\u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u67e5\u770b\u5df2\u5b58\u5728\u7684\u516c\u94a5\uff0cWindows \u7528\u6237\u5728 WSL\uff08\u9700\u8981 Windows 10 \u6216\u4ee5\u4e0a\uff09\u6216 Git Bash \u4e0b\u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u67e5\u770b\u5df2\u751f\u6210\u7684\u516c\u94a5\u3002

                      • ED25519 \u7b97\u6cd5\uff1a

                        cat ~/.ssh/id_ed25519.pub\n
                      • RSA \u7b97\u6cd5\uff1a

                        cat ~/.ssh/id_rsa.pub\n

                      \u5982\u679c\u8fd4\u56de\u4e00\u957f\u4e32\u4ee5 ssh-ed25519 \u6216 ssh-rsa \u5f00\u5934\u7684\u5b57\u7b26\u4e32\uff0c\u8bf4\u660e\u5df2\u5b58\u5728\u672c\u5730\u516c\u94a5\uff0c \u60a8\u53ef\u4ee5\u8df3\u8fc7\u6b65\u9aa4 2 \u751f\u6210 SSH \u5bc6\u94a5\uff0c\u76f4\u63a5\u64cd\u4f5c\u6b65\u9aa4 3\u3002

                      "},{"location":"end-user/ghippo/personal-center/ssh-key.html#2-ssh","title":"\u6b65\u9aa4 2\uff1a\u751f\u6210 SSH \u5bc6\u94a5","text":"

                      \u82e5\u6b65\u9aa4 1 \u672a\u8fd4\u56de\u6307\u5b9a\u7684\u5185\u5bb9\u5b57\u7b26\u4e32\uff0c\u8868\u793a\u672c\u5730\u6682\u65e0\u53ef\u7528 SSH \u5bc6\u94a5\uff0c\u9700\u8981\u751f\u6210\u65b0\u7684 SSH \u5bc6\u94a5\uff0c\u8bf7\u6309\u5982\u4e0b\u6b65\u9aa4\u64cd\u4f5c\uff1a

                      1. \u8bbf\u95ee\u7ec8\u7aef\uff08Windows \u8bf7\u4f7f\u7528 WSL \u6216 Git Bash\uff09\uff0c \u8fd0\u884c ssh-keygen -t\u3002

                      2. \u8f93\u5165\u5bc6\u94a5\u7b97\u6cd5\u7c7b\u578b\u548c\u53ef\u9009\u7684\u6ce8\u91ca\u3002

                        \u6ce8\u91ca\u4f1a\u51fa\u73b0\u5728 .pub \u6587\u4ef6\u4e2d\uff0c\u4e00\u822c\u53ef\u4f7f\u7528\u90ae\u7bb1\u4f5c\u4e3a\u6ce8\u91ca\u5185\u5bb9\u3002

                        • \u57fa\u4e8e ED25519 \u7b97\u6cd5\uff0c\u751f\u6210\u5bc6\u94a5\u5bf9\u547d\u4ee4\u5982\u4e0b\uff1a

                          ssh-keygen -t ed25519 -C \"<\u6ce8\u91ca\u5185\u5bb9>\"\n
                        • \u57fa\u4e8e RSA \u7b97\u6cd5\uff0c\u751f\u6210\u5bc6\u94a5\u5bf9\u547d\u4ee4\u5982\u4e0b\uff1a

                          ssh-keygen -t rsa -C \"<\u6ce8\u91ca\u5185\u5bb9>\"\n
                      3. \u70b9\u51fb\u56de\u8f66\uff0c\u9009\u62e9 SSH \u5bc6\u94a5\u751f\u6210\u8def\u5f84\u3002

                        \u4ee5 ED25519 \u7b97\u6cd5\u4e3a\u4f8b\uff0c\u9ed8\u8ba4\u8def\u5f84\u5982\u4e0b\uff1a

                        Generating public/private ed25519 key pair.\nEnter file in which to save the key (/home/user/.ssh/id_ed25519):\n

                        \u5bc6\u94a5\u9ed8\u8ba4\u751f\u6210\u8def\u5f84\uff1a/home/user/.ssh/id_ed25519\uff0c\u516c\u94a5\u4e0e\u4e4b\u5bf9\u5e94\u4e3a\uff1a/home/user/.ssh/id_ed25519.pub\u3002

                      4. \u8bbe\u7f6e\u4e00\u4e2a\u5bc6\u94a5\u53e3\u4ee4\u3002

                        Enter passphrase (empty for no passphrase):\nEnter same passphrase again:\n

                        \u53e3\u4ee4\u9ed8\u8ba4\u4e3a\u7a7a\uff0c\u60a8\u53ef\u4ee5\u9009\u62e9\u4f7f\u7528\u53e3\u4ee4\u4fdd\u62a4\u79c1\u94a5\u6587\u4ef6\u3002 \u5982\u679c\u60a8\u4e0d\u60f3\u5728\u6bcf\u6b21\u4f7f\u7528 SSH \u534f\u8bae\u8bbf\u95ee\u4ed3\u5e93\u65f6\uff0c\u90fd\u8981\u8f93\u5165\u7528\u4e8e\u4fdd\u62a4\u79c1\u94a5\u6587\u4ef6\u7684\u53e3\u4ee4\uff0c\u53ef\u4ee5\u5728\u521b\u5efa\u5bc6\u94a5\u65f6\uff0c\u8f93\u5165\u7a7a\u53e3\u4ee4\u3002

                      5. \u70b9\u51fb\u56de\u8f66\uff0c\u5b8c\u6210\u5bc6\u94a5\u5bf9\u521b\u5efa\u3002

                      "},{"location":"end-user/ghippo/personal-center/ssh-key.html#3","title":"\u6b65\u9aa4 3\uff1a\u62f7\u8d1d\u516c\u94a5","text":"

                      \u9664\u4e86\u5728\u547d\u4ee4\u884c\u6253\u5370\u51fa\u5df2\u751f\u6210\u7684\u516c\u94a5\u4fe1\u606f\u624b\u52a8\u590d\u5236\u5916\uff0c\u53ef\u4ee5\u4f7f\u7528\u547d\u4ee4\u62f7\u8d1d\u516c\u94a5\u5230\u7c98\u8d34\u677f\u4e0b\uff0c\u8bf7\u53c2\u8003\u64cd\u4f5c\u7cfb\u7edf\u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u8fdb\u884c\u62f7\u8d1d\u3002

• Windows (under WSL or Git Bash):

                        cat ~/.ssh/id_ed25519.pub | clip\n
• macOS:

                        tr -d '\\n'< ~/.ssh/id_ed25519.pub | pbcopy\n
                      • GNU/Linux (requires xclip):

                        xclip -sel clip < ~/.ssh/id_ed25519.pub\n
                      "},{"location":"end-user/ghippo/personal-center/ssh-key.html#4-ai","title":"\u6b65\u9aa4 4\uff1a\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e0a\u8bbe\u7f6e\u516c\u94a5","text":"
                      1. \u767b\u5f55\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0UI \u9875\u9762\uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u9009\u62e9 \u4e2a\u4eba\u4e2d\u5fc3 -> SSH \u516c\u94a5 \u3002

                      2. \u6dfb\u52a0\u751f\u6210\u7684 SSH \u516c\u94a5\u4fe1\u606f\u3002

                        1. SSH \u516c\u94a5\u5185\u5bb9\u3002

                        2. \u516c\u94a5\u6807\u9898\uff1a\u652f\u6301\u81ea\u5b9a\u4e49\u516c\u94a5\u540d\u79f0\uff0c\u7528\u4e8e\u533a\u5206\u7ba1\u7406\u3002

                        3. \u8fc7\u671f\u65f6\u95f4\uff1a\u8bbe\u7f6e\u516c\u94a5\u8fc7\u671f\u65f6\u95f4\uff0c\u5230\u671f\u540e\u516c\u94a5\u5c06\u81ea\u52a8\u5931\u6548\uff0c\u4e0d\u53ef\u4f7f\u7528\uff1b\u5982\u679c\u4e0d\u8bbe\u7f6e\uff0c\u5219\u6c38\u4e45\u6709\u6548\u3002

                      "},{"location":"end-user/ghippo/workspace/folder-permission.html","title":"\u6587\u4ef6\u5939\u6743\u9650\u8bf4\u660e","text":"

                      \u6587\u4ef6\u5939\u5177\u6709\u6743\u9650\u6620\u5c04\u80fd\u529b\uff0c\u80fd\u591f\u5c06\u7528\u6237/\u7528\u6237\u7ec4\u5728\u672c\u6587\u4ef6\u5939\u7684\u6743\u9650\u6620\u5c04\u5230\u5176\u4e0b\u7684\u5b50\u6587\u4ef6\u5939\u3001\u5de5\u4f5c\u7a7a\u95f4\u4ee5\u53ca\u8d44\u6e90\u4e0a\u3002

                      \u82e5\u7528\u6237/\u7528\u6237\u7ec4\u5728\u672c\u6587\u4ef6\u5939\u662f Folder Admin \u89d2\u8272\uff0c\u6620\u5c04\u5230\u5b50\u6587\u4ef6\u5939\u4ecd\u4e3a Folder Admin \u89d2\u8272\uff0c\u6620\u5c04\u5230\u5176\u4e0b\u7684\u5de5\u4f5c\u7a7a\u95f4\u5219\u4e3a Workspace Admin\uff1b \u82e5\u5728 \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 -> \u8d44\u6e90\u7ec4 \u4e2d\u7ed1\u5b9a\u4e86 Namespace\uff0c\u5219\u6620\u5c04\u540e\u8be5\u7528\u6237/\u7528\u6237\u7ec4\u540c\u65f6\u8fd8\u662f Namespace Admin\u3002

                      Note

                      \u6587\u4ef6\u5939\u7684\u6743\u9650\u6620\u5c04\u80fd\u529b\u4e0d\u4f1a\u4f5c\u7528\u5230\u5171\u4eab\u8d44\u6e90\u4e0a\uff0c\u56e0\u4e3a\u5171\u4eab\u662f\u5c06\u96c6\u7fa4\u7684\u4f7f\u7528\u6743\u9650\u5171\u4eab\u7ed9\u591a\u4e2a\u5de5\u4f5c\u7a7a\u95f4\uff0c\u800c\u4e0d\u662f\u5c06\u7ba1\u7406\u6743\u9650\u53d7\u8ba9\u7ed9\u5de5\u4f5c\u7a7a\u95f4\uff0c\u56e0\u6b64\u4e0d\u4f1a\u5b9e\u73b0\u6743\u9650\u7ee7\u627f\u548c\u89d2\u8272\u6620\u5c04\u3002

                      "},{"location":"end-user/ghippo/workspace/folder-permission.html#_2","title":"\u5e94\u7528\u573a\u666f","text":"

                      \u6587\u4ef6\u5939\u5177\u6709\u5c42\u7ea7\u80fd\u529b\uff0c\u56e0\u6b64\u5c06\u6587\u4ef6\u5939\u5bf9\u5e94\u4e8e\u4f01\u4e1a\u4e2d\u7684\u90e8\u95e8/\u4f9b\u5e94\u5546/\u9879\u76ee\u7b49\u5c42\u7ea7\u65f6\uff0c

                      • \u82e5\u7528\u6237/\u7528\u6237\u7ec4\u5728\u4e00\u7ea7\u90e8\u95e8\u5177\u6709\u7ba1\u7406\u6743\u9650\uff08Admin\uff09\uff0c\u5176\u4e0b\u7684\u4e8c\u7ea7\u3001\u4e09\u7ea7\u3001\u56db\u7ea7\u90e8\u95e8\u6216\u9879\u76ee\u540c\u6837\u5177\u6709\u7ba1\u7406\u6743\u9650\uff1b
                      • \u82e5\u7528\u6237/\u7528\u6237\u7ec4\u5728\u4e00\u7ea7\u90e8\u95e8\u5177\u6709\u4f7f\u7528\u6743\u9650\uff08Editor\uff09\uff0c\u5176\u4e0b\u7684\u4e8c\u7ea7\u3001\u4e09\u7ea7\u3001\u56db\u7ea7\u90e8\u95e8\u6216\u9879\u76ee\u540c\u6837\u5177\u6709\u4f7f\u7528\u6743\u9650\uff1b
                      • \u82e5\u7528\u6237/\u7528\u6237\u7ec4\u5728\u4e00\u7ea7\u90e8\u95e8\u5177\u6709\u53ea\u8bfb\u6743\u9650\uff08Viewer\uff09\uff0c\u5176\u4e0b\u7684\u4e8c\u7ea7\u3001\u4e09\u7ea7\u3001\u56db\u7ea7\u90e8\u95e8\u6216\u9879\u76ee\u540c\u6837\u5177\u6709\u53ea\u8bfb\u6743\u9650\u3002
                      \u5bf9\u8c61 \u64cd\u4f5c Folder Admin Folder Editor Folder Viewer \u5bf9\u6587\u4ef6\u5939\u672c\u8eab \u67e5\u770b \u2713 \u2713 \u2713 \u6388\u6743 \u2713 \u2717 \u2717 \u4fee\u6539\u522b\u540d \u2713 \u2717 \u2717 \u5bf9\u5b50\u6587\u4ef6\u5939 \u521b\u5efa \u2713 \u2717 \u2717 \u67e5\u770b \u2713 \u2713 \u2713 \u6388\u6743 \u2713 \u2717 \u2717 \u4fee\u6539\u522b\u540d \u2713 \u2717 \u2717 \u5bf9\u5176\u4e0b\u7684\u5de5\u4f5c\u7a7a\u95f4 \u521b\u5efa \u2713 \u2717 \u2717 \u67e5\u770b \u2713 \u2713 \u2713 \u6388\u6743 \u2713 \u2717 \u2717 \u4fee\u6539\u522b\u540d \u2713 \u2717 \u2717 \u5bf9\u5176\u4e0b\u7684\u5de5\u4f5c\u7a7a\u95f4 - \u8d44\u6e90\u7ec4 \u67e5\u770b \u2713 \u2713 \u2713 \u8d44\u6e90\u7ed1\u5b9a \u2713 \u2717 \u2717 \u89e3\u9664\u7ed1\u5b9a \u2713 \u2717 \u2717 \u5bf9\u5176\u4e0b\u7684\u5de5\u4f5c\u7a7a\u95f4 - \u5171\u4eab\u8d44\u6e90 \u67e5\u770b \u2713 \u2713 \u2713 \u65b0\u589e\u5171\u4eab \u2713 \u2717 \u2717 \u89e3\u9664\u5171\u4eab \u2713 \u2717 \u2717 \u8d44\u6e90\u9650\u989d \u2713 \u2717 \u2717"},{"location":"end-user/ghippo/workspace/folders.html","title":"\u521b\u5efa/\u5220\u9664\u6587\u4ef6\u5939","text":"

Folders support permission mapping: the permissions a user/user group holds on a folder are mapped onto the subfolders, workspaces, and resources beneath it.

Follow the steps below to create a folder.

1. Log in to the AI Computing Platform as a user with the admin/folder admin role, and click Global Management -> Workspace and Hierarchy at the bottom of the left navigation bar.

2. Click the Create Folder button in the upper-right corner.

3. Fill in the folder name, parent folder, and other information, then click OK to finish creating the folder.

Tip

After successful creation, the folder name appears in the tree structure on the left, where different icons distinguish workspaces from folders.

Note

Select a folder and click the \u2507 on its right to edit or delete it.

• When resources exist in the folder's resource groups or shared resources, the folder cannot be deleted; unbind all resources first.

• When the microservice engine module has connected registry resources under the folder, the folder cannot be deleted; remove all connected registries first.

                      "},{"location":"end-user/ghippo/workspace/quota.html","title":"\u8d44\u6e90\u914d\u989d\uff08Quota\uff09","text":"

                      \u5171\u4eab\u8d44\u6e90\u5e76\u975e\u610f\u5473\u7740\u88ab\u5171\u4eab\u8005\u53ef\u4ee5\u65e0\u9650\u5236\u5730\u4f7f\u7528\u88ab\u5171\u4eab\u7684\u8d44\u6e90\u3002 Admin\u3001Kpanda Owner \u548c Workspace Admin \u53ef\u4ee5\u901a\u8fc7\u5171\u4eab\u8d44\u6e90\u4e2d\u7684 \u8d44\u6e90\u914d\u989d \u529f\u80fd\u9650\u5236\u67d0\u4e2a\u7528\u6237\u7684\u6700\u5927\u4f7f\u7528\u989d\u5ea6\u3002 \u82e5\u4e0d\u9650\u5236\uff0c\u5219\u8868\u793a\u53ef\u4ee5\u65e0\u9650\u5236\u4f7f\u7528\u3002

                      • CPU \u8bf7\u6c42\uff08Core\uff09
                      • CPU \u9650\u5236\uff08Core\uff09
                      • \u5185\u5b58\u8bf7\u6c42\uff08MB\uff09
                      • \u5185\u5b58\u9650\u5236\uff08MB\uff09
                      • \u5b58\u50a8\u8bf7\u6c42\u603b\u91cf\uff08GB\uff09
                      • \u5b58\u50a8\u5377\u58f0\u660e\uff08\u4e2a\uff09
                      • GPU \u7c7b\u578b\u3001\u89c4\u683c\u3001\u6570\u91cf\uff08\u5305\u62ec\u4f46\u4e0d\u9650\u4e8e Nvidia\u3001Ascend\u3001lluvatar\u7b49GPU\u5361\u7c7b\u578b\uff09
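
When such a quota is enforced on a namespace, it corresponds conceptually to a Kubernetes ResourceQuota object. The following is a minimal sketch only; the object name, namespace, and all values are illustrative, and the exact object the platform creates may differ:

  apiVersion: v1\nkind: ResourceQuota\nmetadata:\n  name: quota-ws01           # illustrative name\n  namespace: ns01            # illustrative namespace\nspec:\n  hard:\n    requests.cpu: \"100\"            # CPU request (cores)\n    limits.cpu: \"200\"              # CPU limit (cores)\n    requests.memory: 64Gi          # memory request\n    limits.memory: 128Gi           # memory limit\n    requests.storage: 100Gi        # total storage request\n    persistentvolumeclaims: \"10\"   # PVC count\n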

A resource (cluster) can be shared by multiple workspaces, and a workspace can use resources from multiple shared clusters at the same time.

                      "},{"location":"end-user/ghippo/workspace/quota.html#_1","title":"\u8d44\u6e90\u7ec4\u548c\u5171\u4eab\u8d44\u6e90","text":"

                      \u5171\u4eab\u8d44\u6e90\u548c\u8d44\u6e90\u7ec4\u4e2d\u7684\u96c6\u7fa4\u8d44\u6e90\u5747\u6765\u81ea\u5bb9\u5668\u7ba1\u7406\uff0c\u4f46\u662f\u96c6\u7fa4\u7ed1\u5b9a\u548c\u5171\u4eab\u7ed9\u540c\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u5c06\u4f1a\u4ea7\u751f\u4e24\u79cd\u622a\u7136\u4e0d\u540c\u7684\u6548\u679c\u3002

                      1. \u7ed1\u5b9a\u8d44\u6e90

                        \u4f7f\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u7684\u7528\u6237/\u7528\u6237\u7ec4\u5177\u6709\u8be5\u96c6\u7fa4\u7684\u5168\u90e8\u7ba1\u7406\u548c\u4f7f\u7528\u6743\u9650\uff0cWorkspace Admin \u5c06\u88ab\u6620\u5c04\u4e3a Cluster Admin\u3002 Workspace Admin \u80fd\u591f\u8fdb\u5165\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7ba1\u7406\u8be5\u96c6\u7fa4\u3002

                        Note

                        \u5f53\u524d\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u6682\u65e0 Cluster Editor \u548c Cluster Viewer \u89d2\u8272\uff0c\u56e0\u6b64 Workspace Editor\u3001Workspace Viewer \u8fd8\u65e0\u6cd5\u6620\u5c04\u3002

                      2. \u65b0\u589e\u5171\u4eab\u8d44\u6e90

                        \u4f7f\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u7684\u7528\u6237/\u7528\u6237\u7ec4\u5177\u6709\u8be5\u96c6\u7fa4\u8d44\u6e90\u7684\u4f7f\u7528\u6743\u9650\uff0c\u8fd9\u4e9b\u8d44\u6e90\u53ef\u4ee5\u5728\u521b\u5efa\u547d\u540d\u7a7a\u95f4\uff08Namespace\uff09\u65f6\u4f7f\u7528\u3002

                        \u4e0e\u8d44\u6e90\u7ec4\u4e0d\u540c\uff0c\u5c06\u96c6\u7fa4\u5171\u4eab\u5230\u5de5\u4f5c\u7a7a\u95f4\u65f6\uff0c\u7528\u6237\u5728\u5de5\u4f5c\u7a7a\u95f4\u7684\u89d2\u8272\u4e0d\u4f1a\u6620\u5c04\u5230\u8d44\u6e90\u4e0a\uff0c\u56e0\u6b64 Workspace Admin \u4e0d\u4f1a\u88ab\u6620\u5c04\u4e3a Cluster admin\u3002

                      \u672c\u8282\u5c55\u793a 3 \u4e2a\u4e0e\u8d44\u6e90\u914d\u989d\u6709\u5173\u7684\u573a\u666f\u3002

                      "},{"location":"end-user/ghippo/workspace/quota.html#_2","title":"\u521b\u5efa\u547d\u540d\u7a7a\u95f4","text":"

                      \u521b\u5efa\u547d\u540d\u7a7a\u95f4\u65f6\u4f1a\u6d89\u53ca\u5230\u8d44\u6e90\u914d\u989d\u3002

                      1. \u5728\u5de5\u4f5c\u7a7a\u95f4 ws01 \u65b0\u589e\u4e00\u4e2a\u5171\u4eab\u96c6\u7fa4\u3002

                      2. \u5728\u5e94\u7528\u5de5\u4f5c\u53f0\u9009\u62e9\u5de5\u4f5c\u7a7a\u95f4 ws01 \u548c\u5171\u4eab\u96c6\u7fa4\uff0c\u521b\u5efa\u547d\u540d\u7a7a\u95f4 ns01\u3002

                        • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u4e2d\u672a\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5219\u521b\u5efa\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4e0d\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\u3002
                        • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u4e2d\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff08\u4f8b\u5982 CPU \u8bf7\u6c42 = 100 core\uff09\uff0c\u5219\u521b\u5efa\u547d\u540d\u7a7a\u95f4\u65f6 CPU \u8bf7\u6c42 \u2264 100 core \u3002
                      "},{"location":"end-user/ghippo/workspace/quota.html#_3","title":"\u547d\u540d\u7a7a\u95f4\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4","text":"

                      \u524d\u63d0\uff1a\u5de5\u4f5c\u7a7a\u95f4 ws01 \u5df2\u65b0\u589e\u5171\u4eab\u96c6\u7fa4\uff0c\u64cd\u4f5c\u8005\u4e3a Workspace Admin + Kpanda Owner \u6216 Admin \u89d2\u8272\u3002

                      \u4ee5\u4e0b\u4e24\u79cd\u7ed1\u5b9a\u65b9\u5f0f\u7684\u6548\u679c\u76f8\u540c\u3002

                      • \u5728\u5bb9\u5668\u7ba1\u7406\u4e2d\u5c06\u521b\u5efa\u7684\u547d\u540d\u7a7a\u95f4 ns01 \u7ed1\u5b9a\u5230 ws01

                        • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u672a\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5219\u547d\u540d\u7a7a\u95f4 ns01 \u65e0\u8bba\u662f\u5426\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5747\u53ef\u6210\u529f\u7ed1\u5b9a\u3002
                        • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d CPU \u8bf7\u6c42 = 100 core \uff0c\u5219\u547d\u540d\u7a7a\u95f4 ns01 \u5fc5\u987b\u6ee1\u8db3 CPU \u8bf7\u6c42 \u2264 100 core \u624d\u80fd\u7ed1\u5b9a\u6210\u529f\u3002
                      • \u5728\u5168\u5c40\u7ba1\u7406\u4e2d\uff0c\u5c06\u547d\u540d\u7a7a\u95f4 ns01 \u7ed1\u5b9a\u5230 ws01

                        • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u672a\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5219\u547d\u540d\u7a7a\u95f4 ns01 \u65e0\u8bba\u662f\u5426\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5747\u53ef\u6210\u529f\u7ed1\u5b9a\u3002
                        • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d CPU \u8bf7\u6c42 = 100 core \uff0c\u5219\u547d\u540d\u7a7a\u95f4 ns01 \u5fc5\u987b\u6ee1\u8db3 CPU \u8bf7\u6c42 \u2264 100 core \u624d\u80fd\u7ed1\u5b9a\u6210\u529f\u3002
                      "},{"location":"end-user/ghippo/workspace/quota.html#_4","title":"\u4ece\u5de5\u4f5c\u7a7a\u95f4\u89e3\u7ed1\u547d\u540d\u7a7a\u95f4","text":"

                      \u4ee5\u4e0b\u4e24\u79cd\u89e3\u7ed1\u65b9\u5f0f\u7684\u6548\u679c\u76f8\u540c\u3002

                      • \u5728\u5bb9\u5668\u7ba1\u7406\u4e2d\u5c06\u547d\u540d\u7a7a\u95f4 ns01 \u4ece\u5de5\u4f5c\u7a7a\u95f4 ws01 \u89e3\u7ed1

                        • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u4e2d\u672a\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5219\u547d\u540d\u7a7a\u95f4 ns01 \u65e0\u8bba\u662f\u5426\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u89e3\u7ed1\u540e\u5747\u4e0d\u4f1a\u5bf9\u8d44\u6e90\u914d\u989d\u4ea7\u751f\u5f71\u54cd\u3002
                        • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d CPU \u8bf7\u6c42 = 100 core \uff0c\u547d\u540d\u7a7a\u95f4 ns01 \u4e5f\u8bbe\u7f6e\u4e86\u8d44\u6e90\u914d\u989d\uff0c\u5219\u89e3\u7ed1\u540e\u5c06\u91ca\u653e\u76f8\u5e94\u7684\u8d44\u6e90\u989d\u5ea6\u3002
                      • \u5728\u5168\u5c40\u7ba1\u7406\u4e2d\u5c06\u547d\u540d\u7a7a\u95f4 ns01 \u4ece\u5de5\u4f5c\u7a7a\u95f4 ws01 \u89e3\u7ed1

                        • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u672a\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5219\u547d\u540d\u7a7a\u95f4 ns01 \u65e0\u8bba\u662f\u5426\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u89e3\u7ed1\u540e\u5747\u4e0d\u4f1a\u5bf9\u8d44\u6e90\u914d\u989d\u4ea7\u751f\u5f71\u54cd\u3002
                        • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d CPU \u8bf7\u6c42 = 100 core \uff0c\u547d\u540d\u7a7a\u95f4 ns01 \u4e5f\u8bbe\u7f6e\u4e86\u8d44\u6e90\u914d\u989d\uff0c\u5219\u89e3\u7ed1\u540e\u5c06\u91ca\u653e\u76f8\u5e94\u7684\u8d44\u6e90\u989d\u5ea6\u3002
                      "},{"location":"end-user/ghippo/workspace/res-gp-and-shared-res.html","title":"\u8d44\u6e90\u7ec4\u4e0e\u5171\u4eab\u8d44\u6e90\u7684\u533a\u522b","text":"

                      \u8d44\u6e90\u7ec4\u4e0e\u5171\u4eab\u8d44\u6e90\u5747\u652f\u6301\u7ed1\u5b9a\u96c6\u7fa4\uff0c\u4f46\u4f7f\u7528\u4e0a\u5b58\u5728\u5f88\u5927\u533a\u522b\u3002

                      "},{"location":"end-user/ghippo/workspace/res-gp-and-shared-res.html#_2","title":"\u4f7f\u7528\u573a\u666f\u533a\u522b","text":"
                      • \u8d44\u6e90\u7ec4\u7ed1\u5b9a\u96c6\u7fa4\uff1a\u8d44\u6e90\u7ec4\u7ed1\u5b9a\u96c6\u7fa4\u901a\u5e38\u88ab\u7528\u6765\u6279\u91cf\u6388\u6743\u3002\u8d44\u6e90\u7ec4\u7ed1\u5b9a\u96c6\u7fa4\u540e\uff0c \u5de5\u4f5c\u7a7a\u95f4\u7ba1\u7406\u5458\u5c06\u88ab\u6620\u5c04\u4e3a\u96c6\u7fa4\u7ba1\u7406\u5458\uff0c\u80fd\u591f\u7ba1\u7406\u5e76\u4f7f\u7528\u96c6\u7fa4\u8d44\u6e90\u3002
                      • \u5171\u4eab\u8d44\u6e90\u7ed1\u5b9a\u96c6\u7fa4\uff1a\u8d44\u6e90\u5171\u4eab\u7ed1\u5b9a\u96c6\u7fa4\u901a\u5e38\u88ab\u7528\u6765\u505a\u8d44\u6e90\u9650\u989d\u3002 \u5178\u578b\u7684\u573a\u666f\u662f\u5e73\u53f0\u7ba1\u7406\u5458\u5c06\u96c6\u7fa4\u5206\u914d\u7ed9\u4e00\u7ea7\u4f9b\u5e94\u5546\u540e\uff0c\u518d\u7531\u4e00\u7ea7\u4f9b\u5e94\u5546\u5206\u914d\u7ed9\u4e8c\u7ea7\u4f9b\u5e94\u5546\u5e76\u5bf9\u4e8c\u7ea7\u4f9b\u5e94\u5546\u8fdb\u884c\u8d44\u6e90\u9650\u989d\u3002

                      \u8bf4\u660e\uff1a\u5728\u8be5\u573a\u666f\u4e2d\uff0c\u9700\u8981\u5e73\u53f0\u7ba1\u7406\u5458\u5bf9\u4e8c\u7ea7\u4f9b\u5e94\u5546\u8fdb\u884c\u8d44\u6e90\u9650\u5236\uff0c\u6682\u65f6\u8fd8\u4e0d\u652f\u6301\u4e00\u7ea7\u4f9b\u5e94\u5546\u9650\u5236\u4e8c\u7ea7\u4f9b\u5e94\u5546\u7684\u96c6\u7fa4\u989d\u5ea6\u3002

                      "},{"location":"end-user/ghippo/workspace/res-gp-and-shared-res.html#_3","title":"\u96c6\u7fa4\u989d\u5ea6\u7684\u4f7f\u7528\u533a\u522b","text":"
                      • \u8d44\u6e90\u7ec4\u7ed1\u5b9a\u96c6\u7fa4\uff1a\u5de5\u4f5c\u7a7a\u95f4\u7684\u7ba1\u7406\u5458\u5c06\u88ab\u6620\u5c04\u4e3a\u8be5\u96c6\u7fa4\u7684\u7ba1\u7406\u5458\uff0c\u76f8\u5f53\u4e8e\u5728\u5bb9\u5668\u7ba1\u7406-\u6743\u9650\u7ba1\u7406\u4e2d\u88ab\u6388\u4e88 Cluster Admin \u89d2\u8272\uff0c \u80fd\u591f\u65e0\u9650\u5236\u652f\u914d\u8be5\u96c6\u7fa4\u8d44\u6e90\uff0c\u7ba1\u7406\u8282\u70b9\u7b49\u91cd\u8981\u5185\u5bb9\uff0c\u4e14\u8d44\u6e90\u7ec4\u4e0d\u80fd\u591f\u88ab\u8d44\u6e90\u9650\u989d\u3002
                      • \u5171\u4eab\u8d44\u6e90\u7ed1\u5b9a\u8d44\u6e90\uff1a\u5de5\u4f5c\u7a7a\u95f4\u7ba1\u7406\u5458\u4ec5\u80fd\u591f\u4f7f\u7528\u96c6\u7fa4\u4e2d\u7684\u989d\u5ea6\u5728\u5e94\u7528\u5de5\u4f5c\u53f0\u521b\u5efa\u547d\u540d\u7a7a\u95f4\uff0c\u4e0d\u5177\u5907\u96c6\u7fa4\u7684\u7ba1\u7406\u6743\u9650\u3002 \u82e5\u5bf9\u8be5\u5de5\u4f5c\u7a7a\u95f4\u9650\u5236\u989d\u5ea6\uff0c\u5219\u5de5\u4f5c\u7a7a\u95f4\u7ba1\u7406\u4ec5\u80fd\u591f\u5728\u989d\u5ea6\u8303\u56f4\u5185\u521b\u5efa\u5e76\u4f7f\u7528\u547d\u540d\u7a7a\u95f4\u3002
                      "},{"location":"end-user/ghippo/workspace/res-gp-and-shared-res.html#_4","title":"\u8d44\u6e90\u7c7b\u578b\u7684\u533a\u522b","text":"
                      • \u8d44\u6e90\u7ec4\uff1a\u80fd\u591f\u7ed1\u5b9a\u96c6\u7fa4\u3001\u96c6\u7fa4-\u547d\u540d\u7a7a\u95f4\u3001\u591a\u4e91\u3001\u591a\u4e91-\u547d\u540d\u7a7a\u95f4\u3001\u7f51\u683c\u3001\u7f51\u683c-\u547d\u540d\u7a7a\u95f4
                      • \u5171\u4eab\u8d44\u6e90\uff1a\u4ec5\u80fd\u591f\u7ed1\u5b9a\u96c6\u7fa4
                      "},{"location":"end-user/ghippo/workspace/res-gp-and-shared-res.html#_5","title":"\u8d44\u6e90\u7ec4\u4e0e\u5171\u4eab\u8d44\u6e90\u7684\u76f8\u540c\u70b9","text":"

                      \u5728\u8d44\u6e90\u7ec4/\u5171\u4eab\u8d44\u6e90\u7ed1\u5b9a\u96c6\u7fa4\u540e\u90fd\u53ef\u4ee5\u524d\u5f80\u5e94\u7528\u5de5\u4f5c\u53f0\u521b\u5efa\u547d\u540d\u7a7a\u95f4\uff0c\u521b\u5efa\u540e\u547d\u540d\u7a7a\u95f4\u5c06\u81ea\u52a8\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4\u3002

                      "},{"location":"end-user/ghippo/workspace/workspace.html","title":"\u521b\u5efa/\u5220\u9664\u5de5\u4f5c\u7a7a\u95f4","text":"

                      \u5de5\u4f5c\u7a7a\u95f4\u662f\u4e00\u79cd\u8d44\u6e90\u8303\u7574\uff0c\u4ee3\u8868\u4e00\u79cd\u8d44\u6e90\u5c42\u7ea7\u5173\u7cfb\u3002 \u5de5\u4f5c\u7a7a\u95f4\u53ef\u4ee5\u5305\u542b\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u3001\u6ce8\u518c\u4e2d\u5fc3\u7b49\u8d44\u6e90\u3002 \u901a\u5e38\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u5bf9\u5e94\u4e00\u4e2a\u9879\u76ee\uff0c\u53ef\u4ee5\u4e3a\u6bcf\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u4e0d\u540c\u7684\u8d44\u6e90\uff0c\u6307\u6d3e\u4e0d\u540c\u7684\u7528\u6237\u548c\u7528\u6237\u7ec4\u3002

                      \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u521b\u5efa\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u3002

                      1. \u4f7f\u7528 admin/folder admin \u89d2\u8272\u7684\u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u5e95\u90e8\u7684 \u5168\u5c40\u7ba1\u7406 -> \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \u3002

                      2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4 \u6309\u94ae\u3002

                      3. \u586b\u5199\u5de5\u4f5c\u7a7a\u95f4\u540d\u79f0\u3001\u6240\u5c5e\u6587\u4ef6\u5939\u7b49\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \uff0c\u5b8c\u6210\u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4\u3002

                      Tip

                      \u521b\u5efa\u6210\u529f\u540e\u5de5\u4f5c\u7a7a\u95f4\u540d\u79f0\u5c06\u663e\u793a\u5728\u5de6\u4fa7\u7684\u6811\u72b6\u7ed3\u6784\u4e2d\uff0c\u4ee5\u4e0d\u540c\u7684\u56fe\u6807\u8868\u793a\u6587\u4ef6\u5939\u548c\u5de5\u4f5c\u7a7a\u95f4\u3002

                      Note

                      \u9009\u4e2d\u67d0\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u6216\u6587\u4ef6\u5939\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 ... \u53ef\u4ee5\u8fdb\u884c\u7f16\u8f91\u6216\u5220\u9664\u3002

                      • \u5f53\u8be5\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u8d44\u6e90\u7ec4\u3001\u5171\u4eab\u8d44\u6e90\u4e2d\u5b58\u5728\u8d44\u6e90\u65f6\uff0c\u8be5\u5de5\u4f5c\u7a7a\u95f4\u65e0\u6cd5\u88ab\u5220\u9664\uff0c\u9700\u8981\u5c06\u6240\u6709\u8d44\u6e90\u89e3\u7ed1\u540e\u518d\u5220\u9664\u3002
                      • \u5f53\u5fae\u670d\u52a1\u5f15\u64ce\u6a21\u5757\u5728\u8be5\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u5b58\u5728\u63a5\u5165\u6ce8\u518c\u4e2d\u5fc3\u8d44\u6e90\u65f6\uff0c\u8be5\u5de5\u4f5c\u7a7a\u95f4\u65e0\u6cd5\u88ab\u5220\u9664\uff0c\u9700\u8981\u5c06\u6240\u6709\u63a5\u5165\u6ce8\u518c\u4e2d\u5fc3\u79fb\u9664\u540e\u518d\u5220\u9664\u5de5\u4f5c\u7a7a\u95f4\u3002
                      • \u5f53\u955c\u50cf\u4ed3\u5e93\u6a21\u5757\u5728\u8be5\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u5b58\u5728\u955c\u50cf\u7a7a\u95f4\u6216\u96c6\u6210\u4ed3\u5e93\u65f6\uff0c\u8be5\u5de5\u4f5c\u7a7a\u95f4\u65e0\u6cd5\u88ab\u5220\u9664\uff0c\u9700\u8981\u5c06\u955c\u50cf\u7a7a\u95f4\u89e3\u7ed1\uff0c\u5c06\u4ed3\u5e93\u96c6\u6210\u5220\u9664\u540e\u518d\u5220\u9664\u5de5\u4f5c\u7a7a\u95f4\u3002
                      "},{"location":"end-user/ghippo/workspace/ws-folder.html","title":"\u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7","text":"

                      \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \u662f\u4e00\u4e2a\u5177\u6709\u5c42\u7ea7\u7684\u8d44\u6e90\u9694\u79bb\u548c\u8d44\u6e90\u5206\u7ec4\u7279\u6027\uff0c\u4e3b\u8981\u89e3\u51b3\u8d44\u6e90\u7edf\u4e00\u6388\u6743\u3001\u8d44\u6e90\u5206\u7ec4\u4ee5\u53ca\u8d44\u6e90\u9650\u989d\u95ee\u9898\u3002

                      \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \u6709\u4e24\u4e2a\u6982\u5ff5\uff1a\u5de5\u4f5c\u7a7a\u95f4\u548c\u6587\u4ef6\u5939\u3002

                      "},{"location":"end-user/ghippo/workspace/ws-folder.html#_2","title":"\u5de5\u4f5c\u7a7a\u95f4","text":"

                      \u5de5\u4f5c\u7a7a\u95f4\u53ef\u901a\u8fc7 \u6388\u6743 \u3001 \u8d44\u6e90\u7ec4 \u548c \u5171\u4eab\u8d44\u6e90 \u6765\u7ba1\u7406\u8d44\u6e90\uff0c\u4f7f\u7528\u6237\uff08\u7528\u6237\u7ec4\uff09\u4e4b\u95f4\u80fd\u591f\u5171\u4eab\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u7684\u8d44\u6e90\u3002

                      • \u8d44\u6e90

                        \u8d44\u6e90\u5904\u4e8e\u8d44\u6e90\u7ba1\u7406\u6a21\u5757\u5c42\u7ea7\u7ed3\u6784\u7684\u6700\u4f4e\u5c42\u7ea7\uff0c\u8d44\u6e90\u5305\u62ec Cluster\u3001Namespace\u3001Pipeline\u3001\u7f51\u5173\u7b49\u3002 \u6240\u6709\u8fd9\u4e9b\u8d44\u6e90\u7684\u7236\u7ea7\u53ea\u80fd\u662f\u5de5\u4f5c\u7a7a\u95f4\uff0c\u800c\u5de5\u4f5c\u7a7a\u95f4\u4f5c\u4e3a\u8d44\u6e90\u5bb9\u5668\u662f\u4e00\u79cd\u8d44\u6e90\u5206\u7ec4\u5355\u4f4d\u3002

                      • \u5de5\u4f5c\u7a7a\u95f4

                        \u5de5\u4f5c\u7a7a\u95f4\u901a\u5e38\u4ee3\u6307\u4e00\u4e2a\u9879\u76ee\u6216\u73af\u5883\uff0c\u6bcf\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u7684\u8d44\u6e90\u76f8\u5bf9\u4e8e\u5176\u4ed6\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u7684\u8d44\u6e90\u65f6\u903b\u8f91\u9694\u79bb\u7684\u3002 \u60a8\u53ef\u4ee5\u901a\u8fc7\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u7684\u6388\u6743\uff0c\u6388\u4e88\u7528\u6237\uff08\u7528\u6237\u7ec4\uff09\u540c\u4e00\u7ec4\u8d44\u6e90\u7684\u4e0d\u540c\u8bbf\u95ee\u6743\u9650\u3002

                        \u4ece\u5c42\u6b21\u7ed3\u6784\u7684\u5e95\u5c42\u7b97\u8d77\uff0c\u5de5\u4f5c\u7a7a\u95f4\u4f4d\u4e8e\u7b2c\u4e00\u5c42\uff0c\u4e14\u5305\u542b\u8d44\u6e90\u3002 \u9664\u5171\u4eab\u8d44\u6e90\u5916\uff0c\u6240\u6709\u8d44\u6e90\u6709\u4e14\u4ec5\u6709\u4e00\u4e2a\u7236\u9879\u3002\u6240\u6709\u5de5\u4f5c\u7a7a\u95f4\u4e5f\u6709\u4e14\u4ec5\u6709\u4e00\u4e2a\u7236\u7ea7\u6587\u4ef6\u5939\u3002

                        \u8d44\u6e90\u901a\u8fc7\u5de5\u4f5c\u7a7a\u95f4\u8fdb\u884c\u5206\u7ec4\uff0c\u800c\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u5b58\u5728\u4e24\u79cd\u5206\u7ec4\u6a21\u5f0f\uff0c\u5206\u522b\u662f \u8d44\u6e90\u7ec4 \u548c \u5171\u4eab\u8d44\u6e90 \u3002

                      • \u8d44\u6e90\u7ec4

                        \u4e00\u4e2a\u8d44\u6e90\u53ea\u80fd\u52a0\u5165\u4e00\u4e2a\u8d44\u6e90\u7ec4\uff0c\u8d44\u6e90\u7ec4\u4e0e\u5de5\u4f5c\u7a7a\u95f4\u4e00\u4e00\u5bf9\u5e94\u3002 \u8d44\u6e90\u88ab\u52a0\u5165\u5230\u8d44\u6e90\u7ec4\u540e\uff0cWorkspace Admin \u5c06\u83b7\u5f97\u8d44\u6e90\u7684\u7ba1\u7406\u6743\u9650\uff0c\u76f8\u5f53\u4e8e\u8be5\u8d44\u6e90\u7684\u6240\u6709\u8005\u3002

                      • \u5171\u4eab\u8d44\u6e90

                        \u800c\u5bf9\u4e8e\u5171\u4eab\u8d44\u6e90\u6765\u8bf4\uff0c\u591a\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u53ef\u4ee5\u5171\u4eab\u540c\u4e00\u4e2a\u6216\u8005\u591a\u4e2a\u8d44\u6e90\u3002 \u8d44\u6e90\u7684\u6240\u6709\u8005\uff0c\u53ef\u4ee5\u9009\u62e9\u5c06\u81ea\u5df1\u62e5\u6709\u7684\u8d44\u6e90\u5171\u4eab\u7ed9\u5de5\u4f5c\u7a7a\u95f4\u4f7f\u7528\uff0c\u4e00\u822c\u5171\u4eab\u65f6\u8d44\u6e90\u6240\u6709\u8005\u4f1a\u9650\u5236\u88ab\u5171\u4eab\u5de5\u4f5c\u7a7a\u95f4\u80fd\u591f\u4f7f\u7528\u7684\u8d44\u6e90\u989d\u5ea6\u3002 \u8d44\u6e90\u88ab\u5171\u4eab\u540e\uff0cWorkspace Admin \u4ec5\u5177\u6709\u8d44\u6e90\u9650\u989d\u4e0b\u7684\u8d44\u6e90\u4f7f\u7528\u6743\u9650\uff0c\u65e0\u6cd5\u7ba1\u7406\u8d44\u6e90\u6216\u8005\u8c03\u6574\u5de5\u4f5c\u7a7a\u95f4\u80fd\u591f\u4f7f\u7528\u7684\u8d44\u6e90\u91cf\u3002

                        \u540c\u65f6\u5171\u4eab\u8d44\u6e90\u5bf9\u4e8e\u8d44\u6e90\u672c\u8eab\u4e5f\u5177\u6709\u4e00\u5b9a\u7684\u8981\u6c42\uff0c\u53ea\u6709 Cluster\uff08\u96c6\u7fa4\uff09\u8d44\u6e90\u53ef\u4ee5\u88ab\u5171\u4eab\u3002 Cluster Admin \u80fd\u591f\u5c06 Cluster \u8d44\u6e90\u5206\u4eab\u7ed9\u4e0d\u540c\u7684\u5de5\u4f5c\u7a7a\u95f4\u4f7f\u7528\uff0c\u5e76\u4e14\u9650\u5236\u5de5\u4f5c\u7a7a\u95f4\u5728\u6b64 Cluster \u4e0a\u7684\u4f7f\u7528\u989d\u5ea6\u3002

                        Workspace Admin \u5728\u8d44\u6e90\u9650\u989d\u5185\u80fd\u591f\u521b\u5efa\u591a\u4e2a Namespace\uff0c\u4f46\u662f Namespace \u7684\u8d44\u6e90\u989d\u5ea6\u603b\u548c\u4e0d\u80fd\u8d85\u8fc7 Cluster \u5728\u8be5\u5de5\u4f5c\u7a7a\u95f4\u7684\u8d44\u6e90\u9650\u989d\u3002 \u5bf9\u4e8e Kubernetes \u8d44\u6e90\uff0c\u5f53\u524d\u80fd\u591f\u5206\u4eab\u7684\u8d44\u6e90\u7c7b\u578b\u4ec5\u6709 Cluster\u3002

                      "},{"location":"end-user/ghippo/workspace/ws-folder.html#_3","title":"\u6587\u4ef6\u5939","text":"

                      \u6587\u4ef6\u5939\u53ef\u7528\u4e8e\u6784\u5efa\u4f01\u4e1a\u4e1a\u52a1\u5c42\u7ea7\u5173\u7cfb\u3002

                      • \u6587\u4ef6\u5939\u662f\u5728\u5de5\u4f5c\u7a7a\u95f4\u57fa\u7840\u4e4b\u4e0a\u7684\u8fdb\u4e00\u6b65\u5206\u7ec4\u673a\u5236\uff0c\u5177\u6709\u5c42\u7ea7\u7ed3\u6784\u3002 \u4e00\u4e2a\u6587\u4ef6\u5939\u53ef\u4ee5\u5305\u542b\u5de5\u4f5c\u7a7a\u95f4\u3001\u5176\u4ed6\u6587\u4ef6\u5939\u6216\u4e24\u8005\u7684\u7ec4\u5408\uff0c\u80fd\u591f\u5f62\u6210\u6811\u72b6\u7684\u7ec4\u7ec7\u5173\u7cfb\u3002

                      • \u501f\u52a9\u6587\u4ef6\u5939\u60a8\u53ef\u4ee5\u6620\u5c04\u4f01\u4e1a\u4e1a\u52a1\u5c42\u7ea7\u5173\u7cfb\uff0c\u6309\u7167\u90e8\u95e8\u5bf9\u5de5\u4f5c\u7a7a\u95f4\u8fdb\u884c\u5206\u7ec4\u3002 \u6587\u4ef6\u5939\u4e0d\u76f4\u63a5\u4e0e\u8d44\u6e90\u6302\u94a9\uff0c\u800c\u662f\u901a\u8fc7\u5de5\u4f5c\u7a7a\u95f4\u95f4\u63a5\u5b9e\u73b0\u8d44\u6e90\u5206\u7ec4\u3002

                      • \u6587\u4ef6\u5939\u6709\u4e14\u4ec5\u6709\u4e00\u4e2a\u7236\u7ea7\u6587\u4ef6\u5939\uff0c\u800c\u6839\u6587\u4ef6\u5939\u662f\u5c42\u6b21\u7ed3\u6784\u7684\u6700\u9ad8\u5c42\u7ea7\u3002 \u6839\u6587\u4ef6\u5939\u6ca1\u6709\u7236\u7ea7\uff0c\u6587\u4ef6\u5939\u548c\u5de5\u4f5c\u7a7a\u95f4\u5747\u6302\u9760\u5230\u6839\u6587\u4ef6\u5939\u4e0b\u3002

                      \u53e6\u5916\uff0c\u7528\u6237\uff08\u7528\u6237\u7ec4\uff09\u5728\u6587\u4ef6\u5939\u4e2d\u80fd\u591f\u901a\u8fc7\u5c42\u7ea7\u7ed3\u6784\u7ee7\u627f\u6765\u81ea\u7236\u9879\u7684\u6743\u9650\u3002 \u7528\u6237\u5728\u5c42\u6b21\u7ed3\u6784\u4e2d\u7684\u6743\u9650\u6765\u81ea\u5f53\u524d\u5c42\u7ea7\u7684\u6743\u9650\u4ee5\u53ca\u7ee7\u627f\u5176\u7236\u9879\u6743\u9650\u7684\u7ec4\u5408\u7ed3\u679c\uff0c\u6743\u9650\u4e4b\u95f4\u662f\u52a0\u5408\u5173\u7cfb\u4e0d\u5b58\u5728\u4e92\u65a5\u3002

                      "},{"location":"end-user/ghippo/workspace/ws-permission.html","title":"\u5de5\u4f5c\u7a7a\u95f4\u6743\u9650\u8bf4\u660e","text":"

                      \u5de5\u4f5c\u7a7a\u95f4\u5177\u6709\u6743\u9650\u6620\u5c04\u548c\u8d44\u6e90\u9694\u79bb\u80fd\u529b\uff0c\u80fd\u591f\u5c06\u7528\u6237/\u7528\u6237\u7ec4\u5728\u5de5\u4f5c\u7a7a\u95f4\u7684\u6743\u9650\u6620\u5c04\u5230\u5176\u4e0b\u7684\u8d44\u6e90\u4e0a\u3002 \u82e5\u7528\u6237/\u7528\u6237\u7ec4\u5728\u5de5\u4f5c\u7a7a\u95f4\u662f Workspace Admin \u89d2\u8272\uff0c\u540c\u65f6\u5de5\u4f5c\u7a7a\u95f4-\u8d44\u6e90\u7ec4\u4e2d\u7ed1\u5b9a\u4e86\u8d44\u6e90 Namespace\uff0c\u5219\u6620\u5c04\u540e\u8be5\u7528\u6237/\u7528\u6237\u7ec4\u5c06\u6210\u4e3a Namespace Admin\u3002

                      Note

                      \u5de5\u4f5c\u7a7a\u95f4\u7684\u6743\u9650\u6620\u5c04\u80fd\u529b\u4e0d\u4f1a\u4f5c\u7528\u5230\u5171\u4eab\u8d44\u6e90\u4e0a\uff0c\u56e0\u4e3a\u5171\u4eab\u662f\u5c06\u96c6\u7fa4\u7684\u4f7f\u7528\u6743\u9650\u5171\u4eab\u7ed9\u591a\u4e2a\u5de5\u4f5c\u7a7a\u95f4\uff0c\u800c\u4e0d\u662f\u5c06\u7ba1\u7406\u6743\u9650\u53d7\u8ba9\u7ed9\u5de5\u4f5c\u7a7a\u95f4\uff0c\u56e0\u6b64\u4e0d\u4f1a\u5b9e\u73b0\u6743\u9650\u7ee7\u627f\u548c\u89d2\u8272\u6620\u5c04\u3002

                      "},{"location":"end-user/ghippo/workspace/ws-permission.html#_2","title":"\u5e94\u7528\u573a\u666f","text":"

                      \u901a\u8fc7\u5c06\u8d44\u6e90\u7ed1\u5b9a\u5230\u4e0d\u540c\u7684\u5de5\u4f5c\u7a7a\u95f4\u80fd\u591f\u5b9e\u73b0\u8d44\u6e90\u9694\u79bb\u3002 \u56e0\u6b64\u501f\u52a9\u6743\u9650\u6620\u5c04\u3001\u8d44\u6e90\u9694\u79bb\u548c\u5171\u4eab\u8d44\u6e90\u80fd\u529b\u80fd\u591f\u5c06\u8d44\u6e90\u7075\u6d3b\u5206\u914d\u7ed9\u5404\u4e2a\u5de5\u4f5c\u7a7a\u95f4\uff08\u79df\u6237\uff09\u3002

                      \u901a\u5e38\u9002\u7528\u4e8e\u4ee5\u4e0b\u4e24\u4e2a\u573a\u666f\uff1a

                      • \u96c6\u7fa4\u4e00\u5bf9\u4e00

                        \u666e\u901a\u96c6\u7fa4 \u90e8\u95e8/\u79df\u6237\uff08\u5de5\u4f5c\u7a7a\u95f4\uff09 \u7528\u9014 \u96c6\u7fa4 01 A \u7ba1\u7406\u548c\u4f7f\u7528 \u96c6\u7fa4 02 B \u7ba1\u7406\u548c\u4f7f\u7528
                      • \u96c6\u7fa4\u4e00\u5bf9\u591a

                        \u96c6\u7fa4 \u90e8\u95e8/\u79df\u6237\uff08\u5de5\u4f5c\u7a7a\u95f4\uff09 \u8d44\u6e90\u9650\u989d \u96c6\u7fa4 01 A 100 \u6838 CPU B 50 \u6838 CPU
                      "},{"location":"end-user/ghippo/workspace/ws-permission.html#_3","title":"\u6743\u9650\u8bf4\u660e","text":"\u64cd\u4f5c\u5bf9\u8c61 \u64cd\u4f5c Workspace Admin Workspace Editor Workspace Viewer \u672c\u8eab \u67e5\u770b \u2713 \u2713 \u2713 - \u6388\u6743 \u2713 \u2717 \u2717 - \u4fee\u6539\u522b\u540d \u2713 \u2713 \u2717 \u8d44\u6e90\u7ec4 \u67e5\u770b \u2713 \u2713 \u2713 - \u8d44\u6e90\u7ed1\u5b9a \u2713 \u2717 \u2717 - \u89e3\u9664\u7ed1\u5b9a \u2713 \u2717 \u2717 \u5171\u4eab\u8d44\u6e90 \u67e5\u770b \u2713 \u2713 \u2713 - \u65b0\u589e\u5171\u4eab \u2713 \u2717 \u2717 - \u89e3\u9664\u5171\u4eab \u2713 \u2717 \u2717 - \u8d44\u6e90\u9650\u989d \u2713 \u2717 \u2717 - \u4f7f\u7528\u5171\u4eab\u8d44\u6e90 1 \u2713 \u2717 \u2717
                      1. \u6388\u6743\u7528\u6237\u53ef\u524d\u5f80\u5e94\u7528\u5de5\u4f5c\u53f0\u3001\u5fae\u670d\u52a1\u5f15\u64ce\u3001\u4e2d\u95f4\u4ef6\u3001\u591a\u4e91\u7f16\u6392\u3001\u670d\u52a1\u7f51\u683c\u7b49\u6a21\u5757\u4f7f\u7528\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u7684\u8d44\u6e90\u3002 \u6709\u5173 Workspace Admin\u3001Workspace Editor\u3001Workspace Viewer \u89d2\u8272\u5728\u5404\u4ea7\u54c1\u6a21\u5757\u7684\u64cd\u4f5c\u8303\u56f4\uff0c\u8bf7\u67e5\u9605\u5404\u6a21\u5757\u7684\u6743\u9650\u8bf4\u660e\uff1a

                        • \u5e94\u7528\u5de5\u4f5c\u53f0\u6743\u9650\u8bf4\u660e
                        • \u670d\u52a1\u7f51\u683c\u6743\u9650\u8bf4\u660e
                        • \u4e2d\u95f4\u4ef6\u6743\u9650\u8bf4\u660e
                        • \u5fae\u670d\u52a1\u5f15\u64ce\u6743\u9650\u8bf4\u660e
                        • \u5bb9\u5668\u7ba1\u7406\u6743\u9650\u8bf4\u660e

                        \u21a9

                      "},{"location":"end-user/ghippo/workspace/wsbind-permission.html","title":"\u8d44\u6e90\u7ed1\u5b9a\u6743\u9650\u8bf4\u660e","text":"

                      \u5047\u5982\u7528\u6237\u5c0f\u660e\uff08\u201c\u5c0f\u660e\u201d\u4ee3\u8868\u4efb\u4f55\u6709\u8d44\u6e90\u7ed1\u5b9a\u9700\u6c42\u7684\u7528\u6237\uff09\u5df2\u7ecf\u5177\u5907\u4e86 Workspace Admin \u89d2\u8272\u6216\u5df2\u901a\u8fc7\u81ea\u5b9a\u4e49\u89d2\u8272\u6388\u6743\uff0c \u540c\u65f6\u81ea\u5b9a\u4e49\u89d2\u8272\u4e2d\u5305\u542b\u5de5\u4f5c\u7a7a\u95f4\u7684\u201c\u8d44\u6e90\u7ed1\u5b9a\u201d\u6743\u9650\uff0c\u5e0c\u671b\u5c06\u67d0\u4e2a\u96c6\u7fa4\u6216\u8005\u67d0\u4e2a\u547d\u540d\u7a7a\u95f4\u7ed1\u5b9a\u5230\u5176\u6240\u5728\u7684\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u3002

                      \u8981\u5c06\u96c6\u7fa4/\u547d\u540d\u7a7a\u95f4\u8d44\u6e90\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4\uff0c\u4e0d\u4ec5\u9700\u8981\u8be5\u5de5\u4f5c\u7a7a\u95f4\u7684\u201c\u8d44\u6e90\u7ed1\u5b9a\u201d\u6743\u9650\uff0c\u8fd8\u9700\u8981 Cluster Admin \u7684\u8d44\u6e90\u6743\u9650\u3002

                      "},{"location":"end-user/ghippo/workspace/wsbind-permission.html#_2","title":"\u7ed9\u5c0f\u660e\u6388\u6743","text":"
                      1. \u4f7f\u7528\u5e73\u53f0 Admin \u89d2\u8272\uff0c \u5728 \u5de5\u4f5c\u7a7a\u95f4 -> \u6388\u6743 \u9875\u9762\u7ed9\u5c0f\u660e\u6388\u4e88 Workspace Admin \u89d2\u8272\u3002

                      2. \u7136\u540e\u5728 \u5bb9\u5668\u7ba1\u7406 -> \u6743\u9650\u7ba1\u7406 \u9875\u9762\uff0c\u901a\u8fc7 \u6dfb\u52a0\u6388\u6743 \u5c06\u5c0f\u660e\u6388\u6743\u4e3a Cluster Admin\u3002

                      "},{"location":"end-user/ghippo/workspace/wsbind-permission.html#_3","title":"\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4","text":"

                      \u4f7f\u7528\u5c0f\u660e\u7684\u8d26\u53f7\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5728 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \u9875\u9762\uff0c\u901a\u8fc7 \u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4 \u529f\u80fd\uff0c \u5c0f\u660e\u53ef\u4ee5\u5c06\u6307\u5b9a\u96c6\u7fa4\u7ed1\u5b9a\u5230\u81ea\u5df1\u7684\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u3002

                      Note

                      \u5c0f\u660e\u80fd\u4e14\u53ea\u80fd\u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5c06\u96c6\u7fa4\u6216\u8005\u8be5\u96c6\u7fa4\u4e0b\u7684\u547d\u540d\u7a7a\u95f4\u7ed1\u5b9a\u5230\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\uff0c\u65e0\u6cd5\u5728\u5168\u5c40\u7ba1\u7406\u6a21\u5757\u5b8c\u6210\u6b64\u64cd\u4f5c\u3002

                      \u7ed1\u5b9a\u547d\u540d\u7a7a\u95f4\u5230\u5de5\u4f5c\u7a7a\u95f4\u4e5f\u81f3\u5c11\u9700\u8981 Workspace Admin + Cluster Admin \u6743\u9650\u3002

                      "},{"location":"end-user/host/createhost.html","title":"\u521b\u5efa\u548c\u542f\u52a8\u4e91\u4e3b\u673a","text":"

                      \u7528\u6237\u5b8c\u6210\u6ce8\u518c\uff0c\u4e3a\u5176\u5206\u914d\u4e86\u5de5\u4f5c\u7a7a\u95f4\u3001\u547d\u540d\u7a7a\u95f4\u548c\u8d44\u6e90\u540e\uff0c\u5373\u53ef\u4ee5\u521b\u5efa\u5e76\u542f\u52a8\u4e91\u4e3b\u673a\u3002

                      "},{"location":"end-user/host/createhost.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                      • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                      • \u7528\u6237\u5df2\u6210\u529f\u6ce8\u518c
                      • \u7ba1\u7406\u5458\u4e3a\u7528\u6237\u7ed1\u5b9a\u4e86\u5de5\u4f5c\u7a7a\u95f4
                      • \u7ba1\u7406\u5458\u4e3a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u4e86\u8d44\u6e90
                      "},{"location":"end-user/host/createhost.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u4ece\u5bfc\u822a\u680f\u8fdb\u5165 \u4e91\u4e3b\u673a
                      2. \u70b9\u51fb \u521b\u5efa\u4e91\u4e3b\u673a -> \u901a\u8fc7\u6a21\u677f\u521b\u5efa

                      3. \u5b9a\u4e49\u7684\u4e91\u4e3b\u673a\u5404\u9879\u914d\u7f6e\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65

                        \u57fa\u672c\u914d\u7f6e\u6a21\u677f\u914d\u7f6e\u5b58\u50a8\u4e0e\u7f51\u7edc

                      4. \u914d\u7f6e root \u5bc6\u7801\u6216 ssh \u5bc6\u94a5\u540e\u70b9\u51fb \u786e\u5b9a

                      5. \u8fd4\u56de\u4e3b\u673a\u5217\u8868\uff0c\u7b49\u5f85\u72b6\u6001\u53d8\u4e3a \u8fd0\u884c\u4e2d \u4e4b\u540e\uff0c\u53ef\u4ee5\u901a\u8fc7\u53f3\u4fa7\u7684 \u2507 \u542f\u52a8\u4e3b\u673a\u3002

Next step: Using a Cloud Host

                      "},{"location":"end-user/host/usehost.html","title":"\u4f7f\u7528\u4e91\u4e3b\u673a","text":"

                      \u521b\u5efa\u5e76\u542f\u52a8\u4e91\u4e3b\u673a\u4e4b\u540e\uff0c\u7528\u6237\u5c31\u53ef\u4ee5\u5f00\u59cb\u4f7f\u7528\u4e91\u4e3b\u673a\u3002

                      "},{"location":"end-user/host/usehost.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                      • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                      • \u7528\u6237\u5df2\u521b\u5efa\u5e76\u542f\u52a8\u4e91\u4e3b\u673a
                      "},{"location":"end-user/host/usehost.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u4ee5\u7ba1\u7406\u5458\u8eab\u4efd\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                      2. \u5bfc\u822a\u5230 \u5bb9\u5668\u7ba1\u7406 -> \u5bb9\u5668\u7f51\u7edc -> \u670d\u52a1 \uff0c\u70b9\u51fb\u670d\u52a1\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u670d\u52a1\u8be6\u60c5\u9875\uff0c\u5728\u53f3\u4e0a\u89d2\u70b9\u51fb \u66f4\u65b0

                      3. \u66f4\u6539\u7aef\u53e3\u8303\u56f4\u4e3a 30900-30999\uff0c\u4f46\u4e0d\u80fd\u51b2\u7a81\u3002

                      4. \u4ee5\u7ec8\u7aef\u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5bfc\u822a\u5230\u5bf9\u5e94\u7684\u670d\u52a1\uff0c\u67e5\u770b\u8bbf\u95ee\u7aef\u53e3\u3002

                      5. \u5728\u5916\u7f51\u4f7f\u7528 SSH \u5ba2\u6237\u7aef\u767b\u5f55\u4e91\u4e3b\u673a

                      6. \u81f3\u6b64\uff0c\u4f60\u53ef\u4ee5\u5728\u4e91\u4e3b\u673a\u4e0a\u6267\u884c\u5404\u9879\u64cd\u4f5c\u3002
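
A minimal login sketch, assuming root login was configured during creation and that the service exposes SSH on port 30950 (illustrative; use the access port found in step 4). Replace <node-ip> with the address of a reachable cluster node:

  ssh root@<node-ip> -p 30950   # port: the service's access port from step 4 (30950 is illustrative)\n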

Next step: Using Notebook

                      "},{"location":"end-user/insight/alert-center/index.html","title":"\u544a\u8b66\u4e2d\u5fc3","text":"

                      \u544a\u8b66\u4e2d\u5fc3\u662f AI \u7b97\u529b\u5e73\u53f0 \u63d0\u4f9b\u7684\u4e00\u4e2a\u91cd\u8981\u529f\u80fd\uff0c\u5b83\u8ba9\u7528\u6237\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u754c\u9762\u65b9\u4fbf\u5730\u6309\u7167\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u67e5\u770b\u6240\u6709\u6d3b\u52a8\u548c\u5386\u53f2\u544a\u8b66\uff0c \u5e76\u6839\u636e\u544a\u8b66\u7ea7\u522b\uff08\u7d27\u6025\u3001\u8b66\u544a\u3001\u63d0\u793a\uff09\u6765\u641c\u7d22\u544a\u8b66\u3002

                      \u6240\u6709\u544a\u8b66\u90fd\u662f\u57fa\u4e8e\u9884\u8bbe\u7684\u544a\u8b66\u89c4\u5219\u8bbe\u5b9a\u7684\u9608\u503c\u6761\u4ef6\u89e6\u53d1\u7684\u3002\u5728 AI \u7b97\u529b\u5e73\u53f0\u4e2d\uff0c\u5185\u7f6e\u4e86\u4e00\u4e9b\u5168\u5c40\u544a\u8b66\u7b56\u7565\uff0c\u540c\u65f6\u60a8\u4e5f\u53ef\u4ee5\u968f\u65f6\u521b\u5efa\u3001\u5220\u9664\u544a\u8b66\u7b56\u7565\uff0c\u5bf9\u4ee5\u4e0b\u6307\u6807\u8fdb\u884c\u8bbe\u7f6e\uff1a

                      • CPU \u4f7f\u7528\u91cf
                      • \u5185\u5b58\u4f7f\u7528\u91cf
                      • \u78c1\u76d8\u4f7f\u7528\u91cf
                      • \u78c1\u76d8\u6bcf\u79d2\u8bfb\u6b21\u6570
                      • \u78c1\u76d8\u6bcf\u79d2\u5199\u6b21\u6570
                      • \u96c6\u7fa4\u78c1\u76d8\u8bfb\u53d6\u541e\u5410\u91cf
                      • \u96c6\u7fa4\u78c1\u76d8\u5199\u5165\u541e\u5410\u91cf
                      • \u7f51\u7edc\u53d1\u9001\u901f\u7387
                      • \u7f51\u7edc\u63a5\u6536\u901f\u7387

                      \u8fd8\u53ef\u4ee5\u4e3a\u544a\u8b66\u89c4\u5219\u6dfb\u52a0\u6807\u7b7e\u548c\u6ce8\u89e3\u3002\u544a\u8b66\u89c4\u5219\u5206\u4e3a\u6d3b\u8dc3\u548c\u8fc7\u671f\u89c4\u5219\uff0c\u652f\u6301\u542f\u7528/\u7981\u7528\u67d0\u4e9b\u89c4\u5219\u6765\u5b9e\u73b0\u544a\u8b66\u9759\u9ed8\u3002

                      \u5f53\u8fbe\u5230\u9608\u503c\u6761\u4ef6\u540e\uff0c\u53ef\u4ee5\u914d\u7f6e\u544a\u8b66\u901a\u77e5\u65b9\u5f0f\uff0c\u5305\u62ec\u90ae\u4ef6\u3001\u9489\u9489\u3001\u4f01\u4e1a\u5fae\u4fe1\u3001Webhook \u548c\u77ed\u4fe1\u901a\u77e5\u3002 \u6240\u6709\u901a\u77e5\u7684\u6d88\u606f\u6a21\u677f\u90fd\u53ef\u4ee5\u81ea\u5b9a\u4e49\uff0c\u540c\u65f6\u8fd8\u652f\u6301\u6309\u8bbe\u5b9a\u7684\u95f4\u9694\u65f6\u95f4\u53d1\u9001\u901a\u77e5\u3002

                      \u6b64\u5916\uff0c\u544a\u8b66\u4e2d\u5fc3\u8fd8\u652f\u6301\u901a\u8fc7\u963f\u91cc\u4e91\u3001\u817e\u8baf\u4e91\u7b49\u63d0\u4f9b\u7684\u77ed\u4fe1\u670d\u52a1\u5c06\u544a\u8b66\u6d88\u606f\u53d1\u9001\u7ed9\u6307\u5b9a\u7528\u6237\uff0c\u5b9e\u73b0\u591a\u79cd\u65b9\u5f0f\u7684\u544a\u8b66\u901a\u77e5\u3002

                      AI \u7b97\u529b\u5e73\u53f0 \u544a\u8b66\u4e2d\u5fc3\u662f\u4e00\u4e2a\u529f\u80fd\u5f3a\u5927\u7684\u544a\u8b66\u7ba1\u7406\u5e73\u53f0\uff0c\u53ef\u5e2e\u52a9\u7528\u6237\u53ca\u65f6\u53d1\u73b0\u548c\u89e3\u51b3\u96c6\u7fa4\u4e2d\u51fa\u73b0\u7684\u95ee\u9898\uff0c \u63d0\u9ad8\u4e1a\u52a1\u7a33\u5b9a\u6027\u548c\u53ef\u7528\u6027\uff0c\u4fbf\u4e8e\u96c6\u7fa4\u5de1\u68c0\u548c\u6545\u969c\u6392\u67e5\u3002

                      "},{"location":"end-user/insight/alert-center/alert-policy.html","title":"\u544a\u8b66\u7b56\u7565","text":"

                      \u544a\u8b66\u7b56\u7565\u662f\u5728\u53ef\u89c2\u6d4b\u6027\u7cfb\u7edf\u4e2d\u5b9a\u4e49\u7684\u4e00\u7ec4\u89c4\u5219\u548c\u6761\u4ef6\uff0c\u7528\u4e8e\u68c0\u6d4b\u548c\u89e6\u53d1\u8b66\u62a5\uff0c\u4ee5\u4fbf\u5728\u7cfb\u7edf\u51fa\u73b0\u5f02\u5e38\u6216\u8fbe\u5230\u9884\u5b9a\u7684\u9608\u503c\u65f6\u53ca\u65f6\u901a\u77e5\u76f8\u5173\u4eba\u5458\u6216\u7cfb\u7edf\u3002

                      \u6bcf\u6761\u544a\u8b66\u7b56\u7565\u662f\u4e00\u7ec4\u544a\u8b66\u89c4\u5219\u7684\u96c6\u5408\uff0c\u652f\u6301\u5bf9\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u5de5\u4f5c\u8d1f\u8f7d\u7b49\u8d44\u6e90\u3001\u65e5\u5fd7\u3001\u4e8b\u4ef6\u8bbe\u7f6e\u544a\u8b66\u89c4\u5219\u3002\u5f53\u544a\u8b66\u5bf9\u8c61\u8fbe\u5230\u7b56\u7565\u4e0b\u4efb\u4e00\u89c4\u5219\u8bbe\u5b9a\u7684\u9608\u503c\uff0c\u5219\u4f1a\u81ea\u52a8\u89e6\u53d1\u544a\u8b66\u5e76\u53d1\u9001\u901a\u77e5\u3002

                      "},{"location":"end-user/insight/alert-center/alert-policy.html#_2","title":"\u67e5\u770b\u544a\u8b66\u7b56\u7565","text":"
                      1. \u70b9\u51fb\u4e00\u7ea7\u5bfc\u822a\u680f\u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027\u3002
                      2. \u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9 \u544a\u8b66\u4e2d\u5fc3 -> \u544a\u8b66\u7b56\u7565\u3002

                        • \u96c6\u7fa4\uff1a\u5355\u51fb\u96c6\u7fa4\u4e0b\u62c9\u6846\u53ef\u5207\u6362\u96c6\u7fa4\uff1b
                        • \u547d\u540d\u7a7a\u95f4\uff1a\u5355\u51fb\u547d\u540d\u7a7a\u95f4\u5207\u6362\u4e0b\u62c9\u6846\u3002

                      3. \u70b9\u51fb\u544a\u8b66\u7b56\u7565\u540d\u79f0\u53ef\u67e5\u770b\u7b56\u7565\u7684\u57fa\u672c\u4fe1\u606f\u3001\u89c4\u5219\u4ee5\u53ca\u901a\u77e5\u914d\u7f6e\u3002

                        1. \u5728\u89c4\u5219\u5217\u8868\u4e2d\u53ef\u67e5\u770b\u89c4\u5219\u7c7b\u578b\u3001\u89c4\u5219\u7684\u8868\u8fbe\u5f0f\u3001\u7ea7\u522b\u3001\u72b6\u6001\u7b49\u4fe1\u606f\u3002
                        2. \u8fdb\u5165\u7b56\u7565\u8be6\u60c5\uff0c\u53ef\u4ee5\u6dfb\u52a0\u3001\u7f16\u8f91\u3001\u5220\u9664\u5176\u4e0b\u7684\u544a\u8b66\u89c4\u5219\u3002

                      "},{"location":"end-user/insight/alert-center/alert-policy.html#_3","title":"\u521b\u5efa\u544a\u8b66\u7b56\u7565","text":"
                      1. \u586b\u5199\u57fa\u672c\u4fe1\u606f\uff0c\u9009\u62e9\u4e00\u4e2a\u6216\u591a\u4e2a\u96c6\u7fa4\u3001\u8282\u70b9\u6216\u5de5\u4f5c\u8d1f\u8f7d\u4e3a\u544a\u8b66\u5bf9\u8c61\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002

                        Note

                        • \u9009\u62e9\u5168\u90e8\u96c6\u7fa4\u3001\u8282\u70b9\u6216\u5de5\u4f5c\u8d1f\u8f7d\uff1a\u521b\u5efa\u7684\u544a\u8b66\u89c4\u5219\u5bf9\u6240\u6709\u5df2\u5b89\u88c5 insight-agent \u7684\u96c6\u7fa4\u751f\u6548\u3002
                        • \u9009\u62e9\u5355\u4e2a\u6216\u591a\u4e2a\u96c6\u7fa4\u96c6\u7fa4\u3001\u8282\u70b9\u6216\u5de5\u4f5c\u8d1f\u8f7d\uff1a\u521b\u5efa\u7684\u544a\u8b66\u89c4\u5219\u4ec5\u5bf9\u6240\u9009\u7684\u8d44\u6e90\u5bf9\u8c61\u751f\u6548\u3002
                        • \u540c\u65f6\uff0c\u7528\u6237\u53ea\u80fd\u5bf9\u5df2\u6743\u9650\u7684\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u8bbe\u7f6e\u544a\u8b66\u89c4\u5219\u3002
                      "},{"location":"end-user/insight/alert-center/alert-policy.html#_4","title":"\u624b\u52a8\u6dfb\u52a0\u89c4\u5219","text":"
                      1. \u5728\u521b\u5efa\u544a\u8b66\u7b56\u7565\u7684\u7b2c\u4e8c\u90e8\u4e2d\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4e0a\u89d2\u7684\u6dfb\u52a0\u89c4\u5219\u3002

                      2. \u5728\u5f39\u7a97\u4e2d\u521b\u5efa\u544a\u8b66\u89c4\u5219\uff0c\u586b\u5199\u5404\u9879\u53c2\u6570\u540e\u70b9\u51fb \u786e\u5b9a\u3002

                        • \u6a21\u677f\u89c4\u5219\uff1a\u9884\u5b9a\u4e49\u4e86\u57fa\u7840\u6307\u6807\uff0c\u53ef\u4ee5\u6309 CPU\u3001\u5185\u5b58\u3001\u78c1\u76d8\u3001\u7f51\u7edc\u8bbe\u5b9a\u8981\u76d1\u63a7\u7684\u6307\u6807\u3002
                        • PromQL \u89c4\u5219\uff1a\u8f93\u5165\u4e00\u4e2a PromQL \u8868\u8fbe\u5f0f\uff0c\u5177\u4f53\u8bf7\u67e5\u8be2 Prometheus \u8868\u8fbe\u5f0f\u3002
                        • \u6301\u7eed\u65f6\u957f\uff1a\u544a\u8b66\u88ab\u89e6\u53d1\u4e14\u6301\u7eed\u65f6\u95f4\u8fbe\u5230\u8be5\u8bbe\u5b9a\u503c\u540e\uff0c\u544a\u8b66\u7b56\u7565\u5c06\u53d8\u4e3a\u89e6\u53d1\u4e2d\u72b6\u6001\u3002
                        • \u544a\u8b66\u7ea7\u522b\uff1a\u5305\u542b\u7d27\u6025\u3001\u8b66\u544a\u3001\u4fe1\u606f\u4e09\u79cd\u7ea7\u522b\u3002
                        • \u9ad8\u7ea7\u8bbe\u7f6e\uff1a\u53ef\u4ee5\u81ea\u5b9a\u4e49\u6807\u7b7e\u548c\u6ce8\u89e3\u3002

                        Info

                        \u7cfb\u7edf\u5b9a\u4e49\u4e86\u5185\u7f6e\u6807\u7b7e\uff0c\u82e5\u81ea\u5b9a\u4e49\u6807\u7b7e\u4e0e\u5185\u7f6e\u6807\u7b7e\u7684\u952e\u503c\u76f8\u540c\uff0c\u5219\u81ea\u5b9a\u4e49\u6807\u7b7e\u4e0d\u751f\u6548\u3002 \u5185\u7f6e\u6807\u7b7e\u6709\uff1aseverity\u3001rule_id\uff0csource\u3001cluster_name\u3001group_id\u3001 target_type \u548c target\u3002
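
As a minimal PromQL sketch, the expression below fires when the total CPU usage of the containers in namespace ns01 exceeds 2 cores; the namespace and threshold are illustrative examples, and container_cpu_usage_seconds_total is a standard cAdvisor metric:

  sum(rate(container_cpu_usage_seconds_total{namespace=\"ns01\"}[5m])) > 2\n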

                      "},{"location":"end-user/insight/alert-center/alert-policy.html#_5","title":"\u521b\u5efa\u65e5\u5fd7\u89c4\u5219","text":"

                      \u5b8c\u6210\u57fa\u672c\u4fe1\u606f\u7684\u586b\u5199\u540e\uff0c\u70b9\u51fb \u6dfb\u52a0\u89c4\u5219\uff0c\u89c4\u5219\u7c7b\u578b\u9009\u62e9 \u65e5\u5fd7\u89c4\u5219\u3002

                      Note

                      \u4ec5\u5f53\u8d44\u6e90\u5bf9\u8c61\u9009\u62e9\u8282\u70b9\u6216\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u652f\u6301\u521b\u5efa\u65e5\u5fd7\u89c4\u5219\u3002

                      \u5b57\u6bb5\u8bf4\u660e\uff1a

                      • \u8fc7\u6ee4\u6761\u4ef6\uff1a\u67e5\u8be2\u65e5\u5fd7\u5185\u5bb9\u7684\u5b57\u6bb5\uff0c\u652f\u6301\u4e0e\u3001\u6216\u3001\u6b63\u5219\u5339\u914d\u3001\u6a21\u7cca\u5339\u914d\u56db\u79cd\u8fc7\u6ee4\u6761\u4ef6\u3002
                      • \u5224\u65ad\u6761\u4ef6\uff1a\u6839\u636e \u8fc7\u6ee4\u6761\u4ef6\uff0c\u8f93\u5165\u5173\u952e\u5b57\u6216\u5339\u914d\u6761\u4ef6\u3002
                      • \u65f6\u95f4\u8303\u56f4\uff1a\u65e5\u5fd7\u67e5\u8be2\u7684\u65f6\u95f4\u8303\u56f4\u3002
                      • \u9608\u503c\u6761\u4ef6\uff1a\u5728\u8f93\u5165\u6846\u4e2d\u8f93\u5165\u544a\u8b66\u9608\u503c\u3002\u5f53\u8fbe\u5230\u8bbe\u7f6e\u7684\u9608\u503c\u65f6\uff0c\u5219\u89e6\u53d1\u544a\u8b66\u3002\u652f\u6301\u7684\u6bd4\u8f83\u8fd0\u7b97\u7b26\u6709\uff1a >\u3001\u2265\u3001=\u3001\u2264\u3001<\u3002
                      • \u544a\u8b66\u7ea7\u522b\uff1a\u9009\u62e9\u544a\u8b66\u7ea7\u522b\uff0c\u7528\u4e8e\u8868\u793a\u544a\u8b66\u7684\u4e25\u91cd\u7a0b\u5ea6\u3002
                      "},{"location":"end-user/insight/alert-center/alert-policy.html#_6","title":"\u521b\u5efa\u4e8b\u4ef6\u89c4\u5219","text":"

                      \u5b8c\u6210\u57fa\u672c\u4fe1\u606f\u7684\u586b\u5199\u540e\uff0c\u70b9\u51fb \u6dfb\u52a0\u89c4\u5219\uff0c\u89c4\u5219\u7c7b\u578b\u9009\u62e9 \u4e8b\u4ef6\u89c4\u5219\u3002

                      Note

                      \u4ec5\u5f53\u8d44\u6e90\u5bf9\u8c61\u9009\u62e9\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u652f\u6301\u521b\u5efa\u4e8b\u4ef6\u89c4\u5219\u3002

                      \u5b57\u6bb5\u8bf4\u660e\uff1a

                      • \u4e8b\u4ef6\u89c4\u5219\uff1a\u4ec5\u652f\u6301\u8d44\u6e90\u5bf9\u8c61\u9009\u62e9\u5de5\u4f5c\u8d1f\u8f7d
                      • \u4e8b\u4ef6\u539f\u56e0\uff1a\u4e0d\u540c\u7684\u5de5\u4f5c\u8d1f\u8f7d\u7c7b\u578b\u7684\u4e8b\u4ef6\u539f\u56e0\u4e0d\u540c\uff0c\u4e8b\u4ef6\u539f\u56e0\u4e4b\u95f4\u662f\u201c\u548c\u201d\u7684\u5173\u7cfb\u3002
                      • \u65f6\u95f4\u8303\u56f4\uff1a\u68c0\u6d4b\u8be5\u65f6\u95f4\u8303\u56f4\u5185\u4ea7\u751f\u6570\u636e\uff0c\u82e5\u8fbe\u5230\u8bbe\u7f6e\u7684\u9608\u503c\u6761\u4ef6\uff0c\u5219\u89e6\u53d1\u544a\u8b66\u4e8b\u4ef6\u3002
                      • \u9608\u503c\u6761\u4ef6\uff1a\u5f53\u4ea7\u751f\u7684\u4e8b\u4ef6\u8fbe\u5230\u8bbe\u7f6e\u7684\u9608\u503c\u65f6\uff0c\u5219\u89e6\u53d1\u544a\u8b66\u4e8b\u4ef6\u3002
                      • \u8d8b\u52bf\u56fe\uff1a\u9ed8\u8ba4\u67e5\u8be2 10 \u5206\u949f\u5185\u7684\u4e8b\u4ef6\u53d8\u5316\u8d8b\u52bf\uff0c\u6bcf\u4e2a\u70b9\u7684\u6570\u503c\u7edf\u8ba1\u7684\u662f\u5f53\u524d\u65f6\u95f4\u70b9\u5230\u4e4b\u524d\u7684\u67d0\u6bb5\u65f6\u95f4\uff08\u65f6\u95f4\u8303\u56f4\uff09\u5185\u53d1\u751f\u7684\u603b\u6b21\u6570\u3002
                      "},{"location":"end-user/insight/alert-center/alert-policy.html#_7","title":"\u5bfc\u5165\u89c4\u5219\u6a21\u677f","text":"
                      1. \u53ef\u70b9\u51fb \u6a21\u677f\u5bfc\u5165\uff0c\u9009\u62e9\u5e73\u53f0\u7ba1\u7406\u5458\u5df2\u521b\u5efa\u597d\u7684\u544a\u8b66\u6a21\u677f\u6279\u91cf\u5bfc\u5165\u544a\u8b66\u89c4\u5219\u3002

                      2. \u70b9\u51fb \u4e0b\u4e00\u6b65 \u540e\u914d\u7f6e\u901a\u77e5\u3002

                      3. \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u6309\u94ae\uff0c\u8fd4\u56de\u544a\u8b66\u7b56\u7565\u5217\u8868\u3002

                      Tip

                      \u65b0\u5efa\u7684\u544a\u8b66\u7b56\u7565\u4e3a \u672a\u89e6\u53d1 \u72b6\u6001\u3002\u4e00\u65e6\u6ee1\u8db3\u89c4\u5219\u4e2d\u7684\u9608\u503c\u6761\u4ef6\u548c\u6301\u7eed\u65f6\u95f4\u540e\uff0c\u5c06\u53d8\u4e3a \u89e6\u53d1\u4e2d \u72b6\u6001\u3002

                      Warning

                      \u5220\u9664\u540e\u7684\u544a\u8b66\u7b56\u7565\u5c06\u5b8c\u5168\u6d88\u5931\uff0c\u8bf7\u8c28\u614e\u64cd\u4f5c\u3002

                      "},{"location":"end-user/insight/alert-center/alert-policy.html#yaml","title":"\u901a\u8fc7 YAML \u5bfc\u5165\u544a\u8b66\u7b56\u7565","text":"
                      1. \u8fdb\u5165\u544a\u8b66\u7b56\u7565\u5217\u8868\uff0c\u70b9\u51fb YAML \u521b\u5efa\u3002

                      2. \u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u7684\u9009\u62e9\u662f\u4e3a\u4e86\u544a\u8b66\u7b56\u7565\u7684\u7ba1\u7406\u6743\u9650\u3002

                      3. YAML \u7f16\u8f91\u5668\u4e2d\u8bf7\u586b\u5199 spec \u53ca\u5176\u4e2d\u7684\u5185\u5bb9\uff0c\u4ec5\u652f\u6301\u5bfc\u5165\u4e00\u4e2a group\u3002
                      4. \u544a\u8b66\u89c4\u5219\u540d\u79f0 \u9700\u8981\u7b26\u5408\u89c4\u8303\uff1a\u540d\u79f0\u53ea\u80fd\u5305\u542b\u5927\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u3001\u4e0b\u5212\u7ebf\uff08_\uff09\u548c\u8fde\u5b57\u7b26\uff08-\uff09\uff0c\u5fc5\u987b\u4ee5\u5b57\u6bcd\u5f00\u5934\uff0c\u6700\u957f 63 \u4e2a\u5b57\u7b26\u3002
                      5. \u5fc5\u586b severity \u4e14\u7b26\u5408\u89c4\u8303\uff1acritical\u3001warning\u3001info\u3002
                      6. \u5fc5\u586b\u8868\u8fbe\u5f0f expr\u3002

                      7. \u5bfc\u5165 YAML \u6587\u4ef6\u540e\uff0c\u70b9\u51fb \u9884\u89c8\uff0c\u53ef\u4ee5\u5bf9\u5bfc\u5165\u7684 YAML \u683c\u5f0f\u8fdb\u884c\u9a8c\u8bc1\uff0c\u5e76\u5feb\u901f\u786e\u8ba4\u5bfc\u5165\u7684\u544a\u8b66\u89c4\u5219\u3002
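
A minimal sketch of a spec with a single group, assuming the Prometheus-style rule-group schema implied by the requirements above; the group name, alert name, and expression are illustrative only:

  spec:\n  groups:\n    - name: example-group                # illustrative; only one group is supported\n      rules:\n        - alert: podHighCpu              # rule name: starts with a letter, max 63 chars\n          expr: sum(rate(container_cpu_usage_seconds_total{namespace=\"ns01\"}[5m])) > 2   # required\n          for: 5m\n          labels:\n            severity: warning            # must be critical, warning, or info\n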

                      "},{"location":"end-user/insight/alert-center/alert-template.html","title":"\u544a\u8b66\u6a21\u677f","text":"

                      \u544a\u8b66\u6a21\u677f\u53ef\u652f\u6301\u5e73\u53f0\u7ba1\u7406\u5458\u521b\u5efa\u544a\u8b66\u6a21\u677f\u53ca\u89c4\u5219\uff0c\u4e1a\u52a1\u4fa7\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u544a\u8b66\u6a21\u677f\u521b\u5efa\u544a\u8b66\u7b56\u7565\u3002 \u8fd9\u4e2a\u529f\u80fd\u53ef\u4ee5\u51cf\u5c11\u4e1a\u52a1\u4eba\u5458\u5bf9\u544a\u8b66\u89c4\u5219\u7684\u7ba1\u7406\uff0c\u4e14\u53ef\u4ee5\u6839\u636e\u73af\u5883\u5b9e\u9645\u60c5\u51b5\u81ea\u884c\u4fee\u6539\u544a\u8b66\u9608\u503c\u3002

                      "},{"location":"end-user/insight/alert-center/alert-template.html#_2","title":"\u521b\u5efa\u544a\u8b66\u6a21\u677f","text":"
                      1. \u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9\u00a0\u544a\u8b66\u4e2d\u5fc3\u00a0->\u00a0\u544a\u8b66\u7b56\u7565\uff0c\u5355\u51fb\u9876\u90e8\u7684 \u544a\u8b66\u6a21\u677f \u3002

                      2. \u70b9\u51fb \u521b\u5efa\u544a\u8b66\u6a21\u677f \uff0c\u8bbe\u7f6e\u544a\u8b66\u6a21\u677f\u7684\u540d\u79f0\u3001\u63cf\u8ff0\u7b49\u4fe1\u606f\u3002

                        \u53c2\u6570 \u8bf4\u660e \u6a21\u677f\u540d\u79f0 \u540d\u79f0\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u548c\u8fde\u5b57\u7b26\uff08-\uff09\uff0c\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u548c\u7ed3\u5c3e\uff0c\u6700\u957f 63 \u4e2a\u5b57\u7b26\u3002 \u63cf\u8ff0 \u63cf\u8ff0\u53ef\u5305\u542b\u4efb\u610f\u5b57\u7b26\uff0c\u6700\u957f 256 \u4e2a\u5b57\u7b26\u3002 \u8d44\u6e90\u7c7b\u578b \u7528\u4e8e\u6307\u5b9a\u544a\u8b66\u6a21\u677f\u7684\u5339\u914d\u7c7b\u578b\u3002 \u544a\u8b66\u89c4\u5219 \u652f\u6301\u9884\u5b9a\u4e49\u591a\u4e2a\u544a\u8b66\u89c4\u5219\uff0c\u53ef\u6dfb\u52a0\u6a21\u677f\u89c4\u5219\u3001PromQL \u89c4\u5219\u3002
                      3. \u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u540e\u8fd4\u56de\u544a\u8b66\u6a21\u677f\u5217\u8868\uff0c\u70b9\u51fb\u6a21\u677f\u540d\u79f0\u540e\u53ef\u67e5\u770b\u6a21\u677f\u8be6\u60c5\u3002

                      "},{"location":"end-user/insight/alert-center/alert-template.html#_3","title":"\u7f16\u8f91\u544a\u8b66\u6a21\u677f","text":"

                      \u70b9\u51fb\u76ee\u6807\u89c4\u5219\u540e\u7684 \u2507 \uff0c\u70b9\u51fb \u7f16\u8f91\uff0c\u8fdb\u5165\u6291\u5236\u89c4\u5219\u7684\u7f16\u8f91\u9875\u3002

                      "},{"location":"end-user/insight/alert-center/alert-template.html#_4","title":"\u5220\u9664\u544a\u8b66\u6a21\u677f","text":"

                      \u70b9\u51fb\u76ee\u6807\u6a21\u677f\u540e\u4fa7\u7684 \u2507 \uff0c\u70b9\u51fb \u5220\u9664\uff0c\u5728\u8f93\u5165\u6846\u4e2d\u8f93\u5165\u544a\u8b66\u6a21\u677f\u7684\u540d\u79f0\u5373\u53ef\u5220\u9664\u3002

                      "},{"location":"end-user/insight/alert-center/inhibition.html","title":"\u544a\u8b66\u6291\u5236","text":"

                      \u544a\u8b66\u6291\u5236\u4e3b\u8981\u662f\u5bf9\u4e8e\u67d0\u4e9b\u4e0d\u9700\u8981\u7acb\u5373\u5173\u6ce8\u7684\u544a\u8b66\u8fdb\u884c\u4e34\u65f6\u9690\u85cf\u6216\u8005\u964d\u4f4e\u5176\u4f18\u5148\u7ea7\u7684\u4e00\u79cd\u673a\u5236\u3002\u8fd9\u4e2a\u529f\u80fd\u7684\u76ee\u7684\u662f\u4e3a\u4e86\u51cf\u5c11\u4e0d\u5fc5\u8981\u7684\u544a\u8b66\u4fe1\u606f\u5bf9\u8fd0\u7ef4\u4eba\u5458\u7684\u5e72\u6270\uff0c\u4f7f\u4ed6\u4eec\u80fd\u591f\u96c6\u4e2d\u7cbe\u529b\u5904\u7406\u66f4\u91cd\u8981\u7684\u95ee\u9898\u3002

                      \u544a\u8b66\u6291\u5236\u901a\u8fc7\u5b9a\u4e49\u4e00\u7ec4\u89c4\u5219\u6765\u8bc6\u522b\u548c\u5ffd\u7565\u67d0\u4e9b\u544a\u8b66\uff0c\u5f53\u5b83\u4eec\u5728\u7279\u5b9a\u6761\u4ef6\u4e0b\u53d1\u751f\u65f6\u3002\u4e3b\u8981\u6709\u4ee5\u4e0b\u51e0\u79cd\u60c5\u51b5\uff1a

                      • \u7236\u5b50\u5173\u7cfb\u6291\u5236\uff1a\u5f53\u4e00\u4e2a\u7236\u544a\u8b66\uff08\u4f8b\u5982\u67d0\u4e2a\u8282\u70b9\u7684\u5d29\u6e83\uff09\u89e6\u53d1\u65f6\uff0c\u53ef\u4ee5\u6291\u5236\u6240\u6709\u7531\u6b64\u5f15\u8d77\u7684\u5b50\u544a\u8b66\uff08\u4f8b\u5982\u8be5\u8282\u70b9\u4e0a\u8fd0\u884c\u7684\u5bb9\u5668\u5d29\u6e83\uff09\u3002
                      • \u76f8\u4f3c\u544a\u8b66\u6291\u5236\uff1a\u5f53\u591a\u4e2a\u544a\u8b66\u5177\u6709\u76f8\u540c\u7684\u7279\u5f81\uff08\u4f8b\u5982\u540c\u4e00\u5b9e\u4f8b\u4e0a\u7684\u76f8\u540c\u95ee\u9898\uff09\u65f6\uff0c\u53ef\u4ee5\u6291\u5236\u91cd\u590d\u7684\u544a\u8b66\u901a\u77e5\u3002
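For intuition, parent-child inhibition corresponds to an Alertmanager-style inhibit_rules configuration. The following is a minimal sketch under that assumption; the alertname values and the node label are illustrative, not the exact configuration Insight generates:

inhibit_rules:
  - source_matchers:              # the parent alert: a node going down
      - alertname = "NodeDown"
      - severity = "critical"
    target_matchers:              # the child alerts to suppress while the parent is firing
      - severity = "warning"
    equal: ["node"]               # suppress only when source and target carry the same node label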
                      "},{"location":"end-user/insight/alert-center/inhibition.html#_2","title":"\u521b\u5efa\u6291\u5236\u89c4\u5219","text":"
                      1. \u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9\u00a0\u544a\u8b66\u4e2d\u5fc3\u00a0->\u00a0\u544a\u8b66\u964d\u566a\uff0c\u5355\u51fb\u9876\u90e8\u7684 \u544a\u8b66\u6291\u5236 \u3002

                      2. \u70b9\u51fb \u65b0\u5efa\u6291\u5236\u89c4\u5219 \uff0c\u8bbe\u7f6e\u6291\u5236\u89c4\u5219\u7684\u540d\u79f0\u3001\u89c4\u5219\u7b49\u3002

                        Note

                        \u901a\u8fc7\u89c4\u5219\u6807\u7b7e\u548c\u544a\u8b66\u6807\u7b7e\u5b9a\u4e49\u4e00\u7ec4\u89c4\u5219\u6765\u8bc6\u522b\u548c\u5ffd\u7565\u67d0\u4e9b\u544a\u8b66\uff0c\u8fbe\u5230\u907f\u514d\u540c\u4e00\u95ee\u9898\u53ef\u80fd\u4f1a\u89e6\u53d1\u591a\u4e2a\u76f8\u4f3c\u6216\u76f8\u5173\u7684\u544a\u8b66\u7684\u95ee\u9898\u3002

                        \u53c2\u6570\u65f6\u95f4 \u8bf4\u660e \u6291\u5236\u89c4\u5219\u540d\u79f0 \u6291\u5236\u89c4\u5219\u540d\u79f0\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u548c\u8fde\u5b57\u7b26\uff08-\uff09\uff0c\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u548c\u7ed3\u5c3e\uff0c\u6700\u957f 63 \u4e2a\u5b57\u7b26\u3002 \u63cf\u8ff0 \u63cf\u8ff0\u53ef\u5305\u542b\u4efb\u610f\u5b57\u7b26\uff0c\u6700\u957f 256 \u4e2a\u5b57\u7b26\u3002 \u96c6\u7fa4 \u8be5\u6291\u5236\u89c4\u5219\u4f5c\u7528\u7684\u96c6\u7fa4\u3002 \u547d\u540d\u7a7a\u95f4 \u8be5\u6291\u5236\u89c4\u5219\u4f5c\u7528\u7684\u547d\u540d\u7a7a\u95f4\u3002 \u6839\u6e90\u544a\u8b66 \u901a\u8fc7\u586b\u5199\u7684\u6807\u7b7e\u6761\u4ef6\u5339\u914d\u544a\u8b66\uff0c\u4f1a\u5c06\u7b26\u5408\u6240\u6709\u6807\u7b7e\u6761\u4ef6\u7684\u544a\u8b66\u4e0e\u7b26\u5408\u6291\u5236\u6761\u4ef6\u7684\u8fdb\u884c\u5bf9\u6bd4\uff0c\u4e0d\u7b26\u5408\u6291\u5236\u6761\u4ef6\u7684\u544a\u8b66\u5c06\u7167\u5e38\u53d1\u9001\u6d88\u606f\u7ed9\u7528\u6237\u3002 \u53d6\u503c\u8303\u56f4\u8bf4\u660e\uff1a - \u544a\u8b66\u7ea7\u522b\uff1a\u6307\u6807\u6216\u4e8b\u4ef6\u544a\u8b66\u7684\u7ea7\u522b\uff0c\u53ef\u4ee5\u8bbe\u7f6e\u4e3a\uff1a\u7d27\u6025\u3001\u91cd\u8981\u3001\u63d0\u793a\u3002 - \u8d44\u6e90\u7c7b\u578b\uff1a\u544a\u8b66\u5bf9\u8c61\u6240\u5bf9\u5e94\u7684\u8d44\u6e90\u7c7b\u578b\uff0c\u53ef\u4ee5\u8bbe\u7f6e\u4e3a\uff1a\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u65e0\u72b6\u6001\u8d1f\u8f7d\u3001\u6709\u72b6\u5bb9\u8d1f\u8f7d\u3001\u5b88\u62a4\u8fdb\u7a0b\u3001\u5bb9\u5668\u7ec4\u3002 - \u6807\u7b7e\uff1a\u544a\u8b66\u6807\u8bc6\u5c5e\u6027\uff0c\u7531\u6807\u7b7e\u540d\u548c\u6807\u7b7e\u503c\u6784\u6210\uff0c\u652f\u6301\u7528\u6237\u81ea\u5b9a\u4e49\u3002 \u6291\u5236\u544a\u8b66 \u7528\u4e8e\u6307\u5b9a\u76ee\u6807\u8b66\u62a5\uff08\u5c06\u88ab\u6291\u5236\u7684\u8b66\u62a5\uff09\u7684\u5339\u914d\u6761\u4ef6\uff0c\u7b26\u5408\u6240\u6709\u6807\u7b7e\u6761\u4ef6\u7684\u544a\u8b66\u5c06\u4e0d\u4f1a\u518d\u53d1\u9001\u6d88\u606f\u7ed9\u7528\u6237\u3002 \u5339\u914d\u6807\u7b7e \u7528\u4e8e\u6307\u5b9a\u5e94\u8be5\u6bd4\u8f83\u7684\u6807\u7b7e\u5217\u8868\uff0c\u4ee5\u786e\u5b9a\u6e90\u8b66\u62a5\u548c\u76ee\u6807\u8b66\u62a5\u662f\u5426\u5339\u914d\u3002\u53ea\u6709\u5728\u00a0equal\u00a0\u4e2d\u6307\u5b9a\u7684\u6807\u7b7e\u5728\u6e90\u548c\u76ee\u6807\u8b66\u62a5\u4e2d\u7684\u503c\u5b8c\u5168\u76f8\u540c\u7684\u60c5\u51b5\u4e0b\uff0c\u624d\u4f1a\u89e6\u53d1\u6291\u5236\u3002equal\u00a0\u5b57\u6bb5\u662f\u53ef\u9009\u7684\u3002\u5982\u679c\u7701\u7565\u00a0equal\u00a0\u5b57\u6bb5\uff0c\u5219\u4f1a\u5c06\u6240\u6709\u6807\u7b7e\u7528\u4e8e\u5339\u914d
                      3. \u70b9\u51fb**\u786e\u5b9a**\u5b8c\u6210\u521b\u5efa\u540e\u8fd4\u56de\u544a\u8b66\u6291\u5236\u5217\u8868\uff0c\u70b9\u51fb\u544a\u8b66\u6291\u5236\u540d\u79f0\u540e\u53ef\u67e5\u770b\u6291\u5236\u89c4\u5219\u8be6\u60c5\u3002

                      "},{"location":"end-user/insight/alert-center/inhibition.html#_3","title":"\u67e5\u770b\u89c4\u5219\u6807\u7b7e","text":"
                      1. \u70b9\u51fb\u53f3\u4fa7\u5bfc\u822a\u680f\u9009\u62e9\u00a0\u544a\u8b66\u4e2d\u5fc3\u00a0->\u00a0\u544a\u8b66\u7b56\u7565 \uff0c\u70b9\u51fb\u89c4\u5219\u6240\u5728\u7684\u7b56\u7565\u8be6\u60c5\u3002
                      2. \u70b9\u51fb\u76ee\u6807\u89c4\u5219\u540d\u79f0\uff0c\u67e5\u770b\u89c4\u5219\u8be6\u60c5\uff0c\u67e5\u770b\u5bf9\u5e94\u544a\u8b66\u89c4\u5219\u7684\u6807\u7b7e\u3002

                        Note

                        \u5728\u6dfb\u52a0\u89c4\u5219\u65f6\u53ef\u6dfb\u52a0\u81ea\u5b9a\u4e49\u6807\u7b7e\u3002

                      "},{"location":"end-user/insight/alert-center/inhibition.html#_4","title":"\u67e5\u770b\u544a\u8b66\u6807\u7b7e","text":"
                      1. \u70b9\u51fb\u53f3\u4fa7\u5bfc\u822a\u680f\u9009\u62e9\u00a0\u544a\u8b66\u4e2d\u5fc3\u00a0->\u00a0\u544a\u8b66\u5217\u8868 \uff0c\u70b9\u51fb\u544a\u8b66\u6240\u5728\u884c\u67e5\u770b\u544a\u8b66\u8be6\u60c5\u3002

                        Note

                        \u544a\u8b66\u6807\u7b7e\u7528\u4e8e\u63cf\u8ff0\u544a\u8b66\u7684\u8be6\u7ec6\u4fe1\u606f\u548c\u5c5e\u6027\uff0c\u53ef\u4ee5\u7528\u6765\u521b\u5efa\u6291\u5236\u89c4\u5219\u3002

                      "},{"location":"end-user/insight/alert-center/inhibition.html#_5","title":"\u7f16\u8f91\u6291\u5236\u89c4\u5219","text":"
                      1. \u70b9\u51fb\u76ee\u6807\u89c4\u5219\u540e\u4fa7\u7684 \u2507 \uff0c\u70b9\u51fb \u7f16\u8f91\uff0c\u8fdb\u5165\u6291\u5236\u89c4\u5219\u7684\u7f16\u8f91\u9875\u3002

                      "},{"location":"end-user/insight/alert-center/inhibition.html#_6","title":"\u5220\u9664\u6291\u5236\u89c4\u5219","text":"

                      \u70b9\u51fb\u76ee\u6807\u89c4\u5219\u540e\u4fa7\u7684 \u2507 \uff0c\u70b9\u51fb \u5220\u9664\uff0c\u5728\u8f93\u5165\u6846\u4e2d\u8f93\u5165\u6291\u5236\u89c4\u5219\u7684\u540d\u79f0\u5373\u53ef\u5220\u9664\u3002

                      "},{"location":"end-user/insight/alert-center/message.html","title":"\u901a\u77e5\u914d\u7f6e","text":"

                      \u5728 \u901a\u77e5\u914d\u7f6e \u9875\u9762\uff0c\u53ef\u4ee5\u914d\u7f6e\u901a\u8fc7\u90ae\u4ef6\u3001\u4f01\u4e1a\u5fae\u4fe1\u3001\u9489\u9489\u3001Webhook \u548c\u77ed\u4fe1\u7b49\u65b9\u5f0f\u5411\u7528\u6237\u53d1\u9001\u6d88\u606f\u3002

                      "},{"location":"end-user/insight/alert-center/message.html#_2","title":"\u90ae\u4ef6\u7ec4","text":"
                      1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u540e\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e\uff0c\u9ed8\u8ba4\u4f4d\u4e8e\u90ae\u4ef6\u901a\u77e5\u5bf9\u8c61\u3002

                      2. \u70b9\u51fb \u6dfb\u52a0\u90ae\u7bb1\u7ec4\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u90ae\u4ef6\u5730\u5740\u3002

                      3. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u90ae\u7bb1\u7ec4\u3002

                      "},{"location":"end-user/insight/alert-center/message.html#_3","title":"\u4f01\u4e1a\u5fae\u4fe1","text":"
                      1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u4f01\u4e1a\u5fae\u4fe1\u3002

                        \u6709\u5173\u4f01\u4e1a\u5fae\u4fe1\u7fa4\u673a\u5668\u4eba\u7684 URL\uff0c\u8bf7\u53c2\u9605\u4f01\u4e1a\u5fae\u4fe1\u5b98\u65b9\u6587\u6863\uff1a\u5982\u4f55\u4f7f\u7528\u7fa4\u673a\u5668\u4eba\u3002

                      2. \u70b9\u51fb \u6dfb\u52a0\u7fa4\u673a\u5668\u4eba\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u7fa4\u673a\u5668\u4eba\u3002

                      3. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u9009\u62e9 \u53d1\u9001\u6d4b\u8bd5\u4fe1\u606f\uff0c\u8fd8\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u7fa4\u673a\u5668\u4eba\u3002

                      "},{"location":"end-user/insight/alert-center/message.html#_4","title":"\u9489\u9489","text":"
                      1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u9489\u9489\uff0c\u70b9\u51fb \u6dfb\u52a0\u7fa4\u673a\u5668\u4eba\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u7fa4\u673a\u5668\u4eba\u3002

                        \u6709\u5173\u9489\u9489\u7fa4\u673a\u5668\u4eba\u7684 URL\uff0c\u8bf7\u53c2\u9605\u9489\u9489\u5b98\u65b9\u6587\u6863\uff1a\u81ea\u5b9a\u4e49\u673a\u5668\u4eba\u63a5\u5165\u3002

                        Note

                        \u52a0\u7b7e\u7684\u65b9\u5f0f\u662f\u9489\u9489\u673a\u5668\u4eba\u4e0e\u5f00\u53d1\u8005\u53cc\u5411\u8fdb\u884c\u5b89\u5168\u8ba4\u8bc1\uff0c\u82e5\u5728\u521b\u5efa\u9489\u9489\u673a\u5668\u4eba\u65f6\u5f00\u542f\u4e86\u52a0\u7b7e\uff0c\u5219\u9700\u8981\u5728\u6b64\u5904\u8f93\u5165\u9489\u9489\u751f\u6210\u7684\u5bc6\u94a5\u3002 \u53ef\u53c2\u8003\u9489\u9489\u81ea\u5b9a\u4e49\u673a\u5668\u4eba\u5b89\u5168\u8bbe\u7f6e\u3002

                      2. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u9009\u62e9 \u53d1\u9001\u6d4b\u8bd5\u4fe1\u606f\uff0c\u8fd8\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u7fa4\u673a\u5668\u4eba\u3002

                      "},{"location":"end-user/insight/alert-center/message.html#_5","title":"\u98de\u4e66","text":"
                      1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u98de\u4e66\uff0c\u70b9\u51fb \u6dfb\u52a0\u7fa4\u673a\u5668\u4eba\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u7fa4\u673a\u5668\u4eba\u3002

                        Note

                        \u5f53\u98de\u4e66\u7684\u7fa4\u673a\u5668\u4eba\u5f00\u542f\u7b7e\u540d\u6821\u9a8c\u65f6\uff0c\u6dfb\u52a0\u98de\u4e66\u901a\u77e5\u65f6\u9700\u8981\u586b\u5199\u5bf9\u5e94\u7684\u7b7e\u540d\u5bc6\u94a5\u3002\u8bf7\u67e5\u9605 \u81ea\u5b9a\u4e49\u673a\u5668\u4eba\u4f7f\u7528\u6307\u5357\u3002

                      2. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u9009\u62e9 \u53d1\u9001\u6d4b\u8bd5\u4fe1\u606f\uff0c\u8fd8\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u7fa4\u673a\u5668\u4eba\u3002

                      "},{"location":"end-user/insight/alert-center/message.html#webhook","title":"Webhook","text":"
1. In the left navigation bar, click Alert Center -> Notification Settings -> Webhook.

   For the Webhook URL and more configuration options, see the webhook documentation.

2. Click Create Webhook and add one or more Webhooks.

   HTTP Headers: optional; sets the request headers. Multiple headers can be added.

3. After configuration you are automatically returned to the notification list; click the ┇ on the right side of the list and select Send Test Message, or edit or delete a Webhook.

                      "},{"location":"end-user/insight/alert-center/message.html#_6","title":"\u7ad9\u5185\u4fe1","text":"

                      Note

                      \u544a\u8b66\u6d88\u606f\u53d1\u9001\u81f3\u7528\u6237\u4e2a\u4eba\u7684\u7ad9\u5185\u4fe1\uff0c\u70b9\u51fb\u9876\u90e8\u7684 \ud83d\udd14 \u7b26\u53f7\u53ef\u4ee5\u67e5\u770b\u901a\u77e5\u6d88\u606f\u3002

                      1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u7ad9\u5185\u4fe1\uff0c\u70b9\u51fb\u521b\u5efa\u3002

                        • \u7ad9\u5185\u4fe1\u901a\u77e5\u5141\u8bb8\u6dfb\u52a0\u591a\u4e2a\u7528\u6237\u3002

                      2. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de \u7ad9\u5185\u4fe1\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u9009\u62e9 \u53d1\u9001\u6d4b\u8bd5\u4fe1\u606f\u3002

                      "},{"location":"end-user/insight/alert-center/message.html#_7","title":"\u77ed\u4fe1\u7ec4","text":"
                      1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u77ed\u4fe1\uff0c\u70b9\u51fb \u6dfb\u52a0\u77ed\u4fe1\u7ec4\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u77ed\u4fe1\u7ec4\u3002

                      2. \u5728\u5f39\u7a97\u4e2d\u8f93\u5165\u540d\u79f0\u3001\u63a5\u6536\u77ed\u4fe1\u7684\u5bf9\u8c61\u3001\u624b\u673a\u53f7\u4ee5\u53ca\u901a\u77e5\u670d\u52a1\u5668\u3002

                        \u901a\u77e5\u670d\u52a1\u5668\u9700\u8981\u9884\u5148\u5728 \u901a\u77e5\u914d\u7f6e -> \u901a\u77e5\u670d\u52a1\u5668 \u4e2d\u6dfb\u52a0\u521b\u5efa\u3002\u76ee\u524d\u652f\u6301\u963f\u91cc\u4e91\u3001\u817e\u8baf\u4e91\u4e24\u79cd\u4e91\u670d\u52a1\u5668\uff0c\u5177\u4f53\u914d\u7f6e\u7684\u53c2\u6570\u8bf7\u53c2\u9605\u81ea\u5df1\u7684\u4e91\u670d\u52a1\u5668\u4fe1\u606f\u3002

                      3. \u77ed\u4fe1\u7ec4\u6dfb\u52a0\u6210\u529f\u540e\uff0c\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u77ed\u4fe1\u7ec4\u3002

                      "},{"location":"end-user/insight/alert-center/msg-template.html","title":"\u6d88\u606f\u6a21\u677f","text":"

                      \u53ef\u89c2\u6d4b\u6027\u63d0\u4f9b\u81ea\u5b9a\u4e49\u6d88\u606f\u6a21\u677f\u5185\u5bb9\u7684\u80fd\u529b\uff0c\u652f\u6301\u90ae\u4ef6\u3001\u4f01\u4e1a\u5fae\u4fe1\u3001\u9489\u9489\u3001Webhook\u3001\u98de\u4e66\u3001\u7ad9\u5185\u4fe1\u7b49\u4e0d\u540c\u7684\u901a\u77e5\u5bf9\u8c61\u5b9a\u4e49\u4e0d\u540c\u7684\u6d88\u606f\u901a\u77e5\u5185\u5bb9\u3002

                      "},{"location":"end-user/insight/alert-center/msg-template.html#_2","title":"\u521b\u5efa\u6d88\u606f\u6a21\u677f","text":"
                      1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9 \u544a\u8b66\u4e2d\u5fc3 -> \u6d88\u606f\u6a21\u677f\u3002

                        Insight \u9ed8\u8ba4\u5185\u7f6e\u4e2d\u82f1\u6587\u4e24\u4e2a\u6a21\u677f\uff0c\u4ee5\u4fbf\u7528\u6237\u4f7f\u7528\u3002

                      2. \u70b9\u51fb \u65b0\u5efa\u6d88\u606f\u6a21\u677f \u6309\u94ae\uff0c\u586b\u5199\u6a21\u677f\u5185\u5bb9\u3002

                      Info

                      \u53ef\u89c2\u6d4b\u6027\u9884\u7f6e\u4e86\u6d88\u606f\u6a21\u677f\u3002\u82e5\u9700\u8981\u5b9a\u4e49\u6a21\u677f\u7684\u5185\u5bb9\uff0c\u8bf7\u53c2\u8003\u914d\u7f6e\u901a\u77e5\u6a21\u677f\u3002

                      "},{"location":"end-user/insight/alert-center/msg-template.html#_3","title":"\u6d88\u606f\u6a21\u677f\u8be6\u60c5","text":"

                      \u70b9\u51fb\u67d0\u4e00\u6d88\u606f\u6a21\u677f\u7684\u540d\u79f0\uff0c\u53f3\u4fa7\u6ed1\u5757\u53ef\u67e5\u770b\u6d88\u606f\u6a21\u677f\u7684\u8be6\u60c5\u3002
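For example, a notification body that combines several of the variables from the table below might look like the following sketch (the layout is illustrative only; see Configure Notification Templates for the exact template syntax Insight supports):

[{{ .Labels.severity }}] {{ .Labels.alertname }}
Cluster: {{ .Labels.cluster }}  Namespace: {{ .Labels.namespace }}
Trigger value: {{ .Annotations.value }}, started at {{ .StartsAt }}
{{ .Annotations.description }}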

| Parameter | Variable | Description |
| --- | --- | --- |
| Rule Name | {{ .Labels.alertname }} | The name of the rule that triggered the alert |
| Policy Name | {{ .Labels.alertgroup }} | The name of the alert policy that the triggering rule belongs to |
| Alert Level | {{ .Labels.severity }} | The level of the triggered alert |
| Cluster | {{ .Labels.cluster }} | The cluster where the resource that triggered the alert resides |
| Namespace | {{ .Labels.namespace }} | The namespace where the resource that triggered the alert resides |
| Node | {{ .Labels.node }} | The node where the resource that triggered the alert resides |
| Resource Type | {{ .Labels.target_type }} | The resource type of the alert target |
| Resource Name | {{ .Labels.target }} | The name of the object that triggered the alert |
| Trigger Value | {{ .Annotations.value }} | The metric value at the time the alert notification was triggered |
| Start Time | {{ .StartsAt }} | The time at which the alert started |
| End Time | {{ .EndsAT }} | The time at which the alert ended |
| Description | {{ .Annotations.description }} | The detailed description of the alert |
| Labels | {{ for .labels }} {{ end }} | All labels of the alert; the for function iterates over the labels list to retrieve all label contents |
"},{"location":"end-user/insight/alert-center/msg-template.html#_4","title":"Edit or Delete a Message Template","text":"

Click the ┇ on the right side of the list and choose Edit or Delete from the pop-up menu to modify or delete a message template.

                      Warning

Note that a deleted template cannot be recovered, so proceed with caution.

                      "},{"location":"end-user/insight/alert-center/silent.html","title":"\u544a\u8b66\u9759\u9ed8","text":"

                      \u544a\u8b66\u9759\u9ed8\u662f\u6307\u5728\u7279\u5b9a\u7684\u65f6\u95f4\u8303\u56f4\u5185\uff0c\u6839\u636e\u5b9a\u4e49\u597d\u7684\u89c4\u5219\u5bf9\u7b26\u5408\u6761\u4ef6\u7684\u544a\u8b66\u4e0d\u518d\u53d1\u9001\u544a\u8b66\u901a\u77e5\u3002\u8be5\u529f\u80fd\u53ef\u4ee5\u5e2e\u52a9\u8fd0\u7ef4\u4eba\u5458\u907f\u514d\u5728\u67d0\u4e9b\u64cd\u4f5c\u6216\u4e8b\u4ef6\u671f\u95f4\u63a5\u6536\u5230\u8fc7\u591a\u7684\u566a\u58f0\u544a\u8b66\uff0c\u540c\u65f6\u4fbf\u4e8e\u66f4\u52a0\u7cbe\u786e\u5730\u5904\u7406\u771f\u6b63\u9700\u8981\u89e3\u51b3\u7684\u95ee\u9898\u3002

                      \u5728\u544a\u8b66\u9759\u9ed8\u9875\u9762\u4e0a\uff0c\u7528\u6237\u53ef\u4ee5\u770b\u5230\u4e24\u4e2a\u9875\u7b7e\uff1a\u6d3b\u8dc3\u89c4\u5219\u548c\u8fc7\u671f\u89c4\u5219\u3002 \u5176\u4e2d\uff0c\u6d3b\u8dc3\u89c4\u5219\u8868\u793a\u76ee\u524d\u6b63\u5728\u751f\u6548\u7684\u89c4\u5219\uff0c\u800c\u8fc7\u671f\u89c4\u5219\u5219\u662f\u4ee5\u524d\u5b9a\u4e49\u8fc7\u4f46\u5df2\u7ecf\u8fc7\u671f\uff08\u6216\u8005\u7528\u6237\u4e3b\u52a8\u5220\u9664\uff09\u7684\u89c4\u5219\u3002

                      "},{"location":"end-user/insight/alert-center/silent.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9 \u544a\u8b66\u4e2d\u5fc3 -> \u544a\u8b66\u9759\u9ed8 ,\u70b9\u51fb \u65b0\u5efa\u9759\u9ed8\u89c4\u5219 \u6309\u94ae\u3002

                      2. \u586b\u5199\u9759\u9ed8\u89c4\u5219\u7684\u5404\u9879\u53c2\u6570\uff0c\u5982\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u3001\u6807\u7b7e\u3001\u65f6\u95f4\u7b49\uff0c\u4ee5\u5b9a\u4e49\u8fd9\u6761\u89c4\u5219\u7684\u4f5c\u7528\u8303\u56f4\u548c\u751f\u6548\u65f6\u95f4\u3002

                      3. \u8fd4\u56de\u89c4\u5219\u5217\u8868\uff0c\u5728\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u9759\u9ed8\u89c4\u5219\u3002

                      \u901a\u8fc7\u544a\u8b66\u9759\u9ed8\u529f\u80fd\uff0c\u60a8\u53ef\u4ee5\u7075\u6d3b\u5730\u63a7\u5236\u54ea\u4e9b\u544a\u8b66\u9700\u8981\u88ab\u5ffd\u7565\uff0c\u5728\u4ec0\u4e48\u65f6\u95f4\u6bb5\u5185\u751f\u6548\uff0c\u4ece\u800c\u63d0\u9ad8\u8fd0\u7ef4\u6548\u7387\uff0c\u51cf\u5c11\u8bef\u62a5\u7684\u53ef\u80fd\u6027\u3002

                      "},{"location":"end-user/insight/alert-center/sms-provider.html","title":"\u914d\u7f6e\u901a\u77e5\u670d\u52a1\u5668","text":"

                      \u53ef\u89c2\u6d4b\u6027 Insight \u652f\u6301\u77ed\u4fe1\u901a\u77e5\uff0c\u76ee\u524d\u901a\u8fc7\u96c6\u6210\u963f\u91cc\u4e91\u3001\u817e\u8baf\u4e91\u7684\u77ed\u4fe1\u670d\u52a1\u53d1\u9001\u544a\u8b66\u6d88\u606f\u3002\u672c\u6587\u4ecb\u7ecd\u4e86\u5982\u4f55\u5728 insight \u4e2d\u914d\u7f6e\u77ed\u4fe1\u901a\u77e5\u7684\u670d\u52a1\u5668\u3002\u77ed\u4fe1\u7b7e\u540d\u4e2d\u652f\u6301\u7684\u53d8\u91cf\u4e3a\u6d88\u606f\u6a21\u677f\u4e2d\u7684\u9ed8\u8ba4\u53d8\u91cf\uff0c\u540c\u65f6\u7531\u4e8e\u77ed\u4fe1\u5b57\u6570\u6709\u9650\uff0c\u5efa\u8bae\u9009\u62e9\u8f83\u4e3a\u660e\u786e\u7684\u53d8\u91cf\u3002

                      \u5982\u4f55\u914d\u7f6e\u77ed\u4fe1\u63a5\u6536\u4eba\u53ef\u53c2\u8003\u6587\u6863\uff1a\u914d\u7f6e\u77ed\u4fe1\u901a\u77e5\u7ec4\u3002

                      "},{"location":"end-user/insight/alert-center/sms-provider.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u8fdb\u5165 \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u901a\u77e5\u670d\u52a1\u5668 \u3002

                      2. \u70b9\u51fb \u6dfb\u52a0\u901a\u77e5\u670d\u52a1\u5668 \u3002

                        1. \u914d\u7f6e\u963f\u91cc\u4e91\u670d\u52a1\u5668\u3002

                          \u7533\u8bf7\u963f\u91cc\u4e91\u77ed\u4fe1\u670d\u52a1\uff0c\u8bf7\u53c2\u8003\u963f\u91cc\u4e91\u77ed\u4fe1\u670d\u52a1\u3002

                          \u5b57\u6bb5\u8bf4\u660e\uff1a

                          • AccessKey ID \uff1a\u963f\u91cc\u4e91\u7528\u4e8e\u6807\u8bc6\u7528\u6237\u7684\u53c2\u6570\u3002
                          • AccessKey Secret \uff1a\u963f\u91cc\u4e91\u7528\u4e8e\u9a8c\u8bc1\u7528\u6237\u7684\u5bc6\u94a5\u3002AccessKey Secret \u5fc5\u987b\u4fdd\u5bc6\u3002
                          • \u77ed\u4fe1\u7b7e\u540d \uff1a\u77ed\u4fe1\u670d\u52a1\u652f\u6301\u6839\u636e\u7528\u6237\u9700\u6c42\u521b\u5efa\u7b26\u5408\u8981\u6c42\u7684\u7b7e\u540d\u3002\u53d1\u9001\u77ed\u4fe1\u65f6\uff0c\u77ed\u4fe1\u5e73\u53f0\u4f1a\u5c06\u5df2\u5ba1\u6838\u901a\u8fc7\u7684\u77ed\u4fe1\u7b7e\u540d\u6dfb\u52a0\u5230\u77ed\u4fe1\u5185\u5bb9\u4e2d\uff0c\u518d\u53d1\u9001\u7ed9\u77ed\u4fe1\u63a5\u6536\u65b9\u3002
                          • \u6a21\u677f CODE \uff1a\u77ed\u4fe1\u6a21\u677f\u662f\u53d1\u9001\u77ed\u4fe1\u7684\u5177\u4f53\u5185\u5bb9\u3002
                          • \u53c2\u6570\u6a21\u677f \uff1a\u77ed\u4fe1\u6b63\u6587\u6a21\u677f\u53ef\u4ee5\u5305\u542b\u53d8\u91cf\uff0c\u7528\u6237\u53ef\u901a\u8fc7\u53d8\u91cf\u5b9e\u73b0\u81ea\u5b9a\u4e49\u77ed\u4fe1\u5185\u5bb9\u3002

                          \u8bf7\u53c2\u8003\u963f\u91cc\u4e91\u53d8\u91cf\u89c4\u8303\u3002

                          Note

                          \u4e3e\u4f8b\uff1a\u5728\u963f\u91cc\u4e91\u5b9a\u4e49\u7684\u6a21\u677f\u5185\u5bb9\u4e3a\uff1a\\({severity}\uff1a\\) \u88ab\u89e6\u53d1\u3002\u53c2\u6570\u6a21\u677f\u4e2d\u7684\u914d\u7f6e\u53c2\u8003\u4e0a\u56fe\u3002} \u5728 ${startat

   2. Configure a Tencent Cloud server.

      To apply for the Tencent Cloud SMS service, see Tencent Cloud SMS.

      Field descriptions:

      • Secret ID: the parameter Tencent Cloud uses to identify the API caller.
      • SecretKey: the parameter Tencent Cloud uses to authenticate the API caller's identity.
      • SMS Template ID: the SMS template ID, generated automatically by the Tencent Cloud system.
      • Signature Content: the SMS signature content, i.e., the full or abbreviated name of the actual website defined in the Tencent Cloud SMS signature.
      • SdkAppId: the SMS SdkAppId, the actual SdkAppId generated after adding an application in the Tencent Cloud SMS console.
      • Parameter Template: the SMS body template can contain variables, through which users can customize the SMS content. See: the Tencent Cloud variable specification.

      Note

      Example: a template defined in Tencent Cloud reads: {1}: {2} was triggered at {3}. For the parameter template configuration, refer to the figure above.

                      "},{"location":"end-user/insight/collection-manag/agent-status.html","title":"insight-agent \u7ec4\u4ef6\u72b6\u6001\u8bf4\u660e","text":"

                      \u5728 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u53ef\u89c2\u6d4b\u6027 Insight \u4f5c\u4e3a\u591a\u96c6\u7fa4\u89c2\u6d4b\u4ea7\u54c1\uff0c\u4e3a\u4e86\u5b9e\u73b0\u591a\u96c6\u7fa4\u89c2\u6d4b\u6570\u636e\u7684\u7edf\u4e00\u91c7\u96c6\uff0c\u9700\u8981\u7528\u6237\u5b89\u88c5 Helm \u5e94\u7528 insight-agent \uff08\u9ed8\u8ba4\u5b89\u88c5\u5728 insight-system \u547d\u540d\u7a7a\u95f4\uff09\u3002\u53c2\u9605\u5982\u4f55\u5b89\u88c5 insight-agent \u3002
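As a rough sketch of what that installation looks like on the command line (the repository alias insight and the chart coordinates are assumptions for illustration; follow the linked installation guide for the authoritative steps):

helm upgrade --install insight-agent insight/insight-agent \
  --namespace insight-system \
  --create-namespace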

                      "},{"location":"end-user/insight/collection-manag/agent-status.html#_1","title":"\u72b6\u6001\u8bf4\u660e","text":"

                      \u5728 \u53ef\u89c2\u6d4b\u6027 -> \u91c7\u96c6\u7ba1\u7406 \u90e8\u5206\u53ef\u67e5\u770b\u5404\u96c6\u7fa4\u5b89\u88c5 insight-agent \u7684\u60c5\u51b5\u3002

                      • \u672a\u5b89\u88c5 \uff1a\u8be5\u96c6\u7fa4\u4e2d\u672a\u5728 insight-system \u547d\u540d\u7a7a\u95f4\u4e0b\u5b89\u88c5 insight-agent
                      • \u8fd0\u884c\u4e2d \uff1a\u8be5\u96c6\u7fa4\u4e2d\u6210\u529f\u5b89\u88c5 insight-agent \uff0c\u4e14\u90e8\u7f72\u7684\u6240\u6709\u7ec4\u4ef6\u5747\u5904\u4e8e\u8fd0\u884c\u4e2d\u72b6\u6001
                      • \u5f02\u5e38 \uff1a\u82e5 insight-agent \u5904\u4e8e\u6b64\u72b6\u6001\uff0c\u8bf4\u660e helm \u90e8\u7f72\u5931\u8d25\u6216\u5b58\u5728\u90e8\u7f72\u7684\u7ec4\u4ef6\u5904\u4e8e\u975e\u8fd0\u884c\u4e2d\u72b6\u6001

You can troubleshoot as follows:

1. Run the following command. If the status is deployed, proceed to the next step. If it is failed, it is recommended to uninstall and reinstall under Container Management -> Helm Apps, since a failed release affects application upgrades:

   helm list -n insight-system
2. Run the following command, or check the status of the cluster's deployed components under Observability -> Collection Management. If any pods are not in the Running state, restart the abnormal pods (a sketch follows below).

   kubectl get pods -n insight-system
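A minimal sketch of restarting an abnormal pod, assuming it is managed by a controller (Deployment, DaemonSet, or StatefulSet) that will recreate it; <pod-name> is a placeholder for a pod shown as not Running in the output above:

kubectl delete pod <pod-name> -n insight-system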
                      "},{"location":"end-user/insight/collection-manag/agent-status.html#_2","title":"\u8865\u5145\u8bf4\u660e","text":"
                      1. insight-agent \u4e2d\u6307\u6807\u91c7\u96c6\u7ec4\u4ef6 Prometheus \u7684\u8d44\u6e90\u6d88\u8017\u4e0e\u96c6\u7fa4\u4e2d\u8fd0\u884c\u7684\u5bb9\u5668\u7ec4\u6570\u91cf\u5b58\u5728\u6b63\u6bd4\u5173\u7cfb\uff0c \u8bf7\u6839\u636e\u96c6\u7fa4\u89c4\u6a21\u8c03\u6574 Prometheus \u7684\u8d44\u6e90\uff0c\u8bf7\u53c2\u8003\uff1aPrometheus \u8d44\u6e90\u89c4\u5212

                      2. \u7531\u4e8e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e2d\u6307\u6807\u5b58\u50a8\u7ec4\u4ef6 vmstorage \u7684\u5b58\u50a8\u5bb9\u91cf\u4e0e\u5404\u4e2a\u96c6\u7fa4\u5bb9\u5668\u7ec4\u6570\u91cf\u603b\u548c\u5b58\u5728\u6b63\u6bd4\u5173\u7cfb\u3002

                        • \u8bf7\u8054\u7cfb\u5e73\u53f0\u7ba1\u7406\u5458\u6839\u636e\u96c6\u7fa4\u89c4\u6a21\u8c03\u6574 vmstorage \u7684\u78c1\u76d8\u5bb9\u91cf\uff0c\u53c2\u9605 vmstorage \u78c1\u76d8\u5bb9\u91cf\u89c4\u5212
                        • \u6839\u636e\u591a\u96c6\u7fa4\u89c4\u6a21\u8c03\u6574 vmstorage \u78c1\u76d8\uff0c\u53c2\u9605 vmstorge \u78c1\u76d8\u6269\u5bb9
                      "},{"location":"end-user/insight/collection-manag/collection-manag.html","title":"\u91c7\u96c6\u7ba1\u7406","text":"

                      \u91c7\u96c6\u7ba1\u7406 \u4e3b\u8981\u662f\u96c6\u4e2d\u7ba1\u7406\u3001\u5c55\u793a\u96c6\u7fa4\u5b89\u88c5\u91c7\u96c6\u63d2\u4ef6 insight-agent \u7684\u5165\u53e3\uff0c\u5e2e\u52a9\u7528\u6237\u5feb\u901f\u7684\u67e5\u770b\u96c6\u7fa4\u91c7\u96c6\u63d2\u4ef6\u7684\u5065\u5eb7\u72b6\u6001\uff0c\u5e76\u63d0\u4f9b\u4e86\u5feb\u6377\u5165\u53e3\u914d\u7f6e\u91c7\u96c6\u89c4\u5219\u3002

                      \u5177\u4f53\u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b\uff1a

                      1. \u70b9\u51fb\u5de6\u4e0a\u89d2\u7684\uff0c\u9009\u62e9 \u53ef\u89c2\u6d4b\u6027 \u3002

                      2. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u91c7\u96c6\u7ba1\u7406 \uff0c\u67e5\u770b\u5168\u90e8\u96c6\u7fa4\u91c7\u96c6\u63d2\u4ef6\u7684\u72b6\u6001\u3002

                      3. \u96c6\u7fa4\u63a5\u5165 insight-agent \u4e14\u5904\u4e8e\u8fd0\u884c\u4e2d\u72b6\u6001\u65f6\uff0c\u70b9\u51fb\u67d0\u4e2a\u96c6\u7fa4\u540d\u79f0\u8fdb\u5165\u8be6\u60c5\u3002

                      4. \u5728 \u670d\u52a1\u76d1\u63a7 \u9875\u7b7e\u4e2d\uff0c\u70b9\u51fb\u5feb\u6377\u94fe\u63a5\u8df3\u8f6c\u5230 \u5bb9\u5668\u7ba1\u7406 -> \u81ea\u5b9a\u4e49\u8d44\u6e90 \u6dfb\u52a0\u670d\u52a1\u53d1\u73b0\u89c4\u5219\u3002

                      "},{"location":"end-user/insight/collection-manag/metric-collect.html","title":"\u6307\u6807\u6293\u53d6\u65b9\u5f0f","text":"

                      Prometheus \u4e3b\u8981\u901a\u8fc7 Pull \u7684\u65b9\u5f0f\u6765\u6293\u53d6\u76ee\u6807\u670d\u52a1\u66b4\u9732\u51fa\u6765\u7684\u76d1\u63a7\u63a5\u53e3\uff0c\u56e0\u6b64\u9700\u8981\u914d\u7f6e\u5bf9\u5e94\u7684\u6293\u53d6\u4efb\u52a1\u6765\u8bf7\u6c42\u76d1\u63a7\u6570\u636e\u5e76\u5199\u5165\u5230 Prometheus \u63d0\u4f9b\u7684\u5b58\u50a8\u4e2d\uff0c\u76ee\u524d Prometheus \u670d\u52a1\u63d0\u4f9b\u4e86\u5982\u4e0b\u51e0\u4e2a\u4efb\u52a1\u7684\u914d\u7f6e\uff1a

                      • \u539f\u751f Job \u914d\u7f6e\uff1a\u63d0\u4f9b Prometheus \u539f\u751f\u6293\u53d6 Job \u7684\u914d\u7f6e\u3002
                      • Pod Monitor\uff1a\u5728 K8S \u751f\u6001\u4e0b\uff0c\u57fa\u4e8e Prometheus Operator \u6765\u6293\u53d6 Pod \u4e0a\u5bf9\u5e94\u7684\u76d1\u63a7\u6570\u636e\u3002
                      • Service Monitor\uff1a\u5728 K8S \u751f\u6001\u4e0b\uff0c\u57fa\u4e8e Prometheus Operator \u6765\u6293\u53d6 Service \u5bf9\u5e94 Endpoints \u4e0a\u7684\u76d1\u63a7\u6570\u636e\u3002

                      Note

                      [ ] \u4e2d\u7684\u914d\u7f6e\u9879\u4e3a\u53ef\u9009\u3002

                      "},{"location":"end-user/insight/collection-manag/metric-collect.html#job","title":"\u539f\u751f Job \u914d\u7f6e","text":"

                      \u76f8\u5e94\u914d\u7f6e\u9879\u8bf4\u660e\u5982\u4e0b\uff1a

# Scrape job name; a label (job=job_name) is also added to the scraped metrics
job_name: <job_name>

# Scrape interval
[ scrape_interval: <duration> | default = <global_config.scrape_interval> ]

# Scrape request timeout
[ scrape_timeout: <duration> | default = <global_config.scrape_timeout> ]

# Scrape request URI path
[ metrics_path: <path> | default = /metrics ]

# Handles conflicts between scraped labels and labels added by the backend Prometheus.
# true: keep the scraped labels and ignore the conflicting backend Prometheus labels;
# false: rename the conflicting scraped labels with an exported_<original-label> prefix, then add the backend Prometheus labels;
[ honor_labels: <boolean> | default = false ]

# Whether to use the timestamps produced on the scraped target.
# true: if the target carries timestamps, use them;
# false: ignore the timestamps on the target;
[ honor_timestamps: <boolean> | default = true ]

# Scrape scheme: http or https
[ scheme: <scheme> | default = http ]

# URL parameters of the scrape request
params:
  [ <string>: [<string>, ...] ]

# Sets the `Authorization` header of the scrape request via basic auth. password/password_file are mutually exclusive, with the value in password_file taking precedence.
basic_auth:
  [ username: <string> ]
  [ password: <secret> ]
  [ password_file: <string> ]

# Sets the `Authorization` header of the scrape request via a bearer token. bearer_token/bearer_token_file are mutually exclusive, with the value in bearer_token taking precedence.
[ bearer_token: <secret> ]

# Sets the `Authorization` header of the scrape request via a bearer token file. bearer_token/bearer_token_file are mutually exclusive, with the value in bearer_token taking precedence.
[ bearer_token_file: <filename> ]

# Whether the scrape connection uses a TLS secure channel; configure the corresponding TLS parameters
tls_config:
  [ <tls_config> ]

# Scrape the target's metrics through a proxy server; fill in the proxy server's address
[ proxy_url: <string> ]

# Specify targets via static configuration; see the explanation below.
static_configs:
  [ - <static_config> ... ]

# CVM service discovery configuration; see the explanation below.
cvm_sd_configs:
  [ - <cvm_sd_config> ... ]

# After scraping, rewrite the target's labels via the relabel mechanism, executing multiple relabel rules in order.
# See the relabel_config explanation below.
relabel_configs:
  [ - <relabel_config> ... ]

# Before the scraped data is written, rewrite the label values via the relabel mechanism, executing multiple relabel rules in order.
# See the relabel_config explanation below.
metric_relabel_configs:
  [ - <relabel_config> ... ]

# Per-scrape limit on data points; 0: no limit, default 0
[ sample_limit: <int> | default = 0 ]

# Per-scrape limit on targets; 0: no limit, default 0
[ target_limit: <int> | default = 0 ]
                      "},{"location":"end-user/insight/collection-manag/metric-collect.html#pod-monitor","title":"Pod Monitor","text":"

The corresponding configuration items are described as follows:

# Prometheus Operator CRD version
apiVersion: monitoring.coreos.com/v1
# The corresponding Kubernetes resource type, here Pod Monitor
kind: PodMonitor
# The corresponding Kubernetes metadata; only name matters here. If jobLabel is not specified,
# the value of the job label in the scraped metrics is <namespace>/<name>
metadata:
  name: redis-exporter # fill in a unique name
  namespace: cm-prometheus  # the namespace is fixed, do not modify
# Describes the selection of the target Pods to scrape and the scrape job configuration
  label:
    operator.insight.io/managed-by: insight # label identifying management by Insight
spec:
  # Fill in the label of the target Pod; the pod monitor uses its value as the value of the job label.
  # If looking at the Pod YAML, use the value in pod.metadata.labels.
  # If looking at a Deployment/DaemonSet/StatefulSet, use spec.template.metadata.labels.
  [ jobLabel: string ]
  # Adds the corresponding Pod labels to the Target's labels
  [ podTargetLabels: []string ]
  # Per-scrape limit on data points; 0: no limit, default 0
  [ sampleLimit: uint64 ]
  # Per-scrape limit on targets; 0: no limit, default 0
  [ targetLimit: uint64 ]
  # Configure the exposed Prometheus HTTP endpoints to scrape; multiple Endpoints can be configured
  podMetricsEndpoints:
  [ - <endpoint_config> ... ] # see the endpoint explanation below
  # Select the namespaces where the Pods to monitor reside; leave empty to select all namespaces
  [ namespaceSelector: ]
    # Whether to select all namespaces
    [ any: bool ]
    # List of namespaces to select
    [ matchNames: []string ]
  # Fill in the label values of the Pods to monitor, to locate the target Pods [K8S metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)
  selector:
    [ matchExpressions: array ]
      [ example: - {key: tier, operator: In, values: [cache]} ]
    [ matchLabels: object ]
      [ example: k8s-app: redis-exporter ]
                      "},{"location":"end-user/insight/collection-manag/metric-collect.html#1","title":"\u4e3e\u4f8b 1","text":"
                      apiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: redis-exporter # \u586b\u5199\u4e00\u4e2a\u552f\u4e00\u540d\u79f0\n  namespace: cm-prometheus # namespace \u56fa\u5b9a\uff0c\u4e0d\u8981\u4fee\u6539\n  label:\n    operator.insight.io/managed-by: insight  # Insight \u7ba1\u7406\u7684\u6807\u7b7e\u6807\u8bc6\uff0c\u5fc5\u586b\u3002\nspec:\n  podMetricsEndpoints:\n    - interval: 30s\n      port: metric-port # \u586b\u5199 pod yaml \u4e2d Prometheus Exporter \u5bf9\u5e94\u7684 Port \u7684 Name\n      path: /metrics # \u586b\u5199 Prometheus Exporter \u5bf9\u5e94\u7684 Path \u7684\u503c\uff0c\u4e0d\u586b\u9ed8\u8ba4 /metrics\n      relabelings:\n        - action: replace\n          sourceLabels:\n            - instance\n          regex: (.*)\n          targetLabel: instance\n          replacement: \"crs-xxxxxx\" # \u8c03\u6574\u6210\u5bf9\u5e94\u7684 Redis \u5b9e\u4f8b ID\n        - action: replace\n          sourceLabels:\n            - instance\n          regex: (.*)\n          targetLabel: ip\n          replacement: \"1.x.x.x\" # \u8c03\u6574\u6210\u5bf9\u5e94\u7684 Redis \u5b9e\u4f8b IP\n  namespaceSelector: # \u9009\u62e9\u8981\u76d1\u63a7 Pod \u6240\u5728\u7684 namespace\n    matchNames:\n      - redis-test\n  selector: # \u586b\u5199\u8981\u76d1\u63a7 Pod \u7684 Label \u503c\uff0c\u4ee5\u5b9a\u4f4d\u76ee\u6807 pod\n    matchLabels:\n      k8s-app: redis-exporter\n
                      "},{"location":"end-user/insight/collection-manag/metric-collect.html#2","title":"\u4e3e\u4f8b 2","text":"
                      job_name: prometheus\nscrape_interval: 30s\nstatic_configs:\n- targets:\n  - 127.0.0.1:9090\n
                      "},{"location":"end-user/insight/collection-manag/metric-collect.html#service-monitor","title":"Service Monitor","text":"

The corresponding configuration items are described as follows:

# Prometheus Operator CRD version
apiVersion: monitoring.coreos.com/v1
# The corresponding Kubernetes resource type, here Service Monitor
kind: ServiceMonitor
# The corresponding Kubernetes metadata; only name matters here. If jobLabel is not specified,
# the value of the job label in the scraped metrics is the name of the Service.
metadata:
  name: redis-exporter # fill in a unique name
  namespace: cm-prometheus  # the namespace is fixed, do not modify
# Describes the selection of the target Pods to scrape and the scrape job configuration
  label:
    operator.insight.io/managed-by: insight  # label identifying management by Insight, required.
spec:
  # Fill in the label (metadata/labels) of the target Pod; the service monitor uses its value as the value of the job label
  [ jobLabel: string ]
  # Adds the corresponding Service labels to the Target's labels
  [ targetLabels: []string ]
  # Adds the corresponding Pod labels to the Target's labels
  [ podTargetLabels: []string ]
  # Per-scrape limit on data points; 0: no limit, default 0
  [ sampleLimit: uint64 ]
  # Per-scrape limit on targets; 0: no limit, default 0
  [ targetLimit: uint64 ]
  # Configure the exposed Prometheus HTTP endpoints to scrape; multiple Endpoints can be configured
  endpoints:
  [ - <endpoint_config> ... ] # see the endpoint explanation below
  # Select the namespaces where the Pods to monitor reside; leave empty to select all namespaces
  [ namespaceSelector: ]
    # Whether to select all namespaces
    [ any: bool ]
    # List of namespaces to select
    [ matchNames: []string ]
  # Fill in the label values of the Pods to monitor, to locate the target Pods [K8S metav1.LabelSelector](https://v1-17.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#labelselector-v1-meta)
  selector:
    [ matchExpressions: array ]
      [ example: - {key: tier, operator: In, values: [cache]} ]
    [ matchLabels: object ]
      [ example: k8s-app: redis-exporter ]
                      "},{"location":"end-user/insight/collection-manag/metric-collect.html#_2","title":"\u4e3e\u4f8b","text":"
                      apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: go-demo # \u586b\u5199\u4e00\u4e2a\u552f\u4e00\u540d\u79f0\n  namespace: cm-prometheus # namespace \u56fa\u5b9a\uff0c\u4e0d\u8981\u4fee\u6539\n  label:\n    operator.insight.io/managed-by: insight  # Insight \u7ba1\u7406\u7684\u6807\u7b7e\u6807\u8bc6\uff0c\u5fc5\u586b\u3002\nspec:\n  endpoints:\n    - interval: 30s\n      # \u586b\u5199 service yaml \u4e2d Prometheus Exporter \u5bf9\u5e94\u7684 Port \u7684 Name\n      port: 8080-8080-tcp\n      # \u586b\u5199 Prometheus Exporter \u5bf9\u5e94\u7684 Path \u7684\u503c\uff0c\u4e0d\u586b\u9ed8\u8ba4 /metrics\n      path: /metrics\n      relabelings:\n        # ** \u5fc5\u987b\u8981\u6709\u4e00\u4e2a label \u4e3a application\uff0c\u8fd9\u91cc\u5047\u8bbe k8s \u6709\u4e00\u4e2a label \u4e3a app\uff0c\n        # \u6211\u4eec\u901a\u8fc7 relabel \u7684 replace \u52a8\u4f5c\u628a\u5b83\u66ff\u6362\u6210\u4e86 application\n        - action: replace\n          sourceLabels: [__meta_kubernetes_pod_label_app]\n          targetLabel: application\n  # \u9009\u62e9\u8981\u76d1\u63a7 service \u6240\u5728\u7684 namespace\n  namespaceSelector:\n    matchNames:\n      - golang-demo\n  # \u586b\u5199\u8981\u76d1\u63a7 service \u7684 Label \u503c\uff0c\u4ee5\u5b9a\u4f4d\u76ee\u6807 service\n  selector:\n    matchLabels:\n      app: golang-app-demo\n
                      "},{"location":"end-user/insight/collection-manag/metric-collect.html#endpoint_config","title":"endpoint_config","text":"

The corresponding configuration items are described as follows:

# The name of the corresponding port; note this is not the port number itself. Default: 80. The value comes from:
# ServiceMonitor: Service>spec/ports/name;
# PodMonitor: as follows:
#   If looking at the Pod YAML, use the value in pod.spec.containers.ports.name.
#   If looking at a Deployment/DaemonSet/StatefulSet, use spec.template.spec.containers.ports.name
[ port: string | default = 80]
# Scrape request URI path
[ path: string | default = /metrics ]
# Scrape scheme: http or https
[ scheme: string | default = http]
# URL parameters of the scrape request
[ params: map[string][]string]
# Scrape interval
[ interval: string | default = 30s ]
# Scrape timeout
[ scrapeTimeout: string | default = 30s]
# Whether the scrape connection uses a TLS secure channel; configure the corresponding TLS parameters
[ tlsConfig: TLSConfig ]
# Read the bearer token from the given file and put it in the scrape request header
[ bearerTokenFile: string ]
# Read the bearer token from the given Kubernetes secret key; note the secret namespace must match that of the PodMonitor/ServiceMonitor
[ bearerTokenSecret: string ]
# Handles conflicts between scraped labels and labels added by the backend Prometheus.
# true: keep the scraped labels and ignore the conflicting backend Prometheus labels;
# false: rename the conflicting scraped labels with an exported_<original-label> prefix, then add the backend Prometheus labels;
[ honorLabels: bool | default = false ]
# Whether to use the timestamps produced on the scraped target.
# true: if the target carries timestamps, use them;
# false: ignore the timestamps on the target;
[ honorTimestamps: bool | default = true ]
# basic auth credentials; fill username/password with the corresponding Kubernetes secret key values; note the secret namespace must match that of the PodMonitor/ServiceMonitor.
[ basicAuth: BasicAuth ]
# Scrape the target's metrics through a proxy server; fill in the proxy server's address
[ proxyUrl: string ]
# After scraping, rewrite the target's labels via the relabel mechanism, executing multiple relabel rules in order.
# See the relabel_config explanation below
relabelings:
[ - <relabel_config> ...]
# Before the scraped data is written, rewrite the label values via the relabel mechanism, executing multiple relabel rules in order.
# See the relabel_config explanation below
metricRelabelings:
[ - <relabel_config> ...]
                      "},{"location":"end-user/insight/collection-manag/metric-collect.html#relabel_config","title":"relabel_config","text":"

The corresponding configuration items are described as follows:

# Selects which label values from the original labels to relabel; the selected values are concatenated using the character defined in separator.
# For PodMonitor/ServiceMonitor the corresponding configuration item is sourceLabels
[ source_labels: '[' <labelname> [, ...] ']' ]
# Defines the character used to concatenate the label values to relabel; default ';'
[ separator: <string> | default = ; ]

# When action is replace/hashmod, target_label specifies the corresponding label name.
# For PodMonitor/ServiceMonitor the corresponding configuration item is targetLabel
[ target_label: <labelname> ]

# Regular expression to match the source label values against
[ regex: <regex> | default = (.*) ]

# Used when action is hashmod; takes the md5 of the source label values modulo this value
[ modulus: <int> ]

# When action is replace, replacement defines the expression to substitute after the regex matches; it can reference regex capture groups
[ replacement: <string> | default = $1 ]

# Action performed based on the regex match; the available actions are as follows, default replace:
# replace: if the regex matches, replace the corresponding value with the value defined in replacement, set it via target_label, and add the corresponding label
# keep: discard the target if the regex does not match
# drop: discard the target if the regex matches
# hashmod: take the md5 of the source label values modulo the value specified by modulus,
#          and add a new label whose name is specified by target_label
# labelmap: if the regex matches, replace the corresponding label name with replacement
# labeldrop: if the regex matches, delete the corresponding label
# labelkeep: if the regex does not match, delete the corresponding label
[ action: <relabel_action> | default = replace ]
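As a quick illustration of how these fields combine, here is a minimal hypothetical relabeling sketch (the label values are illustrative): the first rule keeps only targets whose app label is redis-exporter, and the second copies the pod name into an instance label:

relabelings:
  - action: keep                  # drop targets whose app label does not match the regex
    sourceLabels: [__meta_kubernetes_pod_label_app]
    regex: redis-exporter
  - action: replace               # copy the Kubernetes pod name into the "instance" label
    sourceLabels: [__meta_kubernetes_pod_name]
    targetLabel: instance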
                      "},{"location":"end-user/insight/collection-manag/probe-module.html","title":"\u81ea\u5b9a\u4e49\u63a2\u6d4b\u65b9\u5f0f","text":"

                      Insight \u4f7f\u7528 Prometheus \u5b98\u65b9\u63d0\u4f9b\u7684 Blackbox Exporter \u4f5c\u4e3a\u9ed1\u76d2\u76d1\u63a7\u89e3\u51b3\u65b9\u6848\uff0c\u53ef\u4ee5\u901a\u8fc7 HTTP\u3001HTTPS\u3001DNS\u3001ICMP\u3001TCP \u548c gRPC \u65b9\u5f0f\u5bf9\u76ee\u6807\u5b9e\u4f8b\u8fdb\u884c\u68c0\u6d4b\u3002\u53ef\u7528\u4e8e\u4ee5\u4e0b\u4f7f\u7528\u573a\u666f\uff1a

                      • HTTP/HTTPS\uff1aURL/API\u53ef\u7528\u6027\u68c0\u6d4b
                      • ICMP\uff1a\u4e3b\u673a\u5b58\u6d3b\u68c0\u6d4b
                      • TCP\uff1a\u7aef\u53e3\u5b58\u6d3b\u68c0\u6d4b
                      • DNS\uff1a\u57df\u540d\u89e3\u6790

                      \u5728\u672c\u6587\u4e2d\uff0c\u6211\u4eec\u5c06\u4ecb\u7ecd\u5982\u4f55\u5728\u5df2\u6709\u7684 Blackbox ConfigMap \u4e2d\u914d\u7f6e\u81ea\u5b9a\u4e49\u7684\u63a2\u6d4b\u65b9\u5f0f\u3002

                      Insight \u9ed8\u8ba4\u672a\u5f00\u542f ICMP \u63a2\u6d4b\u65b9\u5f0f\uff0c\u56e0\u4e3a ICMP \u9700\u8981\u66f4\u9ad8\u6743\u9650\uff0c\u56e0\u6b64\uff0c\u6211\u4eec\u5c06\u4ee5 ICMP \u548c HTTP \u63a2\u6d4b\u65b9\u5f0f\u4f5c\u4e3a\u793a\u4f8b\uff0c\u5c55\u793a\u5982\u4f55\u4fee\u6539 ConfigMap \u4ee5\u5b9e\u73b0\u81ea\u5b9a\u4e49\u7684 ICMP \u548c HTTP \u63a2\u6d4b\u3002

                      "},{"location":"end-user/insight/collection-manag/probe-module.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 \u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u70b9\u51fb\u8fdb\u5165\u76ee\u6807\u96c6\u7fa4\u7684\u8be6\u60c5\uff1b
                      2. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\uff0c\u9009\u62e9 \u914d\u7f6e\u4e0e\u5bc6\u94a5 -> \u914d\u7f6e\u9879 \uff1b
                      3. \u627e\u5230\u540d\u4e3a insight-agent-prometheus-blackbox-exporter \u7684\u914d\u7f6e\u9879\uff0c\u70b9\u51fb \u7f16\u8f91 YAML\uff1b

                        \u5728 modules \u4e0b\u6dfb\u52a0\u81ea\u5b9a\u4e49\u63a2\u6d4b\u65b9\u5f0f\uff1a

                      HTTP \u63a2\u6d4bICMP \u63a2\u6d4b
                      module:\n  http_2xx:\n    prober: http\n    timeout: 5s\n    http:\n      valid_http_versions: [HTTP/1.1, HTTP/2]\n      valid_status_codes: []  # Defaults to 2xx\n      method: GET\n

                      module:\n  ICMP: # ICMP \u63a2\u6d4b\u914d\u7f6e\u7684\u793a\u4f8b\n    prober: icmp\n    timeout: 5s\n    icmp:\n      preferred_ip_protocol: ip4\nicmp_example: # ICMP \u63a2\u6d4b\u914d\u7f6e\u7684\u793a\u4f8b 2\n  prober: icmp\n  timeout: 5s\n  icmp:\n    preferred_ip_protocol: \"ip4\"\n    source_ip_address: \"127.0.0.1\"\n
Because ICMP probing requires elevated privileges, we also need to elevate the Pod's privileges; otherwise an operation not permitted error occurs. There are two ways to elevate privileges:

• Option 1: Edit the Blackbox Exporter Deployment directly

  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: insight-agent-prometheus-blackbox-exporter\n  namespace: insight-system\nspec:\n  template:\n    spec:\n      containers:\n        - name: blackbox-exporter\n          image: # ... (image, args, ports, etc. unchanged)\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            allowPrivilegeEscalation: false\n            capabilities:\n              add:\n              - NET_RAW\n              drop:\n              - ALL\n            readOnlyRootFilesystem: true\n            runAsGroup: 0\n            runAsNonRoot: false\n            runAsUser: 0\n

• Option 2: Elevate privileges via helm upgrade

  prometheus-blackbox-exporter:\n  enabled: true\n  securityContext:\n    runAsUser: 0\n    runAsGroup: 0\n    readOnlyRootFilesystem: true\n    runAsNonRoot: false\n    allowPrivilegeEscalation: false\n    capabilities:\n      add: [\"NET_RAW\"]\n
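  These values would typically be applied with a standard helm upgrade invocation, for example: helm upgrade insight-agent <chart> -n insight-system -f values.yaml --reuse-values (the release name, chart reference, and namespace here are assumptions; use the ones from your installation).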

Info

For more probe modules, see blackbox_exporter Configuration.

                      "},{"location":"end-user/insight/collection-manag/probe-module.html#_3","title":"\u5176\u4ed6\u53c2\u8003","text":"

                      \u4ee5\u4e0b YAML \u6587\u4ef6\u4e2d\u5305\u542b\u4e86 HTTP\u3001TCP\u3001SMTP\u3001ICMP\u3001DNS \u7b49\u591a\u79cd\u63a2\u6d4b\u65b9\u5f0f\uff0c\u53ef\u6839\u636e\u9700\u6c42\u81ea\u884c\u4fee\u6539 insight-agent-prometheus-blackbox-exporter \u7684\u914d\u7f6e\u6587\u4ef6\u3002

                      \u70b9\u51fb\u67e5\u770b\u5b8c\u6574\u7684 YAML \u6587\u4ef6
                      kind: ConfigMap\napiVersion: v1\nmetadata:\n  name: insight-agent-prometheus-blackbox-exporter\n  namespace: insight-system\n  labels:\n    app.kubernetes.io/instance: insight-agent\n    app.kubernetes.io/managed-by: Helm\n    app.kubernetes.io/name: prometheus-blackbox-exporter\n    app.kubernetes.io/version: v0.24.0\n    helm.sh/chart: prometheus-blackbox-exporter-8.8.0\n  annotations:\n    meta.helm.sh/release-name: insight-agent\n    meta.helm.sh/release-namespace: insight-system\ndata:\n  blackbox.yaml: |\n    modules:\n      HTTP_GET:\n        prober: http\n        timeout: 5s\n        http:\n          method: GET\n          valid_http_versions: [\"HTTP/1.1\", \"HTTP/2.0\"]\n          follow_redirects: true\n          preferred_ip_protocol: \"ip4\"\n      HTTP_POST:\n        prober: http\n        timeout: 5s\n        http:\n          method: POST\n          body_size_limit: 1MB\n      TCP:\n        prober: tcp\n        timeout: 5s\n      # \u9ed8\u8ba4\u672a\u5f00\u542f\uff1a\n      # ICMP:\n      #   prober: icmp\n      #   timeout: 5s\n      #   icmp:\n      #     preferred_ip_protocol: ip4\n      SSH:\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n          - expect: \"^SSH-2.0-\"\n      POP3S:\n        prober: tcp\n        tcp:\n          query_response:\n          - expect: \"^+OK\"\n          tls: true\n          tls_config:\n            insecure_skip_verify: false\n      http_2xx_example:               # http \u63a2\u6d4b\u793a\u4f8b\n        prober: http\n        timeout: 5s                   # \u63a2\u6d4b\u7684\u8d85\u65f6\u65f6\u95f4\n        http:\n          valid_http_versions: [\"HTTP/1.1\", \"HTTP/2.0\"]                   # \u8fd4\u56de\u4fe1\u606f\u4e2d\u7684 Version\uff0c\u4e00\u822c\u9ed8\u8ba4\u5373\u53ef\n          valid_status_codes: []  # Defaults to 2xx                       # \u6709\u6548\u7684\u8fd4\u56de\u7801\u8303\u56f4\uff0c\u5982\u679c\u8bf7\u6c42\u7684\u8fd4\u56de\u7801\u5728\u8be5\u8303\u56f4\u5185\uff0c\u89c6\u4e3a\u63a2\u6d4b\u6210\u529f\n          method: GET                 # \u8bf7\u6c42\u65b9\u6cd5\n          headers:                    # \u8bf7\u6c42\u7684\u5934\u90e8\n            Host: vhost.example.com\n            Accept-Language: en-US\n            Origin: example.com\n          no_follow_redirects: false  # \u662f\u5426\u5141\u8bb8\u91cd\u5b9a\u5411\n          fail_if_ssl: false   \n          fail_if_not_ssl: false\n          fail_if_body_matches_regexp:\n            - \"Could not connect to database\"\n          fail_if_body_not_matches_regexp:\n            - \"Download the latest version here\"\n          fail_if_header_matches: # Verifies that no cookies are set\n            - header: Set-Cookie\n              allow_missing: true\n              regexp: '.*'\n          fail_if_header_not_matches:\n            - header: Access-Control-Allow-Origin\n              regexp: '(\\*|example\\.com)'\n          tls_config:                  # \u9488\u5bf9 https \u8bf7\u6c42\u7684 tls \u7684\u914d\u7f6e\n            insecure_skip_verify: false\n          preferred_ip_protocol: \"ip4\" # defaults to \"ip6\"                 # \u9996\u9009\u7684 IP \u534f\u8bae\u7248\u672c\n          ip_protocol_fallback: false  # no fallback to \"ip6\"            \n      http_post_2xx:                   # \u5e26 Body \u7684 http \u63a2\u6d4b\u7684\u793a\u4f8b\n        prober: http\n        timeout: 5s\n        http:\n          method: POST                 # \u63a2\u6d4b\u7684\u8bf7\u6c42\u65b9\u6cd5\n          
headers:\n            Content-Type: application/json\n          body: '{\"username\":\"admin\",\"password\":\"123456\"}'                   # \u63a2\u6d4b\u65f6\u643a\u5e26\u7684 body\n      http_basic_auth_example:         # \u5e26\u7528\u6237\u540d\u5bc6\u7801\u7684\u63a2\u6d4b\u7684\u793a\u4f8b\n        prober: http\n        timeout: 5s\n        http:\n          method: POST\n          headers:\n            Host: \"login.example.com\"\n          basic_auth:                  # \u63a2\u6d4b\u65f6\u8981\u52a0\u7684\u7528\u6237\u540d\u5bc6\u7801\n            username: \"username\"\n            password: \"mysecret\"\n      http_custom_ca_example:\n        prober: http\n        http:\n          method: GET\n          tls_config:                  # \u6307\u5b9a\u63a2\u6d4b\u65f6\u4f7f\u7528\u7684\u6839\u8bc1\u4e66\n            ca_file: \"/certs/my_cert.crt\"\n      http_gzip:\n        prober: http\n        http:\n          method: GET\n          compression: gzip            # \u63a2\u6d4b\u65f6\u4f7f\u7528\u7684\u538b\u7f29\u65b9\u6cd5\n      http_gzip_with_accept_encoding:\n        prober: http\n        http:\n          method: GET\n          compression: gzip\n          headers:\n            Accept-Encoding: gzip\n      tls_connect:                     # TCP \u63a2\u6d4b\u7684\u793a\u4f8b\n        prober: tcp\n        timeout: 5s\n        tcp:\n          tls: true                    # \u662f\u5426\u4f7f\u7528 TLS\n      tcp_connect_example:\n        prober: tcp\n        timeout: 5s\n      imap_starttls:                   # \u63a2\u6d4b IMAP \u90ae\u7bb1\u670d\u52a1\u5668\u7684\u914d\u7f6e\u793a\u4f8b\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - expect: \"OK.*STARTTLS\"\n            - send: \". STARTTLS\"\n            - expect: \"OK\"\n            - starttls: true\n            - send: \". 
capability\"\n            - expect: \"CAPABILITY IMAP4rev1\"\n      smtp_starttls:                   # \u63a2\u6d4b SMTP \u90ae\u7bb1\u670d\u52a1\u5668\u7684\u914d\u7f6e\u793a\u4f8b\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - expect: \"^220 ([^ ]+) ESMTP (.+)$\"\n            - send: \"EHLO prober\\r\"\n            - expect: \"^250-STARTTLS\"\n            - send: \"STARTTLS\\r\"\n            - expect: \"^220\"\n            - starttls: true\n            - send: \"EHLO prober\\r\"\n            - expect: \"^250-AUTH\"\n            - send: \"QUIT\\r\"\n      irc_banner_example:\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - send: \"NICK prober\"\n            - send: \"USER prober prober prober :prober\"\n            - expect: \"PING :([^ ]+)\"\n              send: \"PONG ${1}\"\n            - expect: \"^:[^ ]+ 001\"\n      # icmp_example:                    # ICMP \u63a2\u6d4b\u914d\u7f6e\u7684\u793a\u4f8b\n      #   prober: icmp\n      #   timeout: 5s\n      #   icmp:\n      #     preferred_ip_protocol: \"ip4\"\n      #     source_ip_address: \"127.0.0.1\"\n      dns_udp_example:                 # \u4f7f\u7528 UDP \u8fdb\u884c DNS \u67e5\u8be2\u7684\u793a\u4f8b\n        prober: dns\n        timeout: 5s\n        dns:\n          query_name: \"www.prometheus.io\"                 # \u8981\u89e3\u6790\u7684\u57df\u540d\n          query_type: \"A\"              # \u8be5\u57df\u540d\u5bf9\u5e94\u7684\u7c7b\u578b\n          valid_rcodes:\n          - NOERROR\n          validate_answer_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n            fail_if_all_match_regexp:\n            - \".*127.0.0.1\"\n            fail_if_not_matches_regexp:\n            - \"www.prometheus.io.\\t300\\tIN\\tA\\t127.0.0.1\"\n            fail_if_none_matches_regexp:\n            - \"127.0.0.1\"\n          validate_authority_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n          validate_additional_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n      dns_soa:\n        prober: dns\n        dns:\n          query_name: \"prometheus.io\"\n          query_type: \"SOA\"\n      dns_tcp_example:               # \u4f7f\u7528 TCP \u8fdb\u884c DNS \u67e5\u8be2\u7684\u793a\u4f8b\n        prober: dns\n        dns:\n          transport_protocol: \"tcp\" # defaults to \"udp\"\n          preferred_ip_protocol: \"ip4\" # defaults to \"ip6\"\n          query_name: \"www.prometheus.io\"\n
                      "},{"location":"end-user/insight/collection-manag/service-monitor.html","title":"\u914d\u7f6e\u670d\u52a1\u53d1\u73b0\u89c4\u5219","text":"

                      \u53ef\u89c2\u6d4b Insight \u652f\u6301\u901a\u8fc7 \u5bb9\u5668\u7ba1\u7406 \u521b\u5efa CRD ServiceMonitor \u7684\u65b9\u5f0f\u6765\u6ee1\u8db3\u60a8\u81ea\u5b9a\u4e49\u670d\u52a1\u53d1\u73b0\u7684\u91c7\u96c6\u9700\u6c42\u3002 \u7528\u6237\u53ef\u4ee5\u901a\u8fc7\u4f7f\u7528 ServiceMonitor \u81ea\u884c\u5b9a\u4e49 Pod \u53d1\u73b0\u7684 Namespace \u8303\u56f4\u4ee5\u53ca\u901a\u8fc7 matchLabel \u6765\u9009\u62e9\u76d1\u542c\u7684 Service\u3002

                      "},{"location":"end-user/insight/collection-manag/service-monitor.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u96c6\u7fa4\u5df2\u5b89\u88c5 Helm \u5e94\u7528 insight-agent \u4e14\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u3002

                      "},{"location":"end-user/insight/collection-manag/service-monitor.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u91c7\u96c6\u7ba1\u7406 \uff0c\u67e5\u770b\u5168\u90e8\u96c6\u7fa4\u91c7\u96c6\u63d2\u4ef6\u7684\u72b6\u6001\u3002

                      2. \u70b9\u51fb\u5217\u8868\u4e2d\u7684\u67d0\u4e2a\u96c6\u7fa4\u540d\u79f0\u8fdb\u5165\u91c7\u96c6\u914d\u7f6e\u8be6\u60c5\u3002

                      3. \u70b9\u51fb\u94fe\u63a5\u8df3\u8f6c\u5230 \u5bb9\u5668\u7ba1\u7406 \u4e2d\u521b\u5efa Service Monitor\u3002

                        apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: micrometer-demo # (1)\n  namespace: insight-system # (2)\n    labels: \n      operator.insight.io/managed-by: insight\nspec:\n  endpoints: # (3)\n    - honorLabels: true\n        interval: 15s\n        path: /actuator/prometheus\n        port: http\n  namespaceSelector: # (4)\n    matchNames:\n      - insight-system  # (5)\n  selector: # (6)\n    matchLabels:\n          micrometer-prometheus-discovery: \"true\"\n
  1. The name of the ServiceMonitor.
  2. The namespace of the ServiceMonitor.
  3. The service endpoints, i.e. the addresses from which Prometheus scrapes metrics. endpoints is an array, so multiple endpoints can be created at once. Each endpoint contains three fields, with the following meanings:

     • interval: the period at which Prometheus scrapes this endpoint, in seconds; set to 15s in this example.
     • path: the scrape path; /actuator/prometheus in this example.
     • port: the port used for scraping, set to the name configured on the scraped Service's port.
  4. The scope of Services to discover. namespaceSelector contains two mutually exclusive fields, with the following meanings:

     • any: takes the single value true; when set, changes to all Services matching the Selector filter are watched.
     • matchNames: an array specifying the namespaces to watch. For example, to watch Services only in the default and insight-system namespaces, set matchNames as follows:

       namespaceSelector:\n  matchNames:\n    - default\n    - insight-system\n
  5. The namespace matched here is the namespace of the application that exposes the metrics.
  6. Used to select the Service.
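For reference, here is a minimal sketch of a Service that the ServiceMonitor above would match — the application name and port number are hypothetical; the parts that matter are the micrometer-prometheus-discovery label selected in (6) and the port name http referenced in (3):

apiVersion: v1\nkind: Service\nmetadata:\n  name: micrometer-demo-svc      # hypothetical name\n  namespace: insight-system\n  labels:\n    micrometer-prometheus-discovery: \"true\"  # matched by spec.selector.matchLabels\nspec:\n  selector:\n    app: micrometer-demo         # hypothetical Pod label\n  ports:\n    - name: http                 # referenced by endpoints[].port above\n      port: 8080\n      targetPort: 8080\n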
                      "},{"location":"end-user/insight/dashboard/dashboard.html","title":"\u4eea\u8868\u76d8","text":"

                      Grafana \u662f\u4e00\u79cd\u5f00\u6e90\u7684\u6570\u636e\u53ef\u89c6\u5316\u548c\u76d1\u63a7\u5e73\u53f0\uff0c\u5b83\u63d0\u4f9b\u4e86\u4e30\u5bcc\u7684\u56fe\u8868\u548c\u9762\u677f\uff0c\u7528\u4e8e\u5b9e\u65f6\u76d1\u63a7\u3001\u5206\u6790\u548c\u53ef\u89c6\u5316\u5404\u79cd\u6570\u636e\u6e90\u7684\u6307\u6807\u548c\u65e5\u5fd7\u3002\u53ef\u89c2\u6d4b\u6027 Insight \u4f7f\u7528\u5f00\u6e90 Grafana \u63d0\u4f9b\u76d1\u63a7\u670d\u52a1\uff0c\u652f\u6301\u4ece\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u547d\u540d\u7a7a\u95f4\u7b49\u591a\u7ef4\u5ea6\u67e5\u770b\u8d44\u6e90\u6d88\u8017\u60c5\u51b5\uff0c

                      \u5173\u4e8e\u5f00\u6e90 Grafana \u7684\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u53c2\u89c1 Grafana \u5b98\u65b9\u6587\u6863\u3002

                      "},{"location":"end-user/insight/dashboard/dashboard.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u9009\u62e9 \u4eea\u8868\u76d8 \u3002

                        • \u5728 Insight /\u6982\u89c8 \u4eea\u8868\u76d8\u4e2d\uff0c\u53ef\u67e5\u770b\u591a\u9009\u96c6\u7fa4\u7684\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\uff0c\u5e76\u4ee5\u547d\u540d\u7a7a\u95f4\u3001\u5bb9\u5668\u7ec4\u7b49\u591a\u4e2a\u7ef4\u5ea6\u5206\u6790\u4e86\u8d44\u6e90\u4f7f\u7528\u3001\u7f51\u7edc\u3001\u5b58\u50a8\u7b49\u60c5\u51b5\u3002

                        • \u70b9\u51fb\u4eea\u8868\u76d8\u5de6\u4e0a\u4fa7\u7684\u4e0b\u62c9\u6846\u53ef\u5207\u6362\u96c6\u7fa4\u3002

                        • \u70b9\u51fb\u4eea\u8868\u76d8\u53f3\u4e0b\u4fa7\u53ef\u5207\u6362\u67e5\u8be2\u7684\u65f6\u95f4\u8303\u56f4\u3002

                      2. Insight \u7cbe\u9009\u591a\u4e2a\u793e\u533a\u63a8\u8350\u4eea\u8868\u76d8\uff0c\u53ef\u4ece\u8282\u70b9\u3001\u547d\u540d\u7a7a\u95f4\u3001\u5de5\u4f5c\u8d1f\u8f7d\u7b49\u591a\u4e2a\u7ef4\u5ea6\u8fdb\u884c\u76d1\u63a7\u3002\u70b9\u51fb insight-system / Insight /\u6982\u89c8 \u533a\u57df\u5207\u6362\u4eea\u8868\u76d8\u3002

                      Note

                      1. \u8bbf\u95ee Grafana UI \u8bf7\u53c2\u8003\u4ee5\u7ba1\u7406\u5458\u8eab\u4efd\u767b\u5f55 Grafana\u3002

                      2. \u5bfc\u5165\u81ea\u5b9a\u4e49\u4eea\u8868\u76d8\u8bf7\u53c2\u8003\u5bfc\u5165\u81ea\u5b9a\u4e49\u4eea\u8868\u76d8\u3002

                      "},{"location":"end-user/insight/dashboard/import-dashboard.html","title":"\u5bfc\u5165\u81ea\u5b9a\u4e49\u4eea\u8868\u76d8","text":"

                      \u901a\u8fc7\u4f7f\u7528 Grafana CRD\uff0c\u53ef\u4ee5\u5c06\u4eea\u8868\u677f\u7684\u7ba1\u7406\u548c\u90e8\u7f72\u7eb3\u5165\u5230 Kubernetes \u7684\u751f\u547d\u5468\u671f\u7ba1\u7406\u4e2d\uff0c\u5b9e\u73b0\u4eea\u8868\u677f\u7684\u7248\u672c\u63a7\u5236\u3001\u81ea\u52a8\u5316\u90e8\u7f72\u548c\u96c6\u7fa4\u7ea7\u7684\u7ba1\u7406\u3002\u672c\u9875\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7 CRD \u548c UI \u754c\u9762\u5bfc\u5165\u81ea\u5b9a\u4e49\u7684\u4eea\u8868\u76d8\u3002

                      "},{"location":"end-user/insight/dashboard/import-dashboard.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0 \u5e73\u53f0\uff0c\u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 \uff0c\u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u9009\u62e9 kpanda-global-cluster \u3002

                      2. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u81ea\u5b9a\u4e49\u8d44\u6e90 \uff0c\u5728\u5217\u8868\u4e2d\u67e5\u627e grafanadashboards.integreatly.org \u6587\u4ef6\uff0c\u8fdb\u5165\u8be6\u60c5\u3002

                      3. \u70b9\u51fb Yaml \u521b\u5efa \uff0c\u4f7f\u7528\u4ee5\u4e0b\u6a21\u677f\uff0c\u5728 Json \u5b57\u6bb5\u4e2d\u66ff\u6362\u4eea\u8868\u76d8 JSON\u3002

                        • namespace \uff1a\u586b\u5199\u76ee\u6807\u547d\u540d\u7a7a\u95f4\uff1b
                        • name \uff1a\u586b\u5199\u4eea\u8868\u76d8\u7684\u540d\u79f0\u3002
                        • label \uff1a\u5fc5\u586b\uff0c operator.insight.io/managed-by: insight \u3002
                        apiVersion: integreatly.org/v1alpha1\nkind: GrafanaDashboard\nmetadata:\n  labels:\n    app: insight-grafana-operator\n    operator.insight.io/managed-by: insight\n  name: sample-dashboard\n  namespace: insight-system\nspec:\n  json: >\n    {\n      \"id\": null,\n      \"title\": \"Simple Dashboard\",\n      \"tags\": [],\n      \"style\": \"dark\",\n      \"timezone\": \"browser\",\n      \"editable\": true,\n      \"hideControls\": false,\n      \"graphTooltip\": 1,\n      \"panels\": [],\n      \"time\": {\n        \"from\": \"now-6h\",\n        \"to\": \"now\"\n      },\n      \"timepicker\": {\n        \"time_options\": [],\n        \"refresh_intervals\": []\n      },\n      \"templating\": {\n        \"list\": []\n      },\n      \"annotations\": {\n        \"list\": []\n      },\n      \"refresh\": \"5s\",\n      \"schemaVersion\": 17,\n      \"version\": 0,\n      \"links\": []\n    }\n
4. Click Confirm; after a short wait, the newly imported dashboard appears in Dashboards.

Info

To design your own dashboards, see Add Dashboard Panels.

                      "},{"location":"end-user/insight/dashboard/login-grafana.html","title":"\u8bbf\u95ee\u539f\u751f Grafana","text":"

                      Insight \u501f\u52a9 Grafana \u63d0\u4f9b\u4e86\u4e30\u5bcc\u7684\u53ef\u89c6\u5316\u80fd\u529b\uff0c\u540c\u65f6\u4fdd\u7559\u4e86\u8bbf\u95ee\u539f\u751f Grafana \u7684\u5165\u53e3\u3002

                      "},{"location":"end-user/insight/dashboard/login-grafana.html#_1","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u767b\u5f55\u6d4f\u89c8\u5668\uff0c\u5728\u6d4f\u89c8\u5668\u4e2d\u8f93\u5165 Grafana \u5730\u5740\u3002

                        \u8bbf\u95ee\u5730\u5740\uff1a http://ip:\u8bbf\u95ee\u7aef\u53e3/ui/insight-grafana/login

                        \u4f8b\u5982\uff1a http://10.6.10.233:30209/ui/insight-grafana/login

                      2. \u70b9\u51fb\u53f3\u4e0b\u89d2\u7684\u767b\u5f55\uff0c\u4f7f\u7528\u9ed8\u8ba4\u7528\u6237\u540d\u3001\u5bc6\u7801\uff08admin/admin\uff09\u8fdb\u884c\u767b\u5f55\u3002

                      3. \u70b9\u51fb Log in \u5b8c\u6210\u767b\u5f55\u3002

                      "},{"location":"end-user/insight/dashboard/overview.html","title":"\u6982\u89c8","text":"

                      \u6982\u7387 \u4ec5\u7edf\u8ba1\u5df2\u5b89\u88c5 insight-agent \u4e14\u5176\u8fd0\u884c\u72b6\u6001\u4e3a\u6b63\u5e38\u7684\u96c6\u7fa4\u6570\u636e\u3002\u53ef\u5728\u6982\u89c8\u4e2d\u591a\u96c6\u7fa4\u7684\u8d44\u6e90\u6982\u51b5\uff1a

                      • \u544a\u8b66\u7edf\u8ba1\uff1a\u53ef\u67e5\u770b\u6240\u6709\u96c6\u7fa4\u7684\u6b63\u5728\u544a\u8b66\u7684\u7edf\u8ba1\u6570\u636e\u3002
                      • \u8d44\u6e90\u6d88\u8017\uff1a\u53ef\u6309 CPU \u4f7f\u7528\u7387\u3001\u5185\u5b58\u4f7f\u7528\u7387\u548c\u78c1\u76d8\u4f7f\u7528\u7387\u5206\u522b\u67e5\u770b\u8fd1\u4e00\u5c0f\u65f6 TOP5 \u96c6\u7fa4\u3001\u8282\u70b9\u7684\u8d44\u6e90\u53d8\u5316\u8d8b\u52bf\u3002
                      • \u9ed8\u8ba4\u6309\u7167\u6839\u636e CPU \u4f7f\u7528\u7387\u6392\u5e8f\u3002\u60a8\u53ef\u5207\u6362\u6307\u6807\u5207\u6362\u96c6\u7fa4\u3001\u8282\u70b9\u7684\u6392\u5e8f\u65b9\u5f0f\u3002
                      • \u8d44\u6e90\u53d8\u5316\u8d8b\u52bf\uff1a\u53ef\u67e5\u770b\u8fd1 15 \u5929\u7684\u8282\u70b9\u4e2a\u6570\u8d8b\u52bf\u4ee5\u53ca\u4e00\u5c0f\u65f6 Pod \u7684\u8fd0\u884c\u8d8b\u52bf\u3002
                      • \u670d\u52a1\u8bf7\u6c42\u6392\u884c\uff1a\u53ef\u67e5\u770b\u591a\u96c6\u7fa4\u4e2d\u8bf7\u6c42\u5ef6\u65f6\u3001\u9519\u8bef\u7387\u6392\u884c TOP5 \u7684\u670d\u52a1\u53ca\u6240\u5728\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u3002
                      "},{"location":"end-user/insight/dashboard/overview.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                      \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u6982\u89c8 \u3002

                      "},{"location":"end-user/insight/data-query/log.html","title":"\u65e5\u5fd7\u67e5\u8be2","text":"

                      Insight \u9ed8\u8ba4\u91c7\u96c6\u8282\u70b9\u65e5\u5fd7\u3001\u5bb9\u5668\u65e5\u5fd7\u4ee5\u53ca kubernetes \u5ba1\u8ba1\u65e5\u5fd7\u3002\u5728\u65e5\u5fd7\u67e5\u8be2\u9875\u9762\u4e2d\uff0c\u53ef\u67e5\u8be2\u767b\u5f55\u8d26\u53f7\u6743\u9650\u5185\u7684\u6807\u51c6\u8f93\u51fa (stdout) \u65e5\u5fd7\uff0c\u5305\u62ec\u8282\u70b9\u65e5\u5fd7\u3001\u4ea7\u54c1\u65e5\u5fd7\u3001Kubenetes \u5ba1\u8ba1\u65e5\u5fd7\u7b49\uff0c\u5feb\u901f\u5728\u5927\u91cf\u65e5\u5fd7\u4e2d\u67e5\u8be2\u5230\u6240\u9700\u7684\u65e5\u5fd7\uff0c\u540c\u65f6\u7ed3\u5408\u65e5\u5fd7\u7684\u6765\u6e90\u4fe1\u606f\u548c\u4e0a\u4e0b\u6587\u539f\u59cb\u6570\u636e\u8f85\u52a9\u5b9a\u4f4d\u95ee\u9898\u3002

                      "},{"location":"end-user/insight/data-query/log.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u70b9\u51fb\u4e00\u7ea7\u5bfc\u822a\u680f\u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u3002
                      2. \u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9 \u65e5\u5fd7 \u3002

                        • \u9ed8\u8ba4\u67e5\u8be2\u6700\u8fd1 24 \u5c0f\u65f6\uff1b
                        • \u7b2c\u4e00\u6b21\u8fdb\u5165\u65f6\uff0c\u9ed8\u8ba4\u6839\u636e\u767b\u5f55\u8d26\u53f7\u6743\u9650\u67e5\u8be2\u6709\u6743\u9650\u7684\u96c6\u7fa4\u6216\u547d\u540d\u7a7a\u95f4\u7684\u5bb9\u5668\u65e5\u5fd7\uff1b

                      3. \u9876\u90e8 Tab \u9ed8\u8ba4\u8fdb\u5165 \u666e\u901a\u67e5\u8be2 \u3002

                        1. \u70b9\u51fb \u7b5b\u9009 \u5c55\u5f00\u8fc7\u6ee4\u9762\u677f\uff0c\u53ef\u5207\u6362\u65e5\u5fd7\u641c\u7d22\u6761\u4ef6\u548c\u7c7b\u578b\u3002
                        2. \u65e5\u5fd7\u7c7b\u578b\uff1a

                          • \u5bb9\u5668\u65e5\u5fd7 \uff1a\u8bb0\u5f55\u96c6\u7fa4\u4e2d\u5bb9\u5668\u5185\u90e8\u7684\u6d3b\u52a8\u548c\u4e8b\u4ef6\uff0c\u5305\u62ec\u5e94\u7528\u7a0b\u5e8f\u7684\u8f93\u51fa\u3001\u9519\u8bef\u6d88\u606f\u3001\u8b66\u544a\u548c\u8c03\u8bd5\u4fe1\u606f\u7b49\u3002\u652f\u6301\u901a\u8fc7\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u3001\u5bb9\u5668\u7ec4\u3001\u5bb9\u5668\u8fc7\u6ee4\u65e5\u5fd7\u3002
                          • \u8282\u70b9\u65e5\u5fd7 \uff1a\u8bb0\u5f55\u96c6\u7fa4\u4e2d\u6bcf\u4e2a\u8282\u70b9\u7684\u7cfb\u7edf\u7ea7\u522b\u65e5\u5fd7\u3002\u8fd9\u4e9b\u65e5\u5fd7\u5305\u542b\u8282\u70b9\u7684\u64cd\u4f5c\u7cfb\u7edf\u3001\u5185\u6838\u3001\u670d\u52a1\u548c\u7ec4\u4ef6\u7684\u76f8\u5173\u4fe1\u606f\u3002\u652f\u6301\u901a\u8fc7\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u6587\u4ef6\u8def\u5f84\u8fc7\u6ee4\u65e5\u5fd7\u3002
                        3. \u652f\u6301\u5bf9\u5355\u4e2a\u5173\u952e\u5b57\u8fdb\u884c\u6a21\u7cca\u641c\u7d22\u3002

                      4. \u9876\u90e8\u5207\u6362 Tab \u9009\u62e9 Lucene \u8bed\u6cd5\u67e5\u8be2 \u3002

                        \u7b2c\u4e00\u6b21\u8fdb\u5165\u65f6\uff0c\u9ed8\u8ba4\u9009\u62e9\u767b\u5f55\u8d26\u53f7\u6743\u9650\u67e5\u8be2\u6709\u6743\u9650\u7684\u96c6\u7fa4\u6216\u547d\u540d\u7a7a\u95f4\u7684\u5bb9\u5668\u65e5\u5fd7\u3002

Lucene syntax notes:

1. Use logical operators (AND, OR, NOT, "") to query multiple keywords, for example: keyword1 AND (keyword2 OR keyword3) NOT keyword4.
2. Use the tilde (~) for fuzzy queries; an optional parameter after "~" controls the similarity of the match and defaults to 0.5 when unspecified. For example: error~.
3. Use wildcards (*, ?): ? matches any single character, and * matches zero or more characters.
4. Use square brackets [ ] or curly braces { } for range queries: square brackets [ ] denote a closed interval that includes the boundary values, while curly braces { } denote an open interval that excludes them. Range queries only apply to field types that can be sorted, such as numbers and dates. For example: timestamp:[2022-01-01 TO 2022-01-31].
5. For more usage, see the Lucene syntax documentation.
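Putting these operators together — the field names here are only illustrative, not Insight's actual log schema — a combined query might look like: pod_name:nginx* AND level:error AND timestamp:[2022-01-01 TO 2022-01-31].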
                      "},{"location":"end-user/insight/data-query/log.html#_3","title":"\u5176\u4ed6\u64cd\u4f5c","text":""},{"location":"end-user/insight/data-query/log.html#_4","title":"\u67e5\u770b\u65e5\u5fd7\u4e0a\u4e0b\u6587","text":"

                      \u70b9\u51fb\u65e5\u5fd7\u540e\u7684\u6309\u94ae\uff0c\u5728\u53f3\u4fa7\u5212\u51fa\u9762\u677f\u4e2d\u53ef\u67e5\u770b\u8be5\u6761\u65e5\u5fd7\u7684\u9ed8\u8ba4 100 \u6761\u4e0a\u4e0b\u6587\u3002\u53ef\u5207\u6362 \u663e\u793a\u884c\u6570 \u67e5\u770b\u66f4\u591a\u4e0a\u4e0b\u6587\u5185\u5bb9\u3002

                      "},{"location":"end-user/insight/data-query/log.html#_5","title":"\u5bfc\u51fa\u65e5\u5fd7\u6570\u636e","text":"

                      \u70b9\u51fb\u5217\u8868\u53f3\u4e0a\u4fa7\u7684\u4e0b\u8f7d\u6309\u94ae\u3002

                      • \u652f\u6301\u914d\u7f6e\u5bfc\u51fa\u7684\u65e5\u5fd7\u5b57\u6bb5\uff0c\u6839\u636e\u65e5\u5fd7\u7c7b\u578b\u53ef\u914d\u7f6e\u7684\u5b57\u6bb5\u4e0d\u540c\uff0c\u5176\u4e2d \u65e5\u5fd7\u5185\u5bb9 \u5b57\u6bb5\u4e3a\u5fc5\u9009\u3002
                      • \u652f\u6301\u5c06\u65e5\u5fd7\u67e5\u8be2\u7ed3\u679c\u5bfc\u51fa\u4e3a .txt \u6216 .csv \u683c\u5f0f\u3002

                      Note

                      \u82e5\u9700\u6307\u5b9a\u4e0d\u91c7\u96c6\u67d0\u4e00\u4e9b\u5bb9\u5668\u7ec4\u7684\u65e5\u5fd7\uff0c\u53ef\u53c2\u8003\uff1a\u5bb9\u5668\u65e5\u5fd7\u9ed1\u540d\u5355\u3002

                      "},{"location":"end-user/insight/data-query/metric.html","title":"\u6307\u6807\u67e5\u8be2","text":"

                      \u6307\u6807\u67e5\u8be2\u652f\u6301\u67e5\u8be2\u5bb9\u5668\u5404\u8d44\u6e90\u7684\u6307\u6807\u6570\u636e\uff0c\u53ef\u67e5\u770b\u76d1\u63a7\u6307\u6807\u7684\u8d8b\u52bf\u53d8\u5316\u3002\u540c\u65f6\uff0c\u9ad8\u7ea7\u67e5\u8be2\u652f\u6301\u539f\u751f PromQL \u8bed\u53e5\u8fdb\u884c\u6307\u6807\u67e5\u8be2\u3002

                      "},{"location":"end-user/insight/data-query/metric.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u96c6\u7fa4\u4e2d\u5df2\u5b89\u88c5 insight-agent \u4e14\u5e94\u7528\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u3002
                      "},{"location":"end-user/insight/data-query/metric.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u70b9\u51fb\u4e00\u7ea7\u5bfc\u822a\u680f\u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u3002

                      2. \u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9 \u6307\u6807 \u3002

                      3. \u9009\u62e9\u96c6\u7fa4\u3001\u7c7b\u578b\u3001\u8282\u70b9\u3001\u6307\u6807\u540d\u79f0\u67e5\u8be2\u6761\u4ef6\u540e\uff0c\u70b9\u51fb \u641c\u7d22 \uff0c\u5c4f\u5e55\u53f3\u4fa7\u5c06\u663e\u793a\u5bf9\u5e94\u6307\u6807\u56fe\u8868\u53ca\u6570\u636e\u8be6\u60c5\u3002

                      4. \u652f\u6301\u81ea\u5b9a\u4e49\u65f6\u95f4\u8303\u56f4\u3002\u53ef\u624b\u52a8\u70b9\u51fb \u5237\u65b0 \u56fe\u6807\u6216\u9009\u62e9\u9ed8\u8ba4\u65f6\u95f4\u95f4\u9694\u8fdb\u884c\u5237\u65b0\u3002

                      5. \u70b9\u51fb \u9ad8\u7ea7\u67e5\u8be2 \u9875\u7b7e\u901a\u8fc7\u539f\u751f\u7684 PromQL \u67e5\u8be2\u3002
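   For example, a standard PromQL expression such as sum(rate(container_cpu_usage_seconds_total[5m])) by (namespace) sums per-namespace container CPU usage over the last 5 minutes (this uses the common cAdvisor metric name, not anything Insight-specific).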

Note

See the PromQL syntax documentation.

                      "},{"location":"end-user/insight/infra/cluster.html","title":"\u96c6\u7fa4\u76d1\u63a7","text":"

                      \u901a\u8fc7\u96c6\u7fa4\u76d1\u63a7\uff0c\u4f60\u53ef\u4ee5\u67e5\u770b\u96c6\u7fa4\u7684\u57fa\u672c\u4fe1\u606f\u3001\u8be5\u96c6\u7fa4\u4e2d\u7684\u8d44\u6e90\u6d88\u8017\u4ee5\u53ca\u4e00\u6bb5\u65f6\u95f4\u7684\u8d44\u6e90\u6d88\u8017\u53d8\u5316\u8d8b\u52bf\u7b49\u3002

                      "},{"location":"end-user/insight/infra/cluster.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u96c6\u7fa4\u4e2d\u5df2\u5b89\u88c5 insight-agent \u4e14\u5e94\u7528\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u3002

                      "},{"location":"end-user/insight/infra/cluster.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u4ea7\u54c1\u6a21\u5757\u3002

                      2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u57fa\u7840\u8bbe\u65bd -> \u96c6\u7fa4 \u3002\u5728\u8be5\u9875\u9762\u53ef\u67e5\u770b\u4ee5\u4e0b\u4fe1\u606f\uff1a

                        • \u8d44\u6e90\u6982\u89c8 \uff1a\u591a\u9009\u96c6\u7fa4\u4e2d\u7684\u8282\u70b9\u3001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u6b63\u5e38\u548c\u5168\u90e8\u7684\u6570\u91cf\u7edf\u8ba1\uff1b
                        • \u6545\u969c \uff1a\u7edf\u8ba1\u5f53\u524d\u96c6\u7fa4\u4ea7\u751f\u7684\u544a\u8b66\u6570\u91cf\uff1b
                        • \u8d44\u6e90\u6d88\u8017 \uff1a\u6240\u9009\u96c6\u7fa4\u7684 CPU\u3001\u5185\u5b58\u3001\u78c1\u76d8\u7684\u5b9e\u9645\u4f7f\u7528\u91cf\u548c\u603b\u91cf\uff1b
                        • \u6307\u6807\u8bf4\u660e \uff1a\u6240\u9009\u96c6\u7fa4\u7684 CPU\u3001\u5185\u5b58\u3001\u78c1\u76d8\u8bfb\u5199\u3001\u7f51\u7edc\u63a5\u6536\u53d1\u9001\u7684\u53d8\u5316\u8d8b\u52bf\u3002

                      3. \u5207\u6362\u5230 \u8d44\u6e90\u6c34\u4f4d\u7ebf\u76d1\u63a7 \u9875\u7b7e\uff0c\u53ef\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u7684\u66f4\u591a\u76d1\u63a7\u6570\u636e\u3002

                      "},{"location":"end-user/insight/infra/cluster.html#_4","title":"\u53c2\u8003\u6307\u6807\u8bf4\u660e","text":"\u6307\u6807\u540d \u8bf4\u660e CPU \u4f7f\u7528\u7387 \u8be5\u6307\u6807\u662f\u6307\u96c6\u7fa4\u4e2d\u6240\u6709 Pod \u8d44\u6e90\u7684\u5b9e\u9645 CPU \u7528\u91cf\u4e0e\u6240\u6709\u8282\u70b9\u7684 CPU \u603b\u91cf\u7684\u6bd4\u7387\u3002 CPU \u5206\u914d\u7387 \u8be5\u6307\u6807\u662f\u6307\u96c6\u7fa4\u4e2d\u6240\u6709 Pod \u7684 CPU \u8bf7\u6c42\u91cf\u7684\u603b\u548c\u4e0e\u6240\u6709\u8282\u70b9\u7684 CPU \u603b\u91cf\u7684\u6bd4\u7387\u3002 \u5185\u5b58\u4f7f\u7528\u7387 \u8be5\u6307\u6807\u662f\u6307\u96c6\u7fa4\u4e2d\u6240\u6709 Pod \u8d44\u6e90\u7684\u5b9e\u9645\u5185\u5b58\u7528\u91cf\u4e0e\u6240\u6709\u8282\u70b9\u7684\u5185\u5b58\u603b\u91cf\u7684\u6bd4\u7387\u3002 \u5185\u5b58\u5206\u914d\u7387 \u8be5\u6307\u6807\u662f\u6307\u96c6\u7fa4\u4e2d\u6240\u6709 Pod \u7684\u5185\u5b58\u8bf7\u6c42\u91cf\u7684\u603b\u548c\u4e0e\u6240\u6709\u8282\u70b9\u7684\u5185\u5b58\u603b\u91cf\u7684\u6bd4\u7387\u3002"},{"location":"end-user/insight/infra/container.html","title":"\u5bb9\u5668\u76d1\u63a7","text":"

Container monitoring covers the workloads managed in cluster management. The list shows each workload's basic information and status, and the workload details page shows the number of firing alerts and the trends of CPU, memory, and other resource consumption.

"},{"location":"end-user/insight/infra/container.html#_2","title":"Prerequisites","text":"

insight-agent is installed in the cluster and all its pods are in the Running state.

• To install insight-agent, see Install insight-agent online or Upgrade insight-agent offline.
"},{"location":"end-user/insight/infra/container.html#_3","title":"Steps","text":"

Follow these steps to view service monitoring metrics:

1. Enter the Observability module.

2. Select Infrastructure -> Workloads in the left navigation.

3. Switch the top tab to view data for different types of workloads.

4. Click the name of a target workload to view its details.

   1. Faults: the fault card shows the total number of alerts currently firing for this workload.
   2. Resource consumption: this card shows the workload's CPU, memory, and network usage.
   3. Monitoring metrics: trends of the workload's CPU, memory, network, and disk over the default 1 hour.

5. Switch to the Pod List tab to view the status, node, restart count, and other information of each pod of the workload.

6. Switch to the JVM Monitoring tab to view the JVM metrics of each pod.

   Note

   1. JVM monitoring only supports the Java language.
   2. To enable JVM monitoring, see Start Monitoring Java Applications.
"},{"location":"end-user/insight/infra/container.html#_4","title":"Metric Reference","text":"
Metric: Description
CPU usage: the sum of CPU usage of all pods under the workload.
CPU requests: the sum of CPU requests of all pods under the workload.
CPU limits: the sum of CPU limits of all pods under the workload.
Memory usage: the sum of memory usage of all pods under the workload.
Memory requests: the sum of memory requests of all pods under the workload.
Memory limits: the sum of memory limits of all pods under the workload.
Disk read/write rate: the total of continuous disk reads and writes per second within the specified time range, a per-second performance measure of disk read and write operations.
Network send/receive rate: the inbound and outbound network traffic rates, aggregated per workload, within the specified time range.
"},{"location":"end-user/insight/infra/event.html","title":"Event Query","text":"

Insight in the AI platform supports querying events by cluster and namespace, and provides an event status distribution chart with statistics on important events.

"},{"location":"end-user/insight/infra/event.html#_2","title":"Steps","text":"
1. Click the top-level navigation to enter Observability.
2. In the left navigation, select Infrastructure > Events.

"},{"location":"end-user/insight/infra/event.html#_3","title":"Event Status Distribution","text":"

By default, events from the last 12 hours are displayed. You can select a different time range at the top right to view a longer or shorter period, and you can also set a custom sampling interval from 1 minute to 5 hours.

The event status distribution chart gives you an intuitive picture of how dense or scattered events are, which helps you evaluate subsequent cluster operations and prepare accordingly. If events cluster densely in specific periods, you may need to provision more resources or take other measures to ensure cluster stability and high availability; if events are scattered, you can reasonably schedule other maintenance work during that period, such as system optimization, upgrades, or other tasks.

By weighing the event status distribution chart together with the time range, you can better plan and manage cluster operations and maintenance, ensuring system stability and reliability.

"},{"location":"end-user/insight/infra/event.html#_4","title":"Event Count and Statistics","text":"

The important-event statistics conveniently show the number of image pull failures, health check failures, pod run failures, pod scheduling failures, container OOM (out-of-memory) events, and volume mount failures, as well as the total number of all events. These events are usually classified as "Warning" or "Normal".

"},{"location":"end-user/insight/infra/event.html#_5","title":"Event List","text":"

The event list presents events along a timeline, in chronological order. You can sort by "Last Occurred" and "Level".

Click the ⚙️ icon on the right to customize the displayed columns to your own preferences and needs.

When needed, click the refresh icon to update the current event list.

"},{"location":"end-user/insight/infra/event.html#_6","title":"Other Operations","text":"
1. Click the icon in the operation column of the event list to view the metadata of an event.

2. Click the Context tab at the top to view the historical event records of the resource the event belongs to.

"},{"location":"end-user/insight/infra/event.html#_7","title":"Reference","text":"

For the detailed meaning of the system's built-in events, see the Kubernetes API event list.

                      "},{"location":"end-user/insight/infra/namespace.html","title":"\u547d\u540d\u7a7a\u95f4\u76d1\u63a7","text":"

                      \u4ee5\u547d\u540d\u7a7a\u95f4\u4e3a\u7ef4\u5ea6\uff0c\u5feb\u901f\u67e5\u8be2\u547d\u540d\u7a7a\u95f4\u5185\u7684\u8d44\u6e90\u6d88\u8017\u548c\u53d8\u5316\u8d8b\u52bf\u3002

                      "},{"location":"end-user/insight/infra/namespace.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u96c6\u7fa4\u4e2d\u5df2\u5b89\u88c5 insight-agent \u4e14\u5e94\u7528\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u3002

                      "},{"location":"end-user/insight/infra/namespace.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u4ea7\u54c1\u6a21\u5757\u3002

                      2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u57fa\u7840\u8bbe\u65bd > \u547d\u540d\u7a7a\u95f4 \u3002\u5728\u8be5\u9875\u9762\u53ef\u67e5\u770b\u4ee5\u4e0b\u4fe1\u606f\uff1a

                        1. \u5207\u6362\u547d\u540d\u7a7a\u95f4\uff1a\u5728\u9876\u90e8\u5207\u6362\u96c6\u7fa4\u6216\u547d\u540d\u7a7a\u95f4\uff1b
                        2. \u8d44\u6e90\u6982\u89c8\uff1a\u7edf\u8ba1\u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u6b63\u5e38\u548c\u5168\u90e8\u5de5\u4f5c\u8d1f\u8f7d\u7684\u6570\u91cf\uff1b
                        3. \u6545\u969c\uff1a\u7edf\u8ba1\u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e0b\u4ea7\u751f\u7684\u544a\u8b66\u6570\u91cf\uff1b
                        4. \u4e8b\u4ef6\uff1a\u7edf\u8ba1\u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e0b 24 \u5c0f\u65f6\u5185 Warning \u7ea7\u522b\u7684\u4e8b\u4ef6\u6570\u91cf\uff1b
                        5. \u8d44\u6e90\u6d88\u8017\uff1a\u7edf\u8ba1\u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e0b\u5bb9\u5668\u7ec4\u7684 CPU\u3001\u5185\u5b58\u4f7f\u7528\u91cf\u4e4b\u548c \u53ca CPU\u3001\u5185\u5b58\u914d\u989d\u60c5\u51b5\u3002

                      "},{"location":"end-user/insight/infra/namespace.html#_4","title":"\u6307\u6807\u8bf4\u660e","text":"\u6307\u6807\u540d \u8bf4\u660e CPU \u4f7f\u7528\u91cf \u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e2d\u5bb9\u5668\u7ec4\u7684 CPU \u4f7f\u7528\u91cf\u4e4b\u548c \u5185\u5b58\u4f7f\u7528\u91cf \u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e2d\u5bb9\u5668\u7ec4\u7684\u5185\u5b58\u4f7f\u7528\u91cf\u4e4b\u548c \u5bb9\u5668\u7ec4 CPU \u4f7f\u7528\u91cf \u547d\u540d\u7a7a\u95f4\u4e2d\u5404\u5bb9\u5668\u7ec4\u7684 CPU \u4f7f\u7528\u91cf \u5bb9\u5668\u7ec4\u5185\u5b58\u4f7f\u7528\u91cf \u547d\u540d\u7a7a\u95f4\u4e2d\u5404\u5bb9\u5668\u7ec4\u7684\u5185\u5b58\u4f7f\u7528\u91cf"},{"location":"end-user/insight/infra/node.html","title":"\u8282\u70b9\u76d1\u63a7","text":"

Through node monitoring, you can get an overview of the current health status of the nodes in the selected cluster and the number of abnormal pods on each node; on the node details page, you can view the number of firing alerts plus trend charts of CPU, memory, disk, and other resource consumption.

"},{"location":"end-user/insight/infra/node.html#_2","title":"Prerequisites","text":"

insight-agent is installed in the cluster and the application is in the Running state.

"},{"location":"end-user/insight/infra/node.html#_3","title":"Steps","text":"
1. Enter the Observability module.

2. Select Infrastructure -> Nodes in the left navigation. This page shows the following information:

   • Switch cluster: switch clusters via the dropdown at the top;
   • Node list: the nodes in the selected cluster; click a node to switch.
   • Faults: the number of alerts currently firing in the selected cluster;
   • Resource consumption: actual usage and totals of CPU, memory, and disk for the selected node;
   • Metric descriptions: trends of CPU, memory, disk read/write, and network send/receive for the selected node.

3. Switch to the Resource Watermark Monitoring tab to view more monitoring data for the current node.

                      "},{"location":"end-user/insight/infra/probe.html","title":"\u62e8\u6d4b","text":"

                      \u62e8\u6d4b\uff08Probe\uff09\u6307\u7684\u662f\u57fa\u4e8e\u9ed1\u76d2\u76d1\u63a7\uff0c\u5b9a\u671f\u901a\u8fc7 HTTP\u3001TCP \u7b49\u65b9\u5f0f\u5bf9\u76ee\u6807\u8fdb\u884c\u8fde\u901a\u6027\u6d4b\u8bd5\uff0c\u5feb\u901f\u53d1\u73b0\u6b63\u5728\u53d1\u751f\u7684\u6545\u969c\u3002

                      Insight \u57fa\u4e8e Prometheus Blackbox Exporter \u5de5\u5177\u901a\u8fc7 HTTP\u3001HTTPS\u3001DNS\u3001TCP \u548c ICMP \u7b49\u534f\u8bae\uff0c\u5bf9\u7f51\u7edc\u8fdb\u884c\u63a2\u6d4b\u5e76\u8fd4\u56de\u63a2\u6d4b\u7ed3\u679c\u4ee5\u4fbf\u4e86\u89e3\u7f51\u7edc\u72b6\u6001\u3002

                      "},{"location":"end-user/insight/infra/probe.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u76ee\u6807\u96c6\u7fa4\u4e2d\u5df2\u6210\u529f\u90e8\u7f72 insight-agent\uff0c\u4e14\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u3002

                      "},{"location":"end-user/insight/infra/probe.html#_3","title":"\u67e5\u770b\u62e8\u6d4b\u4efb\u52a1","text":"
                      1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u4ea7\u54c1\u6a21\u5757\uff1b
                      2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u57fa\u7840\u8bbe\u65bd -> \u62e8\u6d4b\u3002

                        • \u70b9\u51fb\u8868\u683c\u4e2d\u7684\u96c6\u7fa4\u6216\u547d\u540d\u7a7a\u95f4\u4e0b\u62c9\u6846\uff0c\u53ef\u5207\u6362\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4
                        • \u4f60\u53ef\u4ee5\u70b9\u51fb\u53f3\u4fa7\u7684 \u2699\ufe0f \u4fee\u6539\u663e\u793a\u7684\u5217\uff0c\u9ed8\u8ba4\u4e3a\u62e8\u6d4b\u540d\u79f0\u3001\u63a2\u6d4b\u65b9\u5f0f\u3001\u63a2\u6d4b\u76ee\u6807\u3001\u8fde\u901a\u72b6\u6001\u3001\u521b\u5efa\u65f6\u95f4
                        • \u8fde\u901a\u72b6\u6001\u6709 3 \u79cd\uff1a
                          • \u6b63\u5e38\uff1aProbe \u6210\u529f\u8fde\u63a5\u5230\u4e86\u76ee\u6807\uff0c\u76ee\u6807\u8fd4\u56de\u4e86\u9884\u671f\u7684\u54cd\u5e94
                          • \u5f02\u5e38\uff1aProbe \u65e0\u6cd5\u8fde\u63a5\u5230\u76ee\u6807\uff0c\u6216\u76ee\u6807\u6ca1\u6709\u8fd4\u56de\u9884\u671f\u7684\u54cd\u5e94
                          • Pending\uff1aProbe \u6b63\u5728\u5c1d\u8bd5\u8fde\u63a5\u76ee\u6807
                        • \u4f60\u53ef\u4ee5\u5728 \ud83d\udd0d \u641c\u7d22\u6846\u4e2d\u952e\u5165\u540d\u79f0\uff0c\u6a21\u7cca\u641c\u7d22\u67d0\u4e9b\u62e8\u6d4b\u4efb\u52a1

                      "},{"location":"end-user/insight/infra/probe.html#_4","title":"\u521b\u5efa\u62e8\u6d4b\u4efb\u52a1","text":"
                      1. \u70b9\u51fb \u521b\u5efa\u62e8\u6d4b\u4efb\u52a1\u3002
                      2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65

                        • \u96c6\u7fa4\uff1a\u9009\u62e9\u9700\u8981\u62e8\u6d4b\u7684\u96c6\u7fa4
                        • \u547d\u540d\u7a7a\u95f4\uff1a\u62e8\u6d4b\u6240\u5728\u7684\u547d\u540d\u7a7a\u95f4

                      3. \u914d\u7f6e\u63a2\u6d4b\u53c2\u6570\u3002

                        • Blackbox \u5b9e\u4f8b\uff1a\u9009\u62e9\u8d1f\u8d23\u63a2\u6d4b\u7684 blackbox \u5b9e\u4f8b
                        • \u63a2\u6d4b\u65b9\u5f0f\uff1a
                          • HTTP\uff1a\u901a\u8fc7\u53d1\u9001 HTTP \u6216 HTTPS \u8bf7\u6c42\u5230\u76ee\u6807 URL\uff0c\u68c0\u6d4b\u5176\u8fde\u901a\u6027\u548c\u54cd\u5e94\u65f6\u95f4\uff0c\u8fd9\u53ef\u4ee5\u7528\u4e8e\u76d1\u6d4b\u7f51\u7ad9\u6216 Web \u5e94\u7528\u7684\u53ef\u7528\u6027\u548c\u6027\u80fd
                          • TCP\uff1a\u901a\u8fc7\u5efa\u7acb\u5230\u76ee\u6807\u4e3b\u673a\u548c\u7aef\u53e3\u7684 TCP \u8fde\u63a5\uff0c\u68c0\u6d4b\u5176\u8fde\u901a\u6027\u548c\u54cd\u5e94\u65f6\u95f4\u3002\u8fd9\u53ef\u4ee5\u7528\u4e8e\u76d1\u6d4b\u57fa\u4e8e TCP \u7684\u670d\u52a1\uff0c\u5982 Web \u670d\u52a1\u5668\u3001\u6570\u636e\u5e93\u670d\u52a1\u5668\u7b49
                          • \u5176\u4ed6\uff1a\u652f\u6301\u901a\u8fc7\u914d\u7f6e ConfigMap \u81ea\u5b9a\u4e49\u63a2\u6d4b\u65b9\u5f0f\uff0c\u53ef\u53c2\u8003\u81ea\u5b9a\u4e49\u62e8\u6d4b\u65b9\u5f0f
                        • \u63a2\u6d4b\u76ee\u6807\uff1a\u63a2\u6d4b\u7684\u76ee\u6807\u5730\u5740\uff0c\u652f\u6301\u57df\u540d\u6216 IP \u5730\u5740\u7b49
                        • \u6807\u7b7e\uff1a\u81ea\u5b9a\u4e49\u6807\u7b7e\uff0c\u8be5\u6807\u7b7e\u4f1a\u81ea\u52a8\u6dfb\u52a0\u5230 Prometheus \u7684 Label \u4e2d
                        • \u63a2\u6d4b\u95f4\u9694\uff1a\u63a2\u6d4b\u95f4\u9694\u65f6\u95f4
                        • \u63a2\u6d4b\u8d85\u65f6\uff1a\u63a2\u6d4b\u76ee\u6807\u65f6\u7684\u6700\u957f\u7b49\u5f85\u65f6\u95f4

                      4. \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u5b8c\u6210\u521b\u5efa\u3002
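Behind the scenes, a probe task of this kind typically maps to a prometheus-operator Probe resource. A minimal sketch, assuming Insight follows that convention — the task name, module, blackbox address, target URL, and label below are all hypothetical illustrations:

apiVersion: monitoring.coreos.com/v1\nkind: Probe\nmetadata:\n  name: probe-demo              # hypothetical task name\n  namespace: insight-system\nspec:\n  jobName: probe-demo\n  module: http_2xx              # probe module defined in the blackbox ConfigMap\n  prober:\n    url: insight-agent-prometheus-blackbox-exporter:9115  # hypothetical blackbox instance address\n  interval: 30s                 # probe interval\n  scrapeTimeout: 10s            # probe timeout\n  targets:\n    staticConfig:\n      static:\n        - https://example.com   # hypothetical probe target\n      labels:\n        env: demo               # custom label added to Prometheus labels\n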

Warning

After a probe task is created, it takes about 3 minutes for the configuration to synchronize. During this period no probing is performed, and probe results cannot be viewed.

"},{"location":"end-user/insight/infra/probe.html#_5","title":"Edit a Probe Task","text":"

Click ┇ -> Edit on the right of the list entry, and click OK when you finish editing.

"},{"location":"end-user/insight/infra/probe.html#_6","title":"View Monitoring Panels","text":"

Click a probe name to view the monitoring status of each target in the probe task; the probe results for the network status are displayed as charts.

Metric: Description
Current Status Response: the response status code of the HTTP probe request.
Ping Status: whether the probe request succeeded; 1 means success, 0 means failure.
IP Protocol: the IP protocol version used by the probe request.
SSL Expiry: the earliest expiry time of the SSL/TLS certificate.
DNS Response (Latency): the duration of the entire probe process, in seconds.
HTTP Duration: the time of the entire process from sending the request to receiving the complete response.
"},{"location":"end-user/insight/infra/probe.html#_7","title":"Delete a Probe Task","text":"

Click ┇ -> Delete on the right of the list entry, and click OK after confirming.

Caution

Deletion cannot be undone; proceed with caution.

                      "},{"location":"end-user/insight/quickstart/install/index.html","title":"\u5f00\u59cb\u89c2\u6d4b","text":"

                      AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\u5b9e\u73b0\u4e86\u5bf9\u591a\u4e91\u591a\u96c6\u7fa4\u7684\u7eb3\u7ba1\uff0c\u5e76\u652f\u6301\u521b\u5efa\u96c6\u7fa4\u3002\u5728\u6b64\u57fa\u7840\u4e0a\uff0c\u53ef\u89c2\u6d4b\u6027 Insight \u4f5c\u4e3a\u591a\u96c6\u7fa4\u7edf\u4e00\u89c2\u6d4b\u65b9\u6848\uff0c\u901a\u8fc7\u90e8\u7f72 insight-agent \u63d2\u4ef6\u5b9e\u73b0\u5bf9\u591a\u96c6\u7fa4\u89c2\u6d4b\u6570\u636e\u7684\u91c7\u96c6\uff0c\u5e76\u652f\u6301\u901a\u8fc7 AI \u7b97\u529b\u4e2d\u5fc3 \u53ef\u89c2\u6d4b\u6027\u4ea7\u54c1\u5b9e\u73b0\u5bf9\u6307\u6807\u3001\u65e5\u5fd7\u3001\u94fe\u8def\u6570\u636e\u7684\u67e5\u8be2\u3002

                      insight-agent \u662f\u53ef\u89c2\u6d4b\u6027\u5b9e\u73b0\u5bf9\u591a\u96c6\u7fa4\u6570\u636e\u91c7\u96c6\u7684\u5de5\u5177\uff0c\u5b89\u88c5\u540e\u65e0\u9700\u4efb\u4f55\u4fee\u6539\uff0c\u5373\u53ef\u5b9e\u73b0\u5bf9\u6307\u6807\u3001\u65e5\u5fd7\u4ee5\u53ca\u94fe\u8def\u6570\u636e\u7684\u81ea\u52a8\u5316\u91c7\u96c6\u3002

                      \u901a\u8fc7 \u5bb9\u5668\u7ba1\u7406 \u521b\u5efa\u7684\u96c6\u7fa4\u9ed8\u8ba4\u4f1a\u5b89\u88c5 insight-agent\uff0c\u6545\u5728\u6b64\u4ec5\u9488\u5bf9\u63a5\u5165\u7684\u96c6\u7fa4\u5982\u4f55\u5f00\u542f\u89c2\u6d4b\u80fd\u529b\u63d0\u4f9b\u6307\u5bfc\u3002

                      • \u5728\u7ebf\u5b89\u88c5 insight-agent

                      \u53ef\u89c2\u6d4b\u6027 Insight \u4f5c\u4e3a\u591a\u96c6\u7fa4\u7684\u7edf\u4e00\u89c2\u6d4b\u5e73\u53f0\uff0c\u5176\u90e8\u5206\u7ec4\u4ef6\u7684\u8d44\u6e90\u6d88\u8017\u4e0e\u521b\u5efa\u96c6\u7fa4\u7684\u6570\u636e\u3001\u63a5\u5165\u96c6\u7fa4\u7684\u6570\u91cf\u606f\u606f\u76f8\u5173\uff0c\u5728\u5b89\u88c5 insight-agent \u65f6\uff0c\u9700\u8981\u6839\u636e\u96c6\u7fa4\u89c4\u6a21\u5bf9\u76f8\u5e94\u7ec4\u4ef6\u7684\u8d44\u6e90\u8fdb\u884c\u8c03\u6574\u3002

                      1. \u6839\u636e\u521b\u5efa\u96c6\u7fa4\u7684\u89c4\u6a21\u6216\u63a5\u5165\u96c6\u7fa4\u7684\u89c4\u6a21\uff0c\u8c03\u6574 insight-agent \u4e2d\u91c7\u96c6\u7ec4\u4ef6 Prometheus \u7684 CPU \u548c\u5185\u5b58\uff0c\u8bf7\u53c2\u8003: Prometheus \u8d44\u6e90\u89c4\u5212

                      2. \u7531\u4e8e\u591a\u96c6\u7fa4\u7684\u6307\u6807\u6570\u636e\u4f1a\u7edf\u4e00\u5b58\u50a8\uff0c\u5219\u9700\u8981 AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\u7ba1\u7406\u5458\u6839\u636e\u521b\u5efa\u96c6\u7fa4\u7684\u89c4\u6a21\u3001\u63a5\u5165\u96c6\u7fa4\u7684\u89c4\u6a21\u5bf9\u5e94\u8c03\u6574 vmstorage \u7684\u78c1\u76d8\uff0c\u8bf7\u53c2\u8003\uff1avmstorage \u78c1\u76d8\u5bb9\u91cf\u89c4\u5212\u3002

3. For how to expand the vmstorage disk, see vmstorage disk expansion.
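As a rough sketch for step 1, assuming the kube-prometheus-stack value layout shown in the Chart snippets later on this page (the value path and the sizes below are assumptions, not recommendations; see Prometheus resource planning for real guidance):

helm upgrade insight-agent insight/insight-agent \\\n  -n insight-system \\\n  --reuse-values \\\n  --set kube-prometheus-stack.prometheus.prometheusSpec.resources.requests.cpu=2 \\\n  --set kube-prometheus-stack.prometheus.prometheusSpec.resources.requests.memory=8Gi \\\n  --set kube-prometheus-stack.prometheus.prometheusSpec.resources.limits.cpu=4 \\\n  --set kube-prometheus-stack.prometheus.prometheusSpec.resources.limits.memory=16Gi\n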

Because the AI Computing Center manages multiple clouds and clusters, insight-agent has so far completed only partial verification. Monitoring-component conflicts cause problems when installing insight-agent in AI Computing Center 4.0 clusters and OpenShift 4.x clusters. If you hit the same problems, consult the following documents:

• Install insight-agent on OpenShift 4.x
                      "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html","title":"\u5f00\u542f\u5927\u65e5\u5fd7\u548c\u5927\u94fe\u8def\u6a21\u5f0f","text":"

To improve data-write throughput in large-scale environments, the observability module supports switching logs to Big Log mode and traces to Big Trace mode. This page covers the following ways to enable them:

• Enable or upgrade to Big Log and Big Trace modes through the installer (controlled by the same parameter value in manifest.yaml)
• Enable Big Log and Big Trace modes manually through Helm commands
                      "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_2","title":"\u65e5\u5fd7","text":"

This section explains the difference between normal log mode and Big Log mode.

                      "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_3","title":"\u65e5\u5fd7\u6a21\u5f0f","text":"

Components: Fluentbit + Elasticsearch

This mode is referred to as ES mode; the data flows from Fluentbit directly into Elasticsearch.

                      "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_4","title":"\u5927\u65e5\u5fd7\u6a21\u5f0f","text":"

Components: Fluentbit + Kafka + Vector + Elasticsearch

This mode is referred to as Kafka mode; the data flows from Fluentbit through Kafka and Vector into Elasticsearch.

                      "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_5","title":"\u94fe\u8def","text":"

This section explains the difference between normal trace mode and Big Trace mode.

                      "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_6","title":"\u94fe\u8def\u6a21\u5f0f","text":"

Components: Agent opentelemetry-collector + Global opentelemetry-collector + Jaeger-collector + Elasticsearch

This mode is referred to as OTLP mode; the data flows from the agent opentelemetry-collector through the global opentelemetry-collector and Jaeger-collector into Elasticsearch.

                      "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_7","title":"\u5927\u94fe\u8def\u6a21\u5f0f","text":"

Components: Agent opentelemetry-collector + Kafka + Global opentelemetry-collector + Jaeger-collector + Elasticsearch

This mode is referred to as Kafka mode; the data flows from the agent opentelemetry-collector through Kafka, the global opentelemetry-collector, and Jaeger-collector into Elasticsearch.

                      "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_8","title":"\u901a\u8fc7\u5b89\u88c5\u5668\u5f00\u542f","text":"

The manifest.yaml used when deploying or upgrading the AI Computing Center through the installer contains an infrastructures.kafka field. To enable the observability Big Log and Big Trace modes, kafka must be enabled:

                      manifest.yaml
apiVersion: manifest.daocloud.io/v1alpha1\nkind: DCEManifest\n...\ninfrastructures:\n  ...\n  kafka:\n    enable: true # default is false\n    cpuLimit: 1\n    memLimit: 2Gi\n    pvcSize: 15Gi\n
                      "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_9","title":"\u5f00\u542f","text":"

When installing with a manifest.yaml that has kafka enabled, the kafka middleware is installed by default, and Big Log and Big Trace modes are enabled by default when Insight is installed. The install command is:

                      ./dce5-installer cluster-create -c clusterConfig.yaml -m manifest.yaml\n
                      "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_10","title":"\u5347\u7ea7","text":"

Upgrading likewise means modifying the kafka field. Note, however, that because the old environment was installed with kafka: false, kafka is absent from it. The upgrade must therefore specify upgrading middleware so that the kafka middleware is installed at the same time. The upgrade command is:

                      ./dce5-installer cluster-create -c clusterConfig.yaml -m manifest.yaml -u gproduct,middleware\n

                      Note

After the upgrade completes, manually restart the following components (see the sketch after the list):

                      • insight-agent-fluent-bit
                      • insight-agent-opentelemetry-collector
                      • insight-opentelemetry-collector
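A minimal restart sketch, assuming fluent-bit and the agent collector run as DaemonSets and the global collector as a Deployment (verify the workload kinds in your cluster first):

kubectl -n insight-system rollout restart daemonset insight-agent-fluent-bit\nkubectl -n insight-system rollout restart daemonset insight-agent-opentelemetry-collector\nkubectl -n insight-system rollout restart deployment insight-opentelemetry-collector\n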
                      "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#helm","title":"\u901a\u8fc7 Helm \u547d\u4ee4\u5f00\u542f","text":"

Prerequisite: a usable kafka must exist and its address must be reachable.

Fetch the values of the old insight and insight-agent releases with the following commands (backing them up is recommended):

                      helm get values insight -n insight-system -o yaml > insight.yaml\nhelm get values insight-agent -n insight-system -o yaml > insight-agent.yaml\n
                      "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_11","title":"\u5f00\u542f\u5927\u65e5\u5fd7","text":"

There are several ways to enable or upgrade to Big Log mode:

• Using --set in the helm upgrade command
• Modifying the YAML and then running helm upgrade
• Upgrading through the Container Management UI

To use --set: first run the following insight upgrade command, making sure the kafka brokers address is correct:

                      helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --set global.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.kafka.enabled=true \\\n  --set vector.enabled=true \\\n  --version 0.30.1\n

Then run the following insight-agent upgrade command, again making sure the kafka brokers address is correct:

                      helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --set global.exporters.logging.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.exporters.logging.output=kafka \\\n  --version 0.30.1\n

To modify the YAML instead, follow the steps below and then run the helm upgrade commands:

1. Modify insight.yaml

                        insight.yaml
                        global:\n  ...\n  kafka:\n    brokers: 10.6.216.111:30592\n    enabled: true\n...\nvector:\n  enabled: true\n
2. Upgrade the insight component:

                        helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --version 0.30.1\n
3. Modify insight-agent.yaml

                        insight-agent.yaml
                        global:\n  ...\n  exporters:\n    ...\n    logging:\n      ...\n      kafka:\n        brokers: 10.6.216.111:30592\n      output: kafka\n
4. Upgrade insight-agent:

                        helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --version 0.30.1\n

To upgrade through the UI: in the Container Management module, find the target cluster, choose Helm Apps in the left navigation, then find and update insight-agent.

In Logging Settings, set output to kafka and fill in the correct brokers address.

Note that after the upgrade completes, the insight-agent-fluent-bit component must be restarted manually.

                      "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_12","title":"\u5f00\u542f\u5927\u94fe\u8def","text":"

There are several ways to enable or upgrade to Big Trace mode:

• Using --set in the helm upgrade command
• Modifying the YAML and then running helm upgrade
• Upgrading through the Container Management UI

To use --set: first run the following insight upgrade command, making sure the kafka brokers address is correct:

                      helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --set global.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.kafka.enabled=true \\\n  --set global.tracing.kafkaReceiver.enabled=true \\\n  --version 0.30.1\n

Then run the following insight-agent upgrade command, again making sure the kafka brokers address is correct:

                      helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --set global.exporters.trace.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.exporters.trace.output=kafka \\\n  --version 0.30.1\n

To modify the YAML instead, follow the steps below and then run the helm upgrade commands:

1. Modify insight.yaml

                        insight.yaml
                        global:\n  ...\n  kafka:\n    brokers: 10.6.216.111:30592\n    enabled: true\n...\ntracing:\n  ...\n  kafkaReceiver:\n    enabled: true\n
2. Upgrade the insight component:

                        helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --version 0.30.1\n
3. Modify insight-agent.yaml

                        insight-agent.yaml
                        global:\n  ...\n  exporters:\n    ...\n    trace:\n      ...\n      kafka:\n        brokers: 10.6.216.111:30592\n      output: kafka\n
4. Upgrade insight-agent:

                        helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --version 0.30.1\n

To upgrade through the UI: in the Container Management module, find the target cluster, choose Helm Apps in the left navigation, then find and update insight-agent.

In Trace Settings, set output to kafka and fill in the correct brokers address.

Note that after the upgrade completes, the insight-agent-opentelemetry-collector and insight-opentelemetry-collector components must be restarted manually.

                      "},{"location":"end-user/insight/quickstart/install/component-scheduling.html","title":"\u81ea\u5b9a\u4e49 Insight \u7ec4\u4ef6\u8c03\u5ea6\u7b56\u7565","text":"

When deploying the observability platform Insight to a Kubernetes environment, proper resource management and optimization are crucial. Insight includes several core components such as Prometheus, OpenTelemetry, FluentBit, Vector, and Elasticsearch. At runtime their resource usage can negatively affect the performance of other Pods in the cluster, so node affinity becomes an important configuration option for managing resources effectively and keeping the cluster running smoothly.

This page focuses on configuring taints and node affinity so that each component runs on appropriate nodes and avoids resource contention, thereby ensuring the stability and efficiency of the whole Kubernetes cluster.

                      "},{"location":"end-user/insight/quickstart/install/component-scheduling.html#insight_1","title":"\u901a\u8fc7\u6c61\u70b9\u4e3a Insight \u914d\u7f6e\u4e13\u6709\u8282\u70b9","text":"

Since Insight Agent includes DaemonSet components, the configuration described in this section makes all components except the Insight DaemonSets run on dedicated nodes.

This is achieved by adding taints to the dedicated nodes and pairing them with tolerations. For more details, see the Kubernetes official documentation.

Refer to the following commands to add and remove the taint on a node:

# add the taint\nkubectl taint nodes worker1 node.daocloud.io=insight-only:NoSchedule\n\n# remove the taint\nkubectl taint nodes worker1 node.daocloud.io:NoSchedule-\n

There are two ways to schedule Insight components onto the dedicated nodes:

                      "},{"location":"end-user/insight/quickstart/install/component-scheduling.html#1","title":"1. \u4e3a\u6bcf\u4e2a\u7ec4\u4ef6\u6dfb\u52a0\u6c61\u70b9\u5bb9\u5fcd\u5ea6","text":"

Configure the insight-server and insight-agent Charts separately:

The first block below is the insight-server Chart configuration; the second is the insight-agent Chart configuration.
                      server:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nui:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nrunbook:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\n# mysql:\nvictoria-metrics-k8s-stack:\n  victoria-metrics-operator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  vmcluster:\n    spec:\n      vmstorage:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n      vmselect:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n      vminsert:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n  vmalert:\n    spec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n  alertmanager:\n    spec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n\njaeger:\n  collector:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  query:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n\nopentelemetry-collector-aggregator:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nopentelemetry-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\ngrafana-operator:\n  operator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  grafana:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\nkibana:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nelastic-alert:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nvector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n
                      kube-prometheus-stack:\n  prometheus:\n    prometheusSpec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n  prometheus-node-exporter:\n    tolerations:\n      - effect: NoSchedule\n        operator: Exists\n  prometheusOperator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n\nkube-state-metrics:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-operator:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\ntailing-sidecar-operator:\n  operator:\n    tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-kubernetes-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nprometheus-blackbox-exporter:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\netcd-exporter:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\" \n
                      "},{"location":"end-user/insight/quickstart/install/component-scheduling.html#2","title":"2. \u901a\u8fc7\u547d\u540d\u7a7a\u95f4\u7ea7\u522b\u914d\u7f6e","text":"

Make all Pods in the insight-system namespace tolerate the node.daocloud.io=insight-only taint.

1. Adjust the apiserver configuration file /etc/kubernetes/manifests/kube-apiserver.yaml to enable the PodTolerationRestriction and PodNodeSelector admission plugins (see the flag sketch after this list).

2. Add the following annotation to the insight-system namespace:

                        apiVersion: v1\nkind: Namespace\nmetadata:\n  name: insight-system\n  annotations:\n    scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Equal\", \"effect\": \"NoSchedule\", \"key\": \"node.daocloud.io\", \"value\": \"insight-only\"}]'\n
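A sketch of the flag for step 1; the plugin list here is an example, so append the two plugins to whatever your apiserver already enables:

# /etc/kubernetes/manifests/kube-apiserver.yaml (excerpt)\nspec:\n  containers:\n    - command:\n        - kube-apiserver\n        - --enable-admission-plugins=NodeRestriction,PodTolerationRestriction,PodNodeSelector\n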

Restart the components under the insight-system namespace; the Pods in insight-system will then tolerate the taint and be scheduled normally.
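One way to restart everything in the namespace at once (a sketch; it rolls every Deployment, DaemonSet, and StatefulSet in insight-system):

kubectl -n insight-system rollout restart deployment\nkubectl -n insight-system rollout restart daemonset\nkubectl -n insight-system rollout restart statefulset\n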

                      "},{"location":"end-user/insight/quickstart/install/component-scheduling.html#label","title":"\u4e3a\u8282\u70b9\u6dfb\u52a0 Label \u548c\u8282\u70b9\u4eb2\u548c\u6027\u6765\u7ba1\u7406\u7ec4\u4ef6\u8c03\u5ea6","text":"

                      Info

Node affinity is conceptually similar to nodeSelector: it lets you constrain which nodes a Pod can be scheduled onto based on node labels. There are two kinds of node affinity:

1. requiredDuringSchedulingIgnoredDuringExecution: the scheduler schedules the Pod only when the rule is met. This works like nodeSelector but with a more expressive syntax.
2. preferredDuringSchedulingIgnoredDuringExecution: the scheduler tries to find nodes satisfying the rule; if none match, the Pod is scheduled anyway.

For more details, see the Kubernetes official documentation.

To give different users flexibility in scheduling Insight components, Insight provides fine-grained labels for per-component scheduling policies. The labels and the components they target are:

• node.daocloud.io/insight-any: any value, true recommended; all Insight components prefer nodes carrying this label.
• node.daocloud.io/insight-prometheus: any value, true recommended; targets the Prometheus component specifically.
• node.daocloud.io/insight-vmstorage: any value, true recommended; targets the VictoriaMetrics vmstorage component specifically.
• node.daocloud.io/insight-vector: any value, true recommended; targets the Vector component specifically.
• node.daocloud.io/insight-otel-col: any value, true recommended; targets the OpenTelemetry component specifically.

Refer to the following commands to add and remove a label on a node:

# label node8 so that insight-prometheus is scheduled there first\nkubectl label nodes node8 node.daocloud.io/insight-prometheus=true\n\n# remove the node.daocloud.io/insight-prometheus label from node8\nkubectl label nodes node8 node.daocloud.io/insight-prometheus-\n

Below is the default affinity preference of the insight-prometheus component at deployment time:

                      affinity:\n  nodeAffinity:\n    preferredDuringSchedulingIgnoredDuringExecution:\n    - preference:\n        matchExpressions:\n        - key: node-role.kubernetes.io/control-plane\n          operator: DoesNotExist\n      weight: 1\n    - preference:\n        matchExpressions:\n        - key: node.daocloud.io/insight-prometheus # (1)!\n          operator: Exists\n      weight: 2\n    - preference:\n        matchExpressions:\n        - key: node.daocloud.io/insight-any\n          operator: Exists\n      weight: 3\n    podAntiAffinity:\n      preferredDuringSchedulingIgnoredDuringExecution:\n        - weight: 1\n          podAffinityTerm:\n            topologyKey: kubernetes.io/hostname\n            labelSelector:\n              matchExpressions:\n                - key: app.kubernetes.io/instance\n                  operator: In\n                  values:\n                    - insight-agent-kube-prometh-prometheus\n
1. Schedule insight-prometheus first to nodes carrying the node.daocloud.io/insight-prometheus label
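To check where the Prometheus Pod actually landed after labeling, the instance label from the anti-affinity rule above can be reused (a sketch):

kubectl -n insight-system get pods -o wide \\\n  -l app.kubernetes.io/instance=insight-agent-kube-prometh-prometheus\n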
                      "},{"location":"end-user/insight/quickstart/install/gethosturl.html","title":"\u83b7\u53d6\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u6570\u636e\u5b58\u50a8\u5730\u5740","text":"

Insight provides unified observability across clusters. To store and query observability data from multiple clusters centrally, sub-clusters must report the collected data to the global service cluster for unified storage. This page lists the storage component addresses that must be filled in when installing the collection component insight-agent.

                      "},{"location":"end-user/insight/quickstart/install/gethosturl.html#insight-agent","title":"\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u5b89\u88c5 insight-agent","text":"

If insight-agent is installed in the global service cluster, accessing the cluster through domain names is recommended:

export vminsert_host=\"vminsert-insight-victoria-metrics-k8s-stack.insight-system.svc.cluster.local\" # metrics\nexport es_host=\"insight-es-master.insight-system.svc.cluster.local\" # logs\nexport otel_col_host=\"insight-opentelemetry-collector.insight-system.svc.cluster.local\" # traces\n
                      "},{"location":"end-user/insight/quickstart/install/gethosturl.html#insight-agent_1","title":"\u5728\u5176\u4ed6\u96c6\u7fa4\u5b89\u88c5 insight-agent","text":""},{"location":"end-user/insight/quickstart/install/gethosturl.html#insight-server","title":"\u901a\u8fc7 Insight Server \u63d0\u4f9b\u7684\u63a5\u53e3\u83b7\u53d6\u5730\u5740","text":"
1. When the management cluster exposes services with the default LoadBalancer:

Log in to the console of the global service cluster and run the following commands:

                        export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam'\n

                        Note

Replace the ${INSIGHT_SERVER_IP} parameter in the command.

You will get the following return value:

                        {\n  \"values\": {\n    \"global\": {\n      \"exporters\": {\n        \"logging\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"metric\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"auditLog\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"trace\": {\n          \"host\": \"10.6.182.32\"\n        }\n      }\n    },\n    \"opentelemetry-operator\": {\n      \"enabled\": true\n    },\n    \"opentelemetry-collector\": {\n      \"enabled\": true\n    }\n  }\n}\n
• global.exporters.logging.host is the log service address (no need to set the service ports; the defaults are used)
• global.exporters.metric.host is the metrics service address
• global.exporters.trace.host is the trace service address
• global.exporters.auditLog.host is the audit log service address (the same service as traces, on a different port)
2. When the management cluster disables LoadBalancer:

When calling the API, you must additionally pass any externally accessible node IP of the cluster; it is used to assemble the complete access addresses of the corresponding services.

                        export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam' --data '{\"extra\": {\"EXPORTER_EXTERNAL_IP\": \"10.5.14.51\"}}'\n

You will get the following return value:

                        {\n  \"values\": {\n    \"global\": {\n      \"exporters\": {\n        \"logging\": {\n          \"scheme\": \"https\",\n          \"host\": \"10.5.14.51\",\n          \"port\": 32007,\n          \"user\": \"elastic\",\n          \"password\": \"j8V1oVoM1184HvQ1F3C8Pom2\"\n        },\n        \"metric\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30683\n        },\n        \"auditLog\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30884\n        },\n        \"trace\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30274\n        }\n      }\n    },\n    \"opentelemetry-operator\": {\n      \"enabled\": true\n    },\n    \"opentelemetry-collector\": {\n      \"enabled\": true\n    }\n  }\n}\n
• global.exporters.logging.host is the log service address
• global.exporters.logging.port is the NodePort exposed by the log service
• global.exporters.metric.host is the metrics service address
• global.exporters.metric.port is the NodePort exposed by the metrics service
• global.exporters.trace.host is the trace service address
• global.exporters.trace.port is the NodePort exposed by the trace service
• global.exporters.auditLog.host is the audit log service address (the same service as traces, on a different port)
• global.exporters.auditLog.port is the NodePort exposed by the audit log service
                      "},{"location":"end-user/insight/quickstart/install/gethosturl.html#loadbalancer","title":"\u901a\u8fc7 LoadBalancer \u8fde\u63a5","text":"
1. If LoadBalancer is enabled in the cluster and a VIP is set for Insight, you can also manually run the following command to get the address information for vminsert and opentelemetry-collector:

                        $ kubectl get service -n insight-system | grep lb\nlb-insight-opentelemetry-collector               LoadBalancer   10.233.23.12    <pending>     4317:31286/TCP,8006:31351/TCP  24d\nlb-vminsert-insight-victoria-metrics-k8s-stack   LoadBalancer   10.233.63.67    <pending>     8480:31629/TCP                 24d\n
• lb-vminsert-insight-victoria-metrics-k8s-stack is the address of the metrics service
• lb-insight-opentelemetry-collector is the address of the trace service
2. Run the following command to get the Elasticsearch address information:

                        $ kubectl get service -n mcamel-system | grep es\nmcamel-common-es-cluster-masters-es-http               NodePort    10.233.16.120   <none>        9200:30465/TCP               47d\n

mcamel-common-es-cluster-masters-es-http is the address of the log service; see the jsonpath sketch below for pulling out the port numbers.
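If only the NodePort numbers are needed, a jsonpath sketch against the services shown above (service names and port numbers come from the sample outputs on this page; adjust them to your environment):

# NodePort behind Elasticsearch port 9200\nkubectl -n mcamel-system get svc mcamel-common-es-cluster-masters-es-http \\\n  -o jsonpath='{.spec.ports[?(@.port==9200)].nodePort}'\n\n# NodePort behind vminsert port 8480\nkubectl -n insight-system get svc lb-vminsert-insight-victoria-metrics-k8s-stack \\\n  -o jsonpath='{.spec.ports[?(@.port==8480)].nodePort}'\n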

                      "},{"location":"end-user/insight/quickstart/install/gethosturl.html#nodeport","title":"\u901a\u8fc7 NodePort \u8fde\u63a5","text":"

When the LB feature is disabled in the global service cluster:

In this case, the LoadBalancer resources described above are not created by default, and the corresponding service names are:

• vminsert-insight-victoria-metrics-k8s-stack (metrics service)
• common-es (log service)
• insight-opentelemetry-collector (trace service)

After obtaining the port information of the corresponding services in either of the two cases above, set the following (an assembled example follows the parameter notes):

                      --set global.exporters.logging.host=  # (1)!\n--set global.exporters.logging.port=  # (2)!\n--set global.exporters.metric.host=   # (3)!\n--set global.exporters.metric.port=   # (4)!\n--set global.exporters.trace.host=    # (5)!\n--set global.exporters.trace.port=    # (6)!\n--set global.exporters.auditLog.host= # (7)!\n
1. Externally accessible management-cluster NodeIP
2. NodePort corresponding to port 9200 of the log service
3. Externally accessible management-cluster NodeIP
4. NodePort corresponding to port 8480 of the metrics service
5. Externally accessible management-cluster NodeIP
6. NodePort corresponding to port 4317 of the trace service
7. Externally accessible management-cluster NodeIP
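Assembled, the flags might look like the sketch below; the node IP and NodePorts are taken from the sample outputs earlier on this page and are placeholders for your own values:

helm upgrade --install insight-agent insight/insight-agent \\\n  -n insight-system --create-namespace \\\n  --set global.exporters.logging.host=10.5.14.51 \\\n  --set global.exporters.logging.port=30465 \\\n  --set global.exporters.metric.host=10.5.14.51 \\\n  --set global.exporters.metric.port=31629 \\\n  --set global.exporters.trace.host=10.5.14.51 \\\n  --set global.exporters.trace.port=31286 \\\n  --set global.exporters.auditLog.host=10.5.14.51\n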
                      "},{"location":"end-user/insight/quickstart/install/helm-installagent.html","title":"\u901a\u8fc7 Helm \u90e8\u7f72 Insight Agent","text":"

This page describes how to install the community edition of Insight Agent from the command line with Helm.

                      "},{"location":"end-user/insight/quickstart/install/helm-installagent.html#insight-agent","title":"\u5b89\u88c5 Insight Agent","text":"
1. Add the chart repository with the following commands:

helm repo add insight https://release.daocloud.io/chartrepo/insight\nhelm repo update\nhelm search repo insight/insight-agent --versions\n
2. Installing Insight Agent requires the Insight Server in the global service cluster to be running normally. Run the following command to install the Insight Agent community edition (this configuration does not enable Tracing):

# Replace the example values below before running:\n#   ${version}                             - the chart version to deploy\n#   10.10.10.x / 32517                     - address and exposed port of the global service cluster's (or an external) Elasticsearch\n#   elastic / dangerous                    - username and password of that Elasticsearch\n#   ${vminsert_address} / ${vminsert_port} - address and port of vminsert in the global service cluster\n#   ${otel_col_address} / ${otel_col_auditlog_port} - externally reachable address and port of the\n#       opentelemetry-collector service whose container port is 8006\nhelm upgrade --install --create-namespace --cleanup-on-fail \\\n    --version ${version} \\\n    insight-agent insight/insight-agent \\\n    --set global.exporters.logging.elasticsearch.host=10.10.10.x \\\n    --set global.exporters.logging.elasticsearch.port=32517 \\\n    --set global.exporters.logging.elasticsearch.user=elastic \\\n    --set global.exporters.logging.elasticsearch.password=dangerous \\\n    --set global.exporters.metric.host=${vminsert_address} \\\n    --set global.exporters.metric.port=${vminsert_port} \\\n    --set global.exporters.auditLog.host=${otel_col_address} \\\n    --set global.exporters.auditLog.port=${otel_col_auditlog_port} \\\n    -n insight-system\n

                        Info

See How to Get the Connection Addresses for the address information.

3. Run the following commands to confirm the installation status:

                        helm list -A\nkubectl get pods -n insight-system\n
                      "},{"location":"end-user/insight/quickstart/install/helm-installagent.html#_1","title":"\u5982\u4f55\u83b7\u53d6\u8fde\u63a5\u5730\u5740","text":""},{"location":"end-user/insight/quickstart/install/helm-installagent.html#insight-agent_1","title":"\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u5b89\u88c5 Insight Agent","text":"

If the Agent is installed in the management cluster, accessing the cluster through domain names is recommended:

export vminsert_host=\"vminsert-insight-victoria-metrics-k8s-stack.insight-system.svc.cluster.local\" # metrics\nexport es_host=\"insight-es-master.insight-system.svc.cluster.local\" # logs\nexport otel_col_host=\"insight-opentelemetry-collector.insight-system.svc.cluster.local\" # traces\n
                      "},{"location":"end-user/insight/quickstart/install/helm-installagent.html#insight-agent_2","title":"\u5728\u5de5\u4f5c\u96c6\u7fa4\u5b89\u88c5 Insight Agent","text":"\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4f7f\u7528\u9ed8\u8ba4\u7684 LoadBalancer\u767b\u5f55\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u63a7\u5236\u53f0\u64cd\u4f5c\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4f7f\u7528 Nodeport

When the global service cluster exposes services with the default LoadBalancer, log in to its console and run the following commands:

                      export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam'\n

You will get the following return value:

                      {\"global\":{\"exporters\":{\"logging\":{\"output\":\"elasticsearch\",\"elasticsearch\":{\"host\":\"10.6.182.32\"},\"kafka\":{},\"host\":\"10.6.182.32\"},\"metric\":{\"host\":\"10.6.182.32\"},\"auditLog\":    {\"host\":\"10.6.182.32\"}}},\"opentelemetry-operator\":{\"enabled\":true},\"opentelemetry-collector\":{\"enabled\":true}}\n

Where:

• global.exporters.logging.elasticsearch.host is the log service address (no need to set the service ports; the defaults are used);
• global.exporters.metric.host is the metrics service address;
• global.exporters.trace.host is the trace service address;
• global.exporters.auditLog.host is the audit log service address (the same service as traces, on a different port);

To look the addresses up directly, log in to the console of the global service cluster and run the following commands:

                      kubectl get service -n insight-system | grep lb\nkubectl get service -n mcamel-system | grep es\n

Where:

• lb-vminsert-insight-victoria-metrics-k8s-stack is the address of the metrics service;
• lb-insight-opentelemetry-collector is the address of the trace service;
• mcamel-common-es-cluster-masters-es-http is the address of the log service;

When the global service cluster exposes services with NodePort, log in to its console and run the following commands:

                      kubectl get service -n insight-system\nkubectl get service -n mcamel-system\n

Where:

• vminsert-insight-victoria-metrics-k8s-stack is the address of the metrics service;
• insight-opentelemetry-collector is the address of the trace service;
• mcamel-common-es-cluster-masters-es-http is the address of the log service;
                      "},{"location":"end-user/insight/quickstart/install/helm-installagent.html#insight-agent_3","title":"\u5347\u7ea7 Insight Agent","text":"
1. Log in to the console of the target cluster and run the following command to back up the --set parameters:

                        helm get values insight-agent -n insight-system -o yaml > insight-agent.yaml\n
2. Run the following command to update the repository:

helm repo update\n
3. Run the following command to upgrade:

helm upgrade insight-agent insight/insight-agent \\\n-n insight-system \\\n-f ./insight-agent.yaml \\\n--version ${version}   # specify the target version\n
4. Run the following command to confirm the installation status:

                        kubectl get pods -n insight-system\n
                      "},{"location":"end-user/insight/quickstart/install/helm-installagent.html#insight-agent_4","title":"\u5378\u8f7d Insight Agent","text":"
                      helm uninstall insight-agent -n insight-system --timeout 10m\n
                      "},{"location":"end-user/insight/quickstart/install/install-agent.html","title":"\u5728\u7ebf\u5b89\u88c5 insight-agent","text":"

insight-agent is the plugin that collects cluster observability data, providing unified collection of metrics, traces, and logs. This page describes how to install insight-agent for an integrated cluster in an online environment.

                      "},{"location":"end-user/insight/quickstart/install/install-agent.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
• The cluster has been successfully integrated into the Container Management module. For how to integrate a cluster, see Integrate a Cluster.
                      "},{"location":"end-user/insight/quickstart/install/install-agent.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
1. Go to the Container Management module and, in the Cluster List, find the name of the cluster where insight-agent is to be installed.

2. Choose Install Now to jump directly, or click the cluster, then click Helm Apps -> Helm Charts in the left navigation, search for insight-agent, and click the card to open its details.

3. Review the insight-agent installation page and click Install to proceed to the next step.

4. Select the version to install, fill in the addresses of the corresponding data storage components of the global service cluster in the form below, confirm the information is correct, and click OK.

• insight-agent is deployed in the cluster's insight-system namespace by default.
• Installing the latest version of insight-agent is recommended.
• The system pre-fills the addresses of the data-reporting components; please still verify them before clicking OK to install. To change the reporting addresses, see Get the Data Reporting Addresses.

5. The system automatically returns to the Helm Apps list. When the status of insight-agent changes from Not Ready to Deployed and all component statuses are Running, the installation has succeeded. After a short wait, the cluster's data can be viewed in the Observability module.

                      Note

• Click ┇ at the far right; in the pop-up menu you can perform more operations such as Update, View YAML, and Delete.
                      "},{"location":"end-user/insight/quickstart/install/knownissues.html","title":"\u5df2\u77e5\u95ee\u9898","text":"

This page lists some known issues related to Insight Agent installation and uninstallation, along with their solutions.

                      "},{"location":"end-user/insight/quickstart/install/knownissues.html#v0230","title":"v0.23.0","text":""},{"location":"end-user/insight/quickstart/install/knownissues.html#insight-agent","title":"Insight Agent","text":""},{"location":"end-user/insight/quickstart/install/knownissues.html#insight-agent_1","title":"Insight Agent \u5378\u8f7d\u5931\u8d25","text":"

When you uninstall Insight Agent with the following command,

                      helm uninstall insight-agent -n insight-system\n

the tls secret used by otel-operator is not removed.

In otel-operator's "reuse tls secret" logic, it checks whether the otel-operator MutationConfiguration exists and reuses the CA cert bound in it. But because helm uninstall has already removed the MutationConfiguration, an empty value results.

Therefore, delete the corresponding secret manually in either of the following two ways:

• Delete from the command line: log in to the console of the target cluster and run the following command:

                        kubectl -n insight-system delete secret insight-agent-opentelemetry-operator-controller-manager-service-cert\n
• Delete from the UI: log in to AI Computing Center Container Management, select the target cluster, go to Secrets in the left navigation, enter insight-agent-opentelemetry-operator-controller-manager-service-cert, and select Delete.

                      "},{"location":"end-user/insight/quickstart/install/knownissues.html#v0220","title":"v0.22.0","text":""},{"location":"end-user/insight/quickstart/install/knownissues.html#insight-agent_2","title":"Insight Agent","text":""},{"location":"end-user/insight/quickstart/install/knownissues.html#insight-agent_3","title":"\u5347\u7ea7 Insight Agent \u65f6\u66f4\u65b0\u65e5\u5fd7\u6536\u96c6\u7aef\uff0c\u672a\u751f\u6548","text":"

After updating the insight-agent log configuration from elasticsearch to kafka, or from kafka to elasticsearch, the change does not actually take effect; the pre-update configuration is still used.

Solution:

Manually restart fluentbit in the cluster.
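For example, assuming the DaemonSet name used elsewhere on this page:

kubectl -n insight-system rollout restart daemonset insight-agent-fluent-bit\n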

                      "},{"location":"end-user/insight/quickstart/install/knownissues.html#v0210","title":"v0.21.0","text":""},{"location":"end-user/insight/quickstart/install/knownissues.html#insight-agent_4","title":"Insight Agent","text":""},{"location":"end-user/insight/quickstart/install/knownissues.html#podmonitor-jvm","title":"PodMonitor \u91c7\u96c6\u591a\u4efd JVM \u6307\u6807\u6570\u636e","text":"
1. This version's PodMonitor/insight-kubernetes-pod has a defect: it incorrectly creates jobs to collect metrics from all containers of Pods labeled insight.opentelemetry.io/metric-scrape=true, whereas only the port of the container matching insight.opentelemetry.io/metric-port should be collected.

2. Because PrometheusOperator pre-sets some service-discovery configuration once a PodMonitor is declared, and considering CRD compatibility issues, the mechanism of creating collection tasks from annotations via PodMonitor was abandoned.

3. Prometheus's built-in additional scrape config mechanism is used instead: the service-discovery rules are placed in a secret and fed into Prometheus.

In summary:

1. Delete the current insight-kubernetes-pod PodMonitor (see the command sketch after this list)
2. Use the new rules instead
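For step 1, assuming the PodMonitor lives in insight-system:

kubectl -n insight-system delete podmonitor insight-kubernetes-pod\n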

The new rules use action: keepequal to compare source_labels with target_label and decide whether to create a collection task for a container's port. Note that this capability is only available in Prometheus 2.41.0 (2022-12-20) and later.

                      +    - source_labels: [__meta_kubernetes_pod_annotation_insight_opentelemetry_io_metric_port]\n+      separator: ;\n+      target_label: __meta_kubernetes_pod_container_port_number\n+      action: keepequal\n
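For orientation, a minimal sketch of how such a rule might sit inside a Prometheus additional scrape config. Only the keepequal rule is taken from the diff above; the job name and the keep rule on the scrape annotation are illustrative:

scrape_configs:\n  - job_name: insight-kubernetes-pod   # hypothetical job name\n    kubernetes_sd_configs:\n      - role: pod\n    relabel_configs:\n      # keep only Pods that opted in to scraping\n      - source_labels: [__meta_kubernetes_pod_annotation_insight_opentelemetry_io_metric_scrape]\n        regex: \"true\"\n        action: keep\n      # create a target only for the container port named in the annotation\n      - source_labels: [__meta_kubernetes_pod_annotation_insight_opentelemetry_io_metric_port]\n        target_label: __meta_kubernetes_pod_container_port_number\n        action: keepequal\n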
                      "},{"location":"end-user/insight/quickstart/install/upgrade-note.html","title":"\u5347\u7ea7\u6ce8\u610f\u4e8b\u9879","text":"

This page covers some notes on upgrading insight-server and insight-agent.

                      "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#insight-agent","title":"insight-agent","text":""},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v028x-v029x","title":"\u4ece v0.28.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.29.x","text":"

Because v0.29.0 upgraded the version of the OpenTelemetry community's operator chart, the supported values of featureGates in the values have changed. Therefore, before upgrading, set featureGates to empty:

                      -  --set opentelemetry-operator.manager.featureGates=\"+operator.autoinstrumentation.go,+operator.autoinstrumentation.multi-instrumentation,+operator.autoinstrumentation.nginx\" \\\n+  --set opentelemetry-operator.manager.featureGates=\"\"\n
                      "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#insight-server","title":"insight-server","text":""},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v026x-v027x","title":"\u4ece v0.26.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.27.x \u6216\u66f4\u9ad8\u7248\u672c","text":"

In v0.27.x, the switch for the vector component was split out separately. If the existing environment has vector enabled, specify --set vector.enabled=true when upgrading insight-server.

                      "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v019x-020x","title":"\u4ece v0.19.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 0.20.x","text":"

Before upgrading Insight, run the following commands to manually delete the jaeger-collector and jaeger-query deployments:

                      kubectl -n insight-system delete deployment insight-jaeger-collector\nkubectl -n insight-system delete deployment insight-jaeger-query\n
                      "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v017x-v018x","title":"\u4ece v0.17.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.18.x","text":"

Because the Jaeger-related deployment files were updated in 0.18.x, run the following commands manually before upgrading insight-server:

                      kubectl -n insight-system delete deployment insight-jaeger-collector\nkubectl -n insight-system delete deployment insight-jaeger-query\n

                      Because metric names changed in 0.18.x, insight-agent should also be upgraded after insight-server has been upgraded.

                      In addition, the parameter for enabling the tracing module was adjusted, along with the ElasticSearch connection settings. See the following parameters:

                      +  --set global.tracing.enable=true \\\n-  --set jaeger.collector.enabled=true \\\n-  --set jaeger.query.enabled=true \\\n+  --set global.elasticsearch.scheme=${your-external-elasticsearch-scheme} \\\n+  --set global.elasticsearch.host=${your-external-elasticsearch-host} \\\n+  --set global.elasticsearch.port=${your-external-elasticsearch-port} \\\n+  --set global.elasticsearch.user=${your-external-elasticsearch-username} \\\n+  --set global.elasticsearch.password=${your-external-elasticsearch-password} \\\n-  --set jaeger.storage.elasticsearch.scheme=${your-external-elasticsearch-scheme} \\\n-  --set jaeger.storage.elasticsearch.host=${your-external-elasticsearch-host} \\\n-  --set jaeger.storage.elasticsearch.port=${your-external-elasticsearch-port} \\\n-  --set jaeger.storage.elasticsearch.user=${your-external-elasticsearch-username} \\\n-  --set jaeger.storage.elasticsearch.password=${your-external-elasticsearch-password} \\\n
                      "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v015x-v016x","title":"\u4ece v0.15.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.16.x","text":"

                      \u7531\u4e8e 0.16.x \u4e2d\u4f7f\u7528\u4e86 vmalertmanagers CRD \u7684\u65b0\u7279\u6027\u53c2\u6570 disableRouteContinueEnforce\uff0c \u56e0\u6b64\u9700\u8981\u5728\u5347\u7ea7 insight-server \u524d\u624b\u52a8\u6267\u884c\u5982\u4e0b\u547d\u4ee4\u3002

                      kubectl apply --server-side -f https://raw.githubusercontent.com/VictoriaMetrics/operator/v0.33.0/config/crd/bases/operator.victoriametrics.com_vmalertmanagers.yaml --force-conflicts\n

                      Note

                      If you installed offline, after extracting the Insight offline package, run the following command to update the CRDs.

                      kubectl apply --server-side -f insight/dependency-crds --force-conflicts \n
                      "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#insight-agent_1","title":"insight-agent","text":""},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v023x-v024x","title":"\u4ece v0.23.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.24.x","text":"

                      \u7531\u4e8e 0.24.x \u7248\u672c\u4e2d OTEL operator chart \u4e2d\u65b0\u589e\u4e86 CRD\uff0c\u4f46\u7531\u4e8e Helm Upgrade \u65f6\u5e76\u4e0d\u4f1a\u66f4\u65b0 CRD\uff0c\u56e0\u6b64\uff0c\u9700\u8981\u624b\u52a8\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a

                      kubectl apply -f https://raw.githubusercontent.com/open-telemetry/opentelemetry-helm-charts/main/charts/opentelemetry-operator/crds/crd-opentelemetry.io_opampbridges.yaml\n

                      If you installed offline, you can find the YAML for the above CRD after extracting the insight-agent offline package; after extracting the Insight-Agent chart, run the following command manually:

                      kubectl apply -f charts/agent/crds/crd-opentelemetry.io_opampbridges.yaml\n
                      "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v019x-v020x","title":"\u4ece v0.19.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.20.x","text":"

                      \u7531\u4e8e 0.20.x \u4e2d\u589e\u52a0\u4e86 Kafka \u65e5\u5fd7\u5bfc\u51fa\u914d\u7f6e\uff0c\u65e5\u5fd7\u5bfc\u51fa\u914d\u7f6e\u505a\u4e86\u4e00\u4e9b\u8c03\u6574\u3002\u5347\u7ea7 insight-agent \u4e4b\u524d\u9700\u8981\u6ce8\u610f\u53c2\u6570\u53d8\u5316\uff0c \u5373\u539f\u6765 logging \u7684\u914d\u7f6e\u5df2\u7ecf\u79fb\u5230\u4e86\u914d\u7f6e\u4e2d logging.elasticsearch\uff1a

                      -  --set global.exporters.logging.host \\\n-  --set global.exporters.logging.port \\\n+  --set global.exporters.logging.elasticsearch.host \\\n+  --set global.exporters.logging.elasticsearch.port \\\n
                      "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v017x-v018x_1","title":"\u4ece v0.17.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.18.x","text":"

                      \u7531\u4e8e 0.18.x \u4e2d\u66f4\u65b0\u4e86 Jaeger \u76f8\u5173\u90e8\u7f72\u6587\u4ef6\uff0c\u56e0\u6b64\u9700\u8981\u5728\u5347\u7ea7 insight-agent \u524d\u9700\u8981\u6ce8\u610f\u53c2\u6570\u7684\u6539\u52a8\u3002

                      +  --set global.exporters.trace.enable=true \\\n-  --set opentelemetry-collector.enabled=true \\\n-  --set opentelemetry-operator.enabled=true \\\n
                      "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v016x-v017x","title":"\u4ece v0.16.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.17.x","text":"

                      \u5728 v0.17.x \u7248\u672c\u4e2d\u5c06 kube-prometheus-stack chart \u7248\u672c\u4ece 41.9.1 \u5347\u7ea7\u81f3 45.28.1, \u5176\u4e2d\u4f7f\u7528\u7684 CRD \u4e5f\u5b58\u5728\u4e00\u4e9b\u5b57\u6bb5\u7684\u5347\u7ea7\uff0c\u5982 servicemonitor \u7684 attachMetadata \u5b57\u6bb5\uff0c\u56e0\u6b64\u9700\u8981\u5728\u5347\u7ea7 insight-agent \u524d\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff1a

                      kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --force-conflicts\n

                      If you installed offline, you can find the YAML for the above CRD in insight-agent/dependency-crds after extracting the insight-agent offline package.

                      "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v011x-v012x","title":"Upgrade from v0.11.x (or Lower) to v0.12.x","text":"

                      In v0.12.x the kube-prometheus-stack chart was upgraded from 39.6.0 to 41.9.1, which includes upgrading prometheus-operator to v0.60.1 and the prometheus-node-exporter chart to 4.3.0, among others. After the upgrade, prometheus-node-exporter uses the Kubernetes recommended labels, so you need to delete the node-exporter DaemonSet before upgrading. prometheus-operator updated its CRDs, so run the following commands before upgrading insight-agent:

                      kubectl delete daemonset insight-agent-prometheus-node-exporter -n insight-system\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml --force-conflicts\n

                      Note

                      If you installed offline, after extracting the insight-agent offline package, run the following command to update the CRDs.

                      kubectl apply --server-side -f insight-agent/dependency-crds --force-conflicts\n
                      "},{"location":"end-user/insight/quickstart/otel/operator.html","title":"\u901a\u8fc7 Operator \u5b9e\u73b0\u5e94\u7528\u7a0b\u5e8f\u65e0\u4fb5\u5165\u589e\u5f3a","text":"

                      \u76ee\u524d\u53ea\u6709 Java\u3001NodeJs\u3001Python\u3001.Net\u3001Golang \u652f\u6301 Operator \u7684\u65b9\u5f0f\u65e0\u4fb5\u5165\u63a5\u5165\u3002

                      "},{"location":"end-user/insight/quickstart/otel/operator.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u8bf7\u786e\u4fdd insight-agent \u5df2\u7ecf\u5c31\u7eea\u3002\u5982\u82e5\u6ca1\u6709\uff0c\u8bf7\u53c2\u8003\u5b89\u88c5 insight-agent \u91c7\u96c6\u6570\u636e\u5e76\u786e\u4fdd\u4ee5\u4e0b\u4e09\u9879\u5c31\u7eea\uff1a

                      • \u4e3a insight-agent \u5f00\u542f trace \u529f\u80fd
                      • trace \u6570\u636e\u7684\u5730\u5740\u4ee5\u53ca\u7aef\u53e3\u662f\u5426\u586b\u5199\u6b63\u786e
                      • deployment/insight-agent-opentelemetry-operator \u548c deployment/insight-agent-opentelemetry-collector \u5bf9\u5e94\u7684 Pod \u5df2\u7ecf\u51c6\u5907\u5c31\u7eea
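
                      A hedged way to verify the last item from the command line (a sketch, assuming the default component names in the insight-system namespace):

                      kubectl -n insight-system get deployment insight-agent-opentelemetry-operator insight-agent-opentelemetry-collector\n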
                      "},{"location":"end-user/insight/quickstart/otel/operator.html#instrumentation-cr","title":"\u5b89\u88c5 Instrumentation CR","text":"

                      Tip

                      \u4ece Insight v0.22.0 \u5f00\u59cb\uff0c\u4e0d\u518d\u9700\u8981\u624b\u52a8\u5b89\u88c5 Instrumentation CR\u3002

                      \u5728 insight-system \u547d\u540d\u7a7a\u95f4\u4e0b\u5b89\u88c5\uff0c\u4e0d\u540c\u7248\u672c\u4e4b\u95f4\u6709\u4e00\u4e9b\u7ec6\u5c0f\u7684\u5dee\u522b\u3002

                      Insight v0.21.xInsight v0.20.xInsight v0.18.xInsight v0.17.xInsight v0.16.x
                      K8S_CLUSTER_UID=$(kubectl get namespace kube-system -o jsonpath='{.metadata.uid}')\nkubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/openinsight-proj/autoinstrumentation-java:1.31.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n      - name: OTEL_K8S_CLUSTER_UID\n        value: $K8S_CLUSTER_UID\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
                      kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.29.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0-rc.2\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
                      kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.25.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.37.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.38b0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.1-alpha\nEOF\n
                      kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.23.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.34.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.33b0\nEOF\n
                      kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.23.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.34.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.33b0\nEOF\n
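
                      After applying the CR that matches your version, a hedged way to confirm it was created (a sketch; instrumentations.opentelemetry.io is the custom resource type also referenced in the steps below):

                      kubectl -n insight-system get instrumentations.opentelemetry.io\n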
                      "},{"location":"end-user/insight/quickstart/otel/operator.html#_2","title":"\u4e0e\u670d\u52a1\u7f51\u683c\u94fe\u8def\u4e32\u8054\u573a\u666f","text":"

                      \u5982\u679c\u60a8\u5f00\u542f\u4e86\u670d\u52a1\u7f51\u683c\u7684\u94fe\u8def\u8ffd\u8e2a\u80fd\u529b\uff0c\u9700\u8981\u989d\u5916\u589e\u52a0\u4e00\u4e2a\u73af\u5883\u53d8\u91cf\u6ce8\u5165\u7684\u914d\u7f6e\uff1a

                      "},{"location":"end-user/insight/quickstart/otel/operator.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b","text":"
                      1. \u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3.0\uff0c\u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 \u540e\u9009\u62e9\u8fdb\u5165\u76ee\u6807\u96c6\u7fa4\uff0c
                      2. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u9009\u62e9 \u81ea\u5b9a\u4e49\u8d44\u6e90 \uff0c\u627e\u5230 instrumentations.opentelemetry.io \u540e\u8fdb\u5165\u8be6\u60c5\u9875\u3002
                      3. \u9009\u62e9 insight-system \u547d\u540d\u7a7a\u95f4\u540e\uff0c\u7f16\u8f91 insight-opentelemetry-autoinstrumentation \uff0c\u5728 spec:env: \u4e0b\u6dfb\u52a0\u4ee5\u4e0b\u5185\u5bb9\uff1a

                            - name: OTEL_SERVICE_NAME\n      valueFrom:\n        fieldRef:\n          fieldPath: metadata.labels['app'] \n

                        The complete command is as follows (for Insight v0.21.x):

                        K8S_CLUSTER_UID=$(kubectl get namespace kube-system -o jsonpath='{.metadata.uid}')\nkubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n    - name: OTEL_SERVICE_NAME\n      valueFrom:\n        fieldRef:\n          fieldPath: metadata.labels['app'] \n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/openinsight-proj/autoinstrumentation-java:1.31.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n      - name: OTEL_K8S_CLUSTER_UID\n        value: $K8S_CLUSTER_UID\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
                      "},{"location":"end-user/insight/quickstart/otel/operator.html#_4","title":"\u6dfb\u52a0\u6ce8\u89e3\uff0c\u81ea\u52a8\u63a5\u5165\u94fe\u8def","text":"

                      \u4ee5\u4e0a\u5c31\u7eea\u4e4b\u540e\uff0c\u60a8\u5c31\u53ef\u4ee5\u901a\u8fc7\u6ce8\u89e3\uff08Annotation\uff09\u65b9\u5f0f\u4e3a\u5e94\u7528\u7a0b\u5e8f\u63a5\u5165\u94fe\u8def\u8ffd\u8e2a\u4e86\uff0cOTel \u76ee\u524d\u652f\u6301\u901a\u8fc7\u6ce8\u89e3\u7684\u65b9\u5f0f\u63a5\u5165\u94fe\u8def\u3002 \u6839\u636e\u670d\u52a1\u8bed\u8a00\uff0c\u9700\u8981\u6dfb\u52a0\u4e0a\u4e0d\u540c\u7684 pod annotations\u3002\u6bcf\u4e2a\u670d\u52a1\u53ef\u6dfb\u52a0\u4e24\u7c7b\u6ce8\u89e3\u4e4b\u4e00\uff1a

                      • \u53ea\u6ce8\u5165\u73af\u5883\u53d8\u91cf\u6ce8\u89e3

                        \u8fd9\u7c7b\u6ce8\u89e3\u53ea\u6709\u4e00\u4e2a\uff0c\u7528\u4e8e\u6dfb\u52a0 otel \u76f8\u5173\u7684\u73af\u5883\u53d8\u91cf\uff0c\u6bd4\u5982\u94fe\u8def\u4e0a\u62a5\u5730\u5740\u3001\u5bb9\u5668\u6240\u5728\u7684\u96c6\u7fa4 id\u3001\u547d\u540d\u7a7a\u95f4\u7b49\uff08\u8fd9\u4e2a\u6ce8\u89e3\u5728\u5e94\u7528\u4e0d\u652f\u6301\u81ea\u52a8\u63a2\u9488\u8bed\u8a00\u65f6\u5341\u5206\u6709\u7528\uff09

                        instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

                        The value is split by / into two parts: the first part (insight-system) is the namespace of the CR installed in the previous step, and the second part (insight-opentelemetry-autoinstrumentation) is the name of that CR.

                      • Auto-probe injection plus environment-variable injection annotations

                        There are currently four annotations of this kind, corresponding to four programming languages: java, nodejs, python, and dotnet. Using one of them injects an auto-instrumentation probe and the default OTel environment variables into the first container under spec.pod:

                        Java AppNodeJs AppPython AppDotnet AppGolang App
                        instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                        instrumentation.opentelemetry.io/inject-nodejs: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                        instrumentation.opentelemetry.io/inject-python: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                        instrumentation.opentelemetry.io/inject-dotnet: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

                        Because Go auto-instrumentation requires OTEL_GO_AUTO_TARGET_EXE to be set, you must provide a valid executable path via the annotation or the Instrumentation resource. Leaving this value unset aborts the Go auto-instrumentation injection, and trace integration fails.

                        instrumentation.opentelemetry.io/inject-go: \"insight-system/insight-opentelemetry-autoinstrumentation\"\ninstrumentation.opentelemetry.io/otel-go-auto-target-exe: \"/path/to/container/executable\"\n

                        Go auto-instrumentation also requires elevated permissions. The following permissions are set automatically and are required.

                        securityContext:\n  privileged: true\n  runAsUser: 0\n

                      Tip

                      When injecting a probe, the OpenTelemetry Operator automatically adds some OTel-related environment variables and also supports overriding them. The override precedence for these environment variables is:

                      original container env vars -> language specific env vars -> common env vars -> instrument spec configs' vars\n

                      However, avoid manually overriding OTEL_RESOURCE_ATTRIBUTES_NODE_NAME. The Operator uses it internally as a marker of whether a Pod has already had a probe injected; if you add it manually, the probe may fail to be injected.

                      "},{"location":"end-user/insight/quickstart/otel/operator.html#demo","title":"Auto-injection Demo","text":"

                      Note that these annotations are added to the pod template (spec.template.metadata.annotations), as in the Deployment below.

                      apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-app\n  labels:\n    app: my-app\nspec:\n  selector:\n    matchLabels:\n      app: my-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-app\n      annotations:\n        instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n    spec:\n      containers:\n      - name: myapp\n        image: jaegertracing/vertx-create-span:operator-e2e-tests\n        ports:\n          - containerPort: 8080\n            protocol: TCP\n

                      The final generated YAML is as follows:

                      apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-deployment-with-sidecar-565bd877dd-nqkk6\n  generateName: my-deployment-with-sidecar-565bd877dd-\n  namespace: default\n  uid: aa89ca0d-620c-4d20-8bc1-37d67bad4ea4\n  resourceVersion: '2668986'\n  creationTimestamp: '2022-04-08T05:58:48Z'\n  labels:\n    app: my-pod-with-sidecar\n    pod-template-hash: 565bd877dd\n  annotations:\n    cni.projectcalico.org/containerID: 234eae5e55ea53db2a4bc2c0384b9a1021ed3908f82a675e4a92a49a7e80dd61\n    cni.projectcalico.org/podIP: 192.168.134.133/32\n    cni.projectcalico.org/podIPs: 192.168.134.133/32\n    instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\nspec:\n  volumes:\n    - name: kube-api-access-sp2mz\n      projected:\n        sources:\n          - serviceAccountToken:\n              expirationSeconds: 3607\n              path: token\n          - configMap:\n              name: kube-root-ca.crt\n              items:\n                - key: ca.crt\n                  path: ca.crt\n          - downwardAPI:\n              items:\n                - path: namespace\n                  fieldRef:\n                    apiVersion: v1\n                    fieldPath: metadata.namespace\n        defaultMode: 420\n    - name: opentelemetry-auto-instrumentation\n      emptyDir: {}\n  initContainers:\n    - name: opentelemetry-auto-instrumentation\n      image: >-\n        ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java\n      command:\n        - cp\n        - /javaagent.jar\n        - /otel-auto-instrumentation/javaagent.jar\n      resources: {}\n      volumeMounts:\n        - name: opentelemetry-auto-instrumentation\n          mountPath: /otel-auto-instrumentation\n        - name: kube-api-access-sp2mz\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  containers:\n    - name: myapp\n      image: ghcr.io/pavolloffay/spring-petclinic:latest\n      env:\n        - name: OTEL_JAVAAGENT_DEBUG\n          value: 'true'\n        - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n          value: 'true'\n        - name: SPLUNK_PROFILER_ENABLED\n          value: 'false'\n        - name: JAVA_TOOL_OPTIONS\n          value: ' -javaagent:/otel-auto-instrumentation/javaagent.jar'\n        - name: OTEL_TRACES_EXPORTER\n          value: otlp\n        - name: OTEL_EXPORTER_OTLP_ENDPOINT\n          value: http://insight-agent-opentelemetry-collector.svc.cluster.local:4317\n        - name: OTEL_EXPORTER_OTLP_TIMEOUT\n          value: '20'\n        - name: OTEL_TRACES_SAMPLER\n          value: parentbased_traceidratio\n        - name: OTEL_TRACES_SAMPLER_ARG\n          value: '0.85'\n        - name: SPLUNK_TRACE_RESPONSE_HEADER_ENABLED\n          value: 'true'\n        - name: OTEL_SERVICE_NAME\n          value: my-deployment-with-sidecar\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.name\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_UID\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.uid\n        - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: spec.nodeName\n        - name: 
OTEL_RESOURCE_ATTRIBUTES\n          value: >-\n            k8s.container.name=myapp,k8s.deployment.name=my-deployment-with-sidecar,k8s.deployment.uid=8de6929d-dda0-436c-bca1-604e9ca7ea4e,k8s.namespace.name=default,k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME),k8s.pod.uid=$(OTEL_RESOURCE_ATTRIBUTES_POD_UID),k8s.replicaset.name=my-deployment-with-sidecar-565bd877dd,k8s.replicaset.uid=190d5f6e-ba7f-4794-b2e6-390b5879a6c4\n        - name: OTEL_PROPAGATORS\n          value: jaeger,b3\n      resources: {}\n      volumeMounts:\n        - name: kube-api-access-sp2mz\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n        - name: opentelemetry-auto-instrumentation\n          mountPath: /otel-auto-instrumentation\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  restartPolicy: Always\n  terminationGracePeriodSeconds: 30\n  dnsPolicy: ClusterFirst\n  serviceAccountName: default\n  serviceAccount: default\n  nodeName: k8s-master3\n  securityContext:\n    runAsUser: 1000\n    runAsGroup: 3000\n    fsGroup: 2000\n  schedulerName: default-scheduler\n  tolerations:\n    - key: node.kubernetes.io/not-ready\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n    - key: node.kubernetes.io/unreachable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n  priority: 0\n  enableServiceLinks: true\n  preemptionPolicy: PreemptLowerPriority\n
                      "},{"location":"end-user/insight/quickstart/otel/operator.html#_5","title":"\u94fe\u8def\u67e5\u8be2","text":"

                      \u5982\u4f55\u67e5\u8be2\u5df2\u7ecf\u63a5\u5165\u7684\u670d\u52a1\uff0c\u53c2\u8003\u94fe\u8def\u67e5\u8be2\u3002

                      "},{"location":"end-user/insight/quickstart/otel/otel.html","title":"\u4f7f\u7528 OTel \u8d4b\u4e88\u5e94\u7528\u53ef\u89c2\u6d4b\u6027","text":"

                      Instrumentation is the process of enabling application code to generate telemetry data, that is, anything that helps you monitor or measure the performance and state of your application.

                      OpenTelemetry is the leading open source project that provides instrumentation libraries for the major programming languages and popular frameworks. It is a project under the Cloud Native Computing Foundation and is backed by the community's vast resources. It provides a standardized data format for the collected data, with no need to integrate with any specific vendor.

                      Insight supports OpenTelemetry for application instrumentation to enhance your applications.

                      This guide introduces the basic concepts of telemetry instrumentation with OpenTelemetry. OpenTelemetry also has an ecosystem of libraries, plugins, integrations, and other useful tools that extend it. You can find these resources in the OTel Registry.

                      You can use any open-standard library for telemetry instrumentation and use Insight as the observability backend to ingest, analyze, and visualize the data.

                      To instrument your code, you can use the language-specific instrumentation that OpenTelemetry provides:

                      Insight currently provides simple ways to instrument .NET, NodeJS, Java, Python, and Golang applications with OpenTelemetry. Follow the guides below.

                      "},{"location":"end-user/insight/quickstart/otel/otel.html#_1","title":"Trace Enhancement","text":"
                      • Best practice for trace integration: Non-intrusive Application Enhancement via Operator
                      • Manual instrumentation, with Go as an example: Enhance Go Applications with the OpenTelemetry SDK
                      • Non-intrusive probes for Go via eBPF (experimental feature)
                      "},{"location":"end-user/insight/quickstart/otel/send_tracing_to_insight.html","title":"Send Trace Data to Insight","text":"

                      This document mainly describes how customer applications report trace data to Insight on their own. It covers the following two scenarios:

                      1. The application reports traces to Insight via the OTel Agent/SDK
                      2. Traces are forwarded to Insight via an OpenTelemetry Collector (OTel COL for short)

                      Every cluster with Insight Agent installed has an insight-agent-otel-col component that receives all trace data for that cluster. This component is therefore the entry point on the user integration side, and its address needs to be obtained first. You can get the address of the cluster's OpenTelemetry Collector through the AI Computing Center UI, for example insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317 :
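
                      A hedged command-line alternative is to look up the collector Service directly (a sketch, assuming the default names):

                      kubectl -n insight-system get svc insight-agent-opentelemetry-collector\n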

                      Beyond that, there are some subtle differences between the reporting methods:

                      "},{"location":"end-user/insight/quickstart/otel/send_tracing_to_insight.html#otel-agentsdk-insight-agent-opentelemtry-collector","title":"Report Traces to the Insight Agent OpenTelemetry Collector via the OTel Agent/SDK","text":"

                      To report trace data to Insight properly and have it displayed correctly, it is recommended to provide the metadata (Resource Attributes) required by OTLP through the following environment variables. There are two ways to do this:

                      • Add them manually in the deployment YAML, for example:

                        ...\n- name: OTEL_EXPORTER_OTLP_ENDPOINT\n  value: \"http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\"\n- name: \"OTEL_SERVICE_NAME\"\n  value: my-java-app-name\n- name: \"OTEL_K8S_NAMESPACE\"\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: metadata.namespace\n- name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: spec.nodeName\n- name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: metadata.name\n- name: OTEL_RESOURCE_ATTRIBUTES\n  value: \"k8s.namespace.name=$(OTEL_K8S_NAMESPACE),k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME)\"\n
                      • Use Insight Agent's ability to automatically inject the above metadata (Resource Attributes)

                        After making sure Insight Agent is working properly and the Instrumentation CR is installed, you only need to add the following annotation to the Pod:

                        instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

                        For example:

                        apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-with-auto-instrumentation\nspec:\n  selector:\n    matchLabels:\n      app.kubernetes.io/name: my-deployment-with-auto-instrumentation-kubernetes\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: my-deployment-with-auto-instrumentation-kubernetes\n      annotations:\n        sidecar.opentelemetry.io/inject: \"false\"\n        instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                      "},{"location":"end-user/insight/quickstart/otel/send_tracing_to_insight.html#opentelemtry-collector-insight-agent-opentelemtry-collector","title":"\u901a\u8fc7 Opentelemtry Collector \u5c06\u94fe\u8def\u8f6c\u53d1\u7ed9 Insight Agent Opentelemtry Collector","text":"

                      \u5728\u4fdd\u8bc1\u5e94\u7528\u6dfb\u52a0\u4e86\u5982\u4e0a\u5143\u6570\u636e\u4e4b\u540e\uff0c\u53ea\u9700\u5728\u5ba2\u6237 Opentelemtry Collector \u91cc\u9762\u65b0\u589e\u4e00\u4e2a OTLP Exporter \u5c06\u94fe\u8def\u6570\u636e\u8f6c\u53d1\u7ed9 Insight Agent Opentelemtry Collector \u5373\u53ef\uff0c\u5982\u4e0b Opentelemtry Collector \u914d\u7f6e\u6587\u4ef6\u6240\u793a\uff1a

                      ...\nexporters:\n  otlp/insight:\n    endpoint: insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\nservice:\n  ...\n  pipelines:\n    ...\n    traces:\n      exporters:\n        - otlp/insight\n
                      "},{"location":"end-user/insight/quickstart/otel/send_tracing_to_insight.html#_1","title":"\u53c2\u8003","text":"
                      • \u901a\u8fc7 Operator \u5b9e\u73b0\u5e94\u7528\u7a0b\u5e8f\u65e0\u4fb5\u5165\u589e\u5f3a
                      • \u4f7f\u7528 OTel \u8d4b\u4e88\u5e94\u7528\u53ef\u89c2\u6d4b\u6027
                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html","title":"\u4f7f\u7528 OTel SDK \u589e\u5f3a Go \u5e94\u7528\u7a0b\u5e8f","text":"

                      Golang \u65e0\u4fb5\u5165\u5f0f\u63a5\u5165\u94fe\u8def\u8bf7\u53c2\u8003 \u901a\u8fc7 Operator \u5b9e\u73b0\u5e94\u7528\u7a0b\u5e8f\u65e0\u4fb5\u5165\u589e\u5f3a \u6587\u6863\uff0c\u901a\u8fc7\u6ce8\u89e3\u5b9e\u73b0\u81ea\u52a8\u63a5\u5165\u94fe\u8def\u3002

                      OpenTelemetry \u4e5f\u7b80\u79f0\u4e3a OTel\uff0c\u662f\u4e00\u4e2a\u5f00\u6e90\u7684\u53ef\u89c2\u6d4b\u6027\u6846\u67b6\uff0c\u53ef\u4ee5\u5e2e\u52a9\u5728 Go \u5e94\u7528\u7a0b\u5e8f\u4e2d\u751f\u6210\u548c\u6536\u96c6\u9065\u6d4b\u6570\u636e\uff1a\u94fe\u8def\u3001\u6307\u6807\u548c\u65e5\u5fd7\u3002

                      \u672c\u6587\u4e3b\u8981\u8bb2\u89e3\u5982\u4f55\u5728 Go \u5e94\u7528\u7a0b\u5e8f\u4e2d\u901a\u8fc7 OpenTelemetry Go SDK \u589e\u5f3a\u5e76\u63a5\u5165\u94fe\u8def\u76d1\u63a7\u3002

                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#otel-sdk-go_1","title":"\u4f7f\u7528 OTel SDK \u589e\u5f3a Go \u5e94\u7528","text":""},{"location":"end-user/insight/quickstart/otel/golang/golang.html#_1","title":"\u5b89\u88c5\u76f8\u5173\u4f9d\u8d56","text":"

                      \u5fc5\u987b\u5148\u5b89\u88c5\u4e0e OpenTelemetry exporter \u548c SDK \u76f8\u5173\u7684\u4f9d\u8d56\u9879\u3002\u5982\u679c\u60a8\u6b63\u5728\u4f7f\u7528\u5176\u4ed6\u8bf7\u6c42\u8def\u7531\u5668\uff0c\u8bf7\u53c2\u8003\u8bf7\u6c42\u8def\u7531\u3002 \u5207\u6362/\u8fdb\u5165\u5230\u5e94\u7528\u7a0b\u5e8f\u6e90\u6587\u4ef6\u5939\u540e\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a

                      go get go.opentelemetry.io/otel@v1.19.0 \\\n  go.opentelemetry.io/otel/trace@v1.19.0 \\\n  go.opentelemetry.io/otel/sdk@v1.19.0 \\\n  go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin@v0.46.1 \\\n  go.opentelemetry.io/otel/exporters/otlp/otlptrace@v1.19.0 \\\n  go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc@v1.19.0\n
                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#otel-sdk","title":"\u4f7f\u7528 OTel SDK \u521b\u5efa\u521d\u59cb\u5316\u51fd\u6570","text":"

                      \u4e3a\u4e86\u8ba9\u5e94\u7528\u7a0b\u5e8f\u80fd\u591f\u53d1\u9001\u6570\u636e\uff0c\u9700\u8981\u4e00\u4e2a\u51fd\u6570\u6765\u521d\u59cb\u5316 OpenTelemetry\u3002\u5728 main.go \u6587\u4ef6\u4e2d\u6dfb\u52a0\u4ee5\u4e0b\u4ee3\u7801\u7247\u6bb5:

                      import (\n    \"context\"\n    \"os\"\n    \"time\"\n\n    \"go.opentelemetry.io/otel\"\n    \"go.opentelemetry.io/otel/exporters/otlp/otlptrace\"\n    \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc\"\n    \"go.opentelemetry.io/otel/propagation\"\n    \"go.opentelemetry.io/otel/sdk/resource\"\n    sdktrace \"go.opentelemetry.io/otel/sdk/trace\"\n    semconv \"go.opentelemetry.io/otel/semconv/v1.7.0\"\n    \"go.uber.org/zap\"\n    \"google.golang.org/grpc\"\n)\n\nvar tracerExp *otlptrace.Exporter\n\nfunc retryInitTracer() func() {\n    var shutdown func()\n    go func() {\n        for {\n            // otel will reconnected and re-send spans when otel col recover. so, we don't need to re-init tracer exporter.\n            if tracerExp == nil {\n                shutdown = initTracer()\n            } else {\n                break\n            }\n            time.Sleep(time.Minute * 5)\n        }\n    }()\n    return shutdown\n}\n\nfunc initTracer() func() {\n    // temporarily set timeout to 10s\n    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n    defer cancel()\n\n    serviceName, ok := os.LookupEnv(\"OTEL_SERVICE_NAME\")\n    if !ok {\n        serviceName = \"server_name\"\n        os.Setenv(\"OTEL_SERVICE_NAME\", serviceName)\n    }\n    otelAgentAddr, ok := os.LookupEnv(\"OTEL_EXPORTER_OTLP_ENDPOINT\")\n    if !ok {\n        otelAgentAddr = \"http://localhost:4317\"\n        os.Setenv(\"OTEL_EXPORTER_OTLP_ENDPOINT\", otelAgentAddr)\n    }\n    zap.S().Infof(\"OTLP Trace connect to: %s with service name: %s\", otelAgentAddr, serviceName)\n\n    traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure(), otlptracegrpc.WithDialOption(grpc.WithBlock()))\n    if err != nil {\n        handleErr(err, \"OTLP Trace gRPC Creation\")\n        return nil\n    }\n\n    tracerProvider := sdktrace.NewTracerProvider(\n        sdktrace.WithBatcher(traceExporter),\n        sdktrace.WithSampler(sdktrace.AlwaysSample()),\n    sdktrace.WithResource(resource.NewWithAttributes(semconv.SchemaURL)))\n\n    otel.SetTracerProvider(tracerProvider)\n    otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))\n\n    tracerExp = traceExporter\n    return func() {\n        // Shutdown will flush any remaining spans and shut down the exporter.\n        handleErr(tracerProvider.Shutdown(ctx), \"failed to shutdown TracerProvider\")\n    }\n}\n\nfunc handleErr(err error, message string) {\n    if err != nil {\n        zap.S().Errorf(\"%s: %v\", message, err)\n    }\n}\n
                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#maingo","title":"\u5728 main.go \u4e2d\u521d\u59cb\u5316\u8ddf\u8e2a\u5668","text":"

                      \u4fee\u6539 main \u51fd\u6570\u4ee5\u5728 main.go \u4e2d\u521d\u59cb\u5316\u8ddf\u8e2a\u5668\u3002\u53e6\u5916\u5f53\u60a8\u7684\u670d\u52a1\u5173\u95ed\u65f6\uff0c\u5e94\u8be5\u8c03\u7528 TracerProvider.Shutdown() \u786e\u4fdd\u5bfc\u51fa\u6240\u6709 Span\u3002\u8be5\u670d\u52a1\u5c06\u8be5\u8c03\u7528\u4f5c\u4e3a\u4e3b\u51fd\u6570\u4e2d\u7684\u5ef6\u8fdf\u51fd\u6570\uff1a

                      func main() {\n    // start otel tracing\n    if shutdown := retryInitTracer(); shutdown != nil {\n            defer shutdown()\n        }\n    ......\n}\n
                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#otel-gin","title":"\u4e3a\u5e94\u7528\u6dfb\u52a0 OTel Gin \u4e2d\u95f4\u4ef6","text":"

                      \u901a\u8fc7\u5728 main.go \u4e2d\u6dfb\u52a0\u4ee5\u4e0b\u884c\u6765\u914d\u7f6e Gin \u4ee5\u4f7f\u7528\u4e2d\u95f4\u4ef6:

                      import (\n    ....\n  \"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin\"\n)\n\nfunc main() {\n    ......\n    r := gin.Default()\n    r.Use(otelgin.Middleware(\"my-app\"))\n    ......\n}\n
                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#_2","title":"\u8fd0\u884c\u5e94\u7528\u7a0b\u5e8f","text":"
                      • \u672c\u5730\u8c03\u8bd5\u8fd0\u884c

                        \u6ce8\u610f: \u6b64\u6b65\u9aa4\u4ec5\u7528\u4e8e\u672c\u5730\u5f00\u53d1\u8c03\u8bd5\uff0c\u751f\u4ea7\u73af\u5883\u4e2d Operator \u4f1a\u81ea\u52a8\u5b8c\u6210\u4ee5\u4e0b\u73af\u5883\u53d8\u91cf\u7684\u6ce8\u5165\u3002

                        \u4ee5\u4e0a\u6b65\u9aa4\u5df2\u7ecf\u5b8c\u6210\u4e86\u521d\u59cb\u5316 SDK \u7684\u5de5\u4f5c\uff0c\u73b0\u5728\u5982\u679c\u9700\u8981\u5728\u672c\u5730\u5f00\u53d1\u8fdb\u884c\u8c03\u8bd5\uff0c\u9700\u8981\u63d0\u524d\u83b7\u53d6\u5230 insight-system \u547d\u540d\u7a7a\u95f4\u4e0b insight-agent-opentelemerty-collector \u7684\u5730\u5740\uff0c\u5047\u8bbe\u4e3a\uff1a insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317 \u3002

                        \u56e0\u6b64\uff0c\u53ef\u4ee5\u5728\u4f60\u672c\u5730\u542f\u52a8\u5e94\u7528\u7a0b\u5e8f\u7684\u65f6\u5019\u6dfb\u52a0\u5982\u4e0b\u73af\u5883\u53d8\u91cf\uff1a

                        OTEL_SERVICE_NAME=my-golang-app OTEL_EXPORTER_OTLP_ENDPOINT=http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317 go run main.go...\n
                      • Run in production

                        Refer to the Environment-variable-only injection annotation section of Non-intrusive Application Enhancement via Operator and add the annotation to the deployment YAML:

                        instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

                        If annotations cannot be used, you can manually add the following environment variables to the deployment YAML:

                      ······\nenv:\n  - name: OTEL_EXPORTER_OTLP_ENDPOINT\n    value: 'http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317'\n  - name: OTEL_SERVICE_NAME\n    value: \"your deployment name\" # (1)!\n  - name: OTEL_K8S_NAMESPACE\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: metadata.namespace\n  - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: spec.nodeName\n  - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: metadata.name\n  - name: OTEL_RESOURCE_ATTRIBUTES\n    value: 'k8s.namespace.name=$(OTEL_K8S_NAMESPACE),k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME)'\n······\n
                      1. Modify this value
                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#_3","title":"Request Routing","text":""},{"location":"end-user/insight/quickstart/otel/golang/golang.html#opentelemetry-gingonic","title":"OpenTelemetry gin/gonic Enhancement","text":"
                      // Add one line to your import() stanza depending upon your request router:\nmiddleware \"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin\"\n

                      Then inject the OpenTelemetry middleware:

                      router.Use(middleware.Middleware(\"my-app\"))\n
                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#opentelemetry-gorillamux","title":"OpenTelemetry gorillamux \u589e\u5f3a","text":"
                      // Add one line to your import() stanza depending upon your request router:\nmiddleware \"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux\"\n

                      Then inject the OpenTelemetry middleware:

                      router.Use(middleware.Middleware(\"my-app\"))\n
                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#grpc","title":"gRPC \u589e\u5f3a","text":"

                      \u540c\u6837\uff0cOpenTelemetry \u4e5f\u53ef\u4ee5\u5e2e\u52a9\u60a8\u81ea\u52a8\u68c0\u6d4b gRPC \u8bf7\u6c42\u3002\u8981\u68c0\u6d4b\u60a8\u62e5\u6709\u7684\u4efb\u4f55 gRPC \u670d\u52a1\u5668\uff0c\u8bf7\u5c06\u62e6\u622a\u5668\u6dfb\u52a0\u5230\u670d\u52a1\u5668\u7684\u5b9e\u4f8b\u5316\u4e2d\u3002

                      import (\n  grpcotel \"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc\"\n)\nfunc main() {\n  [...]\n\n    s := grpc.NewServer(\n        grpc.UnaryInterceptor(grpcotel.UnaryServerInterceptor()),\n        grpc.StreamInterceptor(grpcotel.StreamServerInterceptor()),\n    )\n}\n

                      Note that if your program uses a gRPC client to call third-party services, you also need to add an interceptor to the gRPC client:

                          // import otelgrpc \"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc\"\n    [...]\n\n    conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()),\n        grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()),\n        grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()),\n    )\n
                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#_4","title":"\u5982\u679c\u4e0d\u4f7f\u7528\u8bf7\u6c42\u8def\u7531","text":"
                      import (\n  \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\"\n)\n

                      Everywhere you pass an http.Handler to a ServeMux, you wrap the handler function. For example, you would make the following replacements:

                      - mux.Handle(\"/path\", h)\n+ mux.Handle(\"/path\", otelhttp.NewHandler(h, \"description of path\"))\n---\n- mux.Handle(\"/path\", http.HandlerFunc(f))\n+ mux.Handle(\"/path\", otelhttp.NewHandler(http.HandlerFunc(f), \"description of path\"))\n

                      This way, you can make sure that every function wrapped with otelhttp automatically collects its metadata and starts the corresponding trace. A self-contained sketch follows.
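
                      Below is a minimal runnable sketch (not from the original docs; the route and span name are made up) of a plain net/http server whose handler is wrapped with otelhttp:

                      package main\n\nimport (\n    \"fmt\"\n    \"net/http\"\n\n    \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\"\n)\n\nfunc main() {\n    mux := http.NewServeMux()\n    h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n        fmt.Fprintln(w, \"hello\")\n    })\n    // Wrap the handler so every request starts a span named \"hello-handler\".\n    mux.Handle(\"/hello\", otelhttp.NewHandler(h, \"hello-handler\"))\n    http.ListenAndServe(\":8080\", mux)\n}\n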

                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#_5","title":"\u6570\u636e\u5e93\u8bbf\u95ee\u589e\u5f3a","text":""},{"location":"end-user/insight/quickstart/otel/golang/golang.html#golang-gorm","title":"Golang Gorm","text":"

                      OpenTelemetry \u793e\u533a\u4e5f\u5f00\u53d1\u4e86\u6570\u636e\u5e93\u8bbf\u95ee\u5e93\u7684\u4e2d\u95f4\u4ef6\uff0c\u6bd4\u5982 Gorm:

                      import (\n    \"github.com/uptrace/opentelemetry-go-extra/otelgorm\"\n    \"gorm.io/driver/sqlite\"\n    \"gorm.io/gorm\"\n)\n\ndb, err := gorm.Open(sqlite.Open(\"file::memory:?cache=shared\"), &gorm.Config{})\nif err != nil {\n    panic(err)\n}\n\notelPlugin := otelgorm.NewPlugin(otelgorm.WithDBName(\"mydb\"), # \u7f3a\u5931\u4f1a\u5bfc\u81f4\u6570\u636e\u5e93\u76f8\u5173\u62d3\u6251\u5c55\u793a\u4e0d\u5b8c\u6574\n    otelgorm.WithAttributes(semconv.ServerAddress(\"memory\"))) # \u7f3a\u5931\u4f1a\u5bfc\u81f4\u6570\u636e\u5e93\u76f8\u5173\u62d3\u6251\u5c55\u793a\u4e0d\u5b8c\u6574\nif err := db.Use(otelPlugin); err != nil {\n    panic(err)\n}\n

                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#span","title":"\u81ea\u5b9a\u4e49 Span","text":"

                      \u5f88\u591a\u65f6\u5019\uff0cOpenTelemetry \u63d0\u4f9b\u7684\u4e2d\u95f4\u4ef6\u4e0d\u80fd\u5e2e\u52a9\u6211\u4eec\u8bb0\u5f55\u66f4\u591a\u5185\u90e8\u8c03\u7528\u7684\u51fd\u6570\uff0c\u9700\u8981\u6211\u4eec\u81ea\u5b9a\u4e49 Span \u6765\u8bb0\u5f55

                       \u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n    _, span := otel.Tracer(\"GetServiceDetail\").Start(ctx,\n        \"spanMetricDao.GetServiceDetail\",\n        trace.WithSpanKind(trace.SpanKindInternal))\n    defer span.End()\n  \u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n
                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#span_1","title":"\u5411 span \u6dfb\u52a0\u81ea\u5b9a\u4e49\u5c5e\u6027\u548c\u4e8b\u4ef6","text":"

                      \u4e5f\u53ef\u4ee5\u5c06\u81ea\u5b9a\u4e49\u5c5e\u6027\u6216\u6807\u7b7e\u8bbe\u7f6e\u4e3a Span\u3002\u8981\u6dfb\u52a0\u81ea\u5b9a\u4e49\u5c5e\u6027\u548c\u4e8b\u4ef6\uff0c\u8bf7\u6309\u7167\u4ee5\u4e0b\u6b65\u9aa4\u64cd\u4f5c\uff1a

                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#_6","title":"\u5bfc\u5165\u8ddf\u8e2a\u548c\u5c5e\u6027\u5e93","text":"
                      import (\n    ...\n    \"go.opentelemetry.io/otel/attribute\"\n    \"go.opentelemetry.io/otel/trace\"\n)\n
                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#span_2","title":"\u4ece\u4e0a\u4e0b\u6587\u4e2d\u83b7\u53d6\u5f53\u524d Span","text":"
                      span := trace.SpanFromContext(c.Request.Context())\n
                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#span_3","title":"\u5728\u5f53\u524d Span \u4e2d\u8bbe\u7f6e\u5c5e\u6027","text":"
                      span.SetAttributes(attribute.String(\"controller\", \"books\"))\n
                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#span-event","title":"\u4e3a\u5f53\u524d Span \u6dfb\u52a0 Event","text":"

                      Span events are added with AddEvent on the span object.

                      span.AddEvent(msg)\n
                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#_7","title":"\u8bb0\u5f55\u9519\u8bef\u548c\u5f02\u5e38","text":"
                      import \"go.opentelemetry.io/otel/codes\"\n\n// \u83b7\u53d6\u5f53\u524d span\nspan := trace.SpanFromContext(ctx)\n\n// RecordError \u4f1a\u81ea\u52a8\u5c06\u4e00\u4e2a\u9519\u8bef\u8f6c\u6362\u6210 span even\nspan.RecordError(err)\n\n// \u6807\u8bb0\u8fd9\u4e2a span \u9519\u8bef\nspan.SetStatus(codes.Error, \"internal error\")\n
                      "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#_8","title":"\u53c2\u8003","text":"

                      For demos, refer to: - opentelemetry-demo/productcatalogservice - opentelemetry-collector-contrib/demo

                      "},{"location":"end-user/insight/quickstart/otel/golang/meter.html","title":"\u4f7f\u7528 OTel SDK \u4e3a\u5e94\u7528\u7a0b\u5e8f\u66b4\u9732\u6307\u6807","text":"

                      This document is intended only for users who want to evaluate or explore the OTLP metrics still under development.

                      The OpenTelemetry project requires that the APIs and SDKs for each supported language emit data using the OpenTelemetry Protocol (OTLP).

                      "},{"location":"end-user/insight/quickstart/otel/golang/meter.html#golang","title":"\u9488\u5bf9 Golang \u5e94\u7528\u7a0b\u5e8f","text":"

                      Golang can expose runtime metrics through the SDK. Specifically, add the following methods in your application to enable the metrics exporter:

                      "},{"location":"end-user/insight/quickstart/otel/golang/meter.html#_1","title":"\u5b89\u88c5\u76f8\u5173\u4f9d\u8d56","text":"

                      After switching into the application's source folder, run the following command:

                      go get go.opentelemetry.io/otel \\\n  go.opentelemetry.io/otel/attribute \\\n  go.opentelemetry.io/otel/exporters/prometheus \\\n  go.opentelemetry.io/otel/metric/global \\\n  go.opentelemetry.io/otel/metric/instrument \\\n  go.opentelemetry.io/otel/sdk/metric\n
                      "},{"location":"end-user/insight/quickstart/otel/golang/meter.html#otel-sdk_1","title":"\u4f7f\u7528 OTel SDK \u521b\u5efa\u521d\u59cb\u5316\u51fd\u6570","text":"
                      import (\n    .....\n\n    \"go.opentelemetry.io/otel/attribute\"\n    otelPrometheus \"go.opentelemetry.io/otel/exporters/prometheus\"\n    \"go.opentelemetry.io/otel/metric/global\"\n    \"go.opentelemetry.io/otel/metric/instrument\"\n    \"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram\"\n    controller \"go.opentelemetry.io/otel/sdk/metric/controller/basic\"\n    \"go.opentelemetry.io/otel/sdk/metric/export/aggregation\"\n    processor \"go.opentelemetry.io/otel/sdk/metric/processor/basic\"\n    selector \"go.opentelemetry.io/otel/sdk/metric/selector/simple\"\n)\n\nconst port = 8888 // metrics listen port\n\nfunc (s *insightServer) initMeter() *otelPrometheus.Exporter {\n    s.meter = global.Meter(\"xxx\")\n\n    config := otelPrometheus.Config{\n        DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50},\n        Gatherer:                   prometheus.DefaultGatherer,\n        Registry:                   prometheus.NewRegistry(),\n        Registerer:                 prometheus.DefaultRegisterer,\n    }\n\n    c := controller.New(\n        processor.NewFactory(\n            selector.NewWithHistogramDistribution(\n                histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),\n            ),\n            aggregation.CumulativeTemporalitySelector(),\n            processor.WithMemory(true),\n        ),\n    )\n\n    exporter, err := otelPrometheus.New(config, c)\n    if err != nil {\n        zap.S().Panicf(\"failed to initialize prometheus exporter %v\", err)\n    }\n\n    global.SetMeterProvider(exporter.MeterProvider())\n\n    http.HandleFunc(\"/metrics\", exporter.ServeHTTP)\n\n    go func() {\n        _ = http.ListenAndServe(fmt.Sprintf(\":%d\", port), nil)\n    }()\n\n    zap.S().Info(\"Prometheus server running on \", fmt.Sprintf(\":%d\", port))\n    return exporter\n}\n

                      The method above exposes a metrics endpoint for your application: http://localhost:8888/metrics

                      Then initialize it in main.go:

                      func main() {\n\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n    tp := initMeter()\n\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n}\n

                      In addition, to add custom metrics, you can refer to the following:

                      // exposeLoggingMetric exposes a metric like \"insight_logging_count{} 1\"\nfunc (s *insightServer) exposeLoggingMetric(lserver *log.LogService) {\n    s.meter = global.Meter(\"insight.io/basic\")\n\n    var lock sync.Mutex\n    logCounter, err := s.meter.AsyncFloat64().Counter(\"insight_log_total\")\n    if err != nil {\n        zap.S().Panicf(\"failed to initialize instrument: %v\", err)\n    }\n\n    _ = s.meter.RegisterCallback([]instrument.Asynchronous{logCounter}, func(ctx context.Context) {\n        lock.Lock()\n        defer lock.Unlock()\n        count, err := lserver.Count(ctx)\n        if err == nil && count != -1 { // only observe valid counts\n            logCounter.Observe(ctx, float64(count))\n        }\n    })\n}\n

                      Then call this method in main.go:

                      \u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\ns.exposeLoggingMetric(lservice)\n\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n

                      You can visit http://localhost:8888/metrics to check whether your metrics are working properly.
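
                      To check from Go code instead of a browser or curl, a small sketch (assuming the endpoint exposed above):

                      package main\n\nimport (\n    \"fmt\"\n    \"io\"\n    \"net/http\"\n)\n\nfunc main() {\n    // fetch the metrics endpoint exposed by the exporter above\n    resp, err := http.Get(\"http://localhost:8888/metrics\")\n    if err != nil {\n        panic(err)\n    }\n    defer resp.Body.Close()\n    body, _ := io.ReadAll(resp.Body)\n    fmt.Println(string(body)) // Prometheus text format\n}\n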

                      "},{"location":"end-user/insight/quickstart/otel/golang/meter.html#java","title":"\u9488\u5bf9 Java \u5e94\u7528\u7a0b\u5e8f","text":"

                      For Java, on top of the automatic trace instrumentation already done with the OTel agent, adding the environment variable:

                      OTEL_METRICS_EXPORTER=prometheus\n

                      directly exposes JVM-related metrics. You can visit http://localhost:8888/metrics to check whether your metrics are working properly.

                      Then, together with a Prometheus serviceMonitor, the metrics integration is complete. To expose custom metrics, see opentelemetry-java-docs/prometheus.

                      There are two main steps:

                      • Create a meter provider and specify prometheus as the exporter.

                        /*\n* Copyright The OpenTelemetry Authors\n* SPDX-License-Identifier: Apache-2.0\n*/\n\npackage io.opentelemetry.example.prometheus;\n\nimport io.opentelemetry.api.metrics.MeterProvider;\nimport io.opentelemetry.exporter.prometheus.PrometheusHttpServer;\nimport io.opentelemetry.sdk.metrics.SdkMeterProvider;\nimport io.opentelemetry.sdk.metrics.export.MetricReader;\n\npublic final class ExampleConfiguration {\n\n  /**\n  * Initializes the Meter SDK and configures the prometheus collector with all default settings.\n  *\n  * @param prometheusPort the port to open up for scraping.\n  * @return A MeterProvider for use in instrumentation.\n  */\n  static MeterProvider initializeOpenTelemetry(int prometheusPort) {\n    MetricReader prometheusReader = PrometheusHttpServer.builder().setPort(prometheusPort).build();\n\n    return SdkMeterProvider.builder().registerMetricReader(prometheusReader).build();\n  }\n}\n
                      • Create a custom meter and start the HTTP server

                        package io.opentelemetry.example.prometheus;\n\nimport io.opentelemetry.api.common.Attributes;\nimport io.opentelemetry.api.metrics.Meter;\nimport io.opentelemetry.api.metrics.MeterProvider;\nimport java.util.concurrent.ThreadLocalRandom;\n\n/**\n* Example of using the PrometheusHttpServer to convert OTel metrics to Prometheus format and expose\n* these to a Prometheus instance via a HttpServer exporter.\n*\n* <p>A Gauge is used to periodically measure how many incoming messages are awaiting processing.\n* The Gauge callback gets executed every collection interval.\n*/\npublic final class PrometheusExample {\n  private long incomingMessageCount;\n\n  public PrometheusExample(MeterProvider meterProvider) {\n    Meter meter = meterProvider.get(\"PrometheusExample\");\n    meter\n        .gaugeBuilder(\"incoming.messages\")\n        .setDescription(\"No of incoming messages awaiting processing\")\n        .setUnit(\"message\")\n        .buildWithCallback(result -> result.record(incomingMessageCount, Attributes.empty()));\n  }\n\n  void simulate() {\n    for (int i = 500; i > 0; i--) {\n      try {\n        System.out.println(\n            i + \" Iterations to go, current incomingMessageCount is:  \" + incomingMessageCount);\n        incomingMessageCount = ThreadLocalRandom.current().nextLong(100);\n        Thread.sleep(1000);\n      } catch (InterruptedException e) {\n        // ignored here\n      }\n    }\n  }\n\n  public static void main(String[] args) {\n    int prometheusPort = 8888;\n\n    // it is important to initialize the OpenTelemetry SDK as early as possible in your process.\n    MeterProvider meterProvider = ExampleConfiguration.initializeOpenTelemetry(prometheusPort);\n\n    PrometheusExample prometheusExample = new PrometheusExample(meterProvider);\n\n    prometheusExample.simulate();\n\n    System.out.println(\"Exiting\");\n  }\n}\n

                      Then, once the Java application is running, you can visit http://localhost:8888/metrics to check whether your metrics are working properly.

                      "},{"location":"end-user/insight/quickstart/otel/golang/meter.html#insight","title":"Insight \u91c7\u96c6\u6307\u6807","text":"

                      Last and most importantly, now that your application exposes metrics, Insight needs to collect them.

                      The recommended way to expose metrics is via a servicemonitor or podmonitor.

                      "},{"location":"end-user/insight/quickstart/otel/golang/meter.html#servicemonitorpodmonitor","title":"\u521b\u5efa servicemonitor/podmonitor","text":"

                      A servicemonitor/podmonitor you add must carry the label \"operator.insight.io/managed-by\": \"insight\" to be recognized by the Operator:

                      apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: example-app\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  selector:\n    matchLabels:\n      app: example-app\n  endpoints:\n  - port: web\n  namespaceSelector:\n    any: true\n
                      "},{"location":"end-user/insight/quickstart/otel/java/index.html","title":"\u5f00\u59cb\u76d1\u63a7 Java \u5e94\u7528","text":"
                      1. Java \u5e94\u7528\u94fe\u8def\u63a5\u5165\u4e0e\u76d1\u63a7\u8bf7\u53c2\u8003\u901a\u8fc7 Operator \u5b9e\u73b0\u5e94\u7528\u7a0b\u5e8f\u65e0\u4fb5\u5165\u589e\u5f3a \u6587\u6863\uff0c\u901a\u8fc7\u6ce8\u89e3\u5b9e\u73b0\u81ea\u52a8\u63a5\u5165\u94fe\u8def\u3002

                      2. JVM monitoring for Java applications: how Java applications that already expose JVM metrics, and those that do not yet, integrate with Insight observability.

                        • If your Java application has not yet exposed JVM metrics, refer to the following documents:

                          • Expose JVM metrics using JMX Exporter
                          • Expose JVM metrics using OpenTelemetry Java Agent
                        • If your Java application already exposes JVM metrics, refer to the following document:

                          • Integrate Java applications with existing JVM metrics into observability
                      3. Write TraceId and SpanId into Java application logs to correlate trace data with log data

                      "},{"location":"end-user/insight/quickstart/otel/java/mdc.html","title":"\u5c06 TraceId \u548c SpanId \u5199\u5165 Java \u5e94\u7528\u65e5\u5fd7","text":"

                      This document describes how to use OpenTelemetry to automatically write TraceId and SpanId into Java application logs. Once written into the logs, they let you correlate distributed tracing data with log data for more efficient fault diagnosis and performance analysis.

                      "},{"location":"end-user/insight/quickstart/otel/java/mdc.html#_1","title":"\u652f\u6301\u7684\u65e5\u5fd7\u5e93","text":"

                      For more information, see Logger MDC auto-instrumentation.

                      Logging framework / Versions supporting auto-instrumentation / Dependency required for manual instrumentation: Log4j 1, 1.2+, none; Log4j 2, 2.7+, opentelemetry-log4j-context-data-2.17-autoconfigure; Logback, 1.0+, opentelemetry-logback-mdc-1.0"},{"location":"end-user/insight/quickstart/otel/java/mdc.html#logbackspringboot","title":"Using Logback (Spring Boot projects)","text":"

                      Spring Boot projects have a built-in logging framework and use Logback as the default implementation. If your Java project is a Spring Boot project, only a small amount of configuration is needed to write TraceId into the logs.

                      Set logging.pattern.level in application.properties, adding %mdc{trace_id} and %mdc{span_id} to the log pattern.

                      logging.pattern.level=trace_id=%mdc{trace_id} span_id=%mdc{span_id} %5p ....omitted...\n

                      Example log output:

                      2024-06-26 10:56:31.200 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.a.c.c.C.[Tomcat].[localhost].[/]       : Initializing Spring DispatcherServlet 'dispatcherServlet'\n2024-06-26 10:56:31.201 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.s.web.servlet.DispatcherServlet        : Initializing Servlet 'dispatcherServlet'\n2024-06-26 10:56:31.209 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.s.web.servlet.DispatcherServlet        : Completed initialization in 8 ms\n2024-06-26 10:56:31.296 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=5743699405074f4e  INFO 53724 --- [nio-8081-exec-1] com.example.httpserver.ot.OTServer       : hello world\n
                      "},{"location":"end-user/insight/quickstart/otel/java/mdc.html#log4j2","title":"\u4f7f\u7528 Log4j2","text":"
                      1. \u5728 pom.xml \u4e2d\u6dfb\u52a0 OpenTelemetry Log4j2 \u4f9d\u8d56:

                        Tip

                        Replace OPENTELEMETRY_VERSION with the latest version

                        <dependencies>\n  <dependency>\n    <groupId>io.opentelemetry.instrumentation</groupId>\n    <artifactId>opentelemetry-log4j-context-data-2.17-autoconfigure</artifactId>\n    <version>OPENTELEMETRY_VERSION</version>\n    <scope>runtime</scope>\n  </dependency>\n</dependencies>\n
                      2. Modify the log4j2.xml configuration, adding %X{trace_id} and %X{span_id} to the pattern to automatically write TraceId and SpanId into the logs:

                        <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Configuration>\n  <Appenders>\n    <Console name=\"Console\" target=\"SYSTEM_OUT\">\n      <PatternLayout\n          pattern=\"%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} trace_id=%X{trace_id} span_id=%X{span_id} trace_flags=%X{trace_flags} - %msg%n\"/>\n    </Console>\n  </Appenders>\n  <Loggers>\n    <Root>\n      <AppenderRef ref=\"Console\" level=\"All\"/>\n    </Root>\n  </Loggers>\n</Configuration>\n
                      3. If using Logback, add the OpenTelemetry Logback dependency in pom.xml.

                        Tip

                        Replace OPENTELEMETRY_VERSION with the latest version

                        <dependencies>\n  <dependency>\n    <groupId>io.opentelemetry.instrumentation</groupId>\n    <artifactId>opentelemetry-logback-mdc-1.0</artifactId>\n    <version>OPENTELEMETRY_VERSION</version>\n  </dependency>\n</dependencies>\n
                      4. Modify the logback.xml configuration, adding %X{trace_id} and %X{span_id} to the pattern to automatically write TraceId and SpanId into the logs:

                        <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<configuration>\n  <appender name=\"CONSOLE\" class=\"ch.qos.logback.core.ConsoleAppender\">\n    <encoder>\n      <pattern>%d{HH:mm:ss.SSS} trace_id=%X{trace_id} span_id=%X{span_id} trace_flags=%X{trace_flags} %msg%n</pattern>\n    </encoder>\n  </appender>\n\n  <!-- Just wrap your logging appender, for example ConsoleAppender, with OpenTelemetryAppender -->\n  <appender name=\"OTEL\" class=\"io.opentelemetry.instrumentation.logback.mdc.v1_0.OpenTelemetryAppender\">\n    <appender-ref ref=\"CONSOLE\"/>\n  </appender>\n\n  <!-- Use the wrapped \"OTEL\" appender instead of the original \"CONSOLE\" one -->\n  <root level=\"INFO\">\n    <appender-ref ref=\"OTEL\"/>\n  </root>\n\n</configuration>\n
                      "},{"location":"end-user/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html","title":"\u4f7f\u7528 JMX Exporter \u66b4\u9732 JVM \u76d1\u63a7\u6307\u6807","text":"

                      JMX-Exporter offers two usage modes:

                      1. Run as a standalone process. Parameters specified at JVM startup expose the JMX RMI interface; JMX Exporter calls RMI to fetch the JVM runtime state, converts it to the Prometheus metrics format, and exposes a port for Prometheus to scrape.
                      2. Run inside the JVM process (in-process). Parameters specified at JVM startup run the JMX-Exporter jar as a javaagent; it reads the JVM runtime state in-process, converts it to the Prometheus metrics format, and exposes a port for Prometheus to scrape.

                      Note

                      The first mode is not officially recommended: its configuration is complex, and it requires a separate process whose own monitoring becomes a new problem. This document therefore focuses on the second mode, explaining how to use JMX Exporter to expose JVM metrics in a Kubernetes environment.

                      With the second mode, the JMX Exporter jar file and configuration file must be specified when starting the JVM. The jar is a binary file that is awkward to mount via a configmap, and the configuration file rarely needs changes, so the recommendation is to package both the JMX Exporter jar and its configuration file directly into the business container image.

                      For this mode, the JMX Exporter jar file can either be placed in the business application image or mounted at deployment time. The two approaches are described below:

                      "},{"location":"end-user/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html#jmx-exporter-jar","title":"\u65b9\u5f0f\u4e00\uff1a\u5c06 JMX Exporter JAR \u6587\u4ef6\u6784\u5efa\u81f3\u4e1a\u52a1\u955c\u50cf\u4e2d","text":"

                      The content of prometheus-jmx-config.yaml is as follows:

                      prometheus-jmx-config.yaml
                      ...\nssl: false\nlowercaseOutputName: false\nlowercaseOutputLabelNames: false\nrules:\n- pattern: \".*\"\n

                      Note

                      For more configuration options, see the introduction at the bottom or the official Prometheus documentation.

                      Then prepare the jar file. You can find the latest jar download address on the jmx_exporter GitHub page and refer to the following Dockerfile:

                      FROM openjdk:11.0.15-jre\nWORKDIR /app/\nCOPY target/my-app.jar ./\nCOPY prometheus-jmx-config.yaml ./\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\nENV JAVA_TOOL_OPTIONS=-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\nEXPOSE 8081 8999 8080 8888\nENTRYPOINT java $JAVA_OPTS -jar my-app.jar\n

                      Notes:

                      • Startup parameter format: -javaagent:<jar-path>=<port>:<config-file>
                      • Port 8088 is used here to expose the JVM metrics; change it if it conflicts with the Java application
                      "},{"location":"end-user/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html#init-container","title":"\u65b9\u5f0f\u4e8c\uff1a\u901a\u8fc7 init container \u5bb9\u5668\u6302\u8f7d","text":"

                      First build JMX Exporter into a Docker image. The following Dockerfile is for reference only:

                      FROM alpine/curl:3.14\nWORKDIR /app/\n# copy the config file created earlier into the image\nCOPY prometheus-jmx-config.yaml ./\n# download the jmx prometheus javaagent jar online\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\n

                      Build the image from the Dockerfile above: docker build -t my-jmx-exporter .

                      Add the following init container to the Java application's deployment YAML:

                      Click to expand the YAML file
                      apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-demo-app\n  labels:\n    app: my-demo-app\nspec:\n  selector:\n    matchLabels:\n      app: my-demo-app\n  template:\n    metadata:\n      labels:\n        app: my-demo-app\n    spec:\n      imagePullSecrets:\n      - name: registry-pull\n      initContainers:\n      - name: jmx-sidecar\n        image: my-jmx-exporter\n        command: [\"cp\", \"-r\", \"/app/jmx_prometheus_javaagent-0.17.2.jar\", \"/target/jmx_prometheus_javaagent-0.17.2.jar\"]  ➊\n        volumeMounts:\n        - name: sidecar\n          mountPath: /target\n      containers:\n      - image: my-demo-app-image\n        name: my-demo-app\n        resources:\n          requests:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n          limits:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n        ports:\n        - containerPort: 18083\n        env:\n        - name: JAVA_TOOL_OPTIONS\n          value: \"-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\" ➋\n        volumeMounts:\n        - name: host-time\n          mountPath: /etc/localtime\n          readOnly: true\n        - name: sidecar\n          mountPath: /sidecar\n      volumes:\n      - name: host-time\n        hostPath:\n          path: /etc/localtime\n      - name: sidecar  # shared agent folder\n        emptyDir: {}\n      restartPolicy: Always\n

                      After the above changes, the sample application my-demo-app can expose JVM metrics. Once the service is running, the Prometheus-format metrics it exposes can be accessed at http://localhost:8088.

                      Next, refer to Integrate Java applications with existing JVM metrics into observability.

                      "},{"location":"end-user/insight/quickstart/otel/java/jvm-monitor/legacy-jvm.html","title":"\u5df2\u6709 JVM \u6307\u6807\u7684 Java \u5e94\u7528\u5bf9\u63a5\u53ef\u89c2\u6d4b\u6027","text":"

                      If your Java application exposes JVM metrics some other way (for example, Spring Boot Actuator), the monitoring data still needs to be collected. You can add Kubernetes annotations to the workload so that Insight collects the existing JVM metrics:

                      annotations:\n  insight.opentelemetry.io/metric-scrape: \"true\" # whether to scrape\n  insight.opentelemetry.io/metric-path: \"/\"      # metrics path\n  insight.opentelemetry.io/metric-port: \"9464\"   # metrics port\n

                      For example, add the annotations to my-deployment-app:

                      apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-app\nspec:\n  selector:\n    matchLabels:\n      app: my-deployment-app\n      app.kubernetes.io/name: my-deployment-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-deployment-app\n        app.kubernetes.io/name: my-deployment-app\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\" # whether to scrape\n        insight.opentelemetry.io/metric-path: \"/\"      # metrics path\n        insight.opentelemetry.io/metric-port: \"9464\"   # metrics port\n

                      Here is a complete example:

                      ---\napiVersion: v1\nkind: Service\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  type: NodePort\n  selector:\n    #app: my-deployment-with-auto-instrumentation-app\n    app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  ports:\n    - name: http\n      port: 8080\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  selector:\n    matchLabels:\n      #app: my-deployment-with-auto-instrumentation-app\n      app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\" # whether to scrape\n        insight.opentelemetry.io/metric-path: \"/actuator/prometheus\" # metrics path\n        insight.opentelemetry.io/metric-port: \"8080\"   # metrics port\n    spec:\n      containers:\n        - name: myapp\n          image: docker.m.daocloud.io/wutang/spring-boot-actuator-prometheus-metrics-demo\n          ports:\n            - name: http\n              containerPort: 8080\n          resources:\n            limits:\n              cpu: 500m\n              memory: 800Mi\n            requests:\n              cpu: 200m\n              memory: 400Mi\n

                      In the example above, Insight scrapes the Prometheus metrics exposed by Spring Boot Actuator via :8080/actuator/prometheus.

                      "},{"location":"end-user/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html","title":"\u4f7f\u7528 OpenTelemetry Java Agent \u66b4\u9732 JVM \u76d1\u63a7\u6307\u6807","text":"

                      In OpenTelemetry Agent v1.20.0 and later, the agent includes a JMX Metric Insight module. If your application is already integrated with the OpenTelemetry Agent for trace collection, no separate agent is needed to expose JMX metrics. The OpenTelemetry Agent likewise collects and exposes metrics by detecting the metrics published by locally available MBeans in the application.

                      The OpenTelemetry Agent also ships with built-in monitoring samples for common Java servers and frameworks; refer to the predefined metrics.

                      Using the OpenTelemetry Java Agent likewise requires deciding how to mount the JAR into the container. Besides the JAR-mounting approach shown for JMX Exporter above, you can also leverage the Operator provided by OpenTelemetry to automatically enable JVM metrics exposure for your application.


                      However, as of the current version, you still need to manually add the corresponding annotations to your application before the JVM data is collected by Insight; for the specific annotations, refer to Integrate Java applications with existing JVM metrics into observability.

                      "},{"location":"end-user/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html#java","title":"\u4e3a Java \u4e2d\u95f4\u4ef6\u66b4\u9732\u6307\u6807","text":"

                      The OpenTelemetry Agent also has built-in samples for middleware monitoring; refer to the predefined metrics.

                      No target type is specified by default; you need to set it via the -Dotel.jmx.target.system JVM option, for example -Dotel.jmx.target.system=jetty,kafka-broker.

                      "},{"location":"end-user/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html#_1","title":"\u53c2\u8003","text":"
                      • Gaining JMX Metric Insights with the OpenTelemetry Java Agent

                      • Otel jmx metrics

                      "},{"location":"end-user/insight/quickstart/other/install-agent-on-ocp.html","title":"OpenShift \u5b89\u88c5 Insight Agent","text":"

                      Although OpenShift ships with its own monitoring system, we still install Insight Agent because of certain conventions around data collection.

                      In addition to the basic installation configuration, the following parameters need to be added when running helm install:

                      ## fluent-bit related parameters\n--set fluent-bit.ocp.enabled=true \\\n--set fluent-bit.serviceAccount.create=false \\\n--set fluent-bit.securityContext.runAsUser=0 \\\n--set fluent-bit.securityContext.seLinuxOptions.type=spc_t \\\n--set fluent-bit.securityContext.readOnlyRootFilesystem=false \\\n--set fluent-bit.securityContext.allowPrivilegeEscalation=false \\\n\n## enable the Prometheus (CR) adaptation for OpenShift 4.x\n--set compatibility.openshift.prometheus.enabled=true \\\n\n## disable the higher-version Prometheus instance\n--set kube-prometheus-stack.prometheus.enabled=false \\\n--set kube-prometheus-stack.kubeApiServer.enabled=false \\\n--set kube-prometheus-stack.kubelet.enabled=false \\\n--set kube-prometheus-stack.kubeControllerManager.enabled=false \\\n--set kube-prometheus-stack.coreDns.enabled=false \\\n--set kube-prometheus-stack.kubeDns.enabled=false \\\n--set kube-prometheus-stack.kubeEtcd.enabled=false \\\n--set kube-prometheus-stack.kubeScheduler.enabled=false \\\n--set kube-prometheus-stack.kubeStateMetrics.enabled=false \\\n--set kube-prometheus-stack.nodeExporter.enabled=false \\\n\n## restrict the namespaces handled by PrometheusOperator to avoid competing with OpenShift's built-in PrometheusOperator\n--set kube-prometheus-stack.prometheusOperator.kubeletService.namespace=\"insight-system\" \\\n--set kube-prometheus-stack.prometheusOperator.prometheusInstanceNamespaces=\"insight-system\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[0]=\"openshift-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[1]=\"openshift-user-workload-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[2]=\"openshift-customer-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[3]=\"openshift-route-monitor-operator\" \\\n
                      "},{"location":"end-user/insight/quickstart/other/install-agent-on-ocp.html#openshift-prometheus","title":"\u901a\u8fc7 OpenShift \u81ea\u8eab\u673a\u5236\uff0c\u5c06\u7cfb\u7edf\u76d1\u63a7\u6570\u636e\u5199\u5165 Prometheus \u4e2d","text":"
                      apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: cluster-monitoring-config\n  namespace: openshift-monitoring\ndata:\n  config.yaml: |\n    prometheusK8s:\n      remoteWrite:\n        - queueConfig:\n            batchSendDeadline: 60s\n            maxBackoff: 5s\n            minBackoff: 30ms\n            minShards: 1\n            capacity: 5000\n            maxSamplesPerSend: 1000\n            maxShards: 100\n          remoteTimeout: 30s\n          url: http://insight-agent-prometheus.insight-system.svc.cluster.local:9090/api/v1/write\n          writeRelabelConfigs:\n            - action: keep\n              regex: etcd|kubelet|node-exporter|apiserver|kube-state-metrics\n              sourceLabels:\n                - job\n
                      "},{"location":"end-user/insight/quickstart/res-plan/index.html","title":"\u90e8\u7f72\u5bb9\u91cf\u89c4\u5212","text":"

                      By default, the observability module sets resource limits to avoid consuming excessive resources. An observability system has to process large volumes of data, and if capacity planning is unreasonable, the system may become overloaded, affecting stability and reliability.

                      "},{"location":"end-user/insight/quickstart/res-plan/index.html#_2","title":"\u89c2\u6d4b\u7ec4\u4ef6\u7684\u8d44\u6e90\u89c4\u5212","text":"

                      The observability module consists of Insight and Insight Agent. Insight is mainly responsible for storing, analyzing, and displaying observability data, while Insight Agent handles data collection, processing, and uploading.

                      "},{"location":"end-user/insight/quickstart/res-plan/index.html#_3","title":"\u5b58\u50a8\u7ec4\u4ef6\u7684\u5bb9\u91cf\u89c4\u5212","text":"

                      Insight's storage components are mainly ElasticSearch and VictoriaMetrics. ElasticSearch is responsible for storing and querying log and trace data, while VictoriaMetrics is responsible for storing and querying metric data.

                      • VictoriaMetrics: its disk usage depends on the metrics stored; estimate the capacity per the vmstorage disk planning and adjust the vmstorage disk accordingly.
                      "},{"location":"end-user/insight/quickstart/res-plan/index.html#_4","title":"\u91c7\u96c6\u5668\u7684\u8d44\u6e90\u89c4\u5212","text":"

                      The Insight Agent collectors include Prometheus. Although Prometheus itself is an independent component, within Insight Agent it is used for data collection, so its resources need to be planned.

                      • Prometheus: its resource usage depends on the volume of metrics collected; refer to Prometheus resource planning for adjustments.
                      "},{"location":"end-user/insight/quickstart/res-plan/modify-vms-disk.html","title":"vmstorge \u78c1\u76d8\u6269\u5bb9","text":"

                      This document describes how to expand the vmstorage disk; for vmstorage disk sizing, refer to vmstorage disk capacity planning.

                      "},{"location":"end-user/insight/quickstart/res-plan/modify-vms-disk.html#_1","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"end-user/insight/quickstart/res-plan/modify-vms-disk.html#_2","title":"\u5f00\u542f\u5b58\u50a8\u6c60\u6269\u5bb9","text":"
                      1. \u4ee5\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7ba1\u7406\u5458\u6743\u9650\u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\uff0c\u70b9\u51fb \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \uff0c\u70b9\u51fb kpanda-global-cluster \u96c6\u7fa4\u3002

                      2. In the left navigation, select Container Storage -> PersistentVolumeClaims (PVC) and find the PVC bound to vmstorage.

                      3. Click a vmstorage PVC to open its details and confirm the storage pool the PVC is bound to.

                      4. In the left navigation, select Container Storage -> StorageClasses (SC), find local-path, click the ┇ on the right of the target, and select Edit in the pop-up menu.

                      5. Enable Expansion, then click OK.

                      "},{"location":"end-user/insight/quickstart/res-plan/modify-vms-disk.html#vmstorage","title":"\u66f4\u6539 vmstorage \u7684\u78c1\u76d8\u5bb9\u91cf","text":"
                      1. \u4ee5\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7ba1\u7406\u5458\u6743\u9650\u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\uff0c\u8fdb\u5165 kpanda-global-cluster \u96c6\u7fa4\u8be6\u60c5\u3002

                      2. In the left navigation, select Custom Resources and find the vmcluster custom resource.

                      3. Click the vmcluster custom resource to open its details page, switch to the insight-system namespace, and select Edit YAML from the menu on the right of insight-victoria-metrics-k8s-stack.

                      4. Make the changes as illustrated, then click OK.

                      5. Select Container Storage -> PersistentVolumeClaims (PVC) in the left navigation again, find the PVC bound to vmstorage, and confirm the change has taken effect. On a PVC details page, click the associated PersistentVolume (PV).

                      6. Open the PV details page and click the Update button in the upper-right corner.

                      7. Modify Capacity, click OK, and wait a moment until the expansion succeeds.

                      "},{"location":"end-user/insight/quickstart/res-plan/modify-vms-disk.html#_3","title":"\u514b\u9686\u5b58\u50a8\u5377","text":"

                      If the volume expansion fails, you can clone the storage volume by following the method below.

                      1. Log in to the AI platform with global service cluster administrator privileges and enter the kpanda-global-cluster cluster details.

                      2. In the left navigation, select Workloads -> StatefulSets, find the vmstorage StatefulSet, click the ┇ on the right of the target, and select Status -> Stop -> OK in the pop-up menu.

                      3. After logging in to a master node of the kpanda-global-cluster cluster from the command line, run the following command to copy the vm-data directory from the vmstorage container, storing the metric data locally:

                        kubectl cp -n insight-system vmstorage-insight-victoria-metrics-k8s-stack-1:vm-data ./vm-data\n
                      4. Log in to the AI platform, enter the kpanda-global-cluster cluster details, select Container Storage -> PersistentVolumes (PV) in the left navigation, click Clone in the upper-right corner, and modify the capacity of the volume.

                      5. Delete the previous vmstorage data volume.

                      6. Wait a moment; once the PVC is bound to the cloned volume, run the following command to import the data exported in step 3 into the corresponding container, then start the previously stopped vmstorage.

                        kubectl cp -n insight-system ./vm-data vmstorage-insight-victoria-metrics-k8s-stack-1:vm-data\n
                      "},{"location":"end-user/insight/quickstart/res-plan/prometheus-res.html","title":"Prometheus \u8d44\u6e90\u89c4\u5212","text":"

                      In actual use, Prometheus's CPU and memory usage can exceed the configured resources, affected by the number of containers in the cluster and by whether Istio is enabled.

                      To ensure Prometheus runs normally on clusters of different sizes, adjust its resources according to the actual scale of the cluster.

                      "},{"location":"end-user/insight/quickstart/res-plan/prometheus-res.html#_1","title":"\u53c2\u8003\u8d44\u6e90\u89c4\u5212","text":"

                      With the mesh disabled, testing shows the relationship between the volume of system job metrics and the number of pods to be: series count = 800 x pod count

                      With the service mesh enabled, the volume of Istio-related metrics generated by pods is on the order of: series count = 768 x pod count

                      "},{"location":"end-user/insight/quickstart/res-plan/prometheus-res.html#_2","title":"\u5f53\u672a\u5f00\u542f\u670d\u52a1\u7f51\u683c\u65f6","text":"

                      The following is the recommended Prometheus resource planning when the service mesh is not enabled:

                      Cluster size (Pods) / Metric volume (mesh disabled) / CPU (cores) / Memory: 100, 80k, Request 0.5 / Limit 1, Request 2GB / Limit 4GB; 200, 160k, Request 1 / Limit 1.5, Request 3GB / Limit 6GB; 300, 240k, Request 1 / Limit 2, Request 3GB / Limit 6GB; 400, 320k, Request 1 / Limit 2, Request 4GB / Limit 8GB; 500, 400k, Request 1.5 / Limit 3, Request 5GB / Limit 10GB; 800, 640k, Request 2 / Limit 4, Request 8GB / Limit 16GB; 1000, 800k, Request 2.5 / Limit 5, Request 9GB / Limit 18GB; 2000, 1.6M, Request 3.5 / Limit 7, Request 20GB / Limit 40GB; 3000, 2.4M, Request 4 / Limit 8, Request 33GB / Limit 66GB"},{"location":"end-user/insight/quickstart/res-plan/prometheus-res.html#_3","title":"When the service mesh is enabled","text":"

                      The following is the recommended Prometheus resource planning when the service mesh is enabled:

                      Cluster size (Pods) / Metric volume (mesh enabled) / CPU (cores) / Memory: 100, 150k, Request 1 / Limit 2, Request 3GB / Limit 6GB; 200, 310k, Request 2 / Limit 3, Request 5GB / Limit 10GB; 300, 460k, Request 2 / Limit 4, Request 6GB / Limit 12GB; 400, 620k, Request 2 / Limit 4, Request 8GB / Limit 16GB; 500, 780k, Request 3 / Limit 6, Request 10GB / Limit 20GB; 800, 1.25M, Request 4 / Limit 8, Request 15GB / Limit 30GB; 1000, 1.56M, Request 5 / Limit 10, Request 18GB / Limit 36GB; 2000, 3.12M, Request 7 / Limit 14, Request 40GB / Limit 80GB; 3000, 4.68M, Request 8 / Limit 16, Request 65GB / Limit 130GB

                      Note

                      1. The pod count in the table refers to pods running essentially stably in the cluster; a large number of pod restarts causes a short-term surge in metric volume, and resources then need to be raised accordingly.
                      2. Prometheus keeps two hours of data in memory by default, and with Remote Write enabled in the cluster it consumes additional memory; a resource overcommit ratio of 2 is recommended.
                      3. The values in the table are recommendations for general cases. For environments with precise resource requirements, check the actual resource usage of the corresponding Prometheus after the cluster has been running for a while and configure it precisely.
                      "},{"location":"end-user/insight/quickstart/res-plan/vms-res-plan.html","title":"vmstorage \u78c1\u76d8\u5bb9\u91cf\u89c4\u5212","text":"

                      vmstorage is responsible for storing multi-cluster observability metrics. To ensure its stability, adjust the vmstorage disk capacity according to the number and scale of clusters. For more information, see vmstorage retention and disk space.

                      "},{"location":"end-user/insight/quickstart/res-plan/vms-res-plan.html#_1","title":"\u6d4b\u8bd5\u7ed3\u679c","text":"

                      After observing the vmstorage disks of clusters of different sizes for 14 days, we found that vmstorage disk usage is positively correlated with the volume of metrics it stores and with the disk footprint of a single data point.

                      1. Instantaneous metric volume: increase(vm_rows{type != \"indexdb\"}[30s]) gives the number of metrics added within 30s
                      2. Disk footprint of a single data point: sum(vm_data_size_bytes{type!=\"indexdb\"}) / sum(vm_rows{type != \"indexdb\"})
                      "},{"location":"end-user/insight/quickstart/res-plan/vms-res-plan.html#_2","title":"\u8ba1\u7b97\u65b9\u6cd5","text":"

                      Disk usage = instantaneous metric volume x 2 x disk footprint per data point x 60 x 24 x retention period (days)

                      Parameter notes:

                      1. Disk usage is in bytes.
                      2. Retention (days) x 60 x 24 converts days into minutes for the calculation.
                      3. Prometheus in Insight Agent scrapes every 30s by default, so one minute produces twice the metric volume (the factor of 2).
                      4. The default retention period in vmstorage is 1 month; to change it, see Modify system configuration.

                      Warning

                      This formula is a general-purpose approach; it is recommended to reserve extra disk capacity on top of the calculated result to ensure vmstorage runs normally.

                      "},{"location":"end-user/insight/quickstart/res-plan/vms-res-plan.html#_3","title":"\u53c2\u8003\u5bb9\u91cf","text":"

                      The data in the tables assumes the default retention of one month (30 days) and a per-data-point disk footprint of 0.9 bytes. In multi-cluster scenarios, the pod count is the total pod count across all clusters.

                      "},{"location":"end-user/insight/quickstart/res-plan/vms-res-plan.html#_4","title":"\u5f53\u672a\u5f00\u542f\u670d\u52a1\u7f51\u683c\u65f6","text":"\u96c6\u7fa4\u89c4\u6a21 (Pod \u6570) \u6307\u6807\u91cf \u78c1\u76d8\u5bb9\u91cf 100 8w 6 GiB 200 16w 12 GiB 300 24w 18 GiB 400 32w 24 GiB 500 40w 30 GiB 800 64w 48 GiB 1000 80w 60 GiB 2000 160w 120 GiB 3000 240w 180 GiB"},{"location":"end-user/insight/quickstart/res-plan/vms-res-plan.html#_5","title":"\u5f53\u5f00\u542f\u670d\u52a1\u7f51\u683c\u65f6","text":"\u96c6\u7fa4\u89c4\u6a21 (Pod \u6570) \u6307\u6807\u91cf \u78c1\u76d8\u5bb9\u91cf 100 15w 12 GiB 200 31w 24 GiB 300 46w 36 GiB 400 62w 48 GiB 500 78w 60 GiB 800 125w 94 GiB 1000 156w 120 GiB 2000 312w 235 GiB 3000 468w 350 GiB"},{"location":"end-user/insight/quickstart/res-plan/vms-res-plan.html#_6","title":"\u4e3e\u4f8b\u8bf4\u660e","text":"

                      The AI platform has two clusters: the global service cluster (service mesh enabled) runs 500 pods and the worker cluster (service mesh disabled) runs 1000 pods, with metrics expected to be retained for 30 days.

                      • Metric volume in the global service cluster: 800 x 500 + 768 x 500 = 784000
                      • Metric volume in the worker cluster: 800 x 1000 = 800000

                      The vmstorage disk usage should then be set to (784000 + 800000) x 2 x 0.9 x 60 x 24 x 30 = 123171840000 bytes ≈ 115 GiB
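
                      A small Go sketch of the same arithmetic, assuming the figures above (0.9 bytes per data point, 30-day retention):

                      package main\n\nimport \"fmt\"\n\nfunc main() {\n    const (\n        seriesGlobal = 800*500 + 768*500 // global service cluster (mesh enabled)\n        seriesWorker = 800 * 1000        // worker cluster (mesh disabled)\n        bytesPerDP   = 0.9               // disk footprint per data point, in bytes\n        days         = 30                // retention period\n    )\n    // disk usage = series x 2 x bytes per data point x 60 x 24 x days\n    total := (seriesGlobal + seriesWorker) * 2 * bytesPerDP * 60 * 24 * days\n    fmt.Printf(\"%.0f bytes = %.1f GiB\\n\", total, total/(1<<30))\n}\n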

                      Note

                      The relationship between a cluster's metric volume and its pod count is described in Prometheus resource planning.

                      "},{"location":"end-user/insight/system-config/modify-config.html","title":"\u4fee\u6539\u7cfb\u7edf\u914d\u7f6e","text":"

                      Observability persists metric, log, and trace data by default; you can follow this document to modify the system configuration. This document applies only to the built-in Elasticsearch deployment; if you use an external Elasticsearch, adjust it yourself.

                      "},{"location":"end-user/insight/system-config/modify-config.html#_2","title":"\u5982\u4f55\u4fee\u6539\u6307\u6807\u6570\u636e\u4fdd\u7559\u671f\u9650","text":"

                      \u5148 ssh \u767b\u5f55\u5230\u5bf9\u5e94\u7684\u8282\u70b9\uff0c\u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\u4fee\u6539\u6307\u6807\u6570\u636e\u4fdd\u7559\u671f\u9650\u3002

                      1. Run the following command:

                        kubectl edit vmcluster insight-victoria-metrics-k8s-stack -n insight-system\n
                      2. In the YAML file, the default value of retentionPeriod is 14, in days. Modify the parameter as needed.

                        apiVersion: operator.victoriametrics.com/v1beta1\nkind: VMCluster\nmetadata:\n  annotations:\n    meta.helm.sh/release-name: insight\n    meta.helm.sh/release-namespace: insight-system\n  creationTimestamp: \"2022-08-25T04:31:02Z\"\n  finalizers:\n  - apps.victoriametrics.com/finalizer\n  generation: 2\n  labels:\n    app.kubernetes.io/instance: insight\n    app.kubernetes.io/managed-by: Helm\n    app.kubernetes.io/name: victoria-metrics-k8s-stack\n    app.kubernetes.io/version: 1.77.2\n    helm.sh/chart: victoria-metrics-k8s-stack-0.9.3\n  name: insight-victoria-metrics-k8s-stack\n  namespace: insight-system\n  resourceVersion: \"123007381\"\n  uid: 55cee8d6-c651-404b-b2c9-50603b405b54\nspec:\n  replicationFactor: 1\n  retentionPeriod: \"14\"\n  vminsert:\n    extraArgs:\n      maxLabelsPerTimeseries: \"45\"\n    image:\n      repository: docker.m.daocloud.io/victoriametrics/vminsert\n      tag: v1.80.0-cluster\n      replicaCount: 1\n
                      3. After the change is saved, the pods of the component responsible for storing metrics restart automatically; just wait a moment.
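                      If you prefer a non-interactive change, the same field can be patched directly (a sketch; adjust the value to the desired number of days):

                        kubectl patch vmcluster insight-victoria-metrics-k8s-stack -n insight-system \
                          --type merge -p '{"spec":{"retentionPeriod":"30"}}'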

                      "},{"location":"end-user/insight/system-config/modify-config.html#_3","title":"\u5982\u4f55\u4fee\u6539\u65e5\u5fd7\u6570\u636e\u5b58\u50a8\u65f6\u957f","text":"

                      \u5148 ssh \u767b\u5f55\u5230\u5bf9\u5e94\u7684\u8282\u70b9\uff0c\u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\u4fee\u6539\u65e5\u5fd7\u6570\u636e\u4fdd\u7559\u671f\u9650\uff1a

                      "},{"location":"end-user/insight/system-config/modify-config.html#json","title":"\u65b9\u6cd5\u4e00\uff1a\u4fee\u6539 Json \u6587\u4ef6","text":"
                      1. \u4fee\u6539\u4ee5\u4e0b\u6587\u4ef6\u4e2d rollover \u5b57\u6bb5\u4e2d\u7684 max_age \u53c2\u6570\uff0c\u5e76\u8bbe\u7f6e\u4fdd\u7559\u671f\u9650\uff0c\u9ed8\u8ba4\u5b58\u50a8\u65f6\u957f\u4e3a 7d \u3002\u6ce8\u610f\u9700\u8981\u4fee\u6539\u7b2c\u4e00\u884c\u4e2d\u7684 Elastic \u7528\u6237\u540d\u548c\u5bc6\u7801\u3001IP \u5730\u5740\u548c\u7d22\u5f15\u3002

                        curl  --insecure --location -u\"elastic:amyVt4o826e322TUVi13Ezw6\" -X PUT \"https://172.30.47.112:30468/_ilm/policy/insight-es-k8s-logs-policy?pretty\" -H 'Content-Type: application/json' -d'\n{\n    \"policy\": {\n        \"phases\": {\n            \"hot\": {\n                \"min_age\": \"0ms\",\n                \"actions\": {\n                    \"set_priority\": {\n                        \"priority\": 100\n                    },\n                    \"rollover\": {\n                        \"max_age\": \"8d\",\n                        \"max_size\": \"10gb\"\n                    }\n                }\n            },\n            \"warm\": {\n                \"min_age\": \"10d\",\n                \"actions\": {\n                    \"forcemerge\": {\n                        \"max_num_segments\": 1\n                    }\n                }\n            },\n            \"delete\": {\n                \"min_age\": \"30d\",\n                \"actions\": {\n                    \"delete\": {}\n                }\n            }\n        }\n    }\n}'\n
                      2. After making the changes, run the command above. If it prints the following output, the modification succeeded.

                        {\n\"acknowledged\" : true\n}\n
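                        To confirm that the new max_age value took effect, the policy can be read back with a GET request (a sketch; substitute your own credentials, address, and policy name):

                          curl --insecure -u"elastic:<password>" \
                            "https://<elasticsearch-address>/_ilm/policy/insight-es-k8s-logs-policy?pretty"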
                      "},{"location":"end-user/insight/system-config/modify-config.html#ui","title":"\u65b9\u6cd5\u4e8c\uff1a\u4ece UI \u4fee\u6539","text":"
                      1. \u767b\u5f55 kibana \uff0c\u9009\u62e9\u5de6\u4fa7\u5bfc\u822a\u680f Stack Management \u3002

                      2. Select Index Lifecycle Policies in the left navigation, find the policy insight-es-k8s-logs-policy, and click it to enter the details.

                      3. Expand the Hot phase configuration panel and modify the Maximum age parameter to set the retention period; the default retention is 7d.

                      4. After making the changes, click Save policy at the bottom of the page to apply them.

                      "},{"location":"end-user/insight/system-config/modify-config.html#_4","title":"\u5982\u4f55\u4fee\u6539\u94fe\u8def\u6570\u636e\u5b58\u50a8\u65f6\u957f","text":"

                      \u5148 ssh \u767b\u5f55\u5230\u5bf9\u5e94\u7684\u8282\u70b9\uff0c\u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\u4fee\u6539\u94fe\u8def\u6570\u636e\u4fdd\u7559\u671f\u9650\uff1a

                      "},{"location":"end-user/insight/system-config/modify-config.html#json_1","title":"\u65b9\u6cd5\u4e00\uff1a\u4fee\u6539 Json \u6587\u4ef6","text":"
                      1. \u4fee\u6539\u4ee5\u4e0b\u6587\u4ef6\u4e2d rollover \u5b57\u6bb5\u4e2d\u7684 max_age \u53c2\u6570\uff0c\u5e76\u8bbe\u7f6e\u4fdd\u7559\u671f\u9650\uff0c\u9ed8\u8ba4\u5b58\u50a8\u65f6\u957f\u4e3a 7d \u3002\u6ce8\u610f\u9700\u8981\u4fee\u6539\u7b2c\u4e00\u884c\u4e2d\u7684 Elastic \u7528\u6237\u540d\u548c\u5bc6\u7801\u3001IP \u5730\u5740\u548c\u7d22\u5f15\u3002

                        curl --insecure --location -u\"elastic:amyVt4o826e322TUVi13Ezw6\" -X PUT \"https://172.30.47.112:30468/_ilm/policy/jaeger-ilm-policy?pretty\" -H 'Content-Type: application/json' -d'\n{\n    \"policy\": {\n        \"phases\": {\n            \"hot\": {\n                \"min_age\": \"0ms\",\n                \"actions\": {\n                    \"set_priority\": {\n                        \"priority\": 100\n                    },\n                    \"rollover\": {\n                        \"max_age\": \"6d\",\n                        \"max_size\": \"10gb\"\n                    }\n                }\n            },\n            \"warm\": {\n                \"min_age\": \"10d\",\n                \"actions\": {\n                    \"forcemerge\": {\n                        \"max_num_segments\": 1\n                    }\n                }\n            },\n            \"delete\": {\n                \"min_age\": \"30d\",\n                \"actions\": {\n                    \"delete\": {}\n                }\n            }\n        }\n    }\n}'\n
                      2. After making the changes, run the command above in a console. If it prints the following output, the modification succeeded.

                        {\n\"acknowledged\" : true\n}\n
                      "},{"location":"end-user/insight/system-config/modify-config.html#ui_1","title":"\u65b9\u6cd5\u4e8c\uff1a\u4ece UI \u4fee\u6539","text":"
                      1. \u767b\u5f55 kibana \uff0c\u9009\u62e9\u5de6\u4fa7\u5bfc\u822a\u680f Stack Management \u3002

                      2. Select Index Lifecycle Policies in the left navigation, find the policy jaeger-ilm-policy, and click it to enter the details.

                      3. Expand the Hot phase configuration panel and modify the Maximum age parameter to set the retention period; the default retention is 7d.

                      4. After making the changes, click Save policy at the bottom of the page to apply them.

                      "},{"location":"end-user/insight/system-config/system-component.html","title":"\u7cfb\u7edf\u7ec4\u4ef6","text":"

                      \u5728\u7cfb\u7edf\u7ec4\u4ef6\u9875\u9762\u53ef\u5feb\u901f\u7684\u67e5\u770b\u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u4e2d\u7cfb\u7edf\u7ec4\u4ef6\u7684\u8fd0\u884c\u72b6\u6001\uff0c\u5f53\u7cfb\u7528\u7ec4\u4ef6\u53d1\u751f\u6545\u969c\u65f6\uff0c\u4f1a\u5bfc\u81f4\u53ef\u89c2\u6d4b\u6a21\u5757\u4e2d\u7684\u90e8\u5206\u529f\u80fd\u4e0d\u53ef\u7528\u3002

                      1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u4ea7\u54c1\u6a21\u5757\uff0c
                      2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u7cfb\u7edf\u7ba1\u7406 -> \u7cfb\u7edf\u7ec4\u4ef6 \u3002

                      "},{"location":"end-user/insight/system-config/system-component.html#_2","title":"\u7ec4\u4ef6\u8bf4\u660e","text":"\u6a21\u5757 \u7ec4\u4ef6\u540d\u79f0 \u8bf4\u660e \u6307\u6807 vminsert-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u5c06\u5404\u96c6\u7fa4\u4e2d Prometheus \u91c7\u96c6\u5230\u7684\u6307\u6807\u6570\u636e\u5199\u5165\u5b58\u50a8\u7ec4\u4ef6\u3002\u8be5\u7ec4\u4ef6\u5f02\u5e38\u4f1a\u5bfc\u81f4\u65e0\u6cd5\u5199\u5165\u5de5\u4f5c\u96c6\u7fa4\u7684\u6307\u6807\u6570\u636e\u3002 \u6307\u6807 vmalert-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u751f\u6548 VM Rule \u4e2d\u914d\u7f6e\u7684 recording \u548c Alert \u89c4\u5219\uff0c\u5e76\u5c06\u89e6\u53d1\u7684\u544a\u8b66\u89c4\u5219\u53d1\u9001\u7ed9 alertmanager\u3002 \u6307\u6807 vmalertmanager-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u5728\u544a\u8b66\u89e6\u65f6\u53d1\u9001\u6d88\u606f\u3002\u8be5\u7ec4\u4ef6\u5f02\u5e38\u4f1a\u5bfc\u81f4\u65e0\u6cd5\u53d1\u9001\u544a\u8b66\u4fe1\u606f\u3002 \u6307\u6807 vmselect-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u67e5\u8be2\u6307\u6807\u6570\u636e\u3002\u8be5\u7ec4\u4ef6\u5f02\u5e38\u4f1a\u5bfc\u81f4\u65e0\u6cd5\u67e5\u8be2\u6307\u6807\u3002 \u6307\u6807 vmstorage-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u5b58\u50a8\u591a\u96c6\u7fa4\u7684\u6307\u6807\u6570\u636e\u3002 \u4eea\u8868\u76d8 grafana-deployment \u63d0\u4f9b\u76d1\u63a7\u9762\u677f\u80fd\u529b\u3002\u8be5\u7ec4\u4ef6\u5f02\u5e38\u4f1a\u5bfc\u81f4\u65e0\u6cd5\u67e5\u770b\u5185\u7f6e\u7684\u4eea\u8868\u76d8\u3002 \u94fe\u8def insight-jaeger-collector \u8d1f\u8d23\u63a5\u6536\u00a0opentelemetry-collector\u00a0\u4e2d\u94fe\u8def\u6570\u636e\u5e76\u5c06\u5176\u8fdb\u884c\u5b58\u50a8\u3002 \u94fe\u8def insight-jaeger-query \u8d1f\u8d23\u67e5\u8be2\u5404\u96c6\u7fa4\u4e2d\u91c7\u96c6\u5230\u7684\u94fe\u8def\u6570\u636e\u3002 \u94fe\u8def insight-opentelemetry-collector \u8d1f\u8d23\u63a5\u6536\u5404\u5b50\u96c6\u7fa4\u8f6c\u53d1\u7684\u94fe\u8def\u6570\u636e \u65e5\u5fd7 elasticsearch \u8d1f\u8d23\u5b58\u50a8\u5404\u96c6\u7fa4\u7684\u65e5\u5fd7\u6570\u636e\u3002

                      Note

                      If an external Elasticsearch is used, some of this data may be unavailable, so the Elasticsearch entry may appear empty.

                      "},{"location":"end-user/insight/system-config/system-config.html","title":"\u7cfb\u7edf\u914d\u7f6e","text":"

                      \u7cfb\u7edf\u914d\u7f6e \u5c55\u793a\u6307\u6807\u3001\u65e5\u5fd7\u3001\u94fe\u8def\u9ed8\u8ba4\u7684\u4fdd\u5b58\u65f6\u957f\u4ee5\u53ca\u9ed8\u8ba4\u7684 Apdex \u9608\u503c\u3002

                      1. \u70b9\u51fb\u53f3\u4fa7\u5bfc\u822a\u680f\uff0c\u9009\u62e9 \u7cfb\u7edf\u914d\u7f6e\u3002

                      2. To modify the retention period for historical alerts, click Edit and enter the target duration.

                        When the retention period is set to "0", historical alerts are never cleared.

                      3. To modify the default topology map rendering configuration, click Edit and define the topology map thresholds as needed.

                        Thresholds must be greater than 0 and increase from left to right, and every value must lie within the allowed minimum-maximum range.

                      Note

                      To modify other configurations, see How to Modify System Configuration.

                      "},{"location":"end-user/insight/trace/service.html","title":"\u670d\u52a1\u76d1\u63a7","text":"

                      \u5728 \u53ef\u89c2\u6d4b\u6027 Insight \u4e2d\u670d\u52a1\u662f\u6307\u4f7f\u7528 Opentelemtry SDK \u63a5\u5165\u94fe\u8def\u6570\u636e\uff0c\u670d\u52a1\u76d1\u63a7\u80fd\u591f\u8f85\u52a9\u8fd0\u7ef4\u8fc7\u7a0b\u4e2d\u89c2\u5bdf\u5e94\u7528\u7a0b\u5e8f\u7684\u6027\u80fd\u548c\u72b6\u6001\u3002

                      \u5982\u4f55\u4f7f\u7528 OpenTelemetry \u8bf7\u53c2\u8003\u4f7f\u7528 OTel \u8d4b\u4e88\u5e94\u7528\u53ef\u89c2\u6d4b\u6027\u3002

                      "},{"location":"end-user/insight/trace/service.html#_2","title":"\u540d\u8bcd\u89e3\u91ca","text":"
                      • \u670d\u52a1 \uff1a\u670d\u52a1\u8868\u793a\u4e3a\u4f20\u5165\u8bf7\u6c42\u63d0\u4f9b\u76f8\u540c\u884c\u4e3a\u7684\u4e00\u7ec4\u5de5\u4f5c\u8d1f\u8f7d\u3002\u60a8\u53ef\u4ee5\u5728\u4f7f\u7528 OpenTelemetry SDK \u65f6\u5b9a\u4e49\u670d\u52a1\u540d\u79f0\u6216\u4f7f\u7528 Istio \u4e2d\u5b9a\u4e49\u7684\u540d\u79f0\u3002
                      • \u64cd\u4f5c \uff1a\u64cd\u4f5c\u662f\u6307\u4e00\u4e2a\u670d\u52a1\u5904\u7406\u7684\u7279\u5b9a\u8bf7\u6c42\u6216\u64cd\u4f5c\uff0c\u6bcf\u4e2a Span \u90fd\u6709\u4e00\u4e2a\u64cd\u4f5c\u540d\u79f0\u3002
                      • \u51fa\u53e3\u6d41\u91cf \uff1a\u51fa\u53e3\u6d41\u91cf\u662f\u6307\u5f53\u524d\u670d\u52a1\u53d1\u8d77\u8bf7\u6c42\u7684\u6240\u6709\u6d41\u91cf\u3002
                      • \u5165\u53e3\u6d41\u91cf \uff1a\u5165\u53e3\u6d41\u91cf\u662f\u6307\u4e0a\u6e38\u670d\u52a1\u5bf9\u5f53\u524d\u670d\u52a1\u53d1\u8d77\u8bf7\u6c42\u7684\u6240\u6709\u6d41\u91cf\u3002
                      "},{"location":"end-user/insight/trace/service.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                      \u670d\u52a1\u5217\u8868\u9875\u9762\u5c55\u793a\u4e86\u96c6\u7fa4\u4e2d\u6240\u6709\u5df2\u63a5\u5165\u94fe\u8def\u6570\u636e\u7684\u670d\u52a1\u7684\u541e\u5410\u7387\u3001\u9519\u8bef\u7387\u3001\u8bf7\u6c42\u5ef6\u65f6\u7b49\u5173\u952e\u6307\u6807\u3002 \u60a8\u53ef\u4ee5\u6839\u636e\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u5bf9\u670d\u52a1\u8fdb\u884c\u8fc7\u6ee4\uff0c\u4e5f\u53ef\u4ee5\u6309\u7167\u541e\u5410\u7387\u3001\u9519\u8bef\u7387\u3001\u8bf7\u6c42\u5ef6\u65f6\u5bf9\u8be5\u5217\u8868\u8fdb\u884c\u6392\u5e8f\u3002\u5217\u8868\u4e2d\u7684\u6307\u6807\u6570\u636e\u9ed8\u8ba4\u65f6\u95f4\u4e3a 1 \u5c0f\u65f6\uff0c\u60a8\u53ef\u4ee5\u81ea\u5b9a\u4e49\u65f6\u95f4\u8303\u56f4\u3002

                      Follow the steps below to view the service monitoring metrics:

                      1. Go to the Insight product module.

                      2. In the left navigation, select Tracing -> Services.

                        Attention

                        1. If a service's namespace shows as unknown in the list, the service was not instrumented correctly; re-instrumenting it is recommended.
                        2. If multiple instrumented services share the same name and none of them sets the namespace environment variable correctly, the monitoring data shown in the list and on the service details page is an aggregate of all of those services.
                      3. Click a service name (insight-server as an example) to enter the service details page and view the service's detailed metrics and its operation metrics.

                        1. In the service topology module, you can view the service topology one layer upstream and downstream of the selected service; hover over a node to view its information.
                        2. In the traffic metrics module, you can view monitoring metrics for all requests of the service (including ingress and egress traffic) over the default one-hour window.
                        3. You can use the time picker in the upper right to quickly select a time range, or customize one.
                        4. In the Associated Containers module, click a pod name to jump to the pod details page.

                      4. Click the tab to switch to Operation Metrics, where you can query aggregated traffic metrics for the same operation across multiple selected services.

                        1. You can sort operation metrics by throughput, error rate, request latency, and so on.
                        2. Click the icon after an individual operation to jump to Traces and quickly query related traces.

                      "},{"location":"end-user/insight/trace/service.html#_4","title":"\u670d\u52a1\u6307\u6807\u8bf4\u660e","text":"\u53c2\u6570 \u8bf4\u660e \u541e\u5410\u7387 \u5355\u4f4d\u65f6\u95f4\u5185\u5904\u7406\u8bf7\u6c42\u7684\u6570\u91cf\u3002 \u9519\u8bef\u7387 \u67e5\u8be2\u65f6\u95f4\u8303\u56f4\u5185\u9519\u8bef\u8bf7\u6c42\u4e0e\u8bf7\u6c42\u603b\u6570\u7684\u6bd4\u503c\u3002 P50 \u8bf7\u6c42\u5ef6\u65f6 \u5728\u6240\u6709\u7684\u8bf7\u6c42\u4e2d\uff0c\u6709 50% \u7684\u8bf7\u6c42\u54cd\u5e94\u65f6\u95f4\u5c0f\u4e8e\u6216\u7b49\u4e8e\u8be5\u503c\u3002 P95 \u8bf7\u6c42\u5ef6\u65f6 \u5728\u6240\u6709\u7684\u8bf7\u6c42\u4e2d\uff0c\u6709 95% \u7684\u8bf7\u6c42\u54cd\u5e94\u65f6\u95f4\u5c0f\u4e8e\u6216\u7b49\u4e8e\u8be5\u503c\u3002 P99 \u8bf7\u6c42\u5ef6\u65f6 \u5728\u6240\u6709\u7684\u8bf7\u6c42\u4e2d\uff0c\u6709 95% \u7684\u8bf7\u6c42\u54cd\u5e94\u65f6\u95f4\u5c0f\u4e8e\u6216\u7b49\u4e8e\u8be5\u503c\u3002"},{"location":"end-user/insight/trace/topology.html","title":"\u670d\u52a1\u62d3\u6251","text":"

                      The service topology map is a visual representation of the connections, communication, and dependencies between services. It lets you understand the call relationships between services and view calls and their performance over a specified period. An edge between two nodes in the topology map means a call relationship existed between the two services within the query time range.

                      "},{"location":"end-user/insight/trace/topology.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      1. \u96c6\u7fa4\u4e2d\u5df2\u5b89\u88c5 insight-agent \u4e14\u5e94\u7528\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u3002
                      2. \u670d\u52a1\u5df2\u901a\u8fc7 Operator \u6216 Opentelemetry SDK \u7684\u65b9\u5f0f\u63a5\u5165\u94fe\u8def\u3002
                      "},{"location":"end-user/insight/trace/topology.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                      1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u6a21\u5757
                      2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u94fe\u8def\u8ffd\u8e2a -> \u670d\u52a1\u62d3\u6251
                      3. \u5728\u62d3\u6251\u56fe\u4e2d\uff0c\u60a8\u53ef\u6309\u9700\u6267\u884c\u4ee5\u4e0b\u64cd\u4f5c\uff1a

                        • Click a node to slide out the service details on the right, where you can view the service's request latency, throughput, and error rate metrics. Click the service name to jump to that service's details page.
                        • Hover over an edge to view traffic metrics for requests between the two services.
                        • In the Display Settings module, you can configure which elements are shown in the topology map.

                      4. Click Legend in the lower right to modify the rendering thresholds defined for the current topology map via Temporary Configuration; leaving or closing the page discards this configuration.

                        Thresholds must be greater than 0 and increase from left to right, and every value must lie within the allowed minimum-maximum range.

                      "},{"location":"end-user/insight/trace/topology.html#_4","title":"\u5176\u4ed6\u8282\u70b9","text":"

                      \u5728\u670d\u52a1\u62d3\u6251\u4e2d\u4f1a\u5b58\u5728\u6e38\u79bb\u5728\u96c6\u7fa4\u4e4b\u5916\u7684\u8282\u70b9\uff0c\u8fd9\u4e9b\u6e38\u79bb\u5728\u5916\u7684\u8282\u70b9\u53ef\u5206\u6210\u4e09\u7c7b\uff1a

                      • Databases
                      • Message queues
                      • Virtual nodes

                      • If a service makes requests to a database or a message queue, those two kinds of nodes are shown in the topology map by default. A virtual service indicates that a service inside the cluster called a node outside the cluster or a service not instrumented for tracing; virtual services are not shown in the topology map by default.

                      • When a service calls MySQL, PostgreSQL, or Oracle Database, the specific database type is shown in the topology map.

                      "},{"location":"end-user/insight/trace/topology.html#_5","title":"\u5f00\u542f\u865a\u62df\u8282\u70b9","text":"
                      1. \u66f4\u65b0 insight-server chart \u7684 values\uff0c\u627e\u5230\u4e0b\u56fe\u6240\u793a\u53c2\u6570\uff0c\u5c06 false \u6539\u4e3a true\u3002

                      2. In the service topology's display settings, check Virtual Services.

                      "},{"location":"end-user/insight/trace/trace.html","title":"\u94fe\u8def\u67e5\u8be2","text":"

                      \u5728\u94fe\u8def\u67e5\u8be2\u9875\u9762\uff0c\u60a8\u53ef\u4ee5\u8fc7 TraceID \u6216\u7cbe\u786e\u67e5\u8be2\u8c03\u7528\u94fe\u8def\u8be6\u7ec6\u60c5\u51b5\u6216\u7ed3\u5408\u591a\u79cd\u6761\u4ef6\u7b5b\u9009\u67e5\u8be2\u8c03\u7528\u94fe\u8def\u3002

                      "},{"location":"end-user/insight/trace/trace.html#_2","title":"\u540d\u8bcd\u89e3\u91ca","text":"
                      • TraceID\uff1a\u7528\u4e8e\u6807\u8bc6\u4e00\u4e2a\u5b8c\u6574\u7684\u8bf7\u6c42\u8c03\u7528\u94fe\u8def\u3002
                      • \u64cd\u4f5c\uff1a\u63cf\u8ff0 Span \u6240\u4ee3\u8868\u7684\u5177\u4f53\u64cd\u4f5c\u6216\u4e8b\u4ef6\u3002
                      • \u5165\u53e3 Span\uff1a\u5165\u53e3 Span \u4ee3\u8868\u4e86\u6574\u4e2a\u8bf7\u6c42\u7684\u7b2c\u4e00\u4e2a\u8bf7\u6c42\u3002
                      • \u5ef6\u65f6\uff1a\u6574\u4e2a\u8c03\u7528\u94fe\u4ece\u5f00\u59cb\u63a5\u6536\u8bf7\u6c42\u5230\u5b8c\u6210\u54cd\u5e94\u7684\u6301\u7eed\u65f6\u95f4\u3002
                      • Span\uff1a\u6574\u4e2a\u94fe\u8def\u4e2d\u5305\u542b\u7684 Span \u4e2a\u6570\u3002
                      • \u53d1\u751f\u65f6\u95f4\uff1a\u5f53\u524d\u94fe\u8def\u5f00\u59cb\u7684\u65f6\u95f4\u3002
                      • Tag\uff1a\u4e00\u7ec4\u952e\u503c\u5bf9\u6784\u6210\u7684 Span \u6807\u7b7e\u96c6\u5408\uff0cTag \u662f\u7528\u6765\u5bf9 Span \u8fdb\u884c\u7b80\u5355\u7684\u6ce8\u89e3\u548c\u8865\u5145\uff0c\u6bcf\u4e2a Span \u53ef\u4ee5\u6709\u591a\u4e2a\u7b80\u76f4\u5bf9\u5f62\u5f0f\u7684 Tag\u3002
                      "},{"location":"end-user/insight/trace/trace.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                      \u8bf7\u6309\u7167\u4ee5\u4e0b\u6b65\u9aa4\u67e5\u8be2\u94fe\u8def\uff1a

                      1. Go to the Insight product module.
                      2. In the left navigation, select Tracing -> Traces.

                        Note

                        The list supports sorting by span count, latency, and start time.

                      3. Click TraceID Search in the filter bar to switch to searching traces by TraceID.

                      4. When searching by TraceID, enter the complete TraceID.

                      "},{"location":"end-user/insight/trace/trace.html#_4","title":"\u5176\u4ed6\u64cd\u4f5c","text":""},{"location":"end-user/insight/trace/trace.html#_5","title":"\u67e5\u770b\u94fe\u8def\u8be6\u60c5","text":"
                      1. \u70b9\u51fb\u94fe\u8def\u5217\u8868\u4e2d\u7684\u67d0\u4e00\u94fe\u8def\u7684 TraceID\uff0c\u53ef\u67e5\u770b\u8be5\u94fe\u8def\u7684\u8be6\u60c5\u8c03\u7528\u60c5\u51b5\u3002

                      "},{"location":"end-user/insight/trace/trace.html#_6","title":"\u67e5\u770b\u5173\u8054\u65e5\u5fd7","text":"
                      1. \u70b9\u51fb\u94fe\u8def\u6570\u636e\u53f3\u4fa7\u7684\u56fe\u6807\uff0c\u53ef\u67e5\u8be2\u8be5\u94fe\u8def\u7684\u5173\u8054\u65e5\u5fd7\u3002

                        • By default, it queries log data covering the duration of the trace plus one minute after it ends.
                        • The queried logs are those whose text contains the trace's TraceID, plus the container logs related to the trace's call path.
                      2. Click View More to jump to the Log Query page with these conditions carried over.

                      3. All logs are searched by default, but you can use the dropdown to filter by the trace's TraceID or by the container logs related to the trace's call path.

                        Note

                        Because traces can cross clusters or namespaces, users with insufficient permissions cannot query a trace's associated logs.

                      "},{"location":"end-user/k8s/add-node.html","title":"\u6dfb\u52a0\u5de5\u4f5c\u8282\u70b9","text":"

                      \u5982\u679c\u8282\u70b9\u4e0d\u591f\u7528\u4e86\uff0c\u53ef\u4ee5\u6dfb\u52a0\u66f4\u591a\u8282\u70b9\u5230\u96c6\u7fa4\u4e2d\u3002

                      "},{"location":"end-user/k8s/add-node.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                      • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                      • \u6709\u4e00\u4e2a\u7ba1\u7406\u5458\u5e10\u53f7
                      • \u5df2\u521b\u5efa\u5e26 GPU \u8282\u70b9\u7684\u96c6\u7fa4
                      • \u51c6\u5907\u4e00\u53f0\u4e91\u4e3b\u673a
                      "},{"location":"end-user/k8s/add-node.html#_3","title":"\u6dfb\u52a0\u6b65\u9aa4","text":"
                      1. \u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                      2. \u5bfc\u822a\u81f3 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0

                      3. On the cluster overview page, click Node Management, then click the Join Node button on the right

                      4. Follow the wizard, fill in the parameters, and click OK

                        Basic Information  Parameter Configuration

                      5. Click OK in the popup

                      6. Return to the node list; the newly joined node's status is Joining. After a few minutes the status changes to Healthy, indicating the node joined successfully.

                      Tip

                      For a node that has just joined, it may take another 2-3 minutes before the GPU is recognized.
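                      You can also confirm from the command line that the GPU has been recognized (a sketch; the exact resource name in the output, for example nvidia.com/gpu, depends on the GPU vendor plugin installed in your cluster):

                        kubectl get node <node-name> -o jsonpath='{.status.allocatable}'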

                      "},{"location":"end-user/k8s/create-k8s.html","title":"\u521b\u5efa\u4e91\u4e0a Kubernetes \u96c6\u7fa4","text":"

                      \u90e8\u7f72 Kubernetes \u96c6\u7fa4\u662f\u4e3a\u4e86\u652f\u6301\u9ad8\u6548\u7684 AI \u7b97\u529b\u8c03\u5ea6\u548c\u7ba1\u7406\uff0c\u5b9e\u73b0\u5f39\u6027\u4f38\u7f29\uff0c\u63d0\u4f9b\u9ad8\u53ef\u7528\u6027\uff0c\u4ece\u800c\u4f18\u5316\u6a21\u578b\u8bad\u7ec3\u548c\u63a8\u7406\u8fc7\u7a0b\u3002

                      "},{"location":"end-user/k8s/create-k8s.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                      • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0\u5df2
                      • \u6709\u4e00\u4e2a\u7ba1\u7406\u5458\u6743\u9650\u7684\u8d26\u53f7
                      • \u51c6\u5907\u4e00\u53f0\u5e26 GPU \u7684\u7269\u7406\u673a
                      • \u5206\u914d\u4e24\u6bb5 IP \u5730\u5740\uff08Pod CIDR 18 \u4f4d\u3001SVC CIDR 18 \u4f4d\uff0c\u4e0d\u80fd\u4e0e\u73b0\u6709\u7f51\u6bb5\u51b2\u7a81\uff09
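                      For illustration only, a pair of non-conflicting /18 blocks might look like this (hypothetical values; pick ranges that fit your own network plan):

                        Pod CIDR: 10.233.64.0/18   # 16,384 addresses for Pods
                        SVC CIDR: 10.233.0.0/18    # 16,384 addresses for Services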
                      "},{"location":"end-user/k8s/create-k8s.html#_2","title":"\u521b\u5efa\u6b65\u9aa4","text":"
                      1. \u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                      2. \u521b\u5efa\u5e76\u542f\u52a8 3 \u53f0\u4e0d\u5e26 GPU \u7684\u4e91\u4e3b\u673a\u7528\u4f5c\u96c6\u7fa4\u7684 Master \u8282\u70b9

                        • Configure resources: 16 CPU cores, 32 GB memory, and a 200 GB system disk (ReadWriteOnce)
                        • Select Bridge as the network mode
                        • Set a root password or add an SSH public key for convenient SSH access
                        • Record the IPs of the 3 hosts
                      3. Navigate to Container Management -> Cluster List and click the Create Cluster button on the right

                      4. Follow the wizard and configure the cluster parameters

                        Basic Information  Node Configuration  Network Configuration  Addon Configuration  Advanced Configuration

                        After configuring the node information, click Start Check.

                        Each node can run 110 Pods by default; for higher-spec nodes this can be raised to 200 or 300 Pods.

                      5. Wait for the cluster creation to complete.

                      6. In the cluster list, find the newly created cluster, click its name, navigate to Helm Apps -> Helm Templates, search for metax-gpu-extensions in the search box, and click the card

                      7. Click the Install button on the right to start installing the GPU plugin

                        App Settings  Kubernetes Orchestration  Confirm

                        Enter a name, select a namespace, and modify the image address in the YAML:

                      8. You are automatically returned to the Helm app list; wait for the metax-gpu-extensions status to change to Deployed

                      9. The cluster is now created successfully. You can view the nodes it contains, and you can go on to create AI workloads and use the GPU.

                      Next step: Create an AI Workload

                      "},{"location":"end-user/k8s/remove-node.html","title":"\u79fb\u9664 GPU \u5de5\u4f5c\u8282\u70b9","text":"

                      GPU \u8d44\u6e90\u7684\u6210\u672c\u76f8\u5bf9\u8f83\u9ad8\uff0c\u5982\u679c\u6682\u65f6\u7528\u4e0d\u5230 GPU\uff0c\u53ef\u4ee5\u5c06\u5e26 GPU \u7684\u5de5\u4f5c\u8282\u70b9\u79fb\u9664\u3002 \u4ee5\u4e0b\u6b65\u9aa4\u4e5f\u540c\u6837\u9002\u7528\u4e8e\u79fb\u9664\u666e\u901a\u5de5\u4f5c\u8282\u70b9\u3002

                      "},{"location":"end-user/k8s/remove-node.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                      • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                      • \u6709\u4e00\u4e2a\u7ba1\u7406\u5458\u5e10\u53f7
                      • \u5df2\u521b\u5efa\u5e26 GPU \u8282\u70b9\u7684\u96c6\u7fa4
                      "},{"location":"end-user/k8s/remove-node.html#_2","title":"\u79fb\u9664\u6b65\u9aa4","text":"
                      1. \u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                      2. \u5bfc\u822a\u81f3 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0

                      3. On the cluster overview page, click Node Management, find the node to remove, click ⋮ on the right of the list, and select Remove Node from the popup menu

                      4. Enter the node name in the dialog, confirm it is correct, and click Delete

                      5. You are automatically returned to the node list with the node's status shown as Removing; refresh the page after a few minutes, and if the node is gone, it was removed successfully

                      6. After removing the node from the UI list, log in to the removed node's host over SSH and run the shutdown command.
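                      For example, on a typical Linux host (a sketch; adjust for your OS):

                        ssh root@<node-ip>
                        shutdown -h now   # power off the removed node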

                      Tip

                      After removing the node in the UI and shutting it down, the data on the node is not deleted immediately; it is retained for a period of time.

                      "},{"location":"end-user/kpanda/backup/index.html","title":"\u5907\u4efd\u6062\u590d","text":"

                      \u5907\u4efd\u6062\u590d\u5206\u4e3a\u5907\u4efd\u548c\u6062\u590d\u4e24\u65b9\u9762\uff0c\u5b9e\u9645\u5e94\u7528\u65f6\u9700\u8981\u5148\u5907\u4efd\u7cfb\u7edf\u5728\u67d0\u4e00\u65f6\u70b9\u7684\u6570\u636e\uff0c\u7136\u540e\u5b89\u5168\u5b58\u50a8\u5730\u5907\u4efd\u6570\u636e\u3002\u540e\u7eed\u5982\u679c\u51fa\u73b0\u6570\u636e\u635f\u574f\u3001\u4e22\u5931\u3001\u8bef\u5220\u7b49\u4e8b\u6545\uff0c\u5c31\u53ef\u4ee5\u57fa\u4e8e\u4e4b\u524d\u7684\u6570\u636e\u5907\u4efd\u5feb\u901f\u8fd8\u539f\u7cfb\u7edf\uff0c\u7f29\u77ed\u6545\u969c\u65f6\u95f4\uff0c\u51cf\u5c11\u635f\u5931\u3002

                      • In real production environments, services may be deployed across different clouds, regions, or availability zones. If one piece of infrastructure fails, the enterprise needs to restore applications quickly in another available environment. In such cases, cross-cloud/cross-cluster backup and restore becomes very important.
                      • Large-scale systems often involve many roles and users, complex permission systems, and numerous operators, so mistakes that cause system failures are hard to avoid entirely. In such cases, the system must be able to roll back quickly from previously backed-up data; otherwise, relying on people to troubleshoot, repair, and recover the system costs a great deal of time, and the longer the system is unavailable, the greater the enterprise's losses.
                      • In addition, factors such as network attacks, natural disasters, and equipment failures can also cause data incidents.

                      Backup and restore is therefore critical; it can be regarded as the last line of insurance for system stability and data security.

                      Backups are usually classified as full, incremental, or differential. The 算丰 AI computing platform currently supports full and incremental backups.

                      The backup and restore capability provided by the 算丰 AI computing platform covers Application Backup and ETCD Backup, and supports manual backups as well as scheduled automatic backups based on CronJobs.

                      • Application backup

                        Application backup means backing up the data of a workload in the cluster and then restoring that workload's data to the same cluster or another cluster. It supports backing up all resources in an entire namespace, and also supports filtering by label selector to back up only resources with specific labels.

                        Application backup supports cross-cluster backup of stateful applications; for detailed steps, refer to Cross-Cluster Backup and Restore of MySQL Applications and Data.

                      • ETCD backup

                        etcd is the data store of Kubernetes; Kubernetes stores both its own component data and the application data within it in etcd. Backing up etcd is therefore equivalent to backing up the data of the entire cluster, allowing the cluster to be quickly restored to an earlier point in time in case of failure.

                        Note that etcd backup data can currently only be restored to the same cluster (the original cluster).

                      "},{"location":"end-user/kpanda/backup/deployment.html","title":"\u5e94\u7528\u5907\u4efd","text":"

                      \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4e3a\u5e94\u7528\u505a\u5907\u4efd\uff0c\u672c\u6559\u7a0b\u4e2d\u4f7f\u7528\u7684\u6f14\u793a\u5e94\u7528\u540d\u4e3a dao-2048 \uff0c\u5c5e\u4e8e\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u3002

                      "},{"location":"end-user/kpanda/backup/deployment.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u5728\u5bf9\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u8fdb\u884c\u5907\u4efd\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • A Kubernetes cluster has been integrated into the Container Management module, or the administrator has created a cluster for the user, and the cluster's UI is accessible.

                      • A namespace and a user have been created.

                      • The current user has NS Editor or higher permissions; for details, refer to Namespace Authorization.

                      • The velero component is installed and running normally.

                      • A stateless workload has been created (named dao-2048 in this tutorial) and labeled with app: dao-2048.
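                      For reference, the last prerequisite could be prepared with kubectl along these lines (a sketch; the image and namespace are placeholders, and kubectl create deployment already applies the app: dao-2048 label by default):

                        kubectl create deployment dao-2048 -n <namespace> --image=<your-2048-image>
                        kubectl label deployment dao-2048 -n <namespace> app=dao-2048 --overwrite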

                      "},{"location":"end-user/kpanda/backup/deployment.html#_3","title":"\u5907\u4efd\u5de5\u4f5c\u8d1f\u8f7d","text":"

                      \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u5907\u4efd\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d dao-2048 \u3002

                      1. In the left navigation, click Container Management -> Backup & Restore.

                      2. On the Application Backup list page, select the cluster where velero and dao-2048 are installed from the cluster dropdown, then click the Create Backup Plan button on the right.

                      3. Fill in the backup configuration per the notes below.

                      4. Set the backup execution frequency per the notes below, then click Next.

                        • Backup frequency: sets the execution schedule by minute, hour, day, week, or month. Custom Cron expressions using digits and * are supported; after you enter an expression, its meaning is shown below it (see the Cron examples after this list). For detailed expression syntax, refer to Cron Schedule Syntax.
                        • Retention period (days): how long backup resources are kept; the default is 30 days, after which they are deleted.
                        • Back up data volumes (PVs): whether to back up the data in persistent volumes (PVs); direct copy and CSI snapshot methods are supported.
                          • Direct copy: copies the data in the PV directly for backup;
                          • CSI snapshot: uses CSI snapshots to back up PVs. The cluster must have a CSI snapshot class available for backup.
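                        As referenced above, a couple of illustrative Cron expressions (standard five-field syntax; the values are arbitrary examples):

                          0 3 * * *     # every day at 03:00
                          30 1 * * 1    # every Monday at 01:30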

                      5. Click OK; the page automatically returns to the application backup plan list. Find the newly created dao-2048 backup plan, click ⋮ on the right, and select Run Now to start the backup.

                      6. The cluster's Last Run Status changes to Backing Up. After the backup completes, click the backup plan's name to view its details.

                      Note

                      Workloads of type Job whose status is Completed cannot be backed up.

                      "},{"location":"end-user/kpanda/backup/etcd-backup.html","title":"etcd \u5907\u4efd","text":"

                      etcd \u5907\u4efd\u662f\u4ee5\u96c6\u7fa4\u6570\u636e\u4e3a\u6838\u5fc3\u7684\u5907\u4efd\u3002\u5728\u786c\u4ef6\u8bbe\u5907\u635f\u574f\uff0c\u5f00\u53d1\u6d4b\u8bd5\u914d\u7f6e\u9519\u8bef\u7b49\u573a\u666f\u4e2d\uff0c\u53ef\u4ee5\u901a\u8fc7 etcd \u5907\u4efd\u6062\u590d\u96c6\u7fa4\u6570\u636e\u3002

                      \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u4e3a\u96c6\u7fa4\u5236\u4f5c etcd \u5907\u4efd\u3002

                      "},{"location":"end-user/kpanda/backup/etcd-backup.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                      • \u63a5\u5165\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                      • \u521b\u5efa\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 NS Admin \u6216\u66f4\u9ad8\u6743\u9650\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                      • \u51c6\u5907\u4e00\u4e2a MinIO \u5b9e\u4f8b\u3002

                      "},{"location":"end-user/kpanda/backup/etcd-backup.html#etcd_1","title":"\u521b\u5efa etcd \u5907\u4efd","text":"

                      \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u521b\u5efa etcd \u5907\u4efd\u3002

                      1. Go to Container Management -> Backup & Restore -> etcd Backup, click the Backup Policy tab, then click Create Backup Policy on the right.

                      2. Fill in the Basic Information per the notes below. After completing it, click Next; the system automatically verifies connectivity to etcd, and once verification passes you can proceed to the next step.

                        • Backup cluster: select the cluster whose etcd data you want to back up, and log in to it in a terminal
                        • etcd address: in the format https://${node IP}:${port}

                          • In a standard Kubernetes cluster, etcd's default port is 2379
                          • In managed public-cloud clusters, contact the relevant support staff to obtain the etcd port. This is because the control-plane components of public-cloud clusters are maintained and managed by the cloud provider: users cannot directly access or inspect these components, nor obtain control-plane information such as ports through normal commands (e.g., kubectl).
                          How to obtain the port number
                          1. Find the etcd Pod in the kube-system namespace

                            kubectl get po -n kube-system | grep etcd\n
                          2. Get the port from the listen-client-urls of the etcd Pod

                            kubectl get po -n kube-system ${etcd_pod_name} -oyaml | grep listen-client-urls # (1)!\n
                            1. Replace etcd_pod_name with the actual Pod name

                            The expected output is as follows; the number after the node IP is the port:

                            - --listen-client-urls=https://127.0.0.1:2379,https://10.6.229.191:2379\n
                        • CA certificate: view it with the following command, then copy and paste its contents into the corresponding field:

                          cat /etc/kubernetes/ssl/etcd/ca.crt\n
                        • Cert certificate: view it with the following command, then copy and paste its contents into the corresponding field:

                          cat /etc/kubernetes/ssl/apiserver-etcd-client.crt\n
                        • Key: view it with the following command, then copy and paste its contents into the corresponding field:

                          cat /etc/kubernetes/ssl/apiserver-etcd-client.key\n

                        Note

                        Click How to Obtain below an input box to see, on the UI page, how to obtain the corresponding information; a connectivity-check sketch also follows below.
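                        Before submitting the form, you can optionally verify from a control-plane node that these certificates actually reach etcd (a sketch using etcdctl, assuming the certificate paths shown above):

                          ETCDCTL_API=3 etcdctl \
                            --endpoints=https://<node-ip>:2379 \
                            --cacert=/etc/kubernetes/ssl/etcd/ca.crt \
                            --cert=/etc/kubernetes/ssl/apiserver-etcd-client.crt \
                            --key=/etc/kubernetes/ssl/apiserver-etcd-client.key \
                            endpoint health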

                      3. Fill in the Backup Policy per the following notes.

                        • Backup method: choose manual backup or scheduled backup

                          • Manual backup: immediately performs one full backup of etcd data based on the backup configuration.
                          • Scheduled backup: performs periodic full backups of etcd data at the configured frequency.
                        • Backup chain length: the maximum number of backups to retain. The default is 30.

                        • Backup frequency: hourly, daily, weekly, monthly, and custom schedules are supported.

                      4. Fill in the Storage Location per the following notes.

                        • Storage provider: S3 storage is selected by default
                        • Object storage endpoint: the access address of MinIO
                        • Bucket: create a bucket in MinIO and enter its name (see the sketch after this list)
                        • Username: the MinIO login username
                        • Password: the MinIO login password
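                        If the bucket does not exist yet, it can be created with the MinIO client, mc (a sketch; the alias and bucket names are arbitrary examples):

                          mc alias set insight-minio http://<minio-address>:9000 <username> <password>
                          mc mb insight-minio/etcd-backup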

                      5. After you click OK, the page automatically jumps to the backup policy list, where you can view all policies created so far.

                        • Click the ⋮ action button on the right of a policy to view logs, view YAML, update the policy, stop the policy, run it immediately, and so on.
                        • When the backup method is manual, you can click Run Now to perform a backup.
                        • When the backup method is scheduled, backups run according to the configured schedule.

                      "},{"location":"end-user/kpanda/backup/etcd-backup.html#_2","title":"\u67e5\u770b\u5907\u4efd\u7b56\u7565\u65e5\u5fd7","text":"

                      \u70b9\u51fb \u65e5\u5fd7 \u53ef\u4ee5\u67e5\u770b\u65e5\u5fd7\u5185\u5bb9\uff0c\u9ed8\u8ba4\u5c55\u793a 100 \u884c\u3002\u82e5\u60f3\u67e5\u770b\u66f4\u591a\u65e5\u5fd7\u4fe1\u606f\u6216\u8005\u4e0b\u8f7d\u65e5\u5fd7\uff0c\u53ef\u5728\u65e5\u5fd7\u4e0a\u65b9\u6839\u636e\u63d0\u793a\u524d\u5f80\u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u3002

                      "},{"location":"end-user/kpanda/backup/etcd-backup.html#_3","title":"\u67e5\u770b\u5907\u4efd\u7b56\u7565\u8be6\u60c5","text":"

                      \u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 -> \u5907\u4efd\u6062\u590d -> etcd \u5907\u4efd \uff0c\u70b9\u51fb \u5907\u4efd\u7b56\u7565 \u9875\u7b7e\uff0c\u63a5\u7740\u70b9\u51fb\u7b56\u7565\u540d\u79f0\u53ef\u4ee5\u67e5\u770b\u7b56\u7565\u8be6\u60c5\u3002

                      "},{"location":"end-user/kpanda/backup/etcd-backup.html#_4","title":"\u67e5\u770b\u5907\u4efd\u70b9","text":"
                      1. \u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 -> \u5907\u4efd\u6062\u590d -> etcd \u5907\u4efd \uff0c\u70b9\u51fb \u5907\u4efd\u70b9 \u9875\u7b7e\u3002
                      2. \u9009\u62e9\u76ee\u6807\u96c6\u7fa4\u540e\uff0c\u53ef\u4ee5\u67e5\u770b\u8be5\u96c6\u7fa4\u4e0b\u6240\u6709\u5907\u4efd\u4fe1\u606f\u3002

                        Each backup run generates a corresponding backup point; you can quickly restore the application from a backup point in the Success state.

                      "},{"location":"end-user/kpanda/backup/install-velero.html","title":"\u5b89\u88c5 velero \u63d2\u4ef6","text":"

                      velero \u662f\u4e00\u4e2a\u5907\u4efd\u548c\u6062\u590d Kubernetes \u96c6\u7fa4\u8d44\u6e90\u7684\u5f00\u6e90\u5de5\u5177\u3002\u5b83\u53ef\u4ee5\u5c06 Kubernetes \u96c6\u7fa4\u4e2d\u7684\u8d44\u6e90\u5907\u4efd\u5230\u4e91\u5b58\u50a8\u670d\u52a1\u3001\u672c\u5730\u5b58\u50a8\u6216\u5176\u4ed6\u4f4d\u7f6e\uff0c\u5e76\u4e14\u53ef\u4ee5\u5728\u9700\u8981\u65f6\u5c06\u8fd9\u4e9b\u8d44\u6e90\u6062\u590d\u5230\u540c\u4e00\u6216\u4e0d\u540c\u7684\u96c6\u7fa4\u4e2d\u3002

                      \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4f7f\u7528 Helm \u5e94\u7528 \u90e8\u7f72 velero \u63d2\u4ef6\u3002

                      "},{"location":"end-user/kpanda/backup/install-velero.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                      \u5b89\u88c5 velero \u63d2\u4ef6\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                      • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                      • \u521b\u5efa velero \u547d\u540d\u7a7a\u95f4\u3002

                      • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                      "},{"location":"end-user/kpanda/backup/install-velero.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                      \u8bf7\u6267\u884c\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 velero \u63d2\u4ef6\u3002

                      1. \u5728\u96c6\u7fa4\u5217\u8868\u9875\u9762\u627e\u5230\u9700\u8981\u5b89\u88c5 velero \u63d2\u4ef6\u7684\u76ee\u6807\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4f9d\u6b21\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u6a21\u677f \uff0c\u5728\u641c\u7d22\u680f\u8f93\u5165 velero \u8fdb\u884c\u641c\u7d22\u3002

                      2. \u9605\u8bfb velero \u63d2\u4ef6\u76f8\u5173\u4ecb\u7ecd\uff0c\u9009\u62e9\u7248\u672c\u540e\u70b9\u51fb \u5b89\u88c5 \u6309\u94ae\u3002\u672c\u6587\u5c06\u4ee5 4.0.2 \u7248\u672c\u4e3a\u4f8b\u8fdb\u884c\u5b89\u88c5\uff0c\u63a8\u8350\u5b89\u88c5 4.0.2 \u6216\u66f4\u9ad8\u7248\u672c\u3002

                      3. \u586b\u5199\u548c\u914d\u7f6e\u53c2\u6570\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65

                        \u57fa\u672c\u53c2\u6570\u53c2\u6570\u914d\u7f6e

                        • \u540d\u79f0\uff1a\u5fc5\u586b\u53c2\u6570\uff0c\u8f93\u5165\u63d2\u4ef6\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09,\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 metrics-server-01\u3002
                        • \u547d\u540d\u7a7a\u95f4\uff1a\u63d2\u4ef6\u5b89\u88c5\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4e3a velero \u547d\u540d\u7a7a\u95f4\u3002
                        • \u7248\u672c\uff1a\u63d2\u4ef6\u7684\u7248\u672c\uff0c\u6b64\u5904\u4ee5 4.0.2 \u7248\u672c\u4e3a\u4f8b\u3002
                        • \u5c31\u7eea\u7b49\u5f85\uff1a\u53ef\u9009\u53c2\u6570\uff0c\u542f\u7528\u540e\uff0c\u5c06\u7b49\u5f85\u5e94\u7528\u4e0b\u6240\u6709\u5173\u8054\u8d44\u6e90\u5904\u4e8e\u5c31\u7eea\u72b6\u6001\uff0c\u624d\u4f1a\u6807\u8bb0\u5e94\u7528\u5b89\u88c5\u6210\u529f\u3002
                        • \u5931\u8d25\u5220\u9664\uff1a\u53ef\u9009\u53c2\u6570\uff0c\u5f00\u542f\u540e\uff0c\u5c06\u9ed8\u8ba4\u540c\u6b65\u5f00\u542f\u5c31\u7eea\u7b49\u5f85\u3002\u5982\u679c\u5b89\u88c5\u5931\u8d25\uff0c\u5c06\u5220\u9664\u5b89\u88c5\u76f8\u5173\u8d44\u6e90\u3002
                        • \u8be6\u60c5\u65e5\u5fd7\uff1a\u53ef\u9009\u53c2\u6570\uff0c\u5f00\u542f\u540e\u5c06\u8f93\u51fa\u5b89\u88c5\u8fc7\u7a0b\u7684\u8be6\u7ec6\u65e5\u5fd7\u3002

                        Note

                        \u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u548c/\u6216 \u5931\u8d25\u5220\u9664 \u540e\uff0c\u5e94\u7528\u9700\u8981\u7ecf\u8fc7\u8f83\u957f\u65f6\u95f4\u624d\u4f1a\u88ab\u6807\u8bb0\u4e3a \u8fd0\u884c\u4e2d \u72b6\u6001\u3002

                        • S3 Credentials\uff1a

                          • Use secret \uff1a\u4fdd\u6301\u9ed8\u8ba4\u914d\u7f6e true \u3002
                          • Secret name \uff1a\u4fdd\u6301\u9ed8\u8ba4\u914d\u7f6e velero-s3-credential \u3002
                          • SecretContents.aws_access_key_id = \uff1a\u914d\u7f6e\u8bbf\u95ee\u5bf9\u8c61\u5b58\u50a8\u7684\u7528\u6237\u540d\uff0c\u66ff\u6362 \u4e3a\u771f\u5b9e\u53c2\u6570\u3002
                          • SecretContents.aws_secret_access_key = \uff1a\u914d\u7f6e\u8bbf\u95ee\u5bf9\u8c61\u5b58\u50a8\u7684\u5bc6\u7801\uff0c\u66ff\u6362 \u4e3a\u771f\u5b9e\u53c2\u6570\u3002

                            config \"SecretContents \u6837\u4f8b\" [default] aws_access_key_id = minio aws_secret_access_key = minio123

• Velero Configuration:

• Backupstoragelocation: the location where velero backup data is stored.
• S3 bucket: the name of the bucket used to store backup data (it must be a real bucket that already exists in minio).
• Is default BackupStorage: keep the default value true.
• S3 access mode: velero's access mode to the data; one of:
  • ReadWrite: allow velero to read and write backup data
  • ReadOnly: allow velero to read backup data but not modify it
  • WriteOnly: only allow velero to write backup data, not read it
• S3 Configs: detailed configuration of the S3 storage (minio)
• S3 region: the geographic region of the cloud storage. The default us-east-1 is used, provided by the system administrator
• S3 force path style: keep the default value true
• S3 server URL: the console access address of the object storage (minio). minio generally provides both a UI access service and a console access service; use the console access address here

Note

Make sure the clock of the S3 storage service and the clock of the backup/restore cluster differ by no more than 10 minutes; ideally keep them synchronized, otherwise backup operations cannot be performed.

• migration plugin configuration: when enabled, the following is added to the YAML snippet in the next step:

                            ...\ninitContainers:\n  - image: 'release.daocloud.io/kcoral/velero-plugin-for-migration:v0.3.0'\n    imagePullPolicy: IfNotPresent\n    name: velero-plugin-for-migration\n    volumeMounts:\n      - mountPath: /target\n        name: plugins\n  - image: 'docker.m.daocloud.io/velero/velero-plugin-for-csi:v0.7.0'\n    imagePullPolicy: IfNotPresent\n    name: velero-plugin-for-csi\n    volumeMounts:\n      - mountPath: /target\n        name: plugins\n  - image: 'docker.m.daocloud.io/velero/velero-plugin-for-aws:v1.9.0'\n    imagePullPolicy: IfNotPresent\n    name: velero-plugin-for-aws\n    volumeMounts:\n      - mountPath: /target\n        name: plugins\n...\n
• After confirming the YAML is correct, click OK to complete the installation of the velero plugin. The system then automatically redirects to the Helm Apps list page; wait a few minutes, refresh the page, and you can see the newly installed application.
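
As an alternative to filling in the SecretContents fields above, the credential secret can be created ahead of time with kubectl. A minimal sketch, assuming the default secret name and namespace from this guide and that the chart reads the secret's cloud key (as the upstream velero chart does); the local file name credentials-velero is illustrative:

# credentials-velero contains the [default] block from the SecretContents example above\nkubectl create secret generic velero-s3-credential -n velero --from-file=cloud=credentials-velero\n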

                          • "},{"location":"end-user/kpanda/clusterops/cluster-oversold.html","title":"\u96c6\u7fa4\u52a8\u6001\u8d44\u6e90\u8d85\u5356","text":"

                            \u76ee\u524d\uff0c\u8bb8\u591a\u4e1a\u52a1\u5b58\u5728\u5cf0\u503c\u548c\u4f4e\u8c37\u7684\u73b0\u8c61\u3002\u4e3a\u4e86\u786e\u4fdd\u670d\u52a1\u7684\u6027\u80fd\u548c\u7a33\u5b9a\u6027\uff0c\u5728\u90e8\u7f72\u670d\u52a1\u65f6\uff0c\u901a\u5e38\u4f1a\u6839\u636e\u5cf0\u503c\u9700\u6c42\u6765\u7533\u8bf7\u8d44\u6e90\u3002 \u7136\u800c\uff0c\u5cf0\u503c\u671f\u53ef\u80fd\u975e\u5e38\u77ed\u6682\uff0c\u5bfc\u81f4\u5728\u975e\u5cf0\u503c\u671f\u65f6\u8d44\u6e90\u88ab\u6d6a\u8d39\u3002 \u96c6\u7fa4\u8d44\u6e90\u8d85\u5356 \u5c31\u662f\u5c06\u8fd9\u4e9b\u7533\u8bf7\u4e86\u800c\u672a\u4f7f\u7528\u7684\u8d44\u6e90\uff08\u5373\u7533\u8bf7\u91cf\u4e0e\u4f7f\u7528\u91cf\u7684\u5dee\u503c\uff09\u5229\u7528\u8d77\u6765\uff0c\u4ece\u800c\u63d0\u5347\u96c6\u7fa4\u8d44\u6e90\u5229\u7528\u7387\uff0c\u51cf\u5c11\u8d44\u6e90\u6d6a\u8d39\u3002

                            \u672c\u6587\u4e3b\u8981\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528\u96c6\u7fa4\u52a8\u6001\u8d44\u6e90\u8d85\u5356\u529f\u80fd\u3002

                            "},{"location":"end-user/kpanda/clusterops/cluster-oversold.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                            • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                            • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 Cluster Admin \uff0c \u8be6\u60c5\u53ef\u53c2\u8003\u96c6\u7fa4\u6388\u6743\u3002
                            "},{"location":"end-user/kpanda/clusterops/cluster-oversold.html#_3","title":"\u5f00\u542f\u96c6\u7fa4\u8d85\u5356","text":"
                            1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762

                            2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u96c6\u7fa4\u8fd0\u7ef4 -> \u96c6\u7fa4\u8bbe\u7f6e \uff0c\u7136\u540e\u9009\u62e9 \u9ad8\u7ea7\u914d\u7f6e \u9875\u7b7e

                            3. \u6253\u5f00\u96c6\u7fa4\u8d85\u5356\uff0c\u8bbe\u7f6e\u8d85\u5356\u6bd4

                              • \u82e5\u672a\u5b89\u88c5 cro-operator \u63d2\u4ef6\uff0c\u70b9\u51fb \u7acb\u5373\u5b89\u88c5 \u6309\u94ae\uff0c\u5b89\u88c5\u6d41\u7a0b\u53c2\u8003\u7ba1\u7406 Helm \u5e94\u7528
                              • \u82e5\u5df2\u5b89\u88c5 cro-operator \u63d2\u4ef6\uff0c\u6253\u5f00\u96c6\u7fa4\u8d85\u5356\u5f00\u5173\uff0c\u5219\u53ef\u4ee5\u5f00\u59cb\u4f7f\u7528\u96c6\u7fa4\u8d85\u5356\u529f\u80fd\u3002

Note

The following label must be applied to the relevant namespaces in the cluster for the cluster overcommitment policy to take effect (a kubectl sketch follows the label below).

                              clusterresourceoverrides.admission.autoscaling.openshift.io/enabled: \"true\"\n
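
For example, a minimal kubectl sketch for applying this label (the namespace name your-namespace is a placeholder):

# label the namespace so the overcommitment policy applies to it\nkubectl label namespace your-namespace clusterresourceoverrides.admission.autoscaling.openshift.io/enabled=true\n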

                            "},{"location":"end-user/kpanda/clusterops/cluster-oversold.html#_4","title":"\u4f7f\u7528\u96c6\u7fa4\u8d85\u5356","text":"

                            \u8bbe\u7f6e\u597d\u96c6\u7fa4\u52a8\u6001\u8d44\u6e90\u8d85\u5356\u6bd4\u540e\uff0c\u4f1a\u5728\u5de5\u4f5c\u8d1f\u8f7d\u8fd0\u884c\u65f6\u751f\u6548\u3002\u4e0b\u6587\u4ee5 niginx \u4e3a\u4f8b\uff0c\u9a8c\u8bc1\u4f7f\u7528\u8d44\u6e90\u8d85\u5356\u80fd\u529b\u3002

                            1. \u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d nginx \u5e76\u8bbe\u7f6e\u5bf9\u5e94\u7684\u8d44\u6e90\u9650\u5236\u503c\uff0c\u521b\u5efa\u6d41\u7a0b\u53c2\u8003\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\uff08Deployment\uff09

                            2. \u67e5\u770b\u5de5\u4f5c\u8d1f\u8f7d\u7684 Pod \u8d44\u6e90\u7533\u8bf7\u503c\u4e0e\u9650\u5236\u503c\u7684\u6bd4\u503c\u662f\u5426\u7b26\u5408\u8d85\u552e\u6bd4
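
A quick way to inspect the effective values for step 2 is a kubectl JSONPath query; a minimal sketch (the pod name is a placeholder):

# print the resources block of the first container; compare requests vs. limits against the overcommit ratio\nkubectl get pod <pod-name> -o jsonpath='{.spec.containers[0].resources}'\n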

                            "},{"location":"end-user/kpanda/clusterops/cluster-settings.html","title":"\u96c6\u7fa4\u8bbe\u7f6e","text":"

                            \u96c6\u7fa4\u8bbe\u7f6e\u7528\u4e8e\u4e3a\u60a8\u7684\u96c6\u7fa4\u81ea\u5b9a\u4e49\u9ad8\u7ea7\u7279\u6027\u8bbe\u7f6e\uff0c\u5305\u62ec\u662f\u5426\u542f\u7528 GPU\u3001Helm \u4ed3\u5e93\u5237\u65b0\u5468\u671f\u3001Helm \u64cd\u4f5c\u8bb0\u5f55\u4fdd\u7559\u7b49\u3002

                            • \u542f\u7528 GPU\uff1a\u9700\u8981\u9884\u5148\u5728\u96c6\u7fa4\u4e0a\u5b89\u88c5 GPU \u5361\u53ca\u5bf9\u5e94\u9a71\u52a8\u63d2\u4ef6\u3002

                              \u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u6700\u8fd1\u64cd\u4f5c -> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \u3002

                            • Helm \u64cd\u4f5c\u57fa\u7840\u955c\u50cf\u3001\u4ed3\u5e93\u5237\u65b0\u5468\u671f\u3001\u64cd\u4f5c\u8bb0\u5f55\u4fdd\u7559\u6761\u6570\u3001\u662f\u5426\u5f00\u542f\u96c6\u7fa4\u5220\u9664\u4fdd\u62a4\uff08\u5f00\u542f\u540e\u96c6\u7fa4\u5c06\u4e0d\u80fd\u76f4\u63a5\u5378\u8f7d\uff09

                            "},{"location":"end-user/kpanda/clusterops/latest-operations.html","title":"\u6700\u8fd1\u64cd\u4f5c","text":"

                            \u5728\u8be5\u9875\u9762\u53ef\u4ee5\u67e5\u770b\u6700\u8fd1\u7684\u96c6\u7fa4\u64cd\u4f5c\u8bb0\u5f55\u548c Helm \u64cd\u4f5c\u8bb0\u5f55\uff0c\u4ee5\u53ca\u5404\u9879\u64cd\u4f5c\u7684 YAML \u6587\u4ef6\u548c\u65e5\u5fd7\uff0c\u4e5f\u53ef\u4ee5\u5220\u9664\u67d0\u4e00\u6761\u8bb0\u5f55\u3002

                            \u8bbe\u7f6e Helm \u64cd\u4f5c\u7684\u4fdd\u7559\u6761\u6570\uff1a

                            \u7cfb\u7edf\u9ed8\u8ba4\u4fdd\u7559\u6700\u8fd1 100 \u6761 Helm \u64cd\u4f5c\u8bb0\u5f55\u3002\u82e5\u4fdd\u7559\u6761\u6570\u592a\u591a\uff0c\u53ef\u80fd\u4f1a\u9020\u6210\u6570\u636e\u5197\u4f59\uff0c\u4fdd\u7559\u6761\u6570\u592a\u5c11\u53ef\u80fd\u4f1a\u9020\u6210\u60a8\u6240\u9700\u8981\u7684\u5173\u952e\u64cd\u4f5c\u8bb0\u5f55\u7684\u7f3a\u5931\u3002\u9700\u8981\u6839\u636e\u5b9e\u9645\u60c5\u51b5\u8bbe\u7f6e\u5408\u7406\u7684\u4fdd\u7559\u6570\u91cf\u3002\u5177\u4f53\u6b65\u9aa4\u5982\u4e0b\uff1a

                            1. \u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u6700\u8fd1\u64cd\u4f5c -> Helm \u64cd\u4f5c -> \u8bbe\u7f6e\u4fdd\u7559\u6761\u6570 \u3002

                            2. \u8bbe\u7f6e\u9700\u8981\u4fdd\u7559\u591a\u5c11\u6761 Helm \u64cd\u4f5c\u8bb0\u5f55\uff0c\u5e76\u70b9\u51fb \u786e\u5b9a \u3002

                            "},{"location":"end-user/kpanda/clusters/access-cluster.html","title":"\u8bbf\u95ee\u96c6\u7fa4","text":"

                            \u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\u63a5\u5165\u6216\u521b\u5efa\u7684\u96c6\u7fa4\uff0c\u4e0d\u4ec5\u53ef\u4ee5\u901a\u8fc7 UI \u754c\u9762\u76f4\u63a5\u8bbf\u95ee\uff0c\u4e5f\u53ef\u4ee5\u901a\u8fc7\u5176\u4ed6\u4e24\u79cd\u65b9\u5f0f\u8fdb\u884c\u8bbf\u95ee\u63a7\u5236\uff1a

                            • \u901a\u8fc7 CloudShell \u5728\u7ebf\u8bbf\u95ee
                            • \u4e0b\u8f7d\u96c6\u7fa4\u8bc1\u4e66\u540e\u901a\u8fc7 kubectl \u8fdb\u884c\u8bbf\u95ee

                            Note

                            \u8bbf\u95ee\u96c6\u7fa4\u65f6\uff0c\u7528\u6237\u5e94\u5177\u6709 Cluster Admin \u6743\u9650\u6216\u66f4\u9ad8\u6743\u9650\u3002

                            "},{"location":"end-user/kpanda/clusters/access-cluster.html#cloudshell","title":"\u901a\u8fc7 CloudShell \u8bbf\u95ee","text":"
                            1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9009\u62e9\u9700\u8981\u901a\u8fc7 CloudShell \u8bbf\u95ee\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u56fe\u6807\u5e76\u5728\u4e0b\u62c9\u5217\u8868\u4e2d\u70b9\u51fb \u63a7\u5236\u53f0 \u3002

                            2. \u5728 CloudShell \u63a7\u5236\u53f0\u6267\u884c kubectl get node \u547d\u4ee4\uff0c\u9a8c\u8bc1 CloudShell \u4e0e\u96c6\u7fa4\u7684\u8fde\u901a\u6027\u3002\u5982\u56fe\uff0c\u63a7\u5236\u53f0\u5c06\u8fd4\u56de\u96c6\u7fa4\u4e0b\u7684\u8282\u70b9\u4fe1\u606f\u3002

                            \u73b0\u5728\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7 CloudShell \u6765\u8bbf\u95ee\u5e76\u7ba1\u7406\u8be5\u96c6\u7fa4\u4e86\u3002

                            "},{"location":"end-user/kpanda/clusters/access-cluster.html#kubectl","title":"\u901a\u8fc7 kubectl \u8bbf\u95ee","text":"

                            \u901a\u8fc7\u672c\u5730\u8282\u70b9\u8bbf\u95ee\u5e76\u7ba1\u7406\u4e91\u7aef\u96c6\u7fa4\u65f6\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u6761\u4ef6\uff1a

                            • \u672c\u5730\u8282\u70b9\u548c\u4e91\u7aef\u96c6\u7fa4\u7684\u7f51\u7edc\u4e92\u8054\u4e92\u901a\u3002
                            • \u5df2\u7ecf\u5c06\u96c6\u7fa4\u8bc1\u4e66\u4e0b\u8f7d\u5230\u4e86\u672c\u5730\u8282\u70b9\u3002
                            • \u672c\u5730\u8282\u70b9\u5df2\u7ecf\u5b89\u88c5\u4e86 kubectl \u5de5\u5177\u3002\u5173\u4e8e\u8be6\u7ec6\u7684\u5b89\u88c5\u65b9\u5f0f\uff0c\u8bf7\u53c2\u9605\u5b89\u88c5 kubectl\u3002

                            \u6ee1\u8db3\u4e0a\u8ff0\u6761\u4ef6\u540e\uff0c\u6309\u7167\u4e0b\u65b9\u6b65\u9aa4\u4ece\u672c\u5730\u8bbf\u95ee\u4e91\u7aef\u96c6\u7fa4\uff1a

                            1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9009\u62e9\u9700\u8981\u4e0b\u8f7d\u8bc1\u4e66\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \uff0c\u5e76\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u70b9\u51fb \u8bc1\u4e66\u83b7\u53d6 \u3002

                            2. \u9009\u62e9\u8bc1\u4e66\u6709\u6548\u671f\u5e76\u70b9\u51fb \u4e0b\u8f7d\u8bc1\u4e66 \u3002

                            3. \u6253\u5f00\u4e0b\u8f7d\u597d\u7684\u96c6\u7fa4\u8bc1\u4e66\uff0c\u5c06\u8bc1\u4e66\u5185\u5bb9\u590d\u5236\u81f3\u672c\u5730\u8282\u70b9\u7684 config \u6587\u4ef6\u3002

                              kubectl \u5de5\u5177\u9ed8\u8ba4\u4f1a\u4ece\u672c\u5730\u8282\u70b9\u7684 $HOME/.kube \u76ee\u5f55\u4e0b\u67e5\u627e\u540d\u4e3a config \u7684\u6587\u4ef6\u3002\u8be5\u6587\u4ef6\u5b58\u50a8\u4e86\u76f8\u5173\u96c6\u7fa4\u7684\u8bbf\u95ee\u51ed\u8bc1\uff0ckubectl \u53ef\u4ee5\u51ed\u8be5\u914d\u7f6e\u6587\u4ef6\u8fde\u63a5\u81f3\u96c6\u7fa4\u3002
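
For instance, a minimal sketch of placing the downloaded certificate content on the local node (the file name cluster.kubeconfig is illustrative; back up any existing config first):

mkdir -p $HOME/.kube\ncp cluster.kubeconfig $HOME/.kube/config\n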

4. Run the following command on the local node to verify connectivity to the cluster:

                              kubectl get pod -n default\n

The expected output is similar to:

                              NAME                            READY   STATUS      RESTARTS    AGE\ndao-2048-2048-58c7f7fc5-mq7h4   1/1     Running     0           30h\n

Now you can access and manage the cluster locally through kubectl.

                            "},{"location":"end-user/kpanda/clusters/cluster-role.html","title":"\u96c6\u7fa4\u89d2\u8272","text":"

                            \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u57fa\u4e8e\u96c6\u7fa4\u7684\u4e0d\u540c\u529f\u80fd\u5b9a\u4f4d\u5bf9\u96c6\u7fa4\u8fdb\u884c\u4e86\u89d2\u8272\u5206\u7c7b\uff0c\u5e2e\u52a9\u7528\u6237\u66f4\u597d\u5730\u7ba1\u7406 IT \u57fa\u7840\u8bbe\u65bd\u3002

                            "},{"location":"end-user/kpanda/clusters/cluster-role.html#_2","title":"\u5168\u5c40\u670d\u52a1\u96c6\u7fa4","text":"

                            \u6b64\u96c6\u7fa4\u7528\u4e8e\u8fd0\u884c\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7ec4\u4ef6\uff0c\u4f8b\u5982\u5bb9\u5668\u7ba1\u7406\u3001\u5168\u5c40\u7ba1\u7406\u3001\u53ef\u89c2\u6d4b\u6027\u3001\u955c\u50cf\u4ed3\u5e93\u7b49\u3002 \u4e00\u822c\u4e0d\u627f\u8f7d\u4e1a\u52a1\u8d1f\u8f7d\u3002

| Supported Features | Description |
| --- | --- |
| K8s version | 1.22+ |
| Operating system | RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD |
| Full cluster lifecycle management | Supported |
| K8s resource management | Supported |
| Cloud native storage | Supported |
| Cloud native networking | Calico, Cilium, Multus, and other CNIs |
| Policy management | Supports network policies, quota policies, resource limits, disaster recovery policies, and security policies |

"},{"location":"end-user/kpanda/clusters/cluster-role.html#_3","title":"Management Cluster","text":"

This cluster is used to manage worker clusters and generally does not host business workloads.

• Classic mode deploys the global service cluster and the management cluster in separate clusters, suitable for enterprises with multiple data centers and multiple architectures.
• Simple mode deploys the management cluster and the global service cluster within the same cluster.

| Supported Features | Description |
| --- | --- |
| K8s version | 1.22+ |
| Operating system | RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD |
| Full cluster lifecycle management | Supported |
| K8s resource management | Supported |
| Cloud native storage | Supported |
| Cloud native networking | Calico, Cilium, Multus, and other CNIs |
| Policy management | Supports network policies, quota policies, resource limits, disaster recovery policies, and security policies |

"},{"location":"end-user/kpanda/clusters/cluster-role.html#_4","title":"Worker Cluster","text":"

This cluster is created via Container Management and is mainly used to host business workloads. It is managed by the management cluster.

| Supported Features | Description |
| --- | --- |
| K8s version | Supports K8s 1.22 and above |
| Operating system | RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD |
| Full cluster lifecycle management | Supported |
| K8s resource management | Supported |
| Cloud native storage | Supported |
| Cloud native networking | Calico, Cilium, Multus, and other CNIs |
| Policy management | Supports network policies, quota policies, resource limits, disaster recovery policies, and security policies |

"},{"location":"end-user/kpanda/clusters/cluster-role.html#_5","title":"Integrated Cluster","text":"

This cluster is used to integrate existing standard K8s clusters, including but not limited to self-built clusters in local data centers, clusters provided by public cloud vendors, clusters provided by private cloud vendors, edge clusters, Xinchuang (信创) clusters, and heterogeneous clusters. It is mainly used to host business workloads.

| Supported Features | Description |
| --- | --- |
| K8s version | 1.18+ |
| Supported vendors | VMware Tanzu, Amazon EKS, Redhat Openshift, SUSE Rancher, Alibaba ACK, Huawei CCE, Tencent TKE, standard K8s clusters, 算丰 AI computing platform |
| Full cluster lifecycle management | Not supported |
| K8s resource management | Supported |
| Cloud native storage | Supported |
| Cloud native networking | Depends on the network mode of the integrated cluster's distribution |
| Policy management | Supports network policies, quota policies, resource limits, disaster recovery policies, and security policies |

Note

A cluster can have multiple cluster roles; for example, a cluster can be a global service cluster as well as a management cluster or worker cluster.

                            "},{"location":"end-user/kpanda/clusters/cluster-scheduler-plugin.html","title":"\u5982\u4f55\u5728\u96c6\u7fa4\u4e2d\u90e8\u7f72\u7b2c\u4e8c\u8c03\u5ea6\u5668 scheduler-plugins","text":"

                            \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5728\u96c6\u7fa4\u4e2d\u90e8\u7f72\u7b2c\u4e8c\u4e2a\u8c03\u5ea6\u5668 scheduler-plugins\u3002

                            "},{"location":"end-user/kpanda/clusters/cluster-scheduler-plugin.html#scheduler-plugins_1","title":"\u4e3a\u4ec0\u4e48\u9700\u8981 scheduler-plugins\uff1f","text":"

                            \u901a\u8fc7\u5e73\u53f0\u521b\u5efa\u7684\u96c6\u7fa4\u4e2d\u4f1a\u5b89\u88c5 K8s \u539f\u751f\u7684\u8c03\u5ea6\u5668\uff0c\u4f46\u662f\u539f\u751f\u7684\u8c03\u5ea6\u5668\u5b58\u5728\u5f88\u591a\u7684\u5c40\u9650\u6027\uff1a

                            • \u539f\u751f\u7684\u8c03\u5ea6\u5668\u65e0\u6cd5\u6ee1\u8db3\u8c03\u5ea6\u9700\u6c42\uff0c\u4f60\u53ef\u4ee5\u9009\u62e9\u4f7f\u7528 CoScheduling\u3001 CapacityScheduling \u7b49 scheduler-plugins \u63d2\u4ef6\u3002
                            • \u5728\u7279\u6b8a\u7684\u573a\u666f\uff0c\u9700\u8981\u65b0\u7684\u8c03\u5ea6\u5668\u6765\u5b8c\u6210\u8c03\u5ea6\u4efb\u52a1\u800c\u4e0d\u5f71\u54cd\u539f\u751f\u8c03\u5ea6\u5668\u7684\u6d41\u7a0b\u3002
                            • \u533a\u5206\u4e0d\u540c\u529f\u80fd\u7684\u8c03\u5ea6\u5668\uff0c\u901a\u8fc7\u5207\u6362\u8c03\u5ea6\u5668\u540d\u79f0\u6765\u5b9e\u73b0\u4e0d\u540c\u7684\u8c03\u5ea6\u573a\u666f\u3002

                            \u672c\u6587\u4ee5\u4f7f\u7528 vgpu \u8c03\u5ea6\u5668\u7684\u540c\u65f6\uff0c\u60f3\u7ed3\u5408 scheduler-plugins \u7684 coscheduling \u63d2\u4ef6\u80fd\u529b\u7684\u573a\u666f\u4e3a\u793a\u4f8b\uff0c\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5\u5e76\u4f7f\u7528 scheduler-plugins\u3002

                            "},{"location":"end-user/kpanda/clusters/cluster-scheduler-plugin.html#scheduler-plugins_2","title":"\u5b89\u88c5 scheduler-plugins","text":""},{"location":"end-user/kpanda/clusters/cluster-scheduler-plugin.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                            • kubean \u662f\u5728 v0.13.0 \u7248\u672c\u63a8\u51fa\u7684\u65b0\u529f\u80fd\uff0c\u9009\u62e9\u7ba1\u7406\u96c6\u7fa4\u65f6\u8bf7\u786e\u4fdd\u7248\u672c\u4e0d\u4f4e\u4e8e\u6b64\u7248\u672c\u3002
                            • \u5b89\u88c5 scheduler-plugins \u7248\u672c\u4e3a v0.27.8\uff0c\u8bf7\u786e\u4fdd\u96c6\u7fa4\u7248\u672c\u662f\u5426\u4e0e\u5b83\u517c\u5bb9\u3002 \u53c2\u8003\u6587\u6863 Compatibility Matrix\u3002
                            "},{"location":"end-user/kpanda/clusters/cluster-scheduler-plugin.html#_2","title":"\u5b89\u88c5\u6d41\u7a0b","text":"
                            1. \u5728 \u521b\u5efa\u96c6\u7fa4 -> \u9ad8\u7ea7\u914d\u7f6e -> \u81ea\u5b9a\u4e49\u53c2\u6570 \u4e2d\u6dfb\u52a0 scheduler-plugins \u53c2\u6570

scheduler_plugins_enabled: true\nscheduler_plugins_plugin_config:\n  - name: Coscheduling\n    args:\n      permitWaitingTimeSeconds: 10 # default is 60\n

Parameter description:

• Setting scheduler_plugins_enabled to true enables the scheduler-plugins capability.
• You can enable or disable specific plugins via the scheduler_plugins_enabled_plugins or scheduler_plugins_disabled_plugins options. See the official K8s plugin names.
• To set parameters for a custom plugin, configure scheduler_plugins_plugin_config, for example to set the permitWaitingTimeSeconds parameter of coscheduling. See the official K8s plugin configuration.
2. After the cluster is created successfully, the system automatically installs the scheduler-plugins and controller component workloads; you can check the workload status in the stateless workloads of the corresponding cluster.

                            "},{"location":"end-user/kpanda/clusters/cluster-scheduler-plugin.html#scheduler-plugins_3","title":"\u4f7f\u7528 scheduler-plugins","text":"

                            \u4ee5\u4e0b\u4ee5\u4f7f\u7528 vgpu \u8c03\u5ea6\u5668\u7684\u540c\u65f6\uff0c\u60f3\u7ed3\u5408 scheduler-plugins \u7684 coscheduling \u63d2\u4ef6\u80fd\u529b\u573a\u666f\u4e3a\u793a\u4f8b\uff0c\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528 scheduler-plugins\u3002

                            1. \u5728 Helm \u6a21\u677f\u4e2d\u5b89\u88c5 vgpu\uff0c\u8bbe\u7f6e values.yaml \u53c2\u6570\u3002

                              • schedulerName: scheduler-plugins-scheduler\uff0c\u8fd9\u662f kubean \u9ed8\u8ba4\u5b89\u88c5\u7684 scheduler-plugins \u7684 scheduler \u540d\u79f0\uff0c\u76ee\u524d\u4e0d\u80fd\u4fee\u6539\u3002
                              • scheduler.kubeScheduler.enabled: false\uff0c\u4e0d\u5b89\u88c5 kube-scheduler\uff0c\u5c06 vgpu-scheduler \u4f5c\u4e3a\u5355\u72ec\u7684 extender\u3002
                            2. \u5728 scheduler-plugins \u4e0a\u6269\u5c55 vgpu-scheduler\u3002

                              [root@master01 charts]# kubectl get cm -n scheduler-plugins scheduler-config -ojsonpath=\"{.data.scheduler-config\\.yaml}\"\n
                              apiVersion: kubescheduler.config.k8s.io/v1\nkind: KubeSchedulerConfiguration\nleaderElection:\n  leaderElect: false\nprofiles:\n  # Compose all plugins in one profile\n  - schedulerName: scheduler-plugins-scheduler\n    plugins:\n      multiPoint:\n        enabled:\n          - name: Coscheduling\n          - name: CapacityScheduling\n          - name: NodeResourceTopologyMatch\n          - name: NodeResourcesAllocatable\n        disabled:\n          - name: PrioritySort\npluginConfig:\n  - args:\n      permitWaitingTimeSeconds: 10\n    name: Coscheduling\n

Modify the scheduler-config ConfigMap of scheduler-plugins as follows:

[root@master01 charts]# kubectl edit cm -n scheduler-plugins scheduler-config\n
                              apiVersion: kubescheduler.config.k8s.io/v1\nkind: KubeSchedulerConfiguration\nleaderElection:\n  leaderElect: false\nprofiles:\n  # Compose all plugins in one profile\n  - schedulerName: scheduler-plugins-scheduler\n    plugins:\n      multiPoint:\n        enabled:\n          - name: Coscheduling\n          - name: CapacityScheduling\n          - name: NodeResourceTopologyMatch\n          - name: NodeResourcesAllocatable\n        disabled:\n          - name: PrioritySort\npluginConfig:\n  - args:\n      permitWaitingTimeSeconds: 10\n    name: Coscheduling\nextenders:\n  - urlPrefix: \"${urlPrefix}\"\n    filterVerb: filter\n    bindVerb: bind\n    nodeCacheCapable: true\n    ignorable: true\n    httpTimeout: 30s\n    weight: 1\n    enableHTTPS: true\n    tlsConfig:\n      insecure: true\n    managedResources:\n      - name: nvidia.com/vgpu\n        ignoredByScheduler: true\n      - name: nvidia.com/gpumem\n        ignoredByScheduler: true\n      - name: nvidia.com/gpucores\n        ignoredByScheduler: true\n      - name: nvidia.com/gpumem-percentage\n        ignoredByScheduler: true\n      - name: nvidia.com/priority\n        ignoredByScheduler: true\n      - name: cambricon.com/mlunum\n        ignoredByScheduler: true\n
3. After vgpu-scheduler is installed, the system automatically creates the svc, and urlPrefix specifies the URL of that svc.

Note

• The svc refers to the Pod service workload. In the namespace where the nvidia-vgpu plugin is installed, you can get the external access information corresponding to port 443 with the following command:

kubectl get svc -n ${namespace}\n
• The urlPrefix format is https://${IP address}:${port}

4. Restart the scheduler Pod of scheduler-plugins to load the new configuration file (a restart sketch follows below).
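
A minimal restart sketch, assuming scheduler-plugins runs as a Deployment in the scheduler-plugins namespace (the Deployment name is illustrative; check it with kubectl get deploy -n scheduler-plugins):

kubectl -n scheduler-plugins rollout restart deployment scheduler-plugins-scheduler\n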

Note

When creating a vgpu application, you do not need to specify a scheduler name; the vgpu-scheduler webhook automatically changes the scheduler name to scheduler-plugins-scheduler, so there is no need to specify it manually.
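
For reference, coscheduling in scheduler-plugins is driven by the PodGroup API: a PodGroup declares the minimum number of pods that must be schedulable together, and workload pods opt in via a label. A minimal sketch, assuming the standard scheduler-plugins CRD (the names and the minMember value are illustrative):

# create a PodGroup; pods join it via the label scheduling.x-k8s.io/pod-group: nginx-group in their template\nkubectl apply -f - <<EOF\napiVersion: scheduling.x-k8s.io/v1alpha1\nkind: PodGroup\nmetadata:\n  name: nginx-group\n  namespace: default\nspec:\n  minMember: 2   # pods are admitted only when at least 2 group members can be scheduled\nEOF\n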

                            "},{"location":"end-user/kpanda/clusters/cluster-status.html","title":"\u96c6\u7fa4\u72b6\u6001","text":"

                            \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u652f\u6301\u7eb3\u7ba1\u4e24\u79cd\u7c7b\u578b\u7684\u96c6\u7fa4\uff1a\u63a5\u5165\u96c6\u7fa4\u548c\u81ea\u5efa\u96c6\u7fa4\u3002 \u5173\u4e8e\u96c6\u7fa4\u7eb3\u7ba1\u7c7b\u578b\u7684\u66f4\u591a\u4fe1\u606f\uff0c\u8bf7\u53c2\u89c1\u96c6\u7fa4\u89d2\u8272\u3002

                            \u8fd9\u4e24\u79cd\u96c6\u7fa4\u7684\u72b6\u6001\u5982\u4e0b\u6240\u8ff0\u3002

                            "},{"location":"end-user/kpanda/clusters/cluster-status.html#_2","title":"\u63a5\u5165\u96c6\u7fa4","text":"\u72b6\u6001 \u63cf\u8ff0 \u63a5\u5165\u4e2d\uff08Joining\uff09 \u96c6\u7fa4\u6b63\u5728\u63a5\u5165 \u89e3\u9664\u63a5\u5165\u4e2d\uff08Removing\uff09 \u96c6\u7fa4\u6b63\u5728\u89e3\u9664\u63a5\u5165 \u8fd0\u884c\u4e2d\uff08Running\uff09 \u96c6\u7fa4\u6b63\u5e38\u8fd0\u884c \u672a\u77e5\uff08Unknown\uff09 \u96c6\u7fa4\u5df2\u5931\u8054\uff0c\u7cfb\u7edf\u5c55\u793a\u6570\u636e\u4e3a\u5931\u8054\u524d\u7f13\u5b58\u6570\u636e\uff0c\u4e0d\u4ee3\u8868\u771f\u5b9e\u6570\u636e\uff0c\u540c\u65f6\u5931\u8054\u72b6\u6001\u4e0b\u6267\u884c\u7684\u4efb\u4f55\u64cd\u4f5c\u90fd\u5c06\u4e0d\u751f\u6548\uff0c\u8bf7\u68c0\u67e5\u96c6\u7fa4\u7f51\u7edc\u8fde\u901a\u6027\u6216\u4e3b\u673a\u72b6\u6001\u3002"},{"location":"end-user/kpanda/clusters/cluster-status.html#_3","title":"\u81ea\u5efa\u96c6\u7fa4","text":"\u72b6\u6001 \u63cf\u8ff0 \u521b\u5efa\u4e2d\uff08Creating\uff09 \u96c6\u7fa4\u6b63\u5728\u521b\u5efa \u66f4\u65b0\u4e2d\uff08Updating\uff09 \u66f4\u65b0\u96c6\u7fa4 Kubernetes \u7248\u672c \u5220\u9664\u4e2d\uff08Deleting\uff09 \u96c6\u7fa4\u6b63\u5728\u5220\u9664 \u8fd0\u884c\u4e2d\uff08Running\uff09 \u96c6\u7fa4\u6b63\u5e38\u8fd0\u884c \u672a\u77e5\uff08Unknown\uff09 \u96c6\u7fa4\u5df2\u5931\u8054\uff0c\u7cfb\u7edf\u5c55\u793a\u6570\u636e\u4e3a\u5931\u8054\u524d\u7f13\u5b58\u6570\u636e\uff0c\u4e0d\u4ee3\u8868\u771f\u5b9e\u6570\u636e\uff0c\u540c\u65f6\u5931\u8054\u72b6\u6001\u4e0b\u6267\u884c\u7684\u4efb\u4f55\u64cd\u4f5c\u90fd\u5c06\u4e0d\u751f\u6548\uff0c\u8bf7\u68c0\u67e5\u96c6\u7fa4\u7f51\u7edc\u8fde\u901a\u6027\u6216\u4e3b\u673a\u72b6\u6001\u3002 \u521b\u5efa\u5931\u8d25\uff08Failed\uff09 \u96c6\u7fa4\u521b\u5efa\u5931\u8d25\uff0c\u8bf7\u67e5\u770b\u65e5\u5fd7\u4ee5\u83b7\u53d6\u8be6\u7ec6\u5931\u8d25\u539f\u56e0"},{"location":"end-user/kpanda/clusters/cluster-version.html","title":"\u96c6\u7fa4\u7248\u672c\u652f\u6301\u8303\u56f4","text":"

In the 算丰 AI computing platform, integrated clusters and self-built clusters use different version support mechanisms.

This document mainly describes the version support mechanism for self-built clusters.

The Kubernetes community supports a range of 3 versions, such as 1.26, 1.27, 1.28. When a new community version is released, the supported range increments. For example, once the latest community version 1.29 is released, the community-supported range becomes 1.27, 1.28, 1.29.

For example, if the community-supported range is 1.25, 1.26, 1.27, then the version range for creating worker clusters via the UI in the 算丰 AI computing platform is 1.24, 1.25, 1.26, and a stable version such as 1.24.7 is recommended to users.

In addition, the version range for creating worker clusters via the UI in the 算丰 AI computing platform stays closely synchronized with the community: when the community version range increments, the platform's version range for creating worker clusters also increments by one version.

                            "},{"location":"end-user/kpanda/clusters/cluster-version.html#kubernetes","title":"Kubernetes \u7248\u672c\u652f\u6301\u8303\u56f4","text":"Kubernetes \u793e\u533a\u7248\u672c\u8303\u56f4 \u81ea\u5efa\u5de5\u4f5c\u96c6\u7fa4\u7248\u672c\u8303\u56f4 \u81ea\u5efa\u5de5\u4f5c\u96c6\u7fa4\u63a8\u8350\u7248\u672c \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5b89\u88c5\u5668 \u53d1\u5e03\u65f6\u95f4
                            • 1.26
                            • 1.27
                            • 1.28
                            • 1.25
                            • 1.26
                            • 1.27
                            1.27.5 v0.13.0 2023.11.30"},{"location":"end-user/kpanda/clusters/create-cluster.html","title":"\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4","text":"

In the 算丰 AI computing platform Container Management module, cluster roles fall into four categories: global service cluster, management cluster, worker cluster, and integrated cluster. Integrated clusters can only be integrated from third-party vendors; see Integrating Clusters.

This page describes how to create a worker cluster. By default, the OS type and CPU architecture of a new worker cluster's worker nodes must match those of the global service cluster. To create a cluster using nodes whose OS or architecture differs from the global service cluster, see Creating an Ubuntu Worker Cluster on a CentOS Management Platform.

It is recommended to create clusters using the operating systems supported by the 算丰 AI computing platform. If your local nodes are outside the supported range, see Creating Clusters on Non-Mainstream Operating Systems.

                            "},{"location":"end-user/kpanda/clusters/create-cluster.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                            \u521b\u5efa\u96c6\u7fa4\u4e4b\u524d\u9700\u8981\u6ee1\u8db3\u4e00\u5b9a\u7684\u524d\u63d0\u6761\u4ef6\uff1a

                            • \u6839\u636e\u4e1a\u52a1\u9700\u6c42\u51c6\u5907\u4e00\u5b9a\u6570\u91cf\u7684\u8282\u70b9\uff0c\u4e14\u8282\u70b9 OS \u7c7b\u578b\u548c CPU \u67b6\u6784\u4e00\u81f4\u3002
                            • \u63a8\u8350 Kubernetes \u7248\u672c 1.29.5\uff0c\u5177\u4f53\u7248\u672c\u8303\u56f4\uff0c\u53c2\u9605 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u96c6\u7fa4\u7248\u672c\u652f\u6301\u4f53\u7cfb\uff0c \u76ee\u524d\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u81ea\u5efa\u5de5\u4f5c\u96c6\u7fa4\u7248\u672c\u8303\u56f4\u5728 v1.28.0-v1.30.2\u3002\u5982\u9700\u521b\u5efa\u4f4e\u7248\u672c\u7684\u96c6\u7fa4\uff0c\u8bf7\u53c2\u8003\u96c6\u7fa4\u7248\u672c\u652f\u6301\u8303\u56f4\u3001\u90e8\u7f72\u4e0e\u5347\u7ea7 Kubean \u5411\u4e0b\u517c\u5bb9\u7248\u672c\u3002
                            • \u76ee\u6807\u4e3b\u673a\u9700\u8981\u5141\u8bb8 IPv4 \u8f6c\u53d1\u3002\u5982\u679c Pod \u548c Service \u4f7f\u7528\u7684\u662f IPv6\uff0c\u5219\u76ee\u6807\u670d\u52a1\u5668\u9700\u8981\u5141\u8bb8 IPv6 \u8f6c\u53d1\u3002
                            • \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u6682\u4e0d\u63d0\u4f9b\u5bf9\u9632\u706b\u5899\u7684\u7ba1\u7406\u529f\u80fd\uff0c\u60a8\u9700\u8981\u9884\u5148\u81ea\u884c\u5b9a\u4e49\u76ee\u6807\u4e3b\u673a\u9632\u706b\u5899\u89c4\u5219\u3002\u4e3a\u4e86\u907f\u514d\u521b\u5efa\u96c6\u7fa4\u7684\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u95ee\u9898\uff0c\u5efa\u8bae\u7981\u7528\u76ee\u6807\u4e3b\u673a\u7684\u9632\u706b\u5899\u3002
                            • \u53c2\u9605\u8282\u70b9\u53ef\u7528\u6027\u68c0\u67e5\u3002
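
A minimal sketch for checking and enabling IPv4 forwarding on a target host (standard Linux sysctl; run as root):

sysctl net.ipv4.ip_forward        # 1 means forwarding is enabled\nsysctl -w net.ipv4.ip_forward=1   # enable for the running system\necho 'net.ipv4.ip_forward = 1' >> /etc/sysctl.conf && sysctl -p   # persist across reboots\n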
                            "},{"location":"end-user/kpanda/clusters/create-cluster.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                            1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u4e2d\uff0c\u70b9\u51fb \u521b\u5efa\u96c6\u7fa4 \u6309\u94ae\u3002

                            2. \u53c2\u8003\u4e0b\u5217\u8981\u6c42\u586b\u5199\u96c6\u7fa4\u57fa\u672c\u4fe1\u606f\uff0c\u5e76\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                              • \u96c6\u7fa4\u540d\u79f0\uff1a\u540d\u79f0\u53ea\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u548c\u8fde\u5b57\u7b26\uff08\"-\"\uff09\uff0c\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u8005\u6570\u5b57\u5f00\u5934\u548c\u7ed3\u5c3e\uff0c\u6700\u957f 63 \u4e2a\u5b57\u7b26\u3002
                              • \u88ab\u7eb3\u7ba1\uff1a\u9009\u62e9\u7531\u54ea\u4e2a\u96c6\u7fa4\u6765\u7ba1\u7406\u6b64\u96c6\u7fa4\uff0c\u4f8b\u5982\u5728\u96c6\u7fa4\u751f\u547d\u5468\u671f\u4e2d\u521b\u5efa\u3001\u5347\u7ea7\u3001\u8282\u70b9\u6269\u7f29\u5bb9\u3001\u5220\u9664\u96c6\u7fa4\u7b49\u3002
                              • \u8fd0\u884c\u65f6\uff1a\u9009\u62e9\u96c6\u7fa4\u7684\u8fd0\u884c\u65f6\u73af\u5883\uff0c\u76ee\u524d\u652f\u6301 containerd \u548c docker\uff0c\u5982\u4f55\u9009\u62e9\u5bb9\u5668\u8fd0\u884c\u65f6\u3002
                              • Kubernetes \u7248\u672c\uff1a\u652f\u6301 3 \u4e2a\u7248\u672c\u8de8\u5ea6\uff0c\u5177\u4f53\u53d6\u51b3\u4e8e\u88ab\u7eb3\u7ba1\u96c6\u7fa4\u6240\u652f\u6301\u7684\u7248\u672c\u3002

3. Fill in the node configuration information and click Next.

• High availability: when enabled, at least 3 controller nodes must be provided; when disabled, 1 controller node is enough.

High availability mode is recommended in production environments.

• Authentication method: choose whether to access nodes via username/password or public/private key.

If accessing nodes via key, the nodes' SSH keys must be configured in advance. See Using SSH Keys to Authenticate Nodes.

• Use unified password: when enabled, the access password is the same for all nodes in the cluster; enter the unified password for accessing all nodes below. When disabled, a separate username and password can be set for each node.

• Node information: fill in the node names and IP addresses.

• Custom parameters: set variables that control how Ansible interacts with remote hosts. For the variables that can be set, see Connecting to Hosts: Behavioral Inventory Parameters.
• NTP time synchronization: when enabled, time on all nodes is synchronized automatically; an NTP server address must be provided.

4. Click Node Check at the bottom of the page. If the check passes, continue to the next step; if it fails, update the Node Information and run the check again.

5. Fill in the network configuration information and click Next.

• Network plugin: provides network services for Pods in the cluster and cannot be changed after the cluster is created. cilium and calico are supported; selecting none means no network plugin is installed for now.

• Container CIDR: the network segment used by containers in the cluster, which determines the upper limit on the number of containers in the cluster. It cannot be modified after creation.

• Service CIDR: the network segment of the Service resources used for mutual access between containers in the same cluster, which determines the upper limit on the number of Service resources. It cannot be modified after creation.

6. Fill in the plugin configuration information and click Next.

7. Fill in the advanced configuration information and click OK.

• kubelet_max_pods: set the maximum number of Pods per node; the default is 110.
• hostname_overide: reset the hostname (keeping the default is recommended, using the system-generated name as the hostname).
• kubernetes_audit: Kubernetes audit logging, enabled by default.
• auto_renew_certificate: automatically renew the Kubernetes control plane certificates on the first Monday of each month, enabled by default.
• disable_firewalld&ufw: disable the firewall so that nodes remain reachable during installation.
• insecure_registries: private image registry configuration. When creating a cluster with a private image registry, fill in the private registry address here to bypass the container engine's certificate verification when pulling images, avoiding certificate issues that would make the container engine refuse access.
• yum_repos: fill in the Yum repository address. In offline environments, the default address given is for reference only; fill it in according to the actual situation.

Success

• After filling in the information correctly and completing the above steps, the page indicates that the cluster is being created.
• Creating a cluster takes quite a long time, so please wait patiently. In the meantime, you can click the Return to Cluster List button to let the installation run in the background.
• To view the current status, click Real-Time Logs.

Note

• When a cluster shows Unknown status, the cluster is disconnected.
• The data displayed by the system is cached data from before the disconnection and does not represent real data.
• Any operation performed while disconnected will not take effect; check the cluster's network connectivity or host status.

                            "},{"location":"end-user/kpanda/clusters/delete-cluster.html","title":"\u5378\u8f7d/\u89e3\u9664\u63a5\u5165\u96c6\u7fa4","text":"

                            \u901a\u8fc7\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0 \u521b\u5efa\u7684\u96c6\u7fa4 \u652f\u6301 \u5378\u8f7d\u96c6\u7fa4 \u6216 \u89e3\u9664\u63a5\u5165 \u64cd\u4f5c\uff0c\u4ece\u5176\u4ed6\u73af\u5883\u76f4\u63a5 \u63a5\u5165\u7684\u96c6\u7fa4 \u4ec5\u652f\u6301 \u89e3\u9664\u63a5\u5165 \u64cd\u4f5c\u3002

                            Info

                            \u5982\u679c\u60f3\u5f7b\u5e95\u5220\u9664\u4e00\u4e2a\u63a5\u5165\u7684\u96c6\u7fa4\uff0c\u9700\u8981\u524d\u5f80\u521b\u5efa\u8be5\u96c6\u7fa4\u7684\u539f\u59cb\u5e73\u53f0\u64cd\u4f5c\u3002\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e0d\u652f\u6301\u5220\u9664\u63a5\u5165\u7684\u96c6\u7fa4\u3002

                            \u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\uff0c \u5378\u8f7d\u96c6\u7fa4 \u548c \u89e3\u9664\u63a5\u5165 \u7684\u533a\u522b\u5728\u4e8e\uff1a

                            • \u5378\u8f7d\u96c6\u7fa4 \u64cd\u4f5c\u4f1a\u9500\u6bc1\u8be5\u96c6\u7fa4\uff0c\u5e76\u91cd\u7f6e\u96c6\u7fa4\u4e0b\u6240\u6709\u8282\u70b9\u7684\u6570\u636e\u3002\u6240\u6709\u6570\u636e\u90fd\u5c06\u88ab\u9500\u6bc1\uff0c\u5efa\u8bae\u505a\u597d\u5907\u4efd\u3002\u540e\u671f\u9700\u8981\u65f6\u5fc5\u987b\u91cd\u65b0\u521b\u5efa\u4e00\u4e2a\u96c6\u7fa4\u3002
                            • \u89e3\u9664\u63a5\u5165 \u64cd\u4f5c\u4f1a\u5c06\u5f53\u524d\u96c6\u7fa4\u4ece\u5e73\u53f0\u4e2d\u79fb\u9664\uff0c\u4e0d\u4f1a\u6467\u6bc1\u96c6\u7fa4\uff0c\u4e5f\u4e0d\u4f1a\u9500\u6bc1\u6570\u636e\u3002
                            "},{"location":"end-user/kpanda/clusters/delete-cluster.html#_2","title":"\u5378\u8f7d\u96c6\u7fa4","text":"

                            Note

                            • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u5907 Admin \u6216 Kpanda Owner \u6743\u9650\u624d\u80fd\u6267\u884c\u5378\u8f7d\u96c6\u7fa4\u7684\u64cd\u4f5c\u3002
                            • \u5378\u8f7d\u96c6\u7fa4\u4e4b\u524d\uff0c\u5e94\u8be5\u5148\u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u67d0\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u5728 \u96c6\u7fa4\u8fd0\u7ef4 -> \u96c6\u7fa4\u8bbe\u7f6e -> \u9ad8\u7ea7\u914d\u7f6e \u4e2d\u5173\u95ed \u96c6\u7fa4\u5220\u9664\u4fdd\u62a4 \uff0c \u5426\u5219\u4e0d\u663e\u793a \u5378\u8f7d\u96c6\u7fa4 \u7684\u9009\u9879\u3002
                            • \u5168\u5c40\u670d\u52a1\u96c6\u7fa4 \u4e0d\u652f\u6301\u5378\u8f7d\u6216\u79fb\u9664\u64cd\u4f5c\u3002
                            1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u627e\u5230\u9700\u8981\u5378\u8f7d\u96c6\u7fa4\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \u5e76\u5728\u4e0b\u62c9\u5217\u8868\u4e2d\u70b9\u51fb \u5378\u8f7d\u96c6\u7fa4 \u3002

                            2. \u8f93\u5165\u96c6\u7fa4\u540d\u79f0\u8fdb\u884c\u786e\u8ba4\uff0c\u7136\u540e\u70b9\u51fb \u5220\u9664 \u3002

                              \u5982\u679c\u63d0\u793a\u96c6\u7fa4\u4e2d\u8fd8\u6709\u4e00\u4e9b\u6b8b\u7559\u7684\u8d44\u6e90\uff0c\u5219\u9700\u8981\u6309\u63d0\u793a\u5220\u9664\u76f8\u5173\u8d44\u6e90\u540e\u624d\u80fd\u6267\u884c\u5378\u8f7d\u64cd\u4f5c\u3002

                            3. \u8fd4\u56de \u96c6\u7fa4\u5217\u8868 \u9875\u53ef\u4ee5\u770b\u5230\u8be5\u96c6\u7fa4\u7684\u72b6\u6001\u5df2\u7ecf\u53d8\u6210 \u5220\u9664\u4e2d \u3002\u5378\u8f7d\u96c6\u7fa4\u53ef\u80fd\u9700\u8981\u4e00\u6bb5\u65f6\u95f4\uff0c\u8bf7\u60a8\u8010\u5fc3\u7b49\u5019\u3002

                            "},{"location":"end-user/kpanda/clusters/delete-cluster.html#_3","title":"\u89e3\u9664\u63a5\u5165\u96c6\u7fa4","text":"

                            Note

                            • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u5907 Admin \u6216 Kpanda Owner \u6743\u9650\u624d\u80fd\u6267\u884c\u89e3\u9664\u63a5\u5165\u7684\u64cd\u4f5c\u3002
                            • \u5168\u5c40\u670d\u52a1\u96c6\u7fa4 \u4e0d\u652f\u6301\u89e3\u9664\u63a5\u5165\u3002
                            1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u627e\u5230\u9700\u8981\u5378\u8f7d\u96c6\u7fa4\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \u5e76\u5728\u4e0b\u62c9\u5217\u8868\u4e2d\u70b9\u51fb \u89e3\u9664\u63a5\u5165 \u3002

                            2. \u8f93\u5165\u96c6\u7fa4\u540d\u79f0\u8fdb\u884c\u786e\u8ba4\uff0c\u7136\u540e\u70b9\u51fb \u89e3\u9664\u63a5\u5165 \u3002

                              \u5982\u679c\u63d0\u793a\u96c6\u7fa4\u4e2d\u8fd8\u6709\u4e00\u4e9b\u6b8b\u7559\u7684\u8d44\u6e90\uff0c\u5219\u9700\u8981\u6309\u63d0\u793a\u5220\u9664\u76f8\u5173\u8d44\u6e90\u540e\u624d\u80fd\u89e3\u9664\u63a5\u5165\u3002

                            "},{"location":"end-user/kpanda/clusters/delete-cluster.html#_4","title":"\u6e05\u7406\u89e3\u9664\u63a5\u5165\u96c6\u7fa4\u914d\u7f6e\u6570\u636e","text":"

                            \u96c6\u7fa4\u88ab\u79fb\u9664\u540e\uff0c\u96c6\u7fa4\u4e2d\u539f\u6709\u7684\u7ba1\u7406\u5e73\u53f0\u6570\u636e\u4e0d\u4f1a\u88ab\u81ea\u52a8\u6e05\u9664\uff0c\u5982\u9700\u5c06\u96c6\u7fa4\u63a5\u5165\u81f3\u65b0\u7ba1\u7406\u5e73\u53f0\u5219\u9700\u8981\u624b\u52a8\u6267\u884c\u5982\u4e0b\u64cd\u4f5c\uff1a

                            \u5220\u9664 kpanda-system\u3001insight-system \u547d\u540d\u7a7a\u95f4

                            kubectl delete ns kpanda-system insight-system\n
                            "},{"location":"end-user/kpanda/clusters/integrate-cluster.html","title":"\u63a5\u5165\u96c6\u7fa4","text":"

                            \u901a\u8fc7\u63a5\u5165\u96c6\u7fa4\u64cd\u4f5c\uff0c\u80fd\u591f\u5bf9\u4f17\u591a\u4e91\u670d\u52a1\u5e73\u53f0\u96c6\u7fa4\u548c\u672c\u5730\u79c1\u6709\u7269\u7406\u96c6\u7fa4\u8fdb\u884c\u7edf\u4e00\u7eb3\u7ba1\uff0c\u5f62\u6210\u7edf\u4e00\u6cbb\u7406\u5e73\u53f0\uff0c\u6709\u6548\u907f\u514d\u4e86\u88ab\u5382\u5546\u9501\u5b9a\u98ce\u9669\uff0c\u52a9\u529b\u4f01\u4e1a\u4e1a\u52a1\u5b89\u5168\u4e0a\u4e91\u3002

                            \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u652f\u6301\u63a5\u5165\u591a\u79cd\u4e3b\u6d41\u7684\u5bb9\u5668\u96c6\u7fa4\uff0c\u4f8b\u5982 Redhat Openshift, SUSE Rancher, VMware Tanzu, Amazon EKS, Aliyun ACK, Huawei CCE, Tencent TKE, \u6807\u51c6 Kubernetes \u96c6\u7fa4\u3002

                            "},{"location":"end-user/kpanda/clusters/integrate-cluster.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                            • \u51c6\u5907\u4e00\u4e2a\u5f85\u63a5\u5165\u7684\u96c6\u7fa4\uff0c\u786e\u4fdd\u5bb9\u5668\u7ba1\u7406\u96c6\u7fa4\u548c\u5f85\u63a5\u5165\u96c6\u7fa4\u4e4b\u95f4\u7f51\u7edc\u901a\u7545\uff0c\u5e76\u4e14\u96c6\u7fa4\u7684 Kubernetes \u7248\u672c 1.22+\u3002
                            • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 Kpanda Owner \u6216\u66f4\u9ad8\u6743\u9650\u3002
                            "},{"location":"end-user/kpanda/clusters/integrate-cluster.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                            1. \u8fdb\u5165 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u63a5\u5165\u96c6\u7fa4 \u6309\u94ae\u3002

                            2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\u3002

                              • \u96c6\u7fa4\u540d\u79f0\uff1a\u540d\u79f0\u5e94\u5177\u6709\u552f\u4e00\u6027\uff0c\u8bbe\u7f6e\u540e\u4e0d\u53ef\u66f4\u6539\u3002\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26(\"-\")\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\u3002
                              • \u96c6\u7fa4\u522b\u540d\uff1a\u53ef\u8f93\u5165\u4efb\u610f\u5b57\u7b26\uff0c\u4e0d\u8d85\u8fc7 60 \u4e2a\u5b57\u7b26\u3002
                              • \u53d1\u884c\u7248\uff1a\u96c6\u7fa4\u7684\u53d1\u884c\u5382\u5546\uff0c\u5305\u62ec\u5e02\u573a\u4e3b\u6d41\u4e91\u5382\u5546\u548c\u672c\u5730\u79c1\u6709\u7269\u7406\u96c6\u7fa4\u3002
                            3. \u586b\u5199\u76ee\u6807\u96c6\u7fa4\u7684 KubeConfig\uff0c\u70b9\u51fb \u9a8c\u8bc1 Config \uff0c\u9a8c\u8bc1\u901a\u8fc7\u540e\u624d\u80fd\u6210\u529f\u63a5\u5165\u96c6\u7fa4\u3002

                              If you do not know how to obtain the cluster's KubeConfig file, click How to obtain kubeConfig in the upper right corner of the input box to see the corresponding steps.

                            4. Confirm that all parameters are filled in correctly and click OK in the lower right corner of the page.

                            Note

                            • A newly integrated cluster is in the Integrating state and changes to Running once the integration succeeds.
                            • If the cluster stays in the Integrating state, check whether the integration script ran successfully on the target cluster. For more details about cluster states, see Cluster Status.
                            "},{"location":"end-user/kpanda/clusters/integrate-rancher-cluster.html","title":"\u63a5\u5165 rancher \u96c6\u7fa4","text":"

                            \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u63a5\u5165 rancher \u96c6\u7fa4\u3002

                            "},{"location":"end-user/kpanda/clusters/integrate-rancher-cluster.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                            • \u51c6\u5907\u4e00\u4e2a\u5177\u6709\u7ba1\u7406\u5458\u6743\u9650\u7684\u5f85\u63a5\u5165 ranhcer \u96c6\u7fa4\uff0c\u786e\u4fdd\u5bb9\u5668\u7ba1\u7406\u96c6\u7fa4\u548c\u5f85\u63a5\u5165\u96c6\u7fa4\u4e4b\u95f4\u7f51\u7edc\u901a\u7545\u3002
                            • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 Kpanda Owner \u6216\u66f4\u9ad8\u6743\u9650\u3002
                            "},{"location":"end-user/kpanda/clusters/integrate-rancher-cluster.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"end-user/kpanda/clusters/integrate-rancher-cluster.html#rancher-serviceaccount","title":"\u6b65\u9aa4\u4e00\uff1a\u5728 rancher \u96c6\u7fa4\u521b\u5efa\u5177\u6709\u7ba1\u7406\u5458\u6743\u9650\u7684 ServiceAccount \u7528\u6237","text":"
                            1. \u4f7f\u7528\u5177\u6709\u7ba1\u7406\u5458\u6743\u9650\u7684\u89d2\u8272\u8fdb\u5165 rancher \u96c6\u7fa4\uff0c\u5e76\u4f7f\u7528\u7ec8\u7aef\u65b0\u5efa\u4e00\u4e2a\u540d\u4e3a sa.yaml \u7684\u6587\u4ef6\u3002

                              vi sa.yaml\n

                              Press the i key to enter insert mode, then enter the following content:

                              sa.yaml
                              apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: rancher-rke\nrules:\n  - apiGroups:\n      - '*'\n    resources:\n      - '*'\n    verbs:\n      - '*'\n  - nonResourceURLs:\n      - '*'\n    verbs:\n      - '*'\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: rancher-rke\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: rancher-rke\nsubjects:\n  - kind: ServiceAccount\n    name: rancher-rke\n    namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: rancher-rke\n  namespace: kube-system\n

                              Press the esc key to exit insert mode, then type :wq to save and quit.

                            2. Run the following command in the current directory to create the ServiceAccount named rancher-rke (SA for short below):

                              kubectl apply -f sa.yaml\n

                              The expected output is as follows:

                              clusterrole.rbac.authorization.k8s.io/rancher-rke created\nclusterrolebinding.rbac.authorization.k8s.io/rancher-rke created\nserviceaccount/rancher-rke created\n
                            3. Create a secret named rancher-rke-secret and bind it to the rancher-rke SA.

                              kubectl apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: rancher-rke-secret\n  namespace: kube-system\n  annotations:\n    kubernetes.io/service-account.name: rancher-rke\n  type: kubernetes.io/service-account-token\nEOF\n

                              The expected output is as follows:

                              secret/rancher-rke-secret created\n

                              Note

                              Before Kubernetes 1.24, the service account token secret is generated automatically, so if your cluster version is lower than 1.24, skip this step and go directly to the next one.

                            4. Find the secret of the rancher-rke SA:

                              kubectl -n kube-system get secret | grep rancher-rke | awk '{print $1}'\n

                              Expected output:

                              rancher-rke-secret\n

                              View the details of the secret rancher-rke-secret:

                              kubectl -n kube-system describe secret rancher-rke-secret\n

                              Expected output:

                              Name:         rancher-rke-secret\nNamespace:    kube-system\nLabels:       <none>\nAnnotations:  kubernetes.io/service-account.name: rancher-rke\n            kubernetes.io/service-account.uid: d83df5d9-bd7d-488d-a046-b740618a0174\n\nType:  kubernetes.io/service-account-token\n\nData\n====\nca.crt:     570 bytes\nnamespace:  11 bytes\ntoken:      eyJhbGciOiJSUzI1NiIsImtpZCI6IjUtNE9nUWZLRzVpbEJORkZaNmtCQXhqVzRsZHU4MHhHcDBfb0VCaUo0V1kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJyYW5jaGVyLXJrZS1zZWNyZXQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoicmFuY2hlci1ya2UiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkODNkZjVkOS1iZDdkLTQ4OGQtYTA0Ni1iNzQwNjE4YTAxNzQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06cmFuY2hlci1ya2UifQ.VNsMtPEFOdDDeGt_8VHblcMRvjOwPXMM-79o9UooHx6q-VkHOcIOp3FOT2hnEdNnIsyODZVKCpEdCgyozX-3y5x2cZSZpocnkMcBbQm-qfTyUcUhAY7N5gcYUtHUhvRAsNWJcsDCn6d96gT_qo-ddo_cT8Ri39Lc123FDYOnYG-YGFKSgRQVy7Vyv34HIajZCCjZzy7i--eE_7o4DXeTjNqAFMFstUxxHBOXI3Rdn1zKQKqh5Jhg4ES7X-edSviSUfJUX-QV_LlAw5DuAyGPH7bDH4QaQ5k-p6cIctmpWZE-9wRDlKA4LYRblKE7MJcI6OmM4ldlMM0Jc8N-gCtl4w\n
                            "},{"location":"end-user/kpanda/clusters/integrate-rancher-cluster.html#rancher-rke-sa-kubeconfig","title":"\u6b65\u9aa4\u4e8c\uff1a\u5728\u672c\u5730\u4f7f\u7528 rancher-rke SA \u7684\u8ba4\u8bc1\u4fe1\u606f\u66f4\u65b0 kubeconfig \u6587\u4ef6","text":"

                            \u5728\u4efb\u610f\u4e00\u53f0\u5b89\u88c5\u4e86 kubelet \u7684\u672c\u5730\u8282\u70b9\u6267\u884c\u5982\u4e0b\u64cd\u4f5c\uff1a

                            1. \u914d\u7f6e kubelet token\uff1a

                              kubectl config set-credentials rancher-rke --token=<token>   # <token>: the token value from rancher-rke-secret\n

                              For example:

                              kubectl config set-credentials eks-admin --token=eyJhbGciOiJSUzI1NiIsImtpZCI6IjUtNE9nUWZLRzVpbEJORkZaNmtCQXhqVzRsZHU4MHhHcDBfb0VCaUo0V1kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJyYW5jaGVyLXJrZS1zZWNyZXQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoicmFuY2hlci1ya2UiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkODNkZjVkOS1iZDdkLTQ4OGQtYTA0Ni1iNzQwNjE4YTAxNzQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06cmFuY2hlci1ya2UifQ.VNsMtPEFOdDDeGt_8VHblcMRvjOwPXMM-79o9UooHx6q-VkHOcIOp3FOT2hnEdNnIsyODZVKCpEdCgyozX-3y5x2cZSZpocnkMcBbQm-qfTyUcUhAY7N5gcYUtHUhvRAsNWJcsDCn6d96gT_qo-ddo_cT8Ri39Lc123FDYOnYG-YGFKSgRQVy7Vyv34HIajZCCjZzy7i--eE_7o4DXeTjNqAFMFstUxxHBOXI3Rdn1zKQKqh5Jhg4ES7X-edSviSUfJUX-QV_LlAw5DuAyGPH7bDH4QaQ5k-p6cIctmpWZE-9wRDlKA4LYRblKE7MJcI6OmM4ldlMM0Jc8N-gCtl4w\n
                            2. Configure the kubectl APIServer information:

                              kubectl config set-cluster {cluster-name} --insecure-skip-tls-verify=true --server={APIServer}\n
                              • {cluster-name}: the name of the Rancher cluster.
                              • {APIServer}: the access address of the cluster, usually the control plane node IP plus port 6443, for example https://10.X.X.X:6443

                              For example:

                              kubectl config set-cluster rancher-rke --insecure-skip-tls-verify=true --server=https://10.X.X.X:6443\n
                            3. Configure the kubectl context information:

                              kubectl config set-context {context-name} --cluster={cluster-name} --user={SA-username}\n

                              For example:

                              kubectl config set-context rancher-rke-context --cluster=rancher-rke --user=rancher-rke\n
                            4. Switch kubectl to the context rancher-rke-context that was just created:

                              kubectl config use-context rancher-rke-context\n
                            5. Get the kubeconfig information of the context rancher-rke-context.

                              kubectl config view --minify --flatten --raw\n

                              Expected output:

                              apiVersion: v1\nclusters:\n- cluster:\n    insecure-skip-tls-verify: true\n    server: https://77C321BCF072682C70C8665ED4BFA10D.gr7.ap-southeast-1.eks.amazonaws.com\n  name: joincluster\ncontexts:\n- context:\n    cluster: joincluster\n    user: eks-admin\n  name: ekscontext\ncurrent-context: ekscontext\nkind: Config\npreferences: {}\nusers:\n- name: eks-admin\n  user:\n    token: eyJhbGciOiJSUzI1NiIsImtpZCI6ImcxTjJwNkktWm5IbmRJU1RFRExvdWY1TGFWVUtGQ3VIejFtNlFQcUNFalEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2V\n
                            "},{"location":"end-user/kpanda/clusters/integrate-rancher-cluster.html#ai","title":"\u6b65\u9aa4\u4e09\uff1a\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u754c\u9762\u63a5\u5165\u96c6\u7fa4","text":"

                            \u4f7f\u7528\u521a\u521a\u83b7\u53d6\u7684 kubeconfig \u6587\u4ef6\uff0c\u53c2\u8003\u63a5\u5165\u96c6\u7fa4\u6587\u6863\uff0c\u5c06 rancher \u96c6\u7fa4\u63a5\u5165\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u3002

                            "},{"location":"end-user/kpanda/clusters/k8s-cert.html","title":"Kubernetes \u96c6\u7fa4\u8bc1\u4e66\u66f4\u65b0","text":"

                            \u4e3a\u4fdd\u8bc1 Kubernetes \u5404\u7ec4\u4ef6\u4e4b\u95f4\u7684\u901a\u4fe1\u5b89\u5168\uff0c\u7ec4\u4ef6\u4e4b\u95f4\u7684\u8c03\u7528\u4f1a\u8fdb\u884c TLS \u8eab\u4efd\u9a8c\u8bc1\uff0c\u6267\u884c\u9a8c\u8bc1\u64cd\u4f5c\u9700\u8981\u914d\u7f6e\u96c6\u7fa4 PKI \u8bc1\u4e66\u3002

                            \u96c6\u7fa4\u8bc1\u4e66\u6709\u6548\u671f\u4e3a1\u5e74\uff0c\u4e3a\u907f\u514d\u8bc1\u4e66\u8fc7\u671f\u5bfc\u81f4\u4e1a\u52a1\u65e0\u6cd5\u4f7f\u7528\uff0c\u8bf7\u53ca\u65f6\u66f4\u65b0\u8bc1\u4e66\u3002

                            \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u624b\u52a8\u8fdb\u884c\u8bc1\u4e66\u66f4\u65b0\u3002

                            "},{"location":"end-user/kpanda/clusters/k8s-cert.html#_1","title":"\u68c0\u67e5\u8bc1\u4e66\u662f\u5426\u8fc7\u671f","text":"

                            \u60a8\u53ef\u4ee5\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u67e5\u770b\u8bc1\u4e66\u662f\u5426\u8fc7\u671f\uff1a

                            kubeadm certs check-expiration\n

                            The output is similar to the following:

                            CERTIFICATE                EXPIRES                  RESIDUAL TIME   CERTIFICATE AUTHORITY   EXTERNALLY MANAGED\nadmin.conf                 Dec 14, 2024 07:26 UTC   204d                                    no      \napiserver                  Dec 14, 2024 07:26 UTC   204d            ca                      no      \napiserver-etcd-client      Dec 14, 2024 07:26 UTC   204d            etcd-ca                 no      \napiserver-kubelet-client   Dec 14, 2024 07:26 UTC   204d            ca                      no      \ncontroller-manager.conf    Dec 14, 2024 07:26 UTC   204d                                    no      \netcd-healthcheck-client    Dec 14, 2024 07:26 UTC   204d            etcd-ca                 no      \netcd-peer                  Dec 14, 2024 07:26 UTC   204d            etcd-ca                 no      \netcd-server                Dec 14, 2024 07:26 UTC   204d            etcd-ca                 no      \nfront-proxy-client         Dec 14, 2024 07:26 UTC   204d            front-proxy-ca          no      \nscheduler.conf             Dec 14, 2024 07:26 UTC   204d                                    no      \n\nCERTIFICATE AUTHORITY   EXPIRES                  RESIDUAL TIME   EXTERNALLY MANAGED\nca                      Dec 12, 2033 07:26 UTC   9y              no      \netcd-ca                 Dec 12, 2033 07:26 UTC   9y              no      \nfront-proxy-ca          Dec 12, 2033 07:26 UTC   9y              no      \n
                            "},{"location":"end-user/kpanda/clusters/k8s-cert.html#_2","title":"\u624b\u52a8\u66f4\u65b0\u8bc1\u4e66","text":"

                            \u60a8\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u547d\u4ee4\u624b\u52a8\u66f4\u65b0\u8bc1\u4e66\uff0c\u53ea\u9700\u5e26\u4e0a\u5408\u9002\u7684\u547d\u4ee4\u884c\u9009\u9879\u3002\u66f4\u65b0\u8bc1\u4e66\u524d\u8bf7\u5148\u5907\u4efd\u5f53\u524d\u8bc1\u4e66\u3002

                            \u66f4\u65b0\u6307\u5b9a\u8bc1\u4e66\uff1a

                            kubeadm certs renew <cert-name>\n
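
                            For example, to renew only the API server serving certificate (the certificate names are those listed by kubeadm certs check-expiration):

                            kubeadm certs renew apiserver\n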

                            Renew all certificates:

                            kubeadm certs renew all\n

                            The renewed certificates can be found in the /etc/kubernetes/pki directory, with the validity extended by one year. The following corresponding configuration files are updated at the same time:

                            • /etc/kubernetes/admin.conf
                            • /etc/kubernetes/controller-manager.conf
                            • /etc/kubernetes/scheduler.conf
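
                            To confirm the new expiry date, you can inspect any renewed certificate, for example with openssl:

                            openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -enddate\n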

                            Note

                            • If you are running a highly available cluster, this command needs to be executed on all control plane nodes.
                            • This command performs the renewal using the CA (or front-proxy-CA) certificate and the key stored in /etc/kubernetes/pki.
                            "},{"location":"end-user/kpanda/clusters/k8s-cert.html#_3","title":"\u91cd\u542f\u670d\u52a1","text":"

                            \u6267\u884c\u66f4\u65b0\u64cd\u4f5c\u4e4b\u540e\uff0c\u4f60\u9700\u8981\u91cd\u542f\u63a7\u5236\u9762 Pod\u3002\u56e0\u4e3a\u52a8\u6001\u8bc1\u4e66\u91cd\u8f7d\u76ee\u524d\u8fd8\u4e0d\u88ab\u6240\u6709\u7ec4\u4ef6\u548c\u8bc1\u4e66\u652f\u6301\uff0c\u6240\u6709\u8fd9\u9879\u64cd\u4f5c\u662f\u5fc5\u987b\u7684\u3002

                            \u9759\u6001 Pod \u662f\u88ab\u672c\u5730 kubelet \u800c\u4e0d\u662f API \u670d\u52a1\u5668\u7ba1\u7406\uff0c\u6240\u4ee5 kubectl \u4e0d\u80fd\u7528\u6765\u5220\u9664\u6216\u91cd\u542f\u4ed6\u4eec\u3002

                            \u8981\u91cd\u542f\u9759\u6001 Pod\uff0c\u4f60\u53ef\u4ee5\u4e34\u65f6\u5c06\u6e05\u5355\u6587\u4ef6\u4ece /etc/kubernetes/manifests/ \u79fb\u9664\u5e76\u7b49\u5f85 20 \u79d2\u3002 \u53c2\u8003 KubeletConfiguration \u7ed3\u6784\u4e2d\u7684 fileCheckFrequency \u503c\u3002

                            \u5982\u679c Pod \u4e0d\u5728\u6e05\u5355\u76ee\u5f55\u91cc\uff0ckubelet \u5c06\u4f1a\u7ec8\u6b62\u5b83\u3002 \u5728\u53e6\u4e00\u4e2a fileCheckFrequency \u5468\u671f\u4e4b\u540e\u4f60\u53ef\u4ee5\u5c06\u6587\u4ef6\u79fb\u56de\u53bb\uff0ckubelet \u53ef\u4ee5\u5b8c\u6210 Pod \u7684\u91cd\u5efa\uff0c\u800c\u7ec4\u4ef6\u7684\u8bc1\u4e66\u66f4\u65b0\u64cd\u4f5c\u4e5f\u5f97\u4ee5\u5b8c\u6210\u3002

                            mkdir -p ./temp\nmv /etc/kubernetes/manifests/* ./temp/\nsleep 20   # wait at least one fileCheckFrequency period\nmv ./temp/* /etc/kubernetes/manifests/\n

                            Note

                            If your container service uses Docker, you can restart the services that use the certificates with the following command so that the certificates take effect:

                            docker ps | grep -E 'k8s_kube-apiserver|k8s_kube-controller-manager|k8s_kube-scheduler|k8s_etcd_etcd' | awk -F ' ' '{print $1}' | xargs docker restart\n
                            "},{"location":"end-user/kpanda/clusters/k8s-cert.html#kubeconfig","title":"\u66f4\u65b0 KubeConfig","text":"

                            \u6784\u5efa\u96c6\u7fa4\u65f6\u901a\u5e38\u4f1a\u5c06 admin.conf \u8bc1\u4e66\u590d\u5236\u5230 $HOME/.kube/config \u4e2d\uff0c\u4e3a\u4e86\u5728\u66f4\u65b0 admin.conf \u540e\u66f4\u65b0 $HOME/.kube/config \u7684\u5185\u5bb9\uff0c \u5fc5\u987b\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a

                            sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\nsudo chown $(id -u):$(id -g) $HOME/.kube/config\n
                            "},{"location":"end-user/kpanda/clusters/k8s-cert.html#kubelet","title":"\u4e3a kubelet \u914d\u7f6e\u8bc1\u4e66\u8f6e\u6362","text":"

                            \u5b8c\u6210\u4ee5\u4e0a\u64cd\u4f5c\u540e\uff0c\u57fa\u672c\u5b8c\u6210\u4e86\u96c6\u7fa4\u6240\u6709\u8bc1\u4e66\u7684\u66f4\u65b0\uff0c\u4f46\u4e0d\u5305\u62ec kubelet\u3002

                            \u56e0\u4e3a kubernetes \u5305\u542b\u7279\u6027 kubelet \u8bc1\u4e66\u8f6e\u6362\uff0c \u5728\u5f53\u524d\u8bc1\u4e66\u5373\u5c06\u8fc7\u671f\u65f6\uff0c \u5c06\u81ea\u52a8\u751f\u6210\u65b0\u7684\u79d8\u94a5\uff0c\u5e76\u4ece Kubernetes API \u7533\u8bf7\u65b0\u7684\u8bc1\u4e66\u3002 \u4e00\u65e6\u65b0\u7684\u8bc1\u4e66\u53ef\u7528\uff0c\u5b83\u5c06\u88ab\u7528\u4e8e\u4e0e Kubernetes API \u95f4\u7684\u8fde\u63a5\u8ba4\u8bc1\u3002

                            Note

                            This feature is available in Kubernetes 1.8.0 and later.

                            To enable client certificate rotation, configure the following parameters; a minimal sketch follows this list:

                            • The kubelet process accepts the --rotate-certificates flag, which determines whether the kubelet automatically requests a new certificate when the one currently in use is about to expire.

                            • The kube-controller-manager process accepts the --cluster-signing-duration flag (named --experimental-cluster-signing-duration before version 1.19), which controls the validity period of the issued certificates.
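
                            A minimal sketch of these two settings, assuming the kubelet is managed through a KubeletConfiguration file and that a one-year signing duration is wanted (the file path and the 8760h value are illustrative):

                            # /var/lib/kubelet/config.yaml (KubeletConfiguration)\napiVersion: kubelet.config.k8s.io/v1beta1\nkind: KubeletConfiguration\nrotateCertificates: true   # same effect as the kubelet --rotate-certificates flag\n\n# kube-controller-manager flag controlling the lifetime of issued certificates:\n#   --cluster-signing-duration=8760h\n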

                            For more details, see Configuring Certificate Rotation for the kubelet.

                            "},{"location":"end-user/kpanda/clusters/k8s-cert.html#_4","title":"\u81ea\u52a8\u66f4\u65b0\u8bc1\u4e66","text":"

                            \u4e3a\u4e86\u66f4\u9ad8\u6548\u4fbf\u6377\u5904\u7406\u5df2\u8fc7\u671f\u6216\u8005\u5373\u5c06\u8fc7\u671f\u7684 kubernetes \u96c6\u7fa4\u8bc1\u4e66\uff0c\u53ef\u53c2\u8003 k8s \u7248\u672c\u96c6\u7fa4\u8bc1\u4e66\u66f4\u65b0\u3002

                            "},{"location":"end-user/kpanda/clusters/runtime.html","title":"\u5982\u4f55\u9009\u62e9\u5bb9\u5668\u8fd0\u884c\u65f6","text":"

                            \u5bb9\u5668\u8fd0\u884c\u65f6\u662f kubernetes \u4e2d\u5bf9\u5bb9\u5668\u548c\u5bb9\u5668\u955c\u50cf\u751f\u547d\u5468\u671f\u8fdb\u884c\u7ba1\u7406\u7684\u91cd\u8981\u7ec4\u4ef6\u3002 kubernetes \u5728 1.19 \u7248\u672c\u4e2d\u5c06 containerd \u8bbe\u4e3a\u9ed8\u8ba4\u7684\u5bb9\u5668\u8fd0\u884c\u65f6\uff0c\u5e76\u5728 1.24 \u7248\u672c\u4e2d\u79fb\u9664\u4e86 Dockershim \u7ec4\u4ef6\u7684\u652f\u6301\u3002

                            \u56e0\u6b64\u76f8\u8f83\u4e8e Docker \u8fd0\u884c\u65f6\uff0c\u6211\u4eec\u66f4\u52a0 \u63a8\u8350\u60a8\u4f7f\u7528\u8f7b\u91cf\u7684 containerd \u4f5c\u4e3a\u60a8\u7684\u5bb9\u5668\u8fd0\u884c\u65f6\uff0c\u56e0\u4e3a\u8fd9\u5df2\u7ecf\u6210\u4e3a\u5f53\u524d\u4e3b\u6d41\u7684\u8fd0\u884c\u65f6\u9009\u62e9\u3002

                            \u9664\u6b64\u4e4b\u5916\uff0c\u4e00\u4e9b\u64cd\u4f5c\u7cfb\u7edf\u53d1\u884c\u5382\u5546\u5bf9 Docker \u8fd0\u884c\u65f6\u7684\u517c\u5bb9\u4e5f\u4e0d\u591f\u53cb\u597d\uff0c\u4e0d\u540c\u64cd\u4f5c\u7cfb\u7edf\u5bf9\u8fd0\u884c\u65f6\u7684\u652f\u6301\u5982\u4e0b\u8868\uff1a

                            "},{"location":"end-user/kpanda/clusters/runtime.html#_2","title":"\u4e0d\u540c\u64cd\u4f5c\u7cfb\u7edf\u548c\u63a8\u8350\u7684\u8fd0\u884c\u65f6\u7248\u672c\u5bf9\u5e94\u5173\u7cfb","text":"\u64cd\u4f5c\u7cfb\u7edf \u63a8\u8350\u7684 containerd \u7248\u672c \u63a8\u8350\u7684 Docker \u7248\u672c CentOS 1.7.5 20.10 RedHatOS 1.7.5 20.10 KylinOS 1.7.5 19.03\uff08\u4ec5 ARM \u67b6\u6784\u652f\u6301 \uff0c\u5728 x86 \u67b6\u6784\u4e0b\u4e0d\u652f\u6301\u4f7f\u7528 Docker \u4f5c\u4e3a\u8fd0\u884c\u65f6\uff09

                            For more supported runtime versions, see Runtime Versions Supported by RedHatOS and Runtime Versions Supported by KylinOS.
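
                            To see which runtime each node of an existing cluster uses, you can check the CONTAINER-RUNTIME column of the node list:

                            kubectl get nodes -o wide\n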

                            Note

                            In offline installation mode, the runtime offline package for the corresponding operating system needs to be prepared in advance.

                            "},{"location":"end-user/kpanda/clusters/upgrade-cluster.html","title":"\u96c6\u7fa4\u5347\u7ea7","text":"

                            Kubernetes \u793e\u533a\u6bcf\u4e2a\u5b63\u5ea6\u90fd\u4f1a\u53d1\u5e03\u4e00\u6b21\u5c0f\u7248\u672c\uff0c\u6bcf\u4e2a\u7248\u672c\u7684\u7ef4\u62a4\u5468\u671f\u5927\u6982\u53ea\u6709 9 \u4e2a\u6708\u3002 \u7248\u672c\u505c\u6b62\u7ef4\u62a4\u540e\u5c31\u4e0d\u4f1a\u518d\u66f4\u65b0\u4e00\u4e9b\u91cd\u5927\u6f0f\u6d1e\u6216\u5b89\u5168\u6f0f\u6d1e\u3002\u624b\u52a8\u5347\u7ea7\u96c6\u7fa4\u64cd\u4f5c\u8f83\u4e3a\u7e41\u7410\uff0c\u7ed9\u7ba1\u7406\u4eba\u5458\u5e26\u6765\u4e86\u6781\u5927\u7684\u5de5\u4f5c\u8d1f\u62c5\u3002

                            \u672c\u8282\u5c06\u4ecb\u7ecd\u5982\u4f55\u5728\u901a\u8fc7 Web UI \u754c\u9762\u4e00\u952e\u5f0f\u5728\u7ebf\u5347\u7ea7\u5de5\u4f5c\u96c6\u7fa4 Kubernetes \u7248\u672c\uff0c \u5982\u9700\u79bb\u7ebf\u5347\u7ea7\u5de5\u4f5c\u96c6\u7fa4\u7684 kubernetes \u7248\u672c\uff0c\u8bf7\u53c2\u9605\u5de5\u4f5c\u96c6\u7fa4\u79bb\u7ebf\u5347\u7ea7\u6307\u5357\u8fdb\u884c\u5347\u7ea7\u3002

                            Danger

                            After the version is upgraded, it is impossible to roll back to the previous version. Proceed with caution.

                            Note

                            • Kubernetes versions are written as x.y.z, where x is the major version, y the minor version, and z the patch version.
                            • Upgrading a cluster across minor versions is not allowed, for example directly from 1.23 to 1.25.
                            • Integrated clusters do not support version upgrades. If Cluster Upgrade is missing from the left navigation bar, check whether the cluster is an integrated cluster.
                            • The global service cluster can only be upgraded through the terminal.
                            • When upgrading a worker cluster, its management cluster should already be integrated into the container management module and be running normally.
                            • If you need to modify cluster parameters, you can do so by upgrading to the same version; see the steps below.
                            1. Click the name of the target cluster in the cluster list.

                            2. In the left navigation bar, click Cluster Operations -> Cluster Upgrade, then click Version Upgrade in the upper right corner of the page.

                            3. Select an available version and enter the cluster name to confirm.

                              Note

                              If you want to modify cluster parameters by means of an upgrade, follow these steps:

                              1. Find the ConfigMap of the cluster. You can log in to a control node and run the following command to find the ConfigMap name in varsConfRef.

                                kubectl get cluster.kubean.io <clustername> -o yaml\n
                              2. Modify the parameters in that ConfigMap as needed (see the sketch after this list).

                              3. Select the same version here and perform the upgrade; once it completes, the corresponding cluster parameters are updated.

                            4. After you click OK, you can watch the upgrade progress of the cluster.

                            5. The cluster upgrade is expected to take 30 minutes; you can click the Live Logs button to view the detailed logs of the upgrade.
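
                            A minimal sketch of the parameter-modification flow from the note above, assuming the ConfigMap referenced by varsConfRef is named my-cluster-vars-conf in the kubean-system namespace (both names are illustrative; read the actual values from the cluster resource first):

                            # 1. read varsConfRef.name and varsConfRef.namespace from the cluster resource\nkubectl get cluster.kubean.io <clustername> -o yaml\n# 2. edit the referenced ConfigMap, then upgrade to the same version in the UI\nkubectl -n kubean-system edit configmap my-cluster-vars-conf\n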

                            "},{"location":"end-user/kpanda/configmaps-secrets/configmap-hot-loading.html","title":"configmap/secret \u70ed\u52a0\u8f7d","text":"

                            configmap/secret \u70ed\u52a0\u8f7d\u662f\u6307\u5c06 configmap/secret \u4f5c\u4e3a\u6570\u636e\u5377\u6302\u8f7d\u5728\u5bb9\u5668\u4e2d\u6302\u8f7d\u65f6\uff0c\u5f53\u914d\u7f6e\u53d1\u751f\u6539\u53d8\u65f6\uff0c\u5bb9\u5668\u5c06\u81ea\u52a8\u8bfb\u53d6 configmap/secret \u66f4\u65b0\u540e\u7684\u914d\u7f6e\uff0c\u800c\u65e0\u9700\u91cd\u542f Pod\u3002

                            "},{"location":"end-user/kpanda/configmaps-secrets/configmap-hot-loading.html#_1","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                            1. \u53c2\u8003\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d - \u5bb9\u5668\u914d\u7f6e\uff0c\u914d\u7f6e\u5bb9\u5668\u6570\u636e\u5b58\u50a8\uff0c\u9009\u62e9 Configmap \u3001 Configmap Key \u3001 Secret \u3001 Secret Key \u4f5c\u4e3a\u6570\u636e\u5377\u6302\u8f7d\u81f3\u5bb9\u5668\u3002

                              Note

                              Configuration files mounted via a subpath (SubPath) do not support hot reloading.

                            2. Go to the [Configs & Secrets] page and open the details page of the ConfigMap. Under [Associated Resources], find the corresponding container resource and click the Load Now button to enter the hot reloading page.

                              Note

                              If your application supports automatically reading the updated configmap/secret, there is no need to perform the hot reloading manually.

                            3. In the hot reloading dialog, enter the command to execute inside the container and click the OK button to reload the configuration. For example, in an nginx container, run the nginx -s reload command as the root user to reload the configuration.

                            4. Watch the application reload in the web terminal that pops up in the UI.
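
                            For reference, a minimal sketch of a mount that supports hot reloading: the ConfigMap is mounted as a whole directory, not via subPath (the names demo and demo-config are illustrative):

                            apiVersion: v1\nkind: Pod\nmetadata:\n  name: demo\nspec:\n  containers:\n    - name: app\n      image: nginx\n      volumeMounts:\n        - name: conf\n          mountPath: /etc/demo   # whole-directory mount: updated keys propagate automatically\n          # a subPath mount here would NOT receive updates\n  volumes:\n    - name: conf\n      configMap:\n        name: demo-config\n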

                            "},{"location":"end-user/kpanda/configmaps-secrets/create-configmap.html","title":"\u521b\u5efa\u914d\u7f6e\u9879","text":"

                            \u914d\u7f6e\u9879\uff08ConfigMap\uff09\u4ee5\u952e\u503c\u5bf9\u7684\u5f62\u5f0f\u5b58\u50a8\u975e\u673a\u5bc6\u6027\u6570\u636e\uff0c\u5b9e\u73b0\u914d\u7f6e\u6570\u636e\u548c\u5e94\u7528\u4ee3\u7801\u76f8\u4e92\u89e3\u8026\u7684\u6548\u679c\u3002\u914d\u7f6e\u9879\u53ef\u7528\u4f5c\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u3001\u547d\u4ee4\u884c\u53c2\u6570\u6216\u8005\u5b58\u50a8\u5377\u4e2d\u7684\u914d\u7f6e\u6587\u4ef6\u3002

                            Note

                            • The data stored in a ConfigMap cannot exceed 1 MiB. If you need to store larger data, mount a storage volume or use a separate database or file service instead.

                            • ConfigMaps provide no confidentiality or encryption. To store encrypted data, use a Secret or another third-party tool to keep the data private.

                            Two creation methods are supported:

                            • Graphical form creation
                            • YAML creation
                            "},{"location":"end-user/kpanda/configmaps-secrets/create-configmap.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                            • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762

                            • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u5c06\u7528\u6237\u6388\u6743\u4e3a NS Editor \u89d2\u8272 \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                            "},{"location":"end-user/kpanda/configmaps-secrets/create-configmap.html#_3","title":"\u56fe\u5f62\u5316\u8868\u5355\u521b\u5efa","text":"
                            1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u67d0\u4e2a\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                            2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u914d\u7f6e\u4e0e\u5bc6\u94a5 -> \u914d\u7f6e\u9879 \uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 \u521b\u5efa\u914d\u7f6e\u9879 \u6309\u94ae\u3002

                            3. \u5728 \u521b\u5efa\u914d\u7f6e\u9879 \u9875\u9762\u4e2d\u586b\u5199\u914d\u7f6e\u4fe1\u606f\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                              Note

                              Click Upload File to import an existing file from the local machine and create the ConfigMap quickly.

                            4. After creation, click ⋮ on the right of the ConfigMap to edit its YAML, update, export, delete, and so on.
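
                            The steps above can also be done from a terminal; a minimal kubectl sketch (my-config and app.properties are illustrative names):

                            # from literal key-value pairs\nkubectl create configmap my-config --from-literal=version=1.0\n# or from a local file, mirroring the Upload File button\nkubectl create configmap my-config --from-file=./app.properties\n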

                            "},{"location":"end-user/kpanda/configmaps-secrets/create-configmap.html#yaml","title":"YAML \u521b\u5efa","text":"
                            1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u67d0\u4e2a\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                            2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u914d\u7f6e\u4e0e\u5bc6\u94a5 -> \u914d\u7f6e\u9879 \uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 YAML \u521b\u5efa \u6309\u94ae\u3002

                            3. \u586b\u5199\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684\u914d\u7f6e\u6587\u4ef6\uff0c\u7136\u540e\u5728\u5f39\u6846\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u3002

                              Note

                              • Click Import to import an existing file from the local machine and create the ConfigMap quickly.
                              • After filling in the data, click Download to save the configuration file locally.

                            4. After creation, click ⋮ on the right of the ConfigMap to edit its YAML, update, export, delete, and so on.

                            "},{"location":"end-user/kpanda/configmaps-secrets/create-configmap.html#yaml_1","title":"\u914d\u7f6e\u9879 YAML \u793a\u4f8b","text":"
                            ```yaml\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: kube-root-ca.crt\n  namespace: default\n  annotations:\ndata:\n  version: '1.0'\n```\n

                            Next step: Use ConfigMaps

                            "},{"location":"end-user/kpanda/configmaps-secrets/create-secret.html","title":"\u521b\u5efa\u5bc6\u94a5","text":"

                            \u5bc6\u94a5\u662f\u4e00\u79cd\u7528\u4e8e\u5b58\u50a8\u548c\u7ba1\u7406\u5bc6\u7801\u3001OAuth \u4ee4\u724c\u3001SSH\u3001TLS \u51ed\u636e\u7b49\u654f\u611f\u4fe1\u606f\u7684\u8d44\u6e90\u5bf9\u8c61\u3002\u4f7f\u7528\u5bc6\u94a5\u610f\u5473\u7740\u60a8\u4e0d\u9700\u8981\u5728\u5e94\u7528\u7a0b\u5e8f\u4ee3\u7801\u4e2d\u5305\u542b\u654f\u611f\u7684\u673a\u5bc6\u6570\u636e\u3002

                            \u5bc6\u94a5\u4f7f\u7528\u573a\u666f\uff1a

                            • \u4f5c\u4e3a\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u4f7f\u7528\uff0c\u63d0\u4f9b\u5bb9\u5668\u8fd0\u884c\u8fc7\u7a0b\u4e2d\u6240\u9700\u7684\u4e00\u4e9b\u5fc5\u8981\u4fe1\u606f\u3002
                            • \u4f7f\u7528\u5bc6\u94a5\u4f5c\u4e3a Pod \u7684\u6570\u636e\u5377\u3002
                            • \u5728 kubelet \u62c9\u53d6\u5bb9\u5668\u955c\u50cf\u65f6\u4f5c\u4e3a\u955c\u50cf\u4ed3\u5e93\u7684\u8eab\u4efd\u8ba4\u8bc1\u51ed\u8bc1\u3002

                            Two creation methods are supported:

                            • Graphical form creation
                            • YAML creation
                            "},{"location":"end-user/kpanda/configmaps-secrets/create-secret.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                            • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762

                            • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u5c06\u7528\u6237\u6388\u6743\u4e3a NS Editor \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                            "},{"location":"end-user/kpanda/configmaps-secrets/create-secret.html#_3","title":"\u56fe\u5f62\u5316\u8868\u5355\u521b\u5efa","text":"
                            1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u67d0\u4e2a\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                            2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u914d\u7f6e\u4e0e\u5bc6\u94a5 -> \u5bc6\u94a5 \uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 \u521b\u5efa\u5bc6\u94a5 \u6309\u94ae\u3002

                            3. \u5728 \u521b\u5efa\u5bc6\u94a5 \u9875\u9762\u4e2d\u586b\u5199\u914d\u7f6e\u4fe1\u606f\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                              Note the following when filling in the configuration:

                              • The name of the secret must be unique within the same namespace.
                              • Secret type:
                                • Default (Opaque): the default Kubernetes secret type, supporting arbitrary user-defined data.
                                • TLS (kubernetes.io/tls): credentials for TLS client or server data access.
                                • Image registry information (kubernetes.io/dockerconfigjson): credentials for image registry access.
                                • Username and password (kubernetes.io/basic-auth): credentials for basic authentication.
                                • Custom: a type customized by the user according to business needs.
                              • Secret data: the data stored in the secret; different data requires different parameters:
                                • When the secret type is default (Opaque)/custom: multiple key-value pairs can be entered.
                                • When the secret type is TLS (kubernetes.io/tls): the certificate credential and private key data are required. The certificate is a self-signed or CA-signed credential used for authentication; the certificate request is a request for the signature and must be signed with the private key.
                                • When the secret type is image registry information (kubernetes.io/dockerconfigjson): the account and password of the private image registry are required.
                                • When the secret type is username and password (kubernetes.io/basic-auth): the username and password must be specified.
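
                              The common types above can also be created from a terminal; a minimal kubectl sketch (all names, files, and values are illustrative):

                              # default (Opaque) secret from literals\nkubectl create secret generic mysecret --from-literal=username=admin --from-literal=password=changeme\n# TLS secret from a certificate and private key\nkubectl create secret tls my-tls --cert=./tls.crt --key=./tls.key\n# image registry credential (kubernetes.io/dockerconfigjson)\nkubectl create secret docker-registry my-registry --docker-server=registry.example.com --docker-username=admin --docker-password=changeme\n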
                            "},{"location":"end-user/kpanda/configmaps-secrets/create-secret.html#yaml","title":"YAML \u521b\u5efa","text":"
                            1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u67d0\u4e2a\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                            2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u914d\u7f6e\u4e0e\u5bc6\u94a5 -> \u5bc6\u94a5 \uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 YAML \u521b\u5efa \u6309\u94ae\u3002

                            3. \u5728 YAML \u521b\u5efa \u9875\u9762\u4e2d\u586b\u5199 YAML \u914d\u7f6e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                              You can import a YAML file from the local machine, or download and save the completed file locally.

                            "},{"location":"end-user/kpanda/configmaps-secrets/create-secret.html#yaml_1","title":"\u5bc6\u94a5 YAML \u793a\u4f8b","text":"
                            ```yaml\napiVersion: v1\nkind: Secret\nmetadata:\n  name: secretdemo\ntype: Opaque\ndata:\n  username: ******\n  password: ******\n```\n

                            Next step: Use Secrets

                            "},{"location":"end-user/kpanda/configmaps-secrets/use-configmap.html","title":"\u4f7f\u7528\u914d\u7f6e\u9879","text":"

                            \u914d\u7f6e\u9879\uff08ConfigMap\uff09\u662f Kubernetes \u7684\u4e00\u79cd API \u5bf9\u8c61\uff0c\u7528\u6765\u5c06\u975e\u673a\u5bc6\u6027\u7684\u6570\u636e\u4fdd\u5b58\u5230\u952e\u503c\u5bf9\u4e2d\uff0c\u53ef\u4ee5\u5b58\u50a8\u5176\u4ed6\u5bf9\u8c61\u6240\u9700\u8981\u4f7f\u7528\u7684\u914d\u7f6e\u3002 \u4f7f\u7528\u65f6\uff0c \u5bb9\u5668\u53ef\u4ee5\u5c06\u5176\u7528\u4f5c\u73af\u5883\u53d8\u91cf\u3001\u547d\u4ee4\u884c\u53c2\u6570\u6216\u8005\u5b58\u50a8\u5377\u4e2d\u7684\u914d\u7f6e\u6587\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528\u914d\u7f6e\u9879\uff0c\u80fd\u591f\u5c06\u914d\u7f6e\u6570\u636e\u548c\u5e94\u7528\u7a0b\u5e8f\u4ee3\u7801\u5206\u5f00\uff0c\u4e3a\u5e94\u7528\u914d\u7f6e\u7684\u4fee\u6539\u63d0\u4f9b\u66f4\u52a0\u7075\u6d3b\u7684\u9014\u5f84\u3002

                            Note

                            ConfigMaps provide no confidentiality or encryption. If the data to be stored is confidential, use a Secret or another third-party tool to keep the data private rather than a ConfigMap. In addition, when using a ConfigMap in a container, the container and the ConfigMap must be in the same cluster namespace.

                            "},{"location":"end-user/kpanda/configmaps-secrets/use-configmap.html#_2","title":"\u4f7f\u7528\u573a\u666f","text":"

                            \u60a8\u53ef\u4ee5\u5728 Pod \u4e2d\u4f7f\u7528\u914d\u7f6e\u9879\uff0c\u6709\u591a\u79cd\u4f7f\u7528\u573a\u666f\uff0c\u4e3b\u8981\u5305\u62ec\uff1a

                            • \u4f7f\u7528\u914d\u7f6e\u9879\u8bbe\u7f6e\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf

                            • \u4f7f\u7528\u914d\u7f6e\u9879\u8bbe\u7f6e\u5bb9\u5668\u7684\u547d\u4ee4\u884c\u53c2\u6570

                            • \u4f7f\u7528\u914d\u7f6e\u9879\u4f5c\u4e3a\u5bb9\u5668\u7684\u6570\u636e\u5377

                            "},{"location":"end-user/kpanda/configmaps-secrets/use-configmap.html#_3","title":"\u8bbe\u7f6e\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf","text":"

                            \u60a8\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u5316\u754c\u9762\u6216\u8005\u7ec8\u7aef\u547d\u4ee4\u884c\u6765\u4f7f\u7528\u914d\u7f6e\u9879\u4f5c\u4e3a\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u3002

                            Note

                            ConfigMap import uses the whole ConfigMap as the value of environment variables; ConfigMap key-value import uses a single parameter of the ConfigMap as the value of an environment variable.

                            "},{"location":"end-user/kpanda/configmaps-secrets/use-configmap.html#_4","title":"\u56fe\u5f62\u5316\u754c\u9762\u64cd\u4f5c","text":"

                            \u901a\u8fc7\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u53ef\u4ee5\u5728 \u73af\u5883\u53d8\u91cf \u754c\u9762\u901a\u8fc7\u9009\u62e9 \u914d\u7f6e\u9879\u5bfc\u5165 \u6216 \u914d\u7f6e\u9879\u952e\u503c\u5bfc\u5165 \u4e3a\u5bb9\u5668\u8bbe\u7f6e\u73af\u5883\u53d8\u91cf\u3002

                            1. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u9875\u9762\u4e2d\uff0c\u5728 \u5bb9\u5668\u914d\u7f6e \u8fd9\u4e00\u6b65\u4e2d\uff0c\u9009\u62e9 \u73af\u5883\u53d8\u91cf \u914d\u7f6e\uff0c\u70b9\u51fb \u6dfb\u52a0\u73af\u5883\u53d8\u91cf \u6309\u94ae\u3002

                            2. \u5728\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u5904\u9009\u62e9 \u914d\u7f6e\u9879\u5bfc\u5165 \u6216 \u914d\u7f6e\u9879\u952e\u503c\u5bfc\u5165 \u3002

                              • When the environment variable type is ConfigMap Import, enter the Variable Name, the Prefix name, and the ConfigMap name in turn.

                              • When the environment variable type is ConfigMap Key-Value Import, enter the Variable Name, the ConfigMap name, and the Key name in turn.

                            "},{"location":"end-user/kpanda/configmaps-secrets/use-configmap.html#_5","title":"\u547d\u4ee4\u884c\u64cd\u4f5c","text":"

                            \u60a8\u53ef\u4ee5\u5728\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\u5c06\u914d\u7f6e\u9879\u8bbe\u7f6e\u4e3a\u73af\u5883\u53d8\u91cf\uff0c\u4f7f\u7528 valueFrom \u53c2\u6570\u5f15\u7528 ConfigMap \u4e2d\u7684 Key/Value\u3002

                            apiVersion: v1\nkind: Pod\nmetadata:\n  name: configmap-pod-1\nspec:\n  containers:\n    - name: test-container\n      image: busybox\n      command: [ \"/bin/sh\", \"-c\", \"env\" ]\n      env:\n        - name: SPECIAL_LEVEL_KEY\n          valueFrom:                  # (1)!\n            configMapKeyRef:\n              name: kpanda-configmap  # (2)!\n              key: SPECIAL_LEVEL      # (3)!\n  restartPolicy: Never\n
                            1. Use valueFrom to make the env value reference a ConfigMap.
                            2. The name of the referenced ConfigMap.
                            3. The key of the referenced ConfigMap.
                            "},{"location":"end-user/kpanda/configmaps-secrets/use-configmap.html#_6","title":"\u8bbe\u7f6e\u5bb9\u5668\u7684\u547d\u4ee4\u884c\u53c2\u6570","text":"

                            \u60a8\u53ef\u4ee5\u4f7f\u7528\u914d\u7f6e\u9879\u8bbe\u7f6e\u5bb9\u5668\u4e2d\u7684\u547d\u4ee4\u6216\u8005\u53c2\u6570\u503c\uff0c\u4f7f\u7528\u73af\u5883\u53d8\u91cf\u66ff\u6362\u8bed\u6cd5 $(VAR_NAME) \u6765\u8fdb\u884c\u3002\u5982\u4e0b\u6240\u793a\u3002

                            apiVersion: v1\nkind: Pod\nmetadata:\n  name: configmap-pod-3\nspec:\n  containers:\n    - name: test-container\n      image: busybox\n      command: [ \"/bin/sh\", \"-c\", \"echo $(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY)\" ]\n      env:\n        - name: SPECIAL_LEVEL_KEY\n          valueFrom:\n            configMapKeyRef:\n              name: kpanda-configmap\n              key: SPECIAL_LEVEL\n        - name: SPECIAL_TYPE_KEY\n          valueFrom:\n            configMapKeyRef:\n              name: kpanda-configmap\n              key: SPECIAL_TYPE\n  restartPolicy: Never\n

                            After this Pod runs, assuming the ConfigMap's SPECIAL_LEVEL is Hello and SPECIAL_TYPE is Kpanda, it prints the following output.

                            Hello Kpanda\n
                            "},{"location":"end-user/kpanda/configmaps-secrets/use-configmap.html#_7","title":"\u7528\u4f5c\u5bb9\u5668\u6570\u636e\u5377","text":"

                            \u60a8\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u5316\u754c\u9762\u6216\u8005\u7ec8\u7aef\u547d\u4ee4\u884c\u6765\u4f7f\u7528\u914d\u7f6e\u9879\u4f5c\u4e3a\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u3002

                            "},{"location":"end-user/kpanda/configmaps-secrets/use-configmap.html#_8","title":"\u56fe\u5f62\u5316\u64cd\u4f5c","text":"

                            \u5728\u901a\u8fc7\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u5728 \u6570\u636e\u5b58\u50a8 \u754c\u9762\u9009\u62e9\u5b58\u50a8\u7c7b\u578b\u4e3a \u914d\u7f6e\u9879 \uff0c\u5c06\u914d\u7f6e\u9879\u4f5c\u4e3a\u5bb9\u5668\u7684\u6570\u636e\u5377\u3002

                            1. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u9875\u9762\u4e2d\uff0c\u5728 \u5bb9\u5668\u914d\u7f6e \u8fd9\u4e00\u6b65\u4e2d\uff0c\u9009\u62e9 \u6570\u636e\u5b58\u50a8 \u914d\u7f6e\uff0c\u5728 \u8282\u70b9\u8def\u5f84\u6620\u5c04 \u5217\u8868\u70b9\u51fb \u6dfb\u52a0 \u6309\u94ae\u3002

                            2. \u5728\u5b58\u50a8\u7c7b\u578b\u5904\u9009\u62e9 \u914d\u7f6e\u9879 \uff0c\u5e76\u4f9d\u6b21\u8f93\u5165 \u5bb9\u5668\u8def\u5f84 \u3001 \u5b50\u8def\u5f84 \u7b49\u4fe1\u606f\u3002

                            "},{"location":"end-user/kpanda/configmaps-secrets/use-configmap.html#_9","title":"\u547d\u4ee4\u884c\u64cd\u4f5c","text":"

                            \u8981\u5728\u4e00\u4e2a Pod \u7684\u5b58\u50a8\u5377\u4e2d\u4f7f\u7528 ConfigMap\u3002

                            \u4e0b\u9762\u662f\u4e00\u4e2a\u5c06 ConfigMap \u4ee5\u5377\u7684\u5f62\u5f0f\u8fdb\u884c\u6302\u8f7d\u7684 Pod \u793a\u4f8b\uff1a

                            apiVersion: v1\nkind: Pod\nmetadata:\n  name: mypod\nspec:\n  containers:\n  - name: mypod\n    image: redis\n    volumeMounts:\n    - name: foo\n      mountPath: \"/etc/foo\"\n      readOnly: true\n  volumes:\n  - name: foo\n    configMap:\n      name: myconfigmap\n

                            If there are multiple containers in the Pod, each container needs its own volumeMounts block, but only one spec.volumes block is needed per ConfigMap.

                            Note

                            When a ConfigMap is used as a data volume mounted in a container, it can only be read as read-only files.
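
                            If only some keys are needed, a ConfigMap volume can also project individual keys to chosen file names via items; a minimal sketch of the volumes section (the key and path names are illustrative):

                            volumes:\n  - name: foo\n    configMap:\n      name: myconfigmap\n      items:\n        - key: SPECIAL_LEVEL    # only this key is projected\n          path: special-level   # appears as /etc/foo/special-level\n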

                            "},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html","title":"\u4f7f\u7528\u5bc6\u94a5","text":"

                            \u5bc6\u94a5\u662f\u4e00\u79cd\u7528\u4e8e\u5b58\u50a8\u548c\u7ba1\u7406\u5bc6\u7801\u3001OAuth \u4ee4\u724c\u3001SSH\u3001TLS \u51ed\u636e\u7b49\u654f\u611f\u4fe1\u606f\u7684\u8d44\u6e90\u5bf9\u8c61\u3002\u4f7f\u7528\u5bc6\u94a5\u610f\u5473\u7740\u60a8\u4e0d\u9700\u8981\u5728\u5e94\u7528\u7a0b\u5e8f\u4ee3\u7801\u4e2d\u5305\u542b\u654f\u611f\u7684\u673a\u5bc6\u6570\u636e\u3002

                            "},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html#_2","title":"\u4f7f\u7528\u573a\u666f","text":"

                            \u60a8\u53ef\u4ee5\u5728 Pod \u4e2d\u4f7f\u7528\u5bc6\u94a5\uff0c\u6709\u591a\u79cd\u4f7f\u7528\u573a\u666f\uff0c\u4e3b\u8981\u5305\u62ec\uff1a

                            • \u4f5c\u4e3a\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u4f7f\u7528\uff0c\u63d0\u4f9b\u5bb9\u5668\u8fd0\u884c\u8fc7\u7a0b\u4e2d\u6240\u9700\u7684\u4e00\u4e9b\u5fc5\u8981\u4fe1\u606f\u3002
                            • \u4f7f\u7528\u5bc6\u94a5\u4f5c\u4e3a Pod \u7684\u6570\u636e\u5377\u3002
                            • \u5728 kubelet \u62c9\u53d6\u5bb9\u5668\u955c\u50cf\u65f6\u7528\u4f5c\u955c\u50cf\u4ed3\u5e93\u7684\u8eab\u4efd\u8ba4\u8bc1\u51ed\u8bc1\u4f7f\u7528\u3002
                            "},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html#_3","title":"\u4f7f\u7528\u5bc6\u94a5\u8bbe\u7f6e\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf","text":"

                            \u60a8\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u5316\u754c\u9762\u6216\u8005\u7ec8\u7aef\u547d\u4ee4\u884c\u6765\u4f7f\u7528\u5bc6\u94a5\u4f5c\u4e3a\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u3002

                            Note

                            Secret import uses the whole secret as the value of environment variables; secret key-value import uses a single parameter of the secret as the value of an environment variable.

                            "},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html#_4","title":"\u56fe\u5f62\u754c\u9762\u64cd\u4f5c","text":"

                            \u5728\u901a\u8fc7\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u60a8\u53ef\u4ee5\u5728 \u73af\u5883\u53d8\u91cf \u754c\u9762\u901a\u8fc7\u9009\u62e9 \u5bc6\u94a5\u5bfc\u5165 \u6216 \u5bc6\u94a5\u952e\u503c\u5bfc\u5165 \u4e3a\u5bb9\u5668\u8bbe\u7f6e\u73af\u5883\u53d8\u91cf\u3002

                            1. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u9875\u9762\u3002

                            2. \u5728 \u5bb9\u5668\u914d\u7f6e \u9009\u62e9 \u73af\u5883\u53d8\u91cf \u914d\u7f6e\uff0c\u70b9\u51fb \u6dfb\u52a0\u73af\u5883\u53d8\u91cf \u6309\u94ae\u3002

                            3. \u5728\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u5904\u9009\u62e9 \u5bc6\u94a5\u5bfc\u5165 \u6216 \u5bc6\u94a5\u952e\u503c\u5bfc\u5165 \u3002

                              • \u5f53\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u9009\u62e9\u4e3a \u5bc6\u94a5\u5bfc\u5165 \u65f6\uff0c\u4f9d\u6b21\u8f93\u5165 \u53d8\u91cf\u540d \u3001 \u524d\u7f00 \u3001 \u5bc6\u94a5 \u7684\u540d\u79f0\u3002

                              • \u5f53\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u9009\u62e9\u4e3a \u5bc6\u94a5\u952e\u503c\u5bfc\u5165 \u65f6\uff0c\u4f9d\u6b21\u8f93\u5165 \u53d8\u91cf\u540d \u3001 \u5bc6\u94a5 \u3001 \u952e \u7684\u540d\u79f0\u3002

                            "},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html#_5","title":"\u547d\u4ee4\u884c\u64cd\u4f5c","text":"

                            \u5982\u4e0b\u4f8b\u6240\u793a\uff0c\u60a8\u53ef\u4ee5\u5728\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\u5c06\u5bc6\u94a5\u8bbe\u7f6e\u4e3a\u73af\u5883\u53d8\u91cf\uff0c\u4f7f\u7528 valueFrom \u53c2\u6570\u5f15\u7528 Secret \u4e2d\u7684 Key/Value\u3002

                            apiVersion: v1\nkind: Pod\nmetadata:\n  name: secret-env-pod\nspec:\n  containers:\n  - name: mycontainer\n    image: redis\n    env:\n      - name: SECRET_USERNAME\n        valueFrom:\n          secretKeyRef:\n            name: mysecret\n            key: username\n            optional: false # (1)!\n      - name: SECRET_PASSWORD\n        valueFrom:\n          secretKeyRef:\n            name: mysecret\n            key: password\n            optional: false # (2)!\n
                            1. This value is the default; it means that \"mysecret\" must exist and contain a key named \"username\"
                            2. This value is the default; it means that \"mysecret\" must exist and contain a key named \"password\"
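                            After applying the YAML above, a quick way to confirm that the variables were injected is to read them back from the Pod (a minimal check; the manifest file name is a placeholder):

                            kubectl apply -f secret-env-pod.yaml\nkubectl exec secret-env-pod -- printenv SECRET_USERNAME SECRET_PASSWORD\n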
                            "},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html#pod","title":"\u4f7f\u7528\u5bc6\u94a5\u4f5c\u4e3a Pod \u7684\u6570\u636e\u5377","text":""},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html#_6","title":"\u56fe\u5f62\u754c\u9762\u64cd\u4f5c","text":"

                            \u5728\u901a\u8fc7\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u5728 \u6570\u636e\u5b58\u50a8 \u754c\u9762\u9009\u62e9\u5b58\u50a8\u7c7b\u578b\u4e3a \u5bc6\u94a5 \uff0c\u5c06\u5bc6\u94a5\u4f5c\u4e3a\u5bb9\u5668\u7684\u6570\u636e\u5377\u3002

                            1. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u9875\u9762\u3002

                            2. \u5728 \u5bb9\u5668\u914d\u7f6e \u9009\u62e9 \u6570\u636e\u5b58\u50a8 \u914d\u7f6e\uff0c\u5728 \u8282\u70b9\u8def\u5f84\u6620\u5c04 \u5217\u8868\u70b9\u51fb \u6dfb\u52a0 \u6309\u94ae\u3002

                            3. \u5728\u5b58\u50a8\u7c7b\u578b\u5904\u9009\u62e9 \u5bc6\u94a5 \uff0c\u5e76\u4f9d\u6b21\u8f93\u5165 \u5bb9\u5668\u8def\u5f84 \u3001 \u5b50\u8def\u5f84 \u7b49\u4fe1\u606f\u3002

                            "},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html#_7","title":"\u547d\u4ee4\u884c\u64cd\u4f5c","text":"

                            \u4e0b\u9762\u662f\u4e00\u4e2a\u901a\u8fc7\u6570\u636e\u5377\u6765\u6302\u8f7d\u540d\u4e3a mysecret \u7684 Secret \u7684 Pod \u793a\u4f8b\uff1a

                            apiVersion: v1\nkind: Pod\nmetadata:\n  name: mypod\nspec:\n  containers:\n  - name: mypod\n    image: redis\n    volumeMounts:\n    - name: foo\n      mountPath: \"/etc/foo\"\n      readOnly: true\n  volumes:\n  - name: foo\n    secret:\n      secretName: mysecret\n      optional: false # (1)!\n
                            1. The default setting, meaning that \"mysecret\" must already exist

                            If the Pod contains multiple containers, each container needs its own volumeMounts block, but only one .spec.volumes entry is needed per Secret.
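                            A minimal sketch of that layout is shown below (the container names are hypothetical; the secret volume is declared once and mounted by both containers):

                            apiVersion: v1\nkind: Pod\nmetadata:\n  name: mypod-multi\nspec:\n  containers:\n  - name: app\n    image: redis\n    volumeMounts:\n    - name: foo\n      mountPath: \"/etc/foo\"\n      readOnly: true\n  - name: sidecar\n    image: busybox\n    command: [\"sleep\", \"3600\"]\n    volumeMounts:\n    - name: foo\n      mountPath: \"/etc/foo\"\n      readOnly: true\n  volumes:\n  - name: foo\n    secret:\n      secretName: mysecret\n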

                            "},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html#kubelet","title":"\u5728 kubelet \u62c9\u53d6\u5bb9\u5668\u955c\u50cf\u65f6\u7528\u4f5c\u955c\u50cf\u4ed3\u5e93\u7684\u8eab\u4efd\u8ba4\u8bc1\u51ed\u8bc1","text":"

                            \u60a8\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u5316\u754c\u9762\u6216\u8005\u7ec8\u7aef\u547d\u4ee4\u884c\u6765\u4f7f\u7528\u5bc6\u94a5\u4f5c\u4e3a\u955c\u50cf\u4ed3\u5e93\u8eab\u4efd\u8ba4\u8bc1\u51ed\u8bc1\u3002

                            "},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html#_8","title":"\u56fe\u5f62\u5316\u64cd\u4f5c","text":"

                            \u5728\u901a\u8fc7\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u5728 \u6570\u636e\u5b58\u50a8 \u754c\u9762\u9009\u62e9\u5b58\u50a8\u7c7b\u578b\u4e3a \u5bc6\u94a5 \uff0c\u5c06\u5bc6\u94a5\u4f5c\u4e3a\u5bb9\u5668\u7684\u6570\u636e\u5377\u3002

                            1. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u9875\u9762\u3002

                            2. \u5728\u7b2c\u4e8c\u6b65 \u5bb9\u5668\u914d\u7f6e \u65f6\u9009\u62e9 \u57fa\u672c\u4fe1\u606f \u914d\u7f6e\uff0c\u70b9\u51fb \u9009\u62e9\u955c\u50cf \u6309\u94ae\u3002

                            3. \u5728\u5f39\u6846\u7684 \u955c\u50cf\u4ed3\u5e93 \u4e0b\u62c9\u9009\u62e9\u79c1\u6709\u955c\u50cf\u4ed3\u5e93\u540d\u79f0\u3002\u5173\u4e8e\u79c1\u6709\u955c\u50cf\u5bc6\u94a5\u521b\u5efa\u8bf7\u67e5\u770b\u521b\u5efa\u5bc6\u94a5\u4e86\u89e3\u8be6\u60c5\u3002

                            4. \u8f93\u5165\u79c1\u6709\u4ed3\u5e93\u5185\u7684\u955c\u50cf\u540d\u79f0\uff0c\u70b9\u51fb \u786e\u5b9a \uff0c\u5b8c\u6210\u955c\u50cf\u9009\u62e9\u3002

                            Note

                            \u521b\u5efa\u5bc6\u94a5\u65f6\uff0c\u9700\u8981\u786e\u4fdd\u8f93\u5165\u6b63\u786e\u7684\u955c\u50cf\u4ed3\u5e93\u5730\u5740\u3001\u7528\u6237\u540d\u79f0\u3001\u5bc6\u7801\u5e76\u9009\u62e9\u6b63\u786e\u7684\u955c\u50cf\u540d\u79f0\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u83b7\u53d6\u955c\u50cf\u4ed3\u5e93\u4e2d\u7684\u955c\u50cf\u3002
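                            On the command line, the same effect is achieved by referencing the secret in the Pod's imagePullSecrets field (a minimal sketch; the image and secret names are placeholders):

                            apiVersion: v1\nkind: Pod\nmetadata:\n  name: private-image-pod\nspec:\n  containers:\n  - name: app\n    image: registry.example.com/myapp:v1  # image hosted in a private registry\n  imagePullSecrets:\n  - name: my-registry-secret  # docker-registry type secret holding the registry credentials\n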

                            "},{"location":"end-user/kpanda/custom-resources/create.html","title":"\u521b\u5efa\u81ea\u5b9a\u4e49\u8d44\u6e90 (CRD)","text":"

                            \u5728 Kubernetes \u4e2d\u4e00\u5207\u5bf9\u8c61\u90fd\u88ab\u62bd\u8c61\u4e3a\u8d44\u6e90\uff0c\u5982 Pod\u3001Deployment\u3001Service\u3001Volume \u7b49\u662f Kubernetes \u63d0\u4f9b\u7684\u9ed8\u8ba4\u8d44\u6e90\uff0c \u8fd9\u4e3a\u6211\u4eec\u7684\u65e5\u5e38\u8fd0\u7ef4\u548c\u7ba1\u7406\u5de5\u4f5c\u63d0\u4f9b\u4e86\u91cd\u8981\u652f\u6491\uff0c\u4f46\u662f\u5728\u4e00\u4e9b\u7279\u6b8a\u7684\u573a\u666f\u4e2d\uff0c\u73b0\u6709\u7684\u9884\u7f6e\u8d44\u6e90\u5e76\u4e0d\u80fd\u6ee1\u8db3\u4e1a\u52a1\u7684\u9700\u8981\uff0c \u56e0\u6b64\u6211\u4eec\u5e0c\u671b\u53bb\u6269\u5c55 Kubernetes API \u7684\u80fd\u529b\uff0c\u81ea\u5b9a\u4e49\u8d44\u6e90\uff08CustomResourceDefinition, CRD\uff09\u6b63\u662f\u57fa\u4e8e\u8fd9\u6837\u7684\u9700\u6c42\u5e94\u8fd0\u800c\u751f\u3002

                            \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u652f\u6301\u5bf9\u81ea\u5b9a\u4e49\u8d44\u6e90\u7684\u754c\u9762\u5316\u7ba1\u7406\uff0c\u4e3b\u8981\u529f\u80fd\u5982\u4e0b\uff1a

                            • \u83b7\u53d6\u96c6\u7fa4\u4e0b\u81ea\u5b9a\u4e49\u8d44\u6e90\u5217\u8868\u548c\u8be6\u7ec6\u4fe1\u606f
                            • \u57fa\u4e8e YAML \u521b\u5efa\u81ea\u5b9a\u8d44\u6e90
                            • \u57fa\u4e8e YAML \u521b\u5efa\u81ea\u5b9a\u4e49\u8d44\u6e90\u793a\u4f8b CR\uff08Custom Resource\uff09
                            • \u5220\u9664\u81ea\u5b9a\u4e49\u8d44\u6e90
                            "},{"location":"end-user/kpanda/custom-resources/create.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                            • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762

                            • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u5c06\u7528\u6237\u6388\u6743\u4e3a Cluster Admin \u89d2\u8272 \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u6388\u6743

                            "},{"location":"end-user/kpanda/custom-resources/create.html#yaml","title":"\u901a\u8fc7 YAML \u521b\u5efa\u81ea\u5b9a\u4e49\u8d44\u6e90","text":"
                            1. \u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                            2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u81ea\u5b9a\u4e49\u8d44\u6e90 \uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 YAML \u521b\u5efa \u6309\u94ae\u3002

                            3. \u5728 YAML \u521b\u5efa \u9875\u9762\u4e2d\uff0c\u586b\u5199 YAML \u8bed\u53e5\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                            4. \u8fd4\u56de\u81ea\u5b9a\u4e49\u8d44\u6e90\u5217\u8868\u9875\uff0c\u5373\u53ef\u67e5\u770b\u521a\u521a\u521b\u5efa\u7684\u540d\u4e3a crontabs.stable.example.com \u7684\u81ea\u5b9a\u4e49\u8d44\u6e90\u3002

                            \u81ea\u5b9a\u4e49\u8d44\u6e90\u793a\u4f8b\uff1a

                            CRD example
                            apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  name: crontabs.stable.example.com\nspec:\n  group: stable.example.com\n  versions:\n    - name: v1\n      served: true\n      storage: true\n      schema:\n        openAPIV3Schema:\n          type: object\n          properties:\n            spec:\n              type: object\n              properties:\n                cronSpec:\n                  type: string\n                image:\n                  type: string\n                replicas:\n                  type: integer\n  scope: Namespaced\n  names:\n    plural: crontabs\n    singular: crontab\n    kind: CronTab\n    shortNames:\n    - ct\n
                            "},{"location":"end-user/kpanda/custom-resources/create.html#yaml_1","title":"\u901a\u8fc7 YAML \u521b\u5efa\u81ea\u5b9a\u4e49\u8d44\u6e90\u793a\u4f8b","text":"
                            1. \u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                            2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u81ea\u5b9a\u4e49\u8d44\u6e90 \uff0c\u8fdb\u5165\u81ea\u5b9a\u4e49\u8d44\u6e90\u5217\u8868\u9875\u9762\u3002

                            3. \u70b9\u51fb\u540d\u4e3a crontabs.stable.example.com \u7684\u81ea\u5b9a\u4e49\u8d44\u6e90\uff0c\u8fdb\u5165\u8be6\u60c5\uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 YAML \u521b\u5efa \u6309\u94ae\u3002

                            4. \u5728 YAML \u521b\u5efa \u9875\u9762\u4e2d\uff0c\u586b\u5199 YAML \u8bed\u53e5\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                            5. \u8fd4\u56de crontabs.stable.example.com \u7684\u8be6\u60c5\u9875\u9762\uff0c\u5373\u53ef\u67e5\u770b\u521a\u521a\u521b\u5efa\u7684\u540d\u4e3a my-new-cron-object \u7684\u81ea\u5b9a\u4e49\u8d44\u6e90\u3002

                            CR \u793a\u4f8b\uff1a

                            CR example
                            apiVersion: \"stable.example.com/v1\"\nkind: CronTab\nmetadata:\n  name: my-new-cron-object\nspec:\n  cronSpec: \"* * * * */5\"\n  image: my-awesome-cron-image\n
                            "},{"location":"end-user/kpanda/gpu/index.html","title":"GPU \u7ba1\u7406\u6982\u8ff0","text":"

                            \u672c\u6587\u4ecb\u7ecd \u7b97\u4e30 AI \u7b97\u529b\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\u5bf9 GPU\u4e3a\u4ee3\u8868\u7684\u5f02\u6784\u8d44\u6e90\u7edf\u4e00\u8fd0\u7ef4\u7ba1\u7406\u80fd\u529b\u3002

                            "},{"location":"end-user/kpanda/gpu/index.html#_1","title":"\u80cc\u666f","text":"

                            \u968f\u7740 AI \u5e94\u7528\u3001\u5927\u6a21\u578b\u3001\u4eba\u5de5\u667a\u80fd\u3001\u81ea\u52a8\u9a7e\u9a76\u7b49\u65b0\u5174\u6280\u672f\u7684\u5feb\u901f\u53d1\u5c55\uff0c\u4f01\u4e1a\u9762\u4e34\u7740\u8d8a\u6765\u8d8a\u591a\u7684\u8ba1\u7b97\u5bc6\u96c6\u578b\u4efb\u52a1\u548c\u6570\u636e\u5904\u7406\u9700\u6c42\u3002 \u4ee5 CPU \u4e3a\u4ee3\u8868\u7684\u4f20\u7edf\u8ba1\u7b97\u67b6\u6784\u5df2\u65e0\u6cd5\u6ee1\u8db3\u4f01\u4e1a\u65e5\u76ca\u589e\u957f\u7684\u8ba1\u7b97\u9700\u6c42\u3002\u6b64\u65f6\uff0c\u4ee5 GPU \u4e3a\u4ee3\u8868\u7684\u5f02\u6784\u8ba1\u7b97\u56e0\u5728\u5904\u7406\u5927\u89c4\u6a21\u6570\u636e\u3001\u8fdb\u884c\u590d\u6742\u8ba1\u7b97\u548c\u5b9e\u65f6\u56fe\u5f62\u6e32\u67d3\u65b9\u9762\u5177\u6709\u72ec\u7279\u7684\u4f18\u52bf\u88ab\u5e7f\u6cdb\u5e94\u7528\u3002

                            \u4e0e\u6b64\u540c\u65f6\uff0c\u7531\u4e8e\u7f3a\u4e4f\u5f02\u6784\u8d44\u6e90\u8c03\u5ea6\u7ba1\u7406\u7b49\u65b9\u9762\u7684\u7ecf\u9a8c\u548c\u4e13\u4e1a\u7684\u89e3\u51b3\u65b9\u6848\uff0c\u5bfc\u81f4\u4e86 GPU \u8bbe\u5907\u7684\u8d44\u6e90\u5229\u7528\u7387\u6781\u4f4e\uff0c\u7ed9\u4f01\u4e1a\u5e26\u6765\u4e86\u9ad8\u6602\u7684 AI \u751f\u4ea7\u6210\u672c\u3002 \u5982\u4f55\u964d\u672c\u589e\u6548\uff0c\u63d0\u9ad8 GPU \u7b49\u5f02\u6784\u8d44\u6e90\u7684\u5229\u7528\u6548\u7387\uff0c\u6210\u4e3a\u4e86\u5f53\u524d\u4f17\u591a\u4f01\u4e1a\u4e9f\u9700\u8de8\u8d8a\u7684\u4e00\u9053\u96be\u9898\u3002

                            "},{"location":"end-user/kpanda/gpu/index.html#gpu_1","title":"GPU \u80fd\u529b\u4ecb\u7ecd","text":"

                            \u7b97\u4e30 AI \u7b97\u529b\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\u652f\u6301\u5bf9 GPU\u3001NPU \u7b49\u5f02\u6784\u8d44\u6e90\u8fdb\u884c\u7edf\u4e00\u8c03\u5ea6\u548c\u8fd0\u7ef4\u7ba1\u7406\uff0c\u5145\u5206\u91ca\u653e GPU \u8d44\u6e90\u7b97\u529b\uff0c\u52a0\u901f\u4f01\u4e1a AI \u7b49\u65b0\u5174\u5e94\u7528\u53d1\u5c55\u3002GPU \u7ba1\u7406\u80fd\u529b\u5982\u4e0b\uff1a

                            • \u652f\u6301\u7edf\u4e00\u7eb3\u7ba1 NVIDIA\u3001\u534e\u4e3a\u6607\u817e\u3001\u5929\u6570\u7b49\u56fd\u5185\u5916\u5382\u5546\u7684\u5f02\u6784\u8ba1\u7b97\u8d44\u6e90\u3002
                            • \u652f\u6301\u540c\u4e00\u96c6\u7fa4\u591a\u5361\u5f02\u6784\u8c03\u5ea6\uff0c\u5e76\u652f\u6301\u96c6\u7fa4 GPU \u5361\u81ea\u52a8\u8bc6\u522b\u3002
                            • \u652f\u6301 NVIDIA GPU\u3001vGPU\u3001MIG \u7b49 GPU \u539f\u751f\u7ba1\u7406\u65b9\u6848\uff0c\u5e76\u63d0\u4f9b\u4e91\u539f\u751f\u80fd\u529b\u3002
                            • \u652f\u6301\u5355\u5757\u7269\u7406\u5361\u5207\u5206\u7ed9\u4e0d\u540c\u7684\u79df\u6237\u4f7f\u7528\uff0c\u5e76\u652f\u6301\u5bf9\u79df\u6237\u548c\u5bb9\u5668\u4f7f\u7528 GPU \u8d44\u6e90\u6309\u7167\u7b97\u529b\u3001\u663e\u5b58\u8fdb\u884c GPU \u8d44\u6e90\u914d\u989d\u3002
                            • \u652f\u6301\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u5e94\u7528\u7b49\u591a\u7ef4\u5ea6 GPU \u8d44\u6e90\u76d1\u63a7\uff0c\u5e2e\u52a9\u8fd0\u7ef4\u4eba\u5458\u7ba1\u7406 GPU \u8d44\u6e90\u3002
                            • \u517c\u5bb9 TensorFlow\u3001pytorch \u7b49\u591a\u79cd\u8bad\u7ec3\u6846\u67b6\u3002
                            "},{"location":"end-user/kpanda/gpu/index.html#gpu-operator","title":"GPU Operator \u4ecb\u7ecd","text":"

                            \u540c\u666e\u901a\u8ba1\u7b97\u673a\u786c\u4ef6\u4e00\u6837\uff0cNVIDIA GPU \u5361\u4f5c\u4e3a\u7269\u7406\u786c\u4ef6\uff0c\u5fc5\u987b\u5b89\u88c5 NVIDIA GPU \u9a71\u52a8\u540e\u624d\u80fd\u4f7f\u7528\u3002 \u4e3a\u4e86\u964d\u4f4e\u7528\u6237\u5728 kuberneets \u4e0a\u4f7f\u7528 GPU \u7684\u6210\u672c\uff0cNVIDIA \u5b98\u65b9\u63d0\u4f9b\u4e86 NVIDIA GPU Operator \u7ec4\u4ef6\u6765\u7ba1\u7406\u4f7f\u7528 NVIDIA GPU \u6240\u4f9d\u8d56\u7684\u5404\u79cd\u7ec4\u4ef6\u3002 \u8fd9\u4e9b\u7ec4\u4ef6\u5305\u62ec NVIDIA \u9a71\u52a8\u7a0b\u5e8f\uff08\u7528\u4e8e\u542f\u7528 CUDA\uff09\u3001NVIDIA \u5bb9\u5668\u8fd0\u884c\u65f6\u3001GPU \u8282\u70b9\u6807\u8bb0\u3001\u57fa\u4e8e DCGM \u7684\u76d1\u63a7\u7b49\u3002 \u7406\u8bba\u4e0a\u6765\u8bf4\u7528\u6237\u53ea\u9700\u8981\u5c06 GPU \u5361\u63d2\u5728\u5df2\u7ecf\u88ab kubernetes \u6240\u7eb3\u7ba1\u7684\u8ba1\u7b97\u8bbe\u5907\u4e0a\uff0c\u7136\u540e\u901a\u8fc7 GPU Operator \u5c31\u80fd\u4f7f\u7528 NVIDIA GPU \u7684\u6240\u6709\u80fd\u529b\u4e86\u3002 \u4e86\u89e3\u66f4\u591a NVIDIA GPU Operator \u76f8\u5173\u4fe1\u606f\uff0c\u8bf7\u53c2\u8003 NVIDIA \u5b98\u65b9\u6587\u6863\u3002 \u5982\u4f55\u90e8\u7f72\u8bf7\u53c2\u8003 GPU Operator \u79bb\u7ebf\u5b89\u88c5

                            NVIDIA GPU Operator \u67b6\u6784\u56fe\uff1a

                            "},{"location":"end-user/kpanda/gpu/FAQ.html","title":"GPU \u76f8\u5173 FAQ","text":""},{"location":"end-user/kpanda/gpu/FAQ.html#pod-nvidia-smi-gpu","title":"Pod \u5185 nvidia-smi \u770b\u4e0d\u5230 GPU \u8fdb\u7a0b","text":"

                            Q: \u5728\u4f7f\u7528 GPU \u7684 Pod \u5185\u6267\u884c nvidia-smi \u547d\u4ee4\u770b\u4e0d\u5230\u4f7f\u7528 GPU \u7684\u8fdb\u7a0b\u4fe1\u606f\uff0c\u5305\u62ec\u6574\u5361\u6a21\u5f0f\u3001vGPU \u6a21\u5f0f\u7b49\u3002

                            A: \u56e0\u4e3a\u6709 PID namespace \u9694\u79bb\uff0c\u5bfc\u81f4\u5728 Pod \u5185\u67e5\u770b\u4e0d\u5230 GPU \u8fdb\u7a0b\uff0c\u5982\u679c\u8981\u67e5\u770b GPU \u8fdb\u7a0b\u6709\u5982\u4e0b\u51e0\u79cd\u65b9\u6cd5\uff1a

                            • \u5728\u4f7f\u7528 GPU \u7684\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e hostPID: true\uff0c\u4f7f\u5176\u53ef\u4ee5\u67e5\u770b\u5230\u5bbf\u4e3b\u673a\u4e0a\u7684 PID
                            • \u5728 gpu-operator \u7684 driver Pod \u4e2d\u6267\u884c nvidia-smi \u547d\u4ee4\u67e5\u770b\u8fdb\u7a0b
                            • \u5728\u5bbf\u4e3b\u673a\u4e0a\u6267\u884c chroot /run/nvidia/driver nvidia-smi \u547d\u4ee4\u67e5\u770b\u8fdb\u7a0b
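                            A minimal sketch of the hostPID approach (the Pod name and image are placeholders; note that sharing the host PID namespace has security implications):

                            apiVersion: v1\nkind: Pod\nmetadata:\n  name: gpu-debug-pod\nspec:\n  hostPID: true  # share the host PID namespace so nvidia-smi can see host GPU processes\n  containers:\n  - name: cuda\n    image: nvidia/cuda:12.2.0-base-ubuntu22.04\n    command: [\"sleep\", \"infinity\"]\n    resources:\n      limits:\n        nvidia.com/gpu: 1\n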
                            "},{"location":"end-user/kpanda/gpu/Iluvatar_usage.html","title":"App \u4f7f\u7528\u5929\u6570\u667a\u82af\uff08Iluvatar\uff09GPU","text":"

                            \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528\u5929\u6570\u667a\u82af\u865a\u62df GPU\u3002

                            "},{"location":"end-user/kpanda/gpu/Iluvatar_usage.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                            • \u5df2\u7ecf\u90e8\u7f72 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 \u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u4e14\u5e73\u53f0\u8fd0\u884c\u6b63\u5e38\u3002
                            • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                            • \u5f53\u524d\u96c6\u7fa4\u5df2\u5b89\u88c5\u5929\u6570\u667a\u82af GPU \u9a71\u52a8\uff0c\u9a71\u52a8\u5b89\u88c5\u8bf7\u53c2\u8003\u5929\u6570\u667a\u82af\u5b98\u65b9\u6587\u6863\u3002
                            • \u5f53\u524d\u96c6\u7fa4\u5185 GPU \u5361\u672a\u8fdb\u884c\u4efb\u4f55\u865a\u62df\u5316\u64cd\u4f5c\u4e14\u672a\u88ab\u5176\u5b83 App \u5360\u7528\u3002
                            "},{"location":"end-user/kpanda/gpu/Iluvatar_usage.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"end-user/kpanda/gpu/Iluvatar_usage.html#_3","title":"\u4f7f\u7528\u754c\u9762\u914d\u7f6e","text":"
                            1. \u786e\u8ba4\u96c6\u7fa4\u662f\u5426\u5df2\u68c0\u6d4b GPU \u5361\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u81ea\u52a8\u542f\u7528\u5e76\u81ea\u52a8\u68c0\u6d4b\u5bf9\u5e94 GPU \u7c7b\u578b\u3002 \u76ee\u524d\u96c6\u7fa4\u4f1a\u81ea\u52a8\u542f\u7528 GPU \uff0c\u5e76\u4e14\u8bbe\u7f6e GPU \u7c7b\u578b\u4e3a Iluvatar \u3002

                            2. \u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u9009\u62e9\u7c7b\u578b\uff08Iluvatar\uff09\u4e4b\u540e\uff0c\u9700\u8981\u914d\u7f6e App \u4f7f\u7528\u7684 GPU \u8d44\u6e90\uff1a

                              • \u7269\u7406\u5361\u6570\u91cf\uff08iluvatar.ai/vcuda-core\uff09\uff1a\u8868\u793a\u5f53\u524d Pod \u9700\u8981\u6302\u8f7d\u51e0\u5f20\u7269\u7406\u5361\uff0c\u8f93\u5165\u503c\u5fc5\u987b\u4e3a\u6574\u6570\u4e14 \u5c0f\u4e8e\u7b49\u4e8e \u5bbf\u4e3b\u673a\u4e0a\u7684\u5361\u6570\u91cf\u3002
                              • \u663e\u5b58\u4f7f\u7528\u6570\u91cf\uff08iluvatar.ai/vcuda-memory\uff09\uff1a\u8868\u793a\u6bcf\u5f20\u5361\u5360\u7528\u7684 GPU \u663e\u5b58\uff0c\u503c\u5355\u4f4d\u4e3a MB\uff0c\u6700\u5c0f\u503c\u4e3a 1\uff0c\u6700\u5927\u503c\u4e3a\u6574\u5361\u7684\u663e\u5b58\u503c\u3002

                              \u5982\u679c\u4e0a\u8ff0\u503c\u914d\u7f6e\u7684\u6709\u95ee\u9898\u5219\u4f1a\u51fa\u73b0\u8c03\u5ea6\u5931\u8d25\uff0c\u8d44\u6e90\u5206\u914d\u4e0d\u4e86\u7684\u60c5\u51b5\u3002

                            "},{"location":"end-user/kpanda/gpu/Iluvatar_usage.html#yaml","title":"\u4f7f\u7528 YAML \u914d\u7f6e","text":"

                            \u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u7533\u8bf7 GPU \u8d44\u6e90\uff0c\u5728\u8d44\u6e90\u7533\u8bf7\u548c\u9650\u5236\u914d\u7f6e\u4e2d\u589e\u52a0iluvatar.ai/vcuda-core: 1\u3001iluvatar.ai/vcuda-memory: 200 \u53c2\u6570\uff0c\u914d\u7f6e App \u4f7f\u7528\u7269\u7406\u5361\u7684\u8d44\u6e90\u3002

                            apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-iluvatar-gpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-iluvatar-gpu-demo\n  template:\n    metadata:\n      labels:\n        app: full-iluvatar-gpu-demo\n    spec:\n      containers:\n      - image: nginx:perl\n        name: container-0\n        resources:\n          limits:\n            cpu: 250m\n            iluvatar.ai/vcuda-core: '1'\n            iluvatar.ai/vcuda-memory: '200'\n            memory: 512Mi\n          requests:\n            cpu: 250m\n            memory: 512Mi\n      imagePullSecrets:\n      - name: default-secret\n
                            "},{"location":"end-user/kpanda/gpu/dynamic-regulation.html","title":"GPU \u8d44\u6e90\u52a8\u6001\u8c03\u8282","text":"

                            \u63d0\u4f9b GPU \u8d44\u6e90\u52a8\u6001\u8c03\u6574\u529f\u80fd\uff0c\u5141\u8bb8\u60a8\u5728\u65e0\u9700\u91cd\u65b0\u52a0\u8f7d\u3001\u91cd\u7f6e\u6216\u91cd\u542f\u6574\u4e2a\u8fd0\u884c\u73af\u5883\u7684\u60c5\u51b5\u4e0b\uff0c\u5bf9\u5df2\u7ecf\u5206\u914d\u7684 vGPU \u8d44\u6e90\u8fdb\u884c\u5b9e\u65f6\u3001\u52a8\u6001\u7684\u8c03\u6574\u3002 \u8fd9\u4e00\u529f\u80fd\u65e8\u5728\u6700\u5927\u7a0b\u5ea6\u5730\u51cf\u5c11\u5bf9\u4e1a\u52a1\u8fd0\u884c\u7684\u5f71\u54cd\uff0c\u786e\u4fdd\u60a8\u7684\u4e1a\u52a1\u80fd\u591f\u6301\u7eed\u7a33\u5b9a\u5730\u8fd0\u884c\uff0c\u540c\u65f6\u6839\u636e\u5b9e\u9645\u9700\u6c42\u7075\u6d3b\u8c03\u6574 GPU \u8d44\u6e90\u3002

                            "},{"location":"end-user/kpanda/gpu/dynamic-regulation.html#_1","title":"\u4f7f\u7528\u573a\u666f","text":"
                            • \u5f39\u6027\u8d44\u6e90\u5206\u914d \uff1a\u5f53\u4e1a\u52a1\u9700\u6c42\u6216\u5de5\u4f5c\u8d1f\u8f7d\u53d1\u751f\u53d8\u5316\u65f6\uff0c\u53ef\u4ee5\u5feb\u901f\u8c03\u6574 GPU \u8d44\u6e90\u4ee5\u6ee1\u8db3\u65b0\u7684\u6027\u80fd\u8981\u6c42\u3002
                            • \u5373\u65f6\u54cd\u5e94 \uff1a\u5728\u9762\u5bf9\u7a81\u53d1\u7684\u9ad8\u8d1f\u8f7d\u6216\u4e1a\u52a1\u9700\u6c42\u65f6\uff0c\u53ef\u4ee5\u8fc5\u901f\u589e\u52a0 GPU \u8d44\u6e90\u800c\u65e0\u9700\u4e2d\u65ad\u4e1a\u52a1\u8fd0\u884c\uff0c\u4ee5\u786e\u4fdd\u670d\u52a1\u7684\u7a33\u5b9a\u6027\u548c\u6027\u80fd\u3002
                            "},{"location":"end-user/kpanda/gpu/dynamic-regulation.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                            \u4ee5\u4e0b\u662f\u4e00\u4e2a\u5177\u4f53\u7684\u64cd\u4f5c\u793a\u4f8b\uff0c\u5c55\u793a\u5982\u4f55\u5728\u4e0d\u91cd\u542f vGPU Pod \u7684\u60c5\u51b5\u4e0b\u52a8\u6001\u8c03\u6574 vGPU \u7684\u7b97\u529b\u548c\u663e\u5b58\u8d44\u6e90\uff1a

                            "},{"location":"end-user/kpanda/gpu/dynamic-regulation.html#vgpu-pod","title":"\u521b\u5efa\u4e00\u4e2a vGPU Pod","text":"

                            \u9996\u5148\uff0c\u6211\u4eec\u4f7f\u7528\u4ee5\u4e0b YAML \u521b\u5efa\u4e00\u4e2a vGPU Pod\uff0c\u5176\u7b97\u529b\u521d\u59cb\u4e0d\u9650\u5236\uff0c\u663e\u5b58\u9650\u5236\u4e3a 200Mb\u3002

                            kind: Deployment\napiVersion: apps/v1\nmetadata:\n  name: gpu-burn-test\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: gpu-burn-test\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: gpu-burn-test\n    spec:\n      containers:\n        - name: container-1\n          image: docker.io/chrstnhntschl/gpu_burn:latest\n          command:\n            - sleep\n            - '100000'\n          resources:\n            limits:\n              cpu: 1m\n              memory: 1Gi\n              nvidia.com/gpucores: '0'\n              nvidia.com/gpumem: '200'\n              nvidia.com/vgpu: '1'\n

                            Check the GPU resources allocated to the Pod before the adjustment:
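                            One way to do this from the terminal (a minimal check; <pod-name> is a placeholder, and nvidia-smi is assumed to be available in the container image):

                            kubectl exec -it <pod-name> -- nvidia-smi\n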

                            "},{"location":"end-user/kpanda/gpu/dynamic-regulation.html#_3","title":"\u52a8\u6001\u8c03\u6574\u7b97\u529b","text":"

                            \u5982\u679c\u9700\u8981\u4fee\u6539\u7b97\u529b\u4e3a 10%\uff0c\u53ef\u4ee5\u6309\u7167\u4ee5\u4e0b\u6b65\u9aa4\u64cd\u4f5c\uff1a

                            1. \u8fdb\u5165\u5bb9\u5668\uff1a

                              kubectl exec -it <pod-name> -- /bin/bash\n
                            2. Run:

                              export CUDA_DEVICE_SM_LIMIT=10\n
                            3. Run directly in the current terminal:

                              ./gpu_burn 60\n

                              The change then takes effect for this program. Note that you must not exit the current Bash terminal.

                            "},{"location":"end-user/kpanda/gpu/dynamic-regulation.html#_4","title":"\u52a8\u6001\u8c03\u6574\u663e\u5b58","text":"

                            \u5982\u679c\u9700\u8981\u4fee\u6539\u663e\u5b58\u4e3a 300 MB\uff0c\u53ef\u4ee5\u6309\u7167\u4ee5\u4e0b\u6b65\u9aa4\u64cd\u4f5c\uff1a

                            1. \u8fdb\u5165\u5bb9\u5668\uff1a

                              kubectl exec -it <pod-name> -- /bin/bash\n
                            2. Run the following commands to set the GPU memory limit:

                              export CUDA_DEVICE_MEMORY_LIMIT_0=300m\nexport CUDA_DEVICE_MEMORY_SHARED_CACHE=/usr/local/vgpu/d.cache\n

                              Note

                              Each time you change the memory size, the file name d.cache must be changed as well, for example to a.cache, 1.cache, and so on, to avoid cache conflicts.

                            3. Run directly in the current terminal:

                              ./gpu_burn 60\n

                              The change then takes effect for this program. Likewise, you must not exit the current Bash terminal.

                            Check the GPU resources allocated to the Pod after the adjustment:

                            Through the steps above, you can dynamically adjust a vGPU Pod's compute power and GPU memory without restarting it, meeting business needs more flexibly and optimizing resource utilization.

                            "},{"location":"end-user/kpanda/gpu/gpu_matrix.html","title":"GPU \u652f\u6301\u77e9\u9635","text":"

                            \u672c\u9875\u8bf4\u660e\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u7684 GPU \u53ca\u64cd\u4f5c\u7cfb\u7edf\u6240\u5bf9\u5e94\u7684\u77e9\u9635\u3002

                            "},{"location":"end-user/kpanda/gpu/gpu_matrix.html#nvidia-gpu","title":"NVIDIA GPU","text":"GPU \u5382\u5546\u53ca\u7c7b\u578b \u652f\u6301 GPU \u578b\u53f7 \u9002\u914d\u7684\u64cd\u4f5c\u7cfb\u7edf\uff08\u5728\u7ebf\uff09 \u63a8\u8350\u5185\u6838 \u63a8\u8350\u7684\u64cd\u4f5c\u7cfb\u7edf\u53ca\u5185\u6838 \u5b89\u88c5\u6587\u6863 NVIDIA GPU\uff08\u6574\u5361/vGPU\uff09 NVIDIA Fermi (2.1) \u67b6\u6784 CentOS 7 Kernel 3.10.0-123 ~ 3.10.0-1160\u5185\u6838\u53c2\u8003\u6587\u6863\u5efa\u8bae\u4f7f\u7528\u64cd\u4f5c\u7cfb\u7edf\u5bf9\u5e94 Kernel \u7248\u672c \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a 3.10.0-1160 GPU Operator \u79bb\u7ebf\u5b89\u88c5 NVIDIA GeForce 400 \u7cfb\u5217 CentOS 8 Kernel 4.18.0-80 ~ 4.18.0-348 NVIDIA Quadro 4000 \u7cfb\u5217 Ubuntu 20.04 Kernel 5.4 NVIDIA Tesla 20 \u7cfb\u5217 Ubuntu 22.04 Kernel 5.19 NVIDIA Ampere \u67b6\u6784\u7cfb\u5217(A100;A800;H100) RHEL 7 Kernel 3.10.0-123 ~ 3.10.0-1160 RHEL 8 Kernel 4.18.0-80 ~ 4.18.0-348 NVIDIA MIG NVIDIA Ampere \u67b6\u6784\u7cfb\u5217\uff08A100\u3001A800\u3001H100\uff09 CentOS 7 Kernel 3.10.0-123 ~ 3.10.0-1160 \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a3.10.0-1160 GPU Operator \u79bb\u7ebf\u5b89\u88c5 CentOS 8 Kernel 4.18.0-80 ~ 4.18.0-348 Ubuntu 20.04 Kernel 5.4 Ubuntu 22.04 Kernel 5.19 RHEL 7 Kernel 3.10.0-123 ~ 3.10.0-1160 RHEL 8 Kernel 4.18.0-80 ~ 4.18.0-348"},{"location":"end-user/kpanda/gpu/gpu_matrix.html#ascendnpu","title":"\u6607\u817e\uff08Ascend\uff09NPU","text":"GPU \u5382\u5546\u53ca\u7c7b\u578b \u652f\u6301 NPU \u578b\u53f7 \u9002\u914d\u7684\u64cd\u4f5c\u7cfb\u7edf\uff08\u5728\u7ebf\uff09 \u63a8\u8350\u5185\u6838 \u63a8\u8350\u7684\u64cd\u4f5c\u7cfb\u7edf\u53ca\u5185\u6838 \u5b89\u88c5\u6587\u6863 \u6607\u817e\uff08Ascend 310\uff09 Ascend 310 Ubuntu 20.04 \u8be6\u60c5\u53c2\u8003\uff1a\u5185\u6838\u7248\u672c\u8981\u6c42 \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a3.10.0-1160 300 \u548c 310P \u9a71\u52a8\u6587\u6863 Ascend 310P\uff1b CentOS 7.6 CentOS 8.2 KylinV10SP1 \u64cd\u4f5c\u7cfb\u7edf openEuler \u64cd\u4f5c\u7cfb\u7edf \u6607\u817e\uff08Ascend 910\uff09 Ascend 910B Ubuntu 20.04 \u8be6\u60c5\u53c2\u8003\u5185\u6838\u7248\u672c\u8981\u6c42 \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a3.10.0-1160 910 \u9a71\u52a8\u6587\u6863 CentOS 7.6 CentOS 8.2 KylinV10SP1 \u64cd\u4f5c\u7cfb\u7edf openEuler \u64cd\u4f5c\u7cfb\u7edf"},{"location":"end-user/kpanda/gpu/gpu_matrix.html#iluvatargpu","title":"\u5929\u6570\u667a\u82af\uff08Iluvatar\uff09GPU","text":"GPU \u5382\u5546\u53ca\u7c7b\u578b \u652f\u6301\u7684 GPU \u578b\u53f7 \u9002\u914d\u7684\u64cd\u4f5c\u7cfb\u7edf\uff08\u5728\u7ebf\uff09 \u63a8\u8350\u5185\u6838 \u63a8\u8350\u7684\u64cd\u4f5c\u7cfb\u7edf\u53ca\u5185\u6838 \u5b89\u88c5\u6587\u6863 \u5929\u6570\u667a\u82af(Iluvatar vGPU) BI100 CentOS 7 Kernel 3.10.0-957.el7.x86_64 ~ 3.10.0-1160.42.2.el7.x86_64 \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a 3.10.0-1160 \u8865\u5145\u4e2d MR100\uff1b CentOS 8 Kernel 4.18.0-80.el8.x86_64 ~ 4.18.0-305.19.1.el8_4.x86_64 Ubuntu 20.04 Kernel 4.15.0-20-generic ~ 4.15.0-160-generic Kernel 5.4.0-26-generic ~ 5.4.0-89-generic Kernel 5.8.0-23-generic ~ 5.8.0-63-generic Ubuntu 21.04 Kernel 4.15.0-20-generic ~ 4.15.0-160-generic Kernel 5.4.0-26-generic ~ 5.4.0-89-generic Kernel 5.8.0-23-generic ~ 5.8.0-63-generic openEuler 22.03 LTS Kernel \u7248\u672c\u5927\u4e8e\u7b49\u4e8e 5.1 
\u4e14\u5c0f\u4e8e\u7b49\u4e8e 5.10"},{"location":"end-user/kpanda/gpu/gpu_matrix.html#metaxgpu","title":"\u6c90\u66e6\uff08Metax\uff09GPU","text":"GPU \u5382\u5546\u53ca\u7c7b\u578b \u652f\u6301\u7684 GPU \u578b\u53f7 \u9002\u914d\u7684\u64cd\u4f5c\u7cfb\u7edf\uff08\u5728\u7ebf\uff09 \u63a8\u8350\u5185\u6838 \u63a8\u8350\u7684\u64cd\u4f5c\u7cfb\u7edf\u53ca\u5185\u6838 \u5b89\u88c5\u6587\u6863 \u6c90\u66e6Metax\uff08\u6574\u5361/vGPU\uff09 \u66e6\u4e91 C500 \u6c90\u66e6 GPU \u5b89\u88c5\u4f7f\u7528"},{"location":"end-user/kpanda/gpu/gpu_scheduler_config.html","title":"GPU \u8c03\u5ea6\u914d\u7f6e\uff08Binpack \u548c Spread \uff09","text":"

                            This article describes how, when using NVIDIA vGPU, Binpack and Spread GPU scheduling configurations can reduce GPU resource fragmentation, prevent single points of failure, and enable advanced vGPU scheduling. The SOPHON AI computing platform provides Binpack and Spread scheduling policies at two levels, cluster and workload, to meet the needs of different scenarios.

                            "},{"location":"end-user/kpanda/gpu/gpu_scheduler_config.html#_1","title":"Prerequisites","text":"
                            • GPU devices are correctly installed on the cluster nodes.
                            • The gpu-operator component and the Nvidia-vgpu component are correctly installed in the cluster.
                            • In the cluster's node list, the NVIDIA-vGPU type appears under GPU mode.
                            "},{"location":"end-user/kpanda/gpu/gpu_scheduler_config.html#_2","title":"Use Cases","text":"
                            • Scheduling policy at the GPU-card level

                              • Binpack: prefers the same GPU card on a node; suitable for raising GPU utilization and reducing resource fragmentation.
                              • Spread: spreads Pods across different GPU cards on a node; suitable for high-availability scenarios, avoiding single-card failures.
                            • Scheduling policy at the node level

                              • Binpack: Pods prefer the same node; suitable for raising GPU utilization and reducing resource fragmentation.
                              • Spread: Pods are spread across different nodes; suitable for high-availability scenarios, avoiding single-node failures.
                            "},{"location":"end-user/kpanda/gpu/gpu_scheduler_config.html#binpack-spread","title":"\u96c6\u7fa4\u7ef4\u5ea6\u4f7f\u7528 Binpack \u548c Spread \u8c03\u5ea6\u914d\u7f6e","text":"

                            Note

                            \u9ed8\u8ba4\u60c5\u51b5\u4e0b\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u4f1a\u9075\u5faa\u96c6\u7fa4\u7ea7\u522b\u7684 Binpack \u548c Spread \u8c03\u5ea6\u914d\u7f6e\u3002 \u82e5\u5de5\u4f5c\u8d1f\u8f7d\u5355\u72ec\u8bbe\u7f6e\u4e86\u4e0e\u96c6\u7fa4\u4e0d\u4e00\u81f4\u7684 Binpack \u548c Spread \u8c03\u5ea6\u7b56\u7565\uff0c\u5219\u8be5\u5de5\u4f5c\u8d1f\u8f7d\u4f18\u5148\u9075\u5faa\u5176\u672c\u8eab\u7684\u8c03\u5ea6\u7b56\u7565\u3002

                            1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9009\u62e9\u9700\u8981\u8c03\u6574 Binpack \u548c Spread \u8c03\u5ea6\u7b56\u7565\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u56fe\u6807\u5e76\u5728\u4e0b\u62c9\u5217\u8868\u4e2d\u70b9\u51fb GPU \u8c03\u5ea6\u914d\u7f6e \u3002

                            2. \u6839\u636e\u4e1a\u52a1\u573a\u666f\u8c03\u6574 GPU \u8c03\u5ea6\u914d\u7f6e\uff0c\u5e76\u70b9\u51fb \u786e\u5b9a \u540e\u4fdd\u5b58\u3002

                            "},{"location":"end-user/kpanda/gpu/gpu_scheduler_config.html#binpack-spread_1","title":"\u5de5\u4f5c\u8d1f\u8f7d\u7ef4\u5ea6\u4f7f\u7528 Binpack \u548c Spread \u8c03\u5ea6\u914d\u7f6e","text":"

                            Note

                            \u5f53\u5de5\u4f5c\u8d1f\u8f7d\u7ef4\u5ea6\u7684 Binpack \u548c Spread \u8c03\u5ea6\u7b56\u7565\u4e0e\u96c6\u7fa4\u7ea7\u522b\u7684\u914d\u7f6e\u51b2\u7a81\u65f6\uff0c\u4f18\u5148\u9075\u5faa\u5de5\u4f5c\u8d1f\u8f7d\u7ef4\u5ea6\u7684\u914d\u7f6e\u3002

                            \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u65e0\u72b6\u6001\u8d1f\u8f7d\uff0c\u5e76\u5728\u5de5\u4f5c\u8d1f\u8f7d\u4e2d\u914d\u7f6e Binpack \u548c Spread \u8c03\u5ea6\u7b56\u7565 \u3002

                            1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                            2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u65e0\u72b6\u6001\u8d1f\u8f7d \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                            3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\uff0c\u5e76\u5728 \u5bb9\u5668\u914d\u7f6e \u4e2d\u542f\u7528 GPU \u914d\u7f6e\uff0c\u9009\u62e9 GPU \u7c7b\u578b\u4e3a NVIDIA vGPU\uff0c \u70b9\u51fb \u9ad8\u7ea7\u8bbe\u7f6e \uff0c\u542f\u7528 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\uff0c\u6839\u636e\u4e1a\u52a1\u573a\u666f\u8c03\u6574 GPU \u8c03\u5ea6\u914d\u7f6e\u3002\u914d\u7f6e\u5b8c\u6210\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65 \uff0c \u8fdb\u5165 \u670d\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002
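                            For declaring the same intent in YAML, one point of reference: if the platform's vGPU scheduling is built on the open-source HAMi scheduler (an assumption about the underlying implementation, not a documented interface of this platform), per-workload policies are typically expressed as Pod annotations, for example:

                            metadata:\n  annotations:\n    hami.io/node-scheduler-policy: \"binpack\"  # assumed HAMi annotation: prefer packing Pods onto the same node\n    hami.io/gpu-scheduler-policy: \"spread\"  # assumed HAMi annotation: spread Pods across different GPU cards\n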

                            "},{"location":"end-user/kpanda/gpu/vgpu_quota.html","title":"GPU \u914d\u989d\u7ba1\u7406","text":"

                            \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528 vGPU \u80fd\u529b\u3002

                            "},{"location":"end-user/kpanda/gpu/vgpu_quota.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                            \u5f53\u524d\u96c6\u7fa4\u5df2\u901a\u8fc7 Operator \u6216\u624b\u52a8\u65b9\u5f0f\u90e8\u7f72\u5bf9\u5e94\u7c7b\u578b GPU \u9a71\u52a8\uff08NVIDIA GPU\u3001NVIDIA MIG\u3001\u5929\u6570\u3001\u6607\u817e\uff09

                            "},{"location":"end-user/kpanda/gpu/vgpu_quota.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                            1. \u8fdb\u5165 Namespaces \u4e2d\uff0c\u70b9\u51fb \u914d\u989d\u7ba1\u7406 \u53ef\u4ee5\u914d\u7f6e\u5f53\u524d Namespace \u53ef\u4ee5\u4f7f\u7528\u7684 GPU \u8d44\u6e90\u3002

                            2. \u5f53\u524d\u547d\u540d\u7a7a\u95f4\u914d\u989d\u7ba1\u7406\u8986\u76d6\u7684\u5361\u7c7b\u578b\u4e3a\uff1aNVIDIA vGPU\u3001NVIDIA MIG\u3001\u5929\u6570\u3001\u6607\u817e\u3002

                              NVIDIA vGPU \u914d\u989d\u7ba1\u7406 \uff1a\u914d\u7f6e\u5177\u4f53\u53ef\u4ee5\u4f7f\u7528\u7684\u914d\u989d\uff0c\u4f1a\u521b\u5efa ResourcesQuota CR\uff1a

                              • \u7269\u7406\u5361\u6570\u91cf\uff08nvidia.com/vgpu\uff09\uff1a\u8868\u793a\u5f53\u524d POD \u9700\u8981\u6302\u8f7d\u51e0\u5f20\u7269\u7406\u5361\uff0c\u5e76\u4e14\u8981 \u5c0f\u4e8e\u7b49\u4e8e \u5bbf\u4e3b\u673a\u4e0a\u7684\u5361\u6570\u91cf\u3002
                              • GPU \u7b97\u529b\uff08nvidia.com/gpucores\uff09\uff1a\u8868\u793a\u6bcf\u5f20\u5361\u5360\u7528\u7684 GPU \u7b97\u529b\uff0c\u503c\u8303\u56f4\u4e3a 0-100\uff1b\u5982\u679c\u914d\u7f6e\u4e3a 0\uff0c\u5219\u8ba4\u4e3a\u4e0d\u5f3a\u5236\u9694\u79bb\uff1b\u914d\u7f6e\u4e3a 100\uff0c\u5219\u8ba4\u4e3a\u72ec\u5360\u6574\u5f20\u5361\u3002
                              • GPU \u663e\u5b58\uff08nvidia.com/gpumem\uff09\uff1a\u8868\u793a\u6bcf\u5f20\u5361\u5360\u7528\u7684 GPU \u663e\u5b58\uff0c\u503c\u5355\u4f4d\u4e3a MB\uff0c\u6700\u5c0f\u503c\u4e3a 1\uff0c\u6700\u5927\u503c\u4e3a\u6574\u5361\u7684\u663e\u5b58\u503c\u3002
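                            As a rough illustration of the underlying mechanism, quota for extended resources in Kubernetes is expressed with the requests. prefix; a minimal sketch of a standard ResourceQuota covering the resources above (the name, namespace, and amounts are placeholders, and the platform's own ResourcesQuota CR may differ):

                            apiVersion: v1\nkind: ResourceQuota\nmetadata:\n  name: vgpu-quota\n  namespace: default\nspec:\n  hard:\n    requests.nvidia.com/vgpu: \"4\"  # at most 4 vGPU cards requested in this namespace\n    requests.nvidia.com/gpucores: \"200\"  # total compute-power share across Pods\n    requests.nvidia.com/gpumem: \"8000\"  # total GPU memory in MB\n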

                            "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html","title":"\u6607\u817e NPU \u7ec4\u4ef6\u5b89\u88c5","text":"

                            \u672c\u7ae0\u8282\u63d0\u4f9b\u6607\u817e NPU \u9a71\u52a8\u3001Device Plugin\u3001NPU-Exporter \u7b49\u7ec4\u4ef6\u7684\u5b89\u88c5\u6307\u5bfc\u3002

                            "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                            1. \u5b89\u88c5\u524d\u8bf7\u786e\u8ba4\u652f\u6301\u7684 NPU \u578b\u53f7\uff0c\u8be6\u60c5\u8bf7\u53c2\u8003\u6607\u817e NPU \u77e9\u9635
                            2. \u8bf7\u786e\u8ba4 \u5bf9\u5e94 NPU \u578b\u53f7\u6240\u8981\u6c42\u7684\u5185\u6838\u7248\u672c\u662f\u5426\u5339\u914d\uff0c\u8be6\u60c5\u8bf7\u53c2\u8003\u6607\u817e NPU \u77e9\u9635
                            3. \u51c6\u5907 Kubernetes \u57fa\u7840\u73af\u5883
                            "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html#_2","title":"\u5b89\u88c5\u6b65\u9aa4","text":"

                            \u4f7f\u7528 NPU \u8d44\u6e90\u4e4b\u524d\uff0c\u9700\u8981\u5b8c\u6210\u56fa\u4ef6\u5b89\u88c5\u3001NPU \u9a71\u52a8\u5b89\u88c5\u3001 Docker Runtime \u5b89\u88c5\u3001\u7528\u6237\u521b\u5efa\u3001\u65e5\u5fd7\u76ee\u5f55\u521b\u5efa\u4ee5\u53ca NPU Device Plugin \u5b89\u88c5\uff0c\u8be6\u60c5\u53c2\u8003\u5982\u4e0b\u6b65\u9aa4\u3002

                            "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html#_3","title":"\u5b89\u88c5\u56fa\u4ef6","text":"
                            1. \u5b89\u88c5\u524d\u8bf7\u786e\u8ba4\u5185\u6838\u7248\u672c\u5728\u201c\u4e8c\u8fdb\u5236\u5b89\u88c5\u201d\u5b89\u88c5\u65b9\u5f0f\u5bf9\u5e94\u7684\u7248\u672c\u8303\u56f4\u5185\uff0c\u5219\u53ef\u4ee5\u76f4\u63a5\u5b89\u88c5NPU\u9a71\u52a8\u56fa\u4ef6\u3002
                            2. \u56fa\u4ef6\u4e0e\u9a71\u52a8\u4e0b\u8f7d\u8bf7\u53c2\u8003\u56fa\u4ef6\u4e0b\u8f7d\u5730\u5740
                            3. \u56fa\u4ef6\u5b89\u88c5\u8bf7\u53c2\u8003\u5b89\u88c5 NPU \u9a71\u52a8\u56fa\u4ef6
                            "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html#npu_1","title":"\u5b89\u88c5 NPU \u9a71\u52a8","text":"
                            1. \u5982\u9a71\u52a8\u672a\u5b89\u88c5\uff0c\u8bf7\u53c2\u8003\u6607\u817e\u5b98\u65b9\u6587\u6863\u8fdb\u884c\u5b89\u88c5\u3002\u4f8b\u5982 Ascend910\uff0c\u53c2\u8003 910 \u9a71\u52a8\u5b89\u88c5\u6587\u6863\u3002
                            2. \u8fd0\u884c npu-smi info \u547d\u4ee4\uff0c\u5e76\u4e14\u80fd\u591f\u6b63\u5e38\u8fd4\u56de NPU \u4fe1\u606f\uff0c\u8868\u793a NPU \u9a71\u52a8\u4e0e\u56fa\u4ef6\u5df2\u5c31\u7eea\u3002
                            "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html#docker-runtime","title":"\u5b89\u88c5 Docker Runtime","text":"
                            1. \u4e0b\u8f7d Ascend Docker Runtime

                              \u793e\u533a\u7248\u4e0b\u8f7d\u5730\u5740\uff1ahttps://www.hiascend.com/zh/software/mindx-dl/community

                              wget -c https://mindx.obs.cn-south-1.myhuaweicloud.com/OpenSource/MindX/MindX%205.0.RC2/MindX%20DL%205.0.RC2/Ascend-docker-runtime_5.0.RC2_linux-x86_64.run\n

                              To install to a specified path, run the following two commands in order; the parameter is the installation path:

                              chmod u+x Ascend-docker-runtime_5.0.RC2_linux-x86_64.run \n./Ascend-docker-runtime_{version}_linux-{arch}.run --install --install-path=<path>\n
                            2. Modify the containerd configuration file

                              If containerd has no default configuration file, run the following three commands in order to create one:

                              mkdir /etc/containerd \ncontainerd config default > /etc/containerd/config.toml \nvim /etc/containerd/config.toml\n

                              If containerd already has a configuration file:

                              vim /etc/containerd/config.toml\n

                              Modify the installation path of the runtime according to your actual situation, mainly the runtime field:

                              ... \n[plugins.\"io.containerd.monitor.v1.cgroups\"]\n   no_prometheus = false  \n[plugins.\"io.containerd.runtime.v1.linux\"]\n   shim = \"containerd-shim\"\n   runtime = \"/usr/local/Ascend/Ascend-Docker-Runtime/ascend-docker-runtime\"\n   runtime_root = \"\"\n   no_shim = false\n   shim_debug = false\n [plugins.\"io.containerd.runtime.v2.task\"]\n   platforms = [\"linux/amd64\"]\n...\n

                              Run the following command to restart containerd:

                              systemctl restart containerd\n
                            "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html#_4","title":"\u7528\u6237\u521b\u5efa","text":"

                            \u5728\u5bf9\u5e94\u7ec4\u4ef6\u5b89\u88c5\u7684\u8282\u70b9\u4e0a\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u521b\u5efa\u7528\u6237\u3002

                            # Ubuntu operating system\nuseradd -d /home/hwMindX -u 9000 -m -s /usr/sbin/nologin hwMindX\nusermod -a -G HwHiAiUser hwMindX\n# CentOS operating system\nuseradd -d /home/hwMindX -u 9000 -m -s /sbin/nologin hwMindX\nusermod -a -G HwHiAiUser hwMindX\n
                            "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html#_5","title":"\u65e5\u5fd7\u76ee\u5f55\u521b\u5efa","text":"

                            \u5728\u5bf9\u5e94\u8282\u70b9\u521b\u5efa\u7ec4\u4ef6\u65e5\u5fd7\u7236\u76ee\u5f55\u548c\u5404\u7ec4\u4ef6\u7684\u65e5\u5fd7\u76ee\u5f55\uff0c\u5e76\u8bbe\u7f6e\u76ee\u5f55\u5bf9\u5e94\u5c5e\u4e3b\u548c\u6743\u9650\u3002\u6267\u884c\u4e0b\u8ff0\u547d\u4ee4\uff0c\u521b\u5efa\u7ec4\u4ef6\u65e5\u5fd7\u7236\u76ee\u5f55\u3002

                            mkdir -m 755 /var/log/mindx-dl\nchown root:root /var/log/mindx-dl\n

                            Run the following command to create the log directory for the Device Plugin component.

                            mkdir -m 750 /var/log/mindx-dl/devicePlugin\nchown root:root /var/log/mindx-dl/devicePlugin\n

                            Note

                            Create a log directory for each required component; in this example, only the Device Plugin component is needed. If other components are required, see the official documentation

                            "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html#label","title":"\u521b\u5efa\u8282\u70b9 Label","text":"

                            \u53c2\u8003\u4e0b\u8ff0\u547d\u4ee4\u5728\u5bf9\u5e94\u8282\u70b9\u4e0a\u521b\u5efa Label\uff1a

                            # Create this label on compute nodes where the driver is installed\nkubectl label node {nodename} huawei.com.ascend/Driver=installed\nkubectl label node {nodename} node-role.kubernetes.io/worker=worker\nkubectl label node {nodename} workerselector=dls-worker-node\nkubectl label node {nodename} host-arch=huawei-arm  # or host-arch=huawei-x86, choose according to the actual situation\nkubectl label node {nodename} accelerator=huawei-Ascend910  # choose according to the actual situation\n# Create this label on the control node\nkubectl label node {nodename} masterselector=dls-master-node\n
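                            You can confirm that the labels took effect with a quick check ({nodename} is the node name used above):

                            kubectl get node {nodename} --show-labels\n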
                            "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html#device-plugin-npuexporter","title":"\u5b89\u88c5 Device Plugin \u548c NpuExporter","text":"

                            \u529f\u80fd\u6a21\u5757\u8def\u5f84\uff1a \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u7ba1\u7406 \uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u6a21\u677f -> \u641c\u7d22 ascend-mindxdl \u3002

                            • DevicePlugin \uff1a\u901a\u8fc7\u63d0\u4f9b\u901a\u7528\u8bbe\u5907\u63d2\u4ef6\u673a\u5236\u548c\u6807\u51c6\u7684\u8bbe\u5907API\u63a5\u53e3\uff0c\u4f9bKubernetes\u4f7f\u7528\u8bbe\u5907\u3002\u5efa\u8bae\u4f7f\u7528\u9ed8\u8ba4\u7684\u955c\u50cf\u53ca\u7248\u672c\u3002
                            • NpuExporter \uff1a\u57fa\u4e8ePrometheus/Telegraf\u751f\u6001\uff0c\u8be5\u7ec4\u4ef6\u63d0\u4f9b\u63a5\u53e3\uff0c\u5e2e\u52a9\u7528\u6237\u80fd\u591f\u5173\u6ce8\u5230\u6607\u817e\u7cfb\u5217AI\u5904\u7406\u5668\u4ee5\u53ca\u5bb9\u5668\u7ea7\u5206\u914d\u72b6\u6001\u3002\u5efa\u8bae\u4f7f\u7528\u9ed8\u8ba4\u7684\u955c\u50cf\u53ca\u7248\u672c\u3002
                            • ServiceMonitor \uff1a\u9ed8\u8ba4\u4e0d\u5f00\u542f\uff0c\u5f00\u542f\u540e\u53ef\u524d\u5f80\u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u67e5\u770b NPU \u76f8\u5173\u76d1\u63a7\u3002\u5982\u9700\u5f00\u542f\uff0c\u8bf7\u786e\u4fdd insight-agent \u5df2\u5b89\u88c5\u5e76\u5904\u4e8e\u8fd0\u884c\u72b6\u6001\uff0c\u5426\u5219\u5c06\u5bfc\u81f4 ascend-mindxdl \u5b89\u88c5\u5931\u8d25\u3002
                            • isVirtualMachine \uff1a\u9ed8\u8ba4\u4e0d\u5f00\u542f\uff0c\u5982\u679c NPU \u8282\u70b9\u4e3a\u4e91\u4e3b\u673a\u573a\u666f\uff0c\u8bf7\u5f00\u542f\u00a0isVirtualMachine \u53c2\u6570\u3002

                            \u5b89\u88c5\u6210\u529f\u540e\uff0c\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4\u4e0b\u4f1a\u51fa\u73b0\u4e24\u4e2a\u7ec4\u4ef6\uff0c\u5982\u4e0b\u56fe\uff1a

                            \u540c\u65f6\u8282\u70b9\u4fe1\u606f\u4e0a\u4e5f\u4f1a\u51fa\u73b0\u5bf9\u5e94 NPU \u7684\u4fe1\u606f\uff1a

                            \u4e00\u5207\u5c31\u7eea\u540e\uff0c\u6211\u4eec\u901a\u8fc7\u9875\u9762\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u5c31\u80fd\u591f\u9009\u62e9\u5230\u5bf9\u5e94\u7684 NPU \u8bbe\u5907\uff0c\u5982\u4e0b\u56fe\uff1a

                            Note

                            \u6709\u5173\u8be6\u7ec6\u4f7f\u7528\u6b65\u9aa4\uff0c\u8bf7\u53c2\u7167\u5e94\u7528\u4f7f\u7528\u6607\u817e\uff08Ascend\uff09NPU\u3002

                            "},{"location":"end-user/kpanda/gpu/ascend/ascend_usage.html","title":"\u5e94\u7528\u4f7f\u7528\u6607\u817e\uff08Ascend\uff09NPU","text":"

                            \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528\u6607\u817e GPU\u3002

                            "},{"location":"end-user/kpanda/gpu/ascend/ascend_usage.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                            • \u5f53\u524d NPU \u8282\u70b9\u5df2\u5b89\u88c5\u6607\u817e \uff08Ascend\uff09\u9a71\u52a8\u3002
                            • \u5f53\u524d NPU \u8282\u70b9\u5df2\u5b89\u88c5 Ascend-Docker-Runtime \u7ec4\u4ef6\u3002
                            • \u5f53\u524d\u96c6\u7fa4\u5df2\u5b89\u88c5 NPU MindX DL \u5957\u4ef6\u3002
                            • \u5f53\u524d\u96c6\u7fa4\u5185 NPU \u5361\u672a\u8fdb\u884c\u4efb\u4f55\u865a\u62df\u5316\u64cd\u4f5c\u6216\u88ab\u5176\u5b83\u5e94\u7528\u5360\u7528\u3002

                            \u8bf7\u53c2\u8003\u6607\u817e NPU \u7ec4\u4ef6\u5b89\u88c5\u6587\u6863\u5b89\u88c5\u57fa\u7840\u73af\u5883\u3002

                            "},{"location":"end-user/kpanda/gpu/ascend/ascend_usage.html#_2","title":"\u5feb\u901f\u4f7f\u7528","text":"

                            \u672c\u6587\u4f7f\u7528\u6607\u817e\u793a\u4f8b\u5e93\u4e2d\u7684 AscentCL \u56fe\u7247\u5206\u7c7b\u5e94\u7528\u793a\u4f8b\u3002

                            1. \u4e0b\u8f7d\u6607\u817e\u4ee3\u7801\u5e93

                              \u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\u4e0b\u8f7d\u6607\u817e Demo \u793a\u4f8b\u4ee3\u7801\u5e93\uff0c\u5e76\u4e14\u8bf7\u8bb0\u4f4f\u4ee3\u7801\u5b58\u653e\u7684\u4f4d\u7f6e\uff0c\u540e\u7eed\u9700\u8981\u4f7f\u7528\u3002

                              git clone https://gitee.com/ascend/samples.git\n
                            2. Prepare the base image

                              This example uses the ascend-pytorch base image, which can be obtained from the Ascend image registry.

                            3. Prepare the YAML

                              ascend-demo.yaml
                              apiVersion: batch/v1\nkind: Job\nmetadata:\n  name: resnetinfer1-1-1usoc\nspec:\n  template:\n    spec:\n      containers:\n        - image: ascendhub.huawei.com/public-ascendhub/ascend-pytorch:23.0.RC2-ubuntu18.04 # Inference image name\n          imagePullPolicy: IfNotPresent\n          name: resnet50infer\n          securityContext:\n            runAsUser: 0\n          command:\n            - \"/bin/bash\"\n            - \"-c\"\n            - |\n              source /usr/local/Ascend/ascend-toolkit/set_env.sh &&\n              TEMP_DIR=/root/samples_copy_$(date '+%Y%m%d_%H%M%S_%N') &&\n              cp -r /root/samples \"$TEMP_DIR\" &&\n              cd \"$TEMP_DIR\"/inference/modelInference/sampleResnetQuickStart/python/model &&\n              wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/resnet50/resnet50.onnx &&\n              atc --model=resnet50.onnx --framework=5 --output=resnet50 --input_shape=\"actual_input_1:1,3,224,224\"  --soc_version=Ascend910 &&\n              cd ../data &&\n              wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg &&\n              cd ../scripts &&\n              bash sample_run.sh\n          resources:\n            requests:\n              huawei.com/Ascend910: 1 # Number of the Ascend 910 Processors\n            limits:\n              huawei.com/Ascend910: 1 # The value should be the same as that of requests\n          volumeMounts:\n            - name: hiai-driver\n              mountPath: /usr/local/Ascend/driver\n              readOnly: true\n            - name: slog\n              mountPath: /var/log/npu/conf/slog/slog.conf\n            - name: localtime # The container time must be the same as the host time\n              mountPath: /etc/localtime\n            - name: dmp\n              mountPath: /var/dmp_daemon\n            - name: slogd\n              mountPath: /var/slogd\n            - name: hbasic\n              mountPath: /etc/hdcBasic.cfg\n            - name: sys-version\n              mountPath: /etc/sys_version.conf\n            - name: aicpu\n              mountPath: /usr/lib64/aicpu_kernels\n            - name: tfso\n              mountPath: /usr/lib64/libtensorflow.so\n            - name: sample-path\n              mountPath: /root/samples\n      volumes:\n        - name: hiai-driver\n          hostPath:\n            path: /usr/local/Ascend/driver\n        - name: slog\n          hostPath:\n            path: /var/log/npu/conf/slog/slog.conf\n        - name: localtime\n          hostPath:\n            path: /etc/localtime\n        - name: dmp\n          hostPath:\n            path: /var/dmp_daemon\n        - name: slogd\n          hostPath:\n            path: /var/slogd\n        - name: hbasic\n          hostPath:\n            path: /etc/hdcBasic.cfg\n        - name: sys-version\n          hostPath:\n            path: /etc/sys_version.conf\n        - name: aicpu\n          hostPath:\n            path: /usr/lib64/aicpu_kernels\n        - name: tfso\n          hostPath:\n            path: /usr/lib64/libtensorflow.so\n        - name: sample-path\n          hostPath:\n            path: /root/samples\n      restartPolicy: OnFailure\n

                              Some fields in the YAML above need to be modified according to your actual situation:

                              1. atc ... --soc_version=Ascend910 uses Ascend910; adjust to your actual hardware. You can use the npu-smi info command to check the card model and add the Ascend prefix
                              2. samples-path should match your actual situation
                              3. resources should match your actual situation
                            4. Deploy the Job and check the result

                              Create the Job with the following command:

                              kubectl apply -f ascend-demo.yaml\n

                              Check the Pod's running status:

                              After the Pod runs successfully, check the log output. An example of the key information on screen is shown below: Label is the class identifier, Conf is the maximum confidence for that class, and Class is the class name. These values may vary by version and environment, so treat them as indicative:

                              The result image:

                            "},{"location":"end-user/kpanda/gpu/ascend/ascend_usage.html#_3","title":"\u754c\u9762\u4f7f\u7528","text":"
                            1. \u786e\u8ba4\u96c6\u7fa4\u662f\u5426\u5df2\u68c0\u6d4b GPU \u5361\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u81ea\u52a8\u542f\u7528\u5e76\u81ea\u52a8\u68c0\u6d4b\u5bf9\u5e94 GPU \u7c7b\u578b\u3002 \u76ee\u524d\u96c6\u7fa4\u4f1a\u81ea\u52a8\u542f\u7528 GPU \uff0c\u5e76\u4e14\u8bbe\u7f6e GPU \u7c7b\u578b\u4e3a Ascend \u3002

                            2. \u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u9009\u62e9\u7c7b\u578b\uff08Ascend\uff09\u4e4b\u540e\uff0c\u9700\u8981\u914d\u7f6e\u5e94\u7528\u4f7f\u7528\u7684\u7269\u7406\u5361\u6570\u91cf\uff1a

                              \u7269\u7406\u5361\u6570\u91cf\uff08huawei.com/Ascend910\uff09 \uff1a\u8868\u793a\u5f53\u524d Pod \u9700\u8981\u6302\u8f7d\u51e0\u5f20\u7269\u7406\u5361\uff0c\u8f93\u5165\u503c\u5fc5\u987b\u4e3a\u6574\u6570\u4e14**\u5c0f\u4e8e\u7b49\u4e8e**\u5bbf\u4e3b\u673a\u4e0a\u7684\u5361\u6570\u91cf\u3002

                              \u5982\u679c\u4e0a\u8ff0\u503c\u914d\u7f6e\u7684\u6709\u95ee\u9898\u5219\u4f1a\u51fa\u73b0\u8c03\u5ea6\u5931\u8d25\uff0c\u8d44\u6e90\u5206\u914d\u4e0d\u4e86\u7684\u60c5\u51b5\u3002

                            "},{"location":"end-user/kpanda/gpu/ascend/vnpu.html","title":"\u542f\u7528\u6607\u817e\u865a\u62df\u5316","text":"

                            \u6607\u817e\u865a\u62df\u5316\u5206\u4e3a\u52a8\u6001\u865a\u62df\u5316\u548c\u9759\u6001\u865a\u62df\u5316\uff0c\u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5f00\u542f\u5e76\u4f7f\u7528\u6607\u817e\u9759\u6001\u865a\u62df\u5316\u80fd\u529b\u3002

                            "},{"location":"end-user/kpanda/gpu/ascend/vnpu.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                            • Kubernetes \u96c6\u7fa4\u73af\u5883\u642d\u5efa\u3002
                            • \u5f53\u524d NPU \u8282\u70b9\u5df2\u5b89\u88c5\u6607\u817e \uff08Ascend\uff09\u9a71\u52a8\u3002
                            • \u5f53\u524d NPU \u8282\u70b9\u5df2\u5b89\u88c5 Ascend-Docker-Runtime \u7ec4\u4ef6\u3002
                            • \u5f53\u524d\u96c6\u7fa4\u5df2\u5b89\u88c5 NPU MindX DL \u5957\u4ef6\u3002
                            • \u652f\u6301\u7684 NPU \u5361\u578b\u53f7\uff1a

                              • Ascend 310P\uff0c\u5df2\u9a8c\u8bc1
                              • Ascend 910b\uff0820 \u6838\uff09\uff0c\u5df2\u9a8c\u8bc1
                              • Ascend 910\uff0832 \u6838\uff09\uff0c\u5b98\u65b9\u4ecb\u7ecd\u652f\u6301\uff0c\u672a\u5b9e\u9645\u9a8c\u8bc1
                              • Ascend 910\uff0830 \u6838\uff09\uff0c\u5b98\u65b9\u4ecb\u7ecd\u652f\u6301\uff0c\u672a\u5b9e\u9645\u9a8c\u8bc1

                              \u66f4\u591a\u7ec6\u8282\u53c2\u9605\u5b98\u65b9\u865a\u62df\u5316\u786c\u4ef6\u8bf4\u660e\u3002

                            \u8bf7\u53c2\u8003\u6607\u817e NPU \u7ec4\u4ef6\u5b89\u88c5\u6587\u6863\u5b89\u88c5\u57fa\u7840\u73af\u5883\u3002

                            "},{"location":"end-user/kpanda/gpu/ascend/vnpu.html#_3","title":"\u5f00\u542f\u865a\u62df\u5316\u80fd\u529b","text":"

                            \u5f00\u542f\u865a\u62df\u5316\u80fd\u529b\u9700\u8981\u624b\u52a8\u4fee\u6539\u00a0ascend-device-plugin-daemonset \u7ec4\u4ef6\u7684\u542f\u52a8\u53c2\u6570\uff0c\u53c2\u8003\u4e0b\u8ff0\u547d\u4ee4\uff1a

                            - device-plugin -useAscendDocker=true -volcanoType=false -presetVirtualDevice=true\n- logFile=/var/log/mindx-dl/devicePlugin/devicePlugin.log -logLevel=0\n
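A hedged sketch of applying that change, assuming the DaemonSet was installed into the mindx-dl namespace (adjust to your installation):

# assumption: the DaemonSet lives in the mindx-dl namespace\nkubectl -n mindx-dl edit daemonset ascend-device-plugin-daemonset\n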
                            "},{"location":"end-user/kpanda/gpu/ascend/vnpu.html#vnpu","title":"\u5207\u5206 VNPU \u5b9e\u4f8b","text":"

                            \u9759\u6001\u865a\u62df\u5316\u9700\u8981\u624b\u52a8\u5bf9 VNPU \u5b9e\u4f8b\u7684\u5207\u5206\uff0c\u8bf7\u53c2\u8003\u4e0b\u8ff0\u547d\u4ee4\uff1a

                            npu-smi set -t create-vnpu -i 13 -c 0 -f vir02\n
• -i is the card id
• -c is the chip id
• vir02 is the split template spec

The card id and chip id can be queried with npu-smi info, and the split template specs can be looked up in the official Ascend templates.

After splitting the instances, query the result with the following command:

                            npu-smi info -t info-vnpu -i 13 -c 0\n

The query result looks like the following:

"},{"location":"end-user/kpanda/gpu/ascend/vnpu.html#ascend-device-plugin-daemonset","title":"Restarting ascend-device-plugin-daemonset","text":"

After splitting the instances, manually restart the device-plugin pod (a hedged restart sketch follows the describe command below), then use kubectl describe to check the resources registered on the node:

                            kubectl describe node {{nodename}}\n
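A hedged sketch of the restart itself, again assuming the mindx-dl namespace; a rollout restart makes the DaemonSet recreate its Pods:

kubectl -n mindx-dl rollout restart daemonset ascend-device-plugin-daemonset\n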

                            "},{"location":"end-user/kpanda/gpu/ascend/vnpu.html#_4","title":"\u5982\u4f55\u4f7f\u7528\u8bbe\u5907","text":"

                            \u5728\u521b\u5efa\u5e94\u7528\u65f6\uff0c\u6307\u5b9a\u8d44\u6e90 key\uff0c\u53c2\u8003\u4e0b\u8ff0 YAML\uff1a

                            ......\nresources:\n  requests:\n    huawei.com/Ascend310P-2c: 1\n  limits:\n    huawei.com/Ascend310P-2c: 1\n......\n
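A fuller, self-contained sketch of the same request, assuming a 2-core slice of an Ascend 310P; the Pod name and image are placeholders, not from the original:

cat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: Pod\nmetadata:\n  name: vnpu-demo            # hypothetical name\nspec:\n  restartPolicy: OnFailure\n  containers:\n    - name: demo\n      image: ubuntu:22.04    # placeholder image\n      command: [\"sleep\", \"infinity\"]\n      resources:\n        requests:\n          huawei.com/Ascend310P-2c: 1\n        limits:\n          huawei.com/Ascend310P-2c: 1\nEOF\n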
                            "},{"location":"end-user/kpanda/gpu/metax/usemetax.html","title":"\u6c90\u66e6 GPU \u7ec4\u4ef6\u5b89\u88c5\u4e0e\u4f7f\u7528","text":"

                            \u672c\u7ae0\u8282\u63d0\u4f9b\u6c90\u66e6 gpu-extensions\u3001gpu-operator \u7b49\u7ec4\u4ef6\u7684\u5b89\u88c5\u6307\u5bfc\u548c\u6c90\u66e6 GPU \u6574\u5361\u548c vGPU \u4e24\u79cd\u6a21\u5f0f\u7684\u4f7f\u7528\u65b9\u6cd5\u3002

                            "},{"location":"end-user/kpanda/gpu/metax/usemetax.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                            1. \u5df2\u5728\u6c90\u66e6\u8f6f\u4ef6\u4e2d\u5fc3\u4e0b\u8f7d\u5e76\u5b89\u88c5\u6240\u9700\u7684 tar \u5305\uff0c \u672c\u6587\u4ee5 metax-gpu-k8s-package.0.7.10.tar.gz \u4e3a\u4f8b\u3002
                            2. \u51c6\u5907 Kubernetes \u57fa\u7840\u73af\u5883
                            "},{"location":"end-user/kpanda/gpu/metax/usemetax.html#_2","title":"\u7ec4\u4ef6\u4ecb\u7ecd","text":"

                            Metax \u63d0\u4f9b\u4e86\u4e24\u4e2a helm-chart \u5305\uff0c\u4e00\u4e2a\u662f metax-extensions\uff0c\u4e00\u4e2a\u662f gpu-operator\uff0c\u6839\u636e\u4f7f\u7528\u573a\u666f\u53ef\u9009\u62e9\u5b89\u88c5\u4e0d\u540c\u7684\u7ec4\u4ef6\u3002

                            1. Metax-extensions\uff1a\u5305\u542b gpu-device \u548c gpu-label \u4e24\u4e2a\u7ec4\u4ef6\u3002\u5728\u4f7f\u7528 Metax-extensions \u65b9\u6848\u65f6\uff0c\u7528\u6237\u7684\u5e94\u7528\u5bb9\u5668\u955c\u50cf\u9700\u8981\u57fa\u4e8e MXMACA\u00ae \u57fa\u7840\u955c\u50cf\u6784\u5efa\u3002\u4e14 Metax-extensions \u4ec5\u9002\u7528\u4e8e GPU \u6574\u5361\u4f7f\u7528\u573a\u666f\u3002
                            2. gpu-operator\uff1a\u5305\u542b gpu-device\u3001gpu-label\u3001driver-manager\u3001container-runtime\u3001operator-controller \u8fd9\u4e9b\u7ec4\u4ef6\u3002 \u4f7f\u7528 gpu-operator \u65b9\u6848\u65f6\uff0c\u7528\u6237\u53ef\u9009\u62e9\u5236\u4f5c\u4e0d\u5305\u542b MXMACA\u00ae SDK \u7684\u5e94\u7528\u5bb9\u5668\u955c\u50cf\u3002gpu-operator \u9002\u7528\u4e8e GPU \u6574\u5361\u548c vGPU \u573a\u666f\u3002
                            "},{"location":"end-user/kpanda/gpu/metax/usemetax.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                            1. \u4ece /home/metax/metax-docs/k8s/metax-gpu-k8s-package.0.7.10.tar.gz \u6587\u4ef6\u4e2d\u89e3\u538b\u51fa

                              • deploy-gpu-extensions.yaml # \u90e8\u7f72yaml
                              • metax-gpu-extensions-0.7.10.tgz\u3001metax-operator-0.7.10.tgz # helm chart\u6587\u4ef6
                              • metax-k8s-images.0.7.10.run # \u79bb\u7ebf\u955c\u50cf
                            2. \u67e5\u770b\u7cfb\u7edf\u662f\u5426\u5b89\u88c5\u9a71\u52a8

                              $ lsmod | grep metax \nmetax 1605632 0 \nttm 86016 3 drm_vram_helper,metax,drm_ttm_helper \ndrm 618496 7 drm_kms_helper,drm_vram_helper,ast,metax,drm_ttm_helper,ttm\n
  • No output means the package has not been installed; output means it has.
  • When using metax-operator, pre-installing the MXMACA kernel-mode driver on worker nodes is not recommended; if it is already installed, there is no need to uninstall it.
3. Install the driver.

"},{"location":"end-user/kpanda/gpu/metax/usemetax.html#gpu-extensions","title":"gpu-extensions","text":"
1. Push the images:

                              tar -xf metax-gpu-k8s-package.0.7.10.tar.gz\n./metax-k8s-images.0.7.10.run push {registry}/metax\n
2. Push the Helm Charts:

                              helm plugin install https://github.com/chartmuseum/helm-push\nhelm repo add  --username rootuser --password rootpass123  metax http://172.16.16.5:8081\nhelm cm-push metax-operator-0.7.10.tgz metax\nhelm cm-push metax-gpu-extensions-0.7.10.tgz metax\n
3. Install metax-gpu-extensions on the Suanfeng AI computing platform.

   After a successful deployment, the resources can be seen on the node (a hedged check is sketched below).
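A minimal hedged check, grepping the node description for Metax-prefixed resources (the exact resource name depends on the installed chart):

kubectl describe node {node} | grep -i metax\n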

4. After the modification succeeds, the Metax GPU label can be seen on the node.

                            "},{"location":"end-user/kpanda/gpu/metax/usemetax.html#gpu-operator","title":"gpu-operator","text":"

                            \u5b89\u88c5 gpu-opeartor \u65f6\u7684\u5df2\u77e5\u95ee\u9898\uff1a

                            1. metax-operator\u3001gpu-label\u3001gpu-device \u3001container-runtime \u8fd9\u51e0\u4e2a\u7ec4\u4ef6\u955c\u50cf\u8981\u5e26\u6709 amd64 \u540e\u7f00\u3002

                            2. metax-maca \u7ec4\u4ef6\u7684\u955c\u50cf\u4e0d\u5728 metax-k8s-images.0.7.13.run \u5305\u91cc\u9762\uff0c\u9700\u8981\u5355\u72ec\u4e0b\u8f7d maca-mxc500-2.23.0.23-ubuntu20.04-x86_64.tar.xz \u8fd9\u7c7b\u955c\u50cf\uff0cload \u4e4b\u540e\u91cd\u65b0\u4fee\u6539 metax-maca \u7ec4\u4ef6\u7684\u955c\u50cf\u3002

                            3. metax-driver \u7ec4\u4ef6\u7684\u955c\u50cf\u9700\u8981\u4ece https://pub-docstore.metax-tech.com:7001 \u8fd9\u4e2a\u7f51\u7ad9\u4e0b\u8f7d k8s-driver-image.2.23.0.25.run \u6587\u4ef6\uff0c\u7136\u540e\u6267\u884c k8s-driver-image.2.23.0.25.run push {registry}/metax \u547d\u4ee4\u628a\u955c\u50cf\u63a8\u9001\u5230\u955c\u50cf\u4ed3\u5e93\u3002\u63a8\u9001\u4e4b\u540e\u4fee\u6539 metax-driver \u7ec4\u4ef6\u7684\u955c\u50cf\u5730\u5740\u3002

                            "},{"location":"end-user/kpanda/gpu/metax/usemetax.html#gpu_1","title":"\u4f7f\u7528 GPU","text":"

                            \u5b89\u88c5\u540e\u53ef\u5728\u5de5\u4f5c\u8d1f\u8f7d\u4e2d\u4f7f\u7528\u6c90\u66e6 GPU\u3002\u6ce8\u610f\u542f\u7528 GPU \u540e\uff0c\u9700\u9009\u62e9GPU\u7c7b\u578b\u4e3a Metax GPU

                            \u8fdb\u5165\u5bb9\u5668\uff0c\u6267\u884c mx-smi \u53ef\u67e5\u770b GPU \u7684\u4f7f\u7528\u60c5\u51b5.
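A minimal sketch, using a placeholder Pod name:

kubectl exec -it {pod} -- mx-smi\n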

                            "},{"location":"end-user/kpanda/gpu/mlu/use-mlu.html","title":"\u4f7f\u7528\u5bd2\u6b66\u7eaa GPU","text":"

                            \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4f7f\u7528\u5bd2\u6b66\u7eaa GPU\u3002

                            "},{"location":"end-user/kpanda/gpu/mlu/use-mlu.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                            • \u5df2\u7ecf\u90e8\u7f72 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 \u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u4e14\u5e73\u53f0\u8fd0\u884c\u6b63\u5e38\u3002
                            • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                            • \u5f53\u524d\u96c6\u7fa4\u5df2\u5b89\u88c5\u5bd2\u6b66\u7eaa\u56fa\u4ef6\u3001\u9a71\u52a8\u4ee5\u53caDevicePlugin\u7ec4\u4ef6\uff0c\u5b89\u88c5\u8be6\u60c5\u8bf7\u53c2\u8003\u5b98\u65b9\u6587\u6863\uff1a
                              • \u9a71\u52a8\u56fa\u4ef6\u5b89\u88c5
                              • DevicePlugin \u5b89\u88c5

                            \u5728\u5b89\u88c5 DevicePlugin \u65f6\u8bf7\u5173\u95ed --enable-device-type \u53c2\u6570\uff0c\u5426\u5219\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5c06\u65e0\u6cd5\u6b63\u786e\u8bc6\u522b\u5bd2\u6b66\u7eaa GPU\u3002

                            "},{"location":"end-user/kpanda/gpu/mlu/use-mlu.html#gpu_1","title":"\u5bd2\u6b66\u7eaa GPU \u6a21\u5f0f\u4ecb\u7ecd","text":"

                            \u5bd2\u6b66\u7eaa GPU \u6709\u4ee5\u4e0b\u51e0\u79cd\u6a21\u5f0f\uff1a

                            • \u6574\u5361\u6a21\u5f0f\uff1a\u5c06\u5bd2\u6b66\u7eaaGPU\u4ee5\u6574\u5361\u7684\u65b9\u5f0f\u6ce8\u518c\u5230\u96c6\u7fa4\u5f53\u4e2d\u8fdb\u884c\u4f7f\u7528\u3002
                            • Share \u6a21\u5f0f\uff1a\u53ef\u4ee5\u5c06\u4e00\u5f20\u5bd2\u6b66\u7eaaGPU\u5171\u4eab\u7ed9\u591a\u4e2a Pod \u8fdb\u884c\u4f7f\u7528\uff0c\u53ef\u4ee5\u901a\u8fc7 virtualization-num \u53c2\u6570\u8fdb\u884c\u8bbe\u7f6e\u53ef\u5171\u4eab\u5bb9\u5668\u7684\u6570\u91cf\u3002
                            • Dynamic smlu \u6a21\u5f0f\uff1a\u8fdb\u4e00\u6b65\u5bf9\u8d44\u6e90\u8fdb\u884c\u4e86\u7ec6\u5316\uff0c\u53ef\u4ee5\u63a7\u5236\u5206\u914d\u7ed9\u5bb9\u5668\u7684\u663e\u5b58\u3001\u7b97\u529b\u7684\u5927\u5c0f\u3002
                            • Mim \u6a21\u5f0f\uff1a\u53ef\u4ee5\u5c06\u5bd2\u6b66\u7eaa GPU \u6309\u7167\u56fa\u5b9a\u7684\u89c4\u683c\u5207\u5206\u6210\u591a\u5f20 GPU \u8fdb\u884c\u4f7f\u7528\u3002
                            "},{"location":"end-user/kpanda/gpu/mlu/use-mlu.html#ai","title":"\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528\u5bd2\u6b66\u7eaa","text":"

                            \u8fd9\u91cc\u4ee5 Dynamic smlu \u6a21\u5f0f\u4e3a\u4f8b\uff1a

                            1. \u5728\u6b63\u786e\u5b89\u88c5 DevicePlugin \u7b49\u7ec4\u4ef6\u540e\uff0c\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u96c6\u7fa4\u8fd0\u7ef4-> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u81ea\u52a8\u542f\u7528\u5e76\u81ea\u52a8\u68c0\u6d4b\u5bf9\u5e94 GPU \u7c7b\u578b\u3002

                            2. \u70b9\u51fb\u8282\u70b9\u7ba1\u7406\u9875\u9762\uff0c\u67e5\u770b\u8282\u70b9\u662f\u5426\u5df2\u7ecf\u6b63\u786e\u8bc6\u522b\u5230\u5bf9\u5e94\u7684GPU\u7c7b\u578b\u3002

3. Deploy a workload. Click Clusters -> Workloads, deploy the workload from an image, and after selecting the type (MLU VGPU), configure the GPU resources used by the app:

   • GPU compute (cambricon.com/mlu.smlu.vcore): the percentage of cores the current Pod needs to use.
   • GPU memory (cambricon.com/mlu.smlu.vmemory): the amount of memory the current Pod needs, in MB.
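A hedged CLI check that these resources have been registered on the node, using the document's node placeholder:

kubectl describe node {node} | grep cambricon.com\n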

                            "},{"location":"end-user/kpanda/gpu/mlu/use-mlu.html#yaml","title":"\u4f7f\u7528 YAML \u914d\u7f6e","text":"

                            \u53c2\u8003 YAML \u6587\u4ef6\u5982\u4e0b\uff1a

apiVersion: v1  \nkind: Pod  \nmetadata:  \n  name: pod1  \nspec:  \n  restartPolicy: OnFailure  \n  containers:  \n    - image: ubuntu:16.04  \n      name: pod1-ctr  \n      command: [\"sleep\"]  \n      args: [\"100000\"]  \n      resources:  \n        limits:  \n          cambricon.com/mlu: \"1\" # use this when device type is not enabled, else delete this line.  \n          #cambricon.com/mlu: \"1\" #uncomment to use when device type is enabled  \n          #cambricon.com/mlu.share: \"1\" #uncomment to use device with env-share mode  \n          #cambricon.com/mlu.mim-2m.8gb: \"1\" #uncomment to use device with mim mode  \n          #cambricon.com/mlu.smlu.vcore: \"100\" #uncomment to use device with dynamic smlu mode  \n          #cambricon.com/mlu.smlu.vmemory: \"1024\" #uncomment to use device with dynamic smlu mode\n
                            "},{"location":"end-user/kpanda/gpu/nvidia/index.html","title":"NVIDIA GPU \u5361\u4f7f\u7528\u6a21\u5f0f","text":"

                            NVIDIA \u4f5c\u4e3a\u4e1a\u5185\u77e5\u540d\u7684\u56fe\u5f62\u8ba1\u7b97\u4f9b\u5e94\u5546\uff0c\u4e3a\u7b97\u529b\u7684\u63d0\u5347\u63d0\u4f9b\u4e86\u8bf8\u591a\u8f6f\u786c\u4ef6\u89e3\u51b3\u65b9\u6848\uff0c\u5176\u4e2d NVIDIA \u5728 GPU \u7684\u4f7f\u7528\u65b9\u5f0f\u4e0a\u63d0\u4f9b\u4e86\u5982\u4e0b\u4e09\u79cd\u89e3\u51b3\u65b9\u6848\uff1a

                            "},{"location":"end-user/kpanda/gpu/nvidia/index.html#full-gpu","title":"\u6574\u5361\uff08Full GPU\uff09","text":"

                            \u6574\u5361\u662f\u6307\u5c06\u6574\u4e2a NVIDIA GPU \u5206\u914d\u7ed9\u5355\u4e2a\u7528\u6237\u6216\u5e94\u7528\u7a0b\u5e8f\u3002\u5728\u8fd9\u79cd\u914d\u7f6e\u4e0b\uff0c\u5e94\u7528\u53ef\u4ee5\u5b8c\u5168\u5360\u7528 GPU \u7684\u6240\u6709\u8d44\u6e90\uff0c \u5e76\u83b7\u5f97\u6700\u5927\u7684\u8ba1\u7b97\u6027\u80fd\u3002\u6574\u5361\u9002\u7528\u4e8e\u9700\u8981\u5927\u91cf\u8ba1\u7b97\u8d44\u6e90\u548c\u5185\u5b58\u7684\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u5982\u6df1\u5ea6\u5b66\u4e60\u8bad\u7ec3\u3001\u79d1\u5b66\u8ba1\u7b97\u7b49\u3002

                            "},{"location":"end-user/kpanda/gpu/nvidia/index.html#vgpuvirtual-gpu","title":"vGPU\uff08Virtual GPU\uff09","text":"

                            vGPU \u662f\u4e00\u79cd\u865a\u62df\u5316\u6280\u672f\uff0c\u5141\u8bb8\u5c06\u4e00\u4e2a\u7269\u7406 GPU \u5212\u5206\u4e3a\u591a\u4e2a\u865a\u62df GPU\uff0c\u6bcf\u4e2a\u865a\u62df GPU \u5206\u914d\u7ed9\u4e0d\u540c\u7684\u4e91\u4e3b\u673a\u6216\u7528\u6237\u3002 vGPU \u4f7f\u591a\u4e2a\u7528\u6237\u53ef\u4ee5\u5171\u4eab\u540c\u4e00\u53f0\u7269\u7406 GPU\uff0c\u5e76\u5728\u5404\u81ea\u7684\u865a\u62df\u73af\u5883\u4e2d\u72ec\u7acb\u4f7f\u7528 GPU \u8d44\u6e90\u3002 \u6bcf\u4e2a\u865a\u62df GPU \u53ef\u4ee5\u83b7\u5f97\u4e00\u5b9a\u7684\u8ba1\u7b97\u80fd\u529b\u548c\u663e\u5b58\u5bb9\u91cf\u3002vGPU \u9002\u7528\u4e8e\u865a\u62df\u5316\u73af\u5883\u548c\u4e91\u8ba1\u7b97\u573a\u666f\uff0c\u53ef\u4ee5\u63d0\u4f9b\u66f4\u9ad8\u7684\u8d44\u6e90\u5229\u7528\u7387\u548c\u7075\u6d3b\u6027\u3002

                            "},{"location":"end-user/kpanda/gpu/nvidia/index.html#migmulti-instance-gpu","title":"MIG\uff08Multi-Instance GPU\uff09","text":"

                            MIG \u662f NVIDIA Ampere \u67b6\u6784\u5f15\u5165\u7684\u4e00\u9879\u529f\u80fd\uff0c\u5b83\u5141\u8bb8\u5c06\u4e00\u4e2a\u7269\u7406 GPU \u5212\u5206\u4e3a\u591a\u4e2a\u7269\u7406 GPU \u5b9e\u4f8b\uff0c\u6bcf\u4e2a\u5b9e\u4f8b\u53ef\u4ee5\u72ec\u7acb\u5206\u914d\u7ed9\u4e0d\u540c\u7684\u7528\u6237\u6216\u5de5\u4f5c\u8d1f\u8f7d\u3002 \u6bcf\u4e2a MIG \u5b9e\u4f8b\u5177\u6709\u81ea\u5df1\u7684\u8ba1\u7b97\u8d44\u6e90\u3001\u663e\u5b58\u548c PCIe \u5e26\u5bbd\uff0c\u5c31\u50cf\u4e00\u4e2a\u72ec\u7acb\u7684\u865a\u62df GPU\u3002 MIG \u63d0\u4f9b\u4e86\u66f4\u7ec6\u7c92\u5ea6\u7684 GPU \u8d44\u6e90\u5206\u914d\u548c\u7ba1\u7406\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u6c42\u52a8\u6001\u8c03\u6574\u5b9e\u4f8b\u7684\u6570\u91cf\u548c\u5927\u5c0f\u3002 MIG \u9002\u7528\u4e8e\u591a\u79df\u6237\u73af\u5883\u3001\u5bb9\u5668\u5316\u5e94\u7528\u7a0b\u5e8f\u548c\u6279\u5904\u7406\u4f5c\u4e1a\u7b49\u573a\u666f\u3002

                            \u65e0\u8bba\u662f\u5728\u865a\u62df\u5316\u73af\u5883\u4e2d\u4f7f\u7528 vGPU\uff0c\u8fd8\u662f\u5728\u7269\u7406 GPU \u4e0a\u4f7f\u7528 MIG\uff0cNVIDIA \u4e3a\u7528\u6237\u63d0\u4f9b\u4e86\u66f4\u591a\u7684\u9009\u62e9\u548c\u4f18\u5316 GPU \u8d44\u6e90\u7684\u65b9\u5f0f\u3002 \u7b97\u4e30 AI \u7b97\u529b\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\u5168\u9762\u517c\u5bb9\u4e86\u4e0a\u8ff0 NVIDIA \u7684\u80fd\u529b\u7279\u6027\uff0c\u7528\u6237\u53ea\u9700\u901a\u8fc7\u7b80\u5355\u7684\u754c\u9762\u64cd\u4f5c\uff0c\u5c31\u80fd\u591f\u83b7\u5f97\u5168\u90e8 NVIDIA GPU \u7684\u8ba1\u7b97\u80fd\u529b\uff0c\u4ece\u800c\u63d0\u9ad8\u8d44\u6e90\u5229\u7528\u7387\u5e76\u964d\u4f4e\u6210\u672c\u3002

                            • Single \u6a21\u5f0f\uff0c\u8282\u70b9\u4ec5\u5728\u5176\u6240\u6709 GPU \u4e0a\u516c\u5f00\u5355\u4e00\u7c7b\u578b\u7684 MIG \u8bbe\u5907\uff0c\u8282\u70b9\u4e0a\u7684\u6240\u6709 GPU \u5fc5\u987b\uff1a
                              • \u5c5e\u4e8e\u540c\u4e00\u4e2a\u578b\u53f7\uff08\u4f8b\u5982 A100-SXM-40GB\uff09\uff0c\u53ea\u6709\u540c\u4e00\u578b\u53f7 GPU \u7684 MIG Profile \u624d\u662f\u4e00\u6837\u7684
                              • \u542f\u7528 MIG \u914d\u7f6e\uff0c\u9700\u8981\u91cd\u542f\u673a\u5668\u624d\u80fd\u751f\u6548
                              • \u4e3a\u5728\u6240\u6709\u4ea7\u54c1\u4e2d\u516c\u5f00\u201c\u5b8c\u5168\u76f8\u540c\u201d\u7684 MIG \u8bbe\u5907\u7c7b\u578b\uff0c\u521b\u5efa\u76f8\u540c\u7684GI \u548c CI
                            • Mixed \u6a21\u5f0f\uff0c\u8282\u70b9\u5728\u5176\u6240\u6709 GPU \u4e0a\u516c\u5f00\u6df7\u5408 MIG \u8bbe\u5907\u7c7b\u578b\u3002\u8bf7\u6c42\u7279\u5b9a\u7684 MIG \u8bbe\u5907\u7c7b\u578b\u9700\u8981\u8bbe\u5907\u7c7b\u578b\u63d0\u4f9b\u7684\u8ba1\u7b97\u5207\u7247\u6570\u91cf\u548c\u5185\u5b58\u603b\u91cf\u3002
                              • \u8282\u70b9\u4e0a\u7684\u6240\u6709 GPU \u5fc5\u987b\uff1a\u5c5e\u4e8e\u540c\u4e00\u4ea7\u54c1\u7ebf\uff08\u4f8b\u5982 A100-SXM-40GB\uff09
                              • \u6bcf\u4e2a GPU \u53ef\u542f\u7528\u6216\u4e0d\u542f\u7528 MIG\uff0c\u5e76\u4e14\u53ef\u4ee5\u81ea\u7531\u914d\u7f6e\u4efb\u4f55\u53ef\u7528 MIG \u8bbe\u5907\u7c7b\u578b\u7684\u6df7\u5408\u642d\u914d\u3002
                              • \u5728\u8282\u70b9\u4e0a\u8fd0\u884c\u7684 k8s-device-plugin \u5c06\uff1a
                                • \u4f7f\u7528\u4f20\u7edf\u7684 nvidia.com/gpu \u8d44\u6e90\u7c7b\u578b\u516c\u5f00\u4efb\u4f55\u4e0d\u5904\u4e8e MIG \u6a21\u5f0f\u7684 GPU
                                • \u4f7f\u7528\u9075\u5faa\u67b6\u6784 nvidia.com/mig-g.gb \u7684\u8d44\u6e90\u7c7b\u578b\u516c\u5f00\u5404\u4e2a MIG \u8bbe\u5907

                                  \u5f00\u542f\u914d\u7f6e\u8be6\u60c5\u53c2\u8003 GPU Operator \u79bb\u7ebf\u5b89\u88c5\u3002
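A hedged sketch of requesting one MIG slice in mixed mode, assuming a 1g.5gb profile has been created on the node; the Pod name and image are placeholders, not from the original:

cat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: Pod\nmetadata:\n  name: mig-demo             # hypothetical name\nspec:\n  restartPolicy: Never\n  containers:\n    - name: demo\n      image: nvidia/cuda:12.2.0-base-ubuntu22.04   # placeholder image\n      command: [\"nvidia-smi\", \"-L\"]\n      resources:\n        limits:\n          nvidia.com/mig-1g.5gb: 1\nEOF\n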

                                  "},{"location":"end-user/kpanda/gpu/nvidia/index.html#_1","title":"\u5982\u4f55\u4f7f\u7528","text":"

                                  \u60a8\u53ef\u4ee5\u53c2\u8003\u4ee5\u4e0b\u94fe\u63a5\uff0c\u5feb\u901f\u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5173\u4e8e NVIDIA GPU \u5361\u7684\u7ba1\u7406\u80fd\u529b\u3002

                                  • NVIDIA GPU \u6574\u5361\u4f7f\u7528
                                  • NVIDIA vGPU \u4f7f\u7528
                                  • NVIDIA MIG \u4f7f\u7528
                                  "},{"location":"end-user/kpanda/gpu/nvidia/full_gpu_userguide.html","title":"\u5e94\u7528\u4f7f\u7528 GPU \u6574\u5361","text":"

                                  \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5c06\u6574\u4e2a NVIDIA GPU \u5361\u5206\u914d\u7ed9\u5355\u4e2a\u5e94\u7528\u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/full_gpu_userguide.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  • \u5df2\u7ecf\u90e8\u7f72 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 \u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u4e14\u5e73\u53f0\u8fd0\u884c\u6b63\u5e38\u3002
                                  • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                                  • \u5f53\u524d\u96c6\u7fa4\u5df2\u79bb\u7ebf\u5b89\u88c5 GPU Operator \u5e76\u5df2\u542f\u7528 NVIDIA DevicePlugin \uff0c\u53ef\u53c2\u8003 GPU Operator \u79bb\u7ebf\u5b89\u88c5\u3002
                                  • \u5f53\u524d\u96c6\u7fa4\u5185 GPU \u5361\u672a\u8fdb\u884c\u4efb\u4f55\u865a\u62df\u5316\u64cd\u4f5c\u6216\u88ab\u5176\u5b83\u5e94\u7528\u5360\u7528\u3002
                                  "},{"location":"end-user/kpanda/gpu/nvidia/full_gpu_userguide.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"end-user/kpanda/gpu/nvidia/full_gpu_userguide.html#ui","title":"\u4f7f\u7528 UI \u754c\u9762\u914d\u7f6e","text":"
                                  1. \u786e\u8ba4\u96c6\u7fa4\u662f\u5426\u5df2\u68c0\u6d4b GPU \u5361\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u81ea\u52a8\u542f\u7528\u5e76\u81ea\u52a8\u68c0\u6d4b\u5bf9\u5e94 GPU \u7c7b\u578b\u3002 \u76ee\u524d\u96c6\u7fa4\u4f1a\u81ea\u52a8\u542f\u7528 GPU \uff0c\u5e76\u4e14\u8bbe\u7f6e GPU \u7c7b\u578b\u4e3a Nvidia GPU \u3002

                                  2. \u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u9009\u62e9\u7c7b\u578b\uff08Nvidia GPU\uff09\u4e4b\u540e\uff0c\u9700\u8981\u914d\u7f6e\u5e94\u7528\u4f7f\u7528\u7684\u7269\u7406\u5361\u6570\u91cf\uff1a

                                    \u7269\u7406\u5361\u6570\u91cf\uff08nvidia.com/gpu\uff09 \uff1a\u8868\u793a\u5f53\u524d Pod \u9700\u8981\u6302\u8f7d\u51e0\u5f20\u7269\u7406\u5361\uff0c\u8f93\u5165\u503c\u5fc5\u987b\u4e3a\u6574\u6570\u4e14 \u5c0f\u4e8e\u7b49\u4e8e \u5bbf\u4e3b\u673a\u4e0a\u7684\u5361\u6570\u91cf\u3002

                                    \u5982\u679c\u4e0a\u8ff0\u503c\u914d\u7f6e\u7684\u6709\u95ee\u9898\u5219\u4f1a\u51fa\u73b0\u8c03\u5ea6\u5931\u8d25\uff0c\u8d44\u6e90\u5206\u914d\u4e0d\u4e86\u7684\u60c5\u51b5\u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/full_gpu_userguide.html#yaml","title":"\u4f7f\u7528 YAML \u914d\u7f6e","text":"

                                  \u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u7533\u8bf7 GPU \u8d44\u6e90\uff0c\u5728\u8d44\u6e90\u7533\u8bf7\u548c\u9650\u5236\u914d\u7f6e\u4e2d\u589e\u52a0 nvidia.com/gpu: 1 \u53c2\u6570\u914d\u7f6e\u5e94\u7528\u4f7f\u7528\u7269\u7406\u5361\u7684\u6570\u91cf\u3002

apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-gpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-gpu-demo\n  template:\n    metadata:\n      labels:\n        app: full-gpu-demo\n    spec:\n      containers:\n      - image: chrstnhntschl/gpu_burn\n        name: container-0\n        resources:\n          requests:\n            cpu: 250m\n            memory: 512Mi\n            nvidia.com/gpu: 1   # number of GPUs requested\n          limits:\n            cpu: 250m\n            memory: 512Mi\n            nvidia.com/gpu: 1   # upper limit on the number of GPUs used\n      imagePullSecrets:\n      - name: default-secret\n

                                  Note

When specifying the GPU count with the nvidia.com/gpu parameter, the requests and limits values must be kept equal.

                                  "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html","title":"GPU Operator \u79bb\u7ebf\u5b89\u88c5","text":"

                                  \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9884\u7f6e\u4e86 Ubuntu22.04\u3001Ubuntu20.04\u3001CentOS 7.9 \u8fd9\u4e09\u4e2a\u64cd\u4f5c\u7cfb\u7edf\u7684 Driver \u955c\u50cf\uff0c\u9a71\u52a8\u7248\u672c\u662f 535.104.12\uff1b \u5e76\u4e14\u5185\u7f6e\u4e86\u5404\u64cd\u4f5c\u7cfb\u7edf\u6240\u9700\u7684 Toolkit \u955c\u50cf\uff0c\u7528\u6237\u4e0d\u518d\u9700\u8981\u624b\u52a8\u79bb\u7ebf Toolkit \u955c\u50cf\u3002

                                  \u672c\u6587\u4f7f\u7528 AMD \u67b6\u6784\u7684 CentOS 7.9\uff083.10.0-1160\uff09\u8fdb\u884c\u6f14\u793a\u3002\u5982\u9700\u4f7f\u7528 Red Hat 8.4 \u90e8\u7f72\uff0c \u8bf7\u53c2\u8003\u5411\u706b\u79cd\u8282\u70b9\u4ed3\u5e93\u4e0a\u4f20 Red Hat GPU Opreator \u79bb\u7ebf\u955c\u50cf\u548c\u6784\u5efa Red Hat 8.4 \u79bb\u7ebf yum \u6e90\u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  • \u5f85\u90e8\u7f72 gpu-operator \u7684\u96c6\u7fa4\u8282\u70b9\u5185\u6838\u7248\u672c\u5fc5\u987b\u5b8c\u5168\u4e00\u81f4\u3002\u8282\u70b9\u6240\u5728\u7684\u53d1\u884c\u7248\u548c GPU \u5361\u578b\u53f7\u5728 GPU \u652f\u6301\u77e9\u9635\u7684\u8303\u56f4\u5185\u3002
                                  • \u5b89\u88c5 gpu-operator \u65f6\u9009\u62e9 v23.9.0+2 \u53ca\u4ee5\u4e0a\u7248\u672c
                                  "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                  \u53c2\u8003\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 gpu-operator \u63d2\u4ef6\u3002

                                  1. \u767b\u5f55\u5e73\u53f0\uff0c\u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 -> \u5f85\u5b89\u88c5 gpu-operator \u7684\u96c6\u7fa4 -> \u8fdb\u5165\u96c6\u7fa4\u8be6\u60c5\u3002

                                  2. \u5728 Helm \u6a21\u677f \u9875\u9762\uff0c\u9009\u62e9 \u5168\u90e8\u4ed3\u5e93 \uff0c\u641c\u7d22 gpu-operator \u3002

                                  3. \u9009\u62e9 gpu-operator \uff0c\u70b9\u51fb \u5b89\u88c5 \u3002

                                  4. \u53c2\u8003\u4e0b\u6587\u53c2\u6570\u914d\u7f6e\uff0c\u914d\u7f6e gpu-operator \u5b89\u88c5\u53c2\u6570\uff0c\u5b8c\u6210 gpu-operator \u7684\u5b89\u88c5\u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_3","title":"\u53c2\u6570\u914d\u7f6e","text":"
                                  • systemOS \uff1a\u9009\u62e9\u673a\u5668\u7684\u64cd\u4f5c\u7cfb\u7edf\uff0c\u5f53\u524d\u5185\u7f6e\u4e86 Ubuntu 22.04\u3001Ubuntu20.04\u3001Centos7.9 \u3001other \u56db\u4e2a\u9009\u9879\uff0c\u8bf7\u6b63\u786e\u7684\u9009\u62e9\u64cd\u4f5c\u7cfb\u7edf\u3002
                                  "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_4","title":"\u57fa\u672c\u53c2\u6570\u914d\u7f6e","text":"
                                  • \u540d\u79f0 \uff1a\u8f93\u5165\u63d2\u4ef6\u540d\u79f0\u3002
                                  • \u547d\u540d\u7a7a\u95f4 \uff1a\u9009\u62e9\u5c06\u63d2\u4ef6\u5b89\u88c5\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                  • \u7248\u672c \uff1a\u63d2\u4ef6\u7684\u7248\u672c\uff0c\u6b64\u5904\u4ee5 v23.9.0+2 \u7248\u672c\u4e3a\u4f8b\u3002
                                  • \u5931\u8d25\u5220\u9664 \uff1a\u5b89\u88c5\u5931\u8d25\uff0c\u5219\u5220\u9664\u5df2\u7ecf\u5b89\u88c5\u7684\u5173\u8054\u8d44\u6e90\u3002\u5f00\u542f\u540e\uff0c\u5c06\u9ed8\u8ba4\u540c\u6b65\u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u3002
                                  • \u5c31\u7eea\u7b49\u5f85 \uff1a\u542f\u7528\u540e\uff0c\u6240\u6709\u5173\u8054\u8d44\u6e90\u90fd\u5904\u4e8e\u5c31\u7eea\u72b6\u6001\uff0c\u624d\u4f1a\u6807\u8bb0\u5e94\u7528\u5b89\u88c5\u6210\u529f\u3002
                                  • \u8be6\u60c5\u65e5\u5fd7 \uff1a\u5f00\u542f\u540e\uff0c\u5c06\u8bb0\u5f55\u5b89\u88c5\u8fc7\u7a0b\u7684\u8be6\u7ec6\u65e5\u5fd7\u3002
                                  "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_5","title":"\u9ad8\u7ea7\u53c2\u6570\u914d\u7f6e","text":""},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#operator","title":"Operator \u53c2\u6570\u914d\u7f6e","text":"
                                  • InitContainer.image \uff1a\u914d\u7f6e CUDA \u955c\u50cf\uff0c\u63a8\u8350\u9ed8\u8ba4\u955c\u50cf\uff1a nvidia/cuda
                                  • InitContainer.repository \uff1aCUDA \u955c\u50cf\u6240\u5728\u7684\u955c\u50cf\u4ed3\u5e93\uff0c\u9ed8\u8ba4\u4e3a nvcr.m.daocloud.io \u4ed3\u5e93
                                  • InitContainer.version : CUDA \u955c\u50cf\u7684\u7248\u672c\uff0c\u8bf7\u4f7f\u7528\u9ed8\u8ba4\u53c2\u6570
                                  "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#driver","title":"Driver \u53c2\u6570\u914d\u7f6e","text":"
                                  • Driver.enable \uff1a\u914d\u7f6e\u662f\u5426\u5728\u8282\u70b9\u4e0a\u90e8\u7f72 NVIDIA \u9a71\u52a8\uff0c\u9ed8\u8ba4\u5f00\u542f\uff0c\u5982\u679c\u60a8\u5728\u4f7f\u7528 GPU Operator \u90e8\u7f72\u524d\uff0c\u5df2\u7ecf\u5728\u8282\u70b9\u4e0a\u90e8\u7f72\u4e86 NVIDIA \u9a71\u52a8\u7a0b\u5e8f\uff0c\u8bf7\u5173\u95ed\u3002\uff08\u82e5\u624b\u52a8\u90e8\u7f72\u9a71\u52a8\u7a0b\u5e8f\u9700\u8981\u5173\u6ce8 CUDA Toolkit \u4e0e Toolkit Driver Version \u7684\u9002\u914d\u5173\u7cfb\uff0c\u901a\u8fc7 GPU operator \u5b89\u88c5\u5219\u65e0\u9700\u5173\u6ce8\uff09\u3002
                                  • Driver.usePrecompiled \uff1a\u542f\u7528\u9884\u7f16\u8bd1\u7684GPU\u9a71\u52a8
                                  • Driver.image \uff1a\u914d\u7f6e GPU \u9a71\u52a8\u955c\u50cf\uff0c\u63a8\u8350\u9ed8\u8ba4\u955c\u50cf\uff1a nvidia/driver \u3002
                                  • Driver.repository \uff1aGPU \u9a71\u52a8\u955c\u50cf\u6240\u5728\u7684\u955c\u50cf\u4ed3\u5e93\uff0c\u9ed8\u8ba4\u4e3a nvidia \u7684 nvcr.io \u4ed3\u5e93\u3002
                                  • Driver.usePrecompiled \uff1a\u5f00\u542f\u9884\u7f16\u8bd1\u6a21\u5f0f\u5b89\u88c5\u9a71\u52a8\u3002
                                  • Driver.version \uff1aGPU \u9a71\u52a8\u955c\u50cf\u7684\u7248\u672c\uff0c\u79bb\u7ebf\u90e8\u7f72\u8bf7\u4f7f\u7528\u9ed8\u8ba4\u53c2\u6570\uff0c\u4ec5\u5728\u7ebf\u5b89\u88c5\u65f6\u9700\u914d\u7f6e\u3002\u4e0d\u540c\u7c7b\u578b\u64cd\u4f5c\u7cfb\u7edf\u7684 Driver \u955c\u50cf\u7684\u7248\u672c\u5b58\u5728\u5982\u4e0b\u5dee\u5f02\uff0c \u8be6\u60c5\u53ef\u53c2\u8003\uff1aNvidia GPU Driver \u7248\u672c\u3002 \u5982\u4e0b\u4e0d\u540c\u64cd\u4f5c\u7cfb\u7edf\u7684 Driver Version \u793a\u4f8b\uff1a

                                    Note

                                    \u4f7f\u7528\u5185\u7f6e\u7684\u64cd\u4f5c\u7cfb\u7edf\u7248\u672c\u65e0\u9700\u4fee\u6539\u955c\u50cf\u7248\u672c\uff0c\u5176\u4ed6\u64cd\u4f5c\u7cfb\u7edf\u7248\u672c\u8bf7\u53c2\u8003\u5411\u706b\u79cd\u8282\u70b9\u4ed3\u5e93\u4e0a\u4f20\u955c\u50cf\u3002 \u6ce8\u610f\u7248\u672c\u53f7\u540e\u65e0\u9700\u586b\u5199 Ubuntu\u3001CentOS\u3001Red Hat \u7b49\u64cd\u4f5c\u7cfb\u7edf\u540d\u79f0\uff0c\u82e5\u5b98\u65b9\u955c\u50cf\u542b\u6709\u64cd\u4f5c\u7cfb\u7edf\u540e\u7f00\uff0c\u8bf7\u624b\u52a8\u79fb\u9664\u3002

                                    • Red Hat \u7cfb\u7edf\uff0c\u4f8b\u5982 525.105.17
                                    • Ubuntu \u7cfb\u7edf\uff0c\u4f8b\u5982 535-5.15.0-1043-nvidia
                                    • CentOS \u7cfb\u7edf\uff0c\u4f8b\u5982 525.147.05
                                  • Driver.RepoConfig.ConfigMapName \uff1a\u7528\u6765\u8bb0\u5f55 GPU Operator \u7684\u79bb\u7ebf yum \u6e90\u914d\u7f6e\u6587\u4ef6\u540d\u79f0\uff0c \u5f53\u4f7f\u7528\u9884\u7f6e\u7684\u79bb\u7ebf\u5305\u65f6\uff0c\u5404\u7c7b\u578b\u7684\u64cd\u4f5c\u7cfb\u7edf\u8bf7\u53c2\u8003\u5982\u4e0b\u7684\u6587\u6863\u3002

                                    • \u6784\u5efa CentOS 7.9 \u79bb\u7ebf yum \u6e90
                                    • \u6784\u5efa Red Hat 8.4 \u79bb\u7ebf yum \u6e90
                                  "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#toolkit","title":"Toolkit \u914d\u7f6e\u53c2\u6570","text":"

                                  Toolkit.enable \uff1a\u9ed8\u8ba4\u5f00\u542f\uff0c\u8be5\u7ec4\u4ef6\u8ba9 conatainerd/docker \u652f\u6301\u8fd0\u884c\u9700\u8981 GPU \u7684\u5bb9\u5668\u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#mig","title":"MIG \u914d\u7f6e\u53c2\u6570","text":"

                                  \u8be6\u7ec6\u914d\u7f6e\u65b9\u5f0f\u8bf7\u53c2\u8003\u5f00\u542f MIG \u529f\u80fd

                                  MigManager.Config.name \uff1aMIG \u7684\u5207\u5206\u914d\u7f6e\u6587\u4ef6\u540d\uff0c\u7528\u4e8e\u5b9a\u4e49 MIG \u7684\uff08GI, CI\uff09\u5207\u5206\u7b56\u7565\u3002 \u9ed8\u8ba4\u4e3a default-mig-parted-config \u3002\u81ea\u5b9a\u4e49\u53c2\u6570\u53c2\u8003\u5f00\u542f MIG \u529f\u80fd\u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_6","title":"\u4e0b\u4e00\u6b65\u64cd\u4f5c","text":"

                                  \u5b8c\u6210\u4e0a\u8ff0\u76f8\u5173\u53c2\u6570\u914d\u7f6e\u548c\u521b\u5efa\u540e\uff1a

                                  • \u5982\u679c\u4f7f\u7528 \u6574\u5361\u6a21\u5f0f\uff0c\u5e94\u7528\u521b\u5efa\u65f6\u53ef\u4f7f\u7528 GPU \u8d44\u6e90

                                  • \u5982\u679c\u4f7f\u7528 vGPU \u6a21\u5f0f \uff0c\u5b8c\u6210\u4e0a\u8ff0\u76f8\u5173\u53c2\u6570\u914d\u7f6e\u548c\u521b\u5efa\u540e\uff0c\u4e0b\u4e00\u6b65\u8bf7\u5b8c\u6210 vGPU Addon \u5b89\u88c5

                                  • \u5982\u679c\u4f7f\u7528 MIG \u6a21\u5f0f\uff0c\u5e76\u4e14\u9700\u8981\u7ed9\u4e2a\u522b GPU \u8282\u70b9\u6309\u7167\u67d0\u79cd\u5207\u5206\u89c4\u683c\u8fdb\u884c\u4f7f\u7528\uff0c \u5426\u5219\u6309\u7167 MigManager.Config \u4e2d\u7684 default \u503c\u8fdb\u884c\u5207\u5206\u3002

                                    • single \u6a21\u5f0f\u8bf7\u7ed9\u5bf9\u5e94\u8282\u70b9\u6253\u4e0a\u5982\u4e0b Label\uff1a

                                      kubectl label nodes {node} nvidia.com/mig.config=\"all-1g.10gb\" --overwrite\n
  • For mixed mode, label the corresponding node as follows:

                                      kubectl label nodes {node} nvidia.com/mig.config=\"custom-config\" --overwrite\n

After partitioning, applications can use MIG GPU resources.
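A quick hedged check that the MIG resources were registered after partitioning, using the document's node placeholder:

kubectl describe node {node} | grep nvidia.com/mig\n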

                                  "},{"location":"end-user/kpanda/gpu/nvidia/push_image_to_repo.html","title":"\u5411\u706b\u79cd\u8282\u70b9\u4ed3\u5e93\u4e0a\u4f20 Red Hat GPU Opreator \u79bb\u7ebf\u955c\u50cf","text":"

                                  \u672c\u6587\u4ee5 Red Hat 8.4 \u7684 nvcr.io/nvidia/driver:525.105.17-rhel8.4 \u79bb\u7ebf\u9a71\u52a8\u955c\u50cf\u4e3a\u4f8b\uff0c\u4ecb\u7ecd\u5982\u4f55\u5411\u706b\u79cd\u8282\u70b9\u4ed3\u5e93\u4e0a\u4f20\u79bb\u7ebf\u955c\u50cf\u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/push_image_to_repo.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  1. \u706b\u79cd\u8282\u70b9\u53ca\u5176\u7ec4\u4ef6\u72b6\u6001\u8fd0\u884c\u6b63\u5e38\u3002
                                  2. \u51c6\u5907\u4e00\u4e2a\u80fd\u591f\u8bbf\u95ee\u4e92\u8054\u7f51\u548c\u706b\u79cd\u8282\u70b9\u7684\u8282\u70b9\uff0c\u4e14\u8282\u70b9\u4e0a\u5df2\u7ecf\u5b8c\u6210 Docker \u7684\u5b89\u88c5\u3002
                                  "},{"location":"end-user/kpanda/gpu/nvidia/push_image_to_repo.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"end-user/kpanda/gpu/nvidia/push_image_to_repo.html#_3","title":"\u5728\u8054\u7f51\u8282\u70b9\u83b7\u53d6\u79bb\u7ebf\u955c\u50cf","text":"

                                  \u4ee5\u4e0b\u64cd\u4f5c\u5728\u8054\u7f51\u8282\u70b9\u4e0a\u8fdb\u884c\u3002

                                  1. \u5728\u8054\u7f51\u673a\u5668\u4e0a\u62c9\u53d6 nvcr.io/nvidia/driver:525.105.17-rhel8.4 \u79bb\u7ebf\u9a71\u52a8\u955c\u50cf\uff1a

                                    docker pull nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
2. After the image is pulled, package it into a nvidia-driver.tar archive:

                                    docker save nvcr.io/nvidia/driver:525.105.17-rhel8.4 > nvidia-driver.tar\n
3. Copy the nvidia-driver.tar archive to the bootstrap node:

                                    scp  nvidia-driver.tar user@ip:/root\n

For example:

                                    scp  nvidia-driver.tar root@10.6.175.10:/root\n
                                  "},{"location":"end-user/kpanda/gpu/nvidia/push_image_to_repo.html#_4","title":"\u63a8\u9001\u955c\u50cf\u5230\u706b\u79cd\u8282\u70b9\u4ed3\u5e93","text":"

                                  \u4ee5\u4e0b\u64cd\u4f5c\u5728\u706b\u79cd\u8282\u70b9\u4e0a\u8fdb\u884c\u3002

                                  1. \u767b\u5f55\u706b\u79cd\u8282\u70b9\uff0c\u5c06\u8054\u7f51\u8282\u70b9\u62f7\u8d1d\u7684\u955c\u50cf\u538b\u7f29\u5305 nvidia-driver.tar \u5bfc\u5165\u672c\u5730\uff1a

                                    docker load -i nvidia-driver.tar\n
2. View the imported image:

                                    docker images -a |grep nvidia\n

Expected output:

                                    nvcr.io/nvidia/driver                 e3ed7dee73e9   1 days ago   1.02GB\n
3. Retag the image so it corresponds to the target repository in the remote Registry:

                                    docker tag <image-name> <registry-url>/<repository-name>:<tag>\n
                                    • <image-name> \u662f\u4e0a\u4e00\u6b65 nvidia \u955c\u50cf\u7684\u540d\u79f0\uff0c
                                    • <registry-url> \u662f\u706b\u79cd\u8282\u70b9\u4e0a Registry \u670d\u52a1\u7684\u5730\u5740\uff0c
                                    • <repository-name> \u662f\u60a8\u8981\u63a8\u9001\u5230\u7684\u4ed3\u5e93\u540d\u79f0\uff0c
                                    • <tag> \u662f\u60a8\u4e3a\u955c\u50cf\u6307\u5b9a\u7684\u6807\u7b7e\u3002

                                    \u4f8b\u5982\uff1a

docker tag nvcr.io/nvidia/driver:525.105.17-rhel8.4 10.6.10.5/nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
4. Push the image to the bootstrap node repository:

                                    docker push {ip}/nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
                                  "},{"location":"end-user/kpanda/gpu/nvidia/push_image_to_repo.html#_5","title":"\u63a5\u4e0b\u6765","text":"

                                  \u53c2\u8003\u6784\u5efa Red Hat 8.4 \u79bb\u7ebf yum \u6e90\u548c GPU Operator \u79bb\u7ebf\u5b89\u88c5\u6765\u4e3a\u96c6\u7fa4\u90e8\u7f72 GPU Operator\u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/rhel9.2_offline_install_driver.html","title":"RHEL 9.2 \u79bb\u7ebf\u5b89\u88c5 gpu-operator \u9a71\u52a8","text":"

                                  \u524d\u63d0\u6761\u4ef6\uff1a\u5df2\u5b89\u88c5 gpu-operator v23.9.0+2 \u53ca\u66f4\u9ad8\u7248\u672c

                                  RHEL 9.2 \u9a71\u52a8\u955c\u50cf\u4e0d\u80fd\u76f4\u63a5\u5b89\u88c5\uff0c\u5b98\u65b9\u7684\u9a71\u52a8\u811a\u672c\u5b58\u5728\u4e00\u70b9\u95ee\u9898\uff0c\u5728\u5b98\u65b9\u4fee\u590d\u4e4b\u524d\uff0c\u63d0\u4f9b\u5982\u4e0b\u7684\u6b65\u9aa4\u6765\u5b9e\u73b0\u79bb\u7ebf\u5b89\u88c5\u9a71\u52a8\u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/rhel9.2_offline_install_driver.html#nouveau","title":"\u7981\u7528nouveau\u9a71\u52a8","text":"

                                  \u5728 RHEL 9.2 \u4e2d\u5b58\u5728 nouveau \u975e\u5b98\u65b9\u7684 Nvidia \u9a71\u52a8\uff0c\u56e0\u6b64\u9700\u8981\u5148\u7981\u7528\u3002

# Create a new file\nsudo vi /etc/modprobe.d/blacklist-nouveau.conf\n# Add the following two lines:\nblacklist nouveau\noptions nouveau modeset=0\n# Disable nouveau\nsudo dracut --force\n# Reboot the VM\nsudo reboot\n# Check whether nouveau has been disabled successfully\nlsmod | grep nouveau\n
                                  "},{"location":"end-user/kpanda/gpu/nvidia/rhel9.2_offline_install_driver.html#_1","title":"\u81ea\u5b9a\u4e49\u9a71\u52a8\u955c\u50cf","text":"

                                  \u5148\u5728\u672c\u5730\u521b\u5efa nvidia-driver \u6587\u4ef6\uff1a

                                  \u70b9\u51fb\u67e5\u770b\u5b8c\u6574\u7684 nvidia-driver \u6587\u4ef6\u5185\u5bb9
                                  #! /bin/bash -x\n# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.\n\nset -eu\n\nRUN_DIR=/run/nvidia\nPID_FILE=${RUN_DIR}/${0##*/}.pid\nDRIVER_VERSION=${DRIVER_VERSION:?\"Missing DRIVER_VERSION env\"}\nKERNEL_UPDATE_HOOK=/run/kernel/postinst.d/update-nvidia-driver\nNUM_VGPU_DEVICES=0\nNVIDIA_MODULE_PARAMS=()\nNVIDIA_UVM_MODULE_PARAMS=()\nNVIDIA_MODESET_MODULE_PARAMS=()\nNVIDIA_PEERMEM_MODULE_PARAMS=()\nTARGETARCH=${TARGETARCH:?\"Missing TARGETARCH env\"}\nUSE_HOST_MOFED=\"${USE_HOST_MOFED:-false}\"\nDNF_RELEASEVER=${DNF_RELEASEVER:-\"\"}\nRHEL_VERSION=${RHEL_VERSION:-\"\"}\nRHEL_MAJOR_VERSION=9\n\nOPEN_KERNEL_MODULES_ENABLED=${OPEN_KERNEL_MODULES_ENABLED:-false}\n[[ \"${OPEN_KERNEL_MODULES_ENABLED}\" == \"true\" ]] && KERNEL_TYPE=kernel-open || KERNEL_TYPE=kernel\n\nDRIVER_ARCH=${TARGETARCH/amd64/x86_64} && DRIVER_ARCH=${DRIVER_ARCH/arm64/aarch64}\necho \"DRIVER_ARCH is $DRIVER_ARCH\"\n\nSCRIPT_DIR=$( cd -- \"$( dirname -- \"${BASH_SOURCE[0]}\" )\" &> /dev/null && pwd )\nsource $SCRIPT_DIR/common.sh\n\n_update_package_cache() {\n    if [ \"${PACKAGE_TAG:-}\" != \"builtin\" ]; then\n        echo \"Updating the package cache...\"\n        if ! yum -q makecache; then\n            echo \"FATAL: failed to reach RHEL package repositories. \"\\\n                 \"Ensure that the cluster can access the proper networks.\"\n            exit 1\n        fi\n    fi\n}\n\n_cleanup_package_cache() {\n    if [ \"${PACKAGE_TAG:-}\" != \"builtin\" ]; then\n        echo \"Cleaning up the package cache...\"\n        rm -rf /var/cache/yum/*\n    fi\n}\n\n_get_rhel_version_from_kernel() {\n    local rhel_version_underscore rhel_version_arr\n    rhel_version_underscore=$(echo \"${KERNEL_VERSION}\" | sed 's/.*el\\([0-9]\\+_[0-9]\\+\\).*/\\1/g')\n    # For e.g. :- from the kernel version 4.18.0-513.9.1.el8_9, we expect to extract the string \"8_9\"\n    if [[ ! 
${rhel_version_underscore} =~ ^[0-9]+_[0-9]+$ ]]; then\n        echo \"Unable to resolve RHEL version from kernel version\" >&2\n        return 1\n    fi\n    IFS='_' read -r -a rhel_version_arr <<< \"$rhel_version_underscore\"\n    if [[ ${#rhel_version_arr[@]} -ne 2 ]]; then\n        echo \"Unable to resolve RHEL version from kernel version\" >&2\n        return 1\n    fi\n    RHEL_VERSION=\"${rhel_version_arr[0]}.${rhel_version_arr[1]}\"\n    echo \"RHEL VERSION successfully resolved from kernel: ${RHEL_VERSION}\"\n    return 0\n}\n\n_resolve_rhel_version() {\n    _get_rhel_version_from_kernel || RHEL_VERSION=\"${RHEL_MAJOR_VERSION}\"\n    # set dnf release version as rhel version by default\n    if [[ -z \"${DNF_RELEASEVER}\" ]]; then\n        DNF_RELEASEVER=\"${RHEL_VERSION}\"\n    fi\n    return 0\n}\n\n# Resolve the kernel version to the form major.minor.patch-revision.\n_resolve_kernel_version() {\n    echo \"Resolving Linux kernel version...\"\n    local version=$(yum -q list available --showduplicates kernel-headers |\n      awk -v arch=$(uname -m) 'NR>1 {print $2\".\"arch}' | tac | grep -E -m1 \"^${KERNEL_VERSION/latest/.*}\")\n\n    if [ -z \"${version}\" ]; then\n        echo \"Could not resolve Linux kernel version\" >&2\n        return 1\n    fi\n    KERNEL_VERSION=\"${version}\"\n    echo \"Proceeding with Linux kernel version ${KERNEL_VERSION}\"\n    return 0\n}\n\n# Install the kernel modules header/builtin/order files and generate the kernel version string.\n_install_prerequisites() (\n    local tmp_dir=$(mktemp -d)\n\n    trap \"rm -rf ${tmp_dir}\" EXIT\n    cd ${tmp_dir}\n\n    echo \"Installing elfutils...\"\n    if ! dnf install -q -y elfutils-libelf.$DRIVER_ARCH; then\n        echo \"FATAL: failed to install elfutils packages. RHEL entitlement may be improperly deployed.\"\n        exit 1\n    fi\n    if ! dnf install -q -y elfutils-libelf-devel.$DRIVER_ARCH; then\n        echo \"FATAL: failed to install elfutils packages. RHEL entitlement may be improperly deployed.\"\n        exit 1\n    fi    \n\n    rm -rf /lib/modules/${KERNEL_VERSION}\n    mkdir -p /lib/modules/${KERNEL_VERSION}/proc\n\n    echo \"Enabling RHOCP and EUS RPM repos...\"\n    if [ -n \"${OPENSHIFT_VERSION:-}\" ]; then\n        dnf config-manager --set-enabled rhocp-${OPENSHIFT_VERSION}-for-rhel-9-$DRIVER_ARCH-rpms || true\n        if ! dnf makecache --releasever=${DNF_RELEASEVER}; then\n            dnf config-manager --set-disabled rhocp-${OPENSHIFT_VERSION}-for-rhel-9-$DRIVER_ARCH-rpms || true\n        fi\n    fi\n\n    dnf config-manager --set-enabled rhel-9-for-$DRIVER_ARCH-baseos-eus-rpms  || true\n    if ! dnf makecache --releasever=${DNF_RELEASEVER}; then\n            dnf config-manager --set-disabled rhel-9-for-$DRIVER_ARCH-baseos-eus-rpms || true\n    fi\n\n    # try with EUS disabled, if it does not work, then try just major version\n    if ! dnf makecache --releasever=${DNF_RELEASEVER}; then\n      # If pointing to DNF_RELEASEVER does not work, we point to the RHEL_MAJOR_VERSION as a last resort\n      if ! 
dnf makecache --releasever=${RHEL_MAJOR_VERSION}; then\n        echo \"FATAL: failed to update the dnf metadata cache after multiple attempts with releasevers ${DNF_RELEASEVER}, ${RHEL_MAJOR_VERSION}\"\n        exit 1\n      else\n        DNF_RELEASEVER=${RHEL_MAJOR_VERSION}\n      fi\n    fi\n\n    echo \"Installing Linux kernel headers...\"\n    dnf -q -y --releasever=${DNF_RELEASEVER} install kernel-headers-${KERNEL_VERSION} kernel-devel-${KERNEL_VERSION} --allowerasing > /dev/null\n    ln -s /usr/src/kernels/${KERNEL_VERSION} /lib/modules/${KERNEL_VERSION}/build\n\n    echo \"Installing Linux kernel module files...\"\n    dnf -q -y --releasever=${DNF_RELEASEVER} install kernel-core-${KERNEL_VERSION} > /dev/null\n\n    # Prevent depmod from giving a WARNING about missing files\n    touch /lib/modules/${KERNEL_VERSION}/modules.order\n    touch /lib/modules/${KERNEL_VERSION}/modules.builtin\n\n    depmod ${KERNEL_VERSION}\n\n    echo \"Generating Linux kernel version string...\"\n    if [ \"$TARGETARCH\" = \"arm64\" ]; then\n        gunzip -c /lib/modules/${KERNEL_VERSION}/vmlinuz | strings | grep -E '^Linux version' | sed 's/^\\(.*\\)\\s\\+(.*)$/\\1/' > version\n    else\n        extract-vmlinux /lib/modules/${KERNEL_VERSION}/vmlinuz | strings | grep -E '^Linux version' | sed 's/^\\(.*\\)\\s\\+(.*)$/\\1/' > version\n    fi\n    if [ -z \"$(<version)\" ]; then\n        echo \"Could not locate Linux kernel version string\" >&2\n        return 1\n    fi\n    mv version /lib/modules/${KERNEL_VERSION}/proc\n\n    # Parse gcc version\n    # gcc_version is expected to match x.y.z\n    # current_gcc is expected to match 'gcc-x.y.z-rel.el8.x86_64\n    local gcc_version=$(cat /lib/modules/${KERNEL_VERSION}/proc/version | grep -Eo \"gcc \\(GCC\\) ([0-9\\.]+)\" | grep -Eo \"([0-9\\.]+)\")\n    local current_gcc=$(rpm -qa gcc)\n    echo \"kernel requires gcc version: 'gcc-${gcc_version}', current gcc version is '${current_gcc}'\"\n\n    if ! [[ \"${current_gcc}\" =~ \"gcc-${gcc_version}\"-.* ]]; then\n        dnf install -q -y --releasever=${DNF_RELEASEVER} \"gcc-${gcc_version}\"\n    fi\n)\n\n# Cleanup the prerequisites installed above.\n_remove_prerequisites() {\n    true\n    if [ \"${PACKAGE_TAG:-}\" != \"builtin\" ]; then\n        dnf -q -y remove kernel-headers-${KERNEL_VERSION} kernel-devel-${KERNEL_VERSION} > /dev/null\n        # TODO remove module files not matching an existing driver package.\n    fi\n}\n\n# Check if the kernel version requires a new precompiled driver packages.\n_kernel_requires_package() {\n    local proc_mount_arg=\"\"\n\n    echo \"Checking NVIDIA driver packages...\"\n\n    [[ ! 
-d /usr/src/nvidia-${DRIVER_VERSION}/${KERNEL_TYPE} ]] && return 0\n    cd /usr/src/nvidia-${DRIVER_VERSION}/${KERNEL_TYPE}\n\n    proc_mount_arg=\"--proc-mount-point /lib/modules/${KERNEL_VERSION}/proc\"\n    for pkg_name in $(ls -d -1 precompiled/** 2> /dev/null); do\n        is_match=$(../mkprecompiled --match ${pkg_name} ${proc_mount_arg})\n        if [ \"${is_match}\" == \"kernel interface matches.\" ]; then\n            echo \"Found NVIDIA driver package ${pkg_name##*/}\"\n            return 1\n        fi\n    done\n    return 0\n}\n\n# Compile the kernel modules, optionally sign them, and generate a precompiled package for use by the nvidia-installer.\n_create_driver_package() (\n    local pkg_name=\"nvidia-modules-${KERNEL_VERSION%%-*}${PACKAGE_TAG:+-${PACKAGE_TAG}}\"\n    local nvidia_sign_args=\"\"\n    local nvidia_modeset_sign_args=\"\"\n    local nvidia_uvm_sign_args=\"\"\n\n    trap \"make -s -j ${MAX_THREADS} SYSSRC=/lib/modules/${KERNEL_VERSION}/build clean > /dev/null\" EXIT\n\n    echo \"Compiling NVIDIA driver kernel modules...\"\n    cd /usr/src/nvidia-${DRIVER_VERSION}/${KERNEL_TYPE}\n\n    if _gpu_direct_rdma_enabled; then\n        ln -s /run/mellanox/drivers/usr/src/ofa_kernel /usr/src/\n        # if arch directory exists(MOFED >=5.5) then create a symlink as expected by GPU driver installer\n        # This is required as currently GPU driver installer doesn't expect headers in x86_64 folder, but only in either default or kernel-version folder.\n        # ls -ltr /usr/src/ofa_kernel/\n        # lrwxrwxrwx 1 root root   36 Dec  8 20:10 default -> /etc/alternatives/ofa_kernel_headers\n        # drwxr-xr-x 4 root root 4096 Dec  8 20:14 x86_64\n        # lrwxrwxrwx 1 root root   44 Dec  9 19:05 5.4.0-90-generic -> /usr/src/ofa_kernel/x86_64/5.4.0-90-generic/\n        if [[ -d \"/run/mellanox/drivers/usr/src/ofa_kernel/$(uname -m)/$(uname -r)\" ]]; then\n            if [[ ! 
-e \"/usr/src/ofa_kernel/$(uname -r)\" ]]; then\n                ln -s \"/run/mellanox/drivers/usr/src/ofa_kernel/$(uname -m)/$(uname -r)\" /usr/src/ofa_kernel/\n            fi\n        fi\n    fi\n\n    make -s -j ${MAX_THREADS} SYSSRC=/lib/modules/${KERNEL_VERSION}/build nv-linux.o nv-modeset-linux.o > /dev/null\n\n    echo \"Relinking NVIDIA driver kernel modules...\"\n    rm -f nvidia.ko nvidia-modeset.ko\n    ld -d -r -o nvidia.ko ./nv-linux.o ./nvidia/nv-kernel.o_binary\n    ld -d -r -o nvidia-modeset.ko ./nv-modeset-linux.o ./nvidia-modeset/nv-modeset-kernel.o_binary\n\n    if [ -n \"${PRIVATE_KEY}\" ]; then\n        echo \"Signing NVIDIA driver kernel modules...\"\n        donkey get ${PRIVATE_KEY} sh -c \"PATH=${PATH}:/usr/src/linux-headers-${KERNEL_VERSION}/scripts && \\\n          sign-file sha512 \\$DONKEY_FILE pubkey.x509 nvidia.ko nvidia.ko.sign &&                          \\\n          sign-file sha512 \\$DONKEY_FILE pubkey.x509 nvidia-modeset.ko nvidia-modeset.ko.sign &&          \\\n          sign-file sha512 \\$DONKEY_FILE pubkey.x509 nvidia-uvm.ko\"\n        nvidia_sign_args=\"--linked-module nvidia.ko --signed-module nvidia.ko.sign\"\n        nvidia_modeset_sign_args=\"--linked-module nvidia-modeset.ko --signed-module nvidia-modeset.ko.sign\"\n        nvidia_uvm_sign_args=\"--signed\"\n    fi\n\n    echo \"Building NVIDIA driver package ${pkg_name}...\"\n    ../mkprecompiled --pack ${pkg_name} --description ${KERNEL_VERSION}                              \\\n                                        --proc-mount-point /lib/modules/${KERNEL_VERSION}/proc       \\\n                                        --driver-version ${DRIVER_VERSION}                           \\\n                                        --kernel-interface nv-linux.o                                \\\n                                        --linked-module-name nvidia.ko                               \\\n                                        --core-object-name nvidia/nv-kernel.o_binary                 \\\n                                        ${nvidia_sign_args}                                          \\\n                                        --target-directory .                                         \\\n                                        --kernel-interface nv-modeset-linux.o                        \\\n                                        --linked-module-name nvidia-modeset.ko                       \\\n                                        --core-object-name nvidia-modeset/nv-modeset-kernel.o_binary \\\n                                        ${nvidia_modeset_sign_args}                                  \\\n                                        --target-directory .                                         
\\\n                                        --kernel-module nvidia-uvm.ko                                \\\n                                        ${nvidia_uvm_sign_args}                                      \\\n                                        --target-directory .\n    mkdir -p precompiled\n    mv ${pkg_name} precompiled\n)\n\n_assert_nvswitch_system() {\n    [ -d /proc/driver/nvidia-nvswitch ] || return 1\n    entries=$(ls -1 /proc/driver/nvidia-nvswitch/devices/*)\n    if [ -z \"${entries}\" ]; then\n        return 1\n    fi\n    return 0\n}\n\n# For each kernel module configuration file mounted into the container,\n# parse the file contents and extract the custom module parameters that\n# are to be passed as input to 'modprobe'.\n#\n# Assumptions:\n# - Configuration files are named <module-name>.conf (i.e. nvidia.conf, nvidia-uvm.conf).\n# - Configuration files are mounted inside the container at /drivers.\n# - Each line in the file contains at least one parameter, where parameters on the same line\n#   are space delimited. It is up to the user to properly format the file to ensure\n#   the correct set of parameters are passed to 'modprobe'.\n_get_module_params() {\n    local base_path=\"/drivers\"\n    # nvidia\n    if [ -f \"${base_path}/nvidia.conf\" ]; then\n       while IFS=\"\" read -r param || [ -n \"$param\" ]; do\n           NVIDIA_MODULE_PARAMS+=(\"$param\")\n       done <\"${base_path}/nvidia.conf\"\n       echo \"Module parameters provided for nvidia: ${NVIDIA_MODULE_PARAMS[@]}\"\n    fi\n    # nvidia-uvm\n    if [ -f \"${base_path}/nvidia-uvm.conf\" ]; then\n       while IFS=\"\" read -r param || [ -n \"$param\" ]; do\n           NVIDIA_UVM_MODULE_PARAMS+=(\"$param\")\n       done <\"${base_path}/nvidia-uvm.conf\"\n       echo \"Module parameters provided for nvidia-uvm: ${NVIDIA_UVM_MODULE_PARAMS[@]}\"\n    fi\n    # nvidia-modeset\n    if [ -f \"${base_path}/nvidia-modeset.conf\" ]; then\n       while IFS=\"\" read -r param || [ -n \"$param\" ]; do\n           NVIDIA_MODESET_MODULE_PARAMS+=(\"$param\")\n       done <\"${base_path}/nvidia-modeset.conf\"\n       echo \"Module parameters provided for nvidia-modeset: ${NVIDIA_MODESET_MODULE_PARAMS[@]}\"\n    fi\n    # nvidia-peermem\n    if [ -f \"${base_path}/nvidia-peermem.conf\" ]; then\n       while IFS=\"\" read -r param || [ -n \"$param\" ]; do\n           NVIDIA_PEERMEM_MODULE_PARAMS+=(\"$param\")\n       done <\"${base_path}/nvidia-peermem.conf\"\n       echo \"Module parameters provided for nvidia-peermem: ${NVIDIA_PEERMEM_MODULE_PARAMS[@]}\"\n    fi\n}\n\n# Load the kernel modules and start persistenced.\n_load_driver() {\n    echo \"Parsing kernel module parameters...\"\n    _get_module_params\n\n    local nv_fw_search_path=\"$RUN_DIR/driver/lib/firmware\"\n    local set_fw_path=\"true\"\n    local fw_path_config_file=\"/sys/module/firmware_class/parameters/path\"\n    for param in \"${NVIDIA_MODULE_PARAMS[@]}\"; do\n        if [[ \"$param\" == \"NVreg_EnableGpuFirmware=0\" ]]; then\n          set_fw_path=\"false\"\n        fi\n    done\n\n    if [[ \"$set_fw_path\" == \"true\" ]]; then\n        echo \"Configuring the following firmware search path in '$fw_path_config_file': $nv_fw_search_path\"\n        if [[ ! 
-z $(grep '[^[:space:]]' $fw_path_config_file) ]]; then\n            echo \"WARNING: A search path is already configured in $fw_path_config_file\"\n            echo \"         Retaining the current configuration\"\n        else\n            echo -n \"$nv_fw_search_path\" > $fw_path_config_file || echo \"WARNING: Failed to configure the firmware search path\"\n        fi\n    fi\n\n    echo \"Loading ipmi and i2c_core kernel modules...\"\n    modprobe -a i2c_core ipmi_msghandler ipmi_devintf\n\n    echo \"Loading NVIDIA driver kernel modules...\"\n    set -o xtrace +o nounset\n    modprobe nvidia \"${NVIDIA_MODULE_PARAMS[@]}\"\n    modprobe nvidia-uvm \"${NVIDIA_UVM_MODULE_PARAMS[@]}\"\n    modprobe nvidia-modeset \"${NVIDIA_MODESET_MODULE_PARAMS[@]}\"\n    set +o xtrace -o nounset\n\n    if _gpu_direct_rdma_enabled; then\n        echo \"Loading NVIDIA Peer Memory kernel module...\"\n        set -o xtrace +o nounset\n        modprobe -a nvidia-peermem \"${NVIDIA_PEERMEM_MODULE_PARAMS[@]}\"\n        set +o xtrace -o nounset\n    fi\n\n    echo \"Starting NVIDIA persistence daemon...\"\n    nvidia-persistenced --persistence-mode\n\n    if [ \"${DRIVER_TYPE}\" = \"vgpu\" ]; then\n        echo \"Copying gridd.conf...\"\n        cp /drivers/gridd.conf /etc/nvidia/gridd.conf\n        if [ \"${VGPU_LICENSE_SERVER_TYPE}\" = \"NLS\" ]; then\n            echo \"Copying ClientConfigToken...\"\n            mkdir -p  /etc/nvidia/ClientConfigToken/\n            cp /drivers/ClientConfigToken/* /etc/nvidia/ClientConfigToken/\n        fi\n\n        echo \"Starting nvidia-gridd..\"\n        LD_LIBRARY_PATH=/usr/lib64/nvidia/gridd nvidia-gridd\n\n        # Start virtual topology daemon\n        _start_vgpu_topology_daemon\n    fi\n\n    if _assert_nvswitch_system; then\n        echo \"Starting NVIDIA fabric manager daemon...\"\n        nv-fabricmanager -c /usr/share/nvidia/nvswitch/fabricmanager.cfg\n    fi\n}\n\n# Stop persistenced and unload the kernel modules if they are currently loaded.\n_unload_driver() {\n    local rmmod_args=()\n    local nvidia_deps=0\n    local nvidia_refs=0\n    local nvidia_uvm_refs=0\n    local nvidia_modeset_refs=0\n    local nvidia_peermem_refs=0\n\n    echo \"Stopping NVIDIA persistence daemon...\"\n    if [ -f /var/run/nvidia-persistenced/nvidia-persistenced.pid ]; then\n        local pid=$(< /var/run/nvidia-persistenced/nvidia-persistenced.pid)\n\n        kill -SIGTERM \"${pid}\"\n        for i in $(seq 1 50); do\n            kill -0 \"${pid}\" 2> /dev/null || break\n            sleep 0.1\n        done\n        if [ $i -eq 50 ]; then\n            echo \"Could not stop NVIDIA persistence daemon\" >&2\n            return 1\n        fi\n    fi\n\n    if [ -f /var/run/nvidia-gridd/nvidia-gridd.pid ]; then\n        echo \"Stopping NVIDIA grid daemon...\"\n        local pid=$(< /var/run/nvidia-gridd/nvidia-gridd.pid)\n\n        kill -SIGTERM \"${pid}\"\n        for i in $(seq 1 10); do\n            kill -0 \"${pid}\" 2> /dev/null || break\n            sleep 0.1\n        done\n        if [ $i -eq 10 ]; then\n            echo \"Could not stop NVIDIA Grid daemon\" >&2\n            return 1\n        fi\n    fi\n\n    if [ -f /var/run/nvidia-fabricmanager/nv-fabricmanager.pid ]; then\n        echo \"Stopping NVIDIA fabric manager daemon...\"\n        local pid=$(< /var/run/nvidia-fabricmanager/nv-fabricmanager.pid)\n\n        kill -SIGTERM \"${pid}\"\n        for i in $(seq 1 50); do\n            kill -0 \"${pid}\" 2> /dev/null || break\n            sleep 0.1\n        done\n        if 
[ $i -eq 50 ]; then\n            echo \"Could not stop NVIDIA fabric manager daemon\" >&2\n            return 1\n        fi\n    fi\n\n    echo \"Unloading NVIDIA driver kernel modules...\"\n    if [ -f /sys/module/nvidia_modeset/refcnt ]; then\n        nvidia_modeset_refs=$(< /sys/module/nvidia_modeset/refcnt)\n        rmmod_args+=(\"nvidia-modeset\")\n        ((++nvidia_deps))\n    fi\n    if [ -f /sys/module/nvidia_uvm/refcnt ]; then\n        nvidia_uvm_refs=$(< /sys/module/nvidia_uvm/refcnt)\n        rmmod_args+=(\"nvidia-uvm\")\n        ((++nvidia_deps))\n    fi\n    if [ -f /sys/module/nvidia/refcnt ]; then\n        nvidia_refs=$(< /sys/module/nvidia/refcnt)\n        rmmod_args+=(\"nvidia\")\n    fi\n    if [ -f /sys/module/nvidia_peermem/refcnt ]; then\n        nvidia_peermem_refs=$(< /sys/module/nvidia_peermem/refcnt)\n        rmmod_args+=(\"nvidia-peermem\")\n        ((++nvidia_deps))\n    fi\n    if [ ${nvidia_refs} -gt ${nvidia_deps} ] || [ ${nvidia_uvm_refs} -gt 0 ] || [ ${nvidia_modeset_refs} -gt 0 ] || [ ${nvidia_peermem_refs} -gt 0 ]; then\n        echo \"Could not unload NVIDIA driver kernel modules, driver is in use\" >&2\n        return 1\n    fi\n\n    if [ ${#rmmod_args[@]} -gt 0 ]; then\n        rmmod ${rmmod_args[@]}\n    fi\n    return 0\n}\n\n# Link and install the kernel modules from a precompiled package using the nvidia-installer.\n_install_driver() {\n    local install_args=()\n\n    echo \"Installing NVIDIA driver kernel modules...\"\n    cd /usr/src/nvidia-${DRIVER_VERSION}\n    rm -rf /lib/modules/${KERNEL_VERSION}/video\n\n    if [ \"${ACCEPT_LICENSE}\" = \"yes\" ]; then\n        install_args+=(\"--accept-license\")\n    fi\n    IGNORE_CC_MISMATCH=1 nvidia-installer --kernel-module-only --no-drm --ui=none --no-nouveau-check -m=${KERNEL_TYPE} ${install_args[@]+\"${install_args[@]}\"}\n    # May need to add no-cc-check for Rhel, otherwise it complains about cc missing in path\n    # /proc/version and lib/modules/KERNEL_VERSION/proc are different, by default installer looks at /proc/ so, added the proc-mount-point\n    # TODO: remove the -a flag. its not needed. in the new driver version, license-acceptance is implicit\n    #nvidia-installer --kernel-module-only --no-drm --ui=none --no-nouveau-check --no-cc-version-check --proc-mount-point /lib/modules/${KERNEL_VERSION}/proc ${install_args[@]+\"${install_args[@]}\"}\n}\n\n# Mount the driver rootfs into the run directory with the exception of sysfs.\n_mount_rootfs() {\n    echo \"Mounting NVIDIA driver rootfs...\"\n    mount --make-runbindable /sys\n    mount --make-private /sys\n    mkdir -p ${RUN_DIR}/driver\n    mount --rbind / ${RUN_DIR}/driver\n\n    echo \"Check SELinux status\"\n    if [ -e /sys/fs/selinux ]; then\n        echo \"SELinux is enabled\"\n        echo \"Change device files security context for selinux compatibility\"\n        chcon -R -t container_file_t ${RUN_DIR}/driver/dev\n    else\n        echo \"SELinux is disabled, skipping...\"\n    fi\n}\n\n# Unmount the driver rootfs from the run directory.\n_unmount_rootfs() {\n    echo \"Unmounting NVIDIA driver rootfs...\"\n    if findmnt -r -o TARGET | grep \"${RUN_DIR}/driver\" > /dev/null; then\n        umount -l -R ${RUN_DIR}/driver\n    fi\n}\n\n# Write a kernel postinst.d script to automatically precompile packages on kernel update (similar to DKMS).\n_write_kernel_update_hook() {\n    if [ ! 
-d ${KERNEL_UPDATE_HOOK%/*} ]; then\n        return\n    fi\n\n    echo \"Writing kernel update hook...\"\n    cat > ${KERNEL_UPDATE_HOOK} <<'EOF'\n#!/bin/bash\n\nset -eu\ntrap 'echo \"ERROR: Failed to update the NVIDIA driver\" >&2; exit 0' ERR\n\nNVIDIA_DRIVER_PID=$(< /run/nvidia/nvidia-driver.pid)\n\nexport \"$(grep -z DRIVER_VERSION /proc/${NVIDIA_DRIVER_PID}/environ)\"\nnsenter -t \"${NVIDIA_DRIVER_PID}\" -m -- nvidia-driver update --kernel \"$1\"\nEOF\n    chmod +x ${KERNEL_UPDATE_HOOK}\n}\n\n_shutdown() {\n    if _unload_driver; then\n        _unmount_rootfs\n        rm -f ${PID_FILE} ${KERNEL_UPDATE_HOOK}\n        return 0\n    fi\n    return 1\n}\n\n_find_vgpu_driver_version() {\n    local count=\"\"\n    local version=\"\"\n    local drivers_path=\"/drivers\"\n\n    if [ \"${DISABLE_VGPU_VERSION_CHECK}\" = \"true\" ]; then\n        echo \"vgpu version compatibility check is disabled\"\n        return 0\n    fi\n    # check if vgpu devices are present\n    count=$(vgpu-util count)\n    if [ $? -ne 0 ]; then\n         echo \"cannot find vgpu devices on host, please check /var/log/vgpu-util.log for more details...\"\n         return 0\n    fi\n    NUM_VGPU_DEVICES=$(echo \"$count\" | awk -F= '{print $2}')\n    if [ $NUM_VGPU_DEVICES -eq 0 ]; then\n        # no vgpu devices found, treat as passthrough\n        return 0\n    fi\n    echo \"found $NUM_VGPU_DEVICES vgpu devices on host\"\n\n    # find compatible guest driver using driver catalog\n    if [ -d \"/mnt/shared-nvidia-driver-toolkit/drivers\" ]; then\n        drivers_path=\"/mnt/shared-nvidia-driver-toolkit/drivers\"\n    fi\n    version=$(vgpu-util match -i \"${drivers_path}\" -c \"${drivers_path}/vgpuDriverCatalog.yaml\")\n    if [ $? -ne 0 ]; then\n        echo \"cannot find match for compatible vgpu driver from available list, please check /var/log/vgpu-util.log for more details...\"\n        return 1\n    fi\n    DRIVER_VERSION=$(echo \"$version\" | awk -F= '{print $2}')\n    echo \"vgpu driver version selected: ${DRIVER_VERSION}\"\n    return 0\n}\n\n_start_vgpu_topology_daemon() {\n    type nvidia-topologyd > /dev/null 2>&1 || return 0\n    echo \"Starting nvidia-topologyd..\"\n    nvidia-topologyd\n}\n\n_prepare() {\n    if [ \"${DRIVER_TYPE}\" = \"vgpu\" ]; then\n        _find_vgpu_driver_version || exit 1\n    fi\n\n    # Install the userspace components and copy the kernel module sources.\n    sh NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION.run -x && \\\n        cd NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION && \\\n        sh /tmp/install.sh nvinstall && \\\n        mkdir -p /usr/src/nvidia-$DRIVER_VERSION && \\\n        mv LICENSE mkprecompiled ${KERNEL_TYPE} /usr/src/nvidia-$DRIVER_VERSION && \\\n        sed '9,${/^\\(kernel\\|LICENSE\\)/!d}' .manifest > /usr/src/nvidia-$DRIVER_VERSION/.manifest\n\n    echo -e \"\\n========== NVIDIA Software Installer ==========\\n\"\n    echo -e \"Starting installation of NVIDIA driver version ${DRIVER_VERSION} for Linux kernel version ${KERNEL_VERSION}\\n\"\n}\n\n_prepare_exclusive() {\n    _prepare\n\n    exec 3> ${PID_FILE}\n    if ! 
flock -n 3; then\n        echo \"An instance of the NVIDIA driver is already running, aborting\"\n        exit 1\n    fi\n    echo $$ >&3\n\n    trap \"echo 'Caught signal'; exit 1\" HUP INT QUIT PIPE TERM\n    trap \"_shutdown\" EXIT\n\n    _unload_driver || exit 1\n    _unmount_rootfs\n}\n\n_build() {\n    # Install dependencies\n    if _kernel_requires_package; then\n        _update_package_cache\n        _install_prerequisites\n        _create_driver_package\n        #_remove_prerequisites\n        _cleanup_package_cache\n    fi\n\n    # Build the driver\n    _install_driver\n}\n\n_load() {\n    _load_driver\n    _mount_rootfs\n    _write_kernel_update_hook\n\n    echo \"Done, now waiting for signal\"\n    sleep infinity &\n    trap \"echo 'Caught signal'; _shutdown && { kill $!; exit 0; }\" HUP INT QUIT PIPE TERM\n    trap - EXIT\n    while true; do wait $! || continue; done\n    exit 0\n}\n\ninit() {\n    _prepare_exclusive\n\n    _build\n\n    _load\n}\n\nbuild() {\n    _prepare\n\n    _build\n}\n\nload() {\n    _prepare_exclusive\n\n    _load\n}\n\nupdate() {\n    exec 3>&2\n    if exec 2> /dev/null 4< ${PID_FILE}; then\n        if ! flock -n 4 && read pid <&4 && kill -0 \"${pid}\"; then\n            exec > >(tee -a \"/proc/${pid}/fd/1\")\n            exec 2> >(tee -a \"/proc/${pid}/fd/2\" >&3)\n        else\n            exec 2>&3\n        fi\n        exec 4>&-\n    fi\n    exec 3>&-\n\n    # vgpu driver version is chosen dynamically during runtime, so pre-compile modules for\n    # only non-vgpu driver types\n    if [ \"${DRIVER_TYPE}\" != \"vgpu\" ]; then\n        # Install the userspace components and copy the kernel module sources.\n        if [ ! -e /usr/src/nvidia-${DRIVER_VERSION}/mkprecompiled ]; then\n            sh NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION.run -x && \\\n                cd NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION && \\\n                sh /tmp/install.sh nvinstall && \\\n                mkdir -p /usr/src/nvidia-$DRIVER_VERSION && \\\n                mv LICENSE mkprecompiled ${KERNEL_TYPE} /usr/src/nvidia-$DRIVER_VERSION && \\\n                sed '9,${/^\\(kernel\\|LICENSE\\)/!d}' .manifest > /usr/src/nvidia-$DRIVER_VERSION/.manifest\n        fi\n    fi\n\n    echo -e \"\\n========== NVIDIA Software Updater ==========\\n\"\n    echo -e \"Starting update of NVIDIA driver version ${DRIVER_VERSION} for Linux kernel version ${KERNEL_VERSION}\\n\"\n\n    trap \"echo 'Caught signal'; exit 1\" HUP INT QUIT PIPE TERM\n\n    _update_package_cache\n    _resolve_kernel_version || exit 1\n    _install_prerequisites\n    if _kernel_requires_package; then\n        _create_driver_package\n    fi\n    _remove_prerequisites\n    _cleanup_package_cache\n\n    echo \"Done\"\n    exit 0\n}\n\n# Wait for MOFED drivers to be loaded and load nvidia-peermem whenever it gets unloaded during MOFED driver updates\nreload_nvidia_peermem() {\n    if [ \"$USE_HOST_MOFED\" = \"true\" ]; then\n        until  lsmod | grep mlx5_core > /dev/null 2>&1 && [ -f /run/nvidia/validations/.driver-ctr-ready ];\n        do\n            echo \"waiting for mellanox ofed and nvidia drivers to be installed\"\n            sleep 10\n        done\n    else\n        # use driver readiness flag created by MOFED container\n        until  [ -f /run/mellanox/drivers/.driver-ready ] && [ -f /run/nvidia/validations/.driver-ctr-ready ];\n        do\n            echo \"waiting for mellanox ofed and nvidia drivers to be installed\"\n            sleep 10\n        done\n    fi\n    # get any parameters provided for 
nvidia-peermem\n    _get_module_params && set +o nounset\n    if chroot /run/nvidia/driver modprobe nvidia-peermem \"${NVIDIA_PEERMEM_MODULE_PARAMS[@]}\"; then\n        if [ -f /sys/module/nvidia_peermem/refcnt ]; then\n            echo \"successfully loaded nvidia-peermem module, now waiting for signal\"\n            sleep inf\n            trap \"echo 'Caught signal'; exit 1\" HUP INT QUIT PIPE TERM\n        fi\n    fi\n    echo \"failed to load nvidia-peermem module\"\n    exit 1\n}\n\n# probe by gpu-operator for liveness/startup checks for nvidia-peermem module to be loaded when MOFED drivers are ready\nprobe_nvidia_peermem() {\n    if lsmod | grep mlx5_core > /dev/null 2>&1; then\n        if [ ! -f /sys/module/nvidia_peermem/refcnt ]; then\n            echo \"nvidia-peermem module is not loaded\"\n            return 1\n        fi\n    else\n        echo \"MOFED drivers are not ready, skipping probe to avoid container restarts...\"\n    fi\n    return 0\n}\n\nusage() {\n    cat >&2 <<EOF\nUsage: $0 COMMAND [ARG...]\n\nCommands:\n  init   [-a | --accept-license] [-m | --max-threads MAX_THREADS]\n  build  [-a | --accept-license] [-m | --max-threads MAX_THREADS]\n  load\n  update [-k | --kernel VERSION] [-s | --sign KEYID] [-t | --tag TAG] [-m | --max-threads MAX_THREADS]\nEOF\n    exit 1\n}\n\nif [ $# -eq 0 ]; then\n    usage\nfi\ncommand=$1; shift\ncase \"${command}\" in\n    init) options=$(getopt -l accept-license,max-threads: -o am: -- \"$@\") ;;\n    build) options=$(getopt -l accept-license,tag:,max-threads: -o a:t:m: -- \"$@\") ;;\n    load) options=\"\" ;;\n    update) options=$(getopt -l kernel:,sign:,tag:,max-threads: -o k:s:t:m: -- \"$@\") ;;\n    reload_nvidia_peermem) options=\"\" ;;\n    probe_nvidia_peermem) options=\"\" ;;\n    *) usage ;;\nesac\nif [ $? -ne 0 ]; then\n    usage\nfi\neval set -- \"${options}\"\n\nACCEPT_LICENSE=\"\"\nMAX_THREADS=\"\"\nKERNEL_VERSION=$(uname -r)\nPRIVATE_KEY=\"\"\nPACKAGE_TAG=\"\"\n\nfor opt in ${options}; do\n    case \"$opt\" in\n    -a | --accept-license) ACCEPT_LICENSE=\"yes\"; shift 1 ;;\n    -k | --kernel) KERNEL_VERSION=$2; shift 2 ;;\n    -m | --max-threads) MAX_THREADS=$2; shift 2 ;;\n    -s | --sign) PRIVATE_KEY=$2; shift 2 ;;\n    -t | --tag) PACKAGE_TAG=$2; shift 2 ;;\n    --) shift; break ;;\n    esac\ndone\nif [ $# -ne 0 ]; then\n    usage\nfi\n\n_resolve_rhel_version || exit 1\n\n$command\n

                                  Use the official image as a base to rebuild a custom image. Below is an example Dockerfile:

                                  FROM nvcr.io/nvidia/driver:535.183.06-rhel9.2\nCOPY nvidia-driver /usr/local/bin\nRUN chmod +x /usr/local/bin/nvidia-driver\nCMD [\"/bin/bash\", \"-c\"]\n

                                  Build the image and push it to the bootstrap cluster registry:

                                  docker build -t {bootstrap-registry}/nvcr.m.daocloud.io/nvidia/driver:535.183.06-01-rhel9.2 -f Dockerfile .\ndocker push {bootstrap-registry}/nvcr.m.daocloud.io/nvidia/driver:535.183.06-01-rhel9.2\n
                                  "},{"location":"end-user/kpanda/gpu/nvidia/rhel9.2_offline_install_driver.html#_2","title":"\u5b89\u88c5\u9a71\u52a8","text":"
                                  1. \u5b89\u88c5 gpu-operator addon
                                  2. \u8bbe\u7f6e driver.version=535.183.06-01
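                                  If the addon is installed with Helm rather than through the UI, the same setting can be passed as a chart value. This is a minimal sketch, assuming a recent gpu-operator Helm chart; the driver.repository override is only needed when the rebuilt image is hosted in the bootstrap registry:

                                    helm upgrade --install gpu-operator nvidia/gpu-operator -n gpu-operator --set driver.repository={bootstrap-registry}/nvcr.m.daocloud.io/nvidia --set driver.version=535.183.06-01\n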
                                  "},{"location":"end-user/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html","title":"Ubuntu22.04 \u79bb\u7ebf\u5b89\u88c5 gpu-operator \u9a71\u52a8","text":"

                                  \u524d\u63d0\u6761\u4ef6\uff1a\u5df2\u5b89\u88c5 gpu-operator v23.9.0+2 \u53ca\u66f4\u9ad8\u7248\u672c

                                  "},{"location":"end-user/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html#_1","title":"\u51c6\u5907\u79bb\u7ebf\u955c\u50cf","text":"
                                  1. \u67e5\u770b\u5185\u6838\u7248\u672c

                                    $ uname -r\n5.15.0-78-generic\n
                                  2. \u67e5\u770b\u5185\u6838\u5bf9\u5e94\u7684 GPU Driver \u955c\u50cf\u7248\u672c\uff0c https://catalog.ngc.nvidia.com/orgs/nvidia/containers/driver/tags\u3002 \u4f7f\u7528\u5185\u6838\u67e5\u8be2\u955c\u50cf\u7248\u672c\uff0c\u901a\u8fc7 ctr export \u4fdd\u5b58\u955c\u50cf\u3002

                                    ctr i pull nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04\nctr i export --all-platforms driver.tar.gz nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 \n
                                  3. Import the image into the bootstrap cluster's image registry:

                                    ctr i import driver.tar.gz\nctr i tag nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 {bootstrap-registry}/nvcr.m.daocloud.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04\nctr i push {bootstrap-registry}/nvcr.m.daocloud.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 --skip-verify=true\n
                                  "},{"location":"end-user/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html#_2","title":"Install the Driver","text":"
                                  1. Install the gpu-operator addon
                                  2. If using precompiled mode, set driver.usePrecompiled=true and set driver.version=535; note that the value is 535, not 535.104.12. (In non-precompiled mode, skip this step and install directly; a Helm sketch follows below.)
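                                  A minimal Helm sketch of the precompiled-mode settings, assuming a recent gpu-operator Helm chart that exposes driver.usePrecompiled:

                                    helm upgrade --install gpu-operator nvidia/gpu-operator -n gpu-operator --set driver.usePrecompiled=true --set driver.version=535\n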
                                  "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html","title":"\u6784\u5efa CentOS 7.9 \u79bb\u7ebf yum \u6e90","text":""},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#_1","title":"\u4f7f\u7528\u573a\u666f\u4ecb\u7ecd","text":"

                                  \u5f53\u5de5\u4f5c\u8282\u70b9\u7684\u5185\u6838\u7248\u672c\u4e0e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u63a7\u5236\u8282\u70b9\u5185\u6838\u7248\u672c\u6216 OS \u7c7b\u578b\u4e0d\u4e00\u81f4\u65f6\uff0c\u9700\u8981\u7528\u6237\u624b\u52a8\u6784\u5efa\u79bb\u7ebf yum \u6e90\u3002

                                  \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u6784\u5efa\u79bb\u7ebf yum \u6e90\uff0c \u5e76\u5728\u5b89\u88c5 Gpu Operator \u65f6\uff0c\u901a\u8fc7 RepoConfig.ConfigMapName \u53c2\u6570\u6765\u4f7f\u7528\u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  1. \u7528\u6237\u5df2\u7ecf\u5728\u5e73\u53f0\u4e0a\u5b89\u88c5\u4e86 v0.12.0 \u53ca\u4ee5\u4e0a\u7248\u672c\u7684 addon \u79bb\u7ebf\u5305\u3002
                                  2. \u51c6\u5907\u4e00\u4e2a\u80fd\u591f\u548c\u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u7f51\u7edc\u80fd\u591f\u8054\u901a\u7684\u6587\u4ef6\u670d\u52a1\u5668\uff0c\u5982 nginx \u6216 minio\u3002
                                  3. \u51c6\u5907\u4e00\u4e2a\u80fd\u591f\u8bbf\u95ee\u4e92\u8054\u7f51\u3001\u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u548c\u6587\u4ef6\u670d\u52a1\u5668\u7684\u8282\u70b9\uff0c \u4e14\u8282\u70b9\u4e0a\u5df2\u7ecf\u5b8c\u6210 Docker \u7684\u5b89\u88c5\u3002
                                  "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                  \u672c\u6587\u4ee5\u5185\u6838\u7248\u672c\u4e3a 3.10.0-1160.95.1.el7.x86_64 \u7684 CentOS 7.9 \u8282\u70b9\u4e3a\u4f8b\uff0c\u4ecb\u7ecd\u5982\u4f55\u6784\u5efa GPU operator \u79bb\u7ebf\u5305\u7684 yum \u6e90\u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#os","title":"\u68c0\u67e5\u96c6\u7fa4\u8282\u70b9\u7684 OS \u548c\u5185\u6838\u7248\u672c","text":"

                                  \u5206\u522b\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u63a7\u5236\u8282\u70b9\u548c\u5f85\u90e8\u7f72 GPU Operator \u7684\u8282\u70b9\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c\u82e5\u4e24\u4e2a\u8282\u70b9\u7684 OS \u548c\u5185\u6838\u7248\u672c\u4e00\u81f4\u5219\u65e0\u9700\u6784\u5efa yum \u6e90\uff0c \u53ef\u53c2\u8003\u79bb\u7ebf\u5b89\u88c5 GPU Operator \u6587\u6863\u76f4\u63a5\u5b89\u88c5\uff1b\u82e5\u4e24\u4e2a\u8282\u70b9\u7684 OS \u6216\u5185\u6838\u7248\u672c\u4e0d\u4e00\u81f4\uff0c\u8bf7\u6267\u884c\u4e0b\u4e00\u6b65\u3002

                                  1. \u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c\u67e5\u770b\u96c6\u7fa4\u4e0b\u5f85\u90e8\u7f72 GPU Operator \u8282\u70b9\u7684\u53d1\u884c\u7248\u540d\u79f0\u548c\u7248\u672c\u53f7\u3002

                                    cat /etc/redhat-release\n

                                    The expected output is:

                                    CentOS Linux release 7.9 (Core)\n

                                    The output shows that the node's OS release is CentOS 7.9.

                                  2. Run the following command to check the kernel version of the node where the GPU Operator is to be deployed:

                                    uname -a\n

                                    The expected output is:

                                    Linux localhost.localdomain 3.10.0-1160.95.1.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux\n

                                    The output shows that the node's kernel version is 3.10.0-1160.95.1.el7.x86_64.

                                  "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#yum","title":"\u5236\u4f5c\u79bb\u7ebf yum \u6e90","text":"

                                  \u5728\u4e00\u4e2a\u80fd\u591f\u8bbf\u95ee\u4e92\u8054\u7f51\u548c\u6587\u4ef6\u670d\u52a1\u5668\u7684\u8282\u70b9\u4e0a\u8fdb\u884c\u64cd\u4f5c\u3002

                                  1. \u5728\u4e00\u4e2a\u80fd\u591f\u8bbf\u95ee\u4e92\u8054\u7f51\u548c\u6587\u4ef6\u670d\u52a1\u5668\u7684\u8282\u70b9\u4e0a\u6267\u884c\u5982\u4e0b\u547d\u4ee4\u65b0\u5efa\u4e00\u4e2a\u540d\u4e3a yum.sh \u7684\u811a\u672c\u6587\u4ef6\u3002

                                    vi yum.sh\n

                                    \u7136\u540e\u6309\u4e0b i \u952e\u8fdb\u5165\u63d2\u5165\u6a21\u5f0f\uff0c\u8f93\u5165\u4ee5\u4e0b\u5185\u5bb9\uff1a

                                    export TARGET_KERNEL_VERSION=$1\n\ncat >> run.sh << \\EOF\n#! /bin/bash\necho \"start install kernel repo\"\necho ${KERNEL_VERSION}\nmkdir centos-base\n\nif [ \"$OS\" -eq 7 ]; then\n    yum install --downloadonly --downloaddir=./centos-base perl\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf.x86_64\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf-devel.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-headers-${KERNEL_VERSION}.el7.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-devel-${KERNEL_VERSION}.el7.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-${KERNEL_VERSION}.el7.x86_64\n    yum install  -y --downloadonly --downloaddir=./centos-base groff-base\nelif [ \"$OS\" -eq 8 ]; then\n    yum install --downloadonly --downloaddir=./centos-base perl\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf.x86_64\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf-devel.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-headers-${KERNEL_VERSION}.el8.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-devel-${KERNEL_VERSION}.el8.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-${KERNEL_VERSION}.el8.x86_64\n    yum install  -y --downloadonly --downloaddir=./centos-base groff-base\nelse\n    echo \"Error os version\"\nfi\n\ncreaterepo centos-base/\nls -lh centos-base/\ntar -zcf centos-base.tar.gz centos-base/\necho \"end install kernel repo\"\nEOF\n\ncat >> Dockerfile << EOF\nFROM centos:7\nENV KERNEL_VERSION=\"\"\nENV OS=7\nRUN yum install -y createrepo\nCOPY run.sh .\nENTRYPOINT [\"/bin/bash\",\"run.sh\"]\nEOF\n\ndocker build -t test:v1 -f Dockerfile .\ndocker run -e KERNEL_VERSION=$TARGET_KERNEL_VERSION --name centos7.9 test:v1\ndocker cp centos7.9:/centos-base.tar.gz .\ntar -xzf centos-base.tar.gz\n

                                    Press Esc to exit insert mode, then type :wq to save and quit.

                                  2. Run the yum.sh script:

                                    bash -x yum.sh TARGET_KERNEL_VERSION\n

                                    The TARGET_KERNEL_VERSION argument specifies the kernel version of the cluster nodes. Note that the distribution identifier (for example .el7.x86_64 ) must not be included. For example:

                                    bash -x yum.sh 3.10.0-1160.95.1\n

                                  At this point, you have generated an offline yum source for kernel 3.10.0-1160.95.1.el7.x86_64: centos-base.

                                  "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#yum_1","title":"\u4e0a\u4f20\u79bb\u7ebf yum \u6e90\u5230\u6587\u4ef6\u670d\u52a1\u5668","text":"

                                  \u5728\u4e00\u4e2a\u80fd\u591f\u8bbf\u95ee\u4e92\u8054\u7f51\u548c\u6587\u4ef6\u670d\u52a1\u5668\u7684\u8282\u70b9\u4e0a\u8fdb\u884c\u64cd\u4f5c\u3002\u4e3b\u8981\u7528\u4e8e\u5c06\u4e0a\u4e00\u6b65\u4e2d\u751f\u6210\u7684 yum \u6e90\u4e0a\u4f20\u5230\u53ef\u4ee5\u88ab\u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u8fdb\u884c\u8bbf\u95ee\u7684\u6587\u4ef6\u670d\u52a1\u5668\u4e2d\u3002 \u6587\u4ef6\u670d\u52a1\u5668\u53ef\u4ee5\u4e3a Nginx \u3001 Minio \u6216\u5176\u5b83\u652f\u6301 Http \u534f\u8bae\u7684\u6587\u4ef6\u670d\u52a1\u5668\u3002

                                  \u672c\u64cd\u4f5c\u793a\u4f8b\u91c7\u7528\u7684\u662f\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u706b\u79cd\u8282\u70b9\u5185\u7f6e\u7684 Minio \u4f5c\u4e3a\u6587\u4ef6\u670d\u52a1\u5668\uff0cMinio \u76f8\u5173\u4fe1\u606f\u5982\u4e0b\uff1a

                                  • \u8bbf\u95ee\u5730\u5740\uff1a http://10.5.14.200:9000\uff08\u4e00\u822c\u4e3a{\u706b\u79cd\u8282\u70b9 IP} + {9000 \u7aef\u53e3}\uff09
                                  • \u767b\u5f55\u7528\u6237\u540d\uff1arootuser
                                  • \u767b\u5f55\u5bc6\u7801\uff1arootpass123

                                  • \u5728\u8282\u70b9\u5f53\u524d\u8def\u5f84\u4e0b\uff0c\u6267\u884c\u5982\u4e0b\u547d\u4ee4\u5c06\u8282\u70b9\u672c\u5730 mc \u547d\u4ee4\u884c\u5de5\u5177\u548c minio \u670d\u52a1\u5668\u5efa\u7acb\u94fe\u63a5\u3002

                                    mc config host add minio http://10.5.14.200:9000 rootuser rootpass123\n

                                    The expected output is:

                                    Added `minio` successfully.\n

                                    The mc command-line tool is the client CLI provided by the Minio file server. For details, see MinIO Client.

                                  • In the node's current directory, create a bucket named centos-base:

                                    mc mb -p minio/centos-base\n

                                    The expected output is:

                                    Bucket created successfully `minio/centos-base`.\n
                                  • Set the access policy of the centos-base bucket to allow public downloads, so that it can be accessed during the later GPU Operator installation:

                                    mc anonymous set download minio/centos-base\n

                                    The expected output is:

                                    Access permission for `minio/centos-base` is set to `download` \n
                                  • In the node's current directory, copy the offline yum source directory centos-base generated earlier into the minio/centos-base bucket on the Minio server:

                                    mc cp centos-base minio/centos-base --recursive\n
                                  "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#yum_2","title":"\u5728\u96c6\u7fa4\u521b\u5efa\u914d\u7f6e\u9879\u7528\u6765\u4fdd\u5b58 yum \u6e90\u4fe1\u606f","text":"

                                  \u5728\u5f85\u90e8\u7f72 GPU Operator \u96c6\u7fa4\u7684\u63a7\u5236\u8282\u70b9\u4e0a\u8fdb\u884c\u64cd\u4f5c\u3002

                                  1. \u6267\u884c\u5982\u4e0b\u547d\u4ee4\u521b\u5efa\u540d\u4e3a CentOS-Base.repo \u7684\u6587\u4ef6\uff0c\u7528\u6765\u6307\u5b9a yum \u6e90\u5b58\u50a8\u7684\u914d\u7f6e\u4fe1\u606f\u3002

                                    # \u6587\u4ef6\u540d\u79f0\u5fc5\u987b\u4e3a CentOS-Base.repo\uff0c\u5426\u5219\u5b89\u88c5 gpu-operator \u65f6\u65e0\u6cd5\u88ab\u8bc6\u522b\ncat > CentOS-Base.repo << EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base #\u6b65\u9aa4\u4e09\u4e2d\uff0c\u653e\u7f6e yum \u6e90\u7684\u6587\u4ef6\u670d\u52a1\u5668\u5730\u5740\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base #\u6b65\u9aa4\u4e09\u4e2d\uff0c\u653e\u7f6e yum \u6e90\u7684\u6587\u4ef6\u670d\u52a1\u5668\u5730\u5740\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
                                  2. \u57fa\u4e8e\u521b\u5efa\u7684 CentOS-Base.repo \u6587\u4ef6\uff0c\u5728 gpu-operator \u547d\u540d\u7a7a\u95f4\u4e0b\uff0c\u521b\u5efa\u540d\u4e3a local-repo-config \u7684\u914d\u7f6e\u6587\u4ef6\uff1a

                                    kubectl create configmap local-repo-config  -n gpu-operator --from-file=CentOS-Base.repo=/etc/yum.repos.d/extension.repo\n

                                    The expected output is:

                                    configmap/local-repo-config created\n

                                    The local-repo-config ConfigMap provides the value of the RepoConfig.ConfigMapName parameter when installing gpu-operator; the ConfigMap name can be customized.

                                  3. View the content of the local-repo-config ConfigMap:

                                    kubectl get configmap local-repo-config  -n gpu-operator -oyaml\n

                                    The expected output is:

                                    apiVersion: v1\ndata:\nCentOS-Base.repo: \"[extension-0]\\nbaseurl = http://10.6.232.5:32618/centos-base # file server path where the yum source was placed in step 2\\ngpgcheck = 0\\nname = kubean extension 0\\n  \\n[extension-1]\\nbaseurl\n    = http://10.6.232.5:32618/centos-base # file server path where the yum source was placed in step 2\\ngpgcheck = 0\\nname\n    = kubean extension 1\\n\"\nkind: ConfigMap\nmetadata:\ncreationTimestamp: \"2023-10-18T01:59:02Z\"\nname: local-repo-config\nnamespace: gpu-operator\nresourceVersion: \"59445080\"\nuid: c5f0ebab-046f-442c-b932-f9003e014387\n

                                  At this point, you have created the offline yum source configuration for the cluster where the GPU Operator will be deployed. It is consumed through the RepoConfig.ConfigMapName parameter during the offline installation of the GPU Operator, as sketched below.
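                                  A minimal Helm sketch of that final step, assuming the upstream gpu-operator Helm chart, where the counterpart of the platform's RepoConfig.ConfigMapName parameter is the driver.repoConfig.configMapName value:

                                    helm upgrade --install gpu-operator nvidia/gpu-operator -n gpu-operator --set driver.repoConfig.configMapName=local-repo-config\n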

                                  "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html","title":"\u6784\u5efa Red Hat 8.4 \u79bb\u7ebf yum \u6e90","text":"

                                  \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9884\u7f6e\u4e86 CentOS 7.9\uff0c\u5185\u6838\u4e3a 3.10.0-1160 \u7684 GPU operator \u79bb\u7ebf\u5305\u3002\u5176\u5b83 OS \u7c7b\u578b\u7684\u8282\u70b9\u6216\u5185\u6838\u9700\u8981\u7528\u6237\u624b\u52a8\u6784\u5efa\u79bb\u7ebf yum \u6e90\u3002

                                  \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u57fa\u4e8e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4efb\u610f\u8282\u70b9\u6784\u5efa Red Hat 8.4 \u79bb\u7ebf yum \u6e90\u5305\uff0c\u5e76\u5728\u5b89\u88c5 Gpu Operator \u65f6\uff0c\u901a\u8fc7 RepoConfig.ConfigMapName \u53c2\u6570\u6765\u4f7f\u7528\u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  1. \u7528\u6237\u5df2\u7ecf\u5728\u5e73\u53f0\u4e0a\u5b89\u88c5\u4e86 v0.12.0 \u53ca\u4ee5\u4e0a\u7248\u672c\u7684 addon \u79bb\u7ebf\u5305\u3002
                                  2. \u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u8282\u70b9 OS \u5fc5\u987b\u4e3a Red Hat 8.4\uff0c\u4e14\u5185\u6838\u7248\u672c\u5b8c\u5168\u4e00\u81f4\u3002
                                  3. \u51c6\u5907\u4e00\u4e2a\u80fd\u591f\u548c\u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u7f51\u7edc\u80fd\u591f\u8054\u901a\u7684\u6587\u4ef6\u670d\u52a1\u5668\uff0c\u5982 nginx \u6216 minio\u3002
                                  4. \u51c6\u5907\u4e00\u4e2a\u80fd\u591f\u8bbf\u95ee\u4e92\u8054\u7f51\u3001\u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u548c\u6587\u4ef6\u670d\u52a1\u5668\u7684\u8282\u70b9\uff0c\u4e14\u8282\u70b9\u4e0a\u5df2\u7ecf\u5b8c\u6210 Docker \u7684\u5b89\u88c5\u3002
                                  5. \u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u8282\u70b9\u5fc5\u987b\u4e3a Red Hat 8.4 4.18.0-305.el8.x86_64\u3002
                                  "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                  \u672c\u6587\u4ee5 Red Hat 8.4 4.18.0-305.el8.x86_64 \u8282\u70b9\u4e3a\u4f8b\uff0c\u4ecb\u7ecd\u5982\u4f55\u57fa\u4e8e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4efb\u610f\u8282\u70b9\u6784\u5efa Red Hat 8.4 \u79bb\u7ebf yum \u6e90\u5305\uff0c \u5e76\u5728\u5b89\u88c5 Gpu Operator \u65f6\uff0c\u901a\u8fc7 RepoConfig.ConfigMapName \u53c2\u6570\u6765\u4f7f\u7528\u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#yum","title":"\u4e0b\u8f7d\u706b\u79cd\u8282\u70b9\u4e2d\u7684 yum \u6e90","text":"

                                  \u4ee5\u4e0b\u64cd\u4f5c\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684 master \u8282\u70b9\u4e0a\u6267\u884c\u3002

                                  1. \u4f7f\u7528 ssh \u6216\u5176\u5b83\u65b9\u5f0f\u8fdb\u5165\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u5185\u4efb\u4e00\u8282\u70b9\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff1a

                                    cat /etc/yum.repos.d/extension.repo #\u67e5\u770b extension.repo \u4e2d\u7684\u5185\u5bb9\n

                                    \u9884\u671f\u8f93\u51fa\u5982\u4e0b\uff1a

                                    [extension-0]\nbaseurl = http://10.5.14.200:9000/kubean/redhat/$releasever/os/$basearch\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/kubean/redhat-iso/$releasever/os/$basearch/AppStream\ngpgcheck = 0\nname = kubean extension 1\n\n[extension-2]\nbaseurl = http://10.5.14.200:9000/kubean/redhat-iso/$releasever/os/$basearch/BaseOS\ngpgcheck = 0\nname = kubean extension 2\n
                                  2. In the root user's home directory, create a folder named redhat-base-repo:

                                    mkdir redhat-base-repo\n
                                  3. Install yum-utils, which provides the reposync tool used to download the rpm packages locally:

                                    yum install yum-utils\n
                                  4. Download the rpm packages from extension-1:

                                    reposync  -p redhat-base-repo  -n --repoid=extension-1\n
                                  5. Download the rpm packages from extension-2:

                                    reposync  -p redhat-base-repo  -n --repoid=extension-2\n
                                  "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#elfutils-libelf-devel-0187-4el8x86_64rpm","title":"\u4e0b\u8f7d elfutils-libelf-devel-0.187-4.el8.x86_64.rpm \u5305","text":"

                                  \u4ee5\u4e0b\u64cd\u4f5c\u5728\u8054\u7f51\u8282\u70b9\u6267\u884c\u64cd\u4f5c\uff0c\u5728\u64cd\u4f5c\u524d\uff0c\u60a8\u9700\u8981\u4fdd\u8bc1\u8054\u7f51\u8282\u70b9\u548c\u5168\u5c40\u670d\u52a1\u96c6\u7fa4 master \u8282\u70b9\u95f4\u7684\u7f51\u7edc\u8054\u901a\u6027\u3002

                                  1. \u5728\u8054\u7f51\u8282\u70b9\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c\u4e0b\u8f7d elfutils-libelf-devel-0.187-4.el8.x86_64.rpm \u5305\uff1a

                                    wget https://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/elfutils-libelf-devel-0.187-4.el8.x86_64.rpm\n
                                  2. \u5728\u5f53\u524d\u76ee\u5f55\u4e0b\u5c06 elfutils-libelf-devel-0.187-4.el8.x86_64.rpm \u5305\u4f20\u8f93\u81f3\u6b65\u9aa4\u4e00\u4e2d\u7684\u8282\u70b9\u4e0a\uff1a

                                    scp  elfutils-libelf-devel-0.187-4.el8.x86_64.rpm user@ip:~/redhat-base-repo/extension-2/Packages/\n

                                    \u4f8b\u5982\uff1a

                                    scp  elfutils-libelf-devel-0.187-4.el8.x86_64.rpm root@10.6.175.10:~/redhat-base-repo/extension-2/Packages/\n
                                  "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#yum-repo","title":"\u751f\u6210\u672c\u5730 yum repo","text":"

                                  \u4ee5\u4e0b\u64cd\u4f5c\u5728\u6b65\u9aa4\u4e00\u4e2d\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684 master \u8282\u70b9\u4e0a\u6267\u884c\u3002

                                  1. \u8fdb\u5165 yum repo \u76ee\u5f55\uff1a

                                    cd ~/redhat-base-repo/extension-1/Packages\ncd ~/redhat-base-repo/extension-2/Packages\n
                                  2. \u751f\u6210\u76ee\u5f55 repo \u7d22\u5f15\uff1a

                                    yum install createrepo -y  # \u82e5\u5df2\u5b89\u88c5 createrepo \u53ef\u7701\u7565\u6b64\u6b65\u9aa4\ncreaterepo_c ./\n

                                  \u81f3\u6b64\uff0c\u60a8\u5df2\u7ecf\u751f\u6210\u4e86\u5185\u6838\u4e3a 4.18.0-305.el8.x86_64 \u7684\u79bb\u7ebf\u7684 yum \u6e90\uff1a redhat-base-repo \u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#yum-repo_1","title":"\u5c06\u672c\u5730\u751f\u6210\u7684 yum repo \u4e0a\u4f20\u81f3\u6587\u4ef6\u670d\u52a1\u5668","text":"

                                  \u672c\u64cd\u4f5c\u793a\u4f8b\u91c7\u7528\u7684\u662f\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u706b\u79cd\u8282\u70b9\u5185\u7f6e\u7684 Minio \u4f5c\u4e3a\u6587\u4ef6\u670d\u52a1\u5668\uff0c\u7528\u6237\u53ef\u57fa\u4e8e\u81ea\u8eab\u60c5\u51b5\u9009\u62e9\u6587\u4ef6\u670d\u52a1\u5668\u3002Minio \u76f8\u5173\u4fe1\u606f\u5982\u4e0b\uff1a

                                  • \u8bbf\u95ee\u5730\u5740\uff1a http://10.5.14.200:9000\uff08\u4e00\u822c\u4e3a{\u706b\u79cd\u8282\u70b9 IP} + {9000 \u7aef\u53e3}\uff09
                                  • \u767b\u5f55\u7528\u6237\u540d\uff1arootuser
                                  • \u767b\u5f55\u5bc6\u7801\uff1arootpass123

                                  • \u5728\u8282\u70b9\u5f53\u524d\u8def\u5f84\u4e0b\uff0c\u6267\u884c\u5982\u4e0b\u547d\u4ee4\u5c06\u8282\u70b9\u672c\u5730 mc \u547d\u4ee4\u884c\u5de5\u5177\u548c minio \u670d\u52a1\u5668\u5efa\u7acb\u94fe\u63a5\u3002

                                    mc config host add minio \u6587\u4ef6\u670d\u52a1\u5668\u8bbf\u95ee\u5730\u5740 \u7528\u6237\u540d \u5bc6\u7801\n

                                    \u4f8b\u5982\uff1a

                                    mc config host add minio http://10.5.14.200:9000 rootuser rootpass123\n

                                    The expected output is:

                                    Added `minio` successfully.\n

                                    The mc command-line tool is the client CLI provided by the Minio file server. For details, see MinIO Client.

                                  • In the node's current directory, create a bucket named redhat-base:

                                    mc mb -p minio/redhat-base\n

                                    The expected output is:

                                    Bucket created successfully `minio/redhat-base`.\n
                                  • Set the access policy of the redhat-base bucket to allow public downloads, so that it can be accessed during the later GPU Operator installation:

                                    mc anonymous set download minio/redhat-base\n

                                    The expected output is:

                                    Access permission for `minio/redhat-base` is set to `download` \n
                                  • In the node's current directory, copy the offline yum source directory redhat-base-repo generated earlier into the minio/redhat-base bucket on the Minio server:

                                    mc cp redhat-base-repo minio/redhat-base --recursive\n
                                  "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#yum_1","title":"\u5728\u96c6\u7fa4\u521b\u5efa\u914d\u7f6e\u9879\u7528\u6765\u4fdd\u5b58 yum \u6e90\u4fe1\u606f","text":"

                                  \u672c\u6b65\u9aa4\u5728\u5f85\u90e8\u7f72 GPU Operator \u96c6\u7fa4\u7684\u63a7\u5236\u8282\u70b9\u4e0a\u8fdb\u884c\u64cd\u4f5c\u3002

                                  1. \u6267\u884c\u5982\u4e0b\u547d\u4ee4\u521b\u5efa\u540d\u4e3a redhat.repo \u7684\u6587\u4ef6\uff0c\u7528\u6765\u6307\u5b9a yum \u6e90\u5b58\u50a8\u7684\u914d\u7f6e\u4fe1\u606f\u3002

                                    # \u6587\u4ef6\u540d\u79f0\u5fc5\u987b\u4e3a redhat.repo\uff0c\u5426\u5219\u5b89\u88c5 gpu-operator \u65f6\u65e0\u6cd5\u88ab\u8bc6\u522b\ncat > redhat.repo << EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/redhat-base/redhat-base-repo/Packages #\u6b65\u9aa4\u4e00\u4e2d\uff0c\u653e\u7f6e yum \u6e90\u7684\u6587\u4ef6\u670d\u52a1\u5668\u5730\u5740\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/redhat-base/redhat-base-repo/Packages #\u6b65\u9aa4\u4e00\u4e2d\uff0c\u653e\u7f6e yum \u6e90\u7684\u6587\u4ef6\u670d\u52a1\u5668\u5730\u5740\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
                                  2. \u57fa\u4e8e\u521b\u5efa\u7684 redhat.repo \u6587\u4ef6\uff0c\u5728 gpu-operator \u547d\u540d\u7a7a\u95f4\u4e0b\uff0c\u521b\u5efa\u540d\u4e3a local-repo-config \u7684\u914d\u7f6e\u6587\u4ef6\uff1a

                                    kubectl create configmap local-repo-config  -n gpu-operator --from-file=./redhat.repo \n

                                    The expected output is:

                                    configmap/local-repo-config created\n

                                    The local-repo-config ConfigMap provides the value of the RepoConfig.ConfigMapName parameter when installing gpu-operator; the ConfigMap name can be customized.

                                  3. View the content of the local-repo-config ConfigMap:

                                    kubectl get configmap local-repo-config  -n gpu-operator -oyaml\n

                                  At this point, you have created the offline yum source configuration for the cluster where the GPU Operator will be deployed. It is consumed through the RepoConfig.ConfigMapName parameter during the offline installation of the GPU Operator.

                                  "},{"location":"end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html","title":"\u6784\u5efa Red Hat 7.9 \u79bb\u7ebf yum \u6e90","text":""},{"location":"end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#_1","title":"\u4f7f\u7528\u573a\u666f\u4ecb\u7ecd","text":"

                                  \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9884\u7f6e\u4e86 CentOS 7.9\uff0c\u5185\u6838\u4e3a 3.10.0-1160 \u7684 GPU Operator \u79bb\u7ebf\u5305\u3002\u5176\u5b83 OS \u7c7b\u578b\u7684\u8282\u70b9\u6216\u5185\u6838\u9700\u8981\u7528\u6237\u624b\u52a8\u6784\u5efa\u79bb\u7ebf yum \u6e90\u3002

                                  \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u57fa\u4e8e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4efb\u610f\u8282\u70b9\u6784\u5efa Red Hat 7.9 \u79bb\u7ebf yum \u6e90\u5305\uff0c\u5e76\u5728\u5b89\u88c5 Gpu Operator \u65f6\u4f7f\u7528 RepoConfig.ConfigMapName \u53c2\u6570\u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  1. \u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u8282\u70b9 OS \u5fc5\u987b\u4e3a Red Hat 7.9\uff0c\u4e14\u5185\u6838\u7248\u672c\u5b8c\u5168\u4e00\u81f4
                                  2. \u51c6\u5907\u4e00\u4e2a\u80fd\u591f\u4e0e\u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u7f51\u7edc\u8054\u901a\u7684\u6587\u4ef6\u670d\u52a1\u5668\uff0c\u5982 nginx \u6216 minio
                                  3. \u51c6\u5907\u4e00\u4e2a\u80fd\u591f\u8bbf\u95ee\u4e92\u8054\u7f51\u3001\u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u548c\u6587\u4ef6\u670d\u52a1\u5668\u7684\u8282\u70b9\uff0c \u4e14\u8282\u70b9\u4e0a\u5df2\u7ecf\u5b8c\u6210 Docker \u7684\u5b89\u88c5
                                  4. \u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u8282\u70b9\u5fc5\u987b\u4e3a Red Hat 7.9
                                  "},{"location":"end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#1-yum","title":"1. \u6784\u5efa\u76f8\u5173\u5185\u6838\u7248\u672c\u7684\u79bb\u7ebf Yum \u6e90","text":"
                                  1. \u4e0b\u8f7d rhel7.9 ISO

                                  2. \u4e0b\u8f7d\u4e0e Kubean \u7248\u672c\u5bf9\u5e94\u7684\u7684 rhel7.9 ospackage

                                    \u5728 \u5bb9\u5668\u7ba1\u7406 \u7684\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e2d\u627e\u5230 Helm \u5e94\u7528 \uff0c\u641c\u7d22 kubean\uff0c\u53ef\u67e5\u770b kubean \u7684\u7248\u672c\u53f7\u3002

                                    \u5728 kubean\u7684\u4ee3\u7801\u4ed3\u5e93 \u4e2d\u4e0b\u8f7d\u8be5\u7248\u672c\u7684 rhel7.9 ospackage\u3002

                                  3. \u901a\u8fc7\u5b89\u88c5\u5668\u5bfc\u5165\u79bb\u7ebf\u8d44\u6e90

                                    \u53c2\u8003\u5bfc\u5165\u79bb\u7ebf\u8d44\u6e90\u6587\u6863\u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#2-red-hat-79-os","title":"2. \u4e0b\u8f7d Red Hat 7.9 OS \u7684\u79bb\u7ebf\u9a71\u52a8\u955c\u50cf","text":"

                                  \u70b9\u51fb\u67e5\u770b\u4e0b\u8f7d\u5730\u5740\u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#3-red-hat-gpu-opreator","title":"3. \u5411\u706b\u79cd\u8282\u70b9\u4ed3\u5e93\u4e0a\u4f20 Red Hat GPU Opreator \u79bb\u7ebf\u955c\u50cf","text":"

                                  \u53c2\u8003\u5411\u706b\u79cd\u8282\u70b9\u4ed3\u5e93\u4e0a\u4f20 Red Hat GPU Opreator \u79bb\u7ebf\u955c\u50cf\u3002

                                  Note

                                  \u6b64\u53c2\u8003\u4ee5 rhel8.4 \u4e3a\u4f8b\uff0c\u8bf7\u6ce8\u610f\u4fee\u6539\u6210 rhel7.9\u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#4-yum","title":"4. \u5728\u96c6\u7fa4\u521b\u5efa\u914d\u7f6e\u9879\u7528\u6765\u4fdd\u5b58 Yum \u6e90\u4fe1\u606f","text":"

                                  \u5728\u5f85\u90e8\u7f72 GPU Operator \u96c6\u7fa4\u7684\u63a7\u5236\u8282\u70b9\u4e0a\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\u3002

                                  1. \u6267\u884c\u5982\u4e0b\u547d\u4ee4\u521b\u5efa\u540d\u4e3a CentOS-Base.repo \u7684\u6587\u4ef6\uff0c\u7528\u6765\u6307\u5b9a yum \u6e90\u5b58\u50a8\u7684\u914d\u7f6e\u4fe1\u606f\u3002

                                    # \u6587\u4ef6\u540d\u79f0\u5fc5\u987b\u4e3a CentOS-Base.repo\uff0c\u5426\u5219\u5b89\u88c5 gpu-operator \u65f6\u65e0\u6cd5\u88ab\u8bc6\u522b\ncat > CentOS-Base.repo <<  EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # \u706b\u79cd\u8282\u70b9\u7684\u7684\u6587\u4ef6\u670d\u52a1\u5668\u5730\u5740\uff0c\u4e00\u822c\u4e3a{\u706b\u79cd\u8282\u70b9 IP} + {9000 \u7aef\u53e3}\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # \u706b\u79cd\u8282\u70b9\u7684\u7684\u6587\u4ef6\u670d\u52a1\u5668\u5730\u5740\uff0c\u4e00\u822c\u4e3a{\u706b\u79cd\u8282\u70b9 IP} + {9000 \u7aef\u53e3}\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
                                  2. \u57fa\u4e8e\u521b\u5efa\u7684 CentOS-Base.repo \u6587\u4ef6\uff0c\u5728 gpu-operator \u547d\u540d\u7a7a\u95f4\u4e0b\uff0c\u521b\u5efa\u540d\u4e3a local-repo-config \u7684\u914d\u7f6e\u6587\u4ef6\uff1a

                                    kubectl create configmap local-repo-config -n gpu-operator --from-file=CentOS-Base.repo=/etc/yum.repos.d/extension.repo\n

                                    \u9884\u671f\u8f93\u51fa\u5982\u4e0b\uff1a

                                    configmap/local-repo-config created\n

                                    local-repo-config \u914d\u7f6e\u6587\u4ef6\u7528\u4e8e\u5728\u5b89\u88c5 gpu-operator \u65f6\uff0c\u63d0\u4f9b RepoConfig.ConfigMapName \u53c2\u6570\u7684\u503c\uff0c\u914d\u7f6e\u6587\u4ef6\u540d\u79f0\u7528\u6237\u53ef\u81ea\u5b9a\u4e49\u3002

                                  3. \u67e5\u770b local-repo-config \u7684\u914d\u7f6e\u6587\u4ef6\u7684\u5185\u5bb9\uff1a

                                    kubectl get configmap local-repo-config -n gpu-operator -oyaml\n

                                    \u9884\u671f\u8f93\u51fa\u5982\u4e0b\uff1a

                                    local-repo-config.yaml
                                    apiVersion: v1\ndata:\n  CentOS-Base.repo: \"[extension-0]\\nbaseurl = http://10.6.232.5:32618/centos-base # \u6b65\u9aa4 2 \u4e2d\uff0c\u653e\u7f6e yum \u6e90\u7684\u6587\u4ef6\u670d\u52a1\u5668\u8def\u5f84 \\ngpgcheck = 0\\nname = kubean extension 0\\n  \\n[extension-1]\\nbaseurl\n  = http://10.6.232.5:32618/centos-base # \u6b65\u9aa4 2 \u4e2d\uff0c\u653e\u7f6e yum \u6e90\u7684\u6587\u4ef6\u670d\u52a1\u5668\u8def\u5f84 \\ngpgcheck = 0\\nname\n  = kubean extension 1\\n\"\nkind: ConfigMap\nmetadata:\n  creationTimestamp: \"2023-10-18T01:59:02Z\"\n  name: local-repo-config\n  namespace: gpu-operator\n  resourceVersion: \"59445080\"\n  uid: c5f0ebab-046f-442c-b932-f9003e014387\n

                                  At this point, you have successfully created the offline yum source configuration file for the cluster where the GPU Operator is to be deployed. The RepoConfig.ConfigMapName parameter references it during the offline installation of the GPU Operator.
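
                                  As a reference, the following is a minimal sketch of passing this ConfigMap to the chart during an offline Helm install. The release name, chart source, and the value path driver.repoConfig.configMapName (assumed here to correspond to the RepoConfig.ConfigMapName field in the installation form) should be adapted to your environment.

                                  # Assumed Helm-based install; points the driver container at the offline yum repo defined above\nhelm upgrade --install gpu-operator nvidia/gpu-operator -n gpu-operator --set driver.repoConfig.configMapName=local-repo-config\n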

                                  "},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html","title":"GPU \u544a\u8b66\u89c4\u5219","text":"

                                  This article explains how to configure GPU-related alert rules on the \u7b97\u4e30 AI computing platform.

                                  "},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                  • \u96c6\u7fa4\u8282\u70b9\u4e0a\u5df2\u6b63\u786e\u5b89\u88c5 GPU \u8bbe\u5907
                                  • \u96c6\u7fa4\u4e2d\u5df2\u6b63\u786e\u5b89\u88c5 gpu-operator \u7ec4\u4ef6
                                  • \u5982\u679c\u7528\u5230\u4e86 vGPU \u8fd8\u9700\u8981\u5728\u96c6\u7fa4\u4e2d\u5b89\u88c5 Nvidia-vgpu \u7ec4\u4ef6\uff0c\u5e76\u4e14\u5f00\u542f servicemonitor
                                  • \u96c6\u7fa4\u6b63\u786e\u5b89\u88c5\u4e86 insight-agent \u7ec4\u4ef6
                                  "},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#gpu_1","title":"\u544a\u8b66\u5e38\u7528 GPU \u6307\u6807","text":"

                                  This section introduces the metrics commonly used for GPU alerts, in two parts:

                                  • GPU card-level metrics, which mainly reflect the running state of an individual GPU device.
                                  • Application-level metrics, which mainly reflect the running state of Pods on the GPU.
                                  "},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#gpu_2","title":"GPU \u5361\u6307\u6807","text":"\u6307\u6807\u540d\u79f0 \u6307\u6807\u5355\u4f4d \u8bf4\u660e DCGM_FI_DEV_GPU_UTIL % GPU \u5229\u7528\u7387 DCGM_FI_DEV_MEM_COPY_UTIL % \u663e\u5b58\u5229\u7528\u7387 DCGM_FI_DEV_ENC_UTIL % \u7f16\u7801\u5668\u5229\u7528\u7387 DCGM_FI_DEV_DEC_UTIL % \u89e3\u7801\u5668\u5229\u7528\u7387 DCGM_FI_DEV_FB_FREE MB \u8868\u793a\u663e\u5b58\u5269\u4f59\u91cf DCGM_FI_DEV_FB_USED MB \u8868\u793a\u663e\u5b58\u4f7f\u7528\u91cf DCGM_FI_DEV_GPU_TEMP \u6444\u6c0f\u5ea6 \u8868\u793a\u5f53\u524d GPU \u7684\u6e29\u5ea6\u5ea6\u6570 DCGM_FI_DEV_POWER_USAGE W \u8bbe\u5907\u7535\u6e90\u4f7f\u7528\u60c5\u51b5 DCGM_FI_DEV_XID_ERRORS - \u8868\u793a\u4e00\u6bb5\u65f6\u95f4\u5185\uff0c\u6700\u540e\u53d1\u751f\u7684 XID \u9519\u8bef\u53f7\u3002XID \u63d0\u4f9b GPU \u786c\u4ef6\u3001NVIDIA \u8f6f\u4ef6\u6216\u5e94\u7528\u4e2d\u7684\u9519\u8bef\u7c7b\u578b\u3001\u9519\u8bef\u4f4d\u7f6e\u3001\u9519\u8bef\u4ee3\u7801\u7b49\u4fe1\u606f\uff0c\u66f4\u591a XID \u4fe1\u606f"},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#_2","title":"\u5e94\u7528\u7ef4\u5ea6\u7684\u6307\u6807","text":"\u6307\u6807\u540d\u79f0 \u6307\u6807\u5355\u4f4d \u8bf4\u660e kpanda_gpu_pod_utilization % \u8868\u793a Pod \u5bf9 GPU \u7684\u4f7f\u7528\u7387 kpanda_gpu_mem_pod_usage MB \u8868\u793a Pod \u5bf9 GPU \u663e\u5b58\u7684\u4f7f\u7528\u91cf kpanda_gpu_mem_pod_utilization % \u8868\u793a Pod \u5bf9 GPU \u663e\u5b58\u7684\u4f7f\u7528\u7387"},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#_3","title":"\u8bbe\u7f6e\u544a\u8b66\u89c4\u5219","text":"

                                  This section explains how to set a GPU alert rule, using the GPU card utilization metric as an example. Choose metrics and write PromQL according to your actual business scenario; a PromQL sketch follows the steps below.

                                  Goal: fire an alert when the GPU card utilization stays at 80% for five seconds.

                                  1. On the Observability page, click Alerts -> Alert Policies -> Create Alert Policy

                                  2. Fill in the basic information

                                  3. Add a rule

                                  4. Select a notification method

                                  5. After the setup is complete, when a GPU keeps an 80% utilization for 5s, you will receive the following alert.
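
                                  As a reference, a minimal PromQL sketch for the rule above, assuming the DCGM metric DCGM_FI_DEV_GPU_UTIL listed earlier is collected (label filters depend on your deployment):

                                  # fires only if utilization never drops below 80% within the 5s window\nmin_over_time(DCGM_FI_DEV_GPU_UTIL[5s]) >= 80\n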

                                  "},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-metrics.html","title":"GPU \u76d1\u63a7\u6307\u6807","text":"

                                  This page lists some commonly used GPU monitoring metrics.

                                  "},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-metrics.html#_1","title":"\u96c6\u7fa4\u7ef4\u5ea6","text":"\u6307\u6807\u540d\u79f0 \u63cf\u8ff0 GPU \u5361\u6570 \u96c6\u7fa4\u4e0b\u6240\u6709\u7684 GPU \u5361\u6570\u91cf GPU \u5e73\u5747\u4f7f\u7528\u7387 \u96c6\u7fa4\u4e0b\u6240\u6709 GPU \u5361\u7684\u5e73\u5747\u7b97\u529b\u4f7f\u7528\u7387 GPU \u5e73\u5747\u663e\u5b58\u4f7f\u7528\u7387 \u96c6\u7fa4\u4e0b\u6240\u6709 GPU \u5361\u7684\u5e73\u5747\u663e\u5b58\u4f7f\u7528\u7387 GPU \u5361\u529f\u7387 \u96c6\u7fa4\u4e0b\u6240\u6709 GPU \u5361\u7684\u529f\u7387 GPU \u5361\u6e29\u5ea6 \u96c6\u7fa4\u4e0b\u6240\u6709 GPU \u5361\u7684\u6e29\u5ea6 GPU \u7b97\u529b\u4f7f\u7528\u7387\u7ec6\u8282 24 \u5c0f\u65f6\u5185\uff0c\u96c6\u7fa4\u4e0b\u6240\u6709 GPU \u5361\u7684\u4f7f\u7528\u7387\u7ec6\u8282\uff08\u5305\u542b max\u3001avg\u3001current\uff09 GPU \u663e\u5b58\u4f7f\u7528\u91cf\u7ec6\u8282 24 \u5c0f\u65f6\u5185\uff0c\u96c6\u7fa4\u4e0b\u6240\u6709 GPU \u5361\u7684\u663e\u5b58\u4f7f\u7528\u91cf\u7ec6\u8282\uff08\u5305\u542b min\u3001max\u3001avg\u3001current\uff09 GPU \u663e\u5b58\u5e26\u5bbd\u4f7f\u7528\u7387 \u8868\u793a\u5185\u5b58\u5e26\u5bbd\u5229\u7528\u7387\u3002\u4ee5 Nvidia GPU V100 \u4e3a\u4f8b\uff0c\u5176\u6700\u5927\u5185\u5b58\u5e26\u5bbd\u4e3a 900 GB/sec\uff0c\u5982\u679c\u5f53\u524d\u7684\u5185\u5b58\u5e26\u5bbd\u4e3a 450 GB/sec\uff0c\u5219\u5185\u5b58\u5e26\u5bbd\u5229\u7528\u7387\u4e3a 50%"},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-metrics.html#_2","title":"\u8282\u70b9\u7ef4\u5ea6","text":"\u6307\u6807\u540d\u79f0 \u63cf\u8ff0 GPU \u6a21\u5f0f \u8282\u70b9\u4e0a GPU \u5361\u7684\u4f7f\u7528\u6a21\u5f0f\uff0c\u5305\u542b\u6574\u5361\u6a21\u5f0f\u3001MIG \u6a21\u5f0f\u3001vGPU \u6a21\u5f0f GPU \u7269\u7406\u5361\u6570 \u8282\u70b9\u4e0a\u6240\u6709\u7684 GPU \u5361\u6570\u91cf GPU \u865a\u62df\u5361\u6570 \u8282\u70b9\u4e0a\u5df2\u7ecf\u88ab\u521b\u5efa\u51fa\u6765\u7684 vGPU \u8bbe\u5907\u6570\u91cf GPU MIG \u5b9e\u4f8b\u6570 \u8282\u70b9\u4e0a\u5df2\u7ecf\u88ab\u521b\u5efa\u51fa\u6765\u7684 MIG \u5b9e\u4f8b\u6570 GPU \u663e\u5b58\u5206\u914d\u7387 \u8282\u70b9\u4e0a\u6240\u6709 GPU \u5361\u7684\u663e\u5b58\u5206\u914d\u7387 GPU \u7b97\u529b\u5e73\u5747\u4f7f\u7528\u7387 \u8282\u70b9\u4e0a\u6240\u6709 GPU \u5361\u7684\u7b97\u529b\u5e73\u5747\u4f7f\u7528\u7387 GPU \u663e\u5b58\u5e73\u5747\u4f7f\u7528\u7387 \u8282\u70b9\u4e0a\u6240\u6709 GPU \u5361\u7684\u5e73\u5747\u663e\u5b58\u4f7f\u7528\u7387 GPU \u9a71\u52a8\u7248\u672c \u8282\u70b9\u4e0a GPU \u5361\u9a71\u52a8\u7684\u7248\u672c\u4fe1\u606f GPU \u7b97\u529b\u4f7f\u7528\u7387\u7ec6\u8282 24 \u5c0f\u65f6\u5185\uff0c\u8282\u70b9\u4e0a\u6bcf\u5f20 GPU \u5361\u7684\u7b97\u529b\u4f7f\u7528\u7387\u7ec6\u8282\uff08\u5305\u542b max\u3001avg\u3001current\uff09 GPU \u663e\u5b58\u4f7f\u7528\u91cf 24 \u5c0f\u65f6\u5185\uff0c\u8282\u70b9\u4e0a\u6bcf\u5f20 GPU \u5361\u7684\u663e\u5b58\u4f7f\u7528\u91cf\u7ec6\u8282\uff08\u5305\u542b min\u3001max\u3001avg\u3001current\uff09

                                  Troubleshooting GPU issues based on the XID status

                                  XID messages are error reports printed by the NVIDIA driver to the operating system's kernel log or event log. XID messages identify GPU error events and provide information such as the error type, error location, and error code in GPU hardware, NVIDIA software, or applications. If the XID exceptions check item on a GPU node is empty, there are no XID messages; if not, you can troubleshoot and resolve the problem yourself according to the table below, or view more XID messages.

                                  XID Message Description 13 Graphics Engine Exception. Usually caused by array out-of-bounds access or an illegal instruction; a hardware problem with low probability. 31 GPU memory page fault. Usually an illegal address access by the application; a driver or hardware problem with very low probability. 32 Invalid or corrupted push buffer stream. Reported by the DMA controller on the PCIE bus that manages communication between the NVIDIA driver and the GPU; usually caused by PCI quality issues rather than by your program. 38 Driver firmware error. Usually a driver firmware error rather than a hardware problem. 43 GPU stopped processing. Usually an error in your application itself rather than a hardware problem. 45 Preemptive cleanup, due to previous errors -- Most likely to see when running multiple cuda applications and hitting a DBE. Usually a GPU application exit caused by a manual exit or another failure (hardware, resource limits, etc.); XID 45 only reports the outcome, and the root cause usually requires further log analysis. 48 Double Bit ECC Error (DBE). Reported when the GPU encounters an uncorrectable error; the error is also reported to your application. Resetting the GPU or restarting the node is usually required to clear this error. 61 Internal micro-controller breakpoint/warning. An internal GPU engine has stopped working and your workload has been affected. 62 Internal micro-controller halt. Similar trigger scenario to XID 61. 63 ECC page retirement or row remapping recording event. When an application encounters a GPU memory hardware error, NVIDIA's self-correction mechanism retires or remaps the faulty memory region; the retirement and remapping information must be recorded in the infoROM to take effect permanently. Volta architecture: ECC page retirement events are successfully recorded in the infoROM. Ampere architecture: row remapping events are successfully recorded in the infoROM. 64 ECC page retirement or row remapper recording failure. Similar trigger scenario to XID 63, but XID 63 means the retirement and remapping information was successfully recorded in the infoROM, whereas XID 64 means the recording operation failed. 68 NVDEC0 Exception. Usually a hardware or driver problem. 74 NVLINK Error. An XID produced by an NVLink hardware error, indicating the GPU has a serious hardware failure and needs to be taken offline for repair. 79 GPU has fallen off the bus. The GPU hardware detected that the card dropped off and cannot be detected on the bus, indicating the GPU has a serious hardware failure and needs to be taken offline for repair. 92 High single-bit ECC error rate. Hardware or driver failure. 94 Contained ECC error. When an application encounters an uncorrectable GPU memory ECC error, NVIDIA's error containment mechanism tries to contain the error within the application that hit the hardware fault, so that it does not affect other applications running on the GPU node. This event is produced when containment succeeds; only applications hitting the uncorrectable ECC error are affected. 95 Uncontained ECC error. Similar trigger scenario to XID 94, but XID 94 means containment succeeded, whereas XID 95 means containment failed, indicating that all applications running on that GPU have been affected."},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-metrics.html#pod","title":"Pod Dimension","text":"Category Metric Name Description Application Overview GPU Card - Compute & Memory Pod GPU Compute Utilization Compute utilization of the GPU cards used by the current Pod Pod GPU Memory Utilization Memory utilization of the GPU cards used by the current Pod Pod GPU Memory Usage GPU memory usage of the current Pod GPU Memory Allocation GPU memory allocated to the current Pod Pod GPU Memory Copy Utilization Memory copy ratio of the GPU cards used by the current Pod GPU Card - Engine Overview GPU Graphics Engine Activity Percentage Fraction of time within a monitoring period during which the Graphics or Compute engine was active GPU Memory Bandwidth Utilization Fraction of cycles during which data is sent to or received from device memory (Memory BW Utilization). This value is an average over the time interval, not an instantaneous value. A higher value indicates higher device-memory utilization. A value of 1 (100%) means a DRAM instruction is executed every cycle over the entire interval (in practice, a peak of about 0.8 (80%) is the achievable maximum). A value of 0.2 (20%) means 20% of the cycles in the interval read from or write to device memory. Tensor Core Engine Utilization Fraction of time within a monitoring period during which the Tensor Core pipe was active FP16 Engine Utilization Fraction of time within a monitoring period during which the FP16 pipe was active FP32 Engine Utilization Fraction of time within a monitoring period during which the FP32 pipe was active FP64 Engine Utilization Fraction of time within a monitoring period during which the FP64 pipe was active GPU Decode Utilization Decode engine ratio of the GPU card GPU Encode Utilization Encode engine ratio of the GPU card GPU Card - Temperature & Power GPU Temperature Temperature of all GPU cards in the cluster GPU Power Power of all GPU cards in the cluster GPU Card - Total Energy GPU Total Energy Consumption Total energy consumed by the GPU card GPU Card - Clock GPU Memory Clock Memory clock frequency GPU Application SM Clock Application SM clock frequency GPU Application Memory Clock Application memory clock frequency GPU Video Engine Clock Video engine clock frequency GPU Throttle Reasons Reasons for clock throttling GPU Card - Other Details Graphics Engine Activity Fraction of time during which any portion of the graphics or compute engine was active. The graphics engine is active if a graphics/compute context is bound and the graphics/compute pipe is busy. The value is an average over the time interval, not an instantaneous value. SM Activity Fraction of time at least one warp was active on a multiprocessor, averaged over all multiprocessors. Note that \"active\" does not necessarily mean a warp is actively computing; for example, a warp waiting on a memory request is considered active. The value is an average over the time interval, not an instantaneous value. A value of 0.8 or greater is necessary, but not sufficient, for effective GPU use. A value less than 0.5 likely indicates ineffective GPU usage. Given a simplified view of the GPU architecture, if a GPU has N SMs, a kernel that uses N blocks and runs over the entire interval corresponds to an activity of 1 (100%). A kernel that uses N/5 blocks and runs over the entire interval corresponds to an activity of 0.2 (20%). A kernel that uses N blocks and runs for one fifth of the interval, with the SMs otherwise idle, also has an activity of 0.2 (20%). The value is independent of the number of threads per block (see DCGM_FI_PROF_SM_OCCUPANCY). SM Occupancy Ratio of resident warps on a multiprocessor to the maximum number of concurrent warps supported on the multiprocessor. The value is an average over the time interval, not an instantaneous value. Higher occupancy does not necessarily indicate higher GPU usage. For workloads limited by GPU memory bandwidth (see DCGM_FI_PROF_DRAM_ACTIVE), higher occupancy indicates more effective GPU usage. However, if the workload is compute-bound (i.e., not limited by GPU memory bandwidth or latency), higher occupancy does not necessarily correlate with higher GPU usage. Calculating occupancy is not simple; it depends on factors such as GPU properties, threads per block, registers per thread, and shared memory per block. Use the CUDA Occupancy Calculator to explore various occupancy scenarios. Tensor Activity Fraction of cycles during which the tensor (HMMA / IMMA) pipe was active. The value is an average over the time interval, not an instantaneous value. A higher value indicates higher Tensor Core utilization. An activity of 1 (100%) corresponds to issuing a tensor instruction every other cycle over the entire interval. An activity of 0.2 (20%) could mean 20% of SMs at 100% utilization over the entire period, 100% of SMs at 20% utilization over the entire period, 100% of SMs at 100% utilization for 20% of the period, or any combination in between (see DCGM_FI_PROF_SM_ACTIVE to help disambiguate these possibilities). FP64 Engine Activity Fraction of cycles during which the FP64 (double-precision) pipe was active. The value is an average over the time interval, not an instantaneous value. A higher value indicates higher FP64-core utilization. An activity of 1 (100%) corresponds to an FP64 instruction on every SM every fourth cycle on Volta over the entire interval. An activity of 0.2 (20%) could mean 20% of SMs at 100% utilization over the entire period, 100% of SMs at 20% utilization over the entire period, 100% of SMs at 100% utilization for 20% of the period, or any combination in between (see DCGM_FI_PROF_SM_ACTIVE to help disambiguate these possibilities). FP32 Engine Activity Fraction of cycles during which the FMA (FP32 (single-precision) and integer) pipe was active. The value is an average over the time interval, not an instantaneous value. A higher value indicates higher FP32-core utilization. An activity of 1 (100%) corresponds to executing an FP32 instruction every other cycle over the entire interval. An activity of 0.2 (20%) could mean 20% of SMs at 100% utilization over the entire period, 100% of SMs at 20% utilization over the entire period, 100% of SMs at 100% utilization for 20% of the period, or any combination in between (see DCGM_FI_PROF_SM_ACTIVE to help disambiguate these possibilities). FP16 Engine Activity Fraction of cycles during which the FP16 (half-precision) pipe was active. The value is an average over the time interval, not an instantaneous value. A higher value indicates higher FP16-core utilization. An activity of 1 (100%) corresponds to executing an FP16 instruction every other cycle over the entire interval. An activity of 0.2 (20%) could mean 20% of SMs at 100% utilization over the entire period, 100% of SMs at 20% utilization over the entire period, 100% of SMs at 100% utilization for 20% of the period, or any combination in between (see DCGM_FI_PROF_SM_ACTIVE to help disambiguate these possibilities). Memory Bandwidth Utilization Fraction of cycles during which data is sent to or received from device memory. The value is an average over the time interval, not an instantaneous value. A higher value indicates higher device-memory utilization. An activity of 1 (100%) corresponds to a DRAM instruction every cycle over the entire interval (in practice, a peak of about 0.8 (80%) is the achievable maximum). An activity of 0.2 (20%) means 20% of the cycles in the interval read from or write to device memory. NVLink Bandwidth Rate of data transmitted/received over NVLink (excluding protocol headers), in bytes per second. The value is an average over a period of time, not an instantaneous value. For example, if 1 GB of data is transferred within 1 second, the rate is 1 GB/s regardless of whether the data is transferred at a constant rate or in bursts. The theoretical maximum NVLink Gen2 bandwidth is 25 GB/s per link per direction. PCIe Bandwidth Rate of data transmitted/received over the PCIe bus, including protocol headers and data payloads, in bytes per second. The value is an average over a period of time, not an instantaneous value. For example, if 1 GB of data is transferred within 1 second, the rate is 1 GB/s regardless of whether the data is transferred at a constant rate or in bursts. The theoretical maximum PCIe Gen3 bandwidth is 985 MB/s per lane. PCIe Transmit Rate Rate of data transmitted by the node's GPU cards over the PCIe bus PCIe Receive Rate Rate of data received by the node's GPU cards over the PCIe bus"},{"location":"end-user/kpanda/gpu/nvidia/mig/index.html","title":"NVIDIA Multi-Instance GPU (MIG) Overview","text":""},{"location":"end-user/kpanda/gpu/nvidia/mig/index.html#mig","title":"MIG Scenarios","text":"
                                  • Multi-tenant cloud environments

                                    MIG allows a cloud service provider to partition a physical GPU into multiple independent GPU instances, each of which can be allocated to a different tenant. This enables resource isolation and independence, meeting the GPU compute demands of multiple tenants.

                                  • Containerized applications

                                    MIG enables finer-grained GPU resource management in containerized environments. By partitioning a physical GPU into multiple MIG instances, each container can be allocated independent GPU compute resources, providing better performance isolation and resource utilization.

                                  • Batch jobs

                                    For batch jobs requiring large-scale parallel computing, MIG provides higher compute performance and larger GPU memory capacity. Each MIG instance can use part of the physical GPU's compute resources, accelerating large-scale computing tasks.

                                  • AI / machine learning training

                                    MIG provides more compute power and GPU memory capacity when training large-scale deep learning models. By partitioning the physical GPU into multiple MIG instances, each instance can train models independently, improving training efficiency and throughput.

                                  Overall, NVIDIA MIG is suitable for scenarios that need finer-grained GPU resource allocation and management. It enables resource isolation, improves utilization, and meets the GPU compute demands of multiple users or applications.

                                  "},{"location":"end-user/kpanda/gpu/nvidia/mig/index.html#mig_1","title":"MIG \u6982\u8ff0","text":"

                                  NVIDIA Multi-Instance GPU (MIG) is a feature introduced by NVIDIA on the H100, A100, and A30 series GPU cards. It partitions a physical GPU into multiple GPU instances to provide finer-grained resource sharing and isolation. MIG can partition a GPU into up to seven GPU instances, allowing one physical GPU card to serve multiple users with separate GPU resources for optimal GPU utilization.

                                  This feature allows multiple applications or users to share GPU resources simultaneously, improving compute-resource utilization and increasing system scalability.

                                  With MIG, each GPU instance's processors have separate and isolated paths through the entire memory system: the on-chip crossbar ports, L2 cache banks, memory controllers, and DRAM address buses are all assigned uniquely to an individual instance.

                                  This ensures that an individual user's workload can run with predictable throughput and latency, with the same L2 cache allocation and DRAM bandwidth. MIG can partition the available GPU compute resources (including streaming multiprocessors or SMs, and GPU engines such as copy engines or decoders) to provide a defined quality of service (QoS) and fault isolation for different clients, such as cloud hosts, containers, or processes. MIG enables multiple GPU instances to run in parallel on a single physical GPU.

                                  MIG allows multiple vGPUs (and cloud hosts) to run in parallel on a single GPU instance, while retaining the isolation guarantees that vGPU provides. For details on partitioning GPUs with vGPU and MIG, refer to NVIDIA Multi-Instance GPU and NVIDIA Virtual Compute Server.

                                  "},{"location":"end-user/kpanda/gpu/nvidia/mig/index.html#mig_2","title":"MIG \u67b6\u6784","text":"

                                  The following is an overview diagram of MIG. It shows that MIG virtualizes one physical GPU card into seven GPU instances, which can be used by multiple users.

                                  "},{"location":"end-user/kpanda/gpu/nvidia/mig/index.html#_1","title":"\u91cd\u8981\u6982\u5ff5","text":"
                                  • SM \uff1a\u6d41\u5f0f\u591a\u5904\u7406\u5668\uff08Streaming Multiprocessor\uff09\uff0cGPU \u7684\u6838\u5fc3\u8ba1\u7b97\u5355\u5143\uff0c\u8d1f\u8d23\u6267\u884c\u56fe\u5f62\u6e32\u67d3\u548c\u901a\u7528\u8ba1\u7b97\u4efb\u52a1\u3002 \u6bcf\u4e2a SM \u5305\u542b\u4e00\u7ec4 CUDA \u6838\u5fc3\uff0c\u4ee5\u53ca\u5171\u4eab\u5185\u5b58\u3001\u5bc4\u5b58\u5668\u6587\u4ef6\u548c\u5176\u4ed6\u8d44\u6e90\uff0c\u53ef\u4ee5\u540c\u65f6\u6267\u884c\u591a\u4e2a\u7ebf\u7a0b\u3002 \u6bcf\u4e2a MIG \u5b9e\u4f8b\u90fd\u62e5\u6709\u4e00\u5b9a\u6570\u91cf\u7684 SM \u548c\u5176\u4ed6\u76f8\u5173\u8d44\u6e90\uff0c\u4ee5\u53ca\u88ab\u5212\u5206\u51fa\u6765\u7684\u663e\u5b58\u3002
                                  • GPU Memory Slice \uff1aGPU \u5185\u5b58\u5207\u7247\uff0cGPU \u5185\u5b58\u5207\u7247\u662f GPU \u5185\u5b58\u7684\u6700\u5c0f\u90e8\u5206\uff0c\u5305\u62ec\u76f8\u5e94\u7684\u5185\u5b58\u63a7\u5236\u5668\u548c\u7f13\u5b58\u3002 GPU \u5185\u5b58\u5207\u7247\u5927\u7ea6\u662f GPU \u5185\u5b58\u8d44\u6e90\u603b\u91cf\u7684\u516b\u5206\u4e4b\u4e00\uff0c\u5305\u62ec\u5bb9\u91cf\u548c\u5e26\u5bbd\u3002
                                  • GPU SM Slice \uff1aGPU SM \u5207\u7247\u662f GPU \u4e0a SM \u7684\u6700\u5c0f\u8ba1\u7b97\u5355\u4f4d\u3002\u5728 MIG \u6a21\u5f0f\u4e0b\u914d\u7f6e\u65f6\uff0c GPU SM \u5207\u7247\u5927\u7ea6\u662f GPU \u4e2d\u53ef\u7528 SMS \u603b\u6570\u7684\u4e03\u5206\u4e4b\u4e00\u3002
                                  • GPU Slice \uff1aGPU \u5207\u7247\u662f GPU \u4e2d\u7531\u5355\u4e2a GPU \u5185\u5b58\u5207\u7247\u548c\u5355\u4e2a GPU SM \u5207\u7247\u7ec4\u5408\u5728\u4e00\u8d77\u7684\u6700\u5c0f\u90e8\u5206\u3002
                                  • GPU Instance \uff1aGPU \u5b9e\u4f8b \uff08GI\uff09 \u662f GPU \u5207\u7247\u548c GPU \u5f15\u64ce\uff08DMA\u3001NVDEC \u7b49\uff09\u7684\u7ec4\u5408\u3002 GPU \u5b9e\u4f8b\u4e2d\u7684\u4efb\u4f55\u5185\u5bb9\u59cb\u7ec8\u5171\u4eab\u6240\u6709 GPU \u5185\u5b58\u5207\u7247\u548c\u5176\u4ed6 GPU \u5f15\u64ce\uff0c\u4f46\u5b83\u7684 SM \u5207\u7247\u53ef\u4ee5\u8fdb\u4e00\u6b65\u7ec6\u5206\u4e3a\u8ba1\u7b97\u5b9e\u4f8b\uff08CI\uff09\u3002 GPU \u5b9e\u4f8b\u63d0\u4f9b\u5185\u5b58 QoS\u3002\u6bcf\u4e2a GPU \u5207\u7247\u90fd\u5305\u542b\u4e13\u7528\u7684 GPU \u5185\u5b58\u8d44\u6e90\uff0c\u8fd9\u4e9b\u8d44\u6e90\u4f1a\u9650\u5236\u53ef\u7528\u5bb9\u91cf\u548c\u5e26\u5bbd\uff0c\u5e76\u63d0\u4f9b\u5185\u5b58 QoS\u3002 \u6bcf\u4e2a GPU \u5185\u5b58\u5207\u7247\u83b7\u5f97\u603b GPU \u5185\u5b58\u8d44\u6e90\u7684\u516b\u5206\u4e4b\u4e00\uff0c\u6bcf\u4e2a GPU SM \u5207\u7247\u83b7\u5f97 SM \u603b\u6570\u7684\u4e03\u5206\u4e4b\u4e00\u3002
                                  • Compute Instance \uff1aGPU \u5b9e\u4f8b\u7684\u8ba1\u7b97\u5207\u7247\u53ef\u4ee5\u8fdb\u4e00\u6b65\u7ec6\u5206\u4e3a\u591a\u4e2a\u8ba1\u7b97\u5b9e\u4f8b \uff08CI\uff09\uff0c\u5176\u4e2d CI \u5171\u4eab\u7236 GI \u7684\u5f15\u64ce\u548c\u5185\u5b58\uff0c\u4f46\u6bcf\u4e2a CI \u90fd\u6709\u4e13\u7528\u7684 SM \u8d44\u6e90\u3002
                                  "},{"location":"end-user/kpanda/gpu/nvidia/mig/index.html#gpu-gi","title":"GPU \u5b9e\u4f8b\uff08GI\uff09","text":"

                                  This section describes how to create various partitions on a GPU, using an A100-40GB as an example to demonstrate partitioning a single physical GPU card.

                                  GPU partitioning is done using memory slices, so the A100-40GB GPU can be thought of as having 8x5GB memory slices and 7 GPU SM slices. The diagram below shows the memory slices available on the A100.

                                  As described above, creating a GPU instance (GI) requires combining a certain number of memory slices with a certain number of compute slices. In the diagram below, one 5GB memory slice is combined with 1 compute slice to create the 1g.5gb GI profile:

                                  Similarly, 4x5GB memory slices can be combined with 4x1 compute slices to create the 4g.20gb GI profile:

                                  "},{"location":"end-user/kpanda/gpu/nvidia/mig/index.html#ci","title":"\u8ba1\u7b97\u5b9e\u4f8b\uff08CI\uff09","text":"

                                  The compute slices of a GPU instance (GI) can be further subdivided into multiple compute instances (CIs). A CI shares the engines and memory of its parent GI, but each CI has dedicated SM resources. Using the same 4g.20gb example as above, a CI can be created to use only the first compute slice, with the 1c.4g.20gb compute configuration, as shown in blue in the diagram below:

                                  In this case, 4 different CIs can be created by choosing any of the compute slices. Two compute slices can also be combined to create the 2c.4g.20gb compute configuration:

                                  In addition, 3 compute slices can be combined to create a compute profile, or all 4 compute slices can be combined to create the 3c.4g.20gb and 4c.4g.20gb compute profiles. When all 4 compute slices are merged, the profile is simply called 4g.20gb. The command-line sketch below walks through this example.
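
                                  As a reference, a minimal command-line sketch of the partitioning described above, run on the GPU node. Profile names and IDs vary by GPU model and driver, so list the profiles first and treat the exact arguments as assumptions to adapt.

                                  # enable MIG mode on GPU 0 (the GPU must be idle; a reset may be required)\nnvidia-smi -i 0 -mig 1\n# list the supported GI profiles, then create a 4g.20gb GPU instance (an ID from -lgip also works)\nnvidia-smi mig -lgip\nnvidia-smi mig -i 0 -cgi 4g.20gb\n# list the CI profiles of the new GI, then create a CI from a profile ID shown by -lcip\nnvidia-smi mig -lcip\nnvidia-smi mig -cci {profile id} -gi {gi instance id}\n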

                                  "},{"location":"end-user/kpanda/gpu/nvidia/mig/create_mig.html","title":"\u5f00\u542f MIG \u529f\u80fd","text":"

                                  This section describes how to enable the NVIDIA MIG feature. NVIDIA currently provides two strategies for exposing MIG devices on Kubernetes nodes:

                                  • Single mode: a node exposes a single type of MIG device across all of its GPUs.
                                  • Mixed mode: a node exposes a mix of MIG device types across all of its GPUs.

                                  For details, refer to NVIDIA GPU Card Usage Modes. A sketch of setting the strategy follows.
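
                                  As a reference, a minimal sketch of selecting the strategy when installing gpu-operator via Helm; the release name and chart source are assumptions here:

                                  # mig.strategy accepts single or mixed\nhelm upgrade --install gpu-operator nvidia/gpu-operator -n gpu-operator --set mig.strategy=mixed\n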

                                  "},{"location":"end-user/kpanda/gpu/nvidia/mig/create_mig.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  • \u5f85\u5b89\u88c5 GPU \u9a71\u52a8\u8282\u70b9\u7cfb\u7edf\u8981\u6c42\u8bf7\u53c2\u8003\uff1aGPU \u652f\u6301\u77e9\u9635
                                  • \u786e\u8ba4\u96c6\u7fa4\u8282\u70b9\u4e0a\u5177\u6709\u5bf9\u5e94\u578b\u53f7\u7684 GPU \u5361\uff08NVIDIA H100\u3001 A100 \u548c A30 Tensor Core GPU\uff09\uff0c \u8be6\u60c5\u53c2\u8003 GPU \u652f\u6301\u77e9\u9635\u3002
                                  • \u8282\u70b9\u4e0a\u7684\u6240\u6709 GPU \u5fc5\u987b\uff1a\u5c5e\u4e8e\u540c\u4e00\u4ea7\u54c1\u7ebf\uff08\u4f8b\u5982 A100-SXM-40GB\uff09
                                  "},{"location":"end-user/kpanda/gpu/nvidia/mig/create_mig.html#gpu-operator-addon","title":"\u5b89\u88c5 gpu-operator Addon","text":""},{"location":"end-user/kpanda/gpu/nvidia/mig/create_mig.html#_2","title":"\u53c2\u6570\u914d\u7f6e","text":"

                                  When installing the Operator, you need to set the MigManager Config parameter accordingly. The default is default-mig-parted-config; you can also customize the partitioning policy configuration file:

                                  "},{"location":"end-user/kpanda/gpu/nvidia/mig/create_mig.html#_3","title":"\u81ea\u5b9a\u4e49\u5207\u5206\u7b56\u7565","text":"
                                    ## Custom GI instance partitioning configuration\n  all-disabled:\n    - devices: all\n      mig-enabled: false\n  all-enabled:\n    - devices: all\n      mig-enabled: true\n      mig-devices: {}\n  all-1g.10gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.5gb: 7\n  all-1g.10gb.me:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb+me: 1\n  all-1g.20gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.20gb: 4\n  all-2g.20gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        2g.20gb: 3\n  all-3g.40gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        3g.40gb: 2\n  all-4g.40gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        4g.40gb: 1\n  all-7g.80gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        7g.80gb: 1\n  all-balanced:\n    - device-filter: [\"0x233110DE\", \"0x232210DE\", \"0x20B210DE\", \"0x20B510DE\", \"0x20F310DE\", \"0x20F510DE\"]\n      devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb: 2\n        2g.20gb: 1\n        3g.40gb: 1\n  # After this is set, CI instances will be partitioned according to the configured specs\n  custom-config:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        3g.40gb: 2\n

                                  Set custom-config in the YAML above; once set, CI instances will be partitioned according to the specified specs.

                                  custom-config:\n  - devices: all\n    mig-enabled: true\n    mig-devices:\n      1c.3g.40gb: 6\n

                                  After the configuration is complete, you can use GPU MIG resources when confirming the deployment of an application.

                                  "},{"location":"end-user/kpanda/gpu/nvidia/mig/create_mig.html#gpu","title":"\u5207\u6362\u8282\u70b9 GPU \u6a21\u5f0f","text":"

                                  Note

                                  After switching the GPU mode or modifying the partitioning specs, you need to restart nvidia-mig-manager.
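
                                  As a reference, a minimal sketch of such a restart, assuming nvidia-mig-manager runs as a DaemonSet in the gpu-operator namespace (names may differ in your installation):

                                  kubectl -n gpu-operator rollout restart daemonset nvidia-mig-manager\n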

                                  After gpu-operator is installed successfully, the node defaults to whole-card mode, with an indicator on the node management page, as shown below:

                                  Click ┇ on the right side of the node list, select GPU Mode Switch, then choose the corresponding MIG mode and partitioning policy. Here, MIXED mode is used as an example:

                                  There are two configurations here:

                                  1. MIG policy: Mixed or Single.
                                  2. Partitioning policy: the policy here must match a key in the default-mig-parted-config (or user-defined partitioning policy) configuration file.

                                  After clicking the OK button, wait about one minute and refresh the page. The MIG mode switch is then complete:
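
                                  To double-check the result from the node itself, you can list the exposed MIG devices; a minimal sketch (output format varies by driver version):

                                  # each MIG device is listed alongside its parent GPU\nnvidia-smi -L\n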

                                  "},{"location":"end-user/kpanda/gpu/nvidia/mig/mig_command.html","title":"MIG \u76f8\u5173\u547d\u4ee4","text":"

                                  GI commands:

                                  Subcommand Description nvidia-smi mig -lgi List the created GI instances nvidia-smi mig -dgi -gi Delete the specified GI instance nvidia-smi mig -lgip List the GI profiles nvidia-smi mig -cgi Create a GI with the specified profile ID

                                  CI commands:

                                  Subcommand Description nvidia-smi mig -lcip { -gi {gi Instance ID}} List the CI profiles; specify -gi to view the CIs that can be created for a specific GI instance nvidia-smi mig -lci List the created CI instances nvidia-smi mig -cci {profile id} -gi {gi instance id} Create a CI instance for the specified GI nvidia-smi mig -dci -ci Delete the specified CI instance

                                  GI+CI commands:

                                  Subcommand Description nvidia-smi mig -i 0 -cgi {gi profile id} -C {ci profile id} Create GI + CI instances directly"},{"location":"end-user/kpanda/gpu/nvidia/mig/mig_usage.html","title":"Using MIG GPU Resources","text":"

                                  This section describes how applications use MIG GPU resources.

                                  "},{"location":"end-user/kpanda/gpu/nvidia/mig/mig_usage.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  • \u5df2\u7ecf\u90e8\u7f72 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 \u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u4e14\u5e73\u53f0\u8fd0\u884c\u6b63\u5e38\u3002
                                  • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                                  • \u5df2\u5b89\u88c5 GPU Operator\u3002
                                  • \u96c6\u7fa4\u8282\u70b9\u4e0a\u5177\u6709\u5bf9\u5e94\u578b\u53f7\u7684 GPU \u5361
                                  "},{"location":"end-user/kpanda/gpu/nvidia/mig/mig_usage.html#ui-mig-gpu","title":"UI \u754c\u9762\u4f7f\u7528 MIG GPU","text":"
                                  1. \u786e\u8ba4\u96c6\u7fa4\u662f\u5426\u5df2\u8bc6\u522b GPU \u5361\u7c7b\u578b

                                    \u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 -> \u8282\u70b9\u7ba1\u7406 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u6b63\u786e\u8bc6\u522b\u4e3a MIG \u6a21\u5f0f\u3002

                                  2. \u901a\u8fc7\u955c\u50cf\u90e8\u7f72\u5e94\u7528\uff0c\u53ef\u9009\u62e9\u5e76\u4f7f\u7528 NVIDIA MIG \u8d44\u6e90\u3002

                                    • MIG Single \u6a21\u5f0f\u793a\u4f8b\uff08\u4e0e\u6574\u5361\u4f7f\u7528\u65b9\u5f0f\u76f8\u540c\uff09\uff1a

                                      Note

                                      MIG single \u7b56\u7565\u5141\u8bb8\u7528\u6237\u4ee5\u4e0e GPU \u6574\u5361\u76f8\u540c\u7684\u65b9\u5f0f\uff08nvidia.com/gpu\uff09\u8bf7\u6c42\u548c\u4f7f\u7528GPU\u8d44\u6e90\uff0c\u4e0d\u540c\u7684\u662f\u8fd9\u4e9b\u8d44\u6e90\u53ef\u4ee5\u662f GPU \u7684\u4e00\u90e8\u5206\uff08MIG\u8bbe\u5907\uff09\uff0c\u800c\u4e0d\u662f\u6574\u4e2aGPU\u3002\u4e86\u89e3\u66f4\u591a GPU MIG \u6a21\u5f0f\u8bbe\u8ba1

                                    • MIG Mixed \u6a21\u5f0f\u793a\u4f8b\uff1a
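
                                  As a reference, a minimal command-line sketch for the verification in step 1, assuming kubectl access to the cluster (the node name is a placeholder):

                                  # MIG resources appear among the node's allocatable resources, e.g. nvidia.com/mig-4g.20gb\nkubectl describe node <node-name> | grep nvidia.com/\n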

                                  "},{"location":"end-user/kpanda/gpu/nvidia/mig/mig_usage.html#yaml-mig","title":"YAML \u914d\u7f6e\u4f7f\u7528 MIG","text":"

                                  MIG Single mode:

                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mig-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mig-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mig-demo\n    spec:\n      containers:\n        - name: mig-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/gpu: 2 # (1)!\n          imagePullPolicy: Always\n      restartPolicy: Always\n
                                  1. Number of MIG GPUs to request

                                  MIG Mixed mode:

                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mig-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mig-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mig-demo\n    spec:\n      containers:\n        - name: mig-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/mig-4g.20gb: 1 # (1)!\n          imagePullPolicy: Always\n      restartPolicy: Always\n
                                  1. Expose each MIG device through the nvidia.com/mig-g.gb resource type

                                  After entering the container, you can see that only one MIG device is used.
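
                                  A minimal sketch of checking this from inside the Pod; the Pod name is a placeholder:

                                  kubectl exec -it <pod-name> -- nvidia-smi -L\n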

                                  "},{"location":"end-user/kpanda/gpu/nvidia/vgpu/hami.html","title":"\u6784\u5efa vGPU \u663e\u5b58\u8d85\u914d\u955c\u50cf","text":"

                                  The vGPU memory oversubscription feature has been removed from the HAMi project. Currently, the image is rebuilt with a libvgpu.so file that supports memory oversubscription.

                                  Dockerfile
                                  FROM docker.m.daocloud.io/projecthami/hami:v2.3.11\nCOPY libvgpu.so /k8s-vgpu/lib/nvidia/\n

                                  Run the following command to build the image:

                                  docker build -t release.daocloud.io/projecthami/hami:v2.3.11 -f Dockerfile .\n

                                  Then push the image to release.daocloud.io. A push sketch follows.
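
                                  A minimal sketch of the push, assuming you are already logged in to the registry:

                                  docker push release.daocloud.io/projecthami/hami:v2.3.11\n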

                                  "},{"location":"end-user/kpanda/gpu/nvidia/vgpu/vgpu_addon.html","title":"\u5b89\u88c5 NVIDIA vGPU Addon","text":"

                                  To virtualize a single NVIDIA GPU card into multiple virtual GPUs and allocate them to different cloud hosts or users, you can use NVIDIA's vGPU capability. This section describes how to install the vGPU plugin on the \u7b97\u4e30 AI computing platform, which is a prerequisite for using NVIDIA vGPU capabilities.

                                  "},{"location":"end-user/kpanda/gpu/nvidia/vgpu/vgpu_addon.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  • \u53c2\u8003 GPU \u652f\u6301\u77e9\u9635 \u786e\u8ba4\u96c6\u7fa4\u8282\u70b9\u4e0a\u5177\u6709\u5bf9\u5e94\u578b\u53f7\u7684 GPU \u5361\u3002
                                  • \u5f53\u524d\u96c6\u7fa4\u5df2\u901a\u8fc7 Operator \u90e8\u7f72 NVIDIA \u9a71\u52a8\uff0c\u5177\u4f53\u53c2\u8003 GPU Operator \u79bb\u7ebf\u5b89\u88c5\u3002
                                  "},{"location":"end-user/kpanda/gpu/nvidia/vgpu/vgpu_addon.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                  1. \u529f\u80fd\u6a21\u5757\u8def\u5f84\uff1a \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u7ba1\u7406 \uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u6a21\u677f -> \u641c\u7d22 nvidia-vgpu \u3002

                                  2. The vGPU installation process provides several parameters for basic modification; to modify advanced parameters, click the YAML column to edit (a values sketch follows these steps):

                                    • deviceCoreScaling: the ratio of NVIDIA device compute available for use; the default is 1. It can be greater than 1 (enabling virtual compute, an experimental feature). If the devicePlugin.deviceCoreScaling parameter is configured as S, then in a Kubernetes cluster where the device plugin is deployed, the vGPUs split from this GPU will contain S * 100% compute in total.

                                    • deviceMemoryScaling: the ratio of NVIDIA device memory available for use; the default is 1. It can be greater than 1 (enabling virtual memory, an experimental feature). For an NVIDIA GPU with memory size M, if the devicePlugin.deviceMemoryScaling parameter is configured as S, then in a Kubernetes cluster where the device plugin is deployed, the vGPUs split from this GPU will contain S * M memory in total.

                                    • deviceSplitCount: an integer; the default is 10. The number of partitions of a GPU; each GPU cannot be assigned more tasks than this configured number. If configured as N, at most N tasks can exist on each GPU at the same time.

                                    • Resources: the resource usage of the corresponding vgpu-device-plugin and vgpu-schedule pods.

                                    • ServiceMonitor: disabled by default. When enabled, you can go to the Observability module to view vGPU-related monitoring. To enable it, make sure insight-agent is installed and running; otherwise, the NVIDIA vGPU Addon installation will fail.

                                  3. \u5b89\u88c5\u6210\u529f\u4e4b\u540e\u4f1a\u5728\u6307\u5b9a Namespace \u4e0b\u51fa\u73b0\u5982\u4e0b\u4e24\u4e2a\u7c7b\u578b\u7684 Pod\uff0c\u5373\u8868\u793a NVIDIA vGPU \u63d2\u4ef6\u5df2\u5b89\u88c5\u6210\u529f\uff1a

                                  \u5b89\u88c5\u6210\u529f\u540e\uff0c\u90e8\u7f72\u5e94\u7528\u53ef\u4f7f\u7528 vGPU \u8d44\u6e90\u3002
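For reference, a minimal sketch of how the options from step 2 might be laid out in the Helm values YAML. The nesting under devicePlugin follows the devicePlugin.* parameter names above; the exact structure of your chart version may differ:

devicePlugin:\n  deviceCoreScaling: 1     # compute ratio per GPU (vGPUs share S * 100% compute in total)\n  deviceMemoryScaling: 1   # memory ratio per GPU (vGPUs share S * M memory in total)\n  deviceSplitCount: 10     # maximum number of concurrent tasks per GPU\n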

                                  Note

The NVIDIA vGPU Addon does not support upgrading directly from the old version v2.0.0 to the latest version v2.0.0+1; to upgrade, uninstall the old version and then reinstall.
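A sketch of the uninstall-then-reinstall flow with the Helm CLI; the release name, namespace, and chart reference below are placeholders, so substitute the values used in your cluster:

helm -n <namespace> uninstall <vgpu-release>\nhelm -n <namespace> install <vgpu-release> <nvidia-vgpu-chart> --version <new-version>\n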

                                  "},{"location":"end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html","title":"\u5e94\u7528\u4f7f\u7528 Nvidia vGPU","text":"

                                  \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528 vGPU \u80fd\u529b\u3002

                                  "},{"location":"end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  • \u96c6\u7fa4\u8282\u70b9\u4e0a\u5177\u6709\u5bf9\u5e94\u578b\u53f7\u7684 GPU \u5361
                                  • \u5df2\u6210\u529f\u5b89\u88c5 vGPU Addon\uff0c\u8be6\u60c5\u53c2\u8003 GPU Addon \u5b89\u88c5
                                  • \u5df2\u5b89\u88c5 GPU Operator\uff0c\u5e76\u5df2 \u5173\u95ed Nvidia.DevicePlugin \u80fd\u529b\uff0c\u53ef\u53c2\u8003 GPU Operator \u79bb\u7ebf\u5b89\u88c5
                                  "},{"location":"end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html#vgpu","title":"\u754c\u9762\u4f7f\u7528 vGPU","text":"
                                  1. \u786e\u8ba4\u96c6\u7fa4\u662f\u5426\u5df2\u68c0\u6d4b GPU \u5361\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u81ea\u52a8\u542f\u7528\u5e76\u81ea\u52a8\u68c0\u6d4b\u5bf9\u5e94 GPU \u7c7b\u578b\u3002 \u76ee\u524d\u96c6\u7fa4\u4f1a\u81ea\u52a8\u542f\u7528 GPU \uff0c\u5e76\u4e14\u8bbe\u7f6e GPU \u7c7b\u578b\u4e3a Nvidia vGPU \u3002

                                  2. \u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u9009\u62e9\u7c7b\u578b\uff08Nvidia vGPU\uff09\u4e4b\u540e\uff0c\u4f1a\u81ea\u52a8\u51fa\u73b0\u5982\u4e0b\u51e0\u4e2a\u53c2\u6570\u9700\u8981\u586b\u5199\uff1a

                                    • \u7269\u7406\u5361\u6570\u91cf\uff08nvidia.com/vgpu\uff09\uff1a\u8868\u793a\u5f53\u524d Pod \u9700\u8981\u6302\u8f7d\u51e0\u5f20\u7269\u7406\u5361\uff0c\u8f93\u5165\u503c\u5fc5\u987b\u4e3a\u6574\u6570\u4e14 \u5c0f\u4e8e\u7b49\u4e8e \u5bbf\u4e3b\u673a\u4e0a\u7684\u5361\u6570\u91cf\u3002
                                    • GPU \u7b97\u529b\uff08nvidia.com/gpucores\uff09: \u8868\u793a\u6bcf\u5f20\u5361\u5360\u7528\u7684 GPU \u7b97\u529b\uff0c\u503c\u8303\u56f4\u4e3a 0-100\uff1b \u5982\u679c\u914d\u7f6e\u4e3a 0\uff0c \u5219\u8ba4\u4e3a\u4e0d\u5f3a\u5236\u9694\u79bb\uff1b\u914d\u7f6e\u4e3a100\uff0c\u5219\u8ba4\u4e3a\u72ec\u5360\u6574\u5f20\u5361\u3002
                                    • GPU \u663e\u5b58\uff08nvidia.com/gpumem\uff09: \u8868\u793a\u6bcf\u5f20\u5361\u5360\u7528\u7684 GPU \u663e\u5b58\uff0c\u503c\u5355\u4f4d\u4e3a MB\uff0c\u6700\u5c0f\u503c\u4e3a 1\uff0c\u6700\u5927\u503c\u4e3a\u6574\u5361\u7684\u663e\u5b58\u503c\u3002

                                    \u5982\u679c\u4e0a\u8ff0\u503c\u914d\u7f6e\u7684\u6709\u95ee\u9898\u5219\u4f1a\u51fa\u73b0\u8c03\u5ea6\u5931\u8d25\uff0c\u8d44\u6e90\u5206\u914d\u4e0d\u4e86\u7684\u60c5\u51b5\u3002
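If a Pod stays in Pending, the scheduler events usually say which of the three values was rejected. A generic inspection sketch (the pod name and namespace are placeholders):

kubectl get pods -n <namespace> --field-selector=status.phase=Pending\nkubectl describe pod <pod-name> -n <namespace>   # check the Events section for the scheduling failure reason\n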

                                  "},{"location":"end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html#yaml-vgpu","title":"YAML \u914d\u7f6e\u4f7f\u7528 vGPU","text":"

                                  \u53c2\u8003\u5982\u4e0b\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\uff0c\u5728\u8d44\u6e90\u7533\u8bf7\u548c\u9650\u5236\u914d\u7f6e\u4e2d\u589e\u52a0 nvidia.com/vgpu: '1' \u53c2\u6570\u6765\u914d\u7f6e\u5e94\u7528\u4f7f\u7528\u7269\u7406\u5361\u7684\u6570\u91cf\u3002

apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-vgpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-vgpu-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: full-vgpu-demo\n    spec:\n      containers:\n        - name: full-vgpu-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/gpucores: '20'   # request 20% of each GPU's compute\n              nvidia.com/gpumem: '200'   # request 200MB of memory on each GPU\n              nvidia.com/vgpu: '1'   # number of GPUs requested\n          imagePullPolicy: Always\n      restartPolicy: Always\n
                                  "},{"location":"end-user/kpanda/gpu/volcano/drf.html","title":"DRF\uff08Dominant Resource Fairness\uff09 \u8c03\u5ea6\u7b56\u7565","text":"

                                  DRF \u8c03\u5ea6\u7b56\u7565\u8ba4\u4e3a\u5360\u7528\u8d44\u6e90\u8f83\u5c11\u7684\u4efb\u52a1\u5177\u6709\u66f4\u9ad8\u7684\u4f18\u5148\u7ea7\u3002\u8fd9\u6837\u80fd\u591f\u6ee1\u8db3\u66f4\u591a\u7684\u4f5c\u4e1a\uff0c\u4e0d\u4f1a\u56e0\u4e3a\u4e00\u4e2a\u80d6\u4e1a\u52a1\uff0c \u997f\u6b7b\u5927\u6279\u5c0f\u4e1a\u52a1\u3002DRF \u8c03\u5ea6\u7b97\u6cd5\u80fd\u591f\u786e\u4fdd\u5728\u591a\u79cd\u7c7b\u578b\u8d44\u6e90\u5171\u5b58\u7684\u73af\u5883\u4e0b\uff0c\u5c3d\u53ef\u80fd\u6ee1\u8db3\u5206\u914d\u7684\u516c\u5e73\u539f\u5219\u3002

                                  "},{"location":"end-user/kpanda/gpu/volcano/drf.html#_1","title":"\u4f7f\u7528\u65b9\u5f0f","text":"

                                  DRF \u8c03\u5ea6\u7b56\u7565\u9ed8\u8ba4\u5df2\u542f\u7528\uff0c\u65e0\u9700\u4efb\u4f55\u914d\u7f6e\u3002

kubectl -n volcano-system get configmaps volcano-scheduler-configmap -o yaml\n
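The exact content varies by Volcano version, but the drf plugin should appear in one of the tiers of the scheduler configuration. An illustrative excerpt, not the full ConfigMap:

actions: \"enqueue, allocate, backfill\"\ntiers:\n- plugins:\n  - name: priority\n  - name: gang\n- plugins:\n  - name: drf          # Dominant Resource Fairness\n  - name: predicates\n  - name: proportion\n  - name: nodeorder\n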
                                  "},{"location":"end-user/kpanda/gpu/volcano/drf.html#_2","title":"\u4f7f\u7528\u6848\u4f8b","text":"

                                  \u5728 AI \u8bad\u7ec3\uff0c\u6216\u5927\u6570\u636e\u8ba1\u7b97\u4e2d\uff0c\u901a\u8fc7\u6709\u9650\u8fd0\u884c\u4f7f\u7528\u8d44\u6e90\u5c11\u7684\u4efb\u52a1\uff0c\u8fd9\u6837\u53ef\u4ee5\u8ba9\u96c6\u7fa4\u8d44\u6e90\u4f7f\u7528\u7387\u66f4\u9ad8\uff0c\u800c\u4e14\u8fd8\u80fd\u907f\u514d\u5c0f\u4efb\u52a1\u88ab\u997f\u6b7b\u3002 \u5982\u4e0b\u521b\u5efa\u4e24\u4e2a Job\uff0c\u4e00\u4e2a\u662f\u5c0f\u8d44\u6e90\u9700\u6c42\uff0c\u4e00\u4e2a\u662f\u5927\u8d44\u6e90\u9700\u6c42\uff0c\u53ef\u4ee5\u770b\u51fa\u6765\u5c0f\u8d44\u6e90\u9700\u6c42\u7684 Job \u4f18\u5148\u8fd0\u884c\u8d77\u6765\u3002

                                  cat <<EOF | kubectl apply -f -  \napiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: small-resource  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 4  \n  priorityClassName: small-resource  \n  tasks:  \n    - replicas: 4  \n      name: \"test\"  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                requests:  \n                  cpu: \"1\"  \n          restartPolicy: OnFailure  \n---  \napiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: large-resource  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 4  \n  priorityClassName: large-resource  \n  tasks:  \n    - replicas: 4  \n      name: \"test\"  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                requests:  \n                  cpu: \"2\"  \n          restartPolicy: OnFailure  \nEOF\n
                                  "},{"location":"end-user/kpanda/gpu/volcano/numa.html","title":"NUMA \u4eb2\u548c\u6027\u8c03\u5ea6","text":"

                                  NUMA \u8282\u70b9\u662f Non-Uniform Memory Access\uff08\u975e\u7edf\u4e00\u5185\u5b58\u8bbf\u95ee\uff09\u67b6\u6784\u4e2d\u7684\u4e00\u4e2a\u57fa\u672c\u7ec4\u6210\u5355\u5143\uff0c\u4e00\u4e2a Node \u8282\u70b9\u662f\u591a\u4e2a NUMA \u8282\u70b9\u7684\u96c6\u5408\uff0c \u5728\u591a\u4e2a NUMA \u8282\u70b9\u4e4b\u95f4\u8fdb\u884c\u5185\u5b58\u8bbf\u95ee\u65f6\u4f1a\u4ea7\u751f\u5ef6\u8fdf\uff0c\u5f00\u53d1\u8005\u53ef\u4ee5\u901a\u8fc7\u4f18\u5316\u4efb\u52a1\u8c03\u5ea6\u548c\u5185\u5b58\u5206\u914d\u7b56\u7565\uff0c\u6765\u63d0\u9ad8\u5185\u5b58\u8bbf\u95ee\u6548\u7387\u548c\u6574\u4f53\u6027\u80fd\u3002

                                  "},{"location":"end-user/kpanda/gpu/volcano/numa.html#_1","title":"\u4f7f\u7528\u573a\u666f","text":"

                                  Numa \u4eb2\u548c\u6027\u8c03\u5ea6\u7684\u5e38\u89c1\u573a\u666f\u662f\u90a3\u4e9b\u5bf9 CPU \u53c2\u6570\u654f\u611f/\u8c03\u5ea6\u5ef6\u8fdf\u654f\u611f\u7684\u8ba1\u7b97\u5bc6\u96c6\u578b\u4f5c\u4e1a\u3002\u5982\u79d1\u5b66\u8ba1\u7b97\u3001\u89c6\u9891\u89e3\u7801\u3001\u52a8\u6f2b\u52a8\u753b\u6e32\u67d3\u3001\u5927\u6570\u636e\u79bb\u7ebf\u5904\u7406\u7b49\u5177\u4f53\u573a\u666f\u3002

                                  "},{"location":"end-user/kpanda/gpu/volcano/numa.html#_2","title":"\u8c03\u5ea6\u7b56\u7565","text":"

                                  Pod \u8c03\u5ea6\u65f6\u53ef\u4ee5\u91c7\u7528\u7684 NUMA \u653e\u7f6e\u7b56\u7565\uff0c\u5177\u4f53\u7b56\u7565\u5bf9\u5e94\u7684\u8c03\u5ea6\u884c\u4e3a\u8bf7\u53c2\u89c1 Pod \u8c03\u5ea6\u884c\u4e3a\u8bf4\u660e\u3002

                                  • single-numa-node\uff1aPod \u8c03\u5ea6\u65f6\u4f1a\u9009\u62e9\u62d3\u6251\u7ba1\u7406\u7b56\u7565\u5df2\u7ecf\u8bbe\u7f6e\u4e3a single-numa-node \u7684\u8282\u70b9\u6c60\u4e2d\u7684\u8282\u70b9\uff0c\u4e14 CPU \u9700\u8981\u653e\u7f6e\u5728\u76f8\u540c NUMA \u4e0b\uff0c\u5982\u679c\u8282\u70b9\u6c60\u4e2d\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u8282\u70b9\uff0cPod \u5c06\u65e0\u6cd5\u88ab\u8c03\u5ea6\u3002
                                  • restricted\uff1aPod \u8c03\u5ea6\u65f6\u4f1a\u9009\u62e9\u62d3\u6251\u7ba1\u7406\u7b56\u7565\u5df2\u7ecf\u8bbe\u7f6e\u4e3a restricted \u8282\u70b9\u6c60\u7684\u8282\u70b9\uff0c\u4e14 CPU \u9700\u8981\u653e\u7f6e\u5728\u76f8\u540c\u7684 NUMA \u96c6\u5408\u4e0b\uff0c\u5982\u679c\u8282\u70b9\u6c60\u4e2d\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u8282\u70b9\uff0cPod \u5c06\u65e0\u6cd5\u88ab\u8c03\u5ea6\u3002
                                  • best-effort\uff1aPod \u8c03\u5ea6\u65f6\u4f1a\u9009\u62e9\u62d3\u6251\u7ba1\u7406\u7b56\u7565\u5df2\u7ecf\u8bbe\u7f6e\u4e3a best-effort \u8282\u70b9\u6c60\u7684\u8282\u70b9\uff0c\u4e14\u5c3d\u91cf\u5c06 CPU \u653e\u7f6e\u5728\u76f8\u540c NUMA \u4e0b\uff0c\u5982\u679c\u6ca1\u6709\u8282\u70b9\u6ee1\u8db3\u8fd9\u4e00\u6761\u4ef6\uff0c\u5219\u9009\u62e9\u6700\u4f18\u8282\u70b9\u8fdb\u884c\u653e\u7f6e\u3002
                                  "},{"location":"end-user/kpanda/gpu/volcano/numa.html#_3","title":"\u8c03\u5ea6\u539f\u7406","text":"

                                  \u5f53Pod\u8bbe\u7f6e\u4e86\u62d3\u6251\u7b56\u7565\u65f6\uff0cVolcano \u4f1a\u6839\u636e Pod \u8bbe\u7f6e\u7684\u62d3\u6251\u7b56\u7565\u9884\u6d4b\u5339\u914d\u7684\u8282\u70b9\u5217\u8868\u3002 \u8c03\u5ea6\u8fc7\u7a0b\u5982\u4e0b\uff1a

                                  1. \u6839\u636e Pod \u8bbe\u7f6e\u7684 Volcano \u62d3\u6251\u7b56\u7565\uff0c\u7b5b\u9009\u5177\u6709\u76f8\u540c\u7b56\u7565\u7684\u8282\u70b9\u3002

                                  2. \u5728\u8bbe\u7f6e\u4e86\u76f8\u540c\u7b56\u7565\u7684\u8282\u70b9\u4e2d\uff0c\u7b5b\u9009 CPU \u62d3\u6251\u6ee1\u8db3\u8be5\u7b56\u7565\u8981\u6c42\u7684\u8282\u70b9\u8fdb\u884c\u8c03\u5ea6\u3002

                                  Pod \u53ef\u914d\u7f6e\u7684\u62d3\u6251\u7b56\u7565 1. \u6839\u636e Pod \u8bbe\u7f6e\u7684\u62d3\u6251\u7b56\u7565\uff0c\u7b5b\u9009\u53ef\u8c03\u5ea6\u7684\u8282\u70b9 2. \u8fdb\u4e00\u6b65\u7b5b\u9009 CPU \u62d3\u6251\u6ee1\u8db3\u7b56\u7565\u7684\u8282\u70b9\u8fdb\u884c\u8c03\u5ea6 none \u9488\u5bf9\u914d\u7f6e\u4e86\u4ee5\u4e0b\u51e0\u79cd\u62d3\u6251\u7b56\u7565\u7684\u8282\u70b9\uff0c\u8c03\u5ea6\u65f6\u5747\u65e0\u7b5b\u9009\u884c\u4e3a\u3002none\uff1a\u53ef\u8c03\u5ea6\uff1bbest-effort\uff1a\u53ef\u8c03\u5ea6\uff1brestricted\uff1a\u53ef\u8c03\u5ea6\uff1bsingle-numa-node\uff1a\u53ef\u8c03\u5ea6 - best-effort \u7b5b\u9009\u62d3\u6251\u7b56\u7565\u540c\u6837\u4e3a\u201cbest-effort\u201d\u7684\u8282\u70b9\uff1anone\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bbest-effort\uff1a\u53ef\u8c03\u5ea6\uff1brestricted\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bsingle-numa-node\uff1a\u4e0d\u53ef\u8c03\u5ea6 \u5c3d\u53ef\u80fd\u6ee1\u8db3\u7b56\u7565\u8981\u6c42\u8fdb\u884c\u8c03\u5ea6\uff1a\u4f18\u5148\u8c03\u5ea6\u81f3\u5355 NUMA \u8282\u70b9\uff0c\u5982\u679c\u5355 NUMA \u8282\u70b9\u65e0\u6cd5\u6ee1\u8db3 CPU \u7533\u8bf7\u503c\uff0c\u5141\u8bb8\u8c03\u5ea6\u81f3\u591a\u4e2a NUMA \u8282\u70b9\u3002 restricted \u7b5b\u9009\u62d3\u6251\u7b56\u7565\u540c\u6837\u4e3a\u201crestricted\u201d\u7684\u8282\u70b9\uff1anone\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bbest-effort\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1brestricted\uff1a\u53ef\u8c03\u5ea6\uff1bsingle-numa-node\uff1a\u4e0d\u53ef\u8c03\u5ea6 \u4e25\u683c\u9650\u5236\u7684\u8c03\u5ea6\u7b56\u7565\uff1a\u5355 NUMA \u8282\u70b9\u7684CPU\u5bb9\u91cf\u4e0a\u9650\u5927\u4e8e\u7b49\u4e8e CPU \u7684\u7533\u8bf7\u503c\u65f6\uff0c\u4ec5\u5141\u8bb8\u8c03\u5ea6\u81f3\u5355 NUMA \u8282\u70b9\u3002\u6b64\u65f6\u5982\u679c\u5355 NUMA \u8282\u70b9\u5269\u4f59\u7684 CPU \u53ef\u4f7f\u7528\u91cf\u4e0d\u8db3\uff0c\u5219 Pod \u65e0\u6cd5\u8c03\u5ea6\u3002\u5355 NUMA \u8282\u70b9\u7684 CPU \u5bb9\u91cf\u4e0a\u9650\u5c0f\u4e8e CPU \u7684\u7533\u8bf7\u503c\u65f6\uff0c\u53ef\u5141\u8bb8\u8c03\u5ea6\u81f3\u591a\u4e2a NUMA \u8282\u70b9\u3002 single-numa-node \u7b5b\u9009\u62d3\u6251\u7b56\u7565\u540c\u6837\u4e3a\u201csingle-numa-node\u201d\u7684\u8282\u70b9\uff1anone\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bbest-effort\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1brestricted\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bsingle-numa-node\uff1a\u53ef\u8c03\u5ea6 \u4ec5\u5141\u8bb8\u8c03\u5ea6\u81f3\u5355 NUMA \u8282\u70b9\u3002"},{"location":"end-user/kpanda/gpu/volcano/numa.html#numa_1","title":"\u914d\u7f6e NUMA \u4eb2\u548c\u8c03\u5ea6\u7b56\u7565","text":"
                                  1. \u5728 Job \u4e2d\u914d\u7f6e policies

tasks: \n  - replicas: 1 \n    name: \"test-1\" \n    topologyPolicy: single-numa-node \n  - replicas: 1 \n    name: \"test-2\" \n    topologyPolicy: best-effort \n
2. Modify the kubelet scheduling policy by setting the --topology-manager-policy parameter; four policies are supported (a configuration sketch follows this list):

  • none (default)
  • best-effort
  • restricted
  • single-numa-node
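A sketch of step 2, assuming kubelet is managed through a KubeletConfiguration file (the field name is standard Kubernetes; the file path depends on your installation):

# /var/lib/kubelet/config.yaml (path is an assumption)\napiVersion: kubelet.config.k8s.io/v1beta1\nkind: KubeletConfiguration\ntopologyManagerPolicy: single-numa-node   # or none / best-effort / restricted\n

Restart kubelet after changing the policy for it to take effect.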
                                  "},{"location":"end-user/kpanda/gpu/volcano/numa.html#_4","title":"\u4f7f\u7528\u6848\u4f8b","text":"
                                  1. \u793a\u4f8b\u4e00\uff1a\u5728\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u4e2d\u914d\u7f6e NUMA \u4eb2\u548c\u6027\u3002

kind: Deployment  \napiVersion: apps/v1  \nmetadata:  \n  name: numa-tset  \nspec:  \n  replicas: 1  \n  selector:  \n    matchLabels:  \n      app: numa-tset  \n  template:  \n    metadata:  \n      labels:  \n        app: numa-tset  \n      annotations:  \n        volcano.sh/numa-topology-policy: single-numa-node    # set the topology policy  \n    spec:  \n      containers:  \n        - name: container-1  \n          image: nginx:alpine  \n          resources:  \n            requests:  \n              cpu: 2           # must be an integer and must match the value in limits  \n              memory: 2048Mi  \n            limits:  \n              cpu: 2           # must be an integer and must match the value in requests  \n              memory: 2048Mi  \n      imagePullSecrets:  \n      - name: default-secret\n
2. Example 2: Create a Volcano Job that uses NUMA affinity.

                                    apiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: vj-test  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 1  \n  tasks:  \n    - replicas: 1  \n      name: \"test\"  \n      topologyPolicy: best-effort   # set the topology policy for task  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                limits:  \n                  cpu: 20  \n                  memory: \"100Mi\"  \n          restartPolicy: OnFailure\n
                                  "},{"location":"end-user/kpanda/gpu/volcano/numa.html#numa_2","title":"NUMA \u8c03\u5ea6\u5206\u6790","text":"

                                  \u5047\u8bbe NUMA \u8282\u70b9\u60c5\u51b5\u5982\u4e0b\uff1a

                                  \u5de5\u4f5c\u8282\u70b9 \u8282\u70b9\u7b56\u7565\u62d3\u6251\u7ba1\u7406\u5668\u7b56\u7565 NUMA \u8282\u70b9 0 \u4e0a\u7684\u53ef\u5206\u914d CPU NUMA \u8282\u70b9 1 \u4e0a\u7684\u53ef\u5206\u914d CPU node-1 single-numa-node 16U 16U node-2 best-effort 16U 16U node-3 best-effort 20U 20U
                                  • \u793a\u4f8b\u4e00\u4e2d\uff0cPod \u7684 CPU \u7533\u8bf7\u503c\u4e3a 2U\uff0c\u8bbe\u7f6e\u62d3\u6251\u7b56\u7565\u4e3a\u201csingle-numa-node\u201d\uff0c\u56e0\u6b64\u4f1a\u88ab\u8c03\u5ea6\u5230\u76f8\u540c\u7b56\u7565\u7684 node-1\u3002
                                  • \u793a\u4f8b\u4e8c\u4e2d\uff0cPod \u7684 CPU \u7533\u8bf7\u503c\u4e3a20U\uff0c\u8bbe\u7f6e\u62d3\u6251\u7b56\u7565\u4e3a\u201cbest-effort\u201d\uff0c\u5b83\u5c06\u88ab\u8c03\u5ea6\u5230 node-3\uff0c \u56e0\u4e3a node-3 \u53ef\u4ee5\u5728\u5355\u4e2a NUMA \u8282\u70b9\u4e0a\u5206\u914d Pod \u7684 CPU \u8bf7\u6c42\uff0c\u800c node-2 \u9700\u8981\u5728\u4e24\u4e2a NUMA \u8282\u70b9\u4e0a\u6267\u884c\u6b64\u64cd\u4f5c\u3002
                                  "},{"location":"end-user/kpanda/gpu/volcano/numa.html#cpu","title":"\u67e5\u770b\u5f53\u524d\u8282\u70b9\u7684 CPU \u6982\u51b5","text":"

                                  \u60a8\u53ef\u4ee5\u901a\u8fc7 lscpu \u547d\u4ee4\u67e5\u770b\u5f53\u524d\u8282\u70b9\u7684 CPU \u6982\u51b5\uff1a

                                  lscpu \n... \nCPU(s): 32 \nNUMA node(s): 2 \nNUMA node0 CPU(s): 0-15 \nNUMA node1 CPU(s): 16-31\n
                                  "},{"location":"end-user/kpanda/gpu/volcano/numa.html#cpu_1","title":"\u67e5\u770b\u5f53\u524d\u8282\u70b9\u7684 CPU \u5206\u914d","text":"

                                  \u7136\u540e\u67e5\u770b NUMA \u8282\u70b9\u4f7f\u7528\u60c5\u51b5\uff1a

                                  # \u67e5\u770b\u5f53\u524d\u8282\u70b9\u7684 CPU \u5206\u914d\ncat /var/lib/kubelet/cpu_manager_state\n{\"policyName\":\"static\",\"defaultCpuSet\":\"0,10-15,25-31\",\"entries\":{\"777870b5-c64f-42f5-9296-688b9dc212ba\":{\"container-1\":\"16-24\"},\"fb15e10a-b6a5-4aaa-8fcd-76c1aa64e6fd\":{\"container-1\":\"1-9\"}},\"checksum\":318470969}\n

                                  \u4ee5\u4e0a\u793a\u4f8b\u4e2d\u8868\u793a\uff0c\u8282\u70b9\u4e0a\u8fd0\u884c\u4e86\u4e24\u4e2a\u5bb9\u5668\uff0c\u4e00\u4e2a\u5360\u7528\u4e86 NUMA node0 \u76841-9 \u6838\uff0c\u53e6\u4e00\u4e2a\u5360\u7528\u4e86 NUMA node1 \u7684 16-24 \u6838\u3002

                                  "},{"location":"end-user/kpanda/gpu/volcano/volcano-gang-scheduler.html","title":"\u4f7f\u7528 Volcano \u7684 Gang Scheduler","text":"

                                  Gang \u8c03\u5ea6\u7b56\u7565\u662f volcano-scheduler \u7684\u6838\u5fc3\u8c03\u5ea6\u7b97\u6cd5\u4e4b\u4e00\uff0c\u5b83\u6ee1\u8db3\u4e86\u8c03\u5ea6\u8fc7\u7a0b\u4e2d\u7684 \u201cAll or nothing\u201d \u7684\u8c03\u5ea6\u9700\u6c42\uff0c \u907f\u514d Pod \u7684\u4efb\u610f\u8c03\u5ea6\u5bfc\u81f4\u96c6\u7fa4\u8d44\u6e90\u7684\u6d6a\u8d39\u3002\u5177\u4f53\u7b97\u6cd5\u662f\uff0c\u89c2\u5bdf Job \u4e0b\u7684 Pod \u5df2\u8c03\u5ea6\u6570\u91cf\u662f\u5426\u6ee1\u8db3\u4e86\u6700\u5c0f\u8fd0\u884c\u6570\u91cf\uff0c \u5f53 Job \u7684\u6700\u5c0f\u8fd0\u884c\u6570\u91cf\u5f97\u5230\u6ee1\u8db3\u65f6\uff0c\u4e3a Job \u4e0b\u7684\u6240\u6709 Pod \u6267\u884c\u8c03\u5ea6\u52a8\u4f5c\uff0c\u5426\u5219\uff0c\u4e0d\u6267\u884c\u3002

                                  "},{"location":"end-user/kpanda/gpu/volcano/volcano-gang-scheduler.html#_1","title":"\u4f7f\u7528\u573a\u666f","text":"

                                  \u57fa\u4e8e\u5bb9\u5668\u7ec4\u6982\u5ff5\u7684 Gang \u8c03\u5ea6\u7b97\u6cd5\u5341\u5206\u9002\u5408\u9700\u8981\u591a\u8fdb\u7a0b\u534f\u4f5c\u7684\u573a\u666f\u3002AI \u573a\u666f\u5f80\u5f80\u5305\u542b\u590d\u6742\u7684\u6d41\u7a0b\uff0c Data Ingestion\u3001Data Analysts\u3001Data Splitting\u3001Trainer\u3001Serving\u3001Logging \u7b49\uff0c \u9700\u8981\u4e00\u7ec4\u5bb9\u5668\u8fdb\u884c\u534f\u540c\u5de5\u4f5c\uff0c\u5c31\u5f88\u9002\u5408\u57fa\u4e8e\u5bb9\u5668\u7ec4\u7684 Gang \u8c03\u5ea6\u7b56\u7565\u3002 MPI \u8ba1\u7b97\u6846\u67b6\u4e0b\u7684\u591a\u7ebf\u7a0b\u5e76\u884c\u8ba1\u7b97\u901a\u4fe1\u573a\u666f\uff0c\u7531\u4e8e\u9700\u8981\u4e3b\u4ece\u8fdb\u7a0b\u534f\u540c\u5de5\u4f5c\uff0c\u4e5f\u975e\u5e38\u9002\u5408\u4f7f\u7528 Gang \u8c03\u5ea6\u7b56\u7565\u3002 \u5bb9\u5668\u7ec4\u4e0b\u7684\u5bb9\u5668\u9ad8\u5ea6\u76f8\u5173\u4e5f\u53ef\u80fd\u5b58\u5728\u8d44\u6e90\u4e89\u62a2\uff0c\u6574\u4f53\u8c03\u5ea6\u5206\u914d\uff0c\u80fd\u591f\u6709\u6548\u89e3\u51b3\u6b7b\u9501\u3002

                                  \u5728\u96c6\u7fa4\u8d44\u6e90\u4e0d\u8db3\u7684\u573a\u666f\u4e0b\uff0cGang \u7684\u8c03\u5ea6\u7b56\u7565\u5bf9\u4e8e\u96c6\u7fa4\u8d44\u6e90\u7684\u5229\u7528\u7387\u7684\u63d0\u5347\u662f\u975e\u5e38\u660e\u663e\u7684\u3002 \u6bd4\u5982\u96c6\u7fa4\u73b0\u5728\u53ea\u80fd\u5bb9\u7eb3 2 \u4e2a Pod\uff0c\u73b0\u5728\u8981\u6c42\u6700\u5c0f\u8c03\u5ea6\u7684 Pod \u6570\u4e3a 3\u3002 \u90a3\u73b0\u5728\u8fd9\u4e2a Job \u7684\u6240\u6709\u7684 Pod \u90fd\u4f1a pending\uff0c\u76f4\u5230\u96c6\u7fa4\u80fd\u591f\u5bb9\u7eb3 3 \u4e2a Pod\uff0cPod \u624d\u4f1a\u88ab\u8c03\u5ea6\u3002 \u6709\u6548\u9632\u6b62\u8c03\u5ea6\u90e8\u5206 Pod\uff0c\u4e0d\u6ee1\u8db3\u8981\u6c42\u53c8\u5360\u7528\u4e86\u8d44\u6e90\uff0c\u4f7f\u5176\u4ed6 Job \u65e0\u6cd5\u8fd0\u884c\u7684\u60c5\u51b5\u3002

                                  "},{"location":"end-user/kpanda/gpu/volcano/volcano-gang-scheduler.html#_2","title":"\u6982\u5ff5\u8bf4\u660e","text":"

                                  Gang Scheduler \u662f Volcano \u7684\u6838\u5fc3\u7684\u8c03\u5ea6\u63d2\u4ef6\uff0c\u5b89\u88c5 Volcano \u540e\u9ed8\u8ba4\u5c31\u5f00\u542f\u4e86\u3002 \u5728\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\u53ea\u9700\u8981\u6307\u5b9a\u8c03\u5ea6\u5668\u7684\u540d\u79f0\u4e3a Volcano \u5373\u53ef\u3002

                                  Volcano \u662f\u4ee5 PodGroup \u4e3a\u5355\u4f4d\u8fdb\u884c\u8c03\u5ea6\u7684\uff0c\u5728\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u5e76\u4e0d\u9700\u8981\u624b\u52a8\u521b\u5efa PodGroup \u8d44\u6e90\uff0c Volcano \u4f1a\u6839\u636e\u5de5\u4f5c\u8d1f\u8f7d\u7684\u4fe1\u606f\u81ea\u52a8\u521b\u5efa\u3002\u4e0b\u9762\u662f\u4e00\u4e2a PodGroup \u7684\u793a\u4f8b\uff1a

                                  apiVersion: scheduling.volcano.sh/v1beta1\nkind: PodGroup\nmetadata:\n  name: test\n  namespace: default\nspec:\n  minMember: 1  # (1)!\n  minResources:  # (2)!\n    cpu: \"3\"\n    memory: \"2048Mi\"\n  priorityClassName: high-prority # (3)!\n  queue: default # (4)!\n
1. The minimum number of Pods or tasks that must run under this PodGroup. If the cluster resources cannot satisfy running minMember tasks, the scheduler will not schedule any task within the PodGroup.
2. The minimum resources required to run this PodGroup. If the cluster's allocatable resources do not satisfy minResources, the scheduler will not schedule any task within the PodGroup.
3. The priority of this PodGroup, used by the scheduler to order all PodGroups in the queue during scheduling. system-node-critical and system-cluster-critical are two reserved values indicating the highest priority. When not specified, the default priority or zero priority is used.
4. The queue this PodGroup belongs to. The queue must be created in advance and be in the open state.
                                  "},{"location":"end-user/kpanda/gpu/volcano/volcano-gang-scheduler.html#_3","title":"\u4f7f\u7528\u6848\u4f8b","text":"

                                  \u5728 MPI \u8ba1\u7b97\u6846\u67b6\u4e0b\u7684\u591a\u7ebf\u7a0b\u5e76\u884c\u8ba1\u7b97\u901a\u4fe1\u573a\u666f\u4e2d\uff0c\u6211\u4eec\u8981\u786e\u4fdd\u6240\u6709\u7684 Pod \u90fd\u80fd\u8c03\u5ea6\u6210\u529f\u624d\u80fd\u4fdd\u8bc1\u4efb\u52a1\u6b63\u5e38\u5b8c\u6210\u3002 \u8bbe\u7f6e minAvailable \u4e3a 4\uff0c\u8868\u793a\u8981\u6c42 1 \u4e2a mpimaster \u548c 3 \u4e2a mpiworker \u80fd\u8fd0\u884c\u3002

                                  apiVersion: batch.volcano.sh/v1alpha1\nkind: Job\nmetadata:\n  name: lm-mpi-job\n  labels:\n    \"volcano.sh/job-type\": \"MPI\"\nspec:\n  minAvailable: 4\n  schedulerName: volcano\n  plugins:\n    ssh: []\n    svc: []\n  policies:\n    - event: PodEvicted\n      action: RestartJob\n  tasks:\n    - replicas: 1\n      name: mpimaster\n      policies:\n        - event: TaskCompleted\n          action: CompleteJob\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  MPI_HOST=`cat /etc/volcano/mpiworker.host | tr \"\\n\" \",\"`;\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd;\n                  mpiexec --allow-run-as-root --host ${MPI_HOST} -np 3 mpi_hello_world;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpimaster\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"500m\"\n                limits:\n                  cpu: \"500m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n    - replicas: 3\n      name: mpiworker\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd -D;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpiworker\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"1000m\"\n                limits:\n                  cpu: \"1000m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n

The generated PodGroup resource:

                                  apiVersion: scheduling.volcano.sh/v1beta1\nkind: PodGroup\nmetadata:\n  annotations:\n  creationTimestamp: \"2024-05-28T09:18:50Z\"\n  generation: 5\n  labels:\n    volcano.sh/job-type: MPI\n  name: lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2\n  namespace: default\n  ownerReferences:\n  - apiVersion: batch.volcano.sh/v1alpha1\n    blockOwnerDeletion: true\n    controller: true\n    kind: Job\n    name: lm-mpi-job\n    uid: 9c571015-37c7-4a1a-9604-eaa2248613f2\n  resourceVersion: \"25173454\"\n  uid: 7b04632e-7cff-4884-8e9a-035b7649d33b\nspec:\n  minMember: 4\n  minResources:\n    count/pods: \"4\"\n    cpu: 3500m\n    limits.cpu: 3500m\n    pods: \"4\"\n    requests.cpu: 3500m\n  minTaskMember:\n    mpimaster: 1\n    mpiworker: 3\n  queue: default\nstatus:\n  conditions:\n  - lastTransitionTime: \"2024-05-28T09:19:01Z\"\n    message: '3/4 tasks in gang unschedulable: pod group is not ready, 1 Succeeded,\n      3 Releasing, 4 minAvailable'\n    reason: NotEnoughResources\n    status: \"True\"\n    transitionID: f875efa5-0358-4363-9300-06cebc0e7466\n    type: Unschedulable\n  - lastTransitionTime: \"2024-05-28T09:18:53Z\"\n    reason: tasks in gang are ready to be scheduled\n    status: \"True\"\n    transitionID: 5a7708c8-7d42-4c33-9d97-0581f7c06dab\n    type: Scheduled\n  phase: Pending\n  succeeded: 1\n

The PodGroup shows that it is associated with the workload through ownerReferences and that the minimum number of running Pods is set to 4.
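To inspect the automatically created PodGroup yourself, list the PodGroups and fetch the one whose name carries the Job's UID suffix (the name below is from the example above):

kubectl -n default get podgroups.scheduling.volcano.sh\nkubectl -n default get podgroup lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2 -o yaml   # check status.conditions for scheduling details\n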

                                  "},{"location":"end-user/kpanda/gpu/volcano/volcano_binpack.html","title":"\u4f7f\u7528 Volcano Binpack \u8c03\u5ea6\u7b56\u7565","text":"

                                  Binpack \u8c03\u5ea6\u7b97\u6cd5\u7684\u76ee\u6807\u662f\u5c3d\u91cf\u628a\u5df2\u88ab\u5360\u7528\u7684\u8282\u70b9\u586b\u6ee1\uff08\u5c3d\u91cf\u4e0d\u5f80\u7a7a\u767d\u8282\u70b9\u5206\u914d\uff09\u3002\u5177\u4f53\u5b9e\u73b0\u4e0a\uff0cBinpack \u8c03\u5ea6\u7b97\u6cd5\u4f1a\u7ed9\u6295\u9012\u7684\u8282\u70b9\u6253\u5206\uff0c \u5206\u6570\u8d8a\u9ad8\u8868\u793a\u8282\u70b9\u7684\u8d44\u6e90\u5229\u7528\u7387\u8d8a\u9ad8\u3002\u901a\u8fc7\u5c3d\u53ef\u80fd\u586b\u6ee1\u8282\u70b9\uff0c\u5c06\u5e94\u7528\u8d1f\u8f7d\u9760\u62e2\u5728\u90e8\u5206\u8282\u70b9\uff0c\u8fd9\u79cd\u8c03\u5ea6\u7b97\u6cd5\u80fd\u591f\u5c3d\u53ef\u80fd\u51cf\u5c0f\u8282\u70b9\u5185\u7684\u788e\u7247\uff0c \u5728\u7a7a\u95f2\u7684\u673a\u5668\u4e0a\u4e3a\u7533\u8bf7\u4e86\u66f4\u5927\u8d44\u6e90\u8bf7\u6c42\u7684 Pod \u9884\u7559\u8db3\u591f\u7684\u8d44\u6e90\u7a7a\u95f4\uff0c\u4f7f\u96c6\u7fa4\u4e0b\u7a7a\u95f2\u8d44\u6e90\u5f97\u5230\u6700\u5927\u5316\u7684\u5229\u7528\u3002

                                  "},{"location":"end-user/kpanda/gpu/volcano/volcano_binpack.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"

                                  \u9884\u5148\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e0a\u5b89\u88c5 Volcano \u7ec4\u4ef6\u3002

                                  "},{"location":"end-user/kpanda/gpu/volcano/volcano_binpack.html#binpack","title":"Binpack \u7b97\u6cd5\u539f\u7406","text":"

                                  Binpack \u5728\u5bf9\u4e00\u4e2a\u8282\u70b9\u6253\u5206\u65f6\uff0c\u4f1a\u6839\u636e Binpack \u63d2\u4ef6\u81ea\u8eab\u6743\u91cd\u548c\u5404\u8d44\u6e90\u8bbe\u7f6e\u7684\u6743\u91cd\u503c\u7efc\u5408\u6253\u5206\u3002 \u9996\u5148\uff0c\u5bf9 Pod \u8bf7\u6c42\u8d44\u6e90\u4e2d\u7684\u6bcf\u7c7b\u8d44\u6e90\u4f9d\u6b21\u6253\u5206\uff0c\u4ee5 CPU \u4e3a\u4f8b\uff0cCPU \u8d44\u6e90\u5728\u5f85\u8c03\u5ea6\u8282\u70b9\u7684\u5f97\u5206\u4fe1\u606f\u5982\u4e0b\uff1a

                                  CPU.weight * (request + used) / allocatable\n

That is, the higher the CPU weight, the higher the score, and the fuller the node's resource usage, the higher the score. Memory, GPU and other resources work the same way. Here:

• CPU.weight is the user-configured CPU weight
• request is the CPU amount requested by the current Pod
• used is the CPU amount already allocated on the current node
• allocatable is the total available CPU on the current node

The overall node score under the Binpack policy is:

binpack.weight * (CPU.score + Memory.score + GPU.score) / (CPU.weight + Memory.weight + GPU.weight) * 100\n

That is, the larger the Binpack plugin's weight, the higher the score, and the larger a resource's weight, the larger that resource's share of the score. Here:

• binpack.weight is the user-configured weight of the bin-packing scheduling policy
• CPU.score is the CPU resource score and CPU.weight is the CPU weight
• Memory.score is the Memory resource score and Memory.weight is the Memory weight
• GPU.score is the GPU resource score and GPU.weight is the GPU weight

As shown in the figure, the cluster has two nodes, Node 1 and Node 2, and when scheduling a Pod, the Binpack policy scores each node separately. Assume CPU.weight is configured as 1, Memory.weight as 1, GPU.weight as 2, and binpack.weight as 5.

1. Binpack scores the resources of Node 1. The formula for each resource is:

  • CPU Score:

    CPU.weight * (request + used) / allocatable = 1 * (2 + 4) / 8 = 0.75

  • Memory Score:

    Memory.weight * (request + used) / allocatable = 1 * (4 + 8) / 16 = 0.75

  • GPU Score:

    GPU.weight * (request + used) / allocatable = 2 * (4 + 4) / 8 = 2

2. The formula for the overall node score is:

  binpack.weight * (CPU.score + Memory.score + GPU.score) / (CPU.weight + Memory.weight + GPU.weight) * 100\n

  With binpack.weight configured as 5, Node 1's score under the Binpack policy is:

  5 * (0.75 + 0.75 + 2) / (1 + 1 + 2) * 100 = 437.5\n
3. Binpack scores the resources of Node 2:

  • CPU Score:

    CPU.weight * (request + used) / allocatable = 1 * (2 + 6) / 8 = 1

  • Memory Score:

    Memory.weight * (request + used) / allocatable = 1 * (4 + 8) / 16 = 0.75

  • GPU Score:

    GPU.weight * (request + used) / allocatable = 2 * (4 + 4) / 8 = 2

4. Node 2's score under the Binpack policy is:

  5 * (1 + 0.75 + 2) / (1 + 1 + 2) * 100 = 468.75\n

In summary, Node 2 scores higher than Node 1, so under the Binpack policy the Pod is scheduled to Node 2 first.

                                  "},{"location":"end-user/kpanda/gpu/volcano/volcano_binpack.html#_2","title":"\u4f7f\u7528\u6848\u4f8b","text":"

                                  Binpack \u8c03\u5ea6\u63d2\u4ef6\u5728\u5b89\u88c5 Volcano \u7684\u65f6\u5019\u9ed8\u8ba4\u5c31\u4f1a\u5f00\u542f\uff1b\u5982\u679c\u7528\u6237\u6ca1\u6709\u914d\u7f6e\u6743\u91cd\uff0c\u5219\u4f7f\u7528\u5982\u4e0b\u9ed8\u8ba4\u7684\u914d\u7f6e\u6743\u91cd\u3002

                                  - plugins:\n    - name: binpack\n      arguments:\n        binpack.weight: 1\n        binpack.cpu: 1\n        binpack.memory: 1\n

The default weights do not produce the stacking (packing) behavior, so binpack.weight needs to be changed to 10.

                                  kubectl -n volcano-system edit configmaps volcano-scheduler-configmap\n
                                  - plugins:\n    - name: binpack\n      arguments:\n        binpack.weight: 10\n        binpack.cpu: 1\n        binpack.memory: 1\n        binpack.resources: nvidia.com/gpu, example.com/foo\n        binpack.resources.nvidia.com/gpu: 2\n        binpack.resources.example.com/foo: 3\n

After the change, restart the volcano-scheduler Pod for it to take effect.
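One way to restart it, assuming the default volcano-scheduler Deployment name in the volcano-system namespace:

kubectl -n volcano-system rollout restart deployment volcano-scheduler\n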

Create the following Deployment.

                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: binpack-test\n  labels:\n    app: binpack-test\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      app: test\n  template:\n    metadata:\n      labels:\n        app: test\n    spec:\n      schedulerName: volcano\n      containers:\n        - name: test\n          image: busybox\n          imagePullPolicy: IfNotPresent\n          command: [\"sh\", \"-c\", 'echo \"Hello, Kubernetes!\" && sleep 3600']\n          resources:\n            requests:\n              cpu: 500m\n            limits:\n              cpu: 500m\n

On a cluster with two Nodes you can see that both Pods are scheduled onto the same Node.
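You can verify the placement by listing the Pods together with their node assignment; the label selector matches the Deployment template above:

kubectl get pods -l app=test -o wide   # the NODE column should show the same node for both replicas\n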

                                  "},{"location":"end-user/kpanda/gpu/volcano/volcano_priority.html","title":"\u4f18\u5148\u7ea7\u62a2\u5360\uff08Preemption scheduling\uff09\u7b56\u7565","text":"

                                  Volcano \u901a\u8fc7 Priority \u63d2\u4ef6\u5b9e\u73b0\u4e86\u4f18\u5148\u7ea7\u62a2\u5360\u7b56\u7565\uff0c\u5373 Preemption scheduling \u7b56\u7565\u3002\u5728\u96c6\u7fa4\u8d44\u6e90\u6709\u9650\u4e14\u591a\u4e2a Job \u7b49\u5f85\u8c03\u5ea6\u65f6\uff0c \u5982\u679c\u4f7f\u7528 Kubernetes \u9ed8\u8ba4\u8c03\u5ea6\u5668\uff0c\u53ef\u80fd\u4f1a\u5bfc\u81f4\u5177\u6709\u66f4\u591a Pod \u6570\u91cf\u7684 Job \u5206\u5f97\u66f4\u591a\u8d44\u6e90\u3002\u800c Volcano-scheduler \u63d0\u4f9b\u4e86\u7b97\u6cd5\uff0c\u652f\u6301\u4e0d\u540c\u7684 Job \u4ee5 fair-share \u7684\u5f62\u5f0f\u5171\u4eab\u96c6\u7fa4\u8d44\u6e90\u3002

                                  Priority \u63d2\u4ef6\u5141\u8bb8\u7528\u6237\u81ea\u5b9a\u4e49 Job \u548c Task \u7684\u4f18\u5148\u7ea7\uff0c\u5e76\u6839\u636e\u9700\u6c42\u5728\u4e0d\u540c\u5c42\u6b21\u4e0a\u5b9a\u5236\u8c03\u5ea6\u7b56\u7565\u3002 \u4f8b\u5982\uff0c\u5bf9\u4e8e\u91d1\u878d\u573a\u666f\u3001\u7269\u8054\u7f51\u76d1\u63a7\u573a\u666f\u7b49\u9700\u8981\u8f83\u9ad8\u5b9e\u65f6\u6027\u7684\u5e94\u7528\uff0cPriority \u63d2\u4ef6\u80fd\u591f\u786e\u4fdd\u5176\u4f18\u5148\u5f97\u5230\u8c03\u5ea6\u3002

                                  "},{"location":"end-user/kpanda/gpu/volcano/volcano_priority.html#_1","title":"\u4f7f\u7528\u65b9\u5f0f","text":"

                                  \u4f18\u5148\u7ea7\u7684\u51b3\u5b9a\u57fa\u4e8e\u914d\u7f6e\u7684 PriorityClass \u4e2d\u7684 Value \u503c\uff0c\u503c\u8d8a\u5927\u4f18\u5148\u7ea7\u8d8a\u9ad8\u3002\u9ed8\u8ba4\u5df2\u542f\u7528\uff0c\u65e0\u9700\u4fee\u6539\u3002\u53ef\u901a\u8fc7\u4ee5\u4e0b\u547d\u4ee4\u786e\u8ba4\u6216\u4fee\u6539\u3002

                                  kubectl -n volcano-system edit configmaps volcano-scheduler-configmap\n
                                  "},{"location":"end-user/kpanda/gpu/volcano/volcano_priority.html#_2","title":"\u4f7f\u7528\u6848\u4f8b","text":"

                                  \u5047\u8bbe\u96c6\u7fa4\u4e2d\u5b58\u5728\u4e24\u4e2a\u7a7a\u95f2\u8282\u70b9\uff0c\u5e76\u6709\u4e09\u4e2a\u4f18\u5148\u7ea7\u4e0d\u540c\u7684\u5de5\u4f5c\u8d1f\u8f7d\uff1ahigh-priority\u3001med-priority \u548c low-priority\u3002 \u5f53 high-priority \u5de5\u4f5c\u8d1f\u8f7d\u8fd0\u884c\u5e76\u5360\u6ee1\u96c6\u7fa4\u8d44\u6e90\u540e\uff0c\u518d\u63d0\u4ea4 med-priority \u548c low-priority \u5de5\u4f5c\u8d1f\u8f7d\u3002 \u7531\u4e8e\u96c6\u7fa4\u8d44\u6e90\u5168\u90e8\u88ab\u66f4\u9ad8\u4f18\u5148\u7ea7\u7684\u5de5\u4f5c\u8d1f\u8f7d\u5360\u7528\uff0cmed-priority \u548c low-priority \u5de5\u4f5c\u8d1f\u8f7d\u5c06\u5904\u4e8e pending \u72b6\u6001\u3002 \u5f53 high-priority \u5de5\u4f5c\u8d1f\u8f7d\u7ed3\u675f\u540e\uff0c\u6839\u636e\u4f18\u5148\u7ea7\u8c03\u5ea6\u539f\u5219\uff0cmed-priority \u5de5\u4f5c\u8d1f\u8f7d\u5c06\u4f18\u5148\u88ab\u8c03\u5ea6\u3002

                                  1. \u901a\u8fc7 priority.yaml \u521b\u5efa 3 \u4e2a\u4f18\u5148\u7ea7\u5b9a\u4e49\uff0c\u5206\u522b\u4e3a\uff1ahigh-priority\uff0cmed-priority\uff0clow-priority\u3002

                                    \u67e5\u770b priority.yaml

                                    cat <<EOF | kubectl apply -f - \napiVersion: scheduling.k8s.io/v1 \nkind: PriorityClass \nitems: \n  - metadata: \n      name: high-priority \n    value: 100 \n    globalDefault: false \n    description: \"This priority class should be used for volcano job only.\" \n  - metadata: \n      name: med-priority \n    value: 50 \n    globalDefault: false \n    description: \"This priority class should be used for volcano job only.\" \n  - metadata: \n      name: low-priority \n    value: 10 \n    globalDefault: false \n    description: \"This priority class should be used for volcano job only.\" \nEOF\n
                                    2. \u67e5\u770b\u4f18\u5148\u7ea7\u5b9a\u4e49\u4fe1\u606f\u3002

                                    kubectl get PriorityClass\n
                                    NAME                      VALUE        GLOBAL-DEFAULT   AGE  \nhigh-priority             100          false            97s  \nlow-priority              10           false            97s  \nmed-priority              50           false            97s  \nsystem-cluster-critical   2000000000   false            6d6h  \nsystem-node-critical      2000001000   false            6d6h\n

2. Create the high-priority workload high-priority-job, which occupies all of the cluster's resources.

View high-priority-job
                                    cat <<EOF | kubectl apply -f -  \napiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: priority-high  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 4  \n  priorityClassName: high-priority  \n  tasks:  \n    - replicas: 4  \n      name: \"test\"  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                requests:  \n                  cpu: \"4\"  \n          restartPolicy: OnFailure  \nEOF\n

Check the Pods' running status via kubectl get pod:

                                    kubectl get pods\n
                                    NAME                   READY   STATUS    RESTARTS   AGE  \npriority-high-test-0   1/1     Running   0          3s  \npriority-high-test-1   1/1     Running   0          3s  \npriority-high-test-2   1/1     Running   0          3s  \npriority-high-test-3   1/1     Running   0          3s\n

At this point, the cluster's node resources are fully occupied.

3. Create the medium-priority workload med-priority-job and the low-priority workload low-priority-job.

                                    med-priority-job
                                    cat <<EOF | kubectl apply -f -  \napiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: priority-medium  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 4  \n  priorityClassName: med-priority  \n  tasks:  \n    - replicas: 4  \n      name: \"test\"  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                requests:  \n                  cpu: \"4\"  \n          restartPolicy: OnFailure  \nEOF\n
                                    low-priority-job
                                    cat <<EOF | kubectl apply -f -  \napiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: priority-low  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 4  \n  priorityClassName: low-priority  \n  tasks:  \n    - replicas: 4  \n      name: \"test\"  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                requests:  \n                  cpu: \"4\"  \n          restartPolicy: OnFailure  \nEOF\n

Check the Pods via kubectl get pod; cluster resources are insufficient, so the Pods are in Pending state:

                                    kubectl get pods\n
                                    NAME                     READY   STATUS    RESTARTS   AGE  \npriority-high-test-0     1/1     Running   0          3m29s  \npriority-high-test-1     1/1     Running   0          3m29s  \npriority-high-test-2     1/1     Running   0          3m29s  \npriority-high-test-3     1/1     Running   0          3m29s  \npriority-low-test-0      0/1     Pending   0          2m26s  \npriority-low-test-1      0/1     Pending   0          2m26s  \npriority-low-test-2      0/1     Pending   0          2m26s  \npriority-low-test-3      0/1     Pending   0          2m26s  \npriority-medium-test-0   0/1     Pending   0          2m36s  \npriority-medium-test-1   0/1     Pending   0          2m36s  \npriority-medium-test-2   0/1     Pending   0          2m36s  \npriority-medium-test-3   0/1     Pending   0          2m36s\n

4. Delete the high_priority_job workload to release cluster resources; med_priority_job will be scheduled first. Run kubectl delete -f high_priority_job.yaml to release the cluster resources, then check the Pods' scheduling info:

                                    kubectl get pods\n
                                    NAME                     READY   STATUS    RESTARTS   AGE  \npriority-low-test-0      0/1     Pending   0          5m18s  \npriority-low-test-1      0/1     Pending   0          5m18s  \npriority-low-test-2      0/1     Pending   0          5m18s  \npriority-low-test-3      0/1     Pending   0          5m18s  \npriority-medium-test-0   1/1     Running   0          5m28s  \npriority-medium-test-1   1/1     Running   0          5m28s  \npriority-medium-test-2   1/1     Running   0          5m28s  \npriority-medium-test-3   1/1     Running   0          5m28s\n

                                  "},{"location":"end-user/kpanda/gpu/volcano/volcano_user_guide.html","title":"\u5b89\u88c5 Volcano","text":"

                                  \u968f\u7740 Kubernetes\uff08K8s\uff09\u6210\u4e3a\u4e91\u539f\u751f\u5e94\u7528\u7f16\u6392\u4e0e\u7ba1\u7406\u7684\u9996\u9009\u5e73\u53f0\uff0c\u4f17\u591a\u5e94\u7528\u6b63\u79ef\u6781\u5411 K8s \u8fc1\u79fb\u3002 \u5728\u4eba\u5de5\u667a\u80fd\u4e0e\u673a\u5668\u5b66\u4e60\u9886\u57df\uff0c\u7531\u4e8e\u8fd9\u4e9b\u4efb\u52a1\u901a\u5e38\u6d89\u53ca\u5927\u91cf\u8ba1\u7b97\uff0c\u5f00\u53d1\u8005\u503e\u5411\u4e8e\u5728 Kubernetes \u4e0a\u6784\u5efa AI \u5e73\u53f0\uff0c \u4ee5\u5145\u5206\u5229\u7528\u5176\u5728\u8d44\u6e90\u7ba1\u7406\u3001\u5e94\u7528\u7f16\u6392\u53ca\u8fd0\u7ef4\u76d1\u63a7\u65b9\u9762\u7684\u4f18\u52bf\u3002

                                  \u7136\u800c\uff0cKubernetes \u7684\u9ed8\u8ba4\u8c03\u5ea6\u5668\u4e3b\u8981\u9488\u5bf9\u957f\u671f\u8fd0\u884c\u7684\u670d\u52a1\u8bbe\u8ba1\uff0c\u5bf9\u4e8e AI\u3001\u5927\u6570\u636e\u7b49\u9700\u8981\u6279\u91cf\u548c\u5f39\u6027\u8c03\u5ea6\u7684\u4efb\u52a1\u5b58\u5728\u8bf8\u591a\u4e0d\u8db3\u3002 \u4f8b\u5982\uff0c\u5728\u8d44\u6e90\u7ade\u4e89\u6fc0\u70c8\u7684\u60c5\u51b5\u4e0b\uff0c\u9ed8\u8ba4\u8c03\u5ea6\u5668\u53ef\u80fd\u5bfc\u81f4\u8d44\u6e90\u5206\u914d\u4e0d\u5747\uff0c\u8fdb\u800c\u5f71\u54cd\u4efb\u52a1\u7684\u6b63\u5e38\u6267\u884c\u3002

                                  \u4ee5 TensorFlow \u4f5c\u4e1a\u4e3a\u4f8b\uff0c\u5176\u5305\u542b PS\uff08\u53c2\u6570\u670d\u52a1\u5668\uff09\u548c Worker \u4e24\u79cd\u89d2\u8272\uff0c\u4e24\u8005\u9700\u534f\u540c\u5de5\u4f5c\u624d\u80fd\u5b8c\u6210\u4efb\u52a1\u3002 \u82e5\u4ec5\u90e8\u7f72\u5355\u4e00\u89d2\u8272\uff0c\u4f5c\u4e1a\u5c06\u65e0\u6cd5\u8fd0\u884c\u3002\u800c\u9ed8\u8ba4\u8c03\u5ea6\u5668\u5bf9 Pod \u7684\u8c03\u5ea6\u662f\u9010\u4e2a\u8fdb\u884c\u7684\uff0c\u65e0\u6cd5\u611f\u77e5 TFJob \u4e2d PS \u548c Worker \u7684\u4f9d\u8d56\u5173\u7cfb\u3002 \u5728\u9ad8\u8d1f\u8f7d\u60c5\u51b5\u4e0b\uff0c\u8fd9\u53ef\u80fd\u5bfc\u81f4\u591a\u4e2a\u4f5c\u4e1a\u5404\u81ea\u5206\u914d\u5230\u90e8\u5206\u8d44\u6e90\uff0c\u4f46\u5747\u65e0\u6cd5\u5b8c\u6210\uff0c\u4ece\u800c\u9020\u6210\u8d44\u6e90\u6d6a\u8d39\u3002

                                  "},{"location":"end-user/kpanda/gpu/volcano/volcano_user_guide.html#volcano_1","title":"Volcano \u7684\u8c03\u5ea6\u7b56\u7565\u4f18\u52bf","text":"

                                  Volcano \u63d0\u4f9b\u4e86\u591a\u79cd\u8c03\u5ea6\u7b56\u7565\uff0c\u4ee5\u5e94\u5bf9\u4e0a\u8ff0\u6311\u6218\u3002\u5176\u4e2d\uff0cGang-scheduling \u7b56\u7565\u80fd\u786e\u4fdd\u5206\u5e03\u5f0f\u673a\u5668\u5b66\u4e60\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u591a\u4e2a\u4efb\u52a1\uff08Pod\uff09\u540c\u65f6\u542f\u52a8\uff0c \u907f\u514d\u6b7b\u9501\uff1bPreemption scheduling \u7b56\u7565\u5219\u5141\u8bb8\u9ad8\u4f18\u5148\u7ea7\u4f5c\u4e1a\u5728\u8d44\u6e90\u4e0d\u8db3\u65f6\u62a2\u5360\u4f4e\u4f18\u5148\u7ea7\u4f5c\u4e1a\u7684\u8d44\u6e90\uff0c\u786e\u4fdd\u5173\u952e\u4efb\u52a1\u4f18\u5148\u5b8c\u6210\u3002

                                  \u6b64\u5916\uff0cVolcano \u4e0e Spark\u3001TensorFlow\u3001PyTorch \u7b49\u4e3b\u6d41\u8ba1\u7b97\u6846\u67b6\u65e0\u7f1d\u5bf9\u63a5\uff0c\u5e76\u652f\u6301 CPU \u548c GPU \u7b49\u5f02\u6784\u8bbe\u5907\u7684\u6df7\u5408\u8c03\u5ea6\uff0c\u4e3a AI \u8ba1\u7b97\u4efb\u52a1\u63d0\u4f9b\u4e86\u5168\u9762\u7684\u4f18\u5316\u652f\u6301\u3002

                                  \u63a5\u4e0b\u6765\uff0c\u6211\u4eec\u5c06\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5\u548c\u4f7f\u7528 Volcano\uff0c\u4ee5\u4fbf\u60a8\u80fd\u591f\u5145\u5206\u5229\u7528\u5176\u8c03\u5ea6\u7b56\u7565\u4f18\u52bf\uff0c\u4f18\u5316 AI \u8ba1\u7b97\u4efb\u52a1\u3002

                                  "},{"location":"end-user/kpanda/gpu/volcano/volcano_user_guide.html#volcano_2","title":"\u5b89\u88c5 Volcano","text":"
                                  1. \u5728 \u96c6\u7fa4\u8be6\u60c5 -> Helm \u5e94\u7528 -> Helm \u6a21\u677f \u4e2d\u627e\u5230 Volcano \u5e76\u5b89\u88c5\u3002

                                  2. \u68c0\u67e5\u5e76\u786e\u8ba4 Volcano \u662f\u5426\u5b89\u88c5\u5b8c\u6210\uff0c\u5373 volcano-admission\u3001volcano-controllers\u3001volcano-scheduler \u7ec4\u4ef6\u662f\u5426\u6b63\u5e38\u8fd0\u884c\u3002
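A quick check, assuming Volcano is installed into the volcano-system namespace:

kubectl -n volcano-system get pods\n# volcano-admission-xxx, volcano-controllers-xxx and volcano-scheduler-xxx should all be Running\n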

Volcano is usually used together with the AI Lab platform to form an effective closed loop for the whole development and training workflow of datasets, Notebooks, and training tasks.

                                  "},{"location":"end-user/kpanda/helm/index.html","title":"Helm \u6a21\u677f","text":"

                                  Helm \u662f Kubernetes \u7684\u5305\u7ba1\u7406\u5de5\u5177\uff0c\u65b9\u4fbf\u7528\u6237\u5feb\u901f\u53d1\u73b0\u3001\u5171\u4eab\u548c\u4f7f\u7528 Kubernetes \u6784\u5efa\u7684\u5e94\u7528\u3002\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u63d0\u4f9b\u4e86\u4e0a\u767e\u4e2a Helm \u6a21\u677f\uff0c\u6db5\u76d6\u5b58\u50a8\u3001\u7f51\u7edc\u3001\u76d1\u63a7\u3001\u6570\u636e\u5e93\u7b49\u4e3b\u8981\u573a\u666f\u3002\u501f\u52a9\u8fd9\u4e9b\u6a21\u677f\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7 UI \u754c\u9762\u5feb\u901f\u90e8\u7f72\u3001\u4fbf\u6377\u7ba1\u7406 Helm \u5e94\u7528\u3002\u6b64\u5916\uff0c\u652f\u6301\u901a\u8fc7\u6dfb\u52a0 Helm \u4ed3\u5e93 \u6dfb\u52a0\u66f4\u591a\u7684\u4e2a\u6027\u5316\u6a21\u677f\uff0c\u6ee1\u8db3\u591a\u6837\u9700\u6c42\u3002

                                  \u5173\u952e\u6982\u5ff5\uff1a

                                  \u4f7f\u7528 Helm \u65f6\u9700\u8981\u4e86\u89e3\u4ee5\u4e0b\u51e0\u4e2a\u5173\u952e\u6982\u5ff5\uff1a

                                  • Chart\uff1a\u4e00\u4e2a Helm \u5b89\u88c5\u5305\uff0c\u5176\u4e2d\u5305\u542b\u4e86\u8fd0\u884c\u4e00\u4e2a\u5e94\u7528\u6240\u9700\u8981\u7684\u955c\u50cf\u3001\u4f9d\u8d56\u548c\u8d44\u6e90\u5b9a\u4e49\u7b49\uff0c\u8fd8\u53ef\u80fd\u5305\u542b Kubernetes \u96c6\u7fa4\u4e2d\u7684\u670d\u52a1\u5b9a\u4e49\uff0c\u7c7b\u4f3c Homebrew \u4e2d\u7684 formula\u3001APT \u7684 dpkg \u6216\u8005 Yum \u7684 rpm \u6587\u4ef6\u3002Chart \u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u79f0\u4e3a Helm \u6a21\u677f \u3002

                                  • Release\uff1a\u5728 Kubernetes \u96c6\u7fa4\u4e0a\u8fd0\u884c\u7684\u4e00\u4e2a Chart \u5b9e\u4f8b\u3002\u4e00\u4e2a Chart \u53ef\u4ee5\u5728\u540c\u4e00\u4e2a\u96c6\u7fa4\u5185\u591a\u6b21\u5b89\u88c5\uff0c\u6bcf\u6b21\u5b89\u88c5\u90fd\u4f1a\u521b\u5efa\u4e00\u4e2a\u65b0\u7684 Release\u3002Release \u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u79f0\u4e3a Helm \u5e94\u7528 \u3002

                                  • Repository\uff1a\u7528\u4e8e\u53d1\u5e03\u548c\u5b58\u50a8 Chart \u7684\u5b58\u50a8\u5e93\u3002Repository \u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u79f0\u4e3a Helm \u4ed3\u5e93\u3002

For more details, see the Helm official website.

Related operations:

• Upload a Helm template; see uploading Helm templates.
• Manage Helm apps, including installing, updating, and uninstalling Helm apps, viewing Helm operation records, and more.
• Manage Helm repositories, including installing, updating, and deleting Helm repositories.
                                  "},{"location":"end-user/kpanda/helm/Import-addon.html","title":"\u5c06\u81ea\u5b9a\u4e49 Helm \u5e94\u7528\u5bfc\u5165\u7cfb\u7edf\u5185\u7f6e Addon","text":"

                                  \u672c\u6587\u4ece\u79bb\u7ebf\u548c\u5728\u7ebf\u4e24\u79cd\u73af\u5883\u8bf4\u660e\u5982\u4f55\u5c06 Helm \u5e94\u7528\u5bfc\u5165\u5230\u7cfb\u7edf\u5185\u7f6e\u7684 Addon \u4e2d\u3002

                                  "},{"location":"end-user/kpanda/helm/Import-addon.html#_1","title":"\u79bb\u7ebf\u73af\u5883","text":"

                                  \u79bb\u7ebf\u73af\u5883\u6307\u7684\u662f\u65e0\u6cd5\u8fde\u901a\u4e92\u8054\u7f51\u6216\u5c01\u95ed\u7684\u79c1\u6709\u7f51\u7edc\u73af\u5883\u3002

                                  "},{"location":"end-user/kpanda/helm/Import-addon.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  • \u5b58\u5728\u53ef\u4ee5\u8fd0\u884c\u7684\u00a0charts-syncer\u3002 \u82e5\u6ca1\u6709\uff0c\u53ef\u70b9\u51fb\u4e0b\u8f7d\u3002
                                  • Helm Chart \u5df2\u7ecf\u5b8c\u6210\u9002\u914d charts-syncer\u3002 \u5373\u5728 Helm Chart \u5185\u6dfb\u52a0\u4e86 .relok8s-images.yaml \u6587\u4ef6\u3002\u8be5\u6587\u4ef6\u9700\u8981\u5305\u542b Chart \u4e2d\u6240\u6709\u4f7f\u7528\u5230\u955c\u50cf\uff0c \u4e5f\u53ef\u4ee5\u5305\u542b Chart \u4e2d\u672a\u76f4\u63a5\u4f7f\u7528\u7684\u955c\u50cf\uff0c\u7c7b\u4f3c Operator \u4e2d\u4f7f\u7528\u7684\u955c\u50cf\u3002

                                  Note

                                  • \u5982\u4f55\u7f16\u5199 Chart \u53ef\u53c2\u8003\u00a0image-hints-file\u3002 \u8981\u6c42\u955c\u50cf\u7684\u00a0registry \u548c repository \u5fc5\u987b\u5206\u5f00\uff0c\u56e0\u4e3a load \u955c\u50cf\u65f6\u9700\u66ff\u6362\u6216\u4fee\u6539 registry/repository\u3002
                                  • \u5b89\u88c5\u5668\u6240\u5728\u7684\u706b\u79cd\u96c6\u7fa4\u5df2\u5b89\u88c5 charts-syncer\u3002 \u82e5\u5c06\u81ea\u5b9a\u4e49 Helm \u5e94\u7528\u5bfc\u5165\u5b89\u88c5\u5668\u6240\u5728\u706b\u79cd\u96c6\u7fa4\uff0c\u53ef\u8df3\u8fc7\u4e0b\u8f7d\u76f4\u63a5\u9002\u914d\uff1b \u82e5\u672a\u5b89\u88c5\u00a0charts-syncer\u4e8c\u8fdb\u5236\u6587\u4ef6\uff0c \u53ef\u7acb\u5373\u4e0b\u8f7d\u3002
                                  "},{"location":"end-user/kpanda/helm/Import-addon.html#helm-chart","title":"\u540c\u6b65 Helm Chart","text":"
                                  1. \u8fdb\u5165\u5bb9\u5668\u7ba1\u7406 -> Helm \u5e94\u7528 -> Helm \u4ed3\u5e93\uff0c\u641c\u7d22 addon\uff0c\u83b7\u53d6\u5185\u7f6e\u4ed3\u5e93\u5730\u5740\u548c\u7528\u6237\u540d/\u5bc6\u7801\uff08\u7cfb\u7edf\u5185\u7f6e\u4ed3\u5e93\u9ed8\u8ba4\u7528\u6237\u540d/\u5bc6\u7801\u4e3a rootuser/rootpass123\uff09\u3002
                                  1. \u540c\u6b65 Helm Chart \u5230\u5bb9\u5668\u7ba1\u7406\u5185\u7f6e\u4ed3\u5e93 Addon

                                    • \u7f16\u5199\u5982\u4e0b\u914d\u7f6e\u6587\u4ef6\uff0c\u53ef\u4ee5\u6839\u636e\u5177\u4f53\u914d\u7f6e\u4fee\u6539\uff0c\u5e76\u4fdd\u5b58\u4e3a sync-dao-2048.yaml\u3002

source:  # helm charts source info\n  repo:\n    kind: HARBOR # can also be any other supported Helm Chart repo kind, e.g. CHARTMUSEUM\n    url: https://release-ci.daocloud.io/chartrepo/community #  change to your chart repo url\n    #auth: # username/password; can be omitted if no password is set\n      #username: "admin"\n      #password: "Harbor12345"\ncharts:  # charts to sync\n  - name: dao-2048 # helm charts info; if omitted, all charts in the source helm repo are synced\n    versions:\n      - 1.4.1\ntarget:  # helm charts target info\n  containerRegistry: 10.5.14.40 # image registry url\n  repo:\n    kind: CHARTMUSEUM # can also be any other supported Helm Chart repo kind, e.g. HARBOR\n    url: http://10.5.14.40:8081 #  change to the correct chart repo url; you can verify the address with helm repo add $HELM-REPO\n    auth: # username/password; can be omitted if no password is set\n      username: "rootuser"\n      password: "rootpass123"\n  containers:\n    # kind: HARBOR # set this field if the image registry is HARBOR and you want charts-syncer to auto-create image repositories\n    # auth: # username/password; can be omitted if no password is set\n      # username: "admin"\n      # password: "Harbor12345"\n\n# leverage .relok8s-images.yaml file inside the Charts to move the container images too\nrelocateContainerImages: true\n
                                    • \u6267\u884c charts-syncer \u547d\u4ee4\u540c\u6b65 Chart \u53ca\u5176\u5305\u542b\u7684\u955c\u50cf

                                      charts-syncer sync --config sync-dao-2048.yaml --insecure --auto-create-repository\n

The expected output is:

                                      I1222 15:01:47.119777    8743 sync.go:45] Using config file: \"examples/sync-dao-2048.yaml\"\nW1222 15:01:47.234238    8743 syncer.go:263] Ignoring skipDependencies option as dependency sync is not supported if container image relocation is true or syncing from/to intermediate directory\nI1222 15:01:47.234685    8743 sync.go:58] There is 1 chart out of sync!\nI1222 15:01:47.234706    8743 sync.go:66] Syncing \"dao-2048_1.4.1\" chart...\n.relok8s-images.yaml hints file found\nComputing relocation...\n\nRelocating dao-2048@1.4.1...\nPushing 10.5.14.40/daocloud/dao-2048:v1.4.1...\nDone\nDone moving /var/folders/vm/08vw0t3j68z9z_4lcqyhg8nm0000gn/T/charts-syncer869598676/dao-2048-1.4.1.tgz\n
3. After the previous step completes, go to Container Management -> Helm Apps -> Helm Repos, find the corresponding Addon, click Sync Repo in the action column, and return to Helm Templates to see the uploaded Helm app.

4. Afterwards, the app can be installed, upgraded, and uninstalled as usual.

                                  "},{"location":"end-user/kpanda/helm/Import-addon.html#_3","title":"\u5728\u7ebf\u73af\u5883","text":"

                                  \u5728\u7ebf\u73af\u5883\u7684 Helm Repo \u5730\u5740\u4e3a release.daocloud.io\u3002 \u5982\u679c\u7528\u6237\u65e0\u6743\u9650\u6dfb\u52a0 Helm Repo\uff0c\u5219\u65e0\u6cd5\u5c06\u81ea\u5b9a\u4e49 Helm \u5e94\u7528\u5bfc\u5165\u7cfb\u7edf\u5185\u7f6e Addon\u3002 \u60a8\u53ef\u4ee5\u6dfb\u52a0\u81ea\u5df1\u642d\u5efa\u7684 Helm \u4ed3\u5e93\uff0c\u7136\u540e\u6309\u7167\u79bb\u7ebf\u73af\u5883\u4e2d\u540c\u6b65 Helm Chart \u7684\u6b65\u9aa4\u5c06\u60a8\u7684 Helm \u4ed3\u5e93\u96c6\u6210\u5230\u5e73\u53f0\u4f7f\u7528\u3002

                                  "},{"location":"end-user/kpanda/helm/helm-app.html","title":"\u7ba1\u7406 Helm \u5e94\u7528","text":"

                                  \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u652f\u6301\u5bf9 Helm \u8fdb\u884c\u754c\u9762\u5316\u7ba1\u7406\uff0c\u5305\u62ec\u4f7f\u7528 Helm \u6a21\u677f\u521b\u5efa Helm \u5b9e\u4f8b\u3001\u81ea\u5b9a\u4e49 Helm \u5b9e\u4f8b\u53c2\u6570\u3001\u5bf9 Helm \u5b9e\u4f8b\u8fdb\u884c\u5168\u751f\u547d\u5468\u671f\u7ba1\u7406\u7b49\u529f\u80fd\u3002

                                  \u672c\u8282\u5c06\u4ee5 cert-manager \u4e3a\u4f8b\uff0c\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u5bb9\u5668\u7ba1\u7406\u754c\u9762\u521b\u5efa\u5e76\u7ba1\u7406 Helm \u5e94\u7528\u3002

                                  "},{"location":"end-user/kpanda/helm/helm-app.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                  • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 NS Admin \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                  "},{"location":"end-user/kpanda/helm/helm-app.html#helm_1","title":"\u5b89\u88c5 Helm \u5e94\u7528","text":"

                                  \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u5b89\u88c5 Helm \u5e94\u7528\u3002

                                  1. \u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                  2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u4f9d\u6b21\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u6a21\u677f \uff0c\u8fdb\u5165 Helm \u6a21\u677f\u9875\u9762\u3002

                                    \u5728 Helm \u6a21\u677f\u9875\u9762\u9009\u62e9\u540d\u4e3a addon \u7684 Helm \u4ed3\u5e93\uff0c\u6b64\u65f6\u754c\u9762\u4e0a\u5c06\u5448\u73b0 addon \u4ed3\u5e93\u4e0b\u6240\u6709\u7684 Helm chart \u6a21\u677f\u3002 \u70b9\u51fb\u540d\u79f0\u4e3a cert-manager \u7684 Chart\u3002

                                  3. \u5728\u5b89\u88c5\u9875\u9762\uff0c\u80fd\u591f\u770b\u5230 Chart \u7684\u76f8\u5173\u8be6\u7ec6\u4fe1\u606f\uff0c\u5728\u754c\u9762\u53f3\u4e0a\u89d2\u9009\u62e9\u9700\u8981\u5b89\u88c5\u7684\u7248\u672c\uff0c\u70b9\u51fb \u5b89\u88c5 \u6309\u94ae\u3002\u6b64\u5904\u9009\u62e9 v1.9.1 \u7248\u672c\u8fdb\u884c\u5b89\u88c5\u3002

                                  4. \u914d\u7f6e \u540d\u79f0 \u3001 \u547d\u540d\u7a7a\u95f4 \u53ca \u7248\u672c\u4fe1\u606f \uff0c\u4e5f\u53ef\u4ee5\u5728\u4e0b\u65b9\u7684 \u53c2\u6570\u914d\u7f6e \u533a\u57df\u901a\u8fc7\u4fee\u6539 YAML \u6765\u81ea\u5b9a\u4e49\u53c2\u6570\u3002\u70b9\u51fb \u786e\u5b9a \u3002

                                  5. \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de Helm \u5e94\u7528\u5217\u8868\uff0c\u65b0\u521b\u5efa\u7684 Helm \u5e94\u7528\u72b6\u6001\u4e3a \u5b89\u88c5\u4e2d \uff0c\u7b49\u5f85\u4e00\u6bb5\u65f6\u95f4\u540e\u72b6\u6001\u53d8\u4e3a \u8fd0\u884c\u4e2d \u3002
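For reference, the same installation can be reproduced with the helm CLI. A sketch, assuming the built-in addon repo has been added locally under the name addon and using a cert-manager namespace as an example:

helm install cert-manager addon/cert-manager --namespace cert-manager --create-namespace --version v1.9.1\n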

                                  "},{"location":"end-user/kpanda/helm/helm-app.html#helm_2","title":"\u66f4\u65b0 Helm \u5e94\u7528","text":"

                                  \u5f53\u6211\u4eec\u901a\u8fc7\u754c\u9762\u5b8c\u6210\u4e00\u4e2a Helm \u5e94\u7528\u7684\u5b89\u88c5\u540e\uff0c\u6211\u4eec\u53ef\u4ee5\u5bf9 Helm \u5e94\u7528\u6267\u884c\u66f4\u65b0\u64cd\u4f5c\u3002\u6ce8\u610f\uff1a\u53ea\u6709\u901a\u8fc7\u754c\u9762\u5b89\u88c5\u7684 Helm \u5e94\u7528\u624d\u652f\u6301\u4f7f\u7528\u754c\u9762\u8fdb\u884c\u66f4\u65b0\u64cd\u4f5c\u3002

                                  \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u66f4\u65b0 Helm \u5e94\u7528\u3002

                                  1. \u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                  2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb Helm \u5e94\u7528 \uff0c\u8fdb\u5165 Helm \u5e94\u7528\u5217\u8868\u9875\u9762\u3002

                                    \u5728 Helm \u5e94\u7528\u5217\u8868\u9875\u9009\u62e9\u9700\u8981\u66f4\u65b0\u7684 Helm \u5e94\u7528\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\uff0c\u5728\u4e0b\u62c9\u9009\u62e9\u4e2d\u9009\u62e9 \u66f4\u65b0 \u64cd\u4f5c\u3002

                                  3. \u70b9\u51fb \u66f4\u65b0 \u6309\u94ae\u540e\uff0c\u7cfb\u7edf\u5c06\u8df3\u8f6c\u81f3\u66f4\u65b0\u754c\u9762\uff0c\u60a8\u53ef\u4ee5\u6839\u636e\u9700\u8981\u5bf9 Helm \u5e94\u7528\u8fdb\u884c\u66f4\u65b0\uff0c\u6b64\u5904\u6211\u4eec\u4ee5\u66f4\u65b0 dao-2048 \u8fd9\u4e2a\u5e94\u7528\u7684 http \u7aef\u53e3\u4e3a\u4f8b\u3002

                                  4. \u4fee\u6539\u5b8c\u76f8\u5e94\u53c2\u6570\u540e\u3002\u60a8\u53ef\u4ee5\u5728\u53c2\u6570\u914d\u7f6e\u4e0b\u70b9\u51fb \u53d8\u5316 \u6309\u94ae\uff0c\u5bf9\u6bd4\u4fee\u6539\u524d\u540e\u7684\u6587\u4ef6\uff0c\u786e\u5b9a\u65e0\u8bef\u540e\uff0c\u70b9\u51fb\u5e95\u90e8 \u786e\u5b9a \u6309\u94ae\uff0c\u5b8c\u6210 Helm \u5e94\u7528\u7684\u66f4\u65b0\u3002

                                  5. \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de Helm \u5e94\u7528\u5217\u8868\uff0c\u53f3\u4e0a\u89d2\u5f39\u7a97\u63d0\u793a \u66f4\u65b0\u6210\u529f \u3002
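The CLI equivalent is helm upgrade. A sketch only: service.port is a hypothetical value key, so use whatever key your Chart's Parameter Configuration YAML actually defines, and adjust the repo name if dao-2048 did not come from the addon repo:

helm upgrade dao-2048 addon/dao-2048 --namespace <namespace> --set service.port=8080\n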

                                  "},{"location":"end-user/kpanda/helm/helm-app.html#helm_3","title":"\u67e5\u770b Helm \u64cd\u4f5c\u8bb0\u5f55","text":"

                                  Helm \u5e94\u7528\u7684\u6bcf\u6b21\u5b89\u88c5\u3001\u66f4\u65b0\u3001\u5220\u9664\u90fd\u6709\u8be6\u7ec6\u7684\u64cd\u4f5c\u8bb0\u5f55\u548c\u65e5\u5fd7\u53ef\u4f9b\u67e5\u770b\u3002

                                  1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u4f9d\u6b21\u70b9\u51fb \u96c6\u7fa4\u8fd0\u7ef4 -> \u6700\u8fd1\u64cd\u4f5c \uff0c\u7136\u540e\u5728\u9875\u9762\u4e0a\u65b9\u9009\u62e9 Helm \u64cd\u4f5c \u6807\u7b7e\u9875\u3002\u6bcf\u4e00\u6761\u8bb0\u5f55\u5bf9\u5e94\u4e00\u6b21\u5b89\u88c5/\u66f4\u65b0/\u5220\u9664\u64cd\u4f5c\u3002

                                  2. \u5982\u9700\u67e5\u770b\u6bcf\u4e00\u6b21\u64cd\u4f5c\u7684\u8be6\u7ec6\u65e5\u5fd7\uff1a\u5728\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u65e5\u5fd7 \u3002

                                  3. \u6b64\u65f6\u9875\u9762\u4e0b\u65b9\u5c06\u4ee5\u63a7\u5236\u53f0\u7684\u5f62\u5f0f\u5c55\u793a\u8be6\u7ec6\u7684\u8fd0\u884c\u65e5\u5fd7\u3002
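The helm CLI offers a comparable view of a release's revision history, which can be useful alongside the UI records:

helm history dao-2048 -n <namespace>\n# each revision corresponds to one install/upgrade/rollback of the release\n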

                                  "},{"location":"end-user/kpanda/helm/helm-app.html#helm_4","title":"\u5220\u9664 Helm \u5e94\u7528","text":"

                                  \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u5220\u9664 Helm \u5e94\u7528\u3002

                                  1. \u627e\u5230\u5f85\u5220\u9664\u7684 Helm \u5e94\u7528\u6240\u5728\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                  2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb Helm \u5e94\u7528 \uff0c\u8fdb\u5165 Helm \u5e94\u7528\u5217\u8868\u9875\u9762\u3002

                                    \u5728 Helm \u5e94\u7528\u5217\u8868\u9875\u9009\u62e9\u60a8\u9700\u8981\u5220\u9664\u7684 Helm \u5e94\u7528\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\uff0c\u5728\u4e0b\u62c9\u9009\u62e9\u4e2d\u9009\u62e9 \u5220\u9664 \u3002

                                  3. \u5728\u5f39\u7a97\u5185\u8f93\u5165 Helm \u5e94\u7528\u7684\u540d\u79f0\u8fdb\u884c\u786e\u8ba4\uff0c\u7136\u540e\u70b9\u51fb \u5220\u9664 \u6309\u94ae\u3002
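For releases you manage with helm directly, the CLI equivalent is a sketch like:

helm uninstall dao-2048 -n <namespace>\n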

                                  "},{"location":"end-user/kpanda/helm/helm-repo.html","title":"\u7ba1\u7406 Helm \u4ed3\u5e93","text":"

                                  Helm \u4ed3\u5e93\u662f\u7528\u6765\u5b58\u50a8\u548c\u53d1\u5e03 Chart \u7684\u5b58\u50a8\u5e93\u3002Helm \u5e94\u7528\u6a21\u5757\u652f\u6301\u901a\u8fc7 HTTP(s) \u534f\u8bae\u6765\u8bbf\u95ee\u5b58\u50a8\u5e93\u4e2d\u7684 Chart \u5305\u3002\u7cfb\u7edf\u9ed8\u8ba4\u5185\u7f6e\u4e86\u4e0b\u8868\u6240\u793a\u7684 4 \u4e2a Helm \u4ed3\u5e93\u4ee5\u6ee1\u8db3\u4f01\u4e1a\u751f\u4ea7\u8fc7\u7a0b\u4e2d\u7684\u5e38\u89c1\u9700\u6c42\u3002

                                  \u4ed3\u5e93 \u63cf\u8ff0 \u793a\u4f8b partner \u7531\u751f\u6001\u5408\u4f5c\u4f19\u4f34\u6240\u63d0\u4f9b\u7684\u5404\u7c7b\u4f18\u8d28\u7279\u8272 Chart tidb system \u7cfb\u7edf\u6838\u5fc3\u529f\u80fd\u7ec4\u4ef6\u53ca\u90e8\u5206\u9ad8\u7ea7\u529f\u80fd\u6240\u5fc5\u9700\u4f9d\u8d56\u7684 Chart\uff0c\u5982\u5fc5\u9700\u5b89\u88c5 insight-agent \u624d\u80fd\u591f\u83b7\u53d6\u96c6\u7fa4\u7684\u76d1\u63a7\u4fe1\u606f Insight addon \u4e1a\u52a1\u573a\u666f\u4e2d\u5e38\u89c1\u7684 Chart cert-manager community Kubernetes \u793e\u533a\u8f83\u4e3a\u70ed\u95e8\u7684\u5f00\u6e90\u7ec4\u4ef6 Chart Istio

                                  \u9664\u4e0a\u8ff0\u9884\u7f6e\u4ed3\u5e93\u5916\uff0c\u60a8\u4e5f\u53ef\u4ee5\u81ea\u884c\u6dfb\u52a0\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93\u3002\u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u6dfb\u52a0\u3001\u66f4\u65b0\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93\u3002

                                  "},{"location":"end-user/kpanda/helm/helm-repo.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762

                                  • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 NS Admin \u6216\u66f4\u9ad8\u6743\u9650 \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                  • \u5982\u679c\u4f7f\u7528\u79c1\u6709\u4ed3\u5e93\uff0c\u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u62e5\u6709\u5bf9\u8be5\u79c1\u6709\u4ed3\u5e93\u7684\u8bfb\u5199\u6743\u9650\u3002

                                  "},{"location":"end-user/kpanda/helm/helm-repo.html#helm_1","title":"\u5f15\u5165\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93","text":"

                                  \u4e0b\u9762\u4ee5 Kubevela \u516c\u5f00\u7684\u955c\u50cf\u4ed3\u5e93\u4e3a\u4f8b\uff0c\u5f15\u5165 Helm \u4ed3\u5e93\u5e76\u7ba1\u7406\u3002

                                  1. \u627e\u5230\u9700\u8981\u5f15\u5165\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                  2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u4f9d\u6b21\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u4ed3\u5e93 \uff0c\u8fdb\u5165 Helm \u4ed3\u5e93\u9875\u9762\u3002

                                  3. \u5728 Helm \u4ed3\u5e93\u9875\u9762\u70b9\u51fb \u521b\u5efa\u4ed3\u5e93 \u6309\u94ae\uff0c\u8fdb\u5165\u521b\u5efa\u4ed3\u5e93\u9875\u9762\uff0c\u6309\u7167\u4e0b\u8868\u914d\u7f6e\u76f8\u5173\u53c2\u6570\u3002

                                    • \u4ed3\u5e93\u540d\u79f0\uff1a\u8bbe\u7f6e\u4ed3\u5e93\u540d\u79f0\u3002\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26 - \uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u5e76\u7ed3\u5c3e\uff0c\u4f8b\u5982 kubevela
                                    • \u4ed3\u5e93\u5730\u5740\uff1a\u7528\u6765\u6307\u5411\u76ee\u6807 Helm \u4ed3\u5e93\u7684 http\uff08s\uff09\u5730\u5740\u3002\u4f8b\u5982 https://charts.kubevela.net/core
                                    • \u8df3\u8fc7 TLS \u9a8c\u8bc1: \u5982\u679c\u6dfb\u52a0\u7684 Helm \u4ed3\u5e93\u4e3a https \u5730\u5740\u4e14\u9700\u8df3\u8fc7 TLS \u9a8c\u8bc1\uff0c\u53ef\u4ee5\u52fe\u9009\u6b64\u9009\u9879\uff0c\u9ed8\u8ba4\u4e3a\u4e0d\u52fe\u9009
                                    • \u8ba4\u8bc1\u65b9\u5f0f\uff1a\u8fde\u63a5\u4ed3\u5e93\u5730\u5740\u540e\u7528\u6765\u8fdb\u884c\u8eab\u4efd\u6821\u9a8c\u7684\u65b9\u5f0f\u3002\u5bf9\u4e8e\u516c\u5f00\u4ed3\u5e93\uff0c\u53ef\u4ee5\u9009\u62e9 None \uff0c\u79c1\u6709\u7684\u4ed3\u5e93\u9700\u8981\u8f93\u5165\u7528\u6237\u540d/\u5bc6\u7801\u4ee5\u8fdb\u884c\u8eab\u4efd\u6821\u9a8c
                                    • \u6807\u7b7e\uff1a\u4e3a\u8be5 Helm \u4ed3\u5e93\u6dfb\u52a0\u6807\u7b7e\u3002\u4f8b\u5982 key: repo4\uff1bvalue: Kubevela
                                    • \u6ce8\u89e3\uff1a\u4e3a\u8be5 Helm \u4ed3\u5e93\u6dfb\u52a0\u6ce8\u89e3\u3002\u4f8b\u5982 key: repo4\uff1bvalue: Kubevela
                                    • \u63cf\u8ff0\uff1a\u4e3a\u8be5 Helm \u4ed3\u5e93\u6dfb\u52a0\u63cf\u8ff0\u3002\u4f8b\u5982\uff1a\u8fd9\u662f\u4e00\u4e2a Kubevela \u516c\u5f00 Helm \u4ed3\u5e93

                                  4. \u70b9\u51fb \u786e\u5b9a \uff0c\u5b8c\u6210 Helm \u4ed3\u5e93\u7684\u521b\u5efa\u3002\u9875\u9762\u4f1a\u81ea\u52a8\u8df3\u8f6c\u81f3 Helm \u4ed3\u5e93\u5217\u8868\u3002
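The same repo can be verified from any machine with helm installed, which is a quick way to confirm the address before filling in the form:

helm repo add kubevela https://charts.kubevela.net/core\nhelm repo update\nhelm search repo kubevela   # lists the charts served by the repo\n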

                                  "},{"location":"end-user/kpanda/helm/helm-repo.html#helm_2","title":"\u66f4\u65b0 Helm \u4ed3\u5e93","text":"

                                  \u5f53 Helm \u4ed3\u5e93\u7684\u5730\u5740\u4fe1\u606f\u53d1\u751f\u53d8\u5316\u65f6\uff0c\u53ef\u4ee5\u66f4\u65b0 Helm \u4ed3\u5e93\u7684\u5730\u5740\u3001\u8ba4\u8bc1\u65b9\u5f0f\u3001\u6807\u7b7e\u3001\u6ce8\u89e3\u53ca\u63cf\u8ff0\u4fe1\u606f\u3002

                                  1. \u627e\u5230\u5f85\u66f4\u65b0\u4ed3\u5e93\u6240\u5728\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                  2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u4f9d\u6b21\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u4ed3\u5e93 \uff0c\u8fdb\u5165 Helm \u4ed3\u5e93\u5217\u8868\u9875\u9762\u3002

                                  3. \u5728\u4ed3\u5e93\u5217\u8868\u9875\u9762\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684 Helm \u4ed3\u5e93\uff0c\u5728\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \u6309\u94ae\uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u70b9\u51fb \u66f4\u65b0 \u3002

                                  4. \u5728 \u7f16\u8f91 Helm \u4ed3\u5e93 \u9875\u9762\u8fdb\u884c\u66f4\u65b0\uff0c\u5b8c\u6210\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                                  5. \u8fd4\u56de Helm \u4ed3\u5e93\u5217\u8868\uff0c\u5c4f\u5e55\u63d0\u793a\u66f4\u65b0\u6210\u529f\u3002

                                  "},{"location":"end-user/kpanda/helm/helm-repo.html#helm_3","title":"\u5220\u9664 Helm \u4ed3\u5e93","text":"

                                  \u9664\u4e86\u5f15\u5165\u3001\u66f4\u65b0\u4ed3\u5e93\u5916\uff0c\u60a8\u4e5f\u53ef\u4ee5\u5c06\u4e0d\u9700\u8981\u7684\u4ed3\u5e93\u5220\u9664\uff0c\u5305\u62ec\u7cfb\u7edf\u9884\u7f6e\u4ed3\u5e93\u548c\u7b2c\u4e09\u65b9\u4ed3\u5e93\u3002

                                  1. \u627e\u5230\u5f85\u5220\u9664\u4ed3\u5e93\u6240\u5728\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                  2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u4f9d\u6b21\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u4ed3\u5e93 \uff0c\u8fdb\u5165 Helm \u4ed3\u5e93\u5217\u8868\u9875\u9762\u3002

                                  3. \u5728\u4ed3\u5e93\u5217\u8868\u9875\u9762\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684 Helm \u4ed3\u5e93\uff0c\u5728\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \u6309\u94ae\uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u70b9\u51fb \u5220\u9664 \u3002

                                  4. \u8f93\u5165\u4ed3\u5e93\u540d\u79f0\u8fdb\u884c\u786e\u8ba4\uff0c\u70b9\u51fb \u5220\u9664 \u3002

                                  5. \u8fd4\u56de Helm \u4ed3\u5e93\u5217\u8868\uff0c\u5c4f\u5e55\u63d0\u793a\u5220\u9664\u6210\u529f\u3002

                                  "},{"location":"end-user/kpanda/helm/multi-archi-helm.html","title":"Helm \u5e94\u7528\u591a\u67b6\u6784\u548c\u5347\u7ea7\u5bfc\u5165\u6b65\u9aa4","text":"

                                  \u901a\u5e38\u5728\u591a\u67b6\u6784\u96c6\u7fa4\u4e2d\uff0c\u4e5f\u4f1a\u4f7f\u7528\u591a\u67b6\u6784\u7684 Helm \u5305\u6765\u90e8\u7f72\u5e94\u7528\uff0c\u4ee5\u89e3\u51b3\u67b6\u6784\u5dee\u5f02\u5e26\u6765\u7684\u90e8\u7f72\u95ee\u9898\u3002 \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u5c06\u5355\u67b6\u6784 Helm \u5e94\u7528\u878d\u5408\u4e3a\u591a\u67b6\u6784\uff0c\u4ee5\u53ca\u591a\u67b6\u6784\u4e0e\u591a\u67b6\u6784 Helm \u5e94\u7528\u7684\u76f8\u4e92\u878d\u5408\u3002

                                  "},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_1","title":"\u5bfc\u5165","text":""},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_2","title":"\u5355\u67b6\u6784\u5bfc\u5165","text":"

                                  \u51c6\u5907\u597d\u5f85\u5bfc\u5165\u7684\u79bb\u7ebf\u5305 addon-offline-full-package-${version}-${arch}.tar.gz \u3002 \u628a\u8def\u5f84\u586b\u5199\u81f3 clusterConfig.yml \u914d\u7f6e\u6587\u4ef6\uff0c\u4f8b\u5982\uff1a

                                  addonPackage:\n  path: \"/home/addon-offline-full-package-v0.9.0-amd64.tar.gz\"\n

Then run the import command:

                                  ~/dce5-installer cluster-create -c /home/dce5/sample/clusterConfig.yaml -m /home/dce5/sample/manifest.yaml -d -j13\n
                                  "},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_3","title":"\u591a\u67b6\u6784\u878d\u5408","text":"

Prepare the offline package to merge, addon-offline-full-package-${version}-${arch}.tar.gz.

Taking addon-offline-full-package-v0.9.0-arm64.tar.gz as an example, run the import command:

                                  ~/dce5-installer import-addon -c /home/dce5/sample/clusterConfig.yaml --addon-path=/home/addon-offline-full-package-v0.9.0-arm64.tar.gz\n
                                  "},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_4","title":"\u5347\u7ea7","text":""},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_5","title":"\u5355\u67b6\u6784\u5347\u7ea7","text":"

Prepare the offline package to import, addon-offline-full-package-${version}-${arch}.tar.gz.

Fill in its path in the clusterConfig.yml config file, for example:

                                  addonPackage:\n  path: \"/home/addon-offline-full-package-v0.11.0-amd64.tar.gz\"\n

Then run the import command:

                                  ~/dce5-installer cluster-create -c /home/dce5/sample/clusterConfig.yaml -m /home/dce5/sample/manifest.yaml -d -j13\n
                                  "},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_6","title":"\u591a\u67b6\u6784\u878d\u5408","text":"

Prepare the offline package to merge, addon-offline-full-package-${version}-${arch}.tar.gz.

Taking addon-offline-full-package-v0.11.0-arm64.tar.gz as an example, run the import command:

                                  ~/dce5-installer import-addon -c /home/dce5/sample/clusterConfig.yaml --addon-path=/home/addon-offline-full-package-v0.11.0-arm64.tar.gz\n
                                  "},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_7","title":"\u6ce8\u610f\u4e8b\u9879","text":""},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_8","title":"\u78c1\u76d8\u7a7a\u95f4","text":"

The offline package is fairly large, and the process needs to unpack it and load images, so sufficient space must be reserved; otherwise the process may abort with a "no space left" error.
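A quick way to check is df; the paths below are examples only, so point them at wherever the offline package is unpacked and your container runtime stores image data:

df -h /home /var/lib/containerd\n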

                                  "},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_9","title":"\u5931\u8d25\u540e\u91cd\u8bd5","text":"

                                  \u5982\u679c\u5728\u591a\u67b6\u6784\u878d\u5408\u6b65\u9aa4\u6267\u884c\u5931\u8d25\uff0c\u91cd\u8bd5\u524d\u9700\u8981\u6e05\u7406\u4e00\u4e0b\u6b8b\u7559\uff1a

                                  rm -rf addon-offline-target-package\n
                                  "},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_10","title":"\u955c\u50cf\u7a7a\u95f4","text":"

                                  \u5982\u679c\u878d\u5408\u7684\u79bb\u7ebf\u5305\u4e2d\u5305\u542b\u4e86\u4e0e\u5bfc\u5165\u7684\u79bb\u7ebf\u5305\u4e0d\u4e00\u81f4\u7684\u955c\u50cf\u7a7a\u95f4\uff0c\u53ef\u80fd\u4f1a\u5728\u878d\u5408\u8fc7\u7a0b\u4e2d\u56e0\u4e3a\u955c\u50cf\u7a7a\u95f4\u4e0d\u5b58\u5728\u800c\u62a5\u9519\uff1a

                                  \u89e3\u51b3\u529e\u6cd5\uff1a\u53ea\u9700\u8981\u5728\u878d\u5408\u4e4b\u524d\u521b\u5efa\u597d\u8be5\u955c\u50cf\u7a7a\u95f4\u5373\u53ef\uff0c\u4f8b\u5982\u4e0a\u56fe\u62a5\u9519\u53ef\u901a\u8fc7\u521b\u5efa\u955c\u50cf\u7a7a\u95f4 localhost \u63d0\u524d\u907f\u514d\u3002

                                  "},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_11","title":"\u67b6\u6784\u51b2\u7a81","text":"

                                  \u5347\u7ea7\u81f3\u4f4e\u4e8e 0.12.0 \u7248\u672c\u7684 addon \u65f6\uff0c\u7531\u4e8e\u76ee\u6807\u79bb\u7ebf\u5305\u91cc\u7684 charts-syncer \u6ca1\u6709\u68c0\u67e5\u955c\u50cf\u5b58\u5728\u5219\u4e0d\u63a8\u9001\u529f\u80fd\uff0c\u56e0\u6b64\u4f1a\u5728\u5347\u7ea7\u7684\u8fc7\u7a0b\u4e2d\u4f1a\u91cd\u65b0\u628a\u591a\u67b6\u6784\u51b2\u6210\u5355\u67b6\u6784\u3002 \u4f8b\u5982\uff1a\u5728 v0.10 \u7248\u672c\u5c06 addon \u5b9e\u73b0\u4e3a\u591a\u67b6\u6784\uff0c\u6b64\u65f6\u82e5\u5347\u7ea7\u4e3a v0.11 \u7248\u672c\uff0c\u5219\u591a\u67b6\u6784 addon \u4f1a\u88ab\u8986\u76d6\u4e3a\u5355\u67b6\u6784\uff1b\u82e5\u5347\u7ea7\u4e3a 0.12.0 \u53ca\u4ee5\u4e0a\u7248\u672c\u5219\u4ecd\u80fd\u591f\u4fdd\u6301\u591a\u67b6\u6784\u3002

                                  "},{"location":"end-user/kpanda/helm/upload-helm.html","title":"\u4e0a\u4f20 Helm \u6a21\u677f","text":"

                                  \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u4e0a\u4f20 Helm \u6a21\u677f\uff0c\u64cd\u4f5c\u6b65\u9aa4\u89c1\u4e0b\u6587\u3002

                                  1. \u5f15\u5165 Helm \u4ed3\u5e93\uff0c\u64cd\u4f5c\u6b65\u9aa4\u53c2\u8003\u5f15\u5165\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93\u3002

                                  2. \u4e0a\u4f20 Helm Chart \u5230 Helm \u4ed3\u5e93\u3002

Upload via client | Upload via page

                                    Note

This method applies to Harbor, ChartMuseum, and JFrog type repos.

1. Log in to a node that can reach the Helm repo, upload the Helm binary to the node, and install the cm-push plugin (requires Internet access and Git installed in advance).

  For the plugin installation procedure, see Install the cm-push plugin.

2. Push the Helm Chart to the Helm repo by running the following command:

                                      helm cm-push ${charts-dir} ${HELM_REPO_URL} --username ${username} --password ${password}\n

Field descriptions:

                                      • charts-dir\uff1aHelm Chart \u7684\u76ee\u5f55\uff0c\u6216\u8005\u662f\u6253\u5305\u597d\u7684 Chart\uff08\u5373 .tgz \u6587\u4ef6\uff09\u3002
                                      • HELM_REPO_URL\uff1aHelm \u4ed3\u5e93\u7684 URL\u3002
                                      • username/password\uff1a\u6709\u63a8\u9001\u6743\u9650\u7684 Helm \u4ed3\u5e93\u7528\u6237\u540d\u548c\u5bc6\u7801\u3002
                                      • \u5982\u679c\u91c7\u7528 https \u8bbf\u95ee\u4e14\u9700\u8981\u8df3\u8fc7\u8bc1\u4e66\u9a8c\u8bc1\uff0c\u53ef\u6dfb\u52a0\u53c2\u6570 --insecure

                                    Note

This method only applies to Harbor type repos.

1. Log in to the Harbor web UI, making sure the logged-in user has push permission.

2. Go to the corresponding project, select the Helm Charts tab, and click the Upload button on the page to finish uploading the Helm Chart.

3. Sync the remote repo data

Manual sync | Auto sync

By default, Helm repo auto-refresh is not enabled for a cluster, so a manual sync is required; the rough steps are:

Go to Helm Apps -> Helm Repos, click the ⋮ button on the right of the repo list, and select Sync Repo to finish syncing the repo data.

To enable Helm repo auto-sync, go to Cluster Operations -> Cluster Settings -> Advanced Configuration and turn on the Helm repo auto-refresh switch.

                                  "},{"location":"end-user/kpanda/inspect/index.html","title":"\u96c6\u7fa4\u5de1\u68c0","text":"

                                  \u96c6\u7fa4\u5de1\u68c0\u53ef\u4ee5\u901a\u8fc7\u81ea\u52a8\u6216\u624b\u52a8\u65b9\u5f0f\uff0c\u5b9a\u671f\u6216\u968f\u65f6\u68c0\u67e5\u96c6\u7fa4\u7684\u6574\u4f53\u5065\u5eb7\u72b6\u6001\uff0c\u8ba9\u7ba1\u7406\u5458\u83b7\u5f97\u4fdd\u969c\u96c6\u7fa4\u5b89\u5168\u7684\u4e3b\u52a8\u6743\u3002 \u57fa\u4e8e\u5408\u7406\u7684\u5de1\u68c0\u8ba1\u5212\uff0c\u8fd9\u79cd\u4e3b\u52a8\u81ea\u53d1\u7684\u96c6\u7fa4\u68c0\u67e5\u53ef\u4ee5\u8ba9\u7ba1\u7406\u5458\u968f\u65f6\u638c\u63e1\u96c6\u7fa4\u72b6\u6001\uff0c\u6446\u8131\u4e4b\u524d\u51fa\u73b0\u6545\u969c\u65f6\u53ea\u80fd\u88ab\u52a8\u6392\u67e5\u95ee\u9898\u7684\u56f0\u5883\uff0c\u505a\u5230\u4e8b\u5148\u76d1\u63a7\u3001\u63d0\u524d\u9632\u8303\u3002

                                  \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u63d0\u4f9b\u7684\u96c6\u7fa4\u5de1\u68c0\u529f\u80fd\uff0c\u652f\u6301\u4ece\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u5bb9\u5668\u7ec4\uff08Pod\uff09\u4e09\u4e2a\u7ef4\u5ea6\u8fdb\u884c\u81ea\u5b9a\u4e49\u5de1\u68c0\u9879\uff0c\u5de1\u68c0\u7ed3\u675f\u540e\u4f1a\u81ea\u52a8\u751f\u6210\u53ef\u89c6\u5316\u7684\u5de1\u68c0\u62a5\u544a\u3002

                                  • \u96c6\u7fa4\u7ef4\u5ea6\uff1a\u68c0\u67e5\u96c6\u7fa4\u4e2d\u7cfb\u7edf\u7ec4\u4ef6\u7684\u8fd0\u884c\u60c5\u51b5\uff0c\u5305\u62ec\u96c6\u7fa4\u72b6\u6001\u3001\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\u4ee5\u53ca\u63a7\u5236\u8282\u70b9\u7279\u6709\u7684\u5de1\u68c0\u9879\u7b49\uff0c\u4f8b\u5982 kube-apiserver \u548c etcd \u7684\u72b6\u6001\u3002
                                  • \u8282\u70b9\u7ef4\u5ea6\uff1a\u5305\u62ec\u63a7\u5236\u8282\u70b9\u548c\u5de5\u4f5c\u8282\u70b9\u901a\u7528\u7684\u68c0\u67e5\u9879\uff0c\u4f8b\u5982\u8282\u70b9\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\u3001\u53e5\u67c4\u6570\u3001PID \u72b6\u6001\u3001\u7f51\u7edc\u72b6\u6001\u3002
                                  • \u5bb9\u5668\u7ec4\u7ef4\u5ea6\uff1a\u68c0\u67e5 Pod \u7684 CPU \u548c\u5185\u5b58\u4f7f\u7528\u60c5\u51b5\u3001\u8fd0\u884c\u72b6\u6001\u3001PV \u548c PVC \u7684\u72b6\u6001\u7b49\u3002

                                  \u5982\u9700\u4e86\u89e3\u6216\u6267\u884c\u5b89\u5168\u65b9\u9762\u7684\u5de1\u68c0\uff0c\u53ef\u53c2\u8003\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u7684\u5b89\u5168\u626b\u63cf\u7c7b\u578b\u3002

                                  "},{"location":"end-user/kpanda/inspect/config.html","title":"\u521b\u5efa\u5de1\u68c0\u914d\u7f6e","text":"

                                  \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u63d0\u4f9b\u96c6\u7fa4\u5de1\u68c0\u529f\u80fd\uff0c\u652f\u6301\u4ece\u96c6\u7fa4\u7ef4\u5ea6\u3001\u8282\u70b9\u7ef4\u5ea6\u3001\u5bb9\u5668\u7ec4\u7ef4\u5ea6\u8fdb\u884c\u5de1\u68c0\u3002

                                  • \u96c6\u7fa4\u7ef4\u5ea6\uff1a\u68c0\u67e5\u96c6\u7fa4\u4e2d\u7cfb\u7edf\u7ec4\u4ef6\u7684\u8fd0\u884c\u60c5\u51b5\uff0c\u5305\u62ec\u96c6\u7fa4\u72b6\u6001\u3001\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\uff0c\u4ee5\u53ca\u63a7\u5236\u8282\u70b9\u7279\u6709\u7684\u5de1\u68c0\u9879\u7b49\uff0c\u4f8b\u5982 kube-apiserver \u548c etcd \u7684\u72b6\u6001\u3002
                                  • \u8282\u70b9\u7ef4\u5ea6\uff1a\u5305\u62ec\u63a7\u5236\u8282\u70b9\u548c\u5de5\u4f5c\u8282\u70b9\u901a\u7528\u7684\u68c0\u67e5\u9879\uff0c\u4f8b\u5982\u8282\u70b9\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\u3001\u53e5\u67c4\u6570\u3001PID \u72b6\u6001\u3001\u7f51\u7edc\u72b6\u6001\u3002
                                  • \u5bb9\u5668\u7ec4\u7ef4\u5ea6\uff1a\u68c0\u67e5 Pod \u7684 CPU \u548c\u5185\u5b58\u4f7f\u7528\u60c5\u51b5\u3001\u8fd0\u884c\u72b6\u6001\u3001PV \u548c PVC \u7684\u72b6\u6001\u7b49\u3002

                                  \u4e0b\u9762\u4ecb\u7ecd\u5982\u4f55\u521b\u5efa\u5de1\u68c0\u914d\u7f6e\u3002

                                  "},{"location":"end-user/kpanda/inspect/config.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u96c6\u7fa4
                                  • \u6240\u9009\u96c6\u7fa4\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u4e14\u5df2\u7ecf\u5728\u96c6\u7fa4\u4e2d\u5b89\u88c5\u4e86 insight \u7ec4\u4ef6
                                  "},{"location":"end-user/kpanda/inspect/config.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                  1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u96c6\u7fa4\u5de1\u68c0 \u3002

                                  2. \u5728\u9875\u9762\u53f3\u4fa7\u70b9\u51fb \u5de1\u68c0\u914d\u7f6e \u3002

                                  3. \u53c2\u8003\u4ee5\u4e0b\u8bf4\u660e\u586b\u5199\u5de1\u68c0\u914d\u7f6e\uff0c\u7136\u540e\u5728\u9875\u9762\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002

                                    • \u96c6\u7fa4\uff1a\u4e0b\u62c9\u9009\u62e9\u8981\u5bf9\u54ea\u4e9b\u96c6\u7fa4\u8fdb\u884c\u5de1\u68c0\u3002\u5982\u679c\u9009\u62e9\u591a\u4e2a\u96c6\u7fa4\uff0c\u5219\u81ea\u52a8\u751f\u6210\u591a\u4e2a\u5de1\u68c0\u914d\u7f6e\uff08\u4ec5\u5de1\u68c0\u7684\u96c6\u7fa4\u4e0d\u4e00\u81f4\uff0c\u5176\u4ed6\u914d\u7f6e\u90fd\u5b8c\u5168\u4e00\u81f4\uff09
                                    • \u5b9a\u65f6\u5de1\u68c0\uff1a\u542f\u7528\u540e\u53ef\u6839\u636e\u4e8b\u5148\u8bbe\u7f6e\u7684\u5de1\u68c0\u9891\u7387\u5b9a\u671f\u81ea\u52a8\u6267\u884c\u96c6\u7fa4\u5de1\u68c0
                                    • \u5de1\u68c0\u9891\u7387\uff1a\u8bbe\u7f6e\u81ea\u52a8\u5de1\u68c0\u7684\u5468\u671f\uff0c\u4f8b\u5982\u6bcf\u5468\u4e8c\u4e0a\u5348\u5341\u70b9\u3002\u652f\u6301\u81ea\u5b9a\u4e49 CronExpression\uff0c\u53ef\u53c2\u8003 Cron \u65f6\u95f4\u8868\u8bed\u6cd5
                                    • \u5de1\u68c0\u8bb0\u5f55\u4fdd\u7559\u6761\u6570\uff1a\u7d2f\u8ba1\u6700\u591a\u4fdd\u7559\u591a\u5c11\u6761\u5de1\u68c0\u8bb0\u5f55\uff0c\u5305\u62ec\u6240\u6709\u96c6\u7fa4\u7684\u5de1\u68c0\u8bb0\u5f55
                                    • \u53c2\u6570\u914d\u7f6e\uff1a\u53c2\u6570\u914d\u7f6e\u5206\u4e3a\u96c6\u7fa4\u7ef4\u5ea6\u3001\u8282\u70b9\u7ef4\u5ea6\u3001\u5bb9\u5668\u7ec4\u7ef4\u5ea6\u4e09\u90e8\u5206\uff0c\u53ef\u4ee5\u6839\u636e\u573a\u666f\u9700\u6c42\u542f\u7528\u6216\u7981\u7528\u67d0\u4e9b\u5de1\u68c0\u9879\u3002

After an inspection config is created, it automatically appears in the inspection config list. Clicking the more-actions button on the right of a config lets you run an inspection immediately, modify the config, or delete the config and its inspection records.

• Click Inspect to run an inspection immediately based on the config.
• Click Inspection Config to modify the config.
• Click Delete to delete the config and its historical inspection records

                                  Note

                                  • \u5de1\u68c0\u914d\u7f6e\u521b\u5efa\u5b8c\u6210\u540e\uff0c\u5982\u679c\u542f\u7528\u4e86 \u5b9a\u65f6\u5de1\u68c0 \u914d\u7f6e\uff0c\u5219\u4f1a\u5728\u6307\u5b9a\u65f6\u95f4\u81ea\u52a8\u6267\u884c\u5de1\u68c0\u3002
                                  • \u5982\u672a\u542f\u7528 \u5b9a\u65f6\u5de1\u68c0 \u914d\u7f6e\uff0c\u5219\u9700\u8981\u624b\u52a8\u89e6\u53d1\u5de1\u68c0\u3002
                                  "},{"location":"end-user/kpanda/inspect/inspect.html","title":"\u6267\u884c\u96c6\u7fa4\u5de1\u68c0","text":"

                                  \u5de1\u68c0\u914d\u7f6e\u521b\u5efa\u5b8c\u6210\u540e\uff0c\u5982\u679c\u542f\u7528\u4e86 \u5b9a\u65f6\u5de1\u68c0 \u914d\u7f6e\uff0c\u5219\u4f1a\u5728\u6307\u5b9a\u65f6\u95f4\u81ea\u52a8\u6267\u884c\u5de1\u68c0\u3002\u5982\u672a\u542f\u7528 \u5b9a\u65f6\u5de1\u68c0 \u914d\u7f6e\uff0c\u5219\u9700\u8981\u624b\u52a8\u89e6\u53d1\u5de1\u68c0\u3002

                                  \u6b64\u9875\u4ecb\u7ecd\u5982\u4f55\u624b\u52a8\u6267\u884c\u96c6\u7fa4\u5de1\u68c0\u3002

                                  "},{"location":"end-user/kpanda/inspect/inspect.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u96c6\u7fa4
                                  • \u5df2\u521b\u5efa\u5de1\u68c0\u914d\u7f6e
                                  • \u6240\u9009\u96c6\u7fa4\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u4e14\u5df2\u7ecf\u5728\u96c6\u7fa4\u4e2d\u5b89\u88c5\u4e86 insight \u7ec4\u4ef6
                                  "},{"location":"end-user/kpanda/inspect/inspect.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                  \u6267\u884c\u5de1\u68c0\u65f6\uff0c\u652f\u6301\u52fe\u9009\u591a\u4e2a\u96c6\u7fa4\u8fdb\u884c\u6279\u91cf\u5de1\u68c0\uff0c\u6216\u8005\u4ec5\u5bf9\u67d0\u4e00\u4e2a\u96c6\u7fa4\u8fdb\u884c\u5355\u72ec\u5de1\u68c0\u3002

Batch inspection | Individual inspection
1. In the container management module's top-level navigation bar, click Cluster Inspection, then click Inspect on the right of the page.

2. Check the clusters to inspect, then click OK at the bottom of the page.

  • If multiple clusters are selected for simultaneous inspection, the system inspects each cluster according to its own inspection config.
  • If no inspection config has been set for a cluster, the system default config is used.

1. Go to the Cluster Inspection page.
2. Click the ⋮ more-actions button on the right of the corresponding inspection config, then select Inspect from the popup menu.

                                  "},{"location":"end-user/kpanda/inspect/report.html","title":"\u67e5\u770b\u5de1\u68c0\u62a5\u544a","text":"

                                  \u5de1\u68c0\u6267\u884c\u5b8c\u6210\u540e\uff0c\u53ef\u4ee5\u67e5\u770b\u5de1\u68c0\u8bb0\u5f55\u548c\u8be6\u7ec6\u7684\u5de1\u68c0\u62a5\u544a\u3002

                                  "},{"location":"end-user/kpanda/inspect/report.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  • \u5df2\u7ecf\u521b\u5efa\u4e86\u5de1\u68c0\u914d\u7f6e
                                  • \u5df2\u7ecf\u6267\u884c\u8fc7\u81f3\u5c11\u4e00\u6b21\u5de1\u68c0
                                  "},{"location":"end-user/kpanda/inspect/report.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                  1. \u8fdb\u5165\u96c6\u7fa4\u5de1\u68c0\u9875\u9762\uff0c\u70b9\u51fb\u76ee\u6807\u5de1\u68c0\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                                  2. \u70b9\u51fb\u60f3\u8981\u67e5\u770b\u7684\u5de1\u68c0\u8bb0\u5f55\u540d\u79f0\u3002

                                    • \u6bcf\u6267\u884c\u4e00\u6b21\u5de1\u68c0\uff0c\u5c31\u4f1a\u751f\u6210\u4e00\u6761\u5de1\u68c0\u8bb0\u5f55\u3002
                                    • \u5f53\u5de1\u68c0\u8bb0\u5f55\u8d85\u8fc7\u5de1\u68c0\u914d\u7f6e\u4e2d\u8bbe\u7f6e\u7684\u6700\u5927\u4fdd\u7559\u6761\u6570\u65f6\uff0c\u4ece\u6267\u884c\u65f6\u95f4\u6700\u65e9\u7684\u8bb0\u5f55\u5f00\u59cb\u5220\u9664\u3002

                                  3. \u67e5\u770b\u5de1\u68c0\u7684\u8be6\u7ec6\u4fe1\u606f\uff0c\u6839\u636e\u5de1\u68c0\u914d\u7f6e\u53ef\u80fd\u5305\u62ec\u96c6\u7fa4\u8d44\u6e90\u6982\u89c8\u3001\u7cfb\u7edf\u7ec4\u4ef6\u7684\u8fd0\u884c\u60c5\u51b5\u7b49\u3002

                                    \u5728\u9875\u9762\u53f3\u4e0a\u89d2\u53ef\u4ee5\u4e0b\u8f7d\u5de1\u68c0\u62a5\u544a\u6216\u5220\u9664\u8be5\u9879\u5de1\u68c0\u62a5\u544a\u3002

                                  "},{"location":"end-user/kpanda/namespaces/createns.html","title":"\u547d\u540d\u7a7a\u95f4","text":"

                                  \u547d\u540d\u7a7a\u95f4\u662f Kubernetes \u4e2d\u7528\u6765\u8fdb\u884c\u8d44\u6e90\u9694\u79bb\u7684\u4e00\u79cd\u62bd\u8c61\u3002\u4e00\u4e2a\u96c6\u7fa4\u4e0b\u53ef\u4ee5\u5305\u542b\u591a\u4e2a\u4e0d\u91cd\u540d\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u6bcf\u4e2a\u547d\u540d\u7a7a\u95f4\u4e2d\u7684\u8d44\u6e90\u76f8\u4e92\u9694\u79bb\u3002\u6709\u5173\u547d\u540d\u7a7a\u95f4\u7684\u8be6\u7ec6\u4ecb\u7ecd\uff0c\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u3002

                                  \u672c\u6587\u5c06\u4ecb\u7ecd\u547d\u540d\u7a7a\u95f4\u7684\u76f8\u5173\u64cd\u4f5c\u3002

                                  "},{"location":"end-user/kpanda/namespaces/createns.html#_2","title":"\u521b\u5efa\u547d\u540d\u7a7a\u95f4","text":"

                                  \u652f\u6301\u901a\u8fc7\u8868\u5355\u8f7b\u677e\u521b\u5efa\u547d\u540d\u7a7a\u95f4\uff0c\u4e5f\u652f\u6301\u901a\u8fc7\u7f16\u5199\u6216\u5bfc\u5165 YAML \u6587\u4ef6\u5feb\u901f\u521b\u5efa\u547d\u540d\u7a7a\u95f4\u3002

                                  Note

                                  • \u5728\u521b\u5efa\u547d\u540d\u7a7a\u95f4\u4e4b\u524d\uff0c\u9700\u8981\u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\u3002
                                  • \u96c6\u7fa4\u521d\u59cb\u5316\u540e\u901a\u5e38\u4f1a\u81ea\u52a8\u751f\u6210\u9ed8\u8ba4\u7684\u547d\u540d\u7a7a\u95f4 default \u3002\u4f46\u5bf9\u4e8e\u751f\u4ea7\u96c6\u7fa4\u800c\u8a00\uff0c\u4e3a\u4fbf\u4e8e\u7ba1\u7406\uff0c\u5efa\u8bae\u521b\u5efa\u5176\u4ed6\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u800c\u975e\u76f4\u63a5\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002
                                  "},{"location":"end-user/kpanda/namespaces/createns.html#_3","title":"\u8868\u5355\u521b\u5efa","text":"
                                  1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                                  2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u547d\u540d\u7a7a\u95f4 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae\u3002

                                  3. \u586b\u5199\u547d\u540d\u7a7a\u95f4\u7684\u540d\u79f0\uff0c\u914d\u7f6e\u5de5\u4f5c\u7a7a\u95f4\u548c\u6807\u7b7e\uff08\u53ef\u9009\u8bbe\u7f6e\uff09\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                                    Info

                                    • \u547d\u540d\u7a7a\u95f4\u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4\u4e4b\u540e\uff0c\u8be5\u547d\u540d\u7a7a\u95f4\u7684\u8d44\u6e90\u5c31\u4f1a\u5171\u4eab\u7ed9\u6240\u7ed1\u5b9a\u7684\u5de5\u4f5c\u7a7a\u95f4\u3002\u6709\u5173\u5de5\u4f5c\u7a7a\u95f4\u7684\u8be6\u7ec6\u8bf4\u660e\uff0c\u53ef\u53c2\u8003\u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7\u3002

                                    • \u547d\u540d\u7a7a\u95f4\u521b\u5efa\u5b8c\u6210\u540e\uff0c\u4ecd\u7136\u53ef\u4ee5\u7ed1\u5b9a/\u89e3\u7ed1\u5de5\u4f5c\u7a7a\u95f4\u3002

                                  4. \u70b9\u51fb \u786e\u5b9a \uff0c\u5b8c\u6210\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3002\u5728\u547d\u540d\u7a7a\u95f4\u5217\u8868\u53f3\u4fa7\uff0c\u70b9\u51fb \u2507 \uff0c\u53ef\u4ee5\u4ece\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9\u67e5\u770b YAML\u3001\u4fee\u6539\u6807\u7b7e\u3001\u7ed1\u5b9a/\u89e3\u7ed1\u5de5\u4f5c\u7a7a\u95f4\u3001\u914d\u989d\u7ba1\u7406\u3001\u5220\u9664\u7b49\u66f4\u591a\u64cd\u4f5c\u3002

                                  "},{"location":"end-user/kpanda/namespaces/createns.html#yaml","title":"YAML \u521b\u5efa","text":"
                                  1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                                  2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u547d\u540d\u7a7a\u95f4 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4fa7\u7684 YAML \u521b\u5efa \u6309\u94ae\u3002

                                  3. \u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u5185\u5bb9\uff0c\u6216\u8005\u4ece\u672c\u5730\u76f4\u63a5\u5bfc\u5165\u5df2\u6709\u7684 YAML \u6587\u4ef6\u3002

                                    \u8f93\u5165 YAML \u5185\u5bb9\u540e\uff0c\u70b9\u51fb \u4e0b\u8f7d \u53ef\u4ee5\u5c06\u8be5 YAML \u6587\u4ef6\u4fdd\u5b58\u5230\u672c\u5730\u3002

                                  4. \u6700\u540e\u5728\u5f39\u6846\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002
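A minimal Namespace manifest that can be pasted or imported in the dialog; demo-ns and the label are placeholders:

apiVersion: v1\nkind: Namespace\nmetadata:\n  name: demo-ns                       # namespace name\n  labels:\n    app.kubernetes.io/part-of: demo   # optional labels\n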

                                  "},{"location":"end-user/kpanda/namespaces/exclusive.html","title":"\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9","text":"

                                  \u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\u6307\u5728 kubernetes \u96c6\u7fa4\u4e2d\uff0c\u901a\u8fc7\u6c61\u70b9\u548c\u6c61\u70b9\u5bb9\u5fcd\u7684\u65b9\u5f0f\u5b9e\u73b0\u7279\u5b9a\u547d\u540d\u7a7a\u95f4\u5bf9\u4e00\u4e2a\u6216\u591a\u4e2a\u8282\u70b9 CPU\u3001\u5185\u5b58\u7b49\u8d44\u6e90\u7684\u72ec\u4eab\u3002\u4e3a\u7279\u5b9a\u547d\u540d\u7a7a\u95f4\u914d\u7f6e\u72ec\u4eab\u8282\u70b9\u540e\uff0c\u5176\u5b83\u975e\u6b64\u547d\u540d\u7a7a\u95f4\u7684\u5e94\u7528\u548c\u670d\u52a1\u5747\u4e0d\u80fd\u8fd0\u884c\u5728\u88ab\u72ec\u4eab\u7684\u8282\u70b9\u4e0a\u3002\u4f7f\u7528\u72ec\u4eab\u8282\u70b9\u53ef\u4ee5\u8ba9\u91cd\u8981\u5e94\u7528\u72ec\u4eab\u4e00\u90e8\u5206\u8ba1\u7b97\u8d44\u6e90\uff0c\u4ece\u800c\u548c\u5176\u4ed6\u5e94\u7528\u5b9e\u73b0\u7269\u7406\u9694\u79bb\u3002

                                  Note

                                  \u5728\u8282\u70b9\u88ab\u8bbe\u7f6e\u4e3a\u72ec\u4eab\u8282\u70b9\u524d\u5df2\u7ecf\u8fd0\u884c\u5728\u6b64\u8282\u70b9\u4e0a\u7684\u5e94\u7528\u548c\u670d\u52a1\u5c06\u4e0d\u4f1a\u53d7\u5f71\u54cd\uff0c\u4f9d\u7136\u4f1a\u6b63\u5e38\u8fd0\u884c\u5728\u8be5\u8282\u70b9\u4e0a\uff0c\u4ec5\u5f53\u8fd9\u4e9b Pod \u88ab\u5220\u9664\u6216\u91cd\u5efa\u65f6\uff0c\u624d\u4f1a\u8c03\u5ea6\u5230\u5176\u5b83\u975e\u72ec\u4eab\u8282\u70b9\u4e0a\u3002

                                  "},{"location":"end-user/kpanda/namespaces/exclusive.html#_2","title":"\u51c6\u5907\u5de5\u4f5c","text":"

                                  \u68c0\u67e5\u5f53\u524d\u96c6\u7fa4\u7684 kube-apiserver \u662f\u5426\u542f\u7528\u4e86 PodNodeSelector \u548c PodTolerationRestriction \u51c6\u5165\u63a7\u5236\u5668\u3002

                                  \u4f7f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\u529f\u80fd\u9700\u8981\u7528\u6237\u542f\u7528 kube-apiserver \u4e0a\u7684 PodNodeSelector \u548c PodTolerationRestriction \u4e24\u4e2a\u7279\u6027\u51c6\u5165\u63a7\u5236\u5668\uff08Admission Controllers\uff09\uff0c\u5173\u4e8e\u51c6\u5165\u63a7\u5236\u5668\u66f4\u591a\u8bf4\u660e\u8bf7\u53c2\u9605 kubernetes Admission Controllers Reference\u3002

                                  \u60a8\u53ef\u4ee5\u524d\u5f80\u5f53\u524d\u96c6\u7fa4\u4e0b\u4efb\u610f\u4e00\u4e2a Master \u8282\u70b9\u4e0a\u68c0\u67e5 kube-apiserver.yaml \u6587\u4ef6\u5185\u662f\u5426\u542f\u7528\u4e86\u8fd9\u4e24\u4e2a\u7279\u6027\uff0c\u4e5f\u53ef\u4ee5\u5728 Master \u8282\u70b9\u4e0a\u6267\u884c\u6267\u884c\u5982\u4e0b\u547d\u4ee4\u8fdb\u884c\u5feb\u901f\u68c0\u67e5\uff1a

[root@g-master1 ~]# cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep  enable-admission-plugins\n\n# Expected output:\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction\n
                                  "},{"location":"end-user/kpanda/namespaces/exclusive.html#_3","title":"\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9","text":"

                                  \u7531\u4e8e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u8fd0\u884c\u7740 kpanda\u3001ghippo\u3001insight \u7b49\u5e73\u53f0\u57fa\u7840\u7ec4\u4ef6\uff0c\u5728 Global \u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\u5c06\u53ef\u80fd\u5bfc\u81f4\u5f53\u7cfb\u7edf\u7ec4\u4ef6\u91cd\u542f\u540e\uff0c\u7cfb\u7edf\u7ec4\u4ef6\u65e0\u6cd5\u8c03\u5ea6\u5230\u88ab\u72ec\u4eab\u7684\u8282\u70b9\u4e0a\uff0c\u5f71\u54cd\u7cfb\u7edf\u7684\u6574\u4f53\u9ad8\u53ef\u7528\u80fd\u529b\u3002\u56e0\u6b64\uff0c\u901a\u5e38\u60c5\u51b5\u4e0b\uff0c\u6211\u4eec\u4e0d\u63a8\u8350\u7528\u6237\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\u7279\u6027\u3002

                                  \u5982\u679c\u60a8\u786e\u5b9e\u9700\u8981\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\uff0c\u8bf7\u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\u8fdb\u884c\u5f00\u542f\uff1a

                                  1. \u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684 kube-apiserver \u542f\u7528\u4e86 PodNodeSelector \u548c PodTolerationRestriction \u51c6\u5165\u63a7\u5236\u5668

                                    Note

If the cluster already has the two admission controllers enabled, skip this step and go directly to configuring system component tolerations.

Go to any master node of the current cluster and modify the kube-apiserver.yaml config file, or run the following command on a master node:

[root@g-master1 ~]# vi /etc/kubernetes/manifests/kube-apiserver.yaml\n\n# Expected content:\napiVersion: v1\nkind: Pod\nmetadata:\n    ......\nspec:\ncontainers:\n- command:\n    - kube-apiserver\n    ......\n    - --default-not-ready-toleration-seconds=300\n    - --default-unreachable-toleration-seconds=300\n    - --enable-admission-plugins=NodeRestriction   # list of enabled admission plugins\n    - --enable-aggregator-routing=False\n    - --enable-bootstrap-token-auth=true\n    - --endpoint-reconciler-type=lease\n    - --etcd-cafile=/etc/kubernetes/ssl/etcd/ca.crt\n    ......\n

                                    \u627e\u5230 --enable-admission-plugins \u53c2\u6570\uff0c\u52a0\u5165\uff08\u4ee5\u82f1\u6587\u9017\u53f7\u5206\u9694\u7684\uff09 PodNodeSelector \u548c PodTolerationRestriction \u51c6\u5165\u63a7\u5236\u5668\u3002\u53c2\u8003\u5982\u4e0b\uff1a

                                    # \u52a0\u5165 __ ,PodNodeSelector,PodTolerationRestriction__ \n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction \n
2. Add toleration annotations to the namespaces where the platform components reside

   After enabling the admission controllers, you need to add toleration annotations to the namespaces where the platform components reside, to ensure the high availability of the platform components.

   The system component namespaces of the 算丰 AI computing platform are currently as follows:

   | Namespace | System components |
   | --- | --- |
   | kpanda-system | kpanda |
   | hwameiStor-system | hwameiStor |
   | istio-system | istio |
   | metallb-system | metallb |
   | cert-manager-system | cert-manager |
   | contour-system | contour |
   | kubean-system | kubean |
   | ghippo-system | ghippo |
   | kcoral-system | kcoral |
   | kcollie-system | kcollie |
   | insight-system | insight, insight-agent |
   | ipavo-system | ipavo |
   | kairship-system | kairship |
   | karmada-system | karmada |
   | amamba-system | amamba, jenkins |
   | skoala-system | skoala |
   | mspider-system | mspider |
   | mcamel-system | mcamel-rabbitmq, mcamel-elasticsearch, mcamel-mysql, mcamel-redis, mcamel-kafka, mcamel-minio, mcamel-postgresql |
   | spidernet-system | spidernet |
   | kangaroo-system | kangaroo |
   | gmagpie-system | gmagpie |
   | dowl-system | dowl |

   Check which of the namespaces above exist in the current cluster, then run the following command to add the annotation scheduler.alpha.kubernetes.io/defaultTolerations: '[{"operator": "Exists", "effect": "NoSchedule", "key": "ExclusiveNamespace"}]' to each of them:

   ```bash
   kubectl annotate ns <namespace-name> scheduler.alpha.kubernetes.io/defaultTolerations='[{"operator": "Exists", "effect": "NoSchedule", "key": "ExclusiveNamespace"}]'
   ```

   Make sure to replace <namespace-name> with the name of the platform namespace you want to annotate.

3. Set exclusive nodes for the namespace in the UI

   Once you have confirmed that the PodNodeSelector and PodTolerationRestriction admission controllers are enabled on the cluster API server, follow the steps below to set exclusive nodes for a namespace in the UI of the 算丰 AI computing platform.

   1. On the cluster list page, click the cluster name, then click Namespaces in the left navigation bar.

   2. Click the namespace name, then click the Exclusive Nodes tab and click Add Node on the lower right.

   3. On the left side of the page, select which nodes this namespace will use exclusively; on the right side you can clear or remove a selected node; finally, click OK at the bottom.

   4. The list shows the exclusive nodes already assigned to this namespace; you can select Cancel Exclusive on the right of a node.

      After exclusivity is cancelled, Pods from other namespaces can also be scheduled onto that node.

                                  "},{"location":"end-user/kpanda/namespaces/exclusive.html#_4","title":"\u5728 \u975e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9","text":"

                                  \u5728 \u975e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\uff0c\u8bf7\u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\u8fdb\u884c\u5f00\u542f\uff1a

                                  1. \u4e3a\u5f53\u524d\u96c6\u7fa4\u7684 kube-apiserver \u542f\u7528\u4e86 PodNodeSelector \u548c PodTolerationRestriction \u51c6\u5165\u63a7\u5236\u5668

                                    Note

                                    \u5982\u679c\u96c6\u7fa4\u5df2\u542f\u7528\u4e86\u4e0a\u8ff0\u7684\u4e24\u4e2a\u51c6\u5165\u63a7\u5236\u5668\uff0c\u8bf7\u8df3\u8fc7\u6b64\u6b65\uff0c\u76f4\u63a5\u524d\u5f80\u754c\u9762\u4e3a\u547d\u540d\u7a7a\u95f4\u8bbe\u7f6e\u72ec\u4eab\u8282\u70b9

                                    \u524d\u5f80\u5f53\u524d\u96c6\u7fa4\u4e0b\u4efb\u610f\u4e00\u4e2a Master \u8282\u70b9\u4e0a\u4fee\u6539 kube-apiserver.yaml \u914d\u7f6e\u6587\u4ef6\uff0c\u4e5f\u53ef\u4ee5\u5728 Master \u8282\u70b9\u4e0a\u6267\u884c\u6267\u884c\u5982\u4e0b\u547d\u4ee4\u8fdb\u884c\u914d\u7f6e\uff1a

                                    [root@g-master1 ~]# vi /etc/kubernetes/manifests/kube-apiserver.yaml\n\n# \u9884\u671f\u8f93\u51fa\u5982\u4e0b\uff1a\napiVersion: v1\nkind: Pod\nmetadata:\n    ......\nspec:\ncontainers:\n- command:\n    - kube-apiserver\n    ......\n    - --default-not-ready-toleration-seconds=300\n    - --default-unreachable-toleration-seconds=300\n    - --enable-admission-plugins=NodeRestriction   #\u542f\u7528\u7684\u51c6\u5165\u63a7\u5236\u5668\u5217\u8868\n    - --enable-aggregator-routing=False\n    - --enable-bootstrap-token-auth=true\n    - --endpoint-reconciler-type=lease\n    - --etcd-cafile=/etc/kubernetes/ssl/etcd/ca.crt\n    ......\n

                                    \u627e\u5230 --enable-admission-plugins \u53c2\u6570\uff0c\u52a0\u5165\uff08\u4ee5\u82f1\u6587\u9017\u53f7\u5206\u9694\u7684\uff09 PodNodeSelector \u548c PodTolerationRestriction \u51c6\u5165\u63a7\u5236\u5668\u3002\u53c2\u8003\u5982\u4e0b\uff1a

                                    # \u52a0\u5165 __ ,PodNodeSelector,PodTolerationRestriction__ \n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction \n
2. Set exclusive nodes for the namespace in the UI

   Once you have confirmed that the PodNodeSelector and PodTolerationRestriction admission controllers are enabled on the cluster API server, follow the steps below to set exclusive nodes for a namespace in the UI of the 算丰 AI computing platform.

   1. On the cluster list page, click the cluster name, then click Namespaces in the left navigation bar.

   2. Click the namespace name, then click the Exclusive Nodes tab and click Add Node on the lower right.

   3. On the left side of the page, select which nodes this namespace will use exclusively; on the right side you can clear or remove a selected node; finally, click OK at the bottom.

   4. The list shows the exclusive nodes already assigned to this namespace; you can select Cancel Exclusive on the right of a node.

      After exclusivity is cancelled, Pods from other namespaces can also be scheduled onto that node.

3. Add toleration annotations to the namespaces of components that require high availability (optional)

   Run the following command to add the annotation scheduler.alpha.kubernetes.io/defaultTolerations: '[{"operator": "Exists", "effect": "NoSchedule", "key": "ExclusiveNamespace"}]' to the namespaces of components that require high availability:

   ```bash
   kubectl annotate ns <namespace-name> scheduler.alpha.kubernetes.io/defaultTolerations='[{"operator": "Exists", "effect": "NoSchedule", "key": "ExclusiveNamespace"}]'
   ```

   Make sure to replace <namespace-name> with the name of the platform namespace you want to annotate.

                                  "},{"location":"end-user/kpanda/namespaces/podsecurity.html","title":"\u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565","text":"

                                  \u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565\u6307\u5728 kubernetes \u96c6\u7fa4\u4e2d\uff0c\u901a\u8fc7\u4e3a\u6307\u5b9a\u547d\u540d\u7a7a\u95f4\u914d\u7f6e\u4e0d\u540c\u7684\u7b49\u7ea7\u548c\u6a21\u5f0f\uff0c\u5b9e\u73b0\u5728\u5b89\u5168\u7684\u5404\u4e2a\u65b9\u9762\u63a7\u5236 Pod \u7684\u884c\u4e3a\uff0c\u53ea\u6709\u6ee1\u8db3\u4e00\u5b9a\u7684\u6761\u4ef6\u7684 Pod \u624d\u4f1a\u88ab\u7cfb\u7edf\u63a5\u53d7\u3002\u5b83\u8bbe\u7f6e\u4e09\u4e2a\u7b49\u7ea7\u548c\u4e09\u79cd\u6a21\u5f0f\uff0c\u7528\u6237\u53ef\u4ee5\u6839\u636e\u81ea\u5df1\u7684\u9700\u6c42\u9009\u62e9\u66f4\u52a0\u5408\u9002\u7684\u65b9\u6848\u6765\u8bbe\u7f6e\u9650\u5236\u7b56\u7565\u3002

                                  Note

                                  \u4e00\u6761\u5b89\u5168\u6a21\u5f0f\u4ec5\u80fd\u914d\u7f6e\u4e00\u6761\u5b89\u5168\u7b56\u7565\u3002\u540c\u65f6\u8bf7\u8c28\u614e\u4e3a\u547d\u540d\u7a7a\u95f4\u914d\u7f6e enforce \u7684\u5b89\u5168\u6a21\u5f0f\uff0c\u8fdd\u53cd\u540e\u5c06\u4f1a\u5bfc\u81f4 Pod \u65e0\u6cd5\u521b\u5efa\u3002

                                  \u672c\u8282\u5c06\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u5bb9\u5668\u7ba1\u7406\u754c\u9762\u4e3a\u547d\u540d\u7a7a\u95f4\u914d\u7f6e\u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565\u3002

                                  "},{"location":"end-user/kpanda/namespaces/podsecurity.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u96c6\u7fa4\u7684\u7248\u672c\u9700\u8981\u5728 v1.22 \u4ee5\u4e0a\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                  • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 NS Admin \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                  "},{"location":"end-user/kpanda/namespaces/podsecurity.html#_3","title":"\u4e3a\u547d\u540d\u7a7a\u95f4\u914d\u7f6e\u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565","text":"
                                  1. \u9009\u62e9\u9700\u8981\u914d\u7f6e\u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u8fdb\u5165\u8be6\u60c5\u9875\u3002\u5728 \u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565 \u9875\u9762\u70b9\u51fb \u914d\u7f6e\u7b56\u7565 \uff0c\u8fdb\u5165\u914d\u7f6e\u9875\u3002

                                  2. \u5728\u914d\u7f6e\u9875\u70b9\u51fb \u6dfb\u52a0\u7b56\u7565 \uff0c\u5219\u4f1a\u51fa\u73b0\u4e00\u6761\u7b56\u7565\uff0c\u5305\u62ec\u5b89\u5168\u7ea7\u522b\u548c\u5b89\u5168\u6a21\u5f0f\uff0c\u4ee5\u4e0b\u662f\u5bf9\u5b89\u5168\u7ea7\u522b\u548c\u5b89\u5168\u7b56\u7565\u7684\u8be6\u7ec6\u4ecb\u7ecd\u3002

                                    \u5b89\u5168\u7ea7\u522b \u63cf\u8ff0 Privileged \u4e0d\u53d7\u9650\u5236\u7684\u7b56\u7565\uff0c\u63d0\u4f9b\u6700\u5927\u53ef\u80fd\u8303\u56f4\u7684\u6743\u9650\u8bb8\u53ef\u3002\u6b64\u7b56\u7565\u5141\u8bb8\u5df2\u77e5\u7684\u7279\u6743\u63d0\u5347\u3002 Baseline \u9650\u5236\u6027\u6700\u5f31\u7684\u7b56\u7565\uff0c\u7981\u6b62\u5df2\u77e5\u7684\u7b56\u7565\u63d0\u5347\u3002\u5141\u8bb8\u4f7f\u7528\u9ed8\u8ba4\u7684\uff08\u89c4\u5b9a\u6700\u5c11\uff09Pod \u914d\u7f6e\u3002 Restricted \u9650\u5236\u6027\u975e\u5e38\u5f3a\u7684\u7b56\u7565\uff0c\u9075\u5faa\u5f53\u524d\u7684\u4fdd\u62a4 Pod \u7684\u6700\u4f73\u5b9e\u8df5\u3002 \u5b89\u5168\u6a21\u5f0f \u63cf\u8ff0 Audit \u8fdd\u53cd\u6307\u5b9a\u7b56\u7565\u4f1a\u5728\u5ba1\u8ba1\u65e5\u5fd7\u4e2d\u6dfb\u52a0\u65b0\u7684\u5ba1\u8ba1\u4e8b\u4ef6\uff0cPod \u53ef\u4ee5\u88ab\u521b\u5efa\u3002 Warn \u8fdd\u53cd\u6307\u5b9a\u7b56\u7565\u4f1a\u8fd4\u56de\u7528\u6237\u53ef\u89c1\u7684\u544a\u8b66\u4fe1\u606f\uff0cPod \u53ef\u4ee5\u88ab\u521b\u5efa\u3002 Enforce \u8fdd\u53cd\u6307\u5b9a\u7b56\u7565\u4f1a\u5bfc\u81f4 Pod \u65e0\u6cd5\u521b\u5efa\u3002

                                  3. \u4e0d\u540c\u7684\u5b89\u5168\u7ea7\u522b\u5bf9\u5e94\u4e0d\u540c\u7684\u68c0\u67e5\u9879\uff0c\u82e5\u60a8\u4e0d\u77e5\u9053\u8be5\u5982\u4f55\u4e3a\u60a8\u7684\u547d\u540d\u7a7a\u95f4\u914d\u7f6e\uff0c\u53ef\u4ee5\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u7b56\u7565\u914d\u7f6e\u9879\u8bf4\u660e \u67e5\u770b\u8be6\u7ec6\u4fe1\u606f\u3002

                                  4. \u70b9\u51fb\u786e\u5b9a\uff0c\u82e5\u521b\u5efa\u6210\u529f\uff0c\u5219\u9875\u9762\u4e0a\u5c06\u51fa\u73b0\u60a8\u914d\u7f6e\u7684\u5b89\u5168\u7b56\u7565\u3002

                                  5. \u70b9\u51fb \u2507 \u8fd8\u53ef\u4ee5\u7f16\u8f91\u6216\u8005\u5220\u9664\u60a8\u914d\u7f6e\u7684\u5b89\u5168\u7b56\u7565\u3002
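The levels and modes above mirror the Kubernetes Pod Security Standards, which the built-in Pod Security Admission controller applies through namespace labels. As a minimal command-line sketch (assuming the UI maps onto those standard labels; the namespace name demo-ns is a hypothetical placeholder):

```bash
# Reject Pods that violate the "baseline" level, and surface warnings
# and audit events for violations of the stricter "restricted" level
kubectl label ns demo-ns \
  pod-security.kubernetes.io/enforce=baseline \
  pod-security.kubernetes.io/warn=restricted \
  pod-security.kubernetes.io/audit=restricted
```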

                                  "},{"location":"end-user/kpanda/network/create-ingress.html","title":"\u521b\u5efa\u8def\u7531\uff08Ingress\uff09","text":"

                                  \u5728 Kubernetes \u96c6\u7fa4\u4e2d\uff0cIngress \u516c\u5f00\u4ece\u96c6\u7fa4\u5916\u90e8\u5230\u96c6\u7fa4\u5185\u670d\u52a1\u7684 HTTP \u548c HTTPS \u8def\u7531\u3002 \u6d41\u91cf\u8def\u7531\u7531 Ingress \u8d44\u6e90\u4e0a\u5b9a\u4e49\u7684\u89c4\u5219\u63a7\u5236\u3002\u4e0b\u9762\u662f\u4e00\u4e2a\u5c06\u6240\u6709\u6d41\u91cf\u90fd\u53d1\u9001\u5230\u540c\u4e00 Service \u7684\u7b80\u5355 Ingress \u793a\u4f8b\uff1a

                                  Ingress \u662f\u5bf9\u96c6\u7fa4\u4e2d\u670d\u52a1\u7684\u5916\u90e8\u8bbf\u95ee\u8fdb\u884c\u7ba1\u7406\u7684 API \u5bf9\u8c61\uff0c\u5178\u578b\u7684\u8bbf\u95ee\u65b9\u5f0f\u662f HTTP\u3002Ingress \u53ef\u4ee5\u63d0\u4f9b\u8d1f\u8f7d\u5747\u8861\u3001SSL \u7ec8\u7ed3\u548c\u57fa\u4e8e\u540d\u79f0\u7684\u865a\u62df\u6258\u7ba1\u3002

                                  "},{"location":"end-user/kpanda/network/create-ingress.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                                  • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u5c06\u7528\u6237\u6388\u6743\u4e3a NS Editor \u89d2\u8272 \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002
                                  • \u5df2\u7ecf\u5b8c\u6210 Ingress \u5b9e\u4f8b\u7684\u521b\u5efa\uff0c\u5df2\u90e8\u7f72\u5e94\u7528\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u5e76\u4e14\u5df2\u521b\u5efa\u5bf9\u5e94 Service
                                  • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002
                                  "},{"location":"end-user/kpanda/network/create-ingress.html#_2","title":"\u521b\u5efa\u8def\u7531","text":"
                                  1. \u4ee5 NS Editor \u7528\u6237\u6210\u529f\u767b\u5f55\u540e\uff0c\u70b9\u51fb\u5de6\u4e0a\u89d2\u7684 \u96c6\u7fa4\u5217\u8868 \u8fdb\u5165 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u3002\u5728\u96c6\u7fa4\u5217\u8868\u4e2d\uff0c\u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\u3002

                                  2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u70b9\u51fb \u5bb9\u5668\u7f51\u7edc -> \u8def\u7531 \u8fdb\u5165\u670d\u52a1\u5217\u8868\uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 \u521b\u5efa\u8def\u7531 \u6309\u94ae\u3002

                                    Note

                                    \u4e5f\u53ef\u4ee5\u901a\u8fc7 YAML \u521b\u5efa \u4e00\u4e2a\u8def\u7531\u3002

                                  3. \u6253\u5f00 \u521b\u5efa\u8def\u7531 \u9875\u9762\uff0c\u8fdb\u884c\u914d\u7f6e\u3002\u53ef\u9009\u62e9\u4e24\u79cd\u534f\u8bae\u7c7b\u578b\uff0c\u53c2\u8003\u4ee5\u4e0b\u4e24\u4e2a\u53c2\u6570\u8868\u8fdb\u884c\u914d\u7f6e\u3002

                                  "},{"location":"end-user/kpanda/network/create-ingress.html#http","title":"\u521b\u5efa HTTP \u534f\u8bae\u8def\u7531","text":"

                                  \u8f93\u5165\u5982\u4e0b\u53c2\u6570\uff1a

                                  • \u8def\u7531\u540d\u79f0 \uff1a\u5fc5\u586b\uff0c\u8f93\u5165\u65b0\u5efa\u8def\u7531\u7684\u540d\u79f0\u3002
                                  • \u547d\u540d\u7a7a\u95f4 \uff1a\u5fc5\u586b\uff0c\u9009\u62e9\u65b0\u5efa\u670d\u52a1\u6240\u5728\u7684\u547d\u540d\u7a7a\u95f4\u3002\u5173\u4e8e\u547d\u540d\u7a7a\u95f4\u66f4\u591a\u4fe1\u606f\u8bf7\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6982\u8ff0\u3002
                                  • \u8bbe\u7f6e\u8def\u7531\u89c4\u5219 \uff1a
                                    • \u57df\u540d \uff1a\u5fc5\u586b\uff0c\u4f7f\u7528\u57df\u540d\u5bf9\u5916\u63d0\u4f9b\u8bbf\u95ee\u670d\u52a1\u3002\u9ed8\u8ba4\u4e3a\u96c6\u7fa4\u7684\u57df\u540d\u3002
                                    • \u534f\u8bae \uff1a\u5fc5\u586b\uff0c\u6307\u6388\u6743\u5165\u7ad9\u5230\u8fbe\u96c6\u7fa4\u670d\u52a1\u7684\u534f\u8bae\uff0c\u652f\u6301 HTTP \uff08\u4e0d\u9700\u8981\u8eab\u4efd\u8ba4\u8bc1\uff09\u6216 HTTPS\uff08\u9700\u9700\u8981\u914d\u7f6e\u8eab\u4efd\u8ba4\u8bc1\uff09 \u534f\u8bae\u3002 \u8fd9\u91cc\u9009\u62e9 HTTP \u534f\u8bae\u7684\u8def\u7531\u3002
                                    • \u8f6c\u53d1\u7b56\u7565 \uff1a\u9009\u586b\uff0c\u6307\u5b9a Ingress \u7684\u8bbf\u95ee\u7b56\u7565
                                    • \u8def\u5f84 \uff1a\u6307\u5b9a\u670d\u52a1\u8bbf\u95ee\u7684URL\u8def\u5f84\uff0c\u9ed8\u8ba4\u4e3a\u6839\u8def\u5f84
                                    • \u76ee\u6807\u670d\u52a1 \uff1a\u8fdb\u884c\u8def\u7531\u7684\u670d\u52a1\u540d\u79f0
                                    • \u76ee\u6807\u670d\u52a1\u7aef\u53e3 \uff1a\u670d\u52a1\u5bf9\u5916\u66b4\u9732\u7684\u7aef\u53e3
                                  • \u8d1f\u8f7d\u5747\u8861\u5668\u7c7b\u578b \uff1a\u5fc5\u586b\uff0cIngress \u5b9e\u4f8b\u7684\u4f7f\u7528\u8303\u56f4
                                    • \u5e73\u53f0\u7ea7\u8d1f\u8f7d\u5747\u8861\u5668 \uff1a\u540c\u4e00\u4e2a\u96c6\u7fa4\u5185\uff0c\u5171\u4eab\u540c\u4e00\u4e2a Ingress \u5b9e\u4f8b\uff0c\u5176\u4e2d Pod \u90fd\u53ef\u4ee5\u63a5\u6536\u5230\u7531\u8be5\u8d1f\u8f7d\u5747\u8861\u5206\u53d1\u7684\u8bf7\u6c42
                                    • \u79df\u6237\u7ea7\u8d1f\u8f7d\u5747\u8861\u5668 \uff1a\u79df\u6237\u8d1f\u8f7d\u5747\u8861\u5668\uff0cIngress \u5b9e\u4f8b\u72ec\u5c5e\u4e8e\u5f53\u524d\u547d\u540d\u7a7a\uff0c\u6216\u8005\u72ec\u5c5e\u4e8e\u67d0\u4e00\u5de5\u4f5c\u7a7a\u95f4\uff0c \u5e76\u4e14\u8bbe\u7f6e\u7684\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u5305\u542b\u5f53\u524d\u547d\u540d\u7a7a\u95f4\uff0c\u5176\u4e2d Pod \u90fd\u53ef\u4ee5\u63a5\u6536\u5230\u7531\u8be5\u8d1f\u8f7d\u5747\u8861\u5206\u53d1\u7684\u8bf7\u6c42
                                  • Ingress Class \uff1a\u9009\u586b\uff0c\u9009\u62e9\u5bf9\u5e94\u7684 Ingress \u5b9e\u4f8b\uff0c\u9009\u62e9\u540e\u5c06\u6d41\u91cf\u5bfc\u5165\u5230\u6307\u5b9a\u7684 Ingress \u5b9e\u4f8b\u3002
                                    • \u4e3a None \u65f6\u4f7f\u7528\u9ed8\u8ba4\u7684 DefaultClass\uff0c\u8bf7\u5728\u521b\u5efa Ingress \u5b9e\u4f8b\u65f6\u8bbe\u7f6e DefaultClass\uff0c \u66f4\u591a\u4fe1\u606f\u8bf7\u53c2\u8003 Ingress Class
                                    • \u82e5\u9009\u62e9\u5176\u4ed6\u5b9e\u4f8b\uff08\u5982 ngnix \uff09\uff0c\u5219\u4f1a\u51fa\u73b0\u9ad8\u7ea7\u914d\u7f6e\uff0c\u53ef\u8bbe\u7f6e \u4f1a\u8bdd\u4fdd\u6301 \u3001 \u8def\u5f84\u91cd\u5199 \u3001 \u91cd\u5b9a\u5411 \u548c \u6d41\u91cf\u5206\u53d1 \u3002
                                  • \u4f1a\u8bdd\u4fdd\u6301 \uff1a\u9009\u586b\uff0c\u4f1a\u8bdd\u4fdd\u6301\u5206\u4e3a \u4e09\u79cd\u7c7b\u578b\uff1a L4 \u6e90\u5730\u5740\u54c8\u5e0c \u3001 Cookie Key \u3001 L7 Header Name \uff0c\u5f00\u542f\u540e\u6839\u636e\u5bf9\u5e94\u89c4\u5219\u8fdb\u884c\u4f1a\u8bdd\u4fdd\u6301\u3002
                                    • L4 \u6e90\u5730\u5740\u54c8\u5e0c \uff1a\u5f00\u542f\u540e\u9ed8\u8ba4\u5728 Annotation \u4e2d\u52a0\u5165\u5982\u4e0b\u6807\u7b7e\uff1a nginx.ingress.kubernetes.io/upstream-hash-by: \"$binary_remote_addr\"
                                    • Cookie Key \uff1a\u5f00\u542f\u540e\u6765\u81ea\u7279\u5b9a\u5ba2\u6237\u7aef\u7684\u8fde\u63a5\u5c06\u4f20\u9012\u81f3\u76f8\u540c Pod\uff0c\u5f00\u542f\u540e \u9ed8\u8ba4\u5728 Annotation \u4e2d\u589e\u52a0\u5982\u4e0b\u53c2\u6570\uff1a nginx.ingress.kubernetes.io/affinity: \"cookie\"\u3002nginx.ingress.kubernetes.io/affinity-mode: persistent
                                    • L7 Header Name \uff1a\u5f00\u542f\u540e\u9ed8\u8ba4\u5728 Annotation \u4e2d\u52a0\u5165\u5982\u4e0b\u6807\u7b7e\uff1a nginx.ingress.kubernetes.io/upstream-hash-by: \"$http_x_forwarded_for\"
                                  • \u8def\u5f84\u91cd\u5199 \uff1a\u9009\u586b\uff0c rewrite-target \uff0c\u67d0\u4e9b\u573a\u666f\u4e2d\u540e\u7aef\u670d\u52a1\u66b4\u9732\u7684URL\u4e0eIngress\u89c4\u5219\u4e2d\u6307\u5b9a\u7684\u8def\u5f84\u4e0d\u540c\uff0c\u5982\u679c\u4e0d\u8fdb\u884cURL\u91cd\u5199\u914d\u7f6e\uff0c\u8bbf\u95ee\u4f1a\u51fa\u73b0\u9519\u8bef\u3002
                                  • \u91cd\u5b9a\u5411 \uff1a\u9009\u586b\uff0c permanent-redirect \uff0c\u6c38\u4e45\u91cd\u5b9a\u5411\uff0c\u8f93\u5165\u91cd\u5199\u8def\u5f84\u540e\uff0c\u8bbf\u95ee\u8def\u5f84\u91cd\u5b9a\u5411\u81f3\u8bbe\u7f6e\u7684\u5730\u5740\u3002
                                  • \u6d41\u91cf\u5206\u53d1 \uff1a\u9009\u586b\uff0c\u5f00\u542f\u540e\u5e76\u8bbe\u7f6e\u540e\uff0c\u6839\u636e\u8bbe\u5b9a\u6761\u4ef6\u8fdb\u884c\u6d41\u91cf\u5206\u53d1\u3002
                                    • \u57fa\u4e8e\u6743\u91cd \uff1a\u8bbe\u5b9a\u6743\u91cd\u540e\uff0c\u5728\u521b\u5efa\u7684 Ingress \u6dfb\u52a0\u5982\u4e0b Annotation\uff1a nginx.ingress.kubernetes.io/canary-weight: \"10\"
                                    • \u57fa\u4e8e Cookie \uff1a\u8bbe\u5b9a Cookie \u89c4\u5219\u540e\uff0c\u6d41\u91cf\u6839\u636e\u8bbe\u5b9a\u7684 Cookie \u6761\u4ef6\u8fdb\u884c\u6d41\u91cf\u5206\u53d1
                                    • \u57fa\u4e8e Header \uff1a \u8bbe\u5b9a Header \u89c4\u5219\u540e\uff0c\u6d41\u91cf\u6839\u636e\u8bbe\u5b9a\u7684 Header \u6761\u4ef6\u8fdb\u884c\u6d41\u91cf\u5206\u53d1
                                  • \u6807\u7b7e \uff1a\u9009\u586b\uff0c\u4e3a\u8def\u7531\u6dfb\u52a0\u6807\u7b7e
                                  • \u6ce8\u89e3 \uff1a\u9009\u586b\uff0c\u4e3a\u8def\u7531\u6dfb\u52a0\u6ce8\u89e3
                                  "},{"location":"end-user/kpanda/network/create-ingress.html#https","title":"\u521b\u5efa HTTPS \u534f\u8bae\u8def\u7531","text":"

                                  \u8f93\u5165\u5982\u4e0b\u53c2\u6570\uff1a

                                  Note

                                  \u6ce8\u610f\uff1a\u4e0e HTTP \u534f\u8bae \u8bbe\u7f6e\u8def\u7531\u89c4\u5219 \u4e0d\u540c\uff0c\u589e\u52a0\u5bc6\u94a5\u9009\u62e9\u8bc1\u4e66\uff0c\u5176\u4ed6\u57fa\u672c\u4e00\u81f4\u3002

                                  • \u534f\u8bae \uff1a\u5fc5\u586b\u6307\u6388\u6743\u5165\u7ad9\u5230\u8fbe\u96c6\u7fa4\u670d\u52a1\u7684\u534f\u8bae\uff0c\u652f\u6301 HTTP \uff08\u4e0d\u9700\u8981\u8eab\u4efd\u8ba4\u8bc1\uff09\u6216 HTTPS\uff08\u9700\u9700\u8981\u914d\u7f6e\u8eab\u4efd\u8ba4\u8bc1\uff09 \u534f\u8bae\u3002\u8fd9\u91cc\u9009\u62e9 HTTPS \u534f\u8bae\u7684\u8def\u7531\u3002
                                  • \u5bc6\u94a5 \uff1a\u5fc5\u586b\uff0cHttps TLS \u8bc1\u4e66\uff0c\u521b\u5efa\u79d8\u94a5\u3002
                                  "},{"location":"end-user/kpanda/network/create-ingress.html#_3","title":"\u5b8c\u6210\u8def\u7531\u521b\u5efa","text":"

                                  \u914d\u7f6e\u5b8c\u6240\u6709\u53c2\u6570\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u6309\u94ae\uff0c\u81ea\u52a8\u8fd4\u56de\u8def\u7531\u5217\u8868\u3002\u5728\u5217\u8868\u53f3\u4fa7\uff0c\u70b9\u51fb \u2507 \uff0c\u53ef\u4ee5\u4fee\u6539\u6216\u5220\u9664\u6240\u9009\u8def\u7531\u3002

                                  "},{"location":"end-user/kpanda/network/create-services.html","title":"\u521b\u5efa\u670d\u52a1\uff08Service\uff09","text":"

                                  \u5728 Kubernetes \u96c6\u7fa4\u4e2d\uff0c\u6bcf\u4e2a Pod \u90fd\u6709\u4e00\u4e2a\u5185\u90e8\u72ec\u7acb\u7684 IP \u5730\u5740\uff0c\u4f46\u662f\u5de5\u4f5c\u8d1f\u8f7d\u4e2d\u7684 Pod \u53ef\u80fd\u4f1a\u88ab\u968f\u65f6\u521b\u5efa\u548c\u5220\u9664\uff0c\u76f4\u63a5\u4f7f\u7528 Pod IP \u5730\u5740\u5e76\u4e0d\u80fd\u5bf9\u5916\u63d0\u4f9b\u670d\u52a1\u3002

                                  \u8fd9\u5c31\u9700\u8981\u521b\u5efa\u670d\u52a1\uff0c\u901a\u8fc7\u670d\u52a1\u60a8\u4f1a\u83b7\u5f97\u4e00\u4e2a\u56fa\u5b9a\u7684 IP \u5730\u5740\uff0c\u4ece\u800c\u5b9e\u73b0\u5de5\u4f5c\u8d1f\u8f7d\u524d\u7aef\u548c\u540e\u7aef\u7684\u89e3\u8026\uff0c\u8ba9\u5916\u90e8\u7528\u6237\u80fd\u591f\u8bbf\u95ee\u670d\u52a1\u3002\u540c\u65f6\uff0c\u670d\u52a1\u8fd8\u63d0\u4f9b\u4e86\u8d1f\u8f7d\u5747\u8861\uff08LoadBalancer\uff09\u529f\u80fd\uff0c\u4f7f\u7528\u6237\u80fd\u4ece\u516c\u7f51\u8bbf\u95ee\u5230\u5de5\u4f5c\u8d1f\u8f7d\u3002

                                  "},{"location":"end-user/kpanda/network/create-services.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                  • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u5c06\u7528\u6237\u6388\u6743\u4e3a NS Editor \u89d2\u8272 \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                  • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                                  "},{"location":"end-user/kpanda/network/create-services.html#_2","title":"\u521b\u5efa\u670d\u52a1","text":"
                                  1. \u4ee5 NS Editor \u7528\u6237\u6210\u529f\u767b\u5f55\u540e\uff0c\u70b9\u51fb\u5de6\u4e0a\u89d2\u7684 \u96c6\u7fa4\u5217\u8868 \u8fdb\u5165 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u3002\u5728\u96c6\u7fa4\u5217\u8868\u4e2d\uff0c\u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\u3002

                                  2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u70b9\u51fb \u5bb9\u5668\u7f51\u7edc -> \u670d\u52a1 \u8fdb\u5165\u670d\u52a1\u5217\u8868\uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 \u521b\u5efa\u670d\u52a1 \u6309\u94ae\u3002

                                    Tip

                                    \u4e5f\u53ef\u4ee5\u901a\u8fc7 YAML \u521b\u5efa \u4e00\u4e2a\u670d\u52a1\u3002

                                  3. \u6253\u5f00 \u521b\u5efa\u670d\u52a1 \u9875\u9762\uff0c\u9009\u62e9\u4e00\u79cd\u8bbf\u95ee\u7c7b\u578b\uff0c\u53c2\u8003\u4ee5\u4e0b\u51e0\u4e2a\u53c2\u6570\u8868\u8fdb\u884c\u914d\u7f6e\u3002

                                  "},{"location":"end-user/kpanda/network/create-services.html#clusterip","title":"\u521b\u5efa ClusterIP \u670d\u52a1","text":"

                                  \u70b9\u9009 \u96c6\u7fa4\u5185\u8bbf\u95ee\uff08ClusterIP\uff09 \uff0c\u8fd9\u662f\u6307\u901a\u8fc7\u96c6\u7fa4\u7684\u5185\u90e8 IP \u66b4\u9732\u670d\u52a1\uff0c\u9009\u62e9\u6b64\u9879\u7684\u670d\u52a1\u53ea\u80fd\u5728\u96c6\u7fa4\u5185\u90e8\u8bbf\u95ee\u3002\u8fd9\u662f\u9ed8\u8ba4\u7684\u670d\u52a1\u7c7b\u578b\u3002\u53c2\u8003\u4e0b\u8868\u914d\u7f6e\u53c2\u6570\u3002

                                  \u53c2\u6570 \u8bf4\u660e \u4e3e\u4f8b\u503c \u8bbf\u95ee\u7c7b\u578b \u3010\u7c7b\u578b\u3011\u5fc5\u586b\u3010\u542b\u4e49\u3011\u6307\u5b9a Pod \u670d\u52a1\u53d1\u73b0\u7684\u65b9\u5f0f\uff0c\u8fd9\u91cc\u9009\u62e9\u96c6\u7fa4\u5185\u8bbf\u95ee\uff08ClusterIP\uff09\u3002 ClusterIP \u670d\u52a1\u540d\u79f0 \u3010\u7c7b\u578b\u3011\u5fc5\u586b\u3010\u542b\u4e49\u3011\u8f93\u5165\u65b0\u5efa\u670d\u52a1\u7684\u540d\u79f0\u3002\u3010\u6ce8\u610f\u3011\u8bf7\u8f93\u5165 4 \u5230 63 \u4e2a\u5b57\u7b26\u7684\u5b57\u7b26\u4e32\uff0c\u53ef\u4ee5\u5305\u542b\u5c0f\u5199\u82f1\u6587\u5b57\u6bcd\u3001\u6570\u5b57\u548c\u4e2d\u5212\u7ebf\uff08-\uff09\uff0c\u5e76\u4ee5\u5c0f\u5199\u82f1\u6587\u5b57\u6bcd\u5f00\u5934\uff0c\u5c0f\u5199\u82f1\u6587\u5b57\u6bcd\u6216\u6570\u5b57\u7ed3\u5c3e\u3002 Svc-01 \u547d\u540d\u7a7a\u95f4 \u3010\u7c7b\u578b\u3011\u5fc5\u586b\u3010\u542b\u4e49\u3011\u9009\u62e9\u65b0\u5efa\u670d\u52a1\u6240\u5728\u7684\u547d\u540d\u7a7a\u95f4\u3002\u5173\u4e8e\u547d\u540d\u7a7a\u95f4\u66f4\u591a\u4fe1\u606f\u8bf7\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6982\u8ff0\u3002\u3010\u6ce8\u610f\u3011\u8bf7\u8f93\u5165 4 \u5230 63 \u4e2a\u5b57\u7b26\u7684\u5b57\u7b26\u4e32\uff0c\u53ef\u4ee5\u5305\u542b\u5c0f\u5199\u82f1\u6587\u5b57\u6bcd\u3001\u6570\u5b57\u548c\u4e2d\u5212\u7ebf\uff08-\uff09\uff0c\u5e76\u4ee5\u5c0f\u5199\u82f1\u6587\u5b57\u6bcd\u5f00\u5934\uff0c\u5c0f\u5199\u82f1\u6587\u5b57\u6bcd\u6216\u6570\u5b57\u7ed3\u5c3e\u3002 default \u6807\u7b7e\u9009\u62e9\u5668 \u3010\u7c7b\u578b\u3011\u5fc5\u586b\u3010\u542b\u4e49\u3011\u6dfb\u52a0\u6807\u7b7e\uff0cService \u6839\u636e\u6807\u7b7e\u9009\u62e9 Pod\uff0c\u586b\u5199\u540e\u70b9\u51fb\u201c\u6dfb\u52a0\u201d\u3002\u4e5f\u53ef\u4ee5\u5f15\u7528\u5df2\u6709\u5de5\u4f5c\u8d1f\u8f7d\u7684\u6807\u7b7e\uff0c\u70b9\u51fb \u5f15\u7528\u8d1f\u8f7d\u6807\u7b7e \uff0c\u5728\u5f39\u51fa\u7684\u7a97\u53e3\u4e2d\u9009\u62e9\u8d1f\u8f7d\uff0c\u7cfb\u7edf\u4f1a\u9ed8\u8ba4\u5c06\u6240\u9009\u7684\u8d1f\u8f7d\u6807\u7b7e\u4f5c\u4e3a\u9009\u62e9\u5668\u3002 app:job01 \u7aef\u53e3\u914d\u7f6e \u3010\u7c7b\u578b\u3011\u5fc5\u586b\u3010\u542b\u4e49\u3011\u4e3a\u670d\u52a1\u6dfb\u52a0\u534f\u8bae\u7aef\u53e3\uff0c\u9700\u8981\u5148\u9009\u62e9\u7aef\u53e3\u534f\u8bae\u7c7b\u578b\uff0c\u76ee\u524d\u652f\u6301 TCP\u3001UDP \u4e24\u79cd\u4f20\u8f93\u534f\u8bae\u3002\u7aef\u53e3\u540d\u79f0\uff1a\u8f93\u5165\u81ea\u5b9a\u4e49\u7684\u7aef\u53e3\u7684\u540d\u79f0\u3002\u670d\u52a1\u7aef\u53e3\uff08port\uff09\uff1aPod \u5bf9\u5916\u63d0\u4f9b\u670d\u52a1\u7684\u8bbf\u95ee\u7aef\u53e3\u3002\u5bb9\u5668\u7aef\u53e3\uff08targetport\uff09\uff1a\u5de5\u4f5c\u8d1f\u8f7d\u5b9e\u9645\u76d1\u542c\u7684\u5bb9\u5668\u7aef\u53e3\uff0c\u7528\u6765\u5bf9\u96c6\u7fa4\u5185\u66b4\u9732\u670d\u52a1\u3002 \u4f1a\u8bdd\u4fdd\u6301 \u3010\u7c7b\u578b\u3011\u9009\u586b\u3010\u542b\u4e49\u3011\u5f00\u542f\u540e\uff0c\u76f8\u540c\u5ba2\u6237\u7aef\u7684\u8bf7\u6c42\u5c06\u8f6c\u53d1\u81f3\u540c\u4e00 Pod \u5f00\u542f \u4f1a\u8bdd\u4fdd\u6301\u6700\u5927\u65f6\u957f \u3010\u7c7b\u578b\u3011\u9009\u586b\u3010\u542b\u4e49\u3011\u5f00\u542f\u4f1a\u8bdd\u4fdd\u6301\u540e\uff0c\u4fdd\u6301\u7684\u6700\u5927\u65f6\u957f\uff0c\u9ed8\u8ba4\u4e3a 30 \u79d2 30 \u79d2 \u6ce8\u89e3 \u3010\u7c7b\u578b\u3011\u9009\u586b\u3010\u542b\u4e49\u3011\u4e3a\u670d\u52a1\u6dfb\u52a0\u6ce8\u89e3"},{"location":"end-user/kpanda/network/create-services.html#nodeport","title":"\u521b\u5efa NodePort \u670d\u52a1","text":"
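As a minimal sketch of the equivalent manifest (the names, labels, and ports are hypothetical placeholders):

```bash
kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
  name: svc-01
  namespace: default
spec:
  type: ClusterIP              # reachable only from within the cluster
  selector:
    app: job01                 # label selector for the backend Pods
  ports:
  - name: http
    protocol: TCP
    port: 80                   # service port
    targetPort: 8080           # container port the workload listens on
  sessionAffinity: ClientIP    # forward requests from the same client to the same Pod
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 30       # maximum session duration
EOF
```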

"},{"location":"end-user/kpanda/network/create-services.html#nodeport","title":"Create a NodePort Service","text":"

Select Node access (NodePort), which exposes the service through the IP of each node and a static port (NodePort). A NodePort service routes to an automatically created ClusterIP service. You can reach a NodePort service from outside the cluster by requesting <node IP>:<node port>. Configure the parameters as in the table below.

| Parameter | Description | Example |
| --- | --- | --- |
| Access type | Required. How the Pod service is discovered; here, select node access (NodePort). | NodePort |
| Service name | Required. The name of the new service. Note: enter a string of 4 to 63 characters that may contain lowercase letters, digits, and hyphens (-), begins with a lowercase letter, and ends with a lowercase letter or digit. | svc-01 |
| Namespace | Required. The namespace where the new service is located. For more information about namespaces, see Namespace Overview. The same naming rule as above applies. | default |
| Label selector | Required. Add labels; the Service selects Pods by label. Click "Add" after filling one in. You can also reference the labels of an existing workload: click Reference Workload Label and select a workload in the pop-up window; the system uses the selected workload's labels as the selector by default. | |
| Port configuration | Required. Add protocol ports to the service; first select a port protocol type (TCP and UDP are currently supported). Port name: a custom name for the port. Service port (port): the access port through which the Pod provides the service externally; by default, for convenience, it is set to the same value as the container port. Container port (targetport): the container port the workload actually listens on. Node port (nodeport): the port on the node that receives traffic forwarded from the ClusterIP service and serves as the entry point for external traffic. | |
| Session affinity | Optional. When enabled, requests from the same client are forwarded to the same Pod, and the Service's .spec.sessionAffinity is set to ClientIP. For details, see Session Affinity for Services. | Enabled |
| Maximum session duration | Optional. The maximum duration a session is kept after session affinity is enabled; the default timeout is 30 seconds (.spec.sessionAffinityConfig.clientIP.timeoutSeconds defaults to 30 seconds). | 30 seconds |
| Annotations | Optional. Add annotations to the service. | |
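As a minimal sketch of the equivalent manifest (the names and ports are hypothetical placeholders; after applying it, the service is reachable at http://<node IP>:30080 from outside the cluster):

```bash
kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
  name: svc-02
  namespace: default
spec:
  type: NodePort
  selector:
    app: job01
  ports:
  - name: http
    protocol: TCP
    port: 80           # service port (set to the container port by default)
    targetPort: 80     # container port the workload listens on
    nodePort: 30080    # static port opened on every node (default range 30000-32767)
EOF
```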

"},{"location":"end-user/kpanda/network/create-services.html#loadbalancer","title":"Create a LoadBalancer Service","text":"

Select Load balancing (LoadBalancer), which exposes the service externally using a cloud provider's load balancer. The external load balancer can route traffic to the automatically created NodePort and ClusterIP services. Configure the parameters as in the table below.

| Parameter | Description | Example |
| --- | --- | --- |
| Access type | Required. How the Pod service is discovered; here, select load balancing (LoadBalancer). | LoadBalancer |
| Service name | Required. The name of the new service. Note: enter a string of 4 to 63 characters that may contain lowercase letters, digits, and hyphens (-), begins with a lowercase letter, and ends with a lowercase letter or digit. | svc-01 |
| Namespace | Required. The namespace where the new service is located. For more information about namespaces, see Namespace Overview. The same naming rule as above applies. | default |
| External traffic policy | Required. Sets the external traffic policy. Cluster: traffic can be forwarded to Pods on all nodes in the cluster. Local: traffic is only sent to Pods on the receiving node. | |
| Label selector | Required. Add labels; the Service selects Pods by label. Click "Add" after filling one in. You can also reference the labels of an existing workload: click Reference Workload Label and select a workload in the pop-up window; the system uses the selected workload's labels as the selector by default. | |
| Load balancer type | Required. The type of load balancer to use; MetalLB and others are currently supported. | |
| MetalLB IP pool | Required. When the selected load balancer type is MetalLB, the LoadBalancer Service allocates IP addresses from this pool by default and announces all IP addresses in the pool via ARP. | |
| Load balancer address | Required. 1. If a public cloud CloudProvider is used, fill in the load balancer address provided by the cloud vendor. 2. If the load balancer type above is MetalLB, an IP is taken from the IP pool above by default; if left empty, one is obtained automatically. | |
| Port configuration | Required. Add protocol ports to the service; first select a port protocol type (TCP and UDP are currently supported). Port name: a custom name for the port. Service port (port): the access port through which the Pod provides the service externally; by default, for convenience, it is set to the same value as the container port. Container port (targetport): the container port the workload actually listens on. Node port (nodeport): the port on the node that receives traffic forwarded from the ClusterIP service and serves as the entry point for external traffic. | |
| Annotations | Optional. Add annotations to the service. | |
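As a minimal sketch of the equivalent manifest, assuming MetalLB as the load balancer (the pool name, labels, and ports are hypothetical placeholders; the metallb.universe.tf/address-pool annotation is one way MetalLB selects the IP pool to allocate from):

```bash
kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
  name: svc-03
  namespace: default
  annotations:
    # Hypothetical MetalLB pool; the external IP is allocated from it
    metallb.universe.tf/address-pool: default-pool
spec:
  type: LoadBalancer
  externalTrafficPolicy: Local   # send traffic only to Pods on the receiving node
  selector:
    app: job01
  ports:
  - name: http
    protocol: TCP
    port: 80
    targetPort: 80
EOF
```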

"},{"location":"end-user/kpanda/network/create-services.html#externalname","title":"Create an ExternalName Service","text":"

Select External service (ExternalName), which exposes the service by mapping it to an external domain name. A service of this type does not create the typical ClusterIP or NodePort; instead, requests are redirected to the external service address through DNS name resolution. Configure the parameters as in the table below.

| Parameter | Description | Example |
| --- | --- | --- |
| Access type | Required. How the Pod service is discovered; here, select external service (ExternalName). | ExternalName |
| Service name | Required. The name of the new service. Note: enter a string of 4 to 63 characters that may contain lowercase letters, digits, and hyphens (-), begins with a lowercase letter, and ends with a lowercase letter or digit. | svc-01 |
| Namespace | Required. The namespace where the new service is located. For more information about namespaces, see Namespace Overview. The same naming rule as above applies. | default |
| Domain | Required. | |
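As a minimal sketch of the equivalent manifest (the service name and external domain are hypothetical placeholders):

```bash
kubectl apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
  name: svc-04
  namespace: default
spec:
  type: ExternalName
  externalName: demo.example.com   # DNS name that requests to svc-04 resolve to
EOF
```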

"},{"location":"end-user/kpanda/network/create-services.html#_3","title":"Finish Creating the Service","text":"

After configuring all the parameters, click the OK button; you are automatically returned to the service list. On the right side of the list, click ┇ to modify or delete the selected service.

                                  "},{"location":"end-user/kpanda/network/network-policy.html","title":"\u7f51\u7edc\u7b56\u7565","text":"

                                  \u7f51\u7edc\u7b56\u7565\uff08NetworkPolicy\uff09\u53ef\u4ee5\u5728 IP \u5730\u5740\u6216\u7aef\u53e3\u5c42\u9762\uff08OSI \u7b2c 3 \u5c42\u6216\u7b2c 4 \u5c42\uff09\u63a7\u5236\u7f51\u7edc\u6d41\u91cf\u3002\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u76ee\u524d\u652f\u6301\u521b\u5efa\u57fa\u4e8e Pod \u6216\u547d\u540d\u7a7a\u95f4\u7684\u7f51\u7edc\u7b56\u7565\uff0c\u652f\u6301\u901a\u8fc7\u6807\u7b7e\u9009\u62e9\u5668\u6765\u8bbe\u5b9a\u54ea\u4e9b\u6d41\u91cf\u53ef\u4ee5\u8fdb\u5165\u6216\u79bb\u5f00\u5e26\u6709\u7279\u5b9a\u6807\u7b7e\u7684 Pod\u3002

                                  \u6709\u5173\u7f51\u7edc\u7b56\u7565\u7684\u66f4\u591a\u8be6\u60c5\uff0c\u53ef\u53c2\u8003 Kubernetes \u5b98\u65b9\u6587\u6863\u7f51\u7edc\u7b56\u7565\u3002

                                  "},{"location":"end-user/kpanda/network/network-policy.html#_2","title":"\u521b\u5efa\u7f51\u7edc\u7b56\u7565","text":"

                                  \u76ee\u524d\u652f\u6301\u901a\u8fc7 YAML \u548c\u8868\u5355\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u7f51\u7edc\u7b56\u7565\uff0c\u8fd9\u4e24\u79cd\u65b9\u5f0f\u5404\u6709\u4f18\u52a3\uff0c\u53ef\u4ee5\u6ee1\u8db3\u4e0d\u540c\u7528\u6237\u7684\u4f7f\u7528\u9700\u6c42\u3002

                                  \u901a\u8fc7 YAML \u521b\u5efa\u6b65\u9aa4\u66f4\u5c11\u3001\u66f4\u9ad8\u6548\uff0c\u4f46\u95e8\u69db\u8981\u6c42\u8f83\u9ad8\uff0c\u9700\u8981\u719f\u6089\u7f51\u7edc\u7b56\u7565\u7684 YAML \u6587\u4ef6\u914d\u7f6e\u3002

                                  \u901a\u8fc7\u8868\u5355\u521b\u5efa\u66f4\u76f4\u89c2\u66f4\u7b80\u5355\uff0c\u6839\u636e\u63d0\u793a\u586b\u5199\u5bf9\u5e94\u7684\u503c\u5373\u53ef\uff0c\u4f46\u6b65\u9aa4\u66f4\u52a0\u7e41\u7410\u3002

                                  "},{"location":"end-user/kpanda/network/network-policy.html#yaml","title":"YAML \u521b\u5efa","text":"
                                  1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u7f51\u7edc -> \u7f51\u7edc\u7b56\u7565 -> YAML \u521b\u5efa \u3002

                                  2. \u5728\u5f39\u6846\u4e2d\u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u7136\u540e\u5728\u5f39\u6846\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                                  "},{"location":"end-user/kpanda/network/network-policy.html#_3","title":"\u8868\u5355\u521b\u5efa","text":"
                                  1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u7f51\u7edc -> \u7f51\u7edc\u7b56\u7565 -> \u521b\u5efa\u7b56\u7565 \u3002

                                  2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\u3002

                                    \u540d\u79f0\u548c\u547d\u540d\u7a7a\u95f4\u5728\u521b\u5efa\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002

                                  3. \u586b\u5199\u7b56\u7565\u914d\u7f6e\u3002

                                    \u7b56\u7565\u914d\u7f6e\u5206\u4e3a\u5165\u6d41\u91cf\u7b56\u7565\u548c\u51fa\u6d41\u91cf\u7b56\u7565\u3002\u5982\u679c\u6e90 Pod \u60f3\u8981\u6210\u529f\u8fde\u63a5\u5230\u76ee\u6807 Pod\uff0c\u6e90 Pod \u7684\u51fa\u6d41\u91cf\u7b56\u7565\u548c\u76ee\u6807 Pod \u7684\u5165\u6d41\u91cf\u7b56\u7565\u90fd\u9700\u8981\u5141\u8bb8\u8fde\u63a5\u3002\u5982\u679c\u4efb\u4f55\u4e00\u65b9\u4e0d\u5141\u8bb8\u8fde\u63a5\uff0c\u90fd\u4f1a\u5bfc\u81f4\u8fde\u63a5\u5931\u8d25\u3002

                                    • \u5165\u6d41\u91cf\u7b56\u7565\uff1a\u70b9\u51fb \u2795 \u5f00\u59cb\u914d\u7f6e\u7b56\u7565\uff0c\u652f\u6301\u914d\u7f6e\u591a\u6761\u7b56\u7565\u3002\u591a\u6761\u7f51\u7edc\u7b56\u7565\u7684\u6548\u679c\u76f8\u4e92\u53e0\u52a0\uff0c\u53ea\u6709\u540c\u65f6\u6ee1\u8db3\u6240\u6709\u7f51\u7edc\u7b56\u7565\uff0c\u624d\u80fd\u6210\u529f\u5efa\u7acb\u8fde\u63a5\u3002

                                    • \u51fa\u6d41\u91cf\u7b56\u7565

                                  "},{"location":"end-user/kpanda/network/network-policy.html#_4","title":"\u67e5\u770b\u7f51\u7edc\u7b56\u7565","text":"
                                  1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u7f51\u7edc -> \u7f51\u7edc\u7b56\u7565 \uff0c\u70b9\u51fb\u7f51\u7edc\u7b56\u7565\u7684\u540d\u79f0\u3002

                                  2. \u67e5\u770b\u8be5\u7b56\u7565\u7684\u57fa\u672c\u914d\u7f6e\u3001\u5173\u8054\u5b9e\u4f8b\u4fe1\u606f\u3001\u5165\u6d41\u91cf\u7b56\u7565\u3001\u51fa\u6d41\u91cf\u7b56\u7565\u3002

                                  Info

                                  \u5728\u5173\u8054\u5b9e\u4f8b\u9875\u7b7e\u4e0b\uff0c\u652f\u6301\u67e5\u770b\u5b9e\u4f8b\u76d1\u63a7\u3001\u65e5\u5fd7\u3001\u5bb9\u5668\u5217\u8868\u3001YAML \u6587\u4ef6\u3001\u4e8b\u4ef6\u7b49\u3002

                                  "},{"location":"end-user/kpanda/network/network-policy.html#_5","title":"\u66f4\u65b0\u7f51\u7edc\u7b56\u7565","text":"

                                  \u6709\u4e24\u79cd\u9014\u5f84\u53ef\u4ee5\u66f4\u65b0\u7f51\u7edc\u7b56\u7565\u3002\u652f\u6301\u901a\u8fc7\u8868\u5355\u6216 YAML \u6587\u4ef6\u66f4\u65b0\u7f51\u7edc\u7b56\u7565\u3002

                                  • \u5728\u7f51\u7edc\u7b56\u7565\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684\u7b56\u7565\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                                  • \u70b9\u51fb\u7f51\u7edc\u7b56\u7565\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u7f51\u7edc\u7b56\u7565\u7684\u8be6\u60c5\u9875\u9762\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                                  "},{"location":"end-user/kpanda/network/network-policy.html#_6","title":"\u5220\u9664\u7f51\u7edc\u7b56\u7565","text":"

                                  \u6709\u4e24\u79cd\u9014\u5f84\u53ef\u4ee5\u5220\u9664\u7f51\u7edc\u7b56\u7565\u3002\u652f\u6301\u901a\u8fc7\u8868\u5355\u6216 YAML \u6587\u4ef6\u66f4\u65b0\u7f51\u7edc\u7b56\u7565\u3002

                                  • \u5728\u7f51\u7edc\u7b56\u7565\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684\u7b56\u7565\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u5220\u9664\u3002

                                  • \u70b9\u51fb\u7f51\u7edc\u7b56\u7565\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u7f51\u7edc\u7b56\u7565\u7684\u8be6\u60c5\u9875\u9762\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u5220\u9664\u3002

                                  "},{"location":"end-user/kpanda/nodes/add-node.html","title":"\u96c6\u7fa4\u8282\u70b9\u6269\u5bb9","text":"

                                  \u968f\u7740\u4e1a\u52a1\u5e94\u7528\u4e0d\u65ad\u589e\u957f\uff0c\u96c6\u7fa4\u8d44\u6e90\u65e5\u8d8b\u7d27\u5f20\uff0c\u8fd9\u65f6\u53ef\u4ee5\u57fa\u4e8e kubean \u5bf9\u96c6\u7fa4\u8282\u70b9\u8fdb\u884c\u6269\u5bb9\u3002\u6269\u5bb9\u540e\uff0c\u5e94\u7528\u53ef\u4ee5\u8fd0\u884c\u5728\u65b0\u589e\u7684\u8282\u70b9\u4e0a\uff0c\u7f13\u89e3\u8d44\u6e90\u538b\u529b\u3002

                                  \u53ea\u6709\u901a\u8fc7\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u521b\u5efa\u7684\u96c6\u7fa4\u624d\u652f\u6301\u8282\u70b9\u6269\u7f29\u5bb9\uff0c\u4ece\u5916\u90e8\u63a5\u5165\u7684\u96c6\u7fa4\u4e0d\u652f\u6301\u6b64\u64cd\u4f5c\u3002\u672c\u6587\u4e3b\u8981\u4ecb\u7ecd\u540c\u79cd\u67b6\u6784\u4e0b\u5de5\u4f5c\u96c6\u7fa4\u7684 \u5de5\u4f5c\u8282\u70b9 \u6269\u5bb9\u3002

1. On the cluster list page, click the name of the target cluster.

   If the cluster role carries the Integrated Cluster tag, the cluster does not support node scaling.

2. Click Node Management in the left navigation bar, then click Integrate Node in the upper right corner of the page.

3. Enter the host name and node IP, then click OK.

   Click ➕ Add Worker Node to continue integrating more nodes.

Note

Integrating a node takes about 20 minutes, so please be patient.

                                  "},{"location":"end-user/kpanda/nodes/add-node.html#_2","title":"\u53c2\u8003\u6587\u6863","text":"
                                  • \u5bf9\u5de5\u4f5c\u96c6\u7fa4\u7684\u63a7\u5236\u8282\u70b9\u6269\u5bb9
                                  • \u4e3a\u5de5\u4f5c\u96c6\u7fa4\u6dfb\u52a0\u5f02\u6784\u8282\u70b9
                                  • \u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u5de5\u4f5c\u8282\u70b9\u6269\u5bb9
                                  • \u66ff\u6362\u5de5\u4f5c\u96c6\u7fa4\u7684\u9996\u4e2a\u63a7\u5236\u8282\u70b9
                                  "},{"location":"end-user/kpanda/nodes/delete-node.html","title":"\u96c6\u7fa4\u8282\u70b9\u7f29\u5bb9","text":"

                                  \u5f53\u4e1a\u52a1\u9ad8\u5cf0\u671f\u7ed3\u675f\u4e4b\u540e\uff0c\u4e3a\u4e86\u8282\u7701\u8d44\u6e90\u6210\u672c\uff0c\u53ef\u4ee5\u7f29\u5c0f\u96c6\u7fa4\u89c4\u6a21\uff0c\u5378\u8f7d\u5197\u4f59\u7684\u8282\u70b9\uff0c\u5373\u8282\u70b9\u7f29\u5bb9\u3002\u8282\u70b9\u5378\u8f7d\u540e\uff0c\u5e94\u7528\u65e0\u6cd5\u7ee7\u7eed\u8fd0\u884c\u5728\u8be5\u8282\u70b9\u4e0a\u3002

                                  "},{"location":"end-user/kpanda/nodes/delete-node.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
• The current user has the Cluster Admin role.
• Only clusters created through the container management module support node scaling; clusters integrated from outside do not support this operation.
• Before removing a node, pause scheduling on it and evict all applications on it to other nodes.
• Eviction method: log in to a controller node and evict all Pods on the node with the kubectl drain command. Safe eviction allows the containers in the Pods to terminate gracefully (see the sketch below).
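A minimal sketch of that eviction, assuming a placeholder node name node-01:

kubectl cordon node-01   # mark the node unschedulable\nkubectl drain node-01 --ignore-daemonsets --delete-emptydir-data   # gracefully evict all Pods\n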
                                  "},{"location":"end-user/kpanda/nodes/delete-node.html#_3","title":"\u6ce8\u610f\u4e8b\u9879","text":"
1. When scaling down a cluster, nodes can only be removed one at a time, not in batches.

2. If a cluster controller node must be removed, make sure the final number of controller nodes is odd.

3. The first controller node cannot be taken offline during scale-down. If this operation must be performed, contact an after-sales engineer.

                                  "},{"location":"end-user/kpanda/nodes/delete-node.html#_4","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
1. On the Cluster List page, click the name of the target cluster.

   If the Cluster Role carries the Integrated Cluster label, the cluster does not support node scaling.

2. Click Node Management in the left navigation bar, locate the node to be removed, click ┇ and select Remove Node.

3. Enter the node name and click Delete to confirm.

                                  "},{"location":"end-user/kpanda/nodes/labels-annotations.html","title":"\u6807\u7b7e\u4e0e\u6ce8\u89e3","text":"

Labels are identifying key-value pairs added to Kubernetes objects such as Pods, nodes, and clusters. Combined with label selectors, they can be used to find and filter Kubernetes objects that meet certain conditions. Each key must be unique for a given object.

Annotations, like labels, are key/value pairs, but they have no identifying or filtering function. Annotations can attach arbitrary metadata to a node. Annotation keys usually take the form prefix(optional)/name(required), for example nfd.node.kubernetes.io/extended-resources. If the prefix is omitted, the annotation key is considered private to the user.

For more information about labels and annotations, see the official Kubernetes documentation on labels and selectors, and on annotations.
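Both can also be managed from the command line; a minimal sketch using a placeholder node name node-01 and sample keys (a trailing \"-\" removes the key):

# Add a label, then remove it\nkubectl label node node-01 disktype=ssd\nkubectl label node node-01 disktype-\n\n# Add an annotation, then remove it\nkubectl annotate node node-01 example.com/owner=team-a\nkubectl annotate node node-01 example.com/owner-\n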

The steps for adding/deleting labels and annotations in the UI are as follows:

1. On the Cluster List page, click the name of the target cluster.

2. Click Node Management in the left navigation bar, click the ┇ action icon on the right of the node, then click Modify Labels or Modify Annotations.

3. Click ➕ Add to add labels or annotations, click X to delete them, and finally click OK.

                                  "},{"location":"end-user/kpanda/nodes/node-authentication.html","title":"\u4f7f\u7528 SSH \u5bc6\u94a5\u8ba4\u8bc1\u8282\u70b9","text":"

If you choose SSH keys as the node authentication method for the cluster to be created, configure the public and private keys as described below.

1. Run the following command on any node in the management cluster of the cluster to be created to generate the public and private keys.

                                    cd /root/.ssh\nssh-keygen -t rsa\n
2. Run the ls command to check whether the keys were created successfully on the management cluster. The correct output looks like this:

                                    ls\nid_rsa  id_rsa.pub  known_hosts\n

The file named id_rsa is the private key, and the file named id_rsa.pub is the public key.

3. Run the following command to load the public key file id_rsa.pub onto each node of the cluster to be created.

                                    ssh-copy-id -i /root/.ssh/id_rsa.pub root@10.0.0.0\n

Replace the root@10.0.0.0 user account and node IP in the command above with the username and IP of the nodes of the cluster to be created. **The same operation must be performed on every node of the cluster to be created.**

4. Run the following command to view the private key file id_rsa created in step 1.

                                    cat /root/.ssh/id_rsa\n

The output is as follows:

                                    -----BEGIN RSA PRIVATE KEY-----\nMIIEpQIBAAKCAQEA3UvyKINzY5BFuemQ+uJ6q+GqgfvnWwNC8HzZhpcMSjJy26MM\nUtBEBJxy8fMi57XcjYxPibXW/wnd+32ICCycqCwByUmuXeCC1cjlCQDqjcAvXae7\nY54IXGF7wm2IsMNwf0kjFEXjuS48FLDA0mGRaN3BG+Up5geXcHckg3K5LD8kXFFx\ndEmSIjdyw55NaUitmEdHzN7cIdfi6Z56jcV8dcFBgWKUx+ebiyPmZBkXToz6GnMF\nrswzzZCl+G6Jb2xTGy7g7ozb4BoZd1IpSD5EhDanRrESVE0C5YuJ5zUAC0CvVd1l\nv67AK8Ko6MXToHp01/bcsvlM6cqgwUFXZKVeOwIDAQABAoIBAQCO36GQlo3BEjxy\nM2HvGJmqrx+unDxafliRe4nVY2AD515Qf4xNSzke4QM1QoyenMOwf446krQkJPK0\nk+9nl6Xszby5gGCbK4BNFk8I6RaGPjZWeRx6zGUJf8avWJiPxx6yjz2esSC9RiR0\nF0nmiiefVMyAfgv2/5++dK2WUFNNRKLgSRRpP5bRaD5wMzzxtSSXrUon6217HO8p\n3RoWsI51MbVzhdVgpHUNABcoa0rpr9svT6XLKZxY8mxpKFYjM0Wv2JIDABg3kBvh\nQbJ7kStCO3naZjKMU9UuSqVJs06cflGYw7Or8/tABR3LErNQKPjkhAQqt0DXw7Iw\n3tKdTAJBAoGBAP687U7JAOqQkcphek2E/A/sbO/d37ix7Z3vNOy065STrA+ZWMZn\npZ6Ui1B/oJpoZssnfvIoz9sn559X0j67TljFALFd2ZGS0Fqh9KVCqDvfk+Vst1dq\n+3r/yZdTOyswoccxkJiC/GDwZGK0amJWqvob39JCZhDAKIGLbGMmjdAHAoGBAN5k\nm1WGnni1nZ+3dryIwgB6z1hWcnLTamzSET6KhSuo946ET0IRG9xtlheCx6dqICbr\nVk1Y4NtRZjK/p/YGx59rDWf7E3I8ZMgR7mjieOcUZ4lUlA4l7ZIlW/2WZHW+nUXO\nTi20fqJ8qSp4BUvOvuth1pz2GLUHe2/Fxjf7HIstAoGBAPHpPr9r+TfIlPsJeRj2\n6lzA3G8qWFRQfGRYjv0fjv0pA+RIb1rzgP/I90g5+63G6Z+R4WdcxI/OJJNY1iuG\nuw9n/pFxm7U4JC990BPE6nj5iLz+clpNGYckNDBF9VG9vFSrSDLdaYkxoVNvG/xJ\na9Na90H4lm7f3VewrPy310KvAoGAZr+mwNoEh5Kpc6xo8Gxi7aPP/mlaUVD6X7Ki\ngvmu02AqmC7rC4QqEiqTaONkaSXwGusqIWxJ3yp5hELmUBYLzszAEeV/s4zRp1oZ\ng133LBRSTbHFAdBmNdqK6Nu+KGRb92980UMOKvZbliKDl+W6cbfvVu+gtKrzTc3b\naevb4TUCgYEAnJAxyVYDP1nJf7bjBSHXQu1E/DMwbtrqw7dylRJ8cAzI7IxfSCez\n7BYWq41PqVd9/zrb3Pbh2phiVzKe783igAIMqummcjo/kZyCwFsYBzK77max1jF5\naPQsLbRS2aDz8kIH6jHPZ/R+15EROmdtLmA7vIJZGerWWQR0dUU+XXA=\n

Copy the content of the private key and paste it into the key input box in the UI.

                                  "},{"location":"end-user/kpanda/nodes/node-check.html","title":"\u521b\u5efa\u96c6\u7fa4\u8282\u70b9\u53ef\u7528\u6027\u68c0\u67e5","text":"

When creating a cluster or adding nodes to an existing cluster, check the node configuration against the table below to avoid cluster creation or scale-up failures caused by node misconfiguration.
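Several of the items in the table below can be spot-checked from a shell on the node; a minimal sketch (exact commands vary by distribution):

getenforce                      # SELinux: expect Disabled or Permissive\nsystemctl is-active firewalld   # firewall: expect inactive\nuname -m                        # CPU architecture (x86_64 / aarch64)\nnproc                           # available CPU cores: expect more than 4\nfree -g                         # available memory: expect more than 8 GB\n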

| Check Item | Description |
| --- | --- |
| Operating system | See Supported Architectures and Operating Systems |
| SELinux | Disabled |
| Firewall | Disabled |
| Architecture consistency | Consistent CPU architecture across nodes (e.g., all ARM or all x86) |
| Host time | Synchronization error between all hosts is less than 10 seconds |
| Network connectivity | The node and its SSH port can be accessed normally by the platform |
| CPU | Available CPU resources greater than 4 cores |
| Memory | Available memory resources greater than 8 GB |
"},{"location":"end-user/kpanda/nodes/node-check.html#_2","title":"Supported Architectures and Operating Systems","text":"
| Architecture | Operating System | Remarks |
| --- | --- | --- |
| ARM | Kylin Linux Advanced Server release V10 (Sword) SP2 | Recommended |
| ARM | UOS Linux | |
| ARM | openEuler | |
| x86 | CentOS 7.x | Recommended |
| x86 | Redhat 7.x | Recommended |
| x86 | Redhat 8.x | Recommended |
| x86 | Flatcar Container Linux by Kinvolk | |
| x86 | Debian Bullseye, Buster, Jessie, Stretch | |
| x86 | Ubuntu 16.04, 18.04, 20.04, 22.04 | |
| x86 | Fedora 35, 36 | |
| x86 | Fedora CoreOS | |
| x86 | openSUSE Leap 15.x/Tumbleweed | |
| x86 | Oracle Linux 7, 8, 9 | |
| x86 | Alma Linux 8, 9 | |
| x86 | Rocky Linux 8, 9 | |
| x86 | Amazon Linux 2 | |
| x86 | Kylin Linux Advanced Server release V10 (Sword) - SP2 | Hygon |
| x86 | UOS Linux | |
| x86 | openEuler | |
"},{"location":"end-user/kpanda/nodes/node-details.html","title":"Node Details","text":"

After integrating or creating a cluster, you can view information about each node in the cluster, including node status, labels, resource usage, Pods, and monitoring information.

1. On the Cluster List page, click the name of the target cluster.

2. Click Node Management in the left navigation bar to view the node status, role, labels, CPU/memory usage, IP address, and creation time.

3. Click a node name to enter the node details page and view more information, including the overview, Pods, labels and annotations, event list, and status.

   You can also view the node's YAML file, monitoring information, labels, annotations, and more.

                                  "},{"location":"end-user/kpanda/nodes/schedule.html","title":"\u8282\u70b9\u8c03\u5ea6","text":"

Nodes can be paused for scheduling or resumed. Pausing scheduling means that Pods stop being scheduled onto the node; resuming scheduling means that Pods can be scheduled onto the node again.
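This maps to the standard cordon/uncordon operations; a minimal sketch with a placeholder node name node-01:

kubectl cordon node-01     # pause scheduling: node is marked SchedulingDisabled\nkubectl uncordon node-01   # resume scheduling\n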

1. On the Cluster List page, click the name of the target cluster.

2. Click Node Management in the left navigation bar, click the ┇ action icon on the right of the node, then click the Pause Scheduling button to pause scheduling on the node.

3. Click the ┇ action icon on the right of the node, then click the Resume Scheduling button to resume scheduling on the node.

The node scheduling status may be delayed due to network conditions. Click the refresh icon on the right of the search box to refresh the node scheduling status.

                                  "},{"location":"end-user/kpanda/nodes/taints.html","title":"\u8282\u70b9\u6c61\u70b9\u7ba1\u7406","text":"

A taint enables a node to repel a certain class of Pods, preventing them from being scheduled onto the node. One or more taints can be applied to each node, and Pods that cannot tolerate those taints will not be scheduled onto it.

                                  "},{"location":"end-user/kpanda/nodes/taints.html#_2","title":"\u6ce8\u610f\u4e8b\u9879","text":"
1. The current user should have the NS Editor role or higher permissions.
2. After a taint is added to a node, only Pods that can tolerate the taint can be scheduled onto it.
                                  "},{"location":"end-user/kpanda/nodes/taints.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
1. On the Cluster List page, find the target cluster and click its name to enter the Cluster Overview page.

2. Click Node Management in the left navigation bar, find the node whose taints need to be modified, click the ┇ action icon on the right, and click the Modify Taints button.

3. Enter the taint's key-value information in the dialog, select the taint effect, and click OK.

   Click ➕ Add to add multiple taints to the node; click the X on the right of a taint effect to delete the taint.

   Three taint effects are currently supported:

   • NoSchedule: new Pods are not scheduled onto nodes with this taint unless they have a matching toleration. Pods currently running on the node are not evicted.
   • NoExecute: this affects Pods already running on the node:
     • If a Pod cannot tolerate the taint, it is evicted immediately.
     • If a Pod can tolerate the taint but does not specify tolerationSeconds in its toleration, it keeps running on the node indefinitely.
     • If a Pod can tolerate the taint and specifies tolerationSeconds, it keeps running on the node for the specified duration, after which it is evicted from the node.
   • PreferNoSchedule: a "soft" version of NoSchedule. The control plane will **try** to avoid scheduling Pods that do not tolerate the taint onto the node, but this is not guaranteed. Try to avoid using this taint.

For more details about taints, see the official Kubernetes documentation: Taints and Tolerations.
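For reference, the equivalent command-line operations look like this (a sketch; node-01 and key1=value1 are placeholders, and a Pod opts in by declaring a matching tolerations entry in its spec):

# Apply a taint with the NoSchedule effect, then remove it (trailing \"-\")\nkubectl taint nodes node-01 key1=value1:NoSchedule\nkubectl taint nodes node-01 key1=value1:NoSchedule-\n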

                                  "},{"location":"end-user/kpanda/olm/import-miniooperator.html","title":"\u5bfc\u5165\u79bb\u7ebf MinIo Operator","text":"

This article describes how to import the MinIO Operator in an offline environment.

                                  "},{"location":"end-user/kpanda/olm/import-miniooperator.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
• The current cluster is integrated into container management, and the kolm component is installed in the global service cluster (search for kolm in the Helm templates)
• The olm component (version >= 0.2.4) is installed in the current cluster (search for olm in the Helm templates)
• Docker commands can be executed
• An image registry is available
                                  "},{"location":"end-user/kpanda/olm/import-miniooperator.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
1. Set environment variables in the execution environment for use in subsequent steps by running:

                                    export OPM_IMG=10.5.14.200/quay.m.daocloud.io/operator-framework/opm:v1.29.0 \nexport BUNDLE_IMG=10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3 \n

How to obtain the image addresses above:

Go to Container Management -> select the current cluster -> Helm Apps -> view the olm component -> Plugin Settings, and find the opm, minio, minio bundle, and minio operator images needed in the subsequent steps.

Using the screenshot above as an example, the four image addresses are as follows\n\n# opm image \n10.5.14.200/quay.m.daocloud.io/operator-framework/opm:v1.29.0\n\n# minio image\n10.5.14.200/quay.m.daocloud.io/minio/minio:RELEASE.2023-03-24T21-41-23Z\n\n# minio bundle image\n10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3\n\n# minio operator image \n10.5.14.200/quay.m.daocloud.io/minio/operator:v5.0.3\n
2. Run the opm command to obtain the operators contained in the offline bundle image.

# Create a directory to store the operator\n$ mkdir minio-operator && cd minio-operator \n\n# Get the operator yaml \n$ docker run --user root  -v $PWD/minio-operator:/minio-operator ${OPM_IMG} alpha bundle unpack --skip-tls-verify -v -d ${BUNDLE_IMG} -o ./minio-operator\n\n# Expected result\n.\n\u2514\u2500\u2500 minio-operator\n    \u251c\u2500\u2500 manifests\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 console-env_v1_configmap.yaml\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 console-sa-secret_v1_secret.yaml\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 console_v1_service.yaml\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 minio-operator.clusterserviceversion.yaml\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 minio.min.io_tenants.yaml\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 operator_v1_service.yaml\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 sts.min.io_policybindings.yaml\n    \u2502\u00a0\u00a0 \u2514\u2500\u2500 sts_v1_service.yaml\n    \u2514\u2500\u2500 metadata\n        \u2514\u2500\u2500 annotations.yaml\n\n3 directories, 9 files\n
3. Replace all image addresses in the minio-operator/manifests/minio-operator.clusterserviceversion.yaml file with the addresses from the offline image registry.

Before replacement:

After replacement:

4. Generate the Dockerfile for building the bundle image

$ docker run --user root  -v $PWD:/minio-operator -w /minio-operator ${OPM_IMG} alpha bundle generate --channels stable,beta -d /minio-operator/minio-operator/manifests -e stable -p minio-operator\n\n# Expected result\n.\n\u251c\u2500\u2500 bundle.Dockerfile\n\u2514\u2500\u2500 minio-operator\n    \u251c\u2500\u2500 manifests\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 console-env_v1_configmap.yaml\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 console-sa-secret_v1_secret.yaml\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 console_v1_service.yaml\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 minio-operator.clusterserviceversion.yaml\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 minio.min.io_tenants.yaml\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 operator_v1_service.yaml\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 sts.min.io_policybindings.yaml\n    \u2502\u00a0\u00a0 \u2514\u2500\u2500 sts_v1_service.yaml\n    \u2514\u2500\u2500 metadata\n        \u2514\u2500\u2500 annotations.yaml\n\n3 directories, 10 files\n
5. Run the build command to build the bundle image and push it to the offline registry.

# Set the new bundle image \nexport OFFLINE_BUNDLE_IMG=10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3-offline \n\n$ docker build . -f bundle.Dockerfile -t ${OFFLINE_BUNDLE_IMG}\n\n$ docker push ${OFFLINE_BUNDLE_IMG}\n
6. Generate the Dockerfile for building the catalog image.

$ docker run --user root  -v $PWD:/minio-operator  -w /minio-operator ${OPM_IMG} index add  --bundles ${OFFLINE_BUNDLE_IMG} --generate --binary-image ${OPM_IMG} --skip-tls-verify\n\n# Expected result\n.\n\u251c\u2500\u2500 bundle.Dockerfile\n\u251c\u2500\u2500 database\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 index.db\n\u251c\u2500\u2500 index.Dockerfile\n\u2514\u2500\u2500 minio-operator\n    \u251c\u2500\u2500 manifests\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 console-env_v1_configmap.yaml\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 console-sa-secret_v1_secret.yaml\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 console_v1_service.yaml\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 minio.min.io_tenants.yaml\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 minio-operator.clusterserviceversion.yaml\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 operator_v1_service.yaml\n    \u2502\u00a0\u00a0 \u251c\u2500\u2500 sts.min.io_policybindings.yaml\n    \u2502\u00a0\u00a0 \u2514\u2500\u2500 sts_v1_service.yaml\n    \u2514\u2500\u2500 metadata\n        \u2514\u2500\u2500 annotations.yaml\n\n4 directories, 12 files\n
7. Build the catalog image

# Set the new catalog image  \nexport OFFLINE_CATALOG_IMG=10.5.14.200/release.daocloud.io/operator-framework/system-operator-index:v0.1.0-offline\n\n$ docker build . -f index.Dockerfile -t ${OFFLINE_CATALOG_IMG}  \n\n$ docker push ${OFFLINE_CATALOG_IMG}\n
8. Go to Container Management and update the built-in catsrc image of the olm Helm app (enter the ${catalog-image} specified when building the catalog image)

9. After the update succeeds, the minio-operator component appears in the Operator Hub

                                  "},{"location":"end-user/kpanda/permissions/cluster-ns-auth.html","title":"\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u6388\u6743","text":"

Container management implements authorization based on global permission management and global user/user group management, through which a user can be granted the highest container management permissions (the ability to create, manage, and delete all clusters).

                                  "},{"location":"end-user/kpanda/permissions/cluster-ns-auth.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

Before authorizing users/user groups, complete the following preparations:

• The user/user group to be authorized has been created in global management; see User.

• Only Kpanda Owner and the Cluster Admin of the current cluster can grant cluster authorization. For details, see the permissions description.

• Only Kpanda Owner, the Cluster Admin of the current cluster, and the NS Admin of the current namespace can grant namespace authorization.

                                  "},{"location":"end-user/kpanda/permissions/cluster-ns-auth.html#_3","title":"\u96c6\u7fa4\u6388\u6743","text":"
1. After logging in to the platform, click Permission Management under Container Management in the left menu bar. This opens on the Cluster Permissions tab by default.

2. Click the Add Authorization button.

3. On the Add Cluster Permission page, select the target cluster and the users/user groups to be authorized, then click OK.

   Currently the only supported cluster role is Cluster Admin; for detailed permissions see the permissions description. To authorize multiple users/user groups at the same time, click Add User Permission to add them one by one.

4. Return to the cluster permission management page; the message "Cluster permission added successfully" appears on screen.

                                  "},{"location":"end-user/kpanda/permissions/cluster-ns-auth.html#_4","title":"\u547d\u540d\u7a7a\u95f4\u6388\u6743","text":"
1. After logging in to the platform, click Permission Management under Container Management in the left menu bar, then click the Namespace Permissions tab.

2. Click the Add Authorization button. On the Add Namespace Permission page, select the target cluster, the target namespace, and the users/user groups to be authorized, then click OK.

   Currently the supported namespace roles are NS Admin, NS Editor, and NS Viewer; for detailed permissions see the permissions description. To authorize multiple users/user groups at the same time, click Add User Permission to add them one by one. Click OK to complete the authorization.

3. Return to the namespace permission management page; the message "Namespace permission added successfully" appears on screen.

                                    Tip

To delete or edit a permission later, click ┇ on the right of the list and select Edit or Delete.

                                  "},{"location":"end-user/kpanda/permissions/custom-kpanda-role.html","title":"\u589e\u52a0 Kpanda \u5185\u7f6e\u89d2\u8272\u6743\u9650\u70b9","text":"

In the past, the permission points (RBAC rules) of Kpanda built-in roles were predefined and could not be modified by users, because any modification to a built-in role's permission points would be reverted to the predefined set by the Kpanda controller. To support more flexible permission configuration and meet custom requirements for system roles, Kpanda now supports modifying the permission points of the built-in system roles (cluster admin, ns admin, ns editor, ns viewer). The following example demonstrates how to add a permission point to ns-viewer, attempting to grant it the permission to delete Deployments. Other permission points work similarly.

                                  "},{"location":"end-user/kpanda/permissions/custom-kpanda-role.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
• Applies to container management v0.27.0 and above.
• A Kubernetes cluster has been integrated or created, and the cluster's UI can be accessed.
• A namespace and a user have been created, and the user has been granted NS Viewer; for details, see Namespace Authorization.

                                  Note

• Permission points only need to be added on the Global Cluster; the Kpanda controller syncs permission points added on the Global Cluster to all integrated sub-clusters, and the sync takes a while to complete
• Permission points can only be added on the Global Cluster; permission points added on a sub-cluster are overwritten by the Global Cluster's built-in role permission points
• Only ClusterRoles carrying the fixed labels can be used to append permissions; replacing or deleting permissions is not supported, and Roles cannot be used to append permissions. The mapping between built-in roles and the labels of user-created ClusterRoles is as follows

                                    cluster-admin: rbac.kpanda.io/role-template-cluster-admin: \"true\"\ncluster-edit: rbac.kpanda.io/role-template-cluster-edit: \"true\"\ncluster-view: rbac.kpanda.io/role-template-cluster-view: \"true\"\nns-admin: rbac.kpanda.io/role-template-ns-admin: \"true\"\nns-edit: rbac.kpanda.io/role-template-ns-edit: \"true\"\nns-view: rbac.kpanda.io/role-template-ns-view: \"true\"\n
                                  "},{"location":"end-user/kpanda/permissions/custom-kpanda-role.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
1. Create a stateless workload (Deployment) as a user with admin or cluster admin permissions

2. Grant ns-viewer so that the user has the ns-view permission for the namespace

3. Switch the logged-in user to ns-viewer, open the console to obtain the token of the ns-viewer user, and use curl to request deletion of the deployment nginx created above; the deletion is denied for lack of permission

                                    [root@master-01 ~]# curl -k -X DELETE  'https://${URL}/apis/kpanda.io/v1alpha1/clusters/cluster-member/namespaces/default/deployments/nginx' -H 'authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJOU044MG9BclBRMzUwZ2VVU2ZyNy1xMEREVWY4MmEtZmJqR05uRE1sd1lFIn0.eyJleHAiOjE3MTU3NjY1NzksImlhdCI6MTcxNTY4MDE3OSwiYXV0aF90aW1lIjoxNzE1NjgwMTc3LCJqdGkiOiIxZjI3MzJlNC1jYjFhLTQ4OTktYjBiZC1iN2IxZWY1MzAxNDEiLCJpc3MiOiJodHRwczovLzEwLjYuMjAxLjIwMTozMDE0Ny9hdXRoL3JlYWxtcy9naGlwcG8iLCJhdWQiOiJfX2ludGVybmFsLWdoaXBwbyIsInN1YiI6ImMxZmMxM2ViLTAwZGUtNDFiYS05ZTllLWE5OGU2OGM0MmVmMCIsInR5cCI6IklEIiwiYXpwIjoiX19pbnRlcm5hbC1naGlwcG8iLCJzZXNzaW9uX3N0YXRlIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiYXRfaGFzaCI6IlJhTHoyQjlKQ2FNc1RrbGVMR3V6blEiLCJhY3IiOiIwIiwic2lkIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJncm91cHMiOltdLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJucy12aWV3ZXIiLCJsb2NhbGUiOiIifQ.As2ipMjfvzvgONAGlc9RnqOd3zMwAj82VXlcqcR74ZK9tAq3Q4ruQ1a6WuIfqiq8Kq4F77ljwwzYUuunfBli2zhU2II8zyxVhLoCEBu4pBVBd_oJyUycXuNa6HfQGnl36E1M7-_QG8b-_T51wFxxVb5b7SEDE1AvIf54NAlAr-rhDmGRdOK1c9CohQcS00ab52MD3IPiFFZ8_Iljnii-RpXKZoTjdcULJVn_uZNk_SzSUK-7MVWmPBK15m6sNktOMSf0pCObKWRqHd15JSe-2aA2PKBo1jBH3tHbOgZyMPdsLI0QdmEnKB5FiiOeMpwn_oHnT6IjT-BZlB18VkW8rA'\n{\"code\":7,\"message\":\"[RBAC] delete resources(deployments: nginx) is forbidden for user(ns-viewer) in cluster(cluster-member)\",\"details\":[]}[root@master-01 ~]#\n[root@master-01 ~]#\n
4. Create the following ClusterRole on the global service cluster:

                                    apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: append-ns-view # (1)!\n  labels:\n    rbac.kpanda.io/role-template-ns-view: \"true\" # (2)!\nrules:\n  - apiGroups: [ \"apps\" ]\n    resources: [ \"deployments\" ]\n    verbs: [ \"delete\" ]\n
1. The value of this field can be anything, as long as it is unique and conforms to the Kubernetes resource naming rules
2. Note that different labels should be applied when adding permissions to different roles
5. Wait for the Kpanda controller to add the user-created permission to the built-in role ns-viewer, then check whether the corresponding built-in role contains the permission point added in the previous step

                                    [root@master-01 ~]# kubectl get clusterrole role-template-ns-view -oyaml|grep deployments -C 10|tail -n 6\n
                                    - apiGroups:\n  - apps\n  resources:\n  - deployments\n  verbs:\n  - delete\n

6. Use curl again to request deletion of the deployment nginx above; this time the deletion succeeds. In other words, ns-viewer has successfully gained the permission to delete Deployments.

                                    [root@master-01 ~]# curl -k -X DELETE  'https://${URL}/apis/kpanda.io/v1alpha1/clusters/cluster-member/namespaces/default/deployments/nginx' -H 'authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJOU044MG9BclBRMzUwZ2VVU2ZyNy1xMEREVWY4MmEtZmJqR05uRE1sd1lFIn0.eyJleHAiOjE3MTU3NjY1NzksImlhdCI6MTcxNTY4MDE3OSwiYXV0aF90aW1lIjoxNzE1NjgwMTc3LCJqdGkiOiIxZjI3MzJlNC1jYjFhLTQ4OTktYjBiZC1iN2IxZWY1MzAxNDEiLCJpc3MiOiJodHRwczovLzEwLjYuMjAxLjIwMTozMDE0Ny9hdXRoL3JlYWxtcy9naGlwcG8iLCJhdWQiOiJfX2ludGVybmFsLWdoaXBwbyIsInN1YiI6ImMxZmMxM2ViLTAwZGUtNDFiYS05ZTllLWE5OGU2OGM0MmVmMCIsInR5cCI6IklEIiwiYXpwIjoiX19pbnRlcm5hbC1naGlwcG8iLCJzZXNzaW9uX3N0YXRlIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiYXRfaGFzaCI6IlJhTHoyQjlKQ2FNc1RrbGVMR3V6blEiLCJhY3IiOiIwIiwic2lkIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJncm91cHMiOltdLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJucy12aWV3ZXIiLCJsb2NhbGUiOiIifQ.As2ipMjfvzvgONAGlc9RnqOd3zMwAj82VXlcqcR74ZK9tAq3Q4ruQ1a6WuIfqiq8Kq4F77ljwwzYUuunfBli2zhU2II8zyxVhLoCEBu4pBVBd_oJyUycXuNa6HfQGnl36E1M7-_QG8b-_T51wFxxVb5b7SEDE1AvIf54NAlAr-rhDmGRdOK1c9CohQcS00ab52MD3IPiFFZ8_Iljnii-RpXKZoTjdcULJVn_uZNk_SzSUK-7MVWmPBK15m6sNktOMSf0pCObKWRqHd15JSe-2aA2PKBo1jBH3tHbOgZyMPdsLI0QdmEnKB5FiiOeMpwn_oHnT6IjT-BZlB18VkW8rA'\n
                                  "},{"location":"end-user/kpanda/permissions/permission-brief.html","title":"\u5bb9\u5668\u7ba1\u7406\u6743\u9650\u8bf4\u660e","text":"

Container management permissions form a multi-dimensional permission management system built on global permission management and Kubernetes RBAC. They support cluster-level and namespace-level permission control, helping users conveniently and flexibly set different operation permissions for the IAM users and user groups (collections of users) under a tenant.

                                  "},{"location":"end-user/kpanda/permissions/permission-brief.html#_2","title":"\u96c6\u7fa4\u6743\u9650","text":"

Cluster permissions are granted via Kubernetes RBAC ClusterRoleBindings; cluster permission settings give users/user groups cluster-level permissions. The current default cluster role is Cluster Admin (which does not include the permission to create or delete clusters).
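A minimal sketch of what such a grant corresponds to, assuming the role-template-cluster-admin ClusterRole shown in the next section and a hypothetical user user01:

cat <<'EOF' | kubectl apply -f -\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: cluster-admin-user01   # hypothetical binding name\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: role-template-cluster-admin\nsubjects:\n- apiGroup: rbac.authorization.k8s.io\n  kind: User\n  name: user01   # hypothetical user\nEOF\n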

                                  "},{"location":"end-user/kpanda/permissions/permission-brief.html#cluster-admin","title":"Cluster Admin","text":"

Cluster Admin has the following permissions:

• Can manage, edit, and view the corresponding cluster

• Manage, edit, and view all workloads under all namespaces and all resources within the cluster

• Can grant users cluster-level roles (Cluster Admin, NS Admin, NS Editor, NS Viewer)

A YAML example of this cluster role is as follows:

                                  apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:49Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-cluster-admin\n  resourceVersion: \"15168\"\n  uid: f8f86d42-d5ef-47aa-b284-097615795076\nrules:\n- apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n- nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'\n
                                  "},{"location":"end-user/kpanda/permissions/permission-brief.html#_3","title":"\u547d\u540d\u7a7a\u95f4\u6743\u9650","text":"

Namespace permissions are grants based on Kubernetes RBAC capabilities, allowing different users/user groups to have different operation permissions (including Kubernetes API permissions) on the resources under a namespace; for details, see Kubernetes RBAC. The current default roles in container management are NS Admin, NS Editor, and NS Viewer.
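Namespace-level grants map to RoleBindings that reference the same role templates; a minimal sketch, assuming the default namespace and a hypothetical user user01:

cat <<'EOF' | kubectl apply -f -\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: ns-viewer-user01   # hypothetical binding name\n  namespace: default\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: role-template-ns-view\nsubjects:\n- apiGroup: rbac.authorization.k8s.io\n  kind: User\n  name: user01   # hypothetical user\nEOF\n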

                                  "},{"location":"end-user/kpanda/permissions/permission-brief.html#ns-admin","title":"NS Admin","text":"

NS Admin has the following permissions:

• Can view the corresponding namespace
• Manage, edit, and view all workloads and custom resources under the namespace
• Can grant users roles for the corresponding namespace (NS Editor, NS Viewer)

A YAML example of this cluster role is as follows:

                                  apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:49Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-admin\n  resourceVersion: \"15173\"\n  uid: 69f64c7e-70e7-4c7c-a3e0-053f507f2bc3\nrules:\n- apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n- nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'    \n
                                  "},{"location":"end-user/kpanda/permissions/permission-brief.html#ns-editor","title":"NS Editor","text":"

NS Editor has the following permissions:

• Can view the namespaces for which it has permission
• Manage, edit, and view all workloads under the namespace
Click to view a YAML example of this cluster role
                                  apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:50Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-edit\n  resourceVersion: \"15175\"\n  uid: ca9e690e-96c0-4978-8915-6e4c00c748fe\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - configmaps\n  - endpoints\n  - persistentvolumeclaims\n  - persistentvolumeclaims/status\n  - pods\n  - replicationcontrollers\n  - replicationcontrollers/scale\n  - serviceaccounts\n  - services\n  - services/status\n  verbs:\n  - '*'\n- apiGroups:\n  - \"\"\n  resources:\n  - bindings\n  - events\n  - limitranges\n  - namespaces/status\n  - pods/log\n  - pods/status\n  - replicationcontrollers/status\n  - resourcequotas\n  - resourcequotas/status\n  verbs:\n  - '*'\n- apiGroups:\n  - \"\"\n  resources:\n  - namespaces\n  verbs:\n  - '*'\n- apiGroups:\n  - apps\n  resources:\n  - controllerrevisions\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - statefulsets\n  - statefulsets/scale\n  - statefulsets/status\n  verbs:\n  - '*'\n- apiGroups:\n  - autoscaling\n  resources:\n  - horizontalpodautoscalers\n  - horizontalpodautoscalers/status\n  verbs:\n  - '*'\n- apiGroups:\n  - batch\n  resources:\n  - cronjobs\n  - cronjobs/status\n  - jobs\n  - jobs/status\n  verbs:\n  - '*'\n- apiGroups:\n  - extensions\n  resources:\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - replicationcontrollers/scale\n  verbs:\n  - '*'\n- apiGroups:\n  - policy\n  resources:\n  - poddisruptionbudgets\n  - poddisruptionbudgets/status\n  verbs:\n  - '*'\n- apiGroups:\n  - networking.k8s.io\n  resources:\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  verbs:\n  - '*'      \n
                                  "},{"location":"end-user/kpanda/permissions/permission-brief.html#ns-viewer","title":"NS Viewer","text":"

NS Viewer has the following permissions:

• Can view the corresponding namespace
• Can view all workloads and custom resources under the namespace
Click to view a YAML example of this cluster role
                                  apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:50Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-view\n  resourceVersion: \"15183\"\n  uid: 853888fd-6ee8-42ac-b91e-63923918baf8\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - configmaps\n  - endpoints\n  - persistentvolumeclaims\n  - persistentvolumeclaims/status\n  - pods\n  - replicationcontrollers\n  - replicationcontrollers/scale\n  - serviceaccounts\n  - services\n  - services/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - \"\"\n  resources:\n  - bindings\n  - events\n  - limitranges\n  - namespaces/status\n  - pods/log\n  - pods/status\n  - replicationcontrollers/status\n  - resourcequotas\n  - resourcequotas/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - \"\"\n  resources:\n  - namespaces\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - apps\n  resources:\n  - controllerrevisions\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - statefulsets\n  - statefulsets/scale\n  - statefulsets/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - autoscaling\n  resources:\n  - horizontalpodautoscalers\n  - horizontalpodautoscalers/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - batch\n  resources:\n  - cronjobs\n  - cronjobs/status\n  - jobs\n  - jobs/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - extensions\n  resources:\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - replicationcontrollers/scale\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - policy\n  resources:\n  - poddisruptionbudgets\n  - poddisruptionbudgets/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - networking.k8s.io\n  resources:\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  verbs:\n  - get\n  - list\n  - watch \n
                                  "},{"location":"end-user/kpanda/permissions/permission-brief.html#faq","title":"\u6743\u9650 FAQ","text":"
1. What is the relationship between global permissions and container management permissions?

   Answer: Global permissions are coarse-grained only; they can manage the creation, editing, and deletion of all clusters. Fine-grained permissions, such as management permissions for a single cluster or management, editing, and deletion permissions for a single namespace, need to be implemented via the Kubernetes RBAC-based container management permissions. Ordinary users only need to be authorized in container management.

2. Currently only four default roles are supported. Do the RoleBindings and ClusterRoleBindings of custom roles created in the backend (fine-grained Kubernetes RBAC) also take effect?

   Answer: Custom permissions cannot currently be managed through the graphical interface, but permission rules created with kubectl take effect all the same.

                                  "},{"location":"end-user/kpanda/scale/create-hpa.html","title":"\u57fa\u4e8e\u5185\u7f6e\u6307\u6807\u521b\u5efa HPA","text":"

The 算丰 AI computing platform supports metric-based elastic scaling of Pod resources (Horizontal Pod Autoscaling, HPA). Users can dynamically adjust the number of Pod replicas by setting CPU utilization, memory usage, and custom metrics. For example, after setting an elastic scaling policy based on the CPU utilization metric for a workload, the workload controller automatically increases/decreases the number of Pod replicas when the Pods' CPU utilization goes above/below the metric threshold you set.

This article describes how to configure elastic scaling based on built-in metrics for workloads.

                                  Note

1. HPA applies only to Deployments and StatefulSets, and each workload can have only one HPA.
2. If an HPA policy is created based on CPU utilization, resource limits must be set for the workload in advance; otherwise CPU utilization cannot be calculated.
3. If built-in metrics and multiple custom metrics are used at the same time, the HPA calculates the required number of replicas for each metric separately and scales using the larger value (without exceeding the maximum number of replicas configured in the HPA policy).
                                  "},{"location":"end-user/kpanda/scale/create-hpa.html#_1","title":"\u5185\u7f6e\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565","text":"

The system has two built-in elastic scaling metrics, CPU and memory, to cover users' basic business scenarios.
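What the form-based policy in the steps below creates is conceptually equivalent to an autoscaling/v2 HorizontalPodAutoscaler; a minimal sketch targeting a hypothetical Deployment my-dep with an 80% CPU utilization target:

cat <<'EOF' | kubectl apply -f -\napiVersion: autoscaling/v2\nkind: HorizontalPodAutoscaler\nmetadata:\n  name: hpa-my-dep\n  namespace: default\nspec:\n  scaleTargetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: my-dep   # hypothetical workload\n  minReplicas: 1\n  maxReplicas: 10\n  metrics:\n  - type: Resource\n    resource:\n      name: cpu\n      target:\n        type: Utilization\n        averageUtilization: 80   # scale out above 80%, scale in below it\nEOF\n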

                                  "},{"location":"end-user/kpanda/scale/create-hpa.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

Before configuring a built-in metric elastic scaling policy for a workload, the following prerequisites must be met:

• The container management module has integrated a Kubernetes cluster or a Kubernetes cluster has been created, and the cluster's UI can be accessed.

• A namespace and a stateless or stateful workload have been created.

• The current user should have NS Editor or higher permissions; for details, see Namespace Authorization.

• The metrics-server plugin has been installed.

                                  "},{"location":"end-user/kpanda/scale/create-hpa.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

Refer to the following steps to configure a built-in metric elastic scaling policy for a workload.

                                  1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \u8fdb\u5165\u96c6\u7fa4\u5217\u8868\u9875\u9762\u3002\u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                  2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d \u8fdb\u5165\u5de5\u4f5c\u8d1f\u8f7d\u5217\u8868\u540e\uff0c\u70b9\u51fb\u4e00\u4e2a\u8d1f\u8f7d\u540d\u79f0\uff0c\u8fdb\u5165 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5 \u9875\u9762\u3002

                                  3. \u70b9\u51fb \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u7684\u5f39\u6027\u4f38\u7f29\u914d\u7f6e\u60c5\u51b5\u3002

                                  4. \u786e\u8ba4\u96c6\u7fa4\u5df2\u5b89\u88c5\u4e86 metrics-server \u63d2\u4ef6\uff0c\u4e14\u63d2\u4ef6\u8fd0\u884c\u72b6\u6001\u4e3a\u6b63\u5e38\u540e\uff0c\u5373\u53ef\u70b9\u51fb \u65b0\u5efa\u4f38\u7f29 \u6309\u94ae\u3002

                                  5. \u521b\u5efa\u81ea\u5b9a\u4e49\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u53c2\u6570\u3002

                                    • \u7b56\u7565\u540d\u79f0\uff1a\u8f93\u5165\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u7684\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 hpa-my-dep\u3002
                                    • \u547d\u540d\u7a7a\u95f4\uff1a\u8d1f\u8f7d\u6240\u5728\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                    • \u5de5\u4f5c\u8d1f\u8f7d\uff1a\u6267\u884c\u5f39\u6027\u4f38\u7f29\u7684\u5de5\u4f5c\u8d1f\u8f7d\u5bf9\u8c61\u3002
                                    • \u76ee\u6807 CPU \u5229\u7528\u7387\uff1a\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u4e0b Pod \u7684 CPU \u4f7f\u7528\u7387\u3002\u8ba1\u7b97\u65b9\u5f0f\u4e3a\uff1a\u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u7684 Pod \u8d44\u6e90 / \u5de5\u4f5c\u8d1f\u8f7d\u7684\u8bf7\u6c42\uff08request\uff09\u503c\u3002\u5f53\u5b9e\u9645 CPU \u7528\u91cf\u5927\u4e8e/\u5c0f\u4e8e\u76ee\u6807\u503c\u65f6\uff0c\u7cfb\u7edf\u81ea\u52a8\u51cf\u5c11/\u589e\u52a0 Pod \u526f\u672c\u6570\u91cf\u3002
                                    • \u76ee\u6807\u5185\u5b58\u7528\u91cf\uff1a\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u4e0b\u7684 Pod \u7684\u5185\u5b58\u7528\u91cf\u3002\u5f53\u5b9e\u9645\u5185\u5b58\u7528\u91cf\u5927\u4e8e/\u5c0f\u4e8e\u76ee\u6807\u503c\u65f6\uff0c\u7cfb\u7edf\u81ea\u52a8\u51cf\u5c11/\u589e\u52a0 Pod \u526f\u672c\u6570\u91cf\u3002
                                    • \u526f\u672c\u8303\u56f4\uff1aPod \u526f\u672c\u6570\u7684\u5f39\u6027\u4f38\u7f29\u8303\u56f4\u3002\u9ed8\u8ba4\u533a\u95f4\u4e3a\u4e3a 1 - 10\u3002
                                  6. \u5b8c\u6210\u53c2\u6570\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u6309\u94ae\uff0c\u81ea\u52a8\u8fd4\u56de\u5f39\u6027\u4f38\u7f29\u8be6\u60c5\u9875\u9762\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u6267\u884c\u7f16\u8f91\u3001\u5220\u9664\u64cd\u4f5c\uff0c\u8fd8\u53ef\u4ee5\u67e5\u770b\u76f8\u5173\u4e8b\u4ef6\u3002
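
                                  A policy created this way corresponds to a standard Kubernetes HorizontalPodAutoscaler object. Below is a minimal sketch of the equivalent resource; the names and target value are illustrative, not generated by the platform:

                                  apiVersion: autoscaling/v2\nkind: HorizontalPodAutoscaler\nmetadata:\n  name: hpa-my-dep              # illustrative policy name\n  namespace: default            # namespace of the workload\nspec:\n  scaleTargetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: my-dep                # workload to scale\n  minReplicas: 1                # lower bound of the replica range\n  maxReplicas: 10               # upper bound of the replica range\n  metrics:\n  - type: Resource\n    resource:\n      name: cpu\n      target:\n        type: Utilization\n        averageUtilization: 80  # target CPU utilization in percent\n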

                                  "},{"location":"end-user/kpanda/scale/create-vpa.html","title":"\u521b\u5efa VPA","text":"

                                  \u5bb9\u5668\u5782\u76f4\u6269\u7f29\u5bb9\u7b56\u7565\uff08Vertical Pod Autoscaler, VPA\uff09\u901a\u8fc7\u76d1\u63a7 Pod \u5728\u4e00\u6bb5\u65f6\u95f4\u5185\u7684\u8d44\u6e90\u7533\u8bf7\u548c\u7528\u91cf\uff0c \u8ba1\u7b97\u51fa\u5bf9\u8be5 Pod \u800c\u8a00\u6700\u9002\u5408\u7684 CPU \u548c\u5185\u5b58\u8bf7\u6c42\u503c\u3002\u4f7f\u7528 VPA \u53ef\u4ee5\u66f4\u52a0\u5408\u7406\u5730\u4e3a\u96c6\u7fa4\u4e0b\u6bcf\u4e2a Pod \u5206\u914d\u8d44\u6e90\uff0c\u63d0\u9ad8\u96c6\u7fa4\u7684\u6574\u4f53\u8d44\u6e90\u5229\u7528\u7387\uff0c\u907f\u514d\u96c6\u7fa4\u8d44\u6e90\u6d6a\u8d39\u3002

                                  \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u901a\u8fc7\u5bb9\u5668\u5782\u76f4\u6269\u7f29\u5bb9\u7b56\u7565\uff08Vertical Pod Autoscaler, VPA\uff09\uff0c\u57fa\u4e8e\u6b64\u529f\u80fd\u53ef\u4ee5\u6839\u636e\u5bb9\u5668\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\u52a8\u6001\u8c03\u6574 Pod \u8bf7\u6c42\u503c\u3002 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u901a\u8fc7\u624b\u52a8\u548c\u81ea\u52a8\u4e24\u79cd\u65b9\u5f0f\u6765\u4fee\u6539\u8d44\u6e90\u8bf7\u6c42\u503c\uff0c\u60a8\u53ef\u4ee5\u6839\u636e\u5b9e\u9645\u9700\u8981\u8fdb\u884c\u914d\u7f6e\u3002

                                  \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e Pod \u5782\u76f4\u4f38\u7f29\u3002

                                  Warning

                                  \u4f7f\u7528 VPA \u4fee\u6539 Pod \u8d44\u6e90\u8bf7\u6c42\u4f1a\u89e6\u53d1 Pod \u91cd\u542f\u3002\u7531\u4e8e Kubernetes \u672c\u8eab\u7684\u9650\u5236\uff0c Pod \u91cd\u542f\u540e\u53ef\u80fd\u4f1a\u88ab\u8c03\u5ea6\u5230\u5176\u5b83\u8282\u70b9\u4e0a\u3002

                                  "},{"location":"end-user/kpanda/scale/create-vpa.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                  \u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u5782\u76f4\u4f38\u7f29\u7b56\u7565\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                  • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                  • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u3001\u7528\u6237\u3001\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u6216\u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u3002

                                  • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                  • \u5f53\u524d\u96c6\u7fa4\u5df2\u7ecf\u5b89\u88c5 metrics-server \u548c VPA \u63d2\u4ef6\u3002

                                  "},{"location":"end-user/kpanda/scale/create-vpa.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                  \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u5185\u7f6e\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u3002

                                  1. \u5728 \u96c6\u7fa4\u5217\u8868 \u4e2d\u627e\u5230\u76ee\u524d\u96c6\u7fa4\uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                                  2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u627e\u5230\u9700\u8981\u521b\u5efa VPA \u7684\u8d1f\u8f7d\uff0c\u70b9\u51fb\u8be5\u8d1f\u8f7d\u7684\u540d\u79f0\u3002

                                    3. \u70b9\u51fb \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u7684\u5f39\u6027\u4f38\u7f29\u914d\u7f6e\uff0c\u786e\u8ba4\u5df2\u7ecf\u5b89\u88c5\u4e86\u76f8\u5173\u63d2\u4ef6\u5e76\u4e14\u63d2\u4ef6\u662f\u5426\u8fd0\u884c\u6b63\u5e38\u3002

                                  3. \u70b9\u51fb \u65b0\u5efa\u4f38\u7f29 \u6309\u94ae\uff0c\u5e76\u914d\u7f6e VPA \u5782\u76f4\u4f38\u7f29\u7b56\u7565\u53c2\u6570\u3002

                                    • \u7b56\u7565\u540d\u79f0\uff1a\u8f93\u5165\u5782\u76f4\u4f38\u7f29\u7b56\u7565\u7684\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 vpa-my-dep\u3002
                                    • \u4f38\u7f29\u6a21\u5f0f\uff1a\u6267\u884c\u4fee\u6539 CPU \u548c\u5185\u5b58\u8bf7\u6c42\u503c\u7684\u65b9\u5f0f\uff0c\u76ee\u524d\u5782\u76f4\u4f38\u7f29\u652f\u6301\u624b\u52a8\u548c\u81ea\u52a8\u4e24\u79cd\u4f38\u7f29\u6a21\u5f0f\u3002
                                      • \u624b\u52a8\u4f38\u7f29\uff1a\u5782\u76f4\u4f38\u7f29\u7b56\u7565\u8ba1\u7b97\u51fa\u63a8\u8350\u7684\u8d44\u6e90\u914d\u7f6e\u503c\u540e\uff0c\u9700\u7528\u6237\u624b\u52a8\u4fee\u6539\u5e94\u7528\u7684\u8d44\u6e90\u914d\u989d\u3002
                                      • \u81ea\u52a8\u4f38\u7f29\uff1a\u5782\u76f4\u4f38\u7f29\u7b56\u7565\u81ea\u52a8\u8ba1\u7b97\u548c\u4fee\u6539\u5e94\u7528\u7684\u8d44\u6e90\u914d\u989d\u3002
                                    • \u76ee\u6807\u5bb9\u5668\uff1a\u9009\u62e9\u9700\u8981\u8fdb\u884c\u5782\u76f4\u4f38\u7f29\u7684\u5bb9\u5668\u3002
                                  4. \u5b8c\u6210\u53c2\u6570\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u6309\u94ae\uff0c\u81ea\u52a8\u8fd4\u56de\u5f39\u6027\u4f38\u7f29\u8be6\u60c5\u9875\u9762\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u6267\u884c\u7f16\u8f91\u3001\u5220\u9664\u64cd\u4f5c\u3002

                                  Note

                                  By default, --min-replicas is set to 2, meaning VPA only takes effect when the replica count is greater than 1. You can change this default behavior by modifying the --min-replicas argument of the updater component.

                                  spec:\n  containers:\n  - name: updater\n    args:\n    - \"--min-replicas=2\"\n
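
                                  A vertical scaling policy corresponds to a standard VerticalPodAutoscaler object. A minimal sketch is shown below; the names are illustrative, and updateMode \"Off\" corresponds to the manual mode above while \"Auto\" corresponds to the automatic mode:

                                  apiVersion: autoscaling.k8s.io/v1\nkind: VerticalPodAutoscaler\nmetadata:\n  name: vpa-my-dep               # illustrative policy name\nspec:\n  targetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: my-dep                 # workload to scale\n  updatePolicy:\n    updateMode: \"Auto\"           # \"Off\" = recommend only (manual mode), \"Auto\" = apply automatically\n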
                                  "},{"location":"end-user/kpanda/scale/custom-hpa.html","title":"\u57fa\u4e8e\u81ea\u5b9a\u4e49\u6307\u6807\u521b\u5efa HPA","text":"

                                  \u5f53\u7cfb\u7edf\u5185\u7f6e\u7684 CPU \u548c\u5185\u5b58\u4e24\u79cd\u6307\u6807\u4e0d\u80fd\u6ee1\u8db3\u60a8\u4e1a\u52a1\u7684\u5b9e\u9645\u9700\u6c42\u65f6\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u914d\u7f6e ServiceMonitoring \u6765\u6dfb\u52a0\u81ea\u5b9a\u4e49\u6307\u6807\uff0c \u5e76\u57fa\u4e8e\u81ea\u5b9a\u4e49\u6307\u6807\u5b9e\u73b0\u5f39\u6027\u4f38\u7f29\u3002\u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u57fa\u4e8e\u81ea\u5b9a\u4e49\u6307\u6807\u8fdb\u884c\u5f39\u6027\u4f38\u7f29\u3002

                                  Note

                                  1. HPA \u4ec5\u9002\u7528\u4e8e Deployment \u548c StatefulSet\uff0c\u6bcf\u4e2a\u5de5\u4f5c\u8d1f\u8f7d\u53ea\u80fd\u521b\u5efa\u4e00\u4e2a HPA\u3002
                                  2. \u5982\u679c\u540c\u65f6\u4f7f\u7528\u5185\u7f6e\u6307\u6807\u548c\u591a\u79cd\u81ea\u5b9a\u4e49\u6307\uff0cHPA \u4f1a\u6839\u636e\u591a\u9879\u6307\u6807\u5206\u522b\u8ba1\u7b97\u6240\u9700\u4f38\u7f29\u526f\u672c\u6570\uff0c\u53d6\u8f83\u5927\u503c\uff08\u4f46\u4e0d\u4f1a\u8d85\u8fc7\u8bbe\u7f6e HPA \u7b56\u7565\u65f6\u914d\u7f6e\u7684\u6700\u5927\u526f\u672c\u6570\uff09\u8fdb\u884c\u5f39\u6027\u4f38\u7f29\u3002
                                  "},{"location":"end-user/kpanda/scale/custom-hpa.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                  \u5728\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u81ea\u5b9a\u4e49\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                  • \u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c \u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762
                                  • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u521b\u5efa\u6216\u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u521b\u5efa
                                  • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c \u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743
                                  • \u5df2\u5b89\u88c5 metrics-server \u63d2\u4ef6
                                  • \u5df2\u5b89\u88c5 insight-agent \u63d2\u4ef6
                                  • \u5df2\u5b89\u88c5 Prometheus-adapter \u63d2\u4ef6
                                  "},{"location":"end-user/kpanda/scale/custom-hpa.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                  \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u3002

                                  1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \u8fdb\u5165\u96c6\u7fa4\u5217\u8868\u9875\u9762\u3002\u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                  2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d \u8fdb\u5165\u5de5\u4f5c\u8d1f\u8f7d\u5217\u8868\u540e\uff0c\u70b9\u51fb\u4e00\u4e2a\u8d1f\u8f7d\u540d\u79f0\uff0c\u8fdb\u5165 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5 \u9875\u9762\u3002

                                  3. \u70b9\u51fb \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u7684\u5f39\u6027\u4f38\u7f29\u914d\u7f6e\u60c5\u51b5\u3002

                                  4. \u786e\u8ba4\u96c6\u7fa4\u5df2\u5b89\u88c5\u4e86 metrics-server \u3001Insight\u3001Prometheus-adapter \u63d2\u4ef6\u4e14\u63d2\u4ef6\u8fd0\u884c\u72b6\u6001\u4e3a\u6b63\u5e38\u540e\uff0c\u5373\u53ef\u70b9\u51fb \u65b0\u5efa\u4f38\u7f29 \u6309\u94ae\u3002

                                    Note

                                    \u5982\u679c\u76f8\u5173\u63d2\u4ef6\u672a\u5b89\u88c5\u6216\u63d2\u4ef6\u5904\u4e8e\u5f02\u5e38\u72b6\u6001\uff0c\u60a8\u5728\u9875\u9762\u4e0a\u5c06\u65e0\u6cd5\u770b\u89c1\u521b\u5efa\u81ea\u5b9a\u4e49\u6307\u6807\u5f39\u6027\u4f38\u7f29\u5165\u53e3\u3002

                                  5. \u521b\u5efa\u81ea\u5b9a\u4e49\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u53c2\u6570\u3002

                                    • \u7b56\u7565\u540d\u79f0\uff1a\u8f93\u5165\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u7684\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 hpa-my-dep\u3002
                                    • \u547d\u540d\u7a7a\u95f4\uff1a\u8d1f\u8f7d\u6240\u5728\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                    • \u5de5\u4f5c\u8d1f\u8f7d\uff1a\u6267\u884c\u5f39\u6027\u4f38\u7f29\u7684\u5de5\u4f5c\u8d1f\u8f7d\u5bf9\u8c61\u3002
                                    • \u8d44\u6e90\u7c7b\u578b\uff1a\u8fdb\u884c\u76d1\u63a7\u7684\u81ea\u5b9a\u4e49\u6307\u6807\u7c7b\u578b\uff0c\u5305\u542b Pod \u548c Service \u4e24\u79cd\u7c7b\u578b\u3002
                                    • \u6307\u6807\uff1a\u4f7f\u7528 ServiceMonitoring \u521b\u5efa\u7684\u81ea\u5b9a\u4e49\u6307\u6807\u540d\u79f0\u6216\u7cfb\u7edf\u5185\u7f6e\u7684\u81ea\u5b9a\u4e49\u6307\u6807\u540d\u79f0\u3002
                                    • \u6570\u636e\u7c7b\u578b\uff1a\u7528\u4e8e\u8ba1\u7b97\u6307\u6807\u503c\u7684\u65b9\u6cd5\uff0c\u5305\u542b\u76ee\u6807\u503c\u548c\u76ee\u6807\u5e73\u5747\u503c\u4e24\u79cd\u7c7b\u578b\uff0c\u5f53\u8d44\u6e90\u7c7b\u578b\u4e3a Pod \u65f6\uff0c\u53ea\u652f\u6301\u4f7f\u7528\u76ee\u6807\u5e73\u5747\u503c\u3002
                                  "},{"location":"end-user/kpanda/scale/custom-hpa.html#_3","title":"\u64cd\u4f5c\u793a\u4f8b","text":"

                                  \u672c\u6848\u4f8b\u4ee5 Golang \u4e1a\u52a1\u7a0b\u5e8f\u4e3a\u4f8b\uff0c\u8be5\u793a\u4f8b\u7a0b\u5e8f\u66b4\u9732\u4e86 httpserver_requests_total \u6307\u6807\uff0c\u5e76\u8bb0\u5f55 HTTP \u7684\u8bf7\u6c42\uff0c\u901a\u8fc7\u8be5\u6307\u6807\u53ef\u4ee5\u8ba1\u7b97\u51fa\u4e1a\u52a1\u7a0b\u5e8f\u7684 QPS \u503c\u3002

                                  "},{"location":"end-user/kpanda/scale/custom-hpa.html#_4","title":"\u90e8\u7f72\u4e1a\u52a1\u7a0b\u5e8f","text":"

                                  \u4f7f\u7528 Deployment \u90e8\u7f72\u4e1a\u52a1\u7a0b\u5e8f\uff1a

                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: httpserver\n  namespace: httpserver\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: httpserver\n  template:\n    metadata:\n      labels:\n        app: httpserver\n    spec:\n      containers:\n      - name: httpserver\n        image: registry.imroc.cc/test/httpserver:custom-metrics\n        imagePullPolicy: Always\n---\n\napiVersion: v1\nkind: Service\nmetadata:\n  name: httpserver\n  namespace: httpserver\n  labels:\n    app: httpserver\n  annotations:\n    prometheus.io/scrape: \"true\"\n    prometheus.io/path: \"/metrics\"\n    prometheus.io/port: \"http\"\nspec:\n  type: ClusterIP\n  ports:\n  - port: 80\n    protocol: TCP\n    name: http\n  selector:\n    app: httpserver\n
                                  "},{"location":"end-user/kpanda/scale/custom-hpa.html#prometheus","title":"Prometheus \u91c7\u96c6\u4e1a\u52a1\u76d1\u63a7","text":"

                                  \u82e5\u5df2\u5b89\u88c5 insight-agent\uff0c\u53ef\u4ee5\u901a\u8fc7\u521b\u5efa ServiceMonitor \u7684 CRD \u5bf9\u8c61\u914d\u7f6e Prometheus\u3002

                                  \u64cd\u4f5c\u6b65\u9aa4\uff1a\u5728 \u96c6\u7fa4\u8be6\u60c5 -> \u81ea\u5b9a\u4e49\u8d44\u6e90 \u641c\u7d22\u201cservicemonitors.monitoring.coreos.com\"\uff0c\u70b9\u51fb\u540d\u79f0\u8fdb\u5165\u8be6\u60c5\u3002 \u901a\u8fc7\u521b\u5efa YAML\uff0c\u5728\u547d\u540d\u7a7a\u95f4 httpserver \u4e0b\u521b\u5efa\u5982\u4e0b\u793a\u4f8b\u7684 CRD\uff1a

                                  apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: httpserver\n  namespace: httpserver\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  endpoints:\n  - port: http\n    interval: 5s\n  namespaceSelector:\n    matchNames:\n    - httpserver\n  selector:\n    matchLabels:\n      app: httpserver\n

                                  Note

                                  If Prometheus was installed via insight, the ServiceMonitor must carry the label operator.insight.io/managed-by: insight; the label is not needed if Prometheus was installed another way.

                                  "},{"location":"end-user/kpanda/scale/custom-hpa.html#prometheus-adapter","title":"Configure Metric Rules in prometheus-adapter","text":"

                                  Steps: in Cluster Details -> Helm Apps, search for "prometheus-adapter", open the update page from the action bar, and configure the custom metric in the YAML, for example:

                                  rules:\n  custom:\n    - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)\n      name:\n        as: httpserver_requests_qps\n        matches: httpserver_requests_total\n      resources:\n        template: <<.Resource>>\n      seriesQuery: httpserver_requests_total\n
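
                                  After prometheus-adapter reloads the rules, you can check that the new metric is served through the custom metrics API. A quick sanity check, assuming the httpserver namespace used in this example:

                                  kubectl get --raw \"/apis/custom.metrics.k8s.io/v1beta1/namespaces/httpserver/pods/*/httpserver_requests_qps\"\n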

                                  "},{"location":"end-user/kpanda/scale/custom-hpa.html#_5","title":"\u521b\u5efa\u81ea\u5b9a\u4e49\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u53c2\u6570","text":"

                                  \u6309\u7167\u4e0a\u8ff0\u6b65\u9aa4\u5728 Deployment \u4e2d\u627e\u5230\u5e94\u7528\u7a0b\u5e8f httpserver \u5e76\u901a\u8fc7\u81ea\u5b9a\u4e49\u6307\u6807\u521b\u5efa\u5f39\u6027\u4f38\u7f29\u3002
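
                                  The resulting policy is an HPA that references the custom metric. A minimal sketch of the equivalent object, assuming the httpserver Deployment and the httpserver_requests_qps metric configured above (the target value is illustrative):

                                  apiVersion: autoscaling/v2\nkind: HorizontalPodAutoscaler\nmetadata:\n  name: httpserver\n  namespace: httpserver\nspec:\n  scaleTargetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: httpserver\n  minReplicas: 1\n  maxReplicas: 10\n  metrics:\n  - type: Pods\n    pods:\n      metric:\n        name: httpserver_requests_qps\n      target:\n        type: AverageValue          # Pod resource type supports target average value only\n        averageValue: \"50\"          # illustrative QPS target per Pod\n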

                                  "},{"location":"end-user/kpanda/scale/hpa-cronhpa-compatibility-rules.html","title":"HPA \u548c CronHPA \u517c\u5bb9\u89c4\u5219","text":"

                                  HPA \u5168\u79f0\u4e3a HorizontalPodAutoscaler\uff0c\u5373 Pod \u6c34\u5e73\u81ea\u52a8\u4f38\u7f29\u3002

                                  CronHPA \u5168\u79f0\u4e3a Cron HorizontalPodAutoscaler\uff0c\u5373 Pod \u5b9a\u65f6\u7684\u6c34\u5e73\u81ea\u52a8\u4f38\u7f29\u3002

                                  "},{"location":"end-user/kpanda/scale/hpa-cronhpa-compatibility-rules.html#cronhpa-hpa","title":"CronHPA \u548c HPA \u517c\u5bb9\u51b2\u7a81","text":"

                                  \u5b9a\u65f6\u4f38\u7f29 CronHPA \u901a\u8fc7\u8bbe\u7f6e\u5b9a\u65f6\u7684\u65b9\u5f0f\u89e6\u53d1\u5bb9\u5668\u7684\u6c34\u5e73\u526f\u672c\u4f38\u7f29\u3002\u4e3a\u4e86\u9632\u6b62\u7a81\u53d1\u7684\u6d41\u91cf\u51b2\u51fb\u7b49\u72b6\u51b5\uff0c \u60a8\u53ef\u80fd\u5df2\u7ecf\u914d\u7f6e HPA \u4fdd\u969c\u5e94\u7528\u7684\u6b63\u5e38\u8fd0\u884c\u3002\u5982\u679c\u540c\u65f6\u68c0\u6d4b\u5230\u4e86 HPA \u548c CronHPA \u7684\u5b58\u5728\uff0c \u7531\u4e8e CronHPA \u548c HPA \u76f8\u4e92\u72ec\u7acb\u65e0\u6cd5\u611f\u77e5\uff0c\u5c31\u4f1a\u51fa\u73b0\u4e24\u4e2a\u63a7\u5236\u5668\u5404\u81ea\u5de5\u4f5c\uff0c\u540e\u6267\u884c\u7684\u64cd\u4f5c\u4f1a\u8986\u76d6\u5148\u6267\u884c\u7684\u64cd\u4f5c\u3002

                                  \u5bf9\u6bd4 CronHPA \u548c HPA \u7684\u5b9a\u4e49\u6a21\u677f\uff0c\u53ef\u4ee5\u89c2\u5bdf\u5230\u4ee5\u4e0b\u51e0\u70b9\uff1a

                                  • CronHPA \u548c HPA \u90fd\u662f\u901a\u8fc7 scaleTargetRef \u5b57\u6bb5\u6765\u83b7\u53d6\u4f38\u7f29\u5bf9\u8c61\u3002
                                  • CronHPA \u901a\u8fc7 jobs \u7684 crontab \u89c4\u5219\u5b9a\u65f6\u4f38\u7f29\u526f\u672c\u6570\u3002
                                  • HPA \u901a\u8fc7\u8d44\u6e90\u5229\u7528\u7387\u5224\u65ad\u4f38\u7f29\u60c5\u51b5\u3002

                                  Note

                                  If CronHPA and HPA are both configured, the two will end up operating on the same scaleTargetRef at the same time.

                                  "},{"location":"end-user/kpanda/scale/hpa-cronhpa-compatibility-rules.html#cronhpa-hpa_1","title":"Making CronHPA and HPA Compatible","text":"

                                  As shown above, the root cause of later operations overwriting earlier ones when CronHPA and HPA are used together is that the two controllers cannot sense each other; the conflict is resolved simply by making CronHPA aware of the HPA's current state.

                                  The system makes the HPA itself the scaling target of the scheduled CronHPA, thereby achieving scheduled scaling of the Deployment defined by that HPA.

                                  The HPA definition places the Deployment under its scaleTargetRef field; the Deployment then locates its ReplicaSet through its own definition, and the ReplicaSet finally adjusts the actual replica count.

                                  The 算丰 AI computing platform sets the scaleTargetRef of the CronHPA to the HPA object and then resolves the real scaleTargetRef through that HPA object, which lets the CronHPA sense the HPA's current state.

                                  CronHPA senses the HPA by adjusting it: it takes the larger of the scheduled target replica count and the current replica count to decide whether scaling is needed and whether to raise the HPA's upper bound, and it takes the smaller of the scheduled target replica count and the HPA's configured bound to decide whether the HPA's lower bound needs modification.
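
                                  As a sketch of this mechanism, a CronHorizontalPodAutoscaler from the kubernetes-cronhpa-controller project can point its scaleTargetRef at the HPA instead of the Deployment; the names and schedule below are illustrative, not generated by the platform:

                                  apiVersion: autoscaling.alibabacloud.com/v1beta1\nkind: CronHorizontalPodAutoscaler\nmetadata:\n  name: cronhpa-sample           # illustrative name\n  namespace: default\nspec:\n  scaleTargetRef:\n    apiVersion: autoscaling/v2\n    kind: HorizontalPodAutoscaler\n    name: hpa-my-dep             # scale the HPA, not the Deployment directly\n  jobs:\n  - name: scale-up-morning\n    schedule: \"0 0 8 * * *\"      # cron format with a leading seconds field\n    targetSize: 10               # desired replicas at the scheduled time\n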

                                  "},{"location":"end-user/kpanda/scale/install-cronhpa.html","title":"\u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6","text":"

                                  \u5bb9\u5668\u526f\u672c\u5b9a\u65f6\u6c34\u5e73\u6269\u7f29\u5bb9\u7b56\u7565\uff08CronHPA\uff09\u80fd\u591f\u4e3a\u5468\u671f\u6027\u9ad8\u5e76\u53d1\u5e94\u7528\u63d0\u4f9b\u7a33\u5b9a\u7684\u8ba1\u7b97\u8d44\u6e90\u4fdd\u969c\uff0c kubernetes-cronhpa-controller \u5219\u662f\u5b9e\u73b0 CronHPA \u7684\u5173\u952e\u7ec4\u4ef6\u3002

                                  \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6\u3002

                                  Note

                                  \u4e3a\u4e86\u4f7f\u7528 CornHPA\uff0c\u4e0d\u4ec5\u9700\u8981\u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6\uff0c\u8fd8\u8981\u5b89\u88c5 metrics-server \u63d2\u4ef6\u3002

                                  "},{"location":"end-user/kpanda/scale/install-cronhpa.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                  \u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                  • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                  • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u3002

                                  • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                  "},{"location":"end-user/kpanda/scale/install-cronhpa.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                  \u53c2\u8003\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6\u3002

                                  1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u627e\u5230\u9700\u8981\u5b89\u88c5\u6b64\u63d2\u4ef6\u7684\u76ee\u6807\u96c6\u7fa4\uff0c\u70b9\u51fb\u8be5\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u70b9\u51fb \u5de5\u4f5c\u8d1f\u8f7d -> \u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d \uff0c\u70b9\u51fb\u76ee\u6807\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u3002

                                  2. \u5728\u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u5728 CronHPA \u53f3\u4fa7\u70b9\u51fb \u5b89\u88c5 \u3002

                                  3. \u9605\u8bfb\u8be5\u63d2\u4ef6\u7684\u76f8\u5173\u4ecb\u7ecd\uff0c\u9009\u62e9\u7248\u672c\u540e\u70b9\u51fb \u5b89\u88c5 \u6309\u94ae\u3002\u63a8\u8350\u5b89\u88c5 1.3.0 \u6216\u66f4\u9ad8\u7248\u672c\u3002

                                  4. \u53c2\u8003\u4ee5\u4e0b\u8bf4\u660e\u914d\u7f6e\u53c2\u6570\u3002

                                    • \u540d\u79f0\uff1a\u8f93\u5165\u63d2\u4ef6\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 kubernetes-cronhpa-controller\u3002
                                    • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u63d2\u4ef6\u5b89\u88c5\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u6b64\u5904\u4ee5 default \u4e3a\u4f8b\u3002
                                    • \u7248\u672c\uff1a\u63d2\u4ef6\u7684\u7248\u672c\uff0c\u6b64\u5904\u4ee5 1.3.0 \u7248\u672c\u4e3a\u4f8b\u3002
                                    • \u5c31\u7eea\u7b49\u5f85\uff1a\u542f\u7528\u540e\uff0c\u5c06\u7b49\u5f85\u5e94\u7528\u4e0b\u7684\u6240\u6709\u5173\u8054\u8d44\u6e90\u90fd\u5904\u4e8e\u5c31\u7eea\u72b6\u6001\uff0c\u624d\u4f1a\u6807\u8bb0\u5e94\u7528\u5b89\u88c5\u6210\u529f\u3002
                                    • \u5931\u8d25\u5220\u9664\uff1a\u5982\u679c\u63d2\u4ef6\u5b89\u88c5\u5931\u8d25\uff0c\u5219\u5220\u9664\u5df2\u7ecf\u5b89\u88c5\u7684\u5173\u8054\u8d44\u6e90\u3002\u5f00\u542f\u540e\uff0c\u5c06\u9ed8\u8ba4\u540c\u6b65\u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u3002
                                    • \u8be6\u60c5\u65e5\u5fd7\uff1a\u5f00\u542f\u540e\uff0c\u5c06\u8bb0\u5f55\u5b89\u88c5\u8fc7\u7a0b\u7684\u8be6\u7ec6\u65e5\u5fd7\u3002

                                    Note

                                    \u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u548c/\u6216 \u5931\u8d25\u5220\u9664 \u540e\uff0c\u5e94\u7528\u9700\u8981\u8f83\u957f\u65f6\u95f4\u624d\u4f1a\u88ab\u6807\u8bb0\u4e3a\u201c\u8fd0\u884c\u4e2d\u201d\u72b6\u6001\u3002

                                  5. \u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \uff0c\u7cfb\u7edf\u5c06\u81ea\u52a8\u8df3\u8f6c\u81f3 Helm \u5e94\u7528 \u5217\u8868\u9875\u9762\u3002\u7a0d\u7b49\u51e0\u5206\u949f\u540e\u5237\u65b0\u9875\u9762\u4f5c\uff0c\u5373\u53ef\u770b\u5230\u521a\u521a\u5b89\u88c5\u7684\u5e94\u7528\u3002

                                    Warning

                                    \u5982\u9700\u5220\u9664 kubernetes-cronhpa-controller \u63d2\u4ef6\uff0c\u5e94\u5728 Helm \u5e94\u7528 \u5217\u8868\u9875\u9762\u624d\u80fd\u5c06\u5176\u5f7b\u5e95\u5220\u9664\u3002

                                    \u5982\u679c\u5728\u5de5\u4f5c\u8d1f\u8f7d\u7684 \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\u4e0b\u5220\u9664\u63d2\u4ef6\uff0c\u8fd9\u53ea\u662f\u5220\u9664\u4e86\u8be5\u63d2\u4ef6\u7684\u5de5\u4f5c\u8d1f\u8f7d\u526f\u672c\uff0c\u63d2\u4ef6\u672c\u8eab\u4ecd\u672a\u5220\u9664\uff0c\u540e\u7eed\u91cd\u65b0\u5b89\u88c5\u8be5\u63d2\u4ef6\u65f6\u4e5f\u4f1a\u63d0\u793a\u9519\u8bef\u3002

                                  6. \u56de\u5230\u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\u9875\u9762\u4e0b\u7684 \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u53ef\u4ee5\u770b\u5230\u754c\u9762\u663e\u793a \u63d2\u4ef6\u5df2\u5b89\u88c5 \u3002\u73b0\u5728\u53ef\u4ee5\u5f00\u59cb\u521b\u5efa CronHPA \u7b56\u7565\u4e86\u3002

                                  "},{"location":"end-user/kpanda/scale/install-metrics-server.html","title":"\u5b89\u88c5 metrics-server \u63d2\u4ef6","text":"

                                  metrics-server \u662f Kubernetes \u5185\u7f6e\u7684\u8d44\u6e90\u4f7f\u7528\u6307\u6807\u91c7\u96c6\u7ec4\u4ef6\u3002 \u60a8\u53ef\u4ee5\u901a\u8fc7\u914d\u7f6e\u5f39\u6027\u4f38\u7f29\uff08HPA\uff09\u7b56\u7565\u6765\u5b9e\u73b0\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u81ea\u52a8\u6c34\u5e73\u4f38\u7f29 Pod \u526f\u672c\u3002

                                  \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5 metrics-server \u3002

                                  "},{"location":"end-user/kpanda/scale/install-metrics-server.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                  \u5b89\u88c5 metrics-server \u63d2\u4ef6\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                  • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                  • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3002

                                  • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                  "},{"location":"end-user/kpanda/scale/install-metrics-server.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                  \u8bf7\u6267\u884c\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 metrics-server \u63d2\u4ef6\u3002

                                  1. \u5728\u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\u4e0b\u7684\u5f39\u6027\u4f38\u7f29\u9875\u9762\uff0c\u70b9\u51fb \u53bb\u5b89\u88c5 \uff0c\u8fdb\u5165 metrics-server \u63d2\u4ef6\u5b89\u88c5\u754c\u9762\u3002

                                  2. \u9605\u8bfb metrics-server \u63d2\u4ef6\u76f8\u5173\u4ecb\u7ecd\uff0c\u9009\u62e9\u7248\u672c\u540e\u70b9\u51fb \u5b89\u88c5 \u6309\u94ae\u3002\u672c\u6587\u5c06\u4ee5 3.8.2 \u7248\u672c\u4e3a\u4f8b\u8fdb\u884c\u5b89\u88c5\uff0c\u63a8\u8350\u60a8\u5b89\u88c5 3.8.2 \u53ca\u66f4\u9ad8\u7248\u672c\u3002

                                  3. \u5728\u5b89\u88c5\u914d\u7f6e\u754c\u9762\u914d\u7f6e\u57fa\u672c\u53c2\u6570\u3002

                                    • \u540d\u79f0\uff1a\u8f93\u5165\u63d2\u4ef6\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 metrics-server-01\u3002
                                    • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u63d2\u4ef6\u5b89\u88c5\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u6b64\u5904\u4ee5 default \u4e3a\u4f8b\u3002
                                    • \u7248\u672c\uff1a\u63d2\u4ef6\u7684\u7248\u672c\uff0c\u6b64\u5904\u4ee5 3.8.2 \u7248\u672c\u4e3a\u4f8b\u3002
                                    • \u5c31\u7eea\u7b49\u5f85\uff1a\u542f\u7528\u540e\uff0c\u5c06\u7b49\u5f85\u5e94\u7528\u4e0b\u6240\u6709\u5173\u8054\u8d44\u6e90\u5904\u4e8e\u5c31\u7eea\u72b6\u6001\uff0c\u624d\u4f1a\u6807\u8bb0\u5e94\u7528\u5b89\u88c5\u6210\u529f\u3002
                                    • \u5931\u8d25\u5220\u9664\uff1a\u5f00\u542f\u540e\uff0c\u5c06\u9ed8\u8ba4\u540c\u6b65\u5f00\u542f\u5c31\u7eea\u7b49\u5f85\u3002\u5982\u679c\u5b89\u88c5\u5931\u8d25\uff0c\u5c06\u5220\u9664\u5b89\u88c5\u76f8\u5173\u8d44\u6e90\u3002
                                    • \u8be6\u60c5\u65e5\u5fd7\uff1a\u5f00\u542f\u5b89\u88c5\u8fc7\u7a0b\u65e5\u5fd7\u7684\u8be6\u7ec6\u8f93\u51fa\u3002

                                    Note

                                    With Ready wait and/or Delete on failure enabled, it takes noticeably longer for the application to be marked as Running.

                                  4. Advanced parameter configuration

                                    • If the cluster network cannot reach the k8s.gcr.io registry, try changing the repository parameter to repository: k8s.m.daocloud.io/metrics-server/metrics-server

                                    • Installing the metrics-server plugin also requires an SSL certificate. To bypass certificate verification, add the - --kubelet-insecure-tls argument under defaultArgs:.

                                    Click to view the recommended YAML parameters
                                    image:\n  repository: k8s.m.daocloud.io/metrics-server/metrics-server # change the registry address to k8s.m.daocloud.io\n  tag: ''\n  pullPolicy: IfNotPresent\nimagePullSecrets: []\nnameOverride: ''\nfullnameOverride: ''\nserviceAccount:\n  create: true\n  annotations: {}\n  name: ''\nrbac:\n  create: true\n  pspEnabled: false\napiService:\n  create: true\npodLabels: {}\npodAnnotations: {}\npodSecurityContext: {}\nsecurityContext:\n  allowPrivilegeEscalation: false\n  readOnlyRootFilesystem: true\n  runAsNonRoot: true\n  runAsUser: 1000\npriorityClassName: system-cluster-critical\ncontainerPort: 4443\nhostNetwork:\n  enabled: false\nreplicas: 1\nupdateStrategy: {}\npodDisruptionBudget:\n  enabled: false\n  minAvailable: null\n  maxUnavailable: null\ndefaultArgs:\n  - '--cert-dir=/tmp'\n  - '--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname'\n  - '--kubelet-use-node-status-port'\n  - '--metric-resolution=15s'\n  - --kubelet-insecure-tls # bypass certificate verification\nargs: []\nlivenessProbe:\n  httpGet:\n    path: /livez\n    port: https\n    scheme: HTTPS\n  initialDelaySeconds: 0\n  periodSeconds: 10\n  failureThreshold: 3\nreadinessProbe:\n  httpGet:\n    path: /readyz\n    port: https\n    scheme: HTTPS\n  initialDelaySeconds: 20\n  periodSeconds: 10\n  failureThreshold: 3\nservice:\n  type: ClusterIP\n  port: 443\n  annotations: {}\n  labels: {}\nmetrics:\n  enabled: false\nserviceMonitor:\n  enabled: false\n  additionalLabels: {}\n  interval: 1m\n  scrapeTimeout: 10s\nresources: {}\nextraVolumeMounts: []\nextraVolumes: []\nnodeSelector: {}\ntolerations: []\naffinity: {}\n
                                  5. Click the OK button to complete the installation of the metrics-server plugin. The system then automatically redirects to the Helm Apps list page; wait a few minutes, refresh the page, and you will see the newly installed application.
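
                                  To confirm that metrics-server is serving resource metrics, a quick check from a terminal with kubectl access to the cluster:

                                  kubectl get apiservices v1beta1.metrics.k8s.io   # should report Available=True\nkubectl top node                                 # should list per-node CPU/memory usage\n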

                                  Note

                                  The metrics-server plugin can only be deleted completely from the Helm Apps list page. Deleting metrics-server only from the workload page merely deletes the application's workload replica; the application itself remains, and an error will be reported when you later reinstall the plugin.

                                  "},{"location":"end-user/kpanda/scale/install-vpa.html","title":"\u5b89\u88c5 vpa \u63d2\u4ef6","text":"

                                  \u5bb9\u5668\u5782\u76f4\u6269\u7f29\u5bb9\u7b56\u7565\uff08Vertical Pod Autoscaler, VPA\uff09\u80fd\u591f\u8ba9\u96c6\u7fa4\u7684\u8d44\u6e90\u914d\u7f6e\u66f4\u52a0\u5408\u7406\uff0c\u907f\u514d\u96c6\u7fa4\u8d44\u6e90\u6d6a\u8d39\u3002 vpa \u5219\u662f\u5b9e\u73b0\u5bb9\u5668\u5782\u76f4\u6269\u7f29\u5bb9\u7684\u5173\u952e\u7ec4\u4ef6\u3002

                                  \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5 vpa \u63d2\u4ef6\u3002

                                  \u4e3a\u4e86\u4f7f\u7528 VPA \u7b56\u7565\uff0c\u4e0d\u4ec5\u9700\u8981\u5b89\u88c5 __vpa__ \u63d2\u4ef6\uff0c\u8fd8\u8981[\u5b89\u88c5 __metrics-server__ \u63d2\u4ef6](install-metrics-server.md)\u3002\n
                                  "},{"location":"end-user/kpanda/scale/install-vpa.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                  \u5b89\u88c5 vpa \u63d2\u4ef6\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                  • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                  • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u3002

                                  • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                  "},{"location":"end-user/kpanda/scale/install-vpa.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                  \u53c2\u8003\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 vpa \u63d2\u4ef6\u3002

                                  1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u627e\u5230\u9700\u8981\u5b89\u88c5\u6b64\u63d2\u4ef6\u7684\u76ee\u6807\u96c6\u7fa4\uff0c\u70b9\u51fb\u8be5\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u70b9\u51fb \u5de5\u4f5c\u8d1f\u8f7d -> \u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d \uff0c\u70b9\u51fb\u76ee\u6807\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u3002

                                  2. \u5728\u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u5728 VPA \u53f3\u4fa7\u70b9\u51fb \u5b89\u88c5 \u3002

                                    3. \u9605\u8bfb\u8be5\u63d2\u4ef6\u7684\u76f8\u5173\u4ecb\u7ecd\uff0c\u9009\u62e9\u7248\u672c\u540e\u70b9\u51fb \u5b89\u88c5 \u6309\u94ae\u3002\u63a8\u8350\u5b89\u88c5 1.5.0 \u6216\u66f4\u9ad8\u7248\u672c\u3002

                                    4. \u67e5\u770b\u4ee5\u4e0b\u8bf4\u660e\u914d\u7f6e\u53c2\u6570\u3002

                                    - \u540d\u79f0\uff1a\u8f93\u5165\u63d2\u4ef6\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 kubernetes-cronhpa-controller\u3002 - \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u63d2\u4ef6\u5b89\u88c5\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u6b64\u5904\u4ee5 default \u4e3a\u4f8b\u3002 - \u7248\u672c\uff1a\u63d2\u4ef6\u7684\u7248\u672c\uff0c\u6b64\u5904\u4ee5 4.5.0 \u7248\u672c\u4e3a\u4f8b\u3002 - \u5c31\u7eea\u7b49\u5f85\uff1a\u542f\u7528\u540e\uff0c\u5c06\u7b49\u5f85\u5e94\u7528\u4e0b\u7684\u6240\u6709\u5173\u8054\u8d44\u6e90\u90fd\u5904\u4e8e\u5c31\u7eea\u72b6\u6001\uff0c\u624d\u4f1a\u6807\u8bb0\u5e94\u7528\u5b89\u88c5\u6210\u529f\u3002 - \u5931\u8d25\u5220\u9664\uff1a\u5982\u679c\u63d2\u4ef6\u5b89\u88c5\u5931\u8d25\uff0c\u5219\u5220\u9664\u5df2\u7ecf\u5b89\u88c5\u7684\u5173\u8054\u8d44\u6e90\u3002\u5f00\u542f\u540e\uff0c\u5c06\u9ed8\u8ba4\u540c\u6b65\u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u3002 - \u8be6\u60c5\u65e5\u5fd7\uff1a\u5f00\u542f\u540e\uff0c\u5c06\u8bb0\u5f55\u5b89\u88c5\u8fc7\u7a0b\u7684\u8be6\u7ec6\u65e5\u5fd7\u3002

                                    Note

                                    \u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u548c/\u6216 \u5931\u8d25\u5220\u9664 \u540e\uff0c\u5e94\u7528\u9700\u8981\u7ecf\u8fc7\u8f83\u957f\u65f6\u95f4\u624d\u4f1a\u88ab\u6807\u8bb0\u4e3a\u201c\u8fd0\u884c\u4e2d\u201d\u72b6\u6001\u3002

                                  3. \u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \uff0c\u7cfb\u7edf\u5c06\u81ea\u52a8\u8df3\u8f6c\u81f3 Helm \u5e94\u7528 \u5217\u8868\u9875\u9762\u3002\u7a0d\u7b49\u51e0\u5206\u949f\u540e\u5237\u65b0\u9875\u9762\u4f5c\uff0c\u5373\u53ef\u770b\u5230\u521a\u521a\u5b89\u88c5\u7684\u5e94\u7528\u3002

                                    Warning

                                    \u5982\u9700\u5220\u9664 vpa \u63d2\u4ef6\uff0c\u5e94\u5728 Helm \u5e94\u7528 \u5217\u8868\u9875\u9762\u624d\u80fd\u5c06\u5176\u5f7b\u5e95\u5220\u9664\u3002

                                    \u5982\u679c\u5728\u5de5\u4f5c\u8d1f\u8f7d\u7684 \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\u4e0b\u5220\u9664\u63d2\u4ef6\uff0c\u8fd9\u53ea\u662f\u5220\u9664\u4e86\u8be5\u63d2\u4ef6\u7684\u5de5\u4f5c\u8d1f\u8f7d\u526f\u672c\uff0c\u63d2\u4ef6\u672c\u8eab\u4ecd\u672a\u5220\u9664\uff0c\u540e\u7eed\u91cd\u65b0\u5b89\u88c5\u8be5\u63d2\u4ef6\u65f6\u4e5f\u4f1a\u63d0\u793a\u9519\u8bef\u3002

                                  4. \u56de\u5230\u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\u9875\u9762\u4e0b\u7684 \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u53ef\u4ee5\u770b\u5230\u754c\u9762\u663e\u793a \u63d2\u4ef6\u5df2\u5b89\u88c5 \u3002\u73b0\u5728\u53ef\u4ee5\u5f00\u59cb\u521b\u5efa VPA \u7b56\u7565\u4e86\u3002

                                  "},{"location":"end-user/kpanda/scale/knative/install.html","title":"\u5b89\u88c5","text":"

                                  Knative \u662f\u4e00\u4e2a\u9762\u5411\u65e0\u670d\u52a1\u5668\u90e8\u7f72\u7684\u8de8\u5e73\u53f0\u89e3\u51b3\u65b9\u6848\u3002

                                  "},{"location":"end-user/kpanda/scale/knative/install.html#_2","title":"\u6b65\u9aa4","text":"
                                  1. \u767b\u5f55\u96c6\u7fa4\uff0c\u70b9\u51fb\u4fa7\u8fb9\u680f Helm \u5e94\u7528 -> Helm \u6a21\u677f \uff0c\u5728\u53f3\u4fa7\u4e0a\u65b9\u641c\u7d22\u6846\u8f93\u5165 knative \uff0c\u7136\u540e\u6309\u56de\u8f66\u952e\u641c\u7d22\u3002

                                  2. \u70b9\u51fb\u641c\u7d22\u51fa\u7684 knative-operator \uff0c\u8fdb\u5165\u5b89\u88c5\u914d\u7f6e\u754c\u9762\u3002\u4f60\u53ef\u4ee5\u5728\u8be5\u754c\u9762\u67e5\u770b\u53ef\u7528\u7248\u672c\u4ee5\u53ca Helm values \u7684 Parameters \u53ef\u9009\u9879\u3002

                                  3. \u70b9\u51fb\u5b89\u88c5\u6309\u94ae\u540e\uff0c\u8fdb\u5165\u5b89\u88c5\u914d\u7f6e\u754c\u9762\u3002

                                  4. \u8f93\u5165\u540d\u79f0\uff0c\u5b89\u88c5\u79df\u6237\uff0c\u5efa\u8bae\u52fe\u9009 \u5c31\u7eea\u7b49\u5f85 \u548c \u8be6\u7ec6\u65e5\u5fd7 \u3002

                                  5. \u5728\u4e0b\u65b9\u8bbe\u7f6e\uff0c\u53ef\u4ee5\u52fe\u9009 Serving \uff0c\u5e76\u8f93\u5165 Knative Serving \u7ec4\u4ef6\u7684\u5b89\u88c5\u79df\u6237\uff0c\u4f1a\u5728\u5b89\u88c5\u540e\u90e8\u7f72 Knative Serving \u7ec4\u4ef6\uff0c\u8be5\u7ec4\u4ef6\u7531 Knative Operator \u7ba1\u7406\u3002
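
                                  Behind the scenes, the operator manages the Serving component through a KnativeServing custom resource. A minimal sketch of such a CR, assuming Kourier as the ingress (see the comparison in the next section); the namespace is illustrative:

                                  apiVersion: operator.knative.dev/v1beta1\nkind: KnativeServing\nmetadata:\n  name: knative-serving\n  namespace: knative-serving\nspec:\n  ingress:\n    kourier:\n      enabled: true            # Kourier as the traffic ingress\n  config:\n    network:\n      ingress-class: \"kourier.ingress.networking.knative.dev\"\n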

                                  "},{"location":"end-user/kpanda/scale/knative/knative.html","title":"Kantive \u4ecb\u7ecd","text":"

                                  Knative \u63d0\u4f9b\u4e86\u4e00\u79cd\u66f4\u9ad8\u5c42\u6b21\u7684\u62bd\u8c61\uff0c\u7b80\u5316\u5e76\u52a0\u901f\u4e86\u5728 Kubernetes \u4e0a\u6784\u5efa\u3001\u90e8\u7f72\u548c\u7ba1\u7406\u5e94\u7528\u7684\u8fc7\u7a0b\u3002\u5b83\u4f7f\u5f97\u5f00\u53d1\u4eba\u5458\u80fd\u591f\u66f4\u4e13\u6ce8\u4e8e\u4e1a\u52a1\u903b\u8f91\u7684\u5b9e\u73b0\uff0c\u800c\u5c06\u5927\u90e8\u5206\u57fa\u7840\u8bbe\u65bd\u548c\u8fd0\u7ef4\u5de5\u4f5c\u4ea4\u7ed9 Knative \u53bb\u5904\u7406\uff0c\u4ece\u800c\u663e\u8457\u63d0\u9ad8\u751f\u4ea7\u529b\u3002

                                  "},{"location":"end-user/kpanda/scale/knative/knative.html#_1","title":"\u7ec4\u4ef6","text":"

                                  knative-operator \u8fd0\u884c\u7ec4\u4ef6\u5982\u4e0b\u3002

                                  knative-operator   knative-operator-58f7d7db5c-7f6r5      1/1     Running     0     6m55s\nknative-operator   operator-webhook-667dc67bc-qvrv4       1/1     Running     0     6m55s\n

                                  The knative-serving components are as follows.

                                  knative-serving        3scale-kourier-gateway-d69fbfbd-bd8d8   1/1     Running     0                 7m13s\nknative-serving        activator-7c6fddd698-wdlng              1/1     Running     0                 7m3s\nknative-serving        autoscaler-8f4b876bb-kd25p              1/1     Running     0                 7m17s\nknative-serving        autoscaler-hpa-5f7f74679c-vkc7p         1/1     Running     0                 7m15s\nknative-serving        controller-789c896c46-tfvsv             1/1     Running     0                 7m17s\nknative-serving        net-kourier-controller-7db578c889-7gd5l 1/1     Running     0                 7m14s\nknative-serving        webhook-5c88b94c5-78x7m                 1/1     Running     0                 7m1s\nknative-serving        storage-version-migration-serving-serving-1.12.2-t7zvd   0/1  Completed   0   7m15s\n
                                  Component | Role
                                  --- | ---
                                  Activator | Queues requests (when a Knative Service has scaled to zero), calls the autoscaler to bring a scaled-to-zero service back up, and forwards the queued requests. The Activator can also act as a request buffer to absorb burst traffic.
                                  Autoscaler | Scales Knative Services based on configuration, metrics, and incoming requests.
                                  Controller | Manages the state of Knative CRs. It watches multiple objects, manages the lifecycle of dependent resources, and updates resource status.
                                  Queue-Proxy | A sidecar container injected into every Knative Service. It collects traffic data and reports it to the Autoscaler, which uses this data and preset rules to initiate scale-up or scale-down requests.
                                  Webhooks | Knative Serving has several webhooks responsible for validating and mutating Knative resources.

                                  "},{"location":"end-user/kpanda/scale/knative/knative.html#ingress","title":"Ingress Traffic Solutions","text":"

                                  Solution | Applicable scenario
                                  --- | ---
                                  Istio | If Istio is already in use, it can be chosen as the traffic ingress solution.
                                  Contour | If Contour is already enabled in the cluster, it can be chosen as the traffic ingress solution.
                                  Kourier | If neither of the above ingress components is present, Knative's Envoy-based Kourier ingress can be used as the traffic entry point.

                                  "},{"location":"end-user/kpanda/scale/knative/knative.html#autoscaler","title":"Autoscaler Comparison","text":"

                                  Autoscaler type | Core part of Knative Serving | Enabled by default | Scale-to-zero support | CPU-based autoscaling support
                                  --- | --- | --- | --- | ---
                                  Knative Pod Autoscaler (KPA) | Yes | Yes | Yes | No
                                  Horizontal Pod Autoscaler (HPA) | No | Must be enabled after installing Knative Serving | No | Yes

                                  "},{"location":"end-user/kpanda/scale/knative/knative.html#crd","title":"CRD","text":"

                                  Resource type | API name | Description
                                  --- | --- | ---
                                  Services | service.serving.knative.dev | Automatically manages the entire lifecycle of the workload, controls the creation of the other objects, and ensures the application has a Route, a Configuration, and a new Revision on every update.
                                  Routes | route.serving.knative.dev | Maps a network endpoint to one or more Revisions, supporting traffic splitting and version routing.
                                  Configurations | configuration.serving.knative.dev | Maintains the desired state of the deployment, separating code from configuration in line with the Twelve-Factor App methodology; modifying the configuration creates a new Revision.
                                  Revisions | revision.serving.knative.dev | A point-in-time snapshot of the workload at each modification; immutable, and automatically scaled up and down based on traffic.

                                  "},{"location":"end-user/kpanda/scale/knative/playground.html","title":"Knative in Practice","text":"

                                  In this section, we will get a deeper understanding of Knative through several hands-on exercises.

                                  "},{"location":"end-user/kpanda/scale/knative/playground.html#case-1-hello-world","title":"case 1 - Hello World","text":"
                                  apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n

                                  You can use kubectl to check the status of the deployed application, for which Knative has automatically configured an ingress and an autoscaler.

                                  ~ kubectl get service.serving.knative.dev/hello\nNAME    URL                                              LATESTCREATED   LATESTREADY   READY   REASON\nhello   http://hello.knative-serving.knative.loulan.me   hello-00001     hello-00001   True\n

                                  The YAML of the deployed Pod is shown below; it consists of two containers: user-container and queue-proxy.

                                  apiVersion: v1\nkind: Pod\nmetadata:\n  name: hello-00003-deployment-5fcb8ccbf-7qjfk\nspec:\n  containers:\n  - name: user-container\n  - name: queue-proxy\n

                                  Request flow:

                                  1. case 1: at low or zero traffic, requests are routed to the activator.
                                  2. case 2: at high traffic, requests are routed directly to the Pods only when traffic exceeds target-burst-capacity, which behaves as follows:
                                    1. Set to 0: the activator is in the path only when scaling from zero.
                                    2. Set to -1: the activator always stays in the request path.
                                    3. Set to >0: the number of additional concurrent requests the system can absorb before scaling is triggered.
                                  3. case 3: when traffic drops again, requests are routed back through the activator once current_demand + target-burst-capacity > (pods * concurrency-target), that is:

                                    total pending requests + tolerated requests beyond the target concurrency > target concurrency per Pod × number of Pods

                                  "},{"location":"end-user/kpanda/scale/knative/playground.html#case-2-","title":"case 2 - \u57fa\u4e8e\u5e76\u53d1\u5f39\u6027\u4f38\u7f29","text":"

                                  \u6211\u4eec\u9996\u5148\u5728\u96c6\u7fa4\u5e94\u7528\u4e0b\u9762 YAML \u5b9a\u4e49\u3002

                                  apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"1\"\n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n

                                  Run the command below to generate test load; you can watch the Pods scale out with kubectl get pods -A -w.

                                  wrk -t2 -c4 -d6s http://hello.knative-serving.knative.daocloud.io/\n
                                  "},{"location":"end-user/kpanda/scale/knative/playground.html#case-3-","title":"case 3 - \u57fa\u4e8e\u5e76\u53d1\u5f39\u6027\u4f38\u7f29\uff0c\u8fbe\u5230\u7279\u5b9a\u6bd4\u4f8b\u63d0\u524d\u6269\u5bb9","text":"

                                  \u6211\u4eec\u53ef\u4ee5\u5f88\u8f7b\u677e\u7684\u5b9e\u73b0\uff0c\u4f8b\u5982\u9650\u5236\u6bcf\u4e2a\u5bb9\u5668\u5e76\u53d1\u4e3a 10\uff0c\u53ef\u4ee5\u901a\u8fc7 autoscaling.knative.dev/target-utilization-percentage: 70 \u6765\u5b9e\u73b0\uff0c\u8fbe\u5230 70% \u5c31\u5f00\u59cb\u6269\u5bb9 Pod\u3002

                                  apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"10\"\n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"\n        autoscaling.knative.dev/target-utilization-percentage: \"70\"\n        autoscaling.knative.dev/metric: \"concurrency\"\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n
                                  "},{"location":"end-user/kpanda/scale/knative/playground.html#case-4-","title":"case 4 - \u7070\u5ea6\u53d1\u5e03/\u6d41\u91cf\u767e\u5206\u6bd4","text":"

                                  \u6211\u4eec\u53ef\u4ee5\u901a\u8fc7 spec.traffic \u5b9e\u73b0\u5230\u6bcf\u4e2a\u7248\u672c\u6d41\u91cf\u7684\u63a7\u5236\u3002

                                  apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"1\"  \n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"         \n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n  traffic:\n  - latestRevision: true\n    percent: 50\n  - latestRevision: false\n    percent: 50\n    revisionName: hello-00001\n
                                  "},{"location":"end-user/kpanda/scale/knative/scene.html","title":"\u4f7f\u7528\u573a\u666f","text":""},{"location":"end-user/kpanda/scale/knative/scene.html#_2","title":"\u9002\u5408\u7684\u573a\u666f","text":"
                                    • Short-connection, high-concurrency services
                                    • Services that need elastic scaling
                                    • Large numbers of applications that need to scale down to 0 to improve resource utilization
                                    • AI serving workloads that scale out based on specific metrics

                                  Tip

                                    For short-connection, high-concurrency services and services that need elastic scaling, the HPA and VPA capabilities are recommended.

                                  "},{"location":"end-user/kpanda/scale/knative/scene.html#_3","title":"\u4e0d\u9002\u5408\u7684\u573a\u666f","text":"
                                  • \u957f\u8fde\u63a5\u4e1a\u52a1
                                  • \u5ef6\u65f6\u654f\u611f\u4e1a\u52a1
                                  • \u57fa\u4e8e cookie \u7684\u6d41\u91cf\u5206\u6d41
                                  • \u57fa\u4e8e header \u7684\u6d41\u91cf\u5206\u6d41
                                  "},{"location":"end-user/kpanda/security/index.html","title":"\u5b89\u5168\u626b\u63cf\u7c7b\u578b","text":"

                                    In a Kubernetes (K8s) environment, security scanning is one of the key measures for keeping a cluster secure. Among these, compliance scanning (based on the CIS Benchmark), permission scanning (based on the kube-audit auditing feature), and vulnerability scanning (based on kube-hunter) are three common and important approaches:

                                    • Compliance scanning: scans cluster nodes against the CIS Benchmark. The CIS Benchmark is a set of globally recognized best-practice standards that provides detailed security configuration guidance and automated checking tools (such as Kube-Bench) for Kubernetes clusters, helping organizations ensure their K8s clusters meet security baseline requirements and protecting systems and data from threats.

                                    • Permission scanning: based on the kube-audit auditing feature. Permission scanning mainly addresses cluster access control and operational transparency. Through audit logs, cluster administrators can trace the access history of cluster resources and identify abnormal behavior such as unauthorized access, leaks of sensitive data, and operation records with security flaws. This is essential for troubleshooting, security incident response, and meeting compliance requirements. Permission scanning can also help organizations uncover potential privilege abuse and take timely measures to prevent security incidents.

                                    • Vulnerability scanning: based on kube-hunter, mainly addressing known vulnerabilities and misconfigurations in Kubernetes clusters. By simulating attack behavior, kube-hunter can identify vulnerabilities in the cluster that could be maliciously exploited, such as unauthorized access, exposed services and API endpoints, and misconfigured roles and binding policies. In particular, kube-hunter can identify and report CVE vulnerabilities, which, if exploited, may lead to serious consequences such as data leaks and service disruption. CVEs are defined and maintained by internationally recognized security organizations such as MITRE; the CVE database provides unique identifiers for known vulnerabilities in software and firmware and has become a standard followed by the global security community. By drawing on the CVE database, kube-hunter helps users quickly identify and respond to security threats in Kubernetes clusters.

                                  "},{"location":"end-user/kpanda/security/index.html#_2","title":"\u5408\u89c4\u6027\u626b\u63cf","text":"

                                    Compliance scans target cluster nodes. The scan results list each scan item and its outcome, and give remediation suggestions for items that fail. For the specific security rules used during scanning, refer to the CIS Kubernetes Benchmark.

                                    The focus of the scan differs by node type, as listed below (a kube-bench command sketch follows the list).

                                    • Scanning control plane nodes (Controller)

                                      • Focuses on the security of system components such as the API Server , controller-manager , scheduler , and kubelet
                                      • Checks the security configuration of the Etcd database
                                      • Checks whether the cluster's authentication mechanism, authorization policies, and network security configuration meet security standards
                                    • Scanning worker nodes (Worker)

                                      • Checks whether the configuration of container runtimes such as kubelet and Docker meets security standards
                                      • Checks whether container images are trust-verified
                                      • Checks whether the node's network security configuration meets security standards
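
                                    For reference, the same class of CIS checks can also be run by hand with the Kube-Bench tool mentioned above. A minimal sketch, assuming kube-bench is installed on the node being checked; the target names are kube-bench's own:

                                    kube-bench run --targets master,node\n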

                                  Tip

                                    To use compliance scanning, first create a scan configuration, then create a scan policy based on that configuration. After the scan policy runs, the scan report can be viewed.

                                  "},{"location":"end-user/kpanda/security/index.html#_3","title":"\u6743\u9650\u626b\u63cf","text":"

                                    Permission scans focus on security vulnerabilities caused by permission issues. They help users identify security threats in a Kubernetes cluster and flag which resources need further review and protection. By running these checks, users gain a clearer and more complete picture of their Kubernetes environment and can ensure the cluster complies with Kubernetes best practices and security standards. (A minimal audit-policy sketch follows the list below.)

                                    Specifically, permission scanning supports the following operations:

                                    • Scan the health status of all nodes in the cluster.

                                    • Scan the running status of cluster components such as kube-apiserver , kube-controller-manager , and kube-scheduler .

                                    • Scan security configuration: check the Kubernetes security configuration

                                      • API security: whether insecure API versions are enabled, whether appropriate RBAC roles and permission restrictions are set, and so on
                                      • Container security: whether insecure images are used, whether privileged mode is enabled, whether a proper security context is set, and so on
                                      • Network security: whether suitable network policies are enabled to restrict traffic, whether TLS encryption is used, and so on
                                      • Storage security: whether appropriate encryption, access control, and so on are enabled.
                                      • Application security: whether necessary security measures are in place, such as password management and defenses against cross-site scripting (XSS) attacks.
                                    • Provide warnings and recommendations: security best practices for cluster administrators to follow, such as rotating certificates regularly, using strong passwords, and restricting network access.
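
                                    As a point of reference, the audit logs that permission scanning relies on are produced by kube-apiserver according to an audit policy. A minimal sketch of such a policy (illustrative, not a platform requirement) that records metadata for every request:

                                    apiVersion: audit.k8s.io/v1\nkind: Policy\nrules:\n  # Record who did what to which resource, without request/response bodies\n  - level: Metadata\n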

                                  Tip

                                    To use permission scanning, first create a scan policy. After the scan policy runs, the scan report can be viewed. For details, refer to Security Scanning.

                                  "},{"location":"end-user/kpanda/security/index.html#_4","title":"\u6f0f\u6d1e\u626b\u63cf","text":"

                                    Vulnerability scans focus on potential malicious attacks and security vulnerabilities, such as remote code execution, SQL injection, and XSS attacks, as well as some attacks specific to Kubernetes. The final scan report lists the security vulnerabilities present in the cluster and suggests fixes.
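
                                    For reference, kube-hunter can also be run by hand from a machine that can reach the cluster over the network. A minimal sketch, assuming kube-hunter is installed via pip; <node-ip> is a placeholder for a real node address:

                                    pip install kube-hunter\nkube-hunter --remote <node-ip>\n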

                                  Tip

                                    To use vulnerability scanning, first create a scan policy. After the scan policy runs, the scan report can be viewed. For details, refer to Vulnerability Scan.

                                  "},{"location":"end-user/kpanda/security/audit.html","title":"\u6743\u9650\u626b\u63cf","text":"

                                    To use the permission scan feature, first create a scan policy; after the policy runs, a scan report is generated automatically for viewing.

                                  "},{"location":"end-user/kpanda/security/audit.html#_2","title":"\u521b\u5efa\u626b\u63cf\u7b56\u7565","text":"
                                  1. \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7684\u9996\u9875\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5b89\u5168\u7ba1\u7406 \u3002

                                  2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u6743\u9650\u626b\u63cf \uff0c\u70b9\u51fb \u626b\u63cf\u7b56\u7565 \u9875\u7b7e\uff0c\u5728\u53f3\u4fa7\u70b9\u51fb \u521b\u5efa\u626b\u63cf\u7b56\u7565 \u3002

                                  3. \u53c2\u8003\u4e0b\u5217\u8bf4\u660e\u586b\u5199\u914d\u7f6e\uff0c\u6700\u540e\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002

                                    • \u96c6\u7fa4\uff1a\u9009\u62e9\u9700\u8981\u626b\u63cf\u54ea\u4e2a\u96c6\u7fa4\u3002\u53ef\u9009\u7684\u96c6\u7fa4\u5217\u8868\u6765\u81ea\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u7684\u96c6\u7fa4\u3002\u5982\u679c\u6ca1\u6709\u60f3\u9009\u7684\u96c6\u7fa4\uff0c\u53ef\u4ee5\u53bb\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u96c6\u7fa4\u3002
                                    • \u626b\u63cf\u7c7b\u578b\uff1a

                                      • \u7acb\u5373\u626b\u63cf\uff1a\u5728\u626b\u63cf\u7b56\u7565\u521b\u5efa\u597d\u4e4b\u540e\u7acb\u5373\u6267\u884c\u4e00\u6b21\u626b\u63cf\uff0c\u540e\u7eed\u4e0d\u53ef\u4ee5\u81ea\u52a8/\u624b\u52a8\u518d\u6b21\u6267\u884c\u626b\u63cf\u3002
                                      • \u5b9a\u65f6\u626b\u63cf\uff1a\u901a\u8fc7\u8bbe\u7f6e\u626b\u63cf\u5468\u671f\uff0c\u81ea\u52a8\u6309\u65f6\u91cd\u590d\u6267\u884c\u626b\u63cf\u3002
                                    • \u626b\u63cf\u62a5\u544a\u4fdd\u7559\u6570\u91cf\uff1a\u8bbe\u7f6e\u6700\u591a\u4fdd\u7559\u591a\u5c11\u626b\u63cf\u62a5\u544a\u3002\u8d85\u8fc7\u6307\u5b9a\u7684\u4fdd\u7559\u6570\u91cf\u65f6\uff0c\u4ece\u6700\u65e9\u7684\u62a5\u544a\u5f00\u59cb\u5220\u9664\u3002

                                  "},{"location":"end-user/kpanda/security/audit.html#_3","title":"\u66f4\u65b0/\u5220\u9664\u626b\u63cf\u7b56\u7565","text":"

                                  \u521b\u5efa\u626b\u63cf\u7b56\u7565\u4e4b\u540e\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u8981\u66f4\u65b0\u6216\u5220\u9664\u626b\u63cf\u7b56\u7565\u3002

                                  \u5728 \u626b\u63cf\u7b56\u7565 \u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u914d\u7f6e\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\uff1a

                                  • \u5bf9\u4e8e\u5468\u671f\u6027\u7684\u626b\u63cf\u7b56\u7565\uff1a

                                    • \u9009\u62e9 \u7acb\u5373\u6267\u884c \u610f\u5473\u7740\uff0c\u5728\u5468\u671f\u8ba1\u5212\u4e4b\u5916\u7acb\u5373\u518d\u626b\u63cf\u4e00\u6b21\u96c6\u7fa4
                                    • \u9009\u62e9 \u7981\u7528 \u4f1a\u4e2d\u65ad\u626b\u63cf\u8ba1\u5212\uff0c\u76f4\u5230\u70b9\u51fb \u542f\u7528 \u624d\u53ef\u4ee5\u7ee7\u7eed\u6839\u636e\u5468\u671f\u8ba1\u5212\u6267\u884c\u8be5\u626b\u63cf\u7b56\u7565\u3002
                                    • \u9009\u62e9 \u7f16\u8f91 \u53ef\u4ee5\u66f4\u65b0\u914d\u7f6e\uff0c\u652f\u6301\u66f4\u65b0\u626b\u63cf\u914d\u7f6e\u3001\u7c7b\u578b\u3001\u626b\u63cf\u5468\u671f\u3001\u62a5\u544a\u4fdd\u7559\u6570\u91cf\uff0c\u4e0d\u53ef\u66f4\u6539\u914d\u7f6e\u540d\u79f0\u548c\u9700\u8981\u626b\u63cf\u7684\u76ee\u6807\u96c6\u7fa4\u3002
                                    • \u9009\u62e9 \u5220\u9664 \u53ef\u4ee5\u5220\u9664\u8be5\u914d\u7f6e
                                  • \u5bf9\u4e8e\u4e00\u6b21\u6027\u7684\u626b\u63cf\u7b56\u7565\uff1a\u4ec5\u652f\u6301 \u5220\u9664 \u64cd\u4f5c\u3002

                                  "},{"location":"end-user/kpanda/security/audit.html#_4","title":"\u67e5\u770b\u626b\u63cf\u62a5\u544a","text":"
                                  1. \u5728 \u5b89\u5168\u7ba1\u7406 -> \u6743\u9650\u626b\u63cf -> \u626b\u63cf\u62a5\u544a \u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u62a5\u544a\u540d\u79f0

                                    \u5728\u62a5\u544a\u53f3\u4fa7\u70b9\u51fb \u5220\u9664 \u53ef\u4ee5\u624b\u52a8\u5220\u9664\u62a5\u544a\u3002

                                  2. \u67e5\u770b\u626b\u63cf\u62a5\u544a\u5185\u5bb9\uff0c\u5305\u62ec\uff1a

                                    • \u626b\u63cf\u7684\u76ee\u6807\u96c6\u7fa4
                                    • \u4f7f\u7528\u7684\u626b\u63cf\u7b56\u7565
                                    • \u626b\u63cf\u9879\u603b\u6570\u3001\u8b66\u544a\u6570\u3001\u9519\u8bef\u6570
                                    • \u5728\u5468\u671f\u6027\u626b\u63cf\u7b56\u7565\u751f\u6210\u7684\u626b\u63cf\u62a5\u544a\u4e2d\uff0c\u8fd8\u53ef\u4ee5\u67e5\u770b\u626b\u63cf\u9891\u7387
                                    • \u626b\u63cf\u5f00\u59cb\u7684\u65f6\u95f4
                                    • \u68c0\u67e5\u8be6\u60c5\uff0c\u4f8b\u5982\u88ab\u68c0\u67e5\u7684\u8d44\u6e90\u3001\u8d44\u6e90\u7c7b\u578b\u3001\u626b\u63cf\u7ed3\u679c\u3001\u9519\u8bef\u7c7b\u578b\u3001\u9519\u8bef\u8be6\u60c5

                                  "},{"location":"end-user/kpanda/security/hunter.html","title":"\u6f0f\u6d1e\u626b\u63cf","text":"

                                  \u4e3a\u4e86\u4f7f\u7528\u6f0f\u6d1e\u626b\u63cf\u529f\u80fd\uff0c\u9700\u8981\u5148\u521b\u5efa\u626b\u63cf\u7b56\u7565\uff0c\u6267\u884c\u8be5\u7b56\u7565\u4e4b\u540e\u4f1a\u81ea\u52a8\u751f\u6210\u626b\u63cf\u62a5\u544a\u4ee5\u4f9b\u67e5\u770b\u3002

                                  "},{"location":"end-user/kpanda/security/hunter.html#_2","title":"\u521b\u5efa\u626b\u63cf\u7b56\u7565","text":"
                                  1. \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7684\u9996\u9875\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5b89\u5168\u7ba1\u7406 \u3002

                                  2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u6f0f\u6d1e\u626b\u63cf \uff0c\u70b9\u51fb \u626b\u63cf\u7b56\u7565 \u9875\u7b7e\uff0c\u5728\u53f3\u4fa7\u70b9\u51fb \u521b\u5efa\u626b\u63cf\u7b56\u7565 \u3002

                                  3. \u53c2\u8003\u4e0b\u5217\u8bf4\u660e\u586b\u5199\u914d\u7f6e\uff0c\u6700\u540e\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002

                                    • \u96c6\u7fa4\uff1a\u9009\u62e9\u9700\u8981\u626b\u63cf\u54ea\u4e2a\u96c6\u7fa4\u3002\u53ef\u9009\u7684\u96c6\u7fa4\u5217\u8868\u6765\u81ea\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u7684\u96c6\u7fa4\u3002\u5982\u679c\u6ca1\u6709\u60f3\u9009\u7684\u96c6\u7fa4\uff0c\u53ef\u4ee5\u53bb\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u96c6\u7fa4\u3002
                                    • \u626b\u63cf\u7c7b\u578b\uff1a

                                      • \u7acb\u5373\u626b\u63cf\uff1a\u5728\u626b\u63cf\u7b56\u7565\u521b\u5efa\u597d\u4e4b\u540e\u7acb\u5373\u6267\u884c\u4e00\u6b21\u626b\u63cf\uff0c\u540e\u7eed\u4e0d\u53ef\u4ee5\u81ea\u52a8/\u624b\u52a8\u518d\u6b21\u6267\u884c\u626b\u63cf\u3002
                                      • \u5b9a\u65f6\u626b\u63cf\uff1a\u901a\u8fc7\u8bbe\u7f6e\u626b\u63cf\u5468\u671f\uff0c\u81ea\u52a8\u6309\u65f6\u91cd\u590d\u6267\u884c\u626b\u63cf\u3002
                                    • \u626b\u63cf\u62a5\u544a\u4fdd\u7559\u6570\u91cf\uff1a\u8bbe\u7f6e\u6700\u591a\u4fdd\u7559\u591a\u5c11\u626b\u63cf\u62a5\u544a\u3002\u8d85\u8fc7\u6307\u5b9a\u7684\u4fdd\u7559\u6570\u91cf\u65f6\uff0c\u4ece\u6700\u65e9\u7684\u62a5\u544a\u5f00\u59cb\u5220\u9664\u3002

                                  "},{"location":"end-user/kpanda/security/hunter.html#_3","title":"\u66f4\u65b0/\u5220\u9664\u626b\u63cf\u7b56\u7565","text":"

                                  \u521b\u5efa\u626b\u63cf\u7b56\u7565\u4e4b\u540e\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u8981\u66f4\u65b0\u6216\u5220\u9664\u626b\u63cf\u7b56\u7565\u3002

                                  \u5728 \u626b\u63cf\u7b56\u7565 \u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u914d\u7f6e\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\uff1a

                                  • \u5bf9\u4e8e\u5468\u671f\u6027\u7684\u626b\u63cf\u7b56\u7565\uff1a

                                    • \u9009\u62e9 \u7acb\u5373\u6267\u884c \u610f\u5473\u7740\uff0c\u5728\u5468\u671f\u8ba1\u5212\u4e4b\u5916\u7acb\u5373\u518d\u626b\u63cf\u4e00\u6b21\u96c6\u7fa4
                                    • \u9009\u62e9 \u7981\u7528 \u4f1a\u4e2d\u65ad\u626b\u63cf\u8ba1\u5212\uff0c\u76f4\u5230\u70b9\u51fb \u542f\u7528 \u624d\u53ef\u4ee5\u7ee7\u7eed\u6839\u636e\u5468\u671f\u8ba1\u5212\u6267\u884c\u8be5\u626b\u63cf\u7b56\u7565\u3002
                                    • \u9009\u62e9 \u7f16\u8f91 \u53ef\u4ee5\u66f4\u65b0\u914d\u7f6e\uff0c\u652f\u6301\u66f4\u65b0\u626b\u63cf\u914d\u7f6e\u3001\u7c7b\u578b\u3001\u626b\u63cf\u5468\u671f\u3001\u62a5\u544a\u4fdd\u7559\u6570\u91cf\uff0c\u4e0d\u53ef\u66f4\u6539\u914d\u7f6e\u540d\u79f0\u548c\u9700\u8981\u626b\u63cf\u7684\u76ee\u6807\u96c6\u7fa4\u3002
                                    • \u9009\u62e9 \u5220\u9664 \u53ef\u4ee5\u5220\u9664\u8be5\u914d\u7f6e
                                  • \u5bf9\u4e8e\u4e00\u6b21\u6027\u7684\u626b\u63cf\u7b56\u7565\uff1a\u4ec5\u652f\u6301 \u5220\u9664 \u64cd\u4f5c\u3002

                                  "},{"location":"end-user/kpanda/security/hunter.html#_4","title":"\u67e5\u770b\u626b\u63cf\u62a5\u544a","text":"
                                  1. \u5728 \u5b89\u5168\u7ba1\u7406 -> \u6743\u9650\u626b\u63cf -> \u626b\u63cf\u62a5\u544a \u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u62a5\u544a\u540d\u79f0

                                    \u5728\u62a5\u544a\u53f3\u4fa7\u70b9\u51fb \u5220\u9664 \u53ef\u4ee5\u624b\u52a8\u5220\u9664\u62a5\u544a\u3002

                                    2. View the contents of the scan report, including:

                                      • The target cluster that was scanned
                                      • The scan policy used
                                      • The scan frequency
                                      • The total number of risks, plus counts of high, medium, and low risks
                                      • The scan time
                                      • Check details, such as the vulnerability ID, type, name, and description

                                  "},{"location":"end-user/kpanda/security/cis/config.html","title":"\u626b\u63cf\u914d\u7f6e","text":"

                                  \u4f7f\u7528\u5408\u89c4\u6027\u626b\u63cf\u7684\u7b2c\u4e00\u6b65\uff0c\u5c31\u662f\u5148\u521b\u5efa\u626b\u63cf\u914d\u7f6e\u3002\u57fa\u4e8e\u626b\u63cf\u914d\u7f6e\u518d\u521b\u5efa\u626b\u63cf\u7b56\u7565\u3001\u6267\u884c\u626b\u63cf\u7b56\u7565\uff0c\u6700\u540e\u67e5\u770b\u626b\u63cf\u7ed3\u679c\u3002

                                  "},{"location":"end-user/kpanda/security/cis/config.html#_2","title":"\u521b\u5efa\u626b\u63cf\u914d\u7f6e","text":"

                                  \u521b\u5efa\u626b\u63cf\u914d\u7f6e\u7684\u6b65\u9aa4\u5982\u4e0b\uff1a

                                  1. \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7684\u9996\u9875\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5b89\u5168\u7ba1\u7406 \u3002

                                  2. \u9ed8\u8ba4\u8fdb\u5165 \u5408\u89c4\u6027\u626b\u63cf \u9875\u9762\uff0c\u70b9\u51fb \u626b\u63cf\u914d\u7f6e \u9875\u7b7e\uff0c\u7136\u540e\u5728\u53f3\u4e0a\u89d2\u70b9\u51fb \u521b\u5efa\u626b\u63cf\u914d\u7f6e \u3002

                                  3. \u586b\u5199\u914d\u7f6e\u540d\u79f0\u3001\u9009\u62e9\u914d\u7f6e\u6a21\u677f\u3001\u6309\u9700\u52fe\u9009\u626b\u63cf\u9879\uff0c\u6700\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                                    \u626b\u63cf\u6a21\u677f\uff1a\u76ee\u524d\u63d0\u4f9b\u4e86\u4e24\u4e2a\u6a21\u677f\u3002 kubeadm \u6a21\u677f\u9002\u7528\u4e8e\u4e00\u822c\u60c5\u51b5\u4e0b\u7684 Kubernetes \u96c6\u7fa4\u3002 \u6211\u4eec\u5728 kubeadm \u6a21\u677f\u57fa\u7840\u4e0a\uff0c\u7ed3\u5408\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u5e73\u53f0\u8bbe\u8ba1\u5ffd\u7565\u4e86\u4e0d\u9002\u7528\u4e8e\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u626b\u63cf\u9879\u3002

                                  "},{"location":"end-user/kpanda/security/cis/config.html#_3","title":"\u67e5\u770b\u626b\u63cf\u914d\u7f6e","text":"

                                  \u5728\u626b\u63cf\u914d\u7f6e\u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u626b\u63cf\u914d\u7f6e\u7684\u540d\u79f0\uff0c\u53ef\u4ee5\u67e5\u770b\u8be5\u914d\u7f6e\u7684\u7c7b\u578b\u3001\u626b\u63cf\u9879\u6570\u91cf\u3001\u521b\u5efa\u65f6\u95f4\u3001\u914d\u7f6e\u6a21\u677f\uff0c\u4ee5\u53ca\u8be5\u914d\u7f6e\u542f\u7528\u7684\u5177\u4f53\u626b\u63cf\u9879\u3002

                                  "},{"location":"end-user/kpanda/security/cis/config.html#_4","title":"\u66f4\u65b0/\u5220\u9664\u626b\u63cf\u914d\u7f6e","text":"

                                  \u626b\u63cf\u914d\u7f6e\u521b\u5efa\u6210\u529f\u4e4b\u540e\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u6c42\u66f4\u65b0\u914d\u7f6e\u6216\u5220\u9664\u8be5\u914d\u7f6e\u3002

                                  \u5728\u626b\u63cf\u914d\u7f6e\u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u914d\u7f6e\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\uff1a

                                  • \u9009\u62e9 \u7f16\u8f91 \u53ef\u4ee5\u66f4\u65b0\u914d\u7f6e\uff0c\u652f\u6301\u66f4\u65b0\u63cf\u8ff0\u3001\u6a21\u677f\u548c\u626b\u63cf\u9879\u3002\u4e0d\u53ef\u66f4\u6539\u914d\u7f6e\u540d\u79f0\u3002
                                  • \u9009\u62e9 \u5220\u9664 \u53ef\u4ee5\u5220\u9664\u8be5\u914d\u7f6e\u3002

                                  "},{"location":"end-user/kpanda/security/cis/policy.html","title":"\u626b\u63cf\u7b56\u7565","text":""},{"location":"end-user/kpanda/security/cis/policy.html#_2","title":"\u521b\u5efa\u626b\u63cf\u7b56\u7565","text":"

                                  \u521b\u5efa\u626b\u63cf\u914d\u7f6e\u4e4b\u540e\uff0c\u53ef\u4ee5\u57fa\u4e8e\u914d\u7f6e\u521b\u5efa\u626b\u63cf\u7b56\u7565\u3002

                                  1. \u5728 \u5b89\u5168\u7ba1\u7406 -> \u5408\u89c4\u6027\u626b\u63cf \u9875\u9762\u7684 \u626b\u63cf\u7b56\u7565 \u9875\u7b7e\u4e0b\uff0c\u5728\u53f3\u4fa7\u70b9\u51fb\u521b\u5efa\u626b\u63cf\u7b56\u7565\u3002

                                  2. \u53c2\u8003\u4e0b\u5217\u8bf4\u660e\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                                    • \u96c6\u7fa4\uff1a\u9009\u62e9\u9700\u8981\u626b\u63cf\u54ea\u4e2a\u96c6\u7fa4\u3002\u53ef\u9009\u7684\u96c6\u7fa4\u5217\u8868\u6765\u81ea\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u7684\u96c6\u7fa4\u3002\u5982\u679c\u6ca1\u6709\u60f3\u9009\u7684\u96c6\u7fa4\uff0c\u53ef\u4ee5\u53bb\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u96c6\u7fa4\u3002
                                    • \u626b\u63cf\u914d\u7f6e\uff1a\u9009\u62e9\u4e8b\u5148\u521b\u5efa\u597d\u7684\u626b\u63cf\u914d\u7f6e\u3002\u626b\u63cf\u914d\u7f6e\u89c4\u5b9a\u4e86\u9700\u8981\u6267\u884c\u54ea\u4e9b\u5177\u4f53\u7684\u626b\u63cf\u9879\u3002
                                    • \u626b\u63cf\u7c7b\u578b\uff1a

                                      • \u7acb\u5373\u626b\u63cf\uff1a\u5728\u626b\u63cf\u7b56\u7565\u521b\u5efa\u597d\u4e4b\u540e\u7acb\u5373\u6267\u884c\u4e00\u6b21\u626b\u63cf\uff0c\u540e\u7eed\u4e0d\u53ef\u4ee5\u81ea\u52a8/\u624b\u52a8\u518d\u6b21\u6267\u884c\u626b\u63cf\u3002
                                      • \u5b9a\u65f6\u626b\u63cf\uff1a\u901a\u8fc7\u8bbe\u7f6e\u626b\u63cf\u5468\u671f\uff0c\u81ea\u52a8\u6309\u65f6\u91cd\u590d\u6267\u884c\u626b\u63cf\u3002
                                    • \u626b\u63cf\u62a5\u544a\u4fdd\u7559\u6570\u91cf\uff1a\u8bbe\u7f6e\u6700\u591a\u4fdd\u7559\u591a\u5c11\u626b\u63cf\u62a5\u544a\u3002\u8d85\u8fc7\u6307\u5b9a\u7684\u4fdd\u7559\u6570\u91cf\u65f6\uff0c\u4ece\u6700\u65e9\u7684\u62a5\u544a\u5f00\u59cb\u5220\u9664\u3002

                                  "},{"location":"end-user/kpanda/security/cis/policy.html#_3","title":"\u66f4\u65b0/\u5220\u9664\u626b\u63cf\u7b56\u7565","text":"

                                  \u521b\u5efa\u626b\u63cf\u7b56\u7565\u4e4b\u540e\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u8981\u66f4\u65b0\u6216\u5220\u9664\u626b\u63cf\u7b56\u7565\u3002

                                  \u5728 \u626b\u63cf\u7b56\u7565 \u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u914d\u7f6e\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\uff1a

                                  • \u5bf9\u4e8e\u5468\u671f\u6027\u7684\u626b\u63cf\u7b56\u7565\uff1a

                                    • \u9009\u62e9 \u7acb\u5373\u6267\u884c \u610f\u5473\u7740\uff0c\u5728\u5468\u671f\u8ba1\u5212\u4e4b\u5916\u7acb\u5373\u518d\u626b\u63cf\u4e00\u6b21\u96c6\u7fa4
                                    • \u9009\u62e9 \u7981\u7528 \u4f1a\u4e2d\u65ad\u626b\u63cf\u8ba1\u5212\uff0c\u76f4\u5230\u70b9\u51fb \u542f\u7528 \u624d\u53ef\u4ee5\u7ee7\u7eed\u6839\u636e\u5468\u671f\u8ba1\u5212\u6267\u884c\u8be5\u626b\u63cf\u7b56\u7565\u3002
                                    • \u9009\u62e9 \u7f16\u8f91 \u53ef\u4ee5\u66f4\u65b0\u914d\u7f6e\uff0c\u652f\u6301\u66f4\u65b0\u626b\u63cf\u914d\u7f6e\u3001\u7c7b\u578b\u3001\u626b\u63cf\u5468\u671f\u3001\u62a5\u544a\u4fdd\u7559\u6570\u91cf\uff0c\u4e0d\u53ef\u66f4\u6539\u914d\u7f6e\u540d\u79f0\u548c\u9700\u8981\u626b\u63cf\u7684\u76ee\u6807\u96c6\u7fa4\u3002
                                    • \u9009\u62e9 \u5220\u9664 \u53ef\u4ee5\u5220\u9664\u8be5\u914d\u7f6e
                                  • \u5bf9\u4e8e\u4e00\u6b21\u6027\u7684\u626b\u63cf\u7b56\u7565\uff1a\u4ec5\u652f\u6301 \u5220\u9664 \u64cd\u4f5c\u3002

                                  "},{"location":"end-user/kpanda/security/cis/report.html","title":"\u626b\u63cf\u62a5\u544a","text":"


                                  "},{"location":"end-user/kpanda/security/cis/report.html#_1","title":"\u626b\u63cf\u62a5\u544a","text":"

                                    After a scan policy runs, a scan report is generated automatically. The report can be viewed online or downloaded for local viewing.

                                    • Download a scan report

                                      On the Scan Report tab of the Security Management -> Compliance Scan page, click the ┇ action button on the right of a report and select Download .

                                    • View a scan report online

                                      Click the name of a report to view the contents of the CIS compliance scan online, including:

                                      • The target cluster that was scanned
                                      • The scan policy and scan configuration used
                                      • The time the scan started
                                      • The total number of scan items, and how many passed and failed
                                      • Remediation suggestions for the scan items that failed
                                      • Suggestions for safer operation for the scan items that passed

                                  "},{"location":"end-user/kpanda/storage/pv.html","title":"\u6570\u636e\u5377(PV)","text":"

                                  \u6570\u636e\u5377\uff08PersistentVolume\uff0cPV\uff09\u662f\u96c6\u7fa4\u4e2d\u7684\u4e00\u5757\u5b58\u50a8\uff0c\u53ef\u7531\u7ba1\u7406\u5458\u4e8b\u5148\u5236\u5907\uff0c\u6216\u4f7f\u7528\u5b58\u50a8\u7c7b\uff08Storage Class\uff09\u6765\u52a8\u6001\u5236\u5907\u3002PV \u662f\u96c6\u7fa4\u8d44\u6e90\uff0c\u4f46\u62e5\u6709\u72ec\u7acb\u7684\u751f\u547d\u5468\u671f\uff0c\u4e0d\u4f1a\u968f\u7740 Pod \u8fdb\u7a0b\u7ed3\u675f\u800c\u88ab\u5220\u9664\u3002\u5c06 PV \u6302\u8f7d\u5230\u5de5\u4f5c\u8d1f\u8f7d\u53ef\u4ee5\u5b9e\u73b0\u5de5\u4f5c\u8d1f\u8f7d\u7684\u6570\u636e\u6301\u4e45\u5316\u3002PV \u4e2d\u4fdd\u5b58\u4e86\u53ef\u88ab Pod \u4e2d\u5bb9\u5668\u8bbf\u95ee\u7684\u6570\u636e\u76ee\u5f55\u3002

                                  "},{"location":"end-user/kpanda/storage/pv.html#_1","title":"\u521b\u5efa\u6570\u636e\u5377","text":"

                                  \u76ee\u524d\u652f\u6301\u901a\u8fc7 YAML \u548c\u8868\u5355\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u6570\u636e\u5377\uff0c\u8fd9\u4e24\u79cd\u65b9\u5f0f\u5404\u6709\u4f18\u52a3\uff0c\u53ef\u4ee5\u6ee1\u8db3\u4e0d\u540c\u7528\u6237\u7684\u4f7f\u7528\u9700\u6c42\u3002

                                  • \u901a\u8fc7 YAML \u521b\u5efa\u6b65\u9aa4\u66f4\u5c11\u3001\u66f4\u9ad8\u6548\uff0c\u4f46\u95e8\u69db\u8981\u6c42\u8f83\u9ad8\uff0c\u9700\u8981\u719f\u6089\u6570\u636e\u5377\u7684 YAML \u6587\u4ef6\u914d\u7f6e\u3002

                                  • \u901a\u8fc7\u8868\u5355\u521b\u5efa\u66f4\u76f4\u89c2\u66f4\u7b80\u5355\uff0c\u6839\u636e\u63d0\u793a\u586b\u5199\u5bf9\u5e94\u7684\u503c\u5373\u53ef\uff0c\u4f46\u6b65\u9aa4\u66f4\u52a0\u7e41\u7410\u3002

                                  "},{"location":"end-user/kpanda/storage/pv.html#yaml","title":"YAML \u521b\u5efa","text":"
                                  1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377(PV) -> YAML \u521b\u5efa \u3002

                                  2. \u5728\u5f39\u6846\u4e2d\u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u7136\u540e\u5728\u5f39\u6846\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                                    \u652f\u6301\u4ece\u672c\u5730\u5bfc\u5165 YAML \u6587\u4ef6\u6216\u5c06\u586b\u5199\u597d\u7684\u6587\u4ef6\u4e0b\u8f7d\u4fdd\u5b58\u5230\u672c\u5730\u3002
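
                                      As a point of reference, a minimal PV manifest that could be pasted here; the name pv-demo and the hostPath path are illustrative placeholders, not platform defaults:

                                      apiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: pv-demo\nspec:\n  capacity:\n    storage: 10Gi\n  accessModes:\n    - ReadWriteOnce\n  persistentVolumeReclaimPolicy: Retain\n  volumeMode: Filesystem\n  hostPath:\n    path: /data/pv-demo\n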

                                  "},{"location":"end-user/kpanda/storage/pv.html#_2","title":"\u8868\u5355\u521b\u5efa","text":"
                                  1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377(PV) -> \u521b\u5efa\u6570\u636e\u5377(PV) \u3002

                                  2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\u3002

                                    • \u6570\u636e\u5377\u540d\u79f0\u3001\u6570\u636e\u5377\u7c7b\u578b\u3001\u6302\u8f7d\u8def\u5f84\u3001\u5377\u6a21\u5f0f\u3001\u8282\u70b9\u4eb2\u548c\u6027\u5728\u521b\u5efa\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                    • \u6570\u636e\u5377\u7c7b\u578b\uff1a\u6709\u5173\u5377\u7c7b\u578b\u7684\u8be6\u7ec6\u4ecb\u7ecd\uff0c\u53ef\u53c2\u8003 Kubernetes \u5b98\u65b9\u6587\u6863\u5377\u3002

                                    • Local\uff1a\u5c06 Node \u8282\u70b9\u7684\u672c\u5730\u5b58\u50a8\u5305\u88c5\u6210 PVC \u63a5\u53e3\uff0c\u5bb9\u5668\u76f4\u63a5\u4f7f\u7528 PVC \u800c\u65e0\u9700\u5173\u6ce8\u5e95\u5c42\u7684\u5b58\u50a8\u7c7b\u578b\u3002Local \u5377\u4e0d\u652f\u6301\u52a8\u6001\u914d\u7f6e\u6570\u636e\u5377\uff0c\u4f46\u652f\u6301\u914d\u7f6e\u8282\u70b9\u4eb2\u548c\u6027\uff0c\u53ef\u4ee5\u9650\u5236\u80fd\u4ece\u54ea\u4e9b\u8282\u70b9\u4e0a\u8bbf\u95ee\u8be5\u6570\u636e\u5377\u3002

                                    • HostPath\uff1a\u4f7f\u7528 Node \u8282\u70b9\u7684\u6587\u4ef6\u7cfb\u7edf\u4e0a\u7684\u6587\u4ef6\u6216\u76ee\u5f55\u4f5c\u4e3a\u6570\u636e\u5377\uff0c\u4e0d\u652f\u6301\u57fa\u4e8e\u8282\u70b9\u4eb2\u548c\u6027\u7684 Pod \u8c03\u5ea6\u3002

                                    • \u6302\u8f7d\u8def\u5f84\uff1a\u5c06\u6570\u636e\u5377\u6302\u8f7d\u5230\u5bb9\u5668\u4e2d\u7684\u67d0\u4e2a\u5177\u4f53\u76ee\u5f55\u4e0b\u3002

                                    • \u8bbf\u95ee\u6a21\u5f0f\uff1a

                                      • ReadWriteOnce\uff1a\u6570\u636e\u5377\u53ef\u4ee5\u88ab\u4e00\u4e2a\u8282\u70b9\u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002
                                      • ReadWriteMany\uff1a\u6570\u636e\u5377\u53ef\u4ee5\u88ab\u591a\u4e2a\u8282\u70b9\u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002
                                      • ReadOnlyMany\uff1a\u6570\u636e\u5377\u53ef\u4ee5\u88ab\u591a\u4e2a\u8282\u70b9\u4ee5\u53ea\u8bfb\u65b9\u5f0f\u6302\u8f7d\u3002
                                      • ReadWriteOncePod\uff1a\u6570\u636e\u5377\u53ef\u4ee5\u88ab\u5355\u4e2a Pod \u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002
                                    • \u56de\u6536\u7b56\u7565\uff1a

                                      • Retain\uff1a\u4e0d\u5220\u9664 PV\uff0c\u4ec5\u5c06\u5176\u72b6\u6001\u53d8\u4e3a released \uff0c\u9700\u8981\u7528\u6237\u624b\u52a8\u56de\u6536\u3002\u6709\u5173\u5982\u4f55\u624b\u52a8\u56de\u6536\uff0c\u53ef\u53c2\u8003\u6301\u4e45\u5377\u3002
                                      • Recycle\uff1a\u4fdd\u7559 PV \u4f46\u6e05\u7a7a\u5176\u4e2d\u7684\u6570\u636e\uff0c\u6267\u884c\u57fa\u672c\u7684\u64e6\u9664\u64cd\u4f5c\uff08 rm -rf /thevolume/* \uff09\u3002
                                      • Delete\uff1a\u5220\u9664 PV \u65f6\u53ca\u5176\u4e2d\u7684\u6570\u636e\u3002
                                    • \u5377\u6a21\u5f0f\uff1a

                                      • \u6587\u4ef6\u7cfb\u7edf\uff1a\u6570\u636e\u5377\u5c06\u88ab Pod \u6302\u8f7d\u5230\u67d0\u4e2a\u76ee\u5f55\u3002\u5982\u679c\u6570\u636e\u5377\u7684\u5b58\u50a8\u6765\u81ea\u67d0\u5757\u8bbe\u5907\u800c\u8be5\u8bbe\u5907\u76ee\u524d\u4e3a\u7a7a\uff0c\u7b2c\u4e00\u6b21\u6302\u8f7d\u5377\u4e4b\u524d\u4f1a\u5728\u8bbe\u5907\u4e0a\u521b\u5efa\u6587\u4ef6\u7cfb\u7edf\u3002
                                      • \u5757\uff1a\u5c06\u6570\u636e\u5377\u4f5c\u4e3a\u539f\u59cb\u5757\u8bbe\u5907\u6765\u4f7f\u7528\u3002\u8fd9\u7c7b\u5377\u4ee5\u5757\u8bbe\u5907\u7684\u65b9\u5f0f\u4ea4\u7ed9 Pod \u4f7f\u7528\uff0c\u5176\u4e0a\u6ca1\u6709\u4efb\u4f55\u6587\u4ef6\u7cfb\u7edf\uff0c\u53ef\u4ee5\u8ba9 Pod \u66f4\u5feb\u5730\u8bbf\u95ee\u6570\u636e\u5377\u3002
                                    • \u8282\u70b9\u4eb2\u548c\u6027\uff1a
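
                                      A minimal sketch of a Local volume that pins access to a single node through node affinity; local-pv-demo , the path, and node-1 are illustrative placeholders:

                                      apiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: local-pv-demo\nspec:\n  capacity:\n    storage: 5Gi\n  accessModes:\n    - ReadWriteOnce\n  persistentVolumeReclaimPolicy: Retain\n  volumeMode: Filesystem\n  local:\n    path: /mnt/disks/ssd1\n  nodeAffinity:\n    required:\n      nodeSelectorTerms:\n        - matchExpressions:\n            - key: kubernetes.io/hostname\n              operator: In\n              values:\n                - node-1\n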

                                  "},{"location":"end-user/kpanda/storage/pv.html#_3","title":"\u67e5\u770b\u6570\u636e\u5377","text":"

                                  \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377(PV) \u3002

                                  • \u8be5\u9875\u9762\u53ef\u4ee5\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u4e2d\u7684\u6240\u6709\u6570\u636e\u5377\uff0c\u4ee5\u53ca\u5404\u4e2a\u6570\u636e\u5377\u7684\u72b6\u6001\u3001\u5bb9\u91cf\u3001\u547d\u540d\u7a7a\u95f4\u7b49\u4fe1\u606f\u3002

                                  • \u652f\u6301\u6309\u7167\u6570\u636e\u5377\u7684\u540d\u79f0\u3001\u72b6\u6001\u3001\u547d\u540d\u7a7a\u95f4\u3001\u521b\u5efa\u65f6\u95f4\u8fdb\u884c\u987a\u5e8f\u6216\u9006\u5e8f\u6392\u5e8f\u3002

                                  • \u70b9\u51fb\u6570\u636e\u5377\u7684\u540d\u79f0\uff0c\u53ef\u4ee5\u67e5\u770b\u8be5\u6570\u636e\u5377\u7684\u57fa\u672c\u914d\u7f6e\u3001\u5b58\u50a8\u6c60\u4fe1\u606f\u3001\u6807\u7b7e\u3001\u6ce8\u89e3\u7b49\u4fe1\u606f\u3002

                                  "},{"location":"end-user/kpanda/storage/pv.html#_4","title":"\u514b\u9686\u6570\u636e\u5377","text":"

                                  \u901a\u8fc7\u514b\u9686\u6570\u636e\u5377\uff0c\u53ef\u4ee5\u57fa\u4e8e\u88ab\u514b\u9686\u6570\u636e\u5377\u7684\u914d\u7f6e\uff0c\u91cd\u65b0\u521b\u5efa\u4e00\u4e2a\u65b0\u7684\u6570\u636e\u5377\u3002

                                  1. \u8fdb\u5165\u514b\u9686\u9875\u9762

                                    • \u5728\u6570\u636e\u5377\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u514b\u9686\u7684\u6570\u636e\u5377\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u514b\u9686 \u3002

                                      \u4e5f\u53ef\u4ee5\u70b9\u51fb\u6570\u636e\u5377\u7684\u540d\u79f0\uff0c\u5728\u8be6\u60c5\u9875\u9762\u7684\u53f3\u4e0a\u89d2\u70b9\u51fb\u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u514b\u9686 \u3002

                                  2. \u76f4\u63a5\u4f7f\u7528\u539f\u914d\u7f6e\uff0c\u6216\u8005\u6309\u9700\u8fdb\u884c\u4fee\u6539\uff0c\u7136\u540e\u5728\u9875\u9762\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                                  "},{"location":"end-user/kpanda/storage/pv.html#_5","title":"\u66f4\u65b0\u6570\u636e\u5377","text":"

                                  \u6709\u4e24\u79cd\u9014\u5f84\u53ef\u4ee5\u66f4\u65b0\u6570\u636e\u5377\u3002\u652f\u6301\u901a\u8fc7\u8868\u5355\u6216 YAML \u6587\u4ef6\u66f4\u65b0\u6570\u636e\u5377\u3002

                                  Note

                                  \u4ec5\u652f\u6301\u66f4\u65b0\u6570\u636e\u5377\u7684\u522b\u540d\u3001\u5bb9\u91cf\u3001\u8bbf\u95ee\u6a21\u5f0f\u3001\u56de\u6536\u7b56\u7565\u3001\u6807\u7b7e\u548c\u6ce8\u89e3\u3002

                                  • \u5728\u6570\u636e\u5377\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684\u6570\u636e\u5377\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                                  • \u70b9\u51fb\u6570\u636e\u5377\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u6570\u636e\u5377\u7684\u8be6\u60c5\u9875\u9762\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                                  "},{"location":"end-user/kpanda/storage/pv.html#_6","title":"\u5220\u9664\u6570\u636e\u5377","text":"

                                  \u5728\u6570\u636e\u5377\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u5220\u9664\u7684\u6570\u636e\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u5220\u9664 \u3002

                                  \u4e5f\u53ef\u4ee5\u70b9\u51fb\u6570\u636e\u5377\u7684\u540d\u79f0\uff0c\u5728\u8be6\u60c5\u9875\u9762\u7684\u53f3\u4e0a\u89d2\u70b9\u51fb\u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u5220\u9664 \u3002

                                  "},{"location":"end-user/kpanda/storage/pvc.html","title":"\u6570\u636e\u5377\u58f0\u660e(PVC)","text":"

                                  \u6301\u4e45\u5377\u58f0\u660e\uff08PersistentVolumeClaim\uff0cPVC\uff09\u8868\u8fbe\u7684\u662f\u7528\u6237\u5bf9\u5b58\u50a8\u7684\u8bf7\u6c42\u3002PVC \u6d88\u8017 PV \u8d44\u6e90\uff0c\u7533\u9886\u4f7f\u7528\u7279\u5b9a\u5927\u5c0f\u3001\u7279\u5b9a\u8bbf\u95ee\u6a21\u5f0f\u7684\u6570\u636e\u5377\uff0c\u4f8b\u5982\u8981\u6c42 PV \u5377\u4ee5 ReadWriteOnce\u3001ReadOnlyMany \u6216 ReadWriteMany \u7b49\u6a21\u5f0f\u6765\u6302\u8f7d\u3002

                                  "},{"location":"end-user/kpanda/storage/pvc.html#_1","title":"\u521b\u5efa\u6570\u636e\u5377\u58f0\u660e","text":"

                                  \u76ee\u524d\u652f\u6301\u901a\u8fc7 YAML \u548c\u8868\u5355\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u6570\u636e\u5377\u58f0\u660e\uff0c\u8fd9\u4e24\u79cd\u65b9\u5f0f\u5404\u6709\u4f18\u52a3\uff0c\u53ef\u4ee5\u6ee1\u8db3\u4e0d\u540c\u7528\u6237\u7684\u4f7f\u7528\u9700\u6c42\u3002

                                  • \u901a\u8fc7 YAML \u521b\u5efa\u6b65\u9aa4\u66f4\u5c11\u3001\u66f4\u9ad8\u6548\uff0c\u4f46\u95e8\u69db\u8981\u6c42\u8f83\u9ad8\uff0c\u9700\u8981\u719f\u6089\u6570\u636e\u5377\u58f0\u660e\u7684 YAML \u6587\u4ef6\u914d\u7f6e\u3002

                                  • \u901a\u8fc7\u8868\u5355\u521b\u5efa\u66f4\u76f4\u89c2\u66f4\u7b80\u5355\uff0c\u6839\u636e\u63d0\u793a\u586b\u5199\u5bf9\u5e94\u7684\u503c\u5373\u53ef\uff0c\u4f46\u6b65\u9aa4\u66f4\u52a0\u7e41\u7410\u3002

                                  "},{"location":"end-user/kpanda/storage/pvc.html#yaml","title":"YAML \u521b\u5efa","text":"
                                  1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e (PVC) -> YAML \u521b\u5efa \u3002

                                  2. \u5728\u5f39\u6846\u4e2d\u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u7136\u540e\u5728\u5f39\u6846\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                                    \u652f\u6301\u4ece\u672c\u5730\u5bfc\u5165 YAML \u6587\u4ef6\u6216\u5c06\u586b\u5199\u597d\u7684\u6587\u4ef6\u4e0b\u8f7d\u4fdd\u5b58\u5230\u672c\u5730\u3002
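
                                      As a point of reference, a minimal PVC manifest that could be pasted here; the name pvc-demo , the default namespace, and the storage class name are illustrative placeholders:

                                      apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: pvc-demo\n  namespace: default\nspec:\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 10Gi\n  storageClassName: local-path\n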

                                  "},{"location":"end-user/kpanda/storage/pvc.html#_2","title":"\u8868\u5355\u521b\u5efa","text":"
                                  1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e (PVC) -> \u521b\u5efa\u6570\u636e\u5377\u58f0\u660e (PVC) \u3002

                                  2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\u3002

                                    • \u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\u3001\u547d\u540d\u7a7a\u95f4\u3001\u521b\u5efa\u65b9\u5f0f\u3001\u6570\u636e\u5377\u3001\u5bb9\u91cf\u3001\u8bbf\u95ee\u6a21\u5f0f\u5728\u521b\u5efa\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                    • \u521b\u5efa\u65b9\u5f0f\uff1a\u5728\u5df2\u6709\u7684\u5b58\u50a8\u6c60\u6216\u8005\u6570\u636e\u5377\u4e2d\u52a8\u6001\u521b\u5efa\u65b0\u7684\u6570\u636e\u5377\u58f0\u660e\uff0c\u6216\u8005\u57fa\u4e8e\u6570\u636e\u5377\u58f0\u660e\u7684\u5feb\u7167\u521b\u5efa\u65b0\u7684\u6570\u636e\u5377\u58f0\u660e\u3002

                                      \u57fa\u4e8e\u5feb\u7167\u521b\u5efa\u65f6\u65e0\u6cd5\u4fee\u6539\u6570\u636e\u5377\u58f0\u660e\u7684\u5bb9\u91cf\uff0c\u53ef\u4ee5\u5728\u521b\u5efa\u5b8c\u6210\u540e\u518d\u8fdb\u884c\u4fee\u6539\u3002

                                    • \u9009\u62e9\u521b\u5efa\u65b9\u5f0f\u4e4b\u540e\uff0c\u5728\u4e0b\u62c9\u5217\u8868\u4e2d\u9009\u62e9\u60f3\u8981\u4f7f\u7528\u7684\u5b58\u50a8\u6c60/\u6570\u636e\u5377/\u5feb\u7167\u3002

                                    • \u8bbf\u95ee\u6a21\u5f0f\uff1a

                                    • ReadWriteOnce\uff0c\u6570\u636e\u5377\u58f0\u660e\u53ef\u4ee5\u88ab\u4e00\u4e2a\u8282\u70b9\u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002

                                    • ReadWriteMany\uff0c\u6570\u636e\u5377\u58f0\u660e\u53ef\u4ee5\u88ab\u591a\u4e2a\u8282\u70b9\u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002
                                    • ReadOnlyMany\uff0c\u6570\u636e\u5377\u58f0\u660e\u53ef\u4ee5\u88ab\u591a\u4e2a\u8282\u70b9\u4ee5\u53ea\u8bfb\u65b9\u5f0f\u6302\u8f7d\u3002
                                    • ReadWriteOncePod\uff0c\u6570\u636e\u5377\u58f0\u660e\u53ef\u4ee5\u88ab\u5355\u4e2a Pod \u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002

                                  "},{"location":"end-user/kpanda/storage/pvc.html#_3","title":"\u67e5\u770b\u6570\u636e\u5377\u58f0\u660e","text":"

                                  \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e(PVC) \u3002

                                  • \u8be5\u9875\u9762\u53ef\u4ee5\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u4e2d\u7684\u6240\u6709\u6570\u636e\u5377\u58f0\u660e\uff0c\u4ee5\u53ca\u5404\u4e2a\u6570\u636e\u5377\u58f0\u660e\u7684\u72b6\u6001\u3001\u5bb9\u91cf\u3001\u547d\u540d\u7a7a\u95f4\u7b49\u4fe1\u606f\u3002

                                  • \u652f\u6301\u6309\u7167\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\u3001\u72b6\u6001\u3001\u547d\u540d\u7a7a\u95f4\u3001\u521b\u5efa\u65f6\u95f4\u8fdb\u884c\u987a\u5e8f\u6216\u9006\u5e8f\u6392\u5e8f\u3002

                                  • \u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u53ef\u4ee5\u67e5\u770b\u8be5\u6570\u636e\u5377\u58f0\u660e\u7684\u57fa\u672c\u914d\u7f6e\u3001\u5b58\u50a8\u6c60\u4fe1\u606f\u3001\u6807\u7b7e\u3001\u6ce8\u89e3\u7b49\u4fe1\u606f\u3002

                                  "},{"location":"end-user/kpanda/storage/pvc.html#_4","title":"\u6269\u5bb9\u6570\u636e\u5377\u58f0\u660e","text":"
                                  1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e(PVC) \uff0c\u627e\u5230\u60f3\u8981\u8c03\u6574\u5bb9\u91cf\u7684\u6570\u636e\u5377\u58f0\u660e\u3002

                                  2. \u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u70b9\u51fb\u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u6269\u5bb9 \u3002

                                  3. \u8f93\u5165\u76ee\u6807\u5bb9\u91cf\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a \u3002
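
                                    For reference, the same expansion can also be performed outside the UI with kubectl. A sketch, assuming a PVC named pvc-demo in the default namespace whose storage pool has expansion enabled:

                                    kubectl patch pvc pvc-demo -n default -p '{\"spec\":{\"resources\":{\"requests\":{\"storage\":\"20Gi\"}}}}'\n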

                                  "},{"location":"end-user/kpanda/storage/pvc.html#_5","title":"\u514b\u9686\u6570\u636e\u5377\u58f0\u660e","text":"

                                  \u901a\u8fc7\u514b\u9686\u6570\u636e\u5377\u58f0\u660e\uff0c\u53ef\u4ee5\u57fa\u4e8e\u88ab\u514b\u9686\u6570\u636e\u5377\u58f0\u660e\u7684\u914d\u7f6e\uff0c\u91cd\u65b0\u521b\u5efa\u4e00\u4e2a\u65b0\u7684\u6570\u636e\u5377\u58f0\u660e\u3002

                                  1. \u8fdb\u5165\u514b\u9686\u9875\u9762

                                    • \u5728\u6570\u636e\u5377\u58f0\u660e\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u514b\u9686\u7684\u6570\u636e\u5377\u58f0\u660e\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u514b\u9686 \u3002

                                      \u4e5f\u53ef\u4ee5\u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u5728\u8be6\u60c5\u9875\u9762\u7684\u53f3\u4e0a\u89d2\u70b9\u51fb\u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u514b\u9686 \u3002

                                  2. \u76f4\u63a5\u4f7f\u7528\u539f\u914d\u7f6e\uff0c\u6216\u8005\u6309\u9700\u8fdb\u884c\u4fee\u6539\uff0c\u7136\u540e\u5728\u9875\u9762\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                                  "},{"location":"end-user/kpanda/storage/pvc.html#_6","title":"\u66f4\u65b0\u6570\u636e\u5377\u58f0\u660e","text":"

                                  \u6709\u4e24\u79cd\u9014\u5f84\u53ef\u4ee5\u66f4\u65b0\u6570\u636e\u5377\u58f0\u660e\u3002\u652f\u6301\u901a\u8fc7\u8868\u5355\u6216 YAML \u6587\u4ef6\u66f4\u65b0\u6570\u636e\u5377\u58f0\u660e\u3002

                                  Note

                                  \u4ec5\u652f\u6301\u66f4\u65b0\u6570\u636e\u5377\u58f0\u660e\u7684\u522b\u540d\u3001\u6807\u7b7e\u548c\u6ce8\u89e3\u3002

                                  • \u5728\u6570\u636e\u5377\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684\u6570\u636e\u5377\u58f0\u660e\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                                  • \u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u6570\u636e\u5377\u58f0\u660e\u7684\u8be6\u60c5\u9875\u9762\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                                  "},{"location":"end-user/kpanda/storage/pvc.html#_7","title":"\u5220\u9664\u6570\u636e\u5377\u58f0\u660e","text":"

                                  \u5728\u6570\u636e\u5377\u58f0\u660e\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u5220\u9664\u7684\u6570\u636e\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u5220\u9664 \u3002

                                  \u4e5f\u53ef\u4ee5\u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u5728\u8be6\u60c5\u9875\u9762\u7684\u53f3\u4e0a\u89d2\u70b9\u51fb\u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u5220\u9664 \u3002

                                  "},{"location":"end-user/kpanda/storage/pvc.html#_8","title":"\u5e38\u89c1\u95ee\u9898","text":"
                                  1. \u5982\u679c\u5217\u8868\u4e2d\u6ca1\u6709\u53ef\u9009\u7684\u5b58\u50a8\u6c60\u6216\u6570\u636e\u5377\uff0c\u53ef\u4ee5\u521b\u5efa\u5b58\u50a8\u6c60\u6216\u521b\u5efa\u6570\u636e\u5377\u3002

                                  2. \u5982\u679c\u5217\u8868\u4e2d\u6ca1\u6709\u53ef\u9009\u7684\u5feb\u7167\uff0c\u53ef\u4ee5\u8fdb\u5165\u6570\u636e\u5377\u58f0\u660e\u7684\u8be6\u60c5\u9875\uff0c\u5728\u53f3\u4e0a\u89d2\u5236\u4f5c\u5feb\u7167\u3002

                                  3. \u5982\u679c\u6570\u636e\u5377\u58f0\u660e\u6240\u4f7f\u7528\u7684\u5b58\u50a8\u6c60 (SC) \u6ca1\u6709\u542f\u7528\u5feb\u7167\uff0c\u5219\u65e0\u6cd5\u5236\u4f5c\u5feb\u7167\uff0c\u9875\u9762\u4e0d\u4f1a\u663e\u793a\u201c\u5236\u4f5c\u5feb\u7167\u201d\u9009\u9879\u3002

                                  4. \u5982\u679c\u6570\u636e\u5377\u58f0\u660e\u6240\u4f7f\u7528\u7684\u5b58\u50a8\u6c60 (SC) \u6ca1\u6709\u5f00\u542f\u6269\u5bb9\u529f\u80fd\uff0c\u5219\u8be5\u6570\u636e\u5377\u4e0d\u652f\u6301\u6269\u5bb9\uff0c\u9875\u9762\u4e0d\u4f1a\u663e\u793a\u6269\u5bb9\u9009\u9879\u3002

                                  "},{"location":"end-user/kpanda/storage/sc-share.html","title":"\u5171\u4eab\u5b58\u50a8\u6c60","text":"

                                  \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u652f\u6301\u5c06\u4e00\u4e2a\u5b58\u50a8\u6c60\u5171\u4eab\u7ed9\u591a\u4e2a\u547d\u540d\u7a7a\u95f4\u4f7f\u7528\uff0c\u4ee5\u4fbf\u63d0\u9ad8\u8d44\u6e90\u5229\u7528\u6548\u7387\u3002

                                  1. \u5728\u5b58\u50a8\u6c60\u5217\u8868\u4e2d\u627e\u5230\u9700\u8981\u5171\u4eab\u7684\u5b58\u50a8\u6c60\uff0c\u5728\u53f3\u4fa7\u64cd\u4f5c\u680f\u4e0b\u70b9\u51fb \u6388\u6743\u547d\u540d\u7a7a\u95f4 \u3002

                                  2. \u70b9\u51fb \u81ea\u5b9a\u4e49\u547d\u540d\u7a7a\u95f4 \u53ef\u4ee5\u9010\u4e00\u9009\u62e9\u9700\u8981\u5c06\u6b64\u5b58\u50a8\u6c60\u5171\u4eab\u5230\u54ea\u4e9b\u547d\u540d\u7a7a\u95f4\u3002

                                    • \u70b9\u51fb \u6388\u6743\u6240\u6709\u547d\u540d\u7a7a\u95f4 \u53ef\u4ee5\u4e00\u6b21\u6027\u5c06\u6b64\u5b58\u50a8\u6c60\u5171\u4eab\u5230\u5f53\u524d\u96c6\u7fa4\u4e0b\u7684\u6240\u6709\u547d\u540d\u7a7a\u95f4\u3002
                                    • \u5728\u5217\u8868\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u65b9\u70b9\u51fb \u79fb\u9664\u6388\u6743 \uff0c\u53ef\u4ee5\u89e3\u9664\u6388\u6743\uff0c\u505c\u6b62\u5c06\u6b64\u5b58\u50a8\u6c60\u5171\u4eab\u5230\u8be5\u547d\u540d\u7a7a\u95f4\u3002

                                  "},{"location":"end-user/kpanda/storage/sc.html","title":"\u5b58\u50a8\u6c60(SC)","text":"

                                  \u5b58\u50a8\u6c60\u6307\u5c06\u8bb8\u591a\u7269\u7406\u78c1\u76d8\u7ec4\u6210\u4e00\u4e2a\u5927\u578b\u5b58\u50a8\u8d44\u6e90\u6c60\uff0c\u672c\u5e73\u53f0\u652f\u6301\u63a5\u5165\u5404\u7c7b\u5b58\u50a8\u5382\u5546\u540e\u521b\u5efa\u5757\u5b58\u50a8\u6c60\u3001\u672c\u5730\u5b58\u50a8\u6c60\u3001\u81ea\u5b9a\u4e49\u5b58\u50a8\u6c60\uff0c\u7136\u540e\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u52a8\u6001\u914d\u7f6e\u6570\u636e\u5377\u3002

                                  "},{"location":"end-user/kpanda/storage/sc.html#sc_1","title":"\u521b\u5efa\u5b58\u50a8\u6c60(SC)","text":"

                                  \u76ee\u524d\u652f\u6301\u901a\u8fc7 YAML \u548c\u8868\u5355\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u5b58\u50a8\u6c60\uff0c\u8fd9\u4e24\u79cd\u65b9\u5f0f\u5404\u6709\u4f18\u52a3\uff0c\u53ef\u4ee5\u6ee1\u8db3\u4e0d\u540c\u7528\u6237\u7684\u4f7f\u7528\u9700\u6c42\u3002

                                  • \u901a\u8fc7 YAML \u521b\u5efa\u6b65\u9aa4\u66f4\u5c11\u3001\u66f4\u9ad8\u6548\uff0c\u4f46\u95e8\u69db\u8981\u6c42\u8f83\u9ad8\uff0c\u9700\u8981\u719f\u6089\u5b58\u50a8\u6c60\u7684 YAML \u6587\u4ef6\u914d\u7f6e\u3002

                                  • \u901a\u8fc7\u8868\u5355\u521b\u5efa\u66f4\u76f4\u89c2\u66f4\u7b80\u5355\uff0c\u6839\u636e\u63d0\u793a\u586b\u5199\u5bf9\u5e94\u7684\u503c\u5373\u53ef\uff0c\u4f46\u6b65\u9aa4\u66f4\u52a0\u7e41\u7410\u3002

                                  "},{"location":"end-user/kpanda/storage/sc.html#yaml","title":"YAML \u521b\u5efa","text":"
                                  1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u5b58\u50a8\u6c60(SC) -> YAML \u521b\u5efa \u3002

                                  2. \u5728\u5f39\u6846\u4e2d\u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u7136\u540e\u5728\u5f39\u6846\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                                    \u652f\u6301\u4ece\u672c\u5730\u5bfc\u5165 YAML \u6587\u4ef6\u6216\u5c06\u586b\u5199\u597d\u7684\u6587\u4ef6\u4e0b\u8f7d\u4fdd\u5b58\u5230\u672c\u5730\u3002
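
                                  As a reference, a minimal sketch of a StorageClass manifest of the kind that could be pasted here follows; the pool name and the local-path provisioner are illustrative assumptions rather than platform defaults.
                                  apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: local-path # hypothetical pool name\nprovisioner: rancher.io/local-path # CSI driver in the vendor-specified format\nreclaimPolicy: Delete # or Retain to keep the data after the volume is deleted\nvolumeBindingMode: WaitForFirstConsumer\nallowVolumeExpansion: true # only effective if the underlying driver supports expansion\n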

                                  "},{"location":"end-user/kpanda/storage/sc.html#_1","title":"\u8868\u5355\u521b\u5efa","text":"
                                  1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u5b58\u50a8\u6c60(SC) -> \u521b\u5efa\u5b58\u50a8\u6c60(SC) \u3002

                                  2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\uff0c\u7136\u540e\u5728\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                                    \u81ea\u5b9a\u4e49\u5b58\u50a8\u7cfb\u7edf

                                    • \u5b58\u50a8\u6c60\u540d\u79f0\u3001\u9a71\u52a8\u3001\u56de\u6536\u7b56\u7565\u5728\u521b\u5efa\u540e\u4e0d\u53ef\u4fee\u6539\u3002
                                    • CSI \u5b58\u50a8\u9a71\u52a8\uff1a\u57fa\u4e8e\u6807\u51c6 Kubernetes \u7684\u5bb9\u5668\u5b58\u50a8\u63a5\u53e3\u63d2\u4ef6\uff0c\u9700\u9075\u5b88\u5b58\u50a8\u5382\u5546\u89c4\u5b9a\u7684\u683c\u5f0f\uff0c\u4f8b\u5982 rancher.io/local-path \u3002

                                      • \u6709\u5173\u5982\u4f55\u586b\u5199\u4e0d\u540c\u5382\u5546\u63d0\u4f9b\u7684 CSI \u9a71\u52a8\uff0c\u53ef\u53c2\u8003 Kubernetes \u5b98\u65b9\u6587\u6863\u5b58\u50a8\u7c7b\u3002
                                        • \u56de\u6536\u7b56\u7565\uff1a\u5220\u9664\u6570\u636e\u5377\u65f6\uff0c\u4fdd\u7559\u6570\u636e\u5377\u4e2d\u7684\u6570\u636e\u6216\u8005\u5220\u9664\u5176\u4e2d\u7684\u6570\u636e\u3002
                                        • \u5feb\u7167/\u6269\u5bb9\uff1a\u5f00\u542f\u540e\uff0c\u57fa\u4e8e\u8be5\u5b58\u50a8\u6c60\u7684\u6570\u636e\u5377/\u6570\u636e\u5377\u58f0\u660e\u624d\u80fd\u652f\u6301\u6269\u5bb9\u548c\u5feb\u7167\u529f\u80fd\uff0c\u4f46 \u524d\u63d0\u662f\u5e95\u5c42\u4f7f\u7528\u7684\u5b58\u50a8\u9a71\u52a8\u652f\u6301\u5feb\u7167\u548c\u6269\u5bb9\u529f\u80fd\u3002

                                    HwameiStor \u5b58\u50a8\u7cfb\u7edf

                                    • \u5b58\u50a8\u6c60\u540d\u79f0\u3001\u9a71\u52a8\u3001\u56de\u6536\u7b56\u7565\u5728\u521b\u5efa\u540e\u4e0d\u53ef\u4fee\u6539\u3002
                                    • \u5b58\u50a8\u7cfb\u7edf\uff1aHwameiStor \u5b58\u50a8\u7cfb\u7edf\u3002
                                    • \u5b58\u50a8\u7c7b\u578b\uff1a\u652f\u6301 LVM\uff0c\u88f8\u78c1\u76d8\u7c7b\u578b
                                      • LVM \u7c7b\u578b \uff1aHwameiStor \u63a8\u8350\u4f7f\u7528\u6b64\u65b9\u5f0f\uff0c\u53ef\u4f7f\u7528\u9ad8\u53ef\u7528\u6570\u636e\u5377\uff0c\u5bf9\u5e94\u7684\u7684 CSI \u5b58\u50a8\u9a71\u52a8\u4e3a lvm.hwameistor.io\u3002
                                      • \u88f8\u78c1\u76d8\u6570\u636e\u5377 \uff1a \u9002\u7528\u4e8e\u975e\u9ad8\u53ef\u7528\u573a\u666f\uff0c\u65e0\u9ad8\u53ef\u7528\u80fd\u529b\uff0c\u5bf9\u5e94\u7684 CSI \u9a71\u52a8\u4e3a hdd.hwameistor.io
                                    • \u9ad8\u53ef\u7528\u6a21\u5f0f\uff1a\u4f7f\u7528\u9ad8\u53ef\u7528\u80fd\u529b\u4e4b\u524d\u8bf7\u786e\u8ba4 DRBD \u7ec4\u4ef6 \u5df2\u5b89\u88c5\u3002\u5f00\u542f\u9ad8\u53ef\u7528\u6a21\u5f0f\u540e\uff0c\u53ef\u5c06\u6570\u636e\u5377\u526f\u672c\u6570\u8bbe\u7f6e\u4e3a 1 \u548c 2\u3002 \u5982\u9700\u8981\u53ef\u5c06\u6570\u636e\u5377\u526f\u672c\u4ece 1 Convert \u6210 1
                                    • \u56de\u6536\u7b56\u7565\uff1a\u5220\u9664\u6570\u636e\u5377\u65f6\uff0c\u4fdd\u7559\u6570\u636e\u5377\u4e2d\u7684\u6570\u636e\u6216\u8005\u5220\u9664\u5176\u4e2d\u7684\u6570\u636e\u3002
                                    • \u5feb\u7167/\u6269\u5bb9\uff1a\u5f00\u542f\u540e\uff0c\u57fa\u4e8e\u8be5\u5b58\u50a8\u6c60\u7684\u6570\u636e\u5377/\u6570\u636e\u5377\u58f0\u660e\u624d\u80fd\u652f\u6301\u6269\u5bb9\u548c\u5feb\u7167\u529f\u80fd\uff0c\u4f46 \u524d\u63d0\u662f\u5e95\u5c42\u4f7f\u7528\u7684\u5b58\u50a8\u9a71\u52a8\u652f\u6301\u5feb\u7167\u548c\u6269\u5bb9\u529f\u80fd\u3002

                                    Note

                                    \u76ee\u524d HwameiStor xfs\u3001ext4 \u4e24\u79cd\u6587\u4ef6\u7cfb\u7edf\uff0c\u5176\u4e2d\u9ed8\u8ba4\u4f7f\u7528\u7684\u662f xfs \u6587\u4ef6\u7cfb\u7edf\uff0c\u5982\u679c\u60f3\u8981\u66ff\u6362\u4e3a ext4\uff0c\u53ef\u4ee5\u5728\u81ea\u5b9a\u4e49\u53c2\u6570\u6dfb\u52a0 csi.storage.k8s.io/fstype: ext4
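
                                    For illustration, a HwameiStor LVM storage pool switched to ext4 might look like the sketch below; only the lvm.hwameistor.io driver and the fstype parameter come from this page, while the pool name and the other fields are assumptions.
                                    apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: hwameistor-lvm-ext4 # hypothetical pool name\nprovisioner: lvm.hwameistor.io # HwameiStor LVM CSI driver\nparameters:\n  csi.storage.k8s.io/fstype: ext4 # custom parameter overriding the default xfs\nreclaimPolicy: Retain # keep the data when the volume is deleted\nallowVolumeExpansion: true\n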

                                  "},{"location":"end-user/kpanda/storage/sc.html#sc_2","title":"\u66f4\u65b0\u5b58\u50a8\u6c60(SC)","text":"

                                  \u5728\u5b58\u50a8\u6c60\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684\u5b58\u50a8\u6c60\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u7f16\u8f91 \u5373\u53ef\u901a\u8fc7\u66f4\u65b0\u5b58\u50a8\u6c60\u3002

                                  Info

                                  \u9009\u62e9 \u67e5\u770b YAML \u53ef\u4ee5\u67e5\u770b\u8be5\u5b58\u50a8\u6c60\u7684 YAML \u6587\u4ef6\uff0c\u4f46\u4e0d\u652f\u6301\u7f16\u8f91\u3002

                                  "},{"location":"end-user/kpanda/storage/sc.html#sc_3","title":"\u5220\u9664\u5b58\u50a8\u6c60(SC)","text":"

                                  \u5728\u5b58\u50a8\u6c60\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u5220\u9664\u7684\u5b58\u50a8\u6c60\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u5220\u9664 \u3002

                                  "},{"location":"end-user/kpanda/workloads/create-cronjob.html","title":"\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\uff08CronJob\uff09","text":"

                                  \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\uff08CronJob\uff09\u3002

                                  \u5b9a\u65f6\u4efb\u52a1\uff08CronJob\uff09\u9002\u7528\u4e8e\u4e8e\u6267\u884c\u5468\u671f\u6027\u7684\u64cd\u4f5c\uff0c\u4f8b\u5982\u5907\u4efd\u3001\u62a5\u544a\u751f\u6210\u7b49\u3002\u8fd9\u4e9b\u4efb\u52a1\u53ef\u4ee5\u914d\u7f6e\u4e3a\u5468\u671f\u6027\u91cd\u590d\u7684\uff08\u4f8b\u5982\uff1a\u6bcf\u5929/\u6bcf\u5468/\u6bcf\u6708\u4e00\u6b21\uff09\uff0c\u53ef\u4ee5\u5b9a\u4e49\u4efb\u52a1\u5f00\u59cb\u6267\u884c\u7684\u65f6\u95f4\u95f4\u9694\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-cronjob.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                  \u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\uff08CronJob\uff09\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                  • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                  • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                                  • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                  • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-cronjob.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

                                  \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u5b9a\u65f6\u4efb\u52a1\u3002

                                  1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                  2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u5b9a\u65f6\u4efb\u52a1 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                                  3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\u3001\u5b9a\u65f6\u4efb\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                                    \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de \u5b9a\u65f6\u4efb\u52a1 \u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u5b9a\u65f6\u4efb\u52a1\u6267\u884c\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u3001\u91cd\u542f\u7b49\u64cd\u4f5c\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-cronjob.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"

                                  \u5728 \u521b\u5efa\u5b9a\u65f6\u4efb\u52a1 \u9875\u9762\u4e2d\uff0c\u6839\u636e\u4e0b\u8868\u8f93\u5165\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                  • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                  • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u5b9a\u65f6\u4efb\u52a1\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                  • \u63cf\u8ff0\uff1a\u8f93\u5165\u5de5\u4f5c\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u91cf\u5e94\u4e0d\u8d85\u8fc7 512 \u4e2a\u3002
                                  "},{"location":"end-user/kpanda/workloads/create-cronjob.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                                  \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                  \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

                                  \u57fa\u672c\u4fe1\u606f\uff08\u5fc5\u586b\uff09\u751f\u547d\u5468\u671f\uff08\u9009\u586b\uff09\u5065\u5eb7\u68c0\u67e5\uff08\u9009\u586b\uff09\u73af\u5883\u53d8\u91cf\uff08\u9009\u586b\uff09\u6570\u636e\u5b58\u50a8\uff08\u9009\u586b\uff09\u5b89\u5168\u8bbe\u7f6e\uff08\u9009\u586b\uff09

                                  \u5728\u914d\u7f6e\u5bb9\u5668\u76f8\u5173\u53c2\u6570\u65f6\uff0c\u5fc5\u987b\u6b63\u786e\u586b\u5199\u5bb9\u5668\u7684\u540d\u79f0\u3001\u955c\u50cf\u53c2\u6570\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u8fdb\u5165\u4e0b\u4e00\u6b65\u3002\u53c2\u8003\u4ee5\u4e0b\u8981\u6c42\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u8ba4 \u3002

                                  • \u5bb9\u5668\u7c7b\u578b\uff1a\u9ed8\u8ba4\u4e3a\u5de5\u4f5c\u5bb9\u5668\u3002\u6709\u5173\u521d\u59cb\u5316\u5bb9\u5668\uff0c\u53c2\u89c1 k8s \u5b98\u65b9\u6587\u6863\u3002
                                  • \u5bb9\u5668\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u652f\u6301\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\u3002\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 nginx-01\u3002
                                  • \u955c\u50cf\uff1a
                                    • \u5bb9\u5668\u955c\u50cf\uff1a\u4ece\u5217\u8868\u4e2d\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u955c\u50cf\u3002\u8f93\u5165\u955c\u50cf\u540d\u79f0\u65f6\uff0c\u9ed8\u8ba4\u4ece\u5b98\u65b9\u7684 DockerHub \u62c9\u53d6\u955c\u50cf\u3002 \u63a5\u5165\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u955c\u50cf\u4ed3\u5e93\u6a21\u5757\u540e\uff0c\u53ef\u4ee5\u70b9\u51fb\u53f3\u4fa7\u7684 \u9009\u62e9\u955c\u50cf \u6309\u94ae\u6765\u9009\u62e9\u955c\u50cf\u3002
                                    • \u955c\u50cf\u7248\u672c\uff1a\u4ece\u4e0b\u62c9\u5217\u8868\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u7248\u672c\u3002
                                    • \u955c\u50cf\u62c9\u53d6\u7b56\u7565\uff1a\u52fe\u9009 \u603b\u662f\u62c9\u53d6\u955c\u50cf \u540e\uff0c\u8d1f\u8f7d\u6bcf\u6b21\u91cd\u542f/\u5347\u7ea7\u65f6\u90fd\u4f1a\u4ece\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u955c\u50cf\u3002 \u5982\u679c\u4e0d\u52fe\u9009\uff0c\u5219\u53ea\u62c9\u53d6\u672c\u5730\u955c\u50cf\uff0c\u53ea\u6709\u5f53\u955c\u50cf\u5728\u672c\u5730\u4e0d\u5b58\u5728\u65f6\u624d\u4ece\u955c\u50cf\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u3002 \u66f4\u591a\u8be6\u60c5\u53ef\u53c2\u8003\u955c\u50cf\u62c9\u53d6\u7b56\u7565\u3002
                                    • \u955c\u50cf\u4ed3\u5e93\u5bc6\u94a5\uff1a\u53ef\u9009\u3002\u5982\u679c\u76ee\u6807\u4ed3\u5e93\u9700\u8981 Secret \u624d\u80fd\u8bbf\u95ee\uff0c\u9700\u8981\u5148\u53bb\u521b\u5efa\u4e00\u4e2a\u5bc6\u94a5\u3002
                                  • \u7279\u6743\u5bb9\u5668\uff1a\u5bb9\u5668\u9ed8\u8ba4\u4e0d\u53ef\u4ee5\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u4efb\u4f55\u8bbe\u5907\uff0c\u5f00\u542f\u7279\u6743\u5bb9\u5668\u540e\uff0c\u5bb9\u5668\u5373\u53ef\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u6240\u6709\u8bbe\u5907\uff0c\u4eab\u6709\u5bbf\u4e3b\u673a\u4e0a\u7684\u8fd0\u884c\u8fdb\u7a0b\u7684\u6240\u6709\u6743\u9650\u3002
                                  • CPU/\u5185\u5b58\u914d\u989d\uff1aCPU/\u5185\u5b58\u8d44\u6e90\u7684\u8bf7\u6c42\u503c\uff08\u9700\u8981\u4f7f\u7528\u7684\u6700\u5c0f\u8d44\u6e90\uff09\u548c\u9650\u5236\u503c\uff08\u5141\u8bb8\u4f7f\u7528\u7684\u6700\u5927\u8d44\u6e90\uff09\u3002\u8bf7\u6839\u636e\u9700\u8981\u4e3a\u5bb9\u5668\u914d\u7f6e\u8d44\u6e90\uff0c\u907f\u514d\u8d44\u6e90\u6d6a\u8d39\u548c\u56e0\u5bb9\u5668\u8d44\u6e90\u8d85\u989d\u5bfc\u81f4\u7cfb\u7edf\u6545\u969c\u3002\u9ed8\u8ba4\u503c\u5982\u56fe\u6240\u793a\u3002
                                  • GPU \u914d\u7f6e\uff1a\u4e3a\u5bb9\u5668\u914d\u7f6e GPU \u7528\u91cf\uff0c \u4ec5\u652f\u6301\u8f93\u5165\u6b63\u6574\u6570\u3002
                                    • \u6574\u5361\u6a21\u5f0f\uff1a
                                      • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\u3002\u914d\u7f6e\u540e\uff0c\u5bb9\u5668\u5c06\u5360\u7528\u6574\u5f20\u7269\u7406 GPU\u5361\u3002\u540c\u65f6\u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                                    • \u865a\u62df\u5316\u6a21\u5f0f\uff1a
                                      • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\uff0c \u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                                      • GPU \u7b97\u529b\uff1a\u6bcf\u5f20\u7269\u7406 GPU \u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u7b97\u529b\u767e\u5206\u6bd4\uff0c\u6700\u591a\u4e3a100%\u3002
                                      • \u663e\u5b58\uff1a\u6bcf\u5f20\u7269\u7406\u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u663e\u5b58\u6570\u91cf\u3002
                                      • \u8c03\u5ea6\u7b56\u7565\uff08Binpack / Spread\uff09\uff1a\u652f\u6301\u57fa\u4e8e GPU \u5361\u548c\u57fa\u4e8e\u8282\u70b9\u7684\u4e24\u79cd\u7ef4\u5ea6\u7684\u8c03\u5ea6\u7b56\u7565\u3002Binpack \u662f\u96c6\u4e2d\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u540c\u4e00\u4e2a\u8282\u70b9\u7684\u540c\u4e00\u5f20 GPU \u5361\u4e0a\uff1bSpread \u662f\u5206\u6563\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u4e0d\u540c\u8282\u70b9\u7684\u4e0d\u540c GPU \u5361\u4e0a\uff0c\u6839\u636e\u5b9e\u9645\u573a\u666f\u53ef\u7ec4\u5408\u4f7f\u7528\u3002\uff08\u5f53\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u4e0e\u96c6\u7fa4\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u51b2\u7a81\u65f6\uff0c\u7cfb\u7edf\u4f18\u5148\u4f7f\u7528\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684\u8c03\u5ea6\u7b56\u7565\uff09\u3002
                                      • \u4efb\u52a1\u4f18\u5148\u7ea7\uff1aGPU \u7b97\u529b\u4f1a\u4f18\u5148\u4f9b\u7ed9\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u4f7f\u7528\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u51cf\u5c11\u751a\u81f3\u6682\u505c\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u76f4\u5230\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u7ed3\u675f\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u91cd\u65b0\u7ee7\u7eed\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u5e38\u7528\u4e8e\u5728\u79bb\u7ebf\u6df7\u90e8\u573a\u666f\u3002
                                      • \u6307\u5b9a\u578b\u53f7\uff1a\u5c06\u5de5\u4f5c\u8d1f\u8f7d\u8c03\u5ea6\u5230\u6307\u5b9a\u578b\u53f7\u7684 GPU \u5361\u4e0a\uff0c\u9002\u7528\u4e8e\u5bf9 GPU \u578b\u53f7\u6709\u7279\u6b8a\u8981\u6c42\u7684\u573a\u666f\u3002
                                    • Mig \u6a21\u5f0f
                                      • \u89c4\u683c\uff1a\u5207\u5206\u540e\u7684\u7269\u7406 GPU \u5361\u89c4\u683c\u3002
                                      • \u6570\u91cf\uff1a\u4f7f\u7528\u8be5\u89c4\u683c\u7684\u6570\u91cf\u3002

                                  \u8bbe\u7f6e GPU \u4e4b\u524d\uff0c\u9700\u8981\u7ba1\u7406\u5458\u9884\u5148\u5728\u96c6\u7fa4\u4e0a\u5b89\u88c5 GPU Operator \u548c nvidia-vgpu\uff08\u4ec5 vGPU \u6a21\u5f0f\u9700\u8981\u5b89\u88c5\uff09\uff0c\u5e76\u5728\u96c6\u7fa4\u8bbe\u7f6e\u4e2d\u5f00\u542f GPU \u7279\u6027\u3002
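
                                  As a rough illustration of whole-card mode, requesting one physical GPU in a container spec usually looks like the sketch below; nvidia.com/gpu is the standard NVIDIA device-plugin resource name and is an assumption here, since virtualization and MIG modes use platform-specific resource names.
                                  spec:\n  containers:\n    - name: cuda-demo # hypothetical container name\n      image: nvidia/cuda:12.1.0-base-ubuntu22.04\n      resources:\n        limits:\n          nvidia.com/gpu: 1 # whole-card mode: occupy one whole physical GPU card\n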

                                  Set the commands to be executed when the container starts, after it starts, and before it stops. For details, see container lifecycle configuration.

                                  Used to determine the health status of containers and applications, which helps improve application availability. For details, see container health check configuration.

                                  Configure container parameters within the Pod, add environment variables or pass configuration to the Pod, and so on. For details, see container environment variable configuration.

                                  Configure the settings for mounting data volumes and persisting data in the container. For details, see container data storage configuration.

                                  Containers are safely isolated through Linux's built-in account permission isolation mechanism. You can limit container permissions by using account UIDs (digital identity tokens) with different permissions. For example, enter 0 to use the permissions of the root account.
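
                                  The UID described above corresponds to the standard Kubernetes securityContext field; a minimal sketch, with 1000 as an arbitrary non-root UID, follows.
                                  spec:\n  containers:\n    - name: app # hypothetical container name\n      image: nginx\n      securityContext:\n        runAsUser: 1000 # account UID; 0 would grant root-account permissions\n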

                                  "},{"location":"end-user/kpanda/workloads/create-cronjob.html#_5","title":"\u5b9a\u65f6\u4efb\u52a1\u914d\u7f6e","text":"
                                  • \u5e76\u53d1\u7b56\u7565\uff1a\u662f\u5426\u5141\u8bb8\u591a\u4e2a Job \u4efb\u52a1\u5e76\u884c\u6267\u884c\u3002

                                    • Allow \uff1a\u53ef\u4ee5\u5728\u524d\u4e00\u4e2a\u4efb\u52a1\u672a\u5b8c\u6210\u65f6\u5c31\u521b\u5efa\u65b0\u7684\u5b9a\u65f6\u4efb\u52a1\uff0c\u800c\u4e14\u591a\u4e2a\u4efb\u52a1\u53ef\u4ee5\u5e76\u884c\u3002\u4efb\u52a1\u592a\u591a\u53ef\u80fd\u62a2\u5360\u96c6\u7fa4\u8d44\u6e90\u3002
                                    • Forbid \uff1a\u5728\u524d\u4e00\u4e2a\u4efb\u52a1\u5b8c\u6210\u4e4b\u524d\uff0c\u4e0d\u80fd\u521b\u5efa\u65b0\u4efb\u52a1\uff0c\u5982\u679c\u65b0\u4efb\u52a1\u7684\u6267\u884c\u65f6\u95f4\u5230\u4e86\u800c\u4e4b\u524d\u7684\u4efb\u52a1\u4ecd\u672a\u6267\u884c\u5b8c\uff0cCronJob \u4f1a\u5ffd\u7565\u65b0\u4efb\u52a1\u7684\u6267\u884c\u3002
                                    • Replace \uff1a\u5982\u679c\u65b0\u4efb\u52a1\u7684\u6267\u884c\u65f6\u95f4\u5230\u4e86\uff0c\u4f46\u524d\u4e00\u4e2a\u4efb\u52a1\u8fd8\u672a\u5b8c\u6210\uff0c\u65b0\u7684\u4efb\u52a1\u4f1a\u53d6\u4ee3\u524d\u4e00\u4e2a\u4efb\u52a1\u3002

                                    \u4e0a\u8ff0\u89c4\u5219\u4ec5\u9002\u7528\u4e8e\u540c\u4e00\u4e2a CronJob \u521b\u5efa\u7684\u591a\u4e2a\u4efb\u52a1\u3002\u591a\u4e2a CronJob \u521b\u5efa\u7684\u591a\u4e2a\u4efb\u52a1\u603b\u662f\u5141\u8bb8\u5e76\u53d1\u6267\u884c\u3002

                                  • \u5b9a\u65f6\u89c4\u5219\uff1a\u57fa\u4e8e\u5206\u949f\u3001\u5c0f\u65f6\u3001\u5929\u3001\u5468\u3001\u6708\u8bbe\u7f6e\u4efb\u52a1\u6267\u884c\u7684\u65f6\u95f4\u5468\u671f\u3002\u652f\u6301\u7528\u6570\u5b57\u548c * \u81ea\u5b9a\u4e49 Cron \u8868\u8fbe\u5f0f\uff0c\u8f93\u5165\u8868\u8fbe\u5f0f\u540e\u4e0b\u65b9\u4f1a\u63d0\u793a\u5f53\u524d\u8868\u8fbe\u5f0f\u7684\u542b\u4e49\u3002\u6709\u5173\u8be6\u7ec6\u7684\u8868\u8fbe\u5f0f\u8bed\u6cd5\u89c4\u5219\uff0c\u53ef\u53c2\u8003 Cron \u65f6\u95f4\u8868\u8bed\u6cd5\u3002

                                  • \u4efb\u52a1\u8bb0\u5f55\uff1a\u8bbe\u5b9a\u4fdd\u7559\u591a\u5c11\u6761\u4efb\u52a1\u6267\u884c\u6210\u529f\u6216\u5931\u8d25\u7684\u8bb0\u5f55\u3002 0 \u8868\u793a\u4e0d\u4fdd\u7559\u3002
                                  • \u8d85\u65f6\u65f6\u95f4\uff1a\u8d85\u51fa\u8be5\u65f6\u95f4\u65f6\uff0c\u4efb\u52a1\u5c31\u4f1a\u88ab\u6807\u8bc6\u4e3a\u6267\u884c\u5931\u8d25\uff0c\u4efb\u52a1\u4e0b\u7684\u6240\u6709 Pod \u90fd\u4f1a\u88ab\u5220\u9664\u3002\u4e3a\u7a7a\u65f6\u8868\u793a\u4e0d\u8bbe\u7f6e\u8d85\u65f6\u65f6\u95f4\u3002\u9ed8\u8ba4\u503c\u4e3a 360 s\u3002
                                  • \u91cd\u8bd5\u6b21\u6570\uff1a\u4efb\u52a1\u53ef\u91cd\u8bd5\u6b21\u6570\uff0c\u9ed8\u8ba4\u503c\u4e3a 6\u3002
                                  • \u91cd\u542f\u7b56\u7565\uff1a\u8bbe\u7f6e\u4efb\u52a1\u5931\u8d25\u65f6\u662f\u5426\u91cd\u542f Pod\u3002
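
                                  These form fields map one-to-one onto CronJob spec fields; a minimal sketch with illustrative values (the name and the schedule strings are assumptions) follows.
                                  apiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: demo-cron # hypothetical name\nspec:\n  schedule: '*/30 * * * *' # every 30 minutes; '0 0 * * 1' would mean 00:00 every Monday\n  concurrencyPolicy: Forbid # skip a new job while the previous one is still running\n  successfulJobsHistoryLimit: 3 # job records kept for successful runs\n  failedJobsHistoryLimit: 1 # job records kept for failed runs\n  jobTemplate:\n    spec:\n      activeDeadlineSeconds: 360 # timeout\n      backoffLimit: 6 # retries\n      template:\n        spec:\n          restartPolicy: Never # restart policy\n          containers:\n            - name: task\n              image: busybox\n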
                                  "},{"location":"end-user/kpanda/workloads/create-cronjob.html#_6","title":"\u670d\u52a1\u914d\u7f6e","text":"

                                  \u4e3a\u6709\u72b6\u6001\u8d1f\u8f7d\u914d\u7f6e\u670d\u52a1\uff08Service\uff09\uff0c\u4f7f\u6709\u72b6\u6001\u8d1f\u8f7d\u80fd\u591f\u88ab\u5916\u90e8\u8bbf\u95ee\u3002

                                  1. \u70b9\u51fb \u521b\u5efa\u670d\u52a1 \u6309\u94ae\u3002

                                  2. \u53c2\u8003\u521b\u5efa\u670d\u52a1\uff0c\u914d\u7f6e\u670d\u52a1\u53c2\u6570\u3002

                                  3. \u70b9\u51fb \u786e\u5b9a \uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                  "},{"location":"end-user/kpanda/workloads/create-cronjob.html#_7","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

                                  \u5b9a\u65f6\u4efb\u52a1\u7684\u9ad8\u7ea7\u914d\u7f6e\u4e3b\u8981\u6d89\u53ca\u6807\u7b7e\u4e0e\u6ce8\u89e3\u3002

                                  \u53ef\u4ee5\u70b9\u51fb \u6dfb\u52a0 \u6309\u94ae\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u5b9e\u4f8b Pod \u6dfb\u52a0\u6807\u7b7e\u548c\u6ce8\u89e3\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-cronjob.html#yaml","title":"YAML \u521b\u5efa","text":"

                                  \u9664\u4e86\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u5916\uff0c\u8fd8\u53ef\u4ee5\u901a\u8fc7 YAML \u6587\u4ef6\u66f4\u5feb\u901f\u5730\u521b\u5efa\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\u3002

                                  1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                  2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u5b9a\u65f6\u4efb\u52a1 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 YAML \u521b\u5efa \u6309\u94ae\u3002

                                  3. \u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u5b8c\u6210\u521b\u5efa\u3002

                                  \u70b9\u51fb\u67e5\u770b\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\u7684 YAML \u793a\u4f8b
                                  apiVersion: batch/v1\nkind: CronJob\nmetadata:\n  creationTimestamp: '2022-12-26T09:45:47Z'\n  generation: 1\n  name: demo\n  namespace: default\n  resourceVersion: '92726617'\n  uid: d030d8d7-a405-4dcd-b09a-176942ef36c9\nspec:\n  concurrencyPolicy: Allow\n  failedJobsHistoryLimit: 1\n  jobTemplate:\n    metadata:\n      creationTimestamp: null\n    spec:\n      activeDeadlineSeconds: 360\n      backoffLimit: 6\n      template:\n        metadata:\n          creationTimestamp: null\n        spec:\n          containers:\n            - image: nginx\n              imagePullPolicy: IfNotPresent\n              lifecycle: {}\n              name: container-3\n              resources:\n                limits:\n                  cpu: 250m\n                  memory: 512Mi\n                requests:\n                  cpu: 250m\n                  memory: 512Mi\n              securityContext:\n                privileged: false\n              terminationMessagePath: /dev/termination-log\n              terminationMessagePolicy: File\n          dnsPolicy: ClusterFirst\n          restartPolicy: Never\n          schedulerName: default-scheduler\n          securityContext: {}\n          terminationGracePeriodSeconds: 30\n  schedule: 0 0 13 * 5\n  successfulJobsHistoryLimit: 3\n  suspend: false\nstatus: {}\n
                                  "},{"location":"end-user/kpanda/workloads/create-daemonset.html","title":"\u521b\u5efa\u5b88\u62a4\u8fdb\u7a0b(DaemonSet)","text":"

                                  \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u5b88\u62a4\u8fdb\u7a0b\uff08DaemonSet\uff09\u3002

                                  \u5b88\u62a4\u8fdb\u7a0b\uff08DaemonSet\uff09\u901a\u8fc7\u8282\u70b9\u4eb2\u548c\u6027\u4e0e\u6c61\u70b9\u529f\u80fd\u786e\u4fdd\u5728\u5168\u90e8\u6216\u90e8\u5206\u8282\u70b9\u4e0a\u8fd0\u884c\u4e00\u4e2a Pod \u7684\u526f\u672c\u3002\u5bf9\u4e8e\u65b0\u52a0\u5165\u96c6\u7fa4\u7684\u8282\u70b9\uff0cDaemonSet \u81ea\u52a8\u5728\u65b0\u8282\u70b9\u4e0a\u90e8\u7f72\u76f8\u5e94\u7684 Pod\uff0c\u5e76\u8ddf\u8e2a Pod \u7684\u8fd0\u884c\u72b6\u6001\u3002\u5f53\u8282\u70b9\u88ab\u79fb\u9664\u65f6\uff0cDaemonSet \u5219\u5220\u9664\u5176\u521b\u5efa\u7684\u6240\u6709 Pod\u3002

                                  \u5b88\u62a4\u8fdb\u7a0b\u7684\u5e38\u89c1\u7528\u4f8b\u5305\u62ec\uff1a

                                  • \u5728\u6bcf\u4e2a\u8282\u70b9\u4e0a\u8fd0\u884c\u96c6\u7fa4\u5b88\u62a4\u8fdb\u7a0b\u3002

                                  • \u5728\u6bcf\u4e2a\u8282\u70b9\u4e0a\u8fd0\u884c\u65e5\u5fd7\u6536\u96c6\u5b88\u62a4\u8fdb\u7a0b\u3002

                                  • \u5728\u6bcf\u4e2a\u8282\u70b9\u4e0a\u8fd0\u884c\u76d1\u63a7\u5b88\u62a4\u8fdb\u7a0b\u3002

                                  \u7b80\u5355\u8d77\u89c1\uff0c\u53ef\u4ee5\u5728\u6bcf\u4e2a\u8282\u70b9\u4e0a\u4e3a\u6bcf\u79cd\u7c7b\u578b\u7684\u5b88\u62a4\u8fdb\u7a0b\u90fd\u542f\u52a8\u4e00\u4e2a DaemonSet\u3002\u5982\u9700\u66f4\u7cbe\u7ec6\u3001\u66f4\u9ad8\u7ea7\u5730\u7ba1\u7406\u5b88\u62a4\u8fdb\u7a0b\uff0c\u4e5f\u53ef\u4ee5\u4e3a\u540c\u4e00\u79cd\u5b88\u62a4\u8fdb\u7a0b\u90e8\u7f72\u591a\u4e2a DaemonSet\u3002\u6bcf\u4e2a DaemonSet \u5177\u6709\u4e0d\u540c\u7684\u6807\u5fd7\uff0c\u5e76\u4e14\u5bf9\u4e0d\u540c\u786c\u4ef6\u7c7b\u578b\u5177\u6709\u4e0d\u540c\u7684\u5185\u5b58\u3001CPU \u8981\u6c42\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-daemonset.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                  \u521b\u5efa DaemonSet \u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                  • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                  • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                                  • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                  • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-daemonset.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

                                  \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u5b88\u62a4\u8fdb\u7a0b\u3002

                                  1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                  2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u5b88\u62a4\u8fdb\u7a0b \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                                  3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\u3001\u670d\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                                    \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de \u5b88\u62a4\u8fdb\u7a0b \u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u5b88\u62a4\u8fdb\u7a0b\u6267\u884c\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u3001\u91cd\u542f\u7b49\u64cd\u4f5c\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-daemonset.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"

                                  \u5728 \u521b\u5efa\u5b88\u62a4\u8fdb\u7a0b \u9875\u9762\u4e2d\uff0c\u6839\u636e\u4e0b\u8868\u8f93\u5165\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                  • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                  • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u5b88\u62a4\u8fdb\u7a0b\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                  • \u63cf\u8ff0\uff1a\u8f93\u5165\u5de5\u4f5c\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u91cf\u5e94\u4e0d\u8d85\u8fc7 512 \u4e2a\u3002
                                  "},{"location":"end-user/kpanda/workloads/create-daemonset.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                                  \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                  \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

                                  \u57fa\u672c\u4fe1\u606f\uff08\u5fc5\u586b\uff09\u751f\u547d\u5468\u671f\uff08\u9009\u586b\uff09\u5065\u5eb7\u68c0\u67e5\uff08\u9009\u586b\uff09\u73af\u5883\u53d8\u91cf\uff08\u9009\u586b\uff09\u6570\u636e\u5b58\u50a8\uff08\u9009\u586b\uff09\u5b89\u5168\u8bbe\u7f6e\uff08\u9009\u586b\uff09

                                  \u5728\u914d\u7f6e\u5bb9\u5668\u76f8\u5173\u53c2\u6570\u65f6\uff0c\u5fc5\u987b\u6b63\u786e\u586b\u5199\u5bb9\u5668\u7684\u540d\u79f0\u3001\u955c\u50cf\u53c2\u6570\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u8fdb\u5165\u4e0b\u4e00\u6b65\u3002\u53c2\u8003\u4ee5\u4e0b\u8981\u6c42\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u8ba4 \u3002

                                  • \u5bb9\u5668\u7c7b\u578b\uff1a\u9ed8\u8ba4\u4e3a\u5de5\u4f5c\u5bb9\u5668\u3002\u6709\u5173\u521d\u59cb\u5316\u5bb9\u5668\uff0c\u53c2\u89c1 k8s \u5b98\u65b9\u6587\u6863\u3002
                                  • \u5bb9\u5668\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u652f\u6301\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\u3002\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 nginx-01\u3002
                                  • \u955c\u50cf\uff1a
                                    • \u5bb9\u5668\u955c\u50cf\uff1a\u4ece\u5217\u8868\u4e2d\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u955c\u50cf\u3002\u8f93\u5165\u955c\u50cf\u540d\u79f0\u65f6\uff0c\u9ed8\u8ba4\u4ece\u5b98\u65b9\u7684 DockerHub \u62c9\u53d6\u955c\u50cf\u3002 \u63a5\u5165\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u955c\u50cf\u4ed3\u5e93\u6a21\u5757\u540e\uff0c\u53ef\u4ee5\u70b9\u51fb\u53f3\u4fa7\u7684 \u9009\u62e9\u955c\u50cf \u6309\u94ae\u6765\u9009\u62e9\u955c\u50cf\u3002
                                    • \u955c\u50cf\u7248\u672c\uff1a\u4ece\u4e0b\u62c9\u5217\u8868\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u7248\u672c\u3002
                                    • \u955c\u50cf\u62c9\u53d6\u7b56\u7565\uff1a\u52fe\u9009 \u603b\u662f\u62c9\u53d6\u955c\u50cf \u540e\uff0c\u8d1f\u8f7d\u6bcf\u6b21\u91cd\u542f/\u5347\u7ea7\u65f6\u90fd\u4f1a\u4ece\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u955c\u50cf\u3002 \u5982\u679c\u4e0d\u52fe\u9009\uff0c\u5219\u53ea\u62c9\u53d6\u672c\u5730\u955c\u50cf\uff0c\u53ea\u6709\u5f53\u955c\u50cf\u5728\u672c\u5730\u4e0d\u5b58\u5728\u65f6\u624d\u4ece\u955c\u50cf\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u3002 \u66f4\u591a\u8be6\u60c5\u53ef\u53c2\u8003\u955c\u50cf\u62c9\u53d6\u7b56\u7565\u3002
                                    • \u955c\u50cf\u4ed3\u5e93\u5bc6\u94a5\uff1a\u53ef\u9009\u3002\u5982\u679c\u76ee\u6807\u4ed3\u5e93\u9700\u8981 Secret \u624d\u80fd\u8bbf\u95ee\uff0c\u9700\u8981\u5148\u53bb\u521b\u5efa\u4e00\u4e2a\u5bc6\u94a5\u3002
                                  • \u7279\u6743\u5bb9\u5668\uff1a\u5bb9\u5668\u9ed8\u8ba4\u4e0d\u53ef\u4ee5\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u4efb\u4f55\u8bbe\u5907\uff0c\u5f00\u542f\u7279\u6743\u5bb9\u5668\u540e\uff0c\u5bb9\u5668\u5373\u53ef\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u6240\u6709\u8bbe\u5907\uff0c\u4eab\u6709\u5bbf\u4e3b\u673a\u4e0a\u7684\u8fd0\u884c\u8fdb\u7a0b\u7684\u6240\u6709\u6743\u9650\u3002
                                  • CPU/\u5185\u5b58\u914d\u989d\uff1aCPU/\u5185\u5b58\u8d44\u6e90\u7684\u8bf7\u6c42\u503c\uff08\u9700\u8981\u4f7f\u7528\u7684\u6700\u5c0f\u8d44\u6e90\uff09\u548c\u9650\u5236\u503c\uff08\u5141\u8bb8\u4f7f\u7528\u7684\u6700\u5927\u8d44\u6e90\uff09\u3002\u8bf7\u6839\u636e\u9700\u8981\u4e3a\u5bb9\u5668\u914d\u7f6e\u8d44\u6e90\uff0c\u907f\u514d\u8d44\u6e90\u6d6a\u8d39\u548c\u56e0\u5bb9\u5668\u8d44\u6e90\u8d85\u989d\u5bfc\u81f4\u7cfb\u7edf\u6545\u969c\u3002\u9ed8\u8ba4\u503c\u5982\u56fe\u6240\u793a\u3002
                                  • GPU \u914d\u7f6e\uff1a\u4e3a\u5bb9\u5668\u914d\u7f6e GPU \u7528\u91cf\uff0c \u4ec5\u652f\u6301\u8f93\u5165\u6b63\u6574\u6570\u3002
                                    • \u6574\u5361\u6a21\u5f0f\uff1a
                                      • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\u3002\u914d\u7f6e\u540e\uff0c\u5bb9\u5668\u5c06\u5360\u7528\u6574\u5f20\u7269\u7406 GPU\u5361\u3002\u540c\u65f6\u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                                    • \u865a\u62df\u5316\u6a21\u5f0f\uff1a
                                      • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\uff0c \u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                                      • GPU \u7b97\u529b\uff1a\u6bcf\u5f20\u7269\u7406 GPU \u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u7b97\u529b\u767e\u5206\u6bd4\uff0c\u6700\u591a\u4e3a100%\u3002
                                      • \u663e\u5b58\uff1a\u6bcf\u5f20\u7269\u7406\u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u663e\u5b58\u6570\u91cf\u3002
                                      • \u8c03\u5ea6\u7b56\u7565\uff08Binpack / Spread\uff09\uff1a\u652f\u6301\u57fa\u4e8e GPU \u5361\u548c\u57fa\u4e8e\u8282\u70b9\u7684\u4e24\u79cd\u7ef4\u5ea6\u7684\u8c03\u5ea6\u7b56\u7565\u3002Binpack \u662f\u96c6\u4e2d\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u540c\u4e00\u4e2a\u8282\u70b9\u7684\u540c\u4e00\u5f20 GPU \u5361\u4e0a\uff1bSpread \u662f\u5206\u6563\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u4e0d\u540c\u8282\u70b9\u7684\u4e0d\u540c GPU \u5361\u4e0a\uff0c\u6839\u636e\u5b9e\u9645\u573a\u666f\u53ef\u7ec4\u5408\u4f7f\u7528\u3002\uff08\u5f53\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u4e0e\u96c6\u7fa4\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u51b2\u7a81\u65f6\uff0c\u7cfb\u7edf\u4f18\u5148\u4f7f\u7528\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684\u8c03\u5ea6\u7b56\u7565\uff09\u3002
                                      • \u4efb\u52a1\u4f18\u5148\u7ea7\uff1aGPU \u7b97\u529b\u4f1a\u4f18\u5148\u4f9b\u7ed9\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u4f7f\u7528\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u51cf\u5c11\u751a\u81f3\u6682\u505c\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u76f4\u5230\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u7ed3\u675f\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u91cd\u65b0\u7ee7\u7eed\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u5e38\u7528\u4e8e\u5728\u79bb\u7ebf\u6df7\u90e8\u573a\u666f\u3002
                                      • \u6307\u5b9a\u578b\u53f7\uff1a\u5c06\u5de5\u4f5c\u8d1f\u8f7d\u8c03\u5ea6\u5230\u6307\u5b9a\u578b\u53f7\u7684 GPU \u5361\u4e0a\uff0c\u9002\u7528\u4e8e\u5bf9 GPU \u578b\u53f7\u6709\u7279\u6b8a\u8981\u6c42\u7684\u573a\u666f\u3002
                                    • Mig \u6a21\u5f0f
                                      • \u89c4\u683c\uff1a\u5207\u5206\u540e\u7684\u7269\u7406 GPU \u5361\u89c4\u683c\u3002
                                      • \u6570\u91cf\uff1a\u4f7f\u7528\u8be5\u89c4\u683c\u7684\u6570\u91cf\u3002

                                  \u8bbe\u7f6e GPU \u4e4b\u524d\uff0c\u9700\u8981\u7ba1\u7406\u5458\u9884\u5148\u5728\u96c6\u7fa4\u4e0a\u5b89\u88c5 GPU Operator \u548c nvidia-vgpu\uff08\u4ec5 vGPU \u6a21\u5f0f\u9700\u8981\u5b89\u88c5\uff09\uff0c\u5e76\u5728\u96c6\u7fa4\u8bbe\u7f6e\u4e2d\u5f00\u542f GPU \u7279\u6027\u3002

                                  \u8bbe\u7f6e\u5bb9\u5668\u542f\u52a8\u65f6\u3001\u542f\u52a8\u540e\u3001\u505c\u6b62\u524d\u9700\u8981\u6267\u884c\u7684\u547d\u4ee4\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u5bb9\u5668\u751f\u547d\u5468\u671f\u914d\u7f6e\u3002

                                  \u7528\u4e8e\u5224\u65ad\u5bb9\u5668\u548c\u5e94\u7528\u7684\u5065\u5eb7\u72b6\u6001\uff0c\u6709\u52a9\u4e8e\u63d0\u9ad8\u5e94\u7528\u7684\u53ef\u7528\u6027\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u5bb9\u5668\u5065\u5eb7\u68c0\u67e5\u914d\u7f6e\u3002

                                  \u914d\u7f6e Pod \u5185\u7684\u5bb9\u5668\u53c2\u6570\uff0c\u4e3a Pod \u6dfb\u52a0\u73af\u5883\u53d8\u91cf\u6216\u4f20\u9012\u914d\u7f6e\u7b49\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u5bb9\u5668\u73af\u5883\u53d8\u91cf\u914d\u7f6e\u3002

                                  \u914d\u7f6e\u5bb9\u5668\u6302\u8f7d\u6570\u636e\u5377\u548c\u6570\u636e\u6301\u4e45\u5316\u7684\u8bbe\u7f6e\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u5bb9\u5668\u6570\u636e\u5b58\u50a8\u914d\u7f6e\u3002

                                  \u901a\u8fc7 Linux \u5185\u7f6e\u7684\u8d26\u53f7\u6743\u9650\u9694\u79bb\u673a\u5236\u6765\u5bf9\u5bb9\u5668\u8fdb\u884c\u5b89\u5168\u9694\u79bb\u3002\u60a8\u53ef\u4ee5\u901a\u8fc7\u4f7f\u7528\u4e0d\u540c\u6743\u9650\u7684\u8d26\u53f7 UID\uff08\u6570\u5b57\u8eab\u4efd\u6807\u8bb0\uff09\u6765\u9650\u5236\u5bb9\u5668\u7684\u6743\u9650\u3002\u4f8b\u5982\uff0c\u8f93\u5165 0 \u8868\u793a\u4f7f\u7528 root \u8d26\u53f7\u7684\u6743\u9650\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-daemonset.html#_5","title":"\u670d\u52a1\u914d\u7f6e","text":"

                                  \u4e3a\u5b88\u62a4\u8fdb\u7a0b\u521b\u5efa\u670d\u52a1\uff08Service\uff09\uff0c\u4f7f\u5b88\u62a4\u8fdb\u7a0b\u80fd\u591f\u88ab\u5916\u90e8\u8bbf\u95ee\u3002

                                  1. \u70b9\u51fb \u521b\u5efa\u670d\u52a1 \u6309\u94ae\u3002

                                  2. \u914d\u7f6e\u670d\u52a1\u53c2\u6570\uff0c\u8be6\u60c5\u8bf7\u53c2\u8003\u521b\u5efa\u670d\u52a1\u3002

                                  3. \u70b9\u51fb \u786e\u5b9a \uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                  "},{"location":"end-user/kpanda/workloads/create-daemonset.html#_6","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

                                  \u9ad8\u7ea7\u914d\u7f6e\u5305\u62ec\u8d1f\u8f7d\u7684\u7f51\u7edc\u914d\u7f6e\u3001\u5347\u7ea7\u7b56\u7565\u3001\u8c03\u5ea6\u7b56\u7565\u3001\u6807\u7b7e\u4e0e\u6ce8\u89e3\u56db\u90e8\u5206\uff0c\u53ef\u70b9\u51fb\u4e0b\u65b9\u7684\u9875\u7b7e\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                  \u7f51\u7edc\u914d\u7f6e\u5347\u7ea7\u7b56\u7565\u8c03\u5ea6\u7b56\u7565\u6807\u7b7e\u4e0e\u6ce8\u89e3

                                  In some scenarios, an application can produce redundant DNS queries. Kubernetes provides DNS-related configuration options for applications that can effectively reduce redundant DNS queries in those scenarios and increase business throughput. (A Pod-spec sketch follows this list.)

                                  • DNS policy

                                    • Default: the container uses the domain name resolution file pointed to by kubelet's --resolv-conf parameter. This configuration can only resolve external domain names registered on the Internet; it cannot resolve cluster-internal domain names, and it produces no invalid DNS queries.
                                    • ClusterFirstWithHostNet: the application uses the host's domain name resolution file.
                                    • ClusterFirst: the application connects to Kube-DNS/CoreDNS.
                                    • None: a new option value introduced in Kubernetes v1.9 (Beta in v1.10). After setting it to None, dnsConfig must be set; the container's domain name resolution file is then generated entirely from the dnsConfig configuration.
                                  • Nameservers: fill in the addresses of the domain name servers, for example 10.6.175.20.

                                  • Search domains: the list of DNS search domains for domain name lookups. When specified, the provided search domain list is merged into the search field of the domain name resolution file generated from dnsPolicy, and duplicate domain names are removed. Kubernetes allows at most 6 search domains.
                                  • Options: DNS configuration options, where each object may have a name attribute (required) and a value attribute (optional). The content of this field is merged into the options field of the domain name resolution file generated from dnsPolicy; if any options in dnsConfig conflict with the options of the file generated from dnsPolicy, they are overridden by dnsConfig.
                                  • Host aliases: aliases set for the host.
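
                                  Together, these fields surface as the standard Pod spec dnsPolicy/dnsConfig/hostAliases fields; a minimal sketch, with the nameserver, search domain, and alias as placeholder values, follows.
                                  spec:\n  dnsPolicy: None # resolution file generated entirely from dnsConfig\n  dnsConfig:\n    nameservers:\n      - 10.6.175.20\n    searches:\n      - ns1.svc.cluster.local # illustrative search domain\n    options:\n      - name: ndots # required name attribute\n        value: '2' # optional value attribute\n  hostAliases:\n    - ip: 10.6.175.21 # illustrative host alias entry\n      hostnames:\n        - registry.local\n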

                                  • Upgrade method: Rolling upgrade gradually replaces old-version instances with new-version instances; during the upgrade, business traffic is load-balanced across both old and new instances, so the business is not interrupted. Recreate upgrade deletes the old-version workload instances first and then installs the specified new version; the business is interrupted during the upgrade. (See the field sketch after this list.)
                                  • Max unavailable Pods: the maximum number or ratio of unavailable Pods during the workload update; the default is 25%. If it equals the number of instances, there is a risk of service interruption.
                                  • Max surge: the maximum number or ratio by which the total number of Pods may exceed the desired number of replicas while Pods are being updated. The default is 25%.
                                  • Max retained versions: the number of old versions kept for version rollback. The default is 10.
                                  • Min Pod ready time: the minimum time a Pod must be ready; only after this time is the Pod considered available. The default is 0 seconds.
                                  • Max upgrade duration: if the deployment has not succeeded within the set time, the workload is marked as failed. The default is 600 seconds.
                                  • Scale-down time window: the execution time window of the pre-stop command before the workload stops (0-9,999 seconds); the default is 30 seconds.
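
                                  On a DaemonSet, several of these knobs correspond to the spec fields sketched below (values mirror the defaults above; maxSurge requires a Kubernetes version that supports surge for DaemonSets, and fields such as the upgrade duration are managed by the platform rather than this resource).
                                  spec:\n  minReadySeconds: 0 # min Pod ready time\n  revisionHistoryLimit: 10 # max retained versions\n  updateStrategy:\n    type: RollingUpdate # DaemonSets also support OnDelete\n    rollingUpdate:\n      maxUnavailable: '25%' # max unavailable Pods\n      maxSurge: 0 # max surge\n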

                                  • Toleration time: when the node where a workload instance runs becomes unavailable, the time after which the workload instance is rescheduled to another available node; the default is 300 seconds.
                                  • Node affinity: constrain which nodes a Pod can be scheduled onto based on the labels on nodes.
                                  • Workload affinity: constrain which nodes a Pod can be scheduled onto based on the labels of Pods already running on nodes.
                                  • Workload anti-affinity: constrain which nodes a Pod cannot be scheduled onto based on the labels of Pods already running on nodes.
                                  • Topology key: topologyKey designates a group of nodes that can be scheduled onto. For example, kubernetes.io/os means that as long as a node of a given operating system meets the labelSelector conditions, the Pod can be scheduled to that node.

                                  For details, see scheduling policies; a brief affinity sketch follows.
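
                                  The toleration time and node affinity above surface as the standard Pod fields below; the label key/value and the 300-second toleration are illustrative.
                                  spec:\n  tolerations:\n    - key: node.kubernetes.io/unreachable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300 # toleration time before rescheduling\n  affinity:\n    nodeAffinity:\n      requiredDuringSchedulingIgnoredDuringExecution:\n        nodeSelectorTerms:\n          - matchExpressions:\n              - key: kubernetes.io/os # node label used as the constraint\n                operator: In\n                values:\n                  - linux\n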

                                  You can click the Add button to add labels and annotations to the workload and its pods.

                                  "},{"location":"end-user/kpanda/workloads/create-daemonset.html#yaml","title":"YAML Creation","text":"

                                  Besides the image method, you can also create a DaemonSet more quickly from a YAML file.

                                  1. Click Cluster List in the left navigation bar, then click the name of the target cluster to enter the Cluster Details page.

                                  2. On the cluster details page, click Workloads -> DaemonSets in the left navigation bar, then click the YAML Create button in the upper-right corner of the page.

                                  3. Enter or paste a prepared YAML file and click OK to complete the creation.

                                  Click to view a YAML example of creating a DaemonSet
                                  kind: DaemonSet\napiVersion: apps/v1\nmetadata:\n  name: hwameistor-local-disk-manager\n  namespace: hwameistor\n  uid: ccbdc098-7de3-4a8a-96dd-d1cee159c92b\n  resourceVersion: '90999552'\n  generation: 1\n  creationTimestamp: '2022-12-15T09:03:44Z'\n  labels:\n    app.kubernetes.io/managed-by: Helm\n  annotations:\n    deprecated.daemonset.template.generation: '1'\n    meta.helm.sh/release-name: hwameistor\n    meta.helm.sh/release-namespace: hwameistor\nspec:\n  selector:\n    matchLabels:\n      app: hwameistor-local-disk-manager\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: hwameistor-local-disk-manager\n    spec:\n      volumes:\n        - name: udev\n          hostPath:\n            path: /run/udev\n            type: Directory\n        - name: procmount\n          hostPath:\n            path: /proc\n            type: Directory\n        - name: devmount\n          hostPath:\n            path: /dev\n            type: Directory\n        - name: socket-dir\n          hostPath:\n            path: /var/lib/kubelet/plugins/disk.hwameistor.io\n            type: DirectoryOrCreate\n        - name: registration-dir\n          hostPath:\n            path: /var/lib/kubelet/plugins_registry/\n            type: Directory\n        - name: plugin-dir\n          hostPath:\n            path: /var/lib/kubelet/plugins\n            type: DirectoryOrCreate\n        - name: pods-mount-dir\n          hostPath:\n            path: /var/lib/kubelet/pods\n            type: DirectoryOrCreate\n      containers:\n        - name: registrar\n          image: k8s-gcr.m.daocloud.io/sig-storage/csi-node-driver-registrar:v2.5.0\n          args:\n            - '--v=5'\n            - '--csi-address=/csi/csi.sock'\n            - >-\n              --kubelet-registration-path=/var/lib/kubelet/plugins/disk.hwameistor.io/csi.sock\n          env:\n            - name: KUBE_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: spec.nodeName\n          resources: {}\n          volumeMounts:\n            - name: socket-dir\n              mountPath: /csi\n            - name: registration-dir\n              mountPath: /registration\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /bin/sh\n                  - '-c'\n                  - >-\n                    rm -rf /registration/disk.hwameistor.io \n                    /registration/disk.hwameistor.io-reg.sock\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: manager\n          image: ghcr.m.daocloud.io/hwameistor/local-disk-manager:v0.6.1\n          command:\n            - /local-disk-manager\n          args:\n            - '--endpoint=$(CSI_ENDPOINT)'\n            - '--nodeid=$(NODENAME)'\n            - '--csi-enable=true'\n          env:\n            - name: CSI_ENDPOINT\n              value: unix://var/lib/kubelet/plugins/disk.hwameistor.io/csi.sock\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: WATCH_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: NODENAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: spec.nodeName\n            - name: OPERATOR_NAME\n              value: local-disk-manager\n          resources: {}\n          volumeMounts:\n            - name: udev\n              mountPath: /run/udev\n            - name: procmount\n              readOnly: true\n              mountPath: /host/proc\n            - name: devmount\n              mountPath: /dev\n            - name: registration-dir\n              mountPath: /var/lib/kubelet/plugins_registry\n            - name: plugin-dir\n              mountPath: /var/lib/kubelet/plugins\n              mountPropagation: Bidirectional\n            - name: pods-mount-dir\n              mountPath: /var/lib/kubelet/pods\n              mountPropagation: Bidirectional\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            privileged: true\n      restartPolicy: Always\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      serviceAccountName: hwameistor-admin\n      serviceAccount: hwameistor-admin\n      hostNetwork: true\n      hostPID: true\n      securityContext: {}\n      schedulerName: default-scheduler\n      tolerations:\n        - key: CriticalAddonsOnly\n          operator: Exists\n        - key: node.kubernetes.io/not-ready\n          operator: Exists\n          effect: NoSchedule\n        - key: node-role.kubernetes.io/master\n          operator: Exists\n          effect: NoSchedule\n        - key: node-role.kubernetes.io/control-plane\n          operator: Exists\n          effect: NoSchedule\n        - key: node.cloudprovider.kubernetes.io/uninitialized\n          operator: Exists\n          effect: NoSchedule\n  updateStrategy:\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 1\n      maxSurge: 0\n  revisionHistoryLimit: 10\nstatus:\n  currentNumberScheduled: 4\n  numberMisscheduled: 0\n  desiredNumberScheduled: 4\n  numberReady: 4\n  observedGeneration: 1\n  updatedNumberScheduled: 4\n  numberAvailable: 4\n
                                  "},{"location":"end-user/kpanda/workloads/create-deployment.html","title":"\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\uff08Deployment\uff09","text":"

                                  \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\u3002

                                  \u65e0\u72b6\u6001\u8d1f\u8f7d\uff08Deployment\uff09\u662f Kubernetes \u4e2d\u7684\u4e00\u79cd\u5e38\u89c1\u8d44\u6e90\uff0c\u4e3b\u8981\u4e3a Pod \u548c ReplicaSet \u63d0\u4f9b\u58f0\u660e\u5f0f\u66f4\u65b0\uff0c\u652f\u6301\u5f39\u6027\u4f38\u7f29\u3001\u6eda\u52a8\u5347\u7ea7\u3001\u7248\u672c\u56de\u9000\u7b49\u529f\u80fd\u3002\u5728 Deployment \u4e2d\u58f0\u660e\u671f\u671b\u7684 Pod \u72b6\u6001\uff0cDeployment Controller \u4f1a\u901a\u8fc7 ReplicaSet \u4fee\u6539\u5f53\u524d\u72b6\u6001\uff0c\u4f7f\u5176\u8fbe\u5230\u9884\u5148\u58f0\u660e\u7684\u671f\u671b\u72b6\u6001\u3002Deployment \u662f\u65e0\u72b6\u6001\u7684\uff0c\u4e0d\u652f\u6301\u6570\u636e\u6301\u4e45\u5316\uff0c\u9002\u7528\u4e8e\u90e8\u7f72\u65e0\u72b6\u6001\u7684\u3001\u4e0d\u9700\u8981\u4fdd\u5b58\u6570\u636e\u3001\u968f\u65f6\u53ef\u4ee5\u91cd\u542f\u56de\u6eda\u7684\u5e94\u7528\u3002

                                  \u901a\u8fc7\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\uff0c\u53ef\u4ee5\u57fa\u4e8e\u76f8\u5e94\u7684\u89d2\u8272\u6743\u9650\u8f7b\u677e\u7ba1\u7406\u591a\u4e91\u591a\u96c6\u7fa4\u4e0a\u7684\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u5305\u62ec\u5bf9\u65e0\u72b6\u6001\u8d1f\u8f7d\u7684\u521b\u5efa\u3001\u66f4\u65b0\u3001\u5220\u9664\u3001\u5f39\u6027\u6269\u7f29\u3001\u91cd\u542f\u3001\u7248\u672c\u56de\u9000\u7b49\u5168\u751f\u547d\u5468\u671f\u7ba1\u7406\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-deployment.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                  \u5728\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                  • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                  • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                                  • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                  • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-deployment.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

                                  \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u65e0\u72b6\u6001\u8d1f\u8f7d\u3002

                                  1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                  2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u65e0\u72b6\u6001\u8d1f\u8f7d \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                                  3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\u3001\u670d\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                                    \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de \u65e0\u72b6\u6001\u8d1f\u8f7d \u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u8d1f\u8f7d\u6267\u884c\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u3001\u5f39\u6027\u6269\u7f29\u3001\u91cd\u542f\u3001\u7248\u672c\u56de\u9000\u7b49\u64cd\u4f5c\u3002\u5982\u679c\u8d1f\u8f7d\u72b6\u6001\u51fa\u73b0\u5f02\u5e38\uff0c\u8bf7\u67e5\u770b\u5177\u4f53\u5f02\u5e38\u4fe1\u606f\uff0c\u53ef\u53c2\u8003\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-deployment.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"
                                  • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 deployment-01\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                  • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u8d1f\u8f7d\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                  • \u5b9e\u4f8b\u6570\uff1a\u8f93\u5165\u8d1f\u8f7d\u7684 Pod \u5b9e\u4f8b\u6570\u91cf\uff0c\u9ed8\u8ba4\u521b\u5efa 1 \u4e2a Pod \u5b9e\u4f8b\u3002
                                  • \u63cf\u8ff0\uff1a\u8f93\u5165\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u4e0d\u8d85\u8fc7 512\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-deployment.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                                  \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                  \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

                                  \u57fa\u672c\u4fe1\u606f\uff08\u5fc5\u586b\uff09\u751f\u547d\u5468\u671f\uff08\u9009\u586b\uff09\u5065\u5eb7\u68c0\u67e5\uff08\u9009\u586b\uff09\u73af\u5883\u53d8\u91cf\uff08\u9009\u586b\uff09\u6570\u636e\u5b58\u50a8\uff08\u9009\u586b\uff09\u5b89\u5168\u8bbe\u7f6e\uff08\u9009\u586b\uff09

                                  \u5728\u914d\u7f6e\u5bb9\u5668\u76f8\u5173\u53c2\u6570\u65f6\uff0c\u5fc5\u987b\u6b63\u786e\u586b\u5199\u5bb9\u5668\u7684\u540d\u79f0\u3001\u955c\u50cf\u53c2\u6570\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u8fdb\u5165\u4e0b\u4e00\u6b65\u3002\u53c2\u8003\u4ee5\u4e0b\u8981\u6c42\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u8ba4 \u3002

                                  • \u5bb9\u5668\u7c7b\u578b\uff1a\u9ed8\u8ba4\u4e3a\u5de5\u4f5c\u5bb9\u5668\u3002\u6709\u5173\u521d\u59cb\u5316\u5bb9\u5668\uff0c\u53c2\u89c1 k8s \u5b98\u65b9\u6587\u6863\u3002
                                  • \u5bb9\u5668\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u652f\u6301\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\u3002\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 nginx-01\u3002
                                  • \u955c\u50cf\uff1a
                                    • \u5bb9\u5668\u955c\u50cf\uff1a\u4ece\u5217\u8868\u4e2d\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u955c\u50cf\u3002\u8f93\u5165\u955c\u50cf\u540d\u79f0\u65f6\uff0c\u9ed8\u8ba4\u4ece\u5b98\u65b9\u7684 DockerHub \u62c9\u53d6\u955c\u50cf\u3002 \u5b89\u88c5\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u955c\u50cf\u4ed3\u5e93\u6a21\u5757\u540e\uff0c\u53ef\u4ee5\u70b9\u51fb\u53f3\u4fa7\u7684 \u9009\u62e9\u955c\u50cf \u6309\u94ae\u6765\u9009\u62e9\u955c\u50cf\u3002
                                    • \u955c\u50cf\u7248\u672c\uff1a\u4ece\u4e0b\u62c9\u5217\u8868\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u7248\u672c\u3002
                                    • \u955c\u50cf\u62c9\u53d6\u7b56\u7565\uff1a\u52fe\u9009 \u603b\u662f\u62c9\u53d6\u955c\u50cf \u540e\uff0c\u8d1f\u8f7d\u6bcf\u6b21\u91cd\u542f/\u5347\u7ea7\u65f6\u90fd\u4f1a\u4ece\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u955c\u50cf\u3002 \u5982\u679c\u4e0d\u52fe\u9009\uff0c\u5219\u53ea\u62c9\u53d6\u672c\u5730\u955c\u50cf\uff0c\u53ea\u6709\u5f53\u955c\u50cf\u5728\u672c\u5730\u4e0d\u5b58\u5728\u65f6\u624d\u4ece\u955c\u50cf\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u3002 \u66f4\u591a\u8be6\u60c5\u53ef\u53c2\u8003\u955c\u50cf\u62c9\u53d6\u7b56\u7565\u3002
                                    • \u955c\u50cf\u4ed3\u5e93\u5bc6\u94a5\uff1a\u53ef\u9009\u3002\u5982\u679c\u76ee\u6807\u4ed3\u5e93\u9700\u8981 Secret \u624d\u80fd\u8bbf\u95ee\uff0c\u9700\u8981\u5148\u53bb\u521b\u5efa\u4e00\u4e2a\u5bc6\u94a5\u3002
                                  • \u7279\u6743\u5bb9\u5668\uff1a\u5bb9\u5668\u9ed8\u8ba4\u4e0d\u53ef\u4ee5\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u4efb\u4f55\u8bbe\u5907\uff0c\u5f00\u542f\u7279\u6743\u5bb9\u5668\u540e\uff0c\u5bb9\u5668\u5373\u53ef\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u6240\u6709\u8bbe\u5907\uff0c\u4eab\u6709\u5bbf\u4e3b\u673a\u4e0a\u7684\u8fd0\u884c\u8fdb\u7a0b\u7684\u6240\u6709\u6743\u9650\u3002
                                  • CPU/\u5185\u5b58\u914d\u989d\uff1aCPU/\u5185\u5b58\u8d44\u6e90\u7684\u8bf7\u6c42\u503c\uff08\u9700\u8981\u4f7f\u7528\u7684\u6700\u5c0f\u8d44\u6e90\uff09\u548c\u9650\u5236\u503c\uff08\u5141\u8bb8\u4f7f\u7528\u7684\u6700\u5927\u8d44\u6e90\uff09\u3002\u8bf7\u6839\u636e\u9700\u8981\u4e3a\u5bb9\u5668\u914d\u7f6e\u8d44\u6e90\uff0c\u907f\u514d\u8d44\u6e90\u6d6a\u8d39\u548c\u56e0\u5bb9\u5668\u8d44\u6e90\u8d85\u989d\u5bfc\u81f4\u7cfb\u7edf\u6545\u969c\u3002\u9ed8\u8ba4\u503c\u5982\u56fe\u6240\u793a\u3002
                                  • GPU \u914d\u7f6e\uff1a\u4e3a\u5bb9\u5668\u914d\u7f6e GPU \u7528\u91cf\uff0c \u4ec5\u652f\u6301\u8f93\u5165\u6b63\u6574\u6570\u3002
                                    • \u6574\u5361\u6a21\u5f0f\uff1a
                                      • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\u3002\u914d\u7f6e\u540e\uff0c\u5bb9\u5668\u5c06\u5360\u7528\u6574\u5f20\u7269\u7406 GPU\u5361\u3002\u540c\u65f6\u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                                    • \u865a\u62df\u5316\u6a21\u5f0f\uff1a
                                      • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\uff0c \u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                                      • GPU \u7b97\u529b\uff1a\u6bcf\u5f20\u7269\u7406 GPU \u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u7b97\u529b\u767e\u5206\u6bd4\uff0c\u6700\u591a\u4e3a100%\u3002
                                      • \u663e\u5b58\uff1a\u6bcf\u5f20\u7269\u7406\u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u663e\u5b58\u6570\u91cf\u3002
                                      • \u8c03\u5ea6\u7b56\u7565\uff08Binpack / Spread\uff09\uff1a\u652f\u6301\u57fa\u4e8e GPU \u5361\u548c\u57fa\u4e8e\u8282\u70b9\u7684\u4e24\u79cd\u7ef4\u5ea6\u7684\u8c03\u5ea6\u7b56\u7565\u3002Binpack \u662f\u96c6\u4e2d\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u540c\u4e00\u4e2a\u8282\u70b9\u7684\u540c\u4e00\u5f20 GPU \u5361\u4e0a\uff1bSpread \u662f\u5206\u6563\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u4e0d\u540c\u8282\u70b9\u7684\u4e0d\u540c GPU \u5361\u4e0a\uff0c\u6839\u636e\u5b9e\u9645\u573a\u666f\u53ef\u7ec4\u5408\u4f7f\u7528\u3002\uff08\u5f53\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u4e0e\u96c6\u7fa4\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u51b2\u7a81\u65f6\uff0c\u7cfb\u7edf\u4f18\u5148\u4f7f\u7528\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684\u8c03\u5ea6\u7b56\u7565\uff09\u3002
                                      • \u4efb\u52a1\u4f18\u5148\u7ea7\uff1aGPU \u7b97\u529b\u4f1a\u4f18\u5148\u4f9b\u7ed9\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u4f7f\u7528\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u51cf\u5c11\u751a\u81f3\u6682\u505c\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u76f4\u5230\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u7ed3\u675f\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u91cd\u65b0\u7ee7\u7eed\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u5e38\u7528\u4e8e\u5728\u79bb\u7ebf\u6df7\u90e8\u573a\u666f\u3002
                                      • \u6307\u5b9a\u578b\u53f7\uff1a\u5c06\u5de5\u4f5c\u8d1f\u8f7d\u8c03\u5ea6\u5230\u6307\u5b9a\u578b\u53f7\u7684 GPU \u5361\u4e0a\uff0c\u9002\u7528\u4e8e\u5bf9 GPU \u578b\u53f7\u6709\u7279\u6b8a\u8981\u6c42\u7684\u573a\u666f\u3002
                                    • Mig \u6a21\u5f0f
                                      • \u89c4\u683c\uff1a\u5207\u5206\u540e\u7684\u7269\u7406 GPU \u5361\u89c4\u683c\u3002
                                      • \u6570\u91cf\uff1a\u4f7f\u7528\u8be5\u89c4\u683c\u7684\u6570\u91cf\u3002

                                  \u8bbe\u7f6e GPU \u4e4b\u524d\uff0c\u9700\u8981\u7ba1\u7406\u5458\u9884\u5148\u5728\u96c6\u7fa4\u4e0a\u5b89\u88c5 GPU Operator \u548c nvidia-vgpu\uff08\u4ec5 vGPU \u6a21\u5f0f\u9700\u8981\u5b89\u88c5\uff09\uff0c\u5e76\u5728\u96c6\u7fa4\u8bbe\u7f6e\u4e2d\u5f00\u542f GPU \u7279\u6027\u3002

                                  \u8bbe\u7f6e\u5bb9\u5668\u542f\u52a8\u65f6\u3001\u542f\u52a8\u540e\u3001\u505c\u6b62\u524d\u9700\u8981\u6267\u884c\u7684\u547d\u4ee4\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u5bb9\u5668\u751f\u547d\u5468\u671f\u914d\u7f6e\u3002

                                  \u7528\u4e8e\u5224\u65ad\u5bb9\u5668\u548c\u5e94\u7528\u7684\u5065\u5eb7\u72b6\u6001\uff0c\u6709\u52a9\u4e8e\u63d0\u9ad8\u5e94\u7528\u7684\u53ef\u7528\u6027\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u5bb9\u5668\u5065\u5eb7\u68c0\u67e5\u914d\u7f6e\u3002

                                  \u914d\u7f6e Pod \u5185\u7684\u5bb9\u5668\u53c2\u6570\uff0c\u4e3a Pod \u6dfb\u52a0\u73af\u5883\u53d8\u91cf\u6216\u4f20\u9012\u914d\u7f6e\u7b49\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u5bb9\u5668\u73af\u5883\u53d8\u91cf\u914d\u7f6e\u3002

                                  \u914d\u7f6e\u5bb9\u5668\u6302\u8f7d\u6570\u636e\u5377\u548c\u6570\u636e\u6301\u4e45\u5316\u7684\u8bbe\u7f6e\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u5bb9\u5668\u6570\u636e\u5b58\u50a8\u914d\u7f6e\u3002

                                  \u901a\u8fc7 Linux \u5185\u7f6e\u7684\u8d26\u53f7\u6743\u9650\u9694\u79bb\u673a\u5236\u6765\u5bf9\u5bb9\u5668\u8fdb\u884c\u5b89\u5168\u9694\u79bb\u3002\u60a8\u53ef\u4ee5\u901a\u8fc7\u4f7f\u7528\u4e0d\u540c\u6743\u9650\u7684\u8d26\u53f7 UID\uff08\u6570\u5b57\u8eab\u4efd\u6807\u8bb0\uff09\u6765\u9650\u5236\u5bb9\u5668\u7684\u6743\u9650\u3002\u4f8b\u5982\uff0c\u8f93\u5165 0 \u8868\u793a\u4f7f\u7528 root \u8d26\u53f7\u7684\u6743\u9650\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-deployment.html#_5","title":"\u670d\u52a1\u914d\u7f6e","text":"

                                  \u4e3a\u65e0\u72b6\u6001\u8d1f\u8f7d\u914d\u7f6e\u670d\u52a1\uff08Service\uff09\uff0c\u4f7f\u65e0\u72b6\u6001\u8d1f\u8f7d\u80fd\u591f\u88ab\u5916\u90e8\u8bbf\u95ee\u3002

                                  1. \u70b9\u51fb \u521b\u5efa\u670d\u52a1 \u6309\u94ae\u3002

                                  2. \u53c2\u8003\u521b\u5efa\u670d\u52a1\uff0c\u914d\u7f6e\u670d\u52a1\u53c2\u6570\u3002

                                  3. \u70b9\u51fb \u786e\u5b9a \uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                  "},{"location":"end-user/kpanda/workloads/create-deployment.html#_6","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

                                  \u9ad8\u7ea7\u914d\u7f6e\u5305\u62ec\u8d1f\u8f7d\u7684\u7f51\u7edc\u914d\u7f6e\u3001\u5347\u7ea7\u7b56\u7565\u3001\u8c03\u5ea6\u7b56\u7565\u3001\u6807\u7b7e\u4e0e\u6ce8\u89e3\u56db\u90e8\u5206\uff0c\u53ef\u70b9\u51fb\u4e0b\u65b9\u7684\u9875\u7b7e\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                  \u7f51\u7edc\u914d\u7f6e\u5347\u7ea7\u7b56\u7565\u8c03\u5ea6\u7b56\u7565\u6807\u7b7e\u4e0e\u6ce8\u89e3
                                  • \u5982\u5728\u96c6\u7fa4\u4e2d\u90e8\u7f72\u4e86 SpiderPool \u548c Multus \u7ec4\u4ef6\uff0c\u5219\u53ef\u4ee5\u5728\u7f51\u7edc\u914d\u7f6e\u4e2d\u914d\u7f6e\u5bb9\u5668\u7f51\u5361\u3002

                                  • DNS \u914d\u7f6e\uff1a\u5e94\u7528\u5728\u67d0\u4e9b\u573a\u666f\u4e0b\u4f1a\u51fa\u73b0\u5197\u4f59\u7684 DNS \u67e5\u8be2\u3002Kubernetes \u4e3a\u5e94\u7528\u63d0\u4f9b\u4e86\u4e0e DNS \u76f8\u5173\u7684\u914d\u7f6e\u9009\u9879\uff0c\u80fd\u591f\u5728\u67d0\u4e9b\u573a\u666f\u4e0b\u6709\u6548\u5730\u51cf\u5c11\u5197\u4f59\u7684 DNS \u67e5\u8be2\uff0c\u63d0\u5347\u4e1a\u52a1\u5e76\u53d1\u91cf\u3002

                                  • DNS \u7b56\u7565

                                    • Default\uff1a\u4f7f\u5bb9\u5668\u4f7f\u7528 kubelet \u7684 --resolv-conf \u53c2\u6570\u6307\u5411\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u3002\u8be5\u914d\u7f6e\u53ea\u80fd\u89e3\u6790\u6ce8\u518c\u5230\u4e92\u8054\u7f51\u4e0a\u7684\u5916\u90e8\u57df\u540d\uff0c\u65e0\u6cd5\u89e3\u6790\u96c6\u7fa4\u5185\u90e8\u57df\u540d\uff0c\u4e14\u4e0d\u5b58\u5728\u65e0\u6548\u7684 DNS \u67e5\u8be2\u3002
                                    • ClusterFirstWithHostNet\uff1a\u5e94\u7528\u5bf9\u63a5\u4e3b\u673a\u7684\u57df\u540d\u6587\u4ef6\u3002
                                    • ClusterFirst\uff1a\u5e94\u7528\u5bf9\u63a5 Kube-DNS/CoreDNS\u3002
                                    • None\uff1aKubernetes v1.9\uff08Beta in v1.10\uff09\u4e2d\u5f15\u5165\u7684\u65b0\u9009\u9879\u503c\u3002\u8bbe\u7f6e\u4e3a None \u4e4b\u540e\uff0c\u5fc5\u987b\u8bbe\u7f6e dnsConfig\uff0c\u6b64\u65f6\u5bb9\u5668\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u5c06\u5b8c\u5168\u901a\u8fc7 dnsConfig \u7684\u914d\u7f6e\u6765\u751f\u6210\u3002
                                  • \u57df\u540d\u670d\u52a1\u5668\uff1a\u586b\u5199\u57df\u540d\u670d\u52a1\u5668\u7684\u5730\u5740\uff0c\u4f8b\u5982 10.6.175.20 \u3002

                                  • \u641c\u7d22\u57df\uff1a\u57df\u540d\u67e5\u8be2\u65f6\u7684 DNS \u641c\u7d22\u57df\u5217\u8868\u3002\u6307\u5b9a\u540e\uff0c\u63d0\u4f9b\u7684\u641c\u7d22\u57df\u5217\u8868\u5c06\u5408\u5e76\u5230\u57fa\u4e8e dnsPolicy \u751f\u6210\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u7684 search \u5b57\u6bb5\u4e2d\uff0c\u5e76\u5220\u9664\u91cd\u590d\u7684\u57df\u540d\u3002Kubernetes \u6700\u591a\u5141\u8bb8 6 \u4e2a\u641c\u7d22\u57df\u3002
                                  • Options\uff1aDNS \u7684\u914d\u7f6e\u9009\u9879\uff0c\u5176\u4e2d\u6bcf\u4e2a\u5bf9\u8c61\u53ef\u4ee5\u5177\u6709 name \u5c5e\u6027\uff08\u5fc5\u9700\uff09\u548c value \u5c5e\u6027\uff08\u53ef\u9009\uff09\u3002\u8be5\u5b57\u6bb5\u4e2d\u7684\u5185\u5bb9\u5c06\u5408\u5e76\u5230\u57fa\u4e8e dnsPolicy \u751f\u6210\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u7684 options \u5b57\u6bb5\u4e2d\uff0cdnsConfig \u7684 options \u7684\u67d0\u4e9b\u9009\u9879\u5982\u679c\u4e0e\u57fa\u4e8e dnsPolicy \u751f\u6210\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u7684\u9009\u9879\u51b2\u7a81\uff0c\u5219\u4f1a\u88ab dnsConfig \u6240\u8986\u76d6\u3002
                                  • \u4e3b\u673a\u522b\u540d\uff1a\u4e3a\u4e3b\u673a\u8bbe\u7f6e\u7684\u522b\u540d\u3002

                                  • \u5347\u7ea7\u65b9\u5f0f\uff1a \u6eda\u52a8\u5347\u7ea7 \u6307\u9010\u6b65\u7528\u65b0\u7248\u672c\u7684\u5b9e\u4f8b\u66ff\u6362\u65e7\u7248\u672c\u7684\u5b9e\u4f8b\uff0c\u5347\u7ea7\u7684\u8fc7\u7a0b\u4e2d\uff0c\u4e1a\u52a1\u6d41\u91cf\u4f1a\u540c\u65f6\u8d1f\u8f7d\u5747\u8861\u5206\u5e03\u5230\u65b0\u8001\u7684\u5b9e\u4f8b\u4e0a\uff0c\u56e0\u6b64\u4e1a\u52a1\u4e0d\u4f1a\u4e2d\u65ad\u3002 \u91cd\u5efa\u5347\u7ea7 \u6307\u5148\u5220\u9664\u8001\u7248\u672c\u7684\u8d1f\u8f7d\u5b9e\u4f8b\uff0c\u518d\u5b89\u88c5\u6307\u5b9a\u7684\u65b0\u7248\u672c\uff0c\u5347\u7ea7\u8fc7\u7a0b\u4e2d\u4e1a\u52a1\u4f1a\u4e2d\u65ad\u3002
                                  • \u6700\u5927\u4e0d\u53ef\u7528\uff1a\u6307\u5b9a\u8d1f\u8f7d\u66f4\u65b0\u8fc7\u7a0b\u4e2d\u4e0d\u53ef\u7528 Pod \u7684\u6700\u5927\u503c\u6216\u6bd4\u7387\uff0c\u9ed8\u8ba4 25%\u3002\u5982\u679c\u7b49\u4e8e\u5b9e\u4f8b\u6570\u6709\u670d\u52a1\u4e2d\u65ad\u7684\u98ce\u9669\u3002
                                  • \u6700\u5927\u5cf0\u503c\uff1a\u66f4\u65b0 Pod \u7684\u8fc7\u7a0b\u4e2d Pod \u603b\u6570\u8d85\u8fc7 Pod \u671f\u671b\u526f\u672c\u6570\u90e8\u5206\u7684\u6700\u5927\u503c\u6216\u6bd4\u7387\u3002\u9ed8\u8ba4 25%\u3002
                                  • \u6700\u5927\u4fdd\u7559\u7248\u672c\u6570\uff1a\u8bbe\u7f6e\u7248\u672c\u56de\u6eda\u65f6\u4fdd\u7559\u7684\u65e7\u7248\u672c\u6570\u91cf\u3002\u9ed8\u8ba4 10\u3002
                                  • Pod \u53ef\u7528\u6700\u77ed\u65f6\u95f4\uff1aPod \u5c31\u7eea\u7684\u6700\u77ed\u65f6\u95f4\uff0c\u53ea\u6709\u8d85\u51fa\u8fd9\u4e2a\u65f6\u95f4 Pod \u624d\u88ab\u8ba4\u4e3a\u53ef\u7528\uff0c\u9ed8\u8ba4 0 \u79d2\u3002
                                  • \u5347\u7ea7\u6700\u5927\u6301\u7eed\u65f6\u95f4\uff1a\u5982\u679c\u8d85\u8fc7\u6240\u8bbe\u7f6e\u7684\u65f6\u95f4\u4ecd\u672a\u90e8\u7f72\u6210\u529f\uff0c\u5219\u5c06\u8be5\u8d1f\u8f7d\u6807\u8bb0\u4e3a\u5931\u8d25\u3002\u9ed8\u8ba4 600 \u79d2\u3002
                                  • \u7f29\u5bb9\u65f6\u95f4\u7a97\uff1a\u8d1f\u8f7d\u505c\u6b62\u524d\u547d\u4ee4\u7684\u6267\u884c\u65f6\u95f4\u7a97\uff080-9,999\u79d2\uff09\uff0c\u9ed8\u8ba4 30 \u79d2\u3002

                                  • \u5bb9\u5fcd\u65f6\u95f4\uff1a\u8d1f\u8f7d\u5b9e\u4f8b\u6240\u5728\u7684\u8282\u70b9\u4e0d\u53ef\u7528\u65f6\uff0c\u5c06\u8d1f\u8f7d\u5b9e\u4f8b\u91cd\u65b0\u8c03\u5ea6\u5230\u5176\u5b83\u53ef\u7528\u8282\u70b9\u7684\u65f6\u95f4\uff0c\u9ed8\u8ba4\u4e3a 300 \u79d2\u3002
                                  • \u8282\u70b9\u4eb2\u548c\u6027\uff1a\u6839\u636e\u8282\u70b9\u4e0a\u7684\u6807\u7b7e\u6765\u7ea6\u675f Pod \u53ef\u4ee5\u8c03\u5ea6\u5230\u54ea\u4e9b\u8282\u70b9\u4e0a\u3002
                                  • \u5de5\u4f5c\u8d1f\u8f7d\u4eb2\u548c\u6027\uff1a\u57fa\u4e8e\u5df2\u7ecf\u5728\u8282\u70b9\u4e0a\u8fd0\u884c\u7684 Pod \u7684\u6807\u7b7e\u6765\u7ea6\u675f Pod \u53ef\u4ee5\u8c03\u5ea6\u5230\u54ea\u4e9b\u8282\u70b9\u3002
                                  • \u5de5\u4f5c\u8d1f\u8f7d\u53cd\u4eb2\u548c\u6027\uff1a\u57fa\u4e8e\u5df2\u7ecf\u5728\u8282\u70b9\u4e0a\u8fd0\u884c\u7684 Pod \u7684\u6807\u7b7e\u6765\u7ea6\u675f Pod \u4e0d\u53ef\u4ee5\u8c03\u5ea6\u5230\u54ea\u4e9b\u8282\u70b9\u3002

                                  \u5177\u4f53\u8be6\u60c5\u8bf7\u53c2\u8003\u8c03\u5ea6\u7b56\u7565\u3002

                                  \u53ef\u4ee5\u70b9\u51fb \u6dfb\u52a0 \u6309\u94ae\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u548c\u5bb9\u5668\u7ec4\u6dfb\u52a0\u6807\u7b7e\u548c\u6ce8\u89e3\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-deployment.html#yaml","title":"YAML \u521b\u5efa","text":"

                                  \u9664\u4e86\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u5916\uff0c\u8fd8\u53ef\u4ee5\u901a\u8fc7 YAML \u6587\u4ef6\u66f4\u5feb\u901f\u5730\u521b\u5efa\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\u3002

                                  1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                  2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u65e0\u72b6\u6001\u8d1f\u8f7d \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 YAML \u521b\u5efa \u6309\u94ae\u3002

                                  3. \u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u5b8c\u6210\u521b\u5efa\u3002

                                  \u70b9\u51fb\u67e5\u770b\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\u7684 YAML \u793a\u4f8b
                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nginx-deployment\nspec:\n  selector:\n    matchLabels:\n      app: nginx\n  replicas: 2 # \u544a\u77e5 Deployment \u8fd0\u884c 2 \u4e2a\u4e0e\u8be5\u6a21\u677f\u5339\u914d\u7684 Pod\n  template:\n    metadata:\n      labels:\n        app: nginx\n    spec:\n      containers:\n      - name: nginx\n        image: nginx:1.14.2\n        ports:\n        - containerPort: 80\n
                                  "},{"location":"end-user/kpanda/workloads/create-job.html","title":"\u521b\u5efa\u4efb\u52a1\uff08Job\uff09","text":"

                                  \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u4efb\u52a1\uff08Job\uff09\u3002

                                  \u4efb\u52a1\uff08Job\uff09\u9002\u7528\u4e8e\u6267\u884c\u4e00\u6b21\u6027\u4efb\u52a1\u3002Job \u4f1a\u521b\u5efa\u4e00\u4e2a\u6216\u591a\u4e2a Pod\uff0cJob \u4f1a\u4e00\u76f4\u91cd\u65b0\u5c1d\u8bd5\u6267\u884c Pod\uff0c\u76f4\u5230\u6210\u529f\u7ec8\u6b62\u7684 Pod \u8fbe\u5230\u4e00\u5b9a\u6570\u91cf\u3002\u6210\u529f\u7ec8\u6b62\u7684 Pod \u8fbe\u5230\u6307\u5b9a\u7684\u6570\u91cf\u540e\uff0cJob \u4e5f\u968f\u4e4b\u7ed3\u675f\u3002\u5220\u9664 Job \u65f6\u4f1a\u4e00\u540c\u6e05\u9664\u8be5 Job \u521b\u5efa\u7684\u6240\u6709 Pod\u3002\u6682\u505c Job \u65f6\u5220\u9664\u8be5 Job \u4e2d\u7684\u6240\u6709\u6d3b\u8dc3 Pod\uff0c\u76f4\u5230 Job \u88ab\u7ee7\u7eed\u6267\u884c\u3002\u6709\u5173\u4efb\u52a1\uff08Job\uff09\u7684\u66f4\u591a\u4ecb\u7ecd\uff0c\u53ef\u53c2\u8003Job\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-job.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                  • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                  • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                                  • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                  • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-job.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

                                  \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u4efb\u52a1\u3002

                                  1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                  2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u4efb\u52a1 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                                  3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\u3001\u670d\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                                    \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de \u4efb\u52a1 \u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u4efb\u52a1\u6267\u884c\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u3001\u91cd\u542f\u7b49\u64cd\u4f5c\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-job.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"

                                  \u5728 \u521b\u5efa\u4efb\u52a1 \u9875\u9762\u4e2d\uff0c\u6839\u636e\u4e0b\u8868\u8f93\u5165\u57fa\u672c\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                  • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                  • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u4efb\u52a1\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                  • \u5b9e\u4f8b\u6570\uff1a\u8f93\u5165\u5de5\u4f5c\u8d1f\u8f7d\u7684 Pod \u5b9e\u4f8b\u6570\u91cf\u3002\u9ed8\u8ba4\u521b\u5efa 1 \u4e2a Pod \u5b9e\u4f8b\u3002
                                  • \u63cf\u8ff0\uff1a\u8f93\u5165\u5de5\u4f5c\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u91cf\u5e94\u4e0d\u8d85\u8fc7 512 \u4e2a\u3002
                                  "},{"location":"end-user/kpanda/workloads/create-job.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                                  \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                  \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

                                  \u57fa\u672c\u4fe1\u606f\uff08\u5fc5\u586b\uff09\u751f\u547d\u5468\u671f\uff08\u9009\u586b\uff09\u5065\u5eb7\u68c0\u67e5\uff08\u9009\u586b\uff09\u73af\u5883\u53d8\u91cf\uff08\u9009\u586b\uff09\u6570\u636e\u5b58\u50a8\uff08\u9009\u586b\uff09\u5b89\u5168\u8bbe\u7f6e\uff08\u9009\u586b\uff09

                                  \u5728\u914d\u7f6e\u5bb9\u5668\u76f8\u5173\u53c2\u6570\u65f6\uff0c\u5fc5\u987b\u6b63\u786e\u586b\u5199\u5bb9\u5668\u7684\u540d\u79f0\u3001\u955c\u50cf\u53c2\u6570\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u8fdb\u5165\u4e0b\u4e00\u6b65\u3002\u53c2\u8003\u4ee5\u4e0b\u8981\u6c42\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u8ba4 \u3002

                                  • \u5bb9\u5668\u7c7b\u578b\uff1a\u9ed8\u8ba4\u4e3a\u5de5\u4f5c\u5bb9\u5668\u3002\u6709\u5173\u521d\u59cb\u5316\u5bb9\u5668\uff0c\u53c2\u89c1 k8s \u5b98\u65b9\u6587\u6863\u3002
                                  • \u5bb9\u5668\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u652f\u6301\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\u3002\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 nginx-01\u3002
                                  • \u955c\u50cf\uff1a
                                    • \u5bb9\u5668\u955c\u50cf\uff1a\u4ece\u5217\u8868\u4e2d\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u955c\u50cf\u3002\u8f93\u5165\u955c\u50cf\u540d\u79f0\u65f6\uff0c\u9ed8\u8ba4\u4ece\u5b98\u65b9\u7684 DockerHub \u62c9\u53d6\u955c\u50cf\u3002 \u63a5\u5165\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u955c\u50cf\u4ed3\u5e93\u6a21\u5757\u540e\uff0c\u53ef\u4ee5\u70b9\u51fb\u53f3\u4fa7\u7684 \u9009\u62e9\u955c\u50cf \u6309\u94ae\u6765\u9009\u62e9\u955c\u50cf\u3002
                                    • \u955c\u50cf\u7248\u672c\uff1a\u4ece\u4e0b\u62c9\u5217\u8868\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u7248\u672c\u3002
                                    • \u955c\u50cf\u62c9\u53d6\u7b56\u7565\uff1a\u52fe\u9009 \u603b\u662f\u62c9\u53d6\u955c\u50cf \u540e\uff0c\u8d1f\u8f7d\u6bcf\u6b21\u91cd\u542f/\u5347\u7ea7\u65f6\u90fd\u4f1a\u4ece\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u955c\u50cf\u3002 \u5982\u679c\u4e0d\u52fe\u9009\uff0c\u5219\u53ea\u62c9\u53d6\u672c\u5730\u955c\u50cf\uff0c\u53ea\u6709\u5f53\u955c\u50cf\u5728\u672c\u5730\u4e0d\u5b58\u5728\u65f6\u624d\u4ece\u955c\u50cf\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u3002 \u66f4\u591a\u8be6\u60c5\u53ef\u53c2\u8003\u955c\u50cf\u62c9\u53d6\u7b56\u7565\u3002
                                    • \u955c\u50cf\u4ed3\u5e93\u5bc6\u94a5\uff1a\u53ef\u9009\u3002\u5982\u679c\u76ee\u6807\u4ed3\u5e93\u9700\u8981 Secret \u624d\u80fd\u8bbf\u95ee\uff0c\u9700\u8981\u5148\u53bb\u521b\u5efa\u4e00\u4e2a\u5bc6\u94a5\u3002
                                  • \u7279\u6743\u5bb9\u5668\uff1a\u5bb9\u5668\u9ed8\u8ba4\u4e0d\u53ef\u4ee5\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u4efb\u4f55\u8bbe\u5907\uff0c\u5f00\u542f\u7279\u6743\u5bb9\u5668\u540e\uff0c\u5bb9\u5668\u5373\u53ef\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u6240\u6709\u8bbe\u5907\uff0c\u4eab\u6709\u5bbf\u4e3b\u673a\u4e0a\u7684\u8fd0\u884c\u8fdb\u7a0b\u7684\u6240\u6709\u6743\u9650\u3002
                                  • CPU/\u5185\u5b58\u914d\u989d\uff1aCPU/\u5185\u5b58\u8d44\u6e90\u7684\u8bf7\u6c42\u503c\uff08\u9700\u8981\u4f7f\u7528\u7684\u6700\u5c0f\u8d44\u6e90\uff09\u548c\u9650\u5236\u503c\uff08\u5141\u8bb8\u4f7f\u7528\u7684\u6700\u5927\u8d44\u6e90\uff09\u3002\u8bf7\u6839\u636e\u9700\u8981\u4e3a\u5bb9\u5668\u914d\u7f6e\u8d44\u6e90\uff0c\u907f\u514d\u8d44\u6e90\u6d6a\u8d39\u548c\u56e0\u5bb9\u5668\u8d44\u6e90\u8d85\u989d\u5bfc\u81f4\u7cfb\u7edf\u6545\u969c\u3002\u9ed8\u8ba4\u503c\u5982\u56fe\u6240\u793a\u3002
                                  • GPU \u914d\u7f6e\uff1a\u4e3a\u5bb9\u5668\u914d\u7f6e GPU \u7528\u91cf\uff0c \u4ec5\u652f\u6301\u8f93\u5165\u6b63\u6574\u6570\u3002
                                    • \u6574\u5361\u6a21\u5f0f\uff1a
                                      • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\u3002\u914d\u7f6e\u540e\uff0c\u5bb9\u5668\u5c06\u5360\u7528\u6574\u5f20\u7269\u7406 GPU\u5361\u3002\u540c\u65f6\u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                                    • \u865a\u62df\u5316\u6a21\u5f0f\uff1a
                                      • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\uff0c \u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                                      • GPU \u7b97\u529b\uff1a\u6bcf\u5f20\u7269\u7406 GPU \u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u7b97\u529b\u767e\u5206\u6bd4\uff0c\u6700\u591a\u4e3a100%\u3002
                                      • \u663e\u5b58\uff1a\u6bcf\u5f20\u7269\u7406\u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u663e\u5b58\u6570\u91cf\u3002
                                      • \u8c03\u5ea6\u7b56\u7565\uff08Binpack / Spread\uff09\uff1a\u652f\u6301\u57fa\u4e8e GPU \u5361\u548c\u57fa\u4e8e\u8282\u70b9\u7684\u4e24\u79cd\u7ef4\u5ea6\u7684\u8c03\u5ea6\u7b56\u7565\u3002Binpack \u662f\u96c6\u4e2d\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u540c\u4e00\u4e2a\u8282\u70b9\u7684\u540c\u4e00\u5f20 GPU \u5361\u4e0a\uff1bSpread \u662f\u5206\u6563\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u4e0d\u540c\u8282\u70b9\u7684\u4e0d\u540c GPU \u5361\u4e0a\uff0c\u6839\u636e\u5b9e\u9645\u573a\u666f\u53ef\u7ec4\u5408\u4f7f\u7528\u3002\uff08\u5f53\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u4e0e\u96c6\u7fa4\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u51b2\u7a81\u65f6\uff0c\u7cfb\u7edf\u4f18\u5148\u4f7f\u7528\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684\u8c03\u5ea6\u7b56\u7565\uff09\u3002
                                      • \u4efb\u52a1\u4f18\u5148\u7ea7\uff1aGPU \u7b97\u529b\u4f1a\u4f18\u5148\u4f9b\u7ed9\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u4f7f\u7528\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u51cf\u5c11\u751a\u81f3\u6682\u505c\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u76f4\u5230\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u7ed3\u675f\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u91cd\u65b0\u7ee7\u7eed\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u5e38\u7528\u4e8e\u5728\u79bb\u7ebf\u6df7\u90e8\u573a\u666f\u3002
                                      • \u6307\u5b9a\u578b\u53f7\uff1a\u5c06\u5de5\u4f5c\u8d1f\u8f7d\u8c03\u5ea6\u5230\u6307\u5b9a\u578b\u53f7\u7684 GPU \u5361\u4e0a\uff0c\u9002\u7528\u4e8e\u5bf9 GPU \u578b\u53f7\u6709\u7279\u6b8a\u8981\u6c42\u7684\u573a\u666f\u3002
                                    • Mig \u6a21\u5f0f
                                      • \u89c4\u683c\uff1a\u5207\u5206\u540e\u7684\u7269\u7406 GPU \u5361\u89c4\u683c\u3002
                                      • \u6570\u91cf\uff1a\u4f7f\u7528\u8be5\u89c4\u683c\u7684\u6570\u91cf\u3002

                                  \u8bbe\u7f6e GPU \u4e4b\u524d\uff0c\u9700\u8981\u7ba1\u7406\u5458\u9884\u5148\u5728\u96c6\u7fa4\u4e0a\u5b89\u88c5 GPU Operator \u548c nvidia-vgpu\uff08\u4ec5 vGPU \u6a21\u5f0f\u9700\u8981\u5b89\u88c5\uff09\uff0c\u5e76\u5728\u96c6\u7fa4\u8bbe\u7f6e\u4e2d\u5f00\u542f GPU \u7279\u6027\u3002

                                  \u8bbe\u7f6e\u5bb9\u5668\u542f\u52a8\u65f6\u3001\u542f\u52a8\u540e\u3001\u505c\u6b62\u524d\u9700\u8981\u6267\u884c\u7684\u547d\u4ee4\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u5bb9\u5668\u751f\u547d\u5468\u671f\u914d\u7f6e\u3002

                                  \u7528\u4e8e\u5224\u65ad\u5bb9\u5668\u548c\u5e94\u7528\u7684\u5065\u5eb7\u72b6\u6001\uff0c\u6709\u52a9\u4e8e\u63d0\u9ad8\u5e94\u7528\u7684\u53ef\u7528\u6027\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u5bb9\u5668\u5065\u5eb7\u68c0\u67e5\u914d\u7f6e\u3002

                                  \u914d\u7f6e Pod \u5185\u7684\u5bb9\u5668\u53c2\u6570\uff0c\u4e3a Pod \u6dfb\u52a0\u73af\u5883\u53d8\u91cf\u6216\u4f20\u9012\u914d\u7f6e\u7b49\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u5bb9\u5668\u73af\u5883\u53d8\u91cf\u914d\u7f6e\u3002

                                  \u914d\u7f6e\u5bb9\u5668\u6302\u8f7d\u6570\u636e\u5377\u548c\u6570\u636e\u6301\u4e45\u5316\u7684\u8bbe\u7f6e\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u5bb9\u5668\u6570\u636e\u5b58\u50a8\u914d\u7f6e\u3002

                                  \u901a\u8fc7 Linux \u5185\u7f6e\u7684\u8d26\u53f7\u6743\u9650\u9694\u79bb\u673a\u5236\u6765\u5bf9\u5bb9\u5668\u8fdb\u884c\u5b89\u5168\u9694\u79bb\u3002\u60a8\u53ef\u4ee5\u901a\u8fc7\u4f7f\u7528\u4e0d\u540c\u6743\u9650\u7684\u8d26\u53f7 UID\uff08\u6570\u5b57\u8eab\u4efd\u6807\u8bb0\uff09\u6765\u9650\u5236\u5bb9\u5668\u7684\u6743\u9650\u3002\u4f8b\u5982\uff0c\u8f93\u5165 0 \u8868\u793a\u4f7f\u7528 root \u8d26\u53f7\u7684\u6743\u9650\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-job.html#_5","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

                                  \u9ad8\u7ea7\u914d\u7f6e\u5305\u62ec\u4efb\u52a1\u8bbe\u7f6e\u3001\u6807\u7b7e\u4e0e\u6ce8\u89e3\u4e24\u90e8\u5206\u3002

                                  \u4efb\u52a1\u8bbe\u7f6e\u6807\u7b7e\u4e0e\u6ce8\u89e3

                                  • \u5e76\u884c\u6570\uff1a\u4efb\u52a1\u6267\u884c\u8fc7\u7a0b\u4e2d\u5141\u8bb8\u540c\u65f6\u521b\u5efa\u7684\u6700\u5927 Pod \u6570\uff0c\u5e76\u884c\u6570\u5e94\u4e0d\u5927\u4e8e Pod \u603b\u6570\u3002\u9ed8\u8ba4\u4e3a 1\u3002
                                  • \u8d85\u65f6\u65f6\u95f4\uff1a\u8d85\u51fa\u8be5\u65f6\u95f4\u65f6\uff0c\u4efb\u52a1\u4f1a\u88ab\u6807\u8bc6\u4e3a\u6267\u884c\u5931\u8d25\uff0c\u4efb\u52a1\u4e0b\u7684\u6240\u6709 Pod \u90fd\u4f1a\u88ab\u5220\u9664\u3002\u4e3a\u7a7a\u65f6\u8868\u793a\u4e0d\u8bbe\u7f6e\u8d85\u65f6\u65f6\u95f4\u3002
                                  • \u91cd\u542f\u7b56\u7565\uff1a\u8bbe\u7f6e\u5931\u8d25\u65f6\u662f\u5426\u91cd\u542f Pod\u3002

                                  \u53ef\u4ee5\u70b9\u51fb \u6dfb\u52a0 \u6309\u94ae\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u5b9e\u4f8b Pod \u6dfb\u52a0\u6807\u7b7e\u548c\u6ce8\u89e3\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-job.html#yaml","title":"YAML \u521b\u5efa","text":"

                                  \u9664\u4e86\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u5916\uff0c\u8fd8\u53ef\u4ee5\u901a\u8fc7 YAML \u6587\u4ef6\u66f4\u5feb\u901f\u5730\u521b\u5efa\u521b\u5efa\u4efb\u52a1\u3002

                                  1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                  2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u4efb\u52a1 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 YAML \u521b\u5efa \u6309\u94ae\u3002

                                  3. \u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u5b8c\u6210\u521b\u5efa\u3002

                                  \u70b9\u51fb\u67e5\u770b\u521b\u5efa\u4efb\u52a1\u7684 YAML \u793a\u4f8b
                                  kind: Job\napiVersion: batch/v1\nmetadata:\n  name: demo\n  namespace: default\n  uid: a9708239-0358-4aa1-87d3-a092c080836e\n  resourceVersion: '92751876'\n  generation: 1\n  creationTimestamp: '2022-12-26T10:52:22Z'\n  labels:\n    app: demo\n    controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n    job-name: demo\n  annotations:\n    revisions: >-\n      {\"1\":{\"status\":\"running\",\"uid\":\"a9708239-0358-4aa1-87d3-a092c080836e\",\"start-time\":\"2022-12-26T10:52:22Z\",\"completion-time\":\"0001-01-01T00:00:00Z\"}}\nspec:\n  parallelism: 1\n  backoffLimit: 6\n  selector:\n    matchLabels:\n      controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: demo\n        controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n        job-name: demo\n    spec:\n      containers:\n        - name: container-4\n          image: nginx\n          resources:\n            limits:\n              cpu: 250m\n              memory: 512Mi\n            requests:\n              cpu: 250m\n              memory: 512Mi\n          lifecycle: {}\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            privileged: false\n      restartPolicy: Never\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      securityContext: {}\n      schedulerName: default-scheduler\n  completionMode: NonIndexed\n  suspend: false\nstatus:\n  startTime: '2022-12-26T10:52:22Z'\n  active: 1\n
                                  "},{"location":"end-user/kpanda/workloads/create-statefulset.html","title":"\u521b\u5efa\u6709\u72b6\u6001\u8d1f\u8f7d\uff08StatefulSet\uff09","text":"

                                  \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u6709\u72b6\u6001\u8d1f\u8f7d\uff08StatefulSet\uff09\u3002

                                  \u6709\u72b6\u6001\u8d1f\u8f7d\uff08StatefulSet\uff09\u662f Kubernetes \u4e2d\u7684\u4e00\u79cd\u5e38\u89c1\u8d44\u6e90\uff0c\u548c\u65e0\u72b6\u6001\u8d1f\u8f7d\uff08Deployment\uff09\u7c7b\u4f3c\uff0c\u4e3b\u8981\u7528\u4e8e\u7ba1\u7406 Pod \u96c6\u5408\u7684\u90e8\u7f72\u548c\u4f38\u7f29\u3002\u4e8c\u8005\u7684\u4e3b\u8981\u533a\u522b\u5728\u4e8e\uff0cDeployment \u662f\u65e0\u72b6\u6001\u7684\uff0c\u4e0d\u4fdd\u5b58\u6570\u636e\uff0c\u800c StatefulSet \u662f\u6709\u72b6\u6001\u7684\uff0c\u4e3b\u8981\u7528\u4e8e\u7ba1\u7406\u6709\u72b6\u6001\u5e94\u7528\u3002\u6b64\u5916\uff0cStatefulSet \u4e2d\u7684 Pod \u5177\u6709\u6c38\u4e45\u4e0d\u53d8\u7684 ID\uff0c\u4fbf\u4e8e\u5728\u5339\u914d\u5b58\u50a8\u5377\u65f6\u8bc6\u522b\u5bf9\u5e94\u7684 Pod\u3002

                                  \u901a\u8fc7\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\uff0c\u53ef\u4ee5\u57fa\u4e8e\u76f8\u5e94\u7684\u89d2\u8272\u6743\u9650\u8f7b\u677e\u7ba1\u7406\u591a\u4e91\u591a\u96c6\u7fa4\u4e0a\u7684\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u5305\u62ec\u5bf9\u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u521b\u5efa\u3001\u66f4\u65b0\u3001\u5220\u9664\u3001\u5f39\u6027\u6269\u7f29\u3001\u91cd\u542f\u3001\u7248\u672c\u56de\u9000\u7b49\u5168\u751f\u547d\u5468\u671f\u7ba1\u7406\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-statefulset.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                  \u5728\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u6709\u72b6\u6001\u8d1f\u8f7d\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                  • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                  • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                                  • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                  • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-statefulset.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

                                  \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u6709\u72b6\u6001\u8d1f\u8f7d\u3002

                                  1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                  2. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u6709\u72b6\u6001\u8d1f\u8f7d \uff0c\u7136\u540e\u70b9\u51fb\u53f3\u4e0a\u89d2 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                                  3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\u3001\u670d\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                                    \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de \u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d \u5217\u8868\uff0c\u7b49\u5f85\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001\u53d8\u4e3a \u8fd0\u884c\u4e2d \u3002\u5982\u679c\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001\u51fa\u73b0\u5f02\u5e38\uff0c\u8bf7\u67e5\u770b\u5177\u4f53\u5f02\u5e38\u4fe1\u606f\uff0c\u53ef\u53c2\u8003\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001\u3002

                                    \u70b9\u51fb\u65b0\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u5217\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u5de5\u4f5c\u8d1f\u8f7d\u6267\u884c\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u3001\u5f39\u6027\u6269\u7f29\u3001\u91cd\u542f\u3001\u7248\u672c\u56de\u9000\u7b49\u64cd\u4f5c\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-statefulset.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"
                                  • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 deployment-01\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                  • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u8d1f\u8f7d\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                  • \u5b9e\u4f8b\u6570\uff1a\u8f93\u5165\u8d1f\u8f7d\u7684 Pod \u5b9e\u4f8b\u6570\u91cf\uff0c\u9ed8\u8ba4\u521b\u5efa 1 \u4e2a Pod \u5b9e\u4f8b\u3002
                                  • \u63cf\u8ff0\uff1a\u8f93\u5165\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u4e0d\u8d85\u8fc7 512\u3002

                                  "},{"location":"end-user/kpanda/workloads/create-statefulset.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                                  \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                  \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

                                  \u57fa\u672c\u4fe1\u606f\uff08\u5fc5\u586b\uff09\u751f\u547d\u5468\u671f\uff08\u9009\u586b\uff09\u5065\u5eb7\u68c0\u67e5\uff08\u9009\u586b\uff09\u73af\u5883\u53d8\u91cf\uff08\u9009\u586b\uff09\u6570\u636e\u5b58\u50a8\uff08\u9009\u586b\uff09\u5b89\u5168\u8bbe\u7f6e\uff08\u9009\u586b\uff09

                                  \u5728\u914d\u7f6e\u5bb9\u5668\u76f8\u5173\u53c2\u6570\u65f6\uff0c\u5fc5\u987b\u6b63\u786e\u586b\u5199\u5bb9\u5668\u7684\u540d\u79f0\u3001\u955c\u50cf\u53c2\u6570\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u8fdb\u5165\u4e0b\u4e00\u6b65\u3002\u53c2\u8003\u4ee5\u4e0b\u8981\u6c42\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u8ba4 \u3002

• Container type: defaults to work container. For init containers, see the official Kubernetes documentation.
• Container name: up to 63 characters; lowercase letters, digits, and the separator ("-") are supported. It must start and end with a lowercase letter or digit, e.g. nginx-01.
• Image:
  • Container image: select a suitable image from the list. When you enter an image name, the image is pulled from the official DockerHub by default. After the image registry module of the 算丰 AI computing platform is integrated, you can click the Select Image button on the right to choose an image.
  • Image version: select a suitable version from the drop-down list.
  • Image pull policy: if Always pull image is checked, the image is pulled from the registry again every time the workload restarts or upgrades. If unchecked, only the local image is used, and the image is pulled from the registry only when it does not exist locally. For more details, see Image Pull Policy.
  • Image registry secret: optional. If the target registry requires a Secret for access, create one first.
• Privileged container: by default a container cannot access any device on the host. With privileged mode enabled, the container can access all devices on the host and holds all the permissions of processes running on the host.
• CPU/memory quota: the request (minimum resources needed) and limit (maximum resources allowed) for CPU/memory resources. Configure resources for the container as needed to avoid waste and system failures caused by containers exceeding their quotas. Default values are shown in the figure.
• GPU configuration: configure GPU usage for the container; only positive integers may be entered (see the sketch after the note below this list).
  • Whole-card mode:
    • Number of physical cards: the number of physical GPU cards the container can use. When configured, the container occupies entire physical GPU cards. The number of physical cards must be ≤ the maximum number of GPU cards installed on a single node.
  • Virtualization mode:
    • Number of physical cards: the number of physical GPU cards the container can use; it must be ≤ the maximum number of GPU cards installed on a single node.
    • GPU compute: the percentage of compute to use on each physical GPU card, up to 100%.
    • GPU memory: the amount of GPU memory to use on each physical card.
    • Scheduling policy (Binpack / Spread): two scheduling dimensions are supported, GPU-card-based and node-based. Binpack is a consolidation policy that prefers scheduling containers onto the same GPU card of the same node; Spread is a dispersal policy that prefers different GPU cards on different nodes. They can be combined according to the actual scenario. (When the workload-level Binpack / Spread policy conflicts with the cluster-level Binpack / Spread policy, the system gives precedence to the workload-level policy.)
    • Task priority: GPU compute is given preferentially to high-priority tasks; ordinary tasks reduce or even pause their use of GPU compute until the high-priority task ends, after which they resume. Commonly used in online/offline colocation scenarios.
    • Specified model: schedule the workload onto GPU cards of a specified model, for scenarios with special GPU model requirements.
  • MIG mode:
    • Specification: the specification of the partitioned physical GPU card.
    • Count: the number of instances of that specification to use.

Before configuring GPUs, the administrator must install the GPU Operator (and nvidia-vgpu, required only for vGPU mode) on the cluster in advance, and enable the GPU feature in the cluster settings.
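A minimal sketch of how a whole-card request lands in the generated container spec, assuming the NVIDIA device plugin's extended resource name nvidia.com/gpu (other vendors, and the virtualization/MIG modes, expose platform-specific resource names):

spec:
  containers:
    - name: cuda-app                                      # hypothetical container name
      image: nvcr.io/nvidia/cuda:12.2.0-base-ubuntu22.04
      resources:
        limits:
          nvidia.com/gpu: 1                               # whole-card mode: occupies one full physical GPU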

Set the commands to execute when the container starts, after it starts, and before it stops. For details, see Container Lifecycle Configuration.

Used to judge the health status of containers and applications, which helps improve application availability. For details, see Container Health Check Configuration.

Configure container parameters within the Pod, add environment variables to the Pod, pass configuration, and so on. For details, see Container Environment Variable Configuration.

Configure the settings for mounting data volumes and persisting data in the container. For details, see Container Data Storage Configuration.

Containers are safely isolated through Linux's built-in account permission isolation mechanism. You can limit a container's permissions by using an account UID (numeric identity) with different permissions. For example, entering 0 means using the permissions of the root account.
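As a sketch, the UID entered here maps onto the container's securityContext in the generated spec (names and values illustrative):

spec:
  containers:
    - name: app                  # hypothetical container name
      image: nginx:1.25
      securityContext:
        runAsUser: 1000          # run as UID 1000; entering 0 would grant root-account permissions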

                                  "},{"location":"end-user/kpanda/workloads/create-statefulset.html#_5","title":"\u670d\u52a1\u914d\u7f6e","text":"

Configure a Service for the StatefulSet so that it can be accessed externally.

1. Click the Create Service button.

2. Refer to Create a Service to configure the service parameters.

3. Click OK, then click Next.
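A StatefulSet is typically fronted by a headless Service whose name matches the StatefulSet's spec.serviceName, giving each replica a stable DNS record. A minimal sketch (names illustrative; the YAML example later on this page uses serviceName: mysql):

apiVersion: v1
kind: Service
metadata:
  name: mysql                        # must match the StatefulSet's spec.serviceName
spec:
  clusterIP: None                    # headless: one DNS record per Pod, e.g. pod-0.mysql.default
  selector:
    app.kubernetes.io/name: mysql    # must match the Pod template labels
  ports:
    - name: mysql
      port: 3306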

                                  "},{"location":"end-user/kpanda/workloads/create-statefulset.html#_6","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

Advanced configuration covers the workload's network configuration, upgrade strategy, container management policy, scheduling strategy, and labels & annotations. Click the tabs below to view the configuration requirements of each part.

Network Configuration · Upgrade Strategy · Container Management Policy · Scheduling Strategy · Labels & Annotations
• If the SpiderPool and Multus components are deployed in the cluster, you can configure container NICs in the network configuration.

• DNS configuration: in some scenarios an application generates redundant DNS queries. Kubernetes provides DNS-related configuration options for applications that can effectively reduce redundant DNS queries in such scenarios and increase business throughput.

• DNS policy

  • Default: the container uses the domain-name resolution file pointed to by the kubelet's --resolv-conf parameter. This configuration can only resolve external domain names registered on the internet; it cannot resolve cluster-internal domain names, and it produces no invalid DNS queries.
  • ClusterFirstWithHostNet: the application uses the host's domain-name resolution file.
  • ClusterFirst: the application connects to Kube-DNS/CoreDNS.
  • None: a new option introduced in Kubernetes v1.9 (Beta in v1.10). After setting it to None, dnsConfig must be set; the container's domain-name resolution file is then generated entirely from the dnsConfig configuration.
• Nameservers: fill in the addresses of the domain-name servers, e.g. 10.6.175.20.

• Search domains: the DNS search domain list for domain-name queries. When specified, the provided list is merged into the search field of the resolution file generated from dnsPolicy, and duplicate domain names are removed. Kubernetes allows at most 6 search domains.
• Options: DNS configuration options, where each object may have a name attribute (required) and a value attribute (optional). The contents of this field are merged into the options field of the resolution file generated from dnsPolicy; if an option from dnsConfig conflicts with an option from the dnsPolicy-generated file, it is overridden by dnsConfig.
• Host aliases: aliases set for the host (a YAML sketch of these network options follows).

• Upgrade mode: Rolling upgrade gradually replaces old-version instances with new-version instances; during the upgrade, business traffic is load-balanced across both old and new instances, so the business is not interrupted. Recreate upgrade deletes the old-version instances first and then installs the specified new version; the business is interrupted during the upgrade.
• Max retained revisions: the number of old revisions retained for version rollback. Default 10.
• Scale-down time window: the execution time window for the pre-stop command before the workload stops (0-9,999 seconds). Default 30 seconds (see the field mapping sketch below).

Kubernetes v1.7 and later can set the Pod management policy via .spec.podManagementPolicy, which supports the following two modes:

• Ordered policy (OrderedReady): the default Pod management policy. Pods are deployed in order; the StatefulSet begins deploying the next Pod only after the previous Pod has been deployed successfully. Deletion happens in reverse order: the last Pod created is the first deleted.

• Parallel policy (Parallel): create or delete Pods in parallel, just like Pods of the Deployment type. The StatefulSet controller launches or terminates all containers in parallel, without waiting for a Pod to become Running and Ready, or to be fully stopped, before launching or terminating other Pods. This option only affects the behavior of scaling operations; it does not affect the order during updates. (See the sketch below.)
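In YAML this is a single field on the StatefulSet spec:

spec:
  podManagementPolicy: OrderedReady   # default: deploy and delete Pods one at a time, in order
  # podManagementPolicy: Parallel     # alternative: launch or terminate all Pods in parallel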

• Toleration time: when the node where a workload instance runs becomes unavailable, the time within which the instance is rescheduled to another available node. Default 300 seconds.
• Node affinity: constrain which nodes Pods can be scheduled to, based on node labels.
• Workload affinity: constrain which nodes Pods can be scheduled to, based on the labels of Pods already running on those nodes.
• Workload anti-affinity: constrain which nodes Pods cannot be scheduled to, based on the labels of Pods already running on those nodes.
• Topology key: i.e. topologyKey, used to specify a group of nodes that can be scheduled to. For example, kubernetes.io/os means that as long as a node of a given operating system satisfies the labelSelector conditions, Pods can be scheduled to that node.

For details, see Scheduling Policy. A podAntiAffinity sketch follows.
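As a sketch of workload anti-affinity combined with a topology key (labels illustrative), the snippet below prefers spreading replicas across nodes; the YAML example later on this page uses the same structure:

affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          labelSelector:
            matchLabels:
              app.kubernetes.io/name: mysql      # match Pods of the same workload
          topologyKey: kubernetes.io/hostname    # topology domain: at most one preferred match per node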


You can click the Add button to add labels and annotations to the workload and the Pods.

                                  "},{"location":"end-user/kpanda/workloads/create-statefulset.html#yaml","title":"YAML \u521b\u5efa","text":"

Besides the image-based method, you can also create a StatefulSet more quickly from a YAML file.

1. Click Clusters in the left navigation bar, then click the name of the target cluster to enter the Cluster Details page.

2. On the cluster details page, click Workloads -> StatefulSets in the left navigation bar, then click the YAML Create button in the upper-right corner of the page.

3. Enter or paste a prepared YAML file, then click OK to complete the creation.

Click to view a YAML example for creating a StatefulSet
                                  kind: StatefulSet\napiVersion: apps/v1\nmetadata:\n  name: test-mysql-123-mysql\n  namespace: default\n  uid: d3f45527-a0ab-4b22-9013-5842a06f4e0e\n  resourceVersion: '20504385'\n  generation: 1\n  creationTimestamp: '2022-09-22T09:34:10Z'\n  ownerReferences:\n    - apiVersion: mysql.presslabs.org/v1alpha1\n      kind: MysqlCluster\n      name: test-mysql-123\n      uid: 5e877cc3-5167-49da-904e-820940cf1a6d\n      controller: true\n      blockOwnerDeletion: true\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app.kubernetes.io/managed-by: mysql.presslabs.org\n      app.kubernetes.io/name: mysql\n      mysql.presslabs.org/cluster: test-mysql-123\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app.kubernetes.io/component: database\n        app.kubernetes.io/instance: test-mysql-123\n        app.kubernetes.io/managed-by: mysql.presslabs.org\n        app.kubernetes.io/name: mysql\n        app.kubernetes.io/version: 5.7.31\n        mysql.presslabs.org/cluster: test-mysql-123\n      annotations:\n        config_rev: '13941099'\n        prometheus.io/port: '9125'\n        prometheus.io/scrape: 'true'\n        secret_rev: '13941101'\n    spec:\n      volumes:\n        - name: conf\n          emptyDir: {}\n        - name: init-scripts\n          emptyDir: {}\n        - name: config-map\n          configMap:\n            name: test-mysql-123-mysql\n            defaultMode: 420\n        - name: data\n          persistentVolumeClaim:\n            claimName: data\n      initContainers:\n        - name: init\n          image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - clone-and-init\n          envFrom:\n            - secretRef:\n                name: test-mysql-123-mysql-operated\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: BACKUP_USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: BACKUP_USER\n                  optional: true\n            - name: BACKUP_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: BACKUP_PASSWORD\n                  optional: true\n          resources: {}\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: config-map\n              mountPath: /mnt/conf\n            - name: data\n              mountPath: /var/lib/mysql\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n      containers:\n        - name: mysql\n          image: 
docker.m.daocloud.io/mysql:5.7.31\n          ports:\n            - name: mysql\n              containerPort: 3306\n              protocol: TCP\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: ORCH_CLUSTER_ALIAS\n              value: test-mysql-123.default\n            - name: ORCH_HTTP_API\n              value: http://mysql-operator.mcamel-system/api\n            - name: MYSQL_ROOT_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: ROOT_PASSWORD\n                  optional: false\n            - name: MYSQL_USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: USER\n                  optional: true\n            - name: MYSQL_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: PASSWORD\n                  optional: true\n            - name: MYSQL_DATABASE\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: DATABASE\n                  optional: true\n          resources:\n            limits:\n              cpu: '1'\n              memory: 1Gi\n            requests:\n              cpu: 100m\n              memory: 512Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: data\n              mountPath: /var/lib/mysql\n          livenessProbe:\n            exec:\n              command:\n                - mysqladmin\n                - '--defaults-file=/etc/mysql/client.conf'\n                - ping\n            initialDelaySeconds: 60\n            timeoutSeconds: 5\n            periodSeconds: 5\n            successThreshold: 1\n            failureThreshold: 3\n          readinessProbe:\n            exec:\n              command:\n                - /bin/sh\n                - '-c'\n                - >-\n                  test $(mysql --defaults-file=/etc/mysql/client.conf -NB -e\n                  'SELECT COUNT(*) FROM sys_operator.status WHERE\n                  name=\"configured\" AND value=\"1\"') -eq 1\n            initialDelaySeconds: 5\n            timeoutSeconds: 5\n            periodSeconds: 2\n            successThreshold: 1\n            failureThreshold: 3\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - bash\n                  - /etc/mysql/pre-shutdown-ha.sh\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: sidecar\n          image: 
docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - config-and-serve\n          ports:\n            - name: sidecar-http\n              containerPort: 8080\n              protocol: TCP\n          envFrom:\n            - secretRef:\n                name: test-mysql-123-mysql-operated\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: XTRABACKUP_TARGET_DIR\n              value: /tmp/xtrabackup_backupfiles/\n          resources:\n            limits:\n              cpu: '1'\n              memory: 1Gi\n            requests:\n              cpu: 10m\n              memory: 64Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: data\n              mountPath: /var/lib/mysql\n          readinessProbe:\n            httpGet:\n              path: /health\n              port: 8080\n              scheme: HTTP\n            initialDelaySeconds: 30\n            timeoutSeconds: 5\n            periodSeconds: 5\n            successThreshold: 1\n            failureThreshold: 3\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: metrics-exporter\n          image: prom/mysqld-exporter:v0.13.0\n          args:\n            - '--web.listen-address=0.0.0.0:9125'\n            - '--web.telemetry-path=/metrics'\n            - '--collect.heartbeat'\n            - '--collect.heartbeat.database=sys_operator'\n          ports:\n            - name: prometheus\n              containerPort: 9125\n              protocol: TCP\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: METRICS_EXPORTER_USER\n                  optional: false\n            - name: PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: 
test-mysql-123-mysql-operated\n                  key: METRICS_EXPORTER_PASSWORD\n                  optional: false\n            - name: DATA_SOURCE_NAME\n              value: $(USER):$(PASSWORD)@(127.0.0.1:3306)/\n          resources:\n            limits:\n              cpu: 100m\n              memory: 128Mi\n            requests:\n              cpu: 10m\n              memory: 32Mi\n          livenessProbe:\n            httpGet:\n              path: /metrics\n              port: 9125\n              scheme: HTTP\n            initialDelaySeconds: 30\n            timeoutSeconds: 30\n            periodSeconds: 30\n            successThreshold: 1\n            failureThreshold: 3\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: pt-heartbeat\n          image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - pt-heartbeat\n            - '--update'\n            - '--replace'\n            - '--check-read-only'\n            - '--create-table'\n            - '--database'\n            - sys_operator\n            - '--table'\n            - heartbeat\n            - '--utc'\n            - '--defaults-file'\n            - /etc/mysql/heartbeat.conf\n            - '--fail-successive-errors=20'\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n          resources:\n            limits:\n              cpu: 100m\n              memory: 64Mi\n            requests:\n              cpu: 10m\n              memory: 32Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n      restartPolicy: Always\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      securityContext:\n        runAsUser: 999\n        fsGroup: 999\n      affinity:\n        podAntiAffinity:\n          preferredDuringSchedulingIgnoredDuringExecution:\n            - weight: 100\n              podAffinityTerm:\n                labelSelector:\n                  matchLabels:\n                    app.kubernetes.io/component: database\n                    app.kubernetes.io/instance: test-mysql-123\n                    app.kubernetes.io/managed-by: mysql.presslabs.org\n                    app.kubernetes.io/name: mysql\n                    app.kubernetes.io/version: 5.7.31\n                    mysql.presslabs.org/cluster: test-mysql-123\n                topologyKey: kubernetes.io/hostname\n      schedulerName: default-scheduler\n  volumeClaimTemplates:\n    - kind: PersistentVolumeClaim\n      apiVersion: v1\n      metadata:\n        name: data\n        
creationTimestamp: null\n        ownerReferences:\n          - apiVersion: mysql.presslabs.org/v1alpha1\n            kind: MysqlCluster\n            name: test-mysql-123\n            uid: 5e877cc3-5167-49da-904e-820940cf1a6d\n            controller: true\n      spec:\n        accessModes:\n          - ReadWriteOnce\n        resources:\n          limits:\n            storage: 1Gi\n          requests:\n            storage: 1Gi\n        storageClassName: local-path\n        volumeMode: Filesystem\n      status:\n        phase: Pending\n  serviceName: mysql\n  podManagementPolicy: OrderedReady\n  updateStrategy:\n    type: RollingUpdate\n    rollingUpdate:\n      partition: 0\n  revisionHistoryLimit: 10\nstatus:\n  observedGeneration: 1\n  replicas: 1\n  readyReplicas: 1\n  currentReplicas: 1\n  updatedReplicas: 1\n  currentRevision: test-mysql-123-mysql-6b8f5577c7\n  updateRevision: test-mysql-123-mysql-6b8f5577c7\n  collisionCount: 0\n  availableReplicas: 1\n
                                  "},{"location":"end-user/kpanda/workloads/pod-config/env-variables.html","title":"\u914d\u7f6e\u73af\u5883\u53d8\u91cf","text":"

An environment variable is a variable set in the container's runtime environment, used to add environment flags to a Pod or pass configuration, etc. Environment variables can be configured for Pods in the form of key-value pairs.

On top of native Kubernetes, container management in the 算丰 AI computing platform adds a graphical interface for configuring environment variables for Pods, supporting the following configuration methods (see the sketch after this list):

• Key/value pair (Key/Value Pair): use a custom key-value pair as an environment variable of the container
• Resource reference (Resource): use a field defined by the Container as the value of the environment variable, e.g. the container's memory limit or replica count
• Variable/variable reference (Pod Field): use a Pod field as the value of the environment variable, e.g. the Pod's name
• ConfigMap key import (ConfigMap key): import the value of a key in a ConfigMap as the value of an environment variable
• Secret key import (Secret Key): define the value of an environment variable using data from a Secret
• Secret import (Secret): import all key-values in a Secret as environment variables
• ConfigMap import (ConfigMap): import all key-values in a ConfigMap as environment variables
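A minimal sketch showing how each of these methods appears in the container spec (resource names illustrative):

containers:
  - name: app
    image: busybox:1.36
    command: ["sleep", "3600"]
    env:
      - name: MODE                      # key/value pair
        value: "production"
      - name: CPU_LIMIT                 # resource reference
        valueFrom:
          resourceFieldRef:
            containerName: app
            resource: limits.cpu
      - name: POD_NAME                  # Pod field reference
        valueFrom:
          fieldRef:
            fieldPath: metadata.name
      - name: DB_HOST                   # single ConfigMap key
        valueFrom:
          configMapKeyRef:
            name: app-config
            key: db-host
      - name: DB_PASSWORD               # single Secret key
        valueFrom:
          secretKeyRef:
            name: app-secret
            key: password
    envFrom:
      - secretRef:
          name: app-secret              # import all keys of a Secret
      - configMapRef:
          name: app-config              # import all keys of a ConfigMap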
                                  "},{"location":"end-user/kpanda/workloads/pod-config/health-check.html","title":"\u5bb9\u5668\u7684\u5065\u5eb7\u68c0\u67e5","text":"

Container health checks examine the health status of containers according to user requirements. Once configured, if the application inside the container behaves abnormally, the container is automatically restarted to recover. Kubernetes provides liveness checks, readiness checks, and startup checks.

• Liveness probe (LivenessProbe): detects application deadlock (the application is running but cannot make further progress). Restarting containers in this state helps improve application availability, even when the application contains defects.

• Readiness probe (ReadinessProbe): detects when a container is ready to accept request traffic. A Pod is considered ready only when all containers in it are ready. One use of this signal is to control which Pods serve as backends for a Service; Pods that are not ready are removed from the Service's load balancers.

• Startup probe (StartupProbe): detects when an application container has started. Once configured, liveness and readiness checks run only after startup succeeds, ensuring those probes do not interfere with application startup. Startup probes can be used for liveness detection of slow-starting containers, preventing them from being killed before they are up and running.

                                  "},{"location":"end-user/kpanda/workloads/pod-config/health-check.html#_2","title":"\u5b58\u6d3b\u548c\u5c31\u7eea\u68c0\u67e5","text":"

The configuration of the liveness probe (LivenessProbe) is similar to that of the readiness probe (ReadinessProbe); the only difference is using the readinessProbe field instead of the livenessProbe field.

HTTP GET parameter description:

| Parameter | Description |
| --- | --- |
| Path | The request path to access, e.g. /healthz in the example |
| Port | The port the service listens on, e.g. 8080 in the example |
| Protocol | The access protocol, HTTP or HTTPS |
| Delay (initialDelaySeconds) | The delay before checks begin, in seconds; this setting relates to the normal startup time of the application. For example, 30 means health checks begin 30 seconds after the container starts, reserving that time for the application to start. |
| Timeout (timeoutSeconds) | The timeout, in seconds. For example, 10 means the timeout for a health check is 10 seconds; beyond that, the check is considered failed. If set to 0 or unset, the default timeout is 1 second. |
| Success threshold (successThreshold) | The minimum number of consecutive successes after a failed probe for the probe to be considered successful. Default 1, minimum 1. Must be 1 for liveness and startup probes. |
| Failure threshold (failureThreshold) | The number of retries when a probe fails. For a liveness probe, giving up means restarting the container; for a readiness probe, giving up marks the Pod as not ready. Default 3, minimum 1. |

"},{"location":"end-user/kpanda/workloads/pod-config/health-check.html#http-get","title":"Check with an HTTP GET Request","text":"

YAML example:

apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
  name: liveness-http
spec:
  containers:
  - name: liveness
    image: k8s.gcr.io/liveness
    args:
    - /server
    livenessProbe:
      httpGet:
        path: /healthz  # request path to access
        port: 8080      # port the service listens on
        httpHeaders:
        - name: Custom-Header
          value: Awesome
      initialDelaySeconds: 3  # kubelet waits 3 seconds before performing the first probe
      periodSeconds: 3        # kubelet performs a liveness probe every 3 seconds

Following the configured rules, the kubelet sends an HTTP GET request to the service running inside the container (listening on port 8080) to perform the probe. If the handler for the /healthz path on the server returns a success code, the kubelet considers the container healthy and alive. If the handler returns a failure code, the kubelet kills the container and restarts it. Any code greater than or equal to 200 and less than 400 indicates success; any other code indicates failure. For the first 10 seconds of the container's life, the /healthz handler returns a status code of 200; after that, it returns 500.

                                  "},{"location":"end-user/kpanda/workloads/pod-config/health-check.html#tcp","title":"\u4f7f\u7528 TCP \u7aef\u53e3\u68c0\u67e5","text":"

TCP port parameter description:

| Parameter | Description |
| --- | --- |
| Port | The port the service listens on, e.g. 8080 in the example |
| Delay (initialDelaySeconds) | The delay before checks begin, in seconds; this setting relates to the normal startup time of the application. For example, 30 means health checks begin 30 seconds after the container starts, reserving that time for the application to start. |
| Timeout (timeoutSeconds) | The timeout, in seconds. For example, 10 means the timeout for a health check is 10 seconds; beyond that, the check is considered failed. If set to 0 or unset, the default timeout is 1 second. |

For a container that provides a TCP communication service, based on this configuration and following the configured rules, the cluster establishes a TCP connection to the container. If the connection succeeds, the probe succeeds; otherwise it fails. When choosing the TCP port probe method, you must specify the port the container listens on.

YAML example:

apiVersion: v1
kind: Pod
metadata:
  name: goproxy
  labels:
    app: goproxy
spec:
  containers:
  - name: goproxy
    image: k8s.gcr.io/goproxy:0.1
    ports:
    - containerPort: 8080
    readinessProbe:
      tcpSocket:
        port: 8080
      initialDelaySeconds: 5
      periodSeconds: 10
    livenessProbe:
      tcpSocket:
        port: 8080
      initialDelaySeconds: 15
      periodSeconds: 20

This example uses both readiness and liveness probes. The kubelet sends the first readiness probe 5 seconds after the container starts, attempting to connect to port 8080 of the goproxy container. If the probe succeeds, the Pod is marked ready, and the kubelet continues to run the check every 10 seconds.

In addition to the readiness probe, this configuration includes a liveness probe. The kubelet performs the first liveness probe 15 seconds after the container starts. Like the readiness probe, it attempts to connect to port 8080 of the goproxy container. If the liveness probe fails, the container is restarted.

                                  "},{"location":"end-user/kpanda/workloads/pod-config/health-check.html#_3","title":"\u6267\u884c\u547d\u4ee4\u68c0\u67e5","text":"

YAML example:

apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
  name: liveness-exec
spec:
  containers:
  - name: liveness
    image: k8s.gcr.io/busybox
    args:
    - /bin/sh
    - -c
    - touch /tmp/healthy; sleep 30; rm -f /tmp/healthy; sleep 600
    livenessProbe:
      exec:
        command:
        - cat
        - /tmp/healthy
      initialDelaySeconds: 5  # kubelet waits 5 seconds before performing the first probe
      periodSeconds: 5        # kubelet performs a liveness probe every 5 seconds

The periodSeconds field specifies that the kubelet performs a liveness probe every 5 seconds, and the initialDelaySeconds field specifies that the kubelet waits 5 seconds before performing the first probe. Following the configured rules, the cluster probes periodically by having the kubelet execute the command cat /tmp/healthy in the container. If the command executes successfully with a return value of 0, the kubelet considers the container healthy and alive. If the command returns a non-zero value, the kubelet kills the container and restarts it.

                                  "},{"location":"end-user/kpanda/workloads/pod-config/health-check.html#_4","title":"\u4f7f\u7528\u542f\u52a8\u524d\u68c0\u67e5\u4fdd\u62a4\u6162\u542f\u52a8\u5bb9\u5668","text":"

Some applications require a long initialization time at startup. In such cases, set up a startup probe using the same command as the liveness check; for HTTP or TCP checks, you can handle long startups by setting the failureThreshold * periodSeconds product to a long enough time.

YAML example:

ports:
- name: liveness-port
  containerPort: 8080
  hostPort: 8080

livenessProbe:
  httpGet:
    path: /healthz
    port: liveness-port
  failureThreshold: 1
  periodSeconds: 10

startupProbe:
  httpGet:
    path: /healthz
    port: liveness-port
  failureThreshold: 30
  periodSeconds: 10

With the settings above, the application has at most 5 minutes (30 * 10 = 300s) to finish starting. Once the startup probe succeeds, the liveness probe takes over probing the container, responding quickly to container deadlocks. If the startup probe never succeeds, the container is killed after 300 seconds and handled further according to restartPolicy.

                                  "},{"location":"end-user/kpanda/workloads/pod-config/job-parameters.html","title":"\u4efb\u52a1\u53c2\u6570\u8bf4\u660e","text":"

Based on the settings of .spec.completions and .spec.parallelism, jobs (Job) can be divided into the following types:

| Job type | Description |
| --- | --- |
| Non-parallel Job | Creates one Pod until the Job finishes successfully |
| Parallel Job with a fixed completion count | The Job is considered complete when the number of successful Pods reaches .spec.completions |
| Parallel Job | Creates one or more Pods until one finishes successfully |

Parameter description

| Parameter | Description |
| --- | --- |
| RestartPolicy | The restart policy for the Job's Pods; a Job only allows Never or OnFailure |
| .spec.completions | The number of Pods that must finish successfully for the Job to complete; defaults to 1 |
| .spec.parallelism | The number of Pods running in parallel; defaults to 1 |
| spec.backoffLimit | The maximum number of retries for failed Pods; no more retries once this number is exceeded |
| .spec.activeDeadlineSeconds | The Pod running deadline; once reached, the Job, that is, all of its Pods, are stopped. activeDeadlineSeconds has higher priority than backoffLimit: a Job that reaches activeDeadlineSeconds ignores the backoffLimit setting |

The following is an example Job configuration, saved in myjob.yaml, which computes π to 2000 digits and prints the output.

apiVersion: batch/v1
kind: Job                # type of this resource
metadata:
  name: myjob
spec:
  completions: 50        # the Job needs 50 Pods to finish; in this example, π is printed 50 times
  parallelism: 5         # 5 Pods in parallel
  backoffLimit: 5        # retry at most 5 times
  template:
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never  # restart policy

Related commands

kubectl apply -f myjob.yaml    # start the Job
kubectl get job                # view this Job
kubectl logs myjob-1122dswzs   # view the logs of the Job's Pod
                                  "},{"location":"end-user/kpanda/workloads/pod-config/lifecycle.html","title":"\u914d\u7f6e\u5bb9\u5668\u751f\u547d\u5468\u671f","text":"

Pods follow a predefined lifecycle, starting in the Pending phase and entering the Running state if at least one container in the Pod starts normally. If any container in the Pod ends in failure, the state becomes Failed. The following phase values indicate which phase of the lifecycle a Pod is in.

| Value | Description |
| --- | --- |
| Pending | The Pod has been accepted by the system, but one or more containers have not yet been created or run. This phase includes the time spent waiting for the Pod to be scheduled and the time spent downloading images over the network. |
| Running | The Pod has been bound to a node and all containers in the Pod have been created. At least one container is still running, or is starting or restarting. |
| Succeeded | All containers in the Pod have terminated successfully and will not be restarted. |
| Failed | All containers in the Pod have terminated, and at least one container terminated in failure, that is, exited with a non-zero status or was terminated by the system. |
| Unknown | The Pod's state cannot be obtained for some reason, usually due to a communication failure with the Pod's host. |

When creating a workload in container management on the 算丰 AI computing platform, an image is usually used to specify the runtime environment in the container. By default, when building an image, the Entrypoint and CMD fields define the command and arguments executed when the container runs. To change the commands and arguments run before the container image starts, after it starts, and before it stops, you can override the image's defaults by setting the container's lifecycle event commands and arguments.
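A minimal sketch of overriding the image's Entrypoint/CMD and adding post-start and pre-stop actions (commands illustrative):

containers:
  - name: app
    image: nginx:1.25
    command: ["/bin/sh", "-c"]                 # overrides the image ENTRYPOINT
    args: ["nginx -g 'daemon off;'"]           # overrides the image CMD
    lifecycle:
      postStart:
        exec:
          command: ["/bin/sh", "-c", "echo started > /tmp/ready"]
      preStop:
        exec:
          command: ["/bin/sh", "-c", "nginx -s quit; sleep 5"]   # drain traffic before shutdown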

                                  "},{"location":"end-user/kpanda/workloads/pod-config/lifecycle.html#_2","title":"\u751f\u547d\u5468\u671f\u914d\u7f6e","text":"

Configure the container's startup command, post-start command, and pre-stop command according to business needs.

| Parameter | Description |
| --- | --- |
| Startup command | [Type] Optional [Meaning] The container starts according to this startup command. |
| Post-start command | [Type] Optional [Meaning] The command issued after the container starts. |
| Pre-stop command | [Type] Optional [Meaning] The command executed after the container receives a stop command, ensuring that business running in the instance can be drained in advance during an upgrade or instance deletion. |

"},{"location":"end-user/kpanda/workloads/pod-config/lifecycle.html#_3","title":"Startup Command","text":"

Configure the startup command according to the table below.

| Parameter | Description | Example |
| --- | --- | --- |
| Run command | [Type] Required [Meaning] Enter an executable command; separate multiple commands with spaces; if a command itself contains spaces, add quotation marks ("") [Meaning] With multiple commands, it is recommended to use /bin/sh or another shell as the run command and pass all the other commands in as arguments | /run/server |
| Run arguments | [Type] Optional [Meaning] Enter the arguments of the container's run command | port=8080 |

"},{"location":"end-user/kpanda/workloads/pod-config/lifecycle.html#_4","title":"Post-Start Command","text":"

                                  \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u63d0\u4f9b\u547d\u4ee4\u884c\u811a\u672c\u548c HTTP \u8bf7\u6c42\u4e24\u79cd\u5904\u7406\u7c7b\u578b\u5bf9\u542f\u52a8\u540e\u547d\u4ee4\u8fdb\u884c\u914d\u7f6e\u3002\u60a8\u53ef\u4ee5\u6839\u636e\u4e0b\u8868\u9009\u62e9\u9002\u5408\u60a8\u7684\u914d\u7f6e\u65b9\u5f0f\u3002

                                  \u547d\u4ee4\u884c\u811a\u672c\u914d\u7f6e

                                  \u53c2\u6570 \u8bf4\u660e \u4e3e\u4f8b\u503c \u8fd0\u884c\u547d\u4ee4 \u3010\u7c7b\u578b\u3011\u9009\u586b\u3010\u542b\u4e49\u3011\u8f93\u5165\u53ef\u6267\u884c\u7684\u547d\u4ee4\uff0c\u591a\u4e2a\u547d\u4ee4\u4e4b\u95f4\u7528\u7a7a\u683c\u8fdb\u884c\u5206\u5272\uff0c\u5982\u547d\u4ee4\u672c\u8eab\u5e26\u7a7a\u683c\uff0c\u5219\u9700\u8981\u52a0\uff08\u201c\u201d\uff09\u3002\u3010\u542b\u4e49\u3011\u591a\u547d\u4ee4\u65f6\uff0c\u8fd0\u884c\u547d\u4ee4\u5efa\u8bae\u7528/bin/sh\u6216\u5176\u4ed6\u7684shell\uff0c\u5176\u4ed6\u5168\u90e8\u547d\u4ee4\u4f5c\u4e3a\u53c2\u6570\u6765\u4f20\u5165\u3002 /run/server \u8fd0\u884c\u53c2\u6570 \u3010\u7c7b\u578b\u3011\u9009\u586b\u3010\u542b\u4e49\u3011\u8f93\u5165\u63a7\u5236\u5bb9\u5668\u8fd0\u884c\u547d\u4ee4\u53c2\u6570\u3002 port=8080"},{"location":"end-user/kpanda/workloads/pod-config/lifecycle.html#_5","title":"\u505c\u6b62\u524d\u547d\u4ee4","text":"

                                  \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u63d0\u4f9b\u547d\u4ee4\u884c\u811a\u672c\u548c HTTP \u8bf7\u6c42\u4e24\u79cd\u5904\u7406\u7c7b\u578b\u5bf9\u505c\u6b62\u524d\u547d\u4ee4\u8fdb\u884c\u914d\u7f6e\u3002\u60a8\u53ef\u4ee5\u6839\u636e\u4e0b\u8868\u9009\u62e9\u9002\u5408\u60a8\u7684\u914d\u7f6e\u65b9\u5f0f\u3002

                                  HTTP \u8bf7\u6c42\u914d\u7f6e

                                  \u53c2\u6570 \u8bf4\u660e \u4e3e\u4f8b\u503c URL \u8def\u5f84 \u3010\u7c7b\u578b\u3011\u9009\u586b\u3010\u542b\u4e49\u3011\u8bf7\u6c42\u7684URL\u8def\u5f84\u3002\u3010\u542b\u4e49\u3011\u591a\u547d\u4ee4\u65f6\uff0c\u8fd0\u884c\u547d\u4ee4\u5efa\u8bae\u7528/bin/sh\u6216\u5176\u4ed6\u7684shell\uff0c\u5176\u4ed6\u5168\u90e8\u547d\u4ee4\u4f5c\u4e3a\u53c2\u6570\u6765\u4f20\u5165\u3002 /run/server \u7aef\u53e3 \u3010\u7c7b\u578b\u3011\u5fc5\u586b\u3010\u542b\u4e49\u3011\u8bf7\u6c42\u7684\u7aef\u53e3\u3002 port=8080 \u8282\u70b9\u5730\u5740 \u3010\u7c7b\u578b\u3011\u9009\u586b\u3010\u542b\u4e49\u3011\u8bf7\u6c42\u7684 IP \u5730\u5740\uff0c\u9ed8\u8ba4\u662f\u5bb9\u5668\u6240\u5728\u7684\u8282\u70b9 IP\u3002"},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html","title":"\u8c03\u5ea6\u7b56\u7565","text":"

                                   In a Kubernetes cluster, nodes also have labels. You can add labels manually, and Kubernetes also adds a set of standard labels to every node in the cluster; see Well-Known Labels, Annotations and Taints for the common node labels. By adding labels to nodes, you can have Pods scheduled onto specific nodes or node groups, and you can use this capability to ensure that particular Pods run only on nodes with certain isolation, security, or regulatory properties.

                                   nodeSelector is the simplest recommended form of node selection constraint. You can add the nodeSelector field to the Pod spec and set the node labels you want the target node to have; Kubernetes only schedules the Pod onto nodes that carry every specified label. While nodeSelector is the simplest way to constrain Pods to nodes with specific labels, affinity and anti-affinity extend the types of constraints you can define. Some benefits of affinity and anti-affinity are:

                                   • The affinity and anti-affinity language is more expressive. nodeSelector only selects nodes that have all the specified labels, whereas affinity and anti-affinity give you stronger control over the selection logic.

                                   • You can mark a rule as a "soft requirement" or "preference", so that when no matching node can be found the scheduler ignores the affinity/anti-affinity rule and still schedules the Pod successfully.

                                   • You can constrain scheduling using the labels of other Pods running on a node (or in another topology domain), instead of only the labels of the node itself. This capability lets you define rules for which Pods may be placed together.

                                   You can choose the nodes a Pod is deployed onto by configuring affinity and anti-affinity; a minimal nodeSelector sketch follows as a baseline.
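
                                   A minimal sketch of the nodeSelector form described above (the label key and value are illustrative):

                                   apiVersion: v1\nkind: Pod\nmetadata:\n  name: nodeselector-demo\nspec:\n  nodeSelector:\n    disktype: ssd        # the Pod is only scheduled onto nodes carrying this label\n  containers:\n    - name: app\n      image: nginx:1.25  # illustrative image\n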

                                  "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_2","title":"\u5bb9\u5fcd\u65f6\u95f4","text":"

                                  \u5f53\u5de5\u4f5c\u8d1f\u8f7d\u5b9e\u4f8b\u6240\u5728\u7684\u8282\u70b9\u4e0d\u53ef\u7528\u65f6\uff0c\u7cfb\u7edf\u5c06\u5b9e\u4f8b\u91cd\u65b0\u8c03\u5ea6\u5230\u5176\u5b83\u53ef\u7528\u8282\u70b9\u7684\u65f6\u95f4\u7a97\u3002\u9ed8\u8ba4\u4e3a 300 \u79d2\u3002
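
                                   In plain Kubernetes terms this window corresponds to tolerationSeconds on the standard not-ready/unreachable tolerations; a minimal Pod-spec fragment, assuming the 300-second default:

                                   tolerations:\n  - key: node.kubernetes.io/unreachable\n    operator: Exists\n    effect: NoExecute\n    tolerationSeconds: 300   # evict and reschedule 300s after the node becomes unreachable\n  - key: node.kubernetes.io/not-ready\n    operator: Exists\n    effect: NoExecute\n    tolerationSeconds: 300\n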

                                  "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#nodeaffinity","title":"\u8282\u70b9\u4eb2\u548c\u6027\uff08nodeAffinity\uff09","text":"

                                  \u8282\u70b9\u4eb2\u548c\u6027\u6982\u5ff5\u4e0a\u7c7b\u4f3c\u4e8e nodeSelector \uff0c \u5b83\u4f7f\u60a8\u53ef\u4ee5\u6839\u636e\u8282\u70b9\u4e0a\u7684\u6807\u7b7e\u6765\u7ea6\u675f Pod \u53ef\u4ee5\u8c03\u5ea6\u5230\u54ea\u4e9b\u8282\u70b9\u4e0a\u3002 \u8282\u70b9\u4eb2\u548c\u6027\u6709\u4e24\u79cd\uff1a

                                  • \u5fc5\u987b\u6ee1\u8db3\uff1a\uff08 requiredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u53ea\u6709\u5728\u89c4\u5219\u88ab\u6ee1\u8db3\u7684\u65f6\u5019\u624d\u80fd\u6267\u884c\u8c03\u5ea6\u3002\u6b64\u529f\u80fd\u7c7b\u4f3c\u4e8e nodeSelector \uff0c \u4f46\u5176\u8bed\u6cd5\u8868\u8fbe\u80fd\u529b\u66f4\u5f3a\u3002\u60a8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002

                                  • \u5c3d\u91cf\u6ee1\u8db3\uff1a\uff08 preferredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u4f1a\u5c1d\u8bd5\u5bfb\u627e\u6ee1\u8db3\u5bf9\u5e94\u89c4\u5219\u7684\u8282\u70b9\u3002\u5982\u679c\u627e\u4e0d\u5230\u5339\u914d\u7684\u8282\u70b9\uff0c\u8c03\u5ea6\u5668\u4ecd\u7136\u4f1a\u8c03\u5ea6\u8be5 Pod\u3002\u60a8\u8fd8\u53ef\u4e3a\u8f6f\u7ea6\u675f\u89c4\u5219\u8bbe\u5b9a\u6743\u91cd\uff0c\u5177\u4f53\u8c03\u5ea6\u65f6\uff0c\u82e5\u5b58\u5728\u591a\u4e2a\u7b26\u5408\u6761\u4ef6\u7684\u8282\u70b9\uff0c\u6743\u91cd\u6700\u5927\u7684\u8282\u70b9\u4f1a\u88ab\u4f18\u5148\u8c03\u5ea6\u3002\u540c\u65f6\u60a8\u8fd8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002
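
                                   A minimal sketch combining both forms (the label keys, values, and weight are illustrative):

                                   affinity:\n  nodeAffinity:\n    requiredDuringSchedulingIgnoredDuringExecution:\n      nodeSelectorTerms:               # multiple terms are ORed: matching one is enough\n        - matchExpressions:\n            - key: disktype\n              operator: In\n              values: [\"ssd\"]\n    preferredDuringSchedulingIgnoredDuringExecution:\n      - weight: 80                     # 1-100; larger weights are preferred\n        preference:\n          matchExpressions:\n            - key: topology.kubernetes.io/zone\n              operator: In\n              values: [\"zone-a\"]\n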

                                  "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_3","title":"\u6807\u7b7e\u540d","text":"

                                  \u5bf9\u5e94\u8282\u70b9\u7684\u6807\u7b7e\uff0c\u53ef\u4ee5\u4f7f\u7528\u9ed8\u8ba4\u7684\u6807\u7b7e\u4e5f\u53ef\u4ee5\u7528\u6237\u81ea\u5b9a\u4e49\u6807\u7b7e\u3002

                                  "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_4","title":"\u64cd\u4f5c\u7b26","text":"
                                  • In\uff1a\u6807\u7b7e\u503c\u9700\u8981\u5728 values \u7684\u5217\u8868\u4e2d
                                  • NotIn\uff1a\u6807\u7b7e\u7684\u503c\u4e0d\u5728\u67d0\u4e2a\u5217\u8868\u4e2d
                                  • Exists\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                                  • DoesNotExist\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u4e0d\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                                  • Gt\uff1a\u6807\u7b7e\u7684\u503c\u5927\u4e8e\u67d0\u4e2a\u503c\uff08\u5b57\u7b26\u4e32\u6bd4\u8f83\uff09
                                  • Lt\uff1a\u6807\u7b7e\u7684\u503c\u5c0f\u4e8e\u67d0\u4e2a\u503c\uff08\u5b57\u7b26\u4e32\u6bd4\u8f83\uff09
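
                                   A sketch of how these operators appear inside matchExpressions (the label keys and values are illustrative; expressions in one list are ANDed):

                                   matchExpressions:\n  - key: disktype\n    operator: In\n    values: [\"ssd\", \"nvme\"]\n  - key: gpu-vendor\n    operator: Exists       # only the key is checked; no values list\n  - key: cpu-cores\n    operator: Gt           # value is compared as an integer\n    values: [\"8\"]\n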
                                  "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_5","title":"\u6743\u91cd","text":"

                                  \u4ec5\u652f\u6301\u5728\u201c\u5c3d\u91cf\u6ee1\u8db3\u201d\u7b56\u7565\u4e2d\u6dfb\u52a0\uff0c\u53ef\u4ee5\u7406\u89e3\u4e3a\u8c03\u5ea6\u7684\u4f18\u5148\u7ea7\uff0c\u6743\u91cd\u5927\u7684\u4f1a\u88ab\u4f18\u5148\u8c03\u5ea6\u3002\u53d6\u503c\u8303\u56f4\u662f 1 \u5230 100\u3002

                                  "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_6","title":"\u5de5\u4f5c\u8d1f\u8f7d\u4eb2\u548c\u6027","text":"

                                  \u4e0e\u8282\u70b9\u4eb2\u548c\u6027\u7c7b\u4f3c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u7684\u4eb2\u548c\u6027\u4e5f\u6709\u4e24\u79cd\u7c7b\u578b\uff1a

                                  • \u5fc5\u987b\u6ee1\u8db3\uff1a\uff08 requiredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u53ea\u6709\u5728\u89c4\u5219\u88ab\u6ee1\u8db3\u7684\u65f6\u5019\u624d\u80fd\u6267\u884c\u8c03\u5ea6\u3002\u6b64\u529f\u80fd\u7c7b\u4f3c\u4e8e nodeSelector \uff0c \u4f46\u5176\u8bed\u6cd5\u8868\u8fbe\u80fd\u529b\u66f4\u5f3a\u3002\u60a8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002
                                  • \u5c3d\u91cf\u6ee1\u8db3\uff1a\uff08 preferredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u4f1a\u5c1d\u8bd5\u5bfb\u627e\u6ee1\u8db3\u5bf9\u5e94\u89c4\u5219\u7684\u8282\u70b9\u3002\u5982\u679c\u627e\u4e0d\u5230\u5339\u914d\u7684\u8282\u70b9\uff0c\u8c03\u5ea6\u5668\u4ecd\u7136\u4f1a\u8c03\u5ea6\u8be5 Pod\u3002\u60a8\u8fd8\u53ef\u4e3a\u8f6f\u7ea6\u675f\u89c4\u5219\u8bbe\u5b9a\u6743\u91cd\uff0c\u5177\u4f53\u8c03\u5ea6\u65f6\uff0c\u82e5\u5b58\u5728\u591a\u4e2a\u7b26\u5408\u6761\u4ef6\u7684\u8282\u70b9\uff0c\u6743\u91cd\u6700\u5927\u7684\u8282\u70b9\u4f1a\u88ab\u4f18\u5148\u8c03\u5ea6\u3002\u540c\u65f6\u60a8\u8fd8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002

                                  \u5de5\u4f5c\u8d1f\u8f7d\u7684\u4eb2\u548c\u6027\u4e3b\u8981\u7528\u6765\u51b3\u5b9a\u5de5\u4f5c\u8d1f\u8f7d\u7684 Pod \u53ef\u4ee5\u548c\u54ea\u4e9b Pod\u90e8 \u7f72\u5728\u540c\u4e00\u62d3\u6251\u57df\u3002\u4f8b\u5982\uff0c\u5bf9\u4e8e\u76f8\u4e92\u901a\u4fe1\u7684\u670d\u52a1\uff0c\u53ef\u901a\u8fc7\u5e94\u7528\u4eb2\u548c\u6027\u8c03\u5ea6\uff0c\u5c06\u5176\u90e8\u7f72\u5230\u540c\u4e00\u62d3\u6251\u57df\uff08\u5982\u540c\u4e00\u53ef\u7528\u533a\uff09\u4e2d\uff0c\u51cf\u5c11\u5b83\u4eec\u4e4b\u95f4\u7684\u7f51\u7edc\u5ef6\u8fdf\u3002
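
                                   A minimal podAffinity sketch that co-locates the workload with Pods labeled app=backend in the same availability zone (the label, namespace, and topology key are illustrative):

                                   affinity:\n  podAffinity:\n    requiredDuringSchedulingIgnoredDuringExecution:\n      - labelSelector:\n          matchLabels:\n            app: backend\n        namespaces: [\"demo\"]                       # namespace of the Pods to match\n        topologyKey: topology.kubernetes.io/zone   # the topology domain\n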

                                  "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_7","title":"\u6807\u7b7e\u540d","text":"

                                  \u5bf9\u5e94\u8282\u70b9\u7684\u6807\u7b7e\uff0c\u53ef\u4ee5\u4f7f\u7528\u9ed8\u8ba4\u7684\u6807\u7b7e\u4e5f\u53ef\u4ee5\u7528\u6237\u81ea\u5b9a\u4e49\u6807\u7b7e\u3002

                                  "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_8","title":"\u547d\u540d\u7a7a\u95f4","text":"

                                  \u6307\u5b9a\u8c03\u5ea6\u7b56\u7565\u751f\u6548\u7684\u547d\u540d\u7a7a\u95f4\u3002

                                  "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_9","title":"\u64cd\u4f5c\u7b26","text":"
                                  • In\uff1a\u6807\u7b7e\u503c\u9700\u8981\u5728 values \u7684\u5217\u8868\u4e2d
                                  • NotIn\uff1a\u6807\u7b7e\u7684\u503c\u4e0d\u5728\u67d0\u4e2a\u5217\u8868\u4e2d
                                  • Exists\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                                  • DoesNotExist\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u4e0d\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                                  "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_10","title":"\u62d3\u6251\u57df","text":"

                                  \u6307\u5b9a\u8c03\u5ea6\u65f6\u7684\u5f71\u54cd\u8303\u56f4\u3002\u4f8b\u5982\uff0c\u5982\u679c\u6307\u5b9a\u4e3a kubernetes.io/Clustername \u8868\u793a\u4ee5 Node \u8282\u70b9\u4e3a\u533a\u5206\u8303\u56f4\u3002

                                  "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_11","title":"\u5de5\u4f5c\u8d1f\u8f7d\u53cd\u4eb2\u548c\u6027","text":"

                                  \u4e0e\u8282\u70b9\u4eb2\u548c\u6027\u7c7b\u4f3c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u7684\u53cd\u4eb2\u548c\u6027\u4e5f\u6709\u4e24\u79cd\u7c7b\u578b\uff1a

                                  • \u5fc5\u987b\u6ee1\u8db3\uff1a\uff08 requiredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u53ea\u6709\u5728\u89c4\u5219\u88ab\u6ee1\u8db3\u7684\u65f6\u5019\u624d\u80fd\u6267\u884c\u8c03\u5ea6\u3002\u6b64\u529f\u80fd\u7c7b\u4f3c\u4e8e nodeSelector \uff0c \u4f46\u5176\u8bed\u6cd5\u8868\u8fbe\u80fd\u529b\u66f4\u5f3a\u3002\u60a8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002
                                  • \u5c3d\u91cf\u6ee1\u8db3\uff1a\uff08 preferredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u4f1a\u5c1d\u8bd5\u5bfb\u627e\u6ee1\u8db3\u5bf9\u5e94\u89c4\u5219\u7684\u8282\u70b9\u3002\u5982\u679c\u627e\u4e0d\u5230\u5339\u914d\u7684\u8282\u70b9\uff0c\u8c03\u5ea6\u5668\u4ecd\u7136\u4f1a\u8c03\u5ea6\u8be5 Pod\u3002\u60a8\u8fd8\u53ef\u4e3a\u8f6f\u7ea6\u675f\u89c4\u5219\u8bbe\u5b9a\u6743\u91cd\uff0c\u5177\u4f53\u8c03\u5ea6\u65f6\uff0c\u82e5\u5b58\u5728\u591a\u4e2a\u7b26\u5408\u6761\u4ef6\u7684\u8282\u70b9\uff0c\u6743\u91cd\u6700\u5927\u7684\u8282\u70b9\u4f1a\u88ab\u4f18\u5148\u8c03\u5ea6\u3002\u540c\u65f6\u60a8\u8fd8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002

                                  \u5de5\u4f5c\u8d1f\u8f7d\u7684\u53cd\u4eb2\u548c\u6027\u4e3b\u8981\u7528\u6765\u51b3\u5b9a\u5de5\u4f5c\u8d1f\u8f7d\u7684 Pod \u4e0d\u53ef\u4ee5\u548c\u54ea\u4e9b Pod \u90e8\u7f72\u5728\u540c\u4e00\u62d3\u6251\u57df\u3002\u4f8b\u5982\uff0c\u5c06\u4e00\u4e2a\u8d1f\u8f7d\u7684\u76f8\u540c Pod \u5206\u6563\u90e8\u7f72\u5230\u4e0d\u540c\u7684\u62d3\u6251\u57df\uff08\u4f8b\u5982\u4e0d\u540c\u4e3b\u673a\uff09\u4e2d\uff0c\u63d0\u9ad8\u8d1f\u8f7d\u672c\u8eab\u7684\u7a33\u5b9a\u6027\u3002
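
                                   A minimal podAntiAffinity sketch that spreads replicas carrying the label app=web across different hosts (the label is illustrative):

                                   affinity:\n  podAntiAffinity:\n    requiredDuringSchedulingIgnoredDuringExecution:\n      - labelSelector:\n          matchLabels:\n            app: web\n        topologyKey: kubernetes.io/hostname   # at most one matching Pod per node\n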

                                  "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_12","title":"\u6807\u7b7e\u540d","text":"

                                  \u5bf9\u5e94\u8282\u70b9\u7684\u6807\u7b7e\uff0c\u53ef\u4ee5\u4f7f\u7528\u9ed8\u8ba4\u7684\u6807\u7b7e\u4e5f\u53ef\u4ee5\u7528\u6237\u81ea\u5b9a\u4e49\u6807\u7b7e\u3002

                                  "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_13","title":"\u547d\u540d\u7a7a\u95f4","text":"

                                  \u6307\u5b9a\u8c03\u5ea6\u7b56\u7565\u751f\u6548\u7684\u547d\u540d\u7a7a\u95f4\u3002

                                  "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_14","title":"\u64cd\u4f5c\u7b26","text":"
                                  • In\uff1a\u6807\u7b7e\u503c\u9700\u8981\u5728 values \u7684\u5217\u8868\u4e2d
                                  • NotIn\uff1a\u6807\u7b7e\u7684\u503c\u4e0d\u5728\u67d0\u4e2a\u5217\u8868\u4e2d
                                  • Exists\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                                  • DoesNotExist\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u4e0d\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                                  "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_15","title":"\u62d3\u6251\u57df","text":"

                                  \u6307\u5b9a\u8c03\u5ea6\u65f6\u7684\u5f71\u54cd\u8303\u56f4\u3002\u4f8b\u5982\uff0c\u5982\u679c\u6307\u5b9a\u4e3a kubernetes.io/Clustername \u8868\u793a\u4ee5 Node \u8282\u70b9\u4e3a\u533a\u5206\u8303\u56f4\u3002

                                  "},{"location":"end-user/kpanda/workloads/pod-config/workload-status.html","title":"\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001","text":"

                                  \u5de5\u4f5c\u8d1f\u8f7d\u662f\u8fd0\u884c\u5728 Kubernetes \u4e0a\u7684\u4e00\u4e2a\u5e94\u7528\u7a0b\u5e8f\uff0c\u5728 Kubernetes \u4e2d\uff0c\u65e0\u8bba\u60a8\u7684\u5e94\u7528\u7a0b\u5e8f\u662f\u7531\u5355\u4e2a\u540c\u4e00\u7ec4\u4ef6\u6216\u662f\u7531\u591a\u4e2a\u4e0d\u540c\u7684\u7ec4\u4ef6\u6784\u6210\uff0c\u90fd\u53ef\u4ee5\u4f7f\u7528\u4e00\u7ec4 Pod \u6765\u8fd0\u884c\u5b83\u3002Kubernetes \u63d0\u4f9b\u4e86\u4e94\u79cd\u5185\u7f6e\u7684\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u6765\u7ba1\u7406 Pod\uff1a

                                  • \u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d
                                  • \u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d
                                  • \u5b88\u62a4\u8fdb\u7a0b
                                  • \u4efb\u52a1
                                  • \u5b9a\u65f6\u4efb\u52a1

                                  \u60a8\u4e5f\u53ef\u4ee5\u901a\u8fc7\u8bbe\u7f6e\u81ea\u5b9a\u4e49\u8d44\u6e90 CRD \u6765\u5b9e\u73b0\u5bf9\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u7684\u6269\u5c55\u3002\u5728\u7b2c\u4e94\u4ee3\u5bb9\u5668\u7ba1\u7406\u4e2d\uff0c\u652f\u6301\u5bf9\u5de5\u4f5c\u8d1f\u8f7d\u8fdb\u884c\u521b\u5efa\u3001\u66f4\u65b0\u3001\u6269\u5bb9\u3001\u76d1\u63a7\u3001\u65e5\u5fd7\u3001\u5220\u9664\u3001\u7248\u672c\u7ba1\u7406\u7b49\u5168\u751f\u547d\u5468\u671f\u7ba1\u7406\u3002
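
                                   A minimal sketch of one such resource, a Deployment with two replicas (the names and image are illustrative):

                                   apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: web\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      app: web\n  template:\n    metadata:\n      labels:\n        app: web\n    spec:\n      containers:\n        - name: web\n          image: nginx:1.25\n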

                                  "},{"location":"end-user/kpanda/workloads/pod-config/workload-status.html#pod","title":"Pod \u72b6\u6001","text":"

                                  Pod \u662f Kuberneters \u4e2d\u521b\u5efa\u548c\u7ba1\u7406\u7684\u3001\u6700\u5c0f\u7684\u8ba1\u7b97\u5355\u5143\uff0c\u5373\u4e00\u7ec4\u5bb9\u5668\u7684\u96c6\u5408\u3002\u8fd9\u4e9b\u5bb9\u5668\u5171\u4eab\u5b58\u50a8\u3001\u7f51\u7edc\u4ee5\u53ca\u7ba1\u7406\u63a7\u5236\u5bb9\u5668\u8fd0\u884c\u65b9\u5f0f\u7684\u7b56\u7565\u3002 Pod \u901a\u5e38\u4e0d\u7531\u7528\u6237\u76f4\u63a5\u521b\u5efa\uff0c\u800c\u662f\u901a\u8fc7\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u6765\u521b\u5efa\u3002 Pod \u9075\u5faa\u4e00\u4e2a\u9884\u5b9a\u4e49\u7684\u751f\u547d\u5468\u671f\uff0c\u8d77\u59cb\u4e8e Pending \u9636\u6bb5\uff0c\u5982\u679c\u81f3\u5c11\u5176\u4e2d\u6709\u4e00\u4e2a\u4e3b\u8981\u5bb9\u5668\u6b63\u5e38\u542f\u52a8\uff0c\u5219\u8fdb\u5165 Running \uff0c\u4e4b\u540e\u53d6\u51b3\u4e8e Pod \u4e2d\u662f\u5426\u6709\u5bb9\u5668\u4ee5\u5931\u8d25\u72b6\u6001\u7ed3\u675f\u800c\u8fdb\u5165 Succeeded \u6216\u8005 Failed \u9636\u6bb5\u3002

                                  "},{"location":"end-user/kpanda/workloads/pod-config/workload-status.html#_2","title":"\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001","text":"

                                  \u7b2c\u4e94\u4ee3\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4f9d\u636e Pod \u7684\u72b6\u6001\u3001\u526f\u672c\u6570\u7b49\u56e0\u7d20\uff0c\u8bbe\u8ba1\u4e86\u4e00\u79cd\u5185\u7f6e\u7684\u5de5\u4f5c\u8d1f\u8f7d\u751f\u547d\u5468\u671f\u7684\u72b6\u6001\u96c6\uff0c\u4ee5\u8ba9\u7528\u6237\u80fd\u591f\u66f4\u52a0\u771f\u5b9e\u7684\u611f\u77e5\u5de5\u4f5c\u8d1f\u8f7d\u8fd0\u884c\u60c5\u51b5\u3002 \u7531\u4e8e\u4e0d\u540c\u7684\u5de5\u4f5c\u8d1f\u8f7d\u7c7b\u578b\uff08\u6bd4\u5982\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u548c\u4efb\u52a1\uff09\u5bf9 Pod \u7684\u7ba1\u7406\u673a\u5236\u4e0d\u4e00\u81f4\uff0c\u56e0\u6b64\uff0c\u4e0d\u540c\u7684\u5de5\u4f5c\u8d1f\u8f7d\u5728\u8fd0\u884c\u8fc7\u7a0b\u4e2d\u4f1a\u5448\u73b0\u4e0d\u540c\u7684\u751f\u547d\u5468\u671f\u72b6\u6001\uff0c\u5177\u4f53\u5982\u4e0b\u8868\uff1a

                                  "},{"location":"end-user/kpanda/workloads/pod-config/workload-status.html#_3","title":"\u65e0\u72b6\u6001\u8d1f\u8f7d\u3001\u6709\u72b6\u6001\u8d1f\u8f7d\u3001\u5b88\u62a4\u8fdb\u7a0b\u72b6\u6001","text":"\u72b6\u6001 \u63cf\u8ff0 \u7b49\u5f85\u4e2d 1. \u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u6b63\u5728\u8fdb\u884c\u4e2d\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u30022. \u89e6\u53d1\u5347\u7ea7\u6216\u8005\u56de\u6eda\u52a8\u4f5c\u540e\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u30023. \u89e6\u53d1\u6682\u505c/\u6269\u7f29\u5bb9\u7b49\u64cd\u4f5c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u5728\u6b64\u72b6\u6001\u3002 \u8fd0\u884c\u4e2d \u8d1f\u8f7d\u4e0b\u7684\u6240\u6709\u5b9e\u4f8b\u90fd\u5728\u8fd0\u884c\u4e2d\u4e14\u526f\u672c\u6570\u4e0e\u7528\u6237\u9884\u5b9a\u4e49\u7684\u6570\u91cf\u4e00\u81f4\u65f6\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5220\u9664\u4e2d \u6267\u884c\u5220\u9664\u64cd\u4f5c\u65f6\uff0c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\uff0c\u76f4\u5230\u5220\u9664\u5b8c\u6210\u3002 \u5f02\u5e38 \u56e0\u4e3a\u67d0\u4e9b\u539f\u56e0\u65e0\u6cd5\u53d6\u5f97\u5de5\u4f5c\u8d1f\u8f7d\u7684\u72b6\u6001\u3002\u8fd9\u79cd\u60c5\u51b5\u901a\u5e38\u662f\u56e0\u4e3a\u4e0e Pod \u6240\u5728\u4e3b\u673a\u901a\u4fe1\u5931\u8d25\u3002 \u672a\u5c31\u7eea \u5bb9\u5668\u5904\u4e8e\u5f02\u5e38\uff0cpending \u72b6\u6001\u65f6\uff0c\u56e0\u672a\u77e5\u9519\u8bef\u5bfc\u81f4\u8d1f\u8f7d\u65e0\u6cd5\u542f\u52a8\u65f6\u663e\u793a\u6b64\u72b6\u6001"},{"location":"end-user/kpanda/workloads/pod-config/workload-status.html#_4","title":"\u4efb\u52a1\u72b6\u6001","text":"\u72b6\u6001 \u63cf\u8ff0 \u7b49\u5f85\u4e2d \u4efb\u52a1\u521b\u5efa\u6b63\u5728\u8fdb\u884c\u4e2d\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u6267\u884c\u4e2d \u4efb\u52a1\u6b63\u5728\u6267\u884c\u4e2d\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u6267\u884c\u5b8c\u6210 \u4efb\u52a1\u6267\u884c\u5b8c\u6210\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5220\u9664\u4e2d \u89e6\u53d1\u5220\u9664\u64cd\u4f5c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u5728\u6b64\u72b6\u6001\u3002 \u5f02\u5e38 \u56e0\u4e3a\u67d0\u4e9b\u539f\u56e0\u65e0\u6cd5\u53d6\u5f97 Pod \u7684\u72b6\u6001\u3002\u8fd9\u79cd\u60c5\u51b5\u901a\u5e38\u662f\u56e0\u4e3a\u4e0e Pod \u6240\u5728\u4e3b\u673a\u901a\u4fe1\u5931\u8d25\u3002"},{"location":"end-user/kpanda/workloads/pod-config/workload-status.html#_5","title":"\u5b9a\u65f6\u4efb\u52a1\u72b6\u6001","text":"\u72b6\u6001 \u63cf\u8ff0 \u7b49\u5f85\u4e2d \u5b9a\u65f6\u4efb\u52a1\u521b\u5efa\u6b63\u5728\u8fdb\u884c\u4e2d\uff0c\u5b9a\u65f6\u4efb\u52a1\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5df2\u542f\u52a8 \u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\u6210\u529f\u540e\uff0c\u6b63\u5e38\u8fd0\u884c\u6216\u5c06\u5df2\u6682\u505c\u7684\u4efb\u52a1\u542f\u52a8\u65f6\u5b9a\u65f6\u4efb\u52a1\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5df2\u505c\u6b62 \u6267\u884c\u505c\u6b62\u4efb\u52a1\u64cd\u4f5c\u65f6\uff0c\u5b9a\u65f6\u4efb\u52a1\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5220\u9664\u4e2d \u89e6\u53d1\u5220\u9664\u64cd\u4f5c\uff0c\u5b9a\u65f6\u4efb\u52a1\u5904\u5728\u6b64\u72b6\u6001\u3002

                                   When a workload is in an Abnormal or Not Ready status, you can hover over the status value and the system will display more detailed error information in a tooltip. You can also check the logs or events to obtain information about how the workload is running.

                                  "},{"location":"end-user/register/index.html","title":"\u7528\u6237\u6ce8\u518c","text":"

                                  \u65b0\u7528\u6237\u9996\u6b21\u4f7f\u7528 AI \u7b97\u529b\u5e73\u53f0\u9700\u8981\u8fdb\u884c\u6ce8\u518c\u3002

                                  "},{"location":"end-user/register/index.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                  • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                  • \u5df2\u5f00\u542f\u90ae\u7bb1\u6ce8\u518c\u529f\u80fd
                                  • \u6709\u4e00\u4e2a\u53ef\u7528\u7684\u90ae\u7bb1
                                  "},{"location":"end-user/register/index.html#_3","title":"\u90ae\u7bb1\u6ce8\u518c\u6b65\u9aa4","text":"
                                  1. \u6253\u5f00 AI \u7b97\u529b\u5e73\u53f0\u9996\u9875 https://ai.isuanova.com/\uff0c\u70b9\u51fb \u6ce8\u518c

                                  2. \u952e\u5165\u7528\u6237\u540d\u3001\u5bc6\u7801\u3001\u90ae\u7bb1\u540e\u70b9\u51fb \u6ce8\u518c

                                  3. \u7cfb\u7edf\u63d0\u793a\u53d1\u9001\u4e86\u4e00\u5c01\u90ae\u4ef6\u5230\u60a8\u7684\u90ae\u7bb1\u3002

                                  4. \u767b\u5f55\u81ea\u5df1\u7684\u90ae\u7bb1\uff0c\u627e\u5230\u90ae\u4ef6\uff0c\u70b9\u51fb\u94fe\u63a5\u3002

                                  5. \u606d\u559c\uff0c\u60a8\u6210\u529f\u8fdb\u5165\u4e86 AI \u7b97\u529b\u5e73\u53f0\uff0c\u73b0\u5728\u53ef\u4ee5\u5f00\u59cb\u60a8\u7684 AI \u4e4b\u65c5\u4e86\u3002

                                  "},{"location":"end-user/share/notebook.html","title":"\u4f7f\u7528 Notebook","text":"

                                  Notebook \u901a\u5e38\u6307\u7684\u662f Jupyter Notebook \u6216\u7c7b\u4f3c\u7684\u4ea4\u4e92\u5f0f\u8ba1\u7b97\u73af\u5883\u3002 \u8fd9\u662f\u4e00\u79cd\u975e\u5e38\u6d41\u884c\u7684\u5de5\u5177\uff0c\u5e7f\u6cdb\u7528\u4e8e\u6570\u636e\u79d1\u5b66\u3001\u673a\u5668\u5b66\u4e60\u548c\u6df1\u5ea6\u5b66\u4e60\u7b49\u9886\u57df\u3002 \u672c\u9875\u8bf4\u660e\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4f7f\u7528 Notebook\u3002

                                  "},{"location":"end-user/share/notebook.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                  • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                  • \u7528\u6237\u5df2\u6210\u529f\u6ce8\u518c
                                  • \u7ba1\u7406\u5458\u4e3a\u7528\u6237\u5206\u914d\u4e86\u5de5\u4f5c\u7a7a\u95f4
                                  • \u5df2\u51c6\u5907\u597d\u6570\u636e\u96c6\uff08\u4ee3\u7801\u3001\u6570\u636e\u7b49\uff09
                                  "},{"location":"end-user/share/notebook.html#notebook_1","title":"\u521b\u5efa\u548c\u4f7f\u7528 Notebook \u5b9e\u4f8b","text":"
                                  1. \u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                                  2. \u5bfc\u822a\u81f3 AI Lab -> \u8fd0\u7ef4\u7ba1\u7406 -> \u961f\u5217\u7ba1\u7406 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae

                                  3. \u952e\u5165\u540d\u79f0\uff0c\u9009\u62e9\u96c6\u7fa4\u3001\u5de5\u4f5c\u7a7a\u95f4\u548c\u914d\u989d\u540e\uff0c\u70b9\u51fb \u786e\u5b9a

                                  4. \u4ee5 \u7528\u6237\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5bfc\u822a\u81f3 AI Lab -> Notebook \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae

                                  5. \u914d\u7f6e\u5404\u9879\u53c2\u6570\u540e\u70b9\u51fb \u786e\u5b9a

                                    \u57fa\u672c\u4fe1\u606f\u8d44\u6e90\u914d\u7f6e\u9ad8\u7ea7\u914d\u7f6e

                                    \u952e\u5165\u540d\u79f0\uff0c\u9009\u62e9\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\uff0c\u9009\u62e9\u521a\u521b\u5efa\u7684\u961f\u5217\uff0c\u70b9\u51fb \u4e00\u952e\u521d\u59cb\u5316

                                    \u9009\u62e9 Notebook \u7c7b\u578b\uff0c\u914d\u7f6e\u5185\u5b58\u3001CPU\uff0c\u5f00\u542f GPU\uff0c\u521b\u5efa\u548c\u914d\u7f6e PVC\uff1a

                                    \u5f00\u542f SSH \u5916\u7f51\u8bbf\u95ee\uff1a

                                  6. \u81ea\u52a8\u8df3\u8f6c\u5230 Notebook \u5b9e\u4f8b\u5217\u8868\uff0c\u70b9\u51fb\u5b9e\u4f8b\u540d\u79f0

                                  7. \u8fdb\u5165 Notebook \u5b9e\u4f8b\u8be6\u60c5\u9875\uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u6253\u5f00 \u6309\u94ae

                                  8. \u8fdb\u5165\u4e86 Notebook \u5f00\u53d1\u73af\u5883\uff0c\u6bd4\u5982\u5728 /home/jovyan \u76ee\u5f55\u6302\u8f7d\u4e86\u6301\u4e45\u5377\uff0c\u53ef\u4ee5\u901a\u8fc7 git \u514b\u9686\u4ee3\u7801\uff0c\u901a\u8fc7 SSH \u8fde\u63a5\u540e\u4e0a\u4f20\u6570\u636e\u7b49\u3002

                                  "},{"location":"end-user/share/notebook.html#ssh-notebook","title":"\u901a\u8fc7 SSH \u8bbf\u95ee Notebook \u5b9e\u4f8b","text":"
                                  1. \u5728\u81ea\u5df1\u7684\u7535\u8111\u4e0a\u751f\u6210 SSH \u5bc6\u94a5\u5bf9

                                    \u5728\u81ea\u5df1\u7535\u8111\u4e0a\u6253\u5f00\u547d\u4ee4\u884c\uff0c\u6bd4\u5982\u5728 Windows \u4e0a\u6253\u5f00 git bash\uff0c\u8f93\u5165 ssh-keygen.exe -t rsa\uff0c\u7136\u540e\u4e00\u8def\u56de\u8f66\u3002

                                  2. \u901a\u8fc7 cat ~/.ssh/id_rsa.pub \u7b49\u547d\u4ee4\u67e5\u770b\u5e76\u590d\u5236\u516c\u94a5

                                  3. \u4ee5\u7528\u6237\u8eab\u4efd\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5728\u53f3\u4e0a\u89d2\u70b9\u51fb \u4e2a\u4eba\u4e2d\u5fc3 -> SSH \u516c\u94a5 -> \u5bfc\u5165 SSH \u516c\u94a5

                                  4. \u8fdb\u5165 Notebook \u5b9e\u4f8b\u7684\u8be6\u60c5\u9875\uff0c\u590d\u5236 SSH \u7684\u94fe\u63a5

                                  5. \u5728\u5ba2\u6237\u7aef\u4f7f\u7528 SSH \u8bbf\u95ee Notebook \u5b9e\u4f8b

                                  \u4e0b\u4e00\u6b65\uff1a\u521b\u5efa\u8bad\u7ec3\u4efb\u52a1

                                  "},{"location":"end-user/share/workload.html","title":"\u521b\u5efa AI \u8d1f\u8f7d\u4f7f\u7528 GPU \u8d44\u6e90","text":"

                                  \u7ba1\u7406\u5458\u4e3a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u8d44\u6e90\u914d\u989d\u540e\uff0c\u7528\u6237\u5c31\u53ef\u4ee5\u521b\u5efa AI \u5de5\u4f5c\u8d1f\u8f7d\u6765\u4f7f\u7528 GPU \u7b97\u529b\u8d44\u6e90\u3002

                                  "},{"location":"end-user/share/workload.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                  • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                  • \u7528\u6237\u5df2\u6210\u529f\u6ce8\u518c
                                  • \u7ba1\u7406\u5458\u4e3a\u7528\u6237\u5206\u914d\u4e86\u5de5\u4f5c\u7a7a\u95f4
                                  • \u7ba1\u7406\u5458\u4e3a\u5de5\u4f5c\u7a7a\u95f4\u8bbe\u7f6e\u4e86\u8d44\u6e90\u914d\u989d
                                  • \u7ba1\u7406\u5458\u5df2\u7ecf\u4e3a\u7528\u6237\u5206\u914d\u4e86\u4e00\u4e2a\u96c6\u7fa4
                                  "},{"location":"end-user/share/workload.html#ai","title":"\u521b\u5efa AI \u8d1f\u8f7d\u6b65\u9aa4","text":"
                                  1. \u4ee5\u7528\u6237\u8eab\u4efd\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                                  2. \u5bfc\u822a\u81f3 \u5bb9\u5668\u7ba1\u7406 \uff0c\u9009\u62e9\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u70b9\u51fb \u5de5\u4f5c\u8d1f\u8f7d -> \u65e0\u72b6\u6001\u8d1f\u8f7d \uff0c \u70b9\u51fb\u53f3\u4fa7\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae

                                  3. \u914d\u7f6e\u5404\u9879\u53c2\u6570\u540e\u70b9\u51fb \u786e\u5b9a

                                    \u57fa\u672c\u4fe1\u606f\u5bb9\u5668\u914d\u7f6e\u5176\u4ed6

                                    \u9009\u62e9\u81ea\u5df1\u7684\u547d\u540d\u7a7a\u95f4\u3002

                                    \u8bbe\u7f6e\u955c\u50cf\uff0c\u914d\u7f6e CPU\u3001\u5185\u5b58\u3001GPU \u7b49\u8d44\u6e90\uff0c\u8bbe\u7f6e\u542f\u52a8\u547d\u4ee4\u3002

                                    \u670d\u52a1\u914d\u7f6e\u548c\u9ad8\u7ea7\u914d\u7f6e\u53ef\u4ee5\u4f7f\u7528\u9ed8\u8ba4\u914d\u7f6e\u3002

                                  4. \u81ea\u52a8\u8fd4\u56de\u65e0\u72b6\u6001\u8d1f\u8f7d\u5217\u8868\uff0c\u70b9\u51fb\u8d1f\u8f7d\u540d\u79f0

                                  5. \u8fdb\u5165\u8be6\u60c5\u9875\uff0c\u53ef\u4ee5\u770b\u5230 GPU \u914d\u989d

                                  6. \u4f60\u8fd8\u53ef\u4ee5\u8fdb\u5165\u63a7\u5236\u53f0\uff0c\u8fd0\u884c mx-smi \u547d\u4ee4\u67e5\u770b GPU \u8d44\u6e90
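
                                   Behind the container configuration above, a GPU request is expressed as an extended resource limit on the container. A minimal sketch; the resource name is vendor-specific, and nvidia.com/gpu below is only the common example (the steps above use a MetaX tool, so substitute your vendor's resource name):

                                   containers:\n  - name: ai-workload\n    image: my-training-image:latest   # illustrative image\n    resources:\n      limits:\n        nvidia.com/gpu: 1   # vendor-specific extended resource; adjust to your GPU type\n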

                                   Next step: Use Notebook

                                  "},{"location":"openapi/index.html","title":"OpenAPI \u6587\u6863","text":"

                                  \u8fd9\u662f\u9762\u5411\u5f00\u53d1\u8005\u7684\u4e00\u4e9b OpenAPI \u6587\u6863\u3002

                                  • \u4e91\u4e3b\u673a OpenAPI \u6587\u6863
                                  • AI Lab OpenAPI \u6587\u6863
                                  • \u5bb9\u5668\u7ba1\u7406 OpenAPI \u6587\u6863
                                  • \u53ef\u89c2\u6d4b\u6027 OpenAPI \u6587\u6863
                                  • \u5168\u5c40\u7ba1\u7406 OpenAPI \u6587\u6863
                                  "},{"location":"openapi/index.html#openapi_1","title":"\u83b7\u53d6 OpenAPI \u8bbf\u95ee\u5bc6\u94a5","text":"

                                  \u8bbf\u95ee\u5bc6\u94a5\uff08Access Key\uff09\u53ef\u7528\u4e8e\u8bbf\u95ee OpenAPI \u548c\u6301\u7eed\u53d1\u5e03\uff0c\u7528\u6237\u53ef\u5728\u4e2a\u4eba\u4e2d\u5fc3\u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u83b7\u53d6\u5bc6\u94a5\u5e76\u8bbf\u95ee API\u3002

                                  \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5728\u53f3\u4e0a\u89d2\u7684\u4e0b\u62c9\u83dc\u5355\u4e2d\u627e\u5230 \u4e2a\u4eba\u4e2d\u5fc3 \uff0c\u53ef\u4ee5\u5728 \u8bbf\u95ee\u5bc6\u94a5 \u9875\u9762\u7ba1\u7406\u8d26\u53f7\u7684\u8bbf\u95ee\u5bc6\u94a5\u3002

                                  Info

                                  \u8bbf\u95ee\u5bc6\u94a5\u4fe1\u606f\u4ec5\u663e\u793a\u4e00\u6b21\u3002\u5982\u679c\u60a8\u5fd8\u8bb0\u4e86\u8bbf\u95ee\u5bc6\u94a5\u4fe1\u606f\uff0c\u60a8\u9700\u8981\u91cd\u65b0\u521b\u5efa\u65b0\u7684\u8bbf\u95ee\u5bc6\u94a5\u3002

                                  "},{"location":"openapi/index.html#api","title":"\u4f7f\u7528\u5bc6\u94a5\u8bbf\u95ee API","text":"

                                  \u5728\u8bbf\u95ee\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0openAPI \u65f6\uff0c\u5728\u8bf7\u6c42\u4e2d\u52a0\u4e0a\u8bf7\u6c42\u5934 Authorization:Bearer ${token} \u4ee5\u6807\u8bc6\u8bbf\u95ee\u8005\u7684\u8eab\u4efd\uff0c \u5176\u4e2d ${token} \u662f\u4e0a\u4e00\u6b65\u4e2d\u83b7\u53d6\u5230\u7684\u5bc6\u94a5\u3002

                                  \u8bf7\u6c42\u793a\u4f8b

                                   curl -X GET -H 'Authorization:Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IkRKVjlBTHRBLXZ4MmtQUC1TQnVGS0dCSWc1cnBfdkxiQVVqM2U3RVByWnMiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE2NjE0MTU5NjksImlhdCI6MTY2MDgxMTE2OSwiaXNzIjoiZ2hpcHBvLmlvIiwic3ViIjoiZjdjOGIxZjUtMTc2MS00NjYwLTg2MWQtOWI3MmI0MzJmNGViIiwicHJlZmVycmVkX3VzZXJuYW1lIjoiYWRtaW4iLCJncm91cHMiOltdfQ.RsUcrAYkQQ7C6BxMOrdD3qbBRUt0VVxynIGeq4wyIgye6R8Ma4cjxG5CbU1WyiHKpvIKJDJbeFQHro2euQyVde3ygA672ozkwLTnx3Tu-_mB1BubvWCBsDdUjIhCQfT39rk6EQozMjb-1X1sbLwzkfzKMls-oxkjagI_RFrYlTVPwT3Oaw-qOyulRSw7Dxd7jb0vINPq84vmlQIsI3UuTZSNO5BCgHpubcWwBss-Aon_DmYA-Et_-QtmPBA3k8E2hzDSzc7eqK0I68P25r9rwQ3DeKwD1dbRyndqWORRnz8TLEXSiCFXdZT2oiMrcJtO188Ph4eLGut1-4PzKhwgrQ' 'https://demo-dev.daocloud.io/apis/ghippo.io/v1alpha1/users?page=1&pageSize=10' -k\n

                                   Response example

                                  {\n    \"items\": [\n        {\n            \"id\": \"a7cfd010-ebbe-4601-987f-d098d9ef766e\",\n            \"name\": \"a\",\n            \"email\": \"\",\n            \"description\": \"\",\n            \"firstname\": \"\",\n            \"lastname\": \"\",\n            \"source\": \"locale\",\n            \"enabled\": true,\n            \"createdAt\": \"1660632794800\",\n            \"updatedAt\": \"0\",\n            \"lastLoginAt\": \"\"\n        }\n    ],\n    \"pagination\": {\n        \"page\": 1,\n        \"pageSize\": 10,\n        \"total\": 1\n    }\n}\n
                                  "},{"location":"openapi/baize/index.html","title":"AI Lab OpenAPI \u6587\u6863","text":""},{"location":"openapi/ghippo/index.html","title":"\u5168\u5c40\u7ba1\u7406 OpenAPI \u6587\u6863","text":""},{"location":"openapi/insight/index.html","title":"\u53ef\u89c2\u6d4b\u6027 OpenAPI \u6587\u6863","text":""},{"location":"openapi/kpanda/index.html","title":"\u5bb9\u5668\u7ba1\u7406 OpenAPI \u6587\u6863","text":""},{"location":"openapi/virtnest/index.html","title":"\u4e91\u4e3b\u673a OpenAPI \u6587\u6863","text":""},{"location":"stylesheets/tags.html","title":"Tags","text":"

                                  Following is a list of relevant tags:

                                  [TAGS]

                                  "},{"location":"en/index.html","title":"Suanova Website for AI Platform","text":"

                                  This is the website for the Suanova AI Platform.

                                  • User Manual: Develop AI algorithms, build training and inference jobs using cloud hosts in a containerized environment.
                                  • Administrator Manual: Ensure smooth and efficient operation of the platform for containerized end users.
                                  • Developer Manual: A compilation of OpenAPI manuals for five modules.

                                  "},{"location":"en/admin/index.html","title":"Suanova AI Platform - Administrator","text":"

                                  This is the operation and maintenance documentation for the Suanova AI Platform aimed at administrators.

                                  • Cloud Host

                                    A cloud host is a virtual machine deployed in the cloud.

                                    • Manage Cloud Hosts
                                    • Cloud Host vGPU
                                    • Cloud Host Templates
                                    • Import Cloud Hosts from VMWare
                                  • Container Management

                                    Manage K8s clusters, nodes, applications, resources, and permissions.

                                    • Create Cluster
                                    • Add Worker Nodes
                                    • Manage Helm Apps
                                    • HPA Horizontal Scaling
                                  • AI Lab

                                    Manage AI resources and queues.

                                    • Manage Resources
                                    • Manage Queues
                                    • Best Practices for AI Training and Deployment
                                    • AI Lab Troubleshooting
                                  • Insight

                                    Understand Insight resources, configuration, and troubleshooting.

                                    • Resource Planning Deployment
                                    • Install and Upgrade
                                    • Compatibility Test
                                    • Frequently Asked Questions
                                  • Global Management

                                    Control access permissions for users, user groups, workspaces, resources, etc.

                                    • Bind Workspace
                                    • Allocate Resources to Workspaces
                                    • Audit Logs
                                    • Platform Settings

                                  "},{"location":"en/admin/baize/best-practice/add-scheduler.html","title":"Add Job Scheduler","text":"

                                  AI Lab provides a job scheduler to help you better manage jobs. In addition to the basic scheduler, it also supports custom schedulers.

                                  "},{"location":"en/admin/baize/best-practice/add-scheduler.html#introduction-to-job-scheduler","title":"Introduction to Job Scheduler","text":"

                                  In Kubernetes, the job scheduler is responsible for deciding which node to assign a Pod to. It considers various factors such as resource requirements, hardware/software constraints, affinity/anti-affinity rules, and data locality.

                                  The default scheduler is a core component in a Kubernetes cluster that decides which node a Pod should run on. Let's delve into its working principles, features, and configuration methods.

                                  "},{"location":"en/admin/baize/best-practice/add-scheduler.html#scheduler-workflow","title":"Scheduler Workflow","text":"

                                  The workflow of the default scheduler can be divided into two main phases: filtering and scoring.

                                  "},{"location":"en/admin/baize/best-practice/add-scheduler.html#filtering-phase","title":"Filtering Phase","text":"

                                  The scheduler traverses all nodes and excludes those that do not meet the Pod's requirements, considering factors such as:

                                  • Resource requirements
                                  • Node selectors
                                  • Node affinity
                                  • Taints and tolerations

                                  These parameters can be set through advanced configurations when creating a job.

                                  "},{"location":"en/admin/baize/best-practice/add-scheduler.html#scoring-phase","title":"Scoring Phase","text":"

                                  The scheduler scores the nodes that passed the filtering phase and selects the highest-scoring node to run the Pod. Factors considered include:

                                  • Resource utilization
                                  • Pod affinity/anti-affinity
                                  • Node affinity
                                  "},{"location":"en/admin/baize/best-practice/add-scheduler.html#scheduler-plugins","title":"Scheduler Plugins","text":"

                                   In addition to basic job scheduling capabilities, AI Lab also supports scheduler plugins from the scheduler-plugins project maintained by Kubernetes SIG Scheduling, which provides Coscheduling (gang scheduling) among other features.

                                  "},{"location":"en/admin/baize/best-practice/add-scheduler.html#deploy-scheduler-plugins","title":"Deploy Scheduler Plugins","text":"

                                  To deploy a secondary scheduler plugin in a worker cluster, refer to Deploying Secondary Scheduler Plugin.

                                  "},{"location":"en/admin/baize/best-practice/add-scheduler.html#enable-scheduler-plugins-in-ai-lab","title":"Enable Scheduler Plugins in AI Lab","text":"

                                  Danger

                                  Improper operations when adding scheduler plugins may affect the stability of the entire cluster. It is recommended to test in a test environment or contact our technical support team.

                                   Note that if you wish to use more scheduler plugins in training jobs, you must first install them manually in the worker cluster, and then add the proper scheduler plugin configuration when deploying the baize-agent in the cluster.

                                   Through the container management UI provided for Helm Apps, you can easily deploy scheduler plugins in the cluster.

                                   Then, click Install in the top right corner. (If the baize-agent has already been deployed, you can update it from the Helm App list.) Add the scheduler configuration.

                                   Note the parameter hierarchy of the scheduler. After adding it, click OK.

                                  Note: Do not omit this configuration when updating the baize-agent in the future.

                                  "},{"location":"en/admin/baize/best-practice/add-scheduler.html#specify-scheduler-when-creating-a-job","title":"Specify Scheduler When Creating a Job","text":"

                                   Once the scheduler plugin has been deployed in the cluster and the matching scheduler configuration has been added to the baize-agent, you can specify the scheduler when creating a job.
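
                                   Under the hood, selecting a scheduler for a job presumably maps to the standard Kubernetes schedulerName field on the job's Pods; a minimal sketch, where the scheduler name is illustrative and depends on the plugin you deployed:

                                   apiVersion: v1\nkind: Pod\nmetadata:\n  name: training-pod\nspec:\n  schedulerName: scheduler-plugins-scheduler   # name registered by the deployed plugin\n  containers:\n    - name: trainer\n      image: my-training-image:latest          # illustrative image\n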

                                  If everything is set up correctly, you will see the scheduler plugin you deployed in the scheduler dropdown menu.

                                  This concludes the instructions for configuring and using the scheduler options in AI Lab.

                                  "},{"location":"en/admin/baize/best-practice/change-notebook-image.html","title":"Update Built-in Notebook Images","text":"

                                   The Notebook provides several available base images by default for developers to choose from, which meets most developers' needs.

                                  DaoCloud provides a default Notebook image that contains all necessary development tools and resources.

                                  baize/baize-notebook\n

                                  This Notebook includes basic development tools. Taking baize-notebook:v0.5.0 (May 30, 2024) as an example, the relevant dependencies and versions are as follows:

                                   • Ubuntu 22.04.3 — default OS
                                   • Python 3.11.6 — default Python version
                                   • pip 23.3.1
                                   • conda (mamba) 23.3.1
                                   • jupyterlab 3.6.6 — JupyterLab image, providing a complete Notebook experience
                                   • codeserver v4.89.1 — mainstream Code development tool for a familiar experience
                                   • *baizectl v0.5.0 — DaoCloud built-in CLI task management tool
                                   • *SSH — supports local SSH direct access to the Notebook container
                                   • *kubectl v1.27 — Kubernetes CLI for managing container resources within the Notebook

                                  Note

                                   The AI platform proactively maintains and updates this image with each version iteration.

                                  However, sometimes users may need custom images. This page explains how to update images and add them to the Notebook creation interface for selection.

                                  "},{"location":"en/admin/baize/best-practice/change-notebook-image.html#build-custom-images-for-reference-only","title":"Build Custom Images (For Reference Only)","text":"

                                  Note

                                  Building a new image requires using baize-notebook as the base image to ensure the Notebook runs properly.

                                  When building a custom image, it is recommended to first understand the Dockerfile of the baize-notebook image to better understand how to build a custom image.

                                  "},{"location":"en/admin/baize/best-practice/change-notebook-image.html#dockerfile-for-baize-notebook","title":"Dockerfile for baize-notebook","text":"
                                   ARG BASE_IMG=docker.m.daocloud.io/kubeflownotebookswg/jupyter:v1.8.0\n\nFROM $BASE_IMG\n\nUSER root\n\n# install - useful linux packages\nRUN export DEBIAN_FRONTEND=noninteractive \\\n && apt-get -yq update \\\n && apt-get -yq install --no-install-recommends \\\n    openssh-server git git-lfs bash-completion \\\n && apt-get clean \\\n && rm -rf /var/lib/apt/lists/*\n\n# remove default s6 jupyterlab run script\nRUN rm -rf /etc/services.d/jupyterlab\n\n# install - useful jupyter plugins\nRUN mamba install -n base -y jupyterlab-language-pack-zh-cn \\\n  && mamba clean --all -y\n\nARG CODESERVER_VERSION=4.89.1\nARG TARGETARCH\n\nRUN curl -fsSL \"https://github.com/coder/code-server/releases/download/v$CODESERVER_VERSION/code-server_${CODESERVER_VERSION}_$TARGETARCH.deb\" -o /tmp/code-server.deb \\\n  && dpkg -i /tmp/code-server.deb \\\n  && rm -f /tmp/code-server.deb\n\nARG CODESERVER_PYTHON_VERSION=2024.4.1\nARG CODESERVER_JUPYTER_VERSION=2024.3.1\nARG CODESERVER_LANGUAGE_PACK_ZH_CN=1.89.0\nARG CODESERVER_YAML=1.14.0\nARG CODESERVER_DOTENV=1.0.1\nARG CODESERVER_EDITORCONFIG=0.16.6\nARG CODESERVER_TOML=0.19.1\nARG CODESERVER_GITLENS=15.0.4\n\n# configure for code-server extensions\n# # https://github.com/kubeflow/kubeflow/blob/709254159986d2cc99e675d0fad5a128ddeb0917/components/example-notebook-servers/codeserver-python/Dockerfile\n# # and\n# # https://github.com/kubeflow/kubeflow/blob/709254159986d2cc99e675d0fad5a128ddeb0917/components/example-notebook-servers/codeserver/Dockerfile\nRUN code-server --list-extensions --show-versions \\\n  && code-server --list-extensions --show-versions \\\n  && code-server \\\n    --install-extension MS-CEINTL.vscode-language-pack-zh-hans@$CODESERVER_LANGUAGE_PACK_ZH_CN \\\n    --install-extension ms-python.python@$CODESERVER_PYTHON_VERSION \\\n    --install-extension ms-toolsai.jupyter@$CODESERVER_JUPYTER_VERSION \\\n    --install-extension redhat.vscode-yaml@$CODESERVER_YAML \\\n    --install-extension mikestead.dotenv@$CODESERVER_DOTENV \\\n    --install-extension EditorConfig.EditorConfig@$CODESERVER_EDITORCONFIG \\\n    --install-extension tamasfe.even-better-toml@$CODESERVER_TOML \\\n    --install-extension eamodio.gitlens@$CODESERVER_GITLENS \\\n    --install-extension catppuccin.catppuccin-vsc-pack \\\n    --force \\\n  && code-server --list-extensions --show-versions\n\n# configure for code-server\nRUN mkdir -p /home/${NB_USER}/.local/share/code-server/User \\\n  && chown -R ${NB_USER}:users /home/${NB_USER} \\\n  && cat <<EOF > /home/${NB_USER}/.local/share/code-server/User/settings.json\n{\n  \"gitlens.showWelcomeOnInstall\": false,\n  \"workbench.colorTheme\": \"Catppuccin Mocha\",\n}\nEOF\n\nRUN mkdir -p /tmp_home/${NB_USER}/.local/share \\\n  && mv /home/${NB_USER}/.local/share/code-server /tmp_home/${NB_USER}/.local/share\n\n# set ssh configuration\nRUN mkdir -p /run/sshd \\\n && chown -R ${NB_USER}:users /etc/ssh \\\n && chown -R ${NB_USER}:users /run/sshd \\\n && sed -i \"/#\\?Port/s/^.*$/Port 2222/g\" /etc/ssh/sshd_config \\\n && sed -i \"/#\\?PasswordAuthentication/s/^.*$/PasswordAuthentication no/g\" /etc/ssh/sshd_config \\\n && sed -i \"/#\\?PubkeyAuthentication/s/^.*$/PubkeyAuthentication yes/g\" /etc/ssh/sshd_config \\\n && rclone_version=v1.65.0 && \\\n       arch=$(uname -m | sed -E 's/x86_64/amd64/g;s/aarch64/arm64/g') && \\\n       filename=rclone-${rclone_version}-linux-${arch} && \\\n       curl -fsSL https://github.com/rclone/rclone/releases/download/${rclone_version}/${filename}.zip -o ${filename}.zip && \\\n       unzip ${filename}.zip && mv ${filename}/rclone /usr/local/bin && rm -rf ${filename} ${filename}.zip\n\n# Init mamba\nRUN mamba init --system\n\n# init baize-base environment for essential python packages\nRUN mamba create -n baize-base -y python \\\n  && /opt/conda/envs/baize-base/bin/pip install tensorboard \\\n  && mamba clean --all -y \\\n  && ln -s /opt/conda/envs/baize-base/bin/tensorboard /usr/local/bin/tensorboard\n\n# prepare baize-runtime-env directory\nRUN mkdir -p /opt/baize-runtime-env \\\n  && chown -R ${NB_USER}:users /opt/baize-runtime-env\n\nARG APP\nARG PROD_NAME\nARG TARGETOS\n\nCOPY out/$TARGETOS/$TARGETARCH/data-loader /usr/local/bin/\nCOPY out/$TARGETOS/$TARGETARCH/baizectl /usr/local/bin/\n\nRUN chmod +x /usr/local/bin/baizectl /usr/local/bin/data-loader && \\\n    echo \"source /etc/bash_completion\" >> /opt/conda/etc/profile.d/conda.sh && \\\n    echo \"source <(baizectl completion bash)\" >> /opt/conda/etc/profile.d/conda.sh && \\\n    echo \"source <(kubectl completion bash)\" >> /opt/conda/etc/profile.d/conda.sh && \\\n    echo '[ -f /run/baize-env ] && export $(cat /run/baize-env | xargs)' >> /opt/conda/etc/profile.d/conda.sh && \\\n    echo 'alias conda=\"mamba\"' >> /opt/conda/etc/profile.d/conda.sh\n\nUSER ${NB_UID}\n
                                  "},{"location":"en/admin/baize/best-practice/change-notebook-image.html#build-your-image","title":"Build Your Image","text":"
                                  ARG BASE_IMG=release.daocloud.io/baize/baize-notebook:v0.5.0\n\nFROM $BASE_IMG\nUSER root\n\n# Do Customization\nRUN mamba install -n baize-base -y pytorch torchvision torchaudio cpuonly -c pytorch \\\n && mamba install -n baize-base -y tensorflow \\\n && mamba clean --all -y\n\nUSER ${NB_UID}\n
                                  "},{"location":"en/admin/baize/best-practice/change-notebook-image.html#add-to-the-notebook-image-list-helm","title":"Add to the Notebook Image List (Helm)","text":"

                                  Warning

                                  Note that this must be done by the platform administrator. Be cautious with changes.

                                  Currently, the image selector needs to be modified by updating the Helm parameters of baize. The specific steps are as follows:

                                  In the Helm Apps list of the kpanda-global-cluster global management cluster, find baize, enter the update page, and modify the Notebook image in the YAML parameters:

                                  Note the parameter modification path global.config.notebook_images:

                                  ...\nglobal:\n  ...\n  config:\n    notebook_images:\n      ...\n      names: release.daocloud.io/baize/baize-notebook:v0.5.0\n      # Add your image information here\n

                                   After the update completes and the Helm App restarts successfully, the new image appears in the image selector on the Notebook creation page.

                                  "},{"location":"en/admin/baize/best-practice/checkpoint.html","title":"Checkpoint Mechanism and Usage","text":"

                                   In practical deep learning scenarios, model training typically runs for a long time, which places high demands on the stability and efficiency of distributed training jobs. Moreover, an unexpected interruption during actual training can cause the loss of the model state, forcing the training process to start over. This wastes time and resources, which is especially costly in LLM training, and also hurts the training results of the model.

                                  The ability to save the model state during training, so that it can be restored in case of an interruption, becomes crucial. Checkpointing is the mainstream solution to this problem. This article will introduce the basic concepts of the Checkpoint mechanism and its usage in PyTorch and TensorFlow.

                                  "},{"location":"en/admin/baize/best-practice/checkpoint.html#what-is-a-checkpoint","title":"What is a Checkpoint?","text":"

                                  A checkpoint is a mechanism for saving the state of a model during training. By periodically saving checkpoints, you can restore the model in the following situations:

• Training interruption (e.g., system crash or manual interruption)
• Intermediate evaluation at a certain stage of training
• Model reuse across different experiments
                                  "},{"location":"en/admin/baize/best-practice/checkpoint.html#pytorch","title":"PyTorch","text":"

                                  In PyTorch, torch.save and torch.load are the basic functions used for saving and loading models.

                                  "},{"location":"en/admin/baize/best-practice/checkpoint.html#save-checkpoints-in-pytorch","title":"Save Checkpoints in PyTorch","text":"

                                  In PyTorch, the state_dict is typically used to save the model's parameters. Here is a simple example:

                                  import torch\nimport torch.nn as nn\n\n# Assume you have a simple neural network\nclass SimpleModel(nn.Module):\n    def __init__(self):\n        super(SimpleModel, self).__init__()\n        self.fc = nn.Linear(10, 2)\n\n    def forward(self, x):\n        return self.fc(x)\n\n# Initialize model and optimizer\nmodel = SimpleModel()\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n\n# Train the model...\n# Save checkpoint\ncheckpoint_path = 'model_checkpoint.pth'\ntorch.save({\n    'epoch': 10,\n    'model_state_dict': model.state_dict(),\n    'optimizer_state_dict': optimizer.state_dict(),\n    'loss': 0.02,\n}, checkpoint_path)\n
                                  "},{"location":"en/admin/baize/best-practice/checkpoint.html#restore-checkpoints-in-pytorch","title":"Restore Checkpoints in PyTorch","text":"

                                  When loading the model, you need to restore the model parameters and optimizer state, and then continue training or inference:

                                  # Restore checkpoint\ncheckpoint = torch.load('model_checkpoint.pth')\nmodel.load_state_dict(checkpoint['model_state_dict'])\noptimizer.load_state_dict(checkpoint['optimizer_state_dict'])\nepoch = checkpoint['epoch']\nloss = checkpoint['loss']\n\n# Continue training or inference...\n
Typical fields saved in a checkpoint include:

• model_state_dict: Model parameters
                                  • optimizer_state_dict: Optimizer state
                                  • epoch: Current training epoch
                                  • loss: Loss value
                                  • learning_rate: Learning rate
                                  • best_accuracy: Best accuracy
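
The optional fields above, such as learning_rate and best_accuracy, are not produced automatically by PyTorch. Below is a minimal sketch of saving them alongside the required state, assuming your own training loop already tracks best_accuracy:

torch.save({\n    'epoch': epoch,\n    'model_state_dict': model.state_dict(),\n    'optimizer_state_dict': optimizer.state_dict(),\n    'loss': loss,\n    'learning_rate': optimizer.param_groups[0]['lr'],  # current learning rate of the first param group\n    'best_accuracy': best_accuracy,  # assumed to be tracked by your own training loop\n}, 'model_checkpoint.pth')\n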
                                  "},{"location":"en/admin/baize/best-practice/checkpoint.html#tensorflow","title":"TensorFlow","text":"

                                  TensorFlow provides the tf.train.Checkpoint class to manage the saving and restoring of models and optimizers.

                                  "},{"location":"en/admin/baize/best-practice/checkpoint.html#save-checkpoints-in-tensorflow","title":"Save Checkpoints in TensorFlow","text":"

                                  Here is an example of saving a checkpoint in TensorFlow:

                                  import tensorflow as tf\n\n# Assume you have a simple model\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Dense(2, input_shape=(10,))\n])\noptimizer = tf.keras.optimizers.Adam(learning_rate=0.001)\n\n# Define checkpoint\ncheckpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\ncheckpoint_dir = './checkpoints'\ncheckpoint_prefix = f'{checkpoint_dir}/ckpt'\n\n# Train the model...\n# Save checkpoint\ncheckpoint.save(file_prefix=checkpoint_prefix)\n

                                  Note

                                  Users of AI Lab can directly mount high-performance storage as the checkpoint directory to improve the speed of saving and restoring checkpoints.

                                  "},{"location":"en/admin/baize/best-practice/checkpoint.html#restore-checkpoints-in-tensorflow","title":"Restore Checkpoints in TensorFlow","text":"

                                  Load the checkpoint and restore the model and optimizer state:

                                  # Restore checkpoint\nlatest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)\ncheckpoint.restore(latest_checkpoint)\n\n# Continue training or inference...\n
                                  "},{"location":"en/admin/baize/best-practice/checkpoint.html#manage-checkpoints-in-distributed-training-with-tensorflow","title":"Manage Checkpoints in Distributed Training with TensorFlow","text":"

                                  In distributed training, TensorFlow manages checkpoints primarily through the following methods:

                                  • Using tf.train.Checkpoint and tf.train.CheckpointManager

                                    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)\nmanager = tf.train.CheckpointManager(checkpoint, directory='/tmp/model', max_to_keep=3)\n
                                  • Saving checkpoints within a distributed strategy

                                    strategy = tf.distribute.MirroredStrategy()\nwith strategy.scope():\n    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)\n    manager = tf.train.CheckpointManager(checkpoint, directory='/tmp/model', max_to_keep=3)\n
                                  • Saving checkpoints only on the chief worker node

                                    if strategy.cluster_resolver.task_type == 'chief':\n    manager.save()\n
                                  • Special handling when using MultiWorkerMirroredStrategy

                                    strategy = tf.distribute.MultiWorkerMirroredStrategy()\nwith strategy.scope():\n    # Define model\n    ...\n    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)\n    manager = tf.train.CheckpointManager(checkpoint, '/tmp/model', max_to_keep=3)\n\ndef _chief_worker(task_type, task_id):\n    return task_type is None or task_type == 'chief' or (task_type == 'worker' and task_id == 0)\n\nif _chief_worker(strategy.cluster_resolver.task_type, strategy.cluster_resolver.task_id):\n    manager.save()\n
                                  • Using a distributed file system

                                    Ensure all worker nodes can access the same checkpoint directory, typically using a distributed file system such as HDFS or GCS.

                                  • Asynchronous saving

Use tf.keras.callbacks.ModelCheckpoint and set the save_freq parameter to save checkpoints periodically during training (see the sketch after this list).

                                  • Checkpoint restoration

                                    status = checkpoint.restore(manager.latest_checkpoint)\nstatus.assert_consumed()  # (1)!\n
                                    1. Ensure all variables are restored
                                  • Performance optimization

                                    • Enable mixed precision training using tf.train.experimental.enable_mixed_precision_graph_rewrite()
                                    • Adjust saving frequency to avoid too frequent I/O operations
                                    • Consider using tf.saved_model.save() to save the entire model, not just the weights
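
As a concrete example of the periodic saving mentioned above, here is a minimal sketch assuming a compiled Keras model and a train_dataset; an integer save_freq counts batches rather than epochs:

import tensorflow as tf\n\n# Save weights every 500 batches; pass save_freq='epoch' to save once per epoch instead\ncheckpoint_cb = tf.keras.callbacks.ModelCheckpoint(\n    filepath='./checkpoints/ckpt-{epoch:02d}',\n    save_weights_only=True,\n    save_freq=500,\n)\n\nmodel.fit(train_dataset, epochs=10, callbacks=[checkpoint_cb])\n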
                                  "},{"location":"en/admin/baize/best-practice/checkpoint.html#considerations","title":"Considerations","text":"
                                  1. Regular Saving : Determine a suitable saving frequency based on training time and resource consumption, such as every epoch or every few training steps.

2. Save Multiple Checkpoints : Keep the latest few checkpoints to guard against file corruption or an unusable latest checkpoint.

                                  3. Record Metadata : Save additional information in the checkpoint, such as the epoch number and loss value, to better restore the training state.

                                  4. Use Version Control : Save checkpoints for different experiments to facilitate comparison and reuse.

                                  5. Validation and Testing : Use checkpoints for validation and testing at different training stages to ensure model performance and stability.
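
The following sketch ties several of these considerations together: periodic saves, a bounded number of retained checkpoints, and metadata recorded in each file. The helper name and directory layout are illustrative, not part of any framework API:

import os\nimport glob\nimport torch\n\ndef save_checkpoint(model, optimizer, epoch, loss, ckpt_dir='checkpoints', keep_last=3):\n    os.makedirs(ckpt_dir, exist_ok=True)\n    path = os.path.join(ckpt_dir, f'ckpt_epoch_{epoch:04d}.pth')\n    torch.save({\n        'epoch': epoch,  # metadata for resuming\n        'loss': loss,\n        'model_state_dict': model.state_dict(),\n        'optimizer_state_dict': optimizer.state_dict(),\n    }, path)\n    # Prune older checkpoints, keeping only the most recent keep_last files\n    ckpts = sorted(glob.glob(os.path.join(ckpt_dir, 'ckpt_epoch_*.pth')))\n    for old in ckpts[:-keep_last]:\n        os.remove(old)\n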

                                  "},{"location":"en/admin/baize/best-practice/checkpoint.html#conclusion","title":"Conclusion","text":"

                                  The checkpoint mechanism plays a crucial role in deep learning training. By effectively using the checkpoint features in PyTorch and TensorFlow, you can significantly improve the reliability and efficiency of training. The methods and best practices described in this article should help you better manage the training process of deep learning models.

                                  "},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html","title":"Deploy NFS for Preloading Dataset","text":"

                                  A Network File System (NFS) allows remote hosts to mount file systems over a network and interact with those file systems as though they are mounted locally. This enables system administrators to consolidate resources onto centralized servers on the network.

Datasets are a core feature provided by AI Lab. By abstracting data dependencies across the entire MLOps lifecycle into datasets, users can manage various types of data in one place, and training tasks can then consume that data directly.

                                  When remote data is not within the worker cluster, datasets provide the capability to automatically preheat data, supporting data preloading from sources such as Git, S3, and HTTP to the local cluster.

                                  A storage service supporting the ReadWriteMany mode is needed for preloading remote data for the dataset, and it is recommended to deploy NFS within the cluster.

                                  This article mainly introduces how to quickly deploy an NFS service and add it as a StorageClass for the cluster.

                                  "},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html#preparation","title":"Preparation","text":"
• NFS uses the node's local disk as its data cache by default, so make sure the disk has enough free space.
• Installation uses Helm and kubectl; please make sure both are installed.
                                  "},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html#deployment-steps","title":"Deployment Steps","text":"

                                  Several components need to be installed:

                                  • NFS Server
                                  • csi-driver-nfs
                                  • StorageClass
                                  "},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html#initialize-namespace","title":"Initialize Namespace","text":"

                                  All system components will be installed in the nfs namespace, so it is necessary to create this namespace first.

                                  kubectl create namespace nfs\n
                                  "},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html#install-nfs-server","title":"Install NFS Server","text":"

                                  Here is a simple YAML deployment file that can be used directly.

                                  Note

Be sure to check the image: field and replace it with a domestic mirror if needed, depending on where the cluster is located.

                                  nfs-server.yaml
                                  ---\nkind: Service\napiVersion: v1\nmetadata:\n  name: nfs-server\n  namespace: nfs\n  labels:\n    app: nfs-server\nspec:\n  type: ClusterIP\n  selector:\n    app: nfs-server\n  ports:\n    - name: tcp-2049\n      port: 2049\n      protocol: TCP\n    - name: udp-111\n      port: 111\n      protocol: UDP\n---\nkind: Deployment\napiVersion: apps/v1\nmetadata:\n  name: nfs-server\n  namespace: nfs\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: nfs-server\n  template:\n    metadata:\n      name: nfs-server\n      labels:\n        app: nfs-server\n    spec:\n      nodeSelector:\n        \"kubernetes.io/os\": linux\n      containers:\n        - name: nfs-server\n          image: itsthenetwork/nfs-server-alpine:latest\n          env:\n            - name: SHARED_DIRECTORY\n              value: \"/exports\"\n          volumeMounts:\n            - mountPath: /exports\n              name: nfs-vol\n          securityContext:\n            privileged: true\n          ports:\n            - name: tcp-2049\n              containerPort: 2049\n              protocol: TCP\n            - name: udp-111\n              containerPort: 111\n              protocol: UDP\n      volumes:\n        - name: nfs-vol\n          hostPath:\n            path: /nfsdata  # (1)!\n            type: DirectoryOrCreate\n
                                  1. Modify this to specify another path to store NFS shared data

                                  Save the above YAML as nfs-server.yaml, then run the following commands for deployment:

                                  kubectl -n nfs apply -f nfs-server.yaml\n\n# Check the deployment result\nkubectl -n nfs get pod,svc\n
                                  "},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html#install-csi-driver-nfs","title":"Install csi-driver-nfs","text":"

Installing csi-driver-nfs requires Helm; please ensure it is installed beforehand.

                                  # Add Helm repository\nhelm repo add csi-driver-nfs https://mirror.ghproxy.com/https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts\nhelm repo update csi-driver-nfs\n\n# Deploy csi-driver-nfs\n# The parameters here mainly optimize the image address to accelerate downloads in China\nhelm upgrade --install csi-driver-nfs csi-driver-nfs/csi-driver-nfs \\\n    --set image.nfs.repository=k8s.m.daocloud.io/sig-storage/nfsplugin \\\n    --set image.csiProvisioner.repository=k8s.m.daocloud.io/sig-storage/csi-provisioner \\\n    --set image.livenessProbe.repository=k8s.m.daocloud.io/sig-storage/livenessprobe \\\n    --set image.nodeDriverRegistrar.repository=k8s.m.daocloud.io/sig-storage/csi-node-driver-registrar \\\n    --namespace nfs \\\n    --version v4.5.0\n

                                  Warning

Not all images used by csi-nfs-controller can be overridden through Helm parameters, so the image fields of the Deployment need to be modified manually. Change image: registry.k8s.io to image: k8s.dockerproxy.com to accelerate downloads in China.

                                  "},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html#create-storageclass","title":"Create StorageClass","text":"

                                  Save the following YAML as nfs-sc.yaml:

                                  nfs-sc.yaml
                                  apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: nfs-csi\nprovisioner: nfs.csi.k8s.io\nparameters:\n  server: nfs-server.nfs.svc.cluster.local\n  share: /\n  # csi.storage.k8s.io/provisioner-secret is only needed for providing mountOptions in DeleteVolume\n  # csi.storage.k8s.io/provisioner-secret-name: \"mount-options\"\n  # csi.storage.k8s.io/provisioner-secret-namespace: \"default\"\nreclaimPolicy: Delete\nvolumeBindingMode: Immediate\nmountOptions:\n  - nfsvers=4.1\n

Then run the following command:

                                  kubectl apply -f nfs-sc.yaml\n
                                  "},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html#test","title":"Test","text":"

                                  Create a dataset and set the dataset's associated storage class and preloading method to NFS to preheat remote data into the cluster.

After the dataset is created successfully, its status shows as preloading; you can start using it once the preloading completes.

                                  "},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html#faqs","title":"FAQs","text":""},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html#missing-necessary-nfs-client-software-sbinmount","title":"Missing Necessary NFS Client Software /sbin/mount","text":"
                                  bad option; for several filesystems (e.g. nfs, cifs) you might need a /sbin/mount.<type> helper program.\n

                                  On the nodes running Kubernetes, ensure that the NFS client is installed:

On Ubuntu/Debian, run the following commands to install the NFS client:

                                  sudo apt-get update\nsudo apt-get install nfs-common\n

On CentOS/RHEL, run the following command to install the NFS client:

                                  sudo yum install nfs-utils\n

                                  Check the NFS server configuration to ensure that the NFS server is running and configured correctly. You can try mounting manually to test:

                                  sudo mkdir -p /mnt/test\nsudo mount -t nfs <nfs-server>:/nfsdata /mnt/test\n
                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html","title":"Fine-tune the ChatGLM3 Model by Using AI Lab","text":"

                                  This page uses the ChatGLM3 model as an example to demonstrate how to use LoRA (Low-Rank Adaptation) to fine-tune the ChatGLM3 model within the AI Lab environment. The demo program is from the ChatGLM3 official example.

The general fine-tuning process is: prepare the data, set up the environment, debug in a Notebook, submit the fine-tuning task, and run inference with the output model.

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#environment-requirements","title":"Environment Requirements","text":"
                                  • GPU with at least 20GB memory, recommended RTX4090 or NVIDIA A/H series
                                  • At least 200GB of available disk space
                                  • At least 8-core CPU, recommended 16-core
                                  • 64GB RAM, recommended 128GB

                                  Info

                                  Before starting, ensure AI platform and AI Lab are correctly installed, GPU queue resources are successfully initialized, and computing resources are sufficient.

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#prepare-data","title":"Prepare Data","text":"

Use the dataset management feature provided by AI Lab to quickly preheat and persist the data required for fine-tuning large models, reducing the GPU time spent on data preparation and improving resource utilization.

                                  Create the required data resources on the dataset list page. These resources include the ChatGLM3 code and data files, all of which can be managed uniformly through the dataset list.

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#code-and-model-files","title":"Code and Model Files","text":"

                                  ChatGLM3 is a dialogue pre-training model jointly released by zhipuai.cn and Tsinghua University KEG Lab.

                                  First, pull the ChatGLM3 code repository and download the pre-training model for subsequent fine-tuning tasks.

                                  AI Lab will automatically preheat the data in the background to ensure quick data access for subsequent tasks.

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#advertisegen-dataset","title":"AdvertiseGen Dataset","text":"

The AdvertiseGen dataset can be obtained directly from Tsinghua Cloud using the HTTP data source method.

                                  After creation, wait for the dataset to be preheated, which is usually quick and depends on your network conditions.

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#fine-tune-output-data","title":"Fine-tune Output Data","text":"

                                  You also need to prepare an empty dataset to store the model files output after the fine-tuning task is completed. Here, create an empty dataset, using PVC as an example.

                                  Warning

                                  Ensure to use a storage type that supports ReadWriteMany to allow quick access to resources for subsequent tasks.

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#set-up-environment","title":"Set up Environment","text":"

                                  For model developers, preparing the Python environment dependencies required for model development is crucial. Traditionally, environment dependencies are either packaged directly into the development tool's image or installed in the local environment, which can lead to inconsistency in environment dependencies and difficulties in managing and updating dependencies.

                                  AI Lab provides environment management capabilities, decoupling Python environment dependency package management from development tools and task images, solving dependency management chaos and environment inconsistency issues.

                                  Here, use the environment management feature provided by AI Lab to create the environment required for ChatGLM3 fine-tuning for subsequent use.

                                  Warning

                                  1. The ChatGLM repository contains a requirements.txt file that includes the environment dependencies required for ChatGLM3 fine-tuning.
                                  2. This fine-tuning does not use the deepspeed and mpi4py packages. It is recommended to comment them out in the requirements.txt file to avoid compilation failures.

                                  In the environment management list, you can quickly create a Python environment and complete the environment creation through a simple form configuration; a Python 3.11.x environment is required here.

                                  Since CUDA is required for this experiment, GPU resources need to be configured here to preheat the necessary resource dependencies.

Creating the environment involves downloading a series of Python dependencies, and download speeds vary with your location. Using a domestic mirror can speed up the download.

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#use-notebook-as-ide","title":"Use Notebook as IDE","text":"

                                  AI Lab provides Notebook as an IDE feature, allowing users to write, run, and view code results directly in the browser. This is very suitable for development in data analysis, machine learning, and deep learning fields.

                                  You can use the JupyterLab Notebook provided by AI Lab for the ChatGLM3 fine-tuning task.

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#create-jupyterlab-notebook","title":"Create JupyterLab Notebook","text":"

                                  In the Notebook list, you can create a Notebook according to the page operation guide. Note that you need to configure the proper Notebook resource parameters according to the resource requirements mentioned earlier to avoid resource issues affecting the fine-tuning process.

                                  Note

                                  When creating a Notebook, you can directly mount the preloaded model code dataset and environment, greatly saving data preparation time.

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#mount-dataset-and-code","title":"Mount Dataset and Code","text":"

                                  Note: The ChatGLM3 code files are mounted to the /home/jovyan/ChatGLM3 directory, and you also need to mount the AdvertiseGen dataset to the /home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen directory to allow the fine-tuning task to access the data.

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#mount-pvc-to-model-output-folder","title":"Mount PVC to Model Output Folder","text":"

                                  The model output location used this time is the /home/jovyan/ChatGLM3/finetune_demo/output directory. You can mount the previously created PVC dataset to this directory, so the trained model can be saved to the dataset for subsequent inference tasks.

                                  After creation, you can see the Notebook interface where you can write, run, and view code results directly in the Notebook.

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#fine-tune-chatglm3","title":"Fine-tune ChatGLM3","text":"

                                  Once in the Notebook, you can find the previously mounted dataset and code in the File Browser option in the Notebook sidebar. Locate the ChatGLM3 folder.

                                  You will find the fine-tuning code for ChatGLM3 in the finetune_demo folder. Open the lora_finetune.ipynb file, which contains the fine-tuning code for ChatGLM3.

                                  First, follow the instructions in the README.md file to understand the entire fine-tuning process. It is recommended to read it thoroughly to ensure that the basic environment dependencies and data preparation work are completed.

                                  Open the terminal and use conda to switch to the preheated environment, ensuring consistency with the JupyterLab Kernel for subsequent code execution.

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#preprocess-data","title":"Preprocess Data","text":"

                                  First, preprocess the AdvertiseGen dataset, standardizing the data to meet the Lora pre-training format requirements. Save the processed data to the AdvertiseGen_fix folder.

import json\nfrom typing import Union\nfrom pathlib import Path\n\ndef _resolve_path(path: Union[str, Path]) -> Path:\n    return Path(path).expanduser().resolve()\n\ndef _mkdir(dir_name: Union[str, Path]):\n    dir_name = _resolve_path(dir_name)\n    if not dir_name.is_dir():\n        dir_name.mkdir(parents=True, exist_ok=False)\n\ndef convert_adgen(data_dir: Union[str, Path], save_dir: Union[str, Path]):\n    def _convert(in_file: Path, out_file: Path):\n        _mkdir(out_file.parent)\n        with open(in_file, encoding='utf-8') as fin:\n            with open(out_file, 'wt', encoding='utf-8') as fout:\n                for line in fin:\n                    dct = json.loads(line)\n                    sample = {'conversations': [{'role': 'user', 'content': dct['content']},\n                                                {'role': 'assistant', 'content': dct['summary']}]}\n                    fout.write(json.dumps(sample, ensure_ascii=False) + '\\n')\n\n    data_dir = _resolve_path(data_dir)\n    save_dir = _resolve_path(save_dir)\n\n    train_file = data_dir / 'train.json'\n    if train_file.is_file():\n        out_file = save_dir / train_file.relative_to(data_dir)\n        _convert(train_file, out_file)\n\n    dev_file = data_dir / 'dev.json'\n    if dev_file.is_file():\n        out_file = save_dir / dev_file.relative_to(data_dir)\n        _convert(dev_file, out_file)\n\nconvert_adgen('data/AdvertiseGen', 'data/AdvertiseGen_fix')\n

To save debugging time, you can reduce the number of entries in /home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen_fix/dev.json to 50. The data is in JSON Lines format (one record per line), making it easy to process.
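
For example, the following snippet keeps only the first 50 samples; the path matches the mount location used above:

from pathlib import Path\n\n# Keep only the first 50 lines of dev.json (one JSON record per line)\ndev_path = Path('/home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen_fix/dev.json')\nlines = dev_path.read_text(encoding='utf-8').splitlines()[:50]\ndev_path.write_text('\\n'.join(lines) + '\\n', encoding='utf-8')\n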

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#local-lora-fine-tuning-test","title":"Local LoRA Fine-tuning Test","text":"

After preprocessing the data, you can proceed with the fine-tuning test. Configure the fine-tuning parameters in the /home/jovyan/ChatGLM3/finetune_demo/configs/lora.yaml file. Key parameters to focus on typically include the learning rate, the number of training steps, and the checkpoint save frequency.

                                  Open a new terminal window and use the following command for local fine-tuning testing. Ensure that the parameter configurations and paths are correct:

CUDA_VISIBLE_DEVICES=0 NCCL_P2P_DISABLE=\"1\" NCCL_IB_DISABLE=\"1\" python finetune_hf.py data/AdvertiseGen_fix ./chatglm3-6b configs/lora.yaml\n

                                  In this command:

                                  • finetune_hf.py is the fine-tuning script in the ChatGLM3 code
                                  • data/AdvertiseGen_fix is your preprocessed dataset
                                  • ./chatglm3-6b is your pre-trained model path
                                  • configs/lora.yaml is the fine-tuning configuration file

During fine-tuning, you can use the nvidia-smi command to check GPU memory usage.

                                  After fine-tuning is complete, an output directory will be generated in the finetune_demo directory, containing the fine-tuned model files. This way, the fine-tuned model files are saved to the previously created PVC dataset.

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#submit-fine-tuning-tasks","title":"Submit Fine-tuning Tasks","text":"

                                  After completing the local fine-tuning test and ensuring that your code and data are correct, you can submit the fine-tuning task to the AI Lab for large-scale training and fine-tuning tasks.

                                  Note

                                  This is the recommended model development and fine-tuning process: first, conduct local fine-tuning tests to ensure that the code and data are correct.

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#submit-fine-tuning-tasks-via-ui","title":"Submit Fine-tuning Tasks via UI","text":"

Create a PyTorch fine-tuning task. Select the cluster resources to use based on your actual situation, making sure they meet the resource requirements mentioned earlier.

                                  • Image: You can directly use the model image provided by baizectl.
                                  • Startup command: Based on your experience using LoRA fine-tuning in the Notebook, the code files and data are in the /home/jovyan/ChatGLM3/finetune_demo directory, so you can directly use this path:

                                    bash -c \"cd /home/jovyan/ChatGLM3/finetune_demo && CUDA_VISIBLE_DEVICES=0 NCCL_P2P_DISABLE=\"1\" NCCL_IB_DISABLE=\"1\" python finetune_hf.py data/AdvertiseGen_fix ./chatglm3-6b configs/lora.yaml\"\n
                                  • Mount environment: This way, the preloaded environment dependencies can be used not only in the Notebook but also in the tasks.

                                  • Dataset: Use the preheated dataset
                                    • Set the model output path to the previously created PVC dataset
                                    • Mount the AdvertiseGen dataset to the /home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen directory
                                  • Configure sufficient GPU resources to ensure the fine-tuning task runs smoothly
                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#check-task-status","title":"Check Task Status","text":"

                                  After successfully submitting the task, you can view the training progress of the task in real-time in the task list. You can see the task status, resource usage, logs, and other information.

                                  View task logs

                                  After the task is completed, you can view the fine-tuned model files in the data output dataset for subsequent inference tasks.

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#submit-tasks-via-baizectl","title":"Submit Tasks via baizectl","text":"

                                  AI Lab's Notebook supports using the baizectl command-line tool without authentication. If you prefer using CLI, you can directly use the baizectl command-line tool to submit tasks.

baizectl job submit --name finetunel-chatglm3 -t PYTORCH \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --resources cpu=8,memory=16Gi,nvidia.com/gpu=1 \\\n    --workers 1 \\\n    --queue default \\\n    --working-dir /home/jovyan/ChatGLM3 \\\n    --datasets AdvertiseGen:/home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen  \\\n    --datasets output:/home/jovyan/ChatGLM3/finetune_demo/output  \\\n    --labels job_type=pytorch \\\n    --restart-policy on-failure \\\n    -- bash -c \"cd /home/jovyan/ChatGLM3/finetune_demo && CUDA_VISIBLE_DEVICES=0 NCCL_P2P_DISABLE=1 NCCL_IB_DISABLE=1 python finetune_hf.py data/AdvertiseGen_fix ./chatglm3-6b configs/lora.yaml\"\n

                                  For more information on using baizectl, refer to the baizectl Usage Documentation.

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#model-inference","title":"Model Inference","text":"

                                  After completing the fine-tuning task, you can use the fine-tuned model for inference tasks. Here, you can use the inference service provided by AI Lab to create an inference service with the output model.

                                  In the inference service list, you can create a new inference service. When selecting the model, choose the previously output dataset and configure the model path.

Configure the model and GPU resource requirements of the inference service based on the model size and expected inference concurrency; refer to the resource configuration of the previous fine-tuning task.

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#configure-model-runtime","title":"Configure Model Runtime","text":"

                                  Configuring the model runtime is crucial. Currently, AI Lab supports vLLM as the model inference service runtime, which can be directly selected.

                                  Tip

                                  vLLM supports a wide range of large language models. Visit vLLM for more information. These models can be easily used within AI Lab.

                                  After creation, you can see the created inference service in the inference service list. The model service list allows you to get the model's access address directly.

                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#test-the-model-service","title":"Test the Model Service","text":"

Try using the curl command in the terminal to test the model service. If the request returns results, the model service is ready for inference tasks.

curl -X POST http://10.20.100.210:31118/v2/models/chatglm3-6b/generate \\\n  -d '{\"text_input\": \"hello\", \"stream\": false, \"sampling_parameters\": \"{\\\"temperature\\\": 0.7, \\\"top_p\\\": 0.95, \\\"max_tokens\\\": 1024}\"}'\n
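
Equivalently, you can call the service from Python. The sketch below assumes the same address and payload as the curl example, and that the requests package is installed:

import json\n\nimport requests\n\nresp = requests.post(\n    'http://10.20.100.210:31118/v2/models/chatglm3-6b/generate',\n    json={\n        'text_input': 'hello',\n        'stream': False,\n        # sampling_parameters is itself a JSON-encoded string, as in the curl example\n        'sampling_parameters': json.dumps({'temperature': 0.7, 'top_p': 0.95, 'max_tokens': 1024}),\n    },\n)\nprint(resp.json())\n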
                                  "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#wrap-up","title":"Wrap up","text":"

This page used ChatGLM3 as an example to get you started with model fine-tuning in AI Lab, using LoRA to fine-tune the ChatGLM3 model.

                                  AI Lab provides a wealth of features to help model developers quickly conduct model development, fine-tuning, and inference tasks. It also offers rich OpenAPI interfaces, facilitating integration with third-party application ecosystems.

                                  "},{"location":"en/admin/baize/best-practice/label-studio.html","title":"Deploy Label Studio","text":"

                                  Note

                                  Refer to the video tutorial: Data Labeling and Dataset Usage Instructions

                                  Label Studio is an open-source data labeling tool used for various machine learning and artificial intelligence jobs. Here is a brief introduction to Label Studio:

                                  • Supports labeling of various data types including images, audio, video, and text
                                  • Can be used for jobs such as object detection, image classification, speech transcription, and named entity recognition
                                  • Provides a customizable labeling interface
                                  • Supports various labeling formats and export options

                                  Label Studio offers a powerful data labeling solution for data scientists and machine learning engineers due to its flexibility and rich features.

                                  "},{"location":"en/admin/baize/best-practice/label-studio.html#deploy-to-ai-platform","title":"Deploy to AI platform","text":"

                                  To use Label Studio in AI Lab, it needs to be deployed to the Global Service Cluster. You can quickly deploy it using Helm.

                                  Note

                                  For more deployment details, refer to Deploy Label Studio on Kubernetes.

                                  1. Enter the Global Service Cluster, find Helm Apps -> Helm Repositories from the left navigation bar, click the Create Repository button, and fill in the following parameters:

                                  2. After successfully adding the repository, click the \u2507 on the right side of the list and select Sync Repository. Wait a moment to complete the synchronization. (This sync operation will also be used for future updates of Label Studio).

                                  3. Then navigate to the Helm Charts page, search for label-studio, and click the card.

                                  4. Choose the latest version and configure the installation parameters as shown below, naming it label-studio. It is recommended to create a new namespace. Switch the parameters to YAML and modify the configuration according to the instructions.

                                    global:\n  image:\n    repository: heartexlabs/label-studio   # Configure proxy address here if docker.io is inaccessible\n  extraEnvironmentVars:\n    LABEL_STUDIO_HOST: https://{Access_Address}/label-studio    # Use the AI platform login address, refer to the current webpage URL\n    LABEL_STUDIO_USERNAME: {User_Email}    # Must be an email, replace with your own\n    LABEL_STUDIO_PASSWORD: {User_Password}\napp:\n  nginx:\n    livenessProbe:\n      path: /label-studio/nginx_health\n    readinessProbe:\n      path: /label-studio/version\n

                                  At this point, the installation of Label Studio is complete.

                                  Warning

                                  By default, PostgreSQL will be installed as the data service middleware. If the image pull fails, it may be because docker.io is inaccessible. Ensure to switch to an available proxy.

                                  If you have your own PostgreSQL data service middleware, you can use the following parameters:

                                  global:\n  image:\n    repository: heartexlabs/label-studio   # Configure proxy address here if docker.io is inaccessible\n  extraEnvironmentVars:\n    LABEL_STUDIO_HOST: https://{Access_Address}/label-studio    # Use the AI platform login address, refer to the current webpage URL\n    LABEL_STUDIO_USERNAME: {User_Email}    # Must be an email, replace with your own\n    LABEL_STUDIO_PASSWORD: {User_Password}\napp:\n  nginx:\n    livenessProbe:\n      path: /label-studio/nginx_health\n    readinessProbe:\n      path: /label-studio/version\npostgresql:\n  enabled: false  # Disable the built-in PostgreSQL\nexternalPostgresql:\n  host: \"postgres-postgresql\"  # PostgreSQL address\n  port: 5432\n  username: \"label_studio\"  # PostgreSQL username\n  password: \"your_label_studio_password\"  # PostgreSQL password\n  database: \"label_studio\"  # PostgreSQL database name\n
                                  "},{"location":"en/admin/baize/best-practice/label-studio.html#add-gproduct-to-navigation-bar","title":"Add GProduct to Navigation Bar","text":"

                                  To add Label Studio to the AI platform navigation bar, you can refer to the method in Global Management OEM IN. The following example shows how to add it to the secondary navigation of AI Lab.

                                  "},{"location":"en/admin/baize/best-practice/label-studio.html#add-proxy-access","title":"Add Proxy Access","text":"
                                  apiVersion: ghippo.io/v1alpha1\nkind: GProductProxy\nmetadata:\n  name: label-studio\nspec:\n  gproduct: label-studio\n  proxies:\n  - authnCheck: false\n    destination:\n      host: label-studio-ls-app.label-studio.svc.cluster.local\n      port: 80\n    match:\n      uri:\n        prefix: /label-studio\n
                                  "},{"location":"en/admin/baize/best-practice/label-studio.html#add-to-ai-lab","title":"Add to AI Lab","text":"

Modify the GProductNavigator CR named baize, then make the following changes:

apiVersion: ghippo.io/v1alpha1\nkind: GProductNavigator\nmetadata:\n  annotations:\n    meta.helm.sh/release-name: baize\n    meta.helm.sh/release-namespace: baize-system\n  labels:\n    app.kubernetes.io/managed-by: Helm\n    gProductName: baize\n  name: baize\nspec:\n  category: cloudnativeai\n  gproduct: baize\n  iconUrl: ./ui/baize/logo.svg\n  isCustom: false\n  localizedName:\n    en-US: AI Lab\n    zh-CN: AI Lab\n  menus:\n    - iconUrl: ''\n      isCustom: false\n      localizedName:\n        en-US: AI Lab\n        zh-CN: AI Lab\n      name: workspace-view\n      order: 1\n      url: ./baize\n      visible: true\n    - iconUrl: ''\n      isCustom: false\n      localizedName:\n        en-US: Operator\n        zh-CN: \u8fd0\u7ef4\u7ba1\u7406\n      name: admin-view\n      order: 1\n      url: ./baize/admin\n      visible: true\n    # Start adding\n    - iconUrl: ''\n      localizedName:\n        en-US: Data Labeling\n        zh-CN: \u6570\u636e\u6807\u6ce8\n      name: label-studio\n      order: 1\n      target: blank    # Control new blank page\n      url: https://{Access_Address}/label-studio    # url to access\n      visible: true\n    # End adding\n  name: AI Lab\n  order: 10\n  url: ./baize\n  visible: true\n
                                  "},{"location":"en/admin/baize/best-practice/label-studio.html#adding-effect","title":"Adding Effect","text":""},{"location":"en/admin/baize/best-practice/label-studio.html#conclusion","title":"Conclusion","text":"

The above describes how to deploy Label Studio and integrate it as a labeling component in AI Lab. By adding labels to AI Lab datasets, you can tie data labeling into algorithm development and improve the overall development workflow. For further usage, refer to the relevant documentation.

                                  "},{"location":"en/admin/baize/best-practice/train-with-deepspeed.html","title":"Submit a DeepSpeed Training Task","text":"

According to the official DeepSpeed documentation, it is recommended to modify your code to implement the training task.

                                  Specifically, you can use deepspeed.init_distributed() instead of torch.distributed.init_process_group(...). Then run the command using torchrun to submit it as a PyTorch distributed task, which will allow you to run a DeepSpeed task.
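
In code, this is typically a one-line swap; everything else in the training script stays unchanged:

import deepspeed\n\n# Replaces torch.distributed.init_process_group(...)\ndeepspeed.init_distributed()\n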

You can use torchrun to run your DeepSpeed training script. torchrun is a utility provided by PyTorch for distributed training, and you can combine it with the DeepSpeed API to start your training task.

                                  Below is an example of running a DeepSpeed training script using torchrun:

                                  1. Write the training script:

                                    train.py
                                    import torch\nimport deepspeed\nfrom torch.utils.data import DataLoader\n\n# Load model and data\nmodel = YourModel()\ntrain_dataset = YourDataset()\ntrain_dataloader = DataLoader(train_dataset, batch_size=32)\n\n# Configure file path\ndeepspeed_config = \"deepspeed_config.json\"\n\n# Create DeepSpeed training engine\nmodel_engine, optimizer, _, _ = deepspeed.initialize(\n    model=model,\n    model_parameters=model.parameters(),\n    config_params=deepspeed_config\n)\n\n# Training loop\nfor batch in train_dataloader:\n    loss = model_engine(batch)\n    model_engine.backward(loss)\n    model_engine.step()\n
                                  2. Create the DeepSpeed configuration file:

                                    deepspeed_config.json
                                    {\n  \"train_batch_size\": 32,\n  \"gradient_accumulation_steps\": 1,\n  \"fp16\": {\n    \"enabled\": true,\n    \"loss_scale\": 0\n  },\n  \"optimizer\": {\n    \"type\": \"Adam\",\n    \"params\": {\n      \"lr\": 0.00015,\n      \"betas\": [0.9, 0.999],\n      \"eps\": 1e-08,\n      \"weight_decay\": 0\n    }\n  }\n}\n
                                  3. Run the training script using torchrun or baizectl:

                                    torchrun train.py\n

                                    In this way, you can combine PyTorch's distributed training capabilities with DeepSpeed's optimization technologies for more efficient training. You can use the baizectl command to submit a job in a notebook:

                                    baizectl job submit --pytorch --workers 2 -- torchrun train.py\n
                                  "},{"location":"en/admin/baize/developer/index.html","title":"Developer Console","text":"

The developer console is where developers carry out day-to-day work such as AI inference and large model training.

                                  "},{"location":"en/admin/baize/developer/quick-start.html","title":"Quick Start","text":"

This document provides a simple guide for completing the entire development and training workflow on the AI Lab platform, covering datasets, Notebooks, and training jobs.

                                  1. Click Data Management -> Datasets in the navigation bar, then click Create. Create three datasets as follows:

                                    • Code: https://github.com/d-run/drun-samples
                                      • For faster access in China, use Gitee: https://gitee.com/samzong_lu/training-sample-code.git
                                    • Data: https://github.com/zalandoresearch/fashion-mnist
                                      • For faster access in China, use Gitee: https://gitee.com/samzong_lu/fashion-mnist.git
                                    • Empty PVC: Create an empty PVC to output the trained model and logs after training.

                                    Note

                                    Currently, only StorageClass with ReadWriteMany mode is supported. Please use NFS or the recommended JuiceFS.

                                  2. Prepare the development environment by clicking Notebooks in the navigation bar, then click Create. Associate the three datasets created in the previous step and fill in the mount paths as shown in the image below:

                                  3. Wait for the Notebook to be created successfully, click the access link in the list to enter the Notebook. Execute the following command in the Notebook terminal to start the job training.

                                    python /home/jovyan/code/tensorflow/tf-fashion-mnist-sample/train.py\n
4. Click Job Center -> Jobs in the navigation bar, create a TensorFlow Single job. Refer to the image below for job configuration and enable the Job Analysis (TensorBoard) feature. Click Create and wait for the job to complete.

                                    • Image address: release.daocloud.io/baize/jupyter-tensorflow-full:v1.8.0-baize
                                    • Command: python
                                    • Arguments: /home/jovyan/code/tensorflow/tf-fashion-mnist-sample/train.py

                                    Note

                                    For large datasets or models, it is recommended to enable GPU configuration in the resource configuration step.

5. In the job created in the previous step, you can click Job Analysis to view the job status and optimize the training.

                                  "},{"location":"en/admin/baize/developer/dataset/create-use-delete.html","title":"Create, Use and Delete Datasets","text":"

                                  AI Lab provides comprehensive dataset management functions needed for model development, training, and inference processes. Currently, it supports unified access to various data sources.

                                  With simple configurations, you can connect data sources to AI Lab, achieving unified data management, preloading, dataset management, and other functionalities.

                                  "},{"location":"en/admin/baize/developer/dataset/create-use-delete.html#create-a-dataset","title":"Create a Dataset","text":"
                                  1. In the left navigation bar, click Data Management -> Dataset List, and then click the Create button on the right.

                                  2. Select the worker cluster and namespace to which the dataset belongs, then click Next.

                                  3. Configure the data source type for the target data, then click OK.

                                    Currently supported data sources include:

                                    • GIT: Supports repositories such as GitHub, GitLab, and Gitee
                                    • S3: Supports object storage like Amazon Cloud
                                    • HTTP: Directly input a valid HTTP URL
                                    • PVC: Supports pre-created Kubernetes PersistentVolumeClaim
                                    • NFS: Supports NFS shared storage
                                  4. Upon successful creation, the dataset will be returned to the dataset list. You can perform more actions by clicking \u2507 on the right.

                                  Info

                                  The system will automatically perform a one-time data preloading after the dataset is successfully created; the dataset cannot be used until the preloading is complete.

                                  "},{"location":"en/admin/baize/developer/dataset/create-use-delete.html#use-a-dataset","title":"Use a Dataset","text":"

                                  Once the dataset is successfully created, it can be used in tasks such as model training and inference.

                                  "},{"location":"en/admin/baize/developer/dataset/create-use-delete.html#use-in-notebook","title":"Use in Notebook","text":"

When creating a Notebook, you can use the dataset directly in the following ways:

                                  • Use the dataset as training data mount
                                  • Use the dataset as code mount

                                  "},{"location":"en/admin/baize/developer/dataset/create-use-delete.html#use-in-training-obs","title":"Use in Training obs","text":"
                                  • Use the dataset to specify job output
                                  • Use the dataset to specify job input
                                  • Use the dataset to specify TensorBoard output
                                  "},{"location":"en/admin/baize/developer/dataset/create-use-delete.html#use-in-inference-services","title":"Use in Inference Services","text":"
                                  • Use the dataset to mount a model
                                  "},{"location":"en/admin/baize/developer/dataset/create-use-delete.html#delete-a-dataset","title":"Delete a Dataset","text":"

                                  If you find a dataset to be redundant, expired, or no longer needed, you can delete it from the dataset list.

                                  1. Click the \u2507 on the right side of the dataset list, then choose Delete from the dropdown menu.

                                  2. In the pop-up window, confirm the dataset you want to delete, enter the dataset name, and then click Delete.

                                  3. A confirmation message will appear indicating successful deletion, and the dataset will disappear from the list.

                                  Caution

                                  Once a dataset is deleted, it cannot be recovered, so please proceed with caution.

                                  "},{"location":"en/admin/baize/developer/dataset/environments.html","title":"Manage Python Environment Dependencies","text":"

This document guides users through managing environment dependencies on the AI platform. Below are the specific steps and considerations.

                                  1. Overview of Environment Management
                                  2. Create New Environment
                                  3. Configure Environment
                                  4. Troubleshooting
                                  "},{"location":"en/admin/baize/developer/dataset/environments.html#overview","title":"Overview","text":"

                                  Traditionally, Python environment dependencies are built into an image, which includes the Python version and dependency packages. This approach has high maintenance costs and is inconvenient to update, often requiring a complete rebuild of the image.

                                  In AI Lab, users can manage pure environment dependencies through the Environment Management module, decoupling this part from the image. The advantages include:

                                  • One environment can be used in multiple places, such as in Notebooks, distributed training tasks, and even inference services.
                                  • Updating dependency packages is more convenient; you only need to update the environment dependencies without rebuilding the image.

                                  The main components of the environment management are:

                                  • Cluster : Select the cluster to operate on.
                                  • Namespace : Select the namespace to limit the scope of operations.
                                  • Environment List : Displays all environments and their statuses under the current cluster and namespace.

                                  "},{"location":"en/admin/baize/developer/dataset/environments.html#explanation-of-environment-list-fields","title":"Explanation of Environment List Fields","text":"
                                  • Name : The name of the environment.
                                  • Status : The current status of the environment (normal or failed). New environments undergo a warming-up process, after which they can be used in other tasks.
                                  • Creation Time : The time the environment was created.
                                  "},{"location":"en/admin/baize/developer/dataset/environments.html#creat-new-environment","title":"Creat New Environment","text":"

                                  On the Environment Management interface, click the Create button at the top right to enter the environment creation process.

                                  Fill in the following basic information:

                                  • Name : Enter the environment name, with a length of 2-63 characters, starting and ending with lowercase letters or numbers.
                                  • Deployment Location:
                                    • Cluster : Select the cluster to deploy, such as gpu-cluster.
                                    • Namespace : Select the namespace, such as default.
                                  • Remarks (optional): Enter remarks.
                                  • Labels (optional): Add labels to the environment.
                                  • Annotations (optional): Add annotations to the environment.

                                  After completing the information, click Next to proceed to environment configuration.
                                  "},{"location":"en/admin/baize/developer/dataset/environments.html#configure-environment","title":"Configure Environment","text":"

                                  In the environment configuration step, users need to configure the Python version and dependency management tool.

                                  "},{"location":"en/admin/baize/developer/dataset/environments.html#environment-settings","title":"Environment Settings","text":"
                                  • Python Version : Select the required Python version, such as 3.12.3.
                                  • Package Manager : Choose the package management tool, either PIP or CONDA.
                                  • Environment Data (see the example sketches after this list):
                                    • If PIP is selected: Enter the dependency package list in requirements.txt format in the editor below.
                                    • If CONDA is selected: Enter the dependency package list in environment.yaml format in the editor below.
                                  • Other Options (optional):
                                    • Additional pip Index URLs : Configure additional pip index URLs; suitable for internal enterprise private repositories or PIP acceleration sites.
                                    • GPU Configuration : Enable or disable GPU configuration; some GPU-related dependency packages need GPU resources configured during preloading.
                                    • Associated Storage : Select the associated storage configuration; environment dependency packages will be stored in the associated storage. Note: Storage must support ReadWriteMany.
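
                                  For reference, below are minimal sketches of the two dependency formats; the package names and versions are illustrative only, not platform defaults.

                                  A requirements.txt sketch for PIP:

                                    numpy==1.26.4\npandas>=2.0.0\nscikit-learn\n

                                  An environment.yaml sketch for CONDA:

                                    name: my-env\nchannels:\n  - defaults\ndependencies:\n  - python=3.12\n  - numpy=1.26\n  - pip\n  - pip:\n      - scikit-learn\n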

                                  After configuration, click the Create button, and the system will automatically create and configure the new Python environment.

                                  "},{"location":"en/admin/baize/developer/dataset/environments.html#troubleshooting","title":"Troubleshooting","text":"
                                  • If environment creation fails:

                                    • Check if the network connection is normal.
                                    • Verify that the Python version and package manager configuration are correct.
                                    • Ensure the selected cluster and namespace are available.
                                  • If dependency preloading fails:

                                    • Check if the requirements.txt or environment.yaml file format is correct.
                                    • Verify that the dependency package names and versions are correct.

                                  If other issues arise, contact the platform administrator or refer to the platform help documentation for more support.

                                  These are the basic steps and considerations for managing Python dependencies in AI Lab.

                                  "},{"location":"en/admin/baize/developer/inference/models.html","title":"Model Support","text":"

                                  With the rapid iteration of AI Lab, we now support various model inference services. Here you can find information about the supported models.

                                  • AI Lab v0.3.0 introduced model inference services, allowing users to directly use AI Lab's inference services for traditional deep learning models without worrying about model deployment and maintenance.
                                  • AI Lab v0.6.0 supports the full vLLM inference capability, covering many large language models such as Llama, Qwen, ChatGLM, and more.

                                  Note

                                  Inference capability support depends on the AI Lab version.

                                  In AI Lab, you can use GPU types that have been verified by the AI platform. For more details, refer to the GPU Support Matrix.

                                  "},{"location":"en/admin/baize/developer/inference/models.html#triton-inference-server","title":"Triton Inference Server","text":"

                                  Triton Inference Server provides solid support for traditional deep learning models. Currently, AI Lab supports these mainstream inference backends:

                                  Backend Supported Model Formats Description pytorch TorchScript, PyTorch 2.0 formats triton-inference-server/pytorch_backend tensorflow TensorFlow 2.x triton-inference-server/tensorflow_backend vLLM (Deprecated) Models supported by vLLM triton-inference-server/vllm_backend

                                  Danger

                                  Triton's vLLM backend has been deprecated. It is recommended to deploy your large language models using AI Lab's latest vLLM support instead.

                                  "},{"location":"en/admin/baize/developer/inference/models.html#vllm","title":"vLLM","text":"

                                  With vLLM, you can quickly deploy large language models. Here you can see the list of models we support, which generally aligns with the vLLM Supported Models.

                                  • HuggingFace Models: We support most of HuggingFace's models. You can see more models at the HuggingFace Model Hub.
                                  • The vLLM Supported Models list includes supported large language models and vision-language models.
                                  • Models fine-tuned using the vLLM support framework.
                                  "},{"location":"en/admin/baize/developer/inference/models.html#new-features-of-vllm","title":"New Features of vLLM","text":"

                                  Currently, AI Lab also supports some new features when using vLLM as an inference tool:

                                  • Enable LoRA adapters to optimize model inference services.
                                  • Provide an OpenAI-compatible API, making it easy for users to switch to local inference services at low cost (see the sketch below).
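
                                  As a sketch of what this compatibility enables, the standard openai Python client can be pointed at a local inference service; the base_url, api_key, and model name below are placeholders for your own deployment:

                                    from openai import OpenAI\n\n# base_url, api_key, and model are placeholders for your deployment\nclient = OpenAI(base_url=\"http://<ip>:<port>/v1\", api_key=\"<your-api-key>\")\n\nresponse = client.chat.completions.create(\n    model=\"<model-name>\",\n    messages=[{\"role\": \"user\", \"content\": \"Hello!\"}],\n)\nprint(response.choices[0].message.content)\n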
                                  "},{"location":"en/admin/baize/developer/inference/triton-inference.html","title":"Create Inference Service Using Triton Framework","text":"

                                  The AI Lab currently offers Triton and vLLM as inference frameworks. Users can quickly start a high-performance inference service with simple configurations.

                                  Danger

                                  Triton's vLLM backend has been deprecated. It is recommended to deploy your large language models using AI Lab's latest vLLM support instead.

                                  "},{"location":"en/admin/baize/developer/inference/triton-inference.html#introduction-to-triton","title":"Introduction to Triton","text":"

                                  Triton is an open-source inference server developed by NVIDIA, designed to simplify the deployment and inference of machine learning models. It supports a variety of deep learning frameworks, including TensorFlow and PyTorch, enabling users to easily manage and deploy different types of models.

                                  "},{"location":"en/admin/baize/developer/inference/triton-inference.html#prerequisites","title":"Prerequisites","text":"

                                  Prepare model data: Manage the model code in dataset management and ensure that the data is successfully preloaded. The following example uses a PyTorch model for MNIST handwritten digit recognition.

                                  Note

                                  The model to be inferred must adhere to the following directory structure within the dataset:

                                    <model-repository-name>\n  \u2514\u2500\u2500 <model-name>\n     \u2514\u2500\u2500 <version>\n        \u2514\u2500\u2500 <model-definition-file>\n

                                  The directory structure in this example is as follows:

                                      model-repo\n    \u2514\u2500\u2500 mnist-cnn\n        \u2514\u2500\u2500 1\n            \u2514\u2500\u2500 model.pt\n
                                  "},{"location":"en/admin/baize/developer/inference/triton-inference.html#create-inference-service","title":"Create Inference Service","text":"

                                  Currently, form-based creation is supported, allowing you to create services with field prompts in the interface.

                                  "},{"location":"en/admin/baize/developer/inference/triton-inference.html#configure-model-path","title":"Configure Model Path","text":"

                                  The model path model-repo/mnist-cnn/1/model.pt must be consistent with the directory structure of the dataset.

                                  "},{"location":"en/admin/baize/developer/inference/triton-inference.html#model-configuration","title":"Model Configuration","text":""},{"location":"en/admin/baize/developer/inference/triton-inference.html#configure-input-and-output-parameters","title":"Configure Input and Output Parameters","text":"

                                  Note

                                  The first dimension of the input and output parameters defaults to the batch size; setting it to -1 enables automatic calculation of the batch size from the input inference data. The remaining dimensions and the data type must match the model's input.
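
                                  For orientation, the corresponding settings in Triton's native config.pbtxt model configuration format would look roughly like the following hypothetical sketch for the mnist-cnn example; the output name and dims are assumptions, and on this platform the values are entered through the form instead:

                                    name: \"mnist-cnn\"\nbackend: \"pytorch\"\nmax_batch_size: 0\ninput [\n  {\n    name: \"model_input\"\n    data_type: TYPE_FP32\n    dims: [ -1, 1, 32, 32 ]  # first dimension -1: batch size computed from the request\n  }\n]\noutput [\n  {\n    name: \"model_output\"    # hypothetical output name\n    data_type: TYPE_FP32\n    dims: [ -1, 10 ]\n  }\n]\n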

                                  "},{"location":"en/admin/baize/developer/inference/triton-inference.html#configure-environment","title":"Configure Environment","text":"

                                  You can import the environment created in Manage Python Environment Dependencies to serve as the runtime environment for inference.

                                  "},{"location":"en/admin/baize/developer/inference/triton-inference.html#advanced-settings","title":"Advanced Settings","text":""},{"location":"en/admin/baize/developer/inference/triton-inference.html#configure-authentication-policy","title":"Configure Authentication Policy","text":"

                                  Supports API key-based request authentication. Users can customize and add authentication parameters.

                                  "},{"location":"en/admin/baize/developer/inference/triton-inference.html#affinity-scheduling","title":"Affinity Scheduling","text":"

                                  Supports automated affinity scheduling based on GPU resources and other node configurations. It also allows users to customize scheduling policies.

                                  "},{"location":"en/admin/baize/developer/inference/triton-inference.html#access","title":"Access","text":""},{"location":"en/admin/baize/developer/inference/triton-inference.html#api-access","title":"API Access","text":"
                                  • Triton provides a REST-based API, allowing clients to perform model inference via HTTP POST requests.
                                  • Clients can send requests with JSON-formatted bodies containing input data and related metadata.
                                  "},{"location":"en/admin/baize/developer/inference/triton-inference.html#http-access","title":"HTTP Access","text":"
                                  1. Send HTTP POST Request: Use tools like curl or HTTP client libraries (e.g., Python's requests library) to send POST requests to the Triton Server.

                                  2. Set HTTP Headers: These are generated automatically from user settings; include metadata about the model inputs and outputs in the HTTP headers.

                                  3. Construct Request Body: The request body usually contains the input data for inference and model-specific metadata.

                                  "},{"location":"en/admin/baize/developer/inference/triton-inference.html#example-curl-command","title":"Example curl Command","text":"
                                    curl -X POST \"http://<ip>:<port>/v2/models/<inference-name>/infer\" \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\n    \"inputs\": [\n      {\n        \"name\": \"model_input\",            \n        \"shape\": [1, 1, 32, 32],          \n        \"datatype\": \"FP32\",               \n        \"data\": [\n          [0.1234, 0.5678, 0.9101, ... ]  \n        ]\n      }\n    ]\n  }'\n
                                  • <ip> is the host address where the Triton Inference Server is running.
                                  • <port> is the port where the Triton Inference Server is running.
                                  • <inference-name> is the name of the inference service that has been created.
                                  • \"name\" must match the name of the input parameter in the model configuration.
                                  • \"shape\" must match the dims of the input parameter in the model configuration.
                                  • \"datatype\" must match the Data Type of the input parameter in the model configuration.
                                  • \"data\" should be replaced with the actual inference data.

                                  Please note that the above example code needs to be adjusted according to your specific model and environment. The format and content of the input data must also comply with the model's requirements.
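
                                  As a sketch, the same request can also be sent with Python's requests library, using the same placeholders as the curl example above:

                                    import requests\n\n# <ip>, <port>, and <inference-name> are placeholders, as in the curl example\nurl = \"http://<ip>:<port>/v2/models/<inference-name>/infer\"\npayload = {\n    \"inputs\": [\n        {\n            \"name\": \"model_input\",       # must match the input name in the model configuration\n            \"shape\": [1, 1, 32, 32],     # must match the dims in the model configuration\n            \"datatype\": \"FP32\",          # must match the data type in the model configuration\n            \"data\": [[0.1234, 0.5678, 0.9101]]  # replace with actual inference data\n        }\n    ]\n}\n\nresponse = requests.post(url, json=payload, timeout=30)\nresponse.raise_for_status()\nprint(response.json())  # the \"outputs\" field contains the inference results\n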

                                  "},{"location":"en/admin/baize/developer/inference/vllm-inference.html","title":"Create Inference Service Using vLLM Framework","text":"

                                  AI Lab supports using vLLM as an inference service, offering all the capabilities of vLLM while fully adapting to the OpenAI interface definition.

                                  "},{"location":"en/admin/baize/developer/inference/vllm-inference.html#introduction-to-vllm","title":"Introduction to vLLM","text":"

                                  vLLM is a fast and easy-to-use library for LLM inference and serving. It aims to significantly improve the throughput and memory efficiency of language model serving in real-time scenarios. vLLM offers several features for speed and flexibility:

                                  • Continuous batching of incoming requests.
                                  • Efficiently manages attention keys and values memory using PagedAttention.
                                  • Seamless integration with popular HuggingFace models.
                                  • Compatible with OpenAI's API server.
                                  "},{"location":"en/admin/baize/developer/inference/vllm-inference.html#prerequisites","title":"Prerequisites","text":"

                                  Prepare model data: Manage the model code in dataset management and ensure that the data is successfully preloaded.

                                  "},{"location":"en/admin/baize/developer/inference/vllm-inference.html#create-inference-service","title":"Create Inference Service","text":"
                                  1. Select the vLLM inference framework. In the model module selection, choose the pre-created model dataset hdd-models and fill in the path information where the model is located within the dataset.

                                    This guide uses the ChatGLM3 model for creating the inference service.

                                  2. Configure the resources for the inference service and adjust the parameters for running the inference service.

                                    Parameter Name Description GPU Resources Configure GPU resources for inference based on the model scale and cluster resources. Allow Remote Code Controls whether vLLM trusts and executes code from remote sources. LoRA LoRA is a parameter-efficient fine-tuning technique for deep learning models. It reduces the number of parameters and computational complexity by decomposing the original model parameter matrix into low-rank matrices. 1. --lora-modules: Specifies the modules or layers for low-rank approximation. 2. max_lora_rank: Specifies the maximum rank for each adapter layer in the LoRA model. For simpler tasks, a smaller rank value can be chosen, while more complex tasks may require a larger rank value to ensure model performance. 3. max_loras: Indicates the maximum number of LoRA adapters that can be included in the model, customized based on model size and inference complexity. 4. max_cpu_loras: Specifies the maximum number of LoRA adapters that can be handled in a CPU environment. Associated Environment Selects predefined environment dependencies required for inference.

                                    Info

                                    For models that support LoRA parameters, refer to vLLM Supported Models.

                                  3. In the Advanced Configuration, automated affinity scheduling based on GPU resources and other node configurations is supported. Users can also customize scheduling policies.

                                  "},{"location":"en/admin/baize/developer/inference/vllm-inference.html#verify-inference-service","title":"Verify Inference Service","text":"

                                  Once the inference service is created, click the name of the inference service to enter the details and view the API call methods. Verify the execution results using Curl, Python, and Node.js.

                                  Copy the curl command from the details page and execute it in a terminal to send a model inference request; an example request is sketched below.
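
                                  As a hedged sketch of such a request against the OpenAI-compatible interface (the address, port, and model name are placeholders for your deployment):

                                    curl -X POST \"http://<ip>:<port>/v1/chat/completions\" \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\n    \"model\": \"<model-name>\",\n    \"messages\": [{\"role\": \"user\", \"content\": \"Hello!\"}]\n  }'\n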

                                  "},{"location":"en/admin/baize/developer/jobs/create.html","title":"Create Job","text":"

                                  Job management refers to the functionality of creating and managing job lifecycles through job scheduling and control components.

                                  The AI platform's smart computing capability adopts the Kubernetes Job mechanism to schedule various AI inference and training jobs.

                                  1. Click Job Center -> Jobs in the left navigation bar to enter the job list. Click the Create button on the right.

                                  2. The system will pre-fill basic configuration data, including the cluster, namespace, type, queue, and priority. Adjust these parameters and click Next.

                                  3. Configure the URL, runtime parameters, and associated datasets, then click Next.

                                  4. Optionally add labels, annotations, runtime environment variables, and other job parameters. Select a scheduling policy and click Confirm.

                                  5. After the job is successfully created, it can be in one of the following statuses:

                                    • Running
                                    • Queued
                                    • Submission successful, Submission failed
                                    • Successful, Failed
                                  "},{"location":"en/admin/baize/developer/jobs/create.html#next-steps","title":"Next Steps","text":"
                                  • View Job Load
                                  • Delete Job
                                  "},{"location":"en/admin/baize/developer/jobs/delete.html","title":"Delete Job","text":"

                                  If you find a job to be redundant, expired, or no longer needed for any other reason, you can delete it from the job list.

                                  1. Click the \u2507 on the right side of the job in the job list, then choose Delete from the dropdown menu.

                                  2. In the pop-up window, confirm the job you want to delete, enter the job name, and then click Delete.

                                  3. A confirmation message will appear indicating successful deletion, and the job will disappear from the list.

                                  Caution

                                  Once a job is deleted, it cannot be recovered, so please proceed with caution.

                                  "},{"location":"en/admin/baize/developer/jobs/pytorch.html","title":"Pytorch Jobs","text":"

                                  PyTorch is an open-source deep learning framework that provides a flexible environment for training and deployment. A PyTorch job is a job that uses the PyTorch framework.

                                  The AI Lab platform provides support and adaptation for PyTorch jobs. Through a graphical interface, you can quickly create PyTorch jobs and perform model training.

                                  "},{"location":"en/admin/baize/developer/jobs/pytorch.html#job-configuration","title":"Job Configuration","text":"
                                  • Job types support both Pytorch Single and Pytorch Distributed modes.
                                  • The runtime image already supports the PyTorch framework by default, so no additional installation is required.
                                  "},{"location":"en/admin/baize/developer/jobs/pytorch.html#job-runtime-environment","title":"Job Runtime Environment","text":"

                                  Here we use the baize-notebook base image and the associated environment as the basic runtime environment for the job.

                                  To learn how to create an environment, refer to Environments.

                                  "},{"location":"en/admin/baize/developer/jobs/pytorch.html#create-jobs","title":"Create Jobs","text":""},{"location":"en/admin/baize/developer/jobs/pytorch.html#pytorch-single-jobs","title":"Pytorch Single Jobs","text":"
                                  1. Log in to the AI Lab platform, click Job Center in the left navigation bar to enter the Jobs page.
                                  2. Click the Create button in the upper right corner to enter the job creation page.
                                  3. Select the job type as Pytorch Single and click Next .
                                  4. Fill in the job name and description, then click OK .
                                  "},{"location":"en/admin/baize/developer/jobs/pytorch.html#parameters","title":"Parameters","text":"
                                  • Start command: bash
                                  • Command parameters:
                                  import torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n# Define a simple neural network\nclass SimpleNet(nn.Module):\n    def __init__(self):\n        super(SimpleNet, self).__init__()\n        self.fc = nn.Linear(10, 1)\n\n    def forward(self, x):\n        return self.fc(x)\n\n# Create model, loss function, and optimizer\nmodel = SimpleNet()\ncriterion = nn.MSELoss()\noptimizer = optim.SGD(model.parameters(), lr=0.01)\n\n# Generate some random data\nx = torch.randn(100, 10)\ny = torch.randn(100, 1)\n\n# Train the model\nfor epoch in range(100):\n    # Forward pass\n    outputs = model(x)\n    loss = criterion(outputs, y)\n\n    # Backward pass and optimization\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n\n    if (epoch + 1) % 10 == 0:\n        print(f'Epoch [{epoch+1}/100], Loss: {loss.item():.4f}')\n\nprint('Training finished.')\n
                                  "},{"location":"en/admin/baize/developer/jobs/pytorch.html#results","title":"Results","text":"

                                  Once the job is successfully submitted, we can enter the job details to see the resource usage. From the upper right corner, go to Workload Details to view the log output during the training process.

                                  [HAMI-core Warn(1:140244541377408:utils.c:183)]: get default cuda from (null)\n[HAMI-core Msg(1:140244541377408:libvgpu.c:855)]: Initialized\nEpoch [10/100], Loss: 1.1248\nEpoch [20/100], Loss: 1.0486\nEpoch [30/100], Loss: 0.9969\nEpoch [40/100], Loss: 0.9611\nEpoch [50/100], Loss: 0.9360\nEpoch [60/100], Loss: 0.9182\nEpoch [70/100], Loss: 0.9053\nEpoch [80/100], Loss: 0.8960\nEpoch [90/100], Loss: 0.8891\nEpoch [100/100], Loss: 0.8841\nTraining finished.\n[HAMI-core Msg(1:140244541377408:multiprocess_memory_limit.c:468)]: Calling exit handler 1\n
                                  "},{"location":"en/admin/baize/developer/jobs/pytorch.html#pytorch-distributed-jobs","title":"Pytorch Distributed Jobs","text":"
                                  1. Log in to the AI Lab platform, click Job Center in the left navigation bar to enter the Jobs page.
                                  2. Click the Create button in the upper right corner to enter the job creation page.
                                  3. Select the job type as Pytorch Distributed and click Next.
                                  4. Fill in the job name and description, then click OK.
                                  "},{"location":"en/admin/baize/developer/jobs/pytorch.html#parameters_1","title":"Parameters","text":"
                                  • Start command: bash
                                  • Command parameters:
                                  import os\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nclass SimpleModel(nn.Module):\n    def __init__(self):\n        super(SimpleModel, self).__init__()\n        self.fc = nn.Linear(10, 1)\n\n    def forward(self, x):\n        return self.fc(x)\n\ndef train():\n    # Print environment information\n    print(f'PyTorch version: {torch.__version__}')\n    print(f'CUDA available: {torch.cuda.is_available()}')\n    if torch.cuda.is_available():\n        print(f'CUDA version: {torch.version.cuda}')\n        print(f'CUDA device count: {torch.cuda.device_count()}')\n\n    rank = int(os.environ.get('RANK', '0'))\n    world_size = int(os.environ.get('WORLD_SIZE', '1'))\n\n    print(f'Rank: {rank}, World Size: {world_size}')\n\n    # Initialize distributed environment\n    try:\n        if world_size > 1:\n            dist.init_process_group('nccl')\n            print('Distributed process group initialized successfully')\n        else:\n            print('Running in non-distributed mode')\n    except Exception as e:\n        print(f'Error initializing process group: {e}')\n        return\n\n    # Set device\n    try:\n        if torch.cuda.is_available():\n            device = torch.device(f'cuda:{rank % torch.cuda.device_count()}')\n            print(f'Using CUDA device: {device}')\n        else:\n            device = torch.device('cpu')\n            print('CUDA not available, using CPU')\n    except Exception as e:\n        print(f'Error setting device: {e}')\n        device = torch.device('cpu')\n        print('Falling back to CPU')\n\n    try:\n        model = SimpleModel().to(device)\n        print('Model moved to device successfully')\n    except Exception as e:\n        print(f'Error moving model to device: {e}')\n        return\n\n    try:\n        if world_size > 1:\n            ddp_model = DDP(model, device_ids=[rank % torch.cuda.device_count()] if torch.cuda.is_available() else None)\n            print('DDP model created successfully')\n        else:\n            ddp_model = model\n            print('Using non-distributed model')\n    except Exception as e:\n        print(f'Error creating DDP model: {e}')\n        return\n\n    loss_fn = nn.MSELoss()\n    optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)\n\n    # Generate some random data\n    try:\n        data = torch.randn(100, 10, device=device)\n        labels = torch.randn(100, 1, device=device)\n        print('Data generated and moved to device successfully')\n    except Exception as e:\n        print(f'Error generating or moving data to device: {e}')\n        return\n\n    for epoch in range(10):\n        try:\n            ddp_model.train()\n            outputs = ddp_model(data)\n            loss = loss_fn(outputs, labels)\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n\n            if rank == 0:\n                print(f'Epoch {epoch}, Loss: {loss.item():.4f}')\n        except Exception as e:\n            print(f'Error during training epoch {epoch}: {e}')\n            break\n\n    if world_size > 1:\n        dist.destroy_process_group()\n\nif __name__ == '__main__':\n    train()\n
                                  "},{"location":"en/admin/baize/developer/jobs/pytorch.html#number-of-job-replicas","title":"Number of Job Replicas","text":"

                                  Note that PyTorch Distributed training jobs create a group of Master and Worker training Pods; the Master coordinates the training job, while the Workers perform the actual training.

                                  Note

                                  In this demonstration, the Master replica count is 1 and the Worker replica count is 2. Therefore, set the replica count to 3 in the Job Configuration, which is the sum of the Master and Worker replica counts. PyTorch automatically assigns the Master and Worker roles.

                                  "},{"location":"en/admin/baize/developer/jobs/pytorch.html#results_1","title":"Results","text":"

                                  Similarly, we can enter the job details to view the resource usage and the log output of each Pod.

                                  "},{"location":"en/admin/baize/developer/jobs/tensorboard.html","title":"Job Analysis","text":"

                                  AI Lab provides important visualization analysis tools for the model development process, used to display the training process and results of machine learning models. This document introduces the basic concepts of Job Analysis (Tensorboard), its usage in the AI Lab system, and how to configure dataset log content.

                                  Note

                                  Tensorboard is a visualization tool provided by TensorFlow, used to display the training process and results of machine learning models. It can help developers more intuitively understand the training dynamics of their models, analyze model performance, debug issues, and more.

                                  The role and advantages of Tensorboard in the model development process:

                                  • Visualize Training Process : Display metrics such as training and validation loss, and accuracy through charts, helping developers intuitively observe the training effects of the model.
                                  • Debug and Optimize Models : Viewing the weight and gradient distributions of different layers helps developers discover and fix issues in the model.
                                  • Compare Different Experiments : Simultaneously display the results of multiple experiments, making it convenient for developers to compare the effects of different models and hyperparameter configurations.
                                  • Track Training Data : Record the datasets and parameters used during training to ensure the reproducibility of experiments.
                                  "},{"location":"en/admin/baize/developer/jobs/tensorboard.html#how-to-create-tensorboard","title":"How to Create Tensorboard","text":"

                                  In the AI Lab system, we provide a convenient way to create and manage Tensorboard. Here are the specific steps:

                                  "},{"location":"en/admin/baize/developer/jobs/tensorboard.html#enable-tensorboard-when-creating-a-notebook","title":"Enable Tensorboard When Creating a Notebook","text":"
                                  1. Create a Notebook : Create a new Notebook on the AI Lab platform.
                                  2. Enable Tensorboard : On the Notebook creation page, enable the Tensorboard option and specify the dataset and log path.

                                  "},{"location":"en/admin/baize/developer/jobs/tensorboard.html#enable-tensorboard-after-creating-and-completing-a-distributed-job","title":"Enable Tensorboard After Creating and Completing a Distributed Job","text":"
                                  1. Create a Distributed Job : Create a new distributed training job on the AI Lab platform.
                                  2. Configure Tensorboard : On the job configuration page, enable the Tensorboard option and specify the dataset and log path.
                                  3. View Tensorboard After Job Completion : After the job is completed, you can view the Tensorboard link on the job details page. Click the link to see the visualized results of the training process.

                                  "},{"location":"en/admin/baize/developer/jobs/tensorboard.html#directly-reference-tensorboard-in-a-notebook","title":"Directly Reference Tensorboard in a Notebook","text":"

                                  In a Notebook, you can directly start Tensorboard through code. Here is a sample code snippet:

                                  # Import necessary libraries\nimport tensorflow as tf\nimport datetime\n\n# Load and normalize the MNIST dataset (so this snippet is runnable as-is)\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\n# Define log directory\nlog_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n# Create Tensorboard callback\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n\n# Build and compile model\nmodel = tf.keras.models.Sequential([\n    tf.keras.layers.Flatten(input_shape=(28, 28)),\n    tf.keras.layers.Dense(512, activation='relu'),\n    tf.keras.layers.Dropout(0.2),\n    tf.keras.layers.Dense(10, activation='softmax')\n])\n\nmodel.compile(optimizer='adam',\n              loss='sparse_categorical_crossentropy',\n              metrics=['accuracy'])\n\n# Train model and enable Tensorboard callback\nmodel.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test), callbacks=[tensorboard_callback])\n
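
                                  To view the logs inline in the Notebook, you can then load the Tensorboard Jupyter extension; this is a minimal sketch and assumes the tensorboard package is available in the kernel:

                                    # Load the Tensorboard extension and display it inline in the Notebook\n%load_ext tensorboard\n%tensorboard --logdir logs/fit\n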
                                  "},{"location":"en/admin/baize/developer/jobs/tensorboard.html#how-to-configure-dataset-log-content","title":"How to Configure Dataset Log Content","text":"

                                  When using Tensorboard, you can record and configure different datasets and log content. Here are some common configuration methods:

                                  "},{"location":"en/admin/baize/developer/jobs/tensorboard.html#configure-training-and-validation-dataset-logs","title":"Configure Training and Validation Dataset Logs","text":"

                                  While training the model, you can use TensorFlow's tf.summary API to record logs for the training and validation datasets. Here is a sample code snippet:

                                  # Import necessary libraries\nimport tensorflow as tf\n\n# Note: this snippet assumes EPOCHS, train_dataset/val_dataset, train_step/val_step,\n# and the train_loss/train_accuracy/val_loss/val_accuracy metrics are defined elsewhere\n\n# Create log directories\ntrain_log_dir = 'logs/gradient_tape/train'\nval_log_dir = 'logs/gradient_tape/val'\ntrain_summary_writer = tf.summary.create_file_writer(train_log_dir)\nval_summary_writer = tf.summary.create_file_writer(val_log_dir)\n\n# Train model and record logs\nfor epoch in range(EPOCHS):\n    for (x_train, y_train) in train_dataset:\n        # Training step\n        train_step(x_train, y_train)\n        with train_summary_writer.as_default():\n            tf.summary.scalar('loss', train_loss.result(), step=epoch)\n            tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)\n\n    for (x_val, y_val) in val_dataset:\n        # Validation step\n        val_step(x_val, y_val)\n        with val_summary_writer.as_default():\n            tf.summary.scalar('loss', val_loss.result(), step=epoch)\n            tf.summary.scalar('accuracy', val_accuracy.result(), step=epoch)\n
                                  "},{"location":"en/admin/baize/developer/jobs/tensorboard.html#configure-custom-logs","title":"Configure Custom Logs","text":"

                                  In addition to logs for training and validation datasets, you can also record other custom log content such as learning rate and gradient distribution. Here is a sample code snippet:

                                  # Record custom logs\nwith train_summary_writer.as_default():\n    tf.summary.scalar('learning_rate', learning_rate, step=epoch)\n    tf.summary.histogram('gradients', gradients, step=epoch)\n
                                  "},{"location":"en/admin/baize/developer/jobs/tensorboard.html#tensorboard-management","title":"Tensorboard Management","text":"

                                  In AI Lab, Tensorboards created through various methods are uniformly displayed on the job analysis page, making it convenient for users to view and manage.

                                  Users can view information such as the link, status, and creation time of Tensorboard on the job analysis page and directly access the visualized results of Tensorboard through the link.

                                  "},{"location":"en/admin/baize/developer/jobs/tensorflow.html","title":"Tensorflow Jobs","text":"

                                  TensorFlow, like PyTorch, is a highly active open-source deep learning framework that provides a flexible environment for training and deployment.

                                  AI Lab provides support and adaptation for the TensorFlow framework. You can quickly create TensorFlow jobs and conduct model training through graphical operations.

                                  "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#job-configuration","title":"Job Configuration","text":"
                                  • The job types support both Tensorflow Single and Tensorflow Distributed modes.
                                  • The runtime image already supports the TensorFlow framework by default, so no additional installation is required.
                                  "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#job-runtime-environment","title":"Job Runtime Environment","text":"

                                  Here, we use the baize-notebook base image and the associated environment as the basic runtime environment for jobs.

                                  For information on how to create an environment, refer to Environment List.

                                  "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#creating-a-job","title":"Creating a Job","text":""},{"location":"en/admin/baize/developer/jobs/tensorflow.html#example-tfjob-single","title":"Example TFJob Single","text":"
                                  1. Log in to the AI Lab platform and click Job Center in the left navigation bar to enter the Jobs page.
                                  2. Click the Create button in the upper right corner to enter the job creation page.
                                  3. Select the job type as Tensorflow Single and click Next .
                                  4. Fill in the job name and description, then click OK .
                                  "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#pre-warming-the-code-repository","title":"Pre-warming the Code Repository","text":"

                                  Use AI Lab -> Dataset List to create a dataset and pull the code from a remote GitHub repository into the dataset. This way, when creating a job, you can directly select the dataset and mount the code into the job.

                                  Demo code repository address: https://github.com/d-run/training-sample-code/

                                  "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#parameters","title":"Parameters","text":"
                                  • Launch command: Use bash
                                  • Command parameters: Use python /code/tensorflow/tf-single.py
                                  \"\"\"\n  pip install tensorflow numpy\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\n# Create some random data\nx = np.random.rand(100, 1)\ny = 2 * x + 1 + np.random.rand(100, 1) * 0.1\n\n# Create a simple model\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Dense(1, input_shape=(1,))\n])\n\n# Compile the model\nmodel.compile(optimizer='adam', loss='mse')\n\n# Train the model, setting epochs to 10\nhistory = model.fit(x, y, epochs=10, verbose=1)\n\n# Print the final loss\nprint('Final loss: {' + str(history.history['loss'][-1]) +'}')\n\n# Use the model to make predictions\ntest_x = np.array([[0.5]])\nprediction = model.predict(test_x)\nprint(f'Prediction for x=0.5: {prediction[0][0]}')\n
                                  "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#results","title":"Results","text":"

                                  After the job is successfully submitted, you can enter the job details to see the resource usage. From the upper right corner, navigate to Workload Details to view log outputs during the training process.

                                  "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#tfjob-distributed-job","title":"TFJob Distributed Job","text":"
                                  1. Log in to AI Lab and click Job Center in the left navigation bar to enter the Jobs page.
                                  2. Click the Create button in the upper right corner to enter the job creation page.
                                  3. Select the job type as Tensorflow Distributed and click Next.
                                  4. Fill in the job name and description, then click OK.
                                  "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#example-job-introduction","title":"Example Job Introduction","text":"

                                  This job includes three roles: Chief, Worker, and Parameter Server (PS).

                                  • Chief: Responsible for coordinating the training process and saving model checkpoints.
                                  • Worker: Executes the actual model training.
                                  • PS: Used in asynchronous training to store and update model parameters.

                                  Different resources are allocated to different roles. Chief and Worker use GPUs, while PS uses CPUs and larger memory.

                                  "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#parameters_1","title":"Parameters","text":"
                                  • Launch command: Use bash
                                  • Command parameters: Use python /code/tensorflow/tensorflow-distributed.py
                                  import os\nimport json\nimport tensorflow as tf\n\nclass SimpleModel(tf.keras.Model):\n    def __init__(self):\n        super(SimpleModel, self).__init__()\n        self.fc = tf.keras.layers.Dense(1, input_shape=(10,))\n\n    def call(self, x):\n        return self.fc(x)\n\ndef train():\n    # Print environment information\n    print(f\"TensorFlow version: {tf.__version__}\")\n    print(f\"GPU available: {tf.test.is_gpu_available()}\")\n    if tf.test.is_gpu_available():\n        print(f\"GPU device count: {len(tf.config.list_physical_devices('GPU'))}\")\n\n    # Retrieve distributed training information\n    tf_config = json.loads(os.environ.get('TF_CONFIG') or '{}')\n    job_type = tf_config.get('job', {}).get('type')\n    job_id = tf_config.get('job', {}).get('index')\n\n    print(f\"Job type: {job_type}, Job ID: {job_id}\")\n\n    # Set up distributed strategy\n    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()\n\n    with strategy.scope():\n        model = SimpleModel()\n        loss_fn = tf.keras.losses.MeanSquaredError()\n        optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)\n\n    # Generate some random data\n    data = tf.random.normal((100, 10))\n    labels = tf.random.normal((100, 1))\n\n    @tf.function\n    def train_step(inputs, labels):\n        with tf.GradientTape() as tape:\n            predictions = model(inputs)\n            loss = loss_fn(labels, predictions)\n        gradients = tape.gradient(loss, model.trainable_variables)\n        optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n        return loss\n\n    for epoch in range(10):\n        loss = train_step(data, labels)\n        if job_type == 'chief':\n            print(f'Epoch {epoch}, Loss: {loss.numpy():.4f}')\n\nif __name__ == '__main__':\n    train()\n
                                  "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#results_1","title":"Results","text":"

                                  Similarly, you can enter the job details to view the resource usage and log outputs of each Pod.

                                  "},{"location":"en/admin/baize/developer/jobs/view.html","title":"View Job Workloads","text":"

                                  Once a job is created, it will be displayed in the job list.

                                  1. In the job list, click the \u2507 on the right side of a job and select Job Workload Details .

                                  2. A pop-up window will appear asking you to choose which Pod to view. Click Enter .

                                  3. You will be redirected to the container management interface, where you can view the container\u2019s working status, labels and annotations, and any events that have occurred.

                                  4. You can also view detailed logs of the current Pod for the recent period. By default, 100 lines of logs are displayed. To view more detailed logs or to download logs, click the blue Insight text at the top.

                                  5. Additionally, you can use the ... in the upper right corner to view the current Pod's YAML, and to upload or download files. Below is an example of a Pod's YAML.

                                  kind: Pod\napiVersion: v1\nmetadata:\n  name: neko-tensorboard-job-test-202404181843-skxivllb-worker-0\n  namespace: default\n  uid: ddedb6ff-c278-47eb-ae1e-0de9b7c62f8c\n  resourceVersion: '41092552'\n  creationTimestamp: '2024-04-18T10:43:36Z'\n  labels:\n    training.kubeflow.org/job-name: neko-tensorboard-job-test-202404181843-skxivllb\n    training.kubeflow.org/operator-name: pytorchjob-controller\n    training.kubeflow.org/replica-index: '0'\n    training.kubeflow.org/replica-type: worker\n  annotations:\n    cni.projectcalico.org/containerID: 0cfbb9af257d5e69027c603c6cb2d3890a17c4ae1a145748d5aef73a10d7fbe1\n    cni.projectcalico.org/podIP: ''\n    cni.projectcalico.org/podIPs: ''\n    hami.io/bind-phase: success\n    hami.io/bind-time: '1713437016'\n    hami.io/vgpu-devices-allocated: GPU-29d5fa0d-935b-2966-aff8-483a174d61d1,NVIDIA,1024,20:;\n    hami.io/vgpu-devices-to-allocate: ;\n    hami.io/vgpu-node: worker-a800-1\n    hami.io/vgpu-time: '1713437016'\n    k8s.v1.cni.cncf.io/network-status: |-\n      [{\n          \"name\": \"kube-system/calico\",\n          \"ips\": [\n              \"10.233.97.184\"\n          ],\n          \"default\": true,\n          \"dns\": {}\n      }]\n    k8s.v1.cni.cncf.io/networks-status: |-\n      [{\n          \"name\": \"kube-system/calico\",\n          \"ips\": [\n              \"10.233.97.184\"\n          ],\n          \"default\": true,\n          \"dns\": {}\n      }]\n  ownerReferences:\n    - apiVersion: kubeflow.org/v1\n      kind: PyTorchJob\n      name: neko-tensorboard-job-test-202404181843-skxivllb\n      uid: e5a8b05d-1f03-4717-8e1c-4ec928014b7b\n      controller: true\n      blockOwnerDeletion: true\nspec:\n  volumes:\n    - name: 0-dataset-pytorch-examples\n      persistentVolumeClaim:\n        claimName: pytorch-examples\n    - name: kube-api-access-wh9rh\n      projected:\n        sources:\n          - serviceAccountToken:\n              expirationSeconds: 3607\n              path: token\n          - configMap:\n              name: kube-root-ca.crt\n              items:\n                - key: ca.crt\n                  path: ca.crt\n          - downwardAPI:\n              items:\n                - path: namespace\n                  fieldRef:\n                    apiVersion: v1\n                    fieldPath: metadata.namespace\n        defaultMode: 420\n  containers:\n    - name: pytorch\n      image: m.daocloud.io/docker.io/pytorch/pytorch\n      command:\n        - bash\n      args:\n        - '-c'\n        - >-\n          ls -la /root && which pip && pip install pytorch_lightning tensorboard\n          && python /root/Git/pytorch/examples/mnist/main.py\n      ports:\n        - name: pytorchjob-port\n          containerPort: 23456\n          protocol: TCP\n      env:\n        - name: PYTHONUNBUFFERED\n          value: '1'\n        - name: PET_NNODES\n          value: '1'\n      resources:\n        limits:\n          cpu: '4'\n          memory: 8Gi\n          nvidia.com/gpucores: '20'\n          nvidia.com/gpumem: '1024'\n          nvidia.com/vgpu: '1'\n        requests:\n          cpu: '4'\n          memory: 8Gi\n          nvidia.com/gpucores: '20'\n          nvidia.com/gpumem: '1024'\n          nvidia.com/vgpu: '1'\n      volumeMounts:\n        - name: 0-dataset-pytorch-examples\n          mountPath: /root/Git/pytorch/examples\n        - name: kube-api-access-wh9rh\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: 
/dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  restartPolicy: Never\n  terminationGracePeriodSeconds: 30\n  dnsPolicy: ClusterFirst\n  serviceAccountName: default\n  serviceAccount: default\n  nodeName: worker-a800-1\n  securityContext: {}\n  affinity: {}\n  schedulerName: hami-scheduler\n  tolerations:\n    - key: node.kubernetes.io/not-ready\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n    - key: node.kubernetes.io/unreachable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n  priorityClassName: baize-high-priority\n  priority: 100000\n  enableServiceLinks: true\n  preemptionPolicy: PreemptLowerPriority\nstatus:\n  phase: Succeeded\n  conditions:\n    - type: Initialized\n      status: 'True'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:43:36Z'\n      reason: PodCompleted\n    - type: Ready\n      status: 'False'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:46:34Z'\n      reason: PodCompleted\n    - type: ContainersReady\n      status: 'False'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:46:34Z'\n      reason: PodCompleted\n    - type: PodScheduled\n      status: 'True'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:43:36Z'\n  hostIP: 10.20.100.211\n  podIP: 10.233.97.184\n  podIPs:\n    - ip: 10.233.97.184\n  startTime: '2024-04-18T10:43:36Z'\n  containerStatuses:\n    - name: pytorch\n      state:\n        terminated:\n          exitCode: 0\n          reason: Completed\n          startedAt: '2024-04-18T10:43:39Z'\n          finishedAt: '2024-04-18T10:46:34Z'\n          containerID: >-\n            containerd://09010214bcf3315e81d38fba50de3943c9d2b48f50a6cc2e83f8ef0e5c6eeec1\n      lastState: {}\n      ready: false\n      restartCount: 0\n      image: m.daocloud.io/docker.io/pytorch/pytorch:latest\n      imageID: >-\n        m.daocloud.io/docker.io/pytorch/pytorch@sha256:11691e035a3651d25a87116b4f6adc113a27a29d8f5a6a583f8569e0ee5ff897\n      containerID: >-\n        containerd://09010214bcf3315e81d38fba50de3943c9d2b48f50a6cc2e83f8ef0e5c6eeec1\n      started: false\n  qosClass: Guaranteed\n
                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html","title":"baizectl CLI Usage Guide","text":"

                                  baizectl is a command line tool specifically designed for model developers and data scientists within the AI Lab module. It provides a series of commands to help users manage distributed training jobs, check job statuses, manage datasets, and more. It also supports connecting to Kubernetes worker clusters and AI platform workspaces, aiding users in efficiently using and managing Kubernetes platform resources.

                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#installation","title":"Installation","text":"

                                  Currently, baizectl is integrated within AI Lab. Once you create a Notebook, you can directly use baizectl within it.

                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#getting-started","title":"Getting Started","text":""},{"location":"en/admin/baize/developer/notebooks/baizectl.html#basic-information","title":"Basic Information","text":"

                                  The basic format of the baizectl command is as follows:

                                  jovyan@19d0197587cc:/$ baizectl\nAI platform management tool\n\nUsage:\n  baizectl [command]\n\nAvailable Commands:\n  completion  Generate the autocompletion script for the specified shell\n  data        Management datasets\n  help        Help about any command\n  job         Manage jobs\n  login       Login to the platform\n  version     Show cli version\n\nFlags:\n      --cluster string     Cluster name to operate\n  -h, --help               help for baizectl\n      --mode string        Connection mode: auto, api, notebook (default \"auto\")\n  -n, --namespace string   Namespace to use for the operation. If not set, the default Namespace will be used.\n  -s, --server string      access base url\n      --skip-tls-verify    Skip TLS certificate verification\n      --token string       access token\n  -w, --workspace int32    Workspace ID to use for the operation\n\nUse \"baizectl [command] --help\" for more information about a command.\n

                                  The above provides basic information about baizectl. Users can view the help information using baizectl --help, or view the help information for specific commands using baizectl [command] --help.

                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#view-versions","title":"View Versions","text":"

                                  baizectl supports viewing version information using the version command.

                                  (base) jovyan@den-0:~$ baizectl version \nbaizectl version: v0.5.0, commit sha: ac0837c4\n
                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#command-format","title":"Command Format","text":"

                                  The basic format of the baizectl command is as follows:

                                  baizectl [command] [flags]\n

                                  Here, [command] refers to the specific operation command, such as data and job, and [flags] are optional parameters used to specify detailed information about the operation.
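
                                  For example, a hypothetical invocation that lists jobs in a specific namespace and prints the result as JSON, using the flags shown in the help output above:

                                    baizectl job ls -n my-namespace -o json\n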

                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#common-options","title":"Common Options","text":"
                                  • --cluster string: Specify the name of the cluster to operate on.
                                  • -h, --help: Display help information.
                                  • --mode string: Connection mode, optional values are auto, api, notebook (default value is auto).
                                  • -n, --namespace string: Specify the namespace for the operation. If not set, the default namespace will be used.
• -s, --server string: Access base URL of the platform.
                                  • --skip-tls-verify: Skip TLS certificate verification.
• --token string: Access token for authentication.
                                  • -w, --workspace int32: Specify the workspace ID for the operation.
                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#features","title":"Features","text":""},{"location":"en/admin/baize/developer/notebooks/baizectl.html#job-management","title":"Job Management","text":"

                                  baizectl provides a series of commands to manage distributed training jobs, including viewing job lists, submitting jobs, viewing logs, restarting jobs, deleting jobs, and more.

                                  jovyan@19d0197587cc:/$ baizectl job\nManage jobs\n\nUsage:\n  baizectl job [command]\n\nAvailable Commands:\n  delete      Delete a job\n  logs        Show logs of a job\n  ls          List jobs\n  restart     restart a job\n  submit      Submit a job\n\nFlags:\n  -h, --help            help for job\n  -o, --output string   Output format. One of: table, json, yaml (default \"table\")\n      --page int        Page number (default 1)\n      --page-size int   Page size (default -1)\n      --search string   Search query\n      --sort string     Sort order\n      --truncate int    Truncate output to the given length, 0 means no truncation (default 50)\n\nUse \"baizectl job [command] --help\" for more information about a command.\n
                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#submit-training-jobs","title":"Submit Training Jobs","text":"

                                  baizectl supports submitting a job using the submit command. You can view detailed information by using baizectl job submit --help.

(base) jovyan@den-0:~$ baizectl job submit --help\nSubmit a job\n\nUsage:\n  baizectl job submit [flags] -- command ...\n\nAliases:\n  submit, create\n\nExamples:\n# Submit a job to run the command \"torchrun python train.py\"\nbaizectl job submit -- torchrun python train.py\n# Submit a job with 2 workers(each pod use 4 gpus) to run the command \"torchrun python train.py\" and use the image \"pytorch/pytorch:1.8.1-cuda11.1-cudnn8-runtime\"\nbaizectl job submit --image pytorch/pytorch:1.8.1-cuda11.1-cudnn8-runtime --workers 2 --resources nvidia.com/gpu=4 -- torchrun python train.py\n# Submit a tensorflow job to run the command \"python train.py\"\nbaizectl job submit --tensorflow -- python train.py\n\n\nFlags:\n      --annotations stringArray                       The annotations of the job, the format is key=value\n      --auto-load-env                                 It only takes effect when executed in Notebook, the environment variables of the current environment will be automatically read and set to the environment variables of the Job, the specific environment variables to be read can be specified using the BAIZE_MAPPING_ENVS environment variable, the default is PATH,CONDA_*,*PYTHON*,NCCL_*, if set to false, the environment variables of the current environment will not be read. (default true)\n      --commands stringArray                          The default command of the job\n  -d, --datasets stringArray                          The dataset bind to the job, the format is datasetName:mountPath, e.g. mnist:/data/mnist\n  -e, --envs stringArray                              The environment variables of the job, the format is key=value\n  -x, --from-notebook string                          Define whether to read the configuration of the current Notebook and directly create tasks, including images, resources, and dataset.\n                                                      auto: Automatically determine the mode according to the current environment. If the current environment is a Notebook, it will be set to notebook mode.\n                                                      false: Do not read the configuration of the current Notebook.\n                                                      true: Read the configuration of the current Notebook. (default \"auto\")\n  -h, --help                                          help for submit\n      --image string                                  The image of the job, it must be specified if fromNotebook is false.\n  -t, --job-type string                               Job type: PYTORCH, TENSORFLOW, PADDLE (default \"PYTORCH\")\n      --labels stringArray                            The labels of the job, the format is key=value\n      --max-retries int32                             number of retries before marking this job failed\n      --max-run-duration int                          Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it\n      --name string                                   The name of the job, if empty, the name will be generated automatically.\n      --paddle                                        PaddlePaddle Job, has higher priority than --job-type\n      --priority string                               The priority of the job, current support baize-medium-priority, baize-low-priority, baize-high-priority\n      --pvcs stringArray                              The pvcs bind to the job, the format is pvcName:mountPath, e.g. mnist:/data/mnist\n      --pytorch                                       Pytorch Job, has higher priority than --job-type\n      --queue string                                  The queue to used\n      --requests-resources stringArray                Similar to resources, but sets the resources of requests\n      --resources stringArray                         The resources of the job, it is a string in the format of cpu=1,memory=1Gi,nvidia.com/gpu=1, it will be set to the limits and requests of the container.\n      --restart-policy string                         The job restart policy (default \"on-failure\")\n      --runtime-envs baizectl data ls --runtime-env   The runtime environment to use for the job, you can use baizectl data ls --runtime-env to get the runtime environment\n      --shm-size int32                                The shared memory size of the job, default is 0, which means no shared memory, if set to more than 0, the job will use the shared memory, the unit is MiB\n      --tensorboard-log-dir string                    The tensorboard log directory, if set, the job will automatically start tensorboard, else not. The format is /path/to/log, you can use relative path in notebook.\n      --tensorflow                                    Tensorflow Job, has higher priority than --job-type\n      --workers int                                   The workers of the job, default is 1, which means single worker, if set to more than 1, the job will be distributed. (default 1)\n      --working-dir string                            The working directory of job container, if in notebook mode, the default is the directory of the current file\n

                                  Note

                                  Explanation of command parameters for submitting jobs:

                                  • --name: Job name. If empty, it will be auto-generated.
• --image: Image name. It must be specified if --from-notebook is false.
                                  • --priority: Job priority, supporting high=baize-high-priority, medium=baize-medium-priority, low=baize-low-priority.
• --resources: Job resources, formatted as cpu=1,memory=1Gi,nvidia.com/gpu=1.
                                  • --workers: Number of job worker nodes. The default is 1. When set to greater than 1, the job will run in a distributed manner.
                                  • --queue: Job queue. Queue resources need to be created in advance.
                                  • --working-dir: Working directory. In Notebook mode, the current file directory will be used by default.
                                  • --datasets: Dataset, formatted as datasetName:mountPath, for example mnist:/data/mnist.
                                  • --shm-size: Shared memory size. This can be enabled for distributed training jobs, indicating the use of shared memory, with units in MiB.
                                  • --labels: Job labels, formatted as key=value.
• --max-retries: Maximum retry count, that is, how many times a failed job will be restarted. Unlimited by default.
                                  • --max-run-duration: Maximum run duration. The job will be terminated by the system if it exceeds the specified run time. Default is unlimited.
                                  • --restart-policy: Restart policy, supporting on-failure, never, always. The default is on-failure.
                                  • --from-notebook: Whether to read configurations from the Notebook. Supports auto, true, false, with the default being auto.
                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#example-of-a-pytorch-single-node-job","title":"Example of a PyTorch Single-Node Job","text":"

                                  Example of submitting a training job. Users can modify parameters based on their actual needs. Below is an example of creating a PyTorch job:

                                  baizectl job submit --name demojob-v2 -t PYTORCH \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --resources cpu=1,memory=1Gi \\\n    --workers 1 \\\n    --queue default \\\n    --working-dir /data \\\n    --datasets fashion-mnist:/data/mnist \\\n    --labels job_type=pytorch \\\n    --max-retries 3 \\\n    --max-run-duration 60 \\\n    --restart-policy on-failure \\\n    -- sleep 1000\n
                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#pytorch","title":"PyTorch \u5206\u5e03\u5f0f\u4efb\u52a1\u793a\u4f8b","text":"

                                  \u63d0\u4ea4\u8bad\u7ec3\u4efb\u52a1\u793a\u4f8b\uff0c\u7528\u6237\u53ef\u4ee5\u6839\u636e\u5b9e\u9645\u9700\u6c42\u4fee\u6539\u53c2\u6570\uff0c\u4ee5\u4e0b\u4e3a\u521b\u5efa\u4e00\u4e2a PyTorch \u4efb\u52a1\u7684\u793a\u4f8b\uff1a

                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#example-of-a-distributed-pytorch-job","title":"Example of a Distributed PyTorch Job","text":"

Example of submitting a training job. You can modify parameters based on your actual needs. Below is an example of creating a distributed PyTorch job:

# Setting --workers to a value greater than 1 automatically creates a distributed job\nbaizectl job submit --name demojob-v2 -t PYTORCH \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --resources cpu=1,memory=1Gi \\\n    --workers 2 \\\n    --shm-size 1024 \\\n    --queue default \\\n    --working-dir /data \\\n    --datasets fashion-mnist:/data/mnist \\\n    --labels job_type=pytorch \\\n    --max-retries 3 \\\n    --max-run-duration 60 \\\n    --restart-policy on-failure \\\n    -- sleep 1000\n
                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#example-of-a-tensorflow-job","title":"Example of a TensorFlow Job","text":"

                                  Use the -t parameter to specify the job type. Below is an example of creating a TensorFlow job:

                                  baizectl job submit --name demojob-v2 -t TENSORFLOW \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --from-notebook auto \\\n    --workers 1 \\\n    --queue default \\\n    --working-dir /data \\\n    --datasets fashion-mnist:/data/mnist \\\n    --labels job_type=pytorch \\\n    --max-retries 3 \\\n    --max-run-duration 60 \\\n    --restart-policy on-failure \\\n    -- sleep 1000\n

                                  You can also use the --job-type or --tensorflow parameter to specify the job type.
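
For instance, the following sketch uses the --tensorflow boolean flag instead of -t TENSORFLOW (the job name and training command are illustrative):

baizectl job submit --name demotfjob-v2 --tensorflow \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    -- python train.py\n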

                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#example-of-a-paddle-job","title":"Example of a Paddle Job","text":"
                                  baizectl job submit --name demojob-v2 -t PADDLE \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --queue default \\\n    --working-dir /data \\\n    --datasets fashion-mnist:/data/mnist \\\n    --labels job_type=pytorch \\\n    --max-retries 3 \\\n    --max-run-duration 60 \\\n    --restart-policy on-failure \\\n    -- sleep 1000\n
                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#view-job-list","title":"View Job List","text":"

baizectl job supports viewing the job list using the ls command. By default it lists PyTorch jobs, but you can specify another job type using the -t parameter.

                                  (base) jovyan@den-0:~$ baizectl job ls  # View pytorch jobs by default\n NAME        TYPE     PHASE      DURATION  COMMAND    \n demong      PYTORCH  SUCCEEDED  1m2s      sleep 60   \n demo-sleep  PYTORCH  RUNNING    1h25m28s  sleep 7200 \n(base) jovyan@den-0:~$ baizectl job ls demo-sleep  # View a specific job\n NAME        TYPE     PHASE      DURATION  COMMAND     \n demo-sleep  PYTORCH  RUNNING    1h25m28s  sleep 7200 \n(base) jovyan@den-0:~$ baizectl job ls -t TENSORFLOW   # View tensorflow jobs\n NAME       TYPE        PHASE    DURATION  COMMAND    \n demotfjob  TENSORFLOW  CREATED  0s        sleep 1000 \n

The job list is displayed as a table by default. If you want to see more information, specify the json or yaml format using the -o parameter.

                                  (base) jovyan@den-0:~$ baizectl job ls -t TENSORFLOW -o yaml\n- baseConfig:\n    args:\n    - sleep\n    - \"1000\"\n    image: release.daocloud.io/baize/baize-notebook:v0.5.0\n    labels:\n      app: den\n    podConfig:\n      affinity: {}\n      kubeEnvs:\n      - name: CONDA_EXE\n        value: /opt/conda/bin/conda\n      - name: CONDA_PREFIX\n        value: /opt/conda\n      - name: CONDA_PROMPT_MODIFIER\n        value: '(base) '\n      - name: CONDA_SHLVL\n        value: \"1\"\n      - name: CONDA_DIR\n        value: /opt/conda\n      - name: CONDA_PYTHON_EXE\n        value: /opt/conda/bin/python\n      - name: CONDA_PYTHON_EXE\n        value: /opt/conda/bin/python\n      - name: CONDA_DEFAULT_ENV\n        value: base\n      - name: PATH\n        value: /opt/conda/bin:/opt/conda/condabin:/command:/opt/conda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n      priorityClass: baize-high-priority\n      queue: default\n  creationTimestamp: \"2024-06-16T07:47:27Z\"\n  jobSpec:\n    runPolicy:\n      suspend: true\n    tfReplicaSpecs:\n      Worker:\n        replicas: 1\n        restartPolicy: OnFailure\n        template:\n          metadata:\n            creationTimestamp: null\n          spec:\n            affinity: {}\n            containers:\n            - args:\n              - sleep\n              - \"1000\"\n              env:\n              - name: CONDA_EXE\n                value: /opt/conda/bin/conda\n              - name: CONDA_PREFIX\n                value: /opt/conda\n              - name: CONDA_PROMPT_MODIFIER\n                value: '(base) '\n              - name: CONDA_SHLVL\n                value: \"1\"\n              - name: CONDA_DIR\n                value: /opt/conda\n              - name: CONDA_PYTHON_EXE\n                value: /opt/conda/bin/python\n              - name: CONDA_PYTHON_EXE\n                value: /opt/conda/bin/python\n              - name: CONDA_DEFAULT_ENV\n                value: base\n              - name: PATH\n                value: /opt/conda/bin:/opt/conda/condabin:/command:/opt/conda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n              image: release.daocloud.io/baize/baize-notebook:v0.5.0\n              name: tensorflow\n              resources:\n                limits:\n                  memory: 1Gi\n                requests:\n                  cpu: \"1\"\n                  memory: 2Gi\n              workingDir: /home/jovyan\n            priorityClassName: baize-high-priority\n  name: demotfjob\n  namespace: ns-chuanjia-ndx\n  phase: CREATED\n  roleConfig:\n    TF_WORKER:\n      replicas: 1\n      resources:\n        limits:\n          memory: 1Gi\n        requests:\n          cpu: \"1\"\n          memory: 2Gi\n  totalResources:\n    limits:\n      memory: \"1073741824\"\n    requests:\n      cpu: \"1\"\n      memory: \"2147483648\"\n  trainingConfig:\n    restartPolicy: RESTART_POLICY_ON_FAILURE\n  trainingMode: SINGLE\n  type: TENSORFLOW\n
                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#view-job-logs","title":"View Job Logs","text":"

                                  baizectl job supports viewing job logs using the logs command. You can view detailed information by using baizectl job logs --help.

                                  (base) jovyan@den-0:~$ baizectl job logs --help\nShow logs of a job\n\nUsage:\n  baizectl job logs <job-name> [pod-name] [flags]\n\nAliases:\n  logs, log\n\nFlags:\n  -f, --follow            Specify if the logs should be streamed.\n  -h, --help              help for logs\n  -t, --job-type string   Job type: PYTORCH, TENSORFLOW, PADDLE (default \"PYTORCH\")\n      --paddle            PaddlePaddle Job, has higher priority than --job-type\n      --pytorch           Pytorch Job, has higher priority than --job-type\n      --tail int          Lines of recent log file to display.\n      --tensorflow        Tensorflow Job, has higher priority than --job-type\n      --timestamps        Show timestamps\n

                                  Note

                                  • The --follow parameter allows for real-time log viewing.
                                  • The --tail parameter specifies the number of log lines to view, with a default of 50 lines.
                                  • The --timestamps parameter displays timestamps.

                                  Example of viewing job logs:

                                  (base) jovyan@den-0:~$ baizectl job log -t TENSORFLOW tf-sample-job-v2-202406161632-evgrbrhn -f\n2024-06-16 08:33:06.083766: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n2024-06-16 08:33:06.086189: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.\n2024-06-16 08:33:06.132416: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.\n2024-06-16 08:33:06.132903: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\nTo enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n2024-06-16 08:33:07.223046: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\nModel: \"sequential\"\n_________________________________________________________________\n Layer (type)                Output Shape              Param #   \n=================================================================\n Conv1 (Conv2D)              (None, 13, 13, 8)         80        \n\n flatten (Flatten)           (None, 1352)              0         \n\n Softmax (Dense)             (None, 10)                13530     \n\n=================================================================\nTotal params: 13610 (53.16 KB)\nTrainable params: 13610 (53.16 KB)\nNon-trainable params: 0 (0.00 Byte)\n...\n
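
To fetch only the most recent lines with timestamps instead of streaming, a command along these lines can be used (the job name is illustrative):

baizectl job logs demo-sleep --tail 100 --timestamps\n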
                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#delete-jobs","title":"Delete Jobs","text":"

                                  baizectl job supports deleting jobs using the delete command and also supports deleting multiple jobs simultaneously.

                                  (base) jovyan@den-0:~$ baizectl job delete --help\nDelete a job\n\nUsage:\n  baizectl job delete [flags]\n\nAliases:\n  delete, del, remove, rm\n\nFlags:\n  -h, --help              help for delete\n  -t, --job-type string   Job type: PYTORCH, TENSORFLOW, PADDLE (default \"PYTORCH\")\n      --paddle            PaddlePaddle Job, has higher priority than --job-type\n      --pytorch           Pytorch Job, has higher priority than --job-type\n      --tensorflow        Tensorflow Job, has higher priority than --job-type\n

Here is an example of deleting jobs:

                                  (base) jovyan@den-0:~$ baizectl job ls\n NAME        TYPE     PHASE      DURATION  COMMAND    \n demong      PYTORCH  SUCCEEDED  1m2s      sleep 60   \n demo-sleep  PYTORCH  RUNNING    1h20m51s  sleep 7200 \n demojob     PYTORCH  FAILED     16m46s    sleep 1000 \n demojob-v2  PYTORCH  RUNNING    3m13s     sleep 1000 \n demojob-v3  PYTORCH  CREATED    0s        sleep 1000 \n(base) jovyan@den-0:~$ baizectl job delete demojob      # delete a job\nDelete job demojob in ns-chuanjia-ndx successfully\n(base) jovyan@den-0:~$ baizectl job delete demojob-v2 demojob-v3     # delete several jobs\nDelete job demojob-v2 in ns-chuanjia-ndx successfully\nDelete job demojob-v3 in ns-chuanjia-ndx successfully\n
                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#restart-jobs","title":"Restart Jobs","text":"

                                  baizectl job supports restarting jobs using the restart command. You can view detailed information by using baizectl job restart --help.

                                  (base) jovyan@den-0:~$ baizectl job restart --help\nrestart a job\n\nUsage:\n  baizectl job restart [flags] job\n\nAliases:\n  restart, rerun\n\nFlags:\n  -h, --help              help for restart\n  -t, --job-type string   Job type: PYTORCH, TENSORFLOW, PADDLE (default \"PYTORCH\")\n      --paddle            PaddlePaddle Job, has higher priority than --job-type\n      --pytorch           Pytorch Job, has higher priority than --job-type\n      --tensorflow        Tensorflow Job, has higher priority than --job-type\n
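
For example, the following sketch restarts jobs by name (names illustrative; for non-PyTorch jobs, specify the type explicitly):

baizectl job restart demo-sleep\nbaizectl job restart -t TENSORFLOW demotfjob\n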
                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#dataset-management","title":"Dataset Management","text":"

                                  baizectl supports managing datasets. Currently, it supports viewing the dataset list, making it convenient to quickly bind datasets during job training.

                                  (base) jovyan@den-0:~$ baizectl data \nManagement datasets\n\nUsage:\n  baizectl data [flags]\n  baizectl data [command]\n\nAliases:\n  data, dataset, datasets, envs, runtime-envs\n\nAvailable Commands:\n  ls          List datasets\n\nFlags:\n  -h, --help            help for data\n  -o, --output string   Output format. One of: table, json, yaml (default \"table\")\n      --page int        Page number (default 1)\n      --page-size int   Page size (default -1)\n      --search string   Search query\n      --sort string     Sort order\n      --truncate int    Truncate output to the given length, 0 means no truncation (default 50)\n\nUse \"baizectl data [command] --help\" for more information about a command.\n
                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#view-datasets","title":"View Datasets","text":"

                                  baizectl data supports viewing the datasets using the ls command. By default, it displays in table format, but users can specify the output format using the -o parameter.

                                  (base) jovyan@den-0:~$ baizectl data ls\n NAME             TYPE  URI                                                    PHASE \n fashion-mnist    GIT   https://gitee.com/samzong_lu/fashion-mnist.git         READY \n sample-code      GIT   https://gitee.com/samzong_lu/training-sample-code....  READY \n training-output  PVC   pvc://training-output                                  READY \n

                                  When submitting a training job, you can specify the dataset using the -d or --datasets parameter, for example:

                                  baizectl job submit --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --datasets sample-code:/home/jovyan/code \\\n    -- sleep 1000\n

                                  To mount multiple datasets simultaneously, you can use the following format:

                                  baizectl job submit --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --datasets sample-code:/home/jovyan/code fashion-mnist:/home/jovyan/data \\\n    -- sleep 1000\n
                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#view-dependencies-environment","title":"View Dependencies (Environment)","text":"

                                  The environment runtime-env is a unique environment management capability of Suanova. By decoupling the dependencies required for model development, training tasks, and inference, it offers a more flexible way to manage dependencies without the need to repeatedly build complex Docker images. You simply need to select the appropriate environment.

                                  Additionally, runtime-env supports hot updates and dynamic upgrades, allowing you to update environment dependencies without rebuilding the image.

baizectl data supports viewing the environment list using the ls command with the --runtime-env flag. By default, it displays in table format, but you can specify the output format using the -o parameter.

                                  (base) jovyan@den-0:~$ baizectl data ls --runtime-env \n NAME               TYPE   URI                                                    PHASE      \n fashion-mnist      GIT    https://gitee.com/samzong_lu/fashion-mnist.git         READY      \n sample-code        GIT    https://gitee.com/samzong_lu/training-sample-code....  READY      \n training-output    PVC    pvc://training-output                                  READY      \n tensorflow-sample  CONDA  conda://python?version=3.12.3                          PROCESSING \n

                                  When submitting a training job, you can specify the environment using the --runtime-env parameter:

                                  baizectl job submit --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --runtime-env tensorflow-sample \\\n    -- sleep 1000\n
                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#advanced-usage","title":"Advanced Usage","text":"

                                  baizectl supports more advanced usage, such as generating auto-completion scripts, using specific clusters and namespaces, and using specific workspaces.

                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#generating-auto-completion-scripts","title":"Generating Auto-Completion Scripts","text":"
                                  baizectl completion bash > /etc/bash_completion.d/baizectl\n

The above command generates an auto-completion script for bash and saves it as /etc/bash_completion.d/baizectl. You can load the script with source /etc/bash_completion.d/baizectl.
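
If you cannot write to /etc/bash_completion.d, a per-user setup along these lines should also work (the file path here is an assumption, not a fixed convention):

baizectl completion bash > ~/.baizectl-completion.bash  # hypothetical per-user path\necho 'source ~/.baizectl-completion.bash' >> ~/.bashrc  # load it in every new shell\n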

                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#using-specific-clusters-and-namespaces","title":"Using Specific Clusters and Namespaces","text":"
                                  baizectl job ls --cluster my-cluster --namespace my-namespace\n

                                  This command will list all jobs in the my-namespace namespace within the my-cluster cluster.

                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#using-specific-workspaces","title":"Using Specific Workspaces","text":"
                                  baizectl job ls --workspace 123\n
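
This command lists all jobs in the workspace with ID 123. The --workspace flag can be combined with --cluster and --namespace to further scope the query.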
                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#frequently-asked-questions","title":"Frequently Asked Questions","text":"
                                  • Question: Why can't I connect to the server?

                                    Solution: Check if the --server parameter is set correctly and ensure that the network connection is stable. If the server uses a self-signed certificate, you can use --skip-tls-verify to skip TLS certificate verification.

                                  • Question: How can I resolve insufficient permissions issues?

                                    Solution: Ensure that you are using the correct --token parameter to log in and check if the current user has the necessary permissions for the operation.

                                  • Question: Why can't I list the datasets?

                                    Solution: Check if the namespace and workspace are set correctly and ensure that the current user has permission to access these resources.

                                  "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#conclusion","title":"Conclusion","text":"

                                  With this guide, you can quickly get started with baizectl commands and efficiently manage AI platform resources in practical applications. If you have any questions or issues, it is recommended to use baizectl [command] --help to check more detailed information.

                                  "},{"location":"en/admin/baize/developer/notebooks/baizess.html","title":"baizess Source Switch Tool Usage Guide","text":"

                                  baizess is a built-in, out-of-the-box source switch tool within the Notebook of AI Lab module. It provides a streamlined command-line interface to facilitate the management of package sources for various programming environments. With baizess, users can easily switch sources for commonly used package managers, ensuring seamless access to the latest libraries and dependencies. This tool enhances the efficiency of developers and data scientists by simplifying the process of managing package sources.

                                  "},{"location":"en/admin/baize/developer/notebooks/baizess.html#installation","title":"Installation","text":"

                                  Currently, baizess is integrated within AI Lab. Once you create a Notebook, you can directly use baizess within it.

                                  "},{"location":"en/admin/baize/developer/notebooks/baizess.html#getting-started","title":"Getting Started","text":""},{"location":"en/admin/baize/developer/notebooks/baizess.html#basic-information","title":"Basic Information","text":"

                                  The basic information of the baizess command is as follows:

                                  jovyan@19d0197587cc:/$ baizess\nsource switch tool\n\nUsage:\n  baizess [command] [package-manager]\n\nAvailable Commands:\n  set     Switch the source of specified package manager to current fastest source\n  reset   Reset the source of specified package manager to default source\n\nAvailable Package-managers:\n  apt     (require root privilege)\n  conda\n  pip\n
                                  "},{"location":"en/admin/baize/developer/notebooks/baizess.html#command-format","title":"Command Format","text":"

                                  The basic format of the baizess command is as follows:

                                  baizess [command] [package-manager]\n

Here, [command] refers to the specific operation command, and [package-manager] is used to specify the proper package manager for the operation.

                                  "},{"location":"en/admin/baize/developer/notebooks/baizess.html#command","title":"Command","text":"
• set: Back up the current source, run a speed test, and switch the specified package manager's source to the fastest domestic source based on the speed test result.
• reset: Reset the specified package manager to its default source.
                                  "},{"location":"en/admin/baize/developer/notebooks/baizess.html#currently-supported-package-manager","title":"Currently supported package-manager","text":"
• apt (source switch and reset require root privilege; the original source is backed up in /etc/apt/backup/)
• conda (the updated source is written to ~/.condarc)
• pip
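
A typical round trip might look like the following sketch (apt operations require root):

sudo baizess set apt   # switch apt to the fastest mirror\nbaizess set pip        # switch the pip source\nbaizess reset pip      # restore the default pip source\n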
                                  "},{"location":"en/admin/baize/developer/notebooks/create.html","title":"Create Notebook","text":"

                                  Notebook provides an online web interactive programming environment, making it convenient for developers to quickly conduct data science and machine learning experiments.

                                  Upon entering the developer console, developers can create and manage Notebooks in different clusters and namespaces.

                                  1. Click Notebooks in the left navigation bar to enter the Notebook list. Click the Create button on the right.

                                  2. The system will pre-fill basic configuration data, including the cluster, namespace, queue, priority, resources, and job arguments. Adjust these arguments and click OK.

                                  3. The newly created Notebook will initially be in the Pending state, and will change to Running after a moment, with the latest one appearing at the top of the list by default.

                                  4. Click the \u2507 on the right side to perform more actions: update arguments, start/stop, clone Notebook, view workload details, and delete.

                                  Note

                                  If you choose pure CPU resources and find that all GPUs on the node are mounted, you can try adding the following container environment variable to resolve this issue:

                                  NVIDIA_VISIBLE_DEVICES=\"\"\n
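
Setting NVIDIA_VISIBLE_DEVICES to an empty string instructs the NVIDIA container runtime to expose no GPU devices to the container.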
                                  "},{"location":"en/admin/baize/developer/notebooks/delete.html","title":"Delete Notebook","text":"

                                  If you find a Notebook to be redundant, expired, or no longer needed for any other reason, you can delete it from the Notebook list.

                                  1. Click the \u2507 on the right side of the Notebook in the Notebook list, then choose Delete from the dropdown menu.

                                  2. In the pop-up window, confirm the Notebook you want to delete, enter the Notebook name, and then click Delete.

                                  3. A confirmation message will appear indicating successful deletion, and the Notebook will disappear from the list.

                                  Caution

                                  Once a Notebook is deleted, it cannot be recovered, so please proceed with caution.

                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-auto-close.html","title":"Automatic Shutdown of Idle Notebooks","text":"

To optimize resource usage, AI Lab automatically shuts down idle Notebooks after a period of inactivity. This helps free up resources when a Notebook is not in use.

                                  • Advantages: This feature significantly reduces resource waste from long periods of inactivity, enhancing overall efficiency.
                                  • Disadvantages: Without proper backup strategies in place, this may lead to potential data loss.

                                  Note

                                  This feature is enabled by default at the cluster level, with a default timeout of 30 minutes.

                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-auto-close.html#change-configurations","title":"Change Configurations","text":"

                                  Currently, configuration changes must be made manually, but more convenient options will be available in the future.

                                  To modify the deployment parameters of baize-agent in your worker cluster, update the Helm App.

                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-auto-close.html#modify-on-ui","title":"Modify on UI","text":"
1. In the clusters page, locate your worker cluster, go to its details, select Helm Apps, and find baize-agent under the baize-system namespace. Then click Update in the upper right corner.

                                  2. Adjust YAML as shown below:

                                    ...\nnotebook-controller:\n  culling_enabled: false\n  cull_idle_time: 120\n  idleness_check_period: 1\n...\n
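
Note that culling_enabled: false in this snippet disables automatic shutdown entirely; to keep the feature on but change the timeout, set culling_enabled: true and adjust cull_idle_time (in minutes), as in the CLI example below.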
3. After confirming the changes, click Next and then OK.

                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-auto-close.html#modify-on-cli","title":"Modify on CLI","text":"

                                  In the console, use the helm upgrade command to change the configuration:

# Set version number\nexport VERSION=0.8.0\n\n# Update Helm Chart\n# culling_enabled=true enables automatic shutdown (default: true)\n# cull_idle_time sets the idle timeout in minutes (default: 30)\n# idleness_check_period sets the check interval in minutes (default: 1)\nhelm upgrade --install baize-agent baize/baize-agent \\\n    --namespace baize-system \\\n    --create-namespace \\\n    --set global.imageRegistry=release.daocloud.io \\\n    --set notebook-controller.culling_enabled=true \\\n    --set notebook-controller.cull_idle_time=120 \\\n    --set notebook-controller.idleness_check_period=1 \\\n    --version=$VERSION\n

                                  Note

                                  To prevent data loss after an automatic shutdown, upgrade to v0.8.0 or higher and enable the auto-save feature in your notebook configuration.

                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-with-envs.html","title":"Use Environments in Notebooks","text":"

Environment management is one of the key features of AI Lab. By associating environments with a Notebook, you can quickly switch between them, making development and debugging easier.

                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-with-envs.html#select-an-environment-when-creating-a-notebook","title":"Select an Environment When Creating a Notebook","text":"

                                  When creating a Notebook, you can select one or more environments. If there isn\u2019t a suitable environment, you can create a new one in Environments .

                                  For instructions on how to create an environment, refer to Environments.

                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-with-envs.html#use-environments-in-notebooks_1","title":"Use Environments in Notebooks","text":"

                                  Note

In the Notebook, both conda and mamba are provided as environment management tools. You can choose the appropriate tool based on your needs.

                                  In AI Lab, you can use the conda environment management tool. You can view the list of current environments in the Notebook by using the command !conda env list.

                                  (base) jovyan@chuanjia-jupyter-0:~/yolov8$ conda env list\n# conda environments:\n#\ndkj-python312-pure       /opt/baize-runtime-env/dkj-python312-pure/conda/envs/dkj-python312-pure\npython-3.10              /opt/baize-runtime-env/python-3.10/conda/envs/python-3.10\ntorch-smaple             /opt/baize-runtime-env/torch-smaple/conda/envs/torch-smaple\nbase                  *  /opt/conda     # Currently activated environment\nbaize-base               /opt/conda/envs/baize-base\n

                                  This command lists all conda environments and adds an asterisk (*) before the currently activated environment.

                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-with-envs.html#manage-kernel-environment-in-jupyterlab","title":"Manage Kernel Environment in JupyterLab","text":"

In JupyterLab, the environments associated with the Notebook are automatically bound to the Kernel list, allowing you to quickly switch environments through the Kernel.

                                  With this method, you can simultaneously write and debug algorithms in a single Notebook.

                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-with-envs.html#switch-environments-in-a-terminal","title":"Switch Environments in a Terminal","text":"

Notebooks in AI Lab now also support VSCode.

                                  If you prefer managing and switching environments in the Terminal, you can follow these steps:

                                  Upon first starting and using the Notebook, you need to execute conda init, and then run conda activate <env_name> to switch to the proper environment.

                                  (base) jovyan@chuanjia-jupyter-0:~/yolov8$ conda init bash  # Initialize bash environment, only needed for the first use\nno change     /opt/conda/condabin/conda\n change     /opt/conda/bin/conda\n change     /opt/conda/bin/conda-env\n change     /opt/conda/bin/activate\n change     /opt/conda/bin/deactivate\n change     /opt/conda/etc/profile.d/conda.sh\n change     /opt/conda/etc/fish/conf.d/conda.fish\n change     /opt/conda/shell/condabin/Conda.psm1\n change     /opt/conda/shell/condabin/conda-hook.ps1\n change     /opt/conda/lib/python3.11/site-packages/xontrib/conda.xsh\n change     /opt/conda/etc/profile.d/conda.csh\n change     /home/jovyan/.bashrc\n action taken.\nAdded mamba to /home/jovyan/.bashrc\n\n==> For changes to take effect, close and re-open your current shell. <==\n\n(base) jovyan@chuanjia-jupyter-0:~/yolov8$ source ~/.bashrc  # Reload bash environment\n(base) jovyan@chuanjia-jupyter-0:~/yolov8$ conda activate python-3.10   # Switch to python-3.10 environment\n(python-3.10) jovyan@chuanjia-jupyter-0:~/yolov8$ conda env list\n\n              mamba version : 1.5.1\n# conda environments:\n#\ndkj-python312-pure       /opt/baize-runtime-env/dkj-python312-pure/conda/envs/dkj-python312-pure\npython-3.10           *  /opt/baize-runtime-env/python-3.10/conda/envs/python-3.10    # Currently activated environment\ntorch-smaple             /opt/baize-runtime-env/torch-smaple/conda/envs/torch-smaple\nbase                     /opt/conda\nbaize-base               /opt/conda/envs/baize-base\n

                                  If you prefer to use mamba, you will need to use mamba init and mamba activate <env_name>.
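
A minimal sketch of the mamba equivalent, assuming the same environment names as above:

mamba init bash             # only needed on first use\nsource ~/.bashrc            # reload the shell configuration\nmamba activate python-3.10  # switch to the python-3.10 environment\n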

                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-with-envs.html#view-packages-in-environment","title":"View Packages in Environment","text":"

An important benefit of managing multiple environments is that you can use different packages by quickly switching environments within a Notebook.

                                  You can use the command below to view all packages in the current environment using conda.

                                  (python-3.10) jovyan@chuanjia-jupyter-0:~/yolov8$ conda list\n# packages in environment at /opt/baize-runtime-env/python-3.10/conda/envs/python-3.10:\n#\n# Name                    Version                   Build  Channel\n_libgcc_mutex             0.1                        main    defaults\n_openmp_mutex             5.1                       1_gnu    defaults\n... # Output truncated\nidna                      3.7             py310h06a4308_0    defaults\nipykernel                 6.28.0          py310h06a4308_0    defaults\nipython                   8.20.0          py310h06a4308_0    defaults\nipython_genutils          0.2.0              pyhd3eb1b0_1    defaults\njedi                      0.18.1          py310h06a4308_1    defaults\njinja2                    3.1.4           py310h06a4308_0    defaults\njsonschema                4.19.2          py310h06a4308_0    defaults\njsonschema-specifications 2023.7.1        py310h06a4308_0    defaults\njupyter_client            7.4.9           py310h06a4308_0    defaults\njupyter_core              5.5.0           py310h06a4308_0    defaults\njupyter_events            0.8.0           py310h06a4308_0    defaults\njupyter_server            2.10.0          py310h06a4308_0    defaults\njupyter_server_terminals  0.4.4           py310h06a4308_1    defaults\njupyterlab_pygments       0.2.2           py310h06a4308_0    defaults\n... # Output truncated\nxz                        5.4.6                h5eee18b_1    defaults\nyaml                      0.2.5                h7b6447c_0    defaults\nzeromq                    4.3.5                h6a678d5_0    defaults\nzlib                      1.2.13               h5eee18b_1    defaults\n
                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-with-envs.html#update-packages-in-environment","title":"Update Packages in Environment","text":"

                                  Currently, you can update the packages in the environment through the Environment Management UI in AI Lab.

                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html","title":"Notebook SSH Guide","text":"

Notebooks provided by AI Lab support local access via SSH.

With simple configuration, you can use SSH to access a Jupyter Notebook. Whether you are on Windows, Mac, or Linux, you can follow the steps below.

                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#configure-ssh-credentials","title":"Configure SSH Credentials","text":""},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#generate-ssh-key-pair","title":"Generate SSH Key Pair","text":"

                                  First, you need to generate an SSH public and private key pair on your computer. This key pair will be used for the authentication process to ensure secure access.

Mac/Linux:
                                  1. Open the terminal.
                                  2. Enter the command:

                                    ssh-keygen -t rsa -b 4096\n
                                  3. When prompted with \u201cEnter a file in which to save the key,\u201d you can press Enter to use the default path or specify a new path.

                                  4. Next, you will be prompted to enter a passphrase (optional), which adds an extra layer of security. If you choose to enter a passphrase, remember it as you will need it each time you use the key.
Windows:

1. Install Git Bash (if you haven't already).
                                  2. Open Git Bash.
                                  3. Enter the command:

                                    ssh-keygen -t rsa -b 4096\n
                                  4. Follow the same steps as Mac/Linux.

                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#add-ssh-public-key-to-personal-center-optional","title":"Add SSH Public Key to Personal Center (Optional)","text":"
                                  1. Open the generated public key file, usually located at ~/.ssh/id_rsa.pub (if you did not change the default path).
                                  2. Copy the public key content.
                                  3. Log in to the system's personal center.
                                  4. Look for the SSH public key configuration area and paste the copied public key into the designated location.
                                  5. Save the changes.
                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#enable-ssh-in-notebook","title":"Enable SSH in Notebook","text":"
                                  1. Log in to the Jupyter Notebook web interface.
                                  2. Find the Notebook for which you want to enable SSH.
                                  3. In the Notebook's settings or details page, find the option Enable SSH and enable it.
                                  4. Record or copy the displayed SSH access command. This command will be used in subsequent steps for SSH connection.
                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#ssh-in-different-environments","title":"SSH in Different Environments","text":""},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#example","title":"Example","text":"

                                  Assume the SSH command you obtained is as follows:

                                  ssh username@mockhost -p 2222\n

                                  Replace username with your username, mockhost with the actual hostname, and 2222 with the actual port number.

                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#windows","title":"Windows","text":"

                                  It is recommended to use PuTTY or Git Bash for SSH connection.

PuTTY:
                                  1. Open PuTTY.
                                  2. In the Host Name (or IP address) field, enter mockhost (the actual hostname).
                                  3. Enter the port number 2222 (the actual port number).
                                  4. Click Open to start the connection.
                                  5. On the first connection, you may be prompted to verify the server's identity. Click Yes .
Git Bash:

1. Open Git Bash.
                                  2. Enter the ssh command to access your machine:

                                    ssh username@mockhost -p 2222\n
                                  3. Press Enter.

                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#maclinux","title":"Mac/Linux","text":"
                                  1. Open the terminal.
                                  2. Enter the ssh command to access your machine

                                    ssh username@mockhost -p 2222\n
                                  3. If prompted to accept the host's identity, type yes.

                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#remote-development-with-ide","title":"Remote Development with IDE","text":"

                                  In addition to using command line tools for SSH connection, you can also utilize modern IDEs such as Visual Studio Code (VSCode) and PyCharm's SSH remote connection feature to develop locally while utilizing remote server resources.

Using SSH in VSCode:

                                  VSCode supports SSH remote connection through the Remote - SSH extension, allowing you to edit files on the remote server directly in the local VSCode environment and run commands.

                                  Steps:

                                  1. Ensure you have installed VSCode and the Remote - SSH extension.
                                  2. Open VSCode and click the remote resource manager icon at the bottom of the left activity bar.
                                  3. Select Remote-SSH: Connect to Host... and then click + Add New SSH Host...
                                  4. Enter the SSH connection command, for example:

                                    ssh username@mockhost -p 2222\n
                                  5. Press Enter. Replace username, mockhost, and 2222 with your actual username, hostname, and port number.

                                  6. Select a configuration file to save this SSH host, usually the default is fine.

After completing these steps, your SSH host will be added to the SSH target list. Click your host to connect. If it's your first connection, you may be prompted to verify the host's fingerprint. After accepting, you will be asked to enter the passphrase (if your SSH key has one). Once connected, you can edit remote files in VSCode and use remote resources just as if you were developing locally.

Using SSH in PyCharm:

PyCharm Professional Edition supports connecting to remote servers via SSH and directly developing in the local PyCharm.

                                  Steps:

                                  1. Open PyCharm and open or create a project.
                                  2. Select File -> Settings (on Mac, it's PyCharm -> Preferences).
                                  3. In the settings window, navigate to Project: YourProjectName -> Python Interpreter.
                                  4. Click the gear icon in the upper right corner and select Add...

                                    • In the pop-up window, select SSH Interpreter.
                                    • Enter the remote host information: hostname (mockhost), port number (2222), username (username). Replace these placeholders with your actual information.
                                    • Click Next. PyCharm will attempt to connect to the remote server. If the connection is successful, you will be asked to enter the passphrase or select the private key file.
                                  5. Once configured, click Finish. Now, your PyCharm will use the Python interpreter on the remote server.

                                  "},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#security-restrictions","title":"Security Restrictions","text":"

                                  Within the same Workspace, any user can log in to a Notebook with SSH enabled using their own SSH credentials. This means that as long as users have configured their SSH public key in the personal center and the Notebook has enabled SSH, they can use SSH for a secure connection.

                                  Note that permissions for different users may vary depending on the Workspace configuration. Ensure you understand and comply with your organization's security and access policies.

                                  By following the above steps, you should be able to successfully configure and use SSH to access the Jupyter Notebook. If you encounter any issues, refer to the system help documentation or contact the system administrator.

                                  "},{"location":"en/admin/baize/developer/notebooks/start-pause.html","title":"Start and Stop Notebook","text":"

                                  After a Notebook is successfully created, it typically has several states:

                                  • Pending
                                  • Running
                                  • Stopped

                                  If a Notebook is in the Stopped state, click the \u2507 on the right side in the list, then choose Start from the dropdown menu.

                                  This Notebook will move into the running queue, and its status will change to Pending. If everything is normal, its status will change to Running after a moment.

                                  If you have finished using the Notebook, you can choose Stop from the menu to change its status to Stopped.

                                  "},{"location":"en/admin/baize/developer/notebooks/view.html","title":"View Notebook Workload","text":"

                                  If you want to view the workload of a specific Notebook, you can follow these steps:

                                  1. Click the \u2507 on the right side of the Notebook in the Notebook list, then choose Workload Details from the dropdown menu.

                                  2. You will be directed to the StatefulSet list, where you can view:

                                    • The running status, IP address, resource requests, and usage of the Pod containers
                                    • Container configuration information
                                    • Access methods: ClusterIP, NodePort
                                    • Scheduling strategies: node and workload affinity, anti-affinity
                                    • Labels and annotations: key-value pairs of labels and annotations for the workload and Pods
                                    • Autoscaling: support for HPA, CronHPA, and VPA
                                    • Event list: warnings, notifications, and other messages
                                  3. In the StatefulSet list, click the \u2507 on the right side to perform more actions specific to the Pods.
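If you also have kubectl access to the cluster, a rough command-line equivalent of this details page is the following sketch (the namespace and StatefulSet name are placeholders; the actual names depend on how the Notebook was deployed):

# Inspect the StatefulSet backing the Notebook and its configuration\nkubectl -n <namespace> get statefulset <notebook-name> -o wide\nkubectl -n <namespace> describe statefulset <notebook-name>\n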

                                  "},{"location":"en/admin/baize/oam/index.html","title":"Operator","text":"

The Operator view is where IT operations personnel perform the daily management of IT resources and handle workspace tasks.

                                  Here, you can visually understand the current usage status of resources such as clusters, nodes, CPUs, GPUs, and vGPUs.

                                  "},{"location":"en/admin/baize/oam/index.html#glossary","title":"Glossary","text":"
                                  • GPU Allocated: Statistics on the GPU allocation status of all unfinished tasks in the current cluster, calculating the ratio between requested GPUs (Request) and total resources (Total).
                                  • GPU Utilization: Statistics on the actual resource utilization of all running tasks in the current cluster, calculating the ratio between the GPUs actually used (Usage) and the total resources (Total).
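As a worked example of these two ratios: if a cluster has 8 GPUs in total, unfinished tasks request 6 of them, and running tasks actually use 3, then GPU Allocated = 6 / 8 = 75% and GPU Utilization = 3 / 8 = 37.5%.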
                                  "},{"location":"en/admin/baize/oam/resource.html","title":"GPU Management","text":"

GPU Management automatically consolidates GPU resource information across the entire platform, displays detailed GPU device information, and lets you view workload statistics and task execution information for the various GPUs.

                                  After entering Operator, click Resource Management -> GPU Management in the left navigation bar to view GPU and task information.

                                  "},{"location":"en/admin/baize/oam/queue/create.html","title":"Create Queue","text":"

In the Operator mode, queues can be used to schedule batch job workloads, effectively managing multiple tasks running on a cluster and optimizing resource utilization through a queue system.

                                  1. Click Queue Management in the left navigation bar, then click the Create button on the right.

                                  2. The system will pre-fill basic setup data, including the cluster to deploy to, workspace, and queuing policy. Click OK after adjusting these parameters.

                                  3. A confirmation message will appear upon creation, returning you to the queue management list. Click the \u2507 on the right side of the list to perform additional operations such as update or delete.

                                  "},{"location":"en/admin/baize/oam/queue/delete.html","title":"Delete Queue","text":"

                                  In the Operator mode, if you find a queue to be redundant, expired, or no longer needed for any other reason, you can delete it from the queue list.

                                  1. Click the \u2507 on the right side of the queue in the queue list, then choose Delete from the dropdown menu.

                                  2. In the pop-up window, confirm the queue you want to delete, enter the queue name, and then click Delete.

                                  3. A confirmation message will appear indicating successful deletion, and the queue will disappear from the list.

                                  Caution

                                  Once a queue is deleted, it cannot be recovered, so please proceed with caution.

                                  "},{"location":"en/admin/baize/troubleshoot/index.html","title":"Troubleshooting","text":"

This document continuously compiles and organizes errors that may arise from environmental issues or improper operations while using AI Lab, and analyzes and provides solutions for certain errors encountered during use.

                                  Warning

This documentation applies only to the AI platform version of AI Lab. If you encounter issues while using AI Lab, please refer to this troubleshooting guide first.

                                  In AI platform, the module name for AI Lab is baize, which offers one-stop solutions for model training, inference, model management, and more.

                                  "},{"location":"en/admin/baize/troubleshoot/index.html#common-troubleshooting-cases","title":"Common Troubleshooting Cases","text":"
                                  • Cluster Not Found in Drop-Down List
                                  • Notebook Not Controlled by Queue Quotas
                                  • Queue Initialization Failed
                                  "},{"location":"en/admin/baize/troubleshoot/cluster-not-found.html","title":"Cluster Not Found in Drop-Down List","text":""},{"location":"en/admin/baize/troubleshoot/cluster-not-found.html#symptom","title":"Symptom","text":"

In the AI Lab Developer and Operator UI, the desired cluster cannot be found in the drop-down list when you search for a cluster.

                                  "},{"location":"en/admin/baize/troubleshoot/cluster-not-found.html#analysis","title":"Analysis","text":"

                                  If the desired cluster is missing from the cluster drop-down list in AI Lab, it could be due to the following reasons:

                                  • The baize-agent is not installed or failed to install, causing AI Lab to be unable to retrieve cluster information.
                                  • The cluster name was not configured when installing baize-agent, causing AI Lab to be unable to retrieve cluster information.
• Observability components within the worker cluster are abnormal, preventing metrics from being collected from the cluster.
                                  "},{"location":"en/admin/baize/troubleshoot/cluster-not-found.html#solution","title":"Solution","text":""},{"location":"en/admin/baize/troubleshoot/cluster-not-found.html#baize-agent-not-installed-or-failed-to-install","title":"baize-agent not installed or failed to install","text":"

AI Lab requires some basic components to be installed in each worker cluster. If a cluster without the baize-agent were still selectable in the UI, it could lead to unexpected errors.

                                  Therefore, to ensure a good user experience, the selectable cluster range only includes clusters where the baize-agent has been successfully installed.

If the issue is that the baize-agent is not installed or its installation failed, take the following steps:

Navigate to Container Management -> Clusters -> Helm Apps -> Helm Charts , find baize-agent , and install it.

                                  Note

Quickly jump to this address: https://<host>/kpanda/clusters/<cluster_name>/helm/charts/addon/baize-agent. Be sure to replace <host> with the actual console address and <cluster_name> with the actual cluster name.
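To confirm from a terminal whether the agent is already installed, you can list Helm releases across all namespaces (a sketch; the release name may differ in your installation):

helm list -A | grep baize-agent\n

An empty result indicates the agent has not been installed in that cluster.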

                                  "},{"location":"en/admin/baize/troubleshoot/cluster-not-found.html#cluster-name-not-configured-in-the-process-of-installing-baize-agent","title":"Cluster name not configured in the process of installing baize-agent","text":"

When installing the baize-agent, be sure to configure the cluster name. This name is used for Insight metrics collection; it is empty by default and must be configured manually.
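As a hedged sketch, setting the cluster name when installing or upgrading via the Helm CLI might look like the following; the values key clusterName and the namespace used here are assumptions, so check the chart's values.yaml for the real key:

# hypothetical values key; verify against the baize-agent chart\nhelm upgrade --install baize-agent <chart-repo>/baize-agent -n baize-system --set clusterName=<cluster_name>\n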

                                  "},{"location":"en/admin/baize/troubleshoot/cluster-not-found.html#insight-components-in-the-worker-cluster-are-abnormal","title":"Insight components in the worker cluster are abnormal","text":"

                                  If the Insight components in the cluster are abnormal, it might cause AI Lab to be unable to retrieve cluster information. Check if the platform's Insight services are running and configured correctly.

                                  • Check if the insight-server component is running properly in the Global Service Cluster.
                                  • Check if the insight-agent component is running properly in the worker cluster.
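A quick sketch for both checks from a terminal, searching all namespaces since the exact namespace may vary by installation:

# run on the Global Service Cluster and on the worker cluster respectively\nkubectl get deploy -A | grep insight-server\nkubectl get deploy -A | grep insight-agent\n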
                                  "},{"location":"en/admin/baize/troubleshoot/local-queue-initialization-failed.html","title":"Local Queue Initialization Failed","text":""},{"location":"en/admin/baize/troubleshoot/local-queue-initialization-failed.html#issue-description","title":"Issue Description","text":"

When creating a Notebook, training task, or inference service, if the queue is being used for the first time in that namespace, you are prompted to initialize the queue with one click; however, the initialization fails.

                                  "},{"location":"en/admin/baize/troubleshoot/local-queue-initialization-failed.html#issue-analysis","title":"Issue Analysis","text":"

                                  In the AI Lab environment, the queue management capability is provided by Kueue. Kueue provides two types of queue management resources:

                                  • ClusterQueue: A cluster-level queue mainly used to manage resource quotas within the queue, including CPU, memory, and GPU.
                                  • LocalQueue: A namespace-level queue that needs to point to a ClusterQueue for resource allocation within the queue.

                                  In the AI Lab environment, if a service is created and the specified namespace does not have a LocalQueue, there will be a prompt to initialize the queue.

                                  In rare cases, the LocalQueue initialization might fail due to special reasons.
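For reference, the one-click initialization essentially creates a namespace-scoped LocalQueue pointing at a ClusterQueue. A minimal sketch, with all names as placeholders, looks like this:

kubectl apply -f - <<EOF\napiVersion: kueue.x-k8s.io/v1beta1\nkind: LocalQueue\nmetadata:\n  namespace: <your-namespace>\n  name: <local-queue-name>\nspec:\n  clusterQueue: <cluster-queue-name>\nEOF\n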

                                  "},{"location":"en/admin/baize/troubleshoot/local-queue-initialization-failed.html#solution","title":"Solution","text":"

Check whether Kueue is running normally by verifying that the kueue-controller-manager deployment is up, using the following command:

                                  kubectl get deploy kueue-controller-manager -n baize-system\n
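A healthy deployment reports all replicas ready, with output similar to:

NAME                       READY   UP-TO-DATE   AVAILABLE   AGE\nkueue-controller-manager   1/1     1            1           10d\n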

                                  If the kueue-controller-manager is not running properly, fix Kueue first.

                                  "},{"location":"en/admin/baize/troubleshoot/local-queue-initialization-failed.html#references","title":"References","text":"
                                  • ClusterQueue
                                  • LocalQueue
                                  "},{"location":"en/admin/baize/troubleshoot/notebook-not-controlled-by-quotas.html","title":"Notebook Not Controlled by Queue Quota","text":"

                                  In the AI Lab module, when users create a Notebook, they find that even if the selected queue lacks resources, the Notebook can still be created successfully.

                                  "},{"location":"en/admin/baize/troubleshoot/notebook-not-controlled-by-quotas.html#issue-01-unsupported-kubernetes-version","title":"Issue 01: Unsupported Kubernetes Version","text":"
                                  • Analysis:

The queue management capability in AI Lab is provided by Kueue, and the Notebook service is provided through JupyterHub. JupyterHub has strict requirements on the Kubernetes version: on versions below v1.27, even if queue quotas are set in AI platform and users select the quota when creating a Notebook, the Notebook is not actually restricted by the queue quota.

                                  • Solution: Plan in advance. It is recommended to use Kubernetes version v1.27 or above in the production environment.

                                  • Reference: Jupyter Notebook Documentation

                                  "},{"location":"en/admin/baize/troubleshoot/notebook-not-controlled-by-quotas.html#issue-02-configuration-not-enabled","title":"Issue 02: Configuration Not Enabled","text":"
                                  • Analysis:

                                    When the Kubernetes cluster version is greater than v1.27, the Notebook still cannot be restricted by the queue quota.

This is because Kueue's plain-Pod support (enablePlainPod) must be enabled for queue quotas to take effect for the Notebook service.

                                  • Solution: When deploying baize-agent in the worker cluster, enable Kueue support for enablePlainPod.

                                  • Reference: Run Plain Pods as a Kueue-Managed Job
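To verify on a live cluster that the plain-Pod integration is actually on, you can inspect Kueue's configuration. The ConfigMap name below follows the upstream Kueue default and the namespace follows this platform's deployment; both are assumptions to adjust as needed:

# \"pod\" should appear under integrations.frameworks\nkubectl get configmap kueue-manager-config -n baize-system -o yaml | grep -A 5 frameworks\n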

                                  "},{"location":"en/admin/ghippo/password.html","title":"Reset Password","text":"

                                  If you forget your password, you can reset it by following the instructions on this page.

                                  "},{"location":"en/admin/ghippo/password.html#steps-to-reset-password","title":"Steps to Reset Password","text":"

When an administrator first creates a user, they set a username and password for that user. After logging in, the user should fill in an email address and change the password in the Personal Center . If the user has not set an email address, they can only contact the administrator to reset the password.

                                  1. If you forget your password, you can click Forgot your password? on the login interface.

                                  2. Enter your login email and click Submit .

3. Find the password reset email in your mailbox and click the link in the email. The link is valid for 5 minutes.

4. Install an application that supports 2FA dynamic password generation (such as Google Authenticator) on your mobile phone or another device. Set up a dynamic password to activate your account, and click Submit .

                                  5. Set a new password and click Submit . The requirements for setting a new password are consistent with the password rules when creating an account.

6. The password is reset successfully, and you are taken directly to the home page.

                                  "},{"location":"en/admin/ghippo/password.html#reset-password-process","title":"Reset password process","text":"

                                  The flow of the password reset process is as follows.

graph TB\n\npass[Forgot password] --> usern[Enter username]\n--> button[Click button to send an email] --> judge1[Check whether the username is correct]\n\n    judge1 -.Correct.-> judge2[Check whether an email is bound]\n    judge1 -.Wrong.-> tip1[Incorrect username error]\n\n        judge2 -.An email is bound.-> send[Send a reset email]\n        judge2 -.No email bound.-> tip2[No email bound<br>Contact admin to reset password]\n\nsend --> click[Click the email link] --> config[Configure dynamic password] --> reset[Reset password]\n--> success[Successfully reset]\n\nclassDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;\nclassDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;\nclassDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n\nclass pass,usern,button,tip1,send,tip2,click,config,reset,success plain;\nclass judge1,judge2 k8s
                                  "},{"location":"en/admin/ghippo/access-control/custom-role.html","title":"Custom Roles","text":"

                                  AI platform supports the creation of three scopes of custom roles:

• The permissions of a platform role take effect on all relevant resources of the platform
• The permissions of a workspace role take effect on the resources under the workspace where the user is located
• The permissions of a folder role take effect on the folder where the user is located, its subfolders, and the workspace resources under them
                                  "},{"location":"en/admin/ghippo/access-control/custom-role.html#create-a-platform-role","title":"Create a platform role","text":"

                                  A platform role refers to a role that can manipulate features related to a certain module of AI platform (such as container management, microservice engine, Multicloud Management, service mesh, Container registry, Workbench, and global management).

                                  1. From the left navigation bar, click Global Management -> Access Control -> Roles , and click Create Custom Role .

                                  2. Enter the name and description, select Platform Role , check the role permissions and click OK .

                                  3. Return to the role list, search for the custom role you just created, and click \u2507 on the right to perform operations such as copying, editing, and deleting.

                                  4. After the platform role is successfully created, you can go to User/group to add users and groups for this role.

                                  "},{"location":"en/admin/ghippo/access-control/custom-role.html#create-a-workspace-role","title":"Create a workspace role","text":"

                                  A workspace role refers to a role that can manipulate features related to a module (such as container management, microservice engine, Multicloud Management, service mesh, container registry, Workbench, and global management) according to the workspace.

                                  1. From the left navigation bar, click Global Management -> Access Control -> Roles , and click Create Custom Role .

                                  2. Enter the name and description, select Workspace role , check the role permissions and click OK .

                                  3. Return to the role list, search for the custom role you just created, and click \u2507 on the right to perform operations such as copying, editing, and deleting.

                                  4. After the workspace role is successfully created, you can go to Workspace to authorize and set which workspaces this role can manage.

                                  "},{"location":"en/admin/ghippo/access-control/custom-role.html#create-folder-role","title":"Create Folder Role","text":"

A folder role is a role that can manipulate the features of an AI platform module (such as container management, microservice engine, Multicloud Management, service mesh, container registry, Workbench, and global management) according to folders and subfolders.

                                  1. From the left navigation bar, click Global Management -> Access Control -> Roles , and click Create Custom Role .

                                  2. Enter the name and description, select Folder Role , check the role permissions and click OK .

                                  3. Return to the role list, search for the custom role you just created, and click \u2507 on the right to perform operations such as copying, editing, and deleting.

                                  4. After the folder role is successfully created, you can go to Folder to authorize and set which folders this role can manage.

                                  "},{"location":"en/admin/ghippo/access-control/docking.html","title":"Docking Portal","text":"

When two or more platforms need to integrate or embed with each other, user system integration is usually required. During user system integration, the Docking Portal mainly provides SSO (Single Sign-On) capability. If you want to integrate AI platform as a user source into a client platform, you can achieve this by docking a product through Docking Portal .

                                  "},{"location":"en/admin/ghippo/access-control/docking.html#docking-a-product","title":"Docking a product","text":"

                                  Prerequisite: Administrator privileges for the platform or IAM Owner privileges for access control.

                                  1. Log in with an admin, navigate to Access Control , select Docking Portal , enter the Docking Portal list, and click Create SSO Profile in the upper right corner.

                                  2. On the Create SSO Profile page, fill in the Client ID.

3. After the SSO profile is created, in the Docking Portal list, click the newly created Client ID to enter the details. Copy the Client ID, Secret Key, and Single Sign-On URL, and fill them in on the client platform to complete the user system integration.

                                  "},{"location":"en/admin/ghippo/access-control/global.html","title":"System Roles","text":""},{"location":"en/admin/ghippo/access-control/global.html#use-cases","title":"Use cases","text":"

                                  AI platform provides predefined system roles to help users simplify the process of role permission usage.

                                  Note

                                  AI platform provides three types of system roles: platform role, workspace role, and folder role.

• Platform role: has corresponding permissions for all related resources on the platform. Go to the user/group page for authorization.
• Workspace role: has corresponding permissions for a specific workspace. Go to the specific workspace page for authorization.
• Folder role: has corresponding permissions for a specific folder, its subfolders, and the resources under its workspaces. Go to the specific folder page for authorization.
                                  "},{"location":"en/admin/ghippo/access-control/global.html#platform-roles","title":"Platform Roles","text":"

Five system roles are predefined in Access Control: Admin, IAM Owner, Audit Owner, Kpanda Owner, and Workspace and Folder Owner. These five roles are created by the system and cannot be modified by users. The corresponding permissions of each role are as follows:

• Admin (System role, module: All): Platform administrator; manages all platform resources and represents the highest authority of the platform.
• IAM Owner (System role, module: Access Control): Administrator of Access Control; has all permissions under this service, such as managing users/groups and authorization.
• Audit Owner (System role, module: Audit Log): Administrator of Audit Log; has all permissions under this service, such as setting audit log policies and exporting audit logs.
• Kpanda Owner (System role, module: Container Management): Administrator of Container Management; has all permissions under this service, such as creating/accessing clusters, deploying applications, and granting cluster/namespace-related permissions to users/groups.
• Workspace and Folder Owner (System role, module: Workspace and Folder): Administrator of Workspace and Folder; has all permissions under this service, such as creating folders/workspaces, authorizing folder/workspace-related permissions to users/groups, and using features such as Workbench and the microservice engine under the workspace.
"},{"location":"en/admin/ghippo/access-control/global.html#workspace-roles","title":"Workspace Roles","text":"

Three system roles are predefined in Access Control: Workspace Admin, Workspace Editor, and Workspace Viewer. These three roles are created by the system and cannot be modified by users. The corresponding permissions of each role are as follows:

• Workspace Admin (System role, module: Workspace): Administrator of a workspace, with management permission for the workspace.
• Workspace Editor (System role, module: Workspace): Editor of a workspace, with editing permission for the workspace.
• Workspace Viewer (System role, module: Workspace): Viewer of a workspace, with read-only permission for the workspace.
"},{"location":"en/admin/ghippo/access-control/global.html#folder-roles","title":"Folder Roles","text":"

Three system roles are predefined in Access Control: Folder Admin, Folder Editor, and Folder Viewer. These three roles are created by the system and cannot be modified by users. The corresponding permissions of each role are as follows:

• Folder Admin (System role, module: Workspace): Administrator of a folder and its subfolders/workspaces, with management permission.
• Folder Editor (System role, module: Workspace): Editor of a folder and its subfolders/workspaces, with editing permission.
• Folder Viewer (System role, module: Workspace): Viewer of a folder and its subfolders/workspaces, with read-only permission.
"},{"location":"en/admin/ghippo/access-control/group.html","title":"Group","text":"

                                  A group is a collection of users. By joining a group, a user can inherit the role permissions of the group. Authorize users in batches through groups to better manage users and their permissions.

                                  "},{"location":"en/admin/ghippo/access-control/group.html#use-cases","title":"Use cases","text":"

When a user's permissions change, the user only needs to be moved to the proper group, without affecting other users.

                                  When the permissions of a group change, you only need to modify the role permissions of the group to apply to all users in the group.

                                  "},{"location":"en/admin/ghippo/access-control/group.html#create-group","title":"Create group","text":"

                                  Prerequisite: Admin or IAM Owner.

1. Enter Access Control , select Groups to view the group list, and click Create a group in the upper right corner.

                                  2. Fill in the group information on the Create group page.

3. Click OK . The group is created successfully and you return to the group list page, where the newly created group appears as the first entry.

                                  "},{"location":"en/admin/ghippo/access-control/group.html#add-permissions-to-a-group","title":"Add permissions to a group","text":"

                                  Prerequisite: The group already exists.

1. Enter Access Control , select Groups to view the group list, and click \u2507 -> Add permissions .

                                  2. On the Add permissions page, check the required role permissions (multiple choices are allowed).

3. Click OK to add the permissions to the group. You will automatically return to the group list; click a group to view the permissions granted to it.

                                  "},{"location":"en/admin/ghippo/access-control/group.html#add-users-to-a-group","title":"Add users to a group","text":"
1. Enter Access Control , select Groups to display the group list, and on the right side of a group, click \u2507 -> Add Members .

2. On the Add Group Members page, select the users to add (multiple choices are allowed). If no user is available, click Create a new user , create the user first, then return to this page and click the refresh icon to display the newly created user.

                                  3. Click OK to finish adding users to the group.

                                  Note

                                  Users in the group will inherit the permissions of the group; users who join the group can be viewed in the group details.

                                  "},{"location":"en/admin/ghippo/access-control/group.html#delete-group","title":"Delete group","text":"


1. The administrator enters Access Control , selects Groups to enter the group list, and on the right side of a group, clicks \u2507 -> Delete .

                                  2. Click Delete to delete the group.

                                  3. Return to the group list, and the screen will prompt that the deletion is successful.

                                  Note

                                  Deleting a group will not delete the users in the group, but the users in the group will no longer be able to inherit the permissions from the group.

                                  "},{"location":"en/admin/ghippo/access-control/iam.html","title":"What is IAM","text":"

IAM (Identity and Access Management) is an important module of global management. Through the access control module you can create, manage, and delete users (and groups), and use system roles and custom roles to control other users' access to the AI platform.

                                  "},{"location":"en/admin/ghippo/access-control/iam.html#benefits","title":"Benefits","text":"
                                  • Simple and smooth

Structures and roles within an enterprise can be complex, with the management of projects, work groups, and mandates constantly changing. Access control presents the authorization relationships between users, groups, and roles on a clear and tidy page, so users (and groups) can be authorized with the shortest possible path.

                                  • Appropriate role

Access control predefines an administrator role for each sub-module, so no user-maintained roles are required: you can directly grant the platform's predefined system roles to users to achieve modular management of the platform. For fine-grained permissions, please refer to Permission Management.

                                  • Enterprise-grade access control

When you want your company's employees to log in to the AI platform using the company's internal authentication system, without creating corresponding users on the AI platform, you can use the identity provider feature of access control to establish a trust relationship between your company and Suanova. Through joint authentication, employees can log in to the AI platform directly with their existing enterprise accounts, realizing single sign-on.

                                  "},{"location":"en/admin/ghippo/access-control/iam.html#usage-process","title":"Usage Process","text":"

                                  Here is a typical process to perform access control.

                                  graph TD\n    login[Login] --> user[Create User]\n    user --> auth[Authorize User]\n    auth --> group[Create Group]\n    group --> role[Create Custom Role]\n    role --> id[Create Identity Provider]\n\n classDef plain fill:#ddd,stroke:#fff,stroke-width:4px,color:#000;\n classDef k8s fill:#326ce5,stroke:#fff,stroke-width:4px,color:#fff;\n classDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n class login,user,auth,group,role,id cluster;\n\nclick login \"https://docs.daocloud.io/en/ghippo/install/login.html\"\nclick user \"https://docs.daocloud.io/en/ghippo/access-control/user.html\"\nclick auth \"https://docs.daocloud.io/en/ghippo/access-control/role.html\"\nclick group \"https://docs.daocloud.io/en/ghippo/access-control/group.html\"\nclick role \"https://docs.daocloud.io/en/ghippo/access-control/custom-role.html\"\nclick id \"https://docs.daocloud.io/en/ghippo/access-control/idprovider.html\"
                                  "},{"location":"en/admin/ghippo/access-control/idprovider.html","title":"Identity provider","text":"

Global management supports single sign-on based on the LDAP and OIDC protocols. If your enterprise or organization has its own account system and you want to let members of the organization use AI platform resources, you can use the identity provider feature provided by global management instead of creating a username/password for every organization member in the AI platform. You can grant these external user identities permission to use AI platform resources.

                                  "},{"location":"en/admin/ghippo/access-control/idprovider.html#basic-concept","title":"Basic concept","text":"
                                  • Identity Provider (IdP for short)

                                    Responsible for collecting and storing user identity information, usernames, and passwords, and responsible for authenticating users when they log in. In the identity authentication process between an enterprise and AI platform, the identity provider refers to the identity provider of the enterprise itself.

                                  • Service Provider (SP)

The service provider establishes a trust relationship with the identity provider (IdP) and uses the user information provided by the IdP to provide specific services to users. In the process of enterprise authentication with AI platform, the service provider refers to AI platform.

                                  • LDAP

LDAP stands for Lightweight Directory Access Protocol and is often used for single sign-on, meaning a user can log in to multiple services with one account and password. Global management supports LDAP for identity authentication, so an enterprise IdP that establishes identity authentication with AI platform through the LDAP protocol must support LDAP. For a detailed description of LDAP, please refer to: Welcome to LDAP.

                                  • OIDC

                                    OIDC, short for OpenID Connect, is an identity authentication standard protocol based on the OAuth 2.0 protocol. Global management supports the OIDC protocol for identity authentication, so the enterprise IdP that establishes identity authentication with AI platform through the OIDC protocol must support the OIDC protocol. For a detailed description of OIDC, please refer to: Welcome to OpenID Connect.

                                  • OAuth 2.0

OAuth 2.0, short for Open Authorization 2.0, is an open authorization protocol. The authorization framework allows third-party applications to obtain access permissions in their own name.

                                  "},{"location":"en/admin/ghippo/access-control/idprovider.html#features","title":"Features","text":"
                                  • Administrators do not need to recreate AI platform users

Before using an identity provider for authentication, the administrator needs to create an account for each user in both the enterprise management system and AI platform; after adopting an identity provider, the enterprise administrator only needs to create an account in the enterprise management system, and users can access both systems, reducing personnel management costs.

                                  • Users do not need to remember two sets of platform accounts

Before using an identity provider for authentication, users need separate accounts to log in to the enterprise management system and AI platform; afterwards, users only need to log in to the enterprise management system to access both systems.

                                  "},{"location":"en/admin/ghippo/access-control/ldap.html","title":"LDAP","text":"

LDAP, short for Lightweight Directory Access Protocol, is an open and vendor-neutral industry-standard application protocol for accessing and maintaining distributed directory information over IP networks.

                                  If your enterprise or organization has its own account system, and your enterprise user management system supports the LDAP protocol, you can use the identity provider feature based on the LDAP protocol provided by the Global Management instead of creating usernames/passwords for each member in AI platform. You can grant permissions to use AI platform resources to these external user identities.

                                  In Global Management, the operation steps are as follows:

                                  1. Log in to AI platform as a user with admin role. Click Global Management -> Access Control in the lower left corner of the left navigation bar.

                                  2. Click Identity Provider on the left nav bar, click Create an Identity Provider button.

                                  3. In the LDAP tab, fill in the following fields and click Save to establish a trust relationship with the identity provider and a user mapping relationship.

• Vendor: Supports LDAP (Lightweight Directory Access Protocol) and AD (Active Directory)
• Identity Provider Name (UI display name): Used to distinguish different identity providers
• Connection URL: The address and port number of the LDAP service, e.g., ldap://10.6.165.2:30061
• Bind DN: The DN of the LDAP administrator, which Keycloak will use to access the LDAP server
• Bind credentials: The password of the LDAP administrator. This field can retrieve its value from a vault using the ${vault.ID} format.
• Users DN: The full DN of the LDAP tree where your users are located. This DN is the parent of the LDAP users. For example, if the DN of a typical user is similar to \u201cuid=john,ou=users,dc=example,dc=com\u201d, it can be \u201cou=users,dc=example,dc=com\u201d.
• User Object Classes: All values of the LDAP objectClass attribute for users in LDAP, separated by commas. For example: \u201cinetOrgPerson,organizationalPerson\u201d. New Keycloak users will be written to LDAP with all of these object classes, and existing LDAP user records will be found if they contain all of these object classes.
• Enable StartTLS: Encrypts the connection between AI platform and LDAP when enabled
• Default Permission: Users/groups have no permissions by default after synchronization
• Full name mapping: Maps to First name and Last Name
• User Name Mapping: The unique username for the user
• Mailbox Mapping: User email

(For a quick way to sanity-check these connection values from a terminal, see the ldapsearch sketch after these steps.)

                                    Advanced Config

• Enable or not: Enabled by default. When disabled, this LDAP configuration will not take effect.
• Periodic full sync: Disabled by default. When enabled, a sync period can be configured, such as syncing once every hour.
• Edit mode: Read-only mode will not modify the source data in LDAP. Write mode will sync data back to LDAP after user information is edited on the platform.
• Read timeout: Adjusting this value can effectively avoid interface timeouts when the amount of LDAP data is large.
• User LDAP filter: An additional LDAP filter used to filter the search for users. Leave it empty if no additional filter is needed. Ensure it starts with \u201c(\u201d and ends with \u201c)\u201d.
• Username LDAP attribute: The name of the LDAP attribute that maps to the Keycloak username. For many LDAP server vendors it can be \u201cuid\u201d. For Active Directory it can be \u201csAMAccountName\u201d or \u201ccn\u201d. This attribute should be filled in for all LDAP user records you want to import into Keycloak.
• RDN LDAP attribute: The name of the LDAP attribute that serves as the RDN (top-level attribute) of the typical user DN. It is usually the same as the Username LDAP attribute, but this is not required. For example, for Active Directory, when the username attribute might be \u201csAMAccountName\u201d, \u201ccn\u201d is often used as the RDN attribute.
• UUID LDAP attribute: The name of the LDAP attribute used as the unique object identifier (UUID) for objects in LDAP. For many LDAP server vendors it is \u201centryUUID\u201d; however, some differ. For example, for Active Directory it should be \u201cobjectGUID\u201d. If your LDAP server does not support the UUID concept, you can use any other attribute that should be unique among LDAP users in the tree, such as \u201cuid\u201d or \u201centryDN\u201d.
                                  4. On the Sync Groups tab, fill in the following fields to configure the mapping relationship of groups, and click Save again.

• base DN: Location of the group in the LDAP tree. Example: ou=groups,dc=example,dc=org
• Usergroup Object Filter: Object classes for usergroups, separated by commas if more classes are required. In a typical LDAP deployment this is usually \"groupOfNames\"; the system fills it in automatically, and you can edit it if needed. \"*\" means all. Example: *
• group name: cn (unchangeable)
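Before saving, you can sanity-check the connection values with an ldapsearch probe from any machine that can reach the LDAP service. This is only a sketch: the bind DN, password, and base DN below are hypothetical and should be replaced with your own values from the tables above.

# hypothetical bind DN and base DN; replace all values with your own\nldapsearch -H ldap://10.6.165.2:30061 -D \"cn=admin,dc=example,dc=com\" -w <bind-password> -b \"ou=users,dc=example,dc=com\" \"(objectClass=inetOrgPerson)\"\n

If the bind succeeds and user entries are returned, the Bind DN, Bind credentials, Users DN, and User Object Classes values are consistent.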

                                  Note

                                  1. After you have established a trust relationship between the enterprise user management system and AI platform through the LDAP protocol, you can synchronize the users or groups in the enterprise user management system to AI platform at one time through auto/manual synchronization.
2. After synchronization, the administrator can authorize users/groups in batches, and users can log in to AI platform with their account/password from the enterprise user management system.
                                  3. See the LDAP Operations Demo Video for a hands-on tutorial.
                                  "},{"location":"en/admin/ghippo/access-control/oauth2.0.html","title":"OAuth 2.0 - WeCom","text":"

                                  If all members in your enterprise or organization are managed in WeCom, you can use the identity provider feature based on the OAuth 2.0 protocol provided by Global Management, without the need to create a username/password for each organization member in AI platform. You can grant these external user identities permission to use AI platform resources.

                                  "},{"location":"en/admin/ghippo/access-control/oauth2.0.html#steps","title":"Steps","text":"
                                  1. Log in to AI platform with a user who has the admin role. Click Global Management -> Access Control at the bottom of the left navigation bar.

                                  2. Select Identity Providers on the left navigation bar, and click the OAuth 2.0 tab. Fill in the form fields and establish a trust relationship with WeCom, then click Save.

                                  "},{"location":"en/admin/ghippo/access-control/oauth2.0.html#proper-fields-in-wecom","title":"proper fields in WeCom","text":"

                                  Note

                                  Before integration, you need to create a custom application in the WeCom management console. Refer to How to create a custom application link

• Corp ID: ID of WeCom
• Agent ID: ID of the custom application
• ClientSecret: Secret of the custom application

                                  WeCom ID:

                                  Agent ID and ClientSecret:

                                  "},{"location":"en/admin/ghippo/access-control/oidc.html","title":"Create and Manage OIDC","text":"

OIDC (OpenID Connect) is an identity layer built on top of OAuth 2.0 and serves as a standard protocol for identity authentication.

                                  If your enterprise or organization already has its own account system, and your enterprise user management system supports the OIDC protocol, you can use the OIDC protocol-based identity provider feature provided by the Global Management instead of creating usernames/passwords for each member in AI platform. You can grant permissions to use AI platform resources to these external user identities.

                                  The specific operation steps are as follows.

                                  1. Log in to AI platform as a user with admin role. Click Global Management -> Access Control at the bottom of the left navigation bar.

                                  2. On the left nav bar select Identity Provider , click OIDC -> Create an Identity Provider

                                  3. After completing the form fields and establishing a trust relationship with the identity provider, click Save .

• Provider Name: Displayed on the login page as the entry point for the identity provider
• Authentication Method: The client authentication method. If the JWT is signed with a private key, select JWT signed with private key from the dropdown. For details, refer to Client Authentication.
• Client ID: The client ID
• Client Secret: The client secret
• Client URL: One-click access to the login URL, Token URL, user information URL, and logout URL through the identity provider's well-known interface (see the discovery example after the notes below)
• Auto-associate: When enabled, if an identity provider username/email duplicates an AI platform username/email, the two accounts are automatically associated

                                  Note

                                  1. After the user completes the first login to AI platform through the enterprise user management system, the user information will be synchronized to Access Control -> User List of AI platform.
                                  2. Users who log in for the first time will not be given any default permissions and need to be authorized by an administrator (the administrator can be a platform administrator, submodule administrator or resource administrator).
                                  3. For practical tutorials, please refer to OIDC Operation Video Tutorials, or refer to Azure OpenID Connect (OIDC) Access Process.
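The Client URL field above relies on the identity provider's standard OIDC discovery endpoint. As a sketch with a hypothetical issuer address, you can fetch it directly to see the login, token, user information, and logout URLs it advertises:

curl https://idp.example.com/.well-known/openid-configuration\n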
                                  "},{"location":"en/admin/ghippo/access-control/oidc.html#user-identity-authentication-interaction-process","title":"User identity authentication interaction process","text":"

                                  The interactive process of user authentication is as follows:

1. Use a browser to initiate a single sign-on request for AI platform.
2. Based on the information carried in the login link, AI platform looks up the corresponding configuration in Global Management -> Access Control -> Identity Provider , constructs an OIDC authorization Request, and sends it to the browser.
3. After the browser receives the request, it forwards the OIDC authorization Request to the enterprise IdP.
4. Enter the username and password on the login page of the enterprise IdP. The enterprise IdP verifies the provided identity information, constructs an ID token carrying user information, and sends an OIDC authorization Response to the browser.
5. The browser then forwards the OIDC authorization Response to AI platform.
6. AI platform takes the ID Token from the OIDC authorization Response, maps it to a specific user list according to the configured identity conversion rules, and issues the Token.
7. Single sign-on is complete, and you can access AI platform.
                                  "},{"location":"en/admin/ghippo/access-control/role.html","title":"Role and Permission Management","text":"

                                  A role corresponds to a set of permissions that determine the actions that can be performed on resources. Granting a user a role means granting all the permissions included in that role.

AI platform provides three levels of roles, which effectively solve your permission-related issues:

                                  • Platform Roles
                                  • Workspace Roles
                                  • Folder Roles
                                  "},{"location":"en/admin/ghippo/access-control/role.html#platform-roles","title":"Platform Roles","text":"

Platform roles are coarse-grained permissions that grant corresponding permissions to all relevant resources on the platform. By assigning platform roles, users can have permissions to create, delete, modify, and view all clusters and workspaces, but not specifically for a particular cluster or workspace. AI platform provides 5 predefined platform roles that users can use directly:

                                  • Admin
                                  • Kpanda Owner
                                  • Workspace and Folder Owner
                                  • IAM Owner
                                  • Audit Owner

                                  Additionally, AI platform supports the creation of custom platform roles with customized content as needed. For example, creating a platform role that includes all functional permissions in the Workbench. Since the Workbench depends on workspaces, the platform will automatically select the \"view\" permission for workspaces by default. Please do not manually deselect it. If User A is granted this Workbench role, they will automatically have all functional permissions related to the Workbench in all workspaces.

                                  "},{"location":"en/admin/ghippo/access-control/role.html#platform-role-authorization-methods","title":"Platform Role Authorization Methods","text":"

                                  There are three ways to authorize platform roles:

                                  • In the Global Management -> Access Control -> Users section, find the user in the user list, click ... , select Authorization , and grant platform role permissions to the user.

                                  • In the Global Management -> Access Control -> Groups section, create a group in the group list, add the user to the group, and grant authorization to the group (the specific operation is: find the group in the group list, click ... , select Add Permissions , and grant platform roles to the group).

• In the Global Management -> Access Control -> Roles section, find the corresponding platform role in the role list, click the role name to access details, click the Related Members button, select the user or group, and click OK .

                                  "},{"location":"en/admin/ghippo/access-control/role.html#workspace-roles","title":"Workspace Roles","text":"

                                  Workspace roles are fine-grained roles that grant users management permissions, view permissions, or Workbench-related permissions for a specific workspace. Users with these roles can only manage the assigned workspace and cannot access other workspaces. AI platform provides 3 pre-defined workspace roles that users can directly use:

                                  • Workspace Admin
                                  • Workspace Editor
                                  • Workspace Viewer

                                  Moreover, AI platform supports the creation of custom workspace roles with customized content as needed. For example, creating a workspace role that includes all functional permissions in the Workbench. Since the Workbench depends on workspaces, the platform will automatically select the \"view\" permission for workspaces by default. Please do not manually deselect it. If User A is granted this role in Workspace 01, they will have all functional permissions related to the Workbench in Workspace 01.

                                  Note

                                  Unlike platform roles, workspace roles need to be used within the workspace. Once authorized, users will only have the functional permissions of that role within the assigned workspace.

                                  "},{"location":"en/admin/ghippo/access-control/role.html#workspace-role-authorization-methods","title":"Workspace Role Authorization Methods","text":"

                                  In the Global Management -> Workspace and Folder list, find the workspace, click Authorization , and grant workspace role permissions to the user.

                                  "},{"location":"en/admin/ghippo/access-control/role.html#folder-roles","title":"Folder Roles","text":"

Folder roles have a permission granularity between platform roles and workspace roles. They grant users management and view permissions for a specific folder, its sub-folders, and all workspaces within that folder. Folder roles are commonly used in departmental scenarios in enterprises. For example, User B is the leader of a first-level department and usually has management permissions over the first-level department, all second-level departments under it, and the projects within those departments. In this scenario, User B is granted admin permissions for the first-level folder, which also grants corresponding permissions for the second-level folders and workspaces below it. AI platform provides 3 predefined folder roles that users can use directly:

                                  • Folder Admin
                                  • Folder Editor
                                  • Folder Viewer

                                  Additionally, AI platform supports the creation of custom folder roles with customized content as needed. For example, creating a folder role that includes all functional permissions in the Workbench. If User A is granted this role in Folder 01, they will have all functional permissions related to the Workbench in all workspaces within Folder 01.

                                  Note

                                  The functionality of modules depends on workspaces, and folders provide further grouping mechanisms with permission inheritance capabilities. Therefore, folder permissions not only include the folder itself but also its sub-folders and workspaces.

                                  "},{"location":"en/admin/ghippo/access-control/role.html#folder-role-authorization-methods","title":"Folder Role Authorization Methods","text":"

                                  In the Global Management -> Workspace and Folder list, find the folder, click Authorization , and grant folder role permissions to the user.

                                  "},{"location":"en/admin/ghippo/access-control/user.html","title":"User","text":"

                                  A user refers to a user created by the platform administrator Admin or the access control administrator IAM Owner on the Global Management -> Access Control -> Users page, or a user connected through LDAP / OIDC . The username represents the account, and the user logs in to the Suanova Enterprise platform through the username and password.

Having a user account is a prerequisite for accessing the platform. A newly created user has no permissions by default, so you need to assign proper role permissions, for example granting administrator permissions for submodules in User List or User Details . A sub-module administrator has the highest authority within that sub-module and can create, manage, and delete all of its resources. If a user needs to be granted permission for a specific resource, see the Resource Authorization Description.

                                  This page introduces operations such as creating, authorizing, disabling, enabling, and deleting users.

                                  "},{"location":"en/admin/ghippo/access-control/user.html#create-user","title":"Create user","text":"

                                  Prerequisite: You have the platform administrator Admin permission or the access control administrator IAM Admin permission.

                                  1. The administrator enters Access Control , selects Users , enters the user list, and clicks Create User on the upper right.

2. Fill in the username and login password on the Create User page. To create multiple users at once, click Create User to add them in batches; up to 5 users can be created at a time. Decide, based on your situation, whether to require the user to reset the password at first login.

3. Click OK . The user is created successfully and you return to the user list page.

                                  Note

                                  The username and password set here will be used to log in to the platform.

                                  "},{"location":"en/admin/ghippo/access-control/user.html#authorize-for-user","title":"Authorize for User","text":"

                                  Prerequisite: The user already exists.

                                  1. The administrator enters Access Control , selects Users , enters the user list, and clicks \u2507 -> Authorization .

                                  2. On the Authorization page, check the required role permissions (multiple choices are allowed).

                                  3. Click OK to complete the authorization for the user.

                                  Note

                                  In the user list, click a user to enter the user details page.

                                  "},{"location":"en/admin/ghippo/access-control/user.html#add-user-to-group","title":"Add user to group","text":"
                                  1. The administrator enters Access Control , selects Users , enters the user list, and clicks \u2507 -> Add to Group .

2. On the Add to Group page, check the groups to join (multiple choices are allowed). If no suitable group exists, click Create a new group to create one, then return to this page and click the Refresh button to display the newly created group.

                                  3. Click OK to add the user to the group.

                                  Note

                                  The user will inherit the permissions of the group, and you can view the groups that the user has joined in User Details .

                                  "},{"location":"en/admin/ghippo/access-control/user.html#enabledisable-user","title":"Enable/Disable user","text":"

Once a user is disabled, that user can no longer access the platform. Unlike deletion, a disabled user can be re-enabled as needed. It is recommended to disable a user before deleting it, to ensure that no critical service is still using a key created by that user.

                                  1. The administrator enters Access Control , selects Users , enters the user list, and clicks a username to enter user details.

2. Click Edit on the upper right and turn off the status toggle so that it becomes gray and inactive.

                                  3. Click OK to finish disabling the user.

                                  "},{"location":"en/admin/ghippo/access-control/user.html#forgot-password","title":"Forgot password","text":"

Prerequisite: the user's email address must be set. There are two ways to set it:

                                  • On the user details page, the administrator clicks Edit , enters the user's email address in the pop-up box, and clicks OK to complete the email setting.

                                  • Users can also enter the Personal Center and set the email address on the Security Settings page.

                                  If the user forgets the password when logging in, please refer to Reset Password.

                                  "},{"location":"en/admin/ghippo/access-control/user.html#delete-users","title":"Delete users","text":"

                                  Warning

After a user is deleted, that user can no longer access platform resources in any way, so delete with caution. Before deleting a user, make sure your critical services no longer use keys created by that user; if unsure, disable the user first. If you delete a user and later create a new user with the same name, the new user is a separate identity and does not inherit the deleted user's roles.

                                  1. The administrator enters Access Control , selects Users , enters the user list, and clicks \u2507 -> Delete .

                                  2. Click Delete to finish deleting the user.

                                  "},{"location":"en/admin/ghippo/access-control/webhook.html","title":"Webhook Message Notification","text":"

                                  With AI platform integrated into the client's system, you can create Webhooks to send message notifications when users are created, updated, deleted, logged in, or logged out.

                                  Webhook is a mechanism for implementing real-time event notifications. It allows an application to push data or events to another application without the need for polling or continuous querying. By configuring Webhooks, you can specify that the target application receives and processes notifications when a certain event occurs.

                                  The working principle of Webhook is as follows:

                                  1. The source application (AI platform) performs a specific operation or event.
2. The source application packages the relevant data and information into an HTTP request and sends it to the URL specified by the target application (e.g., a WeCom group robot), as simulated in the sketch below.
                                  3. The target application receives the request and processes it based on the data and information provided.
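For illustration, step 2 above can be simulated with curl to test a receiving endpoint. This is a minimal sketch only: the URL and payload fields are placeholders, not the exact request AI platform sends.

# Simulate the webhook POST that the source application sends (placeholder URL and payload)\ncurl -X POST -H \"Content-Type: application/json\" -d '{\"username\": \"test-user\", \"action\": \"created\"}' https://example.com/webhook/receiver\n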

                                  By using Webhooks, you can achieve the following functionalities:

                                  • Real-time notification: Notify other applications in a timely manner when a specific event occurs.
                                  • Automation: The target application can automatically trigger predefined operations based on the received Webhook requests, eliminating the need for manual intervention.
                                  • Data synchronization: Use Webhooks to pass data from one application to another, enabling synchronized updates.

                                  Common use cases include:

                                  • Version control systems (e.g., GitHub, GitLab): Automatically trigger build and deployment operations when code repositories change.
                                  • E-commerce platforms: Send update notifications to logistics systems when order statuses change.
                                  • Chatbot platforms: Push messages to target servers via Webhooks for processing when user messages are received.
                                  "},{"location":"en/admin/ghippo/access-control/webhook.html#configuration-steps","title":"Configuration Steps","text":"

                                  The steps to configure Webhooks in AI platform are as follows:

1. On the left nav, click Global Management -> Access Control -> Docking Portal , then create a client ID.

                                  2. Click a client ID to enter the details page, then click the Create Webhook button.

                                  3. Fill in the field information in the popup window and click OK .

                                    • Object: Currently only supports the User object.
                                    • Action: Send Webhook messages when users are created/updated/deleted/logged in or out.
                                    • URL: The address to receive the messages.
  • Method: Choose the method as required; for a WeCom group robot, POST is recommended.
  • Advanced Configuration: You can write the message body in JSON format. For WeCom groups, refer to the Group Robot configuration guide.

                                  4. A screen prompt indicates that the Webhook was created successfully.

                                  5. Now try creating a user.

6. After the user is created successfully, you can see that the WeCom group received the notification message.

                                  "},{"location":"en/admin/ghippo/access-control/webhook.html#advanced-configuration-example","title":"Advanced Configuration Example","text":"

                                  Default Message Body

                                  AI platform predefines some variables that you can use in the message body based on your needs.

                                  {\n  \"id\": \"{{$$.ID$$}}\",\n  \"email\": \"{{$$.Email$$}}\",\n  \"username\": \"{{$$.Name$$}}\",\n  \"last_name\": \"{{$$.LastName$$}}\",\n  \"first_name\": \"{{$$.FirstName$$}}\",\n  \"created_at\": \"{{$$.CreatedAt$$}}\",\n  \"enabled\": \"{{$$.Enabled$$}}\"\n}\n

                                  Message Body for WeCom Group Robot

                                  {\n    \"msgtype\": \"text\",\n    \"text\": {\n      \"content\": \"{{$$.Name$$}} hello world\"\n    }\n}\n
                                  "},{"location":"en/admin/ghippo/audit/audit-log.html","title":"Audit log","text":"

                                  Audit logs help you monitor and record the activities of each user, and provide features for collecting, storing and querying security-related records arranged in chronological order. With the audit log service, you can continuously monitor and retain user behaviors in the Global Management module, including but not limited to user creation, user login/logout, user authorization, and user operations related to Kubernetes.

                                  "},{"location":"en/admin/ghippo/audit/audit-log.html#features","title":"Features","text":"

                                  The audit log feature has the following characteristics:

• Out of the box: The audit log feature is enabled by default at installation and automatically records user-related actions such as user creation, authorization, and login/logout. By default, 365 days of user behavior can be viewed within the platform.

• Security analysis: The audit log records user operations in detail and provides an export function. These events help you determine whether an account is at risk.

• Real-time recording: Operation events are collected quickly and can be traced in the audit log list right after a user acts, so suspicious behavior can be spotted at any time.

• Convenient and reliable: Audit logs support manual and automatic cleanup, and the cleanup policy can be configured according to your storage capacity.

                                  "},{"location":"en/admin/ghippo/audit/audit-log.html#view-audit-logs","title":"View Audit Logs","text":"
                                  1. Log in to AI platform with a user account that has the admin or Audit Owner role.

                                  2. At the bottom of the left navigation bar, click Global Management -> Audit Logs .

                                  "},{"location":"en/admin/ghippo/audit/audit-log.html#user-operations","title":"User operations","text":"

                                  On the User operations tab, you can search for user operation events by time range, or by using fuzzy or exact search.

                                  Click the \u2507 icon on the right side of an event to view its details.

                                  The event details are shown in the following figure.

Click Export in the upper right corner to export the user operation logs within the selected time range in CSV or Excel format.

                                  "},{"location":"en/admin/ghippo/audit/audit-log.html#system-operations","title":"System operations","text":"

                                  On the System operations tab, you can search for system operation events by time range, or by using fuzzy or exact search.

                                  Similarly, click the \u2507 icon on the right side of an event to view its details.

Click Export in the upper right corner to export the system operation logs within the selected time range in CSV or Excel format.

                                  "},{"location":"en/admin/ghippo/audit/audit-log.html#settings","title":"Settings","text":"

                                  On the Settings tab, you can clean up audit logs for user operations and system operations.

                                  You can manually clean up the logs, but it is recommended to export and save them before cleaning. You can also set the maximum retention time for the logs to automatically clean them up.

                                  Note

The Kubernetes-related audit logs in the auditing module are provided by the Insight module. To reduce storage pressure, Global Management does not collect Kubernetes-related logs by default. If you need to record them, refer to Enabling K8s Audit Logs. Once enabled, the cleanup function works the same as the Global Management cleanup function, but the two do not affect each other.

                                  "},{"location":"en/admin/ghippo/audit/open-audit.html","title":"Enable/Disable collection of audit logs","text":"
                                  • Kubernetes Audit Logs: Kubernetes itself generates audit logs. When this feature is enabled, audit log files for Kubernetes will be created in the specified directory.
• Collecting Kubernetes Audit Logs: The log files mentioned above are collected by the Insight Agent. The prerequisites for collection are that the cluster has Kubernetes audit logs enabled, export of the audit logs is allowed, and collection of the audit logs is turned on.
                                  "},{"location":"en/admin/ghippo/audit/open-audit.html#ai-platform-installation-status","title":"AI platform Installation Status","text":"
• For AI Community installations, the Kubernetes audit log switch is not touched during management cluster installation.
                                  • For AI platform Enterprise installations, the Kubernetes audit log switch is enabled by default.
  • To make it off by default, you can modify the installer's clusterConfig.yaml file (set logPath to empty \"\"), as sketched below this list.
• The switch for collecting Kubernetes audit logs is disabled by default for the management cluster.
  • This default does not support configuration.
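A minimal sketch of that clusterConfig.yaml change, assuming logPath is a top-level key; check your installer version's file for the exact key location:

# clusterConfig.yaml (sketch): an empty logPath turns the Kubernetes audit log switch off\nlogPath: \"\"\n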
                                  "},{"location":"en/admin/ghippo/audit/open-audit.html#management-cluster-collection-of-kubernetes-audit-logs-switch","title":"Management Cluster Collection of Kubernetes Audit Logs Switch","text":""},{"location":"en/admin/ghippo/audit/open-audit.html#ai-platform-enterprise-installation-environment","title":"AI platform Enterprise Installation Environment","text":""},{"location":"en/admin/ghippo/audit/open-audit.html#confirm-enabling-kubernetes-audit-logs","title":"Confirm Enabling Kubernetes Audit Logs","text":"

                                  Run the following command to check if audit logs are generated under the /var/log/kubernetes/audit directory. If they exist, it means that Kubernetes audit logs are successfully enabled.

                                  ls /var/log/kubernetes/audit\n

                                  If they are not enabled, please refer to the documentation on enabling/disabling Kubernetes audit logs.

                                  "},{"location":"en/admin/ghippo/audit/open-audit.html#enable-collection-of-kubernetes-audit-logs-process","title":"Enable Collection of Kubernetes Audit Logs Process","text":"
                                  1. Add ChartMuseum to the helm repo.

                                    helm repo add chartmuseum http://10.5.14.30:8081\n

Replace the IP address in this command with the IP address of the seed node (the bootstrap node).

                                    Note

                                    If using a self-built Harbor repository, please modify the chart repo URL in the first step to the insight-agent chart URL of the self-built repository.

                                  2. Save the current Insight Agent helm values.

                                    helm get values insight-agent -n insight-system -o yaml > insight-agent-values-bak.yaml\n
                                  3. Get the current version number ${insight_version_code}.

# Extract the insight-agent version from the helm list output\ninsight_version_code=$(helm list -n insight-system | grep insight-agent | awk '{print $10}')\n
                                  4. Update the helm value configuration.

                                    helm upgrade --install --create-namespace --version ${insight_version_code} --cleanup-on-fail insight-agent chartmuseum/insight-agent -n insight-system -f insight-agent-values-bak.yaml --set global.exporters.auditLog.kubeAudit.enabled=true\n
                                  5. Restart all fluentBit pods under the insight-system namespace.

# List the fluent-bit pods, then delete them so they restart with the new configuration\nfluent_pod=$(kubectl get pod -n insight-system | grep insight-agent-fluent-bit | awk '{print $1}' | xargs)\nkubectl delete pod ${fluent_pod} -n insight-system\n
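As a sanity check (not part of the original procedure), you can confirm the flag took effect and that the fluent-bit pods come back up:

# Confirm the audit log exporter flag is now true\nhelm get values insight-agent -n insight-system -o yaml | grep -A 3 auditLog\n\n# Check that the fluent-bit pods are running again\nkubectl get pod -n insight-system | grep insight-agent-fluent-bit\n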
                                  "},{"location":"en/admin/ghippo/audit/open-audit.html#disable-collection-of-kubernetes-audit-logs","title":"Disable Collection of Kubernetes Audit Logs","text":"

The steps are the same as those for enabling the collection of Kubernetes audit logs; only step 4 of the previous section, updating the helm value configuration, changes:

                                  helm upgrade --install --create-namespace --version ${insight_version_code} --cleanup-on-fail insight-agent chartmuseum/insight-agent -n insight-system -f insight-agent-values-bak.yaml --set global.exporters.auditLog.kubeAudit.enabled=false\n
                                  "},{"location":"en/admin/ghippo/audit/open-audit.html#ai-community-online-installation-environment","title":"AI Community Online Installation Environment","text":"

                                  Note

                                  If installing AI Community in a Kind cluster, perform the following steps inside the Kind container.

                                  "},{"location":"en/admin/ghippo/audit/open-audit.html#confirm-enabling-kubernetes-audit-logs_1","title":"Confirm Enabling Kubernetes Audit Logs","text":"

                                  Run the following command to check if audit logs are generated under the /var/log/kubernetes/audit directory. If they exist, it means that Kubernetes audit logs are successfully enabled.

                                  ls /var/log/kubernetes/audit\n

                                  If they are not enabled, please refer to the documentation on enabling/disabling Kubernetes audit logs.

                                  "},{"location":"en/admin/ghippo/audit/open-audit.html#enable-collection-of-kubernetes-audit-logs-process_1","title":"Enable Collection of Kubernetes Audit Logs Process","text":"
                                  1. Save the current values.

                                    helm get values insight-agent -n insight-system -o yaml > insight-agent-values-bak.yaml\n
2. Get the current version number ${insight_version_code}.

# Extract the insight-agent version from the helm list output\ninsight_version_code=$(helm list -n insight-system | grep insight-agent | awk '{print $10}')\n
                                  3. Update the helm value configuration.

                                    helm upgrade --install --create-namespace --version ${insight_version_code} --cleanup-on-fail insight-agent insight-release/insight-agent -n insight-system -f insight-agent-values-bak.yaml --set global.exporters.auditLog.kubeAudit.enabled=true\n

If the upgrade fails due to an unsupported version, check whether the helm repo used in the command has that version. If not, update the helm repo and then retry.

                                    helm repo update insight-release\n
                                  4. Restart all fluentBit pods under the insight-system namespace.

# List the fluent-bit pods, then delete them so they restart with the new configuration\nfluent_pod=$(kubectl get pod -n insight-system | grep insight-agent-fluent-bit | awk '{print $1}' | xargs)\nkubectl delete pod ${fluent_pod} -n insight-system\n
                                  "},{"location":"en/admin/ghippo/audit/open-audit.html#disable-collection-of-kubernetes-audit-logs_1","title":"Disable Collection of Kubernetes Audit Logs","text":"

The steps are the same as those for enabling the collection of Kubernetes audit logs; only step 3 of the previous section, updating the helm value configuration, changes:

                                  helm upgrade --install --create-namespace --version ${insight_version_code} --cleanup-on-fail insight-agent insight-release/insight-agent -n insight-system -f insight-agent-values-bak.yaml --set global.exporters.auditLog.kubeAudit.enabled=false\n
                                  "},{"location":"en/admin/ghippo/audit/open-audit.html#change-worker-cluster","title":"Change Worker Cluster","text":"

                                  Each worker cluster is independent and can be turned on as needed.

                                  "},{"location":"en/admin/ghippo/audit/open-audit.html#steps-to-enable-audit-log-collection-when-creating-a-cluster","title":"Steps to Enable Audit Log Collection When Creating a Cluster","text":"

By default, collection of K8s audit logs is turned off. To enable it, follow these steps:

When creating a worker cluster via AI platform, set the K8s audit log switch for the cluster to the enabled state ('true'), so that the created worker cluster generates audit logs.

After the cluster is created successfully, the K8s audit logs for that worker cluster will be collected.

                                  "},{"location":"en/admin/ghippo/audit/open-audit.html#steps-to-enabledisable-after-accessing-or-creating-the-cluster","title":"Steps to Enable/Disable After Accessing or Creating the Cluster","text":""},{"location":"en/admin/ghippo/audit/open-audit.html#confirm-enabling-k8s-audit-logs","title":"Confirm Enabling K8s Audit Logs","text":"

                                  Run the following command to check if audit logs are generated under the /var/log/kubernetes/audit directory. If they exist, it means that K8s audit logs are successfully enabled.

                                  ls /var/log/kubernetes/audit\n

                                  If they are not enabled, please refer to the documentation on enabling/disabling K8s audit logs.

                                  "},{"location":"en/admin/ghippo/audit/open-audit.html#enable-collection-of-k8s-audit-logs","title":"Enable Collection of K8s Audit Logs","text":"

                                  The collection of K8s audit logs is disabled by default. To enable it, follow these steps:

                                  1. Select the cluster that has been accessed and needs to enable the collection of K8s audit logs.

                                  2. Go to the Helm App management page and update the insight-agent configuration (if insight-agent is not installed, you can install it).

3. Toggle the collection of K8s audit logs switch on or off.

4. After toggling the switch, restart the fluent-bit pods for the change to take effect, as shown in the sketch below.
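The restart can be done the same way as earlier on this page, by deleting the fluent-bit pods so they are recreated:

fluent_pod=$(kubectl get pod -n insight-system | grep insight-agent-fluent-bit | awk '{print $1}' | xargs)\nkubectl delete pod ${fluent_pod} -n insight-system\n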

                                  "},{"location":"en/admin/ghippo/audit/open-k8s-audit.html","title":"Generate K8s Audit Logs","text":"

                                  By default, the Kubernetes cluster does not generate audit log information. Through the following configuration, you can enable the audit log feature of Kubernetes.

                                  Note

                                  In a public cloud environment, it may not be possible to control the output and output path of Kubernetes audit logs.

1. Prepare the audit log Policy file
2. Configure the API server and enable audit logs
3. Restart and verify
                                  "},{"location":"en/admin/ghippo/audit/open-k8s-audit.html#prepare-audit-log-policy-file","title":"Prepare audit log Policy file","text":"Click to view Policy YAML for audit log policy.yaml
apiVersion: audit.k8s.io/v1\nkind: Policy\nrules:\n# The following requests were manually identified as high-volume and low-risk,\n# so drop them.\n- level: None\n  users: [\"system:kube-proxy\"]\n  verbs: [\"watch\"]\n  resources:\n  - group: \"\" # core\n    resources: [\"endpoints\", \"services\", \"services/status\"]\n- level: None\n  # Ingress controller reads `configmaps/ingress-uid` through the unsecured port.\n  # TODO(#46983): Change this to the ingress controller service account.\n  users: [\"system:unsecured\"]\n  namespaces: [\"kube-system\"]\n  verbs: [\"get\"]\n  resources:\n  - group: \"\" # core\n    resources: [\"configmaps\"]\n- level: None\n  users: [\"kubelet\"] # legacy kubelet identity\n  verbs: [\"get\"]\n  resources:\n  - group: \"\" # core\n    resources: [\"nodes\", \"nodes/status\"]\n- level: None\n  userGroups: [\"system:nodes\"]\n  verbs: [\"get\"]\n  resources:\n  - group: \"\" # core\n    resources: [\"nodes\", \"nodes/status\"]\n- level: None\n  users:\n  - system:kube-controller-manager\n  - system:kube-scheduler\n  - system:serviceaccount:kube-system:endpoint-controller\n  verbs: [\"get\", \"update\"]\n  namespaces: [\"kube-system\"]\n  resources:\n  - group: \"\" # core\n    resources: [\"endpoints\"]\n- level: None\n  users: [\"system:apiserver\"]\n  verbs: [\"get\"]\n  resources:\n  - group: \"\" # core\n    resources: [\"namespaces\", \"namespaces/status\", \"namespaces/finalize\"]\n# Don't log HPA fetching metrics.\n- level: None\n  users:\n  - system:kube-controller-manager\n  verbs: [\"get\", \"list\"]\n  resources:\n  - group: \"metrics.k8s.io\"\n# Don't log these read-only URLs.\n- level: None\n  nonResourceURLs:\n  - /healthz*\n  - /version\n  - /swagger*\n# Don't log events requests.\n- level: None\n  resources:\n  - group: \"\" # core\n    resources: [\"events\"]\n# Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,\n# so only log at the Metadata level.\n- level: Metadata\n  resources:\n  - group: \"\" # core\n    resources: [\"secrets\", \"configmaps\", \"serviceaccounts/token\"]\n  - group: authentication.k8s.io\n    resources: [\"tokenreviews\"]\n  omitStages:\n  - \"RequestReceived\"\n# Get responses can be large; skip them.\n- level: Request\n  verbs: [\"get\", \"list\", \"watch\"]\n  resources:\n  - group: \"\" # core\n  - group: \"admissionregistration.k8s.io\"\n  - group: \"apiextensions.k8s.io\"\n  - group: \"apiregistration.k8s.io\"\n  - group: \"apps\"\n  - group: \"authentication.k8s.io\"\n  - group: \"authorization.k8s.io\"\n  - group: \"autoscaling\"\n  - group: \"batch\"\n  - group: \"certificates.k8s.io\"\n  - group: \"extensions\"\n  - group: \"metrics.k8s.io\"\n  - group: \"networking.k8s.io\"\n  - group: \"policy\"\n  - group: \"rbac.authorization.k8s.io\"\n  - group: \"settings.k8s.io\"\n  - group: \"storage.k8s.io\"\n  omitStages:\n  - \"RequestReceived\"\n# Default level for known APIs\n- level: RequestResponse\n  resources:\n  - group: \"\" # core\n  - group: \"admissionregistration.k8s.io\"\n  - group: \"apiextensions.k8s.io\"\n  - group: \"apiregistration.k8s.io\"\n  - group: \"apps\"\n  - group: \"authentication.k8s.io\"\n  - group: \"authorization.k8s.io\"\n  - group: \"autoscaling\"\n  - group: \"batch\"\n  - group: \"certificates.k8s.io\"\n  - group: \"extensions\"\n  - group: \"metrics.k8s.io\"\n  - group: \"networking.k8s.io\"\n  - group: \"policy\"\n  - group: \"rbac.authorization.k8s.io\"\n  - group: \"settings.k8s.io\"\n  - group: \"storage.k8s.io\"\n  omitStages:\n  - \"RequestReceived\"\n# Default level for all other requests.\n- level: Metadata\n  omitStages:\n  - \"RequestReceived\"\n

Save the above audit policy file in the /etc/kubernetes/audit-policy/ folder and name it apiserver-audit-policy.yaml .

                                  "},{"location":"en/admin/ghippo/audit/open-k8s-audit.html#configure-the-api-server","title":"Configure the API server","text":"

Open the API server configuration file kube-apiserver.yaml , usually located in the /etc/kubernetes/manifests/ folder, and add the following configuration:

Back up kube-apiserver.yaml before this step. The backup file must not be placed in /etc/kubernetes/manifests/ , because kubelet runs every manifest in that folder as a static pod; /etc/kubernetes/tmp is recommended instead.

1. Add the following flags under spec.containers.command (an annotated sketch of the resulting manifest follows this list):

                                    --audit-log-maxage=30\n--audit-log-maxbackup=10\n--audit-log-maxsize=100\n--audit-log-path=/var/log/audit/kube-apiserver-audit.log\n--audit-policy-file=/etc/kubernetes/audit-policy/apiserver-audit-policy.yaml\n
2. Add the following mounts under spec.containers.volumeMounts :

                                    - mountPath: /var/log/audit\n  name: audit-logs\n- mountPath: /etc/kubernetes/audit-policy\n  name: audit-policy\n
3. Add the following volumes under spec.volumes :

- hostPath:\n    path: /var/log/kubernetes/audit\n    type: \"\"\n  name: audit-logs\n- hostPath:\n    path: /etc/kubernetes/audit-policy\n    type: \"\"\n  name: audit-policy\n
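In the static pod manifest, each of the flags from step 1 becomes a list item under command . Below is a sketch of the resulting fragment, with the meaning of each flag noted; the values are the ones used above.

spec:\n  containers:\n  - command:\n    - kube-apiserver\n    # ... existing flags ...\n    - --audit-log-maxage=30       # days to retain rotated audit log files\n    - --audit-log-maxbackup=10    # maximum number of rotated files to keep\n    - --audit-log-maxsize=100     # size in MB at which a log file is rotated\n    - --audit-log-path=/var/log/audit/kube-apiserver-audit.log\n    - --audit-policy-file=/etc/kubernetes/audit-policy/apiserver-audit-policy.yaml\n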
                                  "},{"location":"en/admin/ghippo/audit/open-k8s-audit.html#test-and-verify","title":"Test and verify","text":"

After a short while, the API server restarts automatically. Run the following command to check whether audit logs are generated in the /var/log/kubernetes/audit directory. If so, the K8s audit log is successfully enabled.

                                  ls /var/log/kubernetes/audit\n

To turn auditing off again, simply remove the related flags under spec.containers.command .

                                  "},{"location":"en/admin/ghippo/audit/source-ip.html","title":"Get Source IP in Audit Logs","text":"

The source IP in audit logs plays a critical role in system and network management. It helps track activities, maintain security, resolve issues, and ensure compliance. However, capturing the source IP incurs some performance overhead, so it is not always enabled in AI platform. Whether it is enabled by default, and how to enable it, depends on the installation mode. The following sections explain this for each installation mode.

                                  Note

Enabling audit logs modifies the replica count of the istio-ingressgateway, which incurs a certain performance overhead. It also requires disabling kube-proxy's LoadBalance mode and Topology Aware Routing, which can affect cluster performance. After enabling audit logs, make sure an istio-ingressgateway instance exists on the node corresponding to the access IP. If the istio-ingressgateway drifts away due to node health or other issues, it must be manually rescheduled back to that node; otherwise, normal operation of AI platform will be affected.

                                  "},{"location":"en/admin/ghippo/audit/source-ip.html#determine-the-installation-mode","title":"Determine the Installation Mode","text":"
                                  kubectl get pod -n metallb-system\n

                                  Run the above command in the cluster. If the result is as follows, it means that the cluster is not in the MetalLB installation mode:

No resources found in metallb-system namespace.\n
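Conversely, if MetalLB is installed, the same command lists MetalLB's controller and speaker pods. The output below is illustrative only; actual pod names include generated hashes:

kubectl get pod -n metallb-system\n# Illustrative output when MetalLB is present:\n# controller-<hash>   1/1   Running\n# speaker-<hash>      1/1   Running\n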
                                  "},{"location":"en/admin/ghippo/audit/source-ip.html#nodeport-installation-mode","title":"NodePort Installation Mode","text":"

                                  In this mode, the source IP in audit logs is disabled by default. The steps to enable it are as follows:

                                  1. Set the minimum replica count of the istio-ingressgateway HPA to be equal to the number of control plane nodes

# Count the control plane nodes; wc -l includes the header line, so subtract 1\ncount=$(kubectl get nodes --selector=node-role.kubernetes.io/control-plane | wc -l)\ncount=$((count-1))\n\nkubectl patch hpa istio-ingressgateway -n istio-system -p '{\"spec\":{\"minReplicas\":'$count'}}'\n
2. Set the externalTrafficPolicy and internalTrafficPolicy of the istio-ingressgateway service to \"Local\"

                                    kubectl patch svc istio-ingressgateway -n istio-system -p '{\"spec\":{\"externalTrafficPolicy\":\"Local\",\"internalTrafficPolicy\":\"Local\"}}'\n
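As a sanity check (not part of the original steps), you can confirm that both traffic policies are now Local :

kubectl get svc istio-ingressgateway -n istio-system -o yaml | grep TrafficPolicy\n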
                                  "},{"location":"en/admin/ghippo/audit/source-ip.html#metallb-installation-mode","title":"MetalLB Installation Mode","text":"

In this mode, the source IP in audit logs is captured by default after installation. For more information, refer to MetalLB Source IP.

                                  "},{"location":"en/admin/ghippo/audit/gproduct-audit/ghippo.html","title":"Audit Items of Global Management","text":"Events Resource Type Notes UpdateEmail-Account Account UpdatePassword-Account Account CreateAccessKeys-Account Account UpdateAccessKeys-Account Account DeleteAccessKeys-Account Account Create-User User Delete-User User Update-User User UpdateRoles-User User UpdatePassword-User User CreateAccessKeys-User User UpdateAccessKeys-User User DeleteAccessKeys-User User Create-Group Group Delete-Group Group Update-Group Group AddUserTo-Group Group RemoveUserFrom-Group Group UpdateRoles-Group Group UpdateRoles-User User Create-LADP LADP Update-LADP LADP Delete-LADP LADP Unable to audit through API server for OIDC Login-User User Logout-User User UpdatePassword-SecurityPolicy SecurityPolicy UpdateSessionTimeout-SecurityPolicy SecurityPolicy UpdateAccountLockout-SecurityPolicy SecurityPolicy UpdateLogout-SecurityPolicy SecurityPolicy MailServer-SecurityPolicy SecurityPolicy CustomAppearance-SecurityPolicy SecurityPolicy OfficialAuthz-SecurityPolicy SecurityPolicy Create-Workspace Workspace Delete-Workspace Workspace BindResourceTo-Workspace Workspace UnBindResource-Workspace Workspace BindShared-Workspace Workspace SetQuota-Workspace Workspace Authorize-Workspace Workspace DeAuthorize-Workspace Workspace UpdateDeAuthorize-Workspace Workspace Update-Workspace Workspace Create-Folder Folder Delete-Folder Folder UpdateAuthorize-Folder Folder Update-Folder Folder Authorize-Folder Folder DeAuthorize-Folder Folder AutoCleanup-Audit Audit ManualCleanup-Audit Audit Export-Audit Audit"},{"location":"en/admin/ghippo/audit/gproduct-audit/insight.html","title":"Insight Audit Items","text":"Events Resource Type Notes Create-ProbeJob ProbeJob Update-ProbeJob ProbeJob Delete-ProbeJob ProbeJob Create-AlertPolicy AlertPolicy Update-AlertPolicy AlertPolicy Delete-AlertPolicy AlertPolicy Import-AlertPolicy AlertPolicy Create-AlertRule AlertRule Update-AlertRule AlertRule Delete-AlertRule AlertRule Create-RuleTemplate RuleTemplate Update-RuleTemplate RuleTemplate Delete-RuleTemplate RuleTemplate Create-email email Update-email email Delete-Receiver Receiver Create-dingtalk dingtalk Update-dingtalk dingtalk Delete-Receiver Receiver Create-wecom wecom Update-wecom wecom Delete-Receiver Receiver Create-webhook webhook Update-webhook webhook Delete-Receiver Receiver Create-sms sms Update-sms sms Delete-Receiver Receiver Create-aliyun(tencent,custom) aliyun, tencent, custom Update-aliyun(tencent,custom) aliyun, tencent, custom Delete-SMSserver SMSserver Create-MessageTemplate MessageTemplate Update-MessageTemplate MessageTemplate Delete-MessageTemplate MessageTemplate Create-AlertSilence AlertSilence Update-AlertSilence AlertSilence Delete-AlertSilence AlertSilence Create-AlertInhibition AlertInhibition Update-AlertInhibition AlertInhibition Delete-AlertInhibition AlertInhibition Update-SystemSettings SystemSettings"},{"location":"en/admin/ghippo/audit/gproduct-audit/kpanda.html","title":"Audit Items of Container Management","text":"Events Resource Types Create-Cluster Cluster Delete-Cluster Cluster Integrate-Cluster Cluster Remove-Cluster Cluster Upgrade-Cluster Cluster Integrate-Node Node Remove-Node Node Update-NodeGPUMode NodeGPUMode Create-HelmRepo HelmRepo Create-HelmApp HelmApp Delete-HelmApp HelmApp Create-Deployment Deployment Delete-Deployment Deployment Create-DaemonSet DaemonSet Delete-DaemonSet DaemonSet Create-StatefulSet StatefulSet Delete-StatefulSet StatefulSet 
Create-Job Job Delete-Job Job Create-CronJob CronJob Delete-CronJob CronJob Delete-Pod Pod Create-Service Service Delete-Service Service Create-Ingress Ingress Delete-Ingress Ingress Create-StorageClass StorageClass Delete-StorageClass StorageClass Create-PersistentVolume PersistentVolume Delete-PersistentVolume PersistentVolume Create-PersistentVolumeClaim PersistentVolumeClaim Delete-PersistentVolumeClaim PersistentVolumeClaim Delete-ReplicaSet ReplicaSet BindResourceTo-Workspace Workspace UnBindResource-Workspace Workspace BindResourceTo-Workspace Workspace UnBindResource-Workspace Workspace Create-CloudShell CloudShell Delete-CloudShell CloudShell"},{"location":"en/admin/ghippo/audit/gproduct-audit/virtnest.html","title":"Audit Items of Virtual Machine","text":"Events Resource Type Notes Restart-VMs VM ConvertToTemplate-VMs VM Edit-VMs VM Update-VMs VM Restore-VMs VM Power on-VMs VM LiveMigrate-VMs VM Delete-VMs VM Delete-VM Template VM Template Create-VMs VM CreateSnapshot-VMs VM Power off-VMs VM Clone-VMs VM"},{"location":"en/admin/ghippo/best-practice/authz-plan.html","title":"Ordinary user authorization plan","text":"

Ordinary users are those who can use most product modules and features (except management features), have certain operation rights over resources within their authorized scope, and can independently use resources to deploy applications.

                                  The authorization and resource planning process for such users is shown in the following figure.

                                  graph TB\n\n    start([Start]) --> user[1. Create User]\n    user --> ns[2. Prepare Kubernetes Namespace]\n    ns --> ws[3. Prepare Workspace]\n    ws --> ws-to-ns[4. Bind a workspace to namespace]\n    ws-to-ns --> authu[5. Authorize a user with Workspace Editor]\n    authu --> complete([End])\n\nclick user \"https://docs.daocloud.io/en/ghippo/access-control/user/\"\nclick ns \"https://docs.daocloud.io/en/kpanda/namespaces/createns/\"\nclick ws \"https://docs.daocloud.io/en/ghippo/workspace/workspace/\"\nclick ws-to-ns \"https://docs.daocloud.io/en/ghippo/workspace/ws-to-ns-across-clus/\"\nclick authu \"https://docs.daocloud.io/en/ghippo/workspace/wspermission/\"\n\n classDef plain fill:#ddd,stroke:#fff,stroke-width:4px,color:#000;\n classDef k8s fill:#326ce5,stroke:#fff,stroke-width:4px,color:#fff;\n classDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n class user,ns,ws,ws-to-ns,authu cluster;\n class start,complete plain;
                                  "},{"location":"en/admin/ghippo/best-practice/cluster-for-multiws.html","title":"Assign a Cluster to Multiple Workspaces (Tenants)","text":"

Cluster resources are typically managed by operations personnel. When allocating resources, they create namespaces to isolate workloads and set resource quotas. This approach has a drawback: when the enterprise's business volume is large, manually allocating resources takes significant effort, and flexibly adjusting resource quotas is challenging.

                                  To address this, the AI platform introduces the concept of workspaces. By sharing resources, workspaces can provide higher-dimensional resource quota capabilities, allowing workspaces (tenants) to self-create Kubernetes namespaces under resource quotas.

                                  For example, if you want several departments to share different clusters:

|  | Cluster01 (Normal) | Cluster02 (High Availability) |
| --- | --- | --- |
| Department (Workspace) A | 50 quota | 10 quota |
| Department (Workspace) B | 100 quota | 20 quota |

                                  You can follow the process below to share clusters with multiple departments/workspaces/tenants:

                                  graph TB\n\npreparews[Prepare Workspace] --> preparecs[Prepare Cluster]\n--> share[Share Cluster to Workspace]\n--> judge([Judge Workspace Remaining Quota])\njudge -.Greater than remaining quota.->modifyns[Modify Namespace Quota]\njudge -.Less than remaining quota.->createns[Create Namespace]\n\nclassDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;\nclassDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;\nclassDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n\nclass preparews,preparecs,share, cluster;\nclass judge plain\nclass modifyns,createns k8s\n\nclick preparews \"https://docs.daocloud.io/en/ghippo/workspace/cluster-for-multiws/#prepare-a-workspace\"\nclick preparecs \"https://docs.daocloud.io/en/ghippo/workspace/cluster-for-multiws/#prepare-a-cluster\"\nclick share \"https://docs.daocloud.io/en/ghippo/workspace/cluster-for-multiws/#add-a-cluster-to-the-workspace\"
                                  "},{"location":"en/admin/ghippo/best-practice/cluster-for-multiws.html#prepare-a-workspace","title":"Prepare a Workspace","text":"

                                  Workspaces are designed to meet multi-tenant usage scenarios, forming isolated resource environments based on clusters, cluster namespaces, meshes, mesh namespaces, multicloud, multicloud namespaces, and other resources. Workspaces can be mapped to various concepts such as projects, tenants, enterprises, and suppliers.

                                  1. Log in to AI platform with a user having the admin/folder admin role and click Global Management at the bottom of the left navigation bar.

                                  2. Click Workspaces and Folders in the left navigation bar, then click the Create Workspace button at the top right.

                                  3. Fill in the workspace name, folder, and other information, then click OK to complete the creation of the workspace.

                                  "},{"location":"en/admin/ghippo/best-practice/cluster-for-multiws.html#prepare-a-cluster","title":"Prepare a Cluster","text":"


                                  Follow these steps to prepare a cluster.

                                  1. Click Container Management at the bottom of the left navigation bar, then select Clusters .

                                  2. Click Create Cluster to create a cluster or click Integrate Cluster to integrate a cluster.

                                  "},{"location":"en/admin/ghippo/best-practice/cluster-for-multiws.html#add-cluster-to-workspace","title":"Add Cluster to Workspace","text":"

                                  Return to Global Management to add clusters to the workspace.

                                  1. Click Global Management -> Workspaces and Folders -> Shared Resources, then click a workspace name and click the New Shared Resource button.

                                  2. Select the cluster, fill in the resource quota, and click OK .

                                  "},{"location":"en/admin/ghippo/best-practice/folder-practice.html","title":"Folder Best Practices","text":"

                                  A folder represents an organizational unit (such as a department) and is a node in the resource hierarchy.

A folder can contain workspaces, subfolders, or a combination of both. It provides identity management, multi-level hierarchy, and permission mapping capabilities, and can map a user's/group's role in a folder to its subfolders, workspaces, and resources. Therefore, with the help of folders, enterprise managers can centrally manage and control all resources.

                                  1. Build corporate hierarchy

First, build a folder hierarchy that mirrors the existing enterprise hierarchy. AI platform supports up to 5 levels of folders, which can be combined freely according to the actual situation of the enterprise; folders and workspaces map to entities such as departments, projects, and suppliers.

                                    Folders are not directly linked to resources, but indirectly achieve resource grouping through workspaces.

                                  2. User identity management

Folders provide three roles: Folder Admin, Folder Editor, and Folder Viewer (see the role permission descriptions for details). You can grant different roles to users/groups in the same folder through Authorization.

                                  3. Role and permission mapping

Enterprise administrator: Grant the Folder Admin role on the root folder. They will then have administrative authority over all departments, projects, and their resources.

Department manager: Grant management rights separately on each subfolder and workspace.

Project members: Grant management rights separately at the workspace and resource levels.

                                  "},{"location":"en/admin/ghippo/best-practice/super-group.html","title":"Architecture Management of Large Enterprises","text":"

As the business scales, the company grows: subsidiaries and branches are established one after another, and some subsidiaries even establish their own subsidiaries. The original large departments are gradually subdivided into multiple smaller departments, adding more and more levels to the organizational structure. This organizational change also affects the IT governance architecture.

                                  The specific operational steps are as follows:

                                  1. Enable Isolation Mode between Folder/WS

                                    Please refer to Enable Isolation Mode between Folder/WS.

                                  2. Plan Enterprise Architecture according to the Actual Situation

                                    Under a multi-level organizational structure, it is recommended to use the second-level folder as an isolation unit to isolate users/user groups/resources between \"sub-companies\". After isolation, users/user groups/resources between \"sub-companies\" are not visible to each other.

                                  3. Create Users/Integrate User Systems

The main platform administrator Admin can create users on the platform or integrate users into AI platform through identity providers such as LDAP/OIDC/OAuth2.0.

                                  4. Create Folder Roles

In Folder/WS isolation mode, the platform administrator Admin must first authorize users to invite them into the various sub-companies, so that the \"sub-company administrators (Folder Admin)\" can then manage those users, for example through secondary authorization or permission edits. To simplify the Admin's work, it is recommended to create a role without actual permissions whose only purpose is to invite users into sub-companies through \"authorization\"; the actual permissions of sub-company users are then delegated to the sub-company administrators (Folder Admin) to manage independently. (The following demonstrates how to create such a resource-bound role without actual permissions, i.e., minirole.)

                                    Note

A resource-bound permission used alone does not take effect, which is exactly what is needed here: users are invited into sub-companies through \"authorization\" and are then managed by the sub-company administrators (Folder Admin).

                                  5. Authorize Users

                                    The platform administrator invites users to various sub-companies according to the actual situation and appoints sub-company administrators.

                                    Authorize sub-company regular users as \"minirole\" (1), and authorize sub-company administrators as Folder Admin.

                                    1. Refers to the role without actual permissions created in step 4
                                  6. Sub-company Administrators Manage Users/User Groups Independently

Sub-company administrators (Folder Admin) only see their own \"Sub-company 2\" after logging into the platform. They can adjust its architecture by creating folders and workspaces, and can assign other permissions to users in Sub-company 2 by adding authorization or editing permissions.

When adding authorization, a sub-company administrator (Folder Admin) can only see the users the platform administrator invited through \"authorization\", not all users on the platform, which achieves user isolation between Folder/WS; the same applies to user groups (the platform administrator can see and authorize all users and user groups on the platform).

                                  Note

The main difference between large enterprises and small/medium-sized enterprises is whether users/user groups in folders and workspaces are visible to each other. In large enterprises, users/user groups of different subsidiaries are not visible to each other, in addition to permission isolation; in small/medium-sized enterprises, users of different departments are visible to each other, also with permission isolation.

                                  "},{"location":"en/admin/ghippo/best-practice/system-message.html","title":"System Messages","text":"

                                  System messages are used to notify all users, similar to system announcements, and will be displayed at the top bar of the AI platform UI at specific times.

                                  "},{"location":"en/admin/ghippo/best-practice/system-message.html#configure-system-messages","title":"Configure System Messages","text":"

You can create a system message by applying a GhippoConfig YAML in the Global Service Cluster. The display time of the message is determined by the time fields in the YAML: a system message is only displayed within the time range configured by the start and end fields.

1. In the Clusters list, click the name of the Global Service Cluster to enter it.

                                  2. Select CRDs from the left navigation bar, search for ghippoconfig, and click the ghippoconfigs.ghippo.io that appears in the search results.

                                  3. Click Create from YAML or modify an existing YAML.

                                  A sample YAML is as follows:

                                  apiVersion: ghippo.io/v1alpha1\nkind: GhippoConfig\nmetadata:\n  name: system-message\nspec:\n  message: \"this is a message\"\n  start: 2024-01-02T15:04:05+08:00\n  end: 2024-07-24T17:26:05+08:00\n
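Alternatively, the same YAML can be applied with kubectl against the Global Service Cluster, assuming it is saved locally as system-message.yaml :

kubectl apply -f system-message.yaml\n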
                                  "},{"location":"en/admin/ghippo/best-practice/ws-best-practice.html","title":"Workspace Best Practices","text":"

A workspace is a resource grouping unit, and most resources can be bound to a workspace. Through authorization and resource binding, a workspace establishes the binding relationship between users and roles and applies it to all resources in the workspace at once.

                                  Through the workspace, you can easily manage teams and resources, and solve cross-module and cross-cluster resource authorization issues.

                                  "},{"location":"en/admin/ghippo/best-practice/ws-best-practice.html#workspace-features","title":"Workspace features","text":"

A workspace provides three features: authorization, resource groups, and shared resources. It mainly addresses unified resource authorization, resource grouping, and resource quotas.

                                  1. Authorization: Grant users/groups different roles in the workspace, and apply the roles to the resources in the workspace.

Best practice: When ordinary users want to use the Workbench, microservice engine, service mesh, or middleware module features, or need permission to use container management and some service mesh resources, the administrator needs to grant them workspace permissions (Workspace Admin, Workspace Edit, Workspace View). The administrator here can be the Admin role, the Workspace Admin role of that workspace, or the Folder Admin role above the workspace. See the Relationship between Folder and Workspace.

                                  2. Resource group: Resource group and shared resource are two resource management modes of the workspace.

                                    Resource groups support four resource types: Cluster, Cluster-Namespace (cross-cluster), Mesh, and Mesh-Namespace. A resource can only be bound to one resource group. After a resource is bound to a resource group, the owner of the workspace gains full management rights over the resource, equivalent to being the resource's owner, and is therefore not limited by resource quotas.

                                    Best practice: Through the authorization feature, the workspace can grant different role permissions to department members and apply the authorization relationship between people and roles to all resources in the workspace at once. Operations staff therefore only need to bind resources to resource groups, and add department members with different roles to different resource groups, to ensure that resource permissions are assigned correctly.

                                    Department | Role | Cluster | Cluster-Namespace (cross-cluster) | Mesh | Mesh-Namespace
                                    Department Admin | Workspace Admin | \u2713 | \u2713 | \u2713 | \u2713
                                    Department Core Members | Workspace Edit | \u2713 | \u2717 | \u2713 | \u2717
                                    Other Members | Workspace View | \u2713 | \u2717 | \u2717 | \u2717
                                  3. Shared resources: The shared resource feature is mainly for cluster resources.

                                    A cluster can be shared by multiple workspaces (referring to the shared resource feature in the workspace); a workspace can also use the resources of multiple clusters at the same time. However, resource sharing does not mean that the sharer (workspace) can use the shared resource (cluster) without restriction, so the resource quota that the sharer (workspace) can use is usually limited.

                                    At the same time, unlike resource groups, workspace members are only users of the shared resource: they can use the cluster's resources within the resource quota, for example, creating namespaces and deploying applications in Workbench, but they do not have management authority over the cluster. With the quota in place, the total resource quota of the namespaces created/bound under this workspace cannot exceed the resources that the cluster has allotted to this workspace.

                                    Best practice: The operation and maintenance department has a high-availability cluster 01 and wants to allocate it to department A (workspace A) with 50 CPU cores and to department B (workspace B) with 100 CPU cores. In this case, you can borrow the concept of shared resources: share cluster 01 with both department A and department B, and limit department A's CPU quota to 50 cores and department B's to 100 cores. The administrator of department A (workspace A Admin) can then create and use namespaces in Workbench as long as the sum of the namespace quotas does not exceed 50 cores, and the administrator of department B (workspace B Admin) can do the same within 100 cores. The namespaces created by the two administrators are automatically bound to their departments, and other members of each department will have the Namespace Admin, Namespace Edit, or Namespace View role for those namespaces (a department here refers to a workspace; a workspace can also be mapped to other concepts such as an organization or a supplier). The whole process is as follows:

                                    Department | Role | Cluster | Resource Quota
                                    Department Administrator A | Workspace Admin | CPU 50 cores | CPU 50 cores
                                    Department Administrator B | Workspace Admin | CPU 100 cores | CPU 100 cores
                                    Other Members of the Department | Namespace Admin / Namespace Edit / Namespace View | Assign as needed | Assign as needed
                                  "},{"location":"en/admin/ghippo/best-practice/ws-best-practice.html#the-effect-of-the-workspace-on-the-ai-platfrom","title":"The effect of the workspace on the AI platfrom","text":"

                                  Module name: Container Management

                                  Due to the particularity of functional modules, resources created in the container management module will not be automatically bound to a certain workspace.

                                  If you need to perform unified authorization management of users and resources through workspaces, you can manually bind the required resources to a workspace, so that the user roles in this workspace apply to those resources (the resources can come from different clusters).

                                  In addition, there is a slight difference between container management and service mesh in terms of resource binding entries. The workspace provides binding entries for the Cluster and Cluster-Namespace resources of container management, but has not yet opened binding entries for the Mesh and Mesh-Namespace resources of service mesh.

                                  For Mesh and Mesh-Namespace resources, you can manually bind them in the resource list of the service mesh.

                                  "},{"location":"en/admin/ghippo/best-practice/ws-best-practice.html#use-cases-of-workspace","title":"Use Cases of Workspace","text":"
                                  • Mapping to concepts such as different departments, projects, and organizations. At the same time, the roles of Workspace Admin, Workspace Edit, and Workspace View in the workspace can be mapped to different roles in departments, projects, and organizations
                                  • Add resources for different purposes to different workspaces for separate management and use
                                  • Set up completely independent administrators for different workspaces to realize user and authority management within the scope of the workspace
                                  • Share resources to different workspaces, and limit the upper limit of resources that can be used by workspaces
                                  "},{"location":"en/admin/ghippo/best-practice/ws-to-ns.html","title":"Workspaces (tenants) bind namespaces across clusters","text":"

                                  Namespaces from different clusters are bound under the workspace (tenant), which enables the workspace (tenant) to flexibly manage the Kubernetes Namespace under any cluster on the platform. At the same time, the platform provides permission mapping capabilities, which can map the user's permissions in the workspace to the bound namespace.

                                  When one or more cross-cluster namespaces are bound under a workspace (tenant), the administrator does not need to authorize the members of the workspace again. The roles of members in the workspace are automatically mapped to the bound namespaces according to the following relationships, avoiding repeated authorization operations:

                                  • Workspace Admin corresponds to Namespace Admin
                                  • Workspace Editor corresponds to Namespace Editor
                                  • Workspace Viewer corresponds to Namespace Viewer

                                  Here is an example:

                                  User | Workspace | Role
                                  User A | Workspace01 | Workspace Admin

                                  After binding a namespace to a workspace:

                                  User | Category | Role
                                  User A | Workspace01 | Workspace Admin
                                  User A | Namespace01 | Namespace Admin"},{"location":"en/admin/ghippo/best-practice/ws-to-ns.html#implementation-plan","title":"Implementation plan","text":"

                                  Bind namespaces from different clusters to the same workspace (tenant) and make them available to members of the workspace (tenant), following the process shown in the figure.

                                  graph TB\n\npreparews[prepare workspace] --> preparens[prepare namespace]\n--> judge([whether the namespace is bound to another workspace])\njudge -.unbound.->nstows[bind namespace to workspace] -->wsperm[manage workspace access]\njudge -.bound.->createns[Create a new namespace]\n\nclassDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;\nclassDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;\nclassDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n\nclass preparews,preparens,createns,nstows,wsperm cluster;\nclass judge plain\n\nclick preparews \"https://docs.daocloud.io/ghippo/workspace/ws-to-ns-across-clus/#_3\"\nclick preparens \"https://docs.daocloud.io/ghippo/workspace/ws-to-ns-across-clus/#_4\"\nclick nstows \"https://docs.daocloud.io/ghippo/workspace/ws-to-ns-across-clus/#_5\"\nclick wsperm \"https://docs.daocloud.io/ghippo/workspace/ws-to-ns-across-clus/#_6\"\nclick createns \"https://docs.daocloud.io/ghippo/workspace/ws-to-ns-across-clus/#_4\"

                                  Tip

                                  A namespace can only be bound by one workspace.

                                  "},{"location":"en/admin/ghippo/best-practice/ws-to-ns.html#prepare-workspace","title":"Prepare workspace","text":"

                                  In order to meet multi-tenant use cases, the workspace forms an isolated resource environment based on resources such as clusters, cluster namespaces, meshes, mesh namespaces, multicloud instances, and multicloud namespaces. Workspaces can be mapped to various concepts such as projects, tenants, enterprises, and suppliers.

                                  1. Log in to AI platform as a user with the admin/folder admin role, and click Global Management at the bottom of the left navigation bar.

                                  2. Click Workspace and Folder in the left navigation bar, and click the Create Workspace button in the upper right corner.

                                  3. After filling in the workspace name, folder and other information, click OK to complete the creation of the workspace.

                                  Tip: If the namespace to be bound already exists in the platform, click a workspace and, under the Resource Group tab, click Bind Resource to bind the namespace directly.

                                  "},{"location":"en/admin/ghippo/best-practice/ws-to-ns.html#prepare-the-namespace","title":"Prepare the namespace","text":"

                                  A namespace is a smaller unit of resource isolation that can be managed and used by members of a workspace after it is bound to a workspace.

                                  Follow the steps below to prepare a namespace that is not yet bound to any workspace.

                                  1. Click Container Management at the bottom of the left navigation bar.

                                  2. Click the name of the target cluster to enter Cluster Details .

                                  3. Click Namespace on the left navigation bar to enter the namespace management page, and click the Create button on the right side of the page.

                                  4. Fill in the name of the namespace, configure the workspace and tags (optional settings), and click OK .

                                    Info

                                    Workspaces are primarily used to divide resources into groups and grant users (user groups) different access rights to those resources. For a detailed description of the workspace, please refer to Workspace and Folder.

                                  5. Click OK to complete the creation of the namespace. On the right side of the namespace list, click \u2507 , and you can select Bind Workspace from the pop-up menu.
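                                  If you manage the target cluster from the command line, the unbound namespace itself can also be created with kubectl (the name ns01 is illustrative); it should then appear in the namespace list above, ready to be bound:

                                  # Create a namespace that is not yet bound to any workspace\nkubectl create namespace ns01\n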

                                  "},{"location":"en/admin/ghippo/best-practice/ws-to-ns.html#bind-the-namespace-to-the-workspace","title":"Bind the namespace to the workspace","text":"

                                  In addition to binding in the namespace list, you can also return to Global Management and follow the steps below to bind the namespace to a workspace.

                                  1. Click Global Management -> Workspace and Folder -> Resource Group , click a workspace name, and click the Bind Resource button.

                                  2. Select the namespaces to be bound (multiple selections allowed), and click OK to complete the binding.

                                  "},{"location":"en/admin/ghippo/best-practice/ws-to-ns.html#add-members-to-the-workspace-and-authorize","title":"Add members to the workspace and authorize","text":"
                                  1. In Workspace and Folder -> Authorization , click the name of a workspace, and click the Add Authorization button.

                                  2. After selecting the User/group and Role to be authorized, click OK to complete the authorization.

                                  "},{"location":"en/admin/ghippo/best-practice/gproduct/intro.html","title":"How GProduct connects to global management","text":"

                                  GProduct is the general term for all modules in AI platform other than Global Management. These modules need to be docked with Global Management before they can be added to AI platform.

                                  "},{"location":"en/admin/ghippo/best-practice/gproduct/intro.html#what-to-be-docking","title":"What to be docking","text":"
                                  • Docking Navigation Bar

                                    The entrances are unified on the left navigation bar.

                                  • Access Routing and AuthN

                                    Unify the IP or domain name, and route all entries through the Istio Gateway of Global Management.

                                  • Unified login / unified AuthN authentication

                                    The login page is unified to use the Global Management (Keycloak) login page, and API authn token verification goes through the Istio Gateway. After GProduct is docked with Global Management, it does not need to care about how to implement login and authentication.

                                  "},{"location":"en/admin/ghippo/best-practice/gproduct/nav.html","title":"Docking navigation bar","text":"

                                  Take container management (codename kpanda) as an example of docking to the navigation bar.

                                  The expected effect after docking is as follows:

                                  "},{"location":"en/admin/ghippo/best-practice/gproduct/nav.html#docking-method","title":"Docking method","text":"

                                  Refer to the following steps to dock the GProduct:

                                  1. Register all kpanda (container management) features to the nav bar via GProductNavigator CR.

                                    apiVersion: ghippo.io/v1alpha1\nkind: GProductNavigator\nmetadata:\n  name: kpanda\nspec:\n  gproduct: kpanda\n  name: \u5bb9\u5668\u7ba1\u7406\n  localizedName:\n    zh-CN: \u5bb9\u5668\u7ba1\u7406\n    en-US: Container Management\n  url: /kpanda\n  category: \u5bb9\u5668  # (1)\n  iconUrl: /kpanda/nav-icon.png\n  order: 10 # (2)\n  menus:\n  - name: \u5907\u4efd\u7ba1\u7406\n    localizedName:\n      zh-CN: \u5907\u4efd\u7ba1\u7406\n      en-US: Backup Management\n    iconUrl: /kpanda/bkup-icon.png\n    url: /kpanda/backup\n
                                    1. Only one of overview, workbench, container, microservice, data service, and management is supported
                                    2. The larger the number, the higher it is ranked

                                    The configuration of the Global Management navigation bar categories is stored in a ConfigMap and cannot currently be added through registration. Please contact the Global Management team to add it.

                                  2. The kpanda front-end is integrated into the AI platform parent application Anakin as a micro-frontend.

                                    The AI platform frontend uses qiankun to connect the sub-application UIs. See getting started.

                                    After registering the GProductNavigator CR, the corresponding registration information will be generated for the frontend parent application. For example, kpanda generates the following registration information:

                                    {\n  \"id\": \"kpanda\",\n  \"title\": \"\u5bb9\u5668\u7ba1\u7406\",\n  \"url\": \"/kpanda\",\n  \"uiAssetsUrl\": \"/ui/kpanda/\", // The trailing / is required\n  \"needImportLicense\": false\n},\n

                                    The correspondence between the above registration information and the qiankun sub-application fields is:

                                    {\n    name: id,\n    entry: uiAssetsUrl,\n    container: '#container',\n    activeRule: url, \n    loader,\n    props: globalProps,\n}\n

                                    container and loader are provided by the frontend parent application; the sub-application does not need to care about them. props provides a pinia store containing the user's basic information and sub-product registration information.

                                    qiankun will use the following parameters on startup:

                                    start({\n  sandbox: {\n    experimentalStyleIsolation: true,\n  },\n  // Remove the favicon in the sub-application to prevent it from overwriting the parent application's favicon in Firefox\n  getTemplate: (template) => template.replaceAll(/<link\\s* rel=\"[\\w\\s]*icon[\\w\\s]*\"\\s*( href=\".*?\")?\\s*\\/?>/g, ''),\n});\n

                                  Refer to the GProduct docking demo tar package provided by the frontend team.

                                  "},{"location":"en/admin/ghippo/best-practice/gproduct/route-auth.html","title":"Access routing and login authentication","text":"

                                  After docking, login and password verification are unified. The effect is as follows:

                                  The API bearer token verification of each GProduct module goes through the Istio Gateway.

                                  The routing map after access is as follows:

                                  "},{"location":"en/admin/ghippo/best-practice/gproduct/route-auth.html#docking-method","title":"Docking method","text":"

                                  Take kpanda as an example of registering the GProductProxy CR.

                                  # GProductProxy CR example, including routing and login authentication\n\n# spec.proxies: The route written later cannot be a subset of the route written first, and vice versa\n# spec.proxies.match.uri.prefix: If it is a backend API, it is recommended to add \"/\" at the end of the prefix to indicate the end of this path (it can be omitted if there are special requirements)\n# spec.proxies.match.uri: supports prefix and exact modes; choose either Prefix or Exact; Prefix has a higher priority than Exact\n\napiVersion: ghippo.io/v1alpha1\nkind: GProductProxy\nmetadata:\n  name: kpanda  # (1)\nspec:\n  gproduct: kpanda  # (2)\n  proxies:\n  - labels:\n      kind: UIEntry\n    match:\n      uri:\n        prefix: /kpanda # (3)\n    rewrite:\n      uri: /index.html\n    destination:\n      host: ghippo-anakin.ghippo-system.svc.cluster.local\n      port: 80\n    authnCheck: false  # (4)\n  - labels:\n      kind: UIAssets\n    match:\n      uri:\n        prefix: /ui/kpanda/ # (5)\n    destination:\n      host: kpanda-ui.kpanda-system.svc.cluster.local\n      port: 80\n    authnCheck: false\n  - match:\n      uri:\n        prefix: /apis/kpanda.io/v1/a\n    destination:\n      host: kpanda-service.kpanda-system.svc.cluster.local\n      port: 80\n    authnCheck: false\n  - match:\n      uri:\n        prefix: /apis/kpanda.io/v1 # (6)\n    destination:\n      host: kpanda-service.kpanda-system.svc.cluster.local\n      port: 80\n    authnCheck: true\n
                                  1. Cluster-level CRDs
                                  2. You need to specify the GProduct name in lowercase
                                  3. Can also support exact
                                  4. Whether istio-gateway is required to perform AuthN Token authentication for this routing API, false means to skip authentication
                                  5. For UIAssets, it is recommended to add / at the end to mark the end of the path (otherwise the frontend may have problems)
                                  6. The route written later cannot be a subset of the route written earlier, and vice versa
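                                  As a sketch, the CR above can be applied with kubectl in the global service cluster; the file name is illustrative, and the gproductproxies plural form is an assumption about the CRD:

                                  kubectl apply -f gproduct-proxy-kpanda.yaml\n# GProductProxy is cluster-scoped, so no namespace is needed\nkubectl get gproductproxies.ghippo.io kpanda -o yaml\n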
                                  "},{"location":"en/admin/ghippo/best-practice/menu/menu-display-or-hiding.html","title":"Display/Hide Navigation Bar Menu Based on Permissions","text":"

                                  Under the current permission system, Global Management has the capability to regulate the visibility of navigation bar menus according to user permissions. However, due to the authorization information of Container Management not being synchronized with Global Management, Global Management cannot accurately determine whether to display the Container Management menu.

                                  This document achieves the following through configuration: by default, the Container Management and Insight menus are hidden in cases where Global Management cannot make a judgment, and a whitelist authorization strategy is used to manage their visibility. (Permissions for clusters or namespaces granted through the Container Management page cannot be perceived or judged by Global Management.)

                                  For example, if User A holds the Cluster Admin role for cluster A in Container Management, Global Management cannot determine whether to display the Container Management menu. After the configuration described in this document, User A will not see the Container Management menu by default. They will need to have explicit permission in Global Management to access the Container Management menu.

                                  "},{"location":"en/admin/ghippo/best-practice/menu/menu-display-or-hiding.html#prerequisites","title":"Prerequisites","text":"

                                  The feature to show/hide menus based on permissions must be enabled. The methods to enable this are as follows:

                                  • For newly installed environments, add the --set global.navigatorVisibleDependency=true parameter when running helm install.
                                  • For existing environments, back up the values with helm get values ghippo -n ghippo-system -o yaml > bak.yaml, then edit bak.yaml and add global.navigatorVisibleDependency: true.

                                  Then upgrade the Global Management using the following command:

                                  helm upgrade ghippo ghippo-release/ghippo \\  \n  -n ghippo-system \\  \n  -f ./bak.yaml \\  \n  --version ${version}\n
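                                  Putting the backup and upgrade together for an existing environment, a sketch of the complete flow (${version} is your current Ghippo version, as above):

                                  # 1. Back up the current values\nhelm get values ghippo -n ghippo-system -o yaml > bak.yaml\n\n# 2. Edit bak.yaml and add the line: global.navigatorVisibleDependency: true\n\n# 3. Upgrade Global Management with the modified values\nhelm upgrade ghippo ghippo-release/ghippo \\\n  -n ghippo-system \\\n  -f ./bak.yaml \\\n  --version ${version}\n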
                                  "},{"location":"en/admin/ghippo/best-practice/menu/menu-display-or-hiding.html#configure-the-navigation-bar","title":"Configure the Navigation Bar","text":"

                                  Apply the following YAML in kpanda-global-cluster:

                                  apiVersion: ghippo.io/v1alpha1  \nkind: GProductNavigator  \nmetadata:  \n  name: kpanda-menus-custom  \nspec:  \n  category: container  \n  gproduct: kpanda  \n  iconUrl: ./ui/kpanda/kpanda.svg  \n  isCustom: true  \n  localizedName:  \n    en-US: Container Management  \n    zh-CN: \u5bb9\u5668\u7ba1\u7406  \n  menus:  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Clusters  \n        zh-CN: \u96c6\u7fa4\u5217\u8868  \n      name: Clusters  \n      order: 80  \n      url: ./kpanda/clusters  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Namespaces  \n        zh-CN: \u547d\u540d\u7a7a\u95f4  \n      name: Namespaces  \n      order: 70  \n      url: ./kpanda/namespaces  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Workloads  \n        zh-CN: \u5de5\u4f5c\u8d1f\u8f7d  \n      name: Workloads  \n      order: 60  \n      url: ./kpanda/workloads/deployments  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Permissions  \n        zh-CN: \u6743\u9650\u7ba1\u7406  \n      name: Permissions  \n      order: 10  \n      url: ./kpanda/rbac/content/cluster  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n  name: Container Management \n  order: 50  \n  url: ./kpanda/clusters  \n  visible: true  \n\n---\napiVersion: ghippo.io/v1alpha1  \nkind: GProductNavigator  \nmetadata:  \n  name: insight-menus-custom  \nspec:  \n  category: microservice  \n  gproduct: insight  \n  iconUrl: ./ui/insight/logo.svg  \n  isCustom: true  \n  localizedName:  \n    en-US: Insight  \n    zh-CN: \u53ef\u89c2\u6d4b\u6027  \n  menus:  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Overview  \n        zh-CN: \u6982\u89c8  \n      name: Overview  \n      order: 9  \n      url: ./insight/overview  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Dashboard  \n        zh-CN: \u4eea\u8868\u76d8  \n      name: Dashboard  \n      order: 8  \n      url: ./insight/dashboard  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Infrastructure  \n        zh-CN: \u57fa\u7840\u8bbe\u65bd  \n      name: Infrastructure  \n      order: 7  \n      url: ./insight/clusters  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Metrics  \n        zh-CN: \u6307\u6807  \n      name: Metrics  \n      order: 6  \n      url: ./insight/metric/basic  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n   
       - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Logs  \n        zh-CN: \u65e5\u5fd7  \n      name: Logs  \n      order: 5  \n      url: ./insight/logs  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Trace Tracking  \n        zh-CN: \u94fe\u8def\u8ffd\u8e2a  \n      name: Trace Tracking  \n      order: 4  \n      url: ./insight/topology  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Alerts  \n        zh-CN: \u544a\u8b66  \n      name: Alerts  \n      order: 3  \n      url: ./insight/alerts/active/metrics  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Collect Management  \n        zh-CN: \u91c7\u96c6\u7ba1\u7406  \n      name: Collect Management  \n      order: 2  \n      url: ./insight/agents  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: System Management  \n        zh-CN: \u7cfb\u7edf\u7ba1\u7406  \n      name: System Management  \n      order: 1  \n      url: ./insight/system-components  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n  name: Insight \n  order: 30  \n  url: ./insight  \n  visible: true  \n\n---\napiVersion: ghippo.io/v1alpha1  \nkind: GProductResourcePermissions  \nmetadata:  \n  name: kpanda  \nspec:  \n  actions:  \n    - localizedName:  \n        en-US: Create  \n        zh-CN: \u521b\u5efa  \n      name: create  \n    - localizedName:  \n        en-US: Delete  \n        zh-CN: \u5220\u9664  \n      name: delete  \n    - localizedName:  \n        en-US: Update  \n        zh-CN: \u7f16\u8f91  \n      name: update  \n    - localizedName:  \n        en-US: Get  \n        zh-CN: \u67e5\u770b  \n      name: get  \n    - localizedName:  \n        en-US: Admin  \n        zh-CN: \u7ba1\u7406  \n      name: admin  \n  authScopes:  \n    - resourcePermissions:  \n        - actions:  \n            - name: get  \n            - dependPermissions:  \n                - action: get  \n              name: create  \n            - dependPermissions:  \n                - action: get  \n              name: update  \n            - dependPermissions:  \n                - action: get  \n              name: delete  \n          resourceType: cluster  \n        - actions:  \n            - name: get  \n          resourceType: menu  \n      scope: platform  \n    - resourcePermissions:  \n        - actions:  \n            - name: admin  \n              tips:  \n                - en-US: >-  \n                    If the workspace is bound to a cluster, it will be assigned  \n                    the Cluster Admin role upon authorization.  
\n                  zh-CN: \u82e5\u5de5\u4f5c\u7a7a\u95f4\u7ed1\u5b9a\u4e86\u96c6\u7fa4\uff0c\u6388\u6743\u540e\u8fd8\u5c06\u88ab\u6620\u5c04\u4e3a\u5bf9\u5e94\u96c6\u7fa4\u7684 Cluster Admin \u89d2\u8272  \n          resourceType: cluster  \n        - actions:  \n            - name: get  \n              tips:  \n                - en-US: >-  \n                    If the workspace is bound to a namespace, it will be  \n                    assigned the NS View role upon authorization.  \n                  zh-CN: \u82e5\u5de5\u4f5c\u7a7a\u95f4\u7ed1\u5b9a\u4e86\u547d\u540d\u7a7a\u95f4\uff0c\u6388\u6743\u540e\u8fd8\u5c06\u88ab\u6620\u5c04\u4e3a\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4\u7684 NS View \u89d2\u8272  \n            - name: update  \n              tips:  \n                - en-US: >-  \n                    If the workspace is bound to a namespace, it will be  \n                    assigned the NS Edit role upon authorization.  \n                  zh-CN: \u82e5\u5de5\u4f5c\u7a7a\u95f4\u7ed1\u5b9a\u4e86\u547d\u540d\u7a7a\u95f4\uff0c\u6388\u6743\u540e\u8fd8\u5c06\u88ab\u6620\u5c04\u4e3a\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4\u7684 NS  Edit \u89d2\u8272  \n            - name: admin  \n              tips:  \n                - en-US: >-  \n                    If the workspace is bound to a namespace, it will be  \n                    assigned the NS Admin role upon authorization.  \n                  zh-CN: \u82e5\u5de5\u4f5c\u7a7a\u95f4\u7ed1\u5b9a\u4e86\u547d\u540d\u7a7a\u95f4\uff0c\u6388\u6743\u540e\u8fd8\u5c06\u88ab\u6620\u5c04\u4e3a\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4\u7684 NS Admin \u89d2\u8272  \n          resourceType: namespace  \n      scope: workspace  \n  gproduct: kpanda  \n  resourceTypes:  \n    - localizedName:  \n        en-US: Cluster Management  \n        zh-CN: \u96c6\u7fa4\u7ba1\u7406  \n      name: cluster  \n    - localizedName:  \n        en-US: Menu  \n        zh-CN: \u83dc\u5355  \n      name: menu  \n    - localizedName:  \n        en-US: Namespace Management  \n        zh-CN: \u547d\u540d\u7a7a\u95f4  \n      name: namespace\n
                                  "},{"location":"en/admin/ghippo/best-practice/menu/menu-display-or-hiding.html#achieve-the-above-effect-through-custom-roles","title":"Achieve the Above Effect Through Custom Roles","text":"

                                  Note

                                  Only the menus of the Container Management module need menu permissions configured separately; other modules automatically show or hide based on user permissions.

                                  Create a custom role that includes the permission to view the Container Management menu, and then grant this role to users who need access to the Container Management menu.

                                  Users granted this role will see the navigation bar menus for Container Management and Insight. The result is as follows:

                                  "},{"location":"en/admin/ghippo/best-practice/oem/custom-idp.html","title":"Customizing AI platform Integration with IdP","text":"

                                  Identity Provider (IdP): In AI platform, when a client system needs to be used as the user source and user authentication is performed through the client system's login interface, the client system is referred to as the Identity Provider for AI platform.

                                  "},{"location":"en/admin/ghippo/best-practice/oem/custom-idp.html#use-cases","title":"Use Cases","text":"

                                  If you have strong customization requirements for the Ghippo login IdP, such as supporting WeCom, WeChat, or other social logins, please refer to this document for implementation.

                                  "},{"location":"en/admin/ghippo/best-practice/oem/custom-idp.html#supported-versions","title":"Supported Versions","text":"

                                  Ghippo v0.15.0 and above.

                                  "},{"location":"en/admin/ghippo/best-practice/oem/custom-idp.html#specific-steps","title":"Specific Steps","text":""},{"location":"en/admin/ghippo/best-practice/oem/custom-idp.html#customizing-ghippo-keycloak-plugin","title":"Customizing Ghippo Keycloak Plugin","text":"
                                  1. Customize the plugin

                                    Refer to the official keycloak documentation and customizing Keycloak IdP for development.

                                  2. Build the image

                                    # Build a minimal image containing only the plugin jar\nFROM scratch\n\n# Copy the custom IdP plugin jar into the image\nCOPY ./xxx-jar-with-dependencies.jar /plugins/\n

                                  Note

                                  If you need two customized IdPs, you need to copy two jar packages.
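                                  A sketch of building the plugin image; the image name and tag are illustrative and must match the global.idpPlugin.image values used in the deployment step below:

                                  docker build -t chenyang-idp:v0.0.1 .\n# Push it to a registry reachable by the cluster if needed\ndocker push chenyang-idp:v0.0.1\n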

                                  "},{"location":"en/admin/ghippo/best-practice/oem/custom-idp.html#deploying-ghippo-keycloak-plugin-steps","title":"Deploying Ghippo Keycloak Plugin Steps","text":"
                                  1. Upgrade Ghippo to v0.15.0 or above. You can also directly install and deploy Ghippo v0.15.0, but make sure to manually record the following information.

                                    helm -n ghippo-system get values ghippo -o yaml\n
                                    apiserver:\n  image:\n    repository: release.daocloud.io/ghippo-ci/ghippo-apiserver\n    tag: v0.4.2-test-3-gaba5ec2\ncontrollermanager:\n  image:\n    repository: release.daocloud.io/ghippo-ci/ghippo-apiserver\n    tag: v0.4.2-test-3-gaba5ec2\nglobal:\n  database:\n    builtIn: true\n  reverseProxy: http://192.168.31.10:32628\n
                                  2. After the upgrade succeeds, manually run the helm upgrade command shown below. The values set via --set should be taken from the content saved above, plus the following additional parameters:

                                    • global.idpPlugin.enabled: Whether to enable the custom plugin, default is disabled.
                                    • global.idpPlugin.image.repository: The image address used by the initContainer to initialize the custom plugin.
                                    • global.idpPlugin.image.tag: The image tag used by the initContainer to initialize the custom plugin.
                                    • global.idpPlugin.path: The path to the custom plugin files within the above image.

                                    Here is an example:

                                    helm upgrade \\\n    ghippo \\\n    ghippo-release/ghippo \\\n    --version v0.4.2-test-3-gaba5ec2 \\\n    -n ghippo-system \\\n    --set apiserver.image.repository=release.daocloud.io/ghippo-ci/ghippo-apiserver \\\n    --set apiserver.image.tag=v0.4.2-test-3-gaba5ec2 \\\n    --set controllermanager.image.repository=release.daocloud.io/ghippo-ci/ghippo-apiserver \\\n    --set controllermanager.image.tag=v0.4.2-test-3-gaba5ec2 \\\n    --set global.reverseProxy=http://192.168.31.10:32628 \\\n    --set global.database.builtIn=true \\\n    --set global.idpPlugin.enabled=true \\\n    --set global.idpPlugin.image.repository=chenyang-idp \\\n    --set global.idpPlugin.image.tag=v0.0.1 \\\n    --set global.idpPlugin.path=/plugins/.\n
                                  3. Select the desired plugin on the Keycloak administration page.

                                  "},{"location":"en/admin/ghippo/best-practice/oem/demo.html","title":"gproduct-demo","text":""},{"location":"en/admin/ghippo/best-practice/oem/demo.html#environment-setup","title":"Environment setup","text":"
                                  npm install\n

                                  Compile and hot-reload for development:

                                  npm run serve\n

                                  Compile and build:

                                  npm run build\n

                                  Fix linting issues:

                                  npm run lint\n
                                  "},{"location":"en/admin/ghippo/best-practice/oem/demo.html#custom-configuration","title":"Custom Configuration","text":"

                                  Refer to the Configuration Reference for customization options.

                                  Build the image:

                                  docker build -t release.daocloud.io/henry/gproduct-demo .\n

                                  Run on Kubernetes:

                                  kubectl apply -f demo.yaml\n
                                  "},{"location":"en/admin/ghippo/best-practice/oem/keycloak-idp.html","title":"Customizing Keycloak Identity Provider (IdP)","text":"

                                  Requirements: keycloak >= v20

                                  Known issue in keycloak >= v21: support for old-version themes has been removed, which may be fixed in v22. See Issue #15344.

                                  This demo uses Keycloak v20.0.5.

                                  "},{"location":"en/admin/ghippo/best-practice/oem/keycloak-idp.html#source-based-development","title":"Source-based Development","text":""},{"location":"en/admin/ghippo/best-practice/oem/keycloak-idp.html#configure-the-environment","title":"Configure the Environment","text":"

                                  Refer to keycloak/building.md for environment configuration.

                                  Run the following commands based on keycloak/README.md:

                                  cd quarkus\nmvn -f ../pom.xml clean install -DskipTestsuite -DskipExamples -DskipTests\n
                                  "},{"location":"en/admin/ghippo/best-practice/oem/keycloak-idp.html#run-from-ide","title":"Run from IDE","text":""},{"location":"en/admin/ghippo/best-practice/oem/keycloak-idp.html#add-service-code","title":"Add Service Code","text":""},{"location":"en/admin/ghippo/best-practice/oem/keycloak-idp.html#if-inheriting-some-functionality-from-keycloak","title":"If inheriting some functionality from Keycloak","text":"

                                  Add files under the directory services/src/main/java/org/keycloak/broker :

                                  The file names should be xxxProvider.java and xxxProviderFactory.java .

                                  xxxProviderFactory.java example:

                                  Pay attention to the variable PROVIDER_ID = \"oauth\"; , as it will be used in the HTML definition later.

                                  xxxProvider.java example:

                                  "},{"location":"en/admin/ghippo/best-practice/oem/keycloak-idp.html#if-unable-to-inherit-functionality-from-keycloak","title":"If unable to inherit functionality from Keycloak","text":"

                                  Refer to the three files in the image below to write your own code:

                                  Add xxxProviderFactory to resource service

                                  Add xxxProviderFactory to services/src/main/resources/META-INF/services/org.keycloak.broker.provider.IdentityProviderFactory so that the newly added code can work:
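                                  The services file is a standard Java SPI descriptor: each line is the fully qualified class name of a factory implementation. A sketch, assuming a hypothetical package com.example.broker:

                                  # Register the factory class so Keycloak's SPI loader can discover it\necho 'com.example.broker.XxxProviderFactory' >> services/src/main/resources/META-INF/services/org.keycloak.broker.provider.IdentityProviderFactory\n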

                                  Add HTML file

                                  Copy the file themes/src/main/resources/theme/base/admin/resources/partials/realm-identity-provider-oidc.html and rename it as realm-identity-provider-oauth.html (remember the variable to pay attention to from earlier).

                                  Place the copied file in themes/src/main/resources/theme/base/admin/resources/partials/realm-identity-provider-oauth.html .

                                  All the necessary files have been added. Now you can start debugging the functionality.

                                  "},{"location":"en/admin/ghippo/best-practice/oem/keycloak-idp.html#packaging-as-a-jar-plugin","title":"Packaging as a JAR Plugin","text":"

                                  Create a new Java project and copy the above code into the project, as shown below:

                                  Refer to pom.xml.

                                  Run mvn clean package to package the code, resulting in the xxx-jar-with-dependencies.jar file.

                                  Download Keycloak Release 20.0.5 zip package and extract it.

                                  Copy the xxx-jar-with-dependencies.jar file to the keycloak-20.0.5/providers directory.

                                  Run the following command to check if the functionality is working correctly:

                                  bin/kc.sh start-dev\n
                                  "},{"location":"en/admin/ghippo/best-practice/oem/oem-in.html","title":"Integrating Customer Systems into AI platform (OEM IN)","text":"

                                  OEM IN refers to the partner's platform being embedded as a submodule in AI platform, appearing in the primary navigation bar of AI platform. Users can log in and manage it uniformly through AI platform. The implementation of OEM IN is divided into 5 steps:

                                  1. Unify Domain
                                  2. Integrate User Systems
                                  3. Integrate Navigation Bar
                                  4. Customize Appearance
                                  5. Integrate Permission System (Optional)

                                  For specific operational demonstrations, refer to the OEM IN Best Practices Video Tutorial.

                                  Note

                                  The open source software Label Studio is used for the embedded demonstration below. In actual scenarios, you need to solve the following issue in the customer system:

                                  The customer system needs to add a Subpath to distinguish which services belong to AI platform and which belong to the customer system.

                                  "},{"location":"en/admin/ghippo/best-practice/oem/oem-in.html#environment-preparation","title":"Environment Preparation","text":"
                                  1. Deploy the AI platform environment:

                                    https://10.6.202.177:30443 as AI platform

                                  2. Deploy the customer system environment:

                                    http://10.6.202.177:30123 as the customer system

                                    Adjust the operations on the customer system in the steps below according to your actual situation.

                                  3. Plan the Subpath path of the customer system: http://10.6.202.177:30123/label-studio (it is recommended to use a recognizable name as the Subpath, one that does not conflict with any HTTP route of the main AI platform). Ensure that users can access the customer system through http://10.6.202.177:30123/label-studio.
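                                  Before docking, you can quickly confirm from any host that the Subpath is reachable, for example:

                                  curl -I http://10.6.202.177:30123/label-studio\n# Expect an HTTP 200 (or a redirect into Label Studio)\n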

                                  "},{"location":"en/admin/ghippo/best-practice/oem/oem-in.html#unify-domain-name-and-port","title":"Unify Domain Name and Port","text":"
                                  1. SSH into the AI platform server.

                                    ssh root@10.6.202.177\n
                                  2. Create the label-studio.yaml file using the vim command.

                                    vim label-studio.yaml\n
                                    label-studio.yaml
                                    apiVersion: networking.istio.io/v1beta1\nkind: ServiceEntry\nmetadata:\n  name: label-studio\n  namespace: ghippo-system\nspec:\n  exportTo:\n  - \"*\"\n  hosts:\n  - label-studio.svc.external\n  ports:\n  # Add a virtual port\n  - number: 80\n    name: http\n    protocol: HTTP\n  location: MESH_EXTERNAL\n  resolution: STATIC\n  endpoints:\n  # Change to the domain name (or IP) of the customer system\n  - address: 10.6.202.177\n    ports:\n      # Change to the port number of the customer system\n      http: 30123\n---\napiVersion: networking.istio.io/v1alpha3\nkind: VirtualService\nmetadata:\n  # Change to the name of the customer system\n  name: label-studio\n  namespace: ghippo-system\nspec:\n  exportTo:\n  - \"*\"\n  hosts:\n  - \"*\"\n  gateways:\n  - ghippo-gateway\n  http:\n  - match:\n      - uri:\n          exact: /label-studio # Change to the routing address of the customer system in the Web UI entry\n      - uri:\n          prefix: /label-studio/ # Change to the routing address of the customer system in the Web UI entry\n    route:\n    - destination:\n        # Change to the value of spec.hosts in the ServiceEntry above\n        host: label-studio.svc.external\n        port:\n          # Change to the value of spec.ports in the ServiceEntry above\n          number: 80\n---\napiVersion: security.istio.io/v1beta1\nkind: AuthorizationPolicy\nmetadata:\n  # Change to the name of the customer system\n  name: label-studio\n  namespace: istio-system\nspec:\n  action: ALLOW\n  selector:\n    matchLabels:\n      app: istio-ingressgateway\n  rules:\n  - from:\n    - source:\n        requestPrincipals:\n        - '*'\n  - to:\n    - operation:\n        paths:\n        - /label-studio # Change to the value of spec.http.match.uri.prefix in VirtualService\n        - /label-studio/* # Change to the value of spec.http.match.uri.prefix in VirtualService (Note: add \"*\" at the end)\n
                                  3. Apply the label-studio.yaml using the kubectl command:

                                    kubectl apply -f label-studio.yaml\n
                                  4. Verify that the Label Studio UI is accessible at the unified IP and port:
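                                  A sketch of checking that the Istio objects defined above were created as expected:

                                  kubectl -n ghippo-system get serviceentry label-studio\nkubectl -n ghippo-system get virtualservice label-studio\nkubectl -n istio-system get authorizationpolicy label-studio\n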

                                  "},{"location":"en/admin/ghippo/best-practice/oem/oem-in.html#integrate-user-systems","title":"Integrate User Systems","text":"

                                  Integrate the customer system with AI platform through protocols like OIDC/OAuth2 so that users can enter the customer system without logging in again after logging into AI platform.

                                  1. In the scenario of two AI platform instances, you can create an SSO access entry through Global Management -> Access Control -> Docking Portal.

                                  2. After creation, fill in the Client ID, Client Secret, and Login URL in the customer system's Global Management -> Access Control -> Identity Provider -> OIDC to complete user integration.

                                  3. After integration, the customer system's login page will display the OIDC (Custom) option. Select OIDC login the first time you enter the customer system from AI platform; subsequent visits will enter the customer system directly without selecting it again.

                                  "},{"location":"en/admin/ghippo/best-practice/oem/oem-in.html#integrate-navigation-bar","title":"Integrate Navigation Bar","text":"

                                  Refer to the tar package at the bottom of the document to implement an empty frontend sub-application, and embed the customer system into this empty shell application in the form of an iframe.

                                  1. Download the gproduct-demo-main.tar.gz file and change the value of the src attribute in App-iframe.vue under the src folder (the address where users enter the customer system):

                                    • The absolute address: src=\"https://10.6.202.177:30443/label-studio\" (AI platform address + Subpath)
                                    • The relative address, such as src=\"./external-anyproduct/insight\"
                                    App-iframe.vue
                                    <template>\n  <iframe\n    src=\"https://daocloud.io\"\n    title=\"demo\"\n    class=\"iframe-container\"\n  ></iframe>\n</template>\n\n<style lang=\"scss\">\nhtml,\nbody {\n  height: 100%;\n}\n\n#app {\n  display: flex;\n  height: 100%;\n  .iframe-container {\n    border: 0;\n    flex: 1 1 0;\n  }\n}\n</style>\n
                                  2. Delete the App.vue and main.ts files under the src folder, and rename the iframe variants (see the shell sketch after this list):

                                    • Rename App-iframe.vue to App.vue
                                    • Rename main-iframe.ts to main.ts
                                  3. Build the image following the steps in the readme (Note: before executing the last step, replace the image address in demo.yaml with the built image address)

                                    demo.yaml
                                    kind: Namespace\napiVersion: v1\nmetadata:\n  name: gproduct-demo\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: gproduct-demo\n  namespace: gproduct-demo\n  labels:\n    app: gproduct-demo\nspec:\n  selector:\n    matchLabels:\n      app: gproduct-demo\n  template:\n    metadata:\n      name: gproduct-demo\n      labels:\n        app: gproduct-demo\n    spec:\n      containers:\n      - name: gproduct-demo\n        image: release.daocloud.io/gproduct-demo # Modify this image address\n        ports:\n        - containerPort: 80\n---\napiVersion: v1\nkind: Service\n...\n
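                                  A sketch of the deletion and renames from step 2, run from the project root:

                                  cd src\n# Replace the default entry files with the iframe variants\nrm App.vue main.ts\nmv App-iframe.vue App.vue\nmv main-iframe.ts main.ts\n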

                                  After integration, the Customer System will appear in the primary navigation bar of AI platform, and clicking it will allow users to enter the customer system.

                                  "},{"location":"en/admin/ghippo/best-practice/oem/oem-in.html#customize-appearance","title":"Customize Appearance","text":"

                                  Note

                                  AI platform supports customizing the appearance by writing CSS. How the customer system implements appearance customization in actual applications needs to be handled according to the actual situation.

                                  Log in to the customer system, and through Global Management -> Settings -> Appearance, you can customize platform background colors, logos, and names. For specific operations, please refer to Appearance Customization.

                                  "},{"location":"en/admin/ghippo/best-practice/oem/oem-in.html#integrate-permission-system-optional","title":"Integrate Permission System (Optional)","text":"

                                  Method One:

                                  The customization team can implement a custom module: AI platform notifies this module of each user login event via webhook, and the module then calls the OpenAPIs of AnyProduct and AI platform to synchronize the user's permission information.

                                  Method Two:

                                  Through Webhook, notify AnyProduct of each authorization change (if required, it can be implemented later).

                                  "},{"location":"en/admin/ghippo/best-practice/oem/oem-in.html#use-other-capabilities-of-ai-platform-in-anyproduct-optional","title":"Use Other Capabilities of AI platform in AnyProduct (Optional)","text":"

                                  The method is to call the AI platform OpenAPI.

                                  "},{"location":"en/admin/ghippo/best-practice/oem/oem-in.html#references","title":"References","text":"
                                  • Refer to OEM OUT Document
                                  • Download the tar package for gProduct-demo-main integration
                                  "},{"location":"en/admin/ghippo/best-practice/oem/oem-out.html","title":"Integrate AI platform into Customer System (OEM OUT)","text":"

                                  OEM OUT refers to integrating AI platform as a sub-module into other products, appearing in their menus. After logging into the other product, users can directly access AI platform without logging in again. The OEM OUT integration involves 5 steps:

                                  1. Unify domain name
                                  2. User system integration
                                  3. Navigation bar integration
                                  4. Customize appearance
                                  5. Permission system integration (optional)
                                  "},{"location":"en/admin/ghippo/best-practice/oem/oem-out.html#unify-domain-name","title":"Unify Domain Name","text":"
                                  1. Deploy AI platform (Assuming the access address after deployment is https://10.6.8.2:30343/).

                                  2. To achieve cross-domain access between the customer system and AI platform, you can use an nginx reverse proxy. Use the following example configuration in vi /etc/nginx/conf.d/default.conf :

                                    server {\n    listen       80;\n    server_name  localhost;\n\n    location /dce5/ {\n      proxy_pass https://10.6.8.2:30343/;\n      proxy_http_version 1.1;\n      proxy_read_timeout 300s; # This line is required for using kpanda cloudtty, otherwise it can be removed\n      proxy_send_timeout 300s; # This line is required for using kpanda cloudtty, otherwise it can be removed\n\n      proxy_set_header Host $host;\n      proxy_set_header X-Real-IP $remote_addr;\n      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n\n      proxy_set_header Upgrade $http_upgrade; # This line is required for using kpanda cloudtty, otherwise it can be removed\n      proxy_set_header Connection $connection_upgrade; # This line is required for using kpanda cloudtty, otherwise it can be removed\n    }\n\n    location / {\n        proxy_pass https://10.6.165.50:30443/; # Assuming this is the customer system address (e.g., Yiyun)\n        proxy_http_version 1.1;\n\n        proxy_set_header Host $host;\n        proxy_set_header X-Real-IP $remote_addr;\n        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n    }\n}\n
                                  3. Assuming the nginx entry address is 10.6.165.50, follow the Customize AI platform Reverse Proxy Server Address to set the AI_PROXY reverse proxy as http://10.6.165.50/dce5. Ensure that AI platform can be accessed via http://10.6.165.50/dce5. The customer system also needs to configure the reverse proxy based on its specific requirements.
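                                  After editing the configuration, validate and reload nginx; a sketch assuming nginx is already running on the proxy host:

                                  nginx -t          # check the configuration syntax\nnginx -s reload   # reload without dropping existing connections\n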

                                  "},{"location":"en/admin/ghippo/best-practice/oem/oem-out.html#user-system-integration","title":"User System Integration","text":"

                                  Integrate the customer system with AI platform using protocols like OIDC/OAUTH, allowing users to access AI platform without logging in again after logging into the customer system. Fill in the OIDC information of the customer system in Global Management -> Access Control -> Identity Provider .

                                  After integration, the AI platform login page will display the OIDC (custom) option. When accessing AI platform from the customer system for the first time, select OIDC login, and subsequent logins will directly enter AI platform without needing to choose again.

                                  "},{"location":"en/admin/ghippo/best-practice/oem/oem-out.html#navigation-bar-integration","title":"Navigation Bar Integration","text":"

                                  Navigation bar integration means adding AI platform to the customer system's menu, so that AI platform can be accessed directly by clicking the corresponding menu item. Navigation bar integration depends on the customer system and needs to be handled case by case.

                                  "},{"location":"en/admin/ghippo/best-practice/oem/oem-out.html#customizie-appearance","title":"Customizie Appearance","text":"

                                  Use Global Management -> Settings -> Appearance to customize the platform's background color, logo, and name. For detailed instructions, refer to Appearance Customization.

                                  "},{"location":"en/admin/ghippo/best-practice/oem/oem-out.html#permission-system-integration-optional","title":"Permission System Integration (optional)","text":"

                                  Permission system integration is complex. If you have such requirements, please contact the Global Management team.

                                  "},{"location":"en/admin/ghippo/best-practice/oem/oem-out.html#reference","title":"Reference","text":"
                                  • OEM IN
                                  "},{"location":"en/admin/ghippo/install/gm-gateway.html","title":"Use Guomi Gateway to proxy AI platform","text":"

                                  Follow the steps below to configure the Guomi Gateway for AI platform.

                                  "},{"location":"en/admin/ghippo/install/gm-gateway.html#software-introduction","title":"Software Introduction","text":"

                                  Tengine: Tengine is a web server project initiated by taobao.com. Based on Nginx, it adds many advanced features and characteristics to meet the needs of high-traffic websites.

                                  Tongsuo: Formerly known as BabaSSL, Tongsuo is an open-source cryptographic library that offers a range of modern cryptographic algorithms and secure communication protocols. It is designed to support a variety of use cases, including storage, network security, key management, and privacy computing. By providing foundational cryptographic capabilities, Tongsuo ensures the privacy, integrity, and authenticity of data during transmission, storage, and usage. It also enhances security throughout the data lifecycle, offering robust privacy protection and security features.

                                  "},{"location":"en/admin/ghippo/install/gm-gateway.html#preparation","title":"Preparation","text":"

                                  A Linux host with Docker installed and internet access.

                                  "},{"location":"en/admin/ghippo/install/gm-gateway.html#compile-and-install-tengine-tongsuo","title":"Compile and install Tengine & Tongsuo","text":"

                                  Note

                                  This configuration is for reference only.

                                  FROM docker.m.daocloud.io/debian:11.3\n\n# Version\nENV TENGINE_VERSION=\"2.3.4\" \\\n    TONGSUO_VERSION=\"8.3.2\"\n\n# Install required system packages and dependencies\nRUN apt update && \\\n    apt -y install \\\n    wget \\\n    gcc \\\n    make \\\n    libpcre3 \\\n    libpcre3-dev \\\n    zlib1g-dev \\\n    perl \\\n    && apt clean\n\n# Build tengine\nRUN mkdir -p /tmp/pkg/cache/ && cd /tmp/pkg/cache/ \\\n    && wget https://github.com/alibaba/tengine/archive/refs/tags/${TENGINE_VERSION}.tar.gz -O tengine-${TENGINE_VERSION}.tar.gz \\\n    && tar zxvf tengine-${TENGINE_VERSION}.tar.gz \\\n    && wget https://github.com/Tongsuo-Project/Tongsuo/archive/refs/tags/${TONGSUO_VERSION}.tar.gz -O Tongsuo-${TONGSUO_VERSION}.tar.gz \\\n    && tar zxvf Tongsuo-${TONGSUO_VERSION}.tar.gz \\\n    && cd tengine-${TENGINE_VERSION} \\\n    && ./configure \\\n        --add-module=modules/ngx_openssl_ntls \\\n        --with-openssl=/tmp/pkg/cache/Tongsuo-${TONGSUO_VERSION} \\\n        --with-openssl-opt=\"--strict-warnings enable-ntls\" \\\n        --with-http_ssl_module --with-stream \\\n        --with-stream_ssl_module --with-stream_sni \\\n    && make \\\n    && make install \\\n    && ln -s /usr/local/nginx/sbin/nginx /usr/sbin/ \\\n    && rm -rf /tmp/pkg/cache\n\nEXPOSE 80 443\nSTOPSIGNAL SIGTERM\nCMD [\"nginx\", \"-g\", \"daemon off;\"]\n
                                  docker build -t tengine:0.0.1 .\n
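                                  After the image is built, start a Tengine container from it. This is a minimal example; adjust the port mappings and mount your nginx.conf and certificates as needed:

                                  docker run -d --name tengine -p 80:80 -p 443:443 tengine:0.0.1\n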
                                  "},{"location":"en/admin/ghippo/install/gm-gateway.html#generate-sm2-and-rsa-tls-certificates","title":"Generate SM2 and RSA TLS Certificates","text":"

                                  Here's how to generate SM2 and RSA TLS certificates and configure the Guomi gateway.

                                  "},{"location":"en/admin/ghippo/install/gm-gateway.html#sm2-tls-certificate","title":"SM2 TLS Certificate","text":"

                                  Note

                                  This certificate is only for testing purposes.

                                  You can refer to the Tongsuo official documentation to use OpenSSL to generate SM2 certificates, or visit Guomi SSL Laboratory to apply for SM2 certificates.

                                  In the end, we will get the following files:

                                  -rw-r--r-- 1 root root  749 Dec  8 02:59 sm2.*.enc.crt.pem\n-rw-r--r-- 1 root root  258 Dec  8 02:59 sm2.*.enc.key.pem\n-rw-r--r-- 1 root root  749 Dec  8 02:59 sm2.*.sig.crt.pem\n-rw-r--r-- 1 root root  258 Dec  8 02:59 sm2.*.sig.key.pem\n
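                                  If you only need throwaway test material, a self-signed SM2 pair can be generated with Tongsuo's openssl. The exact flags depend on the Tongsuo build, so treat this as a sketch rather than a definitive procedure; for real deployments, follow the Tongsuo documentation or apply via Guomi SSL Laboratory:

                                  # Assumes Tongsuo's openssl is on PATH; flags may vary by build\nopenssl ecparam -genkey -name SM2 -out sm2.test.sig.key.pem\nopenssl req -new -x509 -sm3 -key sm2.test.sig.key.pem -out sm2.test.sig.crt.pem -days 365 -subj \"/CN=sm2-test\"\n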
                                  "},{"location":"en/admin/ghippo/install/gm-gateway.html#rsa-tls-certificate","title":"RSA TLS Certificate","text":"
                                  -rw-r--r-- 1 root root  216 Dec  8 03:21 rsa.*.crt.pem\n-rw-r--r-- 1 root root 4096 Dec  8 02:59 rsa.*.key.pem\n
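                                  The RSA pair can be generated with any standard openssl build, for example with a common self-signed sketch like this (for testing only):

                                  openssl req -x509 -newkey rsa:2048 -nodes -keyout rsa.test.key.pem -out rsa.test.crt.pem -days 365 -subj \"/CN=rsa-test\"\n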
                                  "},{"location":"en/admin/ghippo/install/gm-gateway.html#configure-sm2-and-rsa-tls-certificates-for-the-guomi-gateway","title":"Configure SM2 and RSA TLS Certificates for the Guomi Gateway","text":"

                                  The Guomi gateway used in this article supports SM2 and RSA TLS certificates. The advantage of dual certificates is that when the browser does not support SM2 TLS certificates, it automatically switches to RSA TLS certificates.

                                  For more detailed configurations, please refer to the Tongsuo official documentation.

                                  Enter the Tengine container and configure it:

                                  # Go to the nginx configuration file directory\ncd /usr/local/nginx/conf\n\n# Create the cert folder to store TLS certificates\nmkdir cert\n\n# Copy the SM2 and RSA TLS certificates to the `/usr/local/nginx/conf/cert` directory\ncp sm2.*.enc.crt.pem sm2.*.enc.key.pem  sm2.*.sig.crt.pem  sm2.*.sig.key.pem /usr/local/nginx/conf/cert\ncp rsa.*.crt.pem  rsa.*.key.pem /usr/local/nginx/conf/cert\n\n# Edit the nginx.conf configuration\nvim nginx.conf\n...\nserver {\n  listen 443          ssl;\n  proxy_http_version  1.1;\n  # Enable Guomi function to support SM2 TLS certificates\n  enable_ntls         on;\n\n  # RSA certificate\n  # If your browser does not support Guomi certificates, you can enable this option, and Tengine will automatically recognize the user's browser and use RSA certificates for fallback\n  ssl_certificate                 /usr/local/nginx/conf/cert/rsa.*.crt.pem;\n  ssl_certificate_key             /usr/local/nginx/conf/cert/rsa.*.key.pem;\n\n  # Configure two pairs of SM2 certificates for encryption and signature\n  # SM2 signature certificate\n  ssl_sign_certificate            /usr/local/nginx/conf/cert/sm2.*.sig.crt.pem;\n  ssl_sign_certificate_key        /usr/local/nginx/conf/cert/sm2.*.sig.key.pem;\n  # SM2 encryption certificate\n  ssl_enc_certificate             /usr/local/nginx/conf/cert/sm2.*.enc.crt.pem;\n  ssl_enc_certificate_key         /usr/local/nginx/conf/cert/sm2.*.enc.key.pem;\n  ssl_protocols                   TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;\n\n  location / {\n    proxy_set_header Host $http_host;\n    proxy_set_header X-Real-IP $remote_addr;\n    proxy_set_header REMOTE-HOST $remote_addr;\n    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n    # You need to modify the address here to the address of the Istio ingress gateway\n    # For example, proxy_pass https://istio-ingressgateway.istio-system.svc.cluster.local\n    # Or proxy_pass https://demo-dev.daocloud.io\n    proxy_pass https://istio-ingressgateway.istio-system.svc.cluster.local;\n  }\n}\n
                                  "},{"location":"en/admin/ghippo/install/gm-gateway.html#reload-the-configuration-of-the-guomi-gateway","title":"Reload the Configuration of the Guomi Gateway","text":"
                                  nginx -s reload\n
                                  "},{"location":"en/admin/ghippo/install/gm-gateway.html#next-steps","title":"Next Steps","text":"

                                  After successfully deploying the Guomi gateway, customize the AI platform reverse proxy server address.

                                  "},{"location":"en/admin/ghippo/install/gm-gateway.html#verification","title":"Verification","text":"

                                  You can use a web browser that supports Guomi certificates, such as Samarium Browser, and access the UI through Tengine to verify that the Guomi certificate is effective.

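                                  You can also probe the gateway from the command line. The NTLS options below assume a Tongsuo build of openssl; a stock openssl will only exercise the RSA fallback. Replace <gateway-address> with your Tengine address:

                                  # RSA fallback (works with any openssl build)\nopenssl s_client -connect <gateway-address>:443\n\n# SM2 (NTLS) handshake (requires Tongsuo's openssl)\nopenssl s_client -connect <gateway-address>:443 -enable_ntls -ntls\n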
                                  "},{"location":"en/admin/ghippo/install/login.html","title":"Login","text":"

                                  When a user first uses the system, no data about them exists, so the system cannot identify them. To identify the user and bind data to them, the user needs an account that uniquely identifies their identity.

                                  AI platform assigns the user an account with specific permissions when the administrator creates a new user in User and Access Control . All actions performed by this user are associated with that account.

                                  The user logs in with the account and password, and the system verifies the identity. If verification succeeds, the user is logged in.

                                  Note

                                  If the user performs no operations for 24 hours after logging in, the session is automatically logged out. As long as the logged-in user remains active, the session persists.

                                  The simple process of user login is shown in the figure below.

                                  graph TB\n\nuser[Input username] --> pass[Input password] --> judge([Click Login and verify username and password])\njudge -.Correct.->success[Success]\njudge -.Incorrect.->fail[Fail]\n\nclassDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;\nclassDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;\nclassDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n\nclass user,pass cluster;\nclass judge plain\nclass success,fail k8s

                                  The user login screen is as shown in the figure below. For the specific login screen, please refer to the actual product.

                                  "},{"location":"en/admin/ghippo/install/reverse-proxy.html","title":"Customize AI platform Reverse Proxy Server Address","text":"

                                  The specific setup steps are as follows:

                                  1. Check if the global management Helm repository exists.

                                    helm repo list | grep ghippo\n

                                    If the result is empty or shows the following error, proceed to the next step; otherwise, skip the next step.

                                    Error: no repositories to show\n
                                  2. Add and update the global management Helm repository.

                                    helm repo add ghippo http://{harbor url}/chartrepo/{project}\nhelm repo update ghippo\n
                                  3. Set environment variables for easier use in the following steps.

                                    # Your reverse proxy address, for example `export Suanova_PROXY=\"https://demo-alpha.daocloud.io\"` \nexport Suanova_PROXY=\"https://domain:port\"\n\n# Helm --set parameter backup file\nexport GHIPPO_VALUES_BAK=\"ghippo-values-bak.yaml\"\n\n# Get the current version of ghippo\nexport GHIPPO_HELM_VERSION=$(helm get notes ghippo -n ghippo-system | grep \"Chart Version\" | awk -F ': ' '{ print $2 }')\n
                                  4. Back up the --set parameters.

                                    helm get values ghippo -n ghippo-system -o yaml > ${GHIPPO_VALUES_BAK}\n
                                  5. Add your reverse proxy address.

                                    Note

                                    • If possible, you can use the yq command:

                                      yq -i \".global.reverseProxy = \\\"${Suanova_PROXY}\\\"\" ${GHIPPO_VALUES_BAK}\n
                                    • Or you can use the vim command to edit and save:

                                      vim ${GHIPPO_VALUES_BAK}\n\nUSER-SUPPLIED VALUES:\n...\nglobal:\n  ...\n  reverseProxy: ${Suanova_PROXY} # Only need to modify this line\n
                                  6. Run helm upgrade to apply the configuration.

                                    helm upgrade ghippo ghippo/ghippo \\\n  -n ghippo-system \\\n  -f ${GHIPPO_VALUES_BAK} \\\n  --version ${GHIPPO_HELM_VERSION}\n
                                  7. Use kubectl to restart the global management Pod to apply the configuration.

                                    kubectl rollout restart deploy/ghippo-apiserver -n ghippo-system\nkubectl rollout restart statefulset/ghippo-keycloakx -n ghippo-system\n
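                                  Once the rollout finishes, you can confirm that the value was applied. This quick check assumes the same release and namespace names used above:

                                  # Confirm the reverseProxy value in the applied Helm values\nhelm get values ghippo -n ghippo-system -o yaml | grep reverseProxy\n\n# Wait until the restarted Pods are ready\nkubectl rollout status deploy/ghippo-apiserver -n ghippo-system\n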
                                  "},{"location":"en/admin/ghippo/install/user-isolation.html","title":"Customize AI platform Reverse Proxy Server Address","text":"

                                  The specific setup steps are as follows:

                                  1. Check if the global management Helm repository exists.

                                    helm repo list | grep ghippo\n

                                    If the result is empty or shows the following error, proceed to the next step; otherwise, skip the next step.

                                    Error: no repositories to show\n
                                  2. Add and update the global management Helm repository.

                                    helm repo add ghippo http://{harbor url}/chartrepo/{project}\nhelm repo update ghippo\n
                                  3. Set environment variables for easier use in the following steps.

                                    # Helm --set parameter backup file\nexport GHIPPO_VALUES_BAK=\"ghippo-values-bak.yaml\"\n\n# Get the current version of ghippo\nexport GHIPPO_HELM_VERSION=$(helm get notes ghippo -n ghippo-system | grep \"Chart Version\" | awk -F ': ' '{ print $2 }')\n
                                  4. Back up the --set parameters.

                                    helm get values ghippo -n ghippo-system -o yaml > ${GHIPPO_VALUES_BAK}\n
                                  5. Add your reverse proxy address.

                                    Note

                                    • If possible, you can use the yq command:

                                      yq -i \".apiserver.userIsolationMode = \\\"Folder\\\"\" ${GHIPPO_VALUES_BAK}\n
                                    • Or you can use the vim command to edit and save:

                                      vim ${GHIPPO_VALUES_BAK}\n\nUSER-SUPPLIED VALUES:\n...\n# Just add the following two lines\napiserver:\n  userIsolationMode: Folder\n
                                  6. Run helm upgrade to apply the configuration.

                                    helm upgrade ghippo ghippo/ghippo \\\n  -n ghippo-system \\\n  -f ${GHIPPO_VALUES_BAK} \\\n  --version ${GHIPPO_HELM_VERSION}\n
                                  7. Use kubectl to restart the global management Pod to apply the configuration.

                                    kubectl rollout restart deploy/ghippo-apiserver -n ghippo-system\n
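                                  As with the reverse proxy setting, you can confirm the value after the restart (assuming the same release and namespace names as above):

                                  # Confirm the userIsolationMode value in the applied Helm values\nhelm get values ghippo -n ghippo-system -o yaml | grep userIsolationMode\n\nkubectl rollout status deploy/ghippo-apiserver -n ghippo-system\n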
                                  "},{"location":"en/admin/ghippo/permissions/baize.html","title":"AI Lab Permissions","text":"

                                  AI Lab supports four user roles:

                                  • Admin / Baize Owner: Has full permissions (create, read, update, delete) for all features in the Developer and Operator.
                                  • Workspace Admin: Has full permissions (create, read, update, delete) for all features in the authorized workspace's Developer.
                                  • Workspace Editor: Has update and read permissions for all features in the authorized workspace's Developer.
                                  • Workspace Viewer: Has read permissions for all features in the authorized workspace's Developer.

                                  Each role has different permissions, as detailed below.

                                  Menu Object Operation Admin / Baize Owner Workspace Admin Workspace Editor Workspace Viewer Developer Overview View Overview \u2713 \u2713 \u2713 \u2713 Notebooks View Notebooks \u2713 \u2713 \u2713 \u2713 View Notebooks Details \u2713 \u2713 \u2713 \u2717 Create Notebooks \u2713 \u2713 \u2717 \u2717 Update Notebooks \u2713 \u2713 \u2713 \u2717 Clone Notebooks \u2713 \u2713 \u2717 \u2717 Stop Notebooks \u2713 \u2713 \u2713 \u2717 Start Notebooks \u2713 \u2713 \u2713 \u2717 Delete Notebooks \u2713 \u2713 \u2717 \u2717 Jobs View Jobs \u2713 \u2713 \u2713 \u2713 View Job Details \u2713 \u2713 \u2713 \u2713 Create Job \u2713 \u2713 \u2717 \u2717 Clone Job \u2713 \u2713 \u2717 \u2717 View Job Load Details \u2713 \u2713 \u2713 \u2717 Delete Job \u2713 \u2713 \u2717 \u2717 Job Analysis View Job Analysis \u2713 \u2713 \u2713 \u2713 View Job Analysis Details \u2713 \u2713 \u2713 \u2713 Delete Job Analysis \u2713 \u2713 \u2717 \u2717 Datasets View Datasets \u2713 \u2713 \u2713 \u2717 Create Dataset \u2713 \u2713 \u2717 \u2717 Resync Dataset \u2713 \u2713 \u2713 \u2717 Update Credentials \u2713 \u2713 \u2713 \u2717 Delete Dataset \u2713 \u2713 \u2717 \u2717 Runtime Env View Runtime Env \u2713 \u2713 \u2713 \u2713 Create Runtime Env \u2713 \u2713 \u2717 \u2717 Update Runtime Env \u2713 \u2713 \u2713 \u2717 Delete Runtime Env \u2713 \u2713 \u2717 \u2717 Inference Services View Inference Services \u2713 \u2713 \u2713 \u2713 View Inference Services Details \u2713 \u2713 \u2713 \u2713 Create Inference Service \u2713 \u2713 \u2717 \u2717 Update Inference Service \u2713 \u2713 \u2713 \u2717 Stop Inference Service \u2713 \u2713 \u2713 \u2717 Start Inference Service \u2713 \u2713 \u2713 \u2717 Delete Inference Service \u2713 \u2713 \u2717 \u2717 Operator Overview View Overview \u2713 \u2717 \u2717 \u2717 GPU Management View GPU Management \u2713 \u2717 \u2717 \u2717 Queue Management View Queue Management \u2713 \u2717 \u2717 \u2717 View Queue Details \u2713 \u2717 \u2717 \u2717 Create Queue \u2713 \u2717 \u2717 \u2717 Update Queue \u2713 \u2717 \u2717 \u2717 Delete Queue \u2713 \u2717 \u2717 \u2717"},{"location":"en/admin/ghippo/permissions/kpanda.html","title":"Container Management Permissions","text":"

                                  The container management module uses the following roles:

                                  • Admin / Kpanda Owner
                                  • Cluster Admin
                                  • NS Admin
                                  • NS Editor
                                  • NS Viewer

                                  Note

                                  • For more information about permissions, please refer to the Container Management Permission System Description.
                                  • For creating, managing, and deleting roles, please refer to Role and Permission Management.
                                  • The permissions of Cluster Admin , NS Admin , NS Editor , NS Viewer only take effect within the current cluster or namespace.

                                  The permissions granted to each role are as follows:

                                  Primary Function Secondary Function Permission Cluster Admin Ns Admin Ns Editor NS Viewer Cluster Clusters View Clusters \u2714 \u2714 \u2714 \u2714 Access Cluster \u2718 \u2718 \u2718 \u2718 Create Cluster \u2718 \u2718 \u2718 \u2718 Cluster Operations Enter Console \u2714 \u2714 (only in the list) \u2714 \u2718 View Monitoring \u2714 \u2718 \u2718 \u2718 Edit Basic Configuration \u2714 \u2718 \u2718 \u2718 Download kubeconfig \u2714 \u2714 (with ns permission) \u2714 (with ns permission) \u2714 (with ns permission) Disconnect Cluster \u2718 \u2718 \u2718 \u2718 View Logs \u2714 \u2718 \u2718 \u2718 Retry \u2718 \u2718 \u2718 \u2718 Uninstall Cluster \u2718 \u2718 \u2718 \u2718 Cluster Overview View Cluster Overview \u2714 \u2718 \u2718 \u2718 Node Management Access Node \u2718 \u2718 \u2718 \u2718 View Node List \u2714 \u2718 \u2718 \u2718 View Node Details \u2714 \u2718 \u2718 \u2718 View YAML \u2714 \u2718 \u2718 \u2718 Pause Scheduling \u2714 \u2718 \u2718 \u2718 Modify Labels \u2714 \u2718 \u2718 \u2718 Modify Annotations \u2714 \u2718 \u2718 \u2718 Modify Taints \u2714 \u2718 \u2718 \u2718 Remove Node \u2718 \u2718 \u2718 \u2718 Deployment View List \u2714 \u2714 \u2714 \u2714 View/Manage Details \u2714 \u2714 \u2714 \u2714 (view only) Create by YAML \u2714 \u2714 \u2714 \u2718 Create by image \u2714 \u2714 \u2714 \u2718 Select an instance in ws bound to ns Select image \u2714 \u2714 \u2714 \u2718 View IP Pool \u2714 \u2714 \u2714 \u2718 Edit Network Interface \u2714 \u2714 \u2714 \u2718 Enter Console \u2714 \u2714 \u2714 \u2718 View Monitoring \u2714 \u2714 \u2714 \u2714 View Logs \u2714 \u2714 \u2714 \u2714 Load Balancer Scaling \u2714 \u2714 \u2714 \u2718 Edit YAML \u2714 \u2714 \u2714 \u2718 Update \u2714 \u2714 \u2714 \u2718 Status - Pause Upgrade \u2714 \u2714 \u2714 \u2718 Status - Stop \u2714 \u2714 \u2714 \u2718 Status - Restart \u2714 \u2714 \u2714 \u2718 Delete \u2714 \u2714 \u2714 \u2718 StatefulSet View List \u2714 \u2714 \u2714 \u2714 View/Manage Details \u2714 \u2714 \u2714 \u2714 (view only) Create by YAML \u2714 \u2714 \u2714 \u2718 Create by image \u2714 \u2714 \u2714 \u2718 Select an instance in ws bound to ns Select image \u2714 \u2714 \u2714 \u2718 Enter Console \u2714 \u2714 \u2714 \u2718 View Monitoring \u2714 \u2714 \u2714 \u2714 View Logs \u2714 \u2714 \u2714 \u2714 Load Balancer Scaling \u2714 \u2714 \u2714 \u2718 Edit YAML \u2714 \u2714 \u2714 \u2718 Update \u2714 \u2714 \u2714 \u2718 Status - Stop \u2714 \u2714 \u2714 \u2718 Status - Restart \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 DaemonSet View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Create by YAML \u2713 \u2713 \u2713 \u2717 Create by image \u2713 \u2713 \u2713 \u2717 Select an instance in ws bound to ns Select image \u2713 \u2713 \u2713 \u2717 Go to console \u2713 \u2713 \u2713 \u2717 Check monitor \u2713 \u2713 \u2713 \u2713 View logs \u2713 \u2713 \u2713 \u2713 Edit YAML \u2713 \u2713 \u2713 \u2717 Update \u2713 \u2713 \u2713 \u2717 Status - restart \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 Job View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Create by YAML \u2713 \u2713 \u2713 \u2717 Create by image \u2713 \u2713 \u2713 \u2717 Instance list \u2713 \u2713 \u2713 \u2713 Select an instance in ws bound to ns Select image \u2713 \u2713 \u2713 \u2717 Go to console \u2713 \u2713 \u2713 \u2717 View logs \u2713 
\u2713 \u2713 \u2713 View YAML \u2713 \u2713 \u2713 \u2713 Restart \u2713 \u2713 \u2713 \u2717 View event \u2713 \u2713 \u2713 \u2713 Delete \u2713 \u2713 \u2713 \u2717 CronJob View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Create by YAML \u2713 \u2713 \u2713 \u2717 Create by image \u2713 \u2713 \u2713 \u2717 Select an instance in ws bound to ns Select image \u2713 \u2713 \u2713 \u2717 Edit YAML \u2713 \u2713 \u2713 \u2717 Stop \u2713 \u2713 \u2713 \u2717 View jobs \u2713 \u2713 \u2713 \u2713 View event \u2713 \u2713 \u2713 \u2713 Delete \u2713 \u2713 \u2713 \u2717 Pod View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Go to console \u2713 \u2713 \u2713 \u2717 Check monitor \u2713 \u2713 \u2713 \u2713 View logs \u2713 \u2713 \u2713 \u2713 View YAML \u2713 \u2713 \u2713 \u2713 Upload file \u2713 \u2713 \u2713 \u2717 Download file \u2713 \u2713 \u2713 \u2717 View containers \u2713 \u2713 \u2713 \u2713 View event \u2713 \u2713 \u2713 \u2713 Delete \u2713 \u2713 \u2713 \u2717 ReplicaSet View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Go to console \u2713 \u2713 \u2713 \u2717 Check monitor \u2713 \u2713 \u2713 \u2713 View logs \u2713 \u2713 \u2713 \u2713 View YAML \u2713 \u2713 \u2713 \u2713 Delete \u2713 \u2713 \u2713 \u2717 Helm app View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Update \u2713 \u2713 \u2713 \u2717 View YAML \u2713 \u2713 \u2713 \u2713 Delete \u2713 \u2713 \u2713 \u2717 Helm chart View list \u2713 \u2713 \u2713 \u2713 View details \u2713 \u2713 \u2713 \u2713 Install chart \u2713 \u2713 (Fine for ns level) \u2717 \u2717 Download chart \u2713 \u2713 \u2713 (Consistent with viewing interface) \u2713 Helm repo View list \u2713 \u2713 \u2713 \u2713 Create repo \u2713 \u2717 \u2717 \u2717 Update repo \u2713 \u2717 \u2717 \u2717 Clone repo \u2713 \u2717 \u2717 \u2717 Refresh repo \u2713 \u2717 \u2717 \u2717 Modify label \u2713 \u2717 \u2717 \u2717 Modify annotation \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 Service View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Create by YAML \u2713 \u2713 \u2713 \u2717 Create \u2713 \u2713 \u2713 \u2717 Update \u2713 \u2713 \u2713 \u2717 View event \u2713 \u2713 \u2713 \u2713 Edit YAML \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 Ingress View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Create by YAML \u2713 \u2713 \u2713 \u2717 Create \u2713 \u2713 \u2713 \u2717 Update \u2713 \u2713 \u2713 \u2717 View event \u2713 \u2713 \u2713 \u2713 Edit YAML \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 Network policy View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2717 Create by YAML \u2713 \u2713 \u2713 \u2717 Create \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 Network config Config \u2713 \u2713 \u2713 \u2717 CRD View list \u2713 \u2717 \u2717 \u2717 View/Manage details \u2713 \u2717 \u2717 \u2717 Create by YAML \u2713 \u2717 \u2717 \u2717 Edit YAML \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 PVC View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Create \u2713 \u2713 \u2713 \u2717 Select sc \u2713 \u2713 \u2713 \u2717 Create by YAML \u2713 \u2713 \u2713 \u2717 Edit YAML \u2713 \u2713 \u2713 \u2717 
Clone \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 PV View list \u2713 \u2717 \u2717 \u2717 View/Manage details \u2713 \u2717 \u2717 \u2717 Create by YAML \u2713 \u2717 \u2717 \u2717 Create \u2713 \u2717 \u2717 \u2717 Edit YAML \u2713 \u2717 \u2717 \u2717 Update \u2713 \u2717 \u2717 \u2717 Clone \u2713 \u2717 \u2717 \u2717 Modify label \u2713 \u2717 \u2717 \u2717 Modify annotation \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 SC View list \u2713 \u2717 \u2717 \u2717 Create by YAML \u2713 \u2717 \u2717 \u2717 Create \u2713 \u2717 \u2717 \u2717 View YAML \u2713 \u2717 \u2717 \u2717 Update \u2713 \u2717 \u2717 \u2717 Authorize NS \u2713 \u2717 \u2717 \u2717 Deauthorize \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 ConfigMap View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Create by YAML \u2713 \u2713 \u2713 \u2717 Create \u2713 \u2713 \u2713 \u2717 Edit YAML \u2713 \u2713 \u2713 \u2717 Update \u2713 \u2713 \u2713 \u2717 Export ConfigMap \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 Secret View list \u2713 \u2713 \u2713 \u2717 View/Manage details \u2713 \u2713 \u2713 \u2717 Create by YAML \u2713 \u2713 \u2713 \u2717 Create \u2713 \u2713 \u2713 \u2717 Edit YAML \u2713 \u2713 \u2713 \u2717 Update \u2713 \u2713 \u2713 \u2717 Export secret \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 Namespace View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Create by YAML \u2713 \u2717 \u2717 \u2717 Create \u2713 \u2717 \u2717 \u2717 View YAML \u2713 \u2713 \u2713 \u2717 Modify label \u2713 \u2713 \u2717 \u2717 Unbind WS \u2717 \u2717 \u2717 \u2717 Bind WS \u2717 \u2717 \u2717 \u2717 Quotas \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 Cluster operation View list \u2713 \u2717 \u2717 \u2717 View YAML \u2713 \u2717 \u2717 \u2717 View logs \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 Helm operation Set preserved entries \u2713 \u2717 \u2717 \u2717 View YAML \u2713 \u2713 \u2717 \u2717 View logs \u2713 \u2713 \u2717 \u2717 Delete \u2713 \u2713 \u2717 \u2717 Cluster upgrade View details \u2713 \u2717 \u2717 \u2717 Upgrade \u2717 \u2717 \u2717 \u2717 Cluster settings Addon config \u2713 \u2717 \u2717 \u2717 Advanced config \u2713 \u2717 \u2717 \u2717 Namespace View list \u2713 \u2713 \u2713 \u2713 Create \u2713 \u2717 \u2717 \u2717 View/Manage details \u2713 \u2713 \u2713 \u2713 View YAML \u2713 \u2713 \u2713 \u2717 Modify label \u2713 \u2713 \u2717 \u2717 Bind WS \u2713 \u2717 \u2717 \u2717 Quotas \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 Workload Deployment View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Go to console \u2713 \u2713 \u2713 \u2717 Check monitor \u2713 \u2713 \u2713 \u2713 View logs \u2713 \u2713 \u2713 \u2713 Workload scaling \u2713 \u2713 \u2713 \u2717 Edit YAML \u2713 \u2713 \u2713 \u2717 Update \u2713 \u2713 \u2713 \u2717 Status - Pause Upgrade \u2713 \u2713 \u2713 \u2717 Status - Stop \u2713 \u2713 \u2713 \u2717 Status - restart \u2713 \u2713 \u2713 \u2717 Revert \u2713 \u2713 \u2713 \u2717 Modify label and annotation \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 StatefulSet View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Go to console \u2713 \u2713 \u2713 \u2717 Check monitor \u2713 \u2713 \u2713 \u2713 View logs \u2713 \u2713 \u2713 \u2713 
Workload scaling \u2713 \u2713 \u2713 \u2717 Edit YAML \u2713 \u2713 \u2713 \u2717 Update \u2713 \u2713 \u2713 \u2717 Status - Stop \u2713 \u2713 \u2713 \u2717 Status - restart \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 DaemonSet View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Go to console \u2713 \u2713 \u2713 \u2717 Check monitor \u2713 \u2713 \u2713 \u2713 View logs \u2713 \u2713 \u2713 \u2713 Edit YAML \u2713 \u2713 \u2713 \u2717 Update \u2713 \u2713 \u2713 \u2717 Status - restart \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 Job View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Go to console \u2713 \u2713 \u2713 \u2717 View logs \u2713 \u2713 \u2713 \u2713 View YAML \u2713 \u2713 \u2713 \u2717 Restart \u2713 \u2713 \u2713 \u2717 View event \u2713 \u2713 \u2713 \u2713 Delete \u2713 \u2713 \u2713 \u2717 CronJob View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) View event \u2713 \u2713 \u2713 \u2713 Delete \u2713 \u2713 \u2713 \u2717 Pod View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Go to console \u2713 \u2713 \u2713 \u2717 Check monitor \u2713 \u2713 \u2713 \u2713 View logs \u2713 \u2713 \u2713 \u2713 View YAML \u2713 \u2713 \u2713 \u2713 Upload file \u2713 \u2713 \u2713 \u2717 Download file \u2713 \u2713 \u2713 \u2717 View containers \u2713 \u2713 \u2713 \u2713 View event \u2713 \u2713 \u2713 \u2713 Delete \u2713 \u2713 \u2713 \u2717 Backup and Restore App backup View list \u2713 \u2717 \u2717 \u2717 View/Manage details \u2713 \u2717 \u2717 \u2717 Create backup schedule \u2713 \u2717 \u2717 \u2717 View YAML \u2713 \u2717 \u2717 \u2717 Update Schedule \u2713 \u2717 \u2717 \u2717 Pause \u2713 \u2717 \u2717 \u2717 Run now \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 Resume backup View list \u2713 \u2717 \u2717 \u2717 View/Manage details \u2713 \u2717 \u2717 \u2717 Resume backup \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 Backup point View list \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 Object storage View list \u2713 \u2717 \u2717 \u2717 etcd backup View backup policies \u2713 \u2717 \u2717 \u2717 Create backup policies \u2713 \u2717 \u2717 \u2717 View logs \u2713 \u2717 \u2717 \u2717 View YAML \u2713 \u2717 \u2717 \u2717 Update backup policy \u2713 \u2717 \u2717 \u2717 Stop/Start \u2713 \u2717 \u2717 \u2717 Run now \u2713 \u2717 \u2717 \u2717 View/Manage details \u2713 \u2717 \u2717 \u2717 Delete backup records \u2713 \u2717 \u2717 \u2717 View backup points \u2713 \u2717 \u2717 \u2717 Cluster inspection Cluster inspection View list \u2713 \u2717 \u2717 \u2717 View/Manage details \u2713 \u2717 \u2717 \u2717 Cluster inspection \u2713 \u2717 \u2717 \u2717 Settings \u2713 \u2717 \u2717 \u2717 Permissions Permissions View list \u2713 \u2717 \u2717 \u2717 Grant to cluster admin \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 NS permissions View list \u2713 \u2713 \u2717 \u2717 Grant to ns admin \u2713 \u2713 \u2717 \u2717 Grant to ns editor \u2713 \u2713 \u2717 \u2717 Grant to ns viewer \u2713 \u2713 \u2717 \u2717 Edit permissions \u2713 \u2713 \u2717 \u2717 Delete \u2713 \u2713 \u2717 \u2717 Security Compliance scanning View scanning report \u2713 \u2717 \u2717 \u2717 View scanning report details \u2713 \u2717 \u2717 \u2717 Download scanning report \u2713 \u2717 \u2717 \u2717 Delete scanning 
report \u2713 \u2717 \u2717 \u2717 View scanning policies \u2713 \u2717 \u2717 \u2717 Create scanning policy \u2713 \u2717 \u2717 \u2717 Delete scanning policy \u2713 \u2717 \u2717 \u2717 View scanning config list \u2713 \u2717 \u2717 \u2717 View scanning config details \u2713 \u2717 \u2717 \u2717 Delete scanning config \u2713 \u2717 \u2717 \u2717 Scan permission View scanning reports \u2713 \u2717 \u2717 \u2717 View scanning report details \u2713 \u2717 \u2717 \u2717 Delete scanning report \u2713 \u2717 \u2717 \u2717 View scanning policies \u2713 \u2717 \u2717 \u2717 Create scanning policy \u2713 \u2717 \u2717 \u2717 Delete scanning policy \u2713 \u2717 \u2717 \u2717 Scan vulnerability View scanning reports \u2713 \u2717 \u2717 \u2717 View scanning report detail \u2713 \u2717 \u2717 \u2717 Delete scanning report \u2713 \u2717 \u2717 \u2717 View scanning policies \u2713 \u2717 \u2717 \u2717 Create scanning policy \u2713 \u2717 \u2717 \u2717 Delete scanning policy \u2713 \u2717 \u2717 \u2717"},{"location":"en/admin/ghippo/personal-center/accesstoken.html","title":"Access key","text":"

                                  The access key can be used to access the OpenAPI and for continuous delivery. Users can obtain a key and access the API by following the steps below in the personal center.

                                  "},{"location":"en/admin/ghippo/personal-center/accesstoken.html#get-key","title":"Get key","text":"

                                  Log in to AI platform, find Personal Center in the drop-down menu in the upper right corner, and manage the account's access keys on the Access Keys page.

                                  Info

                                  The access key is displayed only once. If you forget it, you will need to create a new one.

                                  "},{"location":"en/admin/ghippo/personal-center/accesstoken.html#use-the-key-to-access-api","title":"Use the key to access API","text":"

                                  When accessing the AI platform OpenAPI, add the header Authorization: Bearer ${token} to the request to identify the visitor, where ${token} is the key obtained in the previous step. Request example:

                                  curl -X GET -H 'Authorization: Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IkRKVjlBTHRBLXZ4MmtQUC1TQnVGS0dCSWc1cnBfdkxiQVVqM2U3RVByWnMiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE2NjE0MTU5NjksImlhdCI6MTY2MDgxMTE2OSwiaXNzIjoiZ2hpcHBvLmlvIiwic3ViIjoiZjdjOGIxZjUtMTc2MS00NjYwLTg2MWQtOWI3MmI0MzJmNGViIiwicHJlZmVycmVkX3VzZXJuYW1lIjoiYWRtaW4iLCJncm91cHMiOltdfQ.RsUcrAYkQQ7C6BxMOrdD3qbBRUt0VVxynIGeq4wyIgye6R8Ma4cjxG5CbU1WyiHKpvIKJDJbeFQHro2euQyVde3ygA672ozkwLTnx3Tu-_mB1BubvWCBsDdUjIhCQfT39rk6EQozMjb-1X1sbLwzkfzKMls-oxkjagI_RFrYlTVPwT3Oaw-qOyulRSw7Dxd7jb0vINPq84vmlQIsI3UuTZSNO5BCgHpubcWwBss-Aon_DmYA-Et_-QtmPBA3k8E2hzDSzc7eqK0I68P25r9rwQ3DeKwD1dbRyndqWORRnz8TLEXSiCFXdZT2oiMrcJtO188Ph4eLGut1-4PzKhwgrQ' 'https://demo-dev.daocloud.io/apis/ghippo.io/v1alpha1/users?page=1&pageSize=10' -k\n

                                  Request result

                                  {\n    \"items\": [\n        {\n            \"id\": \"a7cfd010-ebbe-4601-987f-d098d9ef766e\",\n            \"name\": \"a\",\n            \"email\": \"\",\n            \"description\": \"\",\n            \"firstname\": \"\",\n            \"lastname\": \"\",\n            \"source\": \"locale\",\n            \"enabled\": true,\n            \"createdAt\": \"1660632794800\",\n            \"updatedAt\": \"0\",\n            \"lastLoginAt\": \"\"\n        }\n    ],\n    \"pagination\": {\n        \"page\": 1,\n        \"pageSize\": 10,\n        \"total\": 1\n    }\n}\n
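                                  For scripting, it is convenient to keep the key in a variable and quote the URL so the shell does not treat & as a background operator. This is a sketch; replace the host and key placeholders with your own values:

                                  export TOKEN=\"<your-access-key>\"\ncurl -sk -H \"Authorization: Bearer ${TOKEN}\" \\\n  \"https://<ai-platform-host>/apis/ghippo.io/v1alpha1/users?page=1&pageSize=10\"\n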
                                  "},{"location":"en/admin/ghippo/personal-center/language.html","title":"language settings","text":"

                                  This section explains how to set the interface language. Currently, Chinese and English are supported.

                                  Language settings are the entry point for the platform's multilingual services. The platform is displayed in Chinese by default. Users can switch the platform language to English or have it automatically follow the browser's language preference. Each user's language setting is independent, and switching does not affect other users.

                                  The platform provides three language options: Chinese, English, and automatic detection of your browser's language preference.

                                  The operation steps are as follows.

                                  1. Log in to the AI platform with your username/password. Click Global Management at the bottom of the left navigation bar.

                                  2. Click the username in the upper right corner and select Personal Center .

                                  3. Click the Language Settings tab.

                                  4. Toggle the language option.

                                  "},{"location":"en/admin/ghippo/personal-center/security-setting.html","title":"Security Settings","text":"

                                  Function description: used to set the email address and change the login password.

                                  • Email: After the administrator configures the mail server address, users can retrieve their password by clicking the Forgot Password button on the login page and entering their email address.
                                  • Password: The password used to log in to the platform. It is recommended to change it regularly.

                                  The specific operation steps are as follows:

                                  1. Click the username in the upper right corner and select Personal Center .

                                  2. Click the Security Settings tab. Fill in your email address or change the login password.

                                  "},{"location":"en/admin/ghippo/personal-center/ssh-key.html","title":"Configuring SSH Public Key","text":"

                                  This article explains how to configure an SSH public key.

                                  "},{"location":"en/admin/ghippo/personal-center/ssh-key.html#step-1-view-existing-ssh-keys","title":"Step 1. View Existing SSH Keys","text":"

                                  Before generating a new SSH key, check whether an existing SSH key stored in the local user's home directory can be used. On Linux and Mac, use the following commands to view existing public keys. Windows users can run the same commands in WSL (requires Windows 10 or above) or Git Bash.

                                  • ED25519 Algorithm:

                                    cat ~/.ssh/id_ed25519.pub\n
                                  • RSA Algorithm:

                                    cat ~/.ssh/id_rsa.pub\n

                                  If a long string starting with ssh-ed25519 or ssh-rsa is returned, it means that a local public key already exists. You can skip Step 2 Generate SSH Key and proceed directly to Step 3.

                                  "},{"location":"en/admin/ghippo/personal-center/ssh-key.html#step-2-generate-ssh-key","title":"Step 2. Generate SSH Key","text":"

                                  If Step 1 does not return the specified content string, it means that there is no available SSH key locally and a new SSH key needs to be generated. Please follow these steps:

                                  1. Open the terminal (Windows users please use WSL or Git Bash) and run the ssh-keygen -t command.

                                  2. Enter the key algorithm type and an optional comment.

                                    The comment will appear in the .pub file; an email address is commonly used as the comment.

                                    • To generate a key pair based on the ED25519 algorithm, use the following command:

                                      ssh-keygen -t ed25519 -C \"<comment>\"\n
                                    • To generate a key pair based on the RSA algorithm, use the following command:

                                      ssh-keygen -t rsa -C \"<comment>\"\n
                                  3. Press Enter to choose the SSH key generation path.

                                    Taking the ED25519 algorithm as an example, the default path is as follows:

                                    Generating public/private ed25519 key pair.\nEnter file in which to save the key (/home/user/.ssh/id_ed25519):\n

                                    The default key generation path is /home/user/.ssh/id_ed25519, and the proper public key is /home/user/.ssh/id_ed25519.pub.

                                  4. Set a passphrase for the key.

                                    Enter passphrase (empty for no passphrase):\nEnter same passphrase again:\n

                                    The passphrase is empty by default, and you can choose to use a passphrase to protect the private key file. If you do not want to enter a passphrase every time you access the repository using the SSH protocol, you can enter an empty passphrase when creating the key.

                                  5. Press Enter to complete the key pair creation.

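                                  If you set a passphrase, you can optionally load the key into ssh-agent so you are not prompted on every use. A generic sketch, using the default ed25519 path from above:

                                  # Start the agent in the current shell\neval \"$(ssh-agent -s)\"\n\n# Add the private key (you will be asked for the passphrase once)\nssh-add ~/.ssh/id_ed25519\n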
                                  "},{"location":"en/admin/ghippo/personal-center/ssh-key.html#step-3-copy-the-public-key","title":"Step 3. Copy the Public Key","text":"

                                  In addition to manually copying the generated public key information printed on the command line, you can use the following commands to copy the public key to the clipboard, depending on the operating system.

                                  • Windows (in WSL or Git Bash):

                                    cat ~/.ssh/id_ed25519.pub | clip\n
                                  • Mac:

                                    tr -d '\\n'< ~/.ssh/id_ed25519.pub | pbcopy\n
                                  • GNU/Linux (requires xclip):

                                    xclip -sel clip < ~/.ssh/id_ed25519.pub\n
                                  "},{"location":"en/admin/ghippo/personal-center/ssh-key.html#step-4-set-the-public-key-on-ai-platform-platform","title":"Step 4. Set the Public Key on AI platform Platform","text":"
                                  1. Log in to the AI platform UI page and select Profile -> SSH Public Key in the upper right corner of the page.

                                  2. Add the generated SSH public key information.

                                    1. SSH public key content.

                                    2. Public key title: Supports customizing the public key name for management differentiation.

                                    3. Expiration: Set the expiration period for the public key. After it expires, the public key will be automatically invalidated and cannot be used. If not set, it will be permanently valid.

                                  "},{"location":"en/admin/ghippo/platform-setting/about.html","title":"About","text":"

                                  The About page primarily showcases the latest versions of each module, highlights the open source software used, and expresses gratitude to the technical team via an animated video.

                                  Steps to view are as follows:

                                  1. Log in to AI platform as a user with Admin role. Click Global Management at the bottom of the left navigation bar.

                                  2. Click Settings , select About , and check the product version, open source software statement, and development teams.

                                    License Statement

                                    Technical Team

                                  "},{"location":"en/admin/ghippo/platform-setting/appearance.html","title":"Customize Appearance","text":"

                                  In AI platform, you have the option to customize the appearance of the login page, top navigation bar, bottom copyright and ICP registration to enhance your product recognition.

                                  "},{"location":"en/admin/ghippo/platform-setting/appearance.html#customizing-login-page-and-top-navigation-bar","title":"Customizing Login Page and Top Navigation Bar","text":"
                                  1. To get started, log in to AI platform as a user with the admin role and navigate to Global Management -> Settings found at the bottom of the left navigation bar.

                                  2. Select Appearance . On the Custom your login page tab, modify the icon and text of the login page as needed, then click Save .

                                  3. Log out and refresh the login page to see the configured effect.

                                  4. On the Advanced customization tab, you can modify the login page, navigation bar, copyright, and ICP registration with CSS.

                                  Note

                                  If you wish to restore the default settings, simply click Revert . This action will discard all customized settings.

                                  "},{"location":"en/admin/ghippo/platform-setting/appearance.html#advanced-customization","title":"Advanced Customization","text":"

                                  Advanced customization allows you to modify the color, font spacing, and font size of the entire container platform using CSS styles. Please note that familiarity with CSS syntax is required.

                                  To reset any advanced customizations, delete the contents of the black input box or click the Revert button.

                                  Sample CSS for Login Page Customization:

                                  .test {\n  width: 12px;\n}\n\n#kc-login {\n /* color: red!important; */\n}\n

                                  CSS sample for page customization after login:

                                  .dao-icon.dao-iconfont.icon-service-global.dao-nav__head-icon {\n   color: red!important;\n}\n.ghippo-header-logo {\n  background-color: green!important;\n}\n.ghippo-header {\n  background-color: rgb(128, 115, 0)!important;\n}\n.ghippo-header-nav-main {\n  background-color: rgb(0, 19, 128)!important;\n}\n.ghippo-header-sub-nav-main .dao-popper-inner {\n  background-color: rgb(231, 82, 13) !important;\n}\n

                                  CSS sample for custom footer (including copyright, filing, and other information at the bottom)

                                  <div class=\"footer-content\">\n  <span class=\"footer-item\">Copyright \u00a9 2024 Suanova</span>\n  <a class=\"footer-item\" href=\"https://beian.miit.gov.cn/\" target=\"_blank\" rel=\"noopener noreferrer\">\u6caa ICP \u5907 14048409 \u53f7 - 1</a>\n  <a class=\"footer-item\" href=\"https://beian.miit.gov.cn/\" target=\"_blank\" rel=\"noopener noreferrer\">\u6caa ICP \u5907 14048409 \u53f7 - 2</a>\n</div>\n<div class=\"footer-content\">\n  <img class=\"gongan-icon\" src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABIAAAASCAYAAABWzo5XAAAACXBIWXMAAAsTAAALEwEAmpwYAAAKTWlDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVN3WJP3Fj7f92UPVkLY8LGXbIEAIiOsCMgQWaIQkgBhhBASQMWFiApWFBURnEhVxILVCkidiOKgKLhnQYqIWotVXDjuH9yntX167+3t+9f7vOec5/zOec8PgBESJpHmomoAOVKFPDrYH49PSMTJvYACFUjgBCAQ5svCZwXFAADwA3l4fnSwP/wBr28AAgBw1S4kEsfh/4O6UCZXACCRAOAiEucLAZBSAMguVMgUAMgYALBTs2QKAJQAAGx5fEIiAKoNAOz0ST4FANipk9wXANiiHKkIAI0BAJkoRyQCQLsAYFWBUiwCwMIAoKxAIi4EwK4BgFm2MkcCgL0FAHaOWJAPQGAAgJlCLMwAIDgCAEMeE80DIEwDoDDSv+CpX3CFuEgBAMDLlc2XS9IzFLiV0Bp38vDg4iHiwmyxQmEXKRBmCeQinJebIxNI5wNMzgwAABr50cH+OD+Q5+bk4eZm52zv9MWi/mvwbyI+IfHf/ryMAgQAEE7P79pf5eXWA3DHAbB1v2upWwDaVgBo3/ldM9sJoFoK0Hr5i3k4/EAenqFQyDwdHAoLC+0lYqG9MOOLPv8z4W/gi372/EAe/tt68ABxmkCZrcCjg/1xYW52rlKO58sEQjFu9+cj/seFf/2OKdHiNLFcLBWK8ViJuFAiTcd5uVKRRCHJleIS6X8y8R+W/QmTdw0ArIZPwE62B7XLbMB+7gECiw5Y0nYAQH7zLYwaC5EAEGc0Mnn3AACTv/mPQCsBAM2XpOMAALzoGFyolBdMxggAAESggSqwQQcMwRSswA6cwR28wBcCYQZEQAwkwDwQQgbkgBwKoRiWQRlUwDrYBLWwAxqgEZrhELTBMTgN5+ASXIHrcBcGYBiewhi8hgkEQcgIE2EhOogRYo7YIs4IF5mOBCJhSDSSgKQg6YgUUSLFyHKkAqlCapFdSCPyLXIUOY1cQPqQ28ggMor8irxHMZSBslED1AJ1QLmoHxqKxqBz0XQ0D12AlqJr0Rq0Hj2AtqKn0UvodXQAfYqOY4DRMQ5mjNlhXIyHRWCJWBomxxZj5Vg1Vo81Yx1YN3YVG8CeYe8IJAKLgBPsCF6EEMJsgpCQR1hMWEOoJewjtBK6CFcJg4Qxwicik6hPtCV6EvnEeGI6sZBYRqwm7iEeIZ4lXicOE1+TSCQOyZLkTgohJZAySQtJa0jbSC2kU6Q+0hBpnEwm65Btyd7kCLKArCCXkbeQD5BPkvvJw+S3FDrFiOJMCaIkUqSUEko1ZT/lBKWfMkKZoKpRzame1AiqiDqfWkltoHZQL1OHqRM0dZolzZsWQ8ukLaPV0JppZ2n3aC/pdLoJ3YMeRZfQl9Jr6Afp5+mD9HcMDYYNg8dIYigZaxl7GacYtxkvmUymBdOXmchUMNcyG5lnmA+Yb1VYKvYqfBWRyhKVOpVWlX6V56pUVXNVP9V5qgtUq1UPq15WfaZGVbNQ46kJ1Bar1akdVbupNq7OUndSj1DPUV+jvl/9gvpjDbKGhUaghkijVGO3xhmNIRbGMmXxWELWclYD6yxrmE1iW7L57Ex2Bfsbdi97TFNDc6pmrGaRZp3mcc0BDsax4PA52ZxKziHODc57LQMtPy2x1mqtZq1+rTfaetq+2mLtcu0W7eva73VwnUCdLJ31Om0693UJuja6UbqFutt1z+o+02PreekJ9cr1Dund0Uf1bfSj9Rfq79bv0R83MDQINpAZbDE4Y/DMkGPoa5hpuNHwhOGoEctoupHEaKPRSaMnuCbuh2fjNXgXPmasbxxirDTeZdxrPGFiaTLbpMSkxeS+Kc2Ua5pmutG003TMzMgs3KzYrMnsjjnVnGueYb7ZvNv8jYWlRZzFSos2i8eW2pZ8ywWWTZb3rJhWPlZ5VvVW16xJ1lzrLOtt1ldsUBtXmwybOpvLtqitm63Edptt3xTiFI8p0in1U27aMez87ArsmuwG7Tn2YfYl9m32zx3MHBId1jt0O3xydHXMdmxwvOuk4TTDqcSpw+lXZxtnoXOd8zUXpkuQyxKXdpcXU22niqdun3rLleUa7rrStdP1o5u7m9yt2W3U3cw9xX2r+00umxvJXcM970H08PdY4nHM452nm6fC85DnL152Xlle+70eT7OcJp7WMG3I28Rb4L3Le2A6Pj1l+s7pAz7GPgKfep+Hvqa+It89viN+1n6Zfgf8nvs7+sv9j/i/4XnyFvFOBWABwQHlAb2BGoGzA2sDHwSZBKUHNQWNBbsGLww+FUIMCQ1ZH3KTb8AX8hv5YzPcZyya0RXKCJ0VWhv6MMwmTB7WEY6GzwjfEH5vpvlM6cy2CIjgR2yIuB9pGZkX+X0UKSoyqi7qUbRTdHF09yzWrORZ+2e9jvGPqYy5O9tqtnJ2Z6xqbFJsY+ybuIC4qriBeIf4RfGXEnQTJAntieTE2MQ9ieNzAudsmjOc5JpUlnRjruXcorkX5unOy553PFk1WZB8OIWYEpeyP+WDIEJQLxhP5aduTR0T8oSbhU9FvqKNolGxt7hKPJLmnVaV9jjdO31D+miGT0Z1xjMJT1IreZEZkrkj801WRNberM/ZcdktOZSclJyjUg1plrQr1zC3KLdPZisrkw3keeZtyhuTh8r35CP5c/PbFWyFTNGjtFKuUA4WTC+oK3hbGFt4uEi9SFrUM99m/ur5IwuCFny9kLBQuLCz2Lh4WfHgIr9FuxYji1MXdy4xXVK6ZHhp8NJ9y2jLspb9UOJYUlXyannc8o5Sg9KlpUMrglc0lamUycturvRauWMVYZVkVe9ql9VbVn8qF5VfrHCsqK74sEa45uJXTl/VfPV5bdra3kq3yu3rSOuk626s91m/r0q9akHV0IbwDa0b8Y3lG19tSt50oXpq9Y7NtM3KzQM1YTXtW8y2rNvyoTaj9nqdf13L
Vv2tq7e+2Sba1r/dd3vzDoMdFTve75TsvLUreFdrvUV99W7S7oLdjxpiG7q/5n7duEd3T8Wej3ulewf2Re/ranRvbNyvv7+yCW1SNo0eSDpw5ZuAb9qb7Zp3tXBaKg7CQeXBJ9+mfHvjUOihzsPcw83fmX+39QjrSHkr0jq/dawto22gPaG97+iMo50dXh1Hvrf/fu8x42N1xzWPV56gnSg98fnkgpPjp2Snnp1OPz3Umdx590z8mWtdUV29Z0PPnj8XdO5Mt1/3yfPe549d8Lxw9CL3Ytslt0utPa49R35w/eFIr1tv62X3y+1XPK509E3rO9Hv03/6asDVc9f41y5dn3m978bsG7duJt0cuCW69fh29u0XdwruTNxdeo94r/y+2v3qB/oP6n+0/rFlwG3g+GDAYM/DWQ/vDgmHnv6U/9OH4dJHzEfVI0YjjY+dHx8bDRq98mTOk+GnsqcTz8p+Vv9563Or59/94vtLz1j82PAL+YvPv655qfNy76uprzrHI8cfvM55PfGm/K3O233vuO+638e9H5ko/ED+UPPR+mPHp9BP9z7nfP78L/eE8/sl0p8zAAAAIGNIUk0AAHolAACAgwAA+f8AAIDpAAB1MAAA6mAAADqYAAAXb5JfxUYAAAQjSURBVHjaVNNZbFRlGIDh95w525zpdGa6TVtbykBbyiICxQY0AhYTJUCiiYqGqEEiJhKQmBg0ESPeeCGRENEYb4jhBr0gNQrRlCBiSgyLaSlSaKEs3Wemy+xnzuqFYdD/6rt6ku/N9wue55EcPwWArCgIgkx5ZRuYVxsnJ801Z05f3jY1MRnb/HxHV+uSph9RKq4mhkdwbZVgdQ2SHkPTwgj/h1QUWWi8/tfg/hM/XN/Y2zfaZnkSnuRDtLMsXhBOvrJtya/LlrcdMs1Qb1lVRQmSAEDAsU1kxpgamXp3y+azu1esreK9dyRqs9PIjkW6OsLx7lTV1ld/237s8HRV57MbnvO8CA+e9GCQFTk6Mza+4/0P+t9a9VSEI3uyTH/eR27aB2Ed31Q/Hx1sI6BHOPT13c5Frd0HW9p3HPUQEwAigJW9RDp+bstrOy981nVGLN/7RpHUV70YfXnEAtjxFPasxPDBQXatjzNTdOQXtg983H/51AFFy1KCIg2bNIdC+8270NwmUmelsXqSqHkDK5PDl8iCW0QcnEW+lqCjvcjQuMZ4YnQRTkotQUZu4GkjcfZNv19G011kXw4vayNYNvqCCvSVTciOgABgeuhBGwhgz5zbkI2ff7HUqJiNR2QktbbSYnBYYqbMT/ilKI4SIbT/GcRylbnvLmJ2X8N7tJ7rR8OE/BbliqEYea81WIotmOs02WFpc55Lf0f5/mSI3dsamOgxSX7ZjaALuBmB6M6FnB+S+POCwmOLk1QFFAqZyQWl1YrpiRZJLvDkygyC5NJ1XCax7xYNiTQVEYVIuUulayIcGeLkpw6WK7GuPY/fb2CkhleXIFFe8XPGaKBj9QxLW1Ik0bg8EuT2zRCJYZvZIYepe0EGbvi4bQUJVZhs2phADFYj+df0lBqJUnaekS4SUHXe3jrOnoE2PhSewHfRpfZGgcryIvfHdQruQlLo7Ns6QizqkJ31CIUlqwQJXuWUpDXj6qOsW32HT3YNImll9FwJsb4jyaLmWQ4fa6a+2sQw0ry8YZSiHcPxxXBtMfCv4XkUCrfliWs/fTE31rtTVfv9vsIorvQIniMhqXM4popVcJFVMHMpfMEaLPdxR1Tnna1b1vl6tGntpAjgCTNWONZyIFBR8Ydtr6EgrCI3VySfzZPLBDHyIq5gkpmzcOUmTGMF+bh7M9LYulfWzMmHBzk7Fpq9deWEYxjrtaCMXjWfstp6BCGNXZzBdYqYhogWqkMum4+oBVD0YnP63u/fFqbv1D+M7VSlBbmmK5uYaLYLYwslfwFVAyXQiOfcx3XyyGIM8DDn0lgWyGokHogu/0UJxpL/+f2e569s/CZQZ53OpzJr0+NXludUfb5jVdf7VUGXJUPIZast1S9PeII6jFDT5xMjFwO1S4c8zwTgnwEAxufYSzA67PMAAAAASUVORK5CYII=\" >\n  <a class=\"footer-item\" href=\"http://www.beian.gov.cn/portal/registerSystemInfo\">\u6caa\u516c\u7f51\u5b89\u5907 12345678912345\u53f7</a>\n</div>\n<style>\n.footer-content {\n  display: flex;\n  flex-wrap: wrap;\n  align-items: center;\n  justify-content: center;\n}\n.footer-content + .footer-content {\n  margin-top: 8px;\n}\n.login-pf .footer-item {\n  color: white;\n}\n.footer-item {\n  color: var(--dao-gray-010);\n  text-decoration: none;\n}\n.footer-item + .footer-item {\n  margin-left: 8px;\n}\n.gongan-icon {\n  width: 18px;\n  height: 18px;\n  margin-right: 4px;\n}\n</style>\n
                                  "},{"location":"en/admin/ghippo/platform-setting/mail-server.html","title":"Mail Server","text":"

                                  If a user forgets their password, AI platform sends an email to verify the user's email address and confirm that the user is acting in person. For AI platform to be able to send email, you first need to provide your mail server address.

                                  The specific operation steps are as follows:

                                  1. Log in to AI platform as a user with admin role. Click Global Management at the bottom of the left navigation bar.

                                  2. Click Settings , select Mail Server Settings .

                                    Complete the following fields to configure the mail server:

                                    | Field | Description | Example |
                                    | --- | --- | --- |
                                    | SMTP server address | SMTP server address that can provide mail service | smtp.163.com |
                                    | SMTP server port | Port for sending mail | 25 |
                                    | Username | Name of the SMTP user | test@163.com |
                                    | Password | Password for the SMTP account | 123456 |
                                    | Sender's email address | Sender's email address | test@163.com |
                                    | Use SSL secure connection | SSL can be used to encrypt emails, thereby improving the security of information transmitted via email; a certificate usually needs to be configured for the mail server | Disable |
                                  3. After the configuration is complete, click Save , and click Test Mail Server .

                                  4. A message appears in the upper right corner indicating that the test mail was sent successfully, which means the mail server has been set up correctly.

                                  "},{"location":"en/admin/ghippo/platform-setting/mail-server.html#common-problem","title":"Common problem","text":"

                                  Q: Why can a user still not retrieve the password after the mail server has been set up?

                                  Answer: The user may not have an email address on file, or may have set a wrong one. In this case, a user with the admin role can find the user by username in Global Management -> Access Control and set a new login password for the user.

                                  If the mail server cannot be connected, check whether the mail server address, username, and password are correct.
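                                  As a quick check from outside the platform, you can verify that the SMTP server answers on the configured port, for example with openssl (a sketch using the smtp.163.com example above; substitute your own server address and port):

                                  openssl s_client -connect smtp.163.com:25 -starttls smtp\n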

                                  "},{"location":"en/admin/ghippo/platform-setting/security.html","title":"Security Policy","text":"

                                  AI platform offers robust security measures, including password policies and access control for the graphical interface.

                                  "},{"location":"en/admin/ghippo/platform-setting/security.html#password-policy","title":"Password Policy","text":"
                                  • New passwords must differ from the most recent historical password.
                                  • Users are required to change their passwords upon expiration.
                                  • Passwords must not match the username.
                                  • Passwords cannot be the same as the user's email address.
                                  • Customizable password rules.
                                  • Customizable minimum password length.
                                  "},{"location":"en/admin/ghippo/platform-setting/security.html#access-control-policy","title":"Access Control Policy","text":"
                                  • Session Timeout Policy: Users will be automatically logged out after a period of inactivity lasting x hours.
                                  • Account Lockout Policy: Accounts will be locked after multiple failed login attempts within a specified time frame.
                                  • Login/Logout Policy: Users will be logged out when closing the browser.

                                  To configure the password and access control policies, navigate to global management, then click Settings -> Security Policy in the left navigation bar.

                                  "},{"location":"en/admin/ghippo/report-billing/index.html","title":"Operation Management","text":"

                                  Operation Management provides a visual representation of the total usage and utilization rates of CPU, memory, storage and GPU across various dimensions such as cluster, node, namespace, pod, and workspace within a specified time range on the platform. It also automatically calculates platform consumption information based on usage, usage time, and unit price. By default, the module enables all report statistics, but platform administrators can manually enable or disable individual reports. After enabling or disabling, the platform will start or stop collecting report data within a maximum of 20 minutes. Previously collected data will still be displayed normally. Operation Management data can be retained on the platform for up to 365 days. Statistical data exceeding this retention period will be automatically deleted. You can also download reports in CSV or Excel format for further statistics and analysis.

                                  Operation Management is available only for the Standard Edition and above. It is not supported in the Community Edition.

                                  You need to install or upgrade the Operations Management module first; after that, you can use report management and billing/metering.

                                  "},{"location":"en/admin/ghippo/report-billing/index.html#report-management","title":"Report Management","text":"

                                  Report Management provides data statistics for cluster, node, pods, workspace, and namespace across five dimensions: CPU Utilization, Memory Utilization, Storage Utilization, GPU Computing Power Utilization, and GPU Memory Utilization. It also integrates with the audit and alert modules to support the statistical management of audit and alert data, supporting a total of seven types of reports.

                                  "},{"location":"en/admin/ghippo/report-billing/index.html#accounting-billing","title":"Accounting & Billing","text":"

                                  Accounting & Billing provides billing statistics for clusters, nodes, pods, namespaces, and workspaces on the platform. It calculates the consumption for each resource during the statistical period based on the usage of CPU, memory, storage and GPU, as well as user-configured prices and currency units. Depending on the selected time span, such as monthly, quarterly, or annually, it can quickly calculate the actual consumption for that period.

                                  "},{"location":"en/admin/ghippo/report-billing/billing.html","title":"Accounting & Billing","text":"

                                  Accounting and billing further processes the usage data of resources based on the reports. You can manually set the unit price and currency for CPU, memory, GPU, and storage. Once set, the system automatically calculates the expenses of clusters, nodes, pods, namespaces, and workspaces over a period. You can adjust the period freely and export billing reports in Excel or CSV format after filtering by week, month, quarter, or year.

                                  "},{"location":"en/admin/ghippo/report-billing/billing.html#billing-rules-and-effective-time","title":"Billing Rules and Effective Time","text":"
                                  • Billing Rules: By default, billing is based on the maximum of the requested and actual usage values.
                                  • Effective Time: Settings take effect the next day; the fees incurred on a given day are calculated from the unit price and quantity obtained at midnight of the following day.
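                                  For example (hypothetical numbers, for illustration only): if a pod requests 2 CPU cores but actually uses 1.5 cores, the billed CPU quantity under the default rule is max(2, 1.5) = 2 cores; the fee is then this quantity multiplied by the configured unit price over the billing period.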
                                  "},{"location":"en/admin/ghippo/report-billing/billing.html#features","title":"Features","text":"
                                  • Support customizing the billing unit for CPU, memory, storage and GPU, as well as the currency unit.
                                  • Support custom querying of billing data within a year, automatically calculating the billing situation for the selected time period.
                                  • Support exporting billing reports in CSV and Excel formats.
                                  • Support enabling/disabling individual billing reports. After enabling/disabling, the platform will start/stop collecting data within 20 minutes, and past collected data will still be displayed normally.
                                  • Support selective display of billing data for CPU, total memory, storage, GPU and total.
                                  "},{"location":"en/admin/ghippo/report-billing/billing.html#report-dimensions","title":"Report Dimensions","text":"

                                  Currently, the following reports are supported:

                                  • Cluster Billing Report: Displays the CPU, memory, storage, GPU, and overall billing for all clusters within a given period, as well as the number of nodes in each cluster. By clicking the node count, you can jump to the node billing report and view the billing of that cluster's nodes during that period.
                                  • Node Billing Report: Displays the CPU, memory, storage, GPU, and overall billing for all nodes within a given period, as well as each node's IP, type, and the cluster it belongs to.
                                  • Pod Billing Report: Displays the CPU, memory, storage, GPU, and overall billing for all pods within a given period, as well as the namespace, cluster, and workspace each pod belongs to.
                                  • Workspace Billing Report: Displays the CPU, memory, storage, GPU, and overall billing for all workspaces within a given period, as well as the number of namespaces and pods. By clicking the namespace count, you can jump to the namespace billing report and view the billing of that workspace's namespaces during that period; the pod count works the same way for pods.
                                  • Namespace Billing Report: Displays the CPU, memory, storage, GPU, and overall billing for all namespaces within a given period, as well as the number of pods and the cluster and workspace each namespace belongs to. By clicking the pod count, you can jump to the pod billing report and view the billing of that namespace's pods during that period.
                                  "},{"location":"en/admin/ghippo/report-billing/billing.html#operating-steps","title":"Operating Steps","text":"
                                  1. Log in to AI platform as a user with the admin role. Click Global Management -> Operations Management at the bottom of the left navigation bar.

                                  2. After entering Operations Management , switch between the menus to view billing reports for clusters, nodes, and pods.

                                  "},{"location":"en/admin/ghippo/report-billing/report.html","title":"Report Management","text":"

                                  Report management visually displays statistical data across clusters, nodes, pods, workspaces, namespaces, audits, and alarms. This data provides a reliable foundation for platform billing and utilization optimization.

                                  "},{"location":"en/admin/ghippo/report-billing/report.html#features","title":"Features","text":"
                                  • Supports custom queries for statistical data within a year
                                  • Allows exporting reports in CSV and Excel formats
                                  • Supports enabling/disabling individual reports; once toggled, the platform will start/stop data collection within 20 minutes, but previously collected data will still be displayed.
                                  • Displays maximum, minimum, and average values for CPU utilization, memory utilization, storage utilization, and GPU memory utilization
                                  "},{"location":"en/admin/ghippo/report-billing/report.html#report-dimensions","title":"Report Dimensions","text":"

                                  Currently, the following reports are supported:

                                  • Cluster Report: Displays the maximum, minimum, and average values of CPU utilization, memory utilization, storage utilization, and GPU memory utilization for all clusters during a specific time period, as well as the number of nodes under the cluster. You can quickly access the node report by clicking on the node count and view the utilization of nodes under the cluster during that period.
                                  • Node Report: Displays the maximum, minimum, and average values of CPU utilization, memory utilization, storage utilization, and GPU memory utilization for all nodes during a specific time period, along with the node's IP, type, and affiliated cluster.
                                  • Pod Report: Shows the maximum, minimum, and average values of CPU utilization, memory utilization, storage utilization, and GPU memory utilization for all pods during a specific time period, as well as the pod's namespace, affiliated cluster, and workspace.
                                  • Workspace Report: Displays the maximum, minimum, and average values of CPU utilization, memory utilization, storage utilization, and GPU memory utilization for all workspaces during a specific time period, along with the number of namespaces and pods. You can quickly access the namespace report by clicking on the namespace count and view the utilization of namespaces under the workspace during that period; similarly, you can view the utilization of pods under the workspace.
                                  • Namespace Report: Displays the maximum, minimum, and average values of CPU utilization, memory utilization, storage utilization, and GPU memory utilization for all namespaces during a specific time period, as well as the number of pods, affiliated clusters, and workspaces. You can quickly access the pod report by clicking on the pod count and view the utilization of pods within the namespace during that period.
                                  • Audit Report: Divided into user action and resource operation reports. The user action report counts the number of operations performed by a single user during a period, including successful and failed attempts; the resource operation report counts the number of operations performed on each type of resource by all users.
                                  • Alarm Report: Displays the number of alarms for all nodes during a specific period, including the occurrences of fatal, severe, and warning alarms.
                                  "},{"location":"en/admin/ghippo/report-billing/report.html#steps","title":"Steps","text":"
                                  1. Log in to AI platform as a user with the Admin role. Click Global Management -> Operations Management at the bottom of the left sidebar.

                                  2. After entering Operations Management, switch between different menus to view reports on clusters, nodes, and pods.

                                  "},{"location":"en/admin/ghippo/troubleshooting/ghippo01.html","title":"Unable to start istio-ingressgateway when restarting the cluster (virtual machine)?","text":"

                                  The error message is as shown in the following image:

                                  Possible cause: The jwtsUri address of the RequestAuthentication CR cannot be accessed, causing istiod to be unable to push the configuration to istio-ingressgateway (This bug can be avoided in Istio 1.15: https://github.com/istio/istio/pull/39341/).

                                  Solution:

                                  1. Back up the RequestAuthentication ghippo CR.

                                    kubectl get RequestAuthentication ghippo -n istio-system -o yaml > ghippo-ra.yaml\n
                                  2. Delete the RequestAuthentication ghippo CR.

                                    kubectl delete RequestAuthentication ghippo -n istio-system\n
                                  3. Restart Istio.

                                    kubectl rollout restart deploy/istiod -n istio-system\nkubectl rollout restart deploy/istio-ingressgateway -n istio-system\n
                                  4. Reapply the RequestAuthentication ghippo CR.

                                    kubectl apply -f ghippo-ra.yaml\n

                                    Note

                                    Before applying the RequestAuthentication ghippo CR, make sure that ghippo-apiserver and ghippo-keycloak are started correctly.
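                                    A quick way to confirm both components are running (a sketch, assuming the ghippo-system namespace used elsewhere in this guide):

                                    kubectl -n ghippo-system get pods | grep -E 'ghippo-apiserver|ghippo-keycloak'\n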

                                  "},{"location":"en/admin/ghippo/troubleshooting/ghippo02.html","title":"Login loop with error 401 or 403","text":"

                                  This issue occurs when the MySQL database connected to ghippo-keycloak encounters a failure, causing the OIDC Public keys to be reset.

                                  For Global Management version 0.11.1 and above, you can follow these steps to restore normal operation by updating the Global Management configuration file using helm .

                                  # Update helm repository\nhelm repo update ghippo\n\n# Backup ghippo parameters\nhelm get values ghippo -n ghippo-system -o yaml > ghippo-values-bak.yaml\n\n# Get the current deployed ghippo version\nversion=$(helm get notes ghippo -n ghippo-system | grep \"Chart Version\" | awk -F ': ' '{ print $2 }')\n\n# Perform the update operation to make the configuration file take effect\nhelm upgrade ghippo ghippo/ghippo \\\n-n ghippo-system \\\n-f ./ghippo-values-bak.yaml \\\n--version ${version}\n
                                  "},{"location":"en/admin/ghippo/troubleshooting/ghippo03.html","title":"Keycloak Unable to Start","text":""},{"location":"en/admin/ghippo/troubleshooting/ghippo03.html#common-issues","title":"Common Issues","text":""},{"location":"en/admin/ghippo/troubleshooting/ghippo03.html#symptoms","title":"Symptoms","text":"

                                  MySQL is ready with no errors, but after Global Management is installed, Keycloak fails to start (restarting more than 10 times).

                                  "},{"location":"en/admin/ghippo/troubleshooting/ghippo03.html#checklist","title":"Checklist","text":"
                                  • If the database is MySQL, check if the Keycloak database encoding is UTF8.
                                  • Check the network connection from Keycloak to the database, ensure the database resources are sufficient, including but not limited to resource limits, storage space, and physical machine resources.
                                  "},{"location":"en/admin/ghippo/troubleshooting/ghippo03.html#troubleshooting-steps","title":"Troubleshooting Steps","text":"
                                  1. Check if MySQL resource usage has reached the limit
                                  2. Check if the number of tables in the MySQL database keycloak is 95. (The number of tables may vary across different versions of Keycloak, so you can compare it with the number of tables in the Keycloak database of the same version in development or testing environments). If the number is fewer, it indicates that there may be an issue with the database table initialization (The command to check the number of tables is: show tables;).
                                  3. Delete and recreate the Keycloak database with the command CREATE DATABASE IF NOT EXISTS keycloak CHARACTER SET utf8
                                  4. Restart the Keycloak Pod to resolve the issue
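                                  For example, the database checks above can be run from a shell on the database host (a sketch; adjust credentials and connection options to your environment):

                                  mysql -u root -p -e \"SHOW CREATE DATABASE keycloak;\"   # verify the UTF8 character set\nmysql -u root -p -e \"USE keycloak; SHOW TABLES;\" | wc -l   # table count (minus one header line)\nmysql -u root -p -e \"DROP DATABASE IF EXISTS keycloak; CREATE DATABASE IF NOT EXISTS keycloak CHARACTER SET utf8;\"\n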
                                  "},{"location":"en/admin/ghippo/troubleshooting/ghippo03.html#symptoms_1","title":"Symptoms","text":"

                                  Keycloak cannot start normally: the Keycloak pod is in the CrashLoopBackOff state, and the Keycloak log reports an error.
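                                  To retrieve the log yourself, a sketch (assuming the ghippo-system namespace; the exact pod name will differ in your installation):

                                  kubectl -n ghippo-system get pods | grep keycloak\nkubectl -n ghippo-system logs <keycloak-pod-name> --tail=100\n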

                                  "},{"location":"en/admin/ghippo/troubleshooting/ghippo03.html#checklist_1","title":"Checklist","text":"

                                  Run the following script to check the supported CPU types:

                                  cat <<\"EOF\" > detect-cpu.sh\n#!/bin/sh -eu\n\nflags=$(cat /proc/cpuinfo | grep flags | head -n 1 | cut -d: -f2)\n\nsupports_v2='awk \"/cx16/&&/lahf/&&/popcnt/&&/sse4_1/&&/sse4_2/&&/ssse3/ {found=1} END {exit !found}\"'\nsupports_v3='awk \"/avx/&&/avx2/&&/bmi1/&&/bmi2/&&/f16c/&&/fma/&&/abm/&&/movbe/&&/xsave/ {found=1} END {exit !found}\"'\nsupports_v4='awk \"/avx512f/&&/avx512bw/&&/avx512cd/&&/avx512dq/&&/avx512vl/ {found=1} END {exit !found}\"'\n\necho \"$flags\" | eval $supports_v2 || exit 2 && echo \"CPU supports x86-64-v2\"\necho \"$flags\" | eval $supports_v3 || exit 3 && echo \"CPU supports x86-64-v3\"\necho \"$flags\" | eval $supports_v4 || exit 4 && echo \"CPU supports x86-64-v4\"\nEOF\n\nchmod +x detect-cpu.sh\nsh detect-cpu.sh\n

                                  Execute the command below to check the current CPU features. If the output contains sse4_2, it indicates that your processor supports SSE 4.2.

                                  lscpu | grep sse4_2\n
                                  "},{"location":"en/admin/ghippo/troubleshooting/ghippo03.html#solution","title":"Solution","text":"

                                  You need to upgrade the CPU of your virtual machine or physical machine to one that supports x86-64-v2 or above, ensuring that the x86 CPU instruction set supports SSE4.2. For details on how to upgrade, consult your virtual machine platform provider or physical machine vendor.

                                  For more information, see: https://github.com/keycloak/keycloak/issues/17290

                                  "},{"location":"en/admin/ghippo/troubleshooting/ghippo04.html","title":"Failure to Upgrade Global Management Separately","text":"

                                  If the upgrade fails with the following message, refer to the Offline Upgrade section and complete the installation of the CRDs by following the steps for updating the ghippo CRD.

                                  ensure CRDs are installed first\n
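                                  As a sketch of what that typically involves (assuming the ghippo chart repository configured earlier and the common Helm convention of shipping CRDs in the chart's crds directory; treat the Offline Upgrade steps as the authoritative procedure):

                                  helm pull ghippo/ghippo --version <chart-version> --untar\nkubectl apply -f ghippo/crds/\n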
                                  "},{"location":"en/admin/ghippo/workspace/folder-permission.html","title":"Description of folder permissions","text":"

                                  Folders provide permission mapping: the permissions of users/groups in a folder can be mapped to the subfolders, workspaces, and resources beneath it.

                                  If a user/group has the Folder Admin role in a folder, it still has Folder Admin when mapped to a subfolder, and is mapped to Workspace Admin for the workspaces beneath it. If a Namespace is bound in Workspace and Folder -> Resource Group , the user/group also becomes a Namespace Admin after mapping.

                                  Note

                                  The permission mapping capability of folders does not apply to shared resources, because sharing grants usage permissions on a cluster to multiple workspaces rather than assigning management permissions to them; therefore permission inheritance and role mapping are not implemented.

                                  "},{"location":"en/admin/ghippo/workspace/folder-permission.html#use-cases","title":"Use cases","text":"

                                  Folders have hierarchical capabilities, so when folders are mapped to departments/suppliers/projects in the enterprise,

                                  • If a user/group has administrative permission (Admin) in a first-level department, it also has administrative permission in the second-, third-, and fourth-level departments or projects under it;
                                  • If a user/group has editing permission (Editor) in a first-level department, it also has editing permission in the second-, third-, and fourth-level departments or projects under it;
                                  • If a user/group has read-only permission (Viewer) in a first-level department, it also has read-only permission in the second-, third-, and fourth-level departments or projects under it.
                                  | Object | Action | Folder Admin | Folder Editor | Folder Viewer |
                                  | --- | --- | --- | --- | --- |
                                  | The folder itself | View | \u2713 | \u2713 | \u2713 |
                                  | The folder itself | Authorization | \u2713 | \u2717 | \u2717 |
                                  | The folder itself | Modify Alias | \u2713 | \u2717 | \u2717 |
                                  | Subfolders under it | Create | \u2713 | \u2717 | \u2717 |
                                  | Subfolders under it | View | \u2713 | \u2713 | \u2713 |
                                  | Subfolders under it | Authorization | \u2713 | \u2717 | \u2717 |
                                  | Subfolders under it | Modify Alias | \u2713 | \u2717 | \u2717 |
                                  | Workspaces under it | Create | \u2713 | \u2717 | \u2717 |
                                  | Workspaces under it | View | \u2713 | \u2713 | \u2713 |
                                  | Workspaces under it | Authorization | \u2713 | \u2717 | \u2717 |
                                  | Workspaces under it | Modify Alias | \u2713 | \u2717 | \u2717 |
                                  | Workspaces under it - Resource Group | View | \u2713 | \u2713 | \u2713 |
                                  | Workspaces under it - Resource Group | Resource binding | \u2713 | \u2717 | \u2717 |
                                  | Workspaces under it - Resource Group | Unbind | \u2713 | \u2717 | \u2717 |
                                  | Workspaces under it - Shared Resources | View | \u2713 | \u2713 | \u2713 |
                                  | Workspaces under it - Shared Resources | New share | \u2713 | \u2717 | \u2717 |
                                  | Workspaces under it - Shared Resources | Unshare | \u2713 | \u2717 | \u2717 |
                                  | Workspaces under it - Shared Resources | Resource Quota | \u2713 | \u2717 | \u2717 |
                                  "},{"location":"en/admin/ghippo/workspace/folders.html","title":"Create/Delete Folders","text":"

                                  Folders have the capability to map permissions, allowing users/user groups to have their permissions in the folder mapped to its sub-folders, workspaces, and resources.

                                  Follow the steps below to create a folder:

                                  1. Log in to AI platform with a user account having the admin/folder admin role. Click Global Management -> Workspace and Folder at the bottom of the left navigation bar.

                                  2. Click the Create Folder button in the top right corner.

                                  3. Fill in the folder name, parent folder, and other information, then click OK to complete creating the folder.

                                  Tip

                                  After successful creation, the folder name will be displayed in the left tree structure, represented by different icons for workspaces and folders.

                                  Note

                                  To edit or delete a specific folder, select it and click \u2507 on the right side.

                                  • If there are resources bound to the resource group or shared resources within the folder, the folder cannot be deleted. All resources need to be unbound before deleting.

                                  • If there are registry resources accessed by the microservice engine module within the folder, the folder cannot be deleted. All access to the registry needs to be removed before deleting the folder.

                                  "},{"location":"en/admin/ghippo/workspace/quota.html","title":"Resource Quota","text":"

                                  Shared resources do not necessarily mean that the shared users can use the shared resources without any restrictions. Admin, Kpanda Owner, and Workspace Admin can limit the maximum usage quota of a user through the Resource Quota feature in shared resources. If no restrictions are set, it means the usage is unlimited.

                                  • CPU Request (Core)
                                  • CPU Limit (Core)
                                  • Memory Request (MB)
                                  • Memory Limit (MB)
                                  • Total Storage Request (GB)
                                  • Persistent Volume Claims (PVC)
                                  • GPU Type, Spec, Quantity (including but not limited to Nvidia, Ascend, ILLUVATAR, and other GPUs)

                                  A resource (cluster) can be shared among multiple workspaces, and a workspace can use resources from multiple shared clusters simultaneously.
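                                  For reference, these quota dimensions map onto standard Kubernetes ResourceQuota fields. A minimal sketch of an equivalent object (illustrative only; the platform manages this for you, the namespace and quota names are example values, and the GPU resource name depends on your device plugin):

                                  kubectl apply -n ns01 -f - <<EOF\napiVersion: v1\nkind: ResourceQuota\nmetadata:\n  name: ws01-quota\nspec:\n  hard:\n    requests.cpu: \"10\"\n    limits.cpu: \"20\"\n    requests.memory: 10Gi\n    limits.memory: 20Gi\n    requests.storage: 100Gi\n    persistentvolumeclaims: \"10\"\n    requests.nvidia.com/gpu: \"2\"\nEOF\n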

                                  "},{"location":"en/admin/ghippo/workspace/quota.html#resource-groups-and-shared-resources","title":"Resource Groups and Shared Resources","text":"

                                  Cluster resources in both shared resources and resource groups are derived from Container Management. However, different effects will occur when binding a cluster to a workspace or sharing it with a workspace.

                                  1. Binding Resources

                                    Users/User groups in the workspace will have full management and usage permissions for the cluster. Workspace Admin will be mapped as Cluster Admin. Workspace Admin can access the Container Management module to manage the cluster.

                                    Note

                                    As of now, there are no Cluster Editor and Cluster Viewer roles in the Container Management module. Therefore, Workspace Editor and Workspace Viewer cannot be mapped.

                                  2. Adding Shared Resources

                                    Users/User groups in the workspace will have usage permissions for the cluster resources.

                                    Unlike resource groups, when sharing a cluster with a workspace, the roles of the users in the workspace will not be mapped to the resources. Therefore, Workspace Admin will not be mapped as Cluster Admin.

                                  This section demonstrates three scenarios related to resource quotas.

                                  "},{"location":"en/admin/ghippo/workspace/quota.html#create-namespaces","title":"Create Namespaces","text":"

                                  Creating a namespace involves resource quotas.

                                  1. Add a shared cluster to workspace ws01 .

                                  2. Select workspace ws01 and the shared cluster in Workbench, and create a namespace ns01 .

                                    • If no resource quotas are set in the shared cluster, there is no need to set resource quotas when creating the namespace.
                                    • If resource quotas are set in the shared cluster (e.g., CPU Request = 100 cores), the CPU request for the namespace must be less than or equal to 100 cores (CPU Request \u2264 100 core) for successful creation.
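                                  As a worked example with hypothetical numbers: if the shared cluster grants workspace ws01 a quota of CPU Request = 100 cores and namespace ns01 already requests 60 cores, a second namespace in ws01 can request at most 40 cores, because the sum of the namespaces' quotas cannot exceed the workspace's quota on that cluster.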
                                  "},{"location":"en/admin/ghippo/workspace/quota.html#bind-namespace-to-workspace","title":"Bind Namespace to Workspace","text":"

                                  Prerequisite: Workspace ws01 has added a shared cluster, and the operator has the Workspace Admin + Kpanda Owner or Admin role.

                                  The two methods of binding have the same effect.

                                  • Bind the created namespace ns01 to ws01 in Container Management.

                                    • If no resource quotas are set in the shared cluster, the namespace ns01 can be successfully bound regardless of whether resource quotas are set.
                                    • If resource quotas are set in the shared cluster (e.g., CPU Request = 100 cores), the namespace ns01 must meet the requirement of CPU requests less than or equal to 100 cores (CPU Request \u2264 100 core) for successful binding.
                                  • Bind the namespace ns01 to ws01 in Global Management.

                                    • If no resource quotas are set in the shared cluster, the namespace ns01 can be successfully bound regardless of whether resource quotas are set.
                                    • If resource quotas are set in the shared cluster (e.g., CPU Request = 100 cores), the namespace ns01 must meet the requirement of CPU requests less than or equal to 100 cores (CPU Request \u2264 100 core) for successful binding.
                                  "},{"location":"en/admin/ghippo/workspace/quota.html#unbind-namespace-from-workspace","title":"Unbind Namespace from Workspace","text":"

                                  The two methods of unbinding have the same effect.

                                  • Unbind the namespace ns01 from workspace ws01 in Container Management.

                                    • If no resource quotas are set in the shared cluster, unbinding the namespace ns01 will not affect the resource quotas, regardless of whether resource quotas were set for the namespace.
                                    • If resource quotas are set in the shared cluster (e.g., CPU Request = 100 cores) and the namespace ns01 has its own resource quotas, unbinding will release the corresponding resource quota.
                                  • Unbind the namespace ns01 from workspace ws01 in Global Management.

                                    • If no resource quotas are set in the shared cluster, unbinding the namespace ns01 will not affect the resource quotas, regardless of whether resource quotas were set for the namespace.
                                    • If resource quotas are set in the shared cluster (e.g., CPU Request = 100 cores) and the namespace ns01 has its own resource quotas, unbinding will release the corresponding resource quota.
                                  "},{"location":"en/admin/ghippo/workspace/res-gp-and-shared-res.html","title":"Differences between Resource Groups and Shared Resources","text":"

                                  Both resource groups and shared resources support cluster binding, but they have significant differences in usage.

                                  "},{"location":"en/admin/ghippo/workspace/res-gp-and-shared-res.html#differences-in-usage-scenarios","title":"Differences in Usage Scenarios","text":"
                                  • Cluster Binding for Resource Groups: Resource groups are usually used for batch authorization. After binding a resource group to a cluster, the workspace administrator will be mapped as a cluster administrator and able to manage and use cluster resources.
                                  • Cluster Binding for Shared Resources: Shared resources are usually used for resource quotas. A typical scenario is that the platform administrator assigns a cluster to a first-level supplier, who then assigns the cluster to a second-level supplier and sets resource quotas for the second-level supplier.

                                  Note: In this scenario, the platform administrator needs to impose resource restrictions on the secondary suppliers directly; having the primary supplier limit a secondary supplier's cluster quota is currently not supported.

                                  "},{"location":"en/admin/ghippo/workspace/res-gp-and-shared-res.html#differences-in-cluster-quota-usage","title":"Differences in Cluster Quota Usage","text":"
                                  • Cluster Binding for Resource Groups: The workspace administrator is mapped as the administrator of the cluster, equivalent to being granted the Cluster Admin role in Container Management -> Permission Management. They have unrestricted access to cluster resources, can manage critical content such as management nodes, and are not subject to resource quotas.
                                  • Cluster Binding for Shared Resources: The workspace administrator can only use the quota in the cluster to create namespaces in the Workbench and does not have cluster management permissions. If the workspace is restricted by a quota, the workspace administrator can only create and use namespaces within the quota range.
                                  "},{"location":"en/admin/ghippo/workspace/res-gp-and-shared-res.html#differences-in-resource-types","title":"Differences in Resource Types","text":"
                                  • Resource Groups: Can be bound to clusters, cluster-namespaces, multiclouds, multicloud namespaces, meshes, and mesh-namespaces.
                                  • Shared Resources: Can only bind to clusters.
                                  "},{"location":"en/admin/ghippo/workspace/res-gp-and-shared-res.html#similarities-between-resource-groups-and-shared-resources","title":"Similarities between Resource Groups and Shared Resources","text":"

                                  After binding to a cluster, both resource groups and shared resources can go to the Workbench to create namespaces, which will be automatically bound to the workspace.

                                  "},{"location":"en/admin/ghippo/workspace/workspace.html","title":"Creating/Deleting Workspaces","text":"

                                  A workspace is a resource category that represents a hierarchical relationship of resources. A workspace can contain resources such as clusters, namespaces, and registries. Typically, each workspace corresponds to a project; different resources can be allocated to it, and different users and user groups can be assigned to it.

                                  Follow the steps below to create a workspace:

                                  1. Log in to AI platform with a user account having the admin/folder admin role. Click Global Management -> Workspace and Folder at the bottom of the left navigation bar.

                                  2. Click the Create Workspace button in the top right corner.

                                  3. Fill in the workspace name, folder assignment, and other information, then click OK to complete creating the workspace.

                                  Tip

                                  After successful creation, the workspace name will be displayed in the left tree structure, represented by different icons for folders and workspaces.

                                  Note

                                  To edit or delete a specific workspace or folder, select it and click ... on the right side.

                                  • If resource groups and shared resources have resources under the workspace, the workspace cannot be deleted. All resources need to be unbound before deletion of the workspace.

                                  • If Microservices Engine has Integrated Registry under the workspace, the workspace cannot be deleted. Integrated Registry needs to be removed before deletion of the workspace.

                                  • If Container Registry has Registry Space or Integrated Registry under the workspace, the workspace cannot be deleted. Registry Space needs to be removed, and Integrated Registry needs to be deleted before deletion of the workspace.

                                  "},{"location":"en/admin/ghippo/workspace/ws-folder.html","title":"Workspace and Folder","text":"

                                  Workspace and Folder is a feature that provides resource isolation and grouping, addressing issues related to unified authorization, resource grouping, and resource quotas.

                                  Workspace and Folder involves two concepts: workspaces and folders.

                                  "},{"location":"en/admin/ghippo/workspace/ws-folder.html#workspaces","title":"Workspaces","text":"

                                  Workspaces allow the management of resources through Authorization , Resource Group , and Shared Resource , enabling users (and user groups) to share resources within the workspace.

                                  • Resources

                                    Resources are at the lowest level of the hierarchy in the resource management module. They include clusters, namespaces, pipelines, gateways, and more. All these resources can only have workspaces as their parent level. Workspaces act as containers for grouping resources.

                                  • Workspace

                                    A workspace usually refers to a project or environment, and the resources in each workspace are logically isolated from those in other workspaces. You can grant users (groups of users) different access rights to the same set of resources through authorization in the workspace.

                                    Workspaces are at the first level, counting from the bottom of the hierarchy, and contain resources. All resources except shared resources have one and only one parent. All workspaces also have one and only one parent folder.

                                    Resources are grouped by workspace, and there are two grouping modes in workspace, namely Resource Group and Shared Resource .

                                  • Resource group

                                    A resource can only be added to one resource group, and resource groups correspond one-to-one with workspaces. After a resource is added to a resource group, the Workspace Admin obtains management authority over the resource, equivalent to being its owner.

                                  • Shared resource

                                    For shared resources, multiple workspaces can share one or more resources. Resource owners can choose to share their own resources with a workspace, and when sharing they usually limit the amount of resources the receiving workspace can use. After a resource is shared, the Workspace Admin only has usage rights within the resource limit and cannot manage the resource or adjust the amount the workspace can use.

                                    At the same time, shared resources place certain requirements on the resources themselves: only Cluster resources can be shared. A Cluster Admin can share a Cluster resource with different workspaces and limit each workspace's usage of that Cluster.

                                    Workspace Admin can create multiple Namespaces within the resource quota, but the sum of the resource quotas of the Namespaces cannot exceed the resource quota of the Cluster in the workspace. For Kubernetes resources, the only resource type that can be shared currently is Cluster.

                                  "},{"location":"en/admin/ghippo/workspace/ws-folder.html#folder","title":"Folder","text":"

                                  Folders can be used to build enterprise business hierarchy relationships.

                                  • Folders are a further grouping mechanism based on workspaces and have a hierarchical structure. A folder can contain workspaces, other folders, or a combination of both, forming a tree-like organizational relationship.

                                  • Folders allow you to map your business hierarchy and group workspaces by department. Folders are not directly linked to resources, but indirectly achieve resource grouping through workspaces.

                                  • A folder has one and only one parent folder, and the root folder is the highest level of the hierarchy. The root folder has no parent, and folders and workspaces are attached to the root folder.

                                  In addition, users (groups) in folders can inherit permissions from their parents through a hierarchical structure. The permissions of the user in the hierarchical structure come from the combination of the permissions of the current level and the permissions inherited from its parents. The permissions are additive and there is no mutual exclusion.

                                  "},{"location":"en/admin/ghippo/workspace/ws-permission.html","title":"Description of workspace permissions","text":"

                                  The workspace has permission mapping and resource isolation capabilities, and can map the permissions of users/groups in the workspace to the resources under it. If the user/group has the Workspace Admin role in the workspace and the resource Namespace is bound to the workspace-resource group, the user/group will become Namespace Admin after mapping.

                                  Note

                                  The permission mapping capability of the workspace does not apply to shared resources, because sharing grants cluster usage permissions to multiple workspaces rather than assigning management permissions to them; therefore permission inheritance and role mapping are not implemented.

                                  "},{"location":"en/admin/ghippo/workspace/ws-permission.html#use-cases","title":"Use cases","text":"

                                  Resource isolation is achieved by binding resources to different workspaces. Therefore, resources can be flexibly allocated to each workspace (tenant) with the help of permission mapping, resource isolation, and resource sharing capabilities.

                                  Generally applicable to the following two use cases:

                                  • Cluster one-to-one

                                    | Ordinary Cluster | Department/Tenant (Workspace) | Purpose |
                                    | --- | --- | --- |
                                    | Cluster 01 | A | Administration and Usage |
                                    | Cluster 02 | B | Administration and Usage |
                                  • Cluster one-to-many

                                    | Cluster | Department/Tenant (Workspace) | Resource Quota |
                                    | --- | --- | --- |
                                    | Cluster 01 | A | 100-core CPU |
                                    | Cluster 01 | B | 50-core CPU |
                                  "},{"location":"en/admin/ghippo/workspace/ws-permission.html#permission-description","title":"Permission description","text":"Action Objects Operations Workspace Admin Workspace Editor Workspace Viewer itself view \u2713 \u2713 \u2713 - Authorization \u2713 \u2717 \u2717 - Modify Alias \u2713 \u2713 \u2717 Resource Group View \u2713 \u2713 \u2713 - resource binding \u2713 \u2717 \u2717 - unbind \u2713 \u2717 \u2717 Shared Resources View \u2713 \u2713 \u2713 - Add Share \u2713 \u2717 \u2717 - Unshare \u2713 \u2717 \u2717 - Resource Quota \u2713 \u2717 \u2717 - Using Shared Resources 1 \u2713 \u2717 \u2717
                                  1. Authorized users can go to modules such as workbench, microservice engine, middleware, multicloud orchestration, and service mesh to use resources in the workspace. For the operation scope of the roles of Workspace Admin, Workspace Editor, and Workspace Viewer in each module, please refer to the permission description:

                                    • Container Management Permissions


                                  "},{"location":"en/admin/ghippo/workspace/wsbind-permission.html","title":"Resource Binding Permission Instructions","text":"

                                  If a user John (\"John\" represents any user who is required to bind resources) has the Workspace Admin role assigned or has been granted proper permissions through a custom role, which includes the Workspace's \"Resource Binding\" Permissions, and wants to bind a specific cluster or namespace to the workspace.

                                  To bind cluster/namespace resources to a workspace, not only the workspace's \"Resource Binding\" permissions are required, but also the permissions of Cluster Admin.

                                  "},{"location":"en/admin/ghippo/workspace/wsbind-permission.html#granting-authorization-to-john","title":"Granting Authorization to John","text":"
                                  1. Using the Platform Admin Role, grant John the role of Workspace Admin on the Workspace -> Authorization page.

                                  2. Then, on the Container Management -> Permissions page, authorize John as a Cluster Admin by clicking Add Permission .

                                  "},{"location":"en/admin/ghippo/workspace/wsbind-permission.html#binding-to-workspace","title":"Binding to Workspace","text":"

                                  Log in to AI platform with John's account. On the Container Management -> Clusters page, John can bind the specified cluster to his own workspace using the Bind Workspace button.

                                  Note

                                  John can only bind clusters or namespaces to a specific workspace in the Container Management module, and cannot perform this operation in the Global Management module.

                                  To bind a namespace to a workspace, you must have at least Workspace Admin and Cluster Admin permissions.

                                  "},{"location":"en/admin/host/createhost.html","title":"Create and Start a Cloud Host","text":"

                                  After the user completes registration and is assigned a workspace, namespace, and resources, they can create and start a cloud host.

                                  "},{"location":"en/admin/host/createhost.html#prerequisites","title":"Prerequisites","text":"
                                  • AI platform installed
                                  • User has successfully registered
                                  • Workspace has been bound to the user
                                  • Resources have been allocated to the workspace
                                  "},{"location":"en/admin/host/createhost.html#steps","title":"Steps","text":"
                                  1. User logs into the AI platform.
                                  2. Click Create Cloud Host -> Create from Template

                                  3. After defining all configurations for the cloud host, click Next

                                    Configuration tabs: Basic Configuration, Template Configuration, Storage and Network.

                                  4. After configuring the root password or SSH key, click Confirm

                                  5. Return to the host list and wait for the status to change to Running. After that, you can start the host by clicking the \u2507 on the right side.

                                  Next step: Use the Cloud Host

                                  "},{"location":"en/admin/host/usehost.html","title":"Using Cloud Host","text":"

                                  After creating and starting the cloud host, users can begin using it.

                                  "},{"location":"en/admin/host/usehost.html#prerequisites","title":"Prerequisites","text":"
                                  • AI platform is installed
                                  • User has created and started a cloud host
                                  "},{"location":"en/admin/host/usehost.html#steps-to-follow","title":"Steps to Follow","text":"
                                  1. Log in to the AI platform as an administrator.
                                  2. Navigate to Container Management -> Container Network -> Services, click the service name to enter the service details page, and click Update at the top right corner.

                                  3. Change the port range to 30900-30999, ensuring there are no conflicts.

                                  4. Log in to the AI platform as an end user, navigate to the corresponding service, and check the access port.

                                  5. Use an SSH client to log in to the cloud host from the external network, as shown in the example after these steps.

                                  6. At this point, you can perform various operations on the cloud host.
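                                  For example (a sketch with placeholder values; use a reachable node IP of your cluster and the NodePort found in step 4, which falls within the 30900-30999 range configured above):

                                  ssh root@<node-ip> -p <node-port>\n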

                                  Next step: Cloud Resource Sharing: Quota Management

                                  "},{"location":"en/admin/insight/alert-center/index.html","title":"Alert Center","text":"

                                  The Alert Center is an important feature provided by AI platform that allows users to easily view all active and historical alerts by cluster and namespace through a graphical interface, and search alerts based on severity level (critical, warning, info).

                                  All alerts are triggered based on the threshold conditions set in the preset alert rules. In AI platform, some global alert policies are built-in, but users can also create or delete alert policies at any time, and set thresholds for the following metrics:

                                  • CPU usage
                                  • Memory usage
                                  • Disk usage
                                  • Disk reads per second
                                  • Disk writes per second
                                  • Cluster disk read throughput
                                  • Cluster disk write throughput
                                  • Network send rate
                                  • Network receive rate

                                  Users can also add labels and annotations to alert rules. Alert rules can be classified as active or expired, and certain rules can be enabled/disabled to achieve silent alerts.

                                  When the threshold condition is met, users can configure how they want to be notified, including email, DingTalk, WeCom, webhook, and SMS notifications. All notification message templates can be customized and all messages are sent at specified intervals.

                                  In addition, the Alert Center also supports sending alert messages to designated users through short message services provided by Alibaba Cloud, Tencent Cloud, and more platforms that will be added soon, enabling multiple ways of alert notification.

                                  AI platform Alert Center is a powerful alert management platform that helps users quickly detect and resolve problems in the cluster, improve business stability and availability, and facilitate cluster inspection and troubleshooting.

                                  "},{"location":"en/admin/insight/alert-center/alert-policy.html","title":"Alert Policies","text":"

                                  In addition to the built-in alert policies, AI platform allows users to create custom alert policies. Each alert policy is a collection of alert rules that can be set for clusters, nodes, and workloads. When an alert object reaches the threshold set by any of the rules in the policy, an alert is automatically triggered and a notification is sent.

                                  Taking the built-in alerts as an example, click the first alert policy alertmanager.rules .

                                  You can see that some alert rules have been set under it. You can add more rules under this policy, or edit or delete them at any time. You can also view the historical and active alerts related to this alert policy and edit the notification configuration.

                                  "},{"location":"en/admin/insight/alert-center/alert-policy.html#create-alert-policies","title":"Create Alert Policies","text":"
                                  1. Select Alert Center -> Alert Policies , and click the Create Alert Policy button.

                                  2. Fill in the basic information, select one or more clusters, nodes, or workloads as the alert objects, and click Next .

                                  3. The list must have at least one rule. If the list is empty, please Add Rule .

                                    Create an alert rule in the pop-up window, fill in the parameters, and click OK .

                                    • Template rules: Pre-defined basic metrics that can monitor CPU, memory, disk, and network.
                                    • PromQL rules: Enter a PromQL expression; for the syntax, refer to the Prometheus expression documentation. See the example after these steps.
                                    • Duration: After the alert condition is met and persists for the set duration, the alert policy enters the triggered state.
                                    • Alert level: Including emergency, warning, and information levels.
                                    • Advanced settings: Custom tags and annotations.
                                  4. After clicking Next , configure notifications.

                                  5. After the configuration is complete, click the OK button to return to the Alert Policy list.
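                                  As an example of a PromQL rule (a sketch assuming node-exporter metrics are being collected), the following expression fires when a node's CPU usage stays above 80%:

                                  100 - avg by (instance) (rate(node_cpu_seconds_total{mode=\"idle\"}[5m])) * 100 > 80\n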

                                  Tip

                                  The newly created alert policy is in the Not Triggered state. Once the threshold conditions and duration specified in the rules are met, it will change to the Triggered state.

                                  "},{"location":"en/admin/insight/alert-center/alert-policy.html#create-log-rules","title":"Create Log Rules","text":"

                                  After filling in the basic information, click Add Rule and select Log Rule as the rule type.

                                  Creating log rules is supported only when the resource object is selected as a node or workload.

                                  Field Explanation:

                                  • Filter Condition : Field used to query log content, supports four filtering conditions: AND, OR, regular expression matching, and fuzzy matching.
                                  • Condition : Based on the filter condition, enter keywords or matching conditions.
                                  • Time Range : Time range for log queries.
                                  • Threshold Condition : Enter the alert threshold value in the input box. When the set threshold is reached, an alert will be triggered. Supported comparison operators are: >, \u2265, =, \u2264, <.
                                  • Alert Level : Select the alert level to indicate the severity of the alert.
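Putting these fields together, a log rule that fires when a workload logs a keyword too frequently might conceptually look like the sketch below (field names and values are illustrative, not an importable format):

filterCondition: fuzzy match      # one of AND, OR, regular expression, fuzzy match
condition: "connection refused"   # keyword to match in the log content
timeRange: 5m                     # query window
thresholdCondition: "> 10"        # alert when more than 10 matching lines are found
alertLevel: warning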
                                  "},{"location":"en/admin/insight/alert-center/alert-policy.html#create-event-rules","title":"Create Event Rules","text":"

                                  After filling in the basic information, click Add Rule and select Event Rule as the rule type.

                                  Creating event rules is supported only when the resource object is selected as a workload.

Field Explanation (a combined example follows this list):

                                  • Event Rule : Only supports selecting the workload as the resource object.
• Event Reason : Different workload types have different event reasons; the selected event reasons are combined with an "AND" relationship.
                                  • Time Range : Detect data generated within this time range. If the threshold condition is reached, an alert event will be triggered.
                                  • Threshold Condition : When the generated events reach the set threshold, an alert event will be triggered.
• Trend Chart : By default, it shows the trend of event changes within the last 10 minutes. The value at each point is the total number of occurrences within the time range ending at that point.
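Conceptually, an event rule combines these fields as in the sketch below. The names and values are illustrative; BackOff and Unhealthy are standard Kubernetes event reasons:

resourceObject: deployment/my-app     # event rules support workloads only
eventReasons: [BackOff, Unhealthy]    # reasons are combined with AND
timeRange: 10m                        # detection window
thresholdCondition: ">= 3"            # alert once 3 or more matching events occur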
                                  "},{"location":"en/admin/insight/alert-center/alert-policy.html#other-operations","title":"Other Operations","text":"

Click ┇ at the right side of the list, then choose Delete from the pop-up menu to delete an alert policy. By clicking on the policy name, you can enter the policy details where you can add, edit, or delete the alert rules under it.

                                  Warning

Deleted alert policies will be permanently removed, so please proceed with caution.

                                  "},{"location":"en/admin/insight/alert-center/alert-template.html","title":"Alert Template","text":"

The Alert template feature allows platform administrators to create Alert templates and rules, which business units can use directly to create Alert policies. This reduces the alert-rule management burden on business personnel and allows Alert thresholds to be adjusted to the actual environment.

                                  "},{"location":"en/admin/insight/alert-center/alert-template.html#create-alert-template","title":"Create Alert Template","text":"
                                  1. In the navigation bar, select Alert -> Alert Policy, and click Alert Template at the top.

                                  2. Click Create Alert Template, and set the name, description, and other information for the Alert template.

                                    Parameter Description Template Name The name can only contain lowercase letters, numbers, and hyphens (-), must start and end with a lowercase letter or number, and can be up to 63 characters long. Description The description can contain any characters and can be up to 256 characters long. Resource Type Used to specify the matching type of the Alert template. Alert Rule Supports pre-defined multiple Alert rules, including template rules and PromQL rules.
                                  3. Click OK to complete the creation and return to the Alert template list. Click the template name to view the template details.

                                  "},{"location":"en/admin/insight/alert-center/alert-template.html#edit-alert-template","title":"Edit Alert Template","text":"

Click ┇ next to the target template, then click Edit to enter the editing page for the Alert template.

                                  "},{"location":"en/admin/insight/alert-center/alert-template.html#delete-alert-template","title":"Delete Alert Template","text":"

Click ┇ next to the target template, then click Delete. Enter the name of the Alert template in the input box to confirm deletion.

                                  "},{"location":"en/admin/insight/alert-center/inhibition.html","title":"Alert Inhibition","text":"

                                  Alert Inhibition is mainly a mechanism for temporarily hiding or reducing the priority of alerts that do not need immediate attention. The purpose of this feature is to reduce unnecessary alert information that may disturb operations personnel, allowing them to focus on more critical issues.

Alert inhibition recognizes and ignores certain alerts by defining a set of rules to deal with specific conditions. There are mainly the following conditions (a configuration sketch follows the list):

• Parent-child inhibition: when a parent alert (for example, a crash on a node) is triggered, all child alerts caused by it (for example, a crash on a container running on that node) are inhibited.
                                  • Similar alert inhibition: When alerts have the same characteristics (for example, the same problem on the same instance), multiple alerts are inhibited.
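Conceptually, parent-child inhibition behaves like an Alertmanager inhibit rule. The sketch below is illustrative (alert names and labels are examples): it suppresses node-level warnings while a critical node alert with the same node label is firing:

inhibit_rules:
  - source_matchers:
      - severity="critical"
      - alertname="NodeDown"
    target_matchers:
      - severity="warning"
    equal:
      - node   # inhibit only when source and target share the same node label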
                                  "},{"location":"en/admin/insight/alert-center/inhibition.html#create-inhibition","title":"Create Inhibition","text":"
                                  1. In the left navigation bar, select Alert -> Noise Reduction, and click Inhibition at the top.

                                  2. Click Create Inhibition, and set the name and rules for the inhibition.

                                    Note

By defining a set of rules through Rule Details and Alert Details, inhibition identifies and ignores certain alerts, which avoids floods of similar or related alerts that may be triggered by the same issue.

                                    Parameter Description Name The name can only contain lowercase letters, numbers, and hyphens (-), must start and end with a lowercase letter or number, and can be up to 63 characters long. Description The description can contain any characters and can be up to 256 characters long. Cluster The cluster where the inhibition rule applies. Namespace The namespace where the inhibition rule applies. Source Alert Matching alerts by label conditions. It compares alerts that meet all label conditions with those that meet inhibition conditions, and alerts that do not meet inhibition conditions will be sent to the user as usual. Value range explanation: - Alert Level: The level of metric or event alerts, can be set as: Critical, Major, Minor. - Resource Type: The resource type specific for the alert object, can be set as: Cluster, Node, StatefulSet, Deployment, DaemonSet, Pod. - Labels: Alert identification attributes, consisting of label name and label value, supports user-defined values. Inhibition Specifies the matching conditions for the target alert (the alert to be inhibited). Alerts that meet all the conditions will no longer be sent to the user. Equal Specifies the list of labels to compare to determine if the source alert and target alert match. Inhibition is triggered only when the values of the labels specified in equal are exactly the same in the source and target alerts. The equal field is optional. If the equal field is omitted, all labels are used for matching.
                                  3. Click OK to complete the creation and return to Inhibition list. Click the inhibition rule name to view the rule details.

                                  "},{"location":"en/admin/insight/alert-center/inhibition.html#view-rule-details","title":"View Rule Details","text":"

                                  In the left navigation bar, select Alert -> Alert Policy, and click the policy name to view the rule details.

Note

You can add custom tags when adding rules.
                                  "},{"location":"en/admin/insight/alert-center/inhibition.html#view-alert-details","title":"View Alert Details","text":"

In the left navigation bar, select Alert -> Alerts, and click the alert name to view details.

Note

Alert details show information and settings for creating inhibitions.
                                  "},{"location":"en/admin/insight/alert-center/inhibition.html#edit-inhibition-rule","title":"Edit Inhibition Rule","text":"

Click ┇ next to the target rule, then click Edit to enter the editing page for the inhibition rule.

                                  "},{"location":"en/admin/insight/alert-center/inhibition.html#delete-inhibition-rule","title":"Delete Inhibition Rule","text":"

Click ┇ next to the target rule, then click Delete. Enter the name of the inhibition rule in the input box to confirm deletion.

                                  "},{"location":"en/admin/insight/alert-center/message.html","title":"Notification Settings","text":"

                                  On the Notification Settings page, you can configure how to send messages to users through email, WeCom, DingTalk, Webhook, and SMS.

                                  "},{"location":"en/admin/insight/alert-center/message.html#email-group","title":"Email Group","text":"
                                  1. After entering Insight , click Alert Center -> Notification Settings in the left navigation bar. By default, the email notification object is selected. Click Add email group and add one or more email addresses.

                                  2. Multiple email addresses can be added.

3. After the configuration is complete, you will be returned to the notification list. Click ┇ on the right side of the list to edit or delete the email group.

                                  "},{"location":"en/admin/insight/alert-center/message.html#wecom","title":"WeCom","text":"
                                  1. In the left navigation bar, click Alert Center -> Notification Settings -> WeCom . Click Add Group Robot and add one or more group robots.

                                    For the URL of the WeCom group robot, please refer to the official document of WeCom: How to use group robots.

2. After the configuration is complete, you will be returned to the notification list. Click ┇ on the right side of the list, select Send Test Message , and you can also edit or delete the group robot.

                                  "},{"location":"en/admin/insight/alert-center/message.html#dingtalk","title":"DingTalk","text":"
                                  1. In the left navigation bar, click Alert Center -> Notification Settings -> DingTalk . Click Add Group Robot and add one or more group robots.

                                    For the URL of the DingTalk group robot, please refer to the official document of DingTalk: Custom Robot Access.

2. After the configuration is complete, you will be returned to the notification list. Click ┇ on the right side of the list, select Send Test Message , and you can also edit or delete the group robot.

                                  "},{"location":"en/admin/insight/alert-center/message.html#lark","title":"Lark","text":"
                                  1. In the left navigation bar, click Alert Center -> Notification Settings -> Lark . Click Add Group Bot and add one or more group bots.

                                    Note

                                    When signature verification is required in Lark's group bot, you need to fill in the specific signature key when enabling notifications. Refer to Customizing Bot User Guide.

2. After configuration, you will be automatically redirected to the list page. Click ┇ on the right side of the list and select Send Test Message . You can edit or delete group bots.

                                  "},{"location":"en/admin/insight/alert-center/message.html#webhook","title":"Webhook","text":"
                                  1. In the left navigation bar, click Alert Center -> Notification Settings -> Webhook . Click New Webhook and add one or more Webhooks.

                                    For the Webhook URL and more configuration methods, please refer to the webhook document.

2. After the configuration is complete, you will be returned to the notification list. Click ┇ on the right side of the list, select Send Test Message , and you can also edit or delete the Webhook.

                                  "},{"location":"en/admin/insight/alert-center/message.html#message","title":"Message","text":"

                                  Note

Alert messages are sent to the personal Message section, and notifications can be viewed by clicking 🔔 at the top.

1. In the left navigation bar, click Alert Center -> Notification Settings -> Message , then click Create Message .

                                    You can add and notify multiple users for a message.

2. After configuration, you will be automatically redirected to the list page. Click ┇ on the right side of the list and select Send Test Message .

                                  "},{"location":"en/admin/insight/alert-center/message.html#sms-group","title":"SMS Group","text":"
                                  1. In the left navigation bar, click Alert Center -> Notification Settings -> SMS . Click Add SMS Group and add one or more SMS groups.

                                  2. Enter the name, the object receiving the message, phone number, and notification server in the pop-up window.

The notification server needs to be created in advance under Notification Settings -> Notification Server . Currently, two providers are supported: Alibaba Cloud and Tencent Cloud. Refer to your own cloud account information for the specific configuration parameters.

3. After the SMS group is successfully added, you will be returned to the notification list. Click ┇ on the right side of the list to edit or delete the SMS group.

                                  "},{"location":"en/admin/insight/alert-center/msg-template.html","title":"Message Templates","text":"

                                  The message template feature supports customizing the content of message templates and can notify specified objects in the form of email, WeCom, DingTalk, Webhook, and SMS.

                                  "},{"location":"en/admin/insight/alert-center/msg-template.html#creating-a-message-template","title":"Creating a Message Template","text":"
                                  1. In the left navigation bar, select Alert -> Message Template .

                                    Insight comes with two default built-in templates in both Chinese and English for user convenience.

                                  2. Fill in the template content.

                                  Info

                                  Observability comes with predefined message templates. If you need to define the content of the templates, refer to Configure Notification Templates.
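As an illustration, a custom template body could combine the alert variables documented in the details section below. This is a sketch, not one of the built-in templates:

[{{ .Labels.severity }}] {{ .Labels.alertname }} in cluster {{ .Labels.cluster }}
Target: {{ .Labels.target_type }}/{{ .Labels.target }} (namespace {{ .Labels.namespace }})
Value: {{ .Annotations.value }}, started at {{ .StartsAt }}
Description: {{ .Annotations.description }}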

                                  "},{"location":"en/admin/insight/alert-center/msg-template.html#message-template-details","title":"Message Template Details","text":"

Click the name of a message template to view its details in the slide-out panel on the right.

                                  Parameters Variable Description ruleName {{ .Labels.alertname }} The name of the rule that triggered the alert groupName {{ .Labels.alertgroup }} The name of the alert policy to which the alert rule belongs severity {{ .Labels.severity }} The level of the alert that was triggered cluster {{ .Labels.cluster }} The cluster where the resource that triggered the alert is located namespace {{ .Labels.namespace }} The namespace where the resource that triggered the alert is located node {{ .Labels.node }} The node where the resource that triggered the alert is located targetType {{ .Labels.target_type }} The resource type of the alert target target {{ .Labels.target }} The name of the object that triggered the alert value {{ .Annotations.value }} The metric value at the time the alert notification was triggered startsAt {{ .StartsAt }} The time when the alert started to occur endsAt {{ .EndsAt }} The time when the alert ended description {{ .Annotations.description }} A detailed description of the alert labels {{ for .labels }} {{ end }} All labels of the alert use the for function to iterate through the labels list to get all label contents."},{"location":"en/admin/insight/alert-center/msg-template.html#editing-or-deleting-a-message-template","title":"Editing or Deleting a Message Template","text":"

Click ┇ on the right side of the list and select Edit or Delete from the pop-up menu to modify or delete the message template.

                                  Warning

                                  Once a template is deleted, it cannot be recovered, so please use caution when deleting templates.

                                  "},{"location":"en/admin/insight/alert-center/silent.html","title":"Alert Silence","text":"

                                  Alert silence is a feature that allows alerts meeting certain criteria to be temporarily disabled from sending notifications within a specific time range. This feature helps operations personnel avoid receiving too many noisy alerts during certain operations or events, while also allowing for more precise handling of real issues that need to be addressed.

                                  On the Alert Silence page, you can see two tabs: Active Rule and Expired Rule. The former presents the rules currently in effect, while the latter presents those that were defined in the past but have now expired (or have been deleted by the user).

                                  "},{"location":"en/admin/insight/alert-center/silent.html#creating-a-silent-rule","title":"Creating a Silent Rule","text":"
1. In the left navigation bar, select Alert -> Noise Reduction -> Alert Silence , and click the Create Silence Rule button.

                                  2. Fill in the parameters for the silent rule, such as cluster, namespace, tags, and time, to define the scope and effective time of the rule, and then click OK .

3. Return to the rule list, and on the right side of the list, click ┇ to edit or delete a silent rule.

                                  Through the Alert Silence feature, you can flexibly control which alerts should be ignored and when they should be effective, thereby improving operational efficiency and reducing the possibility of false alerts.

                                  "},{"location":"en/admin/insight/alert-center/sms-provider.html","title":"Configure Notification Server","text":"

                                  Insight supports SMS notifications and currently sends alert messages using integrated Alibaba Cloud and Tencent Cloud SMS services. This article explains how to configure the SMS notification server in Insight. The variables supported in the SMS signature are the default variables in the message template. As the number of SMS characters is limited, it is recommended to choose more explicit variables.

                                  For information on how to configure SMS recipients, refer to the document: Configure SMS Notification Group.

                                  "},{"location":"en/admin/insight/alert-center/sms-provider.html#procedure","title":"Procedure","text":"
                                  1. Go to Alert Center -> Notification Settings -> Notification Server .

                                  2. Click Add Notification Server .

                                    • Configure Alibaba Cloud server.

                                      To apply for Alibaba Cloud SMS service, please refer to Alibaba Cloud SMS Service.

                                      Field descriptions:

                                      • AccessKey ID : Parameter used by Alibaba Cloud to identify the user.
                                      • AccessKey Secret : Key used by Alibaba Cloud to authenticate the user. AccessKey Secret must be kept confidential.
                                      • SMS Signature : The SMS service supports creating signatures that meet the requirements according to user needs. When sending SMS, the SMS platform will add the approved SMS signature to the SMS content before sending it to the SMS recipient.
                                      • Template CODE : The SMS template is the specific content of the SMS to be sent.
                                      • Parameter Template : The SMS body template can contain variables. Users can use variables to customize the SMS content.

                                      Please refer to Alibaba Cloud Variable Specification.

                                      Note

                                      Example: The template content defined in Alibaba Cloud is: ${severity}: ${alertname} triggered at ${startat}. Refer to the configuration in the parameter template.

                                    • Configure Tencent Cloud server.

                                      To apply for Tencent Cloud SMS service, please refer to Tencent Cloud SMS.

                                      Field descriptions:

                                      • Secret ID : Parameter used by Tencent Cloud to identify the API caller.
                                      • SecretKey : Parameter used by Tencent Cloud to authenticate the API caller.
                                      • SMS Template ID : The SMS template ID automatically generated by Tencent Cloud system.
                                      • Signature Content : The SMS signature content, which is the full name or abbreviation of the actual website name defined in the Tencent Cloud SMS signature.
                                      • SdkAppId : SMS SdkAppId, the actual SdkAppId generated after adding the application in the Tencent Cloud SMS console.
                                      • Parameter Template : The SMS body template can contain variables. Users can use variables to customize the SMS content. Please refer to: Tencent Cloud Variable Specification.

                                      Note

                                      Example: The template content defined in Tencent Cloud is: {1}: {2} triggered at {3}. Refer to the configuration in the parameter template.
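As an illustration of the parameter template, the Alibaba Cloud template above ( ${severity}: ${alertname} triggered at ${startat} ) could map its variables to the message-template variables as in this hypothetical sketch:

severity: '{{ .Labels.severity }}'
alertname: '{{ .Labels.alertname }}'
startat: '{{ .StartsAt }}'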

                                  "},{"location":"en/admin/insight/best-practice/debug-log.html","title":"Log Collection Troubleshooting Guide","text":"

After installing the insight-agent in the cluster, Fluent Bit in insight-agent will collect logs in the cluster by default, including Kubernetes event logs, node logs, and container logs. Fluent Bit is pre-configured with various log collection plugins, related filter plugins, and log output plugins. The working status of these plugins determines whether log collection is normal. Insight provides a Fluent Bit dashboard that monitors the working status of each Fluent Bit instance in the cluster and the collection, processing, and export of logs by its plugins.

1. Log in to the AI platform, enter Insight , and select Dashboard in the left navigation bar.

                                  2. Click the dashboard title Overview .

                                  3. Switch to the insight-system -> Fluent Bit dashboard.

                                  4. There are several check boxes above the Fluent Bit dashboard to select the input plugin, filter plugin, output plugin, and cluster in which it is located.

                                  "},{"location":"en/admin/insight/best-practice/debug-log.html#plugin-description","title":"Plugin Description","text":"

                                  Here are some plugins for Fluent Bit .

                                  Log Collection Plugin

                                  Input Plugin Plugin Description Collection Directory tail.kube Collect container logs /var/log/containers/*.log tail.kubeevent Collect Kubernetes event logs /var/log/containers/-kubernetes-event-exporter.log tail.syslog.dmesg Collect host dmesg logs /var/log/dmesg tail.syslog.messages Collect frequently used host logs /var/log/secure, /var/log/messages, /var/log/syslog,/var/log/auth.log syslog.syslog.RSyslog Collect RSyslog logs systemd.syslog.systemd Collect Journald daemon logs tail.audit_log.k8s Collect Kubernetes audit logs /var/log//audit/.log tail.audit_log.ghippo Collect global management audit logs /var/log/containers/_ghippo-system_audit-log.log tail.skoala-gw Collect microservice gateway logs /var/log/containers/_skoala-gw.log
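For reference, an input such as tail.kube corresponds to Fluent Bit configuration along the lines of the sketch below. This is a simplified illustration, not the full insight-agent configuration:

[INPUT]
    Name    tail
    Alias   tail.kube
    Path    /var/log/containers/*.log
    Parser  docker
    Tag     kube.*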

                                  Log Filter Plugin

                                  Filter Plugin Plugin Description Lua.audit_log.k8s Use lua to filter Kubernetes audit logs that meet certain conditions

                                  Note

There are more filter plugins than Lua.audit_log.k8s; the table above only introduces the filters that may discard logs.

                                  Log Output Plugin

                                  Output Plugin Plugin Description es.kube.kubeevent.syslog Write Kubernetes audit logs, event logs, and syslog logs to ElasticSearch cluster forward.audit_log Send Kubernetes audit logs and global management audit logs to Global Management es.skoala Write request logs and instance logs of microservice gateway to ElasticSearch cluster"},{"location":"en/admin/insight/best-practice/debug-trace.html","title":"Trace Collection Troubleshooting Guide","text":"

                                  Before attempting to troubleshoot issues with trace data collection, you need to understand the transmission path of trace data. The following is a schematic diagram of the transmission of trace data:

graph TB

sdk[Language probe / SDK] --> workload[Workload cluster otel collector]
--> otel[Global cluster otel collector]
--> jaeger[Global cluster jaeger collector]
--> es[Elasticsearch cluster]

classDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;
classDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;
classDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;

class sdk,workload,otel,jaeger,es cluster

As shown in the figure above, a failure at any step of the transmission will make trace data unqueryable. If you find that there is no trace data after completing the application trace enhancement, perform the following steps:

1. Log in to the AI platform, enter Insight , and select Dashboard in the left navigation bar.

                                  2. Click the dashboard title Overview .

                                  3. Switch to the insight-system -> insight tracing debug dashboard.

                                  4. You can see that this dashboard is composed of three blocks, each responsible for monitoring the data transmission of different clusters and components. Check whether there are problems with trace data transmission through the generated time series chart.

                                    • workload opentelemetry collector
                                    • global opentelemetry collector
                                    • global jaeger collector

                                  "},{"location":"en/admin/insight/best-practice/debug-trace.html#block-introduction","title":"Block Introduction","text":"
                                  1. workload opentelemetry collector

Displays the opentelemetry collector in each worker cluster, which receives trace data from language probes/SDKs and sends the aggregated trace data onward. You can select the cluster using the Cluster selection box in the upper left corner.

                                    Note

                                    Based on these four time series charts, you can determine whether the opentelemetry collector in this cluster is running normally.

                                  2. global opentelemetry collector

Displays the opentelemetry collector in the global service cluster, which receives trace data from the worker clusters' opentelemetry collectors and sends the aggregated trace data onward.

                                    Note

The opentelemetry collector in the global management cluster is also responsible for forwarding the global management module audit logs of all worker clusters, as well as Kubernetes audit logs (not collected by default), to the audit server component of the global management module.

                                  3. global jaeger collector

Displays the jaeger collector in the global management cluster, which receives data from the otel collector in the global management cluster and sends the trace data to the Elasticsearch cluster.

                                  "},{"location":"en/admin/insight/best-practice/find_root_cause.html","title":"Troubleshooting Service Issues with Insight","text":"

                                  This article serves as a guide on using Insight to identify and analyze abnormal components in AI platform and determine the root causes of component exceptions.

Please note that this article assumes a basic understanding of Insight's product features and concepts.

                                  "},{"location":"en/admin/insight/best-practice/find_root_cause.html#service-map-identifying-abnormalities-on-a-macro-level","title":"Service Map - Identifying Abnormalities on a Macro Level","text":"

                                  In enterprise microservice architectures, managing a large number of services with complex interdependencies can be challenging. Insight offers service map monitoring, allowing users to gain a high-level overview of the running microservices in the system.

                                  In the example below, you observe that the node insight-server is highlighted in red/yellow on the service map. By hovering over the node, you can see the error rate associated with it. To investigate further and understand why the error rate is not 0 , you can explore more detailed information:

                                  Alternatively, clicking on the service name at the top will take you to the service's overview UI:

                                  "},{"location":"en/admin/insight/best-practice/find_root_cause.html#service-overview-delving-into-detailed-analysis","title":"Service Overview - Delving into Detailed Analysis","text":"

When it becomes necessary to analyze inbound and outbound traffic separately, you can use the filter in the upper right corner to refine the data. After applying the filter, you can observe that the service has multiple operations with a non-zero error rate. To investigate further, you can inspect the traces generated by these operations during a specific time period by clicking on "View Traces":

                                  "},{"location":"en/admin/insight/best-practice/find_root_cause.html#trace-details-identifying-and-eliminating-root-causes-of-errors","title":"Trace Details - Identifying and Eliminating Root Causes of Errors","text":"

In the trace list, you can easily identify traces marked as error (circled in red in the figure above) and examine their details by clicking on the corresponding trace. The following figure illustrates the trace details:

                                  Within the trace diagram, you can quickly locate the last piece of data in an error state. Expanding the associated logs section reveals the cause of the request error:

                                  Following the above analysis method, you can also identify traces related to other operation errors:

                                  "},{"location":"en/admin/insight/best-practice/find_root_cause.html#lets-get-started-with-your-analysis","title":"Let's Get Started with Your Analysis!","text":""},{"location":"en/admin/insight/best-practice/insight-kafka.html","title":"Kafka + Elasticsearch Stream Architecture for Handling Large-Scale Logs","text":"

As businesses grow, the amount of log data generated by applications increases significantly. To ensure that systems can properly collect and analyze massive amounts of log data, it is common practice to introduce a streaming architecture using Kafka to handle asynchronous data collection. The collected log data flows through Kafka and is consumed by the corresponding components, which then store the data into Elasticsearch for visualization and analysis using Insight.

                                  This article will introduce two solutions:

                                  • Fluentbit + Kafka + Logstash + Elasticsearch
                                  • Fluentbit + Kafka + Vector + Elasticsearch

                                  Once we integrate Kafka into the logging system, the data flow diagram looks as follows:

                                  Both solutions share similarities but differ in the component used to consume Kafka data. To ensure compatibility with Insight's data analysis, the format of the data consumed from Kafka and written into Elasticsearch should be consistent with the data directly written by Fluentbit to Elasticsearch.

                                  Let's first see how Fluentbit writes logs to Kafka:

                                  "},{"location":"en/admin/insight/best-practice/insight-kafka.html#modifying-fluentbit-output-configuration","title":"Modifying Fluentbit Output Configuration","text":"

                                  Once the Kafka cluster is ready, we need to modify the content of the insight-system namespace's ConfigMap . We will add three Kafka outputs and comment out the original three Elasticsearch outputs:

                                  Assuming the Kafka Brokers address is: insight-kafka.insight-system.svc.cluster.local:9092

[OUTPUT]
    Name        kafka
    Match_Regex (?:kube|syslog)\.(.*)
    Brokers     insight-kafka.insight-system.svc.cluster.local:9092
    Topics      insight-logs
    format      json
    timestamp_key @timestamp
    rdkafka.batch.size 65536
    rdkafka.compression.level 6
    rdkafka.compression.type lz4
    rdkafka.linger.ms 0
    rdkafka.log.connection.close false
    rdkafka.message.max.bytes 2.097152e+06
    rdkafka.request.required.acks 1
[OUTPUT]
    Name        kafka
    Match_Regex (?:skoala-gw)\.(.*)
    Brokers     insight-kafka.insight-system.svc.cluster.local:9092
    Topics      insight-gw-skoala
    format      json
    timestamp_key @timestamp
    rdkafka.batch.size 65536
    rdkafka.compression.level 6
    rdkafka.compression.type lz4
    rdkafka.linger.ms 0
    rdkafka.log.connection.close false
    rdkafka.message.max.bytes 2.097152e+06
    rdkafka.request.required.acks 1
[OUTPUT]
    Name        kafka
    Match_Regex (?:kubeevent)\.(.*)
    Brokers     insight-kafka.insight-system.svc.cluster.local:9092
    Topics      insight-event
    format      json
    timestamp_key @timestamp
    rdkafka.batch.size 65536
    rdkafka.compression.level 6
    rdkafka.compression.type lz4
    rdkafka.linger.ms 0
    rdkafka.log.connection.close false
    rdkafka.message.max.bytes 2.097152e+06
    rdkafka.request.required.acks 1

                                  Next, let's discuss the subtle differences in consuming Kafka data and writing it to Elasticsearch. As mentioned at the beginning of this article, we will explore Logstash and Vector as two ways to consume Kafka data.

                                  "},{"location":"en/admin/insight/best-practice/insight-kafka.html#consuming-kafka-and-writing-to-elasticsearch","title":"Consuming Kafka and Writing to Elasticsearch","text":"

                                  Assuming the Elasticsearch address is: https://mcamel-common-es-cluster-es-http.mcamel-system:9200

                                  "},{"location":"en/admin/insight/best-practice/insight-kafka.html#using-logstash-for-consumption","title":"Using Logstash for Consumption","text":"

                                  If you are familiar with the Logstash technology stack, you can continue using this approach.

                                  When deploying Logstash via Helm, you can add the following pipeline in the logstashPipeline section:

replicas: 3
resources:
  requests:
    cpu: 100m
    memory: 1536Mi
  limits:
    cpu: 1000m
    memory: 1536Mi
logstashConfig:
  logstash.yml: |
    http.host: 0.0.0.0
    xpack.monitoring.enabled: false
logstashPipeline:
  insight-event.conf: |
    input {
      kafka {
        add_field => {"kafka_topic" => "insight-event"}
        topics => ["insight-event"]
        bootstrap_servers => "172.30.120.189:32082" # Kafka IP and port
        enable_auto_commit => true
        consumer_threads => 1                       # matches the number of topic partitions
        decorate_events => true
        codec => "plain"
      }
    }

    filter {
      mutate { gsub => [ "message", "@timestamp", "_@timestamp"] }
      json {source => "message"}
      date {
        match => [ "_@timestamp", "UNIX" ]
        remove_field => "_@timestamp"
        remove_tag => "_timestampparsefailure"
      }
      mutate {
        remove_field => ["event", "message"]
      }
    }

    output {
      if [kafka_topic] == "insight-event" {
        elasticsearch {
          hosts => ["https://172.30.120.201:32427"] # Elasticsearch address
          user => 'elastic'                         # Elasticsearch username
          ssl => 'true'
          password => '0OWj4D54GTH3xK06f9Gg01Zk'    # Elasticsearch password
          ssl_certificate_verification => 'false'
          action => "create"
          index => "insight-es-k8s-event-logs-alias"
          data_stream => "false"
        }
      }
    }
  insight-gw-skoala.conf: |
    input {
      kafka {
        add_field => {"kafka_topic" => "insight-gw-skoala"}
        topics => ["insight-gw-skoala"]
        bootstrap_servers => "172.30.120.189:32082"
        enable_auto_commit => true
        consumer_threads => 1
        decorate_events => true
        codec => "plain"
      }
    }

    filter {
      mutate { gsub => [ "message", "@timestamp", "_@timestamp"] }
      json {source => "message"}
      date {
        match => [ "_@timestamp", "UNIX" ]
        remove_field => "_@timestamp"
        remove_tag => "_timestampparsefailure"
      }
      mutate {
        remove_field => ["event", "message"]
      }
    }

    output {
      if [kafka_topic] == "insight-gw-skoala" {
        elasticsearch {
          hosts => ["https://172.30.120.201:32427"]
          user => 'elastic'
          ssl => 'true'
          password => '0OWj4D54GTH3xK06f9Gg01Zk'
          ssl_certificate_verification => 'false'
          action => "create"
          index => "skoala-gw-alias"
          data_stream => "false"
        }
      }
    }
  insight-logs.conf: |
    input {
      kafka {
        add_field => {"kafka_topic" => "insight-logs"}
        topics => ["insight-logs"]
        bootstrap_servers => "172.30.120.189:32082"
        enable_auto_commit => true
        consumer_threads => 1
        decorate_events => true
        codec => "plain"
      }
    }

    filter {
      mutate { gsub => [ "message", "@timestamp", "_@timestamp"] }
      json {source => "message"}
      date {
        match => [ "_@timestamp", "UNIX" ]
        remove_field => "_@timestamp"
        remove_tag => "_timestampparsefailure"
      }
      mutate {
        remove_field => ["event", "message"]
      }
    }

    output {
      if [kafka_topic] == "insight-logs" {
        elasticsearch {
          hosts => ["https://172.30.120.201:32427"]
          user => 'elastic'
          ssl => 'true'
          password => '0OWj4D54GTH3xK06f9Gg01Zk'
          ssl_certificate_verification => 'false'
          action => "create"
          index => "insight-es-k8s-logs-alias"
          data_stream => "false"
        }
      }
    }
                                  "},{"location":"en/admin/insight/best-practice/insight-kafka.html#consumption-with-vector","title":"Consumption with Vector","text":"

                                  If you are familiar with the Vector technology stack, you can continue using this approach.

                                  When deploying Vector via Helm, you can reference a ConfigMap with the following rules:

metadata:
  name: vector
apiVersion: v1
data:
  aggregator.yaml: |
    api:
      enabled: true
      address: '0.0.0.0:8686'
    sources:
      insight_logs_kafka:
        type: kafka
        bootstrap_servers: 'insight-kafka.insight-system.svc.cluster.local:9092'
        group_id: consumer-group-insight
        topics:
          - insight-logs
      insight_event_kafka:
        type: kafka
        bootstrap_servers: 'insight-kafka.insight-system.svc.cluster.local:9092'
        group_id: consumer-group-insight
        topics:
          - insight-event
      insight_gw_skoala_kafka:
        type: kafka
        bootstrap_servers: 'insight-kafka.insight-system.svc.cluster.local:9092'
        group_id: consumer-group-insight
        topics:
          - insight-gw-skoala
    transforms:
      insight_logs_remap:
        type: remap
        inputs:
          - insight_logs_kafka
        source: |2
              . = parse_json!(string!(.message))
              .@timestamp = now()
      insight_event_kafka_remap:
        type: remap
        inputs:
          - insight_event_kafka   # the skoala topic is handled by its own remap below
        source: |2
              . = parse_json!(string!(.message))
              .@timestamp = now()
      insight_gw_skoala_kafka_remap:
        type: remap
        inputs:
          - insight_gw_skoala_kafka
        source: |2
              . = parse_json!(string!(.message))
              .@timestamp = now()
    sinks:
      insight_es_logs:
        type: elasticsearch
        inputs:
          - insight_logs_remap
        api_version: auto
        auth:
          strategy: basic
          user: elastic
          password: 8QZJ656ax3TXZqQh205l3Ee0
        bulk:
          index: insight-es-k8s-logs-alias-1418
        endpoints:
          - 'https://mcamel-common-es-cluster-es-http.mcamel-system:9200'
        tls:
          verify_certificate: false
          verify_hostname: false
      insight_es_event:
        type: elasticsearch
        inputs:
          - insight_event_kafka_remap
        api_version: auto
        auth:
          strategy: basic
          user: elastic
          password: 8QZJ656ax3TXZqQh205l3Ee0
        bulk:
          index: insight-es-k8s-event-logs-alias-1418
        endpoints:
          - 'https://mcamel-common-es-cluster-es-http.mcamel-system:9200'
        tls:
          verify_certificate: false
          verify_hostname: false
      insight_es_gw_skoala:
        type: elasticsearch
        inputs:
          - insight_gw_skoala_kafka_remap
        api_version: auto
        auth:
          strategy: basic
          user: elastic
          password: 8QZJ656ax3TXZqQh205l3Ee0
        bulk:
          index: skoala-gw-alias-1418
        endpoints:
          - 'https://mcamel-common-es-cluster-es-http.mcamel-system:9200'
        tls:
          verify_certificate: false
          verify_hostname: false
                                  "},{"location":"en/admin/insight/best-practice/insight-kafka.html#checking-if-its-working-properly","title":"Checking if it's Working Properly","text":"

You can verify whether the configuration is successful by checking for new data in the Insight log query interface, or by observing an increase in the number of indices in Elasticsearch.

                                  "},{"location":"en/admin/insight/best-practice/insight-kafka.html#references","title":"References","text":"
                                  • Logstash Helm Chart
                                  • Vector Helm Chart
                                  • Vector Practices
• Vector Performance
                                  "},{"location":"en/admin/insight/best-practice/integration_deepflow.html","title":"Integrate DeepFlow","text":"

                                  DeepFlow is an observability product based on eBPF. Its community edition has been integrated into Insight. The following is the integration process.

                                  "},{"location":"en/admin/insight/best-practice/integration_deepflow.html#prerequisites","title":"Prerequisites","text":"
                                  • Your global service cluster has installed Insight
                                  • Insight minimum version requirement is v0.23.0
                                  • Understand and meet the DeepFlow runtime permissions and kernel requirements
                                  • Storage volume is ready
                                  "},{"location":"en/admin/insight/best-practice/integration_deepflow.html#install-deepflow-and-configure-insight","title":"Install DeepFlow and Configure Insight","text":"

                                  Installing DeepFlow components requires two charts:

                                  • deepflow: includes components such as deepflow-app, deepflow-server, deepflow-clickhouse, and deepflow-agent. Generally, deepflow is deployed in the global service cluster, so it also installs deepflow-agent together.
                                  • deepflow-agent: only includes the deepflow-agent component, used to collect eBPF data and send it to deepflow-server.
                                  "},{"location":"en/admin/insight/best-practice/integration_deepflow.html#install-deepflow","title":"Install DeepFlow","text":"

                                  DeepFlow needs to be installed in the global service cluster.

                                  1. Go to the kpanda-global-cluster cluster and click Helm Apps -> Helm Charts in the left navigation bar, select community as the repository, and search for deepflow in the search box:

                                  2. Click the deepflow card to enter the details page:

                                  3. Click Install to enter the installation page:

4. Most of the values have defaults. ClickHouse and MySQL require persistent storage volumes, with a default size of 10Gi each. You can search for the relevant configuration items using the persistence keyword and modify them (see the sketch after these steps).

                                  5. After configuring, click OK to start the installation.
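The storage-related values might look like the following sketch. The key paths are illustrative only; search the install form for persistence to find the exact paths in the deepflow chart:

clickhouse:
  storageConfig:
    persistence:
      size: 10Gi    # illustrative key path -- confirm against the chart's values
mysql:
  storageConfig:
    persistence:
      size: 10Gi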

                                  "},{"location":"en/admin/insight/best-practice/integration_deepflow.html#configure-insight","title":"Configure Insight","text":"

                                  After installing DeepFlow, you also need to enable the related feature switches in Insight.

1. Click ConfigMaps & Keys -> ConfigMaps in the left navigation bar, search for insight-server-config in the search box, and edit it:

                                  2. In the YAML, find the eBPF Flow feature switch and enable it:

                                  3. Save the changes and restart insight-server. The Insight main page will display Network Observability :

                                  "},{"location":"en/admin/insight/best-practice/integration_deepflow.html#install-deepflow-agent","title":"Install DeepFlow Agent","text":"

                                  DeepFlow Agent is installed in the sub-cluster using the deepflow-agent chart. It is used to collect eBPF observability data from the sub-cluster and report it to the global service cluster. Similar to installing deepflow, go to Helm Apps -> Helm Charts, select community as the repository, and search for deepflow-agent in the search box. Follow the process to enter the installation page.

Parameter Explanation (a combined sketch follows this list):

                                  • DeployComponent : deployment mode, default is daemonset.
                                  • timezone : timezone, default is Asia/Shanghai.
                                  • DeepflowServerNodeIPS : addresses of the nodes where deepflow server is installed.
                                  • deepflowK8sClusterID : cluster UUID.
                                  • agentGroupID : agent group ID.
                                  • controllerPort : data reporting port of deepflow server, can be left blank, default is 30035.
                                  • clusterNAME : cluster name.
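A hypothetical values sketch for the deepflow-agent chart, with key names mirroring the parameters above (verify them against the chart's actual values.yaml):

deployComponent: daemonset
timezone: "Asia/Shanghai"
deepflowServerNodeIPS:
  - <deepflow-server-node-ip>   # nodes where deepflow-server runs
deepflowK8sClusterID: "<cluster-uuid>"
agentGroupID: "<agent-group-id>"
controllerPort: 30035
clusterNAME: "<cluster-name>"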

                                  After configuring, click OK to complete the installation.

                                  "},{"location":"en/admin/insight/best-practice/integration_deepflow.html#usage","title":"Usage","text":"

                                  After correctly installing DeepFlow, click Network Observability to enter the DeepFlow Grafana UI. It contains a large number of dashboards for viewing and helping analyze issues. Click DeepFlow Templates to browse all available dashboards:

                                  "},{"location":"en/admin/insight/best-practice/sw-to-otel.html","title":"Simplifying Trace Data Integration with OpenTelemetry and SkyWalking","text":"

                                  This article explains how to seamlessly integrate trace data from SkyWalking into the Insight platform, using OpenTelemetry. With zero code modification required, you can transform your existing SkyWalking trace data and leverage Insight's capabilities.

                                  "},{"location":"en/admin/insight/best-practice/sw-to-otel.html#understanding-the-code","title":"Understanding the Code","text":"

                                  To ensure compatibility with different distributed tracing implementations, OpenTelemetry provides a way to incorporate components that standardize data processing and output to various backends. While Jaeger and Zipkin are already available, we have contributed the SkyWalkingReceiver to the OpenTelemetry community. This receiver has been refined and is now suitable for use in production environments without any modifications to your application's code.

                                  Although SkyWalking and OpenTelemetry share similarities, such as using Trace to define a trace and Span to mark the smallest granularity, there are differences in certain details and implementations:

                                  SkyWalking OpenTelemetry Data Structure Span -> Segment -> Trace Span -> Trace Attribute Information Tags Attributes Application Time Logs Events Reference Relationship References Links

                                  Now, let's discuss the steps involved in converting SkyWalking Trace to OpenTelemetry Trace. The main tasks include:

                                  1. Constructing OpenTelemetry's TraceId and SpanId

                                  2. Constructing OpenTelemetry's ParentSpanId

                                  3. Retaining SkyWalking's original TraceId, SegmentId, and SpanId in OpenTelemetry Spans

                                  First, let's look at how to construct the TraceId and SpanId for OpenTelemetry. Both SkyWalking and OpenTelemetry use TraceId to connect distributed service calls and use SpanId to mark each Span, but there are significant differences in the implementation specifications:

                                  Info

View GitHub for code implementation:

                                  1. Skywalking Receiver
                                  2. PR: Create skywalking component folder/structure
                                  3. PR: add Skywalking tracing receiver impl

                                  Specifically, the possible formats for SkyWalking TraceId and SegmentId are as follows:

                                  In the OpenTelemetry protocol, a Span is unique across all Traces, while in SkyWalking, a Span is only unique within each Segment. This means that to uniquely identify a Span in SkyWalking, it is necessary to combine the SegmentId and SpanId, and convert it to the SpanId in OpenTelemetry.

                                  Info

View GitHub for code implementation:

                                  1. Skywalking Receiver
                                  2. PR: Fix skywalking traceid and spanid convertion

                                  Next, let's see how to construct the ParentSpanId for OpenTelemetry. Within a Segment, the ParentSpanId field in SkyWalking can be directly used to construct the ParentSpanId field in OpenTelemetry. However, when a Trace spans multiple Segments, SkyWalking uses the association information represented by ParentTraceSegmentId and ParentSpanId in the Reference. In this case, the ParentSpanId in OpenTelemetry needs to be constructed using the information in the Reference.

                                  Code implementation can be found on GitHub: Skywalking Receiver

Finally, let's see how to preserve the original TraceId, SegmentId, and SpanId from SkyWalking in the OpenTelemetry Span. We carry this original information in order to associate the OpenTelemetry TraceId and SpanId displayed in the distributed tracing backend with the SkyWalking TraceId, SegmentId, and SpanId in the application logs. We choose to carry the original TraceId, SegmentId, and ParentSegmentId from SkyWalking into the OpenTelemetry Attributes.

                                  Info

View GitHub for code implementation:

                                  1. Skywalking Receiver
                                  2. Add extra link attributes from skywalking ref

                                  After this series of conversions, we have fully transformed the SkyWalking Segment Object into an OpenTelemetry Trace, as shown in the following diagram:

                                  "},{"location":"en/admin/insight/best-practice/sw-to-otel.html#deploying-the-demo","title":"Deploying the Demo","text":"

                                  To demonstrate the complete process of collecting and displaying SkyWalking tracing data using OpenTelemetry, we will use a demo application.

                                  First, deploy the OpenTelemetry Agent and enable the following configuration to ensure compatibility with the SkyWalking protocol:

# otel-agent config
receivers:
  skywalking:
    protocols:
      grpc:
        endpoint: 0.0.0.0:11800 # Receive trace data reported by the SkyWalking Agent
      http:
        endpoint: 0.0.0.0:12800 # Receive trace data reported from the front-end / nginx or other HTTP protocols
service:
  pipelines:
    traces:
      receivers: [skywalking]

# otel-agent service yaml
spec:
  ports:
    - name: sw-http
      port: 12800
      protocol: TCP
      targetPort: 12800
    - name: sw-grpc
      port: 11800
      protocol: TCP
      targetPort: 11800

                                  Next, modify the connection of your business application from the SkyWalking OAP Service (e.g., oap:11800) to the OpenTelemetry Agent Service (e.g., otel-agent:11800). This will allow you to start receiving trace data from the SkyWalking probe using OpenTelemetry.
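
                                  For example, if your workload uses the SkyWalking Java Agent, this is typically just an environment variable change in the workload spec (the Service address below is a placeholder; substitute your actual otel-agent Service):

                                  # snippet from the workload's container spec\nenv:\n  - name: SW_AGENT_COLLECTOR_BACKEND_SERVICES   # read by the SkyWalking Java Agent\n    value: \"otel-agent.insight-system.svc.cluster.local:11800\"   # placeholder otel-agent Service address\n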

                                  To demonstrate the entire process, we will use the SkyWalking-showcase Demo. This demo utilizes the SkyWalking Agent for tracing, and after being processed by OpenTelemetry, the final results are presented using Jaeger:

                                  From the architecture diagram of the SkyWalking Showcase, we can observe that the data remains intact even after standardization by OpenTelemetry. In this trace, the request starts from app/homepage, then two requests /rcmd and /songs/top are initiated simultaneously within the app, distributed to the recommendation and songs services, and finally reach the database for querying, completing the entire request chain.

                                  Additionally, you can view the original SkyWalking Id information on the Jaeger page, which facilitates correlation with application logs:

                                  By following these steps, you can seamlessly integrate SkyWalking trace data into OpenTelemetry and leverage the capabilities of the Insight platform.

                                  "},{"location":"en/admin/insight/best-practice/tail-based-sampling.html","title":"About Trace Sampling and Configuration","text":"

                                  Using distributed tracing, you can observe how requests flow through various systems in a distributed system. Undeniably, it is very useful for understanding service connections, diagnosing latency issues, and providing many other benefits.

                                  However, if most of your requests are successful and finish without unacceptable delays or errors, do you really need all this data? Often you do not: the right insights can be obtained from an appropriate sample of the data rather than from a large amount of, or even the complete, data.

                                  The idea behind sampling is to control the traces sent to the observability collector, thereby reducing collection costs. Different organizations have different reasons for sampling, including why they want to sample and what types of data they wish to sample. Therefore, we need to customize the sampling strategy:

                                  • Cost Management: Storing and processing large amounts of telemetry data incurs higher computational and storage costs.
                                  • Focus on Interesting Traces: Different organizations prioritize different data types.
                                  • Filter Out Noise: For example, you may want to filter out health checks.

                                  It is important to use consistent terminology when discussing sampling. A Trace or Span is considered sampled or unsampled:

                                  • Sampled: A Trace or Span that is processed and stored. It is chosen by the sampler to represent the overall data, so it is considered sampled.
                                  • Unsampled: A Trace or Span that is not processed or stored. Because it was not selected by the sampler, it is considered unsampled.
                                  "},{"location":"en/admin/insight/best-practice/tail-based-sampling.html#what-are-the-sampling-options","title":"What Are the Sampling Options?","text":""},{"location":"en/admin/insight/best-practice/tail-based-sampling.html#head-sampling","title":"Head Sampling","text":"

                                  Head sampling is a sampling technique used to make a sampling decision as early as possible. A decision to sample or drop a span or trace is not made by inspecting the trace as a whole.

                                  For example, the most common form of head sampling is Consistent Probability Sampling. This is also referred to as Deterministic Sampling. In this case, a sampling decision is made based on the trace ID and the desired percentage of traces to sample. This ensures that whole traces are sampled - no missing spans - at a consistent rate, such as 5% of all traces.
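
                                  As a minimal sketch, consistent probability head sampling can be configured in an OpenTelemetry Collector with the probabilistic_sampler processor (the percentage, endpoint, and pipeline wiring below are illustrative):

                                  receivers:\n  otlp:\n    protocols:\n      grpc:\nexporters:\n  otlp:\n    endpoint: backend:4317   # placeholder backend address\nprocessors:\n  probabilistic_sampler:\n    sampling_percentage: 5   # keep roughly 5% of traces, decided deterministically from the trace ID\nservice:\n  pipelines:\n    traces:\n      receivers: [otlp]\n      processors: [probabilistic_sampler]\n      exporters: [otlp]\n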

                                  The upsides to head sampling are:

                                  • Easy to understand
                                  • Easy to configure
                                  • Efficient
                                  • Can be done at any point in the trace collection pipeline

                                  The primary downside to head sampling is that it is not possible to make a sampling decision based on data in the entire trace. This means that while head sampling is effective as a blunt instrument, it is insufficient for sampling strategies that must consider information from the entire system. For example, you cannot ensure that all traces containing an error are sampled with head sampling alone. For this situation and many others, you need tail sampling.

                                  "},{"location":"en/admin/insight/best-practice/tail-based-sampling.html#tail-sampling-recommended","title":"Tail Sampling (Recommended)","text":"

                                  Tail sampling is where the decision to sample a trace takes place by considering all or most of the spans within the trace. Tail Sampling gives you the option to sample your traces based on specific criteria derived from different parts of a trace, which isn't an option with Head Sampling.

                                  Some examples of how to use tail sampling include:

                                  • Always sampling traces that contain an error
                                  • Sampling traces based on overall latency
                                  • Sampling traces based on the presence or value of specific attributes on one or more spans in a trace; for example, sampling more traces originating from a newly deployed service
                                  • Applying different sampling rates to traces based on certain criteria, such as applying one rate to traces from low-volume services and another to traces from high-volume services

                                  As you can see, tail sampling allows for a much higher degree of sophistication in how you sample data. For larger systems that must sample telemetry, it is almost always necessary to use Tail Sampling to balance data volume with the usefulness of that data.

                                  There are three primary downsides to tail sampling today:

                                  • Tail sampling can be difficult to implement. Depending on the kind of sampling techniques available to you, it is not always a \"set and forget\" kind of thing. As your systems change, so too will your sampling strategies. For a large and sophisticated distributed system, rules that implement sampling strategies can also be large and sophisticated.
                                  • Tail sampling can be difficult to operate. The component(s) that implement tail sampling must be stateful systems that can accept and store a large amount of data. Depending on traffic patterns, this can require dozens or even hundreds of compute nodes that all utilize resources differently. Furthermore, a tail sampler might need to \"fall back\" to less computationally intensive sampling techniques if it is unable to keep up with the volume of data it is receiving. Because of these factors, it is critical to monitor tail-sampling components to ensure that they have the resources they need to make the correct sampling decisions.
                                  • Tail samplers often end up as vendor-specific technology today. If you're using a paid vendor for Observability, the most effective tail sampling options available to you might be limited to what the vendor offers.

                                  Finally, for some systems, tail sampling might be used in conjunction with Head Sampling. For example, a set of services that produce an extremely high volume of trace data might first use head sampling to sample only a small percentage of traces, and then later in the telemetry pipeline use tail sampling to make more sophisticated sampling decisions before exporting to a backend. This is often done in the interest of protecting the telemetry pipeline from being overloaded.

                                  Insight currently recommends tail sampling and prioritizes supporting it.

                                  The tail sampling processor samples traces based on a defined set of strategies. However, all spans of a trace must be received by the same collector instance to make effective sampling decisions.

                                  Therefore, adjustments need to be made to the Global OpenTelemetry Collector architecture of Insight to implement the tail sampling strategy.

                                  "},{"location":"en/admin/insight/best-practice/tail-based-sampling.html#specific-changes-to-insight","title":"Specific Changes to Insight","text":"

                                  Introduce an OpenTelemetry Collector Gateway component with load balancing capabilities in front of the insight-opentelemetry-collector in the Global cluster, so that all spans belonging to the same Trace are routed to the same OpenTelemetry Collector instance based on the TraceID.

                                  1. Deploy an OTel Collector Gateway component with load balancing capabilities.

                                    If you are using Insight V0.25.x, you can quickly enable this by using the Helm Upgrade parameter --set opentelemetry-collector-gateway.enabled=true, thereby skipping the deployment process described below.
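
                                    For reference, the upgrade command might look like the following (the release and chart names are placeholders; match them to your installation):

                                    helm upgrade insight <insight-chart> -n insight-system --set opentelemetry-collector-gateway.enabled=true\n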

                                    Refer to the following YAML to deploy the component.

                                    Click to view deployment configuration
                                    kind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: insight-otel-collector-gateway\nrules:\n- apiGroups: [\"\"]\n  resources: [\"endpoints\"]\n  verbs: [\"get\", \"watch\", \"list\"]\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: insight-otel-collector-gateway\n  namespace: insight-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: insight-otel-collector-gateway\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: insight-otel-collector-gateway\nsubjects:\n- kind: ServiceAccount\n  name: insight-otel-collector-gateway\n  namespace: insight-system\n---\nkind: ConfigMap\nmetadata:\n  labels:\n    app.kubernetes.io/component: opentelemetry-collector\n    app.kubernetes.io/instance: insight-otel-collector-gateway\n    app.kubernetes.io/name: insight-otel-collector-gateway\n  name: insight-otel-collector-gateway-collector\n  namespace: insight-system\napiVersion: v1\ndata:\n  collector.yaml: |\n    receivers:\n      otlp:\n        protocols:\n          grpc:\n          http:\n      jaeger:\n        protocols:\n          grpc:\n    processors:\n\n    extensions:\n      health_check:\n      pprof:\n        endpoint: :1888\n      zpages:\n        endpoint: :55679\n    exporters:\n      logging:\n      loadbalancing:\n        routing_key: \"traceID\"\n        protocol:\n          otlp:\n            # all options from the OTLP exporter are supported\n            # except the endpoint\n            timeout: 1s\n            tls:\n              insecure: true\n        resolver:\n          k8s:\n            service: insight-opentelemetry-collector\n            ports:\n              - 4317\n    service:\n      extensions: [pprof, zpages, health_check]\n      pipelines:\n        traces:\n          receivers: [otlp, jaeger]\n          exporters: [loadbalancing]\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app.kubernetes.io/component: opentelemetry-collector\n    app.kubernetes.io/instance: insight-otel-collector-gateway\n    app.kubernetes.io/name: insight-otel-collector-gateway\n  name: insight-otel-collector-gateway\n  namespace: insight-system\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      app.kubernetes.io/component: opentelemetry-collector\n      app.kubernetes.io/instance: insight-otel-collector-gateway\n      app.kubernetes.io/name: insight-otel-collector-gateway\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/component: opentelemetry-collector\n        app.kubernetes.io/instance: insight-otel-collector-gateway\n        app.kubernetes.io/name: insight-otel-collector-gateway\n    spec:\n      containers:\n      - args:\n        - --config=/conf/collector.yaml\n        env:\n        - name: POD_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.name\n        image: ghcr.m.daocloud.io/openinsight-proj/opentelemetry-collector-contrib:5baef686672cfe5551e03b5c19d3072c432b6f33\n        imagePullPolicy: IfNotPresent\n        livenessProbe:\n          failureThreshold: 3\n          httpGet:\n            path: /\n            port: 13133\n            scheme: HTTP\n          periodSeconds: 10\n          successThreshold: 1\n          timeoutSeconds: 1\n        name: otc-container\n        resources:\n          limits:\n            cpu: '1'\n            memory: 2Gi\n          requests:\n            cpu: 100m\n            memory: 400Mi\n   
     ports:\n        - containerPort: 14250\n          name: jaeger-grpc\n          protocol: TCP\n        - containerPort: 8888\n          name: metrics\n          protocol: TCP\n        - containerPort: 4317\n          name: otlp-grpc\n          protocol: TCP\n        - containerPort: 4318\n          name: otlp-http\n          protocol: TCP\n        - containerPort: 55679\n          name: zpages\n          protocol: TCP\n\n        volumeMounts:\n        - mountPath: /conf\n          name: otc-internal\n\n      serviceAccount: insight-otel-collector-gateway\n      serviceAccountName: insight-otel-collector-gateway\n      volumes:\n      - configMap:\n          defaultMode: 420\n          items:\n          - key: collector.yaml\n            path: collector.yaml\n          name: insight-otel-collector-gateway-collector\n        name: otc-internal\n---\nkind: Service\napiVersion: v1\nmetadata:\n  name: insight-opentelemetry-collector-gateway\n  namespace: insight-system\n  labels:\n    app.kubernetes.io/component: opentelemetry-collector\n    app.kubernetes.io/instance: insight-otel-collector-gateway\n    app.kubernetes.io/name: insight-otel-collector-gateway\nspec:\n  ports:\n    - name: fluentforward\n      protocol: TCP\n      port: 8006\n      targetPort: 8006\n    - name: jaeger-compact\n      protocol: UDP\n      port: 6831\n      targetPort: 6831\n    - name: jaeger-grpc\n      protocol: TCP\n      port: 14250\n      targetPort: 14250\n    - name: jaeger-thrift\n      protocol: TCP\n      port: 14268\n      targetPort: 14268\n    - name: metrics\n      protocol: TCP\n      port: 8888\n      targetPort: 8888\n    - name: otlp\n      protocol: TCP\n      appProtocol: grpc\n      port: 4317\n      targetPort: 4317\n    - name: otlp-http\n      protocol: TCP\n      port: 4318\n      targetPort: 4318\n    - name: zipkin\n      protocol: TCP\n      port: 9411\n      targetPort: 9411\n    - name: zpages\n      protocol: TCP\n      port: 55679\n      targetPort: 55679\n  selector:\n    app.kubernetes.io/component: opentelemetry-collector\n    app.kubernetes.io/instance: insight-otel-collector-gateway\n    app.kubernetes.io/name: insight-otel-collector-gateway\n
                                  2. Configure Tail Sampling Rules

                                    Note

                                    Tail sampling rules need to be added to the existing insight-otel-collector-config ConfigMap.

                                  3. Add the following content to the processors section, adjusting the specific rules as needed; refer to the OTel official example.

                                    ........\ntail_sampling:\n  decision_wait: 10s # Wait for 10 seconds, traces older than 10 seconds will no longer be processed\n  num_traces: 1500000  # Number of traces saved in memory, assuming 1000 traces per second, should not be less than 1000 * decision_wait * 2;\n                       # Setting it too large may consume too much memory resources, setting it too small may cause some traces to be dropped\n  expected_new_traces_per_sec: 10\n  policies: # Reporting policies\n    [\n        {\n          name: latency-policy,\n          type: latency,  # Report traces that exceed 500ms\n          latency: {threshold_ms: 500}\n        },\n        {\n          name: status_code-policy,\n          type: status_code,  # Report traces with ERROR status code\n          status_code: {status_codes: [ ERROR ]}\n        }\n    ]\n......\ntail_sampling: # Composite sampling\n  decision_wait: 10s # Wait for 10 seconds, traces older than 10 seconds will no longer be processed\n  num_traces: 1500000  # Number of traces saved in memory, assuming 1000 traces per second, should not be less than 1000 * decision_wait * 2;\n                       # Setting it too large may consume too much memory resources, setting it too small may cause some traces to be dropped\n  expected_new_traces_per_sec: 10\n  policies: [\n      {\n        name: debug-worker-cluster-sample-policy,\n        type: and,\n        and:\n          {\n            and_sub_policy:\n              [\n                {\n                  name: service-name-policy,\n                  type: string_attribute,\n                  string_attribute:\n                    { key: k8s.cluster.id, values: [xxxxxxx] },\n                },\n                {\n                  name: trace-status-policy,\n                  type: status_code,\n                  status_code: { status_codes: [ERROR] },\n                },\n                {\n                  name: probabilistic-policy,\n                  type: probabilistic,\n                  probabilistic: { sampling_percentage: 1 },\n                }\n              ]\n          }\n      }\n    ]\n
                                  4. Activate the processor in the traces pipeline of the insight-otel-collector-config ConfigMap:

                                    traces:\n  exporters:\n    - servicegraph\n    - otlp/jaeger\n  processors:\n    - memory_limiter\n    - tail_sampling # \ud83d\udc48\n    - batch\n  receivers:\n    - otlp\n
                                  5. Restart the insight-opentelemetry-collector component.

                                  6. When deploying insight-agent, change the trace data reporting address to port 4317 of the OTel Collector gateway (load balancer) Service.

                                    ....\n    exporters:\n      otlp/global:\n        endpoint: insight-opentelemetry-collector-lb.insight-system.svc.cluster.local:4317  # \ud83d\udc48 Modify to lb address\n
                                  "},{"location":"en/admin/insight/best-practice/tail-based-sampling.html#reference","title":"Reference","text":"
                                  • sampling
                                  "},{"location":"en/admin/insight/collection-manag/agent-status.html","title":"insight-agent Component Status Explanation","text":"

                                  In the AI platform, Insight acts as a multi-cluster observability product. To achieve unified data collection across multiple clusters, users need to install the Helm App insight-agent (installed by default in the insight-system namespace). Refer to How to Install insight-agent.

                                  "},{"location":"en/admin/insight/collection-manag/agent-status.html#status-explanation","title":"Status Explanation","text":"

                                  In the \"Observability\" -> \"Collection Management\" section, you can view the installation status of insight-agent in each cluster.

                                  • Not Installed : insight-agent is not installed in the insight-system namespace of the cluster.
                                  • Running : insight-agent is successfully installed in the cluster, and all deployed components are running.
                                  • Error : insight-agent is installed, but the Helm deployment failed or some deployed components are not in a running state.

                                  You can troubleshoot using the following steps:

                                  1. Run the following command. If the status is deployed , proceed to the next step. If it is failed , it is recommended to uninstall and reinstall it from Container Management -> Helm Apps as it may affect application upgrades:

                                    helm list -n insight-system\n
                                  2. Run the following command or check the status of the deployed components in Insight -> Data Collection . If there are Pods not in the Running state, restart the containers in an abnormal state.

                                    kubectl get pods -n insight-system\n
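
                                    To restart a Pod in an abnormal state, you can simply delete it and let its controller recreate it (the Pod name below is a placeholder):

                                    kubectl -n insight-system delete pod <abnormal-pod-name>   # the owning Deployment/DaemonSet recreates it\n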
                                  "},{"location":"en/admin/insight/collection-manag/agent-status.html#additional-notes","title":"Additional Notes","text":"
                                  1. The resource consumption of the Prometheus metric collection component in insight-agent is directly proportional to the number of Pods running in the cluster. Please adjust the resources for Prometheus according to the cluster size. Refer to Prometheus Resource Planning.

                                  2. The storage capacity of the vmstorage metric storage component in the global service cluster is directly proportional to the total number of Pods in the clusters.

                                    • Please contact the platform administrator to adjust the disk capacity of vmstorage based on the cluster size. Refer to vmstorage Disk Capacity Planning.
                                    • Adjust the vmstorage disk based on the multi-cluster scale. Refer to vmstorage Disk Expansion.
                                  "},{"location":"en/admin/insight/collection-manag/collection-manag.html","title":"Data Collection","text":"

                                  Data Collection centrally manages and displays the installation status of the collection plug-in insight-agent across clusters. It helps users quickly check the health of each cluster's collection plug-in and provides a quick entry point for configuring collection rules.

                                  The specific operation steps are as follows:

                                  1. Click in the upper left corner and select Insight -> Data Collection .

                                  2. You can view the status of all cluster collection plug-ins.

                                  3. When the cluster has insight-agent connected and running, click a cluster name to enter the details.

                                  4. In the Service Monitor tab, click the shortcut link to jump to Container Management -> CRD to add service discovery rules.

                                  "},{"location":"en/admin/insight/collection-manag/metric-collect.html","title":"Metrics Retrieval Methods","text":"

                                  Prometheus primarily uses the Pull approach to retrieve monitoring metrics from target services' exposed endpoints. Therefore, it requires configuring proper scraping jobs to request monitoring data and write it into the storage provided by Prometheus. Currently, Prometheus offers several configurations for these jobs:

                                  • Native Job Configuration: This provides native Prometheus job configuration for scraping.
                                  • Pod Monitor: In the Kubernetes ecosystem, it allows scraping of monitoring data from Pods using Prometheus Operator.
                                  • Service Monitor: In the Kubernetes ecosystem, it allows scraping monitoring data from Endpoints of Services using Prometheus Operator.

                                  Note

                                  [ ] indicates an optional configuration item.

                                  "},{"location":"en/admin/insight/collection-manag/metric-collect.html#native-job-configuration","title":"Native Job Configuration","text":"

                                  The corresponding configuration items are explained as follows:

                                  # Name of the scraping job, also adds a label (job=job_name) to the scraped metrics\njob_name: <job_name>\n\n# Time interval between scrapes\n[ scrape_interval: <duration> | default = <global_config.scrape_interval> ]\n\n# Timeout for scrape requests\n[ scrape_timeout: <duration> | default = <global_config.scrape_timeout> ]\n\n# URI path for the scrape request\n[ metrics_path: <path> | default = /metrics ]\n\n# Handling of label conflicts between scraped labels and labels added by the backend Prometheus.\n# true: Retains the scraped labels and ignores conflicting labels from the backend Prometheus.\n# false: Adds an \"exported_<original-label>\" prefix to the scraped labels and includes the additional labels added by the backend Prometheus.\n[ honor_labels: <boolean> | default = false ]\n\n# Whether to use the timestamp generated by the target being scraped.\n# true: Uses the timestamp from the target if available.\n# false: Ignores the timestamp from the target.\n[ honor_timestamps: <boolean> | default = true ]\n\n# Protocol for the scrape request: http or https\n[ scheme: <scheme> | default = http ]\n\n# URL parameters for the scrape request\nparams:\n  [ <string>: [<string>, ...] ]\n\n# Set the value of the `Authorization` header in the scrape request through basic authentication. password/password_file are mutually exclusive, with password_file taking precedence.\nbasic_auth:\n  [ username: <string> ]\n  [ password: <secret> ]\n  [ password_file: <string> ]\n\n# Set the value of the `Authorization` header in the scrape request through bearer token authentication. bearer_token/bearer_token_file are mutually exclusive, with bearer_token taking precedence.\n[ bearer_token: <secret> ]\n\n# Set the value of the `Authorization` header in the scrape request through bearer token authentication. bearer_token/bearer_token_file are mutually exclusive, with bearer_token taking precedence.\n[ bearer_token_file: <filename> ]\n\n# Whether the scrape connection should use a TLS secure channel, configure the proper TLS parameters\ntls_config:\n  [ <tls_config> ]\n\n# Use a proxy service to scrape the metrics from the target, specify the address of the proxy service.\n[ proxy_url: <string> ]\n\n# Specify the targets using static configuration, see explanation below.\nstatic_configs:\n  [ - <static_config> ... ]\n\n# CVM service discovery configuration, see explanation below.\ncvm_sd_configs:\n  [ - <cvm_sd_config> ... ]\n\n# After scraping the data, rewrite the labels of the proper target using the relabel mechanism. Executes multiple relabel rules in order.\n# See explanation below for relabel_config.\nrelabel_configs:\n  [ - <relabel_config> ... ]\n\n# Before writing the scraped data, rewrite the values of the labels using the relabel mechanism. Executes multiple relabel rules in order.\n# See explanation below for relabel_config.\nmetric_relabel_configs:\n  [ - <relabel_config> ... ]\n\n# Limit the number of data points per scrape, 0: no limit, default is 0\n[ sample_limit: <int> | default = 0 ]\n\n# Limit the number of targets per scrape, 0: no limit, default is 0\n[ target_limit: <int> | default = 0 ]\n
                                  "},{"location":"en/admin/insight/collection-manag/metric-collect.html#pod-monitor","title":"Pod Monitor","text":"

                                  The corresponding configuration items are explained as follows:

                                  # Prometheus Operator CRD version\napiVersion: monitoring.coreos.com/v1\n# proper Kubernetes resource type, here it is PodMonitor\nkind: PodMonitor\n# proper Kubernetes Metadata, only the name needs to be concerned. If jobLabel is not specified, the value of the job label in the scraped metrics will be <namespace>/<name>\nmetadata:\n  name: redis-exporter # Specify a unique name\n  namespace: cm-prometheus  # Fixed namespace, no need to modify\n# Describes the selection and configuration of the target Pods to be scraped\n  labels:\n    operator.insight.io/managed-by: insight # Label indicating managed by Insight\nspec:\n  # Specify the label of the proper Pod, pod monitor will use this value as the job label value.\n  # If viewing the Pod YAML, use the values in pod.metadata.labels.\n  # If viewing Deployment/Daemonset/Statefulset, use spec.template.metadata.labels.\n  [ jobLabel: string ]\n  # Adds the proper Pod's Labels to the Target's Labels\n  [ podTargetLabels: []string ]\n  # Limit the number of data points per scrape, 0: no limit, default is 0\n  [ sampleLimit: uint64 ]\n  # Limit the number of targets per scrape, 0: no limit, default is 0\n  [ targetLimit: uint64 ]\n  # Configure the Prometheus HTTP endpoints that need to be scraped and exposed. Multiple endpoints can be configured.\n  podMetricsEndpoints:\n  [ - <endpoint_config> ... ] # See explanation below for endpoint\n  # Select the namespaces where the monitored Pods are located. Leave it blank to select all namespaces.\n  [ namespaceSelector: ]\n    # Select all namespaces\n    [ any: bool ]\n    # Specify the list of namespaces to be selected\n    [ matchNames: []string ]\n  # Specify the Label values of the Pods to be monitored in order to locate the target Pods [K8S metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)\n  selector:\n    [ matchExpressions: array ]\n      [ example: - {key: tier, operator: In, values: [cache]} ]\n    [ matchLabels: object ]\n      [ example: k8s-app: redis-exporter ]\n
                                  "},{"location":"en/admin/insight/collection-manag/metric-collect.html#example-1","title":"Example 1","text":"
                                  apiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: redis-exporter # Specify a unique name\n  namespace: cm-prometheus # Fixed namespace, do not modify\n  labels:\n    operator.insight.io/managed-by: insight  # Label indicating managed by Insight, required.\nspec:\n  podMetricsEndpoints:\n    - interval: 30s\n      port: metric-port # Specify the Port Name proper to Prometheus Exporter in the pod YAML\n      path: /metrics # Specify the value of the Path proper to Prometheus Exporter, if not specified, default is /metrics\n      relabelings:\n        - action: replace\n          sourceLabels:\n            - instance\n          regex: (.*)\n          targetLabel: instance\n          replacement: \"crs-xxxxxx\" # Adjust to the proper Redis instance ID\n        - action: replace\n          sourceLabels:\n            - instance\n          regex: (.*)\n          targetLabel: ip\n          replacement: \"1.x.x.x\" # Adjust to the proper Redis instance IP\n  namespaceSelector: # Select the namespaces where the monitored Pods are located\n    matchNames:\n      - redis-test\n  selector: # Specify the Label values of the Pods to be monitored in order to locate the target pods\n    matchLabels:\n      k8s-app: redis-exporter\n
                                  "},{"location":"en/admin/insight/collection-manag/metric-collect.html#example-2","title":"Example 2","text":"
                                  job_name: prometheus\nscrape_interval: 30s\nstatic_configs:\n- targets:\n  - 127.0.0.1:9090\n
                                  "},{"location":"en/admin/insight/collection-manag/metric-collect.html#service-monitor","title":"Service Monitor","text":"

                                  The corresponding configuration items are explained as follows:

                                  # Prometheus Operator CRD version\napiVersion: monitoring.coreos.com/v1\n# proper Kubernetes resource type, here it is ServiceMonitor\nkind: ServiceMonitor\n# proper Kubernetes Metadata, only the name needs to be concerned. If jobLabel is not specified, the value of the job label in the scraped metrics will be the name of the Service.\nmetadata:\n  name: redis-exporter # Specify a unique name\n  namespace: cm-prometheus  # Fixed namespace, no need to modify\n# Describes the selection and configuration of the target Pods to be scraped\n  labels:\n    operator.insight.io/managed-by: insight # Label indicating managed by Insight, required.\nspec:\n  # Specify the label(metadata/labels) of the proper Pod, service monitor will use this value as the job label value.\n  [ jobLabel: string ]\n  # Adds the Labels of the proper service to the Target's Labels\n  [ targetLabels: []string ]\n  # Adds the Labels of the proper Pod to the Target's Labels\n  [ podTargetLabels: []string ]\n  # Limit the number of data points per scrape, 0: no limit, default is 0\n  [ sampleLimit: uint64 ]\n  # Limit the number of targets per scrape, 0: no limit, default is 0\n  [ targetLimit: uint64 ]\n  # Configure the Prometheus HTTP endpoints that need to be scraped and exposed. Multiple endpoints can be configured.\n  endpoints:\n  [ - <endpoint_config> ... ] # See explanation below for endpoint\n  # Select the namespaces where the monitored Pods are located. Leave it blank to select all namespaces.\n  [ namespaceSelector: ]\n    # Select all namespaces\n    [ any: bool ]\n    # Specify the list of namespaces to be selected\n    [ matchNames: []string ]\n  # Specify the Label values of the Pods to be monitored in order to locate the target Pods [K8S metav1.LabelSelector](https://v1-17.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#labelselector-v1-meta)\n  selector:\n    [ matchExpressions: array ]\n      [ example: - {key: tier, operator: In, values: [cache]} ]\n    [ matchLabels: object ]\n      [ example: k8s-app: redis-exporter ]\n
                                  "},{"location":"en/admin/insight/collection-manag/metric-collect.html#example","title":"Example","text":"
                                  apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: go-demo # Specify a unique name\n  namespace: cm-prometheus # Fixed namespace, do not modify\n  labels:\n    operator.insight.io/managed-by: insight  # Label indicating managed by Insight, required.\nspec:\n  endpoints:\n    - interval: 30s\n      # Specify the Port Name proper to Prometheus Exporter in the service YAML\n      port: 8080-8080-tcp\n      # Specify the value of the Path proper to Prometheus Exporter, if not specified, default is /metrics\n      path: /metrics\n      relabelings:\n        # ** There must be a label named 'application', assuming there is a label named 'app' in k8s,\n        # we replace it with 'application' using the relabel 'replace' action\n        - action: replace\n          sourceLabels: [__meta_kubernetes_pod_label_app]\n          targetLabel: application\n  # Select the namespace where the monitored service is located\n  namespaceSelector:\n    matchNames:\n      - golang-demo\n  # Specify the Label values of the service to be monitored in order to locate the target service\n  selector:\n    matchLabels:\n      app: golang-app-demo\n
                                  "},{"location":"en/admin/insight/collection-manag/metric-collect.html#endpoint_config","title":"endpoint_config","text":"

                                  The corresponding configuration items are explained as follows:

                                  # The name of the proper port. Please note that it's not the actual port number.\n# Default: 80. Possible values are as follows:\n# ServiceMonitor: corresponds to Service>spec/ports/name;\n# PodMonitor: explained as follows:\n#   If viewing the Pod YAML, take the value from pod.spec.containers.ports.name.\n#   If viewing Deployment/DaemonSet/StatefulSet, take the value from spec.template.spec.containers.ports.name.\n[ port: string | default = 80]\n# The URI path for the scrape request.\n[ path: string | default = /metrics ]\n# The protocol for the scrape: http or https.\n[ scheme: string | default = http]\n# URL parameters for the scrape request.\n[ params: map[string][]string]\n# The interval between scrape requests.\n[ interval: string | default = 30s ]\n# The timeout for the scrape request.\n[ scrapeTimeout: string | default = 30s]\n# Whether the scrape connection should be made over a secure TLS channel, and the TLS configuration.\n[ tlsConfig: TLSConfig ]\n# Read the bearer token value from the specified file and include it in the headers of the scrape request.\n[ bearerTokenFile: string ]\n# Read the bearer token from the specified K8S secret key. Note that the secret namespace must match the PodMonitor/ServiceMonitor.\n[ bearerTokenSecret: string ]\n# Handling conflicts when scraped labels conflict with labels added by the backend Prometheus.\n# true: Keep the scraped labels and ignore the conflicting labels from the backend Prometheus.\n# false: For conflicting labels, prefix the scraped label with 'exported_<original-label>' and add the labels added by the backend Prometheus.\n[ honorLabels: bool | default = false ]\n# Whether to use the timestamp generated on the target during the scrape.\n# true: Use the timestamp on the target if available.\n# false: Ignore the timestamp on the target.\n[ honorTimestamps: bool | default = true ]\n# Basic authentication credentials. Fill in the values of username/password from the proper K8S secret key. Note that the secret namespace must match the PodMonitor/ServiceMonitor.\n[ basicAuth: BasicAuth ]\n# Scrape the metrics from the target through a proxy server. Specify the address of the proxy server.\n[ proxyUrl: string ]\n# After scraping the data, rewrite the values of the labels on the target using the relabeling mechanism. Multiple relabel rules are executed in order.\n# See explanation below for relabel_config\nrelabelings:\n[ - <relabel_config> ...]\n# Before writing the scraped data, rewrite the values of the proper labels on the target using the relabeling mechanism. Multiple relabel rules are executed in order.\n# See explanation below for relabel_config\nmetricRelabelings:\n[ - <relabel_config> ...]\n
                                  "},{"location":"en/admin/insight/collection-manag/metric-collect.html#relabel_config","title":"relabel_config","text":"

                                  The corresponding configuration items are explained as follows:

                                  # Specifies which labels to take from the original labels for relabeling. The values taken are concatenated using the separator defined in the configuration.\n# For PodMonitor/ServiceMonitor, the proper configmap is sourceLabels.\n[ source_labels: '[' <labelname> [, ...] ']' ]\n# Defines the character used to concatenate the values of the labels to be relabeled. Default is ';'.\n[ separator: <string> | default = ; ]\n\n# When the action is replace/hashmod, target_label is used to specify the proper label name.\n# For PodMonitor/ServiceMonitor, the proper configmap is targetLabel.\n[ target_label: <labelname> ]\n\n# Regular expression used to match the values of the source labels.\n[ regex: <regex> | default = (.*) ]\n\n# Used when action is hashmod, it takes the modulus value based on the MD5 hash of the source label's value.\n[ modulus: <int> ]\n\n# Used when action is replace, it defines the expression to replace when the regex matches. It can use regular expression replacement with regex.\n[ replacement: <string> | default = $1 ]\n\n# Actions performed based on the matched values of regex. The available actions are as follows, with replace being the default:\n# replace: If the regex matches, replace the proper value with the value defined in replacement. Set the value using target_label and add the proper label.\n# keep: If the regex doesn't match, discard the value.\n# drop: If the regex matches, discard the value.\n# hashmod: Take the modulus of the MD5 hash of the source label's value based on the value specified in modulus.\n# Add a new label with a label name specified by target_label.\n# labelmap: If the regex matches, replace the proper label name with the value specified in replacement.\n# labeldrop: If the regex matches, delete the proper label.\n# labelkeep: If the regex doesn't match, delete the proper label.\n[ action: <relabel_action> | default = replace ]\n
                                  "},{"location":"en/admin/insight/collection-manag/probe-module.html","title":"Custom probers","text":"

                                  Insight uses the Blackbox Exporter provided by Prometheus as a blackbox monitoring solution, allowing detection of target instances via HTTP, HTTPS, DNS, ICMP, TCP, and gRPC. It can be used in the following scenarios:

                                  • HTTP/HTTPS: URL/API availability monitoring
                                  • ICMP: Host availability monitoring
                                  • TCP: Port availability monitoring
                                  • DNS: Domain name resolution

                                  On this page, we explain how to configure custom probers in an existing Blackbox ConfigMap.

                                  The ICMP prober is not enabled by default in Insight because it requires elevated permissions. Therefore, we will use the HTTP prober as an example to demonstrate how to modify the ConfigMap to achieve custom HTTP probing.

                                  "},{"location":"en/admin/insight/collection-manag/probe-module.html#procedure","title":"Procedure","text":"
                                  1. Go to Clusters in Container Management and enter the details of the target cluster.
                                  2. Click the left navigation bar and select ConfigMaps & Secrets -> ConfigMaps .
                                  3. Find the ConfigMap named insight-agent-prometheus-blackbox-exporter and click Edit YAML .

                                    Add custom probers under modules :

                                  HTTP Prober

                                  modules:\n  http_2xx:\n    prober: http\n    timeout: 5s\n    http:\n      valid_http_versions: [HTTP/1.1, HTTP/2]\n      valid_status_codes: []  # Defaults to 2xx\n      method: GET\n

                                  ICMP Prober

                                  modules:\n  ICMP: # Example of ICMP prober configuration\n    prober: icmp\n    timeout: 5s\n    icmp:\n      preferred_ip_protocol: ip4\n  icmp_example: # Example 2 of ICMP prober configuration\n    prober: icmp\n    timeout: 5s\n    icmp:\n      preferred_ip_protocol: \"ip4\"\n      source_ip_address: \"127.0.0.1\"\n
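
                                  To verify a module manually, you can port-forward the exporter and query its /probe endpoint (this assumes the default blackbox exporter port 9115 and the deployment name used below):

                                  kubectl -n insight-system port-forward deploy/insight-agent-prometheus-blackbox-exporter 9115:9115\ncurl \"http://localhost:9115/probe?module=http_2xx&target=https://example.com\"   # probe_success 1 means the probe passed\n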
                                  Since ICMP requires higher permissions, we also need to elevate the pod permissions. Otherwise, an operation not permitted error will occur. There are two ways to elevate permissions:

                                  • Directly edit the BlackBox Exporter deployment file to enable it

                                    apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: insight-agent-prometheus-blackbox-exporter\n  namespace: insight-system\nspec:\n  template:\n    spec:\n      containers:\n        - name: blackbox-exporter\n          image: # ... (image, args, ports, etc. remain unchanged)\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            allowPrivilegeEscalation: false\n            capabilities:\n              add:\n              - NET_RAW\n              drop:\n              - ALL\n            readOnlyRootFilesystem: true\n            runAsGroup: 0\n            runAsNonRoot: false\n            runAsUser: 0\n
                                  • Elevate permissions via helm upgrade

                                    prometheus-blackbox-exporter:\n  enabled: true\n  securityContext:\n    runAsUser: 0\n    runAsGroup: 0\n    readOnlyRootFilesystem: true\n    runAsNonRoot: false\n    allowPrivilegeEscalation: false\n    capabilities:\n      add: [\"NET_RAW\"]\n
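
                                    Assuming the values snippet above is saved to a file, the upgrade could then be applied as follows (the release and chart names are placeholders):

                                    helm upgrade insight-agent <insight-agent-chart> -n insight-system -f blackbox-values.yaml\n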

                                  Info

                                  For more probers, refer to blackbox_exporter Configuration.

                                  "},{"location":"en/admin/insight/collection-manag/probe-module.html#other-references","title":"Other References","text":"

                                  The following YAML file contains various probers such as HTTP, TCP, SMTP, ICMP, and DNS. You can modify the configuration file of insight-agent-prometheus-blackbox-exporter according to your needs.

                                  Click to view the complete YAML file
                                  kind: ConfigMap\napiVersion: v1\nmetadata:\n  name: insight-agent-prometheus-blackbox-exporter\n  namespace: insight-system\n  labels:\n    app.kubernetes.io/instance: insight-agent\n    app.kubernetes.io/managed-by: Helm\n    app.kubernetes.io/name: prometheus-blackbox-exporter\n    app.kubernetes.io/version: v0.24.0\n    helm.sh/chart: prometheus-blackbox-exporter-8.8.0\n  annotations:\n    meta.helm.sh/release-name: insight-agent\n    meta.helm.sh/release-namespace: insight-system\ndata:\n  blackbox.yaml: |\n    modules:\n      HTTP_GET:\n        prober: http\n        timeout: 5s\n        http:\n          method: GET\n          valid_http_versions: [\"HTTP/1.1\", \"HTTP/2.0\"]\n          follow_redirects: true\n          preferred_ip_protocol: \"ip4\"\n      HTTP_POST:\n        prober: http\n        timeout: 5s\n        http:\n          method: POST\n          body_size_limit: 1MB\n      TCP:\n        prober: tcp\n        timeout: 5s\n      # Not enabled by default:\n      # ICMP:\n      #   prober: icmp\n      #   timeout: 5s\n      #   icmp:\n      #     preferred_ip_protocol: ip4\n      SSH:\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n          - expect: \"^SSH-2.0-\"\n      POP3S:\n        prober: tcp\n        tcp:\n          query_response:\n          - expect: \"^+OK\"\n          tls: true\n          tls_config:\n            insecure_skip_verify: false\n      http_2xx_example:               # http prober example\n        prober: http\n        timeout: 5s                   # probe timeout\n        http:\n          valid_http_versions: [\"HTTP/1.1\", \"HTTP/2.0\"]                   # Version in the response, usually default\n          valid_status_codes: []  # Defaults to 2xx                       # Valid range of response codes, probe successful if within this range\n          method: GET                 # request method\n          headers:                    # request headers\n            Host: vhost.example.com\n            Accept-Language: en-US\n            Origin: example.com\n          no_follow_redirects: false  # allow redirects\n          fail_if_ssl: false   \n          fail_if_not_ssl: false\n          fail_if_body_matches_regexp:\n            - \"Could not connect to database\"\n          fail_if_body_not_matches_regexp:\n            - \"Download the latest version here\"\n          fail_if_header_matches: # Verifies that no cookies are set\n            - header: Set-Cookie\n              allow_missing: true\n              regexp: '.*'\n          fail_if_header_not_matches:\n            - header: Access-Control-Allow-Origin\n              regexp: '(\\*|example\\.com)'\n          tls_config:                  # tls configuration for https requests\n            insecure_skip_verify: false\n          preferred_ip_protocol: \"ip4\" # defaults to \"ip6\"                 # Preferred IP protocol version\n          ip_protocol_fallback: false  # no fallback to \"ip6\"            \n      http_post_2xx:                   # http prober example with body\n        prober: http\n        timeout: 5s\n        http:\n          method: POST                 # probe request method\n          headers:\n            Content-Type: application/json\n          body: '{\"username\":\"admin\",\"password\":\"123456\"}'                   # body carried during probe\n      http_basic_auth_example:         # prober example with username and password\n        prober: http\n        timeout: 5s\n        http:\n          method: 
POST\n          headers:\n            Host: \"login.example.com\"\n          basic_auth:                  # username and password to be added during probe\n            username: \"username\"\n            password: \"mysecret\"\n      http_custom_ca_example:\n        prober: http\n        http:\n          method: GET\n          tls_config:                  # root certificate used during probe\n            ca_file: \"/certs/my_cert.crt\"\n      http_gzip:\n        prober: http\n        http:\n          method: GET\n          compression: gzip            # compression method used during probe\n      http_gzip_with_accept_encoding:\n        prober: http\n        http:\n          method: GET\n          compression: gzip\n          headers:\n            Accept-Encoding: gzip\n      tls_connect:                     # TCP prober example\n        prober: tcp\n        timeout: 5s\n        tcp:\n          tls: true                    # use TLS\n      tcp_connect_example:\n        prober: tcp\n        timeout: 5s\n      imap_starttls:                   # IMAP email server probe configuration example\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - expect: \"OK.*STARTTLS\"\n            - send: \". STARTTLS\"\n            - expect: \"OK\"\n            - starttls: true\n            - send: \". capability\"\n            - expect: \"CAPABILITY IMAP4rev1\"\n      smtp_starttls:                   # SMTP email server probe configuration example\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - expect: \"^220 ([^ ]+) ESMTP (.+)$\"\n            - send: \"EHLO prober\\r\"\n            - expect: \"^250-STARTTLS\"\n            - send: \"STARTTLS\\r\"\n            - expect: \"^220\"\n            - starttls: true\n            - send: \"EHLO prober\\r\"\n            - expect: \"^250-AUTH\"\n            - send: \"QUIT\\r\"\n      irc_banner_example:\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - send: \"NICK prober\"\n            - send: \"USER prober prober prober :prober\"\n            - expect: \"PING :([^ ]+)\"\n              send: \"PONG ${1}\"\n            - expect: \"^:[^ ]+ 001\"\n      # icmp_example:                    # ICMP prober configuration example\n      #  prober: icmp\n      #  timeout: 5s\n      #  icmp:\n      #    preferred_ip_protocol: \"ip4\"\n      #    source_ip_address: \"127.0.0.1\"\n      dns_udp_example:                 # DNS query example using UDP\n        prober: dns\n        timeout: 5s\n        dns:\n          query_name: \"www.prometheus.io\"                 # domain name to resolve\n          query_type: \"A\"              # type proper to this domain\n          valid_rcodes:\n          - NOERROR\n          validate_answer_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n            fail_if_all_match_regexp:\n            - \".*127.0.0.1\"\n            fail_if_not_matches_regexp:\n            - \"www.prometheus.io.\\t300\\tIN\\tA\\t127.0.0.1\"\n            fail_if_none_matches_regexp:\n            - \"127.0.0.1\"\n          validate_authority_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n          validate_additional_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n      dns_soa:\n        prober: dns\n        dns:\n          query_name: \"prometheus.io\"\n          query_type: \"SOA\"\n      dns_tcp_example:               # DNS query example using TCP\n        
prober: dns\n        dns:\n          transport_protocol: \"tcp\" # defaults to \"udp\"\n          preferred_ip_protocol: \"ip4\" # defaults to \"ip6\"\n          query_name: \"www.prometheus.io\"\n
                                  "},{"location":"en/admin/insight/collection-manag/service-monitor.html","title":"Configure service discovery rules","text":"

                                  Insight supports creating the ServiceMonitor CRD through Container Management to meet custom service discovery collection requirements. Users can use a ServiceMonitor to define the namespace scope in which Pods are discovered and to select the monitored Services via matchLabel .

                                  "},{"location":"en/admin/insight/collection-manag/service-monitor.html#prerequisites","title":"Prerequisites","text":"

                                  The cluster has the Helm App insight-agent installed and in the running state.
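
                                  You can verify this prerequisite with Helm (assuming the default release name and namespace):

                                  helm status insight-agent -n insight-system   # the release status should be \"deployed\"\n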

                                  "},{"location":"en/admin/insight/collection-manag/service-monitor.html#steps","title":"Steps","text":"
                                  1. Select Data Collection on the left navigation bar to view the status of all cluster collection plug-ins.

                                  2. Click a cluster name to enter the collection configuration details.

                                  3. Click the link to jump to Container Management to create a Service Monitor.

                                    apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: micrometer-demo # (1)\n  namespace: insight-system # (2)\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  endpoints: # (3)\n    - honorLabels: true\n      interval: 15s\n      path: /actuator/prometheus\n      port: http\n  namespaceSelector: # (4)\n    matchNames:\n      - insight-system  # (5)\n  selector: # (6)\n    matchLabels:\n      micrometer-prometheus-discovery: \"true\"\n
                                    1. Specify the name of the ServiceMonitor.
                                    2. Specify the namespace of the ServiceMonitor.
                                    3. This is the service endpoint, which represents the address where Prometheus collects Metrics. endpoints is an array, and multiple endpoints can be created at the same time. Each endpoint contains three fields, and the meaning of each field is as follows:

                                      • interval : Specifies the collection cycle of Prometheus for the current endpoint . The unit is seconds, set to 15s in this example.
                                      • path : Specifies the collection path of Prometheus. In this example, it is specified as /actuator/prometheus .
                                      • port : Specifies the port used for collection. The value is the port name defined on the Service being collected, not the port number.
                                    4. This is the scope of the Service that needs to be discovered. namespaceSelector contains two mutually exclusive fields, and the meaning of the fields is as follows:

                                      • any : The only allowed value is true . When this field is set, changes to all Services matching the selector, across all namespaces, are watched.
                                      • matchNames : An array value that specifies the scope of namespace to be monitored. For example, if you only want to monitor the Services in two namespaces, default and insight-system, the matchNames are set as follows:

                                        namespaceSelector:\n  matchNames:\n    - default\n    - insight-system\n
                                    5. The namespace where the application that needs to expose metrics is located

                                    6. Used to select the Service
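
                                  After the ServiceMonitor is created, a quick check that it was accepted (names follow the example above):

                                  kubectl get servicemonitor micrometer-demo -n insight-system\n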
                                  "},{"location":"en/admin/insight/compati-test/k8s-compatibility.html","title":"Kubernetes Cluster Compatibility Test","text":"

                                  \u2705: Test passed; \u274c: Test failed; No Value: Test not conducted.

                                  "},{"location":"en/admin/insight/compati-test/k8s-compatibility.html#kubernetes-compatibility-testing-for-insight-server","title":"Kubernetes Compatibility Testing for Insight Server","text":"Scenario Testing Method K8s 1.31 K8s 1.30 K8s 1.29 K8s 1.28 K8s 1.27 K8s 1.26 k8s 1.25.0 k8s 1.24 k8s 1.23 k8s 1.22 Baseline Scenario E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 Metrics Query E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 Logs Query E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 Traces Query E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 Alert Center E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 Topology Query E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705"},{"location":"en/admin/insight/compati-test/k8s-compatibility.html#kubernetes-compatibility-testing-for-insight-agent","title":"Kubernetes Compatibility Testing for Insight-agent","text":"Scenario Testing Method K8s 1.31 K8s 1.30 K8s 1.29 K8s 1.28 K8s 1.27 K8s 1.26 k8s 1.25 k8s 1.24 k8s 1.23 k8s 1.22 k8s 1.21 k8s 1.20 k8s 1.19 k8s 1.18 k8s 1.17 k8s 1.16 Baseline Scenario E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c Metrics Query E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c Logs Query E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c Traces Query E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c Alert Center E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c Topology Query E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c

                                  Note

                                  Insight-agent Version Compatibility History:

1. Starting from v0.16.x, Insight Agent is no longer compatible with k8s v1.16.15.
                                  2. Insight Agent v0.20.0 is compatible with k8s v1.18.20.
                                  3. Insight Agent v0.19.2/v0.18.2/v0.17.x is not compatible with k8s v1.18.20.
4. Insight Agent v0.30.1 is compatible with k8s v1.18.x and earlier versions.
                                  "},{"location":"en/admin/insight/compati-test/ocp-compatibility.html","title":"Openshift 4.x Cluster Compatibility Test","text":"

                                  \u2705: Test passed; \u274c: Test failed.

                                  Note

                                  The table does not cover all test scenarios.

                                  Test Case Test Method OCP 4.10 (K8s 1.23.0) Remarks Collect and query web application metrics Manual \u2705 - Add custom metric collection Manual \u2705 - Query real-time metrics Manual \u2705 - Instantaneous index query Manual \u2705 - Instantaneous metric API field verification Manual \u2705 - Query metrics over a period of time Manual \u2705 - Metric API field verification within a period of time Manual \u2705 - Batch query cluster CPU, memory usage, total cluster CPU, cluster memory usage, total number of cluster nodes Manual \u2705 - Batch query node CPU, memory usage, total node CPU, node memory usage Manual \u2705 - Batch query cluster metrics within a period of time Manual \u2705 - Metric API field verification within a period of time Manual \u2705 - Query Pod log Manual \u2705 - Query SVC log Manual \u2705 - Query statefulset logs Manual \u2705 - Query Deployment Logs Manual \u2705 - Query NPD log Manual \u2705 - Log Filtering Manual \u2705 - Log fuzzy query - workloadSearch Manual \u2705 - Log fuzzy query - podSearch Manual \u2705 - Log fuzzy query - containerSearch Manual \u2705 - Log Accurate Query - cluster Manual \u2705 - Log Accurate Query - namespace Manual \u2705 - Log query API field verification Manual \u2705 - Alert Rule - CRUD operations Manual \u2705 - Alert Template - CRUD operations Manual \u2705 - Notification Method - CRUD operations Manual \u2705 - Link Query Manual \u2705 - Topology Query Manual \u2705 -

The table above shows the Openshift 4.x cluster compatibility test. It lists the test cases, their test method (manual), and the results for OCP 4.10 (Kubernetes 1.23.0).

                                  Please note that this is not an exhaustive list, and additional test scenarios may exist.

                                  "},{"location":"en/admin/insight/compati-test/rancher-compatibility.html","title":"Rancher Cluster Compatibility Test","text":"

                                  \u2705: Test passed; \u274c: Test failed.

                                  Note

                                  The table does not cover all test scenarios.

                                  Test Scenario Test Method Rancher rke2c1 (K8s 1.24.11) Notes Collect and query web application metrics Manual \u2705 - Add custom metric collection Manual \u2705 - Query real-time metrics Manual \u2705 - Instantaneous index query Manual \u2705 - Instantaneous metric API field verification Manual \u2705 - Query metrics over a period of time Manual \u2705 - Metric API field verification within a period of time Manual \u2705 - Batch query cluster CPU, memory usage, total cluster CPU, cluster memory usage, total number of cluster nodes Manual \u2705 - Batch query node CPU, memory usage, total node CPU, node memory usage Manual \u2705 - Batch query cluster metrics within a period of time Manual \u2705 - Metric API field verification within a period of time Manual \u2705 - Query Pod log Manual \u2705 - Query SVC log Manual \u2705 - Query statefulset logs Manual \u2705 - Query Deployment Logs Manual \u2705 - Query NPD log Manual \u2705 - Log Filtering Manual \u2705 - Log fuzzy query - workloadSearch Manual \u2705 - Log fuzzy query - podSearch Manual \u2705 - Log fuzzy query - containerSearch Manual \u2705 - Log Accurate Query - cluster Manual \u2705 - Log Accurate Query - namespace Manual \u2705 - Log query API field verification Manual \u2705 - Alert Rule - CRUD operations Manual \u2705 - Alert Template - CRUD operations Manual \u2705 - Notification Method - CRUD operations Manual \u2705 - Link Query Manual \u2705 - Topology Query Manual \u2705 -"},{"location":"en/admin/insight/dashboard/dashboard.html","title":"Dashboard","text":"

                                  Grafana is a cross-platform open source visual analysis tool. Insight uses open source Grafana to provide monitoring services, and supports viewing resource consumption from multiple dimensions such as clusters, nodes, and namespaces.

                                  For more information on open source Grafana, see Grafana Official Documentation.

                                  "},{"location":"en/admin/insight/dashboard/dashboard.html#steps","title":"Steps","text":"
                                  1. Select Dashboard from the left navigation bar .

                                    • In the Insight / Overview dashboard, you can view the resource usage of multiple clusters and analyze resource usage, network, storage, and more based on dimensions such as namespaces and Pods.

                                    • Click the dropdown menu in the upper-left corner of the dashboard to switch between clusters.

                                    • Click the lower-right corner of the dashboard to switch the time range for queries.

                                  2. Insight provides several recommended dashboards that allow monitoring from different dimensions such as nodes, namespaces, and workloads. Switch between dashboards by clicking the insight-system / Insight / Overview section.

                                  Note

                                  1. For accessing Grafana UI, refer to Access Native Grafana.

                                  2. For importing custom dashboards, refer to Importing Custom Dashboards.

                                  "},{"location":"en/admin/insight/dashboard/import-dashboard.html","title":"Import Custom Dashboards","text":"

By using the Grafana CRD, you can incorporate the management and deployment of dashboards into the lifecycle management of Kubernetes, enabling version control, automated deployment, and cluster-level management of dashboards. This page describes how to import custom dashboards using the CRD through the UI.

                                  "},{"location":"en/admin/insight/dashboard/import-dashboard.html#steps","title":"Steps","text":"
1. Log in to the AI platform and go to Container Management . Select the kpanda-global-cluster from the cluster list.

2. Choose Custom Resources from the left navigation bar. Look for grafanadashboards.integreatly.org in the list and click it to view the details.

3. Click YAML Create and use the following template. Replace the dashboard JSON in the json field.

                                    • namespace : Specify the target namespace.
                                    • name : Provide a name for the dashboard.
                                    • label : Mandatory. Set the label as operator.insight.io/managed-by: insight .
                                    apiVersion: integreatly.org/v1alpha1\nkind: GrafanaDashboard\nmetadata:\n  labels:\n    app: insight-grafana-operator\n    operator.insight.io/managed-by: insight\n  name: sample-dashboard\n  namespace: insight-system\nspec:\n  json: >\n    {\n      \"id\": null,\n      \"title\": \"Simple Dashboard\",\n      \"tags\": [],\n      \"style\": \"dark\",\n      \"timezone\": \"browser\",\n      \"editable\": true,\n      \"hideControls\": false,\n      \"graphTooltip\": 1,\n      \"panels\": [],\n      \"time\": {\n        \"from\": \"now-6h\",\n        \"to\": \"now\"\n      },\n      \"timepicker\": {\n        \"time_options\": [],\n        \"refresh_intervals\": []\n      },\n      \"templating\": {\n        \"list\": []\n      },\n      \"annotations\": {\n        \"list\": []\n      },\n      \"refresh\": \"5s\",\n      \"schemaVersion\": 17,\n      \"version\": 0,\n      \"links\": []\n    }\n
4. After clicking OK , wait a moment, then view the newly imported dashboard in Dashboard .

                                  Info

                                  If you need to customize the dashboard, refer to Add Dashboard Panel.

                                  "},{"location":"en/admin/insight/dashboard/login-grafana.html","title":"Access Native Grafana","text":"

                                  Please make sure that the Helm App Insight in your global management cluster is in Running state.

Follow these steps:

                                  1. Log in to the console to access native Grafana.

                                    Access address: http://ip:port/ui/insight-grafana

                                    For example: http://10.6.10.233:30209/ui/insight-grafana

                                  2. Click Login in the lower right corner, and use the default username and password to log in.

                                    • Default username: admin

                                    • Default password: admin

                                  3. Click Log in to complete the login.

                                  "},{"location":"en/admin/insight/dashboard/overview.html","title":"Overview","text":"

Insight only collects data from clusters that have insight-agent installed and running normally. The Overview page summarizes resources across multiple clusters:

                                  • Alert Statistics: Provides statistics on active alerts across all clusters.
                                  • Resource Consumption: Displays the resource usage trends for the top 5 clusters and nodes in the past hour, based on CPU usage, memory usage, and disk usage.
                                  • By default, the sorting is based on CPU usage. You can switch the metric to sort clusters and nodes.
                                  • Resource Trends: Shows the trends in the number of nodes over the past 15 days and the running trend of pods in the last hour.
                                  • Service Requests Ranking: Displays the top 5 services with the highest request latency and error rates, along with their respective clusters and namespaces in the multi-cluster environment.
                                  "},{"location":"en/admin/insight/dashboard/overview.html#operation-procedure","title":"Operation procedure","text":"

                                  Select Overview in the left navigation bar to enter the details page.

                                  "},{"location":"en/admin/insight/data-query/log.html","title":"Log query","text":"

                                  By default, Insight collects node logs, container logs, and Kubernetes audit logs. In the log query page, you can search for standard output (stdout) logs within the permissions of your login account. This includes node logs, product logs, and Kubernetes audit logs. You can quickly find the desired logs among a large volume of logs. Additionally, you can use the source information and contextual raw data of the logs to assist in troubleshooting and issue resolution.

                                  "},{"location":"en/admin/insight/data-query/log.html#prerequisites","title":"Prerequisites","text":"

                                  The cluster has insight-agent installed and the application is in running state.

                                  "},{"location":"en/admin/insight/data-query/log.html#query-log","title":"Query log","text":"
                                  1. In the left navigation bar, select Data Query -> Log Query .

                                  2. After selecting the query criteria, click Search , and the log records in the form of graphs will be displayed. The most recent logs are displayed on top.

                                  3. In the Filter panel, switch Type and select Node to check the logs of all nodes in the cluster.

                                  4. In the Filter panel, switch Type and select Event to view the logs generated by all Kubernetes events in the cluster.

                                  Lucene Syntax Explanation:

                                  1. Use logical operators (AND, OR, NOT, \"\") to query multiple keywords. For example: keyword1 AND (keyword2 OR keyword3) NOT keyword4.
                                  2. Use a tilde (~) for fuzzy queries. You can optionally specify a parameter after the \"~\" to control the similarity of the fuzzy query. If not specified, it defaults to 0.5. For example: error~.
3. Use wildcards: ? matches a single character, and * matches zero or more characters. For example: err*r .
4. Use square brackets [ ] or curly braces { } for range queries. Square brackets [ ] represent a closed interval and include the boundary values. Curly braces { } represent an open interval and exclude the boundary values. Range queries are applicable only to fields that can be sorted, such as numeric fields and date fields. For example: timestamp:[2022-01-01 TO 2022-01-31].
                                  5. For more information, please refer to the Lucene Syntax Explanation.
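
As an illustration, the operators above can be combined in a single query; the search terms here are hypothetical:

error~ AND (timeout OR refused) NOT debug AND timestamp:[2022-01-01 TO 2022-01-31]\n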
                                  "},{"location":"en/admin/insight/data-query/log.html#view-log-context","title":"View log context","text":"

Clicking the button next to a log slides out a panel on the right, where 100 lines of context for that log are shown by default. You can switch the Display Rows option to view more context.

                                  "},{"location":"en/admin/insight/data-query/log.html#export-log","title":"Export log","text":"

                                  Click the download button located in the upper right corner of the list.

                                  • You can configure the exported log fields. The available fields may vary depending on the log type, with the Log Content field being mandatory.
                                  • You can export the log query results in .txt or .csv format.

                                  "},{"location":"en/admin/insight/data-query/metric.html","title":"Metric query","text":"

Metric query supports querying the metric data of each container resource, so you can view trends in the monitoring metrics over time. Advanced query also supports native PromQL statements.

                                  "},{"location":"en/admin/insight/data-query/metric.html#prerequisites","title":"Prerequisites","text":"
                                  • The cluster has insight-agent installed and the application is in running state.
                                  "},{"location":"en/admin/insight/data-query/metric.html#common-query","title":"Common query","text":"
                                  1. In the left navigation bar, click Data Query -> Metrics .

2. After selecting query conditions such as cluster, type, node, and metric name, click Search , and the corresponding metric chart and data details will be displayed on the right side of the screen.

                                  Tip

Custom time ranges are supported. You can click the Refresh icon to refresh manually, or select a preset time interval.

                                  "},{"location":"en/admin/insight/data-query/metric.html#advanced-search","title":"Advanced Search","text":"
1. In the left navigation bar, click Data Query -> Metrics , then click the Advanced Query tab to switch to the advanced query page.

                                  2. Enter a PromQL statement (see PromQL Syntax), click Query , and the query metric chart and data details will be displayed.
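
For example, a statement of the following shape charts per-pod CPU usage within a namespace; the namespace value is illustrative:

sum(rate(container_cpu_usage_seconds_total{namespace=\"insight-system\"}[5m])) by (pod)\n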

                                  "},{"location":"en/admin/insight/faq/expand-once-es-full.html","title":"What to Do When ElasticSearch is Full?","text":"

When ElasticSearch storage is full, you can either scale up or delete data to resolve the issue:

You can run the following command to check the disk usage of the data directory on each ES node.

                                  kubectl get pod -n mcamel-system | grep common-es-cluster-masters-es | awk '{print $1}' | xargs -I {} kubectl exec {} -n mcamel-system -c elasticsearch -- df -h | grep /usr/share/elasticsearch/data\n
                                  "},{"location":"en/admin/insight/faq/expand-once-es-full.html#scale-up","title":"Scale Up","text":"

                                  If the host still has available resources, scaling up is a common solution, which involves increasing the PVC capacity.

1. First, run the following command to edit the PVC of the es-data-0 node. Replace the PVC name with the actual one in your environment.

                                    kubectl edit -n mcamel-system pvc elasticsearch-data-mcamel-common-es-cluster-masters-es-data-0\n
2. Then modify the storage field below (the StorageClass you are using must support volume expansion):

                                    spec:\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 35Gi # (1)!\n
                                    1. Adjust this value as needed.
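
Equivalently, the resize can be applied with a one-line patch; a sketch, where the 50Gi target size is illustrative:

kubectl patch pvc -n mcamel-system elasticsearch-data-mcamel-common-es-cluster-masters-es-data-0 \\\n  -p '{\"spec\":{\"resources\":{\"requests\":{\"storage\":\"50Gi\"}}}}'\n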
                                  "},{"location":"en/admin/insight/faq/expand-once-es-full.html#delete-data","title":"Delete Data","text":"

When ElasticSearch storage is full, you can also delete index data to free up resources.

                                  You can follow the steps below to access the Kibana page and manually delete data.

                                  1. First, ensure that the Kibana Pod exists and is running normally:

                                    kubectl get po -n mcamel-system | grep mcamel-common-es-cluster-masters-kb\n
2. If it does not exist, manually scale the replicas to 1 and wait for the service to run normally. If it exists, skip this step.

                                    kubectl scale -n mcamel-system deployment mcamel-common-es-cluster-masters-kb --replicas 1\n
                                  3. Modify the Kibana Service to be exposed as a NodePort for access:

                                    kubectl patch svc -n mcamel-system mcamel-common-es-cluster-masters-kb-http -p '{\"spec\":{\"type\":\"NodePort\"}}'\n\n# After modification, check the NodePort. For example, if the port is 30128, the access URL will be https://{NodeIP in the cluster}:30128\n[root@insight-master1 ~]# kubectl get svc -n mcamel-system | grep mcamel-common-es-cluster-masters-kb-http\nmcamel-common-es-cluster-masters-kb-http   NodePort    10.233.51.174   <none>   5601:30128/TCP    108m\n
                                  4. Retrieve the ElasticSearch Secret to log in to Kibana (username is elastic):

                                    kubectl get secrets -n mcamel-system mcamel-common-es-cluster-masters-es-elastic-user -o jsonpath=\"{.data.elastic}\" | base64 -d\n
5. Go to Kibana -> Stack Management -> Index Management and enable the Include hidden indices option to see all indices. Based on the index sequence numbers, keep the indices with higher numbers (newer data) and delete those with lower numbers (older data).
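
If you prefer the command line over Kibana, old indices can also be removed through the ES REST API. A sketch, where the ES service name is assumed from the pod naming above and the index name is illustrative:

# Port-forward the ES HTTP service (service name assumed from the naming above)\nkubectl port-forward -n mcamel-system svc/mcamel-common-es-cluster-masters-es-http 9200:9200\n# Delete an old index (index name is illustrative)\ncurl -k -u elastic:$PASSWORD -X DELETE \"https://localhost:9200/insight-es-k8s-logs-000001\"\n

Here $PASSWORD is the secret value retrieved in step 4.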

                                  "},{"location":"en/admin/insight/faq/traceclockskew.html","title":"Clock offset in trace data","text":"

In a distributed system, clock skew causes time drift between hosts: at any given moment, the system clocks of different hosts deviate slightly from one another.

A tracing system is a typical distributed system, and its time data collection is affected by this phenomenon. For example, within one trace, the server-side span may appear to start earlier than the client-side span. This is logically impossible, but because the hosts' system clocks deviate at the moments when each service records its trace data, the result is the phenomenon shown in the following figure:

This phenomenon cannot be eliminated in theory. However, it is rare, and even when it occurs, it does not affect the calling relationship between services.

Insight currently uses the Jaeger UI to display trace data, and the UI shows a reminder when it encounters such a trace:

The Jaeger community is currently trying to mitigate this problem at the UI level.

                                  For more information, refer to:

                                  • Clock Skew Adjuster considered harmful
                                  • Add ability to display unadjusted trace in the UI
                                  • Clock Skew Adjustment
                                  "},{"location":"en/admin/insight/infra/cluster.html","title":"Cluster Monitoring","text":"

Through cluster monitoring, you can view basic cluster information, its resource consumption, and resource consumption trends over a period of time.

                                  "},{"location":"en/admin/insight/infra/cluster.html#prerequisites","title":"Prerequisites","text":"

                                  The cluster has insight-agent installed and the application is in running state.

                                  "},{"location":"en/admin/insight/infra/cluster.html#steps","title":"Steps","text":"
                                  1. Go to the Insight product module.

                                  2. Select Infrastructure > Clusters from the left navigation bar. On this page, you can view the following information:

                                    • Resource Overview: Provides statistics on the number of normal/all nodes and workloads across multiple clusters.
                                    • Fault: Displays the number of alerts generated in the current cluster.
                                    • Resource Consumption: Shows the actual usage and total capacity of CPU, memory, and disk for the selected cluster.
                                    • Metric Explanations: Describes the trends in CPU, memory, disk I/O, and network bandwidth.

3. Click Resource Level Monitor to view more metrics of the current cluster.

                                  "},{"location":"en/admin/insight/infra/cluster.html#metric-explanations","title":"Metric Explanations","text":"Metric Name Description CPU Usage The ratio of the actual CPU usage of all pod resources in the cluster to the total CPU capacity of all nodes. CPU Allocation The ratio of the sum of CPU requests of all pods in the cluster to the total CPU capacity of all nodes. Memory Usage The ratio of the actual memory usage of all pod resources in the cluster to the total memory capacity of all nodes. Memory Allocation The ratio of the sum of memory requests of all pods in the cluster to the total memory capacity of all nodes."},{"location":"en/admin/insight/infra/container.html","title":"Container Insight","text":"

Container insight means monitoring the workloads in cluster management. In the list, you can view the basic information and status of workloads. On the workload details page, you can see the number of active alerts and the trend of resource consumption such as CPU and memory.

                                  "},{"location":"en/admin/insight/infra/container.html#prerequisites","title":"Prerequisites","text":"
                                  • The cluster has insight-agent installed, and all pods are in the Running state.

                                  • To install insight-agent, please refer to: Installing insight-agent online or Offline upgrade of insight-agent.

                                  "},{"location":"en/admin/insight/infra/container.html#steps","title":"Steps","text":"

                                  Follow these steps to view service monitoring metrics:

                                  1. Go to the Insight product module.

                                  2. Select Infrastructure > Workloads from the left navigation bar.

                                  3. Switch between tabs at the top to view data for different types of workloads.

                                  4. Click the target workload name to view the details.

                                    1. Faults: Displays the total number of active alerts for the workload.
                                    2. Resource Consumption: Shows the CPU, memory, and network usage of the workload.
                                    3. Monitoring Metrics: Provides the trends of CPU, Memory, Network, and disk usage for the workload over the past hour.

                                  5. Switch to the Pods tab to view the status of various pods for the workload, including their nodes, restart counts, and other information.

6. Switch to the JVM monitor tab to view the JVM metrics for each pod.

                                    Note

                                    1. The JVM monitoring feature only supports the Java language.
                                    2. To enable the JVM monitoring feature, refer to Getting Started with Monitoring Java Applications.
                                  "},{"location":"en/admin/insight/infra/container.html#metric-explanations","title":"Metric Explanations","text":"Metric Name Description CPU Usage The sum of CPU usage for all pods under the workload. CPU Requests The sum of CPU requests for all pods under the workload. CPU Limits The sum of CPU limits for all pods under the workload. Memory Usage The sum of memory usage for all pods under the workload. Memory Requests The sum of memory requests for all pods under the workload. Memory Limits The sum of memory limits for all pods under the workload. Disk Read/Write Rate The total number of continuous disk reads and writes per second within the specified time range, representing a performance measure of the number of read and write operations per second on the disk. Network Send/Receive Rate The incoming and outgoing rates of network traffic, aggregated by workload, within the specified time range."},{"location":"en/admin/insight/infra/event.html","title":"Event Query","text":"

                                  AI platform Insight supports event querying by cluster and namespace.

                                  "},{"location":"en/admin/insight/infra/event.html#event-status-distribution","title":"Event Status Distribution","text":"

                                  By default, the events that occurred within the last 12 hours are displayed. You can select a different time range in the upper right corner to view longer or shorter periods. You can also customize the sampling interval from 1 minute to 5 hours.

                                  The event status distribution chart provides a visual representation of the intensity and dispersion of events. This helps in evaluating and preparing for subsequent cluster operations and maintenance tasks. If events are densely concentrated during specific time periods, you may need to allocate more resources or take proper measures to ensure cluster stability and high availability. On the other hand, if events are dispersed, you can effectively schedule other maintenance tasks such as system optimization, upgrades, or handling other tasks during this period.

                                  By considering the event status distribution chart and the selected time range, you can better plan and manage your cluster operations and maintenance work, ensuring system stability and reliability.

                                  "},{"location":"en/admin/insight/infra/event.html#event-count-and-statistics","title":"Event Count and Statistics","text":"

                                  Through important event statistics, you can easily understand the number of image pull failures, health check failures, Pod execution failures, Pod scheduling failures, container OOM (Out-of-Memory) occurrences, volume mounting failures, and the total count of all events. These events are typically categorized as \"Warning\" and \"Normal\".

                                  "},{"location":"en/admin/insight/infra/event.html#event-list","title":"Event List","text":"

The event list is presented in chronological order. You can sort the events by Last Occurred At and Type .

                                  By clicking on the \u2699\ufe0f icon on the right side, you can customize the displayed columns according to your preferences and needs.

                                  Additionally, you can click the refresh icon to update the current event list when needed.

                                  In the operation column on the right, clicking the icon allows you to view the history of a specific event.

                                  "},{"location":"en/admin/insight/infra/event.html#reference","title":"Reference","text":"

                                  For detailed meanings of the built-in Events in the system, refer to the Kubernetes API Event List.

                                  "},{"location":"en/admin/insight/infra/namespace.html","title":"Namespace Monitoring","text":"

                                  With namespaces as the dimension, you can quickly query resource consumption and trends within a namespace.

                                  "},{"location":"en/admin/insight/infra/namespace.html#prerequisites","title":"Prerequisites","text":"
                                  • Insight Agent is installed in the cluster and the applications are in the Running state.
                                  "},{"location":"en/admin/insight/infra/namespace.html#steps","title":"Steps","text":"
                                  1. Go to the Insight product module.

                                  2. Select Infrastructure -> Namespaces from the left navigation bar. On this page, you can view the following information:

                                    1. Switch Namespace: Switch between clusters or namespaces at the top.
                                    2. Resource Overview: Provides statistics on the number of normal and total workloads within the selected namespace.
                                    3. Incidents: Displays the number of alerts generated within the selected namespace.
                                    4. Events: Shows the number of Warning level events within the selected namespace in the past 24 hours.
                                    5. Resource Consumption: Provides the sum of CPU and memory usage for Pods within the selected namespace, along with the CPU and memory quota information.
                                  "},{"location":"en/admin/insight/infra/namespace.html#metric-explanations","title":"Metric Explanations","text":"Metric Name Description CPU Usage The sum of CPU usage for Pods within the selected namespace. Memory Usage The sum of memory usage for Pods within the selected namespace. Pod CPU Usage The CPU usage for each Pod within the selected namespace. Pod Memory Usage The memory usage for each Pod within the selected namespace."},{"location":"en/admin/insight/infra/node.html","title":"Node Monitoring","text":"

Through node monitoring, you can get an overview of the current health status of the nodes in the selected cluster and the number of abnormal pods; on the node details page, you can view the number of alerts and the trends of resource consumption such as CPU, memory, and disk.

                                  "},{"location":"en/admin/insight/infra/node.html#prerequisites","title":"Prerequisites","text":"
                                  • The cluster has insight-agent installed and the application is in running state.
                                  "},{"location":"en/admin/insight/infra/node.html#steps","title":"Steps","text":"
                                  1. Go to the Insight product module.

                                  2. Select Infrastructure -> Nodes from the left navigation bar. On this page, you can view the following information:

• Cluster: Use the dropdown at the top to switch between clusters.
                                    • Nodes: Shows a list of nodes within the selected cluster. Click a specific node to view detailed information.
                                    • Alert: Displays the number of alerts generated in the current cluster.
                                    • Resource Consumption: Shows the actual usage and total capacity of CPU, memory, and disk for the selected node.
                                    • Metric Explanations: Describes the trends in CPU, memory, disk I/O, and network traffic for the selected node.

3. Click Resource Level Monitor to view more metrics of the current node.

                                  "},{"location":"en/admin/insight/infra/probe.html","title":"Probe","text":"

                                  Probe refers to the use of black-box monitoring to regularly test the connectivity of targets through HTTP, TCP, and other methods, enabling quick detection of ongoing faults.

                                  Insight uses the Prometheus Blackbox Exporter tool to probe the network using protocols such as HTTP, HTTPS, DNS, TCP, and ICMP, and returns the probe results to understand the network status.

                                  "},{"location":"en/admin/insight/infra/probe.html#prerequisites","title":"Prerequisites","text":"

                                  The insight-agent has been successfully deployed in the target cluster and is in the Running state.

                                  "},{"location":"en/admin/insight/infra/probe.html#view-probes","title":"View Probes","text":"
                                  1. Go to the Insight product module.
                                  2. Select Infrastructure -> Probes in the left navigation bar.

                                    • Click the cluster or namespace dropdown in the table to switch between clusters and namespaces.
                                    • The list displays the name, probe method, probe target, connectivity status, and creation time of the probes by default.
                                    • The connectivity status can be:
                                      • Normal: The probe successfully connects to the target, and the target returns the expected response.
                                      • Abnormal: The probe fails to connect to the target, or the target does not return the expected response.
                                      • Pending: The probe is attempting to connect to the target.
                                    • Supports fuzzy search of probe names.
                                  "},{"location":"en/admin/insight/infra/probe.html#create-a-probe","title":"Create a Probe","text":"
                                  1. Click Create Probe .
                                  2. Fill in the basic information and click Next .

                                    • Name: The name can only contain lowercase letters, numbers, and hyphens (-), and must start and end with a lowercase letter or number, with a maximum length of 63 characters.
                                    • Cluster: Select the cluster for the probe task.
                                    • Namespace: The namespace where the probe task is located.
                                  3. Configure the probe parameters.

                                    • Blackbox Instance: Select the blackbox instance responsible for the probe.
                                    • Probe Method:
                                      • HTTP: Sends HTTP or HTTPS requests to the target URL to check its connectivity and response time. This can be used to monitor the availability and performance of websites or web applications.
                                      • TCP: Establishes a TCP connection to the target host and port to check its connectivity and response time. This can be used to monitor TCP-based services such as web servers and database servers.
• Other: Supports custom probe methods by configuring a ConfigMap (see the sketch after these steps). For more information, refer to: Custom Probe Methods
                                    • Probe Target: The target address of the probe, supports domain names or IP addresses.
                                    • Labels: Custom labels that will be automatically added to Prometheus' labels.
                                    • Probe Interval: The interval between probes.
                                    • Probe Timeout: The maximum waiting time when probing the target.
                                  4. After configuring, click OK to complete the creation.
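
For reference, a custom probe method adds a module to the Blackbox Exporter configuration. A minimal sketch, where the module name and the expected status code are illustrative:

modules:\n  http_2xx_example:   # illustrative module name\n    prober: http\n    timeout: 5s\n    http:\n      method: GET\n      valid_status_codes: [200]\n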

                                  Warning

                                  After the probe task is created, it takes about 3 minutes to synchronize the configuration. During this period, no probes will be performed, and probe results cannot be viewed.

                                  "},{"location":"en/admin/insight/infra/probe.html#view-monitoring-dashboards","title":"View Monitoring Dashboards","text":"

                                  Click \u2507 in the operations column and click View Monitoring Dashboard .

                                  Metric Name Description Current Status Response Represents the response status code of the HTTP probe request. Ping Status Indicates whether the probe request was successful. 1 indicates a successful probe request, and 0 indicates a failed probe request. IP Protocol Indicates the IP protocol version used in the probe request. SSL Expiry Represents the earliest expiration time of the SSL/TLS certificate. DNS Response (Latency) Represents the duration of the entire probe process in seconds. HTTP Duration Represents the duration of the entire process from sending the request to receiving the complete response."},{"location":"en/admin/insight/infra/probe.html#edit-a-probe","title":"Edit a Probe","text":"

                                  Click \u2507 in the operations column and click Edit .

                                  "},{"location":"en/admin/insight/infra/probe.html#delete-a-probe","title":"Delete a Probe","text":"

                                  Click \u2507 in the operations column and click Delete .

                                  "},{"location":"en/admin/insight/quickstart/install/index.html","title":"Start Observing","text":"

AI platform enables the management and creation of multicloud and multiple clusters. Building upon this capability, Insight serves as a unified observability solution for multiple clusters. It collects observability data from multiple clusters by deploying the insight-agent plugin and allows querying of metrics, logs, and trace data through AI platform Insight.

                                  insight-agent is a tool that facilitates the collection of observability data from multiple clusters. Once installed, it automatically collects metrics, logs, and trace data without any modifications.

                                  Clusters created through Container Management come pre-installed with insight-agent. Hence, this guide specifically provides instructions on enabling observability for integrated clusters.

                                  • Install insight-agent online

As a unified observability platform for multiple clusters, Insight's resource consumption for certain components is closely related to the scale of the created clusters and the number of integrated clusters. When installing insight-agent, adjust the resources of the corresponding components based on the cluster size.

                                  1. Adjust the CPU and memory resources of the Prometheus collection component in insight-agent according to the size of the cluster created or integrated. Please refer to Prometheus resource planning.

2. As the metric data from multiple clusters is stored centrally, AI platform administrators need to adjust the disk space of vmstorage based on the cluster size. Please refer to vmstorage disk capacity planning.

                                  3. For instructions on adjusting the disk space of vmstorage, please refer to Expanding vmstorage disk.

                                  Since AI platform supports the management of multicloud and multiple clusters, insight-agent has undergone partial verification. However, there are known conflicts with monitoring components when installing insight-agent in Suanova 4.0 clusters and Openshift 4.x clusters. If you encounter similar issues, please refer to the following documents:

                                  • Install insight-agent in Suanova 4.0.x
                                  • Install insight-agent in Openshift 4.x

                                  Currently, the insight-agent collection component has undergone functional testing for popular versions of Kubernetes. Please refer to:

                                  • Kubernetes cluster compatibility testing
                                  • Openshift 4.x cluster compatibility testing
                                  • Rancher cluster compatibility testing
                                  "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html","title":"Enable Big Log and Big Trace Modes","text":"

The Insight Module supports switching logs to Big Log mode and traces to Big Trace mode in order to enhance data-writing capabilities in large-scale environments. This page introduces the following methods for enabling these modes:

                                  • Enable or upgrade to Big Log and Big Trace modes through the installer (controlled by the same parameter value in manifest.yaml)
                                  • Manually enable Big Log and Big Trace modes through Helm commands
                                  "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#logs","title":"Logs","text":"

                                  This section explains the differences between the normal log mode and the Big Log mode.

                                  "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#log-mode","title":"Log Mode","text":"

                                  Components: Fluentbit + Elasticsearch

                                  This mode is referred to as the ES mode, and the data flow diagram is shown below:

                                  "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#big-log-mode","title":"Big Log Mode","text":"

                                  Components: Fluentbit + Kafka + Vector + Elasticsearch

                                  This mode is referred to as the Kafka mode, and the data flow diagram is shown below:

                                  "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#traces","title":"Traces","text":"

                                  This section explains the differences between the normal trace mode and the Big Trace mode.

                                  "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#trace-mode","title":"Trace Mode","text":"

                                  Components: Agent opentelemetry-collector + Global opentelemetry-collector + Jaeger-collector + Elasticsearch

This mode is referred to as the OTLP mode, and the data flow diagram is shown below:

                                  "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#big-trace-mode","title":"Big Trace Mode","text":"

                                  Components: Agent opentelemetry-collector + Kafka + Global opentelemetry-collector + Jaeger-collector + Elasticsearch

                                  This mode is referred to as the Kafka mode, and the data flow diagram is shown below:

                                  "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#enabling-via-installer","title":"Enabling via Installer","text":"

When deploying or upgrading AI platform using the installer, the manifest.yaml file includes the infrastructures.kafka field. To enable the Big Log and Big Trace observability modes, Kafka must be enabled:

                                  manifest.yaml
                                  apiVersion: manifest.daocloud.io/v1alpha1\nkind: SuanovaManifest\n...\ninfrastructures:\n  ...\n  kafka:\n    enable: true # Default is false\n    cpuLimit: 1\n    memLimit: 2Gi\n    pvcSize: 15Gi\n
                                  "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#enable","title":"Enable","text":"

                                  When using a manifest.yaml that enables kafka during installation, Kafka middleware will be installed by default, and Big Log and Big Trace modes will be enabled automatically. The installation command is:

                                  ./dce5-installer cluster-create -c clusterConfig.yaml -m manifest.yaml\n
                                  "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#upgrade","title":"Upgrade","text":"

The upgrade also involves modifying the kafka field. However, note that since the old environment was installed with kafka: false , Kafka is not present in the environment. You therefore need to include middleware in the upgrade scope so that the Kafka middleware is installed at the same time. The upgrade command is:

                                  ./dce5-installer cluster-create -c clusterConfig.yaml -m manifest.yaml -u gproduct,middleware\n

                                  Note

                                  After the upgrade is complete, you need to manually restart the following components:

                                  • insight-agent-fluent-bit
                                  • insight-agent-opentelemetry-collector
                                  • insight-opentelemetry-collector
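
A restart sketch, assuming insight-agent-fluent-bit runs as a DaemonSet and the two collectors as Deployments in the insight-system namespace (verify the actual resource kinds and names in your environment first):

kubectl -n insight-system rollout restart daemonset insight-agent-fluent-bit\nkubectl -n insight-system rollout restart deployment insight-agent-opentelemetry-collector\nkubectl -n insight-system rollout restart deployment insight-opentelemetry-collector\n

The same commands apply to the restart notes after the Helm-based upgrades below.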
                                  "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#enabling-via-helm-commands","title":"Enabling via Helm Commands","text":"

Prerequisites: Ensure that a usable Kafka instance exists and that its address is reachable.

                                  Use the following commands to retrieve the values of the old versions of Insight and insight-agent (it's recommended to back them up):

                                  helm get values insight -n insight-system -o yaml > insight.yaml\nhelm get values insight-agent -n insight-system -o yaml > insight-agent.yaml\n
                                  "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#enabling-big-log","title":"Enabling Big Log","text":"

                                  There are several ways to enable or upgrade to Big Log mode:

• Use --set in the helm upgrade command
• Modify YAML and run helm upgrade
• Upgrade via the Container Management UI

                                  First, run the following Insight upgrade command, ensuring the Kafka brokers address is correct:

                                  helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --set global.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.kafka.enabled=true \\\n  --set vector.enabled=true \\\n  --version 0.30.1\n

                                  Then, run the following insight-agent upgrade command, ensuring the Kafka brokers address is correct:

                                  helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --set global.exporters.logging.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.exporters.logging.output=kafka \\\n  --version 0.30.1\n

                                  Follow these steps to modify the YAML and then run the helm upgrade command:

                                  1. Modify insight.yaml

                                    insight.yaml
                                    global:\n  ...\n  kafka:\n    brokers: 10.6.216.111:30592\n    enabled: true\n...\nvector:\n  enabled: true\n
                                  2. Upgrade the Insight component:

                                    helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --version 0.30.1\n
                                  3. Modify insight-agent.yaml

                                    insight-agent.yaml
                                    global:\n  ...\n  exporters:\n    ...\n    logging:\n      ...\n      kafka:\n        brokers: 10.6.216.111:30592\n      output: kafka\n
                                  4. Upgrade the insight-agent:

                                    helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --version 0.30.1\n

                                  In the Container Management module, find the cluster, select Helm Apps from the left navigation bar, and find and update the insight-agent.

                                  In Logging Settings, select kafka for output and fill in the correct brokers address.

                                  Note that after the upgrade is complete, you need to manually restart the insight-agent-fluent-bit component.

                                  "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#enabling-big-trace","title":"Enabling Big Trace","text":"

                                  There are several ways to enable or upgrade to Big Trace mode:

• Use --set in the helm upgrade command
• Modify YAML and run helm upgrade
• Upgrade via the Container Management UI

                                  First, run the following Insight upgrade command, ensuring the Kafka brokers address is correct:

                                  helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --set global.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.kafka.enabled=true \\\n  --set global.tracing.kafkaReceiver.enabled=true \\\n  --version 0.30.1\n

                                  Then, run the following insight-agent upgrade command, ensuring the Kafka brokers address is correct:

                                  helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --set global.exporters.trace.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.exporters.trace.output=kafka \\\n  --version 0.30.1\n

                                  Follow these steps to modify the YAML and then run the helm upgrade command:

                                  1. Modify insight.yaml

                                    insight.yaml
                                    global:\n  ...\n  kafka:\n    brokers: 10.6.216.111:30592\n    enabled: true\n...\ntracing:\n  ...\n  kafkaReceiver:\n    enabled: true\n
                                  2. Upgrade the Insight component:

                                    helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --version 0.30.1\n
                                  3. Modify insight-agent.yaml

                                    insight-agent.yaml
                                    global:\n  ...\n  exporters:\n    ...\n    trace:\n      ...\n      kafka:\n        brokers: 10.6.216.111:30592\n      output: kafka\n
                                  4. Upgrade the insight-agent:

                                    helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --version 0.30.1\n

                                  In the Container Management module, find the cluster, select Helm Apps from the left navigation bar, and find and update the insight-agent.

                                  In Trace Settings, select kafka for output and fill in the correct brokers address.

                                  Note that after the upgrade is complete, you need to manually restart the insight-agent-opentelemetry-collector and insight-opentelemetry-collector components.

                                  "},{"location":"en/admin/insight/quickstart/install/component-scheduling.html","title":"Custom Insight Component Scheduling Policy","text":"

                                  When deploying Insight to a Kubernetes environment, proper resource management and optimization are crucial. Insight includes several core components such as Prometheus, OpenTelemetry, FluentBit, Vector, and Elasticsearch. These components, during their operation, may negatively impact the performance of other pods within the cluster due to resource consumption issues. To effectively manage resources and optimize cluster operations, node affinity becomes an important option.

This page describes how to add taints and node affinity to ensure that each component runs on the appropriate nodes, avoiding resource competition or contention and thereby guaranteeing the stability and efficiency of the entire Kubernetes cluster.

                                  "},{"location":"en/admin/insight/quickstart/install/component-scheduling.html#configure-dedicated-nodes-for-insight-using-taints","title":"Configure dedicated nodes for Insight using taints","text":"

                                  Since the Insight Agent includes DaemonSet components, the configuration method described in this section is to have all components except the Insight DaemonSet run on dedicated nodes.

                                  This is achieved by adding taints to the dedicated nodes and using tolerations to match them. More details can be found in the Kubernetes official documentation.

                                  You can refer to the following commands to add and remove taints on nodes:

                                  # Add taint\nkubectl taint nodes worker1 node.daocloud.io=insight-only:NoSchedule\n\n# Remove taint\nkubectl taint nodes worker1 node.daocloud.io:NoSchedule-\n

                                  There are two ways to schedule Insight components to dedicated nodes:

                                  "},{"location":"en/admin/insight/quickstart/install/component-scheduling.html#1-add-tolerations-for-each-component","title":"1. Add tolerations for each component","text":"

                                  Configure the tolerations for the insight-server and insight-agent Charts respectively:

• insight-server Chart (first block below)
• insight-agent Chart (second block below)
                                  server:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nui:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nrunbook:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\n# mysql:\nvictoria-metrics-k8s-stack:\n  victoria-metrics-operator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  vmcluster:\n    spec:\n      vmstorage:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n      vmselect:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n      vminsert:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n  vmalert:\n    spec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n  alertmanager:\n    spec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n\njaeger:\n  collector:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  query:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n\nopentelemetry-collector-aggregator:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nopentelemetry-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\ngrafana-operator:\n  operator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  grafana:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\nkibana:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nelastic-alert:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nvector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n
                                  kube-prometheus-stack:\n  prometheus:\n    prometheusSpec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n  prometheus-node-exporter:\n    tolerations:\n      - effect: NoSchedule\n        operator: Exists\n  prometheusOperator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n\nkube-state-metrics:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-operator:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\ntailing-sidecar-operator:\n  operator:\n    tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-kubernetes-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nprometheus-blackbox-exporter:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\netcd-exporter:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\" \n
                                  "},{"location":"en/admin/insight/quickstart/install/component-scheduling.html#2-configure-at-the-namespace-level","title":"2. Configure at the namespace level","text":"

                                  Allow pods in the insight-system namespace to tolerate the node.daocloud.io=insight-only taint.

1. Adjust the apiserver configuration file /etc/kubernetes/manifests/kube-apiserver.yaml so that the enabled admission plugins include PodTolerationRestriction and PodNodeSelector:
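  A minimal sketch of the relevant flag in kube-apiserver.yaml (the full plugin list depends on your existing configuration; NodeRestriction below is only an illustrative default, so keep whatever plugins are already enabled and append the two new ones):

  spec:\n  containers:\n    - command:\n        - kube-apiserver\n        - --enable-admission-plugins=NodeRestriction,PodTolerationRestriction,PodNodeSelector\n        # ... other flags unchanged; the kubelet restarts the static pod automatically\n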

                                  2. Add an annotation to the insight-system namespace:

                                    apiVersion: v1\nkind: Namespace\nmetadata:\n  name: insight-system\n  annotations:\n    scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Equal\", \"effect\": \"NoSchedule\", \"key\": \"node.daocloud.io\", \"value\": \"insight-only\"}]'\n

Restart the components in the insight-system namespace so that their pods can be scheduled normally again.
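For example, Deployments in the namespace can be restarted in one pass (DaemonSets and StatefulSets can be restarted the same way):

# Trigger a rolling restart of every Deployment in insight-system\nkubectl -n insight-system rollout restart deployment\n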

                                  "},{"location":"en/admin/insight/quickstart/install/component-scheduling.html#use-node-labels-and-node-affinity-to-manage-component-scheduling","title":"Use node labels and node affinity to manage component scheduling","text":"

                                  Info

                                  Node affinity is conceptually similar to nodeSelector, allowing you to constrain which nodes a pod can be scheduled on based on labels on the nodes. There are two types of node affinity:

                                  1. requiredDuringSchedulingIgnoredDuringExecution: The scheduler will only schedule the pod if the rules are met. This feature is similar to nodeSelector but has more expressive syntax.
                                  2. preferredDuringSchedulingIgnoredDuringExecution: The scheduler will try to find nodes that meet the rules. If no matching nodes are found, the scheduler will still schedule the Pod.

                                  For more details, please refer to the Kubernetes official documentation.

                                  To meet different user needs for scheduling Insight components, Insight provides fine-grained labels for different components' scheduling policies. Below is a description of the labels and their associated components:

| Label Key | Label Value | Description |
| --- | --- | --- |
| node.daocloud.io/insight-any | Any value, true recommended | All Insight components prefer nodes with this label |
| node.daocloud.io/insight-prometheus | Any value, true recommended | Specifically for Prometheus components |
| node.daocloud.io/insight-vmstorage | Any value, true recommended | Specifically for VictoriaMetrics vmstorage components |
| node.daocloud.io/insight-vector | Any value, true recommended | Specifically for Vector components |
| node.daocloud.io/insight-otel-col | Any value, true recommended | Specifically for OpenTelemetry components |

                                  You can refer to the following commands to add and remove labels on nodes:

                                  # Add label to node8, prioritizing scheduling insight-prometheus to node8 \nkubectl label nodes node8 node.daocloud.io/insight-prometheus=true\n\n# Remove the node.daocloud.io/insight-prometheus label from node8\nkubectl label nodes node8 node.daocloud.io/insight-prometheus-\n
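You can verify which nodes carry a given label before relying on it:

# List the nodes labeled for insight-prometheus scheduling\nkubectl get nodes -l node.daocloud.io/insight-prometheus=true\n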

                                  Below is the default affinity preference for the insight-prometheus component during deployment:

                                  affinity:\n  nodeAffinity:\n    preferredDuringSchedulingIgnoredDuringExecution:\n    - preference:\n        matchExpressions:\n        - key: node-role.kubernetes.io/control-plane\n          operator: DoesNotExist\n      weight: 1\n    - preference:\n        matchExpressions:\n        - key: node.daocloud.io/insight-prometheus # (1)!\n          operator: Exists\n      weight: 2\n    - preference:\n        matchExpressions:\n        - key: node.daocloud.io/insight-any\n          operator: Exists\n      weight: 3\n    podAntiAffinity:\n      preferredDuringSchedulingIgnoredDuringExecution:\n        - weight: 1\n          podAffinityTerm:\n            topologyKey: kubernetes.io/hostname\n            labelSelector:\n              matchExpressions:\n                - key: app.kubernetes.io/instance\n                  operator: In\n                  values:\n                    - insight-agent-kube-prometh-prometheus\n
                                  1. Prioritize scheduling insight-prometheus to nodes with the node.daocloud.io/insight-prometheus label
                                  "},{"location":"en/admin/insight/quickstart/install/gethosturl.html","title":"Get Data Storage Address of Global Service Cluster","text":"

Insight is a product for unified observability across multiple clusters. To store and query observability data from multiple clusters in one place, sub-clusters need to report the collected data to the global service cluster for centralized storage. This document provides the storage component addresses required when installing the collection component insight-agent.

                                  "},{"location":"en/admin/insight/quickstart/install/gethosturl.html#install-insight-agent-in-global-service-cluster","title":"Install insight-agent in Global Service Cluster","text":"

If installing insight-agent in the global service cluster, it is recommended to access the storage components via their in-cluster domain names:

                                  export vminsert_host=\"vminsert-insight-victoria-metrics-k8s-stack.insight-system.svc.cluster.local\" # (1)!\nexport es_host=\"insight-es-master.insight-system.svc.cluster.local\" # (2)!\nexport otel_col_host=\"insight-opentelemetry-collector.insight-system.svc.cluster.local\" # (3)!\n
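These exported variables are typically wired into the insight-agent installation as Helm values; a minimal sketch using the key names that appear elsewhere in this document (ports are omitted here because the in-cluster defaults apply):

--set global.exporters.metric.host=${vminsert_host} \\\n--set global.exporters.logging.host=${es_host} \\\n--set global.exporters.trace.host=${otel_col_host}\n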
                                  "},{"location":"en/admin/insight/quickstart/install/gethosturl.html#install-insight-agent-in-other-clusters","title":"Install insight-agent in Other Clusters","text":""},{"location":"en/admin/insight/quickstart/install/gethosturl.html#get-address-via-interface-provided-by-insight-server","title":"Get Address via Interface Provided by Insight Server","text":"
                                  1. The management cluster uses the default LoadBalancer mode for exposure.

                                    Log in to the console of the global service cluster and run the following command:

                                    export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam'\n

                                    Note

                                    Please replace the ${INSIGHT_SERVER_IP} parameter in the command.

                                    You will get the following response:

                                    {\n  \"values\": {\n    \"global\": {\n      \"exporters\": {\n        \"logging\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"metric\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"auditLog\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"trace\": {\n          \"host\": \"10.6.182.32\"\n        }\n      }\n    },\n    \"opentelemetry-operator\": {\n      \"enabled\": true\n    },\n    \"opentelemetry-collector\": {\n      \"enabled\": true\n    }\n  }\n}\n
• global.exporters.logging.host is the log service address. There is no need to set the corresponding service port; the default value will be used.
                                    • global.exporters.metric.host is the metrics service address.
                                    • global.exporters.trace.host is the trace service address.
                                    • global.exporters.auditLog.host is the audit log service address (same service as trace but different port).
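If you only need the exporter addresses, the same response can be filtered with jq, for example:

curl -s --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam' | jq '.values.global.exporters'\n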
2. The management cluster has LoadBalancer disabled

When calling the interface, you need to additionally pass an externally accessible node IP from the cluster, which will be used to construct the complete access address of the corresponding service.

                                    export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam' --data '{\"extra\": {\"EXPORTER_EXTERNAL_IP\": \"10.5.14.51\"}}'\n

                                    You will get the following response:

                                    {\n  \"values\": {\n    \"global\": {\n      \"exporters\": {\n        \"logging\": {\n          \"scheme\": \"https\",\n          \"host\": \"10.5.14.51\",\n          \"port\": 32007,\n          \"user\": \"elastic\",\n          \"password\": \"j8V1oVoM1184HvQ1F3C8Pom2\"\n        },\n        \"metric\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30683\n        },\n        \"auditLog\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30884\n        },\n        \"trace\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30274\n        }\n      }\n    },\n    \"opentelemetry-operator\": {\n      \"enabled\": true\n    },\n    \"opentelemetry-collector\": {\n      \"enabled\": true\n    }\n  }\n}\n
                                    • global.exporters.logging.host is the log service address.
                                    • global.exporters.logging.port is the NodePort exposed by the log service.
                                    • global.exporters.metric.host is the metrics service address.
                                    • global.exporters.metric.port is the NodePort exposed by the metrics service.
                                    • global.exporters.trace.host is the trace service address.
                                    • global.exporters.trace.port is the NodePort exposed by the trace service.
                                    • global.exporters.auditLog.host is the audit log service address (same service as trace but different port).
                                    • global.exporters.auditLog.port is the NodePort exposed by the audit log service.
                                  "},{"location":"en/admin/insight/quickstart/install/gethosturl.html#connect-via-loadbalancer","title":"Connect via LoadBalancer","text":"
                                  1. If LoadBalancer is enabled in the cluster and a VIP is set for Insight, you can manually execute the following command to obtain the address information for vminsert and opentelemetry-collector:

                                    $ kubectl get service -n insight-system | grep lb\nlb-insight-opentelemetry-collector               LoadBalancer   10.233.23.12    <pending>     4317:31286/TCP,8006:31351/TCP  24d\nlb-vminsert-insight-victoria-metrics-k8s-stack   LoadBalancer   10.233.63.67    <pending>     8480:31629/TCP                 24d\n
                                    • lb-vminsert-insight-victoria-metrics-k8s-stack is the address for the metrics service.
                                    • lb-insight-opentelemetry-collector is the address for the tracing service.
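Once the LoadBalancer has been assigned a VIP (the EXTERNAL-IP column is no longer <pending>), you can extract the access address with jsonpath, for example:

# Read the VIP and service port of the metrics entry point\n# (some environments report a hostname instead of an IP under .ingress[0])\nkubectl get svc -n insight-system lb-vminsert-insight-victoria-metrics-k8s-stack -o jsonpath='{.status.loadBalancer.ingress[0].ip}:{.spec.ports[0].port}'\n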
                                  2. Execute the following command to obtain the address information for elasticsearch:

                                    $ kubectl get service -n mcamel-system | grep es\nmcamel-common-es-cluster-masters-es-http               NodePort    10.233.16.120   <none>        9200:30465/TCP               47d\n

                                    mcamel-common-es-cluster-masters-es-http is the address for the logging service.

                                  "},{"location":"en/admin/insight/quickstart/install/gethosturl.html#connect-via-nodeport","title":"Connect via NodePort","text":"

                                  The LoadBalancer feature is disabled in the global service cluster.

                                  In this case, the LoadBalancer resources mentioned above will not be created by default. The relevant service names are:

                                  • vminsert-insight-victoria-metrics-k8s-stack (metrics service)
                                  • common-es (logging service)
                                  • insight-opentelemetry-collector (tracing service)
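For example, you can list these services and read the NodePort column directly (in the earlier example, the logging service lives in the mcamel-system namespace):

kubectl get service -n insight-system | grep -E 'vminsert|opentelemetry-collector'\nkubectl get service -n mcamel-system | grep es\n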

After obtaining the corresponding port information for the services in the above two scenarios, make the following settings:

                                  --set global.exporters.logging.host=  # (1)!\n--set global.exporters.logging.port=  # (2)!\n--set global.exporters.metric.host=   # (3)!\n--set global.exporters.metric.port=   # (4)!\n--set global.exporters.trace.host=    # (5)!\n--set global.exporters.trace.port=    # (6)!\n--set global.exporters.auditLog.host= # (7)!\n
                                  1. NodeIP of the externally accessible management cluster
                                  2. NodePort of the logging service port 9200
                                  3. NodeIP of the externally accessible management cluster
                                  4. NodePort of the metrics service port 8480
                                  5. NodeIP of the externally accessible management cluster
                                  6. NodePort of the tracing service port 4317
                                  7. NodeIP of the externally accessible management cluster
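Using the sample response shown earlier (host 10.5.14.51) as an illustration, the settings would look like the following; substitute the values returned in your own environment (if an audit log NodePort also needs to be set, it presumably follows the same pattern with the port from the response):

--set global.exporters.logging.host=10.5.14.51 \\\n--set global.exporters.logging.port=32007 \\\n--set global.exporters.metric.host=10.5.14.51 \\\n--set global.exporters.metric.port=30683 \\\n--set global.exporters.trace.host=10.5.14.51 \\\n--set global.exporters.trace.port=30274 \\\n--set global.exporters.auditLog.host=10.5.14.51\n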
                                  "},{"location":"en/admin/insight/quickstart/install/install-agent.html","title":"Install insight-agent","text":"

insight-agent is a plugin for collecting Insight data, supporting unified observation of metrics, traces, and logs. This article describes how to install insight-agent for an integrated cluster in an online environment.

                                  "},{"location":"en/admin/insight/quickstart/install/install-agent.html#prerequisites","title":"Prerequisites","text":"

                                  Please confirm that your cluster has successfully connected to the container management platform. You can refer to Integrate Clusters for details.

                                  "},{"location":"en/admin/insight/quickstart/install/install-agent.html#steps","title":"Steps","text":"
1. Enter Container Management from the left navigation bar and go to Clusters. Find the cluster where you want to install insight-agent.

2. Choose Install now to go straight to the installation page, or click the cluster, then click Helm Apps -> Helm Templates in the left navigation bar, search for insight-agent in the search box, and click it for details.

3. Select the appropriate version and click Install.

4. Fill in the name, select the namespace and version, and fill in the addresses for reporting logging, metric, audit, and trace data in the YAML file. The system fills in default data-reporting addresses for these components; please verify them before clicking OK to install.

                                    If you need to modify the data reporting address, please refer to Get Data Reporting Address.

5. The system will automatically return to Helm Apps. When the application status changes from Unknown to Deployed, insight-agent has been installed successfully.

                                    Note

• Click \u2507 on the far right to perform more operations such as Update, View YAML, and Delete in the pop-up menu.
                                    • For a practical installation demo, watch Video demo of installing insight-agent
                                  "},{"location":"en/admin/insight/quickstart/install/knownissues.html","title":"Known Issues","text":"

                                  This page lists some issues related to the installation and uninstallation of Insight Agent and their workarounds.

                                  "},{"location":"en/admin/insight/quickstart/install/knownissues.html#uninstallation-failure-of-insight-agent","title":"Uninstallation Failure of Insight Agent","text":"

                                  When you run the following command to uninstall Insight Agent,

helm uninstall insight-agent\n

The TLS secret used by otel-operator fails to be uninstalled.

Due to the "reuse tls secret" logic in the following otel-operator code, the operator checks whether the MutationConfiguration exists and reuses the CA cert bound to it. However, because helm uninstall has already removed the MutationConfiguration, this lookup yields a null value.

Therefore, please manually delete the corresponding secret using one of the following methods:

                                  • Delete via command line: Log in to the console of the target cluster and run the following command:

                                    kubectl -n insight-system delete secret insight-agent-opentelemetry-operator-controller-manager-service-cert\n
                                  • Delete via UI: Log in to AI platform container management, select the target cluster, select Secret from the left menu, input insight-agent-opentelemetry-operator-controller-manager-service-cert, then select Delete.

                                  "},{"location":"en/admin/insight/quickstart/install/knownissues.html#insight-agent_1","title":"Insight Agent","text":""},{"location":"en/admin/insight/quickstart/install/knownissues.html#log-collection-endpoint-not-updated-when-upgrading-insight-agent","title":"Log Collection Endpoint Not Updated When Upgrading Insight Agent","text":"

                                  When updating the log configuration of the insight-agent from Elasticsearch to Kafka or from Kafka to Elasticsearch, the changes do not take effect and the agent continues to use the previous configuration.

Solution:

                                  Manually restart Fluent Bit in the cluster.
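A minimal sketch, assuming Fluent Bit runs as a DaemonSet whose name contains fluent-bit (the exact workload name may differ in your environment, so check it first):

# Find the Fluent Bit workload, then trigger a rolling restart\nkubectl -n insight-system get daemonset | grep fluent-bit\nkubectl -n insight-system rollout restart daemonset insight-agent-fluent-bit\n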

                                  "},{"location":"en/admin/insight/quickstart/install/knownissues.html#podmonitor-collects-multiple-sets-of-jvm-metrics","title":"PodMonitor Collects Multiple Sets of JVM Metrics","text":"
1. In this version, there is a defect in PodMonitor/insight-kubernetes-pod: it incorrectly creates jobs to collect metrics for all containers in Pods marked with insight.opentelemetry.io/metric-scrape=true, instead of only the containers corresponding to insight.opentelemetry.io/metric-port.

2. After the PodMonitor is declared, PrometheusOperator pre-configures some service discovery configurations. For CRD compatibility reasons, configuring the collection tasks through annotations was abandoned.

                                  3. Use the additional scrape config mechanism provided by Prometheus to configure the service discovery rules in a secret and introduce them into Prometheus.

                                  Therefore:

                                  1. Delete the current PodMonitor for insight-kubernetes-pod
                                  2. Use a new rule

                                  In the new rule, action: keepequal is used to compare the consistency between source_labels and target_label to determine whether to create collection tasks for the ports of a container. Note that this feature is only available in Prometheus v2.41.0 (2022-12-20) and higher.

                                  +    - source_labels: [__meta_kubernetes_pod_annotation_insight_opentelemetry_io_metric_port]\n+      separator: ;\n+      target_label: __meta_kubernetes_pod_container_port_number\n+      action: keepequal\n
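A minimal sketch of loading such a rule through the additional scrape configs mechanism, assuming the full scrape config is saved in prometheus-additional.yaml and the Prometheus CR is managed in insight-system (the secret name here is illustrative):

# Store the scrape config in a secret\nkubectl -n insight-system create secret generic insight-additional-scrape-configs --from-file=prometheus-additional.yaml\n\n# Then reference it from the Prometheus CR:\n# spec:\n#   additionalScrapeConfigs:\n#     name: insight-additional-scrape-configs\n#     key: prometheus-additional.yaml\n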
                                  "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html","title":"Upgrade Notes","text":"

                                  This page provides some considerations for upgrading insight-server and insight-agent.

                                  "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v028x-or-lower-to-v029x","title":"Upgrade from v0.28.x (or lower) to v0.29.x","text":"

                                  Due to the upgrade of the Opentelemetry community operator chart version in v0.29.0, the supported values for featureGates in the values file have changed. Therefore, before upgrading, you need to set the value of featureGates to empty, as follows:

                                  -  --set opentelemetry-operator.manager.featureGates=\"+operator.autoinstrumentation.go,+operator.autoinstrumentation.multi-instrumentation,+operator.autoinstrumentation.nginx\" \\\n+  --set opentelemetry-operator.manager.featureGates=\"\"\n
                                  "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v026x-or-lower-to-v027x-or-higher","title":"Upgrade from v0.26.x (or lower) to v0.27.x or higher","text":"

                                  In v0.27.x, the switch for the vector component has been separated. If the existing environment has vector enabled, you need to specify --set vector.enabled=true when upgrading the insight-server.

                                  "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v019x-or-lower-to-020x","title":"Upgrade from v0.19.x (or lower) to 0.20.x","text":"

Before upgrading Insight, you need to manually delete the jaeger-collector and jaeger-query deployments by running the following commands:

                                  kubectl -n insight-system delete deployment insight-jaeger-collector\nkubectl -n insight-system delete deployment insight-jaeger-query\n
                                  "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v017x-or-lower-to-v018x","title":"Upgrade from v0.17.x (or lower) to v0.18.x","text":"

                                  In v0.18.x, there have been updates to the Jaeger-related deployment files, so you need to manually run the following commands before upgrading insight-server:

                                  kubectl -n insight-system delete deployment insight-jaeger-collector\nkubectl -n insight-system delete deployment insight-jaeger-query\n

                                  There have been changes to metric names in v0.18.x, so after upgrading insight-server, insight-agent should also be upgraded.

                                  In addition, the parameters for enabling the tracing module and adjusting the ElasticSearch connection have been modified. Refer to the following parameters:

                                  +  --set global.tracing.enable=true \\\n-  --set jaeger.collector.enabled=true \\\n-  --set jaeger.query.enabled=true \\\n+  --set global.elasticsearch.scheme=${your-external-elasticsearch-scheme} \\\n+  --set global.elasticsearch.host=${your-external-elasticsearch-host} \\\n+  --set global.elasticsearch.port=${your-external-elasticsearch-port} \\\n+  --set global.elasticsearch.user=${your-external-elasticsearch-username} \\\n+  --set global.elasticsearch.password=${your-external-elasticsearch-password} \\\n-  --set jaeger.storage.elasticsearch.scheme=${your-external-elasticsearch-scheme} \\\n-  --set jaeger.storage.elasticsearch.host=${your-external-elasticsearch-host} \\\n-  --set jaeger.storage.elasticsearch.port=${your-external-elasticsearch-port} \\\n-  --set jaeger.storage.elasticsearch.user=${your-external-elasticsearch-username} \\\n-  --set jaeger.storage.elasticsearch.password=${your-external-elasticsearch-password} \\\n
                                  "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v015x-or-lower-to-v016x","title":"Upgrade from v0.15.x (or lower) to v0.16.x","text":"

                                  In v0.16.x, a new feature parameter disableRouteContinueEnforce in the vmalertmanagers CRD is used. Therefore, you need to manually run the following command before upgrading insight-server:

                                  kubectl apply --server-side -f https://raw.githubusercontent.com/VictoriaMetrics/operator/v0.33.0/config/crd/bases/operator.victoriametrics.com_vmalertmanagers.yaml --force-conflicts\n

                                  Note

                                  If you are performing an offline installation, after extracting the insight offline package, please run the following command to update CRDs.

                                  kubectl apply --server-side -f insight/dependency-crds --force-conflicts \n
                                  "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v023x-or-lower-to-v024x","title":"Upgrade from v0.23.x (or lower) to v0.24.x","text":"

                                  In v0.24.x, CRDs have been added to the OTEL operator chart. However, helm upgrade does not update CRDs, so you need to manually run the following command:

                                  kubectl apply -f https://raw.githubusercontent.com/open-telemetry/opentelemetry-helm-charts/main/charts/opentelemetry-operator/crds/crd-opentelemetry.io_opampbridges.yaml\n

                                  If you are performing an offline installation, you can find the above CRD yaml file after extracting the insight-agent offline package. After extracting the insight-agent Chart, manually run the following command:

                                  kubectl apply -f charts/agent/crds/crd-opentelemetry.io_opampbridges.yaml\n
                                  "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v019x-or-lower-to-v020x","title":"Upgrade from v0.19.x (or lower) to v0.20.x","text":"

In v0.20.x, Kafka log export configuration has been added, and there have been some adjustments to the log export configuration. Before upgrading insight-agent, please note the parameter changes. The previous logging configuration has been moved to the logging.elasticsearch configuration:

                                  -  --set global.exporters.logging.host \\\n-  --set global.exporters.logging.port \\\n+  --set global.exporters.logging.elasticsearch.host \\\n+  --set global.exporters.logging.elasticsearch.port \\\n
                                  "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v017x-or-lower-to-v018x_1","title":"Upgrade from v0.17.x (or lower) to v0.18.x","text":"

Due to the updated deployment files for Jaeger in v0.18.x, it is important to note the parameter changes before upgrading insight-agent.

                                  +  --set global.exporters.trace.enable=true \\\n-  --set opentelemetry-collector.enabled=true \\\n-  --set opentelemetry-operator.enabled=true \\\n
                                  "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v016x-or-lower-to-v017x","title":"Upgrade from v0.16.x (or lower) to v0.17.x","text":"

In v0.17.x, the kube-prometheus-stack chart version was upgraded from 41.9.1 to 45.28.1, and there were also some field upgrades in the CRDs used, such as the attachMetadata field of ServiceMonitor. Therefore, the following command needs to be run before upgrading insight-agent:

                                  kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --force-conflicts\n

                                  If you are performing an offline installation, you can find the yaml for the above CRD in insight-agent/dependency-crds after extracting the insight-agent offline package.

                                  "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v011x-or-earlier-to-v012x","title":"Upgrade from v0.11.x (or earlier) to v0.12.x","text":"

v0.12.x upgrades the kube-prometheus-stack chart from 39.6.0 to 41.9.1, including prometheus-operator to v0.60.1 and the prometheus-node-exporter chart to v4.3.0. prometheus-node-exporter uses the Kubernetes recommended labels after the upgrade, so you need to delete the node-exporter DaemonSet. prometheus-operator has updated its CRDs, so you need to run the following commands before upgrading insight-agent:

kubectl delete daemonset insight-agent-prometheus-node-exporter -n insight-system\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml --force-conflicts\n

                                  Note

                                  If you are installing offline, you can run the following command to update the CRD after decompressing the insight-agent offline package.

                                  kubectl apply --server-side -f insight-agent/dependency-crds --force-conflicts\n
                                  "},{"location":"en/admin/insight/quickstart/otel/operator.html","title":"Enhance Applications Non-Intrusively with Operators","text":"

                                  Currently, only Java, Node.js, Python, .NET, and Golang support non-intrusive integration through the Operator approach.

                                  "},{"location":"en/admin/insight/quickstart/otel/operator.html#prerequisites","title":"Prerequisites","text":"

                                  Please ensure that the insight-agent is ready. If not, please refer to Install insight-agent for data collection and make sure the following three items are ready:

                                  • Enable trace functionality for insight-agent
                                  • Check if the address and port for trace data are correctly filled
• Ensure that the Pods corresponding to deployment/insight-agent-opentelemetry-operator and deployment/insight-agent-opentelemetry-collector are ready
                                  "},{"location":"en/admin/insight/quickstart/otel/operator.html#install-instrumentation-cr","title":"Install Instrumentation CR","text":"

                                  Tip

                                  Starting from Insight v0.22.0, there is no longer a need to manually install the Instrumentation CR.

                                  Install it in the insight-system namespace. There are some minor differences between different versions.

Insight v0.21.x / Insight v0.20.x / Insight v0.18.x / Insight v0.17.x / Insight v0.16.x
                                  K8S_CLUSTER_UID=$(kubectl get namespace kube-system -o jsonpath='{.metadata.uid}')\nkubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/openinsight-proj/autoinstrumentation-java:1.31.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n      - name: OTEL_K8S_CLUSTER_UID\n        value: $K8S_CLUSTER_UID\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
                                  kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.29.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0-rc.2\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
                                  kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.25.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.37.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.38b0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.1-alpha\nEOF\n
                                  kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.23.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.34.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.33b0\nEOF\n
                                  kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.23.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.34.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.33b0\nEOF\n
                                  "},{"location":"en/admin/insight/quickstart/otel/operator.html#works-with-the-service-mesh-product-mspider","title":"Works with the Service Mesh Product (Mspider)","text":"

If you enable the tracing capability of Mspider (Service Mesh), you need to add an additional environment variable injection configuration:

                                  "},{"location":"en/admin/insight/quickstart/otel/operator.html#the-operation-steps-are-as-follows","title":"The operation steps are as follows","text":"
                                  1. Log in to AI platform, then enter Container Management and select the target cluster.
                                  2. Click CRDs in the left navigation bar, find instrumentations.opentelemetry.io, and enter the details page.
3. Select the insight-system namespace, then edit insight-opentelemetry-autoinstrumentation, and add the following content under spec.env:

                                        - name: OTEL_SERVICE_NAME\n      valueFrom:\n        fieldRef:\n          fieldPath: metadata.labels['app'] \n

                                    The complete example (for Insight v0.21.x) is as follows:

                                    K8S_CLUSTER_UID=$(kubectl get namespace kube-system -o jsonpath='{.metadata.uid}')\nkubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n    - name: OTEL_SERVICE_NAME\n      valueFrom:\n        fieldRef:\n          fieldPath: metadata.labels['app'] \n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/openinsight-proj/autoinstrumentation-java:1.31.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n      - name: OTEL_K8S_CLUSTER_UID\n        value: $K8S_CLUSTER_UID\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
                                  "},{"location":"en/admin/insight/quickstart/otel/operator.html#add-annotations-to-automatically-access-traces","title":"Add annotations to automatically access traces","text":"

After the above is ready, you can integrate traces for the application through pod annotations. OTel currently supports trace integration via annotations. Depending on the service language, different pod annotations need to be added. Each service can add one of two types of annotations:

                                  • Only inject environment variable annotations

There is only one such annotation, which is used to add OTel-related environment variables such as the trace reporting address, the ID of the cluster where the container is located, and the namespace (this annotation is very useful when the application's language has no automatic probe support)

                                    instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

                                    The value is divided into two parts by /, the first value (insight-system) is the namespace of the CR installed in the previous step, and the second value (insight-opentelemetry-autoinstrumentation) is the name of the CR.

                                  • Automatic probe injection and environment variable injection annotations

There are currently 4 such annotations, corresponding to 4 different programming languages: java, nodejs, python, dotnet. After adding one, automatic probes and default OTel environment variables are injected into the first container under the pod spec:

Java application / NodeJs application / Python application / Dotnet application / Golang application
                                    instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                    instrumentation.opentelemetry.io/inject-nodejs: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                    instrumentation.opentelemetry.io/inject-python: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                    instrumentation.opentelemetry.io/inject-dotnet: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

Since Go auto-instrumentation requires OTEL_GO_AUTO_TARGET_EXE to be set, you must provide a valid executable path through annotations or the Instrumentation resource. Failing to set this value aborts the Go auto-instrumentation injection, causing the trace integration to fail.

                                    instrumentation.opentelemetry.io/inject-go: \"insight-system/insight-opentelemetry-autoinstrumentation\"\ninstrumentation.opentelemetry.io/otel-go-auto-target-exe: \"/path/to/container/executable\"\n

Go auto-instrumentation also requires elevated permissions. The following permissions are set automatically and are required.

                                    securityContext:\n  privileged: true\n  runAsUser: 0\n

                                  Tip

                                  The OpenTelemetry Operator automatically adds some OTel-related environment variables when injecting probes and also supports overriding these variables. The priority order for overriding these environment variables is as follows:

                                  original container env vars -> language specific env vars -> common env vars -> instrument spec configs' vars\n

However, it is important to avoid manually overriding OTEL_RESOURCE_ATTRIBUTES_NODE_NAME. This variable serves as an identifier within the operator to determine whether a pod has already been injected with a probe. Manually adding this variable may prevent the probe from being injected successfully.

                                  "},{"location":"en/admin/insight/quickstart/otel/operator.html#automatic-injection-demo","title":"Automatic injection Demo","text":"

Note that the annotation is added to the pod template, i.e. under spec.template.metadata.annotations of the Deployment.

                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-app\n  labels:\n    app: my-app\nspec:\n  selector:\n    matchLabels:\n      app: my-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-app\n      annotations:\n        instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n    spec:\n      containers:\n      - name: myapp\n        image: jaegertracing/vertx-create-span:operator-e2e-tests\n        ports:\n          - containerPort: 8080\n            protocol: TCP\n

                                  The final generated YAML is as follows:

                                  apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-deployment-with-sidecar-565bd877dd-nqkk6\n  generateName: my-deployment-with-sidecar-565bd877dd-\n  namespace: default\n  uid: aa89ca0d-620c-4d20-8bc1-37d67bad4ea4\n  resourceVersion: '2668986'\n  creationTimestamp: '2022-04-08T05:58:48Z'\n  labels:\n    app: my-pod-with-sidecar\n    pod-template-hash: 565bd877dd\n  annotations:\n    cni.projectcalico.org/containerID: 234eae5e55ea53db2a4bc2c0384b9a1021ed3908f82a675e4a92a49a7e80dd61\n    cni.projectcalico.org/podIP: 192.168.134.133/32\n    cni.projectcalico.org/podIPs: 192.168.134.133/32\n    instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\nspec:\n  volumes:\n    - name: kube-api-access-sp2mz\n      projected:\n        sources:\n          - serviceAccountToken:\n              expirationSeconds: 3607\n              path: token\n          - configMap:\n              name: kube-root-ca.crt\n              items:\n                - key: ca.crt\n                  path: ca.crt\n          - downwardAPI:\n              items:\n                - path: namespace\n                  fieldRef:\n                    apiVersion: v1\n                    fieldPath: metadata.namespace\n        defaultMode: 420\n    - name: opentelemetry-auto-instrumentation\n      emptyDir: {}\n  initContainers:\n    - name: opentelemetry-auto-instrumentation\n      image: >-\n        ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java\n      command:\n        - cp\n        - /javaagent.jar\n        - /otel-auto-instrumentation/javaagent.jar\n      resources: {}\n      volumeMounts:\n        - name: opentelemetry-auto-instrumentation\n          mountPath: /otel-auto-instrumentation\n        - name: kube-api-access-sp2mz\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  containers:\n    - name: myapp\n      image: ghcr.io/pavolloffay/spring-petclinic:latest\n      env:\n        - name: OTEL_JAVAAGENT_DEBUG\n          value: 'true'\n        - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n          value: 'true'\n        - name: SPLUNK_PROFILER_ENABLED\n          value: 'false'\n        - name: JAVA_TOOL_OPTIONS\n          value: ' -javaagent:/otel-auto-instrumentation/javaagent.jar'\n        - name: OTEL_TRACES_EXPORTER\n          value: otlp\n        - name: OTEL_EXPORTER_OTLP_ENDPOINT\n          value: http://insight-agent-opentelemetry-collector.svc.cluster.local:4317\n        - name: OTEL_EXPORTER_OTLP_TIMEOUT\n          value: '20'\n        - name: OTEL_TRACES_SAMPLER\n          value: parentbased_traceidratio\n        - name: OTEL_TRACES_SAMPLER_ARG\n          value: '0.85'\n        - name: SPLUNK_TRACE_RESPONSE_HEADER_ENABLED\n          value: 'true'\n        - name: OTEL_SERVICE_NAME\n          value: my-deployment-with-sidecar\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.name\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_UID\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.uid\n        - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: spec.nodeName\n        - name: 
OTEL_RESOURCE_ATTRIBUTES\n          value: >-\n            k8s.container.name=myapp,k8s.deployment.name=my-deployment-with-sidecar,k8s.deployment.uid=8de6929d-dda0-436c-bca1-604e9ca7ea4e,k8s.namespace.name=default,k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME),k8s.pod.uid=$(OTEL_RESOURCE_ATTRIBUTES_POD_UID),k8s.replicaset.name=my-deployment-with-sidecar-565bd877dd,k8s.replicaset.uid=190d5f6e-ba7f-4794-b2e6-390b5879a6c4\n        - name: OTEL_PROPAGATORS\n          value: jaeger,b3\n      resources: {}\n      volumeMounts:\n        - name: kube-api-access-sp2mz\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n        - name: opentelemetry-auto-instrumentation\n          mountPath: /otel-auto-instrumentation\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  restartPolicy: Always\n  terminationGracePeriodSeconds: 30\n  dnsPolicy: ClusterFirst\n  serviceAccountName: default\n  serviceAccount: default\n  nodeName: k8s-master3\n  securityContext:\n    runAsUser: 1000\n    runAsGroup: 3000\n    fsGroup: 2000\n  schedulerName: default-scheduler\n  tolerations:\n    - key: node.kubernetes.io/not-ready\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n    - key: node.kubernetes.io/unreachable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n  priority: 0\n  enableServiceLinks: true\n  preemptionPolicy: PreemptLowerPriority\n
                                  "},{"location":"en/admin/insight/quickstart/otel/operator.html#trace-query","title":"Trace query","text":"

To query the services that have been integrated, refer to Trace Query.

                                  "},{"location":"en/admin/insight/quickstart/otel/otel.html","title":"Use OTel to provide the application observability","text":"

Enhancement is the process of enabling application code to generate telemetry data, i.e. something that helps you monitor or measure the performance and status of your application.

OpenTelemetry is a leading open source project providing instrumentation libraries for major programming languages and popular frameworks. It is a project under the Cloud Native Computing Foundation and is supported by the vast resources of the community. It provides a standardized data format for collected data without tying you to any specific vendor.

                                  Insight supports OpenTelemetry for application instrumentation to enhance your applications.

                                  This guide introduces the basic concepts of telemetry enhancement using OpenTelemetry. OpenTelemetry also has an ecosystem of libraries, plugins, integrations, and other useful tools to extend it. You can find these resources at the OTel Registry.

                                  You can use any open standard library for telemetry enhancement and use Insight as an observability backend to ingest, analyze, and visualize data.

                                   To enhance your code, you can use the language-specific instrumentation provided by OpenTelemetry:

                                   Insight currently provides an easy way to enhance .NET, Node.js, Java, Python, and Golang applications with OpenTelemetry. Please follow the guidelines below.

                                  "},{"location":"en/admin/insight/quickstart/otel/otel.html#trace-enhancement","title":"Trace Enhancement","text":"
                                   • Best practices for integrating traces: Application Non-Intrusive Enhancement via Operator
                                   • Manual instrumentation, using Go as an example: Enhance Go application with OpenTelemetry SDK
                                   • Using eBPF to implement non-intrusive auto-instrumentation in Go (experimental feature)
                                  "},{"location":"en/admin/insight/quickstart/otel/send_tracing_to_insight.html","title":"Sending Trace Data to Insight","text":"

                                  This document describes how customers can send trace data to Insight on their own. It mainly includes the following two scenarios:

                                  1. Customer apps report traces to Insight through OTEL Agent/SDK
                                   2. Forwarding traces to Insight through the OpenTelemetry Collector (OTel Collector)

                                   In each cluster where Insight Agent is installed, there is an insight-agent-otel-col component used to receive trace data from that cluster. This component is the entry point for trace ingestion, so you first need to obtain its address. You can get the address of the OpenTelemetry Collector in the cluster through the AI platform interface, for example insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317.
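
                                   For example, if you build the exporter in code instead of via environment variables, you can pass this address directly. Below is a minimal Go sketch assuming the otlptracegrpc exporter from the OpenTelemetry Go SDK; the variable names are illustrative:

                                   // A minimal sketch: pass the collector address directly when building the exporter.\nexp, err := otlptracegrpc.New(ctx,\n    otlptracegrpc.WithEndpoint(\"insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\"),\n    otlptracegrpc.WithInsecure(),\n)\nif err != nil {\n    // handle the exporter creation error\n}\n_ = exp\n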

                                   In addition, there are slight differences between the reporting methods:

                                  "},{"location":"en/admin/insight/quickstart/otel/send_tracing_to_insight.html#customer-apps-report-traces-to-insight-through-otel-agentsdk","title":"Customer apps report traces to Insight through OTEL Agent/SDK","text":"

                                  To successfully report trace data to Insight and display it properly, it is recommended to provide the required metadata (Resource Attributes) for OTLP through the following environment variables. There are two ways to achieve this:

                                  • Manually add them to the deployment YAML file, for example:

                                    ...\n- name: OTEL_EXPORTER_OTLP_ENDPOINT\n  value: \"http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\"\n- name: \"OTEL_SERVICE_NAME\"\n  value: my-java-app-name\n- name: \"OTEL_K8S_NAMESPACE\"\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: metadata.namespace\n- name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: spec.nodeName\n- name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: metadata.name\n- name: OTEL_RESOURCE_ATTRIBUTES\n  value: \"k8s.namespace.name=$(OTEL_K8S_NAMESPACE),k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME)\"\n
                                  • Use the automatic injection capability of Insight Agent to inject the metadata (Resource Attributes)

                                     Ensure that Insight Agent is working properly and that the Instrumentation CR has been installed; then you only need to add the following annotation to the Pod:

                                    instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

                                    For example:

                                     apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-with-auto-instrumentation\nspec:\n  selector:\n    matchLabels:\n      app.kubernetes.io/name: my-deployment-with-auto-instrumentation-kubernetes\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: my-deployment-with-auto-instrumentation-kubernetes\n      annotations:\n        sidecar.opentelemetry.io/inject: \"false\"\n        instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                  "},{"location":"en/admin/insight/quickstart/otel/send_tracing_to_insight.html#forwarding-traces-to-insight-through-opentelemetry-collector","title":"Forwarding traces to Insight through Opentelemetry Collector","text":"

                                   After ensuring that the application has the metadata mentioned above, you only need to add an OTLP Exporter to your OpenTelemetry Collector configuration to forward the trace data to the Insight Agent OpenTelemetry Collector. Below is an example OpenTelemetry Collector configuration file:

                                   ...\nexporters:\n  otlp/insight:\n    endpoint: insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\nservice:\n  ...\n  pipelines:\n    ...\n    traces:\n      exporters:\n        - otlp/insight\n
                                  "},{"location":"en/admin/insight/quickstart/otel/send_tracing_to_insight.html#references","title":"References","text":"
                                  • Enhancing Applications Non-intrusively with the Operator
                                  • Achieving Observability with OTel
                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html","title":"Enhance Go applications with OTel SDK","text":"

                                  This page contains instructions on how to set up OpenTelemetry enhancements in a Go application.

                                  OpenTelemetry, also known simply as OTel, is an open-source observability framework that helps generate and collect telemetry data: traces, metrics, and logs in Go apps.

                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#enhance-go-apps-with-the-opentelemetry-sdk","title":"Enhance Go apps with the OpenTelemetry SDK","text":""},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#install-related-dependencies","title":"Install related dependencies","text":"

                                   Dependencies related to the OpenTelemetry exporter and SDK must be installed first. If you are using another request router, please refer to Request Routing. After changing into the application source folder, run the following command:

                                  go get go.opentelemetry.io/otel@v1.8.0 \\\n  go.opentelemetry.io/otel/trace@v1.8.0 \\\n  go.opentelemetry.io/otel/sdk@v1.8.0 \\\n  go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin@v0.33.0 \\\n  go.opentelemetry.io/otel/exporters/otlp/otlptrace@v1.7.0 \\\n  go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc@v1.4.1\n
                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#create-an-initialization-feature-using-the-opentelemetry-sdk","title":"Create an initialization feature using the OpenTelemetry SDK","text":"

                                   In order for an application to be able to send data, a function is required to initialize OpenTelemetry. Add the following code snippet to the main.go file:

                                   import (\n    \"context\"\n    \"os\"\n    \"time\"\n\n    \"go.opentelemetry.io/otel\"\n    \"go.opentelemetry.io/otel/exporters/otlp/otlptrace\"\n    \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc\"\n    \"go.opentelemetry.io/otel/propagation\"\n    \"go.opentelemetry.io/otel/sdk/resource\"\n    sdktrace \"go.opentelemetry.io/otel/sdk/trace\"\n    semconv \"go.opentelemetry.io/otel/semconv/v1.7.0\"\n    \"go.uber.org/zap\"\n    \"google.golang.org/grpc\"\n)\n\nvar tracerExp *otlptrace.Exporter\n\nfunc retryInitTracer() func() {\n    var shutdown func()\n    go func() {\n        for {\n            // otel reconnects and re-sends spans when the otel collector recovers, so we don't need to re-init the tracer exporter.\n            if tracerExp == nil {\n                shutdown = initTracer()\n            } else {\n                break\n            }\n            time.Sleep(time.Minute * 5)\n        }\n    }()\n    return shutdown\n}\n\nfunc initTracer() func() {\n    // temporarily set timeout to 10s\n    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n    defer cancel()\n\n    serviceName, ok := os.LookupEnv(\"OTEL_SERVICE_NAME\")\n    if !ok {\n        serviceName = \"server_name\"\n        os.Setenv(\"OTEL_SERVICE_NAME\", serviceName)\n    }\n    otelAgentAddr, ok := os.LookupEnv(\"OTEL_EXPORTER_OTLP_ENDPOINT\")\n    if !ok {\n        otelAgentAddr = \"http://localhost:4317\"\n        os.Setenv(\"OTEL_EXPORTER_OTLP_ENDPOINT\", otelAgentAddr)\n    }\n    zap.S().Infof(\"OTLP Trace connect to: %s with service name: %s\", otelAgentAddr, serviceName)\n\n    traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure(), otlptracegrpc.WithDialOption(grpc.WithBlock()))\n    if err != nil {\n        handleErr(err, \"OTLP Trace gRPC Creation\")\n        return nil\n    }\n\n    tracerProvider := sdktrace.NewTracerProvider(\n        sdktrace.WithBatcher(traceExporter),\n        sdktrace.WithSampler(sdktrace.AlwaysSample()),\n        sdktrace.WithResource(resource.NewWithAttributes(semconv.SchemaURL)),\n    )\n\n    otel.SetTracerProvider(tracerProvider)\n    otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))\n\n    tracerExp = traceExporter\n    return func() {\n        // Shutdown will flush any remaining spans and shut down the exporter.\n        handleErr(tracerProvider.Shutdown(ctx), \"failed to shutdown TracerProvider\")\n    }\n}\n\nfunc handleErr(err error, message string) {\n    if err != nil {\n        zap.S().Errorf(\"%s: %v\", message, err)\n    }\n}\n
                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#initialize-tracker-in-maingo","title":"Initialize tracker in main.go","text":"

                                   Modify main.go to initialize the tracer in the main function. When your service shuts down, you should call TracerProvider.Shutdown() to ensure all spans are exported. The service makes this call as a deferred function in main:

                                  func main() {\n    // start otel tracing\n    if shutdown := retryInitTracer(); shutdown != nil {\n            defer shutdown()\n        }\n    ......\n}\n
                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#add-opentelemetry-gin-middleware-to-the-application","title":"Add OpenTelemetry Gin middleware to the application","text":"

                                  Configure Gin to use the middleware by adding the following line to main.go :

                                  import (\n    ....\n  \"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin\"\n)\n\nfunc main() {\n    ......\n    r := gin.Default()\n    r.Use(otelgin.Middleware(\"my-app\"))\n    ......\n}\n
                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#run-the-application","title":"Run the application","text":"
                                  • Local debugging and running

                                    Note: This step is only used for local development and debugging. In the production environment, the Operator will automatically complete the injection of the following environment variables.

                                     The above steps complete the initialization of the SDK. If you need to develop and debug locally, you must first obtain the address of insight-agent-opentelemetry-collector in the insight-system namespace, for example: insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317 .

                                    Therefore, you can add the following environment variables when you start the application locally:

                                    OTEL_SERVICE_NAME=my-golang-app OTEL_EXPORTER_OTLP_ENDPOINT=http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317 go run main.go...\n
                                  • Running in a production environment

                                     Please refer to the Only injecting environment variable annotations section of Achieving non-intrusive enhancement of applications through Operators to add annotations to the deployment YAML:

                                    instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

                                     If you cannot use annotations, you can manually add the following environment variables to the deployment YAML:

                                   ...\nenv:\n  - name: OTEL_EXPORTER_OTLP_ENDPOINT\n    value: 'http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317'\n  - name: OTEL_SERVICE_NAME\n    value: \"your deployment name\" # modify it\n  - name: OTEL_K8S_NAMESPACE\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: metadata.namespace\n  - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: spec.nodeName\n  - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: metadata.name\n  - name: OTEL_RESOURCE_ATTRIBUTES\n    value: 'k8s.namespace.name=$(OTEL_K8S_NAMESPACE),k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME)'\n...\n
                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#request-routing","title":"Request Routing","text":""},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#opentelemetry-gingonic-enhancements","title":"OpenTelemetry gin/gonic enhancements","text":"
                                   // Add one line to your import() stanza depending upon your request router:\nmiddleware \"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin\"\n

                                  Then inject the OpenTelemetry middleware:

                                   router.Use(middleware.Middleware(\"my-app\"))\n
                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#opentelemetry-gorillamux-enhancements","title":"OpenTelemetry gorillamux enhancements","text":"
                                   // Add one line to your import() stanza depending upon your request router:\nmiddleware \"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux\"\n

                                  Then inject the OpenTelemetry middleware:

                                   router.Use(middleware.Middleware(\"my-app\"))\n
                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#grpc-enhancements","title":"gRPC enhancements","text":"

                                   Likewise, OpenTelemetry can help you automatically instrument gRPC requests. To instrument any gRPC server you have, add the interceptors to the server's instantiation.

                                  import (\n  grpcotel \"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc\"\n)\nfunc main() {\n  [...]\n\n    s := grpc.NewServer(\n        grpc.UnaryInterceptor(grpcotel.UnaryServerInterceptor()),\n        grpc.StreamInterceptor(grpcotel.StreamServerInterceptor()),\n    )\n}\n

                                   Note that if your program uses a gRPC client to call third-party services, you also need to add the interceptors to the gRPC client:

                                       [...]\n\n    // grpcotel is the alias imported in the server example above\n    conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()),\n        grpc.WithUnaryInterceptor(grpcotel.UnaryClientInterceptor()),\n        grpc.WithStreamInterceptor(grpcotel.StreamClientInterceptor()),\n    )\n
                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#if-not-using-request-routing","title":"If not using request routing","text":"
                                  import (\n  \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\"\n)\n

                                   Wherever you pass an http.Handler to a ServeMux, wrap the handler function. For example, you would make the following replacements:

                                  - mux.Handle(\"/path\", h)\n+ mux.Handle(\"/path\", otelhttp.NewHandler(h, \"description of path\"))\n---\n- mux.Handle(\"/path\", http.HandlerFunc(f))\n+ mux.Handle(\"/path\", otelhttp.NewHandler(http.HandlerFunc(f), \"description of path\"))\n

                                   In this way, you can ensure that each handler wrapped with otelhttp will automatically collect its metadata and start the proper trace.
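
                                   Below is a minimal, self-contained sketch of this pattern; the /hello route and handler are illustrative:

                                   package main\n\nimport (\n    \"fmt\"\n    \"net/http\"\n\n    \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\"\n)\n\nfunc main() {\n    hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n        fmt.Fprintln(w, \"hello\")\n    })\n    mux := http.NewServeMux()\n    // Every request through the wrapped handler starts a span named \"hello\".\n    mux.Handle(\"/hello\", otelhttp.NewHandler(hello, \"hello\"))\n    _ = http.ListenAndServe(\":8080\", mux)\n}\n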

                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#database-enhancements","title":"database enhancements","text":""},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#golang-gorm","title":"Golang Gorm","text":"

                                  The OpenTelemetry community has also developed middleware for database access libraries, such as Gorm:

                                   import (\n    \"github.com/uptrace/opentelemetry-go-extra/otelgorm\"\n    semconv \"go.opentelemetry.io/otel/semconv/v1.24.0\"\n    \"gorm.io/driver/sqlite\"\n    \"gorm.io/gorm\"\n)\n\ndb, err := gorm.Open(sqlite.Open(\"file::memory:?cache=shared\"), &gorm.Config{})\nif err != nil {\n    panic(err)\n}\n\n// Missing WithDBName or WithAttributes can lead to incomplete display of the database topology.\notelPlugin := otelgorm.NewPlugin(otelgorm.WithDBName(\"mydb\"),\n    otelgorm.WithAttributes(semconv.ServerAddress(\"memory\")))\nif err := db.Use(otelPlugin); err != nil {\n    panic(err)\n}\n
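
                                   Note that the plugin only attaches SQL spans to a trace when the request context is propagated into Gorm. A short sketch, assuming a hypothetical User model and an incoming request context ctx:

                                   var users []User\n// Pass the request context so the query span joins the current trace.\nif err := db.WithContext(ctx).Find(&users).Error; err != nil {\n    panic(err)\n}\n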

                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#custom-span","title":"Custom Span","text":"

                                   In many cases, the middleware provided by OpenTelemetry cannot record functions that are called internally, so we need to create custom spans to record them:

                                   ...\n    _, span := otel.Tracer(\"GetServiceDetail\").Start(ctx,\n        \"spanMetricDao.GetServiceDetail\",\n        trace.WithSpanKind(trace.SpanKindInternal))\n    defer span.End()\n...\n
                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#add-custom-properties-and-custom-events-to-span","title":"Add custom properties and custom events to span","text":"

                                   It is also possible to set custom attributes or tags on a span. To add custom attributes and events, follow these steps:

                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#import-tracking-and-property-libraries","title":"Import Tracking and Property Libraries","text":"
                                  import (\n    ...\n    \"go.opentelemetry.io/otel/attribute\"\n    \"go.opentelemetry.io/otel/trace\"\n)\n
                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#get-the-current-span-from-the-context","title":"Get the current Span from the context","text":"
                                  span := trace.SpanFromContext(c.Request.Context())\n
                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#set-properties-in-the-current-span","title":"Set properties in the current Span","text":"
                                   span.SetAttributes(attribute.String(\"controller\", \"books\"))\n
                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#add-an-event-to-the-current-span","title":"Add an Event to the current Span","text":"

                                  Adding span events is done using AddEvent on the span object.

                                  span.AddEvent(msg)\n
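
                                   Events can also carry their own attributes via trace.WithAttributes; the event name and attribute key below are illustrative:

                                   // \"cache miss\" and cache.key are illustrative; key is assumed to be a string in scope.\nspan.AddEvent(\"cache miss\", trace.WithAttributes(\n    attribute.String(\"cache.key\", key),\n))\n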
                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#log-errors-and-exceptions","title":"Log errors and exceptions","text":"
                                  import \"go.opentelemetry.io/otel/codes\"\n\n// Get the current span\nspan := trace.SpanFromContext(ctx)\n\n// RecordError will automatically convert an error into a span even\nspan.RecordError(err)\n\n// Flag this span as an error\nspan.SetStatus(codes.Error, \"internal error\")\n
                                  "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#references","title":"References","text":"

                                   For demo examples, please refer to:

                                  • otel-grpc-examples
                                  • opentelemetry-demo/productcatalogservice
                                  • opentelemetry-collector-contrib/demo
                                  "},{"location":"en/admin/insight/quickstart/otel/golang/meter.html","title":"Exposing Metrics for Applications Using OpenTelemetry SDK","text":"

                                   This article is intended for users who wish to evaluate or explore the evolving OTLP metrics.

                                   The OpenTelemetry project requires that APIs and SDKs emit data in the OpenTelemetry Protocol (OTLP) for supported languages.

                                  "},{"location":"en/admin/insight/quickstart/otel/golang/meter.html#for-golang-applications","title":"For Golang Applications","text":"

                                   A Golang application can expose runtime metrics through the SDK by adding the following methods to enable the metrics exporter:

                                  "},{"location":"en/admin/insight/quickstart/otel/golang/meter.html#install-required-dependencies","title":"Install Required Dependencies","text":"

                                  Navigate to your application\u2019s source folder and run the following command:

                                  go get go.opentelemetry.io/otel \\\n  go.opentelemetry.io/otel/attribute \\\n  go.opentelemetry.io/otel/exporters/prometheus \\\n  go.opentelemetry.io/otel/metric/global \\\n  go.opentelemetry.io/otel/metric/instrument \\\n  go.opentelemetry.io/otel/sdk/metric\n
                                  "},{"location":"en/admin/insight/quickstart/otel/golang/meter.html#create-an-initialization-function-using-otel-sdk","title":"Create an Initialization Function Using OTel SDK","text":"
                                   import (\n    .....\n\n    \"go.opentelemetry.io/otel/attribute\"\n    otelPrometheus \"go.opentelemetry.io/otel/exporters/prometheus\"\n    \"go.opentelemetry.io/otel/metric/global\"\n    \"go.opentelemetry.io/otel/metric/instrument\"\n    \"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram\"\n    controller \"go.opentelemetry.io/otel/sdk/metric/controller/basic\"\n    \"go.opentelemetry.io/otel/sdk/metric/export/aggregation\"\n    processor \"go.opentelemetry.io/otel/sdk/metric/processor/basic\"\n    selector \"go.opentelemetry.io/otel/sdk/metric/selector/simple\"\n)\n\nfunc (s *insightServer) initMeter() *otelPrometheus.Exporter {\n    s.meter = global.Meter(\"xxx\")\n\n    config := otelPrometheus.Config{\n        DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50},\n        Gatherer:                   prometheus.DefaultGatherer,\n        Registry:                   prometheus.NewRegistry(),\n        Registerer:                 prometheus.DefaultRegisterer,\n    }\n\n    c := controller.New(\n        processor.NewFactory(\n            selector.NewWithHistogramDistribution(\n                histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),\n            ),\n            aggregation.CumulativeTemporalitySelector(),\n            processor.WithMemory(true),\n        ),\n    )\n\n    exporter, err := otelPrometheus.New(config, c)\n    if err != nil {\n        zap.S().Panicf(\"failed to initialize prometheus exporter %v\", err)\n    }\n\n    global.SetMeterProvider(exporter.MeterProvider())\n\n    http.HandleFunc(\"/metrics\", exporter.ServeHTTP)\n\n    go func() {\n        _ = http.ListenAndServe(fmt.Sprintf(\":%d\", 8888), nil)\n    }()\n\n    zap.S().Info(\"Prometheus server running on \", fmt.Sprintf(\":%d\", 8888))\n    return exporter\n}\n

                                  The above method will expose a metrics endpoint for your application at: http://localhost:8888/metrics.

                                  Next, initialize it in main.go:

                                  func main() {\n    // ...\n    tp := initMeter()\n    // ...\n}\n

                                  If you want to add custom metrics, you can refer to the following:

                                   // exposeLoggingMetric exposes a metric like \"insight_log_total{} 1\"\nfunc (s *insightServer) exposeLoggingMetric(lserver *log.LogService) {\n    s.meter = global.Meter(\"insight.io/basic\")\n\n    var lock sync.Mutex\n    logCounter, err := s.meter.AsyncFloat64().Counter(\"insight_log_total\")\n    if err != nil {\n        zap.S().Panicf(\"failed to initialize instrument: %v\", err)\n    }\n\n    _ = s.meter.RegisterCallback([]instrument.Asynchronous{logCounter}, func(ctx context.Context) {\n        lock.Lock()\n        defer lock.Unlock()\n        count, err := lserver.Count(ctx)\n        if err == nil && count != -1 {\n            logCounter.Observe(ctx, float64(count))\n        }\n    })\n}\n

                                  Then, call this method in main.go:

                                  // ...\ns.exposeLoggingMetric(lservice)\n// ...\n

                                  You can check if your metrics are working correctly by visiting http://localhost:8888/metrics.
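
                                   Besides asynchronous callbacks, the same pre-1.0 metric API used above also provides synchronous instruments. A sketch that counts handled requests; the instrument name is illustrative:

                                   // insight_request_total is an illustrative name; this uses the same pre-1.0 metric API as above.\nrequestCounter, err := s.meter.SyncInt64().Counter(\"insight_request_total\")\nif err != nil {\n    zap.S().Panicf(\"failed to initialize instrument: %v\", err)\n}\n// Add one for each handled request.\nrequestCounter.Add(ctx, 1)\n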

                                  "},{"location":"en/admin/insight/quickstart/otel/golang/meter.html#for-java-applications","title":"For Java Applications","text":"

                                  For Java applications, you can directly expose JVM-related metrics by using the OpenTelemetry agent with the following environment variable:

                                  OTEL_METRICS_EXPORTER=prometheus\n

                                  You can then check your metrics at http://localhost:8888/metrics.

                                  Next, combine it with a Prometheus ServiceMonitor to complete the metrics integration. If you want to expose custom metrics, please refer to opentelemetry-java-docs/prometheus.

                                  The process is mainly divided into two steps:

                                  • Create a meter provider and specify Prometheus as the exporter.
                                  /*\n * Copyright The OpenTelemetry Authors\n * SPDX-License-Identifier: Apache-2.0\n */\n\npackage io.opentelemetry.example.prometheus;\n\nimport io.opentelemetry.api.metrics.MeterProvider;\nimport io.opentelemetry.exporter.prometheus.PrometheusHttpServer;\nimport io.opentelemetry.sdk.metrics.SdkMeterProvider;\nimport io.opentelemetry.sdk.metrics.export.MetricReader;\n\npublic final class ExampleConfiguration {\n\n  /**\n   * Initializes the Meter SDK and configures the Prometheus collector with all default settings.\n   *\n   * @param prometheusPort the port to open up for scraping.\n   * @return A MeterProvider for use in instrumentation.\n   */\n  static MeterProvider initializeOpenTelemetry(int prometheusPort) {\n    MetricReader prometheusReader = PrometheusHttpServer.builder().setPort(prometheusPort).build();\n\n    return SdkMeterProvider.builder().registerMetricReader(prometheusReader).build();\n  }\n}\n
                                  • Create a custom meter and start the HTTP server.
                                  package io.opentelemetry.example.prometheus;\n\nimport io.opentelemetry.api.common.Attributes;\nimport io.opentelemetry.api.metrics.Meter;\nimport io.opentelemetry.api.metrics.MeterProvider;\nimport java.util.concurrent.ThreadLocalRandom;\n\n/**\n * Example of using the PrometheusHttpServer to convert OTel metrics to Prometheus format and expose\n * these to a Prometheus instance via a HttpServer exporter.\n *\n * <p>A Gauge is used to periodically measure how many incoming messages are awaiting processing.\n * The Gauge callback gets executed every collection interval.\n */\npublic final class PrometheusExample {\n  private long incomingMessageCount;\n\n  public PrometheusExample(MeterProvider meterProvider) {\n    Meter meter = meterProvider.get(\"PrometheusExample\");\n    meter\n        .gaugeBuilder(\"incoming.messages\")\n        .setDescription(\"No of incoming messages awaiting processing\")\n        .setUnit(\"message\")\n        .buildWithCallback(result -> result.record(incomingMessageCount, Attributes.empty()));\n  }\n\n  void simulate() {\n    for (int i = 500; i > 0; i--) {\n      try {\n        System.out.println(\n            i + \" Iterations to go, current incomingMessageCount is:  \" + incomingMessageCount);\n        incomingMessageCount = ThreadLocalRandom.current().nextLong(100);\n        Thread.sleep(1000);\n      } catch (InterruptedException e) {\n        // ignored here\n      }\n    }\n  }\n\n  public static void main(String[] args) {\n    int prometheusPort = 8888;\n\n    // It is important to initialize the OpenTelemetry SDK as early as possible in your process.\n    MeterProvider meterProvider = ExampleConfiguration.initializeOpenTelemetry(prometheusPort);\n\n    PrometheusExample prometheusExample = new PrometheusExample(meterProvider);\n\n    prometheusExample.simulate();\n\n    System.out.println(\"Exiting\");\n  }\n}\n

                                  After running the Java application, you can check if your metrics are working correctly by visiting http://localhost:8888/metrics.

                                  "},{"location":"en/admin/insight/quickstart/otel/golang/meter.html#insight-collecting-metrics","title":"Insight Collecting Metrics","text":"

                                   Lastly, note that your application now exposes metrics; you still need Insight to collect them.

                                  The recommended way to expose metrics is via ServiceMonitor or PodMonitor.

                                  "},{"location":"en/admin/insight/quickstart/otel/golang/meter.html#creating-servicemonitorpodmonitor","title":"Creating ServiceMonitor/PodMonitor","text":"

                                  The added ServiceMonitor/PodMonitor needs to have the label operator.insight.io/managed-by: insight for the Operator to recognize it:

                                  apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: example-app\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  selector:\n    matchLabels:\n      app: example-app\n  endpoints:\n  - port: web\n  namespaceSelector:\n    any: true\n
                                  "},{"location":"en/admin/insight/quickstart/otel/java/index.html","title":"Start Monitoring Java Applications","text":"
                                   1. For integrating and monitoring Java application traces, please refer to the document Implementing Non-Intrusive Enhancements for Applications via Operator, which explains how to automatically integrate traces through annotations.

                                   2. Monitoring the JVM of Java applications: how Java applications that have already exposed JVM metrics, and those that have not, can connect to Insight for observability.

                                  3. If your Java application has not yet started exposing JVM metrics, you can refer to the following documents:

                                    • Exposing JVM Monitoring Metrics Using JMX Exporter
                                    • Exposing JVM Monitoring Metrics Using OpenTelemetry Java Agent
                                  4. If your Java application has already exposed JVM metrics, you can refer to the following document:

                                    • Connecting Existing JVM Metrics of Java Applications to Observability
                                   5. Writing TraceId and SpanId into Java Application Logs to correlate trace data with log data.

                                  "},{"location":"en/admin/insight/quickstart/otel/java/mdc.html","title":"Writing TraceId and SpanId into Java Application Logs","text":"

                                  This article explains how to automatically write TraceId and SpanId into Java application logs using OpenTelemetry. By including TraceId and SpanId in your logs, you can correlate distributed tracing data with log data, enabling more efficient fault diagnosis and performance analysis.

                                  "},{"location":"en/admin/insight/quickstart/otel/java/mdc.html#supported-logging-libraries","title":"Supported Logging Libraries","text":"

                                  For more information, please refer to the Logger MDC auto-instrumentation.

                                   • Log4j 1: automatic instrumentation supports versions 1.2+; manual instrumentation requires no extra dependency
                                   • Log4j 2: automatic instrumentation supports versions 2.7+; manual instrumentation requires opentelemetry-log4j-context-data-2.17-autoconfigure
                                   • Logback: automatic instrumentation supports versions 1.0+; manual instrumentation requires opentelemetry-logback-mdc-1.0
                                  "},{"location":"en/admin/insight/quickstart/otel/java/mdc.html#using-logback-spring-boot-project","title":"Using Logback (Spring Boot Project)","text":"

                                  Spring Boot projects come with a built-in logging framework and use Logback as the default logging implementation. If your Java project is a Spring Boot project, you can write TraceId into logs with minimal configuration.

                                  Set logging.pattern.level in application.properties, adding %mdc{trace_id} and %mdc{span_id} to the logs.

                                   logging.pattern.level=trace_id=%mdc{trace_id} span_id=%mdc{span_id} %5p ....omitted...\n

                                  Here is an example of the logs:

                                  2024-06-26 10:56:31.200 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.a.c.c.C.[Tomcat].[localhost].[/]       : Initializing Spring DispatcherServlet 'dispatcherServlet'\n2024-06-26 10:56:31.201 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.s.web.servlet.DispatcherServlet        : Initializing Servlet 'dispatcherServlet'\n2024-06-26 10:56:31.209 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.s.web.servlet.DispatcherServlet        : Completed initialization in 8 ms\n2024-06-26 10:56:31.296 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=5743699405074f4e  INFO 53724 --- [nio-8081-exec-1] com.example.httpserver.ot.OTServer       : hello world\n
                                  "},{"location":"en/admin/insight/quickstart/otel/java/mdc.html#using-log4j2","title":"Using Log4j2","text":"
                                  1. Add OpenTelemetry Log4j2 dependency in pom.xml:

                                    Tip

                                    Please replace OPENTELEMETRY_VERSION with the latest version.

                                    <dependencies>\n  <dependency>\n    <groupId>io.opentelemetry.instrumentation</groupId>\n    <artifactId>opentelemetry-log4j-context-data-2.17-autoconfigure</artifactId>\n    <version>OPENTELEMETRY_VERSION</version>\n    <scope>runtime</scope>\n  </dependency>\n</dependencies>\n
                                  2. Modify the log4j2.xml configuration, adding %X{trace_id} and %X{span_id} in the pattern to automatically write TraceId and SpanId into the logs:

                                    <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Configuration>\n  <Appenders>\n    <Console name=\"Console\" target=\"SYSTEM_OUT\">\n      <PatternLayout\n          pattern=\"%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} trace_id=%X{trace_id} span_id=%X{span_id} trace_flags=%X{trace_flags} - %msg%n\"/>\n    </Console>\n  </Appenders>\n  <Loggers>\n    <Root>\n      <AppenderRef ref=\"Console\" level=\"All\"/>\n    </Root>\n  </Loggers>\n</Configuration>\n
                                  3. If using Logback, add OpenTelemetry Logback dependency in pom.xml.

                                    Tip

                                    Please replace OPENTELEMETRY_VERSION with the latest version.

                                    <dependencies>\n  <dependency>\n    <groupId>io.opentelemetry.instrumentation</groupId>\n    <artifactId>opentelemetry-logback-mdc-1.0</artifactId>\n    <version>OPENTELEMETRY_VERSION</version>\n  </dependency>\n</dependencies>\n
                                   4. Modify the logback.xml configuration, adding %X{trace_id} and %X{span_id} in the pattern to automatically write TraceId and SpanId into the logs:

                                    <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<configuration>\n  <appender name=\"CONSOLE\" class=\"ch.qos.logback.core.ConsoleAppender\">\n    <encoder>\n      <pattern>%d{HH:mm:ss.SSS} trace_id=%X{trace_id} span_id=%X{span_id} trace_flags=%X{trace_flags} %msg%n</pattern>\n    </encoder>\n  </appender>\n\n  <!-- Just wrap your logging appender, for example ConsoleAppender, with OpenTelemetryAppender -->\n  <appender name=\"OTEL\" class=\"io.opentelemetry.instrumentation.logback.mdc.v1_0.OpenTelemetryAppender\">\n    <appender-ref ref=\"CONSOLE\"/>\n  </appender>\n\n  <!-- Use the wrapped \"OTEL\" appender instead of the original \"CONSOLE\" one -->\n  <root level=\"INFO\">\n    <appender-ref ref=\"OTEL\"/>\n  </root>\n\n</configuration>\n
                                  "},{"location":"en/admin/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html","title":"Exposing JVM Monitoring Metrics Using JMX Exporter","text":"

                                  JMX Exporter provides two usage methods:

                                  1. Standalone Process: Specify parameters when starting the JVM to expose a JMX RMI interface. The JMX Exporter calls RMI to obtain the JVM runtime state data, converts it into Prometheus metrics format, and exposes a port for Prometheus to scrape.
                                  2. In-Process (JVM process): Specify parameters when starting the JVM to run the JMX Exporter jar file as a javaagent. This method reads the JVM runtime state data in-process, converts it into Prometheus metrics format, and exposes a port for Prometheus to scrape.

                                  Note

                                  The official recommendation is not to use the first method due to its complex configuration and the requirement for a separate process, which introduces additional monitoring challenges. Therefore, this article focuses on the second method, detailing how to use JMX Exporter to expose JVM monitoring metrics in a Kubernetes environment.

                                  In this method, you need to specify the JMX Exporter jar file and configuration file when starting the JVM. Since the jar file is a binary file that is not ideal for mounting via a configmap, and the configuration file typically does not require modifications, it is recommended to package both the JMX Exporter jar file and the configuration file directly into the business container image.

                                  For the second method, you can choose to include the JMX Exporter jar file in the application image or mount it during deployment. Below are explanations for both approaches:

                                  "},{"location":"en/admin/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html#method-1-building-jmx-exporter-jar-file-into-the-business-image","title":"Method 1: Building JMX Exporter JAR File into the Business Image","text":"

                                  The content of prometheus-jmx-config.yaml is as follows:

                                  prometheus-jmx-config.yaml
                                  ...\nssl: false\nlowercaseOutputName: false\nlowercaseOutputLabelNames: false\nrules:\n- pattern: \".*\"\n

                                  Note

                                  For more configuration options, please refer to the introduction at the bottom or Prometheus official documentation.

                                  Next, prepare the jar file. You can find the latest jar download link on the jmx_exporter GitHub page and refer to the following Dockerfile:

                                  FROM openjdk:11.0.15-jre\nWORKDIR /app/\nCOPY target/my-app.jar ./\nCOPY prometheus-jmx-config.yaml ./\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\nENV JAVA_TOOL_OPTIONS=-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\nEXPOSE 8081 8999 8080 8888\nENTRYPOINT java $JAVA_OPTS -jar my-app.jar\n

                                  Note:

                                   • The format of the startup parameter is: -javaagent:<path to jar>=<port>:<path to config file>
                                  • Here, port 8088 is used to expose JVM monitoring metrics; you may change it if it conflicts with the Java application.
                                  "},{"location":"en/admin/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html#method-2-mounting-via-init-container","title":"Method 2: Mounting via Init Container","text":"

                                  First, we need to create a Docker image for the JMX Exporter. The following Dockerfile is for reference:

                                  FROM alpine/curl:3.14\nWORKDIR /app/\n# Copy the previously created config file into the image\nCOPY prometheus-jmx-config.yaml ./\n# Download the jmx prometheus javaagent jar online\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\n

                                  Build the image using the above Dockerfile: docker build -t my-jmx-exporter .

                                  Add the following init container to the Java application deployment YAML:

                                  Click to expand YAML file
                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-demo-app\n  labels:\n    app: my-demo-app\nspec:\n  selector:\n    matchLabels:\n      app: my-demo-app\n  template:\n    metadata:\n      labels:\n        app: my-demo-app\n    spec:\n      imagePullSecrets:\n      - name: registry-pull\n      initContainers:\n      - name: jmx-sidecar\n        image: my-jmx-exporter\n        command: [\"cp\", \"-r\", \"/app/jmx_prometheus_javaagent-0.17.2.jar\", \"/target/jmx_prometheus_javaagent-0.17.2.jar\"]  \u278a\n        volumeMounts:\n        - name: sidecar\n          mountPath: /target\n      containers:\n      - image: my-demo-app-image\n        name: my-demo-app\n        resources:\n          requests:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n          limits:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n        ports:\n        - containerPort: 18083\n        env:\n        - name: JAVA_TOOL_OPTIONS\n          value: \"-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\" \u278b\n        volumeMounts:\n        - name: host-time\n          mountPath: /etc/localtime\n          readOnly: true\n        - name: sidecar\n          mountPath: /sidecar\n      volumes:\n      - name: host-time\n        hostPath:\n          path: /etc/localtime\n      - name: sidecar  # Shared agent folder\n        emptyDir: {}\n      restartPolicy: Always\n

                                  With the above modifications, the example application my-demo-app now has the capability to expose JVM metrics. After running the service, you can access the Prometheus formatted metrics at http://localhost:8088.

                                  Next, you can refer to Connecting Existing JVM Metrics of Java Applications to Observability.

                                  "},{"location":"en/admin/insight/quickstart/otel/java/jvm-monitor/legacy-jvm.html","title":"Integrating Existing JVM Metrics of Java Applications with Observability","text":"

                                  If your Java application exposes JVM monitoring metrics through other means (such as Spring Boot Actuator), you will need to ensure that the monitoring data is collected. You can achieve this by adding annotations (Kubernetes Annotations) to your workload to allow Insight to scrape the existing JVM metrics:

                                  annotations: \n  insight.opentelemetry.io/metric-scrape: \"true\"  # Whether to scrape\n  insight.opentelemetry.io/metric-path: \"/\"         # Path to scrape metrics\n  insight.opentelemetry.io/metric-port: \"9464\"      # Port to scrape metrics\n

                                  For example, to add annotations to the my-deployment-app:

                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-app\nspec:\n  selector:\n    matchLabels:\n      app: my-deployment-app\n      app.kubernetes.io/name: my-deployment-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-deployment-app\n        app.kubernetes.io/name: my-deployment-app\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\"  # Whether to scrape\n        insight.opentelemetry.io/metric-path: \"/\"         # Path to scrape metrics\n        insight.opentelemetry.io/metric-port: \"9464\"      # Port to scrape metrics\n

                                  Here is a complete example:

                                  ---\napiVersion: v1\nkind: Service\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  type: NodePort\n  selector:\n    app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  ports:\n    - name: http\n      port: 8080\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  selector:\n    matchLabels:\n      app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\"  # Whether to scrape\n        insight.opentelemetry.io/metric-path: \"/actuator/prometheus\"  # Path to scrape metrics\n        insight.opentelemetry.io/metric-port: \"8080\"      # Port to scrape metrics\n    spec:\n      containers:\n        - name: myapp\n          image: docker.m.daocloud.io/wutang/spring-boot-actuator-prometheus-metrics-demo\n          ports:\n            - name: http\n              containerPort: 8080\n          resources:\n            limits:\n              cpu: 500m\n              memory: 800Mi\n            requests:\n              cpu: 200m\n              memory: 400Mi\n

                                  In the above example, Insight will scrape the Prometheus metrics exposed through Spring Boot Actuator via http://<service-ip>:8080/actuator/prometheus.

                                  "},{"location":"en/admin/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html","title":"Exposing JVM Metrics Using OpenTelemetry Java Agent","text":"

                                  Starting from OpenTelemetry Agent v1.20.0 and later, the OpenTelemetry Agent has introduced the JMX Metric Insight module. If your application is already integrated with the OpenTelemetry Agent for tracing, you no longer need to introduce another agent to expose JMX metrics for your application. The OpenTelemetry Agent collects and exposes metrics by detecting the locally available MBeans in the application.

                                  The OpenTelemetry Agent also provides built-in monitoring examples for common Java servers or frameworks. Please refer to the Predefined Metrics.

                                  When using the OpenTelemetry Java Agent, you also need to consider how to mount the JAR into the container. In addition to the methods for mounting the JAR file as described with the JMX Exporter, you can leverage the capabilities provided by the OpenTelemetry Operator to automatically enable JVM metrics exposure for your application.

                                  However, as of the current version, you still need to manually add the appropriate annotations to your application for the JVM data to be collected by Insight. For specific annotation content, please refer to Integrating Existing JVM Metrics of Java Applications with Observability.

                                  "},{"location":"en/admin/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html#exposing-metrics-for-java-middleware","title":"Exposing Metrics for Java Middleware","text":"

                                  The OpenTelemetry Agent also includes built-in examples for monitoring middleware. Please refer to the Predefined Metrics.

                                   By default, no specific types are designated; you need to specify them using the -Dotel.jmx.target.system JVM option, for example, -Dotel.jmx.target.system=jetty,kafka-broker.

                                  "},{"location":"en/admin/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html#references","title":"References","text":"
                                  • Gaining JMX Metric Insights with the OpenTelemetry Java Agent

                                  • Otel JMX Metrics

                                  "},{"location":"en/admin/insight/quickstart/other/install-agent-on-ocp.html","title":"OpenShift Install Insight Agent","text":"

                                   Although OpenShift comes with its own monitoring system, we still install Insight Agent because of certain requirements of the data collection protocol.

                                   In addition to the basic installation configuration, the following parameters need to be added during helm install:

                                   ## Parameters related to fluentbit:\n--set fluent-bit.ocp.enabled=true \\\n--set fluent-bit.serviceAccount.create=false \\\n--set fluent-bit.securityContext.runAsUser=0 \\\n--set fluent-bit.securityContext.seLinuxOptions.type=spc_t \\\n--set fluent-bit.securityContext.readOnlyRootFilesystem=false \\\n--set fluent-bit.securityContext.allowPrivilegeEscalation=false \\\n\n## Enable Prometheus(CR) for OpenShift4.x\n--set compatibility.openshift.prometheus.enabled=true \\\n\n## Disable the higher-version Prometheus instance\n--set kube-prometheus-stack.prometheus.enabled=false \\\n--set kube-prometheus-stack.kubeApiServer.enabled=false \\\n--set kube-prometheus-stack.kubelet.enabled=false \\\n--set kube-prometheus-stack.kubeControllerManager.enabled=false \\\n--set kube-prometheus-stack.coreDns.enabled=false \\\n--set kube-prometheus-stack.kubeDns.enabled=false \\\n--set kube-prometheus-stack.kubeEtcd.enabled=false \\\n--set kube-prometheus-stack.kubeScheduler.enabled=false \\\n--set kube-prometheus-stack.kubeStateMetrics.enabled=false \\\n--set kube-prometheus-stack.nodeExporter.enabled=false \\\n\n## Limit the namespaces processed by Prometheus Operator to avoid competition with OpenShift's own Prometheus Operator\n--set kube-prometheus-stack.prometheusOperator.kubeletService.namespace=\"insight-system\" \\\n--set kube-prometheus-stack.prometheusOperator.prometheusInstanceNamespaces=\"insight-system\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[0]=\"openshift-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[1]=\"openshift-user-workload-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[2]=\"openshift-customer-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[3]=\"openshift-route-monitor-operator\" \\\n
                                  "},{"location":"en/admin/insight/quickstart/other/install-agent-on-ocp.html#write-system-monitoring-data-into-prometheus-through-openshifts-own-mechanism","title":"Write system monitoring data into Prometheus through OpenShift's own mechanism","text":"
                                  apiVersion: v1\nkind: ConfigMap\nmetadata:\n   name: cluster-monitoring-config\n   namespace: openshift-monitoring\ndata:\n   config.yaml: |\n     prometheusK8s:\n       remoteWrite:\n         - queueConfig:\n             batchSendDeadline: 60s\n             maxBackoff: 5s\n             minBackoff: 30ms\n             minShards: 1\n             capacity: 5000\n             maxSamplesPerSend: 1000\n             maxShards: 100\n           remoteTimeout: 30s\n           url: http://insight-agent-prometheus.insight-system.svc.cluster.local:9090/api/v1/write\n           writeRelabelConfigs:\n             - action: keep\n               regex: etcd|kubelet|node-exporter|apiserver|kube-state-metrics\n               sourceLabels:\n                 - job\n
                                  "},{"location":"en/admin/insight/quickstart/res-plan/modify-vms-disk.html","title":"vmstorage Disk Expansion","text":"

                                  This article describes the method for expanding the vmstorage disk. Please refer to the vmstorage disk capacity planning for the specifications of the vmstorage disk.

                                  "},{"location":"en/admin/insight/quickstart/res-plan/modify-vms-disk.html#procedure","title":"Procedure","text":""},{"location":"en/admin/insight/quickstart/res-plan/modify-vms-disk.html#enable-storageclass-expansion","title":"Enable StorageClass expansion","text":"
                                   1. Log in to the AI platform as a global service cluster administrator. Click Container Management -> Clusters and go to the details of the kpanda-global-cluster cluster.

                                  2. Select the left navigation menu Container Storage -> PVCs and find the PVC bound to the vmstorage.

                                  3. Click a vmstorage PVC to enter the details of the volume claim for vmstorage and confirm the StorageClass that the PVC is bound to.

                                  4. Select the left navigation menu Container Storage -> Storage Class and find local-path . Click the \u2507 on the right side of the target and select Edit in the popup menu.

                                  5. Enable Scale Up and click OK .

                                  "},{"location":"en/admin/insight/quickstart/res-plan/modify-vms-disk.html#modify-the-disk-capacity-of-vmstorage","title":"Modify the disk capacity of vmstorage","text":"
                                   1. Log in to the AI platform as a global service cluster administrator and go to the details of the kpanda-global-cluster cluster.

                                  2. Select the left navigation menu CRDs and find the custom resource for vmcluster .

                                  3. Click the custom resource for vmcluster to enter the details page, switch to the insight-system namespace, and select Edit YAML from the right menu of insight-victoria-metrics-k8s-stack .

                                  4. Increase the disk capacity of vmstorage in the YAML as shown in the figure (see also the sketch after this list) and click OK .

                                  5. Select the left navigation menu Container Storage -> PVCs again and find the volume claim bound to vmstorage to confirm that the modification has taken effect. On the details page of the PVC, click the associated storage source (PV).

                                  6. Open the volume details page and click the Update button in the upper right corner.

                                  7. After modifying the Capacity , click OK and wait until the expansion succeeds.
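
                                  The vmstorage disk size is declared in the VMCluster custom resource. A minimal sketch of the same change from the command line, assuming the default resource name and a target capacity of 200Gi (both illustrative; adjust to your environment):

                                    kubectl -n insight-system patch vmcluster insight-victoria-metrics-k8s-stack --type merge \\\n  -p '{\"spec\":{\"vmstorage\":{\"storage\":{\"volumeClaimTemplate\":{\"spec\":{\"resources\":{\"requests\":{\"storage\":\"200Gi\"}}}}}}}}'\n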

                                  "},{"location":"en/admin/insight/quickstart/res-plan/modify-vms-disk.html#clone-the-storage-volume","title":"Clone the storage volume","text":"

                                  If the storage volume expansion fails, you can refer to the following method to clone the storage volume.

                                  1. Log in to the AI platform as a global service cluster administrator and go to the details of the kpanda-global-cluster cluster.

                                  2. Select the left navigation menu Workloads -> StatefulSets and find the statefulset for vmstorage . Click the \u2507 on the right side of the target and select Status -> Stop -> OK in the popup menu.

                                  3. Log in to a master node of the kpanda-global-cluster cluster from the command line, then run the following command to copy the vm-data directory out of the vmstorage container, preserving the metric data locally:

                                    kubectl cp -n insight-system vmstorage-insight-victoria-metrics-k8s-stack-1:vm-data ./vm-data\n
                                  4. Log in to the AI platform and go to the details of the kpanda-global-cluster cluster. Select the left navigation menu Container Storage -> PVs , click Clone in the upper right corner, and modify the capacity of the volume.

                                  5. Delete the previous data volume of vmstorage.

                                  6. Wait a moment until the volume claim is bound to the cloned data volume, then run the following command to import the data exported in step 3 into the corresponding container, and start the previously paused vmstorage .

                                    kubectl cp -n insight-system ./vm-data vmstorage-insight-victoria-metrics-k8s-stack-1:vm-data\n
                                  "},{"location":"en/admin/insight/quickstart/res-plan/prometheus-res.html","title":"Prometheus Resource Planning","text":"

                                  In practice, the CPU and memory usage of Prometheus is affected by the number of containers in the cluster and by whether Istio is enabled, and may exceed the configured resources.

                                  To keep Prometheus running reliably in clusters of different sizes, adjust its resources according to the actual cluster size.

                                  "},{"location":"en/admin/insight/quickstart/res-plan/prometheus-res.html#reference-resource-planning","title":"Reference resource planning","text":"

                                  When the service mesh is not enabled, test statistics show the following relationship between system job metrics and pods: Series count = 800 x pod count

                                  When the service mesh is enabled, the Istio-related metrics generated by pods add approximately: Series count = 768 x pod count

                                  "},{"location":"en/admin/insight/quickstart/res-plan/prometheus-res.html#when-the-service-mesh-is-not-enabled","title":"When the service mesh is not enabled","text":"

                                  The following resource planning is recommended for Prometheus when the service mesh is not enabled:

                                  | Cluster size (pod count) | Metric count (mesh not enabled) | CPU (core) | Memory |
                                  | --- | --- | --- | --- |
                                  | 100 | 80k | Request: 0.5, Limit: 1 | Request: 2GB, Limit: 4GB |
                                  | 200 | 160k | Request: 1, Limit: 1.5 | Request: 3GB, Limit: 6GB |
                                  | 300 | 240k | Request: 1, Limit: 2 | Request: 3GB, Limit: 6GB |
                                  | 400 | 320k | Request: 1, Limit: 2 | Request: 4GB, Limit: 8GB |
                                  | 500 | 400k | Request: 1.5, Limit: 3 | Request: 5GB, Limit: 10GB |
                                  | 800 | 640k | Request: 2, Limit: 4 | Request: 8GB, Limit: 16GB |
                                  | 1000 | 800k | Request: 2.5, Limit: 5 | Request: 9GB, Limit: 18GB |
                                  | 2000 | 1.6M | Request: 3.5, Limit: 7 | Request: 20GB, Limit: 40GB |
                                  | 3000 | 2.4M | Request: 4, Limit: 8 | Request: 33GB, Limit: 66GB |
                                  "},{"location":"en/admin/insight/quickstart/res-plan/prometheus-res.html#when-the-service-mesh-feature-is-enabled","title":"When the service mesh feature is enabled","text":"

                                  The following resource planning is recommended for Prometheus when the service mesh is enabled:

                                  | Cluster size (pod count) | Metric count (mesh enabled) | CPU (core) | Memory |
                                  | --- | --- | --- | --- |
                                  | 100 | 150k | Request: 1, Limit: 2 | Request: 3GB, Limit: 6GB |
                                  | 200 | 310k | Request: 2, Limit: 3 | Request: 5GB, Limit: 10GB |
                                  | 300 | 460k | Request: 2, Limit: 4 | Request: 6GB, Limit: 12GB |
                                  | 400 | 620k | Request: 2, Limit: 4 | Request: 8GB, Limit: 16GB |
                                  | 500 | 780k | Request: 3, Limit: 6 | Request: 10GB, Limit: 20GB |
                                  | 800 | 1.25M | Request: 4, Limit: 8 | Request: 15GB, Limit: 30GB |
                                  | 1000 | 1.56M | Request: 5, Limit: 10 | Request: 18GB, Limit: 36GB |
                                  | 2000 | 3.12M | Request: 7, Limit: 14 | Request: 40GB, Limit: 80GB |
                                  | 3000 | 4.68M | Request: 8, Limit: 16 | Request: 65GB, Limit: 130GB |

                                  Note

                                  1. The pod count in the table refers to pods running stably in the cluster. If a large number of pods restart, the metric count will surge in a short period of time, and resources need to be adjusted accordingly.
                                  2. Prometheus keeps two hours of data in memory by default, and enabling the Remote Write feature in the cluster occupies additional memory; a resource surge ratio of 2 is recommended.
                                  3. The values in the table are recommendations for general situations. If your environment has precise resource requirements, check the resource usage of the corresponding Prometheus after the cluster has been running for a while and configure accordingly.
                                  "},{"location":"en/admin/insight/quickstart/res-plan/vms-res-plan.html","title":"vmstorage disk capacity planning","text":"

                                  vmstorage is responsible for storing multicluster metrics for observability. To ensure its stability, adjust the disk capacity of vmstorage according to the number of clusters and their size. For more information, refer to vmstorage retention period and disk space.

                                  "},{"location":"en/admin/insight/quickstart/res-plan/vms-res-plan.html#test-results","title":"Test Results","text":"

                                  After observing the vmstorage disks of clusters of different sizes for 14 days, we found that the disk usage of vmstorage is positively correlated with the number of stored metrics and the disk usage of a single data point.

                                  1. Instantaneous metric count: use increase(vm_rows{type != \"indexdb\"}[30s]) to obtain the number of metrics added within 30s
                                  2. Disk usage of a single data point: sum(vm_data_size_bytes{type!=\"indexdb\"}) / sum(vm_rows{type != \"indexdb\"})
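
                                  Both quantities can be queried directly against the vmselect API. A sketch, assuming the default vmselect Service name and port in insight-system (adjust the Service name and tenant path to your environment):

                                  # metric samples ingested in the last 30s\ncurl -s 'http://vmselect-insight-victoria-metrics-k8s-stack.insight-system:8481/select/0/prometheus/api/v1/query' --data-urlencode 'query=increase(vm_rows{type!=\"indexdb\"}[30s])'\n\n# average disk bytes per stored data point\ncurl -s 'http://vmselect-insight-victoria-metrics-k8s-stack.insight-system:8481/select/0/prometheus/api/v1/query' --data-urlencode 'query=sum(vm_data_size_bytes{type!=\"indexdb\"}) / sum(vm_rows{type!=\"indexdb\"})'\n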
                                  "},{"location":"en/admin/insight/quickstart/res-plan/vms-res-plan.html#calculation-method","title":"calculation method","text":"

                                  Disk usage = instantaneous metric count x 2 x disk usage of a single data point x 60 x 24 x retention period (days)

                                  Parameter Description:

                                  1. The unit of disk usage is Byte .
                                  2. Retention period (days) x 60 x 24 converts the retention time into minutes for the calculation.
                                  3. The default collection interval of Prometheus in Insight Agent is 30s, so two data points are generated per minute.
                                  4. The default retention period in vmstorage is 1 month; refer to Modify System Configuration to change it.

                                  Warning

                                  This formula is a general approximation; it is recommended to reserve extra disk capacity beyond the calculated result to ensure the normal operation of vmstorage.

                                  "},{"location":"en/admin/insight/quickstart/res-plan/vms-res-plan.html#reference-capacity","title":"reference capacity","text":"

                                  The data in the table is calculated based on the default retention period of one month (30 days), with the disk usage of a single data point (datapoint) taken as 0.9 bytes. In a multicluster scenario, the number of Pods is the total across all clusters.

                                  "},{"location":"en/admin/insight/quickstart/res-plan/vms-res-plan.html#when-the-service-mesh-is-not-enabled","title":"When the service mesh is not enabled","text":"Cluster size (number of Pods) Metrics Disk capacity 100 8W 6 GiB 200 16W 12 GiB 300 24w 18 GiB 400 32w 24 GiB 500 40w 30 GiB 800 64w 48 GiB 1000 80W 60 GiB 2000 160w 120 GiB 3000 240w 180 GiB"},{"location":"en/admin/insight/quickstart/res-plan/vms-res-plan.html#when-the-service-mesh-is-enabled","title":"When the service mesh is enabled","text":"Cluster size (number of Pods) Metrics Disk capacity 100 15W 12 GiB 200 31w 24 GiB 300 46w 36 GiB 400 62w 48 GiB 500 78w 60 GiB 800 125w 94 GiB 1000 156w 120 GiB 2000 312w 235 GiB 3000 468w 350 GiB"},{"location":"en/admin/insight/quickstart/res-plan/vms-res-plan.html#example","title":"Example","text":"

                                  There are two clusters in the AI platform: the global management cluster runs 500 Pods (service mesh enabled) and the worker cluster runs 1000 Pods (service mesh not enabled). Metrics are expected to be stored for 30 days.

                                  • The metric count of the global management cluster is 800 x 500 + 768 x 500 = 784,000
                                  • The metric count of the worker cluster is 800 x 1000 = 800,000

                                  The vmstorage disk capacity should then be set to (784,000 + 800,000) x 2 x 0.9 x 60 x 24 x 30 = 123,171,840,000 bytes ≈ 115 GiB
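
                                  A quick shell check of the arithmetic (0.9 bytes per data point written as *9/10 to stay in integer math):

                                  echo $(( (784000 + 800000) * 2 * 9 / 10 * 60 * 24 * 30 ))   # 123171840000 bytes, ~115 GiB\n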

                                  Note

                                  For the relationship between the number of metrics and the number of Pods in the cluster, please refer to Prometheus Resource Planning.

                                  "},{"location":"en/admin/insight/reference/alertnotification.html","title":"Alert Notification Process Description","text":"

                                  When configuring an alert policy in Insight, you can set different notification intervals for alerts of different levels within the same policy. However, because the native Alertmanager configuration contains the two parameters group_interval and repeat_interval , the actual intervals at which alert notifications are sent may deviate from the configured values.

                                  "},{"location":"en/admin/insight/reference/alertnotification.html#parameter-configuration","title":"Parameter Configuration","text":"

                                  In the Alertmanager configuration, set the following parameters:

                                  route:  \n  group_by: [\"rulename\"]\n  group_wait: 30s\n  group_interval: 5m\n  repeat_interval: 1h\n

                                  Parameter descriptions:

                                  • group_wait : Specifies the waiting time before sending the first notification for a new alert group. When Alertmanager receives a group of alerts, it waits for the duration of group_wait to collect additional alerts with the same labels and content, then includes all qualifying alerts in the same notification.

                                  • group_interval : Determines the waiting time before sending a notification about new alerts added to a group that has already been notified. If no more alerts from the same group arrive during this period, Alertmanager sends a notification containing all alerts received so far.

                                  • repeat_interval : Sets the interval for resending alert notifications. After Alertmanager sends an alert notification to a receiver, if it continues to receive alerts with the same labels and content within the duration of repeat_interval , it resends the alert notification.

                                  When the group_wait , group_interval , and repeat_interval parameters are set simultaneously, Alertmanager handles alert notifications under the same group as follows:

                                  1. When Alertmanager receives qualifying alerts, it waits for at least the duration specified in the group_wait parameter to collect additional alerts with the same labels and content. It includes all qualifying alerts in the same notification.

                                  2. If no further alerts are received during the group_wait duration, Alertmanager sends all received alerts to the receiver after that time. If additional qualifying alerts arrive during this period, Alertmanager continues to wait until all alerts are collected or a timeout occurs.

                                  3. If more alerts with the same labels and content are received within the group_interval parameter, these new alerts are merged into the previous notification and sent together. If there are still unsent alerts after the group_interval duration, Alertmanager starts a new timing cycle and waits for more alerts until the group_interval duration is reached again or new alerts are received.

                                  4. If Alertmanager keeps receiving alerts with the same labels and content within the duration specified by repeat_interval , it will resend the previously sent alert notifications. When resending alert notifications, Alertmanager does not wait for group_wait or group_interval , but sends notifications repeatedly according to the time interval specified by repeat_interval .

                                  5. If there are still unsent alerts after the repeat_interval duration, Alertmanager starts a new timing cycle and continues to wait for new alerts with the same labels and content. This process continues until there are no new alerts or Alertmanager is stopped.

                                  "},{"location":"en/admin/insight/reference/alertnotification.html#example","title":"Example","text":"

                                  In the following example, all alerts for CPU usage above the threshold belong to a policy named \"critical_alerts\", and the grouping parameters control how Alertmanager sends their notifications.

                                  # Prometheus alerting rule\ngroups:\n- name: critical_alerts\n  rules:\n  - alert: HighCPUUsage\n    expr: node_cpu_seconds_total{mode=\"idle\"} < 50\n    for: 5m\n    labels:\n      severity: critical\n    annotations:\n      summary: \"High CPU usage detected on instance {{ $labels.instance }}\"\n\n# Alertmanager route for these alerts (the grouping parameters belong to Alertmanager, not the rule file)\nroute:\n  group_by: [\"rulename\"]\n  group_wait: 30s\n  group_interval: 5m\n  repeat_interval: 1h\n

                                  In this case:

                                  • When Alertmanager receives an alert, it waits for at least 30 seconds to collect additional alerts with the same labels and content, and includes them in the same notification.

                                  • If more alerts with the same labels and content are received within 5 minutes (the group_interval ), the new alerts are merged into the previous notification and sent together. If there are still unsent alerts after the group_interval duration, Alertmanager starts a new timing cycle and waits for more alerts until 5 minutes have passed or new alerts are received.

                                  • If Alertmanager continues to receive alerts with the same labels and content within 1 hour, it will resend the previously sent alert notifications.

                                  "},{"location":"en/admin/insight/reference/lucene.html","title":"Lucene Syntax Usage","text":""},{"location":"en/admin/insight/reference/lucene.html#introduction-to-lucene","title":"Introduction to Lucene","text":"

                                  Lucene is a subproject of Apache Software Foundation's Jakarta project and is an open-source full-text search engine toolkit. The purpose of Lucene is to provide software developers with a simple and easy-to-use toolkit for implementing full-text search functionality in their target systems.

                                  "},{"location":"en/admin/insight/reference/lucene.html#lucene-syntax","title":"Lucene Syntax","text":"

                                  Lucene's syntax allows you to construct search queries in a flexible way to meet different search requirements. Here is a detailed explanation of Lucene's syntax:

                                  "},{"location":"en/admin/insight/reference/lucene.html#keyword-queries","title":"Keyword Queries","text":"

                                  To perform searches with multiple keywords using Lucene syntax, you can use Boolean logical operators to combine multiple keywords. Lucene supports the following operators:

                                  1. AND operator

                                    • Use AND or && to represent the logical AND relationship.
                                    • Example: term1 AND term2 or term1 && term2
                                  2. OR operator

                                    • Use OR or || to represent the logical OR relationship.
                                    • Example: term1 OR term2 or term1 || term2
                                  3. NOT operator

                                    • Use NOT or - to represent the logical NOT relationship.
                                    • Example: term1 NOT term2 or term1 -term2
                                  4. Quotes

                                    • You can enclose a phrase in quotes for exact matching.
                                    • Example: \"exact phrase\"
                                  "},{"location":"en/admin/insight/reference/lucene.html#examples","title":"Examples","text":"
                                  1. Specifying fields

                                    field1:keyword1 AND (field2:keyword2 OR field3:keyword3) NOT field4:keyword4\n

                                    Explanation:

                                    • The query field field1 must contain the keyword keyword1 .
                                    • Additionally, either the field field2 must contain the keyword keyword2 or the field field3 must contain the keyword keyword3 .
                                    • Finally, the field field4 must not contain the keyword keyword4 .
                                  2. Without specifying fields

                                    keyword1 AND (keyword2 OR keyword3) NOT keyword4\n

                                    Explanation:

                                    • The query keyword keyword1 must exist in any searchable field.
                                    • Additionally, either the keyword keyword2 must exist or the keyword keyword3 must exist in any searchable field.
                                    • Finally, the keyword keyword4 must not exist in any searchable field.
                                  "},{"location":"en/admin/insight/reference/lucene.html#fuzzy-queries","title":"Fuzzy Queries","text":"

                                  In Lucene, fuzzy queries can be performed using the tilde ( ~ ) operator for approximate matching. You can specify an edit distance to limit the degree of similarity in the matches.

                                  term~\n

                                  In the above example, term is the keyword to perform a fuzzy match on.

                                  Please note the following:

                                  • After the tilde ( ~ ), you can optionally specify a parameter to control the similarity of the fuzzy query.
                                    • The parameter value ranges from 0 to 2, where 0 requires an exact match, 1 allows one edit operation (such as adding, deleting, or replacing a character), and 2 allows two edit operations.
                                    • If no parameter value is specified, a default is applied (recent Lucene versions default to an edit distance of 2; older versions used a similarity threshold of 0.5).
                                  • Fuzzy queries will return documents that are similar to the given keyword but may incur some performance overhead, especially for larger indexes.
                                  "},{"location":"en/admin/insight/reference/lucene.html#wildcards","title":"Wildcards","text":"

                                  Lucene supports the following wildcard queries:

                                  1. * wildcard: Used to match zero or more characters.

                                    For example, te*t can match \"test\", \"text\", and \"tempest\".

                                  2. ? wildcard: Used to match a single character.

                                    For example, te?t can match \"test\" and \"text\".

                                  "},{"location":"en/admin/insight/reference/lucene.html#example","title":"Example","text":"
                                  te?t\n

                                  In the above example, te?t represents a word that starts with \"te\", followed by any single character, and ends with \"t\". This query can match words like \"test\", \"text\", and \"tent\".

                                  It is important to note that the question mark ( ? ) represents only a single character. If you want to match multiple characters or varying lengths of characters, you can use the asterisk ( * ) for multi-character wildcard matching. Additionally, the question mark will not match an empty string.

                                  To summarize, in Lucene syntax, the question mark ( ? ) is used as a single-character wildcard to match any single character. By using the question mark in your search keywords, you can perform more flexible and specific pattern matching.

                                  "},{"location":"en/admin/insight/reference/lucene.html#range-queries","title":"Range Queries","text":"

                                  Lucene syntax supports range queries, where you can use square brackets [ ] or curly braces { } to represent a range. Here are examples of range queries:

                                  1. Inclusive boundary range query:

                                    • Square brackets [ ] indicate a closed interval that includes the boundary values.
                                    • Example: field:[value1 TO value2] represents the range of values for field , including both value1 and value2 .
                                  2. Exclusive boundary range query:

                                    • Curly braces { } indicate an open interval that excludes the boundary values.
                                    • Example: field:{value1 TO value2} represents the range of values for field between value1 and value2 , excluding both.
                                  3. Omitted boundary range query:

                                    • You can replace a boundary value with * to specify an unbounded range.
                                    • Example: field:[value TO *] represents the range of values for field from value to positive infinity, and field:[* TO value] represents the range of values for field from negative infinity to value .

                                    Note

                                    Please note that range queries are applicable only to fields that can be sorted, such as numeric fields and date fields. Also, ensure that you correctly specify the boundary values as the actual value type of the field in your query. If you want to perform a range query across the entire index without specifying a specific field, you can use the wildcard query * instead of a field name.

                                  "},{"location":"en/admin/insight/reference/lucene.html#examples_1","title":"Examples","text":"
                                  1. Specifying a field

                                    timestamp:[2022-01-01 TO 2022-01-31]\n

                                    This will retrieve data where the timestamp field falls within the range from January 1, 2022, to January 31, 2022.

                                  2. Without specifying a field

                                    *:[value1 TO value2]\n

                                    This will search the entire index for documents with values ranging from value1 to value2 .

                                  "},{"location":"en/admin/insight/reference/lucene.html#insight-common-keywords","title":"Insight Common Keywords","text":""},{"location":"en/admin/insight/reference/lucene.html#container-logs","title":"Container Logs","text":"
                                  • kubernetes.container_image: Container image name
                                  • kubernetes.container_name: Container name
                                  • kubernetes.namespace_name: Namespace name
                                  • kubernetes.pod_name: Pod name
                                  • log: Log content
                                  • time: Log timestamp
                                  "},{"location":"en/admin/insight/reference/lucene.html#host-logs","title":"Host Logs","text":"
                                  • syslog.file: Log file name
                                  • syslog.host: Host name
                                  • log: Log content

                                  If you want to match a specific value exactly, add a .keyword suffix to the field name, e.g. kubernetes.container_name.keyword .

                                  "},{"location":"en/admin/insight/reference/lucene.html#examples_2","title":"Examples","text":"
                                  1. Query container logs of the specified container in the specified Pod

                                    kubernetes.pod_name.keyword:nginx-pod AND kubernetes.container_name.keyword:nginx\n
                                  2. Query container logs whose Pod name contains nginx-pod

                                    kubernetes.pod_name:nginx-pod\n
                                  "},{"location":"en/admin/insight/reference/notify-helper.html","title":"Configure Notification Templates","text":""},{"location":"en/admin/insight/reference/notify-helper.html#template-syntax-go-template-description","title":"Template Syntax (Go Template) Description","text":"

                                  The alert notification template uses Go Template syntax to render the template.

                                  The template will be rendered based on the following data.

                                  {\n    \"status\": \"firing\",\n    \"labels\": {\n        \"alertgroup\": \"test-group\",           // Alert policy name\n        \"alertname\": \"test-rule\",          // Alert rule name\n        \"cluster\": \"35b54a48-b66c-467b-a8dc-503c40826330\",\n        \"customlabel1\": \"v1\",\n        \"customlabel2\": \"v2\",\n        \"endpoint\": \"https\",\n        \"group_id\": \"01gypg06fcdf7rmqc4ksv97646\",\n        \"instance\": \"10.6.152.85:6443\",\n        \"job\": \"apiserver\",\n        \"namespace\": \"default\",\n        \"prometheus\": \"insight-system/insight-agent-kube-prometh-prometheus\",\n        \"prometheus_replica\": \"prometheus-insight-agent-kube-prometh-prometheus-0\",\n        \"rule_id\": \"01gypg06fcyn2g9zyehbrvcdfn\",\n        \"service\": \"kubernetes\",\n        \"severity\": \"critical\",\n        \"target\": \"35b54a48-b66c-467b-a8dc-503c40826330\",\n        \"target_type\": \"cluster\"\n   },\n    \"annotations\": {\n        \"customanno1\": \"v1\",\n        \"customanno2\": \"v2\",\n        \"description\": \"This is a test rule, 10.6.152.85:6443 down\",\n        \"value\": \"1\"\n    },\n    \"startsAt\": \"2023-04-20T07:53:54.637363473Z\",\n    \"endsAt\": \"0001-01-01T00:00:00Z\",\n    \"generatorURL\": \"http://vmalert-insight-victoria-metrics-k8s-stack-df987997b-npsl9:8080/vmalert/alert?group_id=16797738747470868115&alert_id=10071735367745833597\",\n    \"fingerprint\": \"25c8d93d5bf58ac4\"\n}\n
                                  "},{"location":"en/admin/insight/reference/notify-helper.html#instructions-for-use","title":"Instructions for Use","text":"
                                  1. . character

                                    Render the specified object in the current scope.

                                    Example 1: Output the entire top-level scope, i.e. all of the context data in the example above.

                                    {{ . }}\n
                                  2. Conditional statement if / else

                                    Use if to test a value and run the else branch when the condition is not met.

                                    {{if .Labels.namespace }}Namespace: {{ .Labels.namespace }} \\n{{ end }}\n
                                  3. Loop with range

                                    The range action iterates over a list or map.

                                    Example 1: Traverse the Labels map to output all label content for the alert.

                                    {{ range $key, $value := .Labels }}{{ $key }}: {{ $value }} \\n{{ end }}\n
                                  "},{"location":"en/admin/insight/reference/notify-helper.html#functions","title":"FUNCTIONS","text":"

                                  Insight's \"notification templates\" and \"SMS templates\" support over 70 sprig functions, as well as custom functions.

                                  "},{"location":"en/admin/insight/reference/notify-helper.html#sprig-functions","title":"Sprig Functions","text":"

                                  Sprig provides over 70 built-in template functions to assist in rendering data. The following are some commonly used functions:

                                  • Date operations
                                  • String operations
                                  • Type conversion operations
                                  • Mathematical calculations with integers

                                  For more details, you can refer to the official documentation.

                                  "},{"location":"en/admin/insight/reference/notify-helper.html#custom-functions","title":"Custom Functions","text":""},{"location":"en/admin/insight/reference/notify-helper.html#toclustername","title":"toClusterName","text":"

                                  The toClusterName function retrieves the \"cluster name\" based on the \"cluster unique identifier (ID)\". If no matching cluster is found, it returns the passed-in cluster identifier unchanged.

                                  func toClusterName(id string) (string, error)\n

                                  Example:

                                  {{ toClusterName \"clusterId\" }}\n{{ \"clusterId\" | toClusterName }}\n
                                  "},{"location":"en/admin/insight/reference/notify-helper.html#toclusterid","title":"toClusterId","text":"

                                  The toClusterId function retrieves the \"cluster unique identifier (ID)\" based on the \"cluster name\". If no matching cluster is found, it returns the passed-in cluster name unchanged.

                                  func toClusterId(name string) (string, error)\n

                                  Example:

                                  {{ toClusterId \"clusterName\" }}\n{{ \"clusterName\" | toClusterId }}\n
                                  "},{"location":"en/admin/insight/reference/notify-helper.html#todateinzone","title":"toDateInZone","text":"

                                  The toDateInZone function converts a string date into the desired time format and applies the specified time zone.

                                  func toDateInZone(fmt string, date interface{}, zone string) string\n

                                  Example 1:

                                  {{ toDateInZone \"2006-01-02T15:04:05\" \"2022-08-15T05:59:08.064449533Z\" \"Asia/Shanghai\" }}\n

                                  This will return 2022-08-15T13:59:08. Additionally, you can achieve the same effect as toDateInZone using the built-in functions provided by sprig:

                                  {{ dateInZone \"2006-01-02T15:04:05\" (toDate \"2006-01-02T15:04:05Z07:00\" .StartsAt) \"Asia/Shanghai\" }}\n

                                  Example 2:

                                  {{ toDateInZone \"2006-01-02T15:04:05\" .StartsAt \"Asia/Shanghai\" }}\n\n## Threshold Template Description\n\nThe built-in webhook alert template in Insight is as follows. Other contents such as email and WeCom are the same, only proper adjustments are made for line breaks.\n\n```text\nRule Name: {{ .Labels.alertname }} \\n\nPolicy Name: {{ .Labels.alertgroup }} \\n\nAlert level: {{ .Labels.severity }} \\n\nCluster: {{ .Labels.cluster }} \\n\n{{if .Labels.namespace }}Namespace: {{ .Labels.namespace }} \\n{{ end }}\n{{if .Labels.node }}Node: {{ .Labels.node }} \\n{{ end }}\nResource Type: {{ .Labels.target_type }} \\n\n{{if .Labels.target }}Resource Name: {{ .Labels.target }} \\n{{ end }}\nTrigger Value: {{ .Annotations.value }} \\n\nOccurred Time: {{ .StartsAt }} \\n\n{{if ne \"0001-01-01T00:00:00Z\" .EndsAt }}End Time: {{ .EndsAt }} \\n{{ end }}\nDescription: {{ .Annotations.description }} \\n\n
                                  "},{"location":"en/admin/insight/reference/notify-helper.html#email-subject-parameters","title":"Email Subject Parameters","text":"

                                  Because Insight combines alert messages generated by the same rule at the same time, email subjects differ from the templates above and are rendered using only the commonLabels of the alert message. The default template is as follows:

                                  [{{ .status }}] [{{ .severity }}] Alert: {{ .alertname }}\n

                                  Other fields that can be used as email subjects are as follows:

                                  {{ .status }} Triggering status of the alert message\n{{ .alertgroup }} Name of the policy to which the alert belongs\n{{ .alertname }} Name of the rule to which the alert belongs\n{{ .severity }} Severity level of the alert\n{{ .target_type }} Type of resource for which the alert is raised\n{{ .target }} Resource object for which the alert is raised\n{{ .Custom label key for other rules }}\n
                                  "},{"location":"en/admin/insight/reference/tailing-sidecar.html","title":"Collecting Container Logs through Sidecar","text":"

                                  Tailing Sidecar is a Kubernetes cluster-level logging proxy that acts as a streaming sidecar container. It allows automatic collection and summarization of log files within containers, even when the container cannot write to standard output or standard error streams.

                                  Insight supports log collection through the Sidecar mode, which involves running a Sidecar container alongside each Pod to output log data to the standard output stream. This enables FluentBit to collect container logs effectively.

                                  The Insight Agent comes with the tailing-sidecar operator installed by default. To enable file log collection within a container, you can add annotations to the Pod, which will automatically inject the Tailing Sidecar container. The injected Sidecar container reads the files in the business container and outputs them to the standard output stream.

                                  Here are the specific steps to follow:

                                  1. Modify the YAML file of the Pod and add the following to the annotations field:

                                    metadata:\n  annotations:\n    tailing-sidecar: <sidecar-name-0>:<volume-name-0>:<path-to-tail-0>;<sidecar-name-1>:<volume-name-1>:<path-to-tail-1>\n

                                    Field description:

                                    • sidecar-name-0 : Name for the Tailing Sidecar container (optional; a container name will be created automatically if not specified, starting with the prefix \"tailing-sidecar\").
                                    • volume-name-0 : Name of the storage volume.
                                    • path-to-tail-0 : File path to tail.

                                    Note

                                    Each Pod can run multiple sidecar containers, separated by ; . This allows different sidecar containers to collect multiple files and store them in various volumes.
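
                                    For reference, a minimal sketch of a Pod using this annotation; the busybox image, file path, and all names are illustrative, with the log file written to an emptyDir volume named logs :

                                    kubectl apply -f - <<'EOF'\napiVersion: v1\nkind: Pod\nmetadata:\n  name: app-with-file-log\n  annotations:\n    # inject one sidecar named app-log that tails /var/log/app.log from volume logs\n    tailing-sidecar: app-log:logs:/var/log/app.log\nspec:\n  containers:\n  - name: app\n    image: busybox\n    command: [\"sh\", \"-c\", \"while true; do date >> /var/log/app.log; sleep 5; done\"]\n    volumeMounts:\n    - name: logs\n      mountPath: /var/log\n  volumes:\n  - name: logs\n    emptyDir: {}\nEOF\n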

                                  2. Restart the Pod. Once the Pod's status changes to Running , you can use the Log Query interface to search for logs within the container of the Pod.

                                  "},{"location":"en/admin/insight/reference/used-metric-in-insight.html","title":"Insight Reference Metric","text":"

                                  The metrics in this article are organized based on the community's kube-prometheus framework. Currently, it covers metrics from multiple levels, including Cluster, Node, Namespace, and Workload. This article lists some commonly used metrics, their descriptions, and units for easy reference.

                                  "},{"location":"en/admin/insight/reference/used-metric-in-insight.html#cluster","title":"Cluster","text":"Metric Name Description Unit cluster_cpu_utilization Cluster CPU Utilization cluster_cpu_total Total CPU in Cluster Core cluster_cpu_usage CPU Used in Cluster Core cluster_cpu_requests_commitment CPU Allocation Rate in Cluster cluster_memory_utilization Cluster Memory Utilization cluster_memory_usage Memory Usage in Cluster Byte cluster_memory_available Available Memory in Cluster Byte cluster_memory_requests_commitment Memory Allocation Rate in Cluster cluster_memory_total Total Memory in Cluster Byte cluster_net_utilization Network Data Transfer Rate in Cluster Byte/s cluster_net_bytes_transmitted Network Data Transmitted in Cluster (Upstream) Byte/s cluster_net_bytes_received Network Data Received in Cluster (Downstream) Byte/s cluster_disk_read_iops Disk Read IOPS in Cluster times/s cluster_disk_write_iops Disk Write IOPS in Cluster times/s cluster_disk_read_throughput Disk Read Throughput in Cluster Byte/s cluster_disk_write_throughput Disk Write Throughput in Cluster Byte/s cluster_disk_size_capacity Total Disk Capacity in Cluster Byte cluster_disk_size_available Available Disk Size in Cluster Byte cluster_disk_size_usage Disk Usage in Cluster Byte cluster_disk_size_utilization Disk Utilization in Cluster cluster_node_total Total Nodes in Cluster units cluster_node_online Online Nodes in Cluster units cluster_node_offline_count Count of Offline Nodes in Cluster units cluster_pod_count Total Pods in Cluster units cluster_pod_running_count Count of Running Pods in Cluster units cluster_pod_abnormal_count Count of Abnormal Pods in Cluster units cluster_deployment_count Total Deployments in Cluster units cluster_deployment_normal_count Count of Normal Deployments in Cluster units cluster_deployment_abnormal_count Count of Abnormal Deployments in Cluster units cluster_statefulset_count Count of StatefulSets in Cluster units cluster_statefulset_normal_count Count of Normal StatefulSets in Cluster units cluster_statefulset_abnormal_count Count of Abnormal StatefulSets in Cluster units cluster_daemonset_count Count of DaemonSets in Cluster units cluster_daemonset_normal_count Count of Normal DaemonSets in Cluster units cluster_daemonset_abnormal_count Count of Abnormal DaemonSets in Cluster units cluster_job_count Total Jobs in Cluster units cluster_job_normal_count Count of Normal Jobs in Cluster units cluster_job_abnormal_count Count of Abnormal Jobs in Cluster units

                                  Tip

                                  Utilization is generally a number in the range (0,1] (e.g., 0.21, not 21%).

                                  "},{"location":"en/admin/insight/reference/used-metric-in-insight.html#node","title":"Node","text":"Metric Name Description Unit node_cpu_utilization Node CPU Utilization node_cpu_total Total CPU in Node Core node_cpu_usage CPU Usage in Node Core node_cpu_requests_commitment CPU Allocation Rate in Node node_memory_utilization Node Memory Utilization node_memory_usage Memory Usage in Node Byte node_memory_requests_commitment Memory Allocation Rate in Node node_memory_available Available Memory in Node Byte node_memory_total Total Memory in Node Byte node_net_utilization Network Data Transfer Rate in Node Byte/s node_net_bytes_transmitted Network Data Transmitted in Node (Upstream) Byte/s node_net_bytes_received Network Data Received in Node (Downstream) Byte/s node_disk_read_iops Disk Read IOPS in Node times/s node_disk_write_iops Disk Write IOPS in Node times/s node_disk_read_throughput Disk Read Throughput in Node Byte/s node_disk_write_throughput Disk Write Throughput in Node Byte/s node_disk_size_capacity Total Disk Capacity in Node Byte node_disk_size_available Available Disk Size in Node Byte node_disk_size_usage Disk Usage in Node Byte node_disk_size_utilization Disk Utilization in Node"},{"location":"en/admin/insight/reference/used-metric-in-insight.html#workload","title":"Workload","text":"

                                  The currently supported workload types include: Deployment, StatefulSet, DaemonSet, Job, and CronJob.

                                  | Metric Name | Description | Unit |
                                  | --- | --- | --- |
                                  | workload_cpu_usage | Workload CPU Usage | Core |
                                  | workload_cpu_limits | Workload CPU Limit | Core |
                                  | workload_cpu_requests | Workload CPU Requests | Core |
                                  | workload_cpu_utilization | Workload CPU Utilization | |
                                  | workload_memory_usage | Workload Memory Usage | Byte |
                                  | workload_memory_limits | Workload Memory Limit | Byte |
                                  | workload_memory_requests | Workload Memory Requests | Byte |
                                  | workload_memory_utilization | Workload Memory Utilization | |
                                  | workload_memory_usage_cached | Workload Memory Usage (including cache) | Byte |
                                  | workload_net_bytes_transmitted | Workload Network Data Transmitted Rate | Byte/s |
                                  | workload_net_bytes_received | Workload Network Data Received Rate | Byte/s |
                                  | workload_disk_read_throughput | Workload Disk Read Throughput | Byte/s |
                                  | workload_disk_write_throughput | Workload Disk Write Throughput | Byte/s |
                                  1. The workload metrics here are aggregated totals across all Pods of the workload.
                                  2. Metrics can be queried with workload_cpu_usage{workload_type=\"deployment\", workload=\"prometheus\"} .
                                  3. workload_pod_utilization is calculated as workload_pod_usage / workload_pod_request .
                                  "},{"location":"en/admin/insight/reference/used-metric-in-insight.html#pod","title":"Pod","text":"Metric Name Description Unit pod_cpu_usage Pod CPU Usage Core pod_cpu_limits Pod CPU Limit Core pod_cpu_requests Pod CPU Requests Core pod_cpu_utilization Pod CPU Utilization pod_memory_usage Pod Memory Usage Byte pod_memory_limits Pod Memory Limit Byte pod_memory_requests Pod Memory Requests Byte pod_memory_utilization Pod Memory Utilization pod_memory_usage_cached Pod Memory Usage (including cache) Byte pod_net_bytes_transmitted Pod Network Data Transmitted Rate Byte/s pod_net_bytes_received Pod Network Data Received Rate Byte/s pod_disk_read_throughput Pod Disk Read Throughput Byte/s pod_disk_write_throughput Pod Disk Write Throughput Byte/s

                                  You can obtain the CPU usage of all Pods belonging to the Deployment named prometheus by using pod_cpu_usage{workload_type=\"deployment\", workload=\"prometheus\"}.

                                  "},{"location":"en/admin/insight/reference/used-metric-in-insight.html#span-metrics","title":"Span Metrics","text":"Metric Name Description Unit calls_total Total Service Requests duration_milliseconds_bucket Service Latency Histogram duration_milliseconds_sum Total Service Latency ms duration_milliseconds_count Number of Latency Records otelcol_processor_groupbytrace_spans_released Number of Collected Spans otelcol_processor_groupbytrace_traces_released Number of Collected Traces traces_service_graph_request_total Total Service Requests (Topology Feature) traces_service_graph_request_server_seconds_sum Total Latency (Topology Feature) ms traces_service_graph_request_server_seconds_bucket Service Latency Histogram (Topology Feature) traces_service_graph_request_server_seconds_count Total Service Requests (Topology Feature)"},{"location":"en/admin/insight/system-config/modify-config.html","title":"Modify system configuration","text":"

                                  Observability persists metric, log, and trace data by default. You can modify the system configuration by following this page.

                                  "},{"location":"en/admin/insight/system-config/modify-config.html#how-to-modify-the-metric-data-retention-period","title":"How to modify the metric data retention period","text":"

                                  Refer to the following steps to modify the metric data retention period.

                                  1. Run the following command:

                                    kubectl edit vmcluster insight-victoria-metrics-k8s-stack -n insight-system\n
                                  2. In the YAML file, the default value of retentionPeriod is 14 , in days. Modify the parameter according to your needs.

                                    apiVersion: operator.victoriametrics.com/v1beta1\nkind: VMCluster\nmetadata:\n  annotations:\n    meta.helm.sh/release-name: insight\n    meta.helm.sh/release-namespace: insight-system\n  creationTimestamp: \"2022-08-25T04:31:02Z\"\n  finalizers:\n  - apps.victoriametrics.com/finalizer\n  generation: 2\n  labels:\n    app.kubernetes.io/instance: insight\n    app.kubernetes.io/managed-by: Helm\n    app.kubernetes.io/name: victoria-metrics-k8s-stack\n    app.kubernetes.io/version: 1.77.2\n    helm.sh/chart: victoria-metrics-k8s-stack-0.9.3\n  name: insight-victoria-metrics-k8s-stack\n  namespace: insight-system\n  resourceVersion: \"123007381\"\n  uid: 55cee8d6-c651-404b-b2c9-50603b405b54\nspec:\n  replicationFactor: 1\n  retentionPeriod: \"14\"\n  vminsert:\n    extraArgs:\n      maxLabelsPerTimeseries: \"45\"\n    image:\n      repository: docker.m.daocloud.io/victoriametrics/vminsert\n      tag: v1.80.0-cluster\n      replicaCount: 1\n
                                  3. After saving the modification, the pod of the component that stores metric data will restart automatically; wait a moment for it to complete.
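
                                  Alternatively, a one-line sketch of the same change (value in days, per the retentionPeriod field shown above):

                                    kubectl -n insight-system patch vmcluster insight-victoria-metrics-k8s-stack --type merge -p '{\"spec\":{\"retentionPeriod\":\"30\"}}'\n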

                                  "},{"location":"en/admin/insight/system-config/modify-config.html#how-to-modify-the-log-data-storage-duration","title":"How to modify the log data storage duration","text":"

                                  Refer to the following steps to modify the log data retention period:

                                  "},{"location":"en/admin/insight/system-config/modify-config.html#method-1-modify-the-json-file","title":"Method 1: Modify the Json file","text":"
                                  1. Modify the max_age parameter in the rollover field of the following request to set the retention period. The default storage period is 7d . Replace http://localhost:9200 with the access address of Elasticsearch.

                                    curl -X PUT \"http://localhost:9200/_ilm/policy/insight-es-k8s-logs-policy?pretty\" -H 'Content-Type: application/json' -d'\n{\n\"policy\": {\n    \"phases\": {\n        \"hot\": {\n            \"min_age\": \"0ms\",\n            \"actions\": {\n            \"set_priority\": {\n                \"priority\": 100\n            },\n            \"rollover\": {\n                \"max_age\": \"7d\",\n                \"max_size\": \"10gb\"\n            }\n            }\n        },\n    \"warm\": {\n        \"min_age\": \"10d\",\n        \"actions\": {\n        \"forcemerge\": {\n            \"max_num_segments\": 1\n        }\n        }\n    },\n    \"delete\": {\n        \"min_age\": \"30d\",\n        \"actions\": {\n        \"delete\": {}\n        }\n    }\n    }\n}\n}\n
                                  2. After modification, run the above command. If it prints the following output, the modification succeeded:

                                    {\n\"acknowledged\": true\n}\n
                                  "},{"location":"en/admin/insight/system-config/modify-config.html#method-2-modify-from-the-ui","title":"Method 2: Modify from the UI","text":"
                                  1. Log in to Kibana and select Stack Management in the left navigation bar.

                                  2. Select Index Lifecycle Policies in the left navigation, find the policy insight-es-k8s-logs-policy , and click to enter its details.

                                  3. Expand the Hot phase configuration panel and modify the Maximum age parameter to set the retention period. The default storage period is 7d .

                                  4. After modification, click Save policy at the bottom of the page to complete the modification.

                                  "},{"location":"en/admin/insight/system-config/modify-config.html#how-to-modify-the-trace-data-storage-duration","title":"How to modify the trace data storage duration","text":"

                                  Refer to the following steps to modify the trace data retention period:

                                  "},{"location":"en/admin/insight/system-config/modify-config.html#method-1-modify-the-json-file_1","title":"Method 1: Modify the Json file","text":"
                                  1. Modify the max_age parameter in the rollover field of the following request to set the retention period. The default storage period is 7d . Replace http://localhost:9200 with the access address of Elasticsearch.

                                    curl -X PUT \"http://localhost:9200/_ilm/policy/jaeger-ilm-policy?pretty\" -H 'Content-Type: application/json' -d'\n{\n\"policy\": {\n    \"phases\": {\n        \"hot\": {\n            \"min_age\": \"0ms\",\n            \"actions\": {\n            \"set_priority\": {\n                \"priority\": 100\n            },\n            \"rollover\": {\n                \"max_age\": \"7d\",\n                \"max_size\": \"10gb\"\n            }\n            }\n        },\n    \"warm\": {\n        \"min_age\": \"10d\",\n        \"actions\": {\n        \"forcemerge\": {\n            \"max_num_segments\": 1\n        }\n        }\n    },\n    \"delete\": {\n        \"min_age\": \"30d\",\n        \"actions\": {\n        \"delete\": {}\n        }\n    }\n    }\n}\n}\n
                                  2. After modification, run the above command on the console. If it prints the following output, the modification succeeded:

                                    {\n\"acknowledged\": true\n}\n
                                  "},{"location":"en/admin/insight/system-config/modify-config.html#method-2-modify-from-the-ui_1","title":"Method 2: Modify from the UI","text":"
                                  1. Log in to Kibana and select Stack Management in the left navigation bar.

                                  2. Select Index Lifecycle Policies in the left navigation, find the policy jaeger-ilm-policy , and click to enter its details.

                                  3. Expand the Hot phase configuration panel and modify the Maximum age parameter to set the retention period. The default storage period is 7d .

                                  4. After modification, click Save policy at the bottom of the page to complete the modification.

                                  "},{"location":"en/admin/insight/system-config/system-component.html","title":"System Components","text":"

                                  On the system component page, you can quickly view the running status of the system components in Insight. When a system component fails, some features in Insight will be unavailable.

                                  1. Go to the Insight product module.
                                  2. In the left navigation bar, select System Management -> System Components .
                                  "},{"location":"en/admin/insight/system-config/system-component.html#component-description","title":"Component description","text":"Module Component Name Description Metrics vminsert-insight-victoria-metrics-k8s-stack Responsible for writing the metric data collected by Prometheus in each cluster to the storage component. If this component is abnormal, the metric data of the worker cluster cannot be written. Metrics vmalert-insight-victoria-metrics-k8s-stack Responsible for taking effect of the recording and alert rules configured in the VM Rule, and sending the triggered alert rules to alertmanager. Metrics vmalertmanager-insight-victoria-metrics-k8s-stack is responsible for sending messages when alerts are triggered. If this component is abnormal, the alert information cannot be sent. Metrics vmselect-insight-victoria-metrics-k8s-stack Responsible for querying metrics data. If this component is abnormal, the metric cannot be queried. Metrics vmstorage-insight-victoria-metrics-k8s-stack Responsible for storing multicluster metrics data. Dashboard grafana-deployment Provide monitoring panel capability. The exception of this component will make it impossible to view the built-in dashboard. Link insight-jaeger-collector Responsible for receiving trace data in opentelemetry-collector and storing it. Link insight-jaeger-query Responsible for querying the trace data collected in each cluster. Link insight-opentelemetry-collector Responsible for receiving trace data forwarded by each sub-cluster Log elasticsearch Responsible for storing the log data of each cluster."},{"location":"en/admin/insight/system-config/system-config.html","title":"System Settings","text":"

                                  System Settings displays the default storage duration of metrics, logs, and traces, as well as the default Apdex threshold.

                                  1. Click the right navigation bar and select System Settings .

                                  2. Currently, only the storage duration of historical alerts can be modified. Click Edit and enter the target duration.

                                    When the storage duration is set to \"0\", the historical alerts will not be cleared.

                                  Note

                                  To modify other settings, see How to modify the system settings?

                                  "},{"location":"en/admin/insight/trace/service.html","title":"Service Insight","text":"

                                  In Insight , a service refers to a group of workloads that provide the same behavior for incoming requests. Service insight helps you observe the performance and status of applications at runtime by using the OpenTelemetry SDK.

                                  For how to use OpenTelemetry, please refer to: Using OTel to give your application insight.

                                  "},{"location":"en/admin/insight/trace/service.html#glossary","title":"Glossary","text":"
                                  • Service: A service represents a group of workloads that provide the same behavior for incoming requests. You can define the service name when using the OpenTelemetry SDK or use the name defined in Istio.
                                  • Operation: An operation refers to a specific request or action handled by a service. Each span has an operation name.
                                  • Outbound Traffic: Outbound traffic refers to all the traffic generated by the current service when making requests.
                                  • Inbound Traffic: Inbound traffic refers to all the traffic initiated by the upstream service targeting the current service.
                                  "},{"location":"en/admin/insight/trace/service.html#steps","title":"Steps","text":"

                                  The Services List page displays key metrics such as throughput rate, error rate, and request latency for all services that have been instrumented with distributed tracing. You can filter services based on clusters or namespaces and sort the list by throughput rate, error rate, or request latency. By default, the data displayed in the list is for the last hour, but you can customize the time range.

                                  Follow these steps to view service insight metrics:

                                  1. Go to the Insight product module.

2. Select Tracing -> Services from the left navigation bar.

                                    Attention

                                    1. If the namespace of a service in the list is unknown , it means that the service has not been properly instrumented. We recommend reconfiguring the instrumentation.
                                    2. If multiple services have the same name and none of them have the correct Namespace environment variable configured, the metrics displayed in the list and service details page will be aggregated for all those services.
                                  3. Click a service name (taking insight-system as an example) to view the detailed metrics and operation metrics for that service.

                                    1. In the Service Topology section, you can view the service topology one layer above or below the current service. When you hover over a node, you can see its information.
                                    2. In the Traffic Metrics section, you can view the monitoring metrics for all requests to the service within the past hour (including inbound and outbound traffic).
                                    3. You can use the time selector in the upper right corner to quickly select a time range or specify a custom time range.
                                    4. Sorting is available for throughput, error rate, and request latency in the operation metrics.
                                    5. Clicking on the icon next to an individual operation will take you to the Traces page to quickly search for related traces.

                                  "},{"location":"en/admin/insight/trace/service.html#service-metric-explanations","title":"Service Metric Explanations","text":"Metric Description Throughput Rate The number of requests processed within a unit of time. Error Rate The ratio of erroneous requests to the total number of requests within the specified time range. P50 Request Latency The response time within which 50% of requests complete. P95 Request Latency The response time within which 95% of requests complete. P99 Request Latency The response time within which 99% of requests complete."},{"location":"en/admin/insight/trace/topology.html","title":"Service Map","text":"

                                  Service map is a visual representation of the connections, communication, and dependencies between services. It provides insights into the service-to-service interactions, allowing you to view the calls and performance of services within a specified time range. The connections between nodes in the topology map represent the existence of service-to-service calls during the queried time period.

                                  "},{"location":"en/admin/insight/trace/topology.html#prerequisites","title":"Prerequisites","text":"
                                  1. Insight Agent is installed in the cluster and the applications are in the Running state.
                                  2. Services have been instrumented for distributed tracing using Operator or OpenTelemetry SDK.
                                  "},{"location":"en/admin/insight/trace/topology.html#steps","title":"Steps","text":"
                                  1. Go to the Insight product module.

                                  2. Select Tracing -> Service Map from the left navigation bar.

                                  3. In the Service Map, you can perform the following actions:

                                    • Click a node to slide out the details of the service on the right side. Here, you can view metrics such as request latency, throughput, and error rate for the service. Clicking on the service name takes you to the service details page.
                                    • Hover over the connections to view the traffic metrics between the two services.
• Click Display Settings to configure the display elements in the service map.

                                  "},{"location":"en/admin/insight/trace/topology.html#other-nodes","title":"Other Nodes","text":"

                                  In the Service Map, there can be nodes that are not part of the cluster. These external nodes can be categorized into three types:

                                  • Database
                                  • Message Queue
                                  • Virtual Node

                                  • If a service makes a request to a Database or Message Queue, these two types of nodes will be displayed by default in the topology map. However, Virtual Nodes represent nodes outside the cluster or services not integrated into the trace, and they will not be displayed by default in the map.

                                  • When a service makes a request to MySQL, PostgreSQL, or Oracle Database, the detailed database type can be seen in the map.

                                  "},{"location":"en/admin/insight/trace/topology.html#enabling-virtual-nodes","title":"Enabling Virtual Nodes","text":"
1. Update the insight-server chart values, locate the parameter that enables virtual nodes, and change false to true (see the sketch below).
                                  1. In the display settings of the service map, check the Virtual Services option to enable it.
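A minimal sketch of the value update, assuming the release is named insight-server in the insight-system namespace; the chart reference and the exact values key are placeholders, not confirmed names:

  helm -n insight-system get values insight-server > insight-values.yaml   # export the current values\n# edit insight-values.yaml and flip the virtual-node switch from false to true\nhelm -n insight-system upgrade insight-server <repo>/insight-server -f insight-values.yaml\n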
                                  "},{"location":"en/admin/insight/trace/trace.html","title":"Trace Query","text":"

                                  On the trace query page, you can query detailed information about a call trace by TraceID or filter call traces based on various conditions.

                                  "},{"location":"en/admin/insight/trace/trace.html#glossary","title":"Glossary","text":"
                                  • TraceID: Used to identify a complete request call trace.
                                  • Operation: Describes the specific operation or event represented by a Span.
                                  • Entry Span: The entry Span represents the first request of the entire call.
                                  • Latency: The duration from receiving the request to completing the response for the entire call trace.
                                  • Span: The number of Spans included in the entire trace.
                                  • Start Time: The time when the current trace starts.
                                  • Tag: A collection of key-value pairs that constitute Span tags. Tags are used to annotate and supplement Spans, and each Span can have multiple key-value tag pairs.
                                  "},{"location":"en/admin/insight/trace/trace.html#steps","title":"Steps","text":"

                                  Please follow these steps to search for a trace:

                                  1. Go to the Insight product module.
                                  2. Select Tracing -> Traces from the left navigation bar.

                                    Note

                                    Sorting by Span, Latency, and Start At is supported in the list.

                                  3. Click the TraceID Query in the filter bar to switch to TraceID search.

                                  4. To search using TraceID, please enter the complete TraceID.

                                  "},{"location":"en/admin/insight/trace/trace.html#other-operations","title":"Other Operations","text":""},{"location":"en/admin/insight/trace/trace.html#view-trace-details","title":"View Trace Details","text":"
                                  1. Click the TraceID of a trace in the trace list to view its detailed call information.

                                  "},{"location":"en/admin/insight/trace/trace.html#associated-logs","title":"Associated Logs","text":"
                                  1. Click the icon on the right side of the trace data to search for associated logs.

                                    • By default, it queries the log data within the duration of the trace and one minute after its completion.
                                    • The queried logs include those with the trace's TraceID in their log text and container logs related to the trace invocation process.
                                  2. Click View More to jump to the Associated Log page with conditions.

                                  3. By default, all logs are searched, but you can filter by the TraceID or the relevant container logs from the trace call process using the dropdown.

                                    Note

Since a trace may span clusters or namespaces, a user without sufficient permissions will be unable to query the associated logs for that trace.

                                  "},{"location":"en/admin/k8s/add-node.html","title":"Adding Worker Nodes","text":"

                                  If there are not enough nodes, you can add more nodes to the cluster.

                                  "},{"location":"en/admin/k8s/add-node.html#prerequisites","title":"Prerequisites","text":"
                                  • AI platform is installed
                                  • An administrator account is available
                                  • A cluster with GPU nodes has been created
                                  • A cloud host has been prepared
                                  "},{"location":"en/admin/k8s/add-node.html#steps-to-add-nodes","title":"Steps to Add Nodes","text":"
                                  1. Log in to the AI platform as an administrator.
                                  2. Navigate to Container Management -> Clusters, and click the name of the target cluster.

                                  3. On the cluster overview page, click Node Management, and then click the Add Node button on the right side.

                                  4. Follow the wizard, fill in the required parameters, and then click OK.

                                    Basic InformationParameter Configuration

                                  5. Click OK in the popup window.

                                  6. Return to the node list. The status of the newly added node will be Pending. After a few minutes, if the status changes to Running, it indicates that the node has been successfully added.

                                  Tip

                                  For nodes that have just been successfully added, it may take an additional 2-3 minutes for the GPU to be recognized.
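One hedged way to confirm GPU recognition from the command line, assuming an NVIDIA GPU with the NVIDIA device plugin (other vendors expose a different resource name):

  kubectl describe node <node-name> | grep -i nvidia.com/gpu   # a non-zero allocatable count means the GPU is recognized\n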

                                  "},{"location":"en/admin/k8s/create-k8s.html","title":"Creating a Kubernetes Cluster on the Cloud","text":"

Deploying a Kubernetes cluster supports efficient scheduling and management of AI computing resources, enables elastic scaling, provides high availability, and optimizes model training and inference.

                                  "},{"location":"en/admin/k8s/create-k8s.html#prerequisites","title":"Prerequisites","text":"
                                  • An AI platform is installed
                                  • An administrator account is available
                                  • A physical machine with a GPU is prepared
• Two IP address ranges are allocated (an 18-bit Pod CIDR and an 18-bit SVC CIDR) that must not conflict with existing network segments
                                  "},{"location":"en/admin/k8s/create-k8s.html#steps-to-create-the-cluster","title":"Steps to Create the Cluster","text":"
                                  1. Log in to the AI platform as an administrator.
                                  2. Create and launch 3 cloud hosts without GPUs to serve as the Master nodes for the cluster.

                                    • Configure resources: 16 CPU cores, 32 GB memory, 200 GB system disk (ReadWriteOnce)
                                    • Select Bridge network mode
                                    • Set the root password or add an SSH public key for SSH connection
                                    • Take note of the IP addresses of the 3 hosts
                                  3. Navigate to Container Management -> Clusters, and click the Create Cluster button on the right side.

                                  4. Follow the wizard to configure the various parameters of the cluster.

                                    Basic InformationNode ConfigurationNetwork ConfigurationAddon ConfigurationAdvanced Configuration

                                    After configuring the node information, click Start Check.

By default, each node can run 110 Pods (container groups). For nodes with higher specifications, this can be raised to 200 or 300 Pods.

                                  5. Wait for the cluster creation to complete.

                                  6. In the cluster list, find the newly created cluster, click the cluster name, navigate to Helm Apps -> Helm Charts, and search for metax-gpu-extensions in the search box, then click the card.

                                  7. Click the Install button on the right to begin installing the GPU plugin.

                                    Application SettingsKubernetes Orchestration Confirmation

Enter a name, select a namespace, and modify the image address in the YAML.

                                  8. You will automatically return to the Helm App list. Wait for the status of metax-gpu-extensions to change to Deployed.

                                  9. The cluster has been successfully created. You can now check the nodes included in the cluster. You can create AI workloads and use the GPU.

                                  Next step: Create AI Workloads

                                  "},{"location":"en/admin/k8s/remove-node.html","title":"Removing GPU Worker Nodes","text":"

                                  The cost of GPU resources is relatively high. If you temporarily do not need a GPU, you can remove the worker nodes with GPUs. The following steps are also applicable for removing regular worker nodes.

                                  "},{"location":"en/admin/k8s/remove-node.html#prerequisites","title":"Prerequisites","text":"
                                  • AI platform installed
                                  • An administrator account
                                  • A cluster with GPU nodes created
                                  "},{"location":"en/admin/k8s/remove-node.html#removal-steps","title":"Removal Steps","text":"
                                  1. Log in to the AI platform as an administrator.
                                  2. Navigate to Container Management -> Clusters, and click the name of the target cluster.

                                  3. On the cluster overview page, click Nodes, find the node you want to remove, click the \u2507 on the right side of the list, and select Remove Node from the pop-up menu.

                                  4. In the pop-up window, enter the node name, and after confirming it is correct, click Delete.

                                  5. You will automatically return to the node list, and the status will be Removing. After a few minutes, refresh the page; if the node is no longer there, it indicates that the node has been successfully removed.

6. After the node has been removed from the UI list, log in to the host of the removed node via SSH and execute the shutdown command.

                                  Tip

                                  After removing the node from the UI and shutting it down, the data on the node is not immediately deleted; the node's data will be retained for a period of time.

                                  "},{"location":"en/admin/kpanda/backup/index.html","title":"Backup and Restore","text":"

                                  Backup and restore are essential aspects of system management. In practice, it is important to first back up the data of the system at a specific point in time and securely store the backup. In case of incidents such as data corruption, loss, or accidental deletion, the system can be quickly restored based on the previous backup data, reducing downtime and minimizing losses.

                                  • In real production environments, services may be deployed across different clouds, regions, or availability zones. If one infrastructure faces a failure, organizations need to quickly restore applications in other available environments. In such cases, cross-cloud or cross-cluster backup and restore become crucial.
                                  • Large-scale systems often involve multiple roles and users with complex permission management systems. With many operators involved, accidents caused by human error can lead to system failures. In such scenarios, the ability to roll back the system quickly using previously backed-up data is necessary. Relying solely on manual troubleshooting, fault repair, and system recovery can be time-consuming, resulting in prolonged system unavailability and increased losses for organizations.
                                  • Additionally, factors like network attacks, natural disasters, and equipment malfunctions can also cause data accidents.

                                  Therefore, backup and restore are vital as the last line of defense for maintaining system stability and ensuring data security.

                                  Backups are typically classified into three types: full backups, incremental backups, and differential backups. Currently, AI platform supports full backups and incremental backups.

                                  The backup and restore provided by AI platform can be divided into two categories: Application Backup and ETCD Backup. It supports both manual backups and scheduled automatic backups using CronJobs.

                                  • Application Backup

                                    Application backup refers to backing up data of a specific workload in the cluster and then restoring that data either within the same cluster or in another cluster. It supports backing up all resources under a namespace or filtering resources by specific labels.

                                    Application backup also supports cross-cluster backup of stateful applications. For detailed steps, refer to the Backup and Restore MySQL Applications and Data Across Clusters guide.

                                  • etcd Backup

                                    etcd is the data storage component of Kubernetes. Kubernetes stores its own component's data and application data in etcd. Therefore, backing up etcd is equivalent to backing up the entire cluster's data, allowing quick restoration of the cluster to a previous state in case of failures.

                                    It's worth noting that currently, restoring etcd backup data is only supported within the same cluster (the original cluster). To learn more about related best practices, refer to the ETCD Backup and Restore guide.

                                  "},{"location":"en/admin/kpanda/backup/deployment.html","title":"Application Backup","text":"

This article explains how to back up applications in AI platform. The demo application used in this tutorial is dao-2048 , a deployment.

                                  "},{"location":"en/admin/kpanda/backup/deployment.html#prerequisites","title":"Prerequisites","text":"

                                  Before backing up a deployment, the following prerequisites must be met:

                                  • Integrate a Kubernetes cluster or create a Kubernetes cluster in the Container Management module, and be able to access the UI interface of the cluster.

                                  • Create a Namespace and a user.

                                  • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                  • Install the velero component, and ensure the velero component is running properly.

                                  • Create a deployment (the workload in this tutorial is named dao-2048 ), and label the deployment with app: dao-2048 .
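If the label was not added when the deployment was created, a hedged command-line sketch (the namespace is illustrative):

  kubectl label deployment dao-2048 app=dao-2048 -n <namespace>   # add the label used later to filter backup resources\n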

                                  "},{"location":"en/admin/kpanda/backup/deployment.html#backup-workload","title":"Backup workload","text":"

                                  Follow the steps below to backup the deployment dao-2048 .

                                  1. Enter the Container Management module, click Backup Recovery -> Application Backup on the left navigation bar, and enter the Application Backup list page.

2. On the Application Backup list page, select the cluster where velero and the dao-2048 application have been installed. Click Backup Plan in the upper right corner to create a new backup plan.

                                  3. Refer to the instructions below to fill in the backup configuration.

                                    • Name: The name of the new backup plan.
                                    • Source Cluster: The cluster where the application backup plan is to be executed.
                                    • Object Storage Location: The access path of the object storage configured when installing velero on the source cluster.
                                    • Namespace: The namespaces that need to be backed up, multiple selections are supported.
• Advanced Configuration: Use resource labels to back up only specific resources in the namespace (such as a particular application), or to exclude specific resources from the backup.

                                  4. Refer to the instructions below to set the backup execution frequency, and then click Next .

• Backup Frequency: Set the schedule for task execution by minute, hour, day, week, or month. Custom Cron expressions using numbers and * are supported; after entering an expression, its meaning is displayed (for example, 0 2 * * * runs the backup daily at 02:00). For detailed expression syntax rules, refer to Cron Schedule Syntax.
                                    • Retention Time (days): Set the storage time of backup resources, the default is 30 days, and will be deleted after expiration.
• Backup Data Volume (PV): Whether to back up the data in the data volume (PV); direct copy and CSI snapshots are supported.

                                      • Direct Replication: directly copy the data in the data volume (PV) for backup;
                                      • Use CSI snapshots: Use CSI snapshots to back up data volumes (PVs). Requires a CSI snapshot type available for backup in the cluster.

                                  5. Click OK , the page will automatically return to the application backup plan list, find the newly created dao-2048 backup plan, and perform the Immediate Execution operation.

                                  6. At this point, the Last Execution State of the cluster will change to in progress . After the backup is complete, you can click the name of the backup plan to view the details of the backup plan.
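If the velero CLI is installed somewhere with access to the cluster, the result can also be checked from the command line; a minimal sketch assuming velero was installed into the velero namespace:

  velero backup get -n velero   # a successful backup shows STATUS Completed\n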

                                  "},{"location":"en/admin/kpanda/backup/etcd-backup.html","title":"etcd backup","text":"

etcd backup takes cluster data as its backup object. In cases such as hardware device damage or misconfiguration during development and testing, cluster data can be restored through an etcd backup.

                                  This section will introduce how to realize the etcd backup for clusters. Also see etcd Backup and Restore Best Practices.

                                  "},{"location":"en/admin/kpanda/backup/etcd-backup.html#prerequisites","title":"Prerequisites","text":"
• A Kubernetes cluster has been integrated or created, and the UI interface of the cluster is accessible.

                                  • Created a namespace, user, and granted NS Admin or higher permissions to the user. For details, refer to Namespace Authorization.

                                  • Prepared a MinIO instance. It is recommended to create it through AI platform's MinIO middleware. For specific steps, refer to MinIO Object Storage.

                                  "},{"location":"en/admin/kpanda/backup/etcd-backup.html#create-etcd-backup","title":"Create etcd backup","text":"

                                  Follow the steps below to create an etcd backup.

                                  1. Enter Container Management -> Backup Recovery -> etcd Backup page, you can see all the current backup policies. Click Create Backup Policy on the right.

                                  2. Fill in the Basic Information. Then, click Next to automatically verify the connectivity of etcd. If the verification passes, proceed to the next step.

                                    • First select the backup cluster and log in to the terminal
• Enter the etcd address in the format https://${NodeIP}:${Port}.

                                      • In a standard Kubernetes cluster, the default port for etcd is 2379.
                                      • In a Suanova 4.0 cluster, the default port for etcd is 12379.
                                      • In a public cloud managed cluster, you need to contact the relevant developers to obtain the etcd port number. This is because the control plane components of public cloud clusters are maintained and managed by the cloud service provider. Users cannot directly access or view these components, nor can they obtain control plane port information through regular commands (such as kubectl).
                                      Ways to obtain port number
                                      1. Find the etcd Pod in the kube-system namespace

                                        kubectl get po -n kube-system | grep etcd\n
                                      2. Get the port number from the listen-client-urls of the etcd Pod

                                        kubectl get po -n kube-system ${etcd_pod_name} -oyaml | grep listen-client-urls # (1)!\n
                                        1. Replace etcd_pod_name with the actual Pod name

                                        The expected output is as follows, where the number after the node IP is the port number:

                                        - --listen-client-urls=https://127.0.0.1:2379,https://10.6.229.191:2379\n
• Fill in the CA certificate. You can use the following command to view the certificate content, then copy and paste it into the proper location:

                                      Standard Kubernetes ClusterSuanova 4.0 Cluster
                                      cat /etc/kubernetes/ssl/etcd/ca.crt\n
                                      cat /etc/daocloud/dce/certs/ca.crt\n
• Fill in the Cert certificate. You can use the following command to view the certificate content, then copy and paste it into the proper location:

                                      Standard Kubernetes ClusterSuanova 4.0 Cluster
                                      cat /etc/kubernetes/ssl/apiserver-etcd-client.crt\n
                                      cat /etc/daocloud/dce/certs/etcd/server.crt\n
• Fill in the Key. You can use the following command to view the key content, then copy and paste it into the proper location:

                                      Standard Kubernetes ClusterSuanova 4.0 Cluster
                                      cat /etc/kubernetes/ssl/apiserver-etcd-client.key\n
                                      cat /etc/daocloud/dce/certs/etcd/server.key\n
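Optionally, you can verify the endpoint and certificates before proceeding, for example with etcdctl on a control plane node. This is a sketch for a standard Kubernetes cluster; adjust the endpoint and file paths for your environment:

  ETCDCTL_API=3 etcdctl --endpoints=https://${NodeIP}:2379 --cacert=/etc/kubernetes/ssl/etcd/ca.crt --cert=/etc/kubernetes/ssl/apiserver-etcd-client.crt --key=/etc/kubernetes/ssl/apiserver-etcd-client.key endpoint health   # should report the endpoint as healthy\n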

                                    Note

                                    Click How to get below the input box to see how to obtain the proper information on the UI page.

                                  3. Refer to the following information to fill in the Backup Policy.

                                    • Backup Method: Choose either manual backup or scheduled backup

                                      • Manual Backup: Immediately perform a full backup of etcd data based on the backup configuration.
                                      • Scheduled Backup: Periodically perform full backups of etcd data according to the set backup frequency.
• Backup Chain Length: The maximum number of backups to retain. The default is 30.

                                    • Backup Frequency: it can be per hour, per day, per week or per month, and can also be customized.
                                  4. Refer to the following information to fill in the Storage Path.

                                    • Storage Provider: Default is S3 storage
                                    • Object Storage Access Address: The access address of MinIO
• Bucket: Create a Bucket in MinIO and fill in the Bucket name (see the sketch after this list for one way to create it)
                                    • Username: The login username for MinIO
                                    • Password: The login password for MinIO
                                  5. After clicking OK , the page will automatically redirect to the backup policy list, where you can view all the currently created ones.

                                    • Click the \u2507 action button on the right side of the policy to view logs, view YAML, update the policy, stop the policy, or execute the policy immediately.
                                    • When the backup method is manual, you can click Execute Now to perform the backup.
                                    • When the backup method is scheduled, the backup will be performed according to the configured time.
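A minimal sketch of preparing the MinIO bucket referenced in the storage path step, assuming the mc client is installed; the alias, address, and credentials are placeholders:

  mc alias set myminio http://<minio-address>:9000 <username> <password>   # register the MinIO endpoint\nmc mb myminio/<bucket-name>   # create the bucket used for etcd backups\n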
                                  "},{"location":"en/admin/kpanda/backup/etcd-backup.html#view-backup-policy-logs","title":"View Backup Policy Logs","text":"

                                  Click Logs to view the log content. By default, 100 lines are displayed. If you want to see more log information or download the logs, you can follow the prompts above the logs to go to the observability module.

                                  "},{"location":"en/admin/kpanda/backup/etcd-backup.html#view-backup-policy-details","title":"View Backup POlicy Details","text":"

                                  Go to Container Management -> Backup Recovery -> etcd Backup , click the Backup Policy tab, and then click the policy to view the details.

                                  "},{"location":"en/admin/kpanda/backup/etcd-backup.html#view-recovery-point","title":"View Recovery Point","text":"
                                  1. Go to Container Management -> Backup Recovery -> etcd Backup, and click the Recovery Point tab.
                                  2. After selecting the target cluster, you can view all the backup information under that cluster.

                                    Each time a backup is executed, a proper recovery point is generated, which can be used to quickly restore the application from a successful recovery point.

                                  "},{"location":"en/admin/kpanda/backup/install-velero.html","title":"Install the Velero Plugin","text":"

                                  velero is an open source tool for backing up and restoring Kubernetes cluster resources. It can back up resources in a Kubernetes cluster to cloud storage services, local storage, or other locations, and restore those resources to the same or a different cluster when needed.

                                  This section introduces how to deploy the Velero plugin in AI platform using the Helm Apps.

                                  "},{"location":"en/admin/kpanda/backup/install-velero.html#prerequisites","title":"Prerequisites","text":"

                                  Before installing the velero plugin, the following prerequisites need to be met:

• A Kubernetes cluster has been integrated or created, and the UI interface of the cluster is accessible.
                                  • Created a velero namespace.
                                  • You should have permissions not lower than NS Editor. For details, refer to Namespace Authorization.
                                  "},{"location":"en/admin/kpanda/backup/install-velero.html#steps","title":"Steps","text":"

                                  Please perform the following steps to install the velero plugin for your cluster.

1. On the cluster list page, find the target cluster where the velero plugin is to be installed, click the cluster name, click Helm Apps -> Helm chart in the left navigation bar, and enter velero in the search bar to search.

2. Read the introduction of the velero plugin, select a version, and click the Install button. This page takes version 5.2.0 as an example; installing version 5.2.0 or later is recommended.

                                  3. Configure basic info .

• Name: Enter the plugin name. Note that the name can be up to 63 characters, may only contain lowercase letters, numbers, and separators (\"-\"), and must start and end with a lowercase letter or number, such as metrics-server-01.
• Namespace: Select the namespace for plugin installation; it must be the velero namespace.
                                    • Version: The version of the plugin, here we take 5.2.0 version as an example.
                                    • Wait: When enabled, it will wait for all associated resources under the application to be ready before marking the application installation as successful.
• Deletion Failed: When enabled, Wait is also enabled by default. If the installation fails, the installation-related resources will be removed.
                                    • Detailed Logs: Turn on the verbose output of the installation process log.

                                    Note

After enabling Wait and/or Deletion Failed , it may take some time for the app to be marked as Running .

                                  4. Configure Velero chart Parameter Settings according to the following instructions

                                    • S3 Credentials: Configure the authentication information of object storage (minio).

                                      • Use secret: Keep the default configuration true.
                                      • Secret name: Keep the default configuration velero-s3-credential.
                                      • SecretContents.aws_access_key_id = : Configure the username for accessing object storage, replace with the actual parameter.
                                      • SecretContents.aws_secret_access_key = : Configure the password for accessing object storage, replace with the actual parameter.

An example of the Use existing secret parameter is as follows:

                                        [default]\naws_access_key_id = minio\naws_secret_access_key = minio123\n
                                        • BackupStorageLocation: The location where Velero backs up data.

                                          • S3 bucket: The name of the storage bucket used to save backup data (must be a real storage bucket that already exists in minio).
                                          • Is default BackupStorage: Keep the default configuration true.
• S3 access mode: Velero's access mode to the backup data; one of the following can be selected:
                                          • ReadWrite: Allow Velero to read and write backup data;
                                          • ReadOnly: Allow Velero to read backup data, but cannot modify backup data;
                                          • WriteOnly: Only allow Velero to write backup data, and cannot read backup data.
                                        • S3 Configs: Detailed configuration of S3 storage (minio).

                                          • S3 region: The geographical region of cloud storage. The default is to use the us-east-1 parameter, which is provided by the system administrator.
                                          • S3 force path style: Keep the default configuration true.
• S3 server URL: The console access address of object storage (minio). MinIO generally provides two services: UI access and console access. Use the console access address here.

                                      • Click the OK button to complete the installation of the Velero plugin. The system will automatically jump to the Helm Apps list page. After waiting for a few minutes, refresh the page, and you can see the application just installed.
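Once the app shows as installed, a quick hedged verification from the command line (assuming kubectl access and installation into the velero namespace):

  kubectl get pods -n velero   # the velero server pod should be Running\nkubectl get backupstoragelocations -n velero   # the storage location phase should be Available\n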

                                      • "},{"location":"en/admin/kpanda/best-practice/add-master-node.html","title":"Scaling Controller Nodes in a Worker Cluster","text":"

                                        This article provides a step-by-step guide on how to manually scale the control nodes in a worker cluster to achieve high availability for self-built clusters.

                                        Note

                                        It is recommended to enable high availability mode when creating the worker cluster in the interface. Manually scaling the control nodes of the worker cluster involves certain operational risks, so please proceed with caution.

                                        "},{"location":"en/admin/kpanda/best-practice/add-master-node.html#prerequisites","title":"Prerequisites","text":"
• A worker cluster has been created using the AI platform. You can refer to the documentation on Creating a Worker Cluster.
                                        • The managed cluster associated with the worker cluster exists in the current platform and is running normally.

                                        Note

                                        Managed cluster refers to the cluster specified during the creation of the worker cluster, which provides capabilities such as Kubernetes version upgrades, node scaling, uninstallation, and operation records for the current cluster.

                                        "},{"location":"en/admin/kpanda/best-practice/add-master-node.html#modify-the-host-manifest","title":"Modify the Host manifest","text":"
                                        1. Log in to the container management platform and go to the overview page of the cluster where you want to scale the control nodes. In the Basic Information section, locate the Managed Cluster of the current cluster and click its name to enter the overview page.

                                        2. In the overview page of the managed cluster, click Console to open the cloud terminal console. Run the following command to find the host manifest of the worker cluster that needs to be scaled.

                                          kubectl get cm -n kubean-system ${ClusterName}-hosts-conf -oyaml\n

                                          ${ClusterName} is the name of the worker cluster to be scaled.

3. Modify the host manifest file based on the example below and add the information for the new controller nodes (one way to apply the edit is shown in the sketch after the parameter list).

                                          Before ModificationAfter Modification
                                          apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: tanghai-dev-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      hosts:\n        node1:\n          ip: 10.6.175.10 \n          access_ip: 10.6.175.10\n          ansible_host: 10.6.175.10 \n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: password01\n      children:\n        kube_control_plane:\n          hosts:\n            node1:\n        kube_node:\n          hosts:\n            node1:\n        etcd:\n          hosts:\n            node1:\n        k8s_cluster:\n          children:\n            kube_control_plane:\n            kube_node:\n        calico_rr:\n          hosts: {}\n......\n
                                          apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: tanghai-dev-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      hosts:\n        node1:\n          ip: 10.6.175.10\n          access_ip: 10.6.175.10 \n          ansible_host: 10.6.175.10\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: password01\n        node2: # Add controller node2\n          ip: 10.6.175.20\n          access_ip: 10.6.175.20\n          ansible_host: 10.6.175.20\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: password01\n        node3:\n          ip: 10.6.175.30 \n          access_ip: 10.6.175.30\n          ansible_host: 10.6.175.30 \n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: password01\n      children:\n        kube_control_plane:\n          hosts:\n            node1:\n            node2: # Add controller node2\n            node3: # Add controller node3\n        kube_node:\n          hosts:\n            node1:\n            node2: # Add controller node2\n            node3: # Add controller node3\n        etcd:\n          hosts:\n            node1:\n            node2: # Add controller node2\n            node3: # Add controller node3\n        k8s_cluster:\n          children:\n            kube_control_plane:\n            kube_node:\n        calico_rr:\n          hosts: {}\n

                                        Important Parameters:

                                        • all.hosts.node1: Existing master node in the original cluster
                                        • all.hosts.node2, all.hosts.node3: Control nodes to be added during cluster scaling
                                        • all.children.kube_control_plane.hosts: Control plane group in the cluster
                                        • all.children.kube_node.hosts: Worker node group in the cluster
                                        • all.children.etcd.hosts: ETCD node group in the cluster
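One way to apply the host manifest changes described above, assuming kubectl access from the managed cluster's console:

  kubectl edit cm -n kubean-system ${ClusterName}-hosts-conf   # opens the ConfigMap in an editor; add node2/node3 as in the example\n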
                                        "},{"location":"en/admin/kpanda/best-practice/add-master-node.html#add-expansion-task-scale-master-node-opsyaml-using-the-clusteroperationyml-template","title":"Add Expansion Task \"scale-master-node-ops.yaml\" using the ClusterOperation.yml Template","text":"

                                        Use the following ClusterOperation.yml template to add a cluster control node expansion task called \"scale-master-node-ops.yaml\".

                                        ClusterOperation.yml
                                        apiVersion: kubean.io/v1alpha1\nkind: ClusterOperation\nmetadata:\n  name: cluster1-online-install-ops\nspec:\n  cluster: ${cluster-name} # Specify cluster name\n  image: ghcr.m.daocloud.io/kubean-io/spray-job:v0.18.0 # Specify the image for the kubean job\n  actionType: playbook\n  action: cluster.yml\n  extraArgs: --limit=etcd,kube_control_plane -e ignore_assert_errors=yes\n  preHook:\n    - actionType: playbook\n      action: ping.yml\n    - actionType: playbook\n      action: disable-firewalld.yml\n    - actionType: playbook\n      action: enable-repo.yml  # In an offline environment, you need to add this yaml and\n      # set the correct repo-list (for installing operating system packages).\n      # The following parameter values are for reference only.\n      extraArgs: |\n        -e \"{repo_list: ['http://172.30.41.0:9000/kubean/centos/\\$releasever/os/\\$basearch','http://172.30.41.0:9000/kubean/centos-iso/\\$releasever/os/\\$basearch']}\"\n  postHook:\n    - actionType: playbook\n      action: upgrade-cluster.yml\n      extraArgs: --limit=etcd,kube_control_plane -e ignore_assert_errors=yes\n    - actionType: playbook\n      action: kubeconfig.yml\n    - actionType: playbook\n      action: cluster-info.yml\n

                                        Note

                                        • spec.image: The image address should be consistent with the image within the job that was previously deployed
• spec.action: set to cluster.yml. If three or more Master (etcd) nodes are added at once, add the extra parameter -e etcd_retries=10 to increase the number of etcd node join retries.
                                        • spec.extraArgs: set to --limit=etcd,kube_control_plane -e ignore_assert_errors=yes
                                        • If it is an offline environment, spec.preHook needs to add enable-repo.yml, and the extraArgs parameter should fill in the correct repo_list for the relevant OS
                                        • spec.postHook.action: should include upgrade-cluster.yml, where extraArgs is set to --limit=etcd,kube_control_plane -e ignore_assert_errors=yes

                                        Create and deploy scale-master-node-ops.yaml based on the above configuration.

                                        # Copy the above manifest\nvi scale-master-node-ops.yaml\nkubectl apply -f scale-master-node-ops.yaml -n kubean-system\n

Run the following command to verify it.

                                        kubectl get node\n
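If the expansion succeeded, the new nodes appear with the control-plane role. Illustrative output only; node names, ages, and versions will differ in your environment:

  NAME    STATUS   ROLES           AGE   VERSION\nnode1   Ready    control-plane   10d   v1.26.5\nnode2   Ready    control-plane   5m    v1.26.5\nnode3   Ready    control-plane   5m    v1.26.5\n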
                                        "},{"location":"en/admin/kpanda/best-practice/add-worker-node-on-global.html","title":"Scaling the Worker Nodes of the Global Service Cluster","text":"

                                        This page introduces how to manually scale the worker nodes of the global service cluster in offline mode. By default, it is not recommended to scale the global service cluster after deploying AI platform. Please ensure proper resource planning before deploying AI platform.

                                        Note

The controller nodes of the global service cluster do not support scaling.

                                        "},{"location":"en/admin/kpanda/best-practice/add-worker-node-on-global.html#prerequisites","title":"Prerequisites","text":"
• The AI platform deployment has been completed through the bootstrap node, and the kind cluster on the bootstrap node is running normally.
                                        • You must log in with a user account that has admin privileges on the platform.
                                        "},{"location":"en/admin/kpanda/best-practice/add-worker-node-on-global.html#get-kubeconfig-for-the-kind-cluster-on-the-bootstrap-node","title":"Get kubeconfig for the kind cluster on the bootstrap node","text":"
                                        1. Run the following command to log in to the bootstrap node:

                                          ssh root@bootstrap-node-ip-address\n
                                        2. On the bootstrap node, run the following command to get the CONTAINER ID of the kind cluster:

                                          [root@localhost ~]# podman ps\n\n# Expected output:\nCONTAINER ID  IMAGE                                      COMMAND     CREATED      STATUS      PORTS                                                                                                         NAMES\n220d662b1b6a  docker.m.daocloud.io/kindest/node:v1.26.2              2 weeks ago  Up 2 weeks  0.0.0.0:443->30443/tcp, 0.0.0.0:8081->30081/tcp, 0.0.0.0:9000-9001->32000-32001/tcp, 0.0.0.0:36674->6443/tcp  my-cluster-installer-control-plane\n
                                        3. Run the following command to enter a container in the kind cluster:

                                          podman exec -it {CONTAINER ID} bash\n

                                          Replace {CONTAINER ID} with your actual container ID.

                                        4. Inside the container of the kind cluster, run the following command to get the kubeconfig information for the kind cluster:

                                          kubectl config view --minify --flatten --raw\n

                                          After the console output, copy the kubeconfig information of the kind cluster for the next step.

                                        "},{"location":"en/admin/kpanda/best-practice/add-worker-node-on-global.html#create-clusterkubeanio-resources-in-the-kind-cluster-on-the-bootstrap-node","title":"Create cluster.kubean.io resources in the kind cluster on the bootstrap node","text":"
                                        1. Use the command podman exec -it {CONTAINER ID} bash to enter the kind cluster container.

                                        2. Inside the kind cluster container, run the following command to get the kind cluster name:

                                          kubectl get clusters\n
                                        3. Copy and run the following command within the kind cluster to create the cluster.kubean.io resource:

                                          kubectl apply -f - <<EOF\napiVersion: kubean.io/v1alpha1\nkind: Cluster\nmetadata:\n  labels:\n    clusterName: kpanda-global-cluster\n  name: kpanda-global-cluster\nspec:\n  hostsConfRef:\n    name: my-cluster-hosts-conf\n    namespace: kubean-system\n  kubeconfRef:\n    name: my-cluster-kubeconf\n    namespace: kubean-system\n  varsConfRef:\n    name: my-cluster-vars-conf\n    namespace: kubean-system\nEOF\n

                                          Note

                                          The default cluster name for spec.hostsConfRef.name, spec.kubeconfRef.name, and spec.varsConfRef.name is my-cluster. Please replace it with the kind cluster name obtained in the previous step.

                                        4. Run the following command in the kind cluster to verify if the cluster.kubean.io resource is created successfully:

                                          kubectl get clusters\n

                                          Expected output is:

                                          NAME                    AGE\nkpanda-global-cluster   3s\nmy-cluster              16d\n
                                        "},{"location":"en/admin/kpanda/best-practice/add-worker-node-on-global.html#update-the-containerd-configuration-in-the-kind-cluster-on-the-bootstrap-node","title":"Update the containerd configuration in the kind cluster on the bootstrap node","text":"
                                        1. Run the following command to log in to one of the controller nodes of the global service cluster:

                                          ssh root@<global-service-cluster-controller-node-IP>\n
                                        2. On the global service cluster controller node, run the following command to copy the containerd configuration file config.toml from the controller node to the bootstrap node:

                                          scp /etc/containerd/config.toml root@<bootstrap-node-IP>:/root\n
                                        3. On the bootstrap node, select the insecure registry section from the containerd configuration file config.toml that was copied from the controller node, and add it to the config.toml in the kind cluster.

                                          An example of the insecure registry section is as follows:

                                          [plugins.\"io.containerd.grpc.v1.cri\".registry]\n  [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors]\n    [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors.\"10.6.202.20\"]\n      endpoint = [\"https://10.6.202.20\"]\n    [plugins.\"io.containerd.grpc.v1.cri\".registry.configs.\"10.6.202.20\".tls]\n      insecure_skip_verify = true\n

                                          Note

                                          Since the config.toml file in the kind cluster cannot be modified directly, you can first copy the file out to modify it and then copy it back to the kind cluster. The steps are as follows:

                                          1. Run the following command on the bootstrap node to copy the file out:

                                            podman cp {CONTAINER ID}:/etc/containerd/config.toml ./config.toml.kind\n
                                          2. Run the following command to edit the config.toml file:

                                            vim ./config.toml.kind\n
                                          3. After modifying the file, copy it back to the kind cluster by running the following command:

                                            podman cp ./config.toml.kind {CONTAINER ID}:/etc/containerd/config.toml\n

                                            {CONTAINER ID} should be replaced with your actual container ID.

                                        4. Run the following command within the kind cluster to restart the containerd service:

                                          systemctl restart containerd\n
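A hedged check that the restart took effect and the registry configuration was picked up (run inside the kind cluster container):

  systemctl is-active containerd   # should print: active\ncrictl info | grep -A 3 <registry-address>   # the insecure registry entries added above should appear in the CRI config\n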
                                        "},{"location":"en/admin/kpanda/best-practice/add-worker-node-on-global.html#integrate-a-kind-cluster-into-the-ai-platform-cluster-list","title":"Integrate a Kind cluster into the AI platform cluster list","text":"
                                        1. Log in to AI platform, navigate to Container Management, and on the right side of the cluster list, click the Integrate Cluster button.

                                        2. In the integration configuration section, fill in and edit the kubeconfig of the Kind cluster.

                                          apiVersion: v1\nclusters:\n- cluster:\n    insecure-skip-tls-verify: true # (1)!\n    certificate-authority-data: LS0TLSCFDFWEFEWFEWFGGEWGFWFEWGWEGFEWGEWGSDGFSDSD\n    server: https://my-cluster-installer-control-plane:6443 # (2)!\nname: my-cluster-installer\ncontexts:\n- context:\n    cluster: my-cluster-installer\n    user: kubernetes-admin\nname: kubernetes-admin@my-cluster-installer\ncurrent-context: kubernetes-admin@my-cluster-installer\nkind: Config\npreferences: {}\nusers:\n
                                          1. Skip TLS verification; this line needs to be added manually.
                                          2. Replace it with the IP of the Kind node, and change port 6443 to the port mapped to the node (you can run the command podman ps|grep 6443 to check the mapped port).

                                        3. Click the OK to complete the integration of the Kind cluster.

                                        "},{"location":"en/admin/kpanda/best-practice/add-worker-node-on-global.html#add-labels-to-the-global-service-cluster","title":"Add Labels to the Global Service Cluster","text":"
                                        1. Log in to AI platform, navigate to Container Management, find kpanda-global-cluster, and open the Basic Configuration menu on the right side.

                                        2. On the Basic Configuration page, add the label kpanda.io/managed-by=my-cluster to the global service cluster:

                                        Note

                                        The value in the label kpanda.io/managed-by=my-cluster corresponds to the name of the cluster specified during the integration process, which defaults to my-cluster. Please adjust this according to your actual situation.
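
                                        If you prefer the command line, a hypothetical sketch (the Cluster resource kind, API group, and name below are assumptions about how the global service cluster is exposed; verify them in your environment before use):

                                          kubectl label clusters.kpanda.io kpanda-global-cluster kpanda.io/managed-by=my-cluster   # hypothetical resource kind and name\n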

                                        "},{"location":"en/admin/kpanda/best-practice/add-worker-node-on-global.html#add-nodes-to-the-global-service-cluster","title":"Add nodes to the global service cluster","text":"
                                        1. Go to the node list page of the global service cluster, find the Integrate Node button on the right side of the node list, and click it to enter the node configuration page.

                                        2. After filling in the IP and authentication information of the node to be integrated, click Start Check. Once the node check is completed, click Next.

                                        3. Add the following custom parameters in the Custom Parameters section:

                                          download_run_once: false\ndownload_container: false\ndownload_force_cache: false\ndownload_localhost: false\n

                                        4. Click the OK button and wait for the node to be added.

                                        "},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html","title":"Cross-Cluster Backup and Recovery of MySQL Application and Data","text":"

                                        This demonstration will show how to use the application backup feature in AI platform to perform cross-cluster backup migration for a stateful application.

                                        Note

                                        The current operator should have admin privileges on AI platform.

                                        "},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html#prepare-the-demonstration-environment","title":"Prepare the Demonstration Environment","text":""},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html#prepare-two-clusters","title":"Prepare Two Clusters","text":"

                                        main-cluster will be the source cluster for backup data, and recovery-cluster will be the target cluster for data recovery.

                                        | Cluster | IP | Nodes |
                                        | --- | --- | --- |
                                        | main-cluster | 10.6.175.100 | 1 node |
                                        | recovery-cluster | 10.6.175.110 | 1 node |

                                        "},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html#set-up-minio-configuration","title":"Set Up MinIO Configuration","text":"
                                        | MinIO Server Address | Bucket | Username | Password |
                                        | --- | --- | --- | --- |
                                        | http://10.7.209.110:9000 | mysql-demo | root | dangerous |

                                        "},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html#deploy-nfs-storage-service-in-both-clusters","title":"Deploy NFS Storage Service in Both Clusters","text":"

                                        Note

                                        NFS storage service needs to be deployed on all nodes in both the source and target clusters.

                                        1. Install the dependencies required for NFS on all nodes in both clusters.

                                          yum install -y nfs-utils iscsi-initiator-utils\n


                                        2. Prepare NFS storage service for the MySQL application.

                                        Log in to a control node of each cluster (main-cluster and recovery-cluster). Use the command vi nfs.yaml to create a file named nfs.yaml on the node, and copy the following YAML content into it.

                                        <details>\n<summary>nfs.yaml</summary>\n```yaml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: nfs-provisioner-runner\n  namespace: nfs-system\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"persistentvolumes\"]\n    verbs: [\"get\", \"list\", \"watch\", \"create\", \"delete\"]\n  - apiGroups: [\"\"]\n    resources: [\"persistentvolumeclaims\"]\n    verbs: [\"get\", \"list\", \"watch\", \"update\"]\n  - apiGroups: [\"storage.k8s.io\"]\n    resources: [\"storageclasses\"]\n    verbs: [\"get\", \"list\", \"watch\"]\n  - apiGroups: [\"\"]\n    resources: [\"events\"]\n    verbs: [\"create\", \"update\", \"patch\"]\n  - apiGroups: [\"\"]\n    resources: [\"services\", \"endpoints\"]\n    verbs: [\"get\"]\n  - apiGroups: [\"extensions\"]\n    resources: [\"podsecuritypolicies\"]\n    resourceNames: [\"nfs-provisioner\"]\n    verbs: [\"use\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: run-nfs-provisioner\nsubjects:\n  - kind: ServiceAccount\n    name: nfs-provisioner\n    # replace with namespace where provisioner is deployed\n    namespace: default\nroleRef:\n  kind: ClusterRole\n  name: nfs-provisioner-runner\n  apiGroup: rbac.authorization.k8s.io\n---\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: leader-locking-nfs-provisioner\nrules:\n  - apiGroups: [\"\"]\n    resources: [\"endpoints\"]\n    verbs: [\"get\", \"list\", \"watch\", \"create\", \"update\", \"patch\"]\n---\nkind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: leader-locking-nfs-provisioner\nsubjects:\n  - kind: ServiceAccount\n    name: nfs-provisioner\n    # replace with namespace where provisioner is deployed\n    namespace: default\nroleRef:\n  kind: Role\n  name: leader-locking-nfs-provisioner\n  apiGroup: rbac.authorization.k8s.io\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: nfs-provisioner\n---\nkind: Service\napiVersion: v1\nmetadata:\n  name: nfs-provisioner\n  labels:\n    app: nfs-provisioner\nspec:\n  ports:\n    - name: nfs\n      port: 2049\n    - name: nfs-udp\n      port: 2049\n      protocol: UDP\n    - name: nlockmgr\n      port: 32803\n    - name: nlockmgr-udp\n      port: 32803\n      protocol: UDP\n    - name: mountd\n      port: 20048\n    - name: mountd-udp\n      port: 20048\n      protocol: UDP\n    - name: rquotad\n      port: 875\n    - name: rquotad-udp\n      port: 875\n      protocol: UDP\n    - name: rpcbind\n      port: 111\n    - name: rpcbind-udp\n      port: 111\n      protocol: UDP\n    - name: statd\n      port: 662\n    - name: statd-udp\n      port: 662\n      protocol: UDP\n  selector:\n    app: nfs-provisioner\n---\nkind: Deployment\napiVersion: apps/v1\nmetadata:\n  name: nfs-provisioner\nspec:\n  selector:\n    matchLabels:\n      app: nfs-provisioner\n  replicas: 1\n  strategy:\n    type: Recreate\n  template:\n    metadata:\n      labels:\n        app: nfs-provisioner\n    spec:\n      serviceAccount: nfs-provisioner\n      containers:\n        - name: nfs-provisioner\n          resources:\n            limits:\n              cpu: \"1\"\n              memory: \"4294967296\"\n          image: release.daocloud.io/velero/nfs-provisioner:v3.0.0\n          ports:\n            - name: nfs\n              containerPort: 2049\n            - name: nfs-udp\n              containerPort: 2049\n              protocol: UDP\n            - name: nlockmgr\n              containerPort: 32803\n            - name: nlockmgr-udp\n              containerPort: 32803\n              protocol: UDP\n            - name: mountd\n              containerPort: 20048\n            - name: mountd-udp\n              containerPort: 20048\n              protocol: UDP\n            - name: rquotad\n              containerPort: 875\n            - name: rquotad-udp\n              containerPort: 875\n              protocol: UDP\n            - name: rpcbind\n              containerPort: 111\n            - name: rpcbind-udp\n              containerPort: 111\n              protocol: UDP\n            - name: statd\n              containerPort: 662\n            - name: statd-udp\n              containerPort: 662\n              protocol: UDP\n          securityContext:\n            capabilities:\n              add:\n                - DAC_READ_SEARCH\n                - SYS_RESOURCE\n          args:\n            - \"-provisioner=example.com/nfs\"\n          env:\n            - name: POD_IP\n              valueFrom:\n                fieldRef:\n                  fieldPath: status.podIP\n            - name: SERVICE_NAME\n              value: nfs-provisioner\n            - name: POD_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  fieldPath: metadata.namespace\n          imagePullPolicy: \"IfNotPresent\"\n          volumeMounts:\n            - name: export-volume\n              mountPath: /export\n      volumes:\n        - name: export-volume\n          hostPath:\n            path: /data\n---\nkind: StorageClass\napiVersion: storage.k8s.io/v1\nmetadata:\n  name: nfs\nprovisioner: example.com/nfs\nmountOptions:\n  - vers=4.1\n```\n</details>\n
                                        3. Run the nfs.yaml file on the control nodes of both clusters.

                                          kubectl apply -f nfs.yaml\n

                                          Expected output

                                          [root@g-master1 ~]# kubectl apply -f nfs.yaml\nclusterrole.rbac.authorization.k8s.io/nfs-provisioner-runner created\nclusterrolebinding.rbac.authorization.k8s.io/run-nfs-provisioner created\nrole.rbac.authorization.k8s.io/leader-locking-nfs-provisioner created\nrolebinding.rbac.authorization.k8s.io/leader-locking-nfs-provisioner created\nserviceaccount/nfs-provisioner created\nservice/nfs-provisioner created\ndeployment.apps/nfs-provisioner created\nstorageclass.storage.k8s.io/nfs created\n
                                        4. Check the status of the NFS Pod and wait for its status to become running (approximately 2 minutes).

                                          kubectl get pod -owide\n

                                          Expected output

                                          [root@g-master1 ~]# kubectl get pod -owide\nNAME                               READY   STATUS    RESTARTS   AGE     IP              NODE        NOMINATED NODE   READINESS GATES\nnfs-provisioner-7dfb9bcc45-74ws2   1/1     Running   0          4m45s   10.6.175.100   g-master1   <none>           <none>\n

                                        "},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html#deploy-mysql-application","title":"Deploy MySQL Application","text":"
                                        1. Prepare a PVC (Persistent Volume Claim) based on NFS storage for the MySQL application to store its data.

                                          Use the command vi pvc.yaml to create a file named pvc.yaml on the node, and copy the following YAML content into the pvc.yaml file.

                                          pvc.yaml

                                          apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: mydata\n  namespace: default\nspec:\n  accessModes:\n  - ReadWriteOnce\n  resources:\n    requests:\n      storage: \"1Gi\"\n  storageClassName: nfs\n  volumeMode: Filesystem\n

                                        2. Run the pvc.yaml file using the kubectl tool on the node.

                                          kubectl apply -f pvc.yaml\n

                                          Expected output

                                          [root@g-master1 ~]# kubectl apply -f pvc.yaml\npersistentvolumeclaim/mydata created\n

                                        3. Deploy the MySQL application.

                                          Use the command vi mysql.yaml to create a file named mysql.yaml on the node, and copy the following YAML content into the mysql.yaml file.

                                          mysql.yaml

                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: mysql-deploy\n  name: mysql-deploy\n  namespace: default\nspec:\n  progressDeadlineSeconds: 600\n  replicas: 1\n  revisionHistoryLimit: 10\n  selector:\n    matchLabels:\n      app: mysql-deploy\n  strategy:\n    rollingUpdate:\n      maxSurge: 25%\n      maxUnavailable: 25%\n    type: RollingUpdate\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mysql-deploy\n      name: mysql-deploy\n    spec:\n      containers:\n      - args:\n        - --ignore-db-dir=lost+found\n        env:\n        - name: MYSQL_ROOT_PASSWORD\n          value: dangerous\n        image: release.daocloud.io/velero/mysql:5\n        imagePullPolicy: IfNotPresent\n        name: mysql-deploy\n        ports:\n        - containerPort: 3306\n          protocol: TCP\n        resources:\n          limits:\n            cpu: \"1\"\n            memory: \"4294967296\"\n        terminationMessagePath: /dev/termination-log\n        terminationMessagePolicy: File\n        volumeMounts:\n        - mountPath: /var/lib/mysql\n          name: data\n      dnsPolicy: ClusterFirst\n      restartPolicy: Always\n      schedulerName: default-scheduler\n      securityContext:\n        fsGroup: 999\n      terminationGracePeriodSeconds: 30\n      volumes:\n      - name: data\n        persistentVolumeClaim:\n          claimName: mydata\n

                                        4. Run the mysql.yaml file using the kubectl tool on the node.

                                          kubectl apply -f mysql.yaml\n

                                          Expected output

                                          [root@g-master1 ~]# kubectl apply -f mysql.yaml\ndeployment.apps/mysql-deploy created\n

                                        5. Check the status of the MySQL Pod.

                                          Run kubectl get pod | grep mysql to view the status of the MySQL Pod and wait for its status to become running (approximately 2 minutes).

                                          Expected output

                                          [root@g-master1 ~]# kubectl get pod |grep mysql\nmysql-deploy-5d6f94cb5c-gkrks      1/1     Running   0          2m53s\n

                                          Note

                                          • If the MySQL Pod remains in a non-running state for a long time, it is usually because NFS dependencies are not installed on all nodes in the cluster.
                                          • Run kubectl describe pod ${mysql pod name} to view detailed information about the Pod.
                                          • If there is an error message like MountVolume.SetUp failed for volume \"pvc-4ad70cc6-df37-4253-b0c9-8cb86518ccf8\": mount failed: exit status 32, delete the previously created resources by running kubectl delete -f against nfs.yaml, pvc.yaml, and mysql.yaml, then start again from deploying the NFS service.
                                        6. Write data to the MySQL application.

                                          To verify the success of the data migration later, you can use a script to write test data to the MySQL application.

                                          1. Use the command vi insert.sh to create a script named insert.sh on the node, and copy the following content into the script.

                                            insert.sh

                                            #!/bin/bash\n\nfunction rand(){\n    min=$1\n    max=$(($2-$min+1))\n    num=$(date +%s%N)\n    echo $(($num%$max+$min))\n}\n\nfunction insert(){\n    user=$(date +%s%N | md5sum | cut -c 1-9)\n    age=$(rand 1 100)\n\n    sql=\"INSERT INTO test.users(user_name, age)VALUES('${user}', ${age});\"\n    echo -e ${sql}\n\n    kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"${sql}\"\n\n}\n\nkubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"CREATE DATABASE IF NOT EXISTS test;\"\nkubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"CREATE TABLE IF NOT EXISTS test.users(user_name VARCHAR(10) NOT NULL,age INT UNSIGNED)ENGINE=InnoDB DEFAULT CHARSET=utf8;\"\n\nwhile true;do\n    insert\n    sleep 1\ndone\n

                                          2. Make insert.sh executable, then run the script.

                                            [root@g-master1 ~]# chmod +x insert.sh\n[root@g-master1 ~]# ./insert.sh\n

                                            Expected output

                                            mysql: [Warning] Using a password on the command line interface can be insecure.\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('dc09195ba', 10);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('80ab6aa28', 70);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('f488e3d46', 23);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('e6098695c', 93);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('eda563e7d', 63);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('a4d1b8d68', 17);\nmysql: [Warning] Using a password on the command line interface can be insecure.\n

                                          3. Press Ctrl + C to stop the script.

                                          4. Check the data written to MySQL inside the Pod.

                                            kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"SELECT * FROM test.users;\"\n

                                            Expected output

                                            [root@g-master1 ~]# kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"SELECT * FROM test.users;\"\nmysql: [Warning] Using a password on the command line interface can be insecure.\nuser_name   age\ndc09195ba   10\n80ab6aa28   70\nf488e3d46   23\ne6098695c   93\neda563e7d   63\na4d1b8d68   17\nea47546d9   86\na34311f2e   47\n740cefe17   33\nede85ea28   65\nb6d0d6a0e   46\nf0eb38e50   44\nc9d2f28f5   72\n8ddaafc6f   31\n3ae078d0e   23\n6e041631e   96\n

                                        "},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html#install-velero-plugin-on-both-clusters","title":"Install Velero Plugin on Both Clusters","text":"

                                        Note

                                        The velero plugin needs to be installed on both the source and target clusters.

                                        Refer to the Install Velero Plugin documentation and the MinIO configuration below to install the velero plugin on the main-cluster and recovery-cluster.

                                        | MinIO Server Address | Bucket | Username | Password |
                                        | --- | --- | --- | --- |
                                        | http://10.7.209.110:9000 | mysql-demo | root | dangerous |

                                        Note

                                        When installing the plugin, replace S3url with the MinIO server address prepared for this demonstration, and replace the bucket with an existing bucket in MinIO.
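
                                        For reference only, a minimal sketch of installing Velero with its CLI against this MinIO setup (the plugin image tag and the credentials file path are assumptions; the documented installation flow above remains the recommended method):

                                          # ./credentials-velero is assumed to contain:\n# [default]\n# aws_access_key_id = root\n# aws_secret_access_key = dangerous\nvelero install --provider aws \\\n  --plugins velero/velero-plugin-for-aws:v1.7.0 \\\n  --bucket mysql-demo \\\n  --secret-file ./credentials-velero \\\n  --use-volume-snapshots=false \\\n  --backup-location-config region=minio,s3ForcePathStyle=\"true\",s3Url=http://10.7.209.110:9000\n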

                                        "},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html#backup-mysql-application-and-data","title":"Backup MySQL Application and Data","text":"
                                        1. Add a unique label, backup=mysql, to the MySQL application and PVC data. This will facilitate resource selection during backup.

                                          kubectl label deploy mysql-deploy backup=mysql # Add label to mysql-deploy\nkubectl label pod mysql-deploy-5d6f94cb5c-gkrks backup=mysql # Add label to mysql pod\nkubectl label pvc mydata backup=mysql # Add label to mysql pvc\n
                                        2. Refer to the steps described in Application Backup and the parameters below to create an application backup (for reference, a velero CLI sketch follows this list):

                                           • Name: backup-mysql (can be customized)
                                           • Source Cluster: main-cluster
                                           • Namespace: default
                                           • Resource Filter - Specify resource label: backup:mysql

                                        3. After creating the backup plan, the page will automatically return to the backup plan list. Find the newly created backup plan backup-mysql, click the more options button ... in the plan, and select \"Run Now\" to execute the newly created backup plan.

                                        4. Wait for the backup plan execution to complete before proceeding with the next steps.
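
                                        For reference, an equivalent backup could be created with the velero CLI on main-cluster (a sketch; the UI flow above is the documented method):

                                          velero backup create backup-mysql --include-namespaces default --selector backup=mysql   # back up only the resources labeled backup=mysql\n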
                                        "},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html#cross-cluster-recovery-of-mysql-application-and-data","title":"Cross-Cluster Recovery of MySQL Application and Data","text":"
                                        1. Log in to AI platform and select Container Management -> Backup & Restore -> Application Backup from the left navigation menu.

                                        2. Select Recovery in the left-side toolbar, then click Restore Backup on the right side.

                                        3. Fill in the parameters based on the following instructions (for reference, a velero CLI sketch follows this list):

                                           • Name: restore-mysql (can be customized)
                                           • Backup Source Cluster: main-cluster
                                           • Backup Plan: backup-mysql
                                           • Backup Point: default
                                           • Recovery Target Cluster: recovery-cluster

                                        4. Refresh the restore task list and wait for the restore task to complete.
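
                                        For reference, an equivalent restore could be triggered with the velero CLI on recovery-cluster, assuming its Velero is configured against the same MinIO bucket (a sketch; the UI flow above is the documented method):

                                          velero restore create restore-mysql --from-backup backup-mysql   # restore from the backup created on main-cluster\n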
                                        "},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html#check-if-the-data-is-restored-successfully","title":"Check if the data is restored successfully","text":"
                                        1. Log in to a control plane node of recovery-cluster and check whether mysql-deploy has been successfully restored in the current cluster.

                                          kubectl get pod\n

                                          Expected output is as follows:

                                          NAME                               READY   STATUS    RESTARTS   AGE\nmysql-deploy-5798f5d4b8-62k6c      1/1     Running   0          24h\n
                                        2. Check whether the data in the MySQL table has been restored.

                                          kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"SELECT * FROM test.users;\"\n

                                          Expected output is as follows:

                                          [root@g-master1 ~]# kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"SELECT * FROM test.users;\"\nmysql: [Warning] Using a password on the command line interface can be insecure.\nuser_name   age\ndc09195ba   10\n80ab6aa28   70\nf488e3d46   23\ne6098695c   93\neda563e7d   63\na4d1b8d68   17\nea47546d9   86\na34311f2e   47\n740cefe17   33\nede85ea28   65\nb6d0d6a0e   46\nf0eb38e50   44\nc9d2f28f5   72\n8ddaafc6f   31\n3ae078d0e   23\n6e041631e   96\n

                                          Success

                                          As you can see, the data in the Pod is consistent with the data in the main-cluster. This indicates that the MySQL application and its data have been successfully recovered from the main-cluster to the recovery-cluster.

                                        "},{"location":"en/admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html","title":"Create a RedHat 9.2 Worker Cluster on a CentOS Management Platform","text":"

                                        This article explains how to create a RedHat 9.2 worker cluster on an existing CentOS management platform.

                                        Note

                                        This article only applies to the offline mode, using AI platform to create a worker cluster. Both the management platform and the cluster to be created are based on the AMD architecture. Heterogeneous deployment (mixing AMD and ARM) is not supported during cluster creation; after the cluster is created, you can achieve mixed deployment and management by connecting heterogeneous nodes.

                                        "},{"location":"en/admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#prerequisites","title":"Prerequisites","text":"

                                        AI platform has been deployed in full mode, and the spark node is still alive. For deployment details, see the document Offline Install AI platform Enterprise.

                                        "},{"location":"en/admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#download-and-import-redhat-offline-packages","title":"Download and Import RedHat Offline Packages","text":"

                                        Make sure you are logged in to the spark node, and that the clusterConfig.yaml file used when deploying AI platform is still available.

                                        "},{"location":"en/admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#download-the-relevant-redhat-offline-packages","title":"Download the Relevant RedHat Offline Packages","text":"

                                        Download the required RedHat OS package and ISO offline packages:

                                        | Resource Name | Description | Download Link |
                                        | --- | --- | --- |
                                        | os-pkgs-redhat9-v0.9.3.tar.gz | RedHat 9.2 OS package | Download |
                                        | ISO Offline Package | ISO package import script | Go to RedHat Official Download Site |
                                        | import-iso | ISO import script | Download |

                                        "},{"location":"en/admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#import-the-os-package-to-the-minio-of-the-spark-node","title":"Import the OS Package to the MinIO of the Spark Node","text":"

                                        Extract the RedHat OS package

                                        Execute the following command to extract the downloaded RedHat OS package.

                                        tar -xvf os-pkgs-redhat9-v0.9.3.tar.gz\n

                                        The contents of the extracted OS package are as follows:

                                            os-pkgs\n    ├── import_ospkgs.sh       # This script is used to import OS packages into the MinIO file service\n    ├── os-pkgs-amd64.tar.gz   # OS packages for the amd64 architecture\n    ├── os-pkgs-arm64.tar.gz   # OS packages for the arm64 architecture\n    └── os-pkgs.sha256sum.txt  # sha256sum verification file of the OS packages\n

                                        Import the OS Package to the MinIO of the Spark Node

                                        Execute the following command to import the OS packages to the MinIO file service:

                                        MINIO_USER=rootuser MINIO_PASS=rootpass123 ./import_ospkgs.sh  http://127.0.0.1:9000 os-pkgs-redhat9-v0.9.3.tar.gz\n

                                        Note

                                        The above command is only applicable to the MinIO service built into the spark node. If an external MinIO is used, replace http://127.0.0.1:9000 with the access address of the external MinIO. \"rootuser\" and \"rootpass123\" are the default account and password of the MinIO service built into the spark node. \"os-pkgs-redhat9-v0.9.3.tar.gz\" is the name of the downloaded OS offline package.

                                        "},{"location":"en/admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#import-the-iso-offline-package-to-the-minio-of-the-spark-node","title":"Import the ISO Offline Package to the MinIO of the Spark Node","text":"

                                        Execute the following command to import the ISO package to the MinIO file service:

                                        MINIO_USER=rootuser MINIO_PASS=rootpass123 ./import_iso.sh http://127.0.0.1:9000 rhel-9.2-x86_64-dvd.iso\n

                                        Note

                                        The above command is only applicable to the MinIO service built into the spark node. If an external MinIO is used, replace http://127.0.0.1:9000 with the access address of the external MinIO. \"rootuser\" and \"rootpass123\" are the default account and password of the MinIO service built into the spark node. \"rhel-9.2-x86_64-dvd.iso\" is the name of the downloaded ISO offline package.

                                        "},{"location":"en/admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#create-the-cluster-in-the-ui","title":"Create the Cluster in the UI","text":"

                                        Refer to the document Creating a Worker Cluster to create a RedHat 9.2 cluster.

                                        "},{"location":"en/admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html","title":"Create an Ubuntu Worker Cluster on CentOS","text":"

                                        This page explains how to create an Ubuntu worker cluster on an existing CentOS management platform.

                                        Note

                                        This page is specifically for the offline mode, using AI platform to create a worker cluster, where both the CentOS platform and the worker cluster to be created are based on the AMD architecture. Heterogeneous (mixed AMD and ARM) deployments are not supported during cluster creation; however, after the cluster is created, you can manage a mixed deployment by adding heterogeneous nodes.

                                        "},{"location":"en/admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#prerequisite","title":"Prerequisite","text":"
                                        • A fully deployed AI platform system, with the bootstrap node still active. For deployment reference, see the documentation Offline Install AI platform Enterprise.
                                        "},{"location":"en/admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#download-and-import-ubuntu-offline-packages","title":"Download and Import Ubuntu Offline Packages","text":"

                                        Please ensure you are logged into the bootstrap node! Also, make sure that the clusterConfig.yaml file used during the AI platform deployment is still available.

                                        "},{"location":"en/admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#download-ubuntu-offline-packages","title":"Download Ubuntu Offline Packages","text":"

                                        Download the required Ubuntu OS packages and ISO offline packages:

                                        | Resource Name | Description | Download Link |
                                        | --- | --- | --- |
                                        | os-pkgs-ubuntu2204-v0.18.2.tar.gz | Ubuntu 22.04 OS package | https://github.com/kubean-io/kubean/releases/download/v0.18.2/os-pkgs-ubuntu2204-v0.18.2.tar.gz |
                                        | ISO Offline Package | ISO package | http://mirrors.melbourne.co.uk/ubuntu-releases/ |

                                        "},{"location":"en/admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#import-os-and-iso-packages-into-minio-on-the-bootstrap-node","title":"Import OS and ISO Packages into MinIO on the Bootstrap Node","text":"

                                        Refer to the documentation Importing Offline Resources to import offline resources into MinIO on the bootstrap node.
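
                                        As a sketch, the import mirrors the RedHat import commands above, assuming the bootstrap node's built-in MinIO with its default credentials (the ISO file name is an assumption; use the file you actually downloaded):

                                          MINIO_USER=rootuser MINIO_PASS=rootpass123 ./import_ospkgs.sh http://127.0.0.1:9000 os-pkgs-ubuntu2204-v0.18.2.tar.gz\nMINIO_USER=rootuser MINIO_PASS=rootpass123 ./import_iso.sh http://127.0.0.1:9000 ubuntu-22.04-live-server-amd64.iso\n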

                                        "},{"location":"en/admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#create-cluster-on-ui","title":"Create Cluster on UI","text":"

                                        Refer to the documentation Creating a Worker Cluster to create the Ubuntu cluster.

                                        "},{"location":"en/admin/kpanda/best-practice/etcd-backup.html","title":"etcd Backup and Restore","text":"

                                        Using the ETCD backup feature to create a backup policy, you can back up the etcd data of a specified cluster to S3 storage on a scheduled basis. This page focuses on how to restore backed-up data to the current cluster.

                                        Note

                                        • AI platform ETCD backup restores are limited to backups and restores for the same cluster (with no change in the number of nodes and IP addresses). For example, after the etcd data of Cluster A is backed up, the backup data can only be restored to Cluster A, not to Cluster B.
                                        • For cross-cluster backups and restores, the application backup and restore feature is recommended.
                                        • Before restoring, create a backup policy to back up the current cluster state; refer to ETCD Backup.

                                        The following is a specific case to illustrate the whole process of backup and restore.

                                        "},{"location":"en/admin/kpanda/best-practice/etcd-backup.html#environmental-information","title":"Environmental Information","text":"

                                        Begin with basic information about the target cluster and S3 storage for the restore. Here, MinIO is used as the S3 storage, and the cluster has 3 control plane nodes (3 etcd replicas).

                                        | IP | Host | Role | Remarks |
                                        | --- | --- | --- | --- |
                                        | 10.6.212.10 | host01 | k8s-master01 | k8s node 1 |
                                        | 10.6.212.11 | host02 | k8s-master02 | k8s node 2 |
                                        | 10.6.212.12 | host03 | k8s-master03 | k8s node 3 |
                                        | 10.6.212.13 | host04 | minio | minio service |

                                        "},{"location":"en/admin/kpanda/best-practice/etcd-backup.html#prerequisites","title":"Prerequisites","text":""},{"location":"en/admin/kpanda/best-practice/etcd-backup.html#install-the-etcdbrctl-tool","title":"Install the etcdbrctl tool","text":"

                                        To implement ETCD data backup and restore, you need to install the open source etcdbrctl tool on any of the above k8s nodes. This tool does not currently provide prebuilt binaries and must be compiled from source; refer to its compilation instructions. A build sketch is shown below.
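
                                        A build sketch, assuming a Go toolchain is available and building from the upstream gardener/etcd-backup-restore repository (the make target and output path are assumptions based on common project layout):

                                          git clone https://github.com/gardener/etcd-backup-restore.git\ncd etcd-backup-restore\nmake build                        # assumed to produce the etcdbrctl binary under bin/\ncp bin/etcdbrctl /usr/local/bin/\n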

                                        After installation, use the following command to check whether the tool is available:

                                        etcdbrctl -v\n

                                        The expected output is as follows:

                                        INFO[0000] etcd-backup-restore Version: v0.23.0-dev\nINFO[0000] Git SHA: b980beec\nINFO[0000] Go Version: go1.19.3\nINFO[0000] Go OS/Arch: linux/amd64\n
                                        "},{"location":"en/admin/kpanda/best-practice/etcd-backup.html#check-the-backup-data","title":"Check the backup data","text":"

                                        You need to check the following before restoring:

                                        • Whether the data has been successfully backed up in AI platform
                                        • Whether the backup data exists in the S3 storage

                                        Note

                                        The backup of AI platform is a full data backup, and the full data of the last backup will be restored when restoring.
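
                                        For example, to verify that backup snapshots exist in S3, a sketch using the MinIO client (mc) with the values from this case (the alias name is arbitrary; etcd-backup is the bucket used by the restore command later on this page):

                                          mc alias set backup-minio http://10.6.212.13:9000 <ACCESS_KEY> <SECRET_KEY>   # register the MinIO endpoint\nmc ls --recursive backup-minio/etcd-backup                                     # snapshot objects (e.g. Full-...) should be listed\n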

                                        "},{"location":"en/admin/kpanda/best-practice/etcd-backup.html#shut-down-the-cluster","title":"Shut down the cluster","text":"

                                        Before restoring, the cluster must be shut down. By default, etcd and kube-apiserver are started as static Pods. Shutting down the cluster here means moving the static Pod manifest files out of the /etc/kubernetes/manifests directory; kubelet will then remove the Pods and stop the services.

                                        1. First, delete the previous backup data in /var/lib/etcd_bak. In this procedure, \"removing\" the existing etcd data means renaming the etcd data directory rather than deleting it (a sketch of the rename follows the command below); only delete that directory after the restore has completed successfully, so that the current cluster can still be recovered if the etcd restore fails. This step needs to be performed on each node.

                                          rm -rf /var/lib/etcd_bak\n
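
                                          The rename described above is not shown as a command in this procedure; a sketch consistent with that description, to be run on each node before restoring:

                                            mv /var/lib/etcd /var/lib/etcd_bak   # keep the current etcd data until the restore is confirmed successful\n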
                                        2. Next, shut down the kube-apiserver service to ensure that there are no new changes to the etcd data. This step needs to be performed on each node.

                                          mv /etc/kubernetes/manifests/kube-apiserver.yaml /tmp/kube-apiserver.yaml\n
                                        3. You also need to turn off the etcd service. This step needs to be performed for each node.

                                          mv /etc/kubernetes/manifests/etcd.yaml /tmp/etcd.yaml\n
                                        4. Ensure that the kube-apiserver and etcd services on all control plane nodes are stopped.

                                        5. After shutting down the services on all nodes, use the following command to check the etcd cluster status. This command can be executed on any node.

                                          Replace the value of --endpoints with the actual node names:

                                          etcdctl endpoint status --endpoints=controller-node-1:2379,controller-node-2:2379,controller-node-3:2379 -w table \\\n  --cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n  --cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n  --key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\"\n

                                          The expected output is as follows, indicating that all etcd services have been stopped:

                                          {\"level\":\"warn\",\"ts\":\"2023-03-29T17:51:50.817+0800\",\"logger\":\"etcd-client\",\"caller\":\"v3@v3.5.6/retry_interceptor.go:62\",\"msg\":\"retrying of unary invoker failed\",\"target\":\"etcd-endpoints://0xc0001ba000/controller-node-1:2379\",\"attempt\":0,\"error\":\"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \\\"transport: Error while dialing dial tcp 10.5.14.31:2379: connect: connection refused\\\"\"}\nFailed to get the status of endpoint controller-node-1:2379 (context deadline exceeded)\n{\"level\":\"warn\",\"ts\":\"2023-03-29T17:51:55.818+0800\",\"logger\":\"etcd-client\",\"caller\":\"v3@v3.5.6/retry_interceptor.go:62\",\"msg\":\"retrying of unary invoker failed\",\"target\":\"etcd-endpoints://0xc0001ba000/controller-node-2:2379\",\"attempt\":0,\"error\":\"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \\\"transport: Error while dialing dial tcp 10.5.14.32:2379: connect: connection refused\\\"\"}\nFailed to get the status of endpoint controller-node-2:2379 (context deadline exceeded)\n{\"level\":\"warn\",\"ts\":\"2023-03-29T17:52:00.820+0800\",\"logger\":\"etcd-client\",\"caller\":\"v3@v3.5.6/retry_interceptor.go:62\",\"msg\":\"retrying of unary invoker failed\",\"target\":\"etcd-endpoints://0xc0001ba000/controller-node-1:2379\",\"attempt\":0,\"error\":\"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \\\"transport: Error while dialing dial tcp 10.5.14.33:2379: connect: connection refused\\\"\"}\nFailed to get the status of endpoint controller-node-3:2379 (context deadline exceeded)\n+----------+----+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |\n+----------+----+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n+----------+----+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n
                                        "},{"location":"en/admin/kpanda/best-practice/etcd-backup.html#restore-the-backup","title":"Restore the backup","text":"

                                        You only need to restore the data of one node, and the etcd data of other nodes will be automatically synchronized.

                                        1. Set environment variables

                                          Before restoring the data using etcdbrctl, run the following commands to set the S3 connection credentials as environment variables:

                                          export ECS_ENDPOINT=http://10.6.212.13:9000 # (1)\nexport ECS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE # (2)\nexport ECS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY # (3)\n
                                          1. S3 storage access endpoint
                                          2. S3 access key (username)
                                          3. S3 secret key (password)
                                        2. Perform the restore operation

                                          Run the etcdbrctl command line tool to perform the restore, which is the most critical step.

                                          etcdbrctl restore --data-dir /var/lib/etcd/ --store-container=\"etcd-backup\" \\\n  --storage-provider=ECS \\\n  --initial-cluster=controller-node-1=https://10.6.212.10:2380 \\\n  --initial-advertise-peer-urls=https://10.6.212.10:2380\n

                                          The parameters are described as follows:

                                          • --data-dir: the etcd data directory. This must match the directory etcd is configured to use so that etcd can load the data normally.
                                          • --store-container: the S3 storage location, i.e., the bucket in MinIO; it must match the bucket used for the data backup.
                                          • --initial-cluster: the initial etcd cluster configuration. The etcd member name must be the same as in the original cluster.
                                          • --initial-advertise-peer-urls: the address used for access between etcd members. It must be consistent with the etcd configuration.

                                          The expected output is as follows:

                                          INFO[0000] Finding latest set of snapshot to recover from...\nINFO[0000] Restoring from base snapshot: Full-00000000-00111147-1679991074  actor=restorer\nINFO[0001] successfully fetched data of base snapshot in 1.241380207 seconds  actor=restorer\n{\"level\":\"info\",\"ts\":1680011221.2511616,\"caller\":\"mvcc/kvstore.go:380\",\"msg\":\"restored last compact revision\",\"meta-bucket-name\":\"meta\",\"meta-bucket-name-key\":\"finishedCompactRev\",\"restored-compact-revision\":110327}\n{\"level\":\"info\",\"ts\":1680011221.3045986,\"caller\":\"membership/cluster.go:392\",\"msg\":\"added member\",\"cluster-id\":\"66638454b9dd7b8a\",\"local-member-id\":\"0\",\"added-peer-id\":\"123c2503a378fc46\",\"added-peer-peer-urls\":[\"https://10.6.212.10:2380\"]}\nINFO[0001] Starting embedded etcd server...              actor=restorer\n\n....\n\n{\"level\":\"info\",\"ts\":\"2023-03-28T13:47:02.922Z\",\"caller\":\"embed/etcd.go:565\",\"msg\":\"stopped serving peer traffic\",\"address\":\"127.0.0.1:37161\"}\n{\"level\":\"info\",\"ts\":\"2023-03-28T13:47:02.922Z\",\"caller\":\"embed/etcd.go:367\",\"msg\":\"closed etcd server\",\"name\":\"default\",\"data-dir\":\"/var/lib/etcd\",\"advertise-peer-urls\":[\"http://localhost:0\"],\"advertise-client-urls\":[\"http://localhost:0\"]}\nINFO[0003] Successfully restored the etcd data directory.\n

                                          !!! note \"You can check the YAML file of etcd for comparison to avoid configuration errors\"

                                          ```shell\ncat /tmp/etcd.yaml | grep initial-\n- --experimental-initial-corrupt-check=true\n- --initial-advertise-peer-urls=https://10.6.212.10:2380\n- --initial-cluster=controller-node-1=https://10.6.212.10:2380\n```\n
                                        3. Run the following commands on node 01 to restore its etcd service.

                                          First, move the etcd static Pod manifest back to the /etc/kubernetes/manifests directory; kubelet will then restart etcd:

                                          mv /tmp/etcd.yaml /etc/kubernetes/manifests/etcd.yaml\n

                                          Then wait for the etcd service to finish starting, and check the status of etcd. The default directory for etcd-related certificates is /etc/kubernetes/ssl. If the cluster certificates are stored in another location, specify the proper path.

                                          • Check the etcd cluster list:

                                            etcdctl member list -w table \\\n--cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n--cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n--key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\" \n

                                            The expected output is as follows:

                                            +------------------+---------+-------------------+--------------------------+--------------------------+------------+\n|        ID        | STATUS  |       NAME        |        PEER ADDRS        |       CLIENT ADDRS       | IS LEARNER |\n+------------------+---------+-------------------+--------------------------+--------------------------+------------+\n| 123c2503a378fc46 | started | controller-node-1 | https://10.6.212.10:2380 | https://10.6.212.10:2379 |      false |\n+------------------+---------+-------------------+--------------------------+--------------------------+------------+\n
                                          • To view the status of controller-node-1:

                                            etcdctl endpoint status --endpoints=controller-node-1:2379 -w table \\\n--cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n--cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n--key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\"\n

                                            The expected output is as follows:

                                            +------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n|        ENDPOINT        |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |\n+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n| controller-node-1:2379 | 123c2503a378fc46 |   3.5.6 |   15 MB |      true |      false |         3 |       1200 |               1199 |        |\n+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n
                                        4. Restore other node data

                                          The above steps have restored the data of node 01. To restore the data of the other nodes, you only need to start their etcd Pods and let etcd synchronize the data by itself.

                                          • Do the same on node 02 and node 03:

                                            mv /tmp/etcd.yaml /etc/kubernetes/manifests/etcd.yaml\n
                                          • Data synchronization between etcd members takes some time. You can check the etcd cluster status to ensure that all etcd members are normal:

                                            Check whether the etcd cluster status is normal:

                                            etcdctl member list -w table \\\n--cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n--cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n--key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\"\n

                                            The expected output is as follows:

                                            +------------------+---------+-------------------+-------------------------+-------------------------+------------+\n|        ID        | STATUS  |    NAME           |       PEER ADDRS        |      CLIENT ADDRS       | IS LEARNER |\n+------------------+---------+-------------------+-------------------------+-------------------------+------------+\n| 6ea47110c5a87c03 | started | controller-node-1 | https://10.5.14.31:2380 | https://10.5.14.31:2379 |      false |\n| e222e199f1e318c4 | started | controller-node-2 | https://10.5.14.32:2380 | https://10.5.14.32:2379 |      false |\n| f64eeda321aabe2d | started | controller-node-3 | https://10.5.14.33:2380 | https://10.5.14.33:2379 |      false |\n+------------------+---------+-------------------+-------------------------+-------------------------+------------+\n

                                            Check whether the three member nodes are normal:

                                            etcdctl endpoint status --endpoints=controller-node-1:2379,controller-node-2:2379,controller-node-3:2379 -w table \\\n--cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n--cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n--key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\"\n

                                            The expected output is as follows:

                                            +------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n|     ENDPOINT           |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |\n+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n| controller-node-1:2379 | 6ea47110c5a87c03 |   3.5.6 |   88 MB |      true |      false |         6 |     199008 |             199008 |        |\n| controller-node-2:2379 | e222e199f1e318c4 |   3.5.6 |   88 MB |     false |      false |         6 |     199114 |             199114 |        |\n| controller-node-3:2379 | f64eeda321aabe2d |   3.5.6 |   88 MB |     false |      false |         6 |     199316 |             199316 |        |\n+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n
                                        "},{"location":"en/admin/kpanda/best-practice/etcd-backup.html#restore-the-cluster","title":"Restore the cluster","text":"

                                        After the etcd data of all nodes is synchronized, restart kube-apiserver to restore the entire cluster to an accessible state:

                                        1. Restart the kube-apiserver service for node1

                                          mv /tmp/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml\n
                                        2. Restart the kube-apiserver service for node2

                                          mv /tmp/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml\n
                                        3. Restart the kube-apiserver service for node3

                                          mv /tmp/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml\n
                                        4. After kubelet starts kube-apiserver, check whether the restored k8s data is normal:

                                          kubectl get nodes\n

                                          The expected output is as follows:

                                          NAME                STATUS     ROLES           AGE     VERSION\ncontroller-node-1   Ready      <none>          3h30m   v1.25.4\ncontroller-node-2   Ready      control-plane   3h29m   v1.25.4\ncontroller-node-3   Ready      control-plane   3h28m   v1.25.4\n
                                        "},{"location":"en/admin/kpanda/best-practice/hardening-cluster.html","title":"How to Harden a Self-built Work Cluster","text":"

                                        In AI platform, when running a CIS Benchmark scan on a worker cluster created through the user interface, some scan items do not pass. This article provides hardening instructions based on different versions of the CIS Benchmark.

                                        "},{"location":"en/admin/kpanda/best-practice/hardening-cluster.html#cis-benchmark-127","title":"CIS Benchmark 1.27","text":"

                                        Scan Environment:

                                        • Kubernetes version: 1.25.4
                                        • Containerd: 1.7.0
                                        • Kubean version: 0.4.9
                                        • Kubespray version: v2.22
                                        "},{"location":"en/admin/kpanda/best-practice/hardening-cluster.html#failed-scan-items","title":"Failed Scan Items","text":"
                                        1. [FAIL] 1.2.5 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)
                                        2. [FAIL] 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)
                                        3. [FAIL] 1.4.1 Ensure that the --profiling argument is set to false (Automated)
                                        4. [FAIL] 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)
                                        "},{"location":"en/admin/kpanda/best-practice/hardening-cluster.html#analysis-of-scan-failures","title":"Analysis of Scan Failures","text":"
                                        1. [FAIL] 1.2.5 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)

                                          Reason: CIS requires that kube-apiserver specify the CA certificate path for the kubelet.

                                        2. [FAIL] 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)

                                          Reason: CIS requires that kube-controller-manager's --bind-address=127.0.0.1

                                        3. [FAIL] 1.4.1 Ensure that the --profiling argument is set to false (Automated)

                                          Reason: CIS requires that kube-scheduler sets --profiling=false

                                        4. [FAIL] 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)

                                          Reason: CIS requires setting kube-scheduler's --bind-address=127.0.0.1

                                        "},{"location":"en/admin/kpanda/best-practice/hardening-cluster.html#hardening-configuration-to-pass-cis-scan","title":"Hardening Configuration to Pass CIS Scan","text":"

                                        To address these security scan issues, kubespray has added default values in v2.22 to solve some of the problems. For more details, refer to the kubespray hardening documentation.

                                        • Add parameters by modifying the kubean var-config configuration file:

                                          kubernetes_audit: true\nkube_controller_manager_bind_address: 127.0.0.1\nkube_scheduler_bind_address: 127.0.0.1\nkube_kubeadm_scheduler_extra_args:\n  profiling: false\nkubelet_rotate_server_certificates: true\n
• The AI platform also provides a way to configure advanced parameters through the user interface: add the custom parameters in the last step of cluster creation.

• After the custom parameters are set, they are added to the var-config configmap in kubean (the same keys shown above).

• Perform a scan after installing the cluster.

After the scan, all items passed (WARN and INFO count as PASS). Note that this document only applies to CIS Benchmark 1.27, as the CIS Benchmark is continuously updated.
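
As an illustration, if your environment uses kube-bench as the CIS scanner (an assumption; this document does not name a specific scanner), a rescan could look like:

# Run a CIS scan against the freshly hardened cluster (kube-bench assumed)\nkube-bench run\n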

                                        "},{"location":"en/admin/kpanda/best-practice/kubean-low-version.html","title":"Deploy and Upgrade Compatible Versions of Kubean in Offline Scenarios","text":"

To meet customer demand for building Kubernetes (K8s) clusters with earlier versions, Kubean provides the capability to remain compatible with those versions and to create K8s clusters with them.

Currently, the supported versions for self-built worker clusters range from v1.26.0 to v1.28. Refer to the AI platform Cluster Version Support System for more information.

                                        This article will demonstrate how to deploy a K8s cluster with a lower version.

                                        Note

                                        Node environment used in the document:

                                        • X86 architecture
                                        • CentOS 7 Linux distribution
                                        "},{"location":"en/admin/kpanda/best-practice/kubean-low-version.html#prerequisites","title":"Prerequisites","text":"
• Prepare a management cluster where kubean resides, with the podman, skopeo, and minio client commands available in the current environment (a quick check of these tools is sketched at the end of this section). If they are not available, you can install the dependencies through the script: Installing Prerequisite Dependencies.

                                        • Go to kubean to view the released artifacts, and choose the specific artifact version based on the actual situation. The currently supported artifact versions and their proper cluster version ranges are as follows:

• release-2.21: cluster range v1.23.0 ~ v1.25.6, supported since installer v0.14.0
• release-2.22: cluster range v1.24.0 ~ v1.26.9, supported since installer v0.15.0
• release-2.23: cluster range v1.25.0 ~ v1.27.7, expected to be supported from installer v0.16.0

                                          This article demonstrates the offline deployment of a K8s cluster with version 1.23.0 and the offline upgrade of a K8s cluster from version 1.23.0 to 1.24.0, so we choose the artifact release-2.21.
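
As a quick check for the first prerequisite, you can verify that the required CLI tools are present (mc is the assumed binary name of the minio client):

podman --version\nskopeo --version\nmc --version\n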

                                        "},{"location":"en/admin/kpanda/best-practice/kubean-low-version.html#procedure","title":"Procedure","text":""},{"location":"en/admin/kpanda/best-practice/kubean-low-version.html#prepare-the-relevant-artifacts-for-the-lower-version-of-kubespray-release","title":"Prepare the Relevant Artifacts for the Lower Version of Kubespray Release","text":"

                                        Import the spray-job image into the registry of the offline environment.

                                        # Assuming the registry address in the bootstrap cluster is 172.30.41.200\nREGISTRY_ADDR=\"172.30.41.200\"\n\n# The image spray-job can use the accelerator address here, and the image address is determined based on the selected artifact version\nSPRAY_IMG_ADDR=\"ghcr.m.daocloud.io/kubean-io/spray-job:2.21-d6f688f\"\n\n# skopeo parameters\nSKOPEO_PARAMS=\" --insecure-policy -a --dest-tls-verify=false --retry-times=3 \"\n\n# Online environment: Export the spray-job image of version release-2.21 and transfer it to the offline environment\nskopeo copy docker://${SPRAY_IMG_ADDR} docker-archive:spray-job-2.21.tar\n\n# Offline environment: Import the spray-job image of version release-2.21 into the bootstrap registry\nskopeo copy ${SKOPEO_PARAMS} docker-archive:spray-job-2.21.tar docker://${REGISTRY_ADDR}/${SPRAY_IMG_ADDR}\n
                                        "},{"location":"en/admin/kpanda/best-practice/kubean-low-version.html#create-offline-resources-for-the-earlier-versions-of-k8s","title":"Create Offline Resources for the Earlier Versions of K8s","text":"
                                        1. Prepare the manifest.yml file.

                                          cat > \"manifest.yml\" <<EOF\nimage_arch:\n  - \"amd64\" ## \"arm64\"\nkube_version: ## Fill in the cluster version according to the actual scenario\n  - \"v1.23.0\"\n  - \"v1.24.0\"\nEOF\n
                                        2. Create the offline incremental package.

                                          # Create the data directory\nmkdir data\n# Create the offline package\nAIRGAP_IMG_ADDR=\"ghcr.m.daocloud.io/kubean-io/airgap-patch:2.21-d6f688f\" # (1)\npodman run --rm -v $(pwd)/manifest.yml:/manifest.yml -v $(pwd)/data:/data -e ZONE=CN -e MODE=FULL ${AIRGAP_IMG_ADDR}\n
1. The airgap-patch image can use the accelerator address here; the image tag is determined by the selected artifact version.
                                        3. Import the offline images and binary packages for the proper K8s version.

                                          # Import the binaries from the data directory to the minio in the bootstrap node\ncd ./data/amd64/files/\nMINIO_ADDR=\"http://172.30.41.200:9000\" # Replace IP with the actual repository url\nMINIO_USER=rootuser MINIO_PASS=rootpass123 ./import_files.sh ${MINIO_ADDR}\n\n# Import the images from the data directory to the image repository in the bootstrap node\ncd ./data/amd64/images/\nREGISTRY_ADDR=\"172.30.41.200\"  ./import_images.sh # Replace IP with the actual repository url\n
                                        4. Deploy the manifest and localartifactset.cr.yaml custom resources to the management cluster where kubean resides or the Global cluster. In this example, we use the Global cluster.

# Deploy the localArtifactSet resource from the data directory\ncd ./data\nkubectl apply -f localartifactset.cr.yaml\n\n# Download the manifest resource corresponding to release-2.21\nwget https://raw.githubusercontent.com/kubean-io/kubean-manifest/main/manifests/manifest-2.21-d6f688f.yml\n\n# Deploy the manifest resource corresponding to release-2.21\nkubectl apply -f manifest-2.21-d6f688f.yml\n
                                        "},{"location":"en/admin/kpanda/best-practice/kubean-low-version.html#deployment-and-upgrade-legacy-k8s-cluster","title":"Deployment and Upgrade Legacy K8s Cluster","text":""},{"location":"en/admin/kpanda/best-practice/kubean-low-version.html#deploy","title":"Deploy","text":"
                                        1. Go to Container Management and click the Create Cluster button on the Clusters page.

2. For the Managed parameter, choose the cluster where the manifest and localartifactset.cr.yaml custom resources were deployed. In this example, we use the Global cluster.

                                        3. Refer to Creating a Cluster for the remaining parameters.

                                        "},{"location":"en/admin/kpanda/best-practice/kubean-low-version.html#upgrade","title":"Upgrade","text":"
                                        1. Select the newly created cluster and go to the details page.

                                        2. Click Cluster Operations in the left navigation bar, then click Cluster Upgrade on the top right of the page.

3. Select an available version to upgrade to.

                                        "},{"location":"en/admin/kpanda/best-practice/multi-arch.html","title":"How to Add Heterogeneous Nodes to a Worker Cluster","text":"

                                        This page explains how to add ARM architecture nodes with Kylin v10 sp2 operating system to an AMD architecture worker cluster with CentOS 7.9 operating system.

                                        Note

This page is only applicable to adding heterogeneous nodes to a worker cluster created using the AI platform in offline mode, excluding integrated clusters.

                                        "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#prerequisites","title":"Prerequisites","text":"
• An AI platform Full Mode deployment has been completed, and the bootstrap node is still alive. Refer to the documentation Offline Installation of AI platform Enterprise for the deployment process.
• A worker cluster with AMD architecture and the CentOS 7.9 operating system has been created through the AI platform. Refer to the documentation Creating a Worker Cluster for the creation process.
                                        "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#procedure","title":"Procedure","text":""},{"location":"en/admin/kpanda/best-practice/multi-arch.html#download-and-import-offline-packages","title":"Download and Import Offline Packages","text":"

                                        Take ARM architecture and Kylin v10 sp2 operating system as examples.

                                        Make sure you are logged into the bootstrap node! Also, make sure the clusterConfig.yaml file used during the AI platform deployment is available.

                                        "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#offline-image-package","title":"Offline Image Package","text":"

                                        Note

                                        The latest version can be downloaded from the Download Center.

• AMD64, v0.18.0: https://qiniu-download-public.daocloud.io/DaoCloud_Enterprise/dce5/offline-v0.18.0-amd64.tar
• ARM64, v0.18.0: https://qiniu-download-public.daocloud.io/DaoCloud_Enterprise/dce5/offline-v0.18.0-arm64.tar

                                        After downloading, extract the offline package:

                                        tar -xvf offline-v0.18.0-arm64.tar\n
                                        "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#iso-offline-package-kylin-v10-sp2","title":"ISO Offline Package (Kylin v10 sp2)","text":"CPU Architecture Operating System Version Download Link ARM64 Kylin Linux Advanced Server release V10 (Sword) SP2 https://www.kylinos.cn/support/trial.html

                                        Note

                                        Kylin operating system requires personal information to be provided for downloading and usage. Select V10 (Sword) SP2 when downloading.

                                        "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#ospackage-offline-package-kylin-v10-sp2","title":"osPackage Offline Package (Kylin v10 sp2)","text":"

                                        The Kubean project provides osPackage offline packages for different operating systems. Visit https://github.com/kubean-io/kubean/releases to view the available packages.

• Kylin Linux Advanced Server release V10 (Sword) SP2: https://github.com/kubean-io/kubean/releases/download/v0.16.3/os-pkgs-kylinv10-v0.16.3.tar.gz

                                        Note

                                        Check the specific version of the osPackage offline package in the offline/sample/clusterConfig.yaml file of the offline image package.
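
A hedged sketch for locating that version (the exact field name in clusterConfig.yaml is an assumption and may vary by release):

# Search clusterConfig.yaml for the osPackage version reference\ngrep -in \"os-pkgs\|ospackage\" offline/sample/clusterConfig.yaml\n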

                                        "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#import-offline-packages-to-the-bootstrap-node","title":"Import Offline Packages to the Bootstrap Node","text":"

                                        Run the import-artifact command:

                                        ./offline/dce5-installer import-artifact -c clusterConfig.yaml \\\n    --offline-path=/root/offline \\\n    --iso-path=/root/Kylin-Server-10-SP2-aarch64-Release-Build09-20210524.iso \\\n    --os-pkgs-path=/root/os-pkgs-kylinv10-v0.7.4.tar.gz\n

                                        Note

                                        Parameter Explanation:

                                        • -c clusterConfig.yaml specifies the clusterConfig.yaml file used during the previous AI platform deployment.
                                        • --offline-path specifies the file path of the downloaded offline image package.
                                        • --iso-path specifies the file path of the downloaded ISO operating system image.
                                        • --os-pkgs-path specifies the file path of the downloaded osPackage offline package.

                                        After a successful import command execution, the offline package will be uploaded to Minio on the bootstrap node.

                                        "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#add-heterogeneous-worker-nodes","title":"Add Heterogeneous Worker Nodes","text":"

                                        Make sure you are logged into the management node of the AI platform Global Service Cluster.

                                        "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#modify-the-host-manifest","title":"Modify the Host Manifest","text":"

Here is an example of the host manifest:

Before adding a node:
apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: ${cluster-name}-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      hosts:\n        centos-master:\n          ip: 10.5.14.122\n          access_ip: 10.5.14.122\n          ansible_host: 10.5.14.122\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_ssh_pass: ******\n      children:\n        kube_control_plane:\n          hosts:\n            centos-master:\n        kube_node:\n          hosts:\n            centos-master:\n        etcd:\n          hosts:\n            centos-master:\n        k8s_cluster:\n          children:\n            kube_control_plane:\n            kube_node:\n
After adding a node:

apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: ${cluster-name}-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      hosts:\n        centos-master:\n          ip: 10.5.14.122\n          access_ip: 10.5.14.122\n          ansible_host: 10.5.14.122\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_ssh_pass: ******\n        # Add the heterogeneous node\n        kylin-worker:\n          ip: 10.5.10.220\n          access_ip: 10.5.10.220\n          ansible_host: 10.5.10.220\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_ssh_pass: dangerous@2022\n      children:\n        kube_control_plane:\n          hosts:\n            centos-master:\n        kube_node:\n          hosts:\n            centos-master:\n            kylin-worker:  # Add the name of the heterogeneous node\n        etcd:\n          hosts:\n            centos-master:\n        k8s_cluster:\n          children:\n            kube_control_plane:\n            kube_node:\n

Edit the host manifest to add the information for the new worker node, following the comments above:

                                        kubectl edit cm ${cluster-name}-hosts-conf -n kubean-system\n
                                        "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#add-expansion-tasks-through-clusteroperationyml","title":"Add Expansion Tasks through ClusterOperation.yml","text":"

                                        Example:

                                        ClusterOperation.yml
                                        apiVersion: kubean.io/v1alpha1\nkind: ClusterOperation\nmetadata:\n  name: add-worker-node\nspec:\n  cluster: ${cluster-name} # Specify cluster name\n  image: ghcr.m.daocloud.io/kubean-io/spray-job:v0.5.0\n  backoffLimit: 0\n  actionType: playbook\n  action: scale.yml\n  extraArgs: --limit=kylin-worker\n  preHook:\n    - actionType: playbook\n      action: ping.yml\n    - actionType: playbook\n      action: disable-firewalld.yml\n    - actionType: playbook\n      action: enable-repo.yml\n      extraArgs: |\n        -e \"{repo_list: [\"http://10.5.14.30:9000/kubean/kylin-iso/\\$releasever/os/\\$basearch\",\"http://10.5.14.30:9000/kubean/kylin/\\$releasever/os/\\$basearch\"]}\"\n  postHook:\n    - actionType: playbook\n      action: cluster-info.yml\n

                                        Note

                                        • Ensure the spec.image image address matches the image used in the previous deployment job.
                                        • Set spec.action to scale.yml .
• Set spec.extraArgs to --limit=kylin-worker , i.e., the name of the node being added.
                                        • Fill in the correct repo_list parameter for the relevant OS in spec.preHook 's enable-repo.yml script.

Create and deploy join-node-ops.yaml according to the above configuration:

                                        vi join-node-ops.yaml\nkubectl apply -f join-node-ops.yaml -n kubean-system\n
                                        "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#check-the-status-of-the-task-execution","title":"Check the status of the task execution","text":"
                                        kubectl -n kubean-system get pod | grep add-worker-node\n

To check the progress of the scaling task, you can view the logs of the corresponding pod.
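
For example (the pod name is a placeholder for a name returned by the command above):

# Follow the logs of the scaling task pod\nkubectl -n kubean-system logs -f add-worker-node-xxxxx\n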

                                        "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#verify-in-ui","title":"Verify in UI","text":"
                                        1. Go to Container Management -> Clusters -> Nodes .

                                        2. Click the newly added node to view details.

                                        "},{"location":"en/admin/kpanda/best-practice/replace-first-master-node.html","title":"Replace the first master node of the worker cluster","text":"

This page takes a highly available worker cluster with three master nodes as an example and explains how to replace or reintroduce the first master node when it fails or malfunctions.

The example cluster has three master nodes:

                                        • node1 (172.30.41.161)
                                        • node2 (172.30.41.162)
                                        • node3 (172.30.41.163)

                                        Assuming node1 is down, the following steps will explain how to reintroduce the recovered node1 back into the worker cluster.

                                        "},{"location":"en/admin/kpanda/best-practice/replace-first-master-node.html#preparations","title":"Preparations","text":"

                                        Before performing the replacement operation, first obtain basic information about the cluster resources, which will be used when modifying related configurations.

                                        Note

The following commands for obtaining cluster resource information are all executed in the management cluster.

                                        1. Get the cluster name

Run the following command to find the clusters.kubean.io resource corresponding to the cluster:

                                          # For example, if the resource name of clusters.kubean.io is cluster-mini-1\n# Get the name of the cluster\nCLUSTER_NAME=$(kubectl get clusters.kubean.io cluster-mini-1 -o=jsonpath=\"{.metadata.name}{'\\n'}\")\n
                                        2. Get the host list configmap of the cluster

                                          kubectl get clusters.kubean.io cluster-mini-1 -o=jsonpath=\"{.spec.hostsConfRef}{'\\n'}\"\n{\"name\":\"mini-1-hosts-conf\",\"namespace\":\"kubean-system\"}\n
                                        3. Get the configuration parameters configmap of the cluster

                                          kubectl get clusters.kubean.io cluster-mini-1 -o=jsonpath=\"{.spec.varsConfRef}{'\\n'}\"\n{\"name\":\"mini-1-vars-conf\",\"namespace\":\"kubean-system\"}\n
                                        "},{"location":"en/admin/kpanda/best-practice/replace-first-master-node.html#steps","title":"Steps","text":"
                                        1. Adjust the order of control plane nodes

Reset node1 to the state it was in before the cluster was installed (or use a new node), and keep node1's network connectivity.

                                          Adjust the order of the node1 node in the kube_control_plane, kube_node, and etcd sections in the host list (node1/node2/node3 -> node2/node3/node1):

                                          function change_control_plane_order() {\n  cat << EOF | kubectl apply -f -\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: mini-1-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      hosts:\n        node1:\n          ip: \"172.30.41.161\"\n          access_ip: \"172.30.41.161\"\n          ansible_host: \"172.30.41.161\"\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: dangerous\n        node2:\n          ip: \"172.30.41.162\"\n          access_ip: \"172.30.41.162\"\n          ansible_host: \"172.30.41.162\"\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: dangerous\n        node3:\n          ip: \"172.30.41.163\"\n          access_ip: \"172.30.41.163\"\n          ansible_host: \"172.30.41.163\"\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: dangerous\n      children:\n        kube_control_plane:\n          hosts:\n            node2:\n            node3:\n            node1:\n        kube_node:\n          hosts:\n            node2:\n            node3:\n            node1:\n        etcd:\n          hosts:\n            node2:\n            node3:\n            node1:\n        k8s_cluster:\n          children:\n            kube_control_plane:\n            kube_node:\n        calico_rr:\n          hosts: {}\nEOF\n}\n\nchange_control_plane_order\n
                                        2. Remove the first master node in an abnormal state

After adjusting the order of the nodes in the host list, remove node1, which is in an abnormal state, from the K8s control plane.

                                          Note

If node1 is offline or malfunctioning, the following parameters must be added to extraArgs; you do not need to add them when node1 is online.

                                          reset_nodes=false # Skip resetting node operation\nallow_ungraceful_removal=true # Allow ungraceful removal operation\n
                                          # Image spray-job can use an accelerator address here\n\nSPRAY_IMG_ADDR=\"ghcr.m.daocloud.io/kubean-io/spray-job\"\nSPRAY_RLS_2_22_TAG=\"2.22-336b323\"\nKUBE_VERSION=\"v1.24.14\"\nCLUSTER_NAME=\"cluster-mini-1\"\nREMOVE_NODE_NAME=\"node1\"\n\ncat << EOF | kubectl apply -f -\n---\napiVersion: kubean.io/v1alpha1\nkind: ClusterOperation\nmetadata:\n  name: cluster-mini-1-remove-node-ops\nspec:\n  cluster: ${CLUSTER_NAME}\n  image: ${SPRAY_IMG_ADDR}:${SPRAY_RLS_2_22_TAG}\n  actionType: playbook\n  action: remove-node.yml\n  extraArgs: -e node=${REMOVE_NODE_NAME} -e reset_nodes=false -e allow_ungraceful_removal=true -e kube_version=${KUBE_VERSION}\n  postHook:\n    - actionType: playbook\n      action: cluster-info.yml\nEOF\n
                                        3. Manually modify the cluster configuration, edit and update cluster-info

                                          # Edit cluster-info\nkubectl -n kube-public edit cm cluster-info\n\n# 1. If the ca.crt certificate is updated, the content of the certificate-authority-data field needs to be updated\n# View the base64 encoding of the ca certificate:\ncat /etc/kubernetes/ssl/ca.crt | base64 | tr -d '\\n'\n\n# 2. Change the IP address in the server field to the new first master IP, this document will use the IP address of node2, 172.30.41.162\n
                                        4. Manually modify the cluster configuration, edit and update kubeadm-config

                                          # Edit kubeadm-config\nkubectl -n kube-system edit cm kubeadm-config\n\n# Change controlPlaneEndpoint to the new first master IP,\n# this document will use the IP address of node2, 172.30.41.162\n
                                        5. Scale up the master node and update the cluster

                                          Note

                                          • Use --limit to limit the update operation to only affect the etcd and kube_control_plane node groups.
                                          • If it is an offline environment, spec.preHook needs to add enable-repo.yml, and the extraArgs parameter should fill in the correct repo_list for the related OS.
                                          cat << EOF | kubectl apply -f -\n---\napiVersion: kubean.io/v1alpha1\nkind: ClusterOperation\nmetadata:\n  name: cluster-mini-1-update-cluster-ops\nspec:\n  cluster: ${CLUSTER_NAME}\n  image: ${SPRAY_IMG_ADDR}:${SPRAY_RLS_2_22_TAG}\n  actionType: playbook\n  action: cluster.yml\n  extraArgs: --limit=etcd,kube_control_plane -e kube_version=${KUBE_VERSION}\n  preHook:\n    - actionType: playbook\n      action: enable-repo.yml  # This yaml needs to be added in an offline environment,\n                               # and set the correct repo-list (install operating system packages),\n                               # the following parameter values are for reference only\n      extraArgs: |\n        -e \"{repo_list: ['http://172.30.41.0:9000/kubean/centos/\\$releasever/os/\\$basearch','http://172.30.41.0:9000/kubean/centos-iso/\\$releasever/os/\\$basearch']}\"\n  postHook:\n    - actionType: playbook\n      action: cluster-info.yml\nEOF\n

You have now completed the replacement of the first master node.

                                        "},{"location":"en/admin/kpanda/best-practice/update-offline-cluster.html","title":"Offline Deployment/Upgrade Guide for Worker Clusters","text":"

                                        Note

This document is specifically designed for deploying or upgrading the Kubernetes version of worker clusters created on the AI platform in offline mode. It does not cover the deployment or upgrade of other Kubernetes components.

                                        This guide is applicable to the following offline scenarios:

• You can follow the operational guidelines to deploy the recommended Kubernetes version in a non-GUI environment created by the AI platform.
• You can upgrade the Kubernetes version of worker clusters created using the AI platform by generating incremental offline packages.

                                        The overall approach is as follows:

                                        1. Build the offline package on an integrated node.
                                        2. Import the offline package to the bootstrap node.
                                        3. Update the Kubernetes version manifest for the global service cluster.
                                        4. Use the AI platform UI to create or upgrade the Kubernetes version of the worker cluster.

                                        Note

                                        For a list of currently supported offline Kubernetes versions, refer to the list of Kubernetes versions supported by Kubean.

                                        "},{"location":"en/admin/kpanda/best-practice/update-offline-cluster.html#building-the-offline-package-on-an-integrated-node","title":"Building the Offline Package on an Integrated Node","text":"

Since the offline environment cannot connect to the internet, you need to prepare an integrated node (a node with internet access) in advance to build the incremental offline package, and start the Docker or Podman service on this node. Refer to How to Install Docker?

                                        1. Check the status of the Docker service on the integrated node.

                                          ps aux | grep docker\n

                                          You should see output similar to the following:

root     12341  0.5  0.2 654372 26736 ?        Ssl  23:45   0:00 /usr/bin/dockerd\nroot     12351  0.2  0.1 625080 13740 ?        Ssl  23:45   0:00 docker-containerd --config /var/run/docker/containerd/containerd.toml\nroot     13024  0.0  0.0 112824   980 pts/0    S+   23:45   0:00 grep --color=auto docker\n
                                        2. Create a file named manifest.yaml in the /root directory of the integrated node with the following command:

                                          vi manifest.yaml\n

                                          The content of manifest.yaml should be as follows:

                                          manifest.yaml
                                          image_arch:\n- \"amd64\"\nkube_version: # Specify the version of the cluster to be upgraded\n- \"v1.28.0\"\n
                                          • image_arch specifies the CPU architecture type, with options for amd64 and arm64.
                                          • kube_version indicates the version of the Kubernetes offline package to be built. You can refer to the supported offline Kubernetes versions mentioned earlier.
3. Create a folder named data in the /root directory to store the incremental offline package.

                                          mkdir data\n

                                          Run the following command to generate the offline package using the kubean airgap-patch image. Make sure the tag of the airgap-patch image matches the Kubean version, and that the Kubean version covers the Kubernetes version you wish to upgrade.

                                          # Assuming the Kubean version is v0.13.9\ndocker run --rm -v $(pwd)/manifest.yaml:/manifest.yaml -v $(pwd)/data:/data ghcr.m.daocloud.io/kubean-io/airgap-patch:v0.13.9\n

After the container finishes running, check the files in the data folder. The folder structure should look like this:

                                          data\n\u251c\u2500\u2500 amd64\n\u2502   \u251c\u2500\u2500 files\n\u2502   \u2502   \u251c\u2500\u2500 import_files.sh\n\u2502   \u2502   \u2514\u2500\u2500 offline-files.tar.gz\n\u2502   \u251c\u2500\u2500 images\n\u2502   \u2502   \u251c\u2500\u2500 import_images.sh\n\u2502   \u2502   \u2514\u2500\u2500 offline-images.tar.gz\n\u2502   \u2514\u2500\u2500 os-pkgs\n\u2502       \u2514\u2500\u2500 import_ospkgs.sh\n\u2514\u2500\u2500 localartifactset.cr.yaml\n
                                        "},{"location":"en/admin/kpanda/best-practice/update-offline-cluster.html#importing-the-offline-package-to-the-bootstrap-node","title":"Importing the Offline Package to the Bootstrap Node","text":"
1. Copy the data directory from the integrated node to the /root directory of the bootstrap node. On the integrated node , run the following command:

                                          scp -r data root@x.x.x.x:/root\n

                                          Replace x.x.x.x with the IP address of the bootstrap node.

2. On the bootstrap node, copy the image files in the data folder to the built-in Docker registry of the bootstrap node. After logging into the bootstrap node, run the following commands:

                                          1. Navigate to the directory where the image files are located.

                                            cd data/amd64/images\n
                                          2. Run the import_images.sh script to import the images into the built-in Docker Registry of the bootstrap node.

                                            REGISTRY_ADDR=\"127.0.0.1\" ./import_images.sh\n

                                          Note

                                          The above command is only applicable to the built-in Docker Registry of the bootstrap node. If you are using an external registry, use the following command:

                                          REGISTRY_SCHEME=https REGISTRY_ADDR=${registry_address} REGISTRY_USER=${username} REGISTRY_PASS=${password} ./import_images.sh\n
                                          • REGISTRY_ADDR is the address of the image repository, such as 1.2.3.4:5000.
                                          • If the image repository requires username and password authentication, set REGISTRY_USER and REGISTRY_PASS accordingly.
3. On the bootstrap node, copy the binary files in the data folder to the built-in Minio service of the bootstrap node.

                                          1. Navigate to the directory where the binary files are located.

                                            cd data/amd64/files/\n
                                          2. Run the import_files.sh script to import the binary files into the built-in Minio service of the bootstrap node.

                                            MINIO_USER=rootuser MINIO_PASS=rootpass123 ./import_files.sh http://127.0.0.1:9000\n

                                        Note

                                        The above command is only applicable to the built-in Minio service of the bootstrap node. If you are using an external Minio, replace http://127.0.0.1:9000 with the access address of the external Minio. \"rootuser\" and \"rootpass123\" are the default account and password for the built-in Minio service of the bootstrap node.

                                        "},{"location":"en/admin/kpanda/best-practice/update-offline-cluster.html#updating-the-kubernetes-version-manifest-for-the-global-service-cluster","title":"Updating the Kubernetes Version Manifest for the Global Service Cluster","text":"

                                        Run the following command on the bootstrap node to deploy the localartifactset resource to the global service cluster:

kubectl apply -f data/localartifactset.cr.yaml\n
                                        "},{"location":"en/admin/kpanda/best-practice/update-offline-cluster.html#next-steps","title":"Next Steps","text":"

                                        Log into the AI platform UI management interface to continue with the following actions:

                                        1. Refer to the Creating Cluster Documentation to create a worker cluster, where you can select the incremental version of Kubernetes.

                                        2. Refer to the Upgrading Cluster Documentation to upgrade your self-built worker cluster.

                                        "},{"location":"en/admin/kpanda/best-practice/use-otherlinux-create-custer.html","title":"Creating a Cluster on Non-Supported Operating Systems","text":"

                                        This document outlines how to create a worker cluster on an unsupported OS in offline mode. For the range of OS supported by AI platform, refer to AI platform Supported Operating Systems.

                                        The main process for creating a worker cluster on an unsupported OS in offline mode is illustrated in the diagram below:

                                        Next, we will use the openAnolis operating system as an example to demonstrate how to create a cluster on a non-mainstream operating system.

                                        "},{"location":"en/admin/kpanda/best-practice/use-otherlinux-create-custer.html#prerequisites","title":"Prerequisites","text":"
                                        • AI platform Full Mode has been deployed following the documentation: Offline Installation of AI platform Enterprise.
                                        • At least one node with the same architecture and version that can connect to the internet.
                                        "},{"location":"en/admin/kpanda/best-practice/use-otherlinux-create-custer.html#procedure","title":"Procedure","text":""},{"location":"en/admin/kpanda/best-practice/use-otherlinux-create-custer.html#online-node-building-an-offline-package","title":"Online Node - Building an Offline Package","text":"

                                        Find an online environment with the same architecture and OS as the nodes in the target cluster. In this example, we will use AnolisOS 8.8 GA. Run the following command to generate an offline os-pkgs package:

                                        # Download relevant scripts and build os packages package\n$ curl -Lo ./pkgs.yml https://raw.githubusercontent.com/kubean-io/kubean/main/build/os-packages/others/pkgs.yml\n$ curl -Lo ./other_os_pkgs.sh https://raw.githubusercontent.com/kubean-io/kubean/main/build/os-packages/others/other_os_pkgs.sh && chmod +x other_os_pkgs.sh\n$ ./other_os_pkgs.sh build # Build the offline package\n

                                        After executing the above command, you should have a compressed package named os-pkgs-anolis-8.8.tar.gz in the current directory. The file structure in the current directory should look like this:

                                            .\n    \u251c\u2500\u2500 other_os_pkgs.sh\n    \u251c\u2500\u2500 pkgs.yml\n    \u2514\u2500\u2500 os-pkgs-anolis-8.8.tar.gz\n
                                        "},{"location":"en/admin/kpanda/best-practice/use-otherlinux-create-custer.html#offline-node-installing-the-offline-package","title":"Offline Node - Installing the Offline Package","text":"

                                        Copy the three files generated on the online node ( other_os_pkgs.sh , pkgs.yml , and os-pkgs-anolis-8.8.tar.gz ) to all nodes in the target cluster in the offline environment.

Log in to any node of the target cluster in the offline environment, and run the following command to install the os-pkgs package on the node:

                                        # Configure environment variables\n$ export PKGS_YML_PATH=/root/workspace/os-pkgs/pkgs.yml # Path to the pkgs.yml file on the current offline node\n$ export PKGS_TAR_PATH=/root/workspace/os-pkgs/os-pkgs-anolis-8.8.tar.gz # Path to the os-pkgs-anolis-8.8.tar.gz file on the current offline node\n$ export SSH_USER=root # Username for the current offline node\n$ export SSH_PASS=dangerous # Password for the current offline node\n$ export HOST_IPS='172.30.41.168' # IP address of the current offline node\n$ ./other_os_pkgs.sh install # Install the offline package\n

After executing the above command, wait for the prompt All packages for node (X.X.X.X) have been installed, which indicates that the installation is complete.

                                        "},{"location":"en/admin/kpanda/best-practice/use-otherlinux-create-custer.html#go-to-the-user-interface-to-create-cluster","title":"Go to the User Interface to Create Cluster","text":"

                                        Refer to the documentation on Creating a Worker Cluster to create an openAnolis cluster.

                                        "},{"location":"en/admin/kpanda/clusterops/cluster-oversold.html","title":"Dynamic Resource Overprovision in the Cluster","text":"

                                        Currently, many businesses experience peaks and valleys in demand. To ensure service performance and stability, resources are typically allocated based on peak demand when deploying services. However, peak periods may be very short, resulting in resource waste during off-peak times. Cluster resource overprovision utilizes these allocated but unused resources (i.e., the difference between allocation and usage) to enhance cluster resource utilization and reduce waste.

                                        This article mainly introduces how to use the cluster dynamic resource overprovision feature.

                                        "},{"location":"en/admin/kpanda/clusterops/cluster-oversold.html#prerequisites","title":"Prerequisites","text":"
                                        • The container management module has been integrated with a Kubernetes cluster or a Kubernetes cluster has been created, and access to the cluster's UI interface is available.
                                        • A namespace has been created, and the user has been granted Cluster Admin permissions. For details, refer to Cluster Authorization.
                                        "},{"location":"en/admin/kpanda/clusterops/cluster-oversold.html#enable-cluster-overprovision","title":"Enable Cluster Overprovision","text":"
1. Click Clusters in the left navigation bar, then click the name of the target cluster to enter the Cluster Details page.

                                        2. On the cluster details page, click Cluster Operations -> Cluster Settings in the left navigation bar, then select the Advanced Configuration tab.

                                        3. Enable cluster overprovision and set the overprovision ratio.

                                          • If the cro-operator plugin is not installed, click the Install Now button and follow the installation process as per Managing Helm Apps.
                                          • If the cro-operator plugin is already installed, enable the cluster overprovision switch to start using the cluster overprovision feature.

                                          Note

The corresponding namespace in the cluster must have the following label applied for the cluster overprovision policy to take effect.

                                          clusterresourceoverrides.admission.autoscaling.openshift.io/enabled: \"true\"\n
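
A minimal sketch of applying that label (the namespace name is a placeholder):

kubectl label namespace your-namespace clusterresourceoverrides.admission.autoscaling.openshift.io/enabled=true\n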

                                        "},{"location":"en/admin/kpanda/clusterops/cluster-oversold.html#using-cluster-overprovision","title":"Using Cluster Overprovision","text":"

                                        Once the cluster dynamic resource overprovision ratio is set, it will take effect while workloads are running. The following example uses nginx to validate the use of resource overprovision capabilities.

                                        1. Create a workload (nginx) and set the proper resource limits. For the creation process, refer to Creating Stateless Workloads (Deployment).

2. Check whether the ratio of the Pod's resource requests to its limits matches the overprovision ratio (see the sketch below).
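
A hedged sketch of that check, assuming the nginx workload runs in the default namespace with the label app=nginx (both are assumptions):

# Print the first nginx Pod's requests and limits for comparison\nkubectl -n default get pod -l app=nginx -o jsonpath='{.items[0].spec.containers[0].resources}'\n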

                                        "},{"location":"en/admin/kpanda/clusterops/cluster-settings.html","title":"Cluster Settings","text":"

Cluster settings are used to customize advanced feature settings for your cluster, including whether to enable GPU, the Helm repo refresh cycle, and Helm operation record retention.

                                        • Enable GPU: GPUs and proper driver plug-ins need to be installed on the cluster in advance.

                                          Click the name of the target cluster, and click Operations and Maintenance -> Cluster Settings -> Addons in the left navigation bar.

• Other settings: the base image for Helm operations, the registry refresh cycle, the number of operation records retained, and whether to enable cluster deletion protection (once enabled, the cluster cannot be uninstalled directly).

                                        "},{"location":"en/admin/kpanda/clusterops/latest-operations.html","title":"recent operations","text":"

                                        On this page, you can view the recent cluster operation records and Helm operation records, as well as the YAML files and logs of each operation, and you can also delete a certain record.

                                        Set the number of reserved entries for Helm operations:

By default, the system keeps the last 100 Helm operation records. Keeping too many records causes data redundancy, while keeping too few may lose the key operation records you need. Set a reasonable retention count according to the actual situation. The specific steps are as follows:

                                        1. Click the name of the target cluster, and click Recent Operations -> Helm Operations -> Set Number of Retained Items in the left navigation bar.

                                        2. Set how many Helm operation records need to be kept, and click OK .

                                        "},{"location":"en/admin/kpanda/clusters/access-cluster.html","title":"Access Clusters","text":"

Clusters integrated or created using the AI platform Container Management can be accessed not only through the UI but also in two other ways:

                                        • Access online via CloudShell
                                        • Access via kubectl after downloading the cluster certificate

                                        Note

                                        When accessing the cluster, the user should have Cluster Admin permission or higher.

                                        "},{"location":"en/admin/kpanda/clusters/access-cluster.html#access-via-cloudshell","title":"Access via CloudShell","text":"
                                        1. Enter Clusters page, select the cluster you want to access via CloudShell, click the ... icon on the right, and then click Console from the dropdown list.

                                        2. Run kubectl get node command in the Console to verify the connectivity between CloudShell and the cluster. If the console returns node information of the cluster, you can access and manage the cluster through CloudShell.
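
For reference, the connectivity check in this step is simply:

kubectl get node\n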

                                        "},{"location":"en/admin/kpanda/clusters/access-cluster.html#access-via-kubectl","title":"Access via kubectl","text":"

                                        If you want to access and manage remote clusters from a local node, make sure you have met these prerequisites:

                                        • Your local node and the cloud cluster are in a connected network.
                                        • The cluster certificate has been downloaded to the local node.
                                        • The kubectl tool has been installed on the local node. For detailed installation guides, see Installing tools.

                                        If everything is in place, follow these steps to access a cloud cluster from your local environment.

                                        1. Enter Clusters page, find your target cluster, click ... on the right, and select Download kubeconfig in the drop-down list.

2. Set the Kubeconfig validity period and click Download .

                                        3. Open the downloaded certificate and copy its content to the config file of the local node.

                                          By default, the kubectl tool will look for a file named config in the $HOME/.kube directory on the local node. This file stores access credentials of clusters. Kubectl can access the cluster with that configuration file.
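
A minimal sketch of this step on the local node (the downloaded file name is a placeholder; back up any existing config first):

mkdir -p $HOME/.kube\n# Overwrites any existing kubeconfig; back it up first if needed\ncp ./downloaded-cluster.kubeconfig $HOME/.kube/config\n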

                                        4. Run the following command on the local node to verify its connectivity with the cluster:

                                          kubectl get pod -n default\n

                                          An expected output is as follows:

                                          NAME                            READY   STATUS      RESTARTS    AGE\ndao-2048-2048-58c7f7fc5-mq7h4   1/1     Running     0           30h\n

                                        Now you can access and manage the cluster locally with kubectl.

                                        "},{"location":"en/admin/kpanda/clusters/cluster-role.html","title":"Cluster Roles","text":"

                                        Suanova AI platform categorizes clusters based on different functionalities to help users better manage IT infrastructure.

                                        "},{"location":"en/admin/kpanda/clusters/cluster-role.html#global-service-cluster","title":"Global Service Cluster","text":"

                                        This cluster is used to run AI platform components such as Container Management, Global Management, Insight. It generally does not carry business workloads.

Supported features:

• K8s Version: 1.22+
• Operating System: RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD
• Full Lifecycle Management: Supported
• K8s Resource Management: Supported
• Cloud Native Storage: Supported
• Cloud Native Network: Calico, Cilium, Multus, and other CNIs
• Policy Management: Supports network policies, quota policies, resource limits, disaster recovery policies, security policies
"},{"location":"en/admin/kpanda/clusters/cluster-role.html#management-cluster","title":"Management Cluster","text":"

                                        This cluster is used to manage worker clusters and generally does not carry business workloads.

                                        • Classic Mode deploys the global service cluster and management cluster in different clusters, suitable for multi-data center, multi-architecture enterprise scenarios.
                                        • Simple Mode deploys the management cluster and global service cluster in the same cluster.
Supported features:

• K8s Version: 1.22+
• Operating System: RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD
• Full Lifecycle Management: Supported
• K8s Resource Management: Supported
• Cloud Native Storage: Supported
• Cloud Native Network: Calico, Cilium, Multus, and other CNIs
• Policy Management: Supports network policies, quota policies, resource limits, disaster recovery policies, security policies
"},{"location":"en/admin/kpanda/clusters/cluster-role.html#worker-cluster","title":"Worker Cluster","text":"

                                        This is a cluster created using Container Management and is mainly used to carry business workloads. This cluster is managed by the management cluster.

Supported features:

• K8s Version: Supports K8s 1.22 and above
• Operating System: RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD
• Full Lifecycle Management: Supported
• K8s Resource Management: Supported
• Cloud Native Storage: Supported
• Cloud Native Network: Calico, Cilium, Multus, and other CNIs
• Policy Management: Supports network policies, quota policies, resource limits, disaster recovery policies, security policies
"},{"location":"en/admin/kpanda/clusters/cluster-role.html#integrated-cluster","title":"Integrated Cluster","text":"

                                        This cluster is used to integrate existing standard K8s clusters, including but not limited to self-built clusters in local data centers, clusters provided by public cloud vendors, clusters provided by private cloud vendors, edge clusters, Xinchuang clusters, heterogeneous clusters, and different Suanova clusters. It is mainly used to carry business workloads.

Supported features:

• K8s Version: 1.18+
• Supported Vendors: VMware Tanzu, Amazon EKS, Redhat Openshift, SUSE Rancher, Alibaba ACK, Huawei CCE, Tencent TKE, Standard K8s Cluster, Suanova
• Full Lifecycle Management: Not Supported
• K8s Resource Management: Supported
• Cloud Native Storage: Supported
• Cloud Native Network: Depends on the network mode of the integrated cluster's kernel
• Policy Management: Supports network policies, quota policies, resource limits, disaster recovery policies, security policies

                                        Note

                                        A cluster can have multiple cluster roles. For example, a cluster can be both a global service cluster and a management cluster or a worker cluster.

                                        "},{"location":"en/admin/kpanda/clusters/cluster-scheduler-plugin.html","title":"Deploy Second Scheduler scheduler-plugins in a Cluster","text":"

This page describes how to deploy a second scheduler, scheduler-plugins, in a cluster.

                                        "},{"location":"en/admin/kpanda/clusters/cluster-scheduler-plugin.html#why-do-we-need-scheduler-plugins","title":"Why do we need scheduler-plugins?","text":"

A cluster created through the platform installs the native K8s scheduler, but the native scheduler has several limitations:

• The native scheduler cannot meet all scheduling requirements; you can use CoScheduling, CapacityScheduling, or other scheduler plugins instead.
• In special scenarios, a new scheduler is needed to complete scheduling tasks without affecting the native scheduler's process.
• Scheduler plugins with different functionalities can be distinguished, and different scheduling scenarios achieved, by switching scheduler names.

This page uses the scenario of combining the vgpu scheduler with the coscheduling capability of scheduler-plugins as an example to show how to install and use scheduler-plugins.

                                        "},{"location":"en/admin/kpanda/clusters/cluster-scheduler-plugin.html#installing-scheduler-plugins","title":"Installing scheduler-plugins","text":""},{"location":"en/admin/kpanda/clusters/cluster-scheduler-plugin.html#prerequisites","title":"Prerequisites","text":"
• scheduler-plugins support is a new feature introduced in kubean v0.13.0; please ensure that your kubean version is v0.13.0 or higher.
• The installed version of scheduler-plugins is v0.27.8; please ensure that your cluster version is compatible with it. Refer to the document Compatibility Matrix.
                                        "},{"location":"en/admin/kpanda/clusters/cluster-scheduler-plugin.html#installation-process","title":"Installation Process","text":"
                                        1. Add the scheduler-plugins parameter in Create Cluster -> Advanced Settings -> Custom Parameters.

scheduler_plugins_enabled: true\nscheduler_plugins_plugin_config:\n  - name: Coscheduling\n    args:\n      permitWaitingTimeSeconds: 10 # default is 60\n

                                          Parameters:

                                          • scheduler_plugins_enabled Set to true to enable the scheduler-plugins capability.
                                          • You can enable or disable certain plugins by setting the scheduler_plugins_enabled_plugins or scheduler_plugins_disabled_plugins options. See K8s Official Plugin Names for reference.
• If you need to set parameters for custom plugins, configure scheduler_plugins_plugin_config; for example, set the permitWaitingTimeSeconds parameter for coscheduling, as in the sketch below. See K8s Official Plugin Configuration for reference.
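A sketch combining these options; the plugin lists are illustrative assumptions, so check the kubean/scheduler-plugins documentation for the authoritative option format:

  scheduler_plugins_enabled: true\nscheduler_plugins_enabled_plugins:\n  - Coscheduling # enable only the plugins listed here\nscheduler_plugins_plugin_config:\n  - name: Coscheduling\n    args:\n      permitWaitingTimeSeconds: 10 # default is 60\n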
2. After the cluster is created successfully, the system will automatically install the scheduler-plugins and controller workloads. You can check the workload status in the corresponding cluster's deployments, for example as shown below.
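For example, assuming the components are deployed in the scheduler-plugins namespace (an assumption; the actual namespace may differ in your environment):

  kubectl get deploy -n scheduler-plugins\n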

                                        "},{"location":"en/admin/kpanda/clusters/cluster-scheduler-plugin.html#using-scheduler-plugins","title":"Using scheduler-plugins","text":"

                                        Here is an example of how to use scheduler-plugins by demonstrating a scenario where the vgpu scheduler is used in combination with the coscheduling plugin capability of scheduler-plugins.

1. Install vgpu via Helm Charts and set the following values.yaml parameters.

                                          • schedulerName: scheduler-plugins-scheduler: This is the scheduler name for scheduler-plugins installed by kubean, and currently cannot be modified.
• scheduler.kubeScheduler.enabled: false: Do not install kube-scheduler; use vgpu-scheduler as a separate extender. A combined values.yaml sketch follows this list.
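A combined values.yaml fragment based on the two bullets above (a sketch; other chart values are omitted):

  schedulerName: scheduler-plugins-scheduler # scheduler name installed by kubean; cannot be modified\nscheduler:\n  kubeScheduler:\n    enabled: false # use vgpu-scheduler as a separate extender\n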
                                        2. Extend vgpu-scheduler on scheduler-plugins.

                                          [root@master01 charts]# kubectl get cm -n scheduler-plugins scheduler-config -ojsonpath=\"{.data.scheduler-config\\.yaml}\"\n
                                          apiVersion: kubescheduler.config.k8s.io/v1\nkind: KubeSchedulerConfiguration\nleaderElection:\n  leaderElect: false\nprofiles:\n  # Compose all plugins in one profile\n  - schedulerName: scheduler-plugins-scheduler\n    plugins:\n      multiPoint:\n        enabled:\n          - name: Coscheduling\n          - name: CapacityScheduling\n          - name: NodeResourceTopologyMatch\n          - name: NodeResourcesAllocatable\n        disabled:\n          - name: PrioritySort\npluginConfig:\n  - args:\n      permitWaitingTimeSeconds: 10\n    name: Coscheduling\n

Modify the scheduler-config ConfigMap of scheduler-plugins to add the vgpu-scheduler extenders section:

                                          [root@master01 charts]# kubectl get cm -n scheduler-plugins scheduler-config -ojsonpath=\"{.data.scheduler-config\\.yaml}\"\n
                                          apiVersion: kubescheduler.config.k8s.io/v1\nkind: KubeSchedulerConfiguration\nleaderElection:\n  leaderElect: false\nprofiles:\n  # Compose all plugins in one profile\n  - schedulerName: scheduler-plugins-scheduler\n    plugins:\n      multiPoint:\n        enabled:\n          - name: Coscheduling\n          - name: CapacityScheduling\n          - name: NodeResourceTopologyMatch\n          - name: NodeResourcesAllocatable\n        disabled:\n          - name: PrioritySort\npluginConfig:\n  - args:\n      permitWaitingTimeSeconds: 10\n    name: Coscheduling\nextenders:\n  - urlPrefix: \"${urlPrefix}\"\n    filterVerb: filter\n    bindVerb: bind\n    nodeCacheCapable: true\n    ignorable: true\n    httpTimeout: 30s\n    weight: 1\n    enableHTTPS: true\n    tlsConfig:\n      insecure: true\n    managedResources:\n      - name: nvidia.com/vgpu\n        ignoredByScheduler: true\n      - name: nvidia.com/gpumem\n        ignoredByScheduler: true\n      - name: nvidia.com/gpucores\n        ignoredByScheduler: true\n      - name: nvidia.com/gpumem-percentage\n        ignoredByScheduler: true\n      - name: nvidia.com/priority\n        ignoredByScheduler: true\n      - name: cambricon.com/mlunum\n        ignoredByScheduler: true\n
                                        3. After installing vgpu-scheduler, the system will automatically create a service (svc), and the urlPrefix specifies the URL of the svc.

                                          Note

• The svc refers to the Service exposing the vgpu-scheduler Pods. You can run the following command in the namespace where the nvidia-vgpu plugin is installed to get the external access information for port 443.

                                            kubectl get svc -n ${namespace}\n
                                          • The urlPrefix format is https://${ip address}:${port}

                                        4. Restart the scheduler pod of scheduler-plugins to load the new configuration file.
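For example, assuming the scheduler runs as a Deployment named scheduler-plugins-scheduler in the scheduler-plugins namespace (both names are assumptions that may differ in your environment):

  kubectl -n scheduler-plugins rollout restart deployment scheduler-plugins-scheduler\n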

                                          Note

                                          When creating a vgpu application, you do not need to specify the name of a scheduler-plugin. The vgpu-scheduler webhook will automatically change the scheduler's name to \"scheduler-plugins-scheduler\" without manual specification.

                                        "},{"location":"en/admin/kpanda/clusters/cluster-status.html","title":"Cluster Status","text":"

                                        AI platform Container Management module can manage two types of clusters: integrated clusters and created clusters.

                                        • Integrated clusters: clusters created in other platforms and now integrated into AI platform.
                                        • Created clusters: clusters created in AI platform.

                                        For more information about cluster types, see Cluster Role.

We have designed several statuses for these two kinds of clusters.

                                        "},{"location":"en/admin/kpanda/clusters/cluster-status.html#integrated-clusters","title":"Integrated Clusters","text":"Status Description Integrating The cluster is being integrated into AI platform. Removing The cluster is being removed from AI platform. Running The cluster is running as expected. Unknown The cluster is lost. Data displayed in the AI platform UI is the cached data before the disconnection, which does not represent real-time data. Any operation during this status will not take effect. You should check cluster network connectivity or host status."},{"location":"en/admin/kpanda/clusters/cluster-status.html#created-clusters","title":"Created Clusters","text":"Status Description Creating The cluster is being created. Updating The Kubernetes version of the cluster is being operating. Deleting The cluster is being deleted. Running The cluster is running as expected. Unknown The cluster is lost. Data displayed in the AI platform UI is the cached data before the disconnection, which does not represent real-time data. Any operation during this status will not take effect. You should check cluster network connectivity or host status. Failed The cluster creation is failed. You should check the logs for detailed reasons."},{"location":"en/admin/kpanda/clusters/cluster-version.html","title":"Supported Kubernetes Versions","text":"

                                        In AI platform, the integrated clusters and created clusters have different version support mechanisms.

                                        This page focuses on the version support mechanism for created clusters.

The Kubernetes community maintains three minor versions at a time, such as 1.26, 1.27, and 1.28. When a new version is released, the supported range shifts up by one: for example, when 1.29 is released, the community-supported versions become 1.27, 1.28, and 1.29.

                                        To ensure the security and stability of the clusters, when creating clusters in AI platform, the supported version range will always be one version lower than the community's version.

                                        For instance, if the Kubernetes community supports v1.25, v1.26, and v1.27, then the version range for creating worker clusters in AI platform will be v1.24, v1.25, and v1.26. Additionally, a stable version, such as 1.24.7, will be recommended to users.

                                        Furthermore, the version range for creating worker clusters in AI platform will remain highly synchronized with the community. When the community version increases incrementally, the version range for creating worker clusters in AI platform will also increase by one version.

                                        "},{"location":"en/admin/kpanda/clusters/cluster-version.html#supported-kubernetes-versions_1","title":"Supported Kubernetes Versions","text":"Kubernetes Community Versions Created Worker Cluster Versions Recommended Versions for Created Worker Cluster AI platform Installer Release Date
                                        • 1.26
                                        • 1.27
                                        • 1.28
                                        • 1.26
                                        • 1.27
                                        • 1.28
                                        1.27.5 v0.13.0 2023.11.30"},{"location":"en/admin/kpanda/clusters/create-cluster.html","title":"Create Worker Clusters","text":"

                                        In AI platform Container Management, clusters can have four roles: global service cluster, management cluster, worker cluster, and integrated cluster. An integrated cluster can only be integrated from third-party vendors (see Integrate Cluster).

This page explains how to create a worker cluster. By default, the operating system type and CPU architecture of the new worker cluster's nodes should be consistent with those of the global service cluster. If you want to create a cluster with a different operating system or architecture, refer to Creating an Ubuntu Worker Cluster on a CentOS Management Platform for instructions.

                                        It is recommended to use the supported operating systems in AI platform to create the cluster. If your local nodes are not within the supported range, you can refer to Creating a Cluster on Non-Mainstream Operating Systems for instructions.

                                        "},{"location":"en/admin/kpanda/clusters/create-cluster.html#prerequisites","title":"Prerequisites","text":"

                                        Certain prerequisites must be met before creating a cluster:

                                        • Prepare enough nodes to be joined into the cluster.
• It is recommended to use a Kubernetes version within the supported range; for the specific versions, refer to the AI platform Cluster Version Support System. Currently, the supported version range for created worker clusters is v1.26.0-v1.28. If you need to create a cluster with a lower version, refer to the Supported Cluster Versions.
                                        • The target host must allow IPv4 forwarding. If using IPv6 in Pods and Services, the target server needs to allow IPv6 forwarding.
                                        • AI platform does not provide firewall management. You need to pre-define the firewall rules of the target host by yourself. To avoid errors during cluster creation, it is recommended to disable the firewall of the target host.
                                        • See Node Availability Check.
                                        "},{"location":"en/admin/kpanda/clusters/create-cluster.html#steps","title":"Steps","text":"
                                        1. Enter the Container Management module, click Create Cluster on the upper right corner of the Clusters page.

                                        2. Fill in the basic information by referring to the following instructions.

• Cluster Name: can contain only lowercase letters, numbers, and hyphens ("-"); must start and end with a lowercase letter or number; up to 63 characters in total.
                                          • Managed By: Choose a cluster to manage this new cluster through its lifecycle, such as creating, upgrading, node scaling, deleting the new cluster, etc.
                                          • Runtime: Select the runtime environment of the cluster. Currently support containerd and docker (see How to Choose Container Runtime).
• Kubernetes Version: Three consecutive minor versions are available, for example 1.23-1.25, subject to the versions supported by the management cluster.

                                        3. Fill in the node configuration information and click Node Check .

                                          • High Availability: When enabled, at least 3 controller nodes are required. When disabled, only 1 controller node is needed.

                                            It is recommended to use High Availability mode in production environments.

                                          • Credential Type: Choose whether to access nodes using username/password or public/private keys.

                                            If using public/private key authentication, SSH keys for the nodes need to be configured in advance. Refer to Using SSH Key Authentication for Nodes.

                                          • Same Password: When enabled, all nodes in the cluster will have the same access password. Enter the unified password for accessing all nodes in the field below. If disabled, you can set separate usernames and passwords for each node.

• Node Information: Set node names and IPs.
                                          • NTP Time Synchronization: When enabled, time will be automatically synchronized across all nodes. Provide the NTP server address.

4. If the node check passes, click Next . If it fails, update the Node Information and run the check again.

                                        5. Fill in the network configuration and click Next .

• CNI: Provides network services for Pods in the cluster. The CNI cannot be changed after the cluster is created. Cilium and Calico are supported. Selecting None means no CNI will be installed during cluster creation; you may install one later.

                                            For CNI configuration details, see Cilium Installation Parameters or Calico Installation Parameters.

                                          • Container IP Range: Set an IP range for allocating IPs for containers in the cluster. IP range determines the max number of containers allowed in the cluster. Cannot be modified after creation.

                                          • Service IP Range: Set an IP range for allocating IPs for container Services in the cluster. This range determines the max number of container Services that can be created in the cluster. Cannot be modified after creation.
                                        6. Fill in the plug-in configuration and click Next .

                                        7. Fill in advanced settings and click OK .

                                          • kubelet_max_pods : Set the maximum number of Pods per node. The default is 110.
                                          • hostname_override : Reset the hostname (not recommended).
                                          • kubernetes_audit : Kubernetes audit log, enabled by default.
                                          • auto_renew_certificate : Automatically renew the certificate of the control plane on the first Monday of each month, enabled by default.
                                          • disable_firewalld&ufw : Disable the firewall to prevent the node from being inaccessible during installation.
• insecure_registries : Set the address of your private container registry. Filling in this address allows the container engine to bypass certificate authentication when pulling images from that registry.
• yum_repos : Fill in the Yum repository address. A combined sketch of these custom parameters is shown below.
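A combined sketch of these custom parameters; all values here are illustrative assumptions, not recommendations:

  kubelet_max_pods: 110\nhostname_override: \"\" # not recommended\nkubernetes_audit: true\nauto_renew_certificate: true\ndisable_firewalld&ufw: true\ninsecure_registries:\n  - registry.example.com # hypothetical private registry address\nyum_repos: http://mirror.example.com/yum # hypothetical Yum repository address\n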

                                        Success

                                        • After correctly filling in the above information, the page will prompt that the cluster is being created.
• Creating a cluster takes a long time, so please wait patiently. You can click the Back to Clusters button to let the creation run in the background.
                                        • To view the current status, click Real-time Log .

                                        Note

• When the cluster is in an unknown state, it means that the current cluster has been disconnected.
                                        • The data displayed by the system is the cached data before the disconnection, which does not represent real data.
                                        • Any operations performed in the disconnected state will not take effect. Please check the cluster network connectivity or Host Status.
                                        "},{"location":"en/admin/kpanda/clusters/delete-cluster.html","title":"Delete/Remove Clusters","text":"

                                        Clusters created in AI platform Container Management can be either deleted or removed. Clusters integrated into AI platform can only be removed.

                                        Info

                                        If you want to delete an integrated cluster, you should delete it in the platform where it is created.

                                        In AI platform, the difference between Delete and Remove is:

• Delete will destroy the cluster and reset the data of all nodes in the cluster. All data will be completely cleared and lost. Making a backup before deleting a cluster is a recommended best practice. The cluster can no longer be used afterwards.
                                        • Remove just removes the cluster from AI platform. It will not destroy the cluster and no data will be lost. You can still use the cluster in other platforms or re-integrate it into AI platform later if needed.

                                        Note

                                        • You should have Admin or Kpanda Owner permissions to perform delete or remove operations.
                                        • Before deleting a cluster, you should turn off Cluster Deletion Protection in Cluster Settings -> Advanced Settings , otherwise the Delete Cluster option will not be displayed.
                                        • The global service cluster cannot be deleted or removed.
1. Enter the Container Management module, find your target cluster, click __...__ on the right, and select Delete Cluster / Remove in the drop-down list.

                                        2. Enter the cluster name to confirm and click Delete .

3. You will be automatically redirected to the cluster list. The status of the cluster will change to Deleting . It may take a while to delete/remove a cluster.

                                        "},{"location":"en/admin/kpanda/clusters/integrate-cluster.html","title":"Integrate Clusters","text":"

With the cluster integration feature, AI platform allows you to manage on-premise and cloud clusters from various providers in a unified manner. This is important for avoiding vendor lock-in and helps enterprises safely migrate their business to the cloud.

                                        In AI platform Container Management module, you can integrate a cluster of the following providers: standard Kubernetes clusters, Redhat Openshift, SUSE Rancher, VMware Tanzu, Amazon EKS, Aliyun ACK, Huawei CCE, Tencent TKE, etc.

                                        "},{"location":"en/admin/kpanda/clusters/integrate-cluster.html#prerequisites","title":"Prerequisites","text":"
                                        • Prepare a cluster of K8s v1.22+ and ensure its network connectivity.
                                        • The operator should have the NS Editor or higher permissions.
                                        "},{"location":"en/admin/kpanda/clusters/integrate-cluster.html#steps","title":"Steps","text":"
                                        1. Enter Container Management module, and click Integrate Cluster in the upper right corner.

                                        2. Fill in the basic information by referring to the following instructions.

                                          • Cluster Name: It should be unique and cannot be changed after the integration. Maximum 63 characters, can only contain lowercase letters, numbers, and a separator (\"-\"), and must start and end with a lowercase letter or number.
                                          • Cluster Alias: Enter any characters, no more than 60 characters.
• Release Distribution: the cluster provider; the mainstream vendors listed above are supported.
                                        3. Fill in the KubeConfig of the target cluster and click Verify Config . The cluster can be successfully connected only after the verification is passed.

                                          Click How do I get the KubeConfig? to see the specific steps for getting this file.

                                        4. Confirm that all parameters are filled in correctly and click OK in the lower right corner of the page.

                                        Note

                                        The status of the newly integrated cluster is Integrating , which will become Running after the integration succeeds.

                                        "},{"location":"en/admin/kpanda/clusters/integrate-rancher-cluster.html","title":"Integrate the Rancher Cluster","text":"

                                        This page explains how to integrate a Rancher cluster.

                                        "},{"location":"en/admin/kpanda/clusters/integrate-rancher-cluster.html#prerequisites","title":"Prerequisites","text":"
                                        • Prepare a Rancher cluster with administrator privileges and ensure network connectivity between the container management cluster and the target cluster.
• You should have permissions not lower than Kpanda Owner.
                                        "},{"location":"en/admin/kpanda/clusters/integrate-rancher-cluster.html#steps","title":"Steps","text":""},{"location":"en/admin/kpanda/clusters/integrate-rancher-cluster.html#step-1-create-a-serviceaccount-user-with-administrator-privileges-in-the-rancher-cluster","title":"Step 1: Create a ServiceAccount user with administrator privileges in the Rancher cluster","text":"
                                        1. Log in to the Rancher cluster with a role that has administrator privileges, and create a file named sa.yaml using the terminal.

                                          vi sa.yaml\n

                                          Press the i key to enter insert mode, then copy and paste the following content:

                                          sa.yaml
apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: rancher-rke\nrules:\n- apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n- nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: rancher-rke\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: rancher-rke\nsubjects:\n- kind: ServiceAccount\n  name: rancher-rke\n  namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: rancher-rke\n  namespace: kube-system\n

                                          Press the Esc key to exit insert mode, then type :wq to save and exit.

                                        2. Run the following command in the current directory to create a ServiceAccount named rancher-rke (referred to as SA for short):

                                          kubectl apply -f sa.yaml\n

                                          The expected output is as follows:

                                          clusterrole.rbac.authorization.k8s.io/rancher-rke created\nclusterrolebinding.rbac.authorization.k8s.io/rancher-rke created\nserviceaccount/rancher-rke created\n
                                        3. Create a secret named rancher-rke-secret and bind the secret to the rancher-rke SA.

kubectl apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: rancher-rke-secret\n  namespace: kube-system\n  annotations:\n    kubernetes.io/service-account.name: rancher-rke\ntype: kubernetes.io/service-account-token\nEOF\n

                                          The output is expected to be:

                                          secret/rancher-rke-secret created\n

                                          Note

                                          If your cluster version is lower than 1.24, please ignore this step and proceed to the next one.

                                        4. Check secret for rancher-rke SA:

                                          kubectl -n kube-system get secret | grep rancher-rke | awk '{print $1}'\n

                                          The output is expected to be:

                                          rancher-rke-secret\n

                                          Check the rancher-rke-secret secret:

                                          kubectl -n kube-system describe secret rancher-rke-secret\n

                                          The output is expected to be:

                                          Name:         rancher-rke-secret\nNamespace:    kube-system\nLabels:       <none>\nAnnotations:  kubernetes.io/service-account.name: rancher-rke\n            kubernetes.io/service-account.uid: d83df5d9-bd7d-488d-a046-b740618a0174\n\nType:  kubernetes.io/service-account-token\n\nData\n====\nca.crt:     570 bytes\nnamespace:  11 bytes\ntoken:      eyJhbGciOiJSUzI1NiIsImtpZCI6IjUtNE9nUWZLRzVpbEJORkZaNmtCQXhqVzRsZHU4MHhHcDBfb0VCaUo0V1kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJyYW5jaGVyLXJrZS1zZWNyZXQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoicmFuY2hlci1ya2UiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkODNkZjVkOS1iZDdkLTQ4OGQtYTA0Ni1iNzQwNjE4YTAxNzQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06cmFuY2hlci1ya2UifQ.VNsMtPEFOdDDeGt_8VHblcMRvjOwPXMM-79o9UooHx6q-VkHOcIOp3FOT2hnEdNnIsyODZVKCpEdCgyozX-3y5x2cZSZpocnkMcBbQm-qfTyUcUhAY7N5gcYUtHUhvRAsNWJcsDCn6d96gT_qo-ddo_cT8Ri39Lc123FDYOnYG-YGFKSgRQVy7Vyv34HIajZCCjZzy7i--eE_7o4DXeTjNqAFMFstUxxHBOXI3Rdn1zKQKqh5Jhg4ES7X-edSviSUfJUX-QV_LlAw5DuAyGPH7bDH4QaQ5k-p6cIctmpWZE-9wRDlKA4LYRblKE7MJcI6OmM4ldlMM0Jc8N-gCtl4w\n
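Alternatively, you can extract just the token field with a jsonpath query (a convenience sketch using standard kubectl):

  kubectl -n kube-system get secret rancher-rke-secret -o jsonpath='{.data.token}' | base64 -d\n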
                                        "},{"location":"en/admin/kpanda/clusters/integrate-rancher-cluster.html#step-2-update-kubeconfig-with-the-rancher-rke-sa-authentication-on-your-local-machine","title":"Step 2: Update kubeconfig with the rancher-rke SA authentication on your local machine","text":"

Perform the following steps on any local node where kubectl is installed:

1. Configure the kubectl token.

kubectl config set-credentials rancher-rke --token=__rancher-rke-secret__ # token info\n

                                          For example,

                                          kubectl config set-credentials eks-admin --token=eyJhbGciOiJSUzI1NiIsImtpZCI6IjUtNE9nUWZLRzVpbEJORkZaNmtCQXhqVzRsZHU4MHhHcDBfb0VCaUo0V1kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJyYW5jaGVyLXJrZS1zZWNyZXQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoicmFuY2hlci1ya2UiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkODNkZjVkOS1iZDdkLTQ4OGQtYTA0Ni1iNzQwNjE4YTAxNzQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06cmFuY2hlci1ya2UifQ.VNsMtPEFOdDDeGt_8VHblcMRvjOwPXMM-79o9UooHx6q-VkHOcIOp3FOT2hnEdNnIsyODZVKCpEdCgyozX-3y5x2cZSZpocnkMcBbQm-qfTyUcUhAY7N5gcYUtHUhvRAsNWJcsDCn6d96gT_qo-ddo_cT8Ri39Lc123FDYOnYG-YGFKSgRQVy7Vyv34HIajZCCjZzy7i--eE_7o4DXeTjNqAFMFstUxxHBOXI3Rdn1zKQKqh5Jhg4ES7X-edSviSUfJUX-QV_LlAw5DuAyGPH7bDH4QaQ5k-p6cIctmpWZE-9wRDlKA4LYRblKE7MJcI6OmM4ldlMM0Jc8N-gCtl4w\n
2. Configure the kubectl APIServer information.

                                          kubectl config set-cluster {cluster-name} --insecure-skip-tls-verify=true --server={APIServer}\n
                                          • {cluster-name} : the name of your Rancher cluster.
• {APIServer} : the access address of the cluster, usually referring to the IP address of the control plane node plus port 6443, such as https://10.X.X.X:6443 .

                                          For example,

                                          kubectl config set-cluster rancher-rke --insecure-skip-tls-verify=true --server=https://10.X.X.X:6443\n
3. Configure the kubectl context.

                                          kubectl config set-context {context-name} --cluster={cluster-name} --user={SA-usename}\n

                                          For example,

                                          kubectl config set-context rancher-rke-context --cluster=rancher-rke --user=rancher-rke\n
4. Specify the newly created context rancher-rke-context in kubectl.

                                          kubectl config use-context rancher-rke-context\n
                                        5. Fetch the kubeconfig information for the context rancher-rke-context .

                                          kubectl config view --minify --flatten --raw\n

                                          The output is expected to be:

apiVersion: v1\nclusters:\n- cluster:\n    insecure-skip-tls-verify: true\n    server: https://77C321BCF072682C70C8665ED4BFA10D.gr7.ap-southeast-1.eks.amazonaws.com\n  name: joincluster\ncontexts:\n- context:\n    cluster: joincluster\n    user: eks-admin\n  name: ekscontext\ncurrent-context: ekscontext\nkind: Config\npreferences: {}\nusers:\n- name: eks-admin\n  user:\n    token: eyJhbGciOiJSUzI1NiIsImtpZCI6ImcxTjJwNkktWm5IbmRJU1RFRExvdWY1TGFWVUtGQ3VIejFtNlFQcUNFalEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2V

                                        "},{"location":"en/admin/kpanda/clusters/integrate-rancher-cluster.html#step-3-connect-the-cluster-in-the-suanova-interface","title":"Step 3: Connect the cluster in the Suanova Interface","text":"

                                        Using the kubeconfig file fetched earlier, refer to the Integrate Cluster documentation to integrate the Rancher cluster to the global cluster.

                                        "},{"location":"en/admin/kpanda/clusters/runtime.html","title":"How to choose the container runtime","text":"

The container runtime is an important Kubernetes component that manages the life cycle of containers and container images. Kubernetes made containerd the default container runtime in version 1.19 and removed support for the Dockershim component in version 1.24.

Therefore, compared to the Docker runtime, we recommend the lightweight containerd as your container runtime, as it has become the mainstream runtime choice.

In addition, some operating system distributions have limited compatibility with the Docker runtime. The runtime support of different operating systems is as follows:

                                        "},{"location":"en/admin/kpanda/clusters/runtime.html#operating-systems-and-supported-runtimes","title":"Operating systems and supported runtimes","text":"Operating System Supported containerd Versions Supported Docker Versions CentOS 1.5.5, 1.5.7, 1.5.8, 1.5.9, 1.5.10, 1.5.11, 1.5.12, 1.5.13, 1.6.0, 1.6.1, 1.6.2, 1.6.3, 1.6.4, 1.6.5, 1.6.6, 1.6.7, 1.6.8, 1.6.9, 1.6.10, 1.6.11, 1.6.12, 1.6.13, 1.6.14, 1.6.15 (default) 18.09, 19.03, 20.10 (default) RedHatOS 1.5.5, 1.5.7, 1.5.8, 1.5.9, 1.5.10, 1.5.11, 1.5.12, 1.5.13, 1.6.0, 1.6.1, 1.6.2, 1.6.3, 1.6.4, 1.6.5, 1.6.6, 1.6.7, 1.6.8, 1.6.9, 1.6.10, 1.6.11, 1.6.12, 1.6.13, 1.6.14, 1.6.15 (default) 18.09, 19.03, 20.10 (default) KylinOS 1.5.5, 1.5.7, 1.5.8, 1.5.9, 1.5.10, 1.5.11, 1.5.12, 1.5.13, 1.6.0, 1.6.1, 1.6.2, 1.6.3, 1.6.4, 1.6.5, 1.6.6, 1.6.7, 1.6.8, 1.6.9, 1.6.10, 1.6.11, 1.6.12, 1.6.13, 1.6.14, 1.6.15 (default) 19.03 (Only supported by ARM architecture, Docker is not supported as a runtime under x86 architecture)

                                        Note

                                        In the offline installation mode, you need to prepare the runtime offline package of the relevant operating system in advance.

                                        "},{"location":"en/admin/kpanda/clusters/upgrade-cluster.html","title":"Cluster Upgrade","text":"

The Kubernetes community releases a minor version every quarter, and each version is maintained for only about 9 months. Major bugs and security vulnerabilities are no longer fixed once a version reaches end of maintenance. Manually upgrading clusters is cumbersome and places a huge workload on administrators.

In Suanova, you can upgrade a Kubernetes cluster with one click through the web UI.

                                        Danger

                                        After the version is upgraded, it will not be possible to roll back to the previous version, please proceed with caution.

                                        Note

                                        • Kubernetes versions are denoted as x.y.z , where x is the major version, y is the minor version, and z is the patch version.
• Cluster upgrades cannot skip minor versions; for example, a direct upgrade from 1.23 to 1.25 is not possible.
• **Integrated clusters do not support version upgrades. If there is no Cluster Upgrade entry in the left navigation bar, please check whether the cluster is an integrated cluster.**
                                        • The global service cluster can only be upgraded through the terminal.
                                        • When upgrading a worker cluster, the Management Cluster of the worker cluster should have been connected to the container management module and be running normally.
                                        1. Click the name of the target cluster in the cluster list.

                                        2. Then click Cluster Operation and Maintenance -> Cluster Upgrade in the left navigation bar, and click Version Upgrade in the upper right corner of the page.

                                        3. Select the version that can be upgraded, and enter the cluster name to confirm.

                                        4. After clicking OK , you can see the upgrade progress of the cluster.

                                        5. The cluster upgrade is expected to take 30 minutes. You can click the Real-time Log button to view the detailed log of the cluster upgrade.

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/create-configmap.html","title":"Create ConfigMaps","text":"

ConfigMaps store non-confidential data as key-value pairs, decoupling configuration data from application code. ConfigMaps can be used as environment variables for containers, as command-line parameters, or as configuration files in storage volumes.

                                        Note

                                        • The data saved in ConfigMaps cannot exceed 1 MiB. If you need to store larger volumes of data, it is recommended to mount a storage volume or use an independent database or file service.

• ConfigMaps do not provide confidentiality or encryption. If you want to store encrypted data, it is recommended to use a Secret or another third-party tool to ensure data privacy.

                                        You can create ConfigMaps with two methods:

                                        • Graphical form creation
                                        • YAML creation
                                        "},{"location":"en/admin/kpanda/configmaps-secrets/create-configmap.html#prerequisites","title":"Prerequisites","text":"
• You have integrated or created a Kubernetes cluster and can access its UI.

                                        • Created a namespace, user, and authorized the user as NS Editor. For details, refer to Namespace Authorization.

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/create-configmap.html#graphical-form-creation","title":"Graphical form creation","text":"
                                        1. Click the name of a cluster on the Clusters page to enter Cluster Details .

                                        2. In the left navigation bar, click ConfigMap and Secret -> ConfigMap , and click the Create ConfigMap button in the upper right corner.

                                        3. Fill in the configuration information on the Create ConfigMap page, and click OK .

Note

  Click __Upload File__ to import an existing local file to quickly create ConfigMaps.
4. After the creation is complete, click More on the right side of the ConfigMap to perform operations such as editing YAML, updating, exporting, and deleting.

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/create-configmap.html#yaml-creation","title":"YAML creation","text":"
                                        1. Click the name of a cluster on the Clusters page to enter Cluster Details .

                                        2. In the left navigation bar, click ConfigMap and Secret -> ConfigMap , and click the YAML Create button in the upper right corner.

                                        3. Fill in or paste the configuration file prepared in advance, and then click OK in the lower right corner of the pop-up box.

Note

  - Click __Import__ to import an existing local file to quickly create ConfigMaps.
  - After filling in the data, click __Download__ to save the configuration file locally.
4. After the creation is complete, click More on the right side of the ConfigMap to perform operations such as editing YAML, updating, exporting, and deleting.

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/create-configmap.html#configmap-yaml-example","title":"ConfigMap YAML example","text":"
kind: ConfigMap\napiVersion: v1\nmetadata:\n  name: kube-root-ca.crt\n  namespace: default\ndata:\n  version: '1.0'\n

                                        Next step: Use ConfigMaps

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/create-secret.html","title":"Create Secret","text":"

A secret is a resource object used to store and manage sensitive information such as passwords, OAuth tokens, SSH and TLS credentials. Using secrets means you don't need to include sensitive data in your application code.

Secrets are typically used in the following cases:

                                        • Used as an environment variable of the container to provide some necessary information required during the running of the container.
                                        • Use secrets as pod data volumes.
                                        • As the identity authentication credential for the container registry when the kubelet pulls the container image.

You can create secrets with two methods:

                                        • Graphical form creation
                                        • YAML creation
                                        "},{"location":"en/admin/kpanda/configmaps-secrets/create-secret.html#prerequisites","title":"Prerequisites","text":"
• You have integrated or created a Kubernetes cluster and can access its UI

                                        • Created a namespace, user, and authorized the user as NS Editor. For details, refer to Namespace Authorization.

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/create-secret.html#create-secret-with-wizard","title":"Create secret with wizard","text":"
                                        1. Click the name of a cluster on the Clusters page to enter Cluster Details .

                                        2. In the left navigation bar, click ConfigMap and Secret -> Secret , and click the Create Secret button in the upper right corner.

                                        3. Fill in the configuration information on the Create Secret page, and click OK .

                                          Note when filling in the configuration:

• The name of the secret must be unique within the same namespace
• Secret type:
  • Default (Opaque): the Kubernetes default secret type, which supports arbitrary user-defined data.
  • TLS (kubernetes.io/tls): credentials for TLS client or server data access.
  • Container registry information (kubernetes.io/dockerconfigjson): credentials for container registry access.
  • Username and password (kubernetes.io/basic-auth): credentials for basic authentication.
  • Custom: a type customized by the user according to business needs.
• Secret data: the data stored in the secret; the parameters to fill in differ by type
  • When the secret type is default (Opaque)/custom: multiple key-value pairs can be filled in.
  • When the secret type is TLS (kubernetes.io/tls): you need to fill in the certificate and private key data. A certificate is a self-signed or CA-signed credential used for authentication; a certificate request is a request for a signature and needs to be signed with a private key.
  • When the secret type is container registry information (kubernetes.io/dockerconfigjson): you need to fill in the account and password of the private container registry.
  • When the secret type is username and password (kubernetes.io/basic-auth): a username and password need to be specified.
                                        "},{"location":"en/admin/kpanda/configmaps-secrets/create-secret.html#yaml-creation","title":"YAML creation","text":"
                                        1. Click the name of a cluster on the Clusters page to enter Cluster Details .

                                        2. In the left navigation bar, click ConfigMap and Secret -> Secret , and click the YAML Create button in the upper right corner.

                                        3. Fill in the YAML configuration on the Create with YAML page, and click OK .

You can import a local YAML file, or download and save the completed file locally.

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/create-secret.html#key-yaml-example","title":"key YAML example","text":"
apiVersion: v1\nkind: Secret\nmetadata:\n  name: secretdemo\ntype: Opaque\ndata:\n  username: ****\n  password: ****\n
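Note that the values under data must be Base64-encoded. For example, you can encode a plaintext value on the command line like this (a minimal sketch):

  echo -n 'admin' | base64 # output: YWRtaW4=\n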

Next step: Use Secret

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/use-configmap.html","title":"Use ConfigMaps","text":"

A ConfigMap is a Kubernetes API object used to store non-confidential data in key-value pairs; it can hold configuration that other objects need to use. A container can consume it as an environment variable, a command-line argument, or a configuration file in a storage volume. By using ConfigMaps, configuration data and application code can be separated, providing a more flexible way to modify application configuration.

                                        Note

                                        ConfigMaps do not provide confidentiality or encryption. If the data to be stored is confidential, please use secret, or use other third-party tools to ensure the privacy of the data instead of ConfigMaps. In addition, when using ConfigMaps in containers, the container and ConfigMaps must be in the same cluster namespace.

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/use-configmap.html#scenes-to-be-used","title":"scenes to be used","text":"

                                        You can use ConfigMaps in Pods. There are many use cases, mainly including:

                                        • Use ConfigMaps to set the environment variables of the container

                                        • Use ConfigMaps to set the command line parameters of the container

                                        • Use ConfigMaps as container data volumes

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/use-configmap.html#set-the-environment-variables-of-the-container","title":"Set the environment variables of the container","text":"

                                        You can use the ConfigMap as the environment variable of the container through the graphical interface or the terminal command line.

                                        Note

ConfigMap import uses the whole ConfigMap as a source of environment variables, while ConfigMap key-value import uses a single key in the ConfigMap as the value of one environment variable. A minimal sketch of the difference follows.
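In Kubernetes terms, the two styles roughly correspond to envFrom with configMapRef versus env with valueFrom.configMapKeyRef. A container spec fragment as a sketch, reusing the kpanda-configmap name from the example below:

  # ConfigMap import: every key in the ConfigMap becomes an environment variable\nenvFrom:\n  - configMapRef:\n      name: kpanda-configmap\n# ConfigMap key-value import: a single key becomes one environment variable\nenv:\n  - name: SPECIAL_LEVEL_KEY\n    valueFrom:\n      configMapKeyRef:\n        name: kpanda-configmap\n        key: SPECIAL_LEVEL\n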

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/use-configmap.html#graphical-interface-operation","title":"Graphical interface operation","text":"

                                        When creating a workload through an image, you can set environment variables for the container by selecting Import ConfigMaps or Import ConfigMap Key Values on the Environment Variables interface.

                                        1. Go to the Image Creation Workload page, in the Container Configuration step, select the Environment Variables configuration, and click the Add Environment Variable button.

                                        2. Select ConfigMap Import or ConfigMap Key Value Import in the environment variable type.

• When the environment variable type is selected as ConfigMap import , enter the variable name, prefix, and ConfigMap name in sequence.

• When the environment variable type is selected as ConfigMap key-value import , enter the variable name, ConfigMap name, and key in sequence.

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/use-configmap.html#command-line-operation","title":"Command line operation","text":"

                                        You can set ConfigMaps as environment variables when creating a workload, using the valueFrom parameter to refer to the Key/Value in the ConfigMap.

                                        apiVersion: v1\nkind: Pod\nmetadata:\n   name: configmap-pod-1\nspec:\n   containers:\n     - name: test-container\n       image: busybox\n       command: [ \"/bin/sh\", \"-c\", \"env\" ]\n       env:\n         - name: SPECIAL_LEVEL_KEY\n           valueFrom: # (1)\n             configMapKeyRef:\n               name: kpanda-configmap # (2)\n               key: SPECIAL_LEVEL # (3)\n   restartPolicy: Never\n
1. Use valueFrom to specify that the env value comes from a ConfigMap
2. Name of the referenced ConfigMap
3. Key of the referenced ConfigMap
                                        "},{"location":"en/admin/kpanda/configmaps-secrets/use-configmap.html#set-the-command-line-parameters-of-the-container","title":"Set the command line parameters of the container","text":"

You can use ConfigMaps to set commands or parameter values in the container through the environment variable substitution syntax $(VAR_NAME), as follows.

                                        apiVersion: v1\nkind: Pod\nmetadata:\n   name: configmap-pod-3\nspec:\n   containers:\n     - name: test-container\n       image: busybox\n       command: [ \"/bin/sh\", \"-c\", \"echo $(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY)\" ]\n       env:\n         - name: SPECIAL_LEVEL_KEY\n           valueFrom:\n             configMapKeyRef:\n               name: kpanda-configmap\n               key: SPECIAL_LEVEL\n         - name: SPECIAL_TYPE_KEY\n           valueFrom:\n             configMapKeyRef:\n               name: kpanda-configmap\n               key: SPECIAL_TYPE\n   restartPolicy: Never\n

                                        After the Pod runs, the output is as follows.

                                        Hello Kpanda\n
                                        "},{"location":"en/admin/kpanda/configmaps-secrets/use-configmap.html#used-as-container-data-volume","title":"Used as container data volume","text":"

You can use the ConfigMap as a data volume of the container through the graphical interface or the terminal command line.

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/use-configmap.html#graphical-operation","title":"Graphical operation","text":"

                                        When creating a workload through an image, you can use the ConfigMap as the data volume of the container by selecting the storage type as \"ConfigMap\" on the \"Data Storage\" interface.

1. Go to the Image Creation Workload page, in the Container Configuration step, select the Data Storage configuration, and click the Add button in the Node Path Mapping list.

                                        2. Select ConfigMap in the storage type, and enter container path , subpath and other information in sequence.

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/use-configmap.html#command-line-operation_1","title":"Command line operation","text":"

You can also use a ConfigMap in a Pod's storage volume.

                                        Here is an example Pod that mounts a ConfigMap as a volume:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: mypod\nspec:\n  containers:\n  - name: mypod\n    image: redis\n    volumeMounts:\n    - name: foo\n      mountPath: \"/etc/foo\"\n      readOnly: true\n  volumes:\n  - name: foo\n    configMap:\n      name: myconfigmap\n

                                        If there are multiple containers in a Pod, each container needs its own volumeMounts block, but you only need to set one spec.volumes block per ConfigMap.

                                        Note

                                        When a ConfigMap is used as a data volume mounted on a container, the ConfigMap can only be read as a read-only file.

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html","title":"use key","text":"

A secret is a resource object used to store and manage sensitive information such as passwords, OAuth tokens, SSH and TLS credentials. Using secrets means you don't need to include sensitive data in your application code.

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html#scenes-to-be-used","title":"scenes to be used","text":"

You can use secrets in Pods in a variety of use cases, mainly including:

                                        • Used as an environment variable of the container to provide some necessary information required during the running of the container.
                                        • Use secrets as pod data volumes.
                                        • Used as the identity authentication credential for the container registry when the kubelet pulls the container image.
                                        "},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html#use-the-key-to-set-the-environment-variable-of-the-container","title":"Use the key to set the environment variable of the container","text":"

You can use the secret as the environment variable of the container through the GUI or the terminal command line.

                                        Note

Secret import uses the whole secret as a source of environment variables, while secret key-value import uses a single key in the secret as the value of one environment variable. A minimal sketch of the difference follows.
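In Kubernetes terms, these correspond to envFrom with secretRef versus env with valueFrom.secretKeyRef. A container spec fragment as a sketch, reusing the mysecret name from the example below:

  # Secret import: every key in the Secret becomes an environment variable\nenvFrom:\n  - secretRef:\n      name: mysecret\n# Secret key-value import: a single key becomes one environment variable\nenv:\n  - name: SECRET_USERNAME\n    valueFrom:\n      secretKeyRef:\n        name: mysecret\n        key: username\n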

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html#graphical-interface-operation","title":"Graphical interface operation","text":"

When creating a workload from an image, you can set environment variables for the container by selecting Secret Import or Secret Key-Value Import on the Environment Variables interface.

                                        1. Go to the Image Creation Workload page.

                                        2. Select the Environment Variables configuration in Container Configuration , and click the Add Environment Variable button.

3. Select Secret Import or Secret Key-Value Import in the environment variable type.

• When the environment variable type is selected as Secret Import , enter the variable name, prefix, and Secret name in sequence.

• When the environment variable type is selected as Secret key-value import , enter the variable name, Secret name, and key in sequence.

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html#command-line-operation","title":"Command line operation","text":"

                                        As shown in the example below, you can set a Secret as an environment variable when creating the workload, using the valueFrom parameter to reference a key/value pair in the Secret.

                                        apiVersion: v1\nkind: Pod\nmetadata:\n  name: secret-env-pod\nspec:\n  containers:\n  - name: mycontainer\n    image: redis\n    env:\n    - name: SECRET_USERNAME\n      valueFrom:\n        secretKeyRef:\n          name: mysecret\n          key: username\n          optional: false # (1)\n    - name: SECRET_PASSWORD\n      valueFrom:\n        secretKeyRef:\n          name: mysecret\n          key: password\n          optional: false # (2)\n
                                        1. This is the default value; it means \"mysecret\" must exist and contain a key named \"username\".
                                        2. This is the default value; it means \"mysecret\" must exist and contain a key named \"password\".
                                        "},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html#use-the-key-as-the-pods-data-volume","title":"Use the key as the pod's data volume","text":""},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html#graphical-interface-operation_1","title":"Graphical interface operation","text":"

                                        When creating a workload from an image, you can use a Secret as a container data volume by selecting Secret as the storage type on the Data Storage interface.

                                        1. Go to the Image Creation Workload page.

                                        2. In the Container Configuration , select the Data Storage configuration, and click the Add button in the Node Path Mapping list.

                                        3. Select Secret as the storage type, then enter the container path, subpath, and other information in sequence.

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html#command-line-operation_1","title":"Command line operation","text":"

                                        The following is an example of a Pod that mounts a Secret named mysecret via a data volume:

                                        apiVersion: v1\nkind: Pod\nmetadata:\n  name: mypod\nspec:\n  containers:\n  - name: mypod\n    image: redis\n    volumeMounts:\n    - name: foo\n      mountPath: \"/etc/foo\"\n      readOnly: true\n  volumes:\n  - name: foo\n    secret:\n      secretName: mysecret\n      optional: false # (1)\n
                                        1. This is the default setting; it means \"mysecret\" must already exist.

                                        If the Pod contains multiple containers, each container needs its own volumeMounts block, but only one .spec.volumes setting is required for each Secret.

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html#used-as-the-identity-authentication-credential-for-the-container-registry-when-the-kubelet-pulls-the-container-image","title":"Used as the identity authentication credential for the container registry when the kubelet pulls the container image","text":"

                                        You can use a Secret as the authentication credential for a container registry through the GUI or the command line.

                                        "},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html#graphical-operation","title":"Graphical operation","text":"

                                        When creating a workload from an image, you can use a Secret as the authentication credential for pulling images from a private container registry.

                                        1. Go to the Image Creation Workload page.

                                        2. In the second step of Container Configuration , select the Basic Information configuration, and click the Select Image button.

                                        3. Select the name of the private container registry in the container registry drop-down list in the pop-up box. See Create Secret for details on creating a Secret for a private registry.

                                        4. Enter the image name from the private registry and click OK to complete the image selection.

                                        Note

                                        When creating a Secret, make sure you enter the correct container registry address, username, and password, and select the correct image name; otherwise you will not be able to pull the image from the container registry.
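
                                        For reference, a hedged command-line sketch of the same flow (the registry address, account values, and image name are placeholders): create a docker-registry Secret, then reference it in the Pod's imagePullSecrets.

                                        # Create the registry credential (server and account values are placeholders)\nkubectl create secret docker-registry regcred --docker-server=registry.example.com --docker-username=myuser --docker-password=mypassword\n

                                        apiVersion: v1\nkind: Pod\nmetadata:\n  name: private-image-pod\nspec:\n  containers:\n  - name: main\n    image: registry.example.com/myapp:latest # hypothetical private image\n  imagePullSecrets:\n  - name: regcred # the Secret created above\n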

                                        "},{"location":"en/admin/kpanda/custom-resources/create.html","title":"CustomResourceDefinition (CRD)","text":"

                                        In Kubernetes, all objects are abstracted as resources; Pod, Deployment, Service, Volume, and others are the default resources provided by Kubernetes. These cover most daily operations and maintenance needs, but in some special cases the built-in resources cannot meet business requirements. To extend the capabilities of the Kubernetes API, CustomResourceDefinition (CRD) was introduced.

                                        The container management module supports interface-based management of custom resources, and its main features are as follows:

                                        • Obtain the list and detailed information of custom resources under the cluster
                                        • Create custom resources based on YAML
                                        • Create a CR (Custom Resource) instance based on YAML
                                        • Delete custom resources
                                        "},{"location":"en/admin/kpanda/custom-resources/create.html#prerequisites","title":"Prerequisites","text":"
                                        • The container management module has been integrated with a Kubernetes cluster, or a Kubernetes cluster has been created, and the cluster's UI can be accessed.

                                        • A namespace and a user have been created, and the user has been granted the Cluster Admin role. For details, refer to Namespace Authorization.

                                        "},{"location":"en/admin/kpanda/custom-resources/create.html#create-crd-via-yaml","title":"Create CRD via YAML","text":"
                                        1. Click a cluster name to enter Cluster Details .

                                        2. In the left navigation bar, click Custom Resource , and click the YAML Create button in the upper right corner.

                                        3. On the Create with YAML page, fill in the YAML statement and click OK .

                                        4. Return to the custom resource list page, and you can view the custom resource named crontabs.stable.example.com just created.

                                        Custom resource example:

                                        CRD example
                                        apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  name: crontabs.stable.example.com\nspec:\n  group: stable.example.com\n  versions:\n    - name: v1\n      served: true\n      storage: true\n      schema:\n        openAPIV3Schema:\n          type: object\n          properties:\n            spec:\n              type: object\n              properties:\n                cronSpec:\n                  type: string\n                image:\n                  type: string\n                replicas:\n                  type: integer\n  scope: Namespaced\n  names:\n    plural: crontabs\n    singular: crontab\n    kind: CronTab\n    shortNames:\n    - ct\n
                                        "},{"location":"en/admin/kpanda/custom-resources/create.html#create-a-custom-resource-example-via-yaml","title":"Create a custom resource example via YAML","text":"
                                        1. Click a cluster name to enter Cluster Details .

                                        2. In the left navigation bar, click Custom Resource , and click the YAML Create button in the upper right corner.

                                        3. Click the custom resource named crontabs.stable.example.com , enter the details, and click the YAML Create button in the upper right corner.

                                        4. On the Create with YAML page, fill in the YAML statement and click OK .

                                        5. Return to the details page of crontabs.stable.example.com , and you can view the custom resource named my-new-cron-object just created.

                                        CR Example:

                                        CR example
                                        apiVersion: \"stable.example.com/v1\"\nkind: CronTab\nmetadata:\n  name: my-new-cron-object\nspec:\n  cronSpec: \"* * * * */5\"\n  image: my-awesome-cron-image\n
                                        "},{"location":"en/admin/kpanda/gpu/index.html","title":"Overview of GPU Management","text":"

                                        This article introduces the Suanova container management platform's capabilities for unified operations and management of heterogeneous resources, with a focus on GPUs.

                                        "},{"location":"en/admin/kpanda/gpu/index.html#background","title":"Background","text":"

                                        With the rapid development of emerging technologies such as large-scale AI models and autonomous driving, enterprises face growing demand for compute-intensive tasks and data processing. Traditional CPU-based compute architectures can no longer keep up with these requirements. Heterogeneous computing represented by GPUs has been widely adopted due to its unique advantages in processing large-scale data, performing complex calculations, and real-time graphics rendering.

                                        Meanwhile, due to the lack of experience and professional solutions in scheduling and managing heterogeneous resources, the utilization efficiency of GPU devices is extremely low, resulting in high AI production costs for enterprises. The challenge of reducing costs, increasing efficiency, and improving the utilization of GPUs and other heterogeneous resources has become a pressing issue for many enterprises.

                                        "},{"location":"en/admin/kpanda/gpu/index.html#introduction-to-gpu-capabilities","title":"Introduction to GPU Capabilities","text":"

                                        The Suanova container management platform supports unified scheduling and operations management of GPUs, NPUs, and other heterogeneous resources, fully unleashing the computational power of GPU resources, and accelerating the development of enterprise AI and other emerging applications. The GPU management capabilities of Suanova are as follows:

                                        • Support for unified management of heterogeneous computing resources from domestic and foreign manufacturers such as NVIDIA, Huawei Ascend, and Iluvatar.
                                        • Support for multi-card heterogeneous scheduling within the same cluster, with automatic recognition of GPUs in the cluster.
                                        • Support for native management solutions for NVIDIA GPUs, vGPUs, and MIG, with cloud native capabilities.
                                        • Support for partitioning a single physical card among different tenants, allocating GPU resources to tenants and containers based on compute power and memory quotas.
                                        • Support for multi-dimensional GPU resource monitoring at the cluster, node, and application levels, assisting operators in managing GPU resources.
                                        • Compatibility with various training frameworks such as TensorFlow and PyTorch.
                                        "},{"location":"en/admin/kpanda/gpu/index.html#introduction-to-gpu-operator","title":"Introduction to GPU Operator","text":"

                                        Similar to regular computer hardware, NVIDIA GPUs, as physical devices, need to have the NVIDIA GPU driver installed in order to be used. To reduce the cost of using GPUs on Kubernetes, NVIDIA provides the NVIDIA GPU Operator component to manage various components required for using NVIDIA GPUs. These components include the NVIDIA driver (for enabling CUDA), NVIDIA container runtime, GPU node labeling, DCGM-based monitoring, and more. In theory, users only need to plug the GPU card into a compute device managed by Kubernetes, and they can use all the capabilities of NVIDIA GPUs through the GPU Operator. For more information about NVIDIA GPU Operator, refer to the NVIDIA official documentation. For deployment instructions, refer to Offline Installation of GPU Operator.

                                        Architecture diagram of NVIDIA GPU Operator:

                                        "},{"location":"en/admin/kpanda/gpu/FAQ.html","title":"GPU FAQs","text":""},{"location":"en/admin/kpanda/gpu/FAQ.html#gpu-processes-are-not-visible-while-running-nvidia-smi-inside-a-pod","title":"GPU processes are not visible while running nvidia-smi inside a pod","text":"

                                        Q: When running the nvidia-smi command inside a pod that uses a GPU, no GPU process information is visible, in both full-card mode and vGPU mode.

                                        A: Due to PID namespace isolation, GPU processes are not visible inside the Pod. To view GPU processes, you can use one of the following methods:

                                        • Configure the workload using the GPU with hostPID: true to enable viewing PIDs on the host (a sketch follows this list).
                                        • Run the nvidia-smi command in the driver pod of the gpu-operator to view processes.
                                        • Run the chroot /run/nvidia/driver nvidia-smi command on the host to view processes.
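
                                        A minimal sketch of the first method (the pod name, image, and resource request are illustrative):

                                        apiVersion: v1\nkind: Pod\nmetadata:\n  name: gpu-hostpid-demo # hypothetical name\nspec:\n  hostPID: true # share the host PID namespace so nvidia-smi can see GPU processes\n  containers:\n  - name: main\n    image: nvidia/cuda:12.2.0-base-ubuntu22.04 # placeholder image\n    command: [\"sleep\", \"86400\"]\n    resources:\n      limits:\n        nvidia.com/gpu: 1\n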
                                        "},{"location":"en/admin/kpanda/gpu/Iluvatar_usage.html","title":"How to Use Iluvatar GPU in Applications","text":"

                                        This section describes how to use Iluvatar virtual GPU on AI platform.

                                        "},{"location":"en/admin/kpanda/gpu/Iluvatar_usage.html#prerequisites","title":"Prerequisites","text":"
                                        • The AI platform container management platform has been deployed and is running normally.
                                        • The container management module has been integrated with a Kubernetes cluster or a Kubernetes cluster has been created, and the UI interface of the cluster can be accessed.
                                        • The Iluvatar GPU driver has been installed on the current cluster. Refer to the Iluvatar official documentation for driver installation instructions, or contact the Suanova ecosystem team for enterprise-level support at peg-pem@daocloud.io.
                                        • The GPUs in the current cluster have not undergone any virtualization operations and are not occupied by other applications.
                                        "},{"location":"en/admin/kpanda/gpu/Iluvatar_usage.html#procedure","title":"Procedure","text":""},{"location":"en/admin/kpanda/gpu/Iluvatar_usage.html#configuration-via-user-interface","title":"Configuration via User Interface","text":"
                                        1. Check if the GPU card in the cluster has been detected. Click Clusters -> Cluster Settings -> Addon Plugins , and check if the proper GPU type has been automatically enabled and detected. Currently, the cluster will automatically enable GPU and set the GPU type as Iluvatar .

                                        2. Deploy a workload. Click Clusters -> Workloads and deploy a workload using an image. After selecting the GPU type as Iluvatar, configure the GPU resources used by the application:

                                          • Physical Card Count (iluvatar.ai/vcuda-core): Indicates the number of physical cards that the current pod needs to mount. The input value must be an integer and less than or equal to the number of cards on the host machine.

                                          • Memory Usage (iluvatar.ai/vcuda-memory): Indicates the amount of GPU memory occupied by each card. The value is in MB, with a minimum value of 1 and a maximum value equal to the entire memory of the card.

                                          If there are any issues with the configuration values, scheduling failures or resource allocation failures may occur.

                                        "},{"location":"en/admin/kpanda/gpu/Iluvatar_usage.html#configuration-via-yaml","title":"Configuration via YAML","text":"

                                        To request GPU resources for a workload, add iluvatar.ai/vcuda-core: 1 and iluvatar.ai/vcuda-memory: 200 to the workload's resource limits. These parameters configure the application to use the physical card's resources.

                                        apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-iluvatar-gpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-iluvatar-gpu-demo\n  template:\n    metadata:\n      labels:\n        app: full-iluvatar-gpu-demo\n    spec:\n      containers:\n      - image: nginx:perl\n        name: container-0\n        resources:\n          limits:\n            cpu: 250m\n            iluvatar.ai/vcuda-core: '1'\n            iluvatar.ai/vcuda-memory: '200'\n            memory: 512Mi\n          requests:\n            cpu: 250m\n            memory: 512Mi\n      imagePullSecrets:\n      - name: default-secret\n
                                        "},{"location":"en/admin/kpanda/gpu/dynamic-regulation.html","title":"GPU Scheduling Configuration (Binpack and Spread)","text":"

                                        This page introduces how to reduce GPU resource fragmentation and prevent single points of failure through Binpack and Spread when using NVIDIA vGPU, achieving advanced scheduling for vGPU. The AI platform provides Binpack and Spread scheduling policies across two dimensions, clusters and workloads, meeting different usage requirements in various scenarios.

                                        "},{"location":"en/admin/kpanda/gpu/dynamic-regulation.html#prerequisites","title":"Prerequisites","text":"
                                        • GPU devices are correctly installed on the cluster nodes.
                                        • The gpu-operator component and Nvidia-vgpu component are correctly installed in the cluster.
                                        • The NVIDIA vGPU type appears under GPU mode in the cluster's node list.
                                        "},{"location":"en/admin/kpanda/gpu/dynamic-regulation.html#use-cases","title":"Use Cases","text":"
                                        • Scheduling policy based on GPU dimension

                                          • Binpack: Prioritizes using the same GPU on a node, suitable for increasing GPU utilization and reducing resource fragmentation.
                                          • Spread: Multiple Pods are distributed across different GPUs on nodes, suitable for high availability scenarios to avoid single card failures.
                                        • Scheduling policy based on node dimension

                                          • Binpack: Multiple Pods prioritize using the same node, suitable for increasing GPU utilization and reducing resource fragmentation.
                                          • Spread: Multiple Pods are distributed across different nodes, suitable for high availability scenarios to avoid single node failures.
                                        "},{"location":"en/admin/kpanda/gpu/dynamic-regulation.html#use-binpack-and-spread-at-cluster-level","title":"Use Binpack and Spread at Cluster-Level","text":"

                                        Note

                                        By default, workloads will follow the cluster-level Binpack and Spread. If a workload sets its own Binpack and Spread scheduling policies that differ from the cluster, the workload will prioritize its own scheduling policy.

                                        1. On the Clusters page, select the cluster for which you want to adjust the Binpack and Spread scheduling policies. Click the \u2507 icon on the right and select GPU Scheduling Configuration from the dropdown list.

                                        2. Adjust the GPU scheduling configuration according to your business scenario, and click OK to save.

                                        "},{"location":"en/admin/kpanda/gpu/dynamic-regulation.html#use-binpack-and-spread-at-workload-level","title":"Use Binpack and Spread at Workload-Level","text":"

                                        Note

                                        When the Binpack and Spread scheduling policies at the workload level conflict with the cluster-level configuration, the workload-level configuration takes precedence.

                                        Follow the steps below to create a deployment using an image and configure Binpack and Spread scheduling policies within the workload.

                                        1. Click Clusters in the left navigation bar, then click the name of the target cluster to enter the Cluster Details page.

                                        2. On the Cluster Details page, click Workloads -> Deployments in the left navigation bar, then click the Create by Image button in the upper right corner of the page.

                                        3. Fill in the Basic Information and Container Settings in sequence. In the Container Configuration section, enable GPU configuration and select NVIDIA vGPU as the GPU type. Click Advanced Settings, enable the Binpack / Spread scheduling policy, and adjust the GPU scheduling configuration according to your business scenario. After configuration, click Next to proceed to Service Settings and Advanced Settings. Finally, click OK at the bottom right of the page to complete the creation. The YAML equivalent is sketched below.
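
                                        For workloads defined in YAML, a minimal sketch using the hami.io scheduler-policy annotations (the same annotations shown on the GPU Scheduling Configuration page; the pod name and image are placeholders):

                                        apiVersion: v1\nkind: Pod\nmetadata:\n  name: vgpu-spread-demo # hypothetical name\n  annotations:\n    hami.io/node-scheduler-policy: \"spread\" # distribute Pods across nodes\n    hami.io/gpu-scheduler-policy: \"spread\" # distribute Pods across GPUs\nspec:\n  containers:\n  - name: main\n    image: nvidia/cuda:12.2.0-base-ubuntu22.04 # placeholder image\n    command: [\"sleep\", \"86400\"]\n    resources:\n      limits:\n        nvidia.com/gpu: 1\n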

                                        "},{"location":"en/admin/kpanda/gpu/gpu_matrix.html","title":"GPU Support Matrix","text":"

                                        This page explains the matrix of supported GPUs and operating systems for AI platform.

                                        "},{"location":"en/admin/kpanda/gpu/gpu_matrix.html#nvidia-gpu","title":"NVIDIA GPU","text":"GPU Manufacturer and Type Supported GPU Models Compatible Operating System (Online) Recommended Kernel Recommended Operating System and Kernel Installation Documentation NVIDIA GPU (Full Card/vGPU)
                                        • NVIDIA Fermi (2.1) Architecture:
                                        • NVIDIA GeForce 400 Series
                                        • NVIDIA Quadro 4000 Series
                                        • NVIDIA Tesla 20 Series
                                        • NVIDIA Ampere Architecture Series (A100; A800; H100)
                                        CentOS 7
                                        • Kernel 3.10.0-123 ~ 3.10.0-1160
                                        • Kernel Reference Document
                                        • Recommended Operating System with Proper Kernel Version
                                        Operating System: CentOS 7.9; Kernel Version: 3.10.0-1160 Offline Installation with GPU Operator CentOS 8 Kernel 4.18.0-80 ~ 4.18.0-348 Ubuntu 20.04 Kernel 5.4 Ubuntu 22.04 Kernel 5.19 RHEL 7 Kernel 3.10.0-123 ~ 3.10.0-1160 RHEL 8 Kernel 4.18.0-80 ~ 4.18.0-348 NVIDIA MIG
                                        • Ampere Architecture Series:
                                        • A100
                                        • A800
                                        • H100
                                        CentOS 7 Kernel 3.10.0-123 ~ 3.10.0-1160 Operating System: CentOS 7.9; Kernel Version: 3.10.0-1160 Offline Installation with GPU Operator CentOS 8 Kernel 4.18.0-80 ~ 4.18.0-348 Ubuntu 20.04 Kernel 5.4 Ubuntu 22.04 Kernel 5.19 RHEL 7 Kernel 3.10.0-123 ~ 3.10.0-1160 RHEL 8 Kernel 4.18.0-80 ~ 4.18.0-348"},{"location":"en/admin/kpanda/gpu/gpu_matrix.html#ascend-npu","title":"Ascend NPU","text":"GPU Manufacturer and Type Supported NPU Models Compatible Operating System (Online) Recommended Kernel Recommended Operating System and Kernel Installation Documentation Ascend (Ascend 310)
                                        • Ascend 310;
                                        • Ascend 310P;
                                        Ubuntu 20.04 Details refer to: Kernel Version Requirements Operating System: CentOS 7.9; Kernel Version: 3.10.0-1160 300 and 310P Driver Documentation CentOS 7.6 CentOS 8.2 KylinV10SP1 Operating System openEuler Operating System Ascend (Ascend 910P) Ascend 910 Ubuntu 20.04 Details refer to: Kernel Version Requirements Operating System: CentOS 7.9; Kernel Version: 3.10.0-1160 910 Driver Documentation CentOS 7.6 CentOS 8.2 KylinV10SP1 Operating System openEuler Operating System"},{"location":"en/admin/kpanda/gpu/gpu_matrix.html#iluvatar-gpu","title":"Iluvatar GPU","text":"GPU Manufacturer and Type Supported GPU Models Compatible Operating System (Online) Recommended Kernel Recommended Operating System and Kernel Installation Documentation Iluvatar (Iluvatar vGPU)
                                        • BI100;
                                        • MR100;
                                        CentOS 7
                                        • Kernel 3.10.0-957.el7.x86_64 ~ 3.10.0-1160.42.2.el7.x86_64
                                        Operating System: CentOS 7.9; Kernel Version: 3.10.0-1160 Coming Soon CentOS 8
                                        • Kernel 4.18.0-80.el8.x86_64 ~ 4.18.0-305.19.1.el8_4.x86_64
                                        Ubuntu 20.04
                                        • Kernel 4.15.0-20-generic ~ 4.15.0-160-generic
                                        • Kernel 5.4.0-26-generic ~ 5.4.0-89-generic
                                        • Kernel 5.8.0-23-generic ~ 5.8.0-63-generic
                                        Ubuntu 21.04
                                        • Kernel 4.15.0-20-generic ~ 4.15.0-160-generic
                                        • Kernel 5.4.0-26-generic ~ 5.4.0-89-generic
                                        • Kernel 5.8.0-23-generic ~ 5.8.0-63-generic
                                        openEuler 22.03 LTS
                                        • Kernel version >= 5.1 and <= 5.10
                                        "},{"location":"en/admin/kpanda/gpu/gpu_scheduler_config.html","title":"GPU Scheduling Configuration","text":"

                                        This document introduces GPU scheduling configuration, which enables advanced scheduling policies. Currently, the primary implementation is the vGPU scheduling policy.

                                        "},{"location":"en/admin/kpanda/gpu/gpu_scheduler_config.html#vgpu-resource-scheduling-configuration","title":"vGPU Resource Scheduling Configuration","text":"

                                        vGPU provides two resource-usage policies, binpack and spread, applied at two dimensions: the node level and the GPU level. The choice depends on whether you want to distribute workloads across different nodes and GPUs (for availability) or concentrate them on the same node and GPU (to improve resource utilization and reduce fragmentation).

                                        You can modify the scheduling policy in your cluster by following these steps:

                                        1. Go to the cluster management list in the container management interface.
                                        2. Click the settings button ... next to the cluster.
                                        3. Click GPU Scheduling Configuration.
                                        4. Toggle the scheduling policy between node-level and GPU-level. By default, the node-level policy is binpack, and the GPU-level policy is spread.

                                        The above steps modify the cluster-level scheduling policy. Users can also specify their own scheduling policy at the workload level to change the scheduling results. Below is an example of modifying the scheduling policy at the workload level:

                                        apiVersion: v1\nkind: Pod\nmetadata:\n  name: gpu-pod\n  annotations:\n    hami.io/node-scheduler-policy: \"binpack\"\n    hami.io/gpu-scheduler-policy: \"binpack\"\nspec:\n  containers:\n    - name: ubuntu-container\n      image: ubuntu:18.04\n      command: [\"bash\", \"-c\", \"sleep 86400\"]\n      resources:\n        limits:\n          nvidia.com/gpu: 1\n          nvidia.com/gpumem: 3000\n          nvidia.com/gpucores: 30\n

                                        In this example, both the node- and GPU-level scheduling policies are set to binpack. This ensures that the workload is scheduled to maximize resource utilization and reduce fragmentation.

                                        "},{"location":"en/admin/kpanda/gpu/vgpu_quota.html","title":"GPU Quota Management","text":"

                                        This section describes how to use vGPU capabilities on the AI platform.

                                        "},{"location":"en/admin/kpanda/gpu/vgpu_quota.html#prerequisites","title":"Prerequisites","text":"

                                        The proper GPU driver (NVIDIA GPU, NVIDIA MIG, Iluvatar, Ascend) has been deployed on the current cluster either through an Operator or manually.

                                        "},{"location":"en/admin/kpanda/gpu/vgpu_quota.html#procedure","title":"Procedure","text":"

                                        Follow these steps to manage GPU quotas in AI platform:

                                        1. Go to Namespaces and click Quota Management to configure the GPU resources that can be used by a specific namespace.

                                        2. The currently supported card types for quota management in a namespace are: NVIDIA vGPU, NVIDIA MIG, Iluvatar, and Ascend.

                                        3. NVIDIA vGPU Quota Management: Configure the specific quota that can be used. This will create a ResourcesQuota CR.

                                          • Physical Card Count (nvidia.com/vgpu): Indicates the number of physical cards that the current pod needs to mount. The input value must be an integer and less than or equal to the number of cards on the host machine.
                                          • GPU Core Count (nvidia.com/gpucores): Indicates the GPU compute power occupied by each card. The value ranges from 0 to 100. If set to 0, no isolation is enforced; if set to 100, the entire card is used exclusively.
                                          • GPU Memory Usage (nvidia.com/gpumem): Indicates the amount of GPU memory occupied by each card. The value is in MB, with a minimum value of 1 and a maximum value equal to the entire memory of the card.
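
                                        For reference, a minimal sketch of what such a quota could look like as a standard Kubernetes ResourceQuota (the namespace name and values are illustrative; the platform's ResourcesQuota CR may differ in shape):

                                        apiVersion: v1\nkind: ResourceQuota\nmetadata:\n  name: vgpu-quota\n  namespace: demo-ns # hypothetical namespace\nspec:\n  hard:\n    requests.nvidia.com/vgpu: \"2\" # at most 2 physical cards mounted in total\n    requests.nvidia.com/gpucores: \"60\" # total compute-power quota\n    requests.nvidia.com/gpumem: \"4096\" # total GPU memory quota in MB\n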
                                        "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html","title":"Installation of Ascend NPU Components","text":"

                                        This chapter provides installation guidance for Ascend NPU drivers, Device Plugin, NPU-Exporter, and other components.

                                        "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html#prerequisites","title":"Prerequisites","text":"
                                        1. Before installation, confirm the supported NPU models. For details, refer to the Ascend NPU Matrix.
                                        2. Ensure that the kernel version required for the proper NPU model is compatible. For more details, refer to the Ascend NPU Matrix.
                                        3. Prepare the basic Kubernetes environment.
                                        "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html#installation-steps","title":"Installation Steps","text":"

                                        Before using NPU resources, you need to complete the firmware installation, NPU driver installation, Docker Runtime installation, user creation, log directory creation, and NPU Device Plugin installation. Refer to the following steps for details.

                                        "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html#install-firmware","title":"Install Firmware","text":"
                                        1. Confirm that the kernel version falls within the range supported by the \"binary installation\" method; if so, you can install the NPU driver firmware directly.
                                        2. For firmware and driver downloads, refer to: Firmware Download Link
                                        3. For firmware installation, refer to: Install NPU Driver Firmware
                                        "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html#install-npu-driver","title":"Install NPU Driver","text":"
                                        1. If the driver is not installed, refer to the official Ascend documentation for installation. For example, for Ascend910, refer to: 910 Driver Installation Document.
                                        2. Run the command npu-smi info, and if the NPU information is returned normally, it indicates that the NPU driver and firmware are ready.
                                        "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html#install-docker-runtime","title":"Install Docker Runtime","text":"
                                        1. Download Ascend Docker Runtime

                                          Community edition download link: https://www.hiascend.com/zh/software/mindx-dl/community

                                          wget -c https://mindx.obs.cn-south-1.myhuaweicloud.com/OpenSource/MindX/MindX%205.0.RC2/MindX%20DL%205.0.RC2/Ascend-docker-runtime_5.0.RC2_linux-x86_64.run\n

                                          Install to the specified path by executing the following two commands in order, with parameters specifying the installation path:

                                          chmod u+x Ascend-docker-runtime_5.0.RC2_linux-x86_64.run\n./Ascend-docker-runtime_5.0.RC2_linux-x86_64.run --install --install-path=<path>\n
                                        2. Modify the containerd configuration file

                                          If containerd has no default configuration file, execute the following three commands in order to create the configuration file:

                                          mkdir /etc/containerd \ncontainerd config default > /etc/containerd/config.toml \nvim /etc/containerd/config.toml\n

                                          If containerd has a configuration file:

                                          vim /etc/containerd/config.toml\n

                                          Modify the runtime installation path according to the actual situation, mainly modifying the runtime field:

                                          ... \n[plugins.\"io.containerd.monitor.v1.cgroups\"]\n   no_prometheus = false  \n[plugins.\"io.containerd.runtime.v1.linux\"]\n   shim = \"containerd-shim\"\n   runtime = \"/usr/local/Ascend/Ascend-Docker-Runtime/ascend-docker-runtime\"\n   runtime_root = \"\"\n   no_shim = false\n   shim_debug = false\n [plugins.\"io.containerd.runtime.v2.task\"]\n   platforms = [\"linux/amd64\"]\n...\n

                                          Execute the following command to restart containerd:

                                          systemctl restart containerd\n
                                        "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html#create-a-user","title":"Create a User","text":"

                                        Execute the following commands on the node where the components are installed to create a user.

                                        # Ubuntu operating system\nuseradd -d /home/hwMindX -u 9000 -m -s /usr/sbin/nologin hwMindX\nusermod -a -G HwHiAiUser hwMindX\n# CentOS operating system\nuseradd -d /home/hwMindX -u 9000 -m -s /sbin/nologin hwMindX\nusermod -a -G HwHiAiUser hwMindX\n
                                        "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html#create-log-directory","title":"Create Log Directory","text":"

                                        Create the parent directory for component logs and the log directories for each component on the proper node, and set the appropriate owner and permissions for the directories. Execute the following command to create the parent directory for component logs.

                                        mkdir -m 755 /var/log/mindx-dl\nchown root:root /var/log/mindx-dl\n

                                        Execute the following command to create the Device Plugin component log directory.

                                        mkdir -m 750 /var/log/mindx-dl/devicePlugin\nchown root:root /var/log/mindx-dl/devicePlugin\n

                                        Note

                                        Please create the proper log directory for each required component. In this example, only the Device Plugin component is needed. For other components, refer to the official documentation.

                                        "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html#create-node-labels","title":"Create Node Labels","text":"

                                        Refer to the following commands to create labels on the proper nodes:

                                        # Create this label on compute nodes where the driver is installed\nkubectl label node {nodename} huawei.com.ascend/Driver=installed\nkubectl label node {nodename} node-role.kubernetes.io/worker=worker\nkubectl label node {nodename} workerselector=dls-worker-node\nkubectl label node {nodename} host-arch=huawei-arm # or host-arch=huawei-x86, depending on the actual architecture\nkubectl label node {nodename} accelerator=huawei-Ascend910 # adjust according to the actual model\n# Create this label on control plane nodes\nkubectl label node {nodename} masterselector=dls-master-node\n
                                        "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html#install-device-plugin-and-npuexporter","title":"Install Device Plugin and NpuExporter","text":"

                                        Functional module path: Container Management -> Cluster. Click the name of the target cluster, then click Helm Apps -> Helm Charts in the left navigation bar, and search for ascend-mindxdl. A command-line sketch follows the parameter list below.

                                        • DevicePlugin: Provides a general device plugin mechanism and standard device API interface for Kubernetes to use devices. It is recommended to use the default image and version.
                                        • NpuExporter: Based on the Prometheus/Telegraf ecosystem, this component provides interfaces to help users monitor the Ascend series AI processors and container-level allocation status. It is recommended to use the default image and version.
                                        • ServiceMonitor: Disabled by default. If enabled, you can view NPU-related monitoring in the observability module. Before enabling it, ensure that insight-agent is installed and running; otherwise, the ascend-mindxdl installation will fail.
                                        • isVirtualMachine: Disabled by default. If the NPU node is a virtual machine scenario, enable the isVirtualMachine parameter.
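
                                        For reference, a hedged command-line sketch of the same installation (the repo alias, namespace, and value keys are assumptions; check the chart's actual values before use):

                                        # Hedged sketch: repo alias, namespace, and value keys are assumptions\nhelm repo update\nhelm install ascend-mindxdl <repo>/ascend-mindxdl -n mindx-dl --create-namespace --set serviceMonitor.enabled=true --set isVirtualMachine=false\n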

                                        After a successful installation, two components will appear under the proper namespace, as shown below:

                                        At the same time, the proper NPU information will also appear on the node information:

                                        Once everything is ready, you can select the proper NPU device when creating a workload through the page, as shown below:

                                        Note

                                        For detailed usage information, refer to Using Ascend NPU.

                                        "},{"location":"en/admin/kpanda/gpu/ascend/vnpu.html","title":"Enable Ascend Virtualization","text":"

                                        Ascend virtualization is divided into dynamic virtualization and static virtualization. This document describes how to enable and use Ascend static virtualization capabilities.

                                        "},{"location":"en/admin/kpanda/gpu/ascend/vnpu.html#prerequisites","title":"Prerequisites","text":"
                                        • Setup of Kubernetes cluster environment.
                                        • The current NPU node has the Ascend driver installed.
                                        • The current NPU node has the Ascend-Docker-Runtime component installed.
                                        • The NPU MindX DL suite is installed on the current cluster.
                                        • Supported NPU models:

                                          • Ascend 310P, verified
                                          • Ascend 910b (20 cores), verified
                                          • Ascend 910 (32 cores), officially supported but not verified
                                          • Ascend 910 (30 cores), officially supported but not verified

                                          For more details, refer to the official virtualization hardware documentation.

                                        Refer to the Ascend NPU Component Installation Documentation for the basic environment setup.

                                        "},{"location":"en/admin/kpanda/gpu/ascend/vnpu.html#enable-virtualization-capabilities","title":"Enable Virtualization Capabilities","text":"

                                        To enable virtualization capabilities, you need to manually modify the startup parameters of the ascend-device-plugin-daemonset component. Refer to the following command:

                                        - device-plugin -useAscendDocker=true -volcanoType=false -presetVirtualDevice=true\n- logFile=/var/log/mindx-dl/devicePlugin/devicePlugin.log -logLevel=0\n
                                        "},{"location":"en/admin/kpanda/gpu/ascend/vnpu.html#split-vnpu-instances","title":"Split VNPU Instances","text":"

                                        Static virtualization requires manually splitting VNPU instances. Refer to the following command:

                                        npu-smi set -t create-vnpu -i 13 -c 0 -f vir02\n
                                        • i refers to the card id.
                                        • c refers to the chip id.
                                        • vir02 refers to the split specification template.

                                        Card id and chip id can be queried using npu-smi info. The split specifications can be found in the Ascend official templates.

                                        After splitting the instance, you can query the split results using the following command:

                                        npu-smi info -t info-vnpu -i 13 -c 0\n

                                        The query result is as follows:

                                        "},{"location":"en/admin/kpanda/gpu/ascend/vnpu.html#restart-ascend-device-plugin-daemonset","title":"Restart ascend-device-plugin-daemonset","text":"

                                        After splitting the instance, manually restart the device-plugin pod, then use the kubectl describe command to check the resources of the registered node:

                                        kubectl describe node {{nodename}}\n
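
                                        For the restart step itself, a hedged sketch (the namespace and label are assumptions; match them to your actual deployment):

                                        # Deleting the DaemonSet-managed pod forces a restart\nkubectl -n mindx-dl delete pod -l app=ascend-device-plugin-daemonset\n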

                                        "},{"location":"en/admin/kpanda/gpu/ascend/vnpu.html#how-to-use-the-device","title":"How to Use the Device","text":"

                                        When creating an application, specify the resource key as shown in the following YAML:

                                        ......\nresources:\n  requests:\n    huawei.com/Ascend310P-2c: 1\n  limits:\n    huawei.com/Ascend310P-2c: 1\n......\n
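
                                        For context, a complete Pod sketch using this resource key (the pod name and image are placeholders):

                                        apiVersion: v1\nkind: Pod\nmetadata:\n  name: vnpu-demo # hypothetical name\nspec:\n  containers:\n  - name: main\n    image: ubuntu:20.04 # placeholder image\n    command: [\"sleep\", \"86400\"]\n    resources:\n      requests:\n        huawei.com/Ascend310P-2c: 1\n      limits:\n        huawei.com/Ascend310P-2c: 1\n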
                                        "},{"location":"en/admin/kpanda/gpu/metax/usemetax.html","title":"MetaX GPU Component Installation and Usage","text":"

                                        This chapter provides installation guidance for MetaX's gpu-extensions, gpu-operator, and other components, as well as usage methods for both the full GPU card and vGPU modes.

                                        "},{"location":"en/admin/kpanda/gpu/metax/usemetax.html#prerequisites","title":"Prerequisites","text":"
                                        1. The required tar package has been downloaded and installed from the MetaX Software Center. This article uses metax-gpu-k8s-package.0.7.10.tar.gz as an example.
                                        2. Prepare the basic Kubernetes environment.
                                        "},{"location":"en/admin/kpanda/gpu/metax/usemetax.html#component-introduction","title":"Component Introduction","text":"

                                        Metax provides two helm-chart packages: metax-extensions and gpu-operator. Depending on the usage scenario, different components can be selected for installation.

                                        1. Metax-extensions: Includes two components, gpu-device and gpu-label. When using the Metax-extensions solution, the user's application container image needs to be built based on the MXMACA\u00ae base image. Moreover, Metax-extensions is only suitable for scenarios using the full GPU card.
                                        2. gpu-operator: Includes components such as gpu-device, gpu-label, driver-manager, container-runtime, and operator-controller. When using the gpu-operator solution, users can choose to create application container images that do not include the MXMACA\u00ae SDK. The gpu-operator is suitable for both full GPU card and vGPU scenarios.
                                        "},{"location":"en/admin/kpanda/gpu/metax/usemetax.html#operation-steps","title":"Operation Steps","text":"
                                        1. Extract the following from the /home/metax/metax-docs/k8s/metax-gpu-k8s-package.0.7.10.tar.gz file:

                                          • deploy-gpu-extensions.yaml # Deployment YAML
                                          • metax-gpu-extensions-0.7.10.tgz, metax-operator-0.7.10.tgz # Helm chart files
                                          • metax-k8s-images.0.7.10.run # Offline image
                                        2. Check if the system has the driver installed:

                                          $ lsmod | grep metax \nmetax 1605632 0 \nttm 86016 3 drm_vram_helper,metax,drm_ttm_helper \ndrm 618496 7 drm_kms_helper,drm_vram_helper,ast,metax,drm_ttm_helper,ttm\n
                                          • If nothing is printed, the driver package has not been installed; if output is shown, it has been installed.
                                          • When using metax-operator, it is not recommended to pre-install the MXMACA kernel driver on worker nodes; if it has already been installed, there is no need to uninstall it.
                                        3. Install the driver.

                                        "},{"location":"en/admin/kpanda/gpu/metax/usemetax.html#gpu-extensions","title":"gpu-extensions","text":"
                                        1. Push the image:

                                          tar -xf metax-gpu-k8s-package.0.7.10.tar.gz\n./metax-k8s-images.0.7.10.run push {registry}/metax\n
                                        2. Push the Helm Chart:

                                          helm plugin install https://github.com/chartmuseum/helm-push\nhelm repo add --username rootuser --password rootpass123  metax http://172.16.16.5:8081\nhelm cm-push metax-operator-0.7.10.tgz metax\nhelm cm-push metax-gpu-extensions-0.7.10.tgz metax\n
                                        3. Install metax-gpu-extensions on the AI computing platform.

                                          After successful deployment, resources can be viewed on the node.

                                        4. After the deployment succeeds, you can see the Metax GPU label on the node.

                                        "},{"location":"en/admin/kpanda/gpu/metax/usemetax.html#gpu-operator","title":"gpu-operator","text":"

                                        Known issues when installing gpu-operator:

                                        1. The images for the components metax-operator, gpu-label, gpu-device, and container-runtime must have the amd64 suffix.

                                        2. The image for the metax-maca component is not included in the metax-k8s-images.0.7.13.run package and needs to be separately downloaded, such as maca-mxc500-2.23.0.23-ubuntu20.04-x86_64.tar.xz. After loading it, the image for the metax-maca component needs to be modified again.

                                        3. The image for the metax-driver component needs to be downloaded from https://pub-docstore.metax-tech.com:7001 as the k8s-driver-image.2.23.0.25.run file, and then execute the command k8s-driver-image.2.23.0.25.run push {registry}/metax to push the image to the image repository. After pushing, modify the image address for the metax-driver component.

                                        "},{"location":"en/admin/kpanda/gpu/metax/usemetax.html#using-gpu","title":"Using GPU","text":"

                                        After installation, you can use MetaX GPU in workloads. Note that after enabling the GPU, you need to select the GPU type as Metax GPU.

                                        Enter the container and execute mx-smi to view the GPU usage.

                                        "},{"location":"en/admin/kpanda/gpu/mlu/use-mlu.html","title":"Using Cambricon GPU","text":"

                                        This article introduces how to use Cambricon GPU in the SuanFeng AI computing platform.

                                        "},{"location":"en/admin/kpanda/gpu/mlu/use-mlu.html#prerequisites","title":"Prerequisites","text":"
                                        • The SuanFeng AI computing platform's container management platform has been deployed and is running normally.
                                        • The container management module has either integrated with a Kubernetes cluster or created a Kubernetes cluster, and is able to access the cluster's UI interface.
                                        • The current cluster has installed the Cambricon firmware, drivers, and DevicePlugin components. For installation details, please refer to the official documentation:
                                          • Driver Firmware Installation
                                          • DevicePlugin Installation

                                        When installing DevicePlugin, please disable the --enable-device-type parameter; otherwise, the SuanFeng AI computing platform will not be able to correctly recognize the Cambricon GPU.

                                        "},{"location":"en/admin/kpanda/gpu/mlu/use-mlu.html#introduction-to-cambricon-gpu-modes","title":"Introduction to Cambricon GPU Modes","text":"

                                        Cambricon GPUs have the following modes:

                                        • Full Card Mode: Register the Cambricon GPU as a whole card for use in the cluster.
                                        • Share Mode: Allows one Cambricon GPU to be shared among multiple Pods, with the number of shareable containers set by the virtualization-num parameter.
                                        • Dynamic SMLU Mode: Further refines resource allocation, allowing control over the size of memory and computing power allocated to containers.
                                        • MIM Mode: Allows the Cambricon GPU to be divided into multiple GPUs of fixed specifications for use.
                                        "},{"location":"en/admin/kpanda/gpu/mlu/use-mlu.html#using-cambricon-in-suanfeng-ai-computing-platform","title":"Using Cambricon in SuanFeng AI Computing Platform","text":"

                                        Here, we take the Dynamic SMLU mode as an example:

                                        1. After correctly installing the DevicePlugin and other components, click the proper Cluster -> Cluster Maintenance -> Cluster Settings -> Addon Plugins to check whether the proper GPU type has been automatically enabled and detected.

                                        2. Click the node management page to check if the nodes have correctly recognized the proper GPU type.

                                        3. Deploy workloads. Click the proper Cluster -> Workloads, and deploy workloads using images. After selecting the type (MLU VGPU), you need to configure the GPU resources used by the App:

                                          • GPU Computing Power (cambricon.com/mlu.smlu.vcore): Indicates the percentage of cores the current Pod needs to use.
                                          • GPU Memory (cambricon.com/mlu.smlu.vmemory): Indicates the size of memory the current Pod needs to use, in MB.
                                        "},{"location":"en/admin/kpanda/gpu/mlu/use-mlu.html#using-yaml-configuration","title":"Using YAML Configuration","text":"

                                        Refer to the following YAML file:

                                        apiVersion: v1\nkind: Pod\nmetadata:\n  name: pod1\nspec:\n  restartPolicy: OnFailure\n  containers:\n    - image: ubuntu:16.04\n      name: pod1-ctr\n      command: [\"sleep\"]\n      args: [\"100000\"]\n      resources:\n        limits:\n          cambricon.com/mlu: \"1\" # use this when device type is not enabled, else delete this line\n          #cambricon.com/mlu: \"1\" # uncomment to use when device type is enabled\n          #cambricon.com/mlu.share: \"1\" # uncomment to use device with env-share mode\n          #cambricon.com/mlu.mim-2m.8gb: \"1\" # uncomment to use device with mim mode\n          #cambricon.com/mlu.smlu.vcore: \"100\" # uncomment to use device with dynamic smlu mode\n          #cambricon.com/mlu.smlu.vmemory: \"1024\" # uncomment to use device with dynamic smlu mode\n
                                        "},{"location":"en/admin/kpanda/gpu/nvidia/index.html","title":"NVIDIA GPU Card Usage Modes","text":"

                                        NVIDIA, as a well-known graphics computing provider, offers various software and hardware solutions to enhance computational power. Among them, NVIDIA provides the following three solutions for GPU usage:

                                        "},{"location":"en/admin/kpanda/gpu/nvidia/index.html#full-gpu","title":"Full GPU","text":"

                                        Full GPU refers to allocating the entire NVIDIA GPU to a single user or application. In this configuration, the application can fully occupy all the resources of the GPU and achieve maximum computational performance. Full GPU is suitable for workloads that require a large amount of computational resources and memory, such as deep learning training, scientific computing, etc.

                                        "},{"location":"en/admin/kpanda/gpu/nvidia/index.html#vgpu-virtual-gpu","title":"vGPU (Virtual GPU)","text":"

                                        vGPU is a virtualization technology that allows one physical GPU to be partitioned into multiple virtual GPUs, with each virtual GPU assigned to different virtual machines or users. vGPU enables multiple users to share the same physical GPU and independently use GPU resources in their respective virtual environments. Each virtual GPU can access a certain amount of compute power and memory capacity. vGPU is suitable for virtualized environments and cloud computing scenarios, providing higher resource utilization and flexibility.

                                        "},{"location":"en/admin/kpanda/gpu/nvidia/index.html#mig-multi-instance-gpu","title":"MIG (Multi-Instance GPU)","text":"

                                        MIG is a feature introduced by the NVIDIA Ampere architecture that allows one physical GPU to be divided into multiple physical GPU instances, each of which can be independently allocated to different users or workloads. Each MIG instance has its own compute resources, memory, and PCIe bandwidth, just like an independent virtual GPU. MIG provides finer-grained GPU resource allocation and management and allows dynamic adjustment of the number and size of instances based on demand. MIG is suitable for multi-tenant environments, containerized applications, batch jobs, and other scenarios.

                                        Whether using vGPU in a virtualized environment or MIG on a physical GPU, NVIDIA provides users with more choices and optimized ways to utilize GPU resources. The Suanova container management platform fully supports the above NVIDIA capabilities. Users can easily access the full computational power of NVIDIA GPUs through simple UI operations, thereby improving resource utilization and reducing costs.

                                        • Single Mode: The node only exposes a single type of MIG device on all its GPUs. All GPUs on the node must:
                                          • Be of the same model (e.g., A100-SXM-40GB), with matching MIG profiles only for GPUs of the same model.
                                          • Have MIG configuration enabled, which requires a machine reboot to take effect.
                                          • Create identical GIs and CIs to expose identical MIG devices across all GPUs.
                                        • Mixed Mode: The node exposes a mix of MIG device types across its GPUs. Requesting a specific MIG device type requires specifying the number of compute slices and the total memory provided by that device type.
                                          • All GPUs on the node must be in the same product line (e.g., A100-SXM-40GB).
                                          • Each GPU can enable or disable MIG individually and freely configure any available mixture of MIG device types.
                                          • The k8s-device-plugin running on the node will:
                                            • Expose any GPUs not in MIG mode using the traditional nvidia.com/gpu resource type.
                                            • Expose individual MIG devices using resource types that follow the pattern nvidia.com/mig-<slice_count>g.<memory_size>gb (see the example below).

                                        For detailed instructions on enabling these configurations, refer to Offline Installation of GPU Operator.
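
                                        For illustration, a pod that requests one MIG device under the mixed strategy might be declared as in the following sketch. This is a minimal example assuming a nvidia.com/mig-2g.10gb profile exists on the node; the profile name is illustrative, and the sample image is the one used elsewhere in this document:

                                          apiVersion: v1\nkind: Pod\nmetadata:\n  name: mig-demo\nspec:\n  restartPolicy: OnFailure\n  containers:\n    - name: mig-demo-ctr\n      image: chrstnhntschl/gpu_burn   # sample GPU workload image, same as used elsewhere in this document\n      resources:\n        limits:\n          nvidia.com/mig-2g.10gb: 1   # assumed MIG profile: 2 compute slices, 10 GB memory\n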

                                        "},{"location":"en/admin/kpanda/gpu/nvidia/index.html#how-to-use","title":"How to Use","text":"

                                        You can refer to the following links to quickly start using Suanova's management capabilities for NVIDIA GPUs.

                                        • Using Full NVIDIA GPU
                                        • Using NVIDIA vGPU
                                        • Using NVIDIA MIG
                                        "},{"location":"en/admin/kpanda/gpu/nvidia/full_gpu_userguide.html","title":"Using the Whole NVIDIA GPU Card for an Application","text":"

                                        This section describes how to allocate an entire NVIDIA GPU card to a single application on the AI platform.

                                        "},{"location":"en/admin/kpanda/gpu/nvidia/full_gpu_userguide.html#prerequisites","title":"Prerequisites","text":"
                                        • AI platform container management platform has been deployed and is running properly.
                                        • The container management module has been connected to a Kubernetes cluster or a Kubernetes cluster has been created, and you can access the UI interface of the cluster.
                                        • GPU Operator has been offline installed and NVIDIA DevicePlugin has been enabled on the current cluster. Refer to Offline Installation of GPU Operator for instructions.
                                        • The GPU card in the current cluster has not undergone any virtualization operations or been occupied by other applications.
                                        "},{"location":"en/admin/kpanda/gpu/nvidia/full_gpu_userguide.html#procedure","title":"Procedure","text":""},{"location":"en/admin/kpanda/gpu/nvidia/full_gpu_userguide.html#configuring-via-the-user-interface","title":"Configuring via the User Interface","text":"
                                        1. Check if the cluster has detected the GPUs. Click Clusters -> Cluster Settings -> Addon Plugins to see if it has automatically enabled and detected the proper GPU types. Currently, the cluster will automatically enable GPU and set the GPU Type as Nvidia GPU .

                                        2. Deploy a workload. Click Clusters -> Workloads , and deploy the workload using the image method. After selecting the type ( Nvidia GPU ), configure the number of physical cards used by the application:

                                          Physical Card Count (nvidia.com/gpu): Indicates the number of physical cards that the current pod needs to mount. The input value must be an integer and less than or equal to the number of cards on the host machine.

                                          If the above value is configured incorrectly, scheduling failures and resource allocation issues may occur.
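
                                          After the workload is scheduled, you can optionally confirm from the command line that the node exposes the GPU resource. A minimal sketch, where <node-name> is a placeholder for an actual node name:

                                            kubectl describe node <node-name> | grep nvidia.com/gpu\n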

                                        "},{"location":"en/admin/kpanda/gpu/nvidia/full_gpu_userguide.html#configuring-via-yaml","title":"Configuring via YAML","text":"

                                        To request GPU resources for a workload, add the nvidia.com/gpu: 1 parameter to the resource request and limit configuration in the YAML file. This parameter configures the number of physical cards used by the application.

                                        apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-gpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-gpu-demo\n  template:\n    metadata:\n      labels:\n        app: full-gpu-demo\n    spec:\n      containers:\n      - image: chrstnhntschl/gpu_burn\n        name: container-0\n        resources:\n          requests:\n            cpu: 250m\n            memory: 512Mi\n            nvidia.com/gpu: 1   # Number of GPUs requested\n          limits:\n            cpu: 250m\n            memory: 512Mi\n            nvidia.com/gpu: 1   # Upper limit of GPU usage\n      imagePullSecrets:\n      - name: default-secret\n

                                        Note

                                        When using the nvidia.com/gpu parameter to specify the number of GPUs, the values for requests and limits must be consistent.

                                        "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html","title":"Offline Install gpu-operator","text":"

                                        AI platform comes with pre-installed driver images for the following three operating systems: Ubuntu 22.04, Ubuntu 20.04, and CentOS 7.9. The driver version is 535.104.12. Additionally, it includes the required Toolkit images for each operating system, so users no longer need to manually provide offline toolkit images.

                                        This page demonstrates deployment on the AMD64 architecture with CentOS 7.9 (kernel 3.10.0-1160). If you need to deploy on Red Hat 8.4, refer to Uploading Red Hat gpu-operator Offline Image to the Bootstrap Node Repository and Building Offline Yum Source for Red Hat 8.4.

                                        "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#prerequisites","title":"Prerequisites","text":"
                                        • The kernel version of the cluster nodes where the gpu-operator is to be deployed must be completely consistent. The distribution and GPU card model of the nodes must fall within the scope specified in the GPU Support Matrix.
                                        • When installing the gpu-operator, select v23.9.0+2 or above.
                                        "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#steps","title":"Steps","text":"

                                        To install the gpu-operator plugin for your cluster, follow these steps:

                                        1. Log in to the platform, go to Container Management -> Clusters , and click the target cluster to view its details.

                                        2. On the Helm Charts page, select All Repositories and search for gpu-operator .

                                        3. Select gpu-operator and click Install .

                                        4. Configure the installation parameters for gpu-operator based on the instructions below to complete the installation.

                                        "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#configure-parameters","title":"Configure parameters","text":"
                                        • systemOS : Select the operating system for the host. The current options are Ubuntu 22.04, Ubuntu 20.04, CentOS 7.9, and other. Please choose the correct operating system.
                                        "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#basic-information","title":"Basic information","text":"
                                        • Name : Enter the plugin name
                                        • Namespace : Select the namespace for installing the plugin
                                        • Version: The version of the plugin. Here, we use version v23.9.0+2 as an example.
                                        • Failure Deletion: If the installation fails, it will delete the already installed associated resources. When enabled, Ready Wait will also be enabled by default.
                                        • Ready Wait: When enabled, the application will be marked as successfully installed only when all associated resources are in a ready state.
                                        • Detailed Logs: When enabled, detailed logs of the installation process will be recorded.
                                        "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#advanced-settings","title":"Advanced settings","text":""},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#operator-parameters","title":"Operator parameters","text":"
                                        • InitContainer.image : Configure the CUDA image, recommended default image: nvidia/cuda
                                        • InitContainer.repository : Repository where the CUDA image is located, defaults to nvcr.m.daocloud.io repository
                                        • InitContainer.version : Version of the CUDA image, please use the default parameter
                                        "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#driver-parameters","title":"Driver parameters","text":"
                                        • Driver.enable : Configure whether to deploy the NVIDIA driver on the node, default is enabled. If you have already deployed the NVIDIA driver on the node before using the gpu-operator, please disable this.
                                        • Driver.image : Configure the GPU driver image, recommended default image: nvidia/driver .
                                        • Driver.repository : Repository where the GPU driver image is located, default is NVIDIA's nvcr.io repository.
                                        • Driver.usePrecompiled : Enable the precompiled mode to install the driver.
                                        • Driver.version : Version of the GPU driver image, use default parameters for offline deployment. Configuration is only required for online installation. Different versions of the Driver image exist for different types of operating systems. For more details, refer to Nvidia GPU Driver Versions. Examples of Driver Version for different operating systems are as follows:

                                          Note

                                          When using the built-in operating system version, there is no need to modify the image version. For other operating system versions, refer to Uploading Images to the Bootstrap Node Repository. Note that there is no need to include the operating system name such as Ubuntu, CentOS, or Red Hat in the version number. If the official image contains an operating system suffix, manually remove it.

                                          • For Red Hat systems, for example, 525.105.17
                                          • For Ubuntu systems, for example, 535-5.15.0-1043-nvidia
                                          • For CentOS systems, for example, 525.147.05
                                        • Driver.RepoConfig.ConfigMapName : Used to record the name of the offline yum repository configuration file for the gpu-operator. When using the pre-packaged offline bundle, refer to the following documents for different types of operating systems.

                                          • Building CentOS 7.9 Offline Yum Repository
                                          • Building Red Hat 8.4 Offline Yum Repository
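
                                        Taken together, the driver parameters above might look roughly as follows in Helm values form. This is a minimal sketch assuming the upstream gpu-operator chart layout; the field names shown in the platform UI may differ:

                                          driver:\n  enabled: true\n  repository: nvcr.io/nvidia          # GPU driver image repository\n  image: driver\n  version: \"535.104.12\"               # built-in driver version; keep the default for offline deployment\n  usePrecompiled: false\n  repoConfig:\n    configMapName: local-repo-config  # offline yum source configmap (RepoConfig.ConfigMapName)\n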
                                        "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#toolkit-parameters","title":"Toolkit parameters","text":"

                                        Toolkit.enable : Enabled by default. This component allows containerd/docker to support running containers that require GPUs.

                                        "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#mig-parameters","title":"MIG parameters","text":"

                                        For detailed configuration methods, refer to Enabling MIG Functionality.

                                        MigManager.Config.name : The name of the MIG split configuration file, used to define the MIG (GI, CI) split policy. The default is default-mig-parted-config . For custom parameters, refer to Enabling MIG Functionality.
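
                                        For reference, a custom partition entry in a mig-parted style configuration typically looks like the sketch below. The profile name and count are illustrative and must match what your GPU model actually supports:

                                          version: v1\nmig-configs:\n  custom-config:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        \"1g.10gb\": 7   # split every GPU into seven 1g.10gb instances\n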

                                        "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#next-steps","title":"Next Steps","text":"

                                        After completing the configuration and creation of the above parameters:

                                        • If using full-card mode , GPU resources can be used when creating applications.

                                        • If using vGPU mode , after completing the above configuration and creation, proceed to vGPU Addon Installation.

                                        • If using MIG mode , label the individual GPU nodes that need a specific partitioning specification; otherwise, GPUs are partitioned according to the default value in MigManager.Config .

                                          • For single mode, add a label to the nodes as follows:

                                            kubectl label nodes {node} nvidia.com/mig.config=\"all-1g.10gb\" --overwrite\n
                                          • For mixed mode, add a label to the nodes as follows:

                                            kubectl label nodes {node} nvidia.com/mig.config=\"custom-config\" --overwrite\n

                                          After splitting, applications can use MIG GPU resources.

                                        "},{"location":"en/admin/kpanda/gpu/nvidia/push_image_to_repo.html","title":"Uploading Red Hat GPU Operator Offline Image to Bootstrap Repository","text":"

                                        This guide explains how to upload an offline image to the bootstrap repository using the nvcr.io/nvidia/driver:525.105.17-rhel8.4 offline driver image for Red Hat 8.4 as an example.

                                        "},{"location":"en/admin/kpanda/gpu/nvidia/push_image_to_repo.html#prerequisites","title":"Prerequisites","text":"
                                        1. The bootstrap node and its components are running properly.
                                        2. Prepare a node that has internet access and can access the bootstrap node. Docker should also be installed on this node. You can refer to Installing Docker for installation instructions.
                                        "},{"location":"en/admin/kpanda/gpu/nvidia/push_image_to_repo.html#procedure","title":"Procedure","text":""},{"location":"en/admin/kpanda/gpu/nvidia/push_image_to_repo.html#step-1-obtain-the-offline-image-on-an-internet-connected-node","title":"Step 1: Obtain the Offline Image on an Internet-Connected Node","text":"

                                        Perform the following steps on the internet-connected node:

                                        1. Pull the nvcr.io/nvidia/driver:525.105.17-rhel8.4 offline driver image:

                                          docker pull nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
                                        2. Once the image is pulled, save it as a compressed archive named nvidia-driver.tar :

                                          docker save nvcr.io/nvidia/driver:525.105.17-rhel8.4 > nvidia-driver.tar\n
                                        3. Copy the compressed image archive nvidia-driver.tar to the bootstrap node:

                                          scp nvidia-driver.tar user@ip:/root\n

                                          For example:

                                          scp nvidia-driver.tar root@10.6.175.10:/root\n
                                        "},{"location":"en/admin/kpanda/gpu/nvidia/push_image_to_repo.html#step-2-push-the-image-to-the-bootstrap-repository","title":"Step 2: Push the Image to the Bootstrap Repository","text":"

                                        Perform the following steps on the bootstrap node:

                                        1. Log in to the bootstrap node and import the compressed image archive nvidia-driver.tar :

                                          docker load -i nvidia-driver.tar\n
                                        2. View the imported image:

                                          docker images -a | grep nvidia\n

                                          Expected output:

                                            nvcr.io/nvidia/driver                 525.105.17-rhel8.4     e3ed7dee73e9   1 day ago   1.02GB\n
                                        3. Retag the image to correspond to the target repository in the remote Registry repository:

                                          docker tag <image-name> <registry-url>/<repository-name>:<tag>\n

                                          Replace <image-name> with the name of the NVIDIA image from the previous step, <registry-url> with the address of the Registry service on the bootstrap node, <repository-name> with the name of the repository you want to push the image to, and <tag> with the desired tag for the image.

                                          For example:

                                          docker tag nvcr.io/nvidia/driver 10.6.10.5/nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
                                        4. Push the image to the bootstrap repository:

                                          docker push {ip}/nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
                                        "},{"location":"en/admin/kpanda/gpu/nvidia/push_image_to_repo.html#whats-next","title":"What's Next","text":"

                                          Refer to Building Red Hat 8.4 Offline Yum Source and Offline Installation of GPU Operator to deploy the GPU Operator to your cluster.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html","title":"Offline Install gpu-operator Driver on Ubuntu 22.04","text":"

                                          Prerequisite: gpu-operator v23.9.0+2 or a higher version has been installed

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html#prepare-offline-image","title":"Prepare Offline Image","text":"
                                          1. Check the kernel version

                                            $ uname -r\n5.15.0-78-generic\n
                                          2. Check the GPU driver image version applicable to your kernel at https://catalog.ngc.nvidia.com/orgs/nvidia/containers/driver/tags. Use the kernel version to find the matching image tag, then pull the image and save it with ctr export .

                                            ctr i pull nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04\nctr i export --all-platforms driver.tar.gz nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 \n
                                          3. Import the image into the cluster's container registry

                                            ctr i import driver.tar.gz\nctr i tag nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 {your_registry}/nvcr.m.daocloud.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04\nctr i push {your_registry}/nvcr.m.daocloud.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 --skip-verify=true\n
                                          "},{"location":"en/admin/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html#install-the-driver","title":"Install the Driver","text":"
                                          1. Install the gpu-operator addon and set driver.usePrecompiled=true
                                          2. Set driver.version=535 ; note that it should be 535, not 535.104.12 (see the sketch after this list)
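
                                          Expressed as Helm-style overrides, these two settings might look like the following sketch (flag names assume the upstream gpu-operator chart; adjust to match the addon form):

                                            helm upgrade --install gpu-operator nvidia/gpu-operator -n gpu-operator \\\n  --set driver.usePrecompiled=true \\\n  --set driver.version=\"535\"\n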
                                          "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html","title":"Build CentOS 7.9 Offline Yum Source","text":"

                                          The AI platform comes with a pre-installed GPU Operator offline package for CentOS 7.9 with kernel version 3.10.0-1160. For other OS types or kernel versions, users need to manually build an offline yum source.

                                          This guide explains how to build an offline yum source for CentOS 7.9 with a specific kernel version and use it when installing the GPU Operator by specifying the RepoConfig.ConfigMapName parameter.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#prerequisites","title":"Prerequisites","text":"
                                          1. The user has already installed the v0.12.0 or later version of the addon offline package on the platform.
                                          2. Prepare a file server that is accessible from the cluster network, such as Nginx or MinIO.
                                          3. Prepare a node that has internet access, can access the cluster where the GPU Operator will be deployed, and can access the file server. Docker should also be installed on this node. You can refer to Installing Docker for installation instructions.
                                          "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#procedure","title":"Procedure","text":"

                                          This guide uses CentOS 7.9 with kernel version 3.10.0-1160.95.1.el7.x86_64 as an example to explain how to upgrade the pre-installed GPU Operator offline package's yum source.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#check-os-and-kernel-versions-of-cluster-nodes","title":"Check OS and Kernel Versions of Cluster Nodes","text":"

                                          Run the following commands on both the control node of the Global cluster and the node where GPU Operator will be deployed. If the OS and kernel versions of the two nodes are consistent, there is no need to build a yum source. You can directly refer to the Offline Installation of GPU Operator document for installation. If the OS or kernel versions of the two nodes are not consistent, please proceed to the next step.

                                          1. Run the following command to view the distribution name and version of the node where GPU Operator will be deployed in the cluster.

                                            cat /etc/redhat-release\n

                                            Expected output:

                                            CentOS Linux release 7.9 (Core)\n

                                            The output shows the current node's OS version as CentOS 7.9.

                                          2. Run the following command to view the kernel version of the node where GPU Operator will be deployed in the cluster.

                                            uname -a\n

                                            Expected output:

                                            Linux localhost.localdomain 3.10.0-1160.95.1.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux\n

                                            The output shows the current node's kernel version as 3.10.0-1160.95.1.el7.x86_64.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#create-the-offline-yum-source","title":"Create the Offline Yum Source","text":"

                                          Perform the following steps on a node that has internet access and can access the file server:

                                          1. Create a script file named yum.sh by running the following command:

                                            vi yum.sh\n

                                            Then press the i key to enter insert mode and enter the following content:

                                            export TARGET_KERNEL_VERSION=$1\n\ncat >> run.sh << \\EOF\n#! /bin/bash\necho \"start install kernel repo\"\necho ${KERNEL_VERSION}\nmkdir centos-base\n\nif [ \"$OS\" -eq 7 ]; then\n    yum install --downloadonly --downloaddir=./centos-base perl\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf.x86_64\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf-devel.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-headers-${KERNEL_VERSION}.el7.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-devel-${KERNEL_VERSION}.el7.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-${KERNEL_VERSION}.el7.x86_64\n    yum install  -y --downloadonly --downloaddir=./centos-base groff-base\nelif [ \"$OS\" -eq 8 ]; then\n    yum install --downloadonly --downloaddir=./centos-base perl\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf.x86_64\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf-devel.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-headers-${KERNEL_VERSION}.el8.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-devel-${KERNEL_VERSION}.el8.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-${KERNEL_VERSION}.el8.x86_64\n    yum install  -y --downloadonly --downloaddir=./centos-base groff-base\nelse\n    echo \"Error os version\"\nfi\n\ncreaterepo centos-base/\nls -lh centos-base/\ntar -zcf centos-base.tar.gz centos-base/\necho \"end install kernel repo\"\nEOF\n\ncat >> Dockerfile << EOF\nFROM centos:7\nENV KERNEL_VERSION=\"\"\nENV OS=7\nRUN yum install -y createrepo\nCOPY run.sh .\nENTRYPOINT [\"/bin/bash\",\"run.sh\"]\nEOF\n\ndocker build -t test:v1 -f Dockerfile .\ndocker run -e KERNEL_VERSION=$TARGET_KERNEL_VERSION --name centos7.9 test:v1\ndocker cp centos7.9:/centos-base.tar.gz .\ntar -xzf centos-base.tar.gz\n

                                            Press the Esc key to exit insert mode, then enter :wq to save and exit.

                                          2. Run the yum.sh file:

                                            bash -x yum.sh TARGET_KERNEL_VERSION\n

                                            The TARGET_KERNEL_VERSION parameter is used to specify the kernel version of the cluster nodes.

                                            Note: You don't need to include the distribution identifier (e.g., .el7.x86_64 ). For example:

                                            bash -x yum.sh 3.10.0-1160.95.1\n

                                          Now you have generated an offline yum source, centos-base , for the kernel version 3.10.0-1160.95.1.el7.x86_64 .

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#upload-the-offline-yum-source-to-the-file-server","title":"Upload the Offline Yum Source to the File Server","text":"

                                          Perform the following steps on a node that has internet access and can access the file server. This step is used to upload the generated yum source from the previous step to a file server that can be accessed by the cluster where the GPU Operator will be deployed. The file server can be Nginx, MinIO, or any other file server that supports the HTTP protocol.

                                          In this example, we will use the built-in MinIO as the file server. The MinIO details are as follows:

                                          • Access URL: http://10.5.14.200:9000 (usually {bootstrap-node IP} + {port-9000} )
                                          • Login username: rootuser
                                          • Login password: rootpass123

                                          • Run the following command in the current directory of the node to establish a connection between the node's local mc command-line tool and the MinIO server:

                                            mc config host add minio http://10.5.14.200:9000 rootuser rootpass123\n

                                            The expected output should resemble the following:

                                            Added `minio` successfully.\n

                                            mc is the command-line tool provided by MinIO for interacting with the MinIO server. For more details, refer to the MinIO Client documentation.

                                          • In the current directory of the node, create a bucket named centos-base :

                                            mc mb -p minio/centos-base\n

                                            The expected output should resemble the following:

                                            Bucket created successfully `minio/centos-base`.\n
                                          • Set the access policy of the bucket centos-base to allow public download. This will enable access during the installation of the GPU Operator:

                                            mc anonymous set download minio/centos-base\n

                                            The expected output should resemble the following:

                                            Access permission for `minio/centos-base` is set to `download`\n
                                          • In the current directory of the node, copy the generated centos-base offline yum source to the minio/centos-base bucket on the MinIO server:

                                            mc cp centos-base minio/centos-base --recursive\n
                                          "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#create-a-configmap-to-store-the-yum-source-info-in-the-cluster","title":"Create a ConfigMap to Store the Yum Source Info in the Cluster","text":"

                                          Perform the following steps on the control node of the cluster where the GPU Operator will be deployed.

                                          1. Run the following command to create a file named CentOS-Base.repo that specifies the configmap for the yum source storage:

                                            # The file name must be CentOS-Base.repo, otherwise it cannot be recognized during the installation of the GPU Operator\ncat > CentOS-Base.repo << EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # The file server address where the yum source is placed in step 3\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # The file server address where the yum source is placed in step 3\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
                                          2. Based on the created CentOS-Base.repo file, create a configmap named local-repo-config in the gpu-operator namespace:

                                            kubectl create configmap local-repo-config -n gpu-operator --from-file=CentOS-Base.repo=./CentOS-Base.repo\n

                                            The expected output should resemble the following:

                                            configmap/local-repo-config created\n

                                            The local-repo-config configmap will be used to provide the value for the RepoConfig.ConfigMapName parameter during the installation of the GPU Operator. You can customize the configuration file name.

                                          3. View the content of the local-repo-config configmap:

                                            kubectl get configmap local-repo-config -n gpu-operator -oyaml\n

                                            The expected output should resemble the following:

                                            apiVersion: v1\ndata:\n  CentOS-Base.repo: \"[extension-0]\\nbaseurl = http://10.6.232.5:32618/centos-base # The file server path where the yum source is placed in step 2\\ngpgcheck = 0\\nname = kubean extension 0\\n  \\n[extension-1]\\nbaseurl = http://10.6.232.5:32618/centos-base # The file server path where the yum source is placed in step 2\\ngpgcheck = 0\\nname = kubean extension 1\\n\"\nkind: ConfigMap\nmetadata:\n  creationTimestamp: \"2023-10-18T01:59:02Z\"\n  name: local-repo-config\n  namespace: gpu-operator\n  resourceVersion: \"59445080\"\n  uid: c5f0ebab-046f-442c-b932-f9003e014387\n

                                          You have successfully created an offline yum source configuration file for the cluster where the GPU Operator will be deployed. You can use it during the offline installation of the GPU Operator by specifying the RepoConfig.ConfigMapName parameter.
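
                                          When you later install the GPU Operator, pass the configmap name through the RepoConfig.ConfigMapName parameter. In Helm values form this might look like the following sketch (assuming the upstream chart's repoConfig field):

                                            driver:\n  repoConfig:\n    configMapName: local-repo-config   # the configmap created above\n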

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html","title":"Building Red Hat 8.4 Offline Yum Source","text":"

                                          The AI platform comes with a pre-installed GPU Operator offline package for CentOS 7.9 with kernel v3.10.0-1160. For other OS types or nodes with different kernels, users need to manually build the offline yum source.

                                          This guide explains how to build an offline yum source package for Red Hat 8.4 based on any node in the Global cluster. It also demonstrates how to use it during the installation of the GPU Operator by specifying the RepoConfig.ConfigMapName parameter.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#prerequisites","title":"Prerequisites","text":"
                                          1. The user has already installed the addon offline package v0.12.0 or higher on the platform.
                                          2. The OS of the cluster nodes where the GPU Operator will be deployed must be Red Hat v8.4, and the kernel version must be identical.
                                          3. Prepare a file server that can communicate with the cluster network where the GPU Operator will be deployed, such as Nginx or MinIO.
                                          4. Prepare a node that can access the internet, the cluster where the GPU Operator will be deployed, and the file server. Ensure that Docker is already installed on this node.
                                          5. The nodes in the Global cluster must be Red Hat 8.4 4.18.0-305.el8.x86_64.
                                          "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#procedure","title":"Procedure","text":"

                                          This guide uses a node with Red Hat 8.4 4.18.0-305.el8.x86_64 as an example to demonstrate how to build an offline yum source package for Red Hat 8.4 based on any node in the Global cluster. It also explains how to use it during the installation of the GPU Operator by specifying the RepoConfig.ConfigMapName parameter.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-1-download-the-yum-source-from-the-bootstrap-node","title":"Step 1: Download the Yum Source from the Bootstrap Node","text":"

                                          Perform the following steps on the master node of the Global cluster.

                                          1. Use SSH or any other method to access any node in the Global cluster and run the following command:

                                            cat /etc/yum.repos.d/extension.repo # View the contents of extension.repo.\n

                                            The expected output should resemble the following:

                                            [extension-0]\nbaseurl = http://10.5.14.200:9000/kubean/redhat/$releasever/os/$basearch\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/kubean/redhat-iso/$releasever/os/$basearch/AppStream\ngpgcheck = 0\nname = kubean extension 1\n\n[extension-2]\nbaseurl = http://10.5.14.200:9000/kubean/redhat-iso/$releasever/os/$basearch/BaseOS\ngpgcheck = 0\nname = kubean extension 2\n
                                          2. Create a folder named redhat-base-repo under the root directory:

                                            mkdir redhat-base-repo\n
                                          3. Download the RPM packages from the yum source to your local machine:

                                            Download the RPM packages from extension-1 :

                                            reposync -p redhat-base-repo -n --repoid=extension-1\n

                                            Download the RPM packages from extension-2 :

                                            reposync -p redhat-base-repo -n --repoid=extension-2\n
                                          "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-2-download-the-elfutils-libelf-devel-0187-4el8x86_64rpm-package","title":"Step 2: Download the elfutils-libelf-devel-0.187-4.el8.x86_64.rpm Package","text":"

                                          Perform the following steps on a node with internet access. Before proceeding, ensure that there is network connectivity between the node with internet access and the master node of the Global cluster.

                                          1. Run the following command on the node with internet access to download the elfutils-libelf-devel-0.187-4.el8.x86_64.rpm package:

                                            wget https://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/elfutils-libelf-devel-0.187-4.el8.x86_64.rpm\n
                                          2. Transfer the elfutils-libelf-devel-0.187-4.el8.x86_64.rpm package from the current directory to the node mentioned in step 1:

                                            scp elfutils-libelf-devel-0.187-4.el8.x86_64.rpm user@ip:~/redhat-base-repo/extension-2/Packages/\n

                                            For example:

                                            scp elfutils-libelf-devel-0.187-4.el8.x86_64.rpm root@10.6.175.10:~/redhat-base-repo/extension-2/Packages/\n
                                          "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-3-generate-the-local-yum-repository","title":"Step 3: Generate the Local Yum Repository","text":"

                                          Perform the following steps on the master node of the Global cluster mentioned in Step 1.

                                          1. Generate the repository index for the extension-1 packages:

                                            cd ~/redhat-base-repo/extension-1/Packages\ncreaterepo_c ./\n
                                          2. Generate the repository index for the extension-2 packages:

                                            cd ~/redhat-base-repo/extension-2/Packages\ncreaterepo_c ./\n

                                          You have now generated the offline yum source named redhat-base-repo for kernel version 4.18.0-305.el8.x86_64 .

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-4-upload-the-local-yum-repository-to-the-file-server","title":"Step 4: Upload the Local Yum Repository to the File Server","text":"

                                          In this example, we will use the MinIO that is built into the bootstrap node as the file server. However, you can choose any file server that suits your needs. Here are the details for MinIO:

                                          • Access URL: http://10.5.14.200:9000 (usually the {bootstrap-node-IP} + {port-9000})
                                          • Login username: rootuser
                                          • Login password: rootpass123

                                          • On the current node, establish a connection between the local mc command-line tool and the Minio server by running the following command:

                                            mc config host add minio <file_server_access_url> <username> <password>\n

                                            For example:

                                            mc config host add minio http://10.5.14.200:9000 rootuser rootpass123\n

                                            The expected output should be similar to:

                                            Added `minio` successfully.\n

                                            The mc command-line tool is provided by the Minio file server as a client command-line tool. For more details, refer to the MinIO Client documentation.

                                          • Create a bucket named redhat-base in the current location:

                                            mc mb -p minio/redhat-base\n

                                            The expected output should be similar to:

                                            Bucket created successfully `minio/redhat-base`.\n
                                          • Set the access policy of the redhat-base bucket to allow public downloads so that it can be accessed during the installation of the GPU Operator:

                                            mc anonymous set download minio/redhat-base\n

                                            The expected output should be similar to:

                                            Access permission for `minio/redhat-base` is set to `download`\n
                                          • Copy the offline yum repository files ( redhat-base-repo ) from the current location to the Minio server's minio/redhat-base bucket:

                                            mc cp redhat-base-repo minio/redhat-base --recursive\n
                                          "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-5-create-a-configmap-to-store-yum-repository-information-in-the-cluster","title":"Step 5: Create a ConfigMap to Store Yum Repository Information in the Cluster","text":"

                                          Perform the following steps on the control node of the cluster where you will deploy the GPU Operator.

                                          1. Run the following command to create a file named redhat.repo , which specifies the configuration information for the yum repository storage:

                                            # The file name must be redhat.repo, otherwise it won't be recognized when installing gpu-operator\ncat > redhat.repo << EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/redhat-base/redhat-base-repo/Packages # The file server address where the yum source was uploaded in Step 4\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/redhat-base/redhat-base-repo/Packages # The file server address where the yum source was uploaded in Step 4\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
                                          2. Based on the created redhat.repo file, create a configmap named local-repo-config in the gpu-operator namespace:

                                            kubectl create configmap local-repo-config -n gpu-operator --from-file=./redhat.repo\n

                                            The expected output should be similar to:

                                            configmap/local-repo-config created\n

                                            The local-repo-config configuration file is used to provide the value for the RepoConfig.ConfigMapName parameter during the installation of the GPU Operator. You can choose a different name for the configuration file.

                                          3. View the contents of the local-repo-config configuration file:

                                            kubectl get configmap local-repo-config -n gpu-operator -oyaml\n

                                          You have successfully created the offline yum source configuration file for the cluster where the GPU Operator will be deployed. You can use it by specifying the RepoConfig.ConfigMapName parameter during the offline installation of the GPU Operator.
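
                                          Before installing, you can optionally verify that the repository is publicly downloadable by fetching its metadata over HTTP. A sketch using the example MinIO address; adjust the path so it matches the baseurl in your redhat.repo plus repodata/repomd.xml:

                                            curl -I http://10.5.14.200:9000/redhat-base/redhat-base-repo/Packages/repodata/repomd.xml\n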

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html","title":"Build an Offline Yum Repository for Red Hat 7.9","text":""},{"location":"en/admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#introduction","title":"Introduction","text":"

                                          The AI platform comes with a pre-installed GPU Operator offline package for CentOS 7.9 with kernel 3.10.0-1160. You need to manually build an offline yum repository for other OS types or nodes with different kernels.

                                          This page explains how to build an offline yum repository for Red Hat 7.9 based on any node in the Global cluster, and how to use the RepoConfig.ConfigMapName parameter when installing the GPU Operator.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#prerequisites","title":"Prerequisites","text":"
                                          1. The cluster nodes where the GPU Operator is to be deployed must be Red Hat 7.9 with the exact same kernel version.
                                          2. Prepare a file server that can be connected to the cluster network where the GPU Operator is to be deployed, such as nginx or minio.
                                          3. Prepare a node that can access the internet, the cluster where the GPU Operator is to be deployed, and the file server. Docker installation must be completed on this node.
                                          4. The nodes in the global service cluster must be Red Hat 7.9.
                                          "},{"location":"en/admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#steps","title":"Steps","text":""},{"location":"en/admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#1-build-offline-yum-repo-for-relevant-kernel","title":"1. Build Offline Yum Repo for Relevant Kernel","text":"
                                          1. Download rhel7.9 ISO

                                          2. Download the rhel7.9 ospackage that corresponds to your Kubean version.

                                            Find the version number of Kubean in the Container Management section of the Global cluster under Helm Apps.

                                            Download the rhel7.9 ospackage for that version from the Kubean repository.

                                          3. Import offline resources using the installer.

                                            Refer to the Import Offline Resources document.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#2-download-offline-driver-image-for-red-hat-79-os","title":"2. Download Offline Driver Image for Red Hat 7.9 OS","text":"

                                          Click here to view the download URL.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#3-upload-red-hat-gpu-operator-offline-image-to-boostrap-node-repository","title":"3. Upload Red Hat GPU Operator Offline Image to Boostrap Node Repository","text":"

                                          Refer to Upload Red Hat GPU Operator Offline Image to Bootstrap Node Repository.

                                          Note

                                          This reference is based on rhel8.4, so make sure to modify it for rhel7.9.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#4-create-configmaps-in-the-cluster-to-save-yum-repository-information","title":"4. Create ConfigMaps in the Cluster to Save Yum Repository Information","text":"

                                          Run the following command on the control node of the cluster where the GPU Operator is to be deployed.

                                          1. Run the following command to create a file named CentOS-Base.repo to specify the configuration information where the yum repository is stored.

                                            # The file name must be CentOS-Base.repo, otherwise it will not be recognized when installing gpu-operator\ncat > CentOS-Base.repo <<  EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # The file server address on the bootstrap node, usually {bootstrap node IP} + {port 9000}\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # The file server address on the bootstrap node, usually {bootstrap node IP} + {port 9000}\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
                                          2. Based on the created CentOS-Base.repo file, create a configmap named local-repo-config in the gpu-operator namespace:

                                            kubectl create configmap local-repo-config -n gpu-operator --from-file=CentOS-Base.repo=./CentOS-Base.repo\n

                                            The expected output is as follows:

                                            configmap/local-repo-config created\n

                                            The local-repo-config configmap is used to provide the value of the RepoConfig.ConfigMapName parameter when installing gpu-operator, and the configmap name can be customized by the user.

                                          3. View the contents of the local-repo-config configmap:

                                            kubectl get configmap local-repo-config -n gpu-operator -oyaml\n

                                            The expected output is as follows:

                                            local-repo-config.yaml
                                            apiVersion: v1\ndata:\n  CentOS-Base.repo: \"[extension-0]\\nbaseurl = http://10.6.232.5:32618/centos-base # The file path where yum repository is placed in Step 2 \\ngpgcheck = 0\\nname = kubean extension 0\\n  \\n[extension-1]\\nbaseurl\n  = http://10.6.232.5:32618/centos-base # The file path where yum repository is placed in Step 2 \\ngpgcheck = 0\\nname\n  = kubean extension 1\\n\"\nkind: ConfigMap\nmetadata:\n  creationTimestamp: \"2023-10-18T01:59:02Z\"\n  name: local-repo-config\n  namespace: gpu-operator\n  resourceVersion: \"59445080\"\n  uid: c5f0ebab-046f-442c-b932-f9003e014387\n

                                          At this point, you have successfully created the offline yum repository configuration file for the cluster where the GPU Operator is to be deployed. Use it by specifying the RepoConfig.ConfigMapName parameter during the Offline Installation of GPU Operator.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/mig/index.html","title":"Overview of NVIDIA Multi-Instance GPU (MIG)","text":""},{"location":"en/admin/kpanda/gpu/nvidia/mig/index.html#mig-scenarios","title":"MIG Scenarios","text":"
                                          • Multi-Tenant Cloud Environments:

                                          MIG allows cloud service providers to partition a physical GPU into multiple independent GPU instances, which can be allocated to different tenants. This enables resource isolation and independence, meeting the GPU computing needs of multiple tenants.

                                          • Containerized Applications:

                                          MIG enables finer-grained GPU resource management in containerized environments. By partitioning a physical GPU into multiple MIG instances, each container can be assigned dedicated GPU compute resources, providing better performance isolation and resource utilization.

                                          • Batch Processing Jobs:

                                          For batch processing jobs requiring large-scale parallel computing, MIG provides higher computational performance and larger memory capacity. Each MIG instance can utilize a portion of the physical GPU's compute resources, accelerating the processing of large-scale computational tasks.

                                          • AI/Machine Learning Training:

                                          MIG offers increased compute power and memory capacity for training large-scale deep learning models. By partitioning the physical GPU into multiple MIG instances, each instance can independently carry out model training, improving training efficiency and throughput.

                                          In general, NVIDIA MIG is suitable for scenarios that require finer-grained allocation and management of GPU resources. It enables resource isolation, improved performance utilization, and meets the GPU computing needs of multiple users or applications.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/mig/index.html#overview-of-mig","title":"Overview of MIG","text":"

                                          NVIDIA Multi-Instance GPU (MIG) is a new feature introduced by NVIDIA on H100, A100, and A30 series GPUs. Its purpose is to divide a physical GPU into multiple GPU instances to provide finer-grained resource sharing and isolation. MIG can split a GPU into up to seven GPU instances, allowing a single physical GPU card to provide separate GPU resources to multiple users, maximizing GPU utilization.

                                          This feature enables multiple applications or users to share GPU resources simultaneously, improving the utilization of computational resources and increasing system scalability.

                                          With MIG, each GPU instance's processor has an independent and isolated path throughout the entire memory system, including cross-switch ports on the chip, L2 cache groups, memory controllers, and DRAM address buses, all uniquely allocated to a single instance.

                                          This ensures that the workload of individual users can run with predictable throughput and latency, along with identical L2 cache allocation and DRAM bandwidth. MIG can partition available GPU compute resources (such as streaming multiprocessors or SMs and GPU engines like copy engines or decoders) to provide defined quality of service (QoS) and fault isolation for different clients such as virtual machines, containers, or processes. MIG enables multiple GPU instances to run in parallel on a single physical GPU.

                                          MIG allows multiple vGPUs (and virtual machines) to run in parallel on a single GPU instance while retaining the isolation guarantees provided by vGPU. For more details on using vGPU and MIG for GPU partitioning, refer to NVIDIA Multi-Instance GPU and NVIDIA Virtual Compute Server.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/mig/index.html#mig-architecture","title":"MIG Architecture","text":"

                                          The following diagram provides an overview of MIG, illustrating how it virtualizes one physical GPU card into seven GPU instances that can be used by multiple users.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/mig/index.html#important-concepts","title":"Important Concepts","text":"
                                          • SM (Streaming Multiprocessor): The core computational unit of a GPU responsible for executing graphics rendering and general-purpose computing tasks. Each SM contains a group of CUDA cores, as well as shared memory, register files, and other resources, capable of executing multiple threads concurrently. Each MIG instance has a certain number of SMs and other related resources, along with the allocated memory slices.
                                          • GPU Memory Slice : The smallest portion of GPU memory, including the corresponding memory controller and cache. A GPU memory slice is approximately one-eighth of the total GPU memory resources in terms of capacity and bandwidth.
                                          • GPU SM Slice : The smallest computational unit of SMs on a GPU. When configured in MIG mode, a GPU SM slice is approximately one-seventh of the total available SMs in the GPU.
                                          • GPU Slice : The GPU slice represents the smallest portion of the GPU, consisting of a single GPU memory slice and a single GPU SM slice combined together.
                                          • GPU Instance (GI): A GPU instance is the combination of a GPU slice and GPU engines (DMA, NVDEC, etc.). Anything within a GPU instance always shares all GPU memory slices and other GPU engines, but its SM slice can be further subdivided into Compute Instances (CIs). A GPU instance provides memory QoS. Each GPU slice contains dedicated GPU memory resources, limiting available capacity and bandwidth while providing memory QoS. Each GPU memory slice gets one-eighth of the total GPU memory resources, and each GPU SM slice gets one-seventh of the total SM count.
                                          • Compute Instance (CI): A Compute Instance represents the smallest computational unit within a GPU instance. It consists of a subset of SMs, along with dedicated register files, shared memory, and other resources. Each CI has its own CUDA context and can run independent CUDA kernels. The number of CIs in a GPU instance depends on the number of available SMs and the configuration chosen during MIG setup.
                                          • Instance Slice : An Instance Slice represents a single CI within a GPU instance. It is the combination of a subset of SMs and a portion of the GPU memory slice. Each Instance Slice provides isolation and resource allocation for individual applications or users running on the GPU instance.
                                          "},{"location":"en/admin/kpanda/gpu/nvidia/mig/index.html#key-benefits-of-mig","title":"Key Benefits of MIG","text":"
                                          • Resource Sharing: MIG allows a single physical GPU to be divided into multiple GPU instances, providing efficient sharing of GPU resources among different users or applications. This maximizes GPU utilization and enables improved performance isolation.

                                          • Fine-Grained Resource Allocation: With MIG, GPU resources can be allocated at a finer granularity, allowing for more precise partitioning and allocation of compute power and memory capacity.

                                          • Improved Performance Isolation: Each MIG instance operates independently with its dedicated resources, ensuring predictable throughput and latency for individual users or applications. This improves performance isolation and prevents interference between different workloads running on the same GPU.

                                          • Enhanced Security and Fault Isolation: MIG provides better security and fault isolation by ensuring that each user or application has its dedicated GPU resources. This prevents unauthorized access to data and mitigates the impact of faults or errors in one instance on others.

                                          • Increased Scalability: MIG enables the simultaneous usage of GPU resources by multiple users or applications, increasing system scalability and accommodating the needs of various workloads.

                                          • Efficient Containerization: By using MIG in containerized environments, GPU resources can be effectively allocated to different containers, improving performance isolation and resource utilization.

                                          Overall, MIG offers significant advantages in terms of resource sharing, fine-grained allocation, performance isolation, security, scalability, and containerization, making it a valuable feature for various GPU computing scenarios.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/mig/create_mig.html","title":"Enabling MIG Features","text":"

                                          This section describes how to enable NVIDIA MIG features. NVIDIA currently provides two strategies for exposing MIG devices on Kubernetes nodes:

                                          • Single mode : Nodes expose a single type of MIG device on all their GPUs.
                                          • Mixed mode : Nodes expose a mixture of MIG device types on all their GPUs.

                                          For more details, refer to the NVIDIA GPU Card Usage Modes.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/mig/create_mig.html#prerequisites","title":"Prerequisites","text":"
                                          • Check the system requirements for the GPU driver installation on the target node: GPU Support Matrix
                                          • Ensure that the cluster nodes have GPUs of the proper models (NVIDIA H100, A100, and A30 Tensor Core GPUs). For more information, see the GPU Support Matrix.
                                          • All GPUs on the nodes must belong to the same product line (e.g., A100-SXM-40GB).
                                          "},{"location":"en/admin/kpanda/gpu/nvidia/mig/create_mig.html#install-gpu-operator-addon","title":"Install GPU Operator Addon","text":""},{"location":"en/admin/kpanda/gpu/nvidia/mig/create_mig.html#parameter-configuration","title":"Parameter Configuration","text":"

                                          When installing the Operator, you need to set the MigManager Config parameter accordingly. The default setting is default-mig-parted-config. You can also customize the sharding policy configuration file:

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/mig/create_mig.html#custom-sharding-policy","title":"Custom Sharding Policy","text":"
                                            ## Custom GI Instance Configuration\n  all-disabled:\n    - devices: all\n      mig-enabled: false\n  all-enabled:\n    - devices: all\n      mig-enabled: true\n      mig-devices: {}\n  all-1g.10gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb: 7\n  all-1g.10gb.me:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb+me: 1\n  all-1g.20gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.20gb: 4\n  all-2g.20gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        2g.20gb: 3\n  all-3g.40gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        3g.40gb: 2\n  all-4g.40gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        4g.40gb: 1\n  all-7g.80gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        7g.80gb: 1\n  all-balanced:\n    - device-filter: [\"0x233110DE\", \"0x232210DE\", \"0x20B210DE\", \"0x20B510DE\", \"0x20F310DE\", \"0x20F510DE\"]\n      devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb: 2\n        2g.20gb: 1\n        3g.40gb: 1\n  # After setting, CI instances will be partitioned according to the specified configuration\n  custom-config:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        3g.40gb: 2\n

                                          In the above YAML, custom-config determines how CI instances are partitioned. For example, the following configuration partitions the GPU into six 1c.3g.40gb CI instances:

                                          custom-config:\n  - devices: all\n    mig-enabled: true\n    mig-devices:\n      1c.3g.40gb: 6\n

                                          After completing the settings, you can select GPU MIG resources when deploying an application.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/mig/create_mig.html#switch-node-gpu-mode","title":"Switch Node GPU Mode","text":"

                                          After successfully installing the GPU operator, the node is in full card mode by default. There will be an indicator on the node management page, as shown below:

                                          Click the \u2507 at the right side of the node list, select a GPU mode to switch, and then choose the proper MIG mode and sharding policy. Here, we take MIXED mode as an example:

                                          There are two configurations here:

                                          1. MIG Policy: Mixed and Single.
                                          2. Sharding Policy: The policy here needs to match the key in the default-mig-parted-config (or user-defined sharding policy) configuration file.

                                          After clicking the OK button, wait about one minute and refresh the page. The node will have switched to the selected MIG mode:
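
                                          Under the hood, the GPU Operator's mig-manager applies the MIG configuration based on a node label. As a hedged sketch (the label key follows the upstream mig-manager convention; verify it in your environment), switching a node to the all-1g.10gb sharding policy from the CLI would look like:

                                            kubectl label nodes <node-name> nvidia.com/mig.config=all-1g.10gb --overwrite\n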

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/mig/mig_command.html","title":"MIG Related Commands","text":"

                                          GI Related Commands:

                                          • nvidia-smi mig -lgi : View the list of created GI instances
                                          • nvidia-smi mig -dgi -gi : Delete a specific GI instance
                                          • nvidia-smi mig -lgip : View the available GI profiles
                                          • nvidia-smi mig -cgi : Create a GI using the specified profile ID

                                          CI Related Commands:

                                          • nvidia-smi mig -lcip { -gi {gi Instance ID}} : View the available CI profiles; specifying -gi shows the CIs that can be created for a particular GI instance
                                          • nvidia-smi mig -lci : View the list of created CI instances
                                          • nvidia-smi mig -cci {profile id} -gi {gi instance id} : Create a CI instance under the specified GI
                                          • nvidia-smi mig -dci -ci : Delete a specific CI instance

                                          GI+CI Related Commands:

                                          • nvidia-smi mig -i 0 -cgi {gi profile id} -C {ci profile id} : Create a GI + CI instance directly"},{"location":"en/admin/kpanda/gpu/nvidia/mig/mig_usage.html","title":"Using MIG GPU Resources","text":"

                                          This section explains how applications can use MIG GPU resources.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/mig/mig_usage.html#prerequisites","title":"Prerequisites","text":"
                                          • The AI platform container management module is deployed and running.
                                          • The container management module has integrated with a Kubernetes cluster or created one, and the cluster's UI is accessible.
                                          • NVIDIA DevicePlugin and MIG capabilities are enabled. Refer to Offline installation of GPU Operator for details.
                                          • The nodes in the cluster have GPUs of the proper models.
                                          "},{"location":"en/admin/kpanda/gpu/nvidia/mig/mig_usage.html#using-mig-gpu-through-the-ui","title":"Using MIG GPU through the UI","text":"
                                          1. Confirm if the cluster has recognized the GPU card type.

                                            Go to Cluster Details -> Nodes and check if it has been correctly recognized as MIG.

                                          2. When deploying an application using an image, you can select and use NVIDIA MIG resources.

                                          3. Example of MIG Single Mode (used in the same way as a full GPU card):

                                            Note

                                            The MIG single policy allows users to request and use GPU resources in the same way as a full GPU card (nvidia.com/gpu). The difference is that these resources can be a portion of the GPU (MIG device) rather than the entire GPU. Learn more from the GPU MIG Mode Design.

                                          4. MIG Mixed Mode

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/mig/mig_usage.html#using-mig-through-yaml-configuration","title":"Using MIG through YAML Configuration","text":"

                                          MIG Single mode:

                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mig-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mig-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mig-demo\n    spec:\n      containers:\n        - name: mig-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/gpu: 2 # (1)!\n          imagePullPolicy: Always\n      restartPolicy: Always\n
                                          1. Number of MIG GPUs to request

                                          MIG Mixed mode:

                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mig-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mig-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mig-demo\n    spec:\n      containers:\n        - name: mig-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/mig-4g.20gb: 1 # (1)!\n          imagePullPolicy: Always\n      restartPolicy: Always\n
                                          1. Expose MIG devices through the nvidia.com/mig-<g>g.<gb>gb resource type, for example nvidia.com/mig-4g.20gb

                                          After entering the container, you can check if only one MIG device is being used:
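
                                          For example, assuming nvidia-smi is available in the container image, a quick check might look like this (pod name and output are illustrative):

                                            kubectl exec -it <mig-demo-pod> -- nvidia-smi -L\n# Expected output lists exactly one MIG device, similar to:\n# GPU 0: NVIDIA A100-SXM4-80GB (UUID: GPU-xxxx)\n#   MIG 4g.20gb Device 0: (UUID: MIG-xxxx)\n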

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/vgpu/hami.html","title":"Build a vGPU Memory Oversubscription Image","text":"

                                          The vGPU memory oversubscription feature in the HAMi project no longer exists. To use it, you need to rebuild the image with a libvgpu.so file that supports memory oversubscription.

                                          Dockerfile
                                          FROM docker.m.daocloud.io/projecthami/hami:v2.3.11\nCOPY libvgpu.so /k8s-vgpu/lib/nvidia/\n

                                          Run the following command to build the image:

                                          docker build -t release.daocloud.io/projecthami/hami:v2.3.11 -f Dockerfile .\n

                                          Then, push the image to release.daocloud.io.
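
                                          For example:

                                            docker push release.daocloud.io/projecthami/hami:v2.3.11\n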

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/vgpu/vgpu_addon.html","title":"Installing NVIDIA vGPU Addon","text":"

                                          To virtualize a single NVIDIA GPU into multiple virtual GPUs and allocate them to different virtual machines or users, you can use NVIDIA's vGPU capability. This section explains how to install the vGPU plugin in the AI platform, which is a prerequisite for using NVIDIA vGPU capability.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/vgpu/vgpu_addon.html#prerequisites","title":"Prerequisites","text":"
                                          • Refer to the GPU Support Matrix to confirm that the nodes in the cluster have GPUs of the proper models.
                                          • The current cluster has deployed NVIDIA drivers through the Operator. For specific instructions, refer to Offline Installation of GPU Operator.
                                          "},{"location":"en/admin/kpanda/gpu/nvidia/vgpu/vgpu_addon.html#procedure","title":"Procedure","text":"
                                          1. Path: Container Management -> Cluster Management -> Click the target cluster -> Helm Apps -> Helm Charts -> Search for nvidia-vgpu .

                                          2. During the installation of vGPU, several basic modification parameters are provided. If you need to modify advanced parameters, click the YAML column to make changes:

                                            • deviceMemoryScaling : NVIDIA device memory scaling factor, the input value must be an integer, with a default value of 1. It can be greater than 1 (enabling virtual memory, experimental feature). For an NVIDIA GPU with a memory size of M, if we configure the devicePlugin.deviceMemoryScaling parameter as S, in a Kubernetes cluster where we have deployed our device plugin, the vGPUs assigned from this GPU will have a total memory of S * M .

                                            • deviceSplitCount : An integer type, with a default value of 10. This is the number of splits per GPU; a GPU cannot be assigned more tasks than this count. If configured as N, each GPU can run up to N tasks simultaneously.

                                            • Resources : Represents the resource usage of the vgpu-device-plugin and vgpu-schedule pods.

                                          3. After a successful installation, you will see two types of pods in the specified namespace, indicating that the NVIDIA vGPU plugin has been successfully installed:

                                          After a successful installation, you can deploy applications using vGPU resources.

                                          Note

                                          The NVIDIA vGPU Addon does not support upgrading directly from the older v2.0.0 to the latest v2.0.0+1. To upgrade, uninstall the older version and then install the latest version.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html","title":"Using NVIDIA vGPU in Applications","text":"

                                          This section explains how to use the vGPU capability in the AI platform.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html#prerequisites","title":"Prerequisites","text":"
                                          • The nodes in the cluster have GPUs of the proper models.
                                          • vGPU Addon has been successfully installed. Refer to Installing GPU Addon for details.
                                          • GPU Operator is installed, and the Nvidia.DevicePlugin capability is disabled. Refer to Offline Installation of GPU Operator for details.
                                          "},{"location":"en/admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html#procedure","title":"Procedure","text":""},{"location":"en/admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html#using-vgpu-through-the-ui","title":"Using vGPU through the UI","text":"
                                          1. Confirm whether the cluster has detected GPUs. Click Clusters -> Cluster Settings -> Addon Plugins and check whether the GPU plugin has been automatically enabled and the proper GPU type detected. Currently, the cluster will automatically enable the GPU addon and set the GPU Type to Nvidia vGPU .

                                          2. Deploy a workload by clicking Clusters -> Workloads . When deploying a workload using an image, select the type Nvidia vGPU , and you will be prompted with the following parameters:

                                            • Number of Physical Cards (nvidia.com/vgpu) : Indicates how many physical cards need to be mounted by the current pod. The input value must be an integer and less than or equal to the number of cards on the host machine.
                                            • GPU Cores (nvidia.com/gpucores): Indicates the GPU cores utilized by each card, with a value range from 0 to 100. Setting it to 0 means no enforced isolation, while setting it to 100 means exclusive use of the entire card.
                                            • GPU Memory (nvidia.com/gpumem): Indicates the GPU memory occupied by each card, with a value in MB. The minimum value is 1, and the maximum value is the total memory of the card.

                                            If there are issues with the configuration values above, it may result in scheduling failure or inability to allocate resources.

                                          "},{"location":"en/admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html#using-vgpu-through-yaml-configuration","title":"Using vGPU through YAML Configuration","text":"

                                          Refer to the following workload configuration and add the parameter nvidia.com/vgpu: '1' in the resource requests and limits section to configure the number of physical cards used by the application.

                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-vgpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-vgpu-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: full-vgpu-demo\n    spec:\n      containers:\n        - name: full-vgpu-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/gpucores: '20'   # Request 20% of GPU cores for each card\n              nvidia.com/gpumem: '200'   # Request 200MB of GPU memory for each card\n              nvidia.com/vgpu: '1'   # Request 1 GPU card\n          imagePullPolicy: Always\n      restartPolicy: Always\n

                                          This YAML configuration requests the application to use vGPU resources. It specifies that each card should utilize 20% of GPU cores, 200MB of GPU memory, and requests 1 GPU card.

                                          "},{"location":"en/admin/kpanda/gpu/volcano/volcano-gang-scheduler.html","title":"Using Volcano's Gang Scheduler","text":"

                                          The Gang scheduling policy is one of the core scheduling algorithms of the volcano-scheduler. It satisfies the \"All or nothing\" scheduling requirement during the scheduling process, preventing arbitrary scheduling of Pods that could waste cluster resources. The specific algorithm observes whether the number of scheduled Pods under a Job meets the minimum running quantity. When the Job's minimum running quantity is satisfied, scheduling actions are performed for all Pods under the Job; otherwise, no actions are taken.

                                          "},{"location":"en/admin/kpanda/gpu/volcano/volcano-gang-scheduler.html#use-cases","title":"Use Cases","text":"

                                          The Gang scheduling algorithm, based on the concept of a Pod group, is particularly suitable for scenarios that require multi-process collaboration. AI workflows are often complex, involving stages such as data ingestion, data analysis, data splitting, training, serving, and logging, which require a group of containers to work together; this makes the pod-group-based Gang scheduling policy a natural fit.

                                          Gang scheduling is also well suited to multi-threaded parallel computing communication scenarios under the MPI computation framework, where master and slave processes must work together. Because the containers in such a group are tightly coupled, scheduling only part of them would cause resource contention; scheduling the whole group at once effectively avoids such deadlocks.

                                          In scenarios with insufficient cluster resources, the Gang scheduling policy significantly improves the utilization of cluster resources. For example, if the cluster can currently accommodate only 2 Pods, but the minimum number of Pods required for scheduling is 3, then all Pods of this Job will remain pending until the cluster can accommodate 3 Pods, at which point they will all be scheduled. This prevents partially scheduled Pods from occupying resources they cannot use, which would block other Jobs from running.

                                          "},{"location":"en/admin/kpanda/gpu/volcano/volcano-gang-scheduler.html#concept-explanation","title":"Concept Explanation","text":"

                                          The Gang Scheduler is the core scheduling plugin of Volcano, and it is enabled by default upon installing Volcano. When creating a workload, you only need to specify the scheduler name as volcano .

                                          Volcano schedules based on PodGroups. When creating a workload, there is no need to manually create PodGroup resources; Volcano will automatically create them based on the workload information. Below is an example of a PodGroup:

                                            apiVersion: scheduling.volcano.sh/v1beta1\nkind: PodGroup\nmetadata:\n  name: test\n  namespace: default\nspec:\n  minMember: 1  # (1)!\n  minResources:  # (2)!\n    cpu: \"3\"\n    memory: \"2048Mi\"\n  priorityClassName: high-priority # (3)!\n  queue: default # (4)!\n
                                          1. Represents the minimum number of Pods or jobs that need to run under this PodGroup. If the cluster resources do not meet the requirements to run the number of jobs specified by minMember, the scheduler will not schedule any jobs within this PodGroup.
                                          2. Represents the minimum resources required to run this PodGroup. If the allocatable resources of the cluster do not meet the minResources, the scheduler will not schedule any jobs within this PodGroup.
                                          3. Represents the priority of this PodGroup, used by the scheduler to sort all PodGroups within the queue during scheduling. system-node-critical and system-cluster-critical are two reserved values indicating the highest priority. If not specifically designated, the default priority or zero priority is used.
                                          4. Represents the queue to which this PodGroup belongs. The queue must be pre-created and in the open state.
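
                                          As a minimal sketch of pre-creating such a queue (the queue name and capability values below are illustrative; the Queue CRD is provided by Volcano, and a newly created queue is open by default):

                                            apiVersion: scheduling.volcano.sh/v1beta1\nkind: Queue\nmetadata:\n  name: test-queue # illustrative name\nspec:\n  weight: 1 # Relative share of cluster resources among queues\n  reclaimable: true # Whether idle resources can be reclaimed by other queues\n  capability: # Upper bound of resources this queue may consume (illustrative)\n    cpu: \"4\"\n    memory: \"8Gi\"\n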
                                          "},{"location":"en/admin/kpanda/gpu/volcano/volcano-gang-scheduler.html#use-case","title":"Use Case","text":"

                                          In a multi-threaded parallel computing communication scenario under the MPI computation framework, we need to ensure that all Pods can be successfully scheduled to ensure the job is completed correctly. Setting minAvailable to 4 means that 1 mpimaster and 3 mpiworkers are required to run.

                                          apiVersion: batch.volcano.sh/v1alpha1\nkind: Job\nmetadata:\n  name: lm-mpi-job\n  labels:\n    \"volcano.sh/job-type\": \"MPI\"\nspec:\n  minAvailable: 4\n  schedulerName: volcano\n  plugins:\n    ssh: []\n    svc: []\n  policies:\n    - event: PodEvicted\n      action: RestartJob\n  tasks:\n    - replicas: 1\n      name: mpimaster\n      policies:\n        - event: TaskCompleted\n          action: CompleteJob\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  MPI_HOST=`cat /etc/volcano/mpiworker.host | tr \"\\n\" \",\"`;\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd;\n                  mpiexec --allow-run-as-root --host ${MPI_HOST} -np 3 mpi_hello_world;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpimaster\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"500m\"\n                limits:\n                  cpu: \"500m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n    - replicas: 3\n      name: mpiworker\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd -D;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpiworker\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"1000m\"\n                limits:\n                  cpu: \"1000m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n

                                          Volcano automatically generates the following PodGroup resource for this Job:

                                          apiVersion: scheduling.volcano.sh/v1beta1\nkind: PodGroup\nmetadata:\n  annotations:\n  creationTimestamp: \"2024-05-28T09:18:50Z\"\n  generation: 5\n  labels:\n    volcano.sh/job-type: MPI\n  name: lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2\n  namespace: default\n  ownerReferences:\n  - apiVersion: batch.volcano.sh/v1alpha1\n    blockOwnerDeletion: true\n    controller: true\n    kind: Job\n    name: lm-mpi-job\n    uid: 9c571015-37c7-4a1a-9604-eaa2248613f2\n  resourceVersion: \"25173454\"\n  uid: 7b04632e-7cff-4884-8e9a-035b7649d33b\nspec:\n  minMember: 4\n  minResources:\n    count/pods: \"4\"\n    cpu: 3500m\n    limits.cpu: 3500m\n    pods: \"4\"\n    requests.cpu: 3500m\n  minTaskMember:\n    mpimaster: 1\n    mpiworker: 3\n  queue: default\nstatus:\n  conditions:\n  - lastTransitionTime: \"2024-05-28T09:19:01Z\"\n    message: '3/4 tasks in gang unschedulable: pod group is not ready, 1 Succeeded,\n      3 Releasing, 4 minAvailable'\n    reason: NotEnoughResources\n    status: \"True\"\n    transitionID: f875efa5-0358-4363-9300-06cebc0e7466\n    type: Unschedulable\n  - lastTransitionTime: \"2024-05-28T09:18:53Z\"\n    reason: tasks in gang are ready to be scheduled\n    status: \"True\"\n    transitionID: 5a7708c8-7d42-4c33-9d97-0581f7c06dab\n    type: Scheduled\n  phase: Pending\n  succeeded: 1\n

                                          From the PodGroup, it can be seen that it is associated with the workload through ownerReferences and sets the minimum number of running Pods to 4.
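
                                          You can inspect the automatically created PodGroup with kubectl, for example:

                                            kubectl get podgroup -n default\nkubectl describe podgroup lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2 -n default\n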

                                          "},{"location":"en/admin/kpanda/gpu/volcano/volcano_user_guide.html","title":"Use Volcano for AI Compute","text":""},{"location":"en/admin/kpanda/gpu/volcano/volcano_user_guide.html#usage-scenarios","title":"Usage Scenarios","text":"

                                          Kubernetes has become the de facto standard for orchestrating and managing cloud-native applications, and an increasing number of applications are choosing to migrate to K8s. Artificial intelligence and machine learning inherently involve a large number of compute-intensive tasks, and developers are keen to build AI platforms on Kubernetes to take full advantage of its resource management, application orchestration, and operations monitoring capabilities. However, the default Kubernetes scheduler was designed primarily for long-running services and falls short in batch and elastic scheduling for AI and big data tasks. Consider, for example, resource contention:

                                          Take TensorFlow job scenarios as an example. A TensorFlow job includes two different roles, PS and Worker, and the Pods for these two roles need to work together to complete the entire job. If only one type of role Pod is running, the entire job cannot execute properly. The default scheduler schedules Pods one by one and is unaware of the PS and Worker roles in a Kubeflow TFJob. In a high-load cluster (with insufficient resources), multiple jobs may each be allocated some resources to run a portion of their Pods, but none of the jobs can complete successfully, leading to resource waste. For instance, if a cluster has 4 GPUs and TFJob1 and TFJob2 each have 4 Workers, TFJob1 and TFJob2 might each be allocated 2 GPUs. However, both need 4 GPUs to run. This mutual waiting for resource release creates a deadlock, resulting in wasted GPU resources.

                                          "},{"location":"en/admin/kpanda/gpu/volcano/volcano_user_guide.html#volcano-batch-scheduling-system","title":"Volcano Batch Scheduling System","text":"

                                          Volcano is the first Kubernetes-based container batch computing platform under CNCF, focusing on high-performance computing scenarios. It fills in the missing functionalities of Kubernetes in fields such as machine learning, big data, and scientific computing, providing essential support for these high-performance workloads. Additionally, Volcano seamlessly integrates with mainstream computing frameworks like Spark, TensorFlow, and PyTorch, and supports hybrid scheduling of heterogeneous devices, including CPUs and GPUs, effectively resolving the deadlock issues mentioned above.

                                          The following sections will introduce how to install and use Volcano.

                                          "},{"location":"en/admin/kpanda/gpu/volcano/volcano_user_guide.html#install-volcano","title":"Install Volcano","text":"
                                          1. Find Volcano in Cluster Details -> Helm Apps -> Helm Charts and install it.

                                          2. Check and confirm whether Volcano is installed successfully, that is, whether the components volcano-admission, volcano-controllers, and volcano-scheduler are running properly.

                                          Typically, Volcano is used in conjunction with AI Lab to form an effective closed loop for dataset development, Notebook development, and training jobs.

                                          "},{"location":"en/admin/kpanda/gpu/volcano/volcano_user_guide.html#volcano-use-cases","title":"Volcano Use Cases","text":"
                                          • Volcano is a standalone scheduler. To enable the Volcano scheduler when creating a workload, simply specify the scheduler's name (schedulerName: volcano), as shown in the sketch below.
                                          • The VolcanoJob resource is an extension of the Job in Volcano, breaking the Job down into smaller working units called tasks, which can interact with each other.
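
                                          As a minimal sketch of the first point (the workload name and image are illustrative), enabling Volcano for an ordinary Deployment only requires setting the schedulerName field in the Pod template:

                                            apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nginx-volcano # illustrative name\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: nginx-volcano\n  template:\n    metadata:\n      labels:\n        app: nginx-volcano\n    spec:\n      schedulerName: volcano # Hand this workload over to the Volcano scheduler\n      containers:\n        - name: nginx\n          image: nginx:latest\n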
                                          "},{"location":"en/admin/kpanda/gpu/volcano/volcano_user_guide.html#volcano-supports-tensorflow","title":"Volcano Supports TensorFlow","text":"

                                          Here is an example:

                                          apiVersion: batch.volcano.sh/v1alpha1\nkind: Job\nmetadata:\n  name: tensorflow-benchmark\n  labels:\n    \"volcano.sh/job-type\": \"Tensorflow\"\nspec:\n  minAvailable: 3\n  schedulerName: volcano\n  plugins:\n    env: []\n    svc: []\n  policies:\n    - event: PodEvicted\n      action: RestartJob\n  tasks:\n    - replicas: 1\n      name: ps\n      template:\n        spec:\n          imagePullSecrets:\n            - name: default-secret\n          containers:\n            - command:\n                - sh\n                - -c\n                - |\n                  PS_HOST=`cat /etc/volcano/ps.host | sed 's/$/&:2222/g' | tr \"\\n\" \",\"`;\n                  WORKER_HOST=`cat /etc/volcano/worker.host | sed 's/$/&:2222/g' | tr \"\\n\" \",\"`;\n                  python tf_cnn_benchmarks.py --batch_size=32 --model=resnet50 --variable_update=parameter_server --flush_stdout=true --num_gpus=1 --local_parameter_device=cpu --device=cpu --data_format=NHWC --job_name=ps --task_index=${VK_TASK_INDEX} --ps_hosts=${PS_HOST} --worker_hosts=${WORKER_HOST}\n              image: docker.m.daocloud.io/volcanosh/example-tf:0.0.1\n              name: tensorflow\n              ports:\n                - containerPort: 2222\n                  name: tfjob-port\n              resources:\n                requests:\n                  cpu: \"1000m\"\n                  memory: \"2048Mi\"\n                limits:\n                  cpu: \"1000m\"\n                  memory: \"2048Mi\"\n              workingDir: /opt/tf-benchmarks/scripts/tf_cnn_benchmarks\n          restartPolicy: OnFailure\n    - replicas: 2\n      name: worker\n      policies:\n        - event: TaskCompleted\n          action: CompleteJob\n      template:\n        spec:\n          imagePullSecrets:\n            - name: default-secret\n          containers:\n            - command:\n                - sh\n                - -c\n                - |\n                  PS_HOST=`cat /etc/volcano/ps.host | sed 's/$/&:2222/g' | tr \"\\n\" \",\"`;\n                  WORKER_HOST=`cat /etc/volcano/worker.host | sed 's/$/&:2222/g' | tr \"\\n\" \",\"`;\n                  python tf_cnn_benchmarks.py --batch_size=32 --model=resnet50 --variable_update=parameter_server --flush_stdout=true --num_gpus=1 --local_parameter_device=cpu --device=cpu --data_format=NHWC --job_name=worker --task_index=${VK_TASK_INDEX} --ps_hosts=${PS_HOST} --worker_hosts=${WORKER_HOST}\n              image: docker.m.daocloud.io/volcanosh/example-tf:0.0.1\n              name: tensorflow\n              ports:\n                - containerPort: 2222\n                  name: tfjob-port\n              resources:\n                requests:\n                  cpu: \"2000m\"\n                  memory: \"2048Mi\"\n                limits:\n                  cpu: \"2000m\"\n                  memory: \"4096Mi\"\n              workingDir: /opt/tf-benchmarks/scripts/tf_cnn_benchmarks\n          restartPolicy: OnFailure\n
                                          "},{"location":"en/admin/kpanda/gpu/volcano/volcano_user_guide.html#parallel-computing-with-mpi","title":"Parallel Computing with MPI","text":"

                                          In multi-threaded parallel computing communication scenarios under the MPI computing framework, we need to ensure that all Pods are successfully scheduled to guarantee the job completes properly. Setting minAvailable to 4 indicates that 1 mpimaster and 3 mpiworkers are required to run. Simply setting the schedulerName field to volcano enables the Volcano scheduler.

                                          Here is an example:

                                          apiVersion: batch.volcano.sh/v1alpha1\nkind: Job\nmetadata:\n  name: lm-mpi-job\n  labels:\n    \"volcano.sh/job-type\": \"MPI\"\nspec:\n  minAvailable: 4\n  schedulerName: volcano\n  plugins:\n    ssh: []\n    svc: []\n  policies:\n    - event: PodEvicted\n      action: RestartJob\n  tasks:\n    - replicas: 1\n      name: mpimaster\n      policies:\n        - event: TaskCompleted\n          action: CompleteJob\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  MPI_HOST=`cat /etc/volcano/mpiworker.host | tr \"\\n\" \",\"`;\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd;\n                  mpiexec --allow-run-as-root --host ${MPI_HOST} -np 3 mpi_hello_world;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpimaster\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"500m\"\n                limits:\n                  cpu: \"500m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n    - replicas: 3\n      name: mpiworker\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd -D;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpiworker\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"1000m\"\n                limits:\n                  cpu: \"1000m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n

                                          Volcano automatically generates the following PodGroup resource:

                                          apiVersion: scheduling.volcano.sh/v1beta1\nkind: PodGroup\nmetadata:\n  annotations:\n  creationTimestamp: \"2024-05-28T09:18:50Z\"\n  generation: 5\n  labels:\n    volcano.sh/job-type: MPI\n  name: lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2\n  namespace: default\n  ownerReferences:\n  - apiVersion: batch.volcano.sh/v1alpha1\n    blockOwnerDeletion: true\n    controller: true\n    kind: Job\n    name: lm-mpi-job\n    uid: 9c571015-37c7-4a1a-9604-eaa2248613f2\n  resourceVersion: \"25173454\"\n  uid: 7b04632e-7cff-4884-8e9a-035b7649d33b\nspec:\n  minMember: 4\n  minResources:\n    count/pods: \"4\"\n    cpu: 3500m\n    limits.cpu: 3500m\n    pods: \"4\"\n    requests.cpu: 3500m\n  minTaskMember:\n    mpimaster: 1\n    mpiworker: 3\n  queue: default\nstatus:\n  conditions:\n  - lastTransitionTime: \"2024-05-28T09:19:01Z\"\n    message: '3/4 tasks in gang unschedulable: pod group is not ready, 1 Succeeded,\n      3 Releasing, 4 minAvailable'\n    reason: NotEnoughResources\n    status: \"True\"\n    transitionID: f875efa5-0358-4363-9300-06cebc0e7466\n    type: Unschedulable\n  - lastTransitionTime: \"2024-05-28T09:18:53Z\"\n    reason: tasks in gang are ready to be scheduled\n    status: \"True\"\n    transitionID: 5a7708c8-7d42-4c33-9d97-0581f7c06dab\n    type: Scheduled\n  phase: Pending\n  succeeded: 1\n

                                          From the PodGroup, it can be seen that it is associated with the workload through ownerReferences and sets the minimum number of running Pods to 4.

                                          If you want to learn more about the features and usage scenarios of Volcano, refer to Volcano Introduction.

                                          "},{"location":"en/admin/kpanda/helm/index.html","title":"Helm Charts","text":"

                                          Helm is a package management tool for Kubernetes that makes it easy for users to quickly discover, share, and use applications built for Kubernetes. The Container Management module provides hundreds of Helm charts, covering storage, networking, monitoring, databases, and other major use cases. With these templates, you can quickly deploy and easily manage Helm apps through the UI. In addition, you can add more personalized templates through Add Helm repository to meet various needs.

                                          Key Concepts:

                                          There are a few key concepts to understand when using Helm:

                                          • Chart: A Helm installation package, which contains the images, dependencies, and resource definitions required to run an application, and may also contain service definitions in the Kubernetes cluster, similar to a formula in Homebrew, a dpkg in APT, or an rpm file in Yum. Charts are called Helm Charts in AI platform.

                                          • Release: A Chart instance running on the Kubernetes cluster. A Chart can be installed multiple times in the same cluster, and each installation will create a new Release. Release is called Helm Apps in AI platform.

                                          • Repository: A repository for publishing and storing Charts. Repository is called Helm Repositories in AI platform.

                                          For more details, refer to Helm official website.

                                          Related operations:

                                          • Manage Helm apps, including installing, updating, uninstalling Helm apps, viewing Helm operation records, etc.
                                          • Manage Helm repositories, including adding, updating, and deleting Helm repositories.
                                          "},{"location":"en/admin/kpanda/helm/Import-addon.html","title":"Import Custom Helm Apps into Built-in Addons","text":"

                                          This article explains how to import Helm apps into the system's built-in addons in both offline and online environments.

                                          "},{"location":"en/admin/kpanda/helm/Import-addon.html#offline-environment","title":"Offline Environment","text":"

                                          An offline environment refers to an environment that cannot connect to the internet or is a closed private network environment.

                                          "},{"location":"en/admin/kpanda/helm/Import-addon.html#prerequisites","title":"Prerequisites","text":"
                                          • charts-syncer is available and running. If not, you can click here to download.
                                          • The Helm Chart has been adapted for charts-syncer, which means adding a .relok8s-images.yaml file to the Helm Chart. This file should list all images used by the Chart, including any images that are not directly referenced in the Chart but are used indirectly, for example images launched by an Operator.

                                          Note

                                          • Refer to image-hints-file for instructions on how to write this file for a Chart; a hedged example follows this list. The registry and repository of each image must be written separately, because the registry/repository needs to be replaced or modified when the image is loaded.
                                          • The installer's fire cluster already has charts-syncer installed. If you are importing a custom Helm app into the installer's fire cluster, you can skip the download and proceed to the adaptation. If the charts-syncer binary is not installed, you can download it immediately.
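
                                          As a hedged illustration, a .relok8s-images.yaml image hints file is a YAML list of Go-template strings pointing at the image fields in the Chart's values.yaml. The exact paths depend on your Chart; the entries below are hypothetical:

                                            # .relok8s-images.yaml (illustrative entries)\n- \"{{ .image.registry }}/{{ .image.repository }}:{{ .image.tag }}\"\n- \"{{ .operator.image.registry }}/{{ .operator.image.repository }}:{{ .operator.image.tag }}\" # image pulled indirectly by an Operator\n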
                                          "},{"location":"en/admin/kpanda/helm/Import-addon.html#sync-helm-chart","title":"Sync Helm Chart","text":"
                                          1. Go to Container Management -> Helm Apps -> Helm Repositories , search for the addon, and obtain the built-in repository address and username/password (the default username/password for the system's built-in repository is rootuser/rootpass123).

                                          2. Sync the Helm Chart to the built-in repository addon of the container management system

                                            • Write the following configuration file, modify it according to your specific configuration, and save it as sync-dao-2048.yaml .

                                              source:  # helm charts source information\n  repo:\n    kind: HARBOR # It can also be any other supported Helm Chart repository type, such as CHARTMUSEUM\n    url: https://release-ci.daocloud.io/chartrepo/community #  Change to the chart repo URL\n    #auth: # username/password, if no password is set, leave it blank\n      #username: \"admin\"\n      #password: \"Harbor12345\"\ncharts:  # charts to sync\n  - name: dao-2048 # helm charts information, if not specified, sync all charts in the source helm repo\n    versions:\n      - 1.4.1\ntarget:  # helm charts target information\n  containerRegistry: 10.5.14.40 # image repository URL\n  repo:\n    kind: CHARTMUSEUM # It can also be any other supported Helm Chart repository type, such as HARBOR\n    url: http://10.5.14.40:8081 #  Change to the correct chart repo URL, you can verify the address by using helm repo add $HELM-REPO\n    auth: # username/password, if no password is set, leave it blank\n      username: \"rootuser\"\n      password: \"rootpass123\"\n  containers:\n    # kind: HARBOR # If the image repository is HARBOR and you want charts-syncer to automatically create an image repository, fill in this field\n    # auth: # username/password, if no password is set, leave it blank\n      # username: \"admin\"\n      # password: \"Harbor12345\"\n\n# leverage .relok8s-images.yaml file inside the Charts to move the container images too\nrelocateContainerImages: true\n
                                            • Run the charts-syncer command to sync the Chart and its included images

                                              charts-syncer sync --config sync-dao-2048.yaml --insecure --auto-create-repository\n

                                              The expected output is:

                                              I1222 15:01:47.119777    8743 sync.go:45] Using config file: \"examples/sync-dao-2048.yaml\"\nW1222 15:01:47.234238    8743 syncer.go:263] Ignoring skipDependencies option as dependency sync is not supported if container image relocation is true or syncing from/to intermediate directory\nI1222 15:01:47.234685    8743 sync.go:58] There is 1 chart out of sync!\nI1222 15:01:47.234706    8743 sync.go:66] Syncing \"dao-2048_1.4.1\" chart...\n.relok8s-images.yaml hints file found\nComputing relocation...\n\nRelocating dao-2048@1.4.1...\nPushing 10.5.14.40/daocloud/dao-2048:v1.4.1...\nDone\nDone moving /var/folders/vm/08vw0t3j68z9z_4lcqyhg8nm0000gn/T/charts-syncer869598676/dao-2048-1.4.1.tgz\n
                                          3. Once the previous step is completed, go to Container Management -> Helm Apps -> Helm Repositories , find the proper addon, click Sync Repository in the action column, and you will see the uploaded Helm chart in the Helm chart list.

                                          4. You can then proceed with normal installation, upgrade, and uninstallation.

                                          "},{"location":"en/admin/kpanda/helm/Import-addon.html#online-environment","title":"Online Environment","text":"

                                          The Helm repo address for the online environment is release.daocloud.io . If you do not have permission to add a Helm repo, you will not be able to import custom Helm apps into the system's built-in addons. In that case, you can add your own Helm repository and then integrate it into the platform, following the same steps as Sync Helm Chart in the offline environment.

                                          "},{"location":"en/admin/kpanda/helm/helm-app.html","title":"Manage Helm Apps","text":"

                                          The container management module supports interface-based management of Helm, including creating Helm instances using Helm charts, customizing Helm instance arguments, and managing the full lifecycle of Helm instances.

                                          This section will take cert-manager as an example to introduce how to create and manage Helm apps through the container management interface.

                                          "},{"location":"en/admin/kpanda/helm/helm-app.html#prerequisites","title":"Prerequisites","text":"
                                          • Integrated the Kubernetes cluster or created the Kubernetes cluster, and you can access the UI interface of the cluster.

                                          • Created a namespace, user, and granted NS Admin or higher permissions to the user. For details, refer to Namespace Authorization.

                                          "},{"location":"en/admin/kpanda/helm/helm-app.html#install-the-helm-app","title":"Install the Helm app","text":"

                                          Follow the steps below to install the Helm app.

                                          1. Click a cluster name to enter Cluster Details .

                                          2. In the left navigation bar, click Helm Apps -> Helm Chart to enter the Helm chart page.

                                            On the Helm chart page, select the Helm repository named addon , and all the Helm chart templates under the addon repository will be displayed on the interface. Click the Chart named cert-manager .

                                          3. On the installation page, you can see the relevant detailed information of the Chart, select the version to be installed in the upper right corner of the interface, and click the Install button. Here select v1.9.1 version for installation.

                                          4. Configure Name , Namespace and Version Information . You can also customize arguments by modifying YAML in the argument Configuration area below. Click OK .

                                          5. The system will automatically return to the list of Helm apps, and the status of the newly created Helm app is Installing , and the status will change to Running after a period of time.
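
                                          For reference, a roughly equivalent operation with the helm CLI might look like the following (the repository URL is a placeholder; note that apps installed via the CLI cannot later be updated through the UI, as described in the next section):

                                            helm repo add addon <addon-repo-url> # hypothetical repository URL\nhelm install cert-manager addon/cert-manager --version v1.9.1 --namespace cert-manager --create-namespace\n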

                                          "},{"location":"en/admin/kpanda/helm/helm-app.html#update-the-helm-app","title":"Update the Helm app","text":"

                                          After we have completed the installation of a Helm app through the interface, we can perform an update operation on the Helm app. Note: Update operations using the UI are only supported for Helm apps installed via the UI.

                                          Follow the steps below to update the Helm app.

                                          1. Click a cluster name to enter Cluster Details .

                                          2. In the left navigation bar, click Helm Apps to enter the Helm app list page.

                                            On the Helm app list page, select the Helm app that needs to be updated, click the ... operation button on the right side of the list, and select Update from the drop-down menu.

                                          3. After clicking the Update button, the system will jump to the update interface, where you can update the Helm app as needed. Here we take updating the http port of the dao-2048 application as an example.

                                          4. After modifying the proper arguments, you can click the Change button under the argument configuration to compare the files before and after the modification. After confirming the changes, click the OK button at the bottom to complete the update of the Helm app.

                                          5. The system will automatically return to the Helm app list, and a pop-up window in the upper right corner will prompt update successful .
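                                          As a sketch only, the corresponding Helm CLI operation might look like the following. The release name dao-2048 comes from the example above, while the value key for the http port is hypothetical and depends entirely on the chart:

                                          # hypothetical value key; check the chart's values.yaml for the real one\nhelm upgrade dao-2048 <repo>/dao-2048 -n <namespace> --reuse-values --set service.port=8080\n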

                                          "},{"location":"en/admin/kpanda/helm/helm-app.html#view-helm-operation-records","title":"View Helm operation records","text":"

                                          Every installation, update, and deletion of Helm apps has detailed operation records and logs for viewing.

                                          1. In the left navigation bar, click Cluster Operations -> Recent Operations , and then select the Helm Operations tab at the top of the page. Each record corresponds to an install/update/delete operation.

                                          2. To view the detailed log of each operation: Click \u2507 on the right side of the list, and select Log from the pop-up menu.

                                          3. The detailed operation log is then displayed in a console view at the bottom of the page.
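                                          For comparison, the Helm CLI keeps a similar per-release history. Assuming the cert-manager release from earlier, it can be inspected with:

                                          helm history cert-manager -n <namespace>\n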

                                          "},{"location":"en/admin/kpanda/helm/helm-app.html#delete-the-helm-app","title":"Delete the Helm app","text":"

                                          Follow the steps below to delete the Helm app.

                                          1. Find the cluster where the Helm app to be deleted resides, click the cluster name, and enter Cluster Details .

                                          2. In the left navigation bar, click Helm Apps to enter the Helm app list page.

                                            On the Helm app list page, select the Helm app you want to delete, click the ... operation button on the right side of the list, and select Delete from the drop-down menu.

                                          3. Enter the name of the Helm app in the pop-up window to confirm, and then click the Delete button.

                                          "},{"location":"en/admin/kpanda/helm/helm-repo.html","title":"Manage Helm Repository","text":"

                                          The Helm repository is a repository for storing and publishing Charts. The Helm App module supports accessing Chart packages in repositories over HTTP(S). By default, the system has 4 built-in Helm repositories, described below, to meet common needs in enterprise production.

                                          • partner : various high-quality Charts provided by ecosystem partners. Example: tidb
                                          • system : Charts required by core system components and some advanced features; for example, insight-agent must be installed to obtain cluster monitoring information. Example: Insight
                                          • addon : Charts commonly used in business scenarios. Example: cert-manager
                                          • community : Charts of the most popular open source components in the Kubernetes community. Example: Istio

                                          In addition to the above preset repositories, you can also add third-party Helm repositories yourself. This page will introduce how to add and update third-party Helm repositories.

                                          "},{"location":"en/admin/kpanda/helm/helm-repo.html#prerequisites","title":"Prerequisites","text":"
                                          • Integrated the Kubernetes cluster or created the Kubernetes cluster, and you can access the UI interface of the cluster.

                                          • Created a namespace, user, and granted NS Admin or higher permissions to the user. For details, refer to Namespace Authorization.

                                          • If using a private repository, you should have read and write permissions to the repository.

                                          "},{"location":"en/admin/kpanda/helm/helm-repo.html#introduce-third-party-helm-repository","title":"Introduce third-party Helm repository","text":"

                                          The following uses Kubevela's public chart repository as an example of how to add and manage a third-party Helm repository.

                                          1. Find the cluster into which the third-party Helm repository should be imported, click the cluster name, and enter cluster details.

                                          2. In the left navigation bar, click Helm Apps -> Helm Repositories to enter the helm repo page.

                                          3. Click the Create Repository button on the helm repo page to enter the Create repository page, and configure relevant arguments according to the table below.

                                            • Repository Name: Set the repository name. It can be up to 63 characters long and may only include lowercase letters, numbers, and separators -. It must start and end with a lowercase letter or number, for example, kubevela.
                                            • Repository URL: The HTTP(S) address pointing to the target Helm repository. For example, https://charts.kubevela.net/core.
                                            • Skip TLS Verification: If the added Helm repository uses an HTTPS address and requires skipping TLS verification, you can check this option. The default is unchecked.
                                            • Authentication Method: The method used for identity verification after connecting to the repository URL. For public repositories, you can select None. For private repositories, you need to enter a username/password for identity verification.
                                            • Labels: Add labels to this Helm repository. For example, key: repo4; value: Kubevela.
                                            • Annotations: Add annotations to this Helm repository. For example, key: repo4; value: Kubevela.
                                            • Description: Add a description for this Helm repository. For example: This is a Kubevela public Helm repository.

                                          4. Click OK to complete the creation of the Helm repository. The page will automatically jump to the list of Helm repositories.
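                                          For reference, adding the same repository with the Helm CLI on a workstation would look like this (the repository URL is taken from the example above):

                                          helm repo add kubevela https://charts.kubevela.net/core\nhelm repo update\n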

                                          "},{"location":"en/admin/kpanda/helm/helm-repo.html#update-the-helm-repository","title":"Update the Helm repository","text":"

                                          When the address of a Helm repository changes, its address, authentication method, labels, annotations, and description can be updated.

                                          1. Find the cluster where the repository to be updated is located, click the cluster name, and enter cluster details .

                                          2. In the left navigation bar, click Helm Apps -> Helm Repositories to enter the helm repo list page.

                                          3. Find the Helm repository that needs to be updated on the repository list page, click the \u2507 button on the right side of the list, and click Update in the pop-up menu.

                                          4. Make the desired changes on the Update Helm Repository page, and click OK when finished.

                                          5. Return to the helm repo list, and the screen prompts that the update is successful.

                                          "},{"location":"en/admin/kpanda/helm/helm-repo.html#delete-the-helm-repository","title":"Delete the Helm repository","text":"

                                          In addition to importing and updating repositories, you can also delete unnecessary ones, including system preset repositories and third-party repositories.

                                          1. Find the cluster where the repository to be deleted is located, click the cluster name, and enter cluster details .

                                          2. In the left navigation bar, click Helm Apps -> Helm Repositories to enter the helm repo list page.

                                          3. Find the Helm repository that needs to be deleted on the repository list page, click the \u2507 button on the right side of the list, and click Delete in the pop-up menu.

                                          4. Enter the repository name to confirm, and click Delete .

                                          5. Return to the list of Helm repositories, and the screen prompts that the deletion is successful.

                                          "},{"location":"en/admin/kpanda/helm/multi-archi-helm.html","title":"Import and Upgrade Multi-Arch Helm Apps","text":"

                                          In a multi-arch cluster, it is common to use Helm charts that support multiple architectures to address deployment issues caused by architectural differences. This guide will explain how to integrate single-arch Helm apps into multi-arch deployments and how to integrate multi-arch Helm apps.

                                          "},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#import","title":"Import","text":""},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#import-single-arch","title":"Import Single-arch","text":"

                                          Prepare the offline package addon-offline-full-package-${version}-${arch}.tar.gz.

                                          Specify the path in the clusterConfig.yaml configuration file, for example:

                                          addonPackage:\n  path: \"/home/addon-offline-full-package-v0.9.0-amd64.tar.gz\"\n

                                          Then run the import command:

                                          ~/dce5-installer cluster-create -c /home/dce5/sample/clusterConfig.yaml -m /home/dce5/sample/manifest.yaml -d -j13\n
                                          "},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#integrate-multi-arch","title":"Integrate Multi-arch","text":"

                                          Prepare the offline package addon-offline-full-package-${version}-${arch}.tar.gz.

                                          Take addon-offline-full-package-v0.9.0-arm64.tar.gz as an example and run the import command:

                                          ~/dce5-installer import-addon -c /home/dce5/sample/clusterConfig.yaml --addon-path=/home/addon-offline-full-package-v0.9.0-arm64.tar.gz\n
                                          "},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#upgrade","title":"Upgrade","text":""},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#upgrade-single-arch","title":"Upgrade Single-arch","text":"

                                          Prepare the offline package addon-offline-full-package-${version}-${arch}.tar.gz.

                                          Specify the path in the clusterConfig.yaml configuration file, for example:

                                          addonPackage:\n  path: \"/home/addon-offline-full-package-v0.11.0-amd64.tar.gz\"\n

                                          Then run the import command:

                                          ~/dce5-installer cluster-create -c /home/dce5/sample/clusterConfig.yaml -m /home/dce5/sample/manifest.yaml -d -j13\n
                                          "},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#multi-arch-integration","title":"Multi-arch Integration","text":"

                                          Prepare the offline package addon-offline-full-package-${version}-${arch}.tar.gz.

                                          Take addon-offline-full-package-v0.11.0-arm64.tar.gz as an example and run the import command:

                                          ~/dce5-installer import-addon -c /home/dce5/sample/clusterConfig.yaml --addon-path=/home/addon-offline-full-package-v0.11.0-arm64.tar.gz\n
                                          "},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#notes","title":"Notes","text":""},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#disk-space","title":"Disk Space","text":"

                                          The offline package is quite large and requires sufficient space for decompression and loading of images. Otherwise, it may interrupt the process with a \"no space left\" error.

                                          "},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#retry-after-failure","title":"Retry after Failure","text":"

                                          If the multi-arch fusion step fails, you need to clean up the residue before retrying:

                                          rm -rf addon-offline-target-package\n
                                          "},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#registry-space","title":"Registry Space","text":"

                                          If the offline package used for fusion contains registry spaces that are inconsistent with those of the previously imported offline package, an error may occur during the fusion process because those registry spaces do not exist.

                                          Solution: simply create the registry space before the fusion. For example, if the error indicates that the registry space localhost does not exist, creating it in advance prevents the error.

                                          "},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#architecture-conflict","title":"Architecture Conflict","text":"

                                          When upgrading from an addon version lower than 0.12.0, the charts-syncer in the target offline package does not check whether an image already exists before pushing, so it recombines the multi-arch images into a single architecture during the upgrade. For example, if the addon was multi-arch in v0.10, upgrading to v0.11 overwrites the multi-arch addon with a single architecture, whereas upgrading to v0.12.0 or above preserves the multi-arch.

                                          "},{"location":"en/admin/kpanda/helm/upload-helm.html","title":"Upload Helm Charts","text":"

                                          This article explains how to upload Helm charts. See the steps below.

                                          1. Add a Helm repository, refer to Adding a Third-Party Helm Repository for the procedure.

                                          2. Upload the Helm Chart to the Helm repository.

                                            Upload with Client / Upload with Web Page

                                            Note

                                            This method is suitable for Harbor, ChartMuseum, JFrog type repositories.

                                            1. Log in to a node that can access the Helm repository, upload the Helm binary to the node, and install the cm-push plugin (a VPN may be needed, and Git should be installed in advance).

                                              Refer to the plugin installation process.

                                            2. Push the Helm Chart to the Helm repository by executing the following command:

                                              helm cm-push ${charts-dir} ${HELM_REPO_URL} --username ${username} --password ${password}\n

                                              Argument descriptions:

                                              • charts-dir: The directory of the Helm Chart, or the packaged Chart (i.e., .tgz file).
                                              • HELM_REPO_URL: The URL of the Helm repository.
                                              • username/password: The username and password for the Helm repository with push permissions.
                                              • If you want to access via HTTPS and skip the certificate verification, you can add the argument --insecure.
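                                              For example, a hypothetical run against a ChartMuseum-style repository, with placeholder names and URL:

                                              helm package ./mychart   # produces mychart-0.1.0.tgz\nhelm cm-push mychart-0.1.0.tgz https://charts.example.com --username admin --password <password>\n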

                                            Note

                                            This method is only applicable to Harbor repositories.

                                            1. Log in to the Harbor repository, ensuring the logged-in user has push permissions.

                                            2. Go to the relevant project, select the Helm Charts tab, click the Upload button on the page to upload the Helm Chart.

                                          3. Sync Remote Repository Data

                                            Manual Sync / Auto Sync

                                            By default, the cluster does not enable Helm Repository Auto-Refresh, so you need to perform a manual sync operation. The general steps are:

                                            Go to Helm Apps -> Helm Repositories, click the \u2507 button on the right side of the repository list, and select Sync Repository to complete the repository data synchronization.

                                            If you need to enable the Helm repository auto-sync feature, you can go to Cluster Maintenance -> Cluster Settings -> Advanced Settings and turn on the Helm repository auto-refresh switch.

                                          "},{"location":"en/admin/kpanda/inspect/index.html","title":"Cluster Inspection","text":"

                                          Cluster inspection allows administrators to regularly or ad-hoc check the overall health of the cluster, giving them proactive control over ensuring cluster security. With a well-planned inspection schedule, this proactive cluster check allows administrators to monitor the cluster status at any time and address potential issues in advance. It eliminates the previous dilemma of passive troubleshooting during failures, enabling proactive monitoring and prevention.

                                          The cluster inspection feature provided by AI platform's container management module supports custom inspection items at the cluster, node, and pod levels. After the inspection is completed, it automatically generates visual inspection reports.

                                          • Cluster Level: Checks the running status of system components in the cluster, including cluster status, resource usage, and specific inspection items for control nodes, such as the status of kube-apiserver and etcd .
                                          • Node Level: Includes common inspection items for both control nodes and worker nodes, such as node resource usage, handle counts, PID status, and network status.
                                          • Pod Level: Checks the CPU and memory usage, running status of pods, and the status of PV (PersistentVolume) and PVC (PersistentVolumeClaim).

                                          For information on security inspections or executing security-related inspections, refer to the supported security scan types in AI platform.

                                          "},{"location":"en/admin/kpanda/inspect/config.html","title":"Creating Inspection Configuration","text":"

                                          AI platform Container Management module provides cluster inspection functionality, which supports inspection at the cluster, node, and pod levels.

                                          • Cluster level: Check the running status of system components in the cluster, including cluster status, resource usage, and specific inspection items for control nodes such as kube-apiserver and etcd .
                                          • Node level: Includes common inspection items for both control nodes and worker nodes, such as node resource usage, handle count, PID status, and network status.
                                          • Pod level: Check the CPU and memory usage, running status, PV and PVC status of Pods.

                                          Here's how to create an inspection configuration.

                                          1. Click Cluster Inspection in the left navigation bar.

                                          2. On the right side of the page, click Inspection Configuration .

                                          3. Fill in the inspection configuration based on the following instructions, then click OK at the bottom of the page.

                                            • Cluster: Select the clusters to inspect from the dropdown list. If you select multiple clusters, multiple inspection configurations are generated automatically (identical except for the target cluster).
                                            • Scheduled Inspection: When enabled, it allows for regular automatic execution of cluster inspections based on a pre-set inspection frequency.
                                            • Inspection Frequency: Set the interval for automatic inspections, e.g., every Tuesday at 10 AM. Custom cron expressions are supported; refer to Cron Schedule Syntax for more information, and see the example after this list.
                                            • Number of Inspection Records to Retain: Specifies the maximum number of inspection records to be retained, including all inspection records for each cluster.
                                            • Parameter Configuration: The parameter configuration is divided into three parts: cluster level, node level, and pod level. You can enable or disable specific inspection items based on your requirements.
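                                          For instance, assuming standard five-field cron syntax, the "every Tuesday at 10 AM" schedule mentioned above would be written as:

                                          0 10 * * 2   # minute hour day-of-month month day-of-week (2 = Tuesday)\n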

                                          After creating the inspection configuration, it is automatically displayed in the inspection configuration list. Click the more-options button on the right of a configuration to immediately perform an inspection, modify the configuration, or delete the configuration and its reports.

                                          • Click Inspection to perform an inspection once based on the configuration.
                                          • Click Inspection Configuration to modify the inspection configuration.
                                          • Click Delete to delete the inspection configuration and reports.

                                          Note

                                          • After creating the inspection configuration, if the Scheduled Inspection configuration is enabled, inspections will be automatically executed at the specified time.
                                          • If Scheduled Inspection configuration is not enabled, you need to manually trigger the inspection.
                                          "},{"location":"en/admin/kpanda/inspect/inspect.html","title":"Start Cluster Inspection","text":"

                                          After creating an inspection configuration, if the Scheduled Inspection configuration is enabled, inspections will be automatically executed at the specified time. If the Scheduled Inspection configuration is not enabled, you need to manually trigger the inspection.

                                          This page explains how to manually perform a cluster inspection.

                                          "},{"location":"en/admin/kpanda/inspect/inspect.html#prerequisites","title":"Prerequisites","text":"
                                          • Integrate or create a cluster in the Container Management module.
                                          • Create an inspection configuration.
                                          • The selected cluster is in the Running state and the insight component has been installed in the cluster.
                                          "},{"location":"en/admin/kpanda/inspect/inspect.html#steps","title":"Steps","text":"

                                          When performing an inspection, you can choose to inspect multiple clusters in batches or perform a separate inspection for a specific cluster.

                                          Batch Inspection / Individual Inspection
                                          1. Click Cluster Inspection in the top-level navigation bar of the Container Management module, then click Inspection on the right side of the page.

                                          2. Select the clusters you want to inspect, then click OK at the bottom of the page.

                                            • If you choose to inspect multiple clusters at the same time, the system will perform inspections based on different inspection configurations for each cluster.
                                            • If no inspection configuration is set for a cluster, the system will use the default configuration.

                                          1. Go to the Cluster Inspection page.
                                          2. Click the more options button ( \u2507 ) on the right of the proper inspection configuration, then select Inspection from the popup menu.

                                          "},{"location":"en/admin/kpanda/inspect/report.html","title":"Check Inspection Reports","text":"

                                          After the inspection execution is completed, you can view the inspection records and detailed inspection reports.

                                          "},{"location":"en/admin/kpanda/inspect/report.html#prerequisites","title":"Prerequisites","text":"
                                          • Create an inspection configuration.
                                          • Perform at least one inspection execution.
                                          "},{"location":"en/admin/kpanda/inspect/report.html#steps","title":"Steps","text":"
                                          1. Go to the Cluster Inspection page and click the name of the target inspection cluster.

                                          2. Click the name of the inspection record you want to view.

                                            • Each inspection execution generates an inspection record.
                                            • When the number of inspection records exceeds the maximum retention specified in the inspection configuration, the earliest records (by execution time) are deleted first.

                                          3. View the detailed information of the inspection, which may include an overview of cluster resources and the running status of system components.

                                            You can download the inspection report or delete the inspection report from the top right corner of the page.

                                          "},{"location":"en/admin/kpanda/namespaces/createns.html","title":"Namespaces","text":"

                                          Namespaces are an abstraction used in Kubernetes for resource isolation. A cluster can contain multiple namespaces with different names, and the resources in each namespace are isolated from each other. For a detailed introduction to namespaces, refer to Namespaces.

                                          This page will introduce the related operations of the namespace.

                                          "},{"location":"en/admin/kpanda/namespaces/createns.html#create-a-namespace","title":"Create a namespace","text":"

                                          Namespaces can be created easily through a form, or quickly by writing or importing a YAML file.

                                          Note

                                          • Before creating a namespace, you need to Integrate a Kubernetes cluster or Create a Kubernetes cluster in the container management module.
                                          • The default namespace default is usually generated automatically after cluster initialization. However, for production clusters, it is recommended to create separate namespaces for ease of management instead of using default directly.
                                          "},{"location":"en/admin/kpanda/namespaces/createns.html#create-with-form","title":"Create with form","text":"
                                          1. On the cluster list page, click the name of the target cluster.

                                          2. Click Namespace in the left navigation bar, then click the Create button on the right side of the page.

                                          3. Fill in the name of the namespace, configure the workspace and labels (optional), and then click OK.

                                            Info

                                            • After binding a namespace to a workspace, the resources of that namespace will be shared with the bound workspace. For a detailed explanation of workspaces, refer to Workspaces and Hierarchies.

                                            • After the namespace is created, you can still bind/unbind the workspace.

                                          4. Click OK to complete the creation of the namespace. On the right side of the namespace list, click \u2507 to select update, bind/unbind workspace, quota management, delete, and more from the pop-up menu.

                                          "},{"location":"en/admin/kpanda/namespaces/createns.html#create-from-yaml","title":"Create from YAML","text":"
                                          1. On the Clusters page, click the name of the target cluster.

                                          2. Click Namespace in the left navigation bar, then click the YAML Create button on the right side of the page.

                                          3. Enter or paste the prepared YAML content, or directly import an existing YAML file locally.

                                            After entering the YAML content, click Download to save the YAML file locally.

                                          4. Finally, click OK in the lower right corner of the pop-up box.
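                                          A minimal Namespace manifest that could be pasted or imported in step 3 looks like this (the name and labels are placeholders):

                                          apiVersion: v1\nkind: Namespace\nmetadata:\n  name: my-namespace   # placeholder: replace with your namespace name\n  labels:\n    environment: demo   # optional labels\n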

                                          "},{"location":"en/admin/kpanda/namespaces/exclusive.html","title":"Namespace Exclusive Nodes","text":"

                                          Namespace exclusive nodes in a Kubernetes cluster allow a specific namespace to have exclusive access to the CPU, memory, and other resources of one or more nodes through taints and tolerations. Once exclusive nodes are configured for a specific namespace, applications and services from other namespaces cannot run on those nodes. Using exclusive nodes gives important applications exclusive access to some computing resources, achieving physical isolation from other applications.

                                          Note

                                          Applications and services running on a node before it is set to be an exclusive node will not be affected and will continue to run normally on that node. Only when these Pods are deleted or rebuilt will they be scheduled to other non-exclusive nodes.

                                          "},{"location":"en/admin/kpanda/namespaces/exclusive.html#preparation","title":"Preparation","text":"

                                          Check whether the kube-apiserver of the current cluster has enabled the PodNodeSelector and PodTolerationRestriction admission controllers.

                                          The use of namespace exclusive nodes requires users to enable the PodNodeSelector and PodTolerationRestriction admission controllers on the kube-apiserver. For more information about admission controllers, refer to Kubernetes Admission Controllers Reference.

                                          You can go to any Master node in the current cluster to check whether these two features are enabled in the kube-apiserver.yaml file, or you can execute the following command on the Master node for a quick check:

                                          [root@g-master1 ~]# cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep  enable-admission-plugins\n\n# The expected output is as follows:\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction\n
                                          "},{"location":"en/admin/kpanda/namespaces/exclusive.html#enable-namespace-exclusive-nodes-on-global-cluster","title":"Enable Namespace Exclusive Nodes on Global Cluster","text":"

                                          Since the Global cluster runs platform basic components such as kpanda, ghippo, and insight, enabling namespace exclusive nodes on Global may cause system components to not be scheduled to the exclusive nodes when they restart, affecting the overall high availability of the system. Therefore, we generally do not recommend users to enable the namespace exclusive node feature on the Global cluster.

                                          If you do need to enable namespace exclusive nodes on the Global cluster, please follow the steps below:

                                          1. Enable the PodNodeSelector and PodTolerationRestriction admission controllers for the kube-apiserver of the Global cluster

                                            Note

                                            If the cluster has already enabled the above two admission controllers, please skip this step and go directly to configure system component tolerations.

                                            Go to any Master node in the current cluster to modify the kube-apiserver.yaml configuration file, or execute the following command on the Master node for configuration:

                                            [root@g-master1 ~]# vi /etc/kubernetes/manifests/kube-apiserver.yaml\n\n# The relevant file content is as follows:\napiVersion: v1\nkind: Pod\nmetadata:\n    ......\nspec:\ncontainers:\n- command:\n    - kube-apiserver\n    ......\n    - --default-not-ready-toleration-seconds=300\n    - --default-unreachable-toleration-seconds=300\n    - --enable-admission-plugins=NodeRestriction   #List of enabled admission controllers\n    - --enable-aggregator-routing=False\n    - --enable-bootstrap-token-auth=true\n    - --endpoint-reconciler-type=lease\n    - --etcd-cafile=/etc/kubernetes/ssl/etcd/ca.crt\n    ......\n

                                            Find the --enable-admission-plugins parameter and add the PodNodeSelector and PodTolerationRestriction admission controllers (separated by commas). Refer to the following:

                                            # Add PodNodeSelector and PodTolerationRestriction\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction\n
                                          2. Add toleration annotations to the namespace where the platform components are located

                                            After enabling the admission controllers, you need to add toleration annotations to the namespace where the platform components are located to ensure the high availability of the platform components.

                                            The system component namespaces for AI platform are as follows:

                                            • kpanda-system: kpanda
                                            • hwameiStor-system: hwameiStor
                                            • istio-system: istio
                                            • metallb-system: metallb
                                            • cert-manager-system: cert-manager
                                            • contour-system: contour
                                            • kubean-system: kubean
                                            • ghippo-system: ghippo
                                            • kcoral-system: kcoral
                                            • kcollie-system: kcollie
                                            • insight-system: insight, insight-agent
                                            • ipavo-system: ipavo
                                            • kairship-system: kairship
                                            • karmada-system: karmada
                                            • amamba-system: amamba, jenkins
                                            • skoala-system: skoala
                                            • mspider-system: mspider
                                            • mcamel-system: mcamel-rabbitmq, mcamel-elasticsearch, mcamel-mysql, mcamel-redis, mcamel-kafka, mcamel-minio, mcamel-postgresql
                                            • spidernet-system: spidernet
                                            • kangaroo-system: kangaroo
                                            • gmagpie-system: gmagpie
                                            • dowl-system: dowl

                                            Check whether the above namespaces exist in the current cluster and, for each of them, execute the following command to add the annotation scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]' .

                                            kubectl annotate ns <namespace-name> scheduler.alpha.kubernetes.io/defaultTolerations='[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]'\n
                                            Please make sure to replace <namespace-name> with the name of the platform namespace you want to add the annotation to.

                                          3. Use the interface to set exclusive nodes for the namespace

                                            After confirming that the PodNodeSelector and PodTolerationRestriction admission controllers on the cluster API server have been enabled, please follow the steps below to use the AI platform UI management interface to set exclusive nodes for the namespace.

                                            1. Click the cluster name in the cluster list page, then click Namespace in the left navigation bar.

                                            2. Click the namespace name, then click the Exclusive Node tab, and click Add Node on the bottom right.

                                            3. Select which nodes you want to be exclusive to this namespace on the left side of the page. On the right side, you can clear or delete a selected node. Finally, click OK at the bottom.

                                            4. You can view the current exclusive nodes for this namespace in the list. You can choose to Stop Exclusivity on the right side of the node.

                                              After cancelling exclusivity, Pods from other namespaces can also be scheduled to this node.
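                                              Under the hood, the UI automates something roughly equivalent to the commands below; the node-selector value and the taint key are illustrative assumptions about the platform's implementation, matching the toleration key used in the annotation above:

                                              # illustrative only; the actual selector and taint are managed by the platform\nkubectl annotate ns <namespace-name> scheduler.alpha.kubernetes.io/node-selector=exclusive=<namespace-name>\nkubectl taint nodes <node-name> ExclusiveNamespace=:NoSchedule\n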

                                          "},{"location":"en/admin/kpanda/namespaces/exclusive.html#enable-namespace-exclusive-nodes-on-non-global-clusters","title":"Enable Namespace Exclusive Nodes on Non-Global Clusters","text":"

                                          To enable namespace exclusive nodes on non-Global clusters, please follow the steps below:

                                          1. Enable the PodNodeSelector and PodTolerationRestriction admission controllers for the kube-apiserver of the current cluster

                                            Note

                                            If the cluster has already enabled the above two admission controllers, please skip this step and go directly to using the interface to set exclusive nodes for the namespace.

                                            Go to any Master node in the current cluster to modify the kube-apiserver.yaml configuration file, or execute the following command on the Master node for configuration:

                                            [root@g-master1 ~]# vi /etc/kubernetes/manifests/kube-apiserver.yaml\n\n# The relevant file content is as follows:\napiVersion: v1\nkind: Pod\nmetadata:\n    ......\nspec:\ncontainers:\n- command:\n    - kube-apiserver\n    ......\n    - --default-not-ready-toleration-seconds=300\n    - --default-unreachable-toleration-seconds=300\n    - --enable-admission-plugins=NodeRestriction   #List of enabled admission controllers\n    - --enable-aggregator-routing=False\n    - --enable-bootstrap-token-auth=true\n    - --endpoint-reconciler-type=lease\n    - --etcd-cafile=/etc/kubernetes/ssl/etcd/ca.crt\n    ......\n

                                            Find the --enable-admission-plugins parameter and add the PodNodeSelector and PodTolerationRestriction admission controllers (separated by commas). Refer to the following:

                                            # Add PodNodeSelector and PodTolerationRestriction\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction\n
                                          2. Use the interface to set exclusive nodes for the namespace

                                            After confirming that the PodNodeSelector and PodTolerationRestriction admission controllers on the cluster API server have been enabled, please follow the steps below to use the AI platform UI management interface to set exclusive nodes for the namespace.

                                            1. Click the cluster name in the cluster list page, then click Namespace in the left navigation bar.

                                            2. Click the namespace name, then click the Exclusive Node tab, and click Add Node on the bottom right.

                                            3. Select which nodes you want to be exclusive to this namespace on the left side of the page. On the right side, you can clear or delete a selected node. Finally, click OK at the bottom.

                                            4. You can view the current exclusive nodes for this namespace in the list. You can choose to Stop Exclusivity on the right side of the node.

                                              After cancelling exclusivity, Pods from other namespaces can also be scheduled to this node.

                                          3. Add toleration annotations to the namespace where the components that need high availability are located (optional)

                                            Execute the following command to add the annotation: scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]' to the namespace where the components that need high availability are located.

                                            kubectl annotate ns <namespace-name> scheduler.alpha.kubernetes.io/defaultTolerations='[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]'\n

                                            Please make sure to replace <namespace-name> with the name of the platform namespace you want to add the annotation to.

                                          "},{"location":"en/admin/kpanda/namespaces/podsecurity.html","title":"Pod Security Policy","text":"

                                          Pod security policies in a Kubernetes cluster allow you to control the security-related behavior of Pods by configuring different levels and modes for specific namespaces; only Pods that meet certain conditions are accepted by the system. Kubernetes defines three levels and three modes, so users can choose the most suitable combination of restriction policies according to their needs.

                                          Note

                                          Only one security policy can be configured for one security mode. Please be careful when configuring the enforce security mode for a namespace, as violations will prevent Pods from being created.

                                          This section will introduce how to configure Pod security policies for namespaces through the container management interface.

                                          "},{"location":"en/admin/kpanda/namespaces/podsecurity.html#prerequisites","title":"Prerequisites","text":"
                                          • The container management module has integrated or created a Kubernetes cluster. The cluster version needs to be v1.22 or above, and the cluster's UI must be accessible.

                                          • A namespace and a user have been created, and the user has been granted NS Admin or higher permissions. For details, refer to Namespace Authorization.

                                          "},{"location":"en/admin/kpanda/namespaces/podsecurity.html#configure-pod-security-policies-for-namespace","title":"Configure Pod Security Policies for Namespace","text":"
                                          1. Select the namespace for which you want to configure Pod security policies and go to the details page. Click Configure Policy on the Pod Security Policy page to go to the configuration page.

                                          2. Click Add Policy on the configuration page, and a policy will appear, including a security level and a security mode. The following is a detailed introduction to the security levels and security modes.

                                            Security levels:

                                            • Privileged: an unrestricted policy that provides the widest possible range of permissions and allows known privilege escalations.
                                            • Baseline: a minimally restrictive policy that prohibits known privilege escalations and allows the default (minimally specified) Pod configuration.
                                            • Restricted: a highly restrictive policy that follows current best practices for hardening Pods.

                                            Security modes:

                                            • Audit: violations of the specified policy add new audit events to the audit log, and the Pod can be created.
                                            • Warn: violations of the specified policy return user-visible warnings, and the Pod can be created.
                                            • Enforce: violations of the specified policy prevent the Pod from being created.

                                          3. Different security levels correspond to different check items. If you are not sure how to configure your namespace, click Policy ConfigMap Explanation at the top right corner of the page to view detailed information.

                                          4. Click Confirm. If the creation is successful, the security policy you configured will appear on the page.

                                          5. Click \u2507 to edit or delete the security policy you configured.
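                                          Assuming the platform implements these policies with the standard Pod Security admission namespace labels (a reasonable guess for v1.22+ clusters, not confirmed by this document), an equivalent manual configuration would be:

                                          # label values are examples; pick the level per mode that fits your namespace\nkubectl label ns <namespace-name> pod-security.kubernetes.io/enforce=baseline pod-security.kubernetes.io/warn=restricted pod-security.kubernetes.io/audit=restricted\n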

                                          "},{"location":"en/admin/kpanda/network/create-ingress.html","title":"Create an Ingress","text":"

                                          In a Kubernetes cluster, Ingress exposes HTTP and HTTPS routes from outside the cluster to Services within the cluster. Traffic routing is controlled by rules defined on the Ingress resource. Here is an example of a simple Ingress that sends all traffic to the same Service:
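                                          The manifest below is a minimal illustration of such an Ingress; the names are placeholders:

                                          apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: minimal-ingress   # placeholder name\nspec:\n  rules:\n  - http:\n      paths:\n      - path: /\n        pathType: Prefix\n        backend:\n          service:\n            name: my-service   # placeholder: the Service receiving all traffic\n            port:\n              number: 80\n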

                                          Ingress is an API object that manages external access to services in the cluster, and the typical access method is HTTP. Ingress can provide load balancing, SSL termination, and name-based virtual hosting.

                                          "},{"location":"en/admin/kpanda/network/create-ingress.html#prerequisites","title":"Prerequisites","text":"
                                          • The container management module has integrated or created a Kubernetes cluster, and the cluster's UI is accessible.
                                          • A namespace and a user have been created, and the user has been granted the NS Editor role. For details, refer to Namespace Authorization.
                                          • Completed Create Ingress Instance and Deploy Application Workload, and created the proper Service.
                                          • When there are multiple containers in a single instance, make sure that the ports used by the containers do not conflict; otherwise the deployment will fail.
                                          "},{"location":"en/admin/kpanda/network/create-ingress.html#create-ingress","title":"Create ingress","text":"
                                          1. After successfully logging in as the NS Editor user, click Clusters in the upper left corner to enter the Clusters page. In the list of clusters, click a cluster name.

                                          2. In the left navigation bar, click Container Network -> Ingress to enter the service list, and click the Create Ingress button in the upper right corner.

                                            Note

                                            It is also possible to Create from YAML .

                                          3. Open the Create Ingress page to configure it. There are two protocol types to choose from; refer to the following two parameter tables for configuration.

                                          "},{"location":"en/admin/kpanda/network/create-ingress.html#create-http-protocol-ingress","title":"Create HTTP protocol ingress","text":"Parameter Description Example value Ingress name [Type] Required[Meaning] Enter the name of the new ingress. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and start with a lowercase English letter, lowercase English letters or numbers. Ing-01 Namespace [Type] Required[Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and start with a lowercase English letter and end with a lowercase English letter or number. default Protocol [Type] Required [Meaning] Refers to the protocol that authorizes inbound access to the cluster service, and supports HTTP (no identity authentication required) or HTTPS (identity authentication needs to be configured) protocol. Here select the ingress of HTTP protocol. HTTP Domain Name [Type] Required [Meaning] Use the domain name to provide external access services. The default is the domain name of the cluster testing.daocloud.io LB Type [Type] Required [Meaning] The usage range of the Ingress instance. Scope of use of Ingress Platform-level load balancer : In the same cluster, share the same Ingress instance, where all Pods can receive requests distributed by the load balancer. Tenant-level load balancer : Tenant load balancer, the Ingress instance belongs exclusively to the current namespace, or belongs to a certain workspace, and the set workspace includes the current namespace, and all Pods can receive it Requests distributed by this load balancer. Platform Level Load Balancer Ingress Class [Type] Optional[Meaning] Select the proper Ingress instance, and import traffic to the specified Ingress instance after selection. When it is None, the default DefaultClass is used. Please set the DefaultClass when creating an Ingress instance. For more information, refer to Ingress Class< br /> Ngnix Session persistence [Type] Optional[Meaning] Session persistence is divided into three types: L4 source address hash , Cookie Key , L7 Header Name . Keep L4 Source Address Hash : : When enabled, the following tag is added to the Annotation by default: nginx.ingress.kubernetes.io/upstream-hash-by: \"\\(binary_remote_addr\"<br /> __Cookie Key__ : When enabled, the connection from a specific client will be passed to the same Pod. After enabled, the following parameters are added to the Annotation by default:<br /> nginx.ingress.kubernetes.io/affinity: \"cookie\"<br /> nginx.ingress.kubernetes .io/affinity-mode: persistent<br /> __L7 Header Name__ : After enabled, the following tag is added to the Annotation by default: nginx.ingress.kubernetes.io/upstream-hash-by: \"\\)http_x_forwarded_for\" Close Path Rewriting [Type] Optional [Meaning] rewrite-target , in some cases, the URL exposed by the backend service is different from the path specified in the Ingress rule. If no URL rewriting configuration is performed, There will be an error when accessing. close Redirect [Type] Optional[Meaning] permanent-redirect , permanent redirection, after entering the rewriting path, the access path will be redirected to the set address. 
close Traffic Distribution [Type] Optional[Meaning] After enabled and set, traffic distribution will be performed according to the set conditions. Based on weight : After setting the weight, add the following Annotation to the created Ingress: nginx.ingress.kubernetes.io/canary-weight: \"10\" Based on Cookie : set After the cookie rules, the traffic will be distributed according to the set cookie conditions Based on Header : After setting the header rules, the traffic will be distributed according to the set header conditions Close Labels [Type] Optional [Meaning] Add a label for the ingress - Annotations [Type] Optional [Meaning] Add annotation for ingress -"},{"location":"en/admin/kpanda/network/create-ingress.html#create-https-protocol-ingress","title":"Create HTTPS protocol ingress","text":"Parameter Description Example value Ingress name [Type] Required[Meaning] Enter the name of the new ingress. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and start with a lowercase English letter, lowercase English letters or numbers. Ing-01 Namespace [Type] Required[Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and start with a lowercase English letter and end with a lowercase English letter or number. default Protocol [Type] Required [Meaning] Refers to the protocol that authorizes inbound access to the cluster service, and supports HTTP (no identity authentication required) or HTTPS (identity authentication needs to be configured) protocol. Here select the ingress of HTTPS protocol. HTTPS Domain Name [Type] Required [Meaning] Use the domain name to provide external access services. The default is the domain name of the cluster testing.daocloud.io Secret [Type] Required [Meaning] Https TLS certificate, Create Secret. Forwarding policy [Type] Optional[Meaning] Specify the access policy of Ingress. Path: Specifies the URL path for service access, the default is the root path/directoryTarget service: Service name for ingressTarget service port: Port exposed by the service LB Type [Type] Required [Meaning] The usage range of the Ingress instance. Platform-level load balancer : In the same cluster, the same Ingress instance is shared, and all Pods can receive requests distributed by the load balancer. Tenant-level load balancer : Tenant load balancer, the Ingress instance belongs exclusively to the current namespace or to a certain workspace. This workspace contains the current namespace, and all Pods can receive the workload from this Balanced distribution of requests. Platform Level Load Balancer Ingress Class [Type] Optional[Meaning] Select the proper Ingress instance, and import traffic to the specified Ingress instance after selection. When it is None, the default DefaultClass is used. Please set the DefaultClass when creating an Ingress instance. For more information, refer to Ingress Class< br /> None Session persistence [Type] Optional[Meaning] Session persistence is divided into three types: L4 source address hash , Cookie Key , L7 Header Name . 
Keep L4 Source Address Hash : : When enabled, the following tag is added to the Annotation by default: nginx.ingress.kubernetes.io/upstream-hash-by: \"\\(binary_remote_addr\"<br /> __Cookie Key__ : When enabled, the connection from a specific client will be passed to the same Pod. After enabled, the following parameters are added to the Annotation by default:<br /> nginx.ingress.kubernetes.io/affinity: \"cookie\"<br /> nginx.ingress.kubernetes .io/affinity-mode: persistent<br /> __L7 Header Name__ : After enabled, the following tag is added to the Annotation by default: nginx.ingress.kubernetes.io/upstream-hash-by: \"\\)http_x_forwarded_for\" Close Labels [Type] Optional [Meaning] Add a label for the ingress Annotations [Type] Optional[Meaning] Add annotation for ingress"},{"location":"en/admin/kpanda/network/create-ingress.html#create-ingress-successfully","title":"Create ingress successfully","text":"

                                          After configuring all the parameters, click the OK button to return to the ingress list automatically. On the right side of the list, click \u2507 to modify or delete the selected ingress.

                                          "},{"location":"en/admin/kpanda/network/create-services.html","title":"Create a Service","text":"

                                          In a Kubernetes cluster, each Pod has an internal, independent IP address, but Pods in a workload may be created and deleted at any time, so directly using Pod IP addresses cannot provide services reliably.

                                          This requires creating a service through which you get a fixed IP address, decoupling the front end of the workload from its back end and allowing external users to access the service. The service also provides the Load Balancer feature, enabling users to access workloads from the public network.
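                                          As a minimal sketch using the example values from the ClusterIP table below, a Service that fronts Pods labeled app: job01 could look like this:

                                          apiVersion: v1\nkind: Service\nmetadata:\n  name: svc-01   # placeholder name\n  namespace: default\nspec:\n  type: ClusterIP\n  selector:\n    app: job01   # must match the Pod labels\n  ports:\n  - name: http\n    protocol: TCP\n    port: 80          # service port exposed inside the cluster\n    targetPort: 8080  # container port the workload listens on\n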

                                          "},{"location":"en/admin/kpanda/network/create-services.html#prerequisites","title":"Prerequisites","text":"
                                          • The container management module has integrated a Kubernetes cluster or created one, and the cluster UI is accessible.

                                          • A namespace and a user have been created, and the user has been granted the NS Editor role. For details, refer to Namespace Authorization.

                                          • When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict, otherwise the deployment will fail.

                                          "},{"location":"en/admin/kpanda/network/create-services.html#create-service","title":"Create service","text":"
                                          1. After successfully logging in as the NS Editor user, click Clusters in the upper left corner to enter the Clusters page. In the list of clusters, click a cluster name.

                                          2. In the left navigation bar, click Container Network -> Service to enter the service list, and click the Create Service button in the upper right corner.

                                            !!! tip

                                             It is also possible to create a service via __YAML__ .\n
                                          3. Open the Create Service page, select an access type, and refer to the following three parameter tables for configuration.

                                          "},{"location":"en/admin/kpanda/network/create-services.html#create-clusterip-service","title":"Create ClusterIP service","text":"

                                          Click Intra-Cluster Access (ClusterIP) , which refers to exposing services through the internal IP of the cluster. The services selected for this option can only be accessed within the cluster. This is the default service type. Refer to the configuration parameters in the table below.

                                          Parameter Description Example value Access type [Type] Required[Meaning] Specify the method of Pod service discovery; here select intra-cluster access (ClusterIP). ClusterIP Service Name [Type] Required[Meaning] Enter the name of the new service. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and must start with a lowercase English letter and end with a lowercase English letter or number. svc-01 Namespace [Type] Required[Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and must start with a lowercase English letter and end with a lowercase English letter or number. default Label selector [Type] Required[Meaning] Add a label; the Service selects Pods according to the label. Click \"Add\" after filling it in. You can also reference the labels of an existing workload: click Reference workload label , select the workload in the pop-up window, and the system will use the selected workload's labels as the selector by default. app:job01 Port configuration [Type] Required[Meaning] To add a protocol port for a service, first select the port protocol type. Currently TCP and UDP are supported. Port Name: enter the name of the custom port. Service port (port): the access port through which the Pod provides services externally. Container port (targetport): the container port on which the workload actually listens, used to expose the service within the cluster. Session Persistence [Type] Optional [Meaning] When enabled, requests from the same client will be forwarded to the same Pod. Enabled Maximum session hold time [Type] Optional [Meaning] After session persistence is enabled, the maximum hold time; 30 seconds by default. 30 seconds Annotation [Type] Optional[Meaning] Add an annotation for the service"},{"location":"en/admin/kpanda/network/create-services.html#create-nodeport-service","title":"Create NodePort service","text":"

                                          Click NodePort , which means exposing the service via an IP and static port ( NodePort ) on each node. A NodePort service is routed to the automatically created ClusterIP service. You can access a NodePort service from outside the cluster by requesting NodeIP:NodePort . Refer to the configuration parameters in the table below. Parameter Description Example value Access type [Type] Required[Meaning] Specify the method of Pod service discovery; here select node access (NodePort). NodePort Service Name [Type] Required[Meaning] Enter the name of the new service. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and must start with a lowercase English letter and end with a lowercase English letter or number. svc-01 Namespace [Type] Required[Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and must start with a lowercase English letter and end with a lowercase English letter or number. default Label selector [Type] Required[Meaning] Add a label; the Service selects Pods according to the label. Click \"Add\" after filling it in. You can also reference the labels of an existing workload: click Reference workload label , select the workload in the pop-up window, and the system will use the selected workload's labels as the selector by default. Port configuration [Type] Required[Meaning] To add a protocol port for a service, first select the port protocol type. Currently TCP and UDP are supported. Port Name: enter the name of the custom port. Service port (port): the access port through which the Pod provides services externally; by default, the service port is set to the same value as the container port field for convenience. Container port (targetport): the container port on which the workload actually listens. Node port (nodeport): the port opened on each node; it serves as the entry point for external traffic, which is then routed to the service. Session Persistence [Type] Optional [Meaning] When enabled, requests from the same client will be forwarded to the same Pod. When enabled, .spec.sessionAffinity of the Service is ClientIP ; for details, refer to Session Affinity for Service. Enabled Maximum session hold time [Type] Optional [Meaning] After session persistence is enabled, the maximum hold time; .spec.sessionAffinityConfig.clientIP.timeoutSeconds is set to 30 seconds by default. 30 seconds Annotation [Type] Optional[Meaning] Add an annotation for the service"},{"location":"en/admin/kpanda/network/create-services.html#create-loadbalancer-service","title":"Create LoadBalancer service","text":"

                                          Click Load Balancer , which refers to using the cloud provider's load balancer to expose services to the outside. External load balancers can route traffic to automatically created NodePort services and ClusterIP services. Refer to the configuration parameters in the table below.

                                          Parameter Description Example value Access type [Type] Required[Meaning] Specify the method of Pod service discovery; here select load balancer access (LoadBalancer). LoadBalancer Service Name [Type] Required[Meaning] Enter the name of the new service. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and must start with a lowercase English letter and end with a lowercase English letter or number. svc-01 Namespace [Type] Required[Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and must start with a lowercase English letter and end with a lowercase English letter or number. default External Traffic Policy [Type] Required[Meaning] Set the external traffic policy. Cluster: traffic can be forwarded to Pods on all nodes in the cluster. Local: traffic is only sent to Pods on the node that received it. Label selector [Type] Required [Meaning] Add a label; the Service selects Pods according to the label. Click \"Add\" after filling it in. You can also reference the labels of an existing workload: click Reference workload label , select the workload in the pop-up window, and the system will use the selected workload's labels as the selector by default. Load balancing type [Type] Required [Meaning] The type of load balancer used; currently MetalLB and others are supported. MetalLB IP Pool [Type] Required[Meaning] When the selected load balancing type is MetalLB, the LoadBalancer service will allocate IP addresses from this pool by default, and announce all IP addresses in this pool via ARP. For details, refer to Install MetalLB. Load balancing address [Type] Required[Meaning] 1. If you are using a public cloud CloudProvider, fill in the load balancing address provided by the cloud provider here. 2. If the load balancing type is MetalLB, the IP is obtained from the above IP pool by default; if left empty, it is obtained automatically. Port configuration [Type] Required[Meaning] To add a protocol port for a service, first select the port protocol type. Currently TCP and UDP are supported. Port Name: enter the name of the custom port. Service port (port): the access port through which the Pod provides services externally; by default, the service port is set to the same value as the container port field for convenience. Container port (targetport): the container port on which the workload actually listens. Node port (nodeport): the port opened on each node; it serves as the entry point for external traffic, which is then routed to the service. Annotation [Type] Optional[Meaning] Add an annotation for the service"},{"location":"en/admin/kpanda/network/create-services.html#complete-service-creation","title":"Complete service creation","text":"

                                          After configuring all parameters, click the OK button to return to the service list automatically. On the right side of the list, click \u2507 to modify or delete the selected service.
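                                          For reference, a minimal NodePort Service manifest with session persistence, matching the parameters described above, might look like the following sketch; the names, labels, and ports are placeholders:

                                            apiVersion: v1\nkind: Service\nmetadata:\n  name: svc-01\n  namespace: default\nspec:\n  type: NodePort\n  selector:\n    app: job01                # label selector for the backend Pods\n  sessionAffinity: ClientIP   # session persistence\n  sessionAffinityConfig:\n    clientIP:\n      timeoutSeconds: 30      # maximum session hold time\n  ports:\n  - name: http\n    protocol: TCP\n    port: 80            # service port\n    targetPort: 8080    # container port the workload listens on\n    nodePort: 30080     # static port opened on every node\n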

                                          "},{"location":"en/admin/kpanda/network/network-policy.html","title":"Network Policies","text":"

                                          Network policies in Kubernetes allow you to control network traffic at the IP address or port level (OSI layer 3 or layer 4). The container management module currently supports creating network policies based on Pods or namespaces, using label selectors to specify which traffic can enter or leave Pods with specific labels.

                                          For more details on network policies, refer to the official Kubernetes documentation on Network Policies.

                                          "},{"location":"en/admin/kpanda/network/network-policy.html#creating-network-policies","title":"Creating Network Policies","text":"

                                          Currently, there are two methods available for creating network policies: YAML and form-based creation. Each method has its advantages and disadvantages, catering to different user needs.

                                          YAML creation requires fewer steps and is more efficient, but it has a higher learning curve as it requires familiarity with configuring network policy YAML files.

                                          Form-based creation is more intuitive and straightforward. Users can simply fill in the proper values based on the prompts. However, this method involves more steps.

                                          "},{"location":"en/admin/kpanda/network/network-policy.html#yaml-creation","title":"YAML Creation","text":"
                                          1. In the cluster list, click the name of the target cluster, then navigate to Container Network -> Network Policies -> Create with YAML in the left navigation bar.

                                          2. In the pop-up dialog, enter or paste the pre-prepared YAML file, then click OK at the bottom of the dialog.
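                                          For reference, a minimal NetworkPolicy that could be pasted into this dialog might look like the following sketch; the namespace, labels, and port are placeholders:

                                            apiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n  name: allow-frontend\n  namespace: default\nspec:\n  podSelector:\n    matchLabels:\n      app: backend        # Pods this policy applies to\n  policyTypes:\n  - Ingress\n  ingress:\n  - from:\n    - podSelector:\n        matchLabels:\n          app: frontend   # only Pods with this label may connect\n    ports:\n    - protocol: TCP\n      port: 8080\n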

                                          "},{"location":"en/admin/kpanda/network/network-policy.html#form-based-creation","title":"Form-Based Creation","text":"
                                          1. In the cluster list, click the name of the target cluster, then navigate to Container Network -> Network Policies -> Create Policy in the left navigation bar.

                                          2. Fill in the basic information.

                                            The name and namespace cannot be changed after creation.

                                          3. Fill in the policy configuration.

                                            The policy configuration includes ingress and egress policies. To establish a successful connection from a source Pod to a target Pod, both the egress policy of the source Pod and the ingress policy of the target Pod need to allow the connection. If either side does not allow the connection, the connection will fail.

                                            • Ingress Policy: Click \u2795 to begin configuring the policy. Multiple policies can be configured. The effects of multiple network policies are additive: a connection is allowed as long as at least one of the policies that select the Pod permits it.

                                            • Egress Policy

                                          "},{"location":"en/admin/kpanda/network/network-policy.html#viewing-network-policies","title":"Viewing Network Policies","text":"
                                          1. In the cluster list, click the name of the target cluster, then navigate to Container Network -> Network Policies . Click the name of the network policy.

                                          2. View the basic configuration, associated instances, ingress policies, and egress policies of the policy.

                                          Info

                                          Under the \"Associated Instances\" tab, you can view instance monitoring, logs, container lists, YAML files, events, and more.

                                          "},{"location":"en/admin/kpanda/network/network-policy.html#updating-network-policies","title":"Updating Network Policies","text":"

                                          There are two ways to update network policies. You can either update them through the form or by using a YAML file.

                                          • On the network policy list page, find the policy you want to update, and choose Update in the action column on the right to update it via the form. Choose Edit YAML to update it using a YAML file.

                                          • Click the name of the network policy, then choose Update in the top right corner of the policy details page to update it via the form. Choose Edit YAML to update it using a YAML file.

                                          "},{"location":"en/admin/kpanda/network/network-policy.html#deleting-network-policies","title":"Deleting Network Policies","text":"

                                          There are two ways to delete network policies. You can delete network policies either through the form or by using a YAML file.

                                          • On the network policy list page, find the policy you want to delete, and choose Delete in the action column on the right to delete it via the form. Choose Edit YAML to delete it using a YAML file.

                                          • Click the name of the network policy, then choose Delete in the top right corner of the policy details page to delete it via the form. Choose Edit YAML to delete it using a YAML file.

                                          "},{"location":"en/admin/kpanda/nodes/add-node.html","title":"Cluster Node Expansion","text":"

                                          As the number of business applications continues to grow, cluster resources become increasingly strained. At this point, you can expand the cluster nodes based on kubean. After the expansion, applications can run on the newly added nodes, relieving the resource pressure.

                                          Only clusters created through the container management module support node scaling; clusters integrated from outside do not support this operation. This article mainly introduces expanding worker nodes of the same architecture in a worker cluster. If you need to add control-plane nodes or heterogeneous worker nodes to the cluster, refer to: Expanding the control node of the worker cluster, Adding heterogeneous nodes to the worker cluster, and Expanding the worker node of the global service cluster.

                                          1. On the Clusters page, click the name of the target cluster.

                                            If the Cluster Type contains the label Integrated Cluster , it means that the cluster does not support node scaling.

                                          2. Click Nodes in the left navigation bar, and then click Integrate Node in the upper right corner of the page.

                                          3. Enter the host name and node IP and click OK.

                                            Click \u2795 Add Worker Node to continue accessing more nodes.

                                          Note

                                          Integrating a node takes about 20 minutes, so please be patient.

                                          "},{"location":"en/admin/kpanda/nodes/delete-node.html","title":"Node Scales Down","text":"

                                          When the business peak has passed, you can reduce the size of the cluster and remove redundant nodes to save resource costs; this is node scale-down. After a node is removed, applications can no longer run on it.

                                          "},{"location":"en/admin/kpanda/nodes/delete-node.html#prerequisites","title":"Prerequisites","text":"
                                          • The current operating user has the Cluster Admin role authorization.
                                          • Only clusters created through the container management module support node scaling; clusters integrated from outside do not support this operation.
                                          • Before removing a node, you need to cordon it (pause scheduling) and evict the applications on it to other nodes.
                                          • Eviction method: log in to a control-plane node and use the kubectl drain command to evict all Pods on the node. This safe eviction method allows the containers in the Pods to terminate gracefully, as shown in the example below.
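                                          A typical cordon-and-drain sequence looks like the following; the node name is a placeholder:

                                            # Pause scheduling, then safely evict all Pods from the node\nkubectl cordon node-01\nkubectl drain node-01 --ignore-daemonsets --delete-emptydir-data\n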
                                          "},{"location":"en/admin/kpanda/nodes/delete-node.html#precautions","title":"Precautions","text":"
                                          1. When scaling down a cluster, nodes can only be removed one by one, not in batches.

                                          2. If you need to remove cluster controller nodes, ensure that the final number of controller nodes is an odd number.

                                          3. The first controller node cannot be offline when the cluster node scales down. If it is necessary to perform this operation, please contact the after-sales engineer.

                                          "},{"location":"en/admin/kpanda/nodes/delete-node.html#steps","title":"Steps","text":"
                                          1. On the Clusters page, click the name of the target cluster.

                                            If the Cluster Type has the label Integrated Cluster , it means that the cluster does not support node scaling.

                                          2. Click Nodes on the left navigation bar, find the node to be uninstalled, click \u2507 and select Remove .

                                          3. Enter the node name, and click Delete to confirm.

                                          "},{"location":"en/admin/kpanda/nodes/labels-annotations.html","title":"Labels and Annotations","text":"

                                          Labels are identifying key-value pairs added to Kubernetes objects such as Pods, nodes, and clusters, which can be combined with label selectors to find and filter Kubernetes objects that meet certain conditions. Each key must be unique for a given object.

                                          Annotations, like labels, are key/value pairs, but they do not have identification or filtering features. Annotations can be used to add arbitrary metadata to nodes. Annotation keys usually use the format prefix(optional)/name(required) , for example nfd.node.kubernetes.io/extended-resources . If the prefix is omitted, it means that the annotation key is private to the user.

                                          For more information about labels and annotations, refer to the official Kubernetes documentation on Labels and Selectors or Annotations.

                                          The steps to add or delete labels and annotations are as follows:

                                          1. On the Clusters page, click the name of the target cluster.

                                          2. Click Nodes on the left navigation bar, click the \u2507 operation icon on the right side of the node, and click Edit Labels or Edit Annotations .

                                          3. Click \u2795 Add to add labels or annotations, click X to delete labels or annotations, and finally click OK . The equivalent kubectl commands are shown below.
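                                          For reference, the same changes can be made with kubectl; the node name, label, and annotation below are placeholders:

                                            # Add a label and an annotation to a node\nkubectl label nodes node-01 env=prod\nkubectl annotate nodes node-01 example.com/notes='maintained by team-a'\n\n# A trailing dash deletes the key again\nkubectl label nodes node-01 env-\nkubectl annotate nodes node-01 example.com/notes-\n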

                                          "},{"location":"en/admin/kpanda/nodes/node-authentication.html","title":"Node Authentication","text":""},{"location":"en/admin/kpanda/nodes/node-authentication.html#authenticate-nodes-using-ssh-keys","title":"Authenticate Nodes Using SSH Keys","text":"

                                          If you choose to authenticate the nodes of the cluster-to-be-created using SSH keys, you need to configure the public and private keys according to the following instructions.

                                          1. Run the following command on any node within the management cluster of the cluster-to-be-created to generate the public and private keys.

                                            cd /root/.ssh\nssh-keygen -t rsa\n
                                          2. Run the ls command to check if the keys have been successfully created in the management cluster. The correct output should be as follows:

                                            ls\nid_rsa  id_rsa.pub  known_hosts\n

                                            The file named id_rsa is the private key, and the file named id_rsa.pub is the public key.

                                          3. Run the following command to load the public key file id_rsa.pub onto all the nodes of the cluster-to-be-created.

                                            ssh-copy-id -i /root/.ssh/id_rsa.pub root@10.0.0.0\n

                                            Replace the user account and node IP in the above command with the username and IP of the nodes in the cluster-to-be-created. The same operation needs to be performed on every node in the cluster-to-be-created.
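                                            If the cluster-to-be-created has many nodes, a small loop avoids repeating the command; the IP list below is a placeholder to replace with the real node IPs:

                                            # Distribute the public key to every node of the cluster-to-be-created\nfor ip in 10.0.0.1 10.0.0.2 10.0.0.3; do\n  ssh-copy-id -i /root/.ssh/id_rsa.pub root@$ip\ndone\n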

                                          4. Run the following command to view the private key file id_rsa created in step 1.

                                            cat /root/.ssh/id_rsa\n

                                            The output should be as follows:

                                            -----BEGIN RSA PRIVATE KEY-----\nMIIEpQIBAAKCAQEA3UvyKINzY5BFuemQ+uJ6q+GqgfvnWwNC8HzZhpcMSjJy26MM\nUtBEBJxy8fMi57XcjYxPibXW/wnd+32ICCycqCwByUmuXeCC1cjlCQDqjcAvXae7\nY54IXGF7wm2IsMNwf0kjFEXjuS48FLDA0mGRaN3BG+Up5geXcHckg3K5LD8kXFFx\ndEmSIjdyw55NaUitmEdHzN7cIdfi6Z56jcV8dcFBgWKUx+ebiyPmZBkXToz6GnMF\nrswzzZCl+G6Jb2xTGy7g7ozb4BoZd1IpSD5EhDanRrESVE0C5YuJ5zUAC0CvVd1l\nv67AK8Ko6MXToHp01/bcsvlM6cqgwUFXZKVeOwIDAQABAoIBAQCO36GQlo3BEjxy\nM2HvGJmqrx+unDxafliRe4nVY2AD515Qf4xNSzke4QM1QoyenMOwf446krQkJPK0\nk+9nl6Xszby5gGCbK4BNFk8I6RaGPjZWeRx6zGUJf8avWJiPxx6yjz2esSC9RiR0\nF0nmiiefVMyAfgv2/5++dK2WUFNNRKLgSRRpP5bRaD5wMzzxtSSXrUon6217HO8p\n3RoWsI51MbVzhdVgpHUNABcoa0rpr9svT6XLKZxY8mxpKFYjM0Wv2JIDABg3kBvh\nQbJ7kStCO3naZjKMU9UuSqVJs06cflGYw7Or8/tABR3LErNQKPjkhAQqt0DXw7Iw\n3tKdTAJBAoGBAP687U7JAOqQkcphek2E/A/sbO/d37ix7Z3vNOy065STrA+ZWMZn\npZ6Ui1B/oJpoZssnfvIoz9sn559X0j67TljFALFd2ZGS0Fqh9KVCqDvfk+Vst1dq\n+3r/yZdTOyswoccxkJiC/GDwZGK0amJWqvob39JCZhDAKIGLbGMmjdAHAoGBAN5k\nm1WGnni1nZ+3dryIwgB6z1hWcnLTamzSET6KhSuo946ET0IRG9xtlheCx6dqICbr\nVk1Y4NtRZjK/p/YGx59rDWf7E3I8ZMgR7mjieOcUZ4lUlA4l7ZIlW/2WZHW+nUXO\nTi20fqJ8qSp4BUvOvuth1pz2GLUHe2/Fxjf7HIstAoGBAPHpPr9r+TfIlPsJeRj2\n6lzA3G8qWFRQfGRYjv0fjv0pA+RIb1rzgP/I90g5+63G6Z+R4WdcxI/OJJNY1iuG\nuw9n/pFxm7U4JC990BPE6nj5iLz+clpNGYckNDBF9VG9vFSrSDLdaYkxoVNvG/xJ\na9Na90H4lm7f3VewrPy310KvAoGAZr+mwNoEh5Kpc6xo8Gxi7aPP/mlaUVD6X7Ki\ngvmu02AqmC7rC4QqEiqTaONkaSXwGusqIWxJ3yp5hELmUBYLzszAEeV/s4zRp1oZ\ng133LBRSTbHFAdBmNdqK6Nu+KGRb92980UMOKvZbliKDl+W6cbfvVu+gtKrzTc3b\naevb4TUCgYEAnJAxyVYDP1nJf7bjBSHXQu1E/DMwbtrqw7dylRJ8cAzI7IxfSCez\n7BYWq41PqVd9/zrb3Pbh2phiVzKe783igAIMqummcjo/kZyCwFsYBzK77max1jF5\naPQsLbRS2aDz8kIH6jHPZ/R+15EROmdtLmA7vIJZGerWWQR0dUU+XXA=\n

                                          Copy the content of the private key and paste it into the interface's key input field.

                                          "},{"location":"en/admin/kpanda/nodes/node-check.html","title":"Create a cluster node availability check","text":"

                                          When creating a cluster or adding nodes to an existing cluster, refer to the table below to check the node configuration to avoid cluster creation or expansion failure due to wrong node configuration.

                                          Check Item Description OS Refer to Supported Architectures and Operating Systems SELinux Off Firewall Off Architecture Consistency Consistent CPU architecture between nodes (such as ARM or x86) Host Time The time difference between all hosts is within 10 seconds. Network Connectivity The node and its SSH port can be accessed normally by the platform. CPU Available CPU resources are greater than 4 Cores Memory Available memory resources are greater than 8 GB"},{"location":"en/admin/kpanda/nodes/node-check.html#supported-architectures-and-operating-systems","title":"Supported architectures and operating systems","text":"Architecture Operating System Remarks ARM Kylin Linux Advanced Server release V10 (Sword) SP2 Recommended ARM UOS Linux ARM openEuler x86 CentOS 7.x Recommended x86 Redhat 7.x Recommended x86 Redhat 8.x Recommended x86 Flatcar Container Linux by Kinvolk x86 Debian Bullseye, Buster, Jessie, Stretch x86 Ubuntu 16.04, 18.04, 20.04, 22.04 x86 Fedora 35, 36 x86 Fedora CoreOS x86 openSUSE Leap 15.x/Tumbleweed x86 Oracle Linux 7, 8, 9 x86 Alma Linux 8, 9 x86 Rocky Linux 8, 9 x86 Amazon Linux 2 x86 Kylin Linux Advanced Server release V10 (Sword) - SP2 Haiguang x86 UOS Linux x86 openEuler"},{"location":"en/admin/kpanda/nodes/node-details.html","title":"Node Details","text":"

                                          After accessing or creating a cluster, you can view the information of each node in the cluster, including node status, labels, resource usage, Pod, monitoring information, etc.

                                          1. On the Clusters page, click the name of the target cluster.

                                          2. Click Nodes on the left navigation bar to view the node status, role, label, CPU/memory usage, IP address, and creation time.

                                          3. Click the node name to enter the node details page to view more information, including overview information, pod information, label annotation information, event list, status, etc.

                                            In addition, you can also view the node's YAML file, monitoring information, labels and annotations, etc.

                                          "},{"location":"en/admin/kpanda/nodes/schedule.html","title":"Node Scheduling","text":"

                                          Supports suspending or resuming scheduling of nodes. Pausing scheduling means stopping the scheduling of Pods to the node. Resuming scheduling means that Pods can be scheduled to that node.

                                          1. On the Clusters page, click the name of the target cluster.

                                          2. Click Nodes on the left navigation bar, click the \u2507 operation icon on the right side of the node, and click the Cordon button to suspend scheduling the node.

                                          3. Click the \u2507 operation icon on the right side of the node, and click the Uncordon button to resume scheduling the node.

                                          The node scheduling status may be delayed due to network conditions. Click the refresh icon on the right side of the search box to refresh the node scheduling status.
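                                          For reference, the same operations can be performed with kubectl; the node name is a placeholder:

                                            kubectl cordon node-01     # pause scheduling; STATUS shows Ready,SchedulingDisabled\nkubectl uncordon node-01   # resume scheduling\n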

                                          "},{"location":"en/admin/kpanda/nodes/taints.html","title":"Node Taints","text":"

                                          Taints allow a node to repel a certain class of Pods, preventing them from being scheduled on that node. One or more taints can be applied to each node, and Pods that do not tolerate these taints will not be scheduled on it.

                                          "},{"location":"en/admin/kpanda/nodes/taints.html#precautions","title":"Precautions","text":"
                                          1. The current operating user should have NS Editor role authorization or other higher permissions.
                                          2. After adding a taint to a node, only Pods that can tolerate the taint can be scheduled to the node.
                                          "},{"location":"en/admin/kpanda/nodes/taints.html#steps","title":"Steps","text":"
                                          1. Find the target cluster on the Clusters page, and click the cluster name to enter the Cluster page.

                                          2. In the left navigation bar, click Nodes , find the node that needs to modify the taint, click the \u2507 operation icon on the right and click the Edit Taints button.

                                          3. Enter the key value information of the taint in the pop-up box, select the taint effect, and click OK .

                                            Click \u2795 Add to add multiple taints to the node, and click X on the right side of the taint effect to delete the taint.

                                            Currently supports three taint effects:

                                            • NoExecute: This affects pods that are already running on the node as follows:

                                              • Pods that do not tolerate the taint are evicted immediately
                                              • Pods that tolerate the taint without specifying tolerationSeconds in their toleration specification remain bound forever
                                              • Pods that tolerate the taint with a specified tolerationSeconds remain bound for the specified amount of time. After that time elapses, the node lifecycle controller evicts the Pods from the node.
                                            • NoSchedule: No new Pods will be scheduled on the tainted node unless they have a matching toleration. Pods currently running on the node are not evicted.

                                            • PreferNoSchedule: This is a \"preference\" or \"soft\" version of NoSchedule. The control plane will try to avoid placing a Pod that does not tolerate the taint on the node, but this is not guaranteed, so this taint is not recommended for use in a production environment.

                                          For more details about taints, refer to the Kubernetes documentation Taints and Tolerance.
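                                          For reference, a Pod tolerates a taint by declaring a matching toleration in its spec; a minimal sketch with placeholder key, value, and image:

                                            apiVersion: v1\nkind: Pod\nmetadata:\n  name: demo\nspec:\n  containers:\n  - name: app\n    image: nginx\n  tolerations:\n  - key: \"dedicated\"      # must match the taint key\n    operator: \"Equal\"\n    value: \"gpu\"          # must match the taint value\n    effect: \"NoSchedule\"  # must match the taint effect\n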

                                          "},{"location":"en/admin/kpanda/olm/import-miniooperator.html","title":"Importing MinIo Operator Offline","text":"

                                          This guide explains how to import the MinIO Operator in an offline environment without internet access.

                                          "},{"location":"en/admin/kpanda/olm/import-miniooperator.html#prerequisites","title":"Prerequisites","text":"
                                          • The current cluster has been integrated into container management, and the Global cluster has the kolm component installed (search the Helm templates for kolm).
                                          • The current cluster has the olm component installed, version 0.2.4 or higher (search the Helm templates for olm).
                                          • Ability to execute Docker commands.
                                          • Prepare a container registry.
                                          "},{"location":"en/admin/kpanda/olm/import-miniooperator.html#steps","title":"Steps","text":"
                                          1. Set the environment variables in the execution environment and use them in the subsequent steps by running the following command:

                                            export OPM_IMG=10.5.14.200/quay.m.daocloud.io/operator-framework/opm:v1.29.0 \nexport BUNDLE_IMG=10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3 \n

                                            How to get the above image addresses:

                                            Go to Container Management -> Select the current cluster -> Helm Apps -> View the olm component -> Plugin Settings , and find the images needed for the opm, minio, minio bundle, and minio operator in the subsequent steps.

                                            Using the screenshot as an example, the four image addresses are as follows:\n\n# opm image\n10.5.14.200/quay.m.daocloud.io/operator-framework/opm:v1.29.0\n\n# minio image\n10.5.14.200/quay.m.daocloud.io/minio/minio:RELEASE.2023-03-24T21-41-23Z\n\n# minio bundle image\n10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3\n\n# minio operator image\n10.5.14.200/quay.m.daocloud.io/minio/operator:v5.0.3\n
                                          2. Run the opm command to get the operators included in the offline bundle image.

                                            # Create the operator directory\n$ mkdir minio-operator && cd minio-operator \n\n# Get the operator yaml\n$ docker run --user root -v $PWD/minio-operator:/minio-operator ${OPM_IMG} alpha bundle unpack --skip-tls-verify -v -d ${BUNDLE_IMG} -o ./minio-operator\n\n# Expected result\n.\n\u2514\u2500\u2500 minio-operator\n    \u251c\u2500\u2500 manifests\n    \u2502   \u251c\u2500\u2500 console-env_v1_configmap.yaml\n    \u2502   \u251c\u2500\u2500 console-sa-secret_v1_secret.yaml\n    \u2502   \u251c\u2500\u2500 console_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 minio-operator.clusterserviceversion.yaml\n    \u2502   \u251c\u2500\u2500 minio.min.io_tenants.yaml\n    \u2502   \u251c\u2500\u2500 operator_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 sts.min.io_policybindings.yaml\n    \u2502   \u2514\u2500\u2500 sts_v1_service.yaml\n    \u2514\u2500\u2500 metadata\n        \u2514\u2500\u2500 annotations.yaml\n\n3 directories, 9 files\n
                                          3. Replace all image addresses in the minio-operator/manifests/minio-operator.clusterserviceversion.yaml file with the image addresses from the offline container registry.

                                            Before replacement:

                                            After replacement:

                                          4. Generate a Dockerfile for building the bundle image.

                                            $ docker run --user root -v $PWD:/minio-operator -w /minio-operator ${OPM_IMG} alpha bundle generate --channels stable,beta -d /minio-operator/minio-operator/manifests -e stable -p minio-operator \u00a0\n\n# Expected result\n.\n\u251c\u2500\u2500 bundle.Dockerfile\n\u2514\u2500\u2500 minio-operator\n    \u251c\u2500\u2500 manifests\n    \u2502   \u251c\u2500\u2500 console-env_v1_configmap.yaml\n    \u2502   \u251c\u2500\u2500 console-sa-secret_v1_secret.yaml\n    \u2502   \u251c\u2500\u2500 console_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 minio-operator.clusterserviceversion.yaml\n    \u2502   \u251c\u2500\u2500 minio.min.io_tenants.yaml\n    \u2502   \u251c\u2500\u2500 operator_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 sts.min.io_policybindings.yaml\n    \u2502   \u2514\u2500\u2500 sts_v1_service.yaml\n    \u2514\u2500\u2500 metadata\n        \u2514\u2500\u2500 annotations.yaml\n\n3 directories, 10 files\n
                                          5. Build the bundle image and push it to the offline registry.

                                            # Set the new bundle image\nexport OFFLINE_BUNDLE_IMG=10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3-offline \n\n$ docker build . -f bundle.Dockerfile -t ${OFFLINE_BUNDLE_IMG} \u00a0\n\n$ docker push ${OFFLINE_BUNDLE_IMG}\n
                                          6. Generate a Dockerfile for building the catalog image.

                                            $ docker run --user root -v $PWD:/minio-operator -w /minio-operator ${OPM_IMG} index add  --bundles ${OFFLINE_BUNDLE_IMG} --generate --binary-image ${OPM_IMG} --skip-tls-verify\n\n# Expected result\n.\n\u251c\u2500\u2500 bundle.Dockerfile\n\u251c\u2500\u2500 database\n\u2502   \u2514\u2500\u2500 index.db\n\u251c\u2500\u2500 index.Dockerfile\n\u2514\u2500\u2500 minio-operator\n    \u251c\u2500\u2500 manifests\n    \u2502   \u251c\u2500\u2500 console-env_v1_configmap.yaml\n    \u2502   \u251c\u2500\u2500 console-sa-secret_v1_secret.yaml\n    \u2502   \u251c\u2500\u2500 console_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 minio.min.io_tenants.yaml\n    \u2502   \u251c\u2500\u2500 minio-operator.clusterserviceversion.yaml\n    \u2502   \u251c\u2500\u2500 operator_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 sts.min.io_policybindings.yaml\n    \u2502   \u2514\u2500\u2500 sts_v1_service.yaml\n    \u2514\u2500\u2500 metadata\n        \u2514\u2500\u2500 annotations.yaml\n\n4 directories, 12 files\n
                                          7. Build the catalog image.

                                            # Set the new catalog image  \nexport OFFLINE_CATALOG_IMG=10.5.14.200/release.daocloud.io/operator-framework/system-operator-index:v0.1.0-offline\n\n$ docker build . -f index.Dockerfile -t ${OFFLINE_CATALOG_IMG}  \n\n$ docker push ${OFFLINE_CATALOG_IMG}\n
                                          8. Go to Container Management and update the built-in catsrc image for the Helm App olm , setting it to the catalog image built in the previous step ( ${catalog-image} ), as shown in the sketch below.
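                                            If the built-in CatalogSource object is reachable with kubectl, a merge patch along the following lines has the same effect; the CatalogSource name and namespace here are assumptions that depend on how olm was installed:

                                            kubectl patch catalogsource system-operator-index -n olm --type merge -p '{\"spec\":{\"image\":\"10.5.14.200/release.daocloud.io/operator-framework/system-operator-index:v0.1.0-offline\"}}'\n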

                                          9. After the update is successful, the minio-operator component will appear in the Operator Hub.

                                          "},{"location":"en/admin/kpanda/permissions/cluster-ns-auth.html","title":"Cluster and Namespace Authorization","text":"

                                          Container management implements authorization based on global permission management and global user/group management. If you need to grant users the highest authority for container management (the ability to create, manage, and delete all clusters), refer to What is Access Control.

                                          "},{"location":"en/admin/kpanda/permissions/cluster-ns-auth.html#prerequisites","title":"Prerequisites","text":"

                                          Before authorizing users/groups, complete the following preparations:

                                          • The user/group to be authorized has been created in the global management, refer to user.

                                          • Only Kpanda Owner and Cluster Admin of the current cluster have Cluster authorization capability. For details, refer to Permission Description.

                                          • Only Kpanda Owner , Cluster Admin of the current cluster, and NS Admin of the current namespace have namespace authorization capability.

                                          "},{"location":"en/admin/kpanda/permissions/cluster-ns-auth.html#cluster-authorization","title":"Cluster Authorization","text":"
                                          1. After the user logs in to the platform, click Privilege Management under Container Management on the left menu bar, which is located on the Cluster Permissions tab by default.

                                          2. Click the Add Authorization button.

                                          3. On the Add Cluster Permission page, select the target cluster, the user/group to be authorized, and click OK .

                                            Currently, the only cluster role supported is Cluster Admin . For details about permissions, refer to Permission Description. If you need to authorize multiple users/groups at the same time, you can click Add User Permissions to add multiple times.

                                          4. Return to the cluster permission management page, and a message appears on the screen: Cluster permission added successfully .

                                          "},{"location":"en/admin/kpanda/permissions/cluster-ns-auth.html#namespace-authorization","title":"Namespace Authorization","text":"
                                          1. After the user logs in to the platform, click Permissions under Container Management on the left menu bar, and click the Namespace Permissions tab.

                                          2. Click the Add Authorization button. On the Add Namespace Permission page, select the target cluster, target namespace, and user/group to be authorized, and click OK .

                                            The currently supported namespace roles are NS Admin, NS Editor, and NS Viewer. For details about permissions, refer to Permission Description. If you need to authorize multiple users/groups at the same time, you can click Add User Permission to add multiple times. Click OK to complete the permission authorization.

                                          3. Return to the namespace permission management page, and a message appears on the screen: Namespace permission added successfully .

                                            Tip

                                            If you need to delete or edit permissions later, you can click \u2507 on the right side of the list and select Edit or Delete .

                                          "},{"location":"en/admin/kpanda/permissions/custom-kpanda-role.html","title":"Adding RBAC Rules to System Roles","text":"

                                          In the past, the RBAC rules for those system roles in container management were pre-defined and could not be modified by users. To support more flexible permission settings and to meet the customized needs for system roles, now you can modify RBAC rules for system roles such as cluster admin, ns admin, ns editor, ns viewer.

                                          The following example demonstrates how to add a new ns-view rule, granting the authority to delete workload deployments. Similar operations can be performed for other rules.

                                          "},{"location":"en/admin/kpanda/permissions/custom-kpanda-role.html#prerequisites","title":"Prerequisites","text":"

                                          Before adding RBAC rules to system roles, the following prerequisites must be met:

                                          • Container management v0.27.0 and above.
                                          • Integrated Kubernetes cluster or created Kubernetes cluster, and able to access the cluster's UI interface.
                                          • Completed creation of a namespace and user account, and the granting of NS Viewer. For details, refer to namespace authorization.

                                          Note

                                          • RBAC rules only need to be added in the Global Cluster, and the Kpanda controller will synchronize those added rules to all integrated subclusters. Synchronization may take some time to complete.
                                          • RBAC rules can only be added in the Global Cluster. RBAC rules added in subclusters will be overridden by the system role permissions of the Global Cluster.
                                          • Only ClusterRoles with fixed Label are supported for adding rules. Replacing or deleting rules is not supported, nor is adding rules by using role. The correspondence between built-in roles and ClusterRole Label created by users is as follows.

                                            cluster-admin: rbac.kpanda.io/role-template-cluster-admin: \"true\"\ncluster-edit: rbac.kpanda.io/role-template-cluster-edit: \"true\"\ncluster-view: rbac.kpanda.io/role-template-cluster-view: \"true\"\nns-admin: rbac.kpanda.io/role-template-ns-admin: \"true\"\nns-edit: rbac.kpanda.io/role-template-ns-edit: \"true\"\nns-view: rbac.kpanda.io/role-template-ns-view: \"true\"\n
                                          "},{"location":"en/admin/kpanda/permissions/custom-kpanda-role.html#steps","title":"Steps","text":"
                                          1. Create a deployment by a user with admin or cluster admin permissions.

                                          2. Grant a user the ns-viewer role to provide them with the ns-view permission.

                                          3. Switch the login user to ns-viewer, open the console to get the token for the ns-viewer user, and use curl to request and delete the nginx deployment mentioned above. However, a prompt appears as below, indicating the user doesn't have permission to delete it.

                                            [root@master-01 ~]# curl -k -X DELETE  'https://${URL}/apis/kpanda.io/v1alpha1/clusters/cluster-member/namespaces/default/deployments/nginx' -H 'authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJOU044MG9BclBRMzUwZ2VVU2ZyNy1xMEREVWY4MmEtZmJqR05uRE1sd1lFIn0.eyJleHAiOjE3MTU3NjY1NzksImlhdCI6MTcxNTY4MDE3OSwiYXV0aF90aW1lIjoxNzE1NjgwMTc3LCJqdGkiOiIxZjI3MzJlNC1jYjFhLTQ4OTktYjBiZC1iN2IxZWY1MzAxNDEiLCJpc3MiOiJodHRwczovLzEwLjYuMjAxLjIwMTozMDE0Ny9hdXRoL3JlYWxtcy9naGlwcG8iLCJhdWQiOiJfX2ludGVybmFsLWdoaXBwbyIsInN1YiI6ImMxZmMxM2ViLTAwZGUtNDFiYS05ZTllLWE5OGU2OGM0MmVmMCIsInR5cCI6IklEIiwiYXpwIjoiX19pbnRlcm5hbC1naGlwcG8iLCJzZXNzaW9uX3N0YXRlIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiYXRfaGFzaCI6IlJhTHoyQjlKQ2FNc1RrbGVMR3V6blEiLCJhY3IiOiIwIiwic2lkIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJncm91cHMiOltdLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJucy12aWV3ZXIiLCJsb2NhbGUiOiIifQ.As2ipMjfvzvgONAGlc9RnqOd3zMwAj82VXlcqcR74ZK9tAq3Q4ruQ1a6WuIfqiq8Kq4F77ljwwzYUuunfBli2zhU2II8zyxVhLoCEBu4pBVBd_oJyUycXuNa6HfQGnl36E1M7-_QG8b-_T51wFxxVb5b7SEDE1AvIf54NAlAr-rhDmGRdOK1c9CohQcS00ab52MD3IPiFFZ8_Iljnii-RpXKZoTjdcULJVn_uZNk_SzSUK-7MVWmPBK15m6sNktOMSf0pCObKWRqHd15JSe-2aA2PKBo1jBH3tHbOgZyMPdsLI0QdmEnKB5FiiOeMpwn_oHnT6IjT-BZlB18VkW8rA'\n{\"code\":7,\"message\":\"[RBAC] delete resources(deployments: nginx) is forbidden for user(ns-viewer) in cluster(cluster-member)\",\"details\":[]}[root@master-01 ~]#\n[root@master-01 ~]#\n
                                          4. Create a ClusterRole on the global cluster, as shown in the yaml below.

                                            apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: append-ns-view # (1)!\n  labels:\n    rbac.kpanda.io/role-template-ns-view: \"true\" # (2)!\nrules:\n  - apiGroups: [ \"apps\" ]\n    resources: [ \"deployments\" ]\n    verbs: [ \"delete\" ]\n
                                            1. This field value can be arbitrarily specified, as long as it is not duplicated and complies with the Kubernetes resource naming conventions.
                                            2. When adding rules to different roles, make sure to apply different labels.
                                          5. Wait for the kpanda controller to synchronize the user-created rule into the built-in role ns-viewer, then check whether the rules added in the previous step are present for ns-viewer.

                                            [root@master-01 ~]# kubectl get clusterrole role-template-ns-view -oyaml|grep deployments -C 10|tail -n 6\n
                                            - apiGroups:\n  - apps\n  resources:\n  - deployments\n  verbs:\n  - delete\n

                                          6. Using curl again to request deletion of the aforementioned nginx deployment, the deletion now succeeds. This means that the rule to delete deployments has been successfully added to ns-viewer.

                                            [root@master-01 ~]# curl -k -X DELETE  'https://${URL}/apis/kpanda.io/v1alpha1/clusters/cluster-member/namespaces/default/deployments/nginx' -H 'authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJOU044MG9BclBRMzUwZ2VVU2ZyNy1xMEREVWY4MmEtZmJqR05uRE1sd1lFIn0.eyJleHAiOjE3MTU3NjY1NzksImlhdCI6MTcxNTY4MDE3OSwiYXV0aF90aW1lIjoxNzE1NjgwMTc3LCJqdGkiOiIxZjI3MzJlNC1jYjFhLTQ4OTktYjBiZC1iN2IxZWY1MzAxNDEiLCJpc3MiOiJodHRwczovLzEwLjYuMjAxLjIwMTozMDE0Ny9hdXRoL3JlYWxtcy9naGlwcG8iLCJhdWQiOiJfX2ludGVybmFsLWdoaXBwbyIsInN1YiI6ImMxZmMxM2ViLTAwZGUtNDFiYS05ZTllLWE5OGU2OGM0MmVmMCIsInR5cCI6IklEIiwiYXpwIjoiX19pbnRlcm5hbC1naGlwcG8iLCJzZXNzaW9uX3N0YXRlIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiYXRfaGFzaCI6IlJhTHoyQjlKQ2FNc1RrbGVMR3V6blEiLCJhY3IiOiIwIiwic2lkIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJncm91cHMiOltdLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJucy12aWV3ZXIiLCJsb2NhbGUiOiIifQ.As2ipMjfvzvgONAGlc9RnqOd3zMwAj82VXlcqcR74ZK9tAq3Q4ruQ1a6WuIfqiq8Kq4F77ljwwzYUuunfBli2zhU2II8zyxVhLoCEBu4pBVBd_oJyUycXuNa6HfQGnl36E1M7-_QG8b-_T51wFxxVb5b7SEDE1AvIf54NAlAr-rhDmGRdOK1c9CohQcS00ab52MD3IPiFFZ8_Iljnii-RpXKZoTjdcULJVn_uZNk_SzSUK-7MVWmPBK15m6sNktOMSf0pCObKWRqHd15JSe-2aA2PKBo1jBH3tHbOgZyMPdsLI0QdmEnKB5FiiOeMpwn_oHnT6IjT-BZlB18VkW8rA'\n
                                          "},{"location":"en/admin/kpanda/permissions/permission-brief.html","title":"Container Management Permissions","text":"

                                          Container management permissions are based on a multi-dimensional permission management system created by global permission management and Kubernetes RBAC permission management. It supports cluster-level and namespace-level permission control, helping users to conveniently and flexibly set different operation permissions for IAM users and groups (collections of users) under a tenant.

                                          "},{"location":"en/admin/kpanda/permissions/permission-brief.html#cluster-permissions","title":"Cluster Permissions","text":"

                                          Cluster permissions are authorized based on Kubernetes RBAC's ClusterRoleBinding, allowing users/groups to have cluster-related permissions. The current default cluster role is Cluster Admin (does not have the permission to create or delete clusters).

                                          "},{"location":"en/admin/kpanda/permissions/permission-brief.html#cluster-admin","title":"Cluster Admin","text":"

                                          Cluster Admin has the following permissions:

                                          • Can manage, edit, and view the proper cluster
                                          • Manage, edit, and view all workloads and all resources within the namespace
                                          • Can authorize users for roles within the cluster (Cluster Admin, NS Admin, NS Editor, NS Viewer)

                                          The YAML example for this cluster role is as follows:

                                          apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:49Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-cluster-admin\n  resourceVersion: \"15168\"\n  uid: f8f86d42-d5ef-47aa-b284-097615795076\nrules:\n- apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n- nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'\n
                                          "},{"location":"en/admin/kpanda/permissions/permission-brief.html#namespace-permissions","title":"Namespace Permissions","text":"

                                          Namespace permissions are authorized based on Kubernetes RBAC capabilities, allowing different users/groups to have different operation permissions on resources under a namespace (including Kubernetes API permissions). For details, refer to: Kubernetes RBAC. Currently, the default roles for container management are: NS Admin, NS Editor, NS Viewer.

                                          "},{"location":"en/admin/kpanda/permissions/permission-brief.html#ns-admin","title":"NS Admin","text":"

                                          NS Admin has the following permissions:

                                          • Can view the proper namespace
                                          • Manage, edit, and view all workloads and custom resources within the namespace
                                          • Can authorize users for proper namespace roles (NS Editor, NS Viewer)

                                          The YAML example for this cluster role is as follows:

                                          apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:49Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-admin\n  resourceVersion: \"15173\"\n  uid: 69f64c7e-70e7-4c7c-a3e0-053f507f2bc3\nrules:\n- apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n- nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'    \n
                                          "},{"location":"en/admin/kpanda/permissions/permission-brief.html#ns-editor","title":"NS Editor","text":"

                                          NS Editor has the following permissions:

                                          • Can view proper namespaces where permissions are granted
                                          • Manage, edit, and view all workloads within the namespace
                                          Click to view the YAML example of the cluster role
                                          apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:50Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-edit\n  resourceVersion: \"15175\"\n  uid: ca9e690e-96c0-4978-8915-6e4c00c748fe\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - configmaps\n  - endpoints\n  - persistentvolumeclaims\n  - persistentvolumeclaims/status\n  - pods\n  - replicationcontrollers\n  - replicationcontrollers/scale\n  - serviceaccounts\n  - services\n  - services/status\n  verbs:\n  - '*'\n- apiGroups:\n  - \"\"\n  resources:\n  - bindings\n  - events\n  - limitranges\n  - namespaces/status\n  - pods/log\n  - pods/status\n  - replicationcontrollers/status\n  - resourcequotas\n  - resourcequotas/status\n  verbs:\n  - '*'\n- apiGroups:\n  - \"\"\n  resources:\n  - namespaces\n  verbs:\n  - '*'\n- apiGroups:\n  - apps\n  resources:\n  - controllerrevisions\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - statefulsets\n  - statefulsets/scale\n  - statefulsets/status\n  verbs:\n  - '*'\n- apiGroups:\n  - autoscaling\n  resources:\n  - horizontalpodautoscalers\n  - horizontalpodautoscalers/status\n  verbs:\n  - '*'\n- apiGroups:\n  - batch\n  resources:\n  - cronjobs\n  - cronjobs/status\n  - jobs\n  - jobs/status\n  verbs:\n  - '*'\n- apiGroups:\n  - extensions\n  resources:\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - replicationcontrollers/scale\n  verbs:\n  - '*'\n- apiGroups:\n  - policy\n  resources:\n  - poddisruptionbudgets\n  - poddisruptionbudgets/status\n  verbs:\n  - '*'\n- apiGroups:\n  - networking.k8s.io\n  resources:\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  verbs:\n  - '*'      \n
                                          "},{"location":"en/admin/kpanda/permissions/permission-brief.html#ns-viewer","title":"NS Viewer","text":"

                                          NS Viewer has the following permissions:

• Can view the corresponding namespace
• Can view all workloads and custom resources within the corresponding namespace
                                          Click to view the YAML example of the cluster role
                                          apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:50Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-view\n  resourceVersion: \"15183\"\n  uid: 853888fd-6ee8-42ac-b91e-63923918baf8\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - configmaps\n  - endpoints\n  - persistentvolumeclaims\n  - persistentvolumeclaims/status\n  - pods\n  - replicationcontrollers\n  - replicationcontrollers/scale\n  - serviceaccounts\n  - services\n  - services/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - \"\"\n  resources:\n  - bindings\n  - events\n  - limitranges\n  - namespaces/status\n  - pods/log\n  - pods/status\n  - replicationcontrollers/status\n  - resourcequotas\n  - resourcequotas/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - \"\"\n  resources:\n  - namespaces\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - apps\n  resources:\n  - controllerrevisions\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - statefulsets\n  - statefulsets/scale\n  - statefulsets/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - autoscaling\n  resources:\n  - horizontalpodautoscalers\n  - horizontalpodautoscalers/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - batch\n  resources:\n  - cronjobs\n  - cronjobs/status\n  - jobs\n  - jobs/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - extensions\n  resources:\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - replicationcontrollers/scale\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - policy\n  resources:\n  - poddisruptionbudgets\n  - poddisruptionbudgets/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - networking.k8s.io\n  resources:\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  verbs:\n  - get\n  - list\n  - watch \n
                                          "},{"location":"en/admin/kpanda/permissions/permission-brief.html#permissions-faq","title":"Permissions FAQ","text":"
                                          1. What is the relationship between global permissions and container management permissions?

  Answer: Global permissions grant only coarse-grained access, such as creating, editing, and deleting any cluster. Fine-grained permissions, such as managing a single cluster or managing, editing, and deleting resources within a single namespace, are implemented through Kubernetes RBAC in container management. In general, users only need to be authorized in container management.

                                          2. Currently, only four default roles are supported. Can the RoleBinding and ClusterRoleBinding (Kubernetes fine-grained RBAC) for custom roles also take effect?

  Answer: Custom permissions cannot currently be managed through the graphical interface, but permission rules created with kubectl still take effect.
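
  For illustration, a minimal custom role applied with kubectl might look like the sketch below (the role name, namespace, and user are hypothetical):

  # Hypothetical example: a read-only Role for ConfigMaps plus a binding,
  # applied with kubectl apply -f custom-role.yaml; all names are illustrative.
  apiVersion: rbac.authorization.k8s.io/v1
  kind: Role
  metadata:
    name: configmap-reader        # hypothetical role name
    namespace: demo-ns            # hypothetical namespace
  rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "watch"]
  ---
  apiVersion: rbac.authorization.k8s.io/v1
  kind: RoleBinding
  metadata:
    name: configmap-reader-binding
    namespace: demo-ns
  subjects:
  - kind: User
    name: example-user            # hypothetical user
    apiGroup: rbac.authorization.k8s.io
  roleRef:
    kind: Role
    name: configmap-reader
    apiGroup: rbac.authorization.k8s.io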

                                          "},{"location":"en/admin/kpanda/scale/create-hpa.html","title":"Create HPA","text":"

Suanova AI platform supports elastic scaling of Pod resources based on metrics (Horizontal Pod Autoscaling, HPA). Users can dynamically adjust the number of Pod replicas by setting CPU utilization, memory usage, and custom metrics. For example, after setting an auto scaling policy based on CPU utilization for a workload, when the CPU utilization of its Pods rises above or falls below the threshold you set, the workload controller automatically increases or decreases the number of Pod replicas accordingly.

                                          This page describes how to configure auto scaling based on built-in metrics and custom metrics for workloads.

                                          Note

1. HPA applies only to Deployments and StatefulSets, and each workload can have only one HPA.
2. If you create an HPA policy based on CPU utilization, you must set the resource request (Request) for the workload in advance; otherwise the CPU utilization cannot be calculated.
3. If built-in metrics and multiple custom metrics are used at the same time, HPA calculates the required number of replicas for each metric separately and takes the largest value (without exceeding the maximum number of replicas configured in the HPA policy).
                                          "},{"location":"en/admin/kpanda/scale/create-hpa.html#built-in-metric-elastic-scaling-policy","title":"Built-in metric elastic scaling policy","text":"

The system provides two built-in elastic scaling metrics, CPU and memory, to cover users' basic business cases.

                                          "},{"location":"en/admin/kpanda/scale/create-hpa.html#prerequisites","title":"Prerequisites","text":"

Before configuring a built-in metric auto scaling policy for the workload, the following prerequisites must be met:

• Integrated or created a Kubernetes cluster, and you can access the cluster's UI.

                                          • Created a namespace, deployment or statefulset.

                                          • You should have permissions not lower than NS Editor. For details, refer to Namespace Authorization.

• Installed the metrics-server plugin.

                                          "},{"location":"en/admin/kpanda/scale/create-hpa.html#steps","title":"Steps","text":"

Refer to the following steps to configure a built-in metric auto scaling policy for the workload.

                                          1. Click Clusters on the left navigation bar to enter the cluster list page. Click a cluster name to enter the Cluster Details page.

                                          2. On the cluster details page, click Workload in the left navigation bar to enter the workload list, and then click a workload name to enter the Workload Details page.

                                          3. Click the Auto Scaling tab to view the auto scaling configuration of the current cluster.

4. After confirming that the metrics-server plugin is installed and running normally in the cluster, click the New Scaling button.

5. Configure the auto scaling policy parameters.

  • Policy name: Enter the name of the auto scaling policy. Please note that the name can contain up to 63 characters, can only contain lowercase letters, numbers, and separators ("-"), and must start and end with a lowercase letter or number, such as hpa-my-dep.
  • Namespace: The namespace where the workload resides.
  • Workload: The workload object that performs auto scaling.
  • Target CPU Utilization: The CPU utilization of the Pods under the workload, calculated as actual CPU usage divided by the total CPU requests of all Pods under the workload. When the actual CPU utilization is higher or lower than the target value, the system automatically increases or decreases the number of Pod replicas accordingly.
  • Target Memory Usage: The memory usage of the Pods under the workload. When the actual memory usage is higher or lower than the target value, the system automatically increases or decreases the number of Pod replicas accordingly.
  • Replica range: the range within which the number of Pod replicas can scale. The default interval is 1 - 10.
6. After completing the parameter configuration, click the OK button to automatically return to the elastic scaling details page. Click ┇ on the right side of the list to edit, delete, or view related events.
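
Conceptually, the policy created above maps to a standard Kubernetes HPA object. A minimal sketch, assuming a Deployment named my-dep and illustrative target values:

# Sketch of the equivalent HPA object; names and thresholds are illustrative.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: hpa-my-dep
  namespace: default
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: my-dep
  minReplicas: 1               # lower bound of the replica range
  maxReplicas: 10              # upper bound of the replica range
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 60 # target CPU utilization in percent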

                                          "},{"location":"en/admin/kpanda/scale/create-vpa.html","title":"Create VPAs","text":"

The Vertical Pod Autoscaler (VPA) calculates the most suitable CPU and memory request values for a Pod by monitoring its resource requests and usage over a period of time. VPA allocates resources to each Pod in the cluster more reasonably, improving overall cluster resource utilization and avoiding waste.

AI platform supports VPA for containers. Based on this feature, Pod request values can be dynamically adjusted according to actual container resource usage. The platform supports both manual and automatic modification of resource request values, which you can configure according to actual needs.

This page describes how to configure VPA for deployments.

                                          Warning

                                          Using VPA to modify a Pod resource request will trigger a Pod restart. Due to the limitations of Kubernetes itself, Pods may be scheduled to other nodes after restarting.

                                          "},{"location":"en/admin/kpanda/scale/create-vpa.html#prerequisites","title":"Prerequisites","text":"

                                          Before configuring a vertical scaling policy for deployment, the following prerequisites must be met:

• Integrated or created a Kubernetes cluster in the Container Management module, and you can access the cluster's UI.

• Created a namespace, a user, and a Deployment or StatefulSet.

                                          • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                          • The current cluster has installed metrics-server and VPA plugins.

                                          "},{"location":"en/admin/kpanda/scale/create-vpa.html#steps","title":"Steps","text":"

Refer to the following steps to configure a vertical scaling (VPA) policy for the deployment.

                                          1. Find the current cluster in Clusters , and click the name of the target cluster.

                                          2. Click Deployments in the left navigation bar, find the deployment that needs to create a VPA, and click the name of the deployment.

                                          3. Click the Auto Scaling tab to view the auto scaling configuration of the current cluster, and confirm that the relevant plug-ins have been installed and are running normally.

                                          4. Click the Create Autoscaler button and configure the VPA vertical scaling policy parameters.

  • Policy name: Enter the name of the vertical scaling policy. Please note that the name can contain up to 63 characters, can only contain lowercase letters, numbers, and separators ("-"), and must start and end with a lowercase letter or number, such as vpa-my-dep.
  • Scaling mode: How the CPU and memory request values are modified. Vertical scaling currently supports manual and automatic scaling modes.
                                              • Manual scaling: After the vertical scaling policy calculates the recommended resource configuration value, the user needs to manually modify the resource quota of the application.
                                              • Auto-scaling: The vertical scaling policy automatically calculates and modifies the resource quota of the application.
                                            • Target container: Select the container to be scaled vertically.
5. After completing the parameter configuration, click the OK button to automatically return to the elastic scaling details page. Click ┇ on the right side of the list to perform edit and delete operations.
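
Under the hood, such a policy corresponds to a VerticalPodAutoscaler object. A minimal sketch, assuming a Deployment named my-dep; updateMode "Off" matches the manual mode described above and "Auto" the automatic mode:

# Sketch of a VPA object; names are illustrative.
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: vpa-my-dep
  namespace: default
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: my-dep
  updatePolicy:
    updateMode: "Off"           # "Off" = recommendations only (manual); "Auto" = apply automatically
  resourcePolicy:
    containerPolicies:
    - containerName: '*'        # or the name of the target container selected above
      controlledResources: ["cpu", "memory"]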

                                          "},{"location":"en/admin/kpanda/scale/custom-hpa.html","title":"Creating HPA Based on Custom Metrics","text":"

When the built-in CPU and memory metrics do not meet your business needs, you can add custom metrics by configuring a ServiceMonitor and auto-scale based on them. This page describes how to configure auto-scaling for workloads based on custom metrics.

                                          Note

                                          1. HPA is only applicable to Deployment and StatefulSet, and each workload can only create one HPA.
                                          2. If both built-in metrics and multiple custom metrics are used, HPA will calculate the required number of scaled replicas based on multiple metrics respectively, and take the larger value (but not exceeding the maximum number of replicas configured when setting the HPA policy) for scaling.
                                          "},{"location":"en/admin/kpanda/scale/custom-hpa.html#prerequisites","title":"Prerequisites","text":"

                                          Before configuring the custom metrics auto-scaling policy for workloads, the following prerequisites must be met:

                                          • Integrated Kubernetes cluster or created Kubernetes cluster, and able to access the cluster's UI interface.
                                          • Completed creation of a namespace, deployment, or statefulSet.
                                          • The current user should have permissions higher than NS Editor. For details, refer to namespace authorization.
                                          • metrics-server plugin has been installed.
                                          • insight-agent plugin has been installed.
                                          • Prometheus-adapter plugin has been installed.
                                          "},{"location":"en/admin/kpanda/scale/custom-hpa.html#steps","title":"Steps","text":"

                                          Refer to the following steps to configure the auto-scaling policy based on metrics for workloads.

                                          1. Click Clusters in the left navigation bar to enter the clusters page. Click a cluster name to enter the Cluster Overview page.

                                          2. On the Cluster Details page, click Workloads in the left navigation bar to enter the workload list, and click a workload name to enter the Workload Details page.

                                          3. Click the Auto Scaling tab to view the current autoscaling configuration of the cluster.

                                          4. Confirm that the cluster has installed metrics-server, Insight, and Prometheus-adapter plugins, and that the plugins are running normally, then click the Create AutoScaler button.

                                            Note

                                            If the related plugins are not installed or the plugins are in an abnormal state, you will not be able to see the entry for creating custom metrics auto-scaling on the page.

5. Configure the custom metric auto-scaling policy parameters.

                                            • Policy Name: Enter the name of the auto-scaling policy. Note that the name can be up to 63 characters long, can only contain lowercase letters, numbers, and separators (\"-\"), and must start and end with a lowercase letter or number, e.g., hpa-my-dep.
                                            • Namespace: The namespace where the workload is located.
                                            • Workload: The workload object that performs auto-scaling.
                                            • Resource Type: The type of custom metric being monitored, including Pod and Service types.
  • Metric: The name of the custom metric created via a ServiceMonitor, or the name of a system built-in custom metric.
                                            • Data Type: The method used to calculate the metric value, including target value and target average value. When the resource type is Pod, only the target average value can be used.
                                          "},{"location":"en/admin/kpanda/scale/custom-hpa.html#operation-example","title":"Operation Example","text":"

                                          This case takes a Golang business program as an example. The example program exposes the httpserver_requests_total metric and records HTTP requests. This metric can be used to calculate the QPS value of the business program.

                                          "},{"location":"en/admin/kpanda/scale/custom-hpa.html#deploy-business-program","title":"Deploy Business Program","text":"

                                          Use Deployment to deploy the business program:

                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: httpserver\n  namespace: httpserver\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: httpserver\n  template:\n    metadata:\n      labels:\n        app: httpserver\n    spec:\n      containers:\n      - name: httpserver\n        image: registry.imroc.cc/test/httpserver:custom-metrics\n        imagePullPolicy: Always\n---\n\napiVersion: v1\nkind: Service\nmetadata:\n  name: httpserver\n  namespace: httpserver\n  labels:\n    app: httpserver\n  annotations:\n    prometheus.io/scrape: \"true\"\n    prometheus.io/path: \"/metrics\"\n    prometheus.io/port: \"http\"\nspec:\n  type: ClusterIP\n  ports:\n  - port: 80\n    protocol: TCP\n    name: http\n  selector:\n    app: httpserver\n
                                          "},{"location":"en/admin/kpanda/scale/custom-hpa.html#prometheus-collects-business-monitoring","title":"Prometheus Collects Business Monitoring","text":"

                                          If the insight-agent is installed, Prometheus can be configured by creating a ServiceMonitor CRD object.

Operation steps: In Cluster Details -> Custom Resources, search for "servicemonitors.monitoring.coreos.com", click the name to enter its details, and create the following example CRD in the httpserver namespace via YAML:

                                          apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: httpserver\n  namespace: httpserver\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  endpoints:\n  - port: http\n    interval: 5s\n  namespaceSelector:\n    matchNames:\n    - httpserver\n  selector:\n    matchLabels:\n      app: httpserver\n

                                          Note

                                          If Prometheus is installed via insight, the serviceMonitor must be labeled with operator.insight.io/managed-by: insight. If installed by other means, this label is not required.

                                          "},{"location":"en/admin/kpanda/scale/custom-hpa.html#configure-metric-rules-in-prometheus-adapter","title":"Configure Metric Rules in Prometheus-adapter","text":"

Steps: In Clusters -> Helm Apps, search for "prometheus-adapter", enter the update page through the action bar, and configure the custom metric in YAML as follows:

                                          rules:\n  custom:\n    - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)\n      name:\n        as: httpserver_requests_qps\n        matches: httpserver_requests_total\n      resources:\n        template: <<.Resource>>\n      seriesQuery: httpserver_requests_total\n
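
After the rule takes effect, you can optionally confirm that the adapter exposes the new metric through the custom metrics API, for example:

# Assumes the httpserver example above; the metric appears once Prometheus has scraped data
kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1/namespaces/httpserver/pods/*/httpserver_requests_qps"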
                                          "},{"location":"en/admin/kpanda/scale/custom-hpa.html#create-custom-metrics-auto-scaling-policy-parameters","title":"Create Custom Metrics Auto-scaling Policy Parameters","text":"

Following the steps above, find the httpserver Deployment and create an auto-scaling policy based on the custom metric.
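
The resulting policy is equivalent to an HPA that references the custom Pod metric. A minimal sketch with illustrative replica bounds and target value:

# Sketch of an HPA using the custom metric defined above; values are illustrative.
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: hpa-httpserver
  namespace: httpserver
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: httpserver
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: Pods
    pods:
      metric:
        name: httpserver_requests_qps
      target:
        type: AverageValue     # Pod-type metrics use the target average value
        averageValue: "50"     # illustrative target QPS per Pod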

                                          "},{"location":"en/admin/kpanda/scale/hpa-cronhpa-compatibility-rules.html","title":"Compatibility Rules for HPA and CronHPA","text":"

                                          HPA stands for HorizontalPodAutoscaler, which refers to horizontal pod auto-scaling.

                                          CronHPA stands for Cron HorizontalPodAutoscaler, which refers to scheduled horizontal pod auto-scaling.

                                          "},{"location":"en/admin/kpanda/scale/hpa-cronhpa-compatibility-rules.html#conflict-between-cronhpa-and-hpa","title":"Conflict Between CronHPA and HPA","text":"

Scheduled scaling with CronHPA triggers horizontal pod scaling at specified times. To cope with sudden traffic surges, you may also have configured HPA for the same application. If both HPA and CronHPA are configured, conflicts arise because the two operate independently, unaware of each other; whichever acts last overrides the action of the one that acted first.

                                          By comparing the definition templates of CronHPA and HPA, the following points can be observed:

                                          • Both CronHPA and HPA use the scaleTargetRef field to identify the scaling target.
                                          • CronHPA schedules the number of replicas to scale based on crontab rules in jobs.
                                          • HPA determines scaling based on resource utilization.

                                          Note

                                          If both CronHPA and HPA are set, there will be scenarios where CronHPA and HPA simultaneously operate on a single scaleTargetRef.

                                          "},{"location":"en/admin/kpanda/scale/hpa-cronhpa-compatibility-rules.html#compatibility-solution-for-cronhpa-and-hpa","title":"Compatibility Solution for CronHPA and HPA","text":"

                                          As noted above, the fundamental reason that simultaneous use of CronHPA and HPA results in the later action overriding the earlier one is that the two controllers cannot sense each other. Therefore, the conflict can be resolved by enabling CronHPA to be aware of HPA's current state.

                                          The system will treat HPA as the scaling object for CronHPA, thus achieving scheduled scaling for the Deployment object defined by the HPA.

                                          HPA's definition configures the Deployment in the scaleTargetRef field, and then the Deployment uses its definition to locate the ReplicaSet, which ultimately adjusts the actual number of replicas.

                                          In AI platform, the scaleTargetRef in CronHPA is set to the HPA object, and it uses the HPA object to find the actual scaleTargetRef, allowing CronHPA to be aware of HPA's current state.

                                          CronHPA senses HPA by adjusting HPA. CronHPA determines whether scaling is needed and modifies the HPA upper limit by comparing the target number of replicas with the current number of replicas, choosing the larger value. Similarly, CronHPA determines whether to modify the HPA lower limit by comparing the target number of replicas from CronHPA with the configuration in HPA, choosing the smaller value.
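
As a sketch of what this looks like in practice (field names follow the upstream kubernetes-cronhpa-controller CRD; the schedules and sizes are illustrative):

# CronHPA whose scaleTargetRef points at the HPA object rather than the Deployment.
apiVersion: autoscaling.alibabacloud.com/v1beta1
kind: CronHorizontalPodAutoscaler
metadata:
  name: cronhpa-sample
  namespace: default
spec:
  scaleTargetRef:
    apiVersion: autoscaling/v2
    kind: HorizontalPodAutoscaler
    name: hpa-my-dep           # illustrative HPA name
  jobs:
  - name: scale-up-workhours
    schedule: "0 0 8 * * *"    # six-field cron: second minute hour day month weekday
    targetSize: 5
  - name: scale-down-night
    schedule: "0 0 20 * * *"
    targetSize: 2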

                                          "},{"location":"en/admin/kpanda/scale/install-cronhpa.html","title":"Install kubernetes-cronhpa-controller","text":"

CronHPA (scheduled horizontal pod autoscaling) provides a stable computing resource guarantee for periodically high-concurrency applications, and kubernetes-cronhpa-controller is the key component that implements CronHPA.

                                          This section describes how to install the kubernetes-cronhpa-controller plugin.

                                          Note

To use CronHPA, you must install not only the kubernetes-cronhpa-controller plugin but also the metrics-server plugin.

                                          "},{"location":"en/admin/kpanda/scale/install-cronhpa.html#prerequisites","title":"Prerequisites","text":"

                                          Before installing the kubernetes-cronhpa-controller plugin, the following prerequisites need to be met:

• Integrated or created a Kubernetes cluster in the Container Management module, and you can access the cluster's UI.

                                          • Create a namespace.

                                          • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                          "},{"location":"en/admin/kpanda/scale/install-cronhpa.html#steps","title":"Steps","text":"

                                          Refer to the following steps to install the kubernetes-cronhpa-controller plugin for the cluster.

                                          1. On the Clusters page, find the target cluster where the plugin needs to be installed, click the name of the cluster, then click Workloads -> Deployments on the left, and click the name of the target workload.

                                          2. On the workload details page, click the Auto Scaling tab, and click Install on the right side of CronHPA .

3. Read the introduction of the plugin, select the version, and click the Install button. Version 1.3.0 or later is recommended.

                                          4. Refer to the following instructions to configure the parameters.

                                            • Name: Enter the plugin name, please note that the name can be up to 63 characters, can only contain lowercase letters, numbers, and separators (\"-\"), and must start and end with lowercase letters or numbers, such as kubernetes-cronhpa-controller.
                                            • Namespace: Select which namespace the plugin will be installed in, here we take default as an example.
                                            • Version: The version of the plugin, here we take the 1.3.0 version as an example.
                                            • Ready Wait: When enabled, it will wait for all associated resources under the application to be in the ready state before marking the application installation as successful.
                                            • Failed to delete: If the plugin installation fails, delete the associated resources that have already been installed. When enabled, Wait will be enabled synchronously by default.
                                            • Detailed log: When enabled, a detailed log of the installation process will be recorded.

                                            Note

  After enabling Ready Wait and/or Failed to delete, the application may take a while to be marked as "running".

                                          5. Click OK in the lower right corner of the page, and the system will automatically jump to the Helm Apps list page. Wait a few minutes and refresh the page to see the application you just installed.

                                            Warning

                                            If you need to delete the kubernetes-cronhpa-controller plugin, you should go to the Helm Apps list page to delete it completely.

  If you delete the plugin under the Auto Scaling tab of the workload, only the workload's copy of the plugin is deleted; the plugin itself remains, and an error will be prompted when you reinstall it later.

                                          6. Go back to the Auto Scaling tab under the workload details page, and you can see that the interface displays Plug-in installed . Now it's time to start creating CronHPA policies.

                                          "},{"location":"en/admin/kpanda/scale/install-metrics-server.html","title":"Install metrics-server","text":"

metrics-server is the standard resource usage metrics collection component for Kubernetes. With it installed, you can automatically scale Pod replicas horizontally for workload resources by configuring HPA policies.

                                          This section describes how to install metrics-server .

                                          "},{"location":"en/admin/kpanda/scale/install-metrics-server.html#prerequisites","title":"Prerequisites","text":"

                                          Before installing the metrics-server plugin, the following prerequisites need to be met:

                                          • Integrated the Kubernetes cluster or created the Kubernetes cluster, and you can access the UI interface of the cluster.

                                          • Created a namespace.

                                          • You should have permissions not lower than NS Editor. For details, refer to Namespace Authorization.

                                          "},{"location":"en/admin/kpanda/scale/install-metrics-server.html#steps","title":"Steps","text":"

                                          Please perform the following steps to install the metrics-server plugin for the cluster.

                                          1. On the Auto Scaling page under workload details, click the Install button to enter the metrics-server plug-in installation interface.

2. Read the introduction of the metrics-server plugin, select the version, and click the Install button. This page uses version 3.8.2 as an example; version 3.8.2 or later is recommended.

                                          3. Configure basic parameters on the installation configuration interface.

                                            • Name: Enter the plugin name, please note that the name can be up to 63 characters, can only contain lowercase letters, numbers and separators (\"-\"), and must start and end with lowercase letters or numbers, such as metrics-server-01.
                                            • Namespace: Select the namespace for plugin installation, here we take default as an example.
                                            • Version: The version of the plugin, here we take 3.8.2 version as an example.
                                            • Ready Wait: When enabled, it will wait for all associated resources under the application to be ready before marking the application installation as successful.
  • Failed to delete: If the plugin installation fails, delete the associated resources that have already been installed. When enabled, Ready Wait is enabled synchronously by default.
                                            • Verbose log: Turn on the verbose output of the installation process log.

                                            Note

  After enabling Ready Wait and/or Failed to delete, the application may take a while to be marked as Running.

                                          4. Advanced parameter configuration

  • If the cluster network cannot access the k8s.gcr.io registry, try changing the repository parameter to repository: k8s.m.daocloud.io/metrics-server/metrics-server .

  • An SSL certificate is also required to install the metrics-server plugin. To bypass certificate verification, add the - --kubelet-insecure-tls parameter under defaultArgs: .

                                            Click to view and use the YAML parameters to replace the default YAML
image:\n  repository: k8s.m.daocloud.io/metrics-server/metrics-server # Change the registry source address to k8s.m.daocloud.io\n  tag: ''\n  pullPolicy: IfNotPresent\nimagePullSecrets: []\nnameOverride: ''\nfullnameOverride: ''\nserviceAccount:\n  create: true\n  annotations: {}\n  name: ''\nrbac:\n  create: true\n  pspEnabled: false\napiService:\n  create: true\npodLabels: {}\npodAnnotations: {}\npodSecurityContext: {}\nsecurityContext:\n  allowPrivilegeEscalation: false\n  readOnlyRootFilesystem: true\n  runAsNonRoot: true\n  runAsUser: 1000\npriorityClassName: system-cluster-critical\ncontainerPort: 4443\nhostNetwork:\n  enabled: false\nreplicas: 1\nupdateStrategy: {}\npodDisruptionBudget:\n  enabled: false\n  minAvailable: null\n  maxUnavailable: null\ndefaultArgs:\n  - '--cert-dir=/tmp'\n  - '--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname'\n  - '--kubelet-use-node-status-port'\n  - '--metric-resolution=15s'\n  - --kubelet-insecure-tls # Bypass certificate verification\nargs: []\nlivenessProbe:\n  httpGet:\n    path: /livez\n    port: https\n    scheme: HTTPS\n  initialDelaySeconds: 0\n  periodSeconds: 10\n  failureThreshold: 3\nreadinessProbe:\n  httpGet:\n    path: /readyz\n    port: https\n    scheme: HTTPS\n  initialDelaySeconds: 20\n  periodSeconds: 10\n  failureThreshold: 3\nservice:\n  type: ClusterIP\n  port: 443\n  annotations: {}\n  labels: {}\nmetrics:\n  enabled: false\nserviceMonitor:\n  enabled: false\n  additionalLabels: {}\n  interval: 1m\n  scrapeTimeout: 10s\nresources: {}\nextraVolumeMounts: []\nextraVolumes: []\nnodeSelector: {}\ntolerations: []\naffinity: {}\n
5. Click the OK button to complete the installation of the metrics-server plugin; the system will automatically jump to the Helm Apps list page. After a few minutes, refresh the page and you will see the newly installed application.

                                          Note

When deleting the metrics-server plugin, it can only be completely removed on the Helm Apps list page. If you delete it only on the workload page, this deletes just the workload's copy of the application; the application itself remains, and an error will be prompted when you reinstall the plugin later.
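
Once metrics-server is running, you can optionally verify that resource metrics are being collected, for example:

# Both commands require a working metrics-server
kubectl top node
kubectl top pod -A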

                                          "},{"location":"en/admin/kpanda/scale/install-vpa.html","title":"Install vpa","text":"

The Vertical Pod Autoscaler (VPA) makes the cluster's resource allocation more reasonable and avoids wasting cluster resources. vpa is the key component that implements vertical autoscaling of containers.

                                          This section describes how to install the vpa plugin.

Note

To use VPA policies, you must install not only the vpa plugin but also the metrics-server plugin.
                                          "},{"location":"en/admin/kpanda/scale/install-vpa.html#prerequisites","title":"Prerequisites","text":"

                                          Before installing the vpa plugin, the following prerequisites need to be met:

• Integrated or created a Kubernetes cluster in the Container Management module, and you can access the cluster's UI.

                                          • Create a namespace.

                                          • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                          "},{"location":"en/admin/kpanda/scale/install-vpa.html#steps","title":"Steps","text":"

                                          Refer to the following steps to install the vpa plugin for the cluster.

                                          1. On the Clusters page, find the target cluster where the plugin needs to be installed, click the name of the cluster, then click Workloads -> Deployments on the left, and click the name of the target workload.

                                          2. On the workload details page, click the Auto Scaling tab, and click Install on the right side of VPA .

3. Read the introduction of the plugin, select the version, and click the Install button. Version 1.5.0 or later is recommended.

                                          4. Review the configuration parameters described below.

  • Name: Enter the plugin name, please note that the name can be up to 63 characters, can only contain lowercase letters, numbers, and separators ("-"), and must start and end with a lowercase letter or number, such as vpa.
                                            • Namespace: Select which namespace the plugin will be installed in, here we take default as an example.
                                            • Version: The version of the plugin, here we take the 1.5.0 version as an example.
                                            • Ready Wait: When enabled, it will wait for all associated resources under the application to be in the ready state before marking the application installation as successful.
                                            • Failed to delete: If the plugin installation fails, delete the associated resources that have already been installed. When enabled, Wait will be enabled synchronously by default.
                                            • Detailed log: When enabled, a detailed log of the installation process will be recorded.

                                            Note

  After enabling Ready Wait and/or Failed to delete, the application may take a while to be marked as running.

                                          5. Click OK in the lower right corner of the page, and the system will automatically jump to the Helm Apps list page. Wait a few minutes and refresh the page to see the application you just installed.

                                            Warning

                                            If you need to delete the vpa plugin, you should go to the Helm Apps list page to delete it completely.

  If you delete the plugin under the Auto Scaling tab of the workload, only the workload's copy of the plugin is deleted; the plugin itself remains, and an error will be prompted when you reinstall it later.

6. Go back to the Auto Scaling tab under the workload details page, and you can see that the interface displays Plugin installed. Now you can start creating VPA policies.

                                          "},{"location":"en/admin/kpanda/scale/knative/install.html","title":"Installation","text":"

                                          Knative is a platform-agnostic solution for running serverless deployments.

                                          "},{"location":"en/admin/kpanda/scale/knative/install.html#steps","title":"Steps","text":"
1. Log in to the cluster, click the sidebar Helm Apps → Helm Charts, enter knative in the search box at the top right, and then press the enter key to search.

2. Click knative-operator to enter the installation configuration interface. On this page you can view the available versions and the optional Helm values parameters.

                                          3. After clicking the install button, you will enter the installation configuration interface.

4. Enter the name and the installation tenant; it is recommended to check Wait and Detailed Logs.

                                          5. In the settings below, you can tick Serving and enter the installation tenant of the Knative Serving component, which will deploy the Knative Serving component after installation. This component is managed by the Knative Operator.

                                          "},{"location":"en/admin/kpanda/scale/knative/knative.html","title":"Knative Introduction","text":"

                                          Knative provides a higher level of abstraction, simplifying and speeding up the process of building, deploying, and managing applications on Kubernetes. It allows developers to focus more on implementing business logic, while leaving most of the infrastructure and operations work to Knative, significantly improving productivity.

                                          "},{"location":"en/admin/kpanda/scale/knative/knative.html#components","title":"Components","text":"

                                          The Knative operator runs the following components.

                                          knative-operator   knative-operator-58f7d7db5c-7f6r5      1/1     Running     0     6m55s\nknative-operator   operator-webhook-667dc67bc-qvrv4       1/1     Running     0     6m55s\n

                                          The Knative serving components are as follows.

                                          knative-serving        3scale-kourier-gateway-d69fbfbd-bd8d8   1/1     Running     0                 7m13s\nknative-serving        activator-7c6fddd698-wdlng              1/1     Running     0                 7m3s\nknative-serving        autoscaler-8f4b876bb-kd25p              1/1     Running     0                 7m17s\nknative-serving        autoscaler-hpa-5f7f74679c-vkc7p         1/1     Running     0                 7m15s\nknative-serving        controller-789c896c46-tfvsv             1/1     Running     0                 7m17s\nknative-serving        net-kourier-controller-7db578c889-7gd5l 1/1     Running     0                 7m14s\nknative-serving        webhook-5c88b94c5-78x7m                 1/1     Running     0                 7m1s\nknative-serving        storage-version-migration-serving-serving-1.12.2-t7zvd   0/1  Completed   0   7m15s\n
The components and their features:

• Activator: Queues requests (if a Knative Service has scaled to zero). Calls the autoscaler to bring back services that have scaled down to zero and forwards queued requests. The Activator can also act as a request buffer, handling bursts of traffic.
• Autoscaler: Responsible for scaling Knative services based on configuration, metrics, and incoming requests.
• Controller: Manages the state of Knative CRs. It monitors multiple objects, manages the lifecycle of dependent resources, and updates resource status.
• Queue-Proxy: A sidecar container injected into each Knative Service, responsible for collecting traffic data and reporting it to the Autoscaler, which then initiates scaling requests based on this data and preset rules.
• Webhooks: Knative Serving has several webhooks responsible for validating and mutating Knative resources.
"},{"location":"en/admin/kpanda/scale/knative/knative.html#ingress-traffic-entry-solutions","title":"Ingress Traffic Entry Solutions","text":"
• Istio: If Istio is already in use, it can be chosen as the traffic entry solution.
• Contour: If Contour has been enabled in the cluster, it can be chosen as the traffic entry solution.
• Kourier: If neither of the above two Ingress components is present, Knative's Envoy-based Kourier Ingress can be used as the traffic entry solution.
"},{"location":"en/admin/kpanda/scale/knative/knative.html#autoscaler-solutions-comparison","title":"Autoscaler Solutions Comparison","text":"
• Knative Pod Autoscaler (KPA): a core part of Knative Serving, enabled by default; supports scale to zero but does not support CPU-based autoscaling.
• Horizontal Pod Autoscaler (HPA): not a core part of Knative Serving and needs to be enabled after installing Knative Serving; does not support scale to zero but supports CPU-based autoscaling.
"},{"location":"en/admin/kpanda/scale/knative/knative.html#crd","title":"CRD","text":"
• Services (service.serving.knative.dev): Automatically manages the entire lifecycle of workloads, controls the creation of other objects, and ensures applications have Routes, Configurations, and a new revision with each update.
• Routes (route.serving.knative.dev): Maps network endpoints to one or more revisions; supports traffic distribution and version routing.
• Configurations (configuration.serving.knative.dev): Maintains the desired state of deployments, provides separation between code and configuration, and follows the Twelve-Factor App methodology; modifying a configuration creates a new revision.
• Revisions (revision.serving.knative.dev): A snapshot of the workload at each modification point in time; an immutable object that automatically scales based on traffic.
"},{"location":"en/admin/kpanda/scale/knative/playground.html","title":"Knative Practices","text":"

                                          In this section, we will delve into learning Knative through several practical exercises.

                                          "},{"location":"en/admin/kpanda/scale/knative/playground.html#case-1-hello-world","title":"case 1 - Hello World","text":"
                                          apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n

                                          You can use kubectl to check the status of a deployed application that has been automatically configured with ingress and scalers by Knative.

                                          ~ kubectl get service.serving.knative.dev/hello\nNAME    URL                                              LATESTCREATED   LATESTREADY   READY   REASON\nhello   http://hello.knative-serving.knative.loulan.me   hello-00001     hello-00001   True\n

The deployed Pod YAML is as follows; the Pod consists of two containers: user-container and queue-proxy.

                                          apiVersion: v1\nkind: Pod\nmetadata:\n  name: hello-00003-deployment-5fcb8ccbf-7qjfk\nspec:\n  containers:\n  - name: user-container\n  - name: queue-proxy\n

                                          Request Flow:

1. case 1: When there is low traffic or no traffic, traffic will be routed to the activator.
2. case 2: When there is high traffic, traffic is routed directly to the Pods only once it exceeds the target-burst-capacity. The behavior depends on how target-burst-capacity is configured:
  1. Configured as 0, the activator is in the request path only while scaling from zero.
  2. Configured as -1, the activator is always present in the request path.
  3. Configured as >0, the value is the number of additional concurrent requests the system can absorb before scaling is triggered.
3. case 3: When the traffic decreases again, traffic is routed back to the activator once current_demand + target-burst-capacity > (pods * concurrency-target).

  That is, the total number of pending requests plus the number of requests allowed to exceed the target concurrency is greater than the target concurrency per Pod times the number of Pods.
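
  For illustration (all numbers assumed): with concurrency-target = 10, 2 ready Pods, and target-burst-capacity = 10, the Pods can absorb 2 * 10 = 20 concurrent requests; once current demand exceeds 10, the condition 10 + 10 > 20 holds and the activator is placed back into the request path.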

                                          "},{"location":"en/admin/kpanda/scale/knative/playground.html#case-2-based-on-concurrent-elastic-scaling","title":"case 2 - Based on Concurrent Elastic Scaling","text":"

First, apply the following YAML definition to the cluster.

                                          apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"1\"\n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n

                                          Execute the following command for testing, and you can observe the scaling of the Pods by using kubectl get pods -A -w.

                                          wrk -t2 -c4 -d6s http://hello.knative-serving.knative.daocloud.io/\n
                                          "},{"location":"en/admin/kpanda/scale/knative/playground.html#case-3-based-on-concurrent-elastic-scaling-scale-out-in-advance-to-reach-a-specific-ratio","title":"case 3 - Based on concurrent elastic scaling, scale out in advance to reach a specific ratio.","text":"

We can easily achieve this, for example, by limiting the concurrency to 10 per container and setting autoscaling.knative.dev/target-utilization-percentage: 70, so that Pods start to scale out once 70% of the target concurrency is reached.

apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"10\"\n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"\n        autoscaling.knative.dev/target-utilization-percentage: \"70\"\n        autoscaling.knative.dev/metric: \"concurrency\"\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n
                                          "},{"location":"en/admin/kpanda/scale/knative/playground.html#case-4-canary-releasetraffic-percentage","title":"case 4 - Canary Release/Traffic Percentage","text":"

                                          We can control the distribution of traffic to each version through spec.traffic.

                                          apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"1\"  \n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"         \n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n  traffic:\n  - latestRevision: true\n    percent: 50\n  - latestRevision: false\n    percent: 50\n    revisionName: hello-00001\n
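
After applying the above, you can optionally check how traffic is being split across revisions (a sketch; ksvc is the short name for the Knative Service resource):

kubectl get ksvc hello                 # shows the URL and ready status
kubectl get ksvc hello -o yaml         # status.traffic lists the percentage routed to each revision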
                                          "},{"location":"en/admin/kpanda/scale/knative/scene.html","title":"Use Cases","text":""},{"location":"en/admin/kpanda/scale/knative/scene.html#suitable-cases","title":"Suitable Cases","text":"
                                          • High concurrency business with short connections
                                          • Businesses that require elastic scaling
                                          • A large number of applications need to scale down to 0 to improve resource utilization
                                          • AI Serving services that scale based on specific metrics
                                          "},{"location":"en/admin/kpanda/scale/knative/scene.html#unsuitable-cases","title":"Unsuitable Cases","text":"
                                          • Long-lived connection business
                                          • Latency-sensitive business
                                          • Traffic splitting based on cookies
                                          • Traffic splitting based on headers
                                          "},{"location":"en/admin/kpanda/security/index.html","title":"Types of Security Scans","text":"

                                          AI platform Container Management provides three types of security scans:

                                          • Compliance Scan: Conducts security scans on cluster nodes based on CIS Benchmark.
                                          • Authorization Scan: Checks for security and compliance issues in the Kubernetes cluster, records and verifies authorized access, object changes, events, and other activities related to the Kubernetes API.
                                          • Vulnerability Scan: Scans the Kubernetes cluster for potential vulnerabilities and risks, such as unauthorized access, sensitive information leakage, weak authentication, container escape, etc.
                                          "},{"location":"en/admin/kpanda/security/index.html#compliance-scan","title":"Compliance Scan","text":"

                                          The object of compliance scanning is the cluster node. The scan result lists the scan items and results and provides repair suggestions for any failed scan items. For specific security rules used during scanning, refer to the CIS Kubernetes Benchmark.

                                          The focus of the scan varies when checking different types of nodes.

                                          • Scan the control plane node (Controller)

                                            • Focus on the security of system components such as API Server , controller-manager , scheduler , kubelet , etc.
                                            • Check the security configuration of the Etcd database.
                                            • Verify whether the cluster's authentication mechanism, authorization policy, and network security configuration meet security standards.
                                          • Scan worker nodes

                                            • Check if the configuration of container runtimes such as kubelet and Docker meets security standards.
                                            • Verify whether the container image has been trusted and verified.
                                            • Check if the network security configuration of the node meets security standards.

                                          Tip

                                          To use compliance scanning, you need to create a scan configuration first, and then create a scan policy based on that configuration. After executing the scan policy, you can view the scan report.

                                          "},{"location":"en/admin/kpanda/security/index.html#authorization-scan","title":"Authorization Scan","text":"

Authorization scanning focuses on security vulnerabilities caused by authorization issues. It helps users identify security threats in Kubernetes clusters and flag which resources need further review and protection. By performing these checks, users gain a clearer, more comprehensive picture of their Kubernetes environment and can ensure that the cluster meets Kubernetes best practices and security standards.

                                          Specifically, authorization scanning supports the following operations:

                                          • Scans the health status of all nodes in the cluster.

• Scans the running state of components in the cluster, such as kube-apiserver, kube-controller-manager, kube-scheduler, etc.

• Scans security configurations: checks the Kubernetes security configuration, including:

                                            • API security: whether unsafe API versions are enabled, whether appropriate RBAC roles and permission restrictions are set, etc.
                                            • Container security: whether insecure images are used, whether privileged mode is enabled, whether appropriate security context is set, etc.
                                            • Network security: whether appropriate network policy is enabled to restrict traffic, whether TLS encryption is used, etc.
                                            • Storage security: whether appropriate encryption and access controls are enabled.
                                            • Application security: whether necessary security measures are in place, such as password management, cross-site scripting attack defense, etc.
                                          • Provides warnings and suggestions: Security best practices that cluster administrators should perform, such as regularly rotating certificates, using strong passwords, restricting network access, etc.

                                          Tip

                                          To use authorization scanning, you need to create a scan policy first. After executing the scan policy, you can view the scan report. For details, refer to Security Scanning.

                                          "},{"location":"en/admin/kpanda/security/index.html#vulnerability-scan","title":"Vulnerability Scan","text":"

                                          Vulnerability scanning focuses on scanning potential malicious attacks and security vulnerabilities, such as remote code execution, SQL injection, XSS attacks, and some attacks specific to Kubernetes. The final scan report lists the security vulnerabilities in the cluster and provides repair suggestions.

                                          Tip

                                          To use vulnerability scanning, you need to create a scan policy first. After executing the scan policy, you can view the scan report. For details, refer to Vulnerability Scan.

                                          "},{"location":"en/admin/kpanda/security/audit.html","title":"Permission Scan","text":"

                                          To use the Permission Scan feature, you need to create a scan policy first. After executing the policy, a scan report will be automatically generated for viewing.

                                          "},{"location":"en/admin/kpanda/security/audit.html#create-a-scan-policy","title":"Create a Scan Policy","text":"
                                          1. On the left navigation bar of the homepage in the Container Management module, click Security Management .

                                          2. Click Permission Scan on the left navigation bar, then click the Scan Policy tab and click Create Scan Policy on the right.

                                          3. Fill in the configuration according to the following instructions, and then click OK .

                                            • Cluster: Select the cluster to be scanned. The optional cluster list comes from the clusters accessed or created in the Container Management module. If the desired cluster is not available, you can access or create a cluster in the Container Management module.
                                            • Scan Type:

                                              • Immediate scan: Perform a scan immediately after the scan policy is created. It cannot be automatically/manually executed again later.
                                              • Scheduled scan: Automatically repeat the scan at scheduled intervals.
  • Number of Scan Reports to Keep: Set the maximum number of scan reports to retain. When the limit is exceeded, the earliest reports are deleted first.

                                          "},{"location":"en/admin/kpanda/security/audit.html#updatedelete-scan-policies","title":"Update/Delete Scan Policies","text":"

                                          After creating a scan policy, you can update or delete it as needed.

                                          Under the Scan Policy tab, click the \u2507 action button to the right of a configuration:

                                          • For periodic scan policies:

                                            • Select Execute Immediately to perform an additional scan outside the regular schedule.
  • Select Disable to pause the scan schedule; click Enable to resume executing the scan policy on schedule.
                                            • Select Edit to update the configuration. You can update the scan configuration, type, scan cycle, and report retention quantity. The configuration name and the target cluster to be scanned cannot be changed.
                                            • Select Delete to delete the configuration.
• One-time scan policies support only the Delete operation.

                                          "},{"location":"en/admin/kpanda/security/audit.html#view-scan-reports","title":"View Scan Reports","text":"
                                          1. Under the Security Management -> Permission Scanning -> Scan Reports tab, click the report name.

                                            Clicking Delete on the right of a report allows you to manually delete the report.

                                          2. View the scan report content, including:

                                            • The target cluster scanned.
                                            • The scan policy used.
                                            • The total number of scan items, warnings, and errors.
  • In reports generated by periodic scan policies, you can also view the scan frequency.
                                            • The start time of the scan.
                                            • Check details, such as the checked resources, resource types, scan results, error types, and error details.
                                          "},{"location":"en/admin/kpanda/security/hunter.html","title":"Vulnerability Scan","text":"

                                          To use the Vulnerability Scan feature, you need to create a scan policy first. After executing the policy, a scan report will be automatically generated for viewing.

                                          "},{"location":"en/admin/kpanda/security/hunter.html#create-a-scan-policy","title":"Create a Scan Policy","text":"
                                          1. On the left navigation bar of the homepage in the Container Management module, click Security Management .

                                          2. Click Vulnerability Scan on the left navigation bar, then click the Scan Policy tab and click Create Scan Policy on the right.

                                          3. Fill in the configuration according to the following instructions, and then click OK .

                                            • Cluster: Select the cluster to be scanned. The optional cluster list comes from the clusters accessed or created in the Container Management module. If the desired cluster is not available, you can access or create a cluster in the Container Management module.
                                            • Scan Type:

                                              • Immediate scan: Perform a scan immediately after the scan policy is created. It cannot be automatically/manually executed again later.
                                              • Scheduled scan: Automatically repeat the scan at scheduled intervals.
  • Number of Scan Reports to Keep: Set the maximum number of scan reports to retain. When the limit is exceeded, the earliest reports are deleted first.

                                          "},{"location":"en/admin/kpanda/security/hunter.html#updatedelete-scan-policies","title":"Update/Delete Scan Policies","text":"

                                          After creating a scan policy, you can update or delete it as needed.

                                          Under the Scan Policy tab, click the \u2507 action button to the right of a configuration:

                                          • For periodic scan policies:

                                            • Select Execute Immediately to perform an additional scan outside the regular schedule.
  • Select Disable to pause the scan schedule; click Enable to resume executing the scan policy on schedule.
                                            • Select Edit to update the configuration. You can update the scan configuration, type, scan cycle, and report retention quantity. The configuration name and the target cluster to be scanned cannot be changed.
                                            • Select Delete to delete the configuration.
• One-time scan policies support only the Delete operation.

                                          "},{"location":"en/admin/kpanda/security/hunter.html#viewe-scan-reports","title":"Viewe Scan Reports","text":"
                                          1. Under the Security Management -> Vulnerability Scanning -> Scan Reports tab, click the report name.

                                            Clicking Delete on the right of a report allows you to manually delete the report.

                                          2. View the scan report content, including:

                                            • The target cluster scanned.
                                            • The scan policy used.
                                            • The scan frequency.
                                            • The total number of risks, high risks, medium risks, and low risks.
                                            • The time of the scan.
                                            • Check details such as vulnerability ID, vulnerability type, vulnerability name, vulnerability description, etc.
                                          "},{"location":"en/admin/kpanda/security/cis/config.html","title":"Scan Configuration","text":"

                                          The first step in using CIS Scanning is to create a scan configuration. Based on the scan configuration, you can then create scan policies, execute scan policies, and finally view scan results.

                                          "},{"location":"en/admin/kpanda/security/cis/config.html#create-a-scan-configuration","title":"Create a Scan Configuration","text":"

                                          The steps for creating a scan configuration are as follows:

                                          1. Click Security Management in the left navigation bar of the homepage of the container management module.

                                          2. By default, enter the Compliance Scanning page, click the Scan Configuration tab, and then click Create Scan Configuration in the upper-right corner.

                                          3. Fill in the configuration name, select the configuration template, and optionally check the scan items, then click OK .

Scan Template: Two templates are currently provided. The kubeadm template is suitable for general Kubernetes clusters. The daocloud template builds on the kubeadm template but, in line with the AI platform's design, omits scan items that do not apply to the AI platform.

                                          "},{"location":"en/admin/kpanda/security/cis/config.html#view-scan-configuration","title":"View Scan Configuration","text":"

                                          Under the scan configuration tab, clicking the name of a scan configuration displays the type of the configuration, the number of scan items, the creation time, the configuration template, and the specific scan items enabled for the configuration.

                                          "},{"location":"en/admin/kpanda/security/cis/config.html#updatdelete-scan-configuration","title":"Updat/Delete Scan Configuration","text":"

                                          After a scan configuration has been successfully created, it can be updated or deleted according to your needs.

                                          Under the scan configuration tab, click the \u2507 action button to the right of a configuration:

                                          • Select Edit to update the configuration. You can update the description, template, and scan items. The configuration name cannot be changed.
                                          • Select Delete to delete the configuration.
                                          "},{"location":"en/admin/kpanda/security/cis/policy.html","title":"Scan Policy","text":""},{"location":"en/admin/kpanda/security/cis/policy.html#create-a-scan-policy","title":"Create a Scan Policy","text":"

                                          After creating a scan configuration, you can create a scan policy based on the configuration.

                                          1. Under the Security Management -> Compliance Scanning page, click the Scan Policy tab on the right to create a scan policy.

                                          2. Fill in the configuration according to the following instructions and click OK .

                                            • Cluster: Select the cluster to be scanned. The optional cluster list comes from the clusters accessed or created in the Container Management module. If the desired cluster is not available, you can access or create a cluster in the Container Management module.
                                            • Scan Configuration: Select a pre-created scan configuration. The scan configuration determines which specific scan items need to be performed.
                                            • Scan Type:

                                              • Immediate scan: Perform a scan immediately after the scan policy is created. It cannot be automatically/manually executed again later.
                                              • Scheduled scan: Automatically repeat the scan at scheduled intervals.
  • Number of Scan Reports to Keep: Set the maximum number of scan reports to retain. When the limit is exceeded, the earliest reports are deleted first.

                                          "},{"location":"en/admin/kpanda/security/cis/policy.html#updatedelete-scan-policies","title":"Update/Delete Scan Policies","text":"

                                          After creating a scan policy, you can update or delete it as needed.

                                          Under the Scan Policy tab, click the \u2507 action button to the right of a configuration:

                                          • For periodic scan policies:

                                            • Select Execute Immediately to perform an additional scan outside the regular schedule.
  • Select Disable to pause the scan schedule; click Enable to resume executing the scan policy on schedule.
                                            • Select Edit to update the configuration. You can update the scan configuration, type, scan cycle, and report retention quantity. The configuration name and the target cluster to be scanned cannot be changed.
                                            • Select Delete to delete the configuration.
• One-time scan policies support only the Delete operation.

                                          "},{"location":"en/admin/kpanda/security/cis/report.html","title":"Scan Report","text":"

                                          After executing a scan policy, a scan report will be generated automatically. You can view the scan report online or download it to your local computer.

                                          • Download and View

                                            Under the Security Management -> Compliance Scanning page, click the Scan Report tab, then click the \u2507 action button to the right of a report and select Download .

                                          • View Online

                                            Clicking the name of a report allows you to view its content online, which includes:

                                            • The target cluster scanned.
                                            • The scan policy and scan configuration used.
                                            • The start time of the scan.
                                            • The total number of scan items, the number passed, and the number failed.
                                            • For failed scan items, repair suggestions are provided.
                                            • For passed scan items, more secure operational suggestions are provided.
                                          "},{"location":"en/admin/kpanda/storage/pv.html","title":"data volume (PV)","text":"

A data volume (PersistentVolume, PV) is a piece of storage in the cluster that can be provisioned in advance by an administrator or provisioned dynamically through a StorageClass. A PV is a cluster resource, but it has an independent lifecycle and is not deleted when the Pod process ends. Mounting a PV into a workload provides data persistence for that workload: the PV holds the data directory that the containers in the Pod can access.

                                          "},{"location":"en/admin/kpanda/storage/pv.html#create-data-volume","title":"Create data volume","text":"

Currently, data volumes can be created in two ways: YAML and form. Each has its own advantages and suits different users.

• Creating via YAML takes fewer steps and is more efficient, but it has a higher barrier: you need to be familiar with the YAML configuration of data volumes.

• Creating via the form is more intuitive and easier: just fill in the values according to the prompts, though the steps are more numerous.

                                          "},{"location":"en/admin/kpanda/storage/pv.html#yaml-creation","title":"YAML creation","text":"
                                          1. Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume (PV) -> Create with YAML in the left navigation bar.

                                          2. Enter or paste the prepared YAML file in the pop-up box, and click OK at the bottom of the pop-up box.

You can import a YAML file from your local machine, or download and save the completed file locally.
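For reference, below is a minimal sketch of a Local PV manifest; the name, capacity, path, and node name (node1) are illustrative assumptions rather than values required by the platform:

apiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: pv-example\nspec:\n  capacity:\n    storage: 10Gi\n  accessModes:\n    - ReadWriteOnce\n  persistentVolumeReclaimPolicy: Retain\n  volumeMode: Filesystem\n  local:\n    path: /mnt/data            # directory must already exist on the node\n  nodeAffinity:                # required for Local volumes\n    required:\n      nodeSelectorTerms:\n        - matchExpressions:\n            - key: kubernetes.io/hostname\n              operator: In\n              values:\n                - node1\n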

                                          "},{"location":"en/admin/kpanda/storage/pv.html#form-creation","title":"Form Creation","text":"
                                          1. Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume (PV) -> Create Data Volume (PV) in the left navigation bar.

                                          2. Fill in the basic information.

                                            • The data volume name, data volume type, mount path, volume mode, and node affinity cannot be changed after creation.
                                            • Data volume type: For a detailed introduction to volume types, refer to the official Kubernetes document Volumes.

• Local: the node's local storage is exposed through the PVC interface, so the container uses the PVC directly without caring about the underlying storage type. Local volumes do not support dynamic provisioning, but they do support node affinity, which restricts which nodes can access the data volume.

• HostPath: uses files or directories on the node's file system as data volumes; Pod scheduling based on node affinity is not supported (see the sketch after this list).

• Mount path: mounts the data volume to a specific directory in the container.

• Access mode:

  • ReadWriteOnce: the data volume can be mounted read-write by a single node.
  • ReadWriteMany: the data volume can be mounted read-write by multiple nodes.
  • ReadOnlyMany: the data volume can be mounted read-only by multiple nodes.
  • ReadWriteOncePod: the data volume can be mounted read-write by a single Pod.
• Reclaim policy:

  • Retain: the PV is not deleted; its status only changes to Released, and it must be reclaimed manually by the user. For how to reclaim manually, refer to Persistent Volume.
  • Recycle: keeps the PV but empties its data with a basic wipe ( rm -rf /thevolume/* ).
  • Delete: the PV is deleted together with its data.
                                            • Volume mode:

  • Filesystem: the data volume is mounted into a directory in the Pod. If the volume is backed by a device that is currently empty, a file system is created on the device before the volume is mounted for the first time.
  • Block: the data volume is used as a raw block device. The volume is handed to the Pod as a block device with no file system on it, allowing the Pod to access the data volume faster.
• Node affinity: restricts which nodes can access the data volume (required for Local volumes).
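As a contrast to the Local type, here is a minimal HostPath PV sketch; the path and capacity are illustrative, and note that a HostPath PV carries no node affinity:

apiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: pv-hostpath-example\nspec:\n  capacity:\n    storage: 5Gi\n  accessModes:\n    - ReadWriteOnce\n  persistentVolumeReclaimPolicy: Retain\n  hostPath:\n    path: /data/hostpath       # resolved on whichever node the Pod runs on\n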

                                          "},{"location":"en/admin/kpanda/storage/pv.html#view-data-volume","title":"View data volume","text":"

                                          Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume (PV) in the left navigation bar.

                                          • On this page, you can view all data volumes in the current cluster, as well as information such as the status, capacity, and namespace of each data volume.

• Data volumes can be sorted in ascending or descending order by name, status, namespace, and creation time.

• Click the name of a data volume to view its basic configuration, StorageClass information, labels, annotations, and more.

                                          "},{"location":"en/admin/kpanda/storage/pv.html#clone-data-volume","title":"Clone data volume","text":"

Cloning a data volume creates a new data volume based on the configuration of the original one.

                                          1. Enter the clone page

                                            • On the data volume list page, find the data volume to be cloned, and select Clone under the operation bar on the right.

                                              You can also click the name of the data volume, click the operation button in the upper right corner of the details page and select Clone .

                                          2. Use the original configuration directly, or modify it as needed, and click OK at the bottom of the page.

                                          "},{"location":"en/admin/kpanda/storage/pv.html#update-data-volume","title":"Update data volume","text":"

Data volumes can be updated in two ways: via the form or via a YAML file.

                                          Note

Only the alias, capacity, access mode, reclaim policy, labels, and annotations of a data volume can be updated.

• On the data volume list page, find the data volume to be updated, then select Update in the operation column on the right to update via the form, or select Edit YAML to update via YAML.

• Click the name of the data volume to enter its details page, then select Update in the upper-right corner of the page to update via the form, or select Edit YAML to update via YAML.

                                          "},{"location":"en/admin/kpanda/storage/pv.html#delete-data-volume","title":"Delete data volume","text":"

On the data volume list page, find the data volume to be deleted, and select Delete in the operation column on the right.

                                          You can also click the name of the data volume, click the operation button in the upper right corner of the details page and select Delete .

                                          "},{"location":"en/admin/kpanda/storage/pvc.html","title":"Data volume declaration (PVC)","text":"

A persistent volume claim (PersistentVolumeClaim, PVC) expresses a user's request for storage. A PVC consumes PV resources and claims a data volume with a specific size and access mode, for example requiring the PV to be mounted in ReadWriteOnce, ReadOnlyMany, or ReadWriteMany mode.

                                          "},{"location":"en/admin/kpanda/storage/pvc.html#create-data-volume-statement","title":"Create data volume statement","text":"

Currently, data volume declarations can be created in two ways: YAML and form. Each has its own advantages and suits different users.

• Creating via YAML takes fewer steps and is more efficient, but it has a higher barrier: you need to be familiar with the YAML configuration of data volume declarations.

• Creating via the form is more intuitive and easier: just fill in the values according to the prompts, though the steps are more numerous.

                                          "},{"location":"en/admin/kpanda/storage/pvc.html#yaml-creation","title":"YAML creation","text":"
                                          1. Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume Declaration (PVC) -> Create with YAML in the left navigation bar.

                                          2. Enter or paste the prepared YAML file in the pop-up box, and click OK at the bottom of the pop-up box.

You can import a YAML file from your local machine, or download and save the completed file locally.
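For reference, a minimal sketch of a PVC manifest; the name, capacity, and StorageClass name (local-path) are illustrative assumptions:

apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: pvc-example\n  namespace: default\nspec:\n  accessModes:\n    - ReadWriteOnce\n  volumeMode: Filesystem\n  resources:\n    requests:\n      storage: 10Gi\n  storageClassName: local-path   # assumed name; use a StorageClass that exists in your cluster\n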

                                          "},{"location":"en/admin/kpanda/storage/pvc.html#form-creation","title":"Form Creation","text":"
                                          1. Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume Declaration (PVC) -> Create Data Volume Declaration (PVC) in the left navigation bar.

                                          2. Fill in the basic information.

                                            • The name, namespace, creation method, data volume, capacity, and access mode of the data volume declaration cannot be changed after creation.
• Creation method: dynamically create a new data volume declaration from an existing StorageClass or data volume, or create one based on a snapshot of an existing declaration.

    When creating from a snapshot, the declared capacity cannot be modified during creation; it can be changed after creation completes.

                                            • After selecting the creation method, select the desired StorageClass/data volume/snapshot from the drop-down list.

• Access mode:

  • ReadWriteOnce: the data volume declaration can be mounted read-write by a single node.
  • ReadWriteMany: the data volume declaration can be mounted read-write by multiple nodes.
  • ReadOnlyMany: the data volume declaration can be mounted read-only by multiple nodes.
  • ReadWriteOncePod: the data volume declaration can be mounted read-write by a single Pod.
                                          "},{"location":"en/admin/kpanda/storage/pvc.html#view-data-volume-statement","title":"View data volume statement","text":"

                                          Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume Declaration (PVC) in the left navigation bar.

                                          • On this page, you can view all data volume declarations in the current cluster, as well as information such as the status, capacity, and namespace of each data volume declaration.

• Data volume declarations can be sorted in ascending or descending order by name, status, namespace, and creation time.

• Click the name of a data volume declaration to view its basic configuration, StorageClass information, labels, annotations, and more.

                                          "},{"location":"en/admin/kpanda/storage/pvc.html#expansion-data-volume-statement","title":"Expansion data volume statement","text":"
                                          1. In the left navigation bar, click Container Storage -> Data Volume Declaration (PVC) , and find the data volume declaration whose capacity you want to adjust.

                                          2. Click the name of the data volume declaration, and then click the operation button in the upper right corner of the page and select Expansion .

                                          3. Enter the target capacity and click OK .
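Expanding through the UI is equivalent to raising the claim's requested storage in YAML, which takes effect only if the StorageClass has expansion enabled; a sketch reusing the illustrative pvc-example above:

apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: pvc-example\n  namespace: default\nspec:\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 20Gi              # raised from 10Gi; must not be smaller than the current capacity\n  storageClassName: local-path\n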

                                          "},{"location":"en/admin/kpanda/storage/pvc.html#clone-data-volume-statement","title":"Clone data volume statement","text":"

Cloning a data volume declaration creates a new declaration based on the configuration of the original one.

                                          1. Enter the clone page

                                            • On the data volume declaration list page, find the data volume declaration that needs to be cloned, and select Clone under the operation bar on the right.

                                              You can also click the name of the data volume declaration, click the operation button in the upper right corner of the details page and select Clone .

                                          2. Use the original configuration directly, or modify it as needed, and click OK at the bottom of the page.

                                          "},{"location":"en/admin/kpanda/storage/pvc.html#update-data-volume-statement","title":"Update data volume statement","text":"

Data volume declarations can be updated in two ways: via the form or via a YAML file.

                                          Note

Only the alias, labels, and annotations of a data volume declaration can be updated.

• On the data volume declaration list page, find the declaration to be updated, then select Update in the operation column on the right to update via the form, or select Edit YAML to update via YAML.

• Click the name of the data volume declaration to enter its details page, then select Update in the upper-right corner of the page to update via the form, or select Edit YAML to update via YAML.

                                          "},{"location":"en/admin/kpanda/storage/pvc.html#delete-data-volume-statement","title":"Delete data volume statement","text":"

On the data volume declaration list page, find the declaration to be deleted, and select Delete in the operation column on the right.

You can also click the name of the data volume declaration, click the operation button in the upper-right corner of the details page, and select Delete .

                                          "},{"location":"en/admin/kpanda/storage/pvc.html#common-problem","title":"common problem","text":"
                                          1. If there is no optional StorageClass or data volume in the list, you can Create a StorageClass or Create a data volume.

                                          2. If there is no optional snapshot in the list, you can enter the details page of the data volume declaration and create a snapshot in the upper right corner.

                                          3. If the StorageClass (SC) used by the data volume declaration is not enabled for snapshots, snapshots cannot be made, and the page will not display the \"Make Snapshot\" option.

                                          4. If the StorageClass (SC) used by the data volume declaration does not have the capacity expansion feature enabled, the data volume does not support capacity expansion, and the page will not display the capacity expansion option.
                                          "},{"location":"en/admin/kpanda/storage/sc-share.html","title":"shared StorageClass","text":"

                                          The AI platform container management module supports sharing a StorageClass with multiple namespaces to improve resource utilization efficiency.

                                          1. Find the StorageClass that needs to be shared in the StorageClass list, and click Authorize Namespace under the operation bar on the right.

2. Click Custom Namespace to select, one by one, the namespaces with which to share this StorageClass.

  • Click Authorize All Namespaces to share this StorageClass with all namespaces in the current cluster at once.
  • Click Remove Authorization in the operation column on the right side of the list to revoke authorization and stop sharing this StorageClass with that namespace.
                                          "},{"location":"en/admin/kpanda/storage/sc.html","title":"StorageClass (SC)","text":"

A StorageClass represents a large pool of storage resources composed of many physical disks. The platform supports creating block, local, and custom StorageClasses after integrating various storage vendors, and then dynamically provisioning data volumes for workloads.

                                          "},{"location":"en/admin/kpanda/storage/sc.html#create-storageclass-sc","title":"Create StorageClass (SC)","text":"

Currently, StorageClasses can be created in two ways: YAML and form. Each has its own advantages and suits different users.

• Creating via YAML takes fewer steps and is more efficient, but it has a higher barrier: you need to be familiar with the YAML configuration of StorageClasses.

• Creating via the form is more intuitive and easier: just fill in the values according to the prompts, though the steps are more numerous.

                                          "},{"location":"en/admin/kpanda/storage/sc.html#yaml-creation","title":"YAML creation","text":"
                                          1. Click the name of the target cluster in the cluster list, and then click Container Storage -> StorageClass (SC) -> Create with YAML in the left navigation bar.

                                          2. Enter or paste the prepared YAML file in the pop-up box, and click OK at the bottom of the pop-up box.

You can import a YAML file from your local machine, or download and save the completed file locally.
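For reference, a minimal StorageClass manifest sketch using the rancher.io/local-path driver mentioned below; the name and policy values are illustrative:

apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: sc-example\nprovisioner: rancher.io/local-path\nreclaimPolicy: Delete\nvolumeBindingMode: WaitForFirstConsumer\nallowVolumeExpansion: true       # only effective if the underlying driver supports expansion\n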

                                          "},{"location":"en/admin/kpanda/storage/sc.html#form-creation","title":"Form Creation","text":"
                                          1. Click the name of the target cluster in the cluster list, and then click Container Storage -> StorageClass (SC) -> Create StorageClass (SC) in the left navigation bar.

                                          2. Fill in the basic information and click OK at the bottom.

Custom storage system

                                            • The StorageClass name, driver, and reclamation policy cannot be modified after creation.
                                            • CSI storage driver: A standard Kubernetes-based container storage interface plug-in, which must comply with the format specified by the storage manufacturer, such as rancher.io/local-path .

  • For how to fill in the CSI drivers provided by different vendors, refer to the official Kubernetes document Storage Class.
• Reclaim policy: when a data volume is deleted, keep the data in the data volume or delete it.
• Snapshot/Expansion: once enabled, data volumes and data volume declarations based on this StorageClass support the expansion and snapshot features, provided the underlying storage driver supports them.

                                            HwameiStor storage system

                                            • The StorageClass name, driver, and reclamation policy cannot be modified after creation.
                                            • Storage system: HwameiStor storage system.
• Storage type: LVM and raw disk types are supported.
  • LVM type : the recommended way to use HwameiStor; it supports highly available data volumes. The corresponding CSI driver is lvm.hwameistor.io.
  • Raw disk data volume : suitable for scenarios that do not require high availability, as it provides no HA capability. The corresponding CSI driver is hdd.hwameistor.io.
• High Availability Mode: before using the high-availability capability, make sure the DRBD component has been installed. With high-availability mode enabled, the number of data volume replicas can be set to 1 or 2, and a volume's replica count can be converted from 1 to 2 if needed.
• Reclaim policy: when a data volume is deleted, keep the data in the data volume or delete it.
• Snapshot/Expansion: once enabled, data volumes and data volume declarations based on this StorageClass support the expansion and snapshot features, provided the underlying storage driver supports them.
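A sketch of what an HwameiStor LVM StorageClass might look like with the lvm.hwameistor.io driver named above; the parameter keys are assumptions and should be checked against the HwameiStor documentation:

apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: hwameistor-lvm-ha\nprovisioner: lvm.hwameistor.io\nreclaimPolicy: Delete\nparameters:\n  replicaNumber: \"2\"             # illustrative: two replicas for high availability (requires DRBD)\n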
                                          "},{"location":"en/admin/kpanda/storage/sc.html#update-storageclass-sc","title":"Update StorageClass (SC)","text":"

                                          On the StorageClass list page, find the StorageClass that needs to be updated, and select Edit under the operation bar on the right to update the StorageClass.

                                          Info

                                          Select View YAML to view the YAML file of the StorageClass, but editing is not supported.

                                          "},{"location":"en/admin/kpanda/storage/sc.html#delete-storageclass-sc","title":"Delete StorageClass (SC)","text":"

                                          On the StorageClass list page, find the StorageClass to be deleted, and select Delete in the operation column on the right.

                                          "},{"location":"en/admin/kpanda/workloads/create-cronjob.html","title":"Create CronJob","text":"

                                          This page introduces how to create a CronJob through images and YAML files.

                                          CronJobs are suitable for performing periodic operations, such as backup and report generation. These jobs can be configured to repeat periodically (for example: daily/weekly/monthly), and the time interval at which the job starts to run can be defined.

                                          "},{"location":"en/admin/kpanda/workloads/create-cronjob.html#prerequisites","title":"Prerequisites","text":"

                                          Before creating a CronJob, the following prerequisites need to be met:

• In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and make sure you can access the cluster's UI.

                                          • Create a namespace and a user.

                                          • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                          • When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict, otherwise the deployment will fail.

                                          "},{"location":"en/admin/kpanda/workloads/create-cronjob.html#create-by-image","title":"Create by image","text":"

                                          Refer to the following steps to create a CronJob using the image.

                                          1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                          2. On the cluster details page, click Workloads -> CronJobs in the left navigation bar, and then click the Create by Image button in the upper right corner of the page.

                                          3. Fill in Basic Information, Container Settings, CronJob Settings, Advanced Configuration, click OK in the lower right corner of the page to complete the creation.

                                            The system will automatically return to the CronJobs list. Click \u2507 on the right side of the list to perform operations such as updating, deleting, and restarting the CronJob.

                                          "},{"location":"en/admin/kpanda/workloads/create-cronjob.html#basic-information","title":"Basic information","text":"

                                          On the Create CronJobs page, enter the information according to the table below, and click Next .

• Workload Name: up to 63 characters; it can contain only lowercase letters, numbers, and hyphens (\"-\"), and must start and end with a lowercase letter or number. Workloads of the same type in the same namespace cannot share a name, and the name cannot be changed after the workload is created.
• Namespace: select the namespace in which to deploy the new CronJob; the default namespace is used by default. If you cannot find the desired namespace, create one following the prompt on the page.
• Description: enter a description of the workload; the content is free-form and must not exceed 512 characters.
                                          "},{"location":"en/admin/kpanda/workloads/create-cronjob.html#container-settings","title":"Container settings","text":"

Container settings are divided into six parts: basic information, lifecycle, health check, environment variables, data storage, and security settings. Click the tabs below to view the requirements of each part.

Container settings apply to a single container. To add multiple containers to a Pod, click + on the right.

Basic information (required) · Lifecycle (optional) · Health Check (optional) · Environment variables (optional) · Data storage (optional) · Security settings (optional)

                                          When configuring container-related parameters, you must correctly fill in the container name and image parameters, otherwise you will not be able to proceed to the next step. After filling in the configuration with reference to the following requirements, click OK .

• Container Name: up to 63 characters; lowercase letters, numbers, and hyphens (\"-\") are supported. Must start and end with a lowercase letter or number, e.g., nginx-01.
• Image: enter the address or name of the image. When only an image name is entered, the image is pulled from the official Docker Hub by default.
• Image Pull Policy: if Always pull the image is checked, the image is pulled from the registry every time the workload restarts or upgrades. If unchecked, the local image is used first, and the image is pulled from the container registry only when it does not exist locally. For more details, refer to Image Pull Policy.
                                          • Privileged container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and enjoy all the permissions of the running process on the host.
                                          • CPU/Memory Quota: Requested value (minimum resource to be used) and limit value (maximum resource allowed to be used) of CPU/Memory resource. Please configure resources for containers as needed to avoid resource waste and system failures caused by excessive container resources. The default value is shown in the figure.
• GPU Exclusive: configure the GPU usage for the container; only positive integers are supported. The GPU quota supports giving the container exclusive use of an entire GPU card or part of a vGPU. For example, for an 8-core GPU card, enter 8 to give the container exclusive use of the whole card, or enter 1 to allocate a 1-core vGPU to the container.

                                            Before setting exclusive GPU, the administrator needs to install the GPU card and driver plug-in on the cluster nodes in advance, and enable the GPU feature in Cluster Settings.

                                          Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle Configuration.

                                          It is used to judge the health status of containers and applications, which helps to improve the availability of applications. For details, refer to Container Health Check Configuration.

                                          Configure container parameters within the Pod, add environment variables or pass configuration to the Pod, etc. For details, refer to Container environment variable configuration.

                                          Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage Configuration.

Containers are securely isolated through Linux's built-in account-permission mechanism. You can limit a container's privileges by running it under a user ID (UID) with the appropriate permissions; for example, entering 0 runs the container with root privileges.
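Taken together, these container settings correspond roughly to the following fragment of the Pod template; all values shown are illustrative:

containers:\n  - name: nginx-01\n    image: nginx:latest\n    imagePullPolicy: IfNotPresent   # checking \"Always pull the image\" maps to Always\n    resources:\n      requests:\n        cpu: 250m\n        memory: 512Mi\n      limits:\n        cpu: 250m\n        memory: 512Mi\n    securityContext:\n      privileged: false\n      runAsUser: 0                  # 0 = root; use a non-root UID to restrict permissions\n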

                                          "},{"location":"en/admin/kpanda/workloads/create-cronjob.html#cronjob-settings","title":"CronJob Settings","text":"
                                          • Concurrency Policy: Whether to allow multiple Job jobs to run in parallel.

  • Allow : a new job can be created before the previous job completes, and multiple jobs can run in parallel. Too many concurrent jobs may consume excessive cluster resources.
  • Forbid : a new job cannot be created before the previous job completes. If a new job is due while the previous job is still running, the CronJob skips the new job's run.
  • Replace : if a new job is due while the previous job is still running, the new job replaces the previous one.

                                            The above rules only apply to multiple jobs created by the same CronJob. Multiple jobs created by multiple CronJobs are always allowed to run concurrently.

• Policy Settings: set the schedule for job execution by minute, hour, day, week, and month. Custom Cron expressions with numbers and * are supported; after you enter an expression, its meaning is displayed. For detailed expression syntax, refer to Cron Schedule Syntax.

• Job Records: set how many records of successful or failed jobs to keep; 0 means keep none.
• Timeout: when this time is exceeded, the job is marked as failed and all Pods under the job are deleted. Empty means no timeout; the default is 360 seconds.
• Retries: the number of times the job can be retried; the default is 6.
• Restart Policy: set whether to restart the Pod when the job fails.
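These settings map onto fields of the CronJob spec; a fragment with illustrative values:

spec:\n  schedule: \"30 1 * * *\"           # minute hour day-of-month month day-of-week: daily at 01:30\n  concurrencyPolicy: Forbid\n  successfulJobsHistoryLimit: 3    # job records kept for successes\n  failedJobsHistoryLimit: 1        # job records kept for failures\n  jobTemplate:\n    spec:\n      activeDeadlineSeconds: 360   # timeout\n      backoffLimit: 6              # retries\n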
                                          "},{"location":"en/admin/kpanda/workloads/create-cronjob.html#service-settings","title":"Service settings","text":"

Configure a Service for the CronJob so that it can be accessed externally.

                                          1. Click the Create Service button.

                                          2. Refer to Create Service to configure service parameters.

                                          3. Click OK and click Next .
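A minimal Service manifest sketch; the name, selector label, and ports are illustrative assumptions:

apiVersion: v1\nkind: Service\nmetadata:\n  name: svc-example\n  namespace: default\nspec:\n  selector:\n    app: demo                      # must match the Pod labels of the workload\n  ports:\n    - port: 80\n      targetPort: 8080\n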

                                          "},{"location":"en/admin/kpanda/workloads/create-cronjob.html#advanced-configuration","title":"Advanced configuration","text":"

                                          The advanced configuration of CronJobs mainly involves labels and annotations.

                                          You can click the Add button to add labels and annotations to the workload instance Pod.

                                          "},{"location":"en/admin/kpanda/workloads/create-cronjob.html#create-from-yaml","title":"Create from YAML","text":"

In addition to creating by image, you can also create CronJobs more quickly from YAML files.

                                          1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                          2. On the cluster details page, click Workloads -> CronJobs in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

                                          3. Enter or paste the YAML file prepared in advance, click OK to complete the creation.

Click to view the complete YAML
                                          apiVersion: batch/v1\nkind: CronJob\nmetadata:\n  creationTimestamp: '2022-12-26T09:45:47Z'\n  generation: 1\n  name: demo\n  namespace: default\n  resourceVersion: '92726617'\n  uid: d030d8d7-a405-4dcd-b09a-176942ef36c9\nspec:\n  concurrencyPolicy: Allow\n  failedJobsHistoryLimit: 1\n  jobTemplate:\n    metadata:\n      creationTimestamp: null\n    spec:\n      activeDeadlineSeconds: 360\n      backoffLimit: 6\n      template:\n        metadata:\n          creationTimestamp: null\n        spec:\n          containers:\n            - image: nginx\n              imagePullPolicy: IfNotPresent\n              lifecycle: {}\n              name: container-3\n              resources:\n                limits:\n                  cpu: 250m\n                  memory: 512Mi\n                requests:\n                  cpu: 250m\n                  memory: 512Mi\n              securityContext:\n                privileged: false\n              terminationMessagePath: /dev/termination-log\n              terminationMessagePolicy: File\n          dnsPolicy: ClusterFirst\n          restartPolicy: Never\n          schedulerName: default-scheduler\n          securityContext: {}\n          terminationGracePeriodSeconds: 30\n  schedule: 0 0 13 * 5\n  successfulJobsHistoryLimit: 3\n  suspend: false\nstatus: {}\n
                                          "},{"location":"en/admin/kpanda/workloads/create-daemonset.html","title":"Create DaemonSet","text":"

                                          This page introduces how to create a daemonSet through image and YAML files.

A DaemonSet uses node affinity and tolerations to ensure that a replica of a Pod runs on all or some of the nodes. For nodes that newly join the cluster, the DaemonSet automatically deploys the proper Pod on the new node and tracks the running status of the Pod. When a node is removed, the DaemonSet deletes all Pods it created.

                                          Common cases for daemons include:

• Run a cluster storage daemon on each node.
                                          • Run a log collection daemon on each node.
                                          • Run a monitoring daemon on each node.

In the simple case, one DaemonSet covering all nodes can be started for each type of daemon. For finer-grained and more advanced daemon management, you can also deploy multiple DaemonSets for the same kind of daemon, each with different flags and different memory and CPU requirements for different hardware types.
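
As a minimal sketch of this pattern, the hypothetical manifest below runs a log-collection daemon on every node with its own resource requirements (the name, namespace, and image are placeholders, not part of this platform):
apiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: log-agent            # hypothetical name\n  namespace: logging\nspec:\n  selector:\n    matchLabels:\n      app: log-agent\n  template:\n    metadata:\n      labels:\n        app: log-agent\n    spec:\n      containers:\n        - name: agent\n          image: fluent/fluentd:v1.16    # placeholder image\n          resources:\n            requests:\n              cpu: 100m\n              memory: 200Mi\n            limits:\n              cpu: 200m\n              memory: 400Mi\n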

                                          "},{"location":"en/admin/kpanda/workloads/create-daemonset.html#prerequisites","title":"Prerequisites","text":"

                                          Before creating a DaemonSet, the following prerequisites need to be met:

• In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and be able to access the cluster UI.

                                          • Create a namespace and a user.

                                          • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                          • When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict, otherwise the deployment will fail.

                                          "},{"location":"en/admin/kpanda/workloads/create-daemonset.html#create-by-image","title":"Create by image","text":"

Refer to the following steps to create a DaemonSet from an image.

                                          1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                          2. On the cluster details page, click Workloads -> DaemonSets in the left navigation bar, and then click the Create by Image button in the upper right corner of the page.

                                          3. Fill in Basic Information, Container Settings, Service Settings, Advanced Settings, click OK in the lower right corner of the page to complete the creation.

                                            The system will automatically return the list of DaemonSets . Click \u2507 on the right side of the list to perform operations such as updating, deleting, and restarting the DaemonSet.

                                          "},{"location":"en/admin/kpanda/workloads/create-daemonset.html#basic-information","title":"Basic information","text":"

                                          On the Create DaemonSets page, after entering the information according to the table below, click Next .

                                          • Workload Name: Can contain up to 63 characters, can only contain lowercase letters, numbers, and a separator (\"-\"), and must start and end with a lowercase letter or number. The name of the same type of workload in the same namespace cannot be repeated, and the name of the workload cannot be changed after the workload is created.
• Namespace: Select the namespace where the newly created DaemonSet will be deployed. The default namespace is used by default. If you can't find the desired namespace, you can go to Create a new namespace according to the prompt on the page.
                                          • Description: Enter the description information of the workload and customize the content. The number of characters should not exceed 512.
                                          "},{"location":"en/admin/kpanda/workloads/create-daemonset.html#container-settings","title":"Container settings","text":"

                                          Container setting is divided into six parts: basic information, life cycle, health check, environment variables, data storage, and security settings. Click the tab below to view the requirements of each part.

Container settings apply to a single container at a time. To add multiple containers to a Pod, click + on the right.

                                          Basic information (required)Lifecycle (optional)Health Check (optional)Environment variables (optional)Data storage (optional)Security settings (optional)

                                          When configuring container-related parameters, you must correctly fill in the container name and image parameters, otherwise you will not be able to proceed to the next step. After filling in the settings with reference to the following requirements, click OK .

• Container Name: Up to 63 characters, lowercase letters, numbers and separators (\"-\") are supported. Must start and end with a lowercase letter or number, e.g., nginx-01.
                                          • Image: Enter the address or name of the image. When entering the image name, the image will be pulled from the official DockerHub by default.
                                          • Image Pull Policy: After checking Always pull image , the image will be pulled from the registry every time the workload restarts/upgrades. If it is not checked, only the local image will be pulled, and only when the image does not exist locally, it will be re-pulled from the container registry. For more details, refer to Image Pull Policy.
                                          • Privileged container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and enjoy all the permissions of the running process on the host.
                                          • CPU/Memory Quota: Requested value (minimum resource to be used) and limit value (maximum resource allowed to be used) of CPU/Memory resource. Please configure resources for containers as needed to avoid resource waste and system failures caused by excessive container resources. The default value is shown in the figure.
• GPU Exclusive: Configure the GPU usage for the container; only positive integers are supported. The GPU quota setting supports exclusive use of an entire GPU card or part of a vGPU by the container. For example, for an 8-core GPU card, enter 8 to let the container use the entire card exclusively, or enter 1 to configure a 1-core vGPU for the container.

                                            Before setting exclusive GPU, the administrator needs to install the GPU card and driver plug-in on the cluster nodes in advance, and enable the GPU feature in Cluster Settings.
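
As a sketch, the container resources section produced by these settings looks roughly like the following; note that the GPU resource name (nvidia.com/gpu here) is an assumption that depends on the GPU device plugin the administrator installed:
resources:\n  requests:\n    cpu: 250m          # minimum CPU reserved for the container\n    memory: 512Mi      # minimum memory reserved\n  limits:\n    cpu: 500m          # maximum CPU allowed\n    memory: 1Gi        # maximum memory allowed\n    nvidia.com/gpu: 1  # one whole GPU card (resource name depends on the plugin)\n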

                                          Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle Configuration.

                                          It is used to judge the health status of containers and applications, which helps to improve the availability of applications. For details, refer to Container Health Check Configuration.

                                          Configure container parameters within the Pod, add environment variables or pass settings to the Pod, etc. For details, refer to Container environment variable settings.

                                          Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage Configuration.

                                          Containers are securely isolated through Linux's built-in account authority isolation mechanism. You can limit container permissions by using account UIDs (digital identity tokens) with different permissions. For example, enter 0 to use the privileges of the root account.
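
In YAML terms, these options map roughly to the container's securityContext; a sketch with illustrative values:
securityContext:\n  runAsUser: 1000    # UID the container process runs as; 0 means the root account\n  privileged: false  # true would grant access to all host devices\n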

                                          "},{"location":"en/admin/kpanda/workloads/create-daemonset.html#service-settings","title":"Service settings","text":"

Create a Service for the DaemonSet so that it can be accessed externally.

                                          1. Click the Create Service button.

                                          2. Configure service parameters, refer to Create Service for details.

                                          3. Click OK and click Next .

                                          "},{"location":"en/admin/kpanda/workloads/create-daemonset.html#advanced-settings","title":"Advanced settings","text":"

Advanced settings include four parts: network settings, upgrade policy, scheduling policy, and labels and annotations. You can click the tabs below to view the requirements of each part.

                                          Network ConfigurationUpgrade PolicyScheduling PoliciesLabels and Annotations

                                          In some cases, the application will have redundant DNS queries. Kubernetes provides DNS-related settings options for applications, which can effectively reduce redundant DNS queries and increase business concurrency in certain cases.

                                          • DNS Policy

• Default: Make the container use the domain name resolution file pointed to by the --resolv-conf parameter of kubelet. This setting can only resolve external domain names registered on the Internet, not cluster-internal domain names, and avoids invalid DNS queries.
• ClusterFirstWithHostNet: The application uses the domain name resolution file of the host it runs on.
• ClusterFirst: The application connects to Kube-DNS/CoreDNS.
• None: A new option value introduced in Kubernetes v1.9 (Beta in v1.10). After setting it to None, dnsConfig must be set; the container's domain name resolution file is then generated entirely from the dnsConfig settings.
                                          • Nameservers: fill in the address of the domain name server, such as 10.6.175.20 .

                                          • Search domains: DNS search domain list for domain name query. When specified, the provided search domain list will be merged into the search field of the domain name resolution file generated based on dnsPolicy, and duplicate domain names will be deleted. Kubernetes allows up to 6 search domains.
                                          • Options: Configuration options for DNS, where each object can have a name attribute (required) and a value attribute (optional). The content in this field will be merged into the options field of the domain name resolution file generated based on dnsPolicy. If some options of dnsConfig options conflict with the options of the domain name resolution file generated based on dnsPolicy, they will be overwritten by dnsConfig.
• Host Alias: aliases added to the Pod's hosts file.
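
A minimal sketch of how the DNS options above map to a Pod spec (all addresses and names are illustrative assumptions):
spec:\n  dnsPolicy: None              # with None, dnsConfig fully defines resolution\n  dnsConfig:\n    nameservers:\n      - 10.6.175.20            # illustrative DNS server address\n    searches:\n      - ns1.svc.cluster.local  # merged into the search field\n    options:\n      - name: ndots            # merged into the options field\n        value: '2'\n  hostAliases:\n    - ip: 10.6.175.30          # illustrative alias entry for the hosts file\n      hostnames:\n        - foo.local\n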

                                          • Upgrade Mode: Rolling upgrade refers to gradually replacing instances of the old version with instances of the new version. During the upgrade process, business traffic will be load-balanced to the old and new instances at the same time, so the business will not be interrupted. Rebuild and upgrade refers to deleting the workload instance of the old version first, and then installing the specified new version. During the upgrade process, the business will be interrupted.
                                          • Max Unavailable Pods: Specify the maximum value or ratio of unavailable pods during the workload update process, the default is 25%. If it is equal to the number of instances, there is a risk of service interruption.
                                          • Max Surge: The maximum or ratio of the total number of Pods exceeding the desired replica count of Pods during a Pod update. Default is 25%.
                                          • Revision History Limit: Set the number of old versions retained when the version is rolled back. The default is 10.
                                          • Minimum Ready: The minimum time for a Pod to be ready. Only after this time is the Pod considered available. The default is 0 seconds.
                                          • Upgrade Max Duration: If the deployment is not successful after the set time, the workload will be marked as failed. Default is 600 seconds.
                                          • Graceful Period: The execution period (0-9,999 seconds) of the command before the workload stops, the default is 30 seconds.

                                          • Toleration time: When the node where the workload instance is located is unavailable, the time for rescheduling the workload instance to other available nodes, the default is 300 seconds.
                                          • Node affinity: According to the label on the node, constrain which nodes the Pod can be scheduled on.
                                          • Workload Affinity: Constrains which nodes a Pod can be scheduled to based on the labels of the Pods already running on the node.
                                          • Workload anti-affinity: Constrains which nodes a Pod cannot be scheduled to based on the labels of Pods already running on the node.
• Topology domain: That is, topologyKey, which specifies a group of nodes that can be scheduled to. For example, kubernetes.io/os means that as long as a node with that operating system label meets the labelSelector conditions, Pods can be scheduled to it.

                                          For details, refer to Scheduling Policy.
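
The sketch below shows how these scheduling policies map onto Pod spec fields; the labels and values are illustrative assumptions:
spec:\n  tolerations:\n    - key: node.kubernetes.io/unreachable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300           # toleration time before rescheduling\n  affinity:\n    nodeAffinity:                      # constrain by node labels\n      requiredDuringSchedulingIgnoredDuringExecution:\n        nodeSelectorTerms:\n          - matchExpressions:\n              - key: kubernetes.io/os\n                operator: In\n                values:\n                  - linux\n    podAntiAffinity:                   # workload anti-affinity by Pod labels\n      requiredDuringSchedulingIgnoredDuringExecution:\n        - labelSelector:\n            matchLabels:\n              app: my-app              # illustrative label\n          topologyKey: kubernetes.io/hostname   # topology domain\n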

You can click the Add button to add labels and annotations to workloads and pods.

                                          "},{"location":"en/admin/kpanda/workloads/create-daemonset.html#create-from-yaml","title":"Create from YAML","text":"

In addition to creating from an image, you can also create DaemonSets more quickly through YAML files.

                                          1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the Cluster Details page.

2. On the cluster details page, click Workloads -> DaemonSets in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

                                          3. Enter or paste the YAML file prepared in advance, click OK to complete the creation.

Click to see an example YAML for creating a DaemonSet
kind: DaemonSet\napiVersion: apps/v1\nmetadata:\n  name: hwameistor-local-disk-manager\n  namespace: hwameistor\n  uid: ccbdc098-7de3-4a8a-96dd-d1cee159c92b\n  resourceVersion: '90999552'\n  generation: 1\n  creationTimestamp: '2022-12-15T09:03:44Z'\n  labels:\n    app.kubernetes.io/managed-by: Helm\n  annotations:\n    deprecated.DaemonSet.template.generation: '1'\n    meta.helm.sh/release-name: hwameistor\n    meta.helm.sh/release-namespace: hwameistor\nspec:\n  selector:\n    matchLabels:\n      app: hwameistor-local-disk-manager\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: hwameistor-local-disk-manager\n    spec:\n      volumes:\n        - name: udev\n          hostPath:\n            path: /run/udev\n            type: Directory\n        - name: procmount\n          hostPath:\n            path: /proc\n            type: Directory\n        - name: devmount\n          hostPath:\n            path: /dev\n            type: Directory\n        - name: socket-dir\n          hostPath:\n            path: /var/lib/kubelet/plugins/disk.hwameistor.io\n            type: DirectoryOrCreate\n        - name: registration-dir\n          hostPath:\n            path: /var/lib/kubelet/plugins_registry/\n            type: Directory\n        - name: plugin-dir\n          hostPath:\n            path: /var/lib/kubelet/plugins\n            type: DirectoryOrCreate\n        - name: pods-mount-dir\n          hostPath:\n            path: /var/lib/kubelet/pods\n            type: DirectoryOrCreate\n      containers:\n        - name: registrar\n          image: k8s-gcr.m.daocloud.io/sig-storage/csi-node-driver-registrar:v2.5.0\n          args:\n            - '--v=5'\n            - '--csi-address=/csi/csi.sock'\n            - >-\n              --kubelet-registration-path=/var/lib/kubelet/plugins/disk.hwameistor.io/csi.sock\n          env:\n            - name: KUBE_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: spec.nodeName\n          resources: {}\n          volumeMounts:\n            - name: socket-dir\n              mountPath: /csi\n            - name: registration-dir\n              mountPath: /registration\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /bin/sh\n                  - '-c'\n                  - >-\n                    rm -rf /registration/disk.hwameistor.io\n                    /registration/disk.hwameistor.io-reg.sock\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: manager\n          image: ghcr.m.daocloud.io/hwameistor/local-disk-manager:v0.6.1\n          command:\n            - /local-disk-manager\n          args:\n            - '--endpoint=$(CSI_ENDPOINT)'\n            - '--nodeid=$(NODENAME)'\n            - '--csi-enable=true'\n          env:\n            - name: CSI_ENDPOINT\n              value: unix://var/lib/kubelet/plugins/disk.hwameistor.io/csi.sock\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: WATCH_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: NODENAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: spec.nodeName\n            - name: OPERATOR_NAME\n              value: local-disk-manager\n          resources: {}\n          volumeMounts:\n            - name: udev\n              mountPath: /run/udev\n            - name: procmount\n              readOnly: true\n              mountPath: /host/proc\n            - name: devmount\n              mountPath: /dev\n            - name: registration-dir\n              mountPath: /var/lib/kubelet/plugins_registry\n            - name: plugin-dir\n              mountPath: /var/lib/kubelet/plugins\n              mountPropagation: Bidirectional\n            - name: pods-mount-dir\n              mountPath: /var/lib/kubelet/pods\n              mountPropagation: Bidirectional\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            privileged: true\n      restartPolicy: Always\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      serviceAccountName: hwameistor-admin\n      serviceAccount: hwameistor-admin\n      hostNetwork: true\n      hostPID: true\n      securityContext: {}\n      schedulerName: default-scheduler\n      tolerations:\n        - key: CriticalAddonsOnly\n          operator: Exists\n        - key: node.kubernetes.io/not-ready\n          operator: Exists\n          effect: NoSchedule\n        - key: node-role.kubernetes.io/master\n          operator: Exists\n          effect: NoSchedule\n        - key: node-role.kubernetes.io/control-plane\n          operator: Exists\n          effect: NoSchedule\n        - key: node.cloudprovider.kubernetes.io/uninitialized\n          operator: Exists\n          effect: NoSchedule\n  updateStrategy:\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 1\n      maxSurge: 0\n  revisionHistoryLimit: 10\nstatus:\n  currentNumberScheduled: 4\n  numberMisscheduled: 0\n  desiredNumberScheduled: 4\n  numberReady: 4\n  observedGeneration: 1\n  updatedNumberScheduled: 4\n  numberAvailable: 4\n
                                          "},{"location":"en/admin/kpanda/workloads/create-deployment.html","title":"Create Deployment","text":"

                                          This page describes how to create deployments through images and YAML files.

Deployment is a common resource in Kubernetes. It mainly provides declarative updates for Pods and ReplicaSets, and supports features such as elastic scaling, rolling upgrades, and version rollbacks. Declare the desired Pod state in the Deployment, and the Deployment Controller will modify the current state through the ReplicaSet until it reaches the declared desired state. Deployments are stateless and do not support data persistence; they are suitable for deploying stateless applications that do not need to save data and can be restarted and rolled back at any time.

Through the container management module of AI platform, workloads on multiple clouds and clusters can be easily managed based on proper role permissions, covering the full life cycle of deployments: creation, update, deletion, elastic scaling, restart, and version rollback.

                                          "},{"location":"en/admin/kpanda/workloads/create-deployment.html#prerequisites","title":"Prerequisites","text":"

                                          Before using image to create deployments, the following prerequisites need to be met:

• In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and be able to access the cluster UI.

                                          • Create a namespace and a user.

                                          • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                          • When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict, otherwise the deployment will fail.

                                          "},{"location":"en/admin/kpanda/workloads/create-deployment.html#create-by-image","title":"Create by image","text":"

                                          Follow the steps below to create a deployment by image.

                                          1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the Cluster Details page.

                                          2. On the cluster details page, click Workloads -> Deployments in the left navigation bar, and then click the Create by Image button in the upper right corner of the page.

                                          3. Fill in Basic Information, Container Setting, Service Setting, Advanced Setting in turn, click OK in the lower right corner of the page to complete the creation.

The system will automatically return the list of Deployments . Click \u2507 on the right side of the list to perform operations such as update, delete, elastic scaling, restart, and version rollback on the workload. If the workload status is abnormal, check the specific error information by referring to Workload Status.

                                          "},{"location":"en/admin/kpanda/workloads/create-deployment.html#basic-information","title":"Basic information","text":"
                                          • Workload Name: can contain up to 63 characters, can only contain lowercase letters, numbers, and a separator (\"-\"), and must start and end with a lowercase letter or number, such as deployment-01. The name of the same type of workload in the same namespace cannot be repeated, and the name of the workload cannot be changed after the workload is created.
• Namespace: Select the namespace where the newly created workload will be deployed. The default namespace is used by default. If you can't find the desired namespace, you can go to Create a new namespace according to the prompt on the page.
• Pods: Enter the number of Pod instances for the workload; one Pod instance is created by default.
• Description: Enter the description information of the workload and customize the content. The number of characters cannot exceed 512.
                                          "},{"location":"en/admin/kpanda/workloads/create-deployment.html#container-settings","title":"Container settings","text":"

                                          Container setting is divided into six parts: basic information, life cycle, health check, environment variables, data storage, and security settings. Click the tab below to view the requirements of each part.

Container settings apply to a single container at a time. To add multiple containers to a Pod, click + on the right.

                                          Basic Information (Required)Lifecycle (optional)Health Check (optional)Environment variables (optional)Data storage (optional)Security settings (optional)

                                          When configuring container-related parameters, it is essential to correctly fill in the container name and image parameters; otherwise, you will not be able to proceed to the next step. After filling in the configuration according to the following requirements, click OK.

• Container Type: The default is Work Container. For information on init containers, see the K8s official documentation (https://kubernetes.io/docs/concepts/workloads/pods/init-containers/).
                                          • Container Name: No more than 63 characters, supporting lowercase letters, numbers, and separators (\"-\"). It must start and end with a lowercase letter or number, for example, nginx-01.
                                          • Image:
                                            • Image: Select an appropriate image from the list. When entering the image name, the default is to pull the image from the official DockerHub.
                                            • Image Version: Select an appropriate version from the dropdown list.
                                            • Image Pull Policy: By checking Always pull the image, the image will be pulled from the repository each time the workload restarts/upgrades. If unchecked, it will only pull the local image, and will pull from the repository only if the image does not exist locally. For more details, refer to Image Pull Policy.
• Registry Secret: Optional. If the target repository requires a Secret to access, you need to create a Secret first.
                                          • Privileged Container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and has all the privileges of running processes on the host.
                                          • CPU/Memory Request: The request value (the minimum resource needed) and the limit value (the maximum resource allowed) for CPU/memory resources. Configure resources for the container as needed to avoid resource waste and system failures caused by container resource overages. Default values are shown in the figure.
                                          • GPU Configuration: Configure GPU usage for the container, supporting only positive integers. The GPU quota setting supports configuring the container to exclusively use an entire GPU card or part of a vGPU. For example, for a GPU card with 8 cores, entering the number 8 means the container exclusively uses the entire card, and entering the number 1 means configuring 1 core of the vGPU for the container.

                                          Before setting the GPU, the administrator needs to pre-install the GPU card and driver plugin on the cluster node and enable the GPU feature in the Cluster Settings.

                                          Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle Setting.

                                          It is used to judge the health status of containers and applications, which helps to improve the availability of applications. For details, refer to Container Health Check Setting.

                                          Configure container parameters within the Pod, add environment variables or pass setting to the Pod, etc. For details, refer to Container environment variable setting.

                                          Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage Setting.

                                          Containers are securely isolated through Linux's built-in account authority isolation mechanism. You can limit container permissions by using account UIDs (digital identity tokens) with different permissions. For example, enter 0 to use the privileges of the root account.

                                          "},{"location":"en/admin/kpanda/workloads/create-deployment.html#service-settings","title":"Service settings","text":"

Configure a Service for the deployment so that it can be accessed externally.

                                          1. Click the Create Service button.

                                          2. Refer to Create Service to configure service parameters.

                                          3. Click OK and click Next .
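
For reference, a minimal NodePort Service exposing such a deployment might look like the sketch below (names and ports are illustrative; refer to Create Service for the full parameter set):
apiVersion: v1\nkind: Service\nmetadata:\n  name: nginx-svc        # illustrative name\nspec:\n  type: NodePort         # reachable from outside the cluster via node ports\n  selector:\n    app: nginx           # must match the deployment's Pod labels\n  ports:\n    - port: 80           # Service port inside the cluster\n      targetPort: 80     # container port\n      nodePort: 30080    # illustrative node port (30000-32767)\n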

                                          "},{"location":"en/admin/kpanda/workloads/create-deployment.html#advanced-settings","title":"Advanced settings","text":"

                                          Advanced setting includes four parts: Network Settings, Upgrade Policy, Scheduling Policies, Labels and Annotations. You can click the tabs below to view the setting requirements of each part.

                                          Network SettingsUpgrade PolicyScheduling PoliciesLabels and Annotations
                                          1. For container NIC setting, refer to Workload Usage IP Pool
                                          2. DNS setting

                                          In some cases, the application will have redundant DNS queries. Kubernetes provides DNS-related setting options for applications, which can effectively reduce redundant DNS queries and increase business concurrency in certain cases.

                                          • DNS Policy

• Default: Make the container use the domain name resolution file pointed to by the --resolv-conf parameter of kubelet. This setting can only resolve external domain names registered on the Internet, not cluster-internal domain names, and avoids invalid DNS queries.
• ClusterFirstWithHostNet: The application uses the domain name resolution file of the host it runs on.
• ClusterFirst: The application connects to Kube-DNS/CoreDNS.
                                            • None: New option value introduced in Kubernetes v1.9 (Beta in v1.10). After setting to None, dnsConfig must be set. At this time, the domain name resolution file of the container will be completely generated through the setting of dnsConfig.
                                          • Nameservers: fill in the address of the domain name server, such as 10.6.175.20 .

                                          • Search domains: DNS search domain list for domain name query. When specified, the provided search domain list will be merged into the search field of the domain name resolution file generated based on dnsPolicy, and duplicate domain names will be deleted. Kubernetes allows up to 6 search domains.
                                          • Options: Setting options for DNS, where each object can have a name attribute (required) and a value attribute (optional). The content in this field will be merged into the options field of the domain name resolution file generated based on dnsPolicy. If some options of dnsConfig options conflict with the options of the domain name resolution file generated based on dnsPolicy, they will be overwritten by dnsConfig.
• Host Alias: aliases added to the Pod's hosts file.

                                          • Upgrade Mode: Rolling upgrade refers to gradually replacing instances of the old version with instances of the new version. During the upgrade process, business traffic will be load-balanced to the old and new instances at the same time, so the business will not be interrupted. Rebuild and upgrade refers to deleting the workload instance of the old version first, and then installing the specified new version. During the upgrade process, the business will be interrupted.
                                          • Max Unavailable: Specify the maximum value or ratio of unavailable pods during the workload update process, the default is 25%. If it is equal to the number of instances, there is a risk of service interruption.
                                          • Max Surge: The maximum or ratio of the total number of Pods exceeding the desired replica count of Pods during a Pod update. Default is 25%.
                                          • Revision History Limit: Set the number of old versions retained when the version is rolled back. The default is 10.
                                          • Minimum Ready: The minimum time for a Pod to be ready. Only after this time is the Pod considered available. The default is 0 seconds.
                                          • Upgrade Max Duration: If the deployment is not successful after the set time, the workload will be marked as failed. Default is 600 seconds.
                                          • Graceful Period: The execution period (0-9,999 seconds) of the command before the workload stops, the default is 30 seconds.
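
These upgrade options correspond roughly to the following Deployment fields, shown here as a sketch with the default values described above:
spec:\n  strategy:\n    type: RollingUpdate          # Upgrade Mode: rolling upgrade\n    rollingUpdate:\n      maxUnavailable: 25%        # Max Unavailable\n      maxSurge: 25%              # Max Surge\n  revisionHistoryLimit: 10       # Revision History Limit\n  minReadySeconds: 0             # Minimum Ready\n  progressDeadlineSeconds: 600   # Upgrade Max Duration\n  template:\n    spec:\n      terminationGracePeriodSeconds: 30   # Graceful Period\n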

                                          • Toleration time: When the node where the workload instance is located is unavailable, the time for rescheduling the workload instance to other available nodes, the default is 300 seconds.
                                          • Node Affinity: According to the label on the node, constrain which nodes the Pod can be scheduled on.
                                          • Workload Affinity: Constrains which nodes a Pod can be scheduled to based on the labels of the Pods already running on the node.
                                          • Workload Anti-affinity: Constrains which nodes a Pod cannot be scheduled to based on the labels of Pods already running on the node.

                                          For details, refer to Scheduling Policy.

You can click the Add button to add labels and annotations to workloads and pods.

                                          "},{"location":"en/admin/kpanda/workloads/create-deployment.html#create-from-yaml","title":"Create from YAML","text":"

In addition to creating from an image, you can also create deployments more quickly through YAML files.

                                          1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the Cluster Details page.

                                          2. On the cluster details page, click Workloads -> Deployments in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

                                          3. Enter or paste the YAML file prepared in advance, click OK to complete the creation.

                                          Click to see an example YAML for creating a deployment
apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nginx-deployment\nspec:\n  selector:\n    matchLabels:\n      app: nginx\n  replicas: 2 # (1)!\n  template:\n    metadata:\n      labels:\n        app: nginx\n    spec:\n      containers:\n        - name: nginx\n          image: nginx:1.14.2\n          ports:\n            - containerPort: 80\n
                                          1. Tell the Deployment to run 2 Pods that match this template
                                          "},{"location":"en/admin/kpanda/workloads/create-job.html","title":"Create Job","text":"

This page introduces how to create a job through an image or a YAML file.

A Job is suitable for performing one-off tasks. A Job creates one or more Pods, and it keeps retrying to run them until the specified number of Pods terminate successfully, at which point the Job ends. When a Job is deleted, all Pods created by the Job are cleaned up. When a Job is paused, all active Pods in the Job are deleted until the Job is resumed. For more information about jobs, refer to Job.

                                          "},{"location":"en/admin/kpanda/workloads/create-job.html#prerequisites","title":"Prerequisites","text":"
• In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and be able to access the cluster UI.

                                          • Create a namespace and a user.

                                          • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                          • When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict, otherwise the deployment will fail.

                                          "},{"location":"en/admin/kpanda/workloads/create-job.html#create-by-image","title":"Create by image","text":"

                                          Refer to the following steps to create a job using an image.

                                          1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                          2. On the cluster details page, click Workloads -> Jobs in the left navigation bar, and then click the Create by Image button in the upper right corner of the page.

                                          3. Fill in Basic Information, Container Settings and Advanced Settings, click OK in the lower right corner of the page to complete the creation.

                                            The system will automatically return to the job list. Click \u2507 on the right side of the list to perform operations such as updating, deleting, and restarting the job.

                                          "},{"location":"en/admin/kpanda/workloads/create-job.html#basic-information","title":"Basic information","text":"

                                          On the Create Jobs page, enter the basic information according to the table below, and click Next .

• Workload Name: Can contain up to 63 characters, can only contain lowercase letters, numbers, and a separator (\"-\"), and must start and end with a lowercase letter or number. The name of the same type of workload in the same namespace cannot be repeated, and the name of the workload cannot be changed after the workload is created.
• Namespace: Select the namespace where the newly created job will be deployed. The default namespace is used by default. If you can't find the desired namespace, you can go to Create a new namespace according to the prompt on the page.
                                          • Number of Instances: Enter the number of Pod instances for the workload. By default, 1 Pod instance is created.
                                          • Description: Enter the description information of the workload and customize the content. The number of characters should not exceed 512.
                                          "},{"location":"en/admin/kpanda/workloads/create-job.html#container-settings","title":"Container settings","text":"

                                          Container setting is divided into six parts: basic information, life cycle, health check, environment variables, data storage, and security settings. Click the tab below to view the setting requirements of each part.

Container settings apply to a single container at a time. To add multiple containers to a Pod, click + on the right.

                                          Basic information (required)Lifecycle (optional)Health Check (optional)Environment Variables (optional)Data Storage (optional)Security Settings (optional)

                                          When configuring container-related parameters, you must correctly fill in the container name and image parameters, otherwise you will not be able to proceed to the next step. After filling in the settings with reference to the following requirements, click OK .

• Container Name: Up to 63 characters, lowercase letters, numbers and separators (\"-\") are supported. Must start and end with a lowercase letter or number, e.g., nginx-01.
                                          • Image: Enter the address or name of the image. When entering the image name, the image will be pulled from the official DockerHub by default.
                                          • Image Pull Policy: After checking Always pull image , the image will be pulled from the registry every time the workload restarts/upgrades. If it is not checked, only the local image will be pulled, and only when the image does not exist locally, it will be re-pulled from the container registry. For more details, refer to Image Pull Policy.
                                          • Privileged container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and enjoy all the permissions of the running process on the host.
                                          • CPU/Memory Quota: Requested value (minimum resource to be used) and limit value (maximum resource allowed to be used) of CPU/Memory resource. Please configure resources for containers as needed to avoid resource waste and system failures caused by excessive container resources. The default value is shown in the figure.
• GPU Exclusive: Configure the GPU usage for the container; only positive integers are supported. The GPU quota setting supports exclusive use of an entire GPU card or part of a vGPU by the container. For example, for an 8-core GPU card, enter 8 to let the container use the entire card exclusively, or enter 1 to configure a 1-core vGPU for the container.

                                          Before setting exclusive GPU, the administrator needs to install the GPU card and driver plug-in on the cluster nodes in advance, and enable the GPU feature in Cluster Settings.

                                          Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle settings.

                                          It is used to judge the health status of containers and applications, which helps to improve the availability of applications. For details, refer to Container Health Check settings.

                                          Configure container parameters within the Pod, add environment variables or pass settings to the Pod, etc. For details, refer to Container environment variable settings.

                                          Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage settings.

                                          Containers are securely isolated through Linux's built-in account authority isolation mechanism. You can limit container permissions by using account UIDs (digital identity tokens) with different permissions. For example, enter 0 to use the privileges of the root account.

                                          "},{"location":"en/admin/kpanda/workloads/create-job.html#advanced-settings","title":"Advanced settings","text":"

                                          Advanced setting includes job settings, labels and annotations.

                                          Job SettingsLabels and Annotations

                                          • Parallel Pods: the maximum number of Pods that can be created at the same time during job execution, and the parallel number should not be greater than the total number of Pods. Default is 1.
                                          • Timeout: When this time is exceeded, the job will be marked as failed to execute, and all Pods under the job will be deleted. When it is empty, it means that no timeout is set.
• Restart Policy: Set whether to restart the Pod when the job fails.
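
In a Job manifest these settings correspond roughly to the fields below (a sketch; the timeout value is illustrative):
spec:\n  parallelism: 1               # Parallel Pods\n  activeDeadlineSeconds: 360   # Timeout; omit the field to set no timeout\n  backoffLimit: 6              # retries before the Job is marked failed\n  template:\n    spec:\n      restartPolicy: Never     # Restart Policy: Never or OnFailure\n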

                                          You can click the Add button to add labels and annotations to the workload instance Pod.

                                          "},{"location":"en/admin/kpanda/workloads/create-job.html#create-from-yaml","title":"Create from YAML","text":"

In addition to creating from an image, jobs can also be created more quickly through YAML files.

                                          1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                          2. On the cluster details page, click Workloads -> Jobs in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

                                          3. Enter or paste the YAML file prepared in advance, click OK to complete the creation.

                                          Click to view the complete YAML
                                          kind: Job\napiVersion: batch/v1\nmetadata:\n  name: demo\n  namespace: default\n  uid: a9708239-0358-4aa1-87d3-a092c080836e\n  resourceVersion: '92751876'\n  generation: 1\n  creationTimestamp: '2022-12-26T10:52:22Z'\n  labels:\n    app: demo\n    controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n    job-name: demo\n  annotations:\n    revisions: >-\n      {\"1\":{\"status\":\"running\",\"uid\":\"a9708239-0358-4aa1-87d3-a092c080836e\",\"start-time\":\"2022-12-26T10:52:22Z\",\"completion-time\":\"0001-01-01T00:00:00Z\"}}\nspec:\n  parallelism: 1\n  backoffLimit: 6\n  selector:\n    matchLabels:\n      controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: demo\n        controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n        job-name: demo\n    spec:\n      containers:\n        - name: container-4\n          image: nginx\n          resources:\n            limits:\n              cpu: 250m\n              memory: 512Mi\n            requests:\n              cpu: 250m\n              memory: 512Mi\n          lifecycle: {}\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            privileged: false\n      restartPolicy: Never\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      securityContext: {}\n      schedulerName: default-scheduler\n  completionMode: NonIndexed\n  suspend: false\nstatus:\n  startTime: '2022-12-26T10:52:22Z'\n  active: 1\n
                                          "},{"location":"en/admin/kpanda/workloads/create-statefulset.html","title":"Create StatefulSet","text":"

                                          This page describes how to create a StatefulSet through image and YAML files.

StatefulSet is a common resource in Kubernetes. Like Deployment, it is mainly used to manage the deployment and scaling of Pod collections. The main difference between the two is that Deployment is stateless and does not save data, while StatefulSet is stateful and is mainly used to manage stateful applications. In addition, Pods in a StatefulSet have persistent IDs, which makes it easy to match each Pod to the proper storage volume.

Through the container management module of AI platform, workloads on multiple clouds and clusters can be easily managed based on proper role permissions, covering the full life cycle of StatefulSets: creation, update, deletion, elastic scaling, restart, and version rollback.
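
As a minimal sketch, a StatefulSet pairs each Pod with its own PersistentVolumeClaim through volumeClaimTemplates and a headless Service, which is what gives Pods their persistent identity (names and sizes below are illustrative):
apiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: web                    # illustrative name\nspec:\n  serviceName: web-headless    # headless Service that gives Pods stable DNS names\n  replicas: 2\n  selector:\n    matchLabels:\n      app: web\n  template:\n    metadata:\n      labels:\n        app: web\n    spec:\n      containers:\n        - name: nginx\n          image: nginx:1.14.2\n          volumeMounts:\n            - name: data\n              mountPath: /usr/share/nginx/html\n  volumeClaimTemplates:        # one PVC per Pod, reattached on reschedule\n    - metadata:\n        name: data\n      spec:\n        accessModes:\n          - ReadWriteOnce\n        resources:\n          requests:\n            storage: 1Gi\n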

                                          "},{"location":"en/admin/kpanda/workloads/create-statefulset.html#prerequisites","title":"Prerequisites","text":"

                                          Before using image to create StatefulSets, the following prerequisites need to be met:

• In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and be able to access the cluster UI.

                                          • Create a namespace and a user.

                                          • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                          • When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict, otherwise the deployment will fail.

                                          "},{"location":"en/admin/kpanda/workloads/create-statefulset.html#create-by-image","title":"Create by image","text":"

Follow the steps below to create a StatefulSet using an image.

                                          1. Click Clusters on the left navigation bar, then click the name of the target cluster to enter Cluster Details.

                                          2. Click Workloads -> StatefulSets in the left navigation bar, and then click the Create by Image button in the upper right corner.

                                          3. Fill in Basic Information, Container Settings, Service Settings, Advanced Settings, click OK in the lower right corner of the page to complete the creation.

                                            The system will automatically return to the list of StatefulSets , and wait for the status of the workload to become running . If the workload status is abnormal, refer to Workload Status for specific exception information.

                                            Click \u2507 on the right side of the New Workload column to perform operations such as update, delete, elastic scaling, restart, and version rollback on the workload.

                                          "},{"location":"en/admin/kpanda/workloads/create-statefulset.html#basic-information","title":"Basic Information","text":"
                                          • Workload Name: can contain up to 63 characters, can only contain lowercase letters, numbers, and a separator (\"-\"), and must start and end with a lowercase letter or number, such as deployment-01. The name of the same type of workload in the same namespace cannot be repeated, and the name of the workload cannot be changed after the workload is created.
• Namespace: Select the namespace where the newly created workload will be deployed. The default namespace is used by default. If you can't find the desired namespace, you can go to Create a new namespace according to the prompt on the page.
• Pods: Enter the number of Pod instances for the workload; one Pod instance is created by default.
• Description: Enter the description information of the workload and customize the content. The number of characters cannot exceed 512.
                                          "},{"location":"en/admin/kpanda/workloads/create-statefulset.html#container-settings","title":"Container settings","text":"

                                          Container setting is divided into six parts: basic information, life cycle, health check, environment variables, data storage, and security settings. Click the tab below to view the requirements of each part.

Container settings apply to a single container at a time. To add multiple containers to a Pod, click + on the right.

                                          Basic information (required)Lifecycle (optional)Health Check (optional)Environment Variables (optional)Data Storage (optional)Security Settings (optional)

                                          When configuring container-related parameters, you must correctly fill in the container name and image parameters, otherwise you will not be able to proceed to the next step. After filling in the settings with reference to the following requirements, click OK .

• Container Name: Up to 63 characters, lowercase letters, numbers and separators (\"-\") are supported. Must start and end with a lowercase letter or number, e.g., nginx-01.
                                          • Image: Enter the address or name of the image. When entering the image name, the image will be pulled from the official DockerHub by default.
                                          • Image Pull Policy: After checking Always pull image , the image will be pulled from the registry every time the workload restarts/upgrades. If it is not checked, only the local image will be pulled, and only when the image does not exist locally, it will be re-pulled from the container registry. For more details, refer to Image Pull Policy.
                                          • Privileged container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and enjoy all the permissions of the running process on the host.
                                          • CPU/Memory Quota: Requested value (minimum resource to be used) and limit value (maximum resource allowed to be used) of CPU/Memory resource. Please configure resources for containers as needed to avoid resource waste and system failures caused by excessive container resources. The default value is shown in the figure.
• GPU Exclusive: Configure GPU usage for the container; only positive integers are supported. The GPU quota supports assigning the container exclusive use of an entire GPU card or part of a vGPU. For example, for an 8-core GPU card, entering 8 gives the container exclusive use of the whole card, while entering 1 assigns it a 1-core vGPU (a minimal request sketch follows the note below).

                                          Before setting exclusive GPU, the administrator needs to install the GPU card and driver plug-in on the cluster nodes in advance, and enable the GPU feature in Cluster Settings.
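For reference, a whole-card GPU request usually surfaces in the Pod spec as an extended resource limit. Below is a minimal sketch assuming the NVIDIA device plugin, whose resource name is nvidia.com/gpu ; the actual resource name depends on the GPU vendor and on the plugin the administrator installed.

apiVersion: v1
kind: Pod
metadata:
  name: gpu-demo              # illustrative name
spec:
  containers:
    - name: cuda-app
      image: nvidia/cuda:11.8.0-base-ubuntu22.04
      resources:
        limits:
          nvidia.com/gpu: 1   # request one whole GPU card (extended resource)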

                                          Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle Configuration.

Used to judge the health status of containers and applications, which helps improve application availability. For details, refer to Container Health Check Configuration.

                                          Configure container parameters within the Pod, add environment variables or pass settings to the Pod, etc. For details, refer to Container environment variable settings.

                                          Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage Configuration.

Containers are securely isolated through Linux's built-in account permission isolation mechanism. You can limit container permissions by using account UIDs (digital identity tokens) with different permissions. For example, enter 0 to run with the privileges of the root account.
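For reference, the UID entered here corresponds to the container's securityContext.runAsUser field. A minimal sketch (name and image are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: security-demo
spec:
  containers:
    - name: app
      image: nginx
      securityContext:
        runAsUser: 0   # 0 = root account privileges; use a non-zero UID to restrict permissions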

                                          "},{"location":"en/admin/kpanda/workloads/create-statefulset.html#service-settings","title":"Service settings","text":"

Configure a Service for the statefulset so that it can be accessed externally (a headless Service sketch follows the steps below).

                                          1. Click the Create Service button.

                                          2. Refer to Create Service to configure service parameters.

                                          3. Click OK and click Next .
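For reference, a StatefulSet is normally paired with a headless Service whose name matches the StatefulSet's spec.serviceName , giving every Pod a stable DNS record. A minimal sketch with illustrative names:

apiVersion: v1
kind: Service
metadata:
  name: mysql              # must match .spec.serviceName of the StatefulSet
  namespace: default
spec:
  clusterIP: None          # headless: each Pod gets a stable DNS record
  selector:
    app: mysql             # must match the Pod labels of the StatefulSet
  ports:
    - name: mysql
      port: 3306
      targetPort: 3306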

                                          "},{"location":"en/admin/kpanda/workloads/create-statefulset.html#advanced-settings","title":"Advanced settings","text":"

Advanced settings include five parts: network settings, upgrade policy, container management policy, scheduling policy, and labels and annotations. You can click the tabs below to view the requirements of each part.

Network Configuration | Upgrade Policy | Container Management Policies | Scheduling Policies | Labels and Annotations
1. For container NIC settings, refer to Workload Usage IP Pool.
                                          2. DNS settings

In some cases, an application generates redundant DNS queries. Kubernetes provides DNS-related options for applications that can effectively reduce redundant DNS queries and increase business concurrency in certain scenarios (a YAML sketch follows this list).

                                          • DNS Policy

• Default: The container uses the domain name resolution file pointed to by the kubelet's --resolv-conf parameter. This setting can only resolve external domain names registered on the Internet, not cluster-internal domain names, and produces no invalid DNS queries.
• ClusterFirstWithHostNet: For Pods running with the host network, the cluster's DNS resolution is used first instead of the host's resolution file.
• ClusterFirst: The application uses the cluster DNS service (Kube-DNS/CoreDNS).
• None: An option value introduced in Kubernetes v1.9 (Beta in v1.10). When set to None, dnsConfig must be set; the container's domain name resolution file is then generated entirely from the dnsConfig settings.
• Nameservers: Fill in the addresses of the DNS servers, such as 10.6.175.20 .

• Search domains: The DNS search domain list for domain name queries. When specified, the provided search domain list is merged into the search field of the resolution file generated from dnsPolicy, and duplicate domain names are removed. Kubernetes allows up to 6 search domains.
• Options: Configuration options for DNS, where each object may have a name attribute (required) and a value attribute (optional). The contents of this field are merged into the options field of the resolution file generated from dnsPolicy; where dnsConfig options conflict with options generated from dnsPolicy, the dnsConfig values take precedence.
• Host Alias: Aliases added for specific IP addresses to the Pod's hosts file (/etc/hosts).
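A minimal Pod sketch showing how these options map to the dnsPolicy , dnsConfig , and hostAliases fields (addresses and names are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: dns-demo
spec:
  dnsPolicy: "None"            # resolv.conf is generated entirely from dnsConfig
  dnsConfig:
    nameservers:
      - 10.6.175.20            # DNS server address
    searches:
      - ns1.svc.cluster.local  # extra search domain
    options:
      - name: ndots            # option with name (required) and value (optional)
        value: "2"
  hostAliases:
    - ip: "10.0.0.8"
      hostnames:
        - "db.internal"        # host alias written into the Pod's /etc/hosts
  containers:
    - name: app
      image: busybox
      command: ["sleep", "3600"]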

• Upgrade Mode: A rolling upgrade gradually replaces old-version instances with new-version instances; during the upgrade, business traffic is load-balanced across old and new instances at the same time, so the business is not interrupted. A rebuild upgrade deletes the old-version workload instances first and then installs the specified new version; the business is interrupted during the upgrade.
• Revision History Limit: The number of old revisions retained for rollbacks. The default is 10.
• Graceful Period: The period (0-9,999 seconds) allowed for the pre-stop command to run before the workload is stopped; the default is 30 seconds. A fragment sketch of these fields follows this list.
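A rough sketch, assuming these options map onto the StatefulSet spec as follows (fragment, values illustrative):

spec:
  revisionHistoryLimit: 10        # old revisions kept for rollback
  updateStrategy:
    type: RollingUpdate           # StatefulSets also support OnDelete for manual replacement
    rollingUpdate:
      partition: 0                # Pods with ordinal >= partition are updated
  template:
    spec:
      terminationGracePeriodSeconds: 30   # graceful period before the Pod is force-killed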

Kubernetes v1.7 and later versions can set the Pod management policy through .spec.podManagementPolicy , which supports the following two modes (see the sketch after this list):

• OrderedReady : The default Pod management policy, meaning Pods are deployed in order: the statefulset starts deploying the next Pod only after the previous Pod has been deployed successfully. Pods are deleted in reverse order, with the last created deleted first.

• Parallel : Create or delete Pods in parallel, just like Pods of the Deployment type. The StatefulSet controller starts or terminates all Pods in parallel, without waiting for a Pod to become Running and Ready or to stop completely before starting or terminating another. This option only affects the behavior of scaling operations, not the order of updates.
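A minimal StatefulSet sketch that switches the policy to Parallel (names and image are illustrative):

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: web
  podManagementPolicy: Parallel   # default is OrderedReady
  replicas: 3
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
        - name: nginx
          image: nginx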

• Tolerance time: When the node where a workload instance runs becomes unavailable, the time after which the instance is rescheduled to other available nodes. The default is 300 seconds.
• Node affinity: Constrains which nodes the Pod can be scheduled on, according to the labels on the nodes.
                                          • Workload Affinity: Constrains which nodes a Pod can be scheduled to based on the labels of the Pods already running on the node.
                                          • Workload anti-affinity: Constrains which nodes a Pod cannot be scheduled to based on the labels of Pods already running on the node.
• Topology domain: namely topologyKey, used to specify the group of nodes treated as one scheduling scope. For example, with kubernetes.io/os , all nodes running the same operating system form one topology domain, and a Pod matching the labelSelector conditions can be scheduled to any node within it.

                                          For details, refer to Scheduling Policy.

You can click the Add button to add labels and annotations to workloads and pods.

                                          "},{"location":"en/admin/kpanda/workloads/create-statefulset.html#create-from-yaml","title":"Create from YAML","text":"

In addition to creating statefulsets via image, you can also create them more quickly from YAML files.

                                          1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the Cluster Details page.

                                          2. On the cluster details page, click Workloads -> StatefulSets in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

3. Enter or paste the YAML file prepared in advance, then click OK to complete the creation.

Click to view an example YAML for creating a StatefulSet
kind: StatefulSet\napiVersion: apps/v1\nmetadata:\n  name: test-mysql-123-mysql\n  namespace: default\n  uid: d3f45527-a0ab-4b22-9013-5842a06f4e0e\n  resourceVersion: '20504385'\n  generation: 1\n  creationTimestamp: '2022-09-22T09:34:10Z'\n  ownerReferences:\n    - apiVersion: mysql.presslabs.org/v1alpha1\n      kind: MysqlCluster\n      name: test-mysql-123\n      uid: 5e877cc3-5167-49da-904e-820940cf1a6d\n      controller: true\n      blockOwnerDeletion: true\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app.kubernetes.io/managed-by: mysql.presslabs.org\n      app.kubernetes.io/name: mysql\n      mysql.presslabs.org/cluster: test-mysql-123\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app.kubernetes.io/component: database\n        app.kubernetes.io/instance: test-mysql-123\n        app.kubernetes.io/managed-by: mysql.presslabs.org\n        app.kubernetes.io/name: mysql\n        app.kubernetes.io/version: 5.7.31\n        mysql.presslabs.org/cluster: test-mysql-123\n      annotations:\n        config_rev: '13941099'\n        prometheus.io/port: '9125'\n        prometheus.io/scrape: 'true'\n        secret_rev: '13941101'\n    spec:\n      volumes:\n        - name: conf\n          emptyDir: {}\n        - name: init-scripts\n          emptyDir: {}\n        - name: config-map\n          configMap:\n            name: test-mysql-123-mysql\n            defaultMode: 420\n        - name: data\n          persistentVolumeClaim:\n            claimName: data\n      initContainers:\n        - name: init\n          image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - clone-and-init\n          envFrom:\n            - secretRef:\n                name: test-mysql-123-mysql-operated\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: BACKUP_USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: BACKUP_USER\n                  optional: true\n            - name: BACKUP_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: BACKUP_PASSWORD\n                  optional: true\n          resources: {}\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: config-map\n              mountPath: /mnt/conf\n            - name: data\n              mountPath: /var/lib/mysql\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n      containers:\n        - 
name: mysql\n          image: docker.m.daocloud.io/mysql:5.7.31\n          ports:\n            - name: mysql\n              containerPort: 3306\n              protocol: TCP\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: ORCH_CLUSTER_ALIAS\n              value: test-mysql-123.default\n            - name: ORCH_HTTP_API\n              value: http://mysql-operator.mcamel-system/api\n            - name: MYSQL_ROOT_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: ROOT_PASSWORD\n                  optional: false\n            - name: MYSQL_USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: USER\n                  optional: true\n            - name: MYSQL_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: PASSWORD\n                  optional: true\n            - name: MYSQL_DATABASE\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: DATABASE\n                  optional: true\n          resources:\n            limits:\n              cpu: '1'\n              memory: 1Gi\n            requests:\n              cpu: 100m\n              memory: 512Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: data\n              mountPath: /var/lib/mysql\n          livenessProbe:\n            exec:\n              command:\n                - mysqladmin\n                - '--defaults-file=/etc/mysql/client.conf'\n                - ping\n            initialDelaySeconds: 60\n            timeoutSeconds: 5\n            periodSeconds: 5\n            successThreshold: 1\n            failureThreshold: 3\n          readinessProbe:\n            exec:\n              command:\n                - /bin/sh\n                - '-c'\n                - >-\n                  test $(mysql --defaults-file=/etc/mysql/client.conf -NB -e\n                  'SELECT COUNT(*) FROM sys_operator.status WHERE\n                  name=\"configured\" AND value=\"1\"') -eq 1\n            initialDelaySeconds: 5\n            timeoutSeconds: 5\n            periodSeconds: 2\n            successThreshold: 1\n            failureThreshold: 3\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - bash\n                  - /etc/mysql/pre-shutdown-ha.sh\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: sidecar\n          image: 
docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - config-and-serve\n          ports:\n            - name: sidecar-http\n              containerPort: 8080\n              protocol: TCP\n          envFrom:\n            - secretRef:\n                name: test-mysql-123-mysql-operated\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: XTRABACKUP_TARGET_DIR\n              value: /tmp/xtrabackup_backupfiles/\n          resources:\n            limits:\n              cpu: '1'\n              memory: 1Gi\n            requests:\n              cpu: 10m\n              memory: 64Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: data\n              mountPath: /var/lib/mysql\n          readinessProbe:\n            httpGet:\n              path: /health\n              port: 8080\n              scheme: HTTP\n            initialDelaySeconds: 30\n            timeoutSeconds: 5\n            periodSeconds: 5\n            successThreshold: 1\n            failureThreshold: 3\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: metrics-exporter\n          image: prom/mysqld-exporter:v0.13.0\n          args:\n            - '--web.listen-address=0.0.0.0:9125'\n            - '--web.telemetry-path=/metrics'\n            - '--collect.heartbeat'\n            - '--collect.heartbeat.database=sys_operator'\n          ports:\n            - name: prometheus\n              containerPort: 9125\n              protocol: TCP\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: METRICS_EXPORTER_USER\n                  optional: false\n            - name: PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: 
test-mysql-123-mysql-operated\n                  key: METRICS_EXPORTER_PASSWORD\n                  optional: false\n            - name: DATA_SOURCE_NAME\n              value: $(USER):$(PASSWORD)@(127.0.0.1:3306)/\n          resources:\n            limits:\n              cpu: 100m\n              memory: 128Mi\n            requests:\n              cpu: 10m\n              memory: 32Mi\n          livenessProbe:\n            httpGet:\n              path: /metrics\n              port: 9125\n              scheme: HTTP\n            initialDelaySeconds: 30\n            timeoutSeconds: 30\n            periodSeconds: 30\n            successThreshold: 1\n            failureThreshold: 3\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: pt-heartbeat\n          image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - pt-heartbeat\n            - '--update'\n            - '--replace'\n            - '--check-read-only'\n            - '--create-table'\n            - '--database'\n            - sys_operator\n            - '--table'\n            - heartbeat\n            - '--utc'\n            - '--defaults-file'\n            - /etc/mysql/heartbeat.conf\n            - '--fail-successive-errors=20'\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n          resources:\n            limits:\n              cpu: 100m\n              memory: 64Mi\n            requests:\n              cpu: 10m\n              memory: 32Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n      restartPolicy: Always\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      securityContext:\n        runAsUser: 999\n        fsGroup: 999\n      affinity:\n        podAntiAffinity:\n          preferredDuringSchedulingIgnoredDuringExecution:\n            - weight: 100\n              podAffinityTerm:\n                labelSelector:\n                  matchLabels:\n                    app.kubernetes.io/component: database\n                    app.kubernetes.io/instance: test-mysql-123\n                    app.kubernetes.io/managed-by: mysql.presslabs.org\n                    app.kubernetes.io/name: mysql\n                    app.kubernetes.io/version: 5.7.31\n                    mysql.presslabs.org/cluster: test-mysql-123\n                topologyKey: kubernetes.io/hostname\n      schedulerName: default-scheduler\n  volumeClaimTemplates:\n    - kind: PersistentVolumeClaim\n      apiVersion: v1\n      metadata:\n        name: data\n        
creationTimestamp: null\n        ownerReferences:\n          - apiVersion: mysql.presslabs.org/v1alpha1\n            kind: MysqlCluster\n            name: test-mysql-123\n            uid: 5e877cc3-5167-49da-904e-820940cf1a6d\n            controller: true\n      spec:\n        accessModes:\n          - ReadWriteOnce\n        resources:\n          limits:\n            storage: 1Gi\n          requests:\n            storage: 1Gi\n        storageClassName: local-path\n        volumeMode: Filesystem\n      status:\n        phase: Pending\n  serviceName: mysql\n  podManagementPolicy: OrderedReady\n  updateStrategy:\n    type: RollingUpdate\n    rollingUpdate:\n      partition: 0\n  revisionHistoryLimit: 10\nstatus:\n  observedGeneration: 1\n  replicas: 1\n  readyReplicas: 1\n  currentReplicas: 1\n  updatedReplicas: 1\n  currentRevision: test-mysql-123-mysql-6b8f5577c7\n  updateRevision: test-mysql-123-mysql-6b8f5577c7\n  collisionCount: 0\n  availableReplicas: 1\n
                                          "},{"location":"en/admin/kpanda/workloads/pod-config/env-variables.html","title":"Configure environment variables","text":"

An environment variable is a variable set in the container's running environment, used to add environment flags to Pods or pass configuration. Environment variables can be configured for Pods in the form of key-value pairs.

Suanova container management adds a graphical interface on top of native Kubernetes for configuring environment variables for Pods, and supports the following configuration methods (a combined YAML sketch follows this list):

• Key-value pair (Key/Value Pair): Use a custom key-value pair as the environment variable of the container

• Resource reference (Resource): Use a field defined by the container, such as the container's memory limit, as the value of an environment variable

• Variable/Variable Reference (Pod Field): Use a Pod field, such as the Pod's name, as the value of an environment variable

• ConfigMap key value import (ConfigMap key): Import the value of a single key in a ConfigMap as the value of an environment variable

• Secret key value import (Secret Key): Use the data of a single key in a Secret as the value of an environment variable

• Secret import (Secret): Import all key values in a Secret as environment variables

• ConfigMap import (ConfigMap): Import all key values in a ConfigMap as environment variables
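A combined Pod sketch covering these methods; the ConfigMap app-config and Secret app-secret are assumed to exist already:

apiVersion: v1
kind: Pod
metadata:
  name: env-demo
spec:
  containers:
    - name: app
      image: busybox
      command: ["sh", "-c", "env && sleep 3600"]
      env:
        - name: GREETING                  # plain key-value pair
          value: "hello"
        - name: POD_NAME                  # Pod field reference
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: MEM_LIMIT                 # container resource reference
          valueFrom:
            resourceFieldRef:
              containerName: app
              resource: limits.memory
        - name: DB_HOST                   # a single key from a ConfigMap
          valueFrom:
            configMapKeyRef:
              name: app-config
              key: db_host
        - name: DB_PASSWORD               # a single key from a Secret
          valueFrom:
            secretKeyRef:
              name: app-secret
              key: password
      envFrom:
        - configMapRef:
            name: app-config              # import all ConfigMap keys
        - secretRef:
            name: app-secret              # import all Secret keys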

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/health-check.html","title":"Container health check","text":"

Container health checks determine the health status of containers and applications according to user requirements. Once configured, a container whose application becomes abnormal is automatically restarted to recover. Kubernetes provides liveness checks, readiness checks, and startup checks.

• Liveness check (LivenessProbe) can detect application deadlock (the application is running but cannot make further progress). Restarting containers in this state can help improve application availability, even if they contain bugs.

• Readiness check (ReadinessProbe) can detect when a container is ready to accept request traffic. A Pod is considered ready only when all of its containers are ready. One use of this signal is to control which Pods serve as backends for a Service: Pods that are not ready are removed from the Service's load balancer.

• Startup check (StartupProbe) reports when the application inside the container has started. When configured, liveness and readiness checks run only after the startup check has succeeded, ensuring they do not interfere with application startup. This can be used for liveness checking of slow-starting containers, preventing them from being killed before they are up and running.

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/health-check.html#liveness-and-readiness-checks","title":"Liveness and readiness checks","text":"

The configuration of LivenessProbe is similar to that of ReadinessProbe; the only difference is using the readinessProbe field instead of the livenessProbe field.

                                          HTTP GET parameter description:

Parameter Description Path (Path) The request path for access, such as the /healthz path in the example. Port (Port) The service listening port, such as port 8080 in the example. Protocol (protocol) The access protocol, HTTP or HTTPS. Delay time (initialDelaySeconds) The delay before the check starts, in seconds. This setting relates to the normal startup time of the business program; for example, a value of 30 means the health check starts 30 seconds after the container starts, which is time reserved for the business program to start. Timeout (timeoutSeconds) The timeout, in seconds. For example, a value of 10 means the health check waits at most 10 seconds; if that time is exceeded, the check is regarded as failed. If set to 0 or unset, the default timeout is 1 second. Success threshold (successThreshold) The minimum number of consecutive successes required for the probe to be considered successful after a failure. The default value is 1, and the minimum value is 1. This value must be 1 for liveness and startup probes. Maximum number of failures (failureThreshold) The number of retries when the probe fails. Giving up on a liveness probe means restarting the container; a Pod given up on by a readiness probe is marked as not ready. The default value is 3, and the minimum value is 1."},{"location":"en/admin/kpanda/workloads/pod-config/health-check.html#check-with-http-get-request","title":"Check with HTTP GET request","text":"

                                          YAML example:

                                          apiVersion: v1\nkind: Pod\nmetadata:\n  labels:\n    test: liveness\n  name: liveness-http\nspec:\n  containers:\n  - name: liveness  # Container name\n    image: k8s.gcr.io/liveness  # Container image\n    args:\n    - /server  # Arguments to pass to the container\n    livenessProbe:\n      httpGet:\n        path: /healthz  # Access request path\n        port: 8080  # Service listening port\n        httpHeaders:\n        - name: Custom-Header  # Custom header name\n          value: Awesome  # Custom header value\n      initialDelaySeconds: 3  # Wait 3 seconds before the first probe\n      periodSeconds: 3  # Perform liveness detection every 3 seconds\n

Following the configured rules, the kubelet sends an HTTP GET request to the service running in the container (listening on port 8080) to perform the probe. The kubelet considers the container alive if the handler for the /healthz path on the server returns a success code; if the handler returns a failure code, the kubelet kills the container and restarts it. Any return code greater than or equal to 200 and less than 400 indicates success; any other return code indicates failure. In this example image, the /healthz handler returns a 200 status code for the first 10 seconds of the container's lifetime and returns a 500 status code afterwards.

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/health-check.html#use-tcp-port-check","title":"Use TCP port check","text":"

                                          TCP port parameter description:

Parameter Description Port (Port) The service listening port, such as port 8080 in the example. Delay time (initialDelaySeconds) The delay before the check starts, in seconds. This setting relates to the normal startup time of the business program; for example, a value of 30 means the health check starts 30 seconds after the container starts, which is time reserved for the business program to start. Timeout (timeoutSeconds) The timeout, in seconds. For example, a value of 10 means the health check waits at most 10 seconds; if that time is exceeded, the check is regarded as failed. If set to 0 or unset, the default timeout is 1 second.

For a container that provides TCP communication services, the cluster establishes a TCP connection to the container according to the configured rules. If the connection succeeds, the probe succeeds; otherwise it fails. If you choose the TCP port probe method, you must specify the port the container listens on.

                                          YAML example:

                                          apiVersion: v1\nkind: Pod\nmetadata:\n  name: goproxy\n  labels:\n    app: goproxy\nspec:\n  containers:\n  - name: goproxy\n    image: k8s.gcr.io/goproxy:0.1\n    ports:\n    - containerPort: 8080\n    readinessProbe:\n      tcpSocket:\n        port: 8080\n      initialDelaySeconds: 5\n      periodSeconds: 10\n    livenessProbe:\n      tcpSocket:\n        port: 8080\n      initialDelaySeconds: 15\n      periodSeconds: 20\n

This example uses both readiness and liveness probes. The kubelet sends the first readiness probe 5 seconds after the container starts, attempting to connect to port 8080 of the goproxy container. If the probe succeeds, the Pod is marked as ready, and the kubelet continues to run the check every 10 seconds.

In addition to the readiness probe, this configuration includes a liveness probe. The kubelet performs the first liveness probe 15 seconds after the container starts, likewise attempting to connect to the goproxy container on port 8080. If the liveness probe fails, the container is restarted.

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/health-check.html#run-command-check","title":"Run command check","text":"

                                          YAML example:

                                          apiVersion: v1\nkind: Pod\nmetadata:\n  labels:\n    test: liveness\n  name: liveness-exec\nspec:\n  containers:\n  - name: liveness  # Container name\n    image: k8s.gcr.io/busybox  # Container image\n    args:\n    - /bin/sh  # Command to run\n    - -c  # Pass the following string as a command\n    - touch /tmp/healthy; sleep 30; rm -f /tmp/healthy; sleep 600  # Command to execute\n    livenessProbe:\n      exec:\n        command:\n        - cat  # Command to check liveness\n        - /tmp/healthy  # File to check\n      initialDelaySeconds: 5  # Wait 5 seconds before the first probe\n      periodSeconds: 5  # Perform liveness detection every 5 seconds\n

The periodSeconds field specifies that the kubelet performs a liveness probe every 5 seconds, and the initialDelaySeconds field specifies that the kubelet waits 5 seconds before the first probe. Following the configured rules, the kubelet periodically executes the command cat /tmp/healthy in the container. If the command succeeds and returns 0, the kubelet considers the container healthy and alive; if the command returns a non-zero value, the kubelet kills the container and restarts it.

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/health-check.html#protect-slow-starting-containers-with-pre-start-checks","title":"Protect slow-starting containers with pre-start checks","text":"

Some applications require a long initialization time at startup. In such cases, configure a startup probe with the same command, HTTP, or TCP check, and set failureThreshold * periodSeconds to a period long enough to cover the worst-case startup time.

                                          YAML example:

                                          ports:\n- name: liveness-port\n  containerPort: 8080\n  hostPort: 8080\n\nlivenessProbe:\n  httpGet:\n    path: /healthz\n    port: liveness-port\n  failureThreshold: 1\n  periodSeconds: 10\n\nstartupProbe:\n  httpGet:\n    path: /healthz\n    port: liveness-port\n  failureThreshold: 30\n  periodSeconds: 10\n

With the above settings, the application has up to 5 minutes (30 * 10 = 300s) to complete its startup. Once the startup probe succeeds, the liveness probe takes over and responds quickly to container deadlock. If the startup probe never succeeds, the container is killed after 300 seconds and handled according to the restartPolicy .

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/job-parameters.html","title":"Description of job parameters","text":"

According to the settings of .spec.completions and .spec.parallelism , jobs (Job) can be divided into the following types:

Job Type Description Non-parallel Job Creates one Pod until it terminates successfully Parallel Job with a fixed completion count The Job is considered complete when the number of successfully finished Pods reaches .spec.completions Parallel Job with a work queue Creates one or more Pods until one finishes successfully

                                          Parameter Description

RestartPolicy The restart policy for the Job's Pods; only Never or OnFailure are allowed .spec.completions The number of Pods that must finish successfully for the Job to complete; the default is 1 .spec.parallelism The number of Pods running in parallel; the default is 1 .spec.backoffLimit The maximum number of retries for failed Pods, beyond which no further retries occur .spec.activeDeadlineSeconds The maximum running time of the Job; once reached, the Job, that is, all of its Pods, stops. activeDeadlineSeconds has a higher priority than backoffLimit: a Job that reaches activeDeadlineSeconds ignores the backoffLimit setting.

The following is an example Job configuration, saved in myjob.yaml, which computes π to 2000 digits and prints the output.

apiVersion: batch/v1\nkind: Job  # The type of the current resource\nmetadata:\n  name: myjob\nspec:\n  completions: 50  # The Job is complete after 50 Pods run successfully; in this example it prints π 50 times\n  parallelism: 5  # 5 Pods run in parallel\n  backoffLimit: 5  # Retry up to 5 times\n  template:\n    spec:\n      containers:\n      - name: pi\n        image: perl\n        command: [\"perl\", \"-Mbignum=bpi\", \"-wle\", \"print bpi(2000)\"]\n      restartPolicy: Never  # Restart policy\n

                                          Related commands

kubectl apply -f myjob.yaml  # Start the job\nkubectl get job  # View this job\nkubectl logs myjob-1122dswzs  # View the logs of the Job's Pod\n
                                          "},{"location":"en/admin/kpanda/workloads/pod-config/lifecycle.html","title":"Configure the container lifecycle","text":"

Pods follow a predefined lifecycle, starting in the Pending phase and entering the Running state if at least one container in the Pod starts normally. If any container in the Pod ends in a failed state, the state becomes Failed . The following phase field values indicate which phase of the lifecycle a Pod is in.

Value Description Pending The Pod has been accepted by the system, but one or more containers have not yet been created or started. This phase includes the time waiting for the Pod to be scheduled and the time downloading the image over the network. Running The Pod has been bound to a node and all containers in the Pod have been created. At least one container is still running, or is starting or restarting. Succeeded All containers in the Pod terminated successfully and will not be restarted. Failed All containers in the Pod have terminated, and at least one container terminated due to failure, that is, it exited with a non-zero status or was terminated by the system. Unknown The status of the Pod cannot be obtained for some reason, usually due to a communication failure with the host where the Pod resides.

When creating a workload in Suanova container management, an image is usually used to specify the container's running environment. By default, when building an image, the Entrypoint and CMD fields define the commands and parameters executed while the container runs. To change the commands and parameters of the container image before startup, after startup, or before stopping, you can override the image defaults by setting the container's lifecycle event commands and parameters.

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/lifecycle.html#lifecycle-configuration","title":"Lifecycle configuration","text":"

                                          Configure the startup command, post-start command, and pre-stop command of the container according to business needs.
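A minimal Pod sketch showing where the three commands live in the spec (the hook commands, path, and port are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: lifecycle-demo
spec:
  containers:
    - name: app
      image: nginx
      command: ["/bin/sh", "-c"]        # start command (overrides the image ENTRYPOINT)
      args: ["nginx -g 'daemon off;'"]  # running parameters (override the image CMD)
      lifecycle:
        postStart:
          exec:                         # post-start command as a command-line script
            command: ["/bin/sh", "-c", "echo started >> /tmp/lifecycle.log"]
        preStop:
          httpGet:                      # pre-stop command as an HTTP request
            path: /drain
            port: 8080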

Parameter Description Example value Start command Type: Optional Meaning: The container starts according to the start command. Post-start command Type: Optional Meaning: The command to run after the container starts. Pre-stop command Type: Optional Meaning: The command executed after the container receives the stop command, ensuring that the services running in the instance can be drained in advance when the instance is upgraded or deleted. -"},{"location":"en/admin/kpanda/workloads/pod-config/lifecycle.html#start-command","title":"Start command","text":"

                                          Configure the startup command according to the table below.

Parameter Description Example value Run command Type: Required Meaning: Enter an executable command, and separate multiple commands with spaces; if the command itself contains spaces, wrap it in quotes (\"\"). Note: when there are multiple commands, it is recommended to use /bin/sh or another shell to run the command and pass all other commands in as parameters. /run/server Running parameters Type: Optional Meaning: Enter the parameters of the run command. port=8080"},{"location":"en/admin/kpanda/workloads/pod-config/lifecycle.html#post-start-commands","title":"Post-start commands","text":"

                                          Suanova provides two processing types, command line script and HTTP request, to configure post-start commands. You can choose the configuration method that suits you according to the table below.

                                          Command line script configuration

Parameter Description Example value Run Command Type: Optional Meaning: Enter an executable command, and separate multiple commands with spaces; if the command itself contains spaces, wrap it in quotes (\"\"). Note: when there are multiple commands, it is recommended to use /bin/sh or another shell to run the command and pass all other commands in as parameters. /run/server Running parameters Type: Optional Meaning: Enter the parameters of the run command. port=8080"},{"location":"en/admin/kpanda/workloads/pod-config/lifecycle.html#stop-pre-command","title":"Pre-stop command","text":"

                                          Suanova provides two processing types, command line script and HTTP request, to configure the pre-stop command. You can choose the configuration method that suits you according to the table below.

                                          HTTP request configuration

Parameter Description Example value URL Path Type: Optional Meaning: The requested URL path. - Port Type: Required Meaning: The requested port. port=8080 Node Address Type: Optional Meaning: The requested IP address; by default, the IP of the node where the container is located. -"},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html","title":"Scheduling Policy","text":"

In a Kubernetes cluster, nodes, like many other Kubernetes objects, have labels. You can add labels manually, and Kubernetes also adds standard labels to all nodes in the cluster. See Common Labels, Annotations, and Taints for common node labels. By adding labels to nodes, you can have Pods scheduled on specific nodes or groups of nodes, for example to ensure that specific Pods run only on nodes with certain isolation, security, or governance properties.

nodeSelector is the simplest recommended form of node selection constraint. You can add the nodeSelector field to the Pod's spec and set the node labels it requires; Kubernetes will schedule the Pod only on nodes that have every label specified. nodeSelector is one of the easiest ways to constrain Pods to nodes with specific labels, while affinity and anti-affinity expand the types of constraints you can define. Some benefits of affinity and anti-affinity are:

• The affinity and anti-affinity language is more expressive. nodeSelector can only select nodes that have all the specified labels, while affinity and anti-affinity give you greater control over the selection logic.

• You can mark a rule as a \"soft requirement\" or \"preference\", so that the scheduler still schedules the Pod if no matching node can be found.

• You can use the labels of other Pods running on a node (or in another topology domain) to constrain scheduling, instead of using only the node's own labels. This capability allows you to define rules for which Pods may be placed together.

You can choose which nodes a Pod is deployed to by setting affinity and anti-affinity.
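A minimal Pod sketch combining nodeSelector with one hard and one soft node-affinity rule ( disktype is an assumed custom label, while kubernetes.io/os is a standard one):

apiVersion: v1
kind: Pod
metadata:
  name: affinity-demo
spec:
  nodeSelector:              # simplest form: every listed label must match
    disktype: ssd
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:    # hard rule
        nodeSelectorTerms:
          - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values: ["linux"]
      preferredDuringSchedulingIgnoredDuringExecution:   # soft rule with a weight
        - weight: 80
          preference:
            matchExpressions:
              - key: disktype
                operator: In
                values: ["ssd"]
  containers:
    - name: app
      image: nginx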

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#tolerance-time","title":"Tolerance time","text":"

When the node where a workload instance runs becomes unavailable, the period after which the system reschedules the instance to other available nodes. The default is 300 seconds.
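Under the hood, this setting typically corresponds to tolerationSeconds on the not-ready/unreachable node taints. A Pod spec fragment as a sketch:

spec:
  tolerations:
    - key: node.kubernetes.io/not-ready
      operator: Exists
      effect: NoExecute
      tolerationSeconds: 300   # evict and reschedule after 300s on a not-ready node
    - key: node.kubernetes.io/unreachable
      operator: Exists
      effect: NoExecute
      tolerationSeconds: 300   # same for an unreachable node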

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#node-affinity-nodeaffinity","title":"Node affinity (nodeAffinity)","text":"

                                          Node affinity is conceptually similar to nodeSelector , which allows you to constrain which nodes Pods can be scheduled on based on the labels on the nodes. There are two types of node affinity:

• Must be satisfied ( requiredDuringSchedulingIgnoredDuringExecution ): The scheduler schedules the Pod only when the rule is satisfied. This functions like nodeSelector but with a more expressive syntax. You can define multiple hard constraint rules, of which only one needs to be satisfied.

• Satisfy as much as possible ( preferredDuringSchedulingIgnoredDuringExecution ): The scheduler tries to find nodes that meet the rule; if no matching node is found, it still schedules the Pod. You can set weights for soft constraint rules; during scheduling, if multiple nodes meet the conditions, the node with the highest weight is preferred. Multiple rules can be defined at the same time.

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#tag-name","title":"Tag name","text":"

The node label to match against; both default labels and user-defined labels can be used.

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#operators","title":"Operators","text":"
• In: the label's value must be in the given list
• NotIn: the label's value must not be in the given list
• Exists: the label must exist on the node; no label value needs to be set
• DoesNotExist: the label must not exist on the node; no label value needs to be set
• Gt: the label's value must be greater than the given value (integer comparison)
• Lt: the label's value must be less than the given value (integer comparison)
                                          "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#weights","title":"Weights","text":"

Weights can only be added to \"satisfy as much as possible\" (preferred) rules and can be understood as scheduling priority: nodes with the highest weight are scheduled first. The value range is 1 to 100.

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#workload-affinity","title":"Workload Affinity","text":"

                                          Similar to node affinity, there are two types of workload affinity:

• Must be satisfied (requiredDuringSchedulingIgnoredDuringExecution): The scheduler schedules the Pod only when the rule is satisfied. This functions like nodeSelector but with a more expressive syntax. You can define multiple hard constraint rules, of which only one needs to be satisfied.
• Satisfy as much as possible (preferredDuringSchedulingIgnoredDuringExecution): The scheduler tries to find nodes that meet the rule; if no matching node is found, it still schedules the Pod. You can set weights for soft constraint rules; during scheduling, if multiple nodes meet the conditions, the node with the highest weight is preferred. Multiple rules can be defined at the same time.

Workload affinity is mainly used to determine which Pods of workloads can be deployed in the same topology domain. For example, services that communicate with each other can be deployed in the same topology domain (such as the same availability zone) through affinity scheduling, reducing the network latency between them.
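A minimal sketch of a required workload-affinity rule that co-locates a Pod with Pods labeled app: frontend in the same availability zone (labels and namespace are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: backend
spec:
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchExpressions:
              - key: app
                operator: In
                values: ["frontend"]
          namespaces: ["default"]                    # namespaces searched for matching Pods
          topologyKey: topology.kubernetes.io/zone   # co-locate within the same zone
  containers:
    - name: app
      image: nginx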

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#tag-name_1","title":"Tag name","text":"

The Pod label to match against; both default labels and user-defined labels can be used.

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#namespaces","title":"Namespaces","text":"

Specifies the namespaces in which the scheduling policy takes effect, that is, the namespaces searched for Pods matching the rule.

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#operators_1","title":"Operators","text":"
• In: the label's value must be in the given list
• NotIn: the label's value must not be in the given list
• Exists: the label must exist; no label value needs to be set
• DoesNotExist: the label must not exist; no label value needs to be set
                                          "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#topology-domain","title":"Topology domain","text":"

Specifies the scope used to distinguish topology domains during scheduling. For example, if kubernetes.io/hostname is specified, each node is treated as its own topology domain.

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#workload-anti-affinity","title":"Workload Anti-Affinity","text":"

                                          Similar to node affinity, there are two types of anti-affinity for workloads:

• Must be satisfied (requiredDuringSchedulingIgnoredDuringExecution): The scheduler schedules the Pod only when the rule is satisfied. This functions like nodeSelector but with a more expressive syntax. You can define multiple hard constraint rules, of which only one needs to be satisfied.
• Satisfy as much as possible (preferredDuringSchedulingIgnoredDuringExecution): The scheduler tries to find nodes that meet the rule; if no matching node is found, it still schedules the Pod. You can set weights for soft constraint rules; during scheduling, if multiple nodes meet the conditions, the node with the highest weight is preferred. Multiple rules can be defined at the same time.

Workload anti-affinity is mainly used to determine which Pods of a workload cannot be deployed in the same topology domain. For example, spreading the Pods of a workload across different topology domains (such as different hosts) improves the stability of the workload itself.
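A minimal Deployment sketch using required anti-affinity to spread replicas across hosts (labels and image are illustrative):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: web
spec:
  replicas: 3
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchLabels:
                  app: web
              topologyKey: kubernetes.io/hostname   # at most one replica per node
      containers:
        - name: nginx
          image: nginx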

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#tag-name_2","title":"Tag name","text":"

The Pod label to match against; both default labels and user-defined labels can be used.

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#namespaces_1","title":"Namespaces","text":"

Specifies the namespaces in which the scheduling policy takes effect, that is, the namespaces searched for Pods matching the rule.

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#operators_2","title":"Operators","text":"
• In: the label's value must be in the given list
• NotIn: the label's value must not be in the given list
• Exists: the label must exist; no label value needs to be set
• DoesNotExist: the label must not exist; no label value needs to be set
                                          "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#topology-domain_1","title":"Topology domain","text":"

Specifies the scope used to distinguish topology domains during scheduling. For example, if kubernetes.io/hostname is specified, each node is treated as its own topology domain.

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/workload-status.html","title":"Workload Status","text":"

A workload is an application running on Kubernetes. Whether your application is composed of a single component or of many different components, you can run it in a set of Pods. Kubernetes provides five built-in workload resources to manage Pods:

                                          • Deployment
                                          • StatefulSet
• DaemonSet
                                          • Job
                                          • CronJob

You can also extend workload resources with custom resources (CRDs). The fifth-generation container management supports full lifecycle management of workloads, including creation, update, scaling, monitoring, logging, deletion, and version management.

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/workload-status.html#pod-status","title":"Pod Status","text":"

A Pod is the smallest compute unit created and managed in Kubernetes: a collection of containers that share storage, networking, and the management policies that control how the containers run. Pods are typically created through workload resources rather than directly by users. A Pod follows a predefined lifecycle: it starts in the Pending phase, enters Running once at least one of its primary containers starts normally, and then ends in Succeeded or Failed depending on whether any container in the Pod terminated with a failure.
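
For example, a Pod's current phase can be checked from the command line (the names below are placeholders):

kubectl get pod <pod-name> -n <namespace> -o jsonpath='{.status.phase}'\n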

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/workload-status.html#workload-status_1","title":"Workload Status","text":"

The fifth-generation container management module defines a built-in set of workload lifecycle statuses based on factors such as Pod status and replica count, so that users get a more faithful picture of how workloads are running. Because different workload types (such as Deployments and Jobs) manage Pods differently, they expose different lifecycle statuses at runtime, as shown in the following tables.

                                          "},{"location":"en/admin/kpanda/workloads/pod-config/workload-status.html#deployment-statefulset-damemonset-status","title":"Deployment, StatefulSet, DamemonSet Status","text":"Status Description Waiting 1. A workload is in this status while its creation is in progress. 2. After an upgrade or rollback action is triggered, the workload is in this status. 3. Trigger operations such as pausing/scaling, and the workload is in this status. Running This status occurs when all instances under the workload are running and the number of replicas matches the user-defined number. Deleting When a delete operation is performed, the payload is in this status until the delete is complete. Exception Unable to get the status of the workload for some reason. This usually occurs because communication with the pod's host has failed. Not Ready When the container is in an abnormal, pending status, this status is displayed when the workload cannot be started due to an unknown error"},{"location":"en/admin/kpanda/workloads/pod-config/workload-status.html#job-status","title":"Job Status","text":"Status Description Waiting The workload is in this status while Job creation is in progress. Executing The Job is in progress and the workload is in this status. Execution Complete The Job execution is complete and the workload is in this status. Deleting A delete operation is triggered and the workload is in this status. Exception Pod status could not be obtained for some reason. This usually occurs because communication with the pod's host has failed."},{"location":"en/admin/kpanda/workloads/pod-config/workload-status.html#cronjob-status","title":"CronJob status","text":"Status Description Waiting The CronJob is in this status when it is being created. Started After the CronJob is successfully created, the CronJob is in this status when it is running normally or when the paused task is started. Stopped The CronJob is in this status when the stop task operation is performed. Deleting The deletion operation is triggered, and the CronJob is in this status.

When a workload is in an abnormal or not-ready status, you can hover over its status value, and the system will show more detailed error information in a tooltip. You can also view the workload's logs or events to obtain related runtime information.
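
The same information can also be retrieved with kubectl (the names below are placeholders):

kubectl describe deployment <name> -n <namespace>   # conditions and recent events\nkubectl logs <pod-name> -n <namespace>              # container logs\n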

                                          "},{"location":"en/admin/register/index.html","title":"User Registration","text":"

                                          New users need to register to use the AI platform for the first time.

                                          "},{"location":"en/admin/register/index.html#prerequisites","title":"Prerequisites","text":"
                                          • The AI platform is installed
                                          • Email registration feature is enabled
                                          • An available email address
                                          "},{"location":"en/admin/register/index.html#email-registration-steps","title":"Email Registration Steps","text":"
                                          1. Open the AI platform homepage at https://ai.isuanova.com/ and click Register.

                                          2. Enter your username, password, and email, then click Register.

                                          3. The system will prompt that an email has been sent to your inbox.

4. Log in to your email account, find the registration email, and click the link.

                                          5. Congratulations, you have successfully accessed the AI platform, and you can now begin your AI journey.

                                          Next step: Bind a Workspace for the User

                                          "},{"location":"en/admin/register/bindws.html","title":"Binding a Workspace for the User","text":"

                                          After a user successfully registers, a workspace needs to be bound to them.

                                          "},{"location":"en/admin/register/bindws.html#prerequisites","title":"Prerequisites","text":"
                                          • AI platform installed
                                          • User has successfully registered
                                          • An available administrator account
                                          "},{"location":"en/admin/register/bindws.html#steps-to-follow","title":"Steps to Follow","text":"
                                          1. Log in to the AI platform as an administrator.
                                          2. Navigate to Global Management -> Workspace and Folder, and click Create Workspace.

                                          3. Enter the workspace name, select a folder, and click OK to create a workspace.

                                          4. Bind resources to the workspace.

                                            On this interface, you can click Create Namespace to create a namespace.

                                          5. Add authorization: Assign the user to the workspace.

6. The user logs in to the AI platform to check if they have permissions for the workspace and namespace. The administrator can perform more actions through the ┇ on the right side.

                                          Next step: Allocate Resources for the Workspace

                                          "},{"location":"en/admin/register/wsres.html","title":"Allocate Resources to the Workspace","text":"

                                          After binding a user to a workspace, it is necessary to allocate appropriate resources to the workspace.

                                          "},{"location":"en/admin/register/wsres.html#prerequisites","title":"Prerequisites","text":"
                                          • The AI platform is installed
                                          • An available administrator account
                                          • The workspace has been created and bound to a namespace
                                          "},{"location":"en/admin/register/wsres.html#steps","title":"Steps","text":"
                                          1. Log in to the AI platform as an administrator.
                                          2. Navigate to Global Management -> Workspace and Folder, find the workspace to which you want to add resources, and click Add Shared Resources.

3. Select the cluster, set the appropriate resource quota, and then click OK.

4. Return to the shared resources page. Resources have been successfully allocated to the workspace, and the administrator can modify them at any time using the ┇ on the right side.

                                          Next step: Create a Cloud Host

                                          "},{"location":"en/admin/security/index.html","title":"Cloud Native Security","text":"

The AI platform provides fully automated security capabilities for containers, Pods, images, runtimes, and microservices. The following table lists some of the security features that have been implemented or are in the process of being implemented.

| Security Features | Specific Items | Description |
| --- | --- | --- |
| Image security | Trusted image distribution | Key pairs and signature information are required to achieve secure transport of images. A key can be selected to sign images during image transmission. |
| Runtime security | Event correlation analysis | Supports correlation and risk analysis of security events detected at runtime to enhance attack traceability. Supports converging alerts, reducing invalid alerts, and improving event response efficiency. |
| - | Container decoy repository | The container decoy repository ships with common decoys, including but not limited to unauthorized access vulnerabilities, code execution vulnerabilities, local file reading vulnerabilities, and remote command execution (RCE) vulnerabilities. |
| - | Container decoy deployment | Supports custom decoy containers, including service names, service locations, etc. |
| - | Container decoy alerting | Supports alerting on suspicious behavior in container decoys. |
| - | Offset detection | While scanning an image, all binary information in the image is learned to form a whitelist. After the container goes online, only binaries on the whitelist are allowed to run, ensuring the container cannot run unauthorized (for example, illegally downloaded) executable files. |
| Micro-isolation | Intelligent recommendation of isolation policies | Supports recording historical access traffic to resources and intelligently recommending isolation policies based on that traffic when configuring isolation policies for resources. |
| - | Tenant isolation | Supports isolation control of tenants in Kubernetes clusters, with different network security groups for different tenants, and tenant-level security policies for inter-tenant network access and isolation. |
| Microservices security | Service and API security scanning | Supports automatic, manual, and periodic scanning of services and APIs within a cluster, covering traditional web scanning items such as XSS, SQL injection, command/code injection, directory enumeration, path traversal, XML entity injection, PoC, file upload, weak passwords, JSONP, SSRF, arbitrary redirects, and CRLF injection. For vulnerabilities found in the container environment, supports displaying the vulnerability type, URL, parameters, severity, test method, etc. |
"},{"location":"en/admin/security/falco-exporter.html","title":"What is Falco-exporter","text":"

                                          Falco-exporter is a Prometheus Metrics exporter for Falco output events.

                                          Falco-exporter is deployed as a DaemonSet on a Kubernetes cluster. If Prometheus is installed and running in the cluster, metrics provided by Falco-exporter will be automatically discovered.

                                          "},{"location":"en/admin/security/falco-exporter.html#install-falco-exporter","title":"Install Falco-exporter","text":"

                                          This section describes how to install Falco-exporter.

                                          Note

Before installing and using Falco-exporter, you need to install and run Falco with gRPC output enabled (output via Unix sockets is enabled by default). For more information on enabling gRPC output in the Falco Helm chart, see Enabling gRPC.
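
As a sketch only: when Falco is installed with its Helm chart, gRPC output can typically be enabled with values like the following (the exact value keys depend on the chart version, so verify them against the chart's documentation):

helm upgrade --install falco falcosecurity/falco \\\n  --set falco.grpc.enabled=true \\\n  --set falco.grpc_output.enabled=true\n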

                                          Please confirm that your cluster has successfully connected to the Container Management platform, and then perform the following steps to install Falco-exporter.

                                          1. Click Container Management->Clusters in the left navigation bar, then find the cluster name where you want to install Falco-exporter.

                                          2. In the left navigation bar, select Helm Releases -> Helm Charts, and then find and click falco-exporter.

                                          3. Select the version you want to install in Version and click Install.

                                          4. On the installation screen, fill in the required installation parameters.

                                            Fill in application name, namespace, version, etc.

                                            Fill in the following parameters:

                                            • Falco Prometheus Exporter -> Image Settings -> Registry: set the repository address of the falco-exporter image, which is already filled with the available online repositories by default. If it is a private environment, you can change it to a private repository address.

• Falco Prometheus Exporter -> Image Settings -> Repository: set the falco-exporter image name.

                                            • Falco Prometheus Exporter -> Prometheus ServiceMonitor Settings -> Install ServiceMonitor: install Prometheus Operator service monitor. It is enabled by default.

                                            • Falco Prometheus Exporter -> Prometheus ServiceMonitor Settings -> Scrape Interval: user-defined interval; if not specified, the Prometheus default interval is used.

                                            • Falco Prometheus Exporter -> Prometheus ServiceMonitor Settings -> Scrape Timeout: user-defined scrape timeout; if not specified, the Prometheus default scrape timeout is used.

On the same screen, fill in the following parameters:

                                            • Falco Prometheus Exporter -> Prometheus prometheusRules -> Install prometheusRules: create PrometheusRules to alert on priority events. It is enabled by default.

                                            • Falco Prometheus Exporter -> Prometheus prometheusRules -> Alerts settings: set whether alerts are enabled for different levels of log events, the interval between alerts, and the threshold for alerts.

                                          5. Click the OK button at the bottom right corner to complete the installation.

                                          "},{"location":"en/admin/security/falco-install.html","title":"Install Falco","text":"

                                          Please confirm that your cluster has successfully connected to the Container Management platform, and then perform the following steps to install Falco.

                                          1. Click Container Management->Clusters in the left navigation bar, then find the cluster name where you want to install Falco.

                                          2. In the left navigation bar, select Helm Releases -> Helm Charts, and then find and click Falco.

                                          3. Select the version you want to install in Version, and click Install.

                                          4. On the installation page, fill in the required installation parameters.

                                            Fill in the application name, namespace, version, etc.

                                            Fill in the following parameters:

                                            • Falco -> Image Settings -> Registry: set the repository address of the Falco image, which is already filled with the available online repositories by default. If it is a private environment, you can change it to a private repository address.

                                            • Falco -> Image Settings -> Repository: set the Falco image name.

                                            • Falco -> Falco Driver -> Image Settings -> Registry: set the repository address of the Falco Driver image, which is already filled with available online repositories by default. If it is a private environment, you can change it to a private repository address.

                                            • Falco -> Falco Driver -> Image Settings -> Repository: set the Falco Driver image name.

                                            • Falco -> Falco Driver -> Image Settings -> Driver Kind: set the Driver Kind, providing the following two options.

• ebpf: use eBPF to detect events. This requires the Linux kernel to support eBPF, with CONFIG_BPF_JIT enabled and sysctl net.core.bpf_jit_enable=1 set.

• module: use kernel module detection, with limited OS version support. Refer to the list of OS versions supported by the kernel module.

                                            • Falco -> Falco Driver -> Image Settings -> Log Level: the minimum log level to be included in the log.

                                              Optional values include: emergency, alert, critical, error, warning, notice, info, debug.

                                          5. Click the OK button in the bottom right corner to complete the installation.

                                          "},{"location":"en/admin/security/falco.html","title":"What is Falco","text":"

Falco is a cloud-native runtime security tool designed to detect anomalous activity in applications. It can be used to monitor the runtime security of Kubernetes applications and internal components. With just a set of rules, Falco can continuously monitor and watch for anomalous activity in containers, applications, hosts, and networks.

                                          "},{"location":"en/admin/security/falco.html#what-does-falco-detect","title":"What does Falco detect?","text":"

                                          Falco can detect and alert on any behavior involving Linux system calls. Falco alerts can be triggered using specific system calls, parameters, and properties of the calling process. For example, Falco can easily detect events including but not limited to the following:

                                          • A shell is running inside a container or pod in Kubernetes.
                                          • A container is running in privileged mode or mounting a sensitive path, such as /proc, from the host.
                                          • A server process is spawning a child process of an unexpected type.
                                          • A sensitive file, such as /etc/shadow, is being read unexpectedly.
                                          • A non-device file is being written to /dev.
                                          • A standard system binary, such as ls, is making an outbound network connection.
                                          • A privileged pod is started in a Kubernetes cluster.

                                          For more information on the default rules that come with Falco, see the Rules documentation.

                                          "},{"location":"en/admin/security/falco.html#what-are-falco-rules","title":"What are Falco rules?","text":"

                                          Falco rules define the behavior and events that Falco should monitor. Rules can be written in the Falco rules file or in a generic configuration file. For more information on writing, managing and deploying rules, see Falco Rules.

                                          "},{"location":"en/admin/security/falco.html#what-are-falco-alerts","title":"What are Falco Alerts?","text":"

Alerts are configurable downstream operations that can be as simple as logging to STDOUT or as complex as delivering a gRPC call to a client. For more information on configuring, understanding, and developing alerts, see Falco Alerts. Falco can send alerts to:

                                          • Standard output
                                          • A file
                                          • A system log
                                          • A spawned program
                                          • An HTTP[s] endpoint
                                          • A client via the gRPC API
                                          "},{"location":"en/admin/security/falco.html#what-are-the-components-of-falco","title":"What are the components of Falco?","text":"

                                          Falco consists of the following main components:

• Userspace program: a CLI tool used to interact with Falco. It handles signals, parses messages from a Falco driver, and sends alerts.

                                          • Configuration: define how Falco is run, what rules to assert, and how to perform alerts. For more information, see Configuration.

• Driver: software that adheres to the Falco driver specification and sends a stream of system call information. Falco cannot run without a driver installed. Currently, Falco supports the following drivers:

                                            • Kernel module built on libscap and libsinsp C++ libraries (default)
                                            • BPF probe built from the same modules
                                            • Userspace instrumentation

                                              For more information, see Falco drivers.

• Plugins: allow users to extend the functionality of the Falco libraries/Falco executable by adding new event sources and new fields that extract information from events. For more information, see Plugins.

                                          "},{"location":"en/admin/share/notebook.html","title":"Using Notebook","text":"

                                          Notebook typically refers to Jupyter Notebook or similar interactive computing environments. This is a very popular tool widely used in fields such as data science, machine learning, and deep learning. This page explains how to use Notebook on the Canfeng AI platform.

                                          "},{"location":"en/admin/share/notebook.html#prerequisites","title":"Prerequisites","text":"
                                          • The AI platform is installed
                                          • The user has successfully registered
                                          • The administrator has assigned a workspace to the user
                                          • A dataset (code, data, etc.) is prepared
                                          "},{"location":"en/admin/share/notebook.html#creating-and-using-notebook-instances","title":"Creating and Using Notebook Instances","text":"
                                          1. Log in to the AI platform as an Administrator.
                                          2. Navigate to AI Lab -> Queue Management, and click the Create button on the right.

                                          3. After entering a name, selecting a cluster, workspace, and quota, click OK.

                                          4. Log in to the AI platform as a User, navigate to AI Lab -> Notebook, and click the Create button on the right.

                                          5. After configuring the parameters, click OK.

                                            Basic InformationResource ConfigurationAdvanced Configuration

                                            Enter a name, select a cluster, namespace, choose the newly created queue, and click One-click Initialization.

                                            Select Notebook type, configure memory and CPU, enable GPU, create and configure PVC:

                                            Enable SSH external access:

                                          6. You will be automatically redirected to the Notebook instance list; click the instance name.

                                          7. Enter the Notebook instance details page and click the Open button in the upper right corner.

                                          8. You will enter the Notebook development environment, where a persistent volume is mounted in the /home/jovyan directory. You can clone code using git and upload data after connecting via SSH, etc.

                                          "},{"location":"en/admin/share/notebook.html#accessing-notebook-instances-via-ssh","title":"Accessing Notebook Instances via SSH","text":"
                                          1. Generate an SSH key pair on your own computer.

Open a terminal on your computer (for example, Git Bash on Windows), enter ssh-keygen.exe -t rsa, and press Enter until the command completes.

                                          2. Use commands like cat ~/.ssh/id_rsa.pub to view and copy the public key.

                                          3. Log in to the AI platform as a user, click Personal Center in the upper right corner -> SSH Public Key -> Import SSH Public Key.

                                          4. Go to the details page of the Notebook instance and copy the SSH link.

                                          5. Use SSH to access the Notebook instance from the client.
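
For example (the user, host, and port below are placeholders; the actual values come from the SSH link copied in the previous step):

ssh -p <port> <user>@<notebook-host>\n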

                                          Next step: Create Training Jobs

                                          "},{"location":"en/admin/share/quota.html","title":"Quota Management","text":"

                                          Once a user is bound to a workspace, resources can be allocated to the workspace, and resource quotas can be managed.

                                          "},{"location":"en/admin/share/quota.html#prerequisites","title":"Prerequisites","text":"
                                          • The AI platform is installed
                                          • There is an available administrator account
                                          "},{"location":"en/admin/share/quota.html#creating-and-managing-quotas","title":"Creating and Managing Quotas","text":"
                                          1. Log in to the AI platform as an Administrator.
                                          2. Create a workspace and namespace, and bind users.
                                          3. Allocate resource quotas to the workspace.

                                          4. Manage the resource quotas for the namespace test-ns-1, ensuring that the values do not exceed the workspace's quota.

                                          5. Log in to the AI platform as a User to check if they have been assigned the test-ns-1 namespace.

                                          Next step: Create AI Workloads Using GPUs

                                          "},{"location":"en/admin/share/workload.html","title":"Creating AI Workloads Using GPU Resources","text":"

                                          After the administrator allocates resource quotas for the workspace, users can create AI workloads to utilize GPU computing resources.

                                          "},{"location":"en/admin/share/workload.html#prerequisites","title":"Prerequisites","text":"
                                          • The AI platform is installed
                                          • User has successfully registered
                                          • Administrator has assigned a workspace to the user
                                          • Resource quotas have been set for the workspace
                                          • A cluster has been created
                                          "},{"location":"en/admin/share/workload.html#steps-to-create-ai-workloads","title":"Steps to Create AI Workloads","text":"
                                          1. Log in to the AI platform as a User.
                                          2. Navigate to Container Management, select a namespace, then click Workloads -> Deployments, and then click the Create from Image button on the right.

                                          3. After configuring the parameters, click OK.

                                            Basic InformationContainer ConfigurationOthers

                                            Select your own namespace.

                                            Set the image, configure resources such as CPU, memory, and GPU, and set the startup command.

                                            Service configuration and advanced settings can use default configurations.

                                          4. Automatically return to the stateless workload list and click the workload name.

                                          5. Enter the details page to view the GPU quota.

                                          6. You can also enter the console and run the mx-smi command to check the GPU resources.

                                          Next step: Using Notebook

                                          "},{"location":"en/admin/virtnest/best-practice/import-ubuntu.html","title":"Import a Linux Virtual Machine with Ubuntu from an External Platform","text":"

This page provides a detailed introduction on how to import Linux virtual machines with Ubuntu from the external platform VMware into AI platform virtual machines through the command line.

                                          Info

The external virtual platform in this document is VMware vSphere Client, abbreviated as vSphere. Technically, the import relies on KubeVirt CDI. Before proceeding, the virtual machine to be imported needs to be shut down on vSphere. This guide takes a virtual machine running the Ubuntu operating system as an example.

                                          "},{"location":"en/admin/virtnest/best-practice/import-ubuntu.html#fetch-basic-information-of-vsphere-virtual-machine","title":"Fetch Basic Information of vSphere Virtual Machine","text":"
                                          • vSphere URL: Fetch information on the URL of the target platform

                                          • vSphere SSL Certificate Thumbprint: Need to be fetched using openssl

                                            openssl s_client -connect 10.64.56.11:443 </dev/null | openssl x509 -in /dev/stdin -fingerprint -sha1 -noout\n
                                            Output will be similar to:
                                            Can't use SSL_get_servername\ndepth=0 CN = vcsa.daocloud.io\nverify error:num=20:unable to get local issuer certificate\nverify return:1\ndepth=0 CN = vcsa.daocloud.io\nverify error:num=21:unable to verify the first certificate\nverify return:1\ndepth=0 CN = vcsa.daocloud.io\nverify return:1\nDONE\nsha1 Fingerprint=C3:9D:D7:55:6A:43:11:2B:DE:BA:27:EA:3B:C2:13:AF:E4:12:62:4D  # Value needed\n

                                          • vSphere Account: Fetch account information for vSphere, and pay attention to permissions

                                          • vSphere Password: Fetch password information for vSphere

                                          • UUID of the virtual machine to be imported: Need to be fetched on the web page of vSphere

• Access the vSphere page, go to the details page of the virtual machine to be imported, click Edit Settings, open the browser's developer console, click Network -> Headers, and find the URL as shown in the image below.

                                            • Click Response , locate vmConfigContext -> config , and finally find the target value uuid .

                                          • Path of the vmdk file of the virtual machine to be imported

                                          "},{"location":"en/admin/virtnest/best-practice/import-ubuntu.html#network-configuration","title":"Network Configuration","text":"

                                          Different information needs to be configured based on the chosen network mode. If a fixed IP address is required, you should select the Bridge network mode.

                                          • Create a Multus CR of the ovs type. Refer to Creating a Multus CR.
                                          • Create subnets and IP pools. Refer to Creating Subnets and IP Pools.

apiVersion: spiderpool.spidernet.io/v2beta1\nkind: SpiderIPPool\nmetadata:\n  name: test2\nspec:\n  ips:\n  - 10.20.3.90\n  subnet: 10.20.0.0/16\n  gateway: 10.20.0.1\n\n---\napiVersion: spiderpool.spidernet.io/v2beta1\nkind: SpiderIPPool\nmetadata:\n  name: test3\nspec:\n  ips:\n  - 10.20.240.1\n  subnet: 10.20.0.0/16\n  gateway: 10.20.0.1\n\n---\napiVersion: spiderpool.spidernet.io/v2beta1\nkind: SpiderMultusConfig\nmetadata:\n  name: test1\n  namespace: kube-system\nspec:\n  cniType: ovs\n  coordinator:\n    detectGateway: false\n    detectIPConflict: false\n    mode: auto\n    tunePodRoutes: true\n  disableIPAM: false\n  enableCoordinator: true\n  ovs:\n    bridge: br-1\n    ippools:\n      ipv4:\n      - test2\n      - test3\n
                                          "},{"location":"en/admin/virtnest/best-practice/import-ubuntu.html#fetch-vsphere-account-password-secret","title":"Fetch vSphere Account Password Secret","text":"
                                          apiVersion: v1\nkind: Secret\nmetadata:\n  name: vsphere   # Can be changed\n  labels:\n    app: containerized-data-importer  # Do not change\ntype: Opaque\ndata:\n  accessKeyId: \"username-base64\"\n  secretKey: \"password-base64\"\n
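
The accessKeyId and secretKey values are the vSphere username and password encoded in base64; for example (the credentials shown are hypothetical):

echo -n 'administrator@vsphere.local' | base64   # accessKeyId\necho -n 'your-password' | base64                 # secretKey\n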
                                          "},{"location":"en/admin/virtnest/best-practice/import-ubuntu.html#write-a-kubevirt-vm-yaml-to-create-vm","title":"Write a KubeVirt VM YAML to create VM","text":"

                                          Tip

                                          If a fixed IP address is required, the YAML configuration differs slightly from the one used for the default network. These differences have been highlighted.

apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n    virtnest.io/alias-name: \"\"\n    virtnest.io/image-secret: \"\"\n  creationTimestamp: \"2024-05-23T06:46:28Z\"\n  finalizers:\n  - kubevirt.io/virtualMachineControllerFinalize\n  generation: 1\n  labels:\n    virtnest.io/os-family: Ubuntu\n    virtnest.io/os-version: \"22.04\"\n  name: export-ubuntu\n  namespace: default\nspec:\n  dataVolumeTemplates:\n  - metadata:\n      creationTimestamp: null\n      name: export-ubuntu-rootdisk\n      namespace: default\n    spec:\n      pvc:\n        accessModes:\n        - ReadWriteOnce\n        resources:\n          requests:\n            storage: 10Gi\n        storageClassName: local-path\n      source:\n        vddk:\n          backingFile: \"[A05-09-ShangPu-Local-DataStore] virtnest-export-ubuntu/virtnest-export-ubuntu.vmdk\"\n          url: \"https://10.64.56.21\"\n          uuid: \"421d6135-4edb-df80-ee54-8c5b10cc4e78\"\n          thumbprint: \"D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA\"\n          secretRef: \"vsphere\"\n          initImageURL: \"release.daocloud.io/virtnest/vddk:v8\"\n  runStrategy: Manual\n  template:\n    metadata:\n      annotations:\n        ipam.spidernet.io/ippools: '[{\"cleangateway\":false,\"ipv4\":[\"test2\"]}]'  # Add the Spiderpool network here\n      creationTimestamp: null\n    spec:\n      architecture: amd64\n      domain:\n        devices:\n          disks:\n          - bootOrder: 1\n            disk:\n              bus: virtio\n            name: rootdisk\n          interfaces:                                                          # Modify the network configuration\n          - bridge: {}\n            name: ovs-bridge0\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 4Gi\n      networks:                                                                # Modify the network configuration\n      - multus:\n          default: true\n          networkName: kube-system/test1\n        name: ovs-bridge0\n      volumes:\n      - dataVolume:\n          name: export-ubuntu-rootdisk\n        name: rootdisk\n
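
Because runStrategy is Manual, the virtual machine does not start automatically once the YAML is applied. Assuming the manifest above is saved as export-ubuntu.yaml, it can be applied and started with:

kubectl apply -f export-ubuntu.yaml\nvirtctl start export-ubuntu -n default\n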
                                          "},{"location":"en/admin/virtnest/best-practice/import-ubuntu.html#access-vnc-to-verify-successful-operation","title":"Access VNC to verify successful operation","text":"
                                          1. Modify the network configuration of the virtual machine

                                          2. Check the current network

When the import completes, the configuration shown in the image below is in place. Note, however, that the enp1s0 interface has no inet field, so it cannot connect to the external network.

                                          3. Configure netplan

In the configuration shown in the image above, change the object under ethernets to enp1s0 so that it obtains an IP address via DHCP, as in the sketch below.
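
A minimal netplan configuration to that effect (the file name under /etc/netplan varies by system) might look like:

network:\n  version: 2\n  ethernets:\n    enp1s0:\n      dhcp4: true\n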

                                          4. Apply the netplan configuration to the system network configuration

                                            sudo netplan apply\n
                                          5. Perform a ping test on the external network

                                          6. Access the virtual machine on the node via SSH.

                                          "},{"location":"en/admin/virtnest/best-practice/import-windows.html","title":"Import a Windows Virtual Machine from the External Platform","text":"

This page provides a detailed introduction on how to import Windows virtual machines from the external platform VMware into AI platform virtual machines using the command line.

                                          Info

The external virtual platform on this page is VMware vSphere Client, abbreviated as vSphere. Technically, the import relies on KubeVirt CDI. Before proceeding, the virtual machine to be imported needs to be shut down on vSphere. This guide takes a virtual machine running the Windows operating system as an example.

                                          "},{"location":"en/admin/virtnest/best-practice/import-windows.html#environment-preparation","title":"Environment Preparation","text":"

                                          Before importing, refer to the Network Configuration to prepare the environment.

                                          "},{"location":"en/admin/virtnest/best-practice/import-windows.html#fetch-information-of-the-windows-virtual-machine","title":"Fetch Information of the Windows Virtual Machine","text":"

                                          Similar to importing a virtual machine with a Linux operating system, refer to Importing a Linux Virtual Machine with Ubuntu from an External Platform to get the following information:

                                          • vSphere account and password
                                          • vSphere virtual machine information
                                          "},{"location":"en/admin/virtnest/best-practice/import-windows.html#check-the-boot-type-of-windows","title":"Check the Boot Type of Windows","text":"

                                          When importing a virtual machine from an external platform into the AI platform virtualization platform, you need to configure it according to the boot type (BIOS or UEFI) to ensure it can boot and run correctly.

You can check whether Windows uses BIOS or UEFI through the System Summary. If it uses UEFI, you need to add the relevant information to the YAML file.
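
For example, this can be checked with the built-in System Information tool: run the command below and read the BIOS Mode field, where Legacy indicates BIOS and UEFI indicates UEFI:

msinfo32\n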

                                          "},{"location":"en/admin/virtnest/best-practice/import-windows.html#import-process","title":"Import Process","text":"

Prepare the window.yaml file and pay attention to the following configurations:

• PVC for booting Virtio drivers
                                          • Disk bus type, set to SATA or Virtio depending on the boot type
                                          • UEFI configuration (if UEFI is used)
                                          Click to view the window.yaml example window.yaml
apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  labels:\n    virtnest.io/os-family: windows\n    virtnest.io/os-version: \"server2019\"\n  name: export-window-21\n  namespace: default\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        name: export-window-21-rootdisk\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 22Gi\n          storageClassName: local-path\n        source:\n          vddk:\n            backingFile: \"[A05-09-ShangPu-Local-DataStore] virtnest-export-window/virtnest-export-window.vmdk\"\n            url: \"https://10.64.56.21\"\n            uuid: \"421d40f2-21a2-cfeb-d5c9-e7f8abfc2faa\"\n            thumbprint: \"D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA\"\n            secretRef: \"vsphere21\"\n            initImageURL: \"release.daocloud.io/virtnest/vddk:v8\"\n    - metadata:\n        name: export-window-21-datadisk\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 1Gi\n          storageClassName: local-path\n        source:\n          vddk:\n            backingFile: \"[A05-09-ShangPu-Local-DataStore] virtnest-export-window/virtnest-export-window_1.vmdk\"\n            url: \"https://10.64.56.21\"\n            uuid: \"421d40f2-21a2-cfeb-d5c9-e7f8abfc2faa\"\n            thumbprint: \"D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA\"\n            secretRef: \"vsphere21\"\n            initImageURL: \"release.daocloud.io/virtnest/vddk:v8\"\n    # <1>. PVC for booting Virtio drivers\n    # \u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\n    - metadata:\n        name: virtio-disk\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 10Mi\n          storageClassName: local-path\n        source:\n          blank: {}\n          # \u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\n  running: true\n  template:\n    metadata:\n      annotations:\n        ipam.spidernet.io/ippools: '[{\"cleangateway\":false,\"ipv4\":[\"test86\"]}]'\n    spec:\n      dnsConfig:\n        nameservers:\n        - 223.5.5.5\n      domain:\n        cpu:\n          cores: 2\n        memory:\n          guest: 4Gi\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: sata   # <2> Disk bus type, set to SATA or Virtio depending on the boot type\n              name: rootdisk\n            - bootOrder: 2\n              disk:\n                bus: sata   # <2> Disk bus type, set to SATA or Virtio depending on the boot type\n              name: datadisk\n            # <1>. disk for booting Virtio drivers\n            # \u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\n            - bootOrder: 3\n              disk:\n                bus: virtio\n              name: virtdisk\n            - bootOrder: 4\n              cdrom:\n                bus: sata\n              name: virtiocontainerdisk\n            # \u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\n          interfaces:\n            - bridge: {}\n              name: ovs-bridge0\n        # <3> In the above section \"Check the Boot Type of Windows\"\n        # If using UEFI, add the following information\n        # \u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\n        features:\n          smm:\n            enabled: true\n        firmware:\n          bootloader:\n            efi:\n              secureBoot: false\n        # \u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 4Gi\n      networks:\n        - multus:\n            default: true\n            networkName: kube-system/test1\n          name: ovs-bridge0\n      volumes:\n        - dataVolume:\n            name: export-window-21-rootdisk\n          name: rootdisk\n        - dataVolume:\n            name: export-window-21-datadisk\n          name: datadisk\n        # <1> Volumes for booting Virtio drivers\n        # \u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\n        - dataVolume:\n            name: virtio-disk\n          name: virtdisk\n        - containerDisk:\n            image: release-ci.daocloud.io/virtnest/kubevirt/virtio-win:v4.12.12-5\n          name: virtiocontainerdisk\n        # \u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\n
                                          "},{"location":"en/admin/virtnest/best-practice/import-windows.html#install-virtio-drivers-via-vnc","title":"Install VirtIO Drivers via VNC","text":"
                                          1. Access and connect to the virtual machine via VNC.
                                          2. Download and install the appropriate VirtIO drivers based on the Windows version.
                                          3. Enable Remote Desktop to facilitate future connections via RDP.
                                          4. After installation, update the YAML file and reboot the virtual machine.
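
After updating the YAML (see the modified example in the next section), the changes can be applied and the virtual machine rebooted from the command line, assuming the manifest is saved as window.yaml:

kubectl apply -f window.yaml\nvirtctl restart export-window-21 -n default\n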
                                          "},{"location":"en/admin/virtnest/best-practice/import-windows.html#update-yaml-after-reboot","title":"Update YAML After Reboot","text":"Click to view the modified `window.yaml` example window.yaml
                                          # Delete fields marked with <1>, modify fields marked with <2>: change sata to virtio\napiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  labels:\n    virtnest.io/os-family: windows\n    virtnest.io/os-version: \"server2019\"\n  name: export-window-21\n  namespace: default\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        name: export-window-21-rootdisk\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 22Gi\n          storageClassName: local-path\n        source:\n          vddk:\n            backingFile: \"[A05-09-ShangPu-Local-DataStore] virtnest-export-window/virtnest-export-window.vmdk\"\n            url: \"https://10.64.56.21\"\n            uuid: \"421d40f2-21a2-cfeb-d5c9-e7f8abfc2faa\"\n            thumbprint: \"D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA\"\n            secretRef: \"vsphere21\"\n            initImageURL: \"release.daocloud.io/virtnest/vddk:v8\"\n    - metadata:\n        name: export-window-21-datadisk\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 1Gi\n          storageClassName: local-path\n        source:\n          vddk:\n            backingFile: \"[A05-09-ShangPu-Local-DataStore] virtnest-export-window/virtnest-export-window_1.vmdk\"\n            url: \"https://10.64.56.21\"\n            uuid: \"421d40f2-21a2-cfeb-d5c9-e7f8abfc2faa\"\n            thumbprint: \"D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA\"\n            secretRef: \"vsphere21\"\n            initImageURL: \"release.daocloud.io/virtnest/vddk:v8\"\n  running: true\n  template:\n    metadata:\n      annotations:\n        ipam.spidernet.io/ippools: '[{\"cleangateway\":false,\"ipv4\":[\"test86\"]}]'\n    spec:\n      dnsConfig:\n        nameservers:\n        - 223.5.5.5\n      domain:\n        cpu:\n          cores: 2\n        memory:\n          guest: 4Gi\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio  # <2>\n              name: rootdisk\n            - bootOrder: 2\n              disk:\n                bus: virtio  # <2>\n              name: datadisk\n          interfaces:\n            - bridge: {}\n              name: ovs-bridge0\n        # <3> In the above section \"Check the Boot Type of Windows\"\n        # If using UEFI, add the following information\n        # \u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\n        features:\n          smm:\n            enabled: true\n        firmware:\n          bootloader:\n            efi:\n              secureBoot: false\n        # \u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 4Gi\n      networks:\n        - multus:\n            default: true\n            networkName: kube-system/test1\n          name: ovs-bridge0\n      volumes:\n        - dataVolume:\n            name: export-window-21-rootdisk\n          name: rootdisk\n        - dataVolume:\n            name: export-window-21-datadisk\n          name: datadisk\n
                                          "},{"location":"en/admin/virtnest/best-practice/import-windows.html#access-and-verify-via-rdp","title":"Access and Verify via RDP","text":"
                                          • Use an RDP client to connect to the virtual machine. Log in with the default account admin and password dangerous!123.

                                          • Verify network access and data disk data

                                          "},{"location":"en/admin/virtnest/best-practice/import-windows.html#differences-between-importing-linux-and-windows-virtual-machines","title":"Differences Between Importing Linux and Windows Virtual Machines","text":"
                                          • Windows may require UEFI configuration.
                                          • Windows typically requires the installation of VirtIO drivers.
                                          • Windows multi-disk imports usually do not require re-mounting of disks.
                                          "},{"location":"en/admin/virtnest/best-practice/vm-windows.html","title":"Create a Windows Virtual Machine","text":"

                                          This document will explain how to create a Windows virtual machine via the command line.

                                          "},{"location":"en/admin/virtnest/best-practice/vm-windows.html#prerequisites","title":"Prerequisites","text":"
                                          1. Before creating a Windows virtual machine, it is recommended to first refer to installing dependencies and prerequisites for the virtual machine module to ensure that your environment is ready.
                                          2. During the creation process, it is recommended to refer to the official documentation: Installing Windows documentation, Installing Windows related drivers.
                                          3. It is recommended to access the Windows virtual machine using the VNC method.
                                          "},{"location":"en/admin/virtnest/best-practice/vm-windows.html#import-an-iso-image","title":"Import an ISO Image","text":"

Creating a Windows virtual machine requires importing an ISO image in order to install the Windows operating system. Unlike Linux, the Windows installation process usually boots from an installation disc or an ISO image file. Therefore, when creating a Windows virtual machine, you must first import the Windows installation ISO image so that the operating system can be installed properly.

                                          Here are two methods for importing ISO images:

                                          1. (Recommended) Creating a Docker image. It is recommended to refer to building images.

                                          2. (Not recommended) Using virtctl to import the image into a Persistent Volume Claim (PVC).

                                            You can refer to the following command:

                                            virtctl image-upload -n <namespace> pvc <PVC name> \\\n   --image-path=<ISO file path> \\\n   --access-mode=ReadWriteOnce \\\n   --size=6G \\\n   --uploadproxy-url=<https://cdi-uploadproxy ClusterIP and port> \\\n   --force-bind \\\n   --insecure \\\n   --wait-secs=240 \\\n   --storage-class=<SC>\n

For example (the values below are purely illustrative and must be replaced with your own namespace, ISO path, upload proxy address, and storage class; the PVC name iso-win10 matches the volume referenced in the YAML examples below):

virtctl image-upload -n default pvc iso-win10 \\\n   --image-path=./win10.iso \\\n   --access-mode=ReadWriteOnce \\\n   --size=6G \\\n   --uploadproxy-url=https://10.96.45.12:443 \\\n   --force-bind \\\n   --insecure \\\n   --wait-secs=240 \\\n   --storage-class=local-path\n
                                          "},{"location":"en/admin/virtnest/best-practice/vm-windows.html#create-a-windows-virtual-machine-using-yaml","title":"Create a Windows Virtual Machine Using YAML","text":"

                                          Creating a Windows virtual machine using YAML is more flexible and easier to write and maintain. Below are three reference YAML examples:

                                          1. (Recommended) Using Virtio drivers + Docker image:

• If you need storage capabilities (mounting disks), install the viostor driver.
• If you need network capabilities, install the NetKVM driver.
                                            apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n  labels:\n    virtnest.io/os-family: Windows\n    virtnest.io/os-version: '10'\n  name: windows10-virtio\n  namespace: default\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        name: win10-system-virtio\n        namespace: default\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 32Gi\n          storageClassName: local-path\n        source:\n          blank: {}\n  running: true\n  template:\n    metadata:\n      labels:\n        app: windows10-virtio\n        version: v1\n        kubevirt.io/domain: windows10-virtio\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 8\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio # Use virtio\n              name: win10-system-virtio \n            - bootOrder: 2\n              cdrom:\n                bus: sata # Use sata for ISO image\n              name: iso-win10\n            - bootOrder: 3\n              cdrom:\n                bus: sata # Use sata for containerdisk\n              name: virtiocontainerdisk\n          interfaces:\n            - name: default\n              masquerade: {}\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 8G\n      networks:\n        - name: default\n          pod: {}\n      volumes:\n        - name: iso-win10\n          persistentVolumeClaim:\n            claimName: iso-win10\n        - name: win10-system-virtio\n          persistentVolumeClaim:\n            claimName: win10-system-virtio\n        - containerDisk:\n            image: kubevirt/virtio-container-disk\n          name: virtiocontainerdisk\n
                                          2. (Not recommended) Using Virtio drivers combined with the virtctl tool to import the image into a Persistent Volume Claim (PVC).

                                            apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n  labels:\n    virtnest.io/os-family: Windows\n    virtnest.io/os-version: '10'\n  name: windows10-virtio\n  namespace: default\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        name: win10-system-virtio\n        namespace: default\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 32Gi\n          storageClassName: local-path\n        source:\n          blank: {}\n  running: true\n  template:\n    metadata:\n      labels:\n        app: windows10-virtio\n        version: v1\n        kubevirt.io/domain: windows10-virtio\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 8\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              # Use virtio\n              disk:\n                bus: virtio\n              name: win10-system-virtio\n              # Use sata for ISO image\n            - bootOrder: 2\n              cdrom:\n                bus: sata\n              name: iso-win10\n              # Use sata for containerdisk\n            - bootOrder: 3\n              cdrom:\n                bus: sata\n              name: virtiocontainerdisk\n          interfaces:\n            - name: default\n              masquerade: {}\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 8G\n      networks:\n        - name: default\n          pod: {}\n      volumes:\n        - name: iso-win10\n          persistentVolumeClaim:\n            claimName: iso-win10\n        - name: win10-system-virtio\n          persistentVolumeClaim:\n            claimName: win10-system-virtio\n        - containerDisk:\n            image: kubevirt/virtio-container-disk\n          name: virtiocontainerdisk\n
                                          3. (Not recommended) In a scenario where Virtio drivers are not used, importing the image into a Persistent Volume Claim (PVC) using the virtctl tool. The virtual machine may use other types of drivers or default drivers to operate disk and network devices.

                                            apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n  labels:\n    virtnest.io/os-family: Windows\n    virtnest.io/os-version: '10'\n  name: windows10\n  namespace: default\nspec:\n  dataVolumeTemplates:\n    # Create multiple PVC (disks) for system disk\n    - metadata:\n        name: win10-system\n        namespace: default\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 32Gi\n          storageClassName: local-path\n        source:\n          blank: {}\n  running: true\n  template:\n    metadata:\n      labels:\n        app: windows10\n        version: v1\n        kubevirt.io/domain: windows10\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 8\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              # Use sata without virtio driver\n              cdrom:\n                bus: sata\n              name: win10-system\n              # Use sata for ISO\n            - bootOrder: 2\n              cdrom:\n                bus: sata\n              name: iso-win10\n          interfaces:\n            - name: default\n              masquerade: {}\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 8G\n      networks:\n        - name: default\n          pod: {}\n      volumes:\n        - name: iso-win10\n          persistentVolumeClaim:\n            claimName: iso-win10\n        - name: win10-system\n          persistentVolumeClaim:\n            claimName: win10-system\n
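
                                          Whichever variant you choose, you can apply the YAML and watch the virtual machine come up; a minimal sketch (the file name is hypothetical):

                                            kubectl apply -f windows10-vm.yaml\nkubectl get vm -n default\nkubectl get dv -n default\n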
                                          "},{"location":"en/admin/virtnest/best-practice/vm-windows.html#cloud-desktop","title":"Cloud Desktop","text":"

                                          For Windows virtual machines, remote desktop control access is often required. It is recommended to use Microsoft Remote Desktop to control your virtual machine.

                                          Note

                                          • Your Windows version must support remote desktop control to use Microsoft Remote Desktop.
                                          • You need to disable the Windows firewall (see the example commands below).
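
                                          A minimal sketch of the guest-side setup, run from an administrator command prompt inside the Windows VM (standard Windows commands; relax or tighten them to match your security policy):

                                          rem Allow inbound RDP connections\nreg add \"HKLM\\SYSTEM\\CurrentControlSet\\Control\\Terminal Server\" /v fDenyTSConnections /t REG_DWORD /d 0 /f\nrem Turn off the Windows firewall for all profiles\nnetsh advfirewall set allprofiles state off\n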
                                          "},{"location":"en/admin/virtnest/best-practice/vm-windows.html#add-data-disks","title":"Add Data Disks","text":"

                                          Adding a data disk to a Windows virtual machine follows the same process as adding one to a Linux virtual machine. You can refer to the provided YAML example for guidance.

                                            apiVersion: kubevirt.io/v1\nkind: VirtualMachine\n<...>\nspec:\n  dataVolumeTemplates:\n    # Add a data disk\n    - metadata:\n        name: win10-disk\n        namespace: default\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 16Gi\n          storageClassName: hwameistor-storage-lvm-hdd\n        source:\n          blank: {}\n  template:\n    spec:\n      domain:\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio\n              name: win10-system\n            # Add a data disk\n            - bootOrder: 2\n              disk:\n                bus: virtio\n              name: win10-disk\n          <....>\n      volumes:\n        <....>\n        # Add a data disk\n        - name: win10-disk\n          persistentVolumeClaim:\n            claimName: win10-disk\n
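
                                            After applying the change, you can confirm that the new DataVolume and its PVC were created (a quick check):

                                            kubectl get dv win10-disk -n default\nkubectl get pvc win10-disk -n default\n

                                            Inside Windows, the new disk then appears in Disk Management, where it must be brought online, initialized, and formatted before use.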
                                          "},{"location":"en/admin/virtnest/best-practice/vm-windows.html#snapshots-cloning-live-migration","title":"Snapshots, Cloning, Live Migration","text":"

                                          These capabilities are consistent with Linux virtual machines and can be configured using the same methods.

                                          "},{"location":"en/admin/virtnest/best-practice/vm-windows.html#access-your-windows-virtual-machine","title":"Access Your Windows Virtual Machine","text":"
                                          1. After successful creation, access the virtual machine list page to confirm that the virtual machine is running properly.

                                          2. Click Console Access (VNC) to open the virtual machine console.
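
                                          If you prefer the command line, virtctl (assuming it is installed locally) can also open a VNC session:

                                          virtctl vnc <VM name> -n <namespace>\n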

                                          "},{"location":"en/admin/virtnest/gpu/vm-gpu.html","title":"Configure GPU Passthrough for Virtual Machines","text":"

                                          This page will explain the prerequisites for configuring GPU when creating a virtual machine.

                                          The key to configuring GPU for virtual machines is to configure the GPU Operator to deploy different software components on the worker nodes, depending on the GPU workload configuration. Here are three example nodes:

                                          • The controller-node-1 node is configured to run containers.
                                          • The work-node-1 node is configured to run virtual machines with GPU passthrough.
                                          • The work-node-2 node is configured to run virtual machines with vGPU.
                                          "},{"location":"en/admin/virtnest/gpu/vm-gpu.html#assumptions-limitations-and-dependencies","title":"Assumptions, Limitations, and Dependencies","text":"

                                          The worker nodes can run GPU-accelerated containers, virtual machines with GPU passthrough, or virtual machines with vGPU. However, any combination of these on the same node is not supported.

                                          1. The cluster administrator or developer needs to have prior knowledge of the cluster and correctly label the nodes to indicate the type of GPU workload they will run.
                                          2. The worker node that runs a GPU-accelerated virtual machine with GPU passthrough or vGPU is assumed to be a bare metal machine. If the worker node is a virtual machine, the GPU passthrough feature needs to be enabled on the virtual machine platform. Please consult your virtual machine platform provider for guidance.
                                          3. Nvidia MIG is not supported for vGPU.
                                          4. The GPU Operator does not automatically install GPU drivers in the virtual machine.
                                          "},{"location":"en/admin/virtnest/gpu/vm-gpu.html#enable-iommu","title":"Enable IOMMU","text":"

                                          To enable GPU passthrough, the cluster nodes need to have IOMMU enabled. Refer to How to Enable IOMMU. If your cluster is running on a virtual machine, please consult your virtual machine platform provider.

                                          "},{"location":"en/admin/virtnest/gpu/vm-gpu.html#label-the-cluster-nodes","title":"Label the Cluster Nodes","text":"

                                          Go to Container Management, select your worker cluster, click Node Management, and then click Modify Labels in the action bar to add labels to the nodes. Each node can only have one label.

                                          You can assign one of the following values to the node label nvidia.com/gpu.workload.config: container, vm-passthrough, or vm-vgpu.
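
                                          Equivalently, you can label the nodes from the command line (node names follow the examples on this page):

                                          kubectl label node work-node-1 nvidia.com/gpu.workload.config=vm-passthrough\nkubectl label node work-node-2 nvidia.com/gpu.workload.config=vm-vgpu\n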

                                          "},{"location":"en/admin/virtnest/gpu/vm-gpu.html#install-nvidia-operator","title":"Install Nvidia Operator","text":"
                                          1. Go to Container Management, select your worker cluster, click Helm Apps -> Helm Chart , and choose and install gpu-operator. Modify the relevant fields in the yaml.

                                            gpu-operator.sandboxWorkloads.enabled=true\ngpu-operator.vfioManager.enabled=true\ngpu-operator.sandboxDevicePlugin.enabled=true\ngpu-operator.sandboxDevicePlugin.version=v1.2.4   # version should be >= v1.2.4\ngpu-operator.toolkit.version=v1.14.3-ubuntu20.04\n
                                          2. Wait for the installation to complete successfully.

                                          "},{"location":"en/admin/virtnest/gpu/vm-gpu.html#install-virtnest-agent-and-configure-cr","title":"Install virtnest-agent and Configure CR","text":"
                                          1. Install virtnest-agent, refer to Install virtnest-agent.

                                          2. Add vGPU and GPU passthrough to the Virtnest Kubevirt CR. The following example shows the relevant yaml after adding vGPU and GPU passthrough:

                                            spec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n      - GPU\n      - DisableMDEVConfiguration\n    permittedHostDevices: # (1)!\n      mediatedDevices:            # (2)!\n      - mdevNameSelector: GRID P4-1Q\n        resourceName: nvidia.com/GRID_P4-1Q\n      pciHostDevices:             # (3)!\n      - externalResourceProvider: true\n        pciVendorSelector: 10DE:1BB3\n        resourceName: nvidia.com/GP104GL_TESLA_P4\n
                                            1. The following information needs to be filled in
                                            2. vGPU
                                            3. GPU passthrough
                                          3. In the kubevirt CR yaml, permittedHostDevices is used to import VM devices. For vGPU, mediatedDevices needs to be added, with the following structure:

                                            mediatedDevices:          \n- mdevNameSelector: GRID P4-1Q          # (1)!\n  resourceName: nvidia.com/GRID_P4-1Q   # (2)!\n
                                            1. Device name
                                            2. vGPU information registered by GPU Operator on the node
                                          4. For GPU passthrough, pciHostDevices needs to be added under permittedHostDevices, with the following structure:

                                            pciHostDevices:           \n- externalResourceProvider: true            # (1)!\n  pciVendorSelector: 10DE:1BB3              # (2)!\n  resourceName: nvidia.com/GP104GL_TESLA_P4 # (3)!\n
                                            1. Keep this default value; do not change it
                                            2. Vendor ID of the current PCI device
                                            3. GPU information registered by GPU Operator on the node
                                          5. Example of obtaining vGPU information (only applicable to vGPU): view the information of the node labeled nvidia.com/gpu.workload.config=vm-vgpu (work-node-2 in this example). In the Capacity section, nvidia.com/GRID_P4-1Q: 8 indicates that 8 vGPUs are available:

                                            kubectl describe node work-node-2\n
                                            Capacity:\n  cpu:                                 64\n  devices.kubevirt.io/kvm:             1k\n  devices.kubevirt.io/tun:             1k\n  devices.kubevirt.io/vhost-net:       1k\n  ephemeral-storage:                   102626232Ki\n  hugepages-1Gi:                       0\n  hugepages-2Mi:                       0\n  memory:                              264010840Ki\n  nvidia.com/GRID_P4-1Q:               8\n  pods:                                110\nAllocatable:\n  cpu:                                  64\n  devices.kubevirt.io/kvm:              1k\n  devices.kubevirt.io/tun:              1k\n  devices.kubevirt.io/vhost-net:        1k\n  ephemeral-storage:                    94580335255\n  hugepages-1Gi:                        0\n  hugepages-2Mi:                        0\n  memory:                               263908440Ki\n  nvidia.com/GRID_P4-1Q:                8\n  pods:                                 110\n

                                            In this case, the mdevNameSelector should be \"GRID P4-1Q\" and the resourceName should be \"GRID_P4-1Q\".

                                          6. Get GPU passthrough information: on the node labeled nvidia.com/gpu.workload.config=vm-passthrough (work-node-1 in this example), view the node information. In the Capacity section, nvidia.com/GP104GL_TESLA_P4: 2 indicates that two GPUs are available for passthrough:

                                            kubectl describe node work-node-1\n
                                            Capacity:\n  cpu:                            64\n  devices.kubevirt.io/kvm:        1k\n  devices.kubevirt.io/tun:        1k\n  devices.kubevirt.io/vhost-net:  1k\n  ephemeral-storage:              102626232Ki\n  hugepages-1Gi:                  0\n  hugepages-2Mi:                  0\n  memory:                         264010840Ki\n  nvidia.com/GP104GL_TESLA_P4:    2\n  pods:                           110\nAllocatable:\n  cpu:                            64\n  devices.kubevirt.io/kvm:        1k\n  devices.kubevirt.io/tun:        1k\n  devices.kubevirt.io/vhost-net:  1k\n  ephemeral-storage:              94580335255\n  hugepages-1Gi:                  0\n  hugepages-2Mi:                  0\n  memory:                         263908440Ki\n  nvidia.com/GP104GL_TESLA_P4:    2\n  pods:                           110\n

                                            In this case, the resourceName should be \"GP104GL_TESLA_P4\". To obtain the pciVendorSelector, use SSH to log in to the target node work-node-1 and run the lspci -nnk -d 10de: command to list the Nvidia GPU PCI information; the bracketed vendor:device ID pair in the output is the pciVendorSelector value.
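
                                            The output looks similar to the following (illustrative); the bracketed pair, 10de:1bb3 here, is the pciVendorSelector:

                                            lspci -nnk -d 10de:\n

                                            3b:00.0 3D controller [0302]: NVIDIA Corporation GP104GL [Tesla P4] [10de:1bb3] (rev a1)\n        Kernel driver in use: vfio-pci\n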

                                          7. Edit the kubevirt CR. Note: if there are multiple GPUs of the same model, you only need to register one entry in the CR; there is no need to list every individual GPU.

                                            kubectl -n virtnest-system edit kubevirt kubevirt\n
                                            spec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n      - GPU\n      - DisableMDEVConfiguration\n    permittedHostDevices: # (1)!\n      mediatedDevices:                    # (2)!\n      - mdevNameSelector: GRID P4-1Q\n        resourceName: nvidia.com/GRID_P4-1Q\n      pciHostDevices:                     # (3)!\n      - externalResourceProvider: true\n        pciVendorSelector: 10DE:1BB3\n        resourceName: nvidia.com/GP104GL_TESLA_P4 \n

                                            1. The following information needs to be filled in
                                            2. vGPU
                                            3. GPU passthrough; in the example above there are two TESLA P4 GPUs, so only one entry needs to be registered here
                                          "},{"location":"en/admin/virtnest/gpu/vm-gpu.html#create-vm-using-yaml-and-enable-gpu-acceleration","title":"Create VM Using YAML and Enable GPU Acceleration","text":"

                                          The only difference from a regular virtual machine is adding GPU-related information in the devices section.

                                          Click to view the complete YAML
                                          apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  name: testvm-gpu1\n  namespace: default\nspec:\n  dataVolumeTemplates:\n  - metadata:\n      creationTimestamp: null\n      name: systemdisk-testvm-gpu1\n      namespace: default\n    spec:\n      pvc:\n        accessModes:\n        - ReadWriteOnce\n        resources:\n          requests:\n            storage: 10Gi\n        storageClassName: www\n      source:\n        registry:\n          url: docker://release-ci.daocloud.io/virtnest/system-images/debian-12-x86_64:v1\n  runStrategy: Manual\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n          - bootOrder: 1\n            disk:\n              bus: virtio\n            name: systemdisk-testvm-gpu1\n          - disk:\n              bus: virtio\n            name: cloudinitdisk\n          gpus:\n          - deviceName: nvidia.com/GP104GL_TESLA_P4\n            name: gpu-0-0\n          - deviceName: nvidia.com/GP104GL_TESLA_P4\n            name: gpu-0-1\n          interfaces:\n          - masquerade: {}\n            name: default\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n      - name: default\n        pod: {}\n      volumes:\n      - dataVolume:\n          name: systemdisk-testvm-gpu1\n        name: systemdisk-testvm-gpu1\n      - cloudInitNoCloud:\n          userDataBase64: I2Nsb3VkLWNvbmZpZwpzc2hfcHdhdXRoOiB0cnVlCmRpc2FibGVfcm9vdDogZmFsc2UKY2hwYXNzd2Q6IHsibGlzdCI6ICJyb290OmRhbmdlcm91cyIsIGV4cGlyZTogRmFsc2V9CgoKcnVuY21kOgogIC0gc2VkIC1pICIvI1w/UGVybWl0Um9vdExvZ2luL3MvXi4qJC9QZXJtaXRSb290TG9naW4geWVzL2ciIC9ldGMvc3NoL3NzaGRfY29uZmlnCiAgLSBzeXN0ZW1jdGwgcmVzdGFydCBzc2guc2VydmljZQ==\n        name: cloudinitdisk\n
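
                                          Because runStrategy is Manual, the virtual machine is not started automatically after the YAML is applied; a typical flow (the file name is hypothetical, and virtctl is assumed to be installed):

                                          kubectl apply -f testvm-gpu1.yaml\nvirtctl start testvm-gpu1 -n default\nkubectl get vmi -n default\n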
                                          "},{"location":"en/admin/virtnest/gpu/vm-vgpu.html","title":"Configure GPU (vGPU) for Virtual Machines","text":"

                                          This page will explain the prerequisites for configuring GPU when creating a virtual machine.

                                          The key to configuring GPU for virtual machines is to configure the GPU Operator to deploy different software components on the worker nodes, depending on the GPU workload configuration. Here are three example nodes:

                                          • The controller-node-1 node is configured to run containers.
                                          • The work-node-1 node is configured to run virtual machines with GPU passthrough.
                                          • The work-node-2 node is configured to run virtual machines with vGPU.
                                          "},{"location":"en/admin/virtnest/gpu/vm-vgpu.html#assumptions-limitations-and-dependencies","title":"Assumptions, Limitations, and Dependencies","text":"

                                          The worker nodes can run GPU-accelerated containers, virtual machines with GPU passthrough, or virtual machines with vGPU. However, any combination of these on the same node is not supported.

                                          1. The cluster administrator or developer needs to have prior knowledge of the cluster and correctly label the nodes to indicate the type of GPU workload they will run.
                                          2. The worker node that runs a GPU-accelerated virtual machine with GPU passthrough or vGPU is assumed to be a bare metal machine. If the worker node is a virtual machine, the GPU passthrough feature needs to be enabled on the virtual machine platform. Please consult your virtual machine platform provider for guidance.
                                          3. Nvidia MIG is not supported for vGPU.
                                          4. The GPU Operator does not automatically install GPU drivers in the virtual machine.
                                          "},{"location":"en/admin/virtnest/gpu/vm-vgpu.html#enable-iommu","title":"Enable IOMMU","text":"

                                          To enable GPU passthrough, the cluster nodes need to have IOMMU enabled. Please refer to How to Enable IOMMU. If your cluster is running on a virtual machine, please consult your virtual machine platform provider.

                                          "},{"location":"en/admin/virtnest/gpu/vm-vgpu.html#build-vgpu-manager-image","title":"Build vGPU Manager Image","text":"

                                          Note: This step is only required when using NVIDIA vGPU. If you plan to use GPU passthrough only, skip this section.

                                          Follow these steps to build the vGPU Manager image and push it to the container registry:

                                          1. Download the vGPU software from the NVIDIA Licensing Portal.

                                            • Log in to the NVIDIA Licensing Portal and go to the Software Downloads page.
                                            • The NVIDIA vGPU software is located in the Driver downloads tab on the Software Downloads page.
                                            • Select VGPU + Linux in the filter criteria and click Download to get the Linux KVM package. Unzip the downloaded file (NVIDIA-Linux-x86_64-<version>-vgpu-kvm.run).
                                          2. Open a terminal and clone the container-images/driver repository.

                                            git clone https://gitlab.com/nvidia/container-images/driver\ncd driver\n
                                          3. Switch to the vgpu-manager directory that corresponds to your operating system.

                                            cd vgpu-manager/<your-os>\n
                                          4. Copy the .run file extracted in step 1 to the current directory.

                                            cp <local-driver-download-directory>/*-vgpu-kvm.run ./\n
                                          5. Set the environment variables.

                                            • PRIVATE_REGISTRY: The name of the private registry to store the driver image.
                                            • VERSION: The version of the NVIDIA vGPU Manager, downloaded from the NVIDIA Software Portal.
                                            • OS_TAG: Must match the operating system version of the cluster nodes.
                                            • CUDA_VERSION: The base CUDA image version used to build the driver image.
                                            export PRIVATE_REGISTRY=my/private/registry VERSION=510.73.06 OS_TAG=ubuntu22.04 CUDA_VERSION=12.2.0\n
                                          6. Build the NVIDIA vGPU Manager Image.

                                            docker build \\\n  --build-arg DRIVER_VERSION=${VERSION} \\\n  --build-arg CUDA_VERSION=${CUDA_VERSION} \\\n  -t ${PRIVATE_REGISTRY}/vgpu-manager:${VERSION}-${OS_TAG} .\n
                                          7. Push the NVIDIA vGPU Manager image to your container registry.

                                            docker push ${PRIVATE_REGISTRY}/vgpu-manager:${VERSION}-${OS_TAG}\n
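
                                            Before relying on the pushed image, you can quickly confirm the local build and tag:

                                            docker images ${PRIVATE_REGISTRY}/vgpu-manager\n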
                                          "},{"location":"en/admin/virtnest/gpu/vm-vgpu.html#label-the-cluster-nodes","title":"Label the Cluster Nodes","text":"

                                          Go to Container Management, select your worker cluster, click Node Management, and then click Modify Labels in the action bar to add labels to the nodes. Each node can only have one label.

                                          You can assign one of the following values to the node label nvidia.com/gpu.workload.config: container, vm-passthrough, or vm-vgpu.
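
                                          For example, to mark work-node-2 for vGPU virtual machines from the command line:

                                          kubectl label node work-node-2 nvidia.com/gpu.workload.config=vm-vgpu\n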

                                          "},{"location":"en/admin/virtnest/gpu/vm-vgpu.html#install-nvidia-operator","title":"Install Nvidia Operator","text":"
                                          1. Go to Container Management, select your worker cluster, click Helm Apps -> Helm Chart, and choose and install gpu-operator. Modify the relevant fields in the yaml.

                                            gpu-operator.sandboxWorkloads.enabled=true\ngpu-operator.vgpuManager.enabled=true\ngpu-operator.vgpuManager.repository=<your-registry-url>      # (1)!\ngpu-operator.vgpuManager.image=vgpu-manager\ngpu-operator.vgpuManager.version=<your-vgpu-manager-version> # (2)!\ngpu-operator.vgpuDeviceManager.enabled=true\n
                                            1. The container registry address from the \"Build vGPU Manager Image\" step.
                                            2. The VERSION from the \"Build vGPU Manager Image\" step.
                                          2. Wait for the installation to complete successfully.

                                          "},{"location":"en/admin/virtnest/gpu/vm-vgpu.html#install-virtnest-agent-and-configure-cr","title":"Install virtnest-agent and Configure CR","text":"
                                          1. Install virtnest-agent, refer to Install virtnest-agent.

                                          2. Add vGPU and GPU passthrough to the Virtnest Kubevirt CR. The following example shows the relevant yaml after adding vGPU and GPU passthrough:

                                            spec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n      - GPU\n      - DisableMDEVConfiguration\n    permittedHostDevices: # (1)!\n      mediatedDevices:            # (2)!\n      - mdevNameSelector: GRID P4-1Q\n        resourceName: nvidia.com/GRID_P4-1Q\n      pciHostDevices:             # (3)!\n      - externalResourceProvider: true\n        pciVendorSelector: 10DE:1BB3\n        resourceName: nvidia.com/GP104GL_TESLA_P4\n
                                            1. The following information needs to be filled in
                                            2. vGPU
                                            3. GPU passthrough
                                          3. In the kubevirt CR yaml, permittedHostDevices is used to import VM devices. For vGPU, mediatedDevices needs to be added, with the following structure:

                                            mediatedDevices:          \n- mdevNameSelector: GRID P4-1Q          # (1)!\n  resourceName: nvidia.com/GRID_P4-1Q   # (2)!\n
                                            1. Device name
                                            2. vGPU information registered by the GPU Operator on the node
                                          4. For GPU passthrough, pciHostDevices needs to be added under permittedHostDevices, with the following structure:

                                            pciHostDevices:           \n- externalResourceProvider: true            # (1)!\n  pciVendorSelector: 10DE:1BB3              # (2)!\n  resourceName: nvidia.com/GP104GL_TESLA_P4 # (3)!\n
                                            1. Keep this default value; do not change it
                                            2. Vendor ID of the current PCI device
                                            3. GPU information registered by the GPU Operator on the node
                                          5. Example of obtaining vGPU information (only applicable to vGPU): view the information of the node labeled nvidia.com/gpu.workload.config=vm-vgpu (work-node-2 in this example). In the Capacity section, nvidia.com/GRID_P4-1Q: 8 indicates that 8 vGPUs are available:

                                            kubectl describe node work-node-2\n
                                            Capacity:\n  cpu:                                 64\n  devices.kubevirt.io/kvm:             1k\n  devices.kubevirt.io/tun:             1k\n  devices.kubevirt.io/vhost-net:       1k\n  ephemeral-storage:                   102626232Ki\n  hugepages-1Gi:                       0\n  hugepages-2Mi:                       0\n  memory:                              264010840Ki\n  nvidia.com/GRID_P4-1Q:               8\n  pods:                                110\nAllocatable:\n  cpu:                                  64\n  devices.kubevirt.io/kvm:              1k\n  devices.kubevirt.io/tun:              1k\n  devices.kubevirt.io/vhost-net:        1k\n  ephemeral-storage:                    94580335255\n  hugepages-1Gi:                        0\n  hugepages-2Mi:                        0\n  memory:                               263908440Ki\n  nvidia.com/GRID_P4-1Q:                8\n  pods:                                 110\n

                                            In this case, the mdevNameSelector should be \"GRID P4-1Q\" and the resourceName should be \"GRID_P4-1Q\".

                                          6. Get GPU passthrough information: on the node labeled nvidia.com/gpu.workload.config=vm-passthrough (work-node-1 in this example), view the node information. In the Capacity section, nvidia.com/GP104GL_TESLA_P4: 2 indicates that two GPUs are available for passthrough:

                                            kubectl describe node work-node-1\n
                                            Capacity:\n  cpu:                            64\n  devices.kubevirt.io/kvm:        1k\n  devices.kubevirt.io/tun:        1k\n  devices.kubevirt.io/vhost-net:  1k\n  ephemeral-storage:              102626232Ki\n  hugepages-1Gi:                  0\n  hugepages-2Mi:                  0\n  memory:                         264010840Ki\n  nvidia.com/GP104GL_TESLA_P4:    2\n  pods:                           110\nAllocatable:\n  cpu:                            64\n  devices.kubevirt.io/kvm:        1k\n  devices.kubevirt.io/tun:        1k\n  devices.kubevirt.io/vhost-net:  1k\n  ephemeral-storage:              94580335255\n  hugepages-1Gi:                  0\n  hugepages-2Mi:                  0\n  memory:                         263908440Ki\n  nvidia.com/GP104GL_TESLA_P4:    2\n  pods:                           110\n

                                            In this case, the resourceName should be \"GP104GL_TESLA_P4\". To obtain the pciVendorSelector, SSH into the target node work-node-1 and run the lspci -nnk -d 10de: command to list the Nvidia GPU PCI information; the bracketed vendor:device ID pair in the output is the pciVendorSelector value.

                                          7. Edit the kubevirt CR. Note: if there are multiple GPUs of the same model, you only need to register one entry in the CR; there is no need to list every individual GPU.

                                            kubectl -n virtnest-system edit kubevirt kubevirt\n
                                            spec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n      - GPU\n      - DisableMDEVConfiguration\n    permittedHostDevices: # (1)!\n      mediatedDevices:                    # (2)!\n      - mdevNameSelector: GRID P4-1Q\n        resourceName: nvidia.com/GRID_P4-1Q\n      pciHostDevices:                       # (3)!\n      - externalResourceProvider: true\n        pciVendorSelector: 10DE:1BB3\n        resourceName: nvidia.com/GP104GL_TESLA_P4 \n

                                            1. The following information needs to be filled in
                                            2. vGPU
                                            3. GPU passthrough; in the example above there are two TESLA P4 GPUs, so only one entry needs to be registered here
                                          "},{"location":"en/admin/virtnest/gpu/vm-vgpu.html#create-vm-using-yaml-and-enable-gpu-acceleration","title":"Create VM Using YAML and Enable GPU Acceleration","text":"

                                          The only difference from a regular virtual machine is adding the gpu-related information in the devices section.

                                          Click to view the complete YAML
                                          apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  name: testvm-gpu1\n  namespace: default\nspec:\n  dataVolumeTemplates:\n  - metadata:\n      creationTimestamp: null\n      name: systemdisk-testvm-gpu1\n      namespace: default\n    spec:\n      pvc:\n        accessModes:\n        - ReadWriteOnce\n        resources:\n          requests:\n            storage: 10Gi\n        storageClassName: www\n      source:\n        registry:\n          url: docker://release-ci.daocloud.io/virtnest/system-images/debian-12-x86_64:v1\n  runStrategy: Manual\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n          - bootOrder: 1\n            disk:\n              bus: virtio\n            name: systemdisk-testvm-gpu1\n          - disk:\n              bus: virtio\n            name: cloudinitdisk\n          gpus:\n          - deviceName: nvidia.com/GP104GL_TESLA_P4\n            name: gpu-0-0\n          - deviceName: nvidia.com/GP104GL_TESLA_P4\n            name: gpu-0-1\n          interfaces:\n          - masquerade: {}\n            name: default\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n      - name: default\n        pod: {}\n      volumes:\n      - dataVolume:\n          name: systemdisk-testvm-gpu1\n        name: systemdisk-testvm-gpu1\n      - cloudInitNoCloud:\n          userDataBase64: I2Nsb3VkLWNvbmZpZwpzc2hfcHdhdXRoOiB0cnVlCmRpc2FibGVfcm9vdDogZmFsc2UKY2hwYXNzd2Q6IHsibGlzdCI6ICJyb290OmRhbmdlcm91cyIsIGV4cGlyZTogRmFsc2V9CgoKcnVuY21kOgogIC0gc2VkIC1pICIvI1w/UGVybWl0Um9vdExvZ2luL3MvXi4qJC9QZXJtaXRSb290TG9naW4geWVzL2ciIC9ldGMvc3NoL3NzaGRfY29uZmlnCiAgLSBzeXN0ZW1jdGwgcmVzdGFydCBzc2guc2VydmljZQ==\n        name: cloudinitdisk\n
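
                                          Once the virtual machine is running, you can check from inside the guest that the GPU device is visible (a sketch; assumes console access and that lspci is available in the guest):

                                          virtctl console testvm-gpu1 -n default\n# then, inside the guest:\nlspci | grep -i nvidia\n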
                                          "},{"location":"en/admin/virtnest/install/index.html","title":"Install Virtual Machine Module","text":"

                                          This page explains how to install the virtual machine module.

                                          Info

                                          The term virtnest appearing in the following commands or scripts is the internal development code name for the Virtual Machine module.

                                          "},{"location":"en/admin/virtnest/install/index.html#configure-helm-repo","title":"Configure Helm Repo","text":"

                                          Helm-charts repository address: https://release.daocloud.io/harbor/projects/10/helm-charts/virtnest/versions

                                          helm repo add virtnest-release https://release.daocloud.io/chartrepo/virtnest\nhelm repo update virtnest-release\n

                                          If you want to try the latest development version of virtnest, add the following repository instead (development versions are extremely unstable).

                                          helm repo add virtnest-release-ci https://release-ci.daocloud.io/chartrepo/virtnest\nhelm repo update virtnest-release-ci\n
                                          "},{"location":"en/admin/virtnest/install/index.html#choose-a-version-that-you-want-to-install","title":"Choose a Version that You Want to Install","text":"

                                          It is recommended to install the latest version.

                                          [root@master ~]# helm search repo virtnest-release/virtnest --versions\nNAME                   CHART VERSION  APP VERSION  DESCRIPTION\nvirtnest-release/virtnest  0.6.0          v0.6.0       A Helm chart for virtnest\n
                                          "},{"location":"en/admin/virtnest/install/index.html#create-a-namespace","title":"Create a Namespace","text":"
                                          kubectl create namespace virtnest-system\n
                                          "},{"location":"en/admin/virtnest/install/index.html#perform-installation-steps","title":"Perform Installation Steps","text":"
                                          helm install virtnest virtnest-release/virtnest -n virtnest-system --version 0.6.0\n
                                          "},{"location":"en/admin/virtnest/install/index.html#upgrade","title":"Upgrade","text":""},{"location":"en/admin/virtnest/install/index.html#update-the-virtnest-helm-repository","title":"Update the virtnest Helm Repository","text":"
                                          helm repo update virtnest-release\n
                                          "},{"location":"en/admin/virtnest/install/index.html#back-up-the-set-parameters","title":"Back up the --set Parameters","text":"

                                          Before upgrading the virtnest version, we recommend running the following command to back up the --set parameters of the previous version:

                                          helm get values virtnest -n virtnest-system -o yaml > bak.yaml\n
                                          "},{"location":"en/admin/virtnest/install/index.html#perform-helm-upgrade","title":"Perform Helm Upgrade","text":"
                                          helm upgrade virtnest virtnest-release/virtnest \\\n    -n virtnest-system \\\n    -f ./bak.yaml \\\n    --version 0.6.0\n
                                          "},{"location":"en/admin/virtnest/install/index.html#delete","title":"Delete","text":"
                                          helm delete virtnest -n virtnest-system\n
                                          "},{"location":"en/admin/virtnest/install/install-dependency.html","title":"Dependencies and Prerequisites","text":"

                                          This page explains the dependencies and prerequisites for installing the virtual machine module.

                                          Info

                                          The term virtnest mentioned in the commands or scripts below is the internal development codename for the Virtual Machine module.

                                          "},{"location":"en/admin/virtnest/install/install-dependency.html#prerequisites","title":"Prerequisites","text":""},{"location":"en/admin/virtnest/install/install-dependency.html#kernel-version-being-above-v411","title":"Kernel version being above v4.11","text":"

                                          The kernel version of all nodes in the target cluster needs to be higher than v4.11. For detailed information, see the kubevirt issue. Run the following command to check the version:

                                          uname -a\n

                                          Example output:

                                          Linux master 6.5.3-1.el7.elrepo.x86_64 #1 SMP PREEMPT_DYNAMIC Wed Sep 13 11:46:28 EDT 2023 x86_64 x86_64 x86_64 GNU/Linux\n
                                          "},{"location":"en/admin/virtnest/install/install-dependency.html#cpu-supporting-x86-64-v2-instruction-set-or-higher","title":"CPU supporting x86-64-v2 instruction set or higher","text":"

                                          You can use the following script to check whether the current node's CPU meets this requirement:

                                          Note

                                          If you encounter a message like the one shown below, you can safely ignore it as it does not impact the final result.

                                          Example
                                          $ sh detect-cpu.sh\ndetect-cpu.sh: line 3: fpu: command not found\n
                                          cat <<'EOF' > detect-cpu.sh\n#!/bin/sh -eu\n\nflags=$(cat /proc/cpuinfo | grep flags | head -n 1 | cut -d: -f2)\n\nsupports_v2='awk \"/cx16/&&/lahf/&&/popcnt/&&/sse4_1/&&/sse4_2/&&/ssse3/ {found=1} END {exit !found}\"'\nsupports_v3='awk \"/avx/&&/avx2/&&/bmi1/&&/bmi2/&&/f16c/&&/fma/&&/abm/&&/movbe/&&/xsave/ {found=1} END {exit !found}\"'\nsupports_v4='awk \"/avx512f/&&/avx512bw/&&/avx512cd/&&/avx512dq/&&/avx512vl/ {found=1} END {exit !found}\"'\n\necho \"$flags\" | eval $supports_v2 || exit 2 && echo \"CPU supports x86-64-v2\"\necho \"$flags\" | eval $supports_v3 || exit 3 && echo \"CPU supports x86-64-v3\"\necho \"$flags\" | eval $supports_v4 || exit 4 && echo \"CPU supports x86-64-v4\"\nEOF\nchmod +x detect-cpu.sh\nsh detect-cpu.sh\n
                                          "},{"location":"en/admin/virtnest/install/install-dependency.html#all-nodes-having-hardware-virtualization-nested-virtualization-enabled","title":"All Nodes having hardware virtualization (nested virtualization) enabled","text":"
                                          • Run the following command to check whether hardware virtualization is enabled:

                                            virt-host-validate qemu\n
                                            # Successful case\nQEMU: Checking for hardware virtualization                                 : PASS\nQEMU: Checking if device /dev/kvm exists                                   : PASS\nQEMU: Checking if device /dev/kvm is accessible                            : PASS\nQEMU: Checking if device /dev/vhost-net exists                             : PASS\nQEMU: Checking if device /dev/net/tun exists                               : PASS\nQEMU: Checking for cgroup 'cpu' controller support                         : PASS\nQEMU: Checking for cgroup 'cpuacct' controller support                     : PASS\nQEMU: Checking for cgroup 'cpuset' controller support                      : PASS\nQEMU: Checking for cgroup 'memory' controller support                      : PASS\nQEMU: Checking for cgroup 'devices' controller support                     : PASS\nQEMU: Checking for cgroup 'blkio' controller support                       : PASS\nQEMU: Checking for device assignment IOMMU support                         : PASS\nQEMU: Checking if IOMMU is enabled by kernel                               : PASS\nQEMU: Checking for secure guest support                                    : WARN (Unknown if this platform has Secure Guest support)\n\n# Failure case\nQEMU: Checking for hardware virtualization                                 : FAIL (Only emulated CPUs are available, performance will be significantly limited)\nQEMU: Checking if device /dev/vhost-net exists                             : PASS\nQEMU: Checking if device /dev/net/tun exists                               : PASS\nQEMU: Checking for cgroup 'memory' controller support                      : PASS\nQEMU: Checking for cgroup 'memory' controller mount-point                  : PASS\nQEMU: Checking for cgroup 'cpu' controller support                         : PASS\nQEMU: Checking for cgroup 'cpu' controller mount-point                     : PASS\nQEMU: Checking for cgroup 'cpuacct' controller support                     : PASS\nQEMU: Checking for cgroup 'cpuacct' controller mount-point                 : PASS\nQEMU: Checking for cgroup 'cpuset' controller support                      : PASS\nQEMU: Checking for cgroup 'cpuset' controller mount-point                  : PASS\nQEMU: Checking for cgroup 'devices' controller support                     : PASS\nQEMU: Checking for cgroup 'devices' controller mount-point                 : PASS\nQEMU: Checking for cgroup 'blkio' controller support                       : PASS\nQEMU: Checking for cgroup 'blkio' controller mount-point                   : PASS\nWARN (Unknown if this platform has IOMMU support)\n
                                          • Install virt-host-validate:

                                            On CentOS:

                                            yum install -y qemu-kvm libvirt virt-install bridge-utils\n

                                            On Ubuntu:

                                            apt install qemu-kvm libvirt-daemon-system libvirt-clients bridge-utils\n
                                          • Methods to enable hardware virtualization

                                            Methods vary across platforms; this page takes vSphere as an example. See the VMware website.

                                          "},{"location":"en/admin/virtnest/install/install-dependency.html#if-using-docker-engine-as-the-container-runtime","title":"If using Docker Engine as the container runtime","text":"

                                          If Docker Engine is used as the container runtime, its version must be higher than v20.10.10.
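
                                          You can check the Docker Engine version on each node with:

                                          docker version --format '{{.Server.Version}}'\n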

                                          "},{"location":"en/admin/virtnest/install/install-dependency.html#enabling-iommu-is-recommended","title":"Enabling IOMMU is recommended","text":"

                                          To prepare for future features, it is recommended to enable IOMMU.
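
                                          A quick way to check whether IOMMU is already enabled on a node (a sketch; the exact messages vary by platform and kernel):

                                          dmesg | grep -i -e DMAR -e IOMMU\n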

                                          "},{"location":"en/admin/virtnest/install/offline-install.html","title":"Offline Upgrade of the Virtual Machine Module","text":"

                                          This page explains how to install or upgrade the Virtual Machine module after downloading it from the Download Center.

                                          Info

                                          The term \"virtnest\" appearing in the following commands or scripts is the internal development code name for the Virtual Machine module.

                                          "},{"location":"en/admin/virtnest/install/offline-install.html#load-images-from-the-installation-package","title":"Load Images from the Installation Package","text":"

                                          You can load the images using one of the following two methods. When a container registry is available in your environment, it is recommended to use the chart-syncer method to synchronize the images to the container registry, as it is more efficient and convenient.

                                          "},{"location":"en/admin/virtnest/install/offline-install.html#synchronize-images-to-the-container-registry-using-chart-syncer","title":"Synchronize Images to the container registry using chart-syncer","text":"
                                          1. Create load-image.yaml file.

                                            Note

                                            All parameters in this YAML file are mandatory. You need a private container registry; modify the relevant configuration accordingly.

                                            Chart Repo Installed:

                                            If the chart repo is already installed in your environment, chart-syncer can synchronize the chart directly to it.

                                            load-image.yaml
                                            source:\n  intermediateBundlesPath: virtnest-offline # (1)\ntarget:\n  containerRegistry: 10.16.10.111 # (2)\n  containerRepository: release.daocloud.io/virtnest # (3)\n  repo:\n    kind: HARBOR # (4)\n    url: http://10.16.10.111/chartrepo/release.daocloud.io # (5)\n    auth:\n      username: \"admin\" # (6)\n      password: \"Harbor12345\" # (7)\n  containers:\n    auth:\n      username: \"admin\" # (8)\n      password: \"Harbor12345\" # (9)\n
                                            1. The relative path to run the charts-syncer command, not the relative path between this YAML file and the offline package.
                                            2. Change to your container registry URL.
                                            3. Change to your container registry.
                                            4. It can also be any other supported Helm Chart repository type.
                                            5. Change to the chart repo URL.
                                            6. Your container registry username.
                                            7. Your container registry password.
                                            8. Your container registry username.
                                            9. Your container registry password.

                                            Chart Repo Not Installed:

                                            If the chart repo is not installed in your environment, chart-syncer also supports exporting the chart as a tgz file and storing it in the specified path.

                                            load-image.yaml
                                            source:\n  intermediateBundlesPath: virtnest-offline # (1)\ntarget:\n  containerRegistry: 10.16.10.111 # (2)\n  containerRepository: release.daocloud.io/virtnest # (3)\n  repo:\n    kind: LOCAL\n    path: ./local-repo # (4)\n  containers:\n    auth:\n      username: \"admin\" # (5)\n      password: \"Harbor12345\" # (6)\n
                                            1. The relative path to run the charts-syncer command, not the relative path between this YAML file and the offline package.
                                            2. Change to your container registry URL.
                                            3. Change to your container registry.
                                            4. Local path of the chart.
                                            5. Your container registry username.
                                            6. Your container registry password.
                                          2. Run the command to synchronize the images.

                                            charts-syncer sync --config load-image.yaml\n
                                          "},{"location":"en/admin/virtnest/install/offline-install.html#load-images-directly-using-docker-or-containerd","title":"Load Images Directly using Docker or containerd","text":"

                                          Unpack and load the image files.

                                          1. Unpack the tar archive.

                                            tar xvf virtnest.bundle.tar\n

                                            After successful extraction, you will have three files:

                                            • hints.yaml
                                            • images.tar
                                            • original-chart
                                          2. Load the images from the local file to Docker or containerd.

                                            On Docker:

                                            docker load -i images.tar\n

                                            On containerd:

                                            ctr -n k8s.io image import images.tar\n

                                          Note

                                          Perform the Docker or containerd image loading operation on each node. After loading is complete, tag the images to match the Registry and Repository used during installation.
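
                                          For example, retagging and pushing with Docker might look like the following (the image name, tag, and registry address are illustrative):

                                          docker tag release.daocloud.io/virtnest/virtnest:v0.6.0 10.16.10.111/virtnest/virtnest:v0.6.0\ndocker push 10.16.10.111/virtnest/virtnest:v0.6.0\n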

                                          "},{"location":"en/admin/virtnest/install/offline-install.html#upgrade","title":"Upgrade","text":"

                                          There are two upgrade methods available. You can choose the appropriate upgrade method based on the prerequisites:

                                          Upgrade via helm repo:
                                          1. Check if the Virtual Machine Helm repository exists.

                                            helm repo list | grep virtnest\n

                                            If the result is empty or shows the following message, proceed to the next step. Otherwise, skip the next step.

                                            Error: no repositories to show\n
                                          2. Add the Virtual Machine Helm repository.

                                            helm repo add virtnest http://{harbor url}/chartrepo/{project}\n
                                          3. Update the Virtual Machine Helm repository.

                                            helm repo update virtnest # (1)\n
                                            1. If the helm version is too low, specifying a repository name may fail; if it fails, try running helm repo update without arguments.
                                          4. Choose the version of the Virtual Machine you want to install (it is recommended to install the latest version).

                                            helm search repo virtnest/virtnest --versions\n
                                            [root@master ~]# helm search repo virtnest/virtnest --versions\nNAME                   CHART VERSION  APP VERSION  DESCRIPTION\nvirtnest/virtnest  0.2.0          v0.2.0       A Helm chart for virtnest\n...\n
                                          5. Back up the --set parameters.

                                            Before upgrading the Virtual Machine version, it is recommended to run the following command to back up the --set parameters of the previous version.

                                            helm get values virtnest -n virtnest-system -o yaml > bak.yaml\n
                                          6. Update the virtnest CRDs.

                                            helm pull virtnest/virtnest --version 0.2.0 && tar -zxf virtnest-0.2.0.tgz\nkubectl apply -f virtnest/crds\n
                                          7. Run helm upgrade.

                                            Before upgrading, it is recommended to replace the global.imageRegistry field in bak.yaml with the current container registry address.

                                            export imageRegistry={your container registry}\n
                                            helm upgrade virtnest virtnest/virtnest \\\n  -n virtnest-system \\\n  -f ./bak.yaml \\\n  --set global.imageRegistry=$imageRegistry \\\n  --version 0.2.0\n
                                          Upgrade via chart package:

                                          1. Back up the --set parameters.

                                            Before upgrading the Virtual Machine version, it is recommended to run the following command to back up the --set parameters of the previous version.

                                            helm get values virtnest -n virtnest-system -o yaml > bak.yaml\n
                                          2. Update the virtnest CRDs.

                                            kubectl apply -f ./crds\n
                                          3. Run helm upgrade.

                                            Before upgrading, it is recommended to replace the global.imageRegistry field in bak.yaml with the current container registry address.

                                            export imageRegistry={your container registry}\n
                                            helm upgrade virtnest . \\\n  -n virtnest-system \\\n  -f ./bak.yaml \\\n  --set global.imageRegistry=$imageRegistry\n
                                          "},{"location":"en/admin/virtnest/install/virtnest-agent.html","title":"Install virtnest-agent in a Cluster","text":"

                                          This guide explains how to install the virtnest-agent in a cluster.

                                          "},{"location":"en/admin/virtnest/install/virtnest-agent.html#prerequisites","title":"Prerequisites","text":"

                                          Before installing the virtnest-agent, the following prerequisite must be met:

                                          • The kernel version needs to be v4.11 or above.
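                                          The kernel version of a node can be checked with:

                                          uname -r\n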
                                          "},{"location":"en/admin/virtnest/install/virtnest-agent.html#steps","title":"Steps","text":"

                                          To utilize the Virtual Machine (VM), the virtnest-agent component needs to be installed in the cluster using Helm.

                                          1. Click Container Management in the left navigation menu, then click Virtual Machines . If the virtnest-agent component is not installed, you cannot use virtual machines, and the interface will prompt you to install it in the required cluster.

                                          2. Select the desired cluster, click Helm Apps in the left navigation menu, then click Helm Charts to view the template list.

                                          3. Search for the virtnest-agent component and click to see its details. Select the appropriate version and click the Install button to install it.

                                          4. On the installation page, fill in the required information, and click OK to finish the installation.

                                          5. Go back to Virtual Machines in the navigation menu. If the installation is successful, you will see the VM list and can now use VMs.

                                          "},{"location":"en/admin/virtnest/quickstart/index.html","title":"Create Virtual Machine","text":"

                                          This article will explain how to create a virtual machine using two methods: from an image and from a YAML file.

                                          Virtual machines are based on KubeVirt, which manages them as cloud-native applications that integrate seamlessly with containers. This allows users to easily deploy virtual machine applications and enjoy an experience as smooth as with containerized applications.

                                          "},{"location":"en/admin/virtnest/quickstart/index.html#prerequisites","title":"Prerequisites","text":"

                                          Before creating a virtual machine, make sure you meet the following prerequisites:

                                          • Expose hardware-assisted virtualization to the user operating system (see the check after this list).
                                          • Install virtnest-agent on the specified cluster; the operating system kernel version must be 3.15 or higher.
                                          • Create a namespace and user.
                                          • Prepare the image in advance. The platform comes with three built-in images (as shown below). If you need to create your own image, refer to creating from an image with KubeVirt.
                                          • When configuring the network, if you choose the Passt network mode, you need to upgrade to version 0.4.0 or higher.
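                                          A quick way to confirm the first prerequisite on an x86 node (a non-zero count means VT-x/AMD-V is visible to the operating system):

                                          grep -Ec '(vmx|svm)' /proc/cpuinfo\n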
                                          "},{"location":"en/admin/virtnest/quickstart/index.html#create-image","title":"Create image","text":"

                                          Follow the steps below to create a virtual machine using an image.

                                          1. Click Container Management on the left navigation bar, then click Virtual Machines to enter the VM page.

                                          2. On the virtual machine list page, click Create VMs and select Create with Image.

                                          3. Fill in the basic information, image settings, storage and network, and login settings, then click OK at the bottom right corner to complete the creation.

                                            The system will automatically return to the virtual machine list. By clicking the \u2507 button on the right side of the list, you can perform operations such as power on/off, restart, clone, update, create snapshots, console access (VNC), and delete virtual machines. Cloning and snapshot capabilities depend on the selected StorageClass.

                                          "},{"location":"en/admin/virtnest/quickstart/index.html#basic-information","title":"Basic Information","text":"

                                          In the Create VMs page, enter the information according to the table below and click Next.

                                          • Name: Up to 63 characters, can only contain lowercase letters, numbers, and hyphens ( - ), and must start and end with a lowercase letter or number. The name must be unique within the namespace, and cannot be changed once the virtual machine is created.
                                          • Alias: Allows any characters, up to 60 characters.
                                          • Cluster: Select the cluster to deploy the newly created virtual machine.
                                          • Namespace: Select the namespace to deploy the newly created virtual machine. If the desired namespace is not found, you can create a new namespace according to the prompts on the page.
                                          • Label/Annotation: Select the desired labels/annotations to add to the virtual machine.
                                          "},{"location":"en/admin/virtnest/quickstart/index.html#image-settings","title":"Image Settings","text":"

                                          Fill in the image-related information according to the table below, then click Next.

                                          1. Image Source: Supports three types of sources.

                                            • Registry: Images stored in the container registry. You can select images from the registry as needed.
                                            • HTTP: Images stored in a file server using the HTTP protocol, supporting both https:// and http:// prefixes.
                                            • Object Storage (S3): Virtual machine images obtained through the object storage protocol (S3). For non-authenticated object storage files, please use the HTTP source.
                                          2. The following are the built-in images provided by the platform, including the operating system, version, and the image URL. Custom virtual machine images are also supported.

                                            • CentOS 7.9: release-ci.daocloud.io/virtnest/system-images/centos-7.9-x86_64:v1
                                            • Ubuntu 22.04: release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1
                                            • Debian 12: release-ci.daocloud.io/virtnest/system-images/debian-12-x86_64:v1
                                          3. Image Secret: Only the default (Opaque) secret type is supported; for specific steps, refer to Create Secret.

                                            The built-in images are stored in the bootstrap cluster, and the bootstrap cluster's container registry is not encrypted, so there is no need to select a secret when choosing a built-in image.

                                          Note

                                          The hot-plug configuration for CPU and memory requires virtnest v0.10.0 or higher, and virtnest-agent v0.7.0 or higher.

                                          1. Resource Config: For CPU, it is recommended to use whole numbers. If a decimal is entered, it will be rounded up. The hot-plug configuration for CPU and memory is supported.

                                          2. GPU Configuration: Enabling GPU functionality requires meeting certain prerequisites. For details, refer to Configuring GPU for Virtual Machines (Nvidia). Virtual machines support two types of Nvidia GPUs: Nvidia-GPU and Nvidia-vGPU. After selecting the desired type, you will need to choose the proper GPU model and the number of cards.

                                          "},{"location":"en/admin/virtnest/quickstart/index.html#storage-and-network","title":"Storage and Network","text":"
                                          • Storage:

                                            • Storage is closely tied to the functionality of the virtual machine. By using Kubernetes persistent volumes and storage classes, it provides flexible and scalable storage capabilities for virtual machines. For example, the virtual machine image is stored in a PVC, which supports cloning, snapshotting, and so on together with other data.

                                            • System Disk: The system automatically creates a VirtIO type rootfs system disk for storing the operating system and data.

                                            • Data Disk: The data disk is a storage device in the virtual machine used to store user data, application data, or other non-operating system related files. Compared with the system disk, the data disk is optional and can be dynamically added or removed as needed. The capacity of the data disk can also be flexibly configured according to demand.

                                            • Block storage is used by default. If you need to use the clone and snapshot functions, make sure that your storage pool has created the proper VolumeSnapshotClass, as in the following example. If you need to use the live migration function, make sure your storage supports and selects the ReadWriteMany access mode.

                                              In most cases, the storage will not automatically create such a VolumeSnapshotClass during installation, so you need to create one manually. The following is an example of creating a VolumeSnapshotClass for HwameiStor:

                                              kind: VolumeSnapshotClass\napiVersion: snapshot.storage.k8s.io/v1\nmetadata:\n  name: hwameistor-storage-lvm-snapshot\n  annotations:\n    snapshot.storage.kubernetes.io/is-default-class: \"true\"\nparameters:\n  snapsize: \"1073741824\"\ndriver: lvm.hwameistor.io\ndeletionPolicy: Delete\n
                                            • Run the following command to check if the VolumeSnapshotClass was created successfully.

                                              kubectl get VolumeSnapshotClass\n
                                            • View the created VolumeSnapshotClass and confirm that its driver property matches the provisioner property of the storage class (storage pool).
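                                              For example, the two values can be compared with the following commands (a sketch that assumes the class name from the example above; replace <your-storageclass> with your own storage class):

                                              kubectl get volumesnapshotclass hwameistor-storage-lvm-snapshot -o jsonpath='{.driver}'\nkubectl get storageclass <your-storageclass> -o jsonpath='{.provisioner}'\n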

                                          • Network:

                                            • Network settings can be combined as needed according to the table below.

                                              • Masquerade (NAT) + Calico: Install Spiderpool \u274c, Single NIC, Fixed IP \u274c, Live Migration \u2705
                                              • Masquerade (NAT) + Cilium: Install Spiderpool \u274c, Single NIC, Fixed IP \u274c, Live Migration \u2705
                                              • Masquerade (NAT) + Flannel: Install Spiderpool \u274c, Single NIC, Fixed IP \u274c, Live Migration \u2705
                                              • Bridge + OVS: Install Spiderpool \u2705, Multiple NICs, Fixed IP \u2705, Live Migration \u2705

                                            • Network modes are divided into Masquerade (NAT) and Bridge; the Bridge mode can only be used after the Spiderpool component has been installed.

                                              • The network mode of Masquerade (NAT) is selected by default, using the default network card eth0.
                                              • If the spiderpool component is installed in the cluster, you can choose the Bridge mode, and the Bridge mode supports multiple NICs.

                                            • Add Network Card

                                              • Passthrough / Bridge mode supports manual addition of network cards. Click Add NIC to configure the network card IP pool. Choose the Multus CR that matches the network mode; if none exists, you need to create it yourself.
                                              • If you turn on the Use Default IP Pool switch, the default IP pool from the Multus CR configuration is used. If the switch is off, manually select an IP pool.

                                          "},{"location":"en/admin/virtnest/quickstart/index.html#login-settings","title":"Login Settings","text":"
                                          • Username/Password: Allows login to the virtual machine using a username and password.
                                          • SSH: When selecting the SSH login method, you can bind an SSH key to the virtual machine for future login.
                                          "},{"location":"en/admin/virtnest/quickstart/index.html#create-with-yaml","title":"Create with YAML","text":"

                                          In addition to creating virtual machines using images, you can also create them more quickly using YAML files.

                                          Go to the Virtual Machine list page and click the Create with YAML button.

                                          Click to view an example YAML for creating a virtual machine
                                          apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  name: example\n  namespace: default\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        name: systemdisk-example\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 10Gi\n          storageClassName: rook-ceph-block\n        source:\n          registry:\n            url: >-\n              docker://release-ci.daocloud.io/virtnest/system-images/centos-7.9-x86_64:v1\n  runStrategy: Always\n  template:\n    spec:\n      domain:\n        cpu:\n          cores: 1\n        devices:\n          disks:\n            - disk:\n                bus: virtio\n              name: systemdisk-example\n            - disk:\n                bus: virtio\n              name: cloudinitdisk\n          interfaces:\n            - masquerade: {}\n              name: default\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 1Gi\n      networks:\n        - name: default\n          pod: {}\n      volumes:\n        - dataVolume:\n            name: systemdisk-example\n          name: systemdisk-example\n
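                                          If the example above is saved to a file (for example vm.yaml, a name chosen here for illustration), it can be applied and checked as follows:

                                          kubectl apply -f vm.yaml\nkubectl get vm example -n default\n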
                                          "},{"location":"en/admin/virtnest/quickstart/access.html","title":"Connect to Virtual Machines","text":"

                                          This article will introduce two methods for connecting to virtual machines: Console Access (VNC) and Terminal Access.

                                          "},{"location":"en/admin/virtnest/quickstart/access.html#terminal-access","title":"Terminal Access","text":"

                                          Accessing virtual machines through the terminal provides more flexibility and lightweight access. However, it does not directly display the graphical interface, has limited interactivity, and does not support multiple concurrent terminal sessions.

                                          Click Container Management in the left navigation bar, then click Virtual Machines to access the list page. Click the \u2507 button on the right side of the list to access the virtual machine via the terminal.

                                          "},{"location":"en/admin/virtnest/quickstart/access.html#console-access-vnc","title":"Console Access (VNC)","text":"

                                          Accessing virtual machines through VNC allows you to access and control the full graphical interface of the remote computer. It provides a more interactive experience and allows intuitive operation of the remote device. However, it may have some performance impact, and it does not support multiple concurrent terminal sessions.

                                          VNC is recommended for Windows systems.

                                          Click Container Management in the left navigation bar, then click Virtual Machines to access the list page. Click the \u2507 button on the right side of the list to access the virtual machine via Console Access (VNC).

                                          "},{"location":"en/admin/virtnest/quickstart/detail.html","title":"Virtual Machine Details","text":"

                                          After successfully creating a virtual machine, you can enter the VM Detail page to view Basic Information, Settings, GPU Settings, Overview, Storage, Network, Snapshot and Event List.

                                          Click Container Management in the left navigation bar, then click Clusters to enter the page of the cluster where the virtual machine is located. Click the VM Name to view the virtual machine details.

                                          "},{"location":"en/admin/virtnest/quickstart/detail.html#basic-information","title":"Basic Information","text":"

                                          The basic information of VM includes Status, Alias, Cluster, Namespace, IP, Label, Annotation, Node, Username, Password, and Create Time.

                                          • Status: The current running state of the virtual machine (Running / Processing / Power Off / Error).
                                          • IP: The IP of the virtual machine. For virtual machines with multiple network interfaces, multiple IPs will be assigned.
                                          "},{"location":"en/admin/virtnest/quickstart/detail.html#settings-gpu-settings","title":"Settings & GPU Settings","text":"

                                          Settings includes:

                                          • Operating System: The operating system installed on the virtual machine to execute programs.
                                          • Image Address: A link to a virtual hard disk file or operating system installation media, which is used to load and install the operating system in the virtual machine software.
                                          • Network Mode: The network mode configured for the virtual machine, including Bridge or Masquerade(NAT).
                                          • CPU & Memory: The resources allocated to the virtual machine.

                                          GPU Settings include GPU Type, GPU Model, and GPU Count.

                                          "},{"location":"en/admin/virtnest/quickstart/detail.html#other-information","title":"Other Information","text":"OverviewStorageNetworkSnapshotsEvent List

                                          It allows you to view its insight content. Please note that if insight-agent is not installed, overview information cannot be obtained.

                                          It displays the storage used by the virtual machine, including information about the system disk and data disk.

                                          It displays the network settings of the virtual machine, including Multus CR, NIC Name, IP Address and so on.

                                          If you have created snapshots, this part will display relative information. Restoring the virtual machine from snapshots is supported.

                                          The event list includes various state changes, operation records, and system messages during the lifecycle of the virtual machine.

                                          "},{"location":"en/admin/virtnest/quickstart/nodeport.html","title":"Accessing Virtual Machine via NodePort","text":"

                                          This page explains how to access a virtual machine using NodePort.

                                          "},{"location":"en/admin/virtnest/quickstart/nodeport.html#limitations-of-existing-access-methods","title":"Limitations of Existing Access Methods","text":"
                                          1. Virtual machines support access via VNC or console, but both methods have a limitation: they do not allow multiple terminals to be simultaneously online.

                                          2. Using a NodePort-type Service can solve this problem.

                                          "},{"location":"en/admin/virtnest/quickstart/nodeport.html#create-a-service","title":"Create a Service","text":"
                                          1. Using the Container Management Page

                                            • Select the cluster page where the target virtual machine is located and create a Service.
                                            • Select the access type as NodePort.
                                            • Choose the namespace (the namespace where the virtual machine resides).
                                            • Fill in the label selector as vm.kubevirt.io/name: your-vm-name.
                                            • Port Configuration: Choose TCP for the protocol, provide a custom port name, and set the service port and container port to 22.
                                          2. After successful creation, you can access the virtual machine by using ssh username@nodeip -p port.

                                          "},{"location":"en/admin/virtnest/quickstart/nodeport.html#create-the-service-via-kubectl","title":"Create the Service via kubectl","text":"
                                          1. Write the YAML file as follows:

                                            apiVersion: v1\nkind: Service\nmetadata:\n  name: test-ssh\nspec:\n  ports:\n  - name: tcp-ssh\n    nodePort: 32090\n    protocol: TCP\n    port: 22\n    targetPort: 22\n  selector:\n    vm.kubevirt.io/name: test-image-s3\n  type: NodePort\n
                                          2. Run the following command:

                                            kubectl apply -f your-svc.yaml\n
                                          3. After successful creation, you can access the virtual machine by using ssh username@nodeip -p 32090.
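                                          To confirm the Service and the allocated node port (a quick check, assuming the default namespace used above):

                                            kubectl get svc test-ssh -n default\n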

                                          "},{"location":"en/admin/virtnest/quickstart/update.html","title":"Update Virtual Machine","text":"

                                          This page explains how to update a virtual machine using either the form or a YAML file.

                                          "},{"location":"en/admin/virtnest/quickstart/update.html#prerequisite","title":"Prerequisite","text":"

                                          Before updating the CPU, memory, and data disks of the VM while it is powered on, the following prerequisite must be met:

                                          • Live migration is supported.
                                          "},{"location":"en/admin/virtnest/quickstart/update.html#update-virtual-machine-via-form","title":"Update Virtual Machine via Form","text":"

                                          On the virtual machine list page, click Update to enter the Update VM page.

                                          "},{"location":"en/admin/virtnest/quickstart/update.html#basic-information","title":"Basic Information","text":"

                                          On this page, Alias , Label and Annotation can be updated, while other information cannot. After completing the updates, click Next to proceed to the Image Settings page.

                                          "},{"location":"en/admin/virtnest/quickstart/update.html#image-settings","title":"Image Settings","text":"

                                          On this page, parameters such as Image Address, Operating System, and Version cannot be changed once selected. Users are allowed to update the GPU Quota, including enabling or disabling GPU support, selecting the GPU type, specifying the required model, and configuring the number of GPUs. A restart is required for the changes to take effect. After completing the updates, click Next to proceed to the Storage and Network page.

                                          "},{"location":"en/admin/virtnest/quickstart/update.html#storage-and-network","title":"Storage and Network","text":"

                                          On the Storage and Network page, the StorageClass and PVC Mode for the System Disk cannot be changed once selected. You can increase the disk capacity, but reducing it is not supported. Data disks can be freely added or removed. Network updates are not supported. After completing the updates, click Next to proceed to the Login Settings page.

                                          Note

                                          It is recommended to restart the virtual machine after modifying storage capacity or adding data disks to ensure the configuration takes effect.

                                          "},{"location":"en/admin/virtnest/quickstart/update.html#login-settings","title":"Login Settings","text":"

                                          On the Login Settings page, Username, Password, and SSH cannot be changed once set. After confirming your login information is correct, click OK to complete the update process.

                                          "},{"location":"en/admin/virtnest/quickstart/update.html#edit-yaml","title":"Edit YAML","text":"

                                          In addition to updating the virtual machine via forms, you can also quickly update it using a YAML file.

                                          Go to the virtual machine list page and click the Edit YAML button.

                                          "},{"location":"en/admin/virtnest/template/index.html","title":"Create Virtual Machines via Templates","text":"

                                          This guide explains how to create virtual machines using templates.

                                          With internal templates and custom templates, users can easily create new virtual machines. Additionally, we provide the ability to convert existing virtual machines into templates, allowing users to manage and utilize resources more flexibly.

                                          "},{"location":"en/admin/virtnest/template/index.html#create-with-template","title":"Create with Template","text":"

                                          Follow these steps to create a virtual machine using a template.

                                          1. Click Container Management in the left navigation menu, then click Virtual Machines to access the Virtual Machine Management page. On the virtual machine list page, click Create Virtual Machine and select Create with Template .

                                          2. On the template creation page, fill in the required information, including Basic Information, Template Config, Storage and Network, and Login Settings. Then, click OK in the bottom-right corner to complete the creation.

                                            The system will automatically return to the virtual machine list. By clicking \u2507 on the right side of the list, you can perform operations such as power off/restart, clone, update, create snapshot, convert to template, console access (VNC), and delete. The ability to clone and create snapshots depends on the selected storage pool.

                                          "},{"location":"en/admin/virtnest/template/index.html#basic-information","title":"Basic Information","text":"

                                          On the Create VMs page, enter the information according to the table below and click Next .

                                          • Name: Can contain up to 63 characters and can only include lowercase letters, numbers, and hyphens ( - ). The name must start and end with a lowercase letter or number. Names must be unique within the same namespace, and the name cannot be changed after the virtual machine is created.
                                          • Alias: Can include any characters, up to 60 characters in length.
                                          • Cluster: Select the cluster where the new virtual machine will be deployed.
                                          • Namespace: Select the namespace where the new virtual machine will be deployed. If the desired namespace is not found, you can follow the instructions on the page to create a new namespace.
                                          "},{"location":"en/admin/virtnest/template/index.html#template-config","title":"Template Config","text":"

                                          The template list will appear, and you can choose either an internal template or a custom template based on your needs.

                                          • Select an Internal Template: AI platform Virtual Machine provides several standard templates that cannot be edited or deleted. When selecting an internal template, the image source, operating system, image address, and other information will be based on the template and cannot be modified. GPU quota will also be based on the template but can be modified.

                                          • Select a Custom Template: These templates are created from virtual machine configurations and can be edited or deleted. When using a custom template, you can modify the image source and other information based on your specific requirements.

                                          "},{"location":"en/admin/virtnest/template/index.html#storage-and-network","title":"Storage and Network","text":"
                                          • Storage: By default, the system creates a rootfs system disk of VirtIO type for storing the operating system and data. Block storage is used by default. If you need to use the clone and snapshot functionality, make sure your storage pool (SC) supports the VolumeSnapshot feature and create a proper VolumeSnapshotClass for it. Please note that the storage pool (SC) has additional prerequisites that need to be met.

                                            • Prerequisites:

                                              • KubeVirt utilizes the VolumeSnapshot feature of the Kubernetes CSI driver to capture the persistent state of virtual machines. Therefore, you need to ensure that your virtual machine uses a StorageClass that supports VolumeSnapshots and is configured with the correct VolumeSnapshotClass.
                                              • Check the created SnapshotClass and confirm that the provisioner property matches the Driver property in the storage pool.
                                            • Supports adding one system disk and multiple data disks.

                                          • Network: If no configuration is made, the system will create a VirtIO type network by default.

                                          "},{"location":"en/admin/virtnest/template/index.html#login-settings","title":"Login Settings","text":"
                                          • Username/Password: You can log in to the virtual machine using a username and password.
                                          • SSH: When selecting SSH login, you can bind an SSH key to the virtual machine for future login purposes.
                                          "},{"location":"en/admin/virtnest/template/tep.html","title":"VM Template","text":"

                                          This guide explains the usage of internal VM templates and custom VM templates.

                                          Using both internal and custom templates, users can easily create new VMs. Additionally, we provide the ability to convert existing VMs into VM templates, allowing users to manage and utilize resources more flexibly.

                                          "},{"location":"en/admin/virtnest/template/tep.html#vm-templates","title":"VM Templates","text":"
                                          1. Click Container Management in the left navigation menu, then click VM Template to access the VM Template page. If the template is converted from a virtual machine configured with a GPU, the template will also include GPU information and will be displayed in the template list.

                                          2. Click the \u2507 on the right side of a template in the list. For internal templates, you can create VMs and view the YAML. For custom templates, you can create VMs, edit the YAML, and delete the template.

                                          "},{"location":"en/admin/virtnest/template/tep.html#internal-template","title":"Internal Template","text":"
                                          • The platform provides CentOS and Ubuntu as templates.

                                          "},{"location":"en/admin/virtnest/template/tep.html#custom-template","title":"Custom Template","text":"

                                          Custom templates are created from VM configurations. The following steps explain how to convert a VM configuration into a template.

                                          1. Click Container Management in the left navigation menu, then click Virtual Machines to access the list page. Click the \u2507 on the right side of a VM in the list to convert the configuration into a template. Only running or stopped VMs can be converted.

                                          2. Provide a name for the new template. A notification will indicate that the original VM will be preserved and remain available. After a successful conversion, a new entry will be added to the template list.

                                          "},{"location":"en/admin/virtnest/template/tep.html#template-details","title":"Template Details","text":"

                                          After successfully creating a template, you can click the template name to view the details of the VM, including Basic Information, GPU Settings, Storage, Network, and more. If you need to quickly deploy a new VM based on that template, simply click the Create VM button in the upper right corner of the page for easy operation.

                                          "},{"location":"en/admin/virtnest/vm/auto-migrate.html","title":"Automatic VM Drifting","text":"

                                          This article will explain how to seamlessly migrate running virtual machines to other nodes when a node in the cluster becomes inaccessible due to power outages or network failures, ensuring business continuity and data security.

                                          Compared to automatic drifting, live migration requires you to manually initiate the migration process through the interface, rather than having the system automatically trigger it.

                                          "},{"location":"en/admin/virtnest/vm/auto-migrate.html#prerequisites","title":"Prerequisites","text":"

                                          Before implementing automatic drifting, the following prerequisites must be met:

                                          • The virtual machine has not performed disk commit operations, or is using Rook-ceph or HwameiStor HA as the storage system.
                                          • The node has been unreachable for more than five minutes.
                                          • Ensure there are at least two available nodes in the cluster, and the virtual machine has not specified a scheduling node.
                                          • The virtual machine's launcher pod has been deleted.
                                          "},{"location":"en/admin/virtnest/vm/auto-migrate.html#steps","title":"Steps","text":"
                                          1. Check the status of the virtual machine launcher pod:

                                            kubectl get pod\n

                                            Check if the launcher pod is in a Terminating state.

                                          2. Force delete the launcher pod:

                                            If the launcher pod is in a Terminating state, you can force delete it with the following command:

                                            kubectl delete pod <launcher pod> --force\n

                                            Replace <launcher pod> with the name of your launcher pod.

                                          3. Wait for recreation and check the status:

                                            After deletion, the system will automatically recreate the launcher pod. Wait for its status to become running, then refresh the virtual machine list to see if the VM has successfully migrated to the new node.

                                          "},{"location":"en/admin/virtnest/vm/auto-migrate.html#notes","title":"Notes","text":"

                                          If using rook-ceph as storage, it needs to be configured in ReadWriteOnce mode:

                                          1. After force deleting the pod, you need to wait approximately six minutes for the launcher pod to start, or you can immediately start the pod using the following commands:

                                            kubectl get pv | grep <vm name>\nkubectl get VolumeAttachment | grep <pv name>\n

                                            Replace <vm name> and <pv name> with your virtual machine name and persistent volume name.

                                          2. Then delete the proper VolumeAttachment with the following command:

                                            kubectl delete VolumeAttachment <va name>\n

                                            Replace <va name> with the name of the VolumeAttachment found in the previous step.

                                          "},{"location":"en/admin/virtnest/vm/clone.html","title":"Cloning a Cloud Host","text":"

                                          This article will introduce how to clone a new cloud host.

                                          Users can clone a new cloud host, which will have the same operating system and system configuration as the original cloud host. This enables quick deployment and scaling, allowing for the rapid creation of new cloud hosts with similar configurations without the need to install from scratch.

                                          "},{"location":"en/admin/virtnest/vm/clone.html#prerequisites","title":"Prerequisites","text":"

                                          Before using the cloning feature, the following prerequisites must be met (which are the same as those for the snapshot feature):

                                          • Only cloud hosts that are not in an error state can use the cloning feature.
                                          • Install Snapshot CRDs, Snapshot Controller, and CSI Driver. For specific installation steps, refer to CSI Snapshotter.
                                          • Wait for the snapshot-controller component to be ready. This component will monitor events related to VolumeSnapshot and VolumeSnapshotContent and trigger related operations.
                                          • Wait for the CSI Driver to be ready, ensuring that the csi-snapshotter sidecar is running in the CSI Driver. The csi-snapshotter sidecar will monitor events related to VolumeSnapshotContent and trigger related operations.
                                            • If the storage is Rook-Ceph, refer to ceph-csi-snapshot
                                            • If the storage is HwameiStor, refer to huameistor-snapshot
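                                          To confirm that the snapshot components above are ready, a quick check is:

                                          kubectl get pods -A | grep snapshot-controller\nkubectl get crd volumesnapshots.snapshot.storage.k8s.io\n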
                                          "},{"location":"en/admin/virtnest/vm/clone.html#cloning-a-cloud-host_1","title":"Cloning a Cloud Host","text":"
                                          1. Click Container Management in the left navigation bar, then click Cloud Hosts to enter the list page. Click the \u2507 on the right side of the list to perform the clone operation on cloud hosts that are not in an error state.

                                          2. A popup will appear, requiring you to fill in the name and description for the new cloud host being cloned. The cloning operation may take some time, depending on the size of the cloud host and storage performance.

                                          3. After a successful clone, you can view the new cloud host in the cloud host list. The newly created cloud host will be in a powered-off state and will need to be manually powered on if required.

                                          4. It is recommended to take a snapshot of the original cloud host before cloning. If you encounter issues during the cloning process, please check whether the prerequisites are met and try to execute the cloning operation again.

                                          "},{"location":"en/admin/virtnest/vm/create-secret.html","title":"Create Secret","text":"

                                          When creating a virtual machine using Object Storage (S3) as the image source, you sometimes need to provide a secret to pass S3 authentication. The following describes how to create a secret that meets the requirements of the virtual machine.

                                          1. Click Container Management in the left navigation bar, then click Clusters , enter the details of the cluster where the virtual machine is located, click ConfigMaps & Secrets , select the Secrets , and click Create Secret .

                                          2. Enter the creation page, fill in the secret name, select the same namespace as the virtual machine, and note that you need to select the default type Opaque . The secret data must follow these principles:

                                            • accessKeyId: value represented in Base64 encoding
                                            • secretKey: value represented in Base64 encoding

                                          3. After successful creation, you can use the required secret when creating a virtual machine.
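                                          For reference, a minimal sketch of such a secret (the name s3-image-secret is hypothetical; both data values must be Base64-encoded, for example with echo -n 'your-key' | base64):

                                          apiVersion: v1\nkind: Secret\nmetadata:\n  name: s3-image-secret # hypothetical name\n  namespace: default # must match the namespace of the virtual machine\ntype: Opaque\ndata:\n  accessKeyId: <base64-encoded-access-key-id>\n  secretKey: <base64-encoded-secret-key>\n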

                                          "},{"location":"en/admin/virtnest/vm/cross-cluster-migrate.html","title":"Migrate VM across Clusters","text":"

                                          This feature currently does not have a UI, so you can follow the steps below.

                                          "},{"location":"en/admin/virtnest/vm/cross-cluster-migrate.html#use-cases","title":"Use Cases","text":"
                                          • A VM needs to be migrated to another cluster when the original cluster experiences a failure or performance degradation that makes the VM inaccessible.
                                          • A VM needs to be migrated to another cluster when performing planned maintenance or upgrades on the cluster.
                                          • A VM needs to be migrated to another cluster to match more appropriate resource configurations when the performance requirements of specific applications change and resource allocation needs to be adjusted.
                                          "},{"location":"en/admin/virtnest/vm/cross-cluster-migrate.html#prerequisites","title":"Prerequisites","text":"

                                          Before migrating a VM across clusters, the following prerequisites must be met:

                                          • Cluster network connectivity: Ensure that the network between the original cluster and the target migration cluster is accessible.
                                          • Same storage type: The target migration cluster must support the same storage type as the original cluster. For example, if the exporting cluster uses rook-ceph-block type StorageClass, the importing cluster must also support this type.
                                          • Enable VMExport Feature Gate in KubeVirt of the original cluster.
                                          "},{"location":"en/admin/virtnest/vm/cross-cluster-migrate.html#enable-vmexport-feature-gate","title":"Enable VMExport Feature Gate","text":"

                                          To activate the VMExport Feature Gate, run the following command in the original cluster. You can refer to How to activate a feature gate.

                                          kubectl edit kubevirt kubevirt -n virtnest-system\n

                                          This command modifies the featureGates to include VMExport.

                                          apiVersion: kubevirt.io/v1\nkind: KubeVirt\nmetadata:\n  name: kubevirt\n  namespace: virtnest-system\nspec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n        - DataVolumes\n        - LiveMigration\n        - VMExport\n
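                                          After saving, you can confirm that the gate is present with:

                                          kubectl get kubevirt kubevirt -n virtnest-system -o jsonpath='{.spec.configuration.developerConfiguration.featureGates}'\n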
                                          "},{"location":"en/admin/virtnest/vm/cross-cluster-migrate.html#configure-ingress-for-the-original-cluster","title":"Configure Ingress for the Original Cluster","text":"

                                          Using Nginx Ingress as an example, configure Ingress to point to the virt-exportproxy Service:

                                          apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: ingress-vm-export\n  namespace: virtnest-system\nspec:\n  tls:\n    - hosts:\n        - upgrade-test.com\n      secretName: nginx-tls\n  rules:\n    - host: upgrade-test.com\n      http:\n        paths:\n          - path: /\n            pathType: Prefix\n            backend:\n              service:\n                name: virt-exportproxy\n                port:\n                  number: 8443\n  ingressClassName: nginx\n
                                          "},{"location":"en/admin/virtnest/vm/cross-cluster-migrate.html#migration-steps","title":"Migration Steps","text":"
                                          1. Create a VirtualMachineExport CR.

                                            • If cold migration is performed while the VM is powered off :

                                              apiVersion: v1\nkind: Secret\nmetadata:\n  name: example-token # Export Token used by the VM\n  namespace: default # Namespace where the VM resides\nstringData:\n  token: 1234567890ab # Export the used Token (Modifiable)\n\n---\napiVersion: export.kubevirt.io/v1alpha1\nkind: VirtualMachineExport\nmetadata:\n  name: example-export # Export name (Modifiable)\n  namespace: default # Namespace where the VM resides\nspec:\n  tokenSecretRef: example-token # Must match the name of the token created above\n  source:\n    apiGroup: \"kubevirt.io\"\n    kind: VirtualMachine\n    name: testvm # VM name\n
                                            • If hot migration is performed using a VM snapshot while the VM is powered on :

                                              apiVersion: v1\nkind: Secret\nmetadata:\n  name: example-token # Export Token used by VM\n  namespace: default # Namespace where the VM resides\nstringData:\n  token: 1234567890ab # Export the used Token (Modifiable)\n\n---\napiVersion: export.kubevirt.io/v1alpha1\nkind: VirtualMachineExport\nmetadata:\n  name: export-snapshot # Export name (Modifiable)\n  namespace: default # Namespace where the VM resides\nspec:\n  tokenSecretRef: example-token # Must match the name of the token created above\n  source:\n    apiGroup: \"snapshot.kubevirt.io\"\n    kind: VirtualMachineSnapshot\n    name: export-snap-202407191524 # Name of the proper VM snapshot\n
                                          2. Check if the VirtualMachineExport is ready:

                                            # Replace example-export with the name of the created VirtualMachineExport\nkubectl get VirtualMachineExport example-export -n default\n\nNAME             SOURCEKIND       SOURCENAME   PHASE\nexample-export   VirtualMachine   testvm       Ready\n
                                          3. Once the VirtualMachineExport is ready, export the VM YAML.

                                            • If virtctl is installed, you can use the following command to export the VM YAML:

                                              # Replace example-export with the name of the created VirtualMachineExport\n# Specify the namespace with -n\nvirtctl vmexport download example-export --manifest --include-secret --output=manifest.yaml\n
                                            • If virtctl is not installed, you can use the following commands to export the VM YAML:

                                              # Replace example-export with the name and  namespace of the created VirtualMachineExport\nmanifesturl=$(kubectl get VirtualMachineExport example-export -n default -o=jsonpath='{.status.links.internal.manifests[0].url}')\nsecreturl=$(kubectl get VirtualMachineExport example-export -n default -o=jsonpath='{.status.links.internal.manifests[1].url}')\n# Replace with the secret name and namespace\ntoken=$(kubectl get secret example-token -n default -o=jsonpath='{.data.token}' | base64 -d)\n\ncurl -H \"Accept: application/yaml\" -H \"x-kubevirt-export-token: $token\"  --insecure  $secreturl > manifest.yaml\ncurl -H \"Accept: application/yaml\" -H \"x-kubevirt-export-token: $token\"  --insecure  $manifesturl >> manifest.yaml\n
                                          4. Import VM.

                                            Copy the exported manifest.yaml to the target migration cluster and run the following command (if the namespace does not exist, it needs to be created in advance):

                                            kubectl apply -f manifest.yaml\n
                                            After the VM is successfully created, restart it. Once the VM is running successfully, delete the original VM in the original cluster (do not delete the original VM if the new one has not started successfully).
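                                            The state of the imported VM on the target cluster can be confirmed with (assuming the default namespace):

                                            kubectl get vm -n default\nkubectl get vmi -n default\n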

                                          "},{"location":"en/admin/virtnest/vm/health-check.html","title":"Health Check","text":"

                                          When configuring the liveness and readiness probes for a cloud host, the process is similar to that of Kubernetes configuration. This article will introduce how to configure health check parameters for a cloud host using YAML.

                                          However, it is important to note that the configuration must be done after the cloud host has been successfully created and while it is in a powered-off state.

                                          "},{"location":"en/admin/virtnest/vm/health-check.html#configuring-http-liveness-probe","title":"Configuring HTTP Liveness Probe","text":"
                                          1. Configure livenessProbe.httpGet in spec.template.spec.
                                          2. Modify cloudInitNoCloud to start an HTTP server.

                                            Click to view YAML example
                                            apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n    virtnest.io/alias-name: ''\n    virtnest.io/image-secret: ''\n    virtnest.io/image-source: docker\n    virtnest.io/os-image: release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  creationTimestamp: '2024-10-15T02:39:45Z'\n  finalizers:\n    - kubevirt.io/virtualMachineControllerFinalize\n  generation: 1\n  labels:\n    virtnest.io/os-family: Ubuntu\n    virtnest.io/os-version: '22.04'\n  name: test-probe\n  namespace: amamba-team\n  resourceVersion: '254032135'\n  uid: 6d92779d-7415-4721-8c7b-a2dde163d758\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        creationTimestamp: null\n        name: test-probe-rootdisk\n        namespace: amamba-team\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 10Gi\n          storageClassName: hwameistor-storage-lvm-hdd\n        source:\n          registry:\n            url: >-\n          docker://release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  runStrategy: Halted\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio\n              name: rootdisk\n            - disk:\n                bus: virtio\n              name: cloudinitdisk\n          interfaces:\n            - masquerade: {}\n              name: default\n        machine:\n          type: q35\n        memory:\n          guest: 2Gi\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n        - name: default\n          pod: {}\n      livenessProbe:\n        initialDelaySeconds: 120\n        periodSeconds: 20\n        httpGet:\n          port: 1500\n        timeoutSeconds: 10\n      volumes:\n        - dataVolume:\n            name: test-probe-rootdisk\n          name: rootdisk\n        - cloudInitNoCloud:\n            userData: |\n              #cloud-config\n              ssh_pwauth: true\n              disable_root: false\n              chpasswd: {\"list\": \"root:dangerous\", expire: False}\n              runcmd:\n                - sed -i \"/#\\?PermitRootLogin/s/^.*$/PermitRootLogin yes/g\" /etc/ssh/sshd_config\n                - systemctl restart ssh.service\n                - dhclient -r && dhclient\n                - apt-get update && apt-get install -y ncat\n                - [\"systemd-run\", \"--unit=httpserver\", \"ncat\", \"-klp\", \"1500\", \"-e\", '/usr/bin/echo -e HTTP/1.1 200 OK\\nContent-Length: 12\\n\\nHello World!']\n          name: cloudinitdisk\n
                                          3. The configuration of userData may vary depending on the operating system (such as Ubuntu/Debian or CentOS). The main differences are:

                                            • Package manager:

                                              Ubuntu/Debian uses apt-get as the package manager. CentOS uses yum as the package manager.

                                            • SSH service restart command:

                                              Ubuntu/Debian uses systemctl restart ssh.service. CentOS uses systemctl restart sshd.service (note that for CentOS 7 and earlier versions, it uses service sshd restart).

                                            • Installed packages:

                                              Ubuntu/Debian installs ncat. CentOS installs nmap-ncat (because ncat may not be available in the default repository for CentOS).
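                                          Putting these differences together, a CentOS-flavored runcmd sketch (hypothetical, mirroring the Ubuntu example above) would be:

                                            runcmd:\n  - sed -i \"/#\\?PermitRootLogin/s/^.*$/PermitRootLogin yes/g\" /etc/ssh/sshd_config\n  - systemctl restart sshd.service\n  - dhclient -r && dhclient\n  - yum install -y nmap-ncat\n  - [\"systemd-run\", \"--unit=httpserver\", \"ncat\", \"-klp\", \"1500\", \"-e\", '/usr/bin/echo -e HTTP/1.1 200 OK\\nContent-Length: 12\\n\\nHello World!']\n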

                                          "},{"location":"en/admin/virtnest/vm/health-check.html#configuring-tcp-liveness-probe","title":"Configuring TCP Liveness Probe","text":"

                                          Configure livenessProbe.tcpSocket in spec.template.spec.

                                          Click to view YAML example configuration
                                          apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n    virtnest.io/alias-name: ''\n    virtnest.io/image-secret: ''\n    virtnest.io/image-source: docker\n    virtnest.io/os-image: release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  creationTimestamp: '2024-10-15T02:39:45Z'\n  finalizers:\n    - kubevirt.io/virtualMachineControllerFinalize\n  generation: 1\n  labels:\n    virtnest.io/os-family: Ubuntu\n    virtnest.io/os-version: '22.04'\n  name: test-probe\n  namespace: amamba-team\n  resourceVersion: '254032135'\n  uid: 6d92779d-7415-4721-8c7b-a2dde163d758\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        creationTimestamp: null\n        name: test-probe-rootdisk\n        namespace: amamba-team\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 10Gi\n          storageClassName: hwameistor-storage-lvm-hdd\n        source:\n          registry:\n            url: >-\n          docker://release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  runStrategy: Halted\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio\n              name: rootdisk\n            - disk:\n                bus: virtio\n              name: cloudinitdisk\n          interfaces:\n            - masquerade: {}\n              name: default\n        machine:\n          type: q35\n        memory:\n          guest: 2Gi\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n        - name: default\n          pod: {}\n      livenessProbe:\n        initialDelaySeconds: 120\n        periodSeconds: 20\n        tcpSocket:\n          port: 1500\n        timeoutSeconds: 10\n      volumes:\n        - dataVolume:\n            name: test-probe-rootdisk\n          name: rootdisk\n        - cloudInitNoCloud:\n            userData: |\n              #cloud-config\n              ssh_pwauth: true\n              disable_root: false\n              chpasswd: {\"list\": \"root:dangerous\", expire: False}\n              runcmd:\n                - sed -i \"/#\\?PermitRootLogin/s/^.*$/PermitRootLogin yes/g\" /etc/ssh/sshd_config\n                - systemctl restart ssh.service\n                - dhclient -r && dhclient\n                - apt-get update && apt-get install -y ncat\n                - [\"systemd-run\", \"--unit=httpserver\", \"ncat\", \"-klp\", \"1500\", \"-e\", '/usr/bin/echo -e HTTP/1.1 200 OK\\nContent-Length: 12\\n\\nHello World!']\n          name: cloudinitdisk\n
                                          "},{"location":"en/admin/virtnest/vm/health-check.html#configuring-readiness-probes","title":"Configuring Readiness Probes","text":"

                                           Configure readinessProbe in spec.template.spec.

                                          Click to view YAML example configuration
                                           apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n    virtnest.io/alias-name: ''\n    virtnest.io/image-secret: ''\n    virtnest.io/image-source: docker\n    virtnest.io/os-image: release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  creationTimestamp: '2024-10-15T02:39:45Z'\n  finalizers:\n    - kubevirt.io/virtualMachineControllerFinalize\n  generation: 1\n  labels:\n    virtnest.io/os-family: Ubuntu\n    virtnest.io/os-version: '22.04'\n  name: test-probe\n  namespace: amamba-team\n  resourceVersion: '254032135'\n  uid: 6d92779d-7415-4721-8c7b-a2dde163d758\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        creationTimestamp: null\n        name: test-probe-rootdisk\n        namespace: amamba-team\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 10Gi\n          storageClassName: hwameistor-storage-lvm-hdd\n        source:\n          registry:\n            url: >-\n              docker://release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  runStrategy: Halted\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio\n              name: rootdisk\n            - disk:\n                bus: virtio\n              name: cloudinitdisk\n          interfaces:\n            - masquerade: {}\n              name: default\n        machine:\n          type: q35\n        memory:\n          guest: 2Gi\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n        - name: default\n          pod: {}\n      readinessProbe:\n        initialDelaySeconds: 120\n        periodSeconds: 20\n        httpGet:\n          port: 1500\n        timeoutSeconds: 10\n      volumes:\n        - dataVolume:\n            name: test-probe-rootdisk\n          name: rootdisk\n        - cloudInitNoCloud:\n            userData: |\n              #cloud-config\n              ssh_pwauth: true\n              disable_root: false\n              chpasswd: {\"list\": \"root:dangerous\", expire: False}\n              runcmd:\n                - sed -i \"/#\\?PermitRootLogin/s/^.*$/PermitRootLogin yes/g\" /etc/ssh/sshd_config\n                - systemctl restart ssh.service\n                - dhclient -r && dhclient\n                - apt-get update && apt-get install -y ncat\n                - [\"systemd-run\", \"--unit=httpserver\", \"ncat\", \"-klp\", \"1500\", \"-e\", '/usr/bin/echo -e HTTP/1.1 200 OK\\nContent-Length: 12\\n\\nHello World!']\n          name: cloudinitdisk\n
                                          "},{"location":"en/admin/virtnest/vm/live-migration.html","title":"Live Migration","text":"

                                          This article will explain how to migrate a virtual machine from one node to another.

                                          When a node needs maintenance or upgrades, users can seamlessly migrate running virtual machines to other nodes while ensuring business continuity and data security.

                                          "},{"location":"en/admin/virtnest/vm/live-migration.html#prerequisites","title":"Prerequisites","text":"

                                          Before using live migration, the following prerequisites must be met:

                                          • Only running virtual machines can use the live migration feature.
                                          • If you need to use live migration, make sure that your PVC access mode is ReadWriteMany.
                                          • The current cluster must have at least 2 usable nodes.
                                           • When using live migration, either Masquerade or Bridge can be selected as the network mode.
                                          "},{"location":"en/admin/virtnest/vm/live-migration.html#live-migration_1","title":"Live Migration","text":"
                                           1. Click Container Management on the left navigation bar, then click Virtual Machines to enter the list page. Click \u2507 on the right side of the list to migrate a running virtual machine. In this example, the virtual machine is currently on the node controller-node-1.

                                          2. A pop-up box will appear, indicating that during live migration, the running virtual machine instances will be migrated to another node, but the target node cannot be predetermined. Please ensure that other nodes have sufficient resources.

                                           3. After a successful migration, you can view the node information in the virtual machine list. In this example, the virtual machine has been migrated to controller-node-2.
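
                                           For reference, a migration can also be triggered outside the UI. A minimal sketch, assuming the KubeVirt virtctl CLI is available and using the illustrative VM name test-probe:

                                           virtctl migrate test-probe -n amamba-team\n

                                           The same action corresponds to creating a VirtualMachineInstanceMigration object:

                                           apiVersion: kubevirt.io/v1\nkind: VirtualMachineInstanceMigration\nmetadata:\n  name: migration-job  # illustrative name\n  namespace: amamba-team\nspec:\n  vmiName: test-probe  # running virtual machine instance to migrate\n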

                                          "},{"location":"en/admin/virtnest/vm/migratiom.html","title":"Cold Migration within the Cluster","text":"

                                          This article will introduce how to move a cloud host from one node to another within the same cluster while it is powered off.

                                          The main feature of cold migration is that the cloud host will be offline during the migration process, which may impact business continuity. Therefore, careful planning of the migration time window is necessary, taking into account business needs and system availability. Typically, cold migration is suitable for scenarios where downtime requirements are not very strict.

                                          "},{"location":"en/admin/virtnest/vm/migratiom.html#prerequisites","title":"Prerequisites","text":"

                                          Before using cold migration, the following prerequisites must be met:

                                          • The cloud host must be powered off to perform cold migration.
                                          "},{"location":"en/admin/virtnest/vm/migratiom.html#cold-migration","title":"Cold Migration","text":"
                                           1. Click __Container Management__ in the left navigation bar, then click __Cloud Hosts__ to enter the list page. Click the \u2507 on the right side of the list to initiate the migration action for a cloud host that is in the powered-off state. The current node of a cloud host cannot be viewed while it is powered off, so plan ahead or check the node while the host is still powered on.

                                            Note

                                            If you have used local-path in the storage pool of the original node, there may be issues during cross-node migration. Please choose carefully.

                                           2. After clicking migrate, a prompt will appear allowing you to migrate to a specific node or to a random one. If you need to change the storage pool, make sure the target node has an available storage pool and sufficient resources.

                                           3. The migration will take some time, so please be patient. After it succeeds, power the cloud host back on to verify that the migration was successful. In this example, the cloud host has already been powered on to check the result.

                                          "},{"location":"en/admin/virtnest/vm/monitor.html","title":"Virtual Machine Monitoring","text":"

                                           Virtual machine monitoring is based on the Grafana dashboards open-sourced by KubeVirt, which generate a monitoring dashboard for each virtual machine.

                                           Monitoring information of the virtual machine provides better insights into its resource consumption, such as CPU, memory, storage, and network usage. This information helps optimize and plan resources, improving overall resource utilization efficiency.

                                          "},{"location":"en/admin/virtnest/vm/monitor.html#prerequisites","title":"Prerequisites","text":"

                                          Before viewing the virtual machine monitoring information, the following prerequisites need to be met:

                                          • The insight-agent component needs to be installed in the same cluster where the virtual machine is located.
                                          "},{"location":"en/admin/virtnest/vm/monitor.html#virtual-machine-monitoring_1","title":"Virtual Machine Monitoring","text":"

                                           Navigate to the VM Detail page and click Overview to view the monitoring content of the virtual machine. Note that without the insight-agent component installed, monitoring information cannot be obtained. The following metrics are displayed:

                                           • Total CPU, CPU Usage, Memory Total, Memory Usage.

                                           • CPU Utilisation: the percentage of CPU resources currently used by the virtual machine.

                                           • Memory Utilisation: the percentage of memory currently used by the virtual machine out of the total available memory.

                                           • Network Traffic by Virtual Machines: the amount of network data sent and received by the virtual machine during a specific time period.

                                           • Network Packet Loss Rate: the proportion of lost packets out of the total packets sent during data transmission.

                                           • Network Packet Error Rate: the rate of errors that occur during network transmission.

                                           • Storage Traffic: the speed and volume at which the virtual machine reads from and writes to the disk within a certain time period.

                                           • Storage IOPS: the number of input/output operations the virtual machine performs per second.

                                           • Storage Delay: the latency the virtual machine experiences when performing disk read and write operations.
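
                                           These panels are backed by KubeVirt exporter metrics collected by insight-agent. As an illustrative example (metric names may vary by KubeVirt version), the network receive rate of a VM can be queried through the Prometheus HTTP API:

                                           curl -s 'http://<prometheus-address>/api/v1/query' \\\n  --data-urlencode 'query=rate(kubevirt_vmi_network_receive_bytes_total{namespace=\"amamba-team\", name=\"test-probe\"}[5m])'\n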

                                          "},{"location":"en/admin/virtnest/vm/scheduled-snapshot.html","title":"Scheduled Snapshot","text":"

                                          This article introduces how to create snapshots for VMs on a schedule.

                                          You can create scheduled snapshots for VMs, providing continuous protection for data and ensuring effective data recovery in case of data loss, corruption, or deletion.

                                          "},{"location":"en/admin/virtnest/vm/scheduled-snapshot.html#steps","title":"Steps","text":"
                                          1. In the left navigation bar, click Container Management -> Clusters to select the proper cluster where the target VM is located. After entering the cluster, click Workloads -> CronJobs, and choose Create from YAML to create a scheduled task. Refer to the following YAML example to create snapshots for the specified VM on a schedule.

                                            Click to view the YAML example for creating a scheduled task
                                            apiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: xxxxx-xxxxx-cronjob # Scheduled task name (Customizable)\n  namespace: virtnest-system # Do not modify the namespace\nspec:\n  schedule: \"5 * * * *\" # Modify the scheduled task execution interval as needed\n  concurrencyPolicy: Allow\n  suspend: false\n  successfulJobsHistoryLimit: 10\n  failedJobsHistoryLimit: 3\n  startingDeadlineSeconds: 60\n  jobTemplate:\n    spec:\n      template:\n        metadata:\n          labels:\n            virtnest.io/vm: xxxx # Modify to the name of the VM that needs to be snapshotted\n            virtnest.io/namespace: xxxx # Modify to the namespace where the VM is located\n        spec:\n          serviceAccountName: kubevirt-operator\n          containers:\n            - name: snapshot-job\n              image: release.daocloud.io/virtnest/tools:v0.1.5 # For offline environments, modify the registry address to the proper registry address of the cluster\n              imagePullPolicy: IfNotPresent\n              env:\n                - name: NS\n                  valueFrom:\n                    fieldRef:\n                      fieldPath: metadata.labels['virtnest.io/namespace']\n                - name: VM\n                  valueFrom:\n                    fieldRef:\n                      fieldPath: metadata.labels['virtnest.io/vm']\n              command:\n                - /bin/sh\n                - -c\n                - |\n                  export SUFFIX=$(date +\"%Y%m%d-%H%M%S\")\n                  cat <<EOF | kubectl apply -f -\n                  apiVersion: snapshot.kubevirt.io/v1alpha1\n                  kind: VirtualMachineSnapshot\n                  metadata:\n                    name: $(VM)-snapshot-$SUFFIX\n                    namespace: $(NS)\n                  spec:\n                    source:\n                      apiGroup: kubevirt.io\n                      kind: VirtualMachine\n                      name: $(VM)\n                  EOF\n          restartPolicy: OnFailure\n
                                           2. After the scheduled task is created and runs successfully, click Virtual Machines on the list page and select the target VM. On its details page you can view the snapshot list.
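
                                             The generated snapshots can also be listed from the command line. A quick check, assuming kubectl access (replace the namespace with the VM's namespace):

                                             kubectl get virtualmachinesnapshot -n <vm-namespace>\n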

                                          "},{"location":"en/admin/virtnest/vm/snapshot.html","title":"Snapshot Management","text":"

                                          This guide explains how to create snapshots for virtual machines and restore them.

                                          You can create snapshots for virtual machines to save the current state of the virtual machine. A snapshot can be restored multiple times, and each time the virtual machine will be reverted to the state when the snapshot was created. Snapshots are commonly used for backup, recovery and rollback.

                                          "},{"location":"en/admin/virtnest/vm/snapshot.html#prerequisites","title":"Prerequisites","text":"

                                          Before using the snapshots, the following prerequisites need to be met:

                                          • Only virtual machines in a non-error state can use the snapshot function.
                                          • Install Snapshot CRDs, Snapshot Controller, and CSI Driver. For detailed installation steps, refer to CSI Snapshotter.
                                          • Wait for the snapshot-controller component to be ready. This component monitors events related to VolumeSnapshot and VolumeSnapshotContent and triggers specific actions.
                                          • Wait for the CSI Driver to be ready. Ensure that the csi-snapshotter sidecar is running within the CSI Driver. The csi-snapshotter sidecar monitors events related to VolumeSnapshotContent and triggers specific actions.
                                            • If the storage is rook-ceph, refer to ceph-csi-snapshot.
                                             • If the storage is HwameiStor, refer to hwameistor-snapshot.
                                          "},{"location":"en/admin/virtnest/vm/snapshot.html#create-a-snapshot","title":"Create a Snapshot","text":"
                                          1. Click Container Management in the left navigation menu, then click Virtual Machines to access the list page. Click the \u2507 on the right side of the list for a virtual machine to perform snapshot operations (only available for non-error state virtual machines).

                                          2. A dialog box will pop up, prompting you to input a name and description for the snapshot. Please note that the creation process may take a few minutes, during which you won't be able to perform any operations on the virtual machine.

                                           3. After successfully creating the snapshot, you can view its details in the virtual machine's information section. There you can edit the description, restore from the snapshot, delete it, and perform other operations.

                                          "},{"location":"en/admin/virtnest/vm/snapshot.html#restore-from-a-snapshot","title":"Restore from a Snapshot","text":"
                                          1. Click Restore from Snapshot and provide a name for the virtual machine recovery record. The recovery operation may take some time to complete, depending on the size of the snapshot and other factors. After a successful recovery, the virtual machine will be restored to the state when the snapshot was created.

                                           2. After some time, you can scroll down to the snapshot information to view all the recovery records for the current snapshot, and locate each recovery point.
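
                                           For reference, a restore corresponds to a VirtualMachineRestore object in the KubeVirt snapshot API. A minimal sketch, with all names illustrative:

                                           apiVersion: snapshot.kubevirt.io/v1alpha1\nkind: VirtualMachineRestore\nmetadata:\n  name: test-probe-restore  # illustrative name\n  namespace: amamba-team\nspec:\n  target:\n    apiGroup: kubevirt.io\n    kind: VirtualMachine\n    name: test-probe\n  virtualMachineSnapshotName: test-probe-snapshot  # an existing VirtualMachineSnapshot\n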

                                          "},{"location":"en/admin/virtnest/vm/vm-network.html","title":"Virtual Machine Networking","text":"

                                          This article will introduce how to configure network information when creating virtual machines.

                                          In virtual machines, network management is a crucial part that allows us to manage and configure network connections for virtual machines in a Kubernetes environment. It can be configured according to different needs and scenarios, achieving a more flexible and diverse network architecture.

                                           1. Single NIC Scenario: For simple applications that only require basic network connectivity, or when resources are constrained, using a single NIC saves network resources and avoids waste.
                                           2. Multiple NIC Scenario: When security isolation between different network environments is required, multiple NICs can be used to divide network zones and to control and manage traffic.
                                          "},{"location":"en/admin/virtnest/vm/vm-network.html#prerequisites","title":"Prerequisites","text":"
                                          1. When selecting the Bridge network mode, some information needs to be configured in advance:

                                            • Install and run Open vSwitch on the host nodes. See Ovs-cni Quick Start.
                                            • Configure Open vSwitch bridge on the host nodes. See vswitch for instructions.
                                            • Install Spiderpool. See installing spiderpool for instructions. By default, Spiderpool will install both Multus CNI and Ovs CNI.
                                             • Create a Multus CR of type ovs. You can create a custom Multus CR or use YAML for creation (see the example after this list).
                                            • Create a subnet and IP pool. See creating subnets and IP pools .
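
                                             A minimal sketch of an ovs-type NetworkAttachmentDefinition, assuming the Open vSwitch bridge br1 from the previous steps and Spiderpool as the IPAM plugin (names are illustrative):

                                             apiVersion: k8s.cni.cncf.io/v1\nkind: NetworkAttachmentDefinition\nmetadata:\n  name: ovs-vlan  # illustrative name\n  namespace: kube-system\nspec:\n  config: |\n    {\n      \"cniVersion\": \"0.3.1\",\n      \"type\": \"ovs\",\n      \"bridge\": \"br1\",\n      \"ipam\": {\n        \"type\": \"spiderpool\"\n      }\n    }\n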
                                          2. Network configuration can be combined according to the table information.

                                             | Network Mode | CNI | Spiderpool Installed | NIC Mode | Fixed IP | Live Migration |
                                             | --- | --- | --- | --- | --- | --- |
                                             | Masquerade (NAT) | Calico | \u274c | Single NIC | \u274c | \u2705 |
                                             | Masquerade (NAT) | Cilium | \u274c | Single NIC | \u274c | \u2705 |
                                             | Masquerade (NAT) | Flannel | \u274c | Single NIC | \u274c | \u2705 |
                                             | Bridge | OVS | \u2705 | Multiple NIC | \u2705 | \u2705 |

                                          3. Network Mode: There are two modes - Masquerade (NAT) and Bridge. Bridge mode requires the installation of the spiderpool component.

                                            1. The default selection is Masquerade (NAT) network mode using the eth0 default NIC.

                                            2. If the cluster has the spiderpool component installed, then Bridge mode can be selected. The Bridge mode supports multiple NICs.

                                              • Ensure all prerequisites are met before selecting the Bridge mode.
                                          4. Adding NICs

                                             1. Bridge mode supports manually adding NICs. Click Add NIC to configure the NIC IP pool. Choose a Multus CR that matches the network mode; if none is available, create one manually.

                                            2. If the Use Default IP Pool switch is turned on, it will use the default IP pool in the multus CR configuration. If turned off, manually select the IP pool.

                                          "},{"location":"en/admin/virtnest/vm/vm-network.html#network-configuration","title":"Network Configuration","text":""},{"location":"en/admin/virtnest/vm/vm-sc.html","title":"Storage for Virtual Machine","text":"

                                          This article will introduce how to configure storage when creating a virtual machine.

                                           Storage is closely related to virtual machine functionality, mainly providing flexible and scalable storage capabilities through Kubernetes persistent volumes and storage classes. For example, storing virtual machine images in PVCs enables operations such as cloning and snapshotting.

                                          "},{"location":"en/admin/virtnest/vm/vm-sc.html#deploying-different-storage","title":"Deploying Different Storage","text":"

                                          Before using virtual machine storage functionality, different storage needs to be deployed according to requirements:

                                           1. Refer to Deploying hwameistor, or install hwameistor-operator from the Helm template in the container management module.
                                           2. Refer to Deploying rook-ceph.
                                           3. Deploy local-path: run kubectl apply -f to create the following YAML:
                                          Click to view complete YAML
                                          ---\napiVersion: v1\nkind: Namespace\nmetadata:\n  name: local-path-storage\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: local-path-provisioner-service-account\n  namespace: local-path-storage\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: local-path-provisioner-role\nrules:\n- apiGroups: [\"\"]\n  resources: [\"nodes\", \"persistentvolumeclaims\", \"configmaps\"]\n  verbs: [\"get\", \"list\", \"watch\"]\n- apiGroups: [\"\"]\n  resources: [\"endpoints\", \"persistentvolumes\", \"pods\"]\n  verbs: [\"*\"]\n- apiGroups: [\"\"]\n  resources: [\"events\"]\n  verbs: [\"create\", \"patch\"]\n- apiGroups: [\"storage.k8s.io\"]\n  resources: [\"storageclasses\"]\n  verbs: [\"get\", \"list\", \"watch\"]\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: local-path-provisioner-bind\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: local-path-provisioner-role\nsubjects:\n- kind: ServiceAccount\n  name: local-path-provisioner-service-account\n  namespace: local-path-storage\n\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: local-path-provisioner\n  namespace: local-path-storage\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: local-path-provisioner\n  template:\n    metadata:\n      labels:\n        app: local-path-provisioner\n    spec:\n      serviceAccountName: local-path-provisioner-service-account\n      containers:\n      - name: local-path-provisioner\n        image: rancher/local-path-provisioner:v0.0.22\n        imagePullPolicy: IfNotPresent\n        command:\n        - local-path-provisioner\n        - --debug\n        - start\n        - --config\n        - /etc/config/config.json\n        volumeMounts:\n        - name: config-volume\n          mountPath: /etc/config/\n        env:\n        - name: POD_NAMESPACE\n          valueFrom:\n            fieldRef:\n              fieldPath: metadata.namespace\n      volumes:\n      - name: config-volume\n        configMap:\n          name: local-path-config\n\n---\napiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: local-path\nprovisioner: rancher.io/local-path\nvolumeBindingMode: WaitForFirstConsumer\nreclaimPolicy: Delete\n\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: local-path-config\n  namespace: local-path-storage\ndata:\n  config.json: |-\n    {\n      \"nodePathMap\": [\n        {\n          \"node\": \"DEFAULT_PATH_FOR_NON_LISTED_NODES\",\n          \"paths\": [\"/opt/local-path-provisioner\"]\n        }\n      ]\n    }\n  setup: |-\n    #!/bin/sh\n    set -eu\n    mkdir -m 0777 -p \"$VOL_DIR\"\n  teardown: |-\n    #!/bin/sh\n    set -eu\n    rm -rf \"$VOL_DIR\"\n  helperPod.yaml: |-\n    apiVersion: v1\n    kind: Pod\n    metadata:\n      name: helper-pod\n    spec:\n      containers:\n      - name: helper-pod\n        image: busybox\n        imagePullPolicy: IfNotPresent\n
                                          "},{"location":"en/admin/virtnest/vm/vm-sc.html#virtual-machine-storage","title":"Virtual Machine Storage","text":"
                                          1. System Disk: By default, a VirtIO type rootfs system disk is created for the system to store the operating system and data.

                                          2. Data Disk: The data disk is a storage device in the virtual machine used to store user data, application data, or other files unrelated to the operating system. Compared to the system disk, the data disk is optional and can be dynamically added or removed as needed. The capacity of the data disk can also be flexibly configured according to requirements.

                                             Block storage is used by default. If you need to use the cloning and snapshot functions, make sure your storage pool has created the proper VolumeSnapshotClass, as shown in the example below. If you need to use live migration, make sure your storage supports and uses the ReadWriteMany access mode.

                                             In most cases, such a VolumeSnapshotClass is not created automatically during installation, so you need to create it manually. Here is an example of creating a VolumeSnapshotClass for HwameiStor:

                                            kind: VolumeSnapshotClass\napiVersion: snapshot.storage.k8s.io/v1\nmetadata:\n  name: hwameistor-storage-lvm-snapshot\n  annotations:\n    snapshot.storage.kubernetes.io/is-default-class: \"true\"\nparameters:\n  snapsize: \"1073741824\"\ndriver: lvm.hwameistor.io\ndeletionPolicy: Delete\n
                                            • Execute the following command to check if the VolumeSnapshotClass has been successfully created.

                                              kubectl get VolumeSnapshotClass\n
                                             • View the created VolumeSnapshotClass and confirm that its Driver property matches the Provisioner of the storage pool's StorageClass.
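
                                             Illustrative output (values depend on your environment):

                                               NAME                              DRIVER              DELETIONPOLICY   AGE\nhwameistor-storage-lvm-snapshot   lvm.hwameistor.io   Delete           1m\n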

                                          "},{"location":"en/admin/virtnest/vm-image/index.html","title":"Build Virtual Machine Images","text":"

                                          This document will explain how to build the required virtual machine images.

                                           A virtual machine image is essentially a disk file: a copy of a disk partition with an operating system installed. Common image file formats include raw, qcow2, and vmdk.

                                          "},{"location":"en/admin/virtnest/vm-image/index.html#build-an-image","title":"Build an Image","text":"

                                          Below are some detailed steps for building virtual machine images:

                                          1. Download System Images

                                            Before building virtual machine images, you need to download the required system images. We recommend using images in qcow2, raw, or vmdk formats. You can visit the following links to get CentOS and Fedora images:

                                            • CentOS Cloud Images: Obtain CentOS images from the official CentOS project or other sources. Make sure to choose a version compatible with your virtualization platform.
                                            • Fedora Cloud Images: Get images from the official Fedora project. Choose the appropriate version based on your requirements.
                                           2. Build a Docker Image and Push it to a Container Registry

                                            In this step, we will use Docker to build an image and push it to a container registry for easy deployment and usage when needed.

                                            • Create a Dockerfile

                                              FROM scratch\nADD --chown=107:107 CentOS-7-x86_64-GenericCloud.qcow2 /disk/\n

                                              The Dockerfile above adds a file named CentOS-7-x86_64-GenericCloud.qcow2 to the image being built from a scratch base image and places it in the /disk/ directory within the image. This operation includes the file in the image, allowing it to provide a CentOS 7 x86_64 operating system environment when used to create a virtual machine.

                                            • Build the Image

                                              docker build -t release-ci.daocloud.io/ghippo/kubevirt-demo/centos7:v1 .\n

                                              The above command builds an image named release-ci.daocloud.io/ghippo/kubevirt-demo/centos7:v1 using the instructions in the Dockerfile. You can modify the image name according to your project requirements.

                                            • Push the Image to the Container Registry

                                              Use the following command to push the built image to the release-ci.daocloud.io container registry. You can modify the repository name and address as needed.

                                              docker push release-ci.daocloud.io/ghippo/kubevirt-demo/centos7:v1\n

                                          These are the detailed steps and instructions for building virtual machine images. By following these steps, you will be able to successfully build and push images for virtual machines to meet your usage needs.
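
                                           Once pushed, the image can be consumed when creating a virtual machine. Below is a sketch of a dataVolumeTemplates fragment using the same registry-import mechanism as the earlier VM examples; the storage size and storage class are assumptions to adjust for your environment:

                                           dataVolumeTemplates:\n  - metadata:\n      name: centos7-rootdisk  # illustrative name\n    spec:\n      pvc:\n        accessModes:\n          - ReadWriteOnce\n        resources:\n          requests:\n            storage: 10Gi\n        storageClassName: hwameistor-storage-lvm-hdd\n      source:\n        registry:\n          url: >-\n            docker://release-ci.daocloud.io/ghippo/kubevirt-demo/centos7:v1\n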

                                          "},{"location":"en/admin/ghippo/best-practice/navigator.html","title":"Custom Navigation Bar","text":"

                                          Currently, the custom navigation bar needs to be manually created as a YAML file and applied to the cluster.

                                          "},{"location":"en/admin/ghippo/best-practice/navigator.html#navigation-bar-categories","title":"Navigation Bar Categories","text":"

                                          To add or reorder navigation bar categories, you can achieve it by adding or modifying the category YAML.

                                          Here is an example of a category YAML:

                                          apiVersion: ghippo.io/v1alpha1\nkind: NavigatorCategory\nmetadata:\n  name: management-custom # (1)!\nspec:\n  name: Management # (2)!\n  isCustom: true # (3)!\n  localizedName: # (4)!\n    zh-CN: \u7ba1\u7406\n    en-US: Management\n  order: 100 # (5)!\n
                                          1. Naming convention: composed of lowercase \"spec.name\" and \"-custom\"
                                           2. The display name of the category; when modifying an existing category, keep this consistent with it
                                          3. This field must be true
                                          4. Define the Chinese and English names of the category
                                          5. The higher the number, the higher its position in the sorting order

                                          After writing the YAML file, you can see the newly added or modified navigation bar categories by executing the following command and refreshing the page:

                                          kubectl apply -f xxx.yaml\n
                                          "},{"location":"en/admin/ghippo/best-practice/navigator.html#navigation-bar-menus","title":"Navigation Bar Menus","text":"

                                          To add or reorder navigation bar menus, you can achieve it by adding a navigator YAML.

                                          Note

                                           If you need to edit an existing navigation bar menu (not a custom menu added by a user), set the \"gproduct\" field of the new custom menu to the same value as the \"gproduct\" field of the menu to be overridden. The new navigation bar menu will overwrite the entries in the \"menus\" section that have the same \"name\", and append the entries whose \"name\" differs.

                                          "},{"location":"en/admin/ghippo/best-practice/navigator.html#first-level-menu","title":"First-level Menu","text":"

                                          Insert as a product under a navigation bar category

                                          apiVersion: ghippo.io/v1alpha1\nkind: GProductNavigator\nmetadata:\n  name: gmagpie-custom # (1)!\nspec:\n  name: Operations Management\n  iconUrl: ./ui/gmagpie/gmagpie.svg\n  localizedName: # (2)!\n    zh-CN: \u8fd0\u8425\u7ba1\u7406\n    en-US: Operations Management\n  url: ./gmagpie\n  category: management # (3)!\n  menus: # (4)!\n    - name: Access Control\n      iconUrl: ./ui/ghippo/menus/access-control.svg\n      localizedName:\n        zh-CN: \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\n        en-US: Access Control\n      url: ./ghippo/users\n      order: 50 # (5)!\n    - name: Workspace\n      iconUrl: ./ui/ghippo/menus/workspace-folder.svg\n      localizedName:\n        zh-CN: \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7\n        en-US: Workspace and Folder\n      url: ./ghippo/workspaces\n      order: 40\n    - name: Audit Log\n      iconUrl: ./ui/ghippo/menus/audit-logs.svg\n      localizedName:\n        zh-CN: \u5ba1\u8ba1\u65e5\u5fd7\n        en-US: Audit Log\n      url: ./ghippo/audit\n      order: 30\n    - name: Settings\n      iconUrl: ./ui/ghippo/menus/setting.svg\n      localizedName:\n        zh-CN: \u5e73\u53f0\u8bbe\u7f6e\n        en-US: Settings\n      url: ./ghippo/settings\n      order: 10\n  gproduct: gmagpie # (6)!\n  visible: true # (7)!\n  isCustom: true # (8)!\n  order: 20 # (9)!\n  target: blank # (10)!\n
                                          1. Naming convention: composed of lowercase \"spec.gproduct\" and \"-custom\"
                                          2. Define the Chinese and English names of the menu
                                          3. Either \"category\" or \"parentGProduct\" can be used to distinguish between first-level and second-level menus, and it should match the \"spec.name\" field of NavigatorCategory to complete the matching
                                          4. Second-level menus
                                          5. The lower the number, the higher its position in the sorting order
                                          6. Define the identifier of the menu, used for linkage with the parentGProduct field to establish the parent-child relationship.
                                          7. Set whether the menu is visible, default is true
                                          8. This field must be true
                                          9. The higher the number, the higher its position in the sorting order
                                          10. Open a new tab
                                          "},{"location":"en/admin/ghippo/best-practice/navigator.html#second-level-menu","title":"Second-level Menu","text":"

                                          Insert as a sub-product under the second-level menu of a first-level menu

                                          apiVersion: ghippo.io/v1alpha1\nkind: GProductNavigator\nmetadata:\n  name: gmagpie-custom # (1)!\nspec:\n  name: Operations Management\n  iconUrl: ./ui/gmagpie/gmagpie.svg\n  localizedName: # (2)!\n    zh-CN: \u8fd0\u8425\u7ba1\u7406\n    en-US: Operations Management\n  url: ./gmagpie\n  parentGProduct: ghippo # (3)!\n  gproduct: gmagpie # (4)!\n  visible: true # (5)!\n  isCustom: true # (6)!\n  order: 20 # (7)!\n
                                          1. Naming convention: composed of lowercase \"spec.gproduct\" and \"-custom\"
                                          2. Define the Chinese and English names of the menu
                                          3. Either \"category\" or \"parentGProduct\" can be used to distinguish between first-level and second-level menus. If this field is added, it will ignore the \"menus\" field and insert this menu as a second-level menu under the first-level menu with the \"gproduct\" of \"ghippo\"
                                          4. Define the identifier of the menu, used for linkage with the parentGProduct field to establish the parent-child relationship.
                                          5. Set whether the menu is visible, default is true
                                          6. This field must be true
                                          7. The higher the number, the higher its position in the sorting order
                                          "},{"location":"en/admin/insight/quickstart/agent-status.html","title":"Insight-agent component status","text":"

                                           Insight is the multicluster observability product of the AI platform. To realize unified collection of multicluster observability data, users need to install the Helm app insight-agent (installed in the insight-system namespace by default). See How to install insight-agent .

                                          "},{"location":"en/admin/insight/quickstart/agent-status.html#status-description","title":"Status description","text":"

                                          In Insight -> Data Collection section, you can view the status of insight-agent installed in each cluster.

                                           • not installed: insight-agent is not installed under the insight-system namespace in this cluster
                                           • Running: insight-agent is successfully installed in the cluster, and all deployed components are running
                                           • Exception: the Helm deployment failed or some deployed components are not running

                                           You can check the status as follows:

                                           1. Run the following command. If the status is deployed, proceed to the next step. If it is failed, it is recommended to uninstall and reinstall it in Container Management -> Helm Apps, since a failed release will affect application upgrades:

                                            helm list -n insight-system\n
                                           2. Run the following command, or check the status of the components deployed in the cluster in Insight -> Data Collection. If any pod is not in the Running state, restart the abnormal pod.

                                            kubectl get pods -n insight-system\n
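
                                             To restart an abnormal pod, you can delete it and let its controller recreate it (the pod name is illustrative):

                                             kubectl delete pod <pod-name> -n insight-system\n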
                                          "},{"location":"en/admin/insight/quickstart/agent-status.html#supplementary-instructions","title":"Supplementary instructions","text":"
                                           1. The resource consumption of the metric collection component Prometheus in insight-agent is directly proportional to the number of pods running in the cluster. Adjust the Prometheus resources according to the cluster size; refer to Prometheus Resource Planning.

                                           2. The storage capacity of the metric storage component vmstorage in the global service cluster is directly proportional to the total number of pods across all clusters.

                                            • Please contact the platform administrator to adjust the disk capacity of vmstorage according to the cluster size, see vmstorage disk capacity planning.
                                             • Adjust the vmstorage disk according to the multicluster size, see vmstorage disk expansion.
                                          "},{"location":"en/admin/insight/quickstart/jvm-monitor/jmx-exporter.html","title":"Use JMX Exporter to expose JVM monitoring metrics","text":"

                                           JMX Exporter provides two usages:

                                           1. Start a standalone process: specify parameters when the JVM starts to expose an RMI interface for JMX. JMX Exporter calls RMI to obtain the JVM runtime status data, converts it into the Prometheus metrics format, and exposes a port for Prometheus to collect.
                                           2. Start in-process inside the JVM: specify parameters when the JVM starts to run the JMX Exporter jar package as a javaagent. It reads the JVM runtime status data in-process, converts it into the Prometheus metrics format, and exposes a port for Prometheus to collect.

                                          Note

                                           The first method is not officially recommended: the configuration is complicated, and it requires a separate process whose monitoring becomes a new problem of its own. This page therefore focuses on the second usage and how to use JMX Exporter to expose JVM monitoring metrics in a Kubernetes environment.

                                           In the second usage, the JMX Exporter jar package file and the configuration file need to be specified when starting the JVM. The jar package is a binary file that is not easy to mount through a ConfigMap, and the configuration file rarely needs to be modified, so the suggestion is to package the JMX Exporter jar package and its configuration file directly into the business container image.

                                           With this approach, you can either put the JMX Exporter jar file in the business application image or mount it during deployment. Here is an introduction to both methods:

                                          "},{"location":"en/admin/insight/quickstart/jvm-monitor/jmx-exporter.html#method-1-build-the-jmx-exporter-jar-file-into-the-business-image","title":"Method 1: Build the JMX Exporter JAR file into the business image","text":"

                                          The content of prometheus-jmx-config.yaml is as follows:

                                          prometheus-jmx-config.yaml
                                          ...\nssl: false\nlowercaseOutputName: false\nlowercaseOutputLabelNames: false\nrules:\n- pattern: \".*\"\n

                                          Note

                                           For more configuration options, refer to the introduction at the bottom or the official Prometheus documentation.

                                           Then prepare the jar package file. You can find the download address of the latest jar package on the GitHub page of jmx_exporter, and refer to the following Dockerfile:

                                          FROM openjdk:11.0.15-jre\nWORKDIR /app/\nCOPY target/my-app.jar ./\nCOPY prometheus-jmx-config.yaml ./\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\nENV JAVA_TOOL_OPTIONS=-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\nEXPOSE 8081 8999 8080 8888\nENTRYPOINT java $JAVA_OPTS -jar my-app.jar\n

                                           Notice:

                                           • Startup parameter format: -javaagent:<jar-path>=<port>:<config-path>
                                           • Port 8088 is used here to expose the monitoring metrics of the JVM; if it conflicts with the Java application, you can change it
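
                                           Once the container is running, a quick way to verify the agent is serving metrics (assuming port 8088 is reachable from where you run the command):

                                           curl http://localhost:8088/metrics\n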
                                          "},{"location":"en/admin/insight/quickstart/jvm-monitor/jmx-exporter.html#method-2-mount-via-init-container-container","title":"Method 2: mount via init container container","text":"

                                           We need to build the JMX Exporter into a Docker image first; the following Dockerfile is for reference only:

                                           FROM alpine/curl:3.14\nWORKDIR /app/\n# Copy the previously created config file into the image\nCOPY prometheus-jmx-config.yaml ./\n# Download the jmx prometheus javaagent jar\nRUN set -ex; \\\n     curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\n

                                          Build the image according to the above Dockerfile: docker build -t my-jmx-exporter .

                                           Add the following init container to the Java application's deployment YAML:

                                          Click to view YAML file
                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-demo-app\n  labels:\n    app: my-demo-app\nspec:\n  selector:\n    matchLabels:\n      app: my-demo-app\n  template:\n    metadata:\n      labels:\n        app: my-demo-app\n    spec:\n      imagePullSecrets:\n      - name: registry-pull\n      initContainers:\n      - name: jmx-sidecar\n        image: my-jmx-exporter\n        command: [\"cp\", \"-r\", \"/app/jmx_prometheus_javaagent-0.17.2.jar\", \"/target/jmx_prometheus_javaagent-0.17.2.jar\"]  \u278a\n        volumeMounts:\n        - name: sidecar\n          mountPath: /target\n      containers:\n      - image: my-demo-app-image\n        name: my-demo-app\n        resources:\n          requests:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n          limits:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n        ports:\n        - containerPort: 18083\n        env:\n        - name: JAVA_TOOL_OPTIONS\n          value: \"-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\" \u278b\n        volumeMounts:\n        - name: host-time\n          mountPath: /etc/localtime\n          readOnly: true\n        - name: sidecar\n          mountPath: /sidecar\n      volumes:\n      - name: host-time\n        hostPath:\n          path: /etc/localtime\n      - name: sidecar  # Share the agent folder\n        emptyDir: {}\n      restartPolicy: Always\n

                                           After the above modification, the sample application my-demo-app can expose JVM metrics. After the service runs, you can access the Prometheus-format metrics exposed by the service at http://localhost:8088.

                                          Then, you can refer to Java Application Docking Observability with JVM Metrics.

                                          "},{"location":"en/admin/insight/quickstart/jvm-monitor/jvm-catelogy.html","title":"Start monitoring Java applications","text":"

                                           This document mainly describes how to monitor the JVM of a customer's Java application. It covers both Java applications that already expose JVM metrics and those that do not, and how each interfaces with Insight.

                                           If your Java application does not yet expose JVM metrics, refer to the following documents:

                                          • Expose JVM monitoring metrics with JMX Exporter
                                          • Expose JVM monitoring metrics using OpenTelemetry Java Agent

                                           If your Java application already exposes JVM metrics, refer to the following document:

                                          • Java application docking observability with existing JVM metrics
                                          "},{"location":"en/admin/insight/quickstart/jvm-monitor/legacy-jvm.html","title":"Java Application with JVM Metrics to Dock Insight","text":"

                                           If your Java application exposes JVM monitoring metrics through other means (such as Spring Boot Actuator), the monitoring data needs to be made collectable. You can let Insight collect existing JVM metrics by adding Kubernetes annotations to the workload:

                                           annotations:\n   insight.opentelemetry.io/metric-scrape: \"true\" # whether to collect\n   insight.opentelemetry.io/metric-path: \"/\" # path to collect metrics\n   insight.opentelemetry.io/metric-port: \"9464\" # port for collecting metrics\n

                                           YAML example adding annotations to the my-deployment-app workload:

                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-app\nspec:\n  selector:\n    matchLabels:\n      app: my-deployment-app\n      app.kubernetes.io/name: my-deployment-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-deployment-app\n        app.kubernetes.io/name: my-deployment-app\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\" # whether to collect\n        insight.opentelemetry.io/metric-path: \"/\" # path to collect metrics\n        insight.opentelemetry.io/metric-port: \"9464\" # port for collecting metrics\n

                                          The following shows the complete YAML:

                                          ---\napiVersion: v1\nkind: Service\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  type: NodePort\n  selector:\n    #app: my-deployment-with-aotu-instrumentation-app\n    app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  ports:\n    - name: http\n      port: 8080\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  selector:\n    matchLabels:\n      #app: my-deployment-with-aotu-instrumentation-app\n      app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\" # whether to collect\n        insight.opentelemetry.io/metric-path: \"/actuator/prometheus\"      # path to collect metrics\n        insight.opentelemetry.io/metric-port: \"8080\"   # port for collecting metrics\n    spec:\n      containers:\n        - name: myapp\n          image: docker.m.daocloud.io/wutang/spring-boot-actuator-prometheus-metrics-demo\n          ports:\n            - name: http\n              containerPort: 8080\n          resources:\n            limits:\n              cpu: 500m\n              memory: 800Mi\n            requests:\n              cpu: 200m\n              memory: 400Mi\n

                                           In the above example, Insight will scrape the pod's :8080/actuator/prometheus endpoint to get the Prometheus metrics exposed through Spring Boot Actuator.

                                          "},{"location":"en/admin/insight/quickstart/jvm-monitor/otel-java-agent.html","title":"Use OpenTelemetry Java Agent to expose JVM monitoring metrics","text":"

                                           OpenTelemetry Agent v1.20.0 and above includes the JMX Metric Insight module. If your application has already integrated the OpenTelemetry Agent to collect application traces, you no longer need to introduce another agent to expose JMX metrics: the OpenTelemetry Agent collects and exposes metrics by instrumenting the MBeans locally available in the application.

                                           The OpenTelemetry Agent also has some built-in monitoring samples for common Java servers and frameworks; refer to predefined metrics.

                                           Using the OpenTelemetry Java Agent, you also need to consider how to mount the JAR into the container. In addition to mounting the JAR file as described for the JMX Exporter above, you can use the Operator capabilities provided by OpenTelemetry to automatically enable JVM metric exposure for your applications:

                                           However, in the current version you still need to manually add the proper annotations to the workload before the JVM data will be collected by Insight.

                                          "},{"location":"en/admin/insight/quickstart/jvm-monitor/otel-java-agent.html#expose-metrics-for-java-middleware","title":"Expose metrics for Java middleware","text":"

                                           The OpenTelemetry Agent also has some built-in middleware monitoring samples; refer to Predefined Metrics.

                                           By default, no target system is specified; it needs to be specified through the -Dotel.jmx.target.system JVM option, for example -Dotel.jmx.target.system=jetty,kafka-broker.
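
                                           In a Kubernetes workload, this option can be passed via JAVA_TOOL_OPTIONS. A sketch, where the agent path is an assumption that depends on how the JAR is mounted in your image:

                                           env:\n  - name: JAVA_TOOL_OPTIONS\n    value: \"-javaagent:/otel/opentelemetry-javaagent.jar -Dotel.jmx.target.system=jetty,kafka-broker\"\n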

                                          "},{"location":"en/admin/insight/quickstart/jvm-monitor/otel-java-agent.html#reference","title":"Reference","text":"
                                          • Gaining JMX Metric Insights with the OpenTelemetry Java Agent

                                          • Otel jmx metrics

                                          "},{"location":"en/admin/insight/quickstart/otel/golang-ebpf.html","title":"Enhance Go apps with OTel auto-instrumentation","text":"

                                           If you don't want to manually change the application code, you can try the eBPF-based automatic enhancement method described on this page. This feature is currently under review for donation to the OpenTelemetry community and does not yet support Operator injection through annotations (it will be supported in the future), so you need to manually change the Deployment YAML or use a patch.

                                          "},{"location":"en/admin/insight/quickstart/otel/golang-ebpf.html#prerequisites","title":"Prerequisites","text":"

                                          Make sure Insight Agent is ready. If not, see Install insight-agent to collect data and make sure the following three items are in place:

                                           • The trace feature is enabled for Insight-agent
                                           • The address and port for trace data are filled in correctly
                                           • The pods corresponding to deployment/opentelemetry-operator-controller-manager and deployment/insight-agent-opentelemetry-collector are ready
                                          "},{"location":"en/admin/insight/quickstart/otel/golang-ebpf.html#install-instrumentation-cr","title":"Install Instrumentation CR","text":"

                                           Install it under the insight-system namespace; skip this step if it has already been installed.

                                          Note: This CR currently only supports injecting the environment variables (including service name and trace address) required to connect to Insight; injection of Golang probes will be supported in the future.

                                          kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.17.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.31.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.34b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:0.3.1-beta.1\nEOF\n
                                          "},{"location":"en/admin/insight/quickstart/otel/golang-ebpf.html#change-the-application-deployment-file","title":"Change the application deployment file","text":"
                                          • Add environment variable annotations

                                            There is only one such annotation; it adds the OpenTelemetry-related environment variables, such as the trace reporting address, the cluster ID of the container, and the namespace:

                                            instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

                                            The value is split into two parts by / : the first part, insight-system, is the namespace where the CR from the previous step was installed, and the second part, insight-opentelemetry-autoinstrumentation, is the name of the CR.

                                          • Add golang ebpf probe container

                                            Here is sample code:

                                            apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: voting\n  namespace: emojivoto\n  labels:\n    app.kubernetes.io/name: voting\n    app.kubernetes.io/part-of: emojivoto\n    app.kubernetes.io/version: v11\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: voting-svc\n      version: v11\n  template:\n    metadata:\n      labels:\n        app: voting-svc\n        version: v11\n      annotations:\n        instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\" # (1)\n    spec:\n      containers:\n        - env:\n            - name: GRPC_PORT\n              value: \"8080\"\n            - name: PROM_PORT\n              value: \"8801\"\n          image: docker.l5d.io/buoyantio/emojivoto-voting-svc:v11 # (2)\n          name: voting-svc\n          command:\n            - /usr/local/bin/emojivoto-voting-svc\n          ports:\n            - containerPort: 8080\n              name: grpc\n            - containerPort: 8801\n              name: prom\n          resources:\n            requests:\n              cpu: 100m\n        - name: emojivoto-voting-instrumentation\n          image: docker.m.daocloud.io/keyval/otel-go-agent:v0.6.0\n          env:\n            - name: OTEL_TARGET_EXE\n              value: /usr/local/bin/emojivoto-voting-svc # (3)\n          securityContext:\n            runAsUser: 0\n            capabilities:\n              add:\n                - SYS_PTRACE\n            privileged: true\n          volumeMounts:\n            - mountPath: /sys/kernel/debug\n              name: kernel-debug\n      volumes:\n        - name: kernel-debug\n          hostPath:\n            path: /sys/kernel/debug\n
                                            1. Used to add environment variables related to OpenTelemetry.
                                            2. Assuming this is your Golang application.
                                            3. Note that it must match the command mentioned above: /usr/local/bin/emojivoto-voting-svc .

                                          The final generated YAML is as follows:

                                          apiVersion: v1\nkind: Pod\nmetadata:\n  name: voting-84b696c897-p9xbp\n  generateName: voting-84b696c897-\n  namespace: default\n  uid: 742639b0-db6e-4f06-ac90-68a80e2b8a11\n  resourceVersion: '65560793'\n  creationTimestamp: '2022-10-19T07:08:56Z'\n  labels:\n    app: voting-svc\n    pod-template-hash: 84b696c897\n    version: v11\n  annotations:\n    cni.projectcalico.org/containerID: 0a987cf0055ce0dfbe75c3f30d580719eb4fbbd7e1af367064b588d4d4e4c7c7\n    cni.projectcalico.org/podIP: 192.168.141.218/32\n    cni.projectcalico.org/podIPs: 192.168.141.218/32\n    instrumentation.opentelemetry.io/inject-sdk: insight-system/insight-opentelemetry-autoinstrumentation\nspec:\n  volumes:\n    - name: launcherdir\n      emptyDir: {}\n    - name: kernel-debug\n      hostPath:\n        path: /sys/kernel/debug\n        type: ''\n    - name: kube-api-access-gwj5v\n      projected:\n        sources:\n          - serviceAccountToken:\n              expirationSeconds: 3607\n              path: token\n          - configMap:\n              name: kube-root-ca.crt\n              items:\n                - key: ca.crt\n                  path: ca.crt\n          - downwardAPI:\n              items:\n                - path: namespace\n                  fieldRef:\n                    apiVersion: v1\n                    fieldPath: metadata.namespace\n        defaultMode: 420\n  containers:\n    - name: voting-svc\n      image: docker.l5d.io/buoyantio/emojivoto-voting-svc:v11\n      command:\n        - /odigos-launcher/launch\n        - /usr/local/bin/emojivoto-voting-svc\n      ports:\n        - name: grpc\n          containerPort: 8080\n          protocol: TCP\n        - name: prom\n          containerPort: 8801\n          protocol: TCP\n      env:\n        - name: GRPC_PORT\n          value: '8080'\n        - name: PROM_PORT\n          value: '8801'\n        - name: OTEL_TRACES_EXPORTER\n          value: otlp\n        - name: OTEL_EXPORTER_OTLP_ENDPOINT\n          value: >-\n            http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n        - name: OTEL_EXPORTER_OTLP_TIMEOUT\n          value: '200'\n        - name: SPLUNK_TRACE_RESPONSE_HEADER_ENABLED\n          value: 'true'\n        - name: OTEL_SERVICE_NAME\n          value: voting\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.name\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_UID\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.uid\n        - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: spec.nodeName\n        - name: OTEL_PROPAGATORS\n          value: jaeger,b3\n        - name: OTEL_TRACES_SAMPLER\n          value: always_on\n        - name: OTEL_RESOURCE_ATTRIBUTES\n          value: >-\n            k8s.container.name=voting-svc,k8s.deployment.name=voting,k8s.deployment.uid=79e015e2-4643-44c0-993c-e486aebaba10,k8s.namespace.name=default,k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME),k8s.pod.uid=$(OTEL_RESOURCE_ATTRIBUTES_POD_UID),k8s.replicaset.name=voting-84b696c897,k8s.replicaset.uid=63f56167-6632-415d-8b01-43a3db9891ff\n      resources:\n        requests:\n          cpu: 100m\n      volumeMounts:\n        - name: launcherdir\n          mountPath: 
/odigos-launcher\n        - name: kube-api-access-gwj5v\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: IfNotPresent\n    - name: emojivoto-voting-instrumentation\n      image: keyval/otel-go-agent:v0.6.0\n      env:\n        - name: OTEL_TARGET_EXE\n          value: /usr/local/bin/emojivoto-voting-svc\n        - name: OTEL_EXPORTER_OTLP_ENDPOINT\n          value: jaeger:4317\n        - name: OTEL_SERVICE_NAME\n          value: emojivoto-voting\n      resources: {}\n      volumeMounts:\n        - name: kernel-debug\n          mountPath: /sys/kernel/debug\n        - name: kube-api-access-gwj5v\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: IfNotPresent\n      securityContext:\n        capabilities:\n          add:\n            - SYS_PTRACE\n        privileged: true\n        runAsUser: 0\n\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n
                                          "},{"location":"en/admin/insight/quickstart/otel/golang-ebpf.html#reference","title":"Reference","text":"
                                          • Getting Started with Go OpenTelemetry Automatic Instrumentation
                                          • Donating ebpf based instrumentation
                                          "},{"location":"en/admin/insight/quickstart/other/install-agentindce.html","title":"Install insight-agent in Suanova 4.0","text":"

                                          In the AI platform, an existing Suanova 4.0 cluster can be managed as a subcluster. This guide covers potential issues and their solutions when installing insight-agent in a Suanova 4.0 cluster.

                                          "},{"location":"en/admin/insight/quickstart/other/install-agentindce.html#issue-one","title":"Issue One","text":"

                                          Most Suanova 4.0 clusters already have dx-insight installed as the monitoring system. Installing insight-agent in such a cluster conflicts with the existing prometheus operator, preventing a smooth installation.

                                          "},{"location":"en/admin/insight/quickstart/other/install-agentindce.html#solution","title":"Solution","text":"

                                          Enable the appropriate flags on the prometheus operators: keep the prometheus operator in dx-insight and make it coexist with the prometheus operator in insight-agent in 5.0.

                                          "},{"location":"en/admin/insight/quickstart/other/install-agentindce.html#steps","title":"Steps","text":"
                                          1. Log in to the console.
                                          2. Enable the --deny-namespaces flag in each of the two prometheus operators.
                                          3. Run the following command (for reference only; replace the prometheus operator name and namespace with your actual values):

                                            kubectl edit deploy insight-agent-kube-prometh-operator -n insight-system\n
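
                                            In the editor that opens, the flag goes into the operator container's startup arguments. A sketch of the relevant fragment (the container name and existing flags vary by chart version):

                                            spec:\n  template:\n    spec:\n      containers:\n        - name: kube-prometheus-stack   # assumed container name; check your deployment\n          args:\n            - --deny-namespaces=dx-insight   # for insight-agent's operator; use insight-system in the dx-insight operator\n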

                                          Note

                                          • The dx-insight components are deployed in the dx-insight namespace, and insight-agent is deployed in the insight-system namespace. Add --deny-namespaces=insight-system to the prometheus operator in dx-insight, and add --deny-namespaces=dx-insight to the prometheus operator in insight-agent.
                                          • Only the deny list is added; both prometheus operators can still scan all other namespaces, so the collection resources under kube-system or your business namespaces are unaffected.
                                          • Watch out for node exporter port conflicts (see the supplementary explanation below).
                                          "},{"location":"en/admin/insight/quickstart/other/install-agentindce.html#supplementary-explanation","title":"Supplementary Explanation","text":"

                                          The open-source node-exporter enables hostNetwork by default and listens on port 9100. If the cluster's existing monitoring system already runs a node-exporter, installing insight-agent will cause a port conflict, and the new node-exporter cannot run normally.

                                          Note

                                          Insight's node exporter enables extra collectors to gather additional metrics, so installing it is recommended.

                                          Currently, the installation command does not support changing the port. After helm install insight-agent , you need to manually modify the related ports of the insight node-exporter daemonset and svc.
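
                                          For example, the ports could be adjusted after installation as follows (a sketch; the resource names are assumed defaults, confirm them with kubectl -n insight-system get ds,svc):

                                          kubectl -n insight-system edit daemonset insight-agent-node-exporter   # change the listen port, e.g. --web.listen-address=:9101\nkubectl -n insight-system edit service insight-agent-node-exporter    # keep the Service port/targetPort in sync\n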

                                          "},{"location":"en/admin/insight/quickstart/other/install-agentindce.html#issue-two","title":"Issue Two","text":"

                                          After Insight Agent is successfully deployed, fluentbit does not collect logs of Suanova 4.0.

                                          "},{"location":"en/admin/insight/quickstart/other/install-agentindce.html#solution_1","title":"Solution","text":"

                                          The docker storage directory of Suanova 4.0 is /var/lib/containers , which differs from the path configured in insight-agent, so the logs are not collected.

                                          "},{"location":"en/admin/insight/quickstart/other/install-agentindce.html#steps_1","title":"Steps","text":"
                                          1. Log in to the console.
                                          2. Modify the following parameters in the insight-agent Chart.

                                            fluent-bit:\n  daemonSetVolumeMounts:\n    - name: varlog\n      mountPath: /var/log\n    - name: varlibdockercontainers\n-     mountPath: /var/lib/docker/containers\n+     mountPath: /var/lib/containers/docker/containers\n      readOnly: true\n    - name: etcmachineid\n      mountPath: /etc/machine-id\n      readOnly: true\n    - name: dmesg\n      mountPath: /var/log/dmesg\n      readOnly: true\n
                                          "},{"location":"en/admin/insight/trace/topology-helper.html","title":"Service Topology Element Explanations","text":"

                                          The service topology provided by Observability allows you to quickly identify the request relationships between services and determine the health status of services based on different colors. The health status is determined based on the request latency and error rate of the service's overall traffic. This article explains the elements in the service topology.

                                          "},{"location":"en/admin/insight/trace/topology-helper.html#node-status-explanation","title":"Node Status Explanation","text":"

                                          The node health status is determined based on the error rate and request latency of the service's overall traffic, following these rules:

                                          Color | Status | Rules
                                          Gray | Healthy | Error rate equals 0% and request latency is less than 100ms
                                          Orange | Warning | Error rate in (0, 5%) or request latency in (100ms, 200ms)
                                          Red | Abnormal | Error rate in (5%, 100%) or request latency in (200ms, +Infinity)
                                          "},{"location":"en/admin/insight/trace/topology-helper.html#connection-status-explanation","title":"Connection Status Explanation","text":"
                                          Color | Status | Rules
                                          Green | Healthy | Error rate equals 0% and request latency is less than 100ms
                                          Orange | Warning | Error rate in (0, 5%) or request latency in (100ms, 200ms)
                                          Red | Abnormal | Error rate in (5%, 100%) or request latency in (200ms, +Infinity)
                                          "},{"location":"en/admin/kpanda/gpu/gpu-metrics.html","title":"GPU Metrics","text":"

                                          This page lists some commonly used GPU metrics.

                                          "},{"location":"en/admin/kpanda/gpu/gpu-metrics.html#cluster-level","title":"Cluster Level","text":"Metric Name Description Number of GPUs Total number of GPUs in the cluster Average GPU Utilization Average compute utilization of all GPUs in the cluster Average GPU Memory Utilization Average memory utilization of all GPUs in the cluster GPU Power Power consumption of all GPUs in the cluster GPU Temperature Temperature of all GPUs in the cluster GPU Utilization Details 24-hour usage details of all GPUs in the cluster (includes max, avg, current) GPU Memory Usage Details 24-hour memory usage details of all GPUs in the cluster (includes min, max, avg, current) GPU Memory Bandwidth Utilization For example, an Nvidia V100 GPU has a maximum memory bandwidth of 900 GB/sec. If the current memory bandwidth is 450 GB/sec, the utilization is 50%"},{"location":"en/admin/kpanda/gpu/gpu-metrics.html#node-level","title":"Node Level","text":"Metric Name Description GPU Mode Usage mode of GPUs on the node, including full-card mode, MIG mode, vGPU mode Number of Physical GPUs Total number of physical GPUs on the node Number of Virtual GPUs Number of vGPU devices created on the node Number of MIG Instances Number of MIG instances created on the node GPU Memory Allocation Rate Memory allocation rate of all GPUs on the node Average GPU Utilization Average compute utilization of all GPUs on the node Average GPU Memory Utilization Average memory utilization of all GPUs on the node GPU Driver Version Driver version information of GPUs on the node GPU Utilization Details 24-hour usage details of each GPU on the node (includes max, avg, current) GPU Memory Usage Details 24-hour memory usage details of each GPU on the node (includes min, max, avg, current)"},{"location":"en/admin/kpanda/gpu/gpu-metrics.html#pod-level","title":"Pod Level","text":"Category Metric Name Description Application Overview GPU - Compute & Memory Pod GPU Utilization Compute utilization of the GPUs used by the current Pod Pod GPU Memory Utilization Memory utilization of the GPUs used by the current Pod Pod GPU Memory Usage Memory usage of the GPUs used by the current Pod Memory Allocation Memory allocation of the GPUs used by the current Pod Pod GPU Memory Copy Ratio Memory copy ratio of the GPUs used by the current Pod GPU - Engine Overview GPU Graphics Engine Activity Percentage Percentage of time the Graphics or Compute engine is active during a monitoring cycle GPU Memory Bandwidth Utilization Memory bandwidth utilization (Memory BW Utilization) indicates the fraction of cycles during which data is sent to or received from the device memory. This value represents the average over the interval, not an instantaneous value. A higher value indicates higher utilization of device memory.A value of 1 (100%) indicates that a DRAM instruction is executed every cycle during the interval (in practice, a peak of about 0.8 (80%) is the maximum achievable).A value of 0.2 (20%) indicates that 20% of the cycles during the interval are spent reading from or writing to device memory. 
Tensor Core Utilization Percentage of time the Tensor Core pipeline is active during a monitoring cycle FP16 Engine Utilization Percentage of time the FP16 pipeline is active during a monitoring cycle FP32 Engine Utilization Percentage of time the FP32 pipeline is active during a monitoring cycle FP64 Engine Utilization Percentage of time the FP64 pipeline is active during a monitoring cycle GPU Decode Utilization Decode engine utilization of the GPU GPU Encode Utilization Encode engine utilization of the GPU GPU - Temperature & Power GPU Temperature Temperature of all GPUs in the cluster GPU Power Power consumption of all GPUs in the cluster GPU Total Power Consumption Total power consumption of the GPUs GPU - Clock GPU Memory Clock Memory clock frequency GPU Application SM Clock Application SM clock frequency GPU Application Memory Clock Application memory clock frequency GPU Video Engine Clock Video engine clock frequency GPU Throttle Reasons Reasons for GPU throttling GPU - Other Details PCIe Transfer Rate Data transfer rate of the GPU through the PCIe bus PCIe Receive Rate Data receive rate of the GPU through the PCIe bus"},{"location":"en/admin/kpanda/gpu/ascend/Ascend_usage.html","title":"Use Ascend NPU","text":"

                                          This section explains how to use Ascend NPUs on the AI platform.

                                          "},{"location":"en/admin/kpanda/gpu/ascend/Ascend_usage.html#prerequisites","title":"Prerequisites","text":"
                                          • The current NPU node has the Ascend driver installed.
                                          • The current NPU node has the Ascend-Docker-Runtime component installed.
                                          • The NPU MindX DL suite is installed on the current cluster.
                                          • No virtualization is performed on the NPU card in the current cluster, and it is not occupied by other applications.

                                          Refer to the Ascend NPU Component Installation Document to install the basic environment.

                                          "},{"location":"en/admin/kpanda/gpu/ascend/Ascend_usage.html#quick-start","title":"Quick Start","text":"

                                          This document uses the AscendCL Image Classification Application example from the Ascend sample library.

                                          1. Download the Ascend repository

                                            Run the following command to download the Ascend demo repository, and remember the storage location of the code for subsequent use.

                                            git clone https://gitee.com/ascend/samples.git\n
                                          2. Prepare the base image

                                            This example uses the ascend-pytorch base image, which can be obtained from the Ascend Container Registry.

                                          3. Prepare the YAML file

                                            ascend-demo.yaml
                                            apiVersion: batch/v1\nkind: Job\nmetadata:\n  name: resnetinfer1-1-1usoc\nspec:\n  template:\n    spec:\n      containers:\n        - image: ascendhub.huawei.com/public-ascendhub/ascend-pytorch:23.0.RC2-ubuntu18.04 # Inference image name\n          imagePullPolicy: IfNotPresent\n          name: resnet50infer\n          securityContext:\n            runAsUser: 0\n          command:\n            - \"/bin/bash\"\n            - \"-c\"\n            - |\n              source /usr/local/Ascend/ascend-toolkit/set_env.sh &&\n              TEMP_DIR=/root/samples_copy_$(date '+%Y%m%d_%H%M%S_%N') &&\n              cp -r /root/samples \"$TEMP_DIR\" &&\n              cd \"$TEMP_DIR\"/inference/modelInference/sampleResnetQuickStart/python/model &&\n              wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/resnet50/resnet50.onnx &&\n              atc --model=resnet50.onnx --framework=5 --output=resnet50 --input_shape=\"actual_input_1:1,3,224,224\"  --soc_version=Ascend910 &&\n              cd ../data &&\n              wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg &&\n              cd ../scripts &&\n              bash sample_run.sh\n          resources:\n            requests:\n              huawei.com/Ascend910: 1 # Number of the Ascend 910 Processors\n            limits:\n              huawei.com/Ascend910: 1 # The value should be the same as that of requests\n          volumeMounts:\n            - name: hiai-driver\n              mountPath: /usr/local/Ascend/driver\n              readOnly: true\n            - name: slog\n              mountPath: /var/log/npu/conf/slog/slog.conf\n            - name: localtime # The container time must be the same as the host time\n              mountPath: /etc/localtime\n            - name: dmp\n              mountPath: /var/dmp_daemon\n            - name: slogd\n              mountPath: /var/slogd\n            - name: hbasic\n              mountPath: /etc/hdcBasic.cfg\n            - name: sys-version\n              mountPath: /etc/sys_version.conf\n            - name: aicpu\n              mountPath: /usr/lib64/aicpu_kernels\n            - name: tfso\n              mountPath: /usr/lib64/libtensorflow.so\n            - name: sample-path\n              mountPath: /root/samples\n      volumes:\n        - name: hiai-driver\n          hostPath:\n            path: /usr/local/Ascend/driver\n        - name: slog\n          hostPath:\n            path: /var/log/npu/conf/slog/slog.conf\n        - name: localtime\n          hostPath:\n            path: /etc/localtime\n        - name: dmp\n          hostPath:\n            path: /var/dmp_daemon\n        - name: slogd\n          hostPath:\n            path: /var/slogd\n        - name: hbasic\n          hostPath:\n            path: /etc/hdcBasic.cfg\n        - name: sys-version\n          hostPath:\n            path: /etc/sys_version.conf\n        - name: aicpu\n          hostPath:\n            path: /usr/lib64/aicpu_kernels\n        - name: tfso\n          hostPath:\n            path: /usr/lib64/libtensorflow.so\n        - name: sample-path\n          hostPath:\n            path: /root/samples\n      restartPolicy: OnFailure\n

                                            Some fields in the above YAML need to be modified according to the actual situation:

                                            1. atc ... --soc_version=Ascend910 uses Ascend910; adjust this value to your hardware. You can run the npu-smi info command to check the NPU model, then add the Ascend prefix.
                                            2. samples-path should be adjusted according to the actual situation.
                                            3. resources should be adjusted according to the actual situation.
                                          4. Deploy a Job and check its results

                                            Use the following command to create a Job:

                                            kubectl apply -f ascend-demo.yaml\n

                                            Check the Pod running status:
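
                                            For example (a sketch; the Job controller labels its Pods with job-name, so the Pod can be located that way):

                                            kubectl get pods -l job-name=resnetinfer1-1-1usoc\nkubectl logs -f job/resnetinfer1-1-1usoc\n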

                                            After the Pod runs successfully, check the logs. The key output is the classification result: Label is the category identifier, Conf is the maximum confidence of the classification, and Class is the predicted category. These values may vary depending on the version and environment, so refer to your actual output.


                                          "},{"location":"en/admin/kpanda/gpu/ascend/Ascend_usage.html#ui-usage","title":"UI Usage","text":"
                                          1. Confirm that the cluster has detected the NPU card. Click Clusters -> Cluster Settings -> Addon Plugins and check whether the proper GPU type has been automatically enabled and detected. Currently, the cluster automatically enables GPU support and sets the GPU type to Ascend .

                                          2. Deploy the workload. Click Clusters -> Workloads , deploy the workload through an image, select the type (Ascend), and then configure the number of physical cards used by the application:

                                            Number of Physical Cards (huawei.com/Ascend910) : This indicates how many physical cards the current Pod needs to mount. The input value must be an integer and less than or equal to the number of cards on the host.

                                            If this configuration is incorrect, scheduling will fail or resources cannot be allocated.

                                          "},{"location":"en/admin/virtnest/vm/vm-gpu.html","title":"Virtual Machine Configuration GPU (Nvidia)","text":"

                                          This article will introduce the prerequisites for configuring GPU when creating a virtual machine.

                                          The key to configuring GPUs for virtual machines is to configure the GPU Operator so that it deploys different software components onto worker nodes depending on the GPU workload each node is intended to run. Take the following three nodes as examples:

                                          • The controller-node-1 node is configured to run containers.
                                          • The work-node-1 node is configured to run virtual machines with GPU direct pass-through.
                                          • The work-node-2 node is configured to run virtual machines with vGPUs.
                                          "},{"location":"en/admin/virtnest/vm/vm-gpu.html#assumptions-limitations-and-dependencies","title":"Assumptions, Limitations, and Dependencies","text":"

                                          1. A worker node can run GPU-accelerated containers, virtual machines with GPU direct pass-through, or virtual machines with vGPUs, but not a combination of them.
                                          2. Cluster administrators or developers need to understand the cluster layout in advance and correctly label each node to indicate the type of GPU workload it will run.
                                          3. Worker nodes running virtual machines with GPU direct pass-through or vGPUs are assumed to be bare metal. If the worker nodes are themselves virtual machines, the GPU direct pass-through feature must be enabled on the virtualization platform; please consult the virtual machine platform provider.
                                          4. Nvidia MIG vGPU is not supported.
                                          5. The GPU Operator will not automatically install GPU drivers in virtual machines.
                                          "},{"location":"en/admin/virtnest/vm/vm-gpu.html#enable-iommu","title":"Enable IOMMU","text":"

                                          To enable the GPU direct pass-through feature, the cluster nodes need to enable IOMMU. Please refer to How to Enable IOMMU. If your cluster is running on a virtual machine, consult your virtual machine platform provider.
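
                                          As a rough sketch, on most Linux distributions IOMMU is enabled through kernel boot parameters; the exact steps differ per distribution and CPU vendor:

                                          # /etc/default/grub: append to GRUB_CMDLINE_LINUX, then regenerate the grub config and reboot\nGRUB_CMDLINE_LINUX=\"... intel_iommu=on iommu=pt\"   # use amd_iommu=on on AMD hosts\n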

                                          "},{"location":"en/admin/virtnest/vm/vm-gpu.html#build-vgpu-manager-image","title":"Build vGPU Manager Image","text":"

                                          Note: Building a vGPU Manager image is only required when using NVIDIA vGPUs. If you plan to use only GPU direct pass-through, skip this section.

                                          The following are the steps to build the vGPU Manager image and push it to the container registry:

                                          1. Download the vGPU software from the NVIDIA Licensing Portal.

                                            • Log in to the NVIDIA Licensing Portal and go to the Software Downloads page.
                                            • The NVIDIA vGPU software is located in the Driver downloads tab on the Software Downloads page.
                                            • Select VGPU + Linux in the filter criteria and click Download to get the software package for Linux KVM. Unzip the downloaded file (NVIDIA-Linux-x86_64-<version>-vgpu-kvm.run).

                                          2. Clone the container-images/driver repository in the terminal

                                            git clone https://gitlab.com/nvidia/container-images/driver\ncd driver\n
                                          3. Switch to the vgpu-manager directory for your operating system

                                            cd vgpu-manager/<your-os>\n
                                          4. Copy the .run file extracted in step 1 to the current directory

                                            cp <local-driver-download-directory>/*-vgpu-kvm.run ./\n
                                          5. Set environment variables

                                            • PRIVATE_REGISTRY: Name of the private registry to store the driver image.
                                            • VERSION: Version of NVIDIA vGPU Manager, downloaded from the NVIDIA Software Portal.
                                            • OS_TAG: Must match the operating system version of the cluster node.
                                            • CUDA_VERSION: CUDA base image version used to build the driver image.
                                            export PRIVATE_REGISTRY=my/private/registry VERSION=510.73.06 OS_TAG=ubuntu22.04 CUDA_VERSION=12.2.0\n
                                          6. Build the NVIDIA vGPU Manager Image

                                            docker build \\\n  --build-arg DRIVER_VERSION=${VERSION} \\\n  --build-arg CUDA_VERSION=${CUDA_VERSION} \\\n  -t ${PRIVATE_REGISTRY}/vgpu-manager:${VERSION}-${OS_TAG} .\n
                                          7. Push the NVIDIA vGPU Manager image to your container registry

                                            docker push ${PRIVATE_REGISTRY}/vgpu-manager:${VERSION}-${OS_TAG}\n
                                          "},{"location":"en/admin/virtnest/vm/vm-gpu.html#label-cluster-nodes","title":"Label Cluster Nodes","text":"

                                          Go to Container Management , select your worker cluster and click Nodes . On the right side of the list, click \u2507 and select Edit Labels to add a label to the node. Each node can carry only one of these labels.

                                          The label key is nvidia.com/gpu.workload.config , and its value can be one of container , vm-passthrough , or vm-vgpu .
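
                                          The same labels can also be applied from the command line, for example:

                                          kubectl label node controller-node-1 nvidia.com/gpu.workload.config=container\nkubectl label node work-node-1 nvidia.com/gpu.workload.config=vm-passthrough\nkubectl label node work-node-2 nvidia.com/gpu.workload.config=vm-vgpu\n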

                                          "},{"location":"en/admin/virtnest/vm/vm-gpu.html#install-nvidia-operator","title":"Install Nvidia Operator","text":"
                                          1. Go to Container Management , select your worker cluster, click Helm Apps -> Helm Charts , choose and install gpu-operator. You need to modify some fields in the yaml.

                                            gpu-operator.sandboxWorkloads.enabled=true\ngpu-operator.vgpuManager.enabled=true\ngpu-operator.vgpuManager.repository=<your-register-url>      # (1)!\ngpu-operator.vgpuManager.image=vgpu-manager\ngpu-operator.vgpuManager.version=<your-vgpu-manager-version> # (2)!\ngpu-operator.vgpuDeviceManager.enabled=true\n
                                            1. Fill in the container registry address referred to in the \"Build vGPU Manager Image\" step.
                                            2. Fill in the VERSION referred to in the \"Build vGPU Manager Image\" step.
                                          2. Wait for the installation to be successful, as shown in the image below:

                                          "},{"location":"en/admin/virtnest/vm/vm-gpu.html#install-virtnest-agent-and-configure-cr","title":"Install virtnest-agent and Configure CR","text":"
                                          1. Install virtnest-agent, refer to Install virtnest-agent.

                                          2. Add vGPU and GPU direct pass-through to the Virtnest Kubevirt CR. The following example shows the key yaml after adding vGPU and GPU direct pass-through:

                                            spec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n      - GPU\n      - DisableMDEVConfiguration\n    # Fill in the information below\n    permittedHostDevices:\n      mediatedDevices:            # vGPU\n      - mdevNameSelector: GRID P4-1Q\n        resourceName: nvidia.com/GRID_P4-1Q\n      pciHostDevices:             # GPU direct pass-through\n      - externalResourceProvider: true\n        pciVendorSelector: 10DE:1BB3\n        resourceName: nvidia.com/GP104GL_TESLA_P4\n
                                          3. In the kubevirt CR yaml, permittedHostDevices is used to import VM devices, and vGPU should be added in mediatedDevices with the following structure:

                                            mediatedDevices:          \n- mdevNameSelector: GRID P4-1Q          # Device Name\n  resourceName: nvidia.com/GRID_P4-1Q   # vGPU information registered by GPU Operator to the node\n
                                          4. GPU direct pass-through should be added in pciHostDevices under permittedHostDevices with the following structure:

                                            pciHostDevices:           \n- externalResourceProvider: true           # Do not change by default\n  pciVendorSelector: 10DE:1BB3              # Vendor id of the current pci device\n  resourceName: nvidia.com/GP104GL_TESLA_P4 # GPU information registered by GPU Operator to the node\n
                                          5. Example of obtaining vGPU information (only applicable to vGPU): View node information on a node marked as nvidia.com/gpu.workload.config=vm-vgpu (e.g., work-node-2), and the nvidia.com/GRID_P4-1Q: 8 in Capacity indicates available vGPUs:

                                            # kubectl describe node work-node-2\nCapacity:\n  cpu:                                 64\n  devices.kubevirt.io/kvm:             1k\n  devices.kubevirt.io/tun:             1k\n  devices.kubevirt.io/vhost-net:       1k\n  ephemeral-storage:                   102626232Ki\n  hugepages-1Gi:                       0\n  hugepages-2Mi:                       0\n  memory:                              264010840Ki\n  nvidia.com/GRID_P4-1Q :              8\n  pods:                                110\nAllocatable:\n  cpu:                                  64\n  devices.kubevirt.io/kvm:              1k\n  devices.kubevirt.io/tun:              1k\n  devices.kubevirt.io/vhost-net:        1k\n  ephemeral-storage:                    94580335255\n  hugepages-1Gi:                        0\n  hugepages-2Mi:                        0\n  memory:                               263908440Ki\n  nvidia.com/GRID_P4-1Q:                8\n  pods:                                 110\n

                                            So the mdevNameSelector should be \"GRID P4-1Q\" and the resourceName should be \"nvidia.com/GRID_P4-1Q\".

                                          6. Obtain GPU direct pass-through information: On a node labeled nvidia.com/gpu.workload.config=vm-passthrough (e.g., work-node-1), view the node information; nvidia.com/GP104GL_TESLA_P4: 2 in Capacity indicates the available pass-through GPUs:

                                            # kubectl describe node work-node-1\nCapacity:\n  cpu:                            64\n  devices.kubevirt.io/kvm:        1k\n  devices.kubevirt.io/tun:        1k\n  devices.kubevirt.io/vhost-net:  1k\n  ephemeral-storage:              102626232Ki\n  hugepages-1Gi:                  0\n  hugepages-2Mi:                  0\n  memory:                         264010840Ki\n  nvidia.com/GP104GL_TESLA_P4:    2\n  pods:                           110\nAllocatable:\n  cpu:                            64\n  devices.kubevirt.io/kvm:        1k\n  devices.kubevirt.io/tun:        1k\n  devices.kubevirt.io/vhost-net:  1k\n  ephemeral-storage:              94580335255\n  hugepages-1Gi:                  0\n  hugepages-2Mi:                  0\n  memory:                         263908440Ki\n  nvidia.com/GP104GL_TESLA_P4:    2\n  pods:                           110\n

                                            So the resourceName should be \"nvidia.com/GP104GL_TESLA_P4\". To obtain the pciVendorSelector, SSH into the target node work-node-1 and run \"lspci -nnk -d 10de:\" to get the Nvidia GPU PCI information.
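
                                            The output looks roughly like the following; the vendor:device pair in square brackets is what goes into pciVendorSelector:

                                            # lspci -nnk -d 10de:\n3b:00.0 3D controller [0302]: NVIDIA Corporation GP104GL [Tesla P4] [10de:1bb3] (rev a1)\n        Kernel driver in use: vfio-pci\n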

                                          7. A note on editing the kubevirt CR: if there are multiple GPUs of the same model, only one entry needs to be written in the CR; listing each card is not necessary.

                                            # kubectl -n virtnest-system edit kubevirt kubevirt\nspec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n      - GPU\n      - DisableMDEVConfiguration\n    # Fill in the information below\n    permittedHostDevices:\n      mediatedDevices:                    # vGPU\n      - mdevNameSelector: GRID P4-1Q\n        resourceName: nvidia.com/GRID_P4-1Q\n      pciHostDevices:                     # GPU direct pass-through; in the example above the TESLA P4 has two GPUs, register only one here\n      - externalResourceProvider: true\n        pciVendorSelector: 10DE:1BB3\n        resourceName: nvidia.com/GP104GL_TESLA_P4\n
                                          "},{"location":"en/admin/virtnest/vm/vm-gpu.html#create-vm-yaml-and-use-gpu-acceleration","title":"Create VM YAML and Use GPU Acceleration","text":"

                                          The only difference from a regular virtual machine is adding GPU-related information in the devices section.

                                          Click to view complete YAML
                                          apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  name: testvm-gpu1\n  namespace: default\nspec:\n  dataVolumeTemplates:\n  - metadata:\n      creationTimestamp: null\n      name: systemdisk-testvm-gpu1\n      namespace: default\n    spec:\n      pvc:\n        accessModes:\n        - ReadWriteOnce\n        resources:\n          requests:\n            storage: 10Gi\n        storageClassName: www\n      source:\n        registry:\n          url: docker://release-ci.daocloud.io/virtnest/system-images/debian-12-x86_64:v1\n  runStrategy: Manual\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n          - bootOrder: 1\n            disk:\n              bus: virtio\n            name: systemdisk-testvm-gpu1\n          - disk:\n              bus: virtio\n            name: cloudinitdisk\n          gpus:\n          - deviceName: nvidia.com/GP104GL_TESLA_P4\n            name: gpu-0-0\n          - deviceName: nvidia.com/GP104GL_TESLA_P4\n            name: gpu-0-1\n          interfaces:\n          - masquerade: {}\n            name: default\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n      - name: default\n        pod: {}\n      volumes:\n      - dataVolume:\n          name: systemdisk-testvm-gpu1\n        name: systemdisk-testvm-gpu1\n      - cloudInitNoCloud:\n          userDataBase64: I2Nsb3VkLWNvbmZpZwpzc2hfcHdhdXRoOiB0cnVlCmRpc2FibGVfcm9vdDogZmFsc2UKY2hwYXNzd2Q6IHsibGlzdCI6ICJyb290OmRhbmdlcm91cyIsIGV4cGlyZTogRmFsc2V9CgoKcnVuY21kOgogIC0gc2VkIC1pICIvI1w/UGVybWl0Um9vdExvZ2luL3MvXi4qJC9QZXJtaXRSb290TG9naW4geWVzL2ciIC9ldGMvc3NoL3NzaGRfY29uZmlnCiAgLSBzeXN0ZW1jdGwgcmVzdGFydCBzc2guc2VydmljZQ==\n        name: cloudinitdisk\n
                                          "},{"location":"en/end-user/index.html","title":"Suanova AI Platform - End User","text":"

                                          This is the user documentation for the Suanova AI Platform aimed at end users.

                                          • User Registration

                                            User registration is the first step to using the AI platform.

                                            • User Registration
                                          • Cloud Host

                                            A cloud host is a virtual machine deployed in the cloud.

                                            • Create Cloud Host
                                            • Use Cloud Host
                                          • Container Management

                                            Container management is the core module of the AI computing center.

                                            • K8s Clusters on Cloud
                                            • Node Management
                                            • Workloads
                                            • Helm Apps and Templates
                                          • AI Lab

                                            Manage datasets and run AI training and inference jobs.

                                            • Create AI Workloads
                                            • Use Notebook
                                            • Create Training Jobs
                                            • Create Inference Services
                                          • Insight

                                            Monitor the status of clusters, nodes, and workloads through dashboards.

                                            • Monitor Clusters/Nodes
                                            • Metrics
                                            • Logs
                                            • Tracing
                                          • Personal Center

                                            Set password, keys, and language in the personal center.

                                            • Security Settings
                                            • Access Keys
                                            • Language Settings
                                          "},{"location":"en/end-user/baize/dataset/create-use-delete.html","title":"Create, Use and Delete Datasets","text":"

                                          AI Lab provides comprehensive dataset management functions needed for model development, training, and inference processes. Currently, it supports unified access to various data sources.

                                          With simple configurations, you can connect data sources to AI Lab, achieving unified data management, preloading, dataset management, and other functionalities.

                                          "},{"location":"en/end-user/baize/dataset/create-use-delete.html#create-a-dataset","title":"Create a Dataset","text":"
                                          1. In the left navigation bar, click Data Management -> Dataset List, and then click the Create button on the right.

                                          2. Select the worker cluster and namespace to which the dataset belongs, then click Next.

                                          3. Configure the data source type for the target data, then click OK.

                                            Currently supported data sources include:

                                            • GIT: Supports repositories such as GitHub, GitLab, and Gitee
                                            • S3: Supports object storage such as Amazon S3
                                            • HTTP: Directly input a valid HTTP URL
                                            • PVC: Supports pre-created Kubernetes PersistentVolumeClaim
                                            • NFS: Supports NFS shared storage
                                          4. Upon successful creation, you will be returned to the dataset list. You can perform more actions by clicking \u2507 on the right.

                                          Info

                                          The system will automatically perform a one-time data preloading after the dataset is successfully created; the dataset cannot be used until the preloading is complete.

                                          "},{"location":"en/end-user/baize/dataset/create-use-delete.html#use-a-dataset","title":"Use a Dataset","text":"

                                          Once the dataset is successfully created, it can be used in tasks such as model training and inference.

                                          "},{"location":"en/end-user/baize/dataset/create-use-delete.html#use-in-notebook","title":"Use in Notebook","text":"

                                          When creating a Notebook, you can use the dataset directly; the usage is as follows:

                                          • Use the dataset as training data mount
                                          • Use the dataset as code mount

                                          "},{"location":"en/end-user/baize/dataset/create-use-delete.html#use-in-training-obs","title":"Use in Training obs","text":"
                                          • Use the dataset to specify job output
                                          • Use the dataset to specify job input
                                          • Use the dataset to specify TensorBoard output
                                          "},{"location":"en/end-user/baize/dataset/create-use-delete.html#use-in-inference-services","title":"Use in Inference Services","text":"
                                          • Use the dataset to mount a model
                                          "},{"location":"en/end-user/baize/dataset/create-use-delete.html#delete-a-dataset","title":"Delete a Dataset","text":"

                                          If you find a dataset to be redundant, expired, or no longer needed, you can delete it from the dataset list.

                                          1. Click the \u2507 on the right side of the dataset list, then choose Delete from the dropdown menu.

                                          2. In the pop-up window, confirm the dataset you want to delete, enter the dataset name, and then click Delete.

                                          3. A confirmation message will appear indicating successful deletion, and the dataset will disappear from the list.

                                          Caution

                                          Once a dataset is deleted, it cannot be recovered, so please proceed with caution.

                                          "},{"location":"en/end-user/baize/dataset/environments.html","title":"Manage Python Environment Dependencies","text":"

                                          This document aims to guide users on managing environment dependencies using AI platform. Below are the specific steps and considerations.

                                          1. Overview of Environment Management
                                          2. Create New Environment
                                          3. Configure Environment
                                          4. Troubleshooting
                                          "},{"location":"en/end-user/baize/dataset/environments.html#overview","title":"Overview","text":"

                                          Traditionally, Python environment dependencies are built into an image, which includes the Python version and dependency packages. This approach has high maintenance costs and is inconvenient to update, often requiring a complete rebuild of the image.

                                          In AI Lab, users can manage pure environment dependencies through the Environment Management module, decoupling this part from the image. The advantages include:

                                          • One environment can be used in multiple places, such as in Notebooks, distributed training tasks, and even inference services.
                                          • Updating dependency packages is more convenient; you only need to update the environment dependencies without rebuilding the image.

                                          The main components of the environment management are:

                                          • Cluster : Select the cluster to operate on.
                                          • Namespace : Select the namespace to limit the scope of operations.
                                          • Environment List : Displays all environments and their statuses under the current cluster and namespace.

                                          "},{"location":"en/end-user/baize/dataset/environments.html#explanation-of-environment-list-fields","title":"Explanation of Environment List Fields","text":"
                                          • Name : The name of the environment.
                                          • Status : The current status of the environment (normal or failed). New environments undergo a warming-up process, after which they can be used in other tasks.
                                          • Creation Time : The time the environment was created.
                                          "},{"location":"en/end-user/baize/dataset/environments.html#creat-new-environment","title":"Creat New Environment","text":"

                                          On the Environment Management interface, click the Create button at the top right to enter the environment creation process.

                                          Fill in the following basic information:

                                          • Name : Enter the environment name, with a length of 2-63 characters, starting and ending with lowercase letters or numbers.
                                          • Deployment Location:
                                            • Cluster : Select the cluster to deploy, such as gpu-cluster.
                                            • Namespace : Select the namespace, such as default.
                                          • Remarks (optional): Enter remarks.
                                          • Labels (optional): Add labels to the environment.
                                          • Annotations (optional): Add annotations to the environment.

                                          After completing the information, click Next to proceed to environment configuration.
                                          "},{"location":"en/end-user/baize/dataset/environments.html#configure-environment","title":"Configure Environment","text":"

                                          In the environment configuration step, users need to configure the Python version and dependency management tool.

                                          "},{"location":"en/end-user/baize/dataset/environments.html#environment-settings","title":"Environment Settings","text":"
                                          • Python Version : Select the required Python version, such as 3.12.3.
                                          • Package Manager : Choose the package management tool, either PIP or CONDA.
                                          • Environment Data :
                                            • If PIP is selected: Enter the dependency package list in requirements.txt format in the editor below (see the example after this list).
                                            • If CONDA is selected: Enter the dependency package list in environment.yaml format in the editor below.
                                          • Other Options (optional):
                                            • Additional pip Index URLs : Configure additional pip index URLs; suitable for internal enterprise private repositories or PIP acceleration sites.
                                            • GPU Configuration : Enable or disable GPU configuration; some GPU-related dependency packages need GPU resources configured during preloading.
                                            • Associated Storage : Select the associated storage configuration; environment dependency packages will be stored in the associated storage. Note: Storage must support ReadWriteMany.
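
                                          For example, with PIP selected, the environment data is a plain requirements.txt-style list (illustrative package names and versions):

                                          numpy>=1.24\npandas==2.2.2\ntorch==2.1.0\n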

                                          After configuration, click the Create button, and the system will automatically create and configure the new Python environment.

                                          "},{"location":"en/end-user/baize/dataset/environments.html#troubleshooting","title":"Troubleshooting","text":"
                                          • If environment creation fails:

                                            • Check if the network connection is normal.
                                            • Verify that the Python version and package manager configuration are correct.
                                            • Ensure the selected cluster and namespace are available.
                                          • If dependency preloading fails:

                                            • Check if the requirements.txt or environment.yaml file format is correct.
                                            • Verify that the dependency package names and versions are correct.

                                          If other issues arise, contact the platform administrator or refer to the platform help documentation for more support.

                                          These are the basic steps and considerations for managing Python dependencies in AI Lab.

                                          "},{"location":"en/end-user/baize/inference/models.html","title":"Model Support","text":"

                                          With the rapid iteration of AI Lab, we now support various model inference services. Here you can see information about the supported models.

                                          • AI Lab v0.3.0 introduced model inference services for traditional deep learning models, letting users use AI Lab's inference services directly without worrying about model deployment and maintenance.
                                          • AI Lab v0.6.0 supports the full vLLM inference capability, covering many large language models such as LLama, Qwen, ChatGLM, and more.

                                          Note

                                          Inference capability support depends on the version of AI Lab.

                                          In AI Lab, you can use GPU types that have been verified by the AI platform. For more details, refer to the GPU Support Matrix.

                                          "},{"location":"en/end-user/baize/inference/models.html#triton-inference-server","title":"Triton Inference Server","text":"

                                          Through the Triton Inference Server, traditional deep learning models can be well supported. Currently, AI Lab supports mainstream inference backend services:

                                          Backend Supported Model Formats Description pytorch TorchScript, PyTorch 2.0 formats triton-inference-server/pytorch_backend tensorflow TensorFlow 2.x triton-inference-server/tensorflow_backend vLLM (Deprecated) vLLM-compatible large language models triton-inference-server/vllm_backend

                                          Danger

                                          Running vLLM as a Triton backend is deprecated. It is recommended to use the natively supported vLLM framework to deploy your large language models.

                                          "},{"location":"en/end-user/baize/inference/models.html#vllm","title":"vLLM","text":"

                                          With vLLM, we can quickly use large language models. Here you can see the list of models we support, which generally aligns with the vLLM Supported Models list.

                                          • HuggingFace Models: We support most of HuggingFace's models. You can see more models at the HuggingFace Model Hub.
                                          • The vLLM Supported Models list includes supported large language models and vision-language models.
                                          • Models fine-tuned on top of vLLM-supported base models.
                                          "},{"location":"en/end-user/baize/inference/models.html#new-features-of-vllm","title":"New Features of vLLM","text":"

                                          Currently, AI Lab also supports some new features when using vLLM as an inference tool:

                                          • Enable LoRA adapters during inference to serve fine-tuned models efficiently.
                                          • Provide an OpenAI-compatible API, making it easy for users to switch to local inference services at low cost (see the request sketch below).
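
                                          As a hedged illustration of that OpenAI-compatible interface, a chat request from Python might look like the following; the host, port, and model name are placeholders for a deployed service, not fixed platform values.

                                          import requests\n\n# Placeholder endpoint of a vLLM inference service deployed in AI Lab\nurl = 'http://<ip>:<port>/v1/chat/completions'\n\npayload = {\n    'model': '<model-name>',  # name of the deployed model\n    'messages': [{'role': 'user', 'content': 'Hello!'}],\n    'max_tokens': 64,\n}\n\nresp = requests.post(url, json=payload, timeout=60)\nprint(resp.json()['choices'][0]['message']['content'])\n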
                                          "},{"location":"en/end-user/baize/inference/triton-inference.html","title":"Create Inference Service Using Triton Framework","text":"

                                          The AI Lab currently offers Triton and vLLM as inference frameworks. Users can quickly start a high-performance inference service with simple configurations.

                                          Danger

                                          Running vLLM as a Triton backend is deprecated. It is recommended to use the natively supported vLLM framework to deploy your large language models.

                                          "},{"location":"en/end-user/baize/inference/triton-inference.html#introduction-to-triton","title":"Introduction to Triton","text":"

                                          Triton is an open-source inference server developed by NVIDIA, designed to simplify the deployment and inference of machine learning models. It supports a variety of deep learning frameworks, including TensorFlow and PyTorch, enabling users to easily manage and deploy different types of models.

                                          "},{"location":"en/end-user/baize/inference/triton-inference.html#prerequisites","title":"Prerequisites","text":"

                                          Prepare model data: Manage the model code in dataset management and ensure that the data is successfully preloaded. The following example uses a PyTorch model for MNIST handwritten digit recognition.

                                          Note

                                          The model to be inferred must adhere to the following directory structure within the dataset:

                                            <model-repository-name>\n  \u2514\u2500\u2500 <model-name>\n     \u2514\u2500\u2500 <version>\n        \u2514\u2500\u2500 <model-definition-file>\n

                                          The directory structure in this example is as follows:

                                              model-repo\n    \u2514\u2500\u2500 mnist-cnn\n        \u2514\u2500\u2500 1\n            \u2514\u2500\u2500 model.pt\n
                                          "},{"location":"en/end-user/baize/inference/triton-inference.html#create-inference-service","title":"Create Inference Service","text":"

                                          Currently, form-based creation is supported, allowing you to create services with field prompts in the interface.

                                          "},{"location":"en/end-user/baize/inference/triton-inference.html#configure-model-path","title":"Configure Model Path","text":"

                                          The model path model-repo/mnist-cnn/1/model.pt must be consistent with the directory structure of the dataset.

                                          "},{"location":"en/end-user/baize/inference/triton-inference.html#model-configuration","title":"Model Configuration","text":""},{"location":"en/end-user/baize/inference/triton-inference.html#configure-input-and-output-parameters","title":"Configure Input and Output Parameters","text":"

                                          Note

                                          The first dimension of the input and output parameters defaults to the batch size; setting it to -1 lets the batch size be computed automatically from the input inference data. The remaining dimensions and the data type must match the model's input.
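
                                          For orientation only, these form fields map conceptually onto a Triton config.pbtxt. A minimal sketch for the mnist-cnn example above follows; the field values here are assumptions for illustration, and the platform generates the actual configuration:

                                            name: \"mnist-cnn\"\nbackend: \"pytorch\"\nmax_batch_size: 8\ninput [\n  {\n    name: \"model_input\"\n    data_type: TYPE_FP32\n    dims: [ 1, 32, 32 ]  # batch dimension is handled separately via max_batch_size\n  }\n]\noutput [\n  {\n    name: \"model_output\"\n    data_type: TYPE_FP32\n    dims: [ 10 ]\n  }\n]\n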

                                          "},{"location":"en/end-user/baize/inference/triton-inference.html#configure-environment","title":"Configure Environment","text":"

                                          You can import the environment created in Manage Python Environment Dependencies to serve as the runtime environment for inference.

                                          "},{"location":"en/end-user/baize/inference/triton-inference.html#advanced-settings","title":"Advanced Settings","text":""},{"location":"en/end-user/baize/inference/triton-inference.html#configure-authentication-policy","title":"Configure Authentication Policy","text":"

                                          Supports API key-based request authentication. Users can customize and add authentication parameters.
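
                                          When authentication is enabled, each request must carry the configured key. The header below is only a sketch; the actual header name and format follow the authentication parameters you define on the platform.

                                            curl -X POST \"http://<ip>:<port>/v2/models/<inference-name>/infer\" \\\n  -H \"Content-Type: application/json\" \\\n  -H \"Authorization: Bearer <your-api-key>\" \\\n  -d '{\"inputs\": [ ... ]}'\n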

                                          "},{"location":"en/end-user/baize/inference/triton-inference.html#affinity-scheduling","title":"Affinity Scheduling","text":"

                                          Supports automated affinity scheduling based on GPU resources and other node configurations. It also allows users to customize scheduling policies.

                                          "},{"location":"en/end-user/baize/inference/triton-inference.html#access","title":"Access","text":""},{"location":"en/end-user/baize/inference/triton-inference.html#api-access","title":"API Access","text":"
                                          • Triton provides a REST-based API, allowing clients to perform model inference via HTTP POST requests.
                                          • Clients can send requests with JSON-formatted bodies containing input data and related metadata.
                                          "},{"location":"en/end-user/baize/inference/triton-inference.html#http-access","title":"HTTP Access","text":"
                                          1. Send HTTP POST Request: Use tools like curl or HTTP client libraries (e.g., Python's requests library) to send POST requests to the Triton Server.

                                          2. Set HTTP Headers: Headers are generated automatically from your settings; include metadata about the model inputs and outputs in the HTTP headers.

                                          3. Construct Request Body: The request body usually contains the input data for inference and model-specific metadata.

                                          "},{"location":"en/end-user/baize/inference/triton-inference.html#example-curl-command","title":"Example curl Command","text":"
                                            curl -X POST \"http://<ip>:<port>/v2/models/<inference-name>/infer\" \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\n    \"inputs\": [\n      {\n        \"name\": \"model_input\",            \n        \"shape\": [1, 1, 32, 32],          \n        \"datatype\": \"FP32\",               \n        \"data\": [\n          [0.1234, 0.5678, 0.9101, ... ]  \n        ]\n      }\n    ]\n  }'\n
                                          • <ip> is the host address where the Triton Inference Server is running.
                                          • <port> is the port where the Triton Inference Server is running.
                                          • <inference-name> is the name of the inference service that has been created.
                                          • \"name\" must match the name of the input parameter in the model configuration.
                                          • \"shape\" must match the dims of the input parameter in the model configuration.
                                          • \"datatype\" must match the Data Type of the input parameter in the model configuration.
                                          • \"data\" should be replaced with the actual inference data.

                                          Please note that the above example code needs to be adjusted according to your specific model and environment. The format and content of the input data must also comply with the model's requirements.
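
                                          For completeness, the same request can be sent from Python with the requests library. This is a minimal sketch using the same placeholders as the curl example above:

                                            import requests\n\n# Fill in the same placeholders as in the curl example\nurl = 'http://<ip>:<port>/v2/models/<inference-name>/infer'\n\npayload = {\n    'inputs': [\n        {\n            'name': 'model_input',      # must match the configured input name\n            'shape': [1, 1, 32, 32],    # must match the configured dims\n            'datatype': 'FP32',         # must match the configured data type\n            'data': [0.0] * (1 * 1 * 32 * 32),  # replace with real inference data\n        }\n    ]\n}\n\nresp = requests.post(url, json=payload, timeout=30)\nprint(resp.json())\n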

                                          "},{"location":"en/end-user/baize/inference/vllm-inference.html","title":"Create Inference Service Using vLLM Framework","text":"

                                          AI Lab supports using vLLM as an inference service, offering all the capabilities of vLLM while fully adapting to the OpenAI interface definition.

                                          "},{"location":"en/end-user/baize/inference/vllm-inference.html#introduction-to-vllm","title":"Introduction to vLLM","text":"

                                          vLLM is a fast and easy-to-use library for inference and serving. It aims to significantly improve the throughput and memory efficiency of language model serving in real-time scenarios. vLLM boasts several features in terms of speed and flexibility:

                                          • Continuous batching of incoming requests.
                                          • Efficient management of attention key and value memory using PagedAttention.
                                          • Seamless integration with popular HuggingFace models.
                                          • Compatible with OpenAI's API server.
                                          "},{"location":"en/end-user/baize/inference/vllm-inference.html#prerequisites","title":"Prerequisites","text":"

                                          Prepare model data: Manage the model code in dataset management and ensure that the data is successfully preloaded.

                                          "},{"location":"en/end-user/baize/inference/vllm-inference.html#create-inference-service","title":"Create Inference Service","text":"
                                          1. Select the vLLM inference framework. In the model module selection, choose the pre-created model dataset hdd-models and fill in the path information where the model is located within the dataset.

                                            This guide uses the ChatGLM3 model for creating the inference service.

                                          2. Configure the resources for the inference service and adjust the parameters for running the inference service.

                                            Parameter Name Description GPU Resources Configure GPU resources for inference based on the model scale and cluster resources. Allow Remote Code Controls whether vLLM trusts and executes code from remote sources. LoRA LoRA is a parameter-efficient fine-tuning technique for deep learning models. It reduces the number of parameters and computational complexity by decomposing the original model parameter matrix into low-rank matrices. 1. --lora-modules: Specifies the modules or layers to which low-rank adaptation is applied. 2. max_lora_rank: Specifies the maximum rank for each adapter layer in the LoRA model. For simpler tasks, a smaller rank value can be chosen, while more complex tasks may require a larger rank value to ensure model performance. 3. max_loras: Indicates the maximum number of LoRA adapters the model can load, customized based on model size and inference complexity. 4. max_cpu_loras: Specifies the maximum number of LoRA adapters that can be cached in a CPU environment. Associated Environment Selects predefined environment dependencies required for inference. (A CLI sketch of these LoRA parameters follows these steps.)

                                            Info

                                            For models that support LoRA parameters, refer to vLLM Supported Models.

                                          3. In Advanced Configuration, automated affinity scheduling based on GPU resources and other node configurations is supported. Users can also customize scheduling policies.
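
                                          As a rough illustration of how the LoRA parameters in step 2 map onto vLLM's server flags (the model path and adapter name are placeholders, and the platform assembles the actual launch command for you):

                                            python -m vllm.entrypoints.openai.api_server \\\n  --model /data/models/<base-model> \\\n  --enable-lora \\\n  --lora-modules my-adapter=/data/loras/<adapter-dir> \\\n  --max-lora-rank 16 \\\n  --max-loras 2 \\\n  --max-cpu-loras 4\n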

                                          "},{"location":"en/end-user/baize/inference/vllm-inference.html#verify-inference-service","title":"Verify Inference Service","text":"

                                          Once the inference service is created, click the name of the inference service to enter the details and view the API call methods. Verify the execution results using Curl, Python, and Node.js.

                                          Copy the curl command from the details and execute it in the terminal to send a model inference request; a successful call returns the model's generated response.

                                          "},{"location":"en/end-user/baize/jobs/create.html","title":"Create Job","text":"

                                          Job management refers to the functionality of creating and managing job lifecycles through job scheduling and control components.

                                          The AI platform's smart computing capability adopts Kubernetes' Job mechanism to schedule various AI inference and training jobs.
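
                                          For context, the Kubernetes Job primitive that this scheduling builds on looks roughly like the following minimal manifest; the name, image, and command are illustrative placeholders rather than what the platform actually generates:

                                            apiVersion: batch/v1\nkind: Job\nmetadata:\n  name: example-training-job\nspec:\n  backoffLimit: 2\n  template:\n    spec:\n      restartPolicy: Never\n      containers:\n        - name: trainer\n          image: pytorch/pytorch  # illustrative image\n          command: [\"python\", \"/code/train.py\"]\n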

                                          1. Click Job Center -> Jobs in the left navigation bar to enter the job list. Click the Create button on the right.

                                          2. The system will pre-fill basic configuration data, including the cluster, namespace, type, queue, and priority. Adjust these parameters and click Next.

                                          3. Configure the URL, runtime parameters, and associated datasets, then click Next.

                                          4. Optionally add labels, annotations, runtime environment variables, and other job parameters. Select a scheduling policy and click Confirm.

                                          5. After the job is successfully created, it will have several running statuses:

                                            • Running
                                            • Queued
                                            • Submission successful, Submission failed
                                            • Successful, Failed
                                          "},{"location":"en/end-user/baize/jobs/create.html#next-steps","title":"Next Steps","text":"
                                          • View Job Load
                                          • Delete Job
                                          "},{"location":"en/end-user/baize/jobs/delete.html","title":"Delete Job","text":"

                                          If you find a job to be redundant, expired, or no longer needed for any other reason, you can delete it from the job list.

                                          1. Click the \u2507 on the right side of the job in the job list, then choose Delete from the dropdown menu.

                                          2. In the pop-up window, confirm the job you want to delete, enter the job name, and then click Delete.

                                          3. A confirmation message will appear indicating successful deletion, and the job will disappear from the list.

                                          Caution

                                          Once a job is deleted, it cannot be recovered, so please proceed with caution.

                                          "},{"location":"en/end-user/baize/jobs/pytorch.html","title":"Pytorch Jobs","text":"

                                          Pytorch is an open-source deep learning framework that provides a flexible environment for training and deployment. A Pytorch job is a job that uses the Pytorch framework.

                                          In the AI Lab platform, we provide support and adaptation for Pytorch jobs. Through a graphical interface, you can quickly create Pytorch jobs and perform model training.

                                          "},{"location":"en/end-user/baize/jobs/pytorch.html#job-configuration","title":"Job Configuration","text":"
                                          • Job types support both Pytorch Single and Pytorch Distributed modes.
                                          • The runtime image already supports the Pytorch framework by default, so no additional installation is required.
                                          "},{"location":"en/end-user/baize/jobs/pytorch.html#job-runtime-environment","title":"Job Runtime Environment","text":"

                                          Here we use the baize-notebook base image and the associated environment as the basic runtime environment for the job.

                                          To learn how to create an environment, refer to Environments.

                                          "},{"location":"en/end-user/baize/jobs/pytorch.html#create-jobs","title":"Create Jobs","text":""},{"location":"en/end-user/baize/jobs/pytorch.html#pytorch-single-jobs","title":"Pytorch Single Jobs","text":"
                                          1. Log in to the AI Lab platform, click Job Center in the left navigation bar to enter the Jobs page.
                                          2. Click the Create button in the upper right corner to enter the job creation page.
                                          3. Select the job type as Pytorch Single and click Next .
                                          4. Fill in the job name and description, then click OK .
                                          "},{"location":"en/end-user/baize/jobs/pytorch.html#parameters","title":"Parameters","text":"
                                          • Start command: bash
                                          • Command parameters:
                                          import torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n# Define a simple neural network\nclass SimpleNet(nn.Module):\n    def __init__(self):\n        super(SimpleNet, self).__init__()\n        self.fc = nn.Linear(10, 1)\n\n    def forward(self, x):\n        return self.fc(x)\n\n# Create model, loss function, and optimizer\nmodel = SimpleNet()\ncriterion = nn.MSELoss()\noptimizer = optim.SGD(model.parameters(), lr=0.01)\n\n# Generate some random data\nx = torch.randn(100, 10)\ny = torch.randn(100, 1)\n\n# Train the model\nfor epoch in range(100):\n    # Forward pass\n    outputs = model(x)\n    loss = criterion(outputs, y)\n\n    # Backward pass and optimization\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n\n    if (epoch + 1) % 10 == 0:\n        print(f'Epoch [{epoch+1}/100], Loss: {loss.item():.4f}')\n\nprint('Training finished.')\n
                                          "},{"location":"en/end-user/baize/jobs/pytorch.html#results","title":"Results","text":"

                                          Once the job is successfully submitted, we can enter the job details to see the resource usage. From the upper right corner, go to Workload Details to view the log output during the training process.

                                          [HAMI-core Warn(1:140244541377408:utils.c:183)]: get default cuda from (null)\n[HAMI-core Msg(1:140244541377408:libvgpu.c:855)]: Initialized\nEpoch [10/100], Loss: 1.1248\nEpoch [20/100], Loss: 1.0486\nEpoch [30/100], Loss: 0.9969\nEpoch [40/100], Loss: 0.9611\nEpoch [50/100], Loss: 0.9360\nEpoch [60/100], Loss: 0.9182\nEpoch [70/100], Loss: 0.9053\nEpoch [80/100], Loss: 0.8960\nEpoch [90/100], Loss: 0.8891\nEpoch [100/100], Loss: 0.8841\nTraining finished.\n[HAMI-core Msg(1:140244541377408:multiprocess_memory_limit.c:468)]: Calling exit handler 1\n
                                          "},{"location":"en/end-user/baize/jobs/pytorch.html#pytorch-distributed-jobs","title":"Pytorch Distributed Jobs","text":"
                                          1. Log in to the AI Lab platform, click Job Center in the left navigation bar to enter the Jobs page.
                                          2. Click the Create button in the upper right corner to enter the job creation page.
                                          3. Select the job type as Pytorch Distributed and click Next.
                                          4. Fill in the job name and description, then click OK.
                                          "},{"location":"en/end-user/baize/jobs/pytorch.html#parameters_1","title":"Parameters","text":"
                                          • Start command: bash
                                          • Command parameters:
                                          import os\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nclass SimpleModel(nn.Module):\n    def __init__(self):\n        super(SimpleModel, self).__init__()\n        self.fc = nn.Linear(10, 1)\n\n    def forward(self, x):\n        return self.fc(x)\n\ndef train():\n    # Print environment information\n    print(f'PyTorch version: {torch.__version__}')\n    print(f'CUDA available: {torch.cuda.is_available()}')\n    if torch.cuda.is_available():\n        print(f'CUDA version: {torch.version.cuda}')\n        print(f'CUDA device count: {torch.cuda.device_count()}')\n\n    rank = int(os.environ.get('RANK', '0'))\n    world_size = int(os.environ.get('WORLD_SIZE', '1'))\n\n    print(f'Rank: {rank}, World Size: {world_size}')\n\n    # Initialize distributed environment\n    try:\n        if world_size > 1:\n            dist.init_process_group('nccl')\n            print('Distributed process group initialized successfully')\n        else:\n            print('Running in non-distributed mode')\n    except Exception as e:\n        print(f'Error initializing process group: {e}')\n        return\n\n    # Set device\n    try:\n        if torch.cuda.is_available():\n            device = torch.device(f'cuda:{rank % torch.cuda.device_count()}')\n            print(f'Using CUDA device: {device}')\n        else:\n            device = torch.device('cpu')\n            print('CUDA not available, using CPU')\n    except Exception as e:\n        print(f'Error setting device: {e}')\n        device = torch.device('cpu')\n        print('Falling back to CPU')\n\n    try:\n        model = SimpleModel().to(device)\n        print('Model moved to device successfully')\n    except Exception as e:\n        print(f'Error moving model to device: {e}')\n        return\n\n    try:\n        if world_size > 1:\n            ddp_model = DDP(model, device_ids=[rank % torch.cuda.device_count()] if torch.cuda.is_available() else None)\n            print('DDP model created successfully')\n        else:\n            ddp_model = model\n            print('Using non-distributed model')\n    except Exception as e:\n        print(f'Error creating DDP model: {e}')\n        return\n\n    loss_fn = nn.MSELoss()\n    optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)\n\n    # Generate some random data\n    try:\n        data = torch.randn(100, 10, device=device)\n        labels = torch.randn(100, 1, device=device)\n        print('Data generated and moved to device successfully')\n    except Exception as e:\n        print(f'Error generating or moving data to device: {e}')\n        return\n\n    for epoch in range(10):\n        try:\n            ddp_model.train()\n            outputs = ddp_model(data)\n            loss = loss_fn(outputs, labels)\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n\n            if rank == 0:\n                print(f'Epoch {epoch}, Loss: {loss.item():.4f}')\n        except Exception as e:\n            print(f'Error during training epoch {epoch}: {e}')\n            break\n\n    if world_size > 1:\n        dist.destroy_process_group()\n\nif __name__ == '__main__':\n    train()\n
                                          "},{"location":"en/end-user/baize/jobs/pytorch.html#number-of-job-replicas","title":"Number of Job Replicas","text":"

                                          Note that Pytorch Distributed training jobs will create a group of Master and Worker training Pods, where the Master is responsible for coordinating the training job, and the Worker is responsible for the actual training work.

                                          Note

                                          In this demonstration, the Master replica count is 1 and the Worker replica count is 2. Therefore, the replica count in the Job Configuration must be set to 3, the sum of the Master and Worker replica counts. Pytorch will automatically assign the Master and Worker roles.
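
                                          Behind this configuration sits a Kubeflow PyTorchJob with separate Master and Worker replica specs. A hedged sketch of that layout follows; the names and image are illustrative, and the platform generates the actual spec:

                                            apiVersion: kubeflow.org/v1\nkind: PyTorchJob\nmetadata:\n  name: example-distributed-job\nspec:\n  pytorchReplicaSpecs:\n    Master:\n      replicas: 1\n      template:\n        spec:\n          containers:\n            - name: pytorch  # the container must be named pytorch\n              image: pytorch/pytorch  # illustrative image\n    Worker:\n      replicas: 2\n      template:\n        spec:\n          containers:\n            - name: pytorch\n              image: pytorch/pytorch\n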

                                          "},{"location":"en/end-user/baize/jobs/pytorch.html#results_1","title":"Results","text":"

                                          Similarly, we can enter the job details to view the resource usage and the log output of each Pod.

                                          "},{"location":"en/end-user/baize/jobs/tensorboard.html","title":"Job Analysis","text":"

                                          AI Lab provides visualization analysis tools for the model development process, used to display the training process and results of machine learning models. This document introduces the basic concepts of Job Analysis (Tensorboard), its usage in the AI Lab system, and how to configure the log content of datasets.

                                          Note

                                          Tensorboard is a visualization tool provided by TensorFlow, used to display the training process and results of machine learning models. It can help developers more intuitively understand the training dynamics of their models, analyze model performance, debug issues, and more.

                                          The role and advantages of Tensorboard in the model development process:

                                          • Visualize Training Process : Display metrics such as training and validation loss, and accuracy through charts, helping developers intuitively observe the training effects of the model.
                                          • Debug and Optimize Models : Viewing the weight and gradient distributions of different layers helps developers discover and fix issues in the model.
                                          • Compare Different Experiments : Simultaneously display the results of multiple experiments, making it convenient for developers to compare the effects of different models and hyperparameter configurations.
                                          • Track Training Data : Record the datasets and parameters used during training to ensure the reproducibility of experiments.
                                          "},{"location":"en/end-user/baize/jobs/tensorboard.html#how-to-create-tensorboard","title":"How to Create Tensorboard","text":"

                                          In the AI Lab system, we provide a convenient way to create and manage Tensorboard. Here are the specific steps:

                                          "},{"location":"en/end-user/baize/jobs/tensorboard.html#enable-tensorboard-when-creating-a-notebook","title":"Enable Tensorboard When Creating a Notebook","text":"
                                          1. Create a Notebook : Create a new Notebook on the AI Lab platform.
                                          2. Enable Tensorboard : On the Notebook creation page, enable the Tensorboard option and specify the dataset and log path.

                                          "},{"location":"en/end-user/baize/jobs/tensorboard.html#enable-tensorboard-after-creating-and-completing-a-distributed-job","title":"Enable Tensorboard After Creating and Completing a Distributed Job","text":"
                                          1. Create a Distributed Job : Create a new distributed training job on the AI Lab platform.
                                          2. Configure Tensorboard : On the job configuration page, enable the Tensorboard option and specify the dataset and log path.
                                          3. View Tensorboard After Job Completion : After the job is completed, you can view the Tensorboard link on the job details page. Click the link to see the visualized results of the training process.

                                          "},{"location":"en/end-user/baize/jobs/tensorboard.html#directly-reference-tensorboard-in-a-notebook","title":"Directly Reference Tensorboard in a Notebook","text":"

                                          In a Notebook, you can directly start Tensorboard through code. Here is a sample code snippet:

                                          # Import necessary libraries\nimport tensorflow as tf\nimport datetime\n\n# Load and normalize the MNIST dataset (defines x_train/y_train and x_test/y_test used below)\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\n# Define log directory\nlog_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n# Create Tensorboard callback\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n\n# Build and compile model\nmodel = tf.keras.models.Sequential([\n    tf.keras.layers.Flatten(input_shape=(28, 28)),\n    tf.keras.layers.Dense(512, activation='relu'),\n    tf.keras.layers.Dropout(0.2),\n    tf.keras.layers.Dense(10, activation='softmax')\n])\n\nmodel.compile(optimizer='adam',\n              loss='sparse_categorical_crossentropy',\n              metrics=['accuracy'])\n\n# Train model and enable Tensorboard callback\nmodel.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test), callbacks=[tensorboard_callback])\n
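
                                          To display Tensorboard inline once training starts writing logs, the standard Jupyter magics can be used (assuming a Jupyter-based Notebook environment):

                                          %load_ext tensorboard\n%tensorboard --logdir logs/fit\n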
                                          "},{"location":"en/end-user/baize/jobs/tensorboard.html#how-to-configure-dataset-log-content","title":"How to Configure Dataset Log Content","text":"

                                          When using Tensorboard, you can record and configure different datasets and log content. Here are some common configuration methods:

                                          "},{"location":"en/end-user/baize/jobs/tensorboard.html#configure-training-and-validation-dataset-logs","title":"Configure Training and Validation Dataset Logs","text":"

                                          While training the model, you can use TensorFlow's tf.summary API to record logs for the training and validation datasets. Here is a sample code snippet:

                                          # Import necessary libraries\nimport tensorflow as tf\n\n# This snippet assumes EPOCHS, train_dataset, val_dataset, the train_step/val_step\n# functions, and the metric objects (train_loss, train_accuracy, val_loss,\n# val_accuracy) are already defined in the surrounding training script.\n\n# Create log directories\ntrain_log_dir = 'logs/gradient_tape/train'\nval_log_dir = 'logs/gradient_tape/val'\ntrain_summary_writer = tf.summary.create_file_writer(train_log_dir)\nval_summary_writer = tf.summary.create_file_writer(val_log_dir)\n\n# Train model and record logs\nfor epoch in range(EPOCHS):\n    for (x_train, y_train) in train_dataset:\n        # Training step\n        train_step(x_train, y_train)\n        with train_summary_writer.as_default():\n            tf.summary.scalar('loss', train_loss.result(), step=epoch)\n            tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)\n\n    for (x_val, y_val) in val_dataset:\n        # Validation step\n        val_step(x_val, y_val)\n        with val_summary_writer.as_default():\n            tf.summary.scalar('loss', val_loss.result(), step=epoch)\n            tf.summary.scalar('accuracy', val_accuracy.result(), step=epoch)\n
                                          "},{"location":"en/end-user/baize/jobs/tensorboard.html#configure-custom-logs","title":"Configure Custom Logs","text":"

                                          In addition to logs for training and validation datasets, you can also record other custom log content such as learning rate and gradient distribution. Here is a sample code snippet:

                                          # Record custom logs (assumes train_summary_writer, learning_rate, gradients,\n# and epoch are defined as in the previous snippet)\nwith train_summary_writer.as_default():\n    tf.summary.scalar('learning_rate', learning_rate, step=epoch)\n    tf.summary.histogram('gradients', gradients, step=epoch)\n
                                          "},{"location":"en/end-user/baize/jobs/tensorboard.html#tensorboard-management","title":"Tensorboard Management","text":"

                                          In AI Lab, Tensorboards created through various methods are uniformly displayed on the job analysis page, making it convenient for users to view and manage.

                                          Users can view information such as the link, status, and creation time of Tensorboard on the job analysis page and directly access the visualized results of Tensorboard through the link.

                                          "},{"location":"en/end-user/baize/jobs/tensorflow.html","title":"Tensorflow Jobs","text":"

                                          Tensorflow, along with Pytorch, is a highly active open-source deep learning framework that provides a flexible environment for training and deployment.

                                          AI Lab provides support and adaptation for the Tensorflow framework. You can quickly create Tensorflow jobs and conduct model training through graphical operations.

                                          "},{"location":"en/end-user/baize/jobs/tensorflow.html#job-configuration","title":"Job Configuration","text":"
                                          • The job types support both Tensorflow Single and Tensorflow Distributed modes.
                                          • The runtime image already supports the Tensorflow framework by default, so no additional installation is required.
                                          "},{"location":"en/end-user/baize/jobs/tensorflow.html#job-runtime-environment","title":"Job Runtime Environment","text":"

                                          Here, we use the baize-notebook base image and the associated environment as the basic runtime environment for jobs.

                                          For information on how to create an environment, refer to Environment List.

                                          "},{"location":"en/end-user/baize/jobs/tensorflow.html#creating-a-job","title":"Creating a Job","text":""},{"location":"en/end-user/baize/jobs/tensorflow.html#example-tfjob-single","title":"Example TFJob Single","text":"
                                          1. Log in to the AI Lab platform and click Job Center in the left navigation bar to enter the Jobs page.
                                          2. Click the Create button in the upper right corner to enter the job creation page.
                                          3. Select the job type as Tensorflow Single and click Next .
                                          4. Fill in the job name and description, then click OK .
                                          "},{"location":"en/end-user/baize/jobs/tensorflow.html#pre-warming-the-code-repository","title":"Pre-warming the Code Repository","text":"

                                          Use AI Lab -> Dataset List to create a dataset and pull the code from a remote GitHub repository into the dataset. This way, when creating a job, you can directly select the dataset and mount the code into the job.

                                          Demo code repository address: https://github.com/d-run/training-sample-code/

                                          "},{"location":"en/end-user/baize/jobs/tensorflow.html#parameters","title":"Parameters","text":"
                                          • Launch command: Use bash
                                          • Command parameters: Use python /code/tensorflow/tf-single.py
                                          \"\"\"\n  pip install tensorflow numpy\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\n# Create some random data\nx = np.random.rand(100, 1)\ny = 2 * x + 1 + np.random.rand(100, 1) * 0.1\n\n# Create a simple model\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Dense(1, input_shape=(1,))\n])\n\n# Compile the model\nmodel.compile(optimizer='adam', loss='mse')\n\n# Train the model, setting epochs to 10\nhistory = model.fit(x, y, epochs=10, verbose=1)\n\n# Print the final loss\nprint('Final loss: {' + str(history.history['loss'][-1]) +'}')\n\n# Use the model to make predictions\ntest_x = np.array([[0.5]])\nprediction = model.predict(test_x)\nprint(f'Prediction for x=0.5: {prediction[0][0]}')\n
                                          "},{"location":"en/end-user/baize/jobs/tensorflow.html#results","title":"Results","text":"

                                          After the job is successfully submitted, you can enter the job details to see the resource usage. From the upper right corner, navigate to Workload Details to view log outputs during the training process.

                                          "},{"location":"en/end-user/baize/jobs/tensorflow.html#tfjob-distributed-job","title":"TFJob Distributed Job","text":"
                                          1. Log in to AI Lab and click Job Center in the left navigation bar to enter the Jobs page.
                                          2. Click the Create button in the upper right corner to enter the job creation page.
                                          3. Select the job type as Tensorflow Distributed and click Next.
                                          4. Fill in the job name and description, then click OK.
                                          "},{"location":"en/end-user/baize/jobs/tensorflow.html#example-job-introduction","title":"Example Job Introduction","text":"

                                          This job includes three roles: Chief, Worker, and Parameter Server (PS).

                                          • Chief: Responsible for coordinating the training process and saving model checkpoints.
                                          • Worker: Executes the actual model training.
                                          • PS: Used in asynchronous training to store and update model parameters.

                                          Different resources are allocated to different roles. Chief and Worker use GPUs, while PS uses CPUs and larger memory.

                                          "},{"location":"en/end-user/baize/jobs/tensorflow.html#parameters_1","title":"Parameters","text":"
                                          • Launch command: Use bash
                                          • Command parameters: Use python /code/tensorflow/tensorflow-distributed.py
                                          import os\nimport json\nimport tensorflow as tf\n\nclass SimpleModel(tf.keras.Model):\n    def __init__(self):\n        super(SimpleModel, self).__init__()\n        self.fc = tf.keras.layers.Dense(1, input_shape=(10,))\n\n    def call(self, x):\n        return self.fc(x)\n\ndef train():\n    # Print environment information\n    print(f\"TensorFlow version: {tf.__version__}\")\n    print(f\"GPU available: {tf.test.is_gpu_available()}\")\n    if tf.test.is_gpu_available():\n        print(f\"GPU device count: {len(tf.config.list_physical_devices('GPU'))}\")\n\n    # Retrieve distributed training information\n    tf_config = json.loads(os.environ.get('TF_CONFIG') or '{}')\n    job_type = tf_config.get('job', {}).get('type')\n    job_id = tf_config.get('job', {}).get('index')\n\n    print(f\"Job type: {job_type}, Job ID: {job_id}\")\n\n    # Set up distributed strategy\n    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()\n\n    with strategy.scope():\n        model = SimpleModel()\n        loss_fn = tf.keras.losses.MeanSquaredError()\n        optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)\n\n    # Generate some random data\n    data = tf.random.normal((100, 10))\n    labels = tf.random.normal((100, 1))\n\n    @tf.function\n    def train_step(inputs, labels):\n        with tf.GradientTape() as tape:\n            predictions = model(inputs)\n            loss = loss_fn(labels, predictions)\n        gradients = tape.gradient(loss, model.trainable_variables)\n        optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n        return loss\n\n    for epoch in range(10):\n        loss = train_step(data, labels)\n        if job_type == 'chief':\n            print(f'Epoch {epoch}, Loss: {loss.numpy():.4f}')\n\nif __name__ == '__main__':\n    train()\n
                                          "},{"location":"en/end-user/baize/jobs/tensorflow.html#results_1","title":"Results","text":"

                                          Similarly, you can enter the job details to view the resource usage and log outputs of each Pod.

                                          "},{"location":"en/end-user/baize/jobs/view.html","title":"View Job Workloads","text":"

                                          Once a job is created, it will be displayed in the job list.

                                          1. In the job list, click the \u2507 on the right side of a job and select Job Workload Details .

                                          2. A pop-up window will appear asking you to choose which Pod to view. Click Enter .

                                          3. You will be redirected to the container management interface, where you can view the container\u2019s working status, labels and annotations, and any events that have occurred.

                                          4. You can also view detailed logs of the current Pod for the recent period. By default, 100 lines of logs are displayed. To view more detailed logs or to download logs, click the blue Insight text at the top.

                                          5. Additionally, you can use the ... in the upper right corner to view the current Pod's YAML, and to upload or download files. Below is an example of a Pod's YAML.

                                          kind: Pod\napiVersion: v1\nmetadata:\n  name: neko-tensorboard-job-test-202404181843-skxivllb-worker-0\n  namespace: default\n  uid: ddedb6ff-c278-47eb-ae1e-0de9b7c62f8c\n  resourceVersion: '41092552'\n  creationTimestamp: '2024-04-18T10:43:36Z'\n  labels:\n    training.kubeflow.org/job-name: neko-tensorboard-job-test-202404181843-skxivllb\n    training.kubeflow.org/operator-name: pytorchjob-controller\n    training.kubeflow.org/replica-index: '0'\n    training.kubeflow.org/replica-type: worker\n  annotations:\n    cni.projectcalico.org/containerID: 0cfbb9af257d5e69027c603c6cb2d3890a17c4ae1a145748d5aef73a10d7fbe1\n    cni.projectcalico.org/podIP: ''\n    cni.projectcalico.org/podIPs: ''\n    hami.io/bind-phase: success\n    hami.io/bind-time: '1713437016'\n    hami.io/vgpu-devices-allocated: GPU-29d5fa0d-935b-2966-aff8-483a174d61d1,NVIDIA,1024,20:;\n    hami.io/vgpu-devices-to-allocate: ;\n    hami.io/vgpu-node: worker-a800-1\n    hami.io/vgpu-time: '1713437016'\n    k8s.v1.cni.cncf.io/network-status: |-\n      [{\n          \"name\": \"kube-system/calico\",\n          \"ips\": [\n              \"10.233.97.184\"\n          ],\n          \"default\": true,\n          \"dns\": {}\n      }]\n    k8s.v1.cni.cncf.io/networks-status: |-\n      [{\n          \"name\": \"kube-system/calico\",\n          \"ips\": [\n              \"10.233.97.184\"\n          ],\n          \"default\": true,\n          \"dns\": {}\n      }]\n  ownerReferences:\n    - apiVersion: kubeflow.org/v1\n      kind: PyTorchJob\n      name: neko-tensorboard-job-test-202404181843-skxivllb\n      uid: e5a8b05d-1f03-4717-8e1c-4ec928014b7b\n      controller: true\n      blockOwnerDeletion: true\nspec:\n  volumes:\n    - name: 0-dataset-pytorch-examples\n      persistentVolumeClaim:\n        claimName: pytorch-examples\n    - name: kube-api-access-wh9rh\n      projected:\n        sources:\n          - serviceAccountToken:\n              expirationSeconds: 3607\n              path: token\n          - configMap:\n              name: kube-root-ca.crt\n              items:\n                - key: ca.crt\n                  path: ca.crt\n          - downwardAPI:\n              items:\n                - path: namespace\n                  fieldRef:\n                    apiVersion: v1\n                    fieldPath: metadata.namespace\n        defaultMode: 420\n  containers:\n    - name: pytorch\n      image: m.daocloud.io/docker.io/pytorch/pytorch\n      command:\n        - bash\n      args:\n        - '-c'\n        - >-\n          ls -la /root && which pip && pip install pytorch_lightning tensorboard\n          && python /root/Git/pytorch/examples/mnist/main.py\n      ports:\n        - name: pytorchjob-port\n          containerPort: 23456\n          protocol: TCP\n      env:\n        - name: PYTHONUNBUFFERED\n          value: '1'\n        - name: PET_NNODES\n          value: '1'\n      resources:\n        limits:\n          cpu: '4'\n          memory: 8Gi\n          nvidia.com/gpucores: '20'\n          nvidia.com/gpumem: '1024'\n          nvidia.com/vgpu: '1'\n        requests:\n          cpu: '4'\n          memory: 8Gi\n          nvidia.com/gpucores: '20'\n          nvidia.com/gpumem: '1024'\n          nvidia.com/vgpu: '1'\n      volumeMounts:\n        - name: 0-dataset-pytorch-examples\n          mountPath: /root/Git/pytorch/examples\n        - name: kube-api-access-wh9rh\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: 
/dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  restartPolicy: Never\n  terminationGracePeriodSeconds: 30\n  dnsPolicy: ClusterFirst\n  serviceAccountName: default\n  serviceAccount: default\n  nodeName: worker-a800-1\n  securityContext: {}\n  affinity: {}\n  schedulerName: hami-scheduler\n  tolerations:\n    - key: node.kubernetes.io/not-ready\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n    - key: node.kubernetes.io/unreachable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n  priorityClassName: baize-high-priority\n  priority: 100000\n  enableServiceLinks: true\n  preemptionPolicy: PreemptLowerPriority\nstatus:\n  phase: Succeeded\n  conditions:\n    - type: Initialized\n      status: 'True'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:43:36Z'\n      reason: PodCompleted\n    - type: Ready\n      status: 'False'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:46:34Z'\n      reason: PodCompleted\n    - type: ContainersReady\n      status: 'False'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:46:34Z'\n      reason: PodCompleted\n    - type: PodScheduled\n      status: 'True'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:43:36Z'\n  hostIP: 10.20.100.211\n  podIP: 10.233.97.184\n  podIPs:\n    - ip: 10.233.97.184\n  startTime: '2024-04-18T10:43:36Z'\n  containerStatuses:\n    - name: pytorch\n      state:\n        terminated:\n          exitCode: 0\n          reason: Completed\n          startedAt: '2024-04-18T10:43:39Z'\n          finishedAt: '2024-04-18T10:46:34Z'\n          containerID: >-\n            containerd://09010214bcf3315e81d38fba50de3943c9d2b48f50a6cc2e83f8ef0e5c6eeec1\n      lastState: {}\n      ready: false\n      restartCount: 0\n      image: m.daocloud.io/docker.io/pytorch/pytorch:latest\n      imageID: >-\n        m.daocloud.io/docker.io/pytorch/pytorch@sha256:11691e035a3651d25a87116b4f6adc113a27a29d8f5a6a583f8569e0ee5ff897\n      containerID: >-\n        containerd://09010214bcf3315e81d38fba50de3943c9d2b48f50a6cc2e83f8ef0e5c6eeec1\n      started: false\n  qosClass: Guaranteed\n
                                          "},{"location":"en/end-user/ghippo/personal-center/accesstoken.html","title":"Access key","text":"

                                          The access key can be used to access the OpenAPI and for continuous delivery. You can obtain a key and access the API by following the steps below in the Personal Center.

                                          "},{"location":"en/end-user/ghippo/personal-center/accesstoken.html#get-key","title":"Get key","text":"

                                          Log in to AI platform, find Personal Center in the drop-down menu in the upper right corner, and you can manage the access key of the account on the Access Keys page.

                                          Info

                                          The access key is displayed only once. If you forget your access key, you will need to create a new one.

                                          "},{"location":"en/end-user/ghippo/personal-center/accesstoken.html#use-the-key-to-access-api","title":"Use the key to access API","text":"

                                          When accessing AI platform openAPI, add the header Authorization:Bearer ${token} to the request to identify the visitor, where ${token} is the key obtained in the previous step. For the specific API, see OpenAPI Documentation.

                                          Request Example

                                          curl -X GET -H 'Authorization:Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IkRKVjlBTHRBLXZ4MmtQUC1TQnVGS0dCSWc1cnBfdkxiQVVqM2U3RVByWnMiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE2NjE0MTU5NjksImlhdCI6MTY2MDgxMTE2OSwiaXNzIjoiZ2hpcHBvLmlvIiwic3ViIjoiZjdjOGIxZjUtMTc2MS00NjYwLTg2MWQtOWI3MmI0MzJmNGViIiwicHJlZmVycmVkX3VzZXJuYW1lIjoiYWRtaW4iLCJncm91cHMiOltdfQ.RsUcrAYkQQ7C6BxMOrdD3qbBRUt0VVxynIGeq4wyIgye6R8Ma4cjxG5CbU1WyiHKpvIKJDJbeFQHro2euQyVde3ygA672ozkwLTnx3Tu-_mB1BubvWCBsDdUjIhCQfT39rk6EQozMjb-1X1sbLwzkfzKMls-oxkjagI_RFrYlTVPwT3Oaw-qOyulRSw7Dxd7jb0vINPq84vmlQIsI3UuTZSNO5BCgHpubcWwBss-Aon_DmYA-Et_-QtmPBA3k8E2hzDSzc7eqK0I68P25r9rwQ3DeKwD1dbRyndqWORRnz8TLEXSiCFXdZT2oiMrcJtO188Ph4eLGut1-4PzKhwgrQ' https://demo-dev.daocloud.io/apis/ghippo.io/v1alpha1/users?page=1&pageSize=10 -k\n

                                          Request result

                                          {\n    \"items\": [\n        {\n            \"id\": \"a7cfd010-ebbe-4601-987f-d098d9ef766e\",\n            \"name\": \"a\",\n            \"email\": \"\",\n            \"description\": \"\",\n            \"firstname\": \"\",\n            \"lastname\": \"\",\n            \"source\": \"locale\",\n            \"enabled\": true,\n            \"createdAt\": \"1660632794800\",\n            \"updatedAt\": \"0\",\n            \"lastLoginAt\": \"\"\n        }\n    ],\n    \"pagination\": {\n        \"page\": 1,\n        \"pageSize\": 10,\n        \"total\": 1\n    }\n}\n
                                          "},{"location":"en/end-user/ghippo/personal-center/language.html","title":"language settings","text":"

                                          This section explains how to set the interface language. Chinese and English are currently supported.

                                          Language settings are the portal through which the platform provides multilingual services. The platform is displayed in Chinese by default. Users can switch the platform language by selecting English or by automatically detecting the browser's language preference. Each user's language setting is independent; switching it will not affect other users.

                                          The platform provides three options: Chinese, English, and automatic detection of the browser's language preference.

                                          The operation steps are as follows.

                                          1. Log in to the AI platform with your username/password. Click Global Management at the bottom of the left navigation bar.

                                          2. Click the username in the upper right corner and select Personal Center .

                                          3. Click the Language Settings tab.

                                          4. Toggle the language option.

                                          "},{"location":"en/end-user/ghippo/personal-center/security-setting.html","title":"Security Settings","text":"

                                          Function description: this page is used to fill in an email address and change the login password.

                                          • Email: After the administrator configures the email server address, users can click the Forgot Password button on the login page and enter the email address filled in here to retrieve their password.
                                          • Password: The password used to log in to the platform. It is recommended to change it regularly.

                                          The specific operation steps are as follows:

                                          1. Click the username in the upper right corner and select Personal Center .

                                          2. Click the Security Settings tab. Fill in your email address or change the login password.

                                          "},{"location":"en/end-user/ghippo/personal-center/ssh-key.html","title":"Configuring SSH Public Key","text":"

                                          This article explains how to configure an SSH public key.

                                          "},{"location":"en/end-user/ghippo/personal-center/ssh-key.html#step-1-view-existing-ssh-keys","title":"Step 1. View Existing SSH Keys","text":"

Before generating a new SSH key, check whether an existing SSH key stored in the local user's home directory can be used. On Linux and macOS, use the following commands to view existing public keys. Windows users can run the same commands in WSL (requires Windows 10 or above) or Git Bash.

                                          • ED25519 Algorithm:

                                            cat ~/.ssh/id_ed25519.pub\n
                                          • RSA Algorithm:

                                            cat ~/.ssh/id_rsa.pub\n

                                          If a long string starting with ssh-ed25519 or ssh-rsa is returned, it means that a local public key already exists. You can skip Step 2 Generate SSH Key and proceed directly to Step 3.

                                          "},{"location":"en/end-user/ghippo/personal-center/ssh-key.html#step-2-generate-ssh-key","title":"Step 2. Generate SSH Key","text":"

If Step 1 returns no such string, there is no usable SSH key on the local machine and a new one needs to be generated. Follow these steps:

1. Open a terminal (Windows users should use WSL or Git Bash) and run ssh-keygen, specifying the key type with the -t option as shown below.

                                          2. Enter the key algorithm type and an optional comment.

The comment is stored in the .pub file; an email address is commonly used as the comment.

                                            • To generate a key pair based on the ED25519 algorithm, use the following command:

                                              ssh-keygen -t ed25519 -C \"<comment>\"\n
                                            • To generate a key pair based on the RSA algorithm, use the following command:

                                              ssh-keygen -t rsa -C \"<comment>\"\n
3. Press Enter to accept the default SSH key path, or type a custom path.

                                            Taking the ED25519 algorithm as an example, the default path is as follows:

                                            Generating public/private ed25519 key pair.\nEnter file in which to save the key (/home/user/.ssh/id_ed25519):\n

The default key generation path is /home/user/.ssh/id_ed25519, and the corresponding public key is /home/user/.ssh/id_ed25519.pub.

                                          4. Set a passphrase for the key.

                                            Enter passphrase (empty for no passphrase):\nEnter same passphrase again:\n

The passphrase is empty by default, and you can choose to use a passphrase to protect the private key file. If you do not want to enter a passphrase every time you access a repository over SSH, enter an empty passphrase when creating the key. A passphrase can also be added or changed later; see the sketch after this list.

                                          5. Press Enter to complete the key pair creation.
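If you later want to add or change the passphrase, OpenSSH can update it in place without regenerating the key pair; a minimal sketch, assuming the default ED25519 key path used above:

  ssh-keygen -p -f ~/.ssh/id_ed25519\n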

                                          "},{"location":"en/end-user/ghippo/personal-center/ssh-key.html#step-3-copy-the-public-key","title":"Step 3. Copy the Public Key","text":"

                                          In addition to manually copying the generated public key information printed on the command line, you can use the following commands to copy the public key to the clipboard, depending on the operating system.

                                          • Windows (in WSL or Git Bash):

                                            cat ~/.ssh/id_ed25519.pub | clip\n
                                          • Mac:

tr -d '\n' < ~/.ssh/id_ed25519.pub | pbcopy\n
                                          • GNU/Linux (requires xclip):

                                            xclip -sel clip < ~/.ssh/id_ed25519.pub\n
                                          "},{"location":"en/end-user/ghippo/personal-center/ssh-key.html#step-4-set-the-public-key-on-ai-platform-platform","title":"Step 4. Set the Public Key on AI platform Platform","text":"
                                          1. Log in to the AI platform UI page and select Profile -> SSH Public Key in the upper right corner of the page.

                                          2. Add the generated SSH public key information.

                                            1. SSH public key content.

2. Public key title: A custom name for the public key, used to distinguish keys.

3. Expiration: Set an expiration period for the public key. After it expires, the public key is automatically invalidated and can no longer be used. If not set, it remains permanently valid.

                                          "},{"location":"en/end-user/ghippo/workspace/folder-permission.html","title":"Description of folder permissions","text":"

                                          Folders have permission mapping capabilities, which can map the permissions of users/groups in this folder to subfolders, workspaces and resources under it.

If a user/group has the Folder Admin role in a folder, it keeps the Folder Admin role when mapped to a subfolder and is mapped to Workspace Admin in the workspaces under it. If a Namespace is bound in Workspace and Folder -> Resource Group, the user/group also becomes a Namespace Admin after mapping.

                                          Note

Folder permission mapping does not apply to shared resources. Sharing grants multiple workspaces usage permissions on a cluster rather than management permissions, so permission inheritance and role mapping are not performed.

                                          "},{"location":"en/end-user/ghippo/workspace/folder-permission.html#use-cases","title":"Use cases","text":"

Folders are hierarchical, so when they are mapped to departments/suppliers/projects in an enterprise:

• If a user/group has administrative permissions (Admin) in a first-level department, it also has administrative permissions in the second-, third-, and fourth-level departments or projects under it;
• If a user/group has editing permissions (Editor) in a first-level department, it also has editing permissions in the second-, third-, and fourth-level departments or projects under it;
• If a user/group has read-only permissions (Viewer) in a first-level department, it also has read-only permissions in the second-, third-, and fourth-level departments or projects under it.
| Objects | Actions | Folder Admin | Folder Editor | Folder Viewer |
| --- | --- | --- | --- | --- |
| The folder itself | View | \u2713 | \u2713 | \u2713 |
| - | Authorization | \u2713 | \u2717 | \u2717 |
| - | Modify Alias | \u2713 | \u2717 | \u2717 |
| Subfolders | Create | \u2713 | \u2717 | \u2717 |
| - | View | \u2713 | \u2713 | \u2713 |
| - | Authorization | \u2713 | \u2717 | \u2717 |
| - | Modify Alias | \u2713 | \u2717 | \u2717 |
| Workspaces under it | Create | \u2713 | \u2717 | \u2717 |
| - | View | \u2713 | \u2713 | \u2713 |
| - | Authorization | \u2713 | \u2717 | \u2717 |
| - | Modify Alias | \u2713 | \u2717 | \u2717 |
| Workspaces under it - Resource Groups | View | \u2713 | \u2713 | \u2713 |
| - | Resource binding | \u2713 | \u2717 | \u2717 |
| - | Unbind | \u2713 | \u2717 | \u2717 |
| Workspaces under it - Shared Resources | View | \u2713 | \u2713 | \u2713 |
| - | New share | \u2713 | \u2717 | \u2717 |
| - | Unshare | \u2713 | \u2717 | \u2717 |
| - | Resource Quota | \u2713 | \u2717 | \u2717 |
"},{"location":"en/end-user/ghippo/workspace/folders.html","title":"Create/Delete Folders","text":"

                                          Folders have the capability to map permissions, allowing users/user groups to have their permissions in the folder mapped to its sub-folders, workspaces, and resources.

                                          Follow the steps below to create a folder:

                                          1. Log in to AI platform with a user account having the admin/folder admin role. Click Global Management -> Workspace and Folder at the bottom of the left navigation bar.

                                          2. Click the Create Folder button in the top right corner.

                                          3. Fill in the folder name, parent folder, and other information, then click OK to complete creating the folder.

                                          Tip

                                          After successful creation, the folder name will be displayed in the left tree structure, represented by different icons for workspaces and folders.

                                          Note

To edit or delete a specific folder, select it and click \u2507 on the right side.

                                          • If there are resources bound to the resource group or shared resources within the folder, the folder cannot be deleted. All resources need to be unbound before deleting.

                                          • If there are registry resources accessed by the microservice engine module within the folder, the folder cannot be deleted. All access to the registry needs to be removed before deleting the folder.

                                          "},{"location":"en/end-user/ghippo/workspace/quota.html","title":"Resource Quota","text":"

Sharing a resource does not mean that the shared users can use it without restriction. Admin, Kpanda Owner, and Workspace Admin can limit a user's maximum usage quota through the Resource Quota feature in shared resources. If no restrictions are set, usage is unlimited.

                                          • CPU Request (Core)
                                          • CPU Limit (Core)
                                          • Memory Request (MB)
                                          • Memory Limit (MB)
                                          • Total Storage Request (GB)
                                          • Persistent Volume Claims (PVC)
                                          • GPU Type, Spec, Quantity (including but not limited to Nvidia, Ascend, ILLUVATAR, and other GPUs)

                                          A resource (cluster) can be shared among multiple workspaces, and a workspace can use resources from multiple shared clusters simultaneously.
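On a standard Kubernetes cluster, quota dimensions like those above correspond conceptually to a ResourceQuota object. The sketch below is illustrative only: it assumes direct kubectl access and a hypothetical namespace ns01, and the platform's own quota mechanism may be implemented differently.

  kubectl apply -n ns01 -f - <<'EOF'\napiVersion: v1\nkind: ResourceQuota\nmetadata:\n  name: ws01-quota  # hypothetical name\nspec:\n  hard:\n    requests.cpu: \"100\"  # CPU Request (Core)\n    limits.cpu: \"200\"  # CPU Limit (Core)\n    requests.memory: 128Gi  # Memory Request\n    limits.memory: 256Gi  # Memory Limit\n    requests.storage: 1Ti  # Total Storage Request\n    persistentvolumeclaims: \"50\"  # Number of PVCs\nEOF\n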

                                          "},{"location":"en/end-user/ghippo/workspace/quota.html#resource-groups-and-shared-resources","title":"Resource Groups and Shared Resources","text":"

                                          Cluster resources in both shared resources and resource groups are derived from Container Management. However, different effects will occur when binding a cluster to a workspace or sharing it with a workspace.

                                          1. Binding Resources

                                            Users/User groups in the workspace will have full management and usage permissions for the cluster. Workspace Admin will be mapped as Cluster Admin. Workspace Admin can access the Container Management module to manage the cluster.

                                            Note

                                            As of now, there are no Cluster Editor and Cluster Viewer roles in the Container Management module. Therefore, Workspace Editor and Workspace Viewer cannot be mapped.

                                          2. Adding Shared Resources

                                            Users/User groups in the workspace will have usage permissions for the cluster resources.

                                            Unlike resource groups, when sharing a cluster with a workspace, the roles of the users in the workspace will not be mapped to the resources. Therefore, Workspace Admin will not be mapped as Cluster Admin.

                                          This section demonstrates three scenarios related to resource quotas.

                                          "},{"location":"en/end-user/ghippo/workspace/quota.html#create-namespaces","title":"Create Namespaces","text":"

Resource quotas are checked when a namespace is created.

                                          1. Add a shared cluster to workspace ws01 .

                                          2. Select workspace ws01 and the shared cluster in Workbench, and create a namespace ns01 .

                                            • If no resource quotas are set in the shared cluster, there is no need to set resource quotas when creating the namespace.
• If resource quotas are set in the shared cluster (e.g., CPU Request = 100 cores), the CPU request for the namespace must be less than or equal to 100 cores (CPU Request \u2264 100 cores) for the creation to succeed. You can verify the resulting quota on the cluster; see the sketch after this list.
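Assuming the shared cluster enforces namespace quotas through standard Kubernetes ResourceQuota objects (an assumption; verify in your environment), you can inspect the quota and current usage of the new namespace with kubectl:

  kubectl get resourcequota -n ns01\nkubectl describe resourcequota -n ns01\n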
                                          "},{"location":"en/end-user/ghippo/workspace/quota.html#bind-namespace-to-workspace","title":"Bind Namespace to Workspace","text":"

                                          Prerequisite: Workspace ws01 has added a shared cluster, and the operator has the Workspace Admin + Kpanda Owner or Admin role.

                                          The two methods of binding have the same effect.

                                          • Bind the created namespace ns01 to ws01 in Container Management.

                                            • If no resource quotas are set in the shared cluster, the namespace ns01 can be successfully bound regardless of whether resource quotas are set.
                                            • If resource quotas are set in the shared cluster (e.g., CPU Request = 100 cores), the namespace ns01 must meet the requirement of CPU requests less than or equal to 100 cores (CPU Request \u2264 100 core) for successful binding.
                                          • Bind the namespace ns01 to ws01 in Global Management.

                                            • If no resource quotas are set in the shared cluster, the namespace ns01 can be successfully bound regardless of whether resource quotas are set.
                                            • If resource quotas are set in the shared cluster (e.g., CPU Request = 100 cores), the namespace ns01 must meet the requirement of CPU requests less than or equal to 100 cores (CPU Request \u2264 100 core) for successful binding.
                                          "},{"location":"en/end-user/ghippo/workspace/quota.html#unbind-namespace-from-workspace","title":"Unbind Namespace from Workspace","text":"

                                          The two methods of unbinding have the same effect.

                                          • Unbind the namespace ns01 from workspace ws01 in Container Management.

                                            • If no resource quotas are set in the shared cluster, unbinding the namespace ns01 will not affect the resource quotas, regardless of whether resource quotas were set for the namespace.
• If resource quotas (CPU Request = 100 cores) are set in the shared cluster and the namespace ns01 has its own resource quotas, unbinding releases the corresponding resource quota.
                                          • Unbind the namespace ns01 from workspace ws01 in Global Management.

                                            • If no resource quotas are set in the shared cluster, unbinding the namespace ns01 will not affect the resource quotas, regardless of whether resource quotas were set for the namespace.
• If resource quotas (CPU Request = 100 cores) are set in the shared cluster and the namespace ns01 has its own resource quotas, unbinding releases the corresponding resource quota.
                                          "},{"location":"en/end-user/ghippo/workspace/res-gp-and-shared-res.html","title":"Differences between Resource Groups and Shared Resources","text":"

                                          Both resource groups and shared resources support cluster binding, but they have significant differences in usage.

                                          "},{"location":"en/end-user/ghippo/workspace/res-gp-and-shared-res.html#differences-in-usage-scenarios","title":"Differences in Usage Scenarios","text":"
• Cluster Binding for Resource Groups: Resource groups are usually used for batch authorization. After a cluster is bound to a resource group, the workspace administrator is mapped as a cluster administrator and can manage and use cluster resources.
                                          • Cluster Binding for Shared Resources: Shared resources are usually used for resource quotas. A typical scenario is that the platform administrator assigns a cluster to a first-level supplier, who then assigns the cluster to a second-level supplier and sets resource quotas for the second-level supplier.

Note: In this scenario, the platform administrator needs to impose resource restrictions on the secondary supplier directly; a primary supplier cannot currently limit the cluster quota of its secondary suppliers.

                                          "},{"location":"en/end-user/ghippo/workspace/res-gp-and-shared-res.html#differences-in-cluster-quota-usage","title":"Differences in Cluster Quota Usage","text":"
• Cluster Binding for Resource Groups: The workspace administrator is mapped as the administrator of the cluster, equivalent to being granted the Cluster Admin role in Container Management-Permission Management. They have unrestricted access to cluster resources, can manage critical components such as management nodes, and are not subject to resource quotas.
                                          • Cluster Binding for Shared Resources: The workspace administrator can only use the quota in the cluster to create namespaces in the Workbench and does not have cluster management permissions. If the workspace is restricted by a quota, the workspace administrator can only create and use namespaces within the quota range.
                                          "},{"location":"en/end-user/ghippo/workspace/res-gp-and-shared-res.html#differences-in-resource-types","title":"Differences in Resource Types","text":"
• Resource Groups: Can bind to clusters, cluster-namespaces, multiclouds, multicloud namespaces, meshes, and mesh-namespaces.
                                          • Shared Resources: Can only bind to clusters.
                                          "},{"location":"en/end-user/ghippo/workspace/res-gp-and-shared-res.html#similarities-between-resource-groups-and-shared-resources","title":"Similarities between Resource Groups and Shared Resources","text":"

After a cluster is bound, with either resource groups or shared resources, users can create namespaces in the Workbench, and these namespaces are automatically bound to the workspace.

                                          "},{"location":"en/end-user/ghippo/workspace/workspace.html","title":"Creating/Deleting Workspaces","text":"

A workspace is a resource category that represents a hierarchical relationship of resources. A workspace can contain resources such as clusters, namespaces, and registries. Typically, each workspace corresponds to a project; different resources can be allocated to it, and different users and user groups can be assigned to it.

                                          Follow the steps below to create a workspace:

                                          1. Log in to AI platform with a user account having the admin/folder admin role. Click Global Management -> Workspace and Folder at the bottom of the left navigation bar.

                                          2. Click the Create Workspace button in the top right corner.

                                          3. Fill in the workspace name, folder assignment, and other information, then click OK to complete creating the workspace.

                                          Tip

                                          After successful creation, the workspace name will be displayed in the left tree structure, represented by different icons for folders and workspaces.

                                          Note

                                          To edit or delete a specific workspace or folder, select it and click ... on the right side.

• If the workspace has resources in its resource groups or shared resources, it cannot be deleted. All resources must be unbound before the workspace can be deleted.

• If the Microservices Engine module has an Integrated Registry under the workspace, the workspace cannot be deleted. The Integrated Registry must be removed before the workspace can be deleted.

• If Container Registry has a Registry Space or Integrated Registry under the workspace, the workspace cannot be deleted. The Registry Space must be removed and the Integrated Registry deleted before the workspace can be deleted.

                                          "},{"location":"en/end-user/ghippo/workspace/ws-folder.html","title":"Workspace and Folder","text":"

                                          Workspace and Folder is a feature that provides resource isolation and grouping, addressing issues related to unified authorization, resource grouping, and resource quotas.

                                          Workspace and Folder involves two concepts: workspaces and folders.

                                          "},{"location":"en/end-user/ghippo/workspace/ws-folder.html#workspaces","title":"Workspaces","text":"

                                          Workspaces allow the management of resources through Authorization , Resource Group , and Shared Resource , enabling users (and user groups) to share resources within the workspace.

                                          • Resources

                                            Resources are at the lowest level of the hierarchy in the resource management module. They include clusters, namespaces, pipelines, gateways, and more. All these resources can only have workspaces as their parent level. Workspaces act as containers for grouping resources.

                                          • Workspace

                                            A workspace usually refers to a project or environment, and the resources in each workspace are logically isolated from those in other workspaces. You can grant users (groups of users) different access rights to the same set of resources through authorization in the workspace.

Workspaces sit directly above resources at the bottom of the hierarchy and contain them. All resources except shared resources have one and only one parent; all workspaces also have one and only one parent folder.

                                            Resources are grouped by workspace, and there are two grouping modes in workspace, namely Resource Group and Shared Resource .

                                          • Resource group

A resource can only be added to one resource group, and resource groups correspond to workspaces one-to-one. After a resource is added to a resource group, Workspace Admin obtains management authority over the resource, equivalent to being the resource owner.

• Shared resources

With shared resources, multiple workspaces can share one or more resources. Resource owners can share their own resources with workspaces and, when doing so, will generally limit the amount of resources the shared workspace can use. After a resource is shared, Workspace Admin only has usage rights within the resource limit and cannot manage the resource or adjust the amount available to the workspace.

Shared resources also place requirements on the resources themselves: only Cluster resources can be shared. A Cluster Admin can share a Cluster with different workspaces and limit each workspace's usage of that Cluster.

Workspace Admin can create multiple Namespaces within the resource quota, but the sum of the Namespaces' resource quotas cannot exceed the Cluster's resource quota in the workspace. For Kubernetes resources, Cluster is currently the only resource type that can be shared.

                                          "},{"location":"en/end-user/ghippo/workspace/ws-folder.html#folder","title":"Folder","text":"

                                          Folders can be used to build enterprise business hierarchy relationships.

                                          • Folders are a further grouping mechanism based on workspaces and have a hierarchical structure. A folder can contain workspaces, other folders, or a combination of both, forming a tree-like organizational relationship.

                                          • Folders allow you to map your business hierarchy and group workspaces by department. Folders are not directly linked to resources, but indirectly achieve resource grouping through workspaces.

                                          • A folder has one and only one parent folder, and the root folder is the highest level of the hierarchy. The root folder has no parent, and folders and workspaces are attached to the root folder.

                                          In addition, users (groups) in folders can inherit permissions from their parents through a hierarchical structure. The permissions of the user in the hierarchical structure come from the combination of the permissions of the current level and the permissions inherited from its parents. The permissions are additive and there is no mutual exclusion.

                                          "},{"location":"en/end-user/ghippo/workspace/ws-permission.html","title":"Description of workspace permissions","text":"

                                          The workspace has permission mapping and resource isolation capabilities, and can map the permissions of users/groups in the workspace to the resources under it. If the user/group has the Workspace Admin role in the workspace and the resource Namespace is bound to the workspace-resource group, the user/group will become Namespace Admin after mapping.

                                          Note

Workspace permission mapping does not apply to shared resources. Sharing grants multiple workspaces usage permissions on a cluster rather than management permissions, so permission inheritance and role mapping are not performed.

                                          "},{"location":"en/end-user/ghippo/workspace/ws-permission.html#use-cases","title":"Use cases","text":"

                                          Resource isolation is achieved by binding resources to different workspaces. Therefore, resources can be flexibly allocated to each workspace (tenant) with the help of permission mapping, resource isolation, and resource sharing capabilities.

                                          Generally applicable to the following two use cases:

                                          • Cluster one-to-one

| Ordinary Cluster | Department/Tenant (Workspace) | Purpose |
| --- | --- | --- |
| Cluster 01 | A | Administration and Usage |
| Cluster 02 | B | Administration and Usage |
                                          • Cluster one-to-many

| Cluster | Department/Tenant (Workspace) | Resource Quota |
| --- | --- | --- |
| Cluster 01 | A | 100-core CPU |
| Cluster 01 | B | 50-core CPU |
                                          "},{"location":"en/end-user/ghippo/workspace/ws-permission.html#permission-description","title":"Permission description","text":"Action Objects Operations Workspace Admin Workspace Editor Workspace Viewer itself view \u2713 \u2713 \u2713 - Authorization \u2713 \u2717 \u2717 - Modify Alias \u2713 \u2713 \u2717 Resource Group View \u2713 \u2713 \u2713 - resource binding \u2713 \u2717 \u2717 - unbind \u2713 \u2717 \u2717 Shared Resources View \u2713 \u2713 \u2713 - Add Share \u2713 \u2717 \u2717 - Unshare \u2713 \u2717 \u2717 - Resource Quota \u2713 \u2717 \u2717 - Using Shared Resources 1 \u2713 \u2717 \u2717
1. Authorized users can use resources in the workspace through modules such as Workbench, Microservice Engine, Middleware, Multicloud Orchestration, and Service Mesh. For the operation scope of the Workspace Admin, Workspace Editor, and Workspace Viewer roles in each module, refer to the permission descriptions:

                                            • Container Management Permissions


                                          "},{"location":"en/end-user/ghippo/workspace/wsbind-permission.html","title":"Resource Binding Permission Instructions","text":"

Suppose a user John ("John" represents any user who needs to bind resources) has been assigned the Workspace Admin role, or has been granted the workspace's \"Resource Binding\" permission through a custom role, and wants to bind a specific cluster or namespace to the workspace.

Binding cluster/namespace resources to a workspace requires not only the workspace's \"Resource Binding\" permission but also Cluster Admin permissions.

                                          "},{"location":"en/end-user/ghippo/workspace/wsbind-permission.html#granting-authorization-to-john","title":"Granting Authorization to John","text":"
                                          1. Using the Platform Admin Role, grant John the role of Workspace Admin on the Workspace -> Authorization page.

                                          2. Then, on the Container Management -> Permissions page, authorize John as a Cluster Admin by Add Permission.

                                          "},{"location":"en/end-user/ghippo/workspace/wsbind-permission.html#binding-to-workspace","title":"Binding to Workspace","text":"

Log in to AI platform with John's account. On the Container Management -> Clusters page, John can bind the specified cluster to his own workspace using the Bind Workspace button.

                                          Note

                                          John can only bind clusters or namespaces to a specific workspace in the Container Management module, and cannot perform this operation in the Global Management module.

                                          To bind a namespace to a workspace, you must have at least Workspace Admin and Cluster Admin permissions.

                                          "},{"location":"en/end-user/host/createhost.html","title":"Creating and Starting a Cloud Host","text":"

                                          Once the user completes registration and is assigned a workspace, namespace, and resources, they can create and start a cloud host.

                                          "},{"location":"en/end-user/host/createhost.html#prerequisites","title":"Prerequisites","text":"
                                          • The AI platform is installed
                                          • User has successfully registered
                                          • Administrator has bound the workspace to the user
                                          • Administrator has allocated resources for the workspace
                                          "},{"location":"en/end-user/host/createhost.html#steps-to-operate","title":"Steps to Operate","text":"
1. Log into the AI platform and go to the CloudHost module from the left navigation bar.
                                          2. Click Create VMs -> Create with Template.

                                          3. After defining the configurations for the cloud host, click Next.

The configuration covers three tabs: Basic Information, Template Configuration, and Storage and Network.

                                          4. After configuring the root password or SSH key, click OK.

                                          5. Return to the host list and wait for the status to change to Running. Then, you can start the host using the \u2507 button on the right.

                                          Next step: Using the Cloud Host

                                          "},{"location":"en/end-user/host/usehost.html","title":"Using the Cloud Host","text":"

                                          After creating and starting the cloud host, the user can begin using the cloud host.

                                          "},{"location":"en/end-user/host/usehost.html#prerequisites","title":"Prerequisites","text":"
                                          • The AI platform is installed
                                          • User has created and started the cloud host
                                          "},{"location":"en/end-user/host/usehost.html#steps-to-operate","title":"Steps to Operate","text":"
                                          1. Log into the AI platform as an administrator.
                                          2. Navigate to Container Management -> Container Network -> Services, click the service name to enter the service details page, and click Update in the upper right corner.

                                          3. Change the port range to 30900-30999, ensuring there are no conflicts.

4. Log into the AI platform as an end user, navigate to the corresponding service, and check the access ports.

5. Use an SSH client to log into the cloud host from the external network; see the example after these steps.

                                          6. At this point, you can perform various operations on the cloud host.
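For example, if the service exposes SSH through a NodePort in the 30900-30999 range configured above, logging in looks like the following; the port, user, and node address are placeholders, so use the values shown on the service details page:

  ssh -p 30950 root@<node-external-ip>\n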

                                          Next step: Using Notebook

                                          "},{"location":"en/end-user/insight/alert-center/index.html","title":"Alert Center","text":"

                                          The Alert Center is an important feature provided by AI platform that allows users to easily view all active and historical alerts by cluster and namespace through a graphical interface, and search alerts based on severity level (critical, warning, info).

                                          All alerts are triggered based on the threshold conditions set in the preset alert rules. In AI platform, some global alert policies are built-in, but users can also create or delete alert policies at any time, and set thresholds for the following metrics:

                                          • CPU usage
                                          • Memory usage
                                          • Disk usage
                                          • Disk reads per second
                                          • Disk writes per second
                                          • Cluster disk read throughput
                                          • Cluster disk write throughput
                                          • Network send rate
                                          • Network receive rate

Users can also add labels and annotations to alert rules. Alert rules can be classified as active or expired, and individual rules can be enabled or disabled to silence alerts.

                                          When the threshold condition is met, users can configure how they want to be notified, including email, DingTalk, WeCom, webhook, and SMS notifications. All notification message templates can be customized and all messages are sent at specified intervals.

                                          In addition, the Alert Center also supports sending alert messages to designated users through short message services provided by Alibaba Cloud, Tencent Cloud, and more platforms that will be added soon, enabling multiple ways of alert notification.

                                          AI platform Alert Center is a powerful alert management platform that helps users quickly detect and resolve problems in the cluster, improve business stability and availability, and facilitate cluster inspection and troubleshooting.

                                          "},{"location":"en/end-user/insight/alert-center/alert-policy.html","title":"Alert Policies","text":"

                                          In addition to the built-in alert policies, AI platform allows users to create custom alert policies. Each alert policy is a collection of alert rules that can be set for clusters, nodes, and workloads. When an alert object reaches the threshold set by any of the rules in the policy, an alert is automatically triggered and a notification is sent.

                                          Taking the built-in alerts as an example, click the first alert policy alertmanager.rules .

                                          You can see that some alert rules have been set under it. You can add more rules under this policy, or edit or delete them at any time. You can also view the historical and active alerts related to this alert policy and edit the notification configuration.

                                          "},{"location":"en/end-user/insight/alert-center/alert-policy.html#create-alert-policies","title":"Create Alert Policies","text":"
                                          1. Select Alert Center -> Alert Policies , and click the Create Alert Policy button.

                                          2. Fill in the basic information, select one or more clusters, nodes, or workloads as the alert objects, and click Next .

                                          3. The list must have at least one rule. If the list is empty, please Add Rule .

                                            Create an alert rule in the pop-up window, fill in the parameters, and click OK .

                                            • Template rules: Pre-defined basic metrics that can monitor CPU, memory, disk, and network.
• PromQL rules: Enter a PromQL expression; refer to Prometheus expression queries for the syntax. See the example after these steps.
• Duration: When the alert condition persists for the set duration, the alert policy enters the Triggered state.
                                            • Alert level: Including emergency, warning, and information levels.
                                            • Advanced settings: Custom tags and annotations.
                                          4. After clicking Next , configure notifications.

                                          5. After the configuration is complete, click the OK button to return to the Alert Policy list.
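As an illustration of a PromQL rule, the following hypothetical expression fires when a node's average CPU usage over the last 5 minutes exceeds 80%. The metric name assumes the standard node exporter; adjust it to the metrics actually available in your cluster.

  100 * (1 - avg by (instance) (rate(node_cpu_seconds_total{mode=\"idle\"}[5m]))) > 80\n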

                                          Tip

                                          The newly created alert policy is in the Not Triggered state. Once the threshold conditions and duration specified in the rules are met, it will change to the Triggered state.

                                          "},{"location":"en/end-user/insight/alert-center/alert-policy.html#create-log-rules","title":"Create Log Rules","text":"

                                          After filling in the basic information, click Add Rule and select Log Rule as the rule type.

                                          Creating log rules is supported only when the resource object is selected as a node or workload.

                                          Field Explanation:

                                          • Filter Condition : Field used to query log content, supports four filtering conditions: AND, OR, regular expression matching, and fuzzy matching.
                                          • Condition : Based on the filter condition, enter keywords or matching conditions.
                                          • Time Range : Time range for log queries.
                                          • Threshold Condition : Enter the alert threshold value in the input box. When the set threshold is reached, an alert will be triggered. Supported comparison operators are: >, \u2265, =, \u2264, <.
                                          • Alert Level : Select the alert level to indicate the severity of the alert.
                                          "},{"location":"en/end-user/insight/alert-center/alert-policy.html#create-event-rules","title":"Create Event Rules","text":"

                                          After filling in the basic information, click Add Rule and select Event Rule as the rule type.

                                          Creating event rules is supported only when the resource object is selected as a workload.

                                          Field Explanation:

                                          • Event Rule : Only supports selecting the workload as the resource object.
• Event Reason : Event reasons differ by workload type; multiple selected event reasons are combined with an \"AND\" relationship.
                                          • Time Range : Detect data generated within this time range. If the threshold condition is reached, an alert event will be triggered.
                                          • Threshold Condition : When the generated events reach the set threshold, an alert event will be triggered.
• Trend Chart : By default, it shows the trend of events over the last 10 minutes. The value at each point is the total number of occurrences during the configured time range ending at that point.
                                          "},{"location":"en/end-user/insight/alert-center/alert-policy.html#other-operations","title":"Other Operations","text":"

                                          Click \u2507 at the right side of the list, then choose Delete from the pop-up menu to delete an alert policy. By clicking on the policy name, you can enter the policy details where you can add, edit, or delete the alert rules under it.

                                          Warning

Deleted alert policies are permanently removed, so please proceed with caution.

                                          "},{"location":"en/end-user/insight/alert-center/alert-template.html","title":"Alert Template","text":"

Alert templates allow platform administrators to predefine alert templates and rules, which business units can use directly to create alert policies. This reduces the rule-management burden on business personnel while still allowing alert thresholds to be adjusted to the actual environment.

                                          "},{"location":"en/end-user/insight/alert-center/alert-template.html#create-alert-template","title":"Create Alert Template","text":"
                                          1. In the navigation bar, select Alert -> Alert Policy, and click Alert Template at the top.

                                          2. Click Create Alert Template, and set the name, description, and other information for the Alert template.

| Parameter | Description |
| --- | --- |
| Template Name | The name can only contain lowercase letters, numbers, and hyphens (-), must start and end with a lowercase letter or number, and can be up to 63 characters long. |
| Description | The description can contain any characters and can be up to 256 characters long. |
| Resource Type | Specifies the matching type of the alert template. |
| Alert Rule | Supports predefining multiple alert rules, including template rules and PromQL rules. |
                                          3. Click OK to complete the creation and return to the Alert template list. Click the template name to view the template details.

                                          "},{"location":"en/end-user/insight/alert-center/alert-template.html#edit-alert-template","title":"Edit Alert Template","text":"

Click \u2507 next to the target template, then click Edit to enter the editing page for the alert template.

                                          "},{"location":"en/end-user/insight/alert-center/alert-template.html#delete-alert-template","title":"Delete Alert Template","text":"

                                          Click \u2507 next to the target template, then click Delete. Enter the name of the Alert template in the input box to confirm deletion.

                                          "},{"location":"en/end-user/insight/alert-center/inhibition.html","title":"Alert Inhibition","text":"

                                          Alert Inhibition is mainly a mechanism for temporarily hiding or reducing the priority of alerts that do not need immediate attention. The purpose of this feature is to reduce unnecessary alert information that may disturb operations personnel, allowing them to focus on more critical issues.

Alert inhibition identifies and ignores certain alerts by defining rules that match specific conditions. The main cases are:

• Parent-child inhibition: when a parent alert (for example, a crash on a node) is triggered, all child alerts caused by it (for example, a crash on a container running on that node) are inhibited.
                                          • Similar alert inhibition: When alerts have the same characteristics (for example, the same problem on the same instance), multiple alerts are inhibited.
                                          "},{"location":"en/end-user/insight/alert-center/inhibition.html#create-inhibition","title":"Create Inhibition","text":"
                                          1. In the left navigation bar, select Alert -> Noise Reduction, and click Inhibition at the top.

                                          2. Click Create Inhibition, and set the name and rules for the inhibition.

                                            Note

Inhibition avoids floods of similar or related alerts triggered by the same issue: a set of rules, defined through Rule Details and Alert Details, identifies and ignores certain alerts.

| Parameter | Description |
| --- | --- |
| Name | The name can only contain lowercase letters, numbers, and hyphens (-), must start and end with a lowercase letter or number, and can be up to 63 characters long. |
| Description | The description can contain any characters and can be up to 256 characters long. |
| Cluster | The cluster where the inhibition rule applies. |
| Namespace | The namespace where the inhibition rule applies. |
| Source Alert | Matches alerts by label conditions. Alerts that meet all label conditions are compared with those that meet the inhibition conditions; alerts that do not meet the inhibition conditions are sent to the user as usual. Value ranges: Alert Level (Critical, Major, Minor); Resource Type (Cluster, Node, StatefulSet, Deployment, DaemonSet, Pod); Labels (alert identification attributes consisting of a label name and label value; custom values are supported). |
| Inhibition | Specifies the matching conditions for the target alert (the alert to be inhibited). Alerts that meet all the conditions are no longer sent to the user. |
| Equal | Specifies the list of labels compared to determine whether the source alert and target alert match. Inhibition is triggered only when the values of the labels listed in equal are exactly the same in both alerts. The equal field is optional; if omitted, all labels are used for matching. |

An Alertmanager-style configuration sketch follows this list.
                                          3. Click OK to complete the creation and return to Inhibition list. Click the inhibition rule name to view the rule details.
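Conceptually, Source Alert, Inhibition, and Equal map to Alertmanager-style inhibit rules. The sketch below uses Alertmanager syntax purely as an analogy, not as the format the platform stores; the label values are placeholders.

  inhibit_rules:\n  - source_matchers:  # conditions identifying the source alert\n      - severity=\"critical\"\n    target_matchers:  # alerts to be inhibited\n      - severity=\"warning\"\n    equal: [\"alertname\", \"cluster\", \"namespace\"]  # labels that must match exactly in both alerts\n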

                                          "},{"location":"en/end-user/insight/alert-center/inhibition.html#view-rule-details","title":"View Rule Details","text":"

                                          In the left navigation bar, select Alert -> Alert Policy, and click the policy name to view the rule details.

!!! note\n\n    You can add custom tags when adding rules.\n
                                          "},{"location":"en/end-user/insight/alert-center/inhibition.html#view-alert-details","title":"View Alert Details","text":"

                                          In the left navigation bar, select Alert -> Alerts, and click the policy name to view details.

                                          !!! note\n\n    Alert details show information and settings for creating inhibitions.\n
                                          "},{"location":"en/end-user/insight/alert-center/inhibition.html#edit-inhibition-rule","title":"Edit Inhibition Rule","text":"

                                          Click \u2507 next to the target rule, then click Edit to enter the editing page for the inhibition rule.

                                          "},{"location":"en/end-user/insight/alert-center/inhibition.html#delete-inhibition-rule","title":"Delete Inhibition Rule","text":"

                                          Click \u2507 next to the target rule, then click Delete. Enter the name of the inhibition rule in the input box to confirm deletion.

                                          "},{"location":"en/end-user/insight/alert-center/message.html","title":"Notification Settings","text":"

                                          On the Notification Settings page, you can configure how to send messages to users through email, WeCom, DingTalk, Webhook, and SMS.

                                          "},{"location":"en/end-user/insight/alert-center/message.html#email-group","title":"Email Group","text":"
                                          1. After entering Insight , click Alert Center -> Notification Settings in the left navigation bar. By default, the email notification object is selected. Click Add email group and add one or more email addresses.

                                          2. Multiple email addresses can be added.

                                          3. After the configuration is complete, the notification list will automatically return. Click \u2507 on the right side of the list to edit or delete the email group.

                                          "},{"location":"en/end-user/insight/alert-center/message.html#wecom","title":"WeCom","text":"
                                          1. In the left navigation bar, click Alert Center -> Notification Settings -> WeCom . Click Add Group Robot and add one or more group robots.

For the URL of the WeCom group robot, please refer to the official document of WeCom: How to use group robots. A direct test of the robot URL is sketched after these steps.

                                          2. After the configuration is complete, the notification list will automatically return. Click \u2507 on the right side of the list, select Send Test Information , and you can also edit or delete the group robot.
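To verify a group robot URL outside the platform, you can post a test message to it directly in the format described in WeCom's group robot documentation; the robot key below is a placeholder:

  curl -X POST -H 'Content-Type: application/json' -d '{\"msgtype\": \"text\", \"text\": {\"content\": \"test message\"}}' 'https://qyapi.weixin.qq.com/cgi-bin/webhook/send?key=<robot-key>'\n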

                                          "},{"location":"en/end-user/insight/alert-center/message.html#dingtalk","title":"DingTalk","text":"
                                          1. In the left navigation bar, click Alert Center -> Notification Settings -> DingTalk . Click Add Group Robot and add one or more group robots.

                                            For the URL of the DingTalk group robot, please refer to the official document of DingTalk: Custom Robot Access.

                                          2. After the configuration is complete, the notification list will automatically return. Click \u2507 on the right side of the list, select Send Test Information , and you can also edit or delete the group robot.

                                          "},{"location":"en/end-user/insight/alert-center/message.html#lark","title":"Lark","text":"
                                          1. In the left navigation bar, click Alert Center -> Notification Settings -> Lark . Click Add Group Bot and add one or more group bots.

                                            Note

                                            When signature verification is required in Lark's group bot, you need to fill in the specific signature key when enabling notifications. Refer to Customizing Bot User Guide.

                                          2. After configuration, you will be automatically redirected to the list page. Click \u2507 on the right side of the list and select Send Test Message . You can edit or delete group bots.

                                          "},{"location":"en/end-user/insight/alert-center/message.html#webhook","title":"Webhook","text":"
                                          1. In the left navigation bar, click Alert Center -> Notification Settings -> Webhook . Click New Webhook and add one or more Webhooks.

For the Webhook URL and more configuration methods, please refer to the webhook document. A simple connectivity test is sketched after these steps.

                                          2. After the configuration is complete, the notification list will automatically return. Click \u2507 on the right side of the list, select Send Test Information , and you can also edit or delete the Webhook.
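Before adding a receiver here, you can smoke-test that it accepts JSON POSTs. The URL and payload below are placeholders, not the platform's actual alert payload format, which is described in the webhook document above:

  curl -X POST -H 'Content-Type: application/json' -d '{\"test\": \"alert\"}' https://webhook.example.com/alerts -i\n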

                                          "},{"location":"en/end-user/insight/alert-center/message.html#message","title":"Message","text":"

                                          Note

Alert messages are sent to the user's personal Message section; notifications can be viewed by clicking \ud83d\udd14 at the top.

1. In the left navigation bar, click Alert Center -> Notification Settings -> Message, and click Create Message.

                                            You can add and notify multiple users for a message.

                                          2. After configuration, you will be automatically redirected to the list page. Click \u2507 on the right side of the list and select Send Test Message .

                                          "},{"location":"en/end-user/insight/alert-center/message.html#sms-group","title":"SMS Group","text":"
                                          1. In the left navigation bar, click Alert Center -> Notification Settings -> SMS . Click Add SMS Group and add one or more SMS groups.

                                          2. Enter the name, the object receiving the message, phone number, and notification server in the pop-up window.

                                            The notification server needs to be created in advance under Notification Settings -> Notification Server . Currently, two SMS providers are supported: Alibaba Cloud and Tencent Cloud. For the specific configuration parameters, refer to your own cloud account information.

                                          3. After the SMS group is successfully added, you will be automatically redirected to the notification list. Click \u2507 on the right side of the list to edit or delete the SMS group.

                                          "},{"location":"en/end-user/insight/alert-center/msg-template.html","title":"Message Templates","text":"

                                          The message template feature supports customizing the content of message templates and can notify specified objects in the form of email, WeCom, DingTalk, Webhook, and SMS.

                                          "},{"location":"en/end-user/insight/alert-center/msg-template.html#creating-a-message-template","title":"Creating a Message Template","text":"
                                          1. In the left navigation bar, select Alert -> Message Template .

                                            Insight comes with two default built-in templates in both Chinese and English for user convenience.

                                          2. Fill in the template content.

                                          Info

                                          Observability comes with predefined message templates. If you need to define the content of the templates, refer to Configure Notification Templates.
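
                                          As an illustrative sketch (not one of the built-in templates), a template body can be assembled from the variables listed in the details table below:

                                            Rule: {{ .Labels.alertname }} ({{ .Labels.severity }})\nCluster/Namespace: {{ .Labels.cluster }}/{{ .Labels.namespace }}\nStarted at: {{ .StartsAt }}\nDetails: {{ .Annotations.description }}\n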

                                          "},{"location":"en/end-user/insight/alert-center/msg-template.html#message-template-details","title":"Message Template Details","text":"

                                          Click the name of a message template to view the details of the message template in the right slider.

                                          Parameters | Variable | Description
                                          ruleName | {{ .Labels.alertname }} | The name of the rule that triggered the alert
                                          groupName | {{ .Labels.alertgroup }} | The name of the alert policy to which the alert rule belongs
                                          severity | {{ .Labels.severity }} | The level of the alert that was triggered
                                          cluster | {{ .Labels.cluster }} | The cluster where the resource that triggered the alert is located
                                          namespace | {{ .Labels.namespace }} | The namespace where the resource that triggered the alert is located
                                          node | {{ .Labels.node }} | The node where the resource that triggered the alert is located
                                          targetType | {{ .Labels.target_type }} | The resource type of the alert target
                                          target | {{ .Labels.target }} | The name of the object that triggered the alert
                                          value | {{ .Annotations.value }} | The metric value at the time the alert notification was triggered
                                          startsAt | {{ .StartsAt }} | The time when the alert started
                                          endsAt | {{ .EndsAt }} | The time when the alert ended
                                          description | {{ .Annotations.description }} | A detailed description of the alert
                                          labels | {{ for .labels }} {{ end }} | All labels of the alert; use the for function to iterate through the labels list to get all label contents.
                                          "},{"location":"en/end-user/insight/alert-center/msg-template.html#editing-or-deleting-a-message-template","title":"Editing or Deleting a Message Template","text":"

                                          Click \u2507 on the right side of the list and select Edit or Delete from the pop-up menu to modify or delete the message template.

                                          Warning

                                          Once a template is deleted, it cannot be recovered, so please use caution when deleting templates.

                                          "},{"location":"en/end-user/insight/alert-center/silent.html","title":"Alert Silence","text":"

                                          Alert silence is a feature that allows alerts meeting certain criteria to be temporarily disabled from sending notifications within a specific time range. This feature helps operations personnel avoid receiving too many noisy alerts during certain operations or events, while also allowing for more precise handling of real issues that need to be addressed.

                                          On the Alert Silence page, you can see two tabs: Active Rule and Expired Rule. The former presents the rules currently in effect, while the latter presents those that were defined in the past but have now expired (or have been deleted by the user).

                                          "},{"location":"en/end-user/insight/alert-center/silent.html#creating-a-silent-rule","title":"Creating a Silent Rule","text":"
                                          1. In the left navigation bar, select Alert -> Noise Reduction -> Alert Silence , and click the Create Silence Rule button.

                                          2. Fill in the parameters for the silent rule, such as cluster, namespace, tags, and time, to define the scope and effective time of the rule, and then click OK .

                                          3. Return to the rule list, and on the right side of the list, click \u2507 to edit or delete a silent rule.

                                          Through the Alert Silence feature, you can flexibly control which alerts should be ignored and when they should be effective, thereby improving operational efficiency and reducing the possibility of false alerts.

                                          "},{"location":"en/end-user/insight/alert-center/sms-provider.html","title":"Configure Notification Server","text":"

                                          Insight supports SMS notifications and currently sends alert messages using integrated Alibaba Cloud and Tencent Cloud SMS services. This article explains how to configure the SMS notification server in Insight. The variables supported in the SMS signature are the default variables in the message template. As the number of SMS characters is limited, it is recommended to choose concise and informative variables.

                                          For information on how to configure SMS recipients, refer to the document: Configure SMS Notification Group.

                                          "},{"location":"en/end-user/insight/alert-center/sms-provider.html#procedure","title":"Procedure","text":"
                                          1. Go to Alert Center -> Notification Settings -> Notification Server .

                                          2. Click Add Notification Server .

                                            • Configure Alibaba Cloud server.

                                              To apply for Alibaba Cloud SMS service, refer to Alibaba Cloud SMS Service.

                                              Field descriptions:

                                              • AccessKey ID : Parameter used by Alibaba Cloud to identify the user.
                                              • AccessKey Secret : Key used by Alibaba Cloud to authenticate the user. AccessKey Secret must be kept confidential.
                                              • SMS Signature : The SMS service supports creating signatures that meet the requirements according to user needs. When sending SMS, the SMS platform will add the approved SMS signature to the SMS content before sending it to the SMS recipient.
                                              • Template CODE : The code of the SMS template, which determines the specific content of the SMS to be sent.
                                              • Parameter Template : The SMS body template can contain variables. Users can use variables to customize the SMS content.

                                              Please refer to Alibaba Cloud Variable Specification.

                                              Note

                                              Example: The template content defined in Alibaba Cloud is: ${severity}: ${alertname} triggered at ${startat}. Refer to the configuration in the parameter template.
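
                                              The exact format expected by the Parameter Template field may vary by version; as a purely hypothetical sketch, the Alibaba Cloud placeholders above (severity, alertname, startat) could be mapped to the Insight message template variables like this:

                                                {\n  \"severity\": \"{{ .Labels.severity }}\",\n  \"alertname\": \"{{ .Labels.alertname }}\",\n  \"startat\": \"{{ .StartsAt }}\"\n}\n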

                                            • Configure Tencent Cloud server.

                                              To apply for Tencent Cloud SMS service, please refer to Tencent Cloud SMS.

                                              Field descriptions:

                                              • Secret ID : Parameter used by Tencent Cloud to identify the API caller.
                                              • SecretKey : Parameter used by Tencent Cloud to authenticate the API caller.
                                              • SMS Template ID : The SMS template ID automatically generated by Tencent Cloud system.
                                              • Signature Content : The SMS signature content, which is the full name or abbreviation of the actual website name defined in the Tencent Cloud SMS signature.
                                              • SdkAppId : SMS SdkAppId, the actual SdkAppId generated after adding the application in the Tencent Cloud SMS console.
                                              • Parameter Template : The SMS body template can contain variables. Users can use variables to customize the SMS content. Please refer to: Tencent Cloud Variable Specification.

                                              Note

                                              Example: The template content defined in Tencent Cloud is: {1}: {2} triggered at {3}. Refer to the configuration in the parameter template.

                                          "},{"location":"en/end-user/insight/collection-manag/agent-status.html","title":"insight-agent Component Status Explanation","text":"

                                          In AI platform, Insight acts as a multi-cluster observability product. To achieve unified data collection across multiple clusters, users need to install the Helm App insight-agent (installed by default in the insight-system namespace). Refer to How to Install insight-agent .

                                          "},{"location":"en/end-user/insight/collection-manag/agent-status.html#status-explanation","title":"Status Explanation","text":"

                                          In the \"Observability\" -> \"Collection Management\" section, you can view the installation status of insight-agent in each cluster.

                                          • Not Installed : insight-agent is not installed in the insight-system namespace of the cluster.
                                          • Running : insight-agent is successfully installed in the cluster, and all deployed components are running.
                                          • Error : If insight-agent is in this state, the Helm deployment failed or some deployed components are not in a running state.

                                          You can troubleshoot using the following steps:

                                          1. Run the following command. If the status is deployed , proceed to the next step. If it is failed , it is recommended to uninstall and reinstall it from Container Management -> Helm Apps as it may affect application upgrades:

                                            helm list -n insight-system\n
                                          2. Run the following command or check the status of the deployed components in Insight -> Data Collection . If there are Pods not in the Running state, restart the containers in an abnormal state.

                                            kubectl get pods -n insight-system\n
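
                                            For example, if a component pod is stuck in a non-running state, deleting it lets its controller recreate it. This is a generic kubectl sketch; substitute the actual pod name reported by the previous command:

                                            # List pods that are not Running or Completed\nkubectl get pods -n insight-system | grep -vE 'Running|Completed'\n# Delete the abnormal pod; its controller will recreate it\nkubectl delete pod <pod-name> -n insight-system\n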
                                          "},{"location":"en/end-user/insight/collection-manag/agent-status.html#additional-notes","title":"Additional Notes","text":"
                                          1. The resource consumption of the Prometheus metric collection component in insight-agent is directly proportional to the number of Pods running in the cluster. Please adjust the resources for Prometheus according to the cluster size. Refer to Prometheus Resource Planning.

                                          2. The storage capacity of the vmstorage metric storage component in the global service cluster is directly proportional to the total number of Pods in the clusters.

                                            • Please contact the platform administrator to adjust the disk capacity of vmstorage based on the cluster size. Refer to vmstorage Disk Capacity Planning.
                                            • Adjust vmstorage disk based on multi-cluster scale. Refer to vmstorage Disk Expansion.
                                          "},{"location":"en/end-user/insight/collection-manag/collection-manag.html","title":"Data Collection","text":"

                                          Data Collection centrally manages and displays the installation status of the collection plug-in insight-agent in each cluster. It helps users quickly view the health status of each cluster's collection plug-in and provides a quick entry to configure collection rules.

                                          The specific operation steps are as follows:

                                          1. Click in the upper left corner and select Insight -> Data Collection .

                                          2. You can view the status of all cluster collection plug-ins.

                                          3. When the cluster is connected to insight-agent and is running, click a cluster name to enter the details.

                                          4. In the Service Monitor tab, click the shortcut link to jump to Container Management -> CRD to add service discovery rules.

                                          "},{"location":"en/end-user/insight/collection-manag/metric-collect.html","title":"Metrics Retrieval Methods","text":"

                                          Prometheus primarily uses the Pull approach to retrieve monitoring metrics from target services' exposed endpoints. Therefore, it requires configuring proper scraping jobs to request monitoring data and write it into the storage provided by Prometheus. Currently, Prometheus offers several configurations for these jobs:

                                          • Native Job Configuration: This provides native Prometheus job configuration for scraping.
                                          • Pod Monitor: In the Kubernetes ecosystem, it allows scraping of monitoring data from Pods using Prometheus Operator.
                                          • Service Monitor: In the Kubernetes ecosystem, it allows scraping monitoring data from Endpoints of Services using Prometheus Operator.

                                          Note

                                          [ ] indicates an optional configuration item.

                                          "},{"location":"en/end-user/insight/collection-manag/metric-collect.html#native-job-configuration","title":"Native Job Configuration","text":"

                                          The corresponding configuration items are explained as follows:

                                          # Name of the scraping job, also adds a label (job=job_name) to the scraped metrics\njob_name: <job_name>\n\n# Time interval between scrapes\n[ scrape_interval: <duration> | default = <global_config.scrape_interval> ]\n\n# Timeout for scrape requests\n[ scrape_timeout: <duration> | default = <global_config.scrape_timeout> ]\n\n# URI path for the scrape request\n[ metrics_path: <path> | default = /metrics ]\n\n# Handling of label conflicts between scraped labels and labels added by the backend Prometheus.\n# true: Retains the scraped labels and ignores conflicting labels from the backend Prometheus.\n# false: Adds an \"exported_<original-label>\" prefix to the scraped labels and includes the additional labels added by the backend Prometheus.\n[ honor_labels: <boolean> | default = false ]\n\n# Whether to use the timestamp generated by the target being scraped.\n# true: Uses the timestamp from the target if available.\n# false: Ignores the timestamp from the target.\n[ honor_timestamps: <boolean> | default = true ]\n\n# Protocol for the scrape request: http or https\n[ scheme: <scheme> | default = http ]\n\n# URL parameters for the scrape request\nparams:\n  [ <string>: [<string>, ...] ]\n\n# Set the value of the `Authorization` header in the scrape request through basic authentication. password/password_file are mutually exclusive, with password_file taking precedence.\nbasic_auth:\n  [ username: <string> ]\n  [ password: <secret> ]\n  [ password_file: <string> ]\n\n# Set the value of the `Authorization` header in the scrape request through bearer token authentication. bearer_token/bearer_token_file are mutually exclusive, with bearer_token taking precedence.\n[ bearer_token: <secret> ]\n\n# Set the value of the `Authorization` header in the scrape request through bearer token authentication. bearer_token/bearer_token_file are mutually exclusive, with bearer_token taking precedence.\n[ bearer_token_file: <filename> ]\n\n# Whether the scrape connection should use a TLS secure channel, configure the proper TLS parameters\ntls_config:\n  [ <tls_config> ]\n\n# Use a proxy service to scrape the metrics from the target, specify the address of the proxy service.\n[ proxy_url: <string> ]\n\n# Specify the targets using static configuration, see explanation below.\nstatic_configs:\n  [ - <static_config> ... ]\n\n# CVM service discovery configuration, see explanation below.\ncvm_sd_configs:\n  [ - <cvm_sd_config> ... ]\n\n# After scraping the data, rewrite the labels of the proper target using the relabel mechanism. Executes multiple relabel rules in order.\n# See explanation below for relabel_config.\nrelabel_configs:\n  [ - <relabel_config> ... ]\n\n# Before writing the scraped data, rewrite the values of the labels using the relabel mechanism. Executes multiple relabel rules in order.\n# See explanation below for relabel_config.\nmetric_relabel_configs:\n  [ - <relabel_config> ... ]\n\n# Limit the number of data points per scrape, 0: no limit, default is 0\n[ sample_limit: <int> | default = 0 ]\n\n# Limit the number of targets per scrape, 0: no limit, default is 0\n[ target_limit: <int> | default = 0 ]\n
                                          "},{"location":"en/end-user/insight/collection-manag/metric-collect.html#pod-monitor","title":"Pod Monitor","text":"

                                          The corresponding configuration items are explained as follows:

                                          # Prometheus Operator CRD version\napiVersion: monitoring.coreos.com/v1\n# proper Kubernetes resource type, here it is PodMonitor\nkind: PodMonitor\n# proper Kubernetes Metadata, only the name needs to be concerned. If jobLabel is not specified, the value of the job label in the scraped metrics will be <namespace>/<name>\nmetadata:\n  name: redis-exporter # Specify a unique name\n  namespace: cm-prometheus  # Fixed namespace, no need to modify\n# Describes the selection and configuration of the target Pods to be scraped\n  labels:\n    operator.insight.io/managed-by: insight # Label indicating managed by Insight\nspec:\n  # Specify the label of the proper Pod, pod monitor will use this value as the job label value.\n  # If viewing the Pod YAML, use the values in pod.metadata.labels.\n  # If viewing Deployment/Daemonset/Statefulset, use spec.template.metadata.labels.\n  [ jobLabel: string ]\n  # Adds the proper Pod's Labels to the Target's Labels\n  [ podTargetLabels: []string ]\n  # Limit the number of data points per scrape, 0: no limit, default is 0\n  [ sampleLimit: uint64 ]\n  # Limit the number of targets per scrape, 0: no limit, default is 0\n  [ targetLimit: uint64 ]\n  # Configure the Prometheus HTTP endpoints that need to be scraped and exposed. Multiple endpoints can be configured.\n  podMetricsEndpoints:\n  [ - <endpoint_config> ... ] # See explanation below for endpoint\n  # Select the namespaces where the monitored Pods are located. Leave it blank to select all namespaces.\n  [ namespaceSelector: ]\n    # Select all namespaces\n    [ any: bool ]\n    # Specify the list of namespaces to be selected\n    [ matchNames: []string ]\n  # Specify the Label values of the Pods to be monitored in order to locate the target Pods [K8S metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)\n  selector:\n    [ matchExpressions: array ]\n      [ example: - {key: tier, operator: In, values: [cache]} ]\n    [ matchLabels: object ]\n      [ example: k8s-app: redis-exporter ]\n
                                          "},{"location":"en/end-user/insight/collection-manag/metric-collect.html#example-1","title":"Example 1","text":"
                                          apiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: redis-exporter # Specify a unique name\n  namespace: cm-prometheus # Fixed namespace, do not modify\n  labels:\n    operator.insight.io/managed-by: insight  # Label indicating managed by Insight, required.\nspec:\n  podMetricsEndpoints:\n    - interval: 30s\n      port: metric-port # Specify the Port Name proper to Prometheus Exporter in the pod YAML\n      path: /metrics # Specify the value of the Path proper to Prometheus Exporter, if not specified, default is /metrics\n      relabelings:\n        - action: replace\n          sourceLabels:\n            - instance\n          regex: (.*)\n          targetLabel: instance\n          replacement: \"crs-xxxxxx\" # Adjust to the proper Redis instance ID\n        - action: replace\n          sourceLabels:\n            - instance\n          regex: (.*)\n          targetLabel: ip\n          replacement: \"1.x.x.x\" # Adjust to the proper Redis instance IP\n  namespaceSelector: # Select the namespaces where the monitored Pods are located\n    matchNames:\n      - redis-test\n  selector: # Specify the Label values of the Pods to be monitored in order to locate the target pods\n    matchLabels:\n      k8s-app: redis-exporter\n
                                          "},{"location":"en/end-user/insight/collection-manag/metric-collect.html#example-2","title":"Example 2","text":"
                                          job_name: prometheus\nscrape_interval: 30s\nstatic_configs:\n- targets:\n  - 127.0.0.1:9090\n
                                          "},{"location":"en/end-user/insight/collection-manag/metric-collect.html#service-monitor","title":"Service Monitor","text":"

                                          The corresponding configuration items are explained as follows:

                                          # Prometheus Operator CRD version\napiVersion: monitoring.coreos.com/v1\n# proper Kubernetes resource type, here it is ServiceMonitor\nkind: ServiceMonitor\n# proper Kubernetes Metadata, only the name needs to be concerned. If jobLabel is not specified, the value of the job label in the scraped metrics will be the name of the Service.\nmetadata:\n  name: redis-exporter # Specify a unique name\n  namespace: cm-prometheus  # Fixed namespace, no need to modify\n# Describes the selection and configuration of the target Pods to be scraped\n  labels:\n    operator.insight.io/managed-by: insight # Label indicating managed by Insight, required.\nspec:\n  # Specify the label(metadata/labels) of the proper Pod, service monitor will use this value as the job label value.\n  [ jobLabel: string ]\n  # Adds the Labels of the proper service to the Target's Labels\n  [ targetLabels: []string ]\n  # Adds the Labels of the proper Pod to the Target's Labels\n  [ podTargetLabels: []string ]\n  # Limit the number of data points per scrape, 0: no limit, default is 0\n  [ sampleLimit: uint64 ]\n  # Limit the number of targets per scrape, 0: no limit, default is 0\n  [ targetLimit: uint64 ]\n  # Configure the Prometheus HTTP endpoints that need to be scraped and exposed. Multiple endpoints can be configured.\n  endpoints:\n  [ - <endpoint_config> ... ] # See explanation below for endpoint\n  # Select the namespaces where the monitored Pods are located. Leave it blank to select all namespaces.\n  [ namespaceSelector: ]\n    # Select all namespaces\n    [ any: bool ]\n    # Specify the list of namespaces to be selected\n    [ matchNames: []string ]\n  # Specify the Label values of the Pods to be monitored in order to locate the target Pods [K8S metav1.LabelSelector](https://v1-17.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#labelselector-v1-meta)\n  selector:\n    [ matchExpressions: array ]\n      [ example: - {key: tier, operator: In, values: [cache]} ]\n    [ matchLabels: object ]\n      [ example: k8s-app: redis-exporter ]\n
                                          "},{"location":"en/end-user/insight/collection-manag/metric-collect.html#example","title":"Example","text":"
                                          apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: go-demo # Specify a unique name\n  namespace: cm-prometheus # Fixed namespace, do not modify\n  labels:\n    operator.insight.io/managed-by: insight  # Label indicating managed by Insight, required.\nspec:\n  endpoints:\n    - interval: 30s\n      # Specify the Port Name proper to Prometheus Exporter in the service YAML\n      port: 8080-8080-tcp\n      # Specify the value of the Path proper to Prometheus Exporter, if not specified, default is /metrics\n      path: /metrics\n      relabelings:\n        # ** There must be a label named 'application', assuming there is a label named 'app' in k8s,\n        # we replace it with 'application' using the relabel 'replace' action\n        - action: replace\n          sourceLabels: [__meta_kubernetes_pod_label_app]\n          targetLabel: application\n  # Select the namespace where the monitored service is located\n  namespaceSelector:\n    matchNames:\n      - golang-demo\n  # Specify the Label values of the service to be monitored in order to locate the target service\n  selector:\n    matchLabels:\n      app: golang-app-demo\n
                                          "},{"location":"en/end-user/insight/collection-manag/metric-collect.html#endpoint_config","title":"endpoint_config","text":"

                                          The corresponding configuration items are explained as follows:

                                          # The name of the proper port. Please note that it's not the actual port number.\n# Default: 80. Possible values are as follows:\n# ServiceMonitor: corresponds to Service>spec/ports/name;\n# PodMonitor: explained as follows:\n#   If viewing the Pod YAML, take the value from pod.spec.containers.ports.name.\n#   If viewing Deployment/DaemonSet/StatefulSet, take the value from spec.template.spec.containers.ports.name.\n[ port: string | default = 80]\n# The URI path for the scrape request.\n[ path: string | default = /metrics ]\n# The protocol for the scrape: http or https.\n[ scheme: string | default = http]\n# URL parameters for the scrape request.\n[ params: map[string][]string]\n# The interval between scrape requests.\n[ interval: string | default = 30s ]\n# The timeout for the scrape request.\n[ scrapeTimeout: string | default = 30s]\n# Whether the scrape connection should be made over a secure TLS channel, and the TLS configuration.\n[ tlsConfig: TLSConfig ]\n# Read the bearer token value from the specified file and include it in the headers of the scrape request.\n[ bearerTokenFile: string ]\n# Read the bearer token from the specified K8S secret key. Note that the secret namespace must match the PodMonitor/ServiceMonitor.\n[ bearerTokenSecret: string ]\n# Handling conflicts when scraped labels conflict with labels added by the backend Prometheus.\n# true: Keep the scraped labels and ignore the conflicting labels from the backend Prometheus.\n# false: For conflicting labels, prefix the scraped label with 'exported_<original-label>' and add the labels added by the backend Prometheus.\n[ honorLabels: bool | default = false ]\n# Whether to use the timestamp generated on the target during the scrape.\n# true: Use the timestamp on the target if available.\n# false: Ignore the timestamp on the target.\n[ honorTimestamps: bool | default = true ]\n# Basic authentication credentials. Fill in the values of username/password from the proper K8S secret key. Note that the secret namespace must match the PodMonitor/ServiceMonitor.\n[ basicAuth: BasicAuth ]\n# Scrape the metrics from the target through a proxy server. Specify the address of the proxy server.\n[ proxyUrl: string ]\n# After scraping the data, rewrite the values of the labels on the target using the relabeling mechanism. Multiple relabel rules are executed in order.\n# See explanation below for relabel_config\nrelabelings:\n[ - <relabel_config> ...]\n# Before writing the scraped data, rewrite the values of the proper labels on the target using the relabeling mechanism. Multiple relabel rules are executed in order.\n# See explanation below for relabel_config\nmetricRelabelings:\n[ - <relabel_config> ...]\n
                                          "},{"location":"en/end-user/insight/collection-manag/metric-collect.html#relabel_config","title":"relabel_config","text":"

                                          The corresponding configuration items are explained as follows:

                                          # Specifies which labels to take from the original labels for relabeling. The values taken are concatenated using the separator defined in the configuration.\n# For PodMonitor/ServiceMonitor, the proper configmap is sourceLabels.\n[ source_labels: '[' <labelname> [, ...] ']' ]\n# Defines the character used to concatenate the values of the labels to be relabeled. Default is ';'.\n[ separator: <string> | default = ; ]\n\n# When the action is replace/hashmod, target_label is used to specify the proper label name.\n# For PodMonitor/ServiceMonitor, the proper configmap is targetLabel.\n[ target_label: <labelname> ]\n\n# Regular expression used to match the values of the source labels.\n[ regex: <regex> | default = (.*) ]\n\n# Used when action is hashmod, it takes the modulus value based on the MD5 hash of the source label's value.\n[ modulus: <int> ]\n\n# Used when action is replace, it defines the expression to replace when the regex matches. It can use regular expression replacement with regex.\n[ replacement: <string> | default = $1 ]\n\n# Actions performed based on the matched values of regex. The available actions are as follows, with replace being the default:\n# replace: If the regex matches, replace the proper value with the value defined in replacement. Set the value using target_label and add the proper label.\n# keep: If the regex doesn't match, discard the value.\n# drop: If the regex matches, discard the value.\n# hashmod: Take the modulus of the MD5 hash of the source label's value based on the value specified in modulus.\n# Add a new label with a label name specified by target_label.\n# labelmap: If the regex matches, replace the proper label name with the value specified in replacement.\n# labeldrop: If the regex matches, delete the proper label.\n# labelkeep: If the regex doesn't match, delete the proper label.\n[ action: <relabel_action> | default = replace ]\n
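
                                          For instance, a minimal sketch of a drop rule in PodMonitor/ServiceMonitor form, assuming the monitored pods carry a label env whose value test should be excluded from scraping:

                                          relabelings:\n  - action: drop\n    sourceLabels: [__meta_kubernetes_pod_label_env]\n    regex: test\n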
                                          "},{"location":"en/end-user/insight/collection-manag/probe-module.html","title":"Custom probers","text":"

                                          Insight uses the Blackbox Exporter provided by Prometheus as a blackbox monitoring solution, allowing detection of target instances via HTTP, HTTPS, DNS, ICMP, TCP, and gRPC. It can be used in the following scenarios:

                                          • HTTP/HTTPS: URL/API availability monitoring
                                          • ICMP: Host availability monitoring
                                          • TCP: Port availability monitoring
                                          • DNS: Domain name resolution

                                          In this page, we will explain how to configure custom probers in an existing Blackbox ConfigMap.

                                          ICMP prober is not enabled by default in Insight because it requires higher permissions. Therefore, we will use the HTTP prober as an example to demonstrate how to modify the ConfigMap to achieve custom HTTP probing.

                                          "},{"location":"en/end-user/insight/collection-manag/probe-module.html#procedure","title":"Procedure","text":"
                                          1. Go to Clusters in Container Management and enter the details of the target cluster.
                                          2. Click the left navigation bar and select ConfigMaps & Secrets -> ConfigMaps .
                                          3. Find the ConfigMap named insight-agent-prometheus-blackbox-exporter and click Edit YAML .

                                            Add custom probers under modules :

                                          HTTP ProberICMP Prober
                                          modules:\n  http_2xx:\n    prober: http\n    timeout: 5s\n    http:\n      valid_http_versions: [HTTP/1.1, HTTP/2.0]\n      valid_status_codes: []  # Defaults to 2xx\n      method: GET\n

                                          modules:\n  ICMP: # Example of ICMP prober configuration\n    prober: icmp\n    timeout: 5s\n    icmp:\n      preferred_ip_protocol: ip4\n  icmp_example: # Example 2 of ICMP prober configuration\n    prober: icmp\n    timeout: 5s\n    icmp:\n      preferred_ip_protocol: \"ip4\"\n      source_ip_address: \"127.0.0.1\"\n
                                          Since ICMP requires higher permissions, we also need to elevate the pod permissions. Otherwise, an operation not permitted error will occur. There are two ways to elevate permissions:

                                          • Directly edit the BlackBox Exporter deployment file to enable it

                                            apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: insight-agent-prometheus-blackbox-exporter\n  namespace: insight-system\nspec:\n  template:\n    spec:\n      containers:\n        - name: blackbox-exporter\n          image: # ... (image, args, ports, etc. remain unchanged)\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            allowPrivilegeEscalation: false\n            capabilities:\n              add:\n              - NET_RAW\n              drop:\n              - ALL\n            readOnlyRootFilesystem: true\n            runAsGroup: 0\n            runAsNonRoot: false\n            runAsUser: 0\n
                                          • Elevate permissions via helm upgrade

                                            prometheus-blackbox-exporter:\n  enabled: true\n  securityContext:\n    runAsUser: 0\n    runAsGroup: 0\n    readOnlyRootFilesystem: true\n    runAsNonRoot: false\n    allowPrivilegeEscalation: false\n    capabilities:\n      add: [\"NET_RAW\"]\n

                                          Info

                                          For more probers, refer to blackbox_exporter Configuration.

                                          "},{"location":"en/end-user/insight/collection-manag/probe-module.html#other-references","title":"Other References","text":"

                                          The following YAML file contains various probers such as HTTP, TCP, SMTP, ICMP, and DNS. You can modify the configuration file of insight-agent-prometheus-blackbox-exporter according to your needs.

                                          Click to view the complete YAML file
                                          kind: ConfigMap\napiVersion: v1\nmetadata:\n  name: insight-agent-prometheus-blackbox-exporter\n  namespace: insight-system\n  labels:\n    app.kubernetes.io/instance: insight-agent\n    app.kubernetes.io/managed-by: Helm\n    app.kubernetes.io/name: prometheus-blackbox-exporter\n    app.kubernetes.io/version: v0.24.0\n    helm.sh/chart: prometheus-blackbox-exporter-8.8.0\n  annotations:\n    meta.helm.sh/release-name: insight-agent\n    meta.helm.sh/release-namespace: insight-system\ndata:\n  blackbox.yaml: |\n    modules:\n      HTTP_GET:\n        prober: http\n        timeout: 5s\n        http:\n          method: GET\n          valid_http_versions: [\"HTTP/1.1\", \"HTTP/2.0\"]\n          follow_redirects: true\n          preferred_ip_protocol: \"ip4\"\n      HTTP_POST:\n        prober: http\n        timeout: 5s\n        http:\n          method: POST\n          body_size_limit: 1MB\n      TCP:\n        prober: tcp\n        timeout: 5s\n      # Not enabled by default:\n      # ICMP:\n      #   prober: icmp\n      #   timeout: 5s\n      #   icmp:\n      #     preferred_ip_protocol: ip4\n      SSH:\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n          - expect: \"^SSH-2.0-\"\n      POP3S:\n        prober: tcp\n        tcp:\n          query_response:\n          - expect: \"^+OK\"\n          tls: true\n          tls_config:\n            insecure_skip_verify: false\n      http_2xx_example:               # http prober example\n        prober: http\n        timeout: 5s                   # probe timeout\n        http:\n          valid_http_versions: [\"HTTP/1.1\", \"HTTP/2.0\"]                   # Version in the response, usually default\n          valid_status_codes: []  # Defaults to 2xx                       # Valid range of response codes, probe successful if within this range\n          method: GET                 # request method\n          headers:                    # request headers\n            Host: vhost.example.com\n            Accept-Language: en-US\n            Origin: example.com\n          no_follow_redirects: false  # allow redirects\n          fail_if_ssl: false   \n          fail_if_not_ssl: false\n          fail_if_body_matches_regexp:\n            - \"Could not connect to database\"\n          fail_if_body_not_matches_regexp:\n            - \"Download the latest version here\"\n          fail_if_header_matches: # Verifies that no cookies are set\n            - header: Set-Cookie\n              allow_missing: true\n              regexp: '.*'\n          fail_if_header_not_matches:\n            - header: Access-Control-Allow-Origin\n              regexp: '(\\*|example\\.com)'\n          tls_config:                  # tls configuration for https requests\n            insecure_skip_verify: false\n          preferred_ip_protocol: \"ip4\" # defaults to \"ip6\"                 # Preferred IP protocol version\n          ip_protocol_fallback: false  # no fallback to \"ip6\"            \n      http_post_2xx:                   # http prober example with body\n        prober: http\n        timeout: 5s\n        http:\n          method: POST                 # probe request method\n          headers:\n            Content-Type: application/json\n          body: '{\"username\":\"admin\",\"password\":\"123456\"}'                   # body carried during probe\n      http_basic_auth_example:         # prober example with username and password\n        prober: http\n        timeout: 5s\n        http:\n          
method: POST\n          headers:\n            Host: \"login.example.com\"\n          basic_auth:                  # username and password to be added during probe\n            username: \"username\"\n            password: \"mysecret\"\n      http_custom_ca_example:\n        prober: http\n        http:\n          method: GET\n          tls_config:                  # root certificate used during probe\n            ca_file: \"/certs/my_cert.crt\"\n      http_gzip:\n        prober: http\n        http:\n          method: GET\n          compression: gzip            # compression method used during probe\n      http_gzip_with_accept_encoding:\n        prober: http\n        http:\n          method: GET\n          compression: gzip\n          headers:\n            Accept-Encoding: gzip\n      tls_connect:                     # TCP prober example\n        prober: tcp\n        timeout: 5s\n        tcp:\n          tls: true                    # use TLS\n      tcp_connect_example:\n        prober: tcp\n        timeout: 5s\n      imap_starttls:                   # IMAP email server probe configuration example\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - expect: \"OK.*STARTTLS\"\n            - send: \". STARTTLS\"\n            - expect: \"OK\"\n            - starttls: true\n            - send: \". capability\"\n            - expect: \"CAPABILITY IMAP4rev1\"\n      smtp_starttls:                   # SMTP email server probe configuration example\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - expect: \"^220 ([^ ]+) ESMTP (.+)$\"\n            - send: \"EHLO prober\\r\"\n            - expect: \"^250-STARTTLS\"\n            - send: \"STARTTLS\\r\"\n            - expect: \"^220\"\n            - starttls: true\n            - send: \"EHLO prober\\r\"\n            - expect: \"^250-AUTH\"\n            - send: \"QUIT\\r\"\n      irc_banner_example:\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - send: \"NICK prober\"\n            - send: \"USER prober prober prober :prober\"\n            - expect: \"PING :([^ ]+)\"\n              send: \"PONG ${1}\"\n            - expect: \"^:[^ ]+ 001\"\n      # icmp_example:                    # ICMP prober configuration example\n      #  prober: icmp\n      #  timeout: 5s\n      #  icmp:\n      #    preferred_ip_protocol: \"ip4\"\n      #    source_ip_address: \"127.0.0.1\"\n      dns_udp_example:                 # DNS query example using UDP\n        prober: dns\n        timeout: 5s\n        dns:\n          query_name: \"www.prometheus.io\"                 # domain name to resolve\n          query_type: \"A\"              # type proper to this domain\n          valid_rcodes:\n          - NOERROR\n          validate_answer_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n            fail_if_all_match_regexp:\n            - \".*127.0.0.1\"\n            fail_if_not_matches_regexp:\n            - \"www.prometheus.io.\\t300\\tIN\\tA\\t127.0.0.1\"\n            fail_if_none_matches_regexp:\n            - \"127.0.0.1\"\n          validate_authority_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n          validate_additional_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n      dns_soa:\n        prober: dns\n        dns:\n          query_name: \"prometheus.io\"\n          query_type: \"SOA\"\n      dns_tcp_example:               # DNS query example using TCP\n 
       prober: dns\n        dns:\n          transport_protocol: \"tcp\" # defaults to \"udp\"\n          preferred_ip_protocol: \"ip4\" # defaults to \"ip6\"\n          query_name: \"www.prometheus.io\"\n
                                          "},{"location":"en/end-user/insight/collection-manag/service-monitor.html","title":"Configure service discovery rules","text":"

                                          Insight supports creating the CRD ServiceMonitor through Container Management to meet your collection requirements for custom service discovery. Users can use ServiceMonitor to define the namespace scope for Pod discovery and select the monitored Services through matchLabel .

                                          "},{"location":"en/end-user/insight/collection-manag/service-monitor.html#prerequisites","title":"Prerequisites","text":"

                                          The cluster has the Helm App insight-agent installed and in the running state.

                                          "},{"location":"en/end-user/insight/collection-manag/service-monitor.html#steps","title":"Steps","text":"
                                          1. Select Data Collection on the left navigation bar to view the status of all cluster collection plug-ins.

                                          2. Click a cluster name to enter the collection configuration details.

                                          3. Click the link to jump to Container Management to create a Service Monitor.

                                            apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: micrometer-demo # (1)\n  namespace: insight-system # (2)\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  endpoints: # (3)\n    - honorLabels: true\n      interval: 15s\n      path: /actuator/prometheus\n      port: http\n  namespaceSelector: # (4)\n    matchNames:\n      - insight-system  # (5)\n  selector: # (6)\n    matchLabels:\n      micrometer-prometheus-discovery: \"true\"\n
                                            1. Specify the name of the ServiceMonitor.
                                            2. Specify the namespace of the ServiceMonitor.
                                            3. This is the service endpoint, which represents the address where Prometheus collects Metrics. endpoints is an array, and multiple endpoints can be created at the same time. Each endpoint contains three fields, and the meaning of each field is as follows:

                                              • interval : Specifies the collection cycle of Prometheus for the current endpoint . The unit is seconds, set to 15s in this example.
                                              • path : Specifies the collection path of Prometheus. In this example, it is specified as /actuator/prometheus .
                                              • port : Specifies the port used to collect data. Its value is the port name defined in the Service being collected.
                                            4. This is the scope of the Service that needs to be discovered. namespaceSelector contains two mutually exclusive fields, and the meaning of the fields is as follows:

                                              • any : The only allowed value is true . When this field is set, changes to all Services that meet the Selector filtering conditions will be watched.
                                              • matchNames : An array value that specifies the scope of namespace to be monitored. For example, if you only want to monitor the Services in two namespaces, default and insight-system, the matchNames are set as follows:

                                                namespaceSelector:\n  matchNames:\n    - default\n    - insight-system\n
                                            5. The namespace where the application that needs to expose metrics is located

                                            6. Used to select the Service
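
                                            After creating the ServiceMonitor, you can optionally confirm that the resource exists in the target namespace with standard kubectl usage (names taken from the example above):

                                            kubectl get servicemonitor micrometer-demo -n insight-system\n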
                                          "},{"location":"en/end-user/insight/dashboard/dashboard.html","title":"Dashboard","text":"

                                          Grafana is a cross-platform open source visual analysis tool. Insight uses open source Grafana to provide monitoring services, and supports viewing resource consumption from multiple dimensions such as clusters, nodes, and namespaces.

                                          For more information on open source Grafana, see Grafana Official Documentation.

                                          "},{"location":"en/end-user/insight/dashboard/dashboard.html#steps","title":"Steps","text":"
                                          1. Select Dashboard from the left navigation bar.

                                            • In the Insight / Overview dashboard, you can view the resource usage of multiple clusters and analyze resource usage, network, storage, and more based on dimensions such as namespaces and Pods.

                                            • Click the dropdown menu in the upper-left corner of the dashboard to switch between clusters.

                                            • Click the lower-right corner of the dashboard to switch the time range for queries.

                                          2. Insight provides several recommended dashboards that allow monitoring from different dimensions such as nodes, namespaces, and workloads. Switch between dashboards by clicking the insight-system / Insight / Overview section.

                                          Note

                                          1. For accessing Grafana UI, refer to Access Native Grafana.

                                          2. For importing custom dashboards, refer to Importing Custom Dashboards.

                                          "},{"location":"en/end-user/insight/dashboard/import-dashboard.html","title":"Import Custom Dashboards","text":"

                                          By using Grafana CRD, you can incorporate the management and deployment of dashboards into the lifecycle management of Kubernetes. This enables version control, automated deployment, and cluster-level management of dashboards. This page describes how to import custom dashboards using CRD and the UI interface.

                                          "},{"location":"en/end-user/insight/dashboard/import-dashboard.html#steps","title":"Steps","text":"
                                          1. Log in to the AI platform and go to Container Management . Select the kpanda-global-cluster from the cluster list.

                                          2. Choose Custom Resources from the left navigation bar. Look for grafanadashboards.integreatly.org in the list and click it to view the details.

                                          3. Click YAML Create and use the following template. Replace the dashboard JSON in the Json field.

                                            • namespace : Specify the target namespace.
                                            • name : Provide a name for the dashboard.
                                            • label : Mandatory. Set the label as operator.insight.io/managed-by: insight .
                                            apiVersion: integreatly.org/v1alpha1\nkind: GrafanaDashboard\nmetadata:\n  labels:\n    app: insight-grafana-operator\n    operator.insight.io/managed-by: insight\n  name: sample-dashboard\n  namespace: insight-system\nspec:\n  json: >\n    {\n      \"id\": null,\n      \"title\": \"Simple Dashboard\",\n      \"tags\": [],\n      \"style\": \"dark\",\n      \"timezone\": \"browser\",\n      \"editable\": true,\n      \"hideControls\": false,\n      \"graphTooltip\": 1,\n      \"panels\": [],\n      \"time\": {\n        \"from\": \"now-6h\",\n        \"to\": \"now\"\n      },\n      \"timepicker\": {\n        \"time_options\": [],\n        \"refresh_intervals\": []\n      },\n      \"templating\": {\n        \"list\": []\n      },\n      \"annotations\": {\n        \"list\": []\n      },\n      \"refresh\": \"5s\",\n      \"schemaVersion\": 17,\n      \"version\": 0,\n      \"links\": []\n    }\n
                                          4. After clicking OK , wait for a while to view the newly imported dashboard in Dashboard .

                                          Info

                                          If you need to customize the dashboard, refer to Add Dashboard Panel.

                                          "},{"location":"en/end-user/insight/dashboard/login-grafana.html","title":"Access Native Grafana","text":"

                                          Please make sure that the Helm App Insight in your global management cluster is in Running state.

                                          The specific operation steps are as follows:

                                          1. Log in to the console to access native Grafana.

                                            Access address: http://ip:port/ui/insight-grafana

                                            For example: http://10.6.10.233:30209/ui/insight-grafana

                                          2. Click Login in the lower right corner, and use the default username and password to log in.

                                            • Default username: admin

                                            • Default password: admin

                                          3. Click Log in to complete the login.

                                          "},{"location":"en/end-user/insight/dashboard/overview.html","title":"Overview","text":"

                                          Insight only collects data from clusters that have insight-agent installed and running in a normal state. The overview provides an overview of resources across multiple clusters:

                                          • Alert Statistics: Provides statistics on active alerts across all clusters.
                                          • Resource Consumption: Displays the resource usage trends for the top 5 clusters and nodes in the past hour, based on CPU usage, memory usage, and disk usage.
                                          • By default, the sorting is based on CPU usage. You can switch the metric to sort clusters and nodes.
                                          • Resource Trends: Shows the trends in the number of nodes over the past 15 days and the running trend of pods in the last hour.
                                          • Service Requests Ranking: Displays the top 5 services with the highest request latency and error rates, along with their respective clusters and namespaces in the multi-cluster environment.
                                          "},{"location":"en/end-user/insight/dashboard/overview.html#operation-procedure","title":"Operation procedure","text":"

                                          Select Overview in the left navigation bar to enter the details page.

                                          "},{"location":"en/end-user/insight/data-query/log.html","title":"Log query","text":"

                                          By default, Insight collects node logs, container logs, and Kubernetes audit logs. In the log query page, you can search for standard output (stdout) logs within the permissions of your login account. This includes node logs, product logs, and Kubernetes audit logs. You can quickly find the desired logs among a large volume of logs. Additionally, you can use the source information and contextual raw data of the logs to assist in troubleshooting and issue resolution.

                                          "},{"location":"en/end-user/insight/data-query/log.html#prerequisites","title":"Prerequisites","text":"

                                          The cluster has insight-agent installed and the application is in running state.

                                          "},{"location":"en/end-user/insight/data-query/log.html#query-log","title":"Query log","text":"
                                          1. In the left navigation bar, select Data Query -> Log Query .

                                          2. After selecting the query criteria, click Search , and the log records in the form of graphs will be displayed. The most recent logs are displayed on top.

                                          3. In the Filter panel, switch Type and select Node to check the logs of all nodes in the cluster.

                                          4. In the Filter panel, switch Type and select Event to view the logs generated by all Kubernetes events in the cluster.

                                          Lucene Syntax Explanation:

                                          1. Use logical operators (AND, OR, NOT, \"\") to query multiple keywords. For example: keyword1 AND (keyword2 OR keyword3) NOT keyword4.
                                          2. Use a tilde (~) for fuzzy queries. You can optionally specify a parameter after the \"~\" to control the similarity of the fuzzy query. If not specified, it defaults to 0.5. For example: error~.
                                          3. Use wildcards: ? matches a single character, and * matches zero or more characters. For example: err?r, error*.
                                          4. Use square brackets [ ] or curly braces { } for range queries. Square brackets [ ] represent a closed interval and include the boundary values. Curly braces { } represent an open interval and exclude the boundary values. Range queries are applicable only to fields that can be sorted, such as numeric fields and date fields. For example timestamp:[2022-01-01 TO 2022-01-31].
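
                                          Combining these operators, a hypothetical query (actual field names depend on your log index) might look like:

                                            (error~0.8 OR fail*) AND timestamp:[2022-01-01 TO 2022-01-31] NOT debug\n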
                                          "},{"location":"en/end-user/insight/data-query/log.html#view-log-context","title":"View log context","text":"

Clicking the button next to a log slides out a panel on the right side, showing 100 lines of context for that log by default. You can switch the Display Rows option to view more context.

                                          "},{"location":"en/end-user/insight/data-query/log.html#export-log","title":"Export log","text":"

                                          Click the download button located in the upper right corner of the list.

                                          • You can configure the exported log fields. The available fields may vary depending on the log type, with the Log Content field being mandatory.
                                          • You can export the log query results in .txt or .csv format.

                                          "},{"location":"en/end-user/insight/data-query/metric.html","title":"Metric query","text":"

Metric query supports querying the metric data of each container resource, so you can view the trends of monitoring metrics over time. Advanced query also supports native PromQL statements for metric query.

                                          "},{"location":"en/end-user/insight/data-query/metric.html#prerequisites","title":"Prerequisites","text":"
• The cluster has insight-agent installed, and the application is in the Running state.
                                          "},{"location":"en/end-user/insight/data-query/metric.html#common-query","title":"Common query","text":"
1. In the left navigation bar, click Data Query -> Metric Query .

                                          2. After selecting query conditions such as cluster, type, node, and metric name, click Search , and the proper metric chart and data details will be displayed on the right side of the screen.

                                          Tip

A custom time range is supported. You can manually click the Refresh icon or select a default time interval to refresh.

                                          "},{"location":"en/end-user/insight/data-query/metric.html#advanced-search","title":"Advanced Search","text":"
1. In the left navigation bar, click Data Query -> Metric Query , then click the Advanced Query tab to switch to the advanced query page.

                                          2. Enter a PromQL statement (see PromQL Syntax), click Query , and the query metric chart and data details will be displayed.
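
For example, a PromQL statement like the following (a sketch using the standard cAdvisor metric container_cpu_usage_seconds_total ; adjust the namespace to your own) returns the per-pod CPU usage rate over the last 5 minutes:

sum(rate(container_cpu_usage_seconds_total{namespace=\"insight-system\"}[5m])) by (pod)\n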

                                          "},{"location":"en/end-user/insight/infra/cluster.html","title":"Cluster Monitoring","text":"

Through cluster monitoring, you can view the basic information of a cluster, its resource consumption, and the trend of that consumption over a period of time.

                                          "},{"location":"en/end-user/insight/infra/cluster.html#prerequisites","title":"Prerequisites","text":"

The cluster has insight-agent installed, and the application is in the Running state.

                                          "},{"location":"en/end-user/insight/infra/cluster.html#steps","title":"Steps","text":"
                                          1. Go to the Insight product module.

2. Select Infrastructure -> Clusters from the left navigation bar. On this page, you can view the following information:

                                            • Resource Overview: Provides statistics on the number of normal/all nodes and workloads across multiple clusters.
                                            • Fault: Displays the number of alerts generated in the current cluster.
                                            • Resource Consumption: Shows the actual usage and total capacity of CPU, memory, and disk for the selected cluster.
                                            • Metric Explanations: Describes the trends in CPU, memory, disk I/O, and network bandwidth.

3. Click Resource Level Monitor to view more metrics of the current cluster.

                                          "},{"location":"en/end-user/insight/infra/cluster.html#metric-explanations","title":"Metric Explanations","text":"Metric Name Description CPU Usage The ratio of the actual CPU usage of all pod resources in the cluster to the total CPU capacity of all nodes. CPU Allocation The ratio of the sum of CPU requests of all pods in the cluster to the total CPU capacity of all nodes. Memory Usage The ratio of the actual memory usage of all pod resources in the cluster to the total memory capacity of all nodes. Memory Allocation The ratio of the sum of memory requests of all pods in the cluster to the total memory capacity of all nodes."},{"location":"en/end-user/insight/infra/container.html","title":"Container Insight","text":"

Container insight monitors the workloads in cluster management. In the list, you can view the basic information and status of workloads. On the workload details page, you can see the number of active alerts and the trends of resource consumption such as CPU and memory.

                                          "},{"location":"en/end-user/insight/infra/container.html#prerequisites","title":"Prerequisites","text":"
                                          • The cluster has insight-agent installed, and all pods are in the Running state.

                                          • To install insight-agent, please refer to: Installing insight-agent online or Offline upgrade of insight-agent.

                                          "},{"location":"en/end-user/insight/infra/container.html#steps","title":"Steps","text":"

                                          Follow these steps to view service monitoring metrics:

                                          1. Go to the Insight product module.

2. Select Infrastructure -> Workloads from the left navigation bar.

                                          3. Switch between tabs at the top to view data for different types of workloads.

                                          4. Click the target workload name to view the details.

                                            1. Faults: Displays the total number of active alerts for the workload.
                                            2. Resource Consumption: Shows the CPU, memory, and network usage of the workload.
                                            3. Monitoring Metrics: Provides the trends of CPU, Memory, Network, and disk usage for the workload over the past hour.

                                          5. Switch to the Pods tab to view the status of various pods for the workload, including their nodes, restart counts, and other information.

6. Switch to the JVM monitor tab to view the JVM metrics for each pod.

                                            Note

                                            1. The JVM monitoring feature only supports the Java language.
                                            2. To enable the JVM monitoring feature, refer to Getting Started with Monitoring Java Applications.
                                          "},{"location":"en/end-user/insight/infra/container.html#metric-explanations","title":"Metric Explanations","text":"Metric Name Description CPU Usage The sum of CPU usage for all pods under the workload. CPU Requests The sum of CPU requests for all pods under the workload. CPU Limits The sum of CPU limits for all pods under the workload. Memory Usage The sum of memory usage for all pods under the workload. Memory Requests The sum of memory requests for all pods under the workload. Memory Limits The sum of memory limits for all pods under the workload. Disk Read/Write Rate The total number of continuous disk reads and writes per second within the specified time range, representing a performance measure of the number of read and write operations per second on the disk. Network Send/Receive Rate The incoming and outgoing rates of network traffic, aggregated by workload, within the specified time range."},{"location":"en/end-user/insight/infra/event.html","title":"Event Query","text":"

                                          AI platform Insight supports event querying by cluster and namespace.

                                          "},{"location":"en/end-user/insight/infra/event.html#event-status-distribution","title":"Event Status Distribution","text":"

                                          By default, the events that occurred within the last 12 hours are displayed. You can select a different time range in the upper right corner to view longer or shorter periods. You can also customize the sampling interval from 1 minute to 5 hours.

The event status distribution chart provides a visual representation of the intensity and dispersion of events. This helps in evaluating and preparing for subsequent cluster operations and maintenance tasks. If events are densely concentrated during specific time periods, you may need to allocate more resources or take proper measures to ensure cluster stability and high availability. If events are dispersed instead, you can use that period to schedule other maintenance tasks such as system optimization or upgrades.

                                          By considering the event status distribution chart and the selected time range, you can better plan and manage your cluster operations and maintenance work, ensuring system stability and reliability.

                                          "},{"location":"en/end-user/insight/infra/event.html#event-count-and-statistics","title":"Event Count and Statistics","text":"

                                          Through important event statistics, you can easily understand the number of image pull failures, health check failures, Pod execution failures, Pod scheduling failures, container OOM (Out-of-Memory) occurrences, volume mounting failures, and the total count of all events. These events are typically categorized as \"Warning\" and \"Normal\".

                                          "},{"location":"en/end-user/insight/infra/event.html#event-list","title":"Event List","text":"

The event list is presented in chronological order. You can sort the events by Last Occurred At and Type .

                                          By clicking on the \u2699\ufe0f icon on the right side, you can customize the displayed columns according to your preferences and needs.

                                          Additionally, you can click the refresh icon to update the current event list when needed.

                                          In the operation column on the right, clicking the icon allows you to view the history of a specific event.

                                          "},{"location":"en/end-user/insight/infra/event.html#reference","title":"Reference","text":"

                                          For detailed meanings of the built-in Events in the system, refer to the Kubernetes API Event List.

                                          "},{"location":"en/end-user/insight/infra/namespace.html","title":"Namespace Monitoring","text":"

                                          With namespaces as the dimension, you can quickly query resource consumption and trends within a namespace.

                                          "},{"location":"en/end-user/insight/infra/namespace.html#prerequisites","title":"Prerequisites","text":"
                                          • Insight Agent is installed in the cluster and the applications are in the Running state.
                                          "},{"location":"en/end-user/insight/infra/namespace.html#steps","title":"Steps","text":"
                                          1. Go to the Insight product module.

                                          2. Select Infrastructure -> Namespaces from the left navigation bar. On this page, you can view the following information:

                                            1. Switch Namespace: Switch between clusters or namespaces at the top.
                                            2. Resource Overview: Provides statistics on the number of normal and total workloads within the selected namespace.
                                            3. Incidents: Displays the number of alerts generated within the selected namespace.
                                            4. Events: Shows the number of Warning level events within the selected namespace in the past 24 hours.
                                            5. Resource Consumption: Provides the sum of CPU and memory usage for Pods within the selected namespace, along with the CPU and memory quota information.
                                          "},{"location":"en/end-user/insight/infra/namespace.html#metric-explanations","title":"Metric Explanations","text":"Metric Name Description CPU Usage The sum of CPU usage for Pods within the selected namespace. Memory Usage The sum of memory usage for Pods within the selected namespace. Pod CPU Usage The CPU usage for each Pod within the selected namespace. Pod Memory Usage The memory usage for each Pod within the selected namespace."},{"location":"en/end-user/insight/infra/node.html","title":"Node Monitoring","text":"

Through node monitoring, you can get an overview of the current health status of the nodes in the selected cluster and the number of abnormal pods; on the node details page, you can view the number of alerts and the trends of resource consumption such as CPU, memory, and disk.

                                          "},{"location":"en/end-user/insight/infra/node.html#prerequisites","title":"Prerequisites","text":"
• The cluster has insight-agent installed, and the application is in the Running state.
                                          "},{"location":"en/end-user/insight/infra/node.html#steps","title":"Steps","text":"
                                          1. Go to the Insight product module.

                                          2. Select Infrastructure -> Nodes from the left navigation bar. On this page, you can view the following information:

                                            • Cluster: Uses the dropdown at the top to switch between clusters.
                                            • Nodes: Shows a list of nodes within the selected cluster. Click a specific node to view detailed information.
                                            • Alert: Displays the number of alerts generated in the current cluster.
                                            • Resource Consumption: Shows the actual usage and total capacity of CPU, memory, and disk for the selected node.
                                            • Metric Explanations: Describes the trends in CPU, memory, disk I/O, and network traffic for the selected node.

3. Click Resource Level Monitor to view more metrics of the current node.

                                          "},{"location":"en/end-user/insight/infra/probe.html","title":"Probe","text":"

                                          Probe refers to the use of black-box monitoring to regularly test the connectivity of targets through HTTP, TCP, and other methods, enabling quick detection of ongoing faults.

                                          Insight uses the Prometheus Blackbox Exporter tool to probe the network using protocols such as HTTP, HTTPS, DNS, TCP, and ICMP, and returns the probe results to understand the network status.

                                          "},{"location":"en/end-user/insight/infra/probe.html#prerequisites","title":"Prerequisites","text":"

                                          The insight-agent has been successfully deployed in the target cluster and is in the Running state.

                                          "},{"location":"en/end-user/insight/infra/probe.html#view-probes","title":"View Probes","text":"
                                          1. Go to the Insight product module.
                                          2. Select Infrastructure -> Probes in the left navigation bar.

                                            • Click the cluster or namespace dropdown in the table to switch between clusters and namespaces.
                                            • The list displays the name, probe method, probe target, connectivity status, and creation time of the probes by default.
                                            • The connectivity status can be:
                                              • Normal: The probe successfully connects to the target, and the target returns the expected response.
                                              • Abnormal: The probe fails to connect to the target, or the target does not return the expected response.
                                              • Pending: The probe is attempting to connect to the target.
                                            • Supports fuzzy search of probe names.
                                          "},{"location":"en/end-user/insight/infra/probe.html#create-a-probe","title":"Create a Probe","text":"
                                          1. Click Create Probe .
                                          2. Fill in the basic information and click Next .

                                            • Name: The name can only contain lowercase letters, numbers, and hyphens (-), and must start and end with a lowercase letter or number, with a maximum length of 63 characters.
                                            • Cluster: Select the cluster for the probe task.
                                            • Namespace: The namespace where the probe task is located.
                                          3. Configure the probe parameters.

                                            • Blackbox Instance: Select the blackbox instance responsible for the probe.
                                            • Probe Method:
                                              • HTTP: Sends HTTP or HTTPS requests to the target URL to check its connectivity and response time. This can be used to monitor the availability and performance of websites or web applications.
                                              • TCP: Establishes a TCP connection to the target host and port to check its connectivity and response time. This can be used to monitor TCP-based services such as web servers and database servers.
• Other: Supports custom probe methods by configuring a ConfigMap. For more information, refer to: Custom Probe Methods
                                            • Probe Target: The target address of the probe, supports domain names or IP addresses.
                                            • Labels: Custom labels that will be automatically added to Prometheus' labels.
                                            • Probe Interval: The interval between probes.
                                            • Probe Timeout: The maximum waiting time when probing the target.
                                          4. After configuring, click OK to complete the creation.

                                          Warning

                                          After the probe task is created, it takes about 3 minutes to synchronize the configuration. During this period, no probes will be performed, and probe results cannot be viewed.

                                          "},{"location":"en/end-user/insight/infra/probe.html#view-monitoring-dashboards","title":"View Monitoring Dashboards","text":"

                                          Click \u2507 in the operations column and click View Monitoring Dashboard .

                                          Metric Name Description Current Status Response Represents the response status code of the HTTP probe request. Ping Status Indicates whether the probe request was successful. 1 indicates a successful probe request, and 0 indicates a failed probe request. IP Protocol Indicates the IP protocol version used in the probe request. SSL Expiry Represents the earliest expiration time of the SSL/TLS certificate. DNS Response (Latency) Represents the duration of the entire probe process in seconds. HTTP Duration Represents the duration of the entire process from sending the request to receiving the complete response."},{"location":"en/end-user/insight/infra/probe.html#edit-a-probe","title":"Edit a Probe","text":"

                                          Click \u2507 in the operations column and click Edit .

                                          "},{"location":"en/end-user/insight/infra/probe.html#delete-a-probe","title":"Delete a Probe","text":"

                                          Click \u2507 in the operations column and click Delete .

                                          "},{"location":"en/end-user/insight/quickstart/agent-status.html","title":"Insight-agent component status","text":"

Insight is a multicluster observation product in AI platform. To achieve unified collection of multicluster observability data, you need to install the Helm App insight-agent (installed in the insight-system namespace by default). See How to install insight-agent .

                                          "},{"location":"en/end-user/insight/quickstart/agent-status.html#status-description","title":"Status description","text":"

In the Insight -> Data Collection section, you can view the status of insight-agent installed in each cluster.

                                          • not installed : insight-agent is not installed under the insight-system namespace in this cluster
                                          • Running : insight-agent is successfully installed in the cluster, and all deployed components are running
• Exception : If insight-agent is in this state, it means that the Helm deployment failed or some deployed components are not running

You can check the status as follows:

1. Run the following command. If the status is deployed , go to the next step. If it is failed , the application upgrade will be affected, so it is recommended to uninstall and reinstall it via Container Management -> Helm Apps :

                                            helm list -n insight-system\n
2. Run the following command, or check the status of the components deployed in the cluster in Insight -> Data Collection . If any pod is not in the Running state, restart the abnormal pod.

                                            kubectl get pods -n insight-system\n
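
To restart an abnormal pod, one common approach is to delete it so that its controller recreates it ( <pod-name> is a placeholder for the abnormal pod's name):

kubectl delete pod <pod-name> -n insight-system\n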
                                          "},{"location":"en/end-user/insight/quickstart/agent-status.html#supplementary-instructions","title":"Supplementary instructions","text":"
1. The resource consumption of the metric collection component Prometheus in insight-agent is directly proportional to the number of pods running in the cluster. Adjust the Prometheus resources according to the cluster size; refer to Prometheus Resource Planning.

2. The storage capacity of the metric storage component vmstorage in the global service cluster is directly proportional to the total number of pods across all clusters.

                                            • Please contact the platform administrator to adjust the disk capacity of vmstorage according to the cluster size, see vmstorage disk capacity planning.
• Adjust the vmstorage disk according to the multicluster size, see vmstorage disk expansion.
                                          "},{"location":"en/end-user/insight/quickstart/install/index.html","title":"Start Observing","text":"

AI platform enables the management and creation of multicloud and multiple clusters. Building upon this capability, Insight serves as a unified observability solution for multiple clusters. It collects observability data from multiple clusters by deploying the insight-agent plugin and allows querying of metrics, logs, and trace data through AI platform Insight.

                                          insight-agent is a tool that facilitates the collection of observability data from multiple clusters. Once installed, it automatically collects metrics, logs, and trace data without any modifications.

                                          Clusters created through Container Management come pre-installed with insight-agent. Hence, this guide specifically provides instructions on enabling observability for integrated clusters.

                                          • Install insight-agent online

As a unified observability platform for multiple clusters, the resource consumption of certain Insight components is closely related to the size of the created clusters and the number of integrated clusters. When installing insight-agent, adjust the resources of the proper components based on the cluster size.

                                          1. Adjust the CPU and memory resources of the Prometheus collection component in insight-agent according to the size of the cluster created or integrated. Please refer to Prometheus resource planning.

2. As the metric data from multiple clusters is stored centrally, AI platform administrators need to adjust the disk space of vmstorage based on the cluster size. Please refer to vmstorage disk capacity planning.

                                          3. For instructions on adjusting the disk space of vmstorage, please refer to Expanding vmstorage disk.

Since AI platform supports the management of multicloud and multiple clusters, insight-agent has only undergone partial verification across environments. There are known conflicts with monitoring components when installing insight-agent in Suanova 4.0 clusters and Openshift 4.x clusters. If you encounter similar issues, please refer to the following documents:

                                          • Install insight-agent in Openshift 4.x

                                          Currently, the insight-agent collection component has undergone functional testing for popular versions of Kubernetes. Please refer to:

                                          • Kubernetes cluster compatibility testing
                                          • Openshift 4.x cluster compatibility testing
                                          • Rancher cluster compatibility testing
                                          "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html","title":"Enable Big Log and Big Trace Modes","text":"

The Insight Module supports switching logs to Big Log mode and traces to Big Trace mode to enhance data writing capabilities in large-scale environments. This page introduces the following methods for enabling these modes:

                                          • Enable or upgrade to Big Log and Big Trace modes through the installer (controlled by the same parameter value in manifest.yaml)
                                          • Manually enable Big Log and Big Trace modes through Helm commands
                                          "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#logs","title":"Logs","text":"

                                          This section explains the differences between the normal log mode and the Big Log mode.

                                          "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#log-mode","title":"Log Mode","text":"

                                          Components: Fluentbit + Elasticsearch

                                          This mode is referred to as the ES mode, and the data flow diagram is shown below:

                                          "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#big-log-mode","title":"Big Log Mode","text":"

                                          Components: Fluentbit + Kafka + Vector + Elasticsearch

                                          This mode is referred to as the Kafka mode, and the data flow diagram is shown below:

                                          "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#traces","title":"Traces","text":"

                                          This section explains the differences between the normal trace mode and the Big Trace mode.

                                          "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#trace-mode","title":"Trace Mode","text":"

                                          Components: Agent opentelemetry-collector + Global opentelemetry-collector + Jaeger-collector + Elasticsearch

This mode is referred to as the OTLP mode, and the data flow diagram is shown below:

                                          "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#big-trace-mode","title":"Big Trace Mode","text":"

                                          Components: Agent opentelemetry-collector + Kafka + Global opentelemetry-collector + Jaeger-collector + Elasticsearch

                                          This mode is referred to as the Kafka mode, and the data flow diagram is shown below:

                                          "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#enabling-via-installer","title":"Enabling via Installer","text":"

When deploying or upgrading AI platform using the installer, the manifest.yaml file includes the infrastructures.kafka field. To enable the Big Log and Big Trace modes, Kafka must be enabled:

                                          manifest.yaml
                                          apiVersion: manifest.daocloud.io/v1alpha1\nkind: SuanovaManifest\n...\ninfrastructures:\n  ...\n  kafka:\n    enable: true # Default is false\n    cpuLimit: 1\n    memLimit: 2Gi\n    pvcSize: 15Gi\n
                                          "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#enable","title":"Enable","text":"

                                          When using a manifest.yaml that enables kafka during installation, Kafka middleware will be installed by default, and Big Log and Big Trace modes will be enabled automatically. The installation command is:

                                          ./dce5-installer cluster-create -c clusterConfig.yaml -m manifest.yaml\n
                                          "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#upgrade","title":"Upgrade","text":"

The upgrade also involves modifying the kafka field. However, note that since the old environment was installed with kafka: false , Kafka is not present in the environment. Therefore, you need to include middleware in the upgrade scope so that the Kafka middleware is installed at the same time. The upgrade command is:

                                          ./dce5-installer cluster-create -c clusterConfig.yaml -m manifest.yaml -u gproduct,middleware\n

                                          Note

                                          After the upgrade is complete, you need to manually restart the following components:

                                          • insight-agent-fluent-bit
                                          • insight-agent-opentelemetry-collector
                                          • insight-opentelemetry-collector
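
A possible way to restart them, assuming fluent-bit runs as a DaemonSet and the two collectors run as Deployments in the insight-system namespace (verify the actual workload kinds in your environment first):

kubectl -n insight-system rollout restart daemonset/insight-agent-fluent-bit\nkubectl -n insight-system rollout restart deployment/insight-agent-opentelemetry-collector\nkubectl -n insight-system rollout restart deployment/insight-opentelemetry-collector\n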
                                          "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#enabling-via-helm-commands","title":"Enabling via Helm Commands","text":"

Prerequisites: Ensure that a usable Kafka instance exists and that its address is accessible.

                                          Use the following commands to retrieve the values of the old versions of Insight and insight-agent (it's recommended to back them up):

                                          helm get values insight -n insight-system -o yaml > insight.yaml\nhelm get values insight-agent -n insight-system -o yaml > insight-agent.yaml\n
                                          "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#enabling-big-log","title":"Enabling Big Log","text":"

                                          There are several ways to enable or upgrade to Big Log mode:

Use --set in the helm upgrade command / Modify YAML and run helm upgrade / Upgrade via Container Management UI

                                          First, run the following Insight upgrade command, ensuring the Kafka brokers address is correct:

                                          helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --set global.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.kafka.enabled=true \\\n  --set vector.enabled=true \\\n  --version 0.30.1\n

                                          Then, run the following insight-agent upgrade command, ensuring the Kafka brokers address is correct:

                                          helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --set global.exporters.logging.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.exporters.logging.output=kafka \\\n  --version 0.30.1\n

                                          Follow these steps to modify the YAML and then run the helm upgrade command:

                                          1. Modify insight.yaml

                                            insight.yaml
                                            global:\n  ...\n  kafka:\n    brokers: 10.6.216.111:30592\n    enabled: true\n...\nvector:\n  enabled: true\n
                                          2. Upgrade the Insight component:

                                            helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --version 0.30.1\n
                                          3. Modify insight-agent.yaml

                                            insight-agent.yaml
                                            global:\n  ...\n  exporters:\n    ...\n    logging:\n      ...\n      kafka:\n        brokers: 10.6.216.111:30592\n      output: kafka\n
                                          4. Upgrade the insight-agent:

                                            helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --version 0.30.1\n

                                          In the Container Management module, find the cluster, select Helm Apps from the left navigation bar, and find and update the insight-agent.

                                          In Logging Settings, select kafka for output and fill in the correct brokers address.

                                          Note that after the upgrade is complete, you need to manually restart the insight-agent-fluent-bit component.

                                          "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#enabling-big-trace","title":"Enabling Big Trace","text":"

                                          There are several ways to enable or upgrade to Big Trace mode:

Using --set in the helm upgrade command / Modify YAML and run helm upgrade / Upgrade via Container Management UI

                                          First, run the following Insight upgrade command, ensuring the Kafka brokers address is correct:

                                          helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --set global.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.kafka.enabled=true \\\n  --set global.tracing.kafkaReceiver.enabled=true \\\n  --version 0.30.1\n

                                          Then, run the following insight-agent upgrade command, ensuring the Kafka brokers address is correct:

                                          helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --set global.exporters.trace.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.exporters.trace.output=kafka \\\n  --version 0.30.1\n

                                          Follow these steps to modify the YAML and then run the helm upgrade command:

                                          1. Modify insight.yaml

                                            insight.yaml
                                            global:\n  ...\n  kafka:\n    brokers: 10.6.216.111:30592\n    enabled: true\n...\ntracing:\n  ...\n  kafkaReceiver:\n    enabled: true\n
                                          2. Upgrade the Insight component:

                                            helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --version 0.30.1\n
                                          3. Modify insight-agent.yaml

                                            insight-agent.yaml
                                            global:\n  ...\n  exporters:\n    ...\n    trace:\n      ...\n      kafka:\n        brokers: 10.6.216.111:30592\n      output: kafka\n
                                          4. Upgrade the insight-agent:

                                            helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --version 0.30.1\n

                                          In the Container Management module, find the cluster, select Helm Apps from the left navigation bar, and find and update the insight-agent.

                                          In Trace Settings, select kafka for output and fill in the correct brokers address.

                                          Note that after the upgrade is complete, you need to manually restart the insight-agent-opentelemetry-collector and insight-opentelemetry-collector components.

                                          "},{"location":"en/end-user/insight/quickstart/install/component-scheduling.html","title":"Custom Insight Component Scheduling Policy","text":"

                                          When deploying Insight to a Kubernetes environment, proper resource management and optimization are crucial. Insight includes several core components such as Prometheus, OpenTelemetry, FluentBit, Vector, and Elasticsearch. These components, during their operation, may negatively impact the performance of other pods within the cluster due to resource consumption issues. To effectively manage resources and optimize cluster operations, node affinity becomes an important option.

This page describes how to add taints and node affinity to ensure that each component runs on the appropriate nodes, avoiding resource contention and thereby guaranteeing the stability and efficiency of the entire Kubernetes cluster.

                                          "},{"location":"en/end-user/insight/quickstart/install/component-scheduling.html#configure-dedicated-nodes-for-insight-using-taints","title":"Configure dedicated nodes for Insight using taints","text":"

                                          Since the Insight Agent includes DaemonSet components, the configuration method described in this section is to have all components except the Insight DaemonSet run on dedicated nodes.

                                          This is achieved by adding taints to the dedicated nodes and using tolerations to match them. More details can be found in the Kubernetes official documentation.

                                          You can refer to the following commands to add and remove taints on nodes:

                                          # Add taint\nkubectl taint nodes worker1 node.daocloud.io=insight-only:NoSchedule\n\n# Remove taint\nkubectl taint nodes worker1 node.daocloud.io:NoSchedule-\n
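
To verify that the taint took effect (using the same worker1 node as above):

kubectl describe node worker1 | grep Taints\n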

                                          There are two ways to schedule Insight components to dedicated nodes:

                                          "},{"location":"en/end-user/insight/quickstart/install/component-scheduling.html#1-add-tolerations-for-each-component","title":"1. Add tolerations for each component","text":"

                                          Configure the tolerations for the insight-server and insight-agent Charts respectively:

insight-server Chart / insight-agent Chart
                                          server:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nui:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nrunbook:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\n# mysql:\nvictoria-metrics-k8s-stack:\n  victoria-metrics-operator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  vmcluster:\n    spec:\n      vmstorage:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n      vmselect:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n      vminsert:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n  vmalert:\n    spec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n  alertmanager:\n    spec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n\njaeger:\n  collector:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  query:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n\nopentelemetry-collector-aggregator:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nopentelemetry-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\ngrafana-operator:\n  operator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  grafana:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\nkibana:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nelastic-alert:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nvector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n
                                          kube-prometheus-stack:\n  prometheus:\n    prometheusSpec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n  prometheus-node-exporter:\n    tolerations:\n      - effect: NoSchedule\n        operator: Exists\n  prometheusOperator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n\nkube-state-metrics:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-operator:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\ntailing-sidecar-operator:\n  operator:\n    tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-kubernetes-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nprometheus-blackbox-exporter:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\netcd-exporter:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\" \n
                                          "},{"location":"en/end-user/insight/quickstart/install/component-scheduling.html#2-configure-at-the-namespace-level","title":"2. Configure at the namespace level","text":"

                                          Allow pods in the insight-system namespace to tolerate the node.daocloud.io=insight-only taint.

1. Adjust the apiserver configuration file /etc/kubernetes/manifests/kube-apiserver.yaml to include PodTolerationRestriction,PodNodeSelector in the --enable-admission-plugins list:
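
A minimal sketch of the relevant flag (the plugins already enabled in your environment may differ; append the two plugins to the existing list rather than replacing it):

# /etc/kubernetes/manifests/kube-apiserver.yaml (excerpt)\nspec:\n  containers:\n    - command:\n        - kube-apiserver\n        - --enable-admission-plugins=NodeRestriction,PodTolerationRestriction,PodNodeSelector\n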

                                          2. Add an annotation to the insight-system namespace:

                                            apiVersion: v1\nkind: Namespace\nmetadata:\n  name: insight-system\n  annotations:\n    scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Equal\", \"effect\": \"NoSchedule\", \"key\": \"node.daocloud.io\", \"value\": \"insight-only\"}]'\n

Restart the components under the insight-system namespace so that their pods are recreated and scheduled with the new default tolerations.
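
For example, to restart all Deployments in the namespace (StatefulSets and DaemonSets can be restarted the same way with their respective resource types):

kubectl rollout restart deployment -n insight-system\n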

                                          "},{"location":"en/end-user/insight/quickstart/install/component-scheduling.html#use-node-labels-and-node-affinity-to-manage-component-scheduling","title":"Use node labels and node affinity to manage component scheduling","text":"

                                          Info

                                          Node affinity is conceptually similar to nodeSelector, allowing you to constrain which nodes a pod can be scheduled on based on labels on the nodes. There are two types of node affinity:

                                          1. requiredDuringSchedulingIgnoredDuringExecution: The scheduler will only schedule the pod if the rules are met. This feature is similar to nodeSelector but has more expressive syntax.
                                          2. preferredDuringSchedulingIgnoredDuringExecution: The scheduler will try to find nodes that meet the rules. If no matching nodes are found, the scheduler will still schedule the Pod.

                                          For more details, please refer to the Kubernetes official documentation.

                                          To meet different user needs for scheduling Insight components, Insight provides fine-grained labels for different components' scheduling policies. Below is a description of the labels and their associated components:

                                          Label Key Label Value Description node.daocloud.io/insight-any Any value, recommended to use true Represents that all Insight components prefer nodes with this label node.daocloud.io/insight-prometheus Any value, recommended to use true Specifically for Prometheus components node.daocloud.io/insight-vmstorage Any value, recommended to use true Specifically for VictoriaMetrics vmstorage components node.daocloud.io/insight-vector Any value, recommended to use true Specifically for Vector components node.daocloud.io/insight-otel-col Any value, recommended to use true Specifically for OpenTelemetry components

                                          You can refer to the following commands to add and remove labels on nodes:

                                          # Add label to node8, prioritizing scheduling insight-prometheus to node8 \nkubectl label nodes node8 node.daocloud.io/insight-prometheus=true\n\n# Remove the node.daocloud.io/insight-prometheus label from node8\nkubectl label nodes node8 node.daocloud.io/insight-prometheus-\n
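
To check which nodes currently carry a given label:

kubectl get nodes -l node.daocloud.io/insight-prometheus=true\n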

                                          Below is the default affinity preference for the insight-prometheus component during deployment:

                                          affinity:\n  nodeAffinity:\n    preferredDuringSchedulingIgnoredDuringExecution:\n    - preference:\n        matchExpressions:\n        - key: node-role.kubernetes.io/control-plane\n          operator: DoesNotExist\n      weight: 1\n    - preference:\n        matchExpressions:\n        - key: node.daocloud.io/insight-prometheus # (1)!\n          operator: Exists\n      weight: 2\n    - preference:\n        matchExpressions:\n        - key: node.daocloud.io/insight-any\n          operator: Exists\n      weight: 3\n    podAntiAffinity:\n      preferredDuringSchedulingIgnoredDuringExecution:\n        - weight: 1\n          podAffinityTerm:\n            topologyKey: kubernetes.io/hostname\n            labelSelector:\n              matchExpressions:\n                - key: app.kubernetes.io/instance\n                  operator: In\n                  values:\n                    - insight-agent-kube-prometh-prometheus\n
                                          1. Prioritize scheduling insight-prometheus to nodes with the node.daocloud.io/insight-prometheus label
                                          "},{"location":"en/end-user/insight/quickstart/install/gethosturl.html","title":"Get Data Storage Address of Global Service Cluster","text":"

Insight is a product for unified observation of multiple clusters. To achieve unified storage and querying of observability data from multiple clusters, sub-clusters need to report the collected data to the global service cluster for unified storage. This document provides the storage component addresses required when installing the collection component insight-agent.

                                          "},{"location":"en/end-user/insight/quickstart/install/gethosturl.html#install-insight-agent-in-global-service-cluster","title":"Install insight-agent in Global Service Cluster","text":"

                                          If installing insight-agent in the global service cluster, it is recommended to access the cluster via domain name:

                                          export vminsert_host=\"vminsert-insight-victoria-metrics-k8s-stack.insight-system.svc.cluster.local\" # (1)!\nexport es_host=\"insight-es-master.insight-system.svc.cluster.local\" # (2)!\nexport otel_col_host=\"insight-opentelemetry-collector.insight-system.svc.cluster.local\" # (3)!\n
                                          "},{"location":"en/end-user/insight/quickstart/install/gethosturl.html#install-insight-agent-in-other-clusters","title":"Install insight-agent in Other Clusters","text":""},{"location":"en/end-user/insight/quickstart/install/gethosturl.html#get-address-via-interface-provided-by-insight-server","title":"Get Address via Interface Provided by Insight Server","text":"
                                          1. The management cluster uses the default LoadBalancer mode for exposure.

                                            Log in to the console of the global service cluster and run the following command:

                                            export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam'\n

                                            Note

                                            Please replace the ${INSIGHT_SERVER_IP} parameter in the command.

                                            You will get the following response:

                                            {\n  \"values\": {\n    \"global\": {\n      \"exporters\": {\n        \"logging\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"metric\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"auditLog\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"trace\": {\n          \"host\": \"10.6.182.32\"\n        }\n      }\n    },\n    \"opentelemetry-operator\": {\n      \"enabled\": true\n    },\n    \"opentelemetry-collector\": {\n      \"enabled\": true\n    }\n  }\n}\n
• global.exporters.logging.host is the log service address; there is no need to set the service port, as the default value will be used.
                                            • global.exporters.metric.host is the metrics service address.
                                            • global.exporters.trace.host is the trace service address.
                                            • global.exporters.auditLog.host is the audit log service address (same service as trace but different port).
2. The management cluster has LoadBalancer disabled

                                            When calling the interface, you need to additionally pass an externally accessible node IP from the cluster, which will be used to construct the complete access address of the proper service.

                                            export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam' --data '{\"extra\": {\"EXPORTER_EXTERNAL_IP\": \"10.5.14.51\"}}'\n

                                            You will get the following response:

                                            {\n  \"values\": {\n    \"global\": {\n      \"exporters\": {\n        \"logging\": {\n          \"scheme\": \"https\",\n          \"host\": \"10.5.14.51\",\n          \"port\": 32007,\n          \"user\": \"elastic\",\n          \"password\": \"j8V1oVoM1184HvQ1F3C8Pom2\"\n        },\n        \"metric\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30683\n        },\n        \"auditLog\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30884\n        },\n        \"trace\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30274\n        }\n      }\n    },\n    \"opentelemetry-operator\": {\n      \"enabled\": true\n    },\n    \"opentelemetry-collector\": {\n      \"enabled\": true\n    }\n  }\n}\n
                                            • global.exporters.logging.host is the log service address.
                                            • global.exporters.logging.port is the NodePort exposed by the log service.
                                            • global.exporters.metric.host is the metrics service address.
                                            • global.exporters.metric.port is the NodePort exposed by the metrics service.
                                            • global.exporters.trace.host is the trace service address.
                                            • global.exporters.trace.port is the NodePort exposed by the trace service.
                                            • global.exporters.auditLog.host is the audit log service address (same service as trace but different port).
                                            • global.exporters.auditLog.port is the NodePort exposed by the audit log service.
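If you want to extract these fields from the interface response programmatically rather than reading them by hand, a minimal sketch like the following can be used, assuming jq is available on the host; the field paths follow the sample responses above:

curl -s --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam' | jq -r '.values.global.exporters.metric.host, .values.global.exporters.metric.port'\n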
                                          "},{"location":"en/end-user/insight/quickstart/install/gethosturl.html#connect-via-loadbalancer","title":"Connect via LoadBalancer","text":"
                                          1. If LoadBalancer is enabled in the cluster and a VIP is set for Insight, you can manually execute the following command to obtain the address information for vminsert and opentelemetry-collector:

                                            $ kubectl get service -n insight-system | grep lb\nlb-insight-opentelemetry-collector               LoadBalancer   10.233.23.12    <pending>     4317:31286/TCP,8006:31351/TCP  24d\nlb-vminsert-insight-victoria-metrics-k8s-stack   LoadBalancer   10.233.63.67    <pending>     8480:31629/TCP                 24d\n
                                            • lb-vminsert-insight-victoria-metrics-k8s-stack is the address for the metrics service.
                                            • lb-insight-opentelemetry-collector is the address for the tracing service.
                                          2. Execute the following command to obtain the address information for elasticsearch:

                                            $ kubectl get service -n mcamel-system | grep es\nmcamel-common-es-cluster-masters-es-http               NodePort    10.233.16.120   <none>        9200:30465/TCP               47d\n

                                            mcamel-common-es-cluster-masters-es-http is the address for the logging service.
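To read the external address of one of these services without parsing the table by eye, a jsonpath sketch such as the following can be used (service name as shown above; the ingress IP is only populated once the LoadBalancer VIP is assigned and the EXTERNAL-IP column is no longer <pending>):

kubectl get service lb-vminsert-insight-victoria-metrics-k8s-stack -n insight-system -o jsonpath='{.status.loadBalancer.ingress[0].ip}:{.spec.ports[0].port}'\n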

                                          "},{"location":"en/end-user/insight/quickstart/install/gethosturl.html#connect-via-nodeport","title":"Connect via NodePort","text":"

This applies when the LoadBalancer feature is disabled in the global service cluster.

                                          In this case, the LoadBalancer resources mentioned above will not be created by default. The relevant service names are:

                                          • vminsert-insight-victoria-metrics-k8s-stack (metrics service)
                                          • common-es (logging service)
                                          • insight-opentelemetry-collector (tracing service)
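As a sketch, the NodePort of each of these services can be read with kubectl; the example below assumes the metrics service lives in the insight-system namespace, as in the earlier outputs:

kubectl get service vminsert-insight-victoria-metrics-k8s-stack -n insight-system -o jsonpath='{.spec.ports[0].nodePort}'\n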

After obtaining the corresponding port information for the services in the above two scenarios, make the following settings:

                                          --set global.exporters.logging.host=  # (1)!\n--set global.exporters.logging.port=  # (2)!\n--set global.exporters.metric.host=   # (3)!\n--set global.exporters.metric.port=   # (4)!\n--set global.exporters.trace.host=    # (5)!\n--set global.exporters.trace.port=    # (6)!\n--set global.exporters.auditLog.host= # (7)!\n
1. An externally accessible node IP of the management cluster
2. The NodePort mapped to port 9200 of the logging service
3. An externally accessible node IP of the management cluster
4. The NodePort mapped to port 8480 of the metrics service
5. An externally accessible node IP of the management cluster
6. The NodePort mapped to port 4317 of the tracing service
7. An externally accessible node IP of the management cluster
                                          "},{"location":"en/end-user/insight/quickstart/install/install-agent.html","title":"Install insight-agent","text":"

insight-agent is a plugin for collecting Insight data, supporting unified observation of metrics, traces, and logs. This page describes how to install insight-agent for a connected cluster in an online environment.

                                          "},{"location":"en/end-user/insight/quickstart/install/install-agent.html#prerequisites","title":"Prerequisites","text":"

                                          Please confirm that your cluster has successfully connected to the container management platform. You can refer to Integrate Clusters for details.

                                          "},{"location":"en/end-user/insight/quickstart/install/install-agent.html#steps","title":"Steps","text":"
1. Enter Container Management from the left navigation bar and go to Clusters. Find the cluster where you want to install insight-agent.

2. Click Install now to jump directly, or click the cluster, then click Helm Apps -> Helm Templates in the left navigation bar, search for insight-agent in the search box, and click it to view details.

3. Select the appropriate version and click Install.

4. Fill in the name, select the namespace and version, and fill in the reporting addresses for logging, metric, audit, and trace data in the YAML file. The system fills in the component's data reporting addresses by default; please verify them before clicking OK to install.

                                            If you need to modify the data reporting address, please refer to Get Data Reporting Address.

5. The system will automatically return to Helm Apps. When the application status changes from Unknown to Deployed, insight-agent has been installed successfully. (A CLI sketch for checking the release status follows the note below.)

                                            Note

• Click \u2507 on the far right, and you can perform more operations such as Update, View YAML, and Delete in the pop-up menu.
                                            • For a practical installation demo, watch Video demo of installing insight-agent
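If you prefer to verify step 5 from the command line instead of the UI, the Helm release status can also be checked directly; the release name and namespace below are assumptions and should match what was chosen during installation:

helm status insight-agent -n insight-system\n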
                                          "},{"location":"en/end-user/insight/quickstart/install/knownissues.html","title":"Known Issues","text":"

                                          This page lists some issues related to the installation and uninstallation of Insight Agent and their workarounds.

                                          "},{"location":"en/end-user/insight/quickstart/install/knownissues.html#uninstallation-failure-of-insight-agent","title":"Uninstallation Failure of Insight Agent","text":"

                                          When you run the following command to uninstall Insight Agent,

helm uninstall insight-agent\n

the TLS secret used by otel-operator fails to be deleted.

Due to the \"reuse TLS secret\" logic in the otel-operator code, the operator checks whether the MutationConfiguration exists and reuses the CA certificate bound to it. However, because helm uninstall has already removed the MutationConfiguration, the lookup returns a null value.

Therefore, please manually delete the corresponding secret using one of the following methods:

                                          • Delete via command line: Log in to the console of the target cluster and run the following command:

                                            kubectl -n insight-system delete secret insight-agent-opentelemetry-operator-controller-manager-service-cert\n
                                          • Delete via UI: Log in to AI platform container management, select the target cluster, select Secret from the left menu, input insight-agent-opentelemetry-operator-controller-manager-service-cert, then select Delete.

                                          "},{"location":"en/end-user/insight/quickstart/install/knownissues.html#insight-agent_1","title":"Insight Agent","text":""},{"location":"en/end-user/insight/quickstart/install/knownissues.html#log-collection-endpoint-not-updated-when-upgrading-insight-agent","title":"Log Collection Endpoint Not Updated When Upgrading Insight Agent","text":"

                                          When updating the log configuration of the insight-agent from Elasticsearch to Kafka or from Kafka to Elasticsearch, the changes do not take effect and the agent continues to use the previous configuration.

Solution:

                                          Manually restart Fluent Bit in the cluster.
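A minimal sketch for the restart, assuming Fluent Bit runs as a DaemonSet named insight-agent-fluent-bit in the insight-system namespace (verify the actual name first with kubectl get daemonset -n insight-system):

kubectl -n insight-system rollout restart daemonset/insight-agent-fluent-bit\n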

                                          "},{"location":"en/end-user/insight/quickstart/install/knownissues.html#podmonitor-collects-multiple-sets-of-jvm-metrics","title":"PodMonitor Collects Multiple Sets of JVM Metrics","text":"
1. In this version, there is a defect in PodMonitor/insight-kubernetes-pod: it incorrectly creates collection jobs for all containers in Pods marked with insight.opentelemetry.io/metric-scrape=true, instead of only the containers corresponding to insight.opentelemetry.io/metric-port.

2. After a PodMonitor is declared, PrometheusOperator pre-configures some service discovery settings. For CRD compatibility reasons, configuring the collection tasks through annotations has been abandoned.

3. Instead, the additional scrape config mechanism provided by Prometheus is used to define the service discovery rules in a secret and introduce them into Prometheus.

                                          Therefore:

                                          1. Delete the current PodMonitor for insight-kubernetes-pod
                                          2. Use a new rule

In the new rule, action: keepequal compares source_labels with target_label to decide whether to create a collection task for a container's port. Note that this feature is only available in Prometheus v2.41.0 (2022-12-20) and higher.

                                          +    - source_labels: [__meta_kubernetes_pod_annotation_insight_opentelemetry_io_metric_port]\n+      separator: ;\n+      target_label: __meta_kubernetes_pod_container_port_number\n+      action: keepequal\n
                                          "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html","title":"Upgrade Notes","text":"

                                          This page provides some considerations for upgrading insight-server and insight-agent.

                                          "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v028x-or-lower-to-v029x","title":"Upgrade from v0.28.x (or lower) to v0.29.x","text":"

                                          Due to the upgrade of the Opentelemetry community operator chart version in v0.29.0, the supported values for featureGates in the values file have changed. Therefore, before upgrading, you need to set the value of featureGates to empty, as follows:

                                          -  --set opentelemetry-operator.manager.featureGates=\"+operator.autoinstrumentation.go,+operator.autoinstrumentation.multi-instrumentation,+operator.autoinstrumentation.nginx\" \\\n+  --set opentelemetry-operator.manager.featureGates=\"\"\n
                                          "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v026x-or-lower-to-v027x-or-higher","title":"Upgrade from v0.26.x (or lower) to v0.27.x or higher","text":"

                                          In v0.27.x, the switch for the vector component has been separated. If the existing environment has vector enabled, you need to specify --set vector.enabled=true when upgrading the insight-server.

                                          "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v019x-or-lower-to-020x","title":"Upgrade from v0.19.x (or lower) to 0.20.x","text":"

Before upgrading Insight, you need to manually delete the jaeger-collector and jaeger-query deployments by running the following commands:

                                          kubectl -n insight-system delete deployment insight-jaeger-collector\nkubectl -n insight-system delete deployment insight-jaeger-query\n
                                          "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v017x-or-lower-to-v018x","title":"Upgrade from v0.17.x (or lower) to v0.18.x","text":"

                                          In v0.18.x, there have been updates to the Jaeger-related deployment files, so you need to manually run the following commands before upgrading insight-server:

                                          kubectl -n insight-system delete deployment insight-jaeger-collector\nkubectl -n insight-system delete deployment insight-jaeger-query\n

                                          There have been changes to metric names in v0.18.x, so after upgrading insight-server, insight-agent should also be upgraded.

                                          In addition, the parameters for enabling the tracing module and adjusting the ElasticSearch connection have been modified. Refer to the following parameters:

                                          +  --set global.tracing.enable=true \\\n-  --set jaeger.collector.enabled=true \\\n-  --set jaeger.query.enabled=true \\\n+  --set global.elasticsearch.scheme=${your-external-elasticsearch-scheme} \\\n+  --set global.elasticsearch.host=${your-external-elasticsearch-host} \\\n+  --set global.elasticsearch.port=${your-external-elasticsearch-port} \\\n+  --set global.elasticsearch.user=${your-external-elasticsearch-username} \\\n+  --set global.elasticsearch.password=${your-external-elasticsearch-password} \\\n-  --set jaeger.storage.elasticsearch.scheme=${your-external-elasticsearch-scheme} \\\n-  --set jaeger.storage.elasticsearch.host=${your-external-elasticsearch-host} \\\n-  --set jaeger.storage.elasticsearch.port=${your-external-elasticsearch-port} \\\n-  --set jaeger.storage.elasticsearch.user=${your-external-elasticsearch-username} \\\n-  --set jaeger.storage.elasticsearch.password=${your-external-elasticsearch-password} \\\n
                                          "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v015x-or-lower-to-v016x","title":"Upgrade from v0.15.x (or lower) to v0.16.x","text":"

                                          In v0.16.x, a new feature parameter disableRouteContinueEnforce in the vmalertmanagers CRD is used. Therefore, you need to manually run the following command before upgrading insight-server:

                                          kubectl apply --server-side -f https://raw.githubusercontent.com/VictoriaMetrics/operator/v0.33.0/config/crd/bases/operator.victoriametrics.com_vmalertmanagers.yaml --force-conflicts\n

                                          Note

                                          If you are performing an offline installation, after extracting the insight offline package, please run the following command to update CRDs.

                                          kubectl apply --server-side -f insight/dependency-crds --force-conflicts \n
                                          "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v023x-or-lower-to-v024x","title":"Upgrade from v0.23.x (or lower) to v0.24.x","text":"

                                          In v0.24.x, CRDs have been added to the OTEL operator chart. However, helm upgrade does not update CRDs, so you need to manually run the following command:

                                          kubectl apply -f https://raw.githubusercontent.com/open-telemetry/opentelemetry-helm-charts/main/charts/opentelemetry-operator/crds/crd-opentelemetry.io_opampbridges.yaml\n

                                          If you are performing an offline installation, you can find the above CRD yaml file after extracting the insight-agent offline package. After extracting the insight-agent Chart, manually run the following command:

                                          kubectl apply -f charts/agent/crds/crd-opentelemetry.io_opampbridges.yaml\n
                                          "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v019x-or-lower-to-v020x","title":"Upgrade from v0.19.x (or lower) to v0.20.x","text":"

In v0.20.x, Kafka log export configuration has been added, and there have been some adjustments to the log export configuration. Before upgrading insight-agent, please note the parameter changes: the previous logging configuration has been moved under logging.elasticsearch:

                                          -  --set global.exporters.logging.host \\\n-  --set global.exporters.logging.port \\\n+  --set global.exporters.logging.elasticsearch.host \\\n+  --set global.exporters.logging.elasticsearch.port \\\n
                                          "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v017x-or-lower-to-v018x_1","title":"Upgrade from v0.17.x (or lower) to v0.18.x","text":"

Due to the updated Jaeger deployment files in v0.18.x, note the following parameter changes before upgrading insight-agent:

                                          +  --set global.exporters.trace.enable=true \\\n-  --set opentelemetry-collector.enabled=true \\\n-  --set opentelemetry-operator.enabled=true \\\n
                                          "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v016x-or-lower-to-v017x","title":"Upgrade from v0.16.x (or lower) to v0.17.x","text":"

In v0.17.x, the kube-prometheus-stack chart version was upgraded from 41.9.1 to 45.28.1, and some fields in the CRDs it uses were also upgraded, such as the attachMetadata field of servicemonitor. Therefore, the following command needs to be run before upgrading insight-agent:

                                          kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --force-conflicts\n

                                          If you are performing an offline installation, you can find the yaml for the above CRD in insight-agent/dependency-crds after extracting the insight-agent offline package.

                                          "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v011x-or-earlier-to-v012x","title":"Upgrade from v0.11.x (or earlier) to v0.12.x","text":"

v0.12.x upgrades the kube-prometheus-stack chart from 39.6.0 to 41.9.1, including prometheus-operator to v0.60.1 and the prometheus-node-exporter chart to v4.3.0. After the upgrade, prometheus-node-exporter uses the Kubernetes recommended labels, so you need to delete the node-exporter DaemonSet. prometheus-operator has updated its CRDs, so you need to run the following commands before upgrading insight-agent:

kubectl delete daemonset insight-agent-prometheus-node-exporter -n insight-system\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml --force-conflicts\n

                                          Note

                                          If you are installing offline, you can run the following command to update the CRD after decompressing the insight-agent offline package.

                                          kubectl apply --server-side -f insight-agent/dependency-crds --force-conflicts\n
                                          "},{"location":"en/end-user/insight/quickstart/jvm-monitor/jmx-exporter.html","title":"Use JMX Exporter to expose JVM monitoring metrics","text":"

                                          JMX-Exporter provides two usages:

1. Start a standalone process. Specify parameters when the JVM starts to expose the RMI interface of JMX; JMX Exporter calls RMI to obtain the JVM runtime status data, converts it to Prometheus metrics format, and exposes a port for Prometheus to collect.
2. Run in-process in the JVM. Specify parameters when the JVM starts to run the JMX Exporter jar as a javaagent; it reads the JVM runtime status data in-process, converts it into Prometheus metrics format, and exposes a port for Prometheus to collect.

                                          Note

The official documentation does not recommend the first method: the configuration is complicated, and it requires a separate process whose monitoring itself becomes a new problem. This page therefore focuses on the second usage and explains how to use JMX Exporter to expose JVM monitoring metrics in a Kubernetes environment.

With the second usage, the JMX Exporter jar file and configuration file must be specified when starting the JVM. The jar is a binary file that is not easy to mount through a ConfigMap, and the configuration file rarely needs modification, so the suggestion is to package both the JMX Exporter jar and its configuration file directly into the business container image.

Within this usage, you can either put the JMX Exporter jar file into the business application image or mount it during deployment. The two methods are introduced below:

                                          "},{"location":"en/end-user/insight/quickstart/jvm-monitor/jmx-exporter.html#method-1-build-the-jmx-exporter-jar-file-into-the-business-image","title":"Method 1: Build the JMX Exporter JAR file into the business image","text":"

                                          The content of prometheus-jmx-config.yaml is as follows:

                                          prometheus-jmx-config.yaml
                                          ...\nssl: false\nlowercaseOutputName: false\nlowercaseOutputLabelNames: false\nrules:\n- pattern: \".*\"\n

                                          Note

For more configuration items, please refer to the introduction at the bottom or the Prometheus official documentation.

Then prepare the jar file. You can find the latest jar download link on the jmx_exporter GitHub page, then refer to the following Dockerfile:

                                          FROM openjdk:11.0.15-jre\nWORKDIR /app/\nCOPY target/my-app.jar ./\nCOPY prometheus-jmx-config.yaml ./\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\nENV JAVA_TOOL_OPTIONS=-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\nEXPOSE 8081 8999 8080 8888\nENTRYPOINT java $JAVA_OPTS -jar my-app.jar\n

                                          Notice:

• Start parameter format: -javaagent:<jar-path>=<port>:<config-file-path>
• Port 8088 is used here to expose the JVM monitoring metrics; if it conflicts with the Java application, you can change it.
                                          "},{"location":"en/end-user/insight/quickstart/jvm-monitor/jmx-exporter.html#method-2-mount-via-init-container-container","title":"Method 2: mount via init container container","text":"

First, build the JMX Exporter into a Docker image; the following Dockerfile is for reference only:

FROM alpine/curl:3.14\nWORKDIR /app/\n# Copy the previously created config file into the image\nCOPY prometheus-jmx-config.yaml ./\n# Download the jmx prometheus javaagent jar online\nRUN set -ex; \\\n     curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\n

                                          Build the image according to the above Dockerfile: docker build -t my-jmx-exporter .

Add the following init container to the Java application's deployment YAML:

                                          Click to view YAML file
                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-demo-app\n  labels:\n    app: my-demo-app\nspec:\n  selector:\n    matchLabels:\n      app: my-demo-app\n  template:\n    metadata:\n      labels:\n        app: my-demo-app\n    spec:\n      imagePullSecrets:\n      - name: registry-pull\n      initContainers:\n      - name: jmx-sidecar\n        image: my-jmx-exporter\n        command: [\"cp\", \"-r\", \"/app/jmx_prometheus_javaagent-0.17.2.jar\", \"/target/jmx_prometheus_javaagent-0.17.2.jar\"]  \u278a\n        volumeMounts:\n        - name: sidecar\n          mountPath: /target\n      containers:\n      - image: my-demo-app-image\n        name: my-demo-app\n        resources:\n          requests:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n          limits:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n        ports:\n        - containerPort: 18083\n        env:\n        - name: JAVA_TOOL_OPTIONS\n          value: \"-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\" \u278b\n        volumeMounts:\n        - name: host-time\n          mountPath: /etc/localtime\n          readOnly: true\n        - name: sidecar\n          mountPath: /sidecar\n      volumes:\n      - name: host-time\n        hostPath:\n          path: /etc/localtime\n      - name: sidecar  # Share the agent folder\n        emptyDir: {}\n      restartPolicy: Always\n

After the above modification, the sample application my-demo-app can expose JVM metrics. After the service is running, we can access the Prometheus-format metrics it exposes at http://localhost:8088.
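As a quick sanity check, assuming you are on the host where the service runs (or have a port-forward to it), the endpoint can be probed as follows; jmx_exporter serves the metrics on the port configured in JAVA_TOOL_OPTIONS:

curl http://localhost:8088/metrics\n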

                                          Then, you can refer to Java Application Docking Observability with JVM Metrics.

                                          "},{"location":"en/end-user/insight/quickstart/jvm-monitor/jvm-catelogy.html","title":"Start monitoring Java applications","text":"

This document mainly describes how to monitor the JVM of a customer's Java application. It explains how Java applications that already expose JVM metrics, and those that do not yet, can interface with Insight.

If your Java application does not yet expose JVM metrics, you can refer to the following documents:

                                          • Expose JVM monitoring metrics with JMX Exporter
                                          • Expose JVM monitoring metrics using OpenTelemetry Java Agent

                                          If your Java application has exposed JVM metrics, you can refer to the following documents:

                                          • Java application docking observability with existing JVM metrics
                                          "},{"location":"en/end-user/insight/quickstart/jvm-monitor/legacy-jvm.html","title":"Java Application with JVM Metrics to Dock Insight","text":"

If your Java application exposes JVM monitoring metrics through other means (such as Spring Boot Actuator), the monitoring data needs to be made collectable. You can let Insight collect the existing JVM metrics by adding Kubernetes annotations to the workload:

annotations:\n   insight.opentelemetry.io/metric-scrape: \"true\" # whether to collect\n   insight.opentelemetry.io/metric-path: \"/\" # path to collect metrics\n   insight.opentelemetry.io/metric-port: \"9464\" # port for collecting metrics\n

YAML example for adding annotations to the my-deployment-app workload:

                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-app\nspec:\n  selector:\n    matchLabels:\n      app: my-deployment-app\n      app.kubernetes.io/name: my-deployment-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-deployment-app\n        app.kubernetes.io/name: my-deployment-app\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\" # whether to collect\n        insight.opentelemetry.io/metric-path: \"/\" # path to collect metrics\n        insight.opentelemetry.io/metric-port: \"9464\" # port for collecting metrics\n

                                          The following shows the complete YAML:

                                          ---\napiVersion: v1\nkind: Service\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  type: NodePort\n  selector:\n    #app: my-deployment-with-aotu-instrumentation-app\n    app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  ports:\n    - name: http\n      port: 8080\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  selector:\n    matchLabels:\n      #app: my-deployment-with-aotu-instrumentation-app\n      app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\" # whether to collect\n        insight.opentelemetry.io/metric-path: \"/actuator/prometheus\"      # path to collect metrics\n        insight.opentelemetry.io/metric-port: \"8080\"   # port for collecting metrics\n    spec:\n      containers:\n        - name: myapp\n          image: docker.m.daocloud.io/wutang/spring-boot-actuator-prometheus-metrics-demo\n          ports:\n            - name: http\n              containerPort: 8080\n          resources:\n            limits:\n              cpu: 500m\n              memory: 800Mi\n            requests:\n              cpu: 200m\n              memory: 400Mi\n

In the above example, Insight will scrape the Prometheus metrics exposed through Spring Boot Actuator from the Pod's port 8080 at the path /actuator/prometheus.
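To confirm that the scrape endpoint serves data before relying on Insight to collect it, a port-forward sketch like the following can be used (service name and port taken from the YAML above):

kubectl port-forward svc/spring-boot-actuator-prometheus-metrics-demo 8080:8080 &\ncurl http://localhost:8080/actuator/prometheus\n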

                                          "},{"location":"en/end-user/insight/quickstart/jvm-monitor/otel-java-agent.html","title":"Use OpenTelemetry Java Agent to expose JVM monitoring metrics","text":"

Opentelemetry Agent v1.20.0 and above includes the JMX Metric Insight module. If your application has already integrated the Opentelemetry Agent to collect application traces, you no longer need to introduce another agent to expose JMX metrics: the Opentelemetry Agent collects and exposes metrics by instrumenting the MBeans locally available in the application.

The Opentelemetry Agent also has built-in monitoring samples for common Java servers and frameworks; please refer to the predefined metrics.

Using the OpenTelemetry Java Agent also requires considering how to mount the JAR into the container. In addition to mounting the JAR file as described for the JMX Exporter above, we can use the Operator capabilities provided by OpenTelemetry to automatically enable JVM metric exposure for our applications.


However, in the current version, you still need to manually add the corresponding annotations to the workload before the JVM data will be collected by Insight.

                                          "},{"location":"en/end-user/insight/quickstart/jvm-monitor/otel-java-agent.html#expose-metrics-for-java-middleware","title":"Expose metrics for Java middleware","text":"

The Opentelemetry Agent also has some built-in middleware monitoring samples; please refer to Predefined Metrics.

By default, no middleware type is specified; it must be set through the -Dotel.jmx.target.system JVM option, for example -Dotel.jmx.target.system=jetty,kafka-broker.
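As a sketch of how this option can be combined with the agent flag, for example via JAVA_TOOL_OPTIONS; the agent path here is illustrative and depends on where the JAR is mounted in your image:

export JAVA_TOOL_OPTIONS=\"-javaagent:/app/opentelemetry-javaagent.jar -Dotel.jmx.target.system=jetty,kafka-broker\"\n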

                                          "},{"location":"en/end-user/insight/quickstart/jvm-monitor/otel-java-agent.html#reference","title":"Reference","text":"
                                          • Gaining JMX Metric Insights with the OpenTelemetry Java Agent

                                          • Otel jmx metrics

                                          "},{"location":"en/end-user/insight/quickstart/otel/golang-ebpf.html","title":"Enhance Go apps with OTel auto-instrumentation","text":"

If you don't want to manually change the application code, you can try the eBPF-based automatic enhancement method described on this page. This feature is currently under review for donation to the OpenTelemetry community and does not yet support Operator injection through annotations (this will be supported in the future), so you need to manually change the Deployment YAML or use a patch.

                                          "},{"location":"en/end-user/insight/quickstart/otel/golang-ebpf.html#prerequisites","title":"Prerequisites","text":"

                                          Make sure Insight Agent is ready. If not, see Install insight-agent to collect data and make sure the following three items are in place:

• The trace feature is enabled for insight-agent
• The address and port for trace data are filled in correctly
• The Pods corresponding to deployment/opentelemetry-operator-controller-manager and deployment/insight-agent-opentelemetry-collector are ready
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang-ebpf.html#install-instrumentation-cr","title":"Install Instrumentation CR","text":"

Install it under the insight-system namespace; skip this step if it has already been installed.

                                          Note: This CR currently only supports the injection of environment variables (including service name and trace address) required to connect to Insight, and will support the injection of Golang probes in the future.

                                          kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.17.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.31.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.34b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:0.3.1-beta.1\nEOF\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang-ebpf.html#change-the-application-deployment-file","title":"Change the application deployment file","text":"
                                          • Add environment variable annotations

There is only one such annotation, which is used to add OpenTelemetry-related environment variables, such as the trace reporting address, the cluster ID where the container is located, and the namespace:

                                            instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

The value is divided into two parts by /: the first part, insight-system, is the namespace of the CR installed in the previous step, and the second part, insight-opentelemetry-autoinstrumentation, is the name of the CR. (A kubectl patch sketch for applying this annotation appears after the final YAML below.)

                                          • Add golang ebpf probe container

                                            Here is sample code:

                                            apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: voting\n  namespace: emojivoto\n  labels:\n    app.kubernetes.io/name: voting\n    app.kubernetes.io/part-of: emojivoto\n    app.kubernetes.io/version: v11\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: voting-svc\n      version: v11\n  template:\n    metadata:\n      labels:\n        app: voting-svc\n        version: v11\n      annotations:\n        instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\" # (1)\n    spec:\n      containers:\n        - env:\n            - name: GRPC_PORT\n              value: \"8080\"\n            - name: PROM_PORT\n              value: \"8801\"\n          image: docker.l5d.io/buoyantio/emojivoto-voting-svc:v11 # (2)\n          name: voting-svc\n          command:\n            - /usr/local/bin/emojivoto-voting-svc\n          ports:\n            - containerPort: 8080\n              name: grpc\n            - containerPort: 8801\n              name: prom\n          resources:\n            requests:\n              cpu: 100m\n        - name: emojivoto-voting-instrumentation\n          image: docker.m.daocloud.io/keyval/otel-go-agent:v0.6.0\n          env:\n            - name: OTEL_TARGET_EXE\n              value: /usr/local/bin/emojivoto-voting-svc # (3)\n          securityContext:\n            runAsUser: 0\n            capabilities:\n              add:\n                - SYS_PTRACE\n            privileged: true\n          volumeMounts:\n            - mountPath: /sys/kernel/debug\n              name: kernel-debug\n      volumes:\n        - name: kernel-debug\n          hostPath:\n            path: /sys/kernel/debug\n
                                            1. Used to add environment variables related to OpenTelemetry.
                                            2. Assuming this is your Golang application.
                                            3. Note that it should be consistent with the content of the command mentioned above: /usr/local/bin/emojivoto-voting-svc .

The final generated YAML is as follows:

                                          apiVersion: v1\nkind: Pod\nmetadata:\n  name: voting-84b696c897-p9xbp\n  generateName: voting-84b696c897-\n  namespace: default\n  uid: 742639b0-db6e-4f06-ac90-68a80e2b8a11\n  resourceVersion: '65560793'\n  creationTimestamp: '2022-10-19T07:08:56Z'\n  labels:\n    app: voting-svc\n    pod-template-hash: 84b696c897\n    version: v11\n  annotations:\n    cni.projectcalico.org/containerID: 0a987cf0055ce0dfbe75c3f30d580719eb4fbbd7e1af367064b588d4d4e4c7c7\n    cni.projectcalico.org/podIP: 192.168.141.218/32\n    cni.projectcalico.org/podIPs: 192.168.141.218/32\n    instrumentation.opentelemetry.io/inject-sdk: insight-system/insight-opentelemetry-autoinstrumentation\nspec:\n  volumes:\n    - name: launcherdir\n      emptyDir: {}\n    - name: kernel-debug\n      hostPath:\n        path: /sys/kernel/debug\n        type: ''\n    - name: kube-api-access-gwj5v\n      projected:\n        sources:\n          - serviceAccountToken:\n              expirationSeconds: 3607\n              path: token\n          - configMap:\n              name: kube-root-ca.crt\n              items:\n                - key: ca.crt\n                  path: ca.crt\n          - downwardAPI:\n              items:\n                - path: namespace\n                  fieldRef:\n                    apiVersion: v1\n                    fieldPath: metadata.namespace\n        defaultMode: 420\n  containers:\n    - name: voting-svc\n      image: docker.l5d.io/buoyantio/emojivoto-voting-svc:v11\n      command:\n        - /odigos-launcher/launch\n        - /usr/local/bin/emojivoto-voting-svc\n      ports:\n        - name: grpc\n          containerPort: 8080\n          protocol: TCP\n        - name: prom\n          containerPort: 8801\n          protocol: TCP\n      env:\n        - name: GRPC_PORT\n          value: '8080'\n        - name: PROM_PORT\n          value: '8801'\n        - name: OTEL_TRACES_EXPORTER\n          value: otlp\n        - name: OTEL_EXPORTER_OTLP_ENDPOINT\n          value: >-\n            http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n        - name: OTEL_EXPORTER_OTLP_TIMEOUT\n          value: '200'\n        - name: SPLUNK_TRACE_RESPONSE_HEADER_ENABLED\n          value: 'true'\n        - name: OTEL_SERVICE_NAME\n          value: voting\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.name\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_UID\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.uid\n        - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: spec.nodeName\n        - name: OTEL_PROPAGATORS\n          value: jaeger,b3\n        - name: OTEL_TRACES_SAMPLER\n          value: always_on\n        - name: OTEL_RESOURCE_ATTRIBUTES\n          value: >-\n            k8s.container.name=voting-svc,k8s.deployment.name=voting,k8s.deployment.uid=79e015e2-4643-44c0-993c-e486aebaba10,k8s.namespace.name=default,k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME),k8s.pod.uid=$(OTEL_RESOURCE_ATTRIBUTES_POD_UID),k8s.replicaset.name=voting-84b696c897,k8s.replicaset.uid=63f56167-6632-415d-8b01-43a3db9891ff\n      resources:\n        requests:\n          cpu: 100m\n      volumeMounts:\n        - name: launcherdir\n          mountPath: 
/odigos-launcher\n        - name: kube-api-access-gwj5v\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: IfNotPresent\n    - name: emojivoto-voting-instrumentation\n      image: keyval/otel-go-agent:v0.6.0\n      env:\n        - name: OTEL_TARGET_EXE\n          value: /usr/local/bin/emojivoto-voting-svc\n        - name: OTEL_EXPORTER_OTLP_ENDPOINT\n          value: jaeger:4317\n        - name: OTEL_SERVICE_NAME\n          value: emojivoto-voting\n      resources: {}\n      volumeMounts:\n        - name: kernel-debug\n          mountPath: /sys/kernel/debug\n        - name: kube-api-access-gwj5v\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: IfNotPresent\n      securityContext:\n        capabilities:\n          add:\n            - SYS_PTRACE\n        privileged: true\n        runAsUser: 0\n\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n
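If you would rather patch a running Deployment than edit its YAML by hand, the inject-sdk annotation from the first step can also be applied with a merge patch; the Deployment name and namespace below follow the emojivoto example above:

kubectl -n emojivoto patch deployment voting --type merge -p '{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"instrumentation.opentelemetry.io/inject-sdk\":\"insight-system/insight-opentelemetry-autoinstrumentation\"}}}}}'\n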
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang-ebpf.html#reference","title":"Reference","text":"
                                          • Getting Started with Go OpenTelemetry Automatic Instrumentation
                                          • Donating ebpf based instrumentation
                                          "},{"location":"en/end-user/insight/quickstart/otel/operator.html","title":"Enhance Applications Non-Intrusively with Operators","text":"

                                          Currently, only Java, Node.js, Python, .NET, and Golang support non-intrusive integration through the Operator approach.

                                          "},{"location":"en/end-user/insight/quickstart/otel/operator.html#prerequisites","title":"Prerequisites","text":"

                                          Please ensure that the insight-agent is ready. If not, please refer to Install insight-agent for data collection and make sure the following three items are ready:

                                          • Enable trace functionality for insight-agent
                                          • Check if the address and port for trace data are correctly filled
• Ensure that the Pods corresponding to deployment/insight-agent-opentelemetry-operator and deployment/insight-agent-opentelemetry-collector are ready
                                          "},{"location":"en/end-user/insight/quickstart/otel/operator.html#install-instrumentation-cr","title":"Install Instrumentation CR","text":"

                                          Tip

                                          Starting from Insight v0.22.0, there is no longer a need to manually install the Instrumentation CR.

                                          Install it in the insight-system namespace. There are some minor differences between different versions.

The following examples correspond, in order, to Insight v0.21.x, v0.20.x, v0.18.x, v0.17.x, and v0.16.x:
                                          K8S_CLUSTER_UID=$(kubectl get namespace kube-system -o jsonpath='{.metadata.uid}')\nkubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/openinsight-proj/autoinstrumentation-java:1.31.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n      - name: OTEL_K8S_CLUSTER_UID\n        value: $K8S_CLUSTER_UID\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
                                          kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.29.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0-rc.2\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
                                          kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.25.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.37.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.38b0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.1-alpha\nEOF\n
                                          kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.23.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.34.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.33b0\nEOF\n
                                          kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.23.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.34.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.33b0\nEOF\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/operator.html#works-with-the-service-mesh-product-mspider","title":"Works with the Service Mesh Product (Mspider)","text":"

                                          If you enable the tracing capability of Mspider (Service Mesh), you need to add an additional environment variable injection configuration:

                                          "},{"location":"en/end-user/insight/quickstart/otel/operator.html#the-operation-steps-are-as-follows","title":"The operation steps are as follows","text":"
                                          1. Log in to AI platform, then enter Container Management and select the target cluster.
                                          2. Click CRDs in the left navigation bar, find instrumentations.opentelemetry.io, and enter the details page.
                                          3. Select the insight-system namespace, then edit insight-opentelemetry-autoinstrumentation, and add the following content under spec.env:

                                                - name: OTEL_SERVICE_NAME\n      valueFrom:\n        fieldRef:\n          fieldPath: metadata.labels['app'] \n

                                            The complete example (for Insight v0.21.x) is as follows:

                                            K8S_CLUSTER_UID=$(kubectl get namespace kube-system -o jsonpath='{.metadata.uid}')\nkubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n    - name: OTEL_SERVICE_NAME\n      valueFrom:\n        fieldRef:\n          fieldPath: metadata.labels['app'] \n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/openinsight-proj/autoinstrumentation-java:1.31.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n      - name: OTEL_K8S_CLUSTER_UID\n        value: $K8S_CLUSTER_UID\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/operator.html#add-annotations-to-automatically-access-traces","title":"Add annotations to automatically access traces","text":"

                                          After the above is ready, you can enable tracing for an application through Pod annotations. The OTel Operator currently supports annotation-based trace integration. Depending on the service language, different Pod annotations need to be added. Each service can use one of two types of annotations:

                                          • Only inject environment variable annotations

                                            There is only one annotation of this type. It adds OTel-related environment variables such as the trace reporting endpoint, the ID of the cluster where the container runs, and the namespace. This annotation is especially useful when the application's language does not support automatic probe injection.

                                            instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

                                            The value is split into two parts by /: the first part (insight-system) is the namespace of the CR created in the previous step, and the second part (insight-opentelemetry-autoinstrumentation) is the name of the CR.

                                          • Automatic probe injection and environment variable injection annotations

                                            There are currently 4 such annotations, corresponding to 4 different programming languages: java, nodejs, python, dotnet. After adding one, the automatic probe and the default OTel environment variables are injected into the first container of the Pod spec:

                                            Java application / NodeJs application / Python application / Dotnet application / Golang application
                                            instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                            instrumentation.opentelemetry.io/inject-nodejs: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                            instrumentation.opentelemetry.io/inject-python: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                            instrumentation.opentelemetry.io/inject-dotnet: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

                                            Since Go auto-instrumentation requires OTEL_GO_AUTO_TARGET_EXE to be set, you must provide a valid executable path through annotations or the Instrumentation resource. Failing to set this value aborts the Go auto-instrumentation injection, so no traces are collected.

                                            instrumentation.opentelemetry.io/inject-go: \"insight-system/insight-opentelemetry-autoinstrumentation\"\ninstrumentation.opentelemetry.io/otel-go-auto-target-exe: \"/path/to/container/executable\"\n

                                            Go auto-instrumentation also requires elevated privileges. The following permissions are set automatically and are required.

                                            securityContext:\n  privileged: true\n  runAsUser: 0\n

                                          Tip

                                          The OpenTelemetry Operator automatically adds some OTel-related environment variables when injecting probes and also supports overriding these variables. The priority order for overriding these environment variables is as follows (an earlier entry takes precedence over a later one):

                                          original container env vars -> language specific env vars -> common env vars -> instrument spec configs' vars\n

                                          However, it is important to avoid manually overriding OTEL_RESOURCE_ATTRIBUTES_NODE_NAME. This variable serves as an identifier within the Operator to determine whether a Pod has already been injected with a probe; adding it manually may prevent the probe from being injected successfully.
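                                          For example, based on the priority order above, original container env vars win, so you can override an Operator-injected default simply by declaring the variable on the container itself. A minimal sketch (the container name and image are illustrative):

                                          containers:\n  - name: myapp\n    image: my-app:latest # illustrative image\n    env:\n      # Declared on the container, this value takes precedence over the Operator-injected default\n      - name: OTEL_TRACES_SAMPLER\n        value: \"parentbased_traceidratio\"\n      - name: OTEL_TRACES_SAMPLER_ARG\n        value: \"0.5\"\n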

                                          "},{"location":"en/end-user/insight/quickstart/otel/operator.html#automatic-injection-demo","title":"Automatic injection Demo","text":"

                                          Note that the annotation is added to the Pod template, i.e. under spec.template.metadata.annotations of the workload.

                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-app\n  labels:\n    app: my-app\nspec:\n  selector:\n    matchLabels:\n      app: my-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-app\n      annotations:\n        instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n    spec:\n      containers:\n      - name: myapp\n        image: jaegertracing/vertx-create-span:operator-e2e-tests\n        ports:\n          - containerPort: 8080\n            protocol: TCP\n

                                          The final generated YAML is as follows:

                                          apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-deployment-with-sidecar-565bd877dd-nqkk6\n  generateName: my-deployment-with-sidecar-565bd877dd-\n  namespace: default\n  uid: aa89ca0d-620c-4d20-8bc1-37d67bad4ea4\n  resourceVersion: '2668986'\n  creationTimestamp: '2022-04-08T05:58:48Z'\n  labels:\n    app: my-pod-with-sidecar\n    pod-template-hash: 565bd877dd\n  annotations:\n    cni.projectcalico.org/containerID: 234eae5e55ea53db2a4bc2c0384b9a1021ed3908f82a675e4a92a49a7e80dd61\n    cni.projectcalico.org/podIP: 192.168.134.133/32\n    cni.projectcalico.org/podIPs: 192.168.134.133/32\n    instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\nspec:\n  volumes:\n    - name: kube-api-access-sp2mz\n      projected:\n        sources:\n          - serviceAccountToken:\n              expirationSeconds: 3607\n              path: token\n          - configMap:\n              name: kube-root-ca.crt\n              items:\n                - key: ca.crt\n                  path: ca.crt\n          - downwardAPI:\n              items:\n                - path: namespace\n                  fieldRef:\n                    apiVersion: v1\n                    fieldPath: metadata.namespace\n        defaultMode: 420\n    - name: opentelemetry-auto-instrumentation\n      emptyDir: {}\n  initContainers:\n    - name: opentelemetry-auto-instrumentation\n      image: >-\n        ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java\n      command:\n        - cp\n        - /javaagent.jar\n        - /otel-auto-instrumentation/javaagent.jar\n      resources: {}\n      volumeMounts:\n        - name: opentelemetry-auto-instrumentation\n          mountPath: /otel-auto-instrumentation\n        - name: kube-api-access-sp2mz\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  containers:\n    - name: myapp\n      image: ghcr.io/pavolloffay/spring-petclinic:latest\n      env:\n        - name: OTEL_JAVAAGENT_DEBUG\n          value: 'true'\n        - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n          value: 'true'\n        - name: SPLUNK_PROFILER_ENABLED\n          value: 'false'\n        - name: JAVA_TOOL_OPTIONS\n          value: ' -javaagent:/otel-auto-instrumentation/javaagent.jar'\n        - name: OTEL_TRACES_EXPORTER\n          value: otlp\n        - name: OTEL_EXPORTER_OTLP_ENDPOINT\n          value: http://insight-agent-opentelemetry-collector.svc.cluster.local:4317\n        - name: OTEL_EXPORTER_OTLP_TIMEOUT\n          value: '20'\n        - name: OTEL_TRACES_SAMPLER\n          value: parentbased_traceidratio\n        - name: OTEL_TRACES_SAMPLER_ARG\n          value: '0.85'\n        - name: SPLUNK_TRACE_RESPONSE_HEADER_ENABLED\n          value: 'true'\n        - name: OTEL_SERVICE_NAME\n          value: my-deployment-with-sidecar\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.name\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_UID\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.uid\n        - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: spec.nodeName\n        - name: OTEL_RESOURCE_ATTRIBUTES\n          value: >-\n            k8s.container.name=myapp,k8s.deployment.name=my-deployment-with-sidecar,k8s.deployment.uid=8de6929d-dda0-436c-bca1-604e9ca7ea4e,k8s.namespace.name=default,k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME),k8s.pod.uid=$(OTEL_RESOURCE_ATTRIBUTES_POD_UID),k8s.replicaset.name=my-deployment-with-sidecar-565bd877dd,k8s.replicaset.uid=190d5f6e-ba7f-4794-b2e6-390b5879a6c4\n        - name: OTEL_PROPAGATORS\n          value: jaeger,b3\n      resources: {}\n      volumeMounts:\n        - name: kube-api-access-sp2mz\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n        - name: opentelemetry-auto-instrumentation\n          mountPath: /otel-auto-instrumentation\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  restartPolicy: Always\n  terminationGracePeriodSeconds: 30\n  dnsPolicy: ClusterFirst\n  serviceAccountName: default\n  serviceAccount: default\n  nodeName: k8s-master3\n  securityContext:\n    runAsUser: 1000\n    runAsGroup: 3000\n    fsGroup: 2000\n  schedulerName: default-scheduler\n  tolerations:\n    - key: node.kubernetes.io/not-ready\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n    - key: node.kubernetes.io/unreachable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n  priority: 0\n  enableServiceLinks: true\n  preemptionPolicy: PreemptLowerPriority\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/operator.html#trace-query","title":"Trace query","text":"

                                          For how to query the services that have been connected, refer to Trace Query.

                                          "},{"location":"en/end-user/insight/quickstart/otel/otel.html","title":"Use OTel to provide the application observability","text":"

                                          Enhancement is the process of enabling application code to generate telemetry data, that is, data that helps you monitor or measure the performance and status of your application.

                                          OpenTelemetry is a leading open source project providing instrumentation libraries for major programming languages and popular frameworks. It is a project under the Cloud Native Computing Foundation and is supported by the vast resources of the community. It provides a standardized data format for collected data without the need to integrate with any specific vendor.

                                          Insight supports OpenTelemetry for application instrumentation to enhance your applications.

                                          This guide introduces the basic concepts of telemetry enhancement using OpenTelemetry. OpenTelemetry also has an ecosystem of libraries, plugins, integrations, and other useful tools to extend it. You can find these resources at the OTel Registry.

                                          You can use any open standard library for telemetry enhancement and use Insight as an observability backend to ingest, analyze, and visualize data.

                                          To enhance your code, you can use the language-specific enhancement operations provided by OpenTelemetry:

                                          Insight currently provides an easy way to enhance .NET, NodeJS, Java, Python, and Golang applications with OpenTelemetry. Please follow the guidelines below.

                                          "},{"location":"en/end-user/insight/quickstart/otel/otel.html#trace-enhancement","title":"Trace Enhancement","text":"
                                          • Best practices for integrating traces: Application Non-Intrusive Enhancement via Operator
                                          • Manual instrumentation with Go language as an example: Enhance Go application with OpenTelemetry SDK
                                          • Using eBPF to implement non-intrusive auto-instrumentation in Go (experimental feature)
                                          "},{"location":"en/end-user/insight/quickstart/otel/send_tracing_to_insight.html","title":"Sending Trace Data to Insight","text":"

                                          This document describes how customers can send trace data to Insight on their own. It mainly includes the following two scenarios:

                                          1. Customer apps report traces to Insight through OTEL Agent/SDK
                                          2. Forwarding traces to Insight through Opentelemetry Collector (OTEL COL)

                                          In each cluster where Insight Agent is installed, there is an insight-agent-otel-col component that receives trace data from that cluster. This component is therefore the entry point for user access, and you first need to obtain its address. You can get the address of the OpenTelemetry Collector in the cluster through the AI platform interface, for example insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317:
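                                          If you prefer the command line, you can also look up the collector Service with kubectl; a quick sketch, assuming Insight Agent is installed in the default insight-system namespace:

                                          kubectl -n insight-system get svc insight-agent-opentelemetry-collector\n# The OTLP gRPC entry point is <service-name>.<namespace>.svc.cluster.local:4317\n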

                                          In addition, there are some slight differences for different reporting methods:

                                          "},{"location":"en/end-user/insight/quickstart/otel/send_tracing_to_insight.html#customer-apps-report-traces-to-insight-through-otel-agentsdk","title":"Customer apps report traces to Insight through OTEL Agent/SDK","text":"

                                          To successfully report trace data to Insight and display it properly, it is recommended to provide the required metadata (Resource Attributes) for OTLP through the following environment variables. There are two ways to achieve this:

                                          • Manually add them to the deployment YAML file, for example:

                                            ...\n- name: OTEL_EXPORTER_OTLP_ENDPOINT\n  value: \"http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\"\n- name: \"OTEL_SERVICE_NAME\"\n  value: my-java-app-name\n- name: \"OTEL_K8S_NAMESPACE\"\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: metadata.namespace\n- name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: spec.nodeName\n- name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: metadata.name\n- name: OTEL_RESOURCE_ATTRIBUTES\n  value: \"k8s.namespace.name=$(OTEL_K8S_NAMESPACE),k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME)\"\n
                                          • Use the automatic injection capability of Insight Agent to inject the metadata (Resource Attributes)

                                            Ensure that Insight Agent is working properly and that the Instrumentation CR has been installed; then you only need to add the following annotation to the Pod:

                                            instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

                                            For example:

                                            apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-with-auto-instrumentation\nspec:\n  selector:\n    matchLabels:\n      app.kubernetes.io/name: my-deployment-with-auto-instrumentation-kubernetes\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: my-deployment-with-auto-instrumentation-kubernetes\n      annotations:\n        sidecar.opentelemetry.io/inject: \"false\"\n        instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/send_tracing_to_insight.html#forwarding-traces-to-insight-through-opentelemetry-collector","title":"Forwarding traces to Insight through Opentelemetry Collector","text":"

                                          After ensuring that the application has added the metadata mentioned above, you only need to add an OTLP Exporter to your OpenTelemetry Collector configuration to forward the trace data to the Insight Agent OpenTelemetry Collector. Below is an example OpenTelemetry Collector configuration file:

                                          ...\nexporters:\n  otlp/insight:\n    endpoint: insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\nservice:\n  ...\n  pipelines:\n    ...\n    traces:\n      exporters:\n        - otlp/insight\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/send_tracing_to_insight.html#references","title":"References","text":"
                                          • Enhancing Applications Non-intrusively with the Operator
                                          • Achieving Observability with OTel
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html","title":"Enhance Go applications with OTel SDK","text":"

                                          This page contains instructions on how to set up OpenTelemetry enhancements in a Go application.

                                          OpenTelemetry, also known simply as OTel, is an open-source observability framework that helps generate and collect telemetry data: traces, metrics, and logs in Go apps.

                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#enhance-go-apps-with-the-opentelemetry-sdk","title":"Enhance Go apps with the OpenTelemetry SDK","text":""},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#install-related-dependencies","title":"Install related dependencies","text":"

                                          Dependencies related to the OpenTelemetry exporter and SDK must be installed first. If you are using another request router, please refer to request routing. After changing into the application source folder, run the following command:

                                          go get go.opentelemetry.io/otel@v1.8.0 \\\n  go.opentelemetry.io/otel/trace@v1.8.0 \\\n  go.opentelemetry.io/otel/sdk@v1.8.0 \\\n  go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin@v0.33.0 \\\n  go.opentelemetry.io/otel/exporters/otlp/otlptrace@v1.7.0 \\\n  go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc@v1.4.1\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#create-an-initialization-feature-using-the-opentelemetry-sdk","title":"Create an initialization feature using the OpenTelemetry SDK","text":"

                                          In order for an application to be able to send data, a function is required to initialize OpenTelemetry. Add the following code snippet to the main.go file:

                                          import (\n    \"context\"\n    \"os\"\n    \"time\"\n\n    \"go.opentelemetry.io/otel\"\n    \"go.opentelemetry.io/otel/exporters/otlp/otlptrace\"\n    \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc\"\n    \"go.opentelemetry.io/otel/propagation\"\n    \"go.opentelemetry.io/otel/sdk/resource\"\n    sdktrace \"go.opentelemetry.io/otel/sdk/trace\"\n    semconv \"go.opentelemetry.io/otel/semconv/v1.7.0\"\n    \"go.uber.org/zap\"\n    \"google.golang.org/grpc\"\n)\n\nvar tracerExp *otlptrace.Exporter\n\nfunc retryInitTracer() func() {\n    var shutdown func()\n    go func() {\n        for {\n            // OTel will reconnect and re-send spans when the collector recovers, so we don't need to re-init the tracer exporter.\n            if tracerExp == nil {\n                shutdown = initTracer()\n            } else {\n                break\n            }\n            time.Sleep(time.Minute * 5)\n        }\n    }()\n    return shutdown\n}\n\nfunc initTracer() func() {\n    // temporarily set timeout to 10s\n    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n    defer cancel()\n\n    serviceName, ok := os.LookupEnv(\"OTEL_SERVICE_NAME\")\n    if !ok {\n        serviceName = \"server_name\"\n        os.Setenv(\"OTEL_SERVICE_NAME\", serviceName)\n    }\n    otelAgentAddr, ok := os.LookupEnv(\"OTEL_EXPORTER_OTLP_ENDPOINT\")\n    if !ok {\n        otelAgentAddr = \"http://localhost:4317\"\n        os.Setenv(\"OTEL_EXPORTER_OTLP_ENDPOINT\", otelAgentAddr)\n    }\n    zap.S().Infof(\"OTLP Trace connect to: %s with service name: %s\", otelAgentAddr, serviceName)\n\n    traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure(), otlptracegrpc.WithDialOption(grpc.WithBlock()))\n    if err != nil {\n        handleErr(err, \"OTLP Trace gRPC Creation\")\n        return nil\n    }\n\n    tracerProvider := sdktrace.NewTracerProvider(\n        sdktrace.WithBatcher(traceExporter),\n        sdktrace.WithSampler(sdktrace.AlwaysSample()),\n        sdktrace.WithResource(resource.NewWithAttributes(semconv.SchemaURL)),\n    )\n\n    otel.SetTracerProvider(tracerProvider)\n    otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))\n\n    tracerExp = traceExporter\n    return func() {\n        // Shutdown will flush any remaining spans and shut down the exporter.\n        handleErr(tracerProvider.Shutdown(ctx), \"failed to shutdown TracerProvider\")\n    }\n}\n\nfunc handleErr(err error, message string) {\n    if err != nil {\n        zap.S().Errorf(\"%s: %v\", message, err)\n    }\n}\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#initialize-tracker-in-maingo","title":"Initialize tracker in main.go","text":"

                                          Modify main.go to initialize the tracer. Also, when your service shuts down, you should call TracerProvider.Shutdown() to ensure all spans are exported. The service makes this call as a deferred function in main:

                                          func main() {\n    // start otel tracing\n    if shutdown := retryInitTracer(); shutdown != nil {\n            defer shutdown()\n        }\n    ......\n}\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#add-opentelemetry-gin-middleware-to-the-application","title":"Add OpenTelemetry Gin middleware to the application","text":"

                                          Configure Gin to use the middleware by adding the following line to main.go:

                                          import (\n    ....\n  \"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin\"\n)\n\nfunc main() {\n    ......\n    r := gin.Default()\n    r.Use(otelgin.Middleware(\"my-app\"))\n    ......\n}\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#run-the-application","title":"Run the application","text":"
                                          • Local debugging and running

                                            Note: This step is only used for local development and debugging. In the production environment, the Operator will automatically complete the injection of the following environment variables.

                                            The above steps have completed the initialization of the SDK. Now, to develop and debug locally, you need to obtain the address of insight-agent-opentelemetry-collector in the insight-system namespace in advance, for example: insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317.

                                            Therefore, you can add the following environment variables when you start the application locally:

                                            OTEL_SERVICE_NAME=my-golang-app OTEL_EXPORTER_OTLP_ENDPOINT=http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317 go run main.go...\n
                                          • Running in a production environment

                                            Please refer to the section Only inject environment variable annotations in Achieving non-intrusive enhancement of applications through Operators, and add the following annotation to the deployment YAML:

                                            instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

                                            If you cannot use annotations, you can manually add the following environment variables to the deployment yaml:

                                          ...\nenv:\n  - name: OTEL_EXPORTER_OTLP_ENDPOINT\n    value: 'http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317'\n  - name: OTEL_SERVICE_NAME\n    value: \"your deployment name\" # modify it.\n  - name: OTEL_K8S_NAMESPACE\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: metadata.namespace\n  - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: spec.nodeName\n  - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: metadata.name\n  - name: OTEL_RESOURCE_ATTRIBUTES\n    value: 'k8s.namespace.name=$(OTEL_K8S_NAMESPACE),k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME)'\n...\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#request-routing","title":"Request Routing","text":""},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#opentelemetry-gingonic-enhancements","title":"OpenTelemetry gin/gonic enhancements","text":"
                                          // Add one line to your import() stanza depending upon your request router:\nmiddleware \"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin\"\n

                                          Then inject the OpenTelemetry middleware:

                                          router.Use(middleware.Middleware(\"my-app\"))\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#opentelemetry-gorillamux-enhancements","title":"OpenTelemetry gorillamux enhancements","text":"
                                          // Add one line to your import() stanza depending upon your request router:\nmiddleware \"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux\"\n

                                          Then inject the OpenTelemetry middleware:

                                          router.Use(middleware.Middleware(\"my-app\"))\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#grpc-enhancements","title":"gRPC enhancements","text":"

                                          Likewise, OpenTelemetry can automatically instrument gRPC requests. To instrument any gRPC server you have, add the interceptor to the server's instantiation.

                                          import (\n  grpcotel \"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc\"\n)\nfunc main() {\n  [...]\n\n    s := grpc.NewServer(\n        grpc.UnaryInterceptor(grpcotel.UnaryServerInterceptor()),\n        grpc.StreamInterceptor(grpcotel.StreamServerInterceptor()),\n    )\n}\n

                                          Note that if your program uses a gRPC client to call third-party services, you also need to add the interceptor to the gRPC client:

                                              [...]\n\n    conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()),\n        grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()),\n        grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()),\n    )\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#if-not-using-request-routing","title":"If not using request routing","text":"
                                          import (\n  \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\"\n)\n

                                          Wherever you pass an http.Handler to a ServeMux, wrap the handler function. For example, the following replacements would be made:

                                          - mux.Handle(\"/path\", h)\n+ mux.Handle(\"/path\", otelhttp.NewHandler(h, \"description of path\"))\n---\n- mux.Handle(\"/path\", http.HandlerFunc(f))\n+ mux.Handle(\"/path\", otelhttp.NewHandler(http.HandlerFunc(f), \"description of path\"))\n

                                          In this way, you can ensure that each handler wrapped with otelhttp automatically collects its metadata and starts the proper trace.
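                                          For reference, a minimal self-contained sketch of wrapping plain net/http handlers with otelhttp (the handler and route names are illustrative, and the tracer provider is assumed to be initialized as shown earlier):

                                          package main\n\nimport (\n    \"fmt\"\n    \"net/http\"\n\n    \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\"\n)\n\nfunc hello(w http.ResponseWriter, r *http.Request) {\n    fmt.Fprintln(w, \"hello\")\n}\n\nfunc main() {\n    mux := http.NewServeMux()\n    // Each request to /hello now automatically starts a span named \"hello\"\n    mux.Handle(\"/hello\", otelhttp.NewHandler(http.HandlerFunc(hello), \"hello\"))\n    _ = http.ListenAndServe(\":8080\", mux)\n}\n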

                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#database-enhancements","title":"database enhancements","text":""},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#golang-gorm","title":"Golang Gorm","text":"

                                          The OpenTelemetry community has also developed middleware for database access libraries, such as Gorm:

                                          import (\n    \"github.com/uptrace/opentelemetry-go-extra/otelgorm\"\n    semconv \"go.opentelemetry.io/otel/semconv/v1.21.0\"\n    \"gorm.io/driver/sqlite\"\n    \"gorm.io/gorm\"\n)\n\ndb, err := gorm.Open(sqlite.Open(\"file::memory:?cache=shared\"), &gorm.Config{})\nif err != nil {\n    panic(err)\n}\n\notelPlugin := otelgorm.NewPlugin(otelgorm.WithDBName(\"mydb\"), // missing this can lead to incomplete display of database-related topology\n    otelgorm.WithAttributes(semconv.ServerAddress(\"memory\"))) // missing this can lead to incomplete display of database-related topology\nif err := db.Use(otelPlugin); err != nil {\n    panic(err)\n}\n

                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#custom-span","title":"Custom Span","text":"

                                          In many cases, the middleware provided by OpenTelemetry cannot record functions called internally, so we need to create a custom Span to record them:

                                          ...\n    _, span := otel.Tracer(\"GetServiceDetail\").Start(ctx,\n        \"spanMetricDao.GetServiceDetail\",\n        trace.WithSpanKind(trace.SpanKindInternal))\n    defer span.End()\n...\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#add-custom-properties-and-custom-events-to-span","title":"Add custom properties and custom events to span","text":"

                                          It is also possible to set custom attributes or tags on a span. To add custom attributes and events, follow these steps:

                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#import-tracking-and-property-libraries","title":"Import Tracking and Property Libraries","text":"
                                          import (\n    ...\n    \"go.opentelemetry.io/otel/attribute\"\n    \"go.opentelemetry.io/otel/trace\"\n)\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#get-the-current-span-from-the-context","title":"Get the current Span from the context","text":"
                                          span := trace.SpanFromContext(c.Request.Context())\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#set-properties-in-the-current-span","title":"Set properties in the current Span","text":"
                                          span.SetAttributes(attribute.String(\"controller\", \"books\"))\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#add-an-event-to-the-current-span","title":"Add an Event to the current Span","text":"

                                          Adding span events is done using AddEvent on the span object.

                                          span.AddEvent(msg)\n
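                                          An event can also carry its own attributes via trace.WithAttributes; a short sketch using the libraries imported above (the event name and attribute are illustrative):

                                          span.AddEvent(\"cache miss\", trace.WithAttributes(\n    attribute.String(\"cache.key\", \"user:42\"),\n))\n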
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#log-errors-and-exceptions","title":"Log errors and exceptions","text":"
                                          import \"go.opentelemetry.io/otel/codes\"\n\n// Get the current span\nspan := trace.SpanFromContext(ctx)\n\n// RecordError will automatically convert an error into a span event\nspan.RecordError(err)\n\n// Flag this span as an error\nspan.SetStatus(codes.Error, \"internal error\")\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#references","title":"References","text":"

                                          For demo examples, please refer to:

                                          • otel-grpc-examples
                                          • opentelemetry-demo/productcatalogservice
                                          • opentelemetry-collector-contrib/demo
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/meter.html","title":"Exposing Metrics for Applications Using OpenTelemetry SDK","text":"

                                          This article is intended for users who wish to evaluate or explore the evolving OTLP metrics.

                                          The OpenTelemetry project requires that APIs and SDKs emit data in the OpenTelemetry Protocol (OTLP) for supported languages.

                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/meter.html#for-golang-applications","title":"For Golang Applications","text":"

                                          A Golang application can expose runtime metrics through the SDK by adding the following code to enable the metrics exporter:

                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/meter.html#install-required-dependencies","title":"Install Required Dependencies","text":"

                                          Navigate to your application\u2019s source folder and run the following command:

                                          go get go.opentelemetry.io/otel \\\n  go.opentelemetry.io/otel/attribute \\\n  go.opentelemetry.io/otel/exporters/prometheus \\\n  go.opentelemetry.io/otel/metric/global \\\n  go.opentelemetry.io/otel/metric/instrument \\\n  go.opentelemetry.io/otel/sdk/metric\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/meter.html#create-an-initialization-function-using-otel-sdk","title":"Create an Initialization Function Using OTel SDK","text":"
                                          import (\n    .....\n\n    \"go.opentelemetry.io/otel/attribute\"\n    otelPrometheus \"go.opentelemetry.io/otel/exporters/prometheus\"\n    \"go.opentelemetry.io/otel/metric/global\"\n    \"go.opentelemetry.io/otel/metric/instrument\"\n    \"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram\"\n    controller \"go.opentelemetry.io/otel/sdk/metric/controller/basic\"\n    \"go.opentelemetry.io/otel/sdk/metric/export/aggregation\"\n    processor \"go.opentelemetry.io/otel/sdk/metric/processor/basic\"\n    selector \"go.opentelemetry.io/otel/sdk/metric/selector/simple\"\n)\n\nfunc (s *insightServer) initMeter() *otelPrometheus.Exporter {\n    s.meter = global.Meter(\"xxx\")\n\n    config := otelPrometheus.Config{\n        DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50},\n        Gatherer:                   prometheus.DefaultGatherer,\n        Registry:                   prometheus.NewRegistry(),\n        Registerer:                 prometheus.DefaultRegisterer,\n    }\n\n    c := controller.New(\n        processor.NewFactory(\n            selector.NewWithHistogramDistribution(\n                histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),\n            ),\n            aggregation.CumulativeTemporalitySelector(),\n            processor.WithMemory(true),\n        ),\n    )\n\n    exporter, err := otelPrometheus.New(config, c)\n    if err != nil {\n        zap.S().Panicf(\"failed to initialize prometheus exporter %v\", err)\n    }\n\n    global.SetMeterProvider(exporter.MeterProvider())\n\n    http.HandleFunc(\"/metrics\", exporter.ServeHTTP)\n\n    go func() {\n        _ = http.ListenAndServe(fmt.Sprintf(\":%d\", 8888), nil)\n    }()\n\n    zap.S().Info(\"Prometheus server running on \", fmt.Sprintf(\":%d\", 8888))\n    return exporter\n}\n

                                          The above method will expose a metrics endpoint for your application at: http://localhost:8888/metrics.

                                          Next, initialize it in main.go:

                                          func main() {\n    // ...\n    tp := initMeter()\n    // ...\n}\n

                                          If you want to add custom metrics, you can refer to the following:

                                          // exposeLoggingMetric exposes a metric like \"insight_log_total{} 1\"\nfunc (s *insightServer) exposeLoggingMetric(lserver *log.LogService) {\n    s.meter = global.Meter(\"insight.io/basic\")\n\n    var lock sync.Mutex\n    logCounter, err := s.meter.AsyncFloat64().Counter(\"insight_log_total\")\n    if err != nil {\n        zap.S().Panicf(\"failed to initialize instrument: %v\", err)\n    }\n\n    _ = s.meter.RegisterCallback([]instrument.Asynchronous{logCounter}, func(ctx context.Context) {\n        lock.Lock()\n        defer lock.Unlock()\n        count, err := lserver.Count(ctx)\n        // Only record the value when the count was retrieved successfully\n        if err == nil && count != -1 {\n            logCounter.Observe(ctx, float64(count))\n        }\n    })\n}\n

                                          Then, call this method in main.go:

                                          // ...\ns.exposeLoggingMetric(lservice)\n// ...\n

                                          You can check if your metrics are working correctly by visiting http://localhost:8888/metrics.
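                                          For example, you can verify the custom counter from the command line (assuming the port and metric name above):

                                          curl -s http://localhost:8888/metrics | grep insight_log_total\n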

                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/meter.html#for-java-applications","title":"For Java Applications","text":"

                                          For Java applications, you can directly expose JVM-related metrics by using the OpenTelemetry agent with the following environment variable:

                                          OTEL_METRICS_EXPORTER=prometheus\n

                                          You can then check your metrics at http://localhost:8888/metrics.
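                                          A sketch of launching the application with the agent and the Prometheus exporter enabled; the agent jar path is a placeholder:

                                          OTEL_METRICS_EXPORTER=prometheus java -javaagent:/path/to/opentelemetry-javaagent.jar -jar my-app.jar\n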

                                          Next, combine it with a Prometheus ServiceMonitor to complete the metrics integration. If you want to expose custom metrics, please refer to opentelemetry-java-docs/prometheus.

                                          The process is mainly divided into two steps:

                                          • Create a meter provider and specify Prometheus as the exporter.
                                          /*\n * Copyright The OpenTelemetry Authors\n * SPDX-License-Identifier: Apache-2.0\n */\n\npackage io.opentelemetry.example.prometheus;\n\nimport io.opentelemetry.api.metrics.MeterProvider;\nimport io.opentelemetry.exporter.prometheus.PrometheusHttpServer;\nimport io.opentelemetry.sdk.metrics.SdkMeterProvider;\nimport io.opentelemetry.sdk.metrics.export.MetricReader;\n\npublic final class ExampleConfiguration {\n\n  /**\n   * Initializes the Meter SDK and configures the Prometheus collector with all default settings.\n   *\n   * @param prometheusPort the port to open up for scraping.\n   * @return A MeterProvider for use in instrumentation.\n   */\n  static MeterProvider initializeOpenTelemetry(int prometheusPort) {\n    MetricReader prometheusReader = PrometheusHttpServer.builder().setPort(prometheusPort).build();\n\n    return SdkMeterProvider.builder().registerMetricReader(prometheusReader).build();\n  }\n}\n
                                          • Create a custom meter and start the HTTP server.
                                          package io.opentelemetry.example.prometheus;\n\nimport io.opentelemetry.api.common.Attributes;\nimport io.opentelemetry.api.metrics.Meter;\nimport io.opentelemetry.api.metrics.MeterProvider;\nimport java.util.concurrent.ThreadLocalRandom;\n\n/**\n * Example of using the PrometheusHttpServer to convert OTel metrics to Prometheus format and expose\n * these to a Prometheus instance via a HttpServer exporter.\n *\n * <p>A Gauge is used to periodically measure how many incoming messages are awaiting processing.\n * The Gauge callback gets executed every collection interval.\n */\npublic final class PrometheusExample {\n  private long incomingMessageCount;\n\n  public PrometheusExample(MeterProvider meterProvider) {\n    Meter meter = meterProvider.get(\"PrometheusExample\");\n    meter\n        .gaugeBuilder(\"incoming.messages\")\n        .setDescription(\"No of incoming messages awaiting processing\")\n        .setUnit(\"message\")\n        .buildWithCallback(result -> result.record(incomingMessageCount, Attributes.empty()));\n  }\n\n  void simulate() {\n    for (int i = 500; i > 0; i--) {\n      try {\n        System.out.println(\n            i + \" Iterations to go, current incomingMessageCount is:  \" + incomingMessageCount);\n        incomingMessageCount = ThreadLocalRandom.current().nextLong(100);\n        Thread.sleep(1000);\n      } catch (InterruptedException e) {\n        // ignored here\n      }\n    }\n  }\n\n  public static void main(String[] args) {\n    int prometheusPort = 8888;\n\n    // It is important to initialize the OpenTelemetry SDK as early as possible in your process.\n    MeterProvider meterProvider = ExampleConfiguration.initializeOpenTelemetry(prometheusPort);\n\n    PrometheusExample prometheusExample = new PrometheusExample(meterProvider);\n\n    prometheusExample.simulate();\n\n    System.out.println(\"Exiting\");\n  }\n}\n

                                          After running the Java application, you can check if your metrics are working correctly by visiting http://localhost:8888/metrics.

                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/meter.html#insight-collecting-metrics","title":"Insight Collecting Metrics","text":"

                                          Lastly, note that your application now exposes metrics; the remaining step is for Insight to collect them.

                                          The recommended way to expose metrics is via ServiceMonitor or PodMonitor.

                                          "},{"location":"en/end-user/insight/quickstart/otel/golang/meter.html#creating-servicemonitorpodmonitor","title":"Creating ServiceMonitor/PodMonitor","text":"

                                          The added ServiceMonitor/PodMonitor needs to have the label operator.insight.io/managed-by: insight for the Operator to recognize it:

                                          apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: example-app\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  selector:\n    matchLabels:\n      app: example-app\n  endpoints:\n  - port: web\n  namespaceSelector:\n    any: true\n
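                                          If the workload is not exposed through a Service, a PodMonitor with the same label can be used instead; a minimal sketch (the port name is illustrative):

                                          apiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: example-app\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  selector:\n    matchLabels:\n      app: example-app\n  podMetricsEndpoints:\n  - port: web\n  namespaceSelector:\n    any: true\n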
                                          "},{"location":"en/end-user/insight/quickstart/otel/java/index.html","title":"Start Monitoring Java Applications","text":"
                                          1. For integrating and monitoring traces of Java applications, please refer to the document Implementing Non-Intrusive Enhancements for Applications via Operator, which explains how to automatically integrate traces through annotations.

                                          2. Monitoring the JVM of Java applications: how Java applications that have already exposed JVM metrics, and those that have not yet done so, can connect to Insight for observability.

                                          3. If your Java application has not yet started exposing JVM metrics, you can refer to the following documents:

                                            • Exposing JVM Monitoring Metrics Using JMX Exporter
                                            • Exposing JVM Monitoring Metrics Using OpenTelemetry Java Agent
                                          4. If your Java application has already exposed JVM metrics, you can refer to the following document:

                                            • Connecting Existing JVM Metrics of Java Applications to Observability
                                          5. Writing TraceId and SpanId into Java Application Logs to correlate trace data with log data.

                                          "},{"location":"en/end-user/insight/quickstart/otel/java/mdc.html","title":"Writing TraceId and SpanId into Java Application Logs","text":"

                                          This article explains how to automatically write TraceId and SpanId into Java application logs using OpenTelemetry. By including TraceId and SpanId in your logs, you can correlate distributed tracing data with log data, enabling more efficient fault diagnosis and performance analysis.

                                          "},{"location":"en/end-user/insight/quickstart/otel/java/mdc.html#supported-logging-libraries","title":"Supported Logging Libraries","text":"

                                          For more information, please refer to the Logger MDC auto-instrumentation.

                                          Logging Framework | Supported Automatic Instrumentation Versions | Dependency Required for Manual Instrumentation\nLog4j 1 | 1.2+ | None\nLog4j 2 | 2.7+ | opentelemetry-log4j-context-data-2.17-autoconfigure\nLogback | 1.0+ | opentelemetry-logback-mdc-1.0"},{"location":"en/end-user/insight/quickstart/otel/java/mdc.html#using-logback-spring-boot-project","title":"Using Logback (Spring Boot Project)","text":"

                                          Spring Boot projects come with a built-in logging framework and use Logback as the default logging implementation. If your Java project is a Spring Boot project, you can write TraceId into logs with minimal configuration.

                                          Set logging.pattern.level in application.properties, adding %mdc{trace_id} and %mdc{span_id} to the logs.

                                          logging.pattern.level=trace_id=%mdc{trace_id} span_id=%mdc{span_id} %5p ....omitted...\n

                                          Here is an example of the logs:

                                          2024-06-26 10:56:31.200 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.a.c.c.C.[Tomcat].[localhost].[/]       : Initializing Spring DispatcherServlet 'dispatcherServlet'\n2024-06-26 10:56:31.201 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.s.web.servlet.DispatcherServlet        : Initializing Servlet 'dispatcherServlet'\n2024-06-26 10:56:31.209 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.s.web.servlet.DispatcherServlet        : Completed initialization in 8 ms\n2024-06-26 10:56:31.296 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=5743699405074f4e  INFO 53724 --- [nio-8081-exec-1] com.example.httpserver.ot.OTServer       : hello world\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/java/mdc.html#using-log4j2","title":"Using Log4j2","text":"
                                          1. Add the OpenTelemetry Log4j2 dependency in pom.xml:

                                            Tip

                                            Please replace OPENTELEMETRY_VERSION with the latest version.

                                            <dependencies>\n  <dependency>\n    <groupId>io.opentelemetry.instrumentation</groupId>\n    <artifactId>opentelemetry-log4j-context-data-2.17-autoconfigure</artifactId>\n    <version>OPENTELEMETRY_VERSION</version>\n    <scope>runtime</scope>\n  </dependency>\n</dependencies>\n
                                          2. Modify the log4j2.xml configuration, adding %X{trace_id} and %X{span_id} in the pattern to automatically write TraceId and SpanId into the logs:

                                            <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Configuration>\n  <Appenders>\n    <Console name=\"Console\" target=\"SYSTEM_OUT\">\n      <PatternLayout\n          pattern=\"%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} trace_id=%X{trace_id} span_id=%X{span_id} trace_flags=%X{trace_flags} - %msg%n\"/>\n    </Console>\n  </Appenders>\n  <Loggers>\n    <Root>\n      <AppenderRef ref=\"Console\" level=\"All\"/>\n    </Root>\n  </Loggers>\n</Configuration>\n
                                          3. If using Logback, add the OpenTelemetry Logback dependency in pom.xml.

                                            Tip

                                            Please replace OPENTELEMETRY_VERSION with the latest version.

                                            <dependencies>\n  <dependency>\n    <groupId>io.opentelemetry.instrumentation</groupId>\n    <artifactId>opentelemetry-logback-mdc-1.0</artifactId>\n    <version>OPENTELEMETRY_VERSION</version>\n  </dependency>\n</dependencies>\n
                                          4. Modify the logback.xml configuration, adding %X{trace_id} and %X{span_id} in the pattern to automatically write TraceId and SpanId into the logs:

                                            <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<configuration>\n  <appender name=\"CONSOLE\" class=\"ch.qos.logback.core.ConsoleAppender\">\n    <encoder>\n      <pattern>%d{HH:mm:ss.SSS} trace_id=%X{trace_id} span_id=%X{span_id} trace_flags=%X{trace_flags} %msg%n</pattern>\n    </encoder>\n  </appender>\n\n  <!-- Just wrap your logging appender, for example ConsoleAppender, with OpenTelemetryAppender -->\n  <appender name=\"OTEL\" class=\"io.opentelemetry.instrumentation.logback.mdc.v1_0.OpenTelemetryAppender\">\n    <appender-ref ref=\"CONSOLE\"/>\n  </appender>\n\n  <!-- Use the wrapped \"OTEL\" appender instead of the original \"CONSOLE\" one -->\n  <root level=\"INFO\">\n    <appender-ref ref=\"OTEL\"/>\n  </root>\n\n</configuration>\n
                                          "},{"location":"en/end-user/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html","title":"Exposing JVM Monitoring Metrics Using JMX Exporter","text":"

                                          JMX Exporter provides two usage methods:

                                          1. Standalone Process: Specify parameters when starting the JVM to expose a JMX RMI interface. The JMX Exporter calls RMI to obtain the JVM runtime state data, converts it into Prometheus metrics format, and exposes a port for Prometheus to scrape.
                                          2. In-Process (JVM process): Specify parameters when starting the JVM to run the JMX Exporter jar file as a javaagent. This method reads the JVM runtime state data in-process, converts it into Prometheus metrics format, and exposes a port for Prometheus to scrape.

                                          Note

                                          The official recommendation is not to use the first method due to its complex configuration and the requirement for a separate process, which introduces additional monitoring challenges. Therefore, this article focuses on the second method, detailing how to use JMX Exporter to expose JVM monitoring metrics in a Kubernetes environment.

                                          In this method, you need to specify the JMX Exporter jar file and configuration file when starting the JVM. Since the jar file is a binary file that is not ideal for mounting via a configmap, and the configuration file typically does not require modifications, it is recommended to package both the JMX Exporter jar file and the configuration file directly into the business container image.

                                          For the second method, you can choose to include the JMX Exporter jar file in the application image or mount it during deployment. Below are explanations for both approaches:

                                          "},{"location":"en/end-user/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html#method-1-building-jmx-exporter-jar-file-into-the-business-image","title":"Method 1: Building JMX Exporter JAR File into the Business Image","text":"

                                          The content of prometheus-jmx-config.yaml is as follows:

                                          prometheus-jmx-config.yaml
                                          ...\nssl: false\nlowercaseOutputName: false\nlowercaseOutputLabelNames: false\nrules:\n- pattern: \".*\"\n
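
                                          If capturing every MBean with pattern \".*\" is too noisy, the rules can whitelist specific attributes instead. A hedged sketch (the metric name is arbitrary; the pattern follows the exporter's domain<properties><keys>attribute matching syntax):

                                            ssl: false\nrules:\n- pattern: \"java.lang<type=Memory><HeapMemoryUsage>used\"\n  name: jvm_memory_heap_used_bytes\n  type: GAUGE\n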

                                          Note

                                          For more configuration options, please refer to the introduction at the bottom or the official Prometheus JMX Exporter documentation.

                                          Next, prepare the jar file. You can find the latest jar download link on the jmx_exporter GitHub page and refer to the following Dockerfile:

                                          FROM openjdk:11.0.15-jre\nWORKDIR /app/\nCOPY target/my-app.jar ./\nCOPY prometheus-jmx-config.yaml ./\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\nENV JAVA_TOOL_OPTIONS=-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\nEXPOSE 8081 8999 8080 8888\nENTRYPOINT java $JAVA_OPTS -jar my-app.jar\n

                                          Note:

                                          • The format of the startup parameter is: -javaagent:<jar-path>=<port>:<config-file-path>
                                          • Here, port 8088 is used to expose JVM monitoring metrics; you may change it if it conflicts with the Java application.
                                          "},{"location":"en/end-user/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html#method-2-mounting-via-init-container","title":"Method 2: Mounting via Init Container","text":"

                                          First, we need to create a Docker image for the JMX Exporter. The following Dockerfile is for reference:

                                          FROM alpine/curl:3.14\nWORKDIR /app/\n# Copy the previously created config file into the image\nCOPY prometheus-jmx-config.yaml ./\n# Download the jmx prometheus javaagent jar online\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\n

                                          Build the image using the above Dockerfile: docker build -t my-jmx-exporter .

                                          Add the following init container to the Java application deployment YAML:

                                          Click to expand YAML file
                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-demo-app\n  labels:\n    app: my-demo-app\nspec:\n  selector:\n    matchLabels:\n      app: my-demo-app\n  template:\n    metadata:\n      labels:\n        app: my-demo-app\n    spec:\n      imagePullSecrets:\n      - name: registry-pull\n      initContainers:\n      - name: jmx-sidecar\n        image: my-jmx-exporter\n        command: [\"cp\", \"-r\", \"/app/jmx_prometheus_javaagent-0.17.2.jar\", \"/target/jmx_prometheus_javaagent-0.17.2.jar\"]  \u278a\n        volumeMounts:\n        - name: sidecar\n          mountPath: /target\n      containers:\n      - image: my-demo-app-image\n        name: my-demo-app\n        resources:\n          requests:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n          limits:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n        ports:\n        - containerPort: 18083\n        env:\n        - name: JAVA_TOOL_OPTIONS\n          value: \"-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\" \u278b\n        volumeMounts:\n        - name: host-time\n          mountPath: /etc/localtime\n          readOnly: true\n        - name: sidecar\n          mountPath: /sidecar\n      volumes:\n      - name: host-time\n        hostPath:\n          path: /etc/localtime\n      - name: sidecar  # Shared agent folder\n        emptyDir: {}\n      restartPolicy: Always\n

                                          With the above modifications, the example application my-demo-app now has the capability to expose JVM metrics. After running the service, you can access the Prometheus formatted metrics at http://localhost:8088.
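
                                          To verify from inside the cluster, you can exec into the Pod and request the metrics endpoint (the Pod name is a placeholder, and this assumes curl is available in the container):

                                            kubectl exec -it <my-demo-app-pod> -- curl -s http://localhost:8088/metrics | head\n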

                                          Next, you can refer to Integrating Existing JVM Metrics of Java Applications with Observability.

                                          "},{"location":"en/end-user/insight/quickstart/otel/java/jvm-monitor/legacy-jvm.html","title":"Integrating Existing JVM Metrics of Java Applications with Observability","text":"

                                          If your Java application exposes JVM monitoring metrics through other means (such as Spring Boot Actuator), you will need to ensure that the monitoring data is collected. You can achieve this by adding annotations (Kubernetes Annotations) to your workload to allow Insight to scrape the existing JVM metrics:

                                          annotations: \n  insight.opentelemetry.io/metric-scrape: \"true\"  # Whether to scrape\n  insight.opentelemetry.io/metric-path: \"/\"         # Path to scrape metrics\n  insight.opentelemetry.io/metric-port: \"9464\"      # Port to scrape metrics\n

                                          For example, to add annotations to the my-deployment-app:

                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-app\nspec:\n  selector:\n    matchLabels:\n      app: my-deployment-app\n      app.kubernetes.io/name: my-deployment-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-deployment-app\n        app.kubernetes.io/name: my-deployment-app\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\"  # Whether to scrape\n        insight.opentelemetry.io/metric-path: \"/\"         # Path to scrape metrics\n        insight.opentelemetry.io/metric-port: \"9464\"      # Port to scrape metrics\n
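
                                          If the workload already exists, the same annotations can also be applied in place with a merge patch instead of editing the YAML. A sketch, using the deployment name from above:

                                            kubectl patch deployment my-deployment-app --type merge -p \\\n  '{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\n    \"insight.opentelemetry.io/metric-scrape\":\"true\",\n    \"insight.opentelemetry.io/metric-path\":\"/\",\n    \"insight.opentelemetry.io/metric-port\":\"9464\"}}}}}'\n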

                                          Here is a complete example:

                                          ---\napiVersion: v1\nkind: Service\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  type: NodePort\n  selector:\n    app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  ports:\n    - name: http\n      port: 8080\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  selector:\n    matchLabels:\n      app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\"  # Whether to scrape\n        insight.opentelemetry.io/metric-path: \"/actuator/prometheus\"  # Path to scrape metrics\n        insight.opentelemetry.io/metric-port: \"8080\"      # Port to scrape metrics\n    spec:\n      containers:\n        - name: myapp\n          image: docker.m.daocloud.io/wutang/spring-boot-actuator-prometheus-metrics-demo\n          ports:\n            - name: http\n              containerPort: 8080\n          resources:\n            limits:\n              cpu: 500m\n              memory: 800Mi\n            requests:\n              cpu: 200m\n              memory: 400Mi\n

                                          In the above example, Insight will scrape the Prometheus metrics exposed through Spring Boot Actuator via http://<service-ip>:8080/actuator/prometheus.

                                          "},{"location":"en/end-user/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html","title":"Exposing JVM Metrics Using OpenTelemetry Java Agent","text":"

                                          The JMX Metric Insight module was introduced in OpenTelemetry Agent v1.20.0. If your application is already integrated with the OpenTelemetry Agent for tracing, you no longer need a separate agent to expose JMX metrics: the agent collects and exposes metrics by detecting the MBeans available locally in the application.

                                          The OpenTelemetry Agent also provides built-in monitoring examples for common Java servers or frameworks. Please refer to the Predefined Metrics.

                                          When using the OpenTelemetry Java Agent, you also need to consider how to mount the JAR into the container. In addition to the methods for mounting the JAR file as described with the JMX Exporter, you can leverage the capabilities provided by the OpenTelemetry Operator to automatically enable JVM metrics exposure for your application.

                                          Note that, as of the current version, you still need to manually add the appropriate annotations to your application before the JVM data can be collected by Insight. For the specific annotations, refer to Integrating Existing JVM Metrics of Java Applications with Observability.

                                          "},{"location":"en/end-user/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html#exposing-metrics-for-java-middleware","title":"Exposing Metrics for Java Middleware","text":"

                                          The OpenTelemetry Agent also includes built-in examples for monitoring middleware. Please refer to the Predefined Metrics.

                                          No target system is designated by default; you need to specify one or more with the -Dotel.jmx.target.system JVM option, for example -Dotel.jmx.target.system=jetty,kafka-broker.
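
                                          In Kubernetes, this option is typically passed alongside the agent flag through JAVA_TOOL_OPTIONS. A sketch, assuming the agent jar has been mounted or copied to /app as in the earlier examples:

                                            env:\n  - name: JAVA_TOOL_OPTIONS\n    value: \"-javaagent:/app/opentelemetry-javaagent.jar -Dotel.jmx.target.system=jetty,kafka-broker\"\n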

                                          "},{"location":"en/end-user/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html#references","title":"References","text":"
                                          • Gaining JMX Metric Insights with the OpenTelemetry Java Agent

                                          • Otel JMX Metrics

                                          "},{"location":"en/end-user/insight/quickstart/other/install-agent-on-ocp.html","title":"OpenShift Install Insight Agent","text":"

                                          Although OpenShift ships with its own monitoring system, the Insight Agent still needs to be installed due to certain conventions in how data is collected.

                                          In addition to the basic installation configuration, the following parameters need to be added during helm install:

                                          ## Parameters related to fluent-bit\n--set fluent-bit.ocp.enabled=true \\\n--set fluent-bit.serviceAccount.create=false \\\n--set fluent-bit.securityContext.runAsUser=0 \\\n--set fluent-bit.securityContext.seLinuxOptions.type=spc_t \\\n--set fluent-bit.securityContext.readOnlyRootFilesystem=false \\\n--set fluent-bit.securityContext.allowPrivilegeEscalation=false \\\n\n## Enable Prometheus (CR) for OpenShift 4.x\n--set compatibility.openshift.prometheus.enabled=true \\\n\n## Disable the higher-version Prometheus instance\n--set kube-prometheus-stack.prometheus.enabled=false \\\n--set kube-prometheus-stack.kubeApiServer.enabled=false \\\n--set kube-prometheus-stack.kubelet.enabled=false \\\n--set kube-prometheus-stack.kubeControllerManager.enabled=false \\\n--set kube-prometheus-stack.coreDns.enabled=false \\\n--set kube-prometheus-stack.kubeDns.enabled=false \\\n--set kube-prometheus-stack.kubeEtcd.enabled=false \\\n--set kube-prometheus-stack.kubeScheduler.enabled=false \\\n--set kube-prometheus-stack.kubeStateMetrics.enabled=false \\\n--set kube-prometheus-stack.nodeExporter.enabled=false \\\n\n## Limit the namespaces watched by the PrometheusOperator to avoid competing with OpenShift's own PrometheusOperator\n--set kube-prometheus-stack.prometheusOperator.kubeletService.namespace=\"insight-system\" \\\n--set kube-prometheus-stack.prometheusOperator.prometheusInstanceNamespaces=\"insight-system\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[0]=\"openshift-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[1]=\"openshift-user-workload-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[2]=\"openshift-customer-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[3]=\"openshift-route-monitor-operator\" \\\n
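
                                          For context, these flags are appended to the regular installation command. A sketch, with the chart reference and the basic parameters left as placeholders:

                                            helm upgrade --install insight-agent <chart-repo>/insight-agent \\\n  --namespace insight-system --create-namespace \\\n  <basic installation parameters> \\\n  --set fluent-bit.ocp.enabled=true\n  # ...append the remaining --set flags from the list above\n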
                                          "},{"location":"en/end-user/insight/quickstart/other/install-agent-on-ocp.html#write-system-monitoring-data-into-prometheus-through-openshifts-own-mechanism","title":"Write system monitoring data into Prometheus through OpenShift's own mechanism","text":"
                                          apiVersion: v1\nkind: ConfigMap\nmetadata:\n   name: cluster-monitoring-config\n   namespace: openshift-monitoring\ndata:\n   config.yaml: |\n     prometheusK8s:\n       remoteWrite:\n         - queueConfig:\n             batchSendDeadline: 60s\n             maxBackoff: 5s\n             minBackoff: 30ms\n             minShards: 1\n             capacity: 5000\n             maxSamplesPerSend: 1000\n             maxShards: 100\n           remoteTimeout: 30s\n           url: http://insight-agent-prometheus.insight-system.svc.cluster.local:9090/api/v1/write\n           writeRelabelConfigs:\n             - action: keep\n               regex: etcd|kubelet|node-exporter|apiserver|kube-state-metrics\n               sourceLabels:\n                 - job\n
                                          "},{"location":"en/end-user/insight/quickstart/other/install-agentindce.html","title":"Install insight-agent in Suanova 4.0","text":"

                                          In the AI platform, an existing Suanova 4.0 cluster can be attached as a subcluster. This guide covers potential issues, and their solutions, when installing insight-agent in a Suanova 4.0 cluster.

                                          "},{"location":"en/end-user/insight/quickstart/other/install-agentindce.html#issue-one","title":"Issue One","text":"

                                          Most Suanova 4.0 clusters already run dx-insight as their monitoring system, so installing insight-agent conflicts with the existing Prometheus Operator in the cluster and the installation cannot proceed smoothly.

                                          "},{"location":"en/end-user/insight/quickstart/other/install-agentindce.html#solution","title":"Solution","text":"

                                          Enable the --deny-namespaces parameter on both Prometheus Operators, so that the operator shipped with dx-insight is retained and coexists with the operator installed by insight-agent in 5.0.

                                          "},{"location":"en/end-user/insight/quickstart/other/install-agentindce.html#steps","title":"Steps","text":"
                                          1. Log in to the console.
                                          2. Enable the --deny-namespaces parameter in the two prometheus operators respectively.
                                          3. Run the following command (for reference only; replace the Prometheus Operator name and namespace with the actual values):

                                            kubectl edit deploy insight-agent-kube-prometh-operator -n insight-system\n

                                          Note

                                          • The dx-insight components are deployed in the dx-insight namespace, and insight-agent is deployed in the insight-system namespace. Add --deny-namespaces=insight-system to the Prometheus Operator in dx-insight, and add --deny-namespaces=dx-insight to the Prometheus Operator in insight-agent (a sketch of the resulting Deployment fragment follows these notes).
                                          • With only the deny namespaces added, both Prometheus Operators continue to watch all other namespaces, so the collection resources under kube-system and business namespaces are unaffected.
                                          • Watch out for node-exporter port conflicts (see the supplementary explanation below).
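
                                          A sketch of the edited Deployment fragment on the insight-agent side (the container name follows the kube-prometheus-stack chart and may differ in your environment):

                                            spec:\n  template:\n    spec:\n      containers:\n        - name: kube-prometheus-stack\n          args:\n            # ...existing operator args...\n            - --deny-namespaces=dx-insight\n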
                                          "},{"location":"en/end-user/insight/quickstart/other/install-agentindce.html#supplementary-explanation","title":"Supplementary Explanation","text":"

                                          The open-source node-exporter enables hostNetwork by default and listens on port 9100. If the cluster's existing monitoring system has already installed node-exporter, installing insight-agent will cause a port conflict and node-exporter cannot run normally.

                                          Note

                                          Insight's node-exporter enables additional collectors for specific metrics, so installing it is still recommended.

                                          Modifying the port in the installation command is currently not supported. After helm install insight-agent, you need to manually change the relevant ports of the insight node-exporter DaemonSet and Service.
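
                                          A sketch of the manual step (the resource names follow the default release naming and may differ in your environment):

                                            kubectl -n insight-system edit daemonset insight-agent-node-exporter\nkubectl -n insight-system edit service insight-agent-node-exporter\n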

                                          "},{"location":"en/end-user/insight/quickstart/other/install-agentindce.html#issue-two","title":"Issue Two","text":"

                                          After the Insight Agent is deployed successfully, Fluent Bit does not collect Suanova 4.0 logs.

                                          "},{"location":"en/end-user/insight/quickstart/other/install-agentindce.html#solution_1","title":"Solution","text":"

                                          The Docker storage directory of Suanova 4.0 is /var/lib/containers, which differs from the path configured in insight-agent, so the logs are not collected.

                                          "},{"location":"en/end-user/insight/quickstart/other/install-agentindce.html#steps_1","title":"Steps","text":"
                                          1. Log in to the console.
                                          2. Modify the following parameters in the insight-agent Chart.

                                            fluent-bit:\n  daemonSetVolumeMounts:\n    - name: varlog\n      mountPath: /var/log\n    - name: varlibdockercontainers\n-     mountPath: /var/lib/docker/containers\n+     mountPath: /var/lib/containers/docker/containers\n      readOnly: true\n    - name: etcmachineid\n      mountPath: /etc/machine-id\n      readOnly: true\n    - name: dmesg\n      mountPath: /var/log/dmesg\n      readOnly: true\n
                                          "},{"location":"en/end-user/insight/quickstart/res-plan/modify-vms-disk.html","title":"vmstorage Disk Expansion","text":"

                                          This article describes the method for expanding the vmstorage disk. Please refer to the vmstorage disk capacity planning for the specifications of the vmstorage disk.

                                          "},{"location":"en/end-user/insight/quickstart/res-plan/modify-vms-disk.html#procedure","title":"Procedure","text":""},{"location":"en/end-user/insight/quickstart/res-plan/modify-vms-disk.html#enable-storageclass-expansion","title":"Enable StorageClass expansion","text":"
                                          1. Log in to the AI platform as a global service cluster administrator. Click Container Management -> Clusters and go to the details of the kpanda-global-cluster cluster.

                                          2. Select the left navigation menu Container Storage -> PVCs and find the PVC bound to the vmstorage.

                                          3. Click a vmstorage PVC to enter the details of the volume claim for vmstorage and confirm the StorageClass that the PVC is bound to.

                                          4. Select the left navigation menu Container Storage -> Storage Class and find local-path . Click the \u2507 on the right side of the target and select Edit in the popup menu.

                                          5. Enable Scale Up and click OK .
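
                                          Enabling expansion in the UI corresponds to the allowVolumeExpansion field of the StorageClass, so the same switch can also be flipped from the command line:

                                            kubectl patch storageclass local-path -p '{\"allowVolumeExpansion\": true}'\n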

                                          "},{"location":"en/end-user/insight/quickstart/res-plan/modify-vms-disk.html#modify-the-disk-capacity-of-vmstorage","title":"Modify the disk capacity of vmstorage","text":"
                                          1. Log in to the AI platform as a global service cluster administrator and go to the details of the kpanda-global-cluster cluster.

                                          2. Select the left navigation menu CRDs and find the custom resource for vmcluster .

                                          3. Click the custom resource for vmcluster to enter the details page, switch to the insight-system namespace, and select Edit YAML from the right menu of insight-victoria-metrics-k8s-stack .

                                          4. Modify the vmstorage disk configuration in the YAML as needed and click OK .

                                          5. Select the left navigation menu Container Storage -> PVCs again and find the volume claim bound to vmstorage. Confirm that the modification has taken effect. In the details page of a PVC, click the associated storage source (PV).

                                          6. Open the volume details page and click the Update button in the upper right corner.

                                          7. After modifying the Capacity , click OK and wait for a moment until the expansion is successful.
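
                                          Once the StorageClass allows expansion, the PVC can also be resized directly from the command line. A sketch (the PVC name and target size are placeholders):

                                            kubectl -n insight-system patch pvc <vmstorage-pvc-name> \\\n  -p '{\"spec\":{\"resources\":{\"requests\":{\"storage\":\"200Gi\"}}}}'\n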

                                          "},{"location":"en/end-user/insight/quickstart/res-plan/modify-vms-disk.html#clone-the-storage-volume","title":"Clone the storage volume","text":"

                                          If the storage volume expansion fails, you can refer to the following method to clone the storage volume.

                                          1. Log in to the AI platform as a global service cluster administrator and go to the details of the kpanda-global-cluster cluster.

                                          2. Select the left navigation menu Workloads -> StatefulSets and find the statefulset for vmstorage . Click the \u2507 on the right side of the target and select Status -> Stop -> OK in the popup menu.

                                          3. Log in to a master node of the kpanda-global-cluster cluster from the command line, then run the following command to copy the vm-data directory out of the vmstorage container and store the metric data locally:

                                            kubectl cp -n insight-system vmstorage-insight-victoria-metrics-k8s-stack-1:vm-data ./vm-data\n
                                          4. Log in to the AI platform and go to the details of the kpanda-global-cluster cluster. Select the left navigation menu Container Storage -> PVs , click Clone in the upper right corner, and modify the capacity of the volume.

                                          5. Delete the previous data volume of vmstorage.

                                          6. Wait a moment until the volume claim is bound to the cloned data volume, then run the following command to import the data exported in step 3 back into the container, and start the previously paused vmstorage .

                                            kubectl cp -n insight-system ./vm-data vmstorage-insight-victoria-metrics-k8s-stack-1:vm-data\n
                                          "},{"location":"en/end-user/insight/quickstart/res-plan/prometheus-res.html","title":"Prometheus Resource Planning","text":"

                                          In practice, the CPU, memory, and other resource usage of Prometheus depends on the number of containers in the cluster and on whether Istio is enabled, and it can exceed the configured resources.

                                          To keep Prometheus running normally in clusters of different sizes, adjust its resources according to the actual cluster size.

                                          "},{"location":"en/end-user/insight/quickstart/res-plan/prometheus-res.html#reference-resource-planning","title":"Reference resource planning","text":"

                                          With the service mesh disabled, test statistics show the following relationship between system job series and Pods: Series count = 800 × Pod count

                                          With the service mesh enabled, the Istio-related metrics generated by the Pods add: Series count = 768 × Pod count

                                          "},{"location":"en/end-user/insight/quickstart/res-plan/prometheus-res.html#when-the-service-mesh-is-not-enabled","title":"When the service mesh is not enabled","text":"

                                          The following Prometheus resource planning is recommended when the service mesh is not enabled:

                                          | Cluster size (Pod count) | Metrics (service mesh disabled) | CPU (cores) | Memory |
                                          | --- | --- | --- | --- |
                                          | 100 | 80k | Request: 0.5 / Limit: 1 | Request: 2GB / Limit: 4GB |
                                          | 200 | 160k | Request: 1 / Limit: 1.5 | Request: 3GB / Limit: 6GB |
                                          | 300 | 240k | Request: 1 / Limit: 2 | Request: 3GB / Limit: 6GB |
                                          | 400 | 320k | Request: 1 / Limit: 2 | Request: 4GB / Limit: 8GB |
                                          | 500 | 400k | Request: 1.5 / Limit: 3 | Request: 5GB / Limit: 10GB |
                                          | 800 | 640k | Request: 2 / Limit: 4 | Request: 8GB / Limit: 16GB |
                                          | 1000 | 800k | Request: 2.5 / Limit: 5 | Request: 9GB / Limit: 18GB |
                                          | 2000 | 1.6M | Request: 3.5 / Limit: 7 | Request: 20GB / Limit: 40GB |
                                          | 3000 | 2.4M | Request: 4 / Limit: 8 | Request: 33GB / Limit: 66GB |
                                          "},{"location":"en/end-user/insight/quickstart/res-plan/prometheus-res.html#when-the-service-mesh-feature-is-enabled","title":"When the service mesh feature is enabled","text":"

                                          The following Prometheus resource planning is recommended when the service mesh is enabled:

                                          | Cluster size (Pod count) | Metrics (service mesh enabled) | CPU (cores) | Memory |
                                          | --- | --- | --- | --- |
                                          | 100 | 150k | Request: 1 / Limit: 2 | Request: 3GB / Limit: 6GB |
                                          | 200 | 310k | Request: 2 / Limit: 3 | Request: 5GB / Limit: 10GB |
                                          | 300 | 460k | Request: 2 / Limit: 4 | Request: 6GB / Limit: 12GB |
                                          | 400 | 620k | Request: 2 / Limit: 4 | Request: 8GB / Limit: 16GB |
                                          | 500 | 780k | Request: 3 / Limit: 6 | Request: 10GB / Limit: 20GB |
                                          | 800 | 1.25M | Request: 4 / Limit: 8 | Request: 15GB / Limit: 30GB |
                                          | 1000 | 1.56M | Request: 5 / Limit: 10 | Request: 18GB / Limit: 36GB |
                                          | 2000 | 3.12M | Request: 7 / Limit: 14 | Request: 40GB / Limit: 80GB |
                                          | 3000 | 4.68M | Request: 8 / Limit: 16 | Request: 65GB / Limit: 130GB |

                                          Note

                                          1. The Pod count in the table refers to Pods running stably in the cluster. If a large number of Pods restart, the series count spikes for a short period and resources need to be adjusted accordingly.
                                          2. Prometheus keeps two hours of data in memory by default, and enabling Remote Write in the cluster consumes additional memory; a resource surge ratio of 2 is recommended.
                                          3. The values in the table are recommendations for general situations. If your environment has precise resource requirements, check the actual resource usage of the corresponding Prometheus after the cluster has been running for a while and tune accordingly.
                                          "},{"location":"en/end-user/insight/quickstart/res-plan/vms-res-plan.html","title":"vmstorage disk capacity planning","text":"

                                          vmstorage stores multicluster metrics for observability. To ensure its stability, the disk capacity of vmstorage needs to be adjusted according to the number of clusters and their size. For more information, please refer to vmstorage retention period and disk space.

                                          "},{"location":"en/end-user/insight/quickstart/res-plan/vms-res-plan.html#test-results","title":"Test Results","text":"

                                          After observing the vmstorage disks of clusters of different sizes for 14 days, we found that vmstorage disk usage is positively correlated with the amount of metrics it stores and the disk usage of a single data point.

                                          1. Instantaneous metric ingestion: increase(vm_rows{type != \"indexdb\"}[30s]) gives the amount of metrics ingested within 30s.
                                          2. Disk usage of a single data point: sum(vm_data_size_bytes{type!=\"indexdb\"}) / sum(vm_rows{type != \"indexdb\"})
                                          "},{"location":"en/end-user/insight/quickstart/res-plan/vms-res-plan.html#calculation-method","title":"calculation method","text":"

                                          Disk usage = instantaneous metric amount × 2 × disk usage of a single data point × 60 × 24 × retention period (days)

                                          Parameter Description:

                                          1. Disk usage is measured in bytes.
                                          2. Retention period (days) × 60 × 24 converts the retention period from days into minutes for the calculation.
                                          3. The default collection interval of Prometheus in the Insight Agent is 30s, so twice the metric amount is generated per minute (hence the factor of 2).
                                          4. The default retention period in vmstorage is 1 month; please refer to Modify System Configuration to change it.
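
                                          As a sanity check, the two expressions above can be combined into a rough bytes-per-day estimate directly in the query UI; multiply the result by the retention period in days to approximate total usage. A hedged sketch:

                                            sum(increase(vm_rows{type != \"indexdb\"}[30s])) * 2 * 60 * 24\n  * scalar(sum(vm_data_size_bytes{type != \"indexdb\"}) / sum(vm_rows{type != \"indexdb\"}))\n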

                                          Warning

                                          This formula is a general estimate. It is recommended to reserve redundant disk capacity beyond the calculated result to ensure the normal operation of vmstorage.

                                          "},{"location":"en/end-user/insight/quickstart/res-plan/vms-res-plan.html#reference-capacity","title":"reference capacity","text":"

                                          The data in the table is calculated based on the default retention period of one month (30 days), with the disk usage of a single data point taken as 0.9 bytes. In a multicluster scenario, the Pod count is the total number of Pods across all clusters.

                                          "},{"location":"en/end-user/insight/quickstart/res-plan/vms-res-plan.html#when-the-service-mesh-is-not-enabled","title":"When the service mesh is not enabled","text":"Cluster size (number of Pods) Metrics Disk capacity 100 8W 6 GiB 200 16W 12 GiB 300 24w 18 GiB 400 32w 24 GiB 500 40w 30 GiB 800 64w 48 GiB 1000 80W 60 GiB 2000 160w 120 GiB 3000 240w 180 GiB"},{"location":"en/end-user/insight/quickstart/res-plan/vms-res-plan.html#when-the-service-mesh-is-enabled","title":"When the service mesh is enabled","text":"Cluster size (number of Pods) Metrics Disk capacity 100 15W 12 GiB 200 31w 24 GiB 300 46w 36 GiB 400 62w 48 GiB 500 78w 60 GiB 800 125w 94 GiB 1000 156w 120 GiB 2000 312w 235 GiB 3000 468w 350 GiB"},{"location":"en/end-user/insight/quickstart/res-plan/vms-res-plan.html#example","title":"Example","text":"

                                          There are two clusters in the AI platform: 500 Pods run in the global management cluster (service mesh enabled) and 1000 Pods run in the worker cluster (service mesh disabled), with metrics expected to be retained for 30 days.

                                          • The amount of metrics in the global management cluster is 800 × 500 + 768 × 500 = 784000
                                          • The amount of metrics in the worker cluster is 800 × 1000 = 800000

                                          Then the vmstorage disk capacity should be set to (784000 + 800000) × 2 × 0.9 × 60 × 24 × 30 = 123171840000 bytes ≈ 115 GiB

                                          Note

                                          For the relationship between the number of metrics and the number of Pods in the cluster, please refer to Prometheus Resource Planning.

                                          "},{"location":"en/end-user/insight/system-config/modify-config.html","title":"Modify system configuration","text":"

                                          Observability persists metric, log, and trace data by default. Users can modify the system configuration by referring to this page.

                                          "},{"location":"en/end-user/insight/system-config/modify-config.html#how-to-modify-the-metric-data-retention-period","title":"How to modify the metric data retention period","text":"

                                          Refer to the following steps to modify the metric data retention period.

                                          1. Run the following command:

                                            kubectl edit vmcluster insight-victoria-metrics-k8s-stack -n insight-system\n
                                          2. In the YAML file, the default value of retentionPeriod is 14 , and the unit is days. Modify the parameter according to your needs.

                                            apiVersion: operator.victoriametrics.com/v1beta1\nkind: VMCluster\nmetadata:\n  annotations:\n    meta.helm.sh/release-name: insight\n    meta.helm.sh/release-namespace: insight-system\n  creationTimestamp: \"2022-08-25T04:31:02Z\"\n  finalizers:\n  - apps.victoriametrics.com/finalizer\n  generation: 2\n  labels:\n    app.kubernetes.io/instance: insight\n    app.kubernetes.io/managed-by: Helm\n    app.kubernetes.io/name: victoria-metrics-k8s-stack\n    app.kubernetes.io/version: 1.77.2\n    helm.sh/chart: victoria-metrics-k8s-stack-0.9.3\n  name: insight-victoria-metrics-k8s-stack\n  namespace: insight-system\n  resourceVersion: \"123007381\"\n  uid: 55cee8d6-c651-404b-b2c9-50603b405b54\nspec:\n  replicationFactor: 1\n  retentionPeriod: \"14\"\n  vminsert:\n    extraArgs:\n      maxLabelsPerTimeseries: \"45\"\n    image:\n      repository: docker.m.daocloud.io/victoriametrics/vminsert\n      tag: v1.80.0-cluster\n      replicaCount: 1\n
                                          3. After saving the modification, the Pod of the component responsible for storing metrics restarts automatically; just wait a moment.
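
                                          For a non-interactive change, a merge patch on the custom resource achieves the same result. A sketch, using a 30-day retention:

                                            kubectl -n insight-system patch vmcluster insight-victoria-metrics-k8s-stack \\\n  --type merge -p '{\"spec\":{\"retentionPeriod\":\"30\"}}'\n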

                                          "},{"location":"en/end-user/insight/system-config/modify-config.html#how-to-modify-the-log-data-storage-duration","title":"How to modify the log data storage duration","text":"

                                          Refer to the following steps to modify the log data retention period:

                                          "},{"location":"en/end-user/insight/system-config/modify-config.html#method-1-modify-the-json-file","title":"Method 1: Modify the Json file","text":"
                                          1. Modify the max_age parameter in the rollover field of the following request and set the retention period; the default storage period is 7d . Replace http://localhost:9200 with the access address of Elasticsearch.

                                            curl -X PUT \"http://localhost:9200/_ilm/policy/insight-es-k8s-logs-policy?pretty\" -H 'Content-Type: application/json' -d'\n{\n\"policy\": {\n    \"phases\": {\n        \"hot\": {\n            \"min_age\": \"0ms\",\n            \"actions\": {\n            \"set_priority\": {\n                \"priority\": 100\n            },\n            \"rollover\": {\n                \"max_age\": \"7d\",\n                \"max_size\": \"10gb\"\n            }\n            }\n        },\n    \"warm\": {\n        \"min_age\": \"10d\",\n        \"actions\": {\n        \"forcemerge\": {\n            \"max_num_segments\": 1\n        }\n        }\n    },\n    \"delete\": {\n        \"min_age\": \"30d\",\n        \"actions\": {\n        \"delete\": {}\n        }\n    }\n    }\n}\n}\n
                                          2. After making the modification, run the command above. If it prints the content shown below, the modification succeeded.

                                            {\n\"acknowledged\": true\n}\n
                                          "},{"location":"en/end-user/insight/system-config/modify-config.html#method-2-modify-from-the-ui","title":"Method 2: Modify from the UI","text":"
                                          1. Log in to Kibana and select Stack Management in the left navigation bar.

                                          2. Select Index Lifecycle Policies in the left navigation, find the index insight-es-k8s-logs-policy , and click to enter its details.

                                          3. Expand the Hot phase configuration panel, modify the Maximum age parameter, and set the retention period. The default storage period is 7d .

                                          4. After modification, click Save policy at the bottom of the page to complete the modification.

                                          "},{"location":"en/end-user/insight/system-config/modify-config.html#how-to-modify-the-trace-data-storage-duration","title":"How to modify the trace data storage duration","text":"

                                          Refer to the following steps to modify the trace data retention period:

                                          "},{"location":"en/end-user/insight/system-config/modify-config.html#method-1-modify-the-json-file_1","title":"Method 1: Modify the Json file","text":"
                                          1. Modify the max_age parameter in the rollover field of the following request and set the retention period; the default storage period is 7d . Also replace http://localhost:9200 with the access address of Elasticsearch.

                                            curl -X PUT \"http://localhost:9200/_ilm/policy/jaeger-ilm-policy?pretty\" -H 'Content-Type: application/json' -d'\n{\n\"policy\": {\n    \"phases\": {\n        \"hot\": {\n            \"min_age\": \"0ms\",\n            \"actions\": {\n            \"set_priority\": {\n                \"priority\": 100\n            },\n            \"rollover\": {\n                \"max_age\": \"7d\",\n                \"max_size\": \"10gb\"\n            }\n            }\n        },\n    \"warm\": {\n        \"min_age\": \"10d\",\n        \"actions\": {\n        \"forcemerge\": {\n            \"max_num_segments\": 1\n        }\n        }\n    },\n    \"delete\": {\n        \"min_age\": \"30d\",\n        \"actions\": {\n        \"delete\": {}\n        }\n    }\n    }\n}\n}\n
                                          2. After making the modification, run the command above on the console. If it prints the content shown below, the modification succeeded.

                                            {\n\"acknowledged\": true\n}\n
                                          "},{"location":"en/end-user/insight/system-config/modify-config.html#method-2-modify-from-the-ui_1","title":"Method 2: Modify from the UI","text":"
                                          1. Log in to Kibana and select Stack Management in the left navigation bar.

                                          2. Select Index Lifecycle Policies in the left navigation, find the index jaeger-ilm-policy , and click to enter its details.

                                          3. Expand the Hot phase configuration panel, modify the Maximum age parameter, and set the retention period. The default storage period is 7d .

                                          4. After modification, click Save policy at the bottom of the page to complete the modification.

                                          "},{"location":"en/end-user/insight/system-config/system-component.html","title":"System Components","text":"

                                          On the system component page, you can quickly view the running status of the system components in Insight. When a system component fails, some features in Insight will be unavailable.

                                          1. Go to the Insight product module.
                                          2. In the left navigation bar, select System Management -> System Components .
                                          "},{"location":"en/end-user/insight/system-config/system-component.html#component-description","title":"Component description","text":"Module Component Name Description Metrics vminsert-insight-victoria-metrics-k8s-stack Responsible for writing the metric data collected by Prometheus in each cluster to the storage component. If this component is abnormal, the metric data of the worker cluster cannot be written. Metrics vmalert-insight-victoria-metrics-k8s-stack Responsible for taking effect of the recording and alert rules configured in the VM Rule, and sending the triggered alert rules to alertmanager. Metrics vmalertmanager-insight-victoria-metrics-k8s-stack is responsible for sending messages when alerts are triggered. If this component is abnormal, the alert information cannot be sent. Metrics vmselect-insight-victoria-metrics-k8s-stack Responsible for querying metrics data. If this component is abnormal, the metric cannot be queried. Metrics vmstorage-insight-victoria-metrics-k8s-stack Responsible for storing multicluster metrics data. Dashboard grafana-deployment Provide monitoring panel capability. The exception of this component will make it impossible to view the built-in dashboard. Link insight-jaeger-collector Responsible for receiving trace data in opentelemetry-collector and storing it. Link insight-jaeger-query Responsible for querying the trace data collected in each cluster. Link insight-opentelemetry-collector Responsible for receiving trace data forwarded by each sub-cluster Log elasticsearch Responsible for storing the log data of each cluster."},{"location":"en/end-user/insight/system-config/system-config.html","title":"System Configuration","text":"

                                          System Configuration displays the default retention periods of metrics, logs, and traces, as well as the default Apdex threshold.

                                          1. Click the right navigation bar and select System Configuration .

                                          2. Currently, only the retention period of historical alerts can be modified. Click Edit and enter the target duration.

                                            When the storage duration is set to \"0\", the historical alerts will not be cleared.

                                          Note

                                          To modify other configurations, see How to modify the system configuration?

                                          "},{"location":"en/end-user/insight/trace/service.html","title":"Service Insight","text":"

                                          In Insight , a service refers to a group of workloads that provide the same behavior for incoming requests. Service insight helps you observe the performance and status of applications at runtime by using the OpenTelemetry SDK.

                                          For how to use OpenTelemetry, please refer to: Using OTel to give your application insight.

                                          "},{"location":"en/end-user/insight/trace/service.html#glossary","title":"Glossary","text":"
                                          • Service: A service represents a group of workloads that provide the same behavior for incoming requests. You can define the service name when using the OpenTelemetry SDK or use the name defined in Istio.
                                          • Operation: An operation refers to a specific request or action handled by a service. Each span has an operation name.
                                          • Outbound Traffic: Outbound traffic refers to all the traffic generated by the current service when making requests.
                                          • Inbound Traffic: Inbound traffic refers to all the traffic initiated by the upstream service targeting the current service.
                                          "},{"location":"en/end-user/insight/trace/service.html#steps","title":"Steps","text":"

                                          The Services List page displays key metrics such as throughput rate, error rate, and request latency for all services that have been instrumented with distributed tracing. You can filter services based on clusters or namespaces and sort the list by throughput rate, error rate, or request latency. By default, the data displayed in the list is for the last hour, but you can customize the time range.

                                          Follow these steps to view service insight metrics:

                                          1. Go to the Insight product module.

                                          2. Select Tracing -> Services from the left navigation bar.

                                            Attention

                                            1. If the namespace of a service in the list is unknown , it means that the service has not been properly instrumented. We recommend reconfiguring the instrumentation.
                                            2. If multiple services have the same name and none of them have the correct Namespace environment variable configured, the metrics displayed in the list and service details page will be aggregated for all those services.
                                          3. Click a service name (taking insight-system as an example) to view the detailed metrics and operation metrics for that service.

                                            1. In the Service Topology section, you can view the service topology one layer above or below the current service. When you hover over a node, you can see its information.
                                            2. In the Traffic Metrics section, you can view the monitoring metrics for all requests to the service within the past hour (including inbound and outbound traffic).
                                            3. You can use the time selector in the upper right corner to quickly select a time range or specify a custom time range.
                                            4. Sorting is available for throughput, error rate, and request latency in the operation metrics.
                                            5. Clicking on the icon next to an individual operation will take you to the Traces page to quickly search for related traces.

                                          "},{"location":"en/end-user/insight/trace/service.html#service-metric-explanations","title":"Service Metric Explanations","text":"Metric Description Throughput Rate The number of requests processed within a unit of time. Error Rate The ratio of erroneous requests to the total number of requests within the specified time range. P50 Request Latency The response time within which 50% of requests complete. P95 Request Latency The response time within which 95% of requests complete. P99 Request Latency The response time within which 99% of requests complete."},{"location":"en/end-user/insight/trace/topology-helper.html","title":"Service Topology Element Explanations","text":"

                                          The service topology provided by Observability allows you to quickly identify the request relationships between services and determine the health status of services based on different colors. The health status is determined based on the request latency and error rate of the service's overall traffic. This article explains the elements in the service topology.

                                          "},{"location":"en/end-user/insight/trace/topology-helper.html#node-status-explanation","title":"Node Status Explanation","text":"

                                          The node health status is determined based on the error rate and request latency of the service's overall traffic, following these rules:

                                          | Color | Status | Rules |
                                          | --- | --- | --- |
                                          | Gray | Healthy | Error rate equals 0% and request latency is less than 100ms |
                                          | Orange | Warning | Error rate in (0, 5%) or request latency in (100ms, 200ms) |
                                          | Red | Abnormal | Error rate in (5%, 100%) or request latency in (200ms, +Infinity) |
                                          "},{"location":"en/end-user/insight/trace/topology-helper.html#connection-status-explanation","title":"Connection Status Explanation","text":"
                                          | Color | Status | Rules |
                                          | --- | --- | --- |
                                          | Green | Healthy | Error rate equals 0% and request latency is less than 100ms |
                                          | Orange | Warning | Error rate in (0, 5%) or request latency in (100ms, 200ms) |
                                          | Red | Abnormal | Error rate in (5%, 100%) or request latency in (200ms, +Infinity) |
                                          "},{"location":"en/end-user/insight/trace/topology.html","title":"Service Map","text":"

                                          Service map is a visual representation of the connections, communication, and dependencies between services. It provides insights into the service-to-service interactions, allowing you to view the calls and performance of services within a specified time range. The connections between nodes in the topology map represent the existence of service-to-service calls during the queried time period.

                                          "},{"location":"en/end-user/insight/trace/topology.html#prerequisites","title":"Prerequisites","text":"
                                          1. Insight Agent is installed in the cluster and the applications are in the Running state.
                                          2. Services have been instrumented for distributed tracing using Operator or OpenTelemetry SDK.
                                          "},{"location":"en/end-user/insight/trace/topology.html#steps","title":"Steps","text":"
                                          1. Go to the Insight product module.

                                          2. Select Tracing -> Service Map from the left navigation bar.

                                          3. In the Service Map, you can perform the following actions:

                                            • Click a node to slide out the details of the service on the right side. Here, you can view metrics such as request latency, throughput, and error rate for the service. Clicking on the service name takes you to the service details page.
                                            • Hover over the connections to view the traffic metrics between the two services.
                                            • Click Display Settings to configure which elements are displayed in the service map.

                                          "},{"location":"en/end-user/insight/trace/topology.html#other-nodes","title":"Other Nodes","text":"

                                          In the Service Map, there can be nodes that are not part of the cluster. These external nodes can be categorized into three types:

                                          • Database
                                          • Message Queue
                                          • Virtual Node

                                          • If a service makes a request to a Database or Message Queue, these two types of nodes will be displayed by default in the topology map. However, Virtual Nodes represent nodes outside the cluster or services not integrated into the trace, and they will not be displayed by default in the map.

                                          • When a service makes a request to MySQL, PostgreSQL, or Oracle Database, the detailed database type can be seen in the map.

                                          "},{"location":"en/end-user/insight/trace/topology.html#enabling-virtual-nodes","title":"Enabling Virtual Nodes","text":"
                                          1. Update the insight-server chart values, locate the parameter that controls virtual services, and change its value from false to true.
                                          2. In the display settings of the service map, check the Virtual Services option to enable it.
                                          "},{"location":"en/end-user/insight/trace/trace.html","title":"Trace Query","text":"

                                          On the trace query page, you can query detailed information about a call trace by TraceID or filter call traces based on various conditions.

                                          "},{"location":"en/end-user/insight/trace/trace.html#glossary","title":"Glossary","text":"
                                          • TraceID: Used to identify a complete request call trace.
                                          • Operation: Describes the specific operation or event represented by a Span.
                                          • Entry Span: The entry Span represents the first request of the entire call.
                                          • Latency: The duration from receiving the request to completing the response for the entire call trace.
                                          • Span Count: The number of Spans included in the entire trace.
                                          • Start Time: The time when the current trace starts.
                                          • Tag: A collection of key-value pairs that constitute Span tags. Tags are used to annotate and supplement Spans, and each Span can have multiple key-value tag pairs.
                                          "},{"location":"en/end-user/insight/trace/trace.html#steps","title":"Steps","text":"

                                          Please follow these steps to search for a trace:

                                          1. Go to the Insight product module.
                                          2. Select Tracing -> Traces from the left navigation bar.

                                            Note

                                            Sorting by Span Count, Latency, and Start Time is supported in the list.

                                          3. Click the TraceID Query in the filter bar to switch to TraceID search.

                                          4. To search using TraceID, please enter the complete TraceID.

                                          "},{"location":"en/end-user/insight/trace/trace.html#other-operations","title":"Other Operations","text":""},{"location":"en/end-user/insight/trace/trace.html#view-trace-details","title":"View Trace Details","text":"
                                          1. Click the TraceID of a trace in the trace list to view its detailed call information.

                                          "},{"location":"en/end-user/insight/trace/trace.html#associated-logs","title":"Associated Logs","text":"
                                          1. Click the icon on the right side of the trace data to search for associated logs.

                                            • By default, it queries the log data within the duration of the trace and one minute after its completion.
                                            • The queried logs include those with the trace's TraceID in their log text and container logs related to the trace invocation process.
                                          2. Click View More to jump to the Associated Log page with conditions.

                                          3. By default, all logs are searched, but you can filter by the TraceID or the relevant container logs from the trace call process using the dropdown.

                                            Note

                                            Since a trace may span clusters or namespaces, users without sufficient permissions cannot query the associated logs for that trace.

                                          "},{"location":"en/end-user/k8s/add-node.html","title":"Adding Worker Nodes","text":"

                                          If there are not enough nodes, you can add more nodes to the cluster.

                                          "},{"location":"en/end-user/k8s/add-node.html#prerequisites","title":"Prerequisites","text":"
                                          • The AI platform is installed
                                          • An administrator account is available
                                          • A cluster with GPU nodes has been created
                                          • A cloud host has been prepared
                                          "},{"location":"en/end-user/k8s/add-node.html#steps-to-add-nodes","title":"Steps to Add Nodes","text":"
                                          1. Log into the AI platform as an administrator.
                                          2. Navigate to Container Management -> Clusters, and click the name of the target cluster.

                                          3. On the cluster overview page, click Nodes, then click the Add Node button on the right.

                                          4. Follow the wizard to fill in the parameters and click OK.

                                          5. In the pop-up window, click OK.

                                          6. Return to the node list; the status of the newly added node will be Connecting. After a few minutes, when the status changes to Running, it indicates that the connection was successful.

                                          Tip

                                          For newly connected nodes, it may take an additional 2-3 minutes to recognize the GPU.
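If you have kubectl access to the cluster, a quick way to confirm from the command line that the node has joined and its GPU is recognized (the nvidia.com/gpu resource name is an assumption that depends on your GPU vendor and plugin):

kubectl get nodes\nkubectl describe node ${node_name} | grep -A 8 Capacity\n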

                                          "},{"location":"en/end-user/k8s/create-k8s.html","title":"Creating a Kubernetes Cluster in the Cloud","text":"

Deploying a Kubernetes cluster supports efficient scheduling and management of AI computing resources, enables elastic scaling, provides high availability, and optimizes model training and inference.

                                          "},{"location":"en/end-user/k8s/create-k8s.html#prerequisites","title":"Prerequisites","text":"
                                          • The AI platform is installed
                                          • An administrator account is available
                                          • A physical machine with a GPU is prepared
• Two CIDR blocks are allocated (a /18 Pod CIDR and a /18 SVC CIDR that must not conflict with existing networks)
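For illustration only, a hypothetical pair of non-conflicting /18 ranges might look like:

Pod CIDR: 10.233.64.0/18\nSVC CIDR: 10.233.0.0/18\n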
                                          "},{"location":"en/end-user/k8s/create-k8s.html#steps-to-create","title":"Steps to Create","text":"
                                          1. Log into the AI platform as an administrator.
                                          2. Create and launch 3 cloud hosts without GPU to serve as Master nodes for the cluster.

                                            • Configure resources: 16 CPU cores, 32 GB RAM, 200 GB system disk (ReadWriteOnce)
                                            • Select Bridge network mode
                                            • Set the root password or add an SSH public key for SSH connection
                                            • Record the IPs of the 3 hosts
                                          3. Navigate to Container Management -> Clusters, and click the Create Cluster button on the right.

                                          4. Follow the wizard to configure various parameters of the cluster.

                                          5. Wait for the cluster creation to complete.

                                          6. In the cluster list, find the newly created cluster, click the cluster name, navigate to Helm Apps -> Helm Charts, and search for metax-gpu-extensions in the search box, then click the card.

                                          7. Click the Install button on the right to start installing the GPU plugin.

                                          8. Automatically return to the Helm App list and wait for the status of metax-gpu-extensions to change to Deployed.

                                          9. At this point, the cluster has been successfully created. You can check the nodes included in the cluster. You can now create AI workloads and use GPUs.
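With command-line access, a quick sanity check is to confirm the Helm release and the cluster nodes (release name from the steps above):

helm list -A | grep metax-gpu-extensions\nkubectl get nodes\n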

                                          Next step: Create AI Workloads

                                          "},{"location":"en/end-user/k8s/remove-node.html","title":"Removing GPU Worker Nodes","text":"

                                          The cost of GPU resources is relatively high. If GPUs are not needed temporarily, you can remove the worker nodes with GPUs. The following steps also apply to removing regular worker nodes.

                                          "},{"location":"en/end-user/k8s/remove-node.html#prerequisites","title":"Prerequisites","text":"
                                          • The AI platform is installed
                                          • An administrator account is available
                                          • A cluster with GPU nodes has been created
                                          "},{"location":"en/end-user/k8s/remove-node.html#steps-to-remove","title":"Steps to Remove","text":"
                                          1. Log into the AI platform as an administrator.
                                          2. Navigate to Container Management -> Clusters, and click the name of the target cluster.

                                          3. Enter the cluster overview page, click Nodes, find the node to be removed, click the \u2507 on the right side of the list, and select Remove Node from the pop-up menu.

                                          4. In the pop-up window, enter the node name, and after confirming it is correct, click Delete.

                                          5. You will automatically return to the node list, where the status will be Removing. After a few minutes, refresh the page, and if the node is no longer present, it indicates that the node has been successfully removed.

                                          6. After removing the node from the UI list, SSH into the removed node's host and execute the shutdown command.
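For example, on most Linux distributions the host can be shut down with:

sudo shutdown -h now\n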

                                          Tip

                                          After removing the node in the UI and shutting it down, the data on the node is not immediately deleted; the node's data will be retained for a period of time.

                                          "},{"location":"en/end-user/kpanda/backup/index.html","title":"Backup and Restore","text":"

                                          Backup and restore are essential aspects of system management. In practice, it is important to first back up the data of the system at a specific point in time and securely store the backup. In case of incidents such as data corruption, loss, or accidental deletion, the system can be quickly restored based on the previous backup data, reducing downtime and minimizing losses.

                                          • In real production environments, services may be deployed across different clouds, regions, or availability zones. If one infrastructure faces a failure, organizations need to quickly restore applications in other available environments. In such cases, cross-cloud or cross-cluster backup and restore become crucial.
                                          • Large-scale systems often involve multiple roles and users with complex permission management systems. With many operators involved, accidents caused by human error can lead to system failures. In such scenarios, the ability to roll back the system quickly using previously backed-up data is necessary. Relying solely on manual troubleshooting, fault repair, and system recovery can be time-consuming, resulting in prolonged system unavailability and increased losses for organizations.
                                          • Additionally, factors like network attacks, natural disasters, and equipment malfunctions can also cause data accidents.

                                          Therefore, backup and restore are vital as the last line of defense for maintaining system stability and ensuring data security.

                                          Backups are typically classified into three types: full backups, incremental backups, and differential backups. Currently, AI platform supports full backups and incremental backups.

                                          The backup and restore provided by AI platform can be divided into two categories: Application Backup and ETCD Backup. It supports both manual backups and scheduled automatic backups using CronJobs.

                                          • Application Backup

                                            Application backup refers to backing up data of a specific workload in the cluster and then restoring that data either within the same cluster or in another cluster. It supports backing up all resources under a namespace or filtering resources by specific labels.

                                            Application backup also supports cross-cluster backup of stateful applications. For detailed steps, refer to the Backup and Restore MySQL Applications and Data Across Clusters guide.

                                          • etcd Backup

                                            etcd is the data storage component of Kubernetes. Kubernetes stores its own component's data and application data in etcd. Therefore, backing up etcd is equivalent to backing up the entire cluster's data, allowing quick restoration of the cluster to a previous state in case of failures.

                                            It's worth noting that currently, restoring etcd backup data is only supported within the same cluster (the original cluster). To learn more about related best practices, refer to the ETCD Backup and Restore guide.
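For reference, outside the platform a manual etcd snapshot can be taken with etcdctl; the endpoint and certificate paths below are the standard-cluster defaults mentioned later in this document and may differ in your environment:

ETCDCTL_API=3 etcdctl snapshot save /var/backups/etcd-snapshot.db --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/ssl/etcd/ca.crt --cert=/etc/kubernetes/ssl/apiserver-etcd-client.crt --key=/etc/kubernetes/ssl/apiserver-etcd-client.key\n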

                                          "},{"location":"en/end-user/kpanda/backup/deployment.html","title":"Application Backup","text":"

This article explains how to back up applications in AI platform. The demo application used in this tutorial is called dao-2048 , which is a deployment.

                                          "},{"location":"en/end-user/kpanda/backup/deployment.html#prerequisites","title":"Prerequisites","text":"

                                          Before backing up a deployment, the following prerequisites must be met:

                                          • Integrate a Kubernetes cluster or create a Kubernetes cluster in the Container Management module, and be able to access the UI interface of the cluster.

                                          • Create a Namespace and a user.

                                          • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                          • Install the velero component, and ensure the velero component is running properly.

                                          • Create a deployment (the workload in this tutorial is named dao-2048 ), and label the deployment with app: dao-2048 .
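If the label is missing, it can be added with kubectl (replace the namespace placeholder with your own):

kubectl label deployment dao-2048 app=dao-2048 -n ${namespace}\n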

                                          "},{"location":"en/end-user/kpanda/backup/deployment.html#backup-workload","title":"Backup workload","text":"

                                          Follow the steps below to backup the deployment dao-2048 .

                                          1. Enter the Container Management module, click Backup Recovery -> Application Backup on the left navigation bar, and enter the Application Backup list page.

2. On the Application Backup list page, select the cluster where velero and the dao-2048 application have been installed. Click Backup Plan in the upper right corner to create a new backup plan.

                                          3. Refer to the instructions below to fill in the backup configuration.

                                            • Name: The name of the new backup plan.
                                            • Source Cluster: The cluster where the application backup plan is to be executed.
                                            • Object Storage Location: The access path of the object storage configured when installing velero on the source cluster.
                                            • Namespace: The namespaces that need to be backed up, multiple selections are supported.
                                            • Advanced Configuration: Back up specific resources in the namespace based on resource labels, such as an application, or do not back up specific resources in the namespace based on resource labels during backup.

                                          4. Refer to the instructions below to set the backup execution frequency, and then click Next .

• Backup Frequency: Set the time period for task execution in minutes, hours, days, weeks, or months. Custom Cron expressions with numbers and * are supported; after you input an expression, its meaning is displayed (see the example after this list). For detailed expression syntax rules, refer to Cron Schedule Syntax.
                                            • Retention Time (days): Set the storage time of backup resources, the default is 30 days, and will be deleted after expiration.
• Backup Data Volume (PV): Whether to back up the data in the data volume (PV); both direct copy and CSI snapshots are supported.

                                              • Direct Replication: directly copy the data in the data volume (PV) for backup;
                                              • Use CSI snapshots: Use CSI snapshots to back up data volumes (PVs). Requires a CSI snapshot type available for backup in the cluster.
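As an example of the Cron syntax mentioned above, the following expression runs a backup every day at 2 AM:

0 2 * * *\n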

                                          5. Click OK , the page will automatically return to the application backup plan list, find the newly created dao-2048 backup plan, and perform the Immediate Execution operation.

                                          6. At this point, the Last Execution State of the cluster will change to in progress . After the backup is complete, you can click the name of the backup plan to view the details of the backup plan.

                                          "},{"location":"en/end-user/kpanda/backup/etcd-backup.html","title":"etcd backup","text":"

etcd backup takes cluster data as its core. In cases such as hardware damage or misconfiguration during development and testing, cluster data can be restored through an etcd backup.

                                          This section will introduce how to realize the etcd backup for clusters. Also see etcd Backup and Restore Best Practices.

                                          "},{"location":"en/end-user/kpanda/backup/etcd-backup.html#prerequisites","title":"Prerequisites","text":"
• Integrated or created a Kubernetes cluster, with access to the cluster's UI.

                                          • Created a namespace, user, and granted NS Admin or higher permissions to the user. For details, refer to Namespace Authorization.

                                          • Prepared a MinIO instance. It is recommended to create it through AI platform's MinIO middleware. For specific steps, refer to MinIO Object Storage.

                                          "},{"location":"en/end-user/kpanda/backup/etcd-backup.html#create-etcd-backup","title":"Create etcd backup","text":"

                                          Follow the steps below to create an etcd backup.

                                          1. Enter Container Management -> Backup Recovery -> etcd Backup page, you can see all the current backup policies. Click Create Backup Policy on the right.

                                          2. Fill in the Basic Information. Then, click Next to automatically verify the connectivity of etcd. If the verification passes, proceed to the next step.

• First select the backup cluster and log in to its terminal
• Enter the etcd endpoint in the format https://${NodeIP}:${Port}.

                                              • In a standard Kubernetes cluster, the default port for etcd is 2379.
                                              • In a Suanova 4.0 cluster, the default port for etcd is 12379.
                                              • In a public cloud managed cluster, you need to contact the relevant developers to obtain the etcd port number. This is because the control plane components of public cloud clusters are maintained and managed by the cloud service provider. Users cannot directly access or view these components, nor can they obtain control plane port information through regular commands (such as kubectl).
                                              Ways to obtain port number
                                              1. Find the etcd Pod in the kube-system namespace

                                                kubectl get po -n kube-system | grep etcd\n
                                              2. Get the port number from the listen-client-urls of the etcd Pod

                                                kubectl get po -n kube-system ${etcd_pod_name} -oyaml | grep listen-client-urls # (1)!\n
                                                1. Replace etcd_pod_name with the actual Pod name

                                                The expected output is as follows, where the number after the node IP is the port number:

                                                - --listen-client-urls=https://127.0.0.1:2379,https://10.6.229.191:2379\n
                                            • Fill in the CA certificate, you can use the following command to view the certificate content. Then, copy and paste it to the proper location:

Standard Kubernetes Cluster:
cat /etc/kubernetes/ssl/etcd/ca.crt\n
Suanova 4.0 Cluster:
cat /etc/daocloud/dce/certs/ca.crt\n
                                            • Fill in the Cert certificate, you can use the following command to view the content of the certificate. Then, copy and paste it to the proper location:

Standard Kubernetes Cluster:
cat /etc/kubernetes/ssl/apiserver-etcd-client.crt\n
Suanova 4.0 Cluster:
cat /etc/daocloud/dce/certs/etcd/server.crt\n
                                            • Fill in the Key, you can use the following command to view the content of the certificate and copy and paste it to the proper location:

Standard Kubernetes Cluster:
cat /etc/kubernetes/ssl/apiserver-etcd-client.key\n
Suanova 4.0 Cluster:
cat /etc/daocloud/dce/certs/etcd/server.key\n

                                            Note

                                            Click How to get below the input box to see how to obtain the proper information on the UI page.
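Optionally, before proceeding you can verify etcd connectivity from a control plane node with etcdctl, using the endpoint and certificates above (a sketch for standard clusters; adjust paths for other environments):

ETCDCTL_API=3 etcdctl endpoint health --endpoints=https://${NodeIP}:2379 --cacert=/etc/kubernetes/ssl/etcd/ca.crt --cert=/etc/kubernetes/ssl/apiserver-etcd-client.crt --key=/etc/kubernetes/ssl/apiserver-etcd-client.key\n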

                                          3. Refer to the following information to fill in the Backup Policy.

                                            • Backup Method: Choose either manual backup or scheduled backup

                                              • Manual Backup: Immediately perform a full backup of etcd data based on the backup configuration.
                                              • Scheduled Backup: Periodically perform full backups of etcd data according to the set backup frequency.
• Backup Chain Length: the maximum number of backups to retain. The default is 30.

                                            • Backup Frequency: it can be per hour, per day, per week or per month, and can also be customized.
                                          4. Refer to the following information to fill in the Storage Path.

                                            • Storage Provider: Default is S3 storage
                                            • Object Storage Access Address: The access address of MinIO
                                            • Bucket: Create a Bucket in MinIO and fill in the Bucket name
                                            • Username: The login username for MinIO
                                            • Password: The login password for MinIO
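If the bucket does not exist yet, it can be created with the MinIO client; the alias and bucket name below are illustrative:

mc alias set myminio http://${minio_address} ${username} ${password}\nmc mb myminio/etcd-backup\n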
                                          5. After clicking OK , the page will automatically redirect to the backup policy list, where you can view all the currently created ones.

                                            • Click the \u2507 action button on the right side of the policy to view logs, view YAML, update the policy, stop the policy, or execute the policy immediately.
                                            • When the backup method is manual, you can click Execute Now to perform the backup.
                                            • When the backup method is scheduled, the backup will be performed according to the configured time.
                                          "},{"location":"en/end-user/kpanda/backup/etcd-backup.html#view-backup-policy-logs","title":"View Backup Policy Logs","text":"

                                          Click Logs to view the log content. By default, 100 lines are displayed. If you want to see more log information or download the logs, you can follow the prompts above the logs to go to the observability module.

                                          "},{"location":"en/end-user/kpanda/backup/etcd-backup.html#view-backup-policy-details","title":"View Backup POlicy Details","text":"

                                          Go to Container Management -> Backup Recovery -> etcd Backup , click the Backup Policy tab, and then click the policy to view the details.

                                          "},{"location":"en/end-user/kpanda/backup/etcd-backup.html#view-recovery-point","title":"View Recovery Point","text":"
                                          1. Go to Container Management -> Backup Recovery -> etcd Backup, and click the Recovery Point tab.
                                          2. After selecting the target cluster, you can view all the backup information under that cluster.

                                            Each time a backup is executed, a proper recovery point is generated, which can be used to quickly restore the application from a successful recovery point.

                                          "},{"location":"en/end-user/kpanda/backup/install-velero.html","title":"Install the Velero Plugin","text":"

                                          velero is an open source tool for backing up and restoring Kubernetes cluster resources. It can back up resources in a Kubernetes cluster to cloud storage services, local storage, or other locations, and restore those resources to the same or a different cluster when needed.

                                          This section introduces how to deploy the Velero plugin in AI platform using the Helm Apps.

                                          "},{"location":"en/end-user/kpanda/backup/install-velero.html#prerequisites","title":"Prerequisites","text":"

                                          Before installing the velero plugin, the following prerequisites need to be met:

• Integrated or created a Kubernetes cluster, with access to the cluster's UI.
                                          • Created a velero namespace.
                                          • You should have permissions not lower than NS Editor. For details, refer to Namespace Authorization.
                                          "},{"location":"en/end-user/kpanda/backup/install-velero.html#steps","title":"Steps","text":"

                                          Please perform the following steps to install the velero plugin for your cluster.

                                          1. On the cluster list page, find the target cluster that needs to install the velero plugin, click the name of the cluster, click Helm Apps -> Helm chart in the left navigation bar, and enter velero in the search bar to search .

2. Read the introduction of the velero plugin, select a version, and click the Install button. This page uses version 5.2.0 as an example; installing 5.2.0 or later is recommended.

                                          3. Configure basic info .

                                            • Name: Enter the plugin name, please note that the name can be up to 63 characters, can only contain lowercase letters, numbers and separators (\"-\"), and must start and end with lowercase letters or numbers, such as metrics-server-01.
                                            • Namespace: Select the namespace for plugin installation, it must be velero namespace.
                                            • Version: The version of the plugin, here we take 5.2.0 version as an example.
                                            • Wait: When enabled, it will wait for all associated resources under the application to be ready before marking the application installation as successful.
• Deletion Failed: When enabled (sync and wait-for-ready are then on by default), installation-related resources are removed if the installation fails.
                                            • Detailed Logs: Turn on the verbose output of the installation process log.

Note

After enabling Wait and/or Deletion Failed , it may take a long time for the app to be marked as Running .
                                          4. Configure Velero chart Parameter Settings according to the following instructions

                                            • S3 Credentials: Configure the authentication information of object storage (minio).

                                              • Use secret: Keep the default configuration true.
                                              • Secret name: Keep the default configuration velero-s3-credential.
                                              • SecretContents.aws_access_key_id = : Configure the username for accessing object storage, replace with the actual parameter.
                                              • SecretContents.aws_secret_access_key = : Configure the password for accessing object storage, replace with the actual parameter.

                                                Use existing secret parameter example is as follows:

                                                [default]\naws_access_key_id = minio\naws_secret_access_key = minio123\n
                                                • BackupStorageLocation: The location where Velero backs up data.

                                                  • S3 bucket: The name of the storage bucket used to save backup data (must be a real storage bucket that already exists in minio).
                                                  • Is default BackupStorage: Keep the default configuration true.
• S3 access mode: Velero's access mode to the data; choose one of:
                                                  • ReadWrite: Allow Velero to read and write backup data;
                                                  • ReadOnly: Allow Velero to read backup data, but cannot modify backup data;
                                                  • WriteOnly: Only allow Velero to write backup data, and cannot read backup data.
                                                • S3 Configs: Detailed configuration of S3 storage (minio).

                                                  • S3 region: The geographical region of cloud storage. The default is to use the us-east-1 parameter, which is provided by the system administrator.
                                                  • S3 force path style: Keep the default configuration true.
• S3 server URL: The console access address of the object storage (minio). MinIO generally provides two services, UI access and console access; use the console access address here.

                                              • Click the OK button to complete the installation of the Velero plugin. The system will automatically jump to the Helm Apps list page. After waiting for a few minutes, refresh the page, and you can see the application just installed.
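Assuming kubectl access, you can also confirm from the command line that the Velero components are running in the velero namespace:

kubectl get pods -n velero\n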

                                              • "},{"location":"en/end-user/kpanda/clusterops/cluster-settings.html","title":"Cluster Settings","text":"

                                                Cluster settings are used to customize advanced feature settings for your cluster, including whether to enable GPU, helm repo refresh cycle, Helm operation record retention, etc.

                                                • Enable GPU: GPUs and proper driver plug-ins need to be installed on the cluster in advance.

                                                  Click the name of the target cluster, and click Operations and Maintenance -> Cluster Settings -> Addons in the left navigation bar.

• Other settings: the Helm operation base image, registry refresh cycle, number of operation records retained, and whether to enable cluster deletion protection (when enabled, the cluster cannot be deleted directly)

                                                "},{"location":"en/end-user/kpanda/clusterops/latest-operations.html","title":"recent operations","text":"

                                                On this page, you can view the recent cluster operation records and Helm operation records, as well as the YAML files and logs of each operation, and you can also delete a certain record.

                                                Set the number of reserved entries for Helm operations:

By default, the system keeps the last 100 Helm operation records. Keeping too many records causes data redundancy, while keeping too few may lose the key operation records you need, so set a reasonable retention count for your situation. Specific steps are as follows:

                                                1. Click the name of the target cluster, and click Recent Operations -> Helm Operations -> Set Number of Retained Items in the left navigation bar.

                                                2. Set how many Helm operation records need to be kept, and click OK .

                                                "},{"location":"en/end-user/kpanda/clusters/access-cluster.html","title":"Access Clusters","text":"

Clusters integrated into or created with the AI platform Container Management module can be accessed not only through the UI but also in two other ways:

                                                • Access online via CloudShell
                                                • Access via kubectl after downloading the cluster certificate

                                                Note

                                                When accessing the cluster, the user should have Cluster Admin permission or higher.

                                                "},{"location":"en/end-user/kpanda/clusters/access-cluster.html#access-via-cloudshell","title":"Access via CloudShell","text":"
                                                1. Enter Clusters page, select the cluster you want to access via CloudShell, click the ... icon on the right, and then click Console from the dropdown list.

                                                2. Run kubectl get node command in the Console to verify the connectivity between CloudShell and the cluster. If the console returns node information of the cluster, you can access and manage the cluster through CloudShell.

                                                "},{"location":"en/end-user/kpanda/clusters/access-cluster.html#access-via-kubectl","title":"Access via kubectl","text":"

                                                If you want to access and manage remote clusters from a local node, make sure you have met these prerequisites:

                                                • Your local node and the cloud cluster are in a connected network.
                                                • The cluster certificate has been downloaded to the local node.
                                                • The kubectl tool has been installed on the local node. For detailed installation guides, see Installing tools.

                                                If everything is in place, follow these steps to access a cloud cluster from your local environment.

                                                1. Enter Clusters page, find your target cluster, click ... on the right, and select Download kubeconfig in the drop-down list.

                                                2. Set the Kubeconfig period and click Download .

                                                3. Open the downloaded certificate and copy its content to the config file of the local node.

                                                  By default, the kubectl tool will look for a file named config in the $HOME/.kube directory on the local node. This file stores access credentials of clusters. Kubectl can access the cluster with that configuration file.

                                                4. Run the following command on the local node to verify its connectivity with the cluster:

                                                  kubectl get pod -n default\n

                                                  An expected output is as follows:

                                                  NAME                            READY   STATUS      RESTARTS    AGE\ndao-2048-2048-58c7f7fc5-mq7h4   1/1     Running     0           30h\n

                                                Now you can access and manage the cluster locally with kubectl.
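Alternatively, instead of merging the certificate into $HOME/.kube/config, you can point kubectl at the downloaded file directly (the file name below is illustrative):

export KUBECONFIG=$HOME/Downloads/my-cluster.kubeconfig\nkubectl get pod -n default\n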

                                                "},{"location":"en/end-user/kpanda/clusters/cluster-role.html","title":"Cluster Roles","text":"

                                                Suanova AI platform categorizes clusters based on different functionalities to help users better manage IT infrastructure.

                                                "},{"location":"en/end-user/kpanda/clusters/cluster-role.html#global-service-cluster","title":"Global Service Cluster","text":"

                                                This cluster is used to run AI platform components. It generally does not carry business workloads.

• K8s Version: 1.22+
• Operating System: RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD
• Full Lifecycle Management: Supported
• K8s Resource Management: Supported
• Cloud Native Storage: Supported
• Cloud Native Network: Calico, Cilium, Multus, and other CNIs
• Policy Management: Supports network policies, quota policies, resource limits, disaster recovery policies, security policies
"},{"location":"en/end-user/kpanda/clusters/cluster-role.html#management-cluster","title":"Management Cluster","text":"

                                                This cluster is used to manage worker clusters and generally does not carry business workloads.

• K8s Version: 1.22+
• Operating System: RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD
• Full Lifecycle Management: Supported
• K8s Resource Management: Supported
• Cloud Native Storage: Supported
• Cloud Native Network: Calico, Cilium, Multus, and other CNIs
• Policy Management: Supports network policies, quota policies, resource limits, disaster recovery policies, security policies
"},{"location":"en/end-user/kpanda/clusters/cluster-role.html#worker-cluster","title":"Worker Cluster","text":"

                                                This is a cluster created using Container Management and is mainly used to carry business workloads. This cluster is managed by the management cluster.

• K8s Version: Supports K8s 1.22 and above
• Operating System: RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD
• Full Lifecycle Management: Supported
• K8s Resource Management: Supported
• Cloud Native Storage: Supported
• Cloud Native Network: Calico, Cilium, Multus, and other CNIs
• Policy Management: Supports network policies, quota policies, resource limits, disaster recovery policies, security policies
"},{"location":"en/end-user/kpanda/clusters/cluster-role.html#integrated-cluster","title":"Integrated Cluster","text":"

                                                This cluster is used to integrate existing standard K8s clusters, including but not limited to self-built clusters in local data centers, clusters provided by public cloud vendors, clusters provided by private cloud vendors, edge clusters, Xinchuang clusters, heterogeneous clusters, and different Suanova clusters. It is mainly used to carry business workloads.

• K8s Version: 1.18+
• Supported Vendors: VMware Tanzu, Amazon EKS, Redhat Openshift, SUSE Rancher, Alibaba ACK, Huawei CCE, Tencent TKE, Standard K8s Cluster, Suanova
• Full Lifecycle Management: Not Supported
• K8s Resource Management: Supported
• Cloud Native Storage: Supported
• Cloud Native Network: Depends on the network mode of the integrated cluster's kernel
• Policy Management: Supports network policies, quota policies, resource limits, disaster recovery policies, security policies

                                                Note

                                                A cluster can have multiple cluster roles. For example, a cluster can be both a global service cluster and a management cluster or a worker cluster.

                                                "},{"location":"en/end-user/kpanda/clusters/cluster-scheduler-plugin.html","title":"Deploy Second Scheduler scheduler-plugins in a Cluster","text":"

This page describes how to deploy scheduler-plugins as a second scheduler in a cluster.

                                                "},{"location":"en/end-user/kpanda/clusters/cluster-scheduler-plugin.html#why-do-we-need-scheduler-plugins","title":"Why do we need scheduler-plugins?","text":"

Clusters created through the platform install the native K8s scheduler, but the native scheduler has many limitations:

• The native scheduler cannot meet all scheduling requirements; you can use CoScheduling, CapacityScheduling, or other scheduler plugins instead.
                                                • In special scenarios, a new scheduler-plugin is needed to complete scheduling tasks without affecting the process of the native scheduler-plugin.
                                                • Distinguish scheduler-plugins with different functionalities and achieve different scheduling scenarios by switching scheduler-plugin names.

                                                This page takes the scenario of using the vgpu scheduler-plugin while combining the coscheduling plugin capability of scheduler-plugins as an example to introduce how to install and use scheduler-plugins.

                                                "},{"location":"en/end-user/kpanda/clusters/cluster-scheduler-plugin.html#installing-scheduler-plugins","title":"Installing scheduler-plugins","text":""},{"location":"en/end-user/kpanda/clusters/cluster-scheduler-plugin.html#prerequisites","title":"Prerequisites","text":"
                                                • kubean is a new feature introduced in v0.13.0, please ensure that your version is v0.13.0 or higher.
                                                • The installation version of scheduler-plugins is v0.27.8, please ensure that the cluster version is compatible with it. Refer to the document Compatibility Matrix.
                                                "},{"location":"en/end-user/kpanda/clusters/cluster-scheduler-plugin.html#installation-process","title":"Installation Process","text":"
                                                1. Add the scheduler-plugins parameter in Create Cluster -> Advanced Settings -> Custom Parameters.

scheduler_plugins_enabled: true\nscheduler_plugins_plugin_config:\n  - name: Coscheduling\n    args:\n      permitWaitingTimeSeconds: 10 # default is 60\n

                                                  Parameters:

                                                  • scheduler_plugins_enabled Set to true to enable the scheduler-plugins capability.
                                                  • You can enable or disable certain plugins by setting the scheduler_plugins_enabled_plugins or scheduler_plugins_disabled_plugins options. See K8s Official Plugin Names for reference.
• If you need to set parameters for custom plugins, configure scheduler_plugins_plugin_config, for example: set the permitWaitingTimeSeconds parameter for coscheduling. See K8s Official Plugin Configuration for reference.
                                                2. After successful cluster creation, the system will automatically install the scheduler-plugins and controller component loads. You can check the workload status in the proper cluster's deployment.
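For example, assuming the components were installed into the scheduler-plugins namespace (as in the commands later in this section), you can check:

kubectl get deploy -n scheduler-plugins\n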

                                                "},{"location":"en/end-user/kpanda/clusters/cluster-scheduler-plugin.html#using-scheduler-plugins","title":"Using scheduler-plugins","text":"

                                                Here is an example of how to use scheduler-plugins by demonstrating a scenario where the vgpu scheduler is used in combination with the coscheduling plugin capability of scheduler-plugins.

                                                1. Install vgpu in the Helm Charts and set the values.yaml parameters.

                                                  • schedulerName: scheduler-plugins-scheduler: This is the scheduler name for scheduler-plugins installed by kubean, and currently cannot be modified.
                                                  • scheduler.kubeScheduler.enabled: false: Do not install kube-scheduler and use vgpu-scheduler as a separate extender.
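A minimal values.yaml sketch reflecting just these two settings (all other values omitted; the exact nesting may differ by chart version):

schedulerName: scheduler-plugins-scheduler\nscheduler:\n  kubeScheduler:\n    enabled: false\n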
                                                2. Extend vgpu-scheduler on scheduler-plugins.

                                                  [root@master01 charts]# kubectl get cm -n scheduler-plugins scheduler-config -ojsonpath=\"{.data.scheduler-config\\.yaml}\"\n
                                                  apiVersion: kubescheduler.config.k8s.io/v1\nkind: KubeSchedulerConfiguration\nleaderElection:\n  leaderElect: false\nprofiles:\n  # Compose all plugins in one profile\n  - schedulerName: scheduler-plugins-scheduler\n    plugins:\n      multiPoint:\n        enabled:\n          - name: Coscheduling\n          - name: CapacityScheduling\n          - name: NodeResourceTopologyMatch\n          - name: NodeResourcesAllocatable\n        disabled:\n          - name: PrioritySort\npluginConfig:\n  - args:\n      permitWaitingTimeSeconds: 10\n    name: Coscheduling\n

Modify the scheduler-config ConfigMap of scheduler-plugins to add the vgpu-scheduler extender:

kubectl edit cm -n scheduler-plugins scheduler-config\n
                                                  apiVersion: kubescheduler.config.k8s.io/v1\nkind: KubeSchedulerConfiguration\nleaderElection:\n  leaderElect: false\nprofiles:\n  # Compose all plugins in one profile\n  - schedulerName: scheduler-plugins-scheduler\n    plugins:\n      multiPoint:\n        enabled:\n          - name: Coscheduling\n          - name: CapacityScheduling\n          - name: NodeResourceTopologyMatch\n          - name: NodeResourcesAllocatable\n        disabled:\n          - name: PrioritySort\npluginConfig:\n  - args:\n      permitWaitingTimeSeconds: 10\n    name: Coscheduling\nextenders:\n  - urlPrefix: \"${urlPrefix}\"\n    filterVerb: filter\n    bindVerb: bind\n    nodeCacheCapable: true\n    ignorable: true\n    httpTimeout: 30s\n    weight: 1\n    enableHTTPS: true\n    tlsConfig:\n      insecure: true\n    managedResources:\n      - name: nvidia.com/vgpu\n        ignoredByScheduler: true\n      - name: nvidia.com/gpumem\n        ignoredByScheduler: true\n      - name: nvidia.com/gpucores\n        ignoredByScheduler: true\n      - name: nvidia.com/gpumem-percentage\n        ignoredByScheduler: true\n      - name: nvidia.com/priority\n        ignoredByScheduler: true\n      - name: cambricon.com/mlunum\n        ignoredByScheduler: true\n
                                                3. After installing vgpu-scheduler, the system will automatically create a service (svc), and the urlPrefix specifies the URL of the svc.

                                                  Note

• The svc refers to the Service exposing the vgpu-scheduler Pod. You can use the following command in the namespace where the nvidia-vgpu plugin is installed to get the external access information for port 443.

                                                    kubectl get svc -n ${namespace}\n
                                                  • The urlPrefix format is https://${ip address}:${port}

                                                4. Restart the scheduler pod of scheduler-plugins to load the new configuration file.

                                                  Note

                                                  When creating a vgpu application, you do not need to specify the name of a scheduler-plugin. The vgpu-scheduler webhook will automatically change the scheduler's name to \"scheduler-plugins-scheduler\" without manual specification.
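One way to perform the restart in step 4, assuming the scheduler Deployment is named scheduler-plugins-scheduler:

kubectl rollout restart deploy scheduler-plugins-scheduler -n scheduler-plugins\n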

                                                "},{"location":"en/end-user/kpanda/clusters/cluster-status.html","title":"Cluster Status","text":"

                                                AI platform Container Management module can manage two types of clusters: integrated clusters and created clusters.

                                                • Integrated clusters: clusters created in other platforms and now integrated into AI platform.
                                                • Created clusters: clusters created in AI platform.

                                                For more information about cluster types, see Cluster Role.

We designed several statuses for these two kinds of clusters.

                                                "},{"location":"en/end-user/kpanda/clusters/cluster-status.html#integrated-clusters","title":"Integrated Clusters","text":"Status Description Integrating The cluster is being integrated into AI platform. Removing The cluster is being removed from AI platform. Running The cluster is running as expected. Unknown The cluster is lost. Data displayed in the AI platform UI is the cached data before the disconnection, which does not represent real-time data. Any operation during this status will not take effect. You should check cluster network connectivity or host status."},{"location":"en/end-user/kpanda/clusters/cluster-status.html#created-clusters","title":"Created Clusters","text":"Status Description Creating The cluster is being created. Updating The Kubernetes version of the cluster is being operating. Deleting The cluster is being deleted. Running The cluster is running as expected. Unknown The cluster is lost. Data displayed in the AI platform UI is the cached data before the disconnection, which does not represent real-time data. Any operation during this status will not take effect. You should check cluster network connectivity or host status. Failed The cluster creation is failed. You should check the logs for detailed reasons."},{"location":"en/end-user/kpanda/clusters/cluster-version.html","title":"Supported Kubernetes Versions","text":"

                                                In AI platform, the integrated clusters and created clusters have different version support mechanisms.

                                                This page focuses on the version support mechanism for created clusters.

The Kubernetes community supports a range of three versions, such as 1.26, 1.27, and 1.28. When the community releases a new version, the supported range moves up by one: for example, when 1.29 is released, the community-supported range becomes 1.27, 1.28, and 1.29.

                                                To ensure the security and stability of the clusters, when creating clusters in AI platform, the supported version range will always be one version lower than the community's version.

                                                For instance, if the Kubernetes community supports v1.25, v1.26, and v1.27, then the version range for creating worker clusters in AI platform will be v1.24, v1.25, and v1.26. Additionally, a stable version, such as 1.24.7, will be recommended to users.

                                                Furthermore, the version range for creating worker clusters in AI platform will remain highly synchronized with the community. When the community version increases incrementally, the version range for creating worker clusters in AI platform will also increase by one version.

                                                "},{"location":"en/end-user/kpanda/clusters/cluster-version.html#supported-kubernetes-versions_1","title":"Supported Kubernetes Versions","text":"Kubernetes Community Versions Created Worker Cluster Versions Recommended Versions for Created Worker Cluster AI platform Installer Release Date
                                                • 1.26
                                                • 1.27
                                                • 1.28
                                                • 1.26
                                                • 1.27
                                                • 1.28
                                                1.27.5 v0.13.0 2023.11.30"},{"location":"en/end-user/kpanda/clusters/create-cluster.html","title":"Create Worker Clusters","text":"

                                                In AI platform Container Management, clusters can have four roles: global service cluster, management cluster, worker cluster, and integrated cluster. An integrated cluster can only be integrated from third-party vendors (see Integrate Cluster).

This page explains how to create a Worker Cluster. By default, when creating a new Worker Cluster, the operating system type and CPU architecture of the worker nodes should be consistent with those of the Global Service Cluster. To create a cluster with a different operating system or architecture than the Global Service Cluster, refer to Creating an Ubuntu Worker Cluster on a CentOS Management Platform for instructions.

                                                It is recommended to use the supported operating systems in AI platform to create the cluster. If your local nodes are not within the supported range, you can refer to Creating a Cluster on Non-Mainstream Operating Systems for instructions.

                                                "},{"location":"en/end-user/kpanda/clusters/create-cluster.html#prerequisites","title":"Prerequisites","text":"

                                                Certain prerequisites must be met before creating a cluster:

                                                • Prepare enough nodes to be joined into the cluster.
• It is recommended to use Kubernetes version 1.25.7. For the specific version range, refer to the AI platform Cluster Version Support System. Currently, the supported version range for created worker clusters is v1.26.0-v1.28. If you need to create a cluster with a lower version, refer to the Supported Cluster Versions.
                                                • The target host must allow IPv4 forwarding. If using IPv6 in Pods and Services, the target server needs to allow IPv6 forwarding.
                                                • AI platform does not provide firewall management. You need to pre-define the firewall rules of the target host by yourself. To avoid errors during cluster creation, it is recommended to disable the firewall of the target host.
                                                • See Node Availability Check.
                                                "},{"location":"en/end-user/kpanda/clusters/create-cluster.html#steps","title":"Steps","text":"
                                                1. Enter the Container Management module, click Create Cluster on the upper right corner of the Clusters page.

                                                2. Fill in the basic information by referring to the following instructions.

• Cluster Name: can only contain lowercase letters, numbers, and hyphens (\"-\"); must start and end with a lowercase letter or number and be at most 63 characters in total.
                                                  • Managed By: Choose a cluster to manage this new cluster through its lifecycle, such as creating, upgrading, node scaling, deleting the new cluster, etc.
                                                  • Runtime: Select the runtime environment of the cluster. Currently support containerd and docker (see How to Choose Container Runtime).
• Kubernetes Version: Allows a span of three versions, such as 1.23-1.25, subject to the versions supported by the management cluster.

                                                3. Fill in the node configuration information and click Node Check .

                                                  • High Availability: When enabled, at least 3 controller nodes are required. When disabled, only 1 controller node is needed.

                                                    It is recommended to use High Availability mode in production environments.

                                                  • Credential Type: Choose whether to access nodes using username/password or public/private keys.

                                                    If using public/private key authentication, SSH keys for the nodes need to be configured in advance. Refer to Using SSH Key Authentication for Nodes.

                                                  • Same Password: When enabled, all nodes in the cluster will have the same access password. Enter the unified password for accessing all nodes in the field below. If disabled, you can set separate usernames and passwords for each node.

• Node Information: Set node names and IPs.
                                                  • NTP Time Synchronization: When enabled, time will be automatically synchronized across all nodes. Provide the NTP server address.

4. If the node check passes, click Next . If the check fails, update the Node Information and check again.

                                                5. Fill in the network configuration and click Next .

• CNI: Provides network services for Pods in the cluster. The CNI cannot be changed after the cluster is created. Supports cilium and calico. Selecting none means no CNI will be installed during cluster creation; you can install a CNI later.

                                                    For CNI configuration details, see Cilium Installation Parameters or Calico Installation Parameters.

• Container IP Range: Set an IP range for allocating IPs to containers in the cluster. The range determines the maximum number of containers allowed in the cluster and cannot be modified after creation. For example, a /18 range provides 2^(32-18) = 16,384 container IPs.

• Service IP Range: Set an IP range for allocating IPs to container Services in the cluster. This range determines the maximum number of Services that can be created in the cluster and cannot be modified after creation.
                                                6. Fill in the plug-in configuration and click Next .

                                                7. Fill in advanced settings and click OK .

• kubelet_max_pods : Set the maximum number of Pods per node. The default is 110 (see the sketch after this list).
                                                  • hostname_override : Reset the hostname (not recommended).
                                                  • kubernetes_audit : Kubernetes audit log, enabled by default.
                                                  • auto_renew_certificate : Automatically renew the certificate of the control plane on the first Monday of each month, enabled by default.
                                                  • disable_firewalld&ufw : Disable the firewall to prevent the node from being inaccessible during installation.
• insecure_registries : Set the address of your private container registry. Filling in this address lets the container engine bypass certificate authentication and pull images from that private registry.
• yum_repos : Fill in the Yum repository address.
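For reference, the kubelet_max_pods option above corresponds to the maxPods field in the kubelet's own configuration. A minimal sketch of the equivalent upstream KubeletConfiguration (the value shown is the default):

apiVersion: kubelet.config.k8s.io/v1beta1\nkind: KubeletConfiguration\nmaxPods: 110  # maximum number of Pods per node; matches the kubelet_max_pods default\n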

                                                Success

                                                • After correctly filling in the above information, the page will prompt that the cluster is being created.
• Creating a cluster takes a while, so please wait patiently. You can click the Back to Clusters button to let the creation continue in the background.
                                                • To view the current status, click Real-time Log .

                                                Note

• When the cluster is in an unknown state, it means that the current cluster has been disconnected.
• The data displayed by the system is cached data from before the disconnection and does not reflect the real state.
                                                • Any operations performed in the disconnected state will not take effect. Please check the cluster network connectivity or Host Status.
                                                "},{"location":"en/end-user/kpanda/clusters/delete-cluster.html","title":"Delete/Remove Clusters","text":"

                                                Clusters created in AI platform Container Management can be either deleted or removed. Clusters integrated into AI platform can only be removed.

                                                Info

                                                If you want to delete an integrated cluster, you should delete it in the platform where it is created.

                                                In AI platform, the difference between Delete and Remove is:

• Delete will destroy the cluster and reset the data of all nodes under the cluster. All data will be cleared and permanently lost, so making a backup before deleting a cluster is a recommended best practice. The deleted cluster can no longer be used.
                                                • Remove just removes the cluster from AI platform. It will not destroy the cluster and no data will be lost. You can still use the cluster in other platforms or re-integrate it into AI platform later if needed.

                                                Note

                                                • You should have Admin or Kpanda Owner permissions to perform delete or remove operations.
                                                • Before deleting a cluster, you should turn off Cluster Deletion Protection in Cluster Settings -> Advanced Settings , otherwise the Delete Cluster option will not be displayed.
                                                • The global service cluster cannot be deleted or removed.
1. Enter the Container Management module, find your target cluster, click ... on the right, and select Delete Cluster / Remove in the drop-down list.

                                                2. Enter the cluster name to confirm and click Delete .

3. You will be automatically redirected to the cluster list. The status of this cluster will change to Deleting . It may take a while to delete/remove a cluster.

                                                "},{"location":"en/end-user/kpanda/clusters/integrate-cluster.html","title":"Integrate Clusters","text":"

With the cluster integration feature, AI platform allows you to manage on-premise and cloud clusters from various providers in a unified manner. This is important for avoiding vendor lock-in with a particular provider and helps enterprises migrate their business to the cloud safely.

In the AI platform Container Management module, you can integrate clusters from the following providers: standard Kubernetes clusters, Red Hat OpenShift, SUSE Rancher, VMware Tanzu, Amazon EKS, Aliyun ACK, Huawei CCE, Tencent TKE, etc.

                                                "},{"location":"en/end-user/kpanda/clusters/integrate-cluster.html#prerequisites","title":"Prerequisites","text":"
                                                • Prepare a cluster of K8s v1.22+ and ensure its network connectivity.
                                                • The operator should have the NS Editor or higher permissions.
                                                "},{"location":"en/end-user/kpanda/clusters/integrate-cluster.html#steps","title":"Steps","text":"
1. Enter the Container Management module and click Integrate Cluster in the upper right corner.

                                                2. Fill in the basic information by referring to the following instructions.

                                                  • Cluster Name: It should be unique and cannot be changed after the integration. Maximum 63 characters, can only contain lowercase letters, numbers, and a separator (\"-\"), and must start and end with a lowercase letter or number.
                                                  • Cluster Alias: Enter any characters, no more than 60 characters.
• Release Distribution: the cluster provider; supports the mainstream vendors listed above.
                                                3. Fill in the KubeConfig of the target cluster and click Verify Config . The cluster can be successfully connected only after the verification is passed.

                                                  Click How do I get the KubeConfig? to see the specific steps for getting this file.

                                                4. Confirm that all parameters are filled in correctly and click OK in the lower right corner of the page.

                                                Note

                                                The status of the newly integrated cluster is Integrating , which will become Running after the integration succeeds.

                                                "},{"location":"en/end-user/kpanda/clusters/integrate-rancher-cluster.html","title":"Integrate the Rancher Cluster","text":"

                                                This page explains how to integrate a Rancher cluster.

                                                "},{"location":"en/end-user/kpanda/clusters/integrate-rancher-cluster.html#prerequisites","title":"Prerequisites","text":"
                                                • Prepare a Rancher cluster with administrator privileges and ensure network connectivity between the container management cluster and the target cluster.
• Have permissions no lower than Kpanda Owner.
                                                "},{"location":"en/end-user/kpanda/clusters/integrate-rancher-cluster.html#steps","title":"Steps","text":""},{"location":"en/end-user/kpanda/clusters/integrate-rancher-cluster.html#step-1-create-a-serviceaccount-user-with-administrator-privileges-in-the-rancher-cluster","title":"Step 1: Create a ServiceAccount user with administrator privileges in the Rancher cluster","text":"
                                                1. Log in to the Rancher cluster with a role that has administrator privileges, and create a file named sa.yaml using the terminal.

                                                  vi sa.yaml\n

                                                  Press the i key to enter insert mode, then copy and paste the following content:

                                                  sa.yaml
apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: rancher-rke\nrules:\n- apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n- nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: rancher-rke\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: rancher-rke\nsubjects:\n- kind: ServiceAccount\n  name: rancher-rke\n  namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: rancher-rke\n  namespace: kube-system\n

                                                  Press the Esc key to exit insert mode, then type :wq to save and exit.

                                                2. Run the following command in the current directory to create a ServiceAccount named rancher-rke (referred to as SA for short):

                                                  kubectl apply -f sa.yaml\n

                                                  The expected output is as follows:

                                                  clusterrole.rbac.authorization.k8s.io/rancher-rke created\nclusterrolebinding.rbac.authorization.k8s.io/rancher-rke created\nserviceaccount/rancher-rke created\n
                                                3. Create a secret named rancher-rke-secret and bind the secret to the rancher-rke SA.

kubectl apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: rancher-rke-secret\n  namespace: kube-system\n  annotations:\n    kubernetes.io/service-account.name: rancher-rke\ntype: kubernetes.io/service-account-token\nEOF\n

                                                  The output is expected to be:

                                                  secret/rancher-rke-secret created\n

                                                  Note

                                                  If your cluster version is lower than 1.24, please ignore this step and proceed to the next one.

                                                4. Check secret for rancher-rke SA:

                                                  kubectl -n kube-system get secret | grep rancher-rke | awk '{print $1}'\n

                                                  The output is expected to be:

                                                  rancher-rke-secret\n

                                                  Check the rancher-rke-secret secret:

                                                  kubectl -n kube-system describe secret rancher-rke-secret\n

                                                  The output is expected to be:

                                                  Name:         rancher-rke-secret\nNamespace:    kube-system\nLabels:       <none>\nAnnotations:  kubernetes.io/service-account.name: rancher-rke\n            kubernetes.io/service-account.uid: d83df5d9-bd7d-488d-a046-b740618a0174\n\nType:  kubernetes.io/service-account-token\n\nData\n====\nca.crt:     570 bytes\nnamespace:  11 bytes\ntoken:      eyJhbGciOiJSUzI1NiIsImtpZCI6IjUtNE9nUWZLRzVpbEJORkZaNmtCQXhqVzRsZHU4MHhHcDBfb0VCaUo0V1kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJyYW5jaGVyLXJrZS1zZWNyZXQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoicmFuY2hlci1ya2UiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkODNkZjVkOS1iZDdkLTQ4OGQtYTA0Ni1iNzQwNjE4YTAxNzQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06cmFuY2hlci1ya2UifQ.VNsMtPEFOdDDeGt_8VHblcMRvjOwPXMM-79o9UooHx6q-VkHOcIOp3FOT2hnEdNnIsyODZVKCpEdCgyozX-3y5x2cZSZpocnkMcBbQm-qfTyUcUhAY7N5gcYUtHUhvRAsNWJcsDCn6d96gT_qo-ddo_cT8Ri39Lc123FDYOnYG-YGFKSgRQVy7Vyv34HIajZCCjZzy7i--eE_7o4DXeTjNqAFMFstUxxHBOXI3Rdn1zKQKqh5Jhg4ES7X-edSviSUfJUX-QV_LlAw5DuAyGPH7bDH4QaQ5k-p6cIctmpWZE-9wRDlKA4LYRblKE7MJcI6OmM4ldlMM0Jc8N-gCtl4w\n
                                                "},{"location":"en/end-user/kpanda/clusters/integrate-rancher-cluster.html#step-2-update-kubeconfig-with-the-rancher-rke-sa-authentication-on-your-local-machine","title":"Step 2: Update kubeconfig with the rancher-rke SA authentication on your local machine","text":"

Perform the following steps on any local node where kubectl is installed:

1. Configure the kubectl token.

kubectl config set-credentials rancher-rke --token=<token of rancher-rke-secret>\n

                                                  For example,

                                                  kubectl config set-credentials eks-admin --token=eyJhbGciOiJSUzI1NiIsImtpZCI6IjUtNE9nUWZLRzVpbEJORkZaNmtCQXhqVzRsZHU4MHhHcDBfb0VCaUo0V1kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJyYW5jaGVyLXJrZS1zZWNyZXQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoicmFuY2hlci1ya2UiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkODNkZjVkOS1iZDdkLTQ4OGQtYTA0Ni1iNzQwNjE4YTAxNzQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06cmFuY2hlci1ya2UifQ.VNsMtPEFOdDDeGt_8VHblcMRvjOwPXMM-79o9UooHx6q-VkHOcIOp3FOT2hnEdNnIsyODZVKCpEdCgyozX-3y5x2cZSZpocnkMcBbQm-qfTyUcUhAY7N5gcYUtHUhvRAsNWJcsDCn6d96gT_qo-ddo_cT8Ri39Lc123FDYOnYG-YGFKSgRQVy7Vyv34HIajZCCjZzy7i--eE_7o4DXeTjNqAFMFstUxxHBOXI3Rdn1zKQKqh5Jhg4ES7X-edSviSUfJUX-QV_LlAw5DuAyGPH7bDH4QaQ5k-p6cIctmpWZE-9wRDlKA4LYRblKE7MJcI6OmM4ldlMM0Jc8N-gCtl4w\n
2. Configure the kubectl APIServer information.

                                                  kubectl config set-cluster {cluster-name} --insecure-skip-tls-verify=true --server={APIServer}\n
                                                  • {cluster-name} : the name of your Rancher cluster.
• {APIServer} : the access address of the cluster, usually referring to the IP address of the control plane node + port 6443, such as https://10.X.X.X:6443 .

                                                  For example,

                                                  kubectl config set-cluster rancher-rke --insecure-skip-tls-verify=true --server=https://10.X.X.X:6443\n
3. Configure the kubectl context.

                                                  kubectl config set-context {context-name} --cluster={cluster-name} --user={SA-usename}\n

                                                  For example,

                                                  kubectl config set-context rancher-rke-context --cluster=rancher-rke --user=rancher-rke\n
4. Specify the newly created context rancher-rke-context in kubectl.

                                                  kubectl config use-context rancher-rke-context\n
                                                5. Fetch the kubeconfig information for the context rancher-rke-context .

                                                  kubectl config view --minify --flatten --raw\n

                                                  The output is expected to be:

```yaml\napiVersion: v1\nclusters:\n- cluster:\n    insecure-skip-tls-verify: true\n    server: https://77C321BCF072682C70C8665ED4BFA10D.gr7.ap-southeast-1.eks.amazonaws.com\n  name: joincluster\ncontexts:\n- context:\n    cluster: joincluster\n    user: eks-admin\n  name: ekscontext\ncurrent-context: ekscontext\nkind: Config\npreferences: {}\nusers:\n- name: eks-admin\n  user:\n    token: eyJhbGciOiJSUzI1NiIsImtpZCI6ImcxTjJwNkktWm5IbmRJU1RFRExvdWY1TGFWVUtGQ3VIejFtNlFQcUNFalEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2V\n```

                                                "},{"location":"en/end-user/kpanda/clusters/integrate-rancher-cluster.html#step-3-connect-the-cluster-in-the-suanova-interface","title":"Step 3: Connect the cluster in the Suanova Interface","text":"

                                                Using the kubeconfig file fetched earlier, refer to the Integrate Cluster documentation to integrate the Rancher cluster to the global cluster.

                                                "},{"location":"en/end-user/kpanda/clusters/runtime.html","title":"How to choose the container runtime","text":"

The container runtime is an important Kubernetes component that manages the lifecycle of containers and container images. Kubernetes made containerd the default container runtime in version 1.19 and removed support for the Dockershim component in version 1.24.

Therefore, compared to the Docker runtime, we recommend the lightweight containerd as your container runtime, as it has become the mainstream choice.

In addition, some operating system distributions provide limited compatibility with the Docker runtime. The runtime support for different operating systems is as follows:

                                                "},{"location":"en/end-user/kpanda/clusters/runtime.html#operating-systems-and-supported-runtimes","title":"Operating systems and supported runtimes","text":"Operating System Supported containerd Versions Supported Docker Versions CentOS 1.5.5, 1.5.7, 1.5.8, 1.5.9, 1.5.10, 1.5.11, 1.5.12, 1.5.13, 1.6.0, 1.6.1, 1.6.2, 1.6.3, 1.6.4, 1.6.5, 1.6.6, 1.6.7, 1.6.8, 1.6.9, 1.6.10, 1.6.11, 1.6.12, 1.6.13, 1.6.14, 1.6.15 (default) 18.09, 19.03, 20.10 (default) RedHatOS 1.5.5, 1.5.7, 1.5.8, 1.5.9, 1.5.10, 1.5.11, 1.5.12, 1.5.13, 1.6.0, 1.6.1, 1.6.2, 1.6.3, 1.6.4, 1.6.5, 1.6.6, 1.6.7, 1.6.8, 1.6.9, 1.6.10, 1.6.11, 1.6.12, 1.6.13, 1.6.14, 1.6.15 (default) 18.09, 19.03, 20.10 (default) KylinOS 1.5.5, 1.5.7, 1.5.8, 1.5.9, 1.5.10, 1.5.11, 1.5.12, 1.5.13, 1.6.0, 1.6.1, 1.6.2, 1.6.3, 1.6.4, 1.6.5, 1.6.6, 1.6.7, 1.6.8, 1.6.9, 1.6.10, 1.6.11, 1.6.12, 1.6.13, 1.6.14, 1.6.15 (default) 19.03 (Only supported by ARM architecture, Docker is not supported as a runtime under x86 architecture)

                                                Note

                                                In the offline installation mode, you need to prepare the runtime offline package of the relevant operating system in advance.

                                                "},{"location":"en/end-user/kpanda/clusters/upgrade-cluster.html","title":"Cluster Upgrade","text":"

The Kubernetes community releases a minor version roughly every quarter, and each version is maintained for only about 9 months. Major bugs and security vulnerabilities are no longer patched after a version reaches end of maintenance. Upgrading clusters manually is cumbersome and places a heavy workload on administrators.

In Suanova, you can upgrade your Kubernetes cluster with one click through the web UI.

                                                Danger

After the version is upgraded, it is not possible to roll back to the previous version. Please proceed with caution.

                                                Note

                                                • Kubernetes versions are denoted as x.y.z , where x is the major version, y is the minor version, and z is the patch version.
• Skipping minor versions during a cluster upgrade is not allowed, e.g. a direct upgrade from 1.23 to 1.25 is not possible.
• Integrated clusters do not support version upgrades. If Cluster Upgrade is not shown in the left navigation bar, please check whether the cluster is an integrated cluster.
                                                • The global service cluster can only be upgraded through the terminal.
                                                • When upgrading a worker cluster, the Management Cluster of the worker cluster should have been connected to the container management module and be running normally.
                                                1. Click the name of the target cluster in the cluster list.

                                                2. Then click Cluster Operation and Maintenance -> Cluster Upgrade in the left navigation bar, and click Version Upgrade in the upper right corner of the page.

                                                3. Select the version that can be upgraded, and enter the cluster name to confirm.

                                                4. After clicking OK , you can see the upgrade progress of the cluster.

                                                5. The cluster upgrade is expected to take 30 minutes. You can click the Real-time Log button to view the detailed log of the cluster upgrade.

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/create-configmap.html","title":"Create ConfigMaps","text":"

ConfigMaps store non-confidential data as key-value pairs, decoupling configuration data from application code. ConfigMaps can be used as environment variables for containers, command-line parameters, or configuration files in storage volumes.

                                                Note

                                                • The data saved in ConfigMaps cannot exceed 1 MiB. If you need to store larger volumes of data, it is recommended to mount a storage volume or use an independent database or file service.

• ConfigMaps do not provide confidentiality or encryption. If you want to store encrypted data, it is recommended to use a Secret or other third-party tools to ensure data privacy.

You can create ConfigMaps in two ways:

                                                • Graphical form creation
                                                • YAML creation
                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/create-configmap.html#prerequisites","title":"Prerequisites","text":"
• You have integrated or created a Kubernetes cluster and can access its UI.

                                                • Created a namespace, user, and authorized the user as NS Editor. For details, refer to Namespace Authorization.

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/create-configmap.html#graphical-form-creation","title":"Graphical form creation","text":"
                                                1. Click the name of a cluster on the Clusters page to enter Cluster Details .

                                                2. In the left navigation bar, click ConfigMap and Secret -> ConfigMap , and click the Create ConfigMap button in the upper right corner.

                                                3. Fill in the configuration information on the Create ConfigMap page, and click OK .

                                                  !!! note

                                                   Click __Upload File__ to import an existing file locally to quickly create ConfigMaps.\n
4. After the creation is complete, click More on the right side of the ConfigMap to perform operations such as editing YAML, updating, exporting, and deleting.

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/create-configmap.html#yaml-creation","title":"YAML creation","text":"
                                                1. Click the name of a cluster on the Clusters page to enter Cluster Details .

                                                2. In the left navigation bar, click ConfigMap and Secret -> ConfigMap , and click the YAML Create button in the upper right corner.

                                                3. Fill in or paste the configuration file prepared in advance, and then click OK in the lower right corner of the pop-up box.

                                                  !!! note

                                                   - Click __Import__ to import an existing file locally to quickly create ConfigMaps.\n - After filling in the data, click __Download__ to save the configuration file locally.\n
4. After the creation is complete, click More on the right side of the ConfigMap to perform operations such as editing YAML, updating, exporting, and deleting.

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/create-configmap.html#configmap-yaml-example","title":"ConfigMap YAML example","text":"
                                                 ```yaml\n kind: ConfigMap\n apiVersion: v1\n metadata:\n   name: kube-root-ca.crt\n   namespace: default\n   annotations:\n data:\n   version: '1.0'\n ```\n

                                                Next step: Use ConfigMaps

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/create-secret.html","title":"Create Secret","text":"

A Secret is a resource object used to store and manage sensitive information such as passwords, OAuth tokens, SSH keys, and TLS credentials. Using Secrets means you don't need to include sensitive data in your application code.

Secrets are typically used in the following cases:

• As environment variables of a container, providing information needed while the container runs.
• As Pod data volumes.
• As authentication credentials for the container registry when the kubelet pulls container images.

You can create Secrets in two ways:

                                                • Graphical form creation
                                                • YAML creation
                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/create-secret.html#prerequisites","title":"Prerequisites","text":"
• You have integrated or created a Kubernetes cluster and can access its UI

                                                • Created a namespace, user, and authorized the user as NS Editor. For details, refer to Namespace Authorization.

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/create-secret.html#create-secret-with-wizard","title":"Create secret with wizard","text":"
                                                1. Click the name of a cluster on the Clusters page to enter Cluster Details .

                                                2. In the left navigation bar, click ConfigMap and Secret -> Secret , and click the Create Secret button in the upper right corner.

                                                3. Fill in the configuration information on the Create Secret page, and click OK .

                                                  Note when filling in the configuration:

• The name of the Secret must be unique within the same namespace
• Secret type:
  • Default (Opaque): the Kubernetes default Secret type, which supports arbitrary user-defined data.
  • TLS (kubernetes.io/tls): credentials for TLS client or server data access.
  • Container registry information (kubernetes.io/dockerconfigjson): credentials for container registry access.
  • Username and password (kubernetes.io/basic-auth): credentials for basic authentication.
  • Custom: a type customized by the user according to business needs.
• Secret data: the data stored in the Secret; different types require different parameters
  • When the Secret type is default (Opaque)/custom: multiple key-value pairs can be filled in.
  • When the Secret type is TLS (kubernetes.io/tls): you need to fill in the certificate and private key data (see the sketch after this list). Certificates are self-signed or CA-signed credentials used for authentication; a certificate request is a request for a signature that needs to be signed with a private key.
  • When the Secret type is container registry information (kubernetes.io/dockerconfigjson): you need to fill in the account and password of the private container registry.
  • When the Secret type is username and password (kubernetes.io/basic-auth): a username and password need to be specified.
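As a reference for the TLS type described above, the following is a minimal sketch of a kubernetes.io/tls Secret; the name and the base64 placeholders are illustrative:

apiVersion: v1\nkind: Secret\nmetadata:\n  name: tls-demo\ntype: kubernetes.io/tls\ndata:\n  tls.crt: <base64-encoded certificate>  # replace with real base64 data\n  tls.key: <base64-encoded private key>\n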
                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/create-secret.html#yaml-creation","title":"YAML creation","text":"
                                                1. Click the name of a cluster on the Clusters page to enter Cluster Details .

                                                2. In the left navigation bar, click ConfigMap and Secret -> Secret , and click the YAML Create button in the upper right corner.

                                                3. Fill in the YAML configuration on the Create with YAML page, and click OK .

You can import a local YAML file, or download and save the completed file locally.

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/create-secret.html#key-yaml-example","title":"key YAML example","text":"
                                                 ```yaml\n apiVersion: v1\n kind: Secret\n metadata:\n   name: secretdemo\n type: Opaque\n data:\n   username: ****\n   password: ****\n ```\n
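Note that values under data must be base64-encoded. If you prefer to provide plain-text values, Kubernetes also accepts the stringData field, which the API server encodes into data on write; a minimal sketch with illustrative values:

apiVersion: v1\nkind: Secret\nmetadata:\n  name: secretdemo\ntype: Opaque\nstringData:\n  username: admin     # plain text; stored base64-encoded under data\n  password: changeit\n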

Next step: Use Secrets

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/use-configmap.html","title":"Use ConfigMaps","text":"

A ConfigMap is a Kubernetes API object used to save non-confidential data as key-value pairs and to store configuration that other objects need. Containers can consume a ConfigMap as environment variables, command-line arguments, or configuration files in a storage volume. ConfigMaps separate configuration data from application code, providing a more flexible way to modify application configuration.

                                                Note

ConfigMaps do not provide confidentiality or encryption. If the data to be stored is confidential, please use a Secret or other third-party tools to ensure data privacy rather than a ConfigMap. In addition, when using a ConfigMap in a container, the container and the ConfigMap must be in the same namespace.

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/use-configmap.html#scenes-to-be-used","title":"scenes to be used","text":"

                                                You can use ConfigMaps in Pods. There are many use cases, mainly including:

                                                • Use ConfigMaps to set the environment variables of the container

                                                • Use ConfigMaps to set the command line parameters of the container

                                                • Use ConfigMaps as container data volumes

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/use-configmap.html#set-the-environment-variables-of-the-container","title":"Set the environment variables of the container","text":"

                                                You can use the ConfigMap as the environment variable of the container through the graphical interface or the terminal command line.

                                                Note

ConfigMap Import uses the entire ConfigMap as environment variables, while ConfigMap Key Value Import uses a single parameter in the ConfigMap as the value of one environment variable.

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/use-configmap.html#graphical-interface-operation","title":"Graphical interface operation","text":"

                                                When creating a workload through an image, you can set environment variables for the container by selecting Import ConfigMaps or Import ConfigMap Key Values on the Environment Variables interface.

                                                1. Go to the Image Creation Workload page, in the Container Configuration step, select the Environment Variables configuration, and click the Add Environment Variable button.

                                                2. Select ConfigMap Import or ConfigMap Key Value Import in the environment variable type.

• When the environment variable type is ConfigMap Import , enter the variable name, prefix, and ConfigMap name in sequence.

• When the environment variable type is ConfigMap Key Value Import , enter the variable name, ConfigMap name, and key name in sequence.

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/use-configmap.html#command-line-operation","title":"Command line operation","text":"

                                                You can set ConfigMaps as environment variables when creating a workload, using the valueFrom parameter to refer to the Key/Value in the ConfigMap.

                                                apiVersion: v1\nkind: Pod\nmetadata:\n   name: configmap-pod-1\nspec:\n   containers:\n     - name: test-container\n       image: busybox\n       command: [ \"/bin/sh\", \"-c\", \"env\" ]\n       env:\n         - name: SPECIAL_LEVEL_KEY\n           valueFrom: # (1)\n             configMapKeyRef:\n               name: kpanda-configmap # (2)\n               key: SPECIAL_LEVEL # (3)\n   restartPolicy: Never\n
1. Use valueFrom to reference a ConfigMap value for the environment variable
2. Name of the referenced ConfigMap
3. Key in the referenced ConfigMap
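For the ConfigMap Import case described earlier (using the whole ConfigMap), the envFrom field loads every key in a ConfigMap as an environment variable. A minimal sketch reusing the kpanda-configmap name above; the Pod name is illustrative:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: configmap-pod-2\nspec:\n  containers:\n  - name: test-container\n    image: busybox\n    command: [ \"/bin/sh\", \"-c\", \"env\" ]\n    envFrom:\n    - configMapRef:\n        name: kpanda-configmap # every key in this ConfigMap becomes an environment variable\n  restartPolicy: Never\n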
                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/use-configmap.html#set-the-command-line-parameters-of-the-container","title":"Set the command line parameters of the container","text":"

You can use ConfigMaps to set commands or parameter values in the container via the environment variable substitution syntax $(VAR_NAME), as shown below.

                                                apiVersion: v1\nkind: Pod\nmetadata:\n   name: configmap-pod-3\nspec:\n   containers:\n     - name: test-container\n       image: busybox\n       command: [ \"/bin/sh\", \"-c\", \"echo $(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY)\" ]\n       env:\n         - name: SPECIAL_LEVEL_KEY\n           valueFrom:\n             configMapKeyRef:\n               name: kpanda-configmap\n               key: SPECIAL_LEVEL\n         - name: SPECIAL_TYPE_KEY\n           valueFrom:\n             configMapKeyRef:\n               name: kpanda-configmap\n               key: SPECIAL_TYPE\n   restartPolicy: Never\n

                                                After the Pod runs, the output is as follows.

                                                Hello Kpanda\n
                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/use-configmap.html#used-as-container-data-volume","title":"Used as container data volume","text":"

You can use the ConfigMap as the data volume of the container through the graphical interface or the terminal command line.

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/use-configmap.html#graphical-operation","title":"Graphical operation","text":"

                                                When creating a workload through an image, you can use the ConfigMap as the data volume of the container by selecting the storage type as \"ConfigMap\" on the \"Data Storage\" interface.

1. Go to the Image Creation Workload page, in the Container Configuration step, select the Data Storage configuration, and click the Add button in the Node Path Mapping list.

                                                2. Select ConfigMap in the storage type, and enter container path , subpath and other information in sequence.

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/use-configmap.html#command-line-operation_1","title":"Command line operation","text":"

Here is an example Pod that mounts a ConfigMap as a storage volume:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: mypod\nspec:\n  containers:\n  - name: mypod\n    image: redis\n    volumeMounts:\n    - name: foo\n      mountPath: \"/etc/foo\"\n      readOnly: true\n  volumes:\n  - name: foo\n    configMap:\n      name: myconfigmap\n

                                                If there are multiple containers in a Pod, each container needs its own volumeMounts block, but you only need to set one spec.volumes block per ConfigMap.

                                                Note

                                                When a ConfigMap is used as a data volume mounted on a container, the ConfigMap can only be read as a read-only file.
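If only some keys in the ConfigMap are needed, the items field projects selected keys to specific file paths. A minimal sketch of the volume definition, reusing myconfigmap from the example above; the key and path names are illustrative:

volumes:\n- name: foo\n  configMap:\n    name: myconfigmap\n    items:\n    - key: SPECIAL_LEVEL      # only this key is projected into the volume\n      path: special-level     # appears as <mountPath>/special-level in the container\n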

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html","title":"use key","text":"

A Secret is a resource object used to store and manage sensitive information such as passwords, OAuth tokens, SSH keys, and TLS credentials. Using Secrets means you don't need to include sensitive data in your application code.

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html#scenes-to-be-used","title":"scenes to be used","text":"

You can use Secrets in Pods in a variety of scenarios, mainly including:

• As environment variables of a container, providing information needed while the container runs.
• As Pod data volumes.
• As authentication credentials for the container registry when the kubelet pulls container images.
                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html#use-the-key-to-set-the-environment-variable-of-the-container","title":"Use the key to set the environment variable of the container","text":"

You can use a Secret as a container environment variable through the GUI or the terminal command line.

                                                Note

Secret Import uses the entire Secret as environment variables; Secret Key Value Import uses a single parameter in the Secret as the value of one environment variable.

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html#graphical-interface-operation","title":"Graphical interface operation","text":"

When creating a workload from an image, you can set environment variables for the container by selecting Secret Import or Secret Key Value Import on the Environment Variables interface.

                                                1. Go to the Image Creation Workload page.

                                                2. Select the Environment Variables configuration in Container Configuration , and click the Add Environment Variable button.

3. Select Secret Import or Secret Key Value Import as the environment variable type.

• When the environment variable type is Secret Import , enter the Variable Name , Prefix , and Secret name in sequence.

• When the environment variable type is Secret Key Value Import , enter the variable name, Secret name, and key name in sequence.

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html#command-line-operation","title":"Command line operation","text":"

                                                As shown in the example below, you can set the secret as an environment variable when creating the workload, using the valueFrom parameter to refer to the Key/Value in the Secret.

apiVersion: v1\nkind: Pod\nmetadata:\n  name: secret-env-pod\nspec:\n  containers:\n  - name: mycontainer\n    image: redis\n    env:\n    - name: SECRET_USERNAME\n      valueFrom:\n        secretKeyRef:\n          name: mysecret\n          key: username\n          optional: false # (1)\n    - name: SECRET_PASSWORD\n      valueFrom:\n        secretKeyRef:\n          name: mysecret\n          key: password\n          optional: false # (2)\n
1. This is the default value; it means the Secret "mysecret" must exist and contain a key named "username"
2. This is the default value; it means the Secret "mysecret" must exist and contain a key named "password"
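For the Secret Import case (using the whole Secret), the envFrom field loads every key in a Secret as an environment variable. A minimal sketch reusing the mysecret name above; the Pod name is illustrative:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: secret-envfrom-pod\nspec:\n  containers:\n  - name: mycontainer\n    image: redis\n    envFrom:\n    - secretRef:\n        name: mysecret # every key in this Secret becomes an environment variable\n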
                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html#use-the-key-as-the-pods-data-volume","title":"Use the key as the pod's data volume","text":""},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html#graphical-interface-operation_1","title":"Graphical interface operation","text":"

When creating a workload through an image, you can use a Secret as the data volume of the container by selecting Secret as the storage type on the Data Storage interface.

                                                1. Go to the Image Creation Workload page.

                                                2. In the Container Configuration , select the Data Storage configuration, and click the Add button in the Node Path Mapping list.

                                                3. Select Secret in the storage type, and enter container path , subpath and other information in sequence.

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html#command-line-operation_1","title":"Command line operation","text":"

                                                The following is an example of a Pod that mounts a Secret named mysecret via a data volume:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: mypod\nspec:\n  containers:\n  - name: mypod\n    image: redis\n    volumeMounts:\n    - name: foo\n      mountPath: \"/etc/foo\"\n      readOnly: true\n  volumes:\n  - name: foo\n    secret:\n      secretName: mysecret\n      optional: false # (1)\n
1. This is the default; it means the Secret "mysecret" must already exist

                                                If the Pod contains multiple containers, each container needs its own volumeMounts block, but only one .spec.volumes setting is required for each Secret.

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html#used-as-the-identity-authentication-credential-for-the-container-registry-when-the-kubelet-pulls-the-container-image","title":"Used as the identity authentication credential for the container registry when the kubelet pulls the container image","text":"

You can use a Secret as the authentication credential for a container registry through the GUI or the terminal command line.

                                                "},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html#graphical-operation","title":"Graphical operation","text":"

When creating a workload through an image, you can use a Secret as the authentication credential for pulling images from a private container registry, as follows.

                                                1. Go to the Image Creation Workload page.

                                                2. In the second step of Container Configuration , select the Basic Information configuration, and click the Select Image button.

3. Select the name of the private container registry from the Container Registry drop-down list in the pop-up box. See Create Secret for details on creating a Secret for a private registry.

4. Enter the name of the image in the private registry and click OK to complete the image selection.

                                                Note

When creating a Secret, make sure to enter the correct container registry address, username, and password, and to select the correct image name; otherwise you will not be able to pull images from the container registry.
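For reference, the command-line equivalent references the registry Secret through the Pod's imagePullSecrets field. A minimal sketch, assuming a kubernetes.io/dockerconfigjson Secret named regcred and an illustrative private image path:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: private-image-pod\nspec:\n  containers:\n  - name: app\n    image: registry.example.com/myteam/myapp:1.0 # image hosted in a private registry\n  imagePullSecrets:\n  - name: regcred # Secret of type kubernetes.io/dockerconfigjson\n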

                                                "},{"location":"en/end-user/kpanda/custom-resources/create.html","title":"CustomResourceDefinition (CRD)","text":"

In Kubernetes, all objects are abstracted as resources: Pod, Deployment, Service, Volume, and so on are the default resources provided by Kubernetes. These provide important support for daily operations and management, but in some special cases the existing preset resources cannot meet business needs. To extend the capabilities of the Kubernetes API, CustomResourceDefinition (CRD) was created.

                                                The container management module supports interface-based management of custom resources, and its main features are as follows:

• Obtain the list and detailed information of custom resources in the cluster
• Create custom resources based on YAML
• Create custom resource instances (CRs, Custom Resources) based on YAML
                                                • Delete custom resources
                                                "},{"location":"en/end-user/kpanda/custom-resources/create.html#prerequisites","title":"Prerequisites","text":"
• You have integrated or created a Kubernetes cluster and can access its UI.

• Created a namespace and a user, and authorized the user as Cluster Admin. For details, refer to Namespace Authorization.

                                                "},{"location":"en/end-user/kpanda/custom-resources/create.html#create-crd-via-yaml","title":"Create CRD via YAML","text":"
                                                1. Click a cluster name to enter Cluster Details .

                                                2. In the left navigation bar, click Custom Resource , and click the YAML Create button in the upper right corner.

                                                3. On the Create with YAML page, fill in the YAML statement and click OK .

                                                4. Return to the custom resource list page, and you can view the custom resource named crontabs.stable.example.com just created.

                                                Custom resource example:

                                                CRD example
                                                apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  name: crontabs.stable.example.com\nspec:\n  group: stable.example.com\n  versions:\n    - name: v1\n      served: true\n      storage: true\n      schema:\n        openAPIV3Schema:\n          type: object\n          properties:\n            spec:\n              type: object\n              properties:\n                cronSpec:\n                  type: string\n                image:\n                  type: string\n                replicas:\n                  type: integer\n  scope: Namespaced\n  names:\n    plural: crontabs\n    singular: crontab\n    kind: CronTab\n    shortNames:\n    - ct\n
                                                "},{"location":"en/end-user/kpanda/custom-resources/create.html#create-a-custom-resource-example-via-yaml","title":"Create a custom resource example via YAML","text":"
                                                1. Click a cluster name to enter Cluster Details .

                                                2. In the left navigation bar, click Custom Resource , and click the YAML Create button in the upper right corner.

                                                3. Click the custom resource named crontabs.stable.example.com , enter the details, and click the YAML Create button in the upper right corner.

                                                4. On the Create with YAML page, fill in the YAML statement and click OK .

                                                5. Return to the details page of crontabs.stable.example.com , and you can view the custom resource named my-new-cron-object just created.

                                                CR Example:

                                                CR example
                                                apiVersion: \"stable.example.com/v1\"\nkind: CronTab\nmetadata:\n  name: my-new-cron-object\nspec:\n  cronSpec: \"* * * * */5\"\n  image: my-awesome-cron-image\n
                                                "},{"location":"en/end-user/kpanda/gpu/index.html","title":"Overview of GPU Management","text":"

This article introduces the capabilities of the Suanova container management platform for unified operations and management of heterogeneous resources, with a focus on GPUs.

                                                "},{"location":"en/end-user/kpanda/gpu/index.html#background","title":"Background","text":"

With the rapid development of emerging technologies such as AI, large-scale models, and autonomous driving, enterprises face a growing demand for compute-intensive tasks and data processing. Traditional compute architectures represented by CPUs can no longer meet these growing computational requirements. Heterogeneous computing represented by GPUs has therefore been widely adopted due to its unique advantages in processing large-scale data, performing complex calculations, and real-time graphics rendering.

                                                Meanwhile, due to the lack of experience and professional solutions in scheduling and managing heterogeneous resources, the utilization efficiency of GPU devices is extremely low, resulting in high AI production costs for enterprises. The challenge of reducing costs, increasing efficiency, and improving the utilization of GPUs and other heterogeneous resources has become a pressing issue for many enterprises.

                                                "},{"location":"en/end-user/kpanda/gpu/index.html#introduction-to-gpu-capabilities","title":"Introduction to GPU Capabilities","text":"

                                                The Suanova container management platform supports unified scheduling and operations management of GPUs, NPUs, and other heterogeneous resources, fully unleashing the computational power of GPU resources, and accelerating the development of enterprise AI and other emerging applications. The GPU management capabilities of Suanova are as follows:

                                                • Support for unified management of heterogeneous computing resources from domestic and foreign manufacturers such as NVIDIA, Huawei Ascend, and Iluvatar.
                                                • Support for multi-card heterogeneous scheduling within the same cluster, with automatic recognition of GPUs in the cluster.
                                                • Support for native management solutions for NVIDIA GPUs, vGPUs, and MIG, with cloud native capabilities.
• Support for partitioning a single physical card among different tenants, and for allocating GPU resources to tenants and containers based on compute power and memory quotas.
                                                • Support for multi-dimensional GPU resource monitoring at the cluster, node, and application levels, assisting operators in managing GPU resources.
                                                • Compatibility with various training frameworks such as TensorFlow and PyTorch.
                                                "},{"location":"en/end-user/kpanda/gpu/index.html#introduction-to-gpu-operator","title":"Introduction to GPU Operator","text":"

                                                Similar to regular computer hardware, NVIDIA GPUs, as physical devices, need to have the NVIDIA GPU driver installed in order to be used. To reduce the cost of using GPUs on Kubernetes, NVIDIA provides the NVIDIA GPU Operator component to manage various components required for using NVIDIA GPUs. These components include the NVIDIA driver (for enabling CUDA), NVIDIA container runtime, GPU node labeling, DCGM-based monitoring, and more. In theory, users only need to plug the GPU card into a compute device managed by Kubernetes, and they can use all the capabilities of NVIDIA GPUs through the GPU Operator. For more information about NVIDIA GPU Operator, refer to the NVIDIA official documentation. For deployment instructions, refer to Offline Installation of GPU Operator.

                                                Architecture diagram of NVIDIA GPU Operator:

                                                "},{"location":"en/end-user/kpanda/gpu/FAQ.html","title":"GPU FAQs","text":""},{"location":"en/end-user/kpanda/gpu/FAQ.html#gpu-processes-are-not-visible-while-running-nvidia-smi-inside-a-pod","title":"GPU processes are not visible while running nvidia-smi inside a pod","text":"

Q: When running the nvidia-smi command inside a GPU-utilizing pod, no GPU process information is visible, in either full-card mode or vGPU mode.

                                                A: Due to PID namespace isolation, GPU processes are not visible inside the Pod. To view GPU processes, you can use one of the following methods:

• Configure the workload using the GPU with hostPID: true to enable viewing PIDs on the host (see the sketch after this list).
                                                • Run the nvidia-smi command in the driver pod of the gpu-operator to view processes.
                                                • Run the chroot /run/nvidia/driver nvidia-smi command on the host to view processes.
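
For the first method, a minimal sketch of a GPU workload with the host PID namespace enabled might look like this; the pod name and image tag are illustrative assumptions:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: gpu-hostpid-demo   # hypothetical name for illustration\nspec:\n  hostPID: true   # share the host PID namespace so nvidia-smi in the pod can see GPU processes\n  containers:\n    - name: cuda-container\n      image: nvidia/cuda:12.2.0-base-ubuntu22.04   # assumed image tag\n      command: [\"sleep\", \"infinity\"]\n      resources:\n        limits:\n          nvidia.com/gpu: 1\n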
                                                "},{"location":"en/end-user/kpanda/gpu/Iluvatar_usage.html","title":"How to Use Iluvatar GPU in Applications","text":"

This section describes how to use Iluvatar virtual GPUs on the AI platform.

                                                "},{"location":"en/end-user/kpanda/gpu/Iluvatar_usage.html#prerequisites","title":"Prerequisites","text":"
• The AI platform container management platform has been deployed and is running smoothly.
• The container management module has been integrated with a Kubernetes cluster, or a Kubernetes cluster has been created, and the cluster's UI is accessible.
• The Iluvatar GPU driver has been installed on the current cluster. Refer to the Iluvatar official documentation for driver installation instructions, or contact the Suanova ecosystem team for enterprise-level support at peg-pem@daocloud.io.
• The GPUs in the current cluster have not undergone any virtualization operations and are not occupied by other applications.
                                                "},{"location":"en/end-user/kpanda/gpu/Iluvatar_usage.html#procedure","title":"Procedure","text":""},{"location":"en/end-user/kpanda/gpu/Iluvatar_usage.html#configuration-via-user-interface","title":"Configuration via User Interface","text":"
                                                1. Check if the GPU card in the cluster has been detected. Click Clusters -> Cluster Settings -> Addon Plugins , and check if the proper GPU type has been automatically enabled and detected. Currently, the cluster will automatically enable GPU and set the GPU type as Iluvatar .

2. Deploy a workload. Click Clusters -> Workloads and deploy a workload from an image. After selecting the GPU type as Iluvatar , configure the GPU resources used by the application:

                                                  • Physical Card Count (iluvatar.ai/vcuda-core): Indicates the number of physical cards that the current pod needs to mount. The input value must be an integer and less than or equal to the number of cards on the host machine.

                                                  • Memory Usage (iluvatar.ai/vcuda-memory): Indicates the amount of GPU memory occupied by each card. The value is in MB, with a minimum value of 1 and a maximum value equal to the entire memory of the card.

                                                  If there are any issues with the configuration values, scheduling failures or resource allocation failures may occur.

                                                "},{"location":"en/end-user/kpanda/gpu/Iluvatar_usage.html#configuration-via-yaml","title":"Configuration via YAML","text":"

To request GPU resources for a workload, add iluvatar.ai/vcuda-core: 1 and iluvatar.ai/vcuda-memory: 200 to the resource requests and limits. These parameters configure the application's use of the physical card's resources.

                                                apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-iluvatar-gpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-iluvatar-gpu-demo\n  template:\n    metadata:\n      labels:\n        app: full-iluvatar-gpu-demo\n    spec:\n      containers:\n      - image: nginx:perl\n        name: container-0\n        resources:\n          limits:\n            cpu: 250m\n            iluvatar.ai/vcuda-core: '1'\n            iluvatar.ai/vcuda-memory: '200'\n            memory: 512Mi\n          requests:\n            cpu: 250m\n            memory: 512Mi\n      imagePullSecrets:\n      - name: default-secret\n
                                                "},{"location":"en/end-user/kpanda/gpu/dynamic-regulation.html","title":"GPU Scheduling Configuration (Binpack and Spread)","text":"

This page introduces how to reduce GPU resource fragmentation and prevent single points of failure through Binpack and Spread when using NVIDIA vGPU, achieving advanced vGPU scheduling. The AI platform provides Binpack and Spread scheduling policies at two dimensions, cluster and workload, to meet the needs of different scenarios.

                                                "},{"location":"en/end-user/kpanda/gpu/dynamic-regulation.html#prerequisites","title":"Prerequisites","text":"
                                                • GPU devices are correctly installed on the cluster nodes.
                                                • The gpu-operator component and Nvidia-vgpu component are correctly installed in the cluster.
• The NVIDIA vGPU type appears under GPU Mode in the cluster's node list.
                                                "},{"location":"en/end-user/kpanda/gpu/dynamic-regulation.html#use-cases","title":"Use Cases","text":"
                                                • Scheduling policy based on GPU dimension

                                                  • Binpack: Prioritizes using the same GPU on a node, suitable for increasing GPU utilization and reducing resource fragmentation.
                                                  • Spread: Multiple Pods are distributed across different GPUs on nodes, suitable for high availability scenarios to avoid single card failures.
                                                • Scheduling policy based on node dimension

                                                  • Binpack: Multiple Pods prioritize using the same node, suitable for increasing GPU utilization and reducing resource fragmentation.
                                                  • Spread: Multiple Pods are distributed across different nodes, suitable for high availability scenarios to avoid single node failures.
                                                "},{"location":"en/end-user/kpanda/gpu/dynamic-regulation.html#use-binpack-and-spread-at-cluster-level","title":"Use Binpack and Spread at Cluster-Level","text":"

                                                Note

By default, workloads follow the cluster-level Binpack and Spread configuration. If a workload sets its own Binpack and Spread scheduling policy that differs from the cluster's, the workload-level policy takes precedence.

                                                1. On the Clusters page, select the cluster for which you want to adjust the Binpack and Spread scheduling policies. Click the \u2507 icon on the right and select GPU Scheduling Configuration from the dropdown list.

                                                2. Adjust the GPU scheduling configuration according to your business scenario, and click OK to save.

                                                "},{"location":"en/end-user/kpanda/gpu/dynamic-regulation.html#use-binpack-and-spread-at-workload-level","title":"Use Binpack and Spread at Workload-Level","text":"

                                                Note

                                                When the Binpack and Spread scheduling policies at the workload level conflict with the cluster-level configuration, the workload-level configuration takes precedence.

                                                Follow the steps below to create a deployment using an image and configure Binpack and Spread scheduling policies within the workload.

                                                1. Click Clusters in the left navigation bar, then click the name of the target cluster to enter the Cluster Details page.

                                                2. On the Cluster Details page, click Workloads -> Deployments in the left navigation bar, then click the Create by Image button in the upper right corner of the page.

3. Fill in the Basic Information and Container Settings in order. Within the container settings, enable GPU configuration and select NVIDIA vGPU as the GPU type. Click Advanced Settings, enable the Binpack / Spread scheduling policy, and adjust the GPU scheduling configuration to fit your business scenario. After configuration, click Next to proceed to Service Settings and Advanced Settings. Finally, click OK at the bottom right of the page to complete the creation.

                                                "},{"location":"en/end-user/kpanda/gpu/gpu-metrics.html","title":"GPU Metrics","text":"

This page lists some commonly used GPU metrics.

"},{"location":"en/end-user/kpanda/gpu/gpu-metrics.html#cluster-level","title":"Cluster Level","text":"
• Number of GPUs: Total number of GPUs in the cluster
• Average GPU Utilization: Average compute utilization of all GPUs in the cluster
• Average GPU Memory Utilization: Average memory utilization of all GPUs in the cluster
• GPU Power: Power consumption of all GPUs in the cluster
• GPU Temperature: Temperature of all GPUs in the cluster
• GPU Utilization Details: 24-hour usage details of all GPUs in the cluster (includes max, avg, current)
• GPU Memory Usage Details: 24-hour memory usage details of all GPUs in the cluster (includes min, max, avg, current)
• GPU Memory Bandwidth Utilization: For example, an Nvidia V100 GPU has a maximum memory bandwidth of 900 GB/sec; if the current memory bandwidth is 450 GB/sec, the utilization is 50%
"},{"location":"en/end-user/kpanda/gpu/gpu-metrics.html#node-level","title":"Node Level","text":"
• GPU Mode: Usage mode of GPUs on the node, including full-card mode, MIG mode, and vGPU mode
• Number of Physical GPUs: Total number of physical GPUs on the node
• Number of Virtual GPUs: Number of vGPU devices created on the node
• Number of MIG Instances: Number of MIG instances created on the node
• GPU Memory Allocation Rate: Memory allocation rate of all GPUs on the node
• Average GPU Utilization: Average compute utilization of all GPUs on the node
• Average GPU Memory Utilization: Average memory utilization of all GPUs on the node
• GPU Driver Version: Driver version information of GPUs on the node
• GPU Utilization Details: 24-hour usage details of each GPU on the node (includes max, avg, current)
• GPU Memory Usage Details: 24-hour memory usage details of each GPU on the node (includes min, max, avg, current)
"},{"location":"en/end-user/kpanda/gpu/gpu-metrics.html#pod-level","title":"Pod Level","text":"
Application Overview / GPU - Compute & Memory:
• Pod GPU Utilization: Compute utilization of the GPUs used by the current Pod
• Pod GPU Memory Utilization: Memory utilization of the GPUs used by the current Pod
• Pod GPU Memory Usage: Memory usage of the GPUs used by the current Pod
• Memory Allocation: Memory allocation of the GPUs used by the current Pod
• Pod GPU Memory Copy Ratio: Memory copy ratio of the GPUs used by the current Pod
GPU - Engine Overview:
• GPU Graphics Engine Activity: Percentage of time the Graphics or Compute engine is active during a monitoring cycle
• GPU Memory Bandwidth Utilization: Fraction of cycles during which data is sent to or received from device memory, averaged over the interval rather than an instantaneous value. A higher value indicates higher utilization of device memory. A value of 1 (100%) means a DRAM instruction is executed every cycle during the interval (in practice, a peak of about 0.8 (80%) is the maximum achievable); a value of 0.2 (20%) means 20% of the cycles in the interval are spent reading from or writing to device memory
• Tensor Core Utilization: Percentage of time the Tensor Core pipeline is active during a monitoring cycle
• FP16 Engine Utilization: Percentage of time the FP16 pipeline is active during a monitoring cycle
• FP32 Engine Utilization: Percentage of time the FP32 pipeline is active during a monitoring cycle
• FP64 Engine Utilization: Percentage of time the FP64 pipeline is active during a monitoring cycle
• GPU Decode Utilization: Decode engine utilization of the GPU
• GPU Encode Utilization: Encode engine utilization of the GPU
GPU - Temperature & Power:
• GPU Temperature: Temperature of all GPUs in the cluster
• GPU Power: Power consumption of all GPUs in the cluster
• GPU Total Power Consumption: Total power consumption of the GPUs
GPU - Clock:
• GPU Memory Clock: Memory clock frequency
• GPU Application SM Clock: Application SM clock frequency
• GPU Application Memory Clock: Application memory clock frequency
• GPU Video Engine Clock: Video engine clock frequency
• GPU Throttle Reasons: Reasons for GPU throttling
GPU - Other Details:
• PCIe Transfer Rate: Data transfer rate of the GPU through the PCIe bus
• PCIe Receive Rate: Data receive rate of the GPU through the PCIe bus
"},{"location":"en/end-user/kpanda/gpu/gpu_matrix.html","title":"GPU Support Matrix","text":"

This page explains the matrix of supported GPUs and operating systems for the AI platform.

"},{"location":"en/end-user/kpanda/gpu/gpu_matrix.html#nvidia-gpu","title":"NVIDIA GPU","text":"
NVIDIA GPU (Full Card/vGPU)
• Supported GPU models:
  • NVIDIA Fermi (2.1) architecture: NVIDIA GeForce 400 series, NVIDIA Quadro 4000 series, NVIDIA Tesla 20 series
  • NVIDIA Ampere architecture series (A100; A800; H100)
• Compatible operating systems and kernels (online):
  • CentOS 7: Kernel 3.10.0-123 ~ 3.10.0-1160 (see the Kernel Reference Document and the Recommended Operating System with Proper Kernel Version)
  • CentOS 8: Kernel 4.18.0-80 ~ 4.18.0-348
  • Ubuntu 20.04: Kernel 5.4
  • Ubuntu 22.04: Kernel 5.19
  • RHEL 7: Kernel 3.10.0-123 ~ 3.10.0-1160
  • RHEL 8: Kernel 4.18.0-80 ~ 4.18.0-348
• Recommended operating system and kernel: CentOS 7.9 with kernel 3.10.0-1160
• Installation documentation: Offline Installation with GPU Operator

NVIDIA MIG
• Supported GPU models (Ampere architecture series): A100, A800, H100
• Compatible operating systems and kernels (online):
  • CentOS 7: Kernel 3.10.0-123 ~ 3.10.0-1160
  • CentOS 8: Kernel 4.18.0-80 ~ 4.18.0-348
  • Ubuntu 20.04: Kernel 5.4
  • Ubuntu 22.04: Kernel 5.19
  • RHEL 7: Kernel 3.10.0-123 ~ 3.10.0-1160
  • RHEL 8: Kernel 4.18.0-80 ~ 4.18.0-348
• Recommended operating system and kernel: CentOS 7.9 with kernel 3.10.0-1160
• Installation documentation: Offline Installation with GPU Operator
"},{"location":"en/end-user/kpanda/gpu/gpu_matrix.html#ascend-npu","title":"Ascend NPU","text":"
Ascend (Ascend 310)
• Supported NPU models: Ascend 310; Ascend 310P
• Compatible operating systems (online): Ubuntu 20.04, CentOS 7.6, CentOS 8.2, KylinV10SP1, openEuler
• Recommended kernel: refer to the Kernel Version Requirements
• Recommended operating system and kernel: CentOS 7.9 with kernel 3.10.0-1160
• Installation documentation: 300 and 310P Driver Documentation

Ascend (Ascend 910P)
• Supported NPU models: Ascend 910
• Compatible operating systems (online): Ubuntu 20.04, CentOS 7.6, CentOS 8.2, KylinV10SP1, openEuler
• Recommended kernel: refer to the Kernel Version Requirements
• Recommended operating system and kernel: CentOS 7.9 with kernel 3.10.0-1160
• Installation documentation: 910 Driver Documentation
"},{"location":"en/end-user/kpanda/gpu/gpu_matrix.html#iluvatar-gpu","title":"Iluvatar GPU","text":"
Iluvatar (Iluvatar vGPU)
• Supported GPU models: BI100; MR100
• Compatible operating systems and kernels (online):
  • CentOS 7: Kernel 3.10.0-957.el7.x86_64 ~ 3.10.0-1160.42.2.el7.x86_64
  • CentOS 8: Kernel 4.18.0-80.el8.x86_64 ~ 4.18.0-305.19.1.el8_4.x86_64
  • Ubuntu 20.04: Kernel 4.15.0-20-generic ~ 4.15.0-160-generic, 5.4.0-26-generic ~ 5.4.0-89-generic, 5.8.0-23-generic ~ 5.8.0-63-generic
  • Ubuntu 21.04: Kernel 4.15.0-20-generic ~ 4.15.0-160-generic, 5.4.0-26-generic ~ 5.4.0-89-generic, 5.8.0-23-generic ~ 5.8.0-63-generic
  • openEuler 22.03 LTS: Kernel version >= 5.1 and <= 5.10
• Recommended operating system and kernel: CentOS 7.9 with kernel 3.10.0-1160
• Installation documentation: Coming Soon
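
To check which row of the matrix applies to a given node, you can query its distribution and kernel directly:

grep PRETTY_NAME /etc/os-release   # distribution and version\nuname -r                           # kernel version\n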
                                                "},{"location":"en/end-user/kpanda/gpu/gpu_scheduler_config.html","title":"GPU Scheduling Configuration","text":"

This document introduces GPU scheduling configuration, which enables advanced scheduling policies. Currently, the primary implementation is the vGPU scheduling policy.

                                                "},{"location":"en/end-user/kpanda/gpu/gpu_scheduler_config.html#vgpu-resource-scheduling-configuration","title":"vGPU Resource Scheduling Configuration","text":"

vGPU provides two resource usage policies, binpack and spread, each available at both the node-level and GPU-level dimensions. The choice depends on whether you want to distribute workloads more sparsely across different nodes and GPUs, or concentrate them on the same node and GPU to make resource utilization more efficient and reduce resource fragmentation.

                                                You can modify the scheduling policy in your cluster by following these steps:

                                                1. Go to the cluster management list in the container management interface.
                                                2. Click the settings button ... next to the cluster.
                                                3. Click GPU Scheduling Configuration.
                                                4. Toggle the scheduling policy between node-level and GPU-level. By default, the node-level policy is binpack, and the GPU-level policy is spread.

                                                The above steps modify the cluster-level scheduling policy. Users can also specify their own scheduling policy at the workload level to change the scheduling results. Below is an example of modifying the scheduling policy at the workload level:

                                                apiVersion: v1\nkind: Pod\nmetadata:\n  name: gpu-pod\n  annotations:\n    hami.io/node-scheduler-policy: \"binpack\"\n    hami.io/gpu-scheduler-policy: \"binpack\"\nspec:\n  containers:\n    - name: ubuntu-container\n      image: ubuntu:18.04\n      command: [\"bash\", \"-c\", \"sleep 86400\"]\n      resources:\n        limits:\n          nvidia.com/gpu: 1\n          nvidia.com/gpumem: 3000\n          nvidia.com/gpucores: 30\n

                                                In this example, both the node- and GPU-level scheduling policies are set to binpack. This ensures that the workload is scheduled to maximize resource utilization and reduce fragmentation.

                                                "},{"location":"en/end-user/kpanda/gpu/vgpu_quota.html","title":"GPU Quota Management","text":"

This section describes how to manage GPU quotas on the AI platform.

                                                "},{"location":"en/end-user/kpanda/gpu/vgpu_quota.html#prerequisites","title":"Prerequisites","text":"

                                                The proper GPU driver (NVIDIA GPU, NVIDIA MIG, Iluvatar, Ascend) has been deployed on the current cluster either through an Operator or manually.

                                                "},{"location":"en/end-user/kpanda/gpu/vgpu_quota.html#procedure","title":"Procedure","text":"

                                                Follow these steps to manage GPU quotas in AI platform:

                                                1. Go to Namespaces and click Quota Management to configure the GPU resources that can be used by a specific namespace.

                                                2. The currently supported card types for quota management in a namespace are: NVIDIA vGPU, NVIDIA MIG, Iluvatar, and Ascend.

3. NVIDIA vGPU Quota Management: Configure the specific quota that the namespace can use. This will create a ResourceQuota CR.

- Physical Card Count (nvidia.com/vgpu): Indicates the number of physical cards that the current pod needs to mount. The input value must be an integer and less than or equal to the number of cards on the host machine.
- GPU Core Count (nvidia.com/gpucores): Indicates the GPU compute power occupied by each card. The value ranges from 0 to 100. If configured as 0, it is considered not to enforce isolation; if configured as 100, it is considered to exclusively occupy the entire card.
- GPU Memory Usage (nvidia.com/gpumem): Indicates the amount of GPU memory occupied by each card. The value is in MB, with a minimum value of 1 and a maximum value equal to the entire memory of the card.
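
As a rough sketch, the ResourceQuota generated for a namespace might look like the following; the object name, namespace, and exact quota keys are assumptions inferred from the resource names above, so inspect the CR the platform actually creates for the authoritative form:

apiVersion: v1\nkind: ResourceQuota\nmetadata:\n  name: vgpu-quota        # hypothetical name; the platform generates its own\n  namespace: demo-ns      # hypothetical namespace\nspec:\n  hard:\n    requests.nvidia.com/vgpu: \"2\"        # physical cards the namespace may mount\n    requests.nvidia.com/gpucores: \"60\"   # total GPU compute share across the namespace\n    requests.nvidia.com/gpumem: \"4096\"   # total GPU memory in MB\n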
                                                "},{"location":"en/end-user/kpanda/gpu/ascend/Ascend_usage.html","title":"Use Ascend NPU","text":"

This section explains how to use Ascend NPUs on the AI platform.

                                                "},{"location":"en/end-user/kpanda/gpu/ascend/Ascend_usage.html#prerequisites","title":"Prerequisites","text":"
                                                • The current NPU node has the Ascend driver installed.
                                                • The current NPU node has the Ascend-Docker-Runtime component installed.
                                                • The NPU MindX DL suite is installed on the current cluster.
                                                • No virtualization is performed on the NPU card in the current cluster, and it is not occupied by other applications.

                                                Refer to the Ascend NPU Component Installation Document to install the basic environment.

                                                "},{"location":"en/end-user/kpanda/gpu/ascend/Ascend_usage.html#quick-start","title":"Quick Start","text":"

This document uses the AscendCL Image Classification Application example from the Ascend sample library.

                                                1. Download the Ascend repository

                                                  Run the following command to download the Ascend demo repository, and remember the storage location of the code for subsequent use.

                                                  git clone https://gitee.com/ascend/samples.git\n
                                                2. Prepare the base image

This example uses the Ascend-pytorch base image, which can be obtained from the Ascend Container Registry.

                                                3. Prepare the YAML file

                                                  ascend-demo.yaml
                                                  apiVersion: batch/v1\nkind: Job\nmetadata:\n  name: resnetinfer1-1-1usoc\nspec:\n  template:\n    spec:\n      containers:\n        - image: ascendhub.huawei.com/public-ascendhub/ascend-pytorch:23.0.RC2-ubuntu18.04 # Inference image name\n          imagePullPolicy: IfNotPresent\n          name: resnet50infer\n          securityContext:\n            runAsUser: 0\n          command:\n            - \"/bin/bash\"\n            - \"-c\"\n            - |\n              source /usr/local/Ascend/ascend-toolkit/set_env.sh &&\n              TEMP_DIR=/root/samples_copy_$(date '+%Y%m%d_%H%M%S_%N') &&\n              cp -r /root/samples \"$TEMP_DIR\" &&\n              cd \"$TEMP_DIR\"/inference/modelInference/sampleResnetQuickStart/python/model &&\n              wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/resnet50/resnet50.onnx &&\n              atc --model=resnet50.onnx --framework=5 --output=resnet50 --input_shape=\"actual_input_1:1,3,224,224\"  --soc_version=Ascend910 &&\n              cd ../data &&\n              wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg &&\n              cd ../scripts &&\n              bash sample_run.sh\n          resources:\n            requests:\n              huawei.com/Ascend910: 1 # Number of the Ascend 910 Processors\n            limits:\n              huawei.com/Ascend910: 1 # The value should be the same as that of requests\n          volumeMounts:\n            - name: hiai-driver\n              mountPath: /usr/local/Ascend/driver\n              readOnly: true\n            - name: slog\n              mountPath: /var/log/npu/conf/slog/slog.conf\n            - name: localtime # The container time must be the same as the host time\n              mountPath: /etc/localtime\n            - name: dmp\n              mountPath: /var/dmp_daemon\n            - name: slogd\n              mountPath: /var/slogd\n            - name: hbasic\n              mountPath: /etc/hdcBasic.cfg\n            - name: sys-version\n              mountPath: /etc/sys_version.conf\n            - name: aicpu\n              mountPath: /usr/lib64/aicpu_kernels\n            - name: tfso\n              mountPath: /usr/lib64/libtensorflow.so\n            - name: sample-path\n              mountPath: /root/samples\n      volumes:\n        - name: hiai-driver\n          hostPath:\n            path: /usr/local/Ascend/driver\n        - name: slog\n          hostPath:\n            path: /var/log/npu/conf/slog/slog.conf\n        - name: localtime\n          hostPath:\n            path: /etc/localtime\n        - name: dmp\n          hostPath:\n            path: /var/dmp_daemon\n        - name: slogd\n          hostPath:\n            path: /var/slogd\n        - name: hbasic\n          hostPath:\n            path: /etc/hdcBasic.cfg\n        - name: sys-version\n          hostPath:\n            path: /etc/sys_version.conf\n        - name: aicpu\n          hostPath:\n            path: /usr/lib64/aicpu_kernels\n        - name: tfso\n          hostPath:\n            path: /usr/lib64/libtensorflow.so\n        - name: sample-path\n          hostPath:\n            path: /root/samples\n      restartPolicy: OnFailure\n

                                                  Some fields in the above YAML need to be modified according to the actual situation:

1. atc ... --soc_version=Ascend910 uses Ascend910; adjust this field to your actual situation. You can use the npu-smi info command to check the NPU model, then add the Ascend prefix.
2. sample-path should be adjusted according to the actual situation.
3. resources should be adjusted according to the actual situation.
                                                4. Deploy a Job and check its results

                                                  Use the following command to create a Job:

                                                  kubectl apply -f ascend-demo.yaml\n

                                                  Check the Pod running status:
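
You can watch the Job's pod from the command line, using the job-name label that Kubernetes adds to pods created by a Job:

kubectl get pods -l job-name=resnetinfer1-1-1usoc          # wait for the pod to reach Completed\nkubectl logs -l job-name=resnetinfer1-1-1usoc --tail=50    # inspect the classification output\n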

After the Pod runs successfully, check the log output. The key information shown on screen is illustrated in the figure below: Label indicates the category identifier, Conf indicates the maximum confidence of the classification, and Class indicates the category the image belongs to. These values may vary depending on the version and environment, so refer to your actual output:

                                                  Result image display:

                                                "},{"location":"en/end-user/kpanda/gpu/ascend/Ascend_usage.html#ui-usage","title":"UI Usage","text":"
                                                1. Confirm whether the cluster has detected the GPU card. Click Clusters -> Cluster Settings -> Addon Plugins , and check whether the proper GPU type is automatically enabled and detected. Currently, the cluster will automatically enable GPU and set the GPU type to Ascend .

                                                2. Deploy the workload. Click Clusters -> Workloads , deploy the workload through an image, select the type (Ascend), and then configure the number of physical cards used by the application:

                                                  Number of Physical Cards (huawei.com/Ascend910) : This indicates how many physical cards the current Pod needs to mount. The input value must be an integer and less than or equal to the number of cards on the host.

If there is an issue with the above configuration, scheduling failures or resource allocation issues may occur.

                                                "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html","title":"Installation of Ascend NPU Components","text":"

                                                This chapter provides installation guidance for Ascend NPU drivers, Device Plugin, NPU-Exporter, and other components.

                                                "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html#prerequisites","title":"Prerequisites","text":"
                                                1. Before installation, confirm the supported NPU models. For details, refer to the Ascend NPU Matrix.
                                                2. Ensure that the kernel version required for the proper NPU model is compatible. For more details, refer to the Ascend NPU Matrix.
                                                3. Prepare the basic Kubernetes environment.
                                                "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html#installation-steps","title":"Installation Steps","text":"

                                                Before using NPU resources, you need to complete the firmware installation, NPU driver installation, Docker Runtime installation, user creation, log directory creation, and NPU Device Plugin installation. Refer to the following steps for details.

                                                "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html#install-firmware","title":"Install Firmware","text":"
1. Confirm that the kernel version falls within the range supported by the \"binary installation\" method; if so, you can install the NPU driver firmware directly.
                                                2. For firmware and driver downloads, refer to: Firmware Download Link
                                                3. For firmware installation, refer to: Install NPU Driver Firmware
                                                "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html#install-npu-driver","title":"Install NPU Driver","text":"
                                                1. If the driver is not installed, refer to the official Ascend documentation for installation. For example, for Ascend910, refer to: 910 Driver Installation Document.
                                                2. Run the command npu-smi info, and if the NPU information is returned normally, it indicates that the NPU driver and firmware are ready.
                                                "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html#install-docker-runtime","title":"Install Docker Runtime","text":"
                                                1. Download Ascend Docker Runtime

                                                  Community edition download link: https://www.hiascend.com/zh/software/mindx-dl/community

                                                  wget -c https://mindx.obs.cn-south-1.myhuaweicloud.com/OpenSource/MindX/MindX%205.0.RC2/MindX%20DL%205.0.RC2/Ascend-docker-runtime_5.0.RC2_linux-x86_64.run\n

                                                  Install to the specified path by executing the following two commands in order, with parameters specifying the installation path:

chmod u+x Ascend-docker-runtime_5.0.RC2_linux-x86_64.run\n./Ascend-docker-runtime_5.0.RC2_linux-x86_64.run --install --install-path=<path>\n
                                                2. Modify the containerd configuration file

                                                  If containerd has no default configuration file, execute the following three commands in order to create the configuration file:

                                                  mkdir /etc/containerd \ncontainerd config default > /etc/containerd/config.toml \nvim /etc/containerd/config.toml\n

                                                  If containerd has a configuration file:

                                                  vim /etc/containerd/config.toml\n

                                                  Modify the runtime installation path according to the actual situation, mainly modifying the runtime field:

                                                  ... \n[plugins.\"io.containerd.monitor.v1.cgroups\"]\n   no_prometheus = false  \n[plugins.\"io.containerd.runtime.v1.linux\"]\n   shim = \"containerd-shim\"\n   runtime = \"/usr/local/Ascend/Ascend-Docker-Runtime/ascend-docker-runtime\"\n   runtime_root = \"\"\n   no_shim = false\n   shim_debug = false\n [plugins.\"io.containerd.runtime.v2.task\"]\n   platforms = [\"linux/amd64\"]\n...\n

                                                  Execute the following command to restart containerd:

                                                  systemctl restart containerd\n
                                                "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html#create-a-user","title":"Create a User","text":"

                                                Execute the following commands on the node where the components are installed to create a user.

                                                # Ubuntu operating system\nuseradd -d /home/hwMindX -u 9000 -m -s /usr/sbin/nologin hwMindX\nusermod -a -G HwHiAiUser hwMindX\n# CentOS operating system\nuseradd -d /home/hwMindX -u 9000 -m -s /sbin/nologin hwMindX\nusermod -a -G HwHiAiUser hwMindX\n
                                                "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html#create-log-directory","title":"Create Log Directory","text":"

                                                Create the parent directory for component logs and the log directories for each component on the proper node, and set the appropriate owner and permissions for the directories. Execute the following command to create the parent directory for component logs.

                                                mkdir -m 755 /var/log/mindx-dl\nchown root:root /var/log/mindx-dl\n

                                                Execute the following command to create the Device Plugin component log directory.

                                                mkdir -m 750 /var/log/mindx-dl/devicePlugin\nchown root:root /var/log/mindx-dl/devicePlugin\n

                                                Note

Please create the proper log directory for each required component. In this example, only the Device Plugin component is needed. For other components' requirements, refer to the official documentation.

                                                "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html#create-node-labels","title":"Create Node Labels","text":"

                                                Refer to the following commands to create labels on the proper nodes:

# Create these labels on compute nodes where the driver is installed\nkubectl label node {nodename} huawei.com.ascend/Driver=installed\nkubectl label node {nodename} node-role.kubernetes.io/worker=worker\nkubectl label node {nodename} workerselector=dls-worker-node\nkubectl label node {nodename} host-arch=huawei-arm   # or host-arch=huawei-x86, select according to the actual situation\nkubectl label node {nodename} accelerator=huawei-Ascend910   # select according to the actual situation\n# Create this label on control nodes\nkubectl label node {nodename} masterselector=dls-master-node\n
                                                "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html#install-device-plugin-and-npuexporter","title":"Install Device Plugin and NpuExporter","text":"

                                                Functional module path: Container Management -> Cluster, click the name of the target cluster, then click Helm Apps -> Helm Charts from the left navigation bar, and search for ascend-mindxdl.

                                                • DevicePlugin: Provides a general device plugin mechanism and standard device API interface for Kubernetes to use devices. It is recommended to use the default image and version.
                                                • NpuExporter: Based on the Prometheus/Telegraf ecosystem, this component provides interfaces to help users monitor the Ascend series AI processors and container-level allocation status. It is recommended to use the default image and version.
• ServiceMonitor: Disabled by default. If enabled, you can view NPU-related monitoring in the observability module. To enable it, ensure that insight-agent is installed and running; otherwise, the ascend-mindxdl installation will fail.
                                                • isVirtualMachine: Disabled by default. If the NPU node is a virtual machine scenario, enable the isVirtualMachine parameter.

                                                After a successful installation, two components will appear under the proper namespace, as shown below:

                                                At the same time, the proper NPU information will also appear on the node information:

                                                Once everything is ready, you can select the proper NPU device when creating a workload through the page, as shown below:

                                                Note

For detailed usage information, refer to Using Ascend NPU.

                                                "},{"location":"en/end-user/kpanda/gpu/ascend/vnpu.html","title":"Enable Ascend Virtualization","text":"

                                                Ascend virtualization is divided into dynamic virtualization and static virtualization. This document describes how to enable and use Ascend static virtualization capabilities.

                                                "},{"location":"en/end-user/kpanda/gpu/ascend/vnpu.html#prerequisites","title":"Prerequisites","text":"
                                                • Setup of Kubernetes cluster environment.
                                                • The current NPU node has the Ascend driver installed.
                                                • The current NPU node has the Ascend-Docker-Runtime component installed.
                                                • The NPU MindX DL suite is installed on the current cluster.
                                                • Supported NPU models:

                                                  • Ascend 310P, verified
                                                  • Ascend 910b (20 cores), verified
                                                  • Ascend 910 (32 cores), officially supported but not verified
                                                  • Ascend 910 (30 cores), officially supported but not verified

                                                  For more details, refer to the official virtualization hardware documentation.

                                                Refer to the Ascend NPU Component Installation Documentation for the basic environment setup.

                                                "},{"location":"en/end-user/kpanda/gpu/ascend/vnpu.html#enable-virtualization-capabilities","title":"Enable Virtualization Capabilities","text":"

                                                To enable virtualization capabilities, you need to manually modify the startup parameters of the ascend-device-plugin-daemonset component. Refer to the following command:

                                                - device-plugin -useAscendDocker=true -volcanoType=false -presetVirtualDevice=true\n- logFile=/var/log/mindx-dl/devicePlugin/devicePlugin.log -logLevel=0\n
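
One way to apply this change is to edit the DaemonSet in place. The sketch below assumes the component runs in the mindx-dl namespace; adjust to your installation:

kubectl -n mindx-dl edit daemonset ascend-device-plugin-daemonset\n# then set -presetVirtualDevice=true in the container args and save\n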
                                                "},{"location":"en/end-user/kpanda/gpu/ascend/vnpu.html#split-vnpu-instances","title":"Split VNPU Instances","text":"

                                                Static virtualization requires manually splitting VNPU instances. Refer to the following command:

                                                npu-smi set -t create-vnpu -i 13 -c 0 -f vir02\n
                                                • i refers to the card id.
                                                • c refers to the chip id.
                                                • vir02 refers to the split specification template.

                                                Card id and chip id can be queried using npu-smi info. The split specifications can be found in the Ascend official templates.

                                                After splitting the instance, you can query the split results using the following command:

                                                npu-smi info -t info-vnpu -i 13 -c 0\n

                                                The query result is as follows:

                                                "},{"location":"en/end-user/kpanda/gpu/ascend/vnpu.html#restart-ascend-device-plugin-daemonset","title":"Restart ascend-device-plugin-daemonset","text":"

                                                After splitting the instance, manually restart the device-plugin pod, then use the kubectl describe command to check the resources of the registered node:

                                                kubectl describe node {{nodename}}\n
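
For the restart step, one option is a rolling restart of the DaemonSet, again assuming the DaemonSet name above and a mindx-dl namespace (adjust to your installation):

kubectl -n mindx-dl rollout restart daemonset ascend-device-plugin-daemonset\n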

                                                "},{"location":"en/end-user/kpanda/gpu/ascend/vnpu.html#how-to-use-the-device","title":"How to Use the Device","text":"

                                                When creating an application, specify the resource key as shown in the following YAML:

                                                ......\nresources:\n  requests:\n    huawei.com/Ascend310P-2c: 1\n  limits:\n    huawei.com/Ascend310P-2c: 1\n......\n
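
For illustration, a complete Pod using one 2-core slice of an Ascend 310P might look like the following sketch; the pod name and command are placeholders, and the image is the one from the earlier example:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: vnpu-demo   # hypothetical name\nspec:\n  containers:\n    - name: app\n      image: ascendhub.huawei.com/public-ascendhub/ascend-pytorch:23.0.RC2-ubuntu18.04   # image from the earlier example\n      command: [\"bash\", \"-c\", \"sleep 86400\"]\n      resources:\n        requests:\n          huawei.com/Ascend310P-2c: 1\n        limits:\n          huawei.com/Ascend310P-2c: 1\n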
                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/index.html","title":"NVIDIA GPU Card Usage Modes","text":"

                                                NVIDIA, as a well-known graphics computing provider, offers various software and hardware solutions to enhance computational power. Among them, NVIDIA provides the following three solutions for GPU usage:

                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/index.html#full-gpu","title":"Full GPU","text":"

                                                Full GPU refers to allocating the entire NVIDIA GPU to a single user or application. In this configuration, the application can fully occupy all the resources of the GPU and achieve maximum computational performance. Full GPU is suitable for workloads that require a large amount of computational resources and memory, such as deep learning training, scientific computing, etc.

                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/index.html#vgpu-virtual-gpu","title":"vGPU (Virtual GPU)","text":"

                                                vGPU is a virtualization technology that allows one physical GPU to be partitioned into multiple virtual GPUs, with each virtual GPU assigned to different virtual machines or users. vGPU enables multiple users to share the same physical GPU and independently use GPU resources in their respective virtual environments. Each virtual GPU can access a certain amount of compute power and memory capacity. vGPU is suitable for virtualized environments and cloud computing scenarios, providing higher resource utilization and flexibility.

                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/index.html#mig-multi-instance-gpu","title":"MIG (Multi-Instance GPU)","text":"

                                                MIG is a feature introduced by the NVIDIA Ampere architecture that allows one physical GPU to be divided into multiple physical GPU instances, each of which can be independently allocated to different users or workloads. Each MIG instance has its own compute resources, memory, and PCIe bandwidth, just like an independent virtual GPU. MIG provides finer-grained GPU resource allocation and management and allows dynamic adjustment of the number and size of instances based on demand. MIG is suitable for multi-tenant environments, containerized applications, batch jobs, and other scenarios.

                                                Whether using vGPU in a virtualized environment or MIG on a physical GPU, NVIDIA provides users with more choices and optimized ways to utilize GPU resources. The Suanova container management platform fully supports the above NVIDIA capabilities. Users can easily access the full computational power of NVIDIA GPUs through simple UI operations, thereby improving resource utilization and reducing costs.

                                                • Single Mode: The node only exposes a single type of MIG device on all its GPUs. All GPUs on the node must:
  • Be of the same model (e.g., A100-SXM-40GB); MIG profiles can only be matched across GPUs of the same model.
                                                  • Have MIG configuration enabled, which requires a machine reboot to take effect.
  • Create identical GPU instances (GI) and compute instances (CI) so that \"identical\" MIG devices are exposed across all GPUs.
• Mixed Mode: The node exposes a mixture of MIG device types across its GPUs. A request for a specific MIG device type is expressed by the number of compute slices and the total memory provided by that device type.
  • All GPUs on the node must be in the same product line (e.g., A100-SXM-40GB).
                                                  • Each GPU can enable or disable MIG individually and freely configure any available mixture of MIG device types.
                                                  • The k8s-device-plugin running on the node will:
                                                    • Expose any GPUs not in MIG mode using the traditional nvidia.com/gpu resource type.
    • Expose individual MIG devices using resource types that follow the pattern nvidia.com/mig-<slice_count>g.<memory_size>gb (see the sketch below).

                                                For detailed instructions on enabling these configurations, refer to Offline Installation of GPU Operator.
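
As a sketch of this mixed-mode naming, a pod requesting a single 2-slice, 10 GB MIG device (a profile available on A100 40GB cards) could look like this; the pod name and image tag are assumptions:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: mig-demo   # hypothetical name\nspec:\n  containers:\n    - name: cuda\n      image: nvidia/cuda:12.2.0-base-ubuntu22.04   # assumed image tag\n      command: [\"nvidia-smi\", \"-L\"]\n      resources:\n        limits:\n          nvidia.com/mig-2g.10gb: 1   # one 2-slice, 10 GB MIG device exposed by k8s-device-plugin\n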

                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/index.html#how-to-use","title":"How to Use","text":"

                                                You can refer to the following links to quickly start using Suanova's management capabilities for NVIDIA GPUs.

                                                • Using Full NVIDIA GPU
                                                • Using NVIDIA vGPU
                                                • Using NVIDIA MIG
                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/full_gpu_userguide.html","title":"Using the Whole NVIDIA GPU Card for an Application","text":"

This section describes how to allocate an entire NVIDIA GPU card to a single application on the AI platform.

                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/full_gpu_userguide.html#prerequisites","title":"Prerequisites","text":"
• The AI platform container management platform has been deployed and is running properly.
• The container management module has been connected to a Kubernetes cluster, or a Kubernetes cluster has been created, and the cluster's UI is accessible.
• The GPU Operator has been installed offline and the NVIDIA DevicePlugin has been enabled on the current cluster. Refer to Offline Installation of GPU Operator for instructions.
• The GPU cards in the current cluster have not undergone any virtualization operations and are not occupied by other applications.
                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/full_gpu_userguide.html#procedure","title":"Procedure","text":""},{"location":"en/end-user/kpanda/gpu/nvidia/full_gpu_userguide.html#configuring-via-the-user-interface","title":"Configuring via the User Interface","text":"
                                                1. Check if the cluster has detected the GPUs. Click Clusters -> Cluster Settings -> Addon Plugins to see if it has automatically enabled and detected the proper GPU types. Currently, the cluster will automatically enable GPU and set the GPU Type as Nvidia GPU .

2. Deploy a workload. Click Clusters -> Workloads , and deploy the workload from an image. After selecting the type ( Nvidia GPU ), configure the number of physical cards used by the application:

                                                  Physical Card Count (nvidia.com/gpu): Indicates the number of physical cards that the current pod needs to mount. The input value must be an integer and less than or equal to the number of cards on the host machine.

                                                  If the above value is configured incorrectly, scheduling failures and resource allocation issues may occur.

                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/full_gpu_userguide.html#configuring-via-yaml","title":"Configuring via YAML","text":"

                                                To request GPU resources for a workload, add the nvidia.com/gpu: 1 parameter to the resource request and limit configuration in the YAML file. This parameter configures the number of physical cards used by the application.

                                                apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-gpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-gpu-demo\n  template:\n    metadata:\n      labels:\n        app: full-gpu-demo\n    spec:\n      containers:\n      - image: chrstnhntschl/gpu_burn\n        name: container-0\n        resources:\n          requests:\n            cpu: 250m\n            memory: 512Mi\n            nvidia.com/gpu: 1   # Number of GPUs requested\n          limits:\n            cpu: 250m\n            memory: 512Mi\n            nvidia.com/gpu: 1   # Upper limit of GPU usage\n      imagePullSecrets:\n      - name: default-secret\n

                                                Note

                                                When using the nvidia.com/gpu parameter to specify the number of GPUs, the values for requests and limits must be consistent.
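                                                Once the Deployment above is running, you can confirm that the container actually sees the card. This is a hedged check using the Deployment name from the example YAML; the NVIDIA container toolkit normally mounts nvidia-smi into GPU containers:

                                                  kubectl exec deploy/full-gpu-demo -- nvidia-smi\n

                                                The output should list exactly one GPU, matching the nvidia.com/gpu: 1 request.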

                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html","title":"Offline Install gpu-operator","text":"

                                                AI platform comes with pre-installed driver images for the following three operating systems: Ubuntu 22.04, Ubuntu 20.04, and CentOS 7.9. The driver version is 535.104.12. Additionally, it includes the required Toolkit images for each operating system, so users no longer need to manually provide offline toolkit images.

                                                This page demonstrates deployment on the AMD64 (x86_64) architecture with CentOS 7.9 (kernel 3.10.0-1160). If you need to deploy on Red Hat 8.4, refer to Uploading Red Hat gpu-operator Offline Image to the Bootstrap Node Repository and Building Offline Yum Source for Red Hat 8.4.

                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#prerequisites","title":"Prerequisites","text":"
                                                • The kernel version of the cluster nodes where the gpu-operator is to be deployed must be completely consistent. The distribution and GPU card model of the nodes must fall within the scope specified in the GPU Support Matrix.
                                                • When installing the gpu-operator, select v23.9.0+2 or above.
                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#steps","title":"Steps","text":"

                                                To install the gpu-operator plugin for your cluster, follow these steps:

                                                1. Log in to the platform, go to Container Management -> Clusters , and check the cluster details.

                                                2. On the Helm Charts page, select All Repositories and search for gpu-operator .

                                                3. Select gpu-operator and click Install .

                                                4. Configure the installation parameters for gpu-operator based on the instructions below to complete the installation.

                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#configure-parameters","title":"Configure parameters","text":"
                                                • systemOS : Select the operating system for the host. The current options are Ubuntu 22.04, Ubuntu 20.04, CentOS 7.9, and other. Please choose the correct operating system.
                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#basic-information","title":"Basic information","text":"
                                                • Name : Enter the plugin name.
                                                • Namespace : Select the namespace for installing the plugin.
                                                • Version: The version of the plugin. Here, we use v23.9.0+2 as an example.
                                                • Failure Deletion: If the installation fails, the associated resources that have already been installed are deleted. When this is enabled, Ready Wait is also enabled by default.
                                                • Ready Wait: When enabled, the application is marked as successfully installed only when all associated resources are in a ready state.
                                                • Detailed Logs: When enabled, detailed logs of the installation process will be recorded.
                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#advanced-settings","title":"Advanced settings","text":""},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#operator-parameters","title":"Operator parameters","text":"
                                                • InitContainer.image : Configure the CUDA image, recommended default image: nvidia/cuda
                                                • InitContainer.repository : Repository where the CUDA image is located, defaults to nvcr.m.daocloud.io repository
                                                • InitContainer.version : Version of the CUDA image, please use the default parameter
                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#driver-parameters","title":"Driver parameters","text":"
                                                • Driver.enable : Configure whether to deploy the NVIDIA driver on the node, default is enabled. If you have already deployed the NVIDIA driver on the node before using the gpu-operator, please disable this.
                                                • Driver.image : Configure the GPU driver image, recommended default image: nvidia/driver .
                                                • Driver.repository : Repository where the GPU driver image is located, default is nvidia's nvcr.io repository.
                                                • Driver.usePrecompiled : Enable the precompiled mode to install the driver.
                                                • Driver.version : Version of the GPU driver image. Use the default parameter for offline deployment; configuration is only required for online installation. Driver images differ across operating system types. For more details, refer to Nvidia GPU Driver Versions. Examples of Driver Version for different operating systems are as follows:

                                                  Note

                                                  When using the built-in operating system version, there is no need to modify the image version. For other operating system versions, refer to Uploading Images to the Bootstrap Node Repository. Note that the version number should not include the operating system name, such as Ubuntu, CentOS, or Red Hat; if the official image contains an operating system suffix, remove it manually.

                                                  • For Red Hat systems, for example, 525.105.17
                                                  • For Ubuntu systems, for example, 535-5.15.0-1043-nvidia
                                                  • For CentOS systems, for example, 525.147.05
                                                • Driver.RepoConfig.ConfigMapName : Used to record the name of the offline yum repository configuration file for the gpu-operator. When using the pre-packaged offline bundle, refer to the following documents for different types of operating systems.

                                                  • Building CentOS 7.9 Offline Yum Repository
                                                  • Building Red Hat 8.4 Offline Yum Repository
                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#toolkit-parameters","title":"Toolkit parameters","text":"

                                                Toolkit.enable : Enabled by default. This component allows containerd/docker to support running containers that require GPUs.
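                                                To confirm the component is running after installation, you can check for its DaemonSet; a minimal sketch, assuming the default gpu-operator namespace:

                                                  kubectl get daemonsets -n gpu-operator | grep toolkit\n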

                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#mig-parameters","title":"MIG parameters","text":"

                                                For detailed configuration methods, refer to Enabling MIG Functionality.

                                                MigManager.Config.name : The name of the MIG split configuration file, used to define the MIG (GI, CI) split policy. The default is default-mig-parted-config . For custom parameters, refer to Enabling MIG Functionality.

                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#next-steps","title":"Next Steps","text":"

                                                After completing the configuration and creation of the above parameters:

                                                • If using full-card mode , GPU resources can be used when creating applications.

                                                • If using vGPU mode , after completing the above configuration and creation, proceed to vGPU Addon Installation.

                                                • If using MIG mode , you can apply a specific partitioning specification to individual GPU nodes by labeling them as shown below; otherwise, GPUs are partitioned according to the default value in MigManager.Config .

                                                  • For single mode, add label to nodes as follows:

                                                    kubectl label nodes {node} nvidia.com/mig.config=\"all-1g.10gb\" --overwrite\n
                                                  • For mixed mode, add label to nodes as follows:

                                                    kubectl label nodes {node} nvidia.com/mig.config=\"custom-config\" --overwrite\n

                                                  After partitioning, applications can use MIG GPU resources.
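                                                  To verify that the partitioning took effect, check the GPU resources the labeled node now advertises; a minimal sketch, assuming kubectl access:

                                                    kubectl describe node {node} | grep nvidia.com/\n

                                                  In single mode the slices appear under the plain nvidia.com/gpu resource name; in mixed mode they follow the nvidia.com/mig-<slice_count>g.<memory_size>gb pattern.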

                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/push_image_to_repo.html","title":"Uploading Red Hat GPU Operator Offline Image to Bootstrap Repository","text":"

                                                This guide explains how to upload an offline image to the bootstrap repository using the nvcr.io/nvidia/driver:525.105.17-rhel8.4 offline driver image for Red Hat 8.4 as an example.

                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/push_image_to_repo.html#prerequisites","title":"Prerequisites","text":"
                                                1. The bootstrap node and its components are running properly.
                                                2. Prepare a node that has internet access and can access the bootstrap node. Docker should also be installed on this node. You can refer to Installing Docker for installation instructions.
                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/push_image_to_repo.html#procedure","title":"Procedure","text":""},{"location":"en/end-user/kpanda/gpu/nvidia/push_image_to_repo.html#step-1-obtain-the-offline-image-on-an-internet-connected-node","title":"Step 1: Obtain the Offline Image on an Internet-Connected Node","text":"

                                                Perform the following steps on the internet-connected node:

                                                1. Pull the nvcr.io/nvidia/driver:525.105.17-rhel8.4 offline driver image:

                                                  docker pull nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
                                                2. Once the image is pulled, save it as a compressed archive named nvidia-driver.tar :

                                                  docker save nvcr.io/nvidia/driver:525.105.17-rhel8.4 > nvidia-driver.tar\n
                                                3. Copy the compressed image archive nvidia-driver.tar to the bootstrap node:

                                                  scp nvidia-driver.tar user@ip:/root\n

                                                  For example:

                                                  scp nvidia-driver.tar root@10.6.175.10:/root\n
                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/push_image_to_repo.html#step-2-push-the-image-to-the-bootstrap-repository","title":"Step 2: Push the Image to the Bootstrap Repository","text":"

                                                Perform the following steps on the bootstrap node:

                                                1. Log in to the bootstrap node and import the compressed image archive nvidia-driver.tar :

                                                  docker load -i nvidia-driver.tar\n
                                                2. View the imported image:

                                                  docker images -a | grep nvidia\n

                                                  Expected output:

                                                  nvcr.io/nvidia/driver                 e3ed7dee73e9   1 days ago   1.02GB\n
                                                3. Retag the image to correspond to the target repository in the remote Registry repository:

                                                  docker tag <image-name> <registry-url>/<repository-name>:<tag>\n

                                                  Replace <image-name> with the name of the Nvidia image from the previous step, <registry-url> with the address of the Registry service on the bootstrap node, <repository-name> with the name of the repository you want to push the image to, and <tag> with the desired tag for the image.

                                                  For example:

                                                  docker tag nvcr.io/nvidia/driver 10.6.10.5/nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
                                                4. Push the image to the bootstrap repository:

                                                  docker push {ip}/nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
                                                "},{"location":"en/end-user/kpanda/gpu/nvidia/push_image_to_repo.html#whats-next","title":"What's Next","text":"

                                                  Refer to Building Red Hat 8.4 Offline Yum Source and Offline Installation of GPU Operator to deploy the GPU Operator to your cluster.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html","title":"Offline Install gpu-operator Driver on Ubuntu 22.04","text":"

                                                  Prerequisite: gpu-operator v23.9.0+2 or a higher version is installed.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html#prepare-offline-image","title":"Prepare Offline Image","text":"
                                                  1. Check the kernel version:

                                                    $ uname -r\n5.15.0-78-generic\n
                                                  2. Find the GPU Driver image version applicable to your kernel at https://catalog.ngc.nvidia.com/orgs/nvidia/containers/driver/tags. Query the image version by kernel, then pull the image and save it with ctr i export:

                                                    ctr i pull nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04\nctr i export --all-platforms driver.tar.gz nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 \n
                                                  3. Import the image into the cluster's container registry

                                                    ctr i import driver.tar.gz\nctr i tag nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 {your_registry}/nvcr.m.daocloud.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04\nctr i push {your_registry}/nvcr.m.daocloud.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 --skip-verify=true\n
                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html#install-the-driver","title":"Install the Driver","text":"
                                                  1. Install the gpu-operator addon and set driver.usePrecompiled=true
                                                  2. Set driver.version=535 ; note that it should be 535, not 535.104.12. A Helm values sketch follows this list.
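                                                  If you install the addon with Helm rather than through the UI, the two settings above map to the chart's driver block. A minimal values sketch, assuming the standard gpu-operator chart layout:

                                                    driver:\n  usePrecompiled: true\n  version: \"535\"   # major version only, not 535.104.12\n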
                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html","title":"Build CentOS 7.9 Offline Yum Source","text":"

                                                  The AI platform comes with a pre-installed GPU Operator offline package for CentOS 7.9 with kernel version 3.10.0-1160. For other OS types or kernel versions, users need to manually build an offline yum source.

                                                  This guide explains how to build an offline yum source for CentOS 7.9 with a specific kernel version and use it when installing the GPU Operator by specifying the RepoConfig.ConfigMapName parameter.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#prerequisites","title":"Prerequisites","text":"
                                                  1. The user has already installed the v0.12.0 or later version of the addon offline package on the platform.
                                                  2. Prepare a file server that is accessible from the cluster network, such as Nginx or MinIO.
                                                  3. Prepare a node that has internet access, can access the cluster where the GPU Operator will be deployed, and can access the file server. Docker should also be installed on this node. You can refer to Installing Docker for installation instructions.
                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#procedure","title":"Procedure","text":"

                                                  This guide uses CentOS 7.9 with kernel version 3.10.0-1160.95.1.el7.x86_64 as an example to explain how to upgrade the pre-installed GPU Operator offline package's yum source.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#check-os-and-kernel-versions-of-cluster-nodes","title":"Check OS and Kernel Versions of Cluster Nodes","text":"

                                                  Run the following commands on both the control node of the Global cluster and the node where GPU Operator will be deployed. If the OS and kernel versions of the two nodes are consistent, there is no need to build a yum source. You can directly refer to the Offline Installation of GPU Operator document for installation. If the OS or kernel versions of the two nodes are not consistent, please proceed to the next step.

                                                  1. Run the following command to view the distribution name and version of the node where GPU Operator will be deployed in the cluster.

                                                    cat /etc/redhat-release\n

                                                    Expected output:

                                                    CentOS Linux release 7.9 (Core)\n

                                                    The output shows the current node's OS version as CentOS 7.9.

                                                  2. Run the following command to view the kernel version of the node where GPU Operator will be deployed in the cluster.

                                                    uname -a\n

                                                    Expected output:

                                                    Linux localhost.localdomain 3.10.0-1160.95.1.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux\n

                                                    The output shows the current node's kernel version as 3.10.0-1160.95.1.el7.x86_64.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#create-the-offline-yum-source","title":"Create the Offline Yum Source","text":"

                                                  Perform the following steps on a node that has internet access and can access the file server:

                                                  1. Create a script file named yum.sh by running the following command:

                                                    vi yum.sh\n

                                                    Then press the i key to enter insert mode and enter the following content:

                                                    export TARGET_KERNEL_VERSION=$1\n\ncat >> run.sh << \\EOF\n#! /bin/bash\necho \"start install kernel repo\"\necho ${KERNEL_VERSION}\nmkdir centos-base\n\nif [ \"$OS\" -eq 7 ]; then\n    yum install --downloadonly --downloaddir=./centos-base perl\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf.x86_64\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf-devel.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-headers-${KERNEL_VERSION}.el7.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-devel-${KERNEL_VERSION}.el7.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-${KERNEL_VERSION}.el7.x86_64\n    yum install  -y --downloadonly --downloaddir=./centos-base groff-base\nelif [ \"$OS\" -eq 8 ]; then\n    yum install --downloadonly --downloaddir=./centos-base perl\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf.x86_64\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf-devel.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-headers-${KERNEL_VERSION}.el8.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-devel-${KERNEL_VERSION}.el8.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-${KERNEL_VERSION}.el8.x86_64\n    yum install  -y --downloadonly --downloaddir=./centos-base groff-base\nelse\n    echo \"Error os version\"\nfi\n\ncreaterepo centos-base/\nls -lh centos-base/\ntar -zcf centos-base.tar.gz centos-base/\necho \"end install kernel repo\"\nEOF\n\ncat >> Dockerfile << EOF\nFROM centos:7\nENV KERNEL_VERSION=\"\"\nENV OS=7\nRUN yum install -y createrepo\nCOPY run.sh .\nENTRYPOINT [\"/bin/bash\",\"run.sh\"]\nEOF\n\ndocker build -t test:v1 -f Dockerfile .\ndocker run -e KERNEL_VERSION=$TARGET_KERNEL_VERSION --name centos7.9 test:v1\ndocker cp centos7.9:/centos-base.tar.gz .\ntar -xzf centos-base.tar.gz\n

                                                    Press the Esc key to exit insert mode, then enter :wq to save and exit.

                                                  2. Run the yum.sh file:

                                                    bash -x yum.sh TARGET_KERNEL_VERSION\n

                                                    The TARGET_KERNEL_VERSION parameter is used to specify the kernel version of the cluster nodes.

                                                    Note: You don't need to include the distribution identifier (e.g., .el7.x86_64 ). For example:

                                                    bash -x yum.sh 3.10.0-1160.95.1\n

                                                  Now you have generated an offline yum source, centos-base , for the kernel version 3.10.0-1160.95.1.el7.x86_64 .

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#upload-the-offline-yum-source-to-the-file-server","title":"Upload the Offline Yum Source to the File Server","text":"

                                                  Perform the following steps on a node that has internet access and can access the file server. This step is used to upload the generated yum source from the previous step to a file server that can be accessed by the cluster where the GPU Operator will be deployed. The file server can be Nginx, MinIO, or any other file server that supports the HTTP protocol.

                                                  In this example, we will use the built-in MinIO as the file server. The MinIO details are as follows:

                                                  • Access URL: http://10.5.14.200:9000 (usually {bootstrap-node IP} + {port-9000} )
                                                  • Login username: rootuser
                                                  • Login password: rootpass123

                                                  • Run the following command in the current directory of the node to establish a connection between the node's local mc command-line tool and the MinIO server:

                                                    mc config host add minio http://10.5.14.200:9000 rootuser rootpass123\n

                                                    The expected output should resemble the following:

                                                    Added __minio__ successfully.\n

                                                    mc is the command-line tool provided by MinIO for interacting with the MinIO server. For more details, refer to the MinIO Client documentation.

                                                  • In the current directory of the node, create a bucket named centos-base :

                                                    mc mb -p minio/centos-base\n

                                                    The expected output should resemble the following:

                                                    Bucket created successfully __minio/centos-base__ .\n
                                                  • Set the access policy of the bucket centos-base to allow public download. This will enable access during the installation of the GPU Operator:

                                                    mc anonymous set download minio/centos-base\n

                                                    The expected output should resemble the following:

                                                    Access permission for __minio/centos-base__ is set to __download__ \n
                                                  • In the current directory of the node, copy the generated centos-base offline yum source to the minio/centos-base bucket on the MinIO server (a quick verification sketch follows this list):

                                                    mc cp centos-base minio/centos-base --recursive\n
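                                                  Optionally, verify the upload before moving on; mc ls lists the objects stored under the bucket:

                                                    mc ls minio/centos-base\n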
                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#create-a-configmap-to-store-the-yum-source-info-in-the-cluster","title":"Create a ConfigMap to Store the Yum Source Info in the Cluster","text":"

                                                  Perform the following steps on the control node of the cluster where the GPU Operator will be deployed.

                                                  1. Run the following command to create a file named CentOS-Base.repo that specifies the configmap for the yum source storage:

                                                    # The file name must be CentOS-Base.repo, otherwise it cannot be recognized during the installation of the GPU Operator\ncat > CentOS-Base.repo << EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # The file server address where the yum source is placed in step 3\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # The file server address where the yum source is placed in step 3\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
                                                  2. Based on the created CentOS-Base.repo file, create a configmap named local-repo-config in the gpu-operator namespace:

                                                    kubectl create configmap local-repo-config -n gpu-operator --from-file=CentOS-Base.repo=./CentOS-Base.repo\n

                                                    The expected output should resemble the following:

                                                    configmap/local-repo-config created\n

                                                    The local-repo-config configmap will be used to provide the value for the RepoConfig.ConfigMapName parameter during the installation of the GPU Operator. You can customize the configuration file name.

                                                  3. View the content of the local-repo-config configmap:

                                                    kubectl get configmap local-repo-config -n gpu-operator -oyaml\n

                                                    The expected output should resemble the following:

                                                    apiVersion: v1\ndata:\n  CentOS-Base.repo: \"[extension-0]\\nbaseurl = http://10.6.232.5:32618/centos-base # The file server path where the yum source is placed in step 2\\ngpgcheck = 0\\nname = kubean extension 0\\n  \\n[extension-1]\\nbaseurl = http://10.6.232.5:32618/centos-base # The file server path where the yum source is placed in step 2\\ngpgcheck = 0\\nname = kubean extension 1\\n\"\nkind: ConfigMap\nmetadata:\n  creationTimestamp: \"2023-10-18T01:59:02Z\"\n  name: local-repo-config\n  namespace: gpu-operator\n  resourceVersion: \"59445080\"\n  uid: c5f0ebab-046f-442c-b932-f9003e014387\n

                                                  You have successfully created an offline yum source configuration file for the cluster where the GPU Operator will be deployed. You can use it during the offline installation of the GPU Operator by specifying the RepoConfig.ConfigMapName parameter.
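                                                  For reference, when installing the upstream gpu-operator chart directly with Helm, this parameter is believed to correspond to the driver.repoConfig.configMapName value; a hedged sketch (the nvidia/ repository alias is an assumption):

                                                    helm install gpu-operator nvidia/gpu-operator -n gpu-operator --set driver.repoConfig.configMapName=local-repo-config\n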

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html","title":"Building Red Hat 8.4 Offline Yum Source","text":"

                                                  The AI platform comes with a pre-installed GPU Operator offline package for CentOS 7.9 with kernel v3.10.0-1160. For other OS types or nodes with different kernels, users need to manually build the offline yum source.

                                                  This guide explains how to build an offline yum source package for Red Hat 8.4 based on any node in the Global cluster. It also demonstrates how to use it during the installation of the GPU Operator by specifying the RepoConfig.ConfigMapName parameter.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#prerequisites","title":"Prerequisites","text":"
                                                  1. The user has already installed the addon offline package v0.12.0 or higher on the platform.
                                                  2. The OS of the cluster nodes where the GPU Operator will be deployed must be Red Hat v8.4, and the kernel version must be identical.
                                                  3. Prepare a file server that can communicate with the cluster network where the GPU Operator will be deployed, such as Nginx or MinIO.
                                                  4. Prepare a node that can access the internet, the cluster where the GPU Operator will be deployed, and the file server. Ensure that Docker is already installed on this node.
                                                  5. The nodes in the Global cluster must be Red Hat 8.4 4.18.0-305.el8.x86_64.
                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#procedure","title":"Procedure","text":"

                                                  This guide uses a node with Red Hat 8.4 4.18.0-305.el8.x86_64 as an example to demonstrate how to build an offline yum source package for Red Hat 8.4 based on any node in the Global cluster. It also explains how to use it during the installation of the GPU Operator by specifying the RepoConfig.ConfigMapName parameter.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-1-download-the-yum-source-from-the-bootstrap-node","title":"Step 1: Download the Yum Source from the Bootstrap Node","text":"

                                                  Perform the following steps on the master node of the Global cluster.

                                                  1. Use SSH or any other method to access any node in the Global cluster and run the following command:

                                                    cat /etc/yum.repos.d/extension.repo # View the contents of extension.repo.\n

                                                    The expected output should resemble the following:

                                                    [extension-0]\nbaseurl = http://10.5.14.200:9000/kubean/redhat/$releasever/os/$basearch\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/kubean/redhat-iso/$releasever/os/$basearch/AppStream\ngpgcheck = 0\nname = kubean extension 1\n\n[extension-2]\nbaseurl = http://10.5.14.200:9000/kubean/redhat-iso/$releasever/os/$basearch/BaseOS\ngpgcheck = 0\nname = kubean extension 2\n
                                                  2. Create a folder named redhat-base-repo under the root user's home directory (/root):

                                                    mkdir redhat-base-repo\n
                                                  3. Download the RPM packages from the yum source to your local machine:

                                                    Download the RPM packages from extension-1 :

                                                    reposync -p redhat-base-repo -n --repoid=extension-1\n

                                                    Download the RPM packages from extension-2 :

                                                    reposync -p redhat-base-repo -n --repoid=extension-2\n
                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-2-download-the-elfutils-libelf-devel-0187-4el8x86_64rpm-package","title":"Step 2: Download the elfutils-libelf-devel-0.187-4.el8.x86_64.rpm Package","text":"

                                                  Perform the following steps on a node with internet access. Before proceeding, ensure that there is network connectivity between the node with internet access and the master node of the Global cluster.

                                                  1. Run the following command on the node with internet access to download the elfutils-libelf-devel-0.187-4.el8.x86_64.rpm package:

                                                    wget https://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/elfutils-libelf-devel-0.187-4.el8.x86_64.rpm\n
                                                  2. Transfer the elfutils-libelf-devel-0.187-4.el8.x86_64.rpm package from the current directory to the node mentioned in step 1:

                                                    scp elfutils-libelf-devel-0.187-4.el8.x86_64.rpm user@ip:~/redhat-base-repo/extension-2/Packages/\n

                                                    For example:

                                                    scp elfutils-libelf-devel-0.187-4.el8.x86_64.rpm root@10.6.175.10:~/redhat-base-repo/extension-2/Packages/\n
                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-3-generate-the-local-yum-repository","title":"Step 3: Generate the Local Yum Repository","text":"

                                                  Perform the following steps on the master node of the Global cluster mentioned in Step 1.

                                                  1. Generate the repository index for the extension-1 packages:

                                                    cd ~/redhat-base-repo/extension-1/Packages\ncreaterepo_c ./\n
                                                  2. Generate the repository index for the extension-2 packages:

                                                    cd ~/redhat-base-repo/extension-2/Packages\ncreaterepo_c ./\n

                                                  You have now generated the offline yum source named redhat-base-repo for kernel version 4.18.0-305.el8.x86_64 .

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-4-upload-the-local-yum-repository-to-the-file-server","title":"Step 4: Upload the Local Yum Repository to the File Server","text":"

                                                  In this example, we will use MinIO, the file server built into the bootstrap node. However, you can choose any file server that suits your needs. Here are the details for MinIO:

                                                  • Access URL: http://10.5.14.200:9000 (usually the {bootstrap-node-IP} + {port-9000})
                                                  • Login username: rootuser
                                                  • Login password: rootpass123

                                                  • On the current node, establish a connection between the local mc command-line tool and the Minio server by running the following command:

                                                    mc config host add minio <file_server_access_url> <username> <password>\n

                                                    For example:

                                                    mc config host add minio http://10.5.14.200:9000 rootuser rootpass123\n

                                                    The expected output should be similar to:

                                                    Added __minio__ successfully.\n

                                                    mc is the command-line client provided by MinIO for interacting with the MinIO server. For more details, refer to the MinIO Client documentation.

                                                  • Create a bucket named redhat-base in the current location:

                                                    mc mb -p minio/redhat-base\n

                                                    The expected output should be similar to:

                                                    Bucket created successfully __minio/redhat-base__ .\n
                                                  • Set the access policy of the redhat-base bucket to allow public downloads so that it can be accessed during the installation of the GPU Operator:

                                                    mc anonymous set download minio/redhat-base\n

                                                    The expected output should be similar to:

                                                    Access permission for __minio/redhat-base__ is set to __download__ \n
                                                  • Copy the offline yum repository files ( redhat-base-repo ) from the current location to the Minio server's minio/redhat-base bucket:

                                                    mc cp redhat-base-repo minio/redhat-base --recursive\n
                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-5-create-a-configmap-to-store-yum-repository-information-in-the-cluster","title":"Step 5: Create a ConfigMap to Store Yum Repository Information in the Cluster","text":"

                                                  Perform the following steps on the control node of the cluster where you will deploy the GPU Operator.

                                                  1. Run the following command to create a file named redhat.repo , which specifies the configuration information for the yum repository storage:

                                                    # The file name must be redhat.repo, otherwise it won't be recognized when installing gpu-operator\ncat > redhat.repo << EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/redhat-base/redhat-base-repo/Packages # The file server address where the yum source is stored in Step 1\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/redhat-base/redhat-base-repo/Packages # The file server address where the yum source is stored in Step 1\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
                                                  2. Based on the created redhat.repo file, create a configmap named local-repo-config in the gpu-operator namespace:

                                                    kubectl create configmap local-repo-config -n gpu-operator --from-file=./redhat.repo\n

                                                    The expected output should be similar to:

                                                    configmap/local-repo-config created\n

                                                    The local-repo-config configuration file is used to provide the value for the RepoConfig.ConfigMapName parameter during the installation of the GPU Operator. You can choose a different name for the configuration file.

                                                  3. View the contents of the local-repo-config configuration file:

                                                    kubectl get configmap local-repo-config -n gpu-operator -oyaml\n

                                                  You have successfully created the offline yum source configuration file for the cluster where the GPU Operator will be deployed. You can use it by specifying the RepoConfig.ConfigMapName parameter during the offline installation of the GPU Operator.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html","title":"Build an Offline Yum Repository for Red Hat 7.9","text":""},{"location":"en/end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#introduction","title":"Introduction","text":"

                                                  AI platform comes with a pre-installed CentOS 7.9 with GPU Operator offline package for kernel 3.10.0-1160. You need to manually build an offline yum repository for other OS types or nodes with different kernels.

                                                  This page explains how to build an offline yum repository for Red Hat 7.9 based on any node in the Global cluster, and how to use the RepoConfig.ConfigMapName parameter when installing the GPU Operator.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#prerequisites","title":"Prerequisites","text":"
                                                  1. The cluster nodes where the GPU Operator is to be deployed must be Red Hat 7.9 with the exact same kernel version.
                                                  2. Prepare a file server that can be connected to the cluster network where the GPU Operator is to be deployed, such as Nginx or MinIO.
                                                  3. Prepare a node that can access the internet, the cluster where the GPU Operator is to be deployed, and the file server. Docker installation must be completed on this node.
                                                  4. The nodes in the global service cluster must be Red Hat 7.9.
                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#steps","title":"Steps","text":""},{"location":"en/end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#1-build-offline-yum-repo-for-relevant-kernel","title":"1. Build Offline Yum Repo for Relevant Kernel","text":"
                                                  1. Download rhel7.9 ISO

                                                  2. Download the rhel7.9 ospackage that corresponds to your Kubean version.

                                                    Find the version number of Kubean in the Container Management section of the Global cluster under Helm Apps.

                                                    Download the rhel7.9 ospackage for that version from the Kubean repository.

                                                  3. Import offline resources using the installer.

                                                    Refer to the Import Offline Resources document.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#2-download-offline-driver-image-for-red-hat-79-os","title":"2. Download Offline Driver Image for Red Hat 7.9 OS","text":"

                                                  Click here to view the download URL.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#3-upload-red-hat-gpu-operator-offline-image-to-boostrap-node-repository","title":"3. Upload Red Hat GPU Operator Offline Image to Boostrap Node Repository","text":"

                                                  Refer to Upload Red Hat GPU Operator Offline Image to Bootstrap Node Repository.

                                                  Note

                                                  This reference is based on rhel8.4, so make sure to modify it for rhel7.9.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#4-create-configmaps-in-the-cluster-to-save-yum-repository-information","title":"4. Create ConfigMaps in the Cluster to Save Yum Repository Information","text":"

                                                  Run the following command on the control node of the cluster where the GPU Operator is to be deployed.

                                                  1. Run the following command to create a file named CentOS-Base.repo to specify the configuration information where the yum repository is stored.

                                                    # The file name must be CentOS-Base.repo, otherwise it will not be recognized when installing gpu-operator\ncat > CentOS-Base.repo << EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # The file server address of the bootstrap node, usually {bootstrap node IP} + {port 9000}\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # The file server address of the bootstrap node, usually {bootstrap node IP} + {port 9000}\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
                                                  2. Based on the created CentOS-Base.repo file, create a configmap named local-repo-config in the gpu-operator namespace:

                                                    kubectl create configmap local-repo-config -n gpu-operator --from-file=CentOS-Base.repo=./CentOS-Base.repo\n

                                                    The expected output is as follows:

                                                    configmap/local-repo-config created\n

                                                    The local-repo-config configmap is used to provide the value of the RepoConfig.ConfigMapName parameter when installing gpu-operator; the configmap name can be customized by the user.

                                                  3. View the contents of the local-repo-config configmap:

                                                    kubectl get configmap local-repo-config -n gpu-operator -oyaml\n

                                                    The expected output is as follows:

                                                    local-repo-config.yaml
                                                    apiVersion: v1\ndata:\n  CentOS-Base.repo: \"[extension-0]\\nbaseurl = http://10.6.232.5:32618/centos-base # The file path where yum repository is placed in Step 2 \\ngpgcheck = 0\\nname = kubean extension 0\\n  \\n[extension-1]\\nbaseurl\n  = http://10.6.232.5:32618/centos-base # The file path where yum repository is placed in Step 2 \\ngpgcheck = 0\\nname\n  = kubean extension 1\\n\"\nkind: ConfigMap\nmetadata:\n  creationTimestamp: \"2023-10-18T01:59:02Z\"\n  name: local-repo-config\n  namespace: gpu-operator\n  resourceVersion: \"59445080\"\n  uid: c5f0ebab-046f-442c-b932-f9003e014387\n

                                                  At this point, you have successfully created the offline yum repository configuration file for the cluster where the GPU Operator is to be deployed. Use it by specifying the RepoConfig.ConfigMapName parameter during the Offline Installation of GPU Operator.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/index.html","title":"Overview of NVIDIA Multi-Instance GPU (MIG)","text":""},{"location":"en/end-user/kpanda/gpu/nvidia/mig/index.html#mig-scenarios","title":"MIG Scenarios","text":"
                                                  • Multi-Tenant Cloud Environments:

                                                  MIG allows cloud service providers to partition a physical GPU into multiple independent GPU instances, which can be allocated to different tenants. This enables resource isolation and independence, meeting the GPU computing needs of multiple tenants.

                                                  • Containerized Applications:

                                                  MIG enables finer-grained GPU resource management in containerized environments. By partitioning a physical GPU into multiple MIG instances, each container can be assigned dedicated GPU compute resources, providing better performance isolation and resource utilization.

                                                  • Batch Processing Jobs:

                                                  For batch processing jobs requiring large-scale parallel computing, MIG provides higher computational performance and larger memory capacity. Each MIG instance can utilize a portion of the physical GPU's compute resources, accelerating the processing of large-scale computational tasks.

                                                  • AI/Machine Learning Training:

                                                  MIG offers increased compute power and memory capacity for training large-scale deep learning models. By partitioning the physical GPU into multiple MIG instances, each instance can independently carry out model training, improving training efficiency and throughput.

                                                  In general, NVIDIA MIG is suitable for scenarios that require finer-grained allocation and management of GPU resources. It enables resource isolation, improved performance utilization, and meets the GPU computing needs of multiple users or applications.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/index.html#overview-of-mig","title":"Overview of MIG","text":"

                                                  NVIDIA Multi-Instance GPU (MIG) is a new feature introduced by NVIDIA on H100, A100, and A30 series GPUs. Its purpose is to divide a physical GPU into multiple GPU instances to provide finer-grained resource sharing and isolation. MIG can split a GPU into up to seven GPU instances, allowing a single physical GPU card to provide separate GPU resources to multiple users, maximizing GPU utilization.

                                                  This feature enables multiple applications or users to share GPU resources simultaneously, improving the utilization of computational resources and increasing system scalability.

                                                  With MIG, each GPU instance's processor has an independent and isolated path throughout the entire memory system, including cross-switch ports on the chip, L2 cache groups, memory controllers, and DRAM address buses, all uniquely allocated to a single instance.

                                                  This ensures that the workload of individual users can run with predictable throughput and latency, along with identical L2 cache allocation and DRAM bandwidth. MIG can partition available GPU compute resources (such as streaming multiprocessors or SMs and GPU engines like copy engines or decoders) to provide defined quality of service (QoS) and fault isolation for different clients such as virtual machines, containers, or processes. MIG enables multiple GPU instances to run in parallel on a single physical GPU.

                                                  MIG allows multiple vGPUs (and virtual machines) to run in parallel on a single GPU instance while retaining the isolation guarantees provided by vGPU. For more details on using vGPU and MIG for GPU partitioning, refer to NVIDIA Multi-Instance GPU and NVIDIA Virtual Compute Server.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/index.html#mig-architecture","title":"MIG Architecture","text":"

                                                  The following diagram provides an overview of MIG, illustrating how it virtualizes one physical GPU card into seven GPU instances that can be used by multiple users.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/index.html#important-concepts","title":"Important Concepts","text":"
                                                  • SM (Streaming Multiprocessor): The core computational unit of a GPU responsible for executing graphics rendering and general-purpose computing tasks. Each SM contains a group of CUDA cores, as well as shared memory, register files, and other resources, capable of executing multiple threads concurrently. Each MIG instance has a certain number of SMs and other related resources, along with the allocated memory slices.
                                                  • GPU Memory Slice : The smallest portion of GPU memory, including its corresponding memory controller and cache. A GPU memory slice is approximately one-eighth of the total GPU memory resources in terms of capacity and bandwidth.
                                                  • GPU SM Slice : The smallest computational unit of SMs on a GPU. When configuring in MIG mode, the GPU SM slice is approximately one-seventh of the total available SMs in the GPU.
                                                  • GPU Slice : The GPU slice represents the smallest portion of the GPU, consisting of a single GPU memory slice and a single GPU SM slice combined together.
                                                  • GPU Instance (GI): A GPU instance is the combination of a GPU slice and GPU engines (DMA, NVDEC, etc.). Anything within a GPU instance always shares all GPU memory slices and other GPU engines, but its SM slice can be further subdivided into Compute Instances (CIs). A GPU instance provides memory QoS: each GPU slice contains dedicated GPU memory resources that limit the available capacity and bandwidth. Each GPU memory slice gets one-eighth of the total GPU memory resources, and each GPU SM slice gets one-seventh of the total SM count.
                                                  • Compute Instance (CI): A Compute Instance represents the smallest computational unit within a GPU instance. It consists of a subset of SMs, along with dedicated register files, shared memory, and other resources. Each CI has its own CUDA context and can run independent CUDA kernels. The number of CIs in a GPU instance depends on the number of available SMs and the configuration chosen during MIG setup.
                                                  • Instance Slice : An Instance Slice represents a single CI within a GPU instance. It is the combination of a subset of SMs and a portion of the GPU memory slice. Each Instance Slice provides isolation and resource allocation for individual applications or users running on the GPU instance.
                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/index.html#key-benefits-of-mig","title":"Key Benefits of MIG","text":"
                                                  • Resource Sharing: MIG allows a single physical GPU to be divided into multiple GPU instances, providing efficient sharing of GPU resources among different users or applications. This maximizes GPU utilization and enables improved performance isolation.

                                                  • Fine-Grained Resource Allocation: With MIG, GPU resources can be allocated at a finer granularity, allowing for more precise partitioning and allocation of compute power and memory capacity.

                                                  • Improved Performance Isolation: Each MIG instance operates independently with its dedicated resources, ensuring predictable throughput and latency for individual users or applications. This improves performance isolation and prevents interference between different workloads running on the same GPU.

                                                  • Enhanced Security and Fault Isolation: MIG provides better security and fault isolation by ensuring that each user or application has its dedicated GPU resources. This prevents unauthorized access to data and mitigates the impact of faults or errors in one instance on others.

                                                  • Increased Scalability: MIG enables the simultaneous usage of GPU resources by multiple users or applications, increasing system scalability and accommodating the needs of various workloads.

                                                  • Efficient Containerization: By using MIG in containerized environments, GPU resources can be effectively allocated to different containers, improving performance isolation and resource utilization.

                                                  Overall, MIG offers significant advantages in terms of resource sharing, fine-grained allocation, performance isolation, security, scalability, and containerization, making it a valuable feature for various GPU computing scenarios.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/create_mig.html","title":"Enabling MIG Features","text":"

                                                  This section describes how to enable NVIDIA MIG features. NVIDIA currently provides two strategies for exposing MIG devices on Kubernetes nodes:

• Single mode: Nodes expose a single type of MIG device across all of their GPUs.
• Mixed mode: Nodes expose a mixture of MIG device types across all of their GPUs.

                                                  For more details, refer to the NVIDIA GPU Card Usage Modes.
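As a rough sketch of where this strategy is set when the GPU Operator is used: the ClusterPolicy resource carries a mig.strategy field that takes the values above. The exact field layout can differ between operator versions, so treat this as an assumption to verify against your installed chart:

apiVersion: nvidia.com/v1\nkind: ClusterPolicy\nmetadata:\n  name: cluster-policy\nspec:\n  mig:\n    strategy: mixed   # \"single\" or \"mixed\", matching the modes described above\n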

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/create_mig.html#prerequisites","title":"Prerequisites","text":"
                                                  • Check the system requirements for the GPU driver installation on the target node: GPU Support Matrix
• Ensure that the cluster nodes have GPUs of supported models (NVIDIA H100, A100, and A30 Tensor Core GPUs). For more information, see the GPU Support Matrix.
                                                  • All GPUs on the nodes must belong to the same product line (e.g., A100-SXM-40GB).
                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/create_mig.html#install-gpu-operator-addon","title":"Install GPU Operator Addon","text":""},{"location":"en/end-user/kpanda/gpu/nvidia/mig/create_mig.html#parameter-configuration","title":"Parameter Configuration","text":"

                                                  When installing the Operator, you need to set the MigManager Config parameter accordingly. The default setting is default-mig-parted-config. You can also customize the sharding policy configuration file:

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/create_mig.html#custom-sharding-policy","title":"Custom Sharding Policy","text":"
  ## Custom GI Instance Configuration\n  all-disabled:\n    - devices: all\n      mig-enabled: false\n  all-enabled:\n    - devices: all\n      mig-enabled: true\n      mig-devices: {}\n  all-1g.10gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb: 7\n  all-1g.10gb.me:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb+me: 1\n  all-1g.20gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.20gb: 4\n  all-2g.20gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        2g.20gb: 3\n  all-3g.40gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        3g.40gb: 2\n  all-4g.40gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        4g.40gb: 1\n  all-7g.80gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        7g.80gb: 1\n  all-balanced:\n    - device-filter: [\"0x233110DE\", \"0x232210DE\", \"0x20B210DE\", \"0x20B510DE\", \"0x20F310DE\", \"0x20F510DE\"]\n      devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb: 2\n        2g.20gb: 1\n        3g.40gb: 1\n  # After setting, GI instances will be partitioned according to the specified configuration\n  custom-config:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        3g.40gb: 2\n

In the YAML above, each top-level key is a selectable sharding policy, and the custom-config entry partitions the GPU into GI instances by the given specification. To further subdivide a GI into CI instances, use CI profiles such as 1c.3g.40gb:

                                                  custom-config:\n  - devices: all\n    mig-enabled: true\n    mig-devices:\n      1c.3g.40gb: 6\n

After completing the settings, you can request GPU MIG resources when deploying an application.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/create_mig.html#switch-node-gpu-mode","title":"Switch Node GPU Mode","text":"

                                                  After successfully installing the GPU operator, the node is in full card mode by default. There will be an indicator on the node management page, as shown below:

Click the ┇ on the right side of the node list, select the GPU mode switch, and then choose the appropriate MIG mode and sharding policy. Here, MIXED mode is used as an example:

                                                  There are two configurations here:

1. MIG Policy: Mixed or Single.
2. Sharding Policy: This policy must match a key in the default-mig-parted-config (or your user-defined sharding policy) configuration file; the sketch below shows how the chosen key is applied to the node.
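Under the hood, switching the mode applies a MIG configuration label to the node, which the mig-manager component then reconciles. A minimal sketch, assuming the nvidia.com/mig.config label key used by NVIDIA's mig-manager and a hypothetical node name:

apiVersion: v1\nkind: Node\nmetadata:\n  name: gpu-node-1   # hypothetical node name\n  labels:\n    nvidia.com/mig.config: all-3g.40gb   # must match a key in the sharding policy file\n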

After clicking the OK button, wait about a minute and refresh the page. The node will switch to MIG mode:

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/mig_command.html","title":"MIG Related Commands","text":"

                                                  GI Related Commands:

• nvidia-smi mig -lgi : View the list of created GI instances
• nvidia-smi mig -dgi -gi {gi instance id} : Delete a specific GI instance
• nvidia-smi mig -lgip : View the available GI profiles
• nvidia-smi mig -cgi {profile id} : Create a GI using the specified profile ID

                                                  CI Related Commands:

• nvidia-smi mig -lcip {-gi {gi instance id}} : View the available CI profiles; specifying -gi shows the CIs that can be created on a particular GI instance
• nvidia-smi mig -lci : View the list of created CI instances
• nvidia-smi mig -cci {profile id} -gi {gi instance id} : Create a CI instance on the specified GI
• nvidia-smi mig -dci -ci {ci instance id} : Delete a specific CI instance

                                                  GI+CI Related Commands:

• nvidia-smi mig -i 0 -cgi {gi profile id} -C {ci profile id} : Create a GI + CI instance directly"},{"location":"en/end-user/kpanda/gpu/nvidia/mig/mig_usage.html","title":"Using MIG GPU Resources","text":"

                                                  This section explains how applications can use MIG GPU resources.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/mig_usage.html#prerequisites","title":"Prerequisites","text":"
• The AI platform container management module is deployed and running.
• The container management module has been integrated with a Kubernetes cluster, or a Kubernetes cluster has been created, and the cluster's UI can be accessed.
• NVIDIA DevicePlugin and MIG capabilities are enabled. Refer to Offline installation of GPU Operator for details.
• The nodes in the cluster have GPUs of supported models.
                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/mig_usage.html#using-mig-gpu-through-the-ui","title":"Using MIG GPU through the UI","text":"
                                                  1. Confirm if the cluster has recognized the GPU card type.

                                                    Go to Cluster Details -> Nodes and check if it has been correctly recognized as MIG.

                                                  2. When deploying an application using an image, you can select and use NVIDIA MIG resources.

                                                  3. Example of MIG Single Mode (used in the same way as a full GPU card):

                                                    Note

                                                    The MIG single policy allows users to request and use GPU resources in the same way as a full GPU card (nvidia.com/gpu). The difference is that these resources can be a portion of the GPU (MIG device) rather than the entire GPU. Learn more from the GPU MIG Mode Design.

                                                  4. MIG Mixed Mode

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/mig_usage.html#using-mig-through-yaml-configuration","title":"Using MIG through YAML Configuration","text":"

                                                  MIG Single mode:

                                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mig-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mig-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mig-demo\n    spec:\n      containers:\n        - name: mig-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/gpu: 2 # (1)!\n          imagePullPolicy: Always\n      restartPolicy: Always\n
                                                  1. Number of MIG GPUs to request

                                                  MIG Mixed mode:

                                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mig-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mig-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mig-demo\n    spec:\n      containers:\n        - name: mig-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/mig-4g.20gb: 1 # (1)!\n          imagePullPolicy: Always\n      restartPolicy: Always\n
1. Expose MIG devices through resource types of the form nvidia.com/mig-<g>g.<gb>gb (here, nvidia.com/mig-4g.20gb)

                                                  After entering the container, you can check if only one MIG device is being used:
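For a quick check, here is a hedged sketch of a one-shot Pod that prints the devices visible to the container at startup. The CUDA base image tag is an assumption; any image that provides nvidia-smi works:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: mig-check   # hypothetical name\nspec:\n  restartPolicy: Never\n  containers:\n    - name: check\n      image: nvcr.io/nvidia/cuda:12.2.0-base-ubuntu22.04   # assumed image; must contain nvidia-smi\n      command: [\"nvidia-smi\", \"-L\"]   # prints one line per visible GPU/MIG device\n      resources:\n        limits:\n          nvidia.com/mig-4g.20gb: 1\n

If the MIG allocation works as intended, the output lists exactly one MIG device.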

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/vgpu/hami.html","title":"Build a vGPU Memory Oversubscription Image","text":"

The vGPU memory oversubscription feature has been removed from the HAMi project. To use it, you need to rebuild the image with a libvgpu.so file that supports memory oversubscription.

                                                  Dockerfile
                                                  FROM docker.m.daocloud.io/projecthami/hami:v2.3.11\nCOPY libvgpu.so /k8s-vgpu/lib/nvidia/\n

                                                  Run the following command to build the image:

                                                  docker build -t release.daocloud.io/projecthami/hami:v2.3.11 -f Dockerfile .\n

                                                  Then, push the image to release.daocloud.io.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_addon.html","title":"Installing NVIDIA vGPU Addon","text":"

To virtualize a single NVIDIA GPU into multiple virtual GPUs and allocate them to different virtual machines or users, you can use NVIDIA's vGPU capability. This section explains how to install the vGPU plugin in the AI platform, which is a prerequisite for using NVIDIA vGPU capability.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_addon.html#prerequisites","title":"Prerequisites","text":"
• Refer to the GPU Support Matrix to confirm that the nodes in the cluster have GPUs of supported models.
                                                  • The current cluster has deployed NVIDIA drivers through the Operator. For specific instructions, refer to Offline Installation of GPU Operator.
                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_addon.html#procedure","title":"Procedure","text":"
                                                  1. Path: Container Management -> Cluster Management -> Click the target cluster -> Helm Apps -> Helm Charts -> Search for nvidia-vgpu .

2. During the installation of vGPU, several basic parameters can be modified. If you need to modify advanced parameters, click the YAML column to make changes. A values snippet illustrating the basic parameters follows this procedure:

• deviceMemoryScaling : NVIDIA device memory scaling factor; the input value must be an integer, with a default value of 1. It can be greater than 1 (enabling virtual memory, an experimental feature). For an NVIDIA GPU with memory size M, if the devicePlugin.deviceMemoryScaling parameter is configured as S, then in a Kubernetes cluster where the device plugin is deployed, the vGPUs assigned from this GPU will have a total memory of S * M.

• deviceSplitCount : An integer with a default value of 10, specifying the number of GPU splits. Each GPU cannot be assigned more tasks than this count; if configured as N, each GPU can run up to N tasks simultaneously.

• Resources : The resource usage of the vgpu-device-plugin and vgpu-scheduler pods.

                                                  3. After a successful installation, you will see two types of pods in the specified namespace, indicating that the NVIDIA vGPU plugin has been successfully installed:

                                                  After a successful installation, you can deploy applications using vGPU resources.
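Here is a hedged sketch of the Helm values corresponding to the basic parameters above. The key layout follows the parameter names given in the text; verify it against the chart's values.yaml:

devicePlugin:\n  deviceMemoryScaling: 2   # vGPUs carved from a GPU with memory M get a total of 2 * M\n  deviceSplitCount: 10     # at most 10 concurrent tasks per physical GPU\n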

                                                  Note

NVIDIA vGPU Addon does not support upgrading directly from the older v2.0.0 to the latest v2.0.0+1. To upgrade, uninstall the older version and then install the latest version.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html","title":"Using NVIDIA vGPU in Applications","text":"

This section explains how to use the vGPU capability in the AI platform.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html#prerequisites","title":"Prerequisites","text":"
• The nodes in the cluster have GPUs of supported models.
• The vGPU Addon has been successfully installed. Refer to Installing GPU Addon for details.
• The GPU Operator is installed, with the Nvidia.DevicePlugin capability disabled. Refer to Offline Installation of GPU Operator for details.
                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html#procedure","title":"Procedure","text":""},{"location":"en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html#using-vgpu-through-the-ui","title":"Using vGPU through the UI","text":"
1. Confirm that the cluster has detected GPUs. Click Clusters -> Cluster Settings -> Addon Plugins and check whether the GPU plugin has been automatically enabled and the corresponding GPU type detected. Currently, the cluster will automatically enable the GPU addon and set the GPU Type to Nvidia vGPU.

                                                  2. Deploy a workload by clicking Clusters -> Workloads . When deploying a workload using an image, select the type Nvidia vGPU , and you will be prompted with the following parameters:

                                                    • Number of Physical Cards (nvidia.com/vgpu) : Indicates how many physical cards need to be mounted by the current pod. The input value must be an integer and less than or equal to the number of cards on the host machine.
                                                    • GPU Cores (nvidia.com/gpucores): Indicates the GPU cores utilized by each card, with a value range from 0 to 100. Setting it to 0 means no enforced isolation, while setting it to 100 means exclusive use of the entire card.
                                                    • GPU Memory (nvidia.com/gpumem): Indicates the GPU memory occupied by each card, with a value in MB. The minimum value is 1, and the maximum value is the total memory of the card.

                                                    If there are issues with the configuration values above, it may result in scheduling failure or inability to allocate resources.

                                                  "},{"location":"en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html#using-vgpu-through-yaml-configuration","title":"Using vGPU through YAML Configuration","text":"

                                                  Refer to the following workload configuration and add the parameter nvidia.com/vgpu: '1' in the resource requests and limits section to configure the number of physical cards used by the application.

                                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-vgpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-vgpu-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: full-vgpu-demo\n    spec:\n      containers:\n        - name: full-vgpu-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/gpucores: '20'   # Request 20% of GPU cores for each card\n              nvidia.com/gpumem: '200'   # Request 200MB of GPU memory for each card\n              nvidia.com/vgpu: '1'   # Request 1 GPU card\n          imagePullPolicy: Always\n      restartPolicy: Always\n

                                                  This YAML configuration requests the application to use vGPU resources. It specifies that each card should utilize 20% of GPU cores, 200MB of GPU memory, and requests 1 GPU card.

                                                  "},{"location":"en/end-user/kpanda/gpu/volcano/volcano-gang-scheduler.html","title":"Using Volcano's Gang Scheduler","text":"

                                                  The Gang scheduling policy is one of the core scheduling algorithms of the volcano-scheduler. It satisfies the \"All or nothing\" scheduling requirement during the scheduling process, preventing arbitrary scheduling of Pods that could waste cluster resources. The specific algorithm observes whether the number of scheduled Pods under a Job meets the minimum running quantity. When the Job's minimum running quantity is satisfied, scheduling actions are performed for all Pods under the Job; otherwise, no actions are taken.

                                                  "},{"location":"en/end-user/kpanda/gpu/volcano/volcano-gang-scheduler.html#use-cases","title":"Use Cases","text":"

                                                  The Gang scheduling algorithm, based on the concept of a Pod group, is particularly suitable for scenarios that require multi-process collaboration. AI scenarios often involve complex workflows, such as Data Ingestion, Data Analysis, Data Splitting, Training, Serving, and Logging, which require a group of containers to work together. This makes the Gang scheduling policy based on pods very appropriate.

Gang scheduling is also well suited to multi-threaded parallel computing scenarios under the MPI computation framework, because the master and worker processes must run together. Since the containers in such a group are highly interdependent, scheduling only some of them leads to resource contention; scheduling the group as a whole effectively avoids such deadlocks.

                                                  In scenarios with insufficient cluster resources, the Gang scheduling policy significantly improves the utilization of cluster resources. For example, if the cluster can currently accommodate only 2 Pods, but the minimum number of Pods required for scheduling is 3, then all Pods of this Job will remain pending until the cluster can accommodate 3 Pods, at which point the Pods will be scheduled. This effectively prevents the partial scheduling of Pods, which would not meet the requirements and would occupy resources, making other Jobs unable to run.

                                                  "},{"location":"en/end-user/kpanda/gpu/volcano/volcano-gang-scheduler.html#concept-explanation","title":"Concept Explanation","text":"

                                                  The Gang Scheduler is the core scheduling plugin of Volcano, and it is enabled by default upon installing Volcano. When creating a workload, you only need to specify the scheduler name as Volcano.

                                                  Volcano schedules based on PodGroups. When creating a workload, there is no need to manually create PodGroup resources; Volcano will automatically create them based on the workload information. Below is an example of a PodGroup:

apiVersion: scheduling.volcano.sh/v1beta1\nkind: PodGroup\nmetadata:\n  name: test\n  namespace: default\nspec:\n  minMember: 1  # (1)!\n  minResources:  # (2)!\n    cpu: \"3\"\n    memory: \"2048Mi\"\n  priorityClassName: high-priority # (3)!\n  queue: default # (4)!\n
1. Represents the minimum number of Pods or jobs that need to run under this PodGroup. If the cluster resources do not meet the requirements to run the number of jobs specified by minMember, the scheduler will not schedule any jobs within this PodGroup.
                                                  2. Represents the minimum resources required to run this PodGroup. If the allocatable resources of the cluster do not meet the minResources, the scheduler will not schedule any jobs within this PodGroup.
                                                  3. Represents the priority of this PodGroup, used by the scheduler to sort all PodGroups within the queue during scheduling. system-node-critical and system-cluster-critical are two reserved values indicating the highest priority. If not specifically designated, the default priority or zero priority is used.
                                                  4. Represents the queue to which this PodGroup belongs. The queue must be pre-created and in the open state.
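As noted above, enabling Volcano for an ordinary workload only requires setting the scheduler name; the PodGroup is then created automatically. A minimal sketch (the workload name and image are illustrative):

apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: volcano-demo   # illustrative name\n  namespace: default\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      app: volcano-demo\n  template:\n    metadata:\n      labels:\n        app: volcano-demo\n    spec:\n      schedulerName: volcano   # hand this workload's Pods to the Volcano scheduler\n      containers:\n        - name: nginx\n          image: nginx:1.25\n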
                                                  "},{"location":"en/end-user/kpanda/gpu/volcano/volcano-gang-scheduler.html#use-case","title":"Use Case","text":"

                                                  In a multi-threaded parallel computing communication scenario under the MPI computation framework, we need to ensure that all Pods can be successfully scheduled to ensure the job is completed correctly. Setting minAvailable to 4 means that 1 mpimaster and 3 mpiworkers are required to run.

                                                  apiVersion: batch.volcano.sh/v1alpha1\nkind: Job\nmetadata:\n  name: lm-mpi-job\n  labels:\n    \"volcano.sh/job-type\": \"MPI\"\nspec:\n  minAvailable: 4\n  schedulerName: volcano\n  plugins:\n    ssh: []\n    svc: []\n  policies:\n    - event: PodEvicted\n      action: RestartJob\n  tasks:\n    - replicas: 1\n      name: mpimaster\n      policies:\n        - event: TaskCompleted\n          action: CompleteJob\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  MPI_HOST=`cat /etc/volcano/mpiworker.host | tr \"\\n\" \",\"`;\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd;\n                  mpiexec --allow-run-as-root --host ${MPI_HOST} -np 3 mpi_hello_world;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpimaster\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"500m\"\n                limits:\n                  cpu: \"500m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n    - replicas: 3\n      name: mpiworker\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd -D;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpiworker\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"1000m\"\n                limits:\n                  cpu: \"1000m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n

The PodGroup resource generated for this Job:

                                                  apiVersion: scheduling.volcano.sh/v1beta1\nkind: PodGroup\nmetadata:\n  annotations:\n  creationTimestamp: \"2024-05-28T09:18:50Z\"\n  generation: 5\n  labels:\n    volcano.sh/job-type: MPI\n  name: lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2\n  namespace: default\n  ownerReferences:\n  - apiVersion: batch.volcano.sh/v1alpha1\n    blockOwnerDeletion: true\n    controller: true\n    kind: Job\n    name: lm-mpi-job\n    uid: 9c571015-37c7-4a1a-9604-eaa2248613f2\n  resourceVersion: \"25173454\"\n  uid: 7b04632e-7cff-4884-8e9a-035b7649d33b\nspec:\n  minMember: 4\n  minResources:\n    count/pods: \"4\"\n    cpu: 3500m\n    limits.cpu: 3500m\n    pods: \"4\"\n    requests.cpu: 3500m\n  minTaskMember:\n    mpimaster: 1\n    mpiworker: 3\n  queue: default\nstatus:\n  conditions:\n  - lastTransitionTime: \"2024-05-28T09:19:01Z\"\n    message: '3/4 tasks in gang unschedulable: pod group is not ready, 1 Succeeded,\n      3 Releasing, 4 minAvailable'\n    reason: NotEnoughResources\n    status: \"True\"\n    transitionID: f875efa5-0358-4363-9300-06cebc0e7466\n    type: Unschedulable\n  - lastTransitionTime: \"2024-05-28T09:18:53Z\"\n    reason: tasks in gang are ready to be scheduled\n    status: \"True\"\n    transitionID: 5a7708c8-7d42-4c33-9d97-0581f7c06dab\n    type: Scheduled\n  phase: Pending\n  succeeded: 1\n

The PodGroup shows that it is associated with the workload through ownerReferences and that the minimum number of running Pods is set to 4.

                                                  "},{"location":"en/end-user/kpanda/gpu/volcano/volcano_user_guide.html","title":"Use Volcano for AI Compute","text":""},{"location":"en/end-user/kpanda/gpu/volcano/volcano_user_guide.html#usage-scenarios","title":"Usage Scenarios","text":"

                                                  Kubernetes has become the de facto standard for orchestrating and managing cloud-native applications, and an increasing number of applications are choosing to migrate to K8s. The fields of artificial intelligence and machine learning inherently involve a large number of compute-intensive tasks, and developers are very willing to build AI platforms based on Kubernetes to fully leverage its resource management, application orchestration, and operations monitoring capabilities. However, the default Kubernetes scheduler was initially designed primarily for long-running services and has many shortcomings in batch and elastic scheduling for AI and big data tasks. For example, resource contention issues:

                                                  Take TensorFlow job scenarios as an example. TensorFlow jobs include two different roles, PS and Worker, and the Pods for these two roles need to work together to complete the entire job. If only one type of role Pod is running, the entire job cannot be executed properly. The default scheduler schedules Pods one by one and is unaware of the PS and Worker roles in a Kubeflow TFJob. In a high-load cluster (insufficient resources), multiple jobs may each be allocated some resources to run a portion of their Pods, but the jobs cannot complete successfully, leading to resource waste. For instance, if a cluster has 4 GPUs and both TFJob1 and TFJob2 each have 4 Workers, TFJob1 and TFJob2 might each be allocated 2 GPUs. However, both TFJob1 and TFJob2 require 4 GPUs to run. This mutual waiting for resource release creates a deadlock situation, resulting in GPU resource waste.

                                                  "},{"location":"en/end-user/kpanda/gpu/volcano/volcano_user_guide.html#volcano-batch-scheduling-system","title":"Volcano Batch Scheduling System","text":"

                                                  Volcano is the first Kubernetes-based container batch computing platform under CNCF, focusing on high-performance computing scenarios. It fills in the missing functionalities of Kubernetes in fields such as machine learning, big data, and scientific computing, providing essential support for these high-performance workloads. Additionally, Volcano seamlessly integrates with mainstream computing frameworks like Spark, TensorFlow, and PyTorch, and supports hybrid scheduling of heterogeneous devices, including CPUs and GPUs, effectively resolving the deadlock issues mentioned above.

                                                  The following sections will introduce how to install and use Volcano.

                                                  "},{"location":"en/end-user/kpanda/gpu/volcano/volcano_user_guide.html#install-volcano","title":"Install Volcano","text":"
                                                  1. Find Volcano in Cluster Details -> Helm Apps -> Helm Charts and install it.

                                                  2. Check and confirm whether Volcano is installed successfully, that is, whether the components volcano-admission, volcano-controllers, and volcano-scheduler are running properly.

Typically, Volcano is used in conjunction with AI Lab to form an effective closed loop covering dataset development, Notebooks, and training tasks.

                                                  "},{"location":"en/end-user/kpanda/gpu/volcano/volcano_user_guide.html#volcano-use-cases","title":"Volcano Use Cases","text":"
                                                  • Volcano is a standalone scheduler. To enable the Volcano scheduler when creating workloads, simply specify the scheduler's name (schedulerName: volcano).
                                                  • The volcanoJob resource is an extension of the Job in Volcano, breaking the Job down into smaller working units called tasks, which can interact with each other.
                                                  "},{"location":"en/end-user/kpanda/gpu/volcano/volcano_user_guide.html#volcano-supports-tensorflow","title":"Volcano Supports TensorFlow","text":"

                                                  Here is an example:

                                                  apiVersion: batch.volcano.sh/v1alpha1\nkind: Job\nmetadata:\n  name: tensorflow-benchmark\n  labels:\n    \"volcano.sh/job-type\": \"Tensorflow\"\nspec:\n  minAvailable: 3\n  schedulerName: volcano\n  plugins:\n    env: []\n    svc: []\n  policies:\n    - event: PodEvicted\n      action: RestartJob\n  tasks:\n    - replicas: 1\n      name: ps\n      template:\n        spec:\n          imagePullSecrets:\n            - name: default-secret\n          containers:\n            - command:\n                - sh\n                - -c\n                - |\n                  PS_HOST=`cat /etc/volcano/ps.host | sed 's/$/&:2222/g' | tr \"\\n\" \",\"`;\n                  WORKER_HOST=`cat /etc/volcano/worker.host | sed 's/$/&:2222/g' | tr \"\\n\" \",\"`;\n                  python tf_cnn_benchmarks.py --batch_size=32 --model=resnet50 --variable_update=parameter_server --flush_stdout=true --num_gpus=1 --local_parameter_device=cpu --device=cpu --data_format=NHWC --job_name=ps --task_index=${VK_TASK_INDEX} --ps_hosts=${PS_HOST} --worker_hosts=${WORKER_HOST}\n              image: docker.m.daocloud.io/volcanosh/example-tf:0.0.1\n              name: tensorflow\n              ports:\n                - containerPort: 2222\n                  name: tfjob-port\n              resources:\n                requests:\n                  cpu: \"1000m\"\n                  memory: \"2048Mi\"\n                limits:\n                  cpu: \"1000m\"\n                  memory: \"2048Mi\"\n              workingDir: /opt/tf-benchmarks/scripts/tf_cnn_benchmarks\n          restartPolicy: OnFailure\n    - replicas: 2\n      name: worker\n      policies:\n        - event: TaskCompleted\n          action: CompleteJob\n      template:\n        spec:\n          imagePullSecrets:\n            - name: default-secret\n          containers:\n            - command:\n                - sh\n                - -c\n                - |\n                  PS_HOST=`cat /etc/volcano/ps.host | sed 's/$/&:2222/g' | tr \"\\n\" \",\"`;\n                  WORKER_HOST=`cat /etc/volcano/worker.host | sed 's/$/&:2222/g' | tr \"\\n\" \",\"`;\n                  python tf_cnn_benchmarks.py --batch_size=32 --model=resnet50 --variable_update=parameter_server --flush_stdout=true --num_gpus=1 --local_parameter_device=cpu --device=cpu --data_format=NHWC --job_name=worker --task_index=${VK_TASK_INDEX} --ps_hosts=${PS_HOST} --worker_hosts=${WORKER_HOST}\n              image: docker.m.daocloud.io/volcanosh/example-tf:0.0.1\n              name: tensorflow\n              ports:\n                - containerPort: 2222\n                  name: tfjob-port\n              resources:\n                requests:\n                  cpu: \"2000m\"\n                  memory: \"2048Mi\"\n                limits:\n                  cpu: \"2000m\"\n                  memory: \"4096Mi\"\n              workingDir: /opt/tf-benchmarks/scripts/tf_cnn_benchmarks\n          restartPolicy: OnFailure\n
                                                  "},{"location":"en/end-user/kpanda/gpu/volcano/volcano_user_guide.html#parallel-computing-with-mpi","title":"Parallel Computing with MPI","text":"

In multi-threaded parallel computing scenarios under the MPI computing framework, we need to ensure that all Pods are successfully scheduled to guarantee the job completes correctly. Setting minAvailable to 4 indicates that 1 mpimaster and 3 mpiworkers are required to run. Simply setting the schedulerName field to volcano enables the Volcano scheduler.

                                                  Here is an example:

                                                  apiVersion: batch.volcano.sh/v1alpha1\nkind: Job\nmetadata:\n  name: lm-mpi-job\n  labels:\n    \"volcano.sh/job-type\": \"MPI\"\nspec:\n  minAvailable: 4\n  schedulerName: volcano\n  plugins:\n    ssh: []\n    svc: []\n  policies:\n    - event: PodEvicted\n      action: RestartJob\n  tasks:\n    - replicas: 1\n      name: mpimaster\n      policies:\n        - event: TaskCompleted\n          action: CompleteJob\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  MPI_HOST=`cat /etc/volcano/mpiworker.host | tr \"\\n\" \",\"`;\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd;\n                  mpiexec --allow-run-as-root --host ${MPI_HOST} -np 3 mpi_hello_world;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpimaster\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"500m\"\n                limits:\n                  cpu: \"500m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n    - replicas: 3\n      name: mpiworker\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd -D;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpiworker\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"1000m\"\n                limits:\n                  cpu: \"1000m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n

The PodGroup resource generated for this Job:

                                                  apiVersion: scheduling.volcano.sh/v1beta1\nkind: PodGroup\nmetadata:\n  annotations:\n  creationTimestamp: \"2024-05-28T09:18:50Z\"\n  generation: 5\n  labels:\n    volcano.sh/job-type: MPI\n  name: lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2\n  namespace: default\n  ownerReferences:\n  - apiVersion: batch.volcano.sh/v1alpha1\n    blockOwnerDeletion: true\n    controller: true\n    kind: Job\n    name: lm-mpi-job\n    uid: 9c571015-37c7-4a1a-9604-eaa2248613f2\n  resourceVersion: \"25173454\"\n  uid: 7b04632e-7cff-4884-8e9a-035b7649d33b\nspec:\n  minMember: 4\n  minResources:\n    count/pods: \"4\"\n    cpu: 3500m\n    limits.cpu: 3500m\n    pods: \"4\"\n    requests.cpu: 3500m\n  minTaskMember:\n    mpimaster: 1\n    mpiworker: 3\n  queue: default\nstatus:\n  conditions:\n  - lastTransitionTime: \"2024-05-28T09:19:01Z\"\n    message: '3/4 tasks in gang unschedulable: pod group is not ready, 1 Succeeded,\n      3 Releasing, 4 minAvailable'\n    reason: NotEnoughResources\n    status: \"True\"\n    transitionID: f875efa5-0358-4363-9300-06cebc0e7466\n    type: Unschedulable\n  - lastTransitionTime: \"2024-05-28T09:18:53Z\"\n    reason: tasks in gang are ready to be scheduled\n    status: \"True\"\n    transitionID: 5a7708c8-7d42-4c33-9d97-0581f7c06dab\n    type: Scheduled\n  phase: Pending\n  succeeded: 1\n

The PodGroup shows that it is associated with the workload through ownerReferences and that the minimum number of running Pods is set to 4.

                                                  If you want to learn more about the features and usage scenarios of Volcano, refer to Volcano Introduction.

                                                  "},{"location":"en/end-user/kpanda/helm/index.html","title":"Helm Charts","text":"

Helm is a package management tool for Kubernetes, which makes it easy for users to quickly discover, share, and use applications built for Kubernetes. Container Management provides hundreds of Helm charts, covering storage, networking, monitoring, databases, and other major use cases. With these templates, you can quickly deploy and easily manage Helm apps through the UI. In addition, it supports adding more personalized templates through Add Helm repository to meet various needs.

                                                  Key Concepts:

                                                  There are a few key concepts to understand when using Helm:

• Chart: A Helm installation package, which contains the images, dependencies, and resource definitions required to run an application, and may also contain service definitions for the Kubernetes cluster, similar to formulae in Homebrew, dpkg packages in APT, or rpm files in YUM. Charts are called Helm Charts in AI platform.

                                                  • Release: A Chart instance running on the Kubernetes cluster. A Chart can be installed multiple times in the same cluster, and each installation will create a new Release. Release is called Helm Apps in AI platform.

                                                  • Repository: A repository for publishing and storing Charts. Repository is called Helm Repositories in AI platform.

                                                  For more details, refer to Helm official website.
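For intuition, each Chart carries a Chart.yaml describing the package. A hedged minimal sketch, reusing the dao-2048 chart name referenced later in this document (field values are illustrative):

apiVersion: v2\nname: dao-2048        # chart name; installing it creates a Release\nversion: 1.4.1        # chart version, as published in a Repository\nappVersion: \"1.4.1\"\ndescription: Illustrative chart metadata\n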

                                                  Related operations:

                                                  • Manage Helm apps, including installing, updating, uninstalling Helm apps, viewing Helm operation records, etc.
                                                  • Manage Helm repository, including installing, updating, deleting Helm repository, etc.
                                                  "},{"location":"en/end-user/kpanda/helm/Import-addon.html","title":"Import Custom Helm Apps into Built-in Addons","text":"

This article explains how to import Helm apps into the system's built-in addons in both offline and online environments.

                                                  "},{"location":"en/end-user/kpanda/helm/Import-addon.html#offline-environment","title":"Offline Environment","text":"

                                                  An offline environment refers to an environment that cannot connect to the internet or is a closed private network environment.

                                                  "},{"location":"en/end-user/kpanda/helm/Import-addon.html#prerequisites","title":"Prerequisites","text":"
                                                  • charts-syncer is available and running. If not, you can click here to download.
• The Helm Chart has been adapted for charts-syncer, meaning a .relok8s-images.yaml file has been added to the Helm Chart. This file should list all the images used by the Chart, including any images that are not referenced directly in the Chart templates but are used at runtime, for example by an Operator. A sketch of such a hints file follows the note below.

                                                  Note

• Refer to image-hints-file for instructions on how to write the hints file. The registry and repository of each image must be written separately, because the registry/repository needs to be replaced or modified when the image is loaded.
• The installer's fire cluster already has charts-syncer installed. If you are importing a custom Helm app into the installer's fire cluster, you can skip the download and proceed directly to the adaptation. If the charts-syncer binary is not installed, you can download it immediately.
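Here is a hedged sketch of a .relok8s-images.yaml hints file: each entry is a Go template that resolves to an image reference through the Chart's values. The values keys below are illustrative and must match your chart's values.yaml:

- \"{{ .image.registry }}/{{ .image.repository }}:{{ .image.tag }}\"\n- \"{{ .operator.image.registry }}/{{ .operator.image.repository }}:{{ .operator.image.tag }}\"\n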
                                                  "},{"location":"en/end-user/kpanda/helm/Import-addon.html#sync-helm-chart","title":"Sync Helm Chart","text":"
                                                  1. Go to Container Management -> Helm Apps -> Helm Repositories , search for the addon, and obtain the built-in repository address and username/password (the default username/password for the system's built-in repository is rootuser/rootpass123).

                                                  2. Sync the Helm Chart to the built-in repository addon of the container management system

• Write the following configuration file, modify it according to your specific setup, and save it as sync-dao-2048.yaml.

                                                      source:  # helm charts source information\n  repo:\n    kind: HARBOR # It can also be any other supported Helm Chart repository type, such as CHARTMUSEUM\n    url: https://release-ci.daocloud.io/chartrepo/community #  Change to the chart repo URL\n    #auth: # username/password, if no password is set, leave it blank\n      #username: \"admin\"\n      #password: \"Harbor12345\"\ncharts:  # charts to sync\n  - name: dao-2048 # helm charts information, if not specified, sync all charts in the source helm repo\n    versions:\n      - 1.4.1\ntarget:  # helm charts target information\n  containerRegistry: 10.5.14.40 # image repository URL\n  repo:\n    kind: CHARTMUSEUM # It can also be any other supported Helm Chart repository type, such as HARBOR\n    url: http://10.5.14.40:8081 #  Change to the correct chart repo URL, you can verify the address by using helm repo add $HELM-REPO\n    auth: # username/password, if no password is set, leave it blank\n      username: \"rootuser\"\n      password: \"rootpass123\"\n  containers:\n    # kind: HARBOR # If the image repository is HARBOR and you want charts-syncer to automatically create an image repository, fill in this field\n    # auth: # username/password, if no password is set, leave it blank\n      # username: \"admin\"\n      # password: \"Harbor12345\"\n\n# leverage .relok8s-images.yaml file inside the Charts to move the container images too\nrelocateContainerImages: true\n
                                                    • Run the charts-syncer command to sync the Chart and its included images

                                                      charts-syncer sync --config sync-dao-2048.yaml --insecure --auto-create-repository\n

                                                      The expected output is:

                                                      I1222 15:01:47.119777    8743 sync.go:45] Using config file: \"examples/sync-dao-2048.yaml\"\nW1222 15:01:47.234238    8743 syncer.go:263] Ignoring skipDependencies option as dependency sync is not supported if container image relocation is true or syncing from/to intermediate directory\nI1222 15:01:47.234685    8743 sync.go:58] There is 1 chart out of sync!\nI1222 15:01:47.234706    8743 sync.go:66] Syncing \"dao-2048_1.4.1\" chart...\n.relok8s-images.yaml hints file found\nComputing relocation...\n\nRelocating dao-2048@1.4.1...\nPushing 10.5.14.40/daocloud/dao-2048:v1.4.1...\nDone\nDone moving /var/folders/vm/08vw0t3j68z9z_4lcqyhg8nm0000gn/T/charts-syncer869598676/dao-2048-1.4.1.tgz\n
3. Once the previous step is completed, go to Container Management -> Helm Apps -> Helm Repositories , find the corresponding addon, click Sync Repository in the action column, and you will see the uploaded Helm app in the Helm templates.

                                                  4. You can then proceed with normal installation, upgrade, and uninstallation.

                                                  "},{"location":"en/end-user/kpanda/helm/Import-addon.html#online-environment","title":"Online Environment","text":"

The Helm Repo address for the online environment is release.daocloud.io . If the user does not have permission to add a Helm Repo, they will not be able to import custom Helm apps into the system's built-in addons. In that case, you can add your own Helm repository and then integrate it into the platform, following the same steps as syncing a Helm Chart in the offline environment.

                                                  "},{"location":"en/end-user/kpanda/helm/helm-app.html","title":"Manage Helm Apps","text":"

                                                  The container management module supports interface-based management of Helm, including creating Helm instances using Helm charts, customizing Helm instance arguments, and managing the full lifecycle of Helm instances.

                                                  This section will take cert-manager as an example to introduce how to create and manage Helm apps through the container management interface.

                                                  "},{"location":"en/end-user/kpanda/helm/helm-app.html#prerequisites","title":"Prerequisites","text":"
• A Kubernetes cluster has been integrated or created, and you can access the cluster's UI.

                                                  • Created a namespace, user, and granted NS Admin or higher permissions to the user. For details, refer to Namespace Authorization.

                                                  "},{"location":"en/end-user/kpanda/helm/helm-app.html#install-the-helm-app","title":"Install the Helm app","text":"

                                                  Follow the steps below to install the Helm app.

                                                  1. Click a cluster name to enter Cluster Details .

                                                  2. In the left navigation bar, click Helm Apps -> Helm Chart to enter the Helm chart page.

On the Helm chart page, select the Helm repository named addon, and all the Helm chart templates under the addon repository will be displayed. Click the Chart named cert-manager.

3. On the installation page, you can see the detailed information of the Chart. Select the version to be installed in the upper right corner of the interface and click the Install button. Here, version v1.9.1 is selected for installation.

4. Configure Name, Namespace, and Version Information. You can also customize arguments by modifying the YAML in the argument configuration area below. Click OK.

5. The system automatically returns to the list of Helm apps. The status of the newly created Helm app is Installing and will change to Running after a period of time.

                                                  "},{"location":"en/end-user/kpanda/helm/helm-app.html#update-the-helm-app","title":"Update the Helm app","text":"

After installing a Helm app through the interface, you can perform an update operation on it. Note: update operations using the UI are only supported for Helm apps installed via the UI.

                                                  Follow the steps below to update the Helm app.

                                                  1. Click a cluster name to enter Cluster Details .

                                                  2. In the left navigation bar, click Helm Apps to enter the Helm app list page.

On the Helm app list page, select the Helm app that needs to be updated, click the ... operation button on the right side of the list, and select Update from the drop-down menu.

                                                  3. After clicking the Update button, the system will jump to the update interface, where you can update the Helm app as needed. Here we take updating the http port of the dao-2048 application as an example.

4. After modifying the relevant arguments, you can click the Change button under the argument configuration to compare the files before and after the modification. After confirming that everything is correct, click the OK button at the bottom to complete the update of the Helm app.

5. The system will automatically return to the Helm app list, and a pop-up window in the upper right corner will indicate that the update was successful.

                                                  "},{"location":"en/end-user/kpanda/helm/helm-app.html#view-helm-operation-records","title":"View Helm operation records","text":"

                                                  Every installation, update, and deletion of Helm apps has detailed operation records and logs for viewing.

                                                  1. In the left navigation bar, click Cluster Operations -> Recent Operations , and then select the Helm Operations tab at the top of the page. Each record corresponds to an install/update/delete operation.

2. To view the detailed log of each operation: click ┇ on the right side of the list, and select Log from the pop-up menu.

3. The detailed operation log will then be displayed as a console at the bottom of the page.

                                                  "},{"location":"en/end-user/kpanda/helm/helm-app.html#delete-the-helm-app","title":"Delete the Helm app","text":"

                                                  Follow the steps below to delete the Helm app.

                                                  1. Find the cluster where the Helm app to be deleted resides, click the cluster name, and enter Cluster Details .

                                                  2. In the left navigation bar, click Helm Apps to enter the Helm app list page.

On the Helm app list page, select the Helm app you want to delete, click the ... operation button on the right side of the list, and select Delete from the drop-down menu.

                                                  3. Enter the name of the Helm app in the pop-up window to confirm, and then click the Delete button.

                                                  "},{"location":"en/end-user/kpanda/helm/helm-repo.html","title":"Manage Helm Repository","text":"

A Helm repository is a repository used to store and publish Charts. The Helm App module supports accessing Chart packages in repositories over HTTP(S). By default, the system has four built-in Helm repositories, listed below, to meet common needs in enterprise production.

• partner: Various high-quality Charts provided by ecosystem partners. Example: tidb
• system: Charts that core system components and some advanced features depend on; for example, insight-agent must be installed to obtain cluster monitoring information. Example: Insight
• addon: Common Charts for business cases. Example: cert-manager
• community: The most popular open-source Charts in the Kubernetes community. Example: Istio

                                                  In addition to the above preset repositories, you can also add third-party Helm repositories yourself. This page will introduce how to add and update third-party Helm repositories.

                                                  "},{"location":"en/end-user/kpanda/helm/helm-repo.html#prerequisites","title":"Prerequisites","text":"
• A Kubernetes cluster has been integrated or created, and you can access the cluster's UI interface.

                                                  • Created a namespace, user, and granted NS Admin or higher permissions to the user. For details, refer to Namespace Authorization.

                                                  • If using a private repository, you should have read and write permissions to the repository.

                                                  "},{"location":"en/end-user/kpanda/helm/helm-repo.html#introduce-third-party-helm-repository","title":"Introduce third-party Helm repository","text":"

The following uses Kubevela's public chart repository as an example of how to add and manage a third-party Helm repository.

1. Find the cluster to which the third-party helm repo will be added, click the cluster name, and enter cluster details.

                                                  2. In the left navigation bar, click Helm Apps -> Helm Repositories to enter the helm repo page.

                                                  3. Click the Create Repository button on the helm repo page to enter the Create repository page, and configure relevant arguments according to the table below.

                                                    • Repository Name: Set the repository name. It can be up to 63 characters long and may only include lowercase letters, numbers, and separators -. It must start and end with a lowercase letter or number, for example, kubevela.
                                                    • Repository URL: The HTTP(S) address pointing to the target Helm repository. For example, https://charts.kubevela.net/core.
                                                    • Skip TLS Verification: If the added Helm repository uses an HTTPS address and requires skipping TLS verification, you can check this option. The default is unchecked.
                                                    • Authentication Method: The method used for identity verification after connecting to the repository URL. For public repositories, you can select None. For private repositories, you need to enter a username/password for identity verification.
                                                    • Labels: Add labels to this Helm repository. For example, key: repo4; value: Kubevela.
                                                    • Annotations: Add annotations to this Helm repository. For example, key: repo4; value: Kubevela.
                                                    • Description: Add a description for this Helm repository. For example: This is a Kubevela public Helm repository.

                                                  4. Click OK to complete the creation of the Helm repository. The page will automatically jump to the list of Helm repositories.
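For comparison, registering the same repository with the Helm CLI is a single command; a sketch using the Kubevela URL from the example above:

# add the repository locally and refresh its chart index\nhelm repo add kubevela https://charts.kubevela.net/core\nhelm repo update\n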

                                                  "},{"location":"en/end-user/kpanda/helm/helm-repo.html#update-the-helm-repository","title":"Update the Helm repository","text":"

When a Helm repository's information changes, you can update its address, authentication method, labels, annotations, and description.

                                                  1. Find the cluster where the repository to be updated is located, click the cluster name, and enter cluster details .

                                                  2. In the left navigation bar, click Helm Apps -> Helm Repositories to enter the helm repo list page.

                                                  3. Find the Helm repository that needs to be updated on the repository list page, click the \u2507 button on the right side of the list, and click Update in the pop-up menu.

4. Make the required changes on the Update Helm Repository page, and click OK when finished.

                                                  5. Return to the helm repo list, and the screen prompts that the update is successful.

                                                  "},{"location":"en/end-user/kpanda/helm/helm-repo.html#delete-the-helm-repository","title":"Delete the Helm repository","text":"

In addition to importing and updating repositories, you can also delete unnecessary repositories, including system preset repositories and third-party repositories.

                                                  1. Find the cluster where the repository to be deleted is located, click the cluster name, and enter cluster details .

                                                  2. In the left navigation bar, click Helm Apps -> Helm Repositories to enter the helm repo list page.

3. Find the Helm repository that needs to be deleted on the repository list page, click the \u2507 button on the right side of the list, and click Delete in the pop-up menu.

                                                  4. Enter the repository name to confirm, and click Delete .

                                                  5. Return to the list of Helm repositories, and the screen prompts that the deletion is successful.

                                                  "},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html","title":"Import and Upgrade Multi-Arch Helm Apps","text":"

In a multi-arch cluster, it is common to use Helm charts that support multiple architectures to address deployment issues caused by architectural differences. This guide explains how to integrate a single-arch Helm app into a multi-arch one, and how to import and upgrade multi-arch Helm apps.

                                                  "},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#import","title":"Import","text":""},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#import-single-arch","title":"Import Single-arch","text":"

                                                  Prepare the offline package addon-offline-full-package-${version}-${arch}.tar.gz.

                                                  Specify the path in the clusterConfig.yml configuration file, for example:

                                                  addonPackage:\n  path: \"/home/addon-offline-full-package-v0.9.0-amd64.tar.gz\"\n

                                                  Then run the import command:

                                                  ~/dce5-installer cluster-create -c /home/dce5/sample/clusterConfig.yaml -m /home/dce5/sample/manifest.yaml -d -j13\n
                                                  "},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#integrate-multi-arch","title":"Integrate Multi-arch","text":"

                                                  Prepare the offline package addon-offline-full-package-${version}-${arch}.tar.gz.

                                                  Take addon-offline-full-package-v0.9.0-arm64.tar.gz as an example and run the import command:

                                                  ~/dce5-installer import-addon -c /home/dce5/sample/clusterConfig.yaml --addon-path=/home/addon-offline-full-package-v0.9.0-arm64.tar.gz\n
                                                  "},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#upgrade","title":"Upgrade","text":""},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#upgrade-single-arch","title":"Upgrade Single-arch","text":"

                                                  Prepare the offline package addon-offline-full-package-${version}-${arch}.tar.gz.

                                                  Specify the path in the clusterConfig.yml configuration file, for example:

                                                  addonPackage:\n  path: \"/home/addon-offline-full-package-v0.11.0-amd64.tar.gz\"\n

                                                  Then run the import command:

                                                  ~/dce5-installer cluster-create -c /home/dce5/sample/clusterConfig.yaml -m /home/dce5/sample/manifest.yaml -d -j13\n
                                                  "},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#multi-arch-integration","title":"Multi-arch Integration","text":"

                                                  Prepare the offline package addon-offline-full-package-${version}-${arch}.tar.gz.

                                                  Take addon-offline-full-package-v0.11.0-arm64.tar.gz as an example and run the import command:

                                                  ~/dce5-installer import-addon -c /home/dce5/sample/clusterConfig.yaml --addon-path=/home/addon-offline-full-package-v0.11.0-arm64.tar.gz\n
                                                  "},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#notes","title":"Notes","text":""},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#disk-space","title":"Disk Space","text":"

The offline package is quite large, and sufficient disk space is required for decompressing it and loading its images. Otherwise, the process may be interrupted with a \"no space left\" error.

                                                  "},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#retry-after-failure","title":"Retry after Failure","text":"

                                                  If the multi-arch fusion step fails, you need to clean up the residue before retrying:

                                                  rm -rf addon-offline-target-package\n
                                                  "},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#registry-space","title":"Registry Space","text":"

If the registry spaces in the offline package being fused are inconsistent with those in the previously imported offline package, an error may occur during fusion because the registry spaces do not exist:

Solution: Simply create the registry space before the fusion. For example, for the error above, creating the registry space \"localhost\" in advance prevents the error.

                                                  "},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#architecture-conflict","title":"Architecture Conflict","text":"

When upgrading with an addon version lower than 0.12.0, the charts-syncer in the target offline package does not check whether an image already exists before pushing it, so the upgrade process will recombine the multi-arch images into a single architecture. For example, if the addon was implemented as multi-arch in v0.10, upgrading to v0.11 will overwrite the multi-arch addon with a single architecture. Upgrading to v0.12.0 or above, however, preserves the multi-arch images.
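To check whether an image in the registry remained multi-arch after an upgrade, you can inspect its manifest list; a sketch with a hypothetical image reference:

# a multi-arch image lists several platform entries (amd64, arm64, ...)\ndocker manifest inspect demo.registry.local/addon/dao-2048:v0.12.0\n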

                                                  "},{"location":"en/end-user/kpanda/helm/upload-helm.html","title":"Upload Helm Charts","text":"

                                                  This article explains how to upload Helm charts. See the steps below.

                                                  1. Add a Helm repository, refer to Adding a Third-Party Helm Repository for the procedure.

                                                  2. Upload the Helm Chart to the Helm repository.

  Upload with Client / Upload with Web Page

                                                    Note

                                                    This method is suitable for Harbor, ChartMuseum, JFrog type repositories.

  1. Log in to a node that can access the Helm repository, upload the Helm binary to the node, and install the cm-push plugin (a VPN may be needed, and Git should be installed in advance).

                                                      Refer to the plugin installation process.
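    A sketch of the plugin installation step, using the upstream chartmuseum plugin repository:

    helm plugin install https://github.com/chartmuseum/helm-push\n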

                                                    2. Push the Helm Chart to the Helm repository by executing the following command:

                                                      helm cm-push ${charts-dir} ${HELM_REPO_URL} --username ${username} --password ${password}\n

                                                      Argument descriptions:

                                                      • charts-dir: The directory of the Helm Chart, or the packaged Chart (i.e., .tgz file).
                                                      • HELM_REPO_URL: The URL of the Helm repository.
                                                      • username/password: The username and password for the Helm repository with push permissions.
                                                      • If you want to access via HTTPS and skip the certificate verification, you can add the argument --insecure.
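    If you prefer to push a packaged .tgz rather than a chart directory, you can package it first; a minimal sketch with a hypothetical chart directory name:

    # packages ./mychart into mychart-0.1.0.tgz (version taken from Chart.yaml), then pushes it\nhelm package ./mychart\nhelm cm-push mychart-0.1.0.tgz ${HELM_REPO_URL} --username ${username} --password ${password}\n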

                                                    Note

                                                    This method is only applicable to Harbor repositories.

                                                    1. Log into the Harbor repository, ensuring the logged-in user has permissions to push;

                                                    2. Go to the relevant project, select the Helm Charts tab, click the Upload button on the page to upload the Helm Chart.

                                                  3. Sync Remote Repository Data

  Manual Sync / Auto Sync

                                                    By default, the cluster does not enable Helm Repository Auto-Refresh, so you need to perform a manual sync operation. The general steps are:

                                                    Go to Helm Apps -> Helm Repositories, click the \u2507 button on the right side of the repository list, and select Sync Repository to complete the repository data synchronization.

                                                    If you need to enable the Helm repository auto-sync feature, you can go to Cluster Maintenance -> Cluster Settings -> Advanced Settings and turn on the Helm repository auto-refresh switch.
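  If you also consume the repository from a workstation, the CLI counterpart of a manual sync is simply refreshing the local index:

  # re-download repository indexes so newly uploaded charts become visible\nhelm repo update\n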

                                                  "},{"location":"en/end-user/kpanda/inspect/index.html","title":"Cluster Inspection","text":"

                                                  Cluster inspection allows administrators to regularly or ad-hoc check the overall health of the cluster, giving them proactive control over ensuring cluster security. With a well-planned inspection schedule, this proactive cluster check allows administrators to monitor the cluster status at any time and address potential issues in advance. It eliminates the previous dilemma of passive troubleshooting during failures, enabling proactive monitoring and prevention.

                                                  The cluster inspection feature provided by AI platform's container management module supports custom inspection items at the cluster, node, and pod levels. After the inspection is completed, it automatically generates visual inspection reports.

                                                  • Cluster Level: Checks the running status of system components in the cluster, including cluster status, resource usage, and specific inspection items for control nodes, such as the status of kube-apiserver and etcd .
                                                  • Node Level: Includes common inspection items for both control nodes and worker nodes, such as node resource usage, handle counts, PID status, and network status.
• Pod Level: Checks the CPU and memory usage and running status of Pods, as well as the status of PVs (PersistentVolumes) and PVCs (PersistentVolumeClaims).

                                                  For information on security inspections or executing security-related inspections, refer to the supported security scan types in AI platform.

                                                  "},{"location":"en/end-user/kpanda/inspect/config.html","title":"Creating Inspection Configuration","text":"

                                                  AI platform Container Management module provides cluster inspection functionality, which supports inspection at the cluster, node, and pod levels.

                                                  • Cluster level: Check the running status of system components in the cluster, including cluster status, resource usage, and specific inspection items for control nodes such as kube-apiserver and etcd .
                                                  • Node level: Includes common inspection items for both control nodes and worker nodes, such as node resource usage, handle count, PID status, and network status.
                                                  • Pod level: Check the CPU and memory usage, running status, PV and PVC status of Pods.

                                                  Here's how to create an inspection configuration.

                                                  1. Click Cluster Inspection in the left navigation bar.

                                                  2. On the right side of the page, click Inspection Configuration .

                                                  3. Fill in the inspection configuration based on the following instructions, then click OK at the bottom of the page.

  • Cluster: Select the clusters that you want to inspect from the dropdown list. If you select multiple clusters, multiple inspection configurations will be generated automatically (identical except for the cluster being inspected).
                                                    • Scheduled Inspection: When enabled, it allows for regular automatic execution of cluster inspections based on a pre-set inspection frequency.
  • Inspection Frequency: Set the interval for automatic inspections, e.g., every Tuesday at 10 AM. Custom Cron expressions are supported; refer to Cron Schedule Syntax for more information. A sample expression is shown after this list.
                                                    • Number of Inspection Records to Retain: Specifies the maximum number of inspection records to be retained, including all inspection records for each cluster.
                                                    • Parameter Configuration: The parameter configuration is divided into three parts: cluster level, node level, and pod level. You can enable or disable specific inspection items based on your requirements.
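  For example, a standard five-field Cron expression for \"every Tuesday at 10 AM\" (fields: minute, hour, day of month, month, day of week):

  0 10 * * 2\n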

After creating the inspection configuration, it will automatically appear in the inspection configuration list. Click the more options button on the right of a configuration to immediately perform an inspection, modify the inspection configuration, or delete the inspection configuration and its reports.

                                                  • Click Inspection to perform an inspection once based on the configuration.
                                                  • Click Inspection Configuration to modify the inspection configuration.
                                                  • Click Delete to delete the inspection configuration and reports.

                                                  Note

                                                  • After creating the inspection configuration, if the Scheduled Inspection configuration is enabled, inspections will be automatically executed at the specified time.
                                                  • If Scheduled Inspection configuration is not enabled, you need to manually trigger the inspection.
                                                  "},{"location":"en/end-user/kpanda/inspect/inspect.html","title":"Start Cluster Inspection","text":"

                                                  After creating an inspection configuration, if the Scheduled Inspection configuration is enabled, inspections will be automatically executed at the specified time. If the Scheduled Inspection configuration is not enabled, you need to manually trigger the inspection.

                                                  This page explains how to manually perform a cluster inspection.

                                                  "},{"location":"en/end-user/kpanda/inspect/inspect.html#prerequisites","title":"Prerequisites","text":"
                                                  • Integrate or create a cluster in the Container Management module.
                                                  • Create an inspection configuration.
                                                  • The selected cluster is in the Running state and the insight component has been installed in the cluster.
                                                  "},{"location":"en/end-user/kpanda/inspect/inspect.html#steps","title":"Steps","text":"

                                                  When performing an inspection, you can choose to inspect multiple clusters in batches or perform a separate inspection for a specific cluster.

Batch Inspection / Individual Inspection
                                                  1. Click Cluster Inspection in the top-level navigation bar of the Container Management module, then click Inspection on the right side of the page.

                                                  2. Select the clusters you want to inspect, then click OK at the bottom of the page.

                                                    • If you choose to inspect multiple clusters at the same time, the system will perform inspections based on different inspection configurations for each cluster.
                                                    • If no inspection configuration is set for a cluster, the system will use the default configuration.

                                                  1. Go to the Cluster Inspection page.
                                                  2. Click the more options button ( \u2507 ) on the right of the proper inspection configuration, then select Inspection from the popup menu.

                                                  "},{"location":"en/end-user/kpanda/inspect/report.html","title":"Check Inspection Reports","text":"

                                                  After the inspection execution is completed, you can view the inspection records and detailed inspection reports.

                                                  "},{"location":"en/end-user/kpanda/inspect/report.html#prerequisites","title":"Prerequisites","text":"
                                                  • Create an inspection configuration.
                                                  • Perform at least one inspection execution.
                                                  "},{"location":"en/end-user/kpanda/inspect/report.html#steps","title":"Steps","text":"
                                                  1. Go to the Cluster Inspection page and click the name of the target inspection cluster.
2. Click the name of the inspection record you want to view.

                                                    • Each inspection execution generates an inspection record.
  • When the number of inspection records exceeds the maximum retention specified in the inspection configuration, the earliest records (by execution time) are deleted first.

3. View the detailed information of the inspection, which may include an overview of cluster resources and the running status of system components.

                                                    You can download the inspection report or delete the inspection report from the top right corner of the page.

                                                  "},{"location":"en/end-user/kpanda/namespaces/createns.html","title":"Namespaces","text":"

                                                  Namespaces are an abstraction used in Kubernetes for resource isolation. A cluster can contain multiple namespaces with different names, and the resources in each namespace are isolated from each other. For a detailed introduction to namespaces, refer to Namespaces.

                                                  This page will introduce the related operations of the namespace.

                                                  "},{"location":"en/end-user/kpanda/namespaces/createns.html#create-a-namespace","title":"Create a namespace","text":"

Namespaces can be created easily through a form, or created quickly by writing or importing a YAML file.

                                                  Note

                                                  • Before creating a namespace, you need to Integrate a Kubernetes cluster or Create a Kubernetes cluster in the container management module.
                                                  • The default namespace default is usually automatically generated after cluster initialization. But for production clusters, for ease of management, it is recommended to create other namespaces instead of using the default namespace directly.
                                                  "},{"location":"en/end-user/kpanda/namespaces/createns.html#create-with-form","title":"Create with form","text":"
                                                  1. On the cluster list page, click the name of the target cluster.

                                                  2. Click Namespace in the left navigation bar, then click the Create button on the right side of the page.

                                                  3. Fill in the name of the namespace, configure the workspace and labels (optional), and then click OK.

                                                    Info

                                                    • After binding a namespace to a workspace, the resources of that namespace will be shared with the bound workspace. For a detailed explanation of workspaces, refer to Workspaces and Hierarchies.

                                                    • After the namespace is created, you can still bind/unbind the workspace.

                                                  4. Click OK to complete the creation of the namespace. On the right side of the namespace list, click \u2507 to select update, bind/unbind workspace, quota management, delete, and more from the pop-up menu.

                                                  "},{"location":"en/end-user/kpanda/namespaces/createns.html#create-from-yaml","title":"Create from YAML","text":"
                                                  1. On the Clusters page, click the name of the target cluster.

                                                  2. Click Namespace in the left navigation bar, then click the YAML Create button on the right side of the page.

                                                  3. Enter or paste the prepared YAML content, or directly import an existing YAML file locally.

                                                    After entering the YAML content, click Download to save the YAML file locally.

                                                  4. Finally, click OK in the lower right corner of the pop-up box.
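A minimal Namespace manifest that could be pasted or imported here; the name and label are illustrative:

apiVersion: v1\nkind: Namespace\nmetadata:\n  name: demo-ns\n  labels:\n    app.kubernetes.io/part-of: demo  # illustrative label\n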

                                                  "},{"location":"en/end-user/kpanda/namespaces/exclusive.html","title":"Namespace Exclusive Nodes","text":"

Namespace exclusive nodes in a Kubernetes cluster allow a specific namespace to have exclusive access to the CPU, memory, and other resources of one or more nodes through taints and tolerations. Once exclusive nodes are configured for a specific namespace, applications and services from other namespaces cannot run on those nodes. Using exclusive nodes allows important applications to have exclusive use of certain computing resources, achieving physical isolation from other applications.

                                                  Note

                                                  Applications and services running on a node before it is set to be an exclusive node will not be affected and will continue to run normally on that node. Only when these Pods are deleted or rebuilt will they be scheduled to other non-exclusive nodes.

                                                  "},{"location":"en/end-user/kpanda/namespaces/exclusive.html#preparation","title":"Preparation","text":"

                                                  Check whether the kube-apiserver of the current cluster has enabled the PodNodeSelector and PodTolerationRestriction admission controllers.

                                                  The use of namespace exclusive nodes requires users to enable the PodNodeSelector and PodTolerationRestriction admission controllers on the kube-apiserver. For more information about admission controllers, refer to Kubernetes Admission Controllers Reference.
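As a rough illustration of what these admission controllers consume: PodNodeSelector reads a node-selector annotation on the namespace and restricts that namespace's Pods to matching nodes. A sketch with hypothetical names:

# pods in my-ns can then only be scheduled to nodes labeled dedicated=my-ns\nkubectl annotate ns my-ns scheduler.alpha.kubernetes.io/node-selector=dedicated=my-ns\n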

                                                  You can go to any Master node in the current cluster to check whether these two features are enabled in the kube-apiserver.yaml file, or you can execute the following command on the Master node for a quick check:

                                                  [root@g-master1 ~]# cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep  enable-admission-plugins\n\n# The expected output is as follows:\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction\n
                                                  "},{"location":"en/end-user/kpanda/namespaces/exclusive.html#enable-namespace-exclusive-nodes-on-global-cluster","title":"Enable Namespace Exclusive Nodes on Global Cluster","text":"

Since the Global cluster runs platform base components such as kpanda, ghippo, and insight, enabling namespace exclusive nodes on Global may prevent these system components from being scheduled to the exclusive nodes when they restart, affecting the overall high availability of the system. Therefore, we generally do not recommend enabling the namespace exclusive node feature on the Global cluster.

                                                  If you do need to enable namespace exclusive nodes on the Global cluster, please follow the steps below:

                                                  1. Enable the PodNodeSelector and PodTolerationRestriction admission controllers for the kube-apiserver of the Global cluster

                                                    Note

                                                    If the cluster has already enabled the above two admission controllers, please skip this step and go directly to configure system component tolerations.

                                                    Go to any Master node in the current cluster to modify the kube-apiserver.yaml configuration file, or execute the following command on the Master node for configuration:

  [root@g-master1 ~]# vi /etc/kubernetes/manifests/kube-apiserver.yaml\n\n# The relevant file content is as follows:\napiVersion: v1\nkind: Pod\nmetadata:\n    ......\nspec:\n  containers:\n  - command:\n    - kube-apiserver\n    ......\n    - --default-not-ready-toleration-seconds=300\n    - --default-unreachable-toleration-seconds=300\n    - --enable-admission-plugins=NodeRestriction   # List of enabled admission controllers\n    - --enable-aggregator-routing=False\n    - --enable-bootstrap-token-auth=true\n    - --endpoint-reconciler-type=lease\n    - --etcd-cafile=/etc/kubernetes/ssl/etcd/ca.crt\n    ......\n

                                                    Find the --enable-admission-plugins parameter and add the PodNodeSelector and PodTolerationRestriction admission controllers (separated by commas). Refer to the following:

  # Add ,PodNodeSelector,PodTolerationRestriction\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction\n
                                                  2. Add toleration annotations to the namespace where the platform components are located

                                                    After enabling the admission controllers, you need to add toleration annotations to the namespace where the platform components are located to ensure the high availability of the platform components.

                                                    The system component namespaces for AI platform are as follows:

  • kpanda-system: kpanda
  • hwameiStor-system: hwameiStor
  • istio-system: istio
  • metallb-system: metallb
  • cert-manager-system: cert-manager
  • contour-system: contour
  • kubean-system: kubean
  • ghippo-system: ghippo
  • kcoral-system: kcoral
  • kcollie-system: kcollie
  • insight-system: insight, insight-agent
  • ipavo-system: ipavo
  • kairship-system: kairship
  • karmada-system: karmada
  • amamba-system: amamba, jenkins
  • skoala-system: skoala
  • mspider-system: mspider
  • mcamel-system: mcamel-rabbitmq, mcamel-elasticsearch, mcamel-mysql, mcamel-redis, mcamel-kafka, mcamel-minio, mcamel-postgresql
  • spidernet-system: spidernet
  • kangaroo-system: kangaroo
  • gmagpie-system: gmagpie
  • dowl-system: dowl

  Check whether the above namespaces exist in the current cluster, then execute the following command to add the annotation scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]' to each of them.

  kubectl annotate ns <namespace-name> scheduler.alpha.kubernetes.io/defaultTolerations='[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]'\n
                                                    Please make sure to replace <namespace-name> with the name of the platform namespace you want to add the annotation to.

                                                  3. Use the interface to set exclusive nodes for the namespace

                                                    After confirming that the PodNodeSelector and PodTolerationRestriction admission controllers on the cluster API server have been enabled, please follow the steps below to use the AI platform UI management interface to set exclusive nodes for the namespace.

                                                    1. Click the cluster name in the cluster list page, then click Namespace in the left navigation bar.

                                                    2. Click the namespace name, then click the Exclusive Node tab, and click Add Node on the bottom right.

                                                    3. Select which nodes you want to be exclusive to this namespace on the left side of the page. On the right side, you can clear or delete a selected node. Finally, click OK at the bottom.

                                                    4. You can view the current exclusive nodes for this namespace in the list. You can choose to Stop Exclusivity on the right side of the node.

                                                      After cancelling exclusivity, Pods from other namespaces can also be scheduled to this node.

                                                  "},{"location":"en/end-user/kpanda/namespaces/exclusive.html#enable-namespace-exclusive-nodes-on-non-global-clusters","title":"Enable Namespace Exclusive Nodes on Non-Global Clusters","text":"

                                                  To enable namespace exclusive nodes on non-Global clusters, please follow the steps below:

                                                  1. Enable the PodNodeSelector and PodTolerationRestriction admission controllers for the kube-apiserver of the current cluster

                                                    Note

                                                    If the cluster has already enabled the above two admission controllers, please skip this step and go directly to using the interface to set exclusive nodes for the namespace.

                                                    Go to any Master node in the current cluster to modify the kube-apiserver.yaml configuration file, or execute the following command on the Master node for configuration:

  [root@g-master1 ~]# vi /etc/kubernetes/manifests/kube-apiserver.yaml\n\n# The relevant file content is as follows:\napiVersion: v1\nkind: Pod\nmetadata:\n    ......\nspec:\n  containers:\n  - command:\n    - kube-apiserver\n    ......\n    - --default-not-ready-toleration-seconds=300\n    - --default-unreachable-toleration-seconds=300\n    - --enable-admission-plugins=NodeRestriction   # List of enabled admission controllers\n    - --enable-aggregator-routing=False\n    - --enable-bootstrap-token-auth=true\n    - --endpoint-reconciler-type=lease\n    - --etcd-cafile=/etc/kubernetes/ssl/etcd/ca.crt\n    ......\n

                                                    Find the --enable-admission-plugins parameter and add the PodNodeSelector and PodTolerationRestriction admission controllers (separated by commas). Refer to the following:

  # Add ,PodNodeSelector,PodTolerationRestriction\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction\n
                                                  2. Use the interface to set exclusive nodes for the namespace

                                                    After confirming that the PodNodeSelector and PodTolerationRestriction admission controllers on the cluster API server have been enabled, please follow the steps below to use the AI platform UI management interface to set exclusive nodes for the namespace.

                                                    1. Click the cluster name in the cluster list page, then click Namespace in the left navigation bar.

                                                    2. Click the namespace name, then click the Exclusive Node tab, and click Add Node on the bottom right.

                                                    3. Select which nodes you want to be exclusive to this namespace on the left side of the page. On the right side, you can clear or delete a selected node. Finally, click OK at the bottom.

                                                    4. You can view the current exclusive nodes for this namespace in the list. You can choose to Stop Exclusivity on the right side of the node.

                                                      After cancelling exclusivity, Pods from other namespaces can also be scheduled to this node.

                                                  3. Add toleration annotations to the namespace where the components that need high availability are located (optional)

                                                    Execute the following command to add the annotation: scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]' to the namespace where the components that need high availability are located.

  kubectl annotate ns <namespace-name> scheduler.alpha.kubernetes.io/defaultTolerations='[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]'\n

                                                    Please make sure to replace <namespace-name> with the name of the platform namespace you want to add the annotation to.

                                                  "},{"location":"en/end-user/kpanda/namespaces/podsecurity.html","title":"Pod Security Policy","text":"

Pod security policies in a Kubernetes cluster allow you to control the security-related behavior of Pods by configuring different levels and modes for specific namespaces. Only Pods that meet certain conditions will be accepted by the system. Three levels and three modes are provided, letting users choose the most suitable scheme for their restriction policies.

                                                  Note

                                                  Only one security policy can be configured for one security mode. Please be careful when configuring the enforce security mode for a namespace, as violations will prevent Pods from being created.

                                                  This section will introduce how to configure Pod security policies for namespaces through the container management interface.

                                                  "},{"location":"en/end-user/kpanda/namespaces/podsecurity.html#prerequisites","title":"Prerequisites","text":"
                                                  • The container management module has integrated a Kubernetes cluster or created a Kubernetes cluster. The cluster version needs to be v1.22 or above, and you should be able to access the cluster's UI interface.

                                                  • A namespace has been created, a user has been created, and the user has been granted NS Admin or higher permissions. For details, refer to Namespace Authorization.

                                                  "},{"location":"en/end-user/kpanda/namespaces/podsecurity.html#configure-pod-security-policies-for-namespace","title":"Configure Pod Security Policies for Namespace","text":"
                                                  1. Select the namespace for which you want to configure Pod security policies and go to the details page. Click Configure Policy on the Pod Security Policy page to go to the configuration page.

                                                  2. Click Add Policy on the configuration page, and a policy will appear, including security level and security mode. The following is a detailed introduction to the security level and security policy.

  Security levels:

  • Privileged: An unrestricted policy that provides the maximum possible range of permissions. This policy allows known privilege escalations.
  • Baseline: A minimally restrictive policy that prohibits known privilege escalations. Allows the use of default (minimally specified) Pod configurations.
  • Restricted: A highly restrictive policy that follows current best practices for protecting Pods.

  Security modes:

  • Audit: Violations of the specified policy add new audit events to the audit log, and the Pod can be created.
  • Warn: Violations of the specified policy return user-visible warning information, and the Pod can be created.
  • Enforce: Violations of the specified policy prevent the Pod from being created.

3. Different security levels correspond to different check items. If you are unsure how to configure your namespace, click Policy ConfigMap Explanation in the top right corner of the page to view detailed information.

                                                  4. Click Confirm. If the creation is successful, the security policy you configured will appear on the page.

                                                  5. Click \u2507 to edit or delete the security policy you configured.
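For context, on clusters of v1.22 or above these levels and modes correspond to the upstream Kubernetes Pod Security Admission mechanism, which is driven by namespace labels. A sketch of the equivalent labeling (namespace name hypothetical):

# enforce the baseline level and additionally warn on restricted violations\nkubectl label ns my-ns pod-security.kubernetes.io/enforce=baseline pod-security.kubernetes.io/warn=restricted\n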

                                                  "},{"location":"en/end-user/kpanda/network/create-ingress.html","title":"Create an Ingress","text":"

In a Kubernetes cluster, an Ingress exposes HTTP and HTTPS routes from outside the cluster to Services inside the cluster. Traffic routing is controlled by rules defined on the Ingress resource. Below is a simple example of an Ingress that sends all traffic to the same Service:
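A minimal sketch of such an Ingress; the Service name and port are illustrative:

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: minimal-ingress\nspec:\n  rules:\n  - http:\n      paths:\n      - path: /\n        pathType: Prefix\n        backend:\n          service:\n            name: my-service  # illustrative Service name\n            port:\n              number: 80\n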

                                                  Ingress is an API object that manages external access to services in the cluster, and the typical access method is HTTP. Ingress can provide load balancing, SSL termination, and name-based virtual hosting.

                                                  "},{"location":"en/end-user/kpanda/network/create-ingress.html#prerequisites","title":"Prerequisites","text":"
• The container management module has integrated or created a Kubernetes cluster, and you can access the cluster's UI interface.
• A namespace and a user have been created, and the user has been granted the NS Editor role. For details, refer to Namespace Authorization.
• An Ingress instance has been created, an application workload has been deployed, and the proper Service has been created.
                                                  • When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict, otherwise the deployment will fail.
                                                  "},{"location":"en/end-user/kpanda/network/create-ingress.html#create-ingress","title":"Create ingress","text":"
                                                  1. After successfully logging in as the NS Editor user, click Clusters in the upper left corner to enter the Clusters page. In the list of clusters, click a cluster name.

                                                  2. In the left navigation bar, click Container Network -> Ingress to enter the service list, and click the Create Ingress button in the upper right corner.

                                                    Note

                                                    It is also possible to Create from YAML .

                                                  3. Open Create Ingress page to configure. There are two protocol types to choose from, refer to the following two parameter tables for configuration.

                                                  "},{"location":"en/end-user/kpanda/network/create-ingress.html#create-http-protocol-ingress","title":"Create HTTP protocol ingress","text":"Parameter Description Example value Ingress name [Type] Required[Meaning] Enter the name of the new ingress. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and start with a lowercase English letter, lowercase English letters or numbers. Ing-01 Namespace [Type] Required[Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and start with a lowercase English letter and end with a lowercase English letter or number. default Protocol [Type] Required [Meaning] Refers to the protocol that authorizes inbound access to the cluster service, and supports HTTP (no identity authentication required) or HTTPS (identity authentication needs to be configured) protocol. Here select the ingress of HTTP protocol. HTTP Domain Name [Type] Required [Meaning] Use the domain name to provide external access services. The default is the domain name of the cluster testing.daocloud.io LB Type [Type] Required [Meaning] The usage range of the Ingress instance. Scope of use of Ingress Platform-level load balancer : In the same cluster, share the same Ingress instance, where all Pods can receive requests distributed by the load balancer. Tenant-level load balancer : Tenant load balancer, the Ingress instance belongs exclusively to the current namespace, or belongs to a certain workspace, and the set workspace includes the current namespace, and all Pods can receive it Requests distributed by this load balancer. Platform Level Load Balancer Ingress Class [Type] Optional[Meaning] Select the proper Ingress instance, and import traffic to the specified Ingress instance after selection. When it is None, the default DefaultClass is used. Please set the DefaultClass when creating an Ingress instance. For more information, refer to Ingress Class< br /> Ngnix Session persistence [Type] Optional[Meaning] Session persistence is divided into three types: L4 source address hash , Cookie Key , L7 Header Name . Keep L4 Source Address Hash : : When enabled, the following tag is added to the Annotation by default: nginx.ingress.kubernetes.io/upstream-hash-by: \"\\(binary_remote_addr\"<br /> __Cookie Key__ : When enabled, the connection from a specific client will be passed to the same Pod. After enabled, the following parameters are added to the Annotation by default:<br /> nginx.ingress.kubernetes.io/affinity: \"cookie\"<br /> nginx.ingress.kubernetes .io/affinity-mode: persistent<br /> __L7 Header Name__ : After enabled, the following tag is added to the Annotation by default: nginx.ingress.kubernetes.io/upstream-hash-by: \"\\)http_x_forwarded_for\" Close Path Rewriting [Type] Optional [Meaning] rewrite-target , in some cases, the URL exposed by the backend service is different from the path specified in the Ingress rule. If no URL rewriting configuration is performed, There will be an error when accessing. close Redirect [Type] Optional[Meaning] permanent-redirect , permanent redirection, after entering the rewriting path, the access path will be redirected to the set address. 
close Traffic Distribution [Type] Optional[Meaning] After enabled and set, traffic distribution will be performed according to the set conditions. Based on weight : After setting the weight, add the following Annotation to the created Ingress: nginx.ingress.kubernetes.io/canary-weight: \"10\" Based on Cookie : set After the cookie rules, the traffic will be distributed according to the set cookie conditions Based on Header : After setting the header rules, the traffic will be distributed according to the set header conditions Close Labels [Type] Optional [Meaning] Add a label for the ingress - Annotations [Type] Optional [Meaning] Add annotation for ingress -"},{"location":"en/end-user/kpanda/network/create-ingress.html#create-https-protocol-ingress","title":"Create HTTPS protocol ingress","text":"Parameter Description Example value Ingress name [Type] Required[Meaning] Enter the name of the new ingress. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and start with a lowercase English letter, lowercase English letters or numbers. Ing-01 Namespace [Type] Required[Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and start with a lowercase English letter and end with a lowercase English letter or number. default Protocol [Type] Required [Meaning] Refers to the protocol that authorizes inbound access to the cluster service, and supports HTTP (no identity authentication required) or HTTPS (identity authentication needs to be configured) protocol. Here select the ingress of HTTPS protocol. HTTPS Domain Name [Type] Required [Meaning] Use the domain name to provide external access services. The default is the domain name of the cluster testing.daocloud.io Secret [Type] Required [Meaning] Https TLS certificate, Create Secret. Forwarding policy [Type] Optional[Meaning] Specify the access policy of Ingress. Path: Specifies the URL path for service access, the default is the root path/directoryTarget service: Service name for ingressTarget service port: Port exposed by the service LB Type [Type] Required [Meaning] The usage range of the Ingress instance. Platform-level load balancer : In the same cluster, the same Ingress instance is shared, and all Pods can receive requests distributed by the load balancer. Tenant-level load balancer : Tenant load balancer, the Ingress instance belongs exclusively to the current namespace or to a certain workspace. This workspace contains the current namespace, and all Pods can receive the workload from this Balanced distribution of requests. Platform Level Load Balancer Ingress Class [Type] Optional[Meaning] Select the proper Ingress instance, and import traffic to the specified Ingress instance after selection. When it is None, the default DefaultClass is used. Please set the DefaultClass when creating an Ingress instance. For more information, refer to Ingress Class< br /> None Session persistence [Type] Optional[Meaning] Session persistence is divided into three types: L4 source address hash , Cookie Key , L7 Header Name . 
Keep L4 Source Address Hash : : When enabled, the following tag is added to the Annotation by default: nginx.ingress.kubernetes.io/upstream-hash-by: \"\\(binary_remote_addr\"<br /> __Cookie Key__ : When enabled, the connection from a specific client will be passed to the same Pod. After enabled, the following parameters are added to the Annotation by default:<br /> nginx.ingress.kubernetes.io/affinity: \"cookie\"<br /> nginx.ingress.kubernetes .io/affinity-mode: persistent<br /> __L7 Header Name__ : After enabled, the following tag is added to the Annotation by default: nginx.ingress.kubernetes.io/upstream-hash-by: \"\\)http_x_forwarded_for\" Close Labels [Type] Optional [Meaning] Add a label for the ingress Annotations [Type] Optional[Meaning] Add annotation for ingress"},{"location":"en/end-user/kpanda/network/create-ingress.html#create-ingress-successfully","title":"Create ingress successfully","text":"

                                                  After configuring all the parameters, click the OK button to return to the ingress list automatically. On the right side of the list, click \u2507 to modify or delete the selected ingress.

                                                  "},{"location":"en/end-user/kpanda/network/create-services.html","title":"Create a Service","text":"

                                                  In a Kubernetes cluster, each Pod has an internal independent IP address, but Pods in the workload may be created and deleted at any time, and directly using the Pod IP address cannot provide external services.

                                                  This requires creating a service through which you get a fixed IP address, decoupling the front-end and back-end of the workload, and allowing external users to access the service. At the same time, the service also provides the Load Balancer feature, enabling users to access workloads from the public network.

                                                  "},{"location":"en/end-user/kpanda/network/create-services.html#prerequisites","title":"Prerequisites","text":"
• The container management module has integrated a Kubernetes cluster or created one, and the cluster UI is accessible.

• Completed the creation of a namespace and a user, and granted the user the NS Editor role. For details, refer to Namespace Authorization.

                                                  • When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict, otherwise the deployment will fail.

                                                  "},{"location":"en/end-user/kpanda/network/create-services.html#create-service","title":"Create service","text":"
                                                  1. After successfully logging in as the NS Editor user, click Clusters in the upper left corner to enter the Clusters page. In the list of clusters, click a cluster name.

                                                  2. In the left navigation bar, click Container Network -> Service to enter the service list, and click the Create Service button in the upper right corner.

                                                    !!! tip

It is also possible to create a service via __YAML__ .
                                                  3. Open the Create Service page, select an access type, and refer to the following three parameter tables for configuration.

                                                  "},{"location":"en/end-user/kpanda/network/create-services.html#create-clusterip-service","title":"Create ClusterIP service","text":"

                                                  Click Intra-Cluster Access (ClusterIP) , which refers to exposing services through the internal IP of the cluster. The services selected for this option can only be accessed within the cluster. This is the default service type. Refer to the configuration parameters in the table below.

Parameter Description Example value Access type [Type] Required [Meaning] Specify the method of Pod service discovery; here select intra-cluster access (ClusterIP). ClusterIP Service Name [Type] Required [Meaning] Enter the name of the new service. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase letters, numbers and dashes (-), and must start with a lowercase letter and end with a lowercase letter or number. svc-01 Namespace [Type] Required [Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase letters, numbers and dashes (-), and must start with a lowercase letter and end with a lowercase letter or number. default Label selector [Type] Required [Meaning] Add a label; the Service selects Pods according to the label. Click \"Add\" after filling it in. You can also reference the labels of an existing workload: click Reference workload label , select the workload in the pop-up window, and the system will use the selected workload's labels as the selector by default. app:job01 Port configuration [Type] Required [Meaning] To add a protocol port for the service, first select the port protocol type; currently TCP and UDP are supported. Port name: enter the name of the custom port. Service port (port): the access port through which the Pods provide external services. Container port (targetport): the container port the workload actually listens on, used to expose the service to the cluster. Session Persistence [Type] Optional [Meaning] When enabled, requests from the same client are forwarded to the same Pod. Enabled Maximum session hold time [Type] Optional [Meaning] After session persistence is enabled, the maximum hold time; 30 seconds by default. 30 seconds Annotation [Type] Optional [Meaning] Add an annotation for the service"},{"location":"en/end-user/kpanda/network/create-services.html#create-nodeport-service","title":"Create NodePort service","text":"
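For reference, a minimal sketch of the Service manifest the ClusterIP form above produces, using the example values from the table (the port numbers are illustrative):

apiVersion: v1
kind: Service
metadata:
  name: svc-01
  namespace: default
spec:
  type: ClusterIP
  selector:
    app: job01          # label selector matching the target Pods
  ports:
    - name: http        # custom port name
      protocol: TCP
      port: 80          # service port exposed inside the cluster
      targetPort: 8080  # container port the workload listens on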

Click NodePort , which means exposing the service via the IP and a static port ( NodePort ) on each node. A NodePort service is routed to the automatically created ClusterIP service. You can access a NodePort service from outside the cluster by requesting NodeIP:NodePort . Refer to the configuration parameters in the table below. Parameter Description Example value Access type [Type] Required [Meaning] Specify the method of Pod service discovery; here select node access (NodePort). NodePort Service Name [Type] Required [Meaning] Enter the name of the new service. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase letters, numbers and dashes (-), and must start with a lowercase letter and end with a lowercase letter or number. svc-01 Namespace [Type] Required [Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase letters, numbers and dashes (-), and must start with a lowercase letter and end with a lowercase letter or number. default Label selector [Type] Required [Meaning] Add a label; the Service selects Pods according to the label. Click \"Add\" after filling it in. You can also reference the labels of an existing workload: click Reference workload label , select the workload in the pop-up window, and the system will use the selected workload's labels as the selector by default. Port configuration [Type] Required [Meaning] To add a protocol port for the service, first select the port protocol type; currently TCP and UDP are supported. Port name: enter the name of the custom port. Service port (port): the access port through which the Pods provide external services; by default, the service port is set to the same value as the container port field for convenience. Container port (targetport): the container port the workload actually listens on. Node port (nodeport): the port on the node, used as the entrance for external traffic access. Session Persistence [Type] Optional [Meaning] When enabled, requests from the same client are forwarded to the same Pod. When enabled, .spec.sessionAffinity of the Service is ClientIP ; for details refer to Session Affinity for Service. Enabled Maximum session hold time [Type] Optional [Meaning] After session persistence is enabled, the maximum hold time; .spec.sessionAffinityConfig.clientIP.timeoutSeconds defaults to 30 seconds. 30 seconds Annotation [Type] Optional [Meaning] Add an annotation for the service"},{"location":"en/end-user/kpanda/network/create-services.html#create-loadbalancer-service","title":"Create LoadBalancer service","text":"
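A sketch of the NodePort variant described in the preceding table, including the session-affinity fields mentioned there (port numbers are illustrative):

apiVersion: v1
kind: Service
metadata:
  name: svc-01
  namespace: default
spec:
  type: NodePort
  selector:
    app: job01
  ports:
    - protocol: TCP
      port: 80
      targetPort: 8080
      nodePort: 30080          # must fall in the cluster's NodePort range (30000-32767 by default)
  sessionAffinity: ClientIP    # session persistence
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 30       # maximum session hold time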

                                                  Click Load Balancer , which refers to using the cloud provider's load balancer to expose services to the outside. External load balancers can route traffic to automatically created NodePort services and ClusterIP services. Refer to the configuration parameters in the table below.

Parameter Description Example value Access type [Type] Required [Meaning] Specify the method of Pod service discovery; here select Load Balancer (LoadBalancer). LoadBalancer Service Name [Type] Required [Meaning] Enter the name of the new service. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase letters, numbers and dashes (-), and must start with a lowercase letter and end with a lowercase letter or number. svc-01 Namespace [Type] Required [Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase letters, numbers and dashes (-), and must start with a lowercase letter and end with a lowercase letter or number. default External Traffic Policy [Type] Required [Meaning] Set the external traffic policy. Cluster: traffic can be forwarded to Pods on all nodes in the cluster. Local: traffic is only sent to Pods on the same node. Label selector [Type] Required [Meaning] Add a label; the Service selects Pods according to the label. Click \"Add\" after filling it in. You can also reference the labels of an existing workload: click Reference workload label , select the workload in the pop-up window, and the system will use the selected workload's labels as the selector by default. Load balancing type [Type] Required [Meaning] The type of load balancer used; currently MetalLB and others are supported. MetalLB IP Pool [Type] Required [Meaning] When the selected load balancing type is MetalLB, the LoadBalancer Service allocates IP addresses from this pool by default and announces all IP addresses in this pool via ARP. For details, refer to: Install MetalLB Load balancing address [Type] Required [Meaning] 1. If you are using a public cloud CloudProvider, fill in the load balancing address provided by the cloud provider here. 2. If the load balancing type is MetalLB, the IP is obtained from the above IP pool by default; if left empty, it is obtained automatically. Port configuration [Type] Required [Meaning] To add a protocol port for the service, first select the port protocol type; currently TCP and UDP are supported. Port name: enter the name of the custom port. Service port (port): the access port through which the Pods provide external services; by default, the service port is set to the same value as the container port field for convenience. Container port (targetport): the container port the workload actually listens on. Node port (nodeport): the port on the node, used as the entrance for external traffic access. Annotation [Type] Optional [Meaning] Add an annotation for the service"},{"location":"en/end-user/kpanda/network/create-services.html#complete-service-creation","title":"Complete service creation","text":"
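A sketch of the LoadBalancer variant from the table above, assuming MetalLB is the configured load-balancing type; the metallb.universe.tf/address-pool annotation selects the IP pool, and the pool name is illustrative:

apiVersion: v1
kind: Service
metadata:
  name: svc-01
  namespace: default
  annotations:
    metallb.universe.tf/address-pool: default-pool   # illustrative pool name
spec:
  type: LoadBalancer
  externalTrafficPolicy: Local   # or Cluster, per the External Traffic Policy field
  selector:
    app: job01
  ports:
    - protocol: TCP
      port: 80
      targetPort: 8080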

                                                  After configuring all parameters, click the OK button to return to the service list automatically. On the right side of the list, click \u2507 to modify or delete the selected service.

                                                  "},{"location":"en/end-user/kpanda/network/network-policy.html","title":"Network Policies","text":"

                                                  Network policies in Kubernetes allow you to control network traffic at the IP address or port level (OSI layer 3 or layer 4). The container management module currently supports creating network policies based on Pods or namespaces, using label selectors to specify which traffic can enter or leave Pods with specific labels.

                                                  For more details on network policies, refer to the official Kubernetes documentation on Network Policies.

                                                  "},{"location":"en/end-user/kpanda/network/network-policy.html#creating-network-policies","title":"Creating Network Policies","text":"

                                                  Currently, there are two methods available for creating network policies: YAML and form-based creation. Each method has its advantages and disadvantages, catering to different user needs.

                                                  YAML creation requires fewer steps and is more efficient, but it has a higher learning curve as it requires familiarity with configuring network policy YAML files.

                                                  Form-based creation is more intuitive and straightforward. Users can simply fill in the proper values based on the prompts. However, this method involves more steps.

                                                  "},{"location":"en/end-user/kpanda/network/network-policy.html#yaml-creation","title":"YAML Creation","text":"
                                                  1. In the cluster list, click the name of the target cluster, then navigate to Container Network -> Network Policies -> Create with YAML in the left navigation bar.

                                                  2. In the pop-up dialog, enter or paste the pre-prepared YAML file, then click OK at the bottom of the dialog.
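For example, a minimal policy of the kind you might paste, allowing ingress to Pods labeled app: web only from Pods labeled app: api in the same namespace (the labels and port are illustrative):

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-api-to-web
  namespace: default
spec:
  podSelector:
    matchLabels:
      app: web           # the Pods this policy protects
  policyTypes:
    - Ingress
  ingress:
    - from:
        - podSelector:
            matchLabels:
              app: api   # only these Pods may connect
      ports:
        - protocol: TCP
          port: 80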

                                                  "},{"location":"en/end-user/kpanda/network/network-policy.html#form-based-creation","title":"Form-Based Creation","text":"
                                                  1. In the cluster list, click the name of the target cluster, then navigate to Container Network -> Network Policies -> Create Policy in the left navigation bar.

                                                  2. Fill in the basic information.

                                                    The name and namespace cannot be changed after creation.

                                                  3. Fill in the policy configuration.

                                                    The policy configuration includes ingress and egress policies. To establish a successful connection from a source Pod to a target Pod, both the egress policy of the source Pod and the ingress policy of the target Pod need to allow the connection. If either side does not allow the connection, the connection will fail.

• Ingress Policy: Click \u2795 to begin configuring the policy. Multiple policies can be configured. The effects of multiple network policies are cumulative: the allowed traffic is the union of their rules, so a connection is established as long as at least one configured rule permits it.

                                                    • Egress Policy

                                                  "},{"location":"en/end-user/kpanda/network/network-policy.html#viewing-network-policies","title":"Viewing Network Policies","text":"
                                                  1. In the cluster list, click the name of the target cluster, then navigate to Container Network -> Network Policies . Click the name of the network policy.

                                                  2. View the basic configuration, associated instances, ingress policies, and egress policies of the policy.

                                                  Info

                                                  Under the \"Associated Instances\" tab, you can view instance monitoring, logs, container lists, YAML files, events, and more.

                                                  "},{"location":"en/end-user/kpanda/network/network-policy.html#updating-network-policies","title":"Updating Network Policies","text":"

                                                  There are two ways to update network policies. You can either update them through the form or by using a YAML file.

                                                  • On the network policy list page, find the policy you want to update, and choose Update in the action column on the right to update it via the form. Choose Edit YAML to update it using a YAML file.

                                                  • Click the name of the network policy, then choose Update in the top right corner of the policy details page to update it via the form. Choose Edit YAML to update it using a YAML file.

                                                  "},{"location":"en/end-user/kpanda/network/network-policy.html#deleting-network-policies","title":"Deleting Network Policies","text":"

                                                  There are two ways to delete network policies. You can delete network policies either through the form or by using a YAML file.

                                                  • On the network policy list page, find the policy you want to delete, and choose Delete in the action column on the right to delete it via the form. Choose Edit YAML to delete it using a YAML file.

                                                  • Click the name of the network policy, then choose Delete in the top right corner of the policy details page to delete it via the form. Choose Edit YAML to delete it using a YAML file.

                                                  "},{"location":"en/end-user/kpanda/nodes/add-node.html","title":"Cluster Node Expansion","text":"

As the number of business applications continues to grow, cluster resources become increasingly strained. At this point, you can expand the cluster nodes based on kubean. After the expansion, applications can run on the newly added nodes, alleviating resource pressure.

Only clusters created through the container management module support node scaling; clusters integrated from outside do not support this operation. This article mainly introduces expanding worker nodes of the same architecture in a worker cluster. If you need to add control nodes or heterogeneous worker nodes to the cluster, refer to: Expanding the control node of the worker cluster, Adding heterogeneous nodes to the worker cluster, Expanding the worker node of the global service cluster.

                                                  1. On the Clusters page, click the name of the target cluster.

If the Cluster Type contains the label Integrated Cluster , it means that the cluster does not support node scaling.

                                                  2. Click Nodes in the left navigation bar, and then click Integrate Node in the upper right corner of the page.

                                                  3. Enter the host name and node IP and click OK.

Click \u2795 Add Worker Node to continue integrating more nodes.

                                                  Note

Integrating a node takes about 20 minutes, so please be patient.

                                                  "},{"location":"en/end-user/kpanda/nodes/delete-node.html","title":"Node Scales Down","text":"

When the business peak period is over, to save resource costs, you can reduce the size of the cluster and remove redundant nodes, that is, scale the cluster down. After a node is removed, applications can no longer run on it.

                                                  "},{"location":"en/end-user/kpanda/nodes/delete-node.html#prerequisites","title":"Prerequisites","text":"
                                                  • The current operating user has the Cluster Admin role authorization.
• Only clusters created through the container management module support node scaling; clusters integrated from outside do not support this operation.
• Before removing a node, pause scheduling on it and evict the applications on it to other nodes.
• Eviction method: log in to a controller node and use the kubectl drain command to evict all Pods from the node. Safe eviction allows the containers in the Pods to terminate gracefully; see the example below.
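A sketch of the eviction commands, assuming the node to be removed is named node-01 (illustrative):

# stop scheduling new Pods to the node
kubectl cordon node-01

# safely evict all Pods; DaemonSet Pods are skipped, and Pods that
# only use emptyDir volumes have their local data deleted
kubectl drain node-01 --ignore-daemonsets --delete-emptydir-data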
                                                  "},{"location":"en/end-user/kpanda/nodes/delete-node.html#precautions","title":"Precautions","text":"
1. When scaling down, cluster nodes can only be removed one by one, not in batches.

2. If you need to remove cluster controller nodes, ensure that the final number of controller nodes is an odd number.

3. The first controller node cannot be taken offline during scale-down. If this operation is necessary, please contact the after-sales engineer.

                                                  "},{"location":"en/end-user/kpanda/nodes/delete-node.html#steps","title":"Steps","text":"
                                                  1. On the Clusters page, click the name of the target cluster.

If the Cluster Type has the label Integrated Cluster , it means that the cluster does not support node scaling.

                                                  2. Click Nodes on the left navigation bar, find the node to be uninstalled, click \u2507 and select Remove .

                                                  3. Enter the node name, and click Delete to confirm.

                                                  "},{"location":"en/end-user/kpanda/nodes/labels-annotations.html","title":"Labels and Annotations","text":"

                                                  Labels are identifying key-value pairs added to Kubernetes objects such as Pods, nodes, and clusters, which can be combined with label selectors to find and filter Kubernetes objects that meet certain conditions. Each key must be unique for a given object.

Annotations, like labels, are key/value pairs, but they do not provide identification or filtering capabilities. Annotations can be used to add arbitrary metadata to nodes. Annotation keys usually use the format prefix(optional)/name(required) , for example nfd.node.kubernetes.io/extended-resources . If the prefix is omitted, it means that the annotation key is private to the user.

For more information about labels and annotations, refer to the official Kubernetes documentation on Labels and Selectors or Annotations.

The steps to add or delete labels and annotations are as follows:

                                                  1. On the Clusters page, click the name of the target cluster.

                                                  2. Click Nodes on the left navigation bar, click the \u2507 operation icon on the right side of the node, and click Edit Labels or Edit Annotations .

3. Click \u2795 Add to add labels or annotations, click X to delete them, and finally click OK .
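The same can be done with kubectl; a sketch assuming a node named node-01 and illustrative keys and values:

# add a label, then remove it with the trailing minus syntax
kubectl label node node-01 disktype=ssd
kubectl label node node-01 disktype-

# add and remove an annotation the same way
kubectl annotate node node-01 example.com/notes=\"managed by team-a\"
kubectl annotate node node-01 example.com/notes-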

                                                  "},{"location":"en/end-user/kpanda/nodes/node-authentication.html","title":"Node Authentication","text":""},{"location":"en/end-user/kpanda/nodes/node-authentication.html#authenticate-nodes-using-ssh-keys","title":"Authenticate Nodes Using SSH Keys","text":"

                                                  If you choose to authenticate the nodes of the cluster-to-be-created using SSH keys, you need to configure the public and private keys according to the following instructions.

                                                  1. Run the following command on any node within the management cluster of the cluster-to-be-created to generate the public and private keys.

                                                    cd /root/.ssh\nssh-keygen -t rsa\n
                                                  2. Run the ls command to check if the keys have been successfully created in the management cluster. The correct output should be as follows:

                                                    ls\nid_rsa  id_rsa.pub  known_hosts\n

                                                    The file named id_rsa is the private key, and the file named id_rsa.pub is the public key.

                                                  3. Run the following command to load the public key file id_rsa.pub onto all the nodes of the cluster-to-be-created.

                                                    ssh-copy-id -i /root/.ssh/id_rsa.pub root@10.0.0.0\n

                                                    Replace the user account and node IP in the above command with the username and IP of the nodes in the cluster-to-be-created. The same operation needs to be performed on every node in the cluster-to-be-created.

                                                  4. Run the following command to view the private key file id_rsa created in step 1.

                                                    cat /root/.ssh/id_rsa\n

                                                    The output should be as follows:

                                                    -----BEGIN RSA PRIVATE KEY-----\nMIIEpQIBAAKCAQEA3UvyKINzY5BFuemQ+uJ6q+GqgfvnWwNC8HzZhpcMSjJy26MM\nUtBEBJxy8fMi57XcjYxPibXW/wnd+32ICCycqCwByUmuXeCC1cjlCQDqjcAvXae7\nY54IXGF7wm2IsMNwf0kjFEXjuS48FLDA0mGRaN3BG+Up5geXcHckg3K5LD8kXFFx\ndEmSIjdyw55NaUitmEdHzN7cIdfi6Z56jcV8dcFBgWKUx+ebiyPmZBkXToz6GnMF\nrswzzZCl+G6Jb2xTGy7g7ozb4BoZd1IpSD5EhDanRrESVE0C5YuJ5zUAC0CvVd1l\nv67AK8Ko6MXToHp01/bcsvlM6cqgwUFXZKVeOwIDAQABAoIBAQCO36GQlo3BEjxy\nM2HvGJmqrx+unDxafliRe4nVY2AD515Qf4xNSzke4QM1QoyenMOwf446krQkJPK0\nk+9nl6Xszby5gGCbK4BNFk8I6RaGPjZWeRx6zGUJf8avWJiPxx6yjz2esSC9RiR0\nF0nmiiefVMyAfgv2/5++dK2WUFNNRKLgSRRpP5bRaD5wMzzxtSSXrUon6217HO8p\n3RoWsI51MbVzhdVgpHUNABcoa0rpr9svT6XLKZxY8mxpKFYjM0Wv2JIDABg3kBvh\nQbJ7kStCO3naZjKMU9UuSqVJs06cflGYw7Or8/tABR3LErNQKPjkhAQqt0DXw7Iw\n3tKdTAJBAoGBAP687U7JAOqQkcphek2E/A/sbO/d37ix7Z3vNOy065STrA+ZWMZn\npZ6Ui1B/oJpoZssnfvIoz9sn559X0j67TljFALFd2ZGS0Fqh9KVCqDvfk+Vst1dq\n+3r/yZdTOyswoccxkJiC/GDwZGK0amJWqvob39JCZhDAKIGLbGMmjdAHAoGBAN5k\nm1WGnni1nZ+3dryIwgB6z1hWcnLTamzSET6KhSuo946ET0IRG9xtlheCx6dqICbr\nVk1Y4NtRZjK/p/YGx59rDWf7E3I8ZMgR7mjieOcUZ4lUlA4l7ZIlW/2WZHW+nUXO\nTi20fqJ8qSp4BUvOvuth1pz2GLUHe2/Fxjf7HIstAoGBAPHpPr9r+TfIlPsJeRj2\n6lzA3G8qWFRQfGRYjv0fjv0pA+RIb1rzgP/I90g5+63G6Z+R4WdcxI/OJJNY1iuG\nuw9n/pFxm7U4JC990BPE6nj5iLz+clpNGYckNDBF9VG9vFSrSDLdaYkxoVNvG/xJ\na9Na90H4lm7f3VewrPy310KvAoGAZr+mwNoEh5Kpc6xo8Gxi7aPP/mlaUVD6X7Ki\ngvmu02AqmC7rC4QqEiqTaONkaSXwGusqIWxJ3yp5hELmUBYLzszAEeV/s4zRp1oZ\ng133LBRSTbHFAdBmNdqK6Nu+KGRb92980UMOKvZbliKDl+W6cbfvVu+gtKrzTc3b\naevb4TUCgYEAnJAxyVYDP1nJf7bjBSHXQu1E/DMwbtrqw7dylRJ8cAzI7IxfSCez\n7BYWq41PqVd9/zrb3Pbh2phiVzKe783igAIMqummcjo/kZyCwFsYBzK77max1jF5\naPQsLbRS2aDz8kIH6jHPZ/R+15EROmdtLmA7vIJZGerWWQR0dUU+XXA=\n

                                                  Copy the content of the private key and paste it into the interface's key input field.

                                                  "},{"location":"en/end-user/kpanda/nodes/node-check.html","title":"Create a cluster node availability check","text":"

                                                  When creating a cluster or adding nodes to an existing cluster, refer to the table below to check the node configuration to avoid cluster creation or expansion failure due to wrong node configuration.

Check Item Description OS Refer to Supported Architectures and Operating Systems SELinux Off Firewall Off Architecture Consistency Consistent CPU architecture between nodes (such as ARM or x86) Host Time The time difference between all hosts is within 10 seconds. Network Connectivity The node and its SSH port can be accessed normally by the platform. CPU Available CPU resources are greater than 4 Cores Memory Available memory resources are greater than 8 GB"},{"location":"en/end-user/kpanda/nodes/node-check.html#supported-architectures-and-operating-systems","title":"Supported architectures and operating systems","text":"Architecture Operating System Remarks ARM Kylin Linux Advanced Server release V10 (Sword) SP2 Recommended ARM UOS Linux ARM openEuler x86 CentOS 7.x Recommended x86 Redhat 7.x Recommended x86 Redhat 8.x Recommended x86 Flatcar Container Linux by Kinvolk x86 Debian Bullseye, Buster, Jessie, Stretch x86 Ubuntu 16.04, 18.04, 20.04, 22.04 x86 Fedora 35, 36 x86 Fedora CoreOS x86 openSUSE Leap 15.x/Tumbleweed x86 Oracle Linux 7, 8, 9 x86 Alma Linux 8, 9 x86 Rocky Linux 8, 9 x86 Amazon Linux 2 x86 Kylin Linux Advanced Server release V10 (Sword) - SP2 Haiguang x86 UOS Linux x86 openEuler"},{"location":"en/end-user/kpanda/nodes/node-details.html","title":"Node Details","text":"

                                                  After accessing or creating a cluster, you can view the information of each node in the cluster, including node status, labels, resource usage, Pod, monitoring information, etc.

                                                  1. On the Clusters page, click the name of the target cluster.

                                                  2. Click Nodes on the left navigation bar to view the node status, role, label, CPU/memory usage, IP address, and creation time.

                                                  3. Click the node name to enter the node details page to view more information, including overview information, pod information, label annotation information, event list, status, etc.

                                                    In addition, you can also view the node's YAML file, monitoring information, labels and annotations, etc.

                                                  "},{"location":"en/end-user/kpanda/nodes/schedule.html","title":"Node Scheduling","text":"

                                                  Supports suspending or resuming scheduling of nodes. Pausing scheduling means stopping the scheduling of Pods to the node. Resuming scheduling means that Pods can be scheduled to that node.

                                                  1. On the Clusters page, click the name of the target cluster.

                                                  2. Click Nodes on the left navigation bar, click the \u2507 operation icon on the right side of the node, and click the Cordon button to suspend scheduling the node.

                                                  3. Click the \u2507 operation icon on the right side of the node, and click the Uncordon button to resume scheduling the node.

                                                  The node scheduling status may be delayed due to network conditions. Click the refresh icon on the right side of the search box to refresh the node scheduling status.
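The equivalent kubectl operations, for a node named node-01 (illustrative):

kubectl cordon node-01     # suspend scheduling; the node shows SchedulingDisabled
kubectl uncordon node-01   # resume scheduling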

                                                  "},{"location":"en/end-user/kpanda/nodes/taints.html","title":"Node Taints","text":"

Taints allow a node to repel a certain type of Pod, preventing those Pods from being scheduled on the node. One or more taints can be applied to each node, and Pods that cannot tolerate these taints will not be scheduled on that node.

                                                  "},{"location":"en/end-user/kpanda/nodes/taints.html#precautions","title":"Precautions","text":"
                                                  1. The current operating user should have NS Editor role authorization or other higher permissions.
                                                  2. After adding a taint to a node, only Pods that can tolerate the taint can be scheduled to the node.
                                                  "},{"location":"en/end-user/kpanda/nodes/taints.html#steps","title":"Steps","text":"
                                                  1. Find the target cluster on the Clusters page, and click the cluster name to enter the Cluster page.

                                                  2. In the left navigation bar, click Nodes , find the node that needs to modify the taint, click the \u2507 operation icon on the right and click the Edit Taints button.

                                                  3. Enter the key value information of the taint in the pop-up box, select the taint effect, and click OK .

                                                    Click \u2795 Add to add multiple taints to the node, and click X on the right side of the taint effect to delete the taint.

                                                    Currently supports three taint effects:

                                                    • NoExecute: This affects pods that are already running on the node as follows:

                                                      • Pods that do not tolerate the taint are evicted immediately
                                                      • Pods that tolerate the taint without specifying tolerationSeconds in their toleration specification remain bound forever
                                                      • Pods that tolerate the taint with a specified tolerationSeconds remain bound for the specified amount of time. After that time elapses, the node lifecycle controller evicts the Pods from the node.
                                                    • NoSchedule: No new Pods will be scheduled on the tainted node unless they have a matching toleration. Pods currently running on the node are not evicted.

• PreferNoSchedule: This is a \"preference\" or \"soft\" version of NoSchedule. The control plane will try to avoid placing a Pod that does not tolerate the taint on the node, but this is not guaranteed, so using this taint in a production environment is not recommended.
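For reference, the equivalent kubectl commands, assuming a node named node-01 and a taint key1=value1 (both illustrative):

# add a taint, then remove it with the trailing minus syntax
kubectl taint nodes node-01 key1=value1:NoSchedule
kubectl taint nodes node-01 key1=value1:NoSchedule-

A Pod tolerates this taint with a matching toleration in its spec:

tolerations:
  - key: \"key1\"
    operator: \"Equal\"
    value: \"value1\"
    effect: \"NoSchedule\"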

For more details about taints, refer to the Kubernetes documentation on Taints and Tolerations.

                                                  "},{"location":"en/end-user/kpanda/olm/import-miniooperator.html","title":"Importing MinIo Operator Offline","text":"

This guide explains how to import the MinIO Operator offline in an environment without internet access.

                                                  "},{"location":"en/end-user/kpanda/olm/import-miniooperator.html#prerequisites","title":"Prerequisites","text":"
• The current cluster is connected to container management, and the Global cluster has the kolm component installed (search the Helm templates for kolm).
                                                  • The current cluster has the olm component installed with a version of 0.2.4 or higher (search for helm templates for olm).
                                                  • Ability to execute Docker commands.
                                                  • Prepare a container registry.
                                                  "},{"location":"en/end-user/kpanda/olm/import-miniooperator.html#steps","title":"Steps","text":"
                                                  1. Set the environment variables in the execution environment and use them in the subsequent steps by running the following command:

                                                    export OPM_IMG=10.5.14.200/quay.m.daocloud.io/operator-framework/opm:v1.29.0 \nexport BUNDLE_IMG=10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3 \n

                                                    How to get the above image addresses:

                                                    Go to Container Management -> Select the current cluster -> Helm Apps -> View the olm component -> Plugin Settings , and find the images needed for the opm, minio, minio bundle, and minio operator in the subsequent steps.

                                                    Using the screenshot as an example, the four image addresses are as follows:\n\n# opm image\n10.5.14.200/quay.m.daocloud.io/operator-framework/opm:v1.29.0\n\n# minio image\n10.5.14.200/quay.m.daocloud.io/minio/minio:RELEASE.2023-03-24T21-41-23Z\n\n# minio bundle image\n10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3\n\n# minio operator image\n10.5.14.200/quay.m.daocloud.io/minio/operator:v5.0.3\n
                                                  2. Run the opm command to get the operators included in the offline bundle image.

                                                    # Create the operator directory\n$ mkdir minio-operator && cd minio-operator \n\n# Get the operator yaml\n$ docker run --user root -v $PWD/minio-operator:/minio-operator ${OPM_IMG} alpha bundle unpack --skip-tls-verify -v -d ${BUNDLE_IMG} -o ./minio-operator\n\n# Expected result\n.\n\u2514\u2500\u2500 minio-operator\n    \u251c\u2500\u2500 manifests\n    \u2502   \u251c\u2500\u2500 console-env_v1_configmap.yaml\n    \u2502   \u251c\u2500\u2500 console-sa-secret_v1_secret.yaml\n    \u2502   \u251c\u2500\u2500 console_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 minio-operator.clusterserviceversion.yaml\n    \u2502   \u251c\u2500\u2500 minio.min.io_tenants.yaml\n    \u2502   \u251c\u2500\u2500 operator_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 sts.min.io_policybindings.yaml\n    \u2502   \u2514\u2500\u2500 sts_v1_service.yaml\n    \u2514\u2500\u2500 metadata\n        \u2514\u2500\u2500 annotations.yaml\n\n3 directories, 9 files\n
                                                  3. Replace all image addresses in the minio-operator/manifests/minio-operator.clusterserviceversion.yaml file with the image addresses from the offline container registry.

                                                    Before replacement:

                                                    After replacement:

                                                  4. Generate a Dockerfile for building the bundle image.

                                                    $ docker run --user root -v $PWD:/minio-operator -w /minio-operator ${OPM_IMG} alpha bundle generate --channels stable,beta -d /minio-operator/minio-operator/manifests -e stable -p minio-operator \u00a0\n\n# Expected result\n.\n\u251c\u2500\u2500 bundle.Dockerfile\n\u2514\u2500\u2500 minio-operator\n    \u251c\u2500\u2500 manifests\n    \u2502   \u251c\u2500\u2500 console-env_v1_configmap.yaml\n    \u2502   \u251c\u2500\u2500 console-sa-secret_v1_secret.yaml\n    \u2502   \u251c\u2500\u2500 console_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 minio-operator.clusterserviceversion.yaml\n    \u2502   \u251c\u2500\u2500 minio.min.io_tenants.yaml\n    \u2502   \u251c\u2500\u2500 operator_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 sts.min.io_policybindings.yaml\n    \u2502   \u2514\u2500\u2500 sts_v1_service.yaml\n    \u2514\u2500\u2500 metadata\n        \u2514\u2500\u2500 annotations.yaml\n\n3 directories, 10 files\n
                                                  5. Build the bundle image and push it to the offline registry.

                                                    # Set the new bundle image\nexport OFFLINE_BUNDLE_IMG=10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3-offline \n\n$ docker build . -f bundle.Dockerfile -t ${OFFLINE_BUNDLE_IMG} \u00a0\n\n$ docker push ${OFFLINE_BUNDLE_IMG}\n
                                                  6. Generate a Dockerfile for building the catalog image.

                                                    $ docker run --user root -v $PWD:/minio-operator -w /minio-operator ${OPM_IMG} index add  --bundles ${OFFLINE_BUNDLE_IMG} --generate --binary-image ${OPM_IMG} --skip-tls-verify\n\n# Expected result\n.\n\u251c\u2500\u2500 bundle.Dockerfile\n\u251c\u2500\u2500 database\n\u2502   \u2514\u2500\u2500 index.db\n\u251c\u2500\u2500 index.Dockerfile\n\u2514\u2500\u2500 minio-operator\n    \u251c\u2500\u2500 manifests\n    \u2502   \u251c\u2500\u2500 console-env_v1_configmap.yaml\n    \u2502   \u251c\u2500\u2500 console-sa-secret_v1_secret.yaml\n    \u2502   \u251c\u2500\u2500 console_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 minio.min.io_tenants.yaml\n    \u2502   \u251c\u2500\u2500 minio-operator.clusterserviceversion.yaml\n    \u2502   \u251c\u2500\u2500 operator_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 sts.min.io_policybindings.yaml\n    \u2502   \u2514\u2500\u2500 sts_v1_service.yaml\n    \u2514\u2500\u2500 metadata\n        \u2514\u2500\u2500 annotations.yaml\n\n4 directories, 12 files\n
                                                  7. Build the catalog image.

                                                    # Set the new catalog image  \nexport OFFLINE_CATALOG_IMG=10.5.14.200/release.daocloud.io/operator-framework/system-operator-index:v0.1.0-offline\n\n$ docker build . -f index.Dockerfile -t ${OFFLINE_CATALOG_IMG}  \n\n$ docker push ${OFFLINE_CATALOG_IMG}\n
8. Go to Container Management and update the built-in catsrc image for the Helm App olm (enter the catalog image, ${catalog-image} , built in the previous step).

                                                  9. After the update is successful, the minio-operator component will appear in the Operator Hub.

                                                  "},{"location":"en/end-user/kpanda/permissions/cluster-ns-auth.html","title":"Cluster and Namespace Authorization","text":"

Container management implements authorization based on global authority management and global user/group management. If you need to grant users the highest authority for container management (the ability to create, manage, and delete all clusters), refer to What is Access Control.

                                                  "},{"location":"en/end-user/kpanda/permissions/cluster-ns-auth.html#prerequisites","title":"Prerequisites","text":"

                                                  Before authorizing users/groups, complete the following preparations:

                                                  • The user/group to be authorized has been created in the global management, refer to user.

                                                  • Only Kpanda Owner and Cluster Admin of the current cluster have Cluster authorization capability. For details, refer to Permission Description.

• Only Kpanda Owner , Cluster Admin of the current cluster, and NS Admin of the current namespace have namespace authorization capability.

                                                  "},{"location":"en/end-user/kpanda/permissions/cluster-ns-auth.html#cluster-authorization","title":"Cluster Authorization","text":"
                                                  1. After the user logs in to the platform, click Privilege Management under Container Management on the left menu bar, which is located on the Cluster Permissions tab by default.

                                                  2. Click the Add Authorization button.

                                                  3. On the Add Cluster Permission page, select the target cluster, the user/group to be authorized, and click OK .

                                                    Currently, the only cluster role supported is Cluster Admin . For details about permissions, refer to Permission Description. If you need to authorize multiple users/groups at the same time, you can click Add User Permissions to add multiple times.

                                                  4. Return to the cluster permission management page, and a message appears on the screen: Cluster permission added successfully .

                                                  "},{"location":"en/end-user/kpanda/permissions/cluster-ns-auth.html#namespace-authorization","title":"Namespace Authorization","text":"
                                                  1. After the user logs in to the platform, click Permissions under Container Management on the left menu bar, and click the Namespace Permissions tab.

                                                  2. Click the Add Authorization button. On the Add Namespace Permission page, select the target cluster, target namespace, and user/group to be authorized, and click OK .

                                                    The currently supported namespace roles are NS Admin, NS Editor, and NS Viewer. For details about permissions, refer to Permission Description. If you need to authorize multiple users/groups at the same time, you can click Add User Permission to add multiple times. Click OK to complete the permission authorization.

3. Return to the namespace permission management page, and a message appears on the screen: Namespace permission added successfully .

                                                    Tip

                                                    If you need to delete or edit permissions later, you can click \u2507 on the right side of the list and select Edit or Delete .

                                                  "},{"location":"en/end-user/kpanda/permissions/custom-kpanda-role.html","title":"Adding RBAC Rules to System Roles","text":"

In the past, the RBAC rules for system roles in container management were pre-defined and could not be modified by users. To support more flexible permission settings and meet customized needs for system roles, you can now modify RBAC rules for system roles such as cluster admin, ns admin, ns editor, and ns viewer.

                                                  The following example demonstrates how to add a new ns-view rule, granting the authority to delete workload deployments. Similar operations can be performed for other rules.

                                                  "},{"location":"en/end-user/kpanda/permissions/custom-kpanda-role.html#prerequisites","title":"Prerequisites","text":"

                                                  Before adding RBAC rules to system roles, the following prerequisites must be met:

                                                  • Container management v0.27.0 and above.
• An integrated or created Kubernetes cluster, with access to the cluster's UI.
                                                  • Completed creation of a namespace and user account, and the granting of NS Viewer. For details, refer to namespace authorization.

                                                  Note

                                                  • RBAC rules only need to be added in the Global Cluster, and the Kpanda controller will synchronize those added rules to all integrated subclusters. Synchronization may take some time to complete.
                                                  • RBAC rules can only be added in the Global Cluster. RBAC rules added in subclusters will be overridden by the system role permissions of the Global Cluster.
• Rules can only be added to ClusterRoles carrying a fixed label; replacing or deleting rules is not supported, nor is adding rules via a Role. The correspondence between built-in roles and the ClusterRole labels users should apply is as follows.

                                                    cluster-admin: rbac.kpanda.io/role-template-cluster-admin: \"true\"\ncluster-edit: rbac.kpanda.io/role-template-cluster-edit: \"true\"\ncluster-view: rbac.kpanda.io/role-template-cluster-view: \"true\"\nns-admin: rbac.kpanda.io/role-template-ns-admin: \"true\"\nns-edit: rbac.kpanda.io/role-template-ns-edit: \"true\"\nns-view: rbac.kpanda.io/role-template-ns-view: \"true\"\n
                                                  "},{"location":"en/end-user/kpanda/permissions/custom-kpanda-role.html#steps","title":"Steps","text":"
                                                  1. Create a deployment by a user with admin or cluster admin permissions.

                                                  2. Grant a user the ns-viewer role to provide them with the ns-view permission.

                                                  3. Switch the login user to ns-viewer, open the console to get the token for the ns-viewer user, and use curl to request and delete the nginx deployment mentioned above. However, a prompt appears as below, indicating the user doesn't have permission to delete it.

                                                    [root@master-01 ~]# curl -k -X DELETE  'https://${URL}/apis/kpanda.io/v1alpha1/clusters/cluster-member/namespaces/default/deployments/nginx' -H 'authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJOU044MG9BclBRMzUwZ2VVU2ZyNy1xMEREVWY4MmEtZmJqR05uRE1sd1lFIn0.eyJleHAiOjE3MTU3NjY1NzksImlhdCI6MTcxNTY4MDE3OSwiYXV0aF90aW1lIjoxNzE1NjgwMTc3LCJqdGkiOiIxZjI3MzJlNC1jYjFhLTQ4OTktYjBiZC1iN2IxZWY1MzAxNDEiLCJpc3MiOiJodHRwczovLzEwLjYuMjAxLjIwMTozMDE0Ny9hdXRoL3JlYWxtcy9naGlwcG8iLCJhdWQiOiJfX2ludGVybmFsLWdoaXBwbyIsInN1YiI6ImMxZmMxM2ViLTAwZGUtNDFiYS05ZTllLWE5OGU2OGM0MmVmMCIsInR5cCI6IklEIiwiYXpwIjoiX19pbnRlcm5hbC1naGlwcG8iLCJzZXNzaW9uX3N0YXRlIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiYXRfaGFzaCI6IlJhTHoyQjlKQ2FNc1RrbGVMR3V6blEiLCJhY3IiOiIwIiwic2lkIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJncm91cHMiOltdLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJucy12aWV3ZXIiLCJsb2NhbGUiOiIifQ.As2ipMjfvzvgONAGlc9RnqOd3zMwAj82VXlcqcR74ZK9tAq3Q4ruQ1a6WuIfqiq8Kq4F77ljwwzYUuunfBli2zhU2II8zyxVhLoCEBu4pBVBd_oJyUycXuNa6HfQGnl36E1M7-_QG8b-_T51wFxxVb5b7SEDE1AvIf54NAlAr-rhDmGRdOK1c9CohQcS00ab52MD3IPiFFZ8_Iljnii-RpXKZoTjdcULJVn_uZNk_SzSUK-7MVWmPBK15m6sNktOMSf0pCObKWRqHd15JSe-2aA2PKBo1jBH3tHbOgZyMPdsLI0QdmEnKB5FiiOeMpwn_oHnT6IjT-BZlB18VkW8rA'\n{\"code\":7,\"message\":\"[RBAC] delete resources(deployments: nginx) is forbidden for user(ns-viewer) in cluster(cluster-member)\",\"details\":[]}[root@master-01 ~]#\n[root@master-01 ~]#\n
                                                  4. Create a ClusterRole on the global cluster, as shown in the yaml below.

                                                    apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: append-ns-view # (1)!\n  labels:\n    rbac.kpanda.io/role-template-ns-view: \"true\" # (2)!\nrules:\n  - apiGroups: [ \"apps\" ]\n    resources: [ \"deployments\" ]\n    verbs: [ \"delete\" ]\n
                                                    1. This field value can be arbitrarily specified, as long as it is not duplicated and complies with the Kubernetes resource naming conventions.
                                                    2. When adding rules to different roles, make sure to apply different labels.
5. Wait for the kpanda controller to add the user-created rule to the built-in role ns-viewer, then check whether the rule added in the previous step is present for ns-viewer.

                                                    [root@master-01 ~]# kubectl get clusterrole role-template-ns-view -oyaml|grep deployments -C 10|tail -n 6\n
                                                    - apiGroups:\n  - apps\n  resources:\n  - deployments\n  verbs:\n  - delete\n

6. When using curl again to request deletion of the aforementioned nginx deployment, the deletion succeeds this time. This means that ns-viewer has successfully gained the rule to delete deployments.

                                                    [root@master-01 ~]# curl -k -X DELETE  'https://${URL}/apis/kpanda.io/v1alpha1/clusters/cluster-member/namespaces/default/deployments/nginx' -H 'authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJOU044MG9BclBRMzUwZ2VVU2ZyNy1xMEREVWY4MmEtZmJqR05uRE1sd1lFIn0.eyJleHAiOjE3MTU3NjY1NzksImlhdCI6MTcxNTY4MDE3OSwiYXV0aF90aW1lIjoxNzE1NjgwMTc3LCJqdGkiOiIxZjI3MzJlNC1jYjFhLTQ4OTktYjBiZC1iN2IxZWY1MzAxNDEiLCJpc3MiOiJodHRwczovLzEwLjYuMjAxLjIwMTozMDE0Ny9hdXRoL3JlYWxtcy9naGlwcG8iLCJhdWQiOiJfX2ludGVybmFsLWdoaXBwbyIsInN1YiI6ImMxZmMxM2ViLTAwZGUtNDFiYS05ZTllLWE5OGU2OGM0MmVmMCIsInR5cCI6IklEIiwiYXpwIjoiX19pbnRlcm5hbC1naGlwcG8iLCJzZXNzaW9uX3N0YXRlIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiYXRfaGFzaCI6IlJhTHoyQjlKQ2FNc1RrbGVMR3V6blEiLCJhY3IiOiIwIiwic2lkIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJncm91cHMiOltdLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJucy12aWV3ZXIiLCJsb2NhbGUiOiIifQ.As2ipMjfvzvgONAGlc9RnqOd3zMwAj82VXlcqcR74ZK9tAq3Q4ruQ1a6WuIfqiq8Kq4F77ljwwzYUuunfBli2zhU2II8zyxVhLoCEBu4pBVBd_oJyUycXuNa6HfQGnl36E1M7-_QG8b-_T51wFxxVb5b7SEDE1AvIf54NAlAr-rhDmGRdOK1c9CohQcS00ab52MD3IPiFFZ8_Iljnii-RpXKZoTjdcULJVn_uZNk_SzSUK-7MVWmPBK15m6sNktOMSf0pCObKWRqHd15JSe-2aA2PKBo1jBH3tHbOgZyMPdsLI0QdmEnKB5FiiOeMpwn_oHnT6IjT-BZlB18VkW8rA'\n
                                                  "},{"location":"en/end-user/kpanda/permissions/permission-brief.html","title":"Container Management Permissions","text":"

                                                  Container management permissions are based on a multi-dimensional permission management system created by global permission management and Kubernetes RBAC permission management. It supports cluster-level and namespace-level permission control, helping users to conveniently and flexibly set different operation permissions for IAM users and user groups (collections of users) under a tenant.

                                                  "},{"location":"en/end-user/kpanda/permissions/permission-brief.html#cluster-permissions","title":"Cluster Permissions","text":"

Cluster permissions are authorized based on Kubernetes RBAC's ClusterRoleBinding, allowing users/user groups to have cluster-related permissions. The current default cluster role is Cluster Admin (which does not have the permission to create or delete clusters).
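For orientation, a sketch of the kind of ClusterRoleBinding this corresponds to, granting the built-in role-template-cluster-admin ClusterRole (shown below) to a user; the binding and user names are illustrative:

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cluster-admin-user01          # illustrative binding name
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: role-template-cluster-admin   # the built-in Cluster Admin role
subjects:
  - kind: User
    apiGroup: rbac.authorization.k8s.io
    name: user01                      # illustrative user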

                                                  "},{"location":"en/end-user/kpanda/permissions/permission-brief.html#cluster-admin","title":"Cluster Admin","text":"

                                                  Cluster Admin has the following permissions:

                                                  • Can manage, edit, and view the proper cluster
                                                  • Manage, edit, and view all workloads and all resources within the namespace
                                                  • Can authorize users for roles within the cluster (Cluster Admin, NS Admin, NS Editor, NS Viewer)

                                                  The YAML example for this cluster role is as follows:

                                                  apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:49Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-cluster-admin\n  resourceVersion: \"15168\"\n  uid: f8f86d42-d5ef-47aa-b284-097615795076\nrules:\n- apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n- nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'\n
                                                  "},{"location":"en/end-user/kpanda/permissions/permission-brief.html#namespace-permissions","title":"Namespace Permissions","text":"

                                                  Namespace permissions are authorized based on Kubernetes RBAC capabilities, allowing different users/user groups to have different operation permissions on resources under a namespace (including Kubernetes API permissions). For details, refer to: Kubernetes RBAC. Currently, the default roles for container management are: NS Admin, NS Editor, NS Viewer.

                                                  "},{"location":"en/end-user/kpanda/permissions/permission-brief.html#ns-admin","title":"NS Admin","text":"

                                                  NS Admin has the following permissions:

                                                  • Can view the proper namespace
                                                  • Manage, edit, and view all workloads and custom resources within the namespace
                                                  • Can authorize users for proper namespace roles (NS Editor, NS Viewer)

                                                  The YAML example for this cluster role is as follows:

                                                  apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:49Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-admin\n  resourceVersion: \"15173\"\n  uid: 69f64c7e-70e7-4c7c-a3e0-053f507f2bc3\nrules:\n- apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n- nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'    \n
                                                  "},{"location":"en/end-user/kpanda/permissions/permission-brief.html#ns-editor","title":"NS Editor","text":"

                                                  NS Editor has the following permissions:

• Can view the namespaces where permissions have been granted
• Can manage, edit, and view all workloads within those namespaces
                                                  Click to view the YAML example of the cluster role
                                                  apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:50Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-edit\n  resourceVersion: \"15175\"\n  uid: ca9e690e-96c0-4978-8915-6e4c00c748fe\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - configmaps\n  - endpoints\n  - persistentvolumeclaims\n  - persistentvolumeclaims/status\n  - pods\n  - replicationcontrollers\n  - replicationcontrollers/scale\n  - serviceaccounts\n  - services\n  - services/status\n  verbs:\n  - '*'\n- apiGroups:\n  - \"\"\n  resources:\n  - bindings\n  - events\n  - limitranges\n  - namespaces/status\n  - pods/log\n  - pods/status\n  - replicationcontrollers/status\n  - resourcequotas\n  - resourcequotas/status\n  verbs:\n  - '*'\n- apiGroups:\n  - \"\"\n  resources:\n  - namespaces\n  verbs:\n  - '*'\n- apiGroups:\n  - apps\n  resources:\n  - controllerrevisions\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - statefulsets\n  - statefulsets/scale\n  - statefulsets/status\n  verbs:\n  - '*'\n- apiGroups:\n  - autoscaling\n  resources:\n  - horizontalpodautoscalers\n  - horizontalpodautoscalers/status\n  verbs:\n  - '*'\n- apiGroups:\n  - batch\n  resources:\n  - cronjobs\n  - cronjobs/status\n  - jobs\n  - jobs/status\n  verbs:\n  - '*'\n- apiGroups:\n  - extensions\n  resources:\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - replicationcontrollers/scale\n  verbs:\n  - '*'\n- apiGroups:\n  - policy\n  resources:\n  - poddisruptionbudgets\n  - poddisruptionbudgets/status\n  verbs:\n  - '*'\n- apiGroups:\n  - networking.k8s.io\n  resources:\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  verbs:\n  - '*'      \n
                                                  "},{"location":"en/end-user/kpanda/permissions/permission-brief.html#ns-viewer","title":"NS Viewer","text":"

                                                  NS Viewer has the following permissions:

• Can view the corresponding namespace
• Can view all workloads and custom resources within the namespace
                                                  Click to view the YAML example of the cluster role
                                                  apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:50Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-view\n  resourceVersion: \"15183\"\n  uid: 853888fd-6ee8-42ac-b91e-63923918baf8\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - configmaps\n  - endpoints\n  - persistentvolumeclaims\n  - persistentvolumeclaims/status\n  - pods\n  - replicationcontrollers\n  - replicationcontrollers/scale\n  - serviceaccounts\n  - services\n  - services/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - \"\"\n  resources:\n  - bindings\n  - events\n  - limitranges\n  - namespaces/status\n  - pods/log\n  - pods/status\n  - replicationcontrollers/status\n  - resourcequotas\n  - resourcequotas/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - \"\"\n  resources:\n  - namespaces\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - apps\n  resources:\n  - controllerrevisions\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - statefulsets\n  - statefulsets/scale\n  - statefulsets/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - autoscaling\n  resources:\n  - horizontalpodautoscalers\n  - horizontalpodautoscalers/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - batch\n  resources:\n  - cronjobs\n  - cronjobs/status\n  - jobs\n  - jobs/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - extensions\n  resources:\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - replicationcontrollers/scale\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - policy\n  resources:\n  - poddisruptionbudgets\n  - poddisruptionbudgets/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - networking.k8s.io\n  resources:\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  verbs:\n  - get\n  - list\n  - watch \n
                                                  "},{"location":"en/end-user/kpanda/permissions/permission-brief.html#permissions-faq","title":"Permissions FAQ","text":"
                                                  1. What is the relationship between global permissions and container management permissions?

Answer: Global permissions grant only coarse-grained authorization: they govern the creation, editing, and deletion of clusters as a whole. Fine-grained permissions, such as managing a single cluster or managing, editing, and deleting a single namespace, must be implemented through the Kubernetes RBAC based container management permissions. Generally, users only need to be authorized in container management.

                                                  2. Currently, only four default roles are supported. Can the RoleBinding and ClusterRoleBinding (Kubernetes fine-grained RBAC) for custom roles also take effect?

                                                    Answer: Currently, custom permissions cannot be managed through the graphical interface, but the permission rules created using kubectl can still take effect.
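For illustration, a minimal sketch of such a kubectl-created rule (the role name pod-reader, namespace demo, and user jane are hypothetical):

apiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: pod-reader    # hypothetical custom role\n  namespace: demo\nrules:\n- apiGroups: [\"\"]\n  resources: [\"pods\"]\n  verbs: [\"get\", \"list\", \"watch\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: read-pods\n  namespace: demo\nsubjects:\n- kind: User\n  name: jane          # hypothetical user\n  apiGroup: rbac.authorization.k8s.io\nroleRef:\n  kind: Role\n  name: pod-reader\n  apiGroup: rbac.authorization.k8s.io\n

Applying this with kubectl takes effect in the cluster even though the role is not yet manageable through the graphical interface.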

                                                  "},{"location":"en/end-user/kpanda/scale/create-hpa.html","title":"Create HPA","text":"

Suanova AI platform supports elastic scaling of Pod resources based on metrics (Horizontal Pod Autoscaling, HPA). Users can dynamically adjust the number of Pod replicas by setting CPU utilization, memory usage, and custom metrics. For example, after setting an auto scaling policy based on the CPU utilization metric for a workload, when the CPU utilization of its Pods rises above or falls below the threshold you set, the workload controller automatically increases or decreases the number of Pod replicas.

                                                  This page describes how to configure auto scaling based on built-in metrics and custom metrics for workloads.

                                                  Note

1. HPA applies only to Deployments and StatefulSets, and each workload can have only one HPA.
2. If you create an HPA policy based on CPU utilization, you must set the resource limit (Limit) for the workload in advance; otherwise the CPU utilization cannot be calculated.
3. If built-in metrics and multiple custom metrics are used at the same time, HPA calculates the required number of replicas for each metric separately and uses the largest value (without exceeding the maximum number of replicas configured in the HPA policy) for elastic scaling.
                                                  "},{"location":"en/end-user/kpanda/scale/create-hpa.html#built-in-metric-elastic-scaling-policy","title":"Built-in metric elastic scaling policy","text":"

The system has two built-in elastic scaling metrics, CPU and memory, to cover users' basic business cases.

                                                  "},{"location":"en/end-user/kpanda/scale/create-hpa.html#prerequisites","title":"Prerequisites","text":"

Before configuring a built-in metric auto scaling policy for the workload, the following prerequisites must be met:

• The Kubernetes cluster has been integrated or created, and the cluster UI is accessible.

                                                  • Created a namespace, deployment or statefulset.

                                                  • You should have permissions not lower than NS Editor. For details, refer to Namespace Authorization.

• Installed the metrics-server plugin.

                                                  "},{"location":"en/end-user/kpanda/scale/create-hpa.html#steps","title":"Steps","text":"

Refer to the following steps to configure a built-in metric auto scaling policy for the workload.

                                                  1. Click Clusters on the left navigation bar to enter the cluster list page. Click a cluster name to enter the Cluster Details page.

                                                  2. On the cluster details page, click Workload in the left navigation bar to enter the workload list, and then click a workload name to enter the Workload Details page.

                                                  3. Click the Auto Scaling tab to view the auto scaling configuration of the current cluster.

                                                  4. After confirming that the cluster has installed the metrics-server plug-in, and the plug-in is running normally, you can click the New Scaling button.

5. Configure the auto scaling policy parameters.

• Policy name: Enter the name of the auto scaling policy. Note that the name can contain up to 63 characters, may only include lowercase letters, numbers, and separators (\"-\"), and must start and end with a lowercase letter or number, such as hpa-my-dep.
• Namespace: The namespace where the workload resides.
• Workload: The workload object that performs auto scaling.
• Target CPU Utilization: The CPU utilization of the Pods under the workload, calculated as the actual CPU usage of all Pods under the workload divided by their total CPU request value. When the actual CPU utilization is greater/lower than the target value, the system automatically increases/reduces the number of Pod replicas. (A sketch of the equivalent HPA manifest follows these steps.)
• Target Memory Usage: The memory usage of the Pods under the workload. When the actual memory usage is greater/lower than the target value, the system automatically increases/reduces the number of Pod replicas.
• Replica range: The range within which the number of Pod replicas can scale. The default interval is 1 - 10.
                                                  6. After completing the parameter configuration, click the OK button to automatically return to the elastic scaling details page. Click \u2507 on the right side of the list to edit, delete, and view related events.
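Such a policy corresponds to a standard Kubernetes HPA object. A minimal sketch of the equivalent manifest, assuming a Deployment named my-dep (names and thresholds are illustrative):

apiVersion: autoscaling/v2\nkind: HorizontalPodAutoscaler\nmetadata:\n  name: hpa-my-dep          # illustrative policy name\n  namespace: default\nspec:\n  scaleTargetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: my-dep            # illustrative workload\n  minReplicas: 1            # replica range lower bound\n  maxReplicas: 10           # replica range upper bound\n  metrics:\n  - type: Resource\n    resource:\n      name: cpu\n      target:\n        type: Utilization\n        averageUtilization: 60   # illustrative target CPU utilization (%)\n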

                                                  "},{"location":"en/end-user/kpanda/scale/create-vpa.html","title":"Create VPAs","text":"

The Vertical Pod Autoscaler (VPA) monitors a Pod's resource requests and actual usage over a period of time and calculates the most suitable CPU and memory request values for it. Using VPA allocates resources to each Pod in the cluster more reasonably, improving the overall resource utilization of the cluster and avoiding waste of cluster resources.

AI platform supports vertical scaling of containers through VPA. Based on this feature, Pod request values can be dynamically adjusted according to actual container resource usage. AI platform supports both manual and automatic modification of resource request values, which you can configure according to actual needs.

                                                  This page describes how to configure VPA for deployment.

                                                  Warning

                                                  Using VPA to modify a Pod resource request will trigger a Pod restart. Due to the limitations of Kubernetes itself, Pods may be scheduled to other nodes after restarting.

                                                  "},{"location":"en/end-user/kpanda/scale/create-vpa.html#prerequisites","title":"Prerequisites","text":"

Before configuring a vertical scaling policy for a deployment, the following prerequisites must be met:

• The Kubernetes cluster has been integrated or created in the Container Management module, and the cluster UI is accessible.

• A namespace, a user, and a Deployment or StatefulSet have been created.

                                                  • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                                  • The current cluster has installed metrics-server and VPA plugins.

                                                  "},{"location":"en/end-user/kpanda/scale/create-vpa.html#steps","title":"Steps","text":"

Refer to the following steps to configure a VPA policy for the deployment.

                                                  1. Find the current cluster in Clusters , and click the name of the target cluster.

2. Click Deployments in the left navigation bar, find the deployment for which you want to create a VPA, and click its name.

                                                  3. Click the Auto Scaling tab to view the auto scaling configuration of the current cluster, and confirm that the relevant plug-ins have been installed and are running normally.

                                                  4. Click the Create Autoscaler button and configure the VPA vertical scaling policy parameters.

• Policy name: Enter the name of the vertical scaling policy. Note that the name can contain up to 63 characters, may only include lowercase letters, numbers, and separators (\"-\"), and must start and end with a lowercase letter or number, such as vpa-my-dep.
• Scaling mode: The method used to modify the CPU and memory request values. Vertical scaling currently supports manual and automatic modes.
  • Manual scaling: After the vertical scaling policy calculates the recommended resource configuration values, the user must manually modify the application's resource quota.
  • Auto-scaling: The vertical scaling policy automatically calculates and modifies the application's resource quota.
• Target container: Select the container to be scaled vertically. (A sketch of the equivalent VPA manifest follows these steps.)
                                                  5. After completing the parameter configuration, click the OK button to automatically return to the elastic scaling details page. Click \u2507 on the right side of the list to perform edit and delete operations.
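The resulting policy corresponds to a VerticalPodAutoscaler object. A minimal sketch, assuming a Deployment named my-dep with a container named app (names are illustrative):

apiVersion: autoscaling.k8s.io/v1\nkind: VerticalPodAutoscaler\nmetadata:\n  name: vpa-my-dep          # illustrative policy name\nspec:\n  targetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: my-dep\n  updatePolicy:\n    updateMode: \"Off\"       # manual mode: recommendations only; use \"Auto\" to apply them\n  resourcePolicy:\n    containerPolicies:\n    - containerName: app    # illustrative target container\n      controlledResources: [\"cpu\", \"memory\"]\n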

                                                  "},{"location":"en/end-user/kpanda/scale/custom-hpa.html","title":"Creating HPA Based on Custom Metrics","text":"

                                                  When the built-in CPU and memory metrics in the system do not meet your business needs, you can add custom metrics by configuring ServiceMonitoring and achieve auto-scaling based on these custom metrics. This article will introduce how to configure auto-scaling for workloads based on custom metrics.

                                                  Note

                                                  1. HPA is only applicable to Deployment and StatefulSet, and each workload can only create one HPA.
                                                  2. If both built-in metrics and multiple custom metrics are used, HPA will calculate the required number of scaled replicas based on multiple metrics respectively, and take the larger value (but not exceeding the maximum number of replicas configured when setting the HPA policy) for scaling.
                                                  "},{"location":"en/end-user/kpanda/scale/custom-hpa.html#prerequisites","title":"Prerequisites","text":"

                                                  Before configuring the custom metrics auto-scaling policy for workloads, the following prerequisites must be met:

• The Kubernetes cluster has been integrated or created, and the cluster UI is accessible.
                                                  • Completed creation of a namespace, deployment, or statefulSet.
• The current user should have permissions not lower than NS Editor. For details, refer to namespace authorization.
                                                  • metrics-server plugin has been installed.
                                                  • insight-agent plugin has been installed.
                                                  • Prometheus-adapter plugin has been installed.
                                                  "},{"location":"en/end-user/kpanda/scale/custom-hpa.html#steps","title":"Steps","text":"

                                                  Refer to the following steps to configure the auto-scaling policy based on metrics for workloads.

                                                  1. Click Clusters in the left navigation bar to enter the clusters page. Click a cluster name to enter the Cluster Overview page.

                                                  2. On the Cluster Details page, click Workloads in the left navigation bar to enter the workload list, and click a workload name to enter the Workload Details page.

                                                  3. Click the Auto Scaling tab to view the current autoscaling configuration of the cluster.

                                                  4. Confirm that the cluster has installed metrics-server, Insight, and Prometheus-adapter plugins, and that the plugins are running normally, then click the Create AutoScaler button.

                                                    Note

                                                    If the related plugins are not installed or the plugins are in an abnormal state, you will not be able to see the entry for creating custom metrics auto-scaling on the page.

                                                  5. Create custom metrics auto-scaling policy parameters.

                                                    • Policy Name: Enter the name of the auto-scaling policy. Note that the name can be up to 63 characters long, can only contain lowercase letters, numbers, and separators (\"-\"), and must start and end with a lowercase letter or number, e.g., hpa-my-dep.
                                                    • Namespace: The namespace where the workload is located.
                                                    • Workload: The workload object that performs auto-scaling.
                                                    • Resource Type: The type of custom metric being monitored, including Pod and Service types.
                                                    • Metric: The name of the custom metric created using ServiceMonitoring or the name of the system-built custom metric.
                                                    • Data Type: The method used to calculate the metric value, including target value and target average value. When the resource type is Pod, only the target average value can be used.
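Such a policy maps to an HPA object that references the custom metric. A minimal sketch for a Pod-type metric with a target average value (the metric name and target below are illustrative and match the operation example that follows):

apiVersion: autoscaling/v2\nkind: HorizontalPodAutoscaler\nmetadata:\n  name: hpa-httpserver\n  namespace: httpserver\nspec:\n  scaleTargetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: httpserver\n  minReplicas: 1\n  maxReplicas: 10\n  metrics:\n  - type: Pods                        # Pod resource type: only the target average value is supported\n    pods:\n      metric:\n        name: httpserver_requests_qps   # custom metric exposed via prometheus-adapter\n      target:\n        type: AverageValue\n        averageValue: \"50\"            # illustrative target QPS per Pod\n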
                                                  "},{"location":"en/end-user/kpanda/scale/custom-hpa.html#operation-example","title":"Operation Example","text":"

                                                  This case takes a Golang business program as an example. The example program exposes the httpserver_requests_total metric and records HTTP requests. This metric can be used to calculate the QPS value of the business program.

                                                  "},{"location":"en/end-user/kpanda/scale/custom-hpa.html#deploy-business-program","title":"Deploy Business Program","text":"

                                                  Use Deployment to deploy the business program:

                                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: httpserver\n  namespace: httpserver\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: httpserver\n  template:\n    metadata:\n      labels:\n        app: httpserver\n    spec:\n      containers:\n      - name: httpserver\n        image: registry.imroc.cc/test/httpserver:custom-metrics\n        imagePullPolicy: Always\n---\n\napiVersion: v1\nkind: Service\nmetadata:\n  name: httpserver\n  namespace: httpserver\n  labels:\n    app: httpserver\n  annotations:\n    prometheus.io/scrape: \"true\"\n    prometheus.io/path: \"/metrics\"\n    prometheus.io/port: \"http\"\nspec:\n  type: ClusterIP\n  ports:\n  - port: 80\n    protocol: TCP\n    name: http\n  selector:\n    app: httpserver\n
                                                  "},{"location":"en/end-user/kpanda/scale/custom-hpa.html#prometheus-collects-business-monitoring","title":"Prometheus Collects Business Monitoring","text":"

                                                  If the insight-agent is installed, Prometheus can be configured by creating a ServiceMonitor CRD object.

Operation steps: In Cluster Details -> Custom Resources, search for \"servicemonitors.monitoring.coreos.com\", click the name to enter its details, and create the following example CRD in the httpserver namespace via YAML:

                                                  apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: httpserver\n  namespace: httpserver\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  endpoints:\n  - port: http\n    interval: 5s\n  namespaceSelector:\n    matchNames:\n    - httpserver\n  selector:\n    matchLabels:\n      app: httpserver\n

                                                  Note

                                                  If Prometheus is installed via insight, the serviceMonitor must be labeled with operator.insight.io/managed-by: insight. If installed by other means, this label is not required.

                                                  "},{"location":"en/end-user/kpanda/scale/custom-hpa.html#configure-metric-rules-in-prometheus-adapter","title":"Configure Metric Rules in Prometheus-adapter","text":"

Steps: In Clusters -> Helm Apps, search for \"prometheus-adapter\", enter the update page through the action bar, and configure the custom metric in YAML as follows:

                                                  rules:\n  custom:\n    - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)\n      name:\n        as: httpserver_requests_qps\n        matches: httpserver_requests_total\n      resources:\n        template: <<.Resource>>\n      seriesQuery: httpserver_requests_total\n
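Assuming the rule above has been applied and the adapter has reloaded, you can verify that the new metric is exposed through the custom metrics API, for example with kubectl get --raw \"/apis/custom.metrics.k8s.io/v1beta1/namespaces/httpserver/pods/*/httpserver_requests_qps\".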
                                                  "},{"location":"en/end-user/kpanda/scale/custom-hpa.html#create-custom-metrics-auto-scaling-policy-parameters","title":"Create Custom Metrics Auto-scaling Policy Parameters","text":"

Following the steps above, find the Deployment httpserver and create an auto-scaling policy using the custom metric.

                                                  "},{"location":"en/end-user/kpanda/scale/hpa-cronhpa-compatibility-rules.html","title":"Compatibility Rules for HPA and CronHPA","text":"

                                                  HPA stands for HorizontalPodAutoscaler, which refers to horizontal pod auto-scaling.

                                                  CronHPA stands for Cron HorizontalPodAutoscaler, which refers to scheduled horizontal pod auto-scaling.

                                                  "},{"location":"en/end-user/kpanda/scale/hpa-cronhpa-compatibility-rules.html#conflict-between-cronhpa-and-hpa","title":"Conflict Between CronHPA and HPA","text":"

Scheduled scaling with CronHPA triggers horizontal pod scaling at specified times. To handle sudden traffic surges, you may have also configured HPA to ensure the normal operation of your application. If both HPA and CronHPA exist simultaneously, conflicts arise because they operate independently without awareness of each other, so the action performed last overrides the one performed first.

                                                  By comparing the definition templates of CronHPA and HPA, the following points can be observed:

                                                  • Both CronHPA and HPA use the scaleTargetRef field to identify the scaling target.
                                                  • CronHPA schedules the number of replicas to scale based on crontab rules in jobs.
                                                  • HPA determines scaling based on resource utilization.

                                                  Note

                                                  If both CronHPA and HPA are set, there will be scenarios where CronHPA and HPA simultaneously operate on a single scaleTargetRef.

                                                  "},{"location":"en/end-user/kpanda/scale/hpa-cronhpa-compatibility-rules.html#compatibility-solution-for-cronhpa-and-hpa","title":"Compatibility Solution for CronHPA and HPA","text":"

                                                  As noted above, the fundamental reason that simultaneous use of CronHPA and HPA results in the later action overriding the earlier one is that the two controllers cannot sense each other. Therefore, the conflict can be resolved by enabling CronHPA to be aware of HPA's current state.

                                                  The system will treat HPA as the scaling object for CronHPA, thus achieving scheduled scaling for the Deployment object defined by the HPA.

                                                  HPA's definition configures the Deployment in the scaleTargetRef field, and then the Deployment uses its definition to locate the ReplicaSet, which ultimately adjusts the actual number of replicas.

                                                  In AI platform, the scaleTargetRef in CronHPA is set to the HPA object, and it uses the HPA object to find the actual scaleTargetRef, allowing CronHPA to be aware of HPA's current state.

CronHPA senses HPA by adjusting it. To decide whether to scale, CronHPA compares its target number of replicas with the current number and, when needed, modifies the HPA upper limit to the larger of the two values. Similarly, it compares its target number of replicas with the lower limit configured in the HPA and modifies the lower limit to the smaller value.
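A minimal sketch of a CronHPA that points its scaleTargetRef at the HPA object rather than the Deployment (names, schedules, and sizes are illustrative; kubernetes-cronhpa-controller uses a six-field cron expression that includes seconds):

apiVersion: autoscaling.alibabacloud.com/v1beta1\nkind: CronHorizontalPodAutoscaler\nmetadata:\n  name: cronhpa-sample\n  namespace: default\nspec:\n  scaleTargetRef:\n    apiVersion: autoscaling/v2\n    kind: HorizontalPodAutoscaler\n    name: hpa-my-dep          # CronHPA adjusts the HPA limits instead of the Deployment\n  jobs:\n  - name: scale-up-morning\n    schedule: \"0 0 8 * * *\"   # every day at 08:00:00\n    targetSize: 10\n  - name: scale-down-night\n    schedule: \"0 0 22 * * *\"  # every day at 22:00:00\n    targetSize: 2\n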

                                                  "},{"location":"en/end-user/kpanda/scale/install-cronhpa.html","title":"Install kubernetes-cronhpa-controller","text":"

Scheduled horizontal pod autoscaling (CronHPA) provides a stable computing resource guarantee for applications with periodic high concurrency, and kubernetes-cronhpa-controller is the key component that implements CronHPA.

                                                  This section describes how to install the kubernetes-cronhpa-controller plugin.

                                                  Note

In order to use CronHPA, you must install not only the kubernetes-cronhpa-controller plugin but also the metrics-server plugin.

                                                  "},{"location":"en/end-user/kpanda/scale/install-cronhpa.html#prerequisites","title":"Prerequisites","text":"

                                                  Before installing the kubernetes-cronhpa-controller plugin, the following prerequisites need to be met:

• The Kubernetes cluster has been integrated or created in the Container Management module, and the cluster UI is accessible.

• A namespace has been created.

                                                  • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                                  "},{"location":"en/end-user/kpanda/scale/install-cronhpa.html#steps","title":"Steps","text":"

                                                  Refer to the following steps to install the kubernetes-cronhpa-controller plugin for the cluster.

                                                  1. On the Clusters page, find the target cluster where the plugin needs to be installed, click the name of the cluster, then click Workloads -> Deployments on the left, and click the name of the target workload.

                                                  2. On the workload details page, click the Auto Scaling tab, and click Install on the right side of CronHPA .

3. Read the plugin introduction, select a version, and click the Install button. Version 1.3.0 or later is recommended.

                                                  4. Refer to the following instructions to configure the parameters.

                                                    • Name: Enter the plugin name, please note that the name can be up to 63 characters, can only contain lowercase letters, numbers, and separators (\"-\"), and must start and end with lowercase letters or numbers, such as kubernetes-cronhpa-controller.
                                                    • Namespace: Select which namespace the plugin will be installed in, here we take default as an example.
                                                    • Version: The version of the plugin, here we take the 1.3.0 version as an example.
                                                    • Ready Wait: When enabled, it will wait for all associated resources under the application to be in the ready state before marking the application installation as successful.
• Failed to delete: If the plugin installation fails, the associated resources that were already installed are deleted. When enabled, Ready Wait is enabled synchronously by default.
                                                    • Detailed log: When enabled, a detailed log of the installation process will be recorded.

                                                    Note

After enabling Ready Wait and/or Failed to delete, it can take a while for the application to be marked as \"running\".

                                                  5. Click OK in the lower right corner of the page, and the system will automatically jump to the Helm Apps list page. Wait a few minutes and refresh the page to see the application you just installed.

                                                    Warning

                                                    If you need to delete the kubernetes-cronhpa-controller plugin, you should go to the Helm Apps list page to delete it completely.

If you delete the plugin under the Auto Scaling tab of the workload, only the plugin's workload copy is deleted; the plugin itself remains, and an error will be prompted when the plugin is reinstalled later.

6. Go back to the Auto Scaling tab on the workload details page; the interface displays Plugin installed. Now you can start creating CronHPA policies.

                                                  "},{"location":"en/end-user/kpanda/scale/install-metrics-server.html","title":"Install metrics-server","text":"

metrics-server is the built-in resource usage metrics collection component of Kubernetes. By configuring HPA policies, you can automatically scale Pod replicas horizontally for workload resources.

                                                  This section describes how to install metrics-server .

                                                  "},{"location":"en/end-user/kpanda/scale/install-metrics-server.html#prerequisites","title":"Prerequisites","text":"

                                                  Before installing the metrics-server plugin, the following prerequisites need to be met:

• The Kubernetes cluster has been integrated or created, and the cluster UI is accessible.

                                                  • Created a namespace.

                                                  • You should have permissions not lower than NS Editor. For details, refer to Namespace Authorization.

                                                  "},{"location":"en/end-user/kpanda/scale/install-metrics-server.html#steps","title":"Steps","text":"

                                                  Please perform the following steps to install the metrics-server plugin for the cluster.

1. On the Auto Scaling page under workload details, click the Install button to enter the metrics-server plugin installation interface.

2. Read the introduction of the metrics-server plugin, select a version, and click the Install button. This page uses version 3.8.2 as an example; version 3.8.2 or later is recommended.

                                                  3. Configure basic parameters on the installation configuration interface.

                                                    • Name: Enter the plugin name, please note that the name can be up to 63 characters, can only contain lowercase letters, numbers and separators (\"-\"), and must start and end with lowercase letters or numbers, such as metrics-server-01.
                                                    • Namespace: Select the namespace for plugin installation, here we take default as an example.
                                                    • Version: The version of the plugin, here we take 3.8.2 version as an example.
                                                    • Ready Wait: When enabled, it will wait for all associated resources under the application to be ready before marking the application installation as successful.
• Failed to delete: If the installation fails, the installation-related resources that were already created are removed. When enabled, Ready Wait is enabled synchronously by default.
                                                    • Verbose log: Turn on the verbose output of the installation process log.

                                                    Note

After enabling Ready Wait and/or Failed to delete, it can take a while for the app to be marked as Running.

                                                  4. Advanced parameter configuration

• If the cluster network cannot access the k8s.gcr.io registry, try changing the repository parameter to repository: k8s.m.daocloud.io/metrics-server/metrics-server.

• An SSL certificate is also required to install the metrics-server plugin. To bypass certificate verification, add the - --kubelet-insecure-tls parameter under defaultArgs:.

                                                    Click to view and use the YAML parameters to replace the default YAML
image:\n  repository: k8s.m.daocloud.io/metrics-server/metrics-server # Change the registry source address to k8s.m.daocloud.io\n  tag: ''\n  pullPolicy: IfNotPresent\nimagePullSecrets: []\nnameOverride: ''\nfullnameOverride: ''\nserviceAccount:\n  create: true\n  annotations: {}\n  name: ''\nrbac:\n  create: true\n  pspEnabled: false\napiService:\n  create: true\npodLabels: {}\npodAnnotations: {}\npodSecurityContext: {}\nsecurityContext:\n  allowPrivilegeEscalation: false\n  readOnlyRootFilesystem: true\n  runAsNonRoot: true\n  runAsUser: 1000\npriorityClassName: system-cluster-critical\ncontainerPort: 4443\nhostNetwork:\n  enabled: false\nreplicas: 1\nupdateStrategy: {}\npodDisruptionBudget:\n  enabled: false\n  minAvailable: null\n  maxUnavailable: null\ndefaultArgs:\n  - '--cert-dir=/tmp'\n  - '--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname'\n  - '--kubelet-use-node-status-port'\n  - '--metric-resolution=15s'\n  - --kubelet-insecure-tls # Bypass certificate verification\nargs: []\nlivenessProbe:\n  httpGet:\n    path: /livez\n    port: https\n    scheme: HTTPS\n  initialDelaySeconds: 0\n  periodSeconds: 10\n  failureThreshold: 3\nreadinessProbe:\n  httpGet:\n    path: /readyz\n    port: https\n    scheme: HTTPS\n  initialDelaySeconds: 20\n  periodSeconds: 10\n  failureThreshold: 3\nservice:\n  type: ClusterIP\n  port: 443\n  annotations: {}\n  labels: {}\nmetrics:\n  enabled: false\nserviceMonitor:\n  enabled: false\n  additionalLabels: {}\n  interval: 1m\n  scrapeTimeout: 10s\nresources: {}\nextraVolumeMounts: []\nextraVolumes: []\nnodeSelector: {}\ntolerations: []\naffinity: {}\n
5. Click the OK button to complete the installation of the metrics-server plugin; the system will then automatically jump to the Helm Apps list page. After a few minutes, refresh the page and you will see the newly installed application.

                                                  Note

The metrics-server plugin can only be completely deleted on the Helm Apps list page. If you delete metrics-server only on the workload page, this deletes just the workload copy of the application; the application itself remains, and an error will be prompted when you reinstall the plugin later.

                                                  "},{"location":"en/end-user/kpanda/scale/install-vpa.html","title":"Install vpa","text":"

The Vertical Pod Autoscaler (VPA) makes cluster resource allocation more reasonable and avoids wasting cluster resources. The vpa plugin is the key component for implementing vertical container autoscaling.

                                                  This section describes how to install the vpa plugin.

Note

To use VPA policies, you must install not only the vpa plugin but also the metrics-server plugin.
                                                  "},{"location":"en/end-user/kpanda/scale/install-vpa.html#prerequisites","title":"Prerequisites","text":"

                                                  Before installing the vpa plugin, the following prerequisites need to be met:

• The Kubernetes cluster has been integrated or created in the Container Management module, and the cluster UI is accessible.

• A namespace has been created.

                                                  • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                                  "},{"location":"en/end-user/kpanda/scale/install-vpa.html#steps","title":"Steps","text":"

                                                  Refer to the following steps to install the vpa plugin for the cluster.

                                                  1. On the Clusters page, find the target cluster where the plugin needs to be installed, click the name of the cluster, then click Workloads -> Deployments on the left, and click the name of the target workload.

                                                  2. On the workload details page, click the Auto Scaling tab, and click Install on the right side of VPA .

3. Read the plugin introduction, select a version, and click the Install button. Version 1.5.0 or later is recommended.

                                                  4. Review the configuration parameters described below.

• Name: Enter the plugin name. Note that the name can be up to 63 characters, may only contain lowercase letters, numbers, and separators (\"-\"), and must start and end with a lowercase letter or number, such as vpa-01.
                                                    • Namespace: Select which namespace the plugin will be installed in, here we take default as an example.
                                                    • Version: The version of the plugin, here we take the 1.5.0 version as an example.
                                                    • Ready Wait: When enabled, it will wait for all associated resources under the application to be in the ready state before marking the application installation as successful.
                                                    • Failed to delete: If the plugin installation fails, delete the associated resources that have already been installed. When enabled, Wait will be enabled synchronously by default.
                                                    • Detailed log: When enabled, a detailed log of the installation process will be recorded.

                                                    Note

After enabling Ready Wait and/or Failed to delete, it can take a while for the application to be marked as running.

                                                  5. Click OK in the lower right corner of the page, and the system will automatically jump to the Helm Apps list page. Wait a few minutes and refresh the page to see the application you just installed.

                                                    Warning

                                                    If you need to delete the vpa plugin, you should go to the Helm Apps list page to delete it completely.

If you delete the plugin under the Auto Scaling tab of the workload, only the plugin's workload copy is deleted; the plugin itself remains, and an error will be prompted when the plugin is reinstalled later.

6. Go back to the Auto Scaling tab on the workload details page; the interface displays Plugin installed. Now you can start creating VPA policies.

                                                  "},{"location":"en/end-user/kpanda/scale/knative/install.html","title":"Installation","text":"

                                                  Knative is a platform-agnostic solution for running serverless deployments.

                                                  "},{"location":"en/end-user/kpanda/scale/knative/install.html#steps","title":"Steps","text":"
                                                  1. Log in to the cluster, click the sidebar Helm Apps \u2192 Helm Charts , enter knative in the search box at the top right, and then press the enter key to search.

2. Click knative-operator to enter the installation configuration interface. On this page you can view the available versions and the optional Helm values parameters.

                                                  3. After clicking the install button, you will enter the installation configuration interface.

4. Enter the name and installation tenant; it is recommended to check Wait and Detailed Logs.

5. In the settings below, you can tick Serving and enter the installation tenant of the Knative Serving component, which will be deployed after installation and managed by the Knative Operator. (A sketch of the equivalent custom resource follows.)
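Ticking Serving is roughly equivalent to creating a KnativeServing custom resource for the Knative Operator to reconcile. A minimal sketch, assuming Kourier as the traffic entry:

apiVersion: operator.knative.dev/v1beta1\nkind: KnativeServing\nmetadata:\n  name: knative-serving\n  namespace: knative-serving\nspec:\n  ingress:\n    kourier:\n      enabled: true           # use Knative's Envoy-based Kourier ingress\n  config:\n    network:\n      ingress-class: kourier.ingress.networking.knative.dev\n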

                                                  "},{"location":"en/end-user/kpanda/scale/knative/knative.html","title":"Knative Introduction","text":"

                                                  Knative provides a higher level of abstraction, simplifying and speeding up the process of building, deploying, and managing applications on Kubernetes. It allows developers to focus more on implementing business logic, while leaving most of the infrastructure and operations work to Knative, significantly improving productivity.

                                                  "},{"location":"en/end-user/kpanda/scale/knative/knative.html#components","title":"Components","text":"

                                                  The Knative operator runs the following components.

                                                  knative-operator   knative-operator-58f7d7db5c-7f6r5      1/1     Running     0     6m55s\nknative-operator   operator-webhook-667dc67bc-qvrv4       1/1     Running     0     6m55s\n

                                                  The Knative serving components are as follows.

                                                  knative-serving        3scale-kourier-gateway-d69fbfbd-bd8d8   1/1     Running     0                 7m13s\nknative-serving        activator-7c6fddd698-wdlng              1/1     Running     0                 7m3s\nknative-serving        autoscaler-8f4b876bb-kd25p              1/1     Running     0                 7m17s\nknative-serving        autoscaler-hpa-5f7f74679c-vkc7p         1/1     Running     0                 7m15s\nknative-serving        controller-789c896c46-tfvsv             1/1     Running     0                 7m17s\nknative-serving        net-kourier-controller-7db578c889-7gd5l 1/1     Running     0                 7m14s\nknative-serving        webhook-5c88b94c5-78x7m                 1/1     Running     0                 7m1s\nknative-serving        storage-version-migration-serving-serving-1.12.2-t7zvd   0/1  Completed   0   7m15s\n
• Activator: Queues requests (if a Knative Service has scaled to zero); calls the autoscaler to bring back services that have scaled down to zero and forwards queued requests. The Activator can also act as a request buffer, handling bursts of traffic.
• Autoscaler: Responsible for scaling Knative services based on configuration, metrics, and incoming requests.
• Controller: Manages the state of Knative CRs. It monitors multiple objects, manages the lifecycle of dependent resources, and updates resource status.
• Queue-Proxy: Sidecar container injected into each Knative Service. Responsible for collecting traffic data and reporting it to the Autoscaler, which then initiates scaling requests based on this data and preset rules.
• Webhooks: Knative Serving has several Webhooks responsible for validating and mutating Knative resources.

"},{"location":"en/end-user/kpanda/scale/knative/knative.html#ingress-traffic-entry-solutions","title":"Ingress Traffic Entry Solutions","text":"

• Istio: If Istio is already in use, it can be chosen as the traffic entry solution.
• Contour: If Contour has been enabled in the cluster, it can be chosen as the traffic entry solution.
• Kourier: If neither of the above two Ingress components is present, Knative's Envoy-based Kourier Ingress can be used as the traffic entry solution.

"},{"location":"en/end-user/kpanda/scale/knative/knative.html#autoscaler-solutions-comparison","title":"Autoscaler Solutions Comparison","text":"

• Knative Pod Autoscaler (KPA): core part of Knative Serving: Yes; enabled by default: Yes; scale-to-zero support: Yes; CPU-based autoscaling support: No.
• Horizontal Pod Autoscaler (HPA): core part of Knative Serving: No; enabled by default: No (needs to be enabled after installing Knative Serving); scale-to-zero support: No; CPU-based autoscaling support: Yes.

"},{"location":"en/end-user/kpanda/scale/knative/knative.html#crd","title":"CRD","text":"

• Services (service.serving.knative.dev): Automatically manages the entire lifecycle of workloads, controls the creation of other objects, and ensures applications have Routes, Configurations, and a new revision with each update.
• Routes (route.serving.knative.dev): Maps network endpoints to one or more revisions; supports traffic distribution and version routing.
• Configurations (configuration.serving.knative.dev): Maintains the desired state of deployments, provides separation between code and configuration, and follows the Twelve-Factor App methodology; modifying a configuration creates a new revision.
• Revisions (revision.serving.knative.dev): A snapshot of the workload at each modification point in time; an immutable object that scales automatically based on traffic.

"},{"location":"en/end-user/kpanda/scale/knative/playground.html","title":"Knative Practices","text":"

                                                  In this section, we will delve into learning Knative through several practical exercises.

                                                  "},{"location":"en/end-user/kpanda/scale/knative/playground.html#case-1-hello-world","title":"case 1 - Hello World","text":"
                                                  apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n

                                                  You can use kubectl to check the status of a deployed application that has been automatically configured with ingress and scalers by Knative.

                                                  ~ kubectl get service.serving.knative.dev/hello\nNAME    URL                                              LATESTCREATED   LATESTREADY   READY   REASON\nhello   http://hello.knative-serving.knative.loulan.me   hello-00001     hello-00001   True\n

The deployed Pod YAML is as follows; the Pod consists of two containers: user-container and queue-proxy.

                                                  apiVersion: v1\nkind: Pod\nmetadata:\n  name: hello-00003-deployment-5fcb8ccbf-7qjfk\nspec:\n  containers:\n  - name: user-container\n  - name: queue-proxy\n

                                                  Request Flow:

1. case1 When there is low or no traffic, requests are routed to the activator.
2. case2 When traffic is high, requests are routed directly to the Pods once it exceeds the target-burst-capacity.
  1. Configured as 0, the activator is in the path only when scaling from zero.
  2. Configured as -1, the activator is always present in the request path.
  3. Configured as >0, it is the number of additional concurrent requests the system can absorb before scaling is triggered.
3. case3 When traffic decreases again, requests are routed back to the activator once current_demand + target-burst-capacity > (pods * concurrency-target).

  That is, the total number of pending requests plus the number of requests allowed to exceed the target concurrency is greater than the target concurrency per Pod multiplied by the number of Pods.
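The target-burst-capacity behavior described above can be tuned per Service through an annotation. A minimal sketch (the value 200 is illustrative):

apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target-burst-capacity: \"200\"   # extra concurrent requests tolerated before routing falls back to the activator\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n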

                                                  "},{"location":"en/end-user/kpanda/scale/knative/playground.html#case-2-based-on-concurrent-elastic-scaling","title":"case 2 - Based on Concurrent Elastic Scaling","text":"

First, apply the following YAML definition to the cluster.

                                                  apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"1\"\n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n

                                                  Execute the following command for testing, and you can observe the scaling of the Pods by using kubectl get pods -A -w.

                                                  wrk -t2 -c4 -d6s http://hello.knative-serving.knative.daocloud.io/\n
                                                  "},{"location":"en/end-user/kpanda/scale/knative/playground.html#case-3-based-on-concurrent-elastic-scaling-scale-out-in-advance-to-reach-a-specific-ratio","title":"case 3 - Based on concurrent elastic scaling, scale out in advance to reach a specific ratio.","text":"

We can achieve this easily. For example, limit the concurrency to 10 per container and set autoscaling.knative.dev/target-utilization-percentage: 70, so Pods start scaling out when 70% of the target concurrency is reached.

apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"10\"\n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"\n        autoscaling.knative.dev/target-utilization-percentage: \"70\"\n        autoscaling.knative.dev/metric: \"concurrency\"\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n
                                                  "},{"location":"en/end-user/kpanda/scale/knative/playground.html#case-4-canary-releasetraffic-percentage","title":"case 4 - Canary Release/Traffic Percentage","text":"

                                                  We can control the distribution of traffic to each version through spec.traffic.

                                                  apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"1\"  \n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"         \n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n  traffic:\n  - latestRevision: true\n    percent: 50\n  - latestRevision: false\n    percent: 50\n    revisionName: hello-00001\n
                                                  "},{"location":"en/end-user/kpanda/scale/knative/scene.html","title":"Use Cases","text":""},{"location":"en/end-user/kpanda/scale/knative/scene.html#suitable-cases","title":"Suitable Cases","text":"
                                                  • High-concurrency businesses with short connections
                                                  • Businesses that require elastic scaling
                                                  • Scenarios where a large number of applications need to scale down to 0 to improve resource utilization
                                                  • AI serving workloads that scale based on specific metrics
                                                  "},{"location":"en/end-user/kpanda/scale/knative/scene.html#unsuitable-cases","title":"Unsuitable Cases","text":"
                                                  • Businesses with long-lived connections
                                                  • Latency-sensitive businesses
                                                  • Traffic splitting based on cookies
                                                  • Traffic splitting based on headers
                                                  "},{"location":"en/end-user/kpanda/security/index.html","title":"Types of Security Scans","text":"

                                                  AI platform Container Management provides three types of security scans:

                                                  • Compliance Scan: Conducts security scans on cluster nodes based on CIS Benchmark.
                                                  • Authorization Scan: Checks for security and compliance issues in the Kubernetes cluster, records and verifies authorized access, object changes, events, and other activities related to the Kubernetes API.
                                                  • Vulnerability Scan: Scans the Kubernetes cluster for potential vulnerabilities and risks, such as unauthorized access, sensitive information leakage, weak authentication, container escape, etc.
                                                  "},{"location":"en/end-user/kpanda/security/index.html#compliance-scan","title":"Compliance Scan","text":"

                                                  Compliance scanning targets cluster nodes. The scan report lists the scan items and their results, and provides remediation suggestions for any failed items. For the specific security rules used during scanning, refer to the CIS Kubernetes Benchmark.

                                                  The focus of the scan varies when checking different types of nodes.

                                                  • Scan the control plane nodes (Controller)

                                                    • Focus on the security of system components such as the API Server, controller-manager, scheduler, and kubelet.
                                                    • Check the security configuration of the etcd database.
                                                    • Verify whether the cluster's authentication mechanism, authorization policies, and network security configuration meet security standards.
                                                  • Scan the worker nodes

                                                    • Check whether the configuration of the kubelet and the container runtime (such as Docker) meets security standards.
                                                    • Verify that container images are trusted and have been verified.
                                                    • Check whether the node's network security configuration meets security standards.

                                                  Tip

                                                  To use compliance scanning, you need to create a scan configuration first, and then create a scan policy based on that configuration. After executing the scan policy, you can view the scan report.

                                                  "},{"location":"en/end-user/kpanda/security/index.html#authorization-scan","title":"Authorization Scan","text":"

                                                  Authorization scanning focuses on security vulnerabilities caused by authorization issues. It helps users identify security threats in Kubernetes clusters and determine which resources need further review and protection. By performing these checks, users gain a clearer, more comprehensive view of their Kubernetes environment and can ensure that the cluster meets Kubernetes best practices and security standards.

                                                  Specifically, authorization scanning supports the following operations:

                                                  • Scans the health status of all nodes in the cluster.

                                                  • Scans the running state of components in the cluster, such as kube-apiserver, kube-controller-manager, kube-scheduler, etc.

                                                  • Scans security configurations: checks Kubernetes security configuration, including:

                                                    • API security: whether unsafe API versions are enabled, whether appropriate RBAC roles and permission restrictions are set, etc.
                                                    • Container security: whether insecure images are used, whether privileged mode is enabled, whether appropriate security contexts are set, etc.
                                                    • Network security: whether appropriate network policies are enabled to restrict traffic, whether TLS encryption is used, etc.
                                                    • Storage security: whether appropriate encryption and access controls are enabled.
                                                    • Application security: whether necessary security measures are in place, such as password management and defenses against cross-site scripting attacks.
                                                  • Provides warnings and suggestions: Security best practices that cluster administrators should perform, such as regularly rotating certificates, using strong passwords, restricting network access, etc.

                                                  Tip

                                                  To use authorization scanning, you need to create a scan policy first. After executing the scan policy, you can view the scan report. For details, refer to Security Scanning.

                                                  "},{"location":"en/end-user/kpanda/security/index.html#vulnerability-scan","title":"Vulnerability Scan","text":"

                                                  Vulnerability scanning focuses on scanning potential malicious attacks and security vulnerabilities, such as remote code execution, SQL injection, XSS attacks, and some attacks specific to Kubernetes. The final scan report lists the security vulnerabilities in the cluster and provides repair suggestions.

                                                  Tip

                                                  To use vulnerability scanning, you need to create a scan policy first. After executing the scan policy, you can view the scan report. For details, refer to Vulnerability Scan.

                                                  "},{"location":"en/end-user/kpanda/security/audit.html","title":"Permission Scan","text":"

                                                  To use the Permission Scan feature, you need to create a scan policy first. After executing the policy, a scan report will be automatically generated for viewing.

                                                  "},{"location":"en/end-user/kpanda/security/audit.html#create-a-scan-policy","title":"Create a Scan Policy","text":"
                                                  1. On the left navigation bar of the homepage in the Container Management module, click Security Management .

                                                  2. Click Permission Scan on the left navigation bar, then click the Scan Policy tab and click Create Scan Policy on the right.

                                                  3. Fill in the configuration according to the following instructions, and then click OK .

                                                    • Cluster: Select the cluster to be scanned. The optional cluster list comes from the clusters accessed or created in the Container Management module. If the desired cluster is not available, you can access or create a cluster in the Container Management module.
                                                    • Scan Type:

                                                      • Immediate scan: Performs a scan immediately after the scan policy is created; it cannot be executed again later, automatically or manually.
                                                      • Scheduled scan: Automatically repeats the scan at the configured interval.
                                                    • Number of Scan Reports to Keep: Set the maximum number of scan reports to keep. When the limit is exceeded, the earliest reports are deleted first.

                                                  "},{"location":"en/end-user/kpanda/security/audit.html#updatedelete-scan-policies","title":"Update/Delete Scan Policies","text":"

                                                  After creating a scan policy, you can update or delete it as needed.

                                                  Under the Scan Policy tab, click the \u2507 action button to the right of a configuration:

                                                  • For periodic scan policies:

                                                    • Select Execute Immediately to perform an additional scan outside the regular schedule.
                                                    • Select Disable to pause the scan schedule; click Enable to resume executing the scan policy on schedule.
                                                    • Select Edit to update the configuration. You can update the scan configuration, type, scan cycle, and report retention quantity. The configuration name and the target cluster cannot be changed.
                                                    • Select Delete to delete the configuration.
                                                  • One-time scan policies only support the Delete operation.

                                                  "},{"location":"en/end-user/kpanda/security/audit.html#view-scan-reports","title":"View Scan Reports","text":"
                                                  1. Under the Security Management -> Permission Scanning -> Scan Reports tab, click the report name.

                                                    Clicking Delete on the right of a report allows you to manually delete the report.

                                                  2. View the scan report content, including:

                                                    • The target cluster scanned.
                                                    • The scan policy used.
                                                    • The total number of scan items, warnings, and errors.
                                                    • In reports generated by periodic scan policies, you can also view the scan frequency.
                                                    • The start time of the scan.
                                                    • Check details, such as the checked resources, resource types, scan results, error types, and error details.
                                                  "},{"location":"en/end-user/kpanda/security/hunter.html","title":"Vulnerability Scan","text":"

                                                  To use the Vulnerability Scan feature, you need to create a scan policy first. After executing the policy, a scan report will be automatically generated for viewing.

                                                  "},{"location":"en/end-user/kpanda/security/hunter.html#create-a-scan-policy","title":"Create a Scan Policy","text":"
                                                  1. On the left navigation bar of the homepage in the Container Management module, click Security Management .

                                                  2. Click Vulnerability Scan on the left navigation bar, then click the Scan Policy tab and click Create Scan Policy on the right.

                                                  3. Fill in the configuration according to the following instructions, and then click OK .

                                                    • Cluster: Select the cluster to be scanned. The optional cluster list comes from the clusters accessed or created in the Container Management module. If the desired cluster is not available, you can access or create a cluster in the Container Management module.
                                                    • Scan Type:

                                                      • Immediate scan: Performs a scan immediately after the scan policy is created; it cannot be executed again later, automatically or manually.
                                                      • Scheduled scan: Automatically repeats the scan at the configured interval.
                                                    • Number of Scan Reports to Keep: Set the maximum number of scan reports to keep. When the limit is exceeded, the earliest reports are deleted first.

                                                  "},{"location":"en/end-user/kpanda/security/hunter.html#updatedelete-scan-policies","title":"Update/Delete Scan Policies","text":"

                                                  After creating a scan policy, you can update or delete it as needed.

                                                  Under the Scan Policy tab, click the \u2507 action button to the right of a configuration:

                                                  • For periodic scan policies:

                                                    • Select Execute Immediately to perform an additional scan outside the regular schedule.
                                                    • Select Disable to pause the scan schedule; click Enable to resume executing the scan policy on schedule.
                                                    • Select Edit to update the configuration. You can update the scan configuration, type, scan cycle, and report retention quantity. The configuration name and the target cluster cannot be changed.
                                                    • Select Delete to delete the configuration.
                                                  • One-time scan policies only support the Delete operation.

                                                  "},{"location":"en/end-user/kpanda/security/hunter.html#viewe-scan-reports","title":"Viewe Scan Reports","text":"
                                                  1. Under the Security Management -> Vulnerability Scanning -> Scan Reports tab, click the report name.

                                                    Clicking Delete on the right of a report allows you to manually delete the report.

                                                  2. View the scan report content, including:

                                                    • The target cluster scanned.
                                                    • The scan policy used.
                                                    • The scan frequency.
                                                    • The total number of risks, high risks, medium risks, and low risks.
                                                    • The time of the scan.
                                                    • Check details such as vulnerability ID, vulnerability type, vulnerability name, vulnerability description, etc.
                                                  "},{"location":"en/end-user/kpanda/security/cis/config.html","title":"Scan Configuration","text":"

                                                  The first step in using CIS Scanning is to create a scan configuration. Based on the scan configuration, you can then create scan policies, execute scan policies, and finally view scan results.

                                                  "},{"location":"en/end-user/kpanda/security/cis/config.html#create-a-scan-configuration","title":"Create a Scan Configuration","text":"

                                                  The steps for creating a scan configuration are as follows:

                                                  1. Click Security Management in the left navigation bar of the homepage of the container management module.

                                                  2. By default, you land on the Compliance Scanning page. Click the Scan Configuration tab, then click Create Scan Configuration in the upper-right corner.

                                                  3. Fill in the configuration name, select the configuration template, and optionally check the scan items, then click OK .

                                                    Scan Template: Currently, two templates are provided. The kubeadm template is suitable for general Kubernetes clusters. The daocloud template builds on the kubeadm template and, based on the platform design of AI platform, ignores the scan items that do not apply to AI platform.

                                                  "},{"location":"en/end-user/kpanda/security/cis/config.html#view-scan-configuration","title":"View Scan Configuration","text":"

                                                  Under the scan configuration tab, clicking the name of a scan configuration displays the type of the configuration, the number of scan items, the creation time, the configuration template, and the specific scan items enabled for the configuration.

                                                  "},{"location":"en/end-user/kpanda/security/cis/config.html#updatdelete-scan-configuration","title":"Updat/Delete Scan Configuration","text":"

                                                  After a scan configuration has been successfully created, it can be updated or deleted according to your needs.

                                                  Under the scan configuration tab, click the \u2507 action button to the right of a configuration:

                                                  • Select Edit to update the configuration. You can update the description, template, and scan items. The configuration name cannot be changed.
                                                  • Select Delete to delete the configuration.
                                                  "},{"location":"en/end-user/kpanda/security/cis/policy.html","title":"Scan Policy","text":""},{"location":"en/end-user/kpanda/security/cis/policy.html#create-a-scan-policy","title":"Create a Scan Policy","text":"

                                                  After creating a scan configuration, you can create a scan policy based on the configuration.

                                                  1. Under the Security Management -> Compliance Scanning page, click the Scan Policy tab on the right to create a scan policy.

                                                  2. Fill in the configuration according to the following instructions and click OK .

                                                    • Cluster: Select the cluster to be scanned. The optional cluster list comes from the clusters accessed or created in the Container Management module. If the desired cluster is not available, you can access or create a cluster in the Container Management module.
                                                    • Scan Configuration: Select a pre-created scan configuration. The scan configuration determines which specific scan items need to be performed.
                                                    • Scan Type:

                                                      • Immediate scan: Performs a scan immediately after the scan policy is created; it cannot be executed again later, automatically or manually.
                                                      • Scheduled scan: Automatically repeats the scan at the configured interval.
                                                    • Number of Scan Reports to Keep: Set the maximum number of scan reports to keep. When the limit is exceeded, the earliest reports are deleted first.

                                                  "},{"location":"en/end-user/kpanda/security/cis/policy.html#updatedelete-scan-policies","title":"Update/Delete Scan Policies","text":"

                                                  After creating a scan policy, you can update or delete it as needed.

                                                  Under the Scan Policy tab, click the \u2507 action button to the right of a configuration:

                                                  • For periodic scan policies:

                                                    • Select Execute Immediately to perform an additional scan outside the regular schedule.
                                                    • Select Disable to pause the scan schedule; click Enable to resume executing the scan policy on schedule.
                                                    • Select Edit to update the configuration. You can update the scan configuration, type, scan cycle, and report retention quantity. The configuration name and the target cluster cannot be changed.
                                                    • Select Delete to delete the configuration.
                                                  • One-time scan policies only support the Delete operation.

                                                  "},{"location":"en/end-user/kpanda/security/cis/report.html","title":"Scan Report","text":"

                                                  After executing a scan policy, a scan report will be generated automatically. You can view the scan report online or download it to your local computer.

                                                  • Download and View

                                                    Under the Security Management -> Compliance Scanning page, click the Scan Report tab, then click the \u2507 action button to the right of a report and select Download .

                                                  • View Online

                                                    Clicking the name of a report allows you to view its content online, which includes:

                                                    • The target cluster scanned.
                                                    • The scan policy and scan configuration used.
                                                    • The start time of the scan.
                                                    • The total number of scan items, the number passed, and the number failed.
                                                    • For failed scan items, repair suggestions are provided.
                                                    • For passed scan items, more secure operational suggestions are provided.
                                                  "},{"location":"en/end-user/kpanda/storage/pv.html","title":"data volume (PV)","text":"

                                                  A data volume (PersistentVolume, PV) is a piece of storage in the cluster that can be provisioned in advance by an administrator or provisioned dynamically through a StorageClass. A PV is a cluster resource, but it has an independent life cycle and is not deleted when the Pod that uses it ends. Mounting a PV into a workload persists the workload's data: the PV holds a data directory that the containers in the Pod can access.

                                                  "},{"location":"en/end-user/kpanda/storage/pv.html#create-data-volume","title":"Create data volume","text":"

                                                  Currently, data volumes can be created in two ways: YAML and form. Each way has its own advantages and suits different users.

                                                  • Creating via YAML takes fewer steps and is more efficient, but it has a higher barrier: you need to be familiar with the YAML configuration of data volumes.

                                                  • Creating via the form is more intuitive and easier: just fill in the values according to the prompts, although it involves more steps.

                                                  "},{"location":"en/end-user/kpanda/storage/pv.html#yaml-creation","title":"YAML creation","text":"
                                                  1. Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume (PV) -> Create with YAML in the left navigation bar.

                                                  2. Enter or paste the prepared YAML file in the pop-up box, and click OK at the bottom of the pop-up box.

                                                    You can import a YAML file from your local machine, or download and save the file you have filled in.

                                                  "},{"location":"en/end-user/kpanda/storage/pv.html#form-creation","title":"Form Creation","text":"
                                                  1. Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume (PV) -> Create Data Volume (PV) in the left navigation bar.

                                                  2. Fill in the basic information.

                                                    • The data volume name, data volume type, mount path, volume mode, and node affinity cannot be changed after creation.
                                                    • Data volume type: For a detailed introduction to volume types, refer to the official Kubernetes document Volumes.

                                                    • Local: Packages the node's local storage behind a PVC interface; the container uses the PVC directly without needing to care about the underlying storage type. Local volumes do not support dynamic provisioning, but they do support node affinity, which limits the nodes from which the data volume can be accessed.

                                                    • HostPath: Uses files or directories on the node's file system as the data volume. It does not support node affinity-based Pod scheduling.

                                                    • Mount path: Mounts the data volume to a specific directory in the container.

                                                    • Access mode:

                                                      • ReadWriteOnce: The data volume can be mounted by a single node in read-write mode.
                                                      • ReadWriteMany: The data volume can be mounted by multiple nodes in read-write mode.
                                                      • ReadOnlyMany: The data volume can be mounted read-only by multiple nodes.
                                                      • ReadWriteOncePod: The data volume can be mounted read-write by a single Pod.
                                                    • Reclaim policy:

                                                      • Retain: The PV is not deleted; its status only changes to Released, and it must be manually reclaimed by the user. For how to reclaim it manually, refer to Persistent Volume.
                                                      • Recycle: The PV is kept but its data is emptied with a basic wipe (rm -rf /thevolume/*).
                                                      • Delete: The PV and its data are deleted together.
                                                    • Volume mode:

                                                      • Filesystem: The data volume is mounted into a directory in the Pod. If the volume is backed by a device that is currently empty, a file system is created on the device before the volume is mounted for the first time.
                                                      • Block: Uses the data volume as a raw block device. The volume is presented to the Pod as a block device without any file system on it, allowing the Pod to access it faster.
                                                    • Node affinity: Limits the nodes from which the data volume can be accessed; see the example below.
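
                                                  For reference, a minimal PV sketch combining the fields above: a Local volume with node affinity, ReadWriteOnce access, a Retain reclaim policy, and Filesystem volume mode. The name, capacity, path, and node name are placeholders:

                                                  apiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: local-pv-example        # placeholder name\nspec:\n  capacity:\n    storage: 10Gi\n  volumeMode: Filesystem\n  accessModes:\n    - ReadWriteOnce\n  persistentVolumeReclaimPolicy: Retain\n  local:\n    path: /mnt/disks/vol1       # placeholder path on the node\n  nodeAffinity:                 # required for Local volumes\n    required:\n      nodeSelectorTerms:\n        - matchExpressions:\n            - key: kubernetes.io/hostname\n              operator: In\n              values:\n                - node-1        # placeholder node name\n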

                                                  "},{"location":"en/end-user/kpanda/storage/pv.html#view-data-volume","title":"View data volume","text":"

                                                  Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume (PV) in the left navigation bar.

                                                  • On this page, you can view all data volumes in the current cluster, as well as information such as the status, capacity, and namespace of each data volume.

                                                  • Data volumes can be sorted in ascending or descending order by name, status, namespace, and creation time.

                                                  • Click the name of a data volume to view the basic configuration, StorageClass information, labels, comments, etc. of the data volume.

                                                  "},{"location":"en/end-user/kpanda/storage/pv.html#clone-data-volume","title":"Clone data volume","text":"

                                                  Cloning a data volume creates a new data volume based on the configuration of the original.

                                                  1. Enter the clone page

                                                    • On the data volume list page, find the data volume to be cloned, and select Clone under the operation bar on the right.

                                                      You can also click the name of the data volume, click the operation button in the upper right corner of the details page and select Clone .

                                                  2. Use the original configuration directly, or modify it as needed, and click OK at the bottom of the page.

                                                  "},{"location":"en/end-user/kpanda/storage/pv.html#update-data-volume","title":"Update data volume","text":"

                                                  Data volumes can be updated in two ways: through the form or through a YAML file.

                                                  Note

                                                  Only the alias, capacity, access mode, reclaim policy, labels, and comments of a data volume can be updated.

                                                  • On the data volume list page, find the data volume that needs to be updated, then select Update under the operation bar on the right to update it through the form, or select Edit YAML to update it through YAML.

                                                  • Alternatively, click the name of the data volume to enter its details page, then select Update in the upper right corner to update it through the form, or select Edit YAML to update it through YAML.

                                                  "},{"location":"en/end-user/kpanda/storage/pv.html#delete-data-volume","title":"Delete data volume","text":"

                                                  On the data volume list page, find the data volume to be deleted, and select Delete in the operation column on the right.

                                                  You can also click the name of the data volume, click the operation button in the upper right corner of the details page and select Delete .

                                                  "},{"location":"en/end-user/kpanda/storage/pvc.html","title":"Data volume declaration (PVC)","text":"

                                                  A persistent volume claim (PersistentVolumeClaim, PVC) expresses a user's request for storage. A PVC consumes PV resources and claims a data volume with a specific size and access mode; for example, it can require a PV to be mounted in ReadWriteOnce, ReadOnlyMany, or ReadWriteMany mode.

                                                  "},{"location":"en/end-user/kpanda/storage/pvc.html#create-data-volume-statement","title":"Create data volume statement","text":"

                                                  Currently, data volume declarations can be created in two ways: YAML and form. Each way has its own advantages and suits different users.

                                                  • Creating via YAML takes fewer steps and is more efficient, but it has a higher barrier: you need to be familiar with the YAML configuration of data volume declarations.

                                                  • Creating via the form is more intuitive and easier: just fill in the values according to the prompts, although it involves more steps.

                                                  "},{"location":"en/end-user/kpanda/storage/pvc.html#yaml-creation","title":"YAML creation","text":"
                                                  1. Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume Declaration (PVC) -> Create with YAML in the left navigation bar.

                                                  2. Enter or paste the prepared YAML file in the pop-up box, and click OK at the bottom of the pop-up box.

                                                    You can import a YAML file from your local machine, or download and save the file you have filled in.
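
                                                  As a minimal sketch of such a YAML file, the following PVC requests 10Gi from an existing StorageClass in ReadWriteOnce mode (all names are placeholders):

                                                  apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: pvc-example                  # placeholder name\n  namespace: default                 # placeholder namespace\nspec:\n  storageClassName: sc-example       # placeholder StorageClass name\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 10Gi\n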

                                                  "},{"location":"en/end-user/kpanda/storage/pvc.html#form-creation","title":"Form Creation","text":"
                                                  1. Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume Declaration (PVC) -> Create Data Volume Declaration (PVC) in the left navigation bar.

                                                  2. Fill in the basic information.

                                                    • The name, namespace, creation method, data volume, capacity, and access mode of a data volume declaration cannot be changed after creation.
                                                    • Creation method: Dynamically create a new data volume declaration from an existing StorageClass or data volume, or create one based on a snapshot of a data volume declaration.

                                                      When creating from a snapshot, the declared capacity cannot be modified at creation time; it can be modified after creation completes.

                                                    • After selecting the creation method, select the desired StorageClass/data volume/snapshot from the drop-down list.

                                                    • Access mode:

                                                      • ReadWriteOnce: The data volume declaration can be mounted by a single node in read-write mode.
                                                      • ReadWriteMany: The data volume declaration can be mounted by multiple nodes in read-write mode.
                                                      • ReadOnlyMany: The data volume declaration can be mounted read-only by multiple nodes.
                                                      • ReadWriteOncePod: The data volume declaration can be mounted by a single Pod in read-write mode.
                                                  "},{"location":"en/end-user/kpanda/storage/pvc.html#view-data-volume-statement","title":"View data volume statement","text":"

                                                  Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume Declaration (PVC) in the left navigation bar.

                                                  • On this page, you can view all data volume declarations in the current cluster, as well as information such as the status, capacity, and namespace of each data volume declaration.

                                                  • Data volume declarations can be sorted in ascending or descending order by name, status, namespace, and creation time.

                                                  • Click the name of the data volume declaration to view the basic configuration, StorageClass information, labels, comments and other information of the data volume declaration.

                                                  "},{"location":"en/end-user/kpanda/storage/pvc.html#expansion-data-volume-statement","title":"Expansion data volume statement","text":"
                                                  1. In the left navigation bar, click Container Storage -> Data Volume Declaration (PVC) , and find the data volume declaration whose capacity you want to adjust.

                                                  2. Click the name of the data volume declaration, and then click the operation button in the upper right corner of the page and select Expansion .

                                                  3. Enter the target capacity and click OK .
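
                                                  Behind the form, expansion amounts to raising spec.resources.requests.storage on the claim, which requires the StorageClass to have expansion enabled. A hedged kubectl equivalent, with placeholder names:

                                                  kubectl patch pvc pvc-example -n default -p '{\"spec\":{\"resources\":{\"requests\":{\"storage\":\"20Gi\"}}}}'\n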

                                                  "},{"location":"en/end-user/kpanda/storage/pvc.html#clone-data-volume-statement","title":"Clone data volume statement","text":"

                                                  Cloning a data volume declaration creates a new data volume declaration based on the configuration of the original.

                                                  1. Enter the clone page

                                                    • On the data volume declaration list page, find the data volume declaration that needs to be cloned, and select Clone under the operation bar on the right.

                                                      You can also click the name of the data volume declaration, click the operation button in the upper right corner of the details page and select Clone .

                                                  2. Use the original configuration directly, or modify it as needed, and click OK at the bottom of the page.

                                                  "},{"location":"en/end-user/kpanda/storage/pvc.html#update-data-volume-statement","title":"Update data volume statement","text":"

                                                  Data volume declarations can be updated in two ways: through the form or through a YAML file.

                                                  Note

                                                  Only the alias, labels, and annotations of a data volume declaration can be updated.

                                                  • On the data volume declaration list page, find the declaration that needs to be updated, then select Update in the operation bar on the right to update it through the form, or select Edit YAML to update it through YAML.

                                                  • Alternatively, click the name of the data volume declaration to enter its details page, then select Update in the upper right corner to update it through the form, or select Edit YAML to update it through YAML.

                                                  "},{"location":"en/end-user/kpanda/storage/pvc.html#delete-data-volume-statement","title":"Delete data volume statement","text":"

                                                  On the data volume declaration list page, find the declaration to be deleted, and select Delete in the operation column on the right.

                                                  You can also click the name of the data volume declaration, click the operation button in the upper right corner of the details page, and select Delete.

                                                  "},{"location":"en/end-user/kpanda/storage/pvc.html#common-problem","title":"common problem","text":"
                                                  1. If there is no optional StorageClass or data volume in the list, you can Create a StorageClass or Create a data volume.

                                                  2. If there is no optional snapshot in the list, you can enter the details page of the data volume declaration and create a snapshot in the upper right corner.

                                                  3. If the StorageClass (SC) used by the data volume declaration is not enabled for snapshots, snapshots cannot be made, and the page will not display the \"Make Snapshot\" option.

                                                  4. If the StorageClass (SC) used by the data volume declaration does not have the capacity expansion feature enabled, the data volume does not support capacity expansion, and the page will not display the capacity expansion option.
                                                  "},{"location":"en/end-user/kpanda/storage/sc-share.html","title":"shared StorageClass","text":"

                                                  The AI platform container management module supports sharing a StorageClass with multiple namespaces to improve resource utilization efficiency.

                                                  1. Find the StorageClass that needs to be shared in the StorageClass list, and click Authorize Namespace under the operation bar on the right.

                                                  2. Click Custom Namespace to select, one by one, the namespaces with which this StorageClass should be shared.

                                                    • Click Authorize All Namespaces to share this StorageClass with all namespaces in the current cluster at once.
                                                    • Click Remove Authorization under the operation bar on the right side of the list to revoke the authorization and stop sharing this StorageClass with that namespace.
                                                  "},{"location":"en/end-user/kpanda/storage/sc.html","title":"StorageClass (SC)","text":"

                                                  A StorageClass refers to a large pool of storage resources composed of many physical disks. After connecting to various storage vendors, the platform supports creating block StorageClasses, local StorageClasses, and custom StorageClasses, and then dynamically provisioning data volumes for workloads.

                                                  "},{"location":"en/end-user/kpanda/storage/sc.html#create-storageclass-sc","title":"Create StorageClass (SC)","text":"

                                                  Currently, StorageClasses can be created through YAML or a form. Each way has its own advantages and suits different users.

                                                  • Creating via YAML takes fewer steps and is more efficient, but it has a higher barrier: you need to be familiar with the YAML configuration of the StorageClass.

                                                  • Creating via the form is more intuitive and easier: just fill in the values according to the prompts, although it involves more steps.

                                                  "},{"location":"en/end-user/kpanda/storage/sc.html#yaml-creation","title":"YAML creation","text":"
                                                  1. Click the name of the target cluster in the cluster list, and then click Container Storage -> StorageClass (SC) -> Create with YAML in the left navigation bar.

                                                  2. Enter or paste the prepared YAML file in the pop-up box, and click OK at the bottom of the pop-up box.

                                                    You can import a YAML file from your local machine, or download and save the file you have filled in.
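
                                                  As a minimal sketch of such a YAML file, the following StorageClass uses the rancher.io/local-path driver mentioned later on this page; the name is a placeholder:

                                                  apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: sc-example                     # placeholder name\nprovisioner: rancher.io/local-path     # driver in the vendor-specified format\nreclaimPolicy: Delete                  # or Retain, to keep data on deletion\nvolumeBindingMode: WaitForFirstConsumer\n# allowVolumeExpansion: true           # enable only if the driver supports expansion\n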

                                                  "},{"location":"en/end-user/kpanda/storage/sc.html#form-creation","title":"Form Creation","text":"
                                                  1. Click the name of the target cluster in the cluster list, and then click Container Storage -> StorageClass (SC) -> Create StorageClass (SC) in the left navigation bar.

                                                  2. Fill in the basic information and click OK at the bottom.

                                                    Custom storage system

                                                    • The StorageClass name, driver, and reclaim policy cannot be modified after creation.
                                                    • CSI storage driver: A standard Kubernetes container storage interface plug-in; it must follow the format specified by the storage vendor, such as rancher.io/local-path. For how to fill in the CSI drivers provided by different vendors, refer to the official Kubernetes document Storage Class.
                                                    • Reclaim policy: When a data volume is deleted, either keep the data in the volume or delete it.
                                                    • Snapshot/Expansion: Once enabled, data volumes/data volume declarations based on this StorageClass support the expansion and snapshot features, provided the underlying storage driver supports them.

                                                    HwameiStor storage system

                                                    • The StorageClass name, driver, and reclaim policy cannot be modified after creation.
                                                    • Storage system: HwameiStor storage system.
                                                    • Storage type: Supports LVM and raw disk types.
                                                      • LVM type: The recommended way to use HwameiStor; it supports highly available data volumes, and the corresponding CSI storage driver is lvm.hwameistor.io.
                                                      • Raw disk data volume: Suitable for scenarios that do not require high availability, as it provides no high-availability capability; the corresponding CSI driver is hdd.hwameistor.io.
                                                    • High availability mode: Before using the high-availability capability, make sure the DRBD component has been installed. With high availability enabled, the number of data volume replicas can be set to 1 or 2, and a replica count of 1 can be converted to 2 if needed.
                                                    • Reclaim policy: When a data volume is deleted, either keep the data in the volume or delete it.
                                                    • Snapshot/Expansion: Once enabled, data volumes/data volume declarations based on this StorageClass support the expansion and snapshot features, provided the underlying storage driver supports them.
                                                  "},{"location":"en/end-user/kpanda/storage/sc.html#update-storageclass-sc","title":"Update StorageClass (SC)","text":"

                                                  On the StorageClass list page, find the StorageClass that needs to be updated, and select Edit under the operation bar on the right to update the StorageClass.

                                                  Info

                                                  Select View YAML to view the YAML file of the StorageClass, but editing is not supported.

                                                  "},{"location":"en/end-user/kpanda/storage/sc.html#delete-storageclass-sc","title":"Delete StorageClass (SC)","text":"

                                                  On the StorageClass list page, find the StorageClass to be deleted, and select Delete in the operation column on the right.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-cronjob.html","title":"Create CronJob","text":"

                                                  This page introduces how to create a CronJob from an image or a YAML file.

                                                  CronJobs are suitable for periodic operations such as backups and report generation. They can be configured to repeat on a schedule (for example, daily/weekly/monthly), and you can define the time at which each run starts.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-cronjob.html#prerequisites","title":"Prerequisites","text":"

                                                  Before creating a CronJob, the following prerequisites need to be met:

                                                  • In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and make sure you can access the cluster's UI.

                                                  • Create a namespace and a user.

                                                  • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                                  • When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict, otherwise the deployment will fail.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-cronjob.html#create-by-image","title":"Create by image","text":"

                                                  Refer to the following steps to create a CronJob using the image.

                                                  1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                                  2. On the cluster details page, click Workloads -> CronJobs in the left navigation bar, and then click the Create by Image button in the upper right corner of the page.

                                                  3. Fill in Basic Information, Container Settings, CronJob Settings, Advanced Configuration, click OK in the lower right corner of the page to complete the creation.

                                                    The system will automatically return to the CronJobs list. Click \u2507 on the right side of the list to perform operations such as updating, deleting, and restarting the CronJob.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-cronjob.html#basic-information","title":"Basic information","text":"

                                                  On the Create CronJobs page, enter the information according to the table below, and click Next .

                                                  • Workload Name: Up to 63 characters; only lowercase letters, numbers, and hyphens ("-") are allowed, and the name must start and end with a lowercase letter or number. Workloads of the same type in the same namespace cannot share a name, and the name cannot be changed after the workload is created.
                                                  • Namespace: Select the namespace in which to deploy the new CronJob; the default namespace is used by default. If you cannot find the desired namespace, you can create a new one by following the prompt on the page.
                                                  • Description: Enter a description of the workload, with content of your choice, up to 512 characters.
                                                  "},{"location":"en/end-user/kpanda/workloads/create-cronjob.html#container-settings","title":"Container settings","text":"

                                                  Container settings are divided into six parts: basic information, lifecycle, health check, environment variables, data storage, and security settings. Click the tabs below to view the requirements of each part.

                                                  Container settings apply to a single container. To add multiple containers to a pod, click + on the right.

                                                  Basic information (required)Lifecycle (optional)Health Check (optional)Environment variables (optional)Data storage (optional)Security settings (optional)

                                                  When configuring container-related parameters, you must correctly fill in the container name and image parameters, otherwise you will not be able to proceed to the next step. After filling in the configuration with reference to the following requirements, click OK .

                                                  • Container Name: Up to 63 characters; lowercase letters, numbers, and hyphens ("-") are supported. Must start and end with a lowercase letter or number, e.g. nginx-01.
                                                  • Image: Enter the address or name of the image. When only an image name is entered, the image is pulled from the official DockerHub by default.
                                                  • Image Pull Policy: If Always pull the image is checked, the image is pulled from the registry every time the workload restarts/upgrades. If unchecked, the local image is used first, and the image is pulled from the container registry only when it does not exist locally. For more details, refer to Image Pull Policy.
                                                  • Privileged container: By default, a container cannot access any device on the host. When the privileged container option is enabled, the container can access all devices on the host and has all the permissions of processes running on the host.
                                                  • CPU/Memory Quota: The requested value (minimum resources needed) and limit value (maximum resources allowed) of CPU/memory resources. Configure resources for containers as needed to avoid resource waste and system failures caused by excessive container resources. The default value is shown in the figure.
                                                  • GPU Exclusive: Configure the GPU usage for the container; only positive integers are supported. The GPU quota supports giving the container exclusive use of an entire GPU card or part of a vGPU. For example, for an 8-core GPU card, entering 8 gives the container exclusive use of the entire card, and entering 1 configures a 1-core vGPU for the container.

Before setting a GPU to exclusive use, the administrator needs to install the GPU card and driver plug-in on the cluster nodes in advance and enable the GPU feature in Cluster Settings.
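As a rough sketch, the basic settings above map to the container fields of the Pod template as follows. The image name and resource values are illustrative, and the nvidia.com/gpu resource key is an assumption; the actual GPU resource name depends on the device plugin installed in your cluster.

containers:\n  - name: nginx-01\n    image: nginx:1.25            # pulled from the official DockerHub when no registry prefix is given\n    imagePullPolicy: Always      # 'Always pull the image' checked; IfNotPresent otherwise\n    securityContext:\n      privileged: false          # privileged container disabled (the default)\n    resources:\n      requests:                  # minimum resources the container needs\n        cpu: 250m\n        memory: 512Mi\n      limits:                    # maximum resources the container may use\n        cpu: 500m\n        memory: 1Gi\n        # nvidia.com/gpu: 1      # GPU quota; the resource key depends on the installed GPU device plugin\n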

                                                  Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle Configuration.

Health checks are used to judge the health status of containers and applications, which helps improve the availability of applications. For details, refer to Container Health Check Configuration.

                                                  Configure container parameters within the Pod, add environment variables or pass configuration to the Pod, etc. For details, refer to Container environment variable configuration.

                                                  Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage Configuration.

Containers are securely isolated through Linux's built-in account permission isolation mechanism. You can limit container permissions by specifying an account UID (a numeric identity) with particular permissions. For example, enter 0 to run with the privileges of the root account.
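For reference, the UID entered here corresponds to the runAsUser field of the container's security context. A minimal sketch, where the UID 1000 is an arbitrary illustration:

securityContext:\n  runAsUser: 1000   # run as UID 1000; entering 0 here would run the container as root\n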

                                                  "},{"location":"en/end-user/kpanda/workloads/create-cronjob.html#cronjob-settings","title":"CronJob Settings","text":"
• Concurrency Policy: Whether multiple jobs created by the CronJob are allowed to run in parallel.

• Allow : A new job can be created before the previous job is completed, and multiple jobs can run in parallel. Too many jobs may occupy cluster resources.
                                                    • Forbid : Before the previous job is completed, a new job cannot be created. If the execution time of the new job is up and the previous job has not been completed, CronJob will ignore the execution of the new job.
                                                    • Replace : If the execution time of the new job is up, but the previous job has not been completed, the new job will replace the previous job.

                                                    The above rules only apply to multiple jobs created by the same CronJob. Multiple jobs created by multiple CronJobs are always allowed to run concurrently.

• Policy Settings: Set the schedule on which jobs run, based on minutes, hours, days, weeks, and months. Custom Cron expressions with numbers and * are supported; after you input an expression, its meaning is displayed. For detailed expression syntax rules, refer to Cron Schedule Syntax.

                                                  • Job Records: Set how many records of successful or failed jobs to keep. 0 means do not keep.
• Timeout: When this time is exceeded, the job is marked as failed and all Pods under the job are deleted. When empty, no timeout is set. The default is 360 s.
• Retries: The number of times the job can be retried. The default is 6.
• Restart Policy: Set whether to restart the Pod when the job fails (see the YAML sketch after this list).
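As a sketch, the settings above roughly map to the following CronJob fields; the values shown are illustrative:

spec:\n  schedule: '*/30 * * * *'         # run every 30 minutes (minute hour day-of-month month day-of-week)\n  concurrencyPolicy: Forbid        # Allow / Forbid / Replace\n  successfulJobsHistoryLimit: 3    # successful job records to keep; 0 keeps none\n  failedJobsHistoryLimit: 1        # failed job records to keep\n  jobTemplate:\n    spec:\n      activeDeadlineSeconds: 360   # Timeout: the job is marked failed after 360 s\n      backoffLimit: 6              # Retries\n      template:\n        spec:\n          restartPolicy: Never     # Restart Policy\n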
                                                  "},{"location":"en/end-user/kpanda/workloads/create-cronjob.html#service-settings","title":"Service settings","text":"

Configure a Service for the CronJob so that it can be accessed externally.

                                                  1. Click the Create Service button.

                                                  2. Refer to Create Service to configure service parameters.

                                                  3. Click OK and click Next .

                                                  "},{"location":"en/end-user/kpanda/workloads/create-cronjob.html#advanced-configuration","title":"Advanced configuration","text":"

                                                  The advanced configuration of CronJobs mainly involves labels and annotations.

                                                  You can click the Add button to add labels and annotations to the workload instance Pod.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-cronjob.html#create-from-yaml","title":"Create from YAML","text":"

In addition to creating from an image, you can also create CronJobs more quickly through YAML files.

                                                  1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                                  2. On the cluster details page, click Workloads -> CronJobs in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

3. Enter or paste the YAML file prepared in advance, then click OK to complete the creation.

Click to view the complete YAML
                                                  apiVersion: batch/v1\nkind: CronJob\nmetadata:\n  creationTimestamp: '2022-12-26T09:45:47Z'\n  generation: 1\n  name: demo\n  namespace: default\n  resourceVersion: '92726617'\n  uid: d030d8d7-a405-4dcd-b09a-176942ef36c9\nspec:\n  concurrencyPolicy: Allow\n  failedJobsHistoryLimit: 1\n  jobTemplate:\n    metadata:\n      creationTimestamp: null\n    spec:\n      activeDeadlineSeconds: 360\n      backoffLimit: 6\n      template:\n        metadata:\n          creationTimestamp: null\n        spec:\n          containers:\n            - image: nginx\n              imagePullPolicy: IfNotPresent\n              lifecycle: {}\n              name: container-3\n              resources:\n                limits:\n                  cpu: 250m\n                  memory: 512Mi\n                requests:\n                  cpu: 250m\n                  memory: 512Mi\n              securityContext:\n                privileged: false\n              terminationMessagePath: /dev/termination-log\n              terminationMessagePolicy: File\n          dnsPolicy: ClusterFirst\n          restartPolicy: Never\n          schedulerName: default-scheduler\n          securityContext: {}\n          terminationGracePeriodSeconds: 30\n  schedule: 0 0 13 * 5\n  successfulJobsHistoryLimit: 3\n  suspend: false\nstatus: {}\n
                                                  "},{"location":"en/end-user/kpanda/workloads/create-daemonset.html","title":"Create DaemonSet","text":"

This page introduces how to create a DaemonSet through images and YAML files.

A DaemonSet uses node affinity together with taints and tolerations to ensure that a replica of a Pod runs on all (or some) nodes. For nodes that newly join the cluster, the DaemonSet automatically deploys the proper Pod on the new node and tracks the running status of the Pod. When a node is removed, the DaemonSet deletes all Pods it created.

                                                  Common cases for daemons include:

                                                  • Run cluster daemons on each node.
                                                  • Run a log collection daemon on each node.
                                                  • Run a monitoring daemon on each node.

In the simple case, one DaemonSet covering all nodes can be started for each type of daemon. For finer and more advanced daemon management, you can also deploy multiple DaemonSets for the same daemon. Each DaemonSet can have different flags and different memory and CPU requirements for different hardware types.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-daemonset.html#prerequisites","title":"Prerequisites","text":"

                                                  Before creating a DaemonSet, the following prerequisites need to be met:

• In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and be able to access the cluster's UI.

                                                  • Create a namespace and a user.

                                                  • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

• When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict; otherwise, the deployment will fail.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-daemonset.html#create-by-image","title":"Create by image","text":"

Refer to the following steps to create a DaemonSet from an image.

                                                  1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                                  2. On the cluster details page, click Workloads -> DaemonSets in the left navigation bar, and then click the Create by Image button in the upper right corner of the page.

                                                  3. Fill in Basic Information, Container Settings, Service Settings, Advanced Settings, click OK in the lower right corner of the page to complete the creation.

                                                    The system will automatically return the list of DaemonSets . Click \u2507 on the right side of the list to perform operations such as updating, deleting, and restarting the DaemonSet.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-daemonset.html#basic-information","title":"Basic information","text":"

                                                  On the Create DaemonSets page, after entering the information according to the table below, click Next .

• Workload Name: Can contain up to 63 characters; only lowercase letters, numbers, and a separator (\"-\") are allowed, and it must start and end with a lowercase letter or number. The name of a workload of the same type in the same namespace cannot be repeated, and the name cannot be changed after the workload is created.
• Namespace: Select the namespace in which to deploy the newly created DaemonSet; the default namespace is used by default. If you can't find the desired namespace, you can create a new one by following the prompt on the page.
• Description: Enter a description for the workload; the content is customizable. The number of characters should not exceed 512.
                                                  "},{"location":"en/end-user/kpanda/workloads/create-daemonset.html#container-settings","title":"Container settings","text":"

Container settings are divided into six parts: basic information, lifecycle, health check, environment variables, data storage, and security settings. Click the tabs below to view the requirements of each part.

Container settings apply to a single container. To add multiple containers to a pod, click + on the right.

                                                  Basic information (required)Lifecycle (optional)Health Check (optional)Environment variables (optional)Data storage (optional)Security settings (optional)

When configuring container-related parameters, you must fill in the container name and image correctly; otherwise you will not be able to proceed to the next step. After filling in the settings with reference to the following requirements, click OK .

• Container Name: Up to 63 characters; lowercase letters, numbers, and separators (\"-\") are supported. Must start and end with a lowercase letter or number, e.g. nginx-01.
• Image: Enter the address or name of the image. When only an image name is entered, the image is pulled from the official DockerHub by default.
• Image Pull Policy: When Always pull image is checked, the image is pulled from the registry every time the workload restarts or upgrades. If unchecked, the local image is used, and the image is re-pulled from the container registry only when it does not exist locally. For more details, refer to Image Pull Policy.
• Privileged container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and has all the permissions of processes running on the host.
• CPU/Memory Quota: Request (minimum resources the container needs) and limit (maximum resources the container is allowed to use) for CPU/memory. Configure resources for containers as needed to avoid resource waste and system failures caused by containers exceeding their resources. The default value is shown in the figure.
• GPU Exclusive: Configure GPU usage for the container; only positive integers are supported. The GPU quota supports dedicating an entire GPU card or part of a vGPU to the container. For example, for an 8-core GPU card, enter 8 to let the container use the entire card exclusively, or enter 1 to allocate a 1-core vGPU to the container.

Before setting a GPU to exclusive use, the administrator needs to install the GPU card and driver plug-in on the cluster nodes in advance and enable the GPU feature in Cluster Settings.

                                                  Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle Configuration.

Health checks are used to judge the health status of containers and applications, which helps improve the availability of applications. For details, refer to Container Health Check Configuration.

                                                  Configure container parameters within the Pod, add environment variables or pass settings to the Pod, etc. For details, refer to Container environment variable settings.

                                                  Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage Configuration.

Containers are securely isolated through Linux's built-in account permission isolation mechanism. You can limit container permissions by specifying an account UID (a numeric identity) with particular permissions. For example, enter 0 to run with the privileges of the root account.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-daemonset.html#service-settings","title":"Service settings","text":"

Create a Service for the DaemonSet so that it can be accessed externally.

                                                  1. Click the Create Service button.

                                                  2. Configure service parameters, refer to Create Service for details.

                                                  3. Click OK and click Next .

                                                  "},{"location":"en/end-user/kpanda/workloads/create-daemonset.html#advanced-settings","title":"Advanced settings","text":"

Advanced settings include four parts: network settings, upgrade policy, scheduling policy, and labels and annotations. You can click the tabs below to view the requirements of each part.

                                                  Network ConfigurationUpgrade PolicyScheduling PoliciesLabels and Annotations

                                                  In some cases, the application will have redundant DNS queries. Kubernetes provides DNS-related settings options for applications, which can effectively reduce redundant DNS queries and increase business concurrency in certain cases.

                                                  • DNS Policy

• Default: The container uses the domain name resolution file pointed to by the kubelet's --resolv-conf parameter. This setting can only resolve external domain names registered on the Internet; it cannot resolve cluster-internal domain names, and there are no invalid DNS queries.
• ClusterFirstWithHostNet: The application uses the domain name resolution file of the host it runs on.
• ClusterFirst: The application connects to Kube-DNS/CoreDNS.
• None: A new option value introduced in Kubernetes v1.9 (Beta in v1.10). After setting it to None, dnsConfig must be set; the container's domain name resolution file is then generated entirely from the dnsConfig settings (see the sketch after this list).
                                                  • Nameservers: fill in the address of the domain name server, such as 10.6.175.20 .

                                                  • Search domains: DNS search domain list for domain name query. When specified, the provided search domain list will be merged into the search field of the domain name resolution file generated based on dnsPolicy, and duplicate domain names will be deleted. Kubernetes allows up to 6 search domains.
                                                  • Options: Configuration options for DNS, where each object can have a name attribute (required) and a value attribute (optional). The content in this field will be merged into the options field of the domain name resolution file generated based on dnsPolicy. If some options of dnsConfig options conflict with the options of the domain name resolution file generated based on dnsPolicy, they will be overwritten by dnsConfig.
• Host Alias: Hostname aliases added to the Pod's hosts file.
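A minimal sketch of how these DNS settings appear in the Pod spec; the nameserver address, search domain, option values, and host alias are illustrative:

spec:\n  dnsPolicy: None                # dnsConfig below fully defines name resolution\n  dnsConfig:\n    nameservers:\n      - 10.6.175.20\n    searches:\n      - ns1.svc.cluster.local\n    options:\n      - name: ndots\n        value: '2'\n  hostAliases:                   # Host Alias\n    - ip: 127.0.0.1\n      hostnames:\n        - foo.local\n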

                                                  • Upgrade Mode: Rolling upgrade refers to gradually replacing instances of the old version with instances of the new version. During the upgrade process, business traffic will be load-balanced to the old and new instances at the same time, so the business will not be interrupted. Rebuild and upgrade refers to deleting the workload instance of the old version first, and then installing the specified new version. During the upgrade process, the business will be interrupted.
                                                  • Max Unavailable Pods: Specify the maximum value or ratio of unavailable pods during the workload update process, the default is 25%. If it is equal to the number of instances, there is a risk of service interruption.
                                                  • Max Surge: The maximum or ratio of the total number of Pods exceeding the desired replica count of Pods during a Pod update. Default is 25%.
                                                  • Revision History Limit: Set the number of old versions retained when the version is rolled back. The default is 10.
                                                  • Minimum Ready: The minimum time for a Pod to be ready. Only after this time is the Pod considered available. The default is 0 seconds.
                                                  • Upgrade Max Duration: If the deployment is not successful after the set time, the workload will be marked as failed. Default is 600 seconds.
                                                  • Graceful Period: The execution period (0-9,999 seconds) of the command before the workload stops, the default is 30 seconds.

                                                  • Toleration time: When the node where the workload instance is located is unavailable, the time for rescheduling the workload instance to other available nodes, the default is 300 seconds.
                                                  • Node affinity: According to the label on the node, constrain which nodes the Pod can be scheduled on.
                                                  • Workload Affinity: Constrains which nodes a Pod can be scheduled to based on the labels of the Pods already running on the node.
                                                  • Workload anti-affinity: Constrains which nodes a Pod cannot be scheduled to based on the labels of Pods already running on the node.
• Topology domain: That is, topologyKey, used to specify a group of nodes that can be scheduled. For example, kubernetes.io/os indicates that as long as a node of some operating system meets the labelSelector conditions, Pods can be scheduled to that node (see the sketch below).

                                                  For details, refer to Scheduling Policy.
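A sketch of how these scheduling settings translate into the Pod spec; the label keys, values, and times are illustrative:

spec:\n  tolerations:                             # Toleration time: reschedule 300 s after the node becomes unreachable\n    - key: node.kubernetes.io/unreachable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n  affinity:\n    nodeAffinity:                          # Node affinity: constrain scheduling by node labels\n      requiredDuringSchedulingIgnoredDuringExecution:\n        nodeSelectorTerms:\n          - matchExpressions:\n              - key: disktype\n                operator: In\n                values:\n                  - ssd\n    podAntiAffinity:                       # Workload anti-affinity: avoid nodes running Pods with these labels\n      requiredDuringSchedulingIgnoredDuringExecution:\n        - labelSelector:\n            matchLabels:\n              app: nginx\n          topologyKey: kubernetes.io/hostname   # Topology domain\n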

You can click the Add button to add labels and annotations to workloads and pods.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-daemonset.html#create-from-yaml","title":"Create from YAML","text":"

In addition to creating from an image, you can also create DaemonSets more quickly through YAML files.

                                                  1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the Cluster Details page.

2. On the cluster details page, click Workloads -> DaemonSets in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

3. Enter or paste the YAML file prepared in advance, then click OK to complete the creation.

Click to see an example YAML for creating a DaemonSet
kind: DaemonSet\n apiVersion: apps/v1\n metadata:\n   name: hwameistor-local-disk-manager\n   namespace: hwameistor\n   uid: ccbdc098-7de3-4a8a-96dd-d1cee159c92b\n   resourceVersion: '90999552'\n   generation: 1\n   creationTimestamp: '2022-12-15T09:03:44Z'\n   labels:\n     app.kubernetes.io/managed-by: Helm\n   annotations:\n     deprecated.DaemonSet.template.generation: '1'\n     meta.helm.sh/release-name: hwameistor\n     meta.helm.sh/release-namespace: hwameistor\n spec:\n   selector:\n     matchLabels:\n       app: hwameistor-local-disk-manager\n   template:\n     metadata:\n       creationTimestamp: null\n       labels:\n         app: hwameistor-local-disk-manager\n     spec:\n       volumes:\n         - name: udev\n           hostPath:\n             path: /run/udev\n             type: Directory\n         - name: procmount\n           hostPath:\n             path: /proc\n             type: Directory\n         - name: devmount\n           hostPath:\n             path: /dev\n             type: Directory\n         - name: socket-dir\n           hostPath:\n             path: /var/lib/kubelet/plugins/disk.hwameistor.io\n             type: DirectoryOrCreate\n         - name: registration-dir\n           hostPath:\n             path: /var/lib/kubelet/plugins_registry/\n             type: Directory\n         - name: plugin-dir\n           hostPath:\n             path: /var/lib/kubelet/plugins\n             type: DirectoryOrCreate\n         - name: pods-mount-dir\n           hostPath:\n             path: /var/lib/kubelet/pods\n             type: DirectoryOrCreate\n       containers:\n         - name: registrar\n           image: k8s-gcr.m.daocloud.io/sig-storage/csi-node-driver-registrar:v2.5.0\n           args:\n             - '--v=5'\n             - '--csi-address=/csi/csi.sock'\n             - >-\n               --kubelet-registration-path=/var/lib/kubelet/plugins/disk.hwameistor.io/csi.sock\n           env:\n             - name: KUBE_NODE_NAME\n               valueFrom:\n                 fieldRef:\n                   apiVersion: v1\n                   fieldPath: spec.nodeName\n           resources: {}\n           volumeMounts:\n             - name: socket-dir\n               mountPath: /csi\n             - name: registration-dir\n               mountPath: /registration\n           lifecycle:\n             preStop:\n               exec:\n                 command:\n                   - /bin/sh\n                   - '-c'\n                   - >-\n                     rm -rf /registration/disk.hwameistor.io\n                     /registration/disk.hwameistor.io-reg.sock\n           terminationMessagePath: /dev/termination-log\n           terminationMessagePolicy: File\n           imagePullPolicy: IfNotPresent\n        - name: manager\n          image: ghcr.m.daocloud.io/hwameistor/local-disk-manager:v0.6.1\n          command:\n            - /local-disk-manager\n          args:\n            - '--endpoint=$(CSI_ENDPOINT)'\n            - '--nodeid=$(NODENAME)'\n            - '--csi-enable=true'\n          env:\n            - name: CSI_ENDPOINT\n              value: unix://var/lib/kubelet/plugins/disk.hwameistor.io/csi.sock\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: WATCH_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: 
metadata.namespace\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: NODENAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: spec.nodeName\n            - name: OPERATOR_NAME\n              value: local-disk-manager\n          resources: {}\n          volumeMounts:\n            - name: udev\n              mountPath: /run/udev\n            - name: procmount\n              readOnly: true\n              mountPath: /host/proc\n            - name: devmount\n              mountPath: /dev\n            - name: registration-dir\n              mountPath: /var/lib/kubelet/plugins_registry\n            - name: plugin-dir\n              mountPath: /var/lib/kubelet/plugins\n              mountPropagation: Bidirectional\n            - name: pods-mount-dir\n              mountPath: /var/lib/kubelet/pods\n              mountPropagation: Bidirectional\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            privileged: true\n      restartPolicy: Always\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      serviceAccountName: hwameistor-admin\n      serviceAccount: hwameistor-admin\n      hostNetwork: true\n      hostPID: true\n      securityContext: {}\n      schedulerName: default-scheduler\n      tolerations:\n        - key: CriticalAddonsOnly\n          operator: Exists\n        - key: node.kubernetes.io/not-ready\n          operator: Exists\n          effect: NoSchedule\n        - key: node-role.kubernetes.io/master\n          operator: Exists\n          effect: NoSchedule\n        - key: node-role.kubernetes.io/control-plane\n          operator: Exists\n          effect: NoSchedule\n        - key: node.cloudprovider.kubernetes.io/uninitialized\n          operator: Exists\n          effect: NoSchedule\n  updateStrategy:\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 1\n      maxSurge: 0\n  revisionHistoryLimit: 10\nstatus:\n  currentNumberScheduled: 4\n  numberMisscheduled: 0\n  desiredNumberScheduled: 4\n  numberReady: 4\n  observedGeneration: 1\n  updatedNumberScheduled: 4\n  numberAvailable: 4\n
                                                  "},{"location":"en/end-user/kpanda/workloads/create-deployment.html","title":"Create Deployment","text":"

                                                  This page describes how to create deployments through images and YAML files.

Deployment is a common resource in Kubernetes. It mainly provides declarative updates for Pods and ReplicaSets, and supports elastic scaling, rolling upgrades, and version rollback. Declare the desired Pod state in the Deployment, and the Deployment Controller will modify the current state through the ReplicaSet until it reaches the declared desired state. A Deployment is stateless and does not support data persistence; it is suitable for deploying stateless applications that do not need to save data and can be restarted and rolled back at any time.

Through the container management module of AI platform, workloads across multiple clouds and clusters can be easily managed based on proper role permissions, including full lifecycle management of deployments such as creation, update, deletion, elastic scaling, restart, and version rollback.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-deployment.html#prerequisites","title":"Prerequisites","text":"

                                                  Before using image to create deployments, the following prerequisites need to be met:

• In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and be able to access the cluster's UI.

                                                  • Create a namespace and a user.

                                                  • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

• When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict; otherwise, the deployment will fail.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-deployment.html#create-by-image","title":"Create by image","text":"

                                                  Follow the steps below to create a deployment by image.

                                                  1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the Cluster Details page.

                                                  2. On the cluster details page, click Workloads -> Deployments in the left navigation bar, and then click the Create by Image button in the upper right corner of the page.

                                                  3. Fill in Basic Information, Container Setting, Service Setting, Advanced Setting in turn, click OK in the lower right corner of the page to complete the creation.

The system will automatically return the list of Deployments . Click \u2507 on the right side of the list to perform operations such as update, delete, elastic scaling, restart, and version rollback on the workload. If the workload status is abnormal, please check the specific abnormal information; refer to Workload Status.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-deployment.html#basic-information","title":"Basic information","text":"
• Workload Name: Can contain up to 63 characters; only lowercase letters, numbers, and a separator (\"-\") are allowed, and it must start and end with a lowercase letter or number, such as deployment-01. The name of a workload of the same type in the same namespace cannot be repeated, and the name cannot be changed after the workload is created.
• Namespace: Select the namespace where the newly created workload will be deployed. The default namespace is used by default. If you can't find the desired namespace, you can create a new one by following the prompt on the page.
• Pods: Enter the number of Pod instances for the workload; one Pod instance is created by default.
• Description: Enter a description for the workload; the content is customizable. The number of characters cannot exceed 512.
                                                  "},{"location":"en/end-user/kpanda/workloads/create-deployment.html#container-settings","title":"Container settings","text":"

Container settings are divided into six parts: basic information, lifecycle, health check, environment variables, data storage, and security settings. Click the tabs below to view the requirements of each part.

Container settings apply to a single container. To add multiple containers to a pod, click + on the right.

                                                  Basic Information (Required)Lifecycle (optional)Health Check (optional)Environment variables (optional)Data storage (optional)Security settings (optional)

                                                  When configuring container-related parameters, it is essential to correctly fill in the container name and image parameters; otherwise, you will not be able to proceed to the next step. After filling in the configuration according to the following requirements, click OK.

• Container Type: The default is Work Container. For information on init containers, see the [K8s Official Documentation](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/).
                                                  • Container Name: No more than 63 characters, supporting lowercase letters, numbers, and separators (\"-\"). It must start and end with a lowercase letter or number, for example, nginx-01.
                                                  • Image:
                                                    • Image: Select an appropriate image from the list. When entering the image name, the default is to pull the image from the official DockerHub.
                                                    • Image Version: Select an appropriate version from the dropdown list.
                                                    • Image Pull Policy: By checking Always pull the image, the image will be pulled from the repository each time the workload restarts/upgrades. If unchecked, it will only pull the local image, and will pull from the repository only if the image does not exist locally. For more details, refer to Image Pull Policy.
• Registry Secret: Optional. If the target registry requires a Secret for access, you need to create a secret first.
                                                  • Privileged Container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and has all the privileges of running processes on the host.
                                                  • CPU/Memory Request: The request value (the minimum resource needed) and the limit value (the maximum resource allowed) for CPU/memory resources. Configure resources for the container as needed to avoid resource waste and system failures caused by container resource overages. Default values are shown in the figure.
                                                  • GPU Configuration: Configure GPU usage for the container, supporting only positive integers. The GPU quota setting supports configuring the container to exclusively use an entire GPU card or part of a vGPU. For example, for a GPU card with 8 cores, entering the number 8 means the container exclusively uses the entire card, and entering the number 1 means configuring 1 core of the vGPU for the container.

                                                  Before setting the GPU, the administrator needs to pre-install the GPU card and driver plugin on the cluster node and enable the GPU feature in the Cluster Settings.

                                                  Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle Setting.

Health checks are used to judge the health status of containers and applications, which helps improve the availability of applications. For details, refer to Container Health Check Setting.

Configure container parameters within the Pod, add environment variables or pass settings to the Pod, etc. For details, refer to Container environment variable setting.

                                                  Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage Setting.

Containers are securely isolated through Linux's built-in account permission isolation mechanism. You can limit container permissions by specifying an account UID (a numeric identity) with particular permissions. For example, enter 0 to run with the privileges of the root account.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-deployment.html#service-settings","title":"Service settings","text":"

                                                  Configure Service for the deployment, so that the deployment can be accessed externally.

                                                  1. Click the Create Service button.

                                                  2. Refer to Create Service to configure service parameters.

                                                  3. Click OK and click Next .
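For reference, a minimal Service that exposes the deployment might look like the following; the name, selector labels, and ports are illustrative, and the selector must match the workload's Pod labels:

apiVersion: v1\nkind: Service\nmetadata:\n  name: nginx-svc\nspec:\n  type: NodePort          # reachable from outside the cluster; use ClusterIP for internal-only access\n  selector:\n    app: nginx            # must match the Pod labels of the deployment\n  ports:\n    - port: 80            # Service port\n      targetPort: 80      # container port\n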

                                                  "},{"location":"en/end-user/kpanda/workloads/create-deployment.html#advanced-settings","title":"Advanced settings","text":"

Advanced settings include four parts: Network Settings, Upgrade Policy, Scheduling Policies, and Labels and Annotations. You can click the tabs below to view the setting requirements of each part.

                                                  Network SettingsUpgrade PolicyScheduling PoliciesLabels and Annotations
                                                  1. For container NIC setting, refer to Workload Usage IP Pool
                                                  2. DNS setting

                                                  In some cases, the application will have redundant DNS queries. Kubernetes provides DNS-related setting options for applications, which can effectively reduce redundant DNS queries and increase business concurrency in certain cases.

                                                  • DNS Policy

• Default: The container uses the domain name resolution file pointed to by the kubelet's --resolv-conf parameter. This setting can only resolve external domain names registered on the Internet; it cannot resolve cluster-internal domain names, and there are no invalid DNS queries.
• ClusterFirstWithHostNet: The application uses the domain name resolution file of the host it runs on.
• ClusterFirst: The application connects to Kube-DNS/CoreDNS.
• None: A new option value introduced in Kubernetes v1.9 (Beta in v1.10). After setting it to None, dnsConfig must be set. At this time, the domain name resolution file of the container will be generated entirely from the dnsConfig settings.
                                                  • Nameservers: fill in the address of the domain name server, such as 10.6.175.20 .

                                                  • Search domains: DNS search domain list for domain name query. When specified, the provided search domain list will be merged into the search field of the domain name resolution file generated based on dnsPolicy, and duplicate domain names will be deleted. Kubernetes allows up to 6 search domains.
                                                  • Options: Setting options for DNS, where each object can have a name attribute (required) and a value attribute (optional). The content in this field will be merged into the options field of the domain name resolution file generated based on dnsPolicy. If some options of dnsConfig options conflict with the options of the domain name resolution file generated based on dnsPolicy, they will be overwritten by dnsConfig.
• Host Alias: Hostname aliases added to the Pod's hosts file.

                                                  • Upgrade Mode: Rolling upgrade refers to gradually replacing instances of the old version with instances of the new version. During the upgrade process, business traffic will be load-balanced to the old and new instances at the same time, so the business will not be interrupted. Rebuild and upgrade refers to deleting the workload instance of the old version first, and then installing the specified new version. During the upgrade process, the business will be interrupted.
                                                  • Max Unavailable: Specify the maximum value or ratio of unavailable pods during the workload update process, the default is 25%. If it is equal to the number of instances, there is a risk of service interruption.
                                                  • Max Surge: The maximum or ratio of the total number of Pods exceeding the desired replica count of Pods during a Pod update. Default is 25%.
                                                  • Revision History Limit: Set the number of old versions retained when the version is rolled back. The default is 10.
                                                  • Minimum Ready: The minimum time for a Pod to be ready. Only after this time is the Pod considered available. The default is 0 seconds.
                                                  • Upgrade Max Duration: If the deployment is not successful after the set time, the workload will be marked as failed. Default is 600 seconds.
• Graceful Period: The execution period (0-9,999 seconds) of the command before the workload stops; the default is 30 seconds (see the YAML sketch after this list).
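These upgrade settings correspond roughly to the following Deployment fields; this is a sketch showing the default values described above:

spec:\n  strategy:\n    type: RollingUpdate               # or Recreate for rebuild-and-upgrade\n    rollingUpdate:\n      maxUnavailable: 25%\n      maxSurge: 25%\n  revisionHistoryLimit: 10            # Revision History Limit\n  minReadySeconds: 0                  # Minimum Ready\n  progressDeadlineSeconds: 600        # Upgrade Max Duration\n  template:\n    spec:\n      terminationGracePeriodSeconds: 30   # Graceful Period\n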

                                                  • Toleration time: When the node where the workload instance is located is unavailable, the time for rescheduling the workload instance to other available nodes, the default is 300 seconds.
                                                  • Node Affinity: According to the label on the node, constrain which nodes the Pod can be scheduled on.
                                                  • Workload Affinity: Constrains which nodes a Pod can be scheduled to based on the labels of the Pods already running on the node.
                                                  • Workload Anti-affinity: Constrains which nodes a Pod cannot be scheduled to based on the labels of Pods already running on the node.

                                                  For details, refer to Scheduling Policy.

You can click the Add button to add labels and annotations to workloads and pods.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-deployment.html#create-from-yaml","title":"Create from YAML","text":"

In addition to creating from an image, you can also create deployments more quickly through YAML files.

                                                  1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the Cluster Details page.

                                                  2. On the cluster details page, click Workloads -> Deployments in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

3. Enter or paste the YAML file prepared in advance, then click OK to complete the creation.

                                                  Click to see an example YAML for creating a deployment
apiVersion: apps/v1\n kind: Deployment\n metadata:\n   name: nginx-deployment\n spec:\n   selector:\n     matchLabels:\n       app: nginx\n   replicas: 2 # (1)!\n   template:\n     metadata:\n       labels:\n         app: nginx\n     spec:\n       containers:\n       - name: nginx\n         image: nginx:1.14.2\n         ports:\n         - containerPort: 80\n
                                                  1. Tell the Deployment to run 2 Pods that match this template
                                                  "},{"location":"en/end-user/kpanda/workloads/create-job.html","title":"Create Job","text":"

This page introduces how to create a job through images and YAML files.

                                                  Job is suitable for performing one-time jobs. A Job creates one or more Pods, and the Job keeps retrying to run Pods until a certain number of Pods are successfully terminated. A Job ends when the specified number of Pods are successfully terminated. When a Job is deleted, all Pods created by the Job will be cleared. When a Job is paused, all active Pods in the Job are deleted until the Job is resumed. For more information about jobs, refer to Job.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-job.html#prerequisites","title":"Prerequisites","text":"
• In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and be able to access the cluster's UI.

                                                  • Create a namespace and a user.

                                                  • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

• When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict; otherwise, the deployment will fail.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-job.html#create-by-image","title":"Create by image","text":"

                                                  Refer to the following steps to create a job using an image.

                                                  1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                                  2. On the cluster details page, click Workloads -> Jobs in the left navigation bar, and then click the Create by Image button in the upper right corner of the page.

                                                  3. Fill in Basic Information, Container Settings and Advanced Settings, click OK in the lower right corner of the page to complete the creation.

                                                    The system will automatically return to the job list. Click \u2507 on the right side of the list to perform operations such as updating, deleting, and restarting the job.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-job.html#basic-information","title":"Basic information","text":"

                                                  On the Create Jobs page, enter the basic information according to the table below, and click Next .

• Workload Name: Can contain up to 63 characters; only lowercase letters, numbers, and a separator (\"-\") are allowed, and it must start and end with a lowercase letter or number. The name of a workload of the same type in the same namespace cannot be repeated, and the name cannot be changed after the workload is created.
• Namespace: Select the namespace in which to deploy the newly created job; the default namespace is used by default. If you can't find the desired namespace, you can create a new one by following the prompt on the page.
• Number of Instances: Enter the number of Pod instances for the workload. By default, 1 Pod instance is created.
• Description: Enter a description for the workload; the content is customizable. The number of characters should not exceed 512.
                                                  "},{"location":"en/end-user/kpanda/workloads/create-job.html#container-settings","title":"Container settings","text":"

Container settings are divided into six parts: basic information, lifecycle, health check, environment variables, data storage, and security settings. Click the tabs below to view the setting requirements of each part.

Container settings apply to a single container. To add multiple containers to a pod, click + on the right.

                                                  Basic information (required)Lifecycle (optional)Health Check (optional)Environment Variables (optional)Data Storage (optional)Security Settings (optional)

When configuring container-related parameters, you must fill in the container name and image correctly; otherwise you will not be able to proceed to the next step. After filling in the settings with reference to the following requirements, click OK .

• Container Name: Up to 63 characters; lowercase letters, numbers, and separators (\"-\") are supported. Must start and end with a lowercase letter or number, e.g. nginx-01.
• Image: Enter the address or name of the image. When only an image name is entered, the image is pulled from the official DockerHub by default.
• Image Pull Policy: When Always pull image is checked, the image is pulled from the registry every time the workload restarts or upgrades. If unchecked, the local image is used, and the image is re-pulled from the container registry only when it does not exist locally. For more details, refer to Image Pull Policy.
• Privileged container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and has all the permissions of processes running on the host.
• CPU/Memory Quota: Request (minimum resources the container needs) and limit (maximum resources the container is allowed to use) for CPU/memory. Configure resources for containers as needed to avoid resource waste and system failures caused by containers exceeding their resources. The default value is shown in the figure.
• GPU Exclusive: Configure GPU usage for the container; only positive integers are supported. The GPU quota supports dedicating an entire GPU card or part of a vGPU to the container. For example, for an 8-core GPU card, enter 8 to let the container use the entire card exclusively, or enter 1 to allocate a 1-core vGPU to the container.

Before setting a GPU to exclusive use, the administrator needs to install the GPU card and driver plug-in on the cluster nodes in advance and enable the GPU feature in Cluster Settings.

                                                  Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle settings.

Health checks are used to judge the health status of containers and applications, which helps improve the availability of applications. For details, refer to Container Health Check settings.

                                                  Configure container parameters within the Pod, add environment variables or pass settings to the Pod, etc. For details, refer to Container environment variable settings.

                                                  Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage settings.

Containers are securely isolated through Linux's built-in account permission isolation mechanism. You can limit container permissions by specifying an account UID (a numeric identity) with particular permissions. For example, enter 0 to run with the privileges of the root account.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-job.html#advanced-settings","title":"Advanced settings","text":"

Advanced settings include job settings, labels, and annotations.

                                                  Job SettingsLabels and Annotations

• Parallel Pods: The maximum number of Pods that can be created at the same time during job execution; the parallel number should not be greater than the total number of Pods. The default is 1.
• Timeout: When this time is exceeded, the job is marked as failed and all Pods under the job are deleted. When empty, no timeout is set.
• Restart Policy: Whether to restart the Pod when the job fails (see the sketch after this list).
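As a sketch, these job settings map to the following Job fields; the values are illustrative:

spec:\n  parallelism: 1                 # Parallel Pods\n  activeDeadlineSeconds: 360     # Timeout; omit the field to set no timeout\n  backoffLimit: 6                # retries before the job is marked failed\n  template:\n    spec:\n      restartPolicy: Never       # or OnFailure to restart the Pod when it fails\n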

                                                  You can click the Add button to add labels and annotations to the workload instance Pod.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-job.html#create-from-yaml","title":"Create from YAML","text":"

In addition to creating from an image, you can also create jobs more quickly through YAML files.

                                                  1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                                  2. On the cluster details page, click Workloads -> Jobs in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

                                                  3. Enter or paste the YAML file prepared in advance, click OK to complete the creation.

                                                  Click to view the complete YAML
                                                  kind: Job\napiVersion: batch/v1\nmetadata:\n  name: demo\n  namespace: default\n  uid: a9708239-0358-4aa1-87d3-a092c080836e\n  resourceVersion: '92751876'\n  generation: 1\n  creationTimestamp: '2022-12-26T10:52:22Z'\n  labels:\n    app: demo\n    controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n    job-name: demo\n  annotations:\n    revisions: >-\n      {\"1\":{\"status\":\"running\",\"uid\":\"a9708239-0358-4aa1-87d3-a092c080836e\",\"start-time\":\"2022-12-26T10:52:22Z\",\"completion-time\":\"0001-01-01T00:00:00Z\"}}\nspec:\n  parallelism: 1\n  backoffLimit: 6\n  selector:\n    matchLabels:\n      controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: demo\n        controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n        job-name: demo\n    spec:\n      containers:\n        - name: container-4\n          image: nginx\n          resources:\n            limits:\n              cpu: 250m\n              memory: 512Mi\n            requests:\n              cpu: 250m\n              memory: 512Mi\n          lifecycle: {}\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            privileged: false\n      restartPolicy: Never\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      securityContext: {}\n      schedulerName: default-scheduler\n  completionMode: NonIndexed\n  suspend: false\nstatus:\n  startTime: '2022-12-26T10:52:22Z'\n  active: 1\n
                                                  "},{"location":"en/end-user/kpanda/workloads/create-statefulset.html","title":"Create StatefulSet","text":"

                                                  This page describes how to create a StatefulSet from an image or a YAML file.

                                                  StatefulSet, like Deployment, is a common Kubernetes resource; both are mainly used to manage the deployment and scaling of a collection of Pods. The main difference between the two is that a Deployment is stateless and does not persist data, while a StatefulSet is stateful and is mainly used to manage stateful applications. In addition, Pods in a StatefulSet have persistent IDs, which makes it easy to match each Pod to its storage volumes.

                                                  Through the container management module of AI platform, workloads across multiple clouds and clusters can be easily managed based on proper role permissions, covering the full lifecycle of StatefulSets: creation, update, deletion, elastic scaling, restart, version rollback, and more.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-statefulset.html#prerequisites","title":"Prerequisites","text":"

                                                  Before creating a StatefulSet from an image, the following prerequisites need to be met:

                                                  • In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and make sure that the cluster UI can be accessed.

                                                  • Create a namespace and a user.

                                                  • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                                  • When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict, otherwise the deployment will fail.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-statefulset.html#create-by-image","title":"Create by image","text":"

                                                  Follow the steps below to create a StatefulSet from an image.

                                                  1. Click Clusters on the left navigation bar, then click the name of the target cluster to enter Cluster Details.

                                                  2. Click Workloads -> StatefulSets in the left navigation bar, and then click the Create by Image button in the upper right corner.

                                                  3. Fill in Basic Information, Container Settings, Service Settings, Advanced Settings, click OK in the lower right corner of the page to complete the creation.

                                                    The system will automatically return to the StatefulSet list. Wait for the status of the workload to become Running. If the workload status is abnormal, refer to Workload Status for specific exception information.

                                                    Click \u2507 on the right side of the workload to perform operations such as update, delete, elastic scaling, restart, and version rollback.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-statefulset.html#basic-information","title":"Basic Information","text":"
                                                  • Workload Name: can contain up to 63 characters, can only contain lowercase letters, numbers, and a separator (\"-\"), and must start and end with a lowercase letter or number, such as deployment-01. The name of the same type of workload in the same namespace cannot be repeated, and the name of the workload cannot be changed after the workload is created.
                                                  • Namespace: Select the namespace where the newly created workload will be deployed. The default namespace is used by default. If you can't find the desired namespace, you can create a new namespace according to the prompt on the page.
                                                  • Pods: Enter the number of Pod instances for the workload. One Pod instance is created by default.
                                                  • Description: Enter the description of the workload and customize the content. The number of characters cannot exceed 512.
                                                  "},{"location":"en/end-user/kpanda/workloads/create-statefulset.html#container-settings","title":"Container settings","text":"

                                                  Container settings are divided into six parts: basic information, lifecycle, health check, environment variables, data storage, and security settings. Click the tabs below to view the requirements of each part.

                                                  Container settings are configured for a single container. To add multiple containers to a pod, click + on the right to add more.

                                                  Basic information (required)Lifecycle (optional)Health Check (optional)Environment Variables (optional)Data Storage (optional)Security Settings (optional)

                                                  When configuring container-related parameters, you must correctly fill in the container name and image parameters, otherwise you will not be able to proceed to the next step. After filling in the settings with reference to the following requirements, click OK .

                                                  • Container Name: Up to 63 characters, lowercase letters, numbers and separators (\"-\") are supported. Must start and end with a lowercase letter or number, eg nginx-01.
                                                  • Image: Enter the address or name of the image. When entering the image name, the image will be pulled from the official DockerHub by default.
                                                  • Image Pull Policy: After checking Always pull image , the image will be pulled from the registry every time the workload restarts/upgrades. If it is not checked, only the local image will be pulled, and only when the image does not exist locally, it will be re-pulled from the container registry. For more details, refer to Image Pull Policy.
                                                  • Privileged container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and enjoy all the permissions of the running process on the host.
                                                  • CPU/Memory Quota: Requested value (minimum resource to be used) and limit value (maximum resource allowed to be used) of CPU/Memory resource. Please configure resources for containers as needed to avoid resource waste and system failures caused by excessive container resources. The default value is shown in the figure.
                                                  • GPU Exclusive: Configure the GPU usage for the container; only positive integers are supported. The GPU quota supports granting the container exclusive use of an entire GPU card or part of a vGPU. For example, for an 8-core GPU card, entering 8 lets the container use the entire card exclusively, while entering 1 configures a 1-core vGPU for the container.

                                                  Before setting an exclusive GPU, the administrator needs to install the GPU card and driver plugin on the cluster nodes in advance and enable the GPU feature in Cluster Settings.

                                                  Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle Configuration.

                                                  Used to judge the health status of containers and applications, which helps improve application availability. For details, refer to Container Health Check Configuration.

                                                  Configure container parameters within the Pod, add environment variables or pass settings to the Pod, etc. For details, refer to Container environment variable settings.

                                                  Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage Configuration.

                                                  Containers are securely isolated through Linux's built-in account authority isolation mechanism. You can limit container permissions by using account UIDs (digital identity tokens) with different permissions. For example, enter 0 to use the privileges of the root account.
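
                                                  A minimal sketch of how the UID setting appears in a container's securityContext (the values are illustrative; entering 0 would run the container with root privileges):

                                                  containers:\n  - name: app\n    image: nginx\n    securityContext:\n      runAsUser: 1000    # run as a non-root UID; 0 means root privileges\n      privileged: false  # keep host devices inaccessible (the default)\n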

                                                  "},{"location":"en/end-user/kpanda/workloads/create-statefulset.html#service-settings","title":"Service settings","text":"

                                                  Configure a Service for the StatefulSet so that it can be accessed externally.

                                                  1. Click the Create Service button.

                                                  2. Refer to Create Service to configure service parameters.

                                                  3. Click OK and click Next .

                                                  "},{"location":"en/end-user/kpanda/workloads/create-statefulset.html#advanced-settings","title":"Advanced settings","text":"

                                                  Advanced settings include five parts: network settings, upgrade policy, container management policy, scheduling policy, and labels and annotations. You can click the tabs below to view the requirements of each part.

                                                  Network ConfigurationUpgrade PolicyContainer Management PoliciesScheduling PoliciesLabels and Annotations
                                                  1. For container NIC settings, refer to Workload Usage IP Pool
                                                  2. DNS settings

                                                  In some cases, the application will have redundant DNS queries. Kubernetes provides DNS-related settings options for applications, which can effectively reduce redundant DNS queries and increase business concurrency in certain cases.

                                                  • DNS Policy

                                                    • Default: the container uses the domain name resolution file pointed to by the kubelet's --resolv-conf parameter. This setting can only resolve external domain names registered on the Internet, not cluster-internal domain names; no invalid DNS queries occur.
                                                    • ClusterFirstWithHostNet: for applications running with hostNetwork; set this policy so that such Pods can still resolve domain names through the cluster DNS service.
                                                    • ClusterFirst: the application connects to the cluster DNS service (Kube-DNS/CoreDNS).
                                                    • None: a new option value introduced in Kubernetes v1.9 (Beta in v1.10). After setting it to None, dnsConfig must be set; the container's domain name resolution file is then generated entirely from the dnsConfig settings.
                                                  • Nameservers: fill in the addresses of the DNS servers, such as 10.6.175.20.

                                                  • Search domains: DNS search domain list for domain name query. When specified, the provided search domain list will be merged into the search field of the domain name resolution file generated based on dnsPolicy, and duplicate domain names will be deleted. Kubernetes allows up to 6 search domains.
                                                  • Options: Configuration options for DNS, where each object can have a name attribute (required) and a value attribute (optional). The content in this field will be merged into the options field of the domain name resolution file generated based on dnsPolicy. If some options of dnsConfig options conflict with the options of the domain name resolution file generated based on dnsPolicy, they will be overwritten by dnsConfig.
                                                  • Host Alias: aliases added for specific hosts; these entries are written into the Pod's /etc/hosts file.
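
                                                  A sketch of how these DNS options land in a Pod spec (the addresses and domains are placeholders):

                                                  spec:\n  dnsPolicy: \"None\"              # resolution is generated entirely from dnsConfig below\n  dnsConfig:\n    nameservers:\n      - 10.6.175.20              # Nameservers\n    searches:\n      - ns1.svc.cluster.local    # Search domains (up to 6)\n    options:\n      - name: ndots              # Options: name is required, value is optional\n        value: \"2\"\n  hostAliases:                   # Host Alias: entries written into /etc/hosts\n    - ip: \"10.6.175.100\"\n      hostnames:\n        - \"example.internal\"\n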

                                                  • Upgrade Mode: Rolling upgrade gradually replaces instances of the old version with instances of the new version. During the upgrade, business traffic is load-balanced across old and new instances, so the business is not interrupted. Recreate upgrade deletes the old-version workload instances first and then installs the specified new version; the business is interrupted during the upgrade.
                                                  • Revision History Limit: Set the number of old versions retained for version rollback. The default is 10.
                                                  • Graceful Period: the execution window (0-9,999 seconds) for commands to run before the workload stops; the default is 30 seconds.

                                                  Kubernetes v1.7 and later versions can set Pod management policies through .spec.podManagementPolicy , which supports the following two methods:

                                                  • OrderedReady : The default Pod management policy, which means that Pods are deployed in order. Only after the deployment of the previous Pod is successfully completed, the statefulset will start to deploy the next Pod. Pods are deleted in reverse order, with the last created being deleted first.

                                                  • Parallel : Create or delete containers in parallel, just like Pods of the Deployment type. The StatefulSet controller starts or terminates all containers in parallel. There is no need to wait for a Pod to enter the Running and ready state or to stop completely before starting or terminating other Pods. This option only affects the behavior of scaling operations, not the order of updates.
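
                                                  For example, a StatefulSet that starts and terminates all of its Pods in parallel sets the field as follows (a minimal sketch; the names are illustrative):

                                                  apiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: web\nspec:\n  serviceName: web\n  replicas: 3\n  podManagementPolicy: Parallel  # OrderedReady is the default\n  selector:\n    matchLabels:\n      app: web\n  template:\n    metadata:\n      labels:\n        app: web\n    spec:\n      containers:\n        - name: nginx\n          image: nginx\n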

                                                  • Tolerance time: when the node where a workload instance is located becomes unavailable, the period after which the instance is rescheduled to another available node. The default is 300 seconds.
                                                  • Node affinity: constrains which nodes a Pod can be scheduled on, based on node labels.
                                                  • Workload affinity: constrains which nodes a Pod can be scheduled to, based on the labels of the Pods already running on those nodes.
                                                  • Workload anti-affinity: constrains which nodes a Pod cannot be scheduled to, based on the labels of the Pods already running on those nodes.
                                                  • Topology domain: namely topologyKey, which specifies how nodes are grouped for scheduling. For example, with kubernetes.io/os as the topology key, any node whose group satisfies the labelSelector conditions is eligible for scheduling.

                                                  For details, refer to Scheduling Policy.

                                                  You can click the Add button to add tags and annotations to workloads and pods.

                                                  "},{"location":"en/end-user/kpanda/workloads/create-statefulset.html#create-from-yaml","title":"Create from YAML","text":"

                                                  In addition to creating from an image, you can also create StatefulSets more quickly from YAML files.

                                                  1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the Cluster Details page.

                                                  2. On the cluster details page, click Workloads -> StatefulSets in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

                                                  3. Enter or paste the YAML file prepared in advance, click OK to complete the creation.

                                                  Click to see an example YAML for creating a statefulSet
                                                  kind: StatefulSet\napiVersion: apps/v1\nmetadata:\n  name: test-mysql-123-mysql\n  namespace: default\n  uid: d3f45527-a0ab-4b22-9013-5842a06f4e0e\n  resourceVersion: '20504385'\n  generation: 1\n  creationTimestamp: '2022-09-22T09:34:10Z'\n  ownerReferences:\n    - apiVersion: mysql.presslabs.org/v1alpha1\n      kind: MysqlCluster\n      name: test-mysql-123\n      uid: 5e877cc3-5167-49da-904e-820940cf1a6d\n      controller: true\n      blockOwnerDeletion: true\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app.kubernetes.io/managed-by: mysql.presslabs.org\n      app.kubernetes.io/name: mysql\n      mysql.presslabs.org/cluster: test-mysql-123\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app.kubernetes.io/component: database\n        app.kubernetes.io/instance: test-mysql-123\n        app.kubernetes.io/managed-by: mysql.presslabs.org\n        app.kubernetes.io/name: mysql\n        app.kubernetes.io/version: 5.7.31\n        mysql.presslabs.org/cluster: test-mysql-123\n      annotations:\n        config_rev: '13941099'\n        prometheus.io/port: '9125'\n        prometheus.io/scrape: 'true'\n        secret_rev: '13941101'\n    spec:\n      volumes:\n        - name: conf\n          emptyDir: {}\n        - name: init-scripts\n          emptyDir: {}\n        - name: config-map\n          configMap:\n            name: test-mysql-123-mysql\n            defaultMode: 420\n        - name: data\n          persistentVolumeClaim:\n            claimName: data\n      initContainers:\n        - name: init\n          image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - clone-and-init\n          envFrom:\n            - secretRef:\n                name: test-mysql-123-mysql-operated\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: BACKUP_USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: BACKUP_USER\n                  optional: true\n            - name: BACKUP_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: BACKUP_PASSWORD\n                  optional: true\n          resources: {}\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: config-map\n              mountPath: /mnt/conf\n            - name: data\n              mountPath: /var/lib/mysql\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n      containers:\n
        - name: mysql\n          image: docker.m.daocloud.io/mysql:5.7.31\n          ports:\n            - name: mysql\n              containerPort: 3306\n              protocol: TCP\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: ORCH_CLUSTER_ALIAS\n              value: test-mysql-123.default\n            - name: ORCH_HTTP_API\n              value: http://mysql-operator.mcamel-system/api\n            - name: MYSQL_ROOT_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: ROOT_PASSWORD\n                  optional: false\n            - name: MYSQL_USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: USER\n                  optional: true\n            - name: MYSQL_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: PASSWORD\n                  optional: true\n            - name: MYSQL_DATABASE\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: DATABASE\n                  optional: true\n          resources:\n            limits:\n              cpu: '1'\n              memory: 1Gi\n            requests:\n              cpu: 100m\n              memory: 512Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: data\n              mountPath: /var/lib/mysql\n          livenessProbe:\n            exec:\n              command:\n                - mysqladmin\n                - '--defaults-file=/etc/mysql/client.conf'\n                - ping\n            initialDelaySeconds: 60\n            timeoutSeconds: 5\n            periodSeconds: 5\n            successThreshold: 1\n            failureThreshold: 3\n          readinessProbe:\n            exec:\n              command:\n                - /bin/sh\n                - '-c'\n                - >-\n                  test $(mysql --defaults-file=/etc/mysql/client.conf -NB -e\n                  'SELECT COUNT(*) FROM sys_operator.status WHERE\n                  name=\"configured\" AND value=\"1\"') -eq 1\n            initialDelaySeconds: 5\n            timeoutSeconds: 5\n            periodSeconds: 2\n            successThreshold: 1\n            failureThreshold: 3\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - bash\n                  - /etc/mysql/pre-shutdown-ha.sh\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: sidecar\n          image: 
docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - config-and-serve\n          ports:\n            - name: sidecar-http\n              containerPort: 8080\n              protocol: TCP\n          envFrom:\n            - secretRef:\n                name: test-mysql-123-mysql-operated\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: XTRABACKUP_TARGET_DIR\n              value: /tmp/xtrabackup_backupfiles/\n          resources:\n            limits:\n              cpu: '1'\n              memory: 1Gi\n            requests:\n              cpu: 10m\n              memory: 64Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: data\n              mountPath: /var/lib/mysql\n          readinessProbe:\n            httpGet:\n              path: /health\n              port: 8080\n              scheme: HTTP\n            initialDelaySeconds: 30\n            timeoutSeconds: 5\n            periodSeconds: 5\n            successThreshold: 1\n            failureThreshold: 3\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: metrics-exporter\n          image: prom/mysqld-exporter:v0.13.0\n          args:\n            - '--web.listen-address=0.0.0.0:9125'\n            - '--web.telemetry-path=/metrics'\n            - '--collect.heartbeat'\n            - '--collect.heartbeat.database=sys_operator'\n          ports:\n            - name: prometheus\n              containerPort: 9125\n              protocol: TCP\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: METRICS_EXPORTER_USER\n                  optional: false\n            - name: PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: 
test-mysql-123-mysql-operated\n                  key: METRICS_EXPORTER_PASSWORD\n                  optional: false\n            - name: DATA_SOURCE_NAME\n              value: $(USER):$(PASSWORD)@(127.0.0.1:3306)/\n          resources:\n            limits:\n              cpu: 100m\n              memory: 128Mi\n            requests:\n              cpu: 10m\n              memory: 32Mi\n          livenessProbe:\n            httpGet:\n              path: /metrics\n              port: 9125\n              scheme: HTTP\n            initialDelaySeconds: 30\n            timeoutSeconds: 30\n            periodSeconds: 30\n            successThreshold: 1\n            failureThreshold: 3\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: pt-heartbeat\n          image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - pt-heartbeat\n            - '--update'\n            - '--replace'\n            - '--check-read-only'\n            - '--create-table'\n            - '--database'\n            - sys_operator\n            - '--table'\n            - heartbeat\n            - '--utc'\n            - '--defaults-file'\n            - /etc/mysql/heartbeat.conf\n            - '--fail-successive-errors=20'\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n          resources:\n            limits:\n              cpu: 100m\n              memory: 64Mi\n            requests:\n              cpu: 10m\n              memory: 32Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n      restartPolicy: Always\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      securityContext:\n        runAsUser: 999\n        fsGroup: 999\n      affinity:\n        podAntiAffinity:\n          preferredDuringSchedulingIgnoredDuringExecution:\n            - weight: 100\n              podAffinityTerm:\n                labelSelector:\n                  matchLabels:\n                    app.kubernetes.io/component: database\n                    app.kubernetes.io/instance: test-mysql-123\n                    app.kubernetes.io/managed-by: mysql.presslabs.org\n                    app.kubernetes.io/name: mysql\n                    app.kubernetes.io/version: 5.7.31\n                    mysql.presslabs.org/cluster: test-mysql-123\n                topologyKey: kubernetes.io/hostname\n      schedulerName: default-scheduler\n  volumeClaimTemplates:\n    - kind: PersistentVolumeClaim\n      apiVersion: v1\n      metadata:\n        name: data\n        
creationTimestamp: null\n        ownerReferences:\n          - apiVersion: mysql.presslabs.org/v1alpha1\n            kind: MysqlCluster\n            name: test-mysql-123\n            uid: 5e877cc3-5167-49da-904e-820940cf1a6d\n            controller: true\n      spec:\n        accessModes:\n          - ReadWriteOnce\n        resources:\n          limits:\n            storage: 1Gi\n          requests:\n            storage: 1Gi\n        storageClassName: local-path\n        volumeMode: Filesystem\n      status:\n        phase: Pending\n  serviceName: mysql\n  podManagementPolicy: OrderedReady\n  updateStrategy:\n    type: RollingUpdate\n    rollingUpdate:\n      partition: 0\n  revisionHistoryLimit: 10\nstatus:\n  observedGeneration: 1\n  replicas: 1\n  readyReplicas: 1\n  currentReplicas: 1\n  updatedReplicas: 1\n  currentRevision: test-mysql-123-mysql-6b8f5577c7\n  updateRevision: test-mysql-123-mysql-6b8f5577c7\n  collisionCount: 0\n  availableReplicas: 1\n
                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/env-variables.html","title":"Configure environment variables","text":"

                                                  An environment variable refers to a variable set in the container running environment, which is used to add environment flags to Pods or transfer configurations, etc. It supports configuring environment variables for Pods in the form of key-value pairs.

                                                  Suanova container management adds a graphical interface for configuring environment variables for Pods on top of native Kubernetes, and supports the following configuration methods (a combined sample manifest follows the list):

                                                  • Key-value pair (Key/Value Pair): use a custom key-value pair as the environment variable of the container.

                                                  • Resource reference (Resource): use a field defined by the container as the value of the environment variable, such as the container's CPU or memory limits and requests.

                                                  • Pod field reference (Pod Field): use a Pod field as the value of the environment variable, such as the Pod's name.

                                                  • ConfigMap key import (ConfigMap Key): import the value of a key in a ConfigMap as the value of the environment variable.

                                                  • Secret key import (Secret Key): use the data of a key in a Secret to define the value of the environment variable.

                                                  • Secret import (Secret): import all keys in a Secret as environment variables.

                                                  • ConfigMap import (ConfigMap): import all keys in a ConfigMap as environment variables.
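
                                                  A minimal sketch combining several of these methods in one container; the ConfigMap (app-config) and Secret (app-secret) names are illustrative and must exist in the namespace:

                                                  containers:\n  - name: app\n    image: busybox\n    env:\n      - name: MODE                     # key-value pair\n        value: \"production\"\n      - name: POD_NAME                 # Pod field reference\n        valueFrom:\n          fieldRef:\n            fieldPath: metadata.name\n      - name: MEM_LIMIT                # resource reference\n        valueFrom:\n          resourceFieldRef:\n            containerName: app\n            resource: limits.memory\n      - name: DB_HOST                  # ConfigMap key import\n        valueFrom:\n          configMapKeyRef:\n            name: app-config\n            key: db_host\n      - name: DB_PASSWORD              # Secret key import\n        valueFrom:\n          secretKeyRef:\n            name: app-secret\n            key: password\n    envFrom:\n      - configMapRef:                  # ConfigMap import (all keys)\n          name: app-config\n      - secretRef:                     # Secret import (all keys)\n          name: app-secret\n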

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/health-check.html","title":"Container health check","text":"

                                                  Container health check checks the health status of containers according to user requirements. After configuration, if the application in the container is abnormal, the container will automatically restart and recover. Kubernetes provides Liveness checks, Readiness checks, and Startup checks.

                                                  • LivenessProbe can detect application deadlock (the application is running, but cannot continue to run the following steps). Restarting containers in this state can help improve the availability of applications, even if there are bugs in them.

                                                  • ReadinessProbe can detect when a container is ready to accept request traffic. A Pod can only be considered ready when all containers in a Pod are ready. One use of this signal is to control which Pod is used as the backend of the Service. If the Pod is not ready, it will be removed from the Service's load balancer.

                                                  • Startup check (StartupProbe) can know when the application container is started. After configuration, it can control the container to check the viability and readiness after it starts successfully, so as to ensure that these liveness and readiness probes will not affect the start of the application. Startup detection can be used to perform liveness checks on slow-starting containers, preventing them from being killed before they start running.

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/health-check.html#liveness-and-readiness-checks","title":"Liveness and readiness checks","text":"

                                                  The configuration of a readiness probe is similar to that of a liveness probe; the only difference is to use the readinessProbe field instead of the livenessProbe field.

                                                  HTTP GET parameter description:

                                                  • Path (path): the request path to access, such as /healthz in the example below.
                                                  • Port (port): the port the service listens on, such as 8080 in the example below.
                                                  • Protocol (scheme): the access protocol, HTTP or HTTPS.
                                                  • Delay time (initialDelaySeconds): the delay, in seconds, before the first check. This setting relates to the normal startup time of the business program; for example, a value of 30 means the health check starts 30 seconds after the container starts, leaving that time for the program to start.
                                                  • Timeout (timeoutSeconds): the timeout, in seconds. For example, a value of 10 means the check waits at most 10 seconds; beyond that the check is regarded as failed. If set to 0 or not set, the default timeout is 1 second.
                                                  • Success threshold (successThreshold): the minimum number of consecutive successes required for the probe to be considered successful after having failed. The default and minimum value is 1. This value must be 1 for liveness and startup probes.
                                                  • Failure threshold (failureThreshold): the number of retries when the probe fails. Giving up in the case of a liveness probe means restarting the container; Pods abandoned due to readiness probes are marked as not ready. The default value is 3; the minimum value is 1.
                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/health-check.html#check-with-http-get-request","title":"Check with HTTP GET request","text":"

                                                  YAML example:

                                                  apiVersion: v1\nkind: Pod\nmetadata:\n  labels:\n    test: liveness\n  name: liveness-http\nspec:\n  containers:\n  - name: liveness  # Container name\n    image: k8s.gcr.io/liveness  # Container image\n    args:\n    - /server  # Arguments to pass to the container\n    livenessProbe:\n      httpGet:\n        path: /healthz  # Access request path\n        port: 8080  # Service listening port\n        httpHeaders:\n        - name: Custom-Header  # Custom header name\n          value: Awesome  # Custom header value\n      initialDelaySeconds: 3  # Wait 3 seconds before the first probe\n      periodSeconds: 3  # Perform liveness detection every 3 seconds\n

                                                  According to the set rules, Kubelet sends an HTTP GET request to the service running in the container (the service is listening on port 8080) to perform the detection. The kubelet considers the container alive if the handler under the /healthz path on the server returns a success code. If the handler returns a failure code, the kubelet kills the container and restarts it. Any return code greater than or equal to 200 and less than 400 indicates success, and any other return code indicates failure. The /healthz handler returns a 200 status code for the first 10 seconds of the container's lifetime. The handler then returns a status code of 500.

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/health-check.html#use-tcp-port-check","title":"Use TCP port check","text":"

                                                  TCP port parameter description:

                                                  • Port (port): the port the service listens on, such as 8080 in the example below.
                                                  • Delay time (initialDelaySeconds): the delay, in seconds, before the first check. This setting relates to the normal startup time of the business program; for example, a value of 30 means the health check starts 30 seconds after the container starts, leaving that time for the program to start.
                                                  • Timeout (timeoutSeconds): the timeout, in seconds. For example, a value of 10 means the check waits at most 10 seconds; beyond that the check is regarded as failed. If set to 0 or not set, the default timeout is 1 second.

                                                  For a container that provides TCP communication services, the cluster establishes a TCP connection to the container according to the set rules. If the connection succeeds, the probe succeeds; otherwise it fails. If you choose the TCP port probe method, you must specify the port the container listens on.

                                                  YAML example:

                                                  apiVersion: v1\nkind: Pod\nmetadata:\n  name: goproxy\n  labels:\n    app: goproxy\nspec:\n  containers:\n  - name: goproxy\n    image: k8s.gcr.io/goproxy:0.1\n    ports:\n    - containerPort: 8080\n    readinessProbe:\n      tcpSocket:\n        port: 8080\n      initialDelaySeconds: 5\n      periodSeconds: 10\n    livenessProbe:\n      tcpSocket:\n        port: 8080\n      initialDelaySeconds: 15\n      periodSeconds: 20\n

                                                  This example uses both readiness and liveness probes. The kubelet sends the first readiness probe 5 seconds after the container starts and attempts to connect to port 8080 of the goproxy container. If the probe succeeds, the Pod is marked as ready and the kubelet continues to run the check every 10 seconds.

                                                  In addition to the readiness probe, this configuration includes a liveness probe. The kubelet performs the first liveness probe 15 seconds after the container starts. Like the readiness probe, it attempts to connect to the goproxy container on port 8080. If the liveness probe fails, the container is restarted.

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/health-check.html#run-command-check","title":"Run command check","text":"

                                                  YAML example:

                                                  apiVersion: v1\nkind: Pod\nmetadata:\n  labels:\n    test: liveness\n  name: liveness-exec\nspec:\n  containers:\n  - name: liveness  # Container name\n    image: k8s.gcr.io/busybox  # Container image\n    args:\n    - /bin/sh  # Command to run\n    - -c  # Pass the following string as a command\n    - touch /tmp/healthy; sleep 30; rm -f /tmp/healthy; sleep 600  # Command to execute\n    livenessProbe:\n      exec:\n        command:\n        - cat  # Command to check liveness\n        - /tmp/healthy  # File to check\n      initialDelaySeconds: 5  # Wait 5 seconds before the first probe\n      periodSeconds: 5  # Perform liveness detection every 5 seconds\n

                                                  The periodSeconds field specifies that the kubelet performs a liveness probe every 5 seconds, and the initialDelaySeconds field specifies that the kubelet waits for 5 seconds before performing the first probe. According to the set rules, the cluster periodically executes the command cat /tmp/healthy in the container through the kubelet to detect. If the command executes successfully and the return value is 0, the kubelet considers the container to be healthy and alive. If this command returns a non-zero value, the kubelet will kill the container and restart it.

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/health-check.html#protect-slow-starting-containers-with-pre-start-checks","title":"Protect slow-starting containers with pre-start checks","text":"

                                                  Some applications require a long initialization time at startup. In such cases, you can configure a startup probe using the same command or request as the liveness probe, and set failureThreshold * periodSeconds long enough to cover the worst-case startup time.

                                                  YAML example:

                                                  ports:\n- name: liveness-port\n  containerPort: 8080\n  hostPort: 8080\n\nlivenessProbe:\n  httpGet:\n    path: /healthz\n    port: liveness-port\n  failureThreshold: 1\n  periodSeconds: 10\n\nstartupProbe:\n  httpGet:\n    path: /healthz\n    port: liveness-port\n  failureThreshold: 30\n  periodSeconds: 10\n

                                                  With the above settings, the application has up to 5 minutes (30 * 10 = 300s) to complete its startup. Once the startup probe succeeds, the liveness probe takes over monitoring the container and reacts quickly to container deadlocks. If the startup probe never succeeds, the container is killed after 300 seconds and handled according to its restartPolicy.

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/job-parameters.html","title":"Description of job parameters","text":"

                                                  According to the settings of .spec.completions and .spec.parallelism, jobs (Job) can be divided into the following types:

                                                  • Non-parallel Job: creates a Pod until its Job completes successfully.
                                                  • Parallel Jobs with deterministic completion counts: the Job is considered complete when the number of successful Pods reaches .spec.completions.
                                                  • Parallel Job: creates one or more Pods until one finishes successfully.

                                                  Parameter description:

                                                  • RestartPolicy: the restart policy of the Job's Pods; only Never or OnFailure is allowed for a Job.
                                                  • .spec.completions: the number of Pods that need to run successfully for the Job to finish; the default is 1.
                                                  • .spec.parallelism: the number of Pods running in parallel; the default is 1.
                                                  • spec.backoffLimit: the maximum number of retries for failed Pods, beyond which no more retries happen.
                                                  • .spec.activeDeadlineSeconds: the maximum running time of the Pods. Once this time is reached, the Job, and with it all of its Pods, is stopped. activeDeadlineSeconds has a higher priority than backoffLimit: a Job that reaches activeDeadlineSeconds ignores the backoffLimit setting.

                                                  The following is an example Job configuration, saved in myjob.yaml, which calculates \u03c0 to 2000 digits and prints the output.

                                                  apiVersion: batch/v1\nkind: Job #The type of the current resource\nmetadata:\n  name: myjob\nspec:\n  completions: 50 # Job needs to run 50 Pods at the end, in this example it prints \u03c0 50 times\n  parallelism: 5 # 5 Pods in parallel\n  backoffLimit: 5 # retry up to 5 times\n  template:\n    spec:\n      containers:\n      - name: pi\n        image: perl\n        command: [\"perl\", \"-Mbignum=bpi\", \"-wle\", \"print bpi(2000)\"]\n      restartPolicy: Never #restart policy\n

                                                  Related commands

                                                  kubectl apply -f myjob.yaml    # Start the job\nkubectl get job                # View this job\nkubectl logs myjob-1122dswzs   # View Job Pod logs\n
                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/lifecycle.html","title":"Configure the container lifecycle","text":"

                                                  Pods follow a predefined lifecycle, starting in the Pending phase and entering the Running state if at least one of their containers starts normally. If any container in the Pod ends in a failed state, the state becomes Failed. The following phase field values indicate which phase of the lifecycle a Pod is in.

                                                  • Pending: the Pod has been accepted by the system, but one or more containers have not yet been created or run. This phase includes waiting for the Pod to be scheduled and downloading images over the network.
                                                  • Running: the Pod has been bound to a node and all containers in the Pod have been created. At least one container is still running, or is in the process of starting or restarting.
                                                  • Succeeded: all containers in the Pod terminated successfully and will not be restarted.
                                                  • Failed: all containers in the Pod have terminated, and at least one container terminated due to failure, that is, the container exited with a non-zero status or was terminated by the system.
                                                  • Unknown: the status of the Pod cannot be obtained for some reason, usually due to a communication failure with the host where the Pod resides.

                                                  When creating a workload in Suanova container management, images are usually used to specify the running environment in the container. By default, when building an image, the Entrypoint and CMD fields can be used to define the commands and parameters to be executed when the container is running. If you need to change the commands and parameters of the container image before starting, after starting, and before stopping, you can override the default commands and parameters in the image by setting the lifecycle event commands and parameters of the container.

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/lifecycle.html#lifecycle-configuration","title":"Lifecycle configuration","text":"

                                                  Configure the startup command, post-start command, and pre-stop command of the container according to business needs.
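
                                                  A minimal sketch of how the three settings map onto a container spec: the start command maps to command/args, while the post-start and pre-stop commands map to lifecycle hooks (the paths and ports are placeholders):

                                                  containers:\n  - name: app\n    image: nginx\n    command: [\"/run/server\"]           # start command\n    args: [\"port=8080\"]                # running parameters\n    lifecycle:\n      postStart:                       # post-start command (command line script type)\n        exec:\n          command: [\"/bin/sh\", \"-c\", \"echo started > /tmp/started\"]\n      preStop:                         # pre-stop command (HTTP request type)\n        httpGet:\n          path: /run/server            # URL path\n          port: 8080                   # port\n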

                                                  • Start command: optional. The container is started according to the start command.
                                                  • Post-start command: optional. A command run after the container starts.
                                                  • Pre-stop command: optional. A command executed by the container after it receives the stop command, ensuring that the services running in the instance can be drained before the instance is upgraded or deleted.
                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/lifecycle.html#start-command","title":"start command","text":"

                                                  Configure the startup command according to the table below.

                                                  • Run command: required. Enter an executable command; separate multiple commands with spaces. If the command itself contains spaces, wrap it in quotes (\"\"). When there are multiple commands, it is recommended to use /bin/sh or another shell to run the command and pass all other commands in as parameters. Example: /run/server
                                                  • Running parameters: optional. Enter the parameters of the command, for example port=8080.
                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/lifecycle.html#post-start-commands","title":"Post-start commands","text":"

                                                  Suanova provides two processing types, command line script and HTTP request, to configure post-start commands. You can choose the configuration method that suits you according to the table below.

                                                  Command line script configuration

                                                  • Run Command: optional. Enter an executable command; separate multiple commands with spaces. If the command itself contains spaces, wrap it in quotes (\"\"). When there are multiple commands, it is recommended to use /bin/sh or another shell to run the command and pass all other commands in as parameters. Example: /run/server
                                                  • Running parameters: optional. Enter the parameters of the command, for example port=8080.
                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/lifecycle.html#stop-pre-command","title":"stop pre-command","text":"

                                                  Suanova provides two processing types, command line script and HTTP request, to configure the pre-stop command. You can choose the configuration method that suits you according to the table below.

                                                  HTTP request configuration

                                                  • URL Path: optional. The requested URL path. Example: /run/server
                                                  • Port: required. The requested port. Example: port=8080
                                                  • Node Address: optional. The requested IP address; defaults to the node IP where the container is located.
                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html","title":"Scheduling Policy","text":"

                                                  In a Kubernetes cluster, like many other Kubernetes objects, nodes have labels. You can manually add labels. Kubernetes also adds some standard labels to all nodes in the cluster. See Common Labels, Annotations, and Taints for common node labels. By adding labels to nodes, you can have pods scheduled on specific nodes or groups of nodes. You can use this feature to ensure that specific Pods can only run on nodes with certain isolation, security or governance properties.

                                                  nodeSelector is the simplest recommended form of node selection constraint. You can add the nodeSelector field to a Pod's spec and specify the node labels the target node must have. Kubernetes only schedules the Pod onto nodes that have each of the specified labels. nodeSelector is one of the easiest ways to constrain Pods to nodes with specific labels, while affinity and anti-affinity expand the types of constraints you can define. Some benefits of using affinity and anti-affinity are:

                                                  • Affinity and anti-affinity languages are more expressive. nodeSelector can only select nodes that have all the specified labels. Affinity, anti-affinity give you greater control over selection logic.

                                                  • You can mark a rule as a \"soft requirement\" or \"preference\", so that the scheduler still schedules the Pod even if no matching node can be found.

                                                  • You can constrain scheduling using the labels of other Pods running on the node (or in other topology domains), rather than only the node's own labels. This lets you define rules for which Pods can be placed together.

                                                  By setting affinity and anti-affinity, you can control which nodes Pods are deployed to.
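
                                                  As a hedged, minimal sketch, the simplest hard constraint with nodeSelector looks like this, assuming a hypothetical disktype=ssd node label; the affinity forms described in the sections below are the softer, more expressive variants:

                                                  spec:\n  nodeSelector:\n    disktype: ssd   # hypothetical node label\n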

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#tolerance-time","title":"Tolerance time","text":"

                                                  When the node hosting a workload instance becomes unavailable, this is the period after which the system reschedules the instance to other available nodes. The default is 300 seconds.
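
                                                  A hedged sketch of how such a tolerance period is typically expressed in Kubernetes, via tolerationSeconds on the NoExecute tolerations for not-ready and unreachable nodes (300 matches the default mentioned above):

                                                  tolerations:\n  - key: node.kubernetes.io/not-ready\n    effect: NoExecute\n    tolerationSeconds: 300   # reschedule after 300s on an unavailable node\n  - key: node.kubernetes.io/unreachable\n    effect: NoExecute\n    tolerationSeconds: 300\n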

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#node-affinity-nodeaffinity","title":"Node affinity (nodeAffinity)","text":"

                                                  Node affinity is conceptually similar to nodeSelector: it allows you to constrain which nodes a Pod can be scheduled on based on node labels. There are two types of node affinity:

                                                  • Must be satisfied (requiredDuringSchedulingIgnoredDuringExecution): the scheduler can only schedule the Pod when the rules are satisfied. This works like nodeSelector, but with a more expressive syntax. You can define multiple hard constraint rules, of which only one needs to be satisfied.

                                                  • Satisfy as much as possible (preferredDuringSchedulingIgnoredDuringExecution): the scheduler tries to find nodes that match the rules. If no matching node is found, the scheduler still schedules the Pod. You can also set weights for soft constraint rules: when multiple nodes satisfy the conditions, the node with the highest weight is scheduled first. Likewise, you can define multiple soft constraint rules, of which only one needs to be satisfied.

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#tag-name","title":"Tag name","text":"

                                                  The node label to match. You can use a default label or a user-defined label.

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#operators","title":"Operators","text":"
                                                  • In: the label's value must be in the given list of values
                                                  • NotIn: the label's value must not be in the given list of values
                                                  • Exists: the label must exist; no label value needs to be set
                                                  • DoesNotExist: the label must not exist; no label value needs to be set
                                                  • Gt: the label's value is greater than the given value (integer comparison)
                                                  • Lt: the label's value is less than the given value (integer comparison)
                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#weights","title":"Weights","text":"

                                                  Weights can only be added to \"satisfy as much as possible\" (soft constraint) rules and can be understood as scheduling priority: nodes with the highest weight are scheduled first. The value range is 1 to 100.
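
                                                  A hedged sketch combining the In operator with a weight in a soft node-affinity rule; the label key and value are placeholders:

                                                  affinity:\n  nodeAffinity:\n    preferredDuringSchedulingIgnoredDuringExecution:\n      - weight: 80                 # 1-100; higher is preferred\n        preference:\n          matchExpressions:\n            - key: disktype        # placeholder label key\n              operator: In\n              values: [\"ssd\"]\n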

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#workload-affinity","title":"Workload Affinity","text":"

                                                  Similar to node affinity, there are two types of workload affinity:

                                                  • Must be satisfied (requiredDuringSchedulingIgnoredDuringExecution): the scheduler can only schedule the Pod when the rules are satisfied. This works like nodeSelector, but with a more expressive syntax. You can define multiple hard constraint rules, of which only one needs to be satisfied.
                                                  • Satisfy as much as possible (preferredDuringSchedulingIgnoredDuringExecution): the scheduler tries to find nodes that match the rules. If no matching node is found, the scheduler still schedules the Pod. You can also set weights for soft constraint rules: when multiple nodes satisfy the conditions, the node with the highest weight is scheduled first. Likewise, you can define multiple soft constraint rules, of which only one needs to be satisfied.

                                                  Workload affinity is mainly used to determine which Pods of a workload can be deployed in the same topology domain. For example, services that communicate with each other can be placed in the same topology domain (such as the same availability zone) through affinity scheduling to reduce the network latency between them.
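
                                                  For example, a hedged sketch that co-locates Pods carrying a placeholder app=backend label in the same availability zone:

                                                  affinity:\n  podAffinity:\n    requiredDuringSchedulingIgnoredDuringExecution:\n      - labelSelector:\n          matchLabels:\n            app: backend                            # placeholder Pod label\n        topologyKey: topology.kubernetes.io/zone    # same zone = same topology domain\n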

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#tag-name_1","title":"Tag name","text":"

                                                  The Pod label to match. You can use a default label or a user-defined label.

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#namespaces","title":"Namespaces","text":"

                                                  Specifies the namespace in which the scheduling policy takes effect.

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#operators_1","title":"Operators","text":"
                                                  • In: the label's value must be in the given list of values
                                                  • NotIn: the label's value must not be in the given list of values
                                                  • Exists: the label must exist; no label value needs to be set
                                                  • DoesNotExist: the label must not exist; no label value needs to be set
                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#topology-domain","title":"Topology domain","text":"

                                                  Specifies the scope of influence during scheduling. For example, if you specify kubernetes.io/hostname, scheduling is distinguished at the level of individual nodes.

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#workload-anti-affinity","title":"Workload Anti-Affinity","text":"

                                                  Similar to node affinity, there are two types of anti-affinity for workloads:

                                                  • Must be satisfied (requiredDuringSchedulingIgnoredDuringExecution): the scheduler can only schedule the Pod when the rules are satisfied. This works like nodeSelector, but with a more expressive syntax. You can define multiple hard constraint rules, of which only one needs to be satisfied.
                                                  • Satisfy as much as possible (preferredDuringSchedulingIgnoredDuringExecution): the scheduler tries to find nodes that match the rules. If no matching node is found, the scheduler still schedules the Pod. You can also set weights for soft constraint rules: when multiple nodes satisfy the conditions, the node with the highest weight is scheduled first. Likewise, you can define multiple soft constraint rules, of which only one needs to be satisfied.

                                                  Workload anti-affinity is mainly used to determine which Pods of a workload cannot be deployed in the same topology domain. For example, spreading the Pods of one workload across different topology domains (such as different hosts) improves the stability of the workload itself.
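
                                                  For example, a hedged sketch that spreads replicas carrying a placeholder app=web label across different hosts:

                                                  affinity:\n  podAntiAffinity:\n    requiredDuringSchedulingIgnoredDuringExecution:\n      - labelSelector:\n          matchLabels:\n            app: web                         # placeholder Pod label\n        topologyKey: kubernetes.io/hostname  # at most one matching Pod per node\n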

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#tag-name_2","title":"Tag name","text":"

                                                  The Pod label to match. You can use a default label or a user-defined label.

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#namespaces_1","title":"Namespaces","text":"

                                                  Specifies the namespace in which the scheduling policy takes effect.

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#operators_2","title":"Operators","text":"
                                                  • In: the label's value must be in the given list of values
                                                  • NotIn: the label's value must not be in the given list of values
                                                  • Exists: the label must exist; no label value needs to be set
                                                  • DoesNotExist: the label must not exist; no label value needs to be set
                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#topology-domain_1","title":"Topology domain","text":"

                                                  Specifies the scope of influence during scheduling. For example, if you specify kubernetes.io/hostname, scheduling is distinguished at the level of individual nodes.

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/workload-status.html","title":"Workload Status","text":"

                                                  A workload is an application running on Kubernetes. Whether your application is composed of a single component or of many different components, you can run it in a set of Pods. Kubernetes provides five built-in workload resources to manage Pods:

                                                  • Deployment
                                                  • StatefulSet
                                                  • DaemonSet
                                                  • Job
                                                  • CronJob

                                                  You can also extend workload resources with custom resource definitions (CRDs). The fifth-generation container management supports full lifecycle management of workloads, including creation, update, scaling, monitoring, logging, deletion, and version management.

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/workload-status.html#pod-status","title":"Pod Status","text":"

                                                  A Pod is the smallest computing unit created and managed in Kubernetes: a collection of containers that share storage, networking, and the management policies that control how they run. Pods are typically created through workload resources rather than directly by users. A Pod follows a predefined lifecycle: it starts in the Pending phase, enters Running once at least one of its primary containers starts normally, and then moves to Succeeded or Failed depending on whether any container in the Pod terminated in failure.

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/workload-status.html#workload-status_1","title":"Workload Status","text":"

                                                  Based on factors such as Pod status and replica count, the fifth-generation container management module defines a built-in set of workload lifecycle statuses so that users can perceive the actual running state of workloads more accurately. Because different workload types (such as Deployments and Jobs) manage Pods differently, they expose different lifecycle statuses at runtime, as shown in the following tables.

                                                  "},{"location":"en/end-user/kpanda/workloads/pod-config/workload-status.html#deployment-statefulset-damemonset-status","title":"Deployment, StatefulSet, DamemonSet Status","text":"Status Description Waiting 1. A workload is in this status while its creation is in progress. 2. After an upgrade or rollback action is triggered, the workload is in this status. 3. Trigger operations such as pausing/scaling, and the workload is in this status. Running This status occurs when all instances under the workload are running and the number of replicas matches the user-defined number. Deleting When a delete operation is performed, the payload is in this status until the delete is complete. Exception Unable to get the status of the workload for some reason. This usually occurs because communication with the pod's host has failed. Not Ready When the container is in an abnormal, pending status, this status is displayed when the workload cannot be started due to an unknown error"},{"location":"en/end-user/kpanda/workloads/pod-config/workload-status.html#job-status","title":"Job Status","text":"Status Description Waiting The workload is in this status while Job creation is in progress. Executing The Job is in progress and the workload is in this status. Execution Complete The Job execution is complete and the workload is in this status. Deleting A delete operation is triggered and the workload is in this status. Exception Pod status could not be obtained for some reason. This usually occurs because communication with the pod's host has failed."},{"location":"en/end-user/kpanda/workloads/pod-config/workload-status.html#cronjob-status","title":"CronJob status","text":"Status Description Waiting The CronJob is in this status when it is being created. Started After the CronJob is successfully created, the CronJob is in this status when it is running normally or when the paused task is started. Stopped The CronJob is in this status when the stop task operation is performed. Deleting The deletion operation is triggered, and the CronJob is in this status.

                                                  When a workload is in an abnormal or not-ready status, you can hover over the workload's status value and the system will display more detailed error information in a tooltip. You can also view the logs or events to obtain runtime information about the workload.

                                                  "},{"location":"en/end-user/register/index.html","title":"User Registration","text":"

                                                  New users need to register when using the AI platform for the first time.

                                                  "},{"location":"en/end-user/register/index.html#prerequisites","title":"Prerequisites","text":"
                                                  • The AI platform is installed
                                                  • Email registration functionality is enabled
                                                  • An available email address
                                                  "},{"location":"en/end-user/register/index.html#email-registration-steps","title":"Email Registration Steps","text":"
                                                  1. Open the AI platform homepage at https://ai.isuanova.com/ and click Register.

                                                  2. Enter your username, password, and email, then click Register.

                                                  3. The system will prompt that an email has been sent to your inbox.

                                                  4. Log in to your email account, find the email, and click the link.

                                                  5. Congratulations, you have successfully accessed the AI platform and can now start your AI journey.

                                                  "},{"location":"en/end-user/share/notebook.html","title":"Using Notebook","text":"

                                                  Notebook usually refers to Jupyter Notebook or similar interactive computing environments. It is a very popular tool widely used in fields such as data science, machine learning, and deep learning. This page explains how to use Notebook in the AI platform.

                                                  "},{"location":"en/end-user/share/notebook.html#prerequisites","title":"Prerequisites","text":"
                                                  • The AI platform is installed
                                                  • User has successfully registered
                                                  • The administrator has assigned a workspace to the user
                                                  • Datasets (code, data, etc.) are prepared
                                                  "},{"location":"en/end-user/share/notebook.html#creating-and-using-notebook-instances","title":"Creating and Using Notebook Instances","text":"
                                                  1. Log into the AI platform as an Administrator.
                                                  2. Navigate to AI Lab -> Operator -> Queue Management, and click the Create button on the right.

                                                  3. Enter a name, select the cluster, workspace, and quota, then click OK.

                                                  4. Log into the AI platform as a User, navigate to AI Lab -> Notebook, and click the Create button on the right.

                                                  5. After configuring the various parameters, click OK.

                                                    Basic Information / Resource Configuration / Advanced Configuration

                                                    Enter a name, select the cluster, namespace, choose the queue just created, and click One-Click Initialization.

                                                    Select the Notebook type, configure memory, CPU, enable GPU, create and configure PVC:

                                                    Enable SSH external network access:

                                                  6. You will be automatically redirected to the Notebook instance list; click the instance name.

                                                  7. Enter the Notebook instance detail page and click the Open button in the upper right corner.

                                                  8. You have entered the Notebook development environment, where a persistent volume is mounted at the /home/jovyan directory. You can clone code via git, upload data over an SSH connection, and so on.

                                                  "},{"location":"en/end-user/share/notebook.html#accessing-notebook-instances-via-ssh","title":"Accessing Notebook Instances via SSH","text":"
                                                  1. Generate an SSH key pair on your own computer.

                                                    Open the command line on your computer (for example, Git Bash on Windows), enter ssh-keygen.exe -t rsa, and press Enter through the prompts.

                                                  2. Use commands like cat ~/.ssh/id_rsa.pub to view and copy the public key.

                                                  3. Log into the AI platform as a user, click Personal Center -> SSH Public Key -> Import SSH Public Key in the upper right corner.

                                                  4. Enter the detail page of the Notebook instance and copy the SSH link.

                                                  5. Use SSH to access the Notebook instance from the client.

                                                  Next step: Create Training Job

                                                  "},{"location":"en/end-user/share/workload.html","title":"Creating AI Workloads Using GPU Resources","text":"

                                                  After the administrator allocates resource quotas for the workspace, users can create AI workloads to utilize GPU computing resources.

                                                  "},{"location":"en/end-user/share/workload.html#prerequisites","title":"Prerequisites","text":"
                                                  • The AI platform is installed
                                                  • User has successfully registered
                                                  • The administrator has assigned a workspace to the user
                                                  • The administrator has set resource quotas for the workspace
                                                  • The administrator has assigned a cluster to the user
                                                  "},{"location":"en/end-user/share/workload.html#steps-to-create-ai-workloads","title":"Steps to Create AI Workloads","text":"
                                                  1. Log into the AI platform as a user.
                                                  2. Navigate to Container Management, select a namespace, click Workloads -> Deployments, and then click the Create Image button on the right.

                                                  3. After configuring various parameters, click OK.

                                                    Basic Information / Container Configuration / Other

                                                    Select your namespace.

                                                    Set the image, configure CPU, memory, GPU, and other resources, and set the startup command.

                                                    Service configuration and advanced configuration can use the default settings.

                                                  4. You will be automatically redirected to the stateless workload list; click the workload name.

                                                  5. Enter the detail page where you can see the GPU quota.

                                                  6. You can also access the console and run the nvidia-smi command to view GPU resources.

                                                  Next step: Using Notebook

                                                  "},{"location":"en/openapi/index.html","title":"OpenAPI Documentation","text":"

                                                  This section provides OpenAPI documentation for developers.

                                                  • CloudHost OpenAPI Documentation
                                                  • AI Lab OpenAPI Documentation
                                                  • Container Management OpenAPI Documentation
                                                  • Insight OpenAPI Documentation
                                                  • Global Management OpenAPI Documentation
                                                  "},{"location":"en/openapi/index.html#obtaining-openapi-access-keys","title":"Obtaining OpenAPI Access Keys","text":"

                                                  Access keys can be used to access the OpenAPI and for continuous publishing. Follow the steps below to obtain a key in your personal center and use it to access the API.

                                                  Log in to the AI platform, find Personal Center in the dropdown menu at the top right corner, and manage your account's access keys on the Access Keys page.

                                                  Info

                                                  Access key information is displayed only once. If you forget the access key information, you will need to create a new access key.

                                                  "},{"location":"en/openapi/index.html#using-the-key-to-access-the-api","title":"Using the Key to Access the API","text":"

                                                  When accessing the AI platform's OpenAPI, include the request header Authorization:Bearer ${token} in the request to identify the visitor's identity, where ${token} is the key obtained in the previous step.

                                                  Request Example

                                                  curl -X GET -H 'Authorization:Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IkRKVjlBTHRBLXZ4MmtQUC1TQnVGS0dCSWc1cnBfdkxiQVVqM2U3RVByWnMiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE2NjE0MTU5NjksImlhdCI6MTY2MDgxMTE2OSwiaXNzIjoiZ2hpcHBvLmlvIiwic3ViIjoiZjdjOGIxZjUtMTc2MS00NjYwLTg2MWQtOWI3MmI0MzJmNGViIiwicHJlZmVycmVkX3VzZXJuYW1lIjoiYWRtaW4iLCJncm91cHMiOltdfQ.RsUcrAYkQQ7C6BxMOrdD3qbBRUt0VVxynIGeq4wyIgye6R8Ma4cjxG5CbU1WyiHKpvIKJDJbeFQHro2euQyVde3ygA672ozkwLTnx3Tu-_mB1BubvWCBsDdUjIhCQfT39rk6EQozMjb-1X1sbLwzkfzKMls-oxkjagI_RFrYlTVPwT3Oaw-qOyulRSw7Dxd7jb0vINPq84vmlQIsI3UuTZSNO5BCgHpubcWwBss-Aon_DmYA-Et_-QtmPBA3k8E2hzDSzc7eqK0I68P25r9rwQ3DeKwD1dbRyndqWORRnz8TLEXSiCFXdZT2oiMrcJtO188Ph4eLGut1-4PzKhwgrQ' 'https://demo-dev.daocloud.io/apis/ghippo.io/v1alpha1/users?page=1&pageSize=10' -k\n

                                                  Request Result

                                                  {\n    \"items\": [\n        {\n            \"id\": \"a7cfd010-ebbe-4601-987f-d098d9ef766e\",\n            \"name\": \"a\",\n            \"email\": \"\",\n            \"description\": \"\",\n            \"firstname\": \"\",\n            \"lastname\": \"\",\n            \"source\": \"locale\",\n            \"enabled\": true,\n            \"createdAt\": \"1660632794800\",\n            \"updatedAt\": \"0\",\n            \"lastLoginAt\": \"\"\n        }\n    ],\n    \"pagination\": {\n        \"page\": 1,\n        \"pageSize\": 10,\n        \"total\": 1\n    }\n}\n
                                                  "},{"location":"en/openapi/baize/index.html","title":"AI Lab OpenAPI Docs","text":""},{"location":"en/openapi/ghippo/index.html","title":"Global Management OpenAPI Docs","text":""},{"location":"en/openapi/insight/index.html","title":"Insight OpenAPI Docs","text":""},{"location":"en/openapi/kpanda/index.html","title":"Container Management OpenAPI Docs","text":""},{"location":"en/openapi/virtnest/index.html","title":"Cloud Host OpenAPI Docs","text":""}]} \ No newline at end of file +{"config":{"lang":["en","zh"],"separator":"[\\s\\u200b\\u3000\\-\u3001\u3002\uff0c\uff0e\uff1f\uff01\uff1b]+","pipeline":["stemmer"]},"docs":[{"location":"index.html","title":"\u8c50\u6536\u4e8c\u865f\u6a94\u6848\u7ad9","text":"

                                                  This is the archive site for the Fengshou No. 2 AI Computing Center.

                                                  • End User Manual: develop AI algorithms and build training and inference jobs with cloud hosts in a containerized environment
                                                  • Administrator Manual: perform O&M for containerized end users and keep the platform running stably and efficiently
                                                  • Developer Manual: a compilation of the OpenAPI manuals of the 5 modules

                                                  "},{"location":"admin/index.html","title":"\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 - \u7ba1\u7406\u5458","text":"

                                                  \u8fd9\u662f\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9762\u5411\u7ba1\u7406\u5458\u7684\u8fd0\u7ef4\u6587\u6863\u3002

                                                  • \u4e91\u4e3b\u673a

                                                    \u4e91\u4e3b\u673a\u662f\u90e8\u7f72\u5728\u4e91\u7aef\u7684\u865a\u62df\u673a\u3002

                                                    • \u7ba1\u7406\u4e91\u4e3b\u673a
                                                    • \u4e91\u4e3b\u673a vGPU
                                                    • \u4e91\u4e3b\u673a\u6a21\u677f
                                                    • \u4ece VMWare \u5bfc\u5165\u4e91\u4e3b\u673a
                                                  • \u5bb9\u5668\u7ba1\u7406

                                                    \u7ba1\u7406 K8s \u96c6\u7fa4\u3001\u8282\u70b9\u3001\u5e94\u7528\u3001\u8d44\u6e90\u548c\u6743\u9650\u3002

                                                    • \u521b\u5efa\u96c6\u7fa4
                                                    • \u6dfb\u52a0\u5de5\u4f5c\u8282\u70b9
                                                    • \u7ba1\u7406 Helm \u5e94\u7528
                                                    • HPA \u6c34\u5e73\u6269\u7f29\u5bb9
                                                  • \u7b97\u6cd5\u5f00\u53d1

                                                    \u7ba1\u7406 AI \u8d44\u6e90\u548c\u961f\u5217\u3002

                                                    • \u7ba1\u7406\u8d44\u6e90
                                                    • \u7ba1\u7406\u961f\u5217
                                                    • AI \u8bad\u63a8\u6700\u4f73\u5b9e\u8df5
                                                    • \u7b97\u6cd5\u5f00\u53d1\u6545\u969c\u6392\u67e5
                                                  • \u53ef\u89c2\u6d4b\u6027

                                                    \u4e86\u89e3\u53ef\u89c2\u6d4b\u6027\u8d44\u6e90\uff0c\u914d\u7f6e\u548c\u6545\u969c\u6392\u67e5\u3002

                                                    • \u90e8\u7f72\u8d44\u6e90\u89c4\u5212
                                                    • \u5b89\u88c5\u4e0e\u5347\u7ea7
                                                    • \u517c\u5bb9\u6027\u6d4b\u8bd5
                                                    • \u5e38\u89c1\u95ee\u9898
                                                  • \u5168\u5c40\u7ba1\u7406

                                                    \u7ba1\u63a7\u7528\u6237\u3001\u7528\u6237\u7ec4\u3001\u5de5\u4f5c\u7a7a\u95f4\u3001\u8d44\u6e90\u7b49\u8bbf\u95ee\u6743\u9650\u3002

                                                    • \u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4
                                                    • \u4e3a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u8d44\u6e90
                                                    • \u5ba1\u8ba1\u65e5\u5fd7
                                                    • \u5e73\u53f0\u8bbe\u7f6e

                                                  "},{"location":"admin/baize/best-practice/add-scheduler.html","title":"\u589e\u52a0\u4efb\u52a1\u8c03\u5ea6\u5668","text":"

                                                  5.0 AI Lab \u63d0\u4f9b\u4e86\u4efb\u52a1\u8c03\u5ea6\u5668\uff0c\u53ef\u4ee5\u5e2e\u52a9\u60a8\u66f4\u597d\u5730\u7ba1\u7406\u4efb\u52a1\uff0c\u9664\u4e86\u63d0\u4f9b\u57fa\u7840\u7684\u8c03\u5ea6\u5668\u4e4b\u5916\uff0c\u76ee\u524d\u4e5f\u652f\u6301\u7528\u6237\u81ea\u5b9a\u4e49\u8c03\u5ea6\u5668\u3002

                                                  "},{"location":"admin/baize/best-practice/add-scheduler.html#_2","title":"\u4efb\u52a1\u8c03\u5ea6\u5668\u4ecb\u7ecd","text":"

                                                  \u5728 Kubernetes \u4e2d\uff0c\u4efb\u52a1\u8c03\u5ea6\u5668\u8d1f\u8d23\u51b3\u5b9a\u5c06 Pod \u5206\u914d\u5230\u54ea\u4e2a\u8282\u70b9\u4e0a\u8fd0\u884c\u3002\u5b83\u8003\u8651\u591a\u79cd\u56e0\u7d20\uff0c\u5982\u8d44\u6e90\u9700\u6c42\u3001\u786c\u4ef6/\u8f6f\u4ef6\u7ea6\u675f\u3001\u4eb2\u548c\u6027/\u53cd\u4eb2\u548c\u6027\u89c4\u5219\u3001\u6570\u636e\u5c40\u90e8\u6027\u7b49\u3002

                                                  \u9ed8\u8ba4\u8c03\u5ea6\u5668\u662f Kubernetes \u96c6\u7fa4\u4e2d\u7684\u4e00\u4e2a\u6838\u5fc3\u7ec4\u4ef6\uff0c\u8d1f\u8d23\u51b3\u5b9a\u5c06 Pod \u5206\u914d\u5230\u54ea\u4e2a\u8282\u70b9\u4e0a\u8fd0\u884c\u3002\u8ba9\u6211\u4eec\u6df1\u5165\u4e86\u89e3\u5b83\u7684\u5de5\u4f5c\u539f\u7406\u3001\u7279\u6027\u548c\u914d\u7f6e\u65b9\u6cd5\u3002

                                                  "},{"location":"admin/baize/best-practice/add-scheduler.html#_3","title":"\u8c03\u5ea6\u5668\u7684\u5de5\u4f5c\u6d41\u7a0b","text":"

                                                  \u9ed8\u8ba4\u8c03\u5ea6\u5668\u7684\u5de5\u4f5c\u6d41\u7a0b\u53ef\u4ee5\u5206\u4e3a\u4e24\u4e2a\u4e3b\u8981\u9636\u6bb5\uff1a\u8fc7\u6ee4\uff08Filtering\uff09\u548c\u8bc4\u5206\uff08Scoring\uff09\u3002

                                                  "},{"location":"admin/baize/best-practice/add-scheduler.html#_4","title":"\u8fc7\u6ee4\u9636\u6bb5","text":"

                                                  \u8c03\u5ea6\u5668\u4f1a\u904d\u5386\u6240\u6709\u8282\u70b9\uff0c\u6392\u9664\u4e0d\u6ee1\u8db3 Pod \u8981\u6c42\u7684\u8282\u70b9\uff0c\u8003\u8651\u7684\u56e0\u7d20\u5305\u62ec\uff1a

                                                  • \u8d44\u6e90\u9700\u6c42
                                                  • \u8282\u70b9\u9009\u62e9\u5668
                                                  • \u8282\u70b9\u4eb2\u548c\u6027
                                                  • \u6c61\u70b9\u548c\u5bb9\u5fcd

                                                  \u4ee5\u4e0a\u53c2\u6570\uff0c\u6211\u4eec\u53ef\u4ee5\u901a\u8fc7\u521b\u5efa\u4efb\u52a1\u65f6\u7684\u9ad8\u7ea7\u914d\u7f6e\u6765\u8bbe\u7f6e\uff0c\u5982\u4e0b\u56fe\u6240\u793a\uff1a
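
                                                  Alongside that advanced configuration, a rough hand-written sketch (not platform output) of how these filtering inputs appear in a Pod spec; the image, node label, and taint key are placeholders:

                                                  spec:\n  containers:\n    - name: train\n      image: my-registry/train:latest   # placeholder image\n      resources:\n        requests:\n          cpu: \"4\"                      # resource requirements\n          memory: 8Gi\n  nodeSelector:\n    gpu-type: a100                      # placeholder node label\n  tolerations:\n    - key: dedicated                    # placeholder taint key\n      operator: Exists\n      effect: NoSchedule\n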

                                                  "},{"location":"admin/baize/best-practice/add-scheduler.html#_5","title":"\u8bc4\u5206\u9636\u6bb5","text":"

                                                  \u5bf9\u901a\u8fc7\u8fc7\u6ee4\u7684\u8282\u70b9\u8fdb\u884c\u6253\u5206\uff0c\u9009\u62e9\u5f97\u5206\u6700\u9ad8\u7684\u8282\u70b9\u6765\u8fd0\u884c Pod\uff0c\u8003\u8651\u56e0\u7d20\u5305\u62ec\uff1a

                                                  • \u8d44\u6e90\u4f7f\u7528\u7387
                                                  • Pod \u4eb2\u548c\u6027/\u53cd\u4eb2\u548c\u6027
                                                  • \u8282\u70b9\u4eb2\u548c\u6027\u7b49\u3002
                                                  "},{"location":"admin/baize/best-practice/add-scheduler.html#_6","title":"\u8c03\u5ea6\u5668\u63d2\u4ef6","text":"

                                                  \u9664\u4e86\u57fa\u7840\u7684\u4e00\u4e9b\u4efb\u52a1\u8c03\u5ea6\u80fd\u529b\u4e4b\u5916\uff0c\u6211\u4eec\u8fd8\u652f\u6301\u4f7f\u7528 Scheduler Plugins\uff1aKubernetes SIG Scheduling \u7ef4\u62a4\u7684\u4e00\u7ec4\u8c03\u5ea6\u5668\u63d2\u4ef6\uff0c\u5305\u62ec Coscheduling (Gang Scheduling) \u7b49\u529f\u80fd\u3002

                                                  "},{"location":"admin/baize/best-practice/add-scheduler.html#_7","title":"\u90e8\u7f72\u8c03\u5ea6\u5668\u63d2\u4ef6","text":"

                                                  \u5728\u5de5\u4f5c\u96c6\u7fa4\u4e2d\u90e8\u7f72\u7b2c\u4e8c\u8c03\u5ea6\u5668\u63d2\u4ef6\uff0c\u8bf7\u53c2\u8003\u90e8\u7f72\u7b2c\u4e8c\u8c03\u5ea6\u5668\u63d2\u4ef6\u3002

                                                  "},{"location":"admin/baize/best-practice/add-scheduler.html#ai-lab","title":"\u5728 AI Lab \u4e2d\u542f\u7528\u8c03\u5ea6\u5668\u63d2\u4ef6","text":"

                                                  Danger

                                                  \u589e\u52a0\u8c03\u5ea6\u5668\u63d2\u4ef6\u82e5\u64cd\u4f5c\u4e0d\u5f53\uff0c\u53ef\u80fd\u4f1a\u5f71\u54cd\u5230\u6574\u4e2a\u96c6\u7fa4\u7684\u7a33\u5b9a\u6027\uff0c\u5efa\u8bae\u5728\u6d4b\u8bd5\u73af\u5883\u4e2d\u8fdb\u884c\u6d4b\u8bd5\uff1b\u6216\u8005\u8054\u7cfb\u6211\u4eec\u7684\u6280\u672f\u652f\u6301\u56e2\u961f\u3002

                                                  \u6ce8\u610f\uff0c\u5982\u679c\u5e0c\u671b\u5728\u8bad\u7ec3\u4efb\u52a1\u4e2d\u4f7f\u7528\u66f4\u591a\u7684\u8c03\u5ea6\u5668\u63d2\u4ef6\uff0c\u9700\u8981\u4e8b\u5148\u624b\u5de5\u5728\u5de5\u4f5c\u96c6\u7fa4\u4e2d\u6210\u529f\u5b89\u88c5\uff0c\u7136\u540e\u5728\u96c6\u7fa4\u4e2d\u90e8\u7f72 baize-agent \u65f6\uff0c\u589e\u52a0\u5bf9\u5e94\u7684\u8c03\u5ea6\u5668\u63d2\u4ef6\u914d\u7f6e\u3002

                                                  \u901a\u8fc7\u5bb9\u5668\u7ba1\u7406\u63d0\u4f9b\u7684\u754c\u9762 Helm \u5e94\u7528 \u7ba1\u7406\u80fd\u529b\uff0c\u53ef\u4ee5\u65b9\u4fbf\u5730\u5728\u96c6\u7fa4\u4e2d\u90e8\u7f72\u8c03\u5ea6\u5668\u63d2\u4ef6\uff0c\u5982\u4e0b\u56fe\u6240\u793a\uff1a

                                                  \u7136\u540e\uff0c\u5728\u53f3\u4e0a\u89d2\u70b9\u51fb \u5b89\u88c5 \uff0c\uff08\u82e5\u5df2\u90e8\u7f72\u4e86 baize-agent\uff0c\u53ef\u4ee5\u5230 Helm \u5e94\u7528\u5217\u8868\u53bb\u66f4\u65b0\uff09\uff0c\u6839\u636e\u5982\u4e0b\u56fe\u6240\u793a\u7684\u914d\u7f6e\uff0c\u589e\u52a0\u8c03\u5ea6\u5668\u3002

                                                  \u6ce8\u610f\u8c03\u5ea6\u5668\u7684\u53c2\u6570\u5c42\u7ea7\uff0c\u6dfb\u52a0\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002

                                                  \u6ce8\u610f\u4ee5\u540e\u5728\u66f4\u65b0 baize-agent \u65f6\uff0c\u4e0d\u8981\u9057\u6f0f\u8fd9\u4e2a\u914d\u7f6e\u3002

                                                  "},{"location":"admin/baize/best-practice/add-scheduler.html#_8","title":"\u5728\u521b\u5efa\u4efb\u52a1\u65f6\u6307\u5b9a\u8c03\u5ea6\u5668","text":"

                                                  \u5f53\u60a8\u5728\u96c6\u7fa4\u4e2d\u6210\u529f\u90e8\u7f72\u4e86\u5bf9\u5e94\u7684\u8c03\u5ea6\u5668\u63d2\u4ef6\uff0c\u5e76\u4e14\u5728 baize-agent \u4e5f\u6b63\u786e\u589e\u52a0\u4e86\u5bf9\u5e94\u7684\u8c03\u5ea6\u5668\u914d\u7f6e\u540e\uff0c\u53ef\u4ee5\u5728\u521b\u5efa\u4efb\u52a1\u65f6\uff0c\u6307\u5b9a\u8c03\u5ea6\u5668\u3002

                                                  \u4e00\u5207\u6b63\u5e38\u7684\u60c5\u51b5\u4e0b\uff0c\u60a8\u53ef\u4ee5\u5728\u8c03\u5ea6\u5668\u4e0b\u62c9\u6846\u4e2d\u770b\u5230\u60a8\u90e8\u7f72\u7684\u8c03\u5ea6\u5668\u63d2\u4ef6\u3002
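
                                                  Under the hood, the selected scheduler presumably ends up in the Pod's schedulerName field; a hedged sketch with placeholder names:

                                                  apiVersion: v1\nkind: Pod\nmetadata:\n  name: demo-train-pod                          # placeholder name\nspec:\n  schedulerName: scheduler-plugins-scheduler    # placeholder scheduler name\n  containers:\n    - name: train\n      image: my-registry/train:latest           # placeholder image\n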

                                                  This concludes the instructions for configuring and using scheduler options for jobs in AI Lab.

                                                  "},{"location":"admin/baize/best-practice/change-notebook-image.html","title":"\u66f4\u65b0 Notebook \u5185\u7f6e\u955c\u50cf","text":"

                                                  \u5728 Notebook \u4e2d\uff0c\u9ed8\u8ba4\u63d0\u4f9b\u4e86\u591a\u4e2a\u53ef\u7528\u7684\u57fa\u7840\u955c\u50cf\uff0c\u4f9b\u5f00\u53d1\u8005\u9009\u62e9\uff1b\u5927\u90e8\u5206\u60c5\u51b5\u4e0b\uff0c\u8fd9\u4f1a\u6ee1\u8db3\u5f00\u53d1\u8005\u7684\u4f7f\u7528\u3002

                                                  \u7b97\u4e30\u63d0\u4f9b\u4e86\u4e00\u4e2a\u9ed8\u8ba4\u7684 Notebook \u955c\u50cf\uff0c\u5305\u542b\u4e86\u6240\u9700\u7684\u4efb\u4f55\u5f00\u53d1\u5de5\u5177\u548c\u8d44\u6599\u3002

                                                  baize/baize-notebook\n

                                                  \u8fd9\u4e2a Notebook \u91cc\u9762\u5305\u542b\u4e86\u57fa\u7840\u7684\u5f00\u53d1\u5de5\u5177\uff0c\u4ee5 baize-notebook:v0.5.0 \uff082024 \u5e74 5 \u6708 30 \u65e5\uff09\u4e3a\u4f8b\uff0c\u76f8\u5173\u4f9d\u8d56\u53ca\u7248\u672c\u5982\u4e0b\uff1a

                                                  \u4f9d\u8d56 \u7248\u672c\u7f16\u53f7 \u4ecb\u7ecd Ubuntu 22.04.3 \u9ed8\u8ba4 OS Python 3.11.6 \u9ed8\u8ba4 Python \u7248\u672c pip 23.3.1 conda(mamba) 23.3.1 jupyterlab 3.6.6 JupyterLab \u955c\u50cf\uff0c\u63d0\u4f9b\u5b8c\u6574\u7684 Notebook \u5f00\u53d1\u4f53\u9a8c codeserver v4.89.1 \u4e3b\u6d41 Code \u5f00\u53d1\u5de5\u5177\uff0c\u65b9\u4fbf\u7528\u6237\u4f7f\u7528\u719f\u6089\u7684\u5de5\u5177\u8fdb\u884c\u5f00\u53d1\u4f53\u9a8c *baizectl v0.5.0 \u7b97\u4e30\u5185\u7f6e CLI \u4efb\u52a1\u7ba1\u7406\u5de5\u5177 *SSH - \u652f\u6301\u672c\u5730 SSH \u76f4\u63a5\u8bbf\u95ee\u5230 Notebook \u5bb9\u5668\u5185 *kubectl v1.27 Kubernetes CLI\uff0c\u53ef\u4ee5\u4f7f\u7528 kubectl \u5728 Notebook \u5185 \u7ba1\u7406\u5bb9\u5668\u8d44\u6e90

                                                  \u4f46\u6709\u65f6\u7528\u6237\u53ef\u80fd\u9700\u8981\u81ea\u5b9a\u4e49\u955c\u50cf\uff0c\u672c\u6587\u4ecb\u7ecd\u4e86\u5982\u4f55\u66f4\u65b0\u955c\u50cf\uff0c\u5e76\u589e\u52a0\u5230 Notebook \u521b\u5efa\u754c\u9762\u4e2d\u8fdb\u884c\u9009\u62e9\u3002

                                                  "},{"location":"admin/baize/best-practice/change-notebook-image.html#_1","title":"\u6784\u5efa\u81ea\u5b9a\u4e49\u955c\u50cf\uff08\u4ec5\u4f9b\u53c2\u8003\uff09","text":"

                                                  Note

                                                  \u6ce8\u610f\uff0c\u6784\u5efa\u65b0\u955c\u50cf \u9700\u8981\u4ee5 baize-notebook \u4f5c\u4e3a\u57fa\u7840\u955c\u50cf\uff0c\u4ee5\u4fdd\u8bc1 Notebook \u7684\u6b63\u5e38\u8fd0\u884c\u3002

                                                  \u5728\u6784\u5efa\u81ea\u5b9a\u4e49\u955c\u50cf\u65f6\uff0c\u5efa\u8bae\u5148\u4e86\u89e3 baize-notebook \u955c\u50cf\u7684 Dockerfile\uff0c\u4ee5\u4fbf\u66f4\u597d\u5730\u7406\u89e3\u5982\u4f55\u6784\u5efa\u81ea\u5b9a\u4e49\u955c\u50cf\u3002

                                                  "},{"location":"admin/baize/best-practice/change-notebook-image.html#baize-noteboook-dockerfile","title":"baize-noteboook \u7684 Dockerfile","text":"
                                                  ARG BASE_IMG=docker.m.daocloud.io/kubeflownotebookswg/jupyter:v1.8.0\n\nFROM $BASE_IMG\n\nUSER root\n\n# install - useful linux packages\nRUN export DEBIAN_FRONTEND=noninteractive \\\n && apt-get -yq update \\\n && apt-get -yq install --no-install-recommends \\\n    openssh-server git git-lfs bash-completion \\\n && apt-get clean \\\n && rm -rf /var/lib/apt/lists/*\n\n# remove default s6 jupyterlab run script\nRUN rm -rf /etc/services.d/jupyterlab\n\n# install - useful jupyter plugins\nRUN mamba install -n base -y jupyterlab-language-pack-zh-cn \\\n  && mamba clean --all -y\n\nARG CODESERVER_VERSION=4.89.1\nARG TARGETARCH\n\nRUN curl -fsSL \"https://github.com/coder/code-server/releases/download/v$CODESERVER_VERSION/code-server_${CODESERVER_VERSION}_$TARGETARCH.deb\" -o /tmp/code-server.deb \\\n  && dpkg -i /tmp/code-server.deb \\\n  && rm -f /tmp/code-server.deb\n\nARG CODESERVER_PYTHON_VERSION=2024.4.1\nARG CODESERVER_JUPYTER_VERSION=2024.3.1\nARG CODESERVER_LANGUAGE_PACK_ZH_CN=1.89.0\nARG CODESERVER_YAML=1.14.0\nARG CODESERVER_DOTENV=1.0.1\nARG CODESERVER_EDITORCONFIG=0.16.6\nARG CODESERVER_TOML=0.19.1\nARG CODESERVER_GITLENS=15.0.4\n\n# configure for code-server extensions\n# # https://github.com/kubeflow/kubeflow/blob/709254159986d2cc99e675d0fad5a128ddeb0917/components/example-notebook-servers/codeserver-python/Dockerfile\n# # and\n# # https://github.com/kubeflow/kubeflow/blob/709254159986d2cc99e675d0fad5a128ddeb0917/components/example-notebook-servers/codeserver/Dockerfile\nRUN code-server --list-extensions --show-versions \\\n  && code-server --list-extensions --show-versions \\\n  && code-server \\\n    --install-extension MS-CEINTL.vscode-language-pack-zh-hans@$CODESERVER_LANGUAGE_PACK_ZH_CN \\\n    --install-extension ms-python.python@$CODESERVER_PYTHON_VERSION \\\n    --install-extension ms-toolsai.jupyter@$CODESERVER_JUPYTER_VERSION \\\n    --install-extension redhat.vscode-yaml@$CODESERVER_YAML \\\n    --install-extension mikestead.dotenv@$CODESERVER_DOTENV \\\n    --install-extension EditorConfig.EditorConfig@$CODESERVER_EDITORCONFIG \\\n    --install-extension tamasfe.even-better-toml@$CODESERVER_TOML \\\n    --install-extension eamodio.gitlens@$CODESERVER_GITLENS \\\n    --install-extension catppuccin.catppuccin-vsc-pack \\\n    --force \\\n  && code-server --list-extensions --show-versions\n\n# configure for code-server\nRUN mkdir -p /home/${NB_USER}/.local/share/code-server/User \\\n  && chown -R ${NB_USER}:users /home/${NB_USER} \\\n  && cat <<EOF > /home/${NB_USER}/.local/share/code-server/User/settings.json\n{\n  \"gitlens.showWelcomeOnInstall\": false,\n  \"workbench.colorTheme\": \"Catppuccin Mocha\",\n}\nEOF\n\nRUN mkdir -p /tmp_home/${NB_USER}/.local/share \\\n  && mv /home/${NB_USER}/.local/share/code-server /tmp_home/${NB_USER}/.local/share\n\n# set ssh configuration\nRUN mkdir -p /run/sshd \\\n && chown -R ${NB_USER}:users /etc/ssh \\\n && chown -R ${NB_USER}:users /run/sshd \\\n && sed -i \"/#\\?Port/s/^.*$/Port 2222/g\" /etc/ssh/sshd_config \\\n && sed -i \"/#\\?PasswordAuthentication/s/^.*$/PasswordAuthentication no/g\" /etc/ssh/sshd_config \\\n && sed -i \"/#\\?PubkeyAuthentication/s/^.*$/PubkeyAuthentication yes/g\" /etc/ssh/sshd_config \\\n && rclone_version=v1.65.0 && \\\n       arch=$(uname -m | sed -E 's/x86_64/amd64/g;s/aarch64/arm64/g') && \\\n       filename=rclone-${rclone_version}-linux-${arch} && \\\n       curl -fsSL 
https://github.com/rclone/rclone/releases/download/${rclone_version}/${filename}.zip -o ${filename}.zip && \\\n       unzip ${filename}.zip && mv ${filename}/rclone /usr/local/bin && rm -rf ${filename} ${filename}.zip\n\n# Init mamba\nRUN mamba init --system\n\n# init baize-base environment for essential python packages\nRUN mamba create -n baize-base -y python \\\n  && /opt/conda/envs/baize-base/bin/pip install tensorboard \\\n  && mamba clean --all -y \\\n  && ln -s /opt/conda/envs/baize-base/bin/tensorboard /usr/local/bin/tensorboard\n\n# prepare baize-runtime-env directory\nRUN mkdir -p /opt/baize-runtime-env \\\n  && chown -R ${NB_USER}:users /opt/baize-runtime-env\n\nARG APP\nARG PROD_NAME\nARG TARGETOS\n\nCOPY out/$TARGETOS/$TARGETARCH/data-loader /usr/local/bin/\nCOPY out/$TARGETOS/$TARGETARCH/baizectl /usr/local/bin/\n\nRUN chmod +x /usr/local/bin/baizectl /usr/local/bin/data-loader && \\\n    echo \"source /etc/bash_completion\" >> /opt/conda/etc/profile.d/conda.sh && \\\n    echo \"source <(baizectl completion bash)\" >> /opt/conda/etc/profile.d/conda.sh && \\\n    echo \"source <(kubectl completion bash)\" >> /opt/conda/etc/profile.d/conda.sh && \\\n    echo '[ -f /run/baize-env ] && export $(cat /run/baize-env | xargs)' >> /opt/conda/etc/profile.d/conda.sh && \\\n    echo 'alias conda=\"mamba\"' >> /opt/conda/etc/profile.d/conda.sh\n\nUSER ${NB_UID}\n
                                                  "},{"location":"admin/baize/best-practice/change-notebook-image.html#_2","title":"\u6784\u5efa\u4f60\u7684\u955c\u50cf","text":"
                                                  ARG BASE_IMG=release.daocloud.io/baize/baize-notebook:v0.5.0\n\nFROM $BASE_IMG\nUSER root\n\n# Do Customization\nRUN mamba install -n baize-base -y pytorch torchvision torchaudio cpuonly -c pytorch \\\n && mamba install -n baize-base -y tensorflow \\\n && mamba clean --all -y\n\nUSER ${NB_UID}\n
                                                  "},{"location":"admin/baize/best-practice/change-notebook-image.html#notebook-helm","title":"\u589e\u52a0\u5230 Notebook \u955c\u50cf\u5217\u8868\uff08Helm\uff09","text":"

                                                  Warning

                                                  \u6ce8\u610f\uff0c\u5fc5\u987b\u7531\u5e73\u53f0\u7ba1\u7406\u5458\u64cd\u4f5c\uff0c\u8c28\u614e\u53d8\u66f4\u3002

                                                  \u76ee\u524d\uff0c\u955c\u50cf\u9009\u62e9\u5668\u9700\u8981\u901a\u8fc7\u66f4\u65b0 baize \u7684 Helm \u53c2\u6570\u6765\u4fee\u6539\uff0c\u5177\u4f53\u6b65\u9aa4\u5982\u4e0b\uff1a

                                                  \u5728 kpanda-global-cluster \u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684 Helm \u5e94\u7528\u5217\u8868\uff0c\u627e\u5230 baize\uff0c\u8fdb\u5165\u66f4\u65b0\u9875\u9762\uff0c\u5728 YAML \u53c2\u6570\u4e2d\u4fee\u6539 Notebook \u955c\u50cf\uff1a

                                                  \u6ce8\u610f\u53c2\u6570\u4fee\u6539\u7684\u8def\u5f84\u5982\u4e0b global.config.notebook_images\uff1a

                                                  ...\nglobal:\n  ...\n  config:\n    notebook_images:\n      ...\n      names: release.daocloud.io/baize/baize-notebook:v0.5.0\n      # \u5728\u8fd9\u91cc\u589e\u52a0\u4f60\u7684\u955c\u50cf\u4fe1\u606f\n

                                                  \u66f4\u65b0\u5b8c\u6210\u4e4b\u540e\uff0c\u5f85 Helm \u5e94\u7528\u91cd\u542f\u6210\u529f\u4e4b\u540e\uff0c\u53ef\u4ee5\u5728 Notebook \u521b\u5efa\u754c\u9762\u4e2d\u7684\u9009\u62e9\u955c\u50cf\u770b\u5230\u65b0\u7684\u955c\u50cf\u3002

                                                  "},{"location":"admin/baize/best-practice/checkpoint.html","title":"Checkpoint \u673a\u5236\u53ca\u4f7f\u7528\u4ecb\u7ecd","text":"

                                                  \u5728\u6df1\u5ea6\u5b66\u4e60\u7684\u5b9e\u9645\u573a\u666f\u4e2d\uff0c\u6a21\u578b\u8bad\u7ec3\u4e00\u822c\u90fd\u4f1a\u6301\u7eed\u4e00\u6bb5\u65f6\u95f4\uff0c\u8fd9\u5bf9\u5206\u5e03\u5f0f\u8bad\u7ec3\u4efb\u52a1\u7684\u7a33\u5b9a\u6027\u548c\u6548\u7387\u63d0\u51fa\u4e86\u66f4\u9ad8\u7684\u8981\u6c42\u3002 \u800c\u4e14\uff0c\u5728\u5b9e\u9645\u8bad\u7ec3\u7684\u8fc7\u7a0b\u4e2d\uff0c\u5f02\u5e38\u4e2d\u65ad\u4f1a\u5bfc\u81f4\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u7684\u6a21\u578b\u72b6\u6001\u4e22\u5931\uff0c\u9700\u8981\u91cd\u65b0\u5f00\u59cb\u8bad\u7ec3\uff0c \u8fd9\u4e0d\u4ec5\u6d6a\u8d39\u4e86\u65f6\u95f4\u548c\u8d44\u6e90\uff0c\u8fd9\u5728 LLM \u8bad\u7ec3\u4e2d\u5c24\u4e3a\u660e\u663e\uff0c\u800c\u4e14\u4e5f\u4f1a\u5f71\u54cd\u6a21\u578b\u7684\u8bad\u7ec3\u6548\u679c\u3002

                                                  \u80fd\u591f\u5728\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u4fdd\u5b58\u6a21\u578b\u7684\u72b6\u6001\uff0c\u4ee5\u4fbf\u5728\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u5f02\u5e38\u65f6\u80fd\u591f\u6062\u590d\u6a21\u578b\u72b6\u6001\uff0c\u53d8\u5f97\u81f3\u5173\u91cd\u8981\u3002 Checkpoint \u5c31\u662f\u76ee\u524d\u4e3b\u6d41\u7684\u89e3\u51b3\u65b9\u6848\uff0c\u672c\u6587\u5c06\u4ecb\u7ecd Checkpoint \u673a\u5236\u7684\u57fa\u672c\u6982\u5ff5\u548c\u5728 PyTorch \u548c TensorFlow \u4e2d\u7684\u4f7f\u7528\u65b9\u6cd5\u3002

                                                  "},{"location":"admin/baize/best-practice/checkpoint.html#checkpoint_1","title":"\u4ec0\u4e48\u662f Checkpoint\uff1f","text":"

                                                  Checkpoint \u662f\u5728\u6a21\u578b\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u4fdd\u5b58\u6a21\u578b\u72b6\u6001\u7684\u673a\u5236\u3002\u901a\u8fc7\u5b9a\u671f\u4fdd\u5b58 Checkpoint\uff0c\u53ef\u4ee5\u5728\u4ee5\u4e0b\u60c5\u51b5\u4e0b\u6062\u590d\u6a21\u578b\uff1a

                                                  • \u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u65ad\uff08\u5982\u7cfb\u7edf\u5d29\u6e83\u6216\u624b\u52a8\u4e2d\u65ad\uff09
                                                  • \u9700\u8981\u5728\u67d0\u4e2a\u8bad\u7ec3\u9636\u6bb5\u8fdb\u884c\u8bc4\u4f30
                                                  • \u5e0c\u671b\u5728\u4e0d\u540c\u7684\u5b9e\u9a8c\u4e2d\u590d\u7528\u6a21\u578b
                                                  "},{"location":"admin/baize/best-practice/checkpoint.html#pytorch","title":"PyTorch","text":"

                                                  \u5728 PyTorch \u4e2d\uff0ctorch.save \u548c torch.load \u662f\u7528\u4e8e\u4fdd\u5b58\u548c\u52a0\u8f7d\u6a21\u578b\u7684\u57fa\u672c\u51fd\u6570\u3002

                                                  "},{"location":"admin/baize/best-practice/checkpoint.html#pytorch-checkpoint","title":"PyTorch \u4fdd\u5b58 Checkpoint","text":"

                                                  \u5728 PyTorch \u4e2d\uff0c\u901a\u5e38\u4f7f\u7528 state_dict \u4fdd\u5b58\u6a21\u578b\u7684\u53c2\u6570\u3002\u4ee5\u4e0b\u662f\u4e00\u4e2a\u7b80\u5355\u7684\u793a\u4f8b\uff1a

                                                  import torch\nimport torch.nn as nn\n\n# Assume we have a simple neural network\nclass SimpleModel(nn.Module):\n    def __init__(self):\n        super(SimpleModel, self).__init__()\n        self.fc = nn.Linear(10, 2)\n\n    def forward(self, x):\n        return self.fc(x)\n\n# Initialize the model and optimizer\nmodel = SimpleModel()\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n\n# Train the model...\n# Save a checkpoint\ncheckpoint_path = 'model_checkpoint.pth'\ntorch.save({\n    'epoch': 10,\n    'model_state_dict': model.state_dict(),\n    'optimizer_state_dict': optimizer.state_dict(),\n    'loss': 0.02,\n}, checkpoint_path)\n
                                                  "},{"location":"admin/baize/best-practice/checkpoint.html#pytorch-checkpoint_1","title":"PyTorch \u6062\u590d Checkpoint","text":"

                                                  \u52a0\u8f7d\u6a21\u578b\u65f6\uff0c\u9700\u8981\u6062\u590d\u6a21\u578b\u53c2\u6570\u548c\u4f18\u5316\u5668\u72b6\u6001\uff0c\u5e76\u7ee7\u7eed\u8bad\u7ec3\u6216\u63a8\u7406\uff1a

                                                  # \u6062\u590d Checkpoint\ncheckpoint = torch.load('model_checkpoint.pth')\nmodel.load_state_dict(checkpoint['model_state_dict'])\noptimizer.load_state_dict(checkpoint['optimizer_state_dict'])\nepoch = checkpoint['epoch']\nloss = checkpoint['loss']\n\n# \u7ee7\u7eed\u8bad\u7ec3\u6216\u63a8\u7406...\n
                                                  • model_state_dict: \u6a21\u578b\u53c2\u6570
                                                  • optimizer_state_dict: \u4f18\u5316\u5668\u72b6\u6001
                                                  • epoch: \u5f53\u524d\u8bad\u7ec3\u8f6e\u6570
                                                  • loss: \u635f\u5931\u503c
                                                  • learning_rate: \u5b66\u4e60\u7387
                                                  • best_accuracy: \u6700\u4f73\u51c6\u786e\u7387
                                                  "},{"location":"admin/baize/best-practice/checkpoint.html#tensorflow","title":"TensorFlow","text":"

                                                  TensorFlow provides the tf.train.Checkpoint class to manage saving and restoring models and optimizers.

                                                  "},{"location":"admin/baize/best-practice/checkpoint.html#tensorflow-checkpoint","title":"Saving a Checkpoint in TensorFlow","text":"

                                                  Here is an example of saving a checkpoint in TensorFlow:

                                                  import tensorflow as tf\n\n# Assume we have a simple model\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Dense(2, input_shape=(10,))\n])\noptimizer = tf.keras.optimizers.Adam(learning_rate=0.001)\n\n# Define the checkpoint\ncheckpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\ncheckpoint_dir = './checkpoints'\ncheckpoint_prefix = f'{checkpoint_dir}/ckpt'\n\n# Train the model...\n# Save a checkpoint\ncheckpoint.save(file_prefix=checkpoint_prefix)\n

                                                  Note

                                                  AI Lab users can mount high-performance storage directly as the checkpoint directory to speed up saving and restoring checkpoints.
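
                                                  A minimal sketch of such a mount, assuming a hypothetical PVC named checkpoint-pvc backed by high-performance storage:

                                                  containers:\n  - name: train\n    image: my-registry/train:latest   # placeholder image\n    volumeMounts:\n      - name: ckpt\n        mountPath: /tmp/checkpoints   # use as the checkpoint_dir\nvolumes:\n  - name: ckpt\n    persistentVolumeClaim:\n      claimName: checkpoint-pvc       # hypothetical PVC\n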

                                                  "},{"location":"admin/baize/best-practice/checkpoint.html#tensorflow-checkpoint_1","title":"TensorFlow \u6062\u590d Checkpoint","text":"

Load the checkpoint and restore the model and optimizer state:

# Restore the checkpoint\nlatest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)\ncheckpoint.restore(latest_checkpoint)\n\n# Continue training or inference...\n
                                                  "},{"location":"admin/baize/best-practice/checkpoint.html#tensorflow-checkpoint_2","title":"TensorFlow \u5728\u5206\u5e03\u5f0f\u8bad\u7ec3\u7684 Checkpoint \u7ba1\u7406","text":"

The main ways to manage checkpoints in TensorFlow distributed training are as follows:

• Use tf.train.Checkpoint and tf.train.CheckpointManager

                                                    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)\nmanager = tf.train.CheckpointManager(checkpoint, directory='/tmp/model', max_to_keep=3)\n
• Save checkpoints within a distribution strategy

                                                    strategy = tf.distribute.MirroredStrategy()\nwith strategy.scope():\n    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)\n    manager = tf.train.CheckpointManager(checkpoint, directory='/tmp/model', max_to_keep=3)\n
• Save checkpoints only on the chief worker

                                                    if strategy.cluster_resolver.task_type == 'chief':\n    manager.save()\n
• Special handling when using MultiWorkerMirroredStrategy

strategy = tf.distribute.MultiWorkerMirroredStrategy()\nwith strategy.scope():\n    # Model definition\n    ...\n    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)\n    manager = tf.train.CheckpointManager(checkpoint, '/tmp/model', max_to_keep=3)\n\ndef _chief_worker(task_type, task_id):\n    return task_type is None or task_type == 'chief' or (task_type == 'worker' and task_id == 0)\n\nif _chief_worker(strategy.cluster_resolver.task_type, strategy.cluster_resolver.task_id):\n    manager.save()\n
• Use a distributed file system

  Make sure all worker nodes can access the same checkpoint directory, typically via a distributed file system such as HDFS or GCS.
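
  As a hedged sketch (the bucket name is a placeholder), pointing CheckpointManager at shared object storage could look like:

  manager = tf.train.CheckpointManager(checkpoint, directory='gs://your-bucket/ckpts', max_to_keep=3)  # every worker resolves the same path\n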

• Asynchronous saving

  Use tf.keras.callbacks.ModelCheckpoint and set the save_freq parameter to save checkpoints asynchronously during training.
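
  A minimal sketch (the file path and frequency are assumptions) of a Keras callback that writes weights every 500 batches:

  ckpt_cb = tf.keras.callbacks.ModelCheckpoint(filepath='/tmp/model/ckpt-{epoch:02d}', save_weights_only=True, save_freq=500)  # every 500 batches\nmodel.compile(optimizer=optimizer, loss='mse')\nmodel.fit(train_dataset, epochs=10, callbacks=[ckpt_cb])  # assumes train_dataset is defined\n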

• Restoring a checkpoint

                                                    status = checkpoint.restore(manager.latest_checkpoint)\nstatus.assert_consumed()  # (1)!\n
1. Make sure all variables have been restored
• Performance optimization

  • Use tf.train.experimental.enable_mixed_precision_graph_rewrite() to enable mixed-precision training
  • Tune the saving frequency to avoid overly frequent I/O operations
  • Consider using tf.saved_model.save() to save the entire model rather than just the weights
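
  As a hedged sketch (the export path is an assumption), exporting the whole model rather than only checkpointed variables:

  tf.saved_model.save(model, '/tmp/exported_model')  # writes a self-contained SavedModel directory\n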
                                                  "},{"location":"admin/baize/best-practice/checkpoint.html#_1","title":"\u6ce8\u610f\u4e8b\u9879","text":"
1. Save regularly: choose a suitable saving frequency based on training time and resource consumption, such as every epoch or every fixed number of training steps.

2. Keep multiple checkpoints: retain the most recent few checkpoints to guard against corrupted or unusable files.

3. Record metadata: store extra information such as the epoch number and loss value in the checkpoint so the training state can be restored more precisely.

4. Use version control: keep checkpoints from different experiments to make comparison and reuse easier.

5. Validate and test: use checkpoints at different stages of training for validation and testing to confirm model performance and stability.
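
A minimal PyTorch sketch combining points 1 and 2 above (save_every, keep_last, and the output directory are assumptions):

import glob\nimport os\n\nsave_every, keep_last = 1000, 3\n\ndef maybe_save(step, model, optimizer, out_dir='checkpoints'):\n    # Save every save_every steps and keep only the newest keep_last files\n    if step % save_every != 0:\n        return\n    os.makedirs(out_dir, exist_ok=True)\n    torch.save({'step': step,\n                'model_state_dict': model.state_dict(),\n                'optimizer_state_dict': optimizer.state_dict()},\n               f'{out_dir}/ckpt_{step:08d}.pth')\n    for old in sorted(glob.glob(f'{out_dir}/ckpt_*.pth'))[:-keep_last]:\n        os.remove(old)\n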

                                                  "},{"location":"admin/baize/best-practice/checkpoint.html#_2","title":"\u7ed3\u8bba","text":"

The checkpoint mechanism plays a key role in deep learning training. Used properly, the checkpoint features of PyTorch and TensorFlow can significantly improve the reliability and efficiency of training. We hope the methods and best practices described here help you manage the training process of your deep learning models.

                                                  "},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html","title":"\u90e8\u7f72 NFS \u505a\u6570\u636e\u96c6\u9884\u70ed","text":"

The Network File System (NFS) allows remote hosts to mount file systems over the network and interact with them as if they were mounted locally. This lets system administrators consolidate resources on networked servers for centralized management.

Datasets are the core data management feature of AI Lab. They abstract the data dependencies of the whole MLOps lifecycle into datasets, letting users manage all kinds of data in one place so that training tasks can consume it directly.

When remote data is not inside the worker cluster, datasets can preheat it automatically, fetching data from Git, S3, HTTP, and other sources into local cluster storage ahead of time.

Preheating remote data requires a storage service that supports the ReadWriteMany access mode, so deploying NFS inside the cluster is recommended.

This article explains how to quickly deploy an NFS service and add it as a StorageClass of the cluster.

                                                  "},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html#_1","title":"\u51c6\u5907\u5de5\u4f5c","text":"
• NFS uses a node's local storage as its data cache by default, so make sure the disk has enough free space.
• Installation uses Helm and kubectl; make sure both are installed in advance.
                                                  "},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html#_2","title":"\u90e8\u7f72\u8fc7\u7a0b","text":"

The following components need to be installed:

                                                  • NFS Server
                                                  • csi-driver-nfs
                                                  • StorageClass
                                                  "},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html#_3","title":"\u521d\u59cb\u5316\u547d\u540d\u7a7a\u95f4","text":"

All system components will be installed into the nfs namespace, so create this namespace first.

                                                  kubectl create namespace nfs\n
                                                  "},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html#nfs-server","title":"\u5b89\u88c5 NFS Server","text":"

Here is a simple YAML deployment file that can be used directly.

                                                  Note

Check the image: field; depending on where your cluster runs, you may need to change it to a mirror registry accessible from within China.

                                                  nfs-server.yaml
                                                  kind: Service\napiVersion: v1\nmetadata:\n  name: nfs-server\n  namespace: nfs\n  labels:\n    app: nfs-server\nspec:\n  type: ClusterIP\n  selector:\n    app: nfs-server\n  ports:\n    - name: tcp-2049\n      port: 2049\n      protocol: TCP\n    - name: udp-111\n      port: 111\n      protocol: UDP\n---\nkind: Deployment\napiVersion: apps/v1\nmetadata:\n  name: nfs-server\n  namespace: nfs\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: nfs-server\n  template:\n    metadata:\n      name: nfs-server\n      labels:\n        app: nfs-server\n    spec:\n      nodeSelector:\n        \"kubernetes.io/os\": linux\n      containers:\n        - name: nfs-server\n          image: itsthenetwork/nfs-server-alpine:latest\n          env:\n            - name: SHARED_DIRECTORY\n              value: \"/exports\"\n          volumeMounts:\n            - mountPath: /exports\n              name: nfs-vol\n          securityContext:\n            privileged: true\n          ports:\n            - name: tcp-2049\n              containerPort: 2049\n              protocol: TCP\n            - name: udp-111\n              containerPort: 111\n              protocol: UDP\n      volumes:\n        - name: nfs-vol\n          hostPath:\n            path: /nfsdata  # (1)!\n            type: DirectoryOrCreate\n
1. Change this path to store the NFS shared data somewhere else

Save the YAML above as nfs-server.yaml, then run the following commands to deploy it:

kubectl -n nfs apply -f nfs-server.yaml\n\n# Check the deployment result\nkubectl -n nfs get pod,svc\n
                                                  "},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html#csi-driver-nfs","title":"\u5b89\u88c5 csi-driver-nfs","text":"

Installing csi-driver-nfs requires Helm; make sure Helm is installed beforehand.

# Add the Helm repository\nhelm repo add csi-driver-nfs https://mirror.ghproxy.com/https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts\nhelm repo update csi-driver-nfs\n\n# Deploy csi-driver-nfs\n# The parameters below mainly override the image addresses to speed up downloads from within China\nhelm upgrade --install csi-driver-nfs csi-driver-nfs/csi-driver-nfs \\\n    --set image.nfs.repository=k8s.m.daocloud.io/sig-storage/nfsplugin \\\n    --set image.csiProvisioner.repository=k8s.m.daocloud.io/sig-storage/csi-provisioner \\\n    --set image.livenessProbe.repository=k8s.m.daocloud.io/sig-storage/livenessprobe \\\n    --set image.nodeDriverRegistrar.repository=k8s.m.daocloud.io/sig-storage/csi-node-driver-registrar \\\n    --namespace nfs \\\n    --version v4.5.0\n

                                                  Warning

Not all csi-nfs-controller images can be overridden through Helm parameters, so you need to manually edit the image field of its Deployment. Change image: registry.k8s.io to image: k8s.dockerproxy.com to speed up downloads from within China.

                                                  "},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html#storageclass","title":"\u521b\u5efa StorageClass","text":"

Save the following YAML as nfs-sc.yaml:

                                                  nfs-sc.yaml
                                                  apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: nfs-csi\nprovisioner: nfs.csi.k8s.io\nparameters:\n  server: nfs-server.nfs.svc.cluster.local\n  share: /\n  # csi.storage.k8s.io/provisioner-secret is only needed for providing mountOptions in DeleteVolume\n  # csi.storage.k8s.io/provisioner-secret-name: \"mount-options\"\n  # csi.storage.k8s.io/provisioner-secret-namespace: \"default\"\nreclaimPolicy: Retain\nvolumeBindingMode: Immediate\nmountOptions:\n  - nfsvers=4.1\n

Then run the following command to deploy it:

                                                  kubectl apply -f nfs-sc.yaml\n
                                                  "},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html#_4","title":"\u6d4b\u8bd5","text":"

Create a dataset and set its associated StorageClass and preheating method to NFS to preheat remote data into the cluster.

After the dataset is created successfully, its status shows Preheating; once preheating finishes, the dataset is ready to use.

                                                  "},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html#_5","title":"\u5e38\u89c1\u95ee\u9898","text":""},{"location":"admin/baize/best-practice/deploy-nfs-in-worker.html#nfs-sbinmount","title":"\u7f3a\u5c11\u5fc5\u8981\u7684 NFS \u5ba2\u6237\u7aef\u8f6f\u4ef6 /sbin/mount","text":"
                                                  bad option; for several filesystems (e.g. nfs, cifs) you might need a /sbin/mount.<type> helper program.\n

On the node machines running Kubernetes, make sure the NFS client is installed:

Ubuntu/Debian

Run the following commands to install the NFS client:

sudo apt-get update\nsudo apt-get install nfs-common\n

CentOS/RHEL

Run the following command to install the NFS client:

sudo yum install nfs-utils\n

Check the NFS server configuration to make sure the NFS server is running and configured correctly. You can test by mounting it manually with the following commands:

                                                  sudo mkdir -p /mnt/test\nsudo mount -t nfs <nfs-server>:/nfsdata /mnt/test\n
                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html","title":"\u4f7f\u7528 AI Lab \u5fae\u8c03 ChatGLM3 \u6a21\u578b","text":"

Taking the ChatGLM3 model as an example, this article demonstrates how to fine-tune ChatGLM3 in AI Lab using LoRA (Low-Rank Adaptation). The demo program comes from the official ChatGLM3 examples.

The overall fine-tuning workflow is as follows:

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#_1","title":"\u73af\u5883\u4f9d\u8d56","text":"
• GPU memory of at least 20 GB; an RTX 4090 or an NVIDIA A/H-series GPU is recommended
• At least 200 GB of available disk space
• At least 8 CPU cores; 16 cores recommended
• 64 GB of RAM; 128 GB recommended

                                                  Info

Before starting, check that the AI platform and AI Lab are deployed correctly, the GPU queue resources have been initialized successfully, and compute resources are sufficient.

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#_2","title":"\u6570\u636e\u51c6\u5907","text":"

Use the dataset management feature of AI Lab to quickly preheat and persist the data required for fine-tuning the large model, reducing GPU time spent waiting on data preparation and improving resource utilization.

On the dataset list page, create the required data resources. These include the ChatGLM3 code as well as data files, and all of them can be managed uniformly through the dataset list.

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#_3","title":"\u4ee3\u7801\u53ca\u6a21\u578b\u6587\u4ef6","text":"

ChatGLM3 is a conversational pre-trained model jointly released by Zhipu AI and the Tsinghua University KEG lab.

First pull the ChatGLM3 code repository and download the pre-trained model for the subsequent fine-tuning task.

AI Lab preheats the data fully automatically in the background so that later tasks can access it quickly.

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#advertisegen","title":"AdvertiseGen \u6570\u636e\u96c6","text":"

Within China, the data can be fetched directly from Tsinghua Cloud; an HTTP data source is used here.

Note that after creation you need to wait for the dataset preheating to finish. It is usually quick, depending on your network conditions.

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#_4","title":"\u5fae\u8c03\u8f93\u51fa\u6570\u636e","text":"

You also need to prepare an empty dataset for storing the model files output after the fine-tuning task completes. Create an empty dataset here, using PVC as an example.

                                                  Warning

Note that you must use a storage type that supports ReadWriteMany so that subsequent tasks can access the data quickly.

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#_5","title":"\u73af\u5883\u51c6\u5907","text":"

For model developers, preparing the Python dependencies needed for model development is essential. The traditional approach packs the dependencies directly into the development tool's image, or installs them in the local environment, but this leads to inconsistent dependencies and makes environment management, dependency updates, and synchronization harder.

AI Lab provides environment management that decouples Python dependency management from development tools and task images, solving problems such as chaotic dependency management and inconsistent environments.

Here, use the environment management feature of AI Lab to create the environment required for ChatGLM3 fine-tuning for later use.

                                                  Warning

1. The ChatGLM repository contains a requirements.txt file that lists the dependencies needed for ChatGLM3 fine-tuning
2. This fine-tuning run does not use the deepspeed and mpi4py packages; it is recommended to comment them out in requirements.txt, otherwise those packages may fail to compile

In the environment management list, you can quickly create a Python environment through a simple form; a Python 3.11.x environment is needed here.

Because this experiment uses CUDA, GPU resources must be configured here to preheat the dependency libraries that require them.

Creating the environment downloads a series of Python dependencies, and download speed varies by location; a mirror within China is used here to speed up downloads.

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#notebook-ide","title":"\u4f7f\u7528 Notebook \u4f5c\u4e3a IDE","text":"

AI Lab provides Notebook as an IDE, letting users write and run code and view the results directly in the browser. It is well suited to development in data analysis, machine learning, and deep learning.

You can use the JupyterLab Notebook provided by AI Lab to carry out the ChatGLM3 fine-tuning task.

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#jupyterlab-notebook","title":"\u521b\u5efa JupyterLab Notebook","text":"

In the Notebook list, follow the on-page instructions to create a Notebook. Note that you need to configure the Notebook resource parameters according to the resource requirements mentioned earlier, so that resource problems do not affect the fine-tuning later.

                                                  Note

When creating the Notebook, you can mount the previously preheated model code dataset and environment directly into the Notebook, greatly reducing data preparation time.

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#_6","title":"\u6302\u8f7d\u6570\u636e\u96c6\u548c\u4ee3\u7801","text":"

Note: the ChatGLM3 code files are mounted at /home/jovyan/ChatGLM3. You also need to mount the AdvertiseGen dataset at /home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen so that the subsequent fine-tuning task can access the data.

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#pvc","title":"\u6302\u8f7d PVC \u5230\u6a21\u578b\u8f93\u51fa\u6587\u4ef6\u5939","text":"

The model output location used in this run is /home/jovyan/ChatGLM3/finetune_demo/output. Mount the previously created PVC dataset to this directory so that the trained model is saved into the dataset and can be accessed directly by later tasks such as model inference.

Once created, the Notebook interface appears, and you can write code, run it, and view the results directly in the Notebook.

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#chatglm3","title":"\u5fae\u8c03 ChatGLM3","text":"

After entering the Notebook, you will find a File Browser option in the Notebook sidebar showing the previously mounted datasets and code; locate the ChatGLM3 folder there.

The ChatGLM3 fine-tuning code is in the finetune_demo folder. Open the lora_finetune.ipynb file directly; this is the ChatGLM3 fine-tuning notebook.

First, follow the instructions in README.md to understand the whole fine-tuning process. It is recommended to read it through once to make sure the basic environment dependencies and data preparation are complete.

Open a terminal and use conda to switch to the environment you preheated earlier; it matches the JupyterLab kernel, so the subsequent code runs consistently.

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#_7","title":"\u6570\u636e\u9884\u5904\u7406","text":"

First, preprocess the AdvertiseGen dataset to normalize it into the standard format required for LoRA training; the processed data is saved to the AdvertiseGen_fix folder.

import json\nfrom typing import Union\nfrom pathlib import Path\n\ndef _resolve_path(path: Union[str, Path]) -> Path:\n    # Expand ~ and resolve to an absolute path\n    return Path(path).expanduser().resolve()\n\ndef _mkdir(dir_name: Union[str, Path]):\n    # Create the directory if it does not exist yet\n    dir_name = _resolve_path(dir_name)\n    if not dir_name.is_dir():\n        dir_name.mkdir(parents=True, exist_ok=False)\n\ndef convert_adgen(data_dir: Union[str, Path], save_dir: Union[str, Path]):\n    # Convert each AdvertiseGen sample into the conversation format expected by the LoRA demo\n    def _convert(in_file: Path, out_file: Path):\n        _mkdir(out_file.parent)\n        with open(in_file, encoding='utf-8') as fin:\n            with open(out_file, 'wt', encoding='utf-8') as fout:\n                for line in fin:\n                    dct = json.loads(line)\n                    sample = {'conversations': [{'role': 'user', 'content': dct['content']},\n                                                {'role': 'assistant', 'content': dct['summary']}]}\n                    fout.write(json.dumps(sample, ensure_ascii=False) + '\\n')\n\n    data_dir = _resolve_path(data_dir)\n    save_dir = _resolve_path(save_dir)\n\n    train_file = data_dir / 'train.json'\n    if train_file.is_file():\n        out_file = save_dir / train_file.relative_to(data_dir)\n        _convert(train_file, out_file)\n\n    dev_file = data_dir / 'dev.json'\n    if dev_file.is_file():\n        out_file = save_dir / dev_file.relative_to(data_dir)\n        _convert(dev_file, out_file)\n\nconvert_adgen('data/AdvertiseGen', 'data/AdvertiseGen_fix')\n

To save debugging time, you can trim the data in /home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen_fix/dev.json down to 50 entries; the data is in JSON format and is straightforward to manipulate.
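
A hedged sketch of that trimming step (the path follows the mount layout above, and the converter writes one JSON record per line):

from pathlib import Path\n\n# Keep only the first 50 records of the JSON Lines file\np = Path('/home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen_fix/dev.json')\nlines = p.read_text(encoding='utf-8').splitlines(keepends=True)[:50]\np.write_text(''.join(lines), encoding='utf-8')\n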

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#lora","title":"\u672c\u5730 LoRA \u5fae\u8c03\u6d4b\u8bd5","text":"

After the data preprocessing is done, you can run a fine-tuning test directly. The fine-tuning parameters can be configured in /home/jovyan/ChatGLM3/finetune_demo/configs/lora.yaml; adjust the parameters that matter for your run there.

Open a new terminal window and use the following command to run a local fine-tuning test; make sure the parameter configuration and paths are correct:

CUDA_VISIBLE_DEVICES=0 NCCL_P2P_DISABLE=\"1\" NCCL_IB_DISABLE=\"1\" python finetune_hf.py  data/AdvertiseGen_fix  ./chatglm3-6b  configs/lora.yaml\n

In this command:

• finetune_hf.py is the fine-tuning script in the ChatGLM3 code
• data/AdvertiseGen_fix is your preprocessed dataset
• ./chatglm3-6b is the path to the pre-trained model
• configs/lora.yaml is the fine-tuning configuration file

During fine-tuning you can use the nvidia-smi command to check GPU memory usage:

After fine-tuning completes, an output directory is generated under finetune_demo containing the fine-tuned model files. Because this path is backed by the PVC dataset you created earlier, the fine-tuned model files are saved directly into that dataset.

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#_8","title":"\u5fae\u8c03\u4efb\u52a1\u63d0\u4ea4","text":"

Once the local fine-tuning test has completed and you have confirmed that your code and data are correct, you can submit the fine-tuning task to AI Lab for large-scale training and fine-tuning.

This is the recommended model development and fine-tuning workflow: run a local fine-tuning test first to make sure the code and data are sound.

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#_9","title":"\u4f7f\u7528\u754c\u9762\u63d0\u4ea4\u5fae\u8c03\u4efb\u52a1","text":"

Here PyTorch is used to create the fine-tuning task. Choose which cluster's resources to use according to your situation, and make sure the resource requirements mentioned in the preparation section are met.

• Image: you can directly use the model image provided by baizectl
• Startup command: based on your experience with LoRA fine-tuning in the Notebook, the code files and data are under /home/jovyan/ChatGLM3/finetune_demo, so you can use this path directly:

                                                    bash -c \"cd /home/jovyan/ChatGLM3/finetune_demo && CUDA_VISIBLE_DEVICES=0 NCCL_P2P_DISABLE=\"1\" NCCL_IB_DISABLE=\"1\" python finetune_hf.py  data/AdvertiseGen_fix  ./chatglm3-6b  configs/lora.yaml\"\n
• Mount the environment, so that the preloaded environment dependencies can be used not only in the Notebook but also in the task

• Datasets: use the previously preheated datasets directly
  • Set the model output path to the previously created PVC dataset
  • Mount the AdvertiseGen dataset at /home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen
• Configure sufficient GPU resources to make sure the fine-tuning task runs properly

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#_10","title":"\u67e5\u770b\u4efb\u52a1\u72b6\u6001","text":"

After the task is submitted successfully, you can follow the training progress in real time in the task list, where you can see the task status, resource usage, logs, and other information.

View the task logs

After the task finishes, you can find the fine-tuned model files in the output dataset and use them for subsequent inference tasks.

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#baizectl","title":"\u4f7f\u7528 baizectl \u63d0\u4ea4\u4efb\u52a1","text":"

AI Lab's Notebook supports using the baizectl command-line tool directly, without extra authentication. If you prefer a CLI, you can submit the task with baizectl directly.

                                                  baizectl job submit --name finetunel-chatglm3 -t PYTORCH \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --resources cpu=8,memory=16Gi,nvidia.com/gpu=1 \\\n    --workers 1 \\\n    --queue default \\\n    --working-dir /home/jovyan/ChatGLM3 \\\n    --datasets AdvertiseGen:/home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen  \\\n    --datasets output:/home/jovyan/ChatGLM3/finetune_demo/output  \\\n    --labels job_type=pytorch \\\n    --restart-policy on-failure \\\n    -- bash -c \"cd /home/jovyan/ChatGLM3/finetune_demo && CUDA_VISIBLE_DEVICES=0 NCCL_P2P_DISABLE=\"1\" NCCL_IB_DISABLE=\"1\" python finetune_hf.py  data/AdvertiseGen_fix  ./chatglm3-6b  configs/lora.yaml\"\n

For more details on how to use baizectl, see the baizectl documentation.

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#_11","title":"\u6a21\u578b\u63a8\u7406","text":"

After the fine-tuning task completes, you can run inference with the fine-tuned model. Using the inference services provided by AI Lab, create an inference service from the output model.

In the inference service list, create a new inference service. When selecting the model, choose the dataset that holds the fine-tuning output and configure the model path.

The model's resource requirements and the inference service's GPU requirements must be configured based on the model size and the inference concurrency; you can start from the resource configuration used by the earlier fine-tuning task.

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#_12","title":"\u914d\u7f6e\u6a21\u578b\u8fd0\u884c\u65f6","text":"

Configuring the model runtime is especially important. AI Lab already supports vLLM as the runtime for model inference services, so you can simply select vLLM.

vLLM supports a very wide range of large language models; visit vLLM to learn more. All of these models can be used conveniently in AI Lab.

Once created, you can see your inference service in the inference service list; in the model service list you can obtain the model's access address directly.

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#_13","title":"\u4f7f\u7528\u6a21\u578b\u670d\u52a1\u6d4b\u8bd5","text":"

Try it in a terminal: use the curl command to test the model service. The returned result confirms that the model service can be used for inference tasks.

curl -X POST http://10.20.100.210:31118/v2/models/chatglm3-6b/generate \\\n  -d '{\"text_input\": \"hello\", \"stream\": false, \"sampling_parameters\": \"{\\\"temperature\\\": 0.7, \\\"top_p\\\": 0.95, \\\"max_tokens\\\": 1024}\"}'\n

                                                  "},{"location":"admin/baize/best-practice/finetunel-llm.html#_14","title":"\u7ed3\u8bed","text":"

Using ChatGLM3 as an example, this article walked you through getting started with model fine-tuning in AI Lab, fine-tuning the ChatGLM3 model with LoRA.

AI Lab provides a rich set of features to help model developers quickly carry out model development, fine-tuning, and inference, along with extensive OpenAPI interfaces that make it easy to integrate with third-party application ecosystems.

                                                  "},{"location":"admin/baize/best-practice/label-studio.html","title":"\u90e8\u7f72 Label Studio","text":"

Label Studio is an open-source data annotation tool for a wide range of machine learning and AI tasks. A brief overview of Label Studio:

• Supports annotation of many data types, including images, audio, video, and text
• Can be used for tasks such as object detection, image classification, speech transcription, and named entity recognition
• Provides a customizable annotation interface
• Supports multiple annotation formats and export options

With its flexibility and rich feature set, Label Studio gives data scientists and machine learning engineers a powerful data annotation solution.

                                                  "},{"location":"admin/baize/best-practice/label-studio.html#ai","title":"\u90e8\u7f72\u5230 AI \u7b97\u529b\u5e73\u53f0","text":"

To use Label Studio in AI Lab, deploy it to the global service cluster; you can deploy it quickly with Helm.

                                                  Note

For more deployment details, see Deploy Label Studio on Kubernetes.

1. Open the global service cluster page, find Helm Apps -> Helm Repositories in the left navigation, click the Create Repository button, and fill in the following parameters:

2. After the repository is added successfully, click ⋮ on the right of the list entry and select Sync Repository; the sync completes after a short wait. (This sync operation is also used for later Label Studio updates.)

3. Then go to the Helm Templates page, search for label-studio, and click the card.

4. Select the latest version and configure the installation parameters as shown below, using label-studio as the name. Creating a new namespace is recommended. Switch the parameter view to YAML and modify the configuration according to the comments.

global:\n  image:\n    repository: heartexlabs/label-studio   # Configure a proxy address here if docker.io is unreachable\n  extraEnvironmentVars:\n    LABEL_STUDIO_HOST: https://{access-address}/label-studio    # The login address to use; refer to the current page URL\n    LABEL_STUDIO_USERNAME: {user-email}    # Must be an email address; replace with your own\n    LABEL_STUDIO_PASSWORD: {user-password}    \napp:\n  nginx:\n    livenessProbe:\n      path: /label-studio/nginx_health\n    readinessProbe:\n      path: /label-studio/version\n

This completes the installation of Label Studio.

                                                  Warning

PostgreSQL is installed by default as the data service middleware. If the image pull fails, docker.io is probably unreachable; simply switch to an available proxy.

If you have your own PostgreSQL data service middleware, you can configure it with the following parameters:

global:\n  image:\n    repository: heartexlabs/label-studio   # Configure a proxy address here if docker.io is unreachable\n  extraEnvironmentVars:\n    LABEL_STUDIO_HOST: https://{access-address}/label-studio    # The login address to use; refer to the current page URL\n    LABEL_STUDIO_USERNAME: {user-email}    # Must be an email address; replace with your own\n    LABEL_STUDIO_PASSWORD: {user-password}    \napp:\n  nginx:\n    livenessProbe:\n      path: /label-studio/nginx_health\n    readinessProbe:\n      path: /label-studio/version\npostgresql:\n  enabled: false  # Disable the built-in PostgreSQL\nexternalPostgresql:\n  host: \"postgres-postgresql\"  # PostgreSQL address\n  port: 5432\n  username: \"label_studio\"  # PostgreSQL username\n  password: \"your_label_studio_password\"  # PostgreSQL password\n  database: \"label_studio\"  # PostgreSQL database name\n
                                                  "},{"location":"admin/baize/best-practice/label-studio.html#gproduct","title":"\u6dfb\u52a0 GProduct \u5230\u5bfc\u822a\u680f","text":"

To add Label Studio to the navigation bar, you can follow the Global Management OEM IN approach. The example below adds it to the AI Lab secondary navigation.

                                                  "},{"location":"admin/baize/best-practice/label-studio.html#_1","title":"\u6dfb\u52a0\u4ee3\u7406\u8bbf\u95ee","text":"
                                                  apiVersion: ghippo.io/v1alpha1\nkind: GProductProxy\nmetadata:\n  name: label-studio\nspec:\n  gproduct: label-studio\n  proxies:\n  - authnCheck: false\n    destination:\n      host: label-studio-ls-app.label-studio.svc.cluster.local\n      port: 80\n    match:\n      uri:\n        prefix: /label-studio\n
                                                  "},{"location":"admin/baize/best-practice/label-studio.html#ai-lab","title":"\u6dfb\u52a0\u5230 AI Lab","text":"

Modify the CR named baize of the GProductNavigator CRD, then make the following changes to the existing configuration:

apiVersion: ghippo.io/v1alpha1\nkind: GProductNavigator\nmetadata:\n  annotations:\n    meta.helm.sh/release-name: baize\n    meta.helm.sh/release-namespace: baize-system\n  labels:\n    app.kubernetes.io/managed-by: Helm\n    gProductName: baize\n  name: baize\nspec:\n  category: cloudnativeai\n  gproduct: baize\n  iconUrl: ./ui/baize/logo.svg\n  isCustom: false\n  localizedName:\n    en-US: AI Lab\n    zh-CN: AI Lab\n  menus:\n    - iconUrl: ''\n      isCustom: false\n      localizedName:\n        en-US: AI Lab\n        zh-CN: AI Lab\n      name: workspace-view\n      order: 1\n      url: ./baize\n      visible: true\n    - iconUrl: ''\n      isCustom: false\n      localizedName:\n        en-US: Operator\n        zh-CN: \u8fd0\u7ef4\u7ba1\u7406\n      name: admin-view\n      order: 1\n      url: ./baize/admin\n      visible: true\n    # Start of the addition\n    - iconUrl: ''\n      localizedName:\n        en-US: Data Annotation\n        zh-CN: \u6570\u636e\u6807\u6ce8\n      name: label-studio\n      order: 1\n      target: blank    # Open in a new tab\n      url: https://{access-address}/label-studio    # Access address\n      visible: true\n    # End of the addition\n  name: AI Lab\n  order: 10\n  url: ./baize\n  visible: true\n
                                                  "},{"location":"admin/baize/best-practice/label-studio.html#_2","title":"\u6dfb\u52a0\u6548\u679c","text":""},{"location":"admin/baize/best-practice/label-studio.html#_3","title":"\u7ed3\u8bed","text":"

That covers how to add Label Studio and use it as the annotation component of AI Lab. By adding the annotated data to AI Lab datasets and linking it with algorithm development, it completes the algorithm development workflow. For follow-up usage, see the other reference documents.

                                                  "},{"location":"admin/baize/best-practice/train-with-deepspeed.html","title":"\u5982\u4f55\u63d0\u4ea4 DeepSpeed \u8bad\u7ec3\u4efb\u52a1","text":"

Following the DeepSpeed official documentation, we recommend implementing this by modifying your code.

That is, use deepspeed.init_distributed() instead of torch.distributed.init_process_group(...). Then launch with torchrun and submit the job as a PyTorch distributed task, which lets the DeepSpeed task run.

You can run your DeepSpeed training script with torchrun, the utility PyTorch provides for distributed training, combining torchrun with the DeepSpeed API to launch your training task.
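
A minimal sketch of that substitution (the backend choice is an assumption; torchrun supplies the rank and world-size environment variables):

import deepspeed\n\n# Before: torch.distributed.init_process_group(backend='nccl')\ndeepspeed.init_distributed(dist_backend='nccl')  # reads the RANK/WORLD_SIZE env vars set by torchrun\n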

The following is an example of running a DeepSpeed training script with torchrun:

1. Write the training script:

                                                    train.py
import torch\nimport deepspeed\nfrom torch.utils.data import DataLoader\n\n# Model and data loading\nmodel = YourModel()\ntrain_dataset = YourDataset()\ntrain_dataloader = DataLoader(train_dataset, batch_size=32)\n\n# Path to the configuration file\ndeepspeed_config = \"deepspeed_config.json\"\n\n# Create the DeepSpeed training engine\nmodel_engine, optimizer, _, _ = deepspeed.initialize(\n    model=model,\n    model_parameters=model.parameters(),\n    config_params=deepspeed_config\n)\n\n# Training loop\nfor batch in train_dataloader:\n    loss = model_engine(batch)\n    model_engine.backward(loss)\n    model_engine.step()\n
2. Create the DeepSpeed configuration file:

                                                    deepspeed_config.json
                                                    {\n  \"train_batch_size\": 32,\n  \"gradient_accumulation_steps\": 1,\n  \"fp16\": {\n    \"enabled\": true,\n    \"loss_scale\": 0\n  },\n  \"optimizer\": {\n    \"type\": \"Adam\",\n    \"params\": {\n      \"lr\": 0.00015,\n      \"betas\": [0.9, 0.999],\n      \"eps\": 1e-08,\n      \"weight_decay\": 0\n    }\n  }\n}\n
3. Run the training script with torchrun or baizectl:

torchrun train.py

This way you combine PyTorch's distributed training capabilities with DeepSpeed's optimization techniques for more efficient training. In a Notebook, you can submit the job with baizectl:

baizectl job submit --pytorch --workers 2 -- torchrun train.py
                                                  "},{"location":"admin/baize/developer/index.html","title":"\u5f00\u53d1\u63a7\u5236\u53f0","text":"

The developer console is where developers run day-to-day tasks such as AI inference and large-model training.

The overview gives users a quick view of the resources and usage in the current workspace, including counts of GPU resources, Notebooks, jobs, and datasets.

                                                  "},{"location":"admin/baize/developer/quick-start.html","title":"\u5feb\u901f\u5165\u95e8","text":"

This page is a short hands-on guide to the full development and training workflow in AI Lab: datasets, Notebooks, and training jobs.

                                                  "},{"location":"admin/baize/developer/quick-start.html#_2","title":"\u51c6\u5907\u6570\u636e\u96c6","text":"

Click Data Management -> Datasets, click the Create button, and create the following three datasets.

                                                  "},{"location":"admin/baize/developer/quick-start.html#_3","title":"\u6570\u636e\u96c6\uff1a\u8bad\u7ec3\u4ee3\u7801","text":"
                                                  • \u4ee3\u7801\u6570\u636e\u6e90\uff1ahttps://github.com/samzong/training-sample-code.git\uff0c\u4e3b\u8981\u662f\u4e00\u4e2a\u7b80\u5355\u7684 Tensorflow \u4ee3\u7801\u3002
                                                  • \u5982\u679c\u662f\u4e2d\u56fd\u5883\u5185\u7684\u7528\u6237\uff0c\u53ef\u4ee5\u4f7f\u7528 Gitee \u52a0\u901f\uff1ahttps://gitee.com/samzong_lu/training-sample-code.git
                                                  • \u4ee3\u7801\u8def\u5f84\u4e3a tensorflow/tf-fashion-mnist-sample

Note

Currently only StorageClasses with the ReadWriteMany access mode are supported; please use NFS or the recommended JuiceFS.

                                                  "},{"location":"admin/baize/developer/quick-start.html#_4","title":"\u6570\u636e\u96c6\uff1a\u8bad\u7ec3\u6570\u636e","text":"

                                                  \u672c\u6b21\u8bad\u7ec3\u4f7f\u7528\u7684\u6570\u636e\u4e3a https://github.com/zalandoresearch/fashion-mnist.git\uff0c \u8fd9\u662f Fashion-MNIST \u6570\u636e\u96c6\u3002

                                                  \u5982\u679c\u662f\u4e2d\u56fd\u5883\u5185\u7684\u7528\u6237\uff0c\u53ef\u4ee5\u4f7f\u7528 Gitee \u52a0\u901f\uff1ahttps://gitee.com/samzong_lu/fashion-mnist.git

Note

If the training-data dataset is not created, the training script downloads the data automatically; preparing the training data in advance speeds up training.

                                                  "},{"location":"admin/baize/developer/quick-start.html#_5","title":"\u6570\u636e\u96c6\uff1a\u7a7a\u6570\u636e\u96c6","text":"

                                                  AI Lab \u652f\u6301\u5c06 PVC \u4f5c\u4e3a\u6570\u636e\u96c6\u7684\u6570\u636e\u6e90\u7c7b\u578b\uff0c\u6240\u4ee5\u521b\u5efa\u4e00\u4e2a\u7a7a PVC \u7ed1\u5b9a\u5230\u6570\u636e\u96c6\u540e\uff0c\u53ef\u5c06\u7a7a\u6570\u636e\u96c6\u4f5c\u4e3a\u5b58\u653e\u540e\u7eed\u8bad\u7ec3\u4efb\u52a1\u7684\u8f93\u51fa\u6570\u636e\u96c6\uff0c\u5b58\u653e\u6a21\u578b\u548c\u65e5\u5fd7\u3002

                                                  "},{"location":"admin/baize/developer/quick-start.html#tensorflow","title":"\u73af\u5883\u4f9d\u8d56: tensorflow","text":"

                                                  \u811a\u672c\u5728\u8fd0\u884c\u65f6\uff0c\u9700\u8981\u4f9d\u8d56 Tensorflow \u7684 Python \u5e93\uff0c\u53ef\u4ee5\u4f7f\u7528 AI Lab \u7684\u73af\u5883\u4f9d\u8d56\u7ba1\u7406\u529f\u80fd\uff0c\u63d0\u524d\u5c06\u9700\u8981\u7684 Python \u5e93\u4e0b\u8f7d\u548c\u51c6\u5907\u5b8c\u6210\uff0c\u65e0\u9700\u4f9d\u8d56\u955c\u50cf\u6784\u5efa

                                                  \u53c2\u8003 \u73af\u5883\u4f9d\u8d56 \u7684\u64cd\u4f5c\u65b9\u5f0f\uff0c\u6dfb\u52a0\u4e00\u4e2a CONDA \u73af\u5883.

name: tensorflow
channels:
  - defaults
  - conda-forge
dependencies:
  - python=3.12
  - tensorflow
prefix: /opt/conda/envs/tensorflow

Note

Wait for the environment to finish preheating, then simply mount it into Notebooks and training jobs; the base image provided by AI Lab is all you need.
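
As a quick sanity check, you can run a short snippet in a Notebook cell after mounting the environment; this sketch assumes the tensorflow environment defined above and that the environment's interpreter is active:

# Verify that the mounted "tensorflow" environment is in effect.
import sys
print(sys.executable)    # expected to point into the conda env, e.g. .../envs/tensorflow

import tensorflow as tf  # imports only if the environment is mounted correctly
print(tf.__version__)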

                                                  "},{"location":"admin/baize/developer/quick-start.html#notebook","title":"\u4f7f\u7528 Notebook \u8c03\u8bd5\u811a\u672c","text":"

                                                  \u51c6\u5907\u5f00\u53d1\u73af\u5883\uff0c\u70b9\u51fb\u5bfc\u822a\u680f\u7684 Notebooks \uff0c\u70b9\u51fb \u521b\u5efa \u3002

                                                  • \u5c06\u51c6\u5907\u597d\u7684\u4e09\u4e2a\u6570\u636e\u96c6\u8fdb\u884c\u5173\u8054\uff0c\u6302\u8f7d\u8def\u5f84\u8bf7\u53c2\u7167\u4e0b\u56fe\u586b\u5199\uff0c\u6ce8\u610f\u5c06\u9700\u8981\u4f7f\u7528\u7684\u7a7a\u6570\u636e\u96c6\u5728 \u8f93\u51fa\u6570\u636e\u96c6\u4f4d\u7f6e\u914d\u7f6e

                                                  • \u9009\u62e9\u5e76\u7ed1\u5b9a\u73af\u5883\u4f9d\u8d56\u5305

                                                    \u7b49\u5f85 Notebook \u521b\u5efa\u6210\u529f\uff0c\u70b9\u51fb\u5217\u8868\u4e2d\u7684\u8bbf\u95ee\u5730\u5740\uff0c\u8fdb\u5165 Notebook\u3002\u5e76\u5728 Notebook \u7684\u7ec8\u7aef\u4e2d\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u8fdb\u884c\u4efb\u52a1\u8bad\u7ec3\u3002

Note

The script uses TensorFlow; if you forgot to associate the dependency library, you can install it temporarily with pip install tensorflow.

python /home/jovyan/code/tensorflow/tf-fashion-mnist-sample/train.py
                                                  "},{"location":"admin/baize/developer/quick-start.html#_6","title":"\u521b\u5efa\u8bad\u7ec3\u4efb\u52a1","text":"
                                                  1. \u70b9\u51fb\u5bfc\u822a\u680f\u7684 \u4efb\u52a1\u4e2d\u5fc3 -> \u8bad\u7ec3\u4efb\u52a1 \uff0c\u521b\u5efa\u4e00\u4e2a Tensorflow \u5355\u673a\u4efb\u52a1
                                                  2. \u5148\u586b\u5199\u57fa\u672c\u53c2\u6570\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65
                                                  3. \u5728\u4efb\u52a1\u8d44\u6e90\u914d\u7f6e\u4e2d\uff0c\u6b63\u786e\u914d\u7f6e\u4efb\u52a1\u8d44\u6e90\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65

• Image: if the environment dependency package was prepared earlier, the default image is fine; if not, make sure the image contains the TensorFlow Python library
• Shell: bash is fine
• Startup command:

python /home/jovyan/code/tensorflow/tf-fashion-mnist-sample/train.py
4. In the advanced configuration, enable Job Analysis (Tensorboard) and click OK.

Note

The logs are located in the output dataset at /home/jovyan/model/train/logs/

5. Return to the training job list and wait until the status changes to Succeeded. Click ⋮ on the right side of the list to view details, clone the job, update its priority, view logs, delete it, and more.

6. After the job is created successfully, click Job Analysis in the left navigation bar to view the job status and tune the training.

                                                  "},{"location":"admin/baize/developer/dataset/create-use-delete.html","title":"\u6570\u636e\u96c6\u5217\u8868","text":"

                                                  AI Lab \u63d0\u4f9b\u6a21\u578b\u5f00\u53d1\u3001\u8bad\u7ec3\u4ee5\u53ca\u63a8\u7406\u8fc7\u7a0b\u6240\u6709\u9700\u8981\u7684\u6570\u636e\u96c6\u7ba1\u7406\u529f\u80fd\u3002\u76ee\u524d\u652f\u6301\u5c06\u591a\u79cd\u6570\u636e\u6e90\u7edf\u4e00\u63a5\u5165\u80fd\u529b\u3002

                                                  \u901a\u8fc7\u7b80\u5355\u914d\u7f6e\u5373\u53ef\u5c06\u6570\u636e\u6e90\u63a5\u5165\u5230 AI Lab \u4e2d\uff0c\u5b9e\u73b0\u6570\u636e\u7684\u7edf\u4e00\u7eb3\u7ba1\u3001\u9884\u70ed\u3001\u6570\u636e\u96c6\u7ba1\u7406\u7b49\u529f\u80fd\u3002

                                                  "},{"location":"admin/baize/developer/dataset/create-use-delete.html#_2","title":"\u521b\u5efa\u6570\u636e\u96c6","text":"
                                                  1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u6570\u636e\u7ba1\u7406 -> \u6570\u636e\u96c6\u5217\u8868 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae\u3002

                                                  2. \u9009\u62e9\u6570\u636e\u96c6\u5f52\u5c5e\u7684\u5de5\u4f5c\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4 \u4e0b\u4e00\u6b65 \u3002

                                                  3. \u914d\u7f6e\u76ee\u6807\u6570\u636e\u7684\u6570\u636e\u6e90\u7c7b\u578b\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a \u3002

The following data sources are currently supported:

• GIT: repositories such as GitHub, GitLab, and Gitee
• S3: object storage such as Amazon cloud
• HTTP: a valid HTTP URL entered directly
• PVC: a pre-created Kubernetes PersistentVolumeClaim
• NFS: NFS shared storage
4. Once the dataset is created successfully you are returned to the dataset list. You can perform more operations via ⋮ on the right.

Info

The system automatically performs a one-time data preload right after a dataset is created; the dataset cannot be used until the preload completes.

                                                  "},{"location":"admin/baize/developer/dataset/create-use-delete.html#_3","title":"\u6570\u636e\u96c6\u4f7f\u7528","text":"

                                                  \u6570\u636e\u96c6\u521b\u5efa\u6210\u529f\u540e\uff0c\u53ef\u4ee5\u5728\u6a21\u578b\u8bad\u7ec3\u3001\u63a8\u7406\u7b49\u4efb\u52a1\u4e2d\u4f7f\u7528\u3002

                                                  "},{"location":"admin/baize/developer/dataset/create-use-delete.html#notebook","title":"\u5728 Notebook \u4e2d\u4f7f\u7528","text":"

                                                  \u5728\u521b\u5efa Notebook \u4e2d\uff0c\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u6570\u636e\u96c6\uff1b\u4f7f\u7528\u65b9\u5f0f\u5982\u4e0b\uff1a

                                                  • \u4f7f\u7528\u6570\u636e\u96c6\u505a\u8bad\u7ec3\u6570\u636e\u6302\u8f7d
                                                  • \u4f7f\u7528\u6570\u636e\u96c6\u505a\u4ee3\u7801\u6302\u8f7d

                                                  "},{"location":"admin/baize/developer/dataset/create-use-delete.html#_4","title":"\u5728 \u8bad\u7ec3\u4efb\u52a1 \u4e2d\u4f7f\u7528","text":"
                                                  • \u4f7f\u7528\u6570\u636e\u96c6\u6307\u5b9a\u4efb\u52a1\u8f93\u51fa
                                                  • \u4f7f\u7528\u6570\u636e\u96c6\u6307\u5b9a\u4efb\u52a1\u8f93\u5165
                                                  • \u4f7f\u7528\u6570\u636e\u96c6\u6307\u5b9a TensorBoard \u8f93\u51fa
                                                  "},{"location":"admin/baize/developer/dataset/create-use-delete.html#_5","title":"\u5728\u63a8\u7406\u670d\u52a1 \u4e2d\u4f7f\u7528","text":"
                                                  • \u4f7f\u7528\u6570\u636e\u96c6\u6302\u8f7d\u6a21\u578b
                                                  "},{"location":"admin/baize/developer/dataset/create-use-delete.html#_6","title":"\u5220\u9664\u6570\u636e\u96c6","text":"

                                                  \u5982\u679c\u53d1\u73b0\u6570\u636e\u96c6\u5197\u4f59\u3001\u8fc7\u671f\u6216\u56e0\u5176\u4ed6\u7f18\u6545\u4e0d\u518d\u9700\u8981\uff0c\u53ef\u4ee5\u4ece\u6570\u636e\u96c6\u5217\u8868\u4e2d\u5220\u9664\u3002

                                                  1. \u5728\u6570\u636e\u96c6\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u5220\u9664 \u3002

                                                  2. \u5728\u5f39\u7a97\u4e2d\u786e\u8ba4\u8981\u5220\u9664\u7684\u6570\u636e\u96c6\uff0c\u8f93\u5165\u6570\u636e\u96c6\u540d\u79f0\u540e\u70b9\u51fb \u5220\u9664 \u3002

                                                  3. \u5c4f\u5e55\u63d0\u793a\u5220\u9664\u6210\u529f\uff0c\u8be5\u6570\u636e\u96c6\u4ece\u5217\u8868\u4e2d\u6d88\u5931\u3002

                                                  Caution

                                                  \u6570\u636e\u96c6\u4e00\u65e6\u5220\u9664\u5c06\u4e0d\u53ef\u6062\u590d\uff0c\u8bf7\u8c28\u614e\u64cd\u4f5c\u3002

                                                  "},{"location":"admin/baize/developer/dataset/environments.html","title":"\u7ba1\u7406\u73af\u5883","text":"

                                                  \u672c\u6587\u8bf4\u660e\u5982\u4f55\u5728 AI Lab \u4e2d\u7ba1\u7406\u4f60\u7684\u73af\u5883\u4f9d\u8d56\u5e93\uff0c\u4ee5\u4e0b\u662f\u5177\u4f53\u64cd\u4f5c\u6b65\u9aa4\u548c\u6ce8\u610f\u4e8b\u9879\u3002

                                                  1. \u73af\u5883\u7ba1\u7406\u6982\u8ff0
                                                  2. \u521b\u5efa\u65b0\u73af\u5883
                                                  3. \u914d\u7f6e\u73af\u5883
                                                  4. \u6545\u969c\u6392\u9664
                                                  "},{"location":"admin/baize/developer/dataset/environments.html#_2","title":"\u73af\u5883\u7ba1\u7406\u6982\u8ff0","text":"

                                                  \u4f20\u7edf\u65b9\u5f0f\uff0c\u4e00\u822c\u4f1a\u5c06 Python \u73af\u5883\u4f9d\u8d56\u5728\u955c\u50cf\u4e2d\u6784\u5efa\uff0c\u955c\u50cf\u5e26\u6709 Python \u7248\u672c\u548c\u4f9d\u8d56\u5305\u7684\u955c\u50cf\uff0c\u7ef4\u62a4\u6210\u672c\u8f83\u9ad8\u4e14\u66f4\u65b0\u4e0d\u65b9\u4fbf\uff0c\u5f80\u5f80\u9700\u8981\u91cd\u65b0\u6784\u5efa\u955c\u50cf\u3002

                                                  \u800c\u5728 AI Lab \u4e2d\uff0c\u7528\u6237\u53ef\u4ee5\u901a\u8fc7 \u73af\u5883\u7ba1\u7406 \u6a21\u5757\u6765\u7ba1\u7406\u7eaf\u7cb9\u7684\u73af\u5883\u4f9d\u8d56\uff0c\u5c06\u8fd9\u90e8\u5206\u4ece\u955c\u50cf\u4e2d\u89e3\u8026\uff0c\u5e26\u6765\u7684\u4f18\u52bf\u6709\uff1a

                                                  • \u4e00\u4efd\u73af\u5883\u591a\u5904\u4f7f\u7528\uff0c\u540c\u65f6\u53ef\u4ee5\u5728 Notebook\u3001\u5206\u5e03\u5f0f\u8bad\u7ec3\u4efb\u52a1\u3001\u4e43\u81f3\u63a8\u7406\u670d\u52a1\u4e2d\u4f7f\u7528\u3002
                                                  • \u66f4\u65b0\u4f9d\u8d56\u5305\u66f4\u52a0\u65b9\u4fbf\uff0c\u53ea\u9700\u8981\u66f4\u65b0\u73af\u5883\u4f9d\u8d56\u5373\u53ef\uff0c\u65e0\u9700\u91cd\u65b0\u6784\u5efa\u955c\u50cf\u3002

                                                  \u4ee5\u4e0b\u4e3a\u73af\u5883\u7ba1\u7406\u7684\u4e3b\u8981\u7ec4\u6210\u90e8\u5206\uff1a

                                                  • \u96c6\u7fa4 \uff1a\u9009\u62e9\u9700\u8981\u64cd\u4f5c\u7684\u96c6\u7fa4\u3002
                                                  • \u547d\u540d\u7a7a\u95f4 \uff1a\u9009\u62e9\u547d\u540d\u7a7a\u95f4\u4ee5\u9650\u5b9a\u64cd\u4f5c\u8303\u56f4\u3002
                                                  • \u73af\u5883\u5217\u8868 \uff1a\u5c55\u793a\u5f53\u524d\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u6240\u6709\u73af\u5883\u53ca\u5176\u72b6\u6001\u3002

| Field | Description | Example |
| --- | --- | --- |
| Name | The environment's name | my-environment |
| Status | The environment's current status (normal or failed). A newly created environment goes through a preheating process, after which it can be used in other jobs. | Normal |
| Created At | When the environment was created | 2023-10-01 10:00:00 |

"},{"location":"admin/baize/developer/dataset/environments.html#_3","title":"Create a New Environment","text":"

On the Environment Management page, click the Create button in the upper right to start the environment creation flow.

| Field | Description | Example |
| --- | --- | --- |
| Name | The environment's name, 2-63 characters, which must start and end with a lowercase letter or digit. | my-environment |
| Deployment Location | Cluster: select the cluster to deploy to. Namespace: select the namespace. | gpu-cluster, default |
| Remarks | Remark information. | This is a test environment |
| Labels | Labels for the environment. | env:test |
| Annotations | Annotations for the environment. | annotation example |

After filling everything in, click Next to proceed to environment configuration.

"},{"location":"admin/baize/developer/dataset/environments.html#_4","title":"Configure the Environment","text":"

In the environment configuration step, configure the Python version and the dependency package management tool.

| Field | Description | Example |
| --- | --- | --- |
| Python Version | Select the required Python version | 3.12.3 |
| Package Manager | Select the package management tool, either PIP or CONDA | PIP |
| Environment Data | If PIP is selected: enter a dependency list in requirements.txt format in the editor below. If CONDA is selected: enter a dependency list in environment.yaml format in the editor below. | numpy==1.21.0 |
| Other Options | pip extra index URL: an extra pip index, for enterprises with an internal private repository or a PIP mirror site. GPU Configuration: enable or disable GPU configuration; some GPU-related dependency packages need GPU resources configured during preloading. Associated Storage: the storage configuration in which the environment dependencies will be stored; note that it must support ReadWriteMany. | https://pypi.example.com, Enabled, my-storage-config |

After configuration is complete, click Create; the system automatically creates and configures the new Python environment.

                                                  "},{"location":"admin/baize/developer/dataset/environments.html#_5","title":"\u6545\u969c\u6392\u9664","text":"
                                                  • \u5982\u679c\u73af\u5883\u521b\u5efa\u5931\u8d25\uff1a

                                                    • \u68c0\u67e5\u7f51\u7edc\u8fde\u63a5\u662f\u5426\u6b63\u5e38\u3002
                                                    • \u786e\u8ba4\u586b\u5199\u7684 Python \u7248\u672c\u548c\u5305\u7ba1\u7406\u5668\u914d\u7f6e\u65e0\u8bef\u3002
                                                    • \u786e\u4fdd\u6240\u9009\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u53ef\u7528\u3002
                                                  • \u5982\u679c\u4f9d\u8d56\u9884\u70ed\u5931\u8d25\uff1a

                                                    • \u68c0\u67e5 requirements.txt \u6216 environment.yaml \u6587\u4ef6\u683c\u5f0f\u662f\u5426\u6b63\u786e\u3002
                                                    • \u786e\u8ba4\u4f9d\u8d56\u5305\u540d\u79f0\u548c\u7248\u672c\u662f\u5426\u6b63\u786e\u65e0\u8bef\u3002\u5982\u9047\u5230\u5176\u4ed6\u95ee\u9898\uff0c\u8bf7\u8054\u7cfb\u5e73\u53f0\u7ba1\u7406\u5458\u6216\u67e5\u770b\u5e73\u53f0\u5e2e\u52a9\u6587\u6863\u83b7\u53d6\u66f4\u591a\u652f\u6301\u3002

                                                  \u4ee5\u4e0a\u5373\u4e3a\u5728 AI Lab \u4e2d\u7ba1\u7406 Python \u4f9d\u8d56\u5e93\u7684\u57fa\u672c\u64cd\u4f5c\u6b65\u9aa4\u548c\u6ce8\u610f\u4e8b\u9879\u3002

                                                  "},{"location":"admin/baize/developer/inference/models.html","title":"\u4e86\u89e3\u6a21\u578b\u652f\u6301\u60c5\u51b5","text":"

                                                  \u968f\u7740 AI Lab \u7684\u5feb\u901f\u8fed\u4ee3\uff0c\u6211\u4eec\u5df2\u7ecf\u652f\u6301\u4e86\u591a\u79cd\u6a21\u578b\u7684\u63a8\u7406\u670d\u52a1\uff0c\u60a8\u53ef\u4ee5\u5728\u8fd9\u91cc\u770b\u5230\u6240\u652f\u6301\u7684\u6a21\u578b\u4fe1\u606f\u3002

                                                  • AI Lab v0.3.0 \u4e0a\u7ebf\u4e86\u6a21\u578b\u63a8\u7406\u670d\u52a1\uff0c\u9488\u5bf9\u4f20\u7edf\u7684\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\uff0c\u65b9\u4fbf\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528AI Lab \u7684\u63a8\u7406\u670d\u52a1\uff0c\u65e0\u9700\u5173\u5fc3\u6a21\u578b\u7684\u90e8\u7f72\u548c\u7ef4\u62a4\u3002
                                                  • AI Lab v0.6.0 \u652f\u6301\u4e86\u5b8c\u6574\u7248\u672c\u7684 vLLM \u63a8\u7406\u80fd\u529b\uff0c\u652f\u6301\u8bf8\u591a\u5927\u8bed\u8a00\u6a21\u578b\uff0c\u5982 LLama\u3001Qwen\u3001ChatGLM \u7b49\u3002

                                                  \u60a8\u53ef\u4ee5\u5728 AI Lab \u4e2d\u4f7f\u7528\u7ecf\u8fc7\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9a8c\u8bc1\u8fc7\u7684 GPU \u7c7b\u578b\uff1b \u66f4\u591a\u7ec6\u8282\u53c2\u9605 GPU \u652f\u6301\u77e9\u9635\u3002

                                                  "},{"location":"admin/baize/developer/inference/models.html#triton-inference-server","title":"Triton Inference Server","text":"

Triton Inference Server provides solid support for traditional deep learning models; we currently support the mainstream inference backend services:

| Backend | Supported model formats | Introduction |
| --- | --- | --- |
| pytorch | Models in TorchScript and PyTorch 2.0 format | triton-inference-server/pytorch_backend |
| tensorflow | TensorFlow 2.x | triton-inference-server/tensorflow_backend |
| vLLM (Deprecated) | Same as vLLM | The supported models match the vLLM supported-model list |

Danger

Using vLLM as a Triton backend is deprecated; we recommend using the latest native vLLM support to deploy your large language models.

                                                  "},{"location":"admin/baize/developer/inference/models.html#vllm","title":"vLLM","text":"

With vLLM we can bring up large language models quickly; the list of models we support usually matches the vLLM Supported Models.

• HuggingFace models: we support most HuggingFace models; see the HuggingFace Model Hub for more.
• vLLM Supported Models lists the supported large language models and vision-language models.
• Models fine-tuned from model architectures supported by vLLM.
"},{"location":"admin/baize/developer/inference/models.html#vllm_1","title":"New vLLM Features","text":"

AI Lab also supports some newer features when using vLLM as the inference tool:

• Enable a LoRA adapter when serving a model to optimize the inference service
• Provide an OpenAI-compatible OpenAPI interface, so users switching to the local inference service can do so quickly and at low cost
                                                  "},{"location":"admin/baize/developer/inference/models.html#_2","title":"\u4e0b\u4e00\u6b65","text":"
                                                  • \u521b\u5efa Triton \u63a8\u7406\u670d\u52a1
                                                  • \u521b\u5efa vLLM \u63a8\u7406\u670d\u52a1
                                                  "},{"location":"admin/baize/developer/inference/triton-inference.html","title":"\u521b\u5efa Triton \u63a8\u7406\u670d\u52a1","text":"

                                                  AI Lab \u76ee\u524d\u63d0\u4f9b\u4ee5 Triton\u3001vLLM \u4f5c\u4e3a\u63a8\u7406\u6846\u67b6\uff0c\u7528\u6237\u53ea\u9700\u7b80\u5355\u914d\u7f6e\u5373\u53ef\u5feb\u901f\u542f\u52a8\u4e00\u4e2a\u9ad8\u6027\u80fd\u7684\u63a8\u7406\u670d\u52a1\u3002

Danger

Using vLLM as a Triton backend is deprecated; we recommend using the latest native vLLM support to deploy your large language models.

                                                  "},{"location":"admin/baize/developer/inference/triton-inference.html#triton_1","title":"Triton\u4ecb\u7ecd","text":"

                                                  Triton \u662f\u7531 NVIDIA \u5f00\u53d1\u7684\u4e00\u4e2a\u5f00\u6e90\u63a8\u7406\u670d\u52a1\u5668\uff0c\u65e8\u5728\u7b80\u5316\u673a\u5668\u5b66\u4e60\u6a21\u578b\u7684\u90e8\u7f72\u548c\u63a8\u7406\u670d\u52a1\u3002\u5b83\u652f\u6301\u591a\u79cd\u6df1\u5ea6\u5b66\u4e60\u6846\u67b6\uff0c\u5305\u62ec TensorFlow\u3001PyTorch \u7b49\uff0c\u4f7f\u5f97\u7528\u6237\u80fd\u591f\u8f7b\u677e\u7ba1\u7406\u548c\u90e8\u7f72\u4e0d\u540c\u7c7b\u578b\u7684\u6a21\u578b\u3002

                                                  "},{"location":"admin/baize/developer/inference/triton-inference.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                  \u51c6\u5907\u6a21\u578b\u6570\u636e\uff1a\u5728\u6570\u636e\u96c6\u7ba1\u7406\u4e2d\u7eb3\u7ba1\u6a21\u578b\u4ee3\u7801\uff0c\u5e76\u4fdd\u8bc1\u6570\u636e\u6210\u529f\u9884\u52a0\u8f7d\uff0c\u4e0b\u9762\u4ee5 mnist \u624b\u5199\u6570\u5b57\u8bc6\u522b\u7684 PyTorch \u6a21\u578b\u4e3a\u4f8b\u3002

                                                  Note

                                                  \u5f85\u63a8\u7406\u7684\u6a21\u578b\u5728\u6570\u636e\u96c6\u4e2d\u9700\u8981\u9075\u4ee5\u4e0b\u76ee\u5f55\u683c\u5f0f\uff1a

<model-repository-name>
└── <model-name>
   └── <version>
      └── <model-definition-file>

The directory layout in this example:

model-repo
└── mnist-cnn
    └── 1
        └── model.pt
                                                  "},{"location":"admin/baize/developer/inference/triton-inference.html#_2","title":"\u521b\u5efa\u63a8\u7406\u670d\u52a1","text":"

Form-based creation is now supported; you can create the service guided by the field hints in the UI.

                                                  "},{"location":"admin/baize/developer/inference/triton-inference.html#_3","title":"\u914d\u7f6e\u6a21\u578b\u8def\u5f84","text":"

                                                  \u6a21\u578b\u8def\u5f84 model-repo/mnist-cnn/1/model.pt \u9700\u8981\u548c\u6570\u636e\u96c6\u4e2d\u7684\u6a21\u578b\u76ee\u5f55\u683c\u5f0f\u4e00\u81f4\u3002

                                                  "},{"location":"admin/baize/developer/inference/triton-inference.html#_4","title":"\u6a21\u578b\u914d\u7f6e","text":""},{"location":"admin/baize/developer/inference/triton-inference.html#_5","title":"\u914d\u7f6e\u8f93\u5165\u548c\u8f93\u51fa\u53c2\u6570","text":"

Note

The first dimension of the input and output parameters defaults to the batch size; setting it to -1 computes the batch size automatically from the submitted inference data. The remaining dimensions and the data type of each parameter must match the model input.
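
As an illustration, a small NumPy sketch of how a single MNIST-style sample maps onto an input declared with dims [-1, 1, 32, 32] (the shape follows the example in this guide):

import numpy as np

# one 1x32x32 sample; the dtype must match the declared FP32 datatype
image = np.random.rand(1, 32, 32).astype(np.float32)

# add the batch dimension; with the first dim declared as -1, the batch
# size is computed from the submitted data
batch = image[np.newaxis, ...]
print(batch.shape)  # (1, 1, 32, 32)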

                                                  "},{"location":"admin/baize/developer/inference/triton-inference.html#_6","title":"\u914d\u7f6e\u73af\u5883","text":"

                                                  \u53ef\u4ee5\u5bfc\u5165 \u73af\u5883\u7ba1\u7406 \u4e2d\u521b\u5efa\u7684\u73af\u5883\u4f5c\u4e3a\u63a8\u7406\u65f6\u7684\u8fd0\u884c\u73af\u5883\u3002

                                                  "},{"location":"admin/baize/developer/inference/triton-inference.html#_7","title":"\u9ad8\u7ea7\u914d\u7f6e","text":""},{"location":"admin/baize/developer/inference/triton-inference.html#_8","title":"\u914d\u7f6e\u8ba4\u8bc1\u7b56\u7565","text":"

                                                  \u652f\u6301 API key \u7684\u8bf7\u6c42\u65b9\u5f0f\u8ba4\u8bc1\uff0c\u7528\u6237\u53ef\u4ee5\u81ea\u5b9a\u4e49\u589e\u52a0\u8ba4\u8bc1\u53c2\u6570\u3002

                                                  "},{"location":"admin/baize/developer/inference/triton-inference.html#_9","title":"\u4eb2\u548c\u6027\u8c03\u5ea6","text":"

                                                  \u652f\u6301 \u6839\u636e GPU \u8d44\u6e90\u7b49\u8282\u70b9\u914d\u7f6e\u5b9e\u73b0\u81ea\u52a8\u5316\u7684\u4eb2\u548c\u6027\u8c03\u5ea6\uff0c\u540c\u65f6\u4e5f\u65b9\u4fbf\u7528\u6237\u81ea\u5b9a\u4e49\u8c03\u5ea6\u7b56\u7565\u3002

                                                  "},{"location":"admin/baize/developer/inference/triton-inference.html#_10","title":"\u8bbf\u95ee","text":""},{"location":"admin/baize/developer/inference/triton-inference.html#api","title":"API \u8bbf\u95ee","text":"
                                                  • Triton \u63d0\u4f9b\u4e86\u4e00\u4e2a\u57fa\u4e8e REST \u7684 API\uff0c\u5141\u8bb8\u5ba2\u6237\u7aef\u901a\u8fc7 HTTP POST \u8bf7\u6c42\u8fdb\u884c\u6a21\u578b\u63a8\u7406\u3002
                                                  • \u5ba2\u6237\u7aef\u53ef\u4ee5\u53d1\u9001 JSON \u683c\u5f0f\u7684\u8bf7\u6c42\u4f53\uff0c\u5176\u4e2d\u5305\u542b\u8f93\u5165\u6570\u636e\u548c\u76f8\u5173\u7684\u5143\u6570\u636e\u3002
                                                  "},{"location":"admin/baize/developer/inference/triton-inference.html#http","title":"HTTP \u8bbf\u95ee","text":"
                                                  1. \u53d1\u9001 HTTP POST \u8bf7\u6c42\uff1a\u4f7f\u7528\u5de5\u5177\u5982 curl \u6216 HTTP \u5ba2\u6237\u7aef\u5e93\uff08\u5982 Python \u7684 requests \u5e93\uff09\u5411 Triton Server \u53d1\u9001 POST \u8bf7\u6c42\u3002

                                                  2. \u8bbe\u7f6e HTTP \u5934\uff1a\u6839\u636e\u7528\u6237\u914d\u7f6e\u9879\u81ea\u52a8\u751f\u6210\u7684\u914d\u7f6e\uff0c\u5305\u542b\u6a21\u578b\u8f93\u5165\u548c\u8f93\u51fa\u7684\u5143\u6570\u636e\u3002

                                                  3. \u6784\u5efa\u8bf7\u6c42\u4f53\uff1a\u8bf7\u6c42\u4f53\u901a\u5e38\u5305\u542b\u8981\u8fdb\u884c\u63a8\u7406\u7684\u8f93\u5165\u6570\u636e\uff0c\u4ee5\u53ca\u6a21\u578b\u7279\u5b9a\u7684\u5143\u6570\u636e\u3002

                                                  "},{"location":"admin/baize/developer/inference/triton-inference.html#curl","title":"\u793a\u4f8b curl \u547d\u4ee4","text":"
curl -X POST "http://<ip>:<port>/v2/models/<inference-name>/infer" \
  -H "Content-Type: application/json" \
  -d '{
    "inputs": [
      {
        "name": "model_input",
        "shape": [1, 1, 32, 32],
        "datatype": "FP32",
        "data": [
          [0.1234, 0.5678, 0.9101, ... ]
        ]
      }
    ]
  }'
• <ip> is the host address where the Triton Inference Server is running.
• <port> is the port where the Triton Inference Server is running.
• <inference-name> is the name of the inference service you created.
• "name" must match the name of the input parameter in the model configuration.
• "shape" must match the dims of the input parameter in the model configuration.
• "datatype" must match the Data Type of the input parameter in the model configuration.
• "data" is replaced with the actual inference data.

Note that the example above must be adjusted to your specific model and environment, and the format and content of the input data must meet the model's requirements.
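
For reference, a minimal sketch of the same request using Python's requests library; <ip>, <port>, <inference-name>, and model_input are the same placeholders as in the curl example and must be replaced with your actual values:

import requests

url = "http://<ip>:<port>/v2/models/<inference-name>/infer"
payload = {
    "inputs": [
        {
            "name": "model_input",        # must match the input name in the model configuration
            "shape": [1, 1, 32, 32],      # must match the configured dims
            "datatype": "FP32",           # must match the configured data type
            "data": [[0.0] * (32 * 32)],  # replace with real inference data
        }
    ]
}

resp = requests.post(url, json=payload, timeout=30)
resp.raise_for_status()
print(resp.json())  # the "outputs" field carries the inference result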

                                                  "},{"location":"admin/baize/developer/inference/vllm-inference.html","title":"\u521b\u5efa vLLM \u63a8\u7406\u670d\u52a1","text":"

                                                  AI Lab \u652f\u6301\u4ee5 vLLM \u4f5c\u4e3a\u63a8\u7406\u670d\u52a1\uff0c\u63d0\u4f9b\u5168\u90e8 vLLM \u7684\u80fd\u529b\uff0c\u540c\u65f6\u63d0\u4f9b\u4e86\u5b8c\u5168\u9002\u914d OpenAI \u63a5\u53e3\u5b9a\u4e49\u3002

                                                  "},{"location":"admin/baize/developer/inference/vllm-inference.html#vllm_1","title":"vLLM \u4ecb\u7ecd","text":"

                                                  vLLM \u662f\u4e00\u4e2a\u5feb\u901f\u4e14\u6613\u4e8e\u4f7f\u7528\u7684\u7528\u4e8e\u63a8\u7406\u548c\u670d\u52a1\u7684\u5e93\uff0cvLLM \u65e8\u5728\u6781\u5927\u5730\u63d0\u5347\u5b9e\u65f6\u573a\u666f\u4e0b\u7684\u8bed\u8a00\u6a21\u578b\u670d\u52a1\u7684\u541e\u5410\u4e0e\u5185\u5b58\u4f7f\u7528\u6548\u7387\u3002vLLM \u5728\u901f\u5ea6\u3001\u7075\u6d3b\u6027\u65b9\u9762\u5177\u6709\u4ee5\u4e0b\u90e8\u5206\u7279\u70b9\uff1a

                                                  • \u8fde\u7eed\u6279\u5904\u7406\u4f20\u5165\u8bf7\u6c42\uff1b
                                                  • \u4f7f\u7528 PagedAttention \u9ad8\u6548\u7ba1\u7406\u6ce8\u610f\u529b\u952e\u548c\u503c\u5185\u5b58\uff1b
                                                  • \u4e0e\u6d41\u884c\u7684 HuggingFace \u578b\u53f7\u65e0\u7f1d\u96c6\u6210\uff1b
                                                  • \u517c\u5bb9 OpenAI \u7684 API \u670d\u52a1\u5668\u3002
                                                  "},{"location":"admin/baize/developer/inference/vllm-inference.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                  \u51c6\u5907\u6a21\u578b\u6570\u636e\uff1a\u5728\u6570\u636e\u96c6\u7ba1\u7406\u4e2d\u7eb3\u7ba1\u6a21\u578b\u4ee3\u7801\uff0c\u5e76\u4fdd\u8bc1\u6570\u636e\u6210\u529f\u9884\u52a0\u8f7d\u3002

                                                  "},{"location":"admin/baize/developer/inference/vllm-inference.html#_2","title":"\u521b\u5efa\u63a8\u7406\u670d\u52a1","text":"
                                                  1. \u9009\u62e9 vLLM \u63a8\u7406\u6846\u67b6\uff0c\u5e76\u5728\u9009\u62e9\u6a21\u578b\u6a21\u5757\u9009\u62e9\u63d0\u524d\u521b\u5efa\u597d\u7684\u6a21\u578b\u6570\u636e\u96c6 hdd-models \u5e76\u586b\u5199\u6570\u636e\u96c6\u4e2d\u6a21\u578b\u6240\u5728\u7684\u8def\u5f84\u4fe1\u606f\u3002

                                                    \u672c\u6587\u63a8\u7406\u670d\u52a1\u7684\u521b\u5efa\u4f7f\u7528 ChatGLM3 \u6a21\u578b\u3002

                                                  2. \u914d\u7f6e\u63a8\u7406\u670d\u52a1\u7684\u8d44\u6e90\uff0c\u5e76\u8c03\u6574\u63a8\u7406\u670d\u52a1\u8fd0\u884c\u7684\u53c2\u6570\u3002

| Parameter | Description |
| --- | --- |
| GPU Resources | Configure GPU resources for inference based on the model scale and the cluster resources. |
| Allow Remote Code | Controls whether vLLM trusts and executes code from remote sources. |
| LoRA | LoRA is a parameter-efficient fine-tuning technique for deep learning models. It decomposes the original parameter matrices into low-rank matrices, reducing the parameter count and computational complexity. 1. --lora-modules: specifies which modules or layers receive the low-rank approximation. 2. max_loras_rank: the maximum rank of each adapter layer in the LoRA model; smaller ranks suit simple tasks, while complex tasks may need larger ranks to preserve model performance. 3. max_loras: the maximum number of LoRA layers the model may contain, set according to model size, inference complexity, and similar factors. 4. max_cpu_loras: the maximum number of LoRA layers that can be handled in a CPU environment. |
| Associated Environment | Selecting an environment predefines the dependencies needed at inference time. |

Info

For models that support configuring LoRA parameters, refer to the vLLM supported models.

3. In Advanced Configuration, automated affinity scheduling based on node configuration such as GPU resources is supported, and users can also define custom scheduling policies.

                                                  "},{"location":"admin/baize/developer/inference/vllm-inference.html#_3","title":"\u9a8c\u8bc1\u63a8\u7406\u670d\u52a1","text":"

                                                  \u63a8\u7406\u670d\u52a1\u521b\u5efa\u5b8c\u6210\u4e4b\u540e\uff0c\u70b9\u51fb\u63a8\u7406\u670d\u52a1\u540d\u79f0\u8fdb\u5165\u8be6\u60c5\uff0c\u67e5\u770b API \u8c03\u7528\u65b9\u6cd5\u3002\u901a\u8fc7\u4f7f\u7528 Curl\u3001Python\u3001Nodejs \u7b49\u65b9\u5f0f\u9a8c\u8bc1\u6267\u884c\u7ed3\u679c\u3002

                                                  \u62f7\u8d1d\u8be6\u60c5\u4e2d\u7684 curl \u547d\u4ee4\uff0c\u5e76\u5728\u7ec8\u7aef\u4e2d\u6267\u884c\u547d\u4ee4\u53d1\u9001\u4e00\u6761\u6a21\u578b\u63a8\u7406\u8bf7\u6c42\uff0c\u9884\u671f\u8f93\u51fa\uff1a
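
If you prefer Python over copying the curl command, the sketch below sends an equivalent request through the OpenAI-compatible API the service exposes; the host, port, and model name (chatglm3) are placeholders, so copy the real values from the details page:

import requests

url = "http://<ip>:<port>/v1/chat/completions"  # OpenAI-compatible endpoint
payload = {
    "model": "chatglm3",                         # placeholder model name
    "messages": [{"role": "user", "content": "Hello!"}],
    "max_tokens": 64,
}

resp = requests.post(url, json=payload, timeout=60)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])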

                                                  "},{"location":"admin/baize/developer/jobs/create.html","title":"\u521b\u5efa\u4efb\u52a1\uff08Job\uff09","text":"

                                                  \u4efb\u52a1\u7ba1\u7406\u662f\u6307\u901a\u8fc7\u4f5c\u4e1a\u8c03\u5ea6\u548c\u7ba1\u63a7\u7ec4\u4ef6\u6765\u521b\u5efa\u548c\u7ba1\u7406\u4efb\u52a1\u751f\u547d\u5468\u671f\u7684\u529f\u80fd\u3002

                                                  AI Lab \u91c7\u7528 Kubernetes \u7684 Job \u673a\u5236\u6765\u8c03\u5ea6\u5404\u9879 AI \u63a8\u7406\u3001\u8bad\u7ec3\u4efb\u52a1\u3002

                                                  "},{"location":"admin/baize/developer/jobs/create.html#_1","title":"\u901a\u7528\u6b65\u9aa4","text":"
                                                  1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u4efb\u52a1\u4e2d\u5fc3 -> \u8bad\u7ec3\u4efb\u52a1 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae\u3002

                                                  2. \u7cfb\u7edf\u4f1a\u9884\u5148\u586b\u5145\u57fa\u7840\u914d\u7f6e\u6570\u636e\uff0c\u5305\u62ec\u8981\u90e8\u7f72\u7684\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u3001\u4efb\u52a1\u7c7b\u578b\u3001\u961f\u5217\u3001\u4f18\u5148\u7ea7\u7b49\u3002 \u8c03\u6574\u8fd9\u4e9b\u53c2\u6570\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                                  3. \u914d\u7f6e\u955c\u50cf\u5730\u5740\u3001\u8fd0\u884c\u53c2\u6570\u4ee5\u53ca\u5173\u8054\u7684\u6570\u636e\u96c6\u3001\u73af\u5883\u548c\u8d44\u6e90\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                                  4. \u6309\u9700\u6dfb\u52a0\u6807\u7b7e\u3001\u6ce8\u89e3\u3001\u73af\u5883\u53d8\u91cf\u7b49\u4efb\u52a1\u53c2\u6570\uff0c\u9009\u62e9\u8c03\u5ea6\u7b56\u7565\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                                                  5. \u4efb\u52a1\u521b\u5efa\u6210\u529f\u540e\uff0c\u4f1a\u6709\u51e0\u79cd\u8fd0\u884c\u72b6\u6001\uff1a

                                                    • \u8fd0\u884c\u4e2d
                                                    • \u6392\u961f\u4e2d
                                                    • \u63d0\u4ea4\u6210\u529f\u3001\u63d0\u4ea4\u5931\u8d25
                                                    • \u4efb\u52a1\u6210\u529f\u3001\u4efb\u52a1\u5931\u8d25
                                                  "},{"location":"admin/baize/developer/jobs/create.html#_2","title":"\u521b\u5efa\u7279\u5b9a\u4efb\u52a1","text":"
                                                  • \u521b\u5efa Pytorch \u4efb\u52a1
                                                  • \u521b\u5efa TensorFlow \u4efb\u52a1
                                                  • \u521b\u5efa MPI \u4efb\u52a1
                                                  • \u521b\u5efa MXNet \u4efb\u52a1
                                                  • \u521b\u5efa PaddlePaddle \u4efb\u52a1
                                                  "},{"location":"admin/baize/developer/jobs/delete.html","title":"\u5220\u9664\u4efb\u52a1\uff08Job\uff09","text":"

                                                  \u5982\u679c\u53d1\u73b0\u4efb\u52a1\u5197\u4f59\u3001\u8fc7\u671f\u6216\u56e0\u5176\u4ed6\u7f18\u6545\u4e0d\u518d\u9700\u8981\uff0c\u53ef\u4ee5\u4ece\u8bad\u7ec3\u4efb\u52a1\u5217\u8868\u4e2d\u5220\u9664\u3002

                                                  1. \u5728\u8bad\u7ec3\u4efb\u52a1\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u5220\u9664 \u3002

                                                  2. \u5728\u5f39\u7a97\u4e2d\u786e\u8ba4\u8981\u5220\u9664\u7684\u4efb\u52a1\uff0c\u8f93\u5165\u4efb\u52a1\u540d\u79f0\u540e\u70b9\u51fb \u5220\u9664 \u3002

                                                  3. \u5c4f\u5e55\u63d0\u793a\u5220\u9664\u6210\u529f\uff0c\u8be5\u4efb\u52a1\u4ece\u5217\u8868\u4e2d\u6d88\u5931\u3002

                                                  Caution

                                                  \u4efb\u52a1\u4e00\u65e6\u5220\u9664\u5c06\u4e0d\u53ef\u6062\u590d\uff0c\u8bf7\u8c28\u614e\u64cd\u4f5c\u3002

                                                  "},{"location":"admin/baize/developer/jobs/mpi.html","title":"MPI \u4efb\u52a1","text":"

                                                  MPI\uff08Message Passing Interface\uff09\u662f\u4e00\u79cd\u7528\u4e8e\u5e76\u884c\u8ba1\u7b97\u7684\u901a\u4fe1\u534f\u8bae\uff0c\u5b83\u5141\u8bb8\u591a\u4e2a\u8ba1\u7b97\u8282\u70b9\u4e4b\u95f4\u8fdb\u884c\u6d88\u606f\u4f20\u9012\u548c\u534f\u4f5c\u3002 MPI \u4efb\u52a1\u662f\u4f7f\u7528 MPI \u534f\u8bae\u8fdb\u884c\u5e76\u884c\u8ba1\u7b97\u7684\u4efb\u52a1\uff0c\u9002\u7528\u4e8e\u9700\u8981\u5927\u89c4\u6a21\u5e76\u884c\u5904\u7406\u7684\u5e94\u7528\u573a\u666f\uff0c\u4f8b\u5982\u5206\u5e03\u5f0f\u8bad\u7ec3\u3001\u79d1\u5b66\u8ba1\u7b97\u7b49\u3002

                                                  \u5728 AI Lab \u4e2d\uff0c\u6211\u4eec\u63d0\u4f9b\u4e86 MPI \u4efb\u52a1\u7684\u652f\u6301\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u754c\u9762\u5316\u64cd\u4f5c\uff0c\u5feb\u901f\u521b\u5efa MPI \u4efb\u52a1\uff0c\u8fdb\u884c\u9ad8\u6027\u80fd\u7684\u5e76\u884c\u8ba1\u7b97\u3002 \u672c\u6559\u7a0b\u5c06\u6307\u5bfc\u60a8\u5982\u4f55\u5728 AI Lab \u4e2d\u521b\u5efa\u548c\u8fd0\u884c\u4e00\u4e2a MPI \u4efb\u52a1\u3002

                                                  "},{"location":"admin/baize/developer/jobs/mpi.html#_1","title":"\u4efb\u52a1\u914d\u7f6e\u4ecb\u7ecd","text":"
                                                  • \u4efb\u52a1\u7c7b\u578b \uff1aMPI\uff0c\u7528\u4e8e\u8fd0\u884c\u5e76\u884c\u8ba1\u7b97\u4efb\u52a1\u3002
                                                  • \u8fd0\u884c\u73af\u5883 \uff1a\u9009\u7528\u9884\u88c5\u4e86 MPI \u73af\u5883\u7684\u955c\u50cf\uff0c\u6216\u8005\u5728\u4efb\u52a1\u4e2d\u6307\u5b9a\u5b89\u88c5\u5fc5\u8981\u7684\u4f9d\u8d56\u3002
                                                  • MPIJob \u914d\u7f6e \uff1a\u7406\u89e3\u5e76\u914d\u7f6e MPIJob \u7684\u5404\u9879\u53c2\u6570\uff0c\u5982\u526f\u672c\u6570\u3001\u8d44\u6e90\u8bf7\u6c42\u7b49\u3002
                                                  "},{"location":"admin/baize/developer/jobs/mpi.html#_2","title":"\u4efb\u52a1\u8fd0\u884c\u73af\u5883","text":"

                                                  \u5728\u8fd9\u91cc\u6211\u4eec\u4f7f\u7528 baize-notebook \u57fa\u7840\u955c\u50cf\u548c \u5173\u8054\u73af\u5883 \u7684\u65b9\u5f0f\u6765\u4f5c\u4e3a\u4efb\u52a1\u7684\u57fa\u7840\u8fd0\u884c\u73af\u5883\u3002 \u786e\u4fdd\u8fd0\u884c\u73af\u5883\u4e2d\u5305\u542b MPI \u53ca\u76f8\u5173\u5e93\uff0c\u5982 OpenMPI\u3001mpi4py \u7b49\u3002

                                                  \u6ce8\u610f \uff1a\u4e86\u89e3\u5982\u4f55\u521b\u5efa\u73af\u5883\uff0c\u8bf7\u53c2\u8003\u73af\u5883\u5217\u8868\u3002
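
To confirm the environment is usable before submitting real work, a minimal mpi4py check can be run across two processes; hello_mpi.py is a hypothetical file name:

# hello_mpi.py -- prints one line per MPI rank
from mpi4py import MPI

comm = MPI.COMM_WORLD
print(f"rank {comm.Get_rank()} of {comm.Get_size()} on {MPI.Get_processor_name()}")

Launch it with mpirun -np 2 python hello_mpi.py; one line per rank indicates that OpenMPI and mpi4py are wired up correctly.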

                                                  "},{"location":"admin/baize/developer/jobs/mpi.html#mpi_1","title":"\u521b\u5efa MPI \u4efb\u52a1","text":""},{"location":"admin/baize/developer/jobs/mpi.html#mpi_2","title":"MPI \u4efb\u52a1\u521b\u5efa\u6b65\u9aa4","text":"
                                                  1. \u767b\u5f55\u5e73\u53f0 \uff1a\u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3\uff0c\u8fdb\u5165 \u8bad\u7ec3\u4efb\u52a1 \u9875\u9762\u3002
                                                  2. \u521b\u5efa\u4efb\u52a1 \uff1a\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                                                  3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b \uff1a\u5728\u5f39\u51fa\u7684\u7a97\u53e3\u4e2d\uff0c\u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a MPI\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002
                                                  4. \u586b\u5199\u4efb\u52a1\u4fe1\u606f \uff1a\u586b\u5199\u4efb\u52a1\u540d\u79f0\u548c\u63cf\u8ff0\uff0c\u4f8b\u5982 \u201cbenchmarks-mpi\u201d\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002
                                                  5. \u914d\u7f6e\u4efb\u52a1\u53c2\u6570 \uff1a\u6839\u636e\u60a8\u7684\u9700\u6c42\uff0c\u914d\u7f6e\u4efb\u52a1\u7684\u8fd0\u884c\u53c2\u6570\u3001\u955c\u50cf\u3001\u8d44\u6e90\u7b49\u4fe1\u606f\u3002
                                                  "},{"location":"admin/baize/developer/jobs/mpi.html#_3","title":"\u8fd0\u884c\u53c2\u6570","text":"
                                                  • \u542f\u52a8\u547d\u4ee4 \uff1a\u4f7f\u7528 mpirun\uff0c\u8fd9\u662f\u8fd0\u884c MPI \u7a0b\u5e8f\u7684\u547d\u4ee4\u3002
                                                  • \u547d\u4ee4\u53c2\u6570 \uff1a\u8f93\u5165\u60a8\u8981\u8fd0\u884c\u7684 MPI \u7a0b\u5e8f\u7684\u53c2\u6570\u3002

                                                  \u793a\u4f8b\uff1a\u8fd0\u884c TensorFlow Benchmarks

                                                  \u5728\u672c\u793a\u4f8b\u4e2d\uff0c\u6211\u4eec\u5c06\u8fd0\u884c\u4e00\u4e2a TensorFlow \u7684\u57fa\u51c6\u6d4b\u8bd5\u7a0b\u5e8f\uff0c\u4f7f\u7528 Horovod \u8fdb\u884c\u5206\u5e03\u5f0f\u8bad\u7ec3\u3002 \u9996\u5148\uff0c\u786e\u4fdd\u60a8\u4f7f\u7528\u7684\u955c\u50cf\u4e2d\u5305\u542b\u6240\u9700\u7684\u4f9d\u8d56\u9879\uff0c\u4f8b\u5982 TensorFlow\u3001Horovod\u3001Open MPI \u7b49\u3002

                                                  \u955c\u50cf\u9009\u62e9 \uff1a\u4f7f\u7528\u5305\u542b TensorFlow \u548c MPI \u7684\u955c\u50cf\uff0c\u4f8b\u5982 mai.daocloud.io/docker.io/mpioperator/tensorflow-benchmarks:latest\u3002

                                                  \u547d\u4ee4\u53c2\u6570 \uff1a

mpirun --allow-run-as-root -np 2 -bind-to none -map-by slot \
  -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH -x PATH \
  -mca pml ob1 -mca btl ^openib \
  python scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py \
  --model=resnet101 --batch_size=64 --variable_update=horovod

Explanation:

• mpirun: the MPI launch command.
• --allow-run-as-root: allows running as the root user (containers usually run as root).
• -np 2: sets the number of processes to 2.
• -bind-to none, -map-by slot: MPI process binding and mapping configuration.
• -x NCCL_DEBUG=INFO: sets the debug verbosity of NCCL (NVIDIA Collective Communication Library).
• -x LD_LIBRARY_PATH, -x PATH: passes the necessary environment variables into the MPI environment.
• -mca pml ob1 -mca btl ^openib: MPI configuration parameters specifying the transport-layer and message-layer protocols.
• python scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py: runs the TensorFlow benchmark script.
• --model=resnet101, --batch_size=64, --variable_update=horovod: TensorFlow script arguments specifying the model, the batch size, and Horovod for variable updates.
                                                  "},{"location":"admin/baize/developer/jobs/mpi.html#_4","title":"\u8d44\u6e90\u914d\u7f6e","text":"

                                                  \u5728\u4efb\u52a1\u914d\u7f6e\u4e2d\uff0c\u9700\u8981\u4e3a\u6bcf\u4e2a\u8282\u70b9\uff08Launcher \u548c Worker\uff09\u5206\u914d\u9002\u5f53\u7684\u8d44\u6e90\uff0c\u4f8b\u5982 CPU\u3001\u5185\u5b58\u548c GPU\u3002

                                                  \u8d44\u6e90\u793a\u4f8b \uff1a

                                                  • Launcher\uff08\u542f\u52a8\u5668\uff09 \uff1a

                                                    • \u526f\u672c\u6570 \uff1a1
                                                    • \u8d44\u6e90\u8bf7\u6c42 \uff1a
                                                      • CPU\uff1a2 \u6838
                                                      • \u5185\u5b58\uff1a4 GiB
                                                  • Worker\uff08\u5de5\u4f5c\u8282\u70b9\uff09 \uff1a

                                                    • \u526f\u672c\u6570 \uff1a2
                                                    • \u8d44\u6e90\u8bf7\u6c42 \uff1a
                                                      • CPU\uff1a2 \u6838
                                                      • \u5185\u5b58\uff1a4 GiB
                                                      • GPU\uff1a\u6839\u636e\u9700\u6c42\u5206\u914d
                                                  "},{"location":"admin/baize/developer/jobs/mpi.html#mpijob","title":"\u5b8c\u6574\u7684 MPIJob \u914d\u7f6e\u793a\u4f8b","text":"

                                                  \u4ee5\u4e0b\u662f\u5b8c\u6574\u7684 MPIJob \u914d\u7f6e\u793a\u4f8b\uff0c\u4f9b\u60a8\u53c2\u8003\u3002

                                                  apiVersion: kubeflow.org/v1\nkind: MPIJob\nmetadata:\n  name: tensorflow-benchmarks\nspec:\n  slotsPerWorker: 1\n  runPolicy:\n    cleanPodPolicy: Running\n  mpiReplicaSpecs:\n    Launcher:\n      replicas: 1\n      template:\n        spec:\n          containers:\n            - name: tensorflow-benchmarks\n              image: mai.daocloud.io/docker.io/mpioperator/tensorflow-benchmarks:latest\n              command:\n                - mpirun\n                - --allow-run-as-root\n                - -np\n                - \"2\"\n                - -bind-to\n                - none\n                - -map-by\n                - slot\n                - -x\n                - NCCL_DEBUG=INFO\n                - -x\n                - LD_LIBRARY_PATH\n                - -x\n                - PATH\n                - -mca\n                - pml\n                - ob1\n                - -mca\n                - btl\n                - ^openib\n                - python\n                - scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py\n                - --model=resnet101\n                - --batch_size=64\n                - --variable_update=horovod\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n    Worker:\n      replicas: 2\n      template:\n        spec:\n          containers:\n            - name: tensorflow-benchmarks\n              image: mai.daocloud.io/docker.io/mpioperator/tensorflow-benchmarks:latest\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpumem: 1k\n                  nvidia.com/vgpu: \"1\"\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n

Configuration breakdown:

• apiVersion and kind: the API version and type of the resource; MPIJob is a custom resource defined by Kubeflow for creating MPI jobs.
• metadata: metadata such as the job name.
• spec: the detailed job configuration.
  • slotsPerWorker: the number of slots per Worker node, usually set to 1.
  • runPolicy: the run policy, for example whether Pods are cleaned up after the job finishes.
  • mpiReplicaSpecs: the replica configuration of the MPI job.
    • Launcher: the launcher, responsible for starting the MPI job.
      • replicas: the replica count, usually 1.
      • template: the Pod template, defining the container image, command, resources, and so on.
    • Worker: the compute nodes that actually execute the job.
      • replicas: the replica count, set according to your parallelism needs; 2 in this example.
      • template: the Pod template, likewise defining the container's runtime environment and resources.
                                                  "},{"location":"admin/baize/developer/jobs/mpi.html#_5","title":"\u8bbe\u7f6e\u4efb\u52a1\u526f\u672c\u6570","text":"

                                                  \u5728\u521b\u5efa MPI \u4efb\u52a1\u65f6\uff0c\u9700\u8981\u6839\u636e mpiReplicaSpecs \u4e2d\u914d\u7f6e\u7684\u526f\u672c\u6570\uff0c\u6b63\u786e\u8bbe\u7f6e \u4efb\u52a1\u526f\u672c\u6570\u3002

                                                  • \u603b\u526f\u672c\u6570 = Launcher \u526f\u672c\u6570 + Worker \u526f\u672c\u6570
                                                  • \u672c\u793a\u4f8b\u4e2d\uff1a

                                                    • Launcher \u526f\u672c\u6570\uff1a1
                                                    • Worker \u526f\u672c\u6570\uff1a2
                                                    • \u603b\u526f\u672c\u6570 \uff1a1 + 2 = 3

                                                  \u56e0\u6b64\uff0c\u5728\u4efb\u52a1\u914d\u7f6e\u4e2d\uff0c\u60a8\u9700\u8981\u5c06 \u4efb\u52a1\u526f\u672c\u6570 \u8bbe\u7f6e\u4e3a 3\u3002
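
The same addition can be sanity-checked in a couple of lines of Python; the dictionary below simply mirrors the replica counts from mpiReplicaSpecs above and is illustrative, not a platform API:

# Hypothetical helper: sum the replicas declared for each MPI role.\nmpi_replica_specs = {'Launcher': 1, 'Worker': 2}\ntotal_replicas = sum(mpi_replica_specs.values())\nprint(total_replicas)  # 3 -- the value to enter as the job replica count\n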

                                                  "},{"location":"admin/baize/developer/jobs/mpi.html#_6","title":"\u63d0\u4ea4\u4efb\u52a1","text":"

                                                  \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u63d0\u4ea4 \u6309\u94ae\uff0c\u5f00\u59cb\u8fd0\u884c MPI \u4efb\u52a1\u3002

                                                  "},{"location":"admin/baize/developer/jobs/mpi.html#_7","title":"\u67e5\u770b\u8fd0\u884c\u7ed3\u679c","text":"

                                                  \u4efb\u52a1\u63d0\u4ea4\u6210\u529f\u540e\uff0c\u60a8\u53ef\u4ee5\u8fdb\u5165 \u4efb\u52a1\u8be6\u60c5 \u9875\u9762\uff0c\u67e5\u770b\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\u548c\u4efb\u52a1\u7684\u8fd0\u884c\u72b6\u6001\u3002 \u4ece\u53f3\u4e0a\u89d2\u8fdb\u5165 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\uff0c\u53ef\u4ee5\u67e5\u770b\u8fd0\u884c\u8fc7\u7a0b\u4e2d\u6bcf\u4e2a\u8282\u70b9\u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                                                  \u793a\u4f8b\u8f93\u51fa\uff1a

                                                  TensorFlow:  1.13\nModel:       resnet101\nMode:        training\nBatch size:  64\n...\n\nTotal images/sec: 125.67\n

This indicates that the MPI job ran successfully and the TensorFlow benchmark completed its distributed training.

"},{"location":"admin/baize/developer/jobs/mpi.html#_8","title":"Summary","text":"

In this tutorial you learned how to create and run an MPI job on the AI Lab platform. We covered how to configure an MPIJob and how to specify the command and resource requirements of the job. If you have any questions, consult the platform's other documentation or contact technical support.

Appendix:

• If your runtime environment does not come with the required libraries (such as mpi4py or Horovod), add installation commands to the job, or use an image with the dependencies preinstalled (see the sketch after this list).
• In practice, you can modify the MPIJob configuration as needed, for example by changing the image, command arguments, or resource requests.
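
A minimal way to confirm those libraries are importable is to run a short check inside the image before launching a real job. This is an illustrative sketch; the module names are assumptions about which libraries your job needs:

# Hypothetical dependency check -- adjust the module list to your job's needs.\nimport importlib\n\nfor module in ('mpi4py', 'horovod.tensorflow'):\n    try:\n        importlib.import_module(module)\n        print(f'{module}: OK')\n    except ImportError as err:\n        print(f'{module}: missing ({err})')\n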
                                                  "},{"location":"admin/baize/developer/jobs/mxnet.html","title":"MXNet \u4efb\u52a1","text":"

                                                  Warning

                                                  \u7531\u4e8e Apache MXNet \u9879\u76ee\u5df2\u5b58\u6863\uff0c\u56e0\u6b64 Kubeflow MXJob \u5c06\u5728\u672a\u6765\u7684 Training Operator 1.9 \u7248\u672c\u4e2d\u5f03\u7528\u548c\u5220\u9664\u3002

                                                  Apache MXNet \u662f\u4e00\u4e2a\u9ad8\u6027\u80fd\u7684\u6df1\u5ea6\u5b66\u4e60\u6846\u67b6\uff0c\u652f\u6301\u591a\u79cd\u7f16\u7a0b\u8bed\u8a00\u3002MXNet \u4efb\u52a1\u53ef\u4ee5\u4f7f\u7528\u591a\u79cd\u65b9\u5f0f\u8fdb\u884c\u8bad\u7ec3\uff0c\u5305\u62ec\u5355\u673a\u6a21\u5f0f\u548c\u5206\u5e03\u5f0f\u6a21\u5f0f\u3002\u5728 AI Lab \u4e2d\uff0c\u6211\u4eec\u63d0\u4f9b\u4e86\u5bf9 MXNet \u4efb\u52a1\u7684\u652f\u6301\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u754c\u9762\u5316\u64cd\u4f5c\uff0c\u5feb\u901f\u521b\u5efa MXNet \u4efb\u52a1\uff0c\u8fdb\u884c\u6a21\u578b\u8bad\u7ec3\u3002

                                                  \u672c\u6559\u7a0b\u5c06\u6307\u5bfc\u60a8\u5982\u4f55\u5728 AI Lab \u5e73\u53f0\u4e0a\u521b\u5efa\u548c\u8fd0\u884c MXNet \u7684\u5355\u673a\u548c\u5206\u5e03\u5f0f\u4efb\u52a1\u3002

                                                  "},{"location":"admin/baize/developer/jobs/mxnet.html#_1","title":"\u4efb\u52a1\u914d\u7f6e\u4ecb\u7ecd","text":"
                                                  • \u4efb\u52a1\u7c7b\u578b\uff1aMXNet\uff0c\u652f\u6301\u5355\u673a\u548c\u5206\u5e03\u5f0f\u4e24\u79cd\u6a21\u5f0f\u3002
                                                  • \u8fd0\u884c\u73af\u5883\uff1a\u9009\u62e9\u5305\u542b MXNet \u6846\u67b6\u7684\u955c\u50cf\uff0c\u6216\u5728\u4efb\u52a1\u4e2d\u5b89\u88c5\u5fc5\u8981\u7684\u4f9d\u8d56\u3002
                                                  "},{"location":"admin/baize/developer/jobs/mxnet.html#_2","title":"\u4efb\u52a1\u8fd0\u884c\u73af\u5883","text":"

                                                  \u6211\u4eec\u4f7f\u7528 release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest \u955c\u50cf\u4f5c\u4e3a\u4efb\u52a1\u7684\u57fa\u7840\u8fd0\u884c\u73af\u5883\u3002\u8be5\u955c\u50cf\u9884\u88c5\u4e86 MXNet \u53ca\u5176\u76f8\u5173\u4f9d\u8d56\uff0c\u652f\u6301 GPU \u52a0\u901f\u3002

                                                  \u6ce8\u610f\uff1a\u4e86\u89e3\u5982\u4f55\u521b\u5efa\u548c\u7ba1\u7406\u73af\u5883\uff0c\u8bf7\u53c2\u8003 \u73af\u5883\u5217\u8868\u3002

                                                  "},{"location":"admin/baize/developer/jobs/mxnet.html#mxnet_1","title":"\u521b\u5efa MXNet \u4efb\u52a1","text":""},{"location":"admin/baize/developer/jobs/mxnet.html#mxnet_2","title":"MXNet \u5355\u673a\u4efb\u52a1","text":""},{"location":"admin/baize/developer/jobs/mxnet.html#_3","title":"\u521b\u5efa\u6b65\u9aa4","text":"
                                                  1. \u767b\u5f55\u5e73\u53f0\uff1a\u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3\uff0c\u8fdb\u5165 \u8bad\u7ec3\u4efb\u52a1 \u9875\u9762\u3002
                                                  2. \u521b\u5efa\u4efb\u52a1\uff1a\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                                                  3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\uff1a\u5728\u5f39\u51fa\u7684\u7a97\u53e3\u4e2d\uff0c\u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a MXNet\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002
                                                  4. \u586b\u5199\u4efb\u52a1\u4fe1\u606f\uff1a\u586b\u5199\u4efb\u52a1\u540d\u79f0\u548c\u63cf\u8ff0\uff0c\u4f8b\u5982 \u201cMXNet \u5355\u673a\u8bad\u7ec3\u4efb\u52a1\u201d\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a\u3002
                                                  5. \u914d\u7f6e\u4efb\u52a1\u53c2\u6570\uff1a\u6839\u636e\u60a8\u7684\u9700\u6c42\uff0c\u914d\u7f6e\u4efb\u52a1\u7684\u8fd0\u884c\u53c2\u6570\u3001\u955c\u50cf\u3001\u8d44\u6e90\u7b49\u4fe1\u606f\u3002
                                                  "},{"location":"admin/baize/developer/jobs/mxnet.html#_4","title":"\u8fd0\u884c\u53c2\u6570","text":"
                                                  • \u542f\u52a8\u547d\u4ee4\uff1apython3
                                                  • \u547d\u4ee4\u53c2\u6570\uff1a

                                                    /mxnet/mxnet/example/gluon/mnist/mnist.py --epochs 10 --cuda\n

Explanation:

• /mxnet/mxnet/example/gluon/mnist/mnist.py: the MNIST handwritten-digit recognition example script shipped with MXNet.
• --epochs 10: train for 10 epochs.
• --cuda: use CUDA for GPU acceleration (see the sketch after this list).
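
For orientation, here is an illustrative sketch of how a script like mnist.py typically interprets these two flags; it is not the upstream example itself:

# Hypothetical argument handling mirroring the flags above.\nimport argparse\nimport mxnet as mx\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epochs', type=int, default=10)\nparser.add_argument('--cuda', action='store_true')\nargs = parser.parse_args()\n\nctx = mx.gpu(0) if args.cuda else mx.cpu()  # pick the device for training\nprint(f'training for {args.epochs} epochs on {ctx}')\n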
                                                  "},{"location":"admin/baize/developer/jobs/mxnet.html#_5","title":"\u8d44\u6e90\u914d\u7f6e","text":"
                                                  • \u526f\u672c\u6570\uff1a1\uff08\u5355\u673a\u4efb\u52a1\uff09
                                                  • \u8d44\u6e90\u8bf7\u6c42\uff1a
                                                    • CPU\uff1a2 \u6838
                                                    • \u5185\u5b58\uff1a4 GiB
                                                    • GPU\uff1a1 \u5757
                                                  "},{"location":"admin/baize/developer/jobs/mxnet.html#mxjob","title":"\u5b8c\u6574\u7684 MXJob \u914d\u7f6e\u793a\u4f8b","text":"

                                                  \u4ee5\u4e0b\u662f\u5355\u673a MXJob \u7684 YAML \u914d\u7f6e\uff1a

                                                  apiVersion: \"kubeflow.org/v1\"\nkind: \"MXJob\"\nmetadata:\n  name: \"mxnet-single-job\"\nspec:\n  jobMode: MXTrain\n  mxReplicaSpecs:\n    Worker:\n      replicas: 1\n      restartPolicy: Never\n      template:\n        spec:\n          containers:\n            - name: mxnet\n              image: release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest\n              command: [\"python3\"]\n              args:\n                [\n                  \"/mxnet/mxnet/example/gluon/mnist/mnist.py\",\n                  \"--epochs\",\n                  \"10\",\n                  \"--cuda\",\n                ]\n              ports:\n                - containerPort: 9991\n                  name: mxjob-port\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n

Configuration breakdown:

• apiVersion and kind: the API version and type of the resource, here MXJob.
• metadata: metadata such as the job name.
• spec: the detailed job configuration.
  • jobMode: set to MXTrain, indicating a training job.
  • mxReplicaSpecs: the replica configuration of the MXNet job.
    • Worker: the worker node configuration.
      • replicas: the replica count, 1 here.
      • restartPolicy: the restart policy, set to Never so the job is not restarted on failure.
      • template: the Pod template, defining the container's runtime environment and resources.
        • containers: the container list.
          • name: the container name.
          • image: the image to use.
          • command and args: the start command and its arguments.
          • ports: the container port configuration.
          • resources: resource requests and limits.
"},{"location":"admin/baize/developer/jobs/mxnet.html#_6","title":"Submitting the Job","text":"

After completing the configuration, click the Submit button to start the single-node MXNet job.

"},{"location":"admin/baize/developer/jobs/mxnet.html#_7","title":"Viewing the Run Results","text":"

After the job is submitted successfully, open the job details page to check resource usage and the job's running status. From the upper-right corner, open the workload details to view the log output during the run.

Example output:

                                                  Epoch 1: accuracy=0.95\nEpoch 2: accuracy=0.97\n...\nEpoch 10: accuracy=0.98\nTraining completed.\n

This indicates that the single-node MXNet job ran successfully and model training completed.

"},{"location":"admin/baize/developer/jobs/mxnet.html#mxnet_3","title":"MXNet Distributed Job","text":"

In distributed mode, an MXNet job can train across multiple compute nodes, improving training efficiency.

"},{"location":"admin/baize/developer/jobs/mxnet.html#_8","title":"Creation Steps","text":"
1. Log in to the platform: same as above.
2. Create a job: click the Create button in the upper-right corner to open the job creation page.
3. Select the job type: select MXNet as the job type, then click Next.
4. Fill in job information: enter a job name and description, for example "MXNet distributed training job", then click OK.
5. Configure job parameters: set the run parameters, image, resources, and so on as needed.
"},{"location":"admin/baize/developer/jobs/mxnet.html#_9","title":"Run Parameters","text":"
• Start command: python3
• Command arguments:

                                                    /mxnet/mxnet/example/image-classification/train_mnist.py --num-epochs 10 --num-layers 2 --kv-store dist_device_sync --gpus 0\n

Explanation:

• /mxnet/mxnet/example/image-classification/train_mnist.py: the image classification example script shipped with MXNet.
• --num-epochs 10: train for 10 epochs.
• --num-layers 2: use a model with 2 layers.
• --kv-store dist_device_sync: use the distributed device-synchronous KVStore mode.
• --gpus 0: use GPU 0 for acceleration.
"},{"location":"admin/baize/developer/jobs/mxnet.html#_10","title":"Resource Configuration","text":"
• Job replicas: 3 (Scheduler, Server, and Worker)
• Resource requests per role:
  • Scheduler:
    • Replicas: 1
    • Resource requests:
      • CPU: 2 cores
      • Memory: 4 GiB
      • GPU: 1 card
  • Server (parameter server):
    • Replicas: 1
    • Resource requests:
      • CPU: 2 cores
      • Memory: 4 GiB
      • GPU: 1 card
  • Worker:
    • Replicas: 1
    • Resource requests:
      • CPU: 2 cores
      • Memory: 4 GiB
      • GPU: 1 card
"},{"location":"admin/baize/developer/jobs/mxnet.html#mxjob_1","title":"Complete MXJob Configuration Example","text":"

The following is the YAML configuration of a distributed MXJob:

                                                  apiVersion: \"kubeflow.org/v1\"\nkind: \"MXJob\"\nmetadata:\n  name: \"mxnet-job\"\nspec:\n  jobMode: MXTrain\n  mxReplicaSpecs:\n    Scheduler:\n      replicas: 1\n      restartPolicy: Never\n      template:\n        spec:\n          containers:\n            - name: mxnet\n              image: release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest\n              ports:\n                - containerPort: 9991\n                  name: mxjob-port\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n    Server:\n      replicas: 1\n      restartPolicy: Never\n      template:\n        spec:\n          containers:\n            - name: mxnet\n              image: release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest\n              ports:\n                - containerPort: 9991\n                  name: mxjob-port\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n    Worker:\n      replicas: 1\n      restartPolicy: Never\n      template:\n        spec:\n          containers:\n            - name: mxnet\n              image: release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest\n              command: [\"python3\"]\n              args:\n                [\n                  \"/mxnet/mxnet/example/image-classification/train_mnist.py\",\n                  \"--num-epochs\",\n                  \"10\",\n                  \"--num-layers\",\n                  \"2\",\n                  \"--kv-store\",\n                  \"dist_device_sync\",\n                  \"--gpus\",\n                  \"0\",\n                ]\n              ports:\n                - containerPort: 9991\n                  name: mxjob-port\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n

Configuration breakdown:

• Scheduler: coordinates job scheduling across the nodes in the cluster.
• Server (parameter server): stores and updates model parameters, enabling distributed parameter synchronization (see the KVStore sketch after this list).
• Worker: actually executes the training.
• Resource configuration: allocate appropriate resources to each role so the job runs smoothly.
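
To show what the parameter server does, here is a minimal, illustrative KVStore sketch; it uses a 'local' store because 'dist_device_sync' only works inside the scheduler/server environment that the MXJob sets up:

import mxnet as mx\n\n# 'local' stands in for 'dist_device_sync', which needs the MXJob cluster.\nkv = mx.kv.create('local')\nkv.init(3, mx.nd.ones((2, 3)))      # register key 3 with an initial value\nkv.push(3, mx.nd.ones((2, 3)) * 2)  # workers push gradients to the server\nout = mx.nd.zeros((2, 3))\nkv.pull(3, out=out)                 # workers pull the aggregated parameters\nprint(out.asnumpy())\n

In dist_device_sync mode, every worker's push is aggregated on the servers and each pull returns the same synchronized values, which is what keeps the replicas' models consistent.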
                                                  "},{"location":"admin/baize/developer/jobs/mxnet.html#_11","title":"\u8bbe\u7f6e\u4efb\u52a1\u526f\u672c\u6570","text":"

                                                  \u5728\u521b\u5efa MXNet \u5206\u5e03\u5f0f\u4efb\u52a1\u65f6\uff0c\u9700\u8981\u6839\u636e mxReplicaSpecs \u4e2d\u914d\u7f6e\u7684\u526f\u672c\u6570\uff0c\u6b63\u786e\u8bbe\u7f6e \u4efb\u52a1\u526f\u672c\u6570\u3002

                                                  • \u603b\u526f\u672c\u6570 = Scheduler \u526f\u672c\u6570 + Server \u526f\u672c\u6570 + Worker \u526f\u672c\u6570
                                                  • \u672c\u793a\u4f8b\u4e2d\uff1a
                                                    • Scheduler \u526f\u672c\u6570\uff1a1
                                                    • Server \u526f\u672c\u6570\uff1a1
                                                    • Worker \u526f\u672c\u6570\uff1a1
                                                    • \u603b\u526f\u672c\u6570\uff1a1 + 1 + 1 = 3

                                                  \u56e0\u6b64\uff0c\u5728\u4efb\u52a1\u914d\u7f6e\u4e2d\uff0c\u9700\u8981\u5c06 \u4efb\u52a1\u526f\u672c\u6570 \u8bbe\u7f6e\u4e3a 3\u3002

                                                  "},{"location":"admin/baize/developer/jobs/mxnet.html#_12","title":"\u63d0\u4ea4\u4efb\u52a1","text":"

                                                  \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u63d0\u4ea4 \u6309\u94ae\uff0c\u5f00\u59cb\u8fd0\u884c MXNet \u5206\u5e03\u5f0f\u4efb\u52a1\u3002

                                                  "},{"location":"admin/baize/developer/jobs/mxnet.html#_13","title":"\u67e5\u770b\u8fd0\u884c\u7ed3\u679c","text":"

                                                  \u8fdb\u5165 \u4efb\u52a1\u8be6\u60c5 \u9875\u9762\uff0c\u67e5\u770b\u4efb\u52a1\u7684\u8fd0\u884c\u72b6\u6001\u548c\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\u3002\u60a8\u53ef\u4ee5\u67e5\u770b\u6bcf\u4e2a\u89d2\u8272\uff08Scheduler\u3001Server\u3001Worker\uff09\u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                                                  \u793a\u4f8b\u8f93\u51fa\uff1a

                                                  INFO:root:Epoch[0] Batch [50]     Speed: 1000 samples/sec   accuracy=0.85\nINFO:root:Epoch[0] Batch [100]    Speed: 1200 samples/sec   accuracy=0.87\n...\nINFO:root:Epoch[9] Batch [100]    Speed: 1300 samples/sec   accuracy=0.98\nTraining completed.\n

This indicates that the distributed MXNet job ran successfully and model training completed.

"},{"location":"admin/baize/developer/jobs/mxnet.html#_14","title":"Summary","text":"

In this tutorial you learned how to create and run single-node and distributed MXNet jobs on the AI Lab platform. We covered how to configure an MXJob and how to specify the command and resource requirements of the job. If you have any questions, consult the platform's other documentation or contact technical support.

"},{"location":"admin/baize/developer/jobs/mxnet.html#_15","title":"Appendix","text":"
• Notes:

  • Make sure the image you use contains the required MXNet version and dependencies.
  • Adjust the resource configuration to your actual needs to avoid shortages or waste.
  • To use a custom training script, modify the start command and arguments.
• References:

  • MXNet official documentation
  • Kubeflow MXJob guide
"},{"location":"admin/baize/developer/jobs/paddle.html","title":"PaddlePaddle Jobs","text":"

PaddlePaddle is Baidu's open-source deep learning platform, supporting a rich set of neural network models and distributed training approaches. PaddlePaddle jobs can be trained in single-node or distributed mode. The AI Lab platform supports PaddlePaddle jobs, so you can quickly create one through the UI and start model training.

This tutorial walks you through creating and running single-node and distributed PaddlePaddle jobs on the AI Lab platform.

"},{"location":"admin/baize/developer/jobs/paddle.html#_1","title":"Job Configuration Overview","text":"
• Job type: PaddlePaddle, supporting both single-node and distributed modes.
• Runtime environment: choose an image that contains the PaddlePaddle framework, or install the necessary dependencies in the job.
"},{"location":"admin/baize/developer/jobs/paddle.html#_2","title":"Job Runtime Environment","text":"

We use the registry.baidubce.com/paddlepaddle/paddle:2.4.0rc0-cpu image as the job's base runtime environment. It comes with the PaddlePaddle framework preinstalled and is intended for CPU computation. If you need a GPU, choose the corresponding GPU image.

Note: to learn how to create and manage environments, see the environment list.

"},{"location":"admin/baize/developer/jobs/paddle.html#paddlepaddle_1","title":"Creating a PaddlePaddle Job","text":""},{"location":"admin/baize/developer/jobs/paddle.html#paddlepaddle_2","title":"PaddlePaddle Single-Node Training Job","text":""},{"location":"admin/baize/developer/jobs/paddle.html#_3","title":"Creation Steps","text":"
1. Log in to the platform: log in to AI Lab, click Job Center in the left navigation bar, and open the Training Jobs page.
2. Create a job: click the Create button in the upper-right corner to open the job creation page.
3. Select the job type: in the pop-up window, select PaddlePaddle as the job type, then click Next.
4. Fill in job information: enter a job name and description, for example "PaddlePaddle single-node training job", then click OK.
5. Configure job parameters: set the job's run parameters, image, resources, and other settings as needed.
"},{"location":"admin/baize/developer/jobs/paddle.html#_4","title":"Run Parameters","text":"
• Start command: python
• Command arguments:

                                                    -m paddle.distributed.launch run_check\n

Explanation:

• -m paddle.distributed.launch: use the distributed launch module provided by PaddlePaddle; it also works in single-node mode, which makes a later migration to distributed training easier.
• run_check: a test script provided by PaddlePaddle that checks whether the distributed environment works correctly.
"},{"location":"admin/baize/developer/jobs/paddle.html#_5","title":"Resource Configuration","text":"
• Replicas: 1 (single-node job)
• Resource requests:
  • CPU: set as needed; at least 1 core is recommended
  • Memory: set as needed; at least 2 GiB is recommended
  • GPU: if you need a GPU, choose the GPU image and allocate the corresponding GPU resources
"},{"location":"admin/baize/developer/jobs/paddle.html#paddlejob","title":"Complete PaddleJob Configuration Example","text":"

The following is the YAML configuration of a single-node PaddleJob:

                                                  apiVersion: kubeflow.org/v1\nkind: PaddleJob\nmetadata:\n    name: paddle-simple-cpu\n    namespace: kubeflow\nspec:\n    paddleReplicaSpecs:\n        Worker:\n            replicas: 1\n            restartPolicy: OnFailure\n            template:\n                spec:\n                    containers:\n                        - name: paddle\n                          image: registry.baidubce.com/paddlepaddle/paddle:2.4.0rc0-cpu\n                          command:\n                              [\n                                  'python',\n                                  '-m',\n                                  'paddle.distributed.launch',\n                                  'run_check',\n                              ]\n

Configuration breakdown:

• apiVersion and kind: the API version and type of the resource, here PaddleJob.
• metadata: metadata including the job name and namespace.
• spec: the detailed job configuration.
  • paddleReplicaSpecs: the replica configuration of the PaddlePaddle job.
    • Worker: the worker node configuration.
      • replicas: the replica count, 1 here, meaning single-node training.
      • restartPolicy: the restart policy, set to OnFailure so the job restarts automatically on failure.
      • template: the Pod template, defining the container's runtime environment and resources.
        • containers: the container list.
          • name: the container name.
          • image: the image to use.
          • command: the start command and its arguments.
"},{"location":"admin/baize/developer/jobs/paddle.html#_6","title":"Submitting the Job","text":"

After completing the configuration, click the Submit button to start the single-node PaddlePaddle job.

"},{"location":"admin/baize/developer/jobs/paddle.html#_7","title":"Viewing the Run Results","text":"

After the job is submitted successfully, open the job details page to check resource usage and the job's running status. From the upper-right corner, open the workload details to view the log output during the run.

Example output:

                                                  run check success, PaddlePaddle is installed correctly on this node :)\n

This indicates that the single-node PaddlePaddle job ran successfully and the environment is configured correctly.

"},{"location":"admin/baize/developer/jobs/paddle.html#paddlepaddle_3","title":"PaddlePaddle Distributed Training Job","text":"

In distributed mode, a PaddlePaddle job can train across multiple compute nodes, improving training efficiency.

"},{"location":"admin/baize/developer/jobs/paddle.html#_8","title":"Creation Steps","text":"
1. Log in to the platform: same as above.
2. Create a job: click the Create button in the upper-right corner to open the job creation page.
3. Select the job type: select PaddlePaddle as the job type, then click Next.
4. Fill in job information: enter a job name and description, for example "PaddlePaddle distributed training job", then click OK.
5. Configure job parameters: set the run parameters, image, resources, and so on as needed.
"},{"location":"admin/baize/developer/jobs/paddle.html#_9","title":"Run Parameters","text":"
• Start command: python
• Command arguments:

                                                    -m paddle.distributed.launch train.py --epochs=10\n

Explanation:

• -m paddle.distributed.launch: use the distributed launch module provided by PaddlePaddle.
• train.py: your training script, which must be baked into the image or mounted into the container (see the sketch after this list).
• --epochs=10: the number of training epochs, 10 here.
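
For orientation, here is a minimal, hypothetical sketch of what such a train.py might contain, assuming PaddlePaddle 2.x; the model, data, and loss are placeholders for your own training logic, not code shipped with the platform:

# Hypothetical train.py -- a toy distributed training loop for PaddlePaddle 2.x.\nimport argparse\nimport paddle\nimport paddle.distributed as dist\nimport paddle.nn as nn\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--epochs', type=int, default=10)\n    args = parser.parse_args()\n\n    dist.init_parallel_env()  # reads the env vars set by paddle.distributed.launch\n    model = paddle.DataParallel(nn.Linear(10, 1))  # syncs gradients across workers\n    loss_fn = nn.MSELoss()\n    opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())\n\n    x = paddle.randn([100, 10])  # placeholder data\n    y = paddle.randn([100, 1])\n    for epoch in range(args.epochs):\n        loss = loss_fn(model(x), y)\n        loss.backward()\n        opt.step()\n        opt.clear_grad()\n        print(f'Epoch {epoch + 1}/{args.epochs}, Loss: {float(loss):.4f}')\n\nif __name__ == '__main__':\n    main()\n

When launched with -m paddle.distributed.launch and two Worker replicas, each worker runs this script with its own rank, and DataParallel averages gradients between them.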
                                                  "},{"location":"admin/baize/developer/jobs/paddle.html#_10","title":"\u8d44\u6e90\u914d\u7f6e","text":"
                                                  • \u4efb\u52a1\u526f\u672c\u6570\uff1a\u6839\u636e Worker \u526f\u672c\u6570\u8bbe\u7f6e\uff0c\u8fd9\u91cc\u4e3a 2\u3002
                                                  • \u8d44\u6e90\u8bf7\u6c42\uff1a
                                                    • CPU\uff1a\u6839\u636e\u9700\u6c42\u8bbe\u7f6e\uff0c\u5efa\u8bae\u81f3\u5c11 1 \u6838
                                                    • \u5185\u5b58\uff1a\u6839\u636e\u9700\u6c42\u8bbe\u7f6e\uff0c\u5efa\u8bae\u81f3\u5c11 2 GiB
                                                    • GPU\uff1a\u5982\u679c\u9700\u8981\u4f7f\u7528 GPU\uff0c\u9009\u62e9 GPU \u7248\u672c\u7684\u955c\u50cf\uff0c\u5e76\u5206\u914d\u76f8\u5e94\u7684 GPU \u8d44\u6e90
                                                  "},{"location":"admin/baize/developer/jobs/paddle.html#paddlejob_1","title":"\u5b8c\u6574\u7684 PaddleJob \u914d\u7f6e\u793a\u4f8b","text":"

                                                  \u4ee5\u4e0b\u662f\u5206\u5e03\u5f0f PaddleJob \u7684 YAML \u914d\u7f6e\uff1a

                                                  apiVersion: kubeflow.org/v1\nkind: PaddleJob\nmetadata:\n    name: paddle-distributed-job\n    namespace: kubeflow\nspec:\n    paddleReplicaSpecs:\n        Worker:\n            replicas: 2\n            restartPolicy: OnFailure\n            template:\n                spec:\n                    containers:\n                        - name: paddle\n                          image: registry.baidubce.com/paddlepaddle/paddle:2.4.0rc0-cpu\n                          command:\n                              [\n                                  'python',\n                                  '-m',\n                                  'paddle.distributed.launch',\n                                  'train.py',\n                              ]\n                          args:\n                              - '--epochs=10'\n

Configuration breakdown:

• Worker:
  • replicas: the replica count, set to 2, meaning two worker nodes perform the distributed training.
  • The remaining settings are similar to single-node mode.
"},{"location":"admin/baize/developer/jobs/paddle.html#_11","title":"Setting the Job Replica Count","text":"

When creating a distributed PaddlePaddle job, set the job replica count correctly based on the replica counts configured in paddleReplicaSpecs.

• Total replicas = Worker replicas
• In this example:
  • Worker replicas: 2
  • Total replicas: 2

Therefore, set the job replica count to 2 in the job configuration.

"},{"location":"admin/baize/developer/jobs/paddle.html#_12","title":"Submitting the Job","text":"

After completing the configuration, click the Submit button to start the distributed PaddlePaddle job.

"},{"location":"admin/baize/developer/jobs/paddle.html#_13","title":"Viewing the Run Results","text":"

Open the job details page to check the job's running status and resource usage. You can view the log output of each worker node to confirm that the distributed training runs correctly.

Example output:

                                                  Worker 0: Epoch 1, Batch 100, Loss 0.5\nWorker 1: Epoch 1, Batch 100, Loss 0.6\n...\nTraining completed.\n

This indicates that the distributed PaddlePaddle job ran successfully and model training completed.

"},{"location":"admin/baize/developer/jobs/paddle.html#_14","title":"Summary","text":"

In this tutorial you learned how to create and run single-node and distributed PaddlePaddle jobs on the AI Lab platform. We covered how to configure a PaddleJob and how to specify the command and resource requirements of the job. If you have any questions, consult the platform's other documentation or contact technical support.

"},{"location":"admin/baize/developer/jobs/paddle.html#_15","title":"Appendix","text":"
• Notes:

  • Training script: make sure train.py (or your other training script) exists inside the container. You can bake the script into a custom image or mount it via persistent storage.
  • Image selection: choose an appropriate image for your needs, for example paddle:2.4.0rc0-gpu when using a GPU.
  • Parameter tuning: modify command and args to pass different training arguments.
• References:

  • PaddlePaddle official documentation
  • Kubeflow PaddleJob guide
"},{"location":"admin/baize/developer/jobs/pytorch.html","title":"PyTorch Jobs","text":"

PyTorch is an open-source deep learning framework that provides a flexible environment for training and deployment. A PyTorch job is a job that uses the PyTorch framework.

AI Lab provides support and adaptation for PyTorch jobs, so you can quickly create one through the UI and start model training.

"},{"location":"admin/baize/developer/jobs/pytorch.html#_1","title":"Job Configuration Overview","text":"
• The job type supports both PyTorch single-node and PyTorch distributed modes.
• The run image already supports the PyTorch framework by default; no extra installation is needed.
"},{"location":"admin/baize/developer/jobs/pytorch.html#_2","title":"Job Runtime Environment","text":"

Here we use the baize-notebook base image together with an associated environment as the job's base runtime environment.

To learn how to create environments, see the environment list.

"},{"location":"admin/baize/developer/jobs/pytorch.html#_3","title":"Creating a Job","text":""},{"location":"admin/baize/developer/jobs/pytorch.html#pytorch_1","title":"PyTorch Single-Node Job","text":"
1. Log in to the AI Lab platform, click Job Center in the left navigation bar, and open the Training Jobs page.
2. Click the Create button in the upper-right corner to open the job creation page.
3. Select PyTorch Single-Node as the job type, then click Next.
4. Enter a job name and description, then click OK.
"},{"location":"admin/baize/developer/jobs/pytorch.html#_4","title":"Run Parameters","text":"
• Start command: use bash
• Command arguments: use the following training script
import torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n# Define a simple neural network\nclass SimpleNet(nn.Module):\n    def __init__(self):\n        super(SimpleNet, self).__init__()\n        self.fc = nn.Linear(10, 1)\n\n    def forward(self, x):\n        return self.fc(x)\n\n# Create the model, loss function, and optimizer\nmodel = SimpleNet()\ncriterion = nn.MSELoss()\noptimizer = optim.SGD(model.parameters(), lr=0.01)\n\n# Generate some random data\nx = torch.randn(100, 10)\ny = torch.randn(100, 1)\n\n# Train the model\nfor epoch in range(100):\n    # Forward pass\n    outputs = model(x)\n    loss = criterion(outputs, y)\n\n    # Backward pass and optimization\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n\n    if (epoch + 1) % 10 == 0:\n        print(f'Epoch [{epoch+1}/100], Loss: {loss.item():.4f}')\n\nprint('Training finished.')\n
                                                  "},{"location":"admin/baize/developer/jobs/pytorch.html#_5","title":"\u8fd0\u884c\u7ed3\u679c","text":"

                                                  \u4efb\u52a1\u63d0\u4ea4\u6210\u529f\uff0c\u6211\u4eec\u53ef\u4ee5\u8fdb\u5165\u4efb\u52a1\u8be6\u60c5\u67e5\u770b\u5230\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\uff0c\u4ece\u53f3\u4e0a\u89d2\u53bb\u5f80 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5 \uff0c\u53ef\u4ee5\u67e5\u770b\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u7684\u65e5\u5fd7\u8f93\u51fa

                                                  [HAMI-core Warn(1:140244541377408:utils.c:183)]: get default cuda from (null)\n[HAMI-core Msg(1:140244541377408:libvgpu.c:855)]: Initialized\nEpoch [10/100], Loss: 1.1248\nEpoch [20/100], Loss: 1.0486\nEpoch [30/100], Loss: 0.9969\nEpoch [40/100], Loss: 0.9611\nEpoch [50/100], Loss: 0.9360\nEpoch [60/100], Loss: 0.9182\nEpoch [70/100], Loss: 0.9053\nEpoch [80/100], Loss: 0.8960\nEpoch [90/100], Loss: 0.8891\nEpoch [100/100], Loss: 0.8841\nTraining finished.\n[HAMI-core Msg(1:140244541377408:multiprocess_memory_limit.c:468)]: Calling exit handler 1\n
                                                  "},{"location":"admin/baize/developer/jobs/pytorch.html#pytorch_2","title":"Pytorch \u5206\u5e03\u5f0f\u4efb\u52a1","text":"
                                                  1. \u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3 \uff0c\u8fdb\u5165 \u4efb\u52a1\u5217\u8868 \u9875\u9762\u3002
                                                  2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                                                  3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a Pytorch \u5206\u5e03\u5f0f\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002
                                                  4. \u586b\u5199\u4efb\u52a1\u540d\u79f0\u3001\u63cf\u8ff0\u540e\u70b9\u51fb \u786e\u5b9a \u3002
                                                  "},{"location":"admin/baize/developer/jobs/pytorch.html#_6","title":"\u8fd0\u884c\u53c2\u6570","text":"
                                                  • \u542f\u52a8\u547d\u4ee4 \u4f7f\u7528 bash
                                                  • \u547d\u4ee4\u53c2\u6570\u4f7f\u7528
import os\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nclass SimpleModel(nn.Module):\n    def __init__(self):\n        super(SimpleModel, self).__init__()\n        self.fc = nn.Linear(10, 1)\n\n    def forward(self, x):\n        return self.fc(x)\n\ndef train():\n    # Print environment information\n    print(f'PyTorch version: {torch.__version__}')\n    print(f'CUDA available: {torch.cuda.is_available()}')\n    if torch.cuda.is_available():\n        print(f'CUDA version: {torch.version.cuda}')\n        print(f'CUDA device count: {torch.cuda.device_count()}')\n\n    rank = int(os.environ.get('RANK', '0'))\n    world_size = int(os.environ.get('WORLD_SIZE', '1'))\n\n    print(f'Rank: {rank}, World Size: {world_size}')\n\n    # Initialize the distributed environment\n    try:\n        if world_size > 1:\n            dist.init_process_group('nccl')\n            print('Distributed process group initialized successfully')\n        else:\n            print('Running in non-distributed mode')\n    except Exception as e:\n        print(f'Error initializing process group: {e}')\n        return\n\n    # Set up the device\n    try:\n        if torch.cuda.is_available():\n            device = torch.device(f'cuda:{rank % torch.cuda.device_count()}')\n            print(f'Using CUDA device: {device}')\n        else:\n            device = torch.device('cpu')\n            print('CUDA not available, using CPU')\n    except Exception as e:\n        print(f'Error setting device: {e}')\n        device = torch.device('cpu')\n        print('Falling back to CPU')\n\n    try:\n        model = SimpleModel().to(device)\n        print('Model moved to device successfully')\n    except Exception as e:\n        print(f'Error moving model to device: {e}')\n        return\n\n    try:\n        if world_size > 1:\n            ddp_model = DDP(model, device_ids=[rank % torch.cuda.device_count()] if torch.cuda.is_available() else None)\n            print('DDP model created successfully')\n        else:\n            ddp_model = model\n            print('Using non-distributed model')\n    except Exception as e:\n        print(f'Error creating DDP model: {e}')\n        return\n\n    loss_fn = nn.MSELoss()\n    optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)\n\n    # Generate some random data\n    try:\n        data = torch.randn(100, 10, device=device)\n        labels = torch.randn(100, 1, device=device)\n        print('Data generated and moved to device successfully')\n    except Exception as e:\n        print(f'Error generating or moving data to device: {e}')\n        return\n\n    for epoch in range(10):\n        try:\n            ddp_model.train()\n            outputs = ddp_model(data)\n            loss = loss_fn(outputs, labels)\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n\n            if rank == 0:\n                print(f'Epoch {epoch}, Loss: {loss.item():.4f}')\n        except Exception as e:\n            print(f'Error during training epoch {epoch}: {e}')\n            break\n\n    if world_size > 1:\n        dist.destroy_process_group()\n\nif __name__ == '__main__':\n    train()\n
                                                  "},{"location":"admin/baize/developer/jobs/pytorch.html#_7","title":"\u4efb\u52a1\u526f\u672c\u6570","text":"

                                                  \u6ce8\u610f Pytorch \u5206\u5e03\u5f0f \u8bad\u7ec3\u4efb\u52a1\u4f1a\u521b\u5efa\u4e00\u7ec4 Master \u548c Worker \u7684\u8bad\u7ec3 Pod\uff0c Master \u8d1f\u8d23\u534f\u8c03\u8bad\u7ec3\u4efb\u52a1\uff0cWorker \u8d1f\u8d23\u5b9e\u9645\u7684\u8bad\u7ec3\u5de5\u4f5c\u3002

                                                  Note

                                                  \u672c\u6b21\u6f14\u793a\u4e2d\uff1aMaster \u526f\u672c\u6570\u4e3a 1\uff0cWorker \u526f\u672c\u6570\u4e3a 2\uff1b \u6240\u4ee5\u6211\u4eec\u9700\u8981\u5728 \u4efb\u52a1\u914d\u7f6e \u4e2d\u8bbe\u7f6e\u526f\u672c\u6570\u4e3a 3\uff0c\u5373 Master \u526f\u672c\u6570 + Worker \u526f\u672c\u6570\u3002 Pytorch \u4f1a\u81ea\u52a8\u8c03\u8c10 Master \u548c Worker \u7684\u89d2\u8272\u3002
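As a minimal illustrative sketch (not part of the demo job itself), each replica can confirm its own role by reading the RANK and WORLD_SIZE environment variables that the training operator injects, just as the distributed example above does; by convention, rank 0 corresponds to the Master:

import os\n\n# RANK and WORLD_SIZE are injected into every replica of a distributed job;\n# by convention, rank 0 corresponds to the Master replica\nrank = int(os.environ.get('RANK', '0'))\nworld_size = int(os.environ.get('WORLD_SIZE', '1'))\nrole = 'master' if rank == 0 else 'worker'\nprint(f'Replica {rank} of {world_size} is running as the {role}')\n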

                                                  "},{"location":"admin/baize/developer/jobs/pytorch.html#_8","title":"\u8fd0\u884c\u7ed3\u679c","text":"

                                                  \u540c\u6837\uff0c\u6211\u4eec\u53ef\u4ee5\u8fdb\u5165\u4efb\u52a1\u8be6\u60c5\uff0c\u67e5\u770b\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\uff0c\u4ee5\u53ca\u6bcf\u4e2a Pod \u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                                                  "},{"location":"admin/baize/developer/jobs/tensorboard.html","title":"\u4efb\u52a1\u5206\u6790\u4ecb\u7ecd","text":"

                                                  \u5728 AI Lab \u6a21\u5757\u4e2d\uff0c\u63d0\u4f9b\u4e86\u6a21\u578b\u5f00\u53d1\u8fc7\u7a0b\u91cd\u8981\u7684\u53ef\u89c6\u5316\u5206\u6790\u5de5\u5177\uff0c\u7528\u4e8e\u5c55\u793a\u673a\u5668\u5b66\u4e60\u6a21\u578b\u7684\u8bad\u7ec3\u8fc7\u7a0b\u548c\u7ed3\u679c\u3002 \u672c\u6587\u5c06\u4ecb\u7ecd \u4efb\u52a1\u5206\u6790\uff08Tensorboard\uff09\u7684\u57fa\u672c\u6982\u5ff5\u3001\u5728 AI Lab \u7cfb\u7edf\u4e2d\u7684\u4f7f\u7528\u65b9\u6cd5\uff0c\u4ee5\u53ca\u5982\u4f55\u914d\u7f6e\u6570\u636e\u96c6\u7684\u65e5\u5fd7\u5185\u5bb9\u3002

                                                  Note

Tensorboard is a visualization tool provided by TensorFlow for displaying the training process and results of machine learning models. It helps developers understand training dynamics more intuitively, analyze model performance, and debug model problems.

Tensorboard's role and advantages in model development:

• Visualize the training process: display metrics such as training and validation loss and accuracy in charts, helping developers observe the training progress intuitively.
• Debug and optimize the model: inspect the weights and gradient distributions of different layers to help developers find and fix problems in the model.
• Compare different experiments: display the results of multiple experiments at the same time, making it easy to compare the effects of different models and hyperparameter configurations.
• Track training data: record the datasets and parameters used during training to ensure experiments are reproducible.
                                                  "},{"location":"admin/baize/developer/jobs/tensorboard.html#tensorboard","title":"\u5982\u4f55\u521b\u5efa Tensorboard","text":"

                                                  \u5728 AI Lab \u7cfb\u7edf\u4e2d\uff0c\u6211\u4eec\u63d0\u4f9b\u4e86\u4fbf\u6377\u7684\u65b9\u5f0f\u6765\u521b\u5efa\u548c\u7ba1\u7406 Tensorboard\u3002\u4ee5\u4e0b\u662f\u5177\u4f53\u6b65\u9aa4\uff1a

                                                  "},{"location":"admin/baize/developer/jobs/tensorboard.html#notebook-tensorboard","title":"\u5728\u521b\u5efa\u65f6 Notebook \u542f\u7528 Tensorboard","text":"
                                                  1. \u521b\u5efa Notebook\uff1a\u5728 AI Lab \u5e73\u53f0\u4e0a\u521b\u5efa\u4e00\u4e2a\u65b0\u7684 Notebook\u3002
                                                  2. \u542f\u7528 Tensorboard\uff1a\u5728\u521b\u5efa Notebook \u7684\u9875\u9762\u4e2d\uff0c\u542f\u7528 Tensorboard \u9009\u9879\uff0c\u5e76\u6307\u5b9a\u6570\u636e\u96c6\u548c\u65e5\u5fd7\u8def\u5f84\u3002

                                                  "},{"location":"admin/baize/developer/jobs/tensorboard.html#tensorboard_1","title":"\u5728\u5206\u5e03\u5f0f\u4efb\u52a1\u521b\u5efa\u53ca\u5b8c\u6210\u540e\u542f\u7528 Tensorboard","text":"
                                                  1. \u521b\u5efa\u5206\u5e03\u5f0f\u4efb\u52a1\uff1a\u5728 AI Lab \u5e73\u53f0\u4e0a\u521b\u5efa\u4e00\u4e2a\u65b0\u7684\u5206\u5e03\u5f0f\u8bad\u7ec3\u4efb\u52a1\u3002
                                                  2. \u914d\u7f6e Tensorboard\uff1a\u5728\u4efb\u52a1\u914d\u7f6e\u9875\u9762\u4e2d\uff0c\u542f\u7528 Tensorboard \u9009\u9879\uff0c\u5e76\u6307\u5b9a\u6570\u636e\u96c6\u548c\u65e5\u5fd7\u8def\u5f84\u3002
                                                  3. \u4efb\u52a1\u5b8c\u6210\u540e\u67e5\u770b Tensorboard\uff1a\u4efb\u52a1\u5b8c\u6210\u540e\uff0c\u53ef\u4ee5\u5728\u4efb\u52a1\u8be6\u60c5\u9875\u9762\u4e2d\u67e5\u770b Tensorboard \u7684\u94fe\u63a5\uff0c\u70b9\u51fb\u94fe\u63a5\u5373\u53ef\u67e5\u770b\u8bad\u7ec3\u8fc7\u7a0b\u7684\u53ef\u89c6\u5316\u7ed3\u679c\u3002

                                                  "},{"location":"admin/baize/developer/jobs/tensorboard.html#notebook-tensorboard_1","title":"\u5728 Notebook \u4e2d\u76f4\u63a5\u5f15\u7528 Tensorboard","text":"

                                                  \u5728 Notebook \u4e2d\uff0c\u53ef\u4ee5\u901a\u8fc7\u4ee3\u7801\u76f4\u63a5\u542f\u52a8 Tensorboard\u3002\u4ee5\u4e0b\u662f\u4e00\u4e2a\u793a\u4f8b\u4ee3\u7801\uff1a

# Import the necessary libraries\nimport tensorflow as tf\nimport datetime\n\n# Load and normalize the MNIST dataset (defines x_train/y_train and x_test/y_test,\n# which the original snippet referenced without defining)\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\n# Define the log directory\nlog_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n# Create the Tensorboard callback\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n\n# Build and compile the model\nmodel = tf.keras.models.Sequential([\n    tf.keras.layers.Flatten(input_shape=(28, 28)),\n    tf.keras.layers.Dense(512, activation='relu'),\n    tf.keras.layers.Dropout(0.2),\n    tf.keras.layers.Dense(10, activation='softmax')\n])\n\nmodel.compile(optimizer='adam',\n              loss='sparse_categorical_crossentropy',\n              metrics=['accuracy'])\n\n# Train the model with the Tensorboard callback enabled\nmodel.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test), callbacks=[tensorboard_callback])\n
                                                  "},{"location":"admin/baize/developer/jobs/tensorboard.html#_2","title":"\u5982\u4f55\u914d\u7f6e\u6570\u636e\u96c6\u7684\u65e5\u5fd7\u5185\u5bb9","text":"

                                                  \u5728\u4f7f\u7528 Tensorboard \u65f6\uff0c\u53ef\u4ee5\u8bb0\u5f55\u548c\u914d\u7f6e\u4e0d\u540c\u7684\u6570\u636e\u96c6\u548c\u65e5\u5fd7\u5185\u5bb9\u3002\u4ee5\u4e0b\u662f\u4e00\u4e9b\u5e38\u89c1\u7684\u914d\u7f6e\u65b9\u5f0f\uff1a

                                                  "},{"location":"admin/baize/developer/jobs/tensorboard.html#_3","title":"\u914d\u7f6e\u8bad\u7ec3\u548c\u9a8c\u8bc1\u6570\u636e\u96c6\u7684\u65e5\u5fd7","text":"

                                                  \u5728\u8bad\u7ec3\u6a21\u578b\u65f6\uff0c\u53ef\u4ee5\u901a\u8fc7 TensorFlow \u7684 tf.summary API \u6765\u8bb0\u5f55\u8bad\u7ec3\u548c\u9a8c\u8bc1\u6570\u636e\u96c6\u7684\u65e5\u5fd7\u3002\u4ee5\u4e0b\u662f\u4e00\u4e2a\u793a\u4f8b\u4ee3\u7801\uff1a

# Import the necessary libraries\nimport tensorflow as tf\n\n# Create the log directories\ntrain_log_dir = 'logs/gradient_tape/train'\nval_log_dir = 'logs/gradient_tape/val'\ntrain_summary_writer = tf.summary.create_file_writer(train_log_dir)\nval_summary_writer = tf.summary.create_file_writer(val_log_dir)\n\n# Train the model and record logs\n# (EPOCHS, train_dataset, val_dataset, train_step, val_step and the metric\n# objects such as train_loss are assumed to be defined elsewhere in your script)\nfor epoch in range(EPOCHS):\n    for (x_train, y_train) in train_dataset:\n        # Training step\n        train_step(x_train, y_train)\n        with train_summary_writer.as_default():\n            tf.summary.scalar('loss', train_loss.result(), step=epoch)\n            tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)\n\n    for (x_val, y_val) in val_dataset:\n        # Validation step\n        val_step(x_val, y_val)\n        with val_summary_writer.as_default():\n            tf.summary.scalar('loss', val_loss.result(), step=epoch)\n            tf.summary.scalar('accuracy', val_accuracy.result(), step=epoch)\n
                                                  "},{"location":"admin/baize/developer/jobs/tensorboard.html#_4","title":"\u914d\u7f6e\u81ea\u5b9a\u4e49\u65e5\u5fd7","text":"

                                                  \u9664\u4e86\u8bad\u7ec3\u548c\u9a8c\u8bc1\u6570\u636e\u96c6\u7684\u65e5\u5fd7\u5916\uff0c\u8fd8\u53ef\u4ee5\u8bb0\u5f55\u5176\u4ed6\u81ea\u5b9a\u4e49\u7684\u65e5\u5fd7\u5185\u5bb9\uff0c\u4f8b\u5982\u5b66\u4e60\u7387\u3001\u68af\u5ea6\u5206\u5e03\u7b49\u3002\u4ee5\u4e0b\u662f\u4e00\u4e2a\u793a\u4f8b\u4ee3\u7801\uff1a

# Record custom logs\nwith train_summary_writer.as_default():\n    tf.summary.scalar('learning_rate', learning_rate, step=epoch)\n    tf.summary.histogram('gradients', gradients, step=epoch)\n
                                                  "},{"location":"admin/baize/developer/jobs/tensorboard.html#tensorboard_2","title":"Tensorboard \u7ba1\u7406","text":"

                                                  \u5728 AI Lab \u4e2d\uff0c\u901a\u8fc7\u5404\u79cd\u65b9\u5f0f\u521b\u5efa\u51fa\u6765\u7684 Tensorboard \u4f1a\u7edf\u4e00\u5c55\u793a\u5728\u4efb\u52a1\u5206\u6790\u7684\u9875\u9762\u4e2d\uff0c\u65b9\u4fbf\u7528\u6237\u67e5\u770b\u548c\u7ba1\u7406\u3002

                                                  \u7528\u6237\u53ef\u4ee5\u5728\u4efb\u52a1\u5206\u6790\u9875\u9762\u4e2d\u67e5\u770b Tensorboard \u7684\u94fe\u63a5\u3001\u72b6\u6001\u3001\u521b\u5efa\u65f6\u95f4\u7b49\u4fe1\u606f\uff0c\u5e76\u901a\u8fc7\u94fe\u63a5\u76f4\u63a5\u8bbf\u95ee Tensorboard \u7684\u53ef\u89c6\u5316\u7ed3\u679c\u3002

                                                  "},{"location":"admin/baize/developer/jobs/tensorflow.html","title":"Tensorflow \u4efb\u52a1","text":"

                                                  Tensorflow \u662f\u9664\u4e86 Pytorch \u53e6\u5916\u4e00\u4e2a\u975e\u5e38\u6d3b\u8dc3\u7684\u5f00\u6e90\u7684\u6df1\u5ea6\u5b66\u4e60\u6846\u67b6\uff0c\u5b83\u63d0\u4f9b\u4e86\u4e00\u4e2a\u7075\u6d3b\u7684\u8bad\u7ec3\u548c\u90e8\u7f72\u73af\u5883\u3002

                                                  \u5728 AI Lab \u4e2d\uff0c\u6211\u4eec\u540c\u6837\u63d0\u4f9b\u4e86 Tensorflow \u6846\u67b6\u7684\u652f\u6301\u548c\u9002\u914d\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u754c\u9762\u5316\u64cd\u4f5c\uff0c\u5feb\u901f\u521b\u5efa Tensorflow \u4efb\u52a1\uff0c\u8fdb\u884c\u6a21\u578b\u8bad\u7ec3\u3002

                                                  "},{"location":"admin/baize/developer/jobs/tensorflow.html#_1","title":"\u4efb\u52a1\u914d\u7f6e\u4ecb\u7ecd","text":"
                                                  • \u4efb\u52a1\u7c7b\u578b\u540c\u65f6\u652f\u6301 Tensorflow \u5355\u673a \u548c Tensorflow \u5206\u5e03\u5f0f \u4e24\u79cd\u6a21\u5f0f\u3002
                                                  • \u8fd0\u884c\u955c\u50cf\u5185\u5df2\u7ecf\u9ed8\u8ba4\u652f\u6301 Tensorflow \u6846\u67b6\uff0c\u65e0\u9700\u989d\u5916\u5b89\u88c5\u3002
                                                  "},{"location":"admin/baize/developer/jobs/tensorflow.html#_2","title":"\u4efb\u52a1\u8fd0\u884c\u73af\u5883","text":"

                                                  \u5728\u8fd9\u91cc\u6211\u4eec\u4f7f\u7528 baize-notebook \u57fa\u7840\u955c\u50cf \u548c \u5173\u8054\u73af\u5883 \u7684\u65b9\u5f0f\u6765\u4f5c\u4e3a\u4efb\u52a1\u57fa\u7840\u8fd0\u884c\u73af\u5883\u3002

                                                  \u4e86\u89e3\u5982\u4f55\u521b\u5efa\u73af\u5883\uff0c\u8bf7\u53c2\u8003 \u73af\u5883\u5217\u8868\u3002

                                                  "},{"location":"admin/baize/developer/jobs/tensorflow.html#_3","title":"\u521b\u5efa\u4efb\u52a1","text":""},{"location":"admin/baize/developer/jobs/tensorflow.html#tfjob","title":"\u793a\u4f8b TFJob \u5355\u673a\u4efb\u52a1","text":"
                                                  1. \u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3 \uff0c\u8fdb\u5165 \u8bad\u7ec3\u4efb\u52a1 \u9875\u9762\u3002
                                                  2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                                                  3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a Tensorflow \u5355\u673a\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002
                                                  4. \u586b\u5199\u4efb\u52a1\u540d\u79f0\u3001\u63cf\u8ff0\u540e\u70b9\u51fb \u786e\u5b9a \u3002
                                                  "},{"location":"admin/baize/developer/jobs/tensorflow.html#_4","title":"\u63d0\u524d\u9884\u70ed\u4ee3\u7801\u4ed3\u5e93","text":"

                                                  \u4f7f\u7528 AI Lab -> \u6570\u636e\u96c6\u5217\u8868 \uff0c\u521b\u5efa\u4e00\u4e2a\u6570\u636e\u96c6\uff0c\u5e76\u5c06\u8fdc\u7aef Github \u7684\u4ee3\u7801\u62c9\u53d6\u5230\u6570\u636e\u96c6\u4e2d\uff0c \u8fd9\u6837\u5728\u521b\u5efa\u4efb\u52a1\u65f6\uff0c\u53ef\u4ee5\u76f4\u63a5\u9009\u62e9\u6570\u636e\u96c6\uff0c\u5c06\u4ee3\u7801\u6302\u8f7d\u5230\u4efb\u52a1\u4e2d\u3002

                                                  \u6f14\u793a\u4ee3\u7801\u4ed3\u5e93\u5730\u5740\uff1ahttps://github.com/d-run/training-sample-code/

                                                  "},{"location":"admin/baize/developer/jobs/tensorflow.html#_5","title":"\u8fd0\u884c\u53c2\u6570","text":"
                                                  • \u542f\u52a8\u547d\u4ee4 \u4f7f\u7528 bash
                                                  • \u547d\u4ee4\u53c2\u6570\u4f7f\u7528 python /code/tensorflow/tf-single.py
                                                  \"\"\"\n  pip install tensorflow numpy\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\n# \u521b\u5efa\u4e00\u4e9b\u968f\u673a\u6570\u636e\nx = np.random.rand(100, 1)\ny = 2 * x + 1 + np.random.rand(100, 1) * 0.1\n\n# \u521b\u5efa\u4e00\u4e2a\u7b80\u5355\u7684\u6a21\u578b\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Dense(1, input_shape=(1,))\n])\n\n# \u7f16\u8bd1\u6a21\u578b\nmodel.compile(optimizer='adam', loss='mse')\n\n# \u8bad\u7ec3\u6a21\u578b\uff0c\u5c06 epochs \u6539\u4e3a 10\nhistory = model.fit(x, y, epochs=10, verbose=1)\n\n# \u6253\u5370\u6700\u7ec8\u635f\u5931\nprint('Final loss: {' + str(history.history['loss'][-1]) +'}')\n\n# \u4f7f\u7528\u6a21\u578b\u8fdb\u884c\u9884\u6d4b\ntest_x = np.array([[0.5]])\nprediction = model.predict(test_x)\nprint(f'Prediction for x=0.5: {prediction[0][0]}')\n
                                                  "},{"location":"admin/baize/developer/jobs/tensorflow.html#_6","title":"\u8fd0\u884c\u7ed3\u679c","text":"

                                                  \u4efb\u52a1\u63d0\u4ea4\u6210\u529f\u540e\uff0c\u53ef\u4ee5\u8fdb\u5165\u4efb\u52a1\u8be6\u60c5\u67e5\u770b\u5230\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\uff0c\u4ece\u53f3\u4e0a\u89d2\u53bb\u5f80 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5 \uff0c\u53ef\u4ee5\u67e5\u770b\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                                                  "},{"location":"admin/baize/developer/jobs/tensorflow.html#tfjob_1","title":"TFJob \u5206\u5e03\u5f0f\u4efb\u52a1","text":"
                                                  1. \u767b\u5f55 AI Lab \uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3 \uff0c\u8fdb\u5165 \u4efb\u52a1\u5217\u8868 \u9875\u9762\u3002
                                                  2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                                                  3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a Tensorflow \u5206\u5e03\u5f0f\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002
                                                  4. \u586b\u5199\u4efb\u52a1\u540d\u79f0\u3001\u63cf\u8ff0\u540e\u70b9\u51fb \u786e\u5b9a \u3002
                                                  "},{"location":"admin/baize/developer/jobs/tensorflow.html#_7","title":"\u793a\u4f8b\u4efb\u52a1\u4ecb\u7ecd","text":"

                                                  \u672c\u6b21\u5305\u542b\u4e86\u4e09\u79cd\u89d2\u8272\uff1aChief\u3001Worker \u548c Parameter Server (PS)\u3002

                                                  • Chief: \u4e3b\u8981\u8d1f\u8d23\u534f\u8c03\u8bad\u7ec3\u8fc7\u7a0b\u548c\u6a21\u578b\u68c0\u67e5\u70b9\u7684\u4fdd\u5b58\u3002
                                                  • Worker: \u6267\u884c\u5b9e\u9645\u7684\u6a21\u578b\u8bad\u7ec3\u3002
                                                  • PS: \u5728\u5f02\u6b65\u8bad\u7ec3\u4e2d\u7528\u4e8e\u5b58\u50a8\u548c\u66f4\u65b0\u6a21\u578b\u53c2\u6570\u3002

                                                  \u4e3a\u4e0d\u540c\u7684\u89d2\u8272\u5206\u914d\u4e86\u4e0d\u540c\u7684\u8d44\u6e90\u3002Chief \u548c Worker \u4f7f\u7528 GPU\uff0c\u800c PS \u4f7f\u7528 CPU \u548c\u8f83\u5927\u7684\u5185\u5b58\u3002
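The distributed script in the next section reads the TF_CONFIG environment variable injected into each Pod. As a minimal illustrative sketch (the host names below are hypothetical placeholders, not values produced by this example), TF_CONFIG for a topology with 1 Chief, 2 Workers, and 1 PS would look roughly like this:

import json\nimport os\n\n# Hypothetical TF_CONFIG as a TFJob operator would inject it; every Pod sees\n# the same 'cluster' map, but 'task' describes this particular Pod's own role\nos.environ['TF_CONFIG'] = json.dumps({\n    'cluster': {\n        'chief': ['demo-chief-0:2222'],\n        'worker': ['demo-worker-0:2222', 'demo-worker-1:2222'],\n        'ps': ['demo-ps-0:2222'],\n    },\n    'task': {'type': 'worker', 'index': 0},\n})\n

Each Pod receives the same cluster map but its own task entry, which is how the script below distinguishes the Chief from the other roles.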

                                                  "},{"location":"admin/baize/developer/jobs/tensorflow.html#_8","title":"\u8fd0\u884c\u53c2\u6570","text":"
                                                  • \u542f\u52a8\u547d\u4ee4 \u4f7f\u7528 bash
                                                  • \u547d\u4ee4\u53c2\u6570\u4f7f\u7528 python /code/tensorflow/tensorflow-distributed.py
import os\nimport json\nimport tensorflow as tf\n\nclass SimpleModel(tf.keras.Model):\n    def __init__(self):\n        super(SimpleModel, self).__init__()\n        self.fc = tf.keras.layers.Dense(1, input_shape=(10,))\n\n    def call(self, x):\n        return self.fc(x)\n\ndef train():\n    # Print environment information\n    print(f\"TensorFlow version: {tf.__version__}\")\n    print(f\"GPU available: {tf.test.is_gpu_available()}\")\n    if tf.test.is_gpu_available():\n        print(f\"GPU device count: {len(tf.config.list_physical_devices('GPU'))}\")\n\n    # Get distributed training information\n    tf_config = json.loads(os.environ.get('TF_CONFIG') or '{}')\n    task_type = tf_config.get('task', {}).get('type')\n    task_id = tf_config.get('task', {}).get('index')\n\n    print(f\"Task type: {task_type}, Task ID: {task_id}\")\n\n    # Set up the distribution strategy\n    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()\n\n    with strategy.scope():\n        model = SimpleModel()\n        loss_fn = tf.keras.losses.MeanSquaredError()\n        optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)\n\n    # Generate some random data\n    data = tf.random.normal((100, 10))\n    labels = tf.random.normal((100, 1))\n\n    @tf.function\n    def train_step(inputs, labels):\n        with tf.GradientTape() as tape:\n            predictions = model(inputs)\n            loss = loss_fn(labels, predictions)\n        gradients = tape.gradient(loss, model.trainable_variables)\n        optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n        return loss\n\n    for epoch in range(10):\n        loss = train_step(data, labels)\n        if task_type == 'chief':\n            print(f'Epoch {epoch}, Loss: {loss.numpy():.4f}')\n\nif __name__ == '__main__':\n    train()\n
                                                  "},{"location":"admin/baize/developer/jobs/tensorflow.html#_9","title":"\u8fd0\u884c\u7ed3\u679c","text":"

                                                  \u540c\u6837\uff0c\u6211\u4eec\u53ef\u4ee5\u8fdb\u5165\u4efb\u52a1\u8be6\u60c5\uff0c\u67e5\u770b\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\uff0c\u4ee5\u53ca\u6bcf\u4e2a Pod \u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                                                  "},{"location":"admin/baize/developer/jobs/view.html","title":"\u67e5\u770b\u4efb\u52a1\uff08Job\uff09\u5de5\u4f5c\u8d1f\u8f7d","text":"

                                                  \u4efb\u52a1\u521b\u5efa\u597d\u540e\uff0c\u90fd\u4f1a\u663e\u793a\u5728\u8bad\u7ec3\u4efb\u52a1\u5217\u8868\u4e2d\u3002

                                                  1. \u5728\u8bad\u7ec3\u8bad\u7ec3\u4efb\u52a1\u5217\u8868\u4e2d\uff0c\u70b9\u51fb\u67d0\u4e2a\u4efb\u52a1\u53f3\u4fa7\u7684 \u2507 -> \u4efb\u52a1\u8d1f\u8f7d\u8be6\u60c5 \u3002

                                                  2. \u51fa\u73b0\u4e00\u4e2a\u5f39\u7a97\u9009\u62e9\u8981\u67e5\u770b\u54ea\u4e2a Pod \u540e\uff0c\u70b9\u51fb \u8fdb\u5165 \u3002

                                                  3. \u8df3\u8f6c\u5230\u5bb9\u5668\u7ba1\u7406\u754c\u9762\uff0c\u53ef\u4ee5\u67e5\u770b\u5bb9\u5668\u7684\u5de5\u4f5c\u72b6\u6001\u3001\u6807\u7b7e\u4e0e\u6ce8\u89e3\u4ee5\u53ca\u53d1\u751f\u7684\u4e8b\u4ef6\u3002

                                                  4. \u4f60\u8fd8\u53ef\u4ee5\u67e5\u770b\u5f53\u524d Pod \u6700\u8fd1\u4e00\u6bb5\u65f6\u95f4\u7684\u8be6\u7ec6\u65e5\u5fd7\u3002 \u6b64\u5904\u9ed8\u8ba4\u5c55\u793a 100 \u884c\u65e5\u5fd7\uff0c\u5982\u679c\u8981\u67e5\u770b\u66f4\u8be6\u7ec6\u7684\u65e5\u5fd7\u6d3b\u4e0b\u8f7d\u65e5\u5fd7\uff0c\u8bf7\u70b9\u51fb\u9876\u90e8\u7684\u84dd\u8272 \u53ef\u89c2\u6d4b\u6027 \u6587\u5b57\u3002

                                                  5. \u5f53\u7136\u4f60\u8fd8\u53ef\u4ee5\u901a\u8fc7\u53f3\u4e0a\u89d2\u7684 ... \uff0c\u67e5\u770b\u5f53\u524d Pod \u7684 YAML\u3001\u4e0a\u4f20\u548c\u4e0b\u8f7d\u6587\u4ef6\u3002 \u4ee5\u4e0b\u662f\u4e00\u4e2a Pod \u7684 YAML \u793a\u4f8b\u3002

                                                  kind: Pod\napiVersion: v1\nmetadata:\n  name: neko-tensorboard-job-test-202404181843-skxivllb-worker-0\n  namespace: default\n  uid: ddedb6ff-c278-47eb-ae1e-0de9b7c62f8c\n  resourceVersion: '41092552'\n  creationTimestamp: '2024-04-18T10:43:36Z'\n  labels:\n    training.kubeflow.org/job-name: neko-tensorboard-job-test-202404181843-skxivllb\n    training.kubeflow.org/operator-name: pytorchjob-controller\n    training.kubeflow.org/replica-index: '0'\n    training.kubeflow.org/replica-type: worker\n  annotations:\n    cni.projectcalico.org/containerID: 0cfbb9af257d5e69027c603c6cb2d3890a17c4ae1a145748d5aef73a10d7fbe1\n    cni.projectcalico.org/podIP: ''\n    cni.projectcalico.org/podIPs: ''\n    hami.io/bind-phase: success\n    hami.io/bind-time: '1713437016'\n    hami.io/vgpu-devices-allocated: GPU-29d5fa0d-935b-2966-aff8-483a174d61d1,NVIDIA,1024,20:;\n    hami.io/vgpu-devices-to-allocate: ;\n    hami.io/vgpu-node: worker-a800-1\n    hami.io/vgpu-time: '1713437016'\n    k8s.v1.cni.cncf.io/network-status: |-\n      [{\n          \"name\": \"kube-system/calico\",\n          \"ips\": [\n              \"10.233.97.184\"\n          ],\n          \"default\": true,\n          \"dns\": {}\n      }]\n    k8s.v1.cni.cncf.io/networks-status: |-\n      [{\n          \"name\": \"kube-system/calico\",\n          \"ips\": [\n              \"10.233.97.184\"\n          ],\n          \"default\": true,\n          \"dns\": {}\n      }]\n  ownerReferences:\n    - apiVersion: kubeflow.org/v1\n      kind: PyTorchJob\n      name: neko-tensorboard-job-test-202404181843-skxivllb\n      uid: e5a8b05d-1f03-4717-8e1c-4ec928014b7b\n      controller: true\n      blockOwnerDeletion: true\nspec:\n  volumes:\n    - name: 0-dataset-pytorch-examples\n      persistentVolumeClaim:\n        claimName: pytorch-examples\n    - name: kube-api-access-wh9rh\n      projected:\n        sources:\n          - serviceAccountToken:\n              expirationSeconds: 3607\n              path: token\n          - configMap:\n              name: kube-root-ca.crt\n              items:\n                - key: ca.crt\n                  path: ca.crt\n          - downwardAPI:\n              items:\n                - path: namespace\n                  fieldRef:\n                    apiVersion: v1\n                    fieldPath: metadata.namespace\n        defaultMode: 420\n  containers:\n    - name: pytorch\n      image: m.daocloud.io/docker.io/pytorch/pytorch\n      command:\n        - bash\n      args:\n        - '-c'\n        - >-\n          ls -la /root && which pip && pip install pytorch_lightning tensorboard\n          && python /root/Git/pytorch/examples/mnist/main.py\n      ports:\n        - name: pytorchjob-port\n          containerPort: 23456\n          protocol: TCP\n      env:\n        - name: PYTHONUNBUFFERED\n          value: '1'\n        - name: PET_NNODES\n          value: '1'\n      resources:\n        limits:\n          cpu: '4'\n          memory: 8Gi\n          nvidia.com/gpucores: '20'\n          nvidia.com/gpumem: '1024'\n          nvidia.com/vgpu: '1'\n        requests:\n          cpu: '4'\n          memory: 8Gi\n          nvidia.com/gpucores: '20'\n          nvidia.com/gpumem: '1024'\n          nvidia.com/vgpu: '1'\n      volumeMounts:\n        - name: 0-dataset-pytorch-examples\n          mountPath: /root/Git/pytorch/examples\n        - name: kube-api-access-wh9rh\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      
terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  restartPolicy: Never\n  terminationGracePeriodSeconds: 30\n  dnsPolicy: ClusterFirst\n  serviceAccountName: default\n  serviceAccount: default\n  nodeName: worker-a800-1\n  securityContext: {}\n  affinity: {}\n  schedulerName: hami-scheduler\n  tolerations:\n    - key: node.kubernetes.io/not-ready\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n    - key: node.kubernetes.io/unreachable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n  priorityClassName: baize-high-priority\n  priority: 100000\n  enableServiceLinks: true\n  preemptionPolicy: PreemptLowerPriority\nstatus:\n  phase: Succeeded\n  conditions:\n    - type: Initialized\n      status: 'True'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:43:36Z'\n      reason: PodCompleted\n    - type: Ready\n      status: 'False'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:46:34Z'\n      reason: PodCompleted\n    - type: ContainersReady\n      status: 'False'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:46:34Z'\n      reason: PodCompleted\n    - type: PodScheduled\n      status: 'True'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:43:36Z'\n  hostIP: 10.20.100.211\n  podIP: 10.233.97.184\n  podIPs:\n    - ip: 10.233.97.184\n  startTime: '2024-04-18T10:43:36Z'\n  containerStatuses:\n    - name: pytorch\n      state:\n        terminated:\n          exitCode: 0\n          reason: Completed\n          startedAt: '2024-04-18T10:43:39Z'\n          finishedAt: '2024-04-18T10:46:34Z'\n          containerID: >-\n            containerd://09010214bcf3315e81d38fba50de3943c9d2b48f50a6cc2e83f8ef0e5c6eeec1\n      lastState: {}\n      ready: false\n      restartCount: 0\n      image: m.daocloud.io/docker.io/pytorch/pytorch:latest\n      imageID: >-\n        m.daocloud.io/docker.io/pytorch/pytorch@sha256:11691e035a3651d25a87116b4f6adc113a27a29d8f5a6a583f8569e0ee5ff897\n      containerID: >-\n        containerd://09010214bcf3315e81d38fba50de3943c9d2b48f50a6cc2e83f8ef0e5c6eeec1\n      started: false\n  qosClass: Guaranteed\n
                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html","title":"baizectl \u547d\u4ee4\u884c\u5de5\u5177\u4f7f\u7528\u6307\u5357","text":"

                                                  baizectl \u662f\u5728 AI Lab \u6a21\u5757\u4e2d\u4e13\u95e8\u670d\u52a1\u4e8e\u6a21\u578b\u5f00\u53d1\u8005\u4e0e\u6570\u636e\u79d1\u5b66\u5bb6\u4eec\u4f7f\u7528\u7684\u547d\u4ee4\u884c\u5de5\u5177\u3002 \u5b83\u63d0\u4f9b\u4e86\u4e00\u7cfb\u5217\u547d\u4ee4\u6765\u5e2e\u52a9\u7528\u6237\u7ba1\u7406\u5206\u5e03\u5f0f\u8bad\u7ec3\u4f5c\u4e1a\u3001\u67e5\u770b\u4efb\u52a1\u72b6\u6001\u3001\u7ba1\u7406\u6570\u636e\u96c6\u7b49\u64cd\u4f5c\uff0c\u540c\u65f6\u652f\u6301\u8fde\u63a5 Kubernetes \u5de5\u4f5c\u96c6\u7fa4\u548c\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5de5\u4f5c\u7a7a\u95f4\uff0c\u5e2e\u52a9\u7528\u6237\u66f4\u9ad8\u6548\u5730\u4f7f\u7528\u548c\u7ba1\u7406 Kubernetes \u5e73\u53f0\u8d44\u6e90\u3002

                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_1","title":"\u5b89\u88c5","text":"

                                                  \u76ee\u524d\uff0cbaizectl \u5df2\u7ecf\u96c6\u6210\u5728 AI Lab \u4e2d\u3002 \u4f60\u5728\u521b\u5efa Notebook \u540e\uff0c\u5373\u53ef\u5728 Notebook \u4e2d\u76f4\u63a5\u4f7f\u7528 baizectl\u3002

                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_2","title":"\u5feb\u901f\u4e0a\u624b","text":""},{"location":"admin/baize/developer/notebooks/baizectl.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"

                                                  baizectl \u547d\u4ee4\u7684\u57fa\u672c\u683c\u5f0f\u5982\u4e0b\uff1a

jovyan@19d0197587cc:/$ baizectl\nAI platform management tool\n\nUsage:\n  baizectl [command]\n\nAvailable Commands:\n  completion  Generate the autocompletion script for the specified shell\n  data        Management datasets\n  help        Help about any command\n  job         Manage jobs\n  login       Login to the platform\n  version     Show cli version\n\nFlags:\n      --cluster string     Cluster name to operate\n  -h, --help               help for baizectl\n      --mode string        Connection mode: auto, api, notebook (default \"auto\")\n  -n, --namespace string   Namespace to use for the operation. If not set, the default Namespace will be used.\n  -s, --server string      算丰 AI computing platform access base url\n      --skip-tls-verify    Skip TLS certificate verification\n      --token string       算丰 AI computing platform access token\n  -w, --workspace int32    Workspace ID to use for the operation\n\nUse \"baizectl [command] --help\" for more information about a command.\n

The above shows the basic information of baizectl. You can run baizectl --help to view the help, or baizectl [command] --help to view the help for a specific command.

                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_4","title":"\u67e5\u770b\u7248\u672c\u4fe1\u606f","text":"

                                                  baizectl \u652f\u6301\u901a\u8fc7 version \u547d\u4ee4\u67e5\u770b\u7248\u672c\u4fe1\u606f\u3002

                                                  (base) jovyan@den-0:~$ baizectl version \nbaizectl version: v0.5.0, commit sha: ac0837c4\n
                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_5","title":"\u547d\u4ee4\u683c\u5f0f","text":"

                                                  baizectl \u547d\u4ee4\u7684\u57fa\u672c\u683c\u5f0f\u5982\u4e0b\uff1a

                                                  baizectl [command] [flags]\n

Here, [command] is the specific operation command, such as data or job, and [flags] are optional parameters used to specify the details of the operation.
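For example, a minimal invocation (the namespace name is an illustrative placeholder) combines a subcommand with a flag:

baizectl job ls -n demo-namespace\n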

                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_6","title":"\u5e38\u7528\u9009\u9879","text":"
                                                  • --cluster string\uff1a\u6307\u5b9a\u8981\u64cd\u4f5c\u7684\u96c6\u7fa4\u540d\u79f0
                                                  • -h, --help\uff1a\u663e\u793a\u5e2e\u52a9\u4fe1\u606f
                                                  • --mode string\uff1a\u8fde\u63a5\u6a21\u5f0f\uff0c\u53ef\u9009\u503c\u4e3a auto\u3001api\u3001notebook\uff08\u9ed8\u8ba4\u503c\u4e3a auto\uff09
                                                  • -n, --namespace string\uff1a\u6307\u5b9a\u64cd\u4f5c\u7684\u547d\u540d\u7a7a\u95f4\u3002\u5982\u679c\u672a\u8bbe\u7f6e\uff0c\u5c06\u4f7f\u7528\u9ed8\u8ba4\u547d\u540d\u7a7a\u95f4
                                                  • -s, --server string\uff1a\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8bbf\u95ee\u57fa\u7840 URL
                                                  • --skip-tls-verify\uff1a\u8df3\u8fc7 TLS \u8bc1\u4e66\u9a8c\u8bc1
                                                  • --token string\uff1a\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8bbf\u95ee\u4ee4\u724c
                                                  • -w, --workspace int32\uff1a\u6307\u5b9a\u64cd\u4f5c\u7684\u5de5\u4f5c\u533a ID
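As an illustrative sketch (the cluster name, namespace, server URL, and token below are placeholders), these global options can be combined with any subcommand:

baizectl job ls \\\n    --cluster my-cluster \\\n    --namespace demo-ns \\\n    --workspace 1 \\\n    --server https://ai.example.com \\\n    --skip-tls-verify \\\n    --token YOUR_ACCESS_TOKEN\n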
                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_7","title":"\u529f\u80fd\u4ecb\u7ecd","text":""},{"location":"admin/baize/developer/notebooks/baizectl.html#_8","title":"\u4efb\u52a1\u7ba1\u7406","text":"

                                                  baizectl \u63d0\u4f9b\u4e86\u4e00\u7cfb\u5217\u547d\u4ee4\u6765\u7ba1\u7406\u5206\u5e03\u5f0f\u8bad\u7ec3\u4efb\u52a1\uff0c\u5305\u542b\u4e86\u67e5\u770b\u4efb\u52a1\u5217\u8868\uff0c\u63d0\u4ea4\u4efb\u52a1\u3001\u67e5\u770b\u65e5\u5fd7\u3001\u91cd\u542f\u4efb\u52a1\u3001\u5220\u9664\u4efb\u52a1\u7b49\u3002

                                                  jovyan@19d0197587cc:/$ baizectl job\nManage jobs\n\nUsage:\n  baizectl job [command]\n\nAvailable Commands:\n  delete      Delete a job\n  logs        Show logs of a job\n  ls          List jobs\n  restart     restart a job\n  submit      Submit a job\n\nFlags:\n  -h, --help            help for job\n  -o, --output string   Output format. One of: table, json, yaml (default \"table\")\n      --page int        Page number (default 1)\n      --page-size int   Page size (default -1)\n      --search string   Search query\n      --sort string     Sort order\n      --truncate int    Truncate output to the given length, 0 means no truncation (default 50)\n\nUse \"baizectl job [command] --help\" for more information about a command.\n
                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_9","title":"\u63d0\u4ea4\u8bad\u7ec3\u4efb\u52a1","text":"

                                                  baizectl \u652f\u6301\u4f7f\u7528 submit \u547d\u4ee4\u63d0\u4ea4\u4e00\u4e2a\u4efb\u52a1\uff0c\u7528\u6237\u53ef\u4ee5\u901a\u8fc7 baizectl job submit --help \u67e5\u770b\u8be6\u7ec6\u4fe1\u606f\u3002

                                                  (base) jovyan@den-0:~$ baizectl job submit --help\nSubmit a job\n\nUsage:\n  baizectl job submit [flags] -- command ...\n\nAliases:\n  submit, create\n\nExamples:\n# Submit a job to run the command \"torchrun python train.py\"\nbaizectl job submit -- torchrun python train.py\n# Submit a job with 2 workers(each pod use 4 gpus) to run the command \"torchrun python train.py\" and use the image \"pytorch/pytorch:1.8.1-cuda11.1-cudnn8-runtime\"\nbaizectl job submit --image pytorch/pytorch:1.8.1-cuda11.1-cudnn8-runtime --workers 2 --resources nvidia.com/gpu=4 -- torchrun python train.py\n# Submit a tensorflow job to run the command \"python train.py\"\nbaizectl job submit --tensorflow -- python train.py\n\n\nFlags:\n      --annotations stringArray                       The annotations of the job, the format is key=value\n      --auto-load-env                                 It only takes effect when executed in Notebook, the environment variables of the current environment will be automatically read and set to the environment variables of the Job, the specific environment variables to be read can be specified using the BAIZE_MAPPING_ENVS environment variable, the default is PATH,CONDA_*,*PYTHON*,NCCL_*, if set to false, the environment variables of the current environment will not be read. (default true)\n      --commands stringArray                          The default command of the job\n  -d, --datasets stringArray                          The dataset bind to the job, the format is datasetName:mountPath, e.g. mnist:/data/mnist\n  -e, --envs stringArray                              The environment variables of the job, the format is key=value\n  -x, --from-notebook string                          Define whether to read the configuration of the current Notebook and directly create tasks, including images, resources, Dataset, etc.\n                                                      auto: Automatically determine the mode according to the current environment. If the current environment is a Notebook, it will be set to notebook mode.\n                                                      false: Do not read the configuration of the current Notebook.\n                                                      true: Read the configuration of the current Notebook. 
(default \"auto\")\n  -h, --help                                          help for submit\n      --image string                                  The image of the job, it must be specified if fromNotebook is false.\n  -t, --job-type string                               Job type: PYTORCH, TENSORFLOW, PADDLE (default \"PYTORCH\")\n      --labels stringArray                            The labels of the job, the format is key=value\n      --max-retries int32                             number of retries before marking this job failed\n      --max-run-duration int                          Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it\n      --name string                                   The name of the job, if empty, the name will be generated automatically.\n      --paddle                                        PaddlePaddle Job, has higher priority than --job-type\n      --priority string                               The priority of the job, current support baize-medium-priority, baize-low-priority, baize-high-priority\n      --pvcs stringArray                              The pvcs bind to the job, the format is pvcName:mountPath, e.g. mnist:/data/mnist\n      --pytorch                                       Pytorch Job, has higher priority than --job-type\n      --queue string                                  The queue to used\n      --requests-resources stringArray                Similar to resources, but sets the resources of requests\n      --resources stringArray                         The resources of the job, it is a string in the format of cpu=1,memory=1Gi,nvidia.com/gpu=1, it will be set to the limits and requests of the container.\n      --restart-policy string                         The job restart policy (default \"on-failure\")\n      --runtime-envs baizectl data ls --runtime-env   The runtime environment to use for the job, you can use baizectl data ls --runtime-env to get the runtime environment\n      --shm-size int32                                The shared memory size of the job, default is 0, which means no shared memory, if set to more than 0, the job will use the shared memory, the unit is MiB\n      --tensorboard-log-dir string                    The tensorboard log directory, if set, the job will automatically start tensorboard, else not. The format is /path/to/log, you can use relative path in notebook.\n      --tensorflow                                    Tensorflow Job, has higher priority than --job-type\n      --workers int                                   The workers of the job, default is 1, which means single worker, if set to more than 1, the job will be distributed. (default 1)\n      --working-dir string                            The working directory of job container, if in notebook mode, the default is the directory of the current file\n

                                                  Note

Description of the parameters for submitting a job:

• --name: job name; if empty, a name is generated automatically
• --image: image name; must be specified
• --priority: job priority; supports high = baize-high-priority, medium = baize-medium-priority, low = baize-low-priority
• --resources: job resources, in the format cpu=1,memory=1Gi,nvidia.com/gpu=1
• --workers: number of job worker nodes; defaults to 1; when set greater than 1, the job runs distributed
• --queue: job queue; the queue resource needs to be created in advance
• --working-dir: working directory; in Notebook mode, the directory of the current file is used by default
• --datasets: datasets, in the format datasetName:mountPath, e.g. mnist:/data/mnist
• --shm-size: shared memory size; can be enabled for distributed training jobs, meaning shared memory is used; the unit is MiB
• --labels: job labels, in the format key=value
• --max-retries: maximum number of retries; how many times to retry after the job fails, restarting the job on each failure; unlimited by default
• --max-run-duration: maximum run duration; if the job runs longer than the specified time, the system terminates it; unlimited by default
• --restart-policy: restart policy; supports on-failure, never, always; defaults to on-failure
• --from-notebook: whether to read the configuration from the current Notebook; supports auto, true, false; defaults to auto
                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#pytorch","title":"PyTorch \u5355\u673a\u4efb\u52a1\u793a\u4f8b","text":"

                                                  \u63d0\u4ea4\u8bad\u7ec3\u4efb\u52a1\u793a\u4f8b\uff0c\u7528\u6237\u53ef\u4ee5\u6839\u636e\u5b9e\u9645\u9700\u6c42\u4fee\u6539\u53c2\u6570\uff0c\u4ee5\u4e0b\u4e3a\u521b\u5efa\u4e00\u4e2a PyTorch \u4efb\u52a1\u7684\u793a\u4f8b\uff1a

                                                  baizectl job submit --name demojob-v2 -t PYTORCH \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --resources cpu=1,memory=1Gi \\\n    --workers 1 \\\n    --queue default \\\n    --working-dir /data \\\n    --datasets fashion-mnist:/data/mnist \\\n    --labels job_type=pytorch \\\n    --max-retries 3 \\\n    --max-run-duration 60 \\\n    --restart-policy on-failure \\\n    -- sleep 1000\n
                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#pytorch_1","title":"PyTorch \u5206\u5e03\u5f0f\u4efb\u52a1\u793a\u4f8b","text":"

                                                  \u63d0\u4ea4\u8bad\u7ec3\u4efb\u52a1\u793a\u4f8b\uff0c\u7528\u6237\u53ef\u4ee5\u6839\u636e\u5b9e\u9645\u9700\u6c42\u4fee\u6539\u53c2\u6570\uff0c\u4ee5\u4e0b\u4e3a\u521b\u5efa\u4e00\u4e2a PyTorch \u4efb\u52a1\u7684\u793a\u4f8b\uff1a

baizectl job submit --name demojob-v2 -t PYTORCH \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --resources cpu=1,memory=1Gi \\\n    --workers 2 \\   # more than one replica automatically creates a distributed job\n    --shm-size 1024 \\\n    --queue default \\\n    --working-dir /data \\\n    --datasets fashion-mnist:/data/mnist \\\n    --labels job_type=pytorch \\\n    --max-retries 3 \\\n    --max-run-duration 60 \\\n    --restart-policy on-failure \\\n    -- sleep 1000\n
                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#tensorflow","title":"Tensorflow \u4efb\u52a1\u793a\u4f8b","text":"

                                                  \u4f7f\u7528 -t \u53c2\u6570\u6307\u5b9a\u4efb\u52a1\u7c7b\u578b\uff0c\u4ee5\u4e0b\u4e3a\u521b\u5efa\u4e00\u4e2a Tensorflow \u4efb\u52a1\u7684\u793a\u4f8b\uff1a

baizectl job submit --name demojob-v2 -t TENSORFLOW \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --from-notebook auto \\\n    --workers 1 \\\n    --queue default \\\n    --working-dir /data \\\n    --datasets fashion-mnist:/data/mnist \\\n    --labels job_type=tensorflow \\\n    --max-retries 3 \\\n    --max-run-duration 60 \\\n    --restart-policy on-failure \\\n    -- sleep 1000\n

You can also specify the job type with the --job-type or --tensorflow parameter.
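For example, the shorthand flag alone is enough, as shown in the submit command's built-in help above:

baizectl job submit --tensorflow -- python train.py\n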

                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#paddle","title":"Paddle \u4efb\u52a1\u793a\u4f8b","text":"
baizectl job submit --name demojob-v2 -t PADDLE \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --queue default \\\n    --working-dir /data \\\n    --datasets fashion-mnist:/data/mnist \\\n    --labels job_type=paddle \\\n    --max-retries 3 \\\n    --max-run-duration 60 \\\n    --restart-policy on-failure \\\n    -- sleep 1000\n
                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_10","title":"\u67e5\u770b\u4efb\u52a1\u5217\u8868","text":"

                                                  baizectl job \u652f\u6301\u901a\u8fc7 ls \u547d\u4ee4\u67e5\u770b\u4efb\u52a1\u5217\u8868\uff0c\u9ed8\u8ba4\u663e\u793a pytroch \u4efb\u52a1\uff0c\u7528\u6237\u53ef\u4ee5\u901a\u8fc7 -t \u6307\u5b9a\u4efb\u52a1\u7c7b\u578b\u3002

(base) jovyan@den-0:~$ baizectl job ls  # view pytorch jobs by default\n NAME        TYPE     PHASE      DURATION  COMMAND    \n demong      PYTORCH  SUCCEEDED  1m2s      sleep 60   \n demo-sleep  PYTORCH  RUNNING    1h25m28s  sleep 7200 \n(base) jovyan@den-0:~$ baizectl job ls demo-sleep  # view a specific job\n NAME        TYPE     PHASE      DURATION  COMMAND     \n demo-sleep  PYTORCH  RUNNING    1h25m28s  sleep 7200 \n(base) jovyan@den-0:~$ baizectl job ls -t TENSORFLOW   # view tensorflow jobs\n NAME       TYPE        PHASE    DURATION  COMMAND    \n demotfjob  TENSORFLOW  CREATED  0s        sleep 1000 \n

The job list is displayed as a table by default. If you want to see more information, use the json or yaml format, which can be specified with the -o parameter.
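For example, to print the same list as machine-readable JSON (mirroring the yaml example below):

baizectl job ls -t TENSORFLOW -o json\n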

                                                  (base) jovyan@den-0:~$ baizectl job ls -t TENSORFLOW -o yaml\n- baseConfig:\n    args:\n    - sleep\n    - \"1000\"\n    image: release.daocloud.io/baize/baize-notebook:v0.5.0\n    labels:\n      app: den\n    podConfig:\n      affinity: {}\n      kubeEnvs:\n      - name: CONDA_EXE\n        value: /opt/conda/bin/conda\n      - name: CONDA_PREFIX\n        value: /opt/conda\n      - name: CONDA_PROMPT_MODIFIER\n        value: '(base) '\n      - name: CONDA_SHLVL\n        value: \"1\"\n      - name: CONDA_DIR\n        value: /opt/conda\n      - name: CONDA_PYTHON_EXE\n        value: /opt/conda/bin/python\n      - name: CONDA_PYTHON_EXE\n        value: /opt/conda/bin/python\n      - name: CONDA_DEFAULT_ENV\n        value: base\n      - name: PATH\n        value: /opt/conda/bin:/opt/conda/condabin:/command:/opt/conda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n      priorityClass: baize-high-priority\n      queue: default\n  creationTimestamp: \"2024-06-16T07:47:27Z\"\n  jobSpec:\n    runPolicy:\n      suspend: true\n    tfReplicaSpecs:\n      Worker:\n        replicas: 1\n        restartPolicy: OnFailure\n        template:\n          metadata:\n            creationTimestamp: null\n          spec:\n            affinity: {}\n            containers:\n            - args:\n              - sleep\n              - \"1000\"\n              env:\n              - name: CONDA_EXE\n                value: /opt/conda/bin/conda\n              - name: CONDA_PREFIX\n                value: /opt/conda\n              - name: CONDA_PROMPT_MODIFIER\n                value: '(base) '\n              - name: CONDA_SHLVL\n                value: \"1\"\n              - name: CONDA_DIR\n                value: /opt/conda\n              - name: CONDA_PYTHON_EXE\n                value: /opt/conda/bin/python\n              - name: CONDA_PYTHON_EXE\n                value: /opt/conda/bin/python\n              - name: CONDA_DEFAULT_ENV\n                value: base\n              - name: PATH\n                value: /opt/conda/bin:/opt/conda/condabin:/command:/opt/conda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n              image: release.daocloud.io/baize/baize-notebook:v0.5.0\n              name: tensorflow\n              resources:\n                limits:\n                  memory: 1Gi\n                requests:\n                  cpu: \"1\"\n                  memory: 2Gi\n              workingDir: /home/jovyan\n            priorityClassName: baize-high-priority\n  name: demotfjob\n  namespace: ns-chuanjia-ndx\n  phase: CREATED\n  roleConfig:\n    TF_WORKER:\n      replicas: 1\n      resources:\n        limits:\n          memory: 1Gi\n        requests:\n          cpu: \"1\"\n          memory: 2Gi\n  totalResources:\n    limits:\n      memory: \"1073741824\"\n    requests:\n      cpu: \"1\"\n      memory: \"2147483648\"\n  trainingConfig:\n    restartPolicy: RESTART_POLICY_ON_FAILURE\n  trainingMode: SINGLE\n  type: TENSORFLOW\n
                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_11","title":"\u67e5\u770b\u4efb\u52a1\u65e5\u5fd7","text":"

                                                  baizectl job supports the logs command for viewing job logs; run baizectl job logs --help for details.

                                                  (base) jovyan@den-0:~$ baizectl job logs --help\nShow logs of a job\n\nUsage:\n  baizectl job logs <job-name> [pod-name] [flags]\n\nAliases:\n  logs, log\n\nFlags:\n  -f, --follow            Specify if the logs should be streamed.\n  -h, --help              help for logs\n  -t, --job-type string   Job type: PYTORCH, TENSORFLOW, PADDLE (default \"PYTORCH\")\n      --paddle            PaddlePaddle Job, has higher priority than --job-type\n      --pytorch           Pytorch Job, has higher priority than --job-type\n      --tail int          Lines of recent log file to display.\n      --tensorflow        Tensorflow Job, has higher priority than --job-type\n      --timestamps        Show timestamps\n

                                                  Note

                                                  • The --follow flag streams the logs in real time
                                                  • The --tail flag specifies the number of recent log lines to display; the default is 50
                                                  • The --timestamps flag shows timestamps

                                                  Example of viewing job logs:

                                                  (base) jovyan@den-0:~$ baizectl job log -t TENSORFLOW tf-sample-job-v2-202406161632-evgrbrhn -f\n2024-06-16 08:33:06.083766: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n2024-06-16 08:33:06.086189: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.\n2024-06-16 08:33:06.132416: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.\n2024-06-16 08:33:06.132903: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\nTo enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n2024-06-16 08:33:07.223046: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\nModel: \"sequential\"\n_________________________________________________________________\n Layer (type)                Output Shape              Param #   \n=================================================================\n Conv1 (Conv2D)              (None, 13, 13, 8)         80        \n\n flatten (Flatten)           (None, 1352)              0         \n\n Softmax (Dense)             (None, 10)                13530     \n\n=================================================================\nTotal params: 13610 (53.16 KB)\nTrainable params: 13610 (53.16 KB)\nNon-trainable params: 0 (0.00 Byte)\n...\n
                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_12","title":"\u5220\u9664\u4efb\u52a1","text":"

                                                  baizectl job supports the delete command for deleting jobs, and multiple jobs can be deleted at once.

                                                  (base) jovyan@den-0:~$ baizectl job delete --help\nDelete a job\n\nUsage:\n  baizectl job delete [flags]\n\nAliases:\n  delete, del, remove, rm\n\nFlags:\n  -h, --help              help for delete\n  -t, --job-type string   Job type: PYTORCH, TENSORFLOW, PADDLE (default \"PYTORCH\")\n      --paddle            PaddlePaddle Job, has higher priority than --job-type\n      --pytorch           Pytorch Job, has higher priority than --job-type\n      --tensorflow        Tensorflow Job, has higher priority than --job-type\n

                                                  Example of deleting jobs:

                                                  (base) jovyan@den-0:~$ baizectl job ls\n NAME        TYPE     PHASE      DURATION  COMMAND    \n demong      PYTORCH  SUCCEEDED  1m2s      sleep 60   \n demo-sleep  PYTORCH  RUNNING    1h20m51s  sleep 7200 \n demojob     PYTORCH  FAILED     16m46s    sleep 1000 \n demojob-v2  PYTORCH  RUNNING    3m13s     sleep 1000 \n demojob-v3  PYTORCH  CREATED    0s        sleep 1000 \n(base) jovyan@den-0:~$ baizectl job delete demojob      # delete a single job\nDelete job demojob in ns-chuanjia-ndx successfully\n(base) jovyan@den-0:~$ baizectl job delete demojob-v2 demojob-v3     # delete multiple jobs\nDelete job demojob-v2 in ns-chuanjia-ndx successfully\nDelete job demojob-v3 in ns-chuanjia-ndx successfully\n
                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_13","title":"\u91cd\u542f\u4efb\u52a1","text":"

                                                  baizectl job supports the restart command for restarting jobs; run baizectl job restart --help for details.

                                                  (base) jovyan@den-0:~$ baizectl job restart --help\nrestart a job\n\nUsage:\n  baizectl job restart [flags] job\n\nAliases:\n  restart, rerun\n\nFlags:\n  -h, --help              help for restart\n  -t, --job-type string   Job type: PYTORCH, TENSORFLOW, PADDLE (default \"PYTORCH\")\n      --paddle            PaddlePaddle Job, has higher priority than --job-type\n      --pytorch           Pytorch Job, has higher priority than --job-type\n      --tensorflow        Tensorflow Job, has higher priority than --job-type\n
                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_14","title":"\u6570\u636e\u96c6\u7ba1\u7406","text":"

                                                  baizectl supports managing datasets. Currently it supports listing datasets, which makes it easy to quickly bind a dataset when training a job.

                                                  (base) jovyan@den-0:~$ baizectl data \nManagement datasets\n\nUsage:\n  baizectl data [flags]\n  baizectl data [command]\n\nAliases:\n  data, dataset, datasets, envs, runtime-envs\n\nAvailable Commands:\n  ls          List datasets\n\nFlags:\n  -h, --help            help for data\n  -o, --output string   Output format. One of: table, json, yaml (default \"table\")\n      --page int        Page number (default 1)\n      --page-size int   Page size (default -1)\n      --search string   Search query\n      --sort string     Sort order\n      --truncate int    Truncate output to the given length, 0 means no truncation (default 50)\n\nUse \"baizectl data [command] --help\" for more information about a command.\n
                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_15","title":"\u67e5\u770b\u6570\u636e\u96c6\u5217\u8868","text":"

                                                  baizectl data supports listing datasets via the ls command; table format is shown by default, and you can specify the output format with the -o flag.

                                                  (base) jovyan@den-0:~$ baizectl data ls\n NAME             TYPE  URI                                                    PHASE \n fashion-mnist    GIT   https://gitee.com/samzong_lu/fashion-mnist.git         READY \n sample-code      GIT   https://gitee.com/samzong_lu/training-sample-code....  READY \n training-output  PVC   pvc://training-output                                  READY \n

                                                  When submitting a training job, you can specify a dataset with the -d or --datasets flag, for example:

                                                  baizectl job submit --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --datasets sample-code:/home/jovyan/code \\\n    -- sleep 1000\n

                                                  To mount multiple datasets at the same time, use the following format:

                                                  baizectl job submit --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --datasets sample-code:/home/jovyan/code fashion-mnist:/home/jovyan/data \\\n    -- sleep 1000\n
                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_16","title":"\u67e5\u770b\u4f9d\u8d56\u5e93\uff08\u73af\u5883\uff09","text":"

                                                  The runtime-env environment is a distinctive environment management capability of the 算丰 AI computing platform. By decoupling the dependency libraries needed for model development, training jobs, and inference, it provides a more flexible way to manage dependencies: instead of repeatedly building complex Docker images, you simply select a suitable environment.

                                                  runtime-env also supports hot updates and dynamic upgrades: environment dependencies can be updated without rebuilding the image.

                                                  baizectl data supports listing environments via the --runtime-env flag of the ls command; table format is shown by default, and you can specify the output format with the -o flag.

                                                  (base) jovyan@den-0:~$ baizectl data ls --runtime-env \n NAME               TYPE   URI                                                    PHASE      \n fashion-mnist      GIT    https://gitee.com/samzong_lu/fashion-mnist.git         READY      \n sample-code        GIT    https://gitee.com/samzong_lu/training-sample-code....  READY      \n training-output    PVC    pvc://training-output                                  READY      \n tensorflow-sample  CONDA  conda://python?version=3.12.3                          PROCESSING \n

                                                  When submitting a training job, you can specify an environment with the --runtime-env flag, for example:

                                                  baizectl job submit --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --runtime-env tensorflow-sample \\\n    -- sleep 1000\n
                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_17","title":"\u9ad8\u7ea7\u7528\u6cd5","text":"

                                                  baizectl supports more advanced usage, such as generating shell completion scripts, targeting a specific cluster and namespace, and targeting a specific workspace.

                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_18","title":"\u81ea\u52a8\u8865\u5168\u811a\u672c\u751f\u6210","text":"
                                                  baizectl completion bash > /etc/bash_completion.d/baizectl\n

                                                  The command above generates a bash completion script and saves it to the file /etc/bash_completion.d/baizectl; you can load it with source /etc/bash_completion.d/baizectl.
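
                                                  For example, to load the completions in the current shell and persist them for future sessions (a minimal sketch, assuming your shell sources ~/.bashrc on startup):

                                                  source /etc/bash_completion.d/baizectl\necho 'source /etc/bash_completion.d/baizectl' >> ~/.bashrc\n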

                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_19","title":"\u4f7f\u7528\u7279\u5b9a\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4","text":"
                                                  baizectl job ls --cluster my-cluster --namespace my-namespace\n

                                                  This command lists all jobs in the my-namespace namespace of the my-cluster cluster.

                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_20","title":"\u4f7f\u7528\u7279\u5b9a\u5de5\u4f5c\u7a7a\u95f4","text":"
                                                  baizectl job ls --workspace 123\n
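
                                                  These scoping flags can be combined with the output options shown earlier; a sketch (assuming they behave as regular global flags on any subcommand):

                                                  baizectl job ls --cluster my-cluster --namespace my-namespace --workspace 123 -o yaml\n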
                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_21","title":"\u5e38\u89c1\u95ee\u9898","text":"
                                                  • Question: Why can't I connect to the server?

                                                    Solution: Check that the --server flag is set correctly and make sure the network connection is working. If the server uses a self-signed certificate, you can use --skip-tls-verify to skip TLS certificate verification. A combined sketch of these flags follows this list.

                                                  • Question: How do I resolve insufficient permissions?

                                                    Solution: Make sure you are logged in with the correct --token flag, and check that the current user has the corresponding operation permissions.

                                                  • Question: Why can't I list datasets?

                                                    Solution: Check that the namespace and workspace are set correctly, and make sure the current user has permission to access these resources.
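
                                                  A combined sketch of the connection-related flags mentioned above (the server URL and token are hypothetical, and it is an assumption that these are global flags accepted by any subcommand):

                                                  baizectl job ls \\\n    --server https://ai-platform.example.com \\\n    --token $TOKEN \\\n    --skip-tls-verify\n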

                                                  "},{"location":"admin/baize/developer/notebooks/baizectl.html#_22","title":"\u7ed3\u8bed","text":"

                                                  With the guide above, you can quickly get started with the baizectl command and manage AI platform resources efficiently in real applications. If you have any questions or problems, refer to baizectl [command] --help for more details.

                                                  "},{"location":"admin/baize/developer/notebooks/baizess.html","title":"baizess \u6362\u6e90\u5de5\u5177\u4f7f\u7528\u6307\u5357","text":"

                                                  baizess is an out-of-the-box source-switching utility built into Notebooks in the AI Lab module. It provides a concise command-line interface that makes it easy to manage the package manager sources of various programming environments. With baizess, you can easily switch the sources of common package managers, ensuring smooth access to the latest libraries and dependencies. By simplifying the source management workflow, the tool improves the productivity of developers and data scientists.

                                                  "},{"location":"admin/baize/developer/notebooks/baizess.html#_1","title":"\u5b89\u88c5","text":"

                                                  Currently, baizess is already integrated into AI Lab; after creating a Notebook, you can use baizess directly inside it.

                                                  "},{"location":"admin/baize/developer/notebooks/baizess.html#_2","title":"\u5feb\u901f\u4e0a\u624b","text":""},{"location":"admin/baize/developer/notebooks/baizess.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"

                                                  The basic information of the baizess command is as follows:

                                                  jovyan@19d0197587cc:/$ baizess\nsource switch tool\n\nUsage:\n  baizess [command] [package-manager]\n\nAvailable Commands:\n  set     Switch the source of specified package manager to current fastest source\n  reset   Reset the source of specified package manager to default source\n\nAvailable Package-managers:\n  apt     (require root privilege)\n  conda\n  pip\n
                                                  "},{"location":"admin/baize/developer/notebooks/baizess.html#_4","title":"\u547d\u4ee4\u683c\u5f0f","text":"

                                                  The basic format of the baizess command is as follows:

                                                  baizess [command] [package-manager]\n

                                                  Here, [command] is the specific operation and [package-manager] specifies the package manager the operation applies to (a usage sketch follows the lists below).

                                                  "},{"location":"admin/baize/developer/notebooks/baizess.html#command","title":"command","text":"
                                                  • set: backs up the current source, runs a speed test, and switches the specified package manager's source to the fastest domestic mirror found.
                                                  • reset: resets the specified package manager to its default source.
                                                  "},{"location":"admin/baize/developer/notebooks/baizess.html#package-manager","title":"\u76ee\u524d\u652f\u6301\u7684 package-manager","text":"
                                                  • apt (switching and resetting the source requires root privileges; the original source is backed up in /etc/apt/backup/)
                                                  • conda (the updated source information is written to ~/.condarc)
                                                  • pip
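
                                                  Putting the command format together, a few typical invocations (using sudo for apt is an assumption about the Notebook image; adapt to whatever privilege escalation is available):

                                                  baizess set pip       # switch pip to the fastest mirror found\nbaizess reset pip     # restore the default pip source\nsudo baizess set apt  # apt requires root privileges\n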
                                                  "},{"location":"admin/baize/developer/notebooks/create.html","title":"\u521b\u5efa Notebook","text":"

                                                  Notebook provides an online, interactive web programming environment, making it convenient for developers to quickly run data science and machine learning experiments.

                                                  After entering the developer console, developers can create and manage Notebooks across different clusters and namespaces.

                                                  1. Click Notebooks in the left navigation bar to enter the Notebook list, then click the Create button on the right.

                                                  2. The system pre-fills basic configuration data, including the target cluster, namespace, Notebook image address, queue, resources, user directory, and so on. Adjust these parameters, then click OK.

                                                  3. A newly created Notebook starts in the Pending state and changes to Running shortly afterwards; by default the newest one appears at the top of the list.

                                                  4. Click ┇ on the right to perform more actions: update parameters, start/pause, clone the Notebook, view workload details, and delete.

                                                  Note

                                                  If you selected CPU-only resources but find that all GPU cards on the node are mounted, you can try adding a container env to resolve the issue:

                                                  NVIDIA_VISIBLE_DEVICES=\"\"\n
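
                                                  If you prefer the command line, a hedged alternative is to set the variable on the Notebook's StatefulSet with kubectl (the namespace and workload name below are hypothetical):

                                                  kubectl -n my-namespace set env statefulset/my-notebook NVIDIA_VISIBLE_DEVICES=""\n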
                                                  "},{"location":"admin/baize/developer/notebooks/delete.html","title":"\u5220\u9664 Notebook","text":"

                                                  If a Notebook becomes redundant, outdated, or otherwise unnecessary, you can delete it from the Notebook list.

                                                  1. Click ┇ on the right side of the Notebook list and select Delete from the pop-up menu.

                                                  2. In the pop-up window, confirm the Notebook to be deleted, enter its name, then click Delete.

                                                  3. A message indicates successful deletion, and the Notebook disappears from the list.

                                                  Caution

                                                  Once deleted, a Notebook cannot be recovered. Proceed with caution.

                                                  "},{"location":"admin/baize/developer/notebooks/notebook-auto-close.html","title":"Notebook \u95f2\u7f6e\u8d85\u65f6\u81ea\u52a8\u5173\u673a","text":"

                                                  By default, to optimize resource utilization, AI Lab enables the Notebook idle timeout auto-shutdown feature: when a Notebook has been inactive for a long time, the system automatically shuts it down to release resources.

                                                  • Advantage: this greatly reduces resource waste caused by long periods of inactivity and improves resource utilization.
                                                  • Disadvantage: if no backup policy is configured for the Notebook, data may be lost.

                                                  Note

                                                  Currently, this feature is configured at the cluster level; it is enabled by default, with a default timeout of 30 minutes.

                                                  "},{"location":"admin/baize/developer/notebooks/notebook-auto-close.html#_1","title":"\u914d\u7f6e\u53d8\u66f4","text":"

                                                  At present the configuration must be changed manually; a more convenient method will be provided in the future.

                                                  Modify the deployment parameters of baize-agent in the worker cluster; the correct way to do this is to update the Helm application.

                                                  "},{"location":"admin/baize/developer/notebooks/notebook-auto-close.html#_2","title":"\u754c\u9762\u5316\u4fee\u6539","text":"
                                                  1. In the cluster management UI, locate the worker cluster, enter the cluster details, select Helm Apps, find baize-agent in the baize-system namespace, and click the Update button in the upper-right corner:

                                                  2. Modify the YAML as shown:

                                                    ...\nnotebook-controller:\n  culling_enabled: false\n  cull_idle_time: 120\n  idleness_check_period: 1\n...\n
                                                  3. After confirming the parameter changes, click Next and OK.

                                                  "},{"location":"admin/baize/developer/notebooks/notebook-auto-close.html#_3","title":"\u547d\u4ee4\u884c\u4fee\u6539","text":"

                                                  After entering the console, use the helm upgrade command to change the configuration:

                                                  # Set the version number\nexport VERSION=0.8.0\n\n# Update the Helm chart.\n# culling_enabled: enable idle auto-shutdown (default true)\n# cull_idle_time: idle timeout in minutes (default 30)\n# idleness_check_period: check interval in minutes (default 1)\nhelm upgrade --install baize-agent baize/baize-agent \\\n    --namespace baize-system \\\n    --create-namespace \\\n    --set global.imageRegistry=release.daocloud.io \\\n    --set notebook-controller.culling_enabled=true \\\n    --set notebook-controller.cull_idle_time=120 \\\n    --set notebook-controller.idleness_check_period=1 \\\n    --version=$VERSION\n
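
                                                  To confirm the overrides took effect, you can inspect the release with standard Helm commands:

                                                  helm -n baize-system get values baize-agent\nhelm -n baize-system status baize-agent\n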

                                                  Note

                                                  To avoid losing data after auto-shutdown, you can upgrade AI Lab to v0.8.0 or later and enable the auto-save-on-shutdown feature in the Notebook configuration.

                                                  "},{"location":"admin/baize/developer/notebooks/notebook-with-envs.html","title":"\u5728 Notebook \u4e2d\u4f7f\u7528\u73af\u5883","text":"

                                                  Environment management is one of the important features of AI Lab. By associating the corresponding environments with a Notebook, you can quickly switch between environments, which makes development and debugging easier.

                                                  "},{"location":"admin/baize/developer/notebooks/notebook-with-envs.html#notebook_1","title":"\u521b\u5efa Notebook \u65f6\u9009\u62e9\u73af\u5883","text":"

                                                  When creating a Notebook, you can select one or more environments (Envs). If no suitable environment exists, you can create a new one in Environment Management.

                                                  For how to create an environment, refer to Environment Management.

                                                  "},{"location":"admin/baize/developer/notebooks/notebook-with-envs.html#notebook_2","title":"\u5728 Notebook \u4f7f\u7528\u73af\u5883","text":"

                                                  Note

                                                  In Notebooks we provide both conda and mamba as environment managers; you can choose the tool that suits your needs.

                                                  In AI Lab we use conda as the environment management tool; in a Notebook you can list the current environments with the !conda env list command.

                                                  (base) jovyan@chuanjia-jupyter-0:~/yolov8$ conda env list\n# conda environments:\n#\ndkj-python312-pure       /opt/baize-runtime-env/dkj-python312-pure/conda/envs/dkj-python312-pure\npython-3.10              /opt/baize-runtime-env/python-3.10/conda/envs/python-3.10\ntorch-smaple             /opt/baize-runtime-env/torch-smaple/conda/envs/torch-smaple\nbase                  *  /opt/conda     # currently active environment\nbaize-base               /opt/conda/envs/baize-base\n

                                                  This command lists all conda environments and marks the currently active one with an asterisk (*).

                                                  "},{"location":"admin/baize/developer/notebooks/notebook-with-envs.html#jupyterlab-kernel","title":"JupyterLab \u7684 Kernel \u73af\u5883\u7ba1\u7406","text":"

                                                  In JupyterLab, the environments associated with a Notebook are automatically bound to the Kernel list, so you can quickly switch environments via the Kernel selector.

                                                  In this way, you can write and debug algorithms using different environments within a single Notebook.

                                                  "},{"location":"admin/baize/developer/notebooks/notebook-with-envs.html#terminal","title":"Terminal \u5207\u6362\u73af\u5883","text":"

                                                  AI Lab Notebooks now also support VSCode.

                                                  If you prefer to manage and switch environments in the Terminal, you can follow the steps below:

                                                  When starting and using a Notebook for the first time, run conda init first, then run conda activate <env_name> to switch to the desired environment.

                                                  (base) jovyan@chuanjia-jupyter-0:~/yolov8$ conda init bash  # initialize the bash environment (only needed on first use)\nno change     /opt/conda/condabin/conda\n change     /opt/conda/bin/conda\n change     /opt/conda/bin/conda-env\n change     /opt/conda/bin/activate\n change     /opt/conda/bin/deactivate\n change     /opt/conda/etc/profile.d/conda.sh\n change     /opt/conda/etc/fish/conf.d/conda.fish\n change     /opt/conda/shell/condabin/Conda.psm1\n change     /opt/conda/shell/condabin/conda-hook.ps1\n change     /opt/conda/lib/python3.11/site-packages/xontrib/conda.xsh\n change     /opt/conda/etc/profile.d/conda.csh\n change     /home/jovyan/.bashrc\n action taken.\nAdded mamba to /home/jovyan/.bashrc\n\n==> For changes to take effect, close and re-open your current shell. <==\n\n(base) jovyan@chuanjia-jupyter-0:~/yolov8$ source ~/.bashrc  # reload the bash environment\n(base) jovyan@chuanjia-jupyter-0:~/yolov8$ conda activate python-3.10   # switch to the python-3.10 environment\n(python-3.10) jovyan@chuanjia-jupyter-0:~/yolov8$ conda env list\n\n              mamba version : 1.5.1\n# conda environments:\n#\ndkj-python312-pure       /opt/baize-runtime-env/dkj-python312-pure/conda/envs/dkj-python312-pure\npython-3.10           *  /opt/baize-runtime-env/python-3.10/conda/envs/python-3.10    # currently active environment\ntorch-smaple             /opt/baize-runtime-env/torch-smaple/conda/envs/torch-smaple\nbase                     /opt/conda\nbaize-base               /opt/conda/envs/baize-base\n

                                                  If you prefer mamba, use mamba init and mamba activate <env_name> instead.
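
                                                  A minimal sketch of the mamba flow, mirroring the conda example above (the environment name is reused from that example):

                                                  mamba init bash             # only needed on first use\nsource ~/.bashrc            # reload the bash environment\nmamba activate python-3.10  # switch environments\n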

                                                  "},{"location":"admin/baize/developer/notebooks/notebook-with-envs.html#_1","title":"\u67e5\u770b\u73af\u5883\u4e2d\u7684\u5305","text":"

                                                  An important benefit of managing multiple environments is that you can use different packages within a single Notebook by quickly switching environments.

                                                  You can use the command below to view all packages in the current environment with conda.

                                                  (python-3.10) jovyan@chuanjia-jupyter-0:~/yolov8$ conda list\n# packages in environment at /opt/baize-runtime-env/python-3.10/conda/envs/python-3.10:\n#\n# Name                    Version                   Build  Channel\n_libgcc_mutex             0.1                        main    defaults\n_openmp_mutex             5.1                       1_gnu    defaults\n... # partial output omitted\nidna                      3.7             py310h06a4308_0    defaults\nipykernel                 6.28.0          py310h06a4308_0    defaults\nipython                   8.20.0          py310h06a4308_0    defaults\nipython_genutils          0.2.0              pyhd3eb1b0_1    defaults\njedi                      0.18.1          py310h06a4308_1    defaults\njinja2                    3.1.4           py310h06a4308_0    defaults\njsonschema                4.19.2          py310h06a4308_0    defaults\njsonschema-specifications 2023.7.1        py310h06a4308_0    defaults\njupyter_client            7.4.9           py310h06a4308_0    defaults\njupyter_core              5.5.0           py310h06a4308_0    defaults\njupyter_events            0.8.0           py310h06a4308_0    defaults\njupyter_server            2.10.0          py310h06a4308_0    defaults\njupyter_server_terminals  0.4.4           py310h06a4308_1    defaults\njupyterlab_pygments       0.2.2           py310h06a4308_0    defaults\n... # partial output omitted\nxz                        5.4.6                h5eee18b_1    defaults\nyaml                      0.2.5                h7b6447c_0    defaults\nzeromq                    4.3.5                h6a678d5_0    defaults\nzlib                      1.2.13               h5eee18b_1    defaults\n
                                                  "},{"location":"admin/baize/developer/notebooks/notebook-with-envs.html#_2","title":"\u66f4\u65b0\u73af\u5883\u7684\u5305","text":"

                                                  Currently, packages in an environment can be updated through Environment Management in the AI Lab UI.

                                                  "},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html","title":"Notebook SSH \u8bbf\u95ee\u6307\u5357","text":"

                                                  Notebooks provided by AI Lab support local access over SSH.

                                                  With simple configuration, you can access Jupyter Notebooks over SSH. Whether you use Windows, Mac, or Linux, you can follow the steps below.

                                                  "},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#ssh","title":"\u914d\u7f6e SSH \u8bbf\u95ee\u51ed\u8bc1","text":""},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#ssh_1","title":"\u751f\u6210 SSH \u5bc6\u94a5\u5bf9","text":"

                                                  First, generate an SSH public/private key pair on your computer. This key pair is used during authentication to ensure secure access.

                                                  Mac/Linux
                                                  1. Open a terminal
                                                  2. Enter the command:

                                                    ssh-keygen -t rsa -b 4096\n
                                                  3. When prompted to "Enter a file in which to save the key", you can press Enter to use the default path or specify a new one.

                                                  4. Next, the system will prompt you for a passphrase (optional), which adds an extra layer of security. If you set one, remember it, because it will be required every time you use the key.
                                                  Windows
                                                  1. Install Git Bash (if you have not already)
                                                  2. Open Git Bash
                                                  3. Enter the command:

                                                    ssh-keygen -t rsa -b 4096\n
                                                  4. Follow the same steps as for Mac/Linux

                                                  "},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#ssh_2","title":"\u6dfb\u52a0 SSH \u516c\u94a5\u5230\u4e2a\u4eba\u4e2d\u5fc3","text":"

                                                  Note

                                                  For detailed steps, see: Configure SSH public key

                                                  1. Open the generated public key file, usually located at ~/.ssh/id_rsa.pub (if you did not change the default path); see the command sketch after this list
                                                  2. Copy the public key content
                                                  3. Log in to the 算丰 AI computing platform, click your account in the upper-right corner, and select Personal Center
                                                  4. On the SSH public key configuration page, add the public key generated locally
                                                  5. Save the changes
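
                                                  To print the public key for copying (assuming the default key path from the generation step):

                                                  cat ~/.ssh/id_rsa.pub\n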
                                                  "},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#notebook-ssh_1","title":"\u5728 Notebook \u4e2d\u5f00\u542f SSH \u8bbf\u95ee","text":"
                                                  1. Log in to the Jupyter Notebook web UI.
                                                  2. Find the Notebook for which you want to enable SSH access.
                                                  3. On the Notebook's settings or details page, find the Enable SSH Access option and turn it on.
                                                  4. Record or copy the displayed SSH access command; it will be used in the SSH connection steps later.
                                                  "},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#ssh_3","title":"\u4e0d\u540c\u73af\u5883\u4e0b\u7684 SSH \u8bbf\u95ee\u65b9\u5f0f","text":""},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#_1","title":"\u8bbf\u95ee\u793a\u4f8b","text":"

                                                  Suppose the SSH access command you obtained is as follows:

                                                      # ssh {USERNAME}@{CLUSTER}.{NAMESPACE}.{NOTEBOOK_NAME}@{UI_LOGIN_IP} -p {UI_LOGIN_PORT}\n    ssh baizeuser01@gpu-cluster.demo.demo-notebook@10.20.100.201 -p 80 -i private_key\n

                                                  Replace USERNAME with your username, UI_LOGIN_IP with the actual host address, and UI_LOGIN_PORT with the actual port number.

                                                  "},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#windows","title":"Windows","text":"

                                                  PuTTY or Git Bash is recommended for the SSH connection.

                                                  PuTTY
                                                  1. Open PuTTY
                                                  2. Enter mockhost (the actual hostname) in the Host Name (or IP address) field
                                                  3. Enter the port number 2222 (the actual port number)
                                                  4. Click Open to start the connection
                                                  5. On first connection you may be prompted to verify the server's identity; click Yes
                                                  Git Bash
                                                  1. Open Git Bash
                                                  2. Enter the access command:

                                                      # ssh {USERNAME}@{CLUSTER}.{NAMESPACE}.{NOTEBOOK_NAME}@{UI_LOGIN_IP} -p {UI_LOGIN_PORT}\n    ssh baizeuser01@gpu-cluster.demo.demo-notebook@10.20.100.201 -p 80 -i private_key\n
                                                  3. Press Enter

                                                  "},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#maclinux","title":"Mac/Linux","text":"
                                                  1. Open a terminal.
                                                  2. Enter the access command:

                                                      # ssh {USERNAME}@{CLUSTER}.{NAMESPACE}.{NOTEBOOK_NAME}@{UI_LOGIN_IP} -p {UI_LOGIN_PORT}\n    ssh baizeuser01@gpu-cluster.demo.demo-notebook@10.20.100.201 -p 80 -i private_key\n
                                                  3. If the system prompts you to accept the host's identity, type yes.

                                                  "},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#ide","title":"\u914d\u5408 IDE \u5b9e\u73b0\u8fdc\u7a0b\u5f00\u53d1","text":"

                                                  Besides command-line tools, you can also use the SSH remote connection features of modern IDEs such as Visual Studio Code (VSCode) and PyCharm to develop in your local IDE while using the remote server's resources.

                                                  Use SSH remote connection in VSCode / Use SSH remote connection in PyCharm

                                                  VSCode supports SSH remote connections through the Remote - SSH extension, allowing you to edit files on the remote server and run commands directly from your local VSCode environment.

                                                  Steps:

                                                  1. Make sure you have installed VSCode and the Remote - SSH extension.
                                                  2. Open VSCode and click the Remote Explorer icon at the bottom of the activity bar on the left.
                                                  3. Select the Remote-SSH: Connect to Host... option, then click + Add New SSH Host...
                                                  4. Enter the SSH connection command, for example:

                                                      # ssh {USERNAME}@{CLUSTER}.{NAMESPACE}.{NOTEBOOK_NAME}@{UI_LOGIN_IP} -p {UI_LOGIN_PORT}\n    ssh baizeuser01@gpu-cluster.demo.demo-notebook@10.20.100.201 -p 80 -i private_key\n
                                                  5. Press Enter. Replace username, mockhost, and 2222 with your actual username, hostname, and port number.

                                                  6. Choose a configuration file to save this SSH host; the default is usually fine.

                                                  Once done, your SSH host is added to the SSH targets list. Click your host to connect. If this is the first connection, you may be prompted to verify the host fingerprint. After accepting, you will be asked for the passphrase (if your SSH key has one). Once connected, you can edit remote files in VSCode just as if you were developing locally, and make use of the remote resources.
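
                                                  Rather than typing the full command each time, you can also store the connection in ~/.ssh/config (a sketch reusing the values from the access example; treating the compound string before the last @ as the SSH user is an assumption about how the platform routes connections):

                                                  cat >> ~/.ssh/config <<'EOF'\nHost ailab-notebook\n    HostName 10.20.100.201\n    Port 80\n    User baizeuser01@gpu-cluster.demo.demo-notebook\n    IdentityFile ~/private_key\nEOF\n# afterwards, simply:\nssh ailab-notebook\n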

                                                  PyCharm Professional supports connecting to a remote server over SSH and developing directly in your local PyCharm.

                                                  Steps:

                                                  1. Open PyCharm and open or create a project
                                                  2. Select File -> Settings (PyCharm -> Preferences on Mac)
                                                  3. In the settings window, navigate to Project: YourProjectName -> Python Interpreter
                                                  4. Click the gear icon in the upper-right corner and select Add...

                                                    • In the pop-up window, select SSH Interpreter
                                                    • Enter the remote host information: hostname (mockhost), port (2222), and username (username). Replace these placeholders with your actual information.
                                                    • Click Next; PyCharm will try to connect to the remote server. If the connection succeeds, you will be asked to enter the passphrase or select a private key file.
                                                  5. After configuration, click Finish. PyCharm will now use the Python interpreter on the remote server.

                                                  "},{"location":"admin/baize/developer/notebooks/notebook-with-ssh.html#_2","title":"\u5b89\u5168\u9650\u5236","text":"

                                                  Within the same Workspace, any user can log in to an SSH-enabled Notebook with their own SSH access credentials. This means that as long as a user has added their SSH public key in the personal center and the Notebook has SSH access enabled, they can connect securely over SSH.

                                                  Note that access permissions may differ between users depending on the Workspace configuration. Make sure you understand and comply with your organization's security and access policies.

                                                  By following the steps above, you should be able to configure and use SSH access to Jupyter Notebooks successfully. If you run into any problems, consult the system help documentation or contact your system administrator.

                                                  "},{"location":"admin/baize/developer/notebooks/start-pause.html","title":"\u542f\u52a8\u548c\u6682\u505c Notebook","text":"

                                                  After a Notebook is created successfully, it usually goes through several states:

                                                  • Pending
                                                  • Running
                                                  • Stopped

                                                  If a Notebook's state is Stopped, click ┇ on the right side of the list and select Start from the pop-up menu.

                                                  The Notebook enters the run queue and its state changes to Pending; if everything is normal, its state will change to Running shortly afterwards.

                                                  When you are done using it, select Pause from the menu to change its state to Stopped.

                                                  "},{"location":"admin/baize/developer/notebooks/view.html","title":"Notebook \u5de5\u4f5c\u8d1f\u8f7d","text":"

                                                  To view a Notebook's workload, do the following:

                                                  1. Click ┇ on the right side of the Notebook list and select Workload Details from the pop-up menu.

                                                  2. You will be taken to the stateful workload (StatefulSet) list, where you can view:

                                                    • Pod running status, IP, resource requests, and usage
                                                    • Container configuration information
                                                    • Access methods: ClusterIP, NodePort
                                                    • Scheduling policies: node and workload affinity/anti-affinity
                                                    • Labels and annotations: key-value pairs for the workload and Pods
                                                    • Auto scaling: HPA, CronHPA, VPA, and other methods are supported
                                                    • Event list: alerts, notifications, and other messages

                                                  3. In the StatefulSet list, click ┇ on the right to perform more actions on the Pods; a command-line sketch follows below.
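
                                                  Equivalently, the same objects can be inspected from the command line (a sketch; the namespace and workload name are hypothetical and require kubectl access to the cluster):

                                                  kubectl -n my-namespace get statefulsets,pods -o wide\nkubectl -n my-namespace describe statefulset my-notebook\n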

                                                  "},{"location":"admin/baize/oam/index.html","title":"\u8fd0\u7ef4\u7ba1\u7406","text":"

                                                  Operations and maintenance is the workspace where IT operations staff manage IT resources and handle their day-to-day work.

                                                  Here you can get an intuitive view of the current usage of clusters, nodes, CPU, GPU, vGPU, and other resources.

                                                  "},{"location":"admin/baize/oam/index.html#_2","title":"\u5e38\u89c1\u672f\u8bed","text":"
                                                  • GPU allocation rate: the GPU allocation across all unfinished jobs in the current cluster, calculated as the ratio of requested GPUs (Request) to the total resources (Total). For example, 8 GPUs requested out of 16 total gives an allocation rate of 50%.
                                                  • GPU utilization: the actual resource usage of all running jobs in the current cluster, calculated as the ratio of GPUs actually used (Usage) to the total resources (Total).
                                                  "},{"location":"admin/baize/oam/resource.html","title":"GPU \u5217\u8868","text":"

                                                  GPU resource information across the entire platform is automatically aggregated, providing a detailed view of GPU devices; you can view load statistics and job information for each kind of GPU card.

                                                  After entering Operations and Maintenance, click Resource Management -> GPU Management in the left navigation bar to view GPU cards and job information.

                                                  "},{"location":"admin/baize/oam/queue/create.html","title":"\u521b\u5efa\u961f\u5217","text":"

                                                  \u5728\u8fd0\u7ef4\u7ba1\u7406\u6a21\u5f0f\u4e2d\uff0c\u961f\u5217\u53ef\u7528\u4e8e\u8c03\u5ea6\u548c\u4f18\u5316\u6279\u5904\u7406\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u5b83\u53ef\u4ee5\u6709\u6548\u5730\u7ba1\u7406\u5728\u96c6\u7fa4\u4e0a\u8fd0\u884c\u7684\u591a\u4e2a\u4efb\u52a1\uff0c\u901a\u8fc7\u961f\u5217\u7cfb\u7edf\u6765\u4f18\u5316\u8d44\u6e90\u5229\u7528\u7387\u3002

                                                  1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u961f\u5217\u7ba1\u7406 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae\u3002

                                                  2. \u7cfb\u7edf\u4f1a\u9884\u5148\u586b\u5145\u57fa\u7840\u8bbe\u7f6e\u6570\u636e\uff0c\u5305\u62ec\u8981\u90e8\u7f72\u7684\u96c6\u7fa4\u3001\u5de5\u4f5c\u7a7a\u95f4\u3001\u6392\u961f\u7b56\u7565\u7b49\u3002 \u8c03\u6574\u8fd9\u4e9b\u53c2\u6570\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                                                  3. \u5c4f\u5e55\u63d0\u793a\u521b\u5efa\uff0c\u8fd4\u56de\u961f\u5217\u7ba1\u7406\u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u7b49\u66f4\u591a\u64cd\u4f5c\u3002

                                                  "},{"location":"admin/baize/oam/queue/delete.html","title":"\u5220\u9664\u961f\u5217","text":"

                                                  \u5728\u8fd0\u7ef4\u7ba1\u7406\u6a21\u5f0f\u4e2d\uff0c\u5982\u679c\u53d1\u73b0\u961f\u5217\u5197\u4f59\u3001\u8fc7\u671f\u6216\u56e0\u5176\u4ed6\u7f18\u6545\u4e0d\u518d\u9700\u8981\uff0c\u53ef\u4ee5\u4ece\u961f\u5217\u5217\u8868\u4e2d\u5220\u9664\u3002

                                                  1. \u5728\u961f\u5217\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u5220\u9664 \u3002

                                                  2. \u5728\u5f39\u7a97\u4e2d\u786e\u8ba4\u8981\u5220\u9664\u7684\u961f\u5217\uff0c\u8f93\u5165\u961f\u5217\u540d\u79f0\u540e\u70b9\u51fb \u5220\u9664 \u3002

                                                  3. \u5c4f\u5e55\u63d0\u793a\u5220\u9664\u6210\u529f\uff0c\u8be5\u961f\u5217\u4ece\u5217\u8868\u4e2d\u6d88\u5931\u3002

                                                  Caution

                                                  \u961f\u5217\u4e00\u65e6\u5220\u9664\u5c06\u4e0d\u53ef\u6062\u590d\uff0c\u8bf7\u8c28\u614e\u64cd\u4f5c\u3002

                                                  "},{"location":"admin/baize/troubleshoot/index.html","title":"\u6545\u969c\u6392\u67e5","text":"

                                                  \u672c\u6587\u5c06\u6301\u7eed\u7edf\u8ba1\u548c\u68b3\u7406 AI Lab \u4f7f\u7528\u8fc7\u7a0b\u53ef\u80fd\u56e0\u73af\u5883\u6216\u64cd\u4f5c\u4e0d\u89c4\u8303\u5f15\u8d77\u7684\u62a5\u9519\uff0c\u4ee5\u53ca\u5728\u4f7f\u7528\u8fc7\u7a0b\u4e2d\u9047\u5230\u67d0\u4e9b\u62a5\u9519\u7684\u95ee\u9898\u5206\u6790\u3001\u89e3\u51b3\u65b9\u6848\u3002

                                                  Warning

                                                  \u672c\u6587\u6863\u4ec5\u9002\u7528\u4e8e AI \u7b97\u529b\u4e2d\u5fc3\u7248\u672c\uff0c\u82e5\u9047\u5230 AI Lab \u7684\u4f7f\u7528\u95ee\u9898\uff0c\u8bf7\u4f18\u5148\u67e5\u770b\u6b64\u6392\u969c\u624b\u518c\u3002

                                                  AI Lab \u5728 AI \u7b97\u529b\u4e2d\u5fc3\u4e2d\u6a21\u5757\u540d\u79f0 baize\uff0c\u63d0\u4f9b\u4e86\u4e00\u7ad9\u5f0f\u7684\u6a21\u578b\u8bad\u7ec3\u3001\u63a8\u7406\u3001\u6a21\u578b\u7ba1\u7406\u7b49\u529f\u80fd\u3002

                                                  "},{"location":"admin/baize/troubleshoot/index.html#_2","title":"\u5e38\u89c1\u6545\u969c\u6848\u4f8b","text":"
                                                  • \u96c6\u7fa4\u4e0b\u62c9\u5217\u8868\u4e2d\u627e\u4e0d\u5230\u96c6\u7fa4
                                                  • Notebook \u4e0d\u53d7\u961f\u5217\u914d\u989d\u63a7\u5236
                                                  • \u961f\u5217\u521d\u59cb\u5316\u5931\u8d25
                                                  "},{"location":"admin/baize/troubleshoot/cluster-not-found.html","title":"\u96c6\u7fa4\u4e0b\u62c9\u5217\u8868\u4e2d\u627e\u4e0d\u5230\u96c6\u7fa4","text":""},{"location":"admin/baize/troubleshoot/cluster-not-found.html#_2","title":"\u95ee\u9898\u73b0\u8c61","text":"

                                                  \u5728 AI Lab \u5f00\u53d1\u63a7\u5236\u53f0\u3001\u8fd0\u7ef4\u63a7\u5236\u53f0\uff0c\u529f\u80fd\u6a21\u5757\u7684\u96c6\u7fa4\u641c\u7d22\u6761\u4ef6\u7684\u4e0b\u62c9\u5217\u8868\u627e\u4e0d\u5230\u60f3\u8981\u7684\u96c6\u7fa4\u3002

                                                  "},{"location":"admin/baize/troubleshoot/cluster-not-found.html#_3","title":"\u95ee\u9898\u5206\u6790","text":"

                                                  \u5728 AI Lab \u4e2d\uff0c\u96c6\u7fa4\u4e0b\u62c9\u5217\u8868\u5982\u679c\u7f3a\u5c11\u4e86\u60f3\u8981\u7684\u96c6\u7fa4\uff0c\u53ef\u80fd\u662f\u7531\u4e8e\u4ee5\u4e0b\u539f\u56e0\u5bfc\u81f4\u7684\uff1a

                                                  • baize-agent \u672a\u5b89\u88c5\u6216\u5b89\u88c5\u4e0d\u6210\u529f\uff0c\u5bfc\u81f4 AI Lab \u65e0\u6cd5\u83b7\u53d6\u96c6\u7fa4\u4fe1\u606f
                                                  • \u5b89\u88c5 baize-agent \u672a\u914d\u7f6e\u96c6\u7fa4\u540d\u79f0\uff0c\u5bfc\u81f4 AI Lab \u65e0\u6cd5\u83b7\u53d6\u96c6\u7fa4\u4fe1\u606f
                                                  • \u5de5\u4f5c\u96c6\u7fa4\u5185\u53ef\u89c2\u6d4b\u7ec4\u4ef6\u5f02\u5e38\uff0c\u5bfc\u81f4\u65e0\u6cd5\u91c7\u96c6\u96c6\u7fa4\u5185\u7684\u6307\u6807\u4fe1\u606f
                                                  "},{"location":"admin/baize/troubleshoot/cluster-not-found.html#_4","title":"\u89e3\u51b3\u529e\u6cd5","text":""},{"location":"admin/baize/troubleshoot/cluster-not-found.html#baize-agent","title":"baize-agent \u672a\u5b89\u88c5\u6216\u5b89\u88c5\u4e0d\u6210\u529f","text":"

                                                  AI Lab \u6709\u4e00\u4e9b\u57fa\u7840\u7ec4\u4ef6\u9700\u8981\u5728\u6bcf\u4e2a\u5de5\u4f5c\u96c6\u7fa4\u5185\u8fdb\u884c\u5b89\u88c5\uff0c\u5982\u679c\u5de5\u4f5c\u96c6\u7fa4\u5185\u672a\u5b89\u88c5 baize-agent \u65f6\uff0c\u53ef\u4ee5\u5728\u754c\u9762\u4e0a\u9009\u62e9\u5b89\u88c5\uff0c\u53ef\u80fd\u4f1a\u5bfc\u81f4\u4e00\u4e9b\u975e\u9884\u671f\u7684\u62a5\u9519\u7b49\u95ee\u9898\u3002

                                                  \u6240\u4ee5\uff0c\u4e3a\u4e86\u4fdd\u969c\u4f7f\u7528\u4f53\u9a8c\uff0c\u53ef\u9009\u62e9\u7684\u96c6\u7fa4\u8303\u56f4\u4ec5\u5305\u542b\u4e86\u5df2\u7ecf\u6210\u529f\u5b89\u88c5\u4e86 baize-agent \u7684\u96c6\u7fa4\u3002

                                                  \u5982\u679c\u662f\u56e0\u4e3a baize-agent \u672a\u5b89\u88c5\u6216\u5b89\u88c5\u5931\u8d25\uff0c\u5219\u4f7f\u7528 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u7ba1\u7406 -> Helm \u5e94\u7528 -> Helm \u6a21\u677f \uff0c\u627e\u5230 baize-agent \u5e76\u5b89\u88c5\u3002

                                                  Note

                                                  \u6b64\u5730\u5740\u5feb\u901f\u8df3\u8f6c https://<ai_host>/kpanda/clusters/<cluster_name>/helm/charts/addon/baize-agent\u3002 \u6ce8\u610f\u5c06 <ai_host> \u66ff\u6362\u4e3a\u5b9e\u9645\u7684 AI \u7b97\u529b\u4e2d\u5fc3\u63a7\u5236\u53f0\u5730\u5740\uff0c<cluster_name> \u66ff\u6362\u4e3a\u5b9e\u9645\u7684\u96c6\u7fa4\u540d\u79f0\u3002
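After installing, a quick CLI check can confirm that the release landed. A minimal sketch, assuming baize-agent was installed into the baize-system namespace with standard Helm labels (both the namespace and the label are assumptions; adjust them to your deployment):

# Look for the baize-agent release in the assumed namespace
helm list -n baize-system

# Check that the agent's pods are running (label selector is an assumption)
kubectl get pods -n baize-system -l app.kubernetes.io/instance=baize-agent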

                                                  "},{"location":"admin/baize/troubleshoot/cluster-not-found.html#baize-agent_1","title":"\u5b89\u88c5 baize-agent \u65f6\u672a\u914d\u7f6e\u96c6\u7fa4\u540d\u79f0","text":"

                                                  \u5728\u5b89\u88c5 baize-agent \u65f6\uff0c\u9700\u8981\u6ce8\u610f\u914d\u7f6e\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fd9\u4e2a\u540d\u79f0\u4f1a\u7528\u4e8e\u53ef\u89c2\u6d4b\u6307\u6807\u91c7\u96c6\uff0c \u9ed8\u8ba4\u4e3a\u7a7a\uff0c\u9700\u624b\u5de5\u914d\u7f6e \u3002

                                                  "},{"location":"admin/baize/troubleshoot/cluster-not-found.html#_5","title":"\u5de5\u4f5c\u96c6\u7fa4\u5185\u53ef\u89c2\u6d4b\u7ec4\u4ef6\u5f02\u5e38","text":"

                                                  \u5982\u679c\u96c6\u7fa4\u5185\u53ef\u89c2\u6d4b\u7ec4\u4ef6\u5f02\u5e38\uff0c\u53ef\u80fd\u4f1a\u5bfc\u81f4 AI Lab \u65e0\u6cd5\u83b7\u53d6\u96c6\u7fa4\u4fe1\u606f\uff0c\u8bf7\u68c0\u67e5\u5e73\u53f0\u7684\u53ef\u89c2\u6d4b\u670d\u52a1\u662f\u5426\u6b63\u5e38\u8fd0\u884c\u53ca\u914d\u7f6e\u3002

                                                  • \u68c0\u67e5\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u5185 insight-server \u7ec4\u4ef6\u662f\u5426\u6b63\u5e38\u8fd0\u884c
                                                  • \u68c0\u67e5\u5de5\u4f5c\u96c6\u7fa4\u5185 insight-agent \u7ec4\u4ef6\u662f\u5426\u6b63\u5e38\u8fd0\u884c
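A minimal sketch of the two checks above, assuming the components are deployed in the insight-system namespace (the namespace and label are assumptions; adjust them to your deployment):

# Global service cluster: check the insight-server Deployment
kubectl get deploy insight-server -n insight-system

# Worker cluster: check the insight-agent pods
kubectl get pods -n insight-system -l app.kubernetes.io/name=insight-agent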
                                                  "},{"location":"admin/baize/troubleshoot/local-queue-initialization-failed.html","title":"\u672c\u5730\u961f\u5217\u521d\u59cb\u5316\u5931\u8d25","text":""},{"location":"admin/baize/troubleshoot/local-queue-initialization-failed.html#_2","title":"\u95ee\u9898\u73b0\u8c61","text":"

                                                  \u5728\u521b\u5efa Notebook\u3001\u8bad\u7ec3\u4efb\u52a1\u6216\u8005\u63a8\u7406\u670d\u52a1\u65f6\uff0c\u5f53\u961f\u5217\u662f\u9996\u6b21\u5728\u8be5\u547d\u540d\u7a7a\u95f4\u4f7f\u7528\u65f6\uff0c\u4f1a\u63d0\u793a\u9700\u8981\u4e00\u952e\u521d\u59cb\u5316\u961f\u5217\uff0c\u4f46\u662f\u521d\u59cb\u5316\u5931\u8d25\u3002

                                                  "},{"location":"admin/baize/troubleshoot/local-queue-initialization-failed.html#_3","title":"\u95ee\u9898\u5206\u6790","text":"

                                                  \u5728 AI Lab \u4e2d\uff0c\u961f\u5217\u7ba1\u7406\u80fd\u529b\u7531 Kueue \u63d0\u4f9b\uff0c \u800c Kueue \u63d0\u4f9b\u4e86 \u4e24\u79cd\u961f\u5217\u7ba1\u7406\u8d44\u6e90\uff1a

                                                  • ClusterQueue \u662f\u96c6\u7fa4\u7ea7\u522b\u7684\u961f\u5217\uff0c\u4e3b\u8981\u7528\u4e8e\u7ba1\u7406\u961f\u5217\u4e2d\u7684\u8d44\u6e90\u914d\u989d\uff0c\u5305\u542b\u4e86 CPU\u3001\u5185\u5b58\u3001GPU \u7b49\u8d44\u6e90
                                                  • LocalQueue \u662f\u547d\u540d\u7a7a\u95f4\u7ea7\u522b\u7684\u961f\u5217\uff0c\u9700\u8981\u6307\u5411\u5230\u4e00\u4e2a ClusterQueue\uff0c\u7528\u4e8e\u4f7f\u7528\u961f\u5217\u4e2d\u7684\u8d44\u6e90\u5206\u914d

                                                  \u5728 AI Lab \u4e2d\uff0c\u5982\u679c\u521b\u5efa\u670d\u52a1\u65f6\uff0c\u53d1\u73b0\u6307\u5b9a\u7684\u547d\u540d\u7a7a\u95f4\u4e0d\u5b58\u5728 LocalQueue\uff0c\u5219\u4f1a\u63d0\u793a\u9700\u8981\u521d\u59cb\u5316\u961f\u5217\u3002

                                                  \u5728\u6781\u5c11\u6570\u60c5\u51b5\u4e0b\uff0c\u53ef\u80fd\u7531\u4e8e\u7279\u6b8a\u539f\u56e0\u4f1a\u5bfc\u81f4 LocalQueue \u521d\u59cb\u5316\u5931\u8d25\u3002
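For reference, what the one-click initialization is expected to end up with resembles the two Kueue resources below; a minimal sketch using the upstream Kueue v1beta1 API, where all names, the namespace, and the quotas are placeholders:

kubectl apply -f - <<EOF
apiVersion: kueue.x-k8s.io/v1beta1
kind: ClusterQueue
metadata:
  name: example-cluster-queue          # placeholder name
spec:
  namespaceSelector: {}                # admit workloads from any namespace
  resourceGroups:
  - coveredResources: ["cpu", "memory", "nvidia.com/gpu"]
    flavors:
    - name: default-flavor             # a ResourceFlavor with this name must exist
      resources:
      - name: cpu
        nominalQuota: 8
      - name: memory
        nominalQuota: 16Gi
      - name: nvidia.com/gpu
        nominalQuota: 2
---
apiVersion: kueue.x-k8s.io/v1beta1
kind: LocalQueue
metadata:
  name: default                        # placeholder queue name
  namespace: my-namespace              # the namespace whose initialization failed
spec:
  clusterQueue: example-cluster-queue  # must point at an existing ClusterQueue
EOF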

                                                  "},{"location":"admin/baize/troubleshoot/local-queue-initialization-failed.html#_4","title":"\u89e3\u51b3\u529e\u6cd5","text":"

                                                  \u68c0\u67e5 Kueue \u662f\u5426\u6b63\u5e38\u8fd0\u884c\uff0c\u5982\u679c kueue-controller-manager \u672a\u8fd0\u884c\uff0c\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u547d\u4ee4\u67e5\u770b\u3002

                                                  kubectl get deploy kueue-controller-manager -n baize-sysatem\n

                                                  \u5982\u679c kueue-controller-manager \u672a\u6b63\u5e38\u8fd0\u884c\uff0c\u8bf7\u5148\u4fee\u590d Kueue\u3002
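If the Deployment exists but is not ready, its logs usually reveal the cause; a minimal sketch, using the same baize-system namespace as above:

# Wait for the rollout and inspect recent controller logs
kubectl rollout status deploy/kueue-controller-manager -n baize-system
kubectl logs deploy/kueue-controller-manager -n baize-system --tail=100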

                                                  "},{"location":"admin/baize/troubleshoot/local-queue-initialization-failed.html#_5","title":"\u53c2\u8003\u8d44\u6599","text":"
                                                  • ClusterQueue
                                                  • LocalQueue
                                                  "},{"location":"admin/baize/troubleshoot/notebook-not-controlled-by-quotas.html","title":"Notebook \u4e0d\u53d7\u961f\u5217\u914d\u989d\u63a7\u5236","text":"

                                                  \u5728 AI Lab \u4e2d\uff0c\u7528\u6237\u5728\u521b\u5efa Notebook \u65f6\uff0c\u53d1\u73b0\u9009\u62e9\u7684\u961f\u5217\u5373\u4f7f\u8d44\u6e90\u4e0d\u8db3\uff0cNotebook \u4f9d\u7136\u53ef\u4ee5\u521b\u5efa\u6210\u529f\u3002

                                                  "},{"location":"admin/baize/troubleshoot/notebook-not-controlled-by-quotas.html#01-kubernetes","title":"\u95ee\u9898 01: Kubernetes \u7248\u672c\u4e0d\u652f\u6301","text":"
                                                  • \u5206\u6790\uff1a

                                                    AI Lab \u4e2d\u7684\u961f\u5217\u7ba1\u7406\u80fd\u529b\u7531 Kueue \u63d0\u4f9b\uff0c Notebook \u670d\u52a1\u662f\u901a\u8fc7 JupyterHub \u63d0\u4f9b\u7684\u3002 JupyterHub \u5bf9 Kubernetes \u7684\u7248\u672c\u8981\u6c42\u8f83\u9ad8\uff0c\u5bf9\u4e8e\u4f4e\u4e8e v1.27 \u7684\u7248\u672c\uff0c\u5373\u4f7f\u5728 AI \u7b97\u529b\u4e2d\u5fc3\u4e2d\u8bbe\u7f6e\u4e86\u961f\u5217\u914d\u989d\uff0c \u7528\u6237\u5728\u521b\u5efa Notebook \u65f6\u4e5f\u9009\u62e9\u4e86\u914d\u989d\uff0c\u4f46 Notebook \u5b9e\u9645\u4e5f\u4e0d\u4f1a\u53d7\u5230\u961f\u5217\u914d\u989d\u7684\u9650\u5236\u3002

                                                  • \u89e3\u51b3\u529e\u6cd5\uff1a\u63d0\u524d\u89c4\u5212\uff0c\u751f\u4ea7\u73af\u5883\u4e2d\u5efa\u8bae\u4f7f\u7528 Kubernetes \u7248\u672c v1.27 \u4ee5\u4e0a\u3002

                                                  • \u53c2\u8003\u8d44\u6599\uff1aJupyter Notebook Documentation

                                                  "},{"location":"admin/baize/troubleshoot/notebook-not-controlled-by-quotas.html#02","title":"\u95ee\u9898 02: \u914d\u7f6e\u672a\u542f\u7528","text":"
                                                  • \u5206\u6790\uff1a

                                                    \u5f53 Kubernetes \u96c6\u7fa4\u7248\u672c \u5927\u4e8e v1.27 \u65f6\uff0cNotebook \u4ecd\u65e0\u6cd5\u53d7\u5230\u961f\u5217\u914d\u989d\u7684\u9650\u5236\u3002

                                                    \u8fd9\u662f\u56e0\u4e3a\uff0cKueue \u9700\u8981\u542f\u7528\u5bf9 enablePlainPod \u652f\u6301\uff0c\u624d\u4f1a\u5bf9 Notebook \u670d\u52a1\u751f\u6548\u3002

                                                  • \u89e3\u51b3\u529e\u6cd5\uff1a\u5728\u5de5\u4f5c\u96c6\u7fa4\u4e2d\u90e8\u7f72 baize-agent \u65f6\uff0c\u542f\u7528 Kueue \u5bf9 enablePlainPod \u7684\u652f\u6301\u3002

                                                  • \u53c2\u8003\u8d44\u6599\uff1aRun Plain Pods as a Kueue-Managed Job
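To verify whether plain-Pod support took effect in Kueue, you can inspect the controller's configuration. A sketch assuming Kueue keeps its configuration in a ConfigMap named kueue-manager-config in the baize-system namespace (the ConfigMap name follows upstream Kueue; the namespace is an assumption): the "pod" entry under integrations.frameworks is what makes plain Pods, and hence Notebooks, queue-managed.

# Dump the Kueue configuration and check the enabled integrations
kubectl get configmap kueue-manager-config -n baize-system -o yaml | grep -A8 'frameworks:'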

                                                  "},{"location":"admin/ghippo/password.html","title":"\u5bc6\u7801\u91cd\u7f6e","text":"

                                                  \u5982\u679c\u60a8\u5fd8\u8bb0\u5bc6\u7801\uff0c\u53ef\u4ee5\u6309\u672c\u9875\u9762\u8bf4\u660e\u91cd\u7f6e\u5bc6\u7801\u3002

                                                  "},{"location":"admin/ghippo/password.html#_2","title":"\u91cd\u7f6e\u5bc6\u7801\u6b65\u9aa4","text":"

                                                  \u7ba1\u7406\u5458\u6700\u521d\u521b\u5efa\u4e00\u4e2a\u7528\u6237\u65f6\uff0c\u4f1a\u4e3a\u5176\u8bbe\u7f6e\u7528\u6237\u540d\u548c\u5bc6\u7801\u3002 \u8be5\u7528\u6237\u767b\u5f55\u540e\uff0c\u5728 \u4e2a\u4eba\u4e2d\u5fc3 \u586b\u5199\u90ae\u7bb1\u5e76\u4fee\u6539\u5bc6\u7801\u3002 \u82e5\u8be5\u7528\u6237\u672a\u8bbe\u7f6e\u90ae\u7bb1\uff0c\u5219\u53ea\u80fd\u8054\u7cfb\u7ba1\u7406\u5458\u8fdb\u884c\u5bc6\u7801\u91cd\u7f6e\u3002

                                                  1. \u5982\u679c\u7528\u6237\u5fd8\u8bb0\u4e86\u5bc6\u7801\uff0c\u53ef\u4ee5\u5728\u767b\u5f55\u754c\u9762\u70b9\u51fb \u5fd8\u8bb0\u5bc6\u7801 \u3002

                                                  2. \u8f93\u5165\u767b\u5f55\u90ae\u7bb1\uff0c\u70b9\u51fb \u63d0\u4ea4 \u3002

                                                  3. \u5728\u90ae\u7bb1\u4e2d\u627e\u5230\u5bc6\u7801\u91cd\u7f6e\u90ae\u4ef6\uff0c\u70b9\u51fb\u4e0b\u65b9\u94fe\u63a5\u8fdb\u884c\u5bc6\u7801\u91cd\u7f6e\uff0c\u94fe\u63a5\u65f6\u6548 5 \u5206\u949f\u3002

                                                  4. \u5728\u624b\u673a\u7b49\u7ec8\u7aef\u8bbe\u5907\u5b89\u88c5\u652f\u6301 2FA \u52a8\u6001\u53e3\u4ee4\u751f\u6210\u7684\u5e94\u7528\uff08\u5982 Google Authenticator\uff09\uff0c\u6309\u7167\u9875\u9762\u63d0\u793a\u914d\u7f6e\u52a8\u6001\u53e3\u4ee4\u4ee5\u6fc0\u6d3b\u8d26\u6237\uff0c\u70b9\u51fb \u63d0\u4ea4 \u3002

                                                  5. \u8bbe\u7f6e\u65b0\u5bc6\u7801\uff0c\u70b9\u51fb \u63d0\u4ea4 \u3002\u8bbe\u7f6e\u65b0\u5bc6\u7801\u7684\u8981\u6c42\u4e0e\u521b\u5efa\u7528\u6237\u65f6\u7684\u5bc6\u7801\u89c4\u5219\u4e00\u81f4\u3002

                                                  6. \u4fee\u6539\u5bc6\u7801\u6210\u529f\uff0c\u76f4\u63a5\u8df3\u8f6c\u9996\u9875\u3002

                                                  "},{"location":"admin/ghippo/password.html#_3","title":"\u91cd\u7f6e\u5bc6\u7801\u6d41\u7a0b","text":"

                                                  \u6574\u4e2a\u5bc6\u7801\u91cd\u7f6e\u7684\u6d41\u7a0b\u793a\u610f\u56fe\u5982\u4e0b\u3002

graph TB

pass[Forgot password] --> usern[Enter username]
--> button[Click the button to send a verification email] --> judge1[Is the username correct?]

    judge1 -.correct.-> judge2[Is an email address bound?]
    judge1 -.incorrect.-> tip1[Prompt that the username is incorrect]

        judge2 -.email bound.-> send[Send the reset email]
        judge2 -.no email bound.-> tip2[Prompt that no email is bound<br>contact the administrator to reset the password]

send --> click[Click the link in the email] --> config[Configure the dynamic token] --> reset[Reset the password]
--> success[Password reset successfully]

classDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;
classDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;
classDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;

class pass,usern,button,tip1,send,tip2,click,config,reset,success plain;
class judge1,judge2 k8s
                                                  "},{"location":"admin/ghippo/access-control/custom-role.html","title":"\u81ea\u5b9a\u4e49\u89d2\u8272","text":"

                                                  \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u521b\u5efa\u4e09\u79cd\u8303\u56f4\u7684\u81ea\u5b9a\u4e49\u89d2\u8272\uff1a

                                                  • \u5e73\u53f0\u89d2\u8272 \u7684\u6743\u9650\u5bf9\u5e73\u53f0\u6240\u6709\u76f8\u5173\u8d44\u6e90\u751f\u6548
                                                  • \u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272 \u7684\u6743\u9650\u5bf9\u8be5\u7528\u6237\u6240\u5728\u7684\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u7684\u8d44\u6e90\u751f\u6548
                                                  • \u6587\u4ef6\u5939\u89d2\u8272 \u7684\u6743\u9650\u5bf9\u8be5\u7528\u6237\u6240\u5728\u7684\u6587\u4ef6\u5939\u53ca\u5176\u4e0b\u7684\u5b50\u6587\u4ef6\u5939\u548c\u5de5\u4f5c\u7a7a\u95f4\u8d44\u6e90\u751f\u6548
                                                  "},{"location":"admin/ghippo/access-control/custom-role.html#_2","title":"\u521b\u5efa\u5e73\u53f0\u89d2\u8272","text":"

                                                  \u5e73\u53f0\u89d2\u8272\u662f\u7c97\u7c92\u5ea6\u89d2\u8272\uff0c\u80fd\u591f\u5bf9\u6240\u9009\u6743\u9650\u5185\u7684\u6240\u6709\u8d44\u6e90\u751f\u6548\u3002\u5982\u6388\u6743\u540e\u7528\u6237\u53ef\u4ee5\u62e5\u6709\u6240\u6709\u5de5\u4f5c\u7a7a\u95f4\u7684\u67e5\u770b\u6743\u9650\u3001\u6240\u6709\u96c6\u7fa4\u7684\u7f16\u8f91\u6743\u9650\u7b49\uff0c\u800c\u4e0d\u80fd\u9488\u5bf9\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u6216\u67d0\u4e2a\u96c6\u7fa4\u751f\u6548\u3002\u5e73\u53f0\u89d2\u8272\u521b\u5efa\u5b8c\u6210\u540e\u53ef\u4ee5\u5728\u7528\u6237/\u7528\u6237\u7ec4\u5217\u8868\u4e2d\u8fdb\u884c\u6388\u6743\u3002

                                                  1. \u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u89d2\u8272 \uff0c\u70b9\u51fb \u521b\u5efa\u81ea\u5b9a\u4e49\u89d2\u8272 \u3002

                                                  2. \u8f93\u5165\u540d\u79f0\u3001\u63cf\u8ff0\uff0c\u9009\u62e9 \u5e73\u53f0\u89d2\u8272 \uff0c\u52fe\u9009\u89d2\u8272\u6743\u9650\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                                                  3. \u8fd4\u56de\u89d2\u8272\u5217\u8868\uff0c\u641c\u7d22\u521a\u521b\u5efa\u7684\u81ea\u5b9a\u4e49\u89d2\u8272\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u6267\u884c\u590d\u5236\u3001\u7f16\u8f91\u548c\u5220\u9664\u7b49\u64cd\u4f5c\u3002

                                                  4. \u5e73\u53f0\u89d2\u8272\u521b\u5efa\u6210\u529f\u540e\uff0c\u53ef\u4ee5\u53bb\u7528\u6237/\u7528\u6237\u7ec4\u6388\u6743\uff0c\u4e3a\u8fd9\u4e2a\u89d2\u8272\u6dfb\u52a0\u7528\u6237\u548c\u7528\u6237\u7ec4\u3002

                                                  "},{"location":"admin/ghippo/access-control/custom-role.html#_3","title":"\u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272","text":"

                                                  \u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u662f\u7ec6\u7c92\u5ea6\u89d2\u8272\uff0c\u9488\u5bf9\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u751f\u6548\u3002\u5982\u5728\u8be5\u89d2\u8272\u4e2d\u9009\u62e9\u5e94\u7528\u5de5\u4f5c\u53f0\u7684\u5168\u90e8\u6743\u9650\uff0c\u7ed9\u7528\u6237\u5728\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u6388\u4e88\u8be5\u89d2\u8272\u540e\uff0c\u8be5\u7528\u6237\u5c06\u4ec5\u80fd\u5728\u8be5\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u4f7f\u7528\u5e94\u7528\u5de5\u4f5c\u53f0\u76f8\u5173\u7684\u529f\u80fd\uff0c\u800c\u65e0\u6cd5\u4f7f\u7528\u5982\u5fae\u670d\u52a1\u5f15\u64ce\u3001\u4e2d\u95f4\u4ef6\u7b49\u5176\u4ed6\u6a21\u5757\u7684\u80fd\u529b\u3002\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u521b\u5efa\u5b8c\u6210\u540e\uff0c\u53ef\u4ee5\u5728\u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7\u4e2d\u9009\u62e9\u5de5\u4f5c\u7a7a\u95f4\u540e\u8fdb\u884c\u6388\u6743\u3002

                                                  1. \u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u89d2\u8272 \uff0c\u70b9\u51fb \u521b\u5efa\u81ea\u5b9a\u4e49\u89d2\u8272 \u3002

                                                  2. \u8f93\u5165\u540d\u79f0\u3001\u63cf\u8ff0\uff0c\u9009\u62e9 \u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272 \uff0c\u52fe\u9009\u89d2\u8272\u6743\u9650\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                                                  3. \u8fd4\u56de\u89d2\u8272\u5217\u8868\uff0c\u641c\u7d22\u521a\u521b\u5efa\u7684\u81ea\u5b9a\u4e49\u89d2\u8272\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u6267\u884c\u590d\u5236\u3001\u7f16\u8f91\u548c\u5220\u9664\u7b49\u64cd\u4f5c\u3002

                                                  4. \u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u521b\u5efa\u6210\u529f\u540e\uff0c\u53ef\u4ee5\u53bb\u5de5\u4f5c\u7a7a\u95f4\u6388\u6743\uff0c\u8bbe\u5b9a\u8fd9\u4e2a\u89d2\u8272\u53ef\u4ee5\u7ba1\u7406\u54ea\u4e9b\u5de5\u4f5c\u7a7a\u95f4\u3002

                                                  "},{"location":"admin/ghippo/access-control/custom-role.html#_4","title":"\u521b\u5efa\u6587\u4ef6\u5939\u89d2\u8272","text":"

                                                  \u6587\u4ef6\u5939\u89d2\u8272\u9488\u5bf9\u67d0\u4e2a\u6587\u4ef6\u5939\u548c\u8be5\u6587\u4ef6\u5939\u4e0b\u7684\u6240\u6709\u5b50\u6587\u4ef6\u5939\u53ca\u5de5\u4f5c\u7a7a\u95f4\u751f\u6548\u3002\u5982\u5728\u8be5\u89d2\u8272\u4e2d\u9009\u62e9\u5168\u5c40\u7ba1\u7406-\u5de5\u4f5c\u7a7a\u95f4\u548c\u5e94\u7528\u5de5\u4f5c\u53f0\uff0c\u7ed9\u7528\u6237\u5728\u67d0\u4e2a\u6587\u4ef6\u5939\u4e0b\u6388\u4e88\u8be5\u89d2\u8272\u540e\uff0c\u8be5\u7528\u6237\u5c06\u80fd\u591f\u5728\u5176\u4e0b\u7684\u6240\u6709\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u4f7f\u7528\u5e94\u7528\u5de5\u4f5c\u53f0\u7684\u76f8\u5173\u529f\u80fd\uff0c\u800c\u65e0\u6cd5\u4f7f\u7528\u5982\u5fae\u670d\u52a1\u5f15\u64ce\u3001\u4e2d\u95f4\u4ef6\u7b49\u5176\u4ed6\u6a21\u5757\u7684\u80fd\u529b\u3002\u6587\u4ef6\u5939\u89d2\u8272\u521b\u5efa\u5b8c\u6210\u540e\uff0c\u53ef\u4ee5\u5728\u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7\u4e2d\u9009\u62e9\u6587\u4ef6\u5939\u540e\u8fdb\u884c\u6388\u6743\u3002 \u8bf7\u6ce8\u610f\uff1a\u5e94\u7528\u5de5\u4f5c\u53f0\u3001\u591a\u4e91\u7f16\u6392\u3001\u955c\u50cf\u4ed3\u5e93\u3001\u5fae\u670d\u52a1\u5f15\u64ce\u3001\u670d\u52a1\u7f51\u683c\u548c\u4e2d\u95f4\u4ef6\u5747\u4f9d\u8d56\u4e8e\u5de5\u4f5c\u7a7a\u95f4\uff0c\u56e0\u6b64\u5728\u521b\u5efa\u6587\u4ef6\u5939\u89d2\u8272\u65f6\u5927\u90e8\u5206\u573a\u666f\u4e0b\u9700\u8981\u7528\u5230\u5de5\u4f5c\u7a7a\u95f4\uff0c\u8bf7\u6ce8\u610f\u5728\u5168\u5c40\u7ba1\u7406-\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u52fe\u9009\u3002

                                                  1. \u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u89d2\u8272 \uff0c\u70b9\u51fb \u521b\u5efa\u81ea\u5b9a\u4e49\u89d2\u8272 \u3002

                                                  2. \u8f93\u5165\u540d\u79f0\u3001\u63cf\u8ff0\uff0c\u9009\u62e9 \u6587\u4ef6\u5939\u89d2\u8272 \uff0c\u52fe\u9009\u89d2\u8272\u6743\u9650\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                                                  3. \u8fd4\u56de\u89d2\u8272\u5217\u8868\uff0c\u641c\u7d22\u521a\u521b\u5efa\u7684\u81ea\u5b9a\u4e49\u89d2\u8272\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u6267\u884c\u590d\u5236\u3001\u7f16\u8f91\u548c\u5220\u9664\u7b49\u64cd\u4f5c\u3002

                                                  4. \u6587\u4ef6\u5939\u89d2\u8272\u521b\u5efa\u6210\u529f\u540e\uff0c\u53ef\u4ee5\u53bb\u6587\u4ef6\u5939\u6388\u6743\uff0c\u8bbe\u5b9a\u8fd9\u4e2a\u89d2\u8272\u53ef\u4ee5\u7ba1\u7406\u54ea\u4e9b\u6587\u4ef6\u5939\u3002

                                                  "},{"location":"admin/ghippo/access-control/docking.html","title":"\u63a5\u5165\u7ba1\u7406","text":"

                                                  \u5f53\u4e24\u4e2a\u6216\u4e24\u4e2a\u4ee5\u4e0a\u5e73\u53f0\u76f8\u4e92\u5bf9\u63a5\u6216\u5d4c\u5165\u65f6\uff0c\u901a\u5e38\u9700\u8981\u8fdb\u884c\u7528\u6237\u4f53\u7cfb\u6253\u901a\u3002 \u5728\u7528\u6237\u6253\u901a\u8fc7\u7a0b\u4e2d\uff0c \u63a5\u5165\u7ba1\u7406 \u4e3b\u8981\u63d0\u4f9b SSO \u63a5\u5165\u80fd\u529b\uff0c\u5f53\u60a8\u9700\u8981\u5c06\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f5c\u4e3a\u7528\u6237\u6e90\u63a5\u5165\u5ba2\u6237\u7cfb\u7edf\u65f6\uff0c \u60a8\u53ef\u4ee5\u901a\u8fc7 \u63a5\u5165\u7ba1\u7406 \u521b\u5efa SSO \u63a5\u5165\u6765\u5b9e\u73b0\u3002

                                                  "},{"location":"admin/ghippo/access-control/docking.html#sso","title":"\u521b\u5efa SSO \u63a5\u5165","text":"

                                                  \u524d\u63d0\uff1a\u62e5\u6709\u5e73\u53f0\u7ba1\u7406\u5458 Admin \u6743\u9650\u6216\u8005\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u7ba1\u7406\u5458 IAM Owner \u6743\u9650\u3002

                                                  1. \u7ba1\u7406\u5458\u8fdb\u5165 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \uff0c\u9009\u62e9 \u63a5\u5165\u7ba1\u7406 \uff0c\u8fdb\u5165\u63a5\u5165\u7ba1\u7406\u5217\u8868\uff0c\u70b9\u51fb\u53f3\u4e0a\u65b9\u7684 \u521b\u5efa SSO \u63a5\u5165 \u3002

                                                  2. \u5728 \u521b\u5efa SSO \u63a5\u5165 \u9875\u9762\u586b\u5199\u5ba2\u6237\u7aef ID\u3002

                                                    • \u5ba2\u6237\u7aef ID\uff1a\u5bf9\u5e94 client \u540d\u79f0
                                                    • \u5ba2\u6237\u7aef\u8bbf\u95ee\u5730\u5740\uff1a\u7528\u6237\u5b8c\u6210\u767b\u5f55\u5e76\u901a\u8fc7\u8eab\u4efd\u9a8c\u8bc1\u540e\uff0c\u8ba4\u8bc1\u670d\u52a1\u5668\u7528\u6765\u91cd\u5b9a\u5411\u7528\u6237\u7684\u5730\u5740\uff0c\u5373 Callback URL

                                                  3. \u521b\u5efa SSO \u63a5\u5165\u6210\u529f\u540e\uff0c\u5728 \u63a5\u5165\u7ba1\u7406 \u7ba1\u7406\u5217\u8868\uff0c\u70b9\u51fb\u521a\u521b\u5efa\u7684\u5ba2\u6237\u7aef ID \u8fdb\u5165\u8be6\u60c5\uff0c \u590d\u5236\u5ba2\u6237\u7aef ID\u3001\u5bc6\u94a5\u548c\u5355\u70b9\u767b\u5f55 URL \u4fe1\u606f\uff0c\u586b\u5199\u81f3\u5ba2\u6237\u7cfb\u7edf\u5b8c\u6210\u7528\u6237\u4f53\u7cfb\u6253\u901a\u3002

                                                    Note

                                                    realm \u540d\u79f0\u4e3a ghippo\u3002
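Client systems often also need the provider's OIDC endpoints in addition to the client ID and secret. Since the realm name is ghippo and, as the LDAP page below notes, the platform's identity layer is Keycloak-based, the discovery document can usually be fetched as follows; the path prefix depends on the Keycloak version and how it is exposed, so treat the exact URL as an assumption:

# Fetch the OpenID Connect discovery document for the ghippo realm
curl -k https://<ai_host>/auth/realms/ghippo/.well-known/openid-configuration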

                                                  "},{"location":"admin/ghippo/access-control/global.html","title":"\u7cfb\u7edf\u89d2\u8272","text":""},{"location":"admin/ghippo/access-control/global.html#_2","title":"\u9002\u7528\u573a\u666f","text":"

                                                  \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u63d0\u4f9b\u4e86\u9884\u7f6e\u7684\u7cfb\u7edf\u89d2\u8272\uff0c\u5e2e\u52a9\u7528\u6237\u7b80\u5316\u89d2\u8272\u6743\u9650\u7684\u4f7f\u7528\u6b65\u9aa4\u3002

                                                  Note

                                                  \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u63d0\u4f9b\u4e86\u4e09\u79cd\u7c7b\u578b\u7684\u7cfb\u7edf\u89d2\u8272\uff0c\u5206\u522b\u4e3a\u5e73\u53f0\u89d2\u8272\u3001\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u548c\u6587\u4ef6\u5939\u89d2\u8272\u3002

                                                  - \u5e73\u53f0\u89d2\u8272\uff1a\u5bf9\u5e73\u53f0\u4e0a\u6240\u6709\u76f8\u5173\u8d44\u6e90\u5177\u6709\u76f8\u5e94\u6743\u9650\uff0c\u8bf7\u524d\u5f80\u7528\u6237/\u7528\u6237\u7ec4\u6388\u6743\u3002\n- \u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\uff1a\u5bf9\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u5177\u6709\u76f8\u5e94\u6743\u9650\uff0c\u8bf7\u524d\u5f80\u5177\u4f53\u5de5\u4f5c\u7a7a\u95f4\u6388\u6743\u3002\n- \u6587\u4ef6\u5939\u89d2\u8272\uff1a\u5bf9\u67d0\u4e2a\u6587\u4ef6\u5939\u3001\u5b50\u6587\u4ef6\u5939\u53ca\u5176\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u7684\u8d44\u6e90\u5177\u6709\u76f8\u5e94\u6743\u9650\uff0c\u8bf7\u524d\u5f80\u5177\u4f53\u6587\u4ef6\u5939\u6388\u6743\u3002\n
                                                  "},{"location":"admin/ghippo/access-control/global.html#_3","title":"\u5e73\u53f0\u89d2\u8272","text":"

                                                  \u5728\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u4e2d\u9884\u5b9a\u4e49\u4e86 5 \u4e2a\u7cfb\u7edf\u89d2\u8272\uff0c\u5206\u522b\u662f\uff1aAdmin\u3001IAM Owner\u3001Audit Owner\u3001 Kpanda Owner \u548c Workspace and Folder Owner \u3002\u8fd9 5 \u4e2a\u89d2\u8272\u7531\u7cfb\u7edf\u521b\u5efa\uff0c\u7528\u6237\u53ea\u80fd\u4f7f\u7528\u4e0d\u80fd\u4fee\u6539\u3002\u89d2\u8272\u5bf9\u5e94\u7684\u6743\u9650\u5982\u4e0b\uff1a

                                                  \u89d2\u8272\u540d\u79f0 \u89d2\u8272\u7c7b\u578b \u6240\u5c5e\u6a21\u5757 \u89d2\u8272\u6743\u9650 Admin \u7cfb\u7edf\u89d2\u8272 \u5168\u90e8 \u5e73\u53f0\u7ba1\u7406\u5458\uff0c\u7ba1\u7406\u6240\u6709\u5e73\u53f0\u8d44\u6e90\uff0c\u4ee3\u8868\u5e73\u53f0\u7684\u6700\u9ad8\u6743\u9650 IAM Owner \u7cfb\u7edf\u89d2\u8272 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u7684\u7ba1\u7406\u5458\uff0c\u62e5\u6709\u8be5\u670d\u52a1\u4e0b\u7684\u6240\u6709\u6743\u9650\uff0c\u5982\u7ba1\u7406\u7528\u6237/\u7528\u6237\u7ec4\u53ca\u6388\u6743 Audit Owner \u7cfb\u7edf\u89d2\u8272 \u5ba1\u8ba1\u65e5\u5fd7 \u5ba1\u8ba1\u65e5\u5fd7\u7684\u7ba1\u7406\u5458\uff0c\u62e5\u6709\u8be5\u670d\u52a1\u4e0b\u7684\u6240\u6709\u6743\u9650\uff0c\u5982\u8bbe\u7f6e\u5ba1\u8ba1\u65e5\u5fd7\u7b56\u7565\uff0c\u5bfc\u51fa\u5ba1\u8ba1\u65e5\u5fd7 Kpanda Owner \u7cfb\u7edf\u89d2\u8272 \u5bb9\u5668\u7ba1\u7406 \u5bb9\u5668\u7ba1\u7406\u7684\u7ba1\u7406\u5458\uff0c\u62e5\u6709\u8be5\u670d\u52a1\u4e0b\u7684\u6240\u6709\u6743\u9650\uff0c\u5982\u521b\u5efa/\u63a5\u5165\u96c6\u7fa4\uff0c\u90e8\u7f72\u5e94\u7528\uff0c\u7ed9\u7528\u6237/\u7528\u6237\u7ec4\u6388\u4e88\u96c6\u7fa4/\u547d\u540d\u7a7a\u95f4\u76f8\u5173\u7684\u6743\u9650 Workspace and Folder Owner \u7cfb\u7edf\u89d2\u8272 \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7\u7ba1\u7406\u5458\uff0c\u62e5\u6709\u8be5\u670d\u52a1\u4e0b\u7684\u6240\u6709\u6743\u9650\uff0c\u5982\u521b\u5efa\u6587\u4ef6\u5939/\u5de5\u4f5c\u7a7a\u95f4\uff0c\u7ed9\u7528\u6237/\u7528\u6237\u7ec4\u6388\u6743\u6587\u4ef6\u5939/\u5de5\u4f5c\u7a7a\u95f4\u7684\u76f8\u5173\u6743\u9650\uff0c\u5728\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u4f7f\u7528\u5e94\u7528\u5de5\u4f5c\u53f0\u3001\u5fae\u670d\u52a1\u5f15\u64ce\u7b49\u529f\u80fd"},{"location":"admin/ghippo/access-control/global.html#_4","title":"\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272","text":"

Three system roles are predefined in User and Access Control: Workspace Admin, Workspace Editor, and Workspace Viewer. These three roles are created by the system; users can use them but cannot modify them. The permissions of each role are as follows:

| Role Name | Role Type | Module | Role Permissions |
| --- | --- | --- | --- |
| Workspace Admin | System role | Workspace | Management permission on the workspace |
| Workspace Editor | System role | Workspace | Edit permission on the workspace |
| Workspace Viewer | System role | Workspace | Read-only permission on the workspace |
"},{"location":"admin/ghippo/access-control/global.html#_5","title":"Folder Roles","text":"

Three system roles are predefined in User and Access Control: Folder Admin, Folder Editor, and Folder Viewer. These three roles are created by the system; users can use them but cannot modify them. The permissions of each role are as follows:

| Role Name | Role Type | Module | Role Permissions |
| --- | --- | --- | --- |
| Folder Admin | System role | Workspace | Management permission on the folder, its subfolders, and their workspaces |
| Folder Editor | System role | Workspace | Edit permission on the folder, its subfolders, and their workspaces |
| Folder Viewer | System role | Workspace | Read-only permission on the folder, its subfolders, and their workspaces |
"},{"location":"admin/ghippo/access-control/group.html","title":"User Groups","text":"

A user group is a collection of users. By joining a user group, a user inherits the group's role permissions. Granting permissions to users in batches through user groups makes it easier to manage users and their permissions.

"},{"location":"admin/ghippo/access-control/group.html#_2","title":"Applicable Scenarios","text":"

When a user's permissions change, you only need to move the user to the appropriate user group, without affecting other users.

When a user group's permissions change, you only need to modify the group's role permissions, and the change applies to all users in the group.

                                                  "},{"location":"admin/ghippo/access-control/group.html#_3","title":"\u521b\u5efa\u7528\u6237\u7ec4","text":"

                                                  \u524d\u63d0\uff1a\u62e5\u6709\u5e73\u53f0\u7ba1\u7406\u5458 Admin \u6743\u9650\u6216\u8005\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u7ba1\u7406\u5458 IAM Owner \u6743\u9650\u3002

                                                  1. \u7ba1\u7406\u5458\u8fdb\u5165 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \uff0c\u9009\u62e9 \u7528\u6237\u7ec4 \uff0c\u8fdb\u5165\u7528\u6237\u7ec4\u5217\u8868\uff0c\u70b9\u51fb\u53f3\u4e0a\u65b9\u7684 \u521b\u5efa\u7528\u6237\u7ec4 \u3002

                                                  2. \u5728 \u521b\u5efa\u7528\u6237\u7ec4 \u9875\u9762\u586b\u5199\u7528\u6237\u7ec4\u4fe1\u606f\u3002

                                                  3. \u70b9\u51fb \u786e\u5b9a \uff0c\u521b\u5efa\u7528\u6237\u7ec4\u6210\u529f\uff0c\u8fd4\u56de\u7528\u6237\u7ec4\u5217\u8868\u9875\u9762\u3002\u5217\u8868\u4e2d\u7684\u7b2c\u4e00\u884c\u662f\u65b0\u521b\u5efa\u7684\u7528\u6237\u7ec4\u3002

                                                  "},{"location":"admin/ghippo/access-control/group.html#_4","title":"\u4e3a\u7528\u6237\u7ec4\u6388\u6743","text":"

                                                  \u524d\u63d0\uff1a\u8be5\u7528\u6237\u7ec4\u5df2\u5b58\u5728\u3002

                                                  1. \u7ba1\u7406\u5458\u8fdb\u5165 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \uff0c\u9009\u62e9 \u7528\u6237\u7ec4 \uff0c\u8fdb\u5165\u7528\u6237\u7ec4\u5217\u8868\uff0c\u70b9\u51fb ... -> \u6388\u6743 \u3002

                                                  2. \u5728 \u6388\u6743 \u9875\u9762\u52fe\u9009\u9700\u8981\u7684\u89d2\u8272\u6743\u9650\uff08\u53ef\u591a\u9009\uff09\u3002

                                                  3. \u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u4e3a\u7528\u6237\u7ec4\u7684\u6388\u6743\u3002\u81ea\u52a8\u8fd4\u56de\u7528\u6237\u7ec4\u5217\u8868\uff0c\u70b9\u51fb\u67d0\u4e2a\u7528\u6237\u7ec4\uff0c\u53ef\u4ee5\u67e5\u770b\u7528\u6237\u7ec4\u88ab\u6388\u4e88\u7684\u6743\u9650\u3002

                                                  "},{"location":"admin/ghippo/access-control/group.html#_5","title":"\u7ed9\u7528\u6237\u7ec4\u6dfb\u52a0\u7528\u6237","text":"
                                                  1. \u7ba1\u7406\u5458\u8fdb\u5165 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \uff0c\u9009\u62e9 \u7528\u6237\u7ec4 \u8fdb\u5165\u7528\u6237\u7ec4\u5217\u8868\uff0c\u5728\u67d0\u4e2a\u7528\u6237\u7ec4\u53f3\u4fa7\uff0c\u70b9\u51fb ... -> \u6dfb\u52a0\u7528\u6237 \u3002

                                                  2. \u5728 \u6dfb\u52a0\u7528\u6237 \u9875\u9762\u70b9\u9009\u9700\u8981\u6dfb\u52a0\u7684\u7528\u6237\uff08\u53ef\u591a\u9009\uff09\u3002\u82e5\u6ca1\u6709\u53ef\u9009\u7684\u7528\u6237\uff0c\u70b9\u51fb \u524d\u5f80\u521b\u5efa\u65b0\u7528\u6237 \uff0c\u5148\u524d\u5f80\u521b\u5efa\u7528\u6237\uff0c\u518d\u8fd4\u56de\u8be5\u9875\u9762\u70b9\u51fb \u5237\u65b0 \u6309\u94ae\uff0c\u663e\u793a\u521a\u521b\u5efa\u7684\u7528\u6237\u3002

                                                  3. \u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u7ed9\u7528\u6237\u7ec4\u6dfb\u52a0\u7528\u6237\u3002

                                                  Note

                                                  \u7528\u6237\u7ec4\u4e2d\u7684\u7528\u6237\u4f1a\u7ee7\u627f\u7528\u6237\u7ec4\u7684\u6743\u9650\uff1b\u53ef\u4ee5\u5728\u7528\u6237\u7ec4\u8be6\u60c5\u4e2d\u67e5\u770b\u52a0\u5165\u8be5\u7ec4\u7684\u7528\u6237\u3002

                                                  "},{"location":"admin/ghippo/access-control/group.html#_6","title":"\u5220\u9664\u7528\u6237\u7ec4","text":"

                                                  \u8bf4\u660e\uff1a\u5220\u9664\u7528\u6237\u7ec4\uff0c\u4e0d\u4f1a\u5220\u9664\u7ec4\u5185\u7684\u7528\u6237\uff0c\u4f46\u7ec4\u5185\u7528\u6237\u5c06\u65e0\u6cd5\u518d\u7ee7\u627f\u8be5\u7ec4\u7684\u6743\u9650

                                                  1. \u7ba1\u7406\u5458\u8fdb\u5165 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \uff0c\u9009\u62e9 \u7528\u6237\u7ec4 \u8fdb\u5165\u7528\u6237\u7ec4\u5217\u8868\uff0c\u5728\u67d0\u4e2a\u7528\u6237\u7ec4\u53f3\u4fa7\uff0c\u70b9\u51fb ... -> \u5220\u9664 \u3002

                                                  2. \u70b9\u51fb \u79fb\u9664 \u5220\u9664\u7528\u6237\u7ec4\u3002

                                                  3. \u8fd4\u56de\u7528\u6237\u7ec4\u5217\u8868\uff0c\u5c4f\u5e55\u4e0a\u65b9\u5c06\u63d0\u793a\u5220\u9664\u6210\u529f\u3002

                                                  Note

                                                  \u8bf4\u660e\uff1a\u5220\u9664\u7528\u6237\u7ec4\uff0c\u4e0d\u4f1a\u5220\u9664\u7ec4\u5185\u7684\u7528\u6237\uff0c\u4f46\u7ec4\u5185\u7528\u6237\u5c06\u65e0\u6cd5\u518d\u7ee7\u627f\u8be5\u7ec4\u7684\u6743\u9650\u3002

                                                  "},{"location":"admin/ghippo/access-control/iam.html","title":"\u4ec0\u4e48\u662f\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236","text":"

                                                  IAM\uff08Identity and Access Management\uff0c\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\uff09\u662f\u5168\u5c40\u7ba1\u7406\u7684\u4e00\u4e2a\u91cd\u8981\u6a21\u5757\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u6a21\u5757\u521b\u5efa\u3001\u7ba1\u7406\u548c\u9500\u6bc1\u7528\u6237\uff08\u7528\u6237\u7ec4\uff09\uff0c\u5e76\u4f7f\u7528\u7cfb\u7edf\u89d2\u8272\u548c\u81ea\u5b9a\u4e49\u89d2\u8272\u63a7\u5236\u5176\u4ed6\u7528\u6237\u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u6743\u9650\u3002

                                                  "},{"location":"admin/ghippo/access-control/iam.html#_2","title":"\u4f18\u52bf","text":"
                                                  • \u7b80\u6d01\u6d41\u7545

                                                    \u4f01\u4e1a\u5185\u90e8\u7684\u7ed3\u6784\u548c\u89d2\u8272\u53ef\u80fd\u975e\u5e38\u590d\u6742\uff0c\u9879\u76ee\u3001\u5de5\u4f5c\u5c0f\u7ec4\u53ca\u6388\u6743\u7684\u7ba1\u7406\u90fd\u5728\u4e0d\u65ad\u5730\u53d8\u5316\u3002\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u91c7\u7528\u6e05\u6670\u6574\u6d01\u7684\u9875\u9762\uff0c\u6253\u901a\u7528\u6237\u3001\u7528\u6237\u7ec4\u3001\u89d2\u8272\u4e4b\u95f4\u7684\u6388\u6743\u5173\u7cfb\uff0c\u4ee5\u6700\u77ed\u94fe\u8def\u5b9e\u73b0\u5bf9\u7528\u6237\uff08\u7528\u6237\u7ec4\uff09\u7684\u6388\u6743\u3002

                                                  • \u9002\u5f53\u7684\u89d2\u8272

                                                    \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u4e3a\u6bcf\u4e2a\u5b50\u6a21\u5757\u9884\u5b9a\u4e49\u4e86\u4e00\u4e2a\u7ba1\u7406\u5458\u89d2\u8272\uff0c\u65e0\u9700\u7528\u6237\u7ef4\u62a4\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u5c06\u5e73\u53f0\u9884\u5b9a\u4e49\u7684\u7cfb\u7edf\u89d2\u8272\u6388\u6743\u7ed9\u7528\u6237\uff0c\u5b9e\u73b0\u5e73\u53f0\u7684\u6a21\u5757\u5316\u7ba1\u7406\uff08\u7ec6\u7c92\u5ea6\u6743\u9650\u8bf7\u53c2\u9605\u6743\u9650\u7ba1\u7406\u3002

                                                  • \u4f01\u4e1a\u7ea7\u8bbf\u95ee\u63a7\u5236

                                                    \u5f53\u60a8\u5e0c\u671b\u672c\u4f01\u4e1a\u5458\u5de5\u53ef\u4ee5\u4f7f\u7528\u4f01\u4e1a\u5185\u90e8\u7684\u8ba4\u8bc1\u7cfb\u7edf\u767b\u5f55\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\uff0c\u800c\u4e0d\u9700\u8981\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u521b\u5efa\u5bf9\u5e94\u7684\u7528\u6237\uff0c\u60a8\u53ef\u4ee5\u4f7f\u7528\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u7684\u8eab\u4efd\u63d0\u4f9b\u5546\u529f\u80fd\uff0c\u5efa\u7acb\u60a8\u6240\u5728\u4f01\u4e1a\u4e0e\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u4fe1\u4efb\u5173\u7cfb\uff0c\u901a\u8fc7\u8054\u5408\u8ba4\u8bc1\u4f7f\u5458\u5de5\u4f7f\u7528\u4f01\u4e1a\u5df2\u6709\u8d26\u53f7\u76f4\u63a5\u767b\u5f55\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5b9e\u73b0\u5355\u70b9\u767b\u5f55\u3002

                                                  "},{"location":"admin/ghippo/access-control/iam.html#_3","title":"\u4f7f\u7528\u6d41\u7a0b","text":"

                                                  \u6709\u5173\u8bbf\u95ee\u63a7\u5236\u7684\u5e38\u89c4\u6d41\u7a0b\u4e3a\uff1a

                                                  graph TD\n    login[\u767b\u5f55] --> user[\u521b\u5efa\u7528\u6237]\n    user --> auth[\u4e3a\u7528\u6237\u6388\u6743]\n    auth --> group[\u521b\u5efa\u7528\u6237\u7ec4]\n    group --> role[\u521b\u5efa\u81ea\u5b9a\u4e49\u89d2\u8272]\n    role --> id[\u521b\u5efa\u8eab\u4efd\u63d0\u4f9b\u5546]\n\n classDef plain fill:#ddd,stroke:#fff,stroke-width:4px,color:#000;\n classDef k8s fill:#326ce5,stroke:#fff,stroke-width:4px,color:#fff;\n classDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n class login,user,auth,group,role,id cluster;\n\nclick login \"https://docs.daocloud.io/ghippo/install/login.html\"\nclick user \"https://docs.daocloud.io/ghippo/access-control/user.html\"\nclick auth \"https://docs.daocloud.io/ghippo/access-control/role.html\"\nclick group \"https://docs.daocloud.io/ghippo/access-control/group.html\"\nclick role \"https://docs.daocloud.io/ghippo/access-control/custom-role.html\"\nclick id \"https://docs.daocloud.io/ghippo/access-control/idprovider.html\"
                                                  "},{"location":"admin/ghippo/access-control/idprovider.html","title":"\u8eab\u4efd\u63d0\u4f9b\u5546","text":"

                                                  \u5168\u5c40\u7ba1\u7406\u652f\u6301\u57fa\u4e8e LDAP \u548c OIDC \u534f\u8bae\u7684\u5355\u70b9\u767b\u5f55\uff0c\u5982\u679c\u60a8\u7684\u4f01\u4e1a\u6216\u7ec4\u7ec7\u5df2\u6709\u81ea\u5df1\u7684\u8d26\u53f7\u4f53\u7cfb\uff0c\u540c\u65f6\u5e0c\u671b\u7ba1\u7406\u7ec4\u7ec7\u5185\u7684\u6210\u5458\u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8d44\u6e90\uff0c\u60a8\u53ef\u4ee5\u4f7f\u7528\u5168\u5c40\u7ba1\u7406\u63d0\u4f9b\u7684\u8eab\u4efd\u63d0\u4f9b\u5546\u529f\u80fd\uff0c\u800c\u4e0d\u5fc5\u5728\u60a8\u7684\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4e3a\u6bcf\u4e00\u4f4d\u7ec4\u7ec7\u6210\u5458\u521b\u5efa\u7528\u6237\u540d/\u5bc6\u7801\u3002\u60a8\u53ef\u4ee5\u5411\u8fd9\u4e9b\u5916\u90e8\u7528\u6237\u8eab\u4efd\u6388\u4e88\u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8d44\u6e90\u7684\u6743\u9650\u3002

                                                  "},{"location":"admin/ghippo/access-control/idprovider.html#_2","title":"\u57fa\u672c\u6982\u5ff5","text":"
                                                  • \u8eab\u4efd\u63d0\u4f9b\u5546\uff08Identity Provider\uff0c\u7b80\u79f0 IdP\uff09

                                                    \u8d1f\u8d23\u6536\u96c6\u548c\u5b58\u50a8\u7528\u6237\u8eab\u4efd\u4fe1\u606f\u3001\u7528\u6237\u540d\u3001\u5bc6\u7801\u7b49\uff0c\u5728\u7528\u6237\u767b\u5f55\u65f6\u8d1f\u8d23\u8ba4\u8bc1\u7528\u6237\u7684\u670d\u52a1\u3002\u5728\u4f01\u4e1a\u4e0e\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8fdb\u884c\u8eab\u4efd\u8ba4\u8bc1\u7684\u8fc7\u7a0b\u4e2d\uff0c\u8eab\u4efd\u63d0\u4f9b\u5546\u6307\u4f01\u4e1a\u81ea\u8eab\u7684\u8eab\u4efd\u63d0\u4f9b\u5546\u3002

                                                  • \u670d\u52a1\u63d0\u4f9b\u5546\uff08Service Provider\uff0c\u7b80\u79f0 SP\uff09

                                                    \u670d\u52a1\u63d0\u4f9b\u5546\u901a\u8fc7\u4e0e\u8eab\u4efd\u63d0\u4f9b\u5546 IdP \u5efa\u7acb\u4fe1\u4efb\u5173\u7cfb\uff0c\u4f7f\u7528 IDP \u63d0\u4f9b\u7684\u7528\u6237\u4fe1\u606f\uff0c\u4e3a\u7528\u6237\u63d0\u4f9b\u5177\u4f53\u7684\u670d\u52a1\u3002\u5728\u4f01\u4e1a\u4e0e\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8fdb\u884c\u8eab\u4efd\u8ba4\u8bc1\u7684\u8fc7\u7a0b\u4e2d\uff0c\u670d\u52a1\u63d0\u4f9b\u5546\u6307 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u3002

                                                  • LDAP

                                                    LDAP \u6307\u8f7b\u578b\u76ee\u5f55\u8bbf\u95ee\u534f\u8bae\uff08Lightweight Directory Access Protocol\uff09\uff0c\u5e38\u7528\u4e8e\u5355\u70b9\u767b\u5f55\uff0c\u5373\u7528\u6237\u53ef\u4ee5\u5728\u591a\u4e2a\u670d\u52a1\u4e2d\u4f7f\u7528\u4e00\u4e2a\u8d26\u53f7\u5bc6\u7801\u8fdb\u884c\u767b\u5f55\u3002\u5168\u5c40\u7ba1\u7406\u652f\u6301 LDAP \u8fdb\u884c\u8eab\u4efd\u8ba4\u8bc1\uff0c\u56e0\u6b64\u4e0e\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u901a\u8fc7 LDAP \u534f\u8bae\u5efa\u7acb\u8eab\u4efd\u8ba4\u8bc1\u7684\u4f01\u4e1a IdP \u5fc5\u987b\u652f\u6301 LDAP \u534f\u8bae\u3002\u5173\u4e8e LDAP \u7684\u8be6\u7ec6\u63cf\u8ff0\u8bf7\u53c2\u89c1\uff1a\u6b22\u8fce\u4f7f\u7528 LDAP\u3002

                                                  • OIDC

                                                    OIDC \u662f OpenID Connect \u7684\u7b80\u79f0\uff0c\u662f\u4e00\u4e2a\u57fa\u4e8e OAuth 2.0 \u534f\u8bae\u7684\u8eab\u4efd\u8ba4\u8bc1\u6807\u51c6\u534f\u8bae\u3002\u5168\u5c40\u7ba1\u7406\u652f\u6301\u4f7f\u7528 OIDC \u534f\u8bae\u8fdb\u884c\u8eab\u4efd\u8ba4\u8bc1\uff0c\u56e0\u6b64\u4e0e\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u901a\u8fc7 OIDC \u534f\u8bae\u5efa\u7acb\u8eab\u4efd\u8ba4\u8bc1\u7684\u4f01\u4e1a IdP \u5fc5\u987b\u652f\u6301 OIDC \u534f\u8bae\u3002\u5173\u4e8e OIDC \u7684\u8be6\u7ec6\u63cf\u8ff0\u8bf7\u53c2\u89c1\uff1a\u6b22\u8fce\u4f7f\u7528 OpenID Connect\u3002

                                                  • OAuth 2.0

                                                    OAuth 2.0 \u662f Open Authorization 2.0 \u7684\u7b80\u79f0\uff0c\u662f\u4e00\u79cd\u5f00\u653e\u6388\u6743\u534f\u8bae\uff0c\u6388\u6743\u6846\u67b6\u652f\u6301\u7b2c\u4e09\u65b9\u5e94\u7528\u7a0b\u5e8f\u4ee5\u81ea\u5df1\u7684\u540d\u4e49\u83b7\u53d6\u8bbf\u95ee\u6743\u9650\u3002

                                                  "},{"location":"admin/ghippo/access-control/idprovider.html#_3","title":"\u529f\u80fd\u7279\u6027","text":"
                                                  • \u7ba1\u7406\u5458\u65e0\u9700\u91cd\u65b0\u521b\u5efa\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7528\u6237

                                                    \u4f7f\u7528\u8eab\u4efd\u63d0\u4f9b\u5546\u8fdb\u884c\u8eab\u4efd\u8ba4\u8bc1\u524d\uff0c\u7ba1\u7406\u5458\u9700\u8981\u5728\u4f01\u4e1a\u7ba1\u7406\u7cfb\u7edf\u548c\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u5206\u522b\u4e3a\u7528\u6237\u521b\u5efa\u8d26\u53f7\uff1b\u4f7f\u7528\u8eab\u4efd\u63d0\u4f9b\u5546\u8fdb\u884c\u8eab\u4efd\u8ba4\u8bc1\u540e\uff0c\u4f01\u4e1a\u7ba1\u7406\u5458\u53ea\u9700\u8981\u5728\u4f01\u4e1a\u7ba1\u7406\u7cfb\u7edf\u4e2d\u4e3a\u7528\u6237\u521b\u5efa\u8d26\u53f7\uff0c\u7528\u6237\u5373\u53ef\u540c\u65f6\u8bbf\u95ee\u4e24\u4e2a\u7cfb\u7edf\uff0c\u964d\u4f4e\u4e86\u4eba\u5458\u7ba1\u7406\u6210\u672c\u3002

                                                  • \u7528\u6237\u65e0\u9700\u8bb0\u4f4f\u4e24\u5957\u5e73\u53f0\u8d26\u53f7

                                                    \u4f7f\u7528\u8eab\u4efd\u63d0\u4f9b\u5546\u8fdb\u884c\u8eab\u4efd\u8ba4\u8bc1\u524d\uff0c\u7528\u6237\u8bbf\u95ee\u4f01\u4e1a\u7ba1\u7406\u7cfb\u7edf\u548c\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9700\u8981\u4f7f\u7528\u4e24\u4e2a\u7cfb\u7edf\u7684\u8d26\u53f7\u767b\u5f55\uff1b\u4f7f\u7528\u8eab\u4efd\u63d0\u4f9b\u5546\u8fdb\u884c\u8eab\u4efd\u8ba4\u8bc1\u540e\uff0c\u7528\u6237\u5728\u672c\u4f01\u4e1a\u7ba1\u7406\u7cfb\u7edf\u4e2d\u767b\u5f55\u5373\u53ef\u8bbf\u95ee\u4e24\u4e2a\u7cfb\u7edf\u3002

                                                  "},{"location":"admin/ghippo/access-control/ldap.html","title":"LDAP","text":"

                                                  LDAP \u82f1\u6587\u5168\u79f0\u4e3a Lightweight Directory Access Protocol\uff0c\u5373\u8f7b\u578b\u76ee\u5f55\u8bbf\u95ee\u534f\u8bae\uff0c\u8fd9\u662f\u4e00\u4e2a\u5f00\u653e\u7684\u3001\u4e2d\u7acb\u7684\u5de5\u4e1a\u6807\u51c6\u5e94\u7528\u534f\u8bae\uff0c \u901a\u8fc7 IP \u534f\u8bae\u63d0\u4f9b\u8bbf\u95ee\u63a7\u5236\u548c\u7ef4\u62a4\u5206\u5e03\u5f0f\u4fe1\u606f\u7684\u76ee\u5f55\u4fe1\u606f\u3002

If your enterprise or organization already has its own account system, and your enterprise user management system supports the LDAP protocol, you can use the LDAP-based identity provider feature of Global Management instead of creating a username/password for every member of your organization on the 算丰 AI Computing Platform. You can then grant these external user identities permission to use 算丰 AI Computing Platform resources.

In Global Management, the steps are as follows:

1. Log in to the AI Computing Platform as a user with the admin role. Click Global Management -> User and Access Control at the bottom of the left navigation bar.

2. Click Identity Providers in the left navigation bar, then click the Create Identity Provider button.

3. On the LDAP tab, fill in the following fields and click Save to establish the trust relationship with the identity provider and the user mapping. (The connection parameters can be sanity-checked with the ldapsearch sketch after these steps.)

  | Field | Description |
  | ----- | ----------- |
  | Type (Vendor) | Supports LDAP (Lightweight Directory Access Protocol) and AD (Active Directory) |
  | Identity provider name (UI display name) | Used to distinguish different identity providers |
  | Server (Connection URL) | Address and port of the LDAP service, e.g. ldap://10.6.165.2:30061 |
  | Username (Bind DN) | DN of the LDAP administrator; Keycloak uses this DN to access the LDAP server |
  | Password (Bind credentials) | Password of the LDAP administrator. The value can be read from a vault using the ${vault.ID} format. |
  | Users DN | Full DN of the LDAP tree where your users live. This DN is the parent of LDAP users. For example, if the DN of a typical user looks like "uid='john',ou=users,dc=example,dc=com", this could be "ou=users,dc=example,dc=com". |
  | User object classes | All values of the LDAP objectClass attribute for users in LDAP, separated by commas. For example: "inetOrgPerson, organizationalPerson". Newly created Keycloak users are written to LDAP with all of these object classes, and existing LDAP user records are found as long as they contain all of them. |
  | Enable StartTLS | When enabled, encrypts the connection between the 算丰 AI Computing Platform and LDAP |
  | Default permission | Synced users/user groups have no permissions by default |
  | First/Last name mapping | Maps to First name and Last name |
  | User name mapping | The user's unique username |
  | Mailbox mapping | The user's email address |

  Advanced configuration

  | Field | Description |
  | ----- | ----------- |
  | Enable or not | Enabled by default; when disabled, this LDAP configuration does not take effect |
  | Periodic full sync | Disabled by default; when enabled, a sync period can be configured, e.g. sync once per hour |
  | Edit mode | Read-only mode does not modify the source data in LDAP; in write mode, after user information is edited on the platform, the data is synced back to LDAP |
  | Read timeout | When the LDAP data volume is large, adjusting this value helps avoid interface timeouts |
  | User LDAP filter | Additional LDAP filter used to filter searched users. Leave it empty if you need no extra filter. Make sure it starts with "(" and ends with ")". |
  | Username LDAP attribute | Name of the LDAP attribute mapped to the Keycloak username. For many LDAP server vendors it can be "uid". For Active Directory it can be "sAMAccountName" or "cn". The attribute should be filled for all LDAP user records you want to import from LDAP into Keycloak. |
  | RDN LDAP attribute | Name of the LDAP attribute used as the RDN (top-level attribute) of a typical user DN. It is usually the same as the username LDAP attribute, but this is not required. For example, for Active Directory, "cn" is commonly used as the RDN attribute when the username attribute might be "sAMAccountName". |
  | UUID LDAP attribute | Name of the LDAP attribute used as the unique object identifier (UUID) of objects in LDAP. For many LDAP server vendors it is "entryUUID", but some differ; for Active Directory it should be "objectGUID". If your LDAP server does not support the UUID concept, you can use any other attribute that should be unique among the LDAP users in the tree, e.g. "uid" or "entryDN". |

4. On the Sync User Groups tab, fill in the following fields to configure the user group mapping, then click Save again.

  | Field | Description | Example value |
  | ----- | ----------- | ------------- |
  | Base DN | Location of the user groups in the LDAP tree | ou=groups,dc=example,dc=org |
  | Group object filter | Object classes of the user groups, comma-separated if more classes are needed. In a typical LDAP deployment this is usually "groupOfNames"; the system fills it in automatically, edit it directly if a change is needed. * means all. | * |
  | Group name | cn | Cannot be changed |
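Before saving, the connection parameters above can be sanity-checked from any host that can reach the LDAP server. A minimal sketch using the OpenLDAP client tools; the DNs, password, and filter below are hypothetical placeholders matching the examples in the tables:

# Verify Connection URL, Bind DN/credentials, Users DN and user filter before saving.\n# All DNs, the password and the filter below are illustrative placeholders.\nldapsearch -H ldap://10.6.165.2:30061 -D \"cn=admin,dc=example,dc=com\" -w 'admin-password' -b \"ou=users,dc=example,dc=com\" \"(objectClass=inetOrgPerson)\" uid mail\n

If the bind succeeds and the expected user entries come back, the same values should work in the form above.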

Note

1. After you establish the trust relationship between your enterprise user management system and the 算丰 AI Computing Platform via the LDAP protocol, you can sync the users or user groups of the enterprise user management system to the platform in one go, either manually or automatically.
2. After syncing, administrators can grant permissions to users/user groups in bulk, and users can log in to the AI Computing Platform with the account/password from the enterprise user management system.
                                                  "},{"location":"admin/ghippo/access-control/oauth2.0.html","title":"OAuth 2.0 - \u4f01\u4e1a\u5fae\u4fe1","text":"

                                                  \u5982\u679c\u60a8\u7684\u4f01\u4e1a\u6216\u7ec4\u7ec7\u4e2d\u7684\u6210\u5458\u5747\u7ba1\u7406\u5728\u4f01\u4e1a\u5fae\u4fe1\u4e2d\uff0c\u60a8\u53ef\u4ee5\u4f7f\u7528\u5168\u5c40\u7ba1\u7406\u63d0\u4f9b\u7684\u57fa\u4e8e OAuth 2.0 \u534f\u8bae\u7684\u8eab\u4efd\u63d0\u4f9b\u5546\u529f\u80fd\uff0c \u800c\u4e0d\u5fc5\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4e3a\u6bcf\u4e00\u4f4d\u7ec4\u7ec7\u6210\u5458\u521b\u5efa\u7528\u6237\u540d/\u5bc6\u7801\u3002 \u60a8\u53ef\u4ee5\u5411\u8fd9\u4e9b\u5916\u90e8\u7528\u6237\u8eab\u4efd\u6388\u4e88\u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8d44\u6e90\u7684\u6743\u9650\u3002

                                                  "},{"location":"admin/ghippo/access-control/oauth2.0.html#_1","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                  1. \u4f7f\u7528\u5177\u6709 Admin \u89d2\u8272\u7684\u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\u3002\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u5e95\u90e8\u7684 \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \u3002

                                                  2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u9009\u62e9 \u8eab\u4efd\u63d0\u4f9b\u5546 \uff0c\u70b9\u51fb OAuth2.0 \u9875\u7b7e\u3002\u586b\u5199\u8868\u5355\u5b57\u6bb5\uff0c\u5efa\u7acb\u4e0e\u4f01\u4e1a\u5fae\u4fe1\u7684\u4fe1\u4efb\u5173\u7cfb\u540e\uff0c\u70b9\u51fb \u4fdd\u5b58 \u3002

                                                  "},{"location":"admin/ghippo/access-control/oauth2.0.html#_2","title":"\u4f01\u4e1a\u5fae\u4fe1\u4e2d\u5bf9\u5e94\u7684\u5b57\u6bb5","text":"

Note

Before integrating, you need to create a self-built app in the WeCom admin console; refer to the guide on how to create a self-built app.

| Field | Description |
| ----- | ----------- |
| Corp ID | ID of your WeCom enterprise |
| Agent ID | ID of the self-built app |
| ClientSecret | Secret of the self-built app |

WeCom Corp ID:

Agent ID and ClientSecret:

                                                  "},{"location":"admin/ghippo/access-control/oidc.html","title":"\u521b\u5efa\u548c\u7ba1\u7406 OIDC","text":"

                                                  OIDC\uff08OpenID Connect\uff09\u662f\u5efa\u7acb\u5728 OAuth 2.0 \u57fa\u7840\u4e0a\u7684\u4e00\u4e2a\u8eab\u4efd\u5c42\uff0c\u662f\u57fa\u4e8e OAuth2 \u534f\u8bae\u7684\u8eab\u4efd\u8ba4\u8bc1\u6807\u51c6\u534f\u8bae\u3002

                                                  \u5982\u679c\u60a8\u7684\u4f01\u4e1a\u6216\u7ec4\u7ec7\u5df2\u6709\u81ea\u5df1\u7684\u8d26\u53f7\u4f53\u7cfb\uff0c\u540c\u65f6\u60a8\u7684\u4f01\u4e1a\u7528\u6237\u7ba1\u7406\u7cfb\u7edf\u652f\u6301 OIDC \u534f\u8bae\uff0c \u53ef\u4ee5\u4f7f\u7528\u5168\u5c40\u7ba1\u7406\u63d0\u4f9b\u7684\u57fa\u4e8e OIDC \u534f\u8bae\u7684\u8eab\u4efd\u63d0\u4f9b\u5546\u529f\u80fd\uff0c\u800c\u4e0d\u5fc5\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4e3a\u6bcf\u4e00\u4f4d\u7ec4\u7ec7\u6210\u5458\u521b\u5efa\u7528\u6237\u540d/\u5bc6\u7801\u3002 \u60a8\u53ef\u4ee5\u5411\u8fd9\u4e9b\u5916\u90e8\u7528\u6237\u8eab\u4efd\u6388\u4e88\u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8d44\u6e90\u7684\u6743\u9650\u3002

                                                  \u5177\u4f53\u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b\u3002

1. Log in to the AI Computing Platform as a user with the admin role. Click Global Management -> User and Access Control at the bottom of the left navigation bar.

2. Select Identity Providers in the left navigation bar, click the OIDC tab, then the Create Identity Provider button.

3. Fill in the form fields to establish the trust relationship with the identity provider, then click OK.

| Field | Description |
| ----- | ----------- |
| Provider name | Shown on the login page as the entry point of the identity provider |
| Authentication method | Client authentication method. If the JWT is signed with a private key, select JWT signed with private key from the dropdown. See Client Authentication for details. |
| Client ID | ID of the client |
| Client secret | Password of the client |
| Client URL | The login URL, token URL, user information URL, and logout URL can all be obtained in one step through the identity provider's well-known endpoint |
| Auto-association | When enabled, if the identity provider's username/email duplicates an existing 算丰 AI Computing Platform username/email, the two are automatically associated |
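The well-known endpoint mentioned above is the standard OIDC discovery document, so it can be inspected before filling in the form. A minimal sketch; idp.example.com is a hypothetical placeholder for your IdP host:

# Fetch the OIDC discovery document; idp.example.com is a placeholder for your IdP.\n# The response lists authorization_endpoint, token_endpoint, userinfo_endpoint and end_session_endpoint.\ncurl -s https://idp.example.com/.well-known/openid-configuration\n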

Note

1. User information is synced to User and Access Control -> User List on the 算丰 AI Computing Platform only after the user completes the first login to the platform through the enterprise user management system.
2. A user logging in for the first time is not granted any default permissions; an administrator (the platform administrator, a sub-module administrator, or a resource administrator) must grant them.
3. Refer to the Azure OpenID Connect (OIDC) integration process.
                                                  "},{"location":"admin/ghippo/access-control/oidc.html#_1","title":"\u7528\u6237\u8eab\u4efd\u8ba4\u8bc1\u4ea4\u4e92\u6d41\u7a0b","text":"

                                                  \u7528\u6237\u8eab\u4efd\u8ba4\u8bc1\u7684\u4ea4\u4e92\u6d41\u7a0b\u4e3a\uff1a

                                                  1. \u4f7f\u7528\u6d4f\u89c8\u5668\u53d1\u8d77\u5355\u70b9\u767b\u5f55\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u8bf7\u6c42\u3002 1.\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u6839\u636e\u767b\u5f55\u94fe\u63a5\u4e2d\u643a\u5e26\u7684\u4fe1\u606f\uff0c\u67e5\u627e \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u8eab\u4efd\u63d0\u4f9b\u5546 \u4e2d\u5bf9\u5e94\u7684\u914d\u7f6e\u4fe1\u606f\uff0c \u6784\u5efa OIDC \u6388\u6743 Request\uff0c\u53d1\u9001\u7ed9\u6d4f\u89c8\u5668\u3002
                                                  2. \u6d4f\u89c8\u5668\u6536\u5230\u8bf7\u6c42\u540e\uff0c\u8f6c\u53d1 OIDC \u6388\u6743 Request \u7ed9\u4f01\u4e1a IdP\u3002
                                                  3. \u5728\u4f01\u4e1a IdP \u7684\u767b\u5f55\u9875\u9762\u4e2d\u8f93\u5165\u7528\u6237\u540d\u548c\u5bc6\u7801\uff0c\u4f01\u4e1a IdP \u5bf9\u63d0\u4f9b\u7684\u8eab\u4efd\u4fe1\u606f\u8fdb\u884c\u9a8c\u8bc1\uff0c\u5e76\u6784\u5efa\u643a\u5e26\u7528\u6237\u4fe1\u606f\u7684 ID Token\uff0c\u5411\u6d4f\u89c8\u5668\u53d1\u9001 OIDC \u6388\u6743 Response\u3002
                                                  4. \u6d4f\u89c8\u5668\u54cd\u5e94\u540e\u8f6c\u53d1 OIDC \u6388\u6743 Response \u7ed9 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u3002 1.\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4ece OIDC \u6388\u6743 Response \u4e2d\u53d6\u51fa ID Token\uff0c\u5e76\u6839\u636e\u5df2\u914d\u7f6e\u7684\u8eab\u4efd\u8f6c\u6362\u89c4\u5219\u6620\u5c04\u5230\u5177\u4f53\u7684\u7528\u6237\u5217\u8868\uff0c\u9881\u53d1 Token\u3002
                                                  5. \u5b8c\u6210\u5355\u70b9\u767b\u5f55\uff0c\u8bbf\u95ee \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u3002
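For reference, the OIDC authorization request built in step 2 is an ordinary browser redirect URL. A minimal sketch; the host, client_id, and redirect_uri are hypothetical placeholders:

# Shape of the OIDC authorization request from step 2 (all values are placeholders).\nhttps://idp.example.com/auth?response_type=code&scope=openid%20profile%20email&client_id=ai-platform&redirect_uri=https%3A%2F%2Fplatform.example.com%2Fcallback&state=xyz123\n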
                                                  "},{"location":"admin/ghippo/access-control/role.html","title":"\u89d2\u8272\u548c\u6743\u9650\u7ba1\u7406","text":"

                                                  \u4e00\u4e2a\u89d2\u8272\u5bf9\u5e94\u4e00\u7ec4\u6743\u9650\u3002\u6743\u9650\u51b3\u5b9a\u4e86\u53ef\u4ee5\u5bf9\u8d44\u6e90\u6267\u884c\u7684\u64cd\u4f5c\u3002\u5411\u7528\u6237\u6388\u4e88\u67d0\u89d2\u8272\uff0c\u5373\u6388\u4e88\u8be5\u89d2\u8272\u6240\u5305\u542b\u7684\u6240\u6709\u6743\u9650\u3002

                                                  \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5b58\u5728\u4e09\u79cd\u89d2\u8272\u8303\u56f4\uff0c\u80fd\u591f\u7075\u6d3b\u3001\u6709\u6548\u5730\u89e3\u51b3\u60a8\u5728\u6743\u9650\u4e0a\u7684\u4f7f\u7528\u95ee\u9898\uff1a

                                                  • \u5e73\u53f0\u89d2\u8272
                                                  • \u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272
                                                  • \u6587\u4ef6\u5939\u89d2\u8272
                                                  "},{"location":"admin/ghippo/access-control/role.html#_2","title":"\u5e73\u53f0\u89d2\u8272","text":"

                                                  \u5e73\u53f0\u89d2\u8272\u662f\u7c97\u7c92\u5ea6\u6743\u9650\uff0c\u5bf9\u5e73\u53f0\u4e0a\u6240\u6709\u76f8\u5173\u8d44\u6e90\u5177\u6709\u76f8\u5e94\u6743\u9650\u3002\u901a\u8fc7\u5e73\u53f0\u89d2\u8272\u53ef\u4ee5\u8d4b\u4e88\u7528\u6237\u5bf9\u6240\u6709\u96c6\u7fa4\u3001\u6240\u6709\u5de5\u4f5c\u7a7a\u95f4\u7b49\u7684\u589e\u5220\u6539\u67e5\u6743\u9650\uff0c \u800c\u4e0d\u80fd\u5177\u4f53\u5230\u67d0\u4e00\u4e2a\u96c6\u7fa4\u6216\u67d0\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u3002\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u63d0\u4f9b\u4e86 5 \u4e2a\u9884\u7f6e\u7684\u3001\u7528\u6237\u53ef\u76f4\u63a5\u4f7f\u7528\u7684\u5e73\u53f0\u89d2\u8272\uff1a

                                                  • Admin
                                                  • Kpanda Owner
                                                  • Workspace and Folder Owner
                                                  • IAM Owner
                                                  • Audit Owner

                                                  \u540c\u65f6\uff0c\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8fd8\u652f\u6301\u7528\u6237\u521b\u5efa\u81ea\u5b9a\u4e49\u5e73\u53f0\u89d2\u8272\uff0c\u53ef\u6839\u636e\u9700\u8981\u81ea\u5b9a\u4e49\u89d2\u8272\u5185\u5bb9\u3002 \u5982\u521b\u5efa\u4e00\u4e2a\u5e73\u53f0\u89d2\u8272\uff0c\u5305\u542b\u5e94\u7528\u5de5\u4f5c\u53f0\u7684\u6240\u6709\u529f\u80fd\u6743\u9650\uff0c\u7531\u4e8e\u5e94\u7528\u5de5\u4f5c\u53f0\u4f9d\u8d56\u4e8e\u5de5\u4f5c\u7a7a\u95f4\uff0c \u56e0\u6b64\u5e73\u53f0\u4f1a\u5e2e\u52a9\u7528\u6237\u9ed8\u8ba4\u52fe\u9009\u5de5\u4f5c\u7a7a\u95f4\u7684\u67e5\u770b\u6743\u9650\uff0c\u8bf7\u4e0d\u8981\u624b\u52a8\u53d6\u6d88\u52fe\u9009\u3002 \u82e5\u7528\u6237 A \u88ab\u6388\u4e88\u8be5 Workbench\uff08\u5e94\u7528\u5de5\u4f5c\u53f0\uff09\u89d2\u8272\uff0c\u5c06\u81ea\u52a8\u62e5\u6709\u6240\u6709\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u7684\u5e94\u7528\u5de5\u4f5c\u53f0\u76f8\u5173\u529f\u80fd\u7684\u589e\u5220\u6539\u67e5\u7b49\u6743\u9650\u3002

                                                  "},{"location":"admin/ghippo/access-control/role.html#_3","title":"\u5e73\u53f0\u89d2\u8272\u6388\u6743\u65b9\u5f0f","text":"

                                                  \u7ed9\u5e73\u53f0\u89d2\u8272\u6388\u6743\u5171\u6709\u4e09\u79cd\u65b9\u5f0f\uff1a

                                                  • \u5728 \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u7528\u6237 \u7684\u7528\u6237\u5217\u8868\u4e2d\uff0c\u627e\u5230\u8be5\u7528\u6237\uff0c\u70b9\u51fb ... \uff0c\u9009\u62e9 \u6388\u6743 \uff0c\u4e3a\u8be5\u7528\u6237\u8d4b\u4e88\u5e73\u53f0\u89d2\u8272\u6743\u9650\u3002

                                                  • \u5728 \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u7528\u6237\u7ec4 \u7684\u7528\u6237\u7ec4\u5217\u8868\u4e2d\u521b\u5efa\u7528\u6237\u7ec4\uff0c\u5c06\u8be5\u7528\u6237\u52a0\u5165\u7528\u6237\u7ec4\uff0c\u5e76\u7ed9\u7528\u6237\u7ec4\u6388\u6743 \uff08\u5177\u4f53\u64cd\u4f5c\u4e3a\uff1a\u5728\u7528\u6237\u7ec4\u5217\u8868\u627e\u5230\u8be5\u7528\u6237\u7ec4\uff0c\u70b9\u51fb ... \uff0c\u9009\u62e9 \u6388\u6743 \uff0c\u4e3a\u8be5\u7528\u6237\u7ec4\u8d4b\u4e88\u5e73\u53f0\u89d2\u8272\uff09\u3002

                                                  • \u5728 \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u89d2\u8272 \u7684\u89d2\u8272\u5217\u8868\u4e2d\uff0c\u627e\u5230\u76f8\u5e94\u7684\u5e73\u53f0\u89d2\u8272\uff0c \u70b9\u51fb\u89d2\u8272\u540d\u79f0\u8fdb\u5165\u8be6\u60c5\uff0c\u70b9\u51fb \u5173\u8054\u6210\u5458 \u6309\u94ae\uff0c\u9009\u4e2d\u8be5\u7528\u6237\u6216\u7528\u6237\u6240\u5728\u7684\u7528\u6237\u7ec4\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                                                  "},{"location":"admin/ghippo/access-control/role.html#_4","title":"\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272","text":"

                                                  \u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u662f\u7ec6\u7c92\u5ea6\u89d2\u8272\uff0c\u901a\u8fc7\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u53ef\u4ee5\u8d4b\u4e88\u7528\u6237\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u7684\u7ba1\u7406\u6743\u9650\u3001\u67e5\u770b\u6743\u9650\u6216\u8be5\u5de5\u4f5c\u7a7a\u95f4\u5e94\u7528\u5de5\u4f5c\u53f0\u76f8\u5173\u7684\u6743\u9650\u7b49\u3002 \u83b7\u5f97\u8be5\u89d2\u8272\u6743\u9650\u7684\u7528\u6237\u53ea\u80fd\u7ba1\u7406\u8be5\u5de5\u4f5c\u7a7a\u95f4\uff0c\u800c\u65e0\u6cd5\u8bbf\u95ee\u5176\u4ed6\u5de5\u4f5c\u7a7a\u95f4\u3002\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u63d0\u4f9b\u4e86 3 \u4e2a\u9884\u7f6e\u7684\u3001\u7528\u6237\u53ef\u76f4\u63a5\u4f7f\u7528\u7684\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\uff1a

                                                  • Workspace Admin
                                                  • Workspace Editor
                                                  • Workspace Viewer

                                                  \u540c\u65f6\uff0c\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8fd8\u652f\u6301\u7528\u6237\u521b\u5efa\u81ea\u5b9a\u4e49\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\uff0c\u53ef\u6839\u636e\u9700\u8981\u81ea\u5b9a\u4e49\u89d2\u8272\u5185\u5bb9\u3002\u5982\u521b\u5efa\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\uff0c \u5305\u542b\u5e94\u7528\u5de5\u4f5c\u53f0\u7684\u6240\u6709\u529f\u80fd\u6743\u9650\uff0c\u7531\u4e8e\u5e94\u7528\u5de5\u4f5c\u53f0\u4f9d\u8d56\u4e8e\u5de5\u4f5c\u7a7a\u95f4\uff0c\u56e0\u6b64\u5e73\u53f0\u4f1a\u5e2e\u52a9\u7528\u6237\u9ed8\u8ba4\u52fe\u9009\u5de5\u4f5c\u7a7a\u95f4\u7684\u67e5\u770b\u6743\u9650\uff0c \u8bf7\u4e0d\u8981\u624b\u52a8\u53d6\u6d88\u52fe\u9009\u3002\u82e5\u7528\u6237 A \u5728\u5de5\u4f5c\u7a7a\u95f4 01 \u4e2d\u88ab\u6388\u4e88\u8be5\u89d2\u8272\uff0c\u5c06\u62e5\u6709\u5de5\u4f5c\u7a7a\u95f4 01 \u4e0b\u7684\u5e94\u7528\u5de5\u4f5c\u53f0\u76f8\u5173\u529f\u80fd\u7684\u589e\u5220\u6539\u67e5\u6743\u9650\u3002

                                                  Note

                                                  \u4e0e\u5e73\u53f0\u89d2\u8272\u4e0d\u540c\uff0c\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u88ab\u521b\u5efa\u540e\u9700\u8981\u524d\u5f80\u5de5\u4f5c\u7a7a\u95f4\u4f7f\u7528\uff0c\u88ab\u6388\u6743\u540e\u7528\u6237\u4ec5\u5728\u8be5\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u62e5\u6709\u8be5\u89d2\u8272\u4e2d\u7684\u529f\u80fd\u6743\u9650\u3002

                                                  "},{"location":"admin/ghippo/access-control/role.html#_5","title":"\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u6388\u6743\u65b9\u5f0f","text":"

                                                  \u5728 \u5168\u5c40\u7ba1\u7406 -> \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \u5217\u8868\u4e2d\uff0c\u627e\u5230\u8be5\u5de5\u4f5c\u7a7a\u95f4\uff0c\u70b9\u51fb \u6dfb\u52a0\u6388\u6743 \uff0c\u4e3a\u8be5\u7528\u6237\u8d4b\u4e88\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u6743\u9650\u3002

                                                  "},{"location":"admin/ghippo/access-control/role.html#_6","title":"\u6587\u4ef6\u5939\u89d2\u8272","text":"

                                                  \u6587\u4ef6\u5939\u89d2\u8272\u7684\u6743\u9650\u7c92\u5ea6\u4ecb\u4e8e\u5e73\u53f0\u89d2\u8272\u4e0e\u5de5\u4f5c\u7a7a\u95f4\u89d2\u8272\u4e4b\u95f4\uff0c\u901a\u8fc7\u6587\u4ef6\u5939\u89d2\u8272\u53ef\u4ee5\u8d4b\u4e88\u7528\u6237\u67d0\u4e2a\u6587\u4ef6\u5939\u53ca\u5176\u5b50\u6587\u4ef6\u5939\u548c\u8be5\u6587\u4ef6\u5939\u4e0b\u6240\u6709\u5de5\u4f5c\u7a7a\u95f4\u7684\u7ba1\u7406\u6743\u9650\u3001\u67e5\u770b\u6743\u9650\u7b49\uff0c \u5e38\u9002\u7528\u4e8e\u4f01\u4e1a\u4e2d\u7684\u90e8\u95e8\u573a\u666f\u3002\u6bd4\u5982\u7528\u6237 B \u662f\u4e00\u7ea7\u90e8\u95e8\u7684 Leader\uff0c\u901a\u5e38\u7528\u6237 B \u80fd\u591f\u7ba1\u7406\u8be5\u4e00\u7ea7\u90e8\u95e8\u3001\u5176\u4e0b\u7684\u6240\u6709\u4e8c\u7ea7\u90e8\u95e8\u548c\u90e8\u95e8\u4e2d\u7684\u9879\u76ee\u7b49\uff0c \u5728\u6b64\u573a\u666f\u4e2d\u7ed9\u7528\u6237 B \u6388\u4e88\u4e00\u7ea7\u6587\u4ef6\u5939\u7684\u7ba1\u7406\u5458\u6743\u9650\uff0c\u7528\u6237 B \u4e5f\u5c06\u62e5\u6709\u5176\u4e0b\u7684\u4e8c\u7ea7\u6587\u4ef6\u5939\u548c\u5de5\u4f5c\u7a7a\u95f4\u7684\u76f8\u5e94\u6743\u9650\u3002 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u63d0\u4f9b\u4e86 3 \u4e2a\u9884\u7f6e\u7684\u3001\u7528\u6237\u53ef\u76f4\u63a5\u4f7f\u7528\u6587\u4ef6\u5939\u89d2\u8272\uff1a

                                                  • Folder Admin
                                                  • Folder Editor
                                                  • Folder Viewer

                                                  \u540c\u65f6\uff0c\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8fd8\u652f\u6301\u7528\u6237\u521b\u5efa\u81ea\u5b9a\u4e49\u6587\u4ef6\u5939\u89d2\u8272\uff0c\u53ef\u6839\u636e\u9700\u8981\u81ea\u5b9a\u4e49\u89d2\u8272\u5185\u5bb9\u3002 \u5982\u521b\u5efa\u4e00\u4e2a\u6587\u4ef6\u5939\u89d2\u8272\uff0c\u5305\u542b\u5e94\u7528\u5de5\u4f5c\u53f0\u7684\u6240\u6709\u529f\u80fd\u6743\u9650\u3002\u82e5\u7528\u6237 A \u5728\u6587\u4ef6\u5939 01 \u4e2d\u88ab\u6388\u4e88\u8be5\u89d2\u8272\uff0c \u5c06\u62e5\u6709\u8be5\u6587\u4ef6\u5939\u4e0b\u6240\u6709\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u5e94\u7528\u5de5\u4f5c\u53f0\u76f8\u5173\u529f\u80fd\u7684\u589e\u5220\u6539\u67e5\u6743\u9650\u3002

                                                  Note

                                                  \u529f\u80fd\u6a21\u5757\u672c\u8eab\u4f9d\u8d56\u7684\u662f\u5de5\u4f5c\u7a7a\u95f4\uff0c\u6587\u4ef6\u5939\u662f\u5de5\u4f5c\u7a7a\u95f4\u4e0a\u7684\u8fdb\u4e00\u6b65\u5206\u7ec4\u673a\u5236\u4e14\u5177\u6709\u6743\u9650\u7ee7\u627f\u80fd\u529b\uff0c \u56e0\u6b64\u6587\u4ef6\u5939\u6743\u9650\u4e0d\u5149\u5305\u542b\u6587\u4ef6\u5939\u672c\u8eab\uff0c\u8fd8\u5305\u62ec\u5176\u4e0b\u7684\u5b50\u6587\u4ef6\u5939\u548c\u5de5\u4f5c\u7a7a\u95f4\u3002

                                                  "},{"location":"admin/ghippo/access-control/role.html#_7","title":"\u6587\u4ef6\u5939\u89d2\u8272\u6388\u6743\u65b9\u5f0f","text":"

                                                  \u5728 \u5168\u5c40\u7ba1\u7406 -> \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \u5217\u8868\u4e2d\uff0c\u627e\u5230\u8be5\u6587\u4ef6\u5939\uff0c\u70b9\u51fb \u6dfb\u52a0\u6388\u6743 \uff0c\u4e3a\u8be5\u7528\u6237\u8d4b\u4e88\u6587\u4ef6\u5939\u89d2\u8272\u6743\u9650\u3002

                                                  "},{"location":"admin/ghippo/access-control/user.html","title":"\u7528\u6237","text":"

                                                  \u7528\u6237\u6307\u7684\u662f\u7531\u5e73\u53f0\u7ba1\u7406\u5458 admin \u6216\u8005\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u7ba1\u7406\u5458 IAM Owner \u5728 \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u7528\u6237 \u9875\u9762\u521b\u5efa\u7684\u7528\u6237\uff0c\u6216\u8005\u901a\u8fc7 LDAP / OIDC \u5bf9\u63a5\u8fc7\u6765\u7684\u7528\u6237\u3002 \u7528\u6237\u540d\u4ee3\u8868\u8d26\u53f7\uff0c\u7528\u6237\u901a\u8fc7\u7528\u6237\u540d\u548c\u5bc6\u7801\u767b\u5f55\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u3002

                                                  \u62e5\u6709\u4e00\u4e2a\u7528\u6237\u8d26\u53f7\u662f\u7528\u6237\u8bbf\u95ee\u5e73\u53f0\u7684\u524d\u63d0\u3002\u65b0\u5efa\u7684\u7528\u6237\u9ed8\u8ba4\u6ca1\u6709\u4efb\u4f55\u6743\u9650\uff0c\u4f8b\u5982\u60a8\u9700\u8981\u7ed9\u7528\u6237\u8d4b\u4e88\u76f8\u5e94\u7684\u89d2\u8272\u6743\u9650\uff0c\u6bd4\u5982\u5728 \u7528\u6237\u5217\u8868 \u6216 \u7528\u6237\u8be6\u60c5 \u6388\u4e88\u5b50\u6a21\u5757\u7684\u7ba1\u7406\u5458\u6743\u9650\u3002 \u5b50\u6a21\u5757\u7ba1\u7406\u5458\u62e5\u6709\u8be5\u5b50\u6a21\u5757\u7684\u6700\u9ad8\u6743\u9650\uff0c\u80fd\u591f\u521b\u5efa\u3001\u7ba1\u7406\u3001\u5220\u9664\u8be5\u6a21\u5757\u7684\u6240\u6709\u8d44\u6e90\u3002 \u5982\u679c\u7528\u6237\u9700\u8981\u88ab\u6388\u4e88\u5177\u4f53\u8d44\u6e90\u7684\u6743\u9650\uff0c\u6bd4\u5982\u67d0\u4e2a\u8d44\u6e90\u7684\u4f7f\u7528\u6743\u9650\uff0c\u8bf7\u67e5\u770b\u8d44\u6e90\u6388\u6743\u8bf4\u660e\u3002

                                                  \u672c\u9875\u4ecb\u7ecd\u7528\u6237\u7684\u521b\u5efa\u3001\u6388\u6743\u3001\u7981\u7528\u3001\u542f\u7528\u3001\u5220\u9664\u7b49\u64cd\u4f5c\u3002

                                                  "},{"location":"admin/ghippo/access-control/user.html#_2","title":"\u521b\u5efa\u7528\u6237","text":"

                                                  \u524d\u63d0\uff1a\u62e5\u6709\u5e73\u53f0\u7ba1\u7406\u5458 Admin \u6743\u9650\u6216\u8005\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u7ba1\u7406\u5458 IAM Owner \u6743\u9650\u3002

                                                  1. \u7ba1\u7406\u5458\u8fdb\u5165 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \uff0c\u9009\u62e9 \u7528\u6237 \uff0c\u8fdb\u5165\u7528\u6237\u5217\u8868\uff0c\u70b9\u51fb\u53f3\u4e0a\u65b9\u7684 \u521b\u5efa\u7528\u6237 \u3002

                                                  2. \u5728 \u521b\u5efa\u7528\u6237 \u9875\u9762\u586b\u5199\u7528\u6237\u540d\u548c\u767b\u5f55\u5bc6\u7801\u3002\u5982\u9700\u4e00\u6b21\u6027\u521b\u5efa\u591a\u4e2a\u7528\u6237\uff0c\u53ef\u4ee5\u70b9\u51fb \u521b\u5efa\u7528\u6237 \u540e\u8fdb\u884c\u6279\u91cf\u521b\u5efa\uff0c\u4e00\u6b21\u6027\u6700\u591a\u521b\u5efa 5 \u4e2a\u7528\u6237\u3002\u6839\u636e\u60a8\u7684\u5b9e\u9645\u60c5\u51b5\u786e\u5b9a\u662f\u5426\u8bbe\u7f6e\u7528\u6237\u5728\u9996\u6b21\u767b\u5f55\u65f6\u91cd\u7f6e\u5bc6\u7801\u3002

                                                  3. \u70b9\u51fb \u786e\u5b9a \uff0c\u521b\u5efa\u7528\u6237\u6210\u529f\uff0c\u8fd4\u56de\u7528\u6237\u5217\u8868\u9875\u3002

                                                  Note

                                                  \u6b64\u5904\u8bbe\u7f6e\u7684\u7528\u6237\u540d\u548c\u5bc6\u7801\u5c06\u7528\u4e8e\u767b\u5f55\u5e73\u53f0\u3002

                                                  "},{"location":"admin/ghippo/access-control/user.html#grant-admin-permissions","title":"\u4e3a\u7528\u6237\u6388\u4e88\u5b50\u6a21\u5757\u7ba1\u7406\u5458\u6743\u9650","text":"

                                                  \u524d\u63d0\uff1a\u8be5\u7528\u6237\u5df2\u5b58\u5728\u3002

                                                  1. \u7ba1\u7406\u5458\u8fdb\u5165 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \uff0c\u9009\u62e9 \u7528\u6237 \uff0c\u8fdb\u5165\u7528\u6237\u5217\u8868\uff0c\u70b9\u51fb \u2507 -> \u6388\u6743 \u3002

                                                  2. \u5728 \u6388\u6743 \u9875\u9762\u52fe\u9009\u9700\u8981\u7684\u89d2\u8272\u6743\u9650\uff08\u53ef\u591a\u9009\uff09\u3002

                                                  3. \u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u4e3a\u7528\u6237\u7684\u6388\u6743\u3002

                                                  Note

                                                  \u5728\u7528\u6237\u5217\u8868\u4e2d\uff0c\u70b9\u51fb\u67d0\u4e2a\u7528\u6237\uff0c\u53ef\u4ee5\u8fdb\u5165\u7528\u6237\u8be6\u60c5\u9875\u9762\u3002

                                                  "},{"location":"admin/ghippo/access-control/user.html#_3","title":"\u5c06\u7528\u6237\u52a0\u5165\u7528\u6237\u7ec4","text":"
                                                  1. \u7ba1\u7406\u5458\u8fdb\u5165 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \uff0c\u9009\u62e9 \u7528\u6237 \uff0c\u8fdb\u5165\u7528\u6237\u5217\u8868\uff0c\u70b9\u51fb \u2507 -> \u52a0\u5165\u7528\u6237\u7ec4 \u3002

                                                  2. \u5728 \u52a0\u5165\u7528\u6237\u7ec4 \u9875\u9762\u52fe\u9009\u9700\u8981\u52a0\u5165\u7684\u7528\u6237\u7ec4\uff08\u53ef\u591a\u9009\uff09\u3002\u82e5\u6ca1\u6709\u53ef\u9009\u7684\u7528\u6237\u7ec4\uff0c\u70b9\u51fb \u521b\u5efa\u7528\u6237\u7ec4 \u521b\u5efa\u7528\u6237\u7ec4\uff0c\u518d\u8fd4\u56de\u8be5\u9875\u9762\u70b9\u51fb \u5237\u65b0 \u6309\u94ae\uff0c\u663e\u793a\u521a\u521b\u5efa\u7684\u7528\u6237\u7ec4\u3002

                                                  3. \u70b9\u51fb \u786e\u5b9a \u5c06\u7528\u6237\u52a0\u5165\u7528\u6237\u7ec4\u3002

                                                  Note

                                                  \u7528\u6237\u4f1a\u7ee7\u627f\u7528\u6237\u7ec4\u7684\u6743\u9650\uff0c\u53ef\u4ee5\u5728 \u7528\u6237\u8be6\u60c5 \u4e2d\u67e5\u770b\u8be5\u7528\u6237\u5df2\u52a0\u5165\u7684\u7528\u6237\u7ec4\u3002

                                                  "},{"location":"admin/ghippo/access-control/user.html#_4","title":"\u542f\u7528/\u7981\u7528\u7528\u6237","text":"

                                                  \u7981\u7528\u7528\u6237\u540e\uff0c\u8be5\u7528\u6237\u5c06\u65e0\u6cd5\u518d\u8bbf\u95ee\u5e73\u53f0\u3002\u4e0e\u5220\u9664\u7528\u6237\u4e0d\u540c\uff0c\u7981\u7528\u7684\u7528\u6237\u53ef\u4ee5\u6839\u636e\u9700\u8981\u518d\u6b21\u542f\u7528\uff0c\u5efa\u8bae\u5220\u9664\u7528\u6237\u524d\u5148\u7981\u7528\uff0c\u4ee5\u786e\u4fdd\u6ca1\u6709\u5173\u952e\u670d\u52a1\u5728\u4f7f\u7528\u8be5\u7528\u6237\u521b\u5efa\u7684\u5bc6\u94a5\u3002

                                                  1. \u7ba1\u7406\u5458\u8fdb\u5165 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \uff0c\u9009\u62e9 \u7528\u6237 \uff0c\u8fdb\u5165\u7528\u6237\u5217\u8868\uff0c\u70b9\u51fb\u4e00\u4e2a\u7528\u6237\u540d\u8fdb\u5165\u7528\u6237\u8be6\u60c5\u3002

                                                  2. \u70b9\u51fb\u53f3\u4e0a\u65b9\u7684 \u7f16\u8f91 \uff0c\u5173\u95ed\u72b6\u6001\u6309\u94ae\uff0c\u4f7f\u6309\u94ae\u7f6e\u7070\u4e14\u5904\u4e8e\u672a\u542f\u7528\u72b6\u6001\u3002

                                                  3. \u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u7981\u7528\u7528\u6237\u7684\u64cd\u4f5c\u3002

                                                  "},{"location":"admin/ghippo/access-control/user.html#_5","title":"\u5fd8\u8bb0\u5bc6\u7801","text":"

                                                  \u524d\u63d0\uff1a\u9700\u8981\u8bbe\u7f6e\u7528\u6237\u90ae\u7bb1\uff0c\u6709\u4e24\u79cd\u65b9\u5f0f\u53ef\u4ee5\u8bbe\u7f6e\u7528\u6237\u90ae\u7bb1\u3002

                                                  • \u7ba1\u7406\u5458\u5728\u8be5\u7528\u6237\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb \u7f16\u8f91 \uff0c\u5728\u5f39\u51fa\u6846\u8f93\u5165\u7528\u6237\u90ae\u7bb1\u5730\u5740\uff0c\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u90ae\u7bb1\u8bbe\u7f6e\u3002

                                                  • \u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u5165 \u4e2a\u4eba\u4e2d\u5fc3 \uff0c\u5728 \u5b89\u5168\u8bbe\u7f6e \u9875\u9762\u8bbe\u7f6e\u90ae\u7bb1\u5730\u5740\u3002

                                                  \u5982\u679c\u7528\u6237\u767b\u5f55\u65f6\u5fd8\u8bb0\u5bc6\u7801\uff0c\u8bf7\u53c2\u8003\u91cd\u7f6e\u5bc6\u7801\u3002

                                                  "},{"location":"admin/ghippo/access-control/user.html#_6","title":"\u5220\u9664\u7528\u6237","text":"

                                                  Warning

                                                  \u5220\u9664\u7528\u6237\u540e\uff0c\u8be5\u7528\u6237\u5c06\u65e0\u6cd5\u518d\u901a\u8fc7\u4efb\u4f55\u65b9\u5f0f\u8bbf\u95ee\u5e73\u53f0\u8d44\u6e90\uff0c\u8bf7\u8c28\u614e\u5220\u9664\u3002 \u5728\u5220\u9664\u7528\u6237\u4e4b\u524d\uff0c\u8bf7\u786e\u4fdd\u60a8\u7684\u5173\u952e\u7a0b\u5e8f\u4e0d\u518d\u4f7f\u7528\u8be5\u7528\u6237\u521b\u5efa\u7684\u5bc6\u94a5\u3002 \u5982\u679c\u60a8\u4e0d\u786e\u5b9a\uff0c\u5efa\u8bae\u5728\u5220\u9664\u524d\u5148\u7981\u7528\u8be5\u7528\u6237\u3002 \u5982\u679c\u60a8\u5220\u9664\u4e86\u4e00\u4e2a\u7528\u6237\uff0c\u7136\u540e\u518d\u521b\u5efa\u4e00\u4e2a\u540c\u540d\u7684\u65b0\u7528\u6237\uff0c\u5219\u65b0\u7528\u6237\u5c06\u88ab\u89c6\u4e3a\u4e00\u4e2a\u65b0\u7684\u72ec\u7acb\u8eab\u4efd\uff0c\u5b83\u4e0d\u4f1a\u7ee7\u627f\u5df2\u5220\u9664\u7528\u6237\u7684\u89d2\u8272\u3002

                                                  1. \u7ba1\u7406\u5458\u8fdb\u5165 \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 \uff0c\u9009\u62e9 \u7528\u6237 \uff0c\u8fdb\u5165\u7528\u6237\u5217\u8868\uff0c\u70b9\u51fb \u2507 -> \u5220\u9664 \u3002

                                                  2. \u70b9\u51fb \u79fb\u9664 \u5b8c\u6210\u5220\u9664\u7528\u6237\u7684\u64cd\u4f5c\u3002

                                                  "},{"location":"admin/ghippo/access-control/webhook.html","title":"Webhook \u6d88\u606f\u901a\u77e5","text":"

                                                  \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5728\u63a5\u5165\u5ba2\u6237\u7684\u7cfb\u7edf\u540e\uff0c\u53ef\u4ee5\u521b\u5efa Webhook\uff0c\u5728\u7528\u6237\u521b\u5efa/\u66f4\u65b0/\u5220\u9664/\u767b\u5f55/\u767b\u51fa\u4e4b\u65f6\u53d1\u9001\u6d88\u606f\u901a\u77e5\u3002

                                                  Webhook \u662f\u4e00\u79cd\u7528\u4e8e\u5b9e\u73b0\u5b9e\u65f6\u4e8b\u4ef6\u901a\u77e5\u7684\u673a\u5236\u3002\u5b83\u5141\u8bb8\u4e00\u4e2a\u5e94\u7528\u7a0b\u5e8f\u5c06\u6570\u636e\u6216\u4e8b\u4ef6\u63a8\u9001\u5230\u53e6\u4e00\u4e2a\u5e94\u7528\u7a0b\u5e8f\uff0c \u800c\u65e0\u9700\u8f6e\u8be2\u6216\u6301\u7eed\u67e5\u8be2\u3002\u901a\u8fc7\u914d\u7f6e Webhook\uff0c\u60a8\u53ef\u4ee5\u6307\u5b9a\u5728\u67d0\u4e2a\u4e8b\u4ef6\u53d1\u751f\u65f6\uff0c\u7531\u76ee\u6807\u5e94\u7528\u7a0b\u5e8f\u63a5\u6536\u5e76\u5904\u7406\u901a\u77e5\u3002

                                                  Webhook \u7684\u5de5\u4f5c\u539f\u7406\u5982\u4e0b\uff1a

                                                  1. \u6e90\u5e94\u7528\u7a0b\u5e8f\uff08\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\uff09\u6267\u884c\u67d0\u4e2a\u7279\u5b9a\u64cd\u4f5c\u6216\u4e8b\u4ef6\u3002
                                                  2. \u6e90\u5e94\u7528\u7a0b\u5e8f\u5c06\u76f8\u5173\u6570\u636e\u548c\u4fe1\u606f\u6253\u5305\u6210 HTTP \u8bf7\u6c42\uff0c\u5e76\u5c06\u5176\u53d1\u9001\u5230\u76ee\u6807\u5e94\u7528\u7a0b\u5e8f\u6307\u5b9a\u7684 URL\uff08\u4f8b\u5982\u4f01\u4e1a\u5fae\u4fe1\u7fa4\u673a\u5668\u4eba\uff09\u3002
                                                  3. \u76ee\u6807\u5e94\u7528\u7a0b\u5e8f\u63a5\u6536\u5230\u8bf7\u6c42\u540e\uff0c\u6839\u636e\u5176\u4e2d\u7684\u6570\u636e\u548c\u4fe1\u606f\u8fdb\u884c\u76f8\u5e94\u7684\u5904\u7406\u3002

With webhooks you can achieve:

• Real-time notification: promptly notify other applications via webhook when a specific event occurs.
• Automated processing: the target application can automatically trigger predefined actions based on incoming webhook requests, without manual intervention.
• Data synchronization: pass data from one application to another via webhook to keep it updated in sync.

Common scenarios include:

• In version control systems (e.g. GitHub, GitLab), automatically trigger build and deployment when the code repository changes.
• In e-commerce platforms, send update notifications to the logistics system when an order status changes.
• In chatbot platforms, push received user messages to a target server for processing via webhook.
                                                  "},{"location":"admin/ghippo/access-control/webhook.html#_1","title":"\u914d\u7f6e\u6b65\u9aa4","text":"

                                                  \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u56fe\u5f62\u5316\u914d\u7f6e Webhook \u7684\u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b\uff1a

                                                  1. \u5728 \u5168\u5c40\u7ba1\u7406 -> \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236 -> \u63a5\u5165\u7ba1\u7406 \uff0c\u521b\u5efa\u4e00\u4e2a\u5ba2\u6237\u7aef ID\u3002

                                                  2. \u70b9\u51fb\u67d0\u4e2a\u5ba2\u6237\u7aef ID\uff0c\u8fdb\u5165\u8be6\u60c5\u9875\uff0c\u70b9\u51fb \u521b\u5efa Webhook \u6309\u94ae\u3002

                                                  3. \u5728\u5f39\u7a97\u4e2d\u586b\u5165\u5b57\u6bb5\u4fe1\u606f\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                                                    • \u5bf9\u8c61\uff1a\u76ee\u524d\u4ec5\u652f\u6301 \u7528\u6237 \u5bf9\u8c61
                                                    • \u884c\u4e3a\uff1a\u7528\u6237\u521b\u5efa/\u66f4\u65b0/\u5220\u9664/\u767b\u5f55/\u767b\u5f55\u65f6\u53d1\u9001 Webhook \u6d88\u606f
                                                    • URL\uff1a\u63a5\u6536\u6d88\u606f\u7684\u5730\u5740
                                                    • Method\uff1a\u89c6\u60c5\u51b5\u9009\u62e9\u9002\u7528\u7684\u65b9\u6cd5\uff0c\u4f8b\u5982\u4f01\u4e1a\u5fae\u4fe1\u63a8\u8350\u4f7f\u7528 POST \u65b9\u6cd5
                                                    • \u9ad8\u7ea7\u914d\u7f6e\uff1a\u53ef\u4ee5\u7528 Json \u7f16\u5199\u6d88\u606f\u4f53\u3002\u5982\u679c\u662f\u4f01\u4e1a\u5fae\u4fe1\u7fa4\uff0c\u8bf7\u53c2\u9605\u7fa4\u673a\u5668\u4eba\u914d\u7f6e\u8bf4\u660e

4. The screen indicates that the webhook was created successfully.

5. Now try creating a user.

6. The user is created successfully, and you can see that the WeCom group received a message.

                                                  "},{"location":"admin/ghippo/access-control/webhook.html#_2","title":"\u9ad8\u7ea7\u914d\u7f6e\u793a\u4f8b","text":"

                                                  \u7cfb\u7edf\u9ed8\u8ba4\u7684\u6d88\u606f\u4f53

                                                  \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9884\u5148\u5b9a\u4e49\u4e86\u4e00\u4e9b\u53d8\u91cf\uff0c\u60a8\u53ef\u4ee5\u6839\u636e\u81ea\u5df1\u60c5\u51b5\u5728\u6d88\u606f\u4f53\u4e2d\u4f7f\u7528\u8fd9\u4e9b\u53d8\u91cf\u3002

                                                  {\n  \"id\": \"{{$$.ID$$}}\",\n  \"email\": \"{{$$.Email$$}}\",\n  \"username\": \"{{$$.Name$$}}\",\n  \"last_name\": \"{{$$.LastName$$}}\",\n  \"first_name\": \"{{$$.FirstName$$}}\",\n  \"created_at\": \"{{$$.CreatedAt$$}}\",\n  \"enabled\": \"{{$$.Enabled$$}}\"\n}\n

Message body for a WeCom group bot

                                                  {\n    \"msgtype\": \"text\",\n    \"text\": {\n      \"content\": \"{{$$.Name$$}} hello world\"\n    }\n}\n
                                                  "},{"location":"admin/ghippo/access-control/webhook.html#_3","title":"\u53c2\u8003\u6587\u6863","text":"
                                                  • OEM OUT \u6587\u6863
                                                  • OEM IN \u6587\u6863
                                                  "},{"location":"admin/ghippo/audit/audit-log.html","title":"\u5ba1\u8ba1\u65e5\u5fd7","text":"

                                                  \u5ba1\u8ba1\u65e5\u5fd7\u5e2e\u52a9\u60a8\u76d1\u63a7\u5e76\u8bb0\u5f55\u6bcf\u4e2a\u7528\u6237\u7684\u6d3b\u52a8\uff0c\u63d0\u4f9b\u4e86\u4e0e\u5b89\u5168\u76f8\u5173\u7684\u3001\u6309\u65f6\u95f4\u987a\u5e8f\u6392\u5217\u7684\u8bb0\u5f55\u7684\u6536\u96c6\u3001\u5b58\u50a8\u548c\u67e5\u8be2\u529f\u80fd\u3002 \u501f\u52a9\u5ba1\u8ba1\u65e5\u5fd7\u670d\u52a1\uff0c\u60a8\u53ef\u4ee5\u6301\u7eed\u76d1\u63a7\u5e76\u4fdd\u7559\u7528\u6237\u5728\u5168\u5c40\u7ba1\u7406\u6a21\u5757\u7684\u4f7f\u7528\u884c\u4e3a\uff0c\u5305\u62ec\u4f46\u4e0d\u9650\u4e8e\u521b\u5efa\u7528\u6237\u3001\u7528\u6237\u767b\u5f55/\u767b\u51fa\u3001\u7528\u6237\u6388\u6743\u4ee5\u53ca\u4e0e Kubernetes \u76f8\u5173\u7684\u7528\u6237\u64cd\u4f5c\u884c\u4e3a\u3002

                                                  "},{"location":"admin/ghippo/audit/audit-log.html#_2","title":"\u529f\u80fd\u7279\u6027","text":"

                                                  \u5ba1\u8ba1\u65e5\u5fd7\u529f\u80fd\u5177\u6709\u4ee5\u4e0b\u7279\u70b9\uff1a

                                                  • \u5f00\u7bb1\u5373\u7528\uff1a\u5728\u5b89\u88c5\u4f7f\u7528\u8be5\u5e73\u53f0\u65f6\uff0c\u5ba1\u8ba1\u65e5\u5fd7\u529f\u80fd\u5c06\u4f1a\u88ab\u9ed8\u8ba4\u542f\u7528\uff0c\u81ea\u52a8\u8bb0\u5f55\u4e0e\u7528\u6237\u76f8\u5173\u7684\u5404\u79cd\u884c\u4e3a\uff0c \u5982\u521b\u5efa\u7528\u6237\u3001\u6388\u6743\u3001\u767b\u5f55/\u767b\u51fa\u7b49\u3002\u9ed8\u8ba4\u53ef\u4ee5\u5728\u5e73\u53f0\u5185\u67e5\u770b 365 \u5929\u7684\u7528\u6237\u884c\u4e3a\u3002
                                                  • \u5b89\u5168\u5206\u6790\uff1a\u5ba1\u8ba1\u65e5\u5fd7\u4f1a\u5bf9\u7528\u6237\u64cd\u4f5c\u8fdb\u884c\u8be6\u7ec6\u7684\u8bb0\u5f55\u5e76\u63d0\u4f9b\u5bfc\u51fa\u529f\u80fd\uff0c\u901a\u8fc7\u8fd9\u4e9b\u4e8b\u4ef6\u60a8\u53ef\u4ee5\u5224\u65ad\u8d26\u53f7\u662f\u5426\u5b58\u5728\u98ce\u9669\u3002
                                                  • \u5b9e\u65f6\u8bb0\u5f55\uff1a\u8fc5\u901f\u6536\u96c6\u64cd\u4f5c\u4e8b\u4ef6\uff0c\u7528\u6237\u64cd\u4f5c\u540e\u53ef\u5728\u5ba1\u8ba1\u65e5\u5fd7\u5217\u8868\u8fdb\u884c\u8ffd\u6eaf\uff0c\u968f\u65f6\u53d1\u73b0\u53ef\u7591\u884c\u4e3a\u3002
                                                  • \u65b9\u4fbf\u53ef\u9760\uff1a\u5ba1\u8ba1\u65e5\u5fd7\u652f\u6301\u624b\u52a8\u6e05\u7406\u548c\u81ea\u52a8\u6e05\u7406\u4e24\u79cd\u65b9\u5f0f\uff0c\u53ef\u6839\u636e\u60a8\u7684\u5b58\u50a8\u5927\u5c0f\u914d\u7f6e\u6e05\u7406\u7b56\u7565\u3002
                                                  "},{"location":"admin/ghippo/audit/audit-log.html#_3","title":"\u67e5\u770b\u5ba1\u8ba1\u65e5\u5fd7","text":"
                                                  1. \u4f7f\u7528\u5177\u6709 admin \u6216 Audit Owner \u89d2\u8272\u7684\u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\u3002

                                                  2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u5e95\u90e8\uff0c\u70b9\u51fb \u5168\u5c40\u7ba1\u7406 -> \u5ba1\u8ba1\u65e5\u5fd7 \u3002

                                                  "},{"location":"admin/ghippo/audit/audit-log.html#_4","title":"\u7528\u6237\u64cd\u4f5c","text":"

                                                  \u5728 \u7528\u6237\u64cd\u4f5c \u9875\u7b7e\u4e2d\uff0c\u53ef\u4ee5\u6309\u65f6\u95f4\u8303\u56f4\uff0c\u4e5f\u53ef\u4ee5\u901a\u8fc7\u6a21\u7cca\u641c\u7d22\u3001\u7cbe\u786e\u641c\u7d22\u6765\u67e5\u627e\u7528\u6237\u64cd\u4f5c\u4e8b\u4ef6\u3002

                                                  \u70b9\u51fb\u67d0\u4e2a\u4e8b\u4ef6\u6700\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u67e5\u770b\u4e8b\u4ef6\u8be6\u60c5\u3002

                                                  \u4e8b\u4ef6\u8be6\u60c5\u5982\u4e0b\u56fe\u6240\u793a\u3002

                                                  \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u5bfc\u51fa \u6309\u94ae\uff0c\u53ef\u4ee5\u6309 CSV \u548c Excel \u683c\u5f0f\u5bfc\u51fa\u5f53\u524d\u6240\u9009\u65f6\u95f4\u8303\u56f4\u5185\u7684\u7528\u6237\u64cd\u4f5c\u65e5\u5fd7\u3002

                                                  "},{"location":"admin/ghippo/audit/audit-log.html#_5","title":"\u7cfb\u7edf\u64cd\u4f5c","text":"

                                                  \u5728 \u7cfb\u7edf\u64cd\u4f5c \u9875\u7b7e\u4e2d\uff0c\u53ef\u4ee5\u6309\u65f6\u95f4\u8303\u56f4\uff0c\u4e5f\u53ef\u4ee5\u901a\u8fc7\u6a21\u7cca\u641c\u7d22\u3001\u7cbe\u786e\u641c\u7d22\u6765\u67e5\u627e\u7cfb\u7edf\u64cd\u4f5c\u4e8b\u4ef6\u3002

                                                  \u540c\u6837\u70b9\u51fb\u67d0\u4e2a\u4e8b\u4ef6\u6700\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u67e5\u770b\u4e8b\u4ef6\u8be6\u60c5\u3002

                                                  \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u5bfc\u51fa \u6309\u94ae\uff0c\u53ef\u4ee5\u6309 CSV \u548c Excel \u683c\u5f0f\u5bfc\u51fa\u5f53\u524d\u6240\u9009\u65f6\u95f4\u8303\u56f4\u5185\u7684\u7cfb\u7edf\u64cd\u4f5c\u65e5\u5fd7\u3002

                                                  "},{"location":"admin/ghippo/audit/audit-log.html#_6","title":"\u8bbe\u7f6e","text":"

                                                  \u5728 \u8bbe\u7f6e \u9875\u7b7e\u4e2d\uff0c\u60a8\u53ef\u4ee5\u6e05\u7406\u7528\u6237\u64cd\u4f5c\u548c\u7cfb\u7edf\u64cd\u4f5c\u7684\u5ba1\u8ba1\u65e5\u5fd7\u3002

                                                  \u53ef\u4ee5\u624b\u52a8\u6e05\u7406\uff0c\u5efa\u8bae\u6e05\u7406\u524d\u5148\u5bfc\u51fa\u5e76\u4fdd\u5b58\u3002\u4e5f\u53ef\u4ee5\u8bbe\u7f6e\u65e5\u5fd7\u7684\u6700\u957f\u4fdd\u5b58\u65f6\u95f4\u5b9e\u73b0\u81ea\u52a8\u6e05\u7406\u3002

                                                  Note

                                                  \u5ba1\u8ba1\u65e5\u5fd7\u4e2d\u4e0e Kubernetes \u76f8\u5173\u7684\u65e5\u5fd7\u8bb0\u5f55\u7531\u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u63d0\u4f9b\uff0c\u4e3a\u51cf\u8f7b\u5ba1\u8ba1\u65e5\u5fd7\u7684\u5b58\u50a8\u538b\u529b\uff0c\u5168\u5c40\u7ba1\u7406\u9ed8\u8ba4\u4e0d\u91c7\u96c6 Kubernetes \u76f8\u5173\u65e5\u5fd7\u3002 \u5982\u9700\u8bb0\u5f55\u8bf7\u53c2\u9605\u5f00\u542f K8s \u5ba1\u8ba1\u65e5\u5fd7\u3002\u5f00\u542f\u540e\u7684\u6e05\u7406\u529f\u80fd\u4e0e\u5168\u5c40\u7ba1\u7406\u7684\u6e05\u7406\u529f\u80fd\u4e00\u81f4\uff0c\u4f46\u4e92\u4e0d\u5f71\u54cd\u3002

                                                  "},{"location":"admin/ghippo/audit/open-audit.html","title":"\u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7","text":"
                                                  • \u751f\u6210 K8s \u5ba1\u8ba1\u65e5\u5fd7\uff1aK8s \u672c\u8eab\u751f\u6210\u7684\u5ba1\u8ba1\u65e5\u5fd7\uff0c\u5f00\u542f\u8be5\u529f\u80fd\u540e\uff0c\u4f1a\u5728\u6307\u5b9a\u76ee\u5f55\u4e0b\u751f\u6210 K8s \u5ba1\u8ba1\u65e5\u5fd7\u7684\u65e5\u5fd7\u6587\u4ef6
                                                  • \u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7\uff1a\u901a\u8fc7 insight-agent \u91c7\u96c6\u4e0a\u8ff0 \u2018K8s \u5ba1\u8ba1\u65e5\u5fd7\u2019\u7684\u65e5\u5fd7\u6587\u4ef6\uff0c\u2019\u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7\u2018 \u7684\u524d\u63d0\u6761\u4ef6\u662f\uff1a
                                                    • \u96c6\u7fa4\u751f\u6210\u4e86 \u2018K8s \u5ba1\u8ba1\u65e5\u5fd7\u2018
                                                    • \u65e5\u5fd7\u8f93\u51fa\u5f00\u5173\u5df2\u6253\u5f00
                                                    • \u65e5\u5fd7\u91c7\u96c6\u5f00\u5173\u5df2\u6253\u5f00
                                                  "},{"location":"admin/ghippo/audit/open-audit.html#ai","title":"\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5b89\u88c5\u5b8c\u6210\u65f6\u72b6\u6001","text":"
                                                  • \u7ba1\u7406\u96c6\u7fa4\u7684 K8s \u5ba1\u8ba1\u65e5\u5fd7\u5f00\u5173\u9ed8\u8ba4\u5f00\u542f
                                                  • \u7ba1\u7406\u96c6\u7fa4\u7684\u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7\u5f00\u5173\u9ed8\u8ba4\u5173\u95ed
                                                    • \u9ed8\u8ba4\u8bbe\u7f6e\u4e0d\u652f\u6301\u914d\u7f6e
                                                  "},{"location":"admin/ghippo/audit/open-audit.html#k8s_1","title":"\u7ba1\u7406\u96c6\u7fa4\u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7\u5f00\u5173","text":""},{"location":"admin/ghippo/audit/open-audit.html#k8s_2","title":"\u786e\u8ba4\u662f\u5426\u5f00\u542f\u4e86 K8s \u5ba1\u8ba1\u65e5\u5fd7","text":"

Run the following command to check whether audit logs are generated under the /var/log/kubernetes/audit directory. If so, K8s audit logs are successfully enabled.

                                                  ls /var/log/kubernetes/audit\n

If they are not enabled, refer to Generate K8s Audit Logs.

                                                  "},{"location":"admin/ghippo/audit/open-audit.html#k8s_3","title":"\u5f00\u542f\u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7\u6d41\u7a0b","text":"
                                                  1. \u6dfb\u52a0 chartmuseum \u5230 helm repo \u4e2d

                                                    helm repo add chartmuseum http://10.5.14.30:8081\n

The IP in this command must be changed to the IP address of the seed node.

Note

If you use a self-built Harbor registry, change the chart repo address in step 1 to the insight-agent chart address of your own registry.

2. Save the current insight-agent helm values:

                                                    helm get values insight-agent -n insight-system -o yaml > insight-agent-values-bak.yaml\n
3. Get the current version number ${insight_version_code}:

insight_version_code=`helm list -n insight-system | grep insight-agent | awk '{print $10}'`\n
4. Update the helm values configuration:

                                                    helm upgrade --install --create-namespace --version ${insight_version_code} --cleanup-on-fail insight-agent chartmuseum/insight-agent -n insight-system -f insight-agent-values-bak.yaml --set global.exporters.auditLog.kubeAudit.enabled=true\n
5. Restart all fluent-bit pods under insight-system:

fluent_pod=`kubectl get pod -n insight-system | grep insight-agent-fluent-bit | awk '{print $1}' | xargs`\nkubectl delete pod ${fluent_pod} -n insight-system\n
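To confirm the change took effect, it can help to check that the kubeAudit switch is now true in the released values and that the fluent-bit pods have come back up. A minimal sketch, using the same release and namespace as the steps above:

# Inspect the released values for the auditLog.kubeAudit switch, then check the pods.\nhelm get values insight-agent -n insight-system -o yaml | grep -A 3 auditLog\nkubectl get pod -n insight-system | grep fluent-bit\n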
                                                  "},{"location":"admin/ghippo/audit/open-audit.html#k8s_4","title":"\u5173\u95ed\u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7","text":"

                                                  \u5176\u4f59\u6b65\u9aa4\u548c\u5f00\u542f\u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7\u4e00\u81f4\uff0c\u4ec5\u9700\u4fee\u6539\u4e0a\u4e00\u8282\u4e2d\u7b2c 4 \u6b65\uff1a\u66f4\u65b0 helm value \u914d\u7f6e\u3002

                                                  helm upgrade --install --create-namespace --version ${insight_version_code} --cleanup-on-fail insight-agent chartmuseum/insight-agent -n insight-system -f insight-agent-values-bak.yaml --set global.exporters.auditLog.kubeAudit.enabled=false\n
                                                  "},{"location":"admin/ghippo/audit/open-audit.html#_1","title":"\u5de5\u4f5c\u96c6\u7fa4\u5f00\u5173","text":"

                                                  \u5404\u5de5\u4f5c\u96c6\u7fa4\u5f00\u5173\u72ec\u7acb\uff0c\u6309\u9700\u5f00\u542f\u3002

                                                  "},{"location":"admin/ghippo/audit/open-audit.html#_2","title":"\u521b\u5efa\u96c6\u7fa4\u65f6\u6253\u5f00\u91c7\u96c6\u5ba1\u8ba1\u65e5\u5fd7\u6b65\u9aa4","text":"

                                                  \u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7\u529f\u80fd\u9ed8\u8ba4\u4e3a\u5173\u95ed\u72b6\u6001\u3002\u82e5\u9700\u8981\u5f00\u542f\uff0c\u53ef\u4ee5\u6309\u7167\u5982\u4e0b\u6b65\u9aa4\uff1a

                                                  \u5c06\u8be5\u6309\u94ae\u8bbe\u7f6e\u4e3a\u542f\u7528\u72b6\u6001\uff0c\u5f00\u542f\u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7\u529f\u80fd\u3002

                                                  \u901a\u8fc7\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4\u65f6\uff0c\u786e\u8ba4\u8be5\u96c6\u7fa4\u7684 K8s \u5ba1\u8ba1\u65e5\u5fd7\u9009\u62e9 \u2018true'\uff0c\u8fd9\u6837\u521b\u5efa\u51fa\u6765\u7684\u5de5\u4f5c\u96c6\u7fa4 K8s \u5ba1\u8ba1\u65e5\u5fd7\u662f\u5f00\u542f\u7684\u3002

                                                  \u7b49\u5f85\u96c6\u7fa4\u521b\u5efa\u6210\u529f\u540e\uff0c\u8be5\u5de5\u4f5c\u96c6\u7fa4\u7684 K8s \u5ba1\u8ba1\u65e5\u5fd7\u5c06\u88ab\u91c7\u96c6\u3002

                                                  "},{"location":"admin/ghippo/audit/open-audit.html#_3","title":"\u63a5\u5165\u7684\u96c6\u7fa4\u548c\u521b\u5efa\u5b8c\u6210\u540e\u5f00\u5173\u6b65\u9aa4","text":""},{"location":"admin/ghippo/audit/open-audit.html#k8s_5","title":"\u786e\u8ba4\u5f00\u542f K8s \u5ba1\u8ba1\u65e5\u5fd7","text":"

                                                  \u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u67e5\u770b /var/log/kubernetes/audit \u76ee\u5f55\u4e0b\u662f\u5426\u6709\u5ba1\u8ba1\u65e5\u5fd7\u751f\u6210\uff0c\u82e5\u6709\uff0c\u5219\u8868\u793a K8s \u5ba1\u8ba1\u65e5\u5fd7\u6210\u529f\u5f00\u542f\u3002

                                                  ls /var/log/kubernetes/audit\n

                                                  If it is not enabled, refer to the Enable/Disable K8s Audit Logs section of the documentation.

                                                  "},{"location":"admin/ghippo/audit/open-audit.html#k8s_6","title":"\u5f00\u542f\u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7","text":"

                                                  \u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7\u529f\u80fd\u9ed8\u8ba4\u4e3a\u5173\u95ed\u72b6\u6001\uff0c\u82e5\u9700\u8981\u5f00\u542f\uff0c\u53ef\u4ee5\u6309\u7167\u5982\u4e0b\u6b65\u9aa4\uff1a

                                                  1. \u9009\u4e2d\u5df2\u63a5\u5165\u5e76\u4e14\u9700\u8981\u5f00\u542f\u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7\u529f\u80fd\u7684\u96c6\u7fa4

                                                  2. \u8fdb\u5165 helm \u5e94\u7528\u7ba1\u7406\u9875\u9762\uff0c\u66f4\u65b0 insight-agent \u914d\u7f6e \uff08\u82e5\u672a\u5b89\u88c5 insight-agent\uff0c\u53ef\u4ee5\u5b89\u88c5 insight-agent\uff09

                                                  3. \u5f00\u542f/\u5173\u95ed\u91c7\u96c6 K8s \u5ba1\u8ba1\u65e5\u5fd7\u6309\u94ae

                                                  4. \u63a5\u5165\u96c6\u7fa4\u7684\u60c5\u51b5\u4e0b\u5f00\u5173\u540e\u4ecd\u9700\u8981\u91cd\u542f fluent-bit pod \u624d\u80fd\u751f\u6548
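
                                                    A minimal restart sketch; it assumes insight-agent ships fluent-bit as a DaemonSet named insight-agent-fluent-bit (verify the name first, or fall back to deleting the pods by name as shown earlier):

                                                    # assumed DaemonSet name; check with: kubectl get ds -n insight-system\nkubectl -n insight-system rollout restart daemonset insight-agent-fluent-bit\n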

                                                  "},{"location":"admin/ghippo/audit/open-k8s-audit.html","title":"\u751f\u6210 K8s \u5ba1\u8ba1\u65e5\u5fd7","text":"

                                                  \u9ed8\u8ba4 Kubernetes \u96c6\u7fa4\u4e0d\u4f1a\u751f\u6210\u5ba1\u8ba1\u65e5\u5fd7\u4fe1\u606f\u3002\u901a\u8fc7\u4ee5\u4e0b\u914d\u7f6e\uff0c\u53ef\u4ee5\u5f00\u542f Kubernetes \u7684\u5ba1\u8ba1\u65e5\u5fd7\u529f\u80fd\u3002

                                                  Note

                                                  \u516c\u6709\u4e91\u73af\u5883\u4e2d\u53ef\u80fd\u65e0\u6cd5\u63a7\u5236 Kubernetes \u5ba1\u8ba1\u65e5\u5fd7\u8f93\u51fa\u53ca\u8f93\u51fa\u8def\u5f84\u3002

                                                  1. \u51c6\u5907\u5ba1\u8ba1\u65e5\u5fd7\u7684 Policy \u6587\u4ef6
                                                  2. \u914d\u7f6e API \u670d\u52a1\u5668\uff0c\u5f00\u542f\u5ba1\u8ba1\u65e5\u5fd7
                                                  3. \u91cd\u542f\u5e76\u9a8c\u8bc1
                                                  "},{"location":"admin/ghippo/audit/open-k8s-audit.html#policy","title":"\u51c6\u5907\u5ba1\u8ba1\u65e5\u5fd7 Policy \u6587\u4ef6","text":"\u70b9\u51fb\u67e5\u770b\u5ba1\u8ba1\u65e5\u5fd7 Policy YAML \u6587\u4ef6 policy.yaml
                                                  apiVersion: audit.k8s.io/v1\nkind: Policy\nrules:\n# The following requests were manually identified as high-volume and low-risk,\n# so drop them.\n- level: None\n  users: [\"system:kube-proxy\"]\n  verbs: [\"watch\"]\n  resources:\n    - group: \"\" # core\n      resources: [\"endpoints\", \"services\", \"services/status\"]\n- level: None\n  # Ingress controller reads `configmaps/ingress-uid` through the unsecured port.\n  # TODO(#46983): Change this to the ingress controller service account.\n  users: [\"system:unsecured\"]\n  namespaces: [\"kube-system\"]\n  verbs: [\"get\"]\n  resources:\n    - group: \"\" # core\n      resources: [\"configmaps\"]\n- level: None\n  users: [\"kubelet\"] # legacy kubelet identity\n  verbs: [\"get\"]\n  resources:\n    - group: \"\" # core\n      resources: [\"nodes\", \"nodes/status\"]\n- level: None\n  userGroups: [\"system:nodes\"]\n  verbs: [\"get\"]\n  resources:\n    - group: \"\" # core\n      resources: [\"nodes\", \"nodes/status\"]\n- level: None\n  users:\n    - system:kube-controller-manager\n    - system:kube-scheduler\n    - system:serviceaccount:kube-system:endpoint-controller\n  verbs: [\"get\", \"update\"]\n  namespaces: [\"kube-system\"]\n  resources:\n    - group: \"\" # core\n      resources: [\"endpoints\"]\n- level: None\n  users: [\"system:apiserver\"]\n  verbs: [\"get\"]\n  resources:\n    - group: \"\" # core\n      resources: [\"namespaces\", \"namespaces/status\", \"namespaces/finalize\"]\n# Don't log HPA fetching metrics.\n- level: None\n  users:\n    - system:kube-controller-manager\n  verbs: [\"get\", \"list\"]\n  resources:\n    - group: \"metrics.k8s.io\"\n# Don't log these read-only URLs.\n- level: None\n  nonResourceURLs:\n    - /healthz*\n    - /version\n    - /swagger*\n# Don't log events requests.\n- level: None\n  resources:\n    - group: \"\" # core\n      resources: [\"events\"]\n# Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,\n# so only log at the Metadata level.\n- level: Metadata\n  resources:\n    - group: \"\" # core\n      resources: [\"secrets\", \"configmaps\", \"serviceaccounts/token\"]\n    - group: authentication.k8s.io\n      resources: [\"tokenreviews\"]\n  omitStages:\n    - \"RequestReceived\"\n# Get responses can be large; skip them.\n- level: Request\n  verbs: [\"get\", \"list\", \"watch\"]\n  resources:\n    - group: \"\" # core\n    - group: \"admissionregistration.k8s.io\"\n    - group: \"apiextensions.k8s.io\"\n    - group: \"apiregistration.k8s.io\"\n    - group: \"apps\"\n    - group: \"authentication.k8s.io\"\n    - group: \"authorization.k8s.io\"\n    - group: \"autoscaling\"\n    - group: \"batch\"\n    - group: \"certificates.k8s.io\"\n    - group: \"extensions\"\n    - group: \"metrics.k8s.io\"\n    - group: \"networking.k8s.io\"\n    - group: \"policy\"\n    - group: \"rbac.authorization.k8s.io\"\n    - group: \"settings.k8s.io\"\n    - group: \"storage.k8s.io\"\n  omitStages:\n    - \"RequestReceived\"\n# Default level for known APIs\n- level: RequestResponse\n  resources:\n    - group: \"\" # core\n    - group: \"admissionregistration.k8s.io\"\n    - group: \"apiextensions.k8s.io\"\n    - group: \"apiregistration.k8s.io\"\n    - group: \"apps\"\n    - group: \"authentication.k8s.io\"\n    - group: \"authorization.k8s.io\"\n    - group: \"autoscaling\"\n    - group: \"batch\"\n    - group: \"certificates.k8s.io\"\n    - group: \"extensions\"\n    - group: \"metrics.k8s.io\"\n    - group: \"networking.k8s.io\"\n    - group: \"policy\"\n    - group: \"rbac.authorization.k8s.io\"\n    - group: \"settings.k8s.io\"\n    - group: \"storage.k8s.io\"\n  omitStages:\n    - \"RequestReceived\"\n# Default level for all other requests.\n- level: Metadata\n  omitStages:\n    - \"RequestReceived\"\n

                                                  Place the above audit policy file in the /etc/kubernetes/audit-policy/ folder and name it apiserver-audit-policy.yaml.
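
                                                  A minimal shell sketch, assuming the policy above was saved locally as policy.yaml:

                                                  mkdir -p /etc/kubernetes/audit-policy\ncp policy.yaml /etc/kubernetes/audit-policy/apiserver-audit-policy.yaml\n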

                                                  "},{"location":"admin/ghippo/audit/open-k8s-audit.html#api","title":"\u914d\u7f6e API \u670d\u52a1\u5668","text":"

                                                  \u6253\u5f00 API \u670d\u52a1\u5668\u7684\u914d\u7f6e\u6587\u4ef6 kube-apiserver.yaml \uff0c\u4e00\u822c\u4f1a\u5728 /etc/kubernetes/manifests/ \u6587\u4ef6\u5939\u4e0b\uff0c\u5e76\u6dfb\u52a0\u4ee5\u4e0b\u914d\u7f6e\u4fe1\u606f\uff1a

                                                  \u8fd9\u4e00\u6b65\u64cd\u4f5c\u524d\u8bf7\u5907\u4efd kube-apiserver.yaml \uff0c\u5e76\u4e14\u5907\u4efd\u7684\u6587\u4ef6\u4e0d\u80fd\u653e\u5728 /etc/kubernetes/manifests/ \u4e0b\uff0c\u5efa\u8bae\u653e\u5728 /etc/kubernetes/tmp \u3002
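
                                                  A minimal backup sketch for the paths described above (the .bak suffix is only illustrative):

                                                  mkdir -p /etc/kubernetes/tmp\n# keep the backup outside /etc/kubernetes/manifests/ so the kubelet does not load it\ncp /etc/kubernetes/manifests/kube-apiserver.yaml /etc/kubernetes/tmp/kube-apiserver.yaml.bak\n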

                                                  1. Add the following flags under spec.containers.command:

                                                    --audit-log-maxage=30\n--audit-log-maxbackup=10\n--audit-log-maxsize=100\n--audit-log-path=/var/log/audit/kube-apiserver-audit.log\n--audit-policy-file=/etc/kubernetes/audit-policy/apiserver-audit-policy.yaml\n
                                                  2. Add the following under spec.containers.volumeMounts:

                                                    - mountPath: /var/log/audit\n  name: audit-logs\n- mountPath: /etc/kubernetes/audit-policy\n  name: audit-policy\n
                                                  3. Add the following under spec.volumes:

                                                    - hostPath:\n    path: /var/log/kubernetes/audit\n    type: \"\"\n  name: audit-logs\n- hostPath:\n    path: /etc/kubernetes/audit-policy\n    type: \"\"\n  name: audit-policy\n
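
                                                  After saving the manifest, the kubelet recreates the static pod. A hedged way to watch it come back, assuming a kubeadm-style cluster where the pod carries the component=kube-apiserver label:

                                                  kubectl get pod -n kube-system -l component=kube-apiserver\n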
                                                  "},{"location":"admin/ghippo/audit/open-k8s-audit.html#_1","title":"\u6d4b\u8bd5\u5e76\u9a8c\u8bc1","text":"

                                                  \u7a0d\u7b49\u4e00\u4f1a\uff0cAPI \u670d\u52a1\u5668\u4f1a\u81ea\u52a8\u91cd\u542f\uff0c\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u67e5\u770b /var/log/kubernetes/audit \u76ee\u5f55\u4e0b\u662f\u5426\u6709\u5ba1\u8ba1\u65e5\u5fd7\u751f\u6210\uff0c\u82e5\u6709\uff0c\u5219\u8868\u793a K8s \u5ba1\u8ba1\u65e5\u5fd7\u6210\u529f\u5f00\u542f\u3002

                                                  ls /var/log/kubernetes/audit\n

                                                  To disable it, simply remove the related flags from spec.containers.command.

                                                  "},{"location":"admin/ghippo/audit/source-ip.html","title":"\u5ba1\u8ba1\u65e5\u5fd7\u83b7\u53d6\u6e90 IP","text":"

                                                  \u5ba1\u8ba1\u65e5\u5fd7\u6e90 IP \u5728\u7cfb\u7edf\u548c\u7f51\u7edc\u7ba1\u7406\u4e2d\u626e\u6f14\u7740\u5173\u952e\u89d2\u8272\uff0c\u5b83\u6709\u52a9\u4e8e\u8ffd\u8e2a\u6d3b\u52a8\u3001\u7ef4\u62a4\u5b89\u5168\u3001\u89e3\u51b3\u95ee\u9898\u5e76\u786e\u4fdd\u7cfb\u7edf\u5408\u89c4\u6027\u3002 \u4f46\u662f\u83b7\u53d6\u6e90 IP \u4f1a\u5e26\u6765\u4e00\u5b9a\u7684\u6027\u80fd\u635f\u8017\uff0c\u6240\u4ee5\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u5ba1\u8ba1\u65e5\u5fd7\u5e76\u4e0d\u603b\u662f\u5f00\u542f\u7684\uff0c \u5728\u4e0d\u540c\u7684\u5b89\u88c5\u6a21\u5f0f\u4e0b\uff0c\u5ba1\u8ba1\u65e5\u5fd7\u6e90 IP \u7684\u9ed8\u8ba4\u5f00\u542f\u60c5\u51b5\u4e0d\u540c\uff0c\u5e76\u4e14\u5f00\u542f\u7684\u65b9\u5f0f\u4e0d\u540c\u3002 \u4e0b\u9762\u4f1a\u6839\u636e\u5b89\u88c5\u6a21\u5f0f\u5206\u522b\u4ecb\u7ecd\u5ba1\u8ba1\u65e5\u5fd7\u6e90 IP \u7684\u9ed8\u8ba4\u5f00\u542f\u60c5\u51b5\u4ee5\u53ca\u5982\u4f55\u5f00\u542f\u3002

                                                  Note

                                                  Enabling audit logs changes the replica count of istio-ingressgateway, which incurs some performance overhead. Enabling audit logs also requires turning off kube-proxy load balancing and topology-aware routing, which has some impact on cluster performance. After enabling audit logs, an istio-ingressgateway instance must exist on the node corresponding to the access IP; if istio-ingressgateway drifts to another node due to node health or other issues, it must be manually scheduled back to that node, otherwise normal use of the AI computing platform will be affected.

                                                  "},{"location":"admin/ghippo/audit/source-ip.html#_1","title":"\u5224\u65ad\u5b89\u88c5\u6a21\u5f0f\u7684\u65b9\u6cd5","text":"
                                                  kubectl get pod -n metallb-system\n

                                                  Run the above command in the cluster. If the result is as follows, the cluster is not in MetalLB installation mode:

                                                  No resources found in metallb-system namespace.\n
                                                  "},{"location":"admin/ghippo/audit/source-ip.html#nodeport","title":"NodePort \u5b89\u88c5\u6a21\u5f0f","text":"

                                                  \u8be5\u6a21\u5f0f\u5b89\u88c5\u4e0b\uff0c\u5ba1\u8ba1\u65e5\u5fd7\u6e90 IP \u9ed8\u8ba4\u662f\u5173\u95ed\u7684\uff0c\u5f00\u542f\u6b65\u9aa4\u5982\u4e0b\uff1a

                                                  1. \u8bbe\u7f6e istio-ingressgateway \u7684 HPA \u7684\u6700\u5c0f\u526f\u672c\u6570\u4e3a\u63a7\u5236\u9762\u8282\u70b9\u6570

                                                    count=$(kubectl get nodes --selector=node-role.kubernetes.io/control-plane | wc -l)\ncount=$((count-1))\n\nkubectl patch hpa istio-ingressgateway -n istio-system -p '{\"spec\":{\"minReplicas\":'$count'}}'\n
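
                                                    To confirm the new floor took effect, read the HPA back (a quick sketch; MINPODS should match the control-plane count computed above):

                                                    kubectl get hpa istio-ingressgateway -n istio-system\n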
                                                  2. Set the istio-ingressgateway Service's externalTrafficPolicy and internalTrafficPolicy values to \"Local\"

                                                    kubectl patch svc istio-ingressgateway -n istio-system -p '{\"spec\":{\"externalTrafficPolicy\":\"Local\",\"internalTrafficPolicy\":\"Local\"}}'\n
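
                                                    To verify the patch, a read-back sketch (both fields should print Local):

                                                    kubectl get svc istio-ingressgateway -n istio-system -o jsonpath='{.spec.externalTrafficPolicy} {.spec.internalTrafficPolicy}'\n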
                                                  "},{"location":"admin/ghippo/audit/source-ip.html#metallb","title":"MetalLB \u5b89\u88c5\u6a21\u5f0f","text":"

                                                  \u8be5\u6a21\u5f0f\u4e0b\u5b89\u88c5\u5b8c\u6210\u540e\uff0c\u4f1a\u9ed8\u8ba4\u83b7\u53d6\u5ba1\u8ba1\u65e5\u5fd7\u6e90 IP\u3002

                                                  "},{"location":"admin/ghippo/audit/gproduct-audit/ghippo.html","title":"\u5168\u5c40\u7ba1\u7406\u5ba1\u8ba1\u9879\u6c47\u603b","text":"\u4e8b\u4ef6\u540d\u79f0 \u8d44\u6e90\u7c7b\u578b \u5907\u6ce8 \u4fee\u6539\u7528\u6237email\uff1aUpdateEmail-Account Account \u4fee\u6539\u7528\u6237\u5bc6\u7801\uff1aUpdatePassword-Account Account \u521b\u5efask\uff1aCreateAccessKeys-Account Account \u4fee\u6539sk\uff1aUpdateAccessKeys-Account Account \u5220\u9664sk\uff1aDeleteAccessKeys-Account Account \u521b\u5efa\u7528\u6237\uff1aCreate-User User \u5220\u9664\u7528\u6237\uff1aDelete-User User \u66f4\u65b0\u7528\u6237\u4fe1\u606f\uff1aUpdate-User User \u66f4\u65b0\u7528\u6237\u89d2\u8272\uff1a UpdateRoles-User User \u8bbe\u7f6e\u7528\u6237\u5bc6\u7801\uff1a UpdatePassword-User User \u521b\u5efa\u7528\u6237\u5bc6\u94a5\uff1a CreateAccessKeys-User User \u66f4\u65b0\u7528\u6237\u5bc6\u94a5\uff1a UpdateAccessKeys-User User \u5220\u9664\u7528\u6237\u5bc6\u94a5\uff1aDeleteAccessKeys-User User \u521b\u5efa\u7528\u6237\u7ec4\uff1aCreate-Group Group \u5220\u9664\u7528\u6237\u7ec4\uff1aDelete-Group Group \u66f4\u65b0\u7528\u6237\u7ec4\uff1aUpdate-Group Group \u6dfb\u52a0\u7528\u6237\u81f3\u7528\u6237\u7ec4\uff1aAddUserTo-Group Group \u4ece\u7528\u6237\u7ec4\u5220\u9664\u7528\u6237\uff1a RemoveUserFrom-Group Group \u66f4\u65b0\u7528\u6237\u7ec4\u89d2\u8272\uff1a UpdateRoles-Group Group \u89d2\u8272\u5173\u8054\u7528\u6237\uff1aUpdateRoles-User User \u521b\u5efaLdap \uff1aCreate-LADP LADP \u66f4\u65b0Ldap\uff1aUpdate-LADP LADP \u5220\u9664Ldap \uff1a Delete-LADP LADP OIDC\u6ca1\u6709\u8d70APIserver\u5ba1\u8ba1\u4e0d\u5230 \u767b\u5f55\uff1aLogin-User User \u767b\u51fa\uff1aLogout-User User \u8bbe\u7f6e\u5bc6\u7801\u7b56\u7565\uff1aUpdatePassword-SecurityPolicy SecurityPolicy \u8bbe\u7f6e\u4f1a\u8bdd\u8d85\u65f6\uff1aUpdateSessionTimeout-SecurityPolicy SecurityPolicy \u8bbe\u7f6e\u8d26\u53f7\u9501\u5b9a\uff1aUpdateAccountLockout-SecurityPolicy SecurityPolicy \u8bbe\u7f6e\u81ea\u52a8\u767b\u51fa\uff1aUpdateLogout-SecurityPolicy SecurityPolicy \u90ae\u4ef6\u670d\u52a1\u5668\u8bbe\u7f6e MailServer-SecurityPolicy SecurityPolicy \u5916\u89c2\u5b9a\u5236 CustomAppearance-SecurityPolicy SecurityPolicy \u6b63\u7248\u6388\u6743 OfficialAuthz-SecurityPolicy SecurityPolicy \u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4\uff1aCreate-Workspace Workspace \u5220\u9664\u5de5\u4f5c\u7a7a\u95f4\uff1aDelete-Workspace Workspace \u7ed1\u5b9a\u8d44\u6e90\uff1aBindResourceTo-Workspace Workspace \u89e3\u7ed1\u8d44\u6e90\uff1aUnBindResource-Workspace Workspace \u7ed1\u5b9a\u5171\u4eab\u8d44\u6e90\uff1aBindShared-Workspace Workspace \u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff1aSetQuota-Workspace Workspace \u5de5\u4f5c\u7a7a\u95f4\u6388\u6743\uff1aAuthorize-Workspace Workspace \u5220\u9664\u6388\u6743 DeAuthorize-Workspace Workspace \u7f16\u8f91\u6388\u6743 UpdateDeAuthorize-Workspace Workspace \u66f4\u65b0\u5de5\u4f5c\u7a7a\u95f4 Update-Workspace Workspace \u521b\u5efa\u6587\u4ef6\u5939\uff1aCreate-Folder Folder \u5220\u9664\u6587\u4ef6\u5939\uff1aDelete-Folder Folder \u7f16\u8f91\u6587\u4ef6\u5939\u6388\u6743\uff1aUpdateAuthorize-Folder Folder \u66f4\u65b0\u6587\u4ef6\u5939\uff1aUpdate-Folder Folder \u65b0\u589e\u6587\u4ef6\u5939\u6388\u6743\uff1aAuthorize-Folder Folder \u5220\u9664\u6587\u4ef6\u5939\u6388\u6743\uff1aDeAuthorize-Folder Folder \u8bbe\u7f6e\u5ba1\u8ba1\u65e5\u5fd7\u81ea\u52a8\u6e05\u7406\uff1aAutoCleanup-Audit Audit \u624b\u52a8\u6e05\u7406\u5ba1\u8ba1\u65e5\u5fd7\uff1aManualCleanup-Audit 
Audit \u5bfc\u51fa\u5ba1\u8ba1\u65e5\u5fd7\uff1aExport-Audit Audit"},{"location":"admin/ghippo/audit/gproduct-audit/insight.html","title":"\u53ef\u89c2\u6d4b\u6027\u5ba1\u8ba1\u9879\u6c47\u603b","text":"\u4e8b\u4ef6\u540d\u79f0 \u8d44\u6e90\u7c7b\u578b \u5907\u6ce8 \u521b\u5efa\u62e8\u6d4b\u4efb\u52a1\uff1aCreate-ProbeJob ProbeJob \u7f16\u8f91\u62e8\u6d4b\u4efb\u52a1\uff1aUpdate-ProbeJob ProbeJob \u5220\u9664\u62e8\u6d4b\u4efb\u52a1\uff1aDelete-ProbeJob ProbeJob \u521b\u5efa\u544a\u8b66\u7b56\u7565\uff1aCreate-AlertPolicy AlertPolicy \u7f16\u8f91\u544a\u8b66\u7b56\u7565\uff1aUpdate-AlertPolicy AlertPolicy \u5220\u9664\u544a\u8b66\u7b56\u7565\uff1aDelete-AlertPolicy AlertPolicy \u5bfc\u5165\u544a\u8b66\u7b56\u7565\uff1aImport-AlertPolicy AlertPolicy \u5728\u544a\u8b66\u7b56\u7565\u4e2d\u6dfb\u52a0\u89c4\u5219\uff1aCreate-AlertRule AlertRule \u5728\u544a\u8b66\u7b56\u7565\u4e2d\u7f16\u8f91\u89c4\u5219\uff1aUpdate-AlertRule AlertRule \u5728\u544a\u8b66\u7b56\u7565\u4e2d\u5220\u9664\u89c4\u5219\uff1aDelete-AlertRule AlertRule \u521b\u5efa\u544a\u8b66\u6a21\u677f\uff1aCreate-RuleTemplate RuleTemplate \u7f16\u8f91\u544a\u8b66\u6a21\u677f\uff1aUpdate-RuleTemplate RuleTemplate \u5220\u9664\u544a\u8b66\u6a21\u677f\uff1aDelete-RuleTemplate RuleTemplate \u521b\u5efa\u90ae\u7bb1\u7ec4\uff1aCreate-email email \u7f16\u8f91\u90ae\u7bb1\u7ec4\uff1aUpdate-email email \u5220\u9664\u90ae\u7bb1\u7ec4\uff1aDelete-Receiver Receiver \u521b\u5efa\u9489\u9489\u673a\u5668\u4eba\uff1aCreate-dingtalk dingtalk \u7f16\u8f91\u9489\u9489\u673a\u5668\u4eba\uff1aUpdate-dingtalk dingtalk \u5220\u9664\u9489\u9489\u673a\u5668\u4eba\uff1aDelete-Receiver Receiver \u521b\u5efa\u4f01\u5fae\u673a\u5668\u4eba\uff1aCreate-wecom wecom \u7f16\u8f91\u4f01\u5fae\u673a\u5668\u4eba\uff1aUpdate-wecom wecom \u5220\u9664\u4f01\u5fae\u673a\u5668\u4eba\uff1aDelete-Receiver Receiver \u521b\u5efa Webhook\uff1aCreate-webhook webhook \u7f16\u8f91 Webhook\uff1aUpdate-webhook webhook \u5220\u9664 Webhook\uff1aDelete-Receiver Receiver \u521b\u5efa SMS\uff1aCreate-sms sms \u7f16\u8f91 SMS\uff1aUpdate-sms sms \u5220\u9664 SMS\uff1aDelete-Receiver Receiver \u521b\u5efa SMS \u670d\u52a1\u5668\uff1aCreate-aliyun(\u6216\u8005\uff1atencent\uff0ccustom) aliyun, tencent, custom \u7f16\u8f91 SMS \u670d\u52a1\u5668\uff1aUpdate-aliyun(\u6216\u8005\uff1atencent\uff0ccustom) aliyun, tencent, custom \u5220\u9664 SMS \u670d\u52a1\u5668\uff1aDelete-SMSserver SMSserver \u521b\u5efa\u6d88\u606f\u6a21\u677f\uff1aCreate-MessageTemplate MessageTemplate \u7f16\u8f91\u6d88\u606f\u6a21\u677f\uff1aUpdate-MessageTemplate MessageTemplate \u5220\u9664\u6d88\u606f\u6a21\u677f\uff1aDelete-MessageTemplate MessageTemplate \u521b\u5efa\u544a\u8b66\u9759\u9ed8\uff1aCreate-AlertSilence AlertSilence \u7f16\u8f91\u544a\u8b66\u9759\u9ed8\uff1aUpdate-AlertSilence AlertSilence \u5220\u9664\u544a\u8b66\u9759\u9ed8\uff1aDelete-AlertSilence AlertSilence \u521b\u5efa\u544a\u8b66\u6291\u5236\u89c4\u5219\uff1aCreate-AlertInhibition AlertInhibition \u7f16\u8f91\u544a\u8b66\u6291\u5236\u89c4\u5219\uff1aUpdate-AlertInhibition AlertInhibition \u5220\u9664\u544a\u8b66\u6291\u5236\u89c4\u5219\uff1aDelete-AlertInhibition AlertInhibition \u66f4\u65b0\u7cfb\u7edf\u914d\u7f6e\uff1aUpdate-SystemSettings SystemSettings"},{"location":"admin/ghippo/audit/gproduct-audit/kpanda.html","title":"\u5bb9\u5668\u7ba1\u7406\u5ba1\u8ba1\u9879\u6c47\u603b","text":"\u4e8b\u4ef6\u540d\u79f0 \u8d44\u6e90\u7c7b\u578b \u521b\u5efa\u96c6\u7fa4\uff1aCreate-Cluster Cluster \u5378\u8f7d\u96c6\u7fa4\uff1aDelete-Cluster Cluster 
\u63a5\u5165\u96c6\u7fa4\uff1aIntegrate-Cluster Cluster \u89e3\u9664\u63a5\u5165\u7684\u96c6\u7fa4\uff1aRemove-Cluster Cluster \u96c6\u7fa4\u5347\u7ea7\uff1aUpgrade-Cluster Cluster \u96c6\u7fa4\u63a5\u5165\u8282\u70b9\uff1aIntegrate-Node Node \u96c6\u7fa4\u8282\u70b9\u79fb\u9664\uff1aRemove-Node Node \u96c6\u7fa4\u8282\u70b9 GPU \u6a21\u5f0f\u5207\u6362\uff1aUpdate-NodeGPUMode NodeGPUMode helm\u4ed3\u5e93\u521b\u5efa\uff1aCreate-HelmRepo HelmRepo helm\u5e94\u7528\u90e8\u7f72\uff1aCreate-HelmApp HelmApp helm\u5e94\u7528\u5220\u9664\uff1aDelete-HelmApp HelmApp \u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\uff1aCreate-Deployment Deployment \u5220\u9664\u65e0\u72b6\u6001\u8d1f\u8f7d\uff1aDelete-Deployment Deployment \u521b\u5efa\u5b88\u62a4\u8fdb\u7a0b\uff1aCreate-DaemonSet DaemonSet \u5220\u9664\u5b88\u62a4\u8fdb\u7a0b\uff1aDelete-DaemonSet DaemonSet \u521b\u5efa\u6709\u72b6\u6001\u8d1f\u8f7d\uff1aCreate-StatefulSet StatefulSet \u5220\u9664\u6709\u72b6\u6001\u8d1f\u8f7d\uff1aDelete-StatefulSet StatefulSet \u521b\u5efa\u4efb\u52a1\uff1aCreate-Job Job \u5220\u9664\u4efb\u52a1\uff1aDelete-Job Job \u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\uff1aCreate-CronJob CronJob \u5220\u9664\u5b9a\u65f6\u4efb\u52a1\uff1aDelete-CronJob CronJob \u5220\u9664\u5bb9\u5668\u7ec4\uff1aDelete-Pod Pod \u521b\u5efa\u670d\u52a1\uff1aCreate-Service Service \u5220\u9664\u670d\u52a1\uff1aDelete-Service Service \u521b\u5efa\u8def\u7531\uff1aCreate-Ingress Ingress \u5220\u9664\u8def\u7531\uff1aDelete-Ingress Ingress \u521b\u5efa\u5b58\u50a8\u6c60\uff1aCreate-StorageClass StorageClass \u5220\u9664\u5b58\u50a8\u6c60\uff1aDelete-StorageClass StorageClass \u521b\u5efa\u6570\u636e\u5377\uff1aCreate-PersistentVolume PersistentVolume \u5220\u9664\u6570\u636e\u5377\uff1aDelete-PersistentVolume PersistentVolume \u521b\u5efa\u6570\u636e\u5377\u58f0\u660e\uff1aCreate-PersistentVolumeClaim PersistentVolumeClaim \u5220\u9664\u6570\u636e\u5377\u58f0\u660e\uff1aDelete-PersistentVolumeClaim PersistentVolumeClaim \u5220\u9664\u526f\u672c\u96c6\uff1aDelete-ReplicaSet ReplicaSet ns\u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4\uff1aBindResourceTo-Workspace Workspace ns\u89e3\u7ed1\u5de5\u4f5c\u7a7a\u95f4 \uff1aUnBindResource-Workspace Workspace \u96c6\u7fa4\u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4\uff1aBindResourceTo-Workspace Workspace \u96c6\u7fa4\u89e3\u7ed1\u5de5\u4f5c\u7a7a\u95f4\uff1aUnBindResource-Workspace Workspace \u6253\u5f00\u63a7\u5236\u53f0\uff1aCreate-CloudShell CloudShell \u5173\u95ed\u63a7\u5236\u53f0\uff1aDelete-CloudShell CloudShell"},{"location":"admin/ghippo/audit/gproduct-audit/virtnest.html","title":"\u4e91\u4e3b\u673a\u5ba1\u8ba1\u9879\u6c47\u603b","text":"\u4e8b\u4ef6\u540d\u79f0 \u8d44\u6e90\u7c7b\u578b \u5907\u6ce8 \u91cd\u542f\u4e91\u4e3b\u673a\uff1aRestart-VMs VM \u4e91\u4e3b\u673a\u8f6c\u6362\u4e3a\u6a21\u677f\uff1aConvertToTemplate-VMs VM \u7f16\u8f91\u4e91\u4e3b\u673a\uff1aEdit-VMs VM \u66f4\u65b0\u4e91\u4e3b\u673a\uff1aUpdate-VMs VM \u5feb\u7167\u6062\u590d\uff1aRestore-VMs VM \u5f00\u673a\u4e91\u4e3b\u673a\uff1aPower on-VMs VM \u5b9e\u65f6\u8fc1\u79fb\uff1aLiveMigrate-VMs VM \u5220\u9664\u4e91\u4e3b\u673a\uff1aDelete-VMs VM \u5220\u9664\u4e91\u4e3b\u673a\u6a21\u677f\uff1aDelete-VM Template VM Template \u521b\u5efa\u4e91\u4e3b\u673a\uff1aCreate-VMs VM \u521b\u5efa\u5feb\u7167\uff1aCreateSnapshot-VMs VM \u5173\u673a\u4e91\u4e3b\u673a\uff1aPower off-VMs VM \u514b\u9686\u4e91\u4e3b\u673a\uff1aClone-VMs VM"},{"location":"admin/ghippo/best-practice/authz-plan.html","title":"\u666e\u901a\u7528\u6237\u6388\u6743\u89c4\u5212","text":"

                                                  A regular user is one who can use most product modules and features of the AI Computing Center (except management features), has certain operation permissions on resources within their scope, and can independently use resources to deploy applications.

                                                  The authorization and resource planning process for such users is shown in the diagram below.

                                                  graph TB\n\n    start([Start]) --> user[1. Create a user]\n    user --> ns[2. Prepare a Kubernetes namespace]\n    ns --> ws[3. Prepare a workspace]\n    ws --> ws-to-ns[4. Bind the workspace to the namespace]\n    ws-to-ns --> authu[5. Grant the user Workspace Editor]\n    authu --> complete([End])\n\nclick user \"https://docs.daocloud.io/ghippo/user-guide/access-control/user/\"\nclick ns \"https://docs.daocloud.io/kpanda/user-guide/namespaces/createns/\"\nclick ws \"https://docs.daocloud.io/ghippo/user-guide/workspace/workspace/\"\nclick ws-to-ns \"https://docs.daocloud.io/ghippo/user-guide/workspace/ws-to-ns/\"\nclick authu \"https://docs.daocloud.io/ghippo/user-guide/workspace/ws-permission/\"\n\n classDef plain fill:#ddd,stroke:#fff,stroke-width:4px,color:#000;\n classDef k8s fill:#326ce5,stroke:#fff,stroke-width:4px,color:#fff;\n classDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n class user,ns,ws,ws-to-ns,authu cluster;\n class start,complete plain;
                                                  "},{"location":"admin/ghippo/best-practice/cluster-for-multiws.html","title":"\u5c06\u96c6\u7fa4\u5206\u914d\u7ed9\u591a\u4e2a\u5de5\u4f5c\u7a7a\u95f4\uff08\u79df\u6237\uff09","text":"

                                                  \u96c6\u7fa4\u8d44\u6e90\u901a\u5e38\u7531\u8fd0\u7ef4\u4eba\u5458\u8fdb\u884c\u7ba1\u7406\u3002\u5728\u5206\u914d\u8d44\u6e90\u5206\u914d\u65f6\uff0c\u4ed6\u4eec\u9700\u8981\u521b\u5efa\u547d\u540d\u7a7a\u95f4\u6765\u9694\u79bb\u8d44\u6e90\uff0c\u5e76\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\u3002 \u8fd9\u79cd\u65b9\u5f0f\u6709\u4e2a\u5f0a\u7aef\uff0c\u5982\u679c\u4f01\u4e1a\u7684\u4e1a\u52a1\u91cf\u5f88\u5927\uff0c\u624b\u52a8\u5206\u914d\u8d44\u6e90\u9700\u8981\u8f83\u5927\u7684\u5de5\u4f5c\u91cf\uff0c\u800c\u60f3\u8981\u7075\u6d3b\u8c03\u914d\u8d44\u6e90\u989d\u5ea6\u4e5f\u6709\u4e0d\u5c0f\u96be\u5ea6\u3002

                                                  AI \u7b97\u529b\u4e2d\u5fc3\u4e3a\u6b64\u5f15\u5165\u4e86\u5de5\u4f5c\u7a7a\u95f4\u7684\u6982\u5ff5\u3002\u5de5\u4f5c\u7a7a\u95f4\u901a\u8fc7\u5171\u4eab\u8d44\u6e90\u53ef\u4ee5\u63d0\u4f9b\u66f4\u9ad8\u7ef4\u5ea6\u7684\u8d44\u6e90\u9650\u989d\u80fd\u529b\uff0c\u5b9e\u73b0\u5de5\u4f5c\u7a7a\u95f4\uff08\u79df\u6237\uff09\u5728\u8d44\u6e90\u9650\u989d\u4e0b\u81ea\u52a9\u5f0f\u521b\u5efa Kubernetes \u547d\u540d\u7a7a\u95f4\u7684\u80fd\u529b\u3002

                                                  \u4e3e\u4f8b\u800c\u8a00\uff0c\u5982\u679c\u60f3\u8981\u8ba9\u51e0\u4e2a\u90e8\u95e8\u5171\u4eab\u4e0d\u540c\u7684\u96c6\u7fa4\u3002

                                                  Cluster01\uff08\u666e\u901a\uff09 Cluster02\uff08\u9ad8\u53ef\u7528\uff09 \u90e8\u95e8\uff08\u5de5\u4f5c\u7a7a\u95f4\uff09A 50 quota 10 quota \u90e8\u95e8\uff08\u5de5\u4f5c\u7a7a\u95f4\uff09B 100 quota 20 quota

                                                  \u53ef\u4ee5\u53c2\u7167\u4ee5\u4e0b\u6d41\u7a0b\u5c06\u96c6\u7fa4\u5206\u4eab\u7ed9\u591a\u4e2a\u90e8\u95e8/\u5de5\u4f5c\u7a7a\u95f4/\u79df\u6237\uff1a

                                                  graph TB\n\npreparews[Prepare a workspace] --> preparecs[Prepare a cluster]\n--> share[Share the cluster with the workspace]\n--> judge([Check the remaining workspace quota])\njudge -.Exceeds the remaining quota.->modifyns[Modify the namespace quota]\njudge -.Within the remaining quota.->createns[Create a namespace]\n\nclassDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;\nclassDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;\nclassDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n\nclass preparews,preparecs,share, cluster;\nclass judge plain\nclass modifyns,createns k8s\n\nclick preparews \"https://docs.daocloud.io/ghippo/user-guide/workspace/cluster-for-multiws/#_2\"\nclick preparecs \"https://docs.daocloud.io/ghippo/user-guide/workspace/cluster-for-multiws/#_3\"\nclick share \"https://docs.daocloud.io/ghippo/user-guide/workspace/cluster-for-multiws/#_4\"\nclick createns \"https://docs.daocloud.io/amamba/user-guide/namespace/namespace/#_3\"\nclick modifyns \"https://docs.daocloud.io/amamba/user-guide/namespace/namespace/#_4\"
                                                  "},{"location":"admin/ghippo/best-practice/cluster-for-multiws.html#_2","title":"\u51c6\u5907\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4","text":"

                                                  \u5de5\u4f5c\u7a7a\u95f4\u662f\u4e3a\u4e86\u6ee1\u8db3\u591a\u79df\u6237\u7684\u4f7f\u7528\u573a\u666f\uff0c\u57fa\u4e8e\u96c6\u7fa4\u3001\u96c6\u7fa4\u547d\u540d\u7a7a\u95f4\u3001\u7f51\u683c\u3001\u7f51\u683c\u547d\u540d\u7a7a\u95f4\u3001\u591a\u4e91\u3001\u591a\u4e91\u547d\u540d\u7a7a\u95f4\u7b49\u591a\u79cd\u8d44\u6e90\u5f62\u6210\u76f8\u4e92\u9694\u79bb\u7684\u8d44\u6e90\u73af\u5883\uff0c \u5de5\u4f5c\u7a7a\u95f4\u53ef\u4ee5\u6620\u5c04\u4e3a\u9879\u76ee\u3001\u79df\u6237\u3001\u4f01\u4e1a\u3001\u4f9b\u5e94\u5546\u7b49\u591a\u79cd\u6982\u5ff5\u3002

                                                  1. \u4f7f\u7528 admin/folder admin \u89d2\u8272\u7684\u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u5e95\u90e8\u7684 \u5168\u5c40\u7ba1\u7406 -> \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \u3002

                                                  2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4 \u6309\u94ae\u3002

                                                  3. \u586b\u5199\u5de5\u4f5c\u7a7a\u95f4\u540d\u79f0\u3001\u6240\u5c5e\u6587\u4ef6\u5939\u7b49\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \uff0c\u5b8c\u6210\u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4\u3002

                                                  "},{"location":"admin/ghippo/best-practice/cluster-for-multiws.html#_3","title":"\u51c6\u5907\u4e00\u4e2a\u96c6\u7fa4","text":"

                                                  Follow these steps to prepare a cluster.

                                                  1. Click Container Management at the bottom of the left navigation bar, then select Cluster List.

                                                  2. Click Create Cluster to create a cluster, or click Integrate Cluster to integrate an existing one.

                                                  "},{"location":"admin/ghippo/best-practice/cluster-for-multiws.html#_4","title":"\u5728\u5de5\u4f5c\u7a7a\u95f4\u6dfb\u52a0\u96c6\u7fa4","text":"

                                                  \u8fd4\u56de \u5168\u5c40\u7ba1\u7406 \uff0c\u4e3a\u5de5\u4f5c\u7a7a\u95f4\u6dfb\u52a0\u96c6\u7fa4\u3002

                                                  1. \u4f9d\u6b21\u70b9\u51fb \u5168\u5c40\u7ba1\u7406 -> \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 -> \u5171\u4eab\u8d44\u6e90 \uff0c\u70b9\u51fb\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u540d\u79f0\u540e\uff0c\u70b9\u51fb \u65b0\u589e\u5171\u4eab\u8d44\u6e90 \u6309\u94ae\u3002

                                                  2. \u9009\u62e9\u96c6\u7fa4\uff0c\u586b\u5199\u8d44\u6e90\u9650\u989d\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                                                  \u4e0b\u4e00\u6b65\uff1a\u5c06\u96c6\u7fa4\u8d44\u6e90\u5206\u914d\u7ed9\u591a\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u540e\uff0c\u7528\u6237\u53ef\u4ee5\u524d\u5f80 \u5e94\u7528\u5de5\u4f5c\u53f0 \u5728\u8fd9\u4e9b\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u521b\u5efa\u547d\u540d\u7a7a\u95f4\u5e76\u90e8\u7f72\u5e94\u7528\u3002

                                                  "},{"location":"admin/ghippo/best-practice/folder-practice.html","title":"\u6587\u4ef6\u5939\u6700\u4f73\u5b9e\u8df5","text":"

                                                  \u6587\u4ef6\u5939\u4ee3\u8868\u4e00\u4e2a\u7ec4\u7ec7\u673a\u6784\uff08\u4f8b\u5982\u4e00\u4e2a\u90e8\u95e8\uff09\uff0c\u662f\u8d44\u6e90\u5c42\u6b21\u7ed3\u6784\u4e2d\u7684\u4e00\u4e2a\u8282\u70b9\u3002

                                                  \u4e00\u4e2a\u6587\u4ef6\u5939\u53ef\u4ee5\u5305\u542b\u5de5\u4f5c\u7a7a\u95f4\u3001\u5b50\u6587\u4ef6\u5939\u6216\u4e24\u8005\u7684\u7ec4\u5408\u3002 \u5b83\u63d0\u4f9b\u4e86\u8eab\u4efd\u7ba1\u7406\u3001\u591a\u5c42\u7ea7\u548c\u6743\u9650\u6620\u5c04\u80fd\u529b\uff0c\u80fd\u591f\u5c06\u7528\u6237/\u7528\u6237\u7ec4\u5728\u6587\u4ef6\u5939\u4e2d\u7684\u89d2\u8272\u6620\u5c04\u5230\u5176\u4e0b\u7684\u5b50\u6587\u4ef6\u5939\u3001\u5de5\u4f5c\u7a7a\u95f4\u548c\u8d44\u6e90\u4e0a\u3002 \u56e0\u6b64\u501f\u52a9\u4e8e\u6587\u4ef6\u5939\uff0c\u4f01\u4e1a\u7ba1\u7406\u8005\u80fd\u591f\u96c6\u4e2d\u7ba1\u63a7\u6240\u6709\u8d44\u6e90\u3002

                                                  1. Build the enterprise hierarchy

                                                    First, mirror the existing enterprise hierarchy with an identical folder hierarchy. The AI Computing Center supports five levels of folders, which can be combined freely according to the enterprise's actual situation, mapping folders and workspaces to entities such as departments, projects, and suppliers.

                                                    Folders are not directly tied to resources; resources are grouped indirectly through workspaces.

                                                  2. User identity management

                                                    Folders provide three roles: Folder Admin, Folder Editor, and Folder Viewer. View the role permissions, and grant different roles to users/user groups in the same folder through authorization.

                                                  3. Role and permission mapping

                                                    Enterprise administrator: grant the Folder Admin role on the root folder. They will have management permissions over all departments, projects, and their resources.

                                                    Department manager: grant management permissions individually on each subfolder and workspace.

                                                    Project member: grant permissions individually at the workspace and resource levels.

                                                  "},{"location":"admin/ghippo/best-practice/super-group.html","title":"\u8d85\u5927\u578b\u4f01\u4e1a\u7684\u67b6\u6784\u7ba1\u7406","text":"

                                                  \u4f34\u968f\u4e1a\u52a1\u7684\u6301\u7eed\u6269\u5f20\uff0c\u516c\u53f8\u89c4\u6a21\u4e0d\u65ad\u58ee\u5927\uff0c\u5b50\u516c\u53f8\u3001\u5206\u516c\u53f8\u7eb7\u7eb7\u8bbe\u7acb\uff0c\u6709\u7684\u5b50\u516c\u53f8\u8fd8\u8fdb\u4e00\u6b65\u8bbe\u7acb\u5b59\u516c\u53f8\uff0c \u539f\u5148\u7684\u5927\u90e8\u95e8\u4e5f\u9010\u6e10\u7ec6\u5206\u6210\u591a\u4e2a\u5c0f\u90e8\u95e8\uff0c\u4ece\u800c\u4f7f\u5f97\u7ec4\u7ec7\u7ed3\u6784\u7684\u5c42\u7ea7\u65e5\u76ca\u589e\u591a\u3002\u8fd9\u79cd\u7ec4\u7ec7\u7ed3\u6784\u7684\u53d8\u5316\uff0c\u4e5f\u5bf9 IT \u6cbb\u7406\u67b6\u6784\u4ea7\u751f\u4e86\u5f71\u54cd\u3002

                                                  \u5177\u4f53\u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b\uff1a

                                                  1. Enable the isolation mode between Folder/WS

                                                    Refer to Enable the isolation mode between Folder/WS.

                                                  2. Plan the enterprise architecture according to the actual situation

                                                    In a multi-level organizational structure, it is recommended to use second-level folders as the isolation unit, isolating users/user groups/resources between "subsidiaries". After isolation, the users/user groups/resources of different "subsidiaries" are invisible to one another.

                                                  3. Create users / integrate the user system

                                                    The main platform administrator (Admin) creates users centrally on the platform, or connects users to the AI Computing Center through identity provider capabilities such as LDAP/OIDC/OAuth2.0.

                                                  4. Create folder roles

                                                    In the Folder/WS isolation mode, the platform administrator (Admin) must first invite users into each subsidiary through Authorization; only then can the "subsidiary administrator (Folder Admin)" manage those users, for example by granting further authorization or editing permissions. To simplify the platform administrator's work, it is recommended to create a role with no actual permissions to help the Admin invite users into subsidiaries through "authorization". The actual permissions of subsidiary users are then delegated to each subsidiary administrator (Folder Admin) to manage.

                                                    Note

                                                    A resource-binding permission point has no effect when used on its own, which fits the requirement above: invite users into a subsidiary through "authorization", then let the subsidiary administrator (Folder Admin) manage them.

                                                    The following demonstrates how to create such a resource-binding role with no actual permissions, i.e., minirole.

                                                  5. Grant permissions to users

                                                    The platform administrator invites users into each subsidiary via "authorization" according to the actual situation, and appoints the subsidiary administrators.

                                                    Grant regular subsidiary users "minirole" (1), and grant subsidiary administrators Folder Admin.

                                                    1. That is, the role with no actual permissions created in step 4 (the previous step)

                                                  6. Subsidiary administrators manage users/user groups themselves

                                                    After logging in to the platform, a subsidiary administrator (Folder Admin) can only see their own "Subsidiary 2"; they can adjust the structure by creating folders and workspaces, and grant users in Subsidiary 2 additional permissions through adding authorization or editing permissions.

                                                    When adding authorization, the subsidiary administrator (Folder Admin) can only see the users the platform administrator invited through "authorization", not all users on the platform, thereby achieving user isolation between Folder/WS; the same applies to user groups (the platform administrator can see and authorize all users and user groups on the platform).

                                                  Note

                                                  The main difference between a very large enterprise and a large/medium/small enterprise is whether users/user groups in Folders and workspaces are visible to each other. In a very large enterprise, the users/user groups of different subsidiaries are invisible to each other, combined with permission isolation; in a large/medium/small enterprise, users across departments are mutually visible, with permission isolation.

                                                  "},{"location":"admin/ghippo/best-practice/system-message.html","title":"\u7cfb\u7edf\u6d88\u606f","text":"

                                                  \u7cfb\u7edf\u6d88\u606f\u7528\u4e8e\u901a\u77e5\u6240\u6709\u7528\u6237\uff0c\u7c7b\u4f3c\u4e8e\u7cfb\u7edf\u516c\u544a\uff0c\u4f1a\u5728\u7279\u5b9a\u65f6\u95f4\u663e\u793a\u5728 AI \u7b97\u529b\u4e2d\u5fc3UI \u7684\u9876\u90e8\u680f\u3002

                                                  "},{"location":"admin/ghippo/best-practice/system-message.html#_2","title":"\u914d\u7f6e\u7cfb\u7edf\u6d88\u606f","text":"

                                                  \u901a\u8fc7\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4 apply \u7cfb\u7edf\u6d88\u606f\u7684 YAML \u5373\u53ef\u521b\u5efa\u4e00\u6761\u7cfb\u7edf\u6d88\u606f\uff0c\u6d88\u606f\u7684\u663e\u793a\u65f6\u95f4\u7531 YAML \u4e2d\u7684\u65f6\u95f4\u5b57\u6bb5\u51b3\u5b9a\u3002 \u7cfb\u7edf\u6d88\u606f\u4ec5\u5728 start\u3001end \u5b57\u6bb5\u914d\u7f6e\u7684\u65f6\u95f4\u8303\u56f4\u4e4b\u5185\u624d\u4f1a\u663e\u793a\u3002

                                                  1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\uff0c\u70b9\u51fb\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u3002

                                                  2. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u81ea\u5b9a\u4e49\u8d44\u6e90 \uff0c\u641c\u7d22 ghippoconfig\uff0c\u70b9\u51fb\u641c\u7d22\u51fa\u6765\u7684 ghippoconfigs.ghippo.io

                                                  3. \u70b9\u51fb YAML \u521b\u5efa \uff0c\u6216\u4fee\u6539\u5df2\u5b58\u5728\u7684 YAML

                                                  4. \u6700\u7ec8\u6548\u679c\u5982\u4e0b

                                                  \u4ee5\u4e0b\u662f\u4e00\u4e2a YAML \u793a\u4f8b\uff1a

                                                  apiVersion: ghippo.io/v1alpha1\nkind: GhippoConfig\nmetadata:\n  name: system-message\nspec:\n  message: \"this is a message\"\n  start: 2024-01-02T15:04:05+08:00\n  end: 2024-07-24T17:26:05+08:00\n
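
                                                  Saved as, say, system-message.yaml, the resource can then be applied and inspected; a minimal sketch, assuming kubectl points at the global service cluster (add a namespace flag if the CRD is namespaced in your installation):

                                                  kubectl apply -f system-message.yaml\nkubectl get ghippoconfigs.ghippo.io\n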
                                                  "},{"location":"admin/ghippo/best-practice/ws-best-practice.html","title":"\u5de5\u4f5c\u7a7a\u95f4\u6700\u4f73\u5b9e\u8df5","text":"

                                                  \u5de5\u4f5c\u7a7a\u95f4\u662f\u4e00\u79cd\u8d44\u6e90\u5206\u7ec4\u5355\u5143\uff0c\u5927\u591a\u6570\u8d44\u6e90\u90fd\u53ef\u4ee5\u5728\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u521b\u5efa\u6216\u624b\u52a8\u7ed1\u5b9a\u5230\u67d0\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u3002 \u800c\u5de5\u4f5c\u7a7a\u95f4\u901a\u8fc7\u6388\u6743\u548c\u8d44\u6e90\u7ed1\u5b9a\uff0c\u80fd\u591f\u5b9e\u73b0\u7528\u6237\u4e0e\u89d2\u8272\u7684\u7ed1\u5b9a\u5173\u7cfb\uff0c\u5e76\u4e00\u6b21\u6027\u5e94\u7528\u5230\u5de5\u4f5c\u7a7a\u95f4\u7684\u6240\u6709\u8d44\u6e90\u4e0a\u3002

                                                  \u901a\u8fc7\u5de5\u4f5c\u7a7a\u95f4\uff0c\u53ef\u4ee5\u8f7b\u677e\u7ba1\u7406\u56e2\u961f\u4e0e\u8d44\u6e90\uff0c\u89e3\u51b3\u8de8\u6a21\u5757\u3001\u8de8\u96c6\u7fa4\u7684\u8d44\u6e90\u6388\u6743\u95ee\u9898\u3002

                                                  "},{"location":"admin/ghippo/best-practice/ws-best-practice.html#_2","title":"\u5de5\u4f5c\u7a7a\u95f4\u7684\u529f\u80fd","text":"

                                                  \u5de5\u4f5c\u7a7a\u95f4\u5305\u542b\u4e09\u4e2a\u529f\u80fd\uff1a\u6388\u6743\u3001\u8d44\u6e90\u7ec4\u548c\u5171\u4eab\u8d44\u6e90\u3002\u4e3b\u8981\u89e3\u51b3\u8d44\u6e90\u7edf\u4e00\u6388\u6743\u3001\u8d44\u6e90\u5206\u7ec4\u53ca\u8d44\u6e90\u914d\u989d\u95ee\u9898\u3002

                                                  1. \u6388\u6743\uff1a\u4e3a\u7528\u6237/\u7528\u6237\u7ec4\u6388\u4e88\u8be5\u5de5\u4f5c\u7a7a\u95f4\u7684\u4e0d\u540c\u89d2\u8272\uff0c\u5e76\u5c06\u89d2\u8272\u5e94\u7528\u5230\u5de5\u4f5c\u7a7a\u95f4\u7684\u8d44\u6e90\u4e0a\u3002

                                                    \u6700\u4f73\u5b9e\u8df5\uff1a\u666e\u901a\u7528\u6237\u60f3\u8981\u4f7f\u7528\u5e94\u7528\u5de5\u4f5c\u53f0\u3001\u5fae\u670d\u52a1\u5f15\u64ce\u3001\u670d\u52a1\u7f51\u683c\u3001\u4e2d\u95f4\u4ef6\u6a21\u5757\u529f\u80fd\uff0c\u6216\u8005\u9700\u8981\u62e5\u6709\u5bb9\u5668\u7ba1\u7406\u3001\u670d\u52a1\u7f51\u683c\u4e2d\u90e8\u5206\u8d44\u6e90\u7684\u4f7f\u7528\u6743\u9650\u65f6\uff0c\u9700\u8981\u7ba1\u7406\u5458\u6388\u4e88\u8be5\u5de5\u4f5c\u7a7a\u95f4\u7684\u4f7f\u7528\u6743\u9650\uff08Workspace Admin\u3001Workspace Edit\u3001Workspace View\uff09\u3002 \u8fd9\u91cc\u7684\u7ba1\u7406\u5458\u53ef\u4ee5\u662f Admin \u89d2\u8272\u3001\u8be5\u5de5\u4f5c\u7a7a\u95f4\u7684 Workspace Admin \u89d2\u8272\u6216\u8be5\u5de5\u4f5c\u7a7a\u95f4\u4e0a\u5c42\u7684 Folder Admin \u89d2\u8272\u3002 \u67e5\u770b Folder \u4e0e Workspace \u7684\u5173\u7cfb\u3002

                                                  2. Resource groups: resource groups support six resource types: Cluster, Cluster-Namespace (cross-cluster), Mesh, Mesh-Namespace, Kairship, and Kairship-Namespace. A resource can be bound to only one resource group. Once a resource is bound to a resource group, the workspace owner gains all management permissions on that resource, equivalent to being its owner, and is therefore not limited by resource quotas.

                                                    Best practice: through the Authorization feature, a workspace can grant department members different role permissions and apply the user-role bindings to all of the workspace's resources at once. Operations staff therefore only need to bind resources to resource groups and add the department's different roles to the appropriate resource groups to ensure resource permissions are assigned correctly.

                                                    Role Cluster Cluster-Namespace (cross-cluster) Workspace Admin Cluster Admin NS Admin Workspace Edit ✗ NS Editor Workspace View ✗ NS Viewer
                                                  3. Shared resources: the shared resources feature mainly targets cluster resources.

                                                    A cluster can be shared with multiple workspaces (via the shared resources feature in workspaces), and a workspace can use resources from multiple clusters at the same time. However, sharing a cluster does not mean the recipients (workspaces) can use it without restriction, so the resource quota available to each recipient (workspace) is usually limited.

                                                    Also, unlike resource groups, workspace members are only users of the shared resources and can use the cluster's resources within the quota, for example going to the Application Workbench to create namespaces and deploy applications, but they do not have cluster management permissions. Once the limit is set, the total resource quota of the namespaces created/bound under the workspace cannot exceed the cluster's resource usage cap set for that workspace.

                                                    Best practice: the operations department has a high-availability Cluster 01 to be allocated to Department A (Workspace A) and Department B (Workspace B), with Department A getting 50 CPU cores and Department B getting 100 CPU cores. You can use the shared resources concept: share Cluster 01 with Department A and Department B respectively, limiting Department A's CPU quota to 50 and Department B's to 100. The administrator of Department A (Workspace A Admin) can then create and use namespaces in the Application Workbench, with the total namespace quota not exceeding 50 cores, and the administrator of Department B (Workspace B Admin) can do the same with a total not exceeding 100 cores. Namespaces created by the two administrators are automatically bound to their departments, and other department members get the corresponding Namespace Admin, Namespace Editor, or Namespace Viewer roles on those namespaces (department here refers to a workspace; a workspace can also be mapped to concepts such as an organization or a supplier). The whole process is shown in the table below:

                                                    Department Role Shared Cluster Resource Quota Department Administrator A Workspace Admin Cluster 01 CPU 50 cores Department Administrator B Workspace Admin Cluster 01 CPU 100 cores
                                                  "},{"location":"admin/ghippo/best-practice/ws-best-practice.html#ai","title":"\u5de5\u4f5c\u7a7a\u95f4\u5bf9 AI \u7b97\u529b\u4e2d\u5fc3\u5404\u6a21\u5757\u7684\u4f5c\u7528","text":"

Module name: Container Management

Due to the particularities of this module, resources created in the Container Management module are not automatically bound to a workspace.

If you need unified, workspace-based authorization management of people and resources, you can manually bind the required resources to a workspace, thereby applying users' roles in that workspace to the resources (the resources here can span clusters).

In addition, the binding entry for resources differs slightly between Container Management and Service Mesh: workspaces provide binding entries for the Cluster and Cluster-Namespace resources of Container Management and the Mesh and Mesh-Namespace resources of Service Mesh, but binding of the service mesh's kairship and Kairship-Namespace resources is not yet available.

For kairship and Kairship-Namespace resources, you can bind them manually in the Service Mesh resource list.

"},{"location":"admin/ghippo/best-practice/ws-best-practice.html#_3","title":"Usage Scenarios for Workspaces","text":"
• Map workspaces to different departments, projects, organizations, and other concepts, and map the Workspace Admin, Workspace Edit, and Workspace View roles to different roles within those departments, projects, or organizations
• Put resources with different purposes into different workspaces for separate management and use
• Set completely independent administrators for different workspaces, enabling user and permission management within the scope of each workspace
• Share resources with different workspaces and cap the resource quota each workspace can use
                                                  "},{"location":"admin/ghippo/best-practice/ws-to-ns.html","title":"\u5de5\u4f5c\u7a7a\u95f4\uff08\u79df\u6237\uff09\u7ed1\u5b9a\u8de8\u96c6\u7fa4\u7684\u547d\u540d\u7a7a\u95f4","text":"

Binding namespaces from different clusters under a workspace (tenant) enables the workspace (tenant) to flexibly manage Kubernetes Namespaces under any cluster on the platform. The platform also provides permission mapping, which maps a user's permissions in the workspace onto the bound namespaces.

When one or more cross-cluster namespaces are bound under a workspace (tenant), the administrator does not need to grant permissions to the workspace members again; the members' roles on the workspace are automatically mapped according to the following relationships, avoiding repeated authorization operations:

• Workspace Admin maps to Namespace Admin
• Workspace Editor maps to Namespace Editor
• Workspace Viewer maps to Namespace Viewer

Here is an example:

User | Workspace | Role
User A | Workspace01 | Workspace Admin

After binding a namespace to the workspace:

User | Scope | Role
User A | Workspace01 | Workspace Admin
User A | Namespace01 | Namespace Admin"},{"location":"admin/ghippo/best-practice/ws-to-ns.html#_2","title":"Implementation","text":"

The flow for binding namespaces from different clusters to the same workspace (tenant) and making them available to the workspace's (tenant's) members is shown in the diagram.

graph TB\n\npreparews[Prepare a workspace] --> preparens[Prepare a namespace]\n--> judge([Is the namespace bound to another workspace])\njudge -.Not bound.->nstows[Bind the namespace to the workspace] --> wsperm[Manage workspace access permissions]\njudge -.Bound.->createns[Create a new namespace]\n\nclassDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;\nclassDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;\nclassDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n\nclass preparews,preparens,createns,nstows,wsperm cluster;\nclass judge plain\n\nclick preparews \"https://docs.daocloud.io/ghippo/user-guide/workspace/ws-to-ns/#_3\"\nclick preparens \"https://docs.daocloud.io/ghippo/user-guide/workspace/ws-to-ns/#_4\"\nclick nstows \"https://docs.daocloud.io/ghippo/user-guide/workspace/ws-to-ns/#_5\"\nclick wsperm \"https://docs.daocloud.io/ghippo/user-guide/workspace/ws-to-ns/#_6\"\nclick createns \"https://docs.daocloud.io/ghippo/user-guide/workspace/ws-to-ns/#_4\"

                                                  Tip

A namespace can be bound to only one workspace.

                                                  "},{"location":"admin/ghippo/best-practice/ws-to-ns.html#_3","title":"\u51c6\u5907\u5de5\u4f5c\u7a7a\u95f4","text":"

Workspaces serve multi-tenant scenarios by forming mutually isolated resource environments based on resources such as clusters, cluster namespaces, meshes, mesh namespaces, multicloud, and multicloud namespaces. A workspace can be mapped to concepts such as a project, tenant, enterprise, or supplier.

1. Log in to the AI Computing Center as a user with the admin/folder admin role, and click Global Management at the bottom of the left navigation bar.

2. Click Workspace and Hierarchy in the left navigation bar, then click the Create Workspace button in the upper right corner.

3. Fill in the workspace name, parent folder, and other information, then click OK to finish creating the workspace.

Tip: if the platform already has namespaces, click a workspace and, under the Resource Group tab, click Bind Resource to bind a namespace directly.

                                                  "},{"location":"admin/ghippo/best-practice/ws-to-ns.html#_4","title":"\u51c6\u5907\u547d\u540d\u7a7a\u95f4","text":"

A namespace is a smaller unit of resource isolation. After it is bound to a workspace, the members of the workspace can manage and use it.

Follow the steps below to prepare a namespace that is not yet bound to any workspace.

1. Click Container Management at the bottom of the left navigation bar.

2. Click the name of the target cluster to enter Cluster Details.

3. Click Namespace in the left navigation bar to enter the namespace management page, then click the Create button on the right side of the page.

4. Fill in the namespace name, configure the workspace and labels (optional), then click OK.

                                                    Info

Workspaces are mainly used to divide groups of resources and grant users (user groups) different access permissions to those resources. For a detailed description of workspaces, see Workspace and Hierarchy.

5. Click OK to finish creating the namespace. On the right side of the namespace list, click ┇ and select Bind Workspace from the pop-up menu.

                                                  "},{"location":"admin/ghippo/best-practice/ws-to-ns.html#_5","title":"\u5c06\u547d\u540d\u7a7a\u95f4\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4","text":"

Besides binding in the namespace list, you can also return to Global Management and bind the workspace by following these steps.

1. Click Global Management -> Workspace and Hierarchy -> Resource Group, click a workspace name, then click the Bind Resource button.

2. Select the namespaces to bind (multiple selection is allowed), then click OK to complete the binding.

                                                  "},{"location":"admin/ghippo/best-practice/ws-to-ns.html#_6","title":"\u4e3a\u5de5\u4f5c\u7a7a\u95f4\u6dfb\u52a0\u6210\u5458\u5e76\u6388\u6743","text":"
                                                  1. \u5728 \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 -> \u6388\u6743 \u4e2d\uff0c\u70b9\u51fb\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u540d\u79f0\u540e\uff0c\u70b9\u51fb \u6dfb\u52a0\u6388\u6743 \u6309\u94ae\u3002

                                                  2. \u9009\u62e9\u8981\u6388\u6743\u7684 \u7528\u6237/\u7528\u6237\u7ec4 \u3001 \u89d2\u8272 \u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u6388\u6743\u3002

                                                  "},{"location":"admin/ghippo/best-practice/gproduct/intro.html","title":"GProduct \u5982\u4f55\u5bf9\u63a5\u5168\u5c40\u7ba1\u7406","text":"

GProduct is the collective term for all modules of the AI Computing Center other than Global Management; these modules must integrate with Global Management before they can join the AI Computing Center.

                                                  "},{"location":"admin/ghippo/best-practice/gproduct/intro.html#_1","title":"\u5bf9\u63a5\u4ec0\u4e48","text":"
                                                  • \u5bf9\u63a5\u5bfc\u822a\u680f

                                                    \u5165\u53e3\u7edf\u4e00\u653e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u3002

                                                  • \u63a5\u5165\u8def\u7531\u548c AuthN

                                                    \u7edf\u4e00 IP \u6216\u57df\u540d\uff0c\u5c06\u8def\u7531\u5165\u53e3\u7edf\u4e00\u8d70\u5168\u5c40\u7ba1\u7406\u7684 Istio Gateway\u3002

                                                  • \u7edf\u4e00\u767b\u5f55 / \u7edf\u4e00 AuthN \u8ba4\u8bc1

                                                    \u767b\u5f55\u7edf\u4e00\u4f7f\u7528\u5168\u5c40\u7ba1\u7406 (Keycloak) \u767b\u5f55\u9875\uff0cAPI authn token \u9a8c\u8bc1\u4f7f\u7528 Istio Gateway\u3002 GProduct \u5bf9\u63a5\u5168\u5c40\u7ba1\u7406\u540e\u4e0d\u9700\u8981\u5173\u6ce8\u5982\u4f55\u5b9e\u73b0\u767b\u5f55\u548c\u8ba4\u8bc1\u3002

                                                  "},{"location":"admin/ghippo/best-practice/gproduct/intro.html#pdf","title":"\u89c6\u9891\u6f14\u793a\u548c PDF","text":"

                                                  \u5c06 AI \u7b97\u529b\u4e2d\u5fc3\u96c6\u6210\u5230\u5ba2\u6237\u7cfb\u7edf\uff08OEM OUT\uff09\uff0c\u53c2\u9605 OEM OUT \u6587\u6863\u3002

                                                  \u5c06\u5ba2\u6237\u7cfb\u7edf\u96c6\u6210\u5230 AI \u7b97\u529b\u4e2d\u5fc3\uff08OEM IN\uff09\uff0c\u53c2\u9605 OEM IN \u6587\u6863\u3002

                                                  "},{"location":"admin/ghippo/best-practice/gproduct/nav.html","title":"\u5bf9\u63a5\u5bfc\u822a\u680f","text":"

                                                  \u4ee5\u5bb9\u5668\u7ba1\u7406\uff08\u5f00\u53d1\u4ee3\u53f7 kpanda \uff09\u4e3a\u4f8b\uff0c\u5bf9\u63a5\u5230\u5bfc\u822a\u680f\u3002

                                                  \u5bf9\u63a5\u540e\u7684\u9884\u671f\u6548\u679c\u5982\u56fe\uff1a

                                                  "},{"location":"admin/ghippo/best-practice/gproduct/nav.html#_2","title":"\u5bf9\u63a5\u65b9\u6cd5","text":"

                                                  \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u5bf9\u63a5 GProduct\uff1a

                                                  1. \u901a\u8fc7 GProductNavigator CR \u5c06\u5bb9\u5668\u7ba1\u7406\u7684\u5404\u529f\u80fd\u9879\u6ce8\u518c\u5230\u5bfc\u822a\u680f\u83dc\u5355\u3002

                                                    apiVersion: ghippo.io/v1alpha1\nkind: GProductNavigator\nmetadata:\n  name: kpanda\nspec:\n  gproduct: kpanda\n  name: \u5bb9\u5668\u7ba1\u7406\n  localizedName:\n    zh-CN: \u5bb9\u5668\u7ba1\u7406\n    en-US: Container Management\n  url: /kpanda\n  category: \u5bb9\u5668  # (1)\n  iconUrl: /kpanda/nav-icon.png\n  order: 10 # (2)\n  menus:\n  - name: \u5907\u4efd\u7ba1\u7406\n    localizedName:\n      zh-CN: \u5907\u4efd\u7ba1\u7406\n      en-US: Backup Management\n    iconUrl: /kpanda/bkup-icon.png\n    url: /kpanda/backup\n
1. Currently only Overview, Workbench, Container, Microservice, Data Service, and Management are supported; choose one of the six
2. The larger the number, the higher the entry is placed

Global Management's navigation bar categories are configured in a ConfigMap and cannot yet be added by registration; contact the Global Management team to add one.
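A minimal sketch of applying the GProductNavigator CR above, assuming it was saved as kpanda-navigator.yaml (the file name is hypothetical):

kubectl apply -f kpanda-navigator.yaml\n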

2. The kpanda frontend is integrated as a micro-frontend into Anakin, the AI Computing Center parent application

  The frontend uses qiankun to integrate sub-application UIs; see its quick start guide.

  After the GProductNavigator CR is registered, the API generates the corresponding registration information for the frontend parent application to use. For example, kpanda generates the following registration information:

                                                    {\n  \"id\": \"kpanda\",\n  \"title\": \"\u5bb9\u5668\u7ba1\u7406\",\n  \"url\": \"/kpanda\",\n  \"uiAssetsUrl\": \"/ui/kpanda/\", // \u7ed3\u5c3e\u7684/\u662f\u5fc5\u987b\u7684\n  \"needImportLicense\": false\n},\n

The correspondence between the registration information above and the qiankun sub-application fields is:

                                                    {\n    name: id,\n    entry: uiAssetsUrl,\n    container: '#container',\n    activeRule: url, \n    loader,\n    props: globalProps,\n}\n

container and loader are provided by the frontend parent application; the sub-application does not need to care about them. props provides a pinia store containing basic user information, sub-product registration information, and so on.

qiankun uses the following parameters at startup:

start({\n  sandbox: {\n    experimentalStyleIsolation: true,\n  },\n  // remove the favicon in the sub-application to prevent it from overriding the parent application's favicon in Firefox\n  getTemplate: (template) => template.replaceAll(/<link\\s* rel=\"[\\w\\s]*icon[\\w\\s]*\"\\s*( href=\".*?\")?\\s*\\/?>/g, ''),\n});\n

See the GProduct integration demo tar package provided by the frontend team.

                                                  "},{"location":"admin/ghippo/best-practice/gproduct/route-auth.html","title":"\u63a5\u5165\u8def\u7531\u548c\u767b\u5f55\u8ba4\u8bc1","text":"

                                                  \u63a5\u5165\u540e\u7edf\u4e00\u767b\u5f55\u548c\u5bc6\u7801\u9a8c\u8bc1\uff0c\u6548\u679c\u5982\u4e0b\u56fe\uff1a

                                                  \u5404\u4e2a GProduct \u6a21\u5757\u7684 API bear token \u9a8c\u8bc1\u90fd\u8d70 Istio Gateway\u3002

                                                  \u63a5\u5165\u540e\u7684\u8def\u7531\u6620\u5c04\u56fe\u5982\u4e0b\uff1a

                                                  "},{"location":"admin/ghippo/best-practice/gproduct/route-auth.html#_2","title":"\u63a5\u5165\u65b9\u6cd5","text":"

                                                  \u4ee5 kpanda \u4e3a\u4f8b\u6ce8\u518c GProductProxy CR\u3002

# Example GProductProxy CR, including routing and login authentication\n\n# spec.proxies: a route written later must not be a subset of an earlier route; the reverse is allowed\n# spec.proxies.match.uri.prefix: for a backend api, it is recommended to append \"/\" at the end of the prefix to mark the end of the path (can be omitted for special needs)\n# spec.proxies.match.uri: supports prefix and exact modes; Prefix and Exact are mutually exclusive; Prefix takes precedence over Exact\n\napiVersion: ghippo.io/v1alpha1\nkind: GProductProxy\nmetadata:\n  name: kpanda  # (1)\nspec:\n  gproduct: kpanda  # (2)\n  proxies:\n  - labels:\n      kind: UIEntry\n    match:\n      uri:\n        prefix: /kpanda # (3)\n    rewrite:\n      uri: /index.html\n    destination:\n      host: ghippo-anakin.ghippo-system.svc.cluster.local\n      port: 80\n    authnCheck: false  # (4)\n  - labels:\n      kind: UIAssets\n    match:\n      uri:\n        prefix: /ui/kpanda/ # (5)\n    destination:\n      host: kpanda-ui.kpanda-system.svc.cluster.local\n      port: 80\n    authnCheck: false\n  - match:\n      uri:\n        prefix: /apis/kpanda.io/v1/a\n    destination:\n      host: kpanda-service.kpanda-system.svc.cluster.local\n      port: 80\n    authnCheck: false\n  - match:\n      uri:\n        prefix: /apis/kpanda.io/v1 # (6)\n    destination:\n      host: kpanda-service.kpanda-system.svc.cluster.local\n      port: 80\n    authnCheck: true\n
1. Cluster-scoped CRD
2. Specify the GProduct name in lowercase
3. exact is also supported
4. Whether istio-gateway should perform AuthN token authentication on this route's API; false skips authentication
5. For UIAssets it is recommended to append a trailing / to mark the end (otherwise the frontend may have issues)
6. A route written later must not be a subset of an earlier route; the reverse is allowed
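As a quick check of the authnCheck setting above, compare an unauthenticated request against the protected API prefix with an authenticated one; a minimal sketch, where GATEWAY_URL, TOKEN, and the /clusters subpath are placeholders:

# without a token the gateway is expected to reject the request\ncurl -i $GATEWAY_URL/apis/kpanda.io/v1/clusters\n# with a valid bearer token the request should pass AuthN at the gateway\ncurl -i -H \"Authorization: Bearer $TOKEN\" $GATEWAY_URL/apis/kpanda.io/v1/clusters\n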
                                                  "},{"location":"admin/ghippo/best-practice/menu/menu-display-or-hiding.html","title":"\u5bfc\u822a\u680f\u83dc\u5355\u6839\u636e\u6743\u9650\u663e\u793a/\u9690\u85cf","text":"

Under the existing permission system, Global Management can control whether a navigation bar menu is displayed based on the user's permissions. However, because Container Management's authorization information is not synchronized to Global Management, Global Management cannot accurately determine whether the Container Management menu should be displayed.

This document uses configuration to achieve the following: the parts of the Container Management and Insight menus that Global Management cannot evaluate are hidden by default, and menu visibility is controlled through whitelist-style authorization (Global Management cannot perceive or evaluate cluster or namespace permissions granted through Container Management pages).

For example, user A has the Cluster Admin role on cluster A in Container Management; in this case Global Management cannot determine whether the Container Management menu should be displayed. After the configuration in this document, user A does not see the Container Management menu by default and must be explicitly authorized in Global Management to see it.

                                                  "},{"location":"admin/ghippo/best-practice/menu/menu-display-or-hiding.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                  \u5df2\u5f00\u542f\u57fa\u4e8e\u6743\u9650\u663e\u793a/\u9690\u85cf\u83dc\u5355\u7684\u529f\u80fd\uff0c\u5f00\u542f\u65b9\u6cd5\u5982\u4e0b\uff1a

                                                  • \u65b0\u5b89\u88c5\u7684\u73af\u5883, \u4f7f\u7528 helm install \u65f6\u589e\u52a0 --set global.navigatorVisibleDependency=true \u53c2\u6570
                                                  • \u5df2\u6709\u73af\u5883\uff0chelm get values ghippo -n ghippo-system -o yaml \u5907\u4efd values, \u968f\u540e\u4fee\u6539 bak.yaml \u5e76\u6dfb\u52a0 global.navigatorVisibleDependency: true
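A minimal sketch of the backup-and-edit step for an existing environment (bak.yaml as referenced above):

helm get values ghippo -n ghippo-system -o yaml > bak.yaml\n# edit bak.yaml and add the following under the existing global section:\n#   navigatorVisibleDependency: true\nvim bak.yaml\n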

Then upgrade Global Management with the following command:

                                                  helm upgrade ghippo ghippo-release/ghippo \\  \n  -n ghippo-system \\  \n  -f ./bak.yaml \\  \n  --version ${version}\n
                                                  "},{"location":"admin/ghippo/best-practice/menu/menu-display-or-hiding.html#_3","title":"\u914d\u7f6e\u5bfc\u822a\u680f","text":"

                                                  \u5728 kpanda-global-cluster \u4e2d apply \u5982\u4e0b YAML\uff1a

                                                  apiVersion: ghippo.io/v1alpha1  \nkind: GProductNavigator  \nmetadata:  \n  name: kpanda-menus-custom  \nspec:  \n  category: container  \n  gproduct: kpanda  \n  iconUrl: ./ui/kpanda/kpanda.svg  \n  isCustom: true  \n  localizedName:  \n    en-US: Container Management  \n    zh-CN: \u5bb9\u5668\u7ba1\u7406  \n  menus:  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Clusters  \n        zh-CN: \u96c6\u7fa4\u5217\u8868  \n      name: Clusters  \n      order: 80  \n      url: ./kpanda/clusters  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Namespaces  \n        zh-CN: \u547d\u540d\u7a7a\u95f4  \n      name: Namespaces  \n      order: 70  \n      url: ./kpanda/namespaces  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Workloads  \n        zh-CN: \u5de5\u4f5c\u8d1f\u8f7d  \n      name: Workloads  \n      order: 60  \n      url: ./kpanda/workloads/deployments  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Permissions  \n        zh-CN: \u6743\u9650\u7ba1\u7406  \n      name: Permissions  \n      order: 10  \n      url: ./kpanda/rbac/content/cluster  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n  name: \u5bb9\u5668\u7ba1\u7406  \n  order: 50  \n  url: ./kpanda/clusters  \n  visible: true  \n\n---\napiVersion: ghippo.io/v1alpha1  \nkind: GProductNavigator  \nmetadata:  \n  name: insight-menus-custom  \nspec:  \n  category: microservice  \n  gproduct: insight  \n  iconUrl: ./ui/insight/logo.svg  \n  isCustom: true  \n  localizedName:  \n    en-US: Insight  \n    zh-CN: \u53ef\u89c2\u6d4b\u6027  \n  menus:  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Overview  \n        zh-CN: \u6982\u89c8  \n      name: Overview  \n      order: 9  \n      url: ./insight/overview  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Dashboard  \n        zh-CN: \u4eea\u8868\u76d8  \n      name: Dashboard  \n      order: 8  \n      url: ./insight/dashboard  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Infrastructure  \n        zh-CN: \u57fa\u7840\u8bbe\u65bd  \n      name: Infrastructure  \n      order: 7  \n      url: ./insight/clusters  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Metrics  \n        zh-CN: \u6307\u6807  \n      name: Metrics  \n      order: 6  \n      url: ./insight/metric/basic  \n      visible: true  \n      visibleDependency:  \n      
  permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Logs  \n        zh-CN: \u65e5\u5fd7  \n      name: Logs  \n      order: 5  \n      url: ./insight/logs  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Trace Tracking  \n        zh-CN: \u94fe\u8def\u8ffd\u8e2a  \n      name: Trace Tracking  \n      order: 4  \n      url: ./insight/topology  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Alerts  \n        zh-CN: \u544a\u8b66  \n      name: Alerts  \n      order: 3  \n      url: ./insight/alerts/active/metrics  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Collect Management  \n        zh-CN: \u91c7\u96c6\u7ba1\u7406  \n      name: Collect Management  \n      order: 2  \n      url: ./insight/agents  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: System Management  \n        zh-CN: \u7cfb\u7edf\u7ba1\u7406  \n      name: System Management  \n      order: 1  \n      url: ./insight/system-components  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n  name: \u53ef\u89c2\u6d4b\u6027  \n  order: 30  \n  url: ./insight  \n  visible: true  \n\n---\napiVersion: ghippo.io/v1alpha1  \nkind: GProductResourcePermissions  \nmetadata:  \n  name: kpanda  \nspec:  \n  actions:  \n    - localizedName:  \n        en-US: Create  \n        zh-CN: \u521b\u5efa  \n      name: create  \n    - localizedName:  \n        en-US: Delete  \n        zh-CN: \u5220\u9664  \n      name: delete  \n    - localizedName:  \n        en-US: Update  \n        zh-CN: \u7f16\u8f91  \n      name: update  \n    - localizedName:  \n        en-US: Get  \n        zh-CN: \u67e5\u770b  \n      name: get  \n    - localizedName:  \n        en-US: Admin  \n        zh-CN: \u7ba1\u7406  \n      name: admin  \n  authScopes:  \n    - resourcePermissions:  \n        - actions:  \n            - name: get  \n            - dependPermissions:  \n                - action: get  \n              name: create  \n            - dependPermissions:  \n                - action: get  \n              name: update  \n            - dependPermissions:  \n                - action: get  \n              name: delete  \n          resourceType: cluster  \n        - actions:  \n            - name: get  \n          resourceType: menu  \n      scope: platform  \n    - resourcePermissions:  \n        - actions:  \n            - name: admin  \n              tips:  \n                - en-US: >-  \n                    If the workspace is bound to a cluster, it will be assigned  \n                    the Cluster Admin role upon authorization.  
\n                  zh-CN: \u82e5\u5de5\u4f5c\u7a7a\u95f4\u7ed1\u5b9a\u4e86\u96c6\u7fa4\uff0c\u6388\u6743\u540e\u8fd8\u5c06\u88ab\u6620\u5c04\u4e3a\u5bf9\u5e94\u96c6\u7fa4\u7684 Cluster Admin \u89d2\u8272  \n          resourceType: cluster  \n        - actions:  \n            - name: get  \n              tips:  \n                - en-US: >-  \n                    If the workspace is bound to a namespace, it will be  \n                    assigned the NS View role upon authorization.  \n                  zh-CN: \u82e5\u5de5\u4f5c\u7a7a\u95f4\u7ed1\u5b9a\u4e86\u547d\u540d\u7a7a\u95f4\uff0c\u6388\u6743\u540e\u8fd8\u5c06\u88ab\u6620\u5c04\u4e3a\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4\u7684 NS View \u89d2\u8272  \n            - name: update  \n              tips:  \n                - en-US: >-  \n                    If the workspace is bound to a namespace, it will be  \n                    assigned the NS Edit role upon authorization.  \n                  zh-CN: \u82e5\u5de5\u4f5c\u7a7a\u95f4\u7ed1\u5b9a\u4e86\u547d\u540d\u7a7a\u95f4\uff0c\u6388\u6743\u540e\u8fd8\u5c06\u88ab\u6620\u5c04\u4e3a\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4\u7684 NS  Edit \u89d2\u8272  \n            - name: admin  \n              tips:  \n                - en-US: >-  \n                    If the workspace is bound to a namespace, it will be  \n                    assigned the NS Admin role upon authorization.  \n                  zh-CN: \u82e5\u5de5\u4f5c\u7a7a\u95f4\u7ed1\u5b9a\u4e86\u547d\u540d\u7a7a\u95f4\uff0c\u6388\u6743\u540e\u8fd8\u5c06\u88ab\u6620\u5c04\u4e3a\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4\u7684 NS Admin \u89d2\u8272  \n          resourceType: namespace  \n      scope: workspace  \n  gproduct: kpanda  \n  resourceTypes:  \n    - localizedName:  \n        en-US: Cluster Management  \n        zh-CN: \u96c6\u7fa4\u7ba1\u7406  \n      name: cluster  \n    - localizedName:  \n        en-US: Menu  \n        zh-CN: \u83dc\u5355  \n      name: menu  \n    - localizedName:  \n        en-US: Namespace Management  \n        zh-CN: \u547d\u540d\u7a7a\u95f4  \n      name: namespace\n
                                                  "},{"location":"admin/ghippo/best-practice/menu/menu-display-or-hiding.html#_4","title":"\u901a\u8fc7\u81ea\u5b9a\u4e49\u89d2\u8272\u5b9e\u73b0\u4e0a\u8ff0\u6548\u679c","text":"

                                                  Note

                                                  \u4ec5\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7684\u83dc\u5355\u9700\u8981\u5355\u72ec\u914d\u7f6e\u83dc\u5355\u6743\u9650\uff0c\u5176\u4ed6\u6a21\u5757\u4f1a\u6839\u636e\u7528\u6237\u7684\u6743\u9650\u81ea\u52a8\u663e\u793a/\u9690\u85cf

                                                  \u521b\u5efa\u4e00\u4e2a\u81ea\u5b9a\u4e49\u89d2\u8272\uff0c\u5305\u542b\u7684\u6743\u9650\u70b9\u4e3a\u5bb9\u5668\u7ba1\u7406\u7684\u83dc\u5355\u67e5\u770b\u6743\u9650\uff0c\u540e\u7eed\u6388\u6743\u7ed9\u9700\u8981\u67e5\u770b\u5bb9\u5668\u7ba1\u7406\u83dc\u5355\u7684\u7528\u6237\u3002

                                                  \u6548\u679c\u5982\u4e0b\uff0c\u53ef\u4ee5\u770b\u5230\u5bb9\u5668\u7ba1\u7406\u548c\u53ef\u89c2\u6d4b\u6027\u7684\u5bfc\u822a\u680f\u83dc\u5355\uff1a

                                                  "},{"location":"admin/ghippo/best-practice/menu/navigator.html","title":"\u81ea\u5b9a\u4e49\u5bfc\u822a\u680f","text":"

                                                  \u5f53\u524d\u81ea\u5b9a\u4e49\u5bfc\u822a\u680f\u9700\u8981\u901a\u8fc7\u624b\u52a8\u521b\u5efa\u5bfc\u822a\u680f\u7684 YAML \uff0c\u5e76 apply \u5230\u96c6\u7fa4\u4e2d\u3002

                                                  "},{"location":"admin/ghippo/best-practice/menu/navigator.html#_2","title":"\u5bfc\u822a\u680f\u5206\u7c7b","text":"

                                                  \u82e5\u9700\u8981\u65b0\u589e\u6216\u91cd\u65b0\u6392\u5e8f\u5bfc\u822a\u680f\u5206\u7c7b\u53ef\u4ee5\u901a\u8fc7\u65b0\u589e\u3001\u4fee\u6539 category YAML \u5b9e\u73b0\u3002

                                                  category \u7684 YAML \u793a\u4f8b\u5982\u4e0b\uff1a

                                                  apiVersion: ghippo.io/v1alpha1\nkind: NavigatorCategory\nmetadata:\n  name: management-custom # (1)!\nspec:\n  name: Management # (2)!\n  isCustom: true # (3)!\n  localizedName: # (4)!\n    zh-CN: \u7ba1\u7406\n    en-US: Management\n  order: 100 # (5)!\n
1. Naming rule: formed from the lowercase "spec.name" plus "-custom"
2. When used to modify an existing category
3. This field must be true
4. Defines the Chinese and English names of the category
5. Ordering; the larger the number, the higher the category is placed

After writing the YAML file, run the following command and refresh the page to see the added or modified navigation bar category.

                                                  kubectl apply -f xxx.yaml\n
                                                  "},{"location":"admin/ghippo/best-practice/menu/navigator.html#_3","title":"\u5bfc\u822a\u680f\u83dc\u5355","text":"

                                                  \u82e5\u9700\u8981\u65b0\u589e\u6216\u91cd\u65b0\u6392\u5e8f\u5bfc\u822a\u680f\u83dc\u5355\u53ef\u4ee5\u901a\u8fc7\u65b0\u589e navigator YAML \u5b9e\u73b0\u3002

                                                  Note

                                                  \u82e5\u9700\u8981\u7f16\u8f91\u5df2\u5b58\u5728\u7684\u5bfc\u822a\u680f\u83dc\u5355\uff08\u975e\u7528\u6237\u81ea\u5df1\u65b0\u589e\u7684 custom \u83dc\u5355\uff09\uff0c\u9700\u8981\u4ee4\u65b0\u589e custom \u83dc\u5355 gproduct \u5b57\u6bb5\u4e0e\u9700\u8981\u8986\u76d6\u7684\u83dc\u5355\u7684 gproduct \u76f8\u540c\uff0c \u65b0\u7684\u5bfc\u822a\u680f\u83dc\u5355\u4f1a\u5c06 menus \u4e2d name \u76f8\u540c\u7684\u90e8\u5206\u6267\u884c\u8986\u76d6\uff0cname \u4e0d\u540c\u7684\u5730\u65b9\u505a\u65b0\u589e\u64cd\u4f5c\u3002
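As an illustration of the override semantics just described, a minimal sketch that is expected to override only the Clusters entry of the kpanda menu from this document's examples, leaving entries with other names untouched (all field values are illustrative):

kubectl apply -f - <<'EOF'\napiVersion: ghippo.io/v1alpha1\nkind: GProductNavigator\nmetadata:\n  name: kpanda-custom        # lowercase \"spec.gproduct\" plus \"-custom\"\nspec:\n  gproduct: kpanda           # same gproduct as the menu being overridden\n  name: Container Management\n  localizedName:\n    zh-CN: \u5bb9\u5668\u7ba1\u7406\n    en-US: Container Management\n  url: ./kpanda/clusters\n  category: container\n  isCustom: true\n  menus:\n    - name: Clusters         # same name as an existing entry, so it is overridden\n      isCustom: true\n      localizedName:\n        zh-CN: \u96c6\u7fa4\u5217\u8868\n        en-US: Cluster List\n      url: ./kpanda/clusters\n      order: 99\nEOF\n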

                                                  "},{"location":"admin/ghippo/best-practice/menu/navigator.html#_4","title":"\u4e00\u7ea7\u83dc\u5355","text":"

                                                  \u4f5c\u4e3a\u4ea7\u54c1\u63d2\u5165\u5230\u67d0\u4e2a\u5bfc\u822a\u680f\u5206\u7c7b\u4e0b

                                                  apiVersion: ghippo.io/v1alpha1\nkind: GProductNavigator\nmetadata:\n  name: gmagpie-custom # (1)!\nspec:\n  name: Operations Management\n  iconUrl: ./ui/gmagpie/gmagpie.svg\n  localizedName: # (2)!\n    zh-CN: \u8fd0\u8425\u7ba1\u7406\n    en-US: Operations Management\n  url: ./gmagpie\n  category: management # (3)!\n  menus: # (4)!\n    - name: Access Control\n      iconUrl: ./ui/ghippo/menus/access-control.svg\n      localizedName:\n        zh-CN: \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\n        en-US: Access Control\n      url: ./ghippo/users\n      order: 50 # (5)!\n    - name: Workspace\n      iconUrl: ./ui/ghippo/menus/workspace-folder.svg\n      localizedName:\n        zh-CN: \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7\n        en-US: Workspace and Folder\n      url: ./ghippo/workspaces\n      order: 40\n    - name: Audit Log\n      iconUrl: ./ui/ghippo/menus/audit-logs.svg\n      localizedName:\n        zh-CN: \u5ba1\u8ba1\u65e5\u5fd7\n        en-US: Audit Log\n      url: ./ghippo/audit\n      order: 30\n    - name: Settings\n      iconUrl: ./ui/ghippo/menus/setting.svg\n      localizedName:\n        zh-CN: \u5e73\u53f0\u8bbe\u7f6e\n        en-US: Settings\n      url: ./ghippo/settings\n      order: 10\n  gproduct: gmagpie # (6)!\n  visible: true # (7)!\n  isCustom: true # (8)!\n  order: 20 # (9)!\n  target: blank # (10)!\n
1. Naming rule: formed from the lowercase "spec.gproduct" plus "-custom"
2. Defines the Chinese and English names of the menu
3. Mutually exclusive with parentGProduct; distinguishes a first-level menu from a second-level menu, and matches the spec.name field of a NavigatorCategory
4. Second-level menus
5. Ordering; the smaller the number, the higher the entry is placed
6. Defines the menu identifier, used together with the parentGProduct field to establish the parent-child relationship.
7. Sets whether the menu is visible; defaults to true
8. This field must be true
9. Ordering; the larger the number, the higher the menu is placed
10. Opens in a new tab
                                                  "},{"location":"admin/ghippo/best-practice/menu/navigator.html#_5","title":"\u4e8c\u7ea7\u83dc\u5355","text":"

                                                  \u4f5c\u4e3a\u5b50\u4ea7\u54c1\u63d2\u5165\u5230\u67d0\u4e2a\u4e00\u7ea7\u83dc\u5355\u7684\u4e8c\u7ea7\u83dc\u5355\u4e2d

                                                  apiVersion: ghippo.io/v1alpha1\nkind: GProductNavigator\nmetadata:\n  name: gmagpie-custom # (1)!\nspec:\n  name: Operations Management\n  iconUrl: ./ui/gmagpie/gmagpie.svg\n  localizedName: # (2)!\n    zh-CN: \u8fd0\u8425\u7ba1\u7406\n    en-US: Operations Management\n  url: ./gmagpie\n  parentGProduct: ghippo # (3)!\n  gproduct: gmagpie # (4)!\n  visible: true # (5)!\n  isCustom: true # (6)!\n  order: 20 # (7)!\n
1. Naming rule: formed from the lowercase "spec.gproduct" plus "-custom"
2. Defines the Chinese and English names of the menu
3. Mutually exclusive with category; distinguishes a first-level menu from a second-level menu. If this field is present, the menus field is ignored and this menu is inserted as a second-level menu under the first-level menu whose gproduct is ghippo
4. Defines the menu identifier, used together with the parentGProduct field to establish the parent-child relationship
5. Sets whether the menu is visible; defaults to true
6. This field must be true
7. Ordering; the larger the number, the higher the menu is placed
                                                  "},{"location":"admin/ghippo/best-practice/oem/custom-idp.html","title":"\u5b9a\u5236 AI \u7b97\u529b\u4e2d\u5fc3\u5bf9\u63a5\u5916\u90e8\u8eab\u4efd\u63d0\u4f9b\u5546 (IdP)","text":"

                                                  \u8eab\u4efd\u63d0\u4f9b\u5546\uff08IdP, Identity Provider\uff09\uff1a\u5f53 AI \u7b97\u529b\u4e2d\u5fc3\u9700\u8981\u4f7f\u7528\u5ba2\u6237\u7cfb\u7edf\u4f5c\u4e3a\u7528\u6237\u6e90\uff0c \u4f7f\u7528\u5ba2\u6237\u7cfb\u7edf\u767b\u5f55\u754c\u9762\u6765\u8fdb\u884c\u767b\u5f55\u8ba4\u8bc1\u65f6\uff0c\u8be5\u5ba2\u6237\u7cfb\u7edf\u88ab\u79f0\u4e3a AI \u7b97\u529b\u4e2d\u5fc3\u7684\u8eab\u4efd\u63d0\u4f9b\u5546

                                                  "},{"location":"admin/ghippo/best-practice/oem/custom-idp.html#_1","title":"\u9002\u7528\u573a\u666f","text":"

                                                  \u5982\u679c\u5ba2\u6237\u5bf9 Ghippo \u767b\u5f55 IdP \u6709\u9ad8\u5ea6\u5b9a\u5236\u9700\u6c42\uff0c\u4f8b\u5982\u652f\u6301\u4f01\u4e1a\u5fae\u4fe1\u3001\u5fae\u4fe1\u7b49\u5176\u4ed6\u793e\u4f1a\u7ec4\u7ec7\u767b\u5f55\u9700\u6c42\uff0c\u8bf7\u6839\u636e\u672c\u6587\u6863\u5b9e\u65bd\u3002

                                                  "},{"location":"admin/ghippo/best-practice/oem/custom-idp.html#_2","title":"\u652f\u6301\u7248\u672c","text":"

                                                  Ghippo 0.15.0\u53ca\u4ee5\u4e0a\u7248\u672c\u3002

                                                  "},{"location":"admin/ghippo/best-practice/oem/custom-idp.html#_3","title":"\u5177\u4f53\u65b9\u6cd5","text":""},{"location":"admin/ghippo/best-practice/oem/custom-idp.html#ghippo-keycloak-plugin","title":"\u81ea\u5b9a\u4e49 ghippo keycloak plugin","text":"
                                                  1. \u5b9a\u5236 plugin

                                                    \u53c2\u8003 keycloak \u5b98\u65b9\u6587\u6863\u548c keycloak \u81ea\u5b9a\u4e49 IdP \u8fdb\u884c\u5f00\u53d1\u3002

                                                  2. \u6784\u5efa\u955c\u50cf

                                                    # FROM scratch\nFROM scratch\n\n# plugin\nCOPY ./xxx-jar-with-dependencies.jar /plugins/\n

Note

If you need two customized IdPs, you need to copy two jar packages. A sketch of building the image follows.
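A minimal sketch of building the plugin image from the Dockerfile above; the repository and tag mirror the helm example later in this section and are placeholders:

# build the plugin image; repository/tag must match the helm values below\ndocker build -t chenyang-idp:v0.0.1 .\n# push to a registry reachable by the cluster if required\n# docker push <registry>/chenyang-idp:v0.0.1\n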

                                                  "},{"location":"admin/ghippo/best-practice/oem/custom-idp.html#ghippo-keycloak-plugin_1","title":"\u90e8\u7f72 Ghippo keycloak plugin \u6b65\u9aa4","text":"
                                                  1. \u628a Ghippo \u5347\u7ea7\u5230 0.15.0 \u6216\u4ee5\u4e0a\u3002 \u60a8\u4e5f\u53ef\u4ee5\u76f4\u63a5\u5b89\u88c5\u90e8\u7f72 Ghippo 0.15.0 \u7248\u672c\uff0c\u4f46\u9700\u8981\u628a\u4ee5\u4e0b\u4fe1\u606f\u624b\u52a8\u8bb0\u5f55\u4e0b\u6765\u3002

                                                    helm -n ghippo-system get values ghippo -o yaml\n
                                                    apiserver:\n  image:\n    repository: release.daocloud.io/ghippo-ci/ghippo-apiserver\n    tag: v0.4.2-test-3-gaba5ec2\ncontrollermanager:\n  image:\n    repository: release.daocloud.io/ghippo-ci/ghippo-apiserver\n    tag: v0.4.2-test-3-gaba5ec2\nglobal:\n  database:\n    builtIn: true\n  reverseProxy: http://192.168.31.10:32628\n
2. After the upgrade succeeds, manually run an install command. The parameter values set via --set come from the content saved above, plus a few extra parameters:

  • global.idpPlugin.enabled: whether to enable the custom plugin; disabled by default
  • global.idpPlugin.image.repository: the image address used by the initContainer that initializes the custom plugin
  • global.idpPlugin.image.tag: the image tag used by the initContainer that initializes the custom plugin
  • global.idpPlugin.path: the location of the custom plugin's directory/files inside the above image

  A concrete example:

                                                    helm upgrade \\\n    ghippo \\\n    ghippo-release/ghippo \\\n    --version v0.4.2-test-3-gaba5ec2 \\\n    -n ghippo-system \\\n    --set apiserver.image.repository=release.daocloud.io/ghippo-ci/ghippo-apiserver \\\n    --set apiserver.image.tag=v0.4.2-test-3-gaba5ec2 \\\n    --set controllermanager.image.repository=release.daocloud.io/ghippo-ci/ghippo-apiserver \\\n    --set controllermanager.image.tag=v0.4.2-test-3-gaba5ec2 \\\n    --set global.reverseProxy=http://192.168.31.10:32628 \\\n    --set global.database.builtIn=true \\\n    --set global.idpPlugin.enabled=true \\\n    --set global.idpPlugin.image.repository=chenyang-idp \\\n    --set global.idpPlugin.image.tag=v0.0.1 \\\n    --set global.idpPlugin.path=/plugins/.\n
3. On the keycloak management page, select the plugin to use.

                                                  "},{"location":"admin/ghippo/best-practice/oem/demo.html","title":"gproduct-demo","text":"

This page explains how to set up the GProduct Demo environment.

"},{"location":"admin/ghippo/best-practice/oem/demo.html#_1","title":"Set Up the Environment","text":"
                                                  npm install\n

Compile with hot reload for development:

                                                  npm run serve\n

Compile and build:

                                                  npm run build\n

Run lint checks and fix files:

                                                  npm run lint\n
                                                  "},{"location":"admin/ghippo/best-practice/oem/demo.html#_2","title":"\u81ea\u5b9a\u4e49\u914d\u7f6e","text":"

                                                  \u53c2\u89c1\u914d\u7f6e\u53c2\u8003\u3002

                                                  \u6784\u5efa\u955c\u50cf\uff1a

                                                  docker build -t release.daocloud.io/henry/gproduct-demo .\n

Run on Kubernetes:

                                                  kubectl apply -f demo.yaml\n
                                                  "},{"location":"admin/ghippo/best-practice/oem/keycloak-idp.html","title":"Keycloak \u81ea\u5b9a\u4e49 IdP","text":"

                                                  \u8981\u6c42\uff1akeycloak >= v20

                                                  \u5df2\u77e5\u95ee\u9898 keycloak >= v21\uff0c\u5220\u9664\u4e86\u65e7\u7248 theme \u7684\u652f\u6301\uff0c\u53ef\u80fd\u4f1a\u5728 v22 \u4fee\u590d\u3002 \u53c2\u89c1 Issue #15344 \u3002

                                                  \u6b64\u6b21 demo \u4f7f\u7528 Keycloak v20.0.5\u3002

                                                  "},{"location":"admin/ghippo/best-practice/oem/keycloak-idp.html#source","title":"\u57fa\u4e8e source \u5f00\u53d1","text":""},{"location":"admin/ghippo/best-practice/oem/keycloak-idp.html#_1","title":"\u914d\u7f6e\u73af\u5883","text":"

                                                  \u53c2\u7167 keycloak/building.md \u914d\u7f6e\u73af\u5883\u3002

                                                  \u53c2\u7167 keycloak/README.md \u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a

                                                  cd quarkus\nmvn -f ../pom.xml clean install -DskipTestsuite -DskipExamples -DskipTests\n
                                                  "},{"location":"admin/ghippo/best-practice/oem/keycloak-idp.html#ide","title":"\u4ece IDE \u8fd0\u884c","text":""},{"location":"admin/ghippo/best-practice/oem/keycloak-idp.html#service","title":"\u6dfb\u52a0 service \u4ee3\u7801","text":""},{"location":"admin/ghippo/best-practice/oem/keycloak-idp.html#keycloak","title":"\u5982\u679c\u53ef\u4ece keycloak \u7ee7\u627f\u90e8\u5206\u529f\u80fd","text":"

                                                  \u5728\u76ee\u5f55 services/src/main/java/org/keycloak/broker \u4e0b\u6dfb\u52a0\u6587\u4ef6\uff1a

                                                  \u6587\u4ef6\u540d\u9700\u8981\u662f xxxProvider.java \u548c xxxProviderFactory.java

                                                  xxxProviderFactory.java \u793a\u4f8b\uff1a

                                                  \u7559\u610f PROVIDER_ID = \"oauth\"; \u8fd9\u4e2a\u53d8\u91cf\uff0c\u540e\u9762\u5b9a\u4e49 html \u4f1a\u7528\u5230\u3002

                                                  xxxProvider.java \u793a\u4f8b

                                                  "},{"location":"admin/ghippo/best-practice/oem/keycloak-idp.html#keycloak_1","title":"\u5982\u679c\u4e0d\u80fd\u4ece keycloak \u7ee7\u627f\u529f\u80fd","text":"

                                                  \u53c2\u8003\u4e0b\u56fe\u4e2d\u7684\u4e09\u4e2a\u6587\u4ef6\u7f16\u5199\u4f60\u7684\u4ee3\u7801\uff1a

                                                  \u6dfb\u52a0 xxxProviderFactory \u5230 resource service

                                                  \u5728 services/src/main/resources/META-INF/services/org.keycloak.broker.provider.IdentityProviderFactory \u6dfb\u52a0 xxxProviderFactory\uff0c\u8fd9\u6837\u521a\u521a\u7f16\u5199\u7684\u80fd\u5de5\u4f5c\u4e86\uff1a

                                                  \u6dfb\u52a0 html \u6587\u4ef6

                                                  \u590d\u5236 themes/src/main/resources/theme/base/admin/resources/partials/realm-identity-provider-oidc.html \u6587\u4ef6\u5230\uff08\u6539\u540d\u4e3a realm-identity-provider-oauth.html \uff0c\u8fd8\u8bb0\u5f97\u4e0a\u6587\u4e2d\u9700\u8981\u7559\u610f\u7684\u53d8\u91cf\u5417\uff09 themes/src/main/resources/theme/base/admin/resources/partials/realm-identity-provider-oauth.html
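A minimal sketch of the two steps above, run from the keycloak source root; the fully qualified factory class name is a placeholder for your own package:

# register the factory with Java's ServiceLoader mechanism\necho \"org.keycloak.broker.oauth.xxxProviderFactory\" >> services/src/main/resources/META-INF/services/org.keycloak.broker.provider.IdentityProviderFactory\n# the html partial's file name must end with the PROVIDER_ID (\"oauth\")\ncp themes/src/main/resources/theme/base/admin/resources/partials/realm-identity-provider-oidc.html themes/src/main/resources/theme/base/admin/resources/partials/realm-identity-provider-oauth.html\n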

At this point all the files have been added; start debugging the functionality.

                                                  "},{"location":"admin/ghippo/best-practice/oem/keycloak-idp.html#jar","title":"\u6253\u5305\u6210 jar \u4f5c\u4e3a\u63d2\u4ef6\u8fd0\u884c","text":"

                                                  \u65b0\u5efa\u4e00\u4e2a java \u9879\u76ee\uff0c\u5e76\u5c06\u4e0a\u9762\u7684\u4ee3\u7801\u590d\u5236\u5230\u9879\u76ee\u4e2d\uff0c\u5982\u4e0b\u6240\u793a\uff1a

                                                  \u53c2\u89c1 pom.xml\u3002

                                                  \u8fd0\u884c mvn clean package \uff0c\u6253\u5305\u5b8c\u6210\u5f97\u5230 xxx-jar-with-dependencies.jar \u6587\u4ef6\u3002

                                                  \u4e0b\u8f7d keycloak Release 20.0.5 zip \u5305\u5e76\u89e3\u538b\u3002

                                                  \u5c06 xxx-jar-with-dependencies.jar \u590d\u5236\u5230 keycloak-20.0.5/providers \u76ee\u5f55\u4e2d\u3002
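A minimal sketch of the download-and-install steps above; the download URL follows keycloak's GitHub release naming and should be verified against the actual release page:

# download and extract the keycloak 20.0.5 release\nwget https://github.com/keycloak/keycloak/releases/download/20.0.5/keycloak-20.0.5.zip\nunzip keycloak-20.0.5.zip\n# drop the plugin jar into the providers directory\ncp xxx-jar-with-dependencies.jar keycloak-20.0.5/providers/\n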

Run the following command to check whether the functionality is complete:

                                                  bin/kc.sh start-dev\n
                                                  "},{"location":"admin/ghippo/best-practice/oem/oem-in.html","title":"\u5982\u4f55\u5c06\u5ba2\u6237\u7cfb\u7edf\u96c6\u6210\u5230 AI \u7b97\u529b\u4e2d\u5fc3\uff08OEM IN\uff09","text":"

                                                  OEM IN \u662f\u6307\u5408\u4f5c\u4f19\u4f34\u7684\u5e73\u53f0\u4f5c\u4e3a\u5b50\u6a21\u5757\u5d4c\u5165 AI \u7b97\u529b\u4e2d\u5fc3\uff0c\u51fa\u73b0\u5728 AI \u7b97\u529b\u4e2d\u5fc3\u4e00\u7ea7\u5bfc\u822a\u680f\u3002 \u7528\u6237\u901a\u8fc7 AI \u7b97\u529b\u4e2d\u5fc3\u8fdb\u884c\u767b\u5f55\u548c\u7edf\u4e00\u7ba1\u7406\u3002\u5b9e\u73b0 OEM IN \u5171\u5206\u4e3a 5 \u6b65\uff0c\u5206\u522b\u662f\uff1a

                                                  1. \u7edf\u4e00\u57df\u540d
                                                  2. \u6253\u901a\u7528\u6237\u4f53\u7cfb
                                                  3. \u5bf9\u63a5\u5bfc\u822a\u680f
                                                  4. \u5b9a\u5236\u5916\u89c2
                                                  5. \u6253\u901a\u6743\u9650\u4f53\u7cfb\uff08\u53ef\u9009\uff09

                                                  Note

                                                  \u4ee5\u4e0b\u4f7f\u7528\u5f00\u6e90\u8f6f\u4ef6 Label Studio \u6765\u505a\u5d4c\u5957\u6f14\u793a\u3002\u5b9e\u9645\u573a\u666f\u9700\u8981\u81ea\u5df1\u89e3\u51b3\u5ba2\u6237\u7cfb\u7edf\u7684\u95ee\u9898\uff1a

                                                  \u4f8b\u5982\u5ba2\u6237\u7cfb\u7edf\u9700\u8981\u81ea\u5df1\u6dfb\u52a0\u4e00\u4e2a Subpath\uff0c\u7528\u4e8e\u533a\u5206\u54ea\u4e9b\u662f AI \u7b97\u529b\u4e2d\u5fc3\u7684\u670d\u52a1\uff0c\u54ea\u4e9b\u662f\u5ba2\u6237\u7cfb\u7edf\u7684\u670d\u52a1\u3002

                                                  "},{"location":"admin/ghippo/best-practice/oem/oem-in.html#_1","title":"\u73af\u5883\u51c6\u5907","text":"
                                                  1. \u90e8\u7f72 AI \u7b97\u529b\u4e2d\u5fc3\u73af\u5883\uff1a

https://10.6.202.177:30443 serves as the AI Computing Center environment.

2. Deploy the customer system environment:

http://10.6.202.177:30123 serves as the customer system.

Adjust the operations on the customer system during this walkthrough to your actual situation.

3. Plan the customer system's Subpath: http://10.6.202.177:30123/label-studio (use a distinctive name as the Subpath; it must not conflict with the main AI Computing Center's HTTP routers). Make sure users can access the customer system normally via http://10.6.202.177:30123/label-studio.

                                                  "},{"location":"admin/ghippo/best-practice/oem/oem-in.html#_2","title":"\u7edf\u4e00\u57df\u540d\u548c\u7aef\u53e3","text":"
1. SSH into the AI Computing Center server.

                                                    ssh root@10.6.202.177\n
2. Create and edit the label-studio.yaml file with vim:

                                                    vim label-studio.yaml\n
                                                    label-studio.yaml
apiVersion: networking.istio.io/v1beta1\nkind: ServiceEntry\nmetadata:\n  name: label-studio\n  namespace: ghippo-system\nspec:\n  exportTo:\n  - \"*\"\n  hosts:\n  - label-studio.svc.external\n  ports:\n  # Add a virtual port\n  - number: 80\n    name: http\n    protocol: HTTP\n  location: MESH_EXTERNAL\n  resolution: STATIC\n  endpoints:\n  # Change to the customer system's domain name (or IP)\n  - address: 10.6.202.177\n    ports:\n      # Change to the customer system's port\n      http: 30123\n---\napiVersion: networking.istio.io/v1alpha3\nkind: VirtualService\nmetadata:\n  # Change to the customer system's name\n  name: label-studio\n  namespace: ghippo-system\nspec:\n  exportTo:\n  - \"*\"\n  hosts:\n  - \"*\"\n  gateways:\n  - ghippo-gateway\n  http:\n  - match:\n      - uri:\n          exact: /label-studio # Change to the customer system's route in the AI Computing Center Web UI entry\n      - uri:\n          prefix: /label-studio/ # Change to the customer system's route in the AI Computing Center Web UI entry\n    route:\n    - destination:\n        # Change to the value of spec.hosts in the ServiceEntry above\n        host: label-studio.svc.external\n        port:\n          # Change to the value of spec.ports in the ServiceEntry above\n          number: 80\n---\napiVersion: security.istio.io/v1beta1\nkind: AuthorizationPolicy\nmetadata:\n  # Change to the customer system's name\n  name: label-studio\n  namespace: istio-system\nspec:\n  action: ALLOW\n  selector:\n    matchLabels:\n      app: istio-ingressgateway\n  rules:\n  - from:\n    - source:\n        requestPrincipals:\n        - '*'\n  - to:\n    - operation:\n        paths:\n        - /label-studio # Change to the value of spec.http.match.uri.prefix in the VirtualService\n        - /label-studio/* # Change to the value of spec.http.match.uri.prefix in the VirtualService (note the trailing \"*\")\n
3. Apply label-studio.yaml with kubectl:

kubectl apply -f label-studio.yaml\n
4. Verify that the Label Studio UI's IP and port are consistent:
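For example, you can check from the server that the unified entry reaches the Label Studio UI (a quick sketch using the demo addresses above):

curl -k https://10.6.202.177:30443/label-studio/\n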

                                                  "},{"location":"admin/ghippo/best-practice/oem/oem-in.html#_3","title":"\u6253\u901a\u7528\u6237\u4f53\u7cfb","text":"

Connect the customer system to the AI Computing Center platform via protocols such as OIDC/OAuth, so that after logging in to AI Computing Center a user can enter the customer system without logging in again.

                                                  Note

The demo here connects two AI Computing Center instances to each other, covering both scenarios: using AI Computing Center as the user source to log in to the customer platform, and using the customer platform as the user source to log in to AI Computing Center.

1. AI Computing Center as the user source, logging in to the customer platform: first use the first AI Computing Center instance as the user source. Once connected, users in the first instance can log in to the second instance directly via OIDC, without creating users again in the second one. In the first instance, create an SSO integration via Global Management -> Users and Access Control -> Access Management.

2. The customer platform as the user source, logging in to AI Computing Center: fill in the client ID, client secret, single sign-on URL, and so on generated by the first AI Computing Center instance into Global Management -> Users and Access Control -> Identity Providers -> OIDC of the second instance to complete the user integration. Once connected, users in the first instance can log in to the second instance directly via OIDC, without creating users again in the second one.

3. After the integration is complete, the second instance's login page shows an OIDC option. On first login, choose to log in via OIDC (the name is customizable; here it is loginname); afterwards you enter directly without choosing again.

                                                  Note

Using two AI Computing Center instances shows that both scenarios are supported as long as the customer supports the OIDC protocol, whether AI Computing Center or the "customer platform" acts as the user source.

                                                  "},{"location":"admin/ghippo/best-practice/oem/oem-in.html#_4","title":"\u5bf9\u63a5\u5bfc\u822a\u680f","text":"

Use the tar package referenced at the bottom of this document to implement an empty-shell front-end sub-application, and embed the customer system into that shell application as an iframe.

1. Download gproduct-demo-main.tar.gz, open src/App-iframe.vue, and modify its src attribute value (the address for entering the customer system):

• Absolute address: src="https://10.6.202.177:30443/label-studio" (AI Computing Center address + Subpath)
• Relative address: src="./external-anyproduct/insight"
                                                    App-iframe.vue
<template>\n  <iframe\n    src=\"https://daocloud.io\"\n    title=\"demo\"\n    class=\"iframe-container\"\n  ></iframe>\n</template>\n\n<style lang=\"scss\">\nhtml,\nbody {\n  height: 100%;\n}\n\n#app {\n  display: flex;\n  height: 100%;\n  .iframe-container {\n    border: 0;\n    flex: 1 1 0;\n  }\n}\n</style>\n
2. Delete the App.vue and main.ts files under the src folder, then:

• Rename App-iframe.vue to App.vue
• Rename main-iframe.ts to main.ts
3. Build the image following the readme steps (note: before running the last step, replace the image address in demo.yaml with the address of the image you built; see the command sketch after demo.yaml below).

                                                    demo.yaml
kind: Namespace\napiVersion: v1\nmetadata:\n  name: gproduct-demo\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: gproduct-demo\n  namespace: gproduct-demo\n  labels:\n    app: gproduct-demo\nspec:\n  selector:\n    matchLabels:\n      app: gproduct-demo\n  template:\n    metadata:\n      name: gproduct-demo\n      labels:\n        app: gproduct-demo\n    spec:\n      containers:\n      - name: gproduct-demo\n        image: release.daocloud.io/gproduct-demo # Change this image address\n        ports:\n        - containerPort: 80\n---\napiVersion: v1\nkind: Service\n...\n
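The build-and-deploy commands implied by step 3 look roughly like this (a sketch; the registry address and tag are placeholders, and the authoritative steps are in the tar package's readme):

docker build -t {your-registry}/gproduct-demo:v0.0.1 .\ndocker push {your-registry}/gproduct-demo:v0.0.1\nkubectl apply -f demo.yaml\n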

After the integration is complete, a Customer System entry appears in the AI Computing Center primary navigation bar; click it to enter the customer system.

                                                  "},{"location":"admin/ghippo/best-practice/oem/oem-in.html#_5","title":"\u5b9a\u5236\u5916\u89c2","text":"

                                                  Note

AI Computing Center supports appearance customization by writing CSS. How the customer system implements appearance customization in practice depends on the actual situation.

Log in to the customer system, then customize the platform background color, logo, name, and so on via Global Management -> Platform Settings -> Appearance Customization. For detailed steps, see Appearance Customization.

                                                  "},{"location":"admin/ghippo/best-practice/oem/oem-in.html#_6","title":"\u6253\u901a\u6743\u9650\u4f53\u7cfb\uff08\u53ef\u9009\uff09","text":"

Approach 1:

A customization team implements a dedicated customization module. AI Computing Center notifies this module of every user login event via a Webhook; the module can then call the OpenAPIs of AnyProduct and AI Computing Center to synchronize that user's permission information.

Approach 2:

Notify AnyProduct of every authorization change via a Webhook (this can be implemented later if needed).

                                                  "},{"location":"admin/ghippo/best-practice/oem/oem-in.html#anyproduct-ai","title":"AnyProduct \u4f7f\u7528 AI \u7b97\u529b\u4e2d\u5fc3\u7684\u5176\u4ed6\u80fd\u529b(\u53ef\u9009)","text":"

This is done by calling the AI Computing Center OpenAPI.

                                                  "},{"location":"admin/ghippo/best-practice/oem/oem-in.html#_7","title":"\u53c2\u8003\u8d44\u6599","text":"
• See the OEM OUT documentation
• See the gProduct-demo-main integration tar package
                                                  "},{"location":"admin/ghippo/best-practice/oem/oem-out.html","title":"\u5982\u4f55\u5c06AI \u7b97\u529b\u4e2d\u5fc3\u96c6\u6210\u5230\u5ba2\u6237\u7cfb\u7edf\uff08OEM OUT\uff09","text":"

OEM OUT means integrating AI Computing Center as a submodule into another product, where it appears in that product's menus. After logging in to the other product, users can jump directly to AI Computing Center without a second login. Implementing OEM OUT takes five steps:

• Unify the domain name
• Integrate the user systems
• Connect the navigation bar
• Customize the appearance
• Integrate the permission systems (optional)
                                                  "},{"location":"admin/ghippo/best-practice/oem/oem-out.html#_1","title":"\u7edf\u4e00\u57df\u540d","text":"
1. Deploy AI Computing Center (assume the deployed access address is https://10.6.8.2:30343/).

2. You can place an nginx reverse proxy in front of the customer system and AI Computing Center to achieve same-domain access: route / to the customer system and /dce5 (the subpath) to AI Computing Center. An example of vi /etc/nginx/conf.d/default.conf:

server {\n    listen       80;\n    server_name  localhost;\n\n    location /dce5/ {\n      proxy_pass https://10.6.8.2:30343/;\n      proxy_http_version 1.1;\n      proxy_read_timeout 300s; # Needed for the kpanda cloudtty feature; otherwise this line can be removed\n      proxy_send_timeout 300s; # Needed for the kpanda cloudtty feature; otherwise this line can be removed\n\n      proxy_set_header Host $host;\n      proxy_set_header X-Real-IP $remote_addr;\n      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n\n      proxy_set_header Upgrade $http_upgrade; # Needed for the kpanda cloudtty feature; otherwise this line can be removed\n      proxy_set_header Connection $connection_upgrade; # Needed for the kpanda cloudtty feature; otherwise this line can be removed\n    }\n\n    location / {\n        proxy_pass https://10.6.165.50:30443/; # Assume this is the customer system address (e.g., Ruyi Cloud)\n        proxy_http_version 1.1;\n\n        proxy_set_header Host $host;\n        proxy_set_header X-Real-IP $remote_addr;\n        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n    }\n}\n
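Note that $connection_upgrade is not an nginx built-in variable; if you keep the cloudtty-related lines above, the surrounding http block also needs a map such as the following (a standard nginx idiom, shown here as a sketch):

map $http_upgrade $connection_upgrade {\n    default upgrade;\n    ''      close;\n}\n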
3. Assuming the nginx entry address is 10.6.165.50, follow "Customize the AI Computing Center reverse proxy server address" to set the AI_PROXY reverse proxy to http://10.6.165.50/dce5. Make sure AI Computing Center is reachable via http://10.6.165.50/dce5. The customer system also needs its own reverse proxy settings, which must be handled according to the specific platform.
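A quick reachability check for both routes (a sketch using the demo addresses above):

curl -kL http://10.6.165.50/dce5/\ncurl -L http://10.6.165.50/\n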

                                                  "},{"location":"admin/ghippo/best-practice/oem/oem-out.html#_2","title":"\u6253\u901a\u7528\u6237\u4f53\u7cfb","text":"

Connect the customer system to the AI Computing Center platform via protocols such as OIDC/OAuth, so that after logging in to the customer system a user can enter AI Computing Center without logging in again. Once you have the customer system's OIDC information, fill it in under Global Management -> Users and Access Control -> Identity Providers.

After the integration is complete, the AI Computing Center login page shows an OIDC (custom-named) option. When entering AI Computing Center from the customer system for the first time, choose to log in via OIDC; afterwards you enter AI Computing Center directly without choosing again.

                                                  "},{"location":"admin/ghippo/best-practice/oem/oem-out.html#_3","title":"\u5bf9\u63a5\u5bfc\u822a\u680f","text":"

Connecting the navigation bar means AI Computing Center appears in the customer system's menus, and users can enter AI Computing Center directly by clicking the corresponding menu item. This therefore depends on the customer system, and different platforms must handle it according to their specifics.

                                                  "},{"location":"admin/ghippo/best-practice/oem/oem-out.html#_4","title":"\u5b9a\u5236\u5916\u89c2","text":"

Customize the platform background color, logo, name, and so on via Global Management -> Platform Settings -> Appearance Customization. For detailed steps, see Appearance Customization.

                                                  "},{"location":"admin/ghippo/best-practice/oem/oem-out.html#_5","title":"\u6253\u901a\u6743\u9650\u4f53\u7cfb\uff08\u53ef\u9009\uff09","text":"

Integrating the permission systems is relatively complex; contact the Global Management team if you need it.

                                                  "},{"location":"admin/ghippo/best-practice/oem/oem-out.html#_6","title":"\u53c2\u8003","text":"
• OEM IN documentation
                                                  "},{"location":"admin/ghippo/install/gm-gateway.html","title":"\u4f7f\u7528\u56fd\u5bc6\u7f51\u5173\u4ee3\u7406 AI \u7b97\u529b\u4e2d\u5fc3","text":"

Follow the steps below to configure a GM gateway for AI Computing Center.

                                                  "},{"location":"admin/ghippo/install/gm-gateway.html#_1","title":"\u8f6f\u4ef6\u4ecb\u7ecd","text":"

Tengine: Tengine is a web server project initiated by Taobao. Built on top of Nginx, it adds many advanced features for high-traffic websites, such as support for the Tongsuo plugin and for GM (SM) certificates.

Tongsuo: Tongsuo (formerly BabaSSL) is an open-source foundational cryptographic library providing modern cryptographic algorithms and secure communication protocols. It supplies low-level cryptographic capabilities for storage, networking, key management, privacy computing, and many other business scenarios, ensuring the confidentiality, integrity, and authenticity of data in transit, in use, and at rest, and protecting privacy and security throughout the data lifecycle.

                                                  "},{"location":"admin/ghippo/install/gm-gateway.html#_2","title":"\u51c6\u5907\u5de5\u4f5c","text":"

A Linux host with Docker installed and with Internet access.

                                                  "},{"location":"admin/ghippo/install/gm-gateway.html#_3","title":"\u7f16\u8bd1\u548c\u5b89\u88c5\u56fd\u5bc6\u7f51\u5173","text":"

The following shows how to build a GM gateway with Tengine and Tongsuo.

                                                  Note

This configuration is for reference only.

                                                  FROM docker.m.daocloud.io/debian:11.3\n\n# Version\nENV TENGINE_VERSION=\"2.3.4\" \\\n    TONGSUO_VERSION=\"8.3.2\"\n\n# Install required system packages and dependencies\nRUN apt update && \\\n    apt -y install \\\n    wget \\\n    gcc \\\n    make \\\n    libpcre3 \\\n    libpcre3-dev \\\n    zlib1g-dev \\\n    perl \\\n    && apt clean\n\n# Build tengine\nRUN mkdir -p /tmp/pkg/cache/ && cd /tmp/pkg/cache/ \\\n    && wget https://github.com/alibaba/tengine/archive/refs/tags/${TENGINE_VERSION}.tar.gz -O tengine-${TENGINE_VERSION}.tar.gz \\\n    && tar zxvf tengine-${TENGINE_VERSION}.tar.gz \\\n    && wget https://github.com/Tongsuo-Project/Tongsuo/archive/refs/tags/${TONGSUO_VERSION}.tar.gz -O Tongsuo-${TONGSUO_VERSION}.tar.gz \\\n    && tar zxvf Tongsuo-${TONGSUO_VERSION}.tar.gz \\\n    && cd tengine-${TENGINE_VERSION} \\\n    && ./configure \\\n        --add-module=modules/ngx_openssl_ntls \\\n        --with-openssl=/tmp/pkg/cache/Tongsuo-${TONGSUO_VERSION} \\\n        --with-openssl-opt=\"--strict-warnings enable-ntls\" \\\n        --with-http_ssl_module --with-stream \\\n        --with-stream_ssl_module --with-stream_sni \\\n    && make \\\n    && make install \\\n    && ln -s /usr/local/nginx/sbin/nginx /usr/sbin/ \\\n    && rm -rf /tmp/pkg/cache\n\nEXPOSE 80 443\nSTOPSIGNAL SIGTERM\nCMD [\"nginx\", \"-g\", \"daemon off;\"]\n
                                                  docker build -t tengine:0.0.1 .\n
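To try the freshly built image locally, you can run it with the standard ports mapped (a sketch; the container name is arbitrary):

docker run -d --name gm-gateway -p 80:80 -p 443:443 tengine:0.0.1\n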
                                                  "},{"location":"admin/ghippo/install/gm-gateway.html#sm2-rsa-tls","title":"\u751f\u6210 SM2 \u548c RSA TLS \u8bc1\u4e66","text":"

The following shows how to generate SM2 and RSA TLS certificates and configure the GM gateway.

                                                  "},{"location":"admin/ghippo/install/gm-gateway.html#sm2-tls","title":"SM2 TLS \u8bc1\u4e66","text":"

                                                  Note

This certificate is only suitable for test environments.

You can follow the official Tongsuo documentation to generate an SM2 certificate with OpenSSL, or apply for an SM2 certificate from the GM SSL Lab.

In the end we get the following files:

                                                  -rw-r--r-- 1 root root  749 Dec  8 02:59 sm2.*.enc.crt.pem\n-rw-r--r-- 1 root root  258 Dec  8 02:59 sm2.*.enc.key.pem\n-rw-r--r-- 1 root root  749 Dec  8 02:59 sm2.*.sig.crt.pem\n-rw-r--r-- 1 root root  258 Dec  8 02:59 sm2.*.sig.key.pem\n
                                                  "},{"location":"admin/ghippo/install/gm-gateway.html#rsa-tls","title":"RSA TLS \u8bc1\u4e66","text":"
                                                  -rw-r--r-- 1 root root  216 Dec  8 03:21 rsa.*.crt.pem\n-rw-r--r-- 1 root root 4096 Dec  8 02:59 rsa.*.key.pem\n
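For a test environment, a self-signed RSA certificate matching the listing above can be generated with plain OpenSSL (a sketch; the CN is a placeholder, and the * in the listing stands for your domain):

openssl req -x509 -newkey rsa:4096 -days 365 -nodes \\\n  -subj "/CN=example.com" \\\n  -keyout rsa.example.com.key.pem \\\n  -out rsa.example.com.crt.pem\n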
                                                  "},{"location":"admin/ghippo/install/gm-gateway.html#sm2-rsa-tls_1","title":"\u7ed9\u56fd\u5bc6\u7f51\u5173\u914d\u7f6e SM2 \u548c RSA TLS \u8bc1\u4e66","text":"

The GM gateway used in this document supports both SM2 and RSA TLS certificates. The advantage of dual certificates: when a browser does not support SM2 TLS certificates, the gateway automatically falls back to the RSA TLS certificate.

For more detailed configuration, see the official Tongsuo documentation.

Enter the Tengine container:

# Enter the nginx configuration directory\ncd /usr/local/nginx/conf\n\n# Create a cert folder to store the TLS certificates\nmkdir cert\n\n# Copy the SM2 and RSA TLS certificates into the `/usr/local/nginx/conf/cert` directory\ncp sm2.*.enc.crt.pem sm2.*.enc.key.pem  sm2.*.sig.crt.pem  sm2.*.sig.key.pem /usr/local/nginx/conf/cert\ncp rsa.*.crt.pem  rsa.*.key.pem /usr/local/nginx/conf/cert\n\n# Edit the nginx.conf configuration\nvim nginx.conf\n...\nserver {\n  listen 443          ssl;\n  proxy_http_version  1.1;\n  # Enable GM support so TLS certificates using the SM2 algorithm work\n  enable_ntls         on;\n\n  # RSA certificate\n  # If a browser does not support GM certificates, enable this option; Tengine detects the end user's browser and falls back to the RSA certificate\n  ssl_certificate                 /usr/local/nginx/conf/cert/rsa.*.crt.pem;\n  ssl_certificate_key             /usr/local/nginx/conf/cert/rsa.*.key.pem;\n\n  # Configure two SM2 certificate pairs, one for encryption and one for signing\n  # SM2 signing certificate\n  ssl_sign_certificate            /usr/local/nginx/conf/cert/sm2.*.sig.crt.pem;\n  ssl_sign_certificate_key        /usr/local/nginx/conf/cert/sm2.*.sig.key.pem;\n  # SM2 encryption certificate\n  ssl_enc_certificate             /usr/local/nginx/conf/cert/sm2.*.enc.crt.pem;\n  ssl_enc_certificate_key         /usr/local/nginx/conf/cert/sm2.*.enc.key.pem;\n  ssl_protocols                   TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;\n\n  location / {\n    proxy_set_header Host $http_host;\n    proxy_set_header X-Real-IP $remote_addr;\n    proxy_set_header REMOTE-HOST $remote_addr;\n    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n    # Change this address to the Istio ingress gateway address\n    # e.g. proxy_pass https://istio-ingressgateway.istio-system.svc.cluster.local\n    # or proxy_pass https://demo-dev.daocloud.io\n    proxy_pass https://istio-ingressgateway.istio-system.svc.cluster.local;\n  }\n}\n
                                                  "},{"location":"admin/ghippo/install/gm-gateway.html#_4","title":"\u91cd\u65b0\u52a0\u8f7d\u56fd\u5bc6\u7f51\u5173\u7684\u914d\u7f6e","text":"
                                                  nginx -s reload\n
                                                  "},{"location":"admin/ghippo/install/gm-gateway.html#_5","title":"\u4e0b\u4e00\u6b65","text":"

After the GM gateway is deployed successfully, customize the AI Computing Center reverse proxy server address.

                                                  "},{"location":"admin/ghippo/install/gm-gateway.html#_6","title":"\u9a8c\u8bc1","text":"

You can install a web browser that supports GM certificates, such as Samarium Browser, then access the AI Computing Center UI through Tengine to verify that the GM certificate takes effect.
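To check the RSA fallback path from a machine whose TLS stack does not support SM2, a standard probe of the served certificate also works (a sketch; replace the address with your gateway):

openssl s_client -connect {gateway-address}:443 </dev/null 2>/dev/null | openssl x509 -noout -subject -issuer\n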

                                                  "},{"location":"admin/ghippo/install/login.html","title":"\u767b\u5f55","text":"

Before using a new system, a user has no data in it and the system cannot recognize the new user. To identify the user and bind user data, the user needs an account that uniquely identifies their identity.

In Users and Access Control, AI Computing Center assigns a user an account with certain permissions by having an administrator create a new user. All actions the user performs are associated with their own account.

The user logs in with an account and password; the system verifies whether the identity is valid, and if it is, the user logs in successfully.

                                                  Note

If a user performs no action within 24 hours of logging in, they are automatically logged out. If the logged-in user stays active, they remain logged in.

The basic user login flow is shown in the diagram below.

graph TB\n\nuser[Enter username] --> pass[Enter password] --> judge([Click Login to validate the username and password])\njudge -.Valid.->success[Login succeeded]\njudge -.Invalid.->fail[Show an error]\n\nclassDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;\nclassDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;\nclassDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n\nclass user,pass cluster;\nclass judge plain\nclass success,fail k8s

The user login page is shown below. For the exact login screen, refer to the actual product.

                                                  "},{"location":"admin/ghippo/install/offline-install.html","title":"\u79bb\u7ebf\u5347\u7ea7\u5168\u5c40\u7ba1\u7406\u6a21\u5757","text":"

This page explains how to install or upgrade the Global Management module after you have downloaded it.

                                                  Info

The string ghippo that appears in the commands or scripts below is the internal development codename of the Global Management module.

                                                  "},{"location":"admin/ghippo/install/offline-install.html#_2","title":"\u4ece\u5b89\u88c5\u5305\u4e2d\u52a0\u8f7d\u955c\u50cf","text":"

You can load the images in either of the two ways below. When an image registry exists in your environment, it is recommended to use chart-syncer to sync the images to the registry, which is more efficient and convenient.

                                                  "},{"location":"admin/ghippo/install/offline-install.html#chart-syncer","title":"chart-syncer \u540c\u6b65\u955c\u50cf\u5230\u955c\u50cf\u4ed3\u5e93","text":"
1. Create load-image.yaml

                                                    Note

All parameters in this YAML file are required. You need a private image registry and must modify the related configuration.

Chart repo installed / Chart repo not installed

If a chart repo is already installed in the current environment, chart-syncer also supports exporting the Chart as a tgz file.

                                                    load-image.yaml
                                                    source:\n  intermediateBundlesPath: ghippo-offline # (1)!\ntarget:\n  containerRegistry: 10.16.10.111 # (2)!\n  containerRepository: release.daocloud.io/ghippo # (3)!\n  repo:\n    kind: HARBOR # (4)!\n    url: http://10.16.10.111/chartrepo/release.daocloud.io # (5)!\n    auth:\n      username: \"admin\" # (6)!\n      password: \"Harbor12345\" # (7)!\n  containers:\n    auth:\n      username: \"admin\" # (8)!\n      password: \"Harbor12345\" # (9)!\n
1. Path relative to where the charts-syncer command is run, not the relative path between this YAML file and the offline package
2. Change to your image registry URL
3. Change to your image repository
4. Can also be any other supported Helm Chart repository kind
5. Change to your chart repo URL
6. Your registry username
7. Your registry password
8. Your registry username
9. Your registry password

If no chart repo is installed in the current environment, chart-syncer also supports exporting the Chart as a tgz file and storing it at a specified path.

                                                    load-image.yaml
                                                    source:\n  intermediateBundlesPath: ghippo-offline # (1)!\ntarget:\n  containerRegistry: 10.16.10.111 # (2)!\n  containerRepository: release.daocloud.io/ghippo # (3)!\n  repo:\n    kind: LOCAL\n    path: ./local-repo # (4)!\n  containers:\n    auth:\n      username: \"admin\" # (5)!\n      password: \"Harbor12345\" # (6)!\n
1. Path relative to where the charts-syncer command is run, not the relative path between this YAML file and the offline package
2. Change to your image registry URL
3. Change to your image repository
4. Local path where the Chart will be stored
5. Your registry username
6. Your registry password
2. Run the image sync command.

                                                    charts-syncer sync --config load-image.yaml\n
                                                  "},{"location":"admin/ghippo/install/offline-install.html#docker-containerd","title":"Docker \u6216 containerd \u76f4\u63a5\u52a0\u8f7d","text":"

Extract and load the image files.

1. Extract the tar archive.

                                                    tar xvf ghippo.bundle.tar\n

After successful extraction you get several files:

                                                    • hints.yaml
                                                    • images.tar
                                                    • original-chart
2. Load the images from the local files into Docker or containerd.

Docker / containerd
                                                    docker load -i images.tar\n
                                                    ctr -n k8s.io image import images.tar\n

                                                  Note

Every node must perform the Docker or containerd image load. After loading, tag the images to keep the Registry and Repository consistent with those used during installation.
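For example, re-tagging one loaded image to point at a private registry (a sketch; the image name, tag, and registry address are placeholders reusing earlier examples):

docker tag release.daocloud.io/ghippo/ghippo-apiserver:v0.9.0 10.16.10.111/ghippo/ghippo-apiserver:v0.9.0\n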

                                                  "},{"location":"admin/ghippo/install/offline-install.html#_3","title":"\u5347\u7ea7","text":"

Notes for the upgrade:

Upgrading from v0.11.x to ≥ v0.12.0 / Upgrading from v0.15.x to ≥ v0.16.0

When upgrading from v0.11.x (or lower) to v0.12.0 (or higher), all keycloak keys in bak.yaml must be renamed to keycloakx.

Before:

                                                  bak.yaml
                                                  USER-SUPPLIED VALUES:\nkeycloak:\n    ...\n

After:

                                                  bak.yaml
                                                  USER-SUPPLIED VALUES:\nkeycloakx:\n    ...\n
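One way to perform the rename mechanically is with yq v4, assuming the keycloak key sits at the top level as shown (a sketch):

yq -i '.keycloakx = .keycloak | del(.keycloak)' bak.yaml\n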

When upgrading from v0.15.x (or lower) to v0.16.0 (or higher), the database connection parameters must be modified.

Before:

                                                  bak.yaml
USER-SUPPLIED VALUES:\nglobal:\n  database:\n    host: 127.0.0.1\n    port: 3306\n    apiserver:\n      dbname: ghippo\n      password: password\n      user: ghippo\n    keycloakx:\n      dbname: keycloak\n      password: password\n      user: keycloak\n  auditDatabase:\n    auditserver:\n      dbname: audit\n      password: password\n      user: audit\n    host: 127.0.0.1\n    port: 3306\n

After:

                                                  bak.yaml
                                                  USER-SUPPLIED VALUES:\nglobal:\n  storage:\n    ghippo:\n    - driver: mysql\n      accessType: readwrite\n      dsn: {global.database.apiserver.user}:{global.database.apiserver.password}@tcp({global.database.host}:{global.database.port})/{global.database.apiserver.dbname}?charset=utf8mb4&multiStatements=true&parseTime=true\n    audit:\n    - driver: mysql\n      accessType: readwrite\n      dsn: {global.auditDatabase.auditserver.user}:{global.auditDatabase.auditserver.password}@tcp({global.auditDatabase.host}:{global.auditDatabase.port})/{global.auditDatabase.auditserver.dbname}?charset=utf8mb4&multiStatements=true&parseTime=true\n    keycloak:\n    - driver: mysql\n      accessType: readwrite\n      dsn: {global.database.keycloakx.user}:{global.database.keycloakx.password}@tcp({global.database.host}:{global.database.port})/{global.database.keycloakx.dbname}?charset=utf8mb4\n
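As a worked example, substituting the sample values from the "before" block above, the first ghippo DSN expands to:

dsn: ghippo:password@tcp(127.0.0.1:3306)/ghippo?charset=utf8mb4&multiStatements=true&parseTime=true\n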

There are two upgrade methods. Based on the preceding steps, choose the corresponding upgrade approach:

Upgrade via the Helm repo / Upgrade via the Chart package
1. Check whether the Global Management Helm repo exists.

                                                    helm repo list | grep ghippo\n

If the result is empty or shows the following message, proceed to the next step; otherwise skip the next step.

                                                    Error: no repositories to show\n
2. Add the Global Management Helm repo.

                                                    helm repo add ghippo http://{harbor url}/chartrepo/{project}\n
3. Update the Global Management Helm repo.

                                                    helm repo update ghippo # (1)!\n
1. An outdated Helm version will cause this to fail; if it fails, try running helm repo update
4. Choose the Global Management version you want to install (installing the latest version is recommended).

                                                    helm search repo ghippo/ghippo --versions\n
                                                    NAME                   CHART VERSION  APP VERSION  DESCRIPTION\nghippo/ghippo  0.9.0          v0.9.0       A Helm chart for GHippo\n...\n
5. Back up the --set parameters.

Before upgrading the Global Management version, it is recommended to run the following command to back up the old version's --set parameters.

                                                    helm get values ghippo -n ghippo-system -o yaml > bak.yaml\n
6. Update the Ghippo CRDs:

                                                    helm pull ghippo/ghippo --version 0.9.0 && tar -zxf ghippo-0.9.0.tgz\nkubectl apply -f ghippo/crds\n
7. Run helm upgrade.

Before upgrading, it is recommended to override the global.imageRegistry field in bak.yaml with the image registry address currently in use.

export imageRegistry={your-image-registry}\n
                                                    helm upgrade ghippo ghippo/ghippo \\\n  -n ghippo-system \\\n  -f ./bak.yaml \\\n  --set global.imageRegistry=$imageRegistry \\\n  --version 0.9.0\n
1. Back up the --set parameters.

Before upgrading the Global Management version, it is recommended to run the following command to back up the old version's --set parameters.

                                                    helm get values ghippo -n ghippo-system -o yaml > bak.yaml\n
2. Update the Ghippo CRDs:

                                                    kubectl apply -f ./crds\n
3. Run helm upgrade.

Before upgrading, it is recommended to override global.imageRegistry in bak.yaml with the image registry address currently in use.

export imageRegistry={your-image-registry}\n
                                                    helm upgrade ghippo . \\\n  -n ghippo-system \\\n  -f ./bak.yaml \\\n  --set global.imageRegistry=$imageRegistry\n
                                                  "},{"location":"admin/ghippo/install/reverse-proxy.html","title":"\u81ea\u5b9a\u4e49 AI \u7b97\u529b\u4e2d\u5fc3\u53cd\u5411\u4ee3\u7406\u670d\u52a1\u5668\u5730\u5740","text":"

The detailed setup steps are as follows:

1. Check whether the Global Management Helm repo exists.

                                                    helm repo list | grep ghippo\n

If the result is empty or shows the following message, proceed to the next step; otherwise skip the next step.

                                                    Error: no repositories to show\n
2. Add and update the Global Management Helm repo.

                                                    helm repo add ghippo http://{harbor url}/chartrepo/{project}\nhelm repo update ghippo\n
3. Set environment variables for later use.

# Your reverse proxy address, e.g. `export AI_PROXY="https://demo-alpha.daocloud.io"` \nexport AI_PROXY="https://domain:port"\n\n# Backup file for the helm --set parameters\nexport GHIPPO_VALUES_BAK="ghippo-values-bak.yaml"\n\n# Get the current ghippo version\nexport GHIPPO_HELM_VERSION=$(helm get notes ghippo -n ghippo-system | grep "Chart Version" | awk -F ': ' '{ print $2 }')\n
4. Back up the --set parameters.

                                                    helm get values ghippo -n ghippo-system -o yaml > ${GHIPPO_VALUES_BAK}\n
5. Add your reverse proxy address.

                                                    Note

• If available, you can use the yq command:

                                                      yq -i \".global.reverseProxy = \\\"${AI_PROXY}\\\"\" ${GHIPPO_VALUES_BAK}\n
• Or you can edit and save with vim:

vim ${GHIPPO_VALUES_BAK}\n\nUSER-SUPPLIED VALUES:\n...\nglobal:\n  ...\n  reverseProxy: ${AI_PROXY} # Only this line needs to change\n
6. Run helm upgrade to apply the configuration.

                                                    helm upgrade ghippo ghippo/ghippo \\\n  -n ghippo-system \\\n  -f ${GHIPPO_VALUES_BAK} \\\n  --version ${GHIPPO_HELM_VERSION}\n
7. Restart the Global Management Pods with kubectl to apply the configuration.

                                                    kubectl rollout restart deploy/ghippo-apiserver -n ghippo-system\nkubectl rollout restart statefulset/ghippo-keycloakx -n ghippo-system\n
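To confirm the new value took effect, you can re-read the deployed values (a quick check):

helm get values ghippo -n ghippo-system -o yaml | grep reverseProxy\n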
                                                  "},{"location":"admin/ghippo/install/user-isolation.html","title":"\u5f00\u542f Folder/WS \u4e4b\u95f4\u7684\u9694\u79bb\u6a21\u5f0f","text":"

The detailed setup steps are as follows:

1. Check whether the Global Management Helm repo exists.

                                                    helm repo list | grep ghippo\n

If the result is empty or shows the following message, proceed to the next step; otherwise skip the next step.

                                                    Error: no repositories to show\n
2. Add and update the Global Management Helm repo.

                                                    helm repo add ghippo http://{harbor url}/chartrepo/{project}\nhelm repo update ghippo\n
3. Set environment variables for later use.

# Backup file for the helm --set parameters\nexport GHIPPO_VALUES_BAK="ghippo-values-bak.yaml"\n\n# Get the current ghippo version\nexport GHIPPO_HELM_VERSION=$(helm get notes ghippo -n ghippo-system | grep "Chart Version" | awk -F ': ' '{ print $2 }')\n
4. Back up the --set parameters.

                                                    helm get values ghippo -n ghippo-system -o yaml > ${GHIPPO_VALUES_BAK}\n
5. Turn on the isolation mode switch between Folder/WS.

                                                    Note

• If available, you can use the yq command:

                                                      yq -i \".apiserver.userIsolationMode = \\\"Folder\\\"\" ${GHIPPO_VALUES_BAK}\n
• Or you can edit and save with vim:

vim ${GHIPPO_VALUES_BAK}\n\nUSER-SUPPLIED VALUES:\n...\n# Just add the two lines below\napiserver:\n  userIsolationMode: Folder\n
6. Run helm upgrade to apply the configuration.

                                                    helm upgrade ghippo ghippo/ghippo \\\n  -n ghippo-system \\\n  -f ${GHIPPO_VALUES_BAK} \\\n  --version ${GHIPPO_HELM_VERSION}\n
7. Restart the Global Management Pod with kubectl to apply the configuration.

                                                    kubectl rollout restart deploy/ghippo-apiserver -n ghippo-system\n
                                                  "},{"location":"admin/ghippo/permissions/baize.html","title":"AI Lab \u6743\u9650\u8bf4\u660e","text":"

AI Lab supports four user roles:

• Admin / Baize Owner: full create/delete/update/query permissions for all features of the Developer Console and Operations Management.
• Workspace Admin: full create/delete/update/query permissions for all Developer Console features in authorized workspaces.
• Workspace Editor: update and query permissions for all Developer Console features in authorized workspaces.
• Workspace Viewer: query permissions for all Developer Console features in authorized workspaces.

Each role has different permissions, detailed below.

Menu object Operation Admin / Baize Owner Workspace Admin Workspace Editor Workspace Viewer Developer Console Overview View overview ✓ ✓ ✓ ✓ Notebooks View Notebook list ✓ ✓ ✓ ✓ View Notebook details ✓ ✓ ✓ ✗ Create Notebook ✓ ✓ ✗ ✗ Update Notebook ✓ ✓ ✓ ✗ Clone Notebook ✓ ✓ ✗ ✗ Stop Notebook ✓ ✓ ✓ ✗ Start Notebook ✓ ✓ ✓ ✗ Delete Notebook ✓ ✓ ✗ ✗ Job list View job list ✓ ✓ ✓ ✓ View job details ✓ ✓ ✓ ✓ Create job ✓ ✓ ✗ ✗ Clone job ✓ ✓ ✗ ✗ View job workload details ✓ ✓ ✓ ✗ Delete job ✓ ✓ ✗ ✗ Job analysis View job analysis ✓ ✓ ✓ ✓ View job analysis details ✓ ✓ ✓ ✓ Delete job analysis ✓ ✓ ✗ ✗ Dataset list View dataset list ✓ ✓ ✓ ✗ Create dataset ✓ ✓ ✗ ✗ Resync dataset ✓ ✓ ✓ ✗ Update credentials ✓ ✓ ✓ ✗ Delete dataset ✓ ✓ ✗ ✗ Environment management View environment list ✓ ✓ ✓ ✓ Create environment ✓ ✓ ✗ ✗ Update environment ✓ ✓ ✓ ✗ Delete environment ✓ ✓ ✗ ✗ Inference services View inference service list ✓ ✓ ✓ ✓ View inference service details ✓ ✓ ✓ ✓ Create inference service ✓ ✓ ✗ ✗ Update inference service ✓ ✓ ✓ ✗ Stop inference service ✓ ✓ ✓ ✗ Start inference service ✓ ✓ ✓ ✗ Delete inference service ✓ ✓ ✗ ✗ Operations Management Overview View overview ✓ ✗ ✗ ✗ GPU management View GPU management list ✓ ✗ ✗ ✗ Queue management View queue list ✓ ✗ ✗ ✗ View queue details ✓ ✗ ✗ ✗ Create queue ✓ ✗ ✗ ✗ Update queue ✓ ✗ ✗ ✗ Delete queue ✓ ✗ ✗ ✗"},{"location":"admin/ghippo/permissions/kpanda.html","title":"Container Management permission notes","text":"

The container management module uses the following roles:

                                                  • Admin / Kpanda Owner
                                                  • Cluster Admin
                                                  • NS Admin
                                                  • NS Editor
                                                  • NS Viewer

                                                  Note

• For more information about permissions, refer to the container management permission system description.
• For creating, managing, and deleting roles, refer to Roles and Permission Management.
• The permissions of Cluster Admin, NS Admin, NS Editor, and NS Viewer take effect only within the current cluster or namespace.

The permissions of each role are as follows:

Level-1 feature | Level-2 feature | Permission point | Cluster Admin | NS Admin | NS Editor | NS Viewer
Cluster — Cluster list: View cluster list ✓ ✓ ✓ ✓ Integrate cluster ✗ ✗ ✗ ✗ Create cluster ✗ ✗ ✗ ✗
Cluster operations: Enter console ✓ ✓ (only from within the list) ✓ ✗ View monitoring ✓ ✗ ✗ ✗ Edit basic configuration ✓ ✗ ✗ ✗ Download kubeconfig ✓ ✓ (download kubeconfig with ns permissions) ✓ (download kubeconfig with ns permissions) ✓ (download kubeconfig with ns permissions) Disconnect ✗ ✗ ✗ ✗ View logs ✓ ✗ ✗ ✗ Retry ✗ ✗ ✗ ✗ Uninstall cluster ✗ ✗ ✗ ✗
Cluster overview: View cluster overview ✓ ✗ ✗ ✗
Node management: Integrate node ✗ ✗ ✗ ✗ View node list ✓ ✗ ✗ ✗ View node details ✓ ✗ ✗ ✗ View YAML ✓ ✗ ✗ ✗ Pause scheduling ✓ ✗ ✗ ✗ Modify labels ✓ ✗ ✗ ✗ Modify annotations ✓ ✗ ✗ ✗ Modify taints ✓ ✗ ✗ ✗ Remove node ✗ ✗ ✗ ✗
Stateless workloads: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) Create from YAML ✓ ✓ ✓ ✗ Create from image ✓ ✓ ✓ ✗ Select instances in the ws bound to the ns / Select image ✓ ✓ ✓ ✗ View IP pool ✓ ✓ ✓ ✗ Edit network interface ✓ ✓ ✓ ✗ Enter console ✓ ✓ ✓ ✗ View monitoring ✓ ✓ ✓ ✓ View logs ✓ ✓ ✓ ✓ Scale workload ✓ ✓ ✓ ✗ Edit YAML ✓ ✓ ✓ ✗ Update ✓ ✓ ✓ ✗ Status - pause upgrade ✓ ✓ ✓ ✗ Status - stop ✓ ✓ ✓ ✗ Status - restart ✓ ✓ ✓ ✗ Delete ✓ ✓ ✓ ✗
Stateful workloads: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) Create from YAML ✓ ✓ ✓ ✗ Create from image ✓ ✓ ✓ ✗ Select instances in the ws bound to the ns / Select image ✓ ✓ ✓ ✗ Enter console ✓ ✓ ✓ ✗ View monitoring ✓ ✓ ✓ ✓ View logs ✓ ✓ ✓ ✓ Scale workload ✓ ✓ ✓ ✗ Edit YAML ✓ ✓ ✓ ✗ Update ✓ ✓ ✓ ✗ Status - stop ✓ ✓ ✓ ✗ Status - restart ✓ ✓ ✓ ✗ Delete ✓ ✓ ✓ ✗
DaemonSets: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) Create from YAML ✓ ✓ ✓ ✗ Create from image ✓ ✓ ✓ ✗ Select instances in the ws bound to the ns / Select image ✓ ✓ ✓ ✗ Enter console ✓ ✓ ✓ ✗ View monitoring ✓ ✓ ✓ ✓ View logs ✓ ✓ ✓ ✓ Edit YAML ✓ ✓ ✓ ✗ Update ✓ ✓ ✓ ✗ Status - restart ✓ ✓ ✓ ✗ Delete ✓ ✓ ✓ ✗
Jobs: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) Create from YAML ✓ ✓ ✓ ✗ Create from image ✓ ✓ ✓ ✗ Instance list ✓ ✓ ✓ ✓ Select instances in the ws bound to the ns / Select image ✓ ✓ ✓ ✗ Enter console ✓ ✓ ✓ ✗ View logs ✓ ✓ ✓ ✓ View YAML ✓ ✓ ✓ ✓ Restart ✓ ✓ ✓ ✗ View events ✓ ✓ ✓ ✓ Delete ✓ ✓ ✓ ✗
CronJobs: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) Create from YAML ✓ ✓ ✓ ✗ Create from image ✓ ✓ ✓ ✗ Select instances in the ws bound to the ns / Select image ✓ ✓ ✓ ✗ Edit YAML ✓ ✓ ✓ ✗ Stop ✓ ✓ ✓ ✗ View job list ✓ ✓ ✓ ✓ View events ✓ ✓ ✓ ✓ Delete ✓ ✓ ✓ ✗
Pods: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) Enter console ✓ ✓ ✓ ✗ View monitoring ✓ ✓ ✓ ✓ View logs ✓ ✓ ✓ ✓ View YAML ✓ ✓ ✓ ✓ Upload file ✓ ✓ ✓ ✗ Download file ✓ ✓ ✓ ✗ View container list ✓ ✓ ✓ ✓ View events ✓ ✓ ✓ ✓ Delete ✓ ✓ ✓ ✗
ReplicaSet: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) Enter console ✓ ✓ ✓ ✗ View monitoring ✓ ✓ ✓ ✓ View logs ✓ ✓ ✓ ✓ View YAML ✓ ✓ ✓ ✓ Delete ✓ ✓ ✓ ✗
Helm apps: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) Update ✓ ✓ ✓ ✗ View YAML ✓ ✓ ✓ ✓ Delete ✓ ✓ ✓ ✗
Helm charts: View list ✓ ✓ ✓ ✓ View details ✓ ✓ ✓ ✓ Install chart ✓ ✓ (ns-level charts allowed) ✗ ✗ Download chart ✓ ✓ ✓ (same as the view API) ✓
Helm repositories: View list ✓ ✓ ✓ ✓ Create repository ✓ ✗ ✗ ✗ Update repository ✓ ✗ ✗ ✗ Clone repository ✓ ✗ ✗ ✗ Refresh repository ✓ ✗ ✗ ✗ Modify labels ✓ ✗ ✗ ✗ Modify annotations ✓ ✗ ✗ ✗ Delete ✓ ✗ ✗ ✗
Services: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) Create from YAML ✓ ✓ ✓ ✗ Create ✓ ✓ ✓ ✗ Update ✓ ✓ ✓ ✗ View events ✓ ✓ ✓ ✓ Edit YAML ✓ ✓ ✓ ✗ Delete ✓ ✓ ✓ ✗
Routes: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) Create from YAML ✓ ✓ ✓ ✗ Create ✓ ✓ ✓ ✗ Update ✓ ✓ ✓ ✗ View events ✓ ✓ ✓ ✓ Edit YAML ✓ ✓ ✓ ✗ Delete ✓ ✓ ✓ ✗
Network policies: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✗ Create from YAML ✓ ✓ ✓ ✗ Create ✓ ✓ ✓ ✗ Delete ✓ ✓ ✓ ✗
Network configuration: Configure network ✓ ✓ ✓ ✗
Custom resources: View list ✓ ✗ ✗ ✗ View/manage details ✓ ✗ ✗ ✗ Create from YAML ✓ ✗ ✗ ✗ Edit YAML ✓ ✗ ✗ ✗ Delete ✓ ✗ ✗ ✗
PVC: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) Create ✓ ✓ ✓ ✗ Select sc ✓ ✓ ✓ ✗ Create from YAML ✓ ✓ ✓ ✗ Edit YAML ✓ ✓ ✓ ✗ Clone ✓ ✓ ✓ ✗ Delete ✓ ✓ ✓ ✗
PV: View list ✓ ✗ ✗ ✗ View/manage details ✓ ✗ ✗ ✗ Create from YAML ✓ ✗ ✗ ✗ Create ✓ ✗ ✗ ✗ Edit YAML ✓ ✗ ✗ ✗ Update ✓ ✗ ✗ ✗ Clone ✓ ✗ ✗ ✗ Modify labels ✓ ✗ ✗ ✗ Modify annotations ✓ ✗ ✗ ✗ Delete ✓ ✗ ✗ ✗
SC: View list ✓ ✗ ✗ ✗ Create from YAML ✓ ✗ ✗ ✗ Create ✓ ✗ ✗ ✗ View YAML ✓ ✗ ✗ ✗ Update ✓ ✗ ✗ ✗ Authorize namespace ✓ ✗ ✗ ✗ Revoke authorization ✓ ✗ ✗ ✗ Delete ✓ ✗ ✗ ✗
ConfigMaps: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) Create from YAML ✓ ✓ ✓ ✗ Create ✓ ✓ ✓ ✗ Edit YAML ✓ ✓ ✓ ✗ Update ✓ ✓ ✓ ✗ Export configmap ✓ ✓ ✓ ✗ Delete ✓ ✓ ✓ ✗
Secrets: View list ✓ ✓ ✓ ✗ View/manage details ✓ ✓ ✓ ✗ Create from YAML ✓ ✓ ✓ ✗ Create ✓ ✓ ✓ ✗ Edit YAML ✓ ✓ ✓ ✗ Update ✓ ✓ ✓ ✗ Export secret ✓ ✓ ✓ ✗ Delete ✓ ✓ ✓ ✗
Namespaces: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) Create from YAML ✓ ✗ ✗ ✗ Create ✓ ✗ ✗ ✗ View YAML ✓ ✓ ✓ ✗ Modify labels ✓ ✓ ✗ ✗ Unbind workspace ✗ ✗ ✗ ✗ Bind workspace ✗ ✗ ✗ ✗ Quota management ✓ ✗ ✗ ✗ Delete ✓ ✗ ✗ ✗
Cluster operations: View list ✓ ✗ ✗ ✗ View YAML ✓ ✗ ✗ ✗ View logs ✓ ✗ ✗ ✗ Delete ✓ ✗ ✗ ✗
helm operations: Set retention count ✓ ✗ ✗ ✗ View YAML ✓ ✓ ✗ ✗ View logs ✓ ✓ ✗ ✗ Delete ✓ ✓ ✗ ✗
Cluster upgrade: View details ✓ ✗ ✗ ✗ Upgrade ✗ ✗ ✗ ✗
Cluster settings: addon plugin configuration ✓ ✗ ✗ ✗ Advanced configuration ✓ ✗ ✗ ✗
Namespaces: View list ✓ ✓ ✓ ✓ Create ✓ ✗ ✗ ✗ View/manage details ✓ ✓ ✓ ✓ View YAML ✓ ✓ ✓ ✗ Modify labels ✓ ✓ ✗ ✗ Bind workspace ✓ ✗ ✗ ✗ Quota management ✓ ✗ ✗ ✗ Delete ✓ ✗ ✗ ✗
Workloads — Stateless workloads: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) Enter console ✓ ✓ ✓ ✗ View monitoring ✓ ✓ ✓ ✓ View logs ✓ ✓ ✓ ✓ Scale workload ✓ ✓ ✓ ✗ Edit YAML ✓ ✓ ✓ ✗ Update ✓ ✓ ✓ ✗ Status - pause upgrade ✓ ✓ ✓ ✗ Status - stop ✓ ✓ ✓ ✗ Status - restart ✓ ✓ ✓ ✗ Roll back ✓ ✓ ✓ ✗ Modify labels and annotations ✓ ✓ ✓ ✗ Delete ✓ ✓ ✓ ✗
Stateful workloads: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) Enter console ✓ ✓ ✓ ✗ View monitoring ✓ ✓ ✓ ✓ View logs ✓ ✓ ✓ ✓ Scale workload ✓ ✓ ✓ ✗ Edit YAML ✓ ✓ ✓ ✗ Update ✓ ✓ ✓ ✗ Status - stop ✓ ✓ ✓ ✗ Status - restart ✓ ✓ ✓ ✗ Delete ✓ ✓ ✓ ✗
DaemonSets: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) Enter console ✓ ✓ ✓ ✗ View monitoring ✓ ✓ ✓ ✓ View logs ✓ ✓ ✓ ✓ Edit YAML ✓ ✓ ✓ ✗ Update ✓ ✓ ✓ ✗ Status - restart ✓ ✓ ✓ ✗ Delete ✓ ✓ ✓ ✗
Jobs: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) Enter console ✓ ✓ ✓ ✗ View logs ✓ ✓ ✓ ✓ View YAML ✓ ✓ ✓ ✗ Restart ✓ ✓ ✓ ✗ View events ✓ ✓ ✓ ✓ Delete ✓ ✓ ✓ ✗
CronJobs: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) View events ✓ ✓ ✓ ✓ Delete ✓ ✓ ✓ ✗
Pods: View list ✓ ✓ ✓ ✓ View/manage details ✓ ✓ ✓ ✓ (view only) Enter console ✓ ✓ ✓ ✗ View monitoring ✓ ✓ ✓ ✓ View logs ✓ ✓ ✓ ✓ View YAML ✓ ✓ ✓ ✓ Upload file ✓ ✓ ✓ ✗ Download file ✓ ✓ ✓ ✗ View container list ✓ ✓ ✓ ✓ View events ✓ ✓ ✓ ✓ Delete ✓ ✓ ✓ ✗
Backup & restore — Application backup: View list ✓ ✗ ✗ ✗ View/manage details ✓ ✗ ✗ ✗ Create backup plan ✓ ✗ ✗ ✗ View YAML ✓ ✗ ✗ ✗ Update plan ✓ ✗ ✗ ✗ Pause ✓ ✗ ✗ ✗ Run now ✓ ✗ ✗ ✗ Delete ✓ ✗ ✗ ✗
Restore backup: View list ✓ ✗ ✗ ✗ View/manage details ✓ ✗ ✗ ✗ Restore backup ✓ ✗ ✗ ✗ Delete ✓ ✗ ✗ ✗
Backup points: View list ✓ ✗ ✗ ✗ Delete ✓ ✗ ✗ ✗
Object storage: View list ✓ ✗ ✗ ✗
ETCD backup: View backup policy list ✓ ✗ ✗ ✗ Create backup policy ✓ ✗ ✗ ✗ View logs ✓ ✗ ✗ ✗ View YAML ✓ ✗ ✗ ✗ Update backup policy ✓ ✗ ✗ ✗ Stop/start ✓ ✗ ✗ ✗ Run now ✓ ✗ ✗ ✗ View/manage details ✓ ✗ ✗ ✗ Delete backup record ✓ ✗ ✗ ✗ View backup point list ✓ ✗ ✗ ✗
Cluster inspection — Cluster inspection: View list ✓ ✗ ✗ ✗ View/manage details ✓ ✗ ✗ ✗ Cluster inspection ✓ ✗ ✗ ✗ Settings ✓ ✗ ✗ ✗
Permission management — Cluster permissions: View list ✓ ✗ ✗ ✗ Authorize user as cluster admin ✓ ✗ ✗ ✗ Delete ✓ ✗ ✗ ✗
Namespace permissions: View list ✓ ✓ ✗ ✗ Authorize user as ns admin ✓ ✓ ✗ ✗ Authorize user as ns editor ✓ ✓ ✗ ✗ Authorize user as ns viewer ✓ ✓ ✗ ✗ Edit permissions ✓ ✓ ✗ ✗ Delete ✓ ✓ ✗ ✗
Security management — Compliance scanning: View scan report list ✓ ✗ ✗ ✗ View scan report details ✓ ✗ ✗ ✗ Download scan report ✓ ✗ ✗ ✗ Delete scan report ✓ ✗ ✗ ✗ View scan policy list ✓ ✗ ✗ ✗ Create scan policy ✓ ✗ ✗ ✗ Delete scan policy ✓ ✗ ✗ ✗ View scan configuration list ✓ ✗ ✗ ✗ View scan configuration details ✓ ✗ ✗ ✗ Delete scan configuration ✓ ✗ ✗ ✗
Permission scanning: View scan report list ✓ ✗ ✗ ✗ View scan report details ✓ ✗ ✗ ✗ Delete scan report ✓ ✗ ✗ ✗ View scan policy list ✓ ✗ ✗ ✗ Create scan policy ✓ ✗ ✗ ✗ Delete scan policy ✓ ✗ ✗ ✗
Vulnerability scanning: View scan report list ✓ ✗ ✗ ✗ View scan report details ✓ ✗ ✗ ✗ Delete scan report ✓ ✗ ✗ ✗ View scan policy list ✓ ✗ ✗ ✗ Create scan policy ✓ ✗ ✗ ✗ Delete scan policy ✓ ✗ ✗ ✗"},{"location":"admin/ghippo/personal-center/accesstoken.html","title":"Access Keys","text":"

An access key (Access Key) can be used to access the open APIs and for continuous publishing. Users can obtain a key in the Personal Center by following the steps below and use it to access the APIs.

                                                  "},{"location":"admin/ghippo/personal-center/accesstoken.html#_2","title":"\u83b7\u53d6\u5bc6\u94a5","text":"

Log in to the AI computing platform, find Personal Center in the drop-down menu at the upper right corner, and manage your account's access keys on the Access Keys page.

                                                  Info

The access key information is displayed only once. If you forget your access key information, you will need to create a new access key.

                                                  "},{"location":"admin/ghippo/personal-center/accesstoken.html#api","title":"\u4f7f\u7528\u5bc6\u94a5\u8bbf\u95ee API","text":"

When accessing the Suanfeng AI computing platform OpenAPI, add the request header Authorization:Bearer ${token} to the request to identify the caller, where ${token} is the key obtained in the previous step. For detailed interface information, see the OpenAPI documentation.

Request example

                                                  curl -X GET -H 'Authorization:Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IkRKVjlBTHRBLXZ4MmtQUC1TQnVGS0dCSWc1cnBfdkxiQVVqM2U3RVByWnMiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE2NjE0MTU5NjksImlhdCI6MTY2MDgxMTE2OSwiaXNzIjoiZ2hpcHBvLmlvIiwic3ViIjoiZjdjOGIxZjUtMTc2MS00NjYwLTg2MWQtOWI3MmI0MzJmNGViIiwicHJlZmVycmVkX3VzZXJuYW1lIjoiYWRtaW4iLCJncm91cHMiOltdfQ.RsUcrAYkQQ7C6BxMOrdD3qbBRUt0VVxynIGeq4wyIgye6R8Ma4cjxG5CbU1WyiHKpvIKJDJbeFQHro2euQyVde3ygA672ozkwLTnx3Tu-_mB1BubvWCBsDdUjIhCQfT39rk6EQozMjb-1X1sbLwzkfzKMls-oxkjagI_RFrYlTVPwT3Oaw-qOyulRSw7Dxd7jb0vINPq84vmlQIsI3UuTZSNO5BCgHpubcWwBss-Aon_DmYA-Et_-QtmPBA3k8E2hzDSzc7eqK0I68P25r9rwQ3DeKwD1dbRyndqWORRnz8TLEXSiCFXdZT2oiMrcJtO188Ph4eLGut1-4PzKhwgrQ' https://demo-dev.daocloud.io/apis/ghippo.io/v1alpha1/users?page=1&pageSize=10 -k\n

Response

                                                  {\n    \"items\": [\n        {\n            \"id\": \"a7cfd010-ebbe-4601-987f-d098d9ef766e\",\n            \"name\": \"a\",\n            \"email\": \"\",\n            \"description\": \"\",\n            \"firstname\": \"\",\n            \"lastname\": \"\",\n            \"source\": \"locale\",\n            \"enabled\": true,\n            \"createdAt\": \"1660632794800\",\n            \"updatedAt\": \"0\",\n            \"lastLoginAt\": \"\"\n        }\n    ],\n    \"pagination\": {\n        \"page\": 1,\n        \"pageSize\": 10,\n        \"total\": 1\n    }\n}\n
                                                  "},{"location":"admin/ghippo/personal-center/language.html","title":"\u8bed\u8a00\u8bbe\u7f6e","text":"

This section explains how to set the interface language. Chinese and English are currently supported.

Language settings are the entry point for the platform's multi-language service. The platform is displayed in Chinese by default; users can switch the platform language by selecting English or by auto-detecting the browser's language preference. Each user's language setting is independent, and switching it does not affect other users.

The platform provides three language options: Chinese, English, and auto-detection of your browser's language preference.

The steps are as follows.

1. Log in to the AI computing platform with your username and password. Click Global Management at the bottom of the left navigation bar.

2. Click your username at the upper right corner and select Personal Center.

3. Click the Language Settings tab.

4. Switch the language option.

                                                  "},{"location":"admin/ghippo/personal-center/security-setting.html","title":"\u5b89\u5168\u8bbe\u7f6e","text":"

Function description: used to fill in an email address and change the login password.

• Email: after the administrator configures the mail server address, users can retrieve a forgotten password via the Forgot Password button on the login page by filling in the email address set here.
• Password: the password used to log in to the platform; changing it regularly is recommended.

The steps are as follows:

1. Click your username at the upper right corner and select Personal Center.

2. Click the Security Settings tab. Fill in your email address or change the login password.

                                                  "},{"location":"admin/ghippo/personal-center/ssh-key.html","title":"\u914d\u7f6e SSH \u516c\u94a5","text":"

This article explains how to configure an SSH public key.

                                                  "},{"location":"admin/ghippo/personal-center/ssh-key.html#1-ssh","title":"\u6b65\u9aa4 1\uff1a\u67e5\u770b\u5df2\u5b58\u5728\u7684 SSH \u5bc6\u94a5","text":"

Before generating a new SSH key, first check whether you can use an SSH key that already exists locally. SSH key pairs are generally stored in the home directory of the local user. On Linux and Mac, use the following commands directly to view existing public keys; on Windows, run the following commands in WSL (requires Windows 10 or above) or Git Bash.

• ED25519 algorithm:

  cat ~/.ssh/id_ed25519.pub
• RSA algorithm:

  cat ~/.ssh/id_rsa.pub

If a long string beginning with ssh-ed25519 or ssh-rsa is returned, a local public key already exists; you can skip Step 2 (generating an SSH key) and proceed directly to Step 3.
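For reference, the same check can be done with a short Python sketch that lists every local public key regardless of algorithm; this is merely a convenience equivalent of the cat commands above, not a platform requirement.

from pathlib import Path

# Print every public key found in the default ~/.ssh directory.
ssh_dir = Path.home() / ".ssh"
if ssh_dir.is_dir():
    for pub in sorted(ssh_dir.glob("*.pub")):
        print(pub.name, "->", pub.read_text().strip())
else:
    print("No ~/.ssh directory found; generate a key as in Step 2.")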

                                                  "},{"location":"admin/ghippo/personal-center/ssh-key.html#2-ssh","title":"\u6b65\u9aa4 2\uff1a\u751f\u6210 SSH \u5bc6\u94a5","text":"

If Step 1 does not return a string of the kind described above, no usable SSH key exists locally and a new one must be generated. Proceed as follows:

1. Open a terminal (on Windows, use WSL or Git Bash) and run ssh-keygen with the -t option, as shown below.

2. Enter the key algorithm type and an optional comment.

  The comment appears in the .pub file; an email address is commonly used as the comment.

  • To generate a key pair with the ED25519 algorithm, run:

    ssh-keygen -t ed25519 -C "<comment>"
  • To generate a key pair with the RSA algorithm, run:

    ssh-keygen -t rsa -C "<comment>"
3. Press Enter and choose the path where the SSH key will be generated.

  Taking the ED25519 algorithm as an example, the default path looks like this:

    Generating public/private ed25519 key pair.
    Enter file in which to save the key (/home/user/.ssh/id_ed25519):

  By default the key is generated at /home/user/.ssh/id_ed25519, and the corresponding public key is /home/user/.ssh/id_ed25519.pub.

4. Set a passphrase for the key.

    Enter passphrase (empty for no passphrase):
    Enter same passphrase again:

  The passphrase is empty by default. You can use a passphrase to protect the private key file; if you do not want to enter it every time you access a repository over the SSH protocol, enter an empty passphrase when creating the key.

5. Press Enter to finish creating the key pair.

                                                  "},{"location":"admin/ghippo/personal-center/ssh-key.html#3","title":"\u6b65\u9aa4 3\uff1a\u62f7\u8d1d\u516c\u94a5","text":"

Besides manually copying the generated public key printed on the command line, you can copy the public key to the clipboard with a command. Use the command matching your operating system:

• Windows (in WSL or Git Bash):

  cat ~/.ssh/id_ed25519.pub | clip
• Mac:

  tr -d '\n' < ~/.ssh/id_ed25519.pub | pbcopy
• GNU/Linux (requires xclip):

  xclip -sel clip < ~/.ssh/id_ed25519.pub
                                                  "},{"location":"admin/ghippo/personal-center/ssh-key.html#4-ai","title":"\u6b65\u9aa4 4\uff1a\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e0a\u8bbe\u7f6e\u516c\u94a5","text":"
                                                  1. \u767b\u5f55\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0UI \u9875\u9762\uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u9009\u62e9 \u4e2a\u4eba\u4e2d\u5fc3 -> SSH \u516c\u94a5 \u3002

2. Add the generated SSH public key information.

  1. SSH public key content.

  2. Public key title: custom public key names are supported, for distinguishing and managing keys.

  3. Expiration time: set an expiration time for the public key; once expired, the public key automatically becomes invalid and can no longer be used. If left unset, it remains valid permanently.

                                                  "},{"location":"admin/ghippo/platform-setting/about.html","title":"\u5173\u4e8e\u5e73\u53f0","text":"

About the Platform mainly presents the current versions of each platform submodule, declares the open source software used by the platform, and thanks the platform's technical team with an animated video.

Steps to view:

1. Log in to the AI computing platform as a user with the Admin role. Click Global Management at the bottom of the left navigation bar.

2. Click Platform Settings and select About the Platform to view the product version, the open source software declaration, and the technical team.

  License declaration

  Technical team

                                                  "},{"location":"admin/ghippo/platform-setting/appearance.html","title":"\u5916\u89c2\u5b9a\u5236","text":"

In the Suanfeng AI computing platform, you can use Appearance Customization to change the login page, the top navigation bar, and the copyright and ICP filing information at the bottom, helping users better identify the product.

                                                  "},{"location":"admin/ghippo/platform-setting/appearance.html#_2","title":"\u5b9a\u5236\u8bf4\u660e","text":"
                                                  1. \u4f7f\u7528\u5177\u6709 admin \u89d2\u8272\u7684\u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\u3002\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u5e95\u90e8\u7684 \u5168\u5c40\u7ba1\u7406 -> \u5e73\u53f0\u8bbe\u7f6e \u3002

                                                  2. \u9009\u62e9 \u5916\u89c2\u5b9a\u5236 \uff0c\u5728 \u767b\u5f55\u9875\u5b9a\u5236 \u9875\u7b7e\u4e2d\uff0c\u4fee\u6539\u767b\u5f55\u9875\u7684\u56fe\u6807\u548c\u6587\u5b57\u540e\uff0c\u70b9\u51fb \u4fdd\u5b58 \u3002

                                                  3. \u9000\u51fa\u767b\u5f55\uff0c\u5728\u767b\u5f55\u9875\u5237\u65b0\u540e\u53ef\u770b\u5230\u914d\u7f6e\u540e\u7684\u6548\u679c

                                                  4. \u70b9\u51fb \u9876\u90e8\u5bfc\u822a\u680f\u5b9a\u5236 \u9875\u7b7e\uff0c\u4fee\u6539\u5bfc\u822a\u680f\u7684\u56fe\u6807\u548c\u6587\u5b57\u540e\uff0c\u70b9\u51fb \u4fdd\u5b58 \u3002

                                                  5. \u70b9\u51fb \u9ad8\u7ea7\u5b9a\u5236 \uff0c\u53ef\u4ee5\u7528 CSS \u6837\u5f0f\u8bbe\u7f6e\u767b\u5f55\u9875\u3001\u5bfc\u822a\u680f\u3001\u5e95\u90e8\u7248\u6743\u53ca\u5907\u6848\u4fe1\u606f\u3002

                                                  "},{"location":"admin/ghippo/platform-setting/appearance.html#_3","title":"\u9ad8\u7ea7\u5b9a\u5236","text":"

Advanced customization lets you modify the colors, font spacing, font size, and more of the entire container platform through CSS styles; you need to be familiar with CSS syntax. Deleting the contents of the black input box restores the default state, and you can also click the One-Click Restore button.

CSS sample for customizing the login page:

.test {
  width: 12px;
}

#kc-login {
 /* color: red!important; */
}

CSS sample for customizing pages after login:

.dao-icon.dao-iconfont.icon-service-global.dao-nav__head-icon {
   color: red!important;
}
.ghippo-header-logo {
  background-color: green!important;
}
.ghippo-header {
  background-color: rgb(128, 115, 0)!important;
}
.ghippo-header-nav-main {
  background-color: rgb(0, 19, 128)!important;
}
.ghippo-header-sub-nav-main .dao-popper-inner {
  background-color: rgb(231, 82, 13) !important;
}

Footer (copyright, ICP filing, and other information at the bottom of the page) customization example:

                                                  <div class=\"footer-content\">\n  <span class=\"footer-item\">Copyright \u00a9 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4fdd\u7559\u6240\u6709\u6743\u5229</span>\n  <a class=\"footer-item\" href=\"https://beian.miit.gov.cn/\" target=\"_blank\" rel=\"noopener noreferrer\">\u6caa ICP \u5907 xxxxxx \u53f7 - 1</a>\n  <a class=\"footer-item\" href=\"https://beian.miit.gov.cn/\" target=\"_blank\" rel=\"noopener noreferrer\">\u6caa ICP \u5907 xxxxxx \u53f7 - 2</a>\n</div>\n<div class=\"footer-content\">\n  <img class=\"gongan-icon\" src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABIAAAASCAYAAABWzo5XAAAACXBIWXMAAAsTAAALEwEAmpwYAAAKTWlDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVN3WJP3Fj7f92UPVkLY8LGXbIEAIiOsCMgQWaIQkgBhhBASQMWFiApWFBURnEhVxILVCkidiOKgKLhnQYqIWotVXDjuH9yntX167+3t+9f7vOec5/zOec8PgBESJpHmomoAOVKFPDrYH49PSMTJvYACFUjgBCAQ5svCZwXFAADwA3l4fnSwP/wBr28AAgBw1S4kEsfh/4O6UCZXACCRAOAiEucLAZBSAMguVMgUAMgYALBTs2QKAJQAAGx5fEIiAKoNAOz0ST4FANipk9wXANiiHKkIAI0BAJkoRyQCQLsAYFWBUiwCwMIAoKxAIi4EwK4BgFm2MkcCgL0FAHaOWJAPQGAAgJlCLMwAIDgCAEMeE80DIEwDoDDSv+CpX3CFuEgBAMDLlc2XS9IzFLiV0Bp38vDg4iHiwmyxQmEXKRBmCeQinJebIxNI5wNMzgwAABr50cH+OD+Q5+bk4eZm52zv9MWi/mvwbyI+IfHf/ryMAgQAEE7P79pf5eXWA3DHAbB1v2upWwDaVgBo3/ldM9sJoFoK0Hr5i3k4/EAenqFQyDwdHAoLC+0lYqG9MOOLPv8z4W/gi372/EAe/tt68ABxmkCZrcCjg/1xYW52rlKO58sEQjFu9+cj/seFf/2OKdHiNLFcLBWK8ViJuFAiTcd5uVKRRCHJleIS6X8y8R+W/QmTdw0ArIZPwE62B7XLbMB+7gECiw5Y0nYAQH7zLYwaC5EAEGc0Mnn3AACTv/mPQCsBAM2XpOMAALzoGFyolBdMxggAAESggSqwQQcMwRSswA6cwR28wBcCYQZEQAwkwDwQQgbkgBwKoRiWQRlUwDrYBLWwAxqgEZrhELTBMTgN5+ASXIHrcBcGYBiewhi8hgkEQcgIE2EhOogRYo7YIs4IF5mOBCJhSDSSgKQg6YgUUSLFyHKkAqlCapFdSCPyLXIUOY1cQPqQ28ggMor8irxHMZSBslED1AJ1QLmoHxqKxqBz0XQ0D12AlqJr0Rq0Hj2AtqKn0UvodXQAfYqOY4DRMQ5mjNlhXIyHRWCJWBomxxZj5Vg1Vo81Yx1YN3YVG8CeYe8IJAKLgBPsCF6EEMJsgpCQR1hMWEOoJewjtBK6CFcJg4Qxwicik6hPtCV6EvnEeGI6sZBYRqwm7iEeIZ4lXicOE1+TSCQOyZLkTgohJZAySQtJa0jbSC2kU6Q+0hBpnEwm65Btyd7kCLKArCCXkbeQD5BPkvvJw+S3FDrFiOJMCaIkUqSUEko1ZT/lBKWfMkKZoKpRzame1AiqiDqfWkltoHZQL1OHqRM0dZolzZsWQ8ukLaPV0JppZ2n3aC/pdLoJ3YMeRZfQl9Jr6Afp5+mD9HcMDYYNg8dIYigZaxl7GacYtxkvmUymBdOXmchUMNcyG5lnmA+Yb1VYKvYqfBWRyhKVOpVWlX6V56pUVXNVP9V5qgtUq1UPq15WfaZGVbNQ46kJ1Bar1akdVbupNq7OUndSj1DPUV+jvl/9gvpjDbKGhUaghkijVGO3xhmNIRbGMmXxWELWclYD6yxrmE1iW7L57Ex2Bfsbdi97TFNDc6pmrGaRZp3mcc0BDsax4PA52ZxKziHODc57LQMtPy2x1mqtZq1+rTfaetq+2mLtcu0W7eva73VwnUCdLJ31Om0693UJuja6UbqFutt1z+o+02PreekJ9cr1Dund0Uf1bfSj9Rfq79bv0R83MDQINpAZbDE4Y/DMkGPoa5hpuNHwhOGoEctoupHEaKPRSaMnuCbuh2fjNXgXPmasbxxirDTeZdxrPGFiaTLbpMSkxeS+Kc2Ua5pmutG003TMzMgs3KzYrMnsjjnVnGueYb7ZvNv8jYWlRZzFSos2i8eW2pZ8ywWWTZb3rJhWPlZ5VvVW16xJ1lzrLOtt1ldsUBtXmwybOpvLtqitm63Edptt3xTiFI8p0in1U27aMez87ArsmuwG7Tn2YfYl9m32zx3MHBId1jt0O3xydHXMdmxwvOuk4TTDqcSpw+lXZxtnoXOd8zUXpkuQyxKXdpcXU22niqdun3rLleUa7rrStdP1o5u7m9yt2W3U3cw9xX2r+00umxvJXcM970H08PdY4nHM452nm6fC85DnL152Xlle+70eT7OcJp7WMG3I28Rb4L3Le2A6Pj1l+s7pAz7GPgKfep+Hvqa+It89viN+1n6Zfgf8nvs7+sv9j/i/4XnyFvFOBWABwQHlAb2BGoGzA2sDHwSZBKUHNQWNBbsGLww+FUIMCQ1ZH3KTb8AX8hv5YzPcZyya0RXKCJ0VWhv6MMwmTB7WEY6GzwjfEH5vpvlM6cy2CIjgR2yIuB9pGZkX+X0UKSoyqi7qUbRTdHF09yzWrORZ+2e9jvGPqYy5O9tqtnJ2Z6xqbFJsY+ybuIC4qriBeIf4RfGXEnQTJAntieTE2MQ9ieNzAudsmjOc5JpUlnRjruXcorkX5unOy553PFk1WZB8OIWYEpeyP+WDIEJQLxhP5aduTR0T8oSbhU9FvqKNolGxt7hKPJLmnVaV9jjdO31D+miGT0Z1xjMJT1IreZEZkrkj801WRNberM/ZcdktOZSclJyjUg1plrQr1zC3KLdPZisrkw3keeZtyhuTh8r35CP5c/PbFWyFTNGjtFKuUA4WTC+oK3hbGFt4uEi9SFrUM99m/ur5IwuCFny9kLBQuLCz2Lh4WfHgIr9FuxYji1MXdy4xXVK6ZHhp8NJ9y2jLspb9UOJYUlXyannc8o5Sg9KlpUMrglc0lamUycturvRauWMVYZVkVe9ql9VbVn8qF5VfrHCsqK74sEa45uJXTl/VfPV5bdra3kq3yu3rSOuk
626s91m/r0q9akHV0IbwDa0b8Y3lG19tSt50oXpq9Y7NtM3KzQM1YTXtW8y2rNvyoTaj9nqdf13LVv2tq7e+2Sba1r/dd3vzDoMdFTve75TsvLUreFdrvUV99W7S7oLdjxpiG7q/5n7duEd3T8Wej3ulewf2Re/ranRvbNyvv7+yCW1SNo0eSDpw5ZuAb9qb7Zp3tXBaKg7CQeXBJ9+mfHvjUOihzsPcw83fmX+39QjrSHkr0jq/dawto22gPaG97+iMo50dXh1Hvrf/fu8x42N1xzWPV56gnSg98fnkgpPjp2Snnp1OPz3Umdx590z8mWtdUV29Z0PPnj8XdO5Mt1/3yfPe549d8Lxw9CL3Ytslt0utPa49R35w/eFIr1tv62X3y+1XPK509E3rO9Hv03/6asDVc9f41y5dn3m978bsG7duJt0cuCW69fh29u0XdwruTNxdeo94r/y+2v3qB/oP6n+0/rFlwG3g+GDAYM/DWQ/vDgmHnv6U/9OH4dJHzEfVI0YjjY+dHx8bDRq98mTOk+GnsqcTz8p+Vv9563Or59/94vtLz1j82PAL+YvPv655qfNy76uprzrHI8cfvM55PfGm/K3O233vuO+638e9H5ko/ED+UPPR+mPHp9BP9z7nfP78L/eE8/sl0p8zAAAAIGNIUk0AAHolAACAgwAA+f8AAIDpAAB1MAAA6mAAADqYAAAXb5JfxUYAAAQjSURBVHjaVNNZbFRlGIDh95w525zpdGa6TVtbykBbyiICxQY0AhYTJUCiiYqGqEEiJhKQmBg0ESPeeCGRENEYb4jhBr0gNQrRlCBiSgyLaSlSaKEs3Wemy+xnzuqFYdD/6rt6ku/N9wue55EcPwWArCgIgkx5ZRuYVxsnJ801Z05f3jY1MRnb/HxHV+uSph9RKq4mhkdwbZVgdQ2SHkPTwgj/h1QUWWi8/tfg/hM/XN/Y2zfaZnkSnuRDtLMsXhBOvrJtya/LlrcdMs1Qb1lVRQmSAEDAsU1kxpgamXp3y+azu1esreK9dyRqs9PIjkW6OsLx7lTV1ld/237s8HRV57MbnvO8CA+e9GCQFTk6Mza+4/0P+t9a9VSEI3uyTH/eR27aB2Ed31Q/Hx1sI6BHOPT13c5Frd0HW9p3HPUQEwAigJW9RDp+bstrOy981nVGLN/7RpHUV70YfXnEAtjxFPasxPDBQXatjzNTdOQXtg983H/51AFFy1KCIg2bNIdC+8270NwmUmelsXqSqHkDK5PDl8iCW0QcnEW+lqCjvcjQuMZ4YnQRTkotQUZu4GkjcfZNv19G011kXw4vayNYNvqCCvSVTciOgABgeuhBGwhgz5zbkI2ff7HUqJiNR2QktbbSYnBYYqbMT/ilKI4SIbT/GcRylbnvLmJ2X8N7tJ7rR8OE/BbliqEYea81WIotmOs02WFpc55Lf0f5/mSI3dsamOgxSX7ZjaALuBmB6M6FnB+S+POCwmOLk1QFFAqZyQWl1YrpiRZJLvDkygyC5NJ1XCax7xYNiTQVEYVIuUulayIcGeLkpw6WK7GuPY/fb2CkhleXIFFe8XPGaKBj9QxLW1Ik0bg8EuT2zRCJYZvZIYepe0EGbvi4bQUJVZhs2phADFYj+df0lBqJUnaekS4SUHXe3jrOnoE2PhSewHfRpfZGgcryIvfHdQruQlLo7Ns6QizqkJ31CIUlqwQJXuWUpDXj6qOsW32HT3YNImll9FwJsb4jyaLmWQ4fa6a+2sQw0ry8YZSiHcPxxXBtMfCv4XkUCrfliWs/fTE31rtTVfv9vsIorvQIniMhqXM4popVcJFVMHMpfMEaLPdxR1Tnna1b1vl6tGntpAjgCTNWONZyIFBR8Ydtr6EgrCI3VySfzZPLBDHyIq5gkpmzcOUmTGMF+bh7M9LYulfWzMmHBzk7Fpq9deWEYxjrtaCMXjWfstp6BCGNXZzBdYqYhogWqkMum4+oBVD0YnP63u/fFqbv1D+M7VSlBbmmK5uYaLYLYwslfwFVAyXQiOfcx3XyyGIM8DDn0lgWyGokHogu/0UJxpL/+f2e569s/CZQZ53OpzJr0+NXludUfb5jVdf7VUGXJUPIZast1S9PeII6jFDT5xMjFwO1S4c8zwTgnwEAxufYSzA67PMAAAAASUVORK5CYII=\" >\n  <a class=\"footer-item\" href=\"http://www.beian.gov.cn/portal/registerSystemInfo\">\u6caa\u516c\u7f51\u5b89\u5907 12345678912345\u53f7</a>\n</div>\n<style>\n.footer-content {\n  display: flex;\n  flex-wrap: wrap;\n  align-items: center;\n  justify-content: center;\n}\n.footer-content + .footer-content {\n  margin-top: 8px;\n}\n.login-pf .footer-item {\n  color: white;\n}\n.footer-item {\n  color: var(--dao-gray-010);\n  text-decoration: none;\n}\n.footer-item + .footer-item {\n  margin-left: 8px;\n}\n.gongan-icon {\n  width: 18px;\n  height: 18px;\n  margin-right: 4px;\n}\n</style>\n

                                                  Note

If you want to restore the default settings, click One-Click Restore. Note that all custom settings will be discarded after restoring.

                                                  "},{"location":"admin/ghippo/platform-setting/mail-server.html","title":"\u90ae\u4ef6\u670d\u52a1\u5668","text":"

When a user forgets the password, the Suanfeng AI computing platform sends an email to the user to verify the email address, ensuring the operation is performed by the user in person. For the platform to be able to send email, you must first provide your mail server address.

The steps are as follows:

1. Log in to the AI computing platform as a user with the admin role. Click Global Management at the bottom of the left navigation bar.

2. Click Platform Settings and select Mail Server Settings.

  Fill in the following fields to configure the mail server:

  Field | Description | Example value
  SMTP server address | The address of an SMTP server that can provide mail service | smtp.163.com
  SMTP server port | The port used for sending mail | 25
  Username | The name of the SMTP user | test@163.com
  Password | The password of the SMTP account | 123456
  Sender email | The sender's email address | test@163.com
  Use SSL secure connection | SSL can be used to encrypt mail, improving the security of information transmitted by mail; a certificate usually needs to be configured for the mail server | Disabled
3. After the configuration is complete, click Save, then click Test Mail Server. (A script for verifying the same settings outside the platform is sketched after these steps.)

4. If a prompt indicating the email was sent successfully appears at the upper right corner of the screen, the mail server has been set up successfully.
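If the test mail fails, the same settings can be verified outside the platform. Below is a minimal Python sketch using the example values from the table above (smtp.163.com:25, SSL disabled); replace the server, account, and recipient with your own.

import smtplib

# Example values from the table above; replace with your own settings.
server = smtplib.SMTP("smtp.163.com", 25, timeout=10)
try:
    server.login("test@163.com", "123456")  # SMTP username / password
    server.sendmail(
        "test@163.com",                      # sender email
        ["test@163.com"],                    # any reachable recipient
        "Subject: SMTP test\r\n\r\nMail server settings look correct.",
    )
finally:
    server.quit()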

                                                  "},{"location":"admin/ghippo/platform-setting/mail-server.html#_2","title":"\u5e38\u89c1\u95ee\u9898","text":"

Q: Why can a user still not retrieve the password after the mail server has been set up?

A: The user may not have set an email address, or may have set a wrong one. In that case, a user with the admin role can find the user by username under Global Management -> Users and Access Control and set a new login password for that user in the user details.

If the mail server cannot be reached, check whether the mail server address, username, and password are correct.

                                                  "},{"location":"admin/ghippo/platform-setting/security.html","title":"\u5b89\u5168\u7b56\u7565","text":"

The Suanfeng AI computing platform provides password-based and access-control-based security policies in the graphical interface.

Password policy

• A new password must not be the same as the most recent historical password.
• When a password expires, the system forces the user to change it.
• The password must not be the same as the username.
• The password must not be the same as the user's email address.
• Custom password rules.
• Custom minimum password length.

Access control policy

• Session timeout policy: if the user performs no operation within x hours, the current account is logged out.
• Account lockout policy: the account is locked after multiple failed logins within the limited time window.
• Login/logout policy: log the user out when the browser is closed.

After entering Global Management, click Platform Settings -> Security Policy in the left navigation bar to set the password policy and the access control policy.

                                                  "},{"location":"admin/ghippo/report-billing/index.html","title":"\u8fd0\u8425\u7ba1\u7406","text":"

Operations Management visually presents the total usage and utilization of CPU, memory, storage, and GPU across clusters, nodes, namespaces, pods, workspaces, and other dimensions within a statistical time range on the platform, as well as platform cost information automatically calculated from usage, usage time, and unit prices. The module enables all report statistics by default, and platform administrators can also manually enable or disable individual reports; after enabling or disabling, the platform starts or stops collecting report data within at most 20 minutes, while previously collected data is still displayed normally. Operations management data can be retained on the platform for up to 365 days, and statistics beyond the retention period are automatically deleted. You can also download reports in CSV or Excel format for further statistics and analysis.

Report management collects statistics on five resource types (clusters, nodes, pods, workspaces, and namespaces) across five dimensions: CPU utilization, memory utilization, storage utilization, GPU compute utilization, and GPU memory utilization. It also links with the audit and alert modules to support statistical management of audit and alert data, supporting 7 report types in total.

Metering and billing performs billing statistics on five resource types on the platform: clusters, nodes, pods, namespaces, and workspaces. Based on the usage of CPU, memory, storage, and GPU in each resource, together with the prices and currency unit configured manually by the user, it automatically calculates each resource's cost within the statistical time. Depending on the selected time span, it can quickly calculate the actual cost within that span, such as monthly, quarterly, or yearly.

                                                  "},{"location":"admin/ghippo/report-billing/billing.html","title":"\u8ba1\u91cf\u8ba1\u8d39","text":"

Building on reports, metering and billing performs further billing processing of resource usage data. Users can manually set the unit prices of CPU, memory, storage, and GPU as well as the currency unit; once set, the system automatically calculates the costs of clusters, nodes, pods, namespaces, and workspaces over a period of time. The time period can be adjusted freely and filtered by week, month, quarter, or year, after which the billing report can be exported in Excel or CSV format.

                                                  "},{"location":"admin/ghippo/report-billing/billing.html#_2","title":"\u8ba1\u8d39\u89c4\u5219\u53ca\u751f\u6548\u65f6\u95f4","text":"
                                                  • \u8ba1\u8d39\u89c4\u5219\uff1a\u9ed8\u8ba4\u6309\u7167\u8bf7\u6c42\u503c\u548c\u4f7f\u7528\u91cf\u7684\u6700\u5927\u503c\u8ba1\u8d39\u3002
                                                  • \u751f\u6548\u65f6\u95f4\uff1a\u6b21\u65e5\u751f\u6548\uff0c\u4ee5\u6b21\u65e5\u51cc\u6668\u65f6\u83b7\u53d6\u7684\u5355\u4ef7\u548c\u6570\u91cf\u8ba1\u7b97\u5f53\u5929\u4ea7\u751f\u7684\u8d39\u7528\u3002
                                                  "},{"location":"admin/ghippo/report-billing/billing.html#_3","title":"\u529f\u80fd\u7279\u6027","text":"
• Supports customizing the billing units for CPU, memory, storage, and GPU, as well as the currency unit.
• Supports querying statistics over a custom time range, automatically calculating the billing for the selected period.
• Supports exporting billing reports in CSV and Excel formats.
• Supports enabling/disabling individual billing reports; after enabling/disabling, the platform starts/stops collecting data within 20 minutes, and previously collected data continues to display normally.
• Supports selectively displaying billing data such as CPU, total memory, storage, GPU, and the overall total.
"},{"location":"admin/ghippo/report-billing/billing.html#_4","title":"Report Dimensions","text":"

The following reports are currently supported:

• Cluster billing report: shows the CPU, total memory, storage, GPU, and total billing of all clusters over a period, along with the number of nodes in each cluster during that period; clicking the node count jumps to the node billing report to view the billing of that cluster's nodes over the same period.
• Node billing report: shows the CPU, total memory, storage, GPU, and total billing of all nodes over a period, along with each node's IP, type, and owning cluster.
• Pod billing report: shows the CPU, total memory, storage, GPU, and total billing of all pods over a period, along with each pod's owning namespace, cluster, and workspace.
• Workspace billing report: shows the CPU, total memory, storage, GPU, and total billing of all workspaces over a period, along with the namespace and pod counts; clicking the namespace count jumps to the namespace billing report to view the billing of the namespaces under that workspace over the period, and the pod billing under that workspace can be viewed in the same way.
• Namespace billing report: shows the CPU, total memory, storage, GPU, and total billing of all namespaces over a period, along with the pod count, owning cluster, and owning workspace; clicking the pod count jumps to the pod billing report to view the billing of the pods under that namespace over the period.
"},{"location":"admin/ghippo/report-billing/billing.html#_5","title":"Steps","text":"
1. Log in to the AI computing platform as a user with the admin role. Click Global Management -> Operations Management at the bottom of the left navigation bar.

2. After entering Operations Management, switch between the menus to view the billing reports for clusters, nodes, pods, and so on.

"},{"location":"admin/ghippo/report-billing/report.html","title":"Report Management","text":"

Report Management visually presents statistics across the cluster, node, pod, workspace, namespace, audit, and alert dimensions, providing reliable base data for platform billing and for tuning resource usage.

"},{"location":"admin/ghippo/report-billing/report.html#_2","title":"Features","text":"
• Supports querying statistics over a custom time range
• Supports exporting reports in CSV and Excel formats
• Supports enabling/disabling individual reports; after enabling/disabling, the platform starts/stops collecting data within 20 minutes, and previously collected data continues to display normally
• Supports displaying the maximum, minimum, and average of CPU utilization, memory utilization, storage utilization, and GPU memory utilization
"},{"location":"admin/ghippo/report-billing/report.html#_3","title":"Report Dimensions","text":"

The following reports are currently supported:

• Cluster report: shows the maximum, minimum, and average CPU, memory, storage, and GPU memory utilization of all clusters over a period, along with the number of nodes in each cluster; clicking the node count jumps to the node report to view the usage of that cluster's nodes over the same period.
• Node report: shows the maximum, minimum, and average CPU, memory, storage, and GPU memory utilization of all nodes over a period, along with each node's IP, type, and owning cluster.
• Pod report: shows the maximum, minimum, and average CPU, memory, storage, and GPU memory utilization of all pods over a period, along with each pod's owning namespace, cluster, and workspace.
• Workspace report: shows the maximum, minimum, and average CPU, memory, storage, and GPU memory utilization of all workspaces over a period, along with the namespace and pod counts; clicking the namespace count jumps to the namespace report to view the usage of the namespaces under that workspace, and the pod usage under that workspace can be viewed in the same way.
• Namespace report: shows the maximum, minimum, and average CPU, memory, storage, and GPU memory utilization of all namespaces over a period, along with the pod count, owning cluster, and owning workspace; clicking the pod count jumps to the pod report to view the usage of the pods under that namespace.
• Audit report: split into a user operations report and a resource operations report. The user operations report counts the operations performed by an individual user over a period, including success and failure counts; the resource operations report counts all users' operations on a given resource type.
• Alert report: shows the number of alerts on all nodes over a period, broken down into critical, severe, and warning occurrences.
"},{"location":"admin/ghippo/report-billing/report.html#_4","title":"Steps","text":"
1. Log in to the AI computing platform as a user with the Admin role. Click Global Management -> Operations Management at the bottom of the left navigation bar.

2. After entering Operations Management, switch between the menus to view the reports for clusters, nodes, pods, and so on.

"},{"location":"admin/ghippo/troubleshooting/ghippo01.html","title":"istio-ingressgateway fails to start after restarting the cluster (VMs)?","text":"

The error message is shown in the figure below:

Possible cause: the jwksUri address in the RequestAuthentication CR is unreachable, so istiod cannot push configuration to istio-ingressgateway (Istio 1.15 can work around this bug: https://github.com/istio/istio/pull/39341/)

Solution:

1. Back up the RequestAuthentication ghippo CR.

  kubectl get RequestAuthentication ghippo -n istio-system -o yaml > ghippo-ra.yaml
2. Delete the RequestAuthentication ghippo CR.

  kubectl delete RequestAuthentication ghippo -n istio-system
3. Restart Istio.

  kubectl rollout restart deploy/istiod -n istio-system
  kubectl rollout restart deploy/istio-ingressgateway -n istio-system
4. Re-apply the RequestAuthentication ghippo CR.

  kubectl apply -f ghippo-ra.yaml

  Note

  Before applying the RequestAuthentication ghippo CR, make sure ghippo-apiserver and ghippo-keycloak have started normally.
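To confirm the recovery, you can watch the gateway pods come back to the Running state (a minimal check, assuming the default Istio workload names used in the steps above):

kubectl get pods -n istio-system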

                                                  "},{"location":"admin/ghippo/troubleshooting/ghippo02.html","title":"\u767b\u5f55\u65e0\u9650\u5faa\u73af\uff0c\u62a5\u9519 401 \u6216 403","text":"

This problem occurs because the MySQL database that ghippo-keycloak connects to has failed, causing the OIDC public keys to be reset.

In Global Management 0.11.1 and later, you can restore normal operation by using helm to update the Global Management configuration, following the steps below.

# Update the helm repository
helm repo update ghippo

# Back up the ghippo values
helm get values ghippo -n ghippo-system -o yaml > ghippo-values-bak.yaml

# Get the currently deployed ghippo version
version=$(helm get notes ghippo -n ghippo-system | grep "Chart Version" | awk -F ': ' '{ print $2 }')

# Run the upgrade to make the configuration take effect
helm upgrade ghippo ghippo/ghippo \
-n ghippo-system \
-f ./ghippo-values-bak.yaml \
--version ${version}
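Once the upgrade finishes, you can confirm that keycloak is running again before retrying login (a minimal check; the grep pattern assumes the pod names contain "keycloak"):

kubectl get pods -n ghippo-system | grep keycloak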
                                                  "},{"location":"admin/ghippo/troubleshooting/ghippo03.html","title":"Keycloak \u65e0\u6cd5\u542f\u52a8","text":""},{"location":"admin/ghippo/troubleshooting/ghippo03.html#_1","title":"\u5e38\u89c1\u6545\u969c","text":""},{"location":"admin/ghippo/troubleshooting/ghippo03.html#_2","title":"\u6545\u969c\u8868\u73b0","text":"

MySQL is ready and reports no errors, but after installing Global Management, keycloak fails to start (more than 10 times).

"},{"location":"admin/ghippo/troubleshooting/ghippo03.html#_3","title":"Checks","text":"
• If the database is MySQL, check whether the keycloak database encoding is UTF8.
• Check the network from keycloak to the database, and check whether the database has sufficient resources, including but not limited to resource limits, storage space, and physical machine resources.
"},{"location":"admin/ghippo/troubleshooting/ghippo03.html#_4","title":"Resolution Steps","text":"
1. Check whether MySQL resource usage has reached its configured limits.
2. Check whether the keycloak database in MySQL contains 95 tables (the table count may differ across Keycloak versions; compare against a development or test environment running the same Keycloak version). A lower count indicates a problem with database table initialization (query the table count with: show tables;).
3. Drop and recreate the keycloak database with CREATE DATABASE IF NOT EXISTS keycloak CHARACTER SET utf8 (see the sketch after these steps).
4. Restart the Keycloak Pod to resolve the problem.
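A minimal sketch of steps 2-3 above, assuming a reachable MySQL endpoint and root credentials (the host, credentials, and pod label selector are placeholders/assumptions):

# Count the tables in the keycloak database (compare with a healthy environment of the same version)
mysql -N -h <mysql-host> -u root -p -e "USE keycloak; SHOW TABLES;" | wc -l

# Drop and recreate the database with UTF8 encoding
mysql -h <mysql-host> -u root -p -e "DROP DATABASE IF EXISTS keycloak; CREATE DATABASE IF NOT EXISTS keycloak CHARACTER SET utf8;"

# Restart the Keycloak pods so the schema is re-initialized (the label selector is an assumption)
kubectl delete pod -n ghippo-system -l app.kubernetes.io/name=keycloak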
                                                  "},{"location":"admin/ghippo/troubleshooting/ghippo03.html#cpu-does-not-support-86-64-v2","title":"CPU does not support \u00d786-64-v2","text":""},{"location":"admin/ghippo/troubleshooting/ghippo03.html#_5","title":"\u6545\u969c\u8868\u73b0","text":"

keycloak fails to start: the keycloak pod is in the CrashLoopBackOff state, and the keycloak log shows the message in the figure below.

"},{"location":"admin/ghippo/troubleshooting/ghippo03.html#_6","title":"Checks","text":"

Run the check script below to query the x86-64 architecture feature level of the current node's CPU.

cat <<"EOF" > detect-cpu.sh
#!/bin/sh -eu

flags=$(cat /proc/cpuinfo | grep flags | head -n 1 | cut -d: -f2)

supports_v2='awk "/cx16/&&/lahf/&&/popcnt/&&/sse4_1/&&/sse4_2/&&/ssse3/ {found=1} END {exit !found}"'
supports_v3='awk "/avx/&&/avx2/&&/bmi1/&&/bmi2/&&/f16c/&&/fma/&&/abm/&&/movbe/&&/xsave/ {found=1} END {exit !found}"'
supports_v4='awk "/avx512f/&&/avx512bw/&&/avx512cd/&&/avx512dq/&&/avx512vl/ {found=1} END {exit !found}"'

echo "$flags" | eval $supports_v2 || exit 2 && echo "CPU supports x86-64-v2"
echo "$flags" | eval $supports_v3 || exit 3 && echo "CPU supports x86-64-v3"
echo "$flags" | eval $supports_v4 || exit 4 && echo "CPU supports x86-64-v4"
EOF

chmod +x detect-cpu.sh
sh detect-cpu.sh

Run the following command to view the current CPU's features; if the output contains sse4_2, your processor supports SSE 4.2.

lscpu | grep sse4_2

"},{"location":"admin/ghippo/troubleshooting/ghippo03.html#_7","title":"Solution","text":"

You need to upgrade the CPU of your VM or physical machine to support x86-64-v2 or above, making sure the x86 CPU instruction set supports SSE 4.2. For how to upgrade, consult your VM platform provider or physical machine vendor.

See: https://github.com/keycloak/keycloak/issues/17290

"},{"location":"admin/ghippo/troubleshooting/ghippo04.html","title":"Upgrade Fails When Upgrading Global Management Alone","text":"

If the upgrade failure output contains the message below, you can complete the CRD installation by referring to the "update ghippo CRDs" step in the offline upgrade guide; a sketch follows the message.

ensure CRDs are installed first
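A minimal sketch of installing the CRDs first, assuming the ghippo helm repository is already configured and that the chart ships its CRDs in a crds/ directory (the chart layout is an assumption):

# Download and unpack the target chart version (placeholder version)
helm pull ghippo/ghippo --version <target-version> --untar

# Apply the CRDs, then re-run the upgrade
kubectl apply -f ghippo/crds/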
                                                  "},{"location":"admin/ghippo/workspace/folder-permission.html","title":"\u6587\u4ef6\u5939\u6743\u9650\u8bf4\u660e","text":"

Folders have permission-mapping capability: a user's/user group's permissions on a folder can be mapped onto its subfolders, workspaces, and resources.

If a user/user group has the Folder Admin role on a folder, the mapping keeps Folder Admin on its subfolders and becomes Workspace Admin on the workspaces under it; if a Namespace is bound in Workspace and Hierarchy -> Resource Group, then after mapping the user/user group is also Namespace Admin.

                                                  Note

Folder permission mapping does not apply to shared resources, because sharing grants the usage of a cluster to multiple workspaces rather than transferring management permissions to them, so permission inheritance and role mapping do not take place.

"},{"location":"admin/ghippo/workspace/folder-permission.html#_2","title":"Use Cases","text":"

Folders are hierarchical, so when folders are mapped to levels such as departments/suppliers/projects in an enterprise:

• If a user/user group has management permission (Admin) on a first-level department, it also has management permission on the second-, third-, and fourth-level departments or projects under it;
• If a user/user group has usage permission (Editor) on a first-level department, it likewise has usage permission on the departments or projects below it;
• If a user/user group has read-only permission (Viewer) on a first-level department, it likewise has read-only permission on the departments or projects below it.
| Object | Operation | Folder Admin | Folder Editor | Folder Viewer |
| --- | --- | --- | --- | --- |
| The folder itself | View | ✓ | ✓ | ✓ |
| | Authorize | ✓ | ✗ | ✗ |
| | Modify alias | ✓ | ✗ | ✗ |
| Subfolders | Create | ✓ | ✗ | ✗ |
| | View | ✓ | ✓ | ✓ |
| | Authorize | ✓ | ✗ | ✗ |
| | Modify alias | ✓ | ✗ | ✗ |
| Workspaces under it | Create | ✓ | ✗ | ✗ |
| | View | ✓ | ✓ | ✓ |
| | Authorize | ✓ | ✗ | ✗ |
| | Modify alias | ✓ | ✗ | ✗ |
| Workspaces under it - resource groups | View | ✓ | ✓ | ✓ |
| | Bind resource | ✓ | ✗ | ✗ |
| | Unbind | ✓ | ✗ | ✗ |
| Workspaces under it - shared resources | View | ✓ | ✓ | ✓ |
| | Add share | ✓ | ✗ | ✗ |
| | Remove share | ✓ | ✗ | ✗ |
| | Resource quota | ✓ | ✗ | ✗ |
"},{"location":"admin/ghippo/workspace/folders.html","title":"Create/Delete Folders","text":"

Folders have permission-mapping capability: a user's/user group's permissions on a folder can be mapped onto its subfolders, workspaces, and resources.

Follow the steps below to create a folder.

1. Log in to the AI computing platform as a user with the admin/folder admin role, and click Global Management -> Workspace and Hierarchy at the bottom of the left navigation bar.

2. Click the Create Folder button in the upper right corner.

3. Fill in the folder name, parent folder, and other information, then click OK to finish creating the folder.

                                                  Tip

After successful creation, the folder name appears in the tree structure on the left, with different icons representing workspaces and folders.

                                                  Note

Select a folder and click the ┇ on its right to edit or delete it.

• When resources exist in the resource groups or shared resources under the folder, the folder cannot be deleted; unbind all resources first, then delete it.

• When the microservice engine module has connected registry resources under the folder, the folder cannot be deleted; remove all connected registries first, then delete the folder.

"},{"location":"admin/ghippo/workspace/quota.html","title":"Resource Quota","text":"

Sharing a resource does not mean that the recipients can use it without limit. Admin, Kpanda Owner, and Workspace Admin can limit a user's maximum usage through the Resource Quota feature of shared resources. If no limit is set, usage is unlimited.

• CPU request (cores)
• CPU limit (cores)
• Memory request (MB)
• Memory limit (MB)
• Total storage request (GB)
• Persistent volume claims (count)
• GPU type, spec, and quantity (including but not limited to Nvidia, Ascend, Iluvatar, and other GPU card types)

A resource (cluster) can be shared by multiple workspaces, and one workspace can use resources from multiple shared clusters at the same time.

"},{"location":"admin/ghippo/workspace/quota.html#_1","title":"Resource Groups and Shared Resources","text":"

The cluster resources in shared resources and in resource groups both come from Container Management, but binding a cluster to a workspace and sharing it with that same workspace produce two very different effects.

1. Binding a resource

  Gives the users/user groups in the workspace full management and usage permissions on the cluster. Workspace Admin is mapped to Cluster Admin and can enter the Container Management module to manage the cluster.

                                                    Note

  The Container Management module currently has no Cluster Editor or Cluster Viewer roles, so Workspace Editor and Workspace Viewer cannot be mapped yet.

2. Adding a shared resource

  Gives the users/user groups in the workspace usage permission on the cluster's resources, which can be used when creating namespaces.

  Unlike resource groups, when a cluster is shared with a workspace, the users' roles in the workspace are not mapped onto the resource, so a Workspace Admin is not mapped to Cluster Admin.

This section presents three scenarios related to resource quotas.

"},{"location":"admin/ghippo/workspace/quota.html#_2","title":"Creating a Namespace","text":"

Creating a namespace involves resource quotas.

1. Add a shared cluster to workspace ws01.

2. In the Application Workbench, select workspace ws01 and the shared cluster, and create namespace ns01.

  • If no resource quota is set on the shared cluster, a resource quota is optional when creating the namespace.
  • If a resource quota is set on the shared cluster (for example, CPU request = 100 cores), then the namespace must be created with CPU request ≤ 100 cores (see the sketch after this list).
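To make the second case concrete, the namespace quota corresponds roughly to a Kubernetes ResourceQuota whose CPU requests must stay within the 100 cores granted on the shared cluster. A minimal sketch (the quota name is an assumption, and the platform normally manages this object for you):

# Cap CPU requests in ns01 at the 100 cores granted on the shared cluster
kubectl create quota ns01-quota --hard=requests.cpu=100 -n ns01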
                                                  "},{"location":"admin/ghippo/workspace/quota.html#_3","title":"\u547d\u540d\u7a7a\u95f4\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4","text":"

Prerequisite: workspace ws01 has already added a shared cluster, and the operator holds the Workspace Admin + Kpanda Owner roles, or the Admin role.

The following two binding methods have the same effect.

• Bind the created namespace ns01 to ws01 in Container Management

  • If no resource quota is set on the shared cluster, namespace ns01 binds successfully whether or not it has a resource quota set.
  • If the shared cluster has a resource quota of CPU request = 100 cores, namespace ns01 must satisfy CPU request ≤ 100 cores for the binding to succeed.
• Bind namespace ns01 to ws01 in Global Management

  • If no resource quota is set on the shared cluster, namespace ns01 binds successfully whether or not it has a resource quota set.
  • If the shared cluster has a resource quota of CPU request = 100 cores, namespace ns01 must satisfy CPU request ≤ 100 cores for the binding to succeed.
"},{"location":"admin/ghippo/workspace/quota.html#_4","title":"Unbinding a Namespace from a Workspace","text":"

The following two unbinding methods have the same effect.

• Unbind namespace ns01 from workspace ws01 in Container Management

  • If no resource quota is set on the shared cluster, unbinding namespace ns01 has no effect on resource quotas, whether or not the namespace has a quota set.
  • If the shared cluster has a resource quota of CPU request = 100 cores and namespace ns01 also has a quota set, unbinding releases the corresponding quota.
• Unbind namespace ns01 from workspace ws01 in Global Management

  • If no resource quota is set on the shared cluster, unbinding namespace ns01 has no effect on resource quotas, whether or not the namespace has a quota set.
  • If the shared cluster has a resource quota of CPU request = 100 cores and namespace ns01 also has a quota set, unbinding releases the corresponding quota.
"},{"location":"admin/ghippo/workspace/res-gp-and-shared-res.html","title":"Differences Between Resource Groups and Shared Resources","text":"

Both resource groups and shared resources support binding clusters, but they differ greatly in use.

"},{"location":"admin/ghippo/workspace/res-gp-and-shared-res.html#_2","title":"Differences in Use Cases","text":"
• Binding a cluster to a resource group: typically used for bulk authorization. After the binding, the workspace administrator is mapped to cluster administrator and can manage and use the cluster's resources.
• Binding a cluster as a shared resource: typically used for resource quotas. A typical scenario is that the platform administrator allocates a cluster to a first-level supplier, who then allocates it to second-level suppliers and sets resource quotas on them.

Note: in this scenario, the platform administrator must set the resource limits on second-level suppliers; a first-level supplier limiting a second-level supplier's cluster quota is not yet supported.

"},{"location":"admin/ghippo/workspace/res-gp-and-shared-res.html#_3","title":"Differences in Using Cluster Quotas","text":"
• Resource group binding: the workspace administrator is mapped to the cluster's administrator, equivalent to being granted the Cluster Admin role in Container Management -> Permission Management. They can use the cluster's resources without restriction and manage critical items such as nodes, and resource groups cannot be subject to resource quotas.
• Shared resource binding: the workspace administrator can only use the cluster's quota to create namespaces in the Application Workbench and has no management permission on the cluster. If a quota is set for the workspace, the workspace administrator can only create and use namespaces within that quota.
"},{"location":"admin/ghippo/workspace/res-gp-and-shared-res.html#_4","title":"Differences in Resource Types","text":"
• Resource groups: can bind clusters, cluster-namespaces, multicloud, multicloud-namespaces, meshes, and mesh-namespaces
• Shared resources: can only bind clusters
"},{"location":"admin/ghippo/workspace/res-gp-and-shared-res.html#_5","title":"Similarities Between Resource Groups and Shared Resources","text":"

After a cluster is bound to a resource group or as a shared resource, you can go to the Application Workbench to create namespaces; after creation, the namespace is automatically bound to the workspace.

"},{"location":"admin/ghippo/workspace/workspace.html","title":"Create/Delete Workspaces","text":"

A workspace is a resource category that represents a hierarchy of resources. A workspace can contain resources such as clusters, namespaces, and registries. Typically one workspace corresponds to one project; each workspace can be allocated different resources and assigned different users and user groups.

Follow the steps below to create a workspace.

1. Log in to the AI computing platform as a user with the admin/folder admin role, and click Global Management -> Workspace and Hierarchy at the bottom of the left navigation bar.

2. Click the Create Workspace button in the upper right corner.

3. Fill in the workspace name, owning folder, and other information, then click OK to finish creating the workspace.

                                                  Tip

After successful creation, the workspace name appears in the tree structure on the left, with different icons representing folders and workspaces.

                                                  Note

Select a workspace or folder and click the ... on its right to edit or delete it.

• When resources exist in the resource groups or shared resources under the workspace, the workspace cannot be deleted; unbind all resources first, then delete it.
• When the microservice engine module has connected registry resources under the workspace, the workspace cannot be deleted; remove all connected registries first, then delete the workspace.
• When the image registry module has image spaces or integrated registries under the workspace, the workspace cannot be deleted; unbind the image spaces and delete the registry integrations first, then delete the workspace.
"},{"location":"admin/ghippo/workspace/ws-folder.html","title":"Workspace and Hierarchy","text":"

Workspace and Hierarchy is a hierarchical resource-isolation and resource-grouping feature that mainly addresses unified resource authorization, resource grouping, and resource quotas.

Workspace and Hierarchy involves two concepts: workspaces and folders.

"},{"location":"admin/ghippo/workspace/ws-folder.html#_2","title":"Workspace","text":"

A workspace manages resources through Authorization, Resource Groups, and Shared Resources, enabling users (user groups) to share the resources within the workspace.

• Resources

  Resources sit at the lowest level of the resource-management hierarchy and include Cluster, Namespace, Pipeline, gateways, and so on. The parent of all these resources can only be a workspace, and a workspace, as a resource container, is a unit of resource grouping.

• Workspaces

  A workspace usually refers to a project or an environment; the resources in each workspace are logically isolated from those in other workspaces. Through authorization within a workspace, you can grant users (user groups) different access permissions to the same set of resources.

  Counting from the bottom of the hierarchy, workspaces sit at the first level and contain resources. Except for shared resources, every resource has exactly one parent; likewise, every workspace has exactly one parent folder.

  Resources are grouped through workspaces, and a workspace has two grouping modes: Resource Groups and Shared Resources.

• Resource groups

  A resource can join only one resource group, and resource groups correspond one-to-one with workspaces. After a resource joins a resource group, the Workspace Admin gains management permission on the resource, effectively becoming its owner.

• Shared resources

  With shared resources, by contrast, multiple workspaces can share one or more resources. A resource's owner can choose to share the resources they own with workspaces, and when sharing, the owner usually limits the resource quota the receiving workspace can use. After a resource is shared, the Workspace Admin only has usage permission within the quota and cannot manage the resource or adjust the amount the workspace can use.

  Shared resources also place requirements on the resource itself: only Cluster resources can be shared. A Cluster Admin can share a Cluster resource with different workspaces and limit each workspace's usage quota on that Cluster.

  Within the quota, a Workspace Admin can create multiple Namespaces, but the sum of the Namespaces' resource quotas cannot exceed the Cluster's quota for that workspace (for example, with a 100-core quota, two namespaces requesting 60 and 40 cores together exhaust it). For Kubernetes resources, Cluster is currently the only shareable resource type.

"},{"location":"admin/ghippo/workspace/ws-folder.html#_3","title":"Folder","text":"

Folders can be used to model an enterprise's business hierarchy.

• A folder is a further grouping mechanism on top of workspaces, with a hierarchical structure. A folder can contain workspaces, other folders, or a combination of both, forming a tree-like organization.

• Folders let you map your enterprise's business hierarchy and group workspaces by department. Folders are not directly tied to resources; they group resources indirectly through workspaces.

• Every folder has exactly one parent folder, and the root folder is the highest level of the hierarchy. The root folder has no parent; all folders and workspaces ultimately hang off the root folder.

In addition, users (user groups) inherit permissions from parent items through the folder hierarchy. A user's permissions at any level are the union of the permissions granted at that level and those inherited from its parents; permissions are additive and never mutually exclusive.

"},{"location":"admin/ghippo/workspace/ws-permission.html","title":"Workspace Permission Description","text":"

Workspaces provide permission mapping and resource isolation, mapping a user's/user group's permissions in the workspace onto the resources under it. If a user/user group has the Workspace Admin role in a workspace and a Namespace is bound in the workspace's resource group, then after mapping the user/user group becomes Namespace Admin.

                                                  Note

Workspace permission mapping does not apply to shared resources, because sharing grants the usage of a cluster to multiple workspaces rather than transferring management permissions to them, so permission inheritance and role mapping do not take place.

"},{"location":"admin/ghippo/workspace/ws-permission.html#_2","title":"Use Cases","text":"

Resource isolation is achieved by binding resources to different workspaces. With permission mapping, resource isolation, and shared resources, resources can be flexibly allocated to individual workspaces (tenants).

This typically applies to the following two scenarios:

• Cluster one-to-one

  | Regular Cluster | Department/Tenant (Workspace) | Purpose |
  | --- | --- | --- |
  | Cluster 01 | A | Management and use |
  | Cluster 02 | B | Management and use |
• Cluster one-to-many

  | Cluster | Department/Tenant (Workspace) | Resource Quota |
  | --- | --- | --- |
  | Cluster 01 | A | 100 cores of CPU |
  |  | B | 50 cores of CPU |
"},{"location":"admin/ghippo/workspace/ws-permission.html#_3","title":"Permission Description","text":"
| Target | Operation | Workspace Admin | Workspace Editor | Workspace Viewer |
| --- | --- | --- | --- | --- |
| The workspace itself | View | ✓ | ✓ | ✓ |
| - | Authorize | ✓ | ✗ | ✗ |
| - | Modify alias | ✓ | ✓ | ✗ |
| Resource groups | View | ✓ | ✓ | ✓ |
| - | Bind resource | ✓ | ✗ | ✗ |
| - | Unbind | ✓ | ✗ | ✗ |
| Shared resources | View | ✓ | ✓ | ✓ |
| - | Add share | ✓ | ✗ | ✗ |
| - | Remove share | ✓ | ✗ | ✗ |
| - | Resource quota | ✓ | ✗ | ✗ |
| - | Use shared resources [^1] | ✓ | ✗ | ✗ |
"},{"location":"admin/ghippo/workspace/wsbind-permission.html","title":"Resource Binding Permission Description","text":"

Suppose user Xiao Ming ('Xiao Ming' stands for any user who needs to bind resources) already holds the Workspace Admin role, or has been granted, through a custom role, the workspace's 'Resource Binding' permission, and wants to bind a cluster or a namespace to his workspace.

Binding a cluster/namespace to a workspace requires not only the workspace's 'Resource Binding' permission but also the resource permissions of Cluster Admin.

                                                  "},{"location":"admin/ghippo/workspace/wsbind-permission.html#_2","title":"\u7ed9\u5c0f\u660e\u6388\u6743","text":"
                                                  1. \u4f7f\u7528\u5e73\u53f0 Admin \u89d2\u8272\uff0c \u5728 \u5de5\u4f5c\u7a7a\u95f4 -> \u6388\u6743 \u9875\u9762\u7ed9\u5c0f\u660e\u6388\u4e88 Workspace Admin \u89d2\u8272\u3002

                                                  2. \u7136\u540e\u5728 \u5bb9\u5668\u7ba1\u7406 -> \u6743\u9650\u7ba1\u7406 \u9875\u9762\uff0c\u901a\u8fc7 \u6dfb\u52a0\u6388\u6743 \u5c06\u5c0f\u660e\u6388\u6743\u4e3a Cluster Admin\u3002

                                                  "},{"location":"admin/ghippo/workspace/wsbind-permission.html#_3","title":"\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4","text":"

                                                  \u4f7f\u7528\u5c0f\u660e\u7684\u8d26\u53f7\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5728 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \u9875\u9762\uff0c\u901a\u8fc7 \u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4 \u529f\u80fd\uff0c \u5c0f\u660e\u53ef\u4ee5\u5c06\u6307\u5b9a\u96c6\u7fa4\u7ed1\u5b9a\u5230\u81ea\u5df1\u7684\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u3002

                                                  Note

Xiao Ming can bind a cluster, or the namespaces under it, to a workspace only in the Container Management module; this cannot be done in the Global Management module.

Binding a namespace to a workspace likewise requires at least Workspace Admin + Cluster Admin permissions.

                                                  "},{"location":"admin/host/createhost.html","title":"\u521b\u5efa\u548c\u542f\u52a8\u4e91\u4e3b\u673a","text":"

Once a user has completed registration and been assigned a workspace, namespace, and resources, they can create and start a cloud host.

                                                  "},{"location":"admin/host/createhost.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                  • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                                  • \u7528\u6237\u5df2\u6210\u529f\u6ce8\u518c
                                                  • \u4e3a\u7528\u6237\u7ed1\u5b9a\u4e86\u5de5\u4f5c\u7a7a\u95f4
                                                  • \u4e3a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u4e86\u8d44\u6e90
                                                  "},{"location":"admin/host/createhost.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                  1. \u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                                                  2. \u70b9\u51fb \u521b\u5efa\u4e91\u4e3b\u673a -> \u901a\u8fc7\u6a21\u677f\u521b\u5efa

                                                  3. \u5b9a\u4e49\u7684\u4e91\u4e3b\u673a\u5404\u9879\u914d\u7f6e\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65

                                                    \u57fa\u672c\u914d\u7f6e\u6a21\u677f\u914d\u7f6e\u5b58\u50a8\u4e0e\u7f51\u7edc

                                                  4. \u914d\u7f6e root \u5bc6\u7801\u6216 ssh \u5bc6\u94a5\u540e\u70b9\u51fb \u786e\u5b9a

                                                  5. \u8fd4\u56de\u4e3b\u673a\u5217\u8868\uff0c\u7b49\u5f85\u72b6\u6001\u53d8\u4e3a \u8fd0\u884c\u4e2d \u4e4b\u540e\uff0c\u53ef\u4ee5\u901a\u8fc7\u53f3\u4fa7\u7684 \u2507 \u542f\u52a8\u4e3b\u673a\u3002

                                                  \u4e0b\u4e00\u6b65\uff1a\u4f7f\u7528\u4e91\u4e3b\u673a

                                                  "},{"location":"admin/host/usehost.html","title":"\u4f7f\u7528\u4e91\u4e3b\u673a","text":"

After creating and starting a cloud host, the user can begin to use it.

                                                  "},{"location":"admin/host/usehost.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                  • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                                  • \u7528\u6237\u5df2\u521b\u5efa\u5e76\u542f\u52a8\u4e91\u4e3b\u673a
                                                  "},{"location":"admin/host/usehost.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                  1. \u4ee5\u7ba1\u7406\u5458\u8eab\u4efd\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                                                  2. \u5bfc\u822a\u5230 \u5bb9\u5668\u7ba1\u7406 -> \u5bb9\u5668\u7f51\u7edc -> \u670d\u52a1 \uff0c\u70b9\u51fb\u670d\u52a1\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u670d\u52a1\u8be6\u60c5\u9875\uff0c\u5728\u53f3\u4e0a\u89d2\u70b9\u51fb \u66f4\u65b0

                                                  3. \u66f4\u6539\u7aef\u53e3\u8303\u56f4\u4e3a 30900-30999\uff0c\u4f46\u4e0d\u80fd\u51b2\u7a81\u3002

                                                  4. \u4ee5\u7ec8\u7aef\u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5bfc\u822a\u5230\u5bf9\u5e94\u7684\u670d\u52a1\uff0c\u67e5\u770b\u8bbf\u95ee\u7aef\u53e3\u3002

                                                  5. \u5728\u5916\u7f51\u4f7f\u7528 SSH \u5ba2\u6237\u7aef\u767b\u5f55\u4e91\u4e3b\u673a

                                                  6. \u81f3\u6b64\uff0c\u4f60\u53ef\u4ee5\u5728\u4e91\u4e3b\u673a\u4e0a\u6267\u884c\u5404\u9879\u64cd\u4f5c\u3002
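Under the hood, exposing SSH this way corresponds to a Kubernetes Service with a NodePort in the range above. A minimal sketch follows; the Service name and label are hypothetical (the actual Service is generated by the platform), so treat this as an illustration rather than the platform's exact object:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: cloud-host-01-ssh    # hypothetical name, for illustration only
spec:
  type: NodePort
  selector:
    app: cloud-host-01       # hypothetical label on the cloud host's pod
  ports:
    - name: ssh
      port: 22               # service port
      targetPort: 22         # SSH port inside the cloud host
      nodePort: 30950        # must fall in the 30900-30999 range set above
```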

Next step: Cloud Resource Sharing: Quota Management

                                                  "},{"location":"admin/insight/alert-center/index.html","title":"\u544a\u8b66\u4e2d\u5fc3","text":"

The Alert Center is a key feature of the AI platform. Through a graphical interface, it lets users conveniently browse all active and historical alerts by cluster and namespace, and search alerts by severity (Critical, Warning, Info).

All alerts are triggered by threshold conditions defined in preset alert rules. The AI platform ships with several built-in global alert policies, and you can also create or delete alert policies at any time to configure the following metrics:

• CPU usage
• Memory usage
• Disk usage
• Disk reads per second
• Disk writes per second
• Cluster disk read throughput
• Cluster disk write throughput
• Network send rate
• Network receive rate

You can also add labels and annotations to alert rules. Alert rules are divided into active and expired rules, and individual rules can be enabled or disabled to silence alerts.

Once a threshold condition is met, alert notifications can be delivered by email, DingTalk, WeCom, webhook, or SMS. All notification message templates are customizable, and notifications can be re-sent at a configured interval.

In addition, the Alert Center can deliver alert messages to specified users through the SMS services of Alibaba Cloud, Tencent Cloud, and others, enabling multiple notification channels.

The AI platform Alert Center is a powerful alert management platform that helps users discover and resolve cluster problems in time, improving business stability and availability and simplifying cluster inspection and troubleshooting.

                                                  "},{"location":"admin/insight/alert-center/alert-policy.html","title":"\u544a\u8b66\u7b56\u7565","text":"

An alert policy is a set of rules and conditions defined in an observability system to detect and trigger alerts, so that the relevant people or systems are notified promptly when the system misbehaves or reaches a predefined threshold.

Each alert policy is a collection of alert rules and supports setting rules on resources such as clusters, nodes, and workloads, as well as on logs and events. When an alert target reaches the threshold set by any rule in the policy, an alert is automatically triggered and a notification is sent.

                                                  "},{"location":"admin/insight/alert-center/alert-policy.html#_2","title":"\u67e5\u770b\u544a\u8b66\u7b56\u7565","text":"
                                                  1. \u70b9\u51fb\u4e00\u7ea7\u5bfc\u822a\u680f\u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027\u3002
                                                  2. \u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9 \u544a\u8b66\u4e2d\u5fc3 -> \u544a\u8b66\u7b56\u7565\u3002

                                                    • \u96c6\u7fa4\uff1a\u5355\u51fb\u96c6\u7fa4\u4e0b\u62c9\u6846\u53ef\u5207\u6362\u96c6\u7fa4\uff1b
                                                    • \u547d\u540d\u7a7a\u95f4\uff1a\u5355\u51fb\u547d\u540d\u7a7a\u95f4\u5207\u6362\u4e0b\u62c9\u6846\u3002

                                                  3. \u70b9\u51fb\u544a\u8b66\u7b56\u7565\u540d\u79f0\u53ef\u67e5\u770b\u7b56\u7565\u7684\u57fa\u672c\u4fe1\u606f\u3001\u89c4\u5219\u4ee5\u53ca\u901a\u77e5\u914d\u7f6e\u3002

                                                    1. \u5728\u89c4\u5219\u5217\u8868\u4e2d\u53ef\u67e5\u770b\u89c4\u5219\u7c7b\u578b\u3001\u89c4\u5219\u7684\u8868\u8fbe\u5f0f\u3001\u7ea7\u522b\u3001\u72b6\u6001\u7b49\u4fe1\u606f\u3002
                                                    2. \u8fdb\u5165\u7b56\u7565\u8be6\u60c5\uff0c\u53ef\u4ee5\u6dfb\u52a0\u3001\u7f16\u8f91\u3001\u5220\u9664\u5176\u4e0b\u7684\u544a\u8b66\u89c4\u5219\u3002

                                                  "},{"location":"admin/insight/alert-center/alert-policy.html#_3","title":"\u521b\u5efa\u544a\u8b66\u7b56\u7565","text":"
                                                  1. \u586b\u5199\u57fa\u672c\u4fe1\u606f\uff0c\u9009\u62e9\u4e00\u4e2a\u6216\u591a\u4e2a\u96c6\u7fa4\u3001\u8282\u70b9\u6216\u5de5\u4f5c\u8d1f\u8f7d\u4e3a\u544a\u8b66\u5bf9\u8c61\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002

                                                    Note

                                                    • \u9009\u62e9\u5168\u90e8\u96c6\u7fa4\u3001\u8282\u70b9\u6216\u5de5\u4f5c\u8d1f\u8f7d\uff1a\u521b\u5efa\u7684\u544a\u8b66\u89c4\u5219\u5bf9\u6240\u6709\u5df2\u5b89\u88c5 insight-agent \u7684\u96c6\u7fa4\u751f\u6548\u3002
                                                    • \u9009\u62e9\u5355\u4e2a\u6216\u591a\u4e2a\u96c6\u7fa4\u96c6\u7fa4\u3001\u8282\u70b9\u6216\u5de5\u4f5c\u8d1f\u8f7d\uff1a\u521b\u5efa\u7684\u544a\u8b66\u89c4\u5219\u4ec5\u5bf9\u6240\u9009\u7684\u8d44\u6e90\u5bf9\u8c61\u751f\u6548\u3002
                                                    • \u540c\u65f6\uff0c\u7528\u6237\u53ea\u80fd\u5bf9\u5df2\u6743\u9650\u7684\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u8bbe\u7f6e\u544a\u8b66\u89c4\u5219\u3002
                                                  "},{"location":"admin/insight/alert-center/alert-policy.html#_4","title":"\u624b\u52a8\u6dfb\u52a0\u89c4\u5219","text":"
                                                  1. \u5728\u521b\u5efa\u544a\u8b66\u7b56\u7565\u7684\u7b2c\u4e8c\u90e8\u4e2d\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4e0a\u89d2\u7684\u6dfb\u52a0\u89c4\u5219\u3002

                                                  2. \u5728\u5f39\u7a97\u4e2d\u521b\u5efa\u544a\u8b66\u89c4\u5219\uff0c\u586b\u5199\u5404\u9879\u53c2\u6570\u540e\u70b9\u51fb \u786e\u5b9a\u3002

                                                    • \u6a21\u677f\u89c4\u5219\uff1a\u9884\u5b9a\u4e49\u4e86\u57fa\u7840\u6307\u6807\uff0c\u53ef\u4ee5\u6309 CPU\u3001\u5185\u5b58\u3001\u78c1\u76d8\u3001\u7f51\u7edc\u8bbe\u5b9a\u8981\u76d1\u63a7\u7684\u6307\u6807\u3002
                                                    • PromQL \u89c4\u5219\uff1a\u8f93\u5165\u4e00\u4e2a PromQL \u8868\u8fbe\u5f0f\uff0c\u5177\u4f53\u8bf7\u67e5\u8be2 Prometheus \u8868\u8fbe\u5f0f\u3002
                                                    • \u6301\u7eed\u65f6\u957f\uff1a\u544a\u8b66\u88ab\u89e6\u53d1\u4e14\u6301\u7eed\u65f6\u95f4\u8fbe\u5230\u8be5\u8bbe\u5b9a\u503c\u540e\uff0c\u544a\u8b66\u7b56\u7565\u5c06\u53d8\u4e3a\u89e6\u53d1\u4e2d\u72b6\u6001\u3002
                                                    • \u544a\u8b66\u7ea7\u522b\uff1a\u5305\u542b\u7d27\u6025\u3001\u8b66\u544a\u3001\u4fe1\u606f\u4e09\u79cd\u7ea7\u522b\u3002
                                                    • \u9ad8\u7ea7\u8bbe\u7f6e\uff1a\u53ef\u4ee5\u81ea\u5b9a\u4e49\u6807\u7b7e\u548c\u6ce8\u89e3\u3002

                                                    Info

The system defines built-in labels; if a custom label uses the same key as a built-in label, the custom label does not take effect. The built-in labels are: severity, rule_id, source, cluster_name, group_id, target_type, and target.
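As an illustration, a PromQL rule body might look like the following sketch. The fields follow the Prometheus rule format; the expression, label, and annotation values are assumptions chosen for the example, not platform defaults:

```yaml
# Hypothetical PromQL rule body, for illustration only
expr: (1 - avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[5m]))) * 100 > 80
for: 5m               # duration: switches to Firing after 5 minutes above threshold
labels:
  team: sre           # custom label; must not reuse built-in keys such as severity
annotations:
  description: "Instance {{ $labels.instance }} CPU usage has stayed above 80% for 5 minutes"
```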

                                                  "},{"location":"admin/insight/alert-center/alert-policy.html#_5","title":"\u521b\u5efa\u65e5\u5fd7\u89c4\u5219","text":"

After completing the basic information, click Add Rule and choose Log Rule as the rule type.

                                                  Note

Log rules can be created only when the selected resource object is a node or a workload.

Field descriptions (an illustrative combination is sketched after this list):

• Filter condition: the field used to query the log content; four kinds of filter conditions are supported: AND, OR, regular-expression match, and fuzzy match.
• Judgment condition: the keyword or match condition, entered according to the filter condition.
• Time range: the time range of the log query.
• Threshold condition: the alert threshold entered in the input box; the alert fires when the configured threshold is reached. The supported comparison operators are >, ≥, =, ≤, <.
• Severity: the alert severity, indicating how serious the alert is.
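For example, the following sketch shows how these fields might combine into one log rule, expressing 'alert if more than 10 log lines fuzzy-match "connection refused" within 5 minutes'. The field names are a hypothetical rendering of the UI fields above, not a file format the platform consumes:

```yaml
# Hypothetical log rule mirroring the UI fields (illustration only)
filterCondition: fuzzy          # one of: and, or, regex, fuzzy
keyword: "connection refused"   # judgment condition
timeRange: 5m                   # window the query covers
threshold: "> 10"               # fire when more than 10 matching lines appear
severity: warning
```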
                                                  "},{"location":"admin/insight/alert-center/alert-policy.html#_6","title":"\u521b\u5efa\u4e8b\u4ef6\u89c4\u5219","text":"

After completing the basic information, click Add Rule and choose Event Rule as the rule type.

                                                  Note

Event rules can be created only when the selected resource object is a workload.

Field descriptions (an illustrative combination is sketched after this list):

• Event rule: only workloads are supported as the resource object.
• Event reason: event reasons differ by workload type, and the selected event reasons are combined with an AND relationship.
• Time range: data produced within this range is examined; if the configured threshold condition is reached, an alert event is triggered.
• Threshold condition: the alert event fires when the number of matching events reaches the configured threshold.
• Trend chart: by default it shows the event trend over the last 10 minutes; the value of each point is the total number of occurrences within the time range ending at that point.
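For instance, the sketch below expresses 'alert if a workload records 3 or more OOMKilled events within 10 minutes'. As with the log rule above, the field names and the workload name are hypothetical; the actual values are set in the UI:

```yaml
# Hypothetical event rule (illustration only)
resource: deployment/my-app   # assumed workload
eventReasons: [OOMKilled]     # multiple selected reasons are ANDed together
timeRange: 10m
threshold: ">= 3"
severity: critical
```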
                                                  "},{"location":"admin/insight/alert-center/alert-policy.html#_7","title":"\u5bfc\u5165\u89c4\u5219\u6a21\u677f","text":"
                                                  1. \u53ef\u70b9\u51fb \u6a21\u677f\u5bfc\u5165\uff0c\u9009\u62e9\u5e73\u53f0\u7ba1\u7406\u5458\u5df2\u521b\u5efa\u597d\u7684\u544a\u8b66\u6a21\u677f\u6279\u91cf\u5bfc\u5165\u544a\u8b66\u89c4\u5219\u3002

                                                  2. \u70b9\u51fb \u4e0b\u4e00\u6b65 \u540e\u914d\u7f6e\u901a\u77e5\u3002

                                                  3. \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u6309\u94ae\uff0c\u8fd4\u56de\u544a\u8b66\u7b56\u7565\u5217\u8868\u3002

                                                  Tip

A newly created alert policy starts in the Not Firing state. Once the threshold condition and duration in a rule are met, it switches to the Firing state.

                                                  Warning

A deleted alert policy is gone for good, so proceed with caution.

                                                  "},{"location":"admin/insight/alert-center/alert-policy.html#yaml","title":"\u901a\u8fc7 YAML \u5bfc\u5165\u544a\u8b66\u7b56\u7565","text":"
1. Go to the alert policy list and click Create via YAML.

2. The cluster and namespace you select determine the management permission scope of the alert policy.

3. In the YAML editor, fill in spec and its contents; only one group can be imported.
4. The alert rule name must follow the naming rules: only upper- and lower-case letters, digits, underscores (_), and hyphens (-); it must start with a letter and be at most 63 characters long.
5. severity is required and must be one of: critical, warning, info.
6. The expression expr is required.

7. After importing the YAML file, click Preview to validate the YAML format and quickly review the imported alert rules (a minimal example follows).
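A minimal spec satisfying these constraints might look like the sketch below. The group name, rule name, and expression are example values, not required ones; the layout follows the Prometheus rule-group format that the constraints above describe:

```yaml
spec:
  groups:                       # only one group may be imported
    - name: example-group       # example value
      rules:
        - alert: node-cpu-high  # letters, digits, _ and -; starts with a letter; <= 63 chars
          expr: (1 - avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[5m]))) * 100 > 80
          for: 5m
          labels:
            severity: warning   # must be critical, warning, or info
          annotations:
            description: "CPU usage is above 80%"
```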

                                                  "},{"location":"admin/insight/alert-center/alert-template.html","title":"\u544a\u8b66\u6a21\u677f","text":"

Alert templates let the platform administrator create templates and rules that the business side can use directly to create alert policies. This reduces the amount of rule management done by business users, who can still adjust alert thresholds to fit their actual environment.

                                                  "},{"location":"admin/insight/alert-center/alert-template.html#_2","title":"\u521b\u5efa\u544a\u8b66\u6a21\u677f","text":"
                                                  1. \u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9\u00a0\u544a\u8b66\u4e2d\u5fc3\u00a0->\u00a0\u544a\u8b66\u7b56\u7565\uff0c\u5355\u51fb\u9876\u90e8\u7684 \u544a\u8b66\u6a21\u677f \u3002

                                                  2. \u70b9\u51fb \u521b\u5efa\u544a\u8b66\u6a21\u677f \uff0c\u8bbe\u7f6e\u544a\u8b66\u6a21\u677f\u7684\u540d\u79f0\u3001\u63cf\u8ff0\u7b49\u4fe1\u606f\u3002

                                                    \u53c2\u6570 \u8bf4\u660e \u6a21\u677f\u540d\u79f0 \u540d\u79f0\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u548c\u8fde\u5b57\u7b26\uff08-\uff09\uff0c\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u548c\u7ed3\u5c3e\uff0c\u6700\u957f 63 \u4e2a\u5b57\u7b26\u3002 \u63cf\u8ff0 \u63cf\u8ff0\u53ef\u5305\u542b\u4efb\u610f\u5b57\u7b26\uff0c\u6700\u957f 256 \u4e2a\u5b57\u7b26\u3002 \u8d44\u6e90\u7c7b\u578b \u7528\u4e8e\u6307\u5b9a\u544a\u8b66\u6a21\u677f\u7684\u5339\u914d\u7c7b\u578b\u3002 \u544a\u8b66\u89c4\u5219 \u652f\u6301\u9884\u5b9a\u4e49\u591a\u4e2a\u544a\u8b66\u89c4\u5219\uff0c\u53ef\u6dfb\u52a0\u6a21\u677f\u89c4\u5219\u3001PromQL \u89c4\u5219\u3002
                                                  3. \u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u540e\u8fd4\u56de\u544a\u8b66\u6a21\u677f\u5217\u8868\uff0c\u70b9\u51fb\u6a21\u677f\u540d\u79f0\u540e\u53ef\u67e5\u770b\u6a21\u677f\u8be6\u60c5\u3002

                                                  "},{"location":"admin/insight/alert-center/alert-template.html#_3","title":"\u7f16\u8f91\u544a\u8b66\u6a21\u677f","text":"

                                                  \u70b9\u51fb\u76ee\u6807\u89c4\u5219\u540e\u7684 \u2507 \uff0c\u70b9\u51fb \u7f16\u8f91\uff0c\u8fdb\u5165\u6291\u5236\u89c4\u5219\u7684\u7f16\u8f91\u9875\u3002

                                                  "},{"location":"admin/insight/alert-center/alert-template.html#_4","title":"\u5220\u9664\u544a\u8b66\u6a21\u677f","text":"

                                                  \u70b9\u51fb\u76ee\u6807\u6a21\u677f\u540e\u4fa7\u7684 \u2507 \uff0c\u70b9\u51fb \u5220\u9664\uff0c\u5728\u8f93\u5165\u6846\u4e2d\u8f93\u5165\u544a\u8b66\u6a21\u677f\u7684\u540d\u79f0\u5373\u53ef\u5220\u9664\u3002

                                                  "},{"location":"admin/insight/alert-center/inhibition.html","title":"\u544a\u8b66\u6291\u5236","text":"

Alert inhibition is a mechanism for temporarily hiding, or lowering the priority of, alerts that do not need immediate attention. Its purpose is to reduce the noise that unnecessary alert notifications create for operations staff, so they can focus on the problems that matter most.

Alert inhibition works by defining a set of rules that identify and ignore certain alerts when they occur under specific conditions. The main cases are:

• Parent-child inhibition: when a parent alert fires (for example, a node crash), all child alerts caused by it (for example, crashes of the containers running on that node) can be suppressed.
• Similar-alert inhibition: when multiple alerts share the same characteristics (for example, the same problem on the same instance), duplicate alert notifications can be suppressed.
                                                  "},{"location":"admin/insight/alert-center/inhibition.html#_2","title":"\u521b\u5efa\u6291\u5236\u89c4\u5219","text":"
                                                  1. \u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9\u00a0\u544a\u8b66\u4e2d\u5fc3\u00a0->\u00a0\u544a\u8b66\u964d\u566a\uff0c\u5355\u51fb\u9876\u90e8\u7684 \u544a\u8b66\u6291\u5236 \u3002

                                                  2. \u70b9\u51fb \u65b0\u5efa\u6291\u5236\u89c4\u5219 \uff0c\u8bbe\u7f6e\u6291\u5236\u89c4\u5219\u7684\u540d\u79f0\u3001\u89c4\u5219\u7b49\u3002

                                                    Note

                                                    \u901a\u8fc7\u89c4\u5219\u6807\u7b7e\u548c\u544a\u8b66\u6807\u7b7e\u5b9a\u4e49\u4e00\u7ec4\u89c4\u5219\u6765\u8bc6\u522b\u548c\u5ffd\u7565\u67d0\u4e9b\u544a\u8b66\uff0c\u8fbe\u5230\u907f\u514d\u540c\u4e00\u95ee\u9898\u53ef\u80fd\u4f1a\u89e6\u53d1\u591a\u4e2a\u76f8\u4f3c\u6216\u76f8\u5173\u7684\u544a\u8b66\u7684\u95ee\u9898\u3002

                                                    \u53c2\u6570\u65f6\u95f4 \u8bf4\u660e \u6291\u5236\u89c4\u5219\u540d\u79f0 \u6291\u5236\u89c4\u5219\u540d\u79f0\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u548c\u8fde\u5b57\u7b26\uff08-\uff09\uff0c\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u548c\u7ed3\u5c3e\uff0c\u6700\u957f 63 \u4e2a\u5b57\u7b26\u3002 \u63cf\u8ff0 \u63cf\u8ff0\u53ef\u5305\u542b\u4efb\u610f\u5b57\u7b26\uff0c\u6700\u957f 256 \u4e2a\u5b57\u7b26\u3002 \u96c6\u7fa4 \u8be5\u6291\u5236\u89c4\u5219\u4f5c\u7528\u7684\u96c6\u7fa4\u3002 \u547d\u540d\u7a7a\u95f4 \u8be5\u6291\u5236\u89c4\u5219\u4f5c\u7528\u7684\u547d\u540d\u7a7a\u95f4\u3002 \u6839\u6e90\u544a\u8b66 \u901a\u8fc7\u586b\u5199\u7684\u6807\u7b7e\u6761\u4ef6\u5339\u914d\u544a\u8b66\uff0c\u4f1a\u5c06\u7b26\u5408\u6240\u6709\u6807\u7b7e\u6761\u4ef6\u7684\u544a\u8b66\u4e0e\u7b26\u5408\u6291\u5236\u6761\u4ef6\u7684\u8fdb\u884c\u5bf9\u6bd4\uff0c\u4e0d\u7b26\u5408\u6291\u5236\u6761\u4ef6\u7684\u544a\u8b66\u5c06\u7167\u5e38\u53d1\u9001\u6d88\u606f\u7ed9\u7528\u6237\u3002 \u53d6\u503c\u8303\u56f4\u8bf4\u660e\uff1a - \u544a\u8b66\u7ea7\u522b\uff1a\u6307\u6807\u6216\u4e8b\u4ef6\u544a\u8b66\u7684\u7ea7\u522b\uff0c\u53ef\u4ee5\u8bbe\u7f6e\u4e3a\uff1a\u7d27\u6025\u3001\u91cd\u8981\u3001\u63d0\u793a\u3002 - \u8d44\u6e90\u7c7b\u578b\uff1a\u544a\u8b66\u5bf9\u8c61\u6240\u5bf9\u5e94\u7684\u8d44\u6e90\u7c7b\u578b\uff0c\u53ef\u4ee5\u8bbe\u7f6e\u4e3a\uff1a\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u65e0\u72b6\u6001\u8d1f\u8f7d\u3001\u6709\u72b6\u5bb9\u8d1f\u8f7d\u3001\u5b88\u62a4\u8fdb\u7a0b\u3001\u5bb9\u5668\u7ec4\u3002 - \u6807\u7b7e\uff1a\u544a\u8b66\u6807\u8bc6\u5c5e\u6027\uff0c\u7531\u6807\u7b7e\u540d\u548c\u6807\u7b7e\u503c\u6784\u6210\uff0c\u652f\u6301\u7528\u6237\u81ea\u5b9a\u4e49\u3002 \u6291\u5236\u544a\u8b66 \u7528\u4e8e\u6307\u5b9a\u76ee\u6807\u8b66\u62a5\uff08\u5c06\u88ab\u6291\u5236\u7684\u8b66\u62a5\uff09\u7684\u5339\u914d\u6761\u4ef6\uff0c\u7b26\u5408\u6240\u6709\u6807\u7b7e\u6761\u4ef6\u7684\u544a\u8b66\u5c06\u4e0d\u4f1a\u518d\u53d1\u9001\u6d88\u606f\u7ed9\u7528\u6237\u3002 \u5339\u914d\u6807\u7b7e \u7528\u4e8e\u6307\u5b9a\u5e94\u8be5\u6bd4\u8f83\u7684\u6807\u7b7e\u5217\u8868\uff0c\u4ee5\u786e\u5b9a\u6e90\u8b66\u62a5\u548c\u76ee\u6807\u8b66\u62a5\u662f\u5426\u5339\u914d\u3002\u53ea\u6709\u5728\u00a0equal\u00a0\u4e2d\u6307\u5b9a\u7684\u6807\u7b7e\u5728\u6e90\u548c\u76ee\u6807\u8b66\u62a5\u4e2d\u7684\u503c\u5b8c\u5168\u76f8\u540c\u7684\u60c5\u51b5\u4e0b\uff0c\u624d\u4f1a\u89e6\u53d1\u6291\u5236\u3002equal\u00a0\u5b57\u6bb5\u662f\u53ef\u9009\u7684\u3002\u5982\u679c\u7701\u7565\u00a0equal\u00a0\u5b57\u6bb5\uff0c\u5219\u4f1a\u5c06\u6240\u6709\u6807\u7b7e\u7528\u4e8e\u5339\u914d
                                                  3. \u70b9\u51fb**\u786e\u5b9a**\u5b8c\u6210\u521b\u5efa\u540e\u8fd4\u56de\u544a\u8b66\u6291\u5236\u5217\u8868\uff0c\u70b9\u51fb\u544a\u8b66\u6291\u5236\u540d\u79f0\u540e\u53ef\u67e5\u770b\u6291\u5236\u89c4\u5219\u8be6\u60c5\u3002
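These semantics mirror Prometheus Alertmanager's inhibit_rules. As a sketch, the parent-child case described above could be written like this in Alertmanager style; the matcher values are examples, assuming the node alert carries a node label:

```yaml
# Alertmanager-style inhibition, illustrating the semantics above
inhibit_rules:
  - source_matchers:
      - alertname = NodeDown   # source alert: the node crash
      - severity = critical
    target_matchers:
      - severity = warning     # target alerts: container-level symptoms
    equal:
      - node                   # suppress only when both alerts share the same node value
```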

                                                  "},{"location":"admin/insight/alert-center/inhibition.html#_3","title":"\u67e5\u770b\u89c4\u5219\u6807\u7b7e","text":"
                                                  1. \u70b9\u51fb\u53f3\u4fa7\u5bfc\u822a\u680f\u9009\u62e9\u00a0\u544a\u8b66\u4e2d\u5fc3\u00a0->\u00a0\u544a\u8b66\u7b56\u7565 \uff0c\u70b9\u51fb\u89c4\u5219\u6240\u5728\u7684\u7b56\u7565\u8be6\u60c5\u3002
                                                  2. \u70b9\u51fb\u76ee\u6807\u89c4\u5219\u540d\u79f0\uff0c\u67e5\u770b\u89c4\u5219\u8be6\u60c5\uff0c\u67e5\u770b\u5bf9\u5e94\u544a\u8b66\u89c4\u5219\u7684\u6807\u7b7e\u3002

                                                    Note

                                                    \u5728\u6dfb\u52a0\u89c4\u5219\u65f6\u53ef\u6dfb\u52a0\u81ea\u5b9a\u4e49\u6807\u7b7e\u3002

                                                  "},{"location":"admin/insight/alert-center/inhibition.html#_4","title":"\u67e5\u770b\u544a\u8b66\u6807\u7b7e","text":"
                                                  1. \u70b9\u51fb\u53f3\u4fa7\u5bfc\u822a\u680f\u9009\u62e9\u00a0\u544a\u8b66\u4e2d\u5fc3\u00a0->\u00a0\u544a\u8b66\u5217\u8868 \uff0c\u70b9\u51fb\u544a\u8b66\u6240\u5728\u884c\u67e5\u770b\u544a\u8b66\u8be6\u60c5\u3002

                                                    Note

                                                    \u544a\u8b66\u6807\u7b7e\u7528\u4e8e\u63cf\u8ff0\u544a\u8b66\u7684\u8be6\u7ec6\u4fe1\u606f\u548c\u5c5e\u6027\uff0c\u53ef\u4ee5\u7528\u6765\u521b\u5efa\u6291\u5236\u89c4\u5219\u3002

                                                  "},{"location":"admin/insight/alert-center/inhibition.html#_5","title":"\u7f16\u8f91\u6291\u5236\u89c4\u5219","text":"
                                                  1. \u70b9\u51fb\u76ee\u6807\u89c4\u5219\u540e\u4fa7\u7684 \u2507 \uff0c\u70b9\u51fb \u7f16\u8f91\uff0c\u8fdb\u5165\u6291\u5236\u89c4\u5219\u7684\u7f16\u8f91\u9875\u3002

                                                  "},{"location":"admin/insight/alert-center/inhibition.html#_6","title":"\u5220\u9664\u6291\u5236\u89c4\u5219","text":"

                                                  \u70b9\u51fb\u76ee\u6807\u89c4\u5219\u540e\u4fa7\u7684 \u2507 \uff0c\u70b9\u51fb \u5220\u9664\uff0c\u5728\u8f93\u5165\u6846\u4e2d\u8f93\u5165\u6291\u5236\u89c4\u5219\u7684\u540d\u79f0\u5373\u53ef\u5220\u9664\u3002

                                                  "},{"location":"admin/insight/alert-center/message.html","title":"\u901a\u77e5\u914d\u7f6e","text":"

On the Notification Settings page, you can configure messages to be sent to users by email, WeCom, DingTalk, webhook, SMS, and other channels.

                                                  "},{"location":"admin/insight/alert-center/message.html#_2","title":"\u90ae\u4ef6\u7ec4","text":"
                                                  1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u540e\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e\uff0c\u9ed8\u8ba4\u4f4d\u4e8e\u90ae\u4ef6\u901a\u77e5\u5bf9\u8c61\u3002

                                                  2. \u70b9\u51fb \u6dfb\u52a0\u90ae\u7bb1\u7ec4\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u90ae\u4ef6\u5730\u5740\u3002

                                                  3. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u90ae\u7bb1\u7ec4\u3002

                                                  "},{"location":"admin/insight/alert-center/message.html#_3","title":"\u4f01\u4e1a\u5fae\u4fe1","text":"
                                                  1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u4f01\u4e1a\u5fae\u4fe1\u3002

                                                    \u6709\u5173\u4f01\u4e1a\u5fae\u4fe1\u7fa4\u673a\u5668\u4eba\u7684 URL\uff0c\u8bf7\u53c2\u9605\u4f01\u4e1a\u5fae\u4fe1\u5b98\u65b9\u6587\u6863\uff1a\u5982\u4f55\u4f7f\u7528\u7fa4\u673a\u5668\u4eba\u3002

                                                  2. \u70b9\u51fb \u6dfb\u52a0\u7fa4\u673a\u5668\u4eba\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u7fa4\u673a\u5668\u4eba\u3002

                                                  3. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u9009\u62e9 \u53d1\u9001\u6d4b\u8bd5\u4fe1\u606f\uff0c\u8fd8\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u7fa4\u673a\u5668\u4eba\u3002

                                                  "},{"location":"admin/insight/alert-center/message.html#_4","title":"\u9489\u9489","text":"
                                                  1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u9489\u9489\uff0c\u70b9\u51fb \u6dfb\u52a0\u7fa4\u673a\u5668\u4eba\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u7fa4\u673a\u5668\u4eba\u3002

                                                    \u6709\u5173\u9489\u9489\u7fa4\u673a\u5668\u4eba\u7684 URL\uff0c\u8bf7\u53c2\u9605\u9489\u9489\u5b98\u65b9\u6587\u6863\uff1a\u81ea\u5b9a\u4e49\u673a\u5668\u4eba\u63a5\u5165\u3002

                                                    Note

                                                    \u52a0\u7b7e\u7684\u65b9\u5f0f\u662f\u9489\u9489\u673a\u5668\u4eba\u4e0e\u5f00\u53d1\u8005\u53cc\u5411\u8fdb\u884c\u5b89\u5168\u8ba4\u8bc1\uff0c\u82e5\u5728\u521b\u5efa\u9489\u9489\u673a\u5668\u4eba\u65f6\u5f00\u542f\u4e86\u52a0\u7b7e\uff0c\u5219\u9700\u8981\u5728\u6b64\u5904\u8f93\u5165\u9489\u9489\u751f\u6210\u7684\u5bc6\u94a5\u3002 \u53ef\u53c2\u8003\u9489\u9489\u81ea\u5b9a\u4e49\u673a\u5668\u4eba\u5b89\u5168\u8bbe\u7f6e\u3002

                                                  2. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u9009\u62e9 \u53d1\u9001\u6d4b\u8bd5\u4fe1\u606f\uff0c\u8fd8\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u7fa4\u673a\u5668\u4eba\u3002

                                                  "},{"location":"admin/insight/alert-center/message.html#_5","title":"\u98de\u4e66","text":"
                                                  1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u98de\u4e66\uff0c\u70b9\u51fb \u6dfb\u52a0\u7fa4\u673a\u5668\u4eba\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u7fa4\u673a\u5668\u4eba\u3002

                                                    Note

                                                    \u5f53\u98de\u4e66\u7684\u7fa4\u673a\u5668\u4eba\u5f00\u542f\u7b7e\u540d\u6821\u9a8c\u65f6\uff0c\u6dfb\u52a0\u98de\u4e66\u901a\u77e5\u65f6\u9700\u8981\u586b\u5199\u5bf9\u5e94\u7684\u7b7e\u540d\u5bc6\u94a5\u3002\u8bf7\u67e5\u9605 \u81ea\u5b9a\u4e49\u673a\u5668\u4eba\u4f7f\u7528\u6307\u5357\u3002

                                                  2. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u9009\u62e9 \u53d1\u9001\u6d4b\u8bd5\u4fe1\u606f\uff0c\u8fd8\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u7fa4\u673a\u5668\u4eba\u3002

                                                  "},{"location":"admin/insight/alert-center/message.html#webhook","title":"Webhook","text":"
1. In the left navigation bar, click Alert Center -> Notification Settings -> Webhook.

   For the webhook URL and further configuration options, see the webhook documentation.

2. Click Create Webhook and add one or more webhooks.

   HTTP Headers: optional; sets the request headers. Multiple headers can be added.


3. When the configuration is complete, the notification list is shown automatically; click ┇ on the right of the list to select Send Test Message, or to edit or delete a webhook.

                                                  "},{"location":"admin/insight/alert-center/message.html#_6","title":"\u7ad9\u5185\u4fe1","text":"

                                                  Note

Alert messages are delivered to each user's personal internal-message inbox; click the 🔔 icon at the top to view the notifications.

1. In the left navigation bar, click Alert Center -> Notification Settings -> Internal Messages, then click Create.

   • An internal-message notification can include multiple users.

2. When the configuration is complete, the internal-message notification list is shown automatically; click ┇ on the right of the list to select Send Test Message.

                                                  "},{"location":"admin/insight/alert-center/message.html#_7","title":"\u77ed\u4fe1\u7ec4","text":"
1. In the left navigation bar, click Alert Center -> Notification Settings -> SMS, click Add SMS Group, and add one or more SMS groups.

2. In the pop-up window, enter the name, the recipients, their phone numbers, and the notification server.

   The notification server must be created beforehand under Notification Settings -> Notification Servers. Alibaba Cloud and Tencent Cloud servers are currently supported; for the specific parameters, refer to your own cloud server information.

3. After the SMS group is added, the notification list is shown automatically; click ┇ on the right of the list to edit or delete an SMS group.

                                                  "},{"location":"admin/insight/alert-center/msg-template.html","title":"\u6d88\u606f\u6a21\u677f","text":"

Observability supports customizing message template content, so that different notification targets (email, WeCom, DingTalk, webhook, Lark, internal messages, and so on) can each have their own notification content.

                                                  "},{"location":"admin/insight/alert-center/msg-template.html#_2","title":"\u521b\u5efa\u6d88\u606f\u6a21\u677f","text":"
                                                  1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9 \u544a\u8b66\u4e2d\u5fc3 -> \u6d88\u606f\u6a21\u677f\u3002

                                                    Insight \u9ed8\u8ba4\u5185\u7f6e\u4e2d\u82f1\u6587\u4e24\u4e2a\u6a21\u677f\uff0c\u4ee5\u4fbf\u7528\u6237\u4f7f\u7528\u3002

                                                  2. \u70b9\u51fb \u65b0\u5efa\u6d88\u606f\u6a21\u677f \u6309\u94ae\uff0c\u586b\u5199\u6a21\u677f\u5185\u5bb9\u3002

                                                  Info

                                                  \u53ef\u89c2\u6d4b\u6027\u9884\u7f6e\u4e86\u6d88\u606f\u6a21\u677f\u3002\u82e5\u9700\u8981\u5b9a\u4e49\u6a21\u677f\u7684\u5185\u5bb9\uff0c\u8bf7\u53c2\u8003\u914d\u7f6e\u901a\u77e5\u6a21\u677f\u3002

                                                  "},{"location":"admin/insight/alert-center/msg-template.html#_3","title":"\u6d88\u606f\u6a21\u677f\u8be6\u60c5","text":"

Click a message template's name; its details are shown in a slide-out panel on the right.
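As a sketch, a custom template body might combine the variables from the table below like this; the surrounding wording is illustrative, while the variables themselves are the documented ones:

```yaml
# Example message template body (illustration only; variables are listed below)
content: |
  [{{ .Labels.severity }}] Rule {{ .Labels.alertname }} of policy {{ .Labels.alertgroup }} fired
  Cluster: {{ .Labels.cluster }}  Namespace: {{ .Labels.namespace }}
  Target: {{ .Labels.target_type }}/{{ .Labels.target }}
  Value: {{ .Annotations.value }}  Started at: {{ .StartsAt }}
  Details: {{ .Annotations.description }}
```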

| Parameter | Variable | Description |
|---|---|---|
| Rule Name | {{ .Labels.alertname }} | Name of the rule that triggered the alert |
| Policy Name | {{ .Labels.alertgroup }} | Name of the alert policy the triggering rule belongs to |
| Severity | {{ .Labels.severity }} | Severity of the triggered alert |
| Cluster | {{ .Labels.cluster }} | Cluster where the resource that triggered the alert resides |
| Namespace | {{ .Labels.namespace }} | Namespace where the resource that triggered the alert resides |
| Node | {{ .Labels.node }} | Node where the resource that triggered the alert resides |
| Resource Type | {{ .Labels.target_type }} | Resource type of the alert target |
| Resource Name | {{ .Labels.target }} | Name of the object that triggered the alert |
| Trigger Value | {{ .Annotations.value }} | Metric value at the moment the alert notification was triggered |
| Start Time | {{ .StartsAt }} | Time the alert started |
| End Time | {{ .EndsAT }} | Time the alert ended |
| Description | {{ .Annotations.description }} | Detailed description of the alert |
| Labels | {{ for .labels}} {{end}} | All labels of the alert; the for function iterates over the labels list to retrieve every label. |

"},{"location":"admin/insight/alert-center/msg-template.html#_4","title":"Editing or Deleting a Message Template","text":"

Click ┇ on the right of the list and choose Edit or Delete from the pop-up menu to modify or delete a message template.

                                                  Warning

Note that a deleted template cannot be recovered, so proceed with caution.

                                                  "},{"location":"admin/insight/alert-center/silent.html","title":"\u544a\u8b66\u9759\u9ed8","text":"

Alert silence means that, within a specific time range, notifications are no longer sent for alerts that match predefined rules. This helps operations staff avoid receiving floods of noisy alerts during certain operations or events, while also making it easier to deal precisely with the problems that really need solving.

On the Alert Silence page there are two tabs: active rules and expired rules. Active rules are currently in effect, while expired rules were defined before but have since expired (or were deleted by the user).

                                                  "},{"location":"admin/insight/alert-center/silent.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                  1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9 \u544a\u8b66\u4e2d\u5fc3 -> \u544a\u8b66\u9759\u9ed8 ,\u70b9\u51fb \u65b0\u5efa\u9759\u9ed8\u89c4\u5219 \u6309\u94ae\u3002

                                                  2. \u586b\u5199\u9759\u9ed8\u89c4\u5219\u7684\u5404\u9879\u53c2\u6570\uff0c\u5982\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u3001\u6807\u7b7e\u3001\u65f6\u95f4\u7b49\uff0c\u4ee5\u5b9a\u4e49\u8fd9\u6761\u89c4\u5219\u7684\u4f5c\u7528\u8303\u56f4\u548c\u751f\u6548\u65f6\u95f4\u3002

                                                  3. \u8fd4\u56de\u89c4\u5219\u5217\u8868\uff0c\u5728\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u9759\u9ed8\u89c4\u5219\u3002

                                                  \u901a\u8fc7\u544a\u8b66\u9759\u9ed8\u529f\u80fd\uff0c\u60a8\u53ef\u4ee5\u7075\u6d3b\u5730\u63a7\u5236\u54ea\u4e9b\u544a\u8b66\u9700\u8981\u88ab\u5ffd\u7565\uff0c\u5728\u4ec0\u4e48\u65f6\u95f4\u6bb5\u5185\u751f\u6548\uff0c\u4ece\u800c\u63d0\u9ad8\u8fd0\u7ef4\u6548\u7387\uff0c\u51cf\u5c11\u8bef\u62a5\u7684\u53ef\u80fd\u6027\u3002

                                                  "},{"location":"admin/insight/alert-center/sms-provider.html","title":"\u914d\u7f6e\u901a\u77e5\u670d\u52a1\u5668","text":"

Observability (Insight) supports SMS notifications and currently sends alert messages through the SMS services of Alibaba Cloud and Tencent Cloud. This article explains how to configure the SMS notification server in Insight. The variables supported in the SMS signature are the default variables from the message templates; since SMS length is limited, choosing the most specific variables is recommended.

For configuring SMS recipients, see: Configuring an SMS Notification Group.

                                                  "},{"location":"admin/insight/alert-center/sms-provider.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                  1. \u8fdb\u5165 \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u901a\u77e5\u670d\u52a1\u5668 \u3002

                                                  2. \u70b9\u51fb \u6dfb\u52a0\u901a\u77e5\u670d\u52a1\u5668 \u3002

                                                    1. \u914d\u7f6e\u963f\u91cc\u4e91\u670d\u52a1\u5668\u3002

                                                      \u7533\u8bf7\u963f\u91cc\u4e91\u77ed\u4fe1\u670d\u52a1\uff0c\u8bf7\u53c2\u8003\u963f\u91cc\u4e91\u77ed\u4fe1\u670d\u52a1\u3002

                                                      \u5b57\u6bb5\u8bf4\u660e\uff1a

                                                      • AccessKey ID \uff1a\u963f\u91cc\u4e91\u7528\u4e8e\u6807\u8bc6\u7528\u6237\u7684\u53c2\u6570\u3002
                                                      • AccessKey Secret \uff1a\u963f\u91cc\u4e91\u7528\u4e8e\u9a8c\u8bc1\u7528\u6237\u7684\u5bc6\u94a5\u3002AccessKey Secret \u5fc5\u987b\u4fdd\u5bc6\u3002
                                                      • \u77ed\u4fe1\u7b7e\u540d \uff1a\u77ed\u4fe1\u670d\u52a1\u652f\u6301\u6839\u636e\u7528\u6237\u9700\u6c42\u521b\u5efa\u7b26\u5408\u8981\u6c42\u7684\u7b7e\u540d\u3002\u53d1\u9001\u77ed\u4fe1\u65f6\uff0c\u77ed\u4fe1\u5e73\u53f0\u4f1a\u5c06\u5df2\u5ba1\u6838\u901a\u8fc7\u7684\u77ed\u4fe1\u7b7e\u540d\u6dfb\u52a0\u5230\u77ed\u4fe1\u5185\u5bb9\u4e2d\uff0c\u518d\u53d1\u9001\u7ed9\u77ed\u4fe1\u63a5\u6536\u65b9\u3002
                                                      • \u6a21\u677f CODE \uff1a\u77ed\u4fe1\u6a21\u677f\u662f\u53d1\u9001\u77ed\u4fe1\u7684\u5177\u4f53\u5185\u5bb9\u3002
                                                      • \u53c2\u6570\u6a21\u677f \uff1a\u77ed\u4fe1\u6b63\u6587\u6a21\u677f\u53ef\u4ee5\u5305\u542b\u53d8\u91cf\uff0c\u7528\u6237\u53ef\u901a\u8fc7\u53d8\u91cf\u5b9e\u73b0\u81ea\u5b9a\u4e49\u77ed\u4fe1\u5185\u5bb9\u3002

                                                      \u8bf7\u53c2\u8003\u963f\u91cc\u4e91\u53d8\u91cf\u89c4\u8303\u3002

                                                      Note

Example: a template defined on Alibaba Cloud reads: ${severity}: ${...} was triggered at ${startat}. For the parameter template configuration, refer to the figure above.

2. Configure a Tencent Cloud server.

   To apply for the Tencent Cloud SMS service, see Tencent Cloud SMS.

   Field descriptions:

   • Secret ID: the parameter Tencent Cloud uses to identify the API caller.
   • SecretKey: the parameter Tencent Cloud uses to authenticate the API caller.
   • SMS Template ID: the SMS template ID, generated automatically by the Tencent Cloud system.
   • Signature Content: the SMS signature content, i.e., the full or short name of the actual website as defined in the Tencent Cloud SMS signature.
   • SdkAppId: the SMS SdkAppId, the actual SdkAppId generated after adding an application in the Tencent Cloud SMS console.
   • Parameter Template: the SMS body template can contain variables, which let users customize the message content. See: the Tencent Cloud variable specification.

                                                      Note

Example: a template defined on Tencent Cloud reads: {1}: {2} was triggered at {3}. For the parameter template configuration, refer to the figure above.

                                                  "},{"location":"admin/insight/best-practice/debug-log.html","title":"\u65e5\u5fd7\u91c7\u96c6\u6392\u969c\u6307\u5357","text":"

After insight-agent is installed in a cluster, the Fluent Bit inside insight-agent collects the cluster's logs by default, including Kubernetes event logs, node logs, and container logs. Fluent Bit comes preconfigured with a variety of log collection plugins, related filter plugins, and log output plugins; the working state of these plugins determines whether log collection is healthy. Below is a dashboard for Fluent Bit that monitors how Fluent Bit is running in each cluster and how its plugins are collecting, processing, and exporting logs.

                                                  1. \u4f7f\u7528 AI \u7b97\u529b\u4e2d\u5fc3\u5e73\u53f0\uff0c\u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \uff0c\u9009\u62e9\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u4eea\u8868\u76d8 \u3002

                                                  2. \u70b9\u51fb\u4eea\u8868\u76d8\u6807\u9898 \u6982\u89c8 \u3002

                                                  3. \u5207\u6362\u5230 insight-system -> Fluent Bit \u4eea\u8868\u76d8\u3002

                                                  4. Fluent Bit \u4eea\u8868\u76d8\u4e0a\u65b9\u6709\u51e0\u4e2a\u9009\u9879\u6846\uff0c\u53ef\u4ee5\u9009\u62e9\u65e5\u5fd7\u91c7\u96c6\u63d2\u4ef6\u3001\u65e5\u5fd7\u8fc7\u6ee4\u63d2\u4ef6\u3001\u65e5\u5fd7\u8f93\u51fa\u63d2\u4ef6\u53ca\u6240\u5728\u96c6\u7fa4\u540d\u3002
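If the dashboard is unavailable, a quick CLI cross-check works too. A minimal sketch, assuming insight-agent was installed into the insight-system namespace; the label selector and DaemonSet name below are assumptions and may differ in your release:

    # list the Fluent Bit pods shipped with insight-agent (label is an assumption)
    kubectl -n insight-system get pods -l app.kubernetes.io/name=fluent-bit

    # tail recent collector logs (DaemonSet name is an assumption)
    kubectl -n insight-system logs daemonset/insight-agent-fluent-bit --tail=50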

                                                  "},{"location":"admin/insight/best-practice/debug-log.html#_2","title":"\u63d2\u4ef6\u8bf4\u660e","text":"

                                                  \u6b64\u5904\u8bf4\u660e Fluent Bit \u7684\u51e0\u4e2a\u63d2\u4ef6\u3002

                                                  \u65e5\u5fd7\u91c7\u96c6\u63d2\u4ef6

                                                  input plugin \u63d2\u4ef6\u4ecb\u7ecd \u91c7\u96c6\u76ee\u5f55 tail.kube \u91c7\u96c6\u5bb9\u5668\u65e5\u5fd7 /var/log/containers/*.log tail.kubeevent \u91c7\u96c6 Kubernetes \u4e8b\u4ef6\u65e5\u5fd7 /var/log/containers/-kubernetes-event-exporter.log tail.syslog.dmesg \u91c7\u96c6\u4e3b\u673a dmesg \u65e5\u5fd7 /var/log/dmesg tail.syslog.messages \u91c7\u96c6\u4e3b\u673a\u5e38\u7528\u65e5\u5fd7 /var/log/secure, /var/log/messages, /var/log/syslog,/var/log/auth.log syslog.syslog.RSyslog \u91c7\u96c6 RSyslog \u65e5\u5fd7 systemd.syslog.systemd \u91c7\u96c6 Journald daemon \u65e5\u5fd7 tail.audit_log.k8s \u91c7\u96c6 Kubernetes \u5ba1\u8ba1\u65e5\u5fd7 /var/log//audit/.log tail.audit_log.ghippo \u91c7\u96c6\u5168\u5c40\u7ba1\u7406\u5ba1\u8ba1\u65e5\u5fd7 /var/log/containers/_ghippo-system_audit-log.log tail.skoala-gw \u91c7\u96c6\u5fae\u670d\u52a1\u7f51\u5173\u65e5\u5fd7 /var/log/containers/_skoala-gw.log

Log filter plugins

filter plugin        Description
Lua.audit_log.k8s    Uses Lua to filter the Kubernetes audit logs that match given conditions

Note

Lua.audit_log.k8s is not the only filter plugin; only the filters that discard logs are described here.

Log output plugins

output plugin              Description
es.kube.kubeevent.syslog   Writes Kubernetes audit logs, event logs, and syslog logs to the ElasticSearch cluster
forward.audit_log          Sends Kubernetes audit logs and Global Management audit logs to Global Management
"},{"location":"admin/insight/best-practice/debug-trace.html","title":"Trace Collection Troubleshooting Guide","text":"

Before troubleshooting trace data collection issues, you first need to understand the path trace data travels. Below is a schematic of trace data transmission:

graph TB

sdk[Language probe / SDK] --> workload[Workload cluster otel collector]
--> otel[Global cluster otel collector]
--> jaeger[Global cluster jaeger collector]
--> es[Elasticsearch cluster]

classDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;
classDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;
classDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;

class sdk,workload,otel,jaeger,es cluster

As the diagram shows, a transmission failure at any of these steps makes trace data unqueryable. If you find no trace data after instrumenting your application, perform the following steps:

1. In the AI Computing Center platform, go to Observability and select Dashboards in the left navigation bar.

2. Click the dashboard title Overview.

3. Switch to the insight-system -> insight tracing debug dashboard.

4. You can see that this dashboard consists of three blocks, each monitoring the data transmission of a different cluster or component along the trace pipeline. Use the generated time-series charts to check whether trace data transmission has problems.

  • workload opentelemetry collector
  • global opentelemetry collector
  • global jaeger collector
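To check a single hop from the command line, you can also inspect the collector logs directly. A minimal sketch, assuming the insight-system namespace and a commonly seen Deployment name; adjust both to your installation:

    kubectl -n insight-system logs deploy/insight-agent-opentelemetry-collector --tail=100 | grep -i error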

                                                  "},{"location":"admin/insight/best-practice/debug-trace.html#_2","title":"\u533a\u5757\u4ecb\u7ecd","text":"
                                                  1. workload opentelemetry collector

                                                    \u5c55\u793a\u4e0d\u540c\u5de5\u4f5c\u96c6\u7fa4\u7684 opentelemetry collector \u5728\u63a5\u53d7 language probe/SDK \u94fe\u8def\u6570\u636e\uff0c\u53d1\u9001\u805a\u5408\u94fe\u8def\u6570\u636e\u60c5\u51b5\u3002\u53ef\u4ee5\u901a\u8fc7\u5de6\u4e0a\u89d2\u7684 Cluster \u9009\u62e9\u6846\u9009\u62e9\u6240\u5728\u7684\u96c6\u7fa4\u3002

                                                    Note

                                                    \u6839\u636e\u8fd9\u56db\u5f20\u65f6\u5e8f\u56fe\uff0c\u53ef\u4ee5\u5224\u65ad\u51fa\u8be5\u96c6\u7fa4\u7684 opentelemetry collector \u662f\u5426\u6b63\u5e38\u8fd0\u884c\u3002

                                                  2. global opentelemetry collector

                                                    \u5c55\u793a \u5168\u5c40\u670d\u52a1\u96c6\u7fa4 \u7684 opentelemetry collector \u5728\u63a5\u6536 \u5de5\u4f5c\u96c6\u7fa4 \u4e2d otel collector \u94fe\u8def\u6570\u636e\u4ee5\u53ca\u53d1\u9001\u805a\u5408\u94fe\u8def\u6570\u636e\u7684\u60c5\u51b5\u3002

                                                    Note

                                                    \u5168\u5c40\u670d\u52a1\u96c6\u7fa4 \u7684 opentelemetry collector \u8fd8\u8d1f\u8d23\u53d1\u9001\u6240\u6709\u5de5\u4f5c\u96c6\u7fa4\u7684\u5168\u5c40\u7ba1\u7406\u6a21\u5757\u7684\u5ba1\u8ba1\u65e5\u5fd7\u4ee5\u53ca Kubernetes \u5ba1\u8ba1\u65e5\u5fd7\uff08\u9ed8\u8ba4\u4e0d\u91c7\u96c6\uff09\u5230\u5168\u5c40\u7ba1\u7406\u6a21\u5757\u7684 audit server \u7ec4\u4ef6\u3002

                                                  3. global jaeger collector

                                                    \u5c55\u793a \u5168\u5c40\u670d\u52a1\u96c6\u7fa4 \u7684 jaeger collector \u5728\u63a5\u6536 \u5168\u5c40\u670d\u52a1\u96c6\u7fa4 \u4e2d otel collector \u7684\u6570\u636e\uff0c\u5e76\u53d1\u9001\u94fe\u8def\u6570\u636e\u5230 ElasticSearch \u96c6\u7fa4\u7684\u60c5\u51b5\u3002

                                                  "},{"location":"admin/insight/best-practice/find_root_cause.html","title":"\u4f7f\u7528 Insight \u5b9a\u4f4d\u5e94\u7528\u5f02\u5e38","text":"

                                                  \u672c\u6587\u5c06\u4ee5 AI \u7b97\u529b\u4e2d\u5fc3\u4e2d\u4e3e\u4f8b\uff0c\u8bb2\u89e3\u5982\u4f55\u901a\u8fc7 Insight \u53d1\u73b0 AI \u7b97\u529b\u4e2d\u5fc3\u4e2d\u5f02\u5e38\u7684\u7ec4\u4ef6\u5e76\u5206\u6790\u51fa\u7ec4\u4ef6\u5f02\u5e38\u7684\u6839\u56e0\u3002

                                                  \u672c\u6587\u5047\u8bbe\u4f60\u5df2\u7ecf\u4e86\u89e3 Insight \u7684\u4ea7\u54c1\u529f\u80fd\u6216\u613f\u666f\u3002

                                                  "},{"location":"admin/insight/best-practice/find_root_cause.html#_1","title":"\u62d3\u6251\u56fe \u2014 \u4ece\u5b8f\u89c2\u5bdf\u89c9\u5f02\u5e38","text":"

                                                  \u968f\u7740\u4f01\u4e1a\u5bf9\u5fae\u670d\u52a1\u67b6\u6784\u7684\u5b9e\u8df5\uff0c\u4f01\u4e1a\u4e2d\u7684\u670d\u52a1\u6570\u91cf\u53ef\u80fd\u4f1a\u9762\u4e34\u7740\u6570\u91cf\u591a\u3001\u8c03\u7528\u590d\u6742\u7684\u60c5\u51b5\uff0c\u5f00\u53d1\u6216\u8fd0\u7ef4\u4eba\u5458\u5f88\u96be\u7406\u6e05\u670d\u52a1\u4e4b\u95f4\u7684\u5173\u7cfb\uff0c \u56e0\u6b64\uff0c\u6211\u4eec\u63d0\u4f9b\u4e86\u62d3\u6251\u56fe\u76d1\u63a7\u7684\u529f\u80fd\uff0c\u6211\u4eec\u53ef\u4ee5\u901a\u8fc7\u62d3\u6251\u56fe\u5bf9\u5f53\u524d\u7cfb\u7edf\u4e2d\u8fd0\u884c\u7684\u5fae\u670d\u52a1\u72b6\u51b5\u8fdb\u884c\u521d\u6b65\u8bca\u65ad\u3002

                                                  \u5982\u4e0b\u56fe\u6240\u793a\uff0c\u6211\u4eec\u901a\u8fc7\u62d3\u6251\u56fe\u53d1\u73b0\u5176\u4e2d Insight-Server \u8fd9\u4e2a\u8282\u70b9\u7684\u989c\u8272\u4e3a \u7ea2\u8272 \uff0c\u5e76\u5c06\u9f20\u6807\u79fb\u5230\u8be5\u8282\u70b9\u4e0a\uff0c \u53d1\u73b0\u8be5\u8282\u70b9\u7684\u9519\u8bef\u7387\u4e3a 2.11% \u3002\u56e0\u6b64\uff0c\u6211\u4eec\u5e0c\u671b\u67e5\u770b\u66f4\u591a\u7ec6\u8282\u53bb\u627e\u5230\u9020\u6210\u8be5\u670d\u52a1\u9519\u8bef\u7387\u4e0d\u4e3a 0 \u7684\u539f\u56e0:

                                                  \u5f53\u7136\uff0c\u6211\u4eec\u4e5f\u53ef\u4ee5\u70b9\u51fb\u6700\u9876\u90e8\u7684\u670d\u52a1\u540d\uff0c\u8fdb\u5165\u5230\u8be5\u670d\u52a1\u7684\u603b\u89c8\u754c\u9762\uff1a

                                                  "},{"location":"admin/insight/best-practice/find_root_cause.html#_2","title":"\u670d\u52a1\u603b\u89c8 \u2014 \u5177\u4f53\u5206\u6790\u7684\u5f00\u59cb","text":"

                                                  \u5f53\u4f60\u9700\u8981\u6839\u636e\u670d\u52a1\u7684\u5165\u53e3\u548c\u51fa\u53e3\u6d41\u91cf\u5206\u522b\u5206\u6790\u7684\u65f6\u5019\uff0c\u4f60\u53ef\u4ee5\u5728\u53f3\u4e0a\u89d2\u8fdb\u884c\u7b5b\u9009\u5207\u6362\uff0c\u7b5b\u9009\u6570\u636e\u4e4b\u540e\uff0c\u6211\u4eec\u53d1\u73b0\u8be5\u670d\u52a1\u6709\u5f88\u591a \u64cd\u4f5c \u5bf9\u5e94\u7684\u9519\u8bef\u7387\u90fd\u4e0d\u4e3a 0. \u6b64\u65f6\uff0c\u6211\u4eec\u53ef\u4ee5\u901a\u8fc7\u70b9\u51fb \u67e5\u770b\u94fe\u8def \u5bf9\u8be5 \u64cd\u4f5c \u5728\u8fd9\u6bb5\u65f6\u95f4\u4ea7\u751f\u7684\u5e76\u8bb0\u5f55\u4e0b\u6765\u7684\u94fe\u8def\u8fdb\u884c\u5206\u6790\uff1a

                                                  "},{"location":"admin/insight/best-practice/find_root_cause.html#_3","title":"\u94fe\u8def\u8be6\u60c5 \u2014 \u627e\u5230\u9519\u8bef\u6839\u56e0\uff0c\u6d88\u706d\u5b83\u4eec","text":"

                                                  \u5728\u94fe\u8def\u5217\u8868\u4e2d\uff0c\u6211\u4eec\u53ef\u4ee5\u901a\u8fc7\u754c\u9762\u76f4\u89c2\u5730\u53d1\u73b0\u94fe\u8def\u5217\u8868\u4e2d\u5b58\u5728\u7740 \u9519\u8bef \u7684\u94fe\u8def\uff08\u4e0a\u56fe\u4e2d\u7ea2\u6846\u5708\u8d77\u6765\u7684\uff09\uff0c\u6211\u4eec\u53ef\u4ee5\u70b9\u51fb\u9519\u8bef\u7684\u94fe\u8def\u67e5\u770b\u94fe\u8def\u8be6\u60c5\uff0c\u5982\u4e0b\u56fe\u6240\u793a\uff1a

                                                  \u5728\u94fe\u8def\u56fe\u4e2d\u6211\u4eec\u4e5f\u53ef\u4ee5\u4e00\u773c\u5c31\u53d1\u73b0\u94fe\u8def\u7684\u6700\u540e\u4e00\u6761\u6570\u636e\u662f\u5904\u4e8e \u9519\u8bef \u72b6\u6001\uff0c\u5c06\u5176\u53f3\u8fb9 Logs \u5c55\u5f00\uff0c\u6211\u4eec\u5b9a\u4f4d\u5230\u4e86\u9020\u6210\u8fd9\u6b21\u8bf7\u6c42\u9519\u8bef\u7684\u539f\u56e0\uff1a

                                                  \u6839\u636e\u4e0a\u9762\u7684\u5206\u6790\u65b9\u6cd5\uff0c\u6211\u4eec\u4e5f\u53ef\u4ee5\u5b9a\u4f4d\u5230\u5176\u4ed6 \u64cd\u4f5c \u9519\u8bef\u7684\u94fe\u8def\uff1a

                                                  "},{"location":"admin/insight/best-practice/find_root_cause.html#_4","title":"\u63a5\u4e0b\u6765 \u2014 \u4f60\u6765\u5206\u6790\uff01","text":""},{"location":"admin/insight/best-practice/grafana-use-db.html","title":"Insight Grafana \u6301\u4e45\u5316\u5230\u6570\u636e\u5e93","text":"

                                                  Insight \u4f7f\u7528\u4e91\u539f\u751f\u7684 GrafanaOperator + CRD \u7684\u65b9\u5f0f\u6765\u4f7f\u7528 Grafana\u3002\u6211\u4eec\u63a8\u8350\u4f7f\u7528 GrafanaDashboard(CRD) \u6765\u63cf\u8ff0\u4eea\u8868\u76d8\u7684 JSON \u6570\u636e\uff0c\u5373\u901a\u8fc7 GrafanaDashboard \u6765\u589e\u52a0\u3001\u5220\u9664\u3001\u4fee\u6539\u4eea\u8868\u76d8\u3002

                                                  \u56e0\u4e3a Grafana \u9ed8\u8ba4\u4f7f\u7528 SQLite3 \u4f5c\u4e3a\u672c\u5730\u6570\u636e\u5e93\u6765\u5b58\u50a8\u914d\u7f6e\u4fe1\u606f\uff0c\u4f8b\u5982\u7528\u6237\u3001\u4eea\u8868\u76d8\u3001\u544a\u8b66\u7b49\u3002 \u5f53\u7528\u6237\u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd\uff0c\u901a\u8fc7 UI \u521b\u5efa\u6216\u8005\u5bfc\u5165\u4eea\u8868\u76d8\u4e4b\u540e\uff0c\u6570\u636e\u5c06\u4e34\u65f6\u5b58\u50a8\u5728 SQLite3 \u4e2d\u3002 \u5f53 Grafana \u91cd\u542f\u4e4b\u540e\uff0c\u5c06\u91cd\u7f6e\u6240\u6709\u7684\u4eea\u8868\u76d8\u7684\u6570\u636e\uff0c\u5c06\u53ea\u5c55\u793a\u901a\u8fc7 GrafanaDashboard CR \u63cf\u8ff0\u7684\u4eea\u8868\u76d8\u6570\u636e\uff0c\u800c\u901a\u8fc7 UI \u521b\u5efa\uff0c\u5220\u9664\uff0c\u4fee\u6539\u4e5f\u90fd\u5c06\u88ab\u5168\u90e8\u91cd\u7f6e\u3002

                                                  Grafana \u652f\u6301\u4f7f\u7528\u5916\u90e8\u7684 MySQL\u3001PostgreSQL \u7b49\u6570\u636e\u5e93\u66ff\u4ee3\u5185\u7f6e\u7684 SQLite3 \u4f5c\u4e3a\u5185\u90e8\u5b58\u50a8\u3002\u672c\u6587\u63cf\u8ff0\u4e86\u5982\u679c\u7ed9 Insight \u63d0\u4f9b\u7684 Grafana \u914d\u7f6e\u5916\u7f6e\u7684\u6570\u636e\u5e93\u3002

                                                  "},{"location":"admin/insight/best-practice/grafana-use-db.html#_1","title":"\u4f7f\u7528\u5916\u90e8\u6570\u636e\u5e93","text":"

                                                  \u7ed3\u5408 Grafana\uff08\u5f53\u524d\u955c\u50cf\u7248\u672c 9.3.14\uff09\u7684\u5b98\u65b9\u6587\u6863\u3002\u6839\u636e\u5982\u4e0b\u6b65\u9aa4\u914d\u7f6e\u4f7f\u7528\u5916\u90e8\u7684\u6570\u636e\u5e93\uff0c\u793a\u4f8b\u4ee5 MySQL \u4e3a\u4f8b\uff1a

                                                  1. \u5728\u5916\u90e8\u6570\u636e\u5e93\uff08MySQL /PostgreSQL\uff09\u4e2d\u521b\u5efa\u4e00\u4e2a\u6570\u636e\u5e93\uff08DB\uff09\u3002
                                                  2. \u914d\u7f6e Grafana \u4f7f\u7528\u8fd9\u4e2a\u6570\u636e\u5e93\uff08MySQL \u7684 MGR \u6a21\u5f0f\u9700\u8981\u989d\u5916\u5904\u7406\uff09\u3002
                                                  "},{"location":"admin/insight/best-practice/grafana-use-db.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                  1. \u521d\u59cb\u5316\u6570\u636e\u5e93

                                                    \u5728\u6570\u636e\u5e93\u4e2d\u521b\u5efa\u4e00\u4e2a\u65b0\u7684 database \u7ed9 Grafana \u4f7f\u7528\uff0c\u5efa\u8bae\u540d\u79f0\u4e3a grafana
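  A minimal MySQL sketch for this step. The grafana / grafana_password credentials reuse the values shown in the configuration below; pick your own secure password:

    CREATE DATABASE grafana CHARACTER SET utf8mb4;
    CREATE USER 'grafana'@'%' IDENTIFIED BY 'grafana_password';
    GRANT ALL PRIVILEGES ON grafana.* TO 'grafana'@'%';
    FLUSH PRIVILEGES;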

2. Configure Grafana to use the DB

  In the insight-system namespace, add the following to the Grafana CR named insight-grafana-operator-grafana:

    apiVersion: integreatly.org/v1alpha1
    kind: Grafana
    metadata:
      name: insight-grafana-operator-grafana
      namespace: insight-system
    spec:
      baseImage: 10.64.40.50/docker.m.daocloud.io/grafana/grafana:9.3.14
      config:
        // appended at the end of config
    +   database:
    +     type: mysql # supports mysql, postgres
    +     host: "10.6.216.101:30782" # endpoint of the database
    +     name: "grafana"  # the database created in advance
    +     user: "grafana"
    +     password: "grafana_password"
3. After the configuration is complete, the Grafana configuration file grafana-config contains the following information.

    [database]
      host = 10.6.216.101:30782
      name = grafana
      password = grafana_password
      type = mysql
      user = grafana
  1. Add the following configuration to insight.yaml:

      grafana-operator:
        grafana:
          config:
            database:
              type: mysql
              host: "10.6.216.101:30782"
              name: "grafana"
              user: "grafana"
              password: "grafana_password"

  2. Upgrade insight server, preferably via Helm.

      helm upgrade insight insight/insight \
        -n insight-system \
        -f ./insight.yaml \
        --version ${version}
4. Alternatively, upgrade from the command line.

  1. Get the original configuration from the insight Helm release.

      helm get values insight -n insight-system -o yaml > insight.yaml

  2. Specify the original configuration file and set the grafana database connection information.

      helm upgrade --install \
          --version ${version} \
          insight insight/insight -n insight-system \
          -f ./insight.yaml \
          --set grafana-operator.grafana.config.database.type=mysql \
          --set grafana-operator.grafana.config.database.host=10.6.216.101:30782 \
          --set grafana-operator.grafana.config.database.name=grafana \
          --set grafana-operator.grafana.config.database.user=grafana \
          --set grafana-operator.grafana.config.database.password=grafana_password
                                                  "},{"location":"admin/insight/best-practice/grafana-use-db.html#_3","title":"\u6ce8\u610f\u4e8b\u9879","text":"
                                                  1. \u7528\u6237\u662f\u5426\u4f1a\u8986\u76d6\u5185\u7f6e\u4eea\u8868\u76d8\uff0c\u5bfc\u81f4\u5347\u7ea7\u5931\u8d25\uff1f

                                                    \u56de\u590d\uff1a\u4f1a\u3002\u5f53\u7528\u6237\u7f16\u8f91\u4e86 Dashbaord A\uff08v1.1\uff09\uff0c\u4e14 Insight \u4e5f\u5347\u7ea7\u4e86 Dashboard A\uff08v2.0\uff09\uff0c \u5347\u7ea7\u4e4b\u540e\uff08\u5347\u7ea7\u955c\u50cf\uff09\uff1b\u7528\u6237\u770b\u5230\u5185\u5bb9\u8fd8\u662f v1.1\uff0c\u800c v2.0 \u662f\u4e0d\u4f1a\u66f4\u65b0\u5230\u73af\u5883\u91cc\u3002

                                                  2. \u5f53\u4f7f\u7528 MGR \u6a21\u5f0f MySQL \u65f6\u4f1a\u5b58\u5728\u95ee\u9898\uff0c\u5bfc\u81f4 grafana-deployment \u65e0\u6cd5\u6b63\u5e38\u542f\u52a8\u3002

                                                    \u539f\u56e0\uff1a\u8868 alert_rule_tag_v1 \u548c annotation_tag_v2 \u4e2d\u6ca1\u6709\u4e3b\u952e\uff0c\u800c mysql mgr \u5fc5\u987b\u6709\u4e3b\u952e

                                                    \u89e3\u51b3\u65b9\u6cd5\uff1a\u5411 alert_rule_tag_v1 \u548c annotation_tag_v2 \u4e34\u65f6\u8868\u6dfb\u52a0\u4e3b\u952e\uff1a

                                                    alter table alert_rule_tag_v1\n    add constraint alert_rule_tag_v1_pk\n        primary key (tag_id, alert_id);\n\nalter table annotation_tag_v2\n    add constraint annotation_tag_v2_pk\n        primary key (tag_id, annotation_id);\n
                                                  "},{"location":"admin/insight/best-practice/insight-kafka.html","title":"Kafka + Elasticsearch \u6d41\u5f0f\u67b6\u6784\u5e94\u5bf9\u8d85\u5927\u89c4\u6a21\u65e5\u5fd7\u65b9\u6848","text":"

                                                  \u968f\u7740\u4e1a\u52a1\u53d1\u5c55\uff0c\u8d8a\u6765\u8d8a\u591a\u7684\u5e94\u7528\u4ea7\u751f\u7684\u65e5\u5fd7\u6570\u636e\u4f1a\u8d8a\u6765\u8d8a\u591a\uff0c\u4e3a\u4e86\u4fdd\u8bc1\u7cfb\u7edf\u80fd\u591f\u6b63\u5e38\u91c7\u96c6\u5e76\u5206\u6790\u5e9e\u6742\u7684\u65e5\u5fd7\u6570\u636e\u65f6\uff0c \u4e00\u822c\u505a\u6cd5\u662f\u5f15\u5165 Kafka \u7684\u6d41\u5f0f\u67b6\u6784\u6765\u89e3\u51b3\u5927\u91cf\u6570\u636e\u5f02\u6b65\u91c7\u96c6\u7684\u65b9\u6848\u3002\u91c7\u96c6\u5230\u7684\u65e5\u5fd7\u6570\u636e\u4f1a\u7ecf\u8fc7 Kafka \u6d41\u8f6c\uff0c \u7531\u76f8\u5e94\u7684\u6570\u636e\u6d88\u8d39\u7ec4\u4ef6\u5c06\u6570\u636e\u4ece Kafka \u6d88\u8d39\u5b58\u5165\u5230 Elasticsearch \u4e2d\uff0c\u5e76\u901a\u8fc7 Insight \u8fdb\u884c\u53ef\u89c6\u5316\u5c55\u793a\u4e0e\u5206\u6790\u3002

                                                  \u672c\u6587\u5c06\u4ecb\u7ecd\u4ee5\u4e0b\u4e24\u79cd\u65b9\u6848\uff1a

                                                  • Fluentbit + Kafka + Logstash + Elasticsearch
                                                  • Fluentbit + Kafka + Vector + Elasticsearch

                                                  \u5f53\u6211\u4eec\u5728\u65e5\u5fd7\u7cfb\u7edf\u4e2d\u5f15\u5165 Kafka \u4e4b\u540e\uff0c\u6570\u636e\u6d41\u56fe\u5982\u4e0b\u56fe\u6240\u793a\uff1a

                                                  \u4e0a\u9762\u4e24\u79cd\u65b9\u6848\u4e2d\u6709\u5171\u901a\u7684\u5730\u65b9\uff0c\u4e0d\u540c\u4e4b\u5904\u5728\u4e8e\u6d88\u8d39 Kafka \u6570\u636e\u7684\u7ec4\u4ef6\uff0c\u540c\u65f6\uff0c\u4e3a\u4e86\u4e0d\u5f71\u54cd Insight \u6570\u636e\u5206\u6790\uff0c \u6211\u4eec\u9700\u8981\u5728\u6d88\u8d39 Kafka \u6570\u636e\u5e76\u5199\u5165\u5230 ES \u7684\u6570\u636e\u548c\u539f\u6765 Fluentbit \u76f4\u63a5\u5199\u5165 ES \u7684\u6570\u636e\u7684\u683c\u5f0f\u4e00\u81f4\u3002

                                                  \u9996\u5148\u6211\u4eec\u6765\u770b\u770b Fluentbit \u600e\u4e48\u5c06\u65e5\u5fd7\u5199\u5165 Kafka\uff1a

                                                  "},{"location":"admin/insight/best-practice/insight-kafka.html#fluentbit-output","title":"\u4fee\u6539 Fluentbit Output \u914d\u7f6e","text":"

                                                  \u5f53 Kafka \u96c6\u7fa4\u51c6\u5907\u5c31\u7eea\u4e4b\u540e\uff0c\u6211\u4eec\u9700\u8981\u4fee\u6539 insight-system \u547d\u540d\u7a7a\u95f4\u4e0b ConfigMap \u7684\u5185\u5bb9\uff0c \u65b0\u589e\u4ee5\u4e0b\u4e09\u4e2a Kafka Output \u5e76\u6ce8\u91ca\u539f\u6765\u4e09\u4e2a Elasticsearch Output\uff1a

                                                  \u5047\u8bbe Kafka Brokers \u5730\u5740\u4e3a\uff1a insight-kafka.insight-system.svc.cluster.local:9092

    [OUTPUT]
        Name        kafka
        Match_Regex (?:kube|syslog)\.(.*)
        Brokers     insight-kafka.insight-system.svc.cluster.local:9092
        Topics      insight-logs
        format      json
        timestamp_key @timestamp
        rdkafka.batch.size 65536
        rdkafka.compression.level 6
        rdkafka.compression.type lz4
        rdkafka.linger.ms 0
        rdkafka.log.connection.close false
        rdkafka.message.max.bytes 2.097152e+06
        rdkafka.request.required.acks 1
    [OUTPUT]
        Name        kafka
        Match_Regex (?:skoala-gw)\.(.*)
        Brokers     insight-kafka.insight-system.svc.cluster.local:9092
        Topics      insight-gw-skoala
        format      json
        timestamp_key @timestamp
        rdkafka.batch.size 65536
        rdkafka.compression.level 6
        rdkafka.compression.type lz4
        rdkafka.linger.ms 0
        rdkafka.log.connection.close false
        rdkafka.message.max.bytes 2.097152e+06
        rdkafka.request.required.acks 1
    [OUTPUT]
        Name        kafka
        Match_Regex (?:kubeevent)\.(.*)
        Brokers     insight-kafka.insight-system.svc.cluster.local:9092
        Topics      insight-event
        format      json
        timestamp_key @timestamp
        rdkafka.batch.size 65536
        rdkafka.compression.level 6
        rdkafka.compression.type lz4
        rdkafka.linger.ms 0
        rdkafka.log.connection.close false
        rdkafka.message.max.bytes 2.097152e+06
        rdkafka.request.required.acks 1
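Before wiring up a consumer, it can be worth confirming that messages actually reach Kafka. A sketch using the stock Kafka console consumer; the pod name insight-kafka-0 is an assumption, and the broker address comes from the assumption above:

    kubectl -n insight-system exec -it insight-kafka-0 -- \
        kafka-console-consumer.sh \
        --bootstrap-server insight-kafka.insight-system.svc.cluster.local:9092 \
        --topic insight-logs --max-messages 5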

Next come the subtle differences in consuming from Kafka and writing to ES. As stated at the beginning, this article covers Logstash and Vector as the two ways to consume Kafka.

"},{"location":"admin/insight/best-practice/insight-kafka.html#kafka-elasticsearch_1","title":"Consuming Kafka and Writing to Elasticsearch","text":"

Assume the Elasticsearch address is: https://mcamel-common-es-cluster-es-http.mcamel-system:9200

                                                  "},{"location":"admin/insight/best-practice/insight-kafka.html#logstash","title":"\u901a\u8fc7 Logstash \u6d88\u8d39","text":"

                                                  \u5982\u679c\u4f60\u5bf9 Logstash \u6280\u672f\u6808\u6bd4\u8f83\u719f\u6089\uff0c\u4f60\u53ef\u4ee5\u7ee7\u7eed\u4f7f\u7528\u8be5\u65b9\u5f0f\u3002

                                                  \u5f53\u4f60\u901a\u8fc7 Helm \u90e8\u7f72 Logstash \u7684\u65f6\u5019\uff0c \u5728 logstashPipeline \u4e2d\u589e\u52a0\u5982\u4e0b Pipeline \u5373\u53ef\uff1a

                                                  replicas: 3\nresources:\n  requests:\n    cpu: 100m\n    memory: 1536Mi\n  limits:\n    cpu: 1000m\n    memory: 1536Mi\nlogstashConfig:\n  logstash.yml: |\n    http.host: 0.0.0.0\n    xpack.monitoring.enabled: false\nlogstashPipeline:\n  insight-event.conf: |\n    input {\n      kafka {\n        add_field => {\"kafka_topic\" => \"insight-event\"}\n        topics => [\"insight-event\"]         \n        bootstrap_servers => \"172.30.120.189:32082\" # kafka\u7684ip \u548c\u7aef\u53e3\n        enable_auto_commit => true\n        consumer_threads => 1                       # \u5bf9\u5e94 partition \u7684\u6570\u91cf\n        decorate_events => true\n        codec => \"plain\"\n      }\n    }\n\n    filter {\n      mutate { gsub => [ \"message\", \"@timestamp\", \"_@timestamp\"] }\n      json {source => \"message\"}\n      date {\n        match => [ \"_@timestamp\", \"UNIX\" ]\n        remove_field => \"_@timestamp\"\n        remove_tag => \"_timestampparsefailure\"\n      }\n      mutate {\n        remove_field => [\"event\", \"message\"]\n      }\n    }\n\n    output {\n      if [kafka_topic] == \"insight-event\" {\n        elasticsearch {\n          hosts => [\"https://172.30.120.201:32427\"] # elasticsearch \u5730\u5740\n          user => 'elastic'                         # elasticsearch \u7528\u6237\u540d\n          ssl => 'true'\n          password => '0OWj4D54GTH3xK06f9Gg01Zk'    # elasticsearch \u5bc6\u7801\n          ssl_certificate_verification => 'false'\n          action => \"create\"\n          index => \"insight-es-k8s-event-logs-alias\"\n          data_stream => \"false\"\n        }\n      }\n    }\n  insight-gw-skoala.conf: |\n    input {\n      kafka {\n        add_field => {\"kafka_topic\" => \"insight-gw-skoala\"}\n        topics => [\"insight-gw-skoala\"]         \n        bootstrap_servers => \"172.30.120.189:32082\"\n        enable_auto_commit => true\n        consumer_threads => 1\n        decorate_events => true\n        codec => \"plain\"\n      }\n    }\n\n    filter {\n      mutate { gsub => [ \"message\", \"@timestamp\", \"_@timestamp\"] }\n      json {source => \"message\"}\n      date {\n        match => [ \"_@timestamp\", \"UNIX\" ]\n        remove_field => \"_@timestamp\"\n        remove_tag => \"_timestampparsefailure\"\n      }\n      mutate {\n        remove_field => [\"event\", \"message\"]\n      }\n    }\n\n    output {\n      if [kafka_topic] == \"insight-gw-skoala\" {\n        elasticsearch {\n          hosts => [\"https://172.30.120.201:32427\"]\n          user => 'elastic'\n          ssl => 'true'\n          password => '0OWj4D54GTH3xK06f9Gg01Zk'\n          ssl_certificate_verification => 'false'\n          action => \"create\"\n          index => \"skoala-gw-alias\"\n          data_stream => \"false\"\n        }\n      }\n    }\n  insight-logs.conf: |\n    input {\n      kafka {\n        add_field => {\"kafka_topic\" => \"insight-logs\"}\n        topics => [\"insight-logs\"]         \n        bootstrap_servers => \"172.30.120.189:32082\"   \n        enable_auto_commit => true\n        consumer_threads => 1\n        decorate_events => true\n        codec => \"plain\"\n      }\n    }\n\n    filter {\n      mutate { gsub => [ \"message\", \"@timestamp\", \"_@timestamp\"] }\n      json {source => \"message\"}\n      date {\n        match => [ \"_@timestamp\", \"UNIX\" ]\n        remove_field => \"_@timestamp\"\n        remove_tag => \"_timestampparsefailure\"\n      }\n      mutate {\n        remove_field 
=> [\"event\", \"message\"]\n      }\n    }\n\n    output {\n      if [kafka_topic] == \"insight-logs\" {\n        elasticsearch {\n          hosts => [\"https://172.30.120.201:32427\"]\n          user => 'elastic'\n          ssl => 'true'\n          password => '0OWj4D54GTH3xK06f9Gg01Zk'\n          ssl_certificate_verification => 'false'\n          action => \"create\"\n          index => \"insight-es-k8s-logs-alias\"\n          data_stream => \"false\"\n        }\n      }\n    }\n
                                                  "},{"location":"admin/insight/best-practice/insight-kafka.html#vector","title":"\u901a\u8fc7 Vector \u6d88\u8d39","text":"

                                                  \u5982\u679c\u4f60\u5bf9 Vector \u6280\u672f\u6808\u6bd4\u8f83\u719f\u6089\uff0c\u4f60\u53ef\u4ee5\u7ee7\u7eed\u4f7f\u7528\u8be5\u65b9\u5f0f\u3002

                                                  \u5f53\u4f60\u901a\u8fc7 Helm \u90e8\u7f72 Vector \u7684\u65f6\u5019\uff0c\u5f15\u7528\u5982\u4e0b\u89c4\u5219\u7684 Configmap \u914d\u7f6e\u6587\u4ef6\u5373\u53ef\uff1a

    metadata:
      name: vector
    apiVersion: v1
    data:
      aggregator.yaml: |
        api:
          enabled: true
          address: '0.0.0.0:8686'
        sources:
          insight_logs_kafka:
            type: kafka
            bootstrap_servers: 'insight-kafka.insight-system.svc.cluster.local:9092'
            group_id: consumer-group-insight
            topics:
              - insight-logs
          insight_event_kafka:
            type: kafka
            bootstrap_servers: 'insight-kafka.insight-system.svc.cluster.local:9092'
            group_id: consumer-group-insight
            topics:
              - insight-event
          insight_gw_skoala_kafka:
            type: kafka
            bootstrap_servers: 'insight-kafka.insight-system.svc.cluster.local:9092'
            group_id: consumer-group-insight
            topics:
              - insight-gw-skoala
        transforms:
          insight_logs_remap:
            type: remap
            inputs:
              - insight_logs_kafka
            source: |2
                  . = parse_json!(string!(.message))
                  .@timestamp = now()
          insight_event_kafka_remap:
            type: remap
            inputs:
              - insight_event_kafka
              - insight_gw_skoala_kafka
            source: |2
                  . = parse_json!(string!(.message))
                  .@timestamp = now()
          insight_gw_skoala_kafka_remap:
            type: remap
            inputs:
              - insight_gw_skoala_kafka
            source: |2
                  . = parse_json!(string!(.message))
                  .@timestamp = now()
        sinks:
          insight_es_logs:
            type: elasticsearch
            inputs:
              - insight_logs_remap
            api_version: auto
            auth:
              strategy: basic
              user: elastic
              password: 8QZJ656ax3TXZqQh205l3Ee0
            bulk:
              index: insight-es-k8s-logs-alias-1418
            endpoints:
              - 'https://mcamel-common-es-cluster-es-http.mcamel-system:9200'
            tls:
              verify_certificate: false
              verify_hostname: false
          insight_es_event:
            type: elasticsearch
            inputs:
              - insight_event_kafka_remap
            api_version: auto
            auth:
              strategy: basic
              user: elastic
              password: 8QZJ656ax3TXZqQh205l3Ee0
            bulk:
              index: insight-es-k8s-event-logs-alias-1418
            endpoints:
              - 'https://mcamel-common-es-cluster-es-http.mcamel-system:9200'
            tls:
              verify_certificate: false
              verify_hostname: false
          insight_es_gw_skoala:
            type: elasticsearch
            inputs:
              - insight_gw_skoala_kafka_remap
            api_version: auto
            auth:
              strategy: basic
              user: elastic
              password: 8QZJ656ax3TXZqQh205l3Ee0
            bulk:
              index: skoala-gw-alias-1418
            endpoints:
              - 'https://mcamel-common-es-cluster-es-http.mcamel-system:9200'
            tls:
              verify_certificate: false
              verify_hostname: false
                                                  "},{"location":"admin/insight/best-practice/insight-kafka.html#_1","title":"\u68c0\u67e5\u662f\u5426\u6b63\u5e38\u5de5\u4f5c","text":"

                                                  \u4f60\u53ef\u4ee5\u901a\u8fc7\u67e5\u770b Insight \u65e5\u5fd7\u67e5\u8be2\u754c\u9762\u662f\u5426\u6709\u6700\u65b0\u7684\u6570\u636e\uff0c\u6216\u8005\u67e5\u770b\u539f\u672c Elasticsearch \u7684\u7d22\u5f15\u7684\u6570\u91cf\u6709\u6ca1\u6709\u589e\u957f\uff0c\u589e\u957f\u5373\u4ee3\u8868\u914d\u7f6e\u6210\u529f\u3002
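For example, a hedged way to watch the indices directly; the endpoint reuses the Elasticsearch address assumed above, and the credentials are placeholders:

    curl -sk -u elastic:<password> \
        'https://mcamel-common-es-cluster-es-http.mcamel-system:9200/_cat/indices/insight-es-k8s-logs*?v'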

                                                  "},{"location":"admin/insight/best-practice/insight-kafka.html#_2","title":"\u53c2\u8003","text":"
                                                  • Logstash Helm Chart
                                                  • Vector Helm Chart
                                                  • Vector \u5b9e\u8df5
                                                  • Vector Perfomance
                                                  "},{"location":"admin/insight/best-practice/integration_deepflow.html","title":"\u96c6\u6210 DeepFlow","text":"

                                                  DeepFlow \u662f\u4e00\u6b3e\u57fa\u4e8e eBPF \u7684\u53ef\u89c2\u6d4b\u6027\u4ea7\u54c1\u3002\u5b83\u7684\u793e\u533a\u7248\u5df2\u7ecf\u88ab\u96c6\u6210\u8fdb Insight \u4e2d\uff0c\u4ee5\u4e0b\u662f\u96c6\u6210\u65b9\u5f0f\u3002

                                                  "},{"location":"admin/insight/best-practice/integration_deepflow.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                  • \u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u5df2\u7ecf\u5b89\u88c5 Insight
                                                  • Insight \u6700\u4f4e\u7248\u672c\u8981\u6c42\u4e3a v0.23.0
                                                  • \u4e86\u89e3\u5e76\u6ee1\u8db3 DeepFlow \u8fd0\u884c\u6743\u9650\u53ca\u5185\u6838\u8981\u6c42
                                                  • \u5b58\u50a8\u5377\u5c31\u7eea
                                                  "},{"location":"admin/insight/best-practice/integration_deepflow.html#deepflow-insight","title":"\u5b89\u88c5 DeepFlow \u548c\u914d\u7f6e Insight","text":"

                                                  \u5b89\u88c5 DeepFlow \u7ec4\u4ef6\u9700\u8981\u7528\u5230\u4e24\u4e2a Chart\uff1a

                                                  • deepflow\uff1a\u5305\u542b deepflow-app\u3001deepflow-server\u3001deepflow-clickhouse\u3001deepflow-agent \u7b49\u7ec4\u4ef6\u3002 \u4e00\u822c deepflow \u4f1a\u90e8\u7f72\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e2d\uff0c\u6240\u4ee5\u5b83\u4e5f\u4e00\u5e76\u5b89\u88c5\u4e86 deepflow-agent
                                                  • deepflow-agent\uff1a\u53ea\u5305\u542b\u4e86 deepflow-agent \u7ec4\u4ef6\uff0c\u7528\u4e8e\u91c7\u96c6 eBPF \u6570\u636e\u5e76\u53d1\u9001\u7ed9 deepflow-server
                                                  "},{"location":"admin/insight/best-practice/integration_deepflow.html#deepflow_1","title":"\u5b89\u88c5 DeepFlow","text":"

                                                  DeepFlow \u9700\u8981\u5b89\u88c5\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e2d\u3002

                                                  1. \u8fdb\u5165 kpanda-global-cluster \u96c6\u7fa4\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u5185\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u6a21\u677f \uff0c\u4ed3\u5e93\u9009\u62e9 community \uff0c\u641c\u7d22\u6846\u627e\u5230 deepflow:

                                                  2. \u70b9\u51fb deepflow \u5361\u7247\u8fdb\u5165\u8be6\u60c5\u9875\uff1a

                                                  3. \u70b9\u51fb \u5b89\u88c5 \uff0c\u8fdb\u5165\u5b89\u88c5\u754c\u9762\uff1a

                                                  4. \u5927\u90e8\u5206 values \u90fd\u6709\u9ed8\u8ba4\u503c\u3002\u5176\u4e2d Clickhouse \u548c Mysql \u90fd\u9700\u8981\u7533\u8bf7\u5b58\u50a8\u5377\uff0c\u5b83\u4eec\u7684\u9ed8\u8ba4\u5927\u5c0f\u90fd\u662f 10Gi \uff0c \u53ef\u4ee5\u901a\u8fc7 persistence \u5173\u952e\u5b57\u641c\u7d22\u5230\u76f8\u5173\u914d\u7f6e\u5e76\u4fee\u6539\u3002

                                                  5. \u914d\u7f6e\u597d\u540e\u5c31\u53ef\u4ee5\u70b9\u51fb \u786e\u5b9a \uff0c\u6267\u884c\u5b89\u88c5\u4e86\u3002

                                                  "},{"location":"admin/insight/best-practice/integration_deepflow.html#insight","title":"\u4fee\u6539 Insight \u914d\u7f6e","text":"

                                                  \u5728\u5b89\u88c5 DeepFlow \u540e\uff0c\u8fd8\u9700\u8981\u5728 Insight \u4e2d\u5f00\u542f\u76f8\u5173\u7684\u529f\u80fd\u5f00\u5173\u3002

                                                  1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u5185\u70b9\u51fb \u914d\u7f6e\u4e0e\u5bc6\u94a5 -> \u914d\u7f6e\u9879 \uff0c \u901a\u8fc7\u641c\u7d22\u6846\u627e\u5230 insight-server-config \u5e76\u8fdb\u884c\u7f16\u8f91\uff1a

                                                  2. \u5728 YAML \u914d\u7f6e\u4e2d\u627e\u5230 eBPF Flow feature \u8fd9\u4e2a\u529f\u80fd\u5f00\u5173\u5e76\u5c06\u5b83\u5f00\u542f:

                                                  3. \u4fdd\u5b58\u66f4\u6539\uff0c\u91cd\u542f insight-server \u540e\uff0cInsight \u4e3b\u754c\u9762\u5c31\u4f1a\u51fa\u73b0 \u7f51\u7edc\u89c2\u6d4b :
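The same switch can be toggled from the command line; a sketch, where the ConfigMap name comes from step 1 above and the insight-server Deployment name is an assumption:

    kubectl -n insight-system edit configmap insight-server-config
    # locate the eBPF Flow feature switch in the embedded YAML and enable it, then:
    kubectl -n insight-system rollout restart deploy/insight-server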

                                                  "},{"location":"admin/insight/best-practice/integration_deepflow.html#deepflow-agent","title":"\u5b89\u88c5 DeepFlow Agent","text":"

                                                  DeepFlow Agent \u901a\u8fc7 deepflow-agent Chart \u6765\u5b89\u88c5\u5728\u5b50\u96c6\u7fa4\u4e2d\uff0c\u7528\u4e8e\u91c7\u96c6\u5b50\u96c6\u7fa4\u7684 eBPF \u89c2\u6d4b\u6570\u636e\u5e76\u4e0a\u62a5\u5230\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e2d\u3002 \u7c7b\u4f3c\u4e8e\u5b89\u88c5 deepflow\uff0c\u901a\u8fc7 Helm \u5e94\u7528 -> Helm \u6a21\u677f \uff0c\u4ed3\u5e93\u9009\u62e9 community \uff0c \u901a\u8fc7\u641c\u7d22\u6846\u67e5\u8be2 deepflow-agent\uff0c\u6309\u6d41\u7a0b\u8fdb\u5165\u5b89\u88c5\u754c\u9762\u3002

Parameter descriptions (a values sketch follows this list):

• DeployComponent: deployment mode, defaults to daemonset
• timezone: time zone, defaults to Asia/Shanghai
• DeepflowServerNodeIPS: node addresses of the cluster where deepflow server is installed
• deepflowK8sClusterID: cluster UUID
• agentGroupID: agent group ID
• controllerPort: data reporting port of the deepflow server; may be left empty, defaults to 30035
• clusterNAME: cluster name
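A values sketch that strings the parameters above together; all values are placeholders, and the exact key spelling should be checked against the chart's values.yaml:

    deployComponent: daemonset
    timezone: "Asia/Shanghai"
    deepflowServerNodeIPS:
      - "<deepflow-server-node-ip>"   # node address of the cluster running deepflow-server
    deepflowK8sClusterID: "<cluster-uuid>"
    agentGroupID: "<agent-group-id>"
    controllerPort: "30035"           # data reporting port of deepflow server
    clusterNAME: "<cluster-name>"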

Once configured, click OK to finish the installation.

                                                  "},{"location":"admin/insight/best-practice/integration_deepflow.html#_2","title":"\u4f7f\u7528","text":"

                                                  \u5728\u6b63\u786e\u5b89\u88c5 DeepFlow \u540e\uff0c\u70b9\u51fb \u7f51\u7edc\u89c2\u6d4b \u5c31\u53ef\u4ee5\u8fdb\u5165 DeepFlow Grafana UI\u3002 \u5b83\u5185\u7f6e\u4e86\u5927\u91cf\u7684 Dashboard \u53ef\u4f9b\u67e5\u770b\u4e0e\u5e2e\u52a9\u5206\u6790\u95ee\u9898\uff0c \u70b9\u51fb DeepFlow Templates \uff0c\u53ef\u4ee5\u6d4f\u89c8\u6240\u6709\u53ef\u4ee5\u67e5\u770b\u7684 Dashboard\uff1a

                                                  "},{"location":"admin/insight/best-practice/sw-to-otel.html","title":"\u4f7f\u7528 OpenTelemetry \u96f6\u4ee3\u7801\u63a5\u6536 SkyWalking \u94fe\u8def\u6570\u636e","text":"

                                                  \u53ef\u89c2\u6d4b\u6027 Insight \u901a\u8fc7 OpenTelemetry \u5c06\u5e94\u7528\u6570\u636e\u8fdb\u884c\u4e0a\u62a5\u3002\u82e5\u60a8\u7684\u5e94\u7528\u5df2\u4f7f\u7528 Skywalking \u6765\u91c7\u96c6\u94fe\u8def\uff0c \u53ef\u53c2\u8003\u672c\u6587\u8fdb\u884c\u96f6\u4ee3\u7801\u6539\u9020\u5c06\u94fe\u8def\u6570\u636e\u63a5\u5165 Insight\u3002

                                                  "},{"location":"admin/insight/best-practice/sw-to-otel.html#_1","title":"\u4ee3\u7801\u89e3\u8bfb","text":"

                                                  \u4e3a\u4e86\u80fd\u517c\u5bb9\u4e0d\u540c\u7684\u5206\u5e03\u5f0f\u8ffd\u8e2a\u5b9e\u73b0\uff0cOpenTelemetry \u63d0\u4f9b\u4e86\u7ec4\u4ef6\u690d\u5165\u7684\u65b9\u5f0f\uff0c\u8ba9\u4e0d\u540c\u7684\u5382\u5546\u80fd\u591f\u7ecf\u7531 OpenTelemetry \u6807\u51c6\u5316\u6570\u636e\u5904\u7406\u540e\u8f93\u51fa\u5230\u4e0d\u540c\u7684\u540e\u7aef\u3002Jaeger \u4e0e Zipkin \u5728\u793e\u533a\u4e2d\u5b9e\u73b0\u4e86 JaegerReceiver\u3001ZipkinReceiver\u3002 \u6211\u4eec\u4e5f\u4e3a\u793e\u533a\u8d21\u732e\u4e86 SkyWalkingReceiver\uff0c\u5e76\u8fdb\u884c\u4e86\u6301\u7eed\u7684\u6253\u78e8\uff0c\u73b0\u5728\u5df2\u7ecf\u5177\u5907\u4e86\u5728\u751f\u4ea7\u73af\u5883\u4e2d\u4f7f\u7528\u7684\u6761\u4ef6\uff0c \u800c\u4e14\u65e0\u9700\u4fee\u6539\u4efb\u4f55\u4e00\u884c\u4e1a\u52a1\u4ee3\u7801\u3002

                                                  OpenTelemetry \u4e0e SkyWalking \u6709\u4e00\u4e9b\u5171\u540c\u70b9\uff1a\u90fd\u662f\u4f7f\u7528 Trace \u6765\u5b9a\u4e49\u4e00\u6b21\u8ffd\u8e2a\uff0c\u5e76\u4f7f\u7528 Span \u6765\u6807\u8bb0\u8ffd\u8e2a\u91cc\u7684\u6700\u5c0f\u7c92\u5ea6\u3002 \u4f46\u662f\u5728\u4e00\u4e9b\u7ec6\u8282\u548c\u5b9e\u73b0\u4e0a\u8fd8\u662f\u4f1a\u6709\u5dee\u522b\uff1a

                                                  - Skywalking OpenTelemetry \u6570\u636e\u7ed3\u6784 span -> Segment -> Trace Span -> Trace \u5c5e\u6027\u4fe1\u606f Tags Attributes \u5e94\u7528\u65f6\u95f4 Logs Events \u5f15\u7528\u5173\u7cfb References Links

                                                  \u660e\u786e\u4e86\u8fd9\u4e9b\u5dee\u5f02\u540e\uff0c\u5c31\u53ef\u4ee5\u5f00\u59cb\u5b9e\u73b0\u5c06 SkyWalking Trace \u8f6c\u6362\u4e3a OpenTelemetry Trace\u3002\u4e3b\u8981\u5de5\u4f5c\u5305\u62ec\uff1a

1. How to construct OpenTelemetry's TraceId and SpanId

2. How to construct OpenTelemetry's ParentSpanId

3. How to preserve SkyWalking's original TraceId, SegmentId, and SpanId in the OpenTelemetry Span

First, let's look at how to construct OpenTelemetry's TraceId and SpanId. Both SkyWalking and OpenTelemetry use a TraceId to tie together the calls across distributed services and a SpanId to mark each individual Span, but the format specifications differ considerably:

Info

See GitHub for the code:

1. Skywalking Receiver
2. PR: Create skywalking component folder/structure
3. PR: add Skywalking tracing receiver impl

Concretely, all possible formats of the SkyWalking TraceId and SegmentId are as follows:

In the OpenTelemetry protocol, a Span is unique across all Traces, while in SkyWalking a Span is unique only within its Segment. This means a Span can be uniquely identified in SkyWalking only by combining the SegmentId with the SpanId, which is then converted into OpenTelemetry's SpanId.

Info

See GitHub for the code:

1. Skywalking Receiver
2. PR: Fix skywalking traceid and spanid convertion

Next, let's look at how to construct OpenTelemetry's ParentSpanId. Within a single Segment, SkyWalking's ParentSpanId field can be used directly as OpenTelemetry's ParentSpanId. But when a Trace spans multiple Segments, SkyWalking expresses the relationship through the ParentTraceSegmentId and ParentSpanId in a Reference, so OpenTelemetry's ParentSpanId must then be built from the information in the Reference.

See GitHub for the code: Skywalking Receiver

Finally, let's look at how to preserve SkyWalking's original TraceId, SegmentId, and SpanId in the OpenTelemetry Span. We carry these original IDs so that the OpenTelemetry TraceId and SpanId shown in the distributed-tracing backend can be correlated with the SkyWalking TraceId, SegmentId, and SpanId in the application logs, connecting traces and logs. We chose to carry SkyWalking's original TraceId, SegmentId, and ParentSegmentId in OpenTelemetry Attributes.

Info

See GitHub for the code:

1. See GitHub for the code: Skywalking Receiver
2. Add extra link attributes from skywalking ref

After the series of conversions above, a SkyWalking Segment Object is fully converted into an OpenTelemetry Trace, as shown below:

                                                  "},{"location":"admin/insight/best-practice/sw-to-otel.html#demo","title":"\u90e8\u7f72 Demo","text":"

                                                  \u4e0b\u9762\u6211\u4eec\u4ee5\u4e00\u4e2a Demo \u6765\u5c55\u793a\u4f7f\u7528 OpenTelemetry \u6536\u96c6\u3001\u5c55\u793a SkyWalking \u8ffd\u8e2a\u6570\u636e\u7684\u5b8c\u6574\u8fc7\u7a0b\u3002

                                                  \u9996\u5148\uff0c\u5728\u90e8\u7f72 OpenTelemetry Agent \u4e4b\u540e\uff0c\u5f00\u542f\u5982\u4e0b\u914d\u7f6e\uff0c\u5373\u53ef\u5728 OpenTelemetry \u4e2d\u62e5\u6709\u517c\u5bb9 SkyWalking \u534f\u8bae\u7684\u80fd\u529b\uff1a

    # otel-agent config
    receivers:
      # add the following config
      skywalking:
        protocols:
          grpc:
            endpoint: 0.0.0.0:11800 # receives trace data reported by the SkyWalking Agent
          http:
            endpoint: 0.0.0.0:12800 # receives trace data reported over HTTP from the frontend / nginx, etc.
    service:
      pipelines:
        traces:
          # add receiver __skywalking__
          receivers: [skywalking]

    # otel-agent service yaml
    spec:
      ports:
        - name: sw-http
          port: 12800
          protocol: TCP
          targetPort: 12800
        - name: sw-grpc
          port: 11800
          protocol: TCP
          targetPort: 11800

Next, switch the SkyWalking OAP Service address that the business applications connect to (e.g. oap:11800) over to the OpenTelemetry Agent Service address (e.g. otel-agent:11800), and OpenTelemetry starts receiving trace data from the SkyWalking probes.
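For a SkyWalking Java agent, for example, this is typically just an address change; a sketch using the agent's standard environment variable in the workload spec:

    env:
      - name: SW_AGENT_COLLECTOR_BACKEND_SERVICES
        value: "otel-agent:11800"   # previously oap:11800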

We use the SkyWalking-showcase demo to show the end-to-end result. It traces with the SkyWalking Agent and, after OpenTelemetry's standardized processing, renders the final result in Jaeger:

From the SkyWalking Showcase architecture diagram we can see that the SkyWalking data remains complete after OpenTelemetry standardizes it. In this Trace, the request starts at app/homepage; app then fires two requests, /rcmd/ and /songs/top, dispatched to the recommendation and songs services, which finally reach the database query, completing the whole request chain.

We can also see the original SkyWalking Id information on the Jaeger page, convenient for correlating with application logs:

                                                  "},{"location":"admin/insight/best-practice/tail-based-sampling.html","title":"\u94fe\u8def\u6570\u636e\u91c7\u6837\u4ecb\u7ecd\u4e0e\u914d\u7f6e","text":"

                                                  \u4f7f\u7528\u5206\u5e03\u5f0f\u94fe\u8def\u8ddf\u8e2a\uff0c\u53ef\u4ee5\u5728\u5206\u5e03\u5f0f\u7cfb\u7edf\u4e2d\u89c2\u5bdf\u8bf7\u6c42\u5982\u4f55\u5728\u5404\u4e2a\u7cfb\u7edf\u4e2d\u6d41\u8f6c\u3002\u4e0d\u53ef\u5426\u8ba4\uff0c\u5b83\u975e\u5e38\u5b9e\u7528\uff0c\u4f8b\u5982\u4e86\u89e3\u60a8\u7684\u670d\u52a1\u8fde\u63a5\u548c\u8bca\u65ad\u5ef6\u8fdf\u95ee\u9898\uff0c\u4ee5\u53ca\u8bb8\u591a\u5176\u4ed6\u597d\u5904\u3002

                                                  \u4f46\u662f\uff0c\u5982\u679c\u60a8\u7684\u5927\u591a\u6570\u8bf7\u6c42\u90fd\u6210\u529f\u4e86\uff0c\u5e76\u4e14\u6ca1\u6709\u51fa\u73b0\u4e0d\u53ef\u63a5\u53d7\u7684\u5ef6\u8fdf\u6216\u9519\u8bef\uff0c\u90a3\u4e48\u60a8\u771f\u7684\u9700\u8981\u6240\u6709\u8fd9\u4e9b\u6570\u636e\u5417\uff1f\u6240\u4ee5\uff0c\u4f60\u5e76\u4e0d\u603b\u662f\u9700\u8981\u5927\u91cf\u6216\u8005\u5168\u91cf\u7684\u6570\u636e\u6765\u627e\u5230\u6b63\u786e\u7684\u89c1\u89e3\u3002\u60a8\u53ea\u9700\u8981\u901a\u8fc7\u6070\u5f53\u7684\u6570\u636e\u91c7\u6837\u5373\u53ef\u3002

                                                  \u91c7\u6837\u80cc\u540e\u7684\u60f3\u6cd5\u662f\u63a7\u5236\u53d1\u9001\u5230\u53ef\u89c2\u5bdf\u6027\u6536\u96c6\u5668\u7684\u94fe\u8def\uff0c\u4ece\u800c\u964d\u4f4e\u91c7\u96c6\u6210\u672c\u3002\u4e0d\u540c\u7684\u7ec4\u7ec7\u6709\u4e0d\u540c\u7684\u539f\u56e0\uff0c\u6bd4\u5982\u4e3a\u4ec0\u4e48\u8981\u62bd\u6837\uff0c\u4ee5\u53ca\u60f3\u8981\u62bd\u6837\u4ec0\u4e48\u6768\u7684\u6570\u636e\u3002\u6240\u4ee5\uff0c\u6211\u4eec\u9700\u8981\u81ea\u5b9a\u4e49\u91c7\u6837\u7b56\u7565\uff1a

• Managing cost: storing large volumes of telemetry data costs correspondingly more compute and storage.
• Focusing on interesting traces: different organizations care about different data.
• Filtering out noise: for example, you may want to filter out health checks.

It is important to use consistent terminology when discussing sampling. A Trace or Span is considered sampled or not sampled:

• Sampled: the Trace or Span is processed and stored. Because the sampler chose it as representative of the population, it is considered sampled.
• Not sampled: the Trace or Span is not processed or stored. Because the sampler did not choose it, it is considered not sampled.
                                                  "},{"location":"admin/insight/best-practice/tail-based-sampling.html#_2","title":"\u91c7\u6837\u7684\u65b9\u5f0f\u6709\u54ea\u4e9b\uff1f","text":""},{"location":"admin/insight/best-practice/tail-based-sampling.html#head-sampling","title":"\u5934\u90e8\u91c7\u6837\uff08Head Sampling\uff09","text":"

                                                  \u5934\u90e8\u62bd\u6837\u662f\u4e00\u79cd\u7528\u4e8e\u5c3d\u65e9\u505a\u51fa\u62bd\u6837\u51b3\u5b9a\u7684\u91c7\u6837\u6280\u672f\u3002\u91c7\u6837\u6216\u5220\u9664 Trace/Span \u7684\u51b3\u5b9a\u4e0d\u662f\u901a\u8fc7\u68c0\u67e5\u6574\u4e2a Trace \u6765\u505a\u51fa\u7684\u3002

                                                  \u4f8b\u5982\uff0c\u6700\u5e38\u89c1\u7684\u5934\u90e8\u91c7\u6837\u5f62\u5f0f\u662f\u4e00\u81f4\u6982\u7387\u91c7\u6837\u3002\u5b83\u4e5f\u53ef\u4ee5\u79f0\u4e3a\u786e\u5b9a\u6027\u91c7\u6837\u3002\u5728\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u5c06\u6839\u636e TraceID \u548c\u8981\u91c7\u6837\u7684\u6240\u9700 Trace \u767e\u5206\u6bd4\u505a\u51fa\u91c7\u6837\u51b3\u7b56\u3002\u8fd9\u53ef\u786e\u4fdd\u4ee5\u4e00\u81f4\u7684\u901f\u7387\uff08\u4f8b\u5982\u6240\u6709 Trace\u7684 5%\uff09\u5bf9\u6574\u4e2a Trace \u8fdb\u884c\u91c7\u6837\u5e76\u4e14\u4e0d\u9057\u6f0f Span\u3002
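In the OpenTelemetry Collector, this form of head sampling maps to the probabilistic_sampler processor. A minimal fragment (receivers and exporters omitted) that keeps 5% of all traces:

    processors:
      probabilistic_sampler:
        sampling_percentage: 5   # keep 5% of traces, decided from the TraceID
    service:
      pipelines:
        traces:
          processors: [probabilistic_sampler]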

                                                  \u5934\u90e8\u91c7\u6837\u7684\u597d\u5904\u662f\uff1a - \u6613\u4e8e\u7406\u89e3 - \u6613\u4e8e\u914d\u7f6e - \u9ad8\u6548 - \u53ef\u4ee5\u5728\u8ddf\u8e2a\u6536\u96c6\u7ba1\u9053\u4e2d\u7684\u4efb\u4f55\u4f4d\u7f6e\u5b8c\u6210

                                                  The main drawback of head sampling is that it cannot make sampling decisions based on the data in the whole Trace. This means head sampling is effective as a blunt instrument, but it is wholly insufficient for sampling strategies that must take whole-system information into account. For example, head sampling cannot guarantee that all Traces containing an error are sampled. For that, you need tail sampling.

                                                  "},{"location":"admin/insight/best-practice/tail-based-sampling.html#tail-sampling","title":"\u5c3e\u90e8\u91c7\u6837\uff08Tail Sampling\uff09\u2014\u2014 \u63a8\u8350\u65b9\u6848","text":"

                                                  Tail sampling decides whether to sample a Trace by considering all or most of the Spans within it. Tail sampling lets you sample Traces based on specific conditions drawn from different parts of the Trace, an option head sampling does not offer.

                                                  Some examples of how tail sampling can be used include:

                                                  • Always sampling Traces that contain an error
                                                  • Sampling based on overall latency
                                                  • Sampling Traces based on the presence or value of specific attributes on one or more Spans in the Trace; for example, sampling more Traces originating from a newly deployed service
                                                  • Applying different sampling rates to Traces based on specific conditions

                                                  As you can see, tail sampling comes with a much higher degree of complexity. For large systems that must sample their telemetry data, tail sampling is almost always needed to balance data volume against data usefulness.

                                                  Today, tail sampling has three main drawbacks:

                                                  • Tail sampling can be hard to operate. The component that implements tail sampling must be a stateful system capable of receiving and storing large amounts of data. Depending on traffic patterns, this can require dozens or even hundreds of nodes, each utilizing resources differently. Moreover, if the tail sampler cannot keep up with the volume of incoming data, it may need to fall back to less compute-intensive sampling techniques. For these reasons, it is critical to monitor tail sampling components to make sure they have the resources needed to make correct sampling decisions.
                                                  • Tail sampling can be hard to implement. Depending on the kinds of sampling techniques available to you, it is not always a set-and-forget affair. As your system changes, so will your sampling strategy. For a large and complex distributed system, the rules implementing the sampling strategy can themselves be large and complex.
                                                  • Today, tail samplers often end up in vendor-specific technology territory. If you use a paid vendor for observability, the most effective tail sampling options available to you may be limited to what the vendor provides.

                                                  Finally, for some systems, tail sampling can be combined with head sampling. For example, a set of services producing a very large volume of Trace data might first use head sampling to sample only a small portion of the traces, and then, later in the telemetry pipeline, use tail sampling to make more sophisticated decisions before exporting to a backend. This is often done to protect the telemetry pipeline from being overloaded.
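
                                                  A minimal sketch of that combination, assuming one collector runs next to the services and another runs as a gateway (the split into two collectors is illustrative):

                                                    # Service-side collector: cheap head sampling keeps ~10% of traces\nprocessors:\n  probabilistic_sampler:\n    sampling_percentage: 10\n\n# Gateway collector: tail sampling then keeps only the interesting remainder\nprocessors:\n  tail_sampling:\n    decision_wait: 10s\n    policies:\n      [\n        {\n          name: errors,\n          type: status_code,\n          status_code: { status_codes: [ERROR] }\n        }\n      ]\n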

                                                  Insight on the AI Computing Platform currently recommends tail sampling and gives it priority support.

                                                  The tail sampling processor samples traces according to a set of defined policies. However, all Spans of a trace must be received by the same collector instance in order to make an effective sampling decision.

                                                  Therefore, the architecture of Insight's Global OpenTelemetry Collector needs to be adjusted to implement a tail sampling strategy.

                                                  "},{"location":"admin/insight/best-practice/tail-based-sampling.html#insight","title":"Insight \u5177\u4f53\u6539\u52a8","text":"

                                                  Introduce an OpenTelemetry Collector Gateway component with load-balancing capability in front of the insight-opentelemetry-collector in the global service cluster, so that all Spans of the same Trace can be routed by TraceID to the same OpenTelemetry Collector instance.
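
                                                  The key to that routing is the loadbalancing exporter; the snippet below is excerpted from the full deployment configuration in step 1, where routing_key pins every Span of a Trace to one backend instance:

                                                    exporters:\n  loadbalancing:\n    routing_key: \"traceID\"  # route by TraceID so all spans of a trace land on the same instance\n    protocol:\n      otlp:\n        timeout: 1s\n        tls:\n          insecure: true\n    resolver:\n      k8s:  # discover backend collectors from the Kubernetes Service endpoints\n        service: insight-opentelemetry-collector\n        ports:\n          - 4317\n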

                                                  1. Deploy the OTEL COL Gateway component with load-balancing capability

                                                    If you are using Insight version 0.25.x, you can enable this quickly with the Helm upgrade parameter --set opentelemetry-collector-gateway.enabled=true and skip the deployment steps below.

                                                    Deploy it by referring to the following YAML configuration.

                                                    Click to view the deployment configuration
                                                    kind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: insight-otel-collector-gateway\nrules:\n- apiGroups: [\"\"]\n  resources: [\"endpoints\"]\n  verbs: [\"get\", \"watch\", \"list\"]\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: insight-otel-collector-gateway\n  namespace: insight-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: insight-otel-collector-gateway\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: insight-otel-collector-gateway\nsubjects:\n- kind: ServiceAccount\n  name: insight-otel-collector-gateway\n  namespace: insight-system\n---\nkind: ConfigMap\nmetadata:\n  labels:\n    app.kubernetes.io/component: opentelemetry-collector\n    app.kubernetes.io/instance: insight-otel-collector-gateway\n    app.kubernetes.io/name: insight-otel-collector-gateway\n  name: insight-otel-collector-gateway-collector\n  namespace: insight-system\napiVersion: v1\ndata:\n  collector.yaml: |\n    receivers:\n      otlp:\n        protocols:\n          grpc:\n          http:\n      jaeger:\n        protocols:\n          grpc:\n    processors:\n\n    extensions:\n      health_check:\n      pprof:\n        endpoint: :1888\n      zpages:\n        endpoint: :55679\n    exporters:\n      logging:\n      loadbalancing:\n        routing_key: \"traceID\"\n        protocol:\n          otlp:\n            # all options from the OTLP exporter are supported\n            # except the endpoint\n            timeout: 1s\n            tls:\n              insecure: true\n        resolver:\n          k8s:\n            service: insight-opentelemetry-collector\n            ports:\n              - 4317\n    service:\n      extensions: [pprof, zpages, health_check]\n      pipelines:\n        traces:\n          receivers: [otlp, jaeger]\n          exporters: [loadbalancing]\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app.kubernetes.io/component: opentelemetry-collector\n    app.kubernetes.io/instance: insight-otel-collector-gateway\n    app.kubernetes.io/name: insight-otel-collector-gateway\n  name: insight-otel-collector-gateway\n  namespace: insight-system\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      app.kubernetes.io/component: opentelemetry-collector\n      app.kubernetes.io/instance: insight-otel-collector-gateway\n      app.kubernetes.io/name: insight-otel-collector-gateway\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/component: opentelemetry-collector\n        app.kubernetes.io/instance: insight-otel-collector-gateway\n        app.kubernetes.io/name: insight-otel-collector-gateway\n    spec:\n      containers:\n      - args:\n        - --config=/conf/collector.yaml\n        env:\n        - name: POD_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.name\n        image: ghcr.m.daocloud.io/openinsight-proj/opentelemetry-collector-contrib:5baef686672cfe5551e03b5c19d3072c432b6f33\n        imagePullPolicy: IfNotPresent\n        livenessProbe:\n          failureThreshold: 3\n          httpGet:\n            path: /\n            port: 13133\n            scheme: HTTP\n          periodSeconds: 10\n          successThreshold: 1\n          timeoutSeconds: 1\n        name: otc-container\n        resources:\n          limits:\n            cpu: '1'\n            memory: 2Gi\n          requests:\n            cpu: 100m\n            
memory: 400Mi\n        ports:\n        - containerPort: 14250\n          name: jaeger-grpc\n          protocol: TCP\n        - containerPort: 8888\n          name: metrics\n          protocol: TCP\n        - containerPort: 4317\n          name: otlp-grpc\n          protocol: TCP\n        - containerPort: 4318\n          name: otlp-http\n          protocol: TCP\n        - containerPort: 55679\n          name: zpages\n          protocol: TCP\n\n        volumeMounts:\n        - mountPath: /conf\n          name: otc-internal\n\n      serviceAccount: insight-otel-collector-gateway\n      serviceAccountName: insight-otel-collector-gateway\n      volumes:\n      - configMap:\n          defaultMode: 420\n          items:\n          - key: collector.yaml\n            path: collector.yaml\n          name: insight-otel-collector-gateway-collector\n        name: otc-internal\n---\nkind: Service\napiVersion: v1\nmetadata:\n  name: insight-opentelemetry-collector-gateway\n  namespace: insight-system\n  labels:\n    app.kubernetes.io/component: opentelemetry-collector\n    app.kubernetes.io/instance: insight-otel-collector-gateway\n    app.kubernetes.io/name: insight-otel-collector-gateway\nspec:\n  ports:\n    - name: fluentforward\n      protocol: TCP\n      port: 8006\n      targetPort: 8006\n    - name: jaeger-compact\n      protocol: UDP\n      port: 6831\n      targetPort: 6831\n    - name: jaeger-grpc\n      protocol: TCP\n      port: 14250\n      targetPort: 14250\n    - name: jaeger-thrift\n      protocol: TCP\n      port: 14268\n      targetPort: 14268\n    - name: metrics\n      protocol: TCP\n      port: 8888\n      targetPort: 8888\n    - name: otlp\n      protocol: TCP\n      appProtocol: grpc\n      port: 4317\n      targetPort: 4317\n    - name: otlp-http\n      protocol: TCP\n      port: 4318\n      targetPort: 4318\n    - name: zipkin\n      protocol: TCP\n      port: 9411\n      targetPort: 9411\n    - name: zpages\n      protocol: TCP\n      port: 55679\n      targetPort: 55679\n  selector:\n    app.kubernetes.io/component: opentelemetry-collector\n    app.kubernetes.io/instance: insight-otel-collector-gateway\n    app.kubernetes.io/name: insight-otel-collector-gateway\n
                                                  2. Configure tail sampling rules

                                                    Note

                                                    The tail sampling (tail_sampling processors) rules need to be added to the existing insight-otel-collector-config configmap.

                                                  3. Add the following to the processors section; the exact rules can be adjusted as needed. See the official OTel example for reference.

                                                    ........\ntail_sampling:\n  decision_wait: 10s # wait 10 seconds; trace IDs older than 10 seconds are no longer processed\n  num_traces: 1500000  # number of traces kept in memory; assuming 1000 traces per second, no less than 1000 * decision_wait * 2;\n                       # too large a value wastes memory, too small causes some traces to be dropped\n  expected_new_traces_per_sec: 10\n  policies: # reporting policies\n    [\n        {\n          name: latency-policy,\n          type: latency,  # report traces slower than 500ms\n          latency: {threshold_ms: 500}\n        },\n        {\n          name: status_code-policy,\n          type: status_code,  # report traces with status code ERROR\n          status_code: {status_codes: [ ERROR ]}\n        }\n    ]\n......\ntail_sampling: # combined sampling\n  decision_wait: 10s # wait 10 seconds; trace IDs older than 10 seconds are no longer processed\n  num_traces: 1500000  # number of traces kept in memory; assuming 1000 traces per second, no less than 1000 * decision_wait * 2;\n                       # too large a value wastes memory, too small causes some traces to be dropped\n  expected_new_traces_per_sec: 10\n  policies: [\n      {\n        name: debug-worker-cluster-sample-policy,\n        type: and,\n        and:\n          {\n            and_sub_policy:\n              [\n                {\n                  name: service-name-policy,\n                  type: string_attribute,\n                  string_attribute:\n                    { key: k8s.cluster.id, values: [xxxxxxx] },\n                },\n                {\n                  name: trace-status-policy,\n                  type: status_code,\n                  status_code: { status_codes: [ERROR] },\n                },\n                {\n                  name: probabilistic-policy,\n                  type: probabilistic,\n                  probabilistic: { sampling_percentage: 1 },\n                }\n              ]\n          }\n      }\n    ]\n
                                                  4. Activate the processor in the otel col pipeline within the insight-otel-collector-config configmap:

                                                    traces:\n  exporters:\n    - servicegraph\n    - otlp/jaeger\n  processors:\n    - memory_limiter\n    - tail_sampling # \ud83d\udc48\n    - batch\n  receivers:\n    - otlp\n
                                                  5. Restart the insight-opentelemetry-collector component.

                                                  6. Deploy or update Insight-agent, changing the reporting address for trace data to the 4317 port address of the opentelemetry-collector-gateway LB.

                                                    ....\n    exporters:\n      otlp/global:\n        endpoint: insight-opentelemetry-collector-gateway.insight-system.svc.cluster.local:4317  # \ud83d\udc48 change to the gateway/lb address\n
                                                  "},{"location":"admin/insight/best-practice/tail-based-sampling.html#_3","title":"\u53c2\u8003","text":"
                                                  • sampling
                                                  "},{"location":"admin/insight/collection-manag/agent-status.html","title":"insight-agent \u7ec4\u4ef6\u72b6\u6001\u8bf4\u660e","text":"

                                                  Insight, the observability product of the AI Computing Platform, observes multiple clusters. To collect observability data uniformly across clusters, users need to install the Helm application insight-agent (installed in the insight-system namespace by default). See How to install insight-agent.

                                                  "},{"location":"admin/insight/collection-manag/agent-status.html#_1","title":"\u72b6\u6001\u8bf4\u660e","text":"

                                                  You can check the insight-agent installation status of each cluster under Observability -> Collection Management.

                                                  • Not Installed: insight-agent is not installed in the insight-system namespace of the cluster
                                                  • Running: insight-agent is successfully installed in the cluster, and all deployed components are running
                                                  • Abnormal: insight-agent in this state means the Helm deployment failed or some deployed components are not running

                                                  You can troubleshoot as follows:

                                                  1. Run the following command. If the status is deployed, proceed to the next step. If it is failed, it will interfere with application upgrades, so it is recommended to uninstall and reinstall it under Container Management -> Helm Applications:

                                                    helm list -n insight-system\n
                                                  2. Run the following command, or check the status of the components deployed in the cluster under Observability -> Collection Management. If any pods are not in the Running state, restart the abnormal pods.

                                                    kubectl get pods -n insight-system\n
                                                  "},{"location":"admin/insight/collection-manag/agent-status.html#_2","title":"\u8865\u5145\u8bf4\u660e","text":"
                                                  1. insight-agent \u4e2d\u6307\u6807\u91c7\u96c6\u7ec4\u4ef6 Prometheus \u7684\u8d44\u6e90\u6d88\u8017\u4e0e\u96c6\u7fa4\u4e2d\u8fd0\u884c\u7684\u5bb9\u5668\u7ec4\u6570\u91cf\u5b58\u5728\u6b63\u6bd4\u5173\u7cfb\uff0c \u8bf7\u6839\u636e\u96c6\u7fa4\u89c4\u6a21\u8c03\u6574 Prometheus \u7684\u8d44\u6e90\uff0c\u8bf7\u53c2\u8003\uff1aPrometheus \u8d44\u6e90\u89c4\u5212

                                                  2. The storage capacity of vmstorage, the metric storage component in the global service cluster, is proportional to the total number of pods across all clusters.

                                                    • Contact the platform administrator to adjust the vmstorage disk capacity according to the cluster size; see vmstorage Disk Capacity Planning
                                                    • Adjust the vmstorage disk according to the multi-cluster scale; see vmstorage Disk Expansion
                                                  "},{"location":"admin/insight/collection-manag/collection-manag.html","title":"\u91c7\u96c6\u7ba1\u7406","text":"

                                                  Collection Management is the central entry point for managing and displaying the status of the collection plugin insight-agent installed in each cluster. It helps users quickly check the health of each cluster's collection plugin and provides a quick entry for configuring collection rules.

                                                  The steps are as follows:

                                                  1. Click the icon in the upper-left corner and select Observability.

                                                  2. Select Collection Management in the left navigation bar to view the status of the collection plugins across all clusters.

                                                  3. When a cluster has insight-agent installed and it is in the Running state, click a cluster name to enter the details.

                                                  4. In the Service Monitoring tab, click the shortcut link to jump to Container Management -> Custom Resources to add a service discovery rule.

                                                  "},{"location":"admin/insight/collection-manag/metric-collect.html","title":"\u6307\u6807\u6293\u53d6\u65b9\u5f0f","text":"

                                                  Prometheus scrapes the monitoring endpoints exposed by target services mainly in Pull mode, so corresponding scrape tasks must be configured to request monitoring data and write it into the storage provided by Prometheus. Currently, the Prometheus service provides the following task configurations:

                                                  • Native Job configuration: the configuration for Prometheus's native scrape Jobs.
                                                  • Pod Monitor: in the Kubernetes ecosystem, scrapes the monitoring data on Pods based on the Prometheus Operator.
                                                  • Service Monitor: in the Kubernetes ecosystem, scrapes the monitoring data on the Endpoints of a Service based on the Prometheus Operator.

                                                  Note

                                                  Configuration items inside [ ] are optional.

                                                  "},{"location":"admin/insight/collection-manag/metric-collect.html#job","title":"\u539f\u751f Job \u914d\u7f6e","text":"

                                                  The configuration items are described as follows:

                                                  # Scrape job name; a label (job=job_name) is also added to the scraped metrics\njob_name: <job_name>\n\n# Scrape interval\n[ scrape_interval: <duration> | default = <global_config.scrape_interval> ]\n\n# Scrape request timeout\n[ scrape_timeout: <duration> | default = <global_config.scrape_timeout> ]\n\n# URI path of the scrape request\n[ metrics_path: <path> | default = /metrics ]\n\n# Handles conflicts between scraped labels and labels added by the backend Prometheus.\n# true: keep the scraped label and ignore the conflicting label from the backend Prometheus;\n# false: for conflicting labels, prefix the scraped label with exported_<original-label> and add the label from the backend Prometheus;\n[ honor_labels: <boolean> | default = false ]\n\n# Whether to use the timestamps generated on the scraped target.\n# true: if the target carries a timestamp, use the target's timestamp;\n# false: ignore the timestamp on the target;\n[ honor_timestamps: <boolean> | default = true ]\n\n# Scrape protocol: http or https\n[ scheme: <scheme> | default = http ]\n\n# URL parameters of the scrape request\nparams:\n  [ <string>: [<string>, ...] ]\n\n# Sets the `Authorization` header of the scrape request via basic auth. password/password_file are mutually exclusive, and the value in password_file takes precedence.\nbasic_auth:\n  [ username: <string> ]\n  [ password: <secret> ]\n  [ password_file: <string> ]\n\n# Sets the `Authorization` header of the scrape request via bearer token. bearer_token/bearer_token_file are mutually exclusive, and the value in bearer_token takes precedence.\n[ bearer_token: <secret> ]\n\n# Sets the `Authorization` header of the scrape request via a bearer token file. bearer_token/bearer_token_file are mutually exclusive, and the value in bearer_token takes precedence.\n[ bearer_token_file: <filename> ]\n\n# Whether the scrape connection goes through a TLS secure channel; configure the corresponding TLS parameters\ntls_config:\n  [ <tls_config> ]\n\n# Scrape metrics on the target through a proxy service; fill in the proxy service address.\n[ proxy_url: <string> ]\n\n# Specify targets through static configuration; see the description below.\nstatic_configs:\n  [ - <static_config> ... ]\n\n# CVM service discovery configuration; see the description below.\ncvm_sd_configs:\n  [ - <cvm_sd_config> ... ]\n\n# After scraping, rewrite the labels on the target through the relabel mechanism, executing multiple relabel rules in order.\n# See the relabel_config section below.\nrelabel_configs:\n  [ - <relabel_config> ... ]\n\n# Before the scraped data is written, rewrite the label values through the relabel mechanism, executing multiple relabel rules in order.\n# See the relabel_config section below.\nmetric_relabel_configs:\n  [ - <relabel_config> ... ]\n\n# Limit on data points per scrape; 0: no limit, default 0\n[ sample_limit: <int> | default = 0 ]\n\n# Limit on targets per scrape; 0: no limit, default 0\n[ target_limit: <int> | default = 0 ]\n
                                                  "},{"location":"admin/insight/collection-manag/metric-collect.html#pod-monitor","title":"Pod Monitor","text":"

                                                  The configuration items are described as follows:

                                                  # Prometheus Operator CRD version\napiVersion: monitoring.coreos.com/v1\n# Corresponding Kubernetes resource type, here a Pod Monitor\nkind: PodMonitor\n# Kubernetes Metadata; only name matters here. If jobLabel is not specified, the value of the job label on scraped metrics is <namespace>/<name>\nmetadata:\n  name: redis-exporter # fill in a unique name\n  namespace: cm-prometheus  # the namespace is fixed, do not modify\n  labels:\n    operator.insight.io/managed-by: insight # label identifying resources managed by Insight\n# Describes the selection of target Pods and the scrape task configuration\nspec:\n  # Fill in the label of the target Pods; pod monitor uses its value as the job label value.\n  # If you are looking at the Pod YAML, take the value from pod.metadata.labels.\n  # If you are looking at a Deployment/Daemonset/Statefulset, take it from spec.template.metadata.labels.\n  [ jobLabel: string ]\n  # Adds the labels on the corresponding Pods to the target's labels\n  [ podTargetLabels: []string ]\n  # Limit on data points per scrape; 0: no limit, default 0\n  [ sampleLimit: uint64 ]\n  # Limit on targets per scrape; 0: no limit, default 0\n  [ targetLimit: uint64 ]\n  # Configure the exposed Prometheus HTTP endpoints to scrape; multiple endpoints can be configured\n  podMetricsEndpoints:\n  [ - <endpoint_config> ... ] # see the endpoint description below\n  # Select the namespaces of the Pods to monitor; leave empty to select all namespaces\n  [ namespaceSelector: ]\n    # Whether to select all namespaces\n    [ any: bool ]\n    # List of namespaces to select\n    [ matchNames: []string ]\n  # Fill in the label values of the Pods to monitor, to locate the target Pods [K8S metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)\n  selector:\n    [ matchExpressions: array ]\n      [ example: - {key: tier, operator: In, values: [cache]} ]\n    [ matchLabels: object ]\n      [ example: k8s-app: redis-exporter ]\n
                                                  "},{"location":"admin/insight/collection-manag/metric-collect.html#1","title":"Example 1","text":"
                                                  apiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: redis-exporter # fill in a unique name\n  namespace: cm-prometheus # the namespace is fixed, do not modify\n  labels:\n    operator.insight.io/managed-by: insight  # label identifying resources managed by Insight, required.\nspec:\n  podMetricsEndpoints:\n    - interval: 30s\n      port: metric-port # fill in the name of the Prometheus Exporter port in the Pod YAML\n      path: /metrics # fill in the path of the Prometheus Exporter; defaults to /metrics if empty\n      relabelings:\n        - action: replace\n          sourceLabels:\n            - instance\n          regex: (.*)\n          targetLabel: instance\n          replacement: \"crs-xxxxxx\" # change to the corresponding Redis instance ID\n        - action: replace\n          sourceLabels:\n            - instance\n          regex: (.*)\n          targetLabel: ip\n          replacement: \"1.x.x.x\" # change to the corresponding Redis instance IP\n  namespaceSelector: # select the namespaces of the Pods to monitor\n    matchNames:\n      - redis-test\n  selector: # fill in the label values of the Pods to monitor, to locate the target Pods\n    matchLabels:\n      k8s-app: redis-exporter\n
                                                  "},{"location":"admin/insight/collection-manag/metric-collect.html#2","title":"Example 2","text":"
                                                  job_name: prometheus\nscrape_interval: 30s\nstatic_configs:\n- targets:\n  - 127.0.0.1:9090\n
                                                  "},{"location":"admin/insight/collection-manag/metric-collect.html#service-monitor","title":"Service Monitor","text":"

                                                  The configuration items are described as follows:

                                                  # Prometheus Operator CRD version\napiVersion: monitoring.coreos.com/v1\n# Corresponding Kubernetes resource type, here a Service Monitor\nkind: ServiceMonitor\n# Kubernetes Metadata; only name matters here. If jobLabel is not specified, the value of the job label on scraped metrics is the Service name.\nmetadata:\n  name: redis-exporter # fill in a unique name\n  namespace: cm-prometheus  # the namespace is fixed, do not modify\n  labels:\n    operator.insight.io/managed-by: insight  # label identifying resources managed by Insight, required.\n# Describes the selection of target Pods and the scrape task configuration\nspec:\n  # Fill in the label (metadata/labels) of the target Service; service monitor uses its value as the job label value\n  [ jobLabel: string ]\n  # Adds the labels on the corresponding Service to the target's labels\n  [ targetLabels: []string ]\n  # Adds the labels on the corresponding Pods to the target's labels\n  [ podTargetLabels: []string ]\n  # Limit on data points per scrape; 0: no limit, default 0\n  [ sampleLimit: uint64 ]\n  # Limit on targets per scrape; 0: no limit, default 0\n  [ targetLimit: uint64 ]\n  # Configure the exposed Prometheus HTTP endpoints to scrape; multiple endpoints can be configured\n  endpoints:\n  [ - <endpoint_config> ... ] # see the endpoint description below\n  # Select the namespaces of the Pods to monitor; leave empty to select all namespaces\n  [ namespaceSelector: ]\n    # Whether to select all namespaces\n    [ any: bool ]\n    # List of namespaces to select\n    [ matchNames: []string ]\n  # Fill in the label values of the Pods to monitor, to locate the target Pods [K8S metav1.LabelSelector](https://v1-17.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#labelselector-v1-meta)\n  selector:\n    [ matchExpressions: array ]\n      [ example: - {key: tier, operator: In, values: [cache]} ]\n    [ matchLabels: object ]\n      [ example: k8s-app: redis-exporter ]\n
                                                  "},{"location":"admin/insight/collection-manag/metric-collect.html#_2","title":"Example","text":"
                                                  apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: go-demo # fill in a unique name\n  namespace: cm-prometheus # the namespace is fixed, do not modify\n  labels:\n    operator.insight.io/managed-by: insight  # label identifying resources managed by Insight, required.\nspec:\n  endpoints:\n    - interval: 30s\n      # fill in the name of the Prometheus Exporter port in the Service YAML\n      port: 8080-8080-tcp\n      # fill in the path of the Prometheus Exporter; defaults to /metrics if empty\n      path: /metrics\n      relabelings:\n        # ** there must be a label named application; assuming Kubernetes has a label named app,\n        # we use the replace action of relabel to rewrite it to application\n        - action: replace\n          sourceLabels: [__meta_kubernetes_pod_label_app]\n          targetLabel: application\n  # select the namespace where the monitored Service resides\n  namespaceSelector:\n    matchNames:\n      - golang-demo\n  # fill in the label values of the Service to monitor, to locate the target Service\n  selector:\n    matchLabels:\n      app: golang-app-demo\n
                                                  "},{"location":"admin/insight/collection-manag/metric-collect.html#endpoint_config","title":"endpoint_config","text":"

                                                  The configuration items are described as follows:

                                                  # Name of the corresponding port. Note that this is not the port number itself. Default: 80. The value comes from:\n# ServiceMonitor: Service>spec/ports/name;\n# PodMonitor, as follows:\n#   If you are looking at the Pod YAML, take the value from pod.spec.containers.ports.name.\n#   If you are looking at a Deployment/Daemonset/Statefulset, take it from spec.template.spec.containers.ports.name\n[ port: string | default = 80]\n# URI path of the scrape request\n[ path: string | default = /metrics ]\n# Scrape protocol: http or https\n[ scheme: string | default = http]\n# URL parameters of the scrape request\n[ params: map[string][]string]\n# Scrape interval\n[ interval: string | default = 30s ]\n# Scrape timeout\n[ scrapeTimeout: string | default = 30s]\n# Whether the scrape connection goes through a TLS secure channel; configure the corresponding TLS parameters\n[ tlsConfig: TLSConfig ]\n# Read the bearer token from the given file and put it in the header of the scrape request\n[ bearerTokenFile: string ]\n# Read the bearer token from the given Kubernetes secret key; note the secret namespace must match the PodMonitor/ServiceMonitor\n[ bearerTokenSecret: string ]\n# Handles conflicts between scraped labels and labels added by the backend Prometheus.\n# true: keep the scraped label and ignore the conflicting label from the backend Prometheus;\n# false: for conflicting labels, prefix the scraped label with exported_<original-label> and add the label from the backend Prometheus;\n[ honorLabels: bool | default = false ]\n# Whether to use the timestamps generated on the scraped target.\n# true: if the target carries a timestamp, use the target's timestamp;\n# false: ignore the timestamp on the target;\n[ honorTimestamps: bool | default = true ]\n# Basic auth credentials; fill username/password with the values of the corresponding Kubernetes secret keys; note the secret namespace must match the PodMonitor/ServiceMonitor.\n[ basicAuth: BasicAuth ]\n# Scrape metrics on the target through a proxy service; fill in the proxy service address\n[ proxyUrl: string ]\n# After scraping, rewrite the labels on the target through the relabel mechanism, executing multiple relabel rules in order.\n# See the relabel_config section below\nrelabelings:\n[ - <relabel_config> ...]\n# Before the scraped data is written, rewrite the label values through the relabel mechanism, executing multiple relabel rules in order.\n# See the relabel_config section below\nmetricRelabelings:\n[ - <relabel_config> ...]\n
                                                  "},{"location":"admin/insight/collection-manag/metric-collect.html#relabel_config","title":"relabel_config","text":"

                                                  The configuration items are described as follows:

                                                  # Selects which label values are taken from the original labels for relabeling; the extracted values are concatenated using the character defined in separator.\n# For PodMonitor/ServiceMonitor the corresponding configuration item is sourceLabels\n[ source_labels: '[' <labelname> [, ...] ']' ]\n# Defines the character used to concatenate the label values to be relabeled; default ';'\n[ separator: <string> | default = ; ]\n\n# When action is replace/hashmod, target_label specifies the corresponding label name.\n# For PodMonitor/ServiceMonitor the corresponding configuration item is targetLabel\n[ target_label: <labelname> ]\n\n# Regular expression matched against the values of the source labels\n[ regex: <regex> | default = (.*) ]\n\n# Used when action is hashmod; the md5 of the source label values is taken modulo this value\n[ modulus: <int> ]\n\n# When action is replace, replacement defines the expression used once regex matches; regex capture groups can be referenced\n[ replacement: <string> | default = $1 ]\n\n# Action performed on the value matched by regex; the available actions are as follows, default replace:\n# replace: if regex matches, replace the corresponding value with the value defined in replacement, set it via target_label and add the corresponding label\n# keep: drop the target if regex does not match\n# drop: drop the target if regex matches\n# hashmod: take the md5 of the source label values modulo the value specified by modulus\n# and add a new label whose name is specified by target_label\n# labelmap: if regex matches, replace the corresponding label name with replacement\n# labeldrop: if regex matches, delete the corresponding label\n# labelkeep: if regex does not match, delete the corresponding label\n[ action: <relabel_action> | default = replace ]\n
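
                                                  As a quick illustration of these fields, the hypothetical rule below copies the Pod label app into a new application label (the label names are placeholders, mirroring the ServiceMonitor example above):

                                                  relabel_configs:\n  - action: replace  # the default action\n    source_labels: [__meta_kubernetes_pod_label_app]\n    regex: (.+)      # only rewrite when the source label is non-empty\n    target_label: application\n    replacement: $1\n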
                                                  "},{"location":"admin/insight/collection-manag/probe-module.html","title":"\u81ea\u5b9a\u4e49\u63a2\u6d4b\u65b9\u5f0f","text":"

                                                  Insight uses the Blackbox Exporter officially provided by Prometheus as its black-box monitoring solution, which can probe target instances over HTTP, HTTPS, DNS, ICMP, TCP, and gRPC. It can be used in the following scenarios:

                                                  • HTTP/HTTPS: URL/API availability checks
                                                  • ICMP: host liveness checks
                                                  • TCP: port liveness checks
                                                  • DNS: domain name resolution

                                                  In this article, we explain how to configure custom probe methods in the existing Blackbox ConfigMap.

                                                  Insight does not enable the ICMP probe method by default, because ICMP requires elevated privileges. Therefore, we will use the ICMP and HTTP probe methods as examples to show how to modify the ConfigMap to implement custom ICMP and HTTP probing.

                                                  "},{"location":"admin/insight/collection-manag/probe-module.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                  1. \u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 \u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u70b9\u51fb\u8fdb\u5165\u76ee\u6807\u96c6\u7fa4\u7684\u8be6\u60c5\uff1b
                                                  2. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\uff0c\u9009\u62e9 \u914d\u7f6e\u4e0e\u5bc6\u94a5 -> \u914d\u7f6e\u9879 \uff1b
                                                  3. \u627e\u5230\u540d\u4e3a insight-agent-prometheus-blackbox-exporter \u7684\u914d\u7f6e\u9879\uff0c\u70b9\u51fb \u7f16\u8f91 YAML\uff1b

                                                    Add custom probe methods under modules:

                                                  HTTP probe:

                                                  modules:\n  http_2xx:\n    prober: http\n    timeout: 5s\n    http:\n      valid_http_versions: [HTTP/1.1, HTTP/2]\n      valid_status_codes: []  # Defaults to 2xx\n      method: GET\n

                                                  ICMP probe:

                                                  modules:\n  ICMP: # example ICMP probe configuration\n    prober: icmp\n    timeout: 5s\n    icmp:\n      preferred_ip_protocol: ip4\n  icmp_example: # example ICMP probe configuration 2\n    prober: icmp\n    timeout: 5s\n    icmp:\n      preferred_ip_protocol: \"ip4\"\n      source_ip_address: \"127.0.0.1\"\n
                                                  Because ICMP requires elevated privileges, we also need to elevate the Pod's permissions; otherwise an operation not permitted error will occur. There are two ways to elevate the privileges:

                                                  • Method 1: Enable it by directly editing the BlackBox Exporter deployment file

                                                    apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: insight-agent-prometheus-blackbox-exporter\n  namespace: insight-system\nspec:\n  template:\n    spec:\n      containers:\n        - name: blackbox-exporter\n          image: # ... (image, args, ports, etc. remain unchanged)\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            allowPrivilegeEscalation: false\n            capabilities:\n              add:\n              - NET_RAW\n              drop:\n              - ALL\n            readOnlyRootFilesystem: true\n            runAsGroup: 0\n            runAsNonRoot: false\n            runAsUser: 0\n
                                                  • Method 2: Elevate the privileges via Helm upgrade

                                                    prometheus-blackbox-exporter:\n  enabled: true\n  securityContext:\n    runAsUser: 0\n    runAsGroup: 0\n    readOnlyRootFilesystem: true\n    runAsNonRoot: false\n    allowPrivilegeEscalation: false\n    capabilities:\n      add: [\"NET_RAW\"]\n

                                                  Info

                                                  For more probe methods, see the blackbox_exporter Configuration.

                                                  "},{"location":"admin/insight/collection-manag/probe-module.html#_3","title":"\u5176\u4ed6\u53c2\u8003","text":"

                                                  The following YAML file contains multiple probe methods, including HTTP, TCP, SMTP, ICMP, and DNS. You can modify the configuration file of insight-agent-prometheus-blackbox-exporter according to your needs.

                                                  Click to view the complete YAML file
                                                  kind: ConfigMap\napiVersion: v1\nmetadata:\n  name: insight-agent-prometheus-blackbox-exporter\n  namespace: insight-system\n  labels:\n    app.kubernetes.io/instance: insight-agent\n    app.kubernetes.io/managed-by: Helm\n    app.kubernetes.io/name: prometheus-blackbox-exporter\n    app.kubernetes.io/version: v0.24.0\n    helm.sh/chart: prometheus-blackbox-exporter-8.8.0\n  annotations:\n    meta.helm.sh/release-name: insight-agent\n    meta.helm.sh/release-namespace: insight-system\ndata:\n  blackbox.yaml: |\n    modules:\n      HTTP_GET:\n        prober: http\n        timeout: 5s\n        http:\n          method: GET\n          valid_http_versions: [\"HTTP/1.1\", \"HTTP/2.0\"]\n          follow_redirects: true\n          preferred_ip_protocol: \"ip4\"\n      HTTP_POST:\n        prober: http\n        timeout: 5s\n        http:\n          method: POST\n          body_size_limit: 1MB\n      TCP:\n        prober: tcp\n        timeout: 5s\n      # Disabled by default:\n      # ICMP:\n      #   prober: icmp\n      #   timeout: 5s\n      #   icmp:\n      #     preferred_ip_protocol: ip4\n      SSH:\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n          - expect: \"^SSH-2.0-\"\n      POP3S:\n        prober: tcp\n        tcp:\n          query_response:\n          - expect: \"^+OK\"\n          tls: true\n          tls_config:\n            insecure_skip_verify: false\n      http_2xx_example:               # example HTTP probe\n        prober: http\n        timeout: 5s                   # probe timeout\n        http:\n          valid_http_versions: [\"HTTP/1.1\", \"HTTP/2.0\"]                   # accepted HTTP versions in the response; the default is usually fine\n          valid_status_codes: []  # Defaults to 2xx                       # valid status-code range; a response code inside it counts as a successful probe\n          method: GET                 # request method\n          headers:                    # request headers\n            Host: vhost.example.com\n            Accept-Language: en-US\n            Origin: example.com\n          no_follow_redirects: false  # whether redirects are followed\n          fail_if_ssl: false\n          fail_if_not_ssl: false\n          fail_if_body_matches_regexp:\n            - \"Could not connect to database\"\n          fail_if_body_not_matches_regexp:\n            - \"Download the latest version here\"\n          fail_if_header_matches: # Verifies that no cookies are set\n            - header: Set-Cookie\n              allow_missing: true\n              regexp: '.*'\n          fail_if_header_not_matches:\n            - header: Access-Control-Allow-Origin\n              regexp: '(\\*|example\\.com)'\n          tls_config:                  # TLS configuration for HTTPS requests\n            insecure_skip_verify: false\n          preferred_ip_protocol: \"ip4\" # defaults to \"ip6\"                 # preferred IP protocol version\n          ip_protocol_fallback: false  # no fallback to \"ip6\"\n      http_post_2xx:                   # example HTTP probe with a body\n        prober: http\n        timeout: 5s\n        http:\n          method: POST                 # request method used by the probe\n          headers:\n            Content-Type: application/json\n          body: '{\"username\":\"admin\",\"password\":\"123456\"}'                   # body carried by the probe\n      http_basic_auth_example:         # example probe with username and password\n        prober: http\n        timeout: 5s\n        http:\n          method: POST\n          headers:\n            Host: \"login.example.com\"\n          basic_auth:                  # credentials added to the probe\n            username: \"username\"\n            password: \"mysecret\"\n      http_custom_ca_example:\n        prober: http\n        http:\n          method: GET\n          tls_config:                  # root certificate used for the probe\n            ca_file: \"/certs/my_cert.crt\"\n      http_gzip:\n        prober: http\n        http:\n          method: GET\n          compression: gzip            # compression method used by the probe\n      http_gzip_with_accept_encoding:\n        prober: http\n        http:\n          method: GET\n          compression: gzip\n          headers:\n            Accept-Encoding: gzip\n      tls_connect:                     # example TCP probe\n        prober: tcp\n        timeout: 5s\n        tcp:\n          tls: true                    # whether to use TLS\n      tcp_connect_example:\n        prober: tcp\n        timeout: 5s\n      imap_starttls:                   # example configuration for probing an IMAP mail server\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - expect: \"OK.*STARTTLS\"\n            - send: \". STARTTLS\"\n            - expect: \"OK\"\n            - starttls: true\n            - send: \". capability\"\n            - expect: \"CAPABILITY IMAP4rev1\"\n      smtp_starttls:                   # example configuration for probing an SMTP mail server\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - expect: \"^220 ([^ ]+) ESMTP (.+)$\"\n            - send: \"EHLO prober\\r\"\n            - expect: \"^250-STARTTLS\"\n            - send: \"STARTTLS\\r\"\n            - expect: \"^220\"\n            - starttls: true\n            - send: \"EHLO prober\\r\"\n            - expect: \"^250-AUTH\"\n            - send: \"QUIT\\r\"\n      irc_banner_example:\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - send: \"NICK prober\"\n            - send: \"USER prober prober prober :prober\"\n            - expect: \"PING :([^ ]+)\"\n              send: \"PONG ${1}\"\n            - expect: \"^:[^ ]+ 001\"\n      # icmp_example:                    # example ICMP probe configuration\n      #   prober: icmp\n      #   timeout: 5s\n      #   icmp:\n      #     preferred_ip_protocol: \"ip4\"\n      #     source_ip_address: \"127.0.0.1\"\n      dns_udp_example:                 # example DNS query over UDP\n        prober: dns\n        timeout: 5s\n        dns:\n          query_name: \"www.prometheus.io\"                 # domain name to resolve\n          query_type: \"A\"              # record type for the domain\n          valid_rcodes:\n          - NOERROR\n          validate_answer_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n            fail_if_all_match_regexp:\n            - \".*127.0.0.1\"\n            fail_if_not_matches_regexp:\n            - \"www.prometheus.io.\\t300\\tIN\\tA\\t127.0.0.1\"\n            fail_if_none_matches_regexp:\n            - \"127.0.0.1\"\n          validate_authority_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n          validate_additional_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n      dns_soa:\n        prober: dns\n        dns:\n          query_name: \"prometheus.io\"\n          query_type: \"SOA\"\n      dns_tcp_example:               # example DNS query over TCP\n        prober: dns\n        dns:\n          transport_protocol: \"tcp\" # defaults to \"udp\"\n          preferred_ip_protocol: \"ip4\" # defaults to \"ip6\"\n          query_name: \"www.prometheus.io\"\n
                                                  "},{"location":"admin/insight/collection-manag/service-monitor.html","title":"\u914d\u7f6e\u670d\u52a1\u53d1\u73b0\u89c4\u5219","text":"

                                                  Insight supports creating the ServiceMonitor CRD through Container Management to meet custom service discovery and collection needs. With a ServiceMonitor, users can define the namespace scope for Pod discovery and select the Services to be monitored via matchLabel.

                                                  "},{"location":"admin/insight/collection-manag/service-monitor.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                  The Helm application insight-agent has been installed in the cluster and is in the Running state.
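                                                  A quick way to verify this prerequisite from the command line; a sketch, assuming insight-agent was installed as a Helm release into the insight-system namespace:

                                                    # Check the Helm release status and that the agent pods are Running\nhelm status insight-agent -n insight-system\nkubectl get pods -n insight-system\n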

                                                  "},{"location":"admin/insight/collection-manag/service-monitor.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                  1. Select Collection Management in the left navigation bar to view the status of collection plugins across all clusters.

                                                  2. Click a cluster name in the list to go to the collection configuration details.

                                                  3. Click the link to jump to Container Management and create a ServiceMonitor.

                                                    apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: micrometer-demo # (1)\n  namespace: insight-system # (2)\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  endpoints: # (3)\n    - honorLabels: true\n      interval: 15s\n      path: /actuator/prometheus\n      port: http\n  namespaceSelector: # (4)\n    matchNames:\n      - insight-system  # (5)\n  selector: # (6)\n    matchLabels:\n      micrometer-prometheus-discovery: \"true\"\n
                                                    1. Specifies the name of the ServiceMonitor
                                                    2. Specifies the namespace of the ServiceMonitor
                                                    3. The service endpoints, i.e., the addresses from which Prometheus scrapes metrics. endpoints is an array, so multiple endpoints can be defined. Each endpoint contains three fields with the following meanings:

                                                      • interval: the period at which Prometheus scrapes the current endpoint, in seconds; set to 15s in this example.
                                                      • path: the scrape path used by Prometheus; /actuator/prometheus in this example.
                                                      • port: the port used to scrape the data; it must match the name given to the port on the Service being scraped (see the Service sketch after this list).
                                                    4. The scope of Services to discover. namespaceSelector contains two mutually exclusive fields with the following meanings:

                                                      • any: takes the single value true; when set, it watches changes to all Services that match the Selector filter.
                                                      • matchNames: an array specifying the namespaces to watch. For example, to watch Services only in the default and insight-system namespaces, set matchNames as follows:

                                                        namespaceSelector:\n  matchNames:\n    - default\n    - insight-system\n
                                                    5. The namespace matched here is the namespace of the application that exposes the metrics

                                                    6. Used to select the Service
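                                                  To tie the callouts together, the following is a minimal sketch of a Service that the ServiceMonitor above would discover. The Service name and app label are hypothetical; what must line up are the micrometer-prometheus-discovery: \"true\" label (callout 6) and the port name http (callout 3):

                                                    apiVersion: v1\nkind: Service\nmetadata:\n  name: micrometer-demo            # hypothetical application Service\n  namespace: insight-system        # within the namespaceSelector range (callout 5)\n  labels:\n    micrometer-prometheus-discovery: \"true\"   # matched by spec.selector.matchLabels\nspec:\n  selector:\n    app: micrometer-demo\n  ports:\n    - name: http                   # matched by spec.endpoints[].port\n      port: 8080\n      targetPort: 8080\n

                                                  After applying both manifests with kubectl apply -f, Prometheus should begin scraping the endpoint within one scrape interval (15s in this example).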
                                                  "},{"location":"admin/insight/compati-test/k8s-compatibility.html","title":"Kubernetes \u96c6\u7fa4\u517c\u5bb9\u6027\u6d4b\u8bd5","text":"

                                                  \u2705: test passed; \u274c: test failed; blank: not tested.

                                                  "},{"location":"admin/insight/compati-test/k8s-compatibility.html#insight-server-kubernetes","title":"Insight Server \u7684 Kubernetes \u517c\u5bb9\u6027\u6d4b\u8bd5","text":"\u573a\u666f \u6d4b\u8bd5\u65b9\u5f0f K8s 1.31 K8s 1.30 K8s 1.29 K8s 1.28 K8s 1.27 K8s 1.26 k8s 1.25.0 k8s 1.24 k8s 1.23 k8s 1.22 \u57fa\u7ebf\u573a\u666f E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u6307\u6807\u67e5\u8be2 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u65e5\u5fd7\u67e5\u8be2 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u94fe\u8def\u67e5\u8be2 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u544a\u8b66\u4e2d\u5fc3 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u62d3\u6251\u67e5\u8be2 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705"},{"location":"admin/insight/compati-test/k8s-compatibility.html#insight-agent-kubernetes","title":"Insight Agent \u7684 Kubernetes \u517c\u5bb9\u6027\u6d4b\u8bd5","text":"\u573a\u666f \u6d4b\u8bd5\u65b9\u5f0f K8s 1.31 K8s 1.30 K8s 1.29 K8s 1.28 K8s 1.27 K8s 1.26 k8s 1.25 k8s 1.24 k8s 1.23 k8s 1.22 k8s 1.21 k8s 1.20 k8s 1.19 k8s 1.18 k8s 1.17 k8s 1.16 \u57fa\u7ebf\u573a\u666f E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c \u6307\u6807\u67e5\u8be2 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c \u65e5\u5fd7\u67e5\u8be2 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c \u94fe\u8def\u67e5\u8be2 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c \u544a\u8b66\u4e2d\u5fc3 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c \u62d3\u6251\u67e5\u8be2 E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c

                                                  Note

                                                  Insight Agent version compatibility history:

                                                  1. Starting from v0.16.x, Insight Agent is no longer compatible with k8s v1.16.15
                                                  2. Insight Agent v0.20.0 is compatible with k8s v1.18.20
                                                  3. Insight Agent v0.19.2/v0.18.2/v0.17.x are not compatible with k8s v1.18.20
                                                  4. Insight Agent v0.30.1 is not compatible with k8s v1.18.x and earlier versions
                                                  "},{"location":"admin/insight/compati-test/ocp-compatibility.html","title":"Openshift 4.x \u96c6\u7fa4\u517c\u5bb9\u6027\u6d4b\u8bd5","text":"

                                                  \u2705: test passed; \u274c: test failed.

                                                  Note

                                                  The tests listed in the table are not exhaustive.

                                                  Case Test Method ocp4.10 (k8s 1.23.0) Remarks Collect and query metrics of a web application Manual \u2705 Add custom metric collection Manual \u2705 Query real-time metrics Manual \u2705 Instantaneous metric query Manual \u2705 Verify instantaneous metric API fields Manual \u2705 Query metrics over a time range Manual \u2705 Verify time-range metric API fields Manual \u2705 Batch query cluster CPU and memory usage, total cluster CPU, cluster memory usage, and total number of cluster nodes Manual \u2705 Batch query node CPU and memory usage, total node CPU, and node memory usage Manual \u2705 Batch query cluster CPU and memory usage, total cluster CPU, cluster memory usage, and total number of cluster nodes over a time range Manual \u2705 Verify batch time-range metric API fields Manual \u2705 Query Pod logs Manual \u2705 Query SVC logs Manual \u2705 Query StatefulSet logs Manual \u2705 Query Deployment logs Manual \u2705 Query NPD logs Manual \u2705 Log filtering Manual \u2705 Fuzzy log query - workloadSearch Manual \u2705 Fuzzy log query - podSearch Manual \u2705 Fuzzy log query - containerSearch Manual \u2705 Exact log query - cluster Manual \u2705 Exact log query - namespace Manual \u2705 Verify log query API fields Manual \u2705 Alert rules - create, delete, update, query Manual \u2705 Alert templates - create, delete, update, query Manual \u2705 Notification methods - create, delete, update, query Manual \u2705 Trace query Manual \u2705 Topology query Manual \u2705"},{"location":"admin/insight/compati-test/rancher-compatibility.html","title":"Rancher Cluster Compatibility Tests","text":"

                                                  \u2705: test passed; \u274c: test failed.

                                                  Note

                                                  The tests listed in the table are not exhaustive.

                                                  Case Test Method Rancher rke2c1 (k8s 1.24.11) Remarks Collect and query metrics of a web application Manual \u2705 Add custom metric collection Manual \u2705 Query real-time metrics Manual \u2705 Query instantaneous metrics Manual \u2705 Verify the instantaneous metric query API Manual \u2705 Query metrics over a time range Manual \u2705 Verify the time-range metric query API Manual \u2705 Batch query cluster CPU and memory usage, total cluster CPU, cluster memory usage, and total number of cluster nodes Manual \u2705 Batch query node CPU and memory usage, total node CPU, and node memory usage Manual \u2705 Batch query cluster CPU and memory usage, total cluster CPU, cluster memory usage, and total number of cluster nodes over a time range Manual \u2705 Verify the batch time-range metric query API Manual \u2705 Query Pod logs Manual \u2705 Query SVC logs Manual \u2705 Query StatefulSet logs Manual \u2705 Query Deployment logs Manual \u2705 Query NPD logs Manual \u2705 Filter logs Manual \u2705 Fuzzy log query - workloadSearch Manual \u2705 Fuzzy log query - podSearch Manual \u2705 Fuzzy log query - containerSearch Manual \u2705 Exact log query - cluster Manual \u2705 Exact log query - namespace Manual \u2705 Verify the log query API Manual \u2705 Alert rules - create, delete, update, query Manual \u2705 Alert templates - create, delete, update, query Manual \u2705 Notification methods - create, delete, update, query Manual \u2705 Trace query Manual \u2705 Topology query Manual \u2705"},{"location":"admin/insight/dashboard/dashboard.html","title":"Dashboard","text":"

                                                  Grafana is an open-source data visualization and monitoring platform that provides rich charts and panels for real-time monitoring, analysis, and visualization of metrics and logs from various data sources. Insight uses open-source Grafana to provide monitoring services and supports viewing resource consumption from multiple dimensions such as cluster, node, and namespace.

                                                  For details about open-source Grafana, see the Grafana official documentation.

                                                  "},{"location":"admin/insight/dashboard/dashboard.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                  1. Select Dashboard in the left navigation bar.

                                                    • In the Insight / Overview dashboard, you can view the resource usage of multiple selected clusters and analyze resource usage, network, and storage by namespace, pod, and other dimensions.

                                                    • Click the dropdown at the upper left of the dashboard to switch clusters.

                                                    • Click the lower right of the dashboard to switch the time range of the query.

                                                  2. Insight curates a number of community-recommended dashboards that let you monitor from multiple dimensions such as node, namespace, and workload. Click the insight-system / Insight / Overview area to switch dashboards.

                                                  Note

                                                  1. To access the Grafana UI, see Logging in to Grafana as an Administrator.

                                                  2. To import custom dashboards, see Importing Custom Dashboards.

                                                  "},{"location":"admin/insight/dashboard/import-dashboard.html","title":"\u5bfc\u5165\u81ea\u5b9a\u4e49\u4eea\u8868\u76d8","text":"

                                                  Using the Grafana CRD, dashboard management and deployment can be brought into Kubernetes lifecycle management, enabling version control, automated deployment, and cluster-level management of dashboards. This page describes how to import custom dashboards via the CRD and the UI.

                                                  "},{"location":"admin/insight/dashboard/import-dashboard.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                  1. Log in to the AI platform, go to Container Management, and select kpanda-global-cluster from the cluster list.

                                                  2. Select Custom Resources in the left navigation bar, find grafanadashboards.integreatly.org in the list, and open its details.

                                                  3. Click Create from YAML, use the following template, and replace the dashboard JSON in the json field.

                                                    • namespace: the target namespace;
                                                    • name: the name of the dashboard;
                                                    • label: required, operator.insight.io/managed-by: insight.
                                                    apiVersion: integreatly.org/v1alpha1\nkind: GrafanaDashboard\nmetadata:\n  labels:\n    app: insight-grafana-operator\n    operator.insight.io/managed-by: insight\n  name: sample-dashboard\n  namespace: insight-system\nspec:\n  json: >\n    {\n      \"id\": null,\n      \"title\": \"Simple Dashboard\",\n      \"tags\": [],\n      \"style\": \"dark\",\n      \"timezone\": \"browser\",\n      \"editable\": true,\n      \"hideControls\": false,\n      \"graphTooltip\": 1,\n      \"panels\": [],\n      \"time\": {\n        \"from\": \"now-6h\",\n        \"to\": \"now\"\n      },\n      \"timepicker\": {\n        \"time_options\": [],\n        \"refresh_intervals\": []\n      },\n      \"templating\": {\n        \"list\": []\n      },\n      \"annotations\": {\n        \"list\": []\n      },\n      \"refresh\": \"5s\",\n      \"schemaVersion\": 17,\n      \"version\": 0,\n      \"links\": []\n    }\n
                                                  4. After clicking Confirm, wait a moment and the newly imported dashboard will appear under Dashboard.
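                                                  If you prefer the CLI over the UI, the same manifest can be applied directly; a sketch, assuming it is saved as sample-dashboard.yaml:

                                                    kubectl apply -f sample-dashboard.yaml\n# The imported dashboard should appear in the list\nkubectl get grafanadashboards.integreatly.org -n insight-system\n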

                                                  Info

                                                  To design custom dashboards, see Adding Dashboard Panels.

                                                  "},{"location":"admin/insight/dashboard/login-grafana.html","title":"\u8bbf\u95ee\u539f\u751f Grafana","text":"

                                                  Insight provides rich visualization capabilities through Grafana, while retaining an entry point to access native Grafana.

                                                  "},{"location":"admin/insight/dashboard/login-grafana.html#_1","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                  1. Open a browser and enter the Grafana address.

                                                    Access address: http://ip:access_port/ui/insight-grafana/login

                                                    For example: http://10.6.10.233:30209/ui/insight-grafana/login

                                                  2. Click Log in at the lower right and sign in with the default username and password (admin/admin).

                                                  3. Click Log in to complete the login.

                                                  "},{"location":"admin/insight/dashboard/overview.html","title":"\u6982\u89c8","text":"

                                                  Overview only aggregates data from clusters where insight-agent is installed and in a normal running state. In the overview you can view a multi-cluster resource summary:

                                                  • Alert statistics: view statistics of currently firing alerts across all clusters.
                                                  • Resource consumption: view the resource trends of the TOP5 clusters and nodes over the last hour, ranked by CPU usage, memory usage, or disk usage.
                                                  • Sorted by CPU usage by default. You can switch the metric to change how clusters and nodes are ranked.
                                                  • Resource trends: view the node count trend over the last 15 days and the Pod running trend over one hour.
                                                  • Service request ranking: view the TOP5 services by request latency and error rate across clusters, along with their cluster and namespace.
                                                  "},{"location":"admin/insight/dashboard/overview.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                  Select Overview in the left navigation bar.

                                                  "},{"location":"admin/insight/data-query/log.html","title":"\u65e5\u5fd7\u67e5\u8be2","text":"

                                                  Insight collects node logs, container logs, and Kubernetes audit logs by default. On the log query page, you can query the standard output (stdout) logs within the permissions of your login account, including node logs, product logs, and Kubernetes audit logs; quickly locate the logs you need among large volumes of logs; and use the logs' source information and raw contextual data to help troubleshoot problems.

                                                  "},{"location":"admin/insight/data-query/log.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                  1. Click the first-level navigation bar to enter Observability.
                                                  2. Select Logs in the left navigation bar.

                                                    • By default, the last 24 hours are queried;
                                                    • On first entry, container logs of the clusters or namespaces that the login account has permission for are queried by default;

                                                  3. The top tab defaults to Normal Query.

                                                    1. Click Filter to expand the filter panel, where you can switch log search criteria and types.
                                                    2. Log types:

                                                      • Container logs: records of activities and events inside containers in the cluster, including application output, error messages, warnings, and debug information. Logs can be filtered by cluster, namespace, pod, and container.
                                                      • Node logs: system-level logs of each node in the cluster, containing information about the node's operating system, kernel, services, and components. Logs can be filtered by cluster, node, and file path.
                                                    3. Fuzzy search on a single keyword is supported.

                                                  4. Switch the top tab to Lucene Syntax Query.

                                                    On first entry, container logs of the clusters or namespaces that the login account has permission for are selected by default.

                                                    Lucene syntax notes:

                                                    1. Use logical operators (AND, OR, NOT, "") to query multiple keywords, e.g., keyword1 AND (keyword2 OR keyword3) NOT keyword4.
                                                    2. Use a tilde (~) for fuzzy queries; an optional parameter after "~" controls the similarity of the fuzzy match, defaulting to 0.5 if unspecified. E.g., error~.
                                                    3. Use wildcards: ? matches any single character, and * matches zero or more characters.
                                                    4. Use square brackets [ ] or curly brackets { } for range queries; [ ] denotes a closed interval that includes the boundary values, while { } denotes an open interval that excludes them. Range queries apply only to sortable field types such as numbers and dates. E.g., timestamp:[2022-01-01 TO 2022-01-31].
                                                    5. For more usage, see the Lucene syntax documentation.
                                                  "},{"location":"admin/insight/data-query/log.html#_3","title":"\u5176\u4ed6\u64cd\u4f5c","text":""},{"location":"admin/insight/data-query/log.html#_4","title":"\u67e5\u770b\u65e5\u5fd7\u4e0a\u4e0b\u6587","text":"

                                                  Click the button after a log entry; a panel slides out on the right showing that log's context, 100 lines by default. You can switch the Display Rows setting to view more context.

                                                  "},{"location":"admin/insight/data-query/log.html#_5","title":"\u5bfc\u51fa\u65e5\u5fd7\u6570\u636e","text":"

                                                  Click the download button at the upper right of the list.

                                                  • You can configure which log fields are exported; the configurable fields vary by log type, and the Log Content field is required.
                                                  • Log query results can be exported in .txt or .csv format.

                                                  "},{"location":"admin/insight/data-query/metric.html","title":"\u6307\u6807\u67e5\u8be2","text":"

                                                  Metric query supports querying metric data of container resources, letting you view trends in monitoring metrics. Advanced query also supports native PromQL statements.

                                                  "},{"location":"admin/insight/data-query/metric.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                  • insight-agent is installed in the cluster and the application is in the Running state.
                                                  "},{"location":"admin/insight/data-query/metric.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                  1. Click the first-level navigation bar to enter Observability.

                                                  2. Select Metrics in the left navigation bar.

                                                  3. After setting the cluster, type, node, and metric name query criteria, click Search; the corresponding metric chart and data details are displayed on the right side of the screen.

                                                  4. Custom time ranges are supported. You can refresh manually by clicking the Refresh icon or choose a default refresh interval.

                                                  5. Click the Advanced Query tab to query with native PromQL.

                                                  Note

                                                  See the PromQL syntax reference.
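                                                  As an illustration, a query you could paste into the advanced query box (the metric and label names follow standard cAdvisor conventions; adjust the namespace to your own environment):

                                                    # Per-pod CPU usage (in cores) in the insight-system namespace, averaged over 5 minutes\nsum by (pod) (rate(container_cpu_usage_seconds_total{namespace=\"insight-system\", container!=\"\"}[5m]))\n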

                                                  "},{"location":"admin/insight/faq/expand-once-es-full.html","title":"ElasticSearch \u6570\u636e\u585e\u6ee1\u5982\u4f55\u64cd\u4f5c\uff1f","text":"

                                                  When ElasticSearch storage is full, you can resolve it by expanding capacity or deleting data:

                                                  You can run the following command to check the resource usage of the ES nodes.

                                                  kubectl get pod -n mcamel-system | grep common-es-cluster-masters-es | awk '{print $1}' | xargs -I {} kubectl exec {} -n mcamel-system -c elasticsearch -- df -h | grep /usr/share/elasticsearch/data\n
                                                  "},{"location":"admin/insight/faq/expand-once-es-full.html#_1","title":"\u6269\u5bb9","text":"

                                                  When the host nodes still have resources available, expanding capacity, i.e., increasing the PVC size, is a common approach.

                                                  1. First run the following command to get the PVC configuration of the es-data-0 node; adjust to the actual PVC in your environment.

                                                    kubectl edit -n mcamel-system pvc elasticsearch-data-mcamel-common-es-cluster-masters-es-data-0\n
                                                  2. Then modify the following storage field (the StorageClass in use must support expansion)

                                                    spec:\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 35Gi # (1)!\n
                                                    1. Adjust this value as needed
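                                                  Equivalently, the same change can be made non-interactively with a patch; a sketch using the 35Gi target size from the example above:

                                                    kubectl patch pvc -n mcamel-system elasticsearch-data-mcamel-common-es-cluster-masters-es-data-0 -p '{\"spec\":{\"resources\":{\"requests\":{\"storage\":\"35Gi\"}}}}'\n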
                                                  "},{"location":"admin/insight/faq/expand-once-es-full.html#_2","title":"\u5220\u9664\u6570\u636e","text":"

                                                  When ElasticSearch storage is full, you can also delete index data to free up resources.

                                                  You can follow the steps below to open the Kibana page and manually delete data.

                                                  1. First confirm that the Kibana Pod exists and is running normally:

                                                    kubectl get po -n mcamel-system |grep mcamel-common-es-cluster-masters-kb\n
                                                  2. If it does not exist, manually set replicas to 1 and wait for the service to run normally; if it exists, skip this step.

                                                    kubectl scale -n mcamel-system deployment mcamel-common-es-cluster-masters-kb --replicas 1\n
                                                  3. Change the Kibana Service type to NodePort to expose access

                                                    kubectl patch svc -n mcamel-system mcamel-common-es-cluster-masters-kb-http -p '{\"spec\":{\"type\":\"NodePort\"}}'\n\n# After the change, check the NodePort. In this example the port is 30128, so the access URL is https://{node IP in the cluster}:30128\n[root@insight-master1 ~]# kubectl get svc -n mcamel-system |grep mcamel-common-es-cluster-masters-kb-http\nmcamel-common-es-cluster-masters-kb-http   NodePort    10.233.51.174   <none>   5601:30128/TCP    108m\n
                                                  4. Get the ElasticSearch Secret for logging in to Kibana (the username is elastic)

                                                    kubectl get secrets -n mcamel-system mcamel-common-es-cluster-masters-es-elastic-user -o jsonpath=\"{.data.elastic}\" |base64 -d\n
                                                  5. Go to Kibana -> Stack Management -> Index Management and enable the Include hidden indices option to see all indexes. Based on the index sequence numbers, keep the indexes with larger sequence numbers and delete those with smaller ones.
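                                                  Old indexes can also be deleted against the ElasticSearch REST API instead of the Kibana UI; a sketch, assuming the ES HTTP Service follows the usual <cluster-name>-es-http naming and insight-es-k8s-logs-000001 is a hypothetical low-numbered index to drop:

                                                    # ES_PASSWORD is the elastic user's password obtained in step 4\ncurl -k -u \"elastic:${ES_PASSWORD}\" -X DELETE \"https://mcamel-common-es-cluster-masters-es-http.mcamel-system:9200/insight-es-k8s-logs-000001\"\n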

                                                  "},{"location":"admin/insight/faq/ignore-pod-log-collect.html","title":"\u5bb9\u5668\u65e5\u5fd7\u9ed1\u540d\u5355","text":""},{"location":"admin/insight/faq/ignore-pod-log-collect.html#_2","title":"\u914d\u7f6e\u65b9\u5f0f","text":"
                                                  1. For any Pod whose container logs do not need to be collected, add the annotation insight.opentelemetry.io/log-ignore: "true" to the Pod to mark its container logs as excluded from collection, for example:

                                                    apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: log-generator\nspec:\n  selector:\n    matchLabels:\n      app.kubernetes.io/name: log-generator\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: log-generator\n      annotations:\n        insight.opentelemetry.io/log-ignore: \"true\"\n    spec:\n      containers:\n        - name: nginx\n          image: banzaicloud/log-generator:0.3.2\n
                                                  2. Restart the Pod. After the Pod returns to the Running state, Fluent Bit will no longer collect logs from the containers inside this Pod.
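                                                  For a workload that is already running, the annotation can be added in place instead of editing the YAML by hand; a sketch using kubectl patch on the log-generator Deployment above (patching the pod template also triggers the restart described in step 2):

                                                    kubectl patch deployment log-generator --type merge -p '{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"insight.opentelemetry.io/log-ignore\":\"true\"}}}}}'\n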

                                                  "},{"location":"admin/insight/faq/traceclockskew.html","title":"\u94fe\u8def\u6570\u636e\u4e2d\u7684\u65f6\u949f\u504f\u79fb","text":"

                                                  In a distributed system, clock skew causes time drift between different hosts. Put simply, the system clocks of different hosts differ slightly at the same instant.

                                                  A distributed tracing system is a typical distributed system, and its collection of time data is affected by this phenomenon. For example, within one trace, the start time of a server-side span may come before that of the client-side span. Logically this is impossible, but because of clock skew, the system times of the hosts where the spans were recorded differed at that moment, ultimately producing the phenomenon shown in the figure below:

                                                  In theory, the phenomenon in the figure above cannot be eliminated. It is rare, however, and even when it occurs it does not affect the call relationships between services.

                                                  Insight currently uses the Jaeger UI to display trace data; the UI shows a reminder when it encounters such a trace:

                                                  The Jaeger community is trying to mitigate this problem at the UI level.

                                                  For more information, see:

                                                  • Clock Skew Adjuster considered harmful
                                                  • Add ability to display unadjusted trace in the UI
                                                  • Clock Skew Adjustment
                                                  "},{"location":"admin/insight/infra/cluster.html","title":"\u96c6\u7fa4\u76d1\u63a7","text":"

                                                  Cluster monitoring lets you view basic information about a cluster, its resource consumption, and resource consumption trends over a period of time.

                                                  "},{"location":"admin/insight/infra/cluster.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                  insight-agent is installed in the cluster and the application is in the Running state.

                                                  "},{"location":"admin/insight/infra/cluster.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                  1. Go to the Observability module.

                                                  2. Select Infrastructure -> Clusters in the left navigation bar. On this page you can view the following information:

                                                    • Resource Overview: counts of normal and total nodes and workloads in the selected clusters;
                                                    • Faults: the number of alerts currently firing in the cluster;
                                                    • Resource Consumption: actual usage and totals of CPU, memory, and disk for the selected cluster;
                                                    • Metric Descriptions: trends of CPU, memory, disk read/write, and network send/receive for the selected cluster.

                                                  3. Switch to the Resource Level Monitoring tab to view more monitoring data for the current cluster.

                                                  "},{"location":"admin/insight/infra/cluster.html#_4","title":"\u53c2\u8003\u6307\u6807\u8bf4\u660e","text":"\u6307\u6807\u540d \u8bf4\u660e CPU \u4f7f\u7528\u7387 \u8be5\u6307\u6807\u662f\u6307\u96c6\u7fa4\u4e2d\u6240\u6709 Pod \u8d44\u6e90\u7684\u5b9e\u9645 CPU \u7528\u91cf\u4e0e\u6240\u6709\u8282\u70b9\u7684 CPU \u603b\u91cf\u7684\u6bd4\u7387\u3002 CPU \u5206\u914d\u7387 \u8be5\u6307\u6807\u662f\u6307\u96c6\u7fa4\u4e2d\u6240\u6709 Pod \u7684 CPU \u8bf7\u6c42\u91cf\u7684\u603b\u548c\u4e0e\u6240\u6709\u8282\u70b9\u7684 CPU \u603b\u91cf\u7684\u6bd4\u7387\u3002 \u5185\u5b58\u4f7f\u7528\u7387 \u8be5\u6307\u6807\u662f\u6307\u96c6\u7fa4\u4e2d\u6240\u6709 Pod \u8d44\u6e90\u7684\u5b9e\u9645\u5185\u5b58\u7528\u91cf\u4e0e\u6240\u6709\u8282\u70b9\u7684\u5185\u5b58\u603b\u91cf\u7684\u6bd4\u7387\u3002 \u5185\u5b58\u5206\u914d\u7387 \u8be5\u6307\u6807\u662f\u6307\u96c6\u7fa4\u4e2d\u6240\u6709 Pod \u7684\u5185\u5b58\u8bf7\u6c42\u91cf\u7684\u603b\u548c\u4e0e\u6240\u6709\u8282\u70b9\u7684\u5185\u5b58\u603b\u91cf\u7684\u6bd4\u7387\u3002"},{"location":"admin/insight/infra/container.html","title":"\u5bb9\u5668\u76d1\u63a7","text":"

                                                  Container monitoring covers the workloads in cluster management. The list shows basic information and status of workloads; the workload details page shows the number of firing alerts and trends in resource consumption such as CPU and memory.

                                                  "},{"location":"admin/insight/infra/container.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                  insight-agent is installed in the cluster, and all of its pods are in the Running state.

                                                  • To install insight-agent, see Installing insight-agent Online or Upgrading insight-agent Offline.
                                                  "},{"location":"admin/insight/infra/container.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                  Follow the steps below to view service monitoring metrics:

                                                  1. Go to the Observability module.

                                                  2. Select Infrastructure -> Workloads in the left navigation bar.

                                                  3. Switch the tab at the top to view data for different types of workloads.

                                                  4. Click the name of a target workload to view its details.

                                                    1. Faults: the fault card shows the total number of alerts currently firing for the workload.
                                                    2. Resource Consumption: this card shows the workload's CPU, memory, and network usage.
                                                    3. Monitoring Metrics: trends of the workload's CPU, memory, network, and disk over the default 1-hour window.

                                                  5. Switch the tab to Pod List to view the status, node, restart count, and other information of the workload's pods.

                                                  6. Switch the tab to JVM Monitoring to view JVM metrics for each pod.

                                                    Note

                                                    1. JVM monitoring supports only the Java language.
                                                    2. To enable JVM monitoring, see Getting Started with Monitoring Java Applications.
                                                  "},{"location":"admin/insight/infra/container.html#_4","title":"\u6307\u6807\u53c2\u8003\u8bf4\u660e","text":"\u6307\u6807\u540d\u79f0 \u8bf4\u660e CPU \u4f7f\u7528\u91cf \u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u5bb9\u5668\u7ec4\u7684 CPU \u4f7f\u7528\u91cf\u4e4b\u548c\u3002 CPU \u8bf7\u6c42\u91cf \u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u5bb9\u5668\u7ec4\u7684 CPU \u8bf7\u6c42\u91cf\u4e4b\u548c\u3002 CPU \u9650\u5236\u91cf \u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u5bb9\u5668\u7ec4\u7684 CPU \u9650\u5236\u91cf\u4e4b\u548c\u3002 \u5185\u5b58\u4f7f\u7528\u91cf \u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u5bb9\u5668\u7ec4\u7684\u5185\u5b58\u4f7f\u7528\u91cf\u4e4b\u548c\u3002 \u5185\u5b58\u8bf7\u6c42\u91cf \u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u5bb9\u5668\u7ec4\u7684\u5185\u5b58\u4f7f\u7528\u91cf\u4e4b\u548c\u3002 \u5185\u5b58\u9650\u5236\u91cf \u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u5bb9\u5668\u7ec4\u7684\u5185\u5b58\u9650\u5236\u91cf\u4e4b\u548c\u3002 \u78c1\u76d8\u8bfb\u5199\u901f\u7387 \u6307\u5b9a\u65f6\u95f4\u8303\u56f4\u5185\u78c1\u76d8\u6bcf\u79d2\u8fde\u7eed\u8bfb\u53d6\u548c\u5199\u5165\u7684\u603b\u548c\uff0c\u8868\u793a\u78c1\u76d8\u6bcf\u79d2\u8bfb\u53d6\u548c\u5199\u5165\u64cd\u4f5c\u6570\u7684\u6027\u80fd\u5ea6\u91cf\u3002 \u7f51\u7edc\u53d1\u9001\u63a5\u6536\u901f\u7387 \u6307\u5b9a\u65f6\u95f4\u8303\u56f4\u5185\uff0c\u6309\u5de5\u4f5c\u8d1f\u8f7d\u7edf\u8ba1\u7684\u7f51\u7edc\u6d41\u91cf\u7684\u6d41\u5165\u3001\u6d41\u51fa\u901f\u7387\u3002"},{"location":"admin/insight/infra/event.html","title":"\u4e8b\u4ef6\u67e5\u8be2","text":"

                                                  Insight on the AI platform supports querying events by cluster and namespace, and provides an event status distribution chart with statistics on important events.

                                                  "},{"location":"admin/insight/infra/event.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                  1. Click the first-level navigation bar to enter Observability.
                                                  2. Select Infrastructure > Events in the left navigation bar.

                                                  "},{"location":"admin/insight/infra/event.html#_3","title":"\u4e8b\u4ef6\u72b6\u6001\u5206\u5e03","text":"

                                                  By default, events from the last 12 hours are shown. You can select a different time range in the upper right corner to view a longer or shorter period. You can also customize the sampling interval, from 1 minute to 5 hours.

                                                  The event status distribution chart gives you an intuitive view of how dense or scattered events are. This helps evaluate and prepare for subsequent cluster operations. If events are concentrated in a specific period, you may need to allocate more resources or take corresponding measures to ensure cluster stability and high availability; if events are scattered, you can reasonably schedule other operational work during that period, such as system optimization, upgrades, or other tasks.

                                                  By considering the event status distribution chart together with the time range, you can better plan and manage cluster operations and ensure system stability and reliability.

                                                  "},{"location":"admin/insight/infra/event.html#_4","title":"\u4e8b\u4ef6\u603b\u6570\u548c\u7edf\u8ba1","text":"

                                                  Through important event statistics, you can easily see the number of image pull failures, health check failures, pod failures, pod scheduling failures, container OOM (out-of-memory) events, volume mount failures, and the total count of all events. These events are usually classified as "Warning" or "Normal".

                                                  "},{"location":"admin/insight/infra/event.html#_5","title":"\u4e8b\u4ef6\u5217\u8868","text":"

                                                  The event list presents events chronologically as a stream. You can sort by "Last Occurred At" and "Level".

                                                  Click the \u2699\ufe0f icon on the right to customize the displayed columns to your preferences and needs.

                                                  When needed, click the refresh icon to update the current event list.

                                                  "},{"location":"admin/insight/infra/event.html#_6","title":"\u5176\u4ed6\u64cd\u4f5c","text":"
                                                  1. Click the icon in the operation column of the event list to view the metadata of an event.

                                                  2. Click the Context tab at the top to view the historical event records of the resource associated with the event.

                                                  "},{"location":"admin/insight/infra/event.html#_7","title":"\u53c2\u8003","text":"

                                                  For the detailed meanings of built-in system events, see the Kubernetes API event list.

                                                  "},{"location":"admin/insight/infra/namespace.html","title":"\u547d\u540d\u7a7a\u95f4\u76d1\u63a7","text":"

                                                  Using the namespace as the dimension, quickly query resource consumption and trends within a namespace.

                                                  "},{"location":"admin/insight/infra/namespace.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                  insight-agent is installed in the cluster and the application is in the Running state.

                                                  "},{"location":"admin/insight/infra/namespace.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                  1. Go to the Observability module.

                                                  2. Select Infrastructure > Namespaces in the left navigation bar. On this page you can view the following information:

                                                    1. \u5207\u6362\u547d\u540d\u7a7a\u95f4\uff1a\u5728\u9876\u90e8\u5207\u6362\u96c6\u7fa4\u6216\u547d\u540d\u7a7a\u95f4\uff1b
                                                    2. \u8d44\u6e90\u6982\u89c8\uff1a\u7edf\u8ba1\u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u6b63\u5e38\u548c\u5168\u90e8\u5de5\u4f5c\u8d1f\u8f7d\u7684\u6570\u91cf\uff1b
                                                    3. \u6545\u969c\uff1a\u7edf\u8ba1\u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e0b\u4ea7\u751f\u7684\u544a\u8b66\u6570\u91cf\uff1b
                                                    4. \u4e8b\u4ef6\uff1a\u7edf\u8ba1\u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e0b 24 \u5c0f\u65f6\u5185 Warning \u7ea7\u522b\u7684\u4e8b\u4ef6\u6570\u91cf\uff1b
                                                    5. \u8d44\u6e90\u6d88\u8017\uff1a\u7edf\u8ba1\u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e0b\u5bb9\u5668\u7ec4\u7684 CPU\u3001\u5185\u5b58\u4f7f\u7528\u91cf\u4e4b\u548c \u53ca CPU\u3001\u5185\u5b58\u914d\u989d\u60c5\u51b5\u3002
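A quick command-line cross-check of these numbers, assuming the cluster's metrics API is available and using a hypothetical namespace demo:

# Per-pod CPU and memory usage in the namespace\nkubectl top pods -n demo\n\n# CPU and memory quotas configured for the namespace\nkubectl get resourcequota -n demo\n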

                                                  "},{"location":"admin/insight/infra/namespace.html#_4","title":"\u6307\u6807\u8bf4\u660e","text":"\u6307\u6807\u540d \u8bf4\u660e CPU \u4f7f\u7528\u91cf \u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e2d\u5bb9\u5668\u7ec4\u7684 CPU \u4f7f\u7528\u91cf\u4e4b\u548c \u5185\u5b58\u4f7f\u7528\u91cf \u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e2d\u5bb9\u5668\u7ec4\u7684\u5185\u5b58\u4f7f\u7528\u91cf\u4e4b\u548c \u5bb9\u5668\u7ec4 CPU \u4f7f\u7528\u91cf \u547d\u540d\u7a7a\u95f4\u4e2d\u5404\u5bb9\u5668\u7ec4\u7684 CPU \u4f7f\u7528\u91cf \u5bb9\u5668\u7ec4\u5185\u5b58\u4f7f\u7528\u91cf \u547d\u540d\u7a7a\u95f4\u4e2d\u5404\u5bb9\u5668\u7ec4\u7684\u5185\u5b58\u4f7f\u7528\u91cf"},{"location":"admin/insight/infra/node.html","title":"\u8282\u70b9\u76d1\u63a7","text":"

Node monitoring gives you an overview of the current health status of the nodes in the selected cluster and the number of abnormal pods on them; on a node's detail page you can view the number of active alerts as well as trend charts of CPU, memory, disk, and other resource consumption.

"},{"location":"admin/insight/infra/node.html#_2","title":"Prerequisites","text":"

insight-agent is installed in the cluster and its applications are in the Running state.

                                                  "},{"location":"admin/insight/infra/node.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                  1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u4ea7\u54c1\u6a21\u5757\u3002

                                                  2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u57fa\u7840\u8bbe\u65bd -> \u8282\u70b9 \u3002\u5728\u8be5\u9875\u9762\u53ef\u67e5\u770b\u4ee5\u4e0b\u4fe1\u606f\uff1a

                                                    • \u96c6\u7fa4\u5207\u6362 \uff1a\u5207\u6362\u9876\u90e8\u7684\u4e0b\u62c9\u6846\u53ef\u5207\u6362\u96c6\u7fa4\uff1b
                                                    • \u8282\u70b9\u5217\u8868 \uff1a\u6240\u9009\u96c6\u7fa4\u4e2d\u7684\u8282\u70b9\u5217\u8868\uff0c\u5355\u51fb\u5207\u6362\u8282\u70b9\u3002
                                                    • \u6545\u969c \uff1a\u7edf\u8ba1\u5f53\u524d\u96c6\u7fa4\u4ea7\u751f\u7684\u544a\u8b66\u6570\u91cf\uff1b
                                                    • \u8d44\u6e90\u6d88\u8017 \uff1a\u6240\u9009\u8282\u70b9\u7684 CPU\u3001\u5185\u5b58\u3001\u78c1\u76d8\u7684\u5b9e\u9645\u4f7f\u7528\u91cf\u548c\u603b\u91cf\uff1b
                                                    • \u6307\u6807\u8bf4\u660e \uff1a\u6240\u9009\u8282\u70b9\u7684 CPU\u3001\u5185\u5b58\u3001\u78c1\u76d8\u8bfb\u5199\u3001\u7f51\u7edc\u63a5\u6536\u53d1\u9001\u7684\u53d8\u5316\u8d8b\u52bf\u3002

                                                  3. \u5207\u6362\u5230 \u8d44\u6e90\u6c34\u4f4d\u7ebf\u76d1\u63a7 \u9875\u7b7e\uff0c\u53ef\u67e5\u770b\u5f53\u524d\u8282\u70b9\u7684\u66f4\u591a\u76d1\u63a7\u6570\u636e\u3002

                                                  "},{"location":"admin/insight/infra/probe.html","title":"\u62e8\u6d4b","text":"

                                                  \u62e8\u6d4b\uff08Probe\uff09\u6307\u7684\u662f\u57fa\u4e8e\u9ed1\u76d2\u76d1\u63a7\uff0c\u5b9a\u671f\u901a\u8fc7 HTTP\u3001TCP \u7b49\u65b9\u5f0f\u5bf9\u76ee\u6807\u8fdb\u884c\u8fde\u901a\u6027\u6d4b\u8bd5\uff0c\u5feb\u901f\u53d1\u73b0\u6b63\u5728\u53d1\u751f\u7684\u6545\u969c\u3002

                                                  Insight \u57fa\u4e8e Prometheus Blackbox Exporter \u5de5\u5177\u901a\u8fc7 HTTP\u3001HTTPS\u3001DNS\u3001TCP \u548c ICMP \u7b49\u534f\u8bae\uff0c\u5bf9\u7f51\u7edc\u8fdb\u884c\u63a2\u6d4b\u5e76\u8fd4\u56de\u63a2\u6d4b\u7ed3\u679c\u4ee5\u4fbf\u4e86\u89e3\u7f51\u7edc\u72b6\u6001\u3002
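For reference, Blackbox Exporter probe behaviors are defined as modules in its configuration file; a minimal sketch of an HTTP and a TCP module (the module names http_2xx and tcp_connect follow the exporter's shipped defaults):

modules:\n  http_2xx:      # probe an HTTP(S) target and expect a 2xx response\n    prober: http\n    timeout: 5s\n  tcp_connect:   # probe a host:port for TCP connectivity\n    prober: tcp\n    timeout: 5s\n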

                                                  "},{"location":"admin/insight/infra/probe.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                  \u76ee\u6807\u96c6\u7fa4\u4e2d\u5df2\u6210\u529f\u90e8\u7f72 insight-agent\uff0c\u4e14\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u3002

                                                  "},{"location":"admin/insight/infra/probe.html#_3","title":"\u67e5\u770b\u62e8\u6d4b\u4efb\u52a1","text":"
                                                  1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u4ea7\u54c1\u6a21\u5757\uff1b
                                                  2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u57fa\u7840\u8bbe\u65bd -> \u62e8\u6d4b\u3002

                                                    • \u70b9\u51fb\u8868\u683c\u4e2d\u7684\u96c6\u7fa4\u6216\u547d\u540d\u7a7a\u95f4\u4e0b\u62c9\u6846\uff0c\u53ef\u5207\u6362\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4
                                                    • \u4f60\u53ef\u4ee5\u70b9\u51fb\u53f3\u4fa7\u7684 \u2699\ufe0f \u4fee\u6539\u663e\u793a\u7684\u5217\uff0c\u9ed8\u8ba4\u4e3a\u62e8\u6d4b\u540d\u79f0\u3001\u63a2\u6d4b\u65b9\u5f0f\u3001\u63a2\u6d4b\u76ee\u6807\u3001\u8fde\u901a\u72b6\u6001\u3001\u521b\u5efa\u65f6\u95f4
                                                    • \u8fde\u901a\u72b6\u6001\u6709 3 \u79cd\uff1a
                                                      • \u6b63\u5e38\uff1aProbe \u6210\u529f\u8fde\u63a5\u5230\u4e86\u76ee\u6807\uff0c\u76ee\u6807\u8fd4\u56de\u4e86\u9884\u671f\u7684\u54cd\u5e94
                                                      • \u5f02\u5e38\uff1aProbe \u65e0\u6cd5\u8fde\u63a5\u5230\u76ee\u6807\uff0c\u6216\u76ee\u6807\u6ca1\u6709\u8fd4\u56de\u9884\u671f\u7684\u54cd\u5e94
                                                      • Pending\uff1aProbe \u6b63\u5728\u5c1d\u8bd5\u8fde\u63a5\u76ee\u6807
                                                    • \u4f60\u53ef\u4ee5\u5728 \ud83d\udd0d \u641c\u7d22\u6846\u4e2d\u952e\u5165\u540d\u79f0\uff0c\u6a21\u7cca\u641c\u7d22\u67d0\u4e9b\u62e8\u6d4b\u4efb\u52a1

                                                  "},{"location":"admin/insight/infra/probe.html#_4","title":"\u521b\u5efa\u62e8\u6d4b\u4efb\u52a1","text":"
                                                  1. \u70b9\u51fb \u521b\u5efa\u62e8\u6d4b\u4efb\u52a1\u3002
                                                  2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65

                                                    • \u96c6\u7fa4\uff1a\u9009\u62e9\u9700\u8981\u62e8\u6d4b\u7684\u96c6\u7fa4
                                                    • \u547d\u540d\u7a7a\u95f4\uff1a\u62e8\u6d4b\u6240\u5728\u7684\u547d\u540d\u7a7a\u95f4

                                                  3. \u914d\u7f6e\u63a2\u6d4b\u53c2\u6570\u3002

                                                    • Blackbox \u5b9e\u4f8b\uff1a\u9009\u62e9\u8d1f\u8d23\u63a2\u6d4b\u7684 blackbox \u5b9e\u4f8b
                                                    • \u63a2\u6d4b\u65b9\u5f0f\uff1a
                                                      • HTTP\uff1a\u901a\u8fc7\u53d1\u9001 HTTP \u6216 HTTPS \u8bf7\u6c42\u5230\u76ee\u6807 URL\uff0c\u68c0\u6d4b\u5176\u8fde\u901a\u6027\u548c\u54cd\u5e94\u65f6\u95f4\uff0c\u8fd9\u53ef\u4ee5\u7528\u4e8e\u76d1\u6d4b\u7f51\u7ad9\u6216 Web \u5e94\u7528\u7684\u53ef\u7528\u6027\u548c\u6027\u80fd
                                                      • TCP\uff1a\u901a\u8fc7\u5efa\u7acb\u5230\u76ee\u6807\u4e3b\u673a\u548c\u7aef\u53e3\u7684 TCP \u8fde\u63a5\uff0c\u68c0\u6d4b\u5176\u8fde\u901a\u6027\u548c\u54cd\u5e94\u65f6\u95f4\u3002\u8fd9\u53ef\u4ee5\u7528\u4e8e\u76d1\u6d4b\u57fa\u4e8e TCP \u7684\u670d\u52a1\uff0c\u5982 Web \u670d\u52a1\u5668\u3001\u6570\u636e\u5e93\u670d\u52a1\u5668\u7b49
                                                      • \u5176\u4ed6\uff1a\u652f\u6301\u901a\u8fc7\u914d\u7f6e ConfigMap \u81ea\u5b9a\u4e49\u63a2\u6d4b\u65b9\u5f0f\uff0c\u53ef\u53c2\u8003\u81ea\u5b9a\u4e49\u62e8\u6d4b\u65b9\u5f0f
                                                    • \u63a2\u6d4b\u76ee\u6807\uff1a\u63a2\u6d4b\u7684\u76ee\u6807\u5730\u5740\uff0c\u652f\u6301\u57df\u540d\u6216 IP \u5730\u5740\u7b49
                                                    • \u6807\u7b7e\uff1a\u81ea\u5b9a\u4e49\u6807\u7b7e\uff0c\u8be5\u6807\u7b7e\u4f1a\u81ea\u52a8\u6dfb\u52a0\u5230 Prometheus \u7684 Label \u4e2d
                                                    • \u63a2\u6d4b\u95f4\u9694\uff1a\u63a2\u6d4b\u95f4\u9694\u65f6\u95f4
                                                    • \u63a2\u6d4b\u8d85\u65f6\uff1a\u63a2\u6d4b\u76ee\u6807\u65f6\u7684\u6700\u957f\u7b49\u5f85\u65f6\u95f4

                                                  4. \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u5b8c\u6210\u521b\u5efa\u3002
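Under the hood, Insight ships the Prometheus Operator stack, and the fields above map naturally onto its Probe custom resource; a hedged sketch in which all names, the prober URL, and the target are illustrative assumptions:

apiVersion: monitoring.coreos.com/v1\nkind: Probe\nmetadata:\n  name: example-probe            # probe task name\n  namespace: demo                # namespace the probe resides in\nspec:\n  prober:\n    url: blackbox-exporter:9115  # the blackbox instance responsible for probing\n  module: http_2xx               # probe method\n  targets:\n    staticConfig:\n      static:\n        - https://example.com    # probe target\n      labels:\n        env: test                # custom labels added to the Prometheus labels\n  interval: 60s                  # probe interval\n  scrapeTimeout: 10s             # probe timeout\n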

Warning

After a probe task is created, it takes roughly 3 minutes to synchronize the configuration. During this period no probing is performed, and probe results cannot be viewed yet.

"},{"location":"admin/insight/infra/probe.html#_5","title":"Edit a Probe Task","text":"

Click ┇ -> Edit on the right side of the list, then click OK when you finish editing.

                                                  "},{"location":"admin/insight/infra/probe.html#_6","title":"\u67e5\u770b\u76d1\u63a7\u9762\u677f","text":"

                                                  \u70b9\u51fb\u62e8\u6d4b\u540d\u79f0 \u67e5\u770b\u62e8\u6d4b\u4efb\u52a1\u4e2d\u6bcf\u4e2a\u76ee\u6807\u7684\u76d1\u63a7\u72b6\u6001\uff0c\u4ee5\u56fe\u8868\u65b9\u5f0f\u663e\u793a\u9488\u5bf9\u7f51\u7edc\u72b6\u51b5\u7684\u63a2\u6d4b\u7ed3\u679c\u3002
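The dashboard panels below correspond to standard Blackbox Exporter metrics such as probe_http_status_code, probe_success, probe_ip_protocol, probe_ssl_earliest_cert_expiry, probe_duration_seconds, and probe_http_duration_seconds. A sketch of querying one of them directly, assuming a reachable Prometheus-compatible query endpoint:

# 1 = probe succeeded, 0 = probe failed\ncurl -sG 'http://<prometheus-address>/api/v1/query' --data-urlencode 'query=probe_success'\n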

• Current Status Response: the response status code of the HTTP probe request
• Ping Status: whether the probe request succeeded; 1 means success and 0 means failure
• IP Protocol: the IP protocol version used by the probe request
• SSL Expiry: the earliest expiry time of the SSL/TLS certificate
• DNS Response (Latency): the duration of the entire probe process, in seconds
• HTTP Duration: the time of the entire process from sending the request to receiving the complete response

"},{"location":"admin/insight/infra/probe.html#_7","title":"Delete a Probe Task","text":"

Click ┇ -> Delete on the right side of the list, then click OK after confirming.

Caution

Deletion cannot be undone, so proceed with caution.

                                                  "},{"location":"admin/insight/quickstart/install/index.html","title":"\u5f00\u59cb\u89c2\u6d4b","text":"

                                                  AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\u5b9e\u73b0\u4e86\u5bf9\u591a\u4e91\u591a\u96c6\u7fa4\u7684\u7eb3\u7ba1\uff0c\u5e76\u652f\u6301\u521b\u5efa\u96c6\u7fa4\u3002\u5728\u6b64\u57fa\u7840\u4e0a\uff0c\u53ef\u89c2\u6d4b\u6027 Insight \u4f5c\u4e3a\u591a\u96c6\u7fa4\u7edf\u4e00\u89c2\u6d4b\u65b9\u6848\uff0c\u901a\u8fc7\u90e8\u7f72 insight-agent \u63d2\u4ef6\u5b9e\u73b0\u5bf9\u591a\u96c6\u7fa4\u89c2\u6d4b\u6570\u636e\u7684\u91c7\u96c6\uff0c\u5e76\u652f\u6301\u901a\u8fc7 AI \u7b97\u529b\u4e2d\u5fc3 \u53ef\u89c2\u6d4b\u6027\u4ea7\u54c1\u5b9e\u73b0\u5bf9\u6307\u6807\u3001\u65e5\u5fd7\u3001\u94fe\u8def\u6570\u636e\u7684\u67e5\u8be2\u3002

                                                  insight-agent \u662f\u53ef\u89c2\u6d4b\u6027\u5b9e\u73b0\u5bf9\u591a\u96c6\u7fa4\u6570\u636e\u91c7\u96c6\u7684\u5de5\u5177\uff0c\u5b89\u88c5\u540e\u65e0\u9700\u4efb\u4f55\u4fee\u6539\uff0c\u5373\u53ef\u5b9e\u73b0\u5bf9\u6307\u6807\u3001\u65e5\u5fd7\u4ee5\u53ca\u94fe\u8def\u6570\u636e\u7684\u81ea\u52a8\u5316\u91c7\u96c6\u3002

                                                  \u901a\u8fc7 \u5bb9\u5668\u7ba1\u7406 \u521b\u5efa\u7684\u96c6\u7fa4\u9ed8\u8ba4\u4f1a\u5b89\u88c5 insight-agent\uff0c\u6545\u5728\u6b64\u4ec5\u9488\u5bf9\u63a5\u5165\u7684\u96c6\u7fa4\u5982\u4f55\u5f00\u542f\u89c2\u6d4b\u80fd\u529b\u63d0\u4f9b\u6307\u5bfc\u3002

                                                  • \u5728\u7ebf\u5b89\u88c5 insight-agent

                                                  \u53ef\u89c2\u6d4b\u6027 Insight \u4f5c\u4e3a\u591a\u96c6\u7fa4\u7684\u7edf\u4e00\u89c2\u6d4b\u5e73\u53f0\uff0c\u5176\u90e8\u5206\u7ec4\u4ef6\u7684\u8d44\u6e90\u6d88\u8017\u4e0e\u521b\u5efa\u96c6\u7fa4\u7684\u6570\u636e\u3001\u63a5\u5165\u96c6\u7fa4\u7684\u6570\u91cf\u606f\u606f\u76f8\u5173\uff0c\u5728\u5b89\u88c5 insight-agent \u65f6\uff0c\u9700\u8981\u6839\u636e\u96c6\u7fa4\u89c4\u6a21\u5bf9\u76f8\u5e94\u7ec4\u4ef6\u7684\u8d44\u6e90\u8fdb\u884c\u8c03\u6574\u3002

                                                  1. \u6839\u636e\u521b\u5efa\u96c6\u7fa4\u7684\u89c4\u6a21\u6216\u63a5\u5165\u96c6\u7fa4\u7684\u89c4\u6a21\uff0c\u8c03\u6574 insight-agent \u4e2d\u91c7\u96c6\u7ec4\u4ef6 Prometheus \u7684 CPU \u548c\u5185\u5b58\uff0c\u8bf7\u53c2\u8003: Prometheus \u8d44\u6e90\u89c4\u5212

                                                  2. \u7531\u4e8e\u591a\u96c6\u7fa4\u7684\u6307\u6807\u6570\u636e\u4f1a\u7edf\u4e00\u5b58\u50a8\uff0c\u5219\u9700\u8981 AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\u7ba1\u7406\u5458\u6839\u636e\u521b\u5efa\u96c6\u7fa4\u7684\u89c4\u6a21\u3001\u63a5\u5165\u96c6\u7fa4\u7684\u89c4\u6a21\u5bf9\u5e94\u8c03\u6574 vmstorage \u7684\u78c1\u76d8\uff0c\u8bf7\u53c2\u8003\uff1avmstorage \u78c1\u76d8\u5bb9\u91cf\u89c4\u5212\u3002

                                                  3. \u5982\u4f55\u8c03\u6574 vmstorage \u7684\u78c1\u76d8\uff0c\u8bf7\u53c2\u8003\uff1avmstorge \u78c1\u76d8\u6269\u5bb9\u3002
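As a sketch of such an adjustment: the insight-agent chart nests kube-prometheus-stack (see the toleration examples later in this document), so Prometheus resources can be overridden at upgrade time; the concrete values below are illustrative only:

helm upgrade insight-agent insight/insight-agent \\\n  -n insight-system \\\n  --reuse-values \\\n  --set kube-prometheus-stack.prometheus.prometheusSpec.resources.requests.cpu=1 \\\n  --set kube-prometheus-stack.prometheus.prometheusSpec.resources.requests.memory=2Gi \\\n  --set kube-prometheus-stack.prometheus.prometheusSpec.resources.limits.memory=8Gi\n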

Because the AI 算力中心 platform supports managing multiple clouds and clusters, insight-agent has so far only been partially validated. A conflict between monitoring components causes problems when installing insight-agent on OpenShift 4.x clusters; if you encounter the same problem, refer to the following document:

• Install insight-agent on OpenShift 4.x
                                                  "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html","title":"\u5f00\u542f\u5927\u65e5\u5fd7\u548c\u5927\u94fe\u8def\u6a21\u5f0f","text":"

                                                  \u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u4e3a\u4e86\u63d0\u9ad8\u5927\u89c4\u6a21\u73af\u5883\u4e0b\u7684\u6570\u636e\u5199\u5165\u80fd\u529b\uff0c\u652f\u6301\u5c06\u65e5\u5fd7\u5207\u6362\u4e3a \u5927\u65e5\u5fd7 \u6a21\u5f0f\u3001\u5c06\u94fe\u8def\u5207\u6362\u4e3a \u5927\u94fe\u8def \u6a21\u5f0f\u3002\u672c\u6587\u5c06\u4ecb\u7ecd\u4ee5\u4e0b\u51e0\u79cd\u5f00\u542f\u65b9\u5f0f\uff1a

                                                  • \u901a\u8fc7\u5b89\u88c5\u5668\u5f00\u542f\u6216\u5347\u7ea7\u81f3\u5927\u65e5\u5fd7\u548c\u5927\u94fe\u8def\u6a21\u5f0f\uff08\u901a\u8fc7 manifest.yaml \u4e2d\u540c\u4e00\u4e2a\u53c2\u6570\u503c\u63a7\u5236\uff09
                                                  • \u901a\u8fc7 Helm \u547d\u4ee4\u624b\u52a8\u5f00\u542f\u5927\u65e5\u5fd7\u548c\u5927\u94fe\u8def\u6a21\u5f0f
                                                  "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_2","title":"\u65e5\u5fd7","text":"

                                                  \u672c\u8282\u8bf4\u660e\u666e\u901a\u65e5\u5fd7\u6a21\u5f0f\u548c\u5927\u65e5\u5fd7\u6a21\u5f0f\u7684\u533a\u522b\u3002

                                                  "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_3","title":"\u65e5\u5fd7\u6a21\u5f0f","text":"

                                                  \u7ec4\u4ef6\uff1aFluentbit + Elasticsearch

                                                  \u8be5\u6a21\u5f0f\u7b80\u79f0\u4e3a ES \u6a21\u5f0f\uff0c\u6570\u636e\u6d41\u56fe\u5982\u4e0b\u6240\u793a\uff1a

                                                  "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_4","title":"\u5927\u65e5\u5fd7\u6a21\u5f0f","text":"

                                                  \u7ec4\u4ef6\uff1aFluentbit + Kafka + Vector + Elasticsearch

                                                  \u8be5\u6a21\u5f0f\u7b80\u79f0\u4e3a Kafka \u6a21\u5f0f\uff0c\u6570\u636e\u6d41\u56fe\u5982\u4e0b\u6240\u793a\uff1a

                                                  "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_5","title":"\u94fe\u8def","text":"

                                                  \u672c\u8282\u8bf4\u660e\u666e\u901a\u94fe\u8def\u6a21\u5f0f\u548c\u5927\u94fe\u8def\u6a21\u5f0f\u7684\u533a\u522b\u3002

                                                  "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_6","title":"\u94fe\u8def\u6a21\u5f0f","text":"

                                                  \u7ec4\u4ef6\uff1aAgent opentelemetry-collector + Global opentelemetry-collector + Jaeger-collector + Elasticsearch

                                                  \u8be5\u6a21\u5f0f\u7b80\u79f0\u4e3a OTlp \u6a21\u5f0f\uff0c\u6570\u636e\u6d41\u56fe\u5982\u4e0b\u6240\u793a\uff1a

                                                  "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_7","title":"\u5927\u94fe\u8def\u6a21\u5f0f","text":"

                                                  \u7ec4\u4ef6\uff1aAgent opentelemetry-collector + Kafka + Global opentelemetry-collector + Jaeger-collector + Elasticsearch

                                                  \u8be5\u6a21\u5f0f\u7b80\u79f0\u4e3a Kafka \u6a21\u5f0f\uff0c\u6570\u636e\u6d41\u56fe\u5982\u4e0b\u6240\u793a\uff1a

                                                  "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_8","title":"\u901a\u8fc7\u5b89\u88c5\u5668\u5f00\u542f","text":"

                                                  \u901a\u8fc7\u5b89\u88c5\u5668\u90e8\u7f72/\u5347\u7ea7 AI \u7b97\u529b\u4e2d\u5fc3 \u65f6\u4f7f\u7528\u7684 manifest.yaml \u4e2d\u5b58\u5728 infrastructures.kafka \u5b57\u6bb5\uff0c \u5982\u679c\u60f3\u5f00\u542f\u53ef\u89c2\u6d4b\u7684\u5927\u65e5\u5fd7\u548c\u5927\u94fe\u8def\u6a21\u5f0f\uff0c\u5219\u9700\u8981\u542f\u7528 kafka\uff1a

                                                  manifest.yaml
                                                  apiVersion: manifest.daocloud.io/v1alpha1\nkind: DCEManifest\n...\ninfrastructures:\n  ...\n  kafka:\n    enable: true # \u9ed8\u8ba4\u4e3a false\n    cpuLimit: 1\n    memLimit: 2Gi\n    pvcSize: 15Gi\n
                                                  "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_9","title":"\u5f00\u542f","text":"

                                                  \u5b89\u88c5\u65f6\u4f7f\u7528\u542f\u7528 kafka \u7684 manifest.yaml\uff0c\u5219\u4f1a\u9ed8\u8ba4\u5b89\u88c5 kafka \u4e2d\u95f4\u4ef6\uff0c \u5e76\u5728\u5b89\u88c5 Insight \u65f6\u9ed8\u8ba4\u5f00\u542f\u5927\u65e5\u5fd7\u548c\u5927\u94fe\u8def\u6a21\u5f0f\u3002\u5b89\u88c5\u547d\u4ee4\u4e3a\uff1a

                                                  ./dce5-installer cluster-create -c clusterConfig.yaml -m manifest.yaml\n
                                                  "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_10","title":"\u5347\u7ea7","text":"

                                                  \u5347\u7ea7\u540c\u6837\u662f\u4fee\u6539 kafka \u5b57\u6bb5\u3002\u4f46\u9700\u8981\u6ce8\u610f\u7684\u662f\uff0c\u56e0\u4e3a\u8001\u73af\u5883\u5b89\u88c5\u65f6\u4f7f\u7528\u7684\u662f kafka: false\uff0c \u6240\u4ee5\u73af\u5883\u4e2d\u65e0 kafka\u3002\u6b64\u65f6\u5347\u7ea7\u9700\u8981\u6307\u5b9a\u5347\u7ea7 middleware\uff0c\u624d\u4f1a\u540c\u65f6\u5b89\u88c5 kafka \u4e2d\u95f4\u4ef6\u3002\u5347\u7ea7\u547d\u4ee4\u4e3a\uff1a

                                                  ./dce5-installer cluster-create -c clusterConfig.yaml -m manifest.yaml -u gproduct,middleware\n

                                                  Note

                                                  \u5728\u5347\u7ea7\u5b8c\u6210\u540e\uff0c\u9700\u8981\u624b\u52a8\u91cd\u542f\u4ee5\u4e0b\u7ec4\u4ef6\uff1a

                                                  • insight-agent-fluent-bit
                                                  • insight-agent-opentelemetry-collector
                                                  • insight-opentelemetry-collector
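A restart sketch for these components, assuming fluent-bit runs as a DaemonSet and the two collectors as Deployments in insight-system, which matches a default installation:

kubectl -n insight-system rollout restart daemonset insight-agent-fluent-bit\nkubectl -n insight-system rollout restart deployment insight-agent-opentelemetry-collector\nkubectl -n insight-system rollout restart deployment insight-opentelemetry-collector\n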
                                                  "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#helm","title":"\u901a\u8fc7 Helm \u547d\u4ee4\u5f00\u542f","text":"

                                                  \u524d\u63d0\u6761\u4ef6\uff1a\u9700\u8981\u4fdd\u8bc1\u5b58\u5728 \u53ef\u7528\u7684 kafka \u4e14\u5730\u5740\u53ef\u6b63\u5e38\u8bbf\u95ee\u3002

                                                  \u6839\u636e\u4ee5\u4e0b\u547d\u4ee4\u83b7\u53d6\u8001\u7248\u672c insight \u548c insight-agent \u7684 values\uff08\u5efa\u8bae\u505a\u597d\u5907\u4efd\uff09\uff1a

                                                  helm get values insight -n insight-system -o yaml > insight.yaml\nhelm get values insight-agent -n insight-system -o yaml > insight-agent.yaml\n
                                                  "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_11","title":"\u5f00\u542f\u5927\u65e5\u5fd7","text":"

                                                  \u6709\u4ee5\u4e0b\u51e0\u79cd\u65b9\u5f0f\u5f00\u542f\u6216\u5347\u7ea7\u81f3\u5927\u65e5\u5fd7\u6a21\u5f0f\uff1a

                                                  \u5728 helm upgrade \u547d\u4ee4\u4e2d\u4f7f\u7528 --set\u4fee\u6539 YAML \u540e\u8fd0\u884c helm upgrade\u5bb9\u5668\u7ba1\u7406 UI \u5347\u7ea7

                                                  \u5148\u8fd0\u884c\u4ee5\u4e0b insight \u5347\u7ea7\u547d\u4ee4\uff0c\u6ce8\u610f kafka brokers \u5730\u5740\u9700\u6b63\u786e\uff1a

                                                  helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --set global.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.kafka.enabled=true \\\n  --set vector.enabled=true \\\n  --version 0.30.1\n

Then run the following insight-agent upgrade command, again making sure the kafka brokers address is correct:

                                                  helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --set global.exporters.logging.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.exporters.logging.output=kafka \\\n  --version 0.30.1\n

For the second way, modify the YAML as follows and then run the helm upgrade command:

1. Modify insight.yaml

  insight.yaml
                                                    global:\n  ...\n  kafka:\n    brokers: 10.6.216.111:30592\n    enabled: true\n...\nvector:\n  enabled: true\n
2. Upgrade the insight component:

                                                    helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --version 0.30.1\n
3. Modify insight-agent.yaml

  insight-agent.yaml
                                                    global:\n  ...\n  exporters:\n    ...\n    logging:\n      ...\n      kafka:\n        brokers: 10.6.216.111:30592\n      output: kafka\n
4. Upgrade insight-agent:

                                                    helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --version 0.30.1\n

For the third way, find the corresponding cluster in the Container Management module, select Helm Apps from the left navigation bar, then find and update insight-agent.

In Logging Settings, choose kafka for output and fill in the correct brokers address.

Note that after the upgrade completes, the insight-agent-fluent-bit component must be restarted manually (the rollout restart sketch earlier in this article applies).

                                                  "},{"location":"admin/insight/quickstart/install/big-log-and-trace.html#_12","title":"\u5f00\u542f\u5927\u94fe\u8def","text":"

                                                  \u6709\u4ee5\u4e0b\u51e0\u79cd\u65b9\u5f0f\u5f00\u542f\u6216\u5347\u7ea7\u81f3\u5927\u94fe\u8def\u6a21\u5f0f\uff1a

                                                  \u5728 helm upgrade \u547d\u4ee4\u4e2d\u4f7f\u7528 --set\u4fee\u6539 YAML \u540e\u8fd0\u884c helm upgrade\u5bb9\u5668\u7ba1\u7406 UI \u5347\u7ea7

                                                  \u5148\u8fd0\u884c\u4ee5\u4e0b insight \u5347\u7ea7\u547d\u4ee4\uff0c\u6ce8\u610f kafka brokers \u5730\u5740\u9700\u6b63\u786e\uff1a

                                                  helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --set global.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.kafka.enabled=true \\\n  --set global.tracing.kafkaReceiver.enabled=true \\\n  --version 0.30.1\n

Then run the following insight-agent upgrade command, again making sure the kafka brokers address is correct:

                                                  helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --set global.exporters.trace.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.exporters.trace.output=kafka \\\n  --version 0.30.1\n

For the second way, modify the YAML as follows and then run the helm upgrade command:

1. Modify insight.yaml

  insight.yaml
                                                    global:\n  ...\n  kafka:\n    brokers: 10.6.216.111:30592\n    enabled: true\n...\ntracing:\n  ...\n  kafkaReceiver:\n    enabled: true\n
2. Upgrade the insight component:

                                                    helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --version 0.30.1\n
3. Modify insight-agent.yaml

  insight-agent.yaml
                                                    global:\n  ...\n  exporters:\n    ...\n    trace:\n      ...\n      kafka:\n        brokers: 10.6.216.111:30592\n      output: kafka\n
4. Upgrade insight-agent:

                                                    helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --version 0.30.1\n

For the third way, find the corresponding cluster in the Container Management module, select Helm Apps from the left navigation bar, then find and update insight-agent.

In Trace Settings, choose kafka for output and fill in the correct brokers address.

Note that after the upgrade completes, the insight-agent-opentelemetry-collector and insight-opentelemetry-collector components must be restarted manually (the rollout restart sketch earlier in this article applies).

                                                  "},{"location":"admin/insight/quickstart/install/component-scheduling.html","title":"\u81ea\u5b9a\u4e49 Insight \u7ec4\u4ef6\u8c03\u5ea6\u7b56\u7565","text":"

                                                  \u5f53\u90e8\u7f72\u53ef\u89c2\u6d4b\u5e73\u53f0 Insight \u5230 Kubernetes \u73af\u5883\u65f6\uff0c\u6b63\u786e\u7684\u8d44\u6e90\u7ba1\u7406\u548c\u4f18\u5316\u81f3\u5173\u91cd\u8981\u3002 Insight \u5305\u542b\u591a\u4e2a\u6838\u5fc3\u7ec4\u4ef6\uff0c\u5982 Prometheus\u3001OpenTelemetry\u3001FluentBit\u3001Vector\u3001Elasticsearch \u7b49\uff0c \u8fd9\u4e9b\u7ec4\u4ef6\u5728\u8fd0\u884c\u8fc7\u7a0b\u4e2d\u53ef\u80fd\u56e0\u4e3a\u8d44\u6e90\u5360\u7528\u95ee\u9898\u5bf9\u96c6\u7fa4\u5185\u5176\u4ed6 Pod \u7684\u6027\u80fd\u4ea7\u751f\u8d1f\u9762\u5f71\u54cd\u3002 \u4e3a\u4e86\u6709\u6548\u5730\u7ba1\u7406\u8d44\u6e90\u5e76\u4f18\u5316\u96c6\u7fa4\u7684\u8fd0\u884c\uff0c\u8282\u70b9\u4eb2\u548c\u6027\u6210\u4e3a\u4e00\u9879\u91cd\u8981\u7684\u914d\u7f6e\u9009\u9879\u3002

                                                  \u672c\u6587\u5c06\u91cd\u70b9\u63a2\u8ba8\u5982\u4f55\u901a\u8fc7\u6c61\u70b9\u548c\u8282\u70b9\u4eb2\u548c\u6027\u7684\u914d\u7f6e\u7b56\u7565\uff0c\u4f7f\u5f97\u6bcf\u4e2a\u7ec4\u4ef6\u80fd\u591f\u5728\u9002\u5f53\u7684\u8282\u70b9\u4e0a\u8fd0\u884c\uff0c \u5e76\u907f\u514d\u8d44\u6e90\u7ade\u4e89\u6216\u4e89\u7528\uff0c\u4ece\u800c\u786e\u4fdd\u6574\u4e2a Kubernetes \u96c6\u7fa4\u7684\u7a33\u5b9a\u6027\u548c\u9ad8\u6548\u6027\u3002

                                                  "},{"location":"admin/insight/quickstart/install/component-scheduling.html#insight_1","title":"\u901a\u8fc7\u6c61\u70b9\u4e3a Insight \u914d\u7f6e\u4e13\u6709\u8282\u70b9","text":"

                                                  \u7531\u4e8e Insight Agent \u5305\u542b\u4e86 DaemonSet \u7ec4\u4ef6\uff0c\u6240\u4ee5\u672c\u8282\u6240\u8ff0\u7684\u914d\u7f6e\u65b9\u5f0f\u662f\u8ba9\u9664\u4e86 Insight DameonSet \u4e4b\u5916\u7684\u5176\u4f59\u7ec4\u4ef6\u5747\u8fd0\u884c\u5728\u4e13\u6709\u8282\u70b9\u4e0a\u3002

                                                  \u8be5\u65b9\u5f0f\u662f\u901a\u8fc7\u4e3a\u4e13\u6709\u8282\u70b9\u6dfb\u52a0\u6c61\u70b9\uff08taint\uff09\uff0c\u5e76\u914d\u5408\u6c61\u70b9\u5bb9\u5fcd\u5ea6\uff08tolerations\uff09\u6765\u5b9e\u73b0\u7684\u3002 \u66f4\u591a\u7ec6\u8282\u53ef\u4ee5\u53c2\u8003 Kubernetes \u5b98\u65b9\u6587\u6863\u3002

                                                  \u53ef\u4ee5\u53c2\u8003\u5982\u4e0b\u547d\u4ee4\u4e3a\u8282\u70b9\u6dfb\u52a0\u53ca\u79fb\u9664\u6c61\u70b9\uff1a

# Add the taint\nkubectl taint nodes worker1 node.daocloud.io=insight-only:NoSchedule\n\n# Remove the taint\nkubectl taint nodes worker1 node.daocloud.io:NoSchedule-\n

There are two ways to schedule Insight components onto the dedicated nodes:

"},{"location":"admin/insight/quickstart/install/component-scheduling.html#1","title":"1. Add Taint Tolerations for Each Component","text":"

Configure the insight-server and insight-agent Charts separately; the insight-server Chart configuration comes first, followed by the insight-agent Chart configuration:
                                                  server:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nui:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nrunbook:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\n# mysql:\nvictoria-metrics-k8s-stack:\n  victoria-metrics-operator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  vmcluster:\n    spec:\n      vmstorage:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n      vmselect:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n      vminsert:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n  vmalert:\n    spec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n  alertmanager:\n    spec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n\njaeger:\n  collector:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  query:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n\nopentelemetry-collector-aggregator:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nopentelemetry-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\ngrafana-operator:\n  operator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  grafana:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\nkibana:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nelastic-alert:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nvector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n
                                                  kube-prometheus-stack:\n  prometheus:\n    prometheusSpec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n  prometheus-node-exporter:\n    tolerations:\n      - effect: NoSchedule\n        operator: Exists\n  prometheusOperator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n\nkube-state-metrics:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-operator:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\ntailing-sidecar-operator:\n  operator:\n    tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-kubernetes-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nprometheus-blackbox-exporter:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\netcd-exporter:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\" \n
                                                  "},{"location":"admin/insight/quickstart/install/component-scheduling.html#2","title":"2. \u901a\u8fc7\u547d\u540d\u7a7a\u95f4\u7ea7\u522b\u914d\u7f6e","text":"

                                                  \u8ba9 insight-system \u547d\u540d\u7a7a\u95f4\u7684 Pod \u90fd\u5bb9\u5fcd node.daocloud.io=insight-only \u6c61\u70b9\u3002

                                                  1. \u8c03\u6574 apiserver \u7684\u914d\u7f6e\u6587\u4ef6 /etc/kubernetes/manifests/kube-apiserver.yaml\uff0c\u653e\u5f00 PodTolerationRestriction,PodNodeSelector, \u53c2\u8003\u4e0b\u56fe\uff1a
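  A minimal excerpt of that manifest change, assuming a standard kubeadm layout (append the two plugins to whatever admission plugins your apiserver already enables):

  # /etc/kubernetes/manifests/kube-apiserver.yaml (excerpt)\nspec:\n  containers:\n    - command:\n        - kube-apiserver\n        - --enable-admission-plugins=NodeRestriction,PodTolerationRestriction,PodNodeSelector\n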

2. Add an annotation to the insight-system namespace:

                                                    apiVersion: v1\nkind: Namespace\nmetadata:\n  name: insight-system\n  annotations:\n    scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Equal\", \"effect\": \"NoSchedule\", \"key\": \"node.daocloud.io\", \"value\": \"insight-only\"}]'\n

Restart the components in the insight-system namespace, and the Pods under insight-system can then tolerate the taint and be scheduled normally.

                                                  "},{"location":"admin/insight/quickstart/install/component-scheduling.html#label","title":"\u4e3a\u8282\u70b9\u6dfb\u52a0 Label \u548c\u8282\u70b9\u4eb2\u548c\u6027\u6765\u7ba1\u7406\u7ec4\u4ef6\u8c03\u5ea6","text":"

                                                  Info

                                                  \u8282\u70b9\u4eb2\u548c\u6027\u6982\u5ff5\u4e0a\u7c7b\u4f3c\u4e8e nodeSelector\uff0c\u5b83\u4f7f\u4f60\u53ef\u4ee5\u6839\u636e\u8282\u70b9\u4e0a\u7684 \u6807\u7b7e(label) \u6765\u7ea6\u675f Pod \u53ef\u4ee5\u8c03\u5ea6\u5230\u54ea\u4e9b\u8282\u70b9\u4e0a\u3002 \u8282\u70b9\u4eb2\u548c\u6027\u6709\u4e24\u79cd\uff1a

                                                  1. requiredDuringSchedulingIgnoredDuringExecution\uff1a\u8c03\u5ea6\u5668\u53ea\u6709\u5728\u89c4\u5219\u88ab\u6ee1\u8db3\u7684\u65f6\u5019\u624d\u80fd\u6267\u884c\u8c03\u5ea6\u3002\u6b64\u529f\u80fd\u7c7b\u4f3c\u4e8e nodeSelector\uff0c \u4f46\u5176\u8bed\u6cd5\u8868\u8fbe\u80fd\u529b\u66f4\u5f3a\u3002
                                                  2. preferredDuringSchedulingIgnoredDuringExecution\uff1a\u8c03\u5ea6\u5668\u4f1a\u5c1d\u8bd5\u5bfb\u627e\u6ee1\u8db3\u5bf9\u5e94\u89c4\u5219\u7684\u8282\u70b9\u3002\u5982\u679c\u627e\u4e0d\u5230\u5339\u914d\u7684\u8282\u70b9\uff0c\u8c03\u5ea6\u5668\u4ecd\u7136\u4f1a\u8c03\u5ea6\u8be5 Pod\u3002

                                                  \u66f4\u8fc7\u7ec6\u8282\u8bf7\u53c2\u8003 kubernetes \u5b98\u65b9\u6587\u6863\u3002

                                                  \u4e3a\u4e86\u5b9e\u73b0\u4e0d\u540c\u7528\u6237\u5bf9 Insight \u7ec4\u4ef6\u8c03\u5ea6\u7684\u7075\u6d3b\u9700\u6c42\uff0cInsight \u5206\u522b\u63d0\u4f9b\u4e86\u8f83\u4e3a\u7ec6\u7c92\u5ea6\u7684 Label \u6765\u5b9e\u73b0\u4e0d\u540c\u7ec4\u4ef6\u7684\u8c03\u5ea6\u7b56\u7565\uff0c\u4ee5\u4e0b\u662f\u6807\u7b7e\u4e0e\u7ec4\u4ef6\u7684\u5173\u7cfb\u8bf4\u660e\uff1a

                                                  \u6807\u7b7e Key \u6807\u7b7e Value \u8bf4\u660e node.daocloud.io/insight-any \u4efb\u610f\u503c\uff0c\u63a8\u8350\u7528 true \u4ee3\u8868 Insight \u6240\u6709\u7ec4\u4ef6\u4f18\u5148\u8003\u8651\u5e26\u4e86\u8be5\u6807\u7b7e\u7684\u8282\u70b9 node.daocloud.io/insight-prometheus \u4efb\u610f\u503c\uff0c\u63a8\u8350\u7528 true \u7279\u6307 Prometheus \u7ec4\u4ef6 node.daocloud.io/insight-vmstorage \u4efb\u610f\u503c\uff0c\u63a8\u8350\u7528 true \u7279\u6307 VictoriaMetrics vmstorage \u7ec4\u4ef6 node.daocloud.io/insight-vector \u4efb\u610f\u503c\uff0c\u63a8\u8350\u7528 true \u7279\u6307 Vector \u7ec4\u4ef6 node.daocloud.io/insight-otel-col \u4efb\u610f\u503c\uff0c\u63a8\u8350\u7528 true \u7279\u6307 OpenTelemetry \u7ec4\u4ef6

You can refer to the following commands to add and remove node labels:

# Label node8 so that insight-prometheus is scheduled to node8 first\nkubectl label nodes node8 node.daocloud.io/insight-prometheus=true\n\n# Remove the node.daocloud.io/insight-prometheus label from node8\nkubectl label nodes node8 node.daocloud.io/insight-prometheus-\n

The following is the default affinity preference of the insight-prometheus component at deployment time:

                                                  affinity:\n  nodeAffinity:\n    preferredDuringSchedulingIgnoredDuringExecution:\n    - preference:\n        matchExpressions:\n        - key: node-role.kubernetes.io/control-plane\n          operator: DoesNotExist\n      weight: 1\n    - preference:\n        matchExpressions:\n        - key: node.daocloud.io/insight-prometheus # (1)!\n          operator: Exists\n      weight: 2\n    - preference:\n        matchExpressions:\n        - key: node.daocloud.io/insight-any\n          operator: Exists\n      weight: 3\n    podAntiAffinity:\n      preferredDuringSchedulingIgnoredDuringExecution:\n        - weight: 1\n          podAffinityTerm:\n            topologyKey: kubernetes.io/hostname\n            labelSelector:\n              matchExpressions:\n                - key: app.kubernetes.io/instance\n                  operator: In\n                  values:\n                    - insight-agent-kube-prometh-prometheus\n
1. Schedule insight-prometheus first to nodes carrying the node.daocloud.io/insight-prometheus label
                                                  "},{"location":"admin/insight/quickstart/install/gethosturl.html","title":"\u83b7\u53d6\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u6570\u636e\u5b58\u50a8\u5730\u5740","text":"

                                                  \u53ef\u89c2\u6d4b\u6027\u662f\u591a\u96c6\u7fa4\u7edf\u4e00\u89c2\u6d4b\u7684\u4ea7\u54c1\uff0c\u4e3a\u5b9e\u73b0\u5bf9\u591a\u96c6\u7fa4\u89c2\u6d4b\u6570\u636e\u7684\u7edf\u4e00\u5b58\u50a8\u3001\u67e5\u8be2\uff0c \u5b50\u96c6\u7fa4\u9700\u8981\u5c06\u91c7\u96c6\u7684\u89c2\u6d4b\u6570\u636e\u4e0a\u62a5\u7ed9\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u8fdb\u884c\u7edf\u4e00\u5b58\u50a8\u3002 \u672c\u6587\u63d0\u4f9b\u4e86\u5728\u5b89\u88c5\u91c7\u96c6\u7ec4\u4ef6 insight-agent \u65f6\u5fc5\u586b\u7684\u5b58\u50a8\u7ec4\u4ef6\u7684\u5730\u5740\u3002

                                                  "},{"location":"admin/insight/quickstart/install/gethosturl.html#insight-agent","title":"\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u5b89\u88c5 insight-agent","text":"

                                                  \u5982\u679c\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u5b89\u88c5 insight-agent\uff0c\u63a8\u8350\u901a\u8fc7\u57df\u540d\u6765\u8bbf\u95ee\u96c6\u7fa4\uff1a

                                                  export vminsert_host=\"vminsert-insight-victoria-metrics-k8s-stack.insight-system.svc.cluster.local\" # (1)!\nexport es_host=\"insight-es-master.insight-system.svc.cluster.local\" # (2)!\nexport otel_col_host=\"insight-opentelemetry-collector.insight-system.svc.cluster.local\" # (3)!\n
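These variables can then be passed to the insight-agent installation; a sketch based on the --set keys used elsewhere in this document (the chart and release names follow the Helm deployment section below):

helm upgrade --install insight-agent insight/insight-agent \\\n  -n insight-system --create-namespace \\\n  --set global.exporters.metric.host=${vminsert_host} \\\n  --set global.exporters.logging.elasticsearch.host=${es_host} \\\n  --set global.exporters.trace.host=${otel_col_host}\n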
                                                  "},{"location":"admin/insight/quickstart/install/gethosturl.html#insight-agent_1","title":"\u5728\u5176\u4ed6\u96c6\u7fa4\u5b89\u88c5 insight-agent","text":""},{"location":"admin/insight/quickstart/install/gethosturl.html#insight-server","title":"\u901a\u8fc7 Insight Server \u63d0\u4f9b\u7684\u63a5\u53e3\u83b7\u53d6\u5730\u5740","text":"
                                                  1. \u7ba1\u7406\u96c6\u7fa4\u4f7f\u7528\u9ed8\u8ba4\u7684 LoadBalancer \u65b9\u5f0f\u66b4\u9732

                                                    \u767b\u5f55\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u63a7\u5236\u53f0\uff0c\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a

                                                    export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam'\n

  Note

  Replace the ${INSIGHT_SERVER_IP} parameter in the command as needed.

  The following return value is obtained:

                                                    {\n  \"values\": {\n    \"global\": {\n      \"exporters\": {\n        \"logging\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"metric\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"auditLog\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"trace\": {\n          \"host\": \"10.6.182.32\"\n        }\n      }\n    },\n    \"opentelemetry-operator\": {\n      \"enabled\": true\n    },\n    \"opentelemetry-collector\": {\n      \"enabled\": true\n    }\n  }\n}\n
  • global.exporters.logging.host is the log service address; there is no need to set the corresponding service ports, the defaults are used
  • global.exporters.metric.host is the metric service address
  • global.exporters.trace.host is the trace service address
  • global.exporters.auditLog.host is the audit log service address (the same service as tracing, on a different port)
2. LoadBalancer is disabled in the management cluster

  When calling the interface, you additionally need to pass the IP of any externally reachable node in the cluster; this IP is used to assemble the complete access addresses of the corresponding services.

                                                    export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam' --data '{\"extra\": {\"EXPORTER_EXTERNAL_IP\": \"10.5.14.51\"}}'\n

  The following return value is obtained:

                                                    {\n  \"values\": {\n    \"global\": {\n      \"exporters\": {\n        \"logging\": {\n          \"scheme\": \"https\",\n          \"host\": \"10.5.14.51\",\n          \"port\": 32007,\n          \"user\": \"elastic\",\n          \"password\": \"j8V1oVoM1184HvQ1F3C8Pom2\"\n        },\n        \"metric\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30683\n        },\n        \"auditLog\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30884\n        },\n        \"trace\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30274\n        }\n      }\n    },\n    \"opentelemetry-operator\": {\n      \"enabled\": true\n    },\n    \"opentelemetry-collector\": {\n      \"enabled\": true\n    }\n  }\n}\n
  • global.exporters.logging.host is the log service address
  • global.exporters.logging.port is the NodePort exposed by the log service
  • global.exporters.metric.host is the metric service address
  • global.exporters.metric.port is the NodePort exposed by the metric service
  • global.exporters.trace.host is the trace service address
  • global.exporters.trace.port is the NodePort exposed by the trace service
  • global.exporters.auditLog.host is the audit log service address (the same service as tracing, on a different port)
  • global.exporters.auditLog.port is the NodePort exposed by the audit log service
                                                  "},{"location":"admin/insight/quickstart/install/gethosturl.html#loadbalancer","title":"\u901a\u8fc7 LoadBalancer \u8fde\u63a5","text":"
                                                  1. \u82e5\u96c6\u7fa4\u4e2d\u5f00\u542f LoadBalancer \u4e14\u4e3a Insight \u8bbe\u7f6e\u4e86 VIP \u65f6\uff0c\u60a8\u4e5f\u53ef\u4ee5\u624b\u52a8\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u83b7\u53d6 vminsert \u4ee5\u53ca opentelemetry-collector \u7684\u5730\u5740\u4fe1\u606f\uff1a

                                                    $ kubectl get service -n insight-system | grep lb\nlb-insight-opentelemetry-collector               LoadBalancer   10.233.23.12    <pending>     4317:31286/TCP,8006:31351/TCP  24d\nlb-vminsert-insight-victoria-metrics-k8s-stack   LoadBalancer   10.233.63.67    <pending>     8480:31629/TCP                 24d\n
  • lb-vminsert-insight-victoria-metrics-k8s-stack is the address of the metric service
  • lb-insight-opentelemetry-collector is the address of the trace service
2. Run the following command to get the elasticsearch address information:

                                                    $ kubectl get service -n mcamel-system | grep es\nmcamel-common-es-cluster-masters-es-http               NodePort    10.233.16.120   <none>        9200:30465/TCP               47d\n

mcamel-common-es-cluster-masters-es-http is the address of the log service
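If you only need the exposed port, it can be extracted directly; a convenience sketch using the service shown above:

kubectl get svc mcamel-common-es-cluster-masters-es-http -n mcamel-system \\\n  -o jsonpath='{.spec.ports[0].nodePort}'\n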

                                                  "},{"location":"admin/insight/quickstart/install/gethosturl.html#nodeport","title":"\u901a\u8fc7 NodePort \u8fde\u63a5","text":"

                                                  \u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7981\u7528 LB \u7279\u6027

                                                  \u5728\u8be5\u60c5\u51b5\u4e0b\uff0c\u9ed8\u8ba4\u4e0d\u4f1a\u521b\u5efa\u4e0a\u8ff0\u7684 LoadBalancer \u8d44\u6e90\uff0c\u5bf9\u5e94\u670d\u52a1\u540d\u4e3a\uff1a

                                                  • vminsert-insight-victoria-metrics-k8s-stack\uff08\u6307\u6807\u670d\u52a1\uff09
                                                  • common-es\uff08\u65e5\u5fd7\u670d\u52a1\uff09
                                                  • insight-opentelemetry-collector\uff08\u94fe\u8def\u670d\u52a1\uff09

                                                  \u4e0a\u9762\u4e24\u79cd\u60c5\u51b5\u83b7\u53d6\u5230\u5bf9\u5e94\u670d\u52a1\u7684\u5bf9\u5e94\u7aef\u53e3\u4fe1\u606f\u540e\uff0c\u8fdb\u884c\u5982\u4e0b\u8bbe\u7f6e\uff1a

                                                  --set global.exporters.logging.host=  # (1)!\n--set global.exporters.logging.port=  # (2)!\n--set global.exporters.metric.host=   # (3)!\n--set global.exporters.metric.port=   # (4)!\n--set global.exporters.trace.host=    # (5)!\n--set global.exporters.trace.port=    # (6)!\n--set global.exporters.auditLog.host= # (7)!\n
1. Externally reachable NodeIP of the management cluster
2. NodePort corresponding to port 9200 of the log service
3. Externally reachable NodeIP of the management cluster
4. NodePort corresponding to port 8480 of the metric service
5. Externally reachable NodeIP of the management cluster
6. NodePort corresponding to port 4317 of the trace service
7. Externally reachable NodeIP of the management cluster
                                                  "},{"location":"admin/insight/quickstart/install/helm-installagent.html","title":"\u901a\u8fc7 Helm \u90e8\u7f72 Insight Agent","text":"

                                                  \u672c\u6587\u63cf\u8ff0\u4e86\u5728\u547d\u4ee4\u884c\u4e2d\u901a\u8fc7 Helm \u547d\u4ee4\u5b89\u88c5 Insight Agent \u793e\u533a\u7248\u7684\u64cd\u4f5c\u6b65\u9aa4\u3002

                                                  "},{"location":"admin/insight/quickstart/install/helm-installagent.html#insight-agent","title":"\u5b89\u88c5 Insight Agent","text":"
                                                  1. \u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u6dfb\u52a0\u955c\u50cf\u4ed3\u5e93\u7684\u5730\u5740

                                                    helm repo add insight https://release.daocloud.io/chartrepo/insight\nhelm repo upgrade\nhelm search repo  insight/insight-agent --versions\n
                                                  2. \u5b89\u88c5 Insight Agent \u9700\u8981\u786e\u4fdd\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e2d\u7684 Insight Server \u6b63\u5e38\u8fd0\u884c\uff0c\u6267\u884c\u4ee5\u4e0b\u5b89\u88c5\u547d\u4ee4\u5b89\u88c5 Insight Agent \u793e\u533a\u7248\uff0c\u8be5\u914d\u7f6e\u4e0d\u542f\u7528 Tracing \u529f\u80fd\uff1a

                                                    helm upgrade --install --create-namespace --cleanup-on-fail \\\n    --version ${version} \\      # \u8bf7\u6307\u5b9a\u90e8\u7f72\u7248\u672c\n    insight-agent  insight/insight-agent \\\n    --set global.exporters.logging.elasticsearch.host=10.10.10.x \\    # \u8bf7\u66ff\u6362\u201c10.10.10.x\" \u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u6216\u5916\u7f6e\u7684 Elasticsearch \u7684\u5730\u5740\n    --set global.exporters.logging.elasticsearch.port=32517 \\     # \u8bf7\u66ff\u6362\u201c32517\" \u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u6216\u5916\u7f6e\u7684 Elasticsearch \u66b4\u9732\u7684\u7aef\u53e3\n    --set global.exporters.logging.elasticsearch.user=elastic \\     # \u8bf7\u66ff\u6362\u201celastic\" \u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u6216\u5916\u7f6e\u7684 Elasticsearch \u7684\u7528\u6237\u540d\n    --set global.exporters.logging.elasticsearch.password=dangerous \\  # \u8bf7\u66ff\u6362\u201cdangerous\" \u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u6216\u5916\u7f6e\u7684 Elasticsearch \u7684\u5bc6\u7801\n    --set global.exporters.metric.host=${vminsert_address} \\    # \u8bf7\u66ff\u6362\u201c10.10.10.x\" \u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e2d vminsert \u7684\u5730\u5740\n    --set global.exporters.metric.port=${vminsert_port} \\    # \u8bf7\u66ff\u6362\u201c32517\" \u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e2d vminsert \u7684\u5730\u5740\n    --set global.exporters.auditLog.host=${opentelemetry-collector address} \\     # \u8bf7\u66ff\u6362\u201c32517\" \u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e2d opentelemetry-collector \u7684\u7aef\u53e3\n    --set global.exporters.auditLog.port=${otel_col_auditlog_port}\\   # \u8bf7\u66ff\u6362\u201c32517\" \u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e2d opentelemetry-collector \u5bb9\u5668\u7aef\u53e3\u4e3a 8006 \u7684 service \u5bf9\u5916\u8bbf\u95ee\u7684\u5730\u5740\n    -n insight-system\n

                                                    Info

Refer to How to Get Connection Addresses for the address information.

3. Run the following commands to verify the installation status:

    helm list -A
    kubectl get pods -n insight-system
                                                  "},{"location":"admin/insight/quickstart/install/helm-installagent.html#_1","title":"\u5982\u4f55\u83b7\u53d6\u8fde\u63a5\u5730\u5740","text":""},{"location":"admin/insight/quickstart/install/helm-installagent.html#insight-agent_1","title":"\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u5b89\u88c5 Insight Agent","text":"

If the agent is installed in the management cluster, it is recommended to access the services via their in-cluster domain names:

    export vminsert_host="vminsert-insight-victoria-metrics-k8s-stack.insight-system.svc.cluster.local"  # metrics
    export es_host="insight-es-master.insight-system.svc.cluster.local"                                  # logs
    export otel_col_host="insight-opentelemetry-collector.insight-system.svc.cluster.local"              # traces
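These exported variables can then be plugged into the install command shown earlier. A minimal sketch (the port parameters are omitted on the assumption that the in-cluster defaults apply; add them explicitly if your environment differs):

    helm upgrade --install --create-namespace --cleanup-on-fail \
        --version ${version} \
        insight-agent insight/insight-agent \
        --set global.exporters.logging.elasticsearch.host=${es_host} \
        --set global.exporters.metric.host=${vminsert_host} \
        --set global.exporters.trace.host=${otel_col_host} \
        --set global.exporters.auditLog.host=${otel_col_host} \
        -n insight-system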
                                                  "},{"location":"admin/insight/quickstart/install/helm-installagent.html#insight-agent_2","title":"\u5728\u5de5\u4f5c\u96c6\u7fa4\u5b89\u88c5 Insight Agent","text":"\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4f7f\u7528\u9ed8\u8ba4\u7684 LoadBalancer\u767b\u5f55\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u63a7\u5236\u53f0\u64cd\u4f5c\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4f7f\u7528 Nodeport

When the global service cluster exposes services via the default LoadBalancer method, log in to the console of the global service cluster and run the following commands:

    export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})
    curl --location --request POST 'http://'"${INSIGHT_SERVER_IP}"'/apis/insight.io/v1alpha1/agentinstallparam'

You will get a response similar to the following:

                                                  {\"global\":{\"exporters\":{\"logging\":{\"output\":\"elasticsearch\",\"elasticsearch\":{\"host\":\"10.6.182.32\"},\"kafka\":{},\"host\":\"10.6.182.32\"},\"metric\":{\"host\":\"10.6.182.32\"},\"auditLog\":    {\"host\":\"10.6.182.32\"}}},\"opentelemetry-operator\":{\"enabled\":true},\"opentelemetry-collector\":{\"enabled\":true}}\n

Where:

• global.exporters.logging.elasticsearch.host is the logging service address (there is no need to set the corresponding service ports; the defaults will be used);
• global.exporters.metric.host is the metrics service address;
• global.exporters.trace.host is the tracing service address;
• global.exporters.auditLog.host is the audit log service address (the same service as tracing, but on a different port);
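For scripting, the fields of this response can be pulled out with jq. A sketch, assuming jq is available where you run curl:

    # Extract the metrics service address from the agentinstallparam response
    curl -s --location --request POST 'http://'"${INSIGHT_SERVER_IP}"'/apis/insight.io/v1alpha1/agentinstallparam' \
        | jq -r '.global.exporters.metric.host'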

Log in to the console of the global service cluster and run the following commands:

    kubectl get service -n insight-system | grep lb
    kubectl get service -n mcamel-system | grep es

Where:

• lb-vminsert-insight-victoria-metrics-k8s-stack is the address of the metrics service;
• lb-insight-opentelemetry-collector is the address of the tracing service;
• mcamel-common-es-cluster-masters-es-http is the address of the logging service;

When the global service cluster exposes services via NodePort, log in to the console of the global service cluster and run the following commands:

    kubectl get service -n insight-system
    kubectl get service -n mcamel-system

Where:

• vminsert-insight-victoria-metrics-k8s-stack is the address of the metrics service;
• insight-opentelemetry-collector is the address of the tracing service;
• mcamel-common-es-cluster-masters-es-http is the address of the logging service;
                                                  "},{"location":"admin/insight/quickstart/install/helm-installagent.html#insight-agent_3","title":"\u5347\u7ea7 Insight Agent","text":"
1. Log in to the console of the target cluster and run the following command to back up the --set parameters.

    helm get values insight-agent -n insight-system -o yaml > insight-agent.yaml
2. Run the following command to update the repository.

    helm repo update
3. Run the following command to perform the upgrade.

    helm upgrade insight-agent insight/insight-agent \
        -n insight-system \
        -f ./insight-agent.yaml \
        --version ${version}   # Specify the version to upgrade to
4. Run the following command to verify the installation status:

    kubectl get pods -n insight-system
                                                  "},{"location":"admin/insight/quickstart/install/helm-installagent.html#insight-agent_4","title":"\u5378\u8f7d Insight Agent","text":"
    helm uninstall insight-agent -n insight-system --timeout 10m
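To confirm the uninstall finished cleanly, it may help to verify that no release or workloads remain (see also the known issue below about a TLS secret that can be left behind):

    helm list -n insight-system
    kubectl get pods -n insight-system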
                                                  "},{"location":"admin/insight/quickstart/install/install-agent.html","title":"\u5728\u7ebf\u5b89\u88c5 insight-agent","text":"

insight-agent is a plugin for collecting cluster observability data, providing unified observation of metrics, traces, and logs. This document describes how to install insight-agent for an integrated cluster in an online environment.

                                                  "},{"location":"admin/insight/quickstart/install/install-agent.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
• The cluster has been successfully integrated into the Container Management module. For how to integrate a cluster, refer to: Integrate a Cluster
                                                  "},{"location":"admin/insight/quickstart/install/install-agent.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
1. Go to the Container Management module and locate, in the Cluster List, the cluster where insight-agent is to be installed.

2. Choose Install Now to jump there, or click the cluster, then in the left navigation bar click Helm Apps -> Helm Templates, search for insight-agent, and click the card to enter its details.

3. Review the insight-agent installation page and click Install to proceed.

4. Select the version to install and, in the form below, fill in the addresses of the corresponding data storage components in the global service cluster. After confirming the information is correct, click OK.

  • insight-agent is deployed in the cluster's insight-system namespace by default.
  • It is recommended to install the latest version of insight-agent.
  • The system pre-fills the addresses of the data-reporting components; please still verify them before clicking OK to install. To modify the data reporting addresses, refer to: Get Data Reporting Addresses.

5. The system will automatically return to the Helm Apps list. When the status of the insight-agent application changes from Not Ready to Deployed and all component statuses are Running, the installation has succeeded. After a while, the cluster's data can be viewed in the Observability module.

                                                  Note

• Click ┇ on the far right to perform more operations in the pop-up menu, such as Update, View YAML, and Delete.
                                                  "},{"location":"admin/insight/quickstart/install/knownissues.html","title":"\u5df2\u77e5\u95ee\u9898","text":"

This page lists some issues related to Insight Agent installation and uninstallation, along with their workarounds.

                                                  "},{"location":"admin/insight/quickstart/install/knownissues.html#v0230","title":"v0.23.0","text":""},{"location":"admin/insight/quickstart/install/knownissues.html#insight-agent","title":"Insight Agent","text":""},{"location":"admin/insight/quickstart/install/knownissues.html#insight-agent_1","title":"Insight Agent \u5378\u8f7d\u5931\u8d25","text":"

When you run the following command to uninstall Insight Agent:

    helm uninstall insight-agent -n insight-system

the TLS secret used by otel-operator is not removed.

In otel-operator's logic for reusing the TLS secret, it checks whether the otel-operator MutationConfiguration exists and reuses the CA cert bound in it. However, since helm uninstall has already removed the MutationConfiguration, this results in a null value.

Therefore, please delete the corresponding secret manually, using either of the following two methods:

• Delete via the command line: log in to the console of the target cluster and run the following command:

    kubectl -n insight-system delete secret insight-agent-opentelemetry-operator-controller-manager-service-cert
• Delete via the UI: log in to AI 算力中心 Container Management, select the target cluster, go to Secrets from the left navigation, enter insight-agent-opentelemetry-operator-controller-manager-service-cert, and choose Delete.

                                                  "},{"location":"admin/insight/quickstart/install/knownissues.html#v0220","title":"v0.22.0","text":""},{"location":"admin/insight/quickstart/install/knownissues.html#insight-agent_2","title":"Insight Agent","text":""},{"location":"admin/insight/quickstart/install/knownissues.html#insight-agent_3","title":"\u5347\u7ea7 Insight Agent \u65f6\u66f4\u65b0\u65e5\u5fd7\u6536\u96c6\u7aef\uff0c\u672a\u751f\u6548","text":"

When the insight-agent logging configuration is changed from elasticsearch to kafka, or from kafka to elasticsearch, the change does not actually take effect; the pre-update configuration is still used.

Workaround:

Manually restart Fluent Bit in the cluster.
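A minimal sketch of such a restart, assuming Fluent Bit runs as a DaemonSet named insight-agent-fluent-bit in the insight-system namespace (verify the actual name in your cluster first):

    # Find the Fluent Bit workload, then trigger a rolling restart
    kubectl -n insight-system get daemonsets | grep fluent
    kubectl -n insight-system rollout restart daemonset/insight-agent-fluent-bit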

                                                  "},{"location":"admin/insight/quickstart/install/knownissues.html#v0210","title":"v0.21.0","text":""},{"location":"admin/insight/quickstart/install/knownissues.html#insight-agent_4","title":"Insight Agent","text":""},{"location":"admin/insight/quickstart/install/knownissues.html#podmonitor-jvm","title":"PodMonitor \u91c7\u96c6\u591a\u4efd JVM \u6307\u6807\u6570\u636e","text":"
1. This version of PodMonitor/insight-kubernetes-pod has a defect: it incorrectly creates scrape jobs for all containers of Pods labeled insight.opentelemetry.io/metric-scrape=true, while in fact only the port of the container corresponding to insight.opentelemetry.io/metric-port needs to be scraped.

2. Once a PodMonitor is declared, PrometheusOperator pre-configures some service discovery settings. Considering CRD compatibility as well, the mechanism of configuring annotation-created scrape jobs via PodMonitor was abandoned.

3. Instead, Prometheus's built-in additional scrape config mechanism is used: the service discovery rules are placed in a secret and then referenced by Prometheus.

In summary:

1. Delete the current insight-kubernetes-pod PodMonitor
2. Use the new rule

The new rule uses action: keepequal to compare source_labels with target_label for equality, in order to decide whether to create a scrape job for a given container port. Note that this capability is only available in Prometheus 2.41.0 (2022-12-20) and later.

    +    - source_labels: [__meta_kubernetes_pod_annotation_insight_opentelemetry_io_metric_port]
    +      separator: ;
    +      target_label: __meta_kubernetes_pod_container_port_number
    +      action: keepequal
                                                  "},{"location":"admin/insight/quickstart/install/upgrade-note.html","title":"\u5347\u7ea7\u6ce8\u610f\u4e8b\u9879","text":"

This page describes some considerations for upgrading insight-server and insight-agent.

                                                  "},{"location":"admin/insight/quickstart/install/upgrade-note.html#insight-agent","title":"insight-agent","text":""},{"location":"admin/insight/quickstart/install/upgrade-note.html#v028x-v029x","title":"\u4ece v0.28.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.29.x","text":"

Because v0.29.0 upgraded the OpenTelemetry community operator chart version, the supported values of featureGates in values have changed. Therefore, before upgrading, the value of featureGates needs to be set to empty, i.e.:

    -  --set opentelemetry-operator.manager.featureGates="+operator.autoinstrumentation.go,+operator.autoinstrumentation.multi-instrumentation,+operator.autoinstrumentation.nginx" \
    +  --set opentelemetry-operator.manager.featureGates=""
                                                  "},{"location":"admin/insight/quickstart/install/upgrade-note.html#insight-server","title":"insight-server","text":""},{"location":"admin/insight/quickstart/install/upgrade-note.html#v026x-v027x","title":"\u4ece v0.26.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.27.x \u6216\u66f4\u9ad8\u7248\u672c","text":"

In v0.27.x, the switch for the vector component was extracted into a standalone option. So if vector was enabled in the original environment, you need to specify --set vector.enabled=true when upgrading insight-server.
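For example, a sketch of passing the flag during the upgrade (the release name insight and chart reference insight/insight are assumptions; substitute the ones used in your environment):

    helm upgrade insight insight/insight \
        -n insight-system \
        --set vector.enabled=true \
        --version ${version}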

                                                  "},{"location":"admin/insight/quickstart/install/upgrade-note.html#v019x-020x","title":"\u4ece v0.19.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 0.20.x","text":"

Before upgrading Insight, you need to run the following commands to manually delete the jaeger-collector and jaeger-query deployments:

    kubectl -n insight-system delete deployment insight-jaeger-collector
    kubectl -n insight-system delete deployment insight-jaeger-query
                                                  "},{"location":"admin/insight/quickstart/install/upgrade-note.html#v017x-v018x","title":"\u4ece v0.17.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.18.x","text":"

Because the Jaeger-related deployment files were updated in 0.18.x, you need to run the following commands manually before upgrading insight-server:

    kubectl -n insight-system delete deployment insight-jaeger-collector
    kubectl -n insight-system delete deployment insight-jaeger-query

Because metric names changed in 0.18.x, insight-agent should also be upgraded after insight-server is upgraded.

In addition, the parameters for enabling the tracing module were adjusted, as was the Elasticsearch connection configuration. See the following parameters:

    +  --set global.tracing.enable=true \
    -  --set jaeger.collector.enabled=true \
    -  --set jaeger.query.enabled=true \
    +  --set global.elasticsearch.scheme=${your-external-elasticsearch-scheme} \
    +  --set global.elasticsearch.host=${your-external-elasticsearch-host} \
    +  --set global.elasticsearch.port=${your-external-elasticsearch-port} \
    +  --set global.elasticsearch.user=${your-external-elasticsearch-username} \
    +  --set global.elasticsearch.password=${your-external-elasticsearch-password} \
    -  --set jaeger.storage.elasticsearch.scheme=${your-external-elasticsearch-scheme} \
    -  --set jaeger.storage.elasticsearch.host=${your-external-elasticsearch-host} \
    -  --set jaeger.storage.elasticsearch.port=${your-external-elasticsearch-port} \
    -  --set jaeger.storage.elasticsearch.user=${your-external-elasticsearch-username} \
    -  --set jaeger.storage.elasticsearch.password=${your-external-elasticsearch-password} \
                                                  "},{"location":"admin/insight/quickstart/install/upgrade-note.html#v015x-v016x","title":"\u4ece v0.15.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.16.x","text":"

Because 0.16.x uses the new feature parameter disableRouteContinueEnforce of the vmalertmanagers CRD, you need to run the following command manually before upgrading insight-server.

    kubectl apply --server-side -f https://raw.githubusercontent.com/VictoriaMetrics/operator/v0.33.0/config/crd/bases/operator.victoriametrics.com_vmalertmanagers.yaml --force-conflicts

                                                  Note

If you installed offline, after extracting the Insight offline package, run the following command to update the CRDs.

    kubectl apply --server-side -f insight/dependency-crds --force-conflicts
                                                  "},{"location":"admin/insight/quickstart/install/upgrade-note.html#insight-agent_1","title":"insight-agent","text":""},{"location":"admin/insight/quickstart/install/upgrade-note.html#v023x-v024x","title":"\u4ece v0.23.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.24.x","text":"

Because new CRDs were added to the OTEL operator chart in 0.24.x, and a Helm upgrade does not update CRDs, you need to run the following command manually:

    kubectl apply -f https://raw.githubusercontent.com/open-telemetry/opentelemetry-helm-charts/main/charts/opentelemetry-operator/crds/crd-opentelemetry.io_opampbridges.yaml

If you installed offline, the YAML for the above CRD can be found after extracting the insight-agent offline package. After extracting the Insight-Agent chart, run the following command manually:

    kubectl apply -f charts/agent/crds/crd-opentelemetry.io_opampbridges.yaml
                                                  "},{"location":"admin/insight/quickstart/install/upgrade-note.html#v019x-v020x","title":"\u4ece v0.19.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.20.x","text":"

Because 0.20.x added Kafka log export configuration, the log export settings were adjusted. Before upgrading insight-agent, note the parameter changes: the original logging configuration has moved to logging.elasticsearch:

    -  --set global.exporters.logging.host \
    -  --set global.exporters.logging.port \
    +  --set global.exporters.logging.elasticsearch.host \
    +  --set global.exporters.logging.elasticsearch.port \
                                                  "},{"location":"admin/insight/quickstart/install/upgrade-note.html#v017x-v018x_1","title":"\u4ece v0.17.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.18.x","text":"

Because the Jaeger-related deployment files were updated in 0.18.x, note the following parameter changes before upgrading insight-agent.

    +  --set global.exporters.trace.enable=true \
    -  --set opentelemetry-collector.enabled=true \
    -  --set opentelemetry-operator.enabled=true \
                                                  "},{"location":"admin/insight/quickstart/install/upgrade-note.html#v016x-v017x","title":"\u4ece v0.16.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.17.x","text":"

In v0.17.x, the kube-prometheus-stack chart version was upgraded from 41.9.1 to 45.28.1, and some fields in the CRDs it uses were upgraded as well, such as the attachMetadata field of servicemonitor. Therefore, run the following command before upgrading insight-agent:

    kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --force-conflicts

If you installed offline, the YAML for the above CRD can be found in insight-agent/dependency-crds after extracting the insight-agent offline package.

                                                  "},{"location":"admin/insight/quickstart/install/upgrade-note.html#v011x-v012x","title":"\u4ece v0.11.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.12.x","text":"

In v0.12.x, the kube-prometheus-stack chart was upgraded from 39.6.0 to 41.9.1, which includes upgrading prometheus-operator to v0.60.1, the prometheus-node-exporter chart to 4.3.0, and so on. After the upgrade, prometheus-node-exporter uses the Kubernetes recommended labels, so the node-exporter DaemonSet must be deleted before upgrading. prometheus-operator also updated its CRDs, so run the following commands before upgrading insight-agent:

    kubectl delete daemonset insight-agent-prometheus-node-exporter -n insight-system
    kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml --force-conflicts
    kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml --force-conflicts
    kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml --force-conflicts
    kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml --force-conflicts
    kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml --force-conflicts
    kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml --force-conflicts
    kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --force-conflicts
    kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml --force-conflicts

                                                  Note

If you installed offline, after extracting the insight-agent offline package, run the following command to update the CRDs.

    kubectl apply --server-side -f insight-agent/dependency-crds --force-conflicts
                                                  "},{"location":"admin/insight/quickstart/otel/operator.html","title":"\u901a\u8fc7 Operator \u5b9e\u73b0\u5e94\u7528\u7a0b\u5e8f\u65e0\u4fb5\u5165\u589e\u5f3a","text":"

Currently, only Java, NodeJS, Python, .NET, and Golang support non-intrusive integration via the Operator.

                                                  "},{"location":"admin/insight/quickstart/otel/operator.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

Make sure insight-agent is ready. If not, refer to Install insight-agent to Collect Data and ensure the following three items are ready:

• The trace feature is enabled for insight-agent
• The address and port for trace data are filled in correctly
• The Pods corresponding to deployment/insight-agent-opentelemetry-operator and deployment/insight-agent-opentelemetry-collector are ready (see the check sketched below)
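These readiness checks can be done from the command line, as in this sketch (the deployment names are taken from the list above):

    kubectl -n insight-system get deployment insight-agent-opentelemetry-operator
    kubectl -n insight-system get deployment insight-agent-opentelemetry-collector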
                                                  "},{"location":"admin/insight/quickstart/otel/operator.html#instrumentation-cr","title":"\u5b89\u88c5 Instrumentation CR","text":"

                                                  Tip

Starting from Insight v0.22.0, it is no longer necessary to install the Instrumentation CR manually.

Install it in the insight-system namespace; there are some minor differences between versions.

Insight v0.21.x:

    K8S_CLUSTER_UID=$(kubectl get namespace kube-system -o jsonpath='{.metadata.uid}')
    kubectl apply -f - <<EOF
    apiVersion: opentelemetry.io/v1alpha1
    kind: Instrumentation
    metadata:
      name: insight-opentelemetry-autoinstrumentation
      namespace: insight-system
    spec:
      # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource
      resource:
        addK8sUIDAttributes: true
      env:
        - name: OTEL_EXPORTER_OTLP_ENDPOINT
          value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317
      sampler:
        # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray
        type: always_on
      java:
        image: ghcr.m.daocloud.io/openinsight-proj/autoinstrumentation-java:1.31.0
        env:
          - name: OTEL_JAVAAGENT_DEBUG
            value: "false"
          - name: OTEL_INSTRUMENTATION_JDBC_ENABLED
            value: "true"
          - name: SPLUNK_PROFILER_ENABLED
            value: "false"
          - name: OTEL_METRICS_EXPORTER
            value: "prometheus"
          - name: OTEL_METRICS_EXPORTER_PORT
            value: "9464"
          - name: OTEL_K8S_CLUSTER_UID
            value: $K8S_CLUSTER_UID
      nodejs:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1
      python:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0
      dotnet:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0
      go:
        # Must set the default value manually for now.
        # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha
    EOF
Insight v0.20.x:

    kubectl apply -f - <<EOF
    apiVersion: opentelemetry.io/v1alpha1
    kind: Instrumentation
    metadata:
      name: insight-opentelemetry-autoinstrumentation
      namespace: insight-system
    spec:
      # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource
      resource:
        addK8sUIDAttributes: true
      env:
        - name: OTEL_EXPORTER_OTLP_ENDPOINT
          value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317
      sampler:
        # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray
        type: always_on
      java:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.29.0
        env:
          - name: OTEL_JAVAAGENT_DEBUG
            value: "false"
          - name: OTEL_INSTRUMENTATION_JDBC_ENABLED
            value: "true"
          - name: SPLUNK_PROFILER_ENABLED
            value: "false"
          - name: OTEL_METRICS_EXPORTER
            value: "prometheus"
          - name: OTEL_METRICS_EXPORTER_PORT
            value: "9464"
      nodejs:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1
      python:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0
      dotnet:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0-rc.2
      go:
        # Must set the default value manually for now.
        # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha
    EOF
Insight v0.18.x:

    kubectl apply -f - <<EOF
    apiVersion: opentelemetry.io/v1alpha1
    kind: Instrumentation
    metadata:
      name: insight-opentelemetry-autoinstrumentation
      namespace: insight-system
    spec:
      # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource
      resource:
        addK8sUIDAttributes: true
      env:
        - name: OTEL_EXPORTER_OTLP_ENDPOINT
          value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317
      sampler:
        # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray
        type: always_on
      java:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.25.0
        env:
          - name: OTEL_JAVAAGENT_DEBUG
            value: "false"
          - name: OTEL_INSTRUMENTATION_JDBC_ENABLED
            value: "true"
          - name: SPLUNK_PROFILER_ENABLED
            value: "false"
          - name: OTEL_METRICS_EXPORTER
            value: "prometheus"
          - name: OTEL_METRICS_EXPORTER_PORT
            value: "9464"
      nodejs:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.37.0
      python:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.38b0
      go:
        # Must set the default value manually for now.
        # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.1-alpha
    EOF
Insight v0.17.x:

    kubectl apply -f - <<EOF
    apiVersion: opentelemetry.io/v1alpha1
    kind: Instrumentation
    metadata:
      name: insight-opentelemetry-autoinstrumentation
      namespace: insight-system
    spec:
      # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource
      resource:
        addK8sUIDAttributes: true
      env:
        - name: OTEL_EXPORTER_OTLP_ENDPOINT
          value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317
      sampler:
        # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray
        type: always_on
      java:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.23.0
        env:
          - name: OTEL_JAVAAGENT_DEBUG
            value: "false"
          - name: OTEL_INSTRUMENTATION_JDBC_ENABLED
            value: "true"
          - name: SPLUNK_PROFILER_ENABLED
            value: "false"
          - name: OTEL_METRICS_EXPORTER
            value: "prometheus"
          - name: OTEL_METRICS_EXPORTER_PORT
            value: "9464"
      nodejs:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.34.0
      python:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.33b0
    EOF
Insight v0.16.x:

    kubectl apply -f - <<EOF
    apiVersion: opentelemetry.io/v1alpha1
    kind: Instrumentation
    metadata:
      name: insight-opentelemetry-autoinstrumentation
      namespace: insight-system
    spec:
      # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource
      resource:
        addK8sUIDAttributes: true
      env:
        - name: OTEL_EXPORTER_OTLP_ENDPOINT
          value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317
      sampler:
        # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray
        type: always_on
      java:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.23.0
        env:
          - name: OTEL_JAVAAGENT_DEBUG
            value: "false"
          - name: OTEL_INSTRUMENTATION_JDBC_ENABLED
            value: "true"
          - name: SPLUNK_PROFILER_ENABLED
            value: "false"
          - name: OTEL_METRICS_EXPORTER
            value: "prometheus"
          - name: OTEL_METRICS_EXPORTER_PORT
            value: "9464"
      nodejs:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.34.0
      python:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.33b0
    EOF
                                                  "},{"location":"admin/insight/quickstart/otel/operator.html#_2","title":"\u4e0e\u670d\u52a1\u7f51\u683c\u94fe\u8def\u4e32\u8054\u573a\u666f","text":"

If you have enabled the service mesh's trace tracking capability, an additional environment variable injection configuration is required:

                                                  "},{"location":"admin/insight/quickstart/otel/operator.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b","text":"
1. Log in to AI 算力中心, go to Container Management, and select the target cluster.
2. In the left navigation bar, select Custom Resources, find instrumentations.opentelemetry.io, and enter its details page.
3. Select the insight-system namespace, edit insight-opentelemetry-autoinstrumentation, and add the following under spec:env::

        - name: OTEL_SERVICE_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.labels['app']

The complete command is as follows (for Insight v0.21.x):

    K8S_CLUSTER_UID=$(kubectl get namespace kube-system -o jsonpath='{.metadata.uid}')
    kubectl apply -f - <<EOF
    apiVersion: opentelemetry.io/v1alpha1
    kind: Instrumentation
    metadata:
      name: insight-opentelemetry-autoinstrumentation
      namespace: insight-system
    spec:
      # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource
      resource:
        addK8sUIDAttributes: true
      env:
        - name: OTEL_EXPORTER_OTLP_ENDPOINT
          value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317
        - name: OTEL_SERVICE_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.labels['app']
      sampler:
        # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray
        type: always_on
      java:
        image: ghcr.m.daocloud.io/openinsight-proj/autoinstrumentation-java:1.31.0
        env:
          - name: OTEL_JAVAAGENT_DEBUG
            value: "false"
          - name: OTEL_INSTRUMENTATION_JDBC_ENABLED
            value: "true"
          - name: SPLUNK_PROFILER_ENABLED
            value: "false"
          - name: OTEL_METRICS_EXPORTER
            value: "prometheus"
          - name: OTEL_METRICS_EXPORTER_PORT
            value: "9464"
          - name: OTEL_K8S_CLUSTER_UID
            value: $K8S_CLUSTER_UID
      nodejs:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1
      python:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0
      dotnet:
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0
      go:
        # Must set the default value manually for now.
        # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.
        image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha
    EOF
                                                  "},{"location":"admin/insight/quickstart/otel/operator.html#_4","title":"\u6dfb\u52a0\u6ce8\u89e3\uff0c\u81ea\u52a8\u63a5\u5165\u94fe\u8def","text":"

Once the above is ready, you can integrate your applications with trace tracking via annotations; OTel currently supports annotation-based integration. Depending on the service language, different pod annotations need to be added. Each service can add one of two kinds of annotations:

• Environment-variable-only injection annotation

There is only one annotation of this kind. It adds otel-related environment variables, such as the trace reporting address, the ID of the cluster where the container resides, the namespace, and so on (this annotation is very useful when the application's language does not support auto-instrumentation probes):

    instrumentation.opentelemetry.io/inject-sdk: "insight-system/insight-opentelemetry-autoinstrumentation"

The value is split by / into two parts: the first value (insight-system) is the namespace of the CR installed in the previous step, and the second value (insight-opentelemetry-autoinstrumentation) is the name of that CR.

• Auto-probe injection plus environment variable injection annotations

There are currently four annotations of this kind, corresponding to four programming languages: java, nodejs, python, and dotnet. Using one of them injects an auto-instrumentation probe and the default otel environment variables into the first container under spec.pod:

Java application:

    instrumentation.opentelemetry.io/inject-java: "insight-system/insight-opentelemetry-autoinstrumentation"

NodeJS application:

    instrumentation.opentelemetry.io/inject-nodejs: "insight-system/insight-opentelemetry-autoinstrumentation"

Python application:

    instrumentation.opentelemetry.io/inject-python: "insight-system/insight-opentelemetry-autoinstrumentation"

Dotnet application:

    instrumentation.opentelemetry.io/inject-dotnet: "insight-system/insight-opentelemetry-autoinstrumentation"

Golang application:

Because Go auto-instrumentation requires OTEL_GO_AUTO_TARGET_EXE to be set, you must provide a valid executable path via the annotation or the Instrumentation resource. Failing to set this value aborts Go auto-instrumentation injection, causing trace integration to fail.

    instrumentation.opentelemetry.io/inject-go: "insight-system/insight-opentelemetry-autoinstrumentation"
    instrumentation.opentelemetry.io/otel-go-auto-target-exe: "/path/to/container/executable"

Go auto-instrumentation also requires elevated permissions. The following permissions are set automatically and are required.

    securityContext:
      privileged: true
      runAsUser: 0

                                                  Tip

When injecting probes, the OpenTelemetry Operator automatically adds some OTel-related environment variables and also supports overriding them. The override precedence of these environment variables is:

    original container env vars -> language specific env vars -> common env vars -> instrument spec configs' vars

However, avoid manually overriding OTEL_RESOURCE_ATTRIBUTES_NODE_NAME. The Operator uses it internally as a marker of whether a Pod has already been injected with a probe; if it is added manually, the probe may fail to be injected.

                                                  "},{"location":"admin/insight/quickstart/otel/operator.html#demo","title":"\u81ea\u52a8\u6ce8\u5165\u793a\u4f8b Demo","text":"

Note that the annotation is added under the Pod template's annotations (spec.template.metadata.annotations), as in the example below.

    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: my-app
      labels:
        app: my-app
    spec:
      selector:
        matchLabels:
          app: my-app
      replicas: 1
      template:
        metadata:
          labels:
            app: my-app
          annotations:
            instrumentation.opentelemetry.io/inject-java: "insight-system/insight-opentelemetry-autoinstrumentation"
        spec:
          containers:
          - name: myapp
            image: jaegertracing/vertx-create-span:operator-e2e-tests
            ports:
              - containerPort: 8080
                protocol: TCP
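After applying the Deployment, a quick way to confirm the probe was injected is to check for the injected init container, as in this sketch (the manifest filename my-app.yaml is hypothetical):

    kubectl apply -f my-app.yaml
    # The injected init container should appear in the Pod spec
    kubectl get pod -l app=my-app -o jsonpath='{.items[0].spec.initContainers[*].name}'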

The final generated YAML is as follows:

    apiVersion: v1
    kind: Pod
    metadata:
      name: my-deployment-with-sidecar-565bd877dd-nqkk6
      generateName: my-deployment-with-sidecar-565bd877dd-
      namespace: default
      uid: aa89ca0d-620c-4d20-8bc1-37d67bad4ea4
      resourceVersion: '2668986'
      creationTimestamp: '2022-04-08T05:58:48Z'
      labels:
        app: my-pod-with-sidecar
        pod-template-hash: 565bd877dd
      annotations:
        cni.projectcalico.org/containerID: 234eae5e55ea53db2a4bc2c0384b9a1021ed3908f82a675e4a92a49a7e80dd61
        cni.projectcalico.org/podIP: 192.168.134.133/32
        cni.projectcalico.org/podIPs: 192.168.134.133/32
        instrumentation.opentelemetry.io/inject-java: "insight-system/insight-opentelemetry-autoinstrumentation"
    spec:
      volumes:
        - name: kube-api-access-sp2mz
          projected:
            sources:
              - serviceAccountToken:
                  expirationSeconds: 3607
                  path: token
              - configMap:
                  name: kube-root-ca.crt
                  items:
                    - key: ca.crt
                      path: ca.crt
              - downwardAPI:
                  items:
                    - path: namespace
                      fieldRef:
                        apiVersion: v1
                        fieldPath: metadata.namespace
            defaultMode: 420
        - name: opentelemetry-auto-instrumentation
          emptyDir: {}
      initContainers:
        - name: opentelemetry-auto-instrumentation
          image: >-
            ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java
          command:
            - cp
            - /javaagent.jar
            - /otel-auto-instrumentation/javaagent.jar
          resources: {}
          volumeMounts:
            - name: opentelemetry-auto-instrumentation
              mountPath: /otel-auto-instrumentation
            - name: kube-api-access-sp2mz
              readOnly: true
              mountPath: /var/run/secrets/kubernetes.io/serviceaccount
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: Always
      containers:
        - name: myapp
          image: ghcr.io/pavolloffay/spring-petclinic:latest
          env:
            - name: OTEL_JAVAAGENT_DEBUG
              value: 'true'
            - name: OTEL_INSTRUMENTATION_JDBC_ENABLED
              value: 'true'
            - name: SPLUNK_PROFILER_ENABLED
              value: 'false'
            - name: JAVA_TOOL_OPTIONS
              value: ' -javaagent:/otel-auto-instrumentation/javaagent.jar'
            - name: OTEL_TRACES_EXPORTER
              value: otlp
            - name: OTEL_EXPORTER_OTLP_ENDPOINT
              value: http://insight-agent-opentelemetry-collector.svc.cluster.local:4317
            - name: OTEL_EXPORTER_OTLP_TIMEOUT
              value: '20'
            - name: OTEL_TRACES_SAMPLER
              value: parentbased_traceidratio
            - name: OTEL_TRACES_SAMPLER_ARG
              value: '0.85'
            - name: SPLUNK_TRACE_RESPONSE_HEADER_ENABLED
              value: 'true'
            - name: OTEL_SERVICE_NAME
              value: my-deployment-with-sidecar
            - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: OTEL_RESOURCE_ATTRIBUTES_POD_UID
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.uid
            - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: OTEL_RESOURCE_ATTRIBUTES
              value: >-
                k8s.container.name=myapp,k8s.deployment.name=my-deployment-with-sidecar,k8s.deployment.uid=8de6929d-dda0-436c-bca1-604e9ca7ea4e,k8s.namespace.name=default,k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME),k8s.pod.uid=$(OTEL_RESOURCE_ATTRIBUTES_POD_UID),k8s.replicaset.name=my-deployment-with-sidecar-565bd877dd,k8s.replicaset.uid=190d5f6e-ba7f-4794-b2e6-390b5879a6c4
            - name: OTEL_PROPAGATORS
              value: jaeger,b3
          resources: {}
          volumeMounts:
            - name: kube-api-access-sp2mz
              readOnly: true
              mountPath: /var/run/secrets/kubernetes.io/serviceaccount
            - name: opentelemetry-auto-instrumentation
              mountPath: /otel-auto-instrumentation
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: Always
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      serviceAccountName: default
      serviceAccount: default
      nodeName: k8s-master3
      securityContext:
        runAsUser: 1000
        runAsGroup: 3000
        fsGroup: 2000
      schedulerName: default-scheduler
      tolerations:
        - key: node.kubernetes.io/not-ready
          operator: Exists
          effect: NoExecute
          tolerationSeconds: 300
        - key: node.kubernetes.io/unreachable
          operator: Exists
          effect: NoExecute
          tolerationSeconds: 300
      priority: 0
      enableServiceLinks: true
      preemptionPolicy: PreemptLowerPriority
                                                  "},{"location":"admin/insight/quickstart/otel/operator.html#_5","title":"\u94fe\u8def\u67e5\u8be2","text":"

For how to query services that have been integrated, refer to Trace Query.

                                                  "},{"location":"admin/insight/quickstart/otel/otel.html","title":"\u4f7f\u7528 OTel \u8d4b\u4e88\u5e94\u7528\u53ef\u89c2\u6d4b\u6027","text":"

Instrumentation is the process of enabling application code to generate telemetry data, that is, things that can help you monitor or measure the performance and state of your application.

OpenTelemetry is the leading open source project providing instrumentation libraries for major programming languages and popular frameworks. It is a project under the Cloud Native Computing Foundation, backed by the community's vast resources. It provides a standardized data format for collected data without requiring integration with any specific vendor.

                                                  Insight \u652f\u6301\u7528\u4e8e\u68c0\u6d4b\u5e94\u7528\u7a0b\u5e8f\u7684 OpenTelemetry \u6765\u589e\u5f3a\u60a8\u7684\u5e94\u7528\u7a0b\u5e8f\u3002

                                                  \u672c\u6307\u5357\u4ecb\u7ecd\u4e86\u4f7f\u7528 OpenTelemetry \u8fdb\u884c\u9065\u6d4b\u589e\u5f3a\u7684\u57fa\u672c\u6982\u5ff5\u3002 OpenTelemetry \u8fd8\u6709\u4e00\u4e2a\u7531\u5e93\u3001\u63d2\u4ef6\u3001\u96c6\u6210\u548c\u5176\u4ed6\u6709\u7528\u5de5\u5177\u7ec4\u6210\u7684\u751f\u6001\u7cfb\u7edf\u6765\u6269\u5c55\u5b83\u3002 \u60a8\u53ef\u4ee5\u5728 Otel Registry \u4e2d\u627e\u5230\u8fd9\u4e9b\u8d44\u6e90\u3002

                                                  \u60a8\u53ef\u4ee5\u4f7f\u7528\u4efb\u4f55\u5f00\u653e\u6807\u51c6\u5e93\u8fdb\u884c\u9065\u6d4b\u589e\u5f3a\uff0c\u5e76\u4f7f\u7528 Insight \u4f5c\u4e3a\u53ef\u89c2\u5bdf\u6027\u540e\u7aef\u6765\u6444\u53d6\u3001\u5206\u6790\u548c\u53ef\u89c6\u5316\u6570\u636e\u3002

                                                  \u4e3a\u4e86\u589e\u5f3a\u60a8\u7684\u4ee3\u7801\uff0c\u60a8\u53ef\u4ee5\u4f7f\u7528 OpenTelemetry \u4e3a\u7279\u5b9a\u8bed\u8a00\u63d0\u4f9b\u7684\u589e\u5f3a\u64cd\u4f5c\uff1a

                                                  Insight \u76ee\u524d\u63d0\u4f9b\u4e86\u4f7f\u7528 OpenTelemetry \u589e\u5f3a .Net NodeJS\u3001Java\u3001Python \u548c Golang \u5e94\u7528\u7a0b\u5e8f\u7684\u7b80\u5355\u65b9\u6cd5\u3002\u8bf7\u9075\u5faa\u4ee5\u4e0b\u6307\u5357\u3002

                                                  "},{"location":"admin/insight/quickstart/otel/otel.html#_1","title":"\u94fe\u8def\u589e\u5f3a","text":"
                                                  • \u94fe\u8def\u63a5\u5165\u7684\u6700\u4f73\u5b9e\u8df5\uff1a\u901a\u8fc7 Operator \u5b9e\u73b0\u5e94\u7528\u7a0b\u5e8f\u65e0\u4fb5\u5165\u589e\u5f3a
                                                  • \u4ee5 Go \u8bed\u8a00\u4e3a\u4f8b\u7684\u624b\u52a8\u57cb\u70b9\u63a5\u5165\uff1a\u4f7f\u7528 OpenTelemetry SDK \u589e\u5f3a Go \u5e94\u7528\u7a0b\u5e8f
                                                  • \u5229\u7528 ebpf \u5b9e\u73b0 Go \u8bed\u8a00\u65e0\u4fb5\u5165\u63a2\u9488\uff08\u5b9e\u9a8c\u6027\u529f\u80fd\uff09
                                                  "},{"location":"admin/insight/quickstart/otel/send_tracing_to_insight.html","title":"\u5411 Insight \u53d1\u9001\u94fe\u8def\u6570\u636e","text":"

                                                  \u6b64\u6587\u6863\u4e3b\u8981\u63cf\u8ff0\u5ba2\u6237\u5e94\u7528\u5982\u4f55\u81ea\u884c\u5c06\u94fe\u8def\u6570\u636e\u4e0a\u62a5\u7ed9 Insight\u3002\u4e3b\u8981\u5305\u542b\u5982\u4e0b\u4e24\u79cd\u573a\u666f\uff1a

                                                  1. \u5ba2\u6237\u5e94\u7528\u901a\u8fc7 OTEL Agent/SDK \u4e0a\u62a5\u94fe\u8def\u7ed9 Insight
                                                  2. \u901a\u8fc7 Opentelemtry Collector(\u7b80\u79f0 OTEL COL) \u5c06\u94fe\u8def\u8f6c\u53d1\u7ed9 Insight

                                                  \u5728\u6bcf\u4e2a\u5df2\u5b89\u88c5 Insight Agent \u7684\u96c6\u7fa4\u4e2d\u90fd\u6709 insight-agent-otel-col \u7ec4\u4ef6\u7528\u4e8e\u7edf\u4e00\u63a5\u6536\u8be5\u96c6\u7fa4\u7684\u94fe\u8def\u6570\u636e\u3002 \u56e0\u6b64\uff0c\u8be5\u7ec4\u4ef6\u4f5c\u4e3a\u7528\u6237\u63a5\u5165\u4fa7\u7684\u5165\u53e3\uff0c\u9700\u8981\u5148\u83b7\u53d6\u8be5\u5730\u5740\u3002\u53ef\u4ee5\u901a\u8fc7 AI \u7b97\u529b\u4e2d\u5fc3 \u754c\u9762\u83b7\u53d6\u8be5\u96c6\u7fa4 Opentelemtry Collector \u7684\u5730\u5740\uff0c \u6bd4\u5982 insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317 \uff1a
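If you have kubectl access to the cluster, the collector service can also be looked up directly; a hedged example, assuming the default service name and namespace:

kubectl -n insight-system get svc insight-agent-opentelemetry-collector\n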

Beyond this, there are some subtle differences between the reporting methods:

"},{"location":"admin/insight/quickstart/otel/send_tracing_to_insight.html#otel-agentsdk-insight-agent-opentelemtry-collector","title":"Reporting Traces from the Customer Application to the Insight Agent OpenTelemetry Collector via the OTel Agent/SDK","text":"

To report trace data to Insight and have it displayed properly, you should provide the metadata (Resource Attributes) required by OTLP through the following environment variables. There are two ways to do this:

• Add them manually in the deployment YAML, for example:

                                                    ...\n- name: OTEL_EXPORTER_OTLP_ENDPOINT\n  value: \"http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\"\n- name: \"OTEL_SERVICE_NAME\"\n  value: my-java-app-name\n- name: \"OTEL_K8S_NAMESPACE\"\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: metadata.namespace\n- name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: spec.nodeName\n- name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: metadata.name\n- name: OTEL_RESOURCE_ATTRIBUTES\n  value: \"k8s.namespace.name=$(OTEL_K8S_NAMESPACE),k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME)\"\n
• Use the Insight Agent's ability to automatically inject the above metadata (Resource Attributes)

  After making sure that the Insight Agent is working properly and the Instrumentation CR is installed, you only need to add the following annotation to the Pod:

                                                    instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

For example:

apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-with-auto-instrumentation\nspec:\n  selector:\n    matchLabels:\n      app.kubernetes.io/name: my-deployment-with-auto-instrumentation-kubernetes\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: my-deployment-with-auto-instrumentation-kubernetes\n      annotations:\n        sidecar.opentelemetry.io/inject: \"false\"\n        instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
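To verify that the SDK-related environment variables were injected, you can inspect the rendered Pod spec; a hedged check, assuming the label from the example above:

kubectl get pod -l app.kubernetes.io/name=my-deployment-with-auto-instrumentation-kubernetes -o yaml | grep OTEL_\n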
                                                  "},{"location":"admin/insight/quickstart/otel/send_tracing_to_insight.html#opentelemtry-collector-insight-agent-opentelemtry-collector","title":"\u901a\u8fc7 Opentelemtry Collector \u5c06\u94fe\u8def\u8f6c\u53d1\u7ed9 Insight Agent Opentelemtry Collector","text":"

                                                  \u5728\u4fdd\u8bc1\u5e94\u7528\u6dfb\u52a0\u4e86\u5982\u4e0a\u5143\u6570\u636e\u4e4b\u540e\uff0c\u53ea\u9700\u5728\u5ba2\u6237 Opentelemtry Collector \u91cc\u9762\u65b0\u589e\u4e00\u4e2a OTLP Exporter \u5c06\u94fe\u8def\u6570\u636e\u8f6c\u53d1\u7ed9 Insight Agent Opentelemtry Collector \u5373\u53ef\uff0c\u5982\u4e0b Opentelemtry Collector \u914d\u7f6e\u6587\u4ef6\u6240\u793a\uff1a

...\nexporters:\n  otlp/insight:\n    endpoint: insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\nservice:\n  ...\n  pipelines:\n    ...\n    traces:\n      exporters:\n        - otlp/insight\n
                                                  "},{"location":"admin/insight/quickstart/otel/send_tracing_to_insight.html#_1","title":"\u53c2\u8003","text":"
                                                  • \u901a\u8fc7 Operator \u5b9e\u73b0\u5e94\u7528\u7a0b\u5e8f\u65e0\u4fb5\u5165\u589e\u5f3a
                                                  • \u4f7f\u7528 OTel \u8d4b\u4e88\u5e94\u7528\u53ef\u89c2\u6d4b\u6027
                                                  "},{"location":"admin/insight/quickstart/otel/golang/golang.html","title":"\u4f7f\u7528 OTel SDK \u589e\u5f3a Go \u5e94\u7528\u7a0b\u5e8f","text":"

For non-intrusive trace integration of Golang applications, refer to the document Non-intrusive application instrumentation via the Operator, which integrates traces automatically through annotations.

OpenTelemetry, often shortened to OTel, is an open source observability framework that helps generate and collect telemetry data from Go applications: traces, metrics, and logs.

This article mainly explains how to instrument a Go application and integrate trace monitoring through the OpenTelemetry Go SDK.

"},{"location":"admin/insight/quickstart/otel/golang/golang.html#otel-sdk-go_1","title":"Instrumenting Go Applications with the OTel SDK","text":""},{"location":"admin/insight/quickstart/otel/golang/golang.html#_1","title":"Installing Dependencies","text":"

You must first install the dependencies for the OpenTelemetry exporter and SDK. If you are using a different request router, refer to the request routing section. After switching to your application's source folder, run the following command:

                                                  go get go.opentelemetry.io/otel@v1.19.0 \\\n  go.opentelemetry.io/otel/trace@v1.19.0 \\\n  go.opentelemetry.io/otel/sdk@v1.19.0 \\\n  go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin@v0.46.1 \\\n  go.opentelemetry.io/otel/exporters/otlp/otlptrace@v1.19.0 \\\n  go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc@v1.19.0\n
                                                  "},{"location":"admin/insight/quickstart/otel/golang/golang.html#otel-sdk","title":"\u4f7f\u7528 OTel SDK \u521b\u5efa\u521d\u59cb\u5316\u51fd\u6570","text":"

                                                  \u4e3a\u4e86\u8ba9\u5e94\u7528\u7a0b\u5e8f\u80fd\u591f\u53d1\u9001\u6570\u636e\uff0c\u9700\u8981\u4e00\u4e2a\u51fd\u6570\u6765\u521d\u59cb\u5316 OpenTelemetry\u3002\u5728 main.go \u6587\u4ef6\u4e2d\u6dfb\u52a0\u4ee5\u4e0b\u4ee3\u7801\u7247\u6bb5:

                                                  import (\n    \"context\"\n    \"os\"\n    \"time\"\n\n    \"go.opentelemetry.io/otel\"\n    \"go.opentelemetry.io/otel/exporters/otlp/otlptrace\"\n    \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc\"\n    \"go.opentelemetry.io/otel/propagation\"\n    \"go.opentelemetry.io/otel/sdk/resource\"\n    sdktrace \"go.opentelemetry.io/otel/sdk/trace\"\n    semconv \"go.opentelemetry.io/otel/semconv/v1.7.0\"\n    \"go.uber.org/zap\"\n    \"google.golang.org/grpc\"\n)\n\nvar tracerExp *otlptrace.Exporter\n\nfunc retryInitTracer() func() {\n    var shutdown func()\n    go func() {\n        for {\n            // otel will reconnected and re-send spans when otel col recover. so, we don't need to re-init tracer exporter.\n            if tracerExp == nil {\n                shutdown = initTracer()\n            } else {\n                break\n            }\n            time.Sleep(time.Minute * 5)\n        }\n    }()\n    return shutdown\n}\n\nfunc initTracer() func() {\n    // temporarily set timeout to 10s\n    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n    defer cancel()\n\n    serviceName, ok := os.LookupEnv(\"OTEL_SERVICE_NAME\")\n    if !ok {\n        serviceName = \"server_name\"\n        os.Setenv(\"OTEL_SERVICE_NAME\", serviceName)\n    }\n    otelAgentAddr, ok := os.LookupEnv(\"OTEL_EXPORTER_OTLP_ENDPOINT\")\n    if !ok {\n        otelAgentAddr = \"http://localhost:4317\"\n        os.Setenv(\"OTEL_EXPORTER_OTLP_ENDPOINT\", otelAgentAddr)\n    }\n    zap.S().Infof(\"OTLP Trace connect to: %s with service name: %s\", otelAgentAddr, serviceName)\n\n    traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure(), otlptracegrpc.WithDialOption(grpc.WithBlock()))\n    if err != nil {\n        handleErr(err, \"OTLP Trace gRPC Creation\")\n        return nil\n    }\n\n    tracerProvider := sdktrace.NewTracerProvider(\n        sdktrace.WithBatcher(traceExporter),\n        sdktrace.WithSampler(sdktrace.AlwaysSample()),\n    sdktrace.WithResource(resource.NewWithAttributes(semconv.SchemaURL)))\n\n    otel.SetTracerProvider(tracerProvider)\n    otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))\n\n    tracerExp = traceExporter\n    return func() {\n        // Shutdown will flush any remaining spans and shut down the exporter.\n        handleErr(tracerProvider.Shutdown(ctx), \"failed to shutdown TracerProvider\")\n    }\n}\n\nfunc handleErr(err error, message string) {\n    if err != nil {\n        zap.S().Errorf(\"%s: %v\", message, err)\n    }\n}\n
                                                  "},{"location":"admin/insight/quickstart/otel/golang/golang.html#maingo","title":"\u5728 main.go \u4e2d\u521d\u59cb\u5316\u8ddf\u8e2a\u5668","text":"

                                                  \u4fee\u6539 main \u51fd\u6570\u4ee5\u5728 main.go \u4e2d\u521d\u59cb\u5316\u8ddf\u8e2a\u5668\u3002\u53e6\u5916\u5f53\u60a8\u7684\u670d\u52a1\u5173\u95ed\u65f6\uff0c\u5e94\u8be5\u8c03\u7528 TracerProvider.Shutdown() \u786e\u4fdd\u5bfc\u51fa\u6240\u6709 Span\u3002\u8be5\u670d\u52a1\u5c06\u8be5\u8c03\u7528\u4f5c\u4e3a\u4e3b\u51fd\u6570\u4e2d\u7684\u5ef6\u8fdf\u51fd\u6570\uff1a

func main() {\n    // start otel tracing\n    if shutdown := retryInitTracer(); shutdown != nil {\n        defer shutdown()\n    }\n    ......\n}\n
                                                  "},{"location":"admin/insight/quickstart/otel/golang/golang.html#otel-gin","title":"\u4e3a\u5e94\u7528\u6dfb\u52a0 OTel Gin \u4e2d\u95f4\u4ef6","text":"

                                                  \u901a\u8fc7\u5728 main.go \u4e2d\u6dfb\u52a0\u4ee5\u4e0b\u884c\u6765\u914d\u7f6e Gin \u4ee5\u4f7f\u7528\u4e2d\u95f4\u4ef6:

import (\n    ......\n    \"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin\"\n)\n\nfunc main() {\n    ......\n    r := gin.Default()\n    r.Use(otelgin.Middleware(\"my-app\"))\n    ......\n}\n
                                                  "},{"location":"admin/insight/quickstart/otel/golang/golang.html#_2","title":"\u8fd0\u884c\u5e94\u7528\u7a0b\u5e8f","text":"
                                                  • \u672c\u5730\u8c03\u8bd5\u8fd0\u884c

                                                    \u6ce8\u610f: \u6b64\u6b65\u9aa4\u4ec5\u7528\u4e8e\u672c\u5730\u5f00\u53d1\u8c03\u8bd5\uff0c\u751f\u4ea7\u73af\u5883\u4e2d Operator \u4f1a\u81ea\u52a8\u5b8c\u6210\u4ee5\u4e0b\u73af\u5883\u53d8\u91cf\u7684\u6ce8\u5165\u3002

                                                    \u4ee5\u4e0a\u6b65\u9aa4\u5df2\u7ecf\u5b8c\u6210\u4e86\u521d\u59cb\u5316 SDK \u7684\u5de5\u4f5c\uff0c\u73b0\u5728\u5982\u679c\u9700\u8981\u5728\u672c\u5730\u5f00\u53d1\u8fdb\u884c\u8c03\u8bd5\uff0c\u9700\u8981\u63d0\u524d\u83b7\u53d6\u5230 insight-system \u547d\u540d\u7a7a\u95f4\u4e0b insight-agent-opentelemerty-collector \u7684\u5730\u5740\uff0c\u5047\u8bbe\u4e3a\uff1a insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317 \u3002

                                                    \u56e0\u6b64\uff0c\u53ef\u4ee5\u5728\u4f60\u672c\u5730\u542f\u52a8\u5e94\u7528\u7a0b\u5e8f\u7684\u65f6\u5019\u6dfb\u52a0\u5982\u4e0b\u73af\u5883\u53d8\u91cf\uff1a

                                                    OTEL_SERVICE_NAME=my-golang-app OTEL_EXPORTER_OTLP_ENDPOINT=http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317 go run main.go...\n
• Running in production

  Refer to the description of the inject-environment-variables-only annotation in Non-intrusive application instrumentation via the Operator, and add the following annotation to the deployment YAML:

                                                    instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

If annotations cannot be used, you can manually add the following environment variables to the deployment YAML:

······\nenv:\n  - name: OTEL_EXPORTER_OTLP_ENDPOINT\n    value: 'http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317'\n  - name: OTEL_SERVICE_NAME\n    value: \"your deployment name\" # (1)!\n  - name: OTEL_K8S_NAMESPACE\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: metadata.namespace\n  - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: spec.nodeName\n  - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: metadata.name\n  - name: OTEL_RESOURCE_ATTRIBUTES\n    value: 'k8s.namespace.name=$(OTEL_K8S_NAMESPACE),k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME)'\n······\n
1. Modify this value (set it to your deployment name)
                                                  "},{"location":"admin/insight/quickstart/otel/golang/golang.html#_3","title":"\u8bf7\u6c42\u8def\u7531","text":""},{"location":"admin/insight/quickstart/otel/golang/golang.html#opentelemetry-gingonic","title":"OpenTelemetry gin/gonic \u589e\u5f3a","text":"
// Add one line to your import() stanza depending upon your request router:\nmiddleware \"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin\"\n

Then inject the OpenTelemetry middleware:

                                                  router.Use(middleware.Middleware(\"my-app\"))\n
                                                  "},{"location":"admin/insight/quickstart/otel/golang/golang.html#opentelemetry-gorillamux","title":"OpenTelemetry gorillamux \u589e\u5f3a","text":"
// Add one line to your import() stanza depending upon your request router:\nmiddleware \"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux\"\n

Then inject the OpenTelemetry middleware:

                                                  router.Use(middleware.Middleware(\"my-app\"))\n
                                                  "},{"location":"admin/insight/quickstart/otel/golang/golang.html#grpc","title":"gRPC \u589e\u5f3a","text":"

Similarly, OpenTelemetry can help you automatically instrument gRPC requests. To instrument any gRPC server you own, add the interceptors to the server's instantiation.

import (\n    grpcotel \"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc\"\n)\n\nfunc main() {\n    [...]\n\n    s := grpc.NewServer(\n        grpc.UnaryInterceptor(grpcotel.UnaryServerInterceptor()),\n        grpc.StreamInterceptor(grpcotel.StreamServerInterceptor()),\n    )\n}\n

Note that if your program uses a gRPC client to call third-party services, you also need to add the interceptors to the gRPC client:

                                                      [...]\n\n    conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()),\n        grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()),\n        grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()),\n    )\n
                                                  "},{"location":"admin/insight/quickstart/otel/golang/golang.html#_4","title":"\u5982\u679c\u4e0d\u4f7f\u7528\u8bf7\u6c42\u8def\u7531","text":"
                                                  import (\n  \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\"\n)\n

Everywhere you pass an http.Handler to a ServeMux, you wrap the handler function. For example, you would make the following replacements:

                                                  - mux.Handle(\"/path\", h)\n+ mux.Handle(\"/path\", otelhttp.NewHandler(h, \"description of path\"))\n---\n- mux.Handle(\"/path\", http.HandlerFunc(f))\n+ mux.Handle(\"/path\", otelhttp.NewHandler(http.HandlerFunc(f), \"description of path\"))\n

This way, you can ensure that every function wrapped with otelhttp automatically collects its metadata and starts the corresponding trace.
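Putting the pieces together, below is a minimal, self-contained sketch of a ServeMux whose handler is wrapped with otelhttp; the path and operation names are illustrative, and the tracer initialization shown earlier in this article is assumed to have run:

package main\n\nimport (\n    \"fmt\"\n    \"net/http\"\n\n    \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\"\n)\n\nfunc main() {\n    mux := http.NewServeMux()\n    // Each wrapped handler starts a span named after the operation string.\n    hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n        fmt.Fprintln(w, \"hello\")\n    })\n    mux.Handle(\"/hello\", otelhttp.NewHandler(hello, \"GET /hello\"))\n    _ = http.ListenAndServe(\":8080\", mux)\n}\n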

                                                  "},{"location":"admin/insight/quickstart/otel/golang/golang.html#_5","title":"\u6570\u636e\u5e93\u8bbf\u95ee\u589e\u5f3a","text":""},{"location":"admin/insight/quickstart/otel/golang/golang.html#golang-gorm","title":"Golang Gorm","text":"

The OpenTelemetry community has also developed middleware for database access libraries, for example for Gorm:

                                                  import (\n    \"github.com/uptrace/opentelemetry-go-extra/otelgorm\"\n    \"gorm.io/driver/sqlite\"\n    \"gorm.io/gorm\"\n)\n\ndb, err := gorm.Open(sqlite.Open(\"file::memory:?cache=shared\"), &gorm.Config{})\nif err != nil {\n    panic(err)\n}\n\notelPlugin := otelgorm.NewPlugin(otelgorm.WithDBName(\"mydb\"), # \u7f3a\u5931\u4f1a\u5bfc\u81f4\u6570\u636e\u5e93\u76f8\u5173\u62d3\u6251\u5c55\u793a\u4e0d\u5b8c\u6574\n    otelgorm.WithAttributes(semconv.ServerAddress(\"memory\"))) # \u7f3a\u5931\u4f1a\u5bfc\u81f4\u6570\u636e\u5e93\u76f8\u5173\u62d3\u6251\u5c55\u793a\u4e0d\u5b8c\u6574\nif err := db.Use(otelPlugin); err != nil {\n    panic(err)\n}\n

                                                  "},{"location":"admin/insight/quickstart/otel/golang/golang.html#span","title":"\u81ea\u5b9a\u4e49 Span","text":"

Often the middleware provided by OpenTelemetry cannot record functions called further inside the application; in those cases we need to create custom spans to record them:

                                                   \u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n    _, span := otel.Tracer(\"GetServiceDetail\").Start(ctx,\n        \"spanMetricDao.GetServiceDetail\",\n        trace.WithSpanKind(trace.SpanKindInternal))\n    defer span.End()\n  \u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n
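A self-contained sketch of the same pattern wrapped in a function; the tracer and span names are illustrative:

import (\n    \"context\"\n\n    \"go.opentelemetry.io/otel\"\n    \"go.opentelemetry.io/otel/trace\"\n)\n\nfunc GetServiceDetail(ctx context.Context) error {\n    // Start an internal span for this function and close it on return.\n    ctx, span := otel.Tracer(\"GetServiceDetail\").Start(ctx,\n        \"spanMetricDao.GetServiceDetail\",\n        trace.WithSpanKind(trace.SpanKindInternal))\n    defer span.End()\n\n    // ... business logic that passes ctx to downstream calls ...\n    _ = ctx\n    return nil\n}\n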
                                                  "},{"location":"admin/insight/quickstart/otel/golang/golang.html#span_1","title":"\u5411 span \u6dfb\u52a0\u81ea\u5b9a\u4e49\u5c5e\u6027\u548c\u4e8b\u4ef6","text":"

You can also set custom attributes or tags on a span. To add custom attributes and events, follow these steps:

                                                  "},{"location":"admin/insight/quickstart/otel/golang/golang.html#_6","title":"\u5bfc\u5165\u8ddf\u8e2a\u548c\u5c5e\u6027\u5e93","text":"
                                                  import (\n    ...\n    \"go.opentelemetry.io/otel/attribute\"\n    \"go.opentelemetry.io/otel/trace\"\n)\n
                                                  "},{"location":"admin/insight/quickstart/otel/golang/golang.html#span_2","title":"\u4ece\u4e0a\u4e0b\u6587\u4e2d\u83b7\u53d6\u5f53\u524d Span","text":"
                                                  span := trace.SpanFromContext(c.Request.Context())\n
                                                  "},{"location":"admin/insight/quickstart/otel/golang/golang.html#span_3","title":"\u5728\u5f53\u524d Span \u4e2d\u8bbe\u7f6e\u5c5e\u6027","text":"
                                                  span.SetAttributes(attribute.String(\"controller\", \"books\"))\n
                                                  "},{"location":"admin/insight/quickstart/otel/golang/golang.html#span-event","title":"\u4e3a\u5f53\u524d Span \u6dfb\u52a0 Event","text":"

Adding a span event is done with AddEvent on the span object.

                                                  span.AddEvent(msg)\n
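Events can also carry their own attributes, using the trace and attribute packages imported above; a small sketch (the event and attribute names are illustrative):

span.AddEvent(\"cache.miss\", trace.WithAttributes(\n    attribute.String(\"cache.key\", \"user:42\"),\n))\n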
                                                  "},{"location":"admin/insight/quickstart/otel/golang/golang.html#_7","title":"\u8bb0\u5f55\u9519\u8bef\u548c\u5f02\u5e38","text":"
                                                  import \"go.opentelemetry.io/otel/codes\"\n\n// \u83b7\u53d6\u5f53\u524d span\nspan := trace.SpanFromContext(ctx)\n\n// RecordError \u4f1a\u81ea\u52a8\u5c06\u4e00\u4e2a\u9519\u8bef\u8f6c\u6362\u6210 span even\nspan.RecordError(err)\n\n// \u6807\u8bb0\u8fd9\u4e2a span \u9519\u8bef\nspan.SetStatus(codes.Error, \"internal error\")\n
                                                  "},{"location":"admin/insight/quickstart/otel/golang/golang.html#_8","title":"\u53c2\u8003","text":"

For demo examples, refer to:
• opentelemetry-demo/productcatalogservice
• opentelemetry-collector-contrib/demo

                                                  "},{"location":"admin/insight/quickstart/otel/golang/meter.html","title":"\u4f7f\u7528 OTel SDK \u4e3a\u5e94\u7528\u7a0b\u5e8f\u66b4\u9732\u6307\u6807","text":"

This article is intended only for users who wish to evaluate or explore OTLP metrics, which are still under development.

The OpenTelemetry project requires that the APIs and SDKs for supported languages emit data in the OpenTelemetry Protocol (OTLP).

                                                  "},{"location":"admin/insight/quickstart/otel/golang/meter.html#golang","title":"\u9488\u5bf9 Golang \u5e94\u7528\u7a0b\u5e8f","text":"

                                                  Golang \u53ef\u4ee5\u901a\u8fc7 sdk \u66b4\u9732 runtime \u6307\u6807\uff0c\u5177\u4f53\u6765\u8bf4\uff0c\u5728\u5e94\u7528\u4e2d\u6dfb\u52a0\u4ee5\u4e0b\u65b9\u6cd5\u5f00\u542f metrics \u66b4\u9732\u5668\uff1a

                                                  "},{"location":"admin/insight/quickstart/otel/golang/meter.html#_1","title":"\u5b89\u88c5\u76f8\u5173\u4f9d\u8d56","text":"

                                                  \u5207\u6362/\u8fdb\u5165\u5230\u5e94\u7528\u7a0b\u5e8f\u6e90\u6587\u4ef6\u5939\u540e\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a

                                                  go get go.opentelemetry.io/otel \\\n  go.opentelemetry.io/otel/attribute \\\n  go.opentelemetry.io/otel/exporters/prometheus \\\n  go.opentelemetry.io/otel/metric/global \\\n  go.opentelemetry.io/otel/metric/instrument \\\n  go.opentelemetry.io/otel/sdk/metric\n
                                                  "},{"location":"admin/insight/quickstart/otel/golang/meter.html#otel-sdk_1","title":"\u4f7f\u7528 OTel SDK \u521b\u5efa\u521d\u59cb\u5316\u51fd\u6570","text":"
                                                  import (\n    .....\n\n    \"go.opentelemetry.io/otel/attribute\"\n    otelPrometheus \"go.opentelemetry.io/otel/exporters/prometheus\"\n    \"go.opentelemetry.io/otel/metric/global\"\n    \"go.opentelemetry.io/otel/metric/instrument\"\n    \"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram\"\n    controller \"go.opentelemetry.io/otel/sdk/metric/controller/basic\"\n    \"go.opentelemetry.io/otel/sdk/metric/export/aggregation\"\n    processor \"go.opentelemetry.io/otel/sdk/metric/processor/basic\"\n    selector \"go.opentelemetry.io/otel/sdk/metric/selector/simple\"\n)\nfunc (s *insightServer) initMeter() *otelPrometheus.Exporter {\n    s.meter = global.Meter(\"xxx\")\n\n    config := otelPrometheus.Config{\n        DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50},\n        Gatherer:                   prometheus.DefaultGatherer,\n        Registry:                   prometheus.NewRegistry(),\n        Registerer:                 prometheus.DefaultRegisterer,\n    }\n\n    c := controller.New(\n        processor.NewFactory(\n            selector.NewWithHistogramDistribution(\n                histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),\n            ),\n            aggregation.CumulativeTemporalitySelector(),\n            processor.WithMemory(true),\n        ),\n    )\n\n    exporter, err := otelPrometheus.New(config, c)\n    if err != nil {\n        zap.S().Panicf(\"failed to initialize prometheus exporter %v\", err)\n    }\n\n    global.SetMeterProvider(exporter.MeterProvider())\n\n    http.HandleFunc(\"/metrics\", exporter.ServeHTTP)\n\n    go func() {\n        _ = http.ListenAndServe(fmt.Sprintf(\":%d\", 8888), nil)\n    }()\n\n    zap.S().Info(\"Prometheus server running on \", fmt.Sprintf(\":%d\", port))\n    return exporter\n}\n

The method above exposes a metrics endpoint for your application at: http://localhost:8888/metrics

Then initialize it in main.go:

                                                  func main() {\n\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n    tp := initMeter()\n\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n}\n

In addition, if you want to add custom metrics, you can refer to the following:

// exposeLoggingMetric exposes a metric like \"insight_logging_count{} 1\"\nfunc (s *insightServer) exposeLoggingMetric(lserver *log.LogService) {\n    s.meter = global.Meter(\"insight.io/basic\")\n\n    var lock sync.Mutex\n    logCounter, err := s.meter.AsyncFloat64().Counter(\"insight_log_total\")\n    if err != nil {\n        zap.S().Panicf(\"failed to initialize instrument: %v\", err)\n    }\n\n    _ = s.meter.RegisterCallback([]instrument.Asynchronous{logCounter}, func(ctx context.Context) {\n        lock.Lock()\n        defer lock.Unlock()\n        count, err := lserver.Count(ctx)\n        // only record the observation when the count was retrieved successfully\n        if err == nil && count != -1 {\n            logCounter.Observe(ctx, float64(count))\n        }\n    })\n}\n

Then call this method in main.go:

                                                  \u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\ns.exposeLoggingMetric(lservice)\n\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n

You can check whether your metrics are working properly by visiting http://localhost:8888/metrics.

                                                  "},{"location":"admin/insight/quickstart/otel/golang/meter.html#java","title":"\u9488\u5bf9 Java \u5e94\u7528\u7a0b\u5e8f","text":"

                                                  Java \u5728\u4f7f\u7528 otel agent \u5728\u5b8c\u6210\u94fe\u8def\u7684\u81ea\u52a8\u63a5\u5165\u7684\u57fa\u7840\u4e0a\uff0c\u901a\u8fc7\u6dfb\u52a0\u73af\u5883\u53d8\u91cf\uff1a

                                                  OTEL_METRICS_EXPORTER=prometheus\n

This directly exposes JVM-related metrics. You can check whether your metrics are working properly by visiting http://localhost:8888/metrics.
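A hedged launch example, assuming the agent JAR path; OTEL_EXPORTER_PROMETHEUS_PORT is the SDK autoconfiguration variable, used here so the port matches the URL above (verify it against your agent version):

OTEL_METRICS_EXPORTER=prometheus \\\nOTEL_EXPORTER_PROMETHEUS_PORT=8888 \\\njava -javaagent:/path/to/opentelemetry-javaagent.jar -jar my-app.jar\n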

Then, combined with a Prometheus ServiceMonitor, the metrics integration is complete. If you want to expose custom metrics, see opentelemetry-java-docs/prometheus.

This consists of the following two steps:

• Create a meter provider and specify Prometheus as the exporter.

                                                    /*\n* Copyright The OpenTelemetry Authors\n* SPDX-License-Identifier: Apache-2.0\n*/\n\npackage io.opentelemetry.example.prometheus;\n\nimport io.opentelemetry.api.metrics.MeterProvider;\nimport io.opentelemetry.exporter.prometheus.PrometheusHttpServer;\nimport io.opentelemetry.sdk.metrics.SdkMeterProvider;\nimport io.opentelemetry.sdk.metrics.export.MetricReader;\n\npublic final class ExampleConfiguration {\n\n  /**\n  * Initializes the Meter SDK and configures the prometheus collector with all default settings.\n  *\n  * @param prometheusPort the port to open up for scraping.\n  * @return A MeterProvider for use in instrumentation.\n  */\n  static MeterProvider initializeOpenTelemetry(int prometheusPort) {\n    MetricReader prometheusReader = PrometheusHttpServer.builder().setPort(prometheusPort).build();\n\n    return SdkMeterProvider.builder().registerMetricReader(prometheusReader).build();\n  }\n}\n
• Define a custom meter and start an HTTP server

                                                    package io.opentelemetry.example.prometheus;\n\nimport io.opentelemetry.api.common.Attributes;\nimport io.opentelemetry.api.metrics.Meter;\nimport io.opentelemetry.api.metrics.MeterProvider;\nimport java.util.concurrent.ThreadLocalRandom;\n\n/**\n* Example of using the PrometheusHttpServer to convert OTel metrics to Prometheus format and expose\n* these to a Prometheus instance via a HttpServer exporter.\n*\n* <p>A Gauge is used to periodically measure how many incoming messages are awaiting processing.\n* The Gauge callback gets executed every collection interval.\n*/\npublic final class PrometheusExample {\n  private long incomingMessageCount;\n\n  public PrometheusExample(MeterProvider meterProvider) {\n    Meter meter = meterProvider.get(\"PrometheusExample\");\n    meter\n        .gaugeBuilder(\"incoming.messages\")\n        .setDescription(\"No of incoming messages awaiting processing\")\n        .setUnit(\"message\")\n        .buildWithCallback(result -> result.record(incomingMessageCount, Attributes.empty()));\n  }\n\n  void simulate() {\n    for (int i = 500; i > 0; i--) {\n      try {\n        System.out.println(\n            i + \" Iterations to go, current incomingMessageCount is:  \" + incomingMessageCount);\n        incomingMessageCount = ThreadLocalRandom.current().nextLong(100);\n        Thread.sleep(1000);\n      } catch (InterruptedException e) {\n        // ignored here\n      }\n    }\n  }\n\n  public static void main(String[] args) {\n    int prometheusPort = 8888;\n\n    // it is important to initialize the OpenTelemetry SDK as early as possible in your process.\n    MeterProvider meterProvider = ExampleConfiguration.initializeOpenTelemetry(prometheusPort);\n\n    PrometheusExample prometheusExample = new PrometheusExample(meterProvider);\n\n    prometheusExample.simulate();\n\n    System.out.println(\"Exiting\");\n  }\n}\n

Then, once the Java application is running, you can check whether your metrics are working properly by visiting http://localhost:8888/metrics.

                                                  "},{"location":"admin/insight/quickstart/otel/golang/meter.html#insight","title":"Insight \u91c7\u96c6\u6307\u6807","text":"

                                                  \u6700\u540e\u91cd\u8981\u7684\u662f\uff0c\u60a8\u5df2\u7ecf\u5728\u5e94\u7528\u7a0b\u5e8f\u4e2d\u66b4\u9732\u51fa\u4e86\u6307\u6807\uff0c\u73b0\u5728\u9700\u8981 Insight \u6765\u91c7\u96c6\u6307\u6807\u3002

                                                  \u63a8\u8350\u7684\u6307\u6807\u66b4\u9732\u65b9\u5f0f\u662f\u901a\u8fc7 servicemonitor \u6216\u8005 podmonitor\u3002

                                                  "},{"location":"admin/insight/quickstart/otel/golang/meter.html#servicemonitorpodmonitor","title":"\u521b\u5efa servicemonitor/podmonitor","text":"

                                                  \u6dfb\u52a0\u7684 servicemonitor/podmonitor \u9700\u8981\u6253\u4e0a label\uff1a\"operator.insight.io/managed-by\": \"insight\" \u624d\u4f1a\u88ab Operator \u8bc6\u522b\uff1a

                                                  apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: example-app\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  selector:\n    matchLabels:\n      app: example-app\n  endpoints:\n  - port: web\n  namespaceSelector:\n    any: true\n
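If the workload has no Service, a PodMonitor can be used instead; a hedged sketch, assuming a pod port named web and the same required label:

apiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: example-app\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  selector:\n    matchLabels:\n      app: example-app\n  podMetricsEndpoints:\n  - port: web\n  namespaceSelector:\n    any: true\n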
                                                  "},{"location":"admin/insight/quickstart/otel/java/index.html","title":"\u5f00\u59cb\u76d1\u63a7 Java \u5e94\u7528","text":"
                                                  1. Java \u5e94\u7528\u94fe\u8def\u63a5\u5165\u4e0e\u76d1\u63a7\u8bf7\u53c2\u8003 \u901a\u8fc7 Operator \u5b9e\u73b0\u5e94\u7528\u7a0b\u5e8f\u65e0\u4fb5\u5165\u589e\u5f3a \u6587\u6863\uff0c\u901a\u8fc7\u6ce8\u89e3\u5b9e\u73b0\u81ea\u52a8\u63a5\u5165\u94fe\u8def\u3002

2. Monitoring the JVM of Java applications: how Java applications that already expose JVM metrics, and those that do not yet, connect with Insight observability.

3. If your Java application does not yet expose JVM metrics, you can refer to the following documents:

  • Expose JVM metrics with JMX Exporter
  • Expose JVM metrics with the OpenTelemetry Java Agent
4. If your Java application already exposes JVM metrics, you can refer to the following document:

  • Connecting Java applications with existing JVM metrics to observability
5. Write TraceId and SpanId into Java application logs to correlate trace data with log data

                                                  "},{"location":"admin/insight/quickstart/otel/java/mdc.html","title":"\u5c06 TraceId \u548c SpanId \u5199\u5165 Java \u5e94\u7528\u65e5\u5fd7","text":"

                                                  \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528 OpenTelemetry \u5c06 TraceId \u548c SpanId \u81ea\u52a8\u5199\u5165 Java \u5e94\u7528\u65e5\u5fd7\u3002 TraceId \u4e0e SpanId \u5199\u5165\u65e5\u5fd7\u540e\uff0c\u60a8\u53ef\u4ee5\u5c06\u5206\u5e03\u5f0f\u94fe\u8def\u6570\u636e\u4e0e\u65e5\u5fd7\u6570\u636e\u5173\u8054\u8d77\u6765\uff0c\u5b9e\u73b0\u66f4\u9ad8\u6548\u7684\u6545\u969c\u8bca\u65ad\u548c\u6027\u80fd\u5206\u6790\u3002

                                                  "},{"location":"admin/insight/quickstart/otel/java/mdc.html#_1","title":"\u652f\u6301\u7684\u65e5\u5fd7\u5e93","text":"

                                                  \u66f4\u591a\u4fe1\u606f\uff0c\u8bf7\u53c2\u89c1 Logger MDC auto-instrumentation\u3002

| Logging framework | Versions supporting auto-instrumentation | Dependency required for manual instrumentation |
| --- | --- | --- |
| Log4j 1 | 1.2+ | None |
| Log4j 2 | 2.7+ | opentelemetry-log4j-context-data-2.17-autoconfigure |
| Logback | 1.0+ | opentelemetry-logback-mdc-1.0 |

"},{"location":"admin/insight/quickstart/otel/java/mdc.html#logbackspringboot","title":"Using Logback (Spring Boot Projects)","text":"

Spring Boot projects have a built-in logging framework and use Logback as the default logging implementation. If your Java project is a Spring Boot project, only a small amount of configuration is needed to write the TraceId into the logs.

Set logging.pattern.level in application.properties, adding %mdc{trace_id} and %mdc{span_id} to the log pattern.

logging.pattern.level=trace_id=%mdc{trace_id} span_id=%mdc{span_id} %5p ...omitted...\n

Example log output:

                                                  2024-06-26 10:56:31.200 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.a.c.c.C.[Tomcat].[localhost].[/]       : Initializing Spring DispatcherServlet 'dispatcherServlet'\n2024-06-26 10:56:31.201 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.s.web.servlet.DispatcherServlet        : Initializing Servlet 'dispatcherServlet'\n2024-06-26 10:56:31.209 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.s.web.servlet.DispatcherServlet        : Completed initialization in 8 ms\n2024-06-26 10:56:31.296 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=5743699405074f4e  INFO 53724 --- [nio-8081-exec-1] com.example.httpserver.ot.OTServer       : hello world\n
                                                  "},{"location":"admin/insight/quickstart/otel/java/mdc.html#log4j2","title":"\u4f7f\u7528 Log4j2","text":"
                                                  1. \u5728 pom.xml \u4e2d\u6dfb\u52a0 OpenTelemetry Log4j2 \u4f9d\u8d56:

                                                    Tip

Replace OPENTELEMETRY_VERSION with the latest version.

                                                    <dependencies>\n  <dependency>\n    <groupId>io.opentelemetry.instrumentation</groupId>\n    <artifactId>opentelemetry-log4j-context-data-2.17-autoconfigure</artifactId>\n    <version>OPENTELEMETRY_VERSION</version>\n    <scope>runtime</scope>\n  </dependency>\n</dependencies>\n
2. Modify the log4j2.xml configuration, adding %X{trace_id} and %X{span_id} to the pattern; this automatically writes the TraceId and SpanId into the logs:

                                                    <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Configuration>\n  <Appenders>\n    <Console name=\"Console\" target=\"SYSTEM_OUT\">\n      <PatternLayout\n          pattern=\"%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} trace_id=%X{trace_id} span_id=%X{span_id} trace_flags=%X{trace_flags} - %msg%n\"/>\n    </Console>\n  </Appenders>\n  <Loggers>\n    <Root>\n      <AppenderRef ref=\"Console\" level=\"All\"/>\n    </Root>\n  </Loggers>\n</Configuration>\n
3. To use Logback, add the OpenTelemetry Logback dependency in pom.xml.

                                                    Tip

Replace OPENTELEMETRY_VERSION with the latest version.

                                                    <dependencies>\n  <dependency>\n    <groupId>io.opentelemetry.instrumentation</groupId>\n    <artifactId>opentelemetry-logback-mdc-1.0</artifactId>\n    <version>OPENTELEMETRY_VERSION</version>\n  </dependency>\n</dependencies>\n
4. Modify the logback.xml configuration, adding %X{trace_id} and %X{span_id} to the pattern; this automatically writes the TraceId and SpanId into the logs:

                                                    <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<configuration>\n  <appender name=\"CONSOLE\" class=\"ch.qos.logback.core.ConsoleAppender\">\n    <encoder>\n      <pattern>%d{HH:mm:ss.SSS} trace_id=%X{trace_id} span_id=%X{span_id} trace_flags=%X{trace_flags} %msg%n</pattern>\n    </encoder>\n  </appender>\n\n  <!-- Just wrap your logging appender, for example ConsoleAppender, with OpenTelemetryAppender -->\n  <appender name=\"OTEL\" class=\"io.opentelemetry.instrumentation.logback.mdc.v1_0.OpenTelemetryAppender\">\n    <appender-ref ref=\"CONSOLE\"/>\n  </appender>\n\n  <!-- Use the wrapped \"OTEL\" appender instead of the original \"CONSOLE\" one -->\n  <root level=\"INFO\">\n    <appender-ref ref=\"OTEL\"/>\n  </root>\n\n</configuration>\n
                                                  "},{"location":"admin/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html","title":"\u4f7f\u7528 JMX Exporter \u66b4\u9732 JVM \u76d1\u63a7\u6307\u6807","text":"

                                                  JMX-Exporter \u63d0\u4f9b\u4e86\u4e24\u79cd\u7528\u6cd5:

                                                  1. \u542f\u52a8\u72ec\u7acb\u8fdb\u7a0b\u3002JVM \u542f\u52a8\u65f6\u6307\u5b9a\u53c2\u6570\uff0c\u66b4\u9732 JMX \u7684 RMI \u63a5\u53e3\uff0cJMX Exporter \u8c03\u7528 RMI \u83b7\u53d6 JVM \u8fd0\u884c\u65f6\u72b6\u6001\u6570\u636e\uff0c \u8f6c\u6362\u4e3a Prometheus metrics \u683c\u5f0f\uff0c\u5e76\u66b4\u9732\u7aef\u53e3\u8ba9 Prometheus \u91c7\u96c6\u3002
                                                  2. JVM \u8fdb\u7a0b\u5185\u542f\u52a8(in-process)\u3002JVM \u542f\u52a8\u65f6\u6307\u5b9a\u53c2\u6570\uff0c\u901a\u8fc7 javaagent \u7684\u5f62\u5f0f\u8fd0\u884c JMX-Exporter \u7684 jar \u5305\uff0c \u8fdb\u7a0b\u5185\u8bfb\u53d6 JVM \u8fd0\u884c\u65f6\u72b6\u6001\u6570\u636e\uff0c\u8f6c\u6362\u4e3a Prometheus metrics \u683c\u5f0f\uff0c\u5e76\u66b4\u9732\u7aef\u53e3\u8ba9 Prometheus \u91c7\u96c6\u3002

                                                  Note

The first mode is not officially recommended: on one hand the configuration is complex, and on the other it requires a separate process whose own monitoring then becomes a new problem. This article therefore focuses on the second mode, explaining how to use JMX Exporter to expose JVM metrics in a Kubernetes environment.

With the second mode, the JMX Exporter JAR file and its configuration file must be specified when starting the JVM. The JAR is a binary file that is awkward to mount via a ConfigMap, and the configuration file rarely needs modification, so the recommendation is to package both the JMX Exporter JAR and the configuration file directly into the business container image.

For the second mode, we can either put the JMX Exporter JAR inside the business application image or mount it at deployment time. Here is an introduction to each approach:

                                                  "},{"location":"admin/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html#jmx-exporter-jar","title":"\u65b9\u5f0f\u4e00\uff1a\u5c06 JMX Exporter JAR \u6587\u4ef6\u6784\u5efa\u81f3\u4e1a\u52a1\u955c\u50cf\u4e2d","text":"

                                                  prometheus-jmx-config.yaml \u5185\u5bb9\u5982\u4e0b\uff1a

                                                  prometheus-jmx-config.yaml
                                                  ...\nssl: false\nlowercaseOutputName: false\nlowercaseOutputLabelNames: false\nrules:\n- pattern: \".*\"\n

                                                  Note

For more configuration options, see the introduction at the bottom of this page or the official Prometheus documentation.

Then prepare the JAR file. You can find the download link for the latest JAR on the jmx_exporter GitHub page, and refer to the following Dockerfile:

                                                  FROM openjdk:11.0.15-jre\nWORKDIR /app/\nCOPY target/my-app.jar ./\nCOPY prometheus-jmx-config.yaml ./\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\nENV JAVA_TOOL_OPTIONS=-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\nEXPOSE 8081 8999 8080 8888\nENTRYPOINT java $JAVA_OPTS -jar my-app.jar\n

Notes:

• Startup parameter format: -javaagent:<jar-path>=<port>:<config-file>
• Port 8088 is used here to expose the JVM metrics; if it conflicts with the Java application, change it as needed
                                                  "},{"location":"admin/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html#init-container","title":"\u65b9\u5f0f\u4e8c\uff1a\u901a\u8fc7 init container \u5bb9\u5668\u6302\u8f7d","text":"

                                                  \u6211\u4eec\u9700\u8981\u5148\u5c06 JMX exporter \u505a\u6210 Docker \u955c\u50cf, \u4ee5\u4e0b Dockerfile \u4ec5\u4f9b\u53c2\u8003\uff1a

FROM alpine/curl:3.14\nWORKDIR /app/\n# copy the config file created earlier into the image\nCOPY prometheus-jmx-config.yaml ./\n# download the jmx prometheus javaagent jar\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\n

Build the image from the Dockerfile above: docker build -t my-jmx-exporter .

Add the following init container to the Java application's deployment YAML:

apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-demo-app\n  labels:\n    app: my-demo-app\nspec:\n  selector:\n    matchLabels:\n      app: my-demo-app\n  template:\n    metadata:\n      labels:\n        app: my-demo-app\n    spec:\n      imagePullSecrets:\n      - name: registry-pull\n      initContainers:\n      - name: jmx-sidecar\n        image: my-jmx-exporter\n        command: [\"sh\", \"-c\", \"cp /app/jmx_prometheus_javaagent-0.17.2.jar /app/prometheus-jmx-config.yaml /target/\"]  ➊\n        volumeMounts:\n        - name: sidecar\n          mountPath: /target\n      containers:\n      - image: my-demo-app-image\n        name: my-demo-app\n        resources:\n          requests:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n          limits:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n        ports:\n        - containerPort: 18083\n        env:\n        - name: JAVA_TOOL_OPTIONS\n          value: \"-javaagent:/sidecar/jmx_prometheus_javaagent-0.17.2.jar=8088:/sidecar/prometheus-jmx-config.yaml\" ➋\n        volumeMounts:\n        - name: host-time\n          mountPath: /etc/localtime\n          readOnly: true\n        - name: sidecar\n          mountPath: /sidecar\n      volumes:\n      - name: host-time\n        hostPath:\n          path: /etc/localtime\n      - name: sidecar  # shared agent folder\n        emptyDir: {}\n      restartPolicy: Always\n

➊ The init container copies the JMX Exporter JAR and its configuration file into the shared emptyDir volume, so the main container can read them from /sidecar.
➋ JAVA_TOOL_OPTIONS attaches the agent from the shared volume and exposes the metrics on port 8088.

After the modifications above, the sample application my-demo-app is able to expose JVM metrics. After running the service, you can access the Prometheus-format metrics it exposes at http://localhost:8088.
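A quick way to verify, assuming the port configured above:

curl -s http://localhost:8088/metrics | head\n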

Next, you can refer to Connecting Java applications with existing JVM metrics to observability.

                                                  "},{"location":"admin/insight/quickstart/otel/java/jvm-monitor/legacy-jvm.html","title":"\u5df2\u6709 JVM \u6307\u6807\u7684 Java \u5e94\u7528\u5bf9\u63a5\u53ef\u89c2\u6d4b\u6027","text":"

                                                  \u5982\u679c\u60a8\u7684 Java \u5e94\u7528\u901a\u8fc7\u5176\u4ed6\u65b9\u5f0f\uff08\u6bd4\u5982 Spring Boot Actuator\uff09\u66b4\u9732\u4e86 JVM \u7684\u76d1\u63a7\u6307\u6807\uff0c \u6211\u4eec\u9700\u8981\u8ba9\u76d1\u63a7\u6570\u636e\u88ab\u91c7\u96c6\u5230\u3002\u60a8\u53ef\u4ee5\u901a\u8fc7\u5728\u5de5\u4f5c\u8d1f\u8f7d\u4e2d\u6dfb\u52a0\u6ce8\u89e3\uff08Kubernetes Annotations\uff09\u7684\u65b9\u5f0f\u8ba9 Insight \u6765\u91c7\u96c6\u5df2\u6709\u7684 JVM \u6307\u6807\uff1a

annotations: \n  insight.opentelemetry.io/metric-scrape: \"true\" # whether to scrape\n  insight.opentelemetry.io/metric-path: \"/\"      # path to scrape metrics from\n  insight.opentelemetry.io/metric-port: \"9464\"   # port to scrape metrics from\n

For example, add annotations to my-deployment-app:

apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-app\nspec:\n  selector:\n    matchLabels:\n      app: my-deployment-app\n      app.kubernetes.io/name: my-deployment-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-deployment-app\n        app.kubernetes.io/name: my-deployment-app\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\" # whether to scrape\n        insight.opentelemetry.io/metric-path: \"/\"      # path to scrape metrics from\n        insight.opentelemetry.io/metric-port: \"9464\"   # port to scrape metrics from\n

Below is a complete example:

---\napiVersion: v1\nkind: Service\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  type: NodePort\n  selector:\n    #app: my-deployment-with-auto-instrumentation-app\n    app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  ports:\n    - name: http\n      port: 8080\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  selector:\n    matchLabels:\n      #app: my-deployment-with-auto-instrumentation-app\n      app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\" # whether to scrape\n        insight.opentelemetry.io/metric-path: \"/actuator/prometheus\"      # metrics path\n        insight.opentelemetry.io/metric-port: \"8080\"   # metrics port\n    spec:\n      containers:\n        - name: myapp\n          image: docker.m.daocloud.io/wutang/spring-boot-actuator-prometheus-metrics-demo\n          ports:\n            - name: http\n              containerPort: 8080\n          resources:\n            limits:\n              cpu: 500m\n              memory: 800Mi\n            requests:\n              cpu: 200m\n              memory: 400Mi\n

In the example above, Insight scrapes the Prometheus metrics exposed through Spring Boot Actuator at :8080/actuator/prometheus.

                                                  "},{"location":"admin/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html","title":"\u4f7f\u7528 OpenTelemetry Java Agent \u66b4\u9732 JVM \u76d1\u63a7\u6307\u6807","text":"

Starting with OpenTelemetry Agent v1.20.0, the agent ships with a JMX Metric Insight module. If your application is already instrumented with the OpenTelemetry Agent to collect traces, you no longer need to introduce another agent to expose JMX metrics for the application. The OpenTelemetry Agent collects and exposes metrics by instrumenting the MBeans locally available in the application.

The OpenTelemetry Agent also ships with built-in monitoring samples for common Java servers and frameworks; see the predefined metrics.

Using the OpenTelemetry Java Agent likewise requires deciding how to mount the JAR into the container. Besides mounting the JAR file the same way as with the JMX Exporter above, you can leverage the OpenTelemetry Operator to automatically enable JVM metrics exposure for your application, as sketched below.
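
A minimal sketch of the Operator-based approach, assuming the OpenTelemetry Operator is already installed in the cluster (the resource kind and annotation follow the Operator's upstream conventions; the namespace and exporter endpoint below are illustrative assumptions):

apiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: java-instrumentation\n  namespace: insight-system\nspec:\n  java:\n    image: ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:latest  # agent image injected by the Operator\n  exporter:\n    endpoint: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317  # illustrative collector endpoint\n

With this in place, annotating the workload's Pod template with instrumentation.opentelemetry.io/inject-java: \"true\" lets the Operator inject the Java agent automatically instead of mounting the JAR by hand.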


However, as of the current version, you still need to manually add the corresponding annotations to the application before the JVM data will be collected by Insight. For the specific annotations, see Integrating Java Applications That Already Expose JVM Metrics into Observability.

                                                  "},{"location":"admin/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html#java","title":"\u4e3a Java \u4e2d\u95f4\u4ef6\u66b4\u9732\u6307\u6807","text":"

                                                  Opentelemetry Agent \u4e5f\u5185\u7f6e\u4e86\u4e00\u4e9b\u4e2d\u95f4\u4ef6\u76d1\u63a7\u7684\u6837\u4f8b\uff0c\u8bf7\u53c2\u8003 \u9884\u5b9a\u4e49\u6307\u6807\u3002

                                                  \u9ed8\u8ba4\u6ca1\u6709\u6307\u5b9a\u4efb\u4f55\u7c7b\u578b\uff0c\u9700\u8981\u901a\u8fc7 -Dotel.jmx.target.system JVM Options \u6307\u5b9a,\u6bd4\u5982 -Dotel.jmx.target.system=jetty,kafka-broker \u3002
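
A minimal sketch of passing that option through the Pod spec (the agent path /otel/opentelemetry-javaagent.jar is a placeholder for wherever the agent JAR is actually mounted):

env:\n- name: JAVA_TOOL_OPTIONS\n  value: \"-javaagent:/otel/opentelemetry-javaagent.jar -Dotel.jmx.target.system=jetty,kafka-broker\"  # placeholder agent path\n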

                                                  "},{"location":"admin/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html#_1","title":"\u53c2\u8003","text":"
                                                  • Gaining JMX Metric Insights with the OpenTelemetry Java Agent

                                                  • Otel jmx metrics

                                                  "},{"location":"admin/insight/quickstart/other/install-agent-on-ocp.html","title":"OpenShift \u5b89\u88c5 Insight Agent","text":"

Although OpenShift ships with a monitoring system of its own, we still install Insight Agent because of certain conventions in how data is collected.

In addition to the basic installation configuration, the following parameters need to be added during helm install:

## fluent-bit related parameters\n--set fluent-bit.ocp.enabled=true \\\n--set fluent-bit.serviceAccount.create=false \\\n--set fluent-bit.securityContext.runAsUser=0 \\\n--set fluent-bit.securityContext.seLinuxOptions.type=spc_t \\\n--set fluent-bit.securityContext.readOnlyRootFilesystem=false \\\n--set fluent-bit.securityContext.allowPrivilegeEscalation=false \\\n\n## Enable the Prometheus (CR) adaptation for OpenShift 4.x\n--set compatibility.openshift.prometheus.enabled=true \\\n\n## Disable the high-version Prometheus instance\n--set kube-prometheus-stack.prometheus.enabled=false \\\n--set kube-prometheus-stack.kubeApiServer.enabled=false \\\n--set kube-prometheus-stack.kubelet.enabled=false \\\n--set kube-prometheus-stack.kubeControllerManager.enabled=false \\\n--set kube-prometheus-stack.coreDns.enabled=false \\\n--set kube-prometheus-stack.kubeDns.enabled=false \\\n--set kube-prometheus-stack.kubeEtcd.enabled=false \\\n--set kube-prometheus-stack.kubeScheduler.enabled=false \\\n--set kube-prometheus-stack.kubeStateMetrics.enabled=false \\\n--set kube-prometheus-stack.nodeExporter.enabled=false \\\n\n## Restrict the namespaces handled by PrometheusOperator to avoid competing with OpenShift's built-in PrometheusOperator\n--set kube-prometheus-stack.prometheusOperator.kubeletService.namespace=\"insight-system\" \\\n--set kube-prometheus-stack.prometheusOperator.prometheusInstanceNamespaces=\"insight-system\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[0]=\"openshift-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[1]=\"openshift-user-workload-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[2]=\"openshift-customer-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[3]=\"openshift-route-monitor-operator\" \\\n
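
For context, a minimal sketch of where these flags fit (the release name, chart reference, and namespace are illustrative assumptions; your actual installation command may differ):

helm upgrade --install insight-agent insight/insight-agent \\\n  --namespace insight-system --create-namespace \\\n  --set fluent-bit.ocp.enabled=true \\\n  --set compatibility.openshift.prometheus.enabled=true\n  # ...append the remaining --set flags listed above in the same way\n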
                                                  "},{"location":"admin/insight/quickstart/other/install-agent-on-ocp.html#openshift-prometheus","title":"\u901a\u8fc7 OpenShift \u81ea\u8eab\u673a\u5236\uff0c\u5c06\u7cfb\u7edf\u76d1\u63a7\u6570\u636e\u5199\u5165 Prometheus \u4e2d","text":"
                                                  apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: cluster-monitoring-config\n  namespace: openshift-monitoring\ndata:\n  config.yaml: |\n    prometheusK8s:\n      remoteWrite:\n        - queueConfig:\n            batchSendDeadline: 60s\n            maxBackoff: 5s\n            minBackoff: 30ms\n            minShards: 1\n            capacity: 5000\n            maxSamplesPerSend: 1000\n            maxShards: 100\n          remoteTimeout: 30s\n          url: http://insight-agent-prometheus.insight-system.svc.cluster.local:9090/api/v1/write\n          writeRelabelConfigs:\n            - action: keep\n              regex: etcd|kubelet|node-exporter|apiserver|kube-state-metrics\n              sourceLabels:\n                - job\n
                                                  "},{"location":"admin/insight/quickstart/res-plan/index.html","title":"\u90e8\u7f72\u5bb9\u91cf\u89c4\u5212","text":"

By default, the observability module has resource limits in place to avoid consuming excessive resources. Since the observability system has to process large volumes of data, unreasonable capacity planning may overload the system and affect its stability and reliability.

                                                  "},{"location":"admin/insight/quickstart/res-plan/index.html#_2","title":"\u89c2\u6d4b\u7ec4\u4ef6\u7684\u8d44\u6e90\u89c4\u5212","text":"

                                                  \u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u5305\u542b Insight \u548c Insight Agent\u3002\u5176\u4e2d\uff0cInsight \u4e3b\u8981\u8d1f\u8d23\u89c2\u6d4b\u6570\u636e\u7684\u5b58\u50a8\uff0c\u5206\u6790\u4e0e\u5c55\u793a\u3002\u800c Insight Agent \u5305\u542b\u4e86\u6570\u636e\u91c7\u96c6\u3001\u6570\u636e\u5904\u7406\u3001\u6570\u636e\u4e0a\u4f20\u7b49\u529f\u80fd\u3002

                                                  "},{"location":"admin/insight/quickstart/res-plan/index.html#_3","title":"\u5b58\u50a8\u7ec4\u4ef6\u7684\u5bb9\u91cf\u89c4\u5212","text":"

                                                  Insight \u7684\u5b58\u50a8\u7ec4\u4ef6\u4e3b\u8981\u5305\u62ec ElasticSearch \u548c VictoriaMetrics. \u5176\u4e2d\uff0cElasticSearch \u4e3b\u8981\u8d1f\u8d23\u5b58\u50a8\u548c\u67e5\u8be2\u65e5\u5fd7\u4e0e\u94fe\u8def\u6570\u636e\uff0cVictoriaMetrics \u4e3b\u8981\u8d1f\u8d23\u5b58\u50a8\u548c\u67e5\u8be2\u6307\u6807\u6570\u636e\u3002

                                                  • VictoriaMetircs: \u5176\u78c1\u76d8\u7528\u91cf\u4e0e\u5b58\u50a8\u7684\u6307\u6807\u6709\u5173\uff0c\u6839\u636e vmstorage \u7684\u78c1\u76d8\u89c4\u5212 \u9884\u4f30\u5bb9\u91cf\u540e \u8c03\u6574 vmstorage \u78c1\u76d8\u3002
                                                  "},{"location":"admin/insight/quickstart/res-plan/index.html#_4","title":"\u91c7\u96c6\u5668\u7684\u8d44\u6e90\u89c4\u5212","text":"

                                                  Insight Agent \u7684\u91c7\u96c6\u5668\u4e2d\u5305\u542b Proemtheus\uff0c\u867d\u7136 Prometheus \u672c\u8eab\u662f\u4e00\u4e2a\u72ec\u7acb\u7684\u7ec4\u4ef6\uff0c\u4f46\u662f\u5728 Insight Agent \u4e2d\uff0cPrometheus \u4f1a\u88ab\u7528\u4e8e\u91c7\u96c6\u6570\u636e\uff0c\u56e0\u6b64\u9700\u8981\u5bf9 Prometheus \u7684\u8d44\u6e90\u8fdb\u884c\u89c4\u5212\u3002

                                                  • Prometheus\uff1a\u5176\u8d44\u6e90\u7528\u91cf\u4e0e\u91c7\u96c6\u7684\u6307\u6807\u91cf\u6709\u5173\uff0c\u53ef\u4ee5\u53c2\u8003 Prometheus \u8d44\u6e90\u89c4\u5212 \u8fdb\u884c\u8c03\u6574\u3002
                                                  "},{"location":"admin/insight/quickstart/res-plan/modify-vms-disk.html","title":"vmstorge \u78c1\u76d8\u6269\u5bb9","text":"

                                                  \u672c\u6587\u63cf\u8ff0\u4e86 vmstorge \u78c1\u76d8\u6269\u5bb9\u7684\u65b9\u6cd5\uff0c vmstorge \u78c1\u76d8\u89c4\u8303\u8bf7\u53c2\u8003 vmstorage \u78c1\u76d8\u5bb9\u91cf\u89c4\u5212\u3002

                                                  "},{"location":"admin/insight/quickstart/res-plan/modify-vms-disk.html#_1","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"admin/insight/quickstart/res-plan/modify-vms-disk.html#_2","title":"\u5f00\u542f\u5b58\u50a8\u6c60\u6269\u5bb9","text":"
                                                  1. \u4ee5\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7ba1\u7406\u5458\u6743\u9650\u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\uff0c\u70b9\u51fb \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \uff0c\u70b9\u51fb kpanda-global-cluster \u96c6\u7fa4\u3002

                                                  2. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e(PVC) \uff0c\u627e\u5230 vmstorage \u7ed1\u5b9a\u7684\u6570\u636e\u5377\u58f0\u660e\u3002

                                                  3. \u70b9\u51fb\u67d0\u4e2a vmstorage PVC\uff0c\u8fdb\u5165 vmstorage \u7684\u6570\u636e\u5377\u58f0\u660e\u8be6\u60c5\uff0c\u786e\u8ba4\u8be5 PVC \u7ed1\u5b9a\u7684\u5b58\u50a8\u6c60\u3002

                                                  4. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a \u5bb9\u5668\u5b58\u50a8 -> \u5b58\u50a8\u6c60(SC) \uff0c\u627e\u5230 local-path \uff0c\u70b9\u51fb\u76ee\u6807\u53f3\u4fa7\u7684 \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u7f16\u8f91 \u3002

                                                  5. \u5f00\u542f \u6269\u5bb9 \u540e\u70b9\u51fb \u786e\u5b9a \u3002
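
Equivalently, a sketch of enabling expansion from the command line (this assumes the StorageClass is named local-path and that the underlying provisioner actually supports volume expansion):

kubectl patch storageclass local-path -p '{\"allowVolumeExpansion\": true}'\n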

                                                  "},{"location":"admin/insight/quickstart/res-plan/modify-vms-disk.html#vmstorage","title":"\u66f4\u6539 vmstorage \u7684\u78c1\u76d8\u5bb9\u91cf","text":"
                                                  1. \u4ee5\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7ba1\u7406\u5458\u6743\u9650\u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\uff0c\u8fdb\u5165 kpanda-global-cluster \u96c6\u7fa4\u8be6\u60c5\u3002

                                                  2. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a \u81ea\u5b9a\u4e49\u8d44\u6e90 \uff0c\u627e\u5230 vmcluster \u7684\u81ea\u5b9a\u4e49\u8d44\u6e90\u3002

                                                  3. \u70b9\u51fb\u8be5 vmcluster \u81ea\u5b9a\u4e49\u8d44\u6e90\u8fdb\u5165\u8be6\u60c5\u9875\uff0c\u5207\u6362\u5230 insight-system \u547d\u540d\u7a7a\u95f4\u4e0b\uff0c\u4ece insight-victoria-metrics-k8s-stack \u53f3\u4fa7\u83dc\u5355\u9009\u62e9 \u7f16\u8f91 YAML \u3002

                                                  4. \u6839\u636e\u56fe\u4f8b\u4fee\u6539\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                                                  5. \u518d\u6b21\u9009\u62e9\u5de6\u4fa7\u5bfc\u822a \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e(PVC) \uff0c\u627e\u5230 vmstorage \u7ed1\u5b9a\u7684\u6570\u636e\u5377\u58f0\u660e\u786e\u8ba4\u4fee\u6539\u5df2\u751f\u6548\u3002\u5728\u67d0\u4e2a PVC \u8be6\u60c5\u9875\uff0c\u70b9\u51fb\u5173\u8054\u5b58\u50a8\u6e90 (PV)\u3002

                                                  6. \u6253\u5f00\u6570\u636e\u5377\u8be6\u60c5\u9875\uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 \u66f4\u65b0 \u6309\u94ae\u3002

                                                  7. \u4fee\u6539 \u5bb9\u91cf \u540e\u70b9\u51fb \u786e\u5b9a \uff0c\u7a0d\u7b49\u7247\u523b\u7b49\u5230\u6269\u5bb9\u6210\u529f\u3002
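
The same resize can also be sketched with kubectl (the PVC name below is an illustrative placeholder; replace it with the actual PVC bound to vmstorage, and note that PVCs can only be grown, not shrunk):

kubectl -n insight-system patch pvc <vmstorage-pvc-name> \\\n  -p '{\"spec\":{\"resources\":{\"requests\":{\"storage\":\"100Gi\"}}}}'\n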

                                                  "},{"location":"admin/insight/quickstart/res-plan/modify-vms-disk.html#_3","title":"\u514b\u9686\u5b58\u50a8\u5377","text":"

                                                  \u82e5\u5b58\u50a8\u5377\u6269\u5bb9\u5931\u8d25\uff0c\u53ef\u53c2\u8003\u4ee5\u4e0b\u65b9\u6cd5\u514b\u9686\u5b58\u50a8\u5377\u3002

                                                  1. \u4ee5\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7ba1\u7406\u5458\u6743\u9650\u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\uff0c\u8fdb\u5165 kpanda-global-cluster \u96c6\u7fa4\u8be6\u60c5\u3002

                                                  2. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a \u5de5\u4f5c\u8d1f\u8f7d -> \u6709\u72b6\u6001\u8d1f\u8f7d \uff0c\u627e\u5230 vmstorage \u7684\u6709\u72b6\u6001\u8d1f\u8f7d\uff0c\u70b9\u51fb\u76ee\u6807\u53f3\u4fa7\u7684 \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u72b6\u6001 -> \u505c\u6b62 -> \u786e\u5b9a \u3002

                                                  3. \u5728\u547d\u4ee4\u884c\u4e2d\u767b\u5f55 kpanda-global-cluster \u96c6\u7fa4\u7684 master \u8282\u70b9\u540e\uff0c\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u590d\u5236 vmstorage \u5bb9\u5668\u4e2d\u7684 vm-data \u76ee\u5f55\u5c06\u6307\u6807\u4fe1\u606f\u5b58\u50a8\u5728\u672c\u5730\uff1a

                                                    kubectl cp -n insight-system vmstorage-insight-victoria-metrics-k8s-stack-1:vm-data ./vm-data\n
4. Log in to the AI 算力中心 platform, enter the kpanda-global-cluster cluster details, select Container Storage -> PersistentVolumes (PV) in the left navigation, click Clone in the upper right corner, and modify the capacity of the data volume.

5. Delete the previous vmstorage data volume.

6. Wait a moment until the PVC is bound to the cloned data volume, then run the following command to import the data exported in step 3 into the corresponding container, and then start the previously paused vmstorage.

                                                    kubectl cp -n insight-system ./vm-data vmstorage-insight-victoria-metrics-k8s-stack-1:vm-data\n
                                                  "},{"location":"admin/insight/quickstart/res-plan/prometheus-res.html","title":"Prometheus \u8d44\u6e90\u89c4\u5212","text":"

                                                  Prometheus \u5728\u5b9e\u9645\u4f7f\u7528\u8fc7\u7a0b\u4e2d\uff0c\u53d7\u5230\u96c6\u7fa4\u5bb9\u5668\u6570\u91cf\u4ee5\u53ca\u5f00\u542f Istio \u7684\u5f71\u54cd\uff0c\u4f1a\u5bfc\u81f4 Prometheus \u7684 CPU\u3001\u5185\u5b58\u7b49\u8d44\u6e90\u4f7f\u7528\u91cf\u8d85\u51fa\u8bbe\u5b9a\u7684\u8d44\u6e90\u3002

                                                  \u4e3a\u4e86\u4fdd\u8bc1\u4e0d\u540c\u89c4\u6a21\u96c6\u7fa4\u4e0b Prometheus \u7684\u6b63\u5e38\u8fd0\u884c\uff0c\u9700\u8981\u6839\u636e\u96c6\u7fa4\u7684\u5b9e\u9645\u89c4\u6a21\u5bf9 Prometheus \u8fdb\u884c\u8d44\u6e90\u8c03\u6574\u3002

                                                  "},{"location":"admin/insight/quickstart/res-plan/prometheus-res.html#_1","title":"\u53c2\u8003\u8d44\u6e90\u89c4\u5212","text":"

                                                  \u5728\u672a\u5f00\u542f\u7f51\u683c\u60c5\u51b5\u4e0b\uff0c\u6d4b\u8bd5\u60c5\u51b5\u7edf\u8ba1\u51fa\u7cfb\u7edf Job \u6307\u6807\u91cf\u4e0e Pod \u7684\u5173\u7cfb\u4e3a Series \u6570\u91cf = 800 * Pod \u6570\u91cf

                                                  \u5728\u5f00\u542f\u670d\u52a1\u7f51\u683c\u65f6\uff0c\u5f00\u542f\u529f\u80fd\u540e Pod \u4ea7\u751f\u7684 Istio \u76f8\u5173\u6307\u6807\u6570\u91cf\u7ea7\u4e3a Series \u6570\u91cf = 768 * Pod \u6570\u91cf

                                                  "},{"location":"admin/insight/quickstart/res-plan/prometheus-res.html#_2","title":"\u5f53\u672a\u5f00\u542f\u670d\u52a1\u7f51\u683c\u65f6","text":"

                                                  \u4ee5\u4e0b\u8d44\u6e90\u89c4\u5212\u4e3a \u672a\u5f00\u542f\u670d\u52a1\u7f51\u683c \u573a\u666f\u4e0b\uff0cPrometheus \u7684\u8d44\u6e90\u89c4\u5212\u63a8\u8350\uff1a

| Cluster size (Pods) | Metric volume (mesh disabled) | CPU (cores) | Memory (GB) |
| --- | --- | --- | --- |
| 100 | 80k | Request: 0.5, Limit: 1 | Request: 2GB, Limit: 4GB |
| 200 | 160k | Request: 1, Limit: 1.5 | Request: 3GB, Limit: 6GB |
| 300 | 240k | Request: 1, Limit: 2 | Request: 3GB, Limit: 6GB |
| 400 | 320k | Request: 1, Limit: 2 | Request: 4GB, Limit: 8GB |
| 500 | 400k | Request: 1.5, Limit: 3 | Request: 5GB, Limit: 10GB |
| 800 | 640k | Request: 2, Limit: 4 | Request: 8GB, Limit: 16GB |
| 1000 | 800k | Request: 2.5, Limit: 5 | Request: 9GB, Limit: 18GB |
| 2000 | 1.6M | Request: 3.5, Limit: 7 | Request: 20GB, Limit: 40GB |
| 3000 | 2.4M | Request: 4, Limit: 8 | Request: 33GB, Limit: 66GB |
"},{"location":"admin/insight/quickstart/res-plan/prometheus-res.html#_3","title":"When the Service Mesh Is Enabled","text":"

The following is the recommended Prometheus resource planning for the scenario where the service mesh is enabled:

| Cluster size (Pods) | Metric volume (mesh enabled) | CPU (cores) | Memory (GB) |
| --- | --- | --- | --- |
| 100 | 150k | Request: 1, Limit: 2 | Request: 3GB, Limit: 6GB |
| 200 | 310k | Request: 2, Limit: 3 | Request: 5GB, Limit: 10GB |
| 300 | 460k | Request: 2, Limit: 4 | Request: 6GB, Limit: 12GB |
| 400 | 620k | Request: 2, Limit: 4 | Request: 8GB, Limit: 16GB |
| 500 | 780k | Request: 3, Limit: 6 | Request: 10GB, Limit: 20GB |
| 800 | 1.25M | Request: 4, Limit: 8 | Request: 15GB, Limit: 30GB |
| 1000 | 1.56M | Request: 5, Limit: 10 | Request: 18GB, Limit: 36GB |
| 2000 | 3.12M | Request: 7, Limit: 14 | Request: 40GB, Limit: 80GB |
| 3000 | 4.68M | Request: 8, Limit: 16 | Request: 65GB, Limit: 130GB |

Note

1. The Pod count in the table refers to Pods running essentially steadily in the cluster. If large numbers of Pods restart, the metric volume will spike for a short period and resources should be scaled up accordingly.
2. Prometheus keeps two hours of data in memory by default, and when the Remote Write feature is enabled in the cluster it consumes additional memory; a resource overcommit ratio of 2 is recommended.
3. The values in the table are recommendations for the general case. If your environment has precise resource requirements, check the actual resource usage of the corresponding Prometheus after the cluster has run for a while, and configure it precisely.
                                                  "},{"location":"admin/insight/quickstart/res-plan/vms-res-plan.html","title":"vmstorage \u78c1\u76d8\u5bb9\u91cf\u89c4\u5212","text":"

                                                  vmstorage \u662f\u8d1f\u8d23\u5b58\u50a8\u53ef\u89c2\u6d4b\u6027\u591a\u96c6\u7fa4\u6307\u6807\u3002 \u4e3a\u4fdd\u8bc1 vmstorage \u7684\u7a33\u5b9a\u6027\uff0c\u9700\u8981\u6839\u636e\u96c6\u7fa4\u6570\u91cf\u53ca\u96c6\u7fa4\u89c4\u6a21\u8c03\u6574 vmstorage \u7684\u78c1\u76d8\u5bb9\u91cf\u3002 \u66f4\u591a\u8d44\u6599\u8bf7\u53c2\u8003\uff1avmstorage \u4fdd\u7559\u671f\u4e0e\u78c1\u76d8\u7a7a\u95f4\u3002

                                                  "},{"location":"admin/insight/quickstart/res-plan/vms-res-plan.html#_1","title":"\u6d4b\u8bd5\u7ed3\u679c","text":"

                                                  \u7ecf\u8fc7 14 \u5929\u5bf9\u4e0d\u540c\u89c4\u6a21\u7684\u96c6\u7fa4\u7684 vmstorage \u7684\u78c1\u76d8\u89c2\u6d4b\uff0c \u6211\u4eec\u53d1\u73b0 vmstorage \u7684\u78c1\u76d8\u7528\u91cf\u4e0e\u5176\u5b58\u50a8\u7684\u6307\u6807\u91cf\u548c\u5355\u4e2a\u6570\u636e\u70b9\u5360\u7528\u78c1\u76d8\u6b63\u76f8\u5173\u3002

                                                  1. \u77ac\u65f6\u5b58\u50a8\u7684\u6307\u6807\u91cf increase(vm_rows{ type != \"indexdb\"}[30s]) \u4ee5\u83b7\u53d6 30s \u5185\u589e\u52a0\u7684\u6307\u6807\u91cf
                                                  2. \u5355\u4e2a\u6570\u636e\u70b9 (datapoint) \u7684\u5360\u7528\u78c1\u76d8\uff1a sum(vm_data_size_bytes{type!=\"indexdb\"}) /\u00a0sum(vm_rows{type\u00a0!=\u00a0\"indexdb\"})
                                                  "},{"location":"admin/insight/quickstart/res-plan/vms-res-plan.html#_2","title":"\u8ba1\u7b97\u65b9\u6cd5","text":"

                                                  \u78c1\u76d8\u7528\u91cf = \u77ac\u65f6\u6307\u6807\u91cf x 2 x \u5355\u4e2a\u6570\u636e\u70b9\u7684\u5360\u7528\u78c1\u76d8 x 60 x 24 x \u5b58\u50a8\u65f6\u95f4 (\u5929)

                                                  \u53c2\u6570\u8bf4\u660e\uff1a

                                                  1. \u78c1\u76d8\u7528\u91cf\u5355\u4f4d\u4e3a Byte \u3002
                                                  2. \u5b58\u50a8\u65f6\u957f(\u5929) x 60 x 24 \u5c06\u65f6\u95f4(\u5929)\u6362\u7b97\u6210\u5206\u949f\u4ee5\u4fbf\u8ba1\u7b97\u78c1\u76d8\u7528\u91cf\u3002
                                                  3. Insight Agent \u4e2d Prometheus \u9ed8\u8ba4\u91c7\u96c6\u65f6\u95f4\u4e3a 30s \uff0c\u6545\u5728 1 \u5206\u949f\u5185\u4ea7\u751f\u4e24\u500d\u7684\u6307\u6807\u91cf\u3002
                                                  4. vmstorage \u4e2d\u9ed8\u8ba4\u5b58\u50a8\u65f6\u957f\u4e3a 1 \u4e2a\u6708\uff0c\u4fee\u6539\u914d\u7f6e\u8bf7\u53c2\u8003\u4fee\u6539\u7cfb\u7edf\u914d\u7f6e\u3002

                                                  Warning

This formula is a general approach; it is recommended to reserve extra disk capacity on top of the calculated result to keep vmstorage running properly.

                                                  "},{"location":"admin/insight/quickstart/res-plan/vms-res-plan.html#_3","title":"\u53c2\u8003\u5bb9\u91cf","text":"

                                                  \u8868\u683c\u4e2d\u6570\u636e\u662f\u6839\u636e\u9ed8\u8ba4\u5b58\u50a8\u65f6\u95f4\u4e3a\u4e00\u4e2a\u6708 (30 \u5929)\uff0c\u5355\u4e2a\u6570\u636e\u70b9 (datapoint) \u7684\u5360\u7528\u78c1\u76d8\u53d6 0.9 \u8ba1\u7b97\u6240\u5f97\u7ed3\u679c\u3002 \u591a\u96c6\u7fa4\u573a\u666f\u4e0b\uff0cPod \u6570\u91cf\u8868\u793a\u591a\u96c6\u7fa4 Pod \u6570\u91cf\u7684\u603b\u548c\u3002

                                                  "},{"location":"admin/insight/quickstart/res-plan/vms-res-plan.html#_4","title":"\u5f53\u672a\u5f00\u542f\u670d\u52a1\u7f51\u683c\u65f6","text":"\u96c6\u7fa4\u89c4\u6a21 (Pod \u6570) \u6307\u6807\u91cf \u78c1\u76d8\u5bb9\u91cf 100 8w 6 GiB 200 16w 12 GiB 300 24w 18 GiB 400 32w 24 GiB 500 40w 30 GiB 800 64w 48 GiB 1000 80w 60 GiB 2000 160w 120 GiB 3000 240w 180 GiB"},{"location":"admin/insight/quickstart/res-plan/vms-res-plan.html#_5","title":"\u5f53\u5f00\u542f\u670d\u52a1\u7f51\u683c\u65f6","text":"\u96c6\u7fa4\u89c4\u6a21 (Pod \u6570) \u6307\u6807\u91cf \u78c1\u76d8\u5bb9\u91cf 100 15w 12 GiB 200 31w 24 GiB 300 46w 36 GiB 400 62w 48 GiB 500 78w 60 GiB 800 125w 94 GiB 1000 156w 120 GiB 2000 312w 235 GiB 3000 468w 350 GiB"},{"location":"admin/insight/quickstart/res-plan/vms-res-plan.html#_6","title":"\u4e3e\u4f8b\u8bf4\u660e","text":"

There are two clusters on the AI 算力中心 platform: the global service cluster (service mesh enabled) runs 500 Pods, and a worker cluster (service mesh disabled) runs 1000 Pods. Metrics are expected to be retained for 30 days.

• Metric volume in the global service cluster: 800 x 500 + 768 x 500 = 784000
• Metric volume in the worker cluster: 800 x 1000 = 800000

The current vmstorage disk capacity should therefore be set to (784000 + 800000) x 2 x 0.9 x 60 x 24 x 30 = 123171840000 bytes ≈ 115 GiB.

                                                  Note

For the relationship between a cluster's metric volume and its Pod count, see Prometheus Resource Planning.

                                                  "},{"location":"admin/insight/reference/alertnotification.html","title":"\u544a\u8b66\u901a\u77e5\u6d41\u7a0b\u8bf4\u660e","text":"

                                                  \u5728\u521b\u5efa\u544a\u8b66\u7b56\u7565\u65f6\uff0c\u53ef\u89c2\u6d4b\u6027 Insight \u652f\u6301\u4e3a\u540c\u7b56\u7565\u4e0b\u4e0d\u540c\u7ea7\u522b\u89e6\u53d1\u7684\u544a\u8b66\u914d\u7f6e\u4e0d\u540c\u7684\u901a\u77e5\u53d1\u9001\u95f4\u9694\uff0c\u4f46\u7531\u4e8e\u5728 Alertmanager \u539f\u751f\u914d\u7f6e\u4e2d\u5b58\u5728 group_interval \u548c repeat_interval \u4e24\u4e2a\u53c2\u6570\uff0c\u4f1a\u5bfc\u81f4\u544a\u8b66\u901a\u77e5\u7684\u5b9e\u9645\u53d1\u9001\u95f4\u9694\u5b58\u5728\u504f\u5dee\u3002

                                                  "},{"location":"admin/insight/reference/alertnotification.html#_2","title":"\u53c2\u6570\u914d\u7f6e","text":"

                                                  \u5728 Alertmanager \u914d\u7f6e\u5982\u4e0b\uff1a

                                                  route:  \n  group_by: [\"rulename\"]\n  group_wait: 30s\n  group_interval: 5m\n  repeat_interval: 1h\n

Parameter notes:

• group_wait: sets how long to wait before sending the first notification for a group of alerts. When Alertmanager receives a group of alerts, if no further alerts arrive within the time specified by group_wait, it waits that long to collect more alerts with the same labels and content and adds all matching alerts to the same notification.

• group_interval: sets how long a group of alerts waits before being merged into a single notification. If no more alerts from the same group are received within this time, Alertmanager sends a notification containing all alerts received so far.

• repeat_interval: sets the interval at which alert notifications are resent. After Alertmanager delivers a notification to a receiver, if it keeps receiving alerts with the same labels and content within the time specified by repeat_interval, it resends the notification.

When group_wait, group_interval, and repeat_interval are all set, Alertmanager handles notifications for alerts in the same group as follows:

1. When Alertmanager receives matching alerts, it waits at least the time specified by group_wait to collect more alerts with the same labels and content, and adds all matching alerts to the same notification.

2. If no further alerts are received within group_wait, Alertmanager sends all collected alerts to the receiver after that time. If other matching alerts arrive during this period, Alertmanager keeps waiting until all alerts are collected or a timeout occurs.

3. If more alerts with the same labels and content arrive within the time specified by group_interval, the new alerts are added to the previous notification and sent together. If unsent alerts remain after group_interval ends, Alertmanager starts a new timing cycle and waits for more alerts until group_interval elapses again or a new alert arrives.

4. If alerts with the same labels and content keep arriving within the time specified by repeat_interval, Alertmanager resends the previously sent notifications. When resending, it no longer waits for group_wait or group_interval, but repeats the notification at the interval given by repeat_interval.

5. If unsent alerts remain after repeat_interval ends, Alertmanager starts a new timing cycle and keeps waiting for new alerts with the same labels and content. This continues until there are no new alerts or Alertmanager is stopped.

                                                  "},{"location":"admin/insight/reference/alertnotification.html#_3","title":"\u4e3e\u4f8b\u8bf4\u660e","text":"

                                                  \u5728\u4e0b\u8ff0\u793a\u4f8b\u4e2d\uff0cAlertmanager \u5c06\u6240\u6709 CPU \u4f7f\u7528\u7387\u9ad8\u4e8e\u9608\u503c\u7684\u544a\u8b66\u5206\u914d\u5230\u4e00\u4e2a\u540d\u4e3a\u201ccritical_alerts\u201d\u7684\u7b56\u7565\u4e2d\u3002

# Alert rule (Prometheus rule file)\ngroups:\n- name: critical_alerts\n  rules:\n  - alert: HighCPUUsage\n    expr: node_cpu_seconds_total{mode=\"idle\"} < 50\n    for: 5m\n    labels:\n      severity: critical\n    annotations:\n      summary: \"High CPU usage detected on instance {{ $labels.instance }}\"\n\n# Grouping and repeat settings (Alertmanager route)\nroute:\n  group_by: [rulename]\n  group_wait: 30s\n  group_interval: 5m\n  repeat_interval: 1h\n

In this case:

• When Alertmanager receives an alert, it waits at least 30 seconds to collect more alerts with the same labels and content and adds them to the same notification.
• If more alerts with the same labels and content are received within 5 minutes, the new alerts are also added to the previous notification and sent together. If unsent alerts remain after 15 minutes, Alertmanager starts a new timing cycle and waits for more alerts until 5 minutes elapse again or a new alert arrives.
• If alerts with the same labels and content keep arriving for 1 hour, Alertmanager resends the previously sent notifications.

                                                  "},{"location":"admin/insight/reference/lucene.html","title":"Lucene \u8bed\u6cd5\u4f7f\u7528\u65b9\u6cd5","text":""},{"location":"admin/insight/reference/lucene.html#lucene_1","title":"Lucene \u7b80\u4ecb","text":"

                                                  Lucene \u662f Apache \u8f6f\u4ef6\u57fa\u91d1\u4f1a 4 jakarta \u9879\u76ee\u7ec4\u7684\u4e00\u4e2a\u5b50\u9879\u76ee\uff0c\u662f\u4e00\u4e2a\u5f00\u653e\u6e90\u4ee3\u7801\u7684\u5168\u6587\u68c0\u7d22\u5f15\u64ce\u5de5\u5177\u5305\u3002 Lucene \u7684\u76ee\u7684\u662f\u4e3a\u8f6f\u4ef6\u5f00\u53d1\u4eba\u5458\u63d0\u4f9b\u4e00\u4e2a\u7b80\u5355\u6613\u7528\u7684\u5de5\u5177\u5305\uff0c\u4ee5\u65b9\u4fbf\u7684\u5728\u76ee\u6807\u7cfb\u7edf\u4e2d\u5b9e\u73b0\u5168\u6587\u68c0\u7d22\u7684\u529f\u80fd\u3002

                                                  "},{"location":"admin/insight/reference/lucene.html#lucene_2","title":"Lucene \u8bed\u6cd5","text":"

                                                  Lucene \u7684\u8bed\u6cd5\u641c\u7d22\u683c\u5f0f\u5141\u8bb8\u60a8\u4ee5\u7075\u6d3b\u7684\u65b9\u5f0f\u6784\u5efa\u641c\u7d22\u67e5\u8be2\uff0c\u4ee5\u6ee1\u8db3\u4e0d\u540c\u7684\u641c\u7d22\u9700\u6c42\u3002\u4ee5\u4e0b\u662f Lucene \u7684\u8bed\u6cd5\u641c\u7d22\u683c\u5f0f\u7684\u8be6\u7ec6\u8bf4\u660e\uff1a

                                                  "},{"location":"admin/insight/reference/lucene.html#_1","title":"\u5173\u952e\u5b57\u67e5\u8be2","text":"

                                                  \u8981\u901a\u8fc7 Lucene \u8bed\u6cd5\u5b9e\u73b0\u591a\u4e2a\u5173\u952e\u5b57\u7684\u67e5\u8be2\uff0c\u60a8\u53ef\u4ee5\u4f7f\u7528\u5e03\u5c14\u903b\u8f91\u64cd\u4f5c\u7b26\u6765\u7ec4\u5408\u591a\u4e2a\u5173\u952e\u5b57\u3002Lucene \u652f\u6301\u4ee5\u4e0b\u51e0\u79cd\u64cd\u4f5c\u7b26\uff1a

1. AND operator

  • Use AND or && to express a logical AND.
  • Example: term1 AND term2 or term1 && term2
2. OR operator

  • Use OR or || to express a logical OR.
  • Example: term1 OR term2 or term1 || term2
3. NOT operator

  • Use NOT or the - prefix to express a logical NOT.
  • Example: term1 NOT term2 or term1 -term2
4. Quotes

  • You can wrap a phrase in quotes for an exact match.
  • Example: \"exact phrase\"
                                                  "},{"location":"admin/insight/reference/lucene.html#_2","title":"\u4e3e\u4f8b","text":"
                                                  1. \u6307\u5b9a\u5b57\u6bb5

                                                    field1:keyword1 AND (field2:keyword2 OR field3:keyword3) NOT field4:keyword4\n

  Explanation:

  • Field field1 must contain keyword1.
  • In addition, field2 must contain keyword2, or field3 must contain keyword3.
  • Finally, field4 must not contain keyword4.
2. Without fields specified

                                                    keyword1 AND (keyword2 OR keyword3) NOT keyword4\n

  Explanation:

  • keyword1 must appear in any searchable field.
  • In addition, keyword2 must appear, or keyword3 must appear, in any searchable field.
  • Finally, keyword4 must not appear in any searchable field.
                                                  "},{"location":"admin/insight/reference/lucene.html#_3","title":"\u6a21\u7cca\u67e5\u8be2","text":"

                                                  \u5728 Lucene \u4e2d\uff0c\u6a21\u7cca\u67e5\u8be2\u53ef\u4ee5\u901a\u8fc7\u6ce2\u6d6a\u53f7 ~ \u6765\u5b9e\u73b0\u8fd1\u4f3c\u5339\u914d\u3002\u60a8\u53ef\u4ee5\u6307\u5b9a\u4e00\u4e2a\u7f16\u8f91\u8ddd\u79bb\u6765\u9650\u5236\u5339\u914d\u7684\u76f8\u4f3c\u5ea6\u7a0b\u5ea6\u3002

                                                  term~\n

In the example above, term is the keyword to fuzzy-match.

Note the following:

• The tilde ~ can be followed by an optional parameter that controls the similarity of the fuzzy query.
• The parameter ranges from 0 to 2, where 0 means an exact match, 1 allows matching within one edit operation (inserting, deleting, or replacing a character), and 2 allows matching within two edit operations.
• If no parameter is specified, a default similarity threshold of 0.5 is used.
• Fuzzy queries return documents similar to the given keyword, but at some performance cost, especially on larger indexes. A short example follows below.
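
For instance, a small illustrative query: test~1 matches terms within one edit operation of \"test\", such as \"text\" or \"tests\":

test~1\n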
                                                  "},{"location":"admin/insight/reference/lucene.html#_4","title":"\u901a\u914d\u7b26","text":"

                                                  Lucene \u652f\u6301\u4ee5\u4e0b\u4e24\u79cd\u901a\u914d\u7b26\u67e5\u8be2\uff1a

                                                  1. * \u901a\u914d\u7b26\uff1a * \u7528\u4e8e\u5339\u914d\u96f6\u4e2a\u6216\u591a\u4e2a\u5b57\u7b26\u3002

                                                    \u4f8b\u5982\uff0c te*t \u00a0\u53ef\u4ee5\u5339\u914d \"test\"\u3001\"text\"\u3001\"tempest\" \u7b49\u3002

                                                  2. ? \u901a\u914d\u7b26\uff1a ? \u7528\u4e8e\u5339\u914d\u5355\u4e2a\u5b57\u7b26\u3002

                                                    \u4f8b\u5982\uff0c te?t \u00a0\u53ef\u4ee5\u5339\u914d \"test\"\u3001\"text\" \u7b49\u3002

                                                  "},{"location":"admin/insight/reference/lucene.html#_5","title":"\u4e3e\u4f8b\u8bf4\u660e","text":"
                                                  te?t\n

In the example above, te?t matches words that start with \"te\", followed by any single character, and end with \"t\". Such a query can match \"test\", \"text\", \"tent\", and so on.

Note that a question mark stands for exactly one character. To match multiple characters or a variable number of characters, use the asterisk * for multi-character wildcard matching. Also note that a question mark does not match an empty string.

In summary, the question mark ? in Lucene syntax is a single-character wildcard that matches exactly one arbitrary character. By using question marks in search keywords, you can perform more flexible and specific pattern matching.

                                                  "},{"location":"admin/insight/reference/lucene.html#_6","title":"\u8303\u56f4\u67e5\u8be2","text":"

                                                  Lucene \u8bed\u6cd5\u652f\u6301\u8303\u56f4\u67e5\u8be2\uff0c\u60a8\u53ef\u4ee5\u4f7f\u7528\u65b9\u62ec\u53f7 [ ] \u6216\u82b1\u62ec\u53f7 { } \u6765\u8868\u793a\u8303\u56f4\u3002\u4ee5\u4e0b\u662f\u8303\u56f4\u67e5\u8be2\u7684\u793a\u4f8b\uff1a

1. Range query including the boundaries:

  • Square brackets [ ] denote a closed interval that includes the boundary values.
  • Example: field:[value1 TO value2] means field ranges from value1 to value2, inclusive.
2. Range query excluding the boundaries:

  • Curly braces { } denote an open interval that excludes the boundary values.
  • Example: field:{value1 TO value2} means field lies between value1 and value2, exclusive.
3. Range query with omitted boundaries:

  • One or both boundary values can be omitted to express an unbounded range.
  • Example: field:[value TO ] means field ranges from value to positive infinity; field:[ TO value] means field ranges from negative infinity to value.

                                                    Note

Note that range queries only apply to field types that can be ordered, such as numbers and dates. Also make sure that the boundary values in the query are specified using the field's actual value type. If you want to run a range query across the entire index without specifying a particular field, use the wildcard query * in place of the field name.

                                                  "},{"location":"admin/insight/reference/lucene.html#_7","title":"\u4e3e\u4f8b\u8bf4\u660e","text":"
                                                  1. \u6307\u5b9a\u5b57\u6bb5

                                                    timestamp:[2022-01-01 TO 2022-01-31]\n

  This retrieves data whose timestamp field lies between January 1, 2022 and January 31, 2022.

2. Without a field specified

                                                    *:[value1 TO value2]\n

  This searches the whole index for documents whose values range from value1 to value2.

                                                  "},{"location":"admin/insight/reference/lucene.html#insight","title":"Insight \u5e38\u7528\u5173\u952e\u5b57","text":""},{"location":"admin/insight/reference/lucene.html#_8","title":"\u5bb9\u5668\u65e5\u5fd7","text":"
                                                  • kubernetes.container_image: \u5bb9\u5668\u955c\u50cf\u540d\u79f0
                                                  • kubernetes.container_name: \u5bb9\u5668\u540d\u79f0
                                                  • kubernetes.namespace_name: \u547d\u540d\u7a7a\u95f4\u540d\u79f0
                                                  • kubernetes.pod_name: Pod \u540d\u79f0
                                                  • log: \u65e5\u5fd7\u5185\u5bb9
                                                  • time: \u65e5\u5fd7\u65f6\u95f4\u6233
                                                  "},{"location":"admin/insight/reference/lucene.html#_9","title":"\u4e3b\u673a\u65e5\u5fd7","text":"
                                                  • syslog.file: \u65e5\u5fd7\u6587\u4ef6\u8def\u5f84
                                                  • syslog.host: \u4e3b\u673a\u540d\u79f0
                                                  • log: \u65e5\u5fd7\u5185\u5bb9

If you want to exactly match a specific value, append the .keyword suffix to the keyword, for example kubernetes.container_name.keyword.

                                                  "},{"location":"admin/insight/reference/lucene.html#_10","title":"\u793a\u4f8b","text":"
                                                  1. \u67e5\u8be2\u6307\u5b9a Pod \u4e2d\u6307\u5b9a\u5bb9\u5668\u7684\u65e5\u5fd7

                                                    kubernetes.pod_name.keyword:nginx-pod AND kubernetes.container_name.keyword:nginx\n
2. Query container logs whose Pod name contains nginx-pod

                                                    kubernetes.pod_name:nginx-pod\n
                                                  "},{"location":"admin/insight/reference/notify-helper.html","title":"\u901a\u77e5\u6a21\u677f\u4f7f\u7528\u8bf4\u660e","text":""},{"location":"admin/insight/reference/notify-helper.html#go-template","title":"\u6a21\u677f\u8bed\u6cd5\uff08Go Template\uff09\u8bf4\u660e","text":"

                                                  \u544a\u8b66\u901a\u77e5\u6a21\u677f\u91c7\u7528\u4e86 Go Template \u8bed\u6cd5\u6765\u6e32\u67d3\u6a21\u677f\u3002

                                                  \u6a21\u677f\u4f1a\u57fa\u4e8e\u4e0b\u9762\u7684\u6570\u636e\u8fdb\u884c\u6e32\u67d3\u3002

                                                  {\n    \"status\": \"firing\",\n    \"labels\": {\n        \"alertgroup\": \"test-group\",           // \u544a\u8b66\u7b56\u7565\u540d\u79f0\n        \"alertname\": \"test-rule\",          // \u544a\u8b66\u89c4\u5219\u540d\u79f0\n        \"cluster\": \"35b54a48-b66c-467b-a8dc-503c40826330\",\n        \"customlabel1\": \"v1\",\n        \"customlabel2\": \"v2\",\n        \"endpoint\": \"https\",\n        \"group_id\": \"01gypg06fcdf7rmqc4ksv97646\",\n        \"instance\": \"10.6.152.85:6443\",\n        \"job\": \"apiserver\",\n        \"namespace\": \"default\",\n        \"prometheus\": \"insight-system/insight-agent-kube-prometh-prometheus\",\n        \"prometheus_replica\": \"prometheus-insight-agent-kube-prometh-prometheus-0\",\n        \"rule_id\": \"01gypg06fcyn2g9zyehbrvcdfn\",\n        \"service\": \"kubernetes\",\n        \"severity\": \"critical\",\n        \"target\": \"35b54a48-b66c-467b-a8dc-503c40826330\",\n        \"target_type\": \"cluster\"\n   },\n    \"annotations\": {\n        \"customanno1\": \"v1\",\n        \"customanno2\": \"v2\",\n        \"description\": \"\u8fd9\u662f\u4e00\u6761\u6d4b\u8bd5\u89c4\u5219\uff0c10.6.152.85:6443 down\",\n        \"value\": \"1\"\n    },\n    \"startsAt\": \"2023-04-20T07:53:54.637363473Z\",\n    \"endsAt\": \"0001-01-01T00:00:00Z\",\n    \"generatorURL\": \"http://vmalert-insight-victoria-metrics-k8s-stack-df987997b-npsl9:8080/vmalert/alert?group_id=16797738747470868115&alert_id=10071735367745833597\",\n    \"fingerprint\": \"25c8d93d5bf58ac4\"\n}\n
                                                  "},{"location":"admin/insight/reference/notify-helper.html#_2","title":"\u4f7f\u7528\u8bf4\u660e","text":"
                                                  1. . \u5b57\u7b26

                                                    \u5728\u5f53\u524d\u4f5c\u7528\u57df\u4e0b\u6e32\u67d3\u6307\u5b9a\u5bf9\u8c61\u3002

                                                    \u793a\u4f8b 1: \u53d6\u9876\u7ea7\u4f5c\u7528\u57df\u4e0b\u7684\u6240\u6709\u5185\u5bb9\uff0c\u5373\u793a\u4f8b\u4ee3\u7801\u4e2d\u4e0a\u4e0b\u6587\u6570\u636e\u7684\u5168\u90e8\u5185\u5bb9\u3002

                                                    {{ . }}\n
                                                  2. \u5224\u65ad\u8bed\u53e5 if / else

                                                    \u4f7f\u7528 if \u68c0\u67e5\u6570\u636e\uff0c\u5982\u679c\u4e0d\u6ee1\u8db3\u53ef\u4ee5\u6267\u884c else\u3002

                                                    {{if .Labels.namespace }}\u547d\u540d\u7a7a\u95f4\uff1a{{ .Labels.namespace }} \\n{{ end }}\n
                                                  3. \u5faa\u73af\u51fd\u6570 for

                                                    for \u51fd\u6570\u7528\u4e8e\u91cd\u590d\u6267\u884c\u4ee3\u7801\u5185\u5bb9\u3002

                                                    \u793a\u4f8b 1: \u904d\u5386 labels \u5217\u8868\uff0c\u83b7\u53d6\u544a\u8b66\u7684\u6240\u6709 label \u5185\u5bb9\u3002

                                                    {{ for .Labels}} \\n {{end}}\n
                                                  "},{"location":"admin/insight/reference/notify-helper.html#functions","title":"\u51fd\u6570\u8bf4\u660e FUNCTIONS","text":"

                                                  Insight \u7684\u201d\u901a\u77e5\u6a21\u677f\u201c\u548c\u201d\u77ed\u4fe1\u6a21\u677f\u201c\u652f\u6301 70 \u591a\u4e2a sprig \u51fd\u6570\uff0c\u4ee5\u53ca\u81ea\u7814\u7684\u51fd\u6570\u3002

                                                  "},{"location":"admin/insight/reference/notify-helper.html#sprig","title":"Sprig \u51fd\u6570","text":"

                                                  Sprig \u5185\u7f6e\u4e86 70 \u591a\u79cd\u5e38\u89c1\u7684\u6a21\u677f\u51fd\u6570\u5e2e\u52a9\u6e32\u67d3\u6570\u636e\u3002\u4ee5\u4e0b\u5217\u4e3e\u5e38\u89c1\u51fd\u6570\uff1a

                                                  • \u65f6\u95f4\u64cd\u4f5c
                                                  • \u5b57\u7b26\u4e32\u64cd\u4f5c
                                                  • \u7c7b\u578b\u8f6c\u6362\u64cd\u4f5c
                                                  • \u6574\u6570\u7684\u6570\u5b66\u8ba1\u7b97

                                                  \u66f4\u591a\u7ec6\u8282\u53ef\u4ee5\u67e5\u770b\u5b98\u65b9\u6587\u6863\u3002

                                                  "},{"location":"admin/insight/reference/notify-helper.html#_3","title":"\u81ea\u7814\u51fd\u6570","text":""},{"location":"admin/insight/reference/notify-helper.html#toclustername","title":"toClusterName","text":"

                                                  toClusterName \u51fd\u6570\u6839\u636e\u201c\u96c6\u7fa4\u552f\u4e00\u6807\u793a Id\u201d\u67e5\u8be2\u201c\u96c6\u7fa4\u540d\u201d\uff1b\u5982\u679c\u67e5\u8be2\u4e0d\u5230\u5bf9\u5e94\u7684\u96c6\u7fa4\uff0c\u5c06\u76f4\u63a5\u8fd4\u56de\u4f20\u5165\u7684\u96c6\u7fa4\u7684\u552f\u4e00\u6807\u793a\u3002

                                                  func toClusterName(id string) (string, error)\n

                                                  \u793a\u4f8b\uff1a

                                                  {{ toClusterName \"clusterId\" }}\n{{ \"clusterId\" | toClusterName }}\n
                                                  "},{"location":"admin/insight/reference/notify-helper.html#toclusterid","title":"toClusterId","text":"

                                                  toClusterId \u51fd\u6570\u6839\u636e\u201c\u96c6\u7fa4\u540d\u201d\u67e5\u8be2\u201c\u96c6\u7fa4\u552f\u4e00\u6807\u793a Id\u201d\uff1b\u5982\u679c\u67e5\u8be2\u4e0d\u5230\u5bf9\u5e94\u7684\u96c6\u7fa4\uff0c\u5c06\u76f4\u63a5\u8fd4\u56de\u4f20\u5165\u7684\u96c6\u7fa4\u540d\u3002

                                                  func toClusterId(name string) (string, error)\n

                                                  \u793a\u4f8b\uff1a

                                                  {{ toClusterId \"clusterName\" }}\n{{ \"clusterName\" | toClusterId }}\n
                                                  "},{"location":"admin/insight/reference/notify-helper.html#todateinzone","title":"toDateInZone","text":"

                                                  toDateInZone \u6839\u636e\u5b57\u7b26\u4e32\u65f6\u95f4\u8f6c\u6362\u6210\u6240\u9700\u7684\u65f6\u95f4\uff0c\u5e76\u8fdb\u884c\u683c\u5f0f\u5316\u3002

                                                  func toDateInZone(fmt string, date interface{}, zone string) string\n

                                                  \u793a\u4f8b 1\uff1a

                                                  {{ toDateInZone \"2006-01-02T15:04:05\" \"2022-08-15T05:59:08.064449533Z\" \"Asia/Shanghai\" }}\n

                                                  \u5c06\u83b7\u5f97\u8fd4\u56de\u503c 2022-08-15T13:59:08 \u3002\u6b64\u5916\uff0c\u4e5f\u53ef\u4ee5\u901a\u8fc7 sprig \u5185\u7f6e\u7684\u51fd\u6570\u8fbe\u5230 toDateInZone \u7684\u6548\u679c\uff1a

                                                  {{ dateInZone \"2006-01-02T15:04:05\" (toDate \"2006-01-02T15:04:05Z07:00\" .StartsAt) \"Asia/Shanghai\" }}\n

                                                  \u793a\u4f8b 2\uff1a

                                                  {{ toDateInZone \"2006-01-02T15:04:05\" .StartsAt \"Asia/Shanghai\" }}\n\n## \u9608\u503c\u6a21\u677f\u8bf4\u660e\n\nInsight \u5185\u7f6e Webhook \u544a\u8b66\u6a21\u677f\u5982\u4e0b\uff0c\u5176\u4ed6\u5982\u90ae\u4ef6\u3001\u4f01\u4e1a\u5fae\u4fe1\u7b49\u5185\u5bb9\u76f8\u540c\uff0c\u53ea\u662f\u5bf9\u6362\u884c\u8fdb\u884c\u76f8\u5e94\u8c03\u6574\u3002\n\n```text\n\u89c4\u5219\u540d\u79f0\uff1a{{ .Labels.alertname }} \\n\n\u7b56\u7565\u540d\u79f0\uff1a{{ .Labels.alertgroup }} \\n\n\u544a\u8b66\u7ea7\u522b\uff1a{{ .Labels.severity }} \\n\n\u96c6\u7fa4\uff1a{{ .Labels.cluster }} \\n\n{{if .Labels.namespace }}\u547d\u540d\u7a7a\u95f4\uff1a{{ .Labels.namespace }} \\n{{ end }}\n{{if .Labels.node }}\u8282\u70b9\uff1a{{ .Labels.node }} \\n{{ end }}\n\u8d44\u6e90\u7c7b\u578b\uff1a{{ .Labels.target_type }} \\n\n{{if .Labels.target }}\u8d44\u6e90\u540d\u79f0\uff1a{{ .Labels.target }} \\n{{ end }}\n\u89e6\u53d1\u503c\uff1a{{ .Annotations.value }} \\n\n\u53d1\u751f\u65f6\u95f4\uff1a{{ .StartsAt }} \\n\n{{if ne \"0001-01-01T00:00:00Z\" .EndsAt }}\u7ed3\u675f\u65f6\u95f4\uff1a{{ .EndsAt }} \\n{{ end }}\n\u63cf\u8ff0\uff1a{{ .Annotations.description }} \\n\n
                                                  "},{"location":"admin/insight/reference/notify-helper.html#_4","title":"\u90ae\u7bb1\u4e3b\u9898\u53c2\u6570","text":"

                                                  \u7531\u4e8e Insight \u5728\u53d1\u9001\u544a\u8b66\u6d88\u606f\u65f6\uff0c\u4f1a\u5bf9\u540c\u4e00\u65f6\u95f4\u540c\u4e00\u6761\u89c4\u5219\u4ea7\u751f\u7684\u6d88\u606f\u8fdb\u884c\u5408\u5e76\u53d1\u9001\uff0c \u6240\u4ee5 email \u4e3b\u9898\u4e0d\u540c\u4e8e\u4e0a\u9762\u56db\u79cd\u6a21\u677f\uff0c\u53ea\u4f1a\u4f7f\u7528\u544a\u8b66\u6d88\u606f\u4e2d\u7684 commonLabels \u5185\u5bb9\u5bf9\u6a21\u677f\u8fdb\u884c\u6e32\u67d3\u3002\u9ed8\u8ba4\u6a21\u677f\u5982\u4e0b:

                                                  [{{ .status }}] [{{ .severity }}] \u544a\u8b66\uff1a{{ .alertname }}\n

                                                  \u5176\u4ed6\u53ef\u4f5c\u4e3a\u90ae\u7bb1\u4e3b\u9898\u7684\u5b57\u6bb5\u5982\u4e0b:

                                                  {{ .status }} \u544a\u8b66\u6d88\u606f\u7684\u89e6\u53d1\u72b6\u6001\n{{ .alertgroup }} \u544a\u8b66\u6240\u5c5e\u7684\u7b56\u7565\u540d\u79f0\n{{ .alertname }} \u544a\u8b66\u6240\u5c5e\u7684\u89c4\u5219\u540d\u79f0\n{{ .severity }} \u544a\u8b66\u7ea7\u522b\n{{ .target_type }} \u544a\u8b66\u8d44\u6e90\u7c7b\u578b\n{{ .target }} \u544a\u8b66\u8d44\u6e90\u5bf9\u8c61\n{{ .\u89c4\u5219\u5176\u4ed6\u81ea\u5b9a\u4e49 label key }}\n
                                                  "},{"location":"admin/insight/reference/tailing-sidecar.html","title":"\u901a\u8fc7 Sidecar \u91c7\u96c6\u5bb9\u5668\u65e5\u5fd7","text":"

                                                  Tailing Sidecar \u662f\u4e00\u4e2a\u6d41\u5f0f Sidecar \u5bb9\u5668\uff0c \u662f Kubernetes \u96c6\u7fa4\u7ea7\u7684\u65e5\u5fd7\u4ee3\u7406\u3002Tailing Sidercar \u53ef\u4ee5\u5728\u5bb9\u5668\u65e0\u6cd5\u5199\u5165\u6807\u51c6\u8f93\u51fa\u6216\u6807\u51c6\u9519\u8bef\u6d41\u65f6\uff0c\u65e0\u9700\u66f4\u6539\uff0c\u5373\u53ef\u81ea\u52a8\u6536\u53d6\u548c\u6c47\u603b\u5bb9\u5668\u5185\u65e5\u5fd7\u6587\u4ef6\u3002

                                                  Insight \u652f\u6301\u901a\u8fc7 Sidercar \u6a21\u5f0f\u91c7\u96c6\u65e5\u5fd7\uff0c\u5373\u5728\u6bcf\u4e2a Pod \u4e2d\u8fd0\u884c\u4e00\u4e2a Sidecar \u5bb9\u5668\u5c06\u65e5\u5fd7\u6570\u636e\u8f93\u51fa\u5230\u6807\u51c6\u8f93\u51fa\u6d41\uff0c\u4ee5\u4fbf FluentBit \u6536\u96c6\u5bb9\u5668\u65e5\u5fd7\u3002

                                                  Insight Agent \u4e2d\u9ed8\u8ba4\u5b89\u88c5\u4e86 tailing-sidecar operator \u3002 \u82e5\u60a8\u60f3\u5f00\u542f\u91c7\u96c6\u5bb9\u5668\u5185\u6587\u4ef6\u65e5\u5fd7\uff0c\u8bf7\u901a\u8fc7\u7ed9 Pod \u6dfb\u52a0\u6ce8\u89e3\u8fdb\u884c\u6807\u8bb0\uff0c tailing-sidecar operator \u5c06\u81ea\u52a8\u6ce8\u5165 Tailing Sidecar \u5bb9\u5668\uff0c \u88ab\u6ce8\u5165\u7684 Sidecar \u5bb9\u5668\u8bfb\u53d6\u4e1a\u52a1\u5bb9\u5668\u5185\u7684\u6587\u4ef6\uff0c\u5e76\u8f93\u51fa\u5230\u6807\u51c6\u8f93\u51fa\u6d41\u3002

                                                  \u5177\u4f53\u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b\uff1a

                                                  1. \u4fee\u6539 Pod \u7684 YAML \u6587\u4ef6\uff0c\u5728 annotation \u5b57\u6bb5\u589e\u52a0\u5982\u4e0b\u53c2\u6570\uff1a

                                                    metadata:\n  annotations:\n    tailing-sidecar:  <sidecar-name-0>:<volume-name-0>:<path-to-tail-0>;<sidecar-name-1>:<volume-name-1>:<path-to-tail-1>\n

  Field descriptions:

  • sidecar-name-0: name of the tailing sidecar container (optional; if no container name is specified, one is created automatically with the prefix "tailing-sidecar")
  • volume-name-0: name of the storage volume;
  • path-to-tail-0: file path of the log

  Note

  Each Pod can run multiple sidecar containers, separated by ;, allowing different sidecar containers to collect multiple files into multiple storage volumes.

2. Restart the Pod. Once the Pod's status becomes Running, you can use the Log Query page to search for the logs inside this Pod's containers.
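For Pods managed by a workload controller, the annotation goes on the Pod template and can be applied without editing the full YAML. A minimal sketch using kubectl patch, assuming a Deployment named my-app whose Pod already mounts a volume app-logs containing /var/log/app/app.log (all three names are hypothetical):

  # Patch the Pod template annotations; the resulting rollout recreates the Pods,
  # so the tailing-sidecar operator injects the sidecar into the new Pods
  kubectl patch deployment my-app --type merge -p '
  {
    "spec": {
      "template": {
        "metadata": {
          "annotations": {
            "tailing-sidecar": "sidecar0:app-logs:/var/log/app/app.log"
          }
        }
      }
    }
  }'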

                                                  "},{"location":"admin/insight/reference/used-metric-in-insight.html","title":"Insight \u53c2\u8003\u6307\u6807\u8bf4\u660e","text":"

                                                  \u672c\u6587\u4e2d\u7684\u6307\u6807\u662f\u57fa\u4e8e\u793e\u533a\u7684 kube-prometheus \u7684\u57fa\u7840\u4e4b\u4e0a\u6574\u7406\u800c\u6210\u3002 \u76ee\u524d\u6db5\u76d6\u4e86 Cluster\u3001Node\u3001Namespace\u3001Workload \u7b49\u591a\u4e2a\u5c42\u9762\u7684\u6307\u6807\u3002 \u672c\u6587\u679a\u4e3e\u4e86\u4e00\u4e9b\u5e38\u7528\u7684\u6307\u6807\u540d\u3001\u4e2d\u6587\u63cf\u8ff0\u548c\u5355\u4f4d\uff0c\u4ee5\u4fbf\u7d22\u5f15\u3002

                                                  "},{"location":"admin/insight/reference/used-metric-in-insight.html#cluster","title":"\u96c6\u7fa4\uff08Cluster\uff09","text":"\u6307\u6807\u540d \u4e2d\u6587\u63cf\u8ff0 \u5355\u4f4d cluster_cpu_utilization \u96c6\u7fa4 CPU \u4f7f\u7528\u7387 cluster_cpu_total \u96c6\u7fa4 CPU \u603b\u91cf Core cluster_cpu_usage \u96c6\u7fa4 CPU \u7528\u91cf Core cluster_cpu_requests_commitment \u96c6\u7fa4 CPU \u5206\u914d\u7387 cluster_memory_utilization \u96c6\u7fa4\u5185\u5b58\u4f7f\u7528\u7387 cluster_memory_usage \u96c6\u7fa4\u5185\u5b58\u4f7f\u7528\u91cf Byte cluster_memory_available \u96c6\u7fa4\u53ef\u7528\u5185\u5b58 Byte cluster_memory_requests_commitment \u96c6\u7fa4\u5185\u5b58\u5206\u914d\u7387 cluster_memory_total \u96c6\u7fa4\u5185\u5b58\u53ef\u7528\u91cf Byte cluster_net_utilization \u96c6\u7fa4\u7f51\u7edc\u6570\u636e\u4f20\u8f93\u901f\u7387 Byte/s cluster_net_bytes_transmitted \u96c6\u7fa4\u7f51\u7edc\u6570\u636e\u53d1\u9001 (\u4e0a\u884c) \u901f\u7387 Byte/s cluster_net_bytes_received \u96c6\u7fa4\u7f51\u7edc\u6570\u636e\u63a5\u53d7 (\u4e0b\u884c) \u901f\u7387 Byte/s cluster_disk_read_iops \u96c6\u7fa4\u78c1\u76d8\u6bcf\u79d2\u8bfb\u6b21\u6570 \u6b21/s cluster_disk_write_iops \u96c6\u7fa4\u78c1\u76d8\u6bcf\u79d2\u5199\u6b21\u6570 \u6b21/s cluster_disk_read_throughput \u96c6\u7fa4\u78c1\u76d8\u6bcf\u79d2\u8bfb\u53d6\u6570\u636e\u91cf Byte/s cluster_disk_write_throughput \u96c6\u7fa4\u78c1\u76d8\u6bcf\u79d2\u5199\u5165\u6570\u636e\u91cf Byte/s cluster_disk_size_capacity \u96c6\u7fa4\u78c1\u76d8\u603b\u5bb9\u91cf Byte cluster_disk_size_available \u96c6\u7fa4\u78c1\u76d8\u53ef\u7528\u5927\u5c0f Byte cluster_disk_size_usage \u96c6\u7fa4\u78c1\u76d8\u4f7f\u7528\u91cf Byte cluster_disk_size_utilization \u96c6\u7fa4\u78c1\u76d8\u4f7f\u7528\u7387 cluster_node_total \u96c6\u7fa4\u8282\u70b9\u603b\u6570 \u4e2a cluster_node_online \u96c6\u7fa4\u8282\u70b9\u603b\u6570 \u4e2a cluster_node_offline_count \u96c6\u7fa4\u5931\u8054\u7684\u8282\u70b9\u4e2a\u6570 \u4e2a cluster_pod_count \u96c6\u7fa4 Pod \u603b\u6570 \u4e2a cluster_pod_running_count \u96c6\u7fa4\u6b63\u5e38\u8fd0\u884c Pod \u4e2a\u6570 \u4e2a cluster_pod_abnormal_count \u96c6\u7fa4\u5f02\u5e38\u8fd0\u884c Pod \u4e2a\u6570 \u4e2a cluster_deployment_count \u96c6\u7fa4 Deployment \u603b\u6570 \u4e2a cluster_deployment_normal_count \u96c6\u7fa4\u6b63\u5e38\u7684 Deployment \u603b\u6570 \u4e2a cluster_deployment_abnormal_count \u96c6\u7fa4\u5f02\u5e38\u7684 Deployment \u603b\u6570 \u4e2a cluster_statefulset_count \u96c6\u7fa4 StatefulSet \u4e2a\u6570 \u4e2a cluster_statefulset_normal_count \u96c6\u7fa4\u6b63\u5e38\u8fd0\u884c StatefulSet \u4e2a\u6570 \u4e2a cluster_statefulset_abnormal_count \u96c6\u7fa4\u5f02\u5e38\u8fd0\u884c StatefulSet \u4e2a\u6570 \u4e2a cluster_daemonset_count \u96c6\u7fa4 DaemonSet \u4e2a\u6570 \u4e2a cluster_daemonset_normal_count \u96c6\u7fa4\u6b63\u5e38\u8fd0\u884c DaemonSet \u4e2a\u6570 \u4e2a cluster_daemonset_abnormal_count \u96c6\u7fa4\u5f02\u5e38\u8fd0\u884c DaemonSet \u4e2a\u6570 \u4e2a cluster_job_count \u96c6\u7fa4 Job \u603b\u6570 \u4e2a cluster_job_normal_count \u96c6\u7fa4\u6b63\u5e38\u8fd0\u884c Job \u4e2a\u6570 \u4e2a cluster_job_abnormal_count \u96c6\u7fa4\u5f02\u5e38\u8fd0\u884c Job \u4e2a\u6570 \u4e2a

                                                  Tip

Utilization metrics are generally numbers in the interval (0, 1] (for example, 0.21 rather than 21%).
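These metric names can be queried through the Prometheus-compatible API exposed by Insight's VictoriaMetrics cluster. A minimal sketch, assuming vmselect is reachable at http://127.0.0.1:8481 with tenant ID 0 (the address, port, and tenant are assumptions; adjust them to your environment):

  # Returns the current cluster CPU utilization, a value in (0, 1]
  curl -s 'http://127.0.0.1:8481/select/0/prometheus/api/v1/query' \
    --data-urlencode 'query=cluster_cpu_utilization'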

                                                  "},{"location":"admin/insight/reference/used-metric-in-insight.html#node","title":"\u8282\u70b9\uff08Node\uff09","text":"\u6307\u6807\u540d \u4e2d\u6587\u63cf\u8ff0 \u5355\u4f4d node_cpu_utilization \u8282\u70b9 CPU \u4f7f\u7528\u7387 node_cpu_total \u8282\u70b9 CPU \u603b\u91cf Core node_cpu_usage \u8282\u70b9 CPU \u7528\u91cf Core node_cpu_requests_commitment \u8282\u70b9 CPU \u5206\u914d\u7387 node_memory_utilization \u8282\u70b9\u5185\u5b58\u4f7f\u7528\u7387 node_memory_usage \u8282\u70b9\u5185\u5b58\u4f7f\u7528\u91cf Byte node_memory_requests_commitment \u8282\u70b9\u5185\u5b58\u5206\u914d\u7387 node_memory_available \u8282\u70b9\u53ef\u7528\u5185\u5b58 Byte node_memory_total \u8282\u70b9\u5185\u5b58\u53ef\u7528\u91cf Byte node_net_utilization \u8282\u70b9\u7f51\u7edc\u6570\u636e\u4f20\u8f93\u901f\u7387 Byte/s node_net_bytes_transmitted \u8282\u70b9\u7f51\u7edc\u6570\u636e\u53d1\u9001 (\u4e0a\u884c) \u901f\u7387 Byte/s node_net_bytes_received \u8282\u70b9\u7f51\u7edc\u6570\u636e\u63a5\u53d7 (\u4e0b\u884c) \u901f\u7387 Byte/s node_disk_read_iops \u8282\u70b9\u78c1\u76d8\u6bcf\u79d2\u8bfb\u6b21\u6570 \u6b21/s node_disk_write_iops \u8282\u70b9\u78c1\u76d8\u6bcf\u79d2\u5199\u6b21\u6570 \u6b21/s node_disk_read_throughput \u8282\u70b9\u78c1\u76d8\u6bcf\u79d2\u8bfb\u53d6\u6570\u636e\u91cf Byte/s node_disk_write_throughput \u8282\u70b9\u78c1\u76d8\u6bcf\u79d2\u5199\u5165\u6570\u636e\u91cf Byte/s node_disk_size_capacity \u8282\u70b9\u78c1\u76d8\u603b\u5bb9\u91cf Byte node_disk_size_available \u8282\u70b9\u78c1\u76d8\u53ef\u7528\u5927\u5c0f Byte node_disk_size_usage \u8282\u70b9\u78c1\u76d8\u4f7f\u7528\u91cf Byte node_disk_size_utilization \u8282\u70b9\u78c1\u76d8\u4f7f\u7528\u7387"},{"location":"admin/insight/reference/used-metric-in-insight.html#workload","title":"\u5de5\u4f5c\u8d1f\u8f7d\uff08Workload\uff09","text":"

The workload types currently supported are Deployment, StatefulSet, DaemonSet, Job, and CronJob.

| Metric name | Description | Unit |
| --- | --- | --- |
| workload_cpu_usage | Workload CPU usage | Core |
| workload_cpu_limits | Workload CPU limits | Core |
| workload_cpu_requests | Workload CPU requests | Core |
| workload_cpu_utilization | Workload CPU utilization | |
| workload_memory_usage | Workload memory usage | Byte |
| workload_memory_limits | Workload memory limits | Byte |
| workload_memory_requests | Workload memory requests | Byte |
| workload_memory_utilization | Workload memory utilization | |
| workload_memory_usage_cached | Workload memory usage (including cache) | Byte |
| workload_net_bytes_transmitted | Workload network transmit rate | Byte/s |
| workload_net_bytes_received | Workload network receive rate | Byte/s |
| workload_disk_read_throughput | Workload disk data read per second | Byte/s |
| workload_disk_write_throughput | Workload disk data written per second | Byte/s |
1. The metrics here are aggregated totals per workload.
2. Metrics are retrieved in the form workload_cpu_usage{workload_type="deployment", workload="prometheus"}.
3. Calculation rule for workload_pod_utilization: workload_pod_usage / workload_pod_request.
                                                  "},{"location":"admin/insight/reference/used-metric-in-insight.html#pod","title":"\u5bb9\u5668\u7ec4\uff08Pod\uff09","text":"\u6307\u6807\u540d \u4e2d\u6587\u63cf\u8ff0 \u5355\u4f4d pod_cpu_usage \u5bb9\u5668\u7ec4 CPU \u7528\u91cf Core pod_cpu_limits \u5bb9\u5668\u7ec4 CPU \u9650\u5236\u91cf Core pod_cpu_requests \u5bb9\u5668\u7ec4 CPU \u8bf7\u6c42\u91cf Core pod_cpu_utilization \u5bb9\u5668\u7ec4 CPU \u4f7f\u7528\u7387 pod_memory_usage \u5bb9\u5668\u7ec4\u5185\u5b58\u4f7f\u7528\u91cf Byte pod_memory_limits \u5bb9\u5668\u7ec4\u5185\u5b58\u9650\u5236\u91cf Byte pod_memory_requests \u5bb9\u5668\u7ec4\u5185\u5b58\u8bf7\u6c42\u91cf Byte pod_memory_utilization \u5bb9\u5668\u7ec4\u5185\u5b58\u4f7f\u7528\u7387 pod_memory_usage_cached \u5bb9\u5668\u7ec4\u5185\u5b58\u4f7f\u7528\u91cf\uff08\u5305\u542b\u7f13\u5b58\uff09 Byte pod_net_bytes_transmitted \u5bb9\u5668\u7ec4\u7f51\u7edc\u6570\u636e\u53d1\u9001\u901f\u7387 Byte/s pod_net_bytes_received \u5bb9\u5668\u7ec4\u7f51\u7edc\u6570\u636e\u63a5\u53d7\u901f\u7387 Byte/s pod_disk_read_throughput \u5bb9\u5668\u7ec4\u78c1\u76d8\u6bcf\u79d2\u8bfb\u53d6\u6570\u636e\u91cf Byte/s pod_disk_write_throughput \u5bb9\u5668\u7ec4\u78c1\u76d8\u6bcf\u79d2\u5199\u5165\u6570\u636e\u91cf Byte/s

Use pod_cpu_usage{workload_type="deployment", workload="prometheus"} to get the CPU usage of all Pods owned by the Deployment named prometheus.

                                                  "},{"location":"admin/insight/reference/used-metric-in-insight.html#span","title":"Span \u6307\u6807","text":"\u6307\u6807\u540d \u4e2d\u6587\u63cf\u8ff0 \u5355\u4f4d calls_total \u670d\u52a1\u8bf7\u6c42\u603b\u6570 duration_milliseconds_bucket \u670d\u52a1\u5ef6\u65f6\u76f4\u65b9\u56fe duration_milliseconds_sum \u670d\u52a1\u603b\u5ef6\u65f6 ms duration_milliseconds_count \u670d\u52a1\u5ef6\u65f6\u8bb0\u5f55\u6761\u6570 otelcol_processor_groupbytrace_spans_released \u91c7\u96c6\u5230\u7684 span \u6570 otelcol_processor_groupbytrace_traces_released \u91c7\u96c6\u5230\u7684 trace \u6570 traces_service_graph_request_total \u670d\u52a1\u8bf7\u6c42\u603b\u6570 (\u62d3\u6251\u529f\u80fd\u4f7f\u7528) traces_service_graph_request_server_seconds_sum \u670d\u52a1\u603b\u5ef6\u65f6 (\u62d3\u6251\u529f\u80fd\u4f7f\u7528) ms traces_service_graph_request_server_seconds_bucket \u670d\u52a1\u5ef6\u65f6\u76f4\u65b9\u56fe (\u62d3\u6251\u529f\u80fd\u4f7f\u7528) traces_service_graph_request_server_seconds_count \u670d\u52a1\u8bf7\u6c42\u603b\u6570 (\u62d3\u6251\u529f\u80fd\u4f7f\u7528)"},{"location":"admin/insight/system-config/modify-config.html","title":"\u4fee\u6539\u7cfb\u7edf\u914d\u7f6e","text":"

"},{"location":"admin/insight/system-config/modify-config.html","title":"Modify System Configuration","text":"

Observability persists metric, log, and trace data by default. You can refer to this document to modify the system configuration. This document applies only to the built-in Elasticsearch; if you use an external Elasticsearch, adjust the settings yourself.

                                                  "},{"location":"admin/insight/system-config/modify-config.html#_2","title":"\u5982\u4f55\u4fee\u6539\u6307\u6807\u6570\u636e\u4fdd\u7559\u671f\u9650","text":"

                                                  \u5148 ssh \u767b\u5f55\u5230\u5bf9\u5e94\u7684\u8282\u70b9\uff0c\u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\u4fee\u6539\u6307\u6807\u6570\u636e\u4fdd\u7559\u671f\u9650\u3002

                                                  1. \u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a

                                                    kubectl edit vmcluster insight-victoria-metrics-k8s-stack -n insight-system\n
                                                  2. \u5728 Yaml \u6587\u4ef6\u4e2d\uff0c retentionPeriod \u7684\u9ed8\u8ba4\u503c\u4e3a 14 \uff0c\u5355\u4f4d\u4e3a \u5929 \u3002\u60a8\u53ef\u6839\u636e\u9700\u6c42\u4fee\u6539\u53c2\u6570\u3002

                                                    apiVersion: operator.victoriametrics.com/v1beta1\nkind: VMCluster\nmetadata:\n  annotations:\n    meta.helm.sh/release-name: insight\n    meta.helm.sh/release-namespace: insight-system\n  creationTimestamp: \"2022-08-25T04:31:02Z\"\n  finalizers:\n  - apps.victoriametrics.com/finalizer\n  generation: 2\n  labels:\n    app.kubernetes.io/instance: insight\n    app.kubernetes.io/managed-by: Helm\n    app.kubernetes.io/name: victoria-metrics-k8s-stack\n    app.kubernetes.io/version: 1.77.2\n    helm.sh/chart: victoria-metrics-k8s-stack-0.9.3\n  name: insight-victoria-metrics-k8s-stack\n  namespace: insight-system\n  resourceVersion: \"123007381\"\n  uid: 55cee8d6-c651-404b-b2c9-50603b405b54\nspec:\n  replicationFactor: 1\n  retentionPeriod: \"14\"\n  vminsert:\n    extraArgs:\n      maxLabelsPerTimeseries: \"45\"\n    image:\n      repository: docker.m.daocloud.io/victoriametrics/vminsert\n      tag: v1.80.0-cluster\n      replicaCount: 1\n
3. After saving the changes, the Pods of the component responsible for storing metrics restart automatically; just wait a moment.
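If you prefer a non-interactive change over kubectl edit, the same field can be set with a patch. A minimal sketch that sets a 30-day retention (the value is only an example):

  kubectl -n insight-system patch vmcluster insight-victoria-metrics-k8s-stack \
    --type merge -p '{"spec":{"retentionPeriod":"30"}}'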

                                                  "},{"location":"admin/insight/system-config/modify-config.html#_3","title":"\u5982\u4f55\u4fee\u6539\u65e5\u5fd7\u6570\u636e\u5b58\u50a8\u65f6\u957f","text":"

                                                  \u5148 ssh \u767b\u5f55\u5230\u5bf9\u5e94\u7684\u8282\u70b9\uff0c\u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\u4fee\u6539\u65e5\u5fd7\u6570\u636e\u4fdd\u7559\u671f\u9650\uff1a

                                                  "},{"location":"admin/insight/system-config/modify-config.html#json","title":"\u65b9\u6cd5\u4e00\uff1a\u4fee\u6539 Json \u6587\u4ef6","text":"
                                                  1. \u4fee\u6539\u4ee5\u4e0b\u6587\u4ef6\u4e2d rollover \u5b57\u6bb5\u4e2d\u7684 max_age \u53c2\u6570\uff0c\u5e76\u8bbe\u7f6e\u4fdd\u7559\u671f\u9650\uff0c\u9ed8\u8ba4\u5b58\u50a8\u65f6\u957f\u4e3a 7d \u3002\u6ce8\u610f\u9700\u8981\u4fee\u6539\u7b2c\u4e00\u884c\u4e2d\u7684 Elastic \u7528\u6237\u540d\u548c\u5bc6\u7801\u3001IP \u5730\u5740\u548c\u7d22\u5f15\u3002

                                                    curl  --insecure --location -u\"elastic:amyVt4o826e322TUVi13Ezw6\" -X PUT \"https://172.30.47.112:30468/_ilm/policy/insight-es-k8s-logs-policy?pretty\" -H 'Content-Type: application/json' -d'\n{\n    \"policy\": {\n        \"phases\": {\n            \"hot\": {\n                \"min_age\": \"0ms\",\n                \"actions\": {\n                    \"set_priority\": {\n                        \"priority\": 100\n                    },\n                    \"rollover\": {\n                        \"max_age\": \"8d\",\n                        \"max_size\": \"10gb\"\n                    }\n                }\n            },\n            \"warm\": {\n                \"min_age\": \"10d\",\n                \"actions\": {\n                    \"forcemerge\": {\n                        \"max_num_segments\": 1\n                    }\n                }\n            },\n            \"delete\": {\n                \"min_age\": \"30d\",\n                \"actions\": {\n                    \"delete\": {}\n                }\n            }\n        }\n    }\n}'\n
2. After making the changes, run the above command. If it prints the following, the modification succeeded.

                                                    {\n\"acknowledged\" : true\n}\n
                                                  "},{"location":"admin/insight/system-config/modify-config.html#ui","title":"\u65b9\u6cd5\u4e8c\uff1a\u4ece UI \u4fee\u6539","text":"
                                                  1. \u767b\u5f55 kibana \uff0c\u9009\u62e9\u5de6\u4fa7\u5bfc\u822a\u680f Stack Management \u3002

                                                  2. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a Index Lifecycle Polices \uff0c\u5e76\u627e\u5230\u7d22\u5f15 insight-es-k8s-logs-policy \uff0c\u70b9\u51fb\u8fdb\u5165\u8be6\u60c5\u3002

                                                  3. \u5c55\u5f00 Hot phase \u914d\u7f6e\u9762\u677f\uff0c\u4fee\u6539 Maximum age \u53c2\u6570\uff0c\u5e76\u8bbe\u7f6e\u4fdd\u7559\u671f\u9650\uff0c\u9ed8\u8ba4\u5b58\u50a8\u65f6\u957f\u4e3a 7d \u3002

                                                  4. \u4fee\u6539\u5b8c\u540e\uff0c\u70b9\u51fb\u9875\u9762\u5e95\u90e8\u7684 Save policy \u5373\u4fee\u6539\u6210\u529f\u3002

                                                  "},{"location":"admin/insight/system-config/modify-config.html#_4","title":"\u5982\u4f55\u4fee\u6539\u94fe\u8def\u6570\u636e\u5b58\u50a8\u65f6\u957f","text":"

                                                  \u5148 ssh \u767b\u5f55\u5230\u5bf9\u5e94\u7684\u8282\u70b9\uff0c\u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\u4fee\u6539\u94fe\u8def\u6570\u636e\u4fdd\u7559\u671f\u9650\uff1a

                                                  "},{"location":"admin/insight/system-config/modify-config.html#json_1","title":"\u65b9\u6cd5\u4e00\uff1a\u4fee\u6539 Json \u6587\u4ef6","text":"
                                                  1. \u4fee\u6539\u4ee5\u4e0b\u6587\u4ef6\u4e2d rollover \u5b57\u6bb5\u4e2d\u7684 max_age \u53c2\u6570\uff0c\u5e76\u8bbe\u7f6e\u4fdd\u7559\u671f\u9650\uff0c\u9ed8\u8ba4\u5b58\u50a8\u65f6\u957f\u4e3a 7d \u3002\u6ce8\u610f\u9700\u8981\u4fee\u6539\u7b2c\u4e00\u884c\u4e2d\u7684 Elastic \u7528\u6237\u540d\u548c\u5bc6\u7801\u3001IP \u5730\u5740\u548c\u7d22\u5f15\u3002

                                                    curl --insecure --location -u\"elastic:amyVt4o826e322TUVi13Ezw6\" -X PUT \"https://172.30.47.112:30468/_ilm/policy/jaeger-ilm-policy?pretty\" -H 'Content-Type: application/json' -d'\n{\n    \"policy\": {\n        \"phases\": {\n            \"hot\": {\n                \"min_age\": \"0ms\",\n                \"actions\": {\n                    \"set_priority\": {\n                        \"priority\": 100\n                    },\n                    \"rollover\": {\n                        \"max_age\": \"6d\",\n                        \"max_size\": \"10gb\"\n                    }\n                }\n            },\n            \"warm\": {\n                \"min_age\": \"10d\",\n                \"actions\": {\n                    \"forcemerge\": {\n                        \"max_num_segments\": 1\n                    }\n                }\n            },\n            \"delete\": {\n                \"min_age\": \"30d\",\n                \"actions\": {\n                    \"delete\": {}\n                }\n            }\n        }\n    }\n}'\n
2. After making the changes, run the above command in the console. If it prints the following, the modification succeeded.

                                                    {\n\"acknowledged\" : true\n}\n
                                                  "},{"location":"admin/insight/system-config/modify-config.html#ui_1","title":"\u65b9\u6cd5\u4e8c\uff1a\u4ece UI \u4fee\u6539","text":"
                                                  1. \u767b\u5f55 kibana \uff0c\u9009\u62e9\u5de6\u4fa7\u5bfc\u822a\u680f Stack Management \u3002

                                                  2. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a Index Lifecycle Polices \uff0c\u5e76\u627e\u5230\u7d22\u5f15 jaeger-ilm-policy \uff0c\u70b9\u51fb\u8fdb\u5165\u8be6\u60c5\u3002

                                                  3. \u5c55\u5f00 Hot phase \u914d\u7f6e\u9762\u677f\uff0c\u4fee\u6539 Maximum age \u53c2\u6570\uff0c\u5e76\u8bbe\u7f6e\u4fdd\u7559\u671f\u9650\uff0c\u9ed8\u8ba4\u5b58\u50a8\u65f6\u957f\u4e3a 7d \u3002

                                                  4. \u4fee\u6539\u5b8c\u540e\uff0c\u70b9\u51fb\u9875\u9762\u5e95\u90e8\u7684 Save policy \u5373\u4fee\u6539\u6210\u529f\u3002

                                                  "},{"location":"admin/insight/system-config/system-component.html","title":"\u7cfb\u7edf\u7ec4\u4ef6","text":"

                                                  \u5728\u7cfb\u7edf\u7ec4\u4ef6\u9875\u9762\u53ef\u5feb\u901f\u7684\u67e5\u770b\u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u4e2d\u7cfb\u7edf\u7ec4\u4ef6\u7684\u8fd0\u884c\u72b6\u6001\uff0c\u5f53\u7cfb\u7528\u7ec4\u4ef6\u53d1\u751f\u6545\u969c\u65f6\uff0c\u4f1a\u5bfc\u81f4\u53ef\u89c2\u6d4b\u6a21\u5757\u4e2d\u7684\u90e8\u5206\u529f\u80fd\u4e0d\u53ef\u7528\u3002

                                                  1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u4ea7\u54c1\u6a21\u5757\uff0c
                                                  2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u7cfb\u7edf\u7ba1\u7406 -> \u7cfb\u7edf\u7ec4\u4ef6 \u3002

                                                  "},{"location":"admin/insight/system-config/system-component.html#_2","title":"\u7ec4\u4ef6\u8bf4\u660e","text":"\u6a21\u5757 \u7ec4\u4ef6\u540d\u79f0 \u8bf4\u660e \u6307\u6807 vminsert-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u5c06\u5404\u96c6\u7fa4\u4e2d Prometheus \u91c7\u96c6\u5230\u7684\u6307\u6807\u6570\u636e\u5199\u5165\u5b58\u50a8\u7ec4\u4ef6\u3002\u8be5\u7ec4\u4ef6\u5f02\u5e38\u4f1a\u5bfc\u81f4\u65e0\u6cd5\u5199\u5165\u5de5\u4f5c\u96c6\u7fa4\u7684\u6307\u6807\u6570\u636e\u3002 \u6307\u6807 vmalert-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u751f\u6548 VM Rule \u4e2d\u914d\u7f6e\u7684 recording \u548c Alert \u89c4\u5219\uff0c\u5e76\u5c06\u89e6\u53d1\u7684\u544a\u8b66\u89c4\u5219\u53d1\u9001\u7ed9 alertmanager\u3002 \u6307\u6807 vmalertmanager-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u5728\u544a\u8b66\u89e6\u65f6\u53d1\u9001\u6d88\u606f\u3002\u8be5\u7ec4\u4ef6\u5f02\u5e38\u4f1a\u5bfc\u81f4\u65e0\u6cd5\u53d1\u9001\u544a\u8b66\u4fe1\u606f\u3002 \u6307\u6807 vmselect-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u67e5\u8be2\u6307\u6807\u6570\u636e\u3002\u8be5\u7ec4\u4ef6\u5f02\u5e38\u4f1a\u5bfc\u81f4\u65e0\u6cd5\u67e5\u8be2\u6307\u6807\u3002 \u6307\u6807 vmstorage-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u5b58\u50a8\u591a\u96c6\u7fa4\u7684\u6307\u6807\u6570\u636e\u3002 \u4eea\u8868\u76d8 grafana-deployment \u63d0\u4f9b\u76d1\u63a7\u9762\u677f\u80fd\u529b\u3002\u8be5\u7ec4\u4ef6\u5f02\u5e38\u4f1a\u5bfc\u81f4\u65e0\u6cd5\u67e5\u770b\u5185\u7f6e\u7684\u4eea\u8868\u76d8\u3002 \u94fe\u8def insight-jaeger-collector \u8d1f\u8d23\u63a5\u6536\u00a0opentelemetry-collector\u00a0\u4e2d\u94fe\u8def\u6570\u636e\u5e76\u5c06\u5176\u8fdb\u884c\u5b58\u50a8\u3002 \u94fe\u8def insight-jaeger-query \u8d1f\u8d23\u67e5\u8be2\u5404\u96c6\u7fa4\u4e2d\u91c7\u96c6\u5230\u7684\u94fe\u8def\u6570\u636e\u3002 \u94fe\u8def insight-opentelemetry-collector \u8d1f\u8d23\u63a5\u6536\u5404\u5b50\u96c6\u7fa4\u8f6c\u53d1\u7684\u94fe\u8def\u6570\u636e \u65e5\u5fd7 elasticsearch \u8d1f\u8d23\u5b58\u50a8\u5404\u96c6\u7fa4\u7684\u65e5\u5fd7\u6570\u636e\u3002

                                                  Note

                                                  \u82e5\u4f7f\u7528\u5916\u90e8 Elasticsearch \u53ef\u80fd\u65e0\u6cd5\u83b7\u53d6\u90e8\u5206\u6570\u636e\u4ee5\u81f4\u4e8e Elasticsearch \u7684\u4fe1\u606f\u4e3a\u7a7a\u3002

                                                  "},{"location":"admin/insight/system-config/system-config.html","title":"\u7cfb\u7edf\u914d\u7f6e","text":"

                                                  \u7cfb\u7edf\u914d\u7f6e \u5c55\u793a\u6307\u6807\u3001\u65e5\u5fd7\u3001\u94fe\u8def\u9ed8\u8ba4\u7684\u4fdd\u5b58\u65f6\u957f\u4ee5\u53ca\u9ed8\u8ba4\u7684 Apdex \u9608\u503c\u3002

                                                  1. \u70b9\u51fb\u53f3\u4fa7\u5bfc\u822a\u680f\uff0c\u9009\u62e9 \u7cfb\u7edf\u914d\u7f6e\u3002

                                                  2. \u4fee\u6539\u5386\u53f2\u544a\u8b66\u5b58\u50a8\u65f6\u957f\uff0c\u70b9\u51fb \u7f16\u8f91 \u8f93\u5165\u76ee\u6807\u65f6\u957f\u3002

                                                    \u5f53\u5b58\u50a8\u65f6\u957f\u8bbe\u7f6e\u4e3a \"0\" \u5c06\u4e0d\u6e05\u9664\u5386\u53f2\u544a\u8b66\u3002

                                                  3. \u4fee\u6539\u62d3\u6251\u56fe\u6e32\u67d3\u9ed8\u8ba4\u914d\u7f6e\uff0c\u70b9\u51fb \u7f16\u8f91 \u6839\u636e\u9700\u6c42\u5b9a\u4e49\u7cfb\u7edf\u4e2d\u62d3\u6251\u56fe\u9608\u503c\u3002

                                                    \u9608\u503c\u8bbe\u7f6e\u5fc5\u987b\u5927\u4e8e 0\uff0c\u524d\u9762\u586b\u5199\u7684\u9608\u503c\u5fc5\u987b\u5c0f\u4e8e\u540e\u9762\u586b\u5199\u7684\u3002\u4e14\u586b\u5199\u7684\u9608\u503c\u5fc5\u987b\u5728\u6700\u5927\u548c\u6700\u5c0f\u7684\u8303\u56f4\u4e4b\u95f4\u3002

                                                  Note

                                                  \u4fee\u6539\u5176\u4ed6\u914d\u7f6e\uff0c\u8bf7\u70b9\u51fb\u67e5\u770b\u5982\u4f55\u4fee\u6539\u7cfb\u7edf\u914d\u7f6e\uff1f

                                                  "},{"location":"admin/insight/trace/service.html","title":"\u670d\u52a1\u76d1\u63a7","text":"

                                                  \u5728 \u53ef\u89c2\u6d4b\u6027 Insight \u4e2d\u670d\u52a1\u662f\u6307\u4f7f\u7528 Opentelemtry SDK \u63a5\u5165\u94fe\u8def\u6570\u636e\uff0c\u670d\u52a1\u76d1\u63a7\u80fd\u591f\u8f85\u52a9\u8fd0\u7ef4\u8fc7\u7a0b\u4e2d\u89c2\u5bdf\u5e94\u7528\u7a0b\u5e8f\u7684\u6027\u80fd\u548c\u72b6\u6001\u3002

                                                  \u5982\u4f55\u4f7f\u7528 OpenTelemetry \u8bf7\u53c2\u8003\u4f7f\u7528 OTel \u8d4b\u4e88\u5e94\u7528\u53ef\u89c2\u6d4b\u6027\u3002

                                                  "},{"location":"admin/insight/trace/service.html#_2","title":"\u540d\u8bcd\u89e3\u91ca","text":"
                                                  • \u670d\u52a1 \uff1a\u670d\u52a1\u8868\u793a\u4e3a\u4f20\u5165\u8bf7\u6c42\u63d0\u4f9b\u76f8\u540c\u884c\u4e3a\u7684\u4e00\u7ec4\u5de5\u4f5c\u8d1f\u8f7d\u3002\u60a8\u53ef\u4ee5\u5728\u4f7f\u7528 OpenTelemetry SDK \u65f6\u5b9a\u4e49\u670d\u52a1\u540d\u79f0\u6216\u4f7f\u7528 Istio \u4e2d\u5b9a\u4e49\u7684\u540d\u79f0\u3002
                                                  • \u64cd\u4f5c \uff1a\u64cd\u4f5c\u662f\u6307\u4e00\u4e2a\u670d\u52a1\u5904\u7406\u7684\u7279\u5b9a\u8bf7\u6c42\u6216\u64cd\u4f5c\uff0c\u6bcf\u4e2a Span \u90fd\u6709\u4e00\u4e2a\u64cd\u4f5c\u540d\u79f0\u3002
                                                  • \u51fa\u53e3\u6d41\u91cf \uff1a\u51fa\u53e3\u6d41\u91cf\u662f\u6307\u5f53\u524d\u670d\u52a1\u53d1\u8d77\u8bf7\u6c42\u7684\u6240\u6709\u6d41\u91cf\u3002
                                                  • \u5165\u53e3\u6d41\u91cf \uff1a\u5165\u53e3\u6d41\u91cf\u662f\u6307\u4e0a\u6e38\u670d\u52a1\u5bf9\u5f53\u524d\u670d\u52a1\u53d1\u8d77\u8bf7\u6c42\u7684\u6240\u6709\u6d41\u91cf\u3002
                                                  "},{"location":"admin/insight/trace/service.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                  \u670d\u52a1\u5217\u8868\u9875\u9762\u5c55\u793a\u4e86\u96c6\u7fa4\u4e2d\u6240\u6709\u5df2\u63a5\u5165\u94fe\u8def\u6570\u636e\u7684\u670d\u52a1\u7684\u541e\u5410\u7387\u3001\u9519\u8bef\u7387\u3001\u8bf7\u6c42\u5ef6\u65f6\u7b49\u5173\u952e\u6307\u6807\u3002 \u60a8\u53ef\u4ee5\u6839\u636e\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u5bf9\u670d\u52a1\u8fdb\u884c\u8fc7\u6ee4\uff0c\u4e5f\u53ef\u4ee5\u6309\u7167\u541e\u5410\u7387\u3001\u9519\u8bef\u7387\u3001\u8bf7\u6c42\u5ef6\u65f6\u5bf9\u8be5\u5217\u8868\u8fdb\u884c\u6392\u5e8f\u3002\u5217\u8868\u4e2d\u7684\u6307\u6807\u6570\u636e\u9ed8\u8ba4\u65f6\u95f4\u4e3a 1 \u5c0f\u65f6\uff0c\u60a8\u53ef\u4ee5\u81ea\u5b9a\u4e49\u65f6\u95f4\u8303\u56f4\u3002

                                                  \u8bf7\u6309\u7167\u4ee5\u4e0b\u6b65\u9aa4\u67e5\u770b\u670d\u52a1\u76d1\u63a7\u6307\u6807\uff1a

                                                  1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u4ea7\u54c1\u6a21\u5757\u3002

                                                  2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u94fe\u8def\u8ffd\u8e2a -> \u670d\u52a1 \u3002

                                                    Attention

                                                    1. \u82e5\u5217\u8868\u4e2d\u670d\u52a1\u6240\u5728\u7684\u547d\u540d\u7a7a\u95f4\u4e3a unknown \u65f6\uff0c\u5219\u8868\u793a\u8be5\u670d\u52a1\u672a\u89c4\u8303\u63a5\u5165\uff0c\u5efa\u8bae\u91cd\u65b0\u63a5\u5165\u3002
                                                    2. \u82e5\u63a5\u5165\u7684\u670d\u52a1\u5b58\u5728\u540c\u540d\u4e14\u5747\u672a\u6b63\u786e\u586b\u5199\u73af\u5883\u53d8\u91cf\u4e2d\u7684 \u547d\u540d\u7a7a\u95f4 \u65f6\uff0c\u5217\u8868\u53ca\u670d\u52a1\u8be6\u60c5\u9875\u4e2d\u5c55\u793a\u7684\u76d1\u63a7\u6570\u636e\u4e3a\u591a\u4e2a\u670d\u52a1\u7684\u6c47\u603b\u6570\u636e\u3002
                                                  3. \u70b9\u51fb\u670d\u52a1\u540d (\u4ee5 insight-server \u4e3a\u4f8b)\uff0c\u70b9\u51fb\u8fdb\u5165\u670d\u52a1\u8be6\u60c5\u9875\uff0c\u67e5\u770b\u670d\u52a1\u7684\u8be6\u7ec6\u6307\u6807\u548c\u8be5\u670d\u52a1\u7684\u64cd\u4f5c\u6307\u6807\u3002

                                                    1. \u5728\u670d\u52a1\u62d3\u6251\u6a21\u5757\u4e2d\uff0c\u60a8\u53ef\u4ee5\u67e5\u770b\u5f53\u524d\u6240\u9009\u670d\u52a1\u7684\u4e0a\u4e0b\u5404\u4e00\u5c42\u7684\u670d\u52a1\u62d3\u6251\uff0c\u9f20\u6807\u60ac\u6d6e\u5728\u8282\u70b9\u4e0a\u65f6\u53ef\u4ee5\u67e5\u770b\u8282\u70b9\u7684\u4fe1\u606f\u3002
                                                    2. \u5728\u6d41\u91cf\u6307\u6807\u6a21\u5757\uff0c\u60a8\u53ef\u67e5\u770b\u5230\u8be5\u670d\u52a1\u9ed8\u8ba4\u4e00\u5c0f\u65f6\u5185\u5168\u90e8\u8bf7\u6c42\uff08\u5305\u542b\u5165\u53e3\u6d41\u91cf\u548c\u51fa\u53e3\u6d41\u91cf\uff09\u7684\u76d1\u63a7\u6307\u6807\u3002
                                                    3. \u652f\u6301\u901a\u8fc7\u53f3\u4e0a\u89d2\u7684\u65f6\u95f4\u9009\u62e9\u5668\u5feb\u901f\u9009\u62e9\u65f6\u95f4\u8303\u56f4\uff0c\u6216\u81ea\u5b9a\u4e49\u65f6\u95f4\u8303\u56f4\u3002
                                                    4. \u5728 \u5173\u8054\u5bb9\u5668 \u6a21\u5757\u70b9\u51fb\u5bb9\u5668\u7ec4\u540d\u79f0\uff0c\u53ef\u8df3\u8f6c\u81f3\u5bb9\u5668\u7ec4\u8be6\u60c5\u9875\u3002

                                                  4. \u70b9\u51fb Tab \u5207\u6362\u5230 \u64cd\u4f5c\u6307\u6807 \uff0c\u53ef\u67e5\u8be2\u591a\u9009\u670d\u52a1\u76f8\u540c\u64cd\u4f5c\u7684\u805a\u5408\u8d77\u6765\u7684\u6d41\u91cf\u6307\u6807\u3002

                                                    1. \u652f\u6301\u5bf9\u64cd\u4f5c\u6307\u6807\u4e2d\u7684\u541e\u5410\u7387\u3001\u9519\u8bef\u7387\u3001\u8bf7\u6c42\u5ef6\u65f6\u7b49\u6307\u6807\u8fdb\u884c\u6392\u5e8f\u3002
                                                    2. \u70b9\u51fb\u5355\u4e2a\u64cd\u4f5c\u540e\u7684\u56fe\u6807\uff0c\u53ef\u8df3\u8f6c\u81f3 \u8c03\u7528\u94fe \u5feb\u901f\u67e5\u8be2\u76f8\u5173\u94fe\u8def\u3002

                                                  "},{"location":"admin/insight/trace/service.html#_4","title":"\u670d\u52a1\u6307\u6807\u8bf4\u660e","text":"\u53c2\u6570 \u8bf4\u660e \u541e\u5410\u7387 \u5355\u4f4d\u65f6\u95f4\u5185\u5904\u7406\u8bf7\u6c42\u7684\u6570\u91cf\u3002 \u9519\u8bef\u7387 \u67e5\u8be2\u65f6\u95f4\u8303\u56f4\u5185\u9519\u8bef\u8bf7\u6c42\u4e0e\u8bf7\u6c42\u603b\u6570\u7684\u6bd4\u503c\u3002 P50 \u8bf7\u6c42\u5ef6\u65f6 \u5728\u6240\u6709\u7684\u8bf7\u6c42\u4e2d\uff0c\u6709 50% \u7684\u8bf7\u6c42\u54cd\u5e94\u65f6\u95f4\u5c0f\u4e8e\u6216\u7b49\u4e8e\u8be5\u503c\u3002 P95 \u8bf7\u6c42\u5ef6\u65f6 \u5728\u6240\u6709\u7684\u8bf7\u6c42\u4e2d\uff0c\u6709 95% \u7684\u8bf7\u6c42\u54cd\u5e94\u65f6\u95f4\u5c0f\u4e8e\u6216\u7b49\u4e8e\u8be5\u503c\u3002 P99 \u8bf7\u6c42\u5ef6\u65f6 \u5728\u6240\u6709\u7684\u8bf7\u6c42\u4e2d\uff0c\u6709 95% \u7684\u8bf7\u6c42\u54cd\u5e94\u65f6\u95f4\u5c0f\u4e8e\u6216\u7b49\u4e8e\u8be5\u503c\u3002"},{"location":"admin/insight/trace/topology.html","title":"\u670d\u52a1\u62d3\u6251","text":"

                                                  \u670d\u52a1\u62d3\u6251\u56fe\u662f\u5bf9\u670d\u52a1\u4e4b\u95f4\u8fde\u63a5\u3001\u901a\u4fe1\u548c\u4f9d\u8d56\u5173\u7cfb\u7684\u53ef\u89c6\u5316\u8868\u793a\u3002\u901a\u8fc7\u53ef\u89c6\u5316\u62d3\u6251\u4e86\u89e3\u670d\u52a1\u95f4\u7684\u8c03\u7528\u5173\u7cfb\uff0c \u67e5\u770b\u670d\u52a1\u5728\u6307\u5b9a\u65f6\u95f4\u5185\u7684\u8c03\u7528\u53ca\u5176\u6027\u80fd\u72b6\u51b5\u3002\u62d3\u6251\u56fe\u7684\u8282\u70b9\u4e4b\u95f4\u7684\u8054\u7cfb\u4ee3\u8868\u4e24\u4e2a\u670d\u52a1\u5728\u67e5\u8be2\u65f6\u95f4\u8303\u56f4\u5185\u670d\u52a1\u4e4b\u95f4\u7684\u5b58\u5728\u8c03\u7528\u5173\u7cfb\u3002

                                                  "},{"location":"admin/insight/trace/topology.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                  1. \u96c6\u7fa4\u4e2d\u5df2\u5b89\u88c5 insight-agent \u4e14\u5e94\u7528\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u3002
                                                  2. \u670d\u52a1\u5df2\u901a\u8fc7 Operator \u6216 Opentelemetry SDK \u7684\u65b9\u5f0f\u63a5\u5165\u94fe\u8def\u3002
                                                  "},{"location":"admin/insight/trace/topology.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                  1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u6a21\u5757
                                                  2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u94fe\u8def\u8ffd\u8e2a -> \u670d\u52a1\u62d3\u6251
                                                  3. \u5728\u62d3\u6251\u56fe\u4e2d\uff0c\u60a8\u53ef\u6309\u9700\u6267\u884c\u4ee5\u4e0b\u64cd\u4f5c\uff1a

                                                    • \u5355\u51fb \u8282\u70b9\uff0c\u4ece\u53f3\u4fa7\u5212\u51fa\u670d\u52a1\u7684\u8be6\u60c5\uff0c\u53ef\u67e5\u770b\u670d\u52a1\u7684\u8bf7\u6c42\u5ef6\u65f6\u3001\u541e\u5410\u7387\u3001\u9519\u8bef\u7387\u7684\u6307\u6807\u3002\u70b9\u51fb\u670d\u52a1\u540d\u79f0\u53ef\u8df3\u8f6c\u81f3\u5bf9\u5e94\u670d\u52a1\u7684\u8be6\u60c5\u9875\u3002
                                                    • \u9f20\u6807\u60ac\u6d6e\u5728\u8fde\u7ebf\u4e0a\u65f6\uff0c\u53ef\u67e5\u770b\u4e24\u4e2a\u670d\u52a1\u4e4b\u95f4\u8bf7\u6c42\u7684\u6d41\u91cf\u6307\u6807\u3002
                                                    • \u5728 \u663e\u793a\u8bbe\u7f6e \u6a21\u5757\uff0c\u53ef\u914d\u7f6e\u62d3\u6251\u56fe\u4e2d\u7684\u663e\u793a\u5143\u7d20\u3002

                                                  4. \u70b9\u51fb\u53f3\u4e0b\u89d2 \u56fe\u4f8b \uff0c\u53ef\u901a\u8fc7 \u4e34\u65f6\u914d\u7f6e \u4fee\u6539\u5f53\u524d\u7684\u62d3\u6251\u56fe\u5b9a\u4e49\u7684\u6e32\u67d3\u9608\u503c\uff0c\u8df3\u51fa\u6216\u5173\u95ed\u8be5\u9875\u9762\u5373\u4f1a\u4e22\u5931\u8be5\u914d\u7f6e\u3002

                                                    \u9608\u503c\u8bbe\u7f6e\u5fc5\u987b\u5927\u4e8e 0\uff0c\u524d\u9762\u586b\u5199\u7684\u9608\u503c\u5fc5\u987b\u5c0f\u4e8e\u540e\u9762\u586b\u5199\u7684\u3002\u4e14\u586b\u5199\u7684\u9608\u503c\u5fc5\u987b\u5728\u6700\u5927\u548c\u6700\u5c0f\u7684\u8303\u56f4\u4e4b\u95f4\u3002

                                                  "},{"location":"admin/insight/trace/topology.html#_4","title":"\u5176\u4ed6\u8282\u70b9","text":"

                                                  \u5728\u670d\u52a1\u62d3\u6251\u4e2d\u4f1a\u5b58\u5728\u6e38\u79bb\u5728\u96c6\u7fa4\u4e4b\u5916\u7684\u8282\u70b9\uff0c\u8fd9\u4e9b\u6e38\u79bb\u5728\u5916\u7684\u8282\u70b9\u53ef\u5206\u6210\u4e09\u7c7b\uff1a

                                                  • \u6570\u636e\u5e93
                                                  • \u6d88\u606f\u961f\u5217
                                                  • \u865a\u62df\u8282\u70b9

                                                  • \u82e5\u670d\u52a1\u53d1\u8d77\u8bf7\u6c42\u5230\u6570\u636e\u5e93\u6216\u6d88\u606f\u961f\u5217\u65f6\uff0c\u62d3\u6251\u56fe\u4e2d\u4f1a\u9ed8\u8ba4\u5c55\u793a\u8fd9\u4e24\u7c7b\u8282\u70b9\u3002 \u800c\u865a\u62df\u670d\u52a1\u8868\u793a\u96c6\u7fa4\u5185\u670d\u52a1\u8bf7\u6c42\u4e86\u96c6\u7fa4\u5916\u7684\u8282\u70b9\u6216\u8005\u672a\u63a5\u5165\u94fe\u8def\u7684\u670d\u52a1\uff0c\u62d3\u6251\u56fe\u4e2d\u9ed8\u8ba4\u4e0d\u4f1a\u5c55\u793a \u865a\u62df\u670d\u52a1\u3002

                                                  • \u5f53\u670d\u52a1\u8bf7\u6c42\u5230 MySQL\u3001PostgreSQL\u3001Oracle Database \u8fd9\u4e09\u79cd\u6570\u636e\u5e93\u65f6\uff0c\u5728\u62d3\u6251\u56fe\u4e2d\u53ef\u4ee5\u770b\u5230\u8bf7\u6c42\u7684\u8be6\u7ec6\u6570\u636e\u5e93\u7c7b\u578b\u3002

                                                  "},{"location":"admin/insight/trace/topology.html#_5","title":"\u5f00\u542f\u865a\u62df\u8282\u70b9","text":"
                                                  1. \u66f4\u65b0 insight-server chart \u7684 values\uff0c\u627e\u5230\u4e0b\u56fe\u6240\u793a\u53c2\u6570\uff0c\u5c06 false \u6539\u4e3a true\u3002

                                                  2. \u5728\u670d\u52a1\u62d3\u6251\u7684\u663e\u793a\u8bbe\u7f6e\u4e2d\u52fe\u9009 \u865a\u62df\u670d\u52a1 \u3002
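A minimal sketch of applying such a values change with Helm, assuming the release is named insight in the insight-system namespace and the chart reference is insight/insight; replace <virtual-node-values-key> with the parameter path shown in your values file (the release name, chart reference, and key placeholder are all assumptions):

  helm -n insight-system upgrade insight insight/insight \
    --reuse-values \
    --set <virtual-node-values-key>=true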

                                                  "},{"location":"admin/insight/trace/trace.html","title":"\u94fe\u8def\u67e5\u8be2","text":"

                                                  \u5728\u94fe\u8def\u67e5\u8be2\u9875\u9762\uff0c\u60a8\u53ef\u4ee5\u8fc7 TraceID \u6216\u7cbe\u786e\u67e5\u8be2\u8c03\u7528\u94fe\u8def\u8be6\u7ec6\u60c5\u51b5\u6216\u7ed3\u5408\u591a\u79cd\u6761\u4ef6\u7b5b\u9009\u67e5\u8be2\u8c03\u7528\u94fe\u8def\u3002

                                                  "},{"location":"admin/insight/trace/trace.html#_2","title":"\u540d\u8bcd\u89e3\u91ca","text":"
                                                  • TraceID\uff1a\u7528\u4e8e\u6807\u8bc6\u4e00\u4e2a\u5b8c\u6574\u7684\u8bf7\u6c42\u8c03\u7528\u94fe\u8def\u3002
                                                  • \u64cd\u4f5c\uff1a\u63cf\u8ff0 Span \u6240\u4ee3\u8868\u7684\u5177\u4f53\u64cd\u4f5c\u6216\u4e8b\u4ef6\u3002
                                                  • \u5165\u53e3 Span\uff1a\u5165\u53e3 Span \u4ee3\u8868\u4e86\u6574\u4e2a\u8bf7\u6c42\u7684\u7b2c\u4e00\u4e2a\u8bf7\u6c42\u3002
                                                  • \u5ef6\u65f6\uff1a\u6574\u4e2a\u8c03\u7528\u94fe\u4ece\u5f00\u59cb\u63a5\u6536\u8bf7\u6c42\u5230\u5b8c\u6210\u54cd\u5e94\u7684\u6301\u7eed\u65f6\u95f4\u3002
                                                  • Span\uff1a\u6574\u4e2a\u94fe\u8def\u4e2d\u5305\u542b\u7684 Span \u4e2a\u6570\u3002
                                                  • \u53d1\u751f\u65f6\u95f4\uff1a\u5f53\u524d\u94fe\u8def\u5f00\u59cb\u7684\u65f6\u95f4\u3002
                                                  • Tag\uff1a\u4e00\u7ec4\u952e\u503c\u5bf9\u6784\u6210\u7684 Span \u6807\u7b7e\u96c6\u5408\uff0cTag \u662f\u7528\u6765\u5bf9 Span \u8fdb\u884c\u7b80\u5355\u7684\u6ce8\u89e3\u548c\u8865\u5145\uff0c\u6bcf\u4e2a Span \u53ef\u4ee5\u6709\u591a\u4e2a\u7b80\u76f4\u5bf9\u5f62\u5f0f\u7684 Tag\u3002
                                                  "},{"location":"admin/insight/trace/trace.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                  \u8bf7\u6309\u7167\u4ee5\u4e0b\u6b65\u9aa4\u67e5\u8be2\u94fe\u8def\uff1a

                                                  1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u4ea7\u54c1\u6a21\u5757\uff0c
                                                  2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u94fe\u8def\u8ffd\u8e2a -> \u8c03\u7528\u94fe\u3002

                                                    Note

                                                    \u5217\u8868\u4e2d\u652f\u6301\u5bf9 Span \u6570\u3001\u5ef6\u65f6\u3001\u53d1\u751f\u65f6\u95f4\u8fdb\u884c\u6392\u5e8f\u3002

                                                  3. \u70b9\u51fb\u7b5b\u9009\u680f\u4e2d\u7684 TraceID \u641c\u7d22 \u5207\u6362\u4f7f\u7528 TraceID \u641c\u7d22\u94fe\u8def\u3002

                                                  4. \u4f7f\u7528 TraceID \u641c\u7d22\u8bf7\u8f93\u5165\u5b8c\u6574\u7684 TraceID\u3002

                                                  "},{"location":"admin/insight/trace/trace.html#_4","title":"\u5176\u4ed6\u64cd\u4f5c","text":""},{"location":"admin/insight/trace/trace.html#_5","title":"\u67e5\u770b\u94fe\u8def\u8be6\u60c5","text":"
                                                  1. \u70b9\u51fb\u94fe\u8def\u5217\u8868\u4e2d\u7684\u67d0\u4e00\u94fe\u8def\u7684 TraceID\uff0c\u53ef\u67e5\u770b\u8be5\u94fe\u8def\u7684\u8be6\u60c5\u8c03\u7528\u60c5\u51b5\u3002

                                                  "},{"location":"admin/insight/trace/trace.html#_6","title":"\u67e5\u770b\u5173\u8054\u65e5\u5fd7","text":"
                                                  1. \u70b9\u51fb\u94fe\u8def\u6570\u636e\u53f3\u4fa7\u7684\u56fe\u6807\uff0c\u53ef\u67e5\u8be2\u8be5\u94fe\u8def\u7684\u5173\u8054\u65e5\u5fd7\u3002

                                                    • \u9ed8\u8ba4\u67e5\u8be2\u8be5\u94fe\u8def\u7684\u6301\u7eed\u65f6\u95f4\u53ca\u5176\u7ed3\u675f\u4e4b\u540e\u4e00\u5206\u949f\u5185\u7684\u65e5\u5fd7\u6570\u636e\u3002
                                                    • \u67e5\u8be2\u7684\u65e5\u5fd7\u5185\u5bb9\u4e3a\u65e5\u5fd7\u6587\u672c\u4e2d\u5305\u542b\u8be5\u94fe\u8def\u7684 TraceID \u7684\u65e5\u5fd7\u548c\u94fe\u8def\u8c03\u7528\u8fc7\u7a0b\u4e2d\u76f8\u5173\u7684\u5bb9\u5668\u65e5\u5fd7\u3002
                                                  2. \u70b9\u51fb \u67e5\u770b\u66f4\u591a \u540e\u53ef\u5e26\u6761\u4ef6\u8df3\u8f6c\u5230 \u65e5\u5fd7\u67e5\u8be2 \u7684\u9875\u9762\u3002

                                                  3. \u9ed8\u8ba4\u641c\u7d22\u5168\u90e8\u65e5\u5fd7\uff0c\u4f46\u53ef\u4e0b\u62c9\u6839\u636e\u94fe\u8def\u7684 TraceID \u6216\u94fe\u8def\u8c03\u7528\u8fc7\u7a0b\u4e2d\u76f8\u5173\u7684\u5bb9\u5668\u65e5\u5fd7\u8fdb\u884c\u8fc7\u6ee4\u3002

                                                    Note

                                                    \u7531\u4e8e\u94fe\u8def\u4f1a\u8de8\u96c6\u7fa4\u6216\u8de8\u547d\u540d\u7a7a\u95f4\uff0c\u82e5\u7528\u6237\u6743\u9650\u4e0d\u8db3\uff0c\u5219\u65e0\u6cd5\u67e5\u8be2\u8be5\u94fe\u8def\u7684\u5173\u8054\u65e5\u5fd7\u3002

                                                  "},{"location":"admin/k8s/add-node.html","title":"\u6dfb\u52a0\u5de5\u4f5c\u8282\u70b9","text":"

                                                  \u5982\u679c\u8282\u70b9\u4e0d\u591f\u7528\u4e86\uff0c\u53ef\u4ee5\u6dfb\u52a0\u66f4\u591a\u8282\u70b9\u5230\u96c6\u7fa4\u4e2d\u3002

                                                  "},{"location":"admin/k8s/add-node.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                  • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                                  • \u6709\u4e00\u4e2a\u7ba1\u7406\u5458\u5e10\u53f7
                                                  • \u5df2\u521b\u5efa\u5e26 GPU \u8282\u70b9\u7684\u96c6\u7fa4
                                                  • \u51c6\u5907\u4e00\u53f0\u4e91\u4e3b\u673a
                                                  "},{"location":"admin/k8s/add-node.html#_3","title":"\u6dfb\u52a0\u6b65\u9aa4","text":"
                                                  1. \u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                                                  2. \u5bfc\u822a\u81f3 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0

                                                  3. \u8fdb\u5165\u96c6\u7fa4\u6982\u89c8\u9875\uff0c\u70b9\u51fb \u8282\u70b9\u7ba1\u7406 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u63a5\u5165\u8282\u70b9 \u6309\u94ae

                                                  4. \u6309\u7167\u5411\u5bfc\uff0c\u586b\u5199\u5404\u9879\u53c2\u6570\u540e\u70b9\u51fb \u786e\u5b9a

                                                    \u57fa\u672c\u4fe1\u606f\u53c2\u6570\u914d\u7f6e

                                                  5. \u5728\u5f39\u7a97\u4e2d\u70b9\u51fb \u786e\u5b9a

                                                  6. \u8fd4\u56de\u8282\u70b9\u5217\u8868\uff0c\u65b0\u63a5\u5165\u7684\u8282\u70b9\u72b6\u6001\u4e3a \u63a5\u5165\u4e2d \uff0c\u7b49\u5f85\u51e0\u5206\u949f\u540e\u72b6\u6001\u53d8\u4e3a \u5065\u5eb7 \u5219\u8868\u793a\u63a5\u5165\u6210\u529f\u3002

                                                  Tip

For a node that has just been integrated, it may take another 2-3 minutes before the GPU is recognized.
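One way to check whether the GPU has been recognized is to inspect the node's allocatable resources. A minimal sketch, assuming the node is named gpu-node-1 and its device plugin registers an nvidia.com/gpu resource (both names are assumptions; other GPU vendors register different resource names):

  # The GPU resource appears under allocatable once the device plugin reports it
  kubectl get node gpu-node-1 -o jsonpath='{.status.allocatable}'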

                                                  "},{"location":"admin/k8s/create-k8s.html","title":"\u521b\u5efa\u4e91\u4e0a Kubernetes \u96c6\u7fa4","text":"

                                                  \u90e8\u7f72 Kubernetes \u96c6\u7fa4\u662f\u4e3a\u4e86\u652f\u6301\u9ad8\u6548\u7684 AI \u7b97\u529b\u8c03\u5ea6\u548c\u7ba1\u7406\uff0c\u5b9e\u73b0\u5f39\u6027\u4f38\u7f29\uff0c\u63d0\u4f9b\u9ad8\u53ef\u7528\u6027\uff0c\u4ece\u800c\u4f18\u5316\u6a21\u578b\u8bad\u7ec3\u548c\u63a8\u7406\u8fc7\u7a0b\u3002

                                                  "},{"location":"admin/k8s/create-k8s.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                  • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0\u5df2
                                                  • \u6709\u4e00\u4e2a\u7ba1\u7406\u5458\u6743\u9650\u7684\u8d26\u53f7
                                                  • \u51c6\u5907\u4e00\u53f0\u5e26 GPU \u7684\u7269\u7406\u673a
                                                  • \u5206\u914d\u4e24\u6bb5 IP \u5730\u5740\uff08Pod CIDR 18 \u4f4d\u3001SVC CIDR 18 \u4f4d\uff0c\u4e0d\u80fd\u4e0e\u73b0\u6709\u7f51\u6bb5\u51b2\u7a81\uff09
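Purely as an illustration of two non-overlapping /18 ranges (hypothetical values; verify against your own network plan):

# Example only -- each /18 provides 16,384 addresses\nPod CIDR: 10.233.64.0/18\nSVC CIDR: 10.233.0.0/18\n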
                                                  "},{"location":"admin/k8s/create-k8s.html#_2","title":"\u521b\u5efa\u6b65\u9aa4","text":"
                                                  1. \u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                                                  2. \u521b\u5efa\u5e76\u542f\u52a8 3 \u53f0\u4e0d\u5e26 GPU \u7684\u4e91\u4e3b\u673a\u7528\u4f5c\u96c6\u7fa4\u7684 Master \u8282\u70b9

• Configure resources: 16 CPU cores, 32 GB memory, 200 GB system disk (ReadWriteOnce)
• Select Bridge as the network mode
• Set a root password or add an SSH public key for convenient SSH access
• Record the IPs of the 3 hosts
3. Navigate to Container Management -> Cluster List and click the Create Cluster button on the right

4. Follow the wizard to configure the cluster parameters

Basic Information | Node Configuration | Network Configuration | Addon Configuration | Advanced Configuration

After configuring the node information, click Start Check.

Each node can run 110 Pods by default; for nodes with higher specs, the limit can be raised to 200 or 300 Pods (see the sketch below).
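The 110-Pod default comes from the kubelet's maxPods setting. As a hedged sketch (a KubeletConfiguration fragment; apply it through whatever node-configuration mechanism your cluster uses), raising the limit looks like:

# /var/lib/kubelet/config.yaml (fragment)\napiVersion: kubelet.config.k8s.io/v1beta1\nkind: KubeletConfiguration\nmaxPods: 200\n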

5. Wait for cluster creation to complete.

6. In the cluster list, find the newly created cluster and click its name. Navigate to Helm Apps -> Helm Templates, search for metax-gpu-extensions in the search box, and click the card

7. Click the Install button on the right to start installing the GPU plugin

App Settings | Kubernetes Orchestration | Confirm

Enter a name, select a namespace, and modify the image address in the YAML:

8. You are automatically returned to the Helm app list; wait for the status of metax-gpu-extensions to change to Deployed

9. The cluster is now created successfully. You can check which nodes it contains, and go on to create AI workloads that use GPUs.

Next step: Create an AI Workload

                                                  "},{"location":"admin/k8s/remove-node.html","title":"\u79fb\u9664 GPU \u5de5\u4f5c\u8282\u70b9","text":"

                                                  GPU \u8d44\u6e90\u7684\u6210\u672c\u76f8\u5bf9\u8f83\u9ad8\uff0c\u5982\u679c\u6682\u65f6\u7528\u4e0d\u5230 GPU\uff0c\u53ef\u4ee5\u5c06\u5e26 GPU \u7684\u5de5\u4f5c\u8282\u70b9\u79fb\u9664\u3002 \u4ee5\u4e0b\u6b65\u9aa4\u4e5f\u540c\u6837\u9002\u7528\u4e8e\u79fb\u9664\u666e\u901a\u5de5\u4f5c\u8282\u70b9\u3002

                                                  "},{"location":"admin/k8s/remove-node.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                  • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                                  • \u6709\u4e00\u4e2a\u7ba1\u7406\u5458\u5e10\u53f7
                                                  • \u5df2\u521b\u5efa\u5e26 GPU \u8282\u70b9\u7684\u96c6\u7fa4
                                                  "},{"location":"admin/k8s/remove-node.html#_2","title":"\u79fb\u9664\u6b65\u9aa4","text":"
                                                  1. \u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                                                  2. \u5bfc\u822a\u81f3 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0

3. On the cluster overview page, click Node Management, find the node to remove, click ⋮ on the right of its row, and select Remove Node from the popup menu

4. In the dialog, enter the node name, confirm it is correct, then click Delete

5. You are automatically returned to the node list with the node in the Removing status. Refresh the page after a few minutes; if the node is gone, it has been removed successfully

6. After removing the node from the UI list, log in to the removed node host via SSH and run the shutdown command.
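For example, a typical shutdown command on Linux:

shutdown -h now  # halt the host immediately\n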

                                                  Tip

After the node is removed in the UI and shut down, the data on it is not deleted immediately; the node's data is retained for a period of time.

                                                  "},{"location":"admin/kpanda/backup/index.html","title":"\u5907\u4efd\u6062\u590d","text":"

                                                  \u5907\u4efd\u6062\u590d\u5206\u4e3a\u5907\u4efd\u548c\u6062\u590d\u4e24\u65b9\u9762\uff0c\u5b9e\u9645\u5e94\u7528\u65f6\u9700\u8981\u5148\u5907\u4efd\u7cfb\u7edf\u5728\u67d0\u4e00\u65f6\u70b9\u7684\u6570\u636e\uff0c\u7136\u540e\u5b89\u5168\u5b58\u50a8\u5730\u5907\u4efd\u6570\u636e\u3002\u540e\u7eed\u5982\u679c\u51fa\u73b0\u6570\u636e\u635f\u574f\u3001\u4e22\u5931\u3001\u8bef\u5220\u7b49\u4e8b\u6545\uff0c\u5c31\u53ef\u4ee5\u57fa\u4e8e\u4e4b\u524d\u7684\u6570\u636e\u5907\u4efd\u5feb\u901f\u8fd8\u539f\u7cfb\u7edf\uff0c\u7f29\u77ed\u6545\u969c\u65f6\u95f4\uff0c\u51cf\u5c11\u635f\u5931\u3002

• In real production environments, services may be deployed in a distributed fashion across different clouds, regions, or availability zones. If one piece of infrastructure fails, the enterprise needs to restore applications quickly in another available environment. In such cases, cross-cloud/cross-cluster backup and restore becomes very important.
• Large-scale systems often involve many roles and users, complex permission systems, and numerous operators, so sooner or later someone will misoperate and cause a system failure. Here too, the system must be able to roll back quickly from previously backed-up data; relying on people to troubleshoot, repair, and recover takes a long time, and the longer the system is unavailable, the greater the enterprise's loss.
• In addition, factors such as network attacks, natural disasters, and hardware failures can also cause data incidents.

Therefore, backup and restore is very important; it can be regarded as the last line of defense for maintaining system stability and data security.

Backups are usually classified as full, incremental, or differential. The Suanfeng AI computing platform currently supports full backup and incremental backup.

The backup/restore capability provided by the Suanfeng AI computing platform comes in two kinds, Application Backup and ETCD Backup, both supporting manual backup or scheduled automatic backup based on a CronJob.

• Application backup

Application backup means backing up the data of a particular workload in the cluster and then restoring that data to the same or another cluster. It supports backing up all resources under an entire namespace, as well as filtering by label selector to back up only resources carrying specific labels.

Application backup supports cross-cluster backup of stateful applications; for detailed steps, see Cross-Cluster Backup and Restore of MySQL Applications and Data.

• ETCD backup

etcd is the data store of Kubernetes: Kubernetes keeps both its own component data and the application data inside etcd. Backing up etcd is therefore equivalent to backing up the entire cluster's data, so the cluster can be quickly restored to its state at an earlier point in time after a failure.

Note that currently etcd backup data can only be restored into the same cluster (the original cluster).
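For reference, outside the platform UI an etcd snapshot is typically taken with etcdctl. A minimal sketch (the endpoint and save path are placeholders; the certificate paths follow the defaults shown later in this document):

ETCDCTL_API=3 etcdctl --endpoints=https://{node-ip}:2379 --cacert=/etc/kubernetes/ssl/etcd/ca.crt --cert=/etc/kubernetes/ssl/apiserver-etcd-client.crt --key=/etc/kubernetes/ssl/apiserver-etcd-client.key snapshot save /tmp/etcd-snapshot.db\n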

                                                  "},{"location":"admin/kpanda/backup/deployment.html","title":"\u5e94\u7528\u5907\u4efd","text":"

                                                  \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4e3a\u5e94\u7528\u505a\u5907\u4efd\uff0c\u672c\u6559\u7a0b\u4e2d\u4f7f\u7528\u7684\u6f14\u793a\u5e94\u7528\u540d\u4e3a dao-2048 \uff0c\u5c5e\u4e8e\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u3002

                                                  "},{"location":"admin/kpanda/backup/deployment.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                  \u5728\u5bf9\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u8fdb\u884c\u5907\u4efd\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

• A Kubernetes cluster has been integrated into or created in the Container Management module, and its UI console is accessible.

• A namespace and a user have been created.

• The current user has NS Editor or higher permissions; see Namespace Authorization for details.

• The velero component is installed and running normally.

• A stateless workload has been created (named dao-2048 in this tutorial) and labeled with app: dao-2048 (see the command after this list).
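If the label was not set when the workload was created, it can be added afterwards; a sketch assuming dao-2048 is a Deployment and {namespace} is your namespace:

kubectl label deployment dao-2048 app=dao-2048 -n {namespace}\n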

                                                  "},{"location":"admin/kpanda/backup/deployment.html#_3","title":"\u5907\u4efd\u5de5\u4f5c\u8d1f\u8f7d","text":"

                                                  \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u5907\u4efd\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d dao-2048 \u3002

1. In the left navigation bar, click Container Management -> Backup & Restore.

2. On the Application Backup list page, select the cluster where velero and dao-2048 are installed from the cluster dropdown, then click the Create Backup Plan button on the right.

3. Fill in the backup configuration following the notes below.

4. Set the backup execution frequency following the notes below, then click Next.

• Backup frequency: set the execution schedule by minute, hour, day, week, or month. Custom Cron expressions built from digits and * are supported; after you enter an expression, its meaning is shown below it (for a few illustrations, see the examples after this list). For the detailed expression syntax rules, see Cron Schedule Syntax.
• Retention period (days): how long backups are kept, 30 days by default; expired backups are deleted.
• Back up data volumes (PV): whether to back up the data in persistent volumes (PVs); two methods are supported: direct copy and CSI snapshot.
  • Direct copy: copy the data in the PV directly for the backup;
  • CSI snapshot: use a CSI snapshot to back up the PV. The cluster must have a CSI snapshot class usable for backup.
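A few illustrative Cron expressions (examples only):

*/30 * * * *   # every 30 minutes\n0 2 * * *      # daily at 02:00\n0 3 * * 6      # every Saturday at 03:00\n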

5. Click OK; the page automatically returns to the application backup plan list. Find the newly created dao-2048 backup plan, click ⋮ on its right, and select Run Now to start the backup.

6. The cluster's Last Run Status then changes to Backing Up. Once the backup completes, click the backup plan's name to view its details.

                                                  Note

Workloads of type Job whose status is Completed cannot be backed up.

                                                  "},{"location":"admin/kpanda/backup/etcd-backup.html","title":"etcd \u5907\u4efd","text":"

                                                  etcd \u5907\u4efd\u662f\u4ee5\u96c6\u7fa4\u6570\u636e\u4e3a\u6838\u5fc3\u7684\u5907\u4efd\u3002\u5728\u786c\u4ef6\u8bbe\u5907\u635f\u574f\uff0c\u5f00\u53d1\u6d4b\u8bd5\u914d\u7f6e\u9519\u8bef\u7b49\u573a\u666f\u4e2d\uff0c\u53ef\u4ee5\u901a\u8fc7 etcd \u5907\u4efd\u6062\u590d\u96c6\u7fa4\u6570\u636e\u3002

This article describes how to create etcd backups for a cluster.

"},{"location":"admin/kpanda/backup/etcd-backup.html#_1","title":"Prerequisites","text":"
• A Kubernetes cluster has been integrated or created, and its UI console is accessible.

• A namespace and a user have been created, and the user has been granted NS Admin or higher permissions. See Namespace Authorization for details.

• A MinIO instance is available.

                                                  "},{"location":"admin/kpanda/backup/etcd-backup.html#etcd_1","title":"\u521b\u5efa etcd \u5907\u4efd","text":"

                                                  \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u521b\u5efa etcd \u5907\u4efd\u3002

1. Go to Container Management -> Backup & Restore -> etcd Backup, click the Backup Policy tab, then click Create Backup Policy on the right.

2. Fill in the Basic Information following the notes below. After completing it, click Next; the system automatically verifies connectivity to etcd, and once verification passes you can proceed to the next step.

• Backup cluster: select the cluster whose etcd data is to be backed up, and log in to it in a terminal
• etcd address: in the format https://${NodeIP}:${Port}

  • In a standard Kubernetes cluster, the default etcd port is 2379
  • In a managed public-cloud cluster, contact the relevant developers to obtain the etcd port. The control-plane components of public-cloud clusters are maintained and managed by the cloud provider, so users can neither access or inspect these components directly nor obtain control-plane information such as ports through regular commands (e.g. kubectl).
  How to obtain the port number
  1. Find the etcd Pod in the kube-system namespace

                                                        kubectl get po -n kube-system | grep etcd\n
2. Get the port number from the listen-client-urls of the etcd Pod

                                                        kubectl get po -n kube-system ${etcd_pod_name} -oyaml | grep listen-client-urls # (1)!\n
1. Replace etcd_pod_name with the actual Pod name

The expected output is as follows; the number after the node IP is the port number:

                                                        - --listen-client-urls=https://127.0.0.1:2379,https://10.6.229.191:2379\n
• CA certificate: view the certificate with the following command, then copy and paste its content into the corresponding field:

                                                      cat /etc/kubernetes/ssl/etcd/ca.crt\n
• Cert certificate: view the certificate with the following command, then copy and paste its content into the corresponding field:

                                                      cat /etc/kubernetes/ssl/apiserver-etcd-client.crt\n
• Key: view the key with the following command, then copy and paste its content into the corresponding field:

                                                      cat /etc/kubernetes/ssl/apiserver-etcd-client.key\n

                                                    Note

Click How to Obtain below an input box to see, on the UI page, how to obtain the corresponding information.
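If the automatic connectivity check fails, the address and certificates can also be verified manually. A hedged sketch using etcdctl on a control node, with the same values entered above (the endpoint is a placeholder):

ETCDCTL_API=3 etcdctl --endpoints=https://{node-ip}:2379 --cacert=/etc/kubernetes/ssl/etcd/ca.crt --cert=/etc/kubernetes/ssl/apiserver-etcd-client.crt --key=/etc/kubernetes/ssl/apiserver-etcd-client.key endpoint health\n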

3. Fill in the Backup Policy following the notes below.

• Backup method: choose manual backup or scheduled backup

• Manual backup: immediately performs one full backup of the etcd data based on the backup configuration.
• Scheduled backup: periodically performs full backups of the etcd data at the configured frequency.
• Backup chain length: the maximum number of backups to retain, 30 by default.

• Backup frequency: hourly, daily, weekly, monthly, and custom schedules are supported.

4. Fill in the Storage Location following the notes below.

• Storage provider: S3 storage by default
• Object storage access address: the access address of MinIO
• Bucket: create a bucket in MinIO and enter its name
• Username: the MinIO login username
• Password: the MinIO login password

5. After you click OK, the page automatically navigates to the backup policy list, where all created policies are shown.

• Click the ⋮ action button on the right of a policy to view its logs or YAML, update it, stop it, run it immediately, and so on.
• When the backup method is manual, you can click Run Now to perform a backup.
• When the backup method is scheduled, backups run at the configured times.

                                                  "},{"location":"admin/kpanda/backup/etcd-backup.html#_2","title":"\u67e5\u770b\u5907\u4efd\u7b56\u7565\u65e5\u5fd7","text":"

                                                  \u70b9\u51fb \u65e5\u5fd7 \u53ef\u4ee5\u67e5\u770b\u65e5\u5fd7\u5185\u5bb9\uff0c\u9ed8\u8ba4\u5c55\u793a 100 \u884c\u3002\u82e5\u60f3\u67e5\u770b\u66f4\u591a\u65e5\u5fd7\u4fe1\u606f\u6216\u8005\u4e0b\u8f7d\u65e5\u5fd7\uff0c\u53ef\u5728\u65e5\u5fd7\u4e0a\u65b9\u6839\u636e\u63d0\u793a\u524d\u5f80\u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u3002

                                                  "},{"location":"admin/kpanda/backup/etcd-backup.html#_3","title":"\u67e5\u770b\u5907\u4efd\u7b56\u7565\u8be6\u60c5","text":"

                                                  \u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 -> \u5907\u4efd\u6062\u590d -> etcd \u5907\u4efd \uff0c\u70b9\u51fb \u5907\u4efd\u7b56\u7565 \u9875\u7b7e\uff0c\u63a5\u7740\u70b9\u51fb\u7b56\u7565\u540d\u79f0\u53ef\u4ee5\u67e5\u770b\u7b56\u7565\u8be6\u60c5\u3002

                                                  "},{"location":"admin/kpanda/backup/etcd-backup.html#_4","title":"\u67e5\u770b\u5907\u4efd\u70b9","text":"
                                                  1. \u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 -> \u5907\u4efd\u6062\u590d -> etcd \u5907\u4efd \uff0c\u70b9\u51fb \u5907\u4efd\u70b9 \u9875\u7b7e\u3002
                                                  2. \u9009\u62e9\u76ee\u6807\u96c6\u7fa4\u540e\uff0c\u53ef\u4ee5\u67e5\u770b\u8be5\u96c6\u7fa4\u4e0b\u6240\u6709\u5907\u4efd\u4fe1\u606f\u3002

                                                    \u6bcf\u6267\u884c\u4e00\u6b21\u5907\u4efd\uff0c\u5bf9\u5e94\u751f\u6210\u4e00\u4e2a\u5907\u4efd\u70b9\uff0c\u53ef\u901a\u8fc7\u6210\u529f\u72b6\u6001\u7684\u5907\u4efd\u70b9\u5feb\u901f\u6062\u590d\u5e94\u7528\u3002

                                                  "},{"location":"admin/kpanda/backup/install-velero.html","title":"\u5b89\u88c5 velero \u63d2\u4ef6","text":"

                                                  velero \u662f\u4e00\u4e2a\u5907\u4efd\u548c\u6062\u590d Kubernetes \u96c6\u7fa4\u8d44\u6e90\u7684\u5f00\u6e90\u5de5\u5177\u3002\u5b83\u53ef\u4ee5\u5c06 Kubernetes \u96c6\u7fa4\u4e2d\u7684\u8d44\u6e90\u5907\u4efd\u5230\u4e91\u5b58\u50a8\u670d\u52a1\u3001\u672c\u5730\u5b58\u50a8\u6216\u5176\u4ed6\u4f4d\u7f6e\uff0c\u5e76\u4e14\u53ef\u4ee5\u5728\u9700\u8981\u65f6\u5c06\u8fd9\u4e9b\u8d44\u6e90\u6062\u590d\u5230\u540c\u4e00\u6216\u4e0d\u540c\u7684\u96c6\u7fa4\u4e2d\u3002
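For context, once deployed velero is usually driven through its CLI; for example (standard velero commands, with placeholder names):

velero backup create my-backup --include-namespaces default   # back up one namespace\nvelero restore create --from-backup my-backup                 # restore from that backup\n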

This section describes how to deploy the velero plugin in the Suanfeng AI computing platform using a Helm App.

                                                  "},{"location":"admin/kpanda/backup/install-velero.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                  \u5b89\u88c5 velero \u63d2\u4ef6\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

• A Kubernetes cluster has been integrated into or created in the Container Management module, and its UI console is accessible.

• The velero namespace has been created (see the command after this list).

• The current user has NS Editor or higher permissions; see Namespace Authorization for details.
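The velero namespace mentioned above can be created with a single command:

kubectl create namespace velero\n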

                                                  "},{"location":"admin/kpanda/backup/install-velero.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                  \u8bf7\u6267\u884c\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 velero \u63d2\u4ef6\u3002

                                                  1. \u5728\u96c6\u7fa4\u5217\u8868\u9875\u9762\u627e\u5230\u9700\u8981\u5b89\u88c5 velero \u63d2\u4ef6\u7684\u76ee\u6807\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4f9d\u6b21\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u6a21\u677f \uff0c\u5728\u641c\u7d22\u680f\u8f93\u5165 velero \u8fdb\u884c\u641c\u7d22\u3002

2. Read the introduction of the velero plugin, select a version, and click the Install button. This article uses version 4.0.2 as the example; installing 4.0.2 or later is recommended.

3. Fill in and configure the parameters, then click Next

Basic Parameters | Parameter Configuration

• Name: required; the plugin name, at most 63 characters, containing only lowercase letters, digits, and hyphens ('-'), and starting and ending with a lowercase letter or digit, e.g. metrics-server-01.
• Namespace: the namespace the plugin is installed into, velero by default.
• Version: the plugin version, 4.0.2 in this example.
• Ready Wait: optional; when enabled, the app is marked as successfully installed only after all resources associated with it are ready.
• Delete on Failure: optional; when enabled, Ready Wait is also enabled by default, and if the installation fails, the installation-related resources are deleted.
• Verbose Log: optional; when enabled, detailed logs of the installation process are output.

                                                    Note

After Ready Wait and/or Delete on Failure is enabled, it takes considerably longer for the app to be marked as Running.

• S3 Credentials:

• Use secret: keep the default value true.
• Secret name: keep the default value velero-s3-credential.
• SecretContents.aws_access_key_id =: the username for accessing the object storage; replace the value with the real one.
• SecretContents.aws_secret_access_key =: the password for accessing the object storage; replace the value with the real one.

                                                        config \"SecretContents \u6837\u4f8b\" [default] aws_access_key_id = minio aws_secret_access_key = minio123

• Velero Configuration:

• Backupstoragelocation: where velero stores backup data
• S3 bucket: the name of the bucket used to save backup data (must be a bucket that already exists in MinIO)
• Is default BackupStorage: keep the default value true
• S3 access mode: velero's access mode to the data; one of
  • ReadWrite: allow velero to read and write backup data
  • ReadOnly: allow velero to read backup data but not modify it
  • WriteOnly: allow velero only to write backup data, not read it
• S3 Configs: detailed configuration of the S3 storage (MinIO)
• S3 region: the geographic region of the cloud storage. Defaults to us-east-1, provided by the system administrator
• S3 force path style: keep the default value true
• S3 server URL: the console access address of the object storage (MinIO). MinIO usually provides both a UI access service and a console access service; use the console access address here

                                                        Note

Make sure the time difference between the S3 storage service and the cluster being backed up or restored is within 10 minutes, and preferably keep their clocks synchronized; otherwise, backup operations will fail.

• migration plugin configuration: when enabled, the following is added to the YAML snippet in the next step:

                                                        ...\ninitContainers:\n  - image: 'release.daocloud.io/kcoral/velero-plugin-for-migration:v0.3.0'\n    imagePullPolicy: IfNotPresent\n    name: velero-plugin-for-migration\n    volumeMounts:\n      - mountPath: /target\n        name: plugins\n  - image: 'docker.m.daocloud.io/velero/velero-plugin-for-csi:v0.7.0'\n    imagePullPolicy: IfNotPresent\n    name: velero-plugin-for-csi\n    volumeMounts:\n      - mountPath: /target\n        name: plugins\n  - image: 'docker.m.daocloud.io/velero/velero-plugin-for-aws:v1.9.0'\n    imagePullPolicy: IfNotPresent\n    name: velero-plugin-for-aws\n    volumeMounts:\n      - mountPath: /target\n        name: plugins\n...\n
• After confirming the YAML is correct, click OK to complete the installation of the velero plugin. The system then automatically navigates to the Helm Apps list page; wait a few minutes, refresh the page, and you will see the app you just installed.

"},{"location":"admin/kpanda/best-practice/add-master-node.html","title":"Scale Up the Control Nodes of a Worker Cluster","text":"

Taking a worker cluster with a single control node as an example, this article describes how to manually scale up a worker cluster's control nodes to make a self-built worker cluster highly available.

                                                        Note

• It is recommended to enable high-availability mode when creating the worker cluster in the UI; manually scaling a worker cluster's control nodes carries some operational risk, so proceed with caution.
• If the first control node of the worker cluster fails or becomes abnormal and you want to replace or re-integrate it, see Replacing the First Control Node of a Worker Cluster
"},{"location":"admin/kpanda/best-practice/add-master-node.html#_2","title":"Prerequisites","text":"
• A worker cluster has been created through the AI computing center platform; see Creating a Worker Cluster.
• The worker cluster's management cluster exists in the current platform and is running normally.

                                                        Note

Management cluster: the cluster specified when creating a cluster in the UI that manages the current cluster, providing it with capabilities such as Kubernetes version upgrades, node scaling, uninstallation, and operation records.

                                                        "},{"location":"admin/kpanda/best-practice/add-master-node.html#_3","title":"\u4fee\u6539\u4e3b\u673a\u6e05\u5355\u6587\u4ef6","text":"
                                                        1. \u767b\u5f55\u5230\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u8fdb\u5165\u9700\u8981\u8fdb\u884c\u63a7\u5236\u8282\u70b9\u6269\u5bb9\u7684\u96c6\u7fa4\u6982\u89c8\u9875\u9762\uff0c\u5728 \u57fa\u672c\u4fe1\u606f \u5904\uff0c\u627e\u5230\u5f53\u524d\u96c6\u7fa4\u7684 \u88ab\u7eb3\u7ba1\u96c6\u7fa4 \uff0c \u70b9\u51fb\u88ab\u7eb3\u7ba1\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u88ab\u7eb3\u7ba1\u96c6\u7fa4\u7684\u6982\u89c8\u754c\u9762\u3002

                                                        2. \u5728\u88ab\u7eb3\u7ba1\u96c6\u7fa4\u7684\u6982\u89c8\u754c\u9762\uff0c\u70b9\u51fb \u63a7\u5236\u53f0\uff0c\u6253\u5f00\u4e91\u7ec8\u7aef\u63a7\u5236\u53f0\uff0c\u5e76\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c\u627e\u5230\u5f85\u6269\u5bb9\u5de5\u4f5c\u96c6\u7fa4\u7684\u4e3b\u673a\u6e05\u5355\u6587\u4ef6\u3002

                                                          kubectl get cm -n kubean-system ${ClusterName}-hosts-conf -oyaml\n

${ClusterName}: the name of the worker cluster to be scaled up.

3. Modify the host inventory file as in the example below, adding the new control node information.

Before | After
                                                          apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: tanghai-dev-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      hosts:\n        node1:\n          ip: 10.6.175.10 \n          access_ip: 10.6.175.10\n          ansible_host: 10.6.175.10 \n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: password01\n      children:\n        kube_control_plane:\n          hosts:\n            node1:\n        kube_node:\n          hosts:\n            node1:\n        etcd:\n          hosts:\n            node1:\n        k8s_cluster:\n          children:\n            kube_control_plane:\n            kube_node:\n        calico_rr:\n          hosts: {}\n......\n
apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: tanghai-dev-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      hosts:\n        node1: # master node already present in the original cluster\n          ip: 10.6.175.10\n          access_ip: 10.6.175.10\n          ansible_host: 10.6.175.10\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: password01\n        node2: # new control node to be added during scale-up\n          ip: 10.6.175.20\n          access_ip: 10.6.175.20\n          ansible_host: 10.6.175.20\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: password01\n        node3: # new control node to be added during scale-up\n          ip: 10.6.175.30\n          access_ip: 10.6.175.30\n          ansible_host: 10.6.175.30\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: password01\n      children:\n        kube_control_plane:\n          hosts: # control plane node group of the cluster\n            node1:\n            node2: # newly added control node node2\n            node3: # newly added control node node3\n        kube_node:\n          hosts: # worker node group of the cluster\n            node1:\n            node2: # newly added control node node2\n            node3: # newly added control node node3\n        etcd:\n          hosts: # etcd node group of the cluster\n            node1:\n            node2: # newly added control node node2\n            node3: # newly added control node node3\n        k8s_cluster:\n          children:\n            kube_control_plane:\n            kube_node:\n        calico_rr:\n          hosts: {}\n
                                                        "},{"location":"admin/kpanda/best-practice/add-master-node.html#clusteroperationyml","title":"\u65b0\u589e ClusterOperation.yml \u6269\u5bb9\u4efb\u52a1","text":"

                                                        \u4f7f\u7528\u57fa\u4e8e\u4e0b\u9762\u7684 ClusterOperation.yml \u6a21\u677f\uff0c\u65b0\u589e\u4e00\u4e2a\u96c6\u7fa4\u63a7\u5236\u8282\u70b9\u6269\u5bb9\u4efb\u52a1 scale-master-node-ops.yaml \u3002

                                                        ClusterOperation.yml
apiVersion: kubean.io/v1alpha1\nkind: ClusterOperation\nmetadata:\n  name: cluster1-online-install-ops\nspec:\n  cluster: ${cluster-name} # (1)!\n  image: ghcr.m.daocloud.io/kubean-io/spray-job:v0.18.0 # (2)!\n  actionType: playbook\n  action: cluster.yml # (3)!\n  extraArgs: --limit=etcd,kube_control_plane -e ignore_assert_errors=yes\n  preHook:\n    - actionType: playbook\n      action: ping.yml\n    - actionType: playbook\n      action: disable-firewalld.yml\n    - actionType: playbook\n      action: enable-repo.yml  # (4)!\n      extraArgs: | # in offline environments, add enable-repo.yml and fill extraArgs with the correct repo_list for the relevant OS\n        -e \"{repo_list: ['http://172.30.41.0:9000/kubean/centos/\\$releasever/os/\\$basearch','http://172.30.41.0:9000/kubean/centos-iso/\\$releasever/os/\\$basearch']}\"\n  postHook:\n    - actionType: playbook\n      action: upgrade-cluster.yml\n      extraArgs: --limit=etcd,kube_control_plane -e ignore_assert_errors=yes\n    - actionType: playbook\n      action: kubeconfig.yml\n    - actionType: playbook\n      action: cluster-info.yml\n
1. Specify the cluster name
2. Specify the image for running the kubean task; the image address must stay consistent with the image inside the job used for the earlier deployment
3. If you add three or more master (etcd) nodes at once, append the extra parameter -e etcd_retries=10 to cluster.yaml to increase the etcd node join retry count
4. In offline environments, this yaml must be added with a correct repo-list configured (for installing OS packages); the parameter values below are for reference only

Then create and deploy scale-master-node-ops.yaml.

                                                        vi scale-master-node-ops.yaml\nkubectl apply -f scale-master-node-ops.yaml -n kubean-system\n

After completing the steps above, run the following command to verify the result:

                                                        kubectl get node\n
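If the scale-up succeeded, the newly added nodes appear as Ready control-plane nodes; illustrative output only (names, ages, and versions are placeholders):

NAME    STATUS   ROLES           AGE   VERSION\nnode1   Ready    control-plane   20d   v1.26.5\nnode2   Ready    control-plane   5m    v1.26.5\nnode3   Ready    control-plane   5m    v1.26.5\n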
                                                        "},{"location":"admin/kpanda/best-practice/add-worker-node-on-global.html","title":"\u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u5de5\u4f5c\u8282\u70b9\u6269\u5bb9","text":"

                                                        \u672c\u6587\u5c06\u4ecb\u7ecd\u79bb\u7ebf\u6a21\u5f0f\u4e0b\uff0c\u5982\u4f55\u624b\u52a8\u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u5de5\u4f5c\u8282\u70b9\u8fdb\u884c\u6269\u5bb9\u3002 \u9ed8\u8ba4\u60c5\u51b5\u4e0b\uff0c\u4e0d\u5efa\u8bae\u5728\u90e8\u7f72 AI \u7b97\u529b\u4e2d\u5fc3\u540e\u5bf9\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u8fdb\u884c\u6269\u5bb9\uff0c\u8bf7\u5728\u90e8\u7f72 AI \u7b97\u529b\u4e2d\u5fc3\u524d\u505a\u597d\u8d44\u6e90\u89c4\u5212\u3002

                                                        Note

The control nodes of the global service cluster cannot be scaled.

                                                        "},{"location":"admin/kpanda/best-practice/add-worker-node-on-global.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                        • \u5df2\u7ecf\u901a\u8fc7\u706b\u79cd\u8282\u70b9\u5b8c\u6210 AI \u7b97\u529b\u4e2d\u5fc3\u5e73\u53f0\u7684\u90e8\u7f72\uff0c\u5e76\u4e14\u706b\u79cd\u8282\u70b9\u4e0a\u7684 kind \u96c6\u7fa4\u8fd0\u884c\u6b63\u5e38\u3002
                                                        • \u5fc5\u987b\u4f7f\u7528\u5e73\u53f0 Admin \u6743\u9650\u7684\u7528\u6237\u767b\u5f55\u3002
                                                        "},{"location":"admin/kpanda/best-practice/add-worker-node-on-global.html#kind-kubeconfig","title":"\u83b7\u53d6\u706b\u79cd\u8282\u70b9\u4e0a kind \u96c6\u7fa4\u7684 kubeconfig","text":"
                                                        1. \u6267\u884c\u5982\u4e0b\u547d\u4ee4\u767b\u5f55\u706b\u79cd\u8282\u70b9\uff1a

ssh root@{seed node IP}\n
2. On the seed node, run the following command to get the CONTAINER ID of the kind cluster:

[root@localhost ~]# podman ps\n\n# Expected output:\nCONTAINER ID  IMAGE                                      COMMAND     CREATED      STATUS      PORTS                                                                                                         NAMES\n220d662b1b6a  docker.m.daocloud.io/kindest/node:v1.26.2              2 weeks ago  Up 2 weeks  0.0.0.0:443->30443/tcp, 0.0.0.0:8081->30081/tcp, 0.0.0.0:9000-9001->32000-32001/tcp, 0.0.0.0:36674->6443/tcp  my-cluster-installer-control-plane\n
3. Run the following command to enter the kind cluster container:

                                                          podman exec -it {CONTAINER ID} bash\n

Replace {CONTAINER ID} with your actual container ID

4. Inside the kind cluster container, run the following command to get the kind cluster's kubeconfig:

                                                          kubectl config view --minify --flatten --raw\n

Once the console prints it, copy the kind cluster's kubeconfig in preparation for the next step.

                                                        "},{"location":"admin/kpanda/best-practice/add-worker-node-on-global.html#kind-clusterkubeanio","title":"\u5728\u706b\u79cd\u8282\u70b9\u4e0a kind \u96c6\u7fa4\u5185\u521b\u5efa cluster.kubean.io \u8d44\u6e90","text":"
                                                        1. \u4f7f\u7528 podman exec -it {CONTAINER ID} bash \u547d\u4ee4\u8fdb\u5165 kind \u96c6\u7fa4\u5bb9\u5668\u5185\u3002

2. Inside the kind cluster container, run the following command to get the kind cluster name:

                                                          kubectl get clusters\n
3. Copy and run the following command inside the kind cluster to create the cluster.kubean.io resource:

                                                          kubectl apply -f - <<EOF\napiVersion: kubean.io/v1alpha1\nkind: Cluster\nmetadata:\n  labels:\n    clusterName: kpanda-global-cluster\n  name: kpanda-global-cluster\nspec:\n  hostsConfRef:\n    name: my-cluster-hosts-conf\n    namespace: kubean-system\n  kubeconfRef:\n    name: my-cluster-kubeconf\n    namespace: kubean-system\n  varsConfRef:\n    name: my-cluster-vars-conf\n    namespace: kubean-system\nEOF\n

                                                          Note

The cluster name in spec.hostsConfRef.name, spec.kubeconfRef.name, and spec.varsConfRef.name defaults to my-cluster and must be replaced with the kind cluster name obtained in the previous step.

4. Run the following command inside the kind cluster to verify that the cluster.kubean.io resource was created successfully:

                                                          kubectl get clusters\n

The expected output is:

                                                          NAME                    AGE\nkpanda-global-cluster   3s\nmy-cluster              16d\n
                                                        "},{"location":"admin/kpanda/best-practice/add-worker-node-on-global.html#kind-containerd","title":"\u66f4\u65b0\u706b\u79cd\u8282\u70b9\u4e0a\u7684 kind \u96c6\u7fa4\u91cc\u7684 containerd \u914d\u7f6e","text":"
                                                        1. \u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c\u767b\u5f55\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u5176\u4e2d\u4e00\u4e2a\u63a7\u5236\u8282\u70b9\uff1a

ssh root@{global service cluster control node IP}\n
2. On the global service cluster control node, run the following command to copy the control node's containerd configuration file config.toml to the seed node:

scp /etc/containerd/config.toml root@{seed node IP}:/root\n
3. On the seed node, take the insecure image registry section from the config.toml copied over from the control node and add it to the config.toml inside the kind cluster.

An example of the insecure image registry section:

                                                          [plugins.\"io.containerd.grpc.v1.cri\".registry]\n  [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors]\n    [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors.\"10.6.202.20\"]\n      endpoint = [\"https://10.6.202.20\"]\n    [plugins.\"io.containerd.grpc.v1.cri\".registry.configs.\"10.6.202.20\".tls]\n      insecure_skip_verify = true\n

                                                          Note

Because the config.toml file cannot be modified directly inside the kind cluster, copy the file out, edit it, and copy it back into the kind cluster as follows:

1. On the bootstrap node, run the following command to copy the file out:

                                                            podman cp {CONTAINER ID}:/etc/containerd/config.toml ./config.toml.kind\n
2. Edit the config.toml file:

                                                            vim ./config.toml.kind\n
3. Copy the modified file back into the kind cluster:

                                                            podman cp ./config.toml.kind {CONTAINER ID}:/etc/containerd/config.toml\n

Replace {CONTAINER ID} with your actual container ID (see the sketch below for locating it).
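A hedged way to locate that ID, assuming the kind node runs under podman on the bootstrap node:

    podman ps | grep kind   # the first column of the matching row is the CONTAINER ID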

4. Inside the kind cluster, run the following command to restart the containerd service:

                                                          systemctl restart containerd\n
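As an optional sanity check after the restart, you can confirm that the insecure registry settings were picked up (a sketch that assumes crictl is available inside the kind node, as it is in standard kind images):

    crictl info | grep -A 5 -i registry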
                                                        "},{"location":"admin/kpanda/best-practice/add-worker-node-on-global.html#kind-ai","title":"\u5c06 kind \u96c6\u7fa4\u63a5\u5165 AI \u7b97\u529b\u4e2d\u5fc3\u96c6\u7fa4\u5217\u8868","text":"
                                                        1. \u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3\uff0c\u8fdb\u5165\u5bb9\u5668\u7ba1\u7406\uff0c\u5728\u96c6\u7fa4\u5217\u8868\u9875\u53f3\u4fa7\u70b9\u51fb \u63a5\u5165\u96c6\u7fa4 \u6309\u94ae\uff0c\u8fdb\u5165\u63a5\u5165\u96c6\u7fa4\u9875\u9762\u3002

2. In the integration configuration, paste and edit the kubeconfig of the kind cluster you just copied.

                                                          apiVersion: v1\nclusters:\n- cluster:\n    insecure-skip-tls-verify: true # (1)!\n    certificate-authority-data: LS0TLSCFDFWEFEWFEWFGGEWGFWFEWGWEGFEWGEWGSDGFSDSD\n    server: https://my-cluster-installer-control-plane:6443 # (2)!\nname: my-cluster-installer\ncontexts:\n- context:\n    cluster: my-cluster-installer\n    user: kubernetes-admin\nname: kubernetes-admin@my-cluster-installer\ncurrent-context: kubernetes-admin@my-cluster-installer\nkind: Config\npreferences: {}\nusers:\n
1. Skip TLS verification; this line must be added manually.
2. Replace with the IP of the bootstrap node, and replace port 6443 with the port mapped on the node (you can run podman ps|grep 6443 to check the mapped port).

3. Click the Confirm button to finish integrating the kind cluster.

"},{"location":"admin/kpanda/best-practice/add-worker-node-on-global.html#_3","title":"Add a label to the global service cluster","text":"
1. Log in to the AI Computing Center, go to Container Management, locate the kpanda-global-cluster cluster, open the Basic Configuration menu item in the operations column on the right, and enter the basic configuration page.

2. On the basic configuration page, add the label kpanda.io/managed-by=my-cluster to the global service cluster:

                                                        Note

The value in the label kpanda.io/managed-by=my-cluster is the cluster name specified when the cluster was integrated (my-cluster by default); adjust it to your actual situation.

                                                        "},{"location":"admin/kpanda/best-practice/add-worker-node-on-global.html#_4","title":"\u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u6dfb\u52a0\u8282\u70b9","text":"
                                                        1. \u8fdb\u5165\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u8282\u70b9\u5217\u8868\u9875\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u63a5\u5165\u8282\u70b9 \u6309\u94ae\u3002

2. Enter the IP and authentication information of the node to be added, click Start Check, and after the node passes the check, click Next.

3. Add the following custom parameters under Custom Parameters:

                                                          download_run_once: false\ndownload_container: false\ndownload_force_cache: false\ndownload_localhost: false\n

4. Click OK and wait for the node to be added.

"},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html","title":"Cross-cluster backup and restore of a MySQL application and its data","text":"

This demo uses the application backup feature of the AI Computing Center to perform a cross-cluster backup and migration of a stateful application.

                                                        Note

The operator must have AI Computing Center platform administrator permissions.

                                                        "},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html#_1","title":"\u51c6\u5907\u6f14\u793a\u73af\u5883","text":""},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html#_2","title":"\u51c6\u5907\u4e24\u4e2a\u96c6\u7fa4","text":"

                                                        main-cluster \u4f5c\u4e3a\u5907\u4efd\u6570\u636e\u7684\u6e90\u96c6\u7fa4\uff0c recovery-cluster \u96c6\u7fa4\u4f5c\u4e3a\u9700\u8981\u6062\u590d\u6570\u636e\u7684\u76ee\u6807\u96c6\u7fa4\u3002

                                                        \u96c6\u7fa4 IP \u8282\u70b9 main-cluster 10.6.175.100 1 \u8282\u70b9 recovery-cluster 10.6.175.110 1 \u8282\u70b9"},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html#minio","title":"\u642d\u5efa MinIO \u914d\u7f6e","text":"MinIO \u670d\u52a1\u5668\u8bbf\u95ee\u5730\u5740 \u5b58\u50a8\u6876 \u7528\u6237\u540d \u5bc6\u7801 http://10.7.209.110:9000 mysql-demo root dangerous"},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html#nfs","title":"\u5728\u4e24\u4e2a\u96c6\u7fa4\u90e8\u7f72 NFS \u5b58\u50a8\u670d\u52a1","text":"

                                                        Note

The NFS storage service must be deployed on all nodes of both the source cluster and the target cluster.

1. Install the dependencies required by NFS on all nodes of both clusters (a loop sketch for doing this over SSH follows this step).

    yum install -y nfs-utils iscsi-initiator-utils\n
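A minimal sketch for running the install on every node over SSH (the IPs are the two demo nodes from the table above; extend the list for larger clusters):

    for ip in 10.6.175.100 10.6.175.110; do
        ssh root@$ip "yum install -y nfs-utils iscsi-initiator-utils"
    done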
2. Prepare the NFS storage service for the MySQL application.

Log in to any control node of the main-cluster and recovery-cluster clusters, create a file named nfs.yaml on the node with vi nfs.yaml, and copy the YAML below into it.

Click to view the complete nfs.yaml

                                                          kind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\nname: nfs-provisioner-runner\nnamespace: nfs-system\nrules:\n- apiGroups: [\"\"]\n    resources: [\"persistentvolumes\"]\n    verbs: [\"get\", \"list\", \"watch\", \"create\", \"delete\"]\n- apiGroups: [\"\"]\n    resources: [\"persistentvolumeclaims\"]\n    verbs: [\"get\", \"list\", \"watch\", \"update\"]\n- apiGroups: [\"storage.k8s.io\"]\n    resources: [\"storageclasses\"]\n    verbs: [\"get\", \"list\", \"watch\"]\n- apiGroups: [\"\"]\n    resources: [\"events\"]\n    verbs: [\"create\", \"update\", \"patch\"]\n- apiGroups: [\"\"]\n    resources: [\"services\", \"endpoints\"]\n    verbs: [\"get\"]\n- apiGroups: [\"extensions\"]\n    resources: [\"podsecuritypolicies\"]\n    resourceNames: [\"nfs-provisioner\"]\n    verbs: [\"use\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\nname: run-nfs-provisioner\nsubjects:\n- kind: ServiceAccount\n    name: nfs-provisioner\n    # replace with namespace where provisioner is deployed\n    namespace: default\nroleRef:\nkind: ClusterRole\nname: nfs-provisioner-runner\napiGroup: rbac.authorization.k8s.io\n---\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\nname: leader-locking-nfs-provisioner\nrules:\n- apiGroups: [\"\"]\n    resources: [\"endpoints\"]\n    verbs: [\"get\", \"list\", \"watch\", \"create\", \"update\", \"patch\"]\n---\nkind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\nname: leader-locking-nfs-provisioner\nsubjects:\n- kind: ServiceAccount\n    name: nfs-provisioner\n    # replace with namespace where provisioner is deployed\n    namespace: default\nroleRef:\nkind: Role\nname: leader-locking-nfs-provisioner\napiGroup: rbac.authorization.k8s.io\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\nname: nfs-provisioner\n---\nkind: Service\napiVersion: v1\nmetadata:\nname: nfs-provisioner\nlabels:\n    app: nfs-provisioner\nspec:\nports:\n    - name: nfs\n    port: 2049\n    - name: nfs-udp\n    port: 2049\n    protocol: UDP\n    - name: nlockmgr\n    port: 32803\n    - name: nlockmgr-udp\n    port: 32803\n    protocol: UDP\n    - name: mountd\n    port: 20048\n    - name: mountd-udp\n    port: 20048\n    protocol: UDP\n    - name: rquotad\n    port: 875\n    - name: rquotad-udp\n    port: 875\n    protocol: UDP\n    - name: rpcbind\n    port: 111\n    - name: rpcbind-udp\n    port: 111\n    protocol: UDP\n    - name: statd\n    port: 662\n    - name: statd-udp\n    port: 662\n    protocol: UDP\nselector:\n    app: nfs-provisioner\n---\nkind: Deployment\napiVersion: apps/v1\nmetadata:\nname: nfs-provisioner\nspec:\nselector:\n    matchLabels:\n    app: nfs-provisioner\nreplicas: 1\nstrategy:\n    type: Recreate\ntemplate:\n    metadata:\n    labels:\n        app: nfs-provisioner\n    spec:\n    serviceAccount: nfs-provisioner\n    containers:\n        - name: nfs-provisioner\n        resources:\n            limits:\n            cpu: \"1\"\n            memory: \"4294967296\"\n        image: release.daocloud.io/velero/nfs-provisioner:v3.0.0\n        ports:\n            - name: nfs\n            containerPort: 2049\n            - name: nfs-udp\n            containerPort: 2049\n            protocol: UDP\n            - name: nlockmgr\n            containerPort: 32803\n            - name: nlockmgr-udp\n            containerPort: 32803\n            protocol: UDP\n            - name: mountd\n            containerPort: 20048\n            - 
name: mountd-udp\n            containerPort: 20048\n            protocol: UDP\n            - name: rquotad\n            containerPort: 875\n            - name: rquotad-udp\n            containerPort: 875\n            protocol: UDP\n            - name: rpcbind\n            containerPort: 111\n            - name: rpcbind-udp\n            containerPort: 111\n            protocol: UDP\n            - name: statd\n            containerPort: 662\n            - name: statd-udp\n            containerPort: 662\n            protocol: UDP\n        securityContext:\n            capabilities:\n            add:\n                - DAC_READ_SEARCH\n                - SYS_RESOURCE\n        args:\n            - \"-provisioner=example.com/nfs\"\n        env:\n            - name: POD_IP\n            valueFrom:\n                fieldRef:\n                fieldPath: status.podIP\n            - name: SERVICE_NAME\n            value: nfs-provisioner\n            - name: POD_NAMESPACE\n            valueFrom:\n                fieldRef:\n                fieldPath: metadata.namespace\n        imagePullPolicy: \"IfNotPresent\"\n        volumeMounts:\n            - name: export-volume\n            mountPath: /export\n    volumes:\n        - name: export-volume\n        hostPath:\n            path: /data\n---\nkind: StorageClass\napiVersion: storage.k8s.io/v1\nmetadata:\nname: nfs\nprovisioner: example.com/nfs\nmountOptions:\n- vers=4.1\n

3. Apply the nfs.yaml file on the control nodes of both clusters.

kubectl apply -f nfs.yaml\n

    The expected output is:

    [root@g-master1 ~]# kubectl apply -f nfs.yaml\nclusterrole.rbac.authorization.k8s.io/nfs-provisioner-runner created\nclusterrolebinding.rbac.authorization.k8s.io/run-nfs-provisioner created\nrole.rbac.authorization.k8s.io/leader-locking-nfs-provisioner created\nrolebinding.rbac.authorization.k8s.io/leader-locking-nfs-provisioner created\nserviceaccount/nfs-provisioner created\nservice/nfs-provisioner created\ndeployment.apps/nfs-provisioner created\nstorageclass.storage.k8s.io/nfs created\n
4. Check the NFS Pod status and wait for it to become running (about 2 minutes).

                                                          kubectl get pod -n nfs-system -owide\n

The expected output is:

                                                          [root@g-master1 ~]# kubectl get pod -owide\nNAME                               READY   STATUS    RESTARTS   AGE     IP              NODE        NOMINATED NODE   READINESS GATES\nnfs-provisioner-7dfb9bcc45-74ws2   1/1     Running   0          4m45s   10.6.175.100   g-master1   <none>           <none>\n
                                                        "},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html#mysql_1","title":"\u90e8\u7f72 MySQL \u5e94\u7528","text":"
                                                        1. \u4e3a MySQL \u5e94\u7528\u51c6\u5907\u57fa\u4e8e NFS \u5b58\u50a8\u7684 PVC\uff0c\u7528\u6765\u5b58\u50a8 MySQL \u670d\u52a1\u5185\u7684\u6570\u636e\u3002

Create a file named pvc.yaml on the node with vi pvc.yaml and copy the YAML below into it.

pvc.yaml

                                                          apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: mydata\n  namespace: default\nspec:\n  accessModes:\n  - ReadWriteOnce\n  resources:\n    requests:\n      storage: \"1Gi\"\n  storageClassName: nfs\n  volumeMode: Filesystem\n

2. Apply the pvc.yaml file on the node with the kubectl tool.

                                                          kubectl apply -f pvc.yaml\n

The expected output is:

                                                          [root@g-master1 ~]# kubectl apply -f pvc.yaml\npersistentvolumeclaim/mydata created\n
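Optionally verify that the claim binds before moving on (a hedged check; the PVC name and namespace come from pvc.yaml above):

    kubectl get pvc mydata -n default   # STATUS should become Bound once the NFS provisioner creates the PV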

3. Deploy the MySQL application.

    Create a file named mysql.yaml on the node with vi mysql.yaml and copy the YAML below into it.

    Click to view the complete mysql.yaml

                                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: mysql-deploy\n  name: mysql-deploy\n  namespace: default\nspec:\n  progressDeadlineSeconds: 600\n  replicas: 1\n  revisionHistoryLimit: 10\n  selector:\n    matchLabels:\n      app: mysql-deploy\n  strategy:\n    rollingUpdate:\n      maxSurge: 25%\n      maxUnavailable: 25%\n    type: RollingUpdate\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mysql-deploy\n      name: mysql-deploy\n    spec:\n      containers:\n      - args:\n        - --ignore-db-dir=lost+found\n        env:\n        - name: MYSQL_ROOT_PASSWORD\n          value: dangerous\n        image: release.daocloud.io/velero/mysql:5\n        imagePullPolicy: IfNotPresent\n        name: mysql-deploy\n        ports:\n        - containerPort: 3306\n          protocol: TCP\n        resources:\n          limits:\n            cpu: \"1\"\n            memory: \"4294967296\"\n        terminationMessagePath: /dev/termination-log\n        terminationMessagePolicy: File\n        volumeMounts:\n        - mountPath: /var/lib/mysql\n          name: data\n      dnsPolicy: ClusterFirst\n      restartPolicy: Always\n      schedulerName: default-scheduler\n      securityContext:\n        fsGroup: 999\n      terminationGracePeriodSeconds: 30\n      volumes:\n      - name: data\n        persistentVolumeClaim:\n          claimName: mydata\n

4. Apply the mysql.yaml file on the node with the kubectl tool.

                                                          kubectl apply -f mysql.yaml\n

The expected output is:

                                                          [root@g-master1 ~]# kubectl apply -f mysql.yaml\ndeployment.apps/mysql-deploy created\n
5. Check the MySQL Pod status.

    Run kubectl get pod | grep mysql and wait for the Pod to become running (about 2 minutes).

The expected output is:

                                                          [root@g-master1 ~]# kubectl get pod |grep mysql\nmysql-deploy-5d6f94cb5c-gkrks      1/1     Running   0          2m53s\n

                                                          Note

• If the MySQL Pod stays in a non-running state for a long time, it is usually because the NFS dependencies were not installed on all nodes of the cluster.
• Run kubectl describe pod ${mysql pod name} to view the Pod details.
• If the error contains a message like MountVolume.SetUp failed for volume "pvc-4ad70cc6-df37-4253-b0c9-8cb86518ccf8" : mount failed: exit status 32, run kubectl delete -f nfs.yaml/pvc.yaml/mysql.yaml to delete the earlier resources and start again from deploying the NFS service.
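Once the Pod is running, a quick hedged connectivity check (using the demo credentials from mysql.yaml):

    kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e "SELECT VERSION();"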
6. Write data to the MySQL application.

    To make it easy to verify later whether the migrated data is intact, you can use a script to write test data into the MySQL application.

1. Create a script named insert.sh on the node with vi insert.sh and copy the script below into it.

    insert.sh
                                                            #!/bin/bash\n\nfunction rand(){\n    min=$1\n    max=$(($2-$min+1))\n    num=$(date +%s%N)\n    echo $(($num%$max+$min))\n}\n\nfunction insert(){\n    user=$(date +%s%N | md5sum | cut -c 1-9)\n    age=$(rand 1 100)\n\n    sql=\"INSERT INTO test.users(user_name, age)VALUES('${user}', ${age});\"\n    echo -e ${sql}\n\n    kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"${sql}\"\n\n}\n\nkubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"CREATE DATABASE IF NOT EXISTS test;\"\nkubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"CREATE TABLE IF NOT EXISTS test.users(user_name VARCHAR(10) NOT NULL,age INT UNSIGNED)ENGINE=InnoDB DEFAULT CHARSET=utf8;\"\n\nwhile true;do\n    insert\n    sleep 1\ndone\n
2. Make the insert.sh script executable and run it.

                                                            [root@g-master1 ~]# chmod +x insert.sh\n[root@g-master1 ~]# ./insert.sh\n

The expected output is:

                                                            mysql: [Warning] Using a password on the command line interface can be insecure.\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('dc09195ba', 10);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('80ab6aa28', 70);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('f488e3d46', 23);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('e6098695c', 93);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('eda563e7d', 63);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('a4d1b8d68', 17);\nmysql: [Warning] Using a password on the command line interface can be insecure.\n
3. Press Ctrl+C to stop the script.

4. Go to the MySQL Pod and inspect the data written into MySQL.

                                                            kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"SELECT * FROM test.users;\"\n

The expected output is:

                                                            mysql: [Warning] Using a password on the command line interface can be insecure.\nuser_name   age\ndc09195ba   10\n80ab6aa28   70\nf488e3d46   23\ne6098695c   93\neda563e7d   63\na4d1b8d68   17\nea47546d9   86\na34311f2e   47\n740cefe17   33\nede85ea28   65\nb6d0d6a0e   46\nf0eb38e50   44\nc9d2f28f5   72\n8ddaafc6f   31\n3ae078d0e   23\n6e041631e   96\n
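It is also worth recording the row count now so it can be compared after the restore (a sketch with the same demo credentials):

    kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e "SELECT COUNT(*) FROM test.users;"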
                                                        "},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html#velero","title":"\u5728\u4e24\u4e2a\u96c6\u7fa4\u5b89\u88c5 velero \u63d2\u4ef6","text":"

                                                        Note

The velero plugin must be installed on both the source cluster and the target cluster.

Install the velero plugin on the main-cluster and recovery-cluster clusters by following the velero plugin installation document together with the MinIO configuration below.

| MinIO server access address | Bucket | Username | Password |
| --- | --- | --- | --- |
| http://10.7.209.110:9000 | mysql-demo | root | dangerous |

                                                        Note

When installing the plugin, replace S3url with the MinIO server access address prepared for this demo, and replace the bucket with a bucket that actually exists in MinIO.

                                                        "},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html#mysql_2","title":"\u5907\u4efd MySQL \u5e94\u7528\u53ca\u6570\u636e","text":"
                                                        1. \u5728\u5907\u4efd\u524d\u6211\u4eec\u9700\u8981\u5148\u4fdd\u8bc1\u6570\u636e\u5e93\u4e0d\u80fd\u6709\u65b0\u6570\u636e\u8fdb\u6765\uff0c\u6240\u4ee5\u8981\u8bbe\u7f6e\u4e3a\u53ea\u8bfb\u6a21\u5f0f\uff1a

mysql> set global read_only=1;    # 1 = read-only, 0 = read-write\nmysql> show global variables like \"%read_only%\"; # check the current status\n
2. Add a dedicated label, backup=mysql, to the MySQL application and its PVC data, so these resources are easy to select at backup time.

kubectl label deploy mysql-deploy backup=mysql # label the mysql-deploy workload\nkubectl label pod mysql-deploy-5d6f94cb5c-gkrks backup=mysql # label the mysql pod\nkubectl label pvc mydata backup=mysql # label the mysql pvc\n
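A hedged verification that all three resources carry the label before the backup is created:

    kubectl get deploy,pod,pvc -l backup=mysql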
3. Create an application backup following the steps described in Application Backup, with the parameters below (a velero CLI sketch follows the list).

• Name: backup-mysql (customizable)
• Source cluster: main-cluster
• Namespace: default
• Resource filter - specified resource label: backup:mysql
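For reference, the UI flow above corresponds roughly to the following velero CLI call (a sketch only; the platform drives the backup through its own controller and may add parameters of its own):

    velero backup create backup-mysql --include-namespaces default --selector backup=mysql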

4. After the backup plan is created, the page automatically returns to the backup plan list. Find the newly created plan backup-mysql, click the more-actions button ..., and choose Run Now to execute it.

5. Once the backup plan has finished running, you can proceed with the next steps.

                                                        "},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html#mysql_3","title":"\u8de8\u96c6\u7fa4\u6062\u590d MySQL \u5e94\u7528\u53ca\u6570\u636e","text":"
                                                        1. \u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3\u5e73\u53f0\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u9009\u62e9 \u5bb9\u5668\u7ba1\u7406 -> \u5907\u4efd\u6062\u590d -> \u5e94\u7528\u5907\u4efd \u3002

2. Select Restore in the left sidebar, then click Restore Backup on the right.

3. Fill in the parameters according to the descriptions below (a velero CLI sketch follows the list):

• Name: restore-mysql (customizable)
• Backup source cluster: main-cluster
• Backup plan: backup-mysql
• Backup point: default
• Restore target cluster: recovery-cluster
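Again for reference, a hedged velero CLI equivalent of this restore (names as in the list above; it would have to run against the target cluster, which must see the same MinIO bucket):

    velero restore create restore-mysql --from-backup backup-mysql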

4. Refresh the backup plan list and wait for the restore to finish.

"},{"location":"admin/kpanda/best-practice/backup-mysql-on-nfs.html#_3","title":"Verify that the data was restored successfully","text":"
1. Log in to the control node of the recovery-cluster cluster and check whether the mysql-deploy workload has been successfully restored into this cluster.

                                                          kubectl get pod\n

The expected output is as follows:

                                                          NAME                               READY   STATUS    RESTARTS   AGE\nmysql-deploy-5798f5d4b8-62k6c      1/1     Running   0          24h\n
2. Check whether the data in the MySQL tables was restored successfully.

                                                          kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"SELECT * FROM test.users;\"\n

The expected output is as follows:

                                                          mysql: [Warning] Using a password on the command line interface can be insecure.\nuser_name   age\ndc09195ba   10\n80ab6aa28   70\nf488e3d46   23\ne6098695c   93\neda563e7d   63\na4d1b8d68   17\nea47546d9   86\na34311f2e   47\n740cefe17   33\nede85ea28   65\nb6d0d6a0e   46\nf0eb38e50   44\nc9d2f28f5   72\n8ddaafc6f   31\n3ae078d0e   23\n6e041631e   96\n

                                                          Success

As you can see, the data in the Pod matches the data in the Pod on the main-cluster cluster, which shows that the MySQL application and its data have been successfully restored across clusters from main-cluster to recovery-cluster.
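As a final hedged cross-check, comparing table checksums on both clusters should yield identical values:

    kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e "CHECKSUM TABLE test.users;"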

                                                        "},{"location":"admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html","title":"\u5728 CentOS \u7ba1\u7406\u5e73\u53f0\u4e0a\u521b\u5efa RedHat 9.2 \u5de5\u4f5c\u96c6\u7fa4","text":"

                                                        \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5728\u5df2\u6709\u7684 CentOS \u7ba1\u7406\u5e73\u53f0\u4e0a\u521b\u5efa RedHat 9.2 \u5de5\u4f5c\u96c6\u7fa4\u3002

                                                        Note

This article applies only to offline mode, using the AI Computing Center platform to create a worker cluster, with both the management platform and the worker cluster to be created on the AMD architecture. Heterogeneous (mixed AMD and ARM) deployment is not supported at cluster creation time; after the cluster is created, you can manage a mixed deployment by integrating heterogeneous nodes.

                                                        "},{"location":"admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                        \u5df2\u7ecf\u90e8\u7f72\u597d\u4e00\u4e2a AI \u7b97\u529b\u4e2d\u5fc3\u5168\u6a21\u5f0f\uff0c\u5e76\u4e14\u706b\u79cd\u8282\u70b9\u8fd8\u5b58\u6d3b\uff0c\u90e8\u7f72\u53c2\u8003\u6587\u6863\u79bb\u7ebf\u5b89\u88c5 AI \u7b97\u529b\u4e2d\u5fc3\u5546\u4e1a\u7248

                                                        "},{"location":"admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#redhat","title":"\u4e0b\u8f7d\u5e76\u5bfc\u5165 RedHat \u76f8\u5173\u79bb\u7ebf\u5305","text":"

                                                        \u8bf7\u786e\u4fdd\u5df2\u7ecf\u767b\u5f55\u5230\u706b\u79cd\u8282\u70b9\uff01\u5e76\u4e14\u4e4b\u524d\u90e8\u7f72 AI \u7b97\u529b\u4e2d\u5fc3\u65f6\u4f7f\u7528\u7684 clusterConfig.yaml \u6587\u4ef6\u8fd8\u5728\u3002

                                                        "},{"location":"admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#redhat_1","title":"\u4e0b\u8f7d RedHat \u76f8\u5173\u79bb\u7ebf\u5305","text":"

                                                        \u4e0b\u8f7d\u6240\u9700\u7684 RedHat OS package \u5305\u548c ISO \u79bb\u7ebf\u5305\uff1a

| Resource | Description | Download |
| --- | --- | --- |
| os-pkgs-redhat9-v0.9.3.tar.gz | RedHat 9.2 OS package bundle | https://github.com/kubean-io/kubean/releases/download/v0.9.3/os-pkgs-redhat9-v0.9.3.tar.gz |
| ISO offline package | ISO image | Log in and download from the official RedHat site |
| import-iso | Script that imports the ISO into the bootstrap node | https://github.com/kubean-io/kubean/releases/download/v0.9.3/import_iso.sh |
"},{"location":"admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#os-pckage-minio","title":"Import the OS package offline bundle into the bootstrap node's MinIO","text":"

Extract the RedHat OS package offline bundle

Run the following command to extract the downloaded OS package offline bundle (here, the RedHat one):

                                                        tar -xvf os-pkgs-redhat9-v0.9.3.tar.gz \n

The extracted contents of the OS package bundle are as follows:

    os-pkgs\n    ├── import_ospkgs.sh       # script that imports the os packages into the MinIO file service\n    ├── os-pkgs-amd64.tar.gz   # os packages for the amd64 architecture\n    ├── os-pkgs-arm64.tar.gz   # os packages for the arm64 architecture\n    └── os-pkgs.sha256sum.txt  # sha256sum checksum file of the os packages\n

Import the OS packages into the bootstrap node's MinIO

Run the following command to import the OS packages bundle into the MinIO file service:

                                                        MINIO_USER=rootuser MINIO_PASS=rootpass123 ./import_ospkgs.sh  http://127.0.0.1:9000 os-pkgs-redhat9-v0.9.3.tar.gz\n

                                                        Note

The above command only applies to the MinIO service built into the bootstrap node. If you use an external MinIO, replace http://127.0.0.1:9000 with the access address of the external MinIO. "rootuser" and "rootpass123" are the default account and password of the built-in MinIO service, and "os-pkgs-redhat9-v0.9.3.tar.gz" is the name of the downloaded OS package offline bundle.

                                                        "},{"location":"admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#iso-minio","title":"\u5bfc\u5165 ISO \u79bb\u7ebf\u5305\u81f3\u706b\u79cd\u8282\u70b9\u7684 MinIO","text":"

                                                        \u6267\u884c\u5982\u4e0b\u547d\u4ee4, \u5c06 ISO \u5305\u5230 MinIO \u6587\u4ef6\u670d\u52a1\u4e2d:

                                                        MINIO_USER=rootuser MINIO_PASS=rootpass123 ./import_iso.sh http://127.0.0.1:9000 rhel-9.2-x86_64-dvd.iso\n

                                                        Note

The above command only applies to the MinIO service built into the bootstrap node. If you use an external MinIO, replace http://127.0.0.1:9000 with the access address of the external MinIO. "rootuser" and "rootpass123" are the default account and password of the built-in MinIO service, and "rhel-9.2-x86_64-dvd.iso" is the name of the downloaded ISO offline package.

                                                        "},{"location":"admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#ui","title":"\u524d\u5f80 UI \u754c\u9762\u521b\u5efa\u96c6\u7fa4","text":"

                                                        \u53c2\u8003\u6587\u6863\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4\uff0c\u521b\u5efa RedHat 9.2 \u96c6\u7fa4\u3002

                                                        "},{"location":"admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html","title":"\u5728 CentOS \u7ba1\u7406\u5e73\u53f0\u4e0a\u521b\u5efa Ubuntu \u5de5\u4f5c\u96c6\u7fa4","text":"

                                                        \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5728\u5df2\u6709\u7684 CentOS \u7ba1\u7406\u5e73\u53f0\u4e0a\u521b\u5efa Ubuntu \u5de5\u4f5c\u96c6\u7fa4\u3002

                                                        Note

This article applies only to offline mode, using the AI Computing Center platform to create a worker cluster, with both the management platform and the worker cluster to be created on the AMD architecture. Heterogeneous (mixed AMD and ARM) deployment is not supported at cluster creation time; after the cluster is created, you can manage a mixed deployment by integrating heterogeneous nodes.

                                                        "},{"location":"admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                        • \u5df2\u7ecf\u90e8\u7f72\u597d\u4e00\u4e2a AI \u7b97\u529b\u4e2d\u5fc3\u5168\u6a21\u5f0f\uff0c\u5e76\u4e14\u706b\u79cd\u8282\u70b9\u8fd8\u5b58\u6d3b\uff0c\u90e8\u7f72\u53c2\u8003\u6587\u6863\u79bb\u7ebf\u5b89\u88c5 AI \u7b97\u529b\u4e2d\u5fc3\u5546\u4e1a\u7248
                                                        "},{"location":"admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#ubuntu","title":"\u4e0b\u8f7d\u5e76\u5bfc\u5165 Ubuntu \u76f8\u5173\u79bb\u7ebf\u5305","text":"

                                                        \u8bf7\u786e\u4fdd\u5df2\u7ecf\u767b\u5f55\u5230\u706b\u79cd\u8282\u70b9\uff01\u5e76\u4e14\u4e4b\u524d\u90e8\u7f72 AI \u7b97\u529b\u4e2d\u5fc3\u65f6\u4f7f\u7528\u7684 clusterConfig.yaml \u6587\u4ef6\u8fd8\u5728\u3002

                                                        "},{"location":"admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#ubuntu_1","title":"\u4e0b\u8f7d Ubuntu \u76f8\u5173\u79bb\u7ebf\u5305","text":"

                                                        \u4e0b\u8f7d\u6240\u9700\u7684 Ubuntu OS package \u5305\u548c ISO \u79bb\u7ebf\u5305\uff1a

| Resource | Description | Download |
| --- | --- | --- |
| os-pkgs-ubuntu2204-v0.18.2.tar.gz | Ubuntu 22.04 OS package bundle | https://github.com/kubean-io/kubean/releases/download/v0.18.2/os-pkgs-ubuntu2204-v0.18.2.tar.gz |
| ISO offline package | ISO image | http://mirrors.melbourne.co.uk/ubuntu-releases/ |
"},{"location":"admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#os-package-iso-minio","title":"Import the OS package and ISO offline packages into the bootstrap node's MinIO","text":"

Refer to the Offline Resource Import document to import the offline resources into the bootstrap node's MinIO.

                                                        "},{"location":"admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#ui","title":"\u524d\u5f80 UI \u754c\u9762\u521b\u5efa\u96c6\u7fa4","text":"

                                                        \u53c2\u8003\u6587\u6863\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4\uff0c\u521b\u5efa Ubuntu \u96c6\u7fa4\u3002

                                                        "},{"location":"admin/kpanda/best-practice/etcd-backup.html","title":"ETCD \u5907\u4efd\u8fd8\u539f","text":"

                                                        \u4f7f\u7528 ETCD \u5907\u4efd\u529f\u80fd\u521b\u5efa\u5907\u4efd\u7b56\u7565\uff0c\u53ef\u4ee5\u5c06\u6307\u5b9a\u96c6\u7fa4\u7684 etcd \u6570\u636e\u5b9a\u65f6\u5907\u4efd\u5230 S3 \u5b58\u50a8\u4e2d\u3002\u672c\u6587\u4e3b\u8981\u4ecb\u7ecd\u5982\u4f55\u5c06\u5df2\u7ecf\u5907\u4efd\u7684\u6570\u636e\u8fd8\u539f\u5230\u5f53\u524d\u96c6\u7fa4\u4e2d\u3002

                                                        Note

• ETCD backup and restore in the AI Computing Center is limited to backup and restore within the same cluster (with no change in the number of nodes or in IP addresses). For example, after the etcd data of cluster A is backed up, the backup can only be restored to cluster A, not to cluster B.
• For cross-cluster backup and restore, the application backup and restore feature is recommended.
• First create a backup policy and back up the current state; see the ETCD Backup feature for reference.

The whole backup and restore process is illustrated below with a concrete case.

                                                        "},{"location":"admin/kpanda/best-practice/etcd-backup.html#_1","title":"\u73af\u5883\u4fe1\u606f","text":"

                                                        \u9996\u5148\u4ecb\u7ecd\u8fd8\u539f\u7684\u76ee\u6807\u96c6\u7fa4\u548c S3 \u5b58\u50a8\u7684\u57fa\u672c\u4fe1\u606f\u3002\u8fd9\u91cc\u4ee5 MinIo \u4f5c\u4e3a S3 \u5b58\u50a8\uff0c\u6574\u4e2a\u96c6\u7fa4\u6709 3 \u4e2a\u63a7\u5236\u9762\uff083 \u4e2a etcd \u526f\u672c\uff09\u3002

                                                        IP \u4e3b\u673a \u89d2\u8272 \u5907\u6ce8 10.6.212.10 host01 k8s-master01 k8s \u8282\u70b9 1 10.6.212.11 host02 k8s-master02 k8s \u8282\u70b9 2 10.6.212.12 host03 k8s-master03 k8s \u8282\u70b9 3 10.6.212.13 host04 minio minio \u670d\u52a1"},{"location":"admin/kpanda/best-practice/etcd-backup.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":""},{"location":"admin/kpanda/best-practice/etcd-backup.html#etcdbrctl","title":"\u5b89\u88c5 etcdbrctl \u5de5\u5177","text":"

To restore etcd backup data, the open-source etcdbrctl tool must be installed on any one of the Kubernetes nodes listed above. This tool does not ship binaries yet, so it must be compiled manually; for the build procedure, see the Gardener / etcd-backup-restore local development documentation (a hedged build sketch follows).
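A minimal build sketch, assuming a Go toolchain is installed and that the repository's Makefile exposes a build target (treat the linked development document as authoritative):

    git clone https://github.com/gardener/etcd-backup-restore.git
    cd etcd-backup-restore
    make build                       # expected to produce the etcdbrctl binary under bin/
    cp bin/etcdbrctl /usr/local/bin/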

After installation, check that the tool is available with the following command:

                                                        etcdbrctl -v\n

The expected output is as follows:

                                                        INFO[0000] etcd-backup-restore Version: v0.23.0-dev\nINFO[0000] Git SHA: b980beec\nINFO[0000] Go Version: go1.19.3\nINFO[0000] Go OS/Arch: linux/amd64\n
                                                        "},{"location":"admin/kpanda/best-practice/etcd-backup.html#_3","title":"\u68c0\u67e5\u5907\u4efd\u6570\u636e","text":"

                                                        \u8fd8\u539f\u4e4b\u524d\u9700\u8981\u68c0\u67e5\u4e0b\u5217\u4e8b\u9879\uff1a

• Whether the data has been successfully backed up in the AI Computing Center
• Whether the backup data exists in the S3 storage

                                                        Note

Backups in the AI Computing Center are full data backups; a restore restores the full data of the most recent backup.

                                                        "},{"location":"admin/kpanda/best-practice/etcd-backup.html#_4","title":"\u5173\u95ed\u96c6\u7fa4","text":"

                                                        \u5728\u5907\u4efd\u4e4b\u524d\uff0c\u5fc5\u987b\u8981\u5148\u5173\u95ed\u96c6\u7fa4\u3002\u9ed8\u8ba4\u96c6\u7fa4 etcd \u548c kube-apiserver \u90fd\u662f\u4ee5\u9759\u6001 Pod \u7684\u5f62\u5f0f\u542f\u52a8\u7684\u3002 \u8fd9\u91cc\u7684\u5173\u95ed\u96c6\u7fa4\u662f\u6307\u5c06\u9759\u6001 Pod manifest \u6587\u4ef6\u79fb\u52a8\u5230 /etc/kubernetes/manifest \u76ee\u5f55\u5916\uff0c\u96c6\u7fa4\u5c31\u4f1a\u79fb\u9664\u5bf9\u5e94 Pod\uff0c\u8fbe\u5230\u5173\u95ed\u670d\u52a1\u7684\u4f5c\u7528\u3002

                                                        1. \u9996\u5148\u5220\u9664\u4e4b\u524d\u7684\u5907\u4efd\u6570\u636e\uff0c\u79fb\u9664\u6570\u636e\u5e76\u975e\u5c06\u73b0\u6709 etcd \u6570\u636e\u5220\u9664\uff0c\u800c\u662f\u6307\u4fee\u6539 etcd \u6570\u636e\u76ee\u5f55\u7684\u540d\u79f0\u3002 \u7b49\u5907\u4efd\u8fd8\u539f\u6210\u529f\u4e4b\u540e\u518d\u5220\u9664\u6b64\u76ee\u5f55\u3002\u8fd9\u6837\u505a\u7684\u76ee\u7684\u662f\uff0c\u5982\u679c etcd \u5907\u4efd\u8fd8\u539f\u5931\u8d25\uff0c\u8fd8\u53ef\u4ee5\u5c1d\u8bd5\u8fd8\u539f\u5f53\u524d\u96c6\u7fa4\u3002\u6b64\u6b65\u9aa4\u6bcf\u4e2a\u8282\u70b9\u5747\u9700\u6267\u884c\u3002

                                                          rm -rf /var/lib/etcd_bak\n
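The rename described above would, with the default data directory, look like the following sketch (your data directory may differ):

    mv /var/lib/etcd /var/lib/etcd_bak   # rename rather than delete, so the current data survives a failed restore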
2. Then shut down the kube-apiserver service to make sure the etcd data sees no new changes. This step must be performed on every node.

                                                          mv /etc/kubernetes/manifests/kube-apiserver.yaml /tmp/kube-apiserver.yaml\n
3. Also shut down the etcd service. This step must be performed on every node.

                                                          mv /etc/kubernetes/manifests/etcd.yaml /tmp/etcd.yaml\n
4. Make sure the kube-apiserver and etcd services on all control planes are shut down.
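A hedged way to confirm this on each control node (assuming a containerd-based runtime with crictl installed):

    crictl ps | grep -E "kube-apiserver|etcd"   # should print nothing once both static Pods have stopped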

5. After all the nodes are shut down, use the following command to check the etcd cluster status. This command can be run on any one node.

    Replace the endpoints values with the actual node names.

                                                          etcdctl endpoint status --endpoints=controller-node-1:2379,controller-node-2:2379,controller-node-3:2379 -w table \\\n  --cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n  --cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n  --key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\"\n

The expected output is as follows, indicating that all etcd nodes have been destroyed:

                                                          {\"level\":\"warn\",\"ts\":\"2023-03-29T17:51:50.817+0800\",\"logger\":\"etcd-client\",\"caller\":\"v3@v3.5.6/retry_interceptor.go:62\",\"msg\":\"retrying of unary invoker failed\",\"target\":\"etcd-endpoints://0xc0001ba000/controller-node-1:2379\",\"attempt\":0,\"error\":\"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \\\"transport: Error while dialing dial tcp 10.5.14.31:2379: connect: connection refused\\\"\"}\nFailed to get the status of endpoint controller-node-1:2379 (context deadline exceeded)\n{\"level\":\"warn\",\"ts\":\"2023-03-29T17:51:55.818+0800\",\"logger\":\"etcd-client\",\"caller\":\"v3@v3.5.6/retry_interceptor.go:62\",\"msg\":\"retrying of unary invoker failed\",\"target\":\"etcd-endpoints://0xc0001ba000/controller-node-2:2379\",\"attempt\":0,\"error\":\"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \\\"transport: Error while dialing dial tcp 10.5.14.32:2379: connect: connection refused\\\"\"}\nFailed to get the status of endpoint controller-node-2:2379 (context deadline exceeded)\n{\"level\":\"warn\",\"ts\":\"2023-03-29T17:52:00.820+0800\",\"logger\":\"etcd-client\",\"caller\":\"v3@v3.5.6/retry_interceptor.go:62\",\"msg\":\"retrying of unary invoker failed\",\"target\":\"etcd-endpoints://0xc0001ba000/controller-node-1:2379\",\"attempt\":0,\"error\":\"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \\\"transport: Error while dialing dial tcp 10.5.14.33:2379: connect: connection refused\\\"\"}\nFailed to get the status of endpoint controller-node-3:2379 (context deadline exceeded)\n+----------+----+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |\n+----------+----+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n+----------+----+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n
                                                        "},{"location":"admin/kpanda/best-practice/etcd-backup.html#_5","title":"\u8fd8\u539f\u5907\u4efd","text":"

                                                        \u53ea\u9700\u8981\u8fd8\u539f\u4e00\u4e2a\u8282\u70b9\u7684\u6570\u636e\uff0c\u5176\u4ed6\u8282\u70b9\u7684 etcd \u6570\u636e\u5c31\u4f1a\u81ea\u52a8\u8fdb\u884c\u540c\u6b65\u3002

1. Set environment variables.

    Before restoring data with etcdbrctl, run the following commands to set the S3 connection credentials as environment variables:

                                                          export ECS_ENDPOINT=http://10.6.212.13:9000 # (1)!\nexport ECS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE # (2)!\nexport ECS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY # (3)!\n
1. Access endpoint of the S3 storage
2. Access key (username) of the S3 storage
3. Secret key (password) of the S3 storage
2. Perform the restore.

    Run the etcdbrctl command-line tool to perform the restore; this is the most critical step.

                                                          etcdbrctl restore --data-dir /var/lib/etcd/ --store-container=\"etcd-backup\" \\ \n  --storage-provider=ECS \\\n  --initial-cluster=controller-node1=https://10.6.212.10:2380 \\\n  --initial-advertise-peer-urls=https://10.6.212.10:2380 \n

The parameters are as follows:

• --data-dir: the etcd data directory. It must match etcd's actual data directory, or etcd cannot load the data properly.
• --store-container: the S3 storage location, i.e. the corresponding bucket in MinIO; it must match the bucket the data was backed up to.
• --initial-cluster: the etcd initialization configuration; the etcd cluster member name must be the same as the original one.
• --initial-advertise-peer-urls: the address used for etcd member-to-member access; it must be consistent with the etcd configuration.

                                                          The expected output is as follows:

                                                          INFO[0000] Finding latest set of snapshot to recover from...\nINFO[0000] Restoring from base snapshot: Full-00000000-00111147-1679991074  actor=restorer\nINFO[0001] successfully fetched data of base snapshot in 1.241380207 seconds  actor=restorer\n{\"level\":\"info\",\"ts\":1680011221.2511616,\"caller\":\"mvcc/kvstore.go:380\",\"msg\":\"restored last compact revision\",\"meta-bucket-name\":\"meta\",\"meta-bucket-name-key\":\"finishedCompactRev\",\"restored-compact-revision\":110327}\n{\"level\":\"info\",\"ts\":1680011221.3045986,\"caller\":\"membership/cluster.go:392\",\"msg\":\"added member\",\"cluster-id\":\"66638454b9dd7b8a\",\"local-member-id\":\"0\",\"added-peer-id\":\"123c2503a378fc46\",\"added-peer-peer-urls\":[\"https://10.6.212.10:2380\"]}\nINFO[0001] Starting embedded etcd server...              actor=restorer\n....\n\n{\"level\":\"info\",\"ts\":\"2023-03-28T13:47:02.922Z\",\"caller\":\"embed/etcd.go:565\",\"msg\":\"stopped serving peer traffic\",\"address\":\"127.0.0.1:37161\"}\n{\"level\":\"info\",\"ts\":\"2023-03-28T13:47:02.922Z\",\"caller\":\"embed/etcd.go:367\",\"msg\":\"closed etcd server\",\"name\":\"default\",\"data-dir\":\"/var/lib/etcd\",\"advertise-peer-urls\":[\"http://localhost:0\"],\"advertise-client-urls\":[\"http://localhost:0\"]}\nINFO[0003] Successfully restored the etcd data directory.\n

                                                          You can check against the etcd YAML file to avoid configuration mistakes:

                                                          cat /tmp/etcd.yaml | grep initial-\n- --experimental-initial-corrupt-check=true\n- --initial-advertise-peer-urls=https://10.6.212.10:2380\n- --initial-cluster=controller-node-1=https://10.6.212.10:2380\n
                                                        3. Run the following commands on node 01 to restore its etcd service.

                                                          First move the manifest file of the etcd static Pod back into the /etc/kubernetes/manifests directory; kubelet will then restart etcd:

                                                          mv /tmp/etcd.yaml /etc/kubernetes/manifests/etcd.yaml\n

                                                          Then, after the etcd service has started, check its status. The default directory for the etcd certificates is /etc/kubernetes/ssl; if the cluster certificates are stored elsewhere, specify the corresponding paths.

                                                          • Check the etcd member list:

                                                            etcdctl member list -w table \\\n--cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n--cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n--key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\" \n

                                                            The expected output is as follows:

                                                            +------------------+---------+-------------------+--------------------------+--------------------------+------------+\n|        ID        | STATUS  |       NAME        |        PEER ADDRS        |       CLIENT ADDRS       | IS LEARNER |\n+------------------+---------+-------------------+--------------------------+--------------------------+------------+\n| 123c2503a378fc46 | started | controller-node-1 | https://10.6.212.10:2380 | https://10.6.212.10:2379 |      false |\n+------------------+---------+-------------------+--------------------------+--------------------------+------------+\n
                                                          • Check the status of controller-node-1:

                                                            etcdctl endpoint status --endpoints=controller-node-1:2379 -w table \\\n--cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n--cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n--key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\"\n

                                                            The expected output is as follows:

                                                            +------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n|        ENDPOINT        |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |\n+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n| controller-node-1:2379 | 123c2503a378fc46 |   3.5.6 |   15 MB |      true |      false |         3 |       1200 |               1199 |        |\n+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n
                                                        4. Restore the data on the other nodes

                                                          The steps above restored the data on node 01. To restore the other nodes, simply start their etcd Pods and let etcd synchronize the data by itself.

                                                          • Perform the same operation on node 02 and node 03:

                                                            mv /tmp/etcd.yaml /etc/kubernetes/manifests/etcd.yaml\n
                                                          • Data synchronization between etcd members takes some time. Check the etcd cluster status to make sure all members are healthy:

                                                            Check whether the etcd cluster is healthy:

                                                            etcdctl member list -w table \\\n--cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n--cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n--key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\"\n

                                                            The expected output is as follows:

                                                            +------------------+---------+-------------------+-------------------------+-------------------------+------------+\n|        ID        | STATUS  |    NAME           |       PEER ADDRS        |      CLIENT ADDRS       | IS LEARNER |\n+------------------+---------+-------------------+-------------------------+-------------------------+------------+\n| 6ea47110c5a87c03 | started | controller-node-1 | https://10.5.14.31:2380 | https://10.5.14.31:2379 |      false |\n| e222e199f1e318c4 | started | controller-node-2 | https://10.5.14.32:2380 | https://10.5.14.32:2379 |      false |\n| f64eeda321aabe2d | started | controller-node-3 | https://10.5.14.33:2380 | https://10.5.14.33:2379 |      false |\n+------------------+---------+-------------------+-------------------------+-------------------------+------------+\n

                                                            Check whether all three member nodes are healthy:

                                                            etcdctl endpoint status --endpoints=controller-node-1:2379,controller-node-2:2379,controller-node-3:2379 -w table \\\n--cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n--cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n--key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\"\n

                                                            The expected output is as follows:

                                                            +------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n|     ENDPOINT           |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |\n+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n| controller-node-1:2379 | 6ea47110c5a87c03 |   3.5.6 |   88 MB |      true |      false |         6 |     199008 |             199008 |        |\n| controller-node-2:2379 | e222e199f1e318c4 |   3.5.6 |   88 MB |     false |      false |         6 |     199114 |             199114 |        |\n| controller-node-3:2379 | f64eeda321aabe2d |   3.5.6 |   88 MB |     false |      false |         6 |     199316 |             199316 |        |\n+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n
                                                        "},{"location":"admin/kpanda/best-practice/etcd-backup.html#_6","title":"\u6062\u590d\u96c6\u7fa4","text":"

                                                        \u7b49\u6240\u6709\u8282\u70b9\u7684 etcd \u6570\u636e\u540c\u6b65\u5b8c\u6210\u540e\uff0c\u5219\u53ef\u4ee5\u5c06 kube-apiserver \u8fdb\u884c\u91cd\u65b0\u542f\u52a8\uff0c\u5c06\u6574\u4e2a\u96c6\u7fa4\u6062\u590d\u5230\u53ef\u8bbf\u95ee\u72b6\u6001\uff1a

                                                        1. \u91cd\u65b0\u542f\u52a8 node1 \u7684 kube-apiserver \u670d\u52a1

                                                          mv /tmp/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml\n
                                                        2. Restart the kube-apiserver service on node2

                                                          mv /tmp/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml\n
                                                        3. Restart the kube-apiserver service on node3

                                                          mv /tmp/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml\n
                                                        4. After kubelet has started kube-apiserver, check whether the restored Kubernetes data is correct:

                                                          kubectl get nodes\n

                                                          The expected output is as follows:

                                                          NAME                STATUS     ROLES           AGE     VERSION\ncontroller-node-1   Ready      <none>          3h30m   v1.25.4\ncontroller-node-2   Ready      control-plane   3h29m   v1.25.4\ncontroller-node-3   Ready      control-plane   3h28m   v1.25.4\n
                                                        "},{"location":"admin/kpanda/best-practice/hardening-cluster.html","title":"\u5982\u4f55\u52a0\u56fa\u81ea\u5efa\u5de5\u4f5c\u96c6\u7fa4","text":"

                                                        \u5728 AI \u7b97\u529b\u4e2d\u5fc3\u4e2d\uff0c\u4f7f\u7528 CIS Benchmark (CIS) \u626b\u63cf\u4f7f\u7528\u754c\u9762\u521b\u5efa\u7684\u5de5\u4f5c\u96c6\u7fa4\uff0c\u6709\u4e00\u4e9b\u626b\u63cf\u9879\u5e76\u6ca1\u6709\u901a\u8fc7\u626b\u63cf\u3002 \u672c\u6587\u5c06\u57fa\u4e8e\u4e0d\u540c\u7684 CIS Benchmark \u7248\u672c\u8fdb\u884c\u52a0\u56fa\u8bf4\u660e\u3002

                                                        "},{"location":"admin/kpanda/best-practice/hardening-cluster.html#cis-benchmark-127","title":"CIS Benchmark 1.27","text":"

                                                        Scan environment:

                                                        • kubernetes version: 1.25.4
                                                        • containerd: 1.7.0
                                                        • kubean version: 0.4.9
                                                        • kubespray version: v2.22
                                                        "},{"location":"admin/kpanda/best-practice/hardening-cluster.html#_2","title":"\u672a\u901a\u8fc7\u626b\u63cf\u9879","text":"
                                                        1. [FAIL] 1.2.5 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)
                                                        2. [FAIL] 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)
                                                        3. [FAIL] 1.4.1 Ensure that the --profiling argument is set to false (Automated)
                                                        4. [FAIL] 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)
                                                        "},{"location":"admin/kpanda/best-practice/hardening-cluster.html#_3","title":"\u626b\u63cf\u5931\u8d25\u539f\u56e0\u5206\u6790","text":"
                                                        1. [FAIL] 1.2.5 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)

                                                          \u539f\u56e0\uff1a CIS \u8981\u6c42 kube-apiserver \u5fc5\u987b\u6307\u5b9a kubelet \u7684 CA \u8bc1\u4e66\u8def\u5f84\uff1a

                                                        2. [FAIL] 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)

                                                          \u539f\u56e0\uff1a CIS \u8981\u6c42 kube-controller-manager \u7684 --bing-address=127.0.0.1

                                                        3. [FAIL] 1.4.1 Ensure that the --profiling argument is set to false (Automated)

                                                          \u539f\u56e0\uff1a CIS \u8981\u6c42 kube-scheduler \u8bbe\u7f6e --profiling=false

                                                        4. [FAIL] 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)

                                                          \u539f\u56e0\uff1a CIS \u8981\u6c42 \u8bbe\u7f6e kube-scheduler \u7684 --bind-address=127.0.0.1
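
                                                        For reference, these four checks map to the following static Pod flags. The excerpt below is a sketch; the exact manifest paths and CA file location depend on your kubeadm layout:

                                                        # /etc/kubernetes/manifests/kube-apiserver.yaml (excerpt, check 1.2.5)\n- --kubelet-certificate-authority=/etc/kubernetes/ssl/ca.crt\n# /etc/kubernetes/manifests/kube-controller-manager.yaml (excerpt, check 1.3.7)\n- --bind-address=127.0.0.1\n# /etc/kubernetes/manifests/kube-scheduler.yaml (excerpt, checks 1.4.1 and 1.4.2)\n- --profiling=false\n- --bind-address=127.0.0.1\n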

                                                        "},{"location":"admin/kpanda/best-practice/hardening-cluster.html#cis","title":"\u52a0\u56fa\u914d\u7f6e\u4ee5\u901a\u8fc7 CIS \u626b\u63cf","text":"

                                                        kubespray \u5b98\u65b9\u4e3a\u4e86\u89e3\u51b3\u8fd9\u4e9b\u5b89\u5168\u626b\u63cf\u95ee\u9898\uff0c\u5728 v2.22 \u4e2d\u6dfb\u52a0\u9ed8\u8ba4\u503c\u89e3\u51b3\u4e86\u4e00\u90e8\u5206\u95ee\u9898\uff0c \u66f4\u591a\u7ec6\u8282\u8bf7\u53c2\u8003 kubespray \u52a0\u56fa\u6587\u6863\u3002

                                                        • \u901a\u8fc7\u4fee\u6539 kubean var-config \u914d\u7f6e\u6587\u4ef6\u6765\u6dfb\u52a0\u53c2\u6570\uff1a

                                                          kubernetes_audit: true\nkube_controller_manager_bind_address: 127.0.0.1\nkube_scheduler_bind_address: 127.0.0.1\nkube_kubeadm_scheduler_extra_args:\n  profiling: false\nkubelet_rotate_server_certificates: true\n
                                                        • The AI computing center also provides the ability to configure advanced parameters through the UI; add the custom parameters in the last step of cluster creation.

                                                        • After the custom parameters are set, the parameters above are added to the ConfigMap of kubean's var-config.

                                                        • Scan the cluster after installation.

                                                        After the scan, all checks pass (WARN and INFO count as PASS). Because the CIS Benchmark is continuously updated, the content of this document applies only to CIS Benchmark 1.27.
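
                                                        If you want to re-run the checks from the command line rather than through the UI, kube-bench is one common option; this is an assumption of this note, not part of the platform tooling:

                                                        # Run the CIS checks against control-plane and worker components\n# (kube-bench auto-detects the Kubernetes version by default)\nkube-bench run --targets master,node\n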

                                                        "},{"location":"admin/kpanda/best-practice/k3s-lcm.html","title":"\u8fb9\u7f18\u96c6\u7fa4\u90e8\u7f72\u548c\u7ba1\u7406\u5b9e\u8df5","text":"

                                                        \u5bf9\u4e8e\u8d44\u6e90\u53d7\u9650\u7684\u8fb9\u7f18\u6216\u7269\u8054\u7f51\u573a\u666f\uff0cKubernetes \u65e0\u6cd5\u5f88\u597d\u7684\u6ee1\u8db3\u8d44\u6e90\u8981\u6c42\uff0c\u4e3a\u6b64\u9700\u8981\u4e00\u4e2a\u8f7b\u91cf\u5316 Kubernetes \u65b9\u6848\uff0c \u65e2\u80fd\u5b9e\u73b0\u5bb9\u5668\u7ba1\u7406\u548c\u7f16\u6392\u80fd\u529b\uff0c\u53c8\u80fd\u7ed9\u4e1a\u52a1\u5e94\u7528\u9884\u7559\u66f4\u591a\u8d44\u6e90\u7a7a\u95f4\u3002\u672c\u6587\u4ecb\u7ecd\u8fb9\u7f18\u96c6\u7fa4 k3s \u7684\u90e8\u7f72\u548c\u5168\u751f\u547d\u5468\u671f\u7ba1\u7406\u5b9e\u8df5\u3002

                                                        "},{"location":"admin/kpanda/best-practice/k3s-lcm.html#_2","title":"\u8282\u70b9\u89c4\u5212","text":"

                                                        \u67b6\u6784

                                                        • x86_64
                                                        • armhf
                                                        • arm64/aarch64

                                                        Operating system

                                                        • Works on most modern Linux distributions

                                                        CPU / Memory

                                                        • Single-node K3s cluster

                                                          K3s cluster: minimum 1 CPU core (2 cores recommended), minimum 1.5 GB memory (2 GB recommended)
                                                        • Multi-node K3s cluster

                                                          K3s server: minimum 1 CPU core (2 cores recommended), minimum 1 GB memory (1.5 GB recommended); K3s agent: minimum 1 CPU core (2 cores recommended), minimum 512 MB memory (1 GB recommended)
                                                        • Node inbound rules

                                                          • Make sure the following ports are not occupied, as required
                                                          • If special requirements prevent disabling the firewall, make sure these ports are allowed through (a quick resource/port check sketch follows at the end of this section)
                                                          TCP 2379-2380, Servers -> Servers: required for HA with embedded etcd; TCP 6443, Agents -> Servers: K3s supervisor and Kubernetes API Server; UDP 8472, All nodes -> All nodes: Flannel VXLAN only; TCP 10250, All nodes -> All nodes: kubelet metrics; UDP 51820, All nodes -> All nodes: Flannel WireGuard with IPv4 only; UDP 51821, All nodes -> All nodes: Flannel WireGuard with IPv6 only; TCP 5001, All nodes -> All nodes: embedded distributed registry (Spegel) only; TCP 6443, All nodes -> All nodes: embedded distributed registry (Spegel) only
                                                        • Node roles

                                                          The login user must have root privileges.

                                                          Typical combinations (server nodes / agent nodes): 1 / 0, a single server node; 1 / 2, one server node and two agent nodes; 3 / 0, three server nodes
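
                                                        A quick way to verify that a candidate machine meets these minimums and has the required ports free, as a sketch using standard Linux tools:

                                                          nproc      # CPU cores: expect at least 1 (2 recommended)\nfree -h    # memory: compare against the requirements above\n# nothing should already be listening on the K3s ports\nss -tuln | grep -E ':(6443|2379|2380|10250) ' || echo \"required ports look free\"\n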
                                                        "},{"location":"admin/kpanda/best-practice/k3s-lcm.html#_3","title":"\u524d\u7f6e\u51c6\u5907","text":"
                                                        1. \u4fdd\u5b58\u5b89\u88c5\u811a\u672c\u5230\u5b89\u88c5\u8282\u70b9\uff08\u4efb\u610f\u53ef\u4ee5\u8bbf\u95ee\u5230\u96c6\u7fa4\u8282\u70b9\u7684\u8282\u70b9\uff09

                                                          $ cat > k3slcm <<'EOF'\n#!/bin/bash\nset -e\n\nairgap_image=${K3S_AIRGAP_IMAGE:-}\nk3s_bin=${K3S_BINARY:-}\ninstall_script=${K3S_INSTALL_SCRIPT:-}\n\nservers=${K3S_SERVERS:-}\nagents=${K3S_AGENTS:-}\nssh_user=${SSH_USER:-root}\nssh_password=${SSH_PASSWORD:-}\nssh_privatekey_path=${SSH_PRIVATEKEY_PATH:-}\nextra_server_args=${EXTRA_SERVER_ARGS:-}\nextra_agent_args=${EXTRA_AGENT_ARGS:-}\nfirst_server=$(cut -d, -f1 <<<\"$servers,\")\nother_servers=$(cut -d, -f2- <<<\"$servers,\")\n\ninstall_script_env=\"INSTALL_K3S_SKIP_SELINUX_RPM=true INSTALL_K3S_SELINUX_WARN=true \"\n[ -n \"$K3S_VERSION\" ] && install_script_env+=\"INSTALL_K3S_VERSION=$K3S_VERSION \"\n\nssh_opts=\"-q -o StrictHostkeyChecking=no -o UserKnownHostsFile=/dev/null -o ControlPath=/tmp/ssh_mux_%h_%p_%r -o ControlMaster=auto -o ControlPersist=10m\"\n\nif [ -n \"$ssh_privatekey_path\" ]; then\n  ssh_opts+=\" -i $ssh_privatekey_path\"\nelif [ -n \"$ssh_password\" ]; then\n  askpass=$(mktemp)\n  echo \"echo -n $ssh_password\" > $askpass\n  chmod 0755 $askpass\n  export SSH_ASKPASS=$askpass SSH_ASKPASS_REQUIRE=force\nelse\n  echo \"SSH_PASSWORD or SSH_PRIVATEKEY_PATH must be provided\" && exit 1\nfi\n\nlog_info() { echo -e \"\\033[36m* $*\\033[0m\"; }\nclean() { rm -f $askpass; }\ntrap clean EXIT\n\nIFS=',' read -ra all_nodes <<< \"$servers,$agents\"\nif [ -n \"$k3s_bin\" ]; then\n  for node in ${all_nodes[@]}; do\n    chmod +x \"$k3s_bin\" \"$install_script\"\n    ssh $ssh_opts \"$ssh_user@$node\" \"mkdir -p /usr/local/bin /var/lib/rancher/k3s/agent/images\"\n    log_info \"Copying $airgap_image to $node\"\n    scp -O $ssh_opts \"$airgap_image\" \"$ssh_user@$node:/var/lib/rancher/k3s/agent/images\"\n    log_info \"Copying $k3s_bin to $node\"\n    scp -O $ssh_opts \"$k3s_bin\" \"$ssh_user@$node:/usr/local/bin/k3s\"\n    log_info \"Copying $install_script to $node\"\n    scp -O $ssh_opts \"$install_script\" \"$ssh_user@$node:/usr/local/bin/k3s-install.sh\"\n  done\n  install_script_env+=\"INSTALL_K3S_SKIP_DOWNLOAD=true \"\nelse\n  for node in ${all_nodes[@]}; do\n    log_info \"Downloading install script for $node\"\n    ssh $ssh_opts \"$ssh_user@$node\" \"curl -sSLo /usr/local/bin/k3s-install.sh https://get.k3s.io/ && chmod +x /usr/local/bin/k3s-install.sh\"\n  done\nfi\n\nrestart_k3s() {\n  local node=$1\n  previous_k3s_version=$(ssh $ssh_opts \"$ssh_user@$first_server\" \"kubectl get no -o wide | awk '\\$6==\\\"$node\\\" {print \\$5}'\")\n  [ -n \"$previous_k3s_version\" -a \"$previous_k3s_version\" != \"$K3S_VERSION\" -a -n \"$k3s_bin\" ] && return 0 || return 1\n}\n\ntoken=mynodetoken\ninstall_script_env+=${K3S_INSTALL_SCRIPT_ENV:-}\nif [ -z \"$other_servers\" ]; then\n  log_info \"Installing on server node [$first_server]\"\n  ssh $ssh_opts \"$ssh_user@$first_server\" \"env $install_script_env /usr/local/bin/k3s-install.sh server --token $token $extra_server_args\"\n  ! restart_k3s \"$first_server\" || ssh $ssh_opts \"$ssh_user@$first_server\" \"systemctl restart k3s.service\"\nelse\n  log_info \"Installing on first server node [$first_server]\"\n  ssh $ssh_opts \"$ssh_user@$first_server\" \"env $install_script_env /usr/local/bin/k3s-install.sh server --cluster-init --token $token $extra_server_args\"\n  ! 
restart_k3s \"$first_server\" || ssh $ssh_opts \"$ssh_user@$first_server\" \"systemctl restart k3s.service\"\n  IFS=',' read -ra other_server_nodes <<< \"$other_servers\"\n  for node in ${other_server_nodes[@]}; do\n    log_info \"Installing on other server node [$node]\"\n    ssh $ssh_opts \"$ssh_user@$node\" \"env $install_script_env /usr/local/bin/k3s-install.sh server --server https://$first_server:6443 --token $token $extra_server_args\"\n    ! restart_k3s \"$node\" || ssh $ssh_opts \"$ssh_user@$node\" \"systemctl restart k3s.service\"\n  done\nfi\n\nif [ -n \"$agents\" ]; then\n  IFS=',' read -ra agent_nodes <<< \"$agents\"\n  for node in ${agent_nodes[@]}; do\n    log_info \"Installing on agent node [$node]\"\n    ssh $ssh_opts \"$ssh_user@$node\" \"env $install_script_env K3S_TOKEN=$token K3S_URL=https://$first_server:6443 /usr/local/bin/k3s-install.sh agent --token $token $extra_agent_args\"\n    ! restart_k3s \"$node\" || ssh $ssh_opts \"$ssh_user@$node\" \"systemctl restart k3s-agent.service\"\n  done\nfi\nEOF\n
                                                        2. (Optional) For offline environments, download the K3s offline resources on an internet-connected node and copy them to the installer node

                                                          ## [Run on the internet-connected node]\n\n# Set the K3s version to v1.30.2+k3s1\n$ export k3s_version=v1.30.2+k3s1\n\n# Offline image package\n# for arm64 use https://github.com/k3s-io/k3s/releases/download/$k3s_version/k3s-airgap-images-arm64.tar.zst\n$ curl -LO https://github.com/k3s-io/k3s/releases/download/$k3s_version/k3s-airgap-images-amd64.tar.zst\n\n# k3s binary\n# for arm64 use https://github.com/k3s-io/k3s/releases/download/$k3s_version/k3s-arm64\n$ curl -LO https://github.com/k3s-io/k3s/releases/download/$k3s_version/k3s\n\n# Installation script\n$ curl -Lo k3s-install.sh https://get.k3s.io/\n\n## Copy the resources above to the installer node's filesystem\n\n## [Run on the installer node]\n$ export K3S_AIRGAP_IMAGE=<resource-dir>/k3s-airgap-images-amd64.tar.zst \n$ export K3S_BINARY=<resource-dir>/k3s \n$ export K3S_INSTALL_SCRIPT=<resource-dir>/k3s-install.sh\n
                                                        3. Disable the firewall and swap (if the firewall cannot be disabled, allow the inbound ports listed above; see the firewalld sketch after the commands below)

                                                          # Disable the firewall on Ubuntu\n$ sudo ufw disable\n# Disable the firewall on RHEL / CentOS / Fedora / SUSE\n$ systemctl disable firewalld --now\n# Disable swap\n$ sudo swapoff -a\n$ sudo sed -i '/swap/s/^/#/' /etc/fstab\n
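
                                                          If the firewall must stay enabled (for example firewalld on RHEL-family systems), a sketch of allowing the K3s inbound ports listed under Node Planning instead:

                                                          sudo firewall-cmd --permanent --add-port=6443/tcp        # API Server / supervisor\nsudo firewall-cmd --permanent --add-port=2379-2380/tcp    # servers with embedded etcd\nsudo firewall-cmd --permanent --add-port=10250/tcp        # kubelet metrics\nsudo firewall-cmd --permanent --add-port=8472/udp         # Flannel VXLAN\nsudo firewall-cmd --reload\n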
                                                        "},{"location":"admin/kpanda/best-practice/k3s-lcm.html#_4","title":"\u90e8\u7f72\u96c6\u7fa4","text":"

                                                        \u4e0b\u6587\u6d4b\u8bd5\u73af\u5883\u4fe1\u606f\u4e3a Ubuntu 22.04 LTS, amd64\uff0c\u79bb\u7ebf\u5b89\u88c5

                                                        1. \u5728\u5b89\u88c5\u8282\u70b9\u6839\u636e\u90e8\u7f72\u89c4\u5212\u8bbe\u7f6e\u8282\u70b9\u4fe1\u606f\uff0c\u5e76\u5bfc\u51fa\u73af\u5883\u53d8\u91cf\uff0c\u591a\u4e2a\u8282\u70b9\u4ee5\u534a\u89d2\u9017\u53f7 , \u5206\u9694

                                                          1 server / 0 agent1 server / 2 agent3 server / 0 agent
                                                          export K3S_SERVERS=172.30.41.5 $ export SSH_USER=root\n# \u82e5\u4f7f\u7528 public key \u65b9\u5f0f\u767b\u5f55\uff0c\u786e\u4fdd\u5df2\u5c06\u516c\u94a5\u6dfb\u52a0\u5230\u5404\u8282\u70b9\u7684 ~/.ssh/authorized_keys\n\nexport SSH_PRIVATEKEY_PATH=<\u79c1\u94a5\u8def\u5f84>\nexport SSH_PASSWORD=<SSH\u5bc6\u7801>\n
                                                          export K3S_SERVERS=172.30.41.5\nexport K3S_AGENTS=172.30.41.6,172.30.41.7\nexport SSH_USER=root\n\n# When logging in with a public key, make sure the public key has been added to ~/.ssh/authorized_keys on every node\nexport SSH_PRIVATEKEY_PATH=<private-key-path>\nexport SSH_PASSWORD=<SSH-password>\n
                                                          export K3S_SERVERS=172.30.41.5,172.30.41.6,172.30.41.7\nexport SSH_USER=root\n\n# When logging in with a public key, make sure the public key has been added to ~/.ssh/authorized_keys on every node\nexport SSH_PRIVATEKEY_PATH=<private-key-path>\nexport SSH_PASSWORD=<SSH-password>\n
                                                        2. Run the deployment

                                                          Taking the 3 server / 0 agent mode as an example; every machine must have a unique hostname

                                                          # To set additional K3s install-script environment variables, set K3S_INSTALL_SCRIPT_ENV; for possible values see https://docs.k3s.io/reference/env-variables\n# For extra configuration of server or agent nodes, set EXTRA_SERVER_ARGS or EXTRA_AGENT_ARGS; for values see https://docs.k3s.io/cli/server and https://docs.k3s.io/cli/agent\n$ bash k3slcm\n* Copying ./v1.30.2/k3s-airgap-images-amd64.tar.zst to 172.30.41.5\n* Copying ./v1.30.2/k3s to 172.30.41.5\n* Copying ./v1.30.2/k3s-install.sh to 172.30.41.5\n* Copying ./v1.30.2/k3s-airgap-images-amd64.tar.zst to 172.30.41.6\n* Copying ./v1.30.2/k3s to 172.30.41.6\n* Copying ./v1.30.2/k3s-install.sh to 172.30.41.6\n* Copying ./v1.30.2/k3s-airgap-images-amd64.tar.zst to 172.30.41.7\n* Copying ./v1.30.2/k3s to 172.30.41.7\n* Copying ./v1.30.2/k3s-install.sh to 172.30.41.7\n* Installing on first server node [172.30.41.5]\n[INFO]  Skipping k3s download and verify\n[INFO]  Skipping installation of SELinux RPM\n[INFO]  Creating /usr/local/bin/kubectl symlink to k3s\n[INFO]  Creating /usr/local/bin/crictl symlink to k3s\n[INFO]  Creating /usr/local/bin/ctr symlink to k3s\n[INFO]  Creating killall script /usr/local/bin/k3s-killall.sh\n[INFO]  Creating uninstall script /usr/local/bin/k3s-uninstall.sh\n[INFO]  env: Creating environment file /etc/systemd/system/k3s.service.env\n[INFO]  systemd: Creating service file /etc/systemd/system/k3s.service\n[INFO]  systemd: Enabling k3s unit\nCreated symlink /etc/systemd/system/multi-user.target.wants/k3s.service \u2192 /etc/systemd/system/k3s.service.\n[INFO]  systemd: Starting k3s\n* Installing on other server node [172.30.41.6]\n......\n
                                                        3. Check the cluster status

                                                          $ kubectl get no -owide\nNAME      STATUS   ROLES                       AGE     VERSION        INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME\nserver1   Ready    control-plane,etcd,master   3m51s   v1.30.2+k3s1   172.30.41.5   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\nserver2   Ready    control-plane,etcd,master   3m18s   v1.30.2+k3s1   172.30.41.6   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\nserver3   Ready    control-plane,etcd,master   3m7s    v1.30.2+k3s1   172.30.41.7   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\n\n$ kubectl get pod --all-namespaces -owide\nNAMESPACE     NAME                                      READY   STATUS      RESTARTS   AGE     IP          NODE      NOMINATED NODE   READINESS GATES\nkube-system   coredns-576bfc4dc7-z4x2s                  1/1     Running     0          8m31s   10.42.0.3   server1   <none>           <none>\nkube-system   helm-install-traefik-98kh5                0/1     Completed   1          8m31s   10.42.0.4   server1   <none>           <none>\nkube-system   helm-install-traefik-crd-9xtfd            0/1     Completed   0          8m31s   10.42.0.5   server1   <none>           <none>\nkube-system   local-path-provisioner-86f46b7bf7-qt995   1/1     Running     0          8m31s   10.42.0.6   server1   <none>           <none>\nkube-system   metrics-server-557ff575fb-kptsh           1/1     Running     0          8m31s   10.42.0.2   server1   <none>           <none>\nkube-system   svclb-traefik-f95cc81c-mgcjh              2/2     Running     0          6m28s   10.42.1.3   server2   <none>           <none>\nkube-system   svclb-traefik-f95cc81c-xtb8f              2/2     Running     0          6m28s   10.42.2.2   server3   <none>           <none>\nkube-system   svclb-traefik-f95cc81c-zcsxl              2/2     Running     0          6m28s   10.42.0.7   server1   <none>           <none>\nkube-system   traefik-5fb479b77-6pbh5                   1/1     Running     0          6m28s   10.42.1.2   server2   <none>           <none>\n
                                                        "},{"location":"admin/kpanda/best-practice/k3s-lcm.html#_5","title":"\u5347\u7ea7\u96c6\u7fa4","text":"
                                                        1. \u5982\u5347\u7ea7\u5230 v1.30.3+k3s1 \u7248\u672c\uff0c\u6309\u7167 \u524d\u7f6e\u51c6\u5907 \u6b65\u9aa4 2 \u91cd\u65b0\u4e0b\u8f7d\u79bb\u7ebf\u8d44\u6e90\u5e76\u62f7\u8d1d\u5230\u5b89\u88c5\u8282\u70b9\uff0c\u540c\u65f6\u5728\u5b89\u88c5\u8282\u70b9\u5bfc\u51fa\u79bb\u7ebf\u8d44\u6e90\u8def\u5f84\u73af\u5883\u53d8\u91cf\u3002\uff08\u82e5\u4e3a\u8054\u7f51\u5347\u7ea7\uff0c\u5219\u8df3\u8fc7\u6b64\u64cd\u4f5c\uff09
                                                        2. \u6267\u884c\u5347\u7ea7\u64cd\u4f5c

                                                          $ export K3S_VERSION=v1.30.3+k3s1\n$ bash k3slcm\n* Copying ./v1.30.3/k3s-airgap-images-amd64.tar.zst to 172.30.41.5\n* Copying ./v1.30.3/k3s to 172.30.41.5\n* Copying ./v1.30.3/k3s-install.sh to 172.30.41.5\n* Copying ./v1.30.3/k3s-airgap-images-amd64.tar.zst to 172.30.41.6\n* Copying ./v1.30.3/k3s to 172.30.41.6\n* Copying ./v1.30.3/k3s-install.sh to 172.30.41.6\n* Copying ./v1.30.3/k3s-airgap-images-amd64.tar.zst to 172.30.41.7\n* Copying ./v1.30.3/k3s to 172.30.41.7\n* Copying ./v1.30.3/k3s-install.sh to 172.30.41.7\n* Installing on first server node [172.30.41.5]\n[INFO]  Skipping k3s download and verify\n[INFO]  Skipping installation of SELinux RPM\n[INFO]  Skipping /usr/local/bin/kubectl symlink to k3s, already exists\n[INFO]  Skipping /usr/local/bin/crictl symlink to k3s, already exists\n[INFO]  Skipping /usr/local/bin/ctr symlink to k3s, already exists\n[INFO]  Creating killall script /usr/local/bin/k3s-killall.sh\n[INFO]  Creating uninstall script /usr/local/bin/k3s-uninstall.sh\n[INFO]  env: Creating environment file /etc/systemd/system/k3s.service.env\n[INFO]  systemd: Creating service file /etc/systemd/system/k3s.service\n[INFO]  systemd: Enabling k3s unit\nCreated symlink /etc/systemd/system/multi-user.target.wants/k3s.service \u2192 /etc/systemd/system/k3s.service.\n[INFO]  No change detected so skipping service start\n* Installing on other server node [172.30.41.6]\n......\n
                                                        3. Check the cluster status

                                                          $ kubectl get node -owide\nNAME      STATUS   ROLES                       AGE   VERSION        INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME\nserver1   Ready    control-plane,etcd,master   18m   v1.30.3+k3s1   172.30.41.5   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\nserver2   Ready    control-plane,etcd,master   17m   v1.30.3+k3s1   172.30.41.6   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\nserver3   Ready    control-plane,etcd,master   17m   v1.30.3+k3s1   172.30.41.7   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\n\n$ kubectl get po --all-namespaces -owide\nNAMESPACE     NAME                                      READY   STATUS      RESTARTS   AGE     IP          NODE      NOMINATED NODE   READINESS GATES\nkube-system   coredns-576bfc4dc7-z4x2s                  1/1     Running     0          18m     10.42.0.3   server1   <none>           <none>\nkube-system   helm-install-traefik-98kh5                0/1     Completed   1          18m     <none>      server1   <none>           <none>\nkube-system   helm-install-traefik-crd-9xtfd            0/1     Completed   0          18m     <none>      server1   <none>           <none>\nkube-system   local-path-provisioner-6795b5f9d8-t4rvm   1/1     Running     0          2m49s   10.42.2.3   server3   <none>           <none>\nkube-system   metrics-server-557ff575fb-kptsh           1/1     Running     0          18m     10.42.0.2   server1   <none>           <none>\nkube-system   svclb-traefik-f95cc81c-mgcjh              2/2     Running     0          16m     10.42.1.3   server2   <none>           <none>\nkube-system   svclb-traefik-f95cc81c-xtb8f              2/2     Running     0          16m     10.42.2.2   server3   <none>           <none>\nkube-system   svclb-traefik-f95cc81c-zcsxl              2/2     Running     0          16m     10.42.0.7   server1   <none>           <none>\nkube-system   traefik-5fb479b77-6pbh5                   1/1     Running     0          16m     10.42.1.2   server2   <none>           <none>\n
                                                        "},{"location":"admin/kpanda/best-practice/k3s-lcm.html#_6","title":"\u6269\u5bb9\u96c6\u7fa4","text":"
                                                        1. \u5982\u6dfb\u52a0\u65b0\u7684 agent \u8282\u70b9\uff1a

                                                          export K3S_AGENTS=172.30.41.8\n

                                                          To add new server nodes, change K3S_SERVERS as follows:

                                                          < export K3S_SERVERS=172.30.41.5,172.30.41.6,172.30.41.7\n---\n> export K3S_SERVERS=172.30.41.5,172.30.41.6,172.30.41.7,172.30.41.8,172.30.41.9\n
                                                        2. Run the scale-out operation (adding an agent node as an example)

                                                          $ bash k3slcm\n* Copying ./v1.30.3/k3s-airgap-images-amd64.tar.zst to 172.30.41.5\n* Copying ./v1.30.3/k3s to 172.30.41.5\n* Copying ./v1.30.3/k3s-install.sh to 172.30.41.5\n* Copying ./v1.30.3/k3s-airgap-images-amd64.tar.zst to 172.30.41.6\n* Copying ./v1.30.3/k3s to 172.30.41.6\n* Copying ./v1.30.3/k3s-install.sh to 172.30.41.6\n* Copying ./v1.30.3/k3s-airgap-images-amd64.tar.zst to 172.30.41.7\n* Copying ./v1.30.3/k3s to 172.30.41.7\n* Copying ./v1.30.3/k3s-install.sh to 172.30.41.7\n* Copying ./v1.30.3/k3s-airgap-images-amd64.tar.zst to 172.30.41.8\n* Copying ./v1.30.3/k3s to 172.30.41.8\n* Copying ./v1.30.3/k3s-install.sh to 172.30.41.8\n* Installing on first server node [172.30.41.5]\n[INFO]  Skipping k3s download and verify\n[INFO]  Skipping installation of SELinux RPM\n[INFO]  Skipping /usr/local/bin/kubectl symlink to k3s, already exists\n[INFO]  Skipping /usr/local/bin/crictl symlink to k3s, already exists\n[INFO]  Skipping /usr/local/bin/ctr symlink to k3s, already exists\n[INFO]  Creating killall script /usr/local/bin/k3s-killall.sh\n[INFO]  Creating uninstall script /usr/local/bin/k3s-uninstall.sh\n[INFO]  env: Creating environment file /etc/systemd/system/k3s.service.env\n[INFO]  systemd: Creating service file /etc/systemd/system/k3s.service\n[INFO]  systemd: Enabling k3s unit\nCreated symlink /etc/systemd/system/multi-user.target.wants/k3s.service \u2192 /etc/systemd/system/k3s.service.\n[INFO]  No change detected so skipping service start\n......\n* Installing on agent node [172.30.41.8]\n[INFO]  Skipping k3s download and verify\n[INFO]  Skipping installation of SELinux RPM\n[INFO]  Creating /usr/local/bin/kubectl symlink to k3s\n[INFO]  Creating /usr/local/bin/crictl symlink to k3s\n[INFO]  Creating /usr/local/bin/ctr symlink to k3s\n[INFO]  Creating killall script /usr/local/bin/k3s-killall.sh\n[INFO]  Creating uninstall script /usr/local/bin/k3s-agent-uninstall.sh\n[INFO]  env: Creating environment file /etc/systemd/system/k3s-agent.service.env\n[INFO]  systemd: Creating service file /etc/systemd/system/k3s-agent.service\n[INFO]  systemd: Enabling k3s-agent unit\nCreated symlink /etc/systemd/system/multi-user.target.wants/k3s-agent.service \u2192 /etc/systemd/system/k3s-agent.service.\n[INFO]  systemd: Starting k3s-agent\n
                                                        3. Check the cluster status

                                                          $ kubectl get node -owide\nNAME      STATUS   ROLES                       AGE   VERSION        INTERNAL-IP   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      CONTAINER-RUNTIME\nagent1    Ready    <none>                      57s   v1.30.3+k3s1   172.30.41.8   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\nserver1   Ready    control-plane,etcd,master   12m   v1.30.3+k3s1   172.30.41.5   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\nserver2   Ready    control-plane,etcd,master   11m   v1.30.3+k3s1   172.30.41.6   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\nserver3   Ready    control-plane,etcd,master   11m   v1.30.3+k3s1   172.30.41.7   <none>        Ubuntu 22.04.3 LTS   5.15.0-78-generic   containerd://1.7.17-k3s1\n
                                                        "},{"location":"admin/kpanda/best-practice/k3s-lcm.html#_7","title":"\u7f29\u5bb9\u96c6\u7fa4","text":"
                                                        1. \u4ec5\u5728\u5f85\u5220\u9664\u8282\u70b9\u6267\u884c k3s-uninstall.sh \u6216 k3s-agent-uninstall.sh
                                                        2. \u5728\u4efb\u610f server \u8282\u70b9\u4e0a\u6267\u884c\uff1a

                                                          kubectl delete node <node-name>\n
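
                                                        Draining the node before removal lets its workloads be rescheduled gracefully; this is standard Kubernetes practice rather than a k3s requirement:

                                                          kubectl drain <node-name> --ignore-daemonsets --delete-emptydir-data\n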
                                                        "},{"location":"admin/kpanda/best-practice/k3s-lcm.html#_8","title":"\u5378\u8f7d\u96c6\u7fa4","text":"
                                                        1. \u5728\u6240\u6709 server \u8282\u70b9\u624b\u52a8\u6267\u884c k3s-uninstall.sh
                                                        2. \u5728\u6240\u6709 agent \u8282\u70b9\u624b\u52a8\u6267\u884c k3s-agent-uninstall.sh
                                                        "},{"location":"admin/kpanda/best-practice/kubean-low-version.html","title":"\u79bb\u7ebf\u573a\u666f Kubean \u5411\u4e0b\u517c\u5bb9\u7248\u672c\u7684\u90e8\u7f72\u4e0e\u5347\u7ea7\u64cd\u4f5c","text":"

                                                        \u4e3a\u4e86\u6ee1\u8db3\u5ba2\u6237\u5bf9\u4f4e\u7248\u672c\u7684 K8s \u96c6\u7fa4\u7684\u642d\u5efa\uff0cKubean \u63d0\u4f9b\u4e86\u5411\u4e0b\u517c\u5bb9\u5e76\u521b\u5efa\u4f4e\u7248\u672c\u7684 K8s \u96c6\u7fa4\u80fd\u529b\uff0c\u7b80\u79f0\u5411\u4e0b\u517c\u5bb9\u7248\u672c\u7684\u80fd\u529b\u3002

                                                        \u76ee\u524d\u652f\u6301\u81ea\u5efa\u5de5\u4f5c\u96c6\u7fa4\u7248\u672c\u8303\u56f4\u5728 v1.26-v1.28\uff0c\u53ef\u4ee5\u53c2\u9605 AI \u7b97\u529b\u4e2d\u5fc3\u96c6\u7fa4\u7248\u672c\u652f\u6301\u4f53\u7cfb\u3002

                                                        \u672c\u6587\u5c06\u6f14\u793a\u5982\u4f55\u90e8\u7f72\u4f4e\u7248\u672c\u7684 K8s \u96c6\u7fa4\u3002

                                                        Note

                                                        The node environment in this demonstration is:

                                                        • x86 architecture
                                                        • CentOS 7 Linux distribution
                                                        "},{"location":"admin/kpanda/best-practice/kubean-low-version.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                        • \u51c6\u5907\u4e00\u4e2a Kubean \u6240\u5728\u7684\u7ba1\u7406\u96c6\u7fa4\uff0c\u5e76\u4e14\u5f53\u524d\u73af\u5883\u5df2\u7ecf\u90e8\u7f72\u652f\u6301 podman \u3001skopeo\u3001minio client \u547d\u4ee4\u3002 \u5982\u679c\u4e0d\u652f\u6301\uff0c\u53ef\u901a\u8fc7\u811a\u672c\u8fdb\u884c\u5b89\u88c5\u4f9d\u8d56\u7ec4\u4ef6\uff0c\u5b89\u88c5\u524d\u7f6e\u4f9d\u8d56\u3002

                                                        • \u524d\u5f80 kubean \u67e5\u770b\u53d1\u5e03\u7684\u5236\u54c1\uff0c \u5e76\u6839\u636e\u5b9e\u9645\u60c5\u51b5\u9009\u62e9\u5177\u4f53\u7684\u5236\u54c1\u7248\u672c\u3002\u76ee\u524d\u652f\u6301\u7684\u5236\u54c1\u7248\u672c\u53ca\u5bf9\u5e94\u7684\u96c6\u7fa4\u7248\u672c\u8303\u56f4\u5982\u4e0b\uff1a

                                                          \u5236\u54c1\u5305\u7248\u672c \u652f\u6301\u96c6\u7fa4\u8303\u56f4 AI \u7b97\u529b\u4e2d\u5fc3\u652f\u6301\u60c5\u51b5 release-2.21 v1.23.0 ~ v1.25.6 \u5b89\u88c5\u5668 v0.14.0+ \u5df2\u652f\u6301 release-2.22 v1.24.0 ~ v1.26.13 \u5b89\u88c5\u5668 v0.15.0+ \u5df2\u652f\u6301 release-2.23 v1.25.0 ~ v1.27.10 \u5b89\u88c5\u5668 v0.16.0+ \u5df2\u652f\u6301 release-2.24 v1.26.0 ~ v1.29.1 \u5b89\u88c5\u5668 v0.17.0+ \u5df2\u652f\u6301 release-2.25 v1.27.0 ~ v1.29.5 \u5b89\u88c5\u5668 v0.20.0+ \u5df2\u652f\u6301

                                                        Tip

                                                        When choosing an artifact version, do not just check the cluster version range; also verify that the version ranges of the relevant components (such as calico and containerd) in the artifact's manifest resource cover the versions of those components in your current cluster!

                                                        This article demonstrates deploying a K8s cluster offline at version 1.23.0 and upgrading it offline from 1.23.0 to 1.24.0, so the release-2.21 artifact is chosen.

                                                        "},{"location":"admin/kpanda/best-practice/kubean-low-version.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"admin/kpanda/best-practice/kubean-low-version.html#kubespray-release","title":"\u51c6\u5907 Kubespray Release \u4f4e\u7248\u672c\u7684\u76f8\u5173\u5236\u54c1","text":"

                                                        \u5c06 spray-job \u955c\u50cf\u5bfc\u5165\u5230\u79bb\u7ebf\u73af\u5883\u7684 Registry\uff08\u955c\u50cf\u4ed3\u5e93\uff09\u4e2d\u3002

                                                        # Assume the registry address in the seed cluster is 172.30.41.200\nREGISTRY_ADDR=\"172.30.41.200\"\n\n# The spray-job image can use an accelerator address; the image address depends on the chosen artifact version\nSPRAY_IMG_ADDR=\"ghcr.m.daocloud.io/kubean-io/spray-job:2.21-d6f688f\"\n\n# skopeo parameters\nSKOPEO_PARAMS=\" --insecure-policy -a --dest-tls-verify=false --retry-times=3 \"\n\n# Online environment: export the release-2.21 spray-job image and transfer it to the offline environment\nskopeo copy docker://${SPRAY_IMG_ADDR} docker-archive:spray-job-2.21.tar\n\n# Offline environment: import the release-2.21 spray-job image into the seed registry\nskopeo copy ${SKOPEO_PARAMS} docker-archive:spray-job-2.21.tar docker://${REGISTRY_ADDR}/${SPRAY_IMG_ADDR/.m.daocloud/}\n
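
                                                        To confirm that the import succeeded, you can inspect the image in the target registry; a sketch reusing the variables set above:

                                                        skopeo inspect --tls-verify=false docker://${REGISTRY_ADDR}/${SPRAY_IMG_ADDR/.m.daocloud/}\n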
                                                        "},{"location":"admin/kpanda/best-practice/kubean-low-version.html#k8s","title":"\u5236\u4f5c\u4f4e\u7248\u672c K8s \u79bb\u7ebf\u8d44\u6e90","text":"
                                                        1. \u51c6\u5907 manifest.yml \u6587\u4ef6\u3002

                                                          cat > \"manifest.yml\" <<EOF\nimage_arch:\n  - \"amd64\" ## \"arm64\"\nkube_version: ## \u6839\u636e\u5b9e\u9645\u573a\u666f\u586b\u5199\u96c6\u7fa4\u7248\u672c\n  - \"v1.23.0\"\n  - \"v1.24.0\"\nEOF\n
                                                        2. Build the offline incremental package.

                                                          # Create the data directory\nmkdir data\n# Build the offline package\nAIRGAP_IMG_ADDR=\"ghcr.m.daocloud.io/kubean-io/airgap-patch:2.21-d6f688f\" # (1)!\npodman run --rm -v $(pwd)/manifest.yml:/manifest.yml -v $(pwd)/data:/data -e ZONE=CN -e MODE=FULL ${AIRGAP_IMG_ADDR}\n
                                                          1. The airgap-patch image can use an accelerator address; the image address depends on the chosen artifact version
                                                        3. Import the offline images and binary packages for the corresponding K8s versions

                                                          # Import the binary packages from the data directory of the previous step into MinIO on the seed node\ncd ./data/amd64/files/\nMINIO_ADDR=\"http://127.0.0.1:9000\" # (1)!\nMINIO_USER=rootuser MINIO_PASS=rootpass123 ./import_files.sh ${MINIO_ADDR}\n\n# Import the images from the data directory of the previous step into the image registry on the seed node\ncd ./data/amd64/images/\nREGISTRY_ADDR=\"127.0.0.1\"  ./import_images.sh # (2)!\n
                                                          1. Replace the IP with the actual repository address
                                                          2. Replace the IP with the actual repository address
                                                        4. Deploy the manifest and localartifactset.cr.yaml custom resources to the management cluster where Kubean runs, or to the global service cluster. This example uses the global service cluster.

                                                          # Deploy the localArtifactSet resource from the data directory\ncd ./data\nkubectl apply -f localartifactset.cr.yaml\n\n# Download the release-2.21 manifest resource\nwget https://raw.githubusercontent.com/kubean-io/kubean-manifest/main/manifests/manifest-2.21-d6f688f.yml\n\n# Deploy the manifest resource for release-2.21\nkubectl apply -f manifest-2.21-d6f688f.yml\n
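
                                                          To confirm that both custom resources were registered, you can list them; the resource names below assume the standard Kubean CRDs and may differ in your Kubean version:

                                                          kubectl get localartifactsets.kubean.io\nkubectl get manifests.kubean.io\n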
                                                        "},{"location":"admin/kpanda/best-practice/kubean-low-version.html#k8s_1","title":"\u90e8\u7f72\u548c\u5347\u7ea7 K8s \u96c6\u7fa4\u517c\u5bb9\u7248\u672c","text":""},{"location":"admin/kpanda/best-practice/kubean-low-version.html#_3","title":"\u90e8\u7f72","text":"
                                                        1. \u524d\u5f80 \u5bb9\u5668\u7ba1\u7406 \uff0c\u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u4e2d\uff0c\u70b9\u51fb \u521b\u5efa\u96c6\u7fa4 \u6309\u94ae\u3002

                                                        2. \u88ab\u7eb3\u7ba1\u53c2\u6570\u9009\u62e9 manifest\u3001localartifactset.cr.yaml \u81ea\u5b9a\u4e49\u8d44\u6e90\u90e8\u7f72\u7684\u96c6\u7fa4\uff0c\u672c\u4f8b\u4f7f\u7528\u7684\u662f\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u3002

                                                        3. \u5176\u4f59\u53c2\u6570\u53c2\u8003\u521b\u5efa\u96c6\u7fa4\u3002

                                                        "},{"location":"admin/kpanda/best-practice/kubean-low-version.html#_4","title":"\u5347\u7ea7","text":"
                                                        1. \u9009\u62e9\u65b0\u521b\u5efa\u7684\u96c6\u7fa4\uff0c\u8fdb\u53bb\u8be6\u60c5\u754c\u9762\u3002

                                                        2. \u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u96c6\u7fa4\u8fd0\u7ef4 -> \u96c6\u7fa4\u5347\u7ea7 \uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u70b9\u51fb \u7248\u672c\u5347\u7ea7 \u3002

                                                        3. \u9009\u62e9\u53ef\u7528\u7684\u96c6\u7fa4\u8fdb\u884c\u5347\u7ea7\u3002

                                                        "},{"location":"admin/kpanda/best-practice/limit-disk-usage-docker.html","title":"\u9650\u5236 Docker \u5355\u5bb9\u5668\u53ef\u5360\u7528\u7684\u78c1\u76d8\u7a7a\u95f4","text":"

                                                        Docker \u5728 17.07.0-ce \u7248\u672c\u4e2d\u5f15\u5165 overlay2.zize\uff0c\u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528 overlay2.zize \u6765\u9650\u5236 docker \u5355\u5bb9\u5668\u53ef\u5360\u7528\u7684\u78c1\u76d8\u7a7a\u95f4\u3002

                                                        "},{"location":"admin/kpanda/best-practice/limit-disk-usage-docker.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                        \u5728\u914d\u7f6e docker overlay2.size \u4e4b\u524d\uff0c\u9700\u8981\u8c03\u6574\u64cd\u4f5c\u7cfb\u7edf\u4e2d\u6587\u4ef6\u7cfb\u7edf\u7c7b\u578b\u4e3a xfs \u5e76\u4f7f\u7528 pquota \u65b9\u5f0f\u8fdb\u884c\u8bbe\u5907\u6302\u8f7d\u3002

                                                        \u683c\u5f0f\u5316\u8bbe\u5907\u4e3a XFS \u6587\u4ef6\u7cfb\u7edf\uff0c\u53ef\u4ee5\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a

                                                        mkfs.xfs -f /dev/xxx\n

                                                        Note

                                                        pquota enforces project disk quotas.
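
                                                        For example, to mount a freshly formatted device with project quotas enabled; the device and mount point here are placeholders, and the root-filesystem case is handled via fstab in the walkthrough below:

                                                        mount -o rw,pquota /dev/xxx /var/lib/docker\n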

                                                        "},{"location":"admin/kpanda/best-practice/limit-disk-usage-docker.html#_2","title":"\u8bbe\u7f6e\u5355\u5bb9\u5668\u78c1\u76d8\u53ef\u5360\u7528\u7a7a\u95f4","text":"

                                                        \u6ee1\u8db3\u4ee5\u4e0a\u6761\u4ef6\u540e\uff0c\u7528\u6237\u53ef\u4ee5\u901a\u8fc7\u8bbe\u7f6e docker overlay2.size \u6765\u9650\u5236\u5355\u5bb9\u5668\u78c1\u76d8\u5360\u7528\u7a7a\u95f4\u5927\u5c0f\u3002\u547d\u4ee4\u884c\u793a\u4f8b\u5982\u4e0b\uff1a

                                                        sudo dockerd -s overlay2 --storage-opt overlay2.size=1G\n
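
                                                        The same settings can be persisted in /etc/docker/daemon.json, the standard Docker daemon configuration file, instead of passing flags to dockerd by hand; a sketch:

                                                        cat > /etc/docker/daemon.json <<'EOF'\n{\n  \"storage-driver\": \"overlay2\",\n  \"storage-opts\": [\"overlay2.size=1G\"]\n}\nEOF\nsystemctl restart docker\n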
                                                        "},{"location":"admin/kpanda/best-practice/limit-disk-usage-docker.html#_3","title":"\u573a\u666f\u6f14\u7ec3","text":"

                                                        \u63a5\u4e0b\u6765\u4ee5\u4e00\u4e2a\u5b9e\u9645\u7684\u4f8b\u5b50\u6765\u6f14\u7ec3\u4e00\u4e0b\u9650\u5236 docker \u5355\u5bb9\u5668\u53ef\u5360\u7528\u7684\u78c1\u76d8\u7a7a\u95f4\u6574\u4f53\u5b9e\u73b0\u6d41\u7a0b\u3002

                                                        "},{"location":"admin/kpanda/best-practice/limit-disk-usage-docker.html#_4","title":"\u76ee\u6807","text":"

                                                        \u90e8\u7f72\u4e00\u4e2a Kubernetes \u96c6\u7fa4\uff0c\u5e76\u9650\u5236 docker \u5355\u5bb9\u5668\u53ef\u5360\u7528\u7684\u78c1\u76d8\u7a7a\u95f4\u5927\u5c0f\u4e3a1G\uff0c\u8d85\u51fa1G\u5c06\u65e0\u6cd5\u4f7f\u7528\u3002

                                                        "},{"location":"admin/kpanda/best-practice/limit-disk-usage-docker.html#_5","title":"\u64cd\u4f5c\u6d41\u7a0b","text":"
                                                        1. \u767b\u5f55\u76ee\u6807\u8282\u70b9\uff0c\u67e5\u770b fstab \u6587\u4ef6\uff0c\u83b7\u53d6\u5f53\u524d\u8bbe\u5907\u7684\u6302\u8f7d\u60c5\u51b5\u3002

                                                          $ cat /etc/fstab\n\n# /etc/fstab\n# Created by anaconda on Thu Mar 19 11:32:59 2020\n#\n# Accessible filesystems, by reference, are maintained under '/dev/disk'\n# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info\n#\n/dev/mapper/centos-root /                       xfs     defaults        0 0\nUUID=3ed01f0e-67a1-4083-943a-343b7fed1708 /boot                   xfs     defaults        0 0\n/dev/mapper/centos-swap swap                    swap    defaults        0 0\n

                                                          Taking the node device above as an example, you can see that the XFS-formatted device /dev/mapper/centos-root is mounted to the / root directory with the default option defaults.

                                                        2. Configure the xfs filesystem to be mounted with the pquota option.

                                                          1. Modify the fstab file, updating the mount option from defaults to rw,pquota:

                                                            # Modify the fstab configuration\n$ vi /etc/fstab\n- /dev/mapper/centos-root /                       xfs     defaults         0 0\n+ /dev/mapper/centos-root /                       xfs     rw,pquota        0 0\n\n# Verify that the configuration is correct\n$ mount -a\n
                                                          2. Check whether pquota has taken effect:

                                                            xfs_quota -x -c print\n

                                                          Note

                                                          If pquota has not taken effect, check whether the operating system has the pquota option enabled. If not, add the rootflags=pquota parameter to the boot configuration /etc/grub2.cfg, then reboot the operating system for the change to take effect.
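
                                                          A minimal sketch of adding the boot parameter, assuming a grubby-based distribution such as CentOS:

                                                          grubby --update-kernel=ALL --args=\"rootflags=pquota\"  # append rootflags=pquota to all kernel entries\nreboot\n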

                                                        3. In Create Cluster -> Advanced Settings -> Custom Parameters, add the docker_storage_options parameter to set the disk space a single container can occupy.

                                                          Note

                                                          You can also work from the kubean manifest and add the docker_storage_options parameter in the vars conf.

                                                          apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: sample-vars-conf\n  namespace: kubean-system\ndata:\n  group_vars.yml: |\n    unsafe_show_logs: true\n    container_manager: docker\n+   docker_storage_options: -s overlay2 --storage-opt overlay2.size=1G  # add the docker_storage_options parameter\n    kube_network_plugin: calico\n    kube_network_plugin_multus: false\n    kube_proxy_mode: iptables\n    etcd_deployment_type: kubeadm\n    override_system_hostname: true\n    ...\n
                                                        4. Check the dockerd service's runtime configuration to verify that the disk limit was set successfully.
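
                                                        One way to verify, assuming Docker runs under systemd: the storage option should appear in the running dockerd command line, and docker info should report the overlay2 driver.

                                                        ps -ef | grep dockerd | grep overlay2.size  # the storage option should appear here\ndocker info | grep -A 1 \"Storage Driver\"    # confirms the overlay2 driver is in use\n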

                                                        This completes the end-to-end process of limiting the disk space a single Docker container can occupy.

                                                        "},{"location":"admin/kpanda/best-practice/multi-arch.html","title":"Add Heterogeneous Nodes to a Worker Cluster","text":"

                                                        This article describes how to add an ARM-architecture worker node running Kylin v10 sp2 to a worker cluster built on AMD architecture and running CentOS 7.9.

                                                        Note

                                                        This article only covers adding heterogeneous nodes, in offline mode, to worker clusters created on the AI Computing Center platform; integrated clusters are not included.

                                                        "},{"location":"admin/kpanda/best-practice/multi-arch.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                        • \u5df2\u7ecf\u90e8\u7f72\u597d\u4e00\u4e2a AI \u7b97\u529b\u4e2d\u5fc3\u5168\u6a21\u5f0f\uff0c\u5e76\u4e14\u706b\u79cd\u8282\u70b9\u8fd8\u5b58\u6d3b\uff0c\u90e8\u7f72\u53c2\u8003\u6587\u6863\u79bb\u7ebf\u5b89\u88c5 AI \u7b97\u529b\u4e2d\u5fc3\u5546\u4e1a\u7248
                                                        • \u5df2\u7ecf\u901a\u8fc7 AI \u7b97\u529b\u4e2d\u5fc3\u5e73\u53f0\u521b\u5efa\u597d\u4e00\u4e2a AMD \u67b6\u6784\uff0c\u64cd\u4f5c\u7cfb\u7edf\u4e3a CentOS 7.9 \u7684\u5de5\u4f5c\u96c6\u7fa4\uff0c\u521b\u5efa\u53c2\u8003\u6587\u6863\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4
                                                        "},{"location":"admin/kpanda/best-practice/multi-arch.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"admin/kpanda/best-practice/multi-arch.html#_4","title":"\u4e0b\u8f7d\u5e76\u5bfc\u5165\u79bb\u7ebf\u5305","text":"

                                                        \u4ee5 ARM \u67b6\u6784\u3001\u64cd\u4f5c\u7cfb\u7edf Kylin v10 sp2 \u4e3a\u4f8b\u3002

                                                        \u8bf7\u786e\u4fdd\u5df2\u7ecf\u767b\u5f55\u5230\u706b\u79cd\u8282\u70b9\uff01\u5e76\u4e14\u4e4b\u524d\u90e8\u7f72 AI \u7b97\u529b\u4e2d\u5fc3\u65f6\u4f7f\u7528\u7684 clusterConfig.yaml \u6587\u4ef6\u8fd8\u5728\u3002

                                                        "},{"location":"admin/kpanda/best-practice/multi-arch.html#_5","title":"\u79bb\u7ebf\u955c\u50cf\u5305","text":"

                                                        Note

                                                        You can download the latest version from the Download Center. This capability requires Container Management v0.31 or later, which corresponds to installer v0.21.0 or later.

                                                        CPU Architecture | Version | Download URL
                                                        AMD64 | v0.21.0 | https://qiniu-download-public.daocloud.io/DaoCloud_Enterprise/dce5/offline-v0.21.0-amd64.tar
                                                        ARM64 | v0.21.0 | https://qiniu-download-public.daocloud.io/DaoCloud_Enterprise/dce5/offline-v0.21.0-arm64.tar

                                                        After the download completes, extract the offline package. Here we download the arm64 offline package:

                                                        tar -xvf offline-v0.21.0-arm64.tar\n
                                                        "},{"location":"admin/kpanda/best-practice/multi-arch.html#iso-kylin-v10-sp2","title":"ISO \u79bb\u7ebf\u5305\uff08Kylin v10 sp2\uff09","text":"CPU \u67b6\u6784 \u64cd\u4f5c\u7cfb\u7edf\u7248\u672c \u4e0b\u8f7d\u5730\u5740 ARM64 Kylin Linux Advanced Server release V10 (Sword) SP2 \u7533\u8bf7\u5730\u5740\uff1ahttps://www.kylinos.cn/support/trial.html

                                                        Note

                                                        Kylin OS requires you to submit personal information before you can download it; select V10 (Sword) SP2 when downloading.

                                                        "},{"location":"admin/kpanda/best-practice/multi-arch.html#ospackage-kylin-v10-sp2","title":"osPackage \u79bb\u7ebf\u5305 \uff08Kylin v10 sp2\uff09","text":"

                                                        \u5176\u4e2d Kubean \u63d0\u4f9b\u4e86\u4e0d\u540c\u64cd\u4f5c\u7cfb\u7edf\u7684osPackage \u79bb\u7ebf\u5305\uff0c\u53ef\u4ee5\u524d\u5f80 https://github.com/kubean-io/kubean/releases \u67e5\u770b\u3002

                                                        \u64cd\u4f5c\u7cfb\u7edf\u7248\u672c \u4e0b\u8f7d\u5730\u5740 Kylin Linux Advanced Server release V10 (Sword) SP2 https://github.com/kubean-io/kubean/releases/download/v0.18.5/os-pkgs-kylin-v10sp2-v0.18.5.tar.gz

                                                        Note

                                                        For the exact osPackage version to use, check the kubean version recorded in offline/sample/clusterConfig.yaml within the offline image package.
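
                                                        A quick way to check, assuming the offline image package has already been extracted to the current directory:

                                                        grep -i kubean offline/sample/clusterConfig.yaml\n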

                                                        "},{"location":"admin/kpanda/best-practice/multi-arch.html#_6","title":"\u5bfc\u5165\u79bb\u7ebf\u5305\u81f3\u706b\u79cd\u8282\u70b9","text":"

                                                        \u6267\u884c import-artifact \u547d\u4ee4\uff1a

                                                        ./offline/dce5-installer import-artifact -c clusterConfig.yaml \\\n    --offline-path=/root/offline \\\n    --iso-path=/root/Kylin-Server-10-SP2-aarch64-Release-Build09-20210524.iso \\\n    --os-pkgs-path=/root/os-pkgs-kylin-v10sp2-v0.18.5.tar.gz\n

                                                        Note

                                                        Parameter description:

                                                        • -c clusterConfig.yaml specifies the clusterConfig.yaml file used when the AI Computing Center was originally deployed
                                                        • --offline-path specifies the path of the downloaded offline image package
                                                        • --iso-path specifies the path of the downloaded ISO operating system image
                                                        • --os-pkgs-path specifies the path of the downloaded osPackage offline package

                                                        After the import command succeeds, the offline packages are uploaded to MinIO on the bootstrap node.

                                                        "},{"location":"admin/kpanda/best-practice/multi-arch.html#_7","title":"Add the Heterogeneous Worker Node","text":"

                                                        Make sure you are logged in to the management node of the AI Computing Center's management cluster.

                                                        "},{"location":"admin/kpanda/best-practice/multi-arch.html#_8","title":"\u4fee\u6539\u4e3b\u673a\u6e05\u5355\u6587\u4ef6","text":"

                                                        \u4e3b\u673a\u6e05\u5355\u6587\u4ef6\u793a\u4f8b\uff1a

                                                        \u65b0\u589e\u8282\u70b9\u524d\u65b0\u589e\u8282\u70b9\u540e
                                                        apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: ${cluster-name}-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      children:\n        etcd:\n          hosts:\n            centos-master:\n        k8s_cluster:\n          children:\n            kube_control_plane:\n            kube_node:\n        kube_control_plane:\n          hosts:\n            centos-master:\n        kube_node:\n          hosts:\n            centos-master:\n    hosts:\n      centos-master:\n        ip: 10.5.10.183\n        access_ip: 10.5.10.183\n        ansible_host: 10.5.10.183\n        ansible_connection: ssh\n        ansible_user: root\n        ansible_ssh_pass: ******\n        ansible_password: ******\n        ansible_become_password: ******\n
                                                        apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: ${cluster-name}-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      hosts:\n        centos-master:\n          ip: 10.5.10.183\n          access_ip: 10.5.10.183\n          ansible_host: 10.5.10.183\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_ssh_pass: ******\n          ansible_password: ******\n          ansible_become_password: ******\n          # add the heterogeneous node's information\n        kylin-worker:\n          ip: 10.5.10.181\n          access_ip: 10.5.10.181\n          ansible_host: 10.5.10.181\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_ssh_pass: ******\n          ansible_password: ******\n          ansible_become_password: ******\n        children:\n          kube_control_plane:\n            hosts:\n              - centos-master\n          kube_node:\n            hosts:\n              - centos-master\n              - kylin-worker  # add the name of the newly added heterogeneous node\n          etcd:\n            hosts:\n              - centos-master\n          k8s_cluster:\n            children:\n              - kube_control_plane\n              - kube_node\n

                                                        Following the comments in the configuration above, add the new worker node's information:

                                                        kubectl edit cm ${cluster-name}-hosts-conf -n kubean-system\n

                                                        cluster-name is the name of the worker cluster and is generated by default when the cluster is created through Container Management.
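
                                                        If you are unsure of the exact name, one way to list the generated inventory ConfigMaps (a sketch; names vary per cluster):

                                                        kubectl -n kubean-system get configmap | grep hosts-conf\n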

                                                        "},{"location":"admin/kpanda/best-practice/multi-arch.html#clusteroperationyml","title":"\u901a\u8fc7 ClusterOperation.yml \u65b0\u589e\u6269\u5bb9\u4efb\u52a1","text":"

                                                        \u793a\u4f8b\uff1a

                                                        ClusterOperation.yml
                                                        apiVersion: kubean.io/v1alpha1\nkind: ClusterOperation\nmetadata:\n  name: add-worker-node\nspec:\n  cluster: ${cluster-name} # specify the cluster name\n  image: 10.5.14.30/ghcr.m.daocloud.io/kubean-io/spray-job:v0.18.5\n  actionType: playbook\n  action: scale.yml\n  extraArgs: --limit=kylin-worker\n  preHook:\n    - actionType: playbook\n      action: ping.yml\n    - actionType: playbook\n      action: disable-firewalld.yml\n    - actionType: playbook\n      action: enable-repo.yml\n      extraArgs: |\n        -e \"{repo_list: [\"http://10.5.14.30:9000/kubean/kylin-iso/\\$releasever/sp2/os/\\$basearch\",\"http://10.5.14.30:9000/kubean/kylin/\\$releasever/sp2/os/\\$basearch\"]}\" --limit=kylin-worker\n  postHook:\n    - actionType: playbook\n      action: cluster-info.yml\n

                                                        Note

                                                        • spec.image must be the same image used by the job that performed the original deployment
                                                        • spec.action is set to scale.yml
                                                        • spec.extraArgs is set to --limit=kylin-worker
                                                        • the enable-repo.yml playbook in spec.preHook must be given the correct repo_list for the relevant OS

                                                        With the configuration above, create and apply join-node-ops.yaml:

                                                        vi join-node-ops.yaml\nkubectl apply -f join-node-ops.yaml -n kubean-system\n
                                                        "},{"location":"admin/kpanda/best-practice/multi-arch.html#_9","title":"\u68c0\u67e5\u4efb\u52a1\u6267\u884c\u72b6\u6001","text":"
                                                        kubectl -n kubean-system get pod | grep add-worker-node\n

                                                        To follow the progress of the scale-out task, check the logs of that Pod.
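
                                                        For example (a sketch; the generated pod name will differ):

                                                        kubectl -n kubean-system logs -f $(kubectl -n kubean-system get pod -o name | grep add-worker-node)\n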

                                                        "},{"location":"admin/kpanda/best-practice/multi-arch.html#_10","title":"\u524d\u5f80\u754c\u9762\u9a8c\u8bc1","text":"
                                                        1. \u524d\u5f80 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4 -> \u8282\u70b9\u7ba1\u7406

                                                        2. \u70b9\u51fb\u65b0\u589e\u7684\u8282\u70b9\uff0c\u67e5\u770b\u8be6\u60c5
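
                                                        You can also confirm from the command line that the node has joined and reports the expected architecture (the node name comes from the inventory above):

                                                        kubectl get nodes -o wide\nkubectl get node kylin-worker -o jsonpath='{.status.nodeInfo.architecture}'  # expected: arm64\n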

                                                        "},{"location":"admin/kpanda/best-practice/replace-first-master-node.html","title":"\u66ff\u6362\u5de5\u4f5c\u96c6\u7fa4\u7684\u9996\u4e2a\u63a7\u5236\u8282\u70b9","text":"

                                                        \u672c\u6587\u5c06\u4ee5\u4e00\u4e2a\u9ad8\u53ef\u7528\u4e09\u63a7\u5236\u8282\u70b9\u7684\u5de5\u4f5c\u96c6\u7fa4\u4e3a\u4f8b\u3002 \u5f53\u5de5\u4f5c\u96c6\u7fa4\u7684\u9996\u4e2a\u63a7\u5236\u8282\u70b9\u6545\u969c\u6216\u5f02\u5e38\u65f6\uff0c\u5982\u4f55\u66ff\u6362\u6216\u91cd\u65b0\u63a5\u5165\u9996\u4e2a\u63a7\u5236\u8282\u70b9\u3002

                                                        \u672c\u6587\u7684\u9ad8\u53ef\u7528\u96c6\u7fa4\u6709 3 \u4e2a Master \u8282\u70b9\uff1a

                                                        • node1 (172.30.41.161)
                                                        • node2 (172.30.41.162)
                                                        • node3 (172.30.41.163)

                                                        Assume node1 goes down. The following describes how to re-join the recovered node1 to the worker cluster.

                                                        "},{"location":"admin/kpanda/best-practice/replace-first-master-node.html#_2","title":"Preparation","text":"

                                                        Before performing the replacement, first obtain basic information about the cluster's resources; it will be needed when modifying the related configurations.

                                                        Note

                                                        All of the following commands for obtaining cluster resource information are executed in the management cluster.

                                                        1. Get the cluster name

                                                          Run the following command to find the clusters.kubean.io resource corresponding to the cluster:

                                                          # For example, if the clusters.kubean.io resource is named cluster-mini-1\n# then get the cluster name\nCLUSTER_NAME=$(kubectl get clusters.kubean.io cluster-mini-1 -o=jsonpath=\"{.metadata.name}{'\\n'}\")\n
                                                        2. Get the cluster's host inventory ConfigMap

                                                          kubectl get clusters.kubean.io cluster-mini-1 -o=jsonpath=\"{.spec.hostsConfRef}{'\\n'}\"\n{\"name\":\"mini-1-hosts-conf\",\"namespace\":\"kubean-system\"}\n
                                                        3. Get the cluster's configuration parameters ConfigMap

                                                          kubectl get clusters.kubean.io cluster-mini-1 -o=jsonpath=\"{.spec.varsConfRef}{'\\n'}\"\n{\"name\":\"mini-1-vars-conf\",\"namespace\":\"kubean-system\"}\n
                                                        "},{"location":"admin/kpanda/best-practice/replace-first-master-node.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                        1. \u8c03\u6574\u63a7\u5236\u5e73\u9762\u8282\u70b9\u987a\u5e8f

                                                          \u91cd\u7f6e node1 \u8282\u70b9\u4f7f\u5176\u6062\u590d\u5230\u5b89\u88c5\u96c6\u7fa4\u4e4b\u524d\u7684\u72b6\u6001\uff08\u6216\u4f7f\u7528\u65b0\u7684\u8282\u70b9\uff09\uff0c\u4fdd\u6301 node1 \u8282\u70b9\u7684\u7f51\u7edc\u8fde\u901a\u6027\u3002

                                                          \u8c03\u6574\u4e3b\u673a\u6e05\u5355\u4e2d node1 \u8282\u70b9\u5728 kube_control_plane \u3001kube_node\u3001etcd \u4e2d\u7684\u987a\u5e8f \uff08node1/node2/node3 -> node2/node3/node1\uff09\uff1a

                                                          function change_control_plane_order() {\n  cat << EOF | kubectl apply -f -\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: mini-1-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      hosts:\n        node1:\n          ip: \"172.30.41.161\"\n          access_ip: \"172.30.41.161\"\n          ansible_host: \"172.30.41.161\"\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: dangerous\n        node2:\n          ip: \"172.30.41.162\"\n          access_ip: \"172.30.41.162\"\n          ansible_host: \"172.30.41.162\"\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: dangerous\n        node3:\n          ip: \"172.30.41.163\"\n          access_ip: \"172.30.41.163\"\n          ansible_host: \"172.30.41.163\"\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: dangerous\n      children:\n        kube_control_plane:\n          hosts:\n            node2:\n            node3:\n            node1:\n        kube_node:\n          hosts:\n            node2:\n            node3:\n            node1:\n        etcd:\n          hosts:\n            node2:\n            node3:\n            node1:\n        k8s_cluster:\n          children:\n            kube_control_plane:\n            kube_node:\n        calico_rr:\n          hosts: {}\nEOF\n}\n\nchange_control_plane_order\n
                                                        2. Remove the first master node, which is in an abnormal state

                                                          After adjusting the node order in the host inventory, remove node1, whose K8s control plane is in an abnormal state.

                                                          Note

                                                          If node1 is offline or faulty, the following options must be added to extraArgs; they are not needed when node1 is online.

                                                          reset_nodes=false # skip the node reset step\nallow_ungraceful_removal=true # allow ungraceful removal\n
                                                          # For the spray-job image, a registry mirror (accelerator) address can be used here\n\nSPRAY_IMG_ADDR=\"ghcr.m.daocloud.io/kubean-io/spray-job\"\nSPRAY_RLS_2_22_TAG=\"2.22-336b323\"\nKUBE_VERSION=\"v1.24.14\"\nCLUSTER_NAME=\"cluster-mini-1\"\nREMOVE_NODE_NAME=\"node1\"\n\ncat << EOF | kubectl apply -f -\n---\napiVersion: kubean.io/v1alpha1\nkind: ClusterOperation\nmetadata:\n  name: cluster-mini-1-remove-node-ops\nspec:\n  cluster: ${CLUSTER_NAME}\n  image: ${SPRAY_IMG_ADDR}:${SPRAY_RLS_2_22_TAG}\n  actionType: playbook\n  action: remove-node.yml\n  extraArgs: -e node=${REMOVE_NODE_NAME} -e reset_nodes=false -e allow_ungraceful_removal=true -e kube_version=${KUBE_VERSION}\n  postHook:\n    - actionType: playbook\n      action: cluster-info.yml\nEOF\n
                                                        3. Manually modify the cluster configuration: edit and update cluster-info

                                                          # Edit cluster-info\nkubectl -n kube-public edit cm cluster-info\n\n# 1. If the ca.crt certificate has been renewed, update the contents of the certificate-authority-data field\n# View the base64 encoding of the ca certificate:\ncat /etc/kubernetes/ssl/ca.crt | base64 | tr -d '\\n'\n\n# 2. Change the IP address in the server field to the new first master's IP; this document uses node2's IP address 172.30.41.162\n
                                                        4. Manually modify the cluster configuration: edit and update kubeadm-config

                                                          # Edit kubeadm-config\nkubectl -n kube-system edit cm kubeadm-config\n\n# Change controlPlaneEndpoint to the new first master's IP; this document uses node2's IP address 172.30.41.162\n
                                                        5. Scale the master node back out and update the cluster

                                                          Note

                                                          • Use --limit to restrict the update to the etcd and kube_control_plane node groups.
                                                          • In an offline environment, spec.preHook must include enable-repo.yml, and the extraArgs parameter must carry the correct repo_list for the relevant OS.
                                                          • After the scale-out completes, node2 becomes the first master.
                                                          cat << EOF | kubectl apply -f -\n---\napiVersion: kubean.io/v1alpha1\nkind: ClusterOperation\nmetadata:\n  name: cluster-mini-1-update-cluster-ops\nspec:\n  cluster: ${CLUSTER_NAME}\n  image: ${SPRAY_IMG_ADDR}:${SPRAY_RLS_2_22_TAG}\n  actionType: playbook\n  action: cluster.yml\n  extraArgs: --limit=etcd,kube_control_plane -e kube_version=${KUBE_VERSION}\n  preHook:\n    - actionType: playbook\n      action: enable-repo.yml  # required in offline environments; set the correct repo_list (for installing OS packages). The values below are for reference only\n      extraArgs: |\n        -e \"{repo_list: ['http://172.30.41.0:9000/kubean/centos/\\$releasever/os/\\$basearch','http://172.30.41.0:9000/kubean/centos-iso/\\$releasever/os/\\$basearch']}\"\n  postHook:\n    - actionType: playbook\n      action: cluster-info.yml\nEOF\n

                                                        This completes the replacement of the first master node.
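
                                                        As a sanity check, you might confirm that all three nodes are Ready and that the etcd control plane pods are healthy (a sketch):

                                                        kubectl get nodes\nkubectl -n kube-system get pods -l component=etcd -o wide\n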

                                                        "},{"location":"admin/kpanda/best-practice/update-offline-cluster.html","title":"\u5de5\u4f5c\u96c6\u7fa4\u79bb\u7ebf\u90e8\u7f72/\u5347\u7ea7\u6307\u5357","text":"

                                                        Note

                                                        This article only covers deploying or upgrading the Kubernetes version of worker clusters created on the AI Computing Center platform in offline mode; it does not cover deploying or upgrading other Kubernetes components.

                                                        This article applies to the following offline scenarios:

                                                        • Following this guide, you can deploy a Kubernetes version that the UI does not recommend for clusters created on the AI Computing Center platform.
                                                        • By building an incremental offline package, you can upgrade the Kubernetes version of a worker cluster created on the AI Computing Center platform.

                                                        The overall approach is:

                                                        1. Build the offline package on an internet-connected node
                                                        2. Import the offline package to the bootstrap node
                                                        3. Update the Kubernetes version manifest of the global service cluster
                                                        4. Use the platform UI to create a worker cluster, or upgrade an existing one to the new Kubernetes version

                                                        Note

                                                        For the offline Kubernetes versions that can currently be built, refer to the list of Kubernetes versions supported by kubean.

                                                        "},{"location":"admin/kpanda/best-practice/update-offline-cluster.html#_2","title":"Build the Offline Package on an Internet-connected Node","text":"

                                                        Since the offline environment has no internet access, you need to prepare an internet-connected node in advance to build the incremental offline package, and start the Docker or podman service on it. See How to install Docker?

                                                        1. Check that the Docker service is running on the connected node:

                                                          ps aux|grep docker\n

                                                          Expected output:

                                                          root     12341  0.5  0.2 654372 26736 ?        Ssl  23:45   0:00 /usr/bin/dockerd\nroot     12351  0.2  0.1 625080 13740 ?        Ssl  23:45   0:00 docker-containerd --config /var/run/docker/containerd/containerd.toml\nroot     13024  0.0  0.0 112824   980 pts/0    S+   23:45   0:00 grep --color=auto docker\n
                                                        2. In the /root directory of the connected node, create a file named manifest.yaml:

                                                          vi manifest.yaml\n

                                                          The content of manifest.yaml is as follows:

                                                          manifest.yaml
                                                          image_arch:\n- \"amd64\"\nkube_version: # the cluster version to upgrade to\n- \"v1.28.0\"\n
                                                          • image_arch specifies the CPU architecture type; valid values are amd64 and arm64.
                                                          • kube_version specifies the Kubernetes offline package version to build; see the supported offline Kubernetes versions mentioned above. A multi-arch example is sketched below.
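
                                                          For instance, a manifest that builds packages for both architectures and more than one version might look like this (a sketch; the version numbers are illustrative):

                                                          image_arch:\n- \"amd64\"\n- \"arm64\"\nkube_version:\n- \"v1.27.5\"\n- \"v1.28.0\"\n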
                                                        3. In the /root directory, create a folder named data to store the incremental offline package.

                                                          mkdir data\n

                                                          Run the following command to generate the offline package with the kubean airgap-patch image. The airgap-patch image tag matches the Kubean version; make sure that Kubean version covers the Kubernetes version you need to upgrade to.

                                                          # assuming the kubean version is v0.13.9\ndocker run --rm -v $(pwd)/manifest.yaml:/manifest.yml -v $(pwd)/data:/data ghcr.m.daocloud.io/kubean-io/airgap-patch:v0.13.9\n

                                                          After the Docker job finishes, check the files under the /data folder; the directory layout looks like this:

                                                          data\n\u251c\u2500\u2500 amd64\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 files\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 import_files.sh\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 offline-files.tar.gz\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 images\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u251c\u2500\u2500 import_images.sh\n\u2502\u00a0\u00a0 \u2502\u00a0\u00a0 \u2514\u2500\u2500 offline-images.tar.gz\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 os-pkgs\n\u2502\u00a0\u00a0     \u2514\u2500\u2500 import_ospkgs.sh\n\u2514\u2500\u2500 localartifactset.cr.yaml\n
                                                        "},{"location":"admin/kpanda/best-practice/update-offline-cluster.html#_3","title":"\u5c06\u79bb\u7ebf\u5305\u5bfc\u5165\u706b\u79cd\u8282\u70b9","text":"
                                                        1. \u5c06\u8054\u7f51\u8282\u70b9\u7684 /data \u6587\u4ef6\u62f7\u8d1d\u81f3\u706b\u79cd\u8282\u70b9\u7684 /root \u76ee\u5f55\u4e0b\uff0c\u5728 \u8054\u7f51\u8282\u70b9 \u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff1a

                                                          scp -r data root@x.x.x.x:/root\n

                                                          x.x.x.x is the bootstrap node's IP address.

                                                        2. On the bootstrap node, copy the image files under /data into the bootstrap node's built-in Docker registry. After logging in to the bootstrap node, run the following:

                                                          1. Enter the directory containing the image files:

                                                            cd data/amd64/images\n
                                                          2. Run the import_images.sh script to import the images into the bootstrap node's built-in Docker registry:

                                                            REGISTRY_ADDR=\"127.0.0.1\"  ./import_images.sh\n

                                                          Note

                                                          The command above only applies to the bootstrap node's built-in Docker registry. If you are using an external registry, use the following instead:

                                                          REGISTRY_SCHEME=https REGISTRY_ADDR=${registry_address} REGISTRY_USER=${username} REGISTRY_PASS=${password} ./import_images.sh\n
                                                          • REGISTRY_ADDR is the address of the image registry, for example 1.2.3.4:5000
                                                          • When the registry requires username/password authentication, set REGISTRY_USER and REGISTRY_PASS
                                                        3. On the bootstrap node, copy the binary files under /data into the bootstrap node's built-in MinIO service.

                                                          1. Enter the directory containing the binary files:

                                                            cd data/amd64/files/\n
                                                          2. Run the import_files.sh script to import the binary files into the bootstrap node's built-in MinIO service:

                                                            MINIO_USER=rootuser MINIO_PASS=rootpass123 ./import_files.sh http://127.0.0.1:9000\n

                                                        Note

                                                        The command above only applies to the bootstrap node's built-in MinIO service. If you are using an external MinIO, replace http://127.0.0.1:9000 with the external MinIO's access address. “rootuser” and “rootpass123” are the default account and password of the bootstrap node's built-in MinIO service.

                                                        "},{"location":"admin/kpanda/best-practice/update-offline-cluster.html#kubernetes","title":"Update the Kubernetes Version Manifest of the Global Service Cluster","text":"

                                                        Run the following command on the bootstrap node to deploy the localartifactset resource to the global service cluster:

                                                        kubectl apply -f data/localartifactset.cr.yaml\n
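
                                                        To verify that the version manifest landed, you can list the resource afterwards (a sketch, assuming the kubean CRDs are installed):

                                                        kubectl get localartifactsets.kubean.io\n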
                                                        "},{"location":"admin/kpanda/best-practice/update-offline-cluster.html#_4","title":"\u4e0b\u4e00\u6b65","text":"

                                                        \u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3\u7684 UI \u7ba1\u7406\u754c\u9762\uff0c\u60a8\u53ef\u4ee5\u7ee7\u7eed\u6267\u884c\u4ee5\u4e0b\u64cd\u4f5c\uff1a

                                                        1. \u53c2\u7167\u521b\u5efa\u96c6\u7fa4\u7684\u6587\u6863\u8fdb\u884c\u5de5\u4f5c\u96c6\u7fa4\u521b\u5efa\uff0c\u6b64\u65f6\u53ef\u4ee5\u9009\u62e9 Kubernetes \u589e\u91cf\u7248\u672c\u3002

                                                        2. \u53c2\u7167\u5347\u7ea7\u96c6\u7fa4\u7684\u6587\u6863\u5bf9\u81ea\u5efa\u7684\u5de5\u4f5c\u96c6\u7fa4\u8fdb\u884c\u5347\u7ea7\u3002

                                                        "},{"location":"admin/kpanda/best-practice/use-otherlinux-create-custer.html","title":"\u5728\u975e\u4e3b\u6d41\u64cd\u4f5c\u7cfb\u7edf\u4e0a\u521b\u5efa\u96c6\u7fa4","text":"

                                                        \u672c\u6587\u4ecb\u7ecd\u79bb\u7ebf\u6a21\u5f0f\u4e0b\u5982\u4f55\u5728 \u672a\u58f0\u660e\u652f\u6301\u7684 OS \u4e0a\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4\u3002AI \u7b97\u529b\u4e2d\u5fc3\u58f0\u660e\u652f\u6301\u7684 OS \u8303\u56f4\u8bf7\u53c2\u8003 AI \u7b97\u529b\u4e2d\u5fc3\u652f\u6301\u7684\u64cd\u4f5c\u7cfb\u7edf

                                                        \u79bb\u7ebf\u6a21\u5f0f\u4e0b\u5728\u672a\u58f0\u660e\u652f\u6301\u7684 OS \u4e0a\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4\uff0c\u4e3b\u8981\u7684\u6d41\u7a0b\u5982\u4e0b\u56fe\uff1a

                                                        \u63a5\u4e0b\u6765\uff0c\u672c\u6587\u5c06\u4ee5 openAnolis \u64cd\u4f5c\u7cfb\u7edf\u4e3a\u4f8b\uff0c\u4ecb\u7ecd\u5982\u4f55\u5728\u975e\u4e3b\u6d41\u64cd\u4f5c\u7cfb\u7edf\u4e0a\u521b\u5efa\u96c6\u7fa4\u3002

                                                        "},{"location":"admin/kpanda/best-practice/use-otherlinux-create-custer.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                        • \u5df2\u7ecf\u90e8\u7f72\u597d\u4e00\u4e2a AI \u7b97\u529b\u4e2d\u5fc3\u5168\u6a21\u5f0f\uff0c\u90e8\u7f72\u53c2\u8003\u6587\u6863\u79bb\u7ebf\u5b89\u88c5 AI \u7b97\u529b\u4e2d\u5fc3\u5546\u4e1a\u7248
                                                        • \u81f3\u5c11\u62e5\u6709\u4e00\u53f0\u53ef\u4ee5\u8054\u7f51\u7684\u540c\u67b6\u6784\u540c\u7248\u672c\u7684\u8282\u70b9\u3002
                                                        "},{"location":"admin/kpanda/best-practice/use-otherlinux-create-custer.html#_3","title":"\u5728\u7ebf\u8282\u70b9\u6784\u5efa\u79bb\u7ebf\u5305","text":"

                                                        \u627e\u5230\u4e00\u4e2a\u548c\u5f85\u5efa\u96c6\u7fa4\u8282\u70b9\u67b6\u6784\u548c OS \u5747\u4e00\u81f4\u7684\u5728\u7ebf\u73af\u5883\uff0c\u672c\u6587\u4ee5 AnolisOS 8.8 GA \u4e3a\u4f8b\u3002\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c\u751f\u6210\u79bb\u7ebf os-pkgs \u5305\u3002

                                                        # Download the scripts and build the os packages bundle\ncurl -Lo ./pkgs.yml https://raw.githubusercontent.com/kubean-io/kubean/main/build/os-packages/others/pkgs.yml\ncurl -Lo ./other_os_pkgs.sh https://raw.githubusercontent.com/kubean-io/kubean/main/build/os-packages/others/other_os_pkgs.sh && chmod +x  other_os_pkgs.sh\n./other_os_pkgs.sh build # build the offline package\n

                                                        After the commands above finish, a tarball named os-pkgs-anolis-8.8.tar.gz should appear in the current directory, which then looks roughly like this:

                                                            .\n    \u251c\u2500\u2500 other_os_pkgs.sh\n    \u251c\u2500\u2500 pkgs.yml\n    \u2514\u2500\u2500 os-pkgs-anolis-8.8.tar.gz\n
                                                        "},{"location":"admin/kpanda/best-practice/use-otherlinux-create-custer.html#_4","title":"\u79bb\u7ebf\u8282\u70b9\u5b89\u88c5\u79bb\u7ebf\u5305","text":"

                                                        \u5c06\u5728\u7ebf\u8282\u70b9\u4e2d\u751f\u6210\u7684 other_os_pkgs.sh \u3001 pkgs.yml \u3001 os-pkgs-anolis-8.8.tar.gz \u4e09\u4e2a\u6587\u4ef6\u62f7\u8d1d\u81f3\u79bb\u7ebf\u73af\u5883\u4e2d\u7684\u5f85\u5efa\u96c6\u7fa4\u7684**\u6240\u6709**\u8282\u70b9\u4e0a\u3002

                                                        \u767b\u5f55\u79bb\u7ebf\u73af\u5883\u4e2d\uff0c\u4efb\u4e00\u5f85\u5efa\u96c6\u7fa4\u7684\u5176\u4e2d\u4e00\u4e2a\u8282\u70b9\u4e0a\uff0c\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c\u4e3a\u8282\u70b9\u5b89\u88c5 os-pkg \u5305\u3002

                                                        # \u914d\u7f6e\u73af\u5883\u53d8\u91cf\nexport PKGS_YML_PATH=/root/workspace/os-pkgs/pkgs.yml # \u5f53\u524d\u79bb\u7ebf\u8282\u70b9 pkgs.yml \u6587\u4ef6\u7684\u8def\u5f84\nexport PKGS_TAR_PATH=/root/workspace/os-pkgs/os-pkgs-anolis-8.8.tar.gz # \u5f53\u524d\u79bb\u7ebf\u8282\u70b9 os-pkgs-anolis-8.8.tar.gz \u7684\u8def\u5f84\nexport SSH_USER=root # \u5f53\u524d\u79bb\u7ebf\u8282\u70b9\u7684\u7528\u6237\u540d\nexport SSH_PASS=dangerous # \u5f53\u524d\u79bb\u7ebf\u8282\u70b9\u7684\u5bc6\u7801\nexport HOST_IPS='172.30.41.168' # \u5f53\u524d\u79bb\u7ebf\u8282\u70b9\u7684 IP\n./other_os_pkgs.sh install #\u5b89\u88c5\u79bb\u7ebf\u5305\n

                                                        \u6267\u884c\u5b8c\u6210\u4e0a\u8ff0\u547d\u4ee4\u540e\uff0c\u7b49\u5f85\u754c\u9762\u63d0\u793a\uff1a All packages for node (X.X.X.X) have been installed \u5373\u8868\u793a\u5b89\u88c5\u5b8c\u6210\u3002

                                                        "},{"location":"admin/kpanda/best-practice/use-otherlinux-create-custer.html#_5","title":"\u4e0b\u4e00\u6b65","text":"

                                                        \u53c2\u8003\u6587\u6863\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4\uff0c\u5728 UI \u754c\u9762\u4e0a\u521b\u5efa openAnolis \u96c6\u7fa4\u3002

                                                        "},{"location":"admin/kpanda/best-practice/co-located/index.html","title":"\u5728\u79bb\u7ebf\u6df7\u90e8","text":"

                                                        \u4f01\u4e1a\u4e2d\u4e00\u822c\u5b58\u5728\u4e24\u79cd\u7c7b\u578b\u7684\u5de5\u4f5c\u8d1f\u8f7d\uff1a\u5728\u7ebf\u670d\u52a1\uff08latency-sensitive service\uff09\u548c\u79bb\u7ebf\u4efb\u52a1\uff08batch job\uff09\u3002 \u5728\u7ebf\u670d\u52a1\u5982\u641c\u7d22/\u652f\u4ed8/\u63a8\u8350\u7b49\uff0c\u5177\u6709\u5904\u7406\u4f18\u5148\u7ea7\u9ad8\u3001\u65f6\u5ef6\u654f\u611f\u6027\u9ad8\u3001\u9519\u8bef\u5bb9\u5fcd\u5ea6\u4f4e\u4ee5\u53ca\u767d\u5929\u8d1f\u8f7d\u9ad8\u665a\u4e0a\u8d1f\u8f7d\u4f4e\u7b49\u7279\u70b9\u3002 \u800c\u79bb\u7ebf\u4efb\u52a1\u5982 AI \u8bad\u7ec3/\u5927\u6570\u636e\u5904\u7406\u7b49\uff0c\u5177\u6709\u5904\u7406\u4f18\u5148\u7ea7\u4f4e\u3001\u65f6\u5ef6\u654f\u611f\u6027\u4f4e\u3001\u9519\u8bef\u5bb9\u5fcd\u5ea6\u9ad8\u4ee5\u53ca\u8fd0\u884c\u65f6\u8d1f\u8f7d\u4e00\u76f4\u8f83\u9ad8\u7b49\u7279\u70b9\u3002 \u7531\u4e8e\u5728\u7ebf\u670d\u52a1\u4e0e\u79bb\u7ebf\u4efb\u52a1\u8fd9\u4e24\u7c7b\u5de5\u4f5c\u8d1f\u8f7d\u5929\u7136\u5b58\u5728\u4e92\u8865\u6027\uff0c\u5c06\u5728/\u79bb\u7ebf\u4e1a\u52a1\u6df7\u5408\u90e8\u7f72\u662f\u63d0\u9ad8\u670d\u52a1\u5668\u8d44\u6e90\u5229\u7528\u7387\u7684\u6709\u6548\u9014\u5f84\u3002

                                                        • Offline workloads can be co-located onto the servers of online services, letting them fully use those servers' idle resources. This raises the resource utilization of the online-service servers, cutting cost while improving efficiency.

                                                        • When a workload temporarily needs a large amount of resources, online services can be elastically co-located onto the servers of offline tasks, with the online services' resource demands satisfied first; once the temporary demand ends, the resources are returned to the offline tasks.

                                                        The open-source project Koordinator is currently used as the co-location solution.

                                                        Koordinator is a QoS-based scheduling system for hybrid workloads on Kubernetes. It aims to improve the runtime efficiency and reliability of latency-sensitive workloads and batch jobs, simplify the complexity of resource-related configuration tuning, and increase pod deployment density to improve resource utilization.

                                                        "},{"location":"admin/kpanda/best-practice/co-located/index.html#koordinator-qos","title":"Koordinator QoS","text":"

                                                        The Koordinator scheduling system supports five QoS types:

                                                        QoS | Characteristics | Description
                                                        SYSTEM | System processes; resource-limited | For system services such as DaemonSets: their latency must be guaranteed, but the resource usage of these system-service containers on the node also needs to be limited so that they do not take up too many resources
                                                        LSE (Latency Sensitive Exclusive) | Reserves resources and organizes pods of the same QoS to share resources | Rarely used; common for middleware-type applications, generally in a dedicated resource pool
                                                        LSR (Latency Sensitive Reserved) | Reserves resources for better determinism | Similar to the community's Guaranteed; CPU cores are bound
                                                        LS (Latency Sensitive) | Shares resources, with better elasticity for burst traffic | The typical QoS level for microservice workloads, achieving better resource elasticity and more flexible resource adjustment
                                                        BE (Best Effort) | Shares resources excluding those of LSE; limited resource quality, and may even be killed in extreme cases | The typical QoS level for batch jobs: stable compute throughput over a period of time, at low resource cost
                                                        "},{"location":"admin/kpanda/best-practice/co-located/index.html#koordinator-qos-cpu","title":"Koordinator QoS CPU Orchestration Principles","text":"
                                                        • The Request and Limit of LSE/LSR pods must be equal, and the CPU value must be an integer multiple of 1000.
                                                        • CPUs allocated to an LSE pod are fully exclusive and must not be shared. If the node uses hyper-threading, isolation is only guaranteed at the logical-core level, but better isolation can be obtained with the CPUBindPolicyFullPCPUs policy.
                                                        • CPUs allocated to an LSR pod can only be shared with BE pods.
                                                        • LS pods are bound to the shared CPU pool, excluding CPUs held exclusively by LSE/LSR pods.
                                                        • BE pods are bound to all CPUs on the node except those held exclusively by LSE pods.
                                                        • If the kubelet's CPU manager policy is static, K8s Guaranteed pods that are already running are equivalent to Koordinator LSR.
                                                        • If the kubelet's CPU manager policy is none, K8s Guaranteed pods that are already running are equivalent to Koordinator LS.
                                                        • A newly created K8s Guaranteed pod with no Koordinator QoS specified is equivalent to Koordinator LS. A way to check the kubelet policy is sketched below.
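
                                                        Since the equivalences above depend on the kubelet's CPU manager policy, it can help to check it first. A sketch, assuming the default kubelet config path on the node:

                                                        grep cpuManagerPolicy /var/lib/kubelet/config.yaml\n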
                                                        "},{"location":"admin/kpanda/best-practice/co-located/index.html#_2","title":"\u5feb\u901f\u4e0a\u624b","text":""},{"location":"admin/kpanda/best-practice/co-located/index.html#_3","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                        • \u5df2\u7ecf\u90e8\u7f72 AI \u7b97\u529b\u4e2d\u5fc3\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u4e14\u5e73\u53f0\u8fd0\u884c\u6b63\u5e38\u3002
                                                        • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                                                        • \u5f53\u524d\u96c6\u7fa4\u5df2\u5b89\u88c5 koordinator \u5e76\u6b63\u5e38\u8fd0\u884c\uff0c\u5b89\u88c5\u6b65\u9aa4\u53ef\u53c2\u8003 koordinator \u79bb\u7ebf\u5b89\u88c5\u3002
                                                        "},{"location":"admin/kpanda/best-practice/co-located/index.html#_4","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                        \u4ee5\u4e0b\u793a\u4f8b\u4e2d\u521b\u5efa4\u4e2a\u526f\u672c\u6570\u4e3a1\u7684 deployment, \u8bbe\u7f6e QoS \u7c7b\u522b\u4e3a LSE, LSR, LS, BE, \u5f85 pod \u521b\u5efa\u5b8c\u6210\u540e\uff0c\u89c2\u5bdf\u5404 pod \u7684 CPU \u5206\u914d\u60c5\u51b5\u3002

                                                        1. \u521b\u5efa\u540d\u79f0\u4e3a nginx-lse \u7684 deployment\uff0cQoS \u7c7b\u522b\u4e3a LSE, yaml \u6587\u4ef6\u5982\u4e0b\u3002

                                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nginx-lse\n  labels:\n    app: nginx-lse\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: nginx-lse\n  template:\n    metadata:\n      name: nginx-lse\n      labels:\n        app: nginx-lse\n        koordinator.sh/qosClass: LSE # set the QoS class to LSE\n        # the scheduler will spread logical CPUs evenly across physical cores\n      annotations:\n          scheduling.koordinator.sh/resource-spec: '{\"preferredCPUBindPolicy\": \"SpreadByPCPUs\"}'\n    spec:\n      schedulerName: koord-scheduler # use the koord-scheduler scheduler\n      containers:\n      - name: nginx\n        image: release.daocloud.io/kpanda/nginx:1.25.3-alpine\n        resources:\n          limits:\n            cpu: '2'\n          requests:\n            cpu: '2'\n      priorityClassName: koord-prod\n
                                                        2. Create a deployment named nginx-lsr with QoS class LSR; the YAML is as follows.

                                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nginx-lsr\n  labels:\n    app: nginx-lsr\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: nginx-lsr\n  template:\n    metadata:\n      name: nginx-lsr\n      labels:\n        app: nginx-lsr\n        koordinator.sh/qosClass: LSR # set the QoS class to LSR\n        # the scheduler will spread logical CPUs evenly across physical cores\n      annotations:\n          scheduling.koordinator.sh/resource-spec: '{\"preferredCPUBindPolicy\": \"SpreadByPCPUs\"}'\n    spec:\n      schedulerName: koord-scheduler # use the koord-scheduler scheduler\n      containers:\n      - name: nginx\n        image: release.daocloud.io/kpanda/nginx:1.25.3-alpine\n        resources:\n          limits:\n            cpu: '2'\n          requests:\n            cpu: '2'\n      priorityClassName: koord-prod\n
                                                        3. Create a deployment named nginx-ls with QoS class LS; the YAML is as follows.

                                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: nginx-ls\n  labels:\n    app: nginx-ls\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: nginx-ls\n  template:\n    metadata:\n      name: nginx-ls\n      labels:\n        app: nginx-ls\n        koordinator.sh/qosClass: LS # set the QoS class to LS\n        # the scheduler will spread logical CPUs evenly across physical cores\n      annotations:\n          scheduling.koordinator.sh/resource-spec: '{\"preferredCPUBindPolicy\": \"SpreadByPCPUs\"}'\n    spec:\n      schedulerName: koord-scheduler \n      containers:\n      - name: nginx\n        image: release.daocloud.io/kpanda/nginx:1.25.3-alpine\n        resources:\n          limits:\n            cpu: '2'\n          requests:\n            cpu: '2'\n      priorityClassName: koord-prod\n
4. Create a Deployment named nginx-be with QoS class BE. The YAML file is as follows.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-be
  labels:
    app: nginx-be
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-be
  template:
    metadata:
      name: nginx-be
      labels:
        app: nginx-be
        koordinator.sh/qosClass: BE # Set the QoS class to BE
      annotations:
        # The scheduler spreads logical CPUs evenly across physical cores
        scheduling.koordinator.sh/resource-spec: '{"preferredCPUBindPolicy": "SpreadByPCPUs"}'
    spec:
      schedulerName: koord-scheduler # Use the koord-scheduler scheduler
      containers:
      - name: nginx
        image: release.daocloud.io/kpanda/nginx:1.25.3-alpine
        resources:
          limits:
            kubernetes.io/batch-cpu: 2k
          requests:
            kubernetes.io/batch-cpu: 2k
      priorityClassName: koord-batch

Check the Pod status; once the Pods are Running, inspect the CPU allocation of each Pod.

[root@controller-node-1 ~]# kubectl get pod
NAME                         READY   STATUS    RESTARTS   AGE
nginx-be-577c946b89-js2qn    1/1     Running   0          4h41m
nginx-ls-54746c8cf8-rh4b7    1/1     Running   0          4h51m
nginx-lse-56c9cd77f5-cdqbd   1/1     Running   0          4h41m
nginx-lsr-c7fdb97d8-b58h8    1/1     Running   0          4h51m

This example uses the get_cpuset.sh script to view a Pod's cpuset information. The script content is shown below.

#!/bin/bash

# Take the Pod name and (optional) namespace as input parameters
POD_NAME=$1
NAMESPACE=${2-default}

# Make sure a Pod name and namespace were provided
if [ -z "$POD_NAME" ] || [ -z "$NAMESPACE" ]; then
    echo "Usage: $0 <pod_name> [namespace]"
    exit 1
fi

# Use kubectl to get the Pod's UID, QoS class, and first container ID
POD_INFO=$(kubectl get pod "$POD_NAME" -n "$NAMESPACE" -o jsonpath="{.metadata.uid} {.status.qosClass} {.status.containerStatuses[0].containerID}")
read -r POD_UID POD_QOS CONTAINER_ID <<< "$POD_INFO"

# Check whether the UID and QoS class were retrieved successfully
if [ -z "$POD_UID" ] || [ -z "$POD_QOS" ]; then
    echo "Failed to get UID or QoS Class for Pod $POD_NAME in namespace $NAMESPACE."
    exit 1
fi

POD_UID="${POD_UID//-/_}"
CONTAINER_ID="${CONTAINER_ID//containerd:\/\//cri-containerd-}".scope

# Build the cgroup path according to the QoS class
case "$POD_QOS" in
    Guaranteed)
        QOS_PATH="kubepods-pod$POD_UID.slice"
        ;;
    Burstable)
        QOS_PATH="kubepods-burstable.slice/kubepods-burstable-pod$POD_UID.slice"
        ;;
    BestEffort)
        QOS_PATH="kubepods-besteffort.slice/kubepods-besteffort-pod$POD_UID.slice"
        ;;
    *)
        echo "Unknown QoS Class: $POD_QOS"
        exit 1
        ;;
esac

CPUGROUP_PATH="/sys/fs/cgroup/kubepods.slice/$QOS_PATH"

# Check whether the cgroup path exists
if [ ! -d "$CPUGROUP_PATH" ]; then
    echo "CPUs cgroup path for Pod $POD_NAME does not exist: $CPUGROUP_PATH"
    exit 1
fi

# Read and print the cpuset value
CPUSET=$(cat "$CPUGROUP_PATH/$CONTAINER_ID/cpuset.cpus")
echo "CPU set for Pod $POD_NAME ($POD_QOS QoS): $CPUSET"

Check the cpuset allocation of each Pod.

1. The Pod with QoS class LSE exclusively occupies cores 0-1 and does not share CPUs with Pods of other classes.

[root@controller-node-1 ~]# ./get_cpuset.sh nginx-lse-56c9cd77f5-cdqbd
CPU set for Pod nginx-lse-56c9cd77f5-cdqbd (Burstable QoS): 0-1
2. The Pod with QoS class LSR is bound to CPU cores 2-3, which may be shared with BE Pods.

[root@controller-node-1 ~]# ./get_cpuset.sh nginx-lsr-c7fdb97d8-b58h8
CPU set for Pod nginx-lsr-c7fdb97d8-b58h8 (Burstable QoS): 2-3
3. The Pod with QoS class LS uses CPU cores 4-15, i.e. the shared CPU pool outside the cores exclusively occupied by the LSE/LSR Pods.

[root@controller-node-1 ~]# ./get_cpuset.sh nginx-ls-54746c8cf8-rh4b7
CPU set for Pod nginx-ls-54746c8cf8-rh4b7 (Burstable QoS): 4-15
4. The Pod with QoS class BE can use the CPUs outside those exclusively occupied by the LSE Pod.

[root@controller-node-1 ~]# ./get_cpuset.sh nginx-be-577c946b89-js2qn
CPU set for Pod nginx-be-577c946b89-js2qn (BestEffort QoS): 2,4-12
                                                        "},{"location":"admin/kpanda/best-practice/co-located/install.html","title":"Koordinator \u79bb\u7ebf\u5b89\u88c5","text":"

Koordinator is a QoS-based scheduling system for hybrid workloads on Kubernetes. It aims to improve the runtime efficiency and reliability of both latency-sensitive workloads and batch jobs, simplify the complexity of resource-related configuration tuning, and increase Pod deployment density to improve resource utilization.

The AI Computing Center comes preloaded with the Koordinator v1.5.0 offline package.

This page describes how to deploy Koordinator offline.

                                                        "},{"location":"admin/kpanda/best-practice/co-located/install.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                        1. \u7528\u6237\u5df2\u7ecf\u5728\u5e73\u53f0\u4e0a\u5b89\u88c5\u4e86 v0.20.0 \u53ca\u4ee5\u4e0a\u7248\u672c\u7684 addon \u79bb\u7ebf\u5305\u3002
                                                        2. \u5f85\u5b89\u88c5\u96c6\u7fa4\u7684 Kubernetes version >= 1.18.
                                                        3. \u4e3a\u4e86\u6700\u597d\u7684\u4f53\u9a8c\uff0c\u63a8\u8350\u4f7f\u7528 linux kernel 4.19 \u6216\u8005\u66f4\u9ad8\u7248\u672c\u3002
                                                        "},{"location":"admin/kpanda/best-practice/co-located/install.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                        \u53c2\u8003\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 Koordinator \u63d2\u4ef6\u3002

                                                        1. \u767b\u5f55\u5e73\u53f0\uff0c\u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 -> \u5f85\u5b89\u88c5 Koordinator \u7684\u96c6\u7fa4 -> \u8fdb\u5165\u96c6\u7fa4\u8be6\u60c5\u3002

                                                        2. \u5728 Helm \u6a21\u677f \u9875\u9762\uff0c\u9009\u62e9 \u5168\u90e8\u4ed3\u5e93 \uff0c\u641c\u7d22 koordinator \u3002

                                                        3. \u9009\u62e9 koordinator \uff0c\u70b9\u51fb \u5b89\u88c5 \u3002

                                                        4. \u8fdb\u5165 koordinator \u5b89\u88c5\u9875\u9762\uff0c\u70b9\u51fb \u786e\u5b9a\uff0c\u4f7f\u7528\u9ed8\u8ba4\u914d\u7f6e\u5b89\u88c5 koordinator\u3002

                                                        5. \u67e5\u770b koordinator-system \u547d\u540d\u7a7a\u95f4\u4e0b\u7684 Pod \u662f\u5426\u6b63\u5e38\u8fd0\u884c
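A minimal sketch of such a check, assuming Koordinator was installed into its default koordinator-system namespace:

  # All Koordinator components (typically koord-manager, koord-scheduler,
  # and the koordlet DaemonSet) should reach the Running state
  kubectl get pods -n koordinator-system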

                                                        "},{"location":"admin/kpanda/clusterops/cluster-oversold.html","title":"\u96c6\u7fa4\u52a8\u6001\u8d44\u6e90\u8d85\u5356","text":"

                                                        \u76ee\u524d\uff0c\u8bb8\u591a\u4e1a\u52a1\u5b58\u5728\u5cf0\u503c\u548c\u4f4e\u8c37\u7684\u73b0\u8c61\u3002\u4e3a\u4e86\u786e\u4fdd\u670d\u52a1\u7684\u6027\u80fd\u548c\u7a33\u5b9a\u6027\uff0c\u5728\u90e8\u7f72\u670d\u52a1\u65f6\uff0c\u901a\u5e38\u4f1a\u6839\u636e\u5cf0\u503c\u9700\u6c42\u6765\u7533\u8bf7\u8d44\u6e90\u3002 \u7136\u800c\uff0c\u5cf0\u503c\u671f\u53ef\u80fd\u975e\u5e38\u77ed\u6682\uff0c\u5bfc\u81f4\u5728\u975e\u5cf0\u503c\u671f\u65f6\u8d44\u6e90\u88ab\u6d6a\u8d39\u3002 \u96c6\u7fa4\u8d44\u6e90\u8d85\u5356 \u5c31\u662f\u5c06\u8fd9\u4e9b\u7533\u8bf7\u4e86\u800c\u672a\u4f7f\u7528\u7684\u8d44\u6e90\uff08\u5373\u7533\u8bf7\u91cf\u4e0e\u4f7f\u7528\u91cf\u7684\u5dee\u503c\uff09\u5229\u7528\u8d77\u6765\uff0c\u4ece\u800c\u63d0\u5347\u96c6\u7fa4\u8d44\u6e90\u5229\u7528\u7387\uff0c\u51cf\u5c11\u8d44\u6e90\u6d6a\u8d39\u3002

                                                        \u672c\u6587\u4e3b\u8981\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528\u96c6\u7fa4\u52a8\u6001\u8d44\u6e90\u8d85\u5356\u529f\u80fd\u3002

                                                        "},{"location":"admin/kpanda/clusterops/cluster-oversold.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                        • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                                                        • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 Cluster Admin \uff0c \u8be6\u60c5\u53ef\u53c2\u8003\u96c6\u7fa4\u6388\u6743\u3002
                                                        "},{"location":"admin/kpanda/clusterops/cluster-oversold.html#_3","title":"\u5f00\u542f\u96c6\u7fa4\u8d85\u5356","text":"
                                                        1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762

                                                        2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u96c6\u7fa4\u8fd0\u7ef4 -> \u96c6\u7fa4\u8bbe\u7f6e \uff0c\u7136\u540e\u9009\u62e9 \u9ad8\u7ea7\u914d\u7f6e \u9875\u7b7e

                                                        3. \u6253\u5f00\u96c6\u7fa4\u8d85\u5356\uff0c\u8bbe\u7f6e\u8d85\u5356\u6bd4

                                                          • \u82e5\u672a\u5b89\u88c5 cro-operator \u63d2\u4ef6\uff0c\u70b9\u51fb \u7acb\u5373\u5b89\u88c5 \u6309\u94ae\uff0c\u5b89\u88c5\u6d41\u7a0b\u53c2\u8003\u7ba1\u7406 Helm \u5e94\u7528
                                                          • \u82e5\u5df2\u5b89\u88c5 cro-operator \u63d2\u4ef6\uff0c\u6253\u5f00\u96c6\u7fa4\u8d85\u5356\u5f00\u5173\uff0c\u5219\u53ef\u4ee5\u5f00\u59cb\u4f7f\u7528\u96c6\u7fa4\u8d85\u5356\u529f\u80fd\u3002

                                                          Note

                                                          \u9700\u8981\u5728\u96c6\u7fa4\u4e0b\u5bf9\u5e94\u7684 namespace \u6253\u4e0a\u5982\u4e0b\u6807\u7b7e\uff0c\u96c6\u7fa4\u8d85\u5356\u7b56\u7565\u624d\u80fd\u751f\u6548\u3002

                                                          clusterresourceoverrides.admission.autoscaling.openshift.io/enabled: \"true\"\n
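A sketch of applying that label with kubectl, where my-namespace is a placeholder for the namespace you actually want to oversubscribe:

  # Label the namespace so the oversubscription policy applies to it
  kubectl label namespace my-namespace \
    clusterresourceoverrides.admission.autoscaling.openshift.io/enabled=true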

                                                        "},{"location":"admin/kpanda/clusterops/cluster-oversold.html#_4","title":"\u4f7f\u7528\u96c6\u7fa4\u8d85\u5356","text":"

                                                        \u8bbe\u7f6e\u597d\u96c6\u7fa4\u52a8\u6001\u8d44\u6e90\u8d85\u5356\u6bd4\u540e\uff0c\u4f1a\u5728\u5de5\u4f5c\u8d1f\u8f7d\u8fd0\u884c\u65f6\u751f\u6548\u3002\u4e0b\u6587\u4ee5 niginx \u4e3a\u4f8b\uff0c\u9a8c\u8bc1\u4f7f\u7528\u8d44\u6e90\u8d85\u5356\u80fd\u529b\u3002

                                                        1. \u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d nginx \u5e76\u8bbe\u7f6e\u5bf9\u5e94\u7684\u8d44\u6e90\u9650\u5236\u503c\uff0c\u521b\u5efa\u6d41\u7a0b\u53c2\u8003\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\uff08Deployment\uff09

                                                        2. \u67e5\u770b\u5de5\u4f5c\u8d1f\u8f7d\u7684 Pod \u8d44\u6e90\u7533\u8bf7\u503c\u4e0e\u9650\u5236\u503c\u7684\u6bd4\u503c\u662f\u5426\u7b26\u5408\u8d85\u552e\u6bd4
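A minimal sketch of this check, assuming the Deployment is labeled app=nginx and runs in the default namespace:

  # Print each Pod's admitted resources; with an oversubscription ratio of 2,
  # the CPU/memory requests should be roughly half of the limits
  kubectl get pod -n default -l app=nginx \
    -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[0].resources}{"\n"}{end}'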

                                                        "},{"location":"admin/kpanda/clusterops/cluster-settings.html","title":"\u96c6\u7fa4\u8bbe\u7f6e","text":"

                                                        \u96c6\u7fa4\u8bbe\u7f6e\u7528\u4e8e\u4e3a\u60a8\u7684\u96c6\u7fa4\u81ea\u5b9a\u4e49\u9ad8\u7ea7\u7279\u6027\u8bbe\u7f6e\uff0c\u5305\u62ec\u662f\u5426\u542f\u7528 GPU\u3001Helm \u4ed3\u5e93\u5237\u65b0\u5468\u671f\u3001Helm \u64cd\u4f5c\u8bb0\u5f55\u4fdd\u7559\u7b49\u3002

                                                        • \u542f\u7528 GPU\uff1a\u9700\u8981\u9884\u5148\u5728\u96c6\u7fa4\u4e0a\u5b89\u88c5 GPU \u5361\u53ca\u5bf9\u5e94\u9a71\u52a8\u63d2\u4ef6\u3002

                                                          \u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u6700\u8fd1\u64cd\u4f5c -> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \u3002

                                                        • Helm \u64cd\u4f5c\u57fa\u7840\u955c\u50cf\u3001\u4ed3\u5e93\u5237\u65b0\u5468\u671f\u3001\u64cd\u4f5c\u8bb0\u5f55\u4fdd\u7559\u6761\u6570\u3001\u662f\u5426\u5f00\u542f\u96c6\u7fa4\u5220\u9664\u4fdd\u62a4\uff08\u5f00\u542f\u540e\u96c6\u7fa4\u5c06\u4e0d\u80fd\u76f4\u63a5\u5378\u8f7d\uff09

                                                        "},{"location":"admin/kpanda/clusterops/latest-operations.html","title":"\u6700\u8fd1\u64cd\u4f5c","text":"

                                                        \u5728\u8be5\u9875\u9762\u53ef\u4ee5\u67e5\u770b\u6700\u8fd1\u7684\u96c6\u7fa4\u64cd\u4f5c\u8bb0\u5f55\u548c Helm \u64cd\u4f5c\u8bb0\u5f55\uff0c\u4ee5\u53ca\u5404\u9879\u64cd\u4f5c\u7684 YAML \u6587\u4ef6\u548c\u65e5\u5fd7\uff0c\u4e5f\u53ef\u4ee5\u5220\u9664\u67d0\u4e00\u6761\u8bb0\u5f55\u3002

                                                        \u8bbe\u7f6e Helm \u64cd\u4f5c\u7684\u4fdd\u7559\u6761\u6570\uff1a

                                                        \u7cfb\u7edf\u9ed8\u8ba4\u4fdd\u7559\u6700\u8fd1 100 \u6761 Helm \u64cd\u4f5c\u8bb0\u5f55\u3002\u82e5\u4fdd\u7559\u6761\u6570\u592a\u591a\uff0c\u53ef\u80fd\u4f1a\u9020\u6210\u6570\u636e\u5197\u4f59\uff0c\u4fdd\u7559\u6761\u6570\u592a\u5c11\u53ef\u80fd\u4f1a\u9020\u6210\u60a8\u6240\u9700\u8981\u7684\u5173\u952e\u64cd\u4f5c\u8bb0\u5f55\u7684\u7f3a\u5931\u3002\u9700\u8981\u6839\u636e\u5b9e\u9645\u60c5\u51b5\u8bbe\u7f6e\u5408\u7406\u7684\u4fdd\u7559\u6570\u91cf\u3002\u5177\u4f53\u6b65\u9aa4\u5982\u4e0b\uff1a

                                                        1. \u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u6700\u8fd1\u64cd\u4f5c -> Helm \u64cd\u4f5c -> \u8bbe\u7f6e\u4fdd\u7559\u6761\u6570 \u3002

                                                        2. \u8bbe\u7f6e\u9700\u8981\u4fdd\u7559\u591a\u5c11\u6761 Helm \u64cd\u4f5c\u8bb0\u5f55\uff0c\u5e76\u70b9\u51fb \u786e\u5b9a \u3002

                                                        "},{"location":"admin/kpanda/clusters/access-cluster.html","title":"\u8bbf\u95ee\u96c6\u7fa4","text":"

                                                        \u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\u63a5\u5165\u6216\u521b\u5efa\u7684\u96c6\u7fa4\uff0c\u4e0d\u4ec5\u53ef\u4ee5\u901a\u8fc7 UI \u754c\u9762\u76f4\u63a5\u8bbf\u95ee\uff0c\u4e5f\u53ef\u4ee5\u901a\u8fc7\u5176\u4ed6\u4e24\u79cd\u65b9\u5f0f\u8fdb\u884c\u8bbf\u95ee\u63a7\u5236\uff1a

                                                        • \u901a\u8fc7 CloudShell \u5728\u7ebf\u8bbf\u95ee
                                                        • \u4e0b\u8f7d\u96c6\u7fa4\u8bc1\u4e66\u540e\u901a\u8fc7 kubectl \u8fdb\u884c\u8bbf\u95ee

                                                        Note

                                                        \u8bbf\u95ee\u96c6\u7fa4\u65f6\uff0c\u7528\u6237\u5e94\u5177\u6709 Cluster Admin \u6743\u9650\u6216\u66f4\u9ad8\u6743\u9650\u3002

                                                        "},{"location":"admin/kpanda/clusters/access-cluster.html#cloudshell","title":"\u901a\u8fc7 CloudShell \u8bbf\u95ee","text":"
                                                        1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9009\u62e9\u9700\u8981\u901a\u8fc7 CloudShell \u8bbf\u95ee\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u56fe\u6807\u5e76\u5728\u4e0b\u62c9\u5217\u8868\u4e2d\u70b9\u51fb \u63a7\u5236\u53f0 \u3002

                                                        2. \u5728 CloudShell \u63a7\u5236\u53f0\u6267\u884c kubectl get node \u547d\u4ee4\uff0c\u9a8c\u8bc1 CloudShell \u4e0e\u96c6\u7fa4\u7684\u8fde\u901a\u6027\u3002\u5982\u56fe\uff0c\u63a7\u5236\u53f0\u5c06\u8fd4\u56de\u96c6\u7fa4\u4e0b\u7684\u8282\u70b9\u4fe1\u606f\u3002
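For reference, such a check looks like the following; the node name, role, and version shown here are placeholders and will differ per cluster:

  kubectl get node
  # NAME          STATUS   ROLES           AGE   VERSION
  # <node-name>   Ready    control-plane   10d   v1.28.x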

Now you can access and manage the cluster through CloudShell.

                                                        "},{"location":"admin/kpanda/clusters/access-cluster.html#kubectl","title":"\u901a\u8fc7 kubectl \u8bbf\u95ee","text":"

                                                        \u901a\u8fc7\u672c\u5730\u8282\u70b9\u8bbf\u95ee\u5e76\u7ba1\u7406\u4e91\u7aef\u96c6\u7fa4\u65f6\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u6761\u4ef6\uff1a

                                                        • \u672c\u5730\u8282\u70b9\u548c\u4e91\u7aef\u96c6\u7fa4\u7684\u7f51\u7edc\u4e92\u8054\u4e92\u901a\u3002
                                                        • \u5df2\u7ecf\u5c06\u96c6\u7fa4\u8bc1\u4e66\u4e0b\u8f7d\u5230\u4e86\u672c\u5730\u8282\u70b9\u3002
                                                        • \u672c\u5730\u8282\u70b9\u5df2\u7ecf\u5b89\u88c5\u4e86 kubectl \u5de5\u5177\u3002\u5173\u4e8e\u8be6\u7ec6\u7684\u5b89\u88c5\u65b9\u5f0f\uff0c\u8bf7\u53c2\u9605\u5b89\u88c5 kubectl\u3002

                                                        \u6ee1\u8db3\u4e0a\u8ff0\u6761\u4ef6\u540e\uff0c\u6309\u7167\u4e0b\u65b9\u6b65\u9aa4\u4ece\u672c\u5730\u8bbf\u95ee\u4e91\u7aef\u96c6\u7fa4\uff1a

                                                        1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9009\u62e9\u9700\u8981\u4e0b\u8f7d\u8bc1\u4e66\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \uff0c\u5e76\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u70b9\u51fb \u8bc1\u4e66\u83b7\u53d6 \u3002

                                                        2. \u9009\u62e9\u8bc1\u4e66\u6709\u6548\u671f\u5e76\u70b9\u51fb \u4e0b\u8f7d\u8bc1\u4e66 \u3002

                                                        3. \u6253\u5f00\u4e0b\u8f7d\u597d\u7684\u96c6\u7fa4\u8bc1\u4e66\uff0c\u5c06\u8bc1\u4e66\u5185\u5bb9\u590d\u5236\u81f3\u672c\u5730\u8282\u70b9\u7684 config \u6587\u4ef6\u3002

                                                          kubectl \u5de5\u5177\u9ed8\u8ba4\u4f1a\u4ece\u672c\u5730\u8282\u70b9\u7684 $HOME/.kube \u76ee\u5f55\u4e0b\u67e5\u627e\u540d\u4e3a config \u7684\u6587\u4ef6\u3002\u8be5\u6587\u4ef6\u5b58\u50a8\u4e86\u76f8\u5173\u96c6\u7fa4\u7684\u8bbf\u95ee\u51ed\u8bc1\uff0ckubectl \u53ef\u4ee5\u51ed\u8be5\u914d\u7f6e\u6587\u4ef6\u8fde\u63a5\u81f3\u96c6\u7fa4\u3002
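A minimal sketch of installing the downloaded credentials, assuming they were saved as cluster.kubeconfig in the current directory (the file name is only an example):

  # Back up any existing kubeconfig, then put the downloaded one in place
  mkdir -p "$HOME/.kube"
  [ -f "$HOME/.kube/config" ] && cp "$HOME/.kube/config" "$HOME/.kube/config.bak"
  cp cluster.kubeconfig "$HOME/.kube/config"

Alternatively, keep the downloaded file elsewhere and point kubectl at it via the KUBECONFIG environment variable or the --kubeconfig flag.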

4. Run the following command on the local node to verify connectivity to the cluster:

  kubectl get pod -n default

  The expected output is similar to:

  NAME                            READY   STATUS      RESTARTS    AGE
  dao-2048-2048-58c7f7fc5-mq7h4   1/1     Running     0           30h

Now you can access and manage the cluster locally through kubectl.

                                                        "},{"location":"admin/kpanda/clusters/cluster-role.html","title":"\u96c6\u7fa4\u89d2\u8272","text":"

                                                        \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u57fa\u4e8e\u96c6\u7fa4\u7684\u4e0d\u540c\u529f\u80fd\u5b9a\u4f4d\u5bf9\u96c6\u7fa4\u8fdb\u884c\u4e86\u89d2\u8272\u5206\u7c7b\uff0c\u5e2e\u52a9\u7528\u6237\u66f4\u597d\u5730\u7ba1\u7406 IT \u57fa\u7840\u8bbe\u65bd\u3002

                                                        "},{"location":"admin/kpanda/clusters/cluster-role.html#_2","title":"\u5168\u5c40\u670d\u52a1\u96c6\u7fa4","text":"

                                                        \u6b64\u96c6\u7fa4\u7528\u4e8e\u8fd0\u884c\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7ec4\u4ef6\uff0c\u4f8b\u5982\u5bb9\u5668\u7ba1\u7406\u3001\u5168\u5c40\u7ba1\u7406\u3001\u53ef\u89c2\u6d4b\u6027\u3001\u955c\u50cf\u4ed3\u5e93\u7b49\u3002 \u4e00\u822c\u4e0d\u627f\u8f7d\u4e1a\u52a1\u8d1f\u8f7d\u3002

• K8s version: 1.22+
• Operating system: RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD
• Full cluster lifecycle management: supported
• K8s resource management: supported
• Cloud-native storage: supported
• Cloud-native networking: Calico, Cilium, Multus, and other CNIs
• Policy management: supports network policies, quota policies, resource limits, disaster recovery policies, and security policies
"},{"location":"admin/kpanda/clusters/cluster-role.html#_3","title":"Management Cluster","text":"

This cluster is used to manage worker clusters and generally does not host business workloads.

• K8s version: 1.22+
• Operating system: RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD
• Full cluster lifecycle management: supported
• K8s resource management: supported
• Cloud-native storage: supported
• Cloud-native networking: Calico, Cilium, Multus, and other CNIs
• Policy management: supports network policies, quota policies, resource limits, disaster recovery policies, and security policies
"},{"location":"admin/kpanda/clusters/cluster-role.html#_4","title":"Worker Cluster","text":"

This is a cluster created through container management, mainly used to host business workloads. It is managed by the management cluster.

• K8s version: K8s 1.22 and above are supported
• Operating system: RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD
• Full cluster lifecycle management: supported
• K8s resource management: supported
• Cloud-native storage: supported
• Cloud-native networking: Calico, Cilium, Multus, and other CNIs
• Policy management: supports network policies, quota policies, resource limits, disaster recovery policies, and security policies
"},{"location":"admin/kpanda/clusters/cluster-role.html#_5","title":"Attached Cluster","text":"

This cluster is used to attach existing standard K8s clusters, including but not limited to self-built clusters in local data centers, clusters provided by public cloud vendors, clusters provided by private cloud vendors, edge clusters, Xinchuang clusters, and heterogeneous clusters. It is mainly used to carry business workloads.

• K8s version: 1.18+
• Supported vendors: VMware Tanzu, Amazon EKS, Red Hat OpenShift, SUSE Rancher, Alibaba ACK, Huawei CCE, Tencent TKE, standard K8s clusters, AI Computing Platform
• Full cluster lifecycle management: not supported
• K8s resource management: supported
• Cloud-native storage: supported
• Cloud-native networking: depends on the network mode of the attached cluster's distribution
• Policy management: supports network policies, quota policies, resource limits, disaster recovery policies, and security policies

Note

A cluster can hold multiple cluster roles; for example, a single cluster can be a global service cluster as well as a management cluster or worker cluster.

                                                        "},{"location":"admin/kpanda/clusters/cluster-scheduler-plugin.html","title":"\u5982\u4f55\u5728\u96c6\u7fa4\u4e2d\u90e8\u7f72\u7b2c\u4e8c\u8c03\u5ea6\u5668 scheduler-plugins","text":"

                                                        \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5728\u96c6\u7fa4\u4e2d\u90e8\u7f72\u7b2c\u4e8c\u4e2a\u8c03\u5ea6\u5668 scheduler-plugins\u3002

                                                        "},{"location":"admin/kpanda/clusters/cluster-scheduler-plugin.html#scheduler-plugins_1","title":"\u4e3a\u4ec0\u4e48\u9700\u8981 scheduler-plugins\uff1f","text":"

                                                        \u901a\u8fc7\u5e73\u53f0\u521b\u5efa\u7684\u96c6\u7fa4\u4e2d\u4f1a\u5b89\u88c5 K8s \u539f\u751f\u7684\u8c03\u5ea6\u5668\uff0c\u4f46\u662f\u539f\u751f\u7684\u8c03\u5ea6\u5668\u5b58\u5728\u5f88\u591a\u7684\u5c40\u9650\u6027\uff1a

                                                        • \u539f\u751f\u7684\u8c03\u5ea6\u5668\u65e0\u6cd5\u6ee1\u8db3\u8c03\u5ea6\u9700\u6c42\uff0c\u4f60\u53ef\u4ee5\u9009\u62e9\u4f7f\u7528 CoScheduling\u3001 CapacityScheduling \u7b49 scheduler-plugins \u63d2\u4ef6\u3002
                                                        • \u5728\u7279\u6b8a\u7684\u573a\u666f\uff0c\u9700\u8981\u65b0\u7684\u8c03\u5ea6\u5668\u6765\u5b8c\u6210\u8c03\u5ea6\u4efb\u52a1\u800c\u4e0d\u5f71\u54cd\u539f\u751f\u8c03\u5ea6\u5668\u7684\u6d41\u7a0b\u3002
                                                        • \u533a\u5206\u4e0d\u540c\u529f\u80fd\u7684\u8c03\u5ea6\u5668\uff0c\u901a\u8fc7\u5207\u6362\u8c03\u5ea6\u5668\u540d\u79f0\u6765\u5b9e\u73b0\u4e0d\u540c\u7684\u8c03\u5ea6\u573a\u666f\u3002

                                                        \u672c\u6587\u4ee5\u4f7f\u7528 vgpu \u8c03\u5ea6\u5668\u7684\u540c\u65f6\uff0c\u60f3\u7ed3\u5408 scheduler-plugins \u7684 coscheduling \u63d2\u4ef6\u80fd\u529b\u7684\u573a\u666f\u4e3a\u793a\u4f8b\uff0c\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5\u5e76\u4f7f\u7528 scheduler-plugins\u3002

                                                        "},{"location":"admin/kpanda/clusters/cluster-scheduler-plugin.html#scheduler-plugins_2","title":"\u5b89\u88c5 scheduler-plugins","text":""},{"location":"admin/kpanda/clusters/cluster-scheduler-plugin.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                        • kubean \u662f\u5728 v0.13.0 \u7248\u672c\u63a8\u51fa\u7684\u65b0\u529f\u80fd\uff0c\u9009\u62e9\u7ba1\u7406\u96c6\u7fa4\u65f6\u8bf7\u786e\u4fdd\u7248\u672c\u4e0d\u4f4e\u4e8e\u6b64\u7248\u672c\u3002
                                                        • \u5b89\u88c5 scheduler-plugins \u7248\u672c\u4e3a v0.27.8\uff0c\u8bf7\u786e\u4fdd\u96c6\u7fa4\u7248\u672c\u662f\u5426\u4e0e\u5b83\u517c\u5bb9\u3002 \u53c2\u8003\u6587\u6863 Compatibility Matrix\u3002
                                                        "},{"location":"admin/kpanda/clusters/cluster-scheduler-plugin.html#_2","title":"\u5b89\u88c5\u6d41\u7a0b","text":"
                                                        1. \u5728 \u521b\u5efa\u96c6\u7fa4 -> \u9ad8\u7ea7\u914d\u7f6e -> \u81ea\u5b9a\u4e49\u53c2\u6570 \u4e2d\u6dfb\u52a0 scheduler-plugins \u53c2\u6570

                                                          scheduler_plugins_enabled:true\nscheduler_plugins_plugin_config:\n  - name: Coscheduling\n    args:\n      permitWaitingTimeSeconds: 10 # default is 60\n

                                                          \u53c2\u6570\u8bf4\u660e\uff1a

                                                          • scheduler_plugins_enabled \u8bbe\u7f6e\u4e3a true \u65f6\uff0c\u5f00\u542f scheduler-plugins \u63d2\u4ef6\u80fd\u529b\u3002
                                                          • \u60a8\u53ef\u4ee5\u901a\u8fc7\u8bbe\u7f6e scheduler_plugins_enabled_plugins \u6216 scheduler_plugins_disabled_plugins \u9009\u9879\u6765\u542f\u7528\u6216\u7981\u7528\u67d0\u4e9b\u63d2\u4ef6\u3002 \u53c2\u9605 K8s \u5b98\u65b9\u63d2\u4ef6\u540d\u79f0\u3002
                                                          • \u5982\u679c\u9700\u8981\u8bbe\u7f6e\u81ea\u5b9a\u4e49\u63d2\u4ef6\u7684\u53c2\u6570\u8bf7\u914d\u7f6e scheduler_plugins_plugin_config\uff0c\u4f8b\u5982\uff1a\u8bbe\u7f6e coscheduling \u7684 permitWaitingTimeoutSeconds \u53c2\u6570\u3002 \u53c2\u9605 K8s \u5b98\u65b9\u63d2\u4ef6\u914d\u7f6e\u9879
                                                        2. \u96c6\u7fa4\u521b\u5efa\u6210\u529f\u540e\u7cfb\u7edf\u4f1a\u81ea\u52a8\u5b89\u88c5 scheduler-plugins \u548c controller \u7ec4\u4ef6\u8d1f\u8f7d\uff0c\u53ef\u4ee5\u5728\u5bf9\u5e94\u96c6\u7fa4\u7684\u65e0\u72b6\u6001\u8d1f\u8f7d\u4e2d\u67e5\u770b\u8d1f\u8f7d\u72b6\u6001\u3002
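A quick command-line check, assuming the components were installed into the scheduler-plugins namespace used elsewhere in this guide:

  # Both the scheduler and the controller Deployments should be available
  kubectl get deploy,pods -n scheduler-plugins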

                                                        "},{"location":"admin/kpanda/clusters/cluster-scheduler-plugin.html#scheduler-plugins_3","title":"\u4f7f\u7528 scheduler-plugins","text":"

                                                        \u4ee5\u4e0b\u4ee5\u4f7f\u7528 vgpu \u8c03\u5ea6\u5668\u7684\u540c\u65f6\uff0c\u60f3\u7ed3\u5408 scheduler-plugins \u7684 coscheduling \u63d2\u4ef6\u80fd\u529b\u573a\u666f\u4e3a\u793a\u4f8b\uff0c\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528 scheduler-plugins\u3002

                                                        1. \u5728 Helm \u6a21\u677f\u4e2d\u5b89\u88c5 vgpu\uff0c\u8bbe\u7f6e values.yaml \u53c2\u6570\u3002

                                                          • schedulerName: scheduler-plugins-scheduler\uff0c\u8fd9\u662f kubean \u9ed8\u8ba4\u5b89\u88c5\u7684 scheduler-plugins \u7684 scheduler \u540d\u79f0\uff0c\u76ee\u524d\u4e0d\u80fd\u4fee\u6539\u3002
                                                          • scheduler.kubeScheduler.enabled: false\uff0c\u4e0d\u5b89\u88c5 kube-scheduler\uff0c\u5c06 vgpu-scheduler \u4f5c\u4e3a\u5355\u72ec\u7684 extender\u3002
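As a sketch, both values could also be passed on the Helm command line; the chart reference and namespace below are placeholders for whatever the Helm template page actually uses:

  # Install vgpu with vgpu-scheduler acting as an extender of scheduler-plugins
  helm install vgpu <vgpu-chart> -n <namespace> \
    --set schedulerName=scheduler-plugins-scheduler \
    --set scheduler.kubeScheduler.enabled=false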
2. Extend vgpu-scheduler on scheduler-plugins.

[root@master01 charts]# kubectl get cm -n scheduler-plugins scheduler-config -ojsonpath="{.data.scheduler-config\.yaml}"

  apiVersion: kubescheduler.config.k8s.io/v1
  kind: KubeSchedulerConfiguration
  leaderElection:
    leaderElect: false
  profiles:
    # Compose all plugins in one profile
    - schedulerName: scheduler-plugins-scheduler
      plugins:
        multiPoint:
          enabled:
            - name: Coscheduling
            - name: CapacityScheduling
            - name: NodeResourceTopologyMatch
            - name: NodeResourcesAllocatable
          disabled:
            - name: PrioritySort
  pluginConfig:
    - args:
        permitWaitingTimeSeconds: 10
      name: Coscheduling

Modify the scheduler-config ConfigMap of scheduler-plugins to add the extenders section, as follows:

[root@master01 charts]# kubectl get cm -n scheduler-plugins scheduler-config -ojsonpath="{.data.scheduler-config\.yaml}"

  apiVersion: kubescheduler.config.k8s.io/v1
  kind: KubeSchedulerConfiguration
  leaderElection:
    leaderElect: false
  profiles:
    # Compose all plugins in one profile
    - schedulerName: scheduler-plugins-scheduler
      plugins:
        multiPoint:
          enabled:
            - name: Coscheduling
            - name: CapacityScheduling
            - name: NodeResourceTopologyMatch
            - name: NodeResourcesAllocatable
          disabled:
            - name: PrioritySort
  pluginConfig:
    - args:
        permitWaitingTimeSeconds: 10
      name: Coscheduling
  extenders:
    - urlPrefix: "${urlPrefix}"
      filterVerb: filter
      bindVerb: bind
      nodeCacheCapable: true
      ignorable: true
      httpTimeout: 30s
      weight: 1
      enableHTTPS: true
      tlsConfig:
        insecure: true
      managedResources:
        - name: nvidia.com/vgpu
          ignoredByScheduler: true
        - name: nvidia.com/gpumem
          ignoredByScheduler: true
        - name: nvidia.com/gpucores
          ignoredByScheduler: true
        - name: nvidia.com/gpumem-percentage
          ignoredByScheduler: true
        - name: nvidia.com/priority
          ignoredByScheduler: true
        - name: cambricon.com/mlunum
          ignoredByScheduler: true
3. After vgpu-scheduler is installed, the system automatically creates a Service (svc), and urlPrefix specifies the URL of that svc.

  Note

  • The svc refers to the Pod's service workload; in the namespace where the nvidia-vgpu plugin is installed, you can obtain the external access information for port 443 with the following command.

    kubectl get svc -n ${namespace}
  • The urlPrefix format is https://${ip address}:${port}

4. Restart the scheduler Pod of scheduler-plugins to load the new configuration file, for example as sketched below.

  Note

  When creating a vgpu application, you do not need to specify a scheduler name; the vgpu-scheduler webhook automatically changes the scheduler name to scheduler-plugins-scheduler, so there is no need to set it manually.
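A sketch of such a restart, assuming the scheduler runs as a Deployment named scheduler-plugins-scheduler in the scheduler-plugins namespace (verify the actual name with kubectl get deploy -n scheduler-plugins):

  # Recreate the scheduler Pods so they pick up the updated ConfigMap
  kubectl rollout restart deployment/scheduler-plugins-scheduler -n scheduler-plugins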

                                                        "},{"location":"admin/kpanda/clusters/cluster-status.html","title":"\u96c6\u7fa4\u72b6\u6001","text":"

                                                        \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u652f\u6301\u7eb3\u7ba1\u4e24\u79cd\u7c7b\u578b\u7684\u96c6\u7fa4\uff1a\u63a5\u5165\u96c6\u7fa4\u548c\u81ea\u5efa\u96c6\u7fa4\u3002 \u5173\u4e8e\u96c6\u7fa4\u7eb3\u7ba1\u7c7b\u578b\u7684\u66f4\u591a\u4fe1\u606f\uff0c\u8bf7\u53c2\u89c1\u96c6\u7fa4\u89d2\u8272\u3002

                                                        \u8fd9\u4e24\u79cd\u96c6\u7fa4\u7684\u72b6\u6001\u5982\u4e0b\u6240\u8ff0\u3002

                                                        "},{"location":"admin/kpanda/clusters/cluster-status.html#_2","title":"\u63a5\u5165\u96c6\u7fa4","text":"\u72b6\u6001 \u63cf\u8ff0 \u63a5\u5165\u4e2d\uff08Joining\uff09 \u96c6\u7fa4\u6b63\u5728\u63a5\u5165 \u89e3\u9664\u63a5\u5165\u4e2d\uff08Removing\uff09 \u96c6\u7fa4\u6b63\u5728\u89e3\u9664\u63a5\u5165 \u8fd0\u884c\u4e2d\uff08Running\uff09 \u96c6\u7fa4\u6b63\u5e38\u8fd0\u884c \u672a\u77e5\uff08Unknown\uff09 \u96c6\u7fa4\u5df2\u5931\u8054\uff0c\u7cfb\u7edf\u5c55\u793a\u6570\u636e\u4e3a\u5931\u8054\u524d\u7f13\u5b58\u6570\u636e\uff0c\u4e0d\u4ee3\u8868\u771f\u5b9e\u6570\u636e\uff0c\u540c\u65f6\u5931\u8054\u72b6\u6001\u4e0b\u6267\u884c\u7684\u4efb\u4f55\u64cd\u4f5c\u90fd\u5c06\u4e0d\u751f\u6548\uff0c\u8bf7\u68c0\u67e5\u96c6\u7fa4\u7f51\u7edc\u8fde\u901a\u6027\u6216\u4e3b\u673a\u72b6\u6001\u3002"},{"location":"admin/kpanda/clusters/cluster-status.html#_3","title":"\u81ea\u5efa\u96c6\u7fa4","text":"\u72b6\u6001 \u63cf\u8ff0 \u521b\u5efa\u4e2d\uff08Creating\uff09 \u96c6\u7fa4\u6b63\u5728\u521b\u5efa \u66f4\u65b0\u4e2d\uff08Updating\uff09 \u66f4\u65b0\u96c6\u7fa4 Kubernetes \u7248\u672c \u5220\u9664\u4e2d\uff08Deleting\uff09 \u96c6\u7fa4\u6b63\u5728\u5220\u9664 \u8fd0\u884c\u4e2d\uff08Running\uff09 \u96c6\u7fa4\u6b63\u5e38\u8fd0\u884c \u672a\u77e5\uff08Unknown\uff09 \u96c6\u7fa4\u5df2\u5931\u8054\uff0c\u7cfb\u7edf\u5c55\u793a\u6570\u636e\u4e3a\u5931\u8054\u524d\u7f13\u5b58\u6570\u636e\uff0c\u4e0d\u4ee3\u8868\u771f\u5b9e\u6570\u636e\uff0c\u540c\u65f6\u5931\u8054\u72b6\u6001\u4e0b\u6267\u884c\u7684\u4efb\u4f55\u64cd\u4f5c\u90fd\u5c06\u4e0d\u751f\u6548\uff0c\u8bf7\u68c0\u67e5\u96c6\u7fa4\u7f51\u7edc\u8fde\u901a\u6027\u6216\u4e3b\u673a\u72b6\u6001\u3002 \u521b\u5efa\u5931\u8d25\uff08Failed\uff09 \u96c6\u7fa4\u521b\u5efa\u5931\u8d25\uff0c\u8bf7\u67e5\u770b\u65e5\u5fd7\u4ee5\u83b7\u53d6\u8be6\u7ec6\u5931\u8d25\u539f\u56e0"},{"location":"admin/kpanda/clusters/cluster-version.html","title":"\u96c6\u7fa4\u7248\u672c\u652f\u6301\u8303\u56f4","text":"

The AI Computing Platform applies different version support mechanisms to attached clusters and self-built clusters.

This page mainly describes the version support mechanism for self-built clusters.

The Kubernetes community supports a range of 3 versions, such as 1.26, 1.27, and 1.28. When the community releases a new version, the supported range increments. For example, once the community's latest 1.29 release is out, the community-supported range becomes 1.27, 1.28, and 1.29.

For example, if the community-supported range is 1.25, 1.26, and 1.27, then the version range for creating worker clusters through the UI in the AI Computing Platform is 1.24, 1.25, and 1.26, and a stable version, such as 1.24.7, is recommended to the user.

In addition, the version range for creating worker clusters through the UI in the AI Computing Platform stays closely in sync with the community: after the community range increments, the platform's version range for creating worker clusters also increments by one version.

                                                        "},{"location":"admin/kpanda/clusters/cluster-version.html#kubernetes","title":"Kubernetes \u7248\u672c\u652f\u6301\u8303\u56f4","text":"Kubernetes \u793e\u533a\u7248\u672c\u8303\u56f4 \u81ea\u5efa\u5de5\u4f5c\u96c6\u7fa4\u7248\u672c\u8303\u56f4 \u81ea\u5efa\u5de5\u4f5c\u96c6\u7fa4\u63a8\u8350\u7248\u672c \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5b89\u88c5\u5668 \u53d1\u5e03\u65f6\u95f4
                                                        • 1.26
                                                        • 1.27
                                                        • 1.28
                                                        • 1.25
                                                        • 1.26
                                                        • 1.27
                                                        1.27.5 v0.13.0 2023.11.30"},{"location":"admin/kpanda/clusters/create-cluster.html","title":"\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4","text":"

In the AI Computing Platform container management module, cluster roles fall into four categories: global service cluster, management cluster, worker cluster, and attached cluster. Attached clusters can only be attached from third-party vendors; see Attaching Clusters.

This page describes how to create a worker cluster. By default, the worker node OS type and CPU architecture of a new worker cluster must be consistent with the global service cluster. To create a cluster with nodes whose OS or architecture differs from the global service cluster, see Creating an Ubuntu Worker Cluster on a CentOS Management Platform.

It is recommended to use the operating systems supported by the AI Computing Platform to create clusters. If your local nodes are not within the supported range, refer to Creating a Cluster on a Non-Mainstream Operating System.

                                                        "},{"location":"admin/kpanda/clusters/create-cluster.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                        \u521b\u5efa\u96c6\u7fa4\u4e4b\u524d\u9700\u8981\u6ee1\u8db3\u4e00\u5b9a\u7684\u524d\u63d0\u6761\u4ef6\uff1a

                                                        • \u6839\u636e\u4e1a\u52a1\u9700\u6c42\u51c6\u5907\u4e00\u5b9a\u6570\u91cf\u7684\u8282\u70b9\uff0c\u4e14\u8282\u70b9 OS \u7c7b\u578b\u548c CPU \u67b6\u6784\u4e00\u81f4\u3002
                                                        • \u63a8\u8350 Kubernetes \u7248\u672c 1.29.5\uff0c\u5177\u4f53\u7248\u672c\u8303\u56f4\uff0c\u53c2\u9605 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u96c6\u7fa4\u7248\u672c\u652f\u6301\u4f53\u7cfb\uff0c \u76ee\u524d\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u81ea\u5efa\u5de5\u4f5c\u96c6\u7fa4\u7248\u672c\u8303\u56f4\u5728 v1.28.0-v1.30.2\u3002\u5982\u9700\u521b\u5efa\u4f4e\u7248\u672c\u7684\u96c6\u7fa4\uff0c\u8bf7\u53c2\u8003\u96c6\u7fa4\u7248\u672c\u652f\u6301\u8303\u56f4\u3001\u90e8\u7f72\u4e0e\u5347\u7ea7 Kubean \u5411\u4e0b\u517c\u5bb9\u7248\u672c\u3002
                                                        • \u76ee\u6807\u4e3b\u673a\u9700\u8981\u5141\u8bb8 IPv4 \u8f6c\u53d1\u3002\u5982\u679c Pod \u548c Service \u4f7f\u7528\u7684\u662f IPv6\uff0c\u5219\u76ee\u6807\u670d\u52a1\u5668\u9700\u8981\u5141\u8bb8 IPv6 \u8f6c\u53d1\u3002
                                                        • \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u6682\u4e0d\u63d0\u4f9b\u5bf9\u9632\u706b\u5899\u7684\u7ba1\u7406\u529f\u80fd\uff0c\u60a8\u9700\u8981\u9884\u5148\u81ea\u884c\u5b9a\u4e49\u76ee\u6807\u4e3b\u673a\u9632\u706b\u5899\u89c4\u5219\u3002\u4e3a\u4e86\u907f\u514d\u521b\u5efa\u96c6\u7fa4\u7684\u8fc7\u7a0b\u4e2d\u51fa\u73b0\u95ee\u9898\uff0c\u5efa\u8bae\u7981\u7528\u76ee\u6807\u4e3b\u673a\u7684\u9632\u706b\u5899\u3002
                                                        • \u53c2\u9605\u8282\u70b9\u53ef\u7528\u6027\u68c0\u67e5\u3002
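For the forwarding requirement above, a minimal check-and-enable sketch to run on each target host:

  # Check whether IPv4 forwarding is enabled (1 = enabled)
  sysctl net.ipv4.ip_forward

  # Enable it persistently
  echo 'net.ipv4.ip_forward = 1' | sudo tee /etc/sysctl.d/99-ipforward.conf
  sudo sysctl --system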
                                                        "},{"location":"admin/kpanda/clusters/create-cluster.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                        1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u4e2d\uff0c\u70b9\u51fb \u521b\u5efa\u96c6\u7fa4 \u6309\u94ae\u3002

                                                        2. \u53c2\u8003\u4e0b\u5217\u8981\u6c42\u586b\u5199\u96c6\u7fa4\u57fa\u672c\u4fe1\u606f\uff0c\u5e76\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                                          • \u96c6\u7fa4\u540d\u79f0\uff1a\u540d\u79f0\u53ea\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u548c\u8fde\u5b57\u7b26\uff08\"-\"\uff09\uff0c\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u8005\u6570\u5b57\u5f00\u5934\u548c\u7ed3\u5c3e\uff0c\u6700\u957f 63 \u4e2a\u5b57\u7b26\u3002
                                                          • \u88ab\u7eb3\u7ba1\uff1a\u9009\u62e9\u7531\u54ea\u4e2a\u96c6\u7fa4\u6765\u7ba1\u7406\u6b64\u96c6\u7fa4\uff0c\u4f8b\u5982\u5728\u96c6\u7fa4\u751f\u547d\u5468\u671f\u4e2d\u521b\u5efa\u3001\u5347\u7ea7\u3001\u8282\u70b9\u6269\u7f29\u5bb9\u3001\u5220\u9664\u96c6\u7fa4\u7b49\u3002
                                                          • \u8fd0\u884c\u65f6\uff1a\u9009\u62e9\u96c6\u7fa4\u7684\u8fd0\u884c\u65f6\u73af\u5883\uff0c\u76ee\u524d\u652f\u6301 containerd \u548c docker\uff0c\u5982\u4f55\u9009\u62e9\u5bb9\u5668\u8fd0\u884c\u65f6\u3002
                                                          • Kubernetes \u7248\u672c\uff1a\u652f\u6301 3 \u4e2a\u7248\u672c\u8de8\u5ea6\uff0c\u5177\u4f53\u53d6\u51b3\u4e8e\u88ab\u7eb3\u7ba1\u96c6\u7fa4\u6240\u652f\u6301\u7684\u7248\u672c\u3002

3. Fill in the node configuration information and click Next.

  • High availability: when enabled, at least 3 controller nodes must be provided; when disabled, 1 controller node is sufficient.

    High-availability mode is recommended for production environments.

  • Authentication method: choose whether to access nodes via username/password or via public-private key.

    If you use the key method to access nodes, the SSH keys of the nodes must be configured in advance. See Using SSH Keys to Authenticate Nodes.

  • Use a unified password: when enabled, all nodes in the cluster share the same access password, which must be entered below. When disabled, a separate username and password can be set for each node.

  • Node information: fill in the node names and IP addresses.

  • Custom parameters: set variables that control how Ansible interacts with the remote hosts. For the variables that can be set, see Connecting to Hosts: Behavioral Inventory Parameters.
  • NTP time synchronization: when enabled, the time on all nodes is synchronized automatically; an NTP server address must be provided.

4. Click Node Check at the bottom of the page. If the check passes, continue to the next step; if it does not pass, update the node information and run the check again.

5. Fill in the network configuration information and click Next.

  • Network plugin: provides network services for the Pods in the cluster; the network plugin cannot be changed after the cluster is created. cilium and calico are supported; selecting none means no network plugin is installed for now.

  • Container CIDR: the network segment used by containers in the cluster, which determines the upper limit on the number of containers in the cluster. It cannot be modified after creation.

  • Service CIDR: the network segment used by the Service resources through which containers in the same cluster access each other, which determines the upper limit on the number of Service resources. It cannot be modified after creation.

6. Fill in the plugin configuration information and click Next.

7. Fill in the advanced configuration information and click OK.

  • kubelet_max_pods: set the maximum number of Pods per node; the default is 110.
  • hostname_overide: override the host name; it is recommended to use the default value, i.e. the name generated by the system.
  • kubernetes_audit: Kubernetes audit logging, enabled by default.
  • auto_renew_certificate: automatically renew the Kubernetes control plane certificates on the first Monday of each month, enabled by default.
  • disable_firewalld&ufw: disable the firewall so that the nodes remain reachable during installation.
  • Insecure_registries: private image registry configuration. When creating a cluster with a private image registry, fill in the private registry address here to bypass the container engine's certificate authentication when pulling images, avoiding failures caused by certificate problems.
  • yum_repos: fill in the Yum repository address. In offline environments, the default address given is for reference only; fill it in according to your actual situation.

Success

• After filling in the information correctly and completing the steps above, the page indicates that the cluster is being created.
• Creating a cluster takes a long time, so please wait patiently. In the meantime, you can click the Back to Cluster List button to let the installation run in the background.
• To view the current status, click Real-time Logs.

Note

• When a cluster is in the Unknown state, the cluster has lost contact.
• The data displayed by the system is cached data from before the disconnection and does not represent the real state.
• Any operation performed in the disconnected state will not take effect; check the network connectivity of the cluster or the host status.

                                                        "},{"location":"admin/kpanda/clusters/delete-cluster.html","title":"\u5378\u8f7d/\u89e3\u9664\u63a5\u5165\u96c6\u7fa4","text":"

                                                        \u901a\u8fc7\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0 \u521b\u5efa\u7684\u96c6\u7fa4 \u652f\u6301 \u5378\u8f7d\u96c6\u7fa4 \u6216 \u89e3\u9664\u63a5\u5165 \u64cd\u4f5c\uff0c\u4ece\u5176\u4ed6\u73af\u5883\u76f4\u63a5 \u63a5\u5165\u7684\u96c6\u7fa4 \u4ec5\u652f\u6301 \u89e3\u9664\u63a5\u5165 \u64cd\u4f5c\u3002

                                                        Info

                                                        \u5982\u679c\u60f3\u5f7b\u5e95\u5220\u9664\u4e00\u4e2a\u63a5\u5165\u7684\u96c6\u7fa4\uff0c\u9700\u8981\u524d\u5f80\u521b\u5efa\u8be5\u96c6\u7fa4\u7684\u539f\u59cb\u5e73\u53f0\u64cd\u4f5c\u3002\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e0d\u652f\u6301\u5220\u9664\u63a5\u5165\u7684\u96c6\u7fa4\u3002

                                                        \u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\uff0c \u5378\u8f7d\u96c6\u7fa4 \u548c \u89e3\u9664\u63a5\u5165 \u7684\u533a\u522b\u5728\u4e8e\uff1a

                                                        • \u5378\u8f7d\u96c6\u7fa4 \u64cd\u4f5c\u4f1a\u9500\u6bc1\u8be5\u96c6\u7fa4\uff0c\u5e76\u91cd\u7f6e\u96c6\u7fa4\u4e0b\u6240\u6709\u8282\u70b9\u7684\u6570\u636e\u3002\u6240\u6709\u6570\u636e\u90fd\u5c06\u88ab\u9500\u6bc1\uff0c\u5efa\u8bae\u505a\u597d\u5907\u4efd\u3002\u540e\u671f\u9700\u8981\u65f6\u5fc5\u987b\u91cd\u65b0\u521b\u5efa\u4e00\u4e2a\u96c6\u7fa4\u3002
                                                        • \u89e3\u9664\u63a5\u5165 \u64cd\u4f5c\u4f1a\u5c06\u5f53\u524d\u96c6\u7fa4\u4ece\u5e73\u53f0\u4e2d\u79fb\u9664\uff0c\u4e0d\u4f1a\u6467\u6bc1\u96c6\u7fa4\uff0c\u4e5f\u4e0d\u4f1a\u9500\u6bc1\u6570\u636e\u3002
                                                        "},{"location":"admin/kpanda/clusters/delete-cluster.html#_2","title":"\u5378\u8f7d\u96c6\u7fa4","text":"

                                                        Note

• The current user must have Admin or Kpanda Owner permissions to uninstall a cluster.
• Before uninstalling a cluster, click the cluster name in the cluster list, then go to Cluster Operations -> Cluster Settings -> Advanced Configuration and disable Cluster Deletion Protection; otherwise the Uninstall Cluster option is not shown.
• The global service cluster cannot be uninstalled or removed.
1. On the Cluster List page, find the cluster to be uninstalled, click ┇ on its right, and click Uninstall Cluster in the dropdown list.

2. Enter the cluster name to confirm, then click Delete.

If you are prompted that residual resources remain in the cluster, delete them as instructed before the uninstall can proceed.

3. Back on the Cluster List page, the cluster's status changes to Deleting. Uninstalling a cluster can take a while, so please wait patiently.

                                                        "},{"location":"admin/kpanda/clusters/delete-cluster.html#_3","title":"\u89e3\u9664\u63a5\u5165\u96c6\u7fa4","text":"

                                                        Note

• The current user must have Admin or Kpanda Owner permissions to remove an integrated cluster.
• The global service cluster cannot be removed.
1. On the Cluster List page, find the cluster whose integration you want to remove, click ┇ on its right, and click Remove Integration in the dropdown list.

2. Enter the cluster name to confirm, then click Remove Integration.

If you are prompted that residual resources remain in the cluster, delete them as instructed before the removal can proceed.

                                                        "},{"location":"admin/kpanda/clusters/delete-cluster.html#_4","title":"\u6e05\u7406\u89e3\u9664\u63a5\u5165\u96c6\u7fa4\u914d\u7f6e\u6570\u636e","text":"

After a cluster is removed, the management platform data that was installed into it is not cleaned up automatically. To integrate the cluster into a new management platform, perform the following steps manually:

Delete the kpanda-system and insight-system namespaces:

kubectl delete ns kpanda-system insight-system
                                                        "},{"location":"admin/kpanda/clusters/integrate-cluster.html","title":"\u63a5\u5165\u96c6\u7fa4","text":"

By integrating clusters, you can manage clusters from many cloud service platforms as well as local private physical clusters on a single governance platform, effectively avoiding vendor lock-in and helping enterprises move their business to the cloud safely.

The container management module supports integrating many mainstream container clusters, such as Redhat Openshift, SUSE Rancher, VMware Tanzu, Amazon EKS, Aliyun ACK, Huawei CCE, Tencent TKE, and standard Kubernetes clusters.

                                                        "},{"location":"admin/kpanda/clusters/integrate-cluster.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
• Prepare the cluster to be integrated, ensure network connectivity between the container management cluster and the target cluster, and make sure the target cluster runs Kubernetes 1.22 or later.
• The current user must have Kpanda Owner or higher permissions.
                                                        "},{"location":"admin/kpanda/clusters/integrate-cluster.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
1. Go to the Cluster List page and click the Integrate Cluster button at the top right.

2. Fill in the basic information.

  • Cluster Name: must be unique and cannot be changed once set. At most 63 characters; only lowercase letters, digits, and the separator "-" are allowed, and it must start and end with a lowercase letter or digit.
  • Cluster Alias: any characters, at most 60.
  • Release: the cluster's distribution vendor, covering mainstream cloud vendors and local private physical clusters.
3. Fill in the target cluster's KubeConfig and click Verify Config; the cluster can be integrated only after verification passes.

If you do not know how to obtain the cluster's KubeConfig file, click How to get kubeConfig at the top right of the input box for the corresponding steps; a command-line sketch also follows this list.

4. Confirm that all parameters are correct and click OK at the bottom right of the page.
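If you can already reach the target cluster with kubectl, one way to produce a self-contained KubeConfig to paste into the form is the following sketch (the output file name is arbitrary):

# print the current context only, with certificates embedded inline
kubectl config view --minify --flatten --raw > target-cluster.kubeconfig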

                                                        Note

• A newly integrated cluster starts in the Integrating state and changes to Running once integration succeeds.
• If a cluster stays in the Integrating state, confirm that the integration script ran successfully on the target cluster. For more details on cluster states, refer to Cluster Status.
                                                        "},{"location":"admin/kpanda/clusters/integrate-rancher-cluster.html","title":"\u63a5\u5165 rancher \u96c6\u7fa4","text":"

This article explains how to integrate a Rancher cluster.

                                                        "},{"location":"admin/kpanda/clusters/integrate-rancher-cluster.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
• Prepare a Rancher cluster to be integrated, with administrator permissions, and ensure network connectivity between the container management cluster and the target cluster.
• The current user must have Kpanda Owner or higher permissions.
                                                        "},{"location":"admin/kpanda/clusters/integrate-rancher-cluster.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"admin/kpanda/clusters/integrate-rancher-cluster.html#rancher-serviceaccount","title":"\u6b65\u9aa4\u4e00\uff1a\u5728 rancher \u96c6\u7fa4\u521b\u5efa\u5177\u6709\u7ba1\u7406\u5458\u6743\u9650\u7684 ServiceAccount \u7528\u6237","text":"
1. Log in to the Rancher cluster with a role that has administrator permissions, and create a file named sa.yaml from a terminal.

vi sa.yaml

Press the i key to enter insert mode, then enter the following content:

                                                          sa.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: rancher-rke
rules:
  - apiGroups: ['*']
    resources: ['*']
    verbs: ['*']
  - nonResourceURLs: ['*']
    verbs: ['*']
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: rancher-rke
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: rancher-rke
subjects:
  - kind: ServiceAccount
    name: rancher-rke
    namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: rancher-rke
  namespace: kube-system

Press the esc key to exit insert mode, then type :wq to save and exit.

2. Run the following command in the current directory to create a ServiceAccount named rancher-rke (abbreviated below as SA):

kubectl apply -f sa.yaml

The expected output is as follows:

clusterrole.rbac.authorization.k8s.io/rancher-rke created
clusterrolebinding.rbac.authorization.k8s.io/rancher-rke created
serviceaccount/rancher-rke created
3. Create a secret named rancher-rke-secret and bind it to the rancher-rke SA.

kubectl apply -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: rancher-rke-secret
  namespace: kube-system
  annotations:
    kubernetes.io/service-account.name: rancher-rke
type: kubernetes.io/service-account-token
EOF

The expected output is as follows:

secret/rancher-rke-secret created

                                                          Note

If your cluster version is lower than 1.24, skip this step and go directly to the next one.

4. Find the secret of the rancher-rke SA:

kubectl -n kube-system get secret | grep rancher-rke | awk '{print $1}'

Expected output:

rancher-rke-secret

View the details of the rancher-rke-secret secret:

kubectl -n kube-system describe secret rancher-rke-secret

Expected output:

Name:         rancher-rke-secret
Namespace:    kube-system
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: rancher-rke
              kubernetes.io/service-account.uid: d83df5d9-bd7d-488d-a046-b740618a0174

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     570 bytes
namespace:  11 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IjUtNE9nUWZLRzVpbEJORkZaNmtCQXhqVzRsZHU4MHhHcDBfb0VCaUo0V1kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJyYW5jaGVyLXJrZS1zZWNyZXQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoicmFuY2hlci1ya2UiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkODNkZjVkOS1iZDdkLTQ4OGQtYTA0Ni1iNzQwNjE4YTAxNzQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06cmFuY2hlci1ya2UifQ.VNsMtPEFOdDDeGt_8VHblcMRvjOwPXMM-79o9UooHx6q-VkHOcIOp3FOT2hnEdNnIsyODZVKCpEdCgyozX-3y5x2cZSZpocnkMcBbQm-qfTyUcUhAY7N5gcYUtHUhvRAsNWJcsDCn6d96gT_qo-ddo_cT8Ri39Lc123FDYOnYG-YGFKSgRQVy7Vyv34HIajZCCjZzy7i--eE_7o4DXeTjNqAFMFstUxxHBOXI3Rdn1zKQKqh5Jhg4ES7X-edSviSUfJUX-QV_LlAw5DuAyGPH7bDH4QaQ5k-p6cIctmpWZE-9wRDlKA4LYRblKE7MJcI6OmM4ldlMM0Jc8N-gCtl4w
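Rather than copying the token out of the describe output by hand, you can extract and decode it directly; a small convenience sketch:

# print the decoded service-account token stored in the secret
kubectl -n kube-system get secret rancher-rke-secret -o jsonpath='{.data.token}' | base64 -d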
                                                        "},{"location":"admin/kpanda/clusters/integrate-rancher-cluster.html#rancher-rke-sa-kubeconfig","title":"\u6b65\u9aa4\u4e8c\uff1a\u5728\u672c\u5730\u4f7f\u7528 rancher-rke SA \u7684\u8ba4\u8bc1\u4fe1\u606f\u66f4\u65b0 kubeconfig \u6587\u4ef6","text":"

Perform the following operations on any local node where kubectl is installed:

1. Configure the kubectl token:

kubectl config set-credentials rancher-rke --token=<token value from the rancher-rke-secret secret>

For example:

kubectl config set-credentials rancher-rke --token=eyJhbGciOiJSUzI1NiIsImtpZCI6IjUtNE9nUWZLRzVpbEJORkZaNmtCQXhqVzRsZHU4MHhHcDBfb0VCaUo0V1kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJyYW5jaGVyLXJrZS1zZWNyZXQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoicmFuY2hlci1ya2UiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkODNkZjVkOS1iZDdkLTQ4OGQtYTA0Ni1iNzQwNjE4YTAxNzQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06cmFuY2hlci1ya2UifQ.VNsMtPEFOdDDeGt_8VHblcMRvjOwPXMM-79o9UooHx6q-VkHOcIOp3FOT2hnEdNnIsyODZVKCpEdCgyozX-3y5x2cZSZpocnkMcBbQm-qfTyUcUhAY7N5gcYUtHUhvRAsNWJcsDCn6d96gT_qo-ddo_cT8Ri39Lc123FDYOnYG-YGFKSgRQVy7Vyv34HIajZCCjZzy7i--eE_7o4DXeTjNqAFMFstUxxHBOXI3Rdn1zKQKqh5Jhg4ES7X-edSviSUfJUX-QV_LlAw5DuAyGPH7bDH4QaQ5k-p6cIctmpWZE-9wRDlKA4LYRblKE7MJcI6OmM4ldlMM0Jc8N-gCtl4w
2. Configure the kubectl APIServer information:

kubectl config set-cluster {cluster-name} --insecure-skip-tls-verify=true --server={APIServer}
  • {cluster-name}: the name of the Rancher cluster.
  • {APIServer}: the cluster's access address, usually the control plane node IP plus port 6443, for example https://10.X.X.X:6443

For example:

kubectl config set-cluster rancher-rke --insecure-skip-tls-verify=true --server=https://10.X.X.X:6443
3. Configure the kubectl context:

kubectl config set-context {context-name} --cluster={cluster-name} --user={SA-username}

For example:

kubectl config set-context rancher-rke-context --cluster=rancher-rke --user=rancher-rke
4. Switch kubectl to the newly created context, rancher-rke-context:

kubectl config use-context rancher-rke-context
5. Get the kubeconfig information for the rancher-rke-context context.

kubectl config view --minify --flatten --raw

Expected output:

apiVersion: v1
clusters:
- cluster:
    insecure-skip-tls-verify: true
    server: https://77C321BCF072682C70C8665ED4BFA10D.gr7.ap-southeast-1.eks.amazonaws.com
  name: joincluster
contexts:
- context:
    cluster: joincluster
    user: eks-admin
  name: ekscontext
current-context: ekscontext
kind: Config
preferences: {}
users:
- name: eks-admin
  user:
    token: eyJhbGciOiJSUzI1NiIsImtpZCI6ImcxTjJwNkktWm5IbmRJU1RFRExvdWY1TGFWVUtGQ3VIejFtNlFQcUNFalEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2V
                                                        "},{"location":"admin/kpanda/clusters/integrate-rancher-cluster.html#ai","title":"\u6b65\u9aa4\u4e09\uff1a\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u754c\u9762\u63a5\u5165\u96c6\u7fa4","text":"

Using the kubeconfig file just obtained, refer to the Integrate Cluster documentation to integrate the Rancher cluster into the global service cluster.

                                                        "},{"location":"admin/kpanda/clusters/k8s-cert.html","title":"Kubernetes \u96c6\u7fa4\u8bc1\u4e66\u66f4\u65b0","text":"

To keep communication among Kubernetes components secure, calls between components are authenticated with TLS, which requires the cluster's PKI certificates to be configured.

Cluster certificates are valid for one year. To prevent expired certificates from taking your services down, renew them in time.

This article explains how to renew certificates manually.

                                                        "},{"location":"admin/kpanda/clusters/k8s-cert.html#_1","title":"\u68c0\u67e5\u8bc1\u4e66\u662f\u5426\u8fc7\u671f","text":"

You can run the following command to check whether the certificates are expired:

kubeadm certs check-expiration

The output is similar to the following:

CERTIFICATE                EXPIRES                  RESIDUAL TIME   CERTIFICATE AUTHORITY   EXTERNALLY MANAGED
admin.conf                 Dec 14, 2024 07:26 UTC   204d                                    no
apiserver                  Dec 14, 2024 07:26 UTC   204d            ca                      no
apiserver-etcd-client      Dec 14, 2024 07:26 UTC   204d            etcd-ca                 no
apiserver-kubelet-client   Dec 14, 2024 07:26 UTC   204d            ca                      no
controller-manager.conf    Dec 14, 2024 07:26 UTC   204d                                    no
etcd-healthcheck-client    Dec 14, 2024 07:26 UTC   204d            etcd-ca                 no
etcd-peer                  Dec 14, 2024 07:26 UTC   204d            etcd-ca                 no
etcd-server                Dec 14, 2024 07:26 UTC   204d            etcd-ca                 no
front-proxy-client         Dec 14, 2024 07:26 UTC   204d            front-proxy-ca          no
scheduler.conf             Dec 14, 2024 07:26 UTC   204d                                    no

CERTIFICATE AUTHORITY   EXPIRES                  RESIDUAL TIME   EXTERNALLY MANAGED
ca                      Dec 12, 2033 07:26 UTC   9y              no
etcd-ca                 Dec 12, 2033 07:26 UTC   9y              no
front-proxy-ca          Dec 12, 2033 07:26 UTC   9y              no
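If kubeadm is not available on a node, the expiry date of an individual certificate can also be read with openssl; a sketch:

# print the notAfter date of the API server certificate
openssl x509 -noout -enddate -in /etc/kubernetes/pki/apiserver.crt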
                                                        "},{"location":"admin/kpanda/clusters/k8s-cert.html#_2","title":"\u624b\u52a8\u66f4\u65b0\u8bc1\u4e66","text":"

You can renew certificates manually with the commands below, adding the appropriate command-line options. Back up the current certificates before renewing; a backup sketch follows.
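A minimal backup sketch (the backup paths are arbitrary):

# back up the PKI directory and the admin kubeconfig before renewing
cp -a /etc/kubernetes/pki "/etc/kubernetes/pki.bak.$(date +%F)"
cp -a /etc/kubernetes/admin.conf "/etc/kubernetes/admin.conf.bak.$(date +%F)"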

Renew a specific certificate:

kubeadm certs renew <certificate-name>

Renew all certificates:

kubeadm certs renew all

The renewed certificates can be found in the /etc/kubernetes/pki directory and are valid for another year. The following corresponding configuration files are also updated:

                                                        • /etc/kubernetes/admin.conf
                                                        • /etc/kubernetes/controller-manager.conf
                                                        • /etc/kubernetes/scheduler.conf

                                                        Note

• If you are running a highly available cluster, this command must be executed on all control plane nodes.
• This command renews certificates using the CA (or front-proxy-CA) certificate and the key stored in /etc/kubernetes/pki.
                                                        "},{"location":"admin/kpanda/clusters/k8s-cert.html#_3","title":"\u91cd\u542f\u670d\u52a1","text":"

After renewing, you need to restart the control plane Pods. This is required because dynamic certificate reloading is not yet supported by all components and certificates.

Static Pods are managed by the local kubelet rather than the API server, so kubectl cannot be used to delete or restart them.

To restart the static Pods, temporarily move the manifest files out of /etc/kubernetes/manifests/ and wait 20 seconds. Refer to the fileCheckFrequency value in the KubeletConfiguration struct.

Once a Pod's manifest is no longer in the manifest directory, kubelet terminates it. After another fileCheckFrequency period you can move the files back; kubelet then recreates the Pods, and the components' certificate renewal takes effect.

# run from /etc/kubernetes; create ./temp first if it does not exist,
# and wait about 20 seconds between the two moves
mv ./manifests/* ./temp/
mv ./temp/* ./manifests/

                                                        Note

If the container service is Docker, you can restart the services that use the certificates with the following command so the renewal takes effect:

docker ps | grep -E 'k8s_kube-apiserver|k8s_kube-controller-manager|k8s_kube-scheduler|k8s_etcd_etcd' | awk -F ' ' '{print $1}' | xargs docker restart
                                                        "},{"location":"admin/kpanda/clusters/k8s-cert.html#kubeconfig","title":"\u66f4\u65b0 KubeConfig","text":"

When a cluster is built, admin.conf is usually copied to $HOME/.kube/config. To refresh the contents of $HOME/.kube/config after admin.conf has been renewed, run the following commands:

sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
                                                        "},{"location":"admin/kpanda/clusters/k8s-cert.html#kubelet","title":"\u4e3a kubelet \u914d\u7f6e\u8bc1\u4e66\u8f6e\u6362","text":"

After the operations above, the renewal of all cluster certificates is essentially complete, with the exception of kubelet.

Kubernetes includes a kubelet certificate rotation feature: when the current certificate is about to expire, kubelet automatically generates a new key and requests a new certificate from the Kubernetes API. Once the new certificate is available, it is used to authenticate the connection to the Kubernetes API.

                                                        Note

This feature is available in Kubernetes 1.8.0 and later.

To enable client certificate rotation, configure the following parameters:

• The kubelet process accepts the --rotate-certificates flag, which determines whether kubelet automatically requests a new certificate when the certificate currently in use is about to expire.

• The kube-controller-manager process accepts the --cluster-signing-duration flag (named --experimental-cluster-signing-duration before version 1.19), which controls the validity period of the certificates it signs.

For more details, refer to Configuring Certificate Rotation for kubelet. A quick check is sketched below.
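A quick way to confirm rotation is enabled on a kubeadm-provisioned node (the paths below are kubeadm defaults; adjust them for other installers):

# kubeadm writes rotateCertificates into the kubelet configuration by default
grep -i rotatecertificates /var/lib/kubelet/config.yaml

# check whether a custom signing duration is set on kube-controller-manager
grep cluster-signing-duration /etc/kubernetes/manifests/kube-controller-manager.yaml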

                                                        "},{"location":"admin/kpanda/clusters/k8s-cert.html#_4","title":"\u81ea\u52a8\u66f4\u65b0\u8bc1\u4e66","text":"

To handle expired or soon-to-expire Kubernetes cluster certificates more efficiently and conveniently, refer to the Kubernetes-version cluster certificate renewal guide.

                                                        "},{"location":"admin/kpanda/clusters/runtime.html","title":"\u5982\u4f55\u9009\u62e9\u5bb9\u5668\u8fd0\u884c\u65f6","text":"

The container runtime is a key Kubernetes component that manages the lifecycle of containers and container images. Kubernetes made containerd the default container runtime in version 1.19 and removed support for the Dockershim component in version 1.24.

Therefore, compared with the Docker runtime, we recommend the lightweight containerd as your container runtime, as it has become the mainstream choice.

In addition, some operating system vendors' compatibility with the Docker runtime is less than ideal. Runtime support across operating systems is listed in the following table:

                                                        "},{"location":"admin/kpanda/clusters/runtime.html#_2","title":"\u4e0d\u540c\u64cd\u4f5c\u7cfb\u7edf\u548c\u63a8\u8350\u7684\u8fd0\u884c\u65f6\u7248\u672c\u5bf9\u5e94\u5173\u7cfb","text":"\u64cd\u4f5c\u7cfb\u7edf \u63a8\u8350\u7684 containerd \u7248\u672c \u63a8\u8350\u7684 Docker \u7248\u672c CentOS 1.7.5 20.10 RedHatOS 1.7.5 20.10 KylinOS 1.7.5 19.03\uff08\u4ec5 ARM \u67b6\u6784\u652f\u6301 \uff0c\u5728 x86 \u67b6\u6784\u4e0b\u4e0d\u652f\u6301\u4f7f\u7528 Docker \u4f5c\u4e3a\u8fd0\u884c\u65f6\uff09

For more information about supported runtime versions, refer to RedHatOS Supported Runtime Versions and KylinOS Supported Runtime Versions.

                                                        Note

In offline installation mode, the runtime offline packages for the relevant operating system must be prepared in advance.

                                                        "},{"location":"admin/kpanda/clusters/upgrade-cluster.html","title":"\u96c6\u7fa4\u5347\u7ea7","text":"

The Kubernetes community releases a minor version every quarter, and each version is maintained for only about nine months. Once maintenance ends, major bugs and security vulnerabilities are no longer patched. Upgrading clusters manually is tedious and places a heavy burden on administrators.

This section explains how to upgrade the Kubernetes version of a worker cluster online with one click through the web UI. To upgrade a worker cluster's Kubernetes version offline, refer to the Worker Cluster Offline Upgrade Guide.

                                                        Danger

Once upgraded, the cluster cannot be rolled back to the previous version. Proceed with caution.

                                                        Note

• Kubernetes versions are denoted x.y.z, where x is the major version, y the minor version, and z the patch version.
• Upgrading across minor versions is not allowed; for example, you cannot upgrade directly from 1.23 to 1.25.
• Integrated clusters do not support version upgrades. If Cluster Upgrade is missing from the left navigation bar, check whether the cluster is an integrated cluster.
• The global service cluster can only be upgraded through the terminal.
• When upgrading a worker cluster, its management cluster must already be integrated into the container management module and be running normally.
• If you need to modify cluster parameters, you can do so by upgrading to the same version; see the steps below.
1. Click the target cluster's name in the cluster list.

2. In the left navigation bar, click Cluster Operations -> Cluster Upgrade, then click Version Upgrade at the top right of the page.

3. Select an available version and enter the cluster name to confirm.

                                                          Note

If you want to modify cluster parameters by means of an upgrade, refer to the following steps:

1. Find the ConfigMap corresponding to the cluster. You can log in to a control plane node and run the following command to find the ConfigMap name referenced in varsConfRef.

kubectl get cluster.kubean.io <clustername> -o yaml
2. Modify the parameter information in the ConfigMap as needed (a command-line sketch follows this list).

3. Select the same version here and perform the upgrade; once it completes, the cluster parameters are updated.
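A hypothetical command-line sketch of steps 1 and 2 (the jsonpath expression and the ConfigMap name/namespace are assumptions based on the varsConfRef field mentioned above and will differ per cluster):

# locate the vars ConfigMap referenced by the cluster object
kubectl get cluster.kubean.io <clustername> -o jsonpath='{.spec.varsConfRef}'; echo

# edit the parameters in that ConfigMap, using the namespace/name printed above
kubectl -n <namespace> edit configmap <varsconf-name>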

4. After clicking OK, you can watch the cluster's upgrade progress.

5. The cluster upgrade is expected to take 30 minutes; click the Real-time Logs button to view the detailed upgrade logs.

                                                        "},{"location":"admin/kpanda/configmaps-secrets/configmap-hot-loading.html","title":"configmap/secret \u70ed\u52a0\u8f7d","text":"

Hot reloading of configmaps/secrets means that when a configmap/secret is mounted into a container as a data volume and the configuration changes, the container automatically reads the updated configmap/secret without the Pod being restarted.

                                                        "},{"location":"admin/kpanda/configmaps-secrets/configmap-hot-loading.html#_1","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
1. Refer to Create Workload - Container Configuration to set up the container's data storage, selecting a Configmap, Configmap Key, Secret, or Secret Key to mount into the container as a data volume.

                                                          Note

Configuration files mounted via a subpath (SubPath) do not support hot reloading.

2. Go to the ConfigMaps & Secrets page and open the detail page of the config item. Under Associated Resources, find the corresponding container resource and click the Load Now button to enter the hot reload page.

                                                          Note

If your application already picks up updated configmap/secret configuration automatically, there is no need to perform the hot reload manually.

3. In the hot reload dialog, enter the command to execute inside the container and click OK to reload the configuration. For example, in an nginx container, run nginx -s reload as the root user to reload the configuration.

4. Watch the application reload in the web terminal that pops up. A volume-mount sketch that supports hot reloading follows.
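For reference, a minimal sketch of a volume mount that supports hot reloading (all names are hypothetical): the ConfigMap is mounted as a whole volume rather than via subPath, so kubelet refreshes the projected files when the ConfigMap changes.

kubectl apply -f - <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-hot-reload
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-hot-reload
  template:
    metadata:
      labels:
        app: nginx-hot-reload
    spec:
      containers:
        - name: nginx
          image: nginx:1.25
          volumeMounts:
            - name: conf
              mountPath: /etc/nginx/conf.d   # whole-volume mount, no subPath
      volumes:
        - name: conf
          configMap:
            name: nginx-conf                 # hypothetical ConfigMap name
EOF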

                                                        "},{"location":"admin/kpanda/configmaps-secrets/create-configmap.html","title":"\u521b\u5efa\u914d\u7f6e\u9879","text":"

A ConfigMap stores non-confidential data as key-value pairs, decoupling configuration data from application code. ConfigMaps can be used as container environment variables, command-line arguments, or configuration files in storage volumes.

                                                        Note

• Data stored in a configmap cannot exceed 1 MiB. To store larger data, mount a storage volume or use a standalone database or file service instead.

• ConfigMaps provide neither confidentiality nor encryption. To store encrypted data, use a Secret or another third-party tool to keep the data private.

Two creation methods are supported:

• Creation with a graphical form
• Creation from YAML
                                                        "},{"location":"admin/kpanda/configmaps-secrets/create-configmap.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
• The container management module has a Kubernetes cluster integrated or created, and you can access the cluster's UI.

• A namespace and a user have been created, and the user has been granted the NS Editor role; for details, see Namespace Authorization.

                                                        "},{"location":"admin/kpanda/configmaps-secrets/create-configmap.html#_3","title":"\u56fe\u5f62\u5316\u8868\u5355\u521b\u5efa","text":"
1. On the Cluster List page, click a cluster's name to enter Cluster Details.

2. In the left navigation bar, click ConfigMaps & Secrets -> ConfigMaps, then click the Create ConfigMap button at the top right.

3. Fill in the configuration on the Create ConfigMap page and click OK.

                                                          Note

Click Upload File to import an existing file from your local machine and create a configmap quickly.

4. After creation, click More on the right of the configmap to edit its YAML, update, export, delete, and perform other operations.

                                                        "},{"location":"admin/kpanda/configmaps-secrets/create-configmap.html#yaml","title":"YAML \u521b\u5efa","text":"
1. On the Cluster List page, click a cluster's name to enter Cluster Details.

2. In the left navigation bar, click ConfigMaps & Secrets -> ConfigMaps, then click the YAML Create button at the top right.

3. Fill in or paste a prepared configuration file, then click OK at the bottom right of the dialog.

                                                          Note

• Click Import to import an existing file from your local machine and create a configmap quickly.
• After filling in the data, click Download to save the configuration file locally.

4. After creation, click More on the right of the configmap to edit its YAML, update, export, delete, and perform other operations.

                                                        "},{"location":"admin/kpanda/configmaps-secrets/create-configmap.html#yaml_1","title":"\u914d\u7f6e\u9879 YAML \u793a\u4f8b","text":"
```yaml
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-root-ca.crt
  namespace: default
data:
  version: '1.0'
```
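The same kind of ConfigMap can also be created from the command line; a sketch (the names and the source file are hypothetical):

# create a ConfigMap with a single literal key/value pair
kubectl create configmap example-config --from-literal=version=1.0

# or build one from a local file, mirroring the Upload File button
kubectl create configmap example-config-from-file --from-file=./app.properties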

Next step: Use ConfigMaps

                                                        "},{"location":"admin/kpanda/configmaps-secrets/create-secret.html","title":"\u521b\u5efa\u5bc6\u94a5","text":"

A secret is a resource object used to store and manage sensitive information such as passwords, OAuth tokens, and SSH and TLS credentials. Using secrets means you do not need to include sensitive confidential data in your application code.

Secret usage scenarios:

• As container environment variables, providing necessary information the container needs while running.
• As Pod data volumes.
• As registry authentication credentials when kubelet pulls container images.

Two creation methods are supported:

• Creation with a graphical form
• Creation from YAML
                                                        "},{"location":"admin/kpanda/configmaps-secrets/create-secret.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
• The container management module has a Kubernetes cluster integrated or created, and you can access the cluster's UI.

• A namespace and a user have been created, and the user has been granted the NS Editor role; for details, see Cluster and Namespace Authorization.

                                                        "},{"location":"admin/kpanda/configmaps-secrets/create-secret.html#_3","title":"\u56fe\u5f62\u5316\u8868\u5355\u521b\u5efa","text":"
1. On the Cluster List page, click a cluster's name to enter Cluster Details.

2. In the left navigation bar, click ConfigMaps & Secrets -> Secrets, then click the Create Secret button at the top right.

3. Fill in the configuration on the Create Secret page and click OK.

Note the following when filling in the configuration:

• The secret's name must be unique within the same namespace.
• Secret type:
  • Default (Opaque): the default Kubernetes secret type, supporting arbitrary user-defined data.
  • TLS (kubernetes.io/tls): credentials for TLS client or server data access.
  • Image registry information (kubernetes.io/dockerconfigjson): credentials for accessing an image registry.
  • Username and password (kubernetes.io/basic-auth): credentials for basic authentication.
  • Custom: a type defined by the user according to business needs.
• Secret data: the data stored in the secret; the parameters to fill in differ by type:
  • For Default (Opaque) or Custom: any number of key-value pairs can be entered.
  • For TLS (kubernetes.io/tls): the certificate credential and private key data are required. The certificate is a self-signed or CA-signed credential used for authentication; the certificate request is a request for a signature and must be signed with the private key.
  • For image registry information (kubernetes.io/dockerconfigjson): the account and password of the private image registry are required.
  • For username and password (kubernetes.io/basic-auth): the username and password must be specified.
                                                        "},{"location":"admin/kpanda/configmaps-secrets/create-secret.html#yaml","title":"YAML \u521b\u5efa","text":"
1. On the Cluster List page, click a cluster's name to enter Cluster Details.

2. In the left navigation bar, click ConfigMaps & Secrets -> Secrets, then click the YAML Create button at the top right.

3. Fill in the YAML configuration on the YAML Create page and click OK.

You can import a YAML file from your local machine or download and save the completed file locally.

                                                        "},{"location":"admin/kpanda/configmaps-secrets/create-secret.html#yaml_1","title":"\u5bc6\u94a5 YAML \u793a\u4f8b","text":"
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: secretdemo
type: Opaque
data:
  # values under data must be base64-encoded; ****** is a placeholder
  username: ******
  password: ******
```
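A sketch that lets kubectl do the base64 encoding for you (the names and values are hypothetical):

# --dry-run=client -o yaml prints the generated Secret without creating it
kubectl create secret generic secretdemo \
  --from-literal=username=admin \
  --from-literal=password='S3cret!' \
  --dry-run=client -o yaml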

Next step: Use Secrets

                                                        "},{"location":"admin/kpanda/configmaps-secrets/use-configmap.html","title":"\u4f7f\u7528\u914d\u7f6e\u9879","text":"

A ConfigMap is a Kubernetes API object used to store non-confidential data in key-value pairs and to hold configuration needed by other objects. Containers can consume it as environment variables, command-line arguments, or configuration files in a storage volume. ConfigMaps separate configuration data from application code, providing a more flexible way to modify application configuration.

                                                        Note

ConfigMaps provide neither confidentiality nor encryption. If the data to be stored is confidential, use a Secret or another third-party tool to keep it private rather than a configmap. Also, when a configmap is used in a container, the container and the configmap must be in the same namespace of the same cluster.

                                                        "},{"location":"admin/kpanda/configmaps-secrets/use-configmap.html#_2","title":"\u4f7f\u7528\u573a\u666f","text":"

                                                        \u60a8\u53ef\u4ee5\u5728 Pod \u4e2d\u4f7f\u7528\u914d\u7f6e\u9879\uff0c\u6709\u591a\u79cd\u4f7f\u7528\u573a\u666f\uff0c\u4e3b\u8981\u5305\u62ec\uff1a

                                                        • \u4f7f\u7528\u914d\u7f6e\u9879\u8bbe\u7f6e\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf

                                                        • \u4f7f\u7528\u914d\u7f6e\u9879\u8bbe\u7f6e\u5bb9\u5668\u7684\u547d\u4ee4\u884c\u53c2\u6570

                                                        • \u4f7f\u7528\u914d\u7f6e\u9879\u4f5c\u4e3a\u5bb9\u5668\u7684\u6570\u636e\u5377

                                                        "},{"location":"admin/kpanda/configmaps-secrets/use-configmap.html#_3","title":"\u8bbe\u7f6e\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf","text":"

                                                        \u60a8\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u5316\u754c\u9762\u6216\u8005\u7ec8\u7aef\u547d\u4ee4\u884c\u6765\u4f7f\u7528\u914d\u7f6e\u9879\u4f5c\u4e3a\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u3002

Note

ConfigMap import uses an entire ConfigMap as the values of environment variables; ConfigMap key-value import uses a single parameter in a ConfigMap as the value of an environment variable.

                                                        "},{"location":"admin/kpanda/configmaps-secrets/use-configmap.html#_4","title":"\u56fe\u5f62\u5316\u754c\u9762\u64cd\u4f5c","text":"

                                                        \u901a\u8fc7\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u53ef\u4ee5\u5728 \u73af\u5883\u53d8\u91cf \u754c\u9762\u901a\u8fc7\u9009\u62e9 \u914d\u7f6e\u9879\u5bfc\u5165 \u6216 \u914d\u7f6e\u9879\u952e\u503c\u5bfc\u5165 \u4e3a\u5bb9\u5668\u8bbe\u7f6e\u73af\u5883\u53d8\u91cf\u3002

                                                        1. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u9875\u9762\u4e2d\uff0c\u5728 \u5bb9\u5668\u914d\u7f6e \u8fd9\u4e00\u6b65\u4e2d\uff0c\u9009\u62e9 \u73af\u5883\u53d8\u91cf \u914d\u7f6e\uff0c\u70b9\u51fb \u6dfb\u52a0\u73af\u5883\u53d8\u91cf \u6309\u94ae\u3002

                                                        2. \u5728\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u5904\u9009\u62e9 \u914d\u7f6e\u9879\u5bfc\u5165 \u6216 \u914d\u7f6e\u9879\u952e\u503c\u5bfc\u5165 \u3002

                                                          • \u5f53\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u9009\u62e9\u4e3a \u914d\u7f6e\u9879\u5bfc\u5165 \u65f6\uff0c\u4f9d\u6b21\u8f93\u5165 \u53d8\u91cf\u540d \u3001 \u524d\u7f00 \u540d\u79f0\u3001 \u914d\u7f6e\u9879 \u7684\u540d\u79f0\u3002

                                                          • \u5f53\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u9009\u62e9\u4e3a \u914d\u7f6e\u9879\u952e\u503c\u5bfc\u5165 \u65f6\uff0c\u4f9d\u6b21\u8f93\u5165 \u53d8\u91cf\u540d \u3001 \u914d\u7f6e\u9879 \u540d\u79f0\u3001 \u952e \u7684\u540d\u79f0\u3002

                                                        "},{"location":"admin/kpanda/configmaps-secrets/use-configmap.html#_5","title":"\u547d\u4ee4\u884c\u64cd\u4f5c","text":"

                                                        \u60a8\u53ef\u4ee5\u5728\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\u5c06\u914d\u7f6e\u9879\u8bbe\u7f6e\u4e3a\u73af\u5883\u53d8\u91cf\uff0c\u4f7f\u7528 valueFrom \u53c2\u6570\u5f15\u7528 ConfigMap \u4e2d\u7684 Key/Value\u3002

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: configmap-pod-1
spec:
  containers:
    - name: test-container
      image: busybox
      command: [ "/bin/sh", "-c", "env" ]
      env:
        - name: SPECIAL_LEVEL_KEY
          valueFrom:                  # (1)!
            configMapKeyRef:
              name: kpanda-configmap  # (2)!
              key: SPECIAL_LEVEL      # (3)!
  restartPolicy: Never
```
1. Use valueFrom to have the env entry reference the ConfigMap's value
2. Name of the referenced ConfigMap (a sketch of it follows)
3. Key of the referenced ConfigMap entry
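For reference, a minimal sketch of the ConfigMap these examples reference; the key names come from the Pod specs in this section, and the sample values are inferred from the Hello Kpanda output shown later:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: kpanda-configmap  # name referenced by the Pod examples in this section
data:
  SPECIAL_LEVEL: Hello    # sample value; echoed by the command-line-args example below
  SPECIAL_TYPE: Kpanda    # sample value
```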
                                                        "},{"location":"admin/kpanda/configmaps-secrets/use-configmap.html#_6","title":"\u8bbe\u7f6e\u5bb9\u5668\u7684\u547d\u4ee4\u884c\u53c2\u6570","text":"

                                                        \u60a8\u53ef\u4ee5\u4f7f\u7528\u914d\u7f6e\u9879\u8bbe\u7f6e\u5bb9\u5668\u4e2d\u7684\u547d\u4ee4\u6216\u8005\u53c2\u6570\u503c\uff0c\u4f7f\u7528\u73af\u5883\u53d8\u91cf\u66ff\u6362\u8bed\u6cd5 $(VAR_NAME) \u6765\u8fdb\u884c\u3002\u5982\u4e0b\u6240\u793a\u3002

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: configmap-pod-3
spec:
  containers:
    - name: test-container
      image: busybox
      command: [ "/bin/sh", "-c", "echo $(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY)" ]
      env:
        - name: SPECIAL_LEVEL_KEY
          valueFrom:
            configMapKeyRef:
              name: kpanda-configmap
              key: SPECIAL_LEVEL
        - name: SPECIAL_TYPE_KEY
          valueFrom:
            configMapKeyRef:
              name: kpanda-configmap
              key: SPECIAL_TYPE
  restartPolicy: Never
```

After this Pod runs, it prints the following output.

```
Hello Kpanda
```
                                                        "},{"location":"admin/kpanda/configmaps-secrets/use-configmap.html#_7","title":"\u7528\u4f5c\u5bb9\u5668\u6570\u636e\u5377","text":"

                                                        \u60a8\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u5316\u754c\u9762\u6216\u8005\u7ec8\u7aef\u547d\u4ee4\u884c\u6765\u4f7f\u7528\u914d\u7f6e\u9879\u4f5c\u4e3a\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u3002

                                                        "},{"location":"admin/kpanda/configmaps-secrets/use-configmap.html#_8","title":"\u56fe\u5f62\u5316\u64cd\u4f5c","text":"

                                                        \u5728\u901a\u8fc7\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u5728 \u6570\u636e\u5b58\u50a8 \u754c\u9762\u9009\u62e9\u5b58\u50a8\u7c7b\u578b\u4e3a \u914d\u7f6e\u9879 \uff0c\u5c06\u914d\u7f6e\u9879\u4f5c\u4e3a\u5bb9\u5668\u7684\u6570\u636e\u5377\u3002

                                                        1. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u9875\u9762\u4e2d\uff0c\u5728 \u5bb9\u5668\u914d\u7f6e \u8fd9\u4e00\u6b65\u4e2d\uff0c\u9009\u62e9 \u6570\u636e\u5b58\u50a8 \u914d\u7f6e\uff0c\u5728 \u8282\u70b9\u8def\u5f84\u6620\u5c04 \u5217\u8868\u70b9\u51fb \u6dfb\u52a0 \u6309\u94ae\u3002

                                                        2. \u5728\u5b58\u50a8\u7c7b\u578b\u5904\u9009\u62e9 \u914d\u7f6e\u9879 \uff0c\u5e76\u4f9d\u6b21\u8f93\u5165 \u5bb9\u5668\u8def\u5f84 \u3001 \u5b50\u8def\u5f84 \u7b49\u4fe1\u606f\u3002

                                                        "},{"location":"admin/kpanda/configmaps-secrets/use-configmap.html#_9","title":"\u547d\u4ee4\u884c\u64cd\u4f5c","text":"

                                                        \u8981\u5728\u4e00\u4e2a Pod \u7684\u5b58\u50a8\u5377\u4e2d\u4f7f\u7528 ConfigMap\u3002

                                                        \u4e0b\u9762\u662f\u4e00\u4e2a\u5c06 ConfigMap \u4ee5\u5377\u7684\u5f62\u5f0f\u8fdb\u884c\u6302\u8f7d\u7684 Pod \u793a\u4f8b\uff1a

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: mypod
spec:
  containers:
  - name: mypod
    image: redis
    volumeMounts:
    - name: foo
      mountPath: "/etc/foo"
      readOnly: true
  volumes:
  - name: foo
    configMap:
      name: myconfigmap
```

If a Pod has multiple containers, each container needs its own volumeMounts block, but for each ConfigMap you only need to set one spec.volumes block, as the sketch below shows.
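For illustration, a minimal sketch of that layout (container names are hypothetical): one spec.volumes entry shared by two containers, each carrying its own volumeMounts block.

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: multi-container-pod    # hypothetical name
spec:
  containers:
  - name: app                  # first container mounts the ConfigMap
    image: redis
    volumeMounts:
    - name: config
      mountPath: "/etc/config"
      readOnly: true
  - name: sidecar              # second container needs its own volumeMounts block
    image: busybox
    command: ["sleep", "3600"]
    volumeMounts:
    - name: config
      mountPath: "/etc/config"
      readOnly: true
  volumes:                     # a single volumes entry serves both containers
  - name: config
    configMap:
      name: myconfigmap
```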

Note

When a ConfigMap is used as a data volume mounted in a container, it can only be read as read-only files.

                                                        "},{"location":"admin/kpanda/configmaps-secrets/use-secret.html","title":"\u4f7f\u7528\u5bc6\u94a5","text":"

                                                        \u5bc6\u94a5\u662f\u4e00\u79cd\u7528\u4e8e\u5b58\u50a8\u548c\u7ba1\u7406\u5bc6\u7801\u3001OAuth \u4ee4\u724c\u3001SSH\u3001TLS \u51ed\u636e\u7b49\u654f\u611f\u4fe1\u606f\u7684\u8d44\u6e90\u5bf9\u8c61\u3002\u4f7f\u7528\u5bc6\u94a5\u610f\u5473\u7740\u60a8\u4e0d\u9700\u8981\u5728\u5e94\u7528\u7a0b\u5e8f\u4ee3\u7801\u4e2d\u5305\u542b\u654f\u611f\u7684\u673a\u5bc6\u6570\u636e\u3002

                                                        "},{"location":"admin/kpanda/configmaps-secrets/use-secret.html#_2","title":"\u4f7f\u7528\u573a\u666f","text":"

                                                        \u60a8\u53ef\u4ee5\u5728 Pod \u4e2d\u4f7f\u7528\u5bc6\u94a5\uff0c\u6709\u591a\u79cd\u4f7f\u7528\u573a\u666f\uff0c\u4e3b\u8981\u5305\u62ec\uff1a

                                                        • \u4f5c\u4e3a\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u4f7f\u7528\uff0c\u63d0\u4f9b\u5bb9\u5668\u8fd0\u884c\u8fc7\u7a0b\u4e2d\u6240\u9700\u7684\u4e00\u4e9b\u5fc5\u8981\u4fe1\u606f\u3002
                                                        • \u4f7f\u7528\u5bc6\u94a5\u4f5c\u4e3a Pod \u7684\u6570\u636e\u5377\u3002
                                                        • \u5728 kubelet \u62c9\u53d6\u5bb9\u5668\u955c\u50cf\u65f6\u7528\u4f5c\u955c\u50cf\u4ed3\u5e93\u7684\u8eab\u4efd\u8ba4\u8bc1\u51ed\u8bc1\u4f7f\u7528\u3002
                                                        "},{"location":"admin/kpanda/configmaps-secrets/use-secret.html#_3","title":"\u4f7f\u7528\u5bc6\u94a5\u8bbe\u7f6e\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf","text":"

                                                        \u60a8\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u5316\u754c\u9762\u6216\u8005\u7ec8\u7aef\u547d\u4ee4\u884c\u6765\u4f7f\u7528\u5bc6\u94a5\u4f5c\u4e3a\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u3002

Note

Secret import uses an entire secret as the values of environment variables; secret key-value import uses a single parameter in a secret as the value of an environment variable.

                                                        "},{"location":"admin/kpanda/configmaps-secrets/use-secret.html#_4","title":"\u56fe\u5f62\u754c\u9762\u64cd\u4f5c","text":"

                                                        \u5728\u901a\u8fc7\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u60a8\u53ef\u4ee5\u5728 \u73af\u5883\u53d8\u91cf \u754c\u9762\u901a\u8fc7\u9009\u62e9 \u5bc6\u94a5\u5bfc\u5165 \u6216 \u5bc6\u94a5\u952e\u503c\u5bfc\u5165 \u4e3a\u5bb9\u5668\u8bbe\u7f6e\u73af\u5883\u53d8\u91cf\u3002

                                                        1. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u9875\u9762\u3002

                                                        2. \u5728 \u5bb9\u5668\u914d\u7f6e \u9009\u62e9 \u73af\u5883\u53d8\u91cf \u914d\u7f6e\uff0c\u70b9\u51fb \u6dfb\u52a0\u73af\u5883\u53d8\u91cf \u6309\u94ae\u3002

                                                        3. \u5728\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u5904\u9009\u62e9 \u5bc6\u94a5\u5bfc\u5165 \u6216 \u5bc6\u94a5\u952e\u503c\u5bfc\u5165 \u3002

                                                          • \u5f53\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u9009\u62e9\u4e3a \u5bc6\u94a5\u5bfc\u5165 \u65f6\uff0c\u4f9d\u6b21\u8f93\u5165 \u53d8\u91cf\u540d \u3001 \u524d\u7f00 \u3001 \u5bc6\u94a5 \u7684\u540d\u79f0\u3002

                                                          • \u5f53\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u9009\u62e9\u4e3a \u5bc6\u94a5\u952e\u503c\u5bfc\u5165 \u65f6\uff0c\u4f9d\u6b21\u8f93\u5165 \u53d8\u91cf\u540d \u3001 \u5bc6\u94a5 \u3001 \u952e \u7684\u540d\u79f0\u3002

                                                        "},{"location":"admin/kpanda/configmaps-secrets/use-secret.html#_5","title":"\u547d\u4ee4\u884c\u64cd\u4f5c","text":"

                                                        \u5982\u4e0b\u4f8b\u6240\u793a\uff0c\u60a8\u53ef\u4ee5\u5728\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\u5c06\u5bc6\u94a5\u8bbe\u7f6e\u4e3a\u73af\u5883\u53d8\u91cf\uff0c\u4f7f\u7528 valueFrom \u53c2\u6570\u5f15\u7528 Secret \u4e2d\u7684 Key/Value\u3002

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: secret-env-pod
spec:
  containers:
  - name: mycontainer
    image: redis
    env:
      - name: SECRET_USERNAME
        valueFrom:
          secretKeyRef:
            name: mysecret
            key: username
            optional: false # (1)!
      - name: SECRET_PASSWORD
        valueFrom:
          secretKeyRef:
            name: mysecret
            key: password
            optional: false # (2)!
```
1. This is the default value; it means "mysecret" must exist and contain a key named "username"
2. This is the default value; it means "mysecret" must exist and contain a key named "password" (a sketch of such a secret follows)
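For reference, a minimal sketch of the mysecret object referenced above (the credential values are placeholders):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: mysecret          # name referenced by secretKeyRef above
type: Opaque
stringData:
  username: admin         # placeholder; consumed as SECRET_USERNAME
  password: changeit      # placeholder; consumed as SECRET_PASSWORD
```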
                                                        "},{"location":"admin/kpanda/configmaps-secrets/use-secret.html#pod","title":"\u4f7f\u7528\u5bc6\u94a5\u4f5c\u4e3a Pod \u7684\u6570\u636e\u5377","text":""},{"location":"admin/kpanda/configmaps-secrets/use-secret.html#_6","title":"\u56fe\u5f62\u754c\u9762\u64cd\u4f5c","text":"

When creating a workload from an image, you can use a secret as a container's data volume by selecting Secret as the storage type on the Data Storage page.

1. Go to the page for creating a workload from an image.

2. In Container Configuration, select the Data Storage configuration, then click the Add button in the Node Path Mapping list.

3. For the storage type, select Secret, and enter the container path, subpath, and other information in turn.

                                                        "},{"location":"admin/kpanda/configmaps-secrets/use-secret.html#_7","title":"\u547d\u4ee4\u884c\u64cd\u4f5c","text":"

                                                        \u4e0b\u9762\u662f\u4e00\u4e2a\u901a\u8fc7\u6570\u636e\u5377\u6765\u6302\u8f7d\u540d\u4e3a mysecret \u7684 Secret \u7684 Pod \u793a\u4f8b\uff1a

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: mypod
spec:
  containers:
  - name: mypod
    image: redis
    volumeMounts:
    - name: foo
      mountPath: "/etc/foo"
      readOnly: true
  volumes:
  - name: foo
    secret:
      secretName: mysecret
      optional: false # (1)!
```
1. Default setting; it means "mysecret" must already exist

If a Pod contains multiple containers, each container needs its own volumeMounts block, but for each Secret only one .spec.volumes entry is required.

                                                        "},{"location":"admin/kpanda/configmaps-secrets/use-secret.html#kubelet","title":"\u5728 kubelet \u62c9\u53d6\u5bb9\u5668\u955c\u50cf\u65f6\u7528\u4f5c\u955c\u50cf\u4ed3\u5e93\u7684\u8eab\u4efd\u8ba4\u8bc1\u51ed\u8bc1","text":"

                                                        \u60a8\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u5316\u754c\u9762\u6216\u8005\u7ec8\u7aef\u547d\u4ee4\u884c\u6765\u4f7f\u7528\u5bc6\u94a5\u4f5c\u4e3a\u955c\u50cf\u4ed3\u5e93\u8eab\u4efd\u8ba4\u8bc1\u51ed\u8bc1\u3002

                                                        "},{"location":"admin/kpanda/configmaps-secrets/use-secret.html#_8","title":"\u56fe\u5f62\u5316\u64cd\u4f5c","text":"

                                                        \u5728\u901a\u8fc7\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u5728 \u6570\u636e\u5b58\u50a8 \u754c\u9762\u9009\u62e9\u5b58\u50a8\u7c7b\u578b\u4e3a \u5bc6\u94a5 \uff0c\u5c06\u5bc6\u94a5\u4f5c\u4e3a\u5bb9\u5668\u7684\u6570\u636e\u5377\u3002

1. Go to the page for creating a workload from an image.

2. In the second step, Container Configuration, select the Basic Information configuration and click the Select Image button.

3. In the pop-up dialog, select the private image registry name from the Image Registry drop-down. For creating a secret for a private image registry, see Create Secrets for details.

4. Enter the name of the image in the private registry and click OK to complete the image selection.

Note

When creating the secret, make sure you enter the correct image registry address, username, and password, and select the correct image name; otherwise the images in the registry cannot be pulled.
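On the command line, the same effect is achieved by referencing the registry secret from the Pod's imagePullSecrets field. A minimal sketch (the secret name and image path are hypothetical; the secret is assumed to be of type kubernetes.io/dockerconfigjson, as described in Create Secrets):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: private-image-pod                         # hypothetical name
spec:
  containers:
  - name: app
    image: registry.example.com/demo/app:latest   # hypothetical private image
  imagePullSecrets:                # kubelet presents this secret when pulling the image
  - name: registry-secret          # assumed kubernetes.io/dockerconfigjson secret
```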

                                                        "},{"location":"admin/kpanda/custom-resources/create.html","title":"\u521b\u5efa\u81ea\u5b9a\u4e49\u8d44\u6e90 (CRD)","text":"

                                                        \u5728 Kubernetes \u4e2d\u4e00\u5207\u5bf9\u8c61\u90fd\u88ab\u62bd\u8c61\u4e3a\u8d44\u6e90\uff0c\u5982 Pod\u3001Deployment\u3001Service\u3001Volume \u7b49\u662f Kubernetes \u63d0\u4f9b\u7684\u9ed8\u8ba4\u8d44\u6e90\uff0c \u8fd9\u4e3a\u6211\u4eec\u7684\u65e5\u5e38\u8fd0\u7ef4\u548c\u7ba1\u7406\u5de5\u4f5c\u63d0\u4f9b\u4e86\u91cd\u8981\u652f\u6491\uff0c\u4f46\u662f\u5728\u4e00\u4e9b\u7279\u6b8a\u7684\u573a\u666f\u4e2d\uff0c\u73b0\u6709\u7684\u9884\u7f6e\u8d44\u6e90\u5e76\u4e0d\u80fd\u6ee1\u8db3\u4e1a\u52a1\u7684\u9700\u8981\uff0c \u56e0\u6b64\u6211\u4eec\u5e0c\u671b\u53bb\u6269\u5c55 Kubernetes API \u7684\u80fd\u529b\uff0c\u81ea\u5b9a\u4e49\u8d44\u6e90\uff08CustomResourceDefinition, CRD\uff09\u6b63\u662f\u57fa\u4e8e\u8fd9\u6837\u7684\u9700\u6c42\u5e94\u8fd0\u800c\u751f\u3002

The container management module supports UI-based management of custom resources, mainly:

• Get the list and details of custom resources in a cluster
• Create custom resources based on YAML
• Create example CRs (Custom Resources) of a custom resource based on YAML
• Delete custom resources
                                                        "},{"location":"admin/kpanda/custom-resources/create.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                        • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762

                                                        • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u5c06\u7528\u6237\u6388\u6743\u4e3a Cluster Admin \u89d2\u8272 \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u6388\u6743

                                                        "},{"location":"admin/kpanda/custom-resources/create.html#yaml","title":"\u901a\u8fc7 YAML \u521b\u5efa\u81ea\u5b9a\u4e49\u8d44\u6e90","text":"
                                                        1. \u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                                        2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u81ea\u5b9a\u4e49\u8d44\u6e90 \uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 YAML \u521b\u5efa \u6309\u94ae\u3002

                                                        3. \u5728 YAML \u521b\u5efa \u9875\u9762\u4e2d\uff0c\u586b\u5199 YAML \u8bed\u53e5\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                                                        4. \u8fd4\u56de\u81ea\u5b9a\u4e49\u8d44\u6e90\u5217\u8868\u9875\uff0c\u5373\u53ef\u67e5\u770b\u521a\u521a\u521b\u5efa\u7684\u540d\u4e3a crontabs.stable.example.com \u7684\u81ea\u5b9a\u4e49\u8d44\u6e90\u3002

                                                        \u81ea\u5b9a\u4e49\u8d44\u6e90\u793a\u4f8b\uff1a

                                                        CRD example
```yaml
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: crontabs.stable.example.com
spec:
  group: stable.example.com
  versions:
    - name: v1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          properties:
            spec:
              type: object
              properties:
                cronSpec:
                  type: string
                image:
                  type: string
                replicas:
                  type: integer
  scope: Namespaced
  names:
    plural: crontabs
    singular: crontab
    kind: CronTab
    shortNames:
    - ct
```
                                                        "},{"location":"admin/kpanda/custom-resources/create.html#yaml_1","title":"\u901a\u8fc7 YAML \u521b\u5efa\u81ea\u5b9a\u4e49\u8d44\u6e90\u793a\u4f8b","text":"
                                                        1. \u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                                        2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u81ea\u5b9a\u4e49\u8d44\u6e90 \uff0c\u8fdb\u5165\u81ea\u5b9a\u4e49\u8d44\u6e90\u5217\u8868\u9875\u9762\u3002

                                                        3. \u70b9\u51fb\u540d\u4e3a crontabs.stable.example.com \u7684\u81ea\u5b9a\u4e49\u8d44\u6e90\uff0c\u8fdb\u5165\u8be6\u60c5\uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 YAML \u521b\u5efa \u6309\u94ae\u3002

                                                        4. \u5728 YAML \u521b\u5efa \u9875\u9762\u4e2d\uff0c\u586b\u5199 YAML \u8bed\u53e5\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                                                        5. \u8fd4\u56de crontabs.stable.example.com \u7684\u8be6\u60c5\u9875\u9762\uff0c\u5373\u53ef\u67e5\u770b\u521a\u521a\u521b\u5efa\u7684\u540d\u4e3a my-new-cron-object \u7684\u81ea\u5b9a\u4e49\u8d44\u6e90\u3002

                                                        CR \u793a\u4f8b\uff1a

                                                        CR example
                                                        apiVersion: \"stable.example.com/v1\"\nkind: CronTab\nmetadata:\n  name: my-new-cron-object\nspec:\n  cronSpec: \"* * * * */5\"\n  image: my-awesome-cron-image\n
                                                        "},{"location":"admin/kpanda/gpu/index.html","title":"GPU \u7ba1\u7406\u6982\u8ff0","text":"

                                                        \u672c\u6587\u4ecb\u7ecd \u7b97\u4e30 AI \u7b97\u529b\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\u5bf9 GPU\u4e3a\u4ee3\u8868\u7684\u5f02\u6784\u8d44\u6e90\u7edf\u4e00\u8fd0\u7ef4\u7ba1\u7406\u80fd\u529b\u3002

                                                        "},{"location":"admin/kpanda/gpu/index.html#_1","title":"\u80cc\u666f","text":"

                                                        \u968f\u7740 AI \u5e94\u7528\u3001\u5927\u6a21\u578b\u3001\u4eba\u5de5\u667a\u80fd\u3001\u81ea\u52a8\u9a7e\u9a76\u7b49\u65b0\u5174\u6280\u672f\u7684\u5feb\u901f\u53d1\u5c55\uff0c\u4f01\u4e1a\u9762\u4e34\u7740\u8d8a\u6765\u8d8a\u591a\u7684\u8ba1\u7b97\u5bc6\u96c6\u578b\u4efb\u52a1\u548c\u6570\u636e\u5904\u7406\u9700\u6c42\u3002 \u4ee5 CPU \u4e3a\u4ee3\u8868\u7684\u4f20\u7edf\u8ba1\u7b97\u67b6\u6784\u5df2\u65e0\u6cd5\u6ee1\u8db3\u4f01\u4e1a\u65e5\u76ca\u589e\u957f\u7684\u8ba1\u7b97\u9700\u6c42\u3002\u6b64\u65f6\uff0c\u4ee5 GPU \u4e3a\u4ee3\u8868\u7684\u5f02\u6784\u8ba1\u7b97\u56e0\u5728\u5904\u7406\u5927\u89c4\u6a21\u6570\u636e\u3001\u8fdb\u884c\u590d\u6742\u8ba1\u7b97\u548c\u5b9e\u65f6\u56fe\u5f62\u6e32\u67d3\u65b9\u9762\u5177\u6709\u72ec\u7279\u7684\u4f18\u52bf\u88ab\u5e7f\u6cdb\u5e94\u7528\u3002

                                                        \u4e0e\u6b64\u540c\u65f6\uff0c\u7531\u4e8e\u7f3a\u4e4f\u5f02\u6784\u8d44\u6e90\u8c03\u5ea6\u7ba1\u7406\u7b49\u65b9\u9762\u7684\u7ecf\u9a8c\u548c\u4e13\u4e1a\u7684\u89e3\u51b3\u65b9\u6848\uff0c\u5bfc\u81f4\u4e86 GPU \u8bbe\u5907\u7684\u8d44\u6e90\u5229\u7528\u7387\u6781\u4f4e\uff0c\u7ed9\u4f01\u4e1a\u5e26\u6765\u4e86\u9ad8\u6602\u7684 AI \u751f\u4ea7\u6210\u672c\u3002 \u5982\u4f55\u964d\u672c\u589e\u6548\uff0c\u63d0\u9ad8 GPU \u7b49\u5f02\u6784\u8d44\u6e90\u7684\u5229\u7528\u6548\u7387\uff0c\u6210\u4e3a\u4e86\u5f53\u524d\u4f17\u591a\u4f01\u4e1a\u4e9f\u9700\u8de8\u8d8a\u7684\u4e00\u9053\u96be\u9898\u3002

                                                        "},{"location":"admin/kpanda/gpu/index.html#gpu_1","title":"GPU \u80fd\u529b\u4ecb\u7ecd","text":"

                                                        \u7b97\u4e30 AI \u7b97\u529b\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\u652f\u6301\u5bf9 GPU\u3001NPU \u7b49\u5f02\u6784\u8d44\u6e90\u8fdb\u884c\u7edf\u4e00\u8c03\u5ea6\u548c\u8fd0\u7ef4\u7ba1\u7406\uff0c\u5145\u5206\u91ca\u653e GPU \u8d44\u6e90\u7b97\u529b\uff0c\u52a0\u901f\u4f01\u4e1a AI \u7b49\u65b0\u5174\u5e94\u7528\u53d1\u5c55\u3002GPU \u7ba1\u7406\u80fd\u529b\u5982\u4e0b\uff1a

                                                        • \u652f\u6301\u7edf\u4e00\u7eb3\u7ba1 NVIDIA\u3001\u534e\u4e3a\u6607\u817e\u3001\u5929\u6570\u7b49\u56fd\u5185\u5916\u5382\u5546\u7684\u5f02\u6784\u8ba1\u7b97\u8d44\u6e90\u3002
                                                        • \u652f\u6301\u540c\u4e00\u96c6\u7fa4\u591a\u5361\u5f02\u6784\u8c03\u5ea6\uff0c\u5e76\u652f\u6301\u96c6\u7fa4 GPU \u5361\u81ea\u52a8\u8bc6\u522b\u3002
                                                        • \u652f\u6301 NVIDIA GPU\u3001vGPU\u3001MIG \u7b49 GPU \u539f\u751f\u7ba1\u7406\u65b9\u6848\uff0c\u5e76\u63d0\u4f9b\u4e91\u539f\u751f\u80fd\u529b\u3002
                                                        • \u652f\u6301\u5355\u5757\u7269\u7406\u5361\u5207\u5206\u7ed9\u4e0d\u540c\u7684\u79df\u6237\u4f7f\u7528\uff0c\u5e76\u652f\u6301\u5bf9\u79df\u6237\u548c\u5bb9\u5668\u4f7f\u7528 GPU \u8d44\u6e90\u6309\u7167\u7b97\u529b\u3001\u663e\u5b58\u8fdb\u884c GPU \u8d44\u6e90\u914d\u989d\u3002
                                                        • \u652f\u6301\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u5e94\u7528\u7b49\u591a\u7ef4\u5ea6 GPU \u8d44\u6e90\u76d1\u63a7\uff0c\u5e2e\u52a9\u8fd0\u7ef4\u4eba\u5458\u7ba1\u7406 GPU \u8d44\u6e90\u3002
                                                        • \u517c\u5bb9 TensorFlow\u3001pytorch \u7b49\u591a\u79cd\u8bad\u7ec3\u6846\u67b6\u3002
                                                        "},{"location":"admin/kpanda/gpu/index.html#gpu-operator","title":"GPU Operator \u4ecb\u7ecd","text":"

                                                        \u540c\u666e\u901a\u8ba1\u7b97\u673a\u786c\u4ef6\u4e00\u6837\uff0cNVIDIA GPU \u5361\u4f5c\u4e3a\u7269\u7406\u786c\u4ef6\uff0c\u5fc5\u987b\u5b89\u88c5 NVIDIA GPU \u9a71\u52a8\u540e\u624d\u80fd\u4f7f\u7528\u3002 \u4e3a\u4e86\u964d\u4f4e\u7528\u6237\u5728 kuberneets \u4e0a\u4f7f\u7528 GPU \u7684\u6210\u672c\uff0cNVIDIA \u5b98\u65b9\u63d0\u4f9b\u4e86 NVIDIA GPU Operator \u7ec4\u4ef6\u6765\u7ba1\u7406\u4f7f\u7528 NVIDIA GPU \u6240\u4f9d\u8d56\u7684\u5404\u79cd\u7ec4\u4ef6\u3002 \u8fd9\u4e9b\u7ec4\u4ef6\u5305\u62ec NVIDIA \u9a71\u52a8\u7a0b\u5e8f\uff08\u7528\u4e8e\u542f\u7528 CUDA\uff09\u3001NVIDIA \u5bb9\u5668\u8fd0\u884c\u65f6\u3001GPU \u8282\u70b9\u6807\u8bb0\u3001\u57fa\u4e8e DCGM \u7684\u76d1\u63a7\u7b49\u3002 \u7406\u8bba\u4e0a\u6765\u8bf4\u7528\u6237\u53ea\u9700\u8981\u5c06 GPU \u5361\u63d2\u5728\u5df2\u7ecf\u88ab kubernetes \u6240\u7eb3\u7ba1\u7684\u8ba1\u7b97\u8bbe\u5907\u4e0a\uff0c\u7136\u540e\u901a\u8fc7 GPU Operator \u5c31\u80fd\u4f7f\u7528 NVIDIA GPU \u7684\u6240\u6709\u80fd\u529b\u4e86\u3002 \u4e86\u89e3\u66f4\u591a NVIDIA GPU Operator \u76f8\u5173\u4fe1\u606f\uff0c\u8bf7\u53c2\u8003 NVIDIA \u5b98\u65b9\u6587\u6863\u3002 \u5982\u4f55\u90e8\u7f72\u8bf7\u53c2\u8003 GPU Operator \u79bb\u7ebf\u5b89\u88c5

                                                        NVIDIA GPU Operator \u67b6\u6784\u56fe\uff1a

                                                        "},{"location":"admin/kpanda/gpu/FAQ.html","title":"GPU \u76f8\u5173 FAQ","text":""},{"location":"admin/kpanda/gpu/FAQ.html#pod-nvidia-smi-gpu","title":"Pod \u5185 nvidia-smi \u770b\u4e0d\u5230 GPU \u8fdb\u7a0b","text":"

                                                        Q: \u5728\u4f7f\u7528 GPU \u7684 Pod \u5185\u6267\u884c nvidia-smi \u547d\u4ee4\u770b\u4e0d\u5230\u4f7f\u7528 GPU \u7684\u8fdb\u7a0b\u4fe1\u606f\uff0c\u5305\u62ec\u6574\u5361\u6a21\u5f0f\u3001vGPU \u6a21\u5f0f\u7b49\u3002

                                                        A: \u56e0\u4e3a\u6709 PID namespace \u9694\u79bb\uff0c\u5bfc\u81f4\u5728 Pod \u5185\u67e5\u770b\u4e0d\u5230 GPU \u8fdb\u7a0b\uff0c\u5982\u679c\u8981\u67e5\u770b GPU \u8fdb\u7a0b\u6709\u5982\u4e0b\u51e0\u79cd\u65b9\u6cd5\uff1a

                                                        • \u5728\u4f7f\u7528 GPU \u7684\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e hostPID: true\uff0c\u4f7f\u5176\u53ef\u4ee5\u67e5\u770b\u5230\u5bbf\u4e3b\u673a\u4e0a\u7684 PID
                                                        • \u5728 gpu-operator \u7684 driver Pod \u4e2d\u6267\u884c nvidia-smi \u547d\u4ee4\u67e5\u770b\u8fdb\u7a0b
                                                        • \u5728\u5bbf\u4e3b\u673a\u4e0a\u6267\u884c chroot /run/nvidia/driver nvidia-smi \u547d\u4ee4\u67e5\u770b\u8fdb\u7a0b
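A minimal sketch of the first approach (the Pod name and image tag are assumptions; hostPID: true shares the host PID namespace so nvidia-smi inside the Pod can see GPU processes):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: gpu-demo            # hypothetical name
spec:
  hostPID: true             # share the host PID namespace so GPU processes are visible
  containers:
  - name: cuda
    image: nvidia/cuda:12.2.0-base-ubuntu22.04   # assumed CUDA base image
    command: ["sleep", "infinity"]
    resources:
      limits:
        nvidia.com/gpu: 1   # request one full GPU
```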
                                                        "},{"location":"admin/kpanda/gpu/Iluvatar_usage.html","title":"App \u4f7f\u7528\u5929\u6570\u667a\u82af\uff08Iluvatar\uff09GPU","text":"

                                                        \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528\u5929\u6570\u667a\u82af\u865a\u62df GPU\u3002

                                                        "},{"location":"admin/kpanda/gpu/Iluvatar_usage.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                        • \u5df2\u7ecf\u90e8\u7f72 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 \u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u4e14\u5e73\u53f0\u8fd0\u884c\u6b63\u5e38\u3002
                                                        • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                                                        • \u5f53\u524d\u96c6\u7fa4\u5df2\u5b89\u88c5\u5929\u6570\u667a\u82af GPU \u9a71\u52a8\uff0c\u9a71\u52a8\u5b89\u88c5\u8bf7\u53c2\u8003\u5929\u6570\u667a\u82af\u5b98\u65b9\u6587\u6863\u3002
                                                        • \u5f53\u524d\u96c6\u7fa4\u5185 GPU \u5361\u672a\u8fdb\u884c\u4efb\u4f55\u865a\u62df\u5316\u64cd\u4f5c\u4e14\u672a\u88ab\u5176\u5b83 App \u5360\u7528\u3002
                                                        "},{"location":"admin/kpanda/gpu/Iluvatar_usage.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"admin/kpanda/gpu/Iluvatar_usage.html#_3","title":"\u4f7f\u7528\u754c\u9762\u914d\u7f6e","text":"
                                                        1. \u786e\u8ba4\u96c6\u7fa4\u662f\u5426\u5df2\u68c0\u6d4b GPU \u5361\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u81ea\u52a8\u542f\u7528\u5e76\u81ea\u52a8\u68c0\u6d4b\u5bf9\u5e94 GPU \u7c7b\u578b\u3002 \u76ee\u524d\u96c6\u7fa4\u4f1a\u81ea\u52a8\u542f\u7528 GPU \uff0c\u5e76\u4e14\u8bbe\u7f6e GPU \u7c7b\u578b\u4e3a Iluvatar \u3002

                                                        2. \u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u9009\u62e9\u7c7b\u578b\uff08Iluvatar\uff09\u4e4b\u540e\uff0c\u9700\u8981\u914d\u7f6e App \u4f7f\u7528\u7684 GPU \u8d44\u6e90\uff1a

                                                          • \u7269\u7406\u5361\u6570\u91cf\uff08iluvatar.ai/vcuda-core\uff09\uff1a\u8868\u793a\u5f53\u524d Pod \u9700\u8981\u6302\u8f7d\u51e0\u5f20\u7269\u7406\u5361\uff0c\u8f93\u5165\u503c\u5fc5\u987b\u4e3a\u6574\u6570\u4e14 \u5c0f\u4e8e\u7b49\u4e8e \u5bbf\u4e3b\u673a\u4e0a\u7684\u5361\u6570\u91cf\u3002
                                                          • \u663e\u5b58\u4f7f\u7528\u6570\u91cf\uff08iluvatar.ai/vcuda-memory\uff09\uff1a\u8868\u793a\u6bcf\u5f20\u5361\u5360\u7528\u7684 GPU \u663e\u5b58\uff0c\u503c\u5355\u4f4d\u4e3a MB\uff0c\u6700\u5c0f\u503c\u4e3a 1\uff0c\u6700\u5927\u503c\u4e3a\u6574\u5361\u7684\u663e\u5b58\u503c\u3002

                                                          \u5982\u679c\u4e0a\u8ff0\u503c\u914d\u7f6e\u7684\u6709\u95ee\u9898\u5219\u4f1a\u51fa\u73b0\u8c03\u5ea6\u5931\u8d25\uff0c\u8d44\u6e90\u5206\u914d\u4e0d\u4e86\u7684\u60c5\u51b5\u3002

                                                        "},{"location":"admin/kpanda/gpu/Iluvatar_usage.html#yaml","title":"\u4f7f\u7528 YAML \u914d\u7f6e","text":"

                                                        \u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u7533\u8bf7 GPU \u8d44\u6e90\uff0c\u5728\u8d44\u6e90\u7533\u8bf7\u548c\u9650\u5236\u914d\u7f6e\u4e2d\u589e\u52a0iluvatar.ai/vcuda-core: 1\u3001iluvatar.ai/vcuda-memory: 200 \u53c2\u6570\uff0c\u914d\u7f6e App \u4f7f\u7528\u7269\u7406\u5361\u7684\u8d44\u6e90\u3002

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: full-iluvatar-gpu-demo
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: full-iluvatar-gpu-demo
  template:
    metadata:
      labels:
        app: full-iluvatar-gpu-demo
    spec:
      containers:
      - image: nginx:perl
        name: container-0
        resources:
          limits:
            cpu: 250m
            iluvatar.ai/vcuda-core: '1'
            iluvatar.ai/vcuda-memory: '200'
            memory: 512Mi
          requests:
            cpu: 250m
            memory: 512Mi
      imagePullSecrets:
      - name: default-secret
```
                                                        "},{"location":"admin/kpanda/gpu/dynamic-regulation.html","title":"GPU \u8d44\u6e90\u52a8\u6001\u8c03\u8282","text":"

                                                        \u63d0\u4f9b GPU \u8d44\u6e90\u52a8\u6001\u8c03\u6574\u529f\u80fd\uff0c\u5141\u8bb8\u60a8\u5728\u65e0\u9700\u91cd\u65b0\u52a0\u8f7d\u3001\u91cd\u7f6e\u6216\u91cd\u542f\u6574\u4e2a\u8fd0\u884c\u73af\u5883\u7684\u60c5\u51b5\u4e0b\uff0c\u5bf9\u5df2\u7ecf\u5206\u914d\u7684 vGPU \u8d44\u6e90\u8fdb\u884c\u5b9e\u65f6\u3001\u52a8\u6001\u7684\u8c03\u6574\u3002 \u8fd9\u4e00\u529f\u80fd\u65e8\u5728\u6700\u5927\u7a0b\u5ea6\u5730\u51cf\u5c11\u5bf9\u4e1a\u52a1\u8fd0\u884c\u7684\u5f71\u54cd\uff0c\u786e\u4fdd\u60a8\u7684\u4e1a\u52a1\u80fd\u591f\u6301\u7eed\u7a33\u5b9a\u5730\u8fd0\u884c\uff0c\u540c\u65f6\u6839\u636e\u5b9e\u9645\u9700\u6c42\u7075\u6d3b\u8c03\u6574 GPU \u8d44\u6e90\u3002

                                                        "},{"location":"admin/kpanda/gpu/dynamic-regulation.html#_1","title":"\u4f7f\u7528\u573a\u666f","text":"
                                                        • \u5f39\u6027\u8d44\u6e90\u5206\u914d \uff1a\u5f53\u4e1a\u52a1\u9700\u6c42\u6216\u5de5\u4f5c\u8d1f\u8f7d\u53d1\u751f\u53d8\u5316\u65f6\uff0c\u53ef\u4ee5\u5feb\u901f\u8c03\u6574 GPU \u8d44\u6e90\u4ee5\u6ee1\u8db3\u65b0\u7684\u6027\u80fd\u8981\u6c42\u3002
                                                        • \u5373\u65f6\u54cd\u5e94 \uff1a\u5728\u9762\u5bf9\u7a81\u53d1\u7684\u9ad8\u8d1f\u8f7d\u6216\u4e1a\u52a1\u9700\u6c42\u65f6\uff0c\u53ef\u4ee5\u8fc5\u901f\u589e\u52a0 GPU \u8d44\u6e90\u800c\u65e0\u9700\u4e2d\u65ad\u4e1a\u52a1\u8fd0\u884c\uff0c\u4ee5\u786e\u4fdd\u670d\u52a1\u7684\u7a33\u5b9a\u6027\u548c\u6027\u80fd\u3002
                                                        "},{"location":"admin/kpanda/gpu/dynamic-regulation.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                        \u4ee5\u4e0b\u662f\u4e00\u4e2a\u5177\u4f53\u7684\u64cd\u4f5c\u793a\u4f8b\uff0c\u5c55\u793a\u5982\u4f55\u5728\u4e0d\u91cd\u542f vGPU Pod \u7684\u60c5\u51b5\u4e0b\u52a8\u6001\u8c03\u6574 vGPU \u7684\u7b97\u529b\u548c\u663e\u5b58\u8d44\u6e90\uff1a

                                                        "},{"location":"admin/kpanda/gpu/dynamic-regulation.html#vgpu-pod","title":"\u521b\u5efa\u4e00\u4e2a vGPU Pod","text":"

                                                        \u9996\u5148\uff0c\u6211\u4eec\u4f7f\u7528\u4ee5\u4e0b YAML \u521b\u5efa\u4e00\u4e2a vGPU Pod\uff0c\u5176\u7b97\u529b\u521d\u59cb\u4e0d\u9650\u5236\uff0c\u663e\u5b58\u9650\u5236\u4e3a 200Mb\u3002

```yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: gpu-burn-test
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: gpu-burn-test
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: gpu-burn-test
    spec:
      containers:
        - name: container-1
          image: docker.io/chrstnhntschl/gpu_burn:latest
          command:
            - sleep
            - '100000'
          resources:
            limits:
              cpu: 1m
              memory: 1Gi
              nvidia.com/gpucores: '0'
              nvidia.com/gpumem: '200'
              nvidia.com/vgpu: '1'
```

Before the adjustment, check the GPU resources allocated to the Pod:

                                                        "},{"location":"admin/kpanda/gpu/dynamic-regulation.html#_3","title":"\u52a8\u6001\u8c03\u6574\u7b97\u529b","text":"

                                                        \u5982\u679c\u9700\u8981\u4fee\u6539\u7b97\u529b\u4e3a 10%\uff0c\u53ef\u4ee5\u6309\u7167\u4ee5\u4e0b\u6b65\u9aa4\u64cd\u4f5c\uff1a

                                                        1. \u8fdb\u5165\u5bb9\u5668\uff1a

                                                          kubectl exec -it <pod-name> -- /bin/bash\n
                                                        2. \u6267\u884c\uff1a

                                                          export CUDA_DEVICE_SM_LIMIT=10\n
                                                        3. \u5728\u5f53\u524d\u7ec8\u7aef\u76f4\u63a5\u8fd0\u884c\uff1a

                                                          ./gpu_burn 60\n

                                                          \u7a0b\u5e8f\u5373\u53ef\u751f\u6548\u3002\u6ce8\u610f\uff0c\u4e0d\u80fd\u9000\u51fa\u5f53\u524d Bash \u7ec8\u7aef\u3002

                                                        "},{"location":"admin/kpanda/gpu/dynamic-regulation.html#_4","title":"\u52a8\u6001\u8c03\u6574\u663e\u5b58","text":"

                                                        \u5982\u679c\u9700\u8981\u4fee\u6539\u663e\u5b58\u4e3a 300 MB\uff0c\u53ef\u4ee5\u6309\u7167\u4ee5\u4e0b\u6b65\u9aa4\u64cd\u4f5c\uff1a

                                                        1. \u8fdb\u5165\u5bb9\u5668\uff1a

                                                          kubectl exec -it <pod-name> -- /bin/bash\n
                                                        2. \u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u6765\u8bbe\u7f6e\u663e\u5b58\u9650\u5236\uff1a

                                                          export CUDA_DEVICE_MEMORY_LIMIT_0=300m\nexport CUDA_DEVICE_MEMORY_SHARED_CACHE=/usr/local/vgpu/d.cache\n

                                                          Note

                                                          \u6bcf\u6b21\u4fee\u6539\u663e\u5b58\u5927\u5c0f\u65f6\uff0cd.cache \u8fd9\u4e2a\u6587\u4ef6\u540d\u5b57\u90fd\u9700\u8981\u4fee\u6539\uff0c\u6bd4\u5982\u6539\u4e3a a.cache\u30011.cache \u7b49\uff0c\u4ee5\u907f\u514d\u7f13\u5b58\u51b2\u7a81\u3002

                                                        3. \u5728\u5f53\u524d\u7ec8\u7aef\u76f4\u63a5\u8fd0\u884c\uff1a

                                                          ./gpu_burn 60\n

                                                          \u7a0b\u5e8f\u5373\u53ef\u751f\u6548\u3002\u540c\u6837\u5730\uff0c\u4e0d\u80fd\u9000\u51fa\u5f53\u524d Bash \u7ec8\u7aef\u3002

After the adjustment, check the GPU resources allocated to the Pod:

Through the steps above, you can dynamically adjust a vGPU Pod's compute power and GPU memory without restarting it, meeting business needs more flexibly and optimizing resource utilization.

                                                        "},{"location":"admin/kpanda/gpu/gpu_matrix.html","title":"GPU \u652f\u6301\u77e9\u9635","text":"

                                                        \u672c\u9875\u8bf4\u660e\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u7684 GPU \u53ca\u64cd\u4f5c\u7cfb\u7edf\u6240\u5bf9\u5e94\u7684\u77e9\u9635\u3002

                                                        "},{"location":"admin/kpanda/gpu/gpu_matrix.html#nvidia-gpu","title":"NVIDIA GPU","text":"GPU \u5382\u5546\u53ca\u7c7b\u578b \u652f\u6301 GPU \u578b\u53f7 \u9002\u914d\u7684\u64cd\u4f5c\u7cfb\u7edf\uff08\u5728\u7ebf\uff09 \u63a8\u8350\u5185\u6838 \u63a8\u8350\u7684\u64cd\u4f5c\u7cfb\u7edf\u53ca\u5185\u6838 \u5b89\u88c5\u6587\u6863 NVIDIA GPU\uff08\u6574\u5361/vGPU\uff09 NVIDIA Fermi (2.1) \u67b6\u6784 CentOS 7 Kernel 3.10.0-123 ~ 3.10.0-1160\u5185\u6838\u53c2\u8003\u6587\u6863\u5efa\u8bae\u4f7f\u7528\u64cd\u4f5c\u7cfb\u7edf\u5bf9\u5e94 Kernel \u7248\u672c \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a 3.10.0-1160 GPU Operator \u79bb\u7ebf\u5b89\u88c5 NVIDIA GeForce 400 \u7cfb\u5217 CentOS 8 Kernel 4.18.0-80 ~ 4.18.0-348 NVIDIA Quadro 4000 \u7cfb\u5217 Ubuntu 20.04 Kernel 5.4 NVIDIA Tesla 20 \u7cfb\u5217 Ubuntu 22.04 Kernel 5.19 NVIDIA Ampere \u67b6\u6784\u7cfb\u5217(A100;A800;H100) RHEL 7 Kernel 3.10.0-123 ~ 3.10.0-1160 RHEL 8 Kernel 4.18.0-80 ~ 4.18.0-348 NVIDIA MIG NVIDIA Ampere \u67b6\u6784\u7cfb\u5217\uff08A100\u3001A800\u3001H100\uff09 CentOS 7 Kernel 3.10.0-123 ~ 3.10.0-1160 \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a3.10.0-1160 GPU Operator \u79bb\u7ebf\u5b89\u88c5 CentOS 8 Kernel 4.18.0-80 ~ 4.18.0-348 Ubuntu 20.04 Kernel 5.4 Ubuntu 22.04 Kernel 5.19 RHEL 7 Kernel 3.10.0-123 ~ 3.10.0-1160 RHEL 8 Kernel 4.18.0-80 ~ 4.18.0-348"},{"location":"admin/kpanda/gpu/gpu_matrix.html#ascendnpu","title":"\u6607\u817e\uff08Ascend\uff09NPU","text":"GPU \u5382\u5546\u53ca\u7c7b\u578b \u652f\u6301 NPU \u578b\u53f7 \u9002\u914d\u7684\u64cd\u4f5c\u7cfb\u7edf\uff08\u5728\u7ebf\uff09 \u63a8\u8350\u5185\u6838 \u63a8\u8350\u7684\u64cd\u4f5c\u7cfb\u7edf\u53ca\u5185\u6838 \u5b89\u88c5\u6587\u6863 \u6607\u817e\uff08Ascend 310\uff09 Ascend 310 Ubuntu 20.04 \u8be6\u60c5\u53c2\u8003\uff1a\u5185\u6838\u7248\u672c\u8981\u6c42 \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a3.10.0-1160 300 \u548c 310P \u9a71\u52a8\u6587\u6863 Ascend 310P\uff1b CentOS 7.6 CentOS 8.2 KylinV10SP1 \u64cd\u4f5c\u7cfb\u7edf openEuler \u64cd\u4f5c\u7cfb\u7edf \u6607\u817e\uff08Ascend 910\uff09 Ascend 910B Ubuntu 20.04 \u8be6\u60c5\u53c2\u8003\u5185\u6838\u7248\u672c\u8981\u6c42 \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a3.10.0-1160 910 \u9a71\u52a8\u6587\u6863 CentOS 7.6 CentOS 8.2 KylinV10SP1 \u64cd\u4f5c\u7cfb\u7edf openEuler \u64cd\u4f5c\u7cfb\u7edf"},{"location":"admin/kpanda/gpu/gpu_matrix.html#iluvatargpu","title":"\u5929\u6570\u667a\u82af\uff08Iluvatar\uff09GPU","text":"GPU \u5382\u5546\u53ca\u7c7b\u578b \u652f\u6301\u7684 GPU \u578b\u53f7 \u9002\u914d\u7684\u64cd\u4f5c\u7cfb\u7edf\uff08\u5728\u7ebf\uff09 \u63a8\u8350\u5185\u6838 \u63a8\u8350\u7684\u64cd\u4f5c\u7cfb\u7edf\u53ca\u5185\u6838 \u5b89\u88c5\u6587\u6863 \u5929\u6570\u667a\u82af(Iluvatar vGPU) BI100 CentOS 7 Kernel 3.10.0-957.el7.x86_64 ~ 3.10.0-1160.42.2.el7.x86_64 \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a 3.10.0-1160 \u8865\u5145\u4e2d MR100\uff1b CentOS 8 Kernel 4.18.0-80.el8.x86_64 ~ 4.18.0-305.19.1.el8_4.x86_64 Ubuntu 20.04 Kernel 4.15.0-20-generic ~ 4.15.0-160-generic Kernel 5.4.0-26-generic ~ 5.4.0-89-generic Kernel 5.8.0-23-generic ~ 5.8.0-63-generic Ubuntu 21.04 Kernel 4.15.0-20-generic ~ 4.15.0-160-generic Kernel 5.4.0-26-generic ~ 5.4.0-89-generic Kernel 5.8.0-23-generic ~ 5.8.0-63-generic openEuler 22.03 LTS Kernel 
\u7248\u672c\u5927\u4e8e\u7b49\u4e8e 5.1 \u4e14\u5c0f\u4e8e\u7b49\u4e8e 5.10"},{"location":"admin/kpanda/gpu/gpu_matrix.html#metaxgpu","title":"\u6c90\u66e6\uff08Metax\uff09GPU","text":"GPU \u5382\u5546\u53ca\u7c7b\u578b \u652f\u6301\u7684 GPU \u578b\u53f7 \u9002\u914d\u7684\u64cd\u4f5c\u7cfb\u7edf\uff08\u5728\u7ebf\uff09 \u63a8\u8350\u5185\u6838 \u63a8\u8350\u7684\u64cd\u4f5c\u7cfb\u7edf\u53ca\u5185\u6838 \u5b89\u88c5\u6587\u6863 \u6c90\u66e6Metax\uff08\u6574\u5361/vGPU\uff09 \u66e6\u4e91 C500 \u6c90\u66e6 GPU \u5b89\u88c5\u4f7f\u7528"},{"location":"admin/kpanda/gpu/gpu_scheduler_config.html","title":"GPU \u8c03\u5ea6\u914d\u7f6e\uff08Binpack \u548c Spread \uff09","text":"

This article describes how, when using NVIDIA vGPU, the Binpack and Spread GPU scheduling configurations reduce GPU resource fragmentation, prevent single points of failure, and enable advanced vGPU scheduling. The 算丰 AI computing platform provides Binpack and Spread scheduling policies at two levels, cluster and workload, to meet the needs of different scenarios.

                                                        "},{"location":"admin/kpanda/gpu/gpu_scheduler_config.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                        • \u96c6\u7fa4\u8282\u70b9\u4e0a\u5df2\u6b63\u786e\u5b89\u88c5 GPU \u8bbe\u5907\u3002
                                                        • \u96c6\u7fa4\u4e2d\u5df2\u6b63\u786e\u5b89\u88c5 gpu-operator \u7ec4\u4ef6 \u548c Nvidia-vgpu \u7ec4\u4ef6\u3002
                                                        • \u96c6\u7fa4\u8282\u70b9\u5217\u8868\u4e2d\uff0cGPU \u6a21\u5f0f\u4e0b\u5b58\u5728 NVIDIA-vGPU \u7c7b\u578b\u3002
                                                        "},{"location":"admin/kpanda/gpu/gpu_scheduler_config.html#_2","title":"\u4f7f\u7528\u573a\u666f","text":"
                                                        • \u57fa\u4e8e GPU \u5361\u7ef4\u5ea6\u8c03\u5ea6\u7b56\u7565

                                                          • Binpack\uff1a\u4f18\u5148\u9009\u62e9\u8282\u70b9\u7684\u540c\u4e00\u5f20 GPU \u5361\uff0c\u9002\u7528\u4e8e\u63d0\u9ad8 GPU \u5229\u7528\u7387\uff0c\u51cf\u5c11\u8d44\u6e90\u788e\u7247\u3002
                                                          • Spread\uff1a\u591a\u4e2a Pod \u4f1a\u5206\u6563\u5728\u8282\u70b9\u7684\u4e0d\u540c GPU \u5361\u4e0a\uff0c\u9002\u7528\u4e8e\u9ad8\u53ef\u7528\u573a\u666f\uff0c\u907f\u514d\u5355\u5361\u6545\u969c\u3002
                                                        • \u57fa\u4e8e\u8282\u70b9\u7ef4\u5ea6\u7684\u8c03\u5ea6\u7b56\u7565

                                                          • Binpack\uff1a \u591a\u4e2a Pod \u4f1a\u4f18\u5148\u9009\u62e9\u540c\u4e00\u4e2a\u8282\u70b9\uff0c\u9002\u7528\u4e8e\u63d0\u9ad8 GPU \u5229\u7528\u7387\uff0c\u51cf\u5c11\u8d44\u6e90\u788e\u7247\u3002
                                                          • Spread\uff1a\u591a\u4e2a Pod \u4f1a\u5206\u6563\u5728\u4e0d\u540c\u8282\u70b9\u4e0a\uff0c\u9002\u7528\u4e8e\u9ad8\u53ef\u7528\u573a\u666f\uff0c\u907f\u514d\u5355\u8282\u70b9\u6545\u969c\u3002
                                                        "},{"location":"admin/kpanda/gpu/gpu_scheduler_config.html#binpack-spread","title":"\u96c6\u7fa4\u7ef4\u5ea6\u4f7f\u7528 Binpack \u548c Spread \u8c03\u5ea6\u914d\u7f6e","text":"

                                                        Note

                                                        \u9ed8\u8ba4\u60c5\u51b5\u4e0b\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u4f1a\u9075\u5faa\u96c6\u7fa4\u7ea7\u522b\u7684 Binpack \u548c Spread \u8c03\u5ea6\u914d\u7f6e\u3002 \u82e5\u5de5\u4f5c\u8d1f\u8f7d\u5355\u72ec\u8bbe\u7f6e\u4e86\u4e0e\u96c6\u7fa4\u4e0d\u4e00\u81f4\u7684 Binpack \u548c Spread \u8c03\u5ea6\u7b56\u7565\uff0c\u5219\u8be5\u5de5\u4f5c\u8d1f\u8f7d\u4f18\u5148\u9075\u5faa\u5176\u672c\u8eab\u7684\u8c03\u5ea6\u7b56\u7565\u3002

                                                        1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9009\u62e9\u9700\u8981\u8c03\u6574 Binpack \u548c Spread \u8c03\u5ea6\u7b56\u7565\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u56fe\u6807\u5e76\u5728\u4e0b\u62c9\u5217\u8868\u4e2d\u70b9\u51fb GPU \u8c03\u5ea6\u914d\u7f6e \u3002

                                                        2. \u6839\u636e\u4e1a\u52a1\u573a\u666f\u8c03\u6574 GPU \u8c03\u5ea6\u914d\u7f6e\uff0c\u5e76\u70b9\u51fb \u786e\u5b9a \u540e\u4fdd\u5b58\u3002

                                                        "},{"location":"admin/kpanda/gpu/gpu_scheduler_config.html#binpack-spread_1","title":"\u5de5\u4f5c\u8d1f\u8f7d\u7ef4\u5ea6\u4f7f\u7528 Binpack \u548c Spread \u8c03\u5ea6\u914d\u7f6e","text":"

                                                        Note

                                                        \u5f53\u5de5\u4f5c\u8d1f\u8f7d\u7ef4\u5ea6\u7684 Binpack \u548c Spread \u8c03\u5ea6\u7b56\u7565\u4e0e\u96c6\u7fa4\u7ea7\u522b\u7684\u914d\u7f6e\u51b2\u7a81\u65f6\uff0c\u4f18\u5148\u9075\u5faa\u5de5\u4f5c\u8d1f\u8f7d\u7ef4\u5ea6\u7684\u914d\u7f6e\u3002

                                                        \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u65e0\u72b6\u6001\u8d1f\u8f7d\uff0c\u5e76\u5728\u5de5\u4f5c\u8d1f\u8f7d\u4e2d\u914d\u7f6e Binpack \u548c Spread \u8c03\u5ea6\u7b56\u7565 \u3002

                                                        1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                                        2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u65e0\u72b6\u6001\u8d1f\u8f7d \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                                                        3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\uff0c\u5e76\u5728 \u5bb9\u5668\u914d\u7f6e \u4e2d\u542f\u7528 GPU \u914d\u7f6e\uff0c\u9009\u62e9 GPU \u7c7b\u578b\u4e3a NVIDIA vGPU\uff0c \u70b9\u51fb \u9ad8\u7ea7\u8bbe\u7f6e \uff0c\u542f\u7528 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\uff0c\u6839\u636e\u4e1a\u52a1\u573a\u666f\u8c03\u6574 GPU \u8c03\u5ea6\u914d\u7f6e\u3002\u914d\u7f6e\u5b8c\u6210\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65 \uff0c \u8fdb\u5165 \u670d\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                                                        "},{"location":"admin/kpanda/gpu/vgpu_quota.html","title":"GPU \u914d\u989d\u7ba1\u7406","text":"

                                                        \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528 vGPU \u80fd\u529b\u3002

                                                        "},{"location":"admin/kpanda/gpu/vgpu_quota.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                        \u5f53\u524d\u96c6\u7fa4\u5df2\u901a\u8fc7 Operator \u6216\u624b\u52a8\u65b9\u5f0f\u90e8\u7f72\u5bf9\u5e94\u7c7b\u578b GPU \u9a71\u52a8\uff08NVIDIA GPU\u3001NVIDIA MIG\u3001\u5929\u6570\u3001\u6607\u817e\uff09

                                                        "},{"location":"admin/kpanda/gpu/vgpu_quota.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                        1. \u8fdb\u5165 Namespaces \u4e2d\uff0c\u70b9\u51fb \u914d\u989d\u7ba1\u7406 \u53ef\u4ee5\u914d\u7f6e\u5f53\u524d Namespace \u53ef\u4ee5\u4f7f\u7528\u7684 GPU \u8d44\u6e90\u3002

                                                        2. \u5f53\u524d\u547d\u540d\u7a7a\u95f4\u914d\u989d\u7ba1\u7406\u8986\u76d6\u7684\u5361\u7c7b\u578b\u4e3a\uff1aNVIDIA vGPU\u3001NVIDIA MIG\u3001\u5929\u6570\u3001\u6607\u817e\u3002

                                                          NVIDIA vGPU quota management: configures the concrete quotas that may be used and creates a ResourcesQuota CR (see the sketch after this list):

                                                          • Physical card count (nvidia.com/vgpu): indicates how many physical cards the current Pod needs to mount; the value must be less than or equal to the number of cards on the host.
                                                          • GPU compute (nvidia.com/gpucores): indicates the percentage of GPU compute occupied per card, ranging from 0 to 100; 0 means no enforced isolation, and 100 means exclusive use of an entire card.
                                                          • GPU memory (nvidia.com/gpumem): indicates the GPU memory occupied per card, in MB; the minimum is 1 and the maximum is the full memory of the card.
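
                                                          For reference, a minimal sketch of a Pod requesting these vGPU resources; the Pod name, image, and values are illustrative, not prescriptive:

                                                          apiVersion: v1\nkind: Pod\nmetadata:\n  name: vgpu-demo # example name\nspec:\n  containers:\n    - name: cuda\n      image: nvidia/cuda:12.2.0-base-ubuntu22.04 # example image\n      resources:\n        limits:\n          nvidia.com/vgpu: 1 # number of physical cards\n          nvidia.com/gpucores: 30 # 30% of each card's compute\n          nvidia.com/gpumem: 4096 # 4096 MB of GPU memory per card\n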

                                                        "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html","title":"\u6607\u817e NPU \u7ec4\u4ef6\u5b89\u88c5","text":"

                                                        \u672c\u7ae0\u8282\u63d0\u4f9b\u6607\u817e NPU \u9a71\u52a8\u3001Device Plugin\u3001NPU-Exporter \u7b49\u7ec4\u4ef6\u7684\u5b89\u88c5\u6307\u5bfc\u3002

                                                        "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                        1. \u5b89\u88c5\u524d\u8bf7\u786e\u8ba4\u652f\u6301\u7684 NPU \u578b\u53f7\uff0c\u8be6\u60c5\u8bf7\u53c2\u8003\u6607\u817e NPU \u77e9\u9635
                                                        2. \u8bf7\u786e\u8ba4 \u5bf9\u5e94 NPU \u578b\u53f7\u6240\u8981\u6c42\u7684\u5185\u6838\u7248\u672c\u662f\u5426\u5339\u914d\uff0c\u8be6\u60c5\u8bf7\u53c2\u8003\u6607\u817e NPU \u77e9\u9635
                                                        3. \u51c6\u5907 Kubernetes \u57fa\u7840\u73af\u5883
                                                        "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html#_2","title":"\u5b89\u88c5\u6b65\u9aa4","text":"

                                                        \u4f7f\u7528 NPU \u8d44\u6e90\u4e4b\u524d\uff0c\u9700\u8981\u5b8c\u6210\u56fa\u4ef6\u5b89\u88c5\u3001NPU \u9a71\u52a8\u5b89\u88c5\u3001 Docker Runtime \u5b89\u88c5\u3001\u7528\u6237\u521b\u5efa\u3001\u65e5\u5fd7\u76ee\u5f55\u521b\u5efa\u4ee5\u53ca NPU Device Plugin \u5b89\u88c5\uff0c\u8be6\u60c5\u53c2\u8003\u5982\u4e0b\u6b65\u9aa4\u3002

                                                        "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html#_3","title":"\u5b89\u88c5\u56fa\u4ef6","text":"
                                                        1. \u5b89\u88c5\u524d\u8bf7\u786e\u8ba4\u5185\u6838\u7248\u672c\u5728\u201c\u4e8c\u8fdb\u5236\u5b89\u88c5\u201d\u5b89\u88c5\u65b9\u5f0f\u5bf9\u5e94\u7684\u7248\u672c\u8303\u56f4\u5185\uff0c\u5219\u53ef\u4ee5\u76f4\u63a5\u5b89\u88c5NPU\u9a71\u52a8\u56fa\u4ef6\u3002
                                                        2. \u56fa\u4ef6\u4e0e\u9a71\u52a8\u4e0b\u8f7d\u8bf7\u53c2\u8003\u56fa\u4ef6\u4e0b\u8f7d\u5730\u5740
                                                        3. \u56fa\u4ef6\u5b89\u88c5\u8bf7\u53c2\u8003\u5b89\u88c5 NPU \u9a71\u52a8\u56fa\u4ef6
                                                        "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html#npu_1","title":"\u5b89\u88c5 NPU \u9a71\u52a8","text":"
                                                        1. \u5982\u9a71\u52a8\u672a\u5b89\u88c5\uff0c\u8bf7\u53c2\u8003\u6607\u817e\u5b98\u65b9\u6587\u6863\u8fdb\u884c\u5b89\u88c5\u3002\u4f8b\u5982 Ascend910\uff0c\u53c2\u8003 910 \u9a71\u52a8\u5b89\u88c5\u6587\u6863\u3002
                                                        2. \u8fd0\u884c npu-smi info \u547d\u4ee4\uff0c\u5e76\u4e14\u80fd\u591f\u6b63\u5e38\u8fd4\u56de NPU \u4fe1\u606f\uff0c\u8868\u793a NPU \u9a71\u52a8\u4e0e\u56fa\u4ef6\u5df2\u5c31\u7eea\u3002
                                                        "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html#docker-runtime","title":"\u5b89\u88c5 Docker Runtime","text":"
                                                        1. \u4e0b\u8f7d Ascend Docker Runtime

                                                          \u793e\u533a\u7248\u4e0b\u8f7d\u5730\u5740\uff1ahttps://www.hiascend.com/zh/software/mindx-dl/community

                                                          wget -c https://mindx.obs.cn-south-1.myhuaweicloud.com/OpenSource/MindX/MindX%205.0.RC2/MindX%20DL%205.0.RC2/Ascend-docker-runtime_5.0.RC2_linux-x86_64.run\n

                                                          Install it to a specified path by executing the following two commands in order; the parameter is the installation path:

                                                          chmod u+x Ascend-docker-runtime_5.0.RC2_linux-x86_64.run \n./Ascend-docker-runtime_{version}_linux-{arch}.run --install --install-path=<path>\n
                                                        2. Modify the containerd configuration file

                                                          If containerd has no default configuration file, execute the following three commands to create one:

                                                          mkdir /etc/containerd \ncontainerd config default > /etc/containerd/config.toml \nvim /etc/containerd/config.toml\n

                                                          If containerd already has a configuration file:

                                                          vim /etc/containerd/config.toml\n

                                                          Modify the runtime installation path according to your environment, mainly the runtime field:

                                                          ... \n[plugins.\"io.containerd.monitor.v1.cgroups\"]\n   no_prometheus = false  \n[plugins.\"io.containerd.runtime.v1.linux\"]\n   shim = \"containerd-shim\"\n   runtime = \"/usr/local/Ascend/Ascend-Docker-Runtime/ascend-docker-runtime\"\n   runtime_root = \"\"\n   no_shim = false\n   shim_debug = false\n [plugins.\"io.containerd.runtime.v2.task\"]\n   platforms = [\"linux/amd64\"]\n...\n

                                                          Execute the following command to restart containerd:

                                                          systemctl restart containerd\n
                                                        "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html#_4","title":"\u7528\u6237\u521b\u5efa","text":"

                                                        \u5728\u5bf9\u5e94\u7ec4\u4ef6\u5b89\u88c5\u7684\u8282\u70b9\u4e0a\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u521b\u5efa\u7528\u6237\u3002

                                                        # Ubuntu\nuseradd -d /home/hwMindX -u 9000 -m -s /usr/sbin/nologin hwMindX\nusermod -a -G HwHiAiUser hwMindX\n# CentOS\nuseradd -d /home/hwMindX -u 9000 -m -s /sbin/nologin hwMindX\nusermod -a -G HwHiAiUser hwMindX\n
                                                        "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html#_5","title":"\u65e5\u5fd7\u76ee\u5f55\u521b\u5efa","text":"

                                                        \u5728\u5bf9\u5e94\u8282\u70b9\u521b\u5efa\u7ec4\u4ef6\u65e5\u5fd7\u7236\u76ee\u5f55\u548c\u5404\u7ec4\u4ef6\u7684\u65e5\u5fd7\u76ee\u5f55\uff0c\u5e76\u8bbe\u7f6e\u76ee\u5f55\u5bf9\u5e94\u5c5e\u4e3b\u548c\u6743\u9650\u3002\u6267\u884c\u4e0b\u8ff0\u547d\u4ee4\uff0c\u521b\u5efa\u7ec4\u4ef6\u65e5\u5fd7\u7236\u76ee\u5f55\u3002

                                                        mkdir -m 755 /var/log/mindx-dl\nchown root:root /var/log/mindx-dl\n

                                                        Execute the following command to create the log directory for the Device Plugin component.

                                                        mkdir -m 750 /var/log/mindx-dl/devicePlugin\nchown root:root /var/log/mindx-dl/devicePlugin\n

                                                        Note

                                                        Create a log directory for each required component; in this case, only the Device Plugin component is needed. For other component requirements, see the official documentation.

                                                        "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html#label","title":"\u521b\u5efa\u8282\u70b9 Label","text":"

                                                        \u53c2\u8003\u4e0b\u8ff0\u547d\u4ee4\u5728\u5bf9\u5e94\u8282\u70b9\u4e0a\u521b\u5efa Label\uff1a

                                                        # Create this label on compute nodes where the driver is installed\nkubectl label node {nodename} huawei.com.ascend/Driver=installed\nkubectl label node {nodename} node-role.kubernetes.io/worker=worker\nkubectl label node {nodename} workerselector=dls-worker-node\nkubectl label node {nodename} host-arch=huawei-arm # or host-arch=huawei-x86, depending on your environment\nkubectl label node {nodename} accelerator=huawei-Ascend910 # adjust to your environment\n# Create this label on the control-plane node\nkubectl label node {nodename} masterselector=dls-master-node\n
                                                        "},{"location":"admin/kpanda/gpu/ascend/ascend_driver_install.html#device-plugin-npuexporter","title":"\u5b89\u88c5 Device Plugin \u548c NpuExporter","text":"

                                                        \u529f\u80fd\u6a21\u5757\u8def\u5f84\uff1a \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u7ba1\u7406 \uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u6a21\u677f -> \u641c\u7d22 ascend-mindxdl \u3002

                                                        • DevicePlugin \uff1a\u901a\u8fc7\u63d0\u4f9b\u901a\u7528\u8bbe\u5907\u63d2\u4ef6\u673a\u5236\u548c\u6807\u51c6\u7684\u8bbe\u5907API\u63a5\u53e3\uff0c\u4f9bKubernetes\u4f7f\u7528\u8bbe\u5907\u3002\u5efa\u8bae\u4f7f\u7528\u9ed8\u8ba4\u7684\u955c\u50cf\u53ca\u7248\u672c\u3002
                                                        • NpuExporter \uff1a\u57fa\u4e8ePrometheus/Telegraf\u751f\u6001\uff0c\u8be5\u7ec4\u4ef6\u63d0\u4f9b\u63a5\u53e3\uff0c\u5e2e\u52a9\u7528\u6237\u80fd\u591f\u5173\u6ce8\u5230\u6607\u817e\u7cfb\u5217AI\u5904\u7406\u5668\u4ee5\u53ca\u5bb9\u5668\u7ea7\u5206\u914d\u72b6\u6001\u3002\u5efa\u8bae\u4f7f\u7528\u9ed8\u8ba4\u7684\u955c\u50cf\u53ca\u7248\u672c\u3002
                                                        • ServiceMonitor \uff1a\u9ed8\u8ba4\u4e0d\u5f00\u542f\uff0c\u5f00\u542f\u540e\u53ef\u524d\u5f80\u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u67e5\u770b NPU \u76f8\u5173\u76d1\u63a7\u3002\u5982\u9700\u5f00\u542f\uff0c\u8bf7\u786e\u4fdd insight-agent \u5df2\u5b89\u88c5\u5e76\u5904\u4e8e\u8fd0\u884c\u72b6\u6001\uff0c\u5426\u5219\u5c06\u5bfc\u81f4 ascend-mindxdl \u5b89\u88c5\u5931\u8d25\u3002
                                                        • isVirtualMachine \uff1a\u9ed8\u8ba4\u4e0d\u5f00\u542f\uff0c\u5982\u679c NPU \u8282\u70b9\u4e3a\u4e91\u4e3b\u673a\u573a\u666f\uff0c\u8bf7\u5f00\u542f\u00a0isVirtualMachine \u53c2\u6570\u3002
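
                                                        For completeness, an equivalent CLI-based install might look like the sketch below; the repository URL, namespace, and release name are assumptions, and the UI flow above is the documented path:

                                                        # repo URL, namespace, and release name are assumptions\nhelm repo add ascend <helm-repo-url>\nhelm install ascend-mindxdl ascend/ascend-mindxdl -n mindx-dl --create-namespace\n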

                                                        After a successful installation, two components appear under the corresponding namespace.

                                                        The node information also shows the corresponding NPU devices.

                                                        Once everything is ready, the corresponding NPU device can be selected when creating a workload through the UI.

                                                        Note

                                                        For detailed usage steps, see Using Ascend NPUs in Applications.

                                                        "},{"location":"admin/kpanda/gpu/ascend/ascend_usage.html","title":"Using Ascend NPUs in Applications","text":"

                                                        This section describes how to use Ascend NPUs on the Suanfeng AI computing platform.

                                                        "},{"location":"admin/kpanda/gpu/ascend/ascend_usage.html#_1","title":"Prerequisites","text":"
                                                        • The Ascend driver is installed on the current NPU node.
                                                        • The Ascend-Docker-Runtime component is installed on the current NPU node.
                                                        • The NPU MindX DL suite is installed in the current cluster.
                                                        • The NPU cards in the current cluster have not undergone any virtualization and are not occupied by other applications.

                                                        See the Ascend NPU component installation document to set up the basic environment.

                                                        "},{"location":"admin/kpanda/gpu/ascend/ascend_usage.html#_2","title":"\u5feb\u901f\u4f7f\u7528","text":"

                                                        \u672c\u6587\u4f7f\u7528\u6607\u817e\u793a\u4f8b\u5e93\u4e2d\u7684 AscentCL \u56fe\u7247\u5206\u7c7b\u5e94\u7528\u793a\u4f8b\u3002

                                                        1. \u4e0b\u8f7d\u6607\u817e\u4ee3\u7801\u5e93

                                                          \u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\u4e0b\u8f7d\u6607\u817e Demo \u793a\u4f8b\u4ee3\u7801\u5e93\uff0c\u5e76\u4e14\u8bf7\u8bb0\u4f4f\u4ee3\u7801\u5b58\u653e\u7684\u4f4d\u7f6e\uff0c\u540e\u7eed\u9700\u8981\u4f7f\u7528\u3002

                                                          git clone https://gitee.com/ascend/samples.git\n
                                                        2. Prepare the base image

                                                          This example uses the Ascend-pytorch base image, which can be obtained from the Ascend image registry.

                                                        3. Prepare the YAML

                                                          ascend-demo.yaml
                                                          apiVersion: batch/v1\nkind: Job\nmetadata:\n  name: resnetinfer1-1-1usoc\nspec:\n  template:\n    spec:\n      containers:\n        - image: ascendhub.huawei.com/public-ascendhub/ascend-pytorch:23.0.RC2-ubuntu18.04 # Inference image name\n          imagePullPolicy: IfNotPresent\n          name: resnet50infer\n          securityContext:\n            runAsUser: 0\n          command:\n            - \"/bin/bash\"\n            - \"-c\"\n            - |\n              source /usr/local/Ascend/ascend-toolkit/set_env.sh &&\n              TEMP_DIR=/root/samples_copy_$(date '+%Y%m%d_%H%M%S_%N') &&\n              cp -r /root/samples \"$TEMP_DIR\" &&\n              cd \"$TEMP_DIR\"/inference/modelInference/sampleResnetQuickStart/python/model &&\n              wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/resnet50/resnet50.onnx &&\n              atc --model=resnet50.onnx --framework=5 --output=resnet50 --input_shape=\"actual_input_1:1,3,224,224\"  --soc_version=Ascend910 &&\n              cd ../data &&\n              wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg &&\n              cd ../scripts &&\n              bash sample_run.sh\n          resources:\n            requests:\n              huawei.com/Ascend910: 1 # Number of the Ascend 910 Processors\n            limits:\n              huawei.com/Ascend910: 1 # The value should be the same as that of requests\n          volumeMounts:\n            - name: hiai-driver\n              mountPath: /usr/local/Ascend/driver\n              readOnly: true\n            - name: slog\n              mountPath: /var/log/npu/conf/slog/slog.conf\n            - name: localtime # The container time must be the same as the host time\n              mountPath: /etc/localtime\n            - name: dmp\n              mountPath: /var/dmp_daemon\n            - name: slogd\n              mountPath: /var/slogd\n            - name: hbasic\n              mountPath: /etc/hdcBasic.cfg\n            - name: sys-version\n              mountPath: /etc/sys_version.conf\n            - name: aicpu\n              mountPath: /usr/lib64/aicpu_kernels\n            - name: tfso\n              mountPath: /usr/lib64/libtensorflow.so\n            - name: sample-path\n              mountPath: /root/samples\n      volumes:\n        - name: hiai-driver\n          hostPath:\n            path: /usr/local/Ascend/driver\n        - name: slog\n          hostPath:\n            path: /var/log/npu/conf/slog/slog.conf\n        - name: localtime\n          hostPath:\n            path: /etc/localtime\n        - name: dmp\n          hostPath:\n            path: /var/dmp_daemon\n        - name: slogd\n          hostPath:\n            path: /var/slogd\n        - name: hbasic\n          hostPath:\n            path: /etc/hdcBasic.cfg\n        - name: sys-version\n          hostPath:\n            path: /etc/sys_version.conf\n        - name: aicpu\n          hostPath:\n            path: /usr/lib64/aicpu_kernels\n        - name: tfso\n          hostPath:\n            path: /usr/lib64/libtensorflow.so\n        - name: sample-path\n          hostPath:\n            path: /root/samples\n      restartPolicy: OnFailure\n

                                                          Some fields in the above YAML need to be modified according to your environment:

                                                          1. atc ... --soc_version=Ascend910 uses Ascend910; adjust it to your actual hardware. You can check the card model with the npu-smi info command and add the Ascend prefix to it.
                                                          2. Adjust sample-path to your environment.
                                                          3. Adjust resources to your environment.
                                                        4. Deploy the Job and check the result

                                                          Create the Job with the following command:

                                                          kubectl apply -f ascend-demo.yaml\n

                                                          Check the Pod's running status:
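
                                                          For example, a minimal check that watches the Job's pods, which carry the resnetinfer1-1-1usoc prefix from the YAML above:

                                                          kubectl get pod -w | grep resnetinfer1-1-1usoc\n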

                                                          After the Pod runs successfully, check the log output. In the key prompt information on the screen, Label indicates the category identifier, Conf indicates the maximum confidence of the classification, and Class indicates the category it belongs to. These values may vary by version and environment, so use your actual results as the reference:

                                                          Result image:

                                                        "},{"location":"admin/kpanda/gpu/ascend/ascend_usage.html#_3","title":"\u754c\u9762\u4f7f\u7528","text":"
                                                        1. \u786e\u8ba4\u96c6\u7fa4\u662f\u5426\u5df2\u68c0\u6d4b GPU \u5361\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u81ea\u52a8\u542f\u7528\u5e76\u81ea\u52a8\u68c0\u6d4b\u5bf9\u5e94 GPU \u7c7b\u578b\u3002 \u76ee\u524d\u96c6\u7fa4\u4f1a\u81ea\u52a8\u542f\u7528 GPU \uff0c\u5e76\u4e14\u8bbe\u7f6e GPU \u7c7b\u578b\u4e3a Ascend \u3002

                                                        2. \u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u9009\u62e9\u7c7b\u578b\uff08Ascend\uff09\u4e4b\u540e\uff0c\u9700\u8981\u914d\u7f6e\u5e94\u7528\u4f7f\u7528\u7684\u7269\u7406\u5361\u6570\u91cf\uff1a

                                                          \u7269\u7406\u5361\u6570\u91cf\uff08huawei.com/Ascend910\uff09 \uff1a\u8868\u793a\u5f53\u524d Pod \u9700\u8981\u6302\u8f7d\u51e0\u5f20\u7269\u7406\u5361\uff0c\u8f93\u5165\u503c\u5fc5\u987b\u4e3a\u6574\u6570\u4e14**\u5c0f\u4e8e\u7b49\u4e8e**\u5bbf\u4e3b\u673a\u4e0a\u7684\u5361\u6570\u91cf\u3002

                                                          \u5982\u679c\u4e0a\u8ff0\u503c\u914d\u7f6e\u7684\u6709\u95ee\u9898\u5219\u4f1a\u51fa\u73b0\u8c03\u5ea6\u5931\u8d25\uff0c\u8d44\u6e90\u5206\u914d\u4e0d\u4e86\u7684\u60c5\u51b5\u3002

                                                        "},{"location":"admin/kpanda/gpu/ascend/vnpu.html","title":"\u542f\u7528\u6607\u817e\u865a\u62df\u5316","text":"

                                                        \u6607\u817e\u865a\u62df\u5316\u5206\u4e3a\u52a8\u6001\u865a\u62df\u5316\u548c\u9759\u6001\u865a\u62df\u5316\uff0c\u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5f00\u542f\u5e76\u4f7f\u7528\u6607\u817e\u9759\u6001\u865a\u62df\u5316\u80fd\u529b\u3002

                                                        "},{"location":"admin/kpanda/gpu/ascend/vnpu.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                        • Kubernetes \u96c6\u7fa4\u73af\u5883\u642d\u5efa\u3002
                                                        • \u5f53\u524d NPU \u8282\u70b9\u5df2\u5b89\u88c5\u6607\u817e \uff08Ascend\uff09\u9a71\u52a8\u3002
                                                        • \u5f53\u524d NPU \u8282\u70b9\u5df2\u5b89\u88c5 Ascend-Docker-Runtime \u7ec4\u4ef6\u3002
                                                        • \u5f53\u524d\u96c6\u7fa4\u5df2\u5b89\u88c5 NPU MindX DL \u5957\u4ef6\u3002
                                                        • \u652f\u6301\u7684 NPU \u5361\u578b\u53f7\uff1a

                                                          • Ascend 310P\uff0c\u5df2\u9a8c\u8bc1
                                                          • Ascend 910b\uff0820 \u6838\uff09\uff0c\u5df2\u9a8c\u8bc1
                                                          • Ascend 910\uff0832 \u6838\uff09\uff0c\u5b98\u65b9\u4ecb\u7ecd\u652f\u6301\uff0c\u672a\u5b9e\u9645\u9a8c\u8bc1
                                                          • Ascend 910\uff0830 \u6838\uff09\uff0c\u5b98\u65b9\u4ecb\u7ecd\u652f\u6301\uff0c\u672a\u5b9e\u9645\u9a8c\u8bc1

                                                          \u66f4\u591a\u7ec6\u8282\u53c2\u9605\u5b98\u65b9\u865a\u62df\u5316\u786c\u4ef6\u8bf4\u660e\u3002

                                                        \u8bf7\u53c2\u8003\u6607\u817e NPU \u7ec4\u4ef6\u5b89\u88c5\u6587\u6863\u5b89\u88c5\u57fa\u7840\u73af\u5883\u3002

                                                        "},{"location":"admin/kpanda/gpu/ascend/vnpu.html#_3","title":"\u5f00\u542f\u865a\u62df\u5316\u80fd\u529b","text":"

                                                        \u5f00\u542f\u865a\u62df\u5316\u80fd\u529b\u9700\u8981\u624b\u52a8\u4fee\u6539\u00a0ascend-device-plugin-daemonset \u7ec4\u4ef6\u7684\u542f\u52a8\u53c2\u6570\uff0c\u53c2\u8003\u4e0b\u8ff0\u547d\u4ee4\uff1a

                                                        - device-plugin -useAscendDocker=true -volcanoType=false -presetVirtualDevice=true\n- logFile=/var/log/mindx-dl/devicePlugin/devicePlugin.log -logLevel=0\n
                                                        "},{"location":"admin/kpanda/gpu/ascend/vnpu.html#vnpu","title":"\u5207\u5206 VNPU \u5b9e\u4f8b","text":"

                                                        \u9759\u6001\u865a\u62df\u5316\u9700\u8981\u624b\u52a8\u5bf9 VNPU \u5b9e\u4f8b\u7684\u5207\u5206\uff0c\u8bf7\u53c2\u8003\u4e0b\u8ff0\u547d\u4ee4\uff1a

                                                        npu-smi set -t create-vnpu -i 13 -c 0 -f vir02\n
                                                        • i is the card id
                                                        • c is the chip id
                                                        • vir02 is the split specification template

                                                        The card id and chip id can be queried with npu-smi info, and the split specifications can be found in the official Ascend templates.

                                                        After splitting an instance, query the result with the following command:

                                                        npu-smi info -t info-vnpu -i 13 -c 0\n

                                                        The query result looks like the following:

                                                        "},{"location":"admin/kpanda/gpu/ascend/vnpu.html#ascend-device-plugin-daemonset","title":"\u91cd\u542f\u00a0ascend-device-plugin-daemonset","text":"

                                                        \u5207\u5206\u5b9e\u4f8b\u540e\u624b\u52a8\u91cd\u542f device-plugin pod\uff0c\u7136\u540e\u4f7f\u7528 kubectl describe \u547d\u4ee4\u67e5\u770b\u5df2\u6ce8\u518c node \u7684\u8d44\u6e90\uff1a

                                                        kubectl describe node {{nodename}}\n
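
                                                        To trigger the restart itself, one option is to delete the DaemonSet's pods so that they are recreated; the namespace and label below are assumptions, so adjust them to your deployment:

                                                        # namespace and label are assumptions; adjust to your deployment\nkubectl -n mindx-dl delete pod -l app=ascend-device-plugin\n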

                                                        "},{"location":"admin/kpanda/gpu/ascend/vnpu.html#_4","title":"\u5982\u4f55\u4f7f\u7528\u8bbe\u5907","text":"

                                                        \u5728\u521b\u5efa\u5e94\u7528\u65f6\uff0c\u6307\u5b9a\u8d44\u6e90 key\uff0c\u53c2\u8003\u4e0b\u8ff0 YAML\uff1a

                                                        ......\nresources:\n  requests:\n    huawei.com/Ascend310P-2c: 1\n  limits:\n    huawei.com/Ascend310P-2c: 1\n......\n
                                                        "},{"location":"admin/kpanda/gpu/metax/usemetax.html","title":"\u6c90\u66e6 GPU \u7ec4\u4ef6\u5b89\u88c5\u4e0e\u4f7f\u7528","text":"

                                                        \u672c\u7ae0\u8282\u63d0\u4f9b\u6c90\u66e6 gpu-extensions\u3001gpu-operator \u7b49\u7ec4\u4ef6\u7684\u5b89\u88c5\u6307\u5bfc\u548c\u6c90\u66e6 GPU \u6574\u5361\u548c vGPU \u4e24\u79cd\u6a21\u5f0f\u7684\u4f7f\u7528\u65b9\u6cd5\u3002

                                                        "},{"location":"admin/kpanda/gpu/metax/usemetax.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                        1. \u5df2\u5728\u6c90\u66e6\u8f6f\u4ef6\u4e2d\u5fc3\u4e0b\u8f7d\u5e76\u5b89\u88c5\u6240\u9700\u7684 tar \u5305\uff0c \u672c\u6587\u4ee5 metax-gpu-k8s-package.0.7.10.tar.gz \u4e3a\u4f8b\u3002
                                                        2. \u51c6\u5907 Kubernetes \u57fa\u7840\u73af\u5883
                                                        "},{"location":"admin/kpanda/gpu/metax/usemetax.html#_2","title":"\u7ec4\u4ef6\u4ecb\u7ecd","text":"

                                                        Metax \u63d0\u4f9b\u4e86\u4e24\u4e2a helm-chart \u5305\uff0c\u4e00\u4e2a\u662f metax-extensions\uff0c\u4e00\u4e2a\u662f gpu-operator\uff0c\u6839\u636e\u4f7f\u7528\u573a\u666f\u53ef\u9009\u62e9\u5b89\u88c5\u4e0d\u540c\u7684\u7ec4\u4ef6\u3002

                                                        1. Metax-extensions\uff1a\u5305\u542b gpu-device \u548c gpu-label \u4e24\u4e2a\u7ec4\u4ef6\u3002\u5728\u4f7f\u7528 Metax-extensions \u65b9\u6848\u65f6\uff0c\u7528\u6237\u7684\u5e94\u7528\u5bb9\u5668\u955c\u50cf\u9700\u8981\u57fa\u4e8e MXMACA\u00ae \u57fa\u7840\u955c\u50cf\u6784\u5efa\u3002\u4e14 Metax-extensions \u4ec5\u9002\u7528\u4e8e GPU \u6574\u5361\u4f7f\u7528\u573a\u666f\u3002
                                                        2. gpu-operator\uff1a\u5305\u542b gpu-device\u3001gpu-label\u3001driver-manager\u3001container-runtime\u3001operator-controller \u8fd9\u4e9b\u7ec4\u4ef6\u3002 \u4f7f\u7528 gpu-operator \u65b9\u6848\u65f6\uff0c\u7528\u6237\u53ef\u9009\u62e9\u5236\u4f5c\u4e0d\u5305\u542b MXMACA\u00ae SDK \u7684\u5e94\u7528\u5bb9\u5668\u955c\u50cf\u3002gpu-operator \u9002\u7528\u4e8e GPU \u6574\u5361\u548c vGPU \u573a\u666f\u3002
                                                        "},{"location":"admin/kpanda/gpu/metax/usemetax.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                        1. \u4ece /home/metax/metax-docs/k8s/metax-gpu-k8s-package.0.7.10.tar.gz \u6587\u4ef6\u4e2d\u89e3\u538b\u51fa

                                                          • deploy-gpu-extensions.yaml # \u90e8\u7f72yaml
                                                          • metax-gpu-extensions-0.7.10.tgz\u3001metax-operator-0.7.10.tgz # helm chart\u6587\u4ef6
                                                          • metax-k8s-images.0.7.10.run # \u79bb\u7ebf\u955c\u50cf
                                                        2. \u67e5\u770b\u7cfb\u7edf\u662f\u5426\u5b89\u88c5\u9a71\u52a8

                                                          $ lsmod | grep metax \nmetax 1605632 0 \nttm 86016 3 drm_vram_helper,metax,drm_ttm_helper \ndrm 618496 7 drm_kms_helper,drm_vram_helper,ast,metax,drm_ttm_helper,ttm\n
                                                          • If nothing is shown, the driver package has not been installed; if content is shown, the driver package has been installed.
                                                          • When using metax-operator, pre-installing the MXMACA kernel-mode driver on worker nodes is not recommended; if it is already installed, there is no need to uninstall it.
                                                        3. Install the driver.

                                                        "},{"location":"admin/kpanda/gpu/metax/usemetax.html#gpu-extensions","title":"gpu-extensions","text":"
                                                        1. Push the images:

                                                          tar -xf metax-gpu-k8s-package.0.7.10.tar.gz\n./metax-k8s-images.0.7.10.run push {registry}/metax\n
                                                        2. Push the Helm Chart:

                                                          helm plugin install https://github.com/chartmuseum/helm-push\nhelm repo add  --username rootuser --password rootpass123  metax http://172.16.16.5:8081\nhelm cm-push metax-operator-0.7.10.tgz metax\nhelm cm-push metax-gpu-extensions-0.7.10.tgz metax\n
                                                        3. Install metax-gpu-extensions on the Suanfeng AI computing platform.

                                                          After a successful deployment, the resources can be viewed on the node.

                                                        4. After the changes succeed, the Metax GPU label can be seen on the node.

                                                        "},{"location":"admin/kpanda/gpu/metax/usemetax.html#gpu-operator","title":"gpu-operator","text":"

                                                        Known issues when installing gpu-operator:

                                                        1. The component images for metax-operator, gpu-label, gpu-device, and container-runtime must carry the amd64 suffix.

                                                        2. The image for the metax-maca component is not included in the metax-k8s-images.0.7.13.run package; it requires separately downloading images such as maca-mxc500-2.23.0.23-ubuntu20.04-x86_64.tar.xz, and after loading it, the metax-maca component's image must be modified again (see the sketch after this list).

                                                        3. The image for the metax-driver component requires downloading the k8s-driver-image.2.23.0.25.run file from https://pub-docstore.metax-tech.com:7001, then executing the k8s-driver-image.2.23.0.25.run push {registry}/metax command to push the image to the image registry. After pushing, modify the image address of the metax-driver component.
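
                                                        A minimal sketch of loading and re-pushing the separately downloaded maca image; the target tag is an assumption, and the actual image name is printed by docker load:

                                                        docker load -i maca-mxc500-2.23.0.23-ubuntu20.04-x86_64.tar.xz\n# re-tag the image name printed by docker load, then push it (target tag is an assumption)\ndocker tag <loaded-image> {registry}/metax/maca-mxc500:2.23.0.23\ndocker push {registry}/metax/maca-mxc500:2.23.0.23\n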

                                                        "},{"location":"admin/kpanda/gpu/metax/usemetax.html#gpu_1","title":"\u4f7f\u7528 GPU","text":"

                                                        \u5b89\u88c5\u540e\u53ef\u5728\u5de5\u4f5c\u8d1f\u8f7d\u4e2d\u4f7f\u7528\u6c90\u66e6 GPU\u3002\u6ce8\u610f\u542f\u7528 GPU \u540e\uff0c\u9700\u9009\u62e9GPU\u7c7b\u578b\u4e3a Metax GPU

                                                        \u8fdb\u5165\u5bb9\u5668\uff0c\u6267\u884c mx-smi \u53ef\u67e5\u770b GPU \u7684\u4f7f\u7528\u60c5\u51b5.
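
                                                        A minimal sketch, assuming a pod name (hypothetical here) from the workload above:

                                                        kubectl exec -it <pod-name> -- mx-smi\n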

                                                        "},{"location":"admin/kpanda/gpu/mlu/use-mlu.html","title":"\u4f7f\u7528\u5bd2\u6b66\u7eaa GPU","text":"

                                                        \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4f7f\u7528\u5bd2\u6b66\u7eaa GPU\u3002

                                                        "},{"location":"admin/kpanda/gpu/mlu/use-mlu.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                        • \u5df2\u7ecf\u90e8\u7f72 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 \u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u4e14\u5e73\u53f0\u8fd0\u884c\u6b63\u5e38\u3002
                                                        • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                                                        • \u5f53\u524d\u96c6\u7fa4\u5df2\u5b89\u88c5\u5bd2\u6b66\u7eaa\u56fa\u4ef6\u3001\u9a71\u52a8\u4ee5\u53caDevicePlugin\u7ec4\u4ef6\uff0c\u5b89\u88c5\u8be6\u60c5\u8bf7\u53c2\u8003\u5b98\u65b9\u6587\u6863\uff1a
                                                          • \u9a71\u52a8\u56fa\u4ef6\u5b89\u88c5
                                                          • DevicePlugin \u5b89\u88c5

                                                        \u5728\u5b89\u88c5 DevicePlugin \u65f6\u8bf7\u5173\u95ed --enable-device-type \u53c2\u6570\uff0c\u5426\u5219\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5c06\u65e0\u6cd5\u6b63\u786e\u8bc6\u522b\u5bd2\u6b66\u7eaa GPU\u3002

                                                        "},{"location":"admin/kpanda/gpu/mlu/use-mlu.html#gpu_1","title":"\u5bd2\u6b66\u7eaa GPU \u6a21\u5f0f\u4ecb\u7ecd","text":"

                                                        \u5bd2\u6b66\u7eaa GPU \u6709\u4ee5\u4e0b\u51e0\u79cd\u6a21\u5f0f\uff1a

                                                        • \u6574\u5361\u6a21\u5f0f\uff1a\u5c06\u5bd2\u6b66\u7eaaGPU\u4ee5\u6574\u5361\u7684\u65b9\u5f0f\u6ce8\u518c\u5230\u96c6\u7fa4\u5f53\u4e2d\u8fdb\u884c\u4f7f\u7528\u3002
                                                        • Share \u6a21\u5f0f\uff1a\u53ef\u4ee5\u5c06\u4e00\u5f20\u5bd2\u6b66\u7eaaGPU\u5171\u4eab\u7ed9\u591a\u4e2a Pod \u8fdb\u884c\u4f7f\u7528\uff0c\u53ef\u4ee5\u901a\u8fc7 virtualization-num \u53c2\u6570\u8fdb\u884c\u8bbe\u7f6e\u53ef\u5171\u4eab\u5bb9\u5668\u7684\u6570\u91cf\u3002
                                                        • Dynamic smlu \u6a21\u5f0f\uff1a\u8fdb\u4e00\u6b65\u5bf9\u8d44\u6e90\u8fdb\u884c\u4e86\u7ec6\u5316\uff0c\u53ef\u4ee5\u63a7\u5236\u5206\u914d\u7ed9\u5bb9\u5668\u7684\u663e\u5b58\u3001\u7b97\u529b\u7684\u5927\u5c0f\u3002
                                                        • Mim \u6a21\u5f0f\uff1a\u53ef\u4ee5\u5c06\u5bd2\u6b66\u7eaa GPU \u6309\u7167\u56fa\u5b9a\u7684\u89c4\u683c\u5207\u5206\u6210\u591a\u5f20 GPU \u8fdb\u884c\u4f7f\u7528\u3002
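
                                                        As an illustration of the share mode, a sketch of the device-plugin DaemonSet arguments; the flag names below follow the upstream Cambricon device plugin and are assumptions to verify against your installed version:

                                                        args:\n  - --mode=env-share # assumed flag; enables the shared mode\n  - --virtualization-num=10 # assumed flag; number of containers that may share one card\n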
                                                        "},{"location":"admin/kpanda/gpu/mlu/use-mlu.html#ai","title":"\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528\u5bd2\u6b66\u7eaa","text":"

                                                        \u8fd9\u91cc\u4ee5 Dynamic smlu \u6a21\u5f0f\u4e3a\u4f8b\uff1a

                                                        1. \u5728\u6b63\u786e\u5b89\u88c5 DevicePlugin \u7b49\u7ec4\u4ef6\u540e\uff0c\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u96c6\u7fa4\u8fd0\u7ef4-> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u81ea\u52a8\u542f\u7528\u5e76\u81ea\u52a8\u68c0\u6d4b\u5bf9\u5e94 GPU \u7c7b\u578b\u3002

                                                        2. \u70b9\u51fb\u8282\u70b9\u7ba1\u7406\u9875\u9762\uff0c\u67e5\u770b\u8282\u70b9\u662f\u5426\u5df2\u7ecf\u6b63\u786e\u8bc6\u522b\u5230\u5bf9\u5e94\u7684GPU\u7c7b\u578b\u3002

                                                        3. \u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u9009\u62e9\u7c7b\u578b\uff08MLU VGPU\uff09\u4e4b\u540e\uff0c\u9700\u8981\u914d\u7f6e App \u4f7f\u7528\u7684 GPU \u8d44\u6e90\uff1a

                                                          • GPU \u7b97\u529b\uff08cambricon.com/mlu.smlu.vcore\uff09\uff1a\u8868\u793a\u5f53\u524d Pod \u9700\u8981\u4f7f\u7528\u6838\u5fc3\u7684\u767e\u5206\u6bd4\u6570\u91cf\u3002
                                                          • GPU \u663e\u5b58\uff08cambricon.com/mlu.smlu.vmemory\uff09\uff1a\u8868\u793a\u5f53\u524dPod\u9700\u8981\u4f7f\u7528\u663e\u5b58\u7684\u5927\u5c0f\uff0c\u5355\u4f4d\u662fMB\u3002

                                                        "},{"location":"admin/kpanda/gpu/mlu/use-mlu.html#yaml","title":"\u4f7f\u7528 YAML \u914d\u7f6e","text":"

                                                        \u53c2\u8003 YAML \u6587\u4ef6\u5982\u4e0b\uff1a

                                                        apiVersion: v1  \nkind: Pod  \nmetadata:  \n  name: pod1  \nspec:  \n  restartPolicy: OnFailure  \n  containers:  \n    - image: ubuntu:16.04  \n      name: pod1-ctr  \n      command: [\"sleep\"]  \n      args: [\"100000\"]  \n      resources:  \n        limits:  \n          cambricon.com/mlu: \"1\" # use this when device type is not enabled, else delete this line.  \n          #cambricon.com/mlu: \"1\" #uncomment to use when device type is enabled  \n          #cambricon.com/mlu.share: \"1\" #uncomment to use device with env-share mode  \n          #cambricon.com/mlu.mim-2m.8gb: \"1\" #uncomment to use device with mim mode  \n          #cambricon.com/mlu.smlu.vcore: \"100\" #uncomment to use device with dynamic smlu mode  \n          #cambricon.com/mlu.smlu.vmemory: \"1024\" #uncomment to use device with dynamic smlu mode\n
                                                        "},{"location":"admin/kpanda/gpu/nvidia/index.html","title":"NVIDIA GPU \u5361\u4f7f\u7528\u6a21\u5f0f","text":"

                                                        NVIDIA \u4f5c\u4e3a\u4e1a\u5185\u77e5\u540d\u7684\u56fe\u5f62\u8ba1\u7b97\u4f9b\u5e94\u5546\uff0c\u4e3a\u7b97\u529b\u7684\u63d0\u5347\u63d0\u4f9b\u4e86\u8bf8\u591a\u8f6f\u786c\u4ef6\u89e3\u51b3\u65b9\u6848\uff0c\u5176\u4e2d NVIDIA \u5728 GPU \u7684\u4f7f\u7528\u65b9\u5f0f\u4e0a\u63d0\u4f9b\u4e86\u5982\u4e0b\u4e09\u79cd\u89e3\u51b3\u65b9\u6848\uff1a

                                                        "},{"location":"admin/kpanda/gpu/nvidia/index.html#full-gpu","title":"\u6574\u5361\uff08Full GPU\uff09","text":"

                                                        \u6574\u5361\u662f\u6307\u5c06\u6574\u4e2a NVIDIA GPU \u5206\u914d\u7ed9\u5355\u4e2a\u7528\u6237\u6216\u5e94\u7528\u7a0b\u5e8f\u3002\u5728\u8fd9\u79cd\u914d\u7f6e\u4e0b\uff0c\u5e94\u7528\u53ef\u4ee5\u5b8c\u5168\u5360\u7528 GPU \u7684\u6240\u6709\u8d44\u6e90\uff0c \u5e76\u83b7\u5f97\u6700\u5927\u7684\u8ba1\u7b97\u6027\u80fd\u3002\u6574\u5361\u9002\u7528\u4e8e\u9700\u8981\u5927\u91cf\u8ba1\u7b97\u8d44\u6e90\u548c\u5185\u5b58\u7684\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u5982\u6df1\u5ea6\u5b66\u4e60\u8bad\u7ec3\u3001\u79d1\u5b66\u8ba1\u7b97\u7b49\u3002

                                                        "},{"location":"admin/kpanda/gpu/nvidia/index.html#vgpuvirtual-gpu","title":"vGPU\uff08Virtual GPU\uff09","text":"

                                                        vGPU \u662f\u4e00\u79cd\u865a\u62df\u5316\u6280\u672f\uff0c\u5141\u8bb8\u5c06\u4e00\u4e2a\u7269\u7406 GPU \u5212\u5206\u4e3a\u591a\u4e2a\u865a\u62df GPU\uff0c\u6bcf\u4e2a\u865a\u62df GPU \u5206\u914d\u7ed9\u4e0d\u540c\u7684\u4e91\u4e3b\u673a\u6216\u7528\u6237\u3002 vGPU \u4f7f\u591a\u4e2a\u7528\u6237\u53ef\u4ee5\u5171\u4eab\u540c\u4e00\u53f0\u7269\u7406 GPU\uff0c\u5e76\u5728\u5404\u81ea\u7684\u865a\u62df\u73af\u5883\u4e2d\u72ec\u7acb\u4f7f\u7528 GPU \u8d44\u6e90\u3002 \u6bcf\u4e2a\u865a\u62df GPU \u53ef\u4ee5\u83b7\u5f97\u4e00\u5b9a\u7684\u8ba1\u7b97\u80fd\u529b\u548c\u663e\u5b58\u5bb9\u91cf\u3002vGPU \u9002\u7528\u4e8e\u865a\u62df\u5316\u73af\u5883\u548c\u4e91\u8ba1\u7b97\u573a\u666f\uff0c\u53ef\u4ee5\u63d0\u4f9b\u66f4\u9ad8\u7684\u8d44\u6e90\u5229\u7528\u7387\u548c\u7075\u6d3b\u6027\u3002

                                                        "},{"location":"admin/kpanda/gpu/nvidia/index.html#migmulti-instance-gpu","title":"MIG\uff08Multi-Instance GPU\uff09","text":"

                                                        MIG \u662f NVIDIA Ampere \u67b6\u6784\u5f15\u5165\u7684\u4e00\u9879\u529f\u80fd\uff0c\u5b83\u5141\u8bb8\u5c06\u4e00\u4e2a\u7269\u7406 GPU \u5212\u5206\u4e3a\u591a\u4e2a\u7269\u7406 GPU \u5b9e\u4f8b\uff0c\u6bcf\u4e2a\u5b9e\u4f8b\u53ef\u4ee5\u72ec\u7acb\u5206\u914d\u7ed9\u4e0d\u540c\u7684\u7528\u6237\u6216\u5de5\u4f5c\u8d1f\u8f7d\u3002 \u6bcf\u4e2a MIG \u5b9e\u4f8b\u5177\u6709\u81ea\u5df1\u7684\u8ba1\u7b97\u8d44\u6e90\u3001\u663e\u5b58\u548c PCIe \u5e26\u5bbd\uff0c\u5c31\u50cf\u4e00\u4e2a\u72ec\u7acb\u7684\u865a\u62df GPU\u3002 MIG \u63d0\u4f9b\u4e86\u66f4\u7ec6\u7c92\u5ea6\u7684 GPU \u8d44\u6e90\u5206\u914d\u548c\u7ba1\u7406\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u6c42\u52a8\u6001\u8c03\u6574\u5b9e\u4f8b\u7684\u6570\u91cf\u548c\u5927\u5c0f\u3002 MIG \u9002\u7528\u4e8e\u591a\u79df\u6237\u73af\u5883\u3001\u5bb9\u5668\u5316\u5e94\u7528\u7a0b\u5e8f\u548c\u6279\u5904\u7406\u4f5c\u4e1a\u7b49\u573a\u666f\u3002

                                                        \u65e0\u8bba\u662f\u5728\u865a\u62df\u5316\u73af\u5883\u4e2d\u4f7f\u7528 vGPU\uff0c\u8fd8\u662f\u5728\u7269\u7406 GPU \u4e0a\u4f7f\u7528 MIG\uff0cNVIDIA \u4e3a\u7528\u6237\u63d0\u4f9b\u4e86\u66f4\u591a\u7684\u9009\u62e9\u548c\u4f18\u5316 GPU \u8d44\u6e90\u7684\u65b9\u5f0f\u3002 \u7b97\u4e30 AI \u7b97\u529b\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\u5168\u9762\u517c\u5bb9\u4e86\u4e0a\u8ff0 NVIDIA \u7684\u80fd\u529b\u7279\u6027\uff0c\u7528\u6237\u53ea\u9700\u901a\u8fc7\u7b80\u5355\u7684\u754c\u9762\u64cd\u4f5c\uff0c\u5c31\u80fd\u591f\u83b7\u5f97\u5168\u90e8 NVIDIA GPU \u7684\u8ba1\u7b97\u80fd\u529b\uff0c\u4ece\u800c\u63d0\u9ad8\u8d44\u6e90\u5229\u7528\u7387\u5e76\u964d\u4f4e\u6210\u672c\u3002

                                                        • Single mode: the node exposes a single type of MIG device across all of its GPUs. All GPUs on the node must:
                                                          • Be of the same model (for example, A100-SXM-40GB); MIG profiles are identical only across GPUs of the same model
                                                          • Have MIG enabled, which requires a reboot to take effect
                                                          • Create identical GIs and CIs, so that a fully identical MIG device type is exposed across all products
                                                        • Mixed mode: the node exposes a mix of MIG device types across all of its GPUs. Requesting a specific MIG device type requires specifying the number of compute slices and the total memory provided by that device type.
                                                          • All GPUs on the node must belong to the same product line (for example, A100-SXM-40GB)
                                                          • Each GPU may have MIG enabled or disabled, and may freely configure any mix of the available MIG device types.
                                                          • The k8s-device-plugin running on the node will:
                                                            • Expose any GPU not in MIG mode using the traditional nvidia.com/gpu resource type
                                                            • Expose individual MIG devices using resource types that follow the nvidia.com/mig-g.gb naming scheme, for example nvidia.com/mig-1g.5gb (see the sketch below)

                                                              For details on enabling and configuring this, see GPU Operator Offline Installation.
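
                                                              As an illustration, a minimal Pod sketch requesting one MIG device in mixed mode; the profile nvidia.com/mig-1g.5gb, the Pod name, and the image are examples, and the profile must match a device actually exposed on your node:

                                                              apiVersion: v1\nkind: Pod\nmetadata:\n  name: mig-demo # example name\nspec:\n  containers:\n    - name: cuda\n      image: nvidia/cuda:12.2.0-base-ubuntu22.04 # example image\n      command: [\"nvidia-smi\", \"-L\"]\n      resources:\n        limits:\n          nvidia.com/mig-1g.5gb: 1 # example MIG profile\n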

                                                              "},{"location":"admin/kpanda/gpu/nvidia/index.html#_1","title":"\u5982\u4f55\u4f7f\u7528","text":"

                                                              \u60a8\u53ef\u4ee5\u53c2\u8003\u4ee5\u4e0b\u94fe\u63a5\uff0c\u5feb\u901f\u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5173\u4e8e NVIDIA GPU \u5361\u7684\u7ba1\u7406\u80fd\u529b\u3002

                                                              • NVIDIA GPU \u6574\u5361\u4f7f\u7528
                                                              • NVIDIA vGPU \u4f7f\u7528
                                                              • NVIDIA MIG \u4f7f\u7528
                                                              "},{"location":"admin/kpanda/gpu/nvidia/full_gpu_userguide.html","title":"\u5e94\u7528\u4f7f\u7528 GPU \u6574\u5361","text":"

                                                              \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5c06\u6574\u4e2a NVIDIA GPU \u5361\u5206\u914d\u7ed9\u5355\u4e2a\u5e94\u7528\u3002

                                                              "},{"location":"admin/kpanda/gpu/nvidia/full_gpu_userguide.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              • \u5df2\u7ecf\u90e8\u7f72 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 \u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u4e14\u5e73\u53f0\u8fd0\u884c\u6b63\u5e38\u3002
                                                              • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                                                              • \u5f53\u524d\u96c6\u7fa4\u5df2\u79bb\u7ebf\u5b89\u88c5 GPU Operator \u5e76\u5df2\u542f\u7528 NVIDIA DevicePlugin \uff0c\u53ef\u53c2\u8003 GPU Operator \u79bb\u7ebf\u5b89\u88c5\u3002
                                                              • \u5f53\u524d\u96c6\u7fa4\u5185 GPU \u5361\u672a\u8fdb\u884c\u4efb\u4f55\u865a\u62df\u5316\u64cd\u4f5c\u6216\u88ab\u5176\u5b83\u5e94\u7528\u5360\u7528\u3002
                                                              "},{"location":"admin/kpanda/gpu/nvidia/full_gpu_userguide.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"admin/kpanda/gpu/nvidia/full_gpu_userguide.html#ui","title":"\u4f7f\u7528 UI \u754c\u9762\u914d\u7f6e","text":"
                                                              1. \u786e\u8ba4\u96c6\u7fa4\u662f\u5426\u5df2\u68c0\u6d4b GPU \u5361\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u81ea\u52a8\u542f\u7528\u5e76\u81ea\u52a8\u68c0\u6d4b\u5bf9\u5e94 GPU \u7c7b\u578b\u3002 \u76ee\u524d\u96c6\u7fa4\u4f1a\u81ea\u52a8\u542f\u7528 GPU \uff0c\u5e76\u4e14\u8bbe\u7f6e GPU \u7c7b\u578b\u4e3a Nvidia GPU \u3002

                                                              2. \u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u9009\u62e9\u7c7b\u578b\uff08Nvidia GPU\uff09\u4e4b\u540e\uff0c\u9700\u8981\u914d\u7f6e\u5e94\u7528\u4f7f\u7528\u7684\u7269\u7406\u5361\u6570\u91cf\uff1a

                                                                \u7269\u7406\u5361\u6570\u91cf\uff08nvidia.com/gpu\uff09 \uff1a\u8868\u793a\u5f53\u524d Pod \u9700\u8981\u6302\u8f7d\u51e0\u5f20\u7269\u7406\u5361\uff0c\u8f93\u5165\u503c\u5fc5\u987b\u4e3a\u6574\u6570\u4e14 \u5c0f\u4e8e\u7b49\u4e8e \u5bbf\u4e3b\u673a\u4e0a\u7684\u5361\u6570\u91cf\u3002

                                                                \u5982\u679c\u4e0a\u8ff0\u503c\u914d\u7f6e\u7684\u6709\u95ee\u9898\u5219\u4f1a\u51fa\u73b0\u8c03\u5ea6\u5931\u8d25\uff0c\u8d44\u6e90\u5206\u914d\u4e0d\u4e86\u7684\u60c5\u51b5\u3002

                                                              "},{"location":"admin/kpanda/gpu/nvidia/full_gpu_userguide.html#yaml","title":"\u4f7f\u7528 YAML \u914d\u7f6e","text":"

                                                              \u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u7533\u8bf7 GPU \u8d44\u6e90\uff0c\u5728\u8d44\u6e90\u7533\u8bf7\u548c\u9650\u5236\u914d\u7f6e\u4e2d\u589e\u52a0 nvidia.com/gpu: 1 \u53c2\u6570\u914d\u7f6e\u5e94\u7528\u4f7f\u7528\u7269\u7406\u5361\u7684\u6570\u91cf\u3002

                                                              apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-gpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-gpu-demo\n  template:\n    metadata:\n      labels:\n        app: full-gpu-demo\n    spec:\n      containers:\n      - image: chrstnhntschl/gpu_burn\n        name: container-0\n        resources:\n          requests:\n            cpu: 250m\n            memory: 512Mi\n            nvidia.com/gpu: 1   # number of GPUs requested\n          limits:\n            cpu: 250m\n            memory: 512Mi\n            nvidia.com/gpu: 1   # upper limit on the number of GPUs\n      imagePullSecrets:\n      - name: default-secret\n

                                                              Note

                                                              When specifying the number of GPUs with the nvidia.com/gpu parameter, the requests and limits values must be kept consistent.

                                                              "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html","title":"GPU Operator \u79bb\u7ebf\u5b89\u88c5","text":"

                                                              \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9884\u7f6e\u4e86 Ubuntu22.04\u3001Ubuntu20.04\u3001CentOS 7.9 \u8fd9\u4e09\u4e2a\u64cd\u4f5c\u7cfb\u7edf\u7684 Driver \u955c\u50cf\uff0c\u9a71\u52a8\u7248\u672c\u662f 535.104.12\uff1b \u5e76\u4e14\u5185\u7f6e\u4e86\u5404\u64cd\u4f5c\u7cfb\u7edf\u6240\u9700\u7684 Toolkit \u955c\u50cf\uff0c\u7528\u6237\u4e0d\u518d\u9700\u8981\u624b\u52a8\u79bb\u7ebf Toolkit \u955c\u50cf\u3002

                                                              \u672c\u6587\u4f7f\u7528 AMD \u67b6\u6784\u7684 CentOS 7.9\uff083.10.0-1160\uff09\u8fdb\u884c\u6f14\u793a\u3002\u5982\u9700\u4f7f\u7528 Red Hat 8.4 \u90e8\u7f72\uff0c \u8bf7\u53c2\u8003\u5411\u706b\u79cd\u8282\u70b9\u4ed3\u5e93\u4e0a\u4f20 Red Hat GPU Opreator \u79bb\u7ebf\u955c\u50cf\u548c\u6784\u5efa Red Hat 8.4 \u79bb\u7ebf yum \u6e90\u3002

                                                              "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              • \u5f85\u90e8\u7f72 gpu-operator \u7684\u96c6\u7fa4\u8282\u70b9\u5185\u6838\u7248\u672c\u5fc5\u987b\u5b8c\u5168\u4e00\u81f4\u3002\u8282\u70b9\u6240\u5728\u7684\u53d1\u884c\u7248\u548c GPU \u5361\u578b\u53f7\u5728 GPU \u652f\u6301\u77e9\u9635\u7684\u8303\u56f4\u5185\u3002
                                                              • \u5b89\u88c5 gpu-operator \u65f6\u9009\u62e9 v23.9.0+2 \u53ca\u4ee5\u4e0a\u7248\u672c
                                                              "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                              Refer to the following steps to install the gpu-operator plugin for the cluster.

                                                              1. Log in to the platform and go to Container Management -> the cluster where gpu-operator is to be installed -> enter the cluster details.

                                                              2. On the Helm Charts page, select All Repositories and search for gpu-operator.

                                                              3. Select gpu-operator and click Install.

                                                              4. Configure the gpu-operator installation parameters as described below to complete the installation.

                                                              "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_3","title":"Parameter Configuration","text":"
                                                              • systemOS: select the operating system of the machines. The current built-in options are Ubuntu 22.04, Ubuntu 20.04, CentOS 7.9, and other; make sure to select the correct operating system.
                                                              "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_4","title":"Basic Parameters","text":"
                                                              • Name: enter the plugin name.
                                                              • Namespace: select the namespace in which to install the plugin.
                                                              • Version: the plugin version; here v23.9.0+2 is used as an example.
                                                              • Delete on Failure: if the installation fails, delete the associated resources that have already been installed. When enabled, Ready Wait is also enabled by default.
                                                              • Ready Wait: when enabled, the application is marked as successfully installed only when all associated resources are in the ready state.
                                                              • Verbose Logs: when enabled, detailed logs of the installation process are recorded.
                                                              "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_5","title":"\u9ad8\u7ea7\u53c2\u6570\u914d\u7f6e","text":""},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#operator","title":"Operator \u53c2\u6570\u914d\u7f6e","text":"
• InitContainer.image: configure the CUDA image; the default image nvidia/cuda is recommended.
• InitContainer.repository: the image repository where the CUDA image resides; defaults to the nvcr.m.daocloud.io repository.
• InitContainer.version: the version of the CUDA image; please use the default value.
                                                              "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#driver","title":"Driver \u53c2\u6570\u914d\u7f6e","text":"
• Driver.enable: whether to deploy the NVIDIA driver on the nodes; enabled by default. If the NVIDIA driver was already deployed on the nodes before using GPU Operator, disable this option. (A manually deployed driver requires you to track the compatibility between the CUDA Toolkit and the Toolkit Driver Version; installing through GPU Operator removes that concern.)
• Driver.usePrecompiled: install the driver in precompiled mode.
• Driver.image: configure the GPU driver image; the default image nvidia/driver is recommended.
• Driver.repository: the image repository where the GPU driver image resides; defaults to NVIDIA's nvcr.io repository.
• Driver.version: the version of the GPU driver image. Keep the default value for offline deployment; it only needs to be configured for online installation. The Driver image version differs by operating system type; for details, see Nvidia GPU Driver Versions. Examples of Driver Versions for different operating systems:

Note

When using a built-in operating system version, there is no need to modify the image version; for other operating system versions, refer to Uploading Images to the Bootstrap Node Repository. Do not append an operating system name such as Ubuntu, CentOS, or Red Hat after the version number; if the official image carries an operating system suffix, remove it manually.

• Red Hat systems, for example 525.105.17
• Ubuntu systems, for example 535-5.15.0-1043-nvidia
• CentOS systems, for example 525.147.05
• Driver.RepoConfig.ConfigMapName: records the name of the offline yum repository configuration file for GPU Operator. When using the preset offline packages, refer to the following documents for each operating system type (a command-line sketch covering these driver values follows this list).

• Building a CentOS 7.9 offline yum repository
• Building a Red Hat 8.4 offline yum repository
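
For reference, the sketch below sets the same driver values from the command line with Helm instead of the platform UI. The release name, namespace, and concrete values are assumptions; the keys mirror the upstream gpu-operator chart.

# Hypothetical CLI equivalent of the UI parameters above\nhelm upgrade --install gpu-operator nvidia/gpu-operator \\\n  -n gpu-operator --create-namespace \\\n  --set driver.repository=nvcr.m.daocloud.io/nvidia \\\n  --set-string driver.version=535.104.12 \\\n  --set driver.repoConfig.configMapName=local-repo-config\n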
                                                              "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#toolkit","title":"Toolkit \u914d\u7f6e\u53c2\u6570","text":"

Toolkit.enable: enabled by default. This component enables containerd/docker to run containers that require GPUs.

                                                              "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#mig","title":"MIG \u914d\u7f6e\u53c2\u6570","text":"

For detailed configuration, refer to Enabling MIG.

MigManager.Config.name: the name of the MIG partitioning configuration file, which defines the (GI, CI) partitioning strategy for MIG. Defaults to default-mig-parted-config. For custom parameters, refer to Enabling MIG.
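
As an illustration only, a custom partitioning profile can be supplied as a ConfigMap and referenced through MigManager.Config.name. The profile name custom-config, the namespace, and the 1g.10gb layout below are assumptions following the mig-parted configuration format.

# Hypothetical custom profile in the mig-parted config format\nkubectl create configmap custom-mig-parted-config -n gpu-operator --from-literal=config.yaml='\nversion: v1\nmig-configs:\n  custom-config:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        \"1g.10gb\": 7\n'\n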

                                                              "},{"location":"admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_6","title":"\u4e0b\u4e00\u6b65\u64cd\u4f5c","text":"

After completing the parameter configuration and creation described above:

• If you use Full GPU mode, GPU resources can be used when creating applications.

• If you use vGPU mode, after completing the parameter configuration and creation above, proceed to install the vGPU Addon.

• If you use MIG mode and need individual GPU nodes to be partitioned according to a particular spec, label those nodes as follows; otherwise nodes are partitioned according to the default value in MigManager.Config.

• For single mode, apply the following label to the corresponding node:

                                                                  kubectl label nodes {node} nvidia.com/mig.config=\"all-1g.10gb\" --overwrite\n
• For mixed mode, apply the following label to the corresponding node:

                                                                  kubectl label nodes {node} nvidia.com/mig.config=\"custom-config\" --overwrite\n

After partitioning, applications can use MIG GPU resources.
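
To confirm that the partitioning took effect, a check along the following lines can help; {node} is the same placeholder as above, and MIG devices are advertised as nvidia.com/mig-* resources only under the mixed strategy.

# The label applied above should be reported back\nkubectl get node {node} --show-labels | grep nvidia.com/mig.config\n# Under the mixed strategy, MIG devices appear as allocatable nvidia.com/mig-* resources\nkubectl describe node {node} | grep nvidia.com/mig-\n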

                                                              "},{"location":"admin/kpanda/gpu/nvidia/push_image_to_repo.html","title":"\u5411\u706b\u79cd\u8282\u70b9\u4ed3\u5e93\u4e0a\u4f20 Red Hat GPU Opreator \u79bb\u7ebf\u955c\u50cf","text":"

This document uses the Red Hat 8.4 offline driver image nvcr.io/nvidia/driver:525.105.17-rhel8.4 as an example to describe how to upload an offline image to the bootstrap node repository.

                                                              "},{"location":"admin/kpanda/gpu/nvidia/push_image_to_repo.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              1. \u706b\u79cd\u8282\u70b9\u53ca\u5176\u7ec4\u4ef6\u72b6\u6001\u8fd0\u884c\u6b63\u5e38\u3002
                                                              2. \u51c6\u5907\u4e00\u4e2a\u80fd\u591f\u8bbf\u95ee\u4e92\u8054\u7f51\u548c\u706b\u79cd\u8282\u70b9\u7684\u8282\u70b9\uff0c\u4e14\u8282\u70b9\u4e0a\u5df2\u7ecf\u5b8c\u6210 Docker \u7684\u5b89\u88c5\u3002
                                                              "},{"location":"admin/kpanda/gpu/nvidia/push_image_to_repo.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"admin/kpanda/gpu/nvidia/push_image_to_repo.html#_3","title":"\u5728\u8054\u7f51\u8282\u70b9\u83b7\u53d6\u79bb\u7ebf\u955c\u50cf","text":"

Perform the following operations on the Internet-connected node.

1. On the Internet-connected machine, pull the nvcr.io/nvidia/driver:525.105.17-rhel8.4 offline driver image:

                                                                docker pull nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
2. After the image is pulled, package it into an archive named nvidia-driver.tar:

                                                                docker save nvcr.io/nvidia/driver:525.105.17-rhel8.4 > nvidia-driver.tar\n
3. Copy the nvidia-driver.tar image archive to the bootstrap node:

                                                                scp  nvidia-driver.tar user@ip:/root\n

For example:

                                                                scp  nvidia-driver.tar root@10.6.175.10:/root\n
                                                              "},{"location":"admin/kpanda/gpu/nvidia/push_image_to_repo.html#_4","title":"\u63a8\u9001\u955c\u50cf\u5230\u706b\u79cd\u8282\u70b9\u4ed3\u5e93","text":"

Perform the following operations on the bootstrap node.

1. Log in to the bootstrap node and load the image archive nvidia-driver.tar copied from the Internet-connected node:

                                                                docker load -i nvidia-driver.tar\n
2. View the image you just loaded:

                                                                docker images -a |grep nvidia\n

Expected output:

nvcr.io/nvidia/driver                 525.105.17-rhel8.4   e3ed7dee73e9   1 day ago   1.02GB\n
3. Retag the image so that it corresponds to the target repository in the remote Registry:

                                                                docker tag <image-name> <registry-url>/<repository-name>:<tag>\n
                                                                • <image-name> \u662f\u4e0a\u4e00\u6b65 nvidia \u955c\u50cf\u7684\u540d\u79f0\uff0c
                                                                • <registry-url> \u662f\u706b\u79cd\u8282\u70b9\u4e0a Registry \u670d\u52a1\u7684\u5730\u5740\uff0c
                                                                • <repository-name> \u662f\u60a8\u8981\u63a8\u9001\u5230\u7684\u4ed3\u5e93\u540d\u79f0\uff0c
                                                                • <tag> \u662f\u60a8\u4e3a\u955c\u50cf\u6307\u5b9a\u7684\u6807\u7b7e\u3002

For example:

docker tag nvcr.io/nvidia/driver:525.105.17-rhel8.4 10.6.10.5/nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
4. Push the image to the bootstrap node image repository (a verification sketch follows the command):

                                                                docker push {ip}/nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
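
To confirm that the push succeeded, assuming the bootstrap registry exposes the standard Docker Registry v2 API (the address 10.6.10.5 is carried over from the example above):

# List the tags recorded for the pushed repository\ncurl http://10.6.10.5/v2/nvcr.io/nvidia/driver/tags/list\n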
                                                              "},{"location":"admin/kpanda/gpu/nvidia/push_image_to_repo.html#_5","title":"\u63a5\u4e0b\u6765","text":"

To deploy GPU Operator for the cluster, refer to Building a Red Hat 8.4 Offline Yum Repository and Offline Installation of GPU Operator.

                                                              "},{"location":"admin/kpanda/gpu/nvidia/rhel9.2_offline_install_driver.html","title":"RHEL 9.2 \u79bb\u7ebf\u5b89\u88c5 gpu-operator \u9a71\u52a8","text":"

Prerequisite: gpu-operator v23.9.0+2 or later is installed.

The RHEL 9.2 driver image cannot be installed directly because the official driver script has a minor issue. Until it is fixed upstream, the following steps provide a way to install the driver offline.

                                                              "},{"location":"admin/kpanda/gpu/nvidia/rhel9.2_offline_install_driver.html#nouveau","title":"\u7981\u7528nouveau\u9a71\u52a8","text":"

RHEL 9.2 ships with nouveau, an unofficial NVIDIA driver, so it must be disabled first.

# Create a new configuration file\nsudo vi /etc/modprobe.d/blacklist-nouveau.conf\n# Add the following two lines:\nblacklist nouveau\noptions nouveau modeset=0\n# Disable nouveau\nsudo dracut --force\n# Reboot the machine\nsudo reboot\n# Verify that nouveau has been disabled\nlsmod | grep nouveau\n
                                                              "},{"location":"admin/kpanda/gpu/nvidia/rhel9.2_offline_install_driver.html#_1","title":"\u81ea\u5b9a\u4e49\u9a71\u52a8\u955c\u50cf","text":"

First, create the nvidia-driver file locally:

Click to view the full content of the nvidia-driver file
                                                              #! /bin/bash -x\n# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.\n\nset -eu\n\nRUN_DIR=/run/nvidia\nPID_FILE=${RUN_DIR}/${0##*/}.pid\nDRIVER_VERSION=${DRIVER_VERSION:?\"Missing DRIVER_VERSION env\"}\nKERNEL_UPDATE_HOOK=/run/kernel/postinst.d/update-nvidia-driver\nNUM_VGPU_DEVICES=0\nNVIDIA_MODULE_PARAMS=()\nNVIDIA_UVM_MODULE_PARAMS=()\nNVIDIA_MODESET_MODULE_PARAMS=()\nNVIDIA_PEERMEM_MODULE_PARAMS=()\nTARGETARCH=${TARGETARCH:?\"Missing TARGETARCH env\"}\nUSE_HOST_MOFED=\"${USE_HOST_MOFED:-false}\"\nDNF_RELEASEVER=${DNF_RELEASEVER:-\"\"}\nRHEL_VERSION=${RHEL_VERSION:-\"\"}\nRHEL_MAJOR_VERSION=9\n\nOPEN_KERNEL_MODULES_ENABLED=${OPEN_KERNEL_MODULES_ENABLED:-false}\n[[ \"${OPEN_KERNEL_MODULES_ENABLED}\" == \"true\" ]] && KERNEL_TYPE=kernel-open || KERNEL_TYPE=kernel\n\nDRIVER_ARCH=${TARGETARCH/amd64/x86_64} && DRIVER_ARCH=${DRIVER_ARCH/arm64/aarch64}\necho \"DRIVER_ARCH is $DRIVER_ARCH\"\n\nSCRIPT_DIR=$( cd -- \"$( dirname -- \"${BASH_SOURCE[0]}\" )\" &> /dev/null && pwd )\nsource $SCRIPT_DIR/common.sh\n\n_update_package_cache() {\n    if [ \"${PACKAGE_TAG:-}\" != \"builtin\" ]; then\n        echo \"Updating the package cache...\"\n        if ! yum -q makecache; then\n            echo \"FATAL: failed to reach RHEL package repositories. \"\\\n                 \"Ensure that the cluster can access the proper networks.\"\n            exit 1\n        fi\n    fi\n}\n\n_cleanup_package_cache() {\n    if [ \"${PACKAGE_TAG:-}\" != \"builtin\" ]; then\n        echo \"Cleaning up the package cache...\"\n        rm -rf /var/cache/yum/*\n    fi\n}\n\n_get_rhel_version_from_kernel() {\n    local rhel_version_underscore rhel_version_arr\n    rhel_version_underscore=$(echo \"${KERNEL_VERSION}\" | sed 's/.*el\\([0-9]\\+_[0-9]\\+\\).*/\\1/g')\n    # For e.g. :- from the kernel version 4.18.0-513.9.1.el8_9, we expect to extract the string \"8_9\"\n    if [[ ! 
${rhel_version_underscore} =~ ^[0-9]+_[0-9]+$ ]]; then\n        echo \"Unable to resolve RHEL version from kernel version\" >&2\n        return 1\n    fi\n    IFS='_' read -r -a rhel_version_arr <<< \"$rhel_version_underscore\"\n    if [[ ${#rhel_version_arr[@]} -ne 2 ]]; then\n        echo \"Unable to resolve RHEL version from kernel version\" >&2\n        return 1\n    fi\n    RHEL_VERSION=\"${rhel_version_arr[0]}.${rhel_version_arr[1]}\"\n    echo \"RHEL VERSION successfully resolved from kernel: ${RHEL_VERSION}\"\n    return 0\n}\n\n_resolve_rhel_version() {\n    _get_rhel_version_from_kernel || RHEL_VERSION=\"${RHEL_MAJOR_VERSION}\"\n    # set dnf release version as rhel version by default\n    if [[ -z \"${DNF_RELEASEVER}\" ]]; then\n        DNF_RELEASEVER=\"${RHEL_VERSION}\"\n    fi\n    return 0\n}\n\n# Resolve the kernel version to the form major.minor.patch-revision.\n_resolve_kernel_version() {\n    echo \"Resolving Linux kernel version...\"\n    local version=$(yum -q list available --showduplicates kernel-headers |\n      awk -v arch=$(uname -m) 'NR>1 {print $2\".\"arch}' | tac | grep -E -m1 \"^${KERNEL_VERSION/latest/.*}\")\n\n    if [ -z \"${version}\" ]; then\n        echo \"Could not resolve Linux kernel version\" >&2\n        return 1\n    fi\n    KERNEL_VERSION=\"${version}\"\n    echo \"Proceeding with Linux kernel version ${KERNEL_VERSION}\"\n    return 0\n}\n\n# Install the kernel modules header/builtin/order files and generate the kernel version string.\n_install_prerequisites() (\n    local tmp_dir=$(mktemp -d)\n\n    trap \"rm -rf ${tmp_dir}\" EXIT\n    cd ${tmp_dir}\n\n    echo \"Installing elfutils...\"\n    if ! dnf install -q -y elfutils-libelf.$DRIVER_ARCH; then\n        echo \"FATAL: failed to install elfutils packages. RHEL entitlement may be improperly deployed.\"\n        exit 1\n    fi\n    if ! dnf install -q -y elfutils-libelf-devel.$DRIVER_ARCH; then\n        echo \"FATAL: failed to install elfutils packages. RHEL entitlement may be improperly deployed.\"\n        exit 1\n    fi    \n\n    rm -rf /lib/modules/${KERNEL_VERSION}\n    mkdir -p /lib/modules/${KERNEL_VERSION}/proc\n\n    echo \"Enabling RHOCP and EUS RPM repos...\"\n    if [ -n \"${OPENSHIFT_VERSION:-}\" ]; then\n        dnf config-manager --set-enabled rhocp-${OPENSHIFT_VERSION}-for-rhel-9-$DRIVER_ARCH-rpms || true\n        if ! dnf makecache --releasever=${DNF_RELEASEVER}; then\n            dnf config-manager --set-disabled rhocp-${OPENSHIFT_VERSION}-for-rhel-9-$DRIVER_ARCH-rpms || true\n        fi\n    fi\n\n    dnf config-manager --set-enabled rhel-9-for-$DRIVER_ARCH-baseos-eus-rpms  || true\n    if ! dnf makecache --releasever=${DNF_RELEASEVER}; then\n            dnf config-manager --set-disabled rhel-9-for-$DRIVER_ARCH-baseos-eus-rpms || true\n    fi\n\n    # try with EUS disabled, if it does not work, then try just major version\n    if ! dnf makecache --releasever=${DNF_RELEASEVER}; then\n      # If pointing to DNF_RELEASEVER does not work, we point to the RHEL_MAJOR_VERSION as a last resort\n      if ! 
dnf makecache --releasever=${RHEL_MAJOR_VERSION}; then\n        echo \"FATAL: failed to update the dnf metadata cache after multiple attempts with releasevers ${DNF_RELEASEVER}, ${RHEL_MAJOR_VERSION}\"\n        exit 1\n      else\n        DNF_RELEASEVER=${RHEL_MAJOR_VERSION}\n      fi\n    fi\n\n    echo \"Installing Linux kernel headers...\"\n    dnf -q -y --releasever=${DNF_RELEASEVER} install kernel-headers-${KERNEL_VERSION} kernel-devel-${KERNEL_VERSION} --allowerasing > /dev/null\n    ln -s /usr/src/kernels/${KERNEL_VERSION} /lib/modules/${KERNEL_VERSION}/build\n\n    echo \"Installing Linux kernel module files...\"\n    dnf -q -y --releasever=${DNF_RELEASEVER} install kernel-core-${KERNEL_VERSION} > /dev/null\n\n    # Prevent depmod from giving a WARNING about missing files\n    touch /lib/modules/${KERNEL_VERSION}/modules.order\n    touch /lib/modules/${KERNEL_VERSION}/modules.builtin\n\n    depmod ${KERNEL_VERSION}\n\n    echo \"Generating Linux kernel version string...\"\n    if [ \"$TARGETARCH\" = \"arm64\" ]; then\n        gunzip -c /lib/modules/${KERNEL_VERSION}/vmlinuz | strings | grep -E '^Linux version' | sed 's/^\\(.*\\)\\s\\+(.*)$/\\1/' > version\n    else\n        extract-vmlinux /lib/modules/${KERNEL_VERSION}/vmlinuz | strings | grep -E '^Linux version' | sed 's/^\\(.*\\)\\s\\+(.*)$/\\1/' > version\n    fi\n    if [ -z \"$(<version)\" ]; then\n        echo \"Could not locate Linux kernel version string\" >&2\n        return 1\n    fi\n    mv version /lib/modules/${KERNEL_VERSION}/proc\n\n    # Parse gcc version\n    # gcc_version is expected to match x.y.z\n    # current_gcc is expected to match 'gcc-x.y.z-rel.el8.x86_64\n    local gcc_version=$(cat /lib/modules/${KERNEL_VERSION}/proc/version | grep -Eo \"gcc \\(GCC\\) ([0-9\\.]+)\" | grep -Eo \"([0-9\\.]+)\")\n    local current_gcc=$(rpm -qa gcc)\n    echo \"kernel requires gcc version: 'gcc-${gcc_version}', current gcc version is '${current_gcc}'\"\n\n    if ! [[ \"${current_gcc}\" =~ \"gcc-${gcc_version}\"-.* ]]; then\n        dnf install -q -y --releasever=${DNF_RELEASEVER} \"gcc-${gcc_version}\"\n    fi\n)\n\n# Cleanup the prerequisites installed above.\n_remove_prerequisites() {\n    true\n    if [ \"${PACKAGE_TAG:-}\" != \"builtin\" ]; then\n        dnf -q -y remove kernel-headers-${KERNEL_VERSION} kernel-devel-${KERNEL_VERSION} > /dev/null\n        # TODO remove module files not matching an existing driver package.\n    fi\n}\n\n# Check if the kernel version requires a new precompiled driver packages.\n_kernel_requires_package() {\n    local proc_mount_arg=\"\"\n\n    echo \"Checking NVIDIA driver packages...\"\n\n    [[ ! 
-d /usr/src/nvidia-${DRIVER_VERSION}/${KERNEL_TYPE} ]] && return 0\n    cd /usr/src/nvidia-${DRIVER_VERSION}/${KERNEL_TYPE}\n\n    proc_mount_arg=\"--proc-mount-point /lib/modules/${KERNEL_VERSION}/proc\"\n    for pkg_name in $(ls -d -1 precompiled/** 2> /dev/null); do\n        is_match=$(../mkprecompiled --match ${pkg_name} ${proc_mount_arg})\n        if [ \"${is_match}\" == \"kernel interface matches.\" ]; then\n            echo \"Found NVIDIA driver package ${pkg_name##*/}\"\n            return 1\n        fi\n    done\n    return 0\n}\n\n# Compile the kernel modules, optionally sign them, and generate a precompiled package for use by the nvidia-installer.\n_create_driver_package() (\n    local pkg_name=\"nvidia-modules-${KERNEL_VERSION%%-*}${PACKAGE_TAG:+-${PACKAGE_TAG}}\"\n    local nvidia_sign_args=\"\"\n    local nvidia_modeset_sign_args=\"\"\n    local nvidia_uvm_sign_args=\"\"\n\n    trap \"make -s -j ${MAX_THREADS} SYSSRC=/lib/modules/${KERNEL_VERSION}/build clean > /dev/null\" EXIT\n\n    echo \"Compiling NVIDIA driver kernel modules...\"\n    cd /usr/src/nvidia-${DRIVER_VERSION}/${KERNEL_TYPE}\n\n    if _gpu_direct_rdma_enabled; then\n        ln -s /run/mellanox/drivers/usr/src/ofa_kernel /usr/src/\n        # if arch directory exists(MOFED >=5.5) then create a symlink as expected by GPU driver installer\n        # This is required as currently GPU driver installer doesn't expect headers in x86_64 folder, but only in either default or kernel-version folder.\n        # ls -ltr /usr/src/ofa_kernel/\n        # lrwxrwxrwx 1 root root   36 Dec  8 20:10 default -> /etc/alternatives/ofa_kernel_headers\n        # drwxr-xr-x 4 root root 4096 Dec  8 20:14 x86_64\n        # lrwxrwxrwx 1 root root   44 Dec  9 19:05 5.4.0-90-generic -> /usr/src/ofa_kernel/x86_64/5.4.0-90-generic/\n        if [[ -d \"/run/mellanox/drivers/usr/src/ofa_kernel/$(uname -m)/$(uname -r)\" ]]; then\n            if [[ ! 
-e \"/usr/src/ofa_kernel/$(uname -r)\" ]]; then\n                ln -s \"/run/mellanox/drivers/usr/src/ofa_kernel/$(uname -m)/$(uname -r)\" /usr/src/ofa_kernel/\n            fi\n        fi\n    fi\n\n    make -s -j ${MAX_THREADS} SYSSRC=/lib/modules/${KERNEL_VERSION}/build nv-linux.o nv-modeset-linux.o > /dev/null\n\n    echo \"Relinking NVIDIA driver kernel modules...\"\n    rm -f nvidia.ko nvidia-modeset.ko\n    ld -d -r -o nvidia.ko ./nv-linux.o ./nvidia/nv-kernel.o_binary\n    ld -d -r -o nvidia-modeset.ko ./nv-modeset-linux.o ./nvidia-modeset/nv-modeset-kernel.o_binary\n\n    if [ -n \"${PRIVATE_KEY}\" ]; then\n        echo \"Signing NVIDIA driver kernel modules...\"\n        donkey get ${PRIVATE_KEY} sh -c \"PATH=${PATH}:/usr/src/linux-headers-${KERNEL_VERSION}/scripts && \\\n          sign-file sha512 \\$DONKEY_FILE pubkey.x509 nvidia.ko nvidia.ko.sign &&                          \\\n          sign-file sha512 \\$DONKEY_FILE pubkey.x509 nvidia-modeset.ko nvidia-modeset.ko.sign &&          \\\n          sign-file sha512 \\$DONKEY_FILE pubkey.x509 nvidia-uvm.ko\"\n        nvidia_sign_args=\"--linked-module nvidia.ko --signed-module nvidia.ko.sign\"\n        nvidia_modeset_sign_args=\"--linked-module nvidia-modeset.ko --signed-module nvidia-modeset.ko.sign\"\n        nvidia_uvm_sign_args=\"--signed\"\n    fi\n\n    echo \"Building NVIDIA driver package ${pkg_name}...\"\n    ../mkprecompiled --pack ${pkg_name} --description ${KERNEL_VERSION}                              \\\n                                        --proc-mount-point /lib/modules/${KERNEL_VERSION}/proc       \\\n                                        --driver-version ${DRIVER_VERSION}                           \\\n                                        --kernel-interface nv-linux.o                                \\\n                                        --linked-module-name nvidia.ko                               \\\n                                        --core-object-name nvidia/nv-kernel.o_binary                 \\\n                                        ${nvidia_sign_args}                                          \\\n                                        --target-directory .                                         \\\n                                        --kernel-interface nv-modeset-linux.o                        \\\n                                        --linked-module-name nvidia-modeset.ko                       \\\n                                        --core-object-name nvidia-modeset/nv-modeset-kernel.o_binary \\\n                                        ${nvidia_modeset_sign_args}                                  \\\n                                        --target-directory .                                         
\\\n                                        --kernel-module nvidia-uvm.ko                                \\\n                                        ${nvidia_uvm_sign_args}                                      \\\n                                        --target-directory .\n    mkdir -p precompiled\n    mv ${pkg_name} precompiled\n)\n\n_assert_nvswitch_system() {\n    [ -d /proc/driver/nvidia-nvswitch ] || return 1\n    entries=$(ls -1 /proc/driver/nvidia-nvswitch/devices/*)\n    if [ -z \"${entries}\" ]; then\n        return 1\n    fi\n    return 0\n}\n\n# For each kernel module configuration file mounted into the container,\n# parse the file contents and extract the custom module parameters that\n# are to be passed as input to 'modprobe'.\n#\n# Assumptions:\n# - Configuration files are named <module-name>.conf (i.e. nvidia.conf, nvidia-uvm.conf).\n# - Configuration files are mounted inside the container at /drivers.\n# - Each line in the file contains at least one parameter, where parameters on the same line\n#   are space delimited. It is up to the user to properly format the file to ensure\n#   the correct set of parameters are passed to 'modprobe'.\n_get_module_params() {\n    local base_path=\"/drivers\"\n    # nvidia\n    if [ -f \"${base_path}/nvidia.conf\" ]; then\n       while IFS=\"\" read -r param || [ -n \"$param\" ]; do\n           NVIDIA_MODULE_PARAMS+=(\"$param\")\n       done <\"${base_path}/nvidia.conf\"\n       echo \"Module parameters provided for nvidia: ${NVIDIA_MODULE_PARAMS[@]}\"\n    fi\n    # nvidia-uvm\n    if [ -f \"${base_path}/nvidia-uvm.conf\" ]; then\n       while IFS=\"\" read -r param || [ -n \"$param\" ]; do\n           NVIDIA_UVM_MODULE_PARAMS+=(\"$param\")\n       done <\"${base_path}/nvidia-uvm.conf\"\n       echo \"Module parameters provided for nvidia-uvm: ${NVIDIA_UVM_MODULE_PARAMS[@]}\"\n    fi\n    # nvidia-modeset\n    if [ -f \"${base_path}/nvidia-modeset.conf\" ]; then\n       while IFS=\"\" read -r param || [ -n \"$param\" ]; do\n           NVIDIA_MODESET_MODULE_PARAMS+=(\"$param\")\n       done <\"${base_path}/nvidia-modeset.conf\"\n       echo \"Module parameters provided for nvidia-modeset: ${NVIDIA_MODESET_MODULE_PARAMS[@]}\"\n    fi\n    # nvidia-peermem\n    if [ -f \"${base_path}/nvidia-peermem.conf\" ]; then\n       while IFS=\"\" read -r param || [ -n \"$param\" ]; do\n           NVIDIA_PEERMEM_MODULE_PARAMS+=(\"$param\")\n       done <\"${base_path}/nvidia-peermem.conf\"\n       echo \"Module parameters provided for nvidia-peermem: ${NVIDIA_PEERMEM_MODULE_PARAMS[@]}\"\n    fi\n}\n\n# Load the kernel modules and start persistenced.\n_load_driver() {\n    echo \"Parsing kernel module parameters...\"\n    _get_module_params\n\n    local nv_fw_search_path=\"$RUN_DIR/driver/lib/firmware\"\n    local set_fw_path=\"true\"\n    local fw_path_config_file=\"/sys/module/firmware_class/parameters/path\"\n    for param in \"${NVIDIA_MODULE_PARAMS[@]}\"; do\n        if [[ \"$param\" == \"NVreg_EnableGpuFirmware=0\" ]]; then\n          set_fw_path=\"false\"\n        fi\n    done\n\n    if [[ \"$set_fw_path\" == \"true\" ]]; then\n        echo \"Configuring the following firmware search path in '$fw_path_config_file': $nv_fw_search_path\"\n        if [[ ! 
-z $(grep '[^[:space:]]' $fw_path_config_file) ]]; then\n            echo \"WARNING: A search path is already configured in $fw_path_config_file\"\n            echo \"         Retaining the current configuration\"\n        else\n            echo -n \"$nv_fw_search_path\" > $fw_path_config_file || echo \"WARNING: Failed to configure the firmware search path\"\n        fi\n    fi\n\n    echo \"Loading ipmi and i2c_core kernel modules...\"\n    modprobe -a i2c_core ipmi_msghandler ipmi_devintf\n\n    echo \"Loading NVIDIA driver kernel modules...\"\n    set -o xtrace +o nounset\n    modprobe nvidia \"${NVIDIA_MODULE_PARAMS[@]}\"\n    modprobe nvidia-uvm \"${NVIDIA_UVM_MODULE_PARAMS[@]}\"\n    modprobe nvidia-modeset \"${NVIDIA_MODESET_MODULE_PARAMS[@]}\"\n    set +o xtrace -o nounset\n\n    if _gpu_direct_rdma_enabled; then\n        echo \"Loading NVIDIA Peer Memory kernel module...\"\n        set -o xtrace +o nounset\n        modprobe -a nvidia-peermem \"${NVIDIA_PEERMEM_MODULE_PARAMS[@]}\"\n        set +o xtrace -o nounset\n    fi\n\n    echo \"Starting NVIDIA persistence daemon...\"\n    nvidia-persistenced --persistence-mode\n\n    if [ \"${DRIVER_TYPE}\" = \"vgpu\" ]; then\n        echo \"Copying gridd.conf...\"\n        cp /drivers/gridd.conf /etc/nvidia/gridd.conf\n        if [ \"${VGPU_LICENSE_SERVER_TYPE}\" = \"NLS\" ]; then\n            echo \"Copying ClientConfigToken...\"\n            mkdir -p  /etc/nvidia/ClientConfigToken/\n            cp /drivers/ClientConfigToken/* /etc/nvidia/ClientConfigToken/\n        fi\n\n        echo \"Starting nvidia-gridd..\"\n        LD_LIBRARY_PATH=/usr/lib64/nvidia/gridd nvidia-gridd\n\n        # Start virtual topology daemon\n        _start_vgpu_topology_daemon\n    fi\n\n    if _assert_nvswitch_system; then\n        echo \"Starting NVIDIA fabric manager daemon...\"\n        nv-fabricmanager -c /usr/share/nvidia/nvswitch/fabricmanager.cfg\n    fi\n}\n\n# Stop persistenced and unload the kernel modules if they are currently loaded.\n_unload_driver() {\n    local rmmod_args=()\n    local nvidia_deps=0\n    local nvidia_refs=0\n    local nvidia_uvm_refs=0\n    local nvidia_modeset_refs=0\n    local nvidia_peermem_refs=0\n\n    echo \"Stopping NVIDIA persistence daemon...\"\n    if [ -f /var/run/nvidia-persistenced/nvidia-persistenced.pid ]; then\n        local pid=$(< /var/run/nvidia-persistenced/nvidia-persistenced.pid)\n\n        kill -SIGTERM \"${pid}\"\n        for i in $(seq 1 50); do\n            kill -0 \"${pid}\" 2> /dev/null || break\n            sleep 0.1\n        done\n        if [ $i -eq 50 ]; then\n            echo \"Could not stop NVIDIA persistence daemon\" >&2\n            return 1\n        fi\n    fi\n\n    if [ -f /var/run/nvidia-gridd/nvidia-gridd.pid ]; then\n        echo \"Stopping NVIDIA grid daemon...\"\n        local pid=$(< /var/run/nvidia-gridd/nvidia-gridd.pid)\n\n        kill -SIGTERM \"${pid}\"\n        for i in $(seq 1 10); do\n            kill -0 \"${pid}\" 2> /dev/null || break\n            sleep 0.1\n        done\n        if [ $i -eq 10 ]; then\n            echo \"Could not stop NVIDIA Grid daemon\" >&2\n            return 1\n        fi\n    fi\n\n    if [ -f /var/run/nvidia-fabricmanager/nv-fabricmanager.pid ]; then\n        echo \"Stopping NVIDIA fabric manager daemon...\"\n        local pid=$(< /var/run/nvidia-fabricmanager/nv-fabricmanager.pid)\n\n        kill -SIGTERM \"${pid}\"\n        for i in $(seq 1 50); do\n            kill -0 \"${pid}\" 2> /dev/null || break\n            sleep 0.1\n        done\n        if 
[ $i -eq 50 ]; then\n            echo \"Could not stop NVIDIA fabric manager daemon\" >&2\n            return 1\n        fi\n    fi\n\n    echo \"Unloading NVIDIA driver kernel modules...\"\n    if [ -f /sys/module/nvidia_modeset/refcnt ]; then\n        nvidia_modeset_refs=$(< /sys/module/nvidia_modeset/refcnt)\n        rmmod_args+=(\"nvidia-modeset\")\n        ((++nvidia_deps))\n    fi\n    if [ -f /sys/module/nvidia_uvm/refcnt ]; then\n        nvidia_uvm_refs=$(< /sys/module/nvidia_uvm/refcnt)\n        rmmod_args+=(\"nvidia-uvm\")\n        ((++nvidia_deps))\n    fi\n    if [ -f /sys/module/nvidia/refcnt ]; then\n        nvidia_refs=$(< /sys/module/nvidia/refcnt)\n        rmmod_args+=(\"nvidia\")\n    fi\n    if [ -f /sys/module/nvidia_peermem/refcnt ]; then\n        nvidia_peermem_refs=$(< /sys/module/nvidia_peermem/refcnt)\n        rmmod_args+=(\"nvidia-peermem\")\n        ((++nvidia_deps))\n    fi\n    if [ ${nvidia_refs} -gt ${nvidia_deps} ] || [ ${nvidia_uvm_refs} -gt 0 ] || [ ${nvidia_modeset_refs} -gt 0 ] || [ ${nvidia_peermem_refs} -gt 0 ]; then\n        echo \"Could not unload NVIDIA driver kernel modules, driver is in use\" >&2\n        return 1\n    fi\n\n    if [ ${#rmmod_args[@]} -gt 0 ]; then\n        rmmod ${rmmod_args[@]}\n    fi\n    return 0\n}\n\n# Link and install the kernel modules from a precompiled package using the nvidia-installer.\n_install_driver() {\n    local install_args=()\n\n    echo \"Installing NVIDIA driver kernel modules...\"\n    cd /usr/src/nvidia-${DRIVER_VERSION}\n    rm -rf /lib/modules/${KERNEL_VERSION}/video\n\n    if [ \"${ACCEPT_LICENSE}\" = \"yes\" ]; then\n        install_args+=(\"--accept-license\")\n    fi\n    IGNORE_CC_MISMATCH=1 nvidia-installer --kernel-module-only --no-drm --ui=none --no-nouveau-check -m=${KERNEL_TYPE} ${install_args[@]+\"${install_args[@]}\"}\n    # May need to add no-cc-check for Rhel, otherwise it complains about cc missing in path\n    # /proc/version and lib/modules/KERNEL_VERSION/proc are different, by default installer looks at /proc/ so, added the proc-mount-point\n    # TODO: remove the -a flag. its not needed. in the new driver version, license-acceptance is implicit\n    #nvidia-installer --kernel-module-only --no-drm --ui=none --no-nouveau-check --no-cc-version-check --proc-mount-point /lib/modules/${KERNEL_VERSION}/proc ${install_args[@]+\"${install_args[@]}\"}\n}\n\n# Mount the driver rootfs into the run directory with the exception of sysfs.\n_mount_rootfs() {\n    echo \"Mounting NVIDIA driver rootfs...\"\n    mount --make-runbindable /sys\n    mount --make-private /sys\n    mkdir -p ${RUN_DIR}/driver\n    mount --rbind / ${RUN_DIR}/driver\n\n    echo \"Check SELinux status\"\n    if [ -e /sys/fs/selinux ]; then\n        echo \"SELinux is enabled\"\n        echo \"Change device files security context for selinux compatibility\"\n        chcon -R -t container_file_t ${RUN_DIR}/driver/dev\n    else\n        echo \"SELinux is disabled, skipping...\"\n    fi\n}\n\n# Unmount the driver rootfs from the run directory.\n_unmount_rootfs() {\n    echo \"Unmounting NVIDIA driver rootfs...\"\n    if findmnt -r -o TARGET | grep \"${RUN_DIR}/driver\" > /dev/null; then\n        umount -l -R ${RUN_DIR}/driver\n    fi\n}\n\n# Write a kernel postinst.d script to automatically precompile packages on kernel update (similar to DKMS).\n_write_kernel_update_hook() {\n    if [ ! 
-d ${KERNEL_UPDATE_HOOK%/*} ]; then\n        return\n    fi\n\n    echo \"Writing kernel update hook...\"\n    cat > ${KERNEL_UPDATE_HOOK} <<'EOF'\n#!/bin/bash\n\nset -eu\ntrap 'echo \"ERROR: Failed to update the NVIDIA driver\" >&2; exit 0' ERR\n\nNVIDIA_DRIVER_PID=$(< /run/nvidia/nvidia-driver.pid)\n\nexport \"$(grep -z DRIVER_VERSION /proc/${NVIDIA_DRIVER_PID}/environ)\"\nnsenter -t \"${NVIDIA_DRIVER_PID}\" -m -- nvidia-driver update --kernel \"$1\"\nEOF\n    chmod +x ${KERNEL_UPDATE_HOOK}\n}\n\n_shutdown() {\n    if _unload_driver; then\n        _unmount_rootfs\n        rm -f ${PID_FILE} ${KERNEL_UPDATE_HOOK}\n        return 0\n    fi\n    return 1\n}\n\n_find_vgpu_driver_version() {\n    local count=\"\"\n    local version=\"\"\n    local drivers_path=\"/drivers\"\n\n    if [ \"${DISABLE_VGPU_VERSION_CHECK}\" = \"true\" ]; then\n        echo \"vgpu version compatibility check is disabled\"\n        return 0\n    fi\n    # check if vgpu devices are present\n    count=$(vgpu-util count)\n    if [ $? -ne 0 ]; then\n         echo \"cannot find vgpu devices on host, pleae check /var/log/vgpu-util.log for more details...\"\n         return 0\n    fi\n    NUM_VGPU_DEVICES=$(echo \"$count\" | awk -F= '{print $2}')\n    if [ $NUM_VGPU_DEVICES -eq 0 ]; then\n        # no vgpu devices found, treat as passthrough\n        return 0\n    fi\n    echo \"found $NUM_VGPU_DEVICES vgpu devices on host\"\n\n    # find compatible guest driver using driver catalog\n    if [ -d \"/mnt/shared-nvidia-driver-toolkit/drivers\" ]; then\n        drivers_path=\"/mnt/shared-nvidia-driver-toolkit/drivers\"\n    fi\n    version=$(vgpu-util match -i \"${drivers_path}\" -c \"${drivers_path}/vgpuDriverCatalog.yaml\")\n    if [ $? -ne 0 ]; then\n        echo \"cannot find match for compatible vgpu driver from available list, please check /var/log/vgpu-util.log for more details...\"\n        return 1\n    fi\n    DRIVER_VERSION=$(echo \"$version\" | awk -F= '{print $2}')\n    echo \"vgpu driver version selected: ${DRIVER_VERSION}\"\n    return 0\n}\n\n_start_vgpu_topology_daemon() {\n    type nvidia-topologyd > /dev/null 2>&1 || return 0\n    echo \"Starting nvidia-topologyd..\"\n    nvidia-topologyd\n}\n\n_prepare() {\n    if [ \"${DRIVER_TYPE}\" = \"vgpu\" ]; then\n        _find_vgpu_driver_version || exit 1\n    fi\n\n    # Install the userspace components and copy the kernel module sources.\n    sh NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION.run -x && \\\n        cd NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION && \\\n        sh /tmp/install.sh nvinstall && \\\n        mkdir -p /usr/src/nvidia-$DRIVER_VERSION && \\\n        mv LICENSE mkprecompiled ${KERNEL_TYPE} /usr/src/nvidia-$DRIVER_VERSION && \\\n        sed '9,${/^\\(kernel\\|LICENSE\\)/!d}' .manifest > /usr/src/nvidia-$DRIVER_VERSION/.manifest\n\n    echo -e \"\\n========== NVIDIA Software Installer ==========\\n\"\n    echo -e \"Starting installation of NVIDIA driver version ${DRIVER_VERSION} for Linux kernel version ${KERNEL_VERSION}\\n\"\n}\n\n_prepare_exclusive() {\n    _prepare\n\n    exec 3> ${PID_FILE}\n    if ! 
flock -n 3; then\n        echo \"An instance of the NVIDIA driver is already running, aborting\"\n        exit 1\n    fi\n    echo $$ >&3\n\n    trap \"echo 'Caught signal'; exit 1\" HUP INT QUIT PIPE TERM\n    trap \"_shutdown\" EXIT\n\n    _unload_driver || exit 1\n    _unmount_rootfs\n}\n\n_build() {\n    # Install dependencies\n    if _kernel_requires_package; then\n        _update_package_cache\n        _install_prerequisites\n        _create_driver_package\n        #_remove_prerequisites\n        _cleanup_package_cache\n    fi\n\n    # Build the driver\n    _install_driver\n}\n\n_load() {\n    _load_driver\n    _mount_rootfs\n    _write_kernel_update_hook\n\n    echo \"Done, now waiting for signal\"\n    sleep infinity &\n    trap \"echo 'Caught signal'; _shutdown && { kill $!; exit 0; }\" HUP INT QUIT PIPE TERM\n    trap - EXIT\n    while true; do wait $! || continue; done\n    exit 0\n}\n\ninit() {\n    _prepare_exclusive\n\n    _build\n\n    _load\n}\n\nbuild() {\n    _prepare\n\n    _build\n}\n\nload() {\n    _prepare_exclusive\n\n    _load\n}\n\nupdate() {\n    exec 3>&2\n    if exec 2> /dev/null 4< ${PID_FILE}; then\n        if ! flock -n 4 && read pid <&4 && kill -0 \"${pid}\"; then\n            exec > >(tee -a \"/proc/${pid}/fd/1\")\n            exec 2> >(tee -a \"/proc/${pid}/fd/2\" >&3)\n        else\n            exec 2>&3\n        fi\n        exec 4>&-\n    fi\n    exec 3>&-\n\n    # vgpu driver version is chosen dynamically during runtime, so pre-compile modules for\n    # only non-vgpu driver types\n    if [ \"${DRIVER_TYPE}\" != \"vgpu\" ]; then\n        # Install the userspace components and copy the kernel module sources.\n        if [ ! -e /usr/src/nvidia-${DRIVER_VERSION}/mkprecompiled ]; then\n            sh NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION.run -x && \\\n                cd NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION && \\\n                sh /tmp/install.sh nvinstall && \\\n                mkdir -p /usr/src/nvidia-$DRIVER_VERSION && \\\n                mv LICENSE mkprecompiled ${KERNEL_TYPE} /usr/src/nvidia-$DRIVER_VERSION && \\\n                sed '9,${/^\\(kernel\\|LICENSE\\)/!d}' .manifest > /usr/src/nvidia-$DRIVER_VERSION/.manifest\n        fi\n    fi\n\n    echo -e \"\\n========== NVIDIA Software Updater ==========\\n\"\n    echo -e \"Starting update of NVIDIA driver version ${DRIVER_VERSION} for Linux kernel version ${KERNEL_VERSION}\\n\"\n\n    trap \"echo 'Caught signal'; exit 1\" HUP INT QUIT PIPE TERM\n\n    _update_package_cache\n    _resolve_kernel_version || exit 1\n    _install_prerequisites\n    if _kernel_requires_package; then\n        _create_driver_package\n    fi\n    _remove_prerequisites\n    _cleanup_package_cache\n\n    echo \"Done\"\n    exit 0\n}\n\n# Wait for MOFED drivers to be loaded and load nvidia-peermem whenever it gets unloaded during MOFED driver updates\nreload_nvidia_peermem() {\n    if [ \"$USE_HOST_MOFED\" = \"true\" ]; then\n        until  lsmod | grep mlx5_core > /dev/null 2>&1 && [ -f /run/nvidia/validations/.driver-ctr-ready ];\n        do\n            echo \"waiting for mellanox ofed and nvidia drivers to be installed\"\n            sleep 10\n        done\n    else\n        # use driver readiness flag created by MOFED container\n        until  [ -f /run/mellanox/drivers/.driver-ready ] && [ -f /run/nvidia/validations/.driver-ctr-ready ];\n        do\n            echo \"waiting for mellanox ofed and nvidia drivers to be installed\"\n            sleep 10\n        done\n    fi\n    # get any parameters provided for 
nvidia-peermem\n    _get_module_params && set +o nounset\n    if chroot /run/nvidia/driver modprobe nvidia-peermem \"${NVIDIA_PEERMEM_MODULE_PARAMS[@]}\"; then\n        if [ -f /sys/module/nvidia_peermem/refcnt ]; then\n            echo \"successfully loaded nvidia-peermem module, now waiting for signal\"\n            sleep inf\n            trap \"echo 'Caught signal'; exit 1\" HUP INT QUIT PIPE TERM\n        fi\n    fi\n    echo \"failed to load nvidia-peermem module\"\n    exit 1\n}\n\n# probe by gpu-operator for liveness/startup checks for nvidia-peermem module to be loaded when MOFED drivers are ready\nprobe_nvidia_peermem() {\n    if lsmod | grep mlx5_core > /dev/null 2>&1; then\n        if [ ! -f /sys/module/nvidia_peermem/refcnt ]; then\n            echo \"nvidia-peermem module is not loaded\"\n            return 1\n        fi\n    else\n        echo \"MOFED drivers are not ready, skipping probe to avoid container restarts...\"\n    fi\n    return 0\n}\n\nusage() {\n    cat >&2 <<EOF\nUsage: $0 COMMAND [ARG...]\n\nCommands:\n  init   [-a | --accept-license] [-m | --max-threads MAX_THREADS]\n  build  [-a | --accept-license] [-m | --max-threads MAX_THREADS]\n  load\n  update [-k | --kernel VERSION] [-s | --sign KEYID] [-t | --tag TAG] [-m | --max-threads MAX_THREADS]\nEOF\n    exit 1\n}\n\nif [ $# -eq 0 ]; then\n    usage\nfi\ncommand=$1; shift\ncase \"${command}\" in\n    init) options=$(getopt -l accept-license,max-threads: -o am: -- \"$@\") ;;\n    build) options=$(getopt -l accept-license,tag:,max-threads: -o a:t:m: -- \"$@\") ;;\n    load) options=\"\" ;;\n    update) options=$(getopt -l kernel:,sign:,tag:,max-threads: -o k:s:t:m: -- \"$@\") ;;\n    reload_nvidia_peermem) options=\"\" ;;\n    probe_nvidia_peermem) options=\"\" ;;\n    *) usage ;;\nesac\nif [ $? -ne 0 ]; then\n    usage\nfi\neval set -- \"${options}\"\n\nACCEPT_LICENSE=\"\"\nMAX_THREADS=\"\"\nKERNEL_VERSION=$(uname -r)\nPRIVATE_KEY=\"\"\nPACKAGE_TAG=\"\"\n\nfor opt in ${options}; do\n    case \"$opt\" in\n    -a | --accept-license) ACCEPT_LICENSE=\"yes\"; shift 1 ;;\n    -k | --kernel) KERNEL_VERSION=$2; shift 2 ;;\n    -m | --max-threads) MAX_THREADS=$2; shift 2 ;;\n    -s | --sign) PRIVATE_KEY=$2; shift 2 ;;\n    -t | --tag) PACKAGE_TAG=$2; shift 2 ;;\n    --) shift; break ;;\n    esac\ndone\nif [ $# -ne 0 ]; then\n    usage\nfi\n\n_resolve_rhel_version || exit 1\n\n$command\n

Build a custom image on top of the official one. The following is the content of a Dockerfile:

                                                              FROM nvcr.io/nvidia/driver:535.183.06-rhel9.2\nCOPY nvidia-driver /usr/local/bin\nRUN chmod +x /usr/local/bin/nvidia-driver\nCMD [\"/bin/bash\", \"-c\"]\n

Build the image and push it to the bootstrap cluster:

docker build -t {bootstrap-registry}/nvcr.m.daocloud.io/nvidia/driver:535.183.06-01-rhel9.2 -f Dockerfile .\ndocker push {bootstrap-registry}/nvcr.m.daocloud.io/nvidia/driver:535.183.06-01-rhel9.2\n
                                                              "},{"location":"admin/kpanda/gpu/nvidia/rhel9.2_offline_install_driver.html#_2","title":"\u5b89\u88c5\u9a71\u52a8","text":"
1. Install the gpu-operator addon.
2. Set driver.version=535.183.06-01 (the sketch below shows the equivalent Helm values).
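
For illustration, the corresponding values when installing from the command line; the release name and namespace are assumptions, and {bootstrap-registry} is the same placeholder used in the build step above.

# Hypothetical CLI equivalent; keys mirror the upstream gpu-operator chart\nhelm upgrade --install gpu-operator nvidia/gpu-operator \\\n  -n gpu-operator --create-namespace \\\n  --set driver.repository={bootstrap-registry}/nvcr.m.daocloud.io/nvidia \\\n  --set-string driver.version=535.183.06-01\n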
                                                              "},{"location":"admin/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html","title":"Ubuntu22.04 \u79bb\u7ebf\u5b89\u88c5 gpu-operator \u9a71\u52a8","text":"

Prerequisite: gpu-operator v23.9.0+2 or later is installed.

                                                              "},{"location":"admin/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html#_1","title":"\u51c6\u5907\u79bb\u7ebf\u955c\u50cf","text":"
1. Check the kernel version:

                                                                $ uname -r\n5.15.0-78-generic\n
2. Look up the GPU Driver image version matching the kernel at https://catalog.ngc.nvidia.com/orgs/nvidia/containers/driver/tags. Use the kernel version to query the image version, then save the image with ctr export:

                                                                ctr i pull nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04\nctr i export --all-platforms driver.tar.gz nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 \n
3. Import the image into the image repository of the bootstrap cluster:

ctr i import driver.tar.gz\nctr i tag nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 {bootstrap-registry}/nvcr.m.daocloud.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04\nctr i push {bootstrap-registry}/nvcr.m.daocloud.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 --skip-verify=true\n
                                                              "},{"location":"admin/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html#_2","title":"\u5b89\u88c5\u9a71\u52a8","text":"
1. Install the gpu-operator addon.
2. If using precompiled mode, set driver.usePrecompiled=true and driver.version=535. Note that the value here is 535, not 535.104.12. (In non-precompiled mode, skip this step and install directly.) The sketch below shows the equivalent Helm values.
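
For illustration, the same precompiled-mode settings expressed as Helm values; the release name and namespace are assumptions, and the keys mirror the upstream gpu-operator chart.

# Hypothetical CLI equivalent of the precompiled-mode settings\nhelm upgrade --install gpu-operator nvidia/gpu-operator \\\n  -n gpu-operator --create-namespace \\\n  --set driver.usePrecompiled=true \\\n  --set-string driver.version=535\n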
                                                              "},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html","title":"\u6784\u5efa CentOS 7.9 \u79bb\u7ebf yum \u6e90","text":""},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#_1","title":"\u4f7f\u7528\u573a\u666f\u4ecb\u7ecd","text":"

When the kernel version of the worker nodes differs from the kernel version or OS type of the control nodes of the global service cluster, you need to build an offline yum repository manually.

This document describes how to build an offline yum repository and use it through the RepoConfig.ConfigMapName parameter when installing GPU Operator.

                                                              "},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
1. An addon offline package of v0.12.0 or above has been installed on the platform.
2. Prepare a file server, such as nginx or MinIO, that is reachable from the network of the cluster where GPU Operator is to be deployed.
3. Prepare a node that can access the Internet, the cluster where GPU Operator is to be deployed, and the file server, with Docker already installed on it.
                                                              "},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

This document takes a CentOS 7.9 node with kernel version 3.10.0-1160.95.1.el7.x86_64 as an example to describe how to build a yum repository for the GPU Operator offline package.

                                                              "},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#os","title":"\u68c0\u67e5\u96c6\u7fa4\u8282\u70b9\u7684 OS \u548c\u5185\u6838\u7248\u672c","text":"

Run the following commands on both a control node of the global service cluster and the node where GPU Operator is to be deployed. If the OS and kernel versions of the two nodes match, there is no need to build a yum repository; you can install directly by following Offline Installation of GPU Operator. If the OS or kernel versions differ, proceed to the next step.

1. Run the following command to check the distribution name and version of the node where GPU Operator is to be deployed.

                                                                cat /etc/redhat-release\n

The expected output is as follows:

                                                                CentOS Linux release 7.9 (Core)\n

The output shows the OS release of the current node: CentOS 7.9.

2. Run the following command to check the kernel version of the node where GPU Operator is to be deployed.

                                                                uname -a\n

The expected output is as follows:

                                                                Linux localhost.localdomain 3.10.0-1160.95.1.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux\n

The output shows the kernel version of the current node: 3.10.0-1160.95.1.el7.x86_64.

                                                              "},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#yum","title":"\u5236\u4f5c\u79bb\u7ebf yum \u6e90","text":"

Perform the following operations on a node that can access both the Internet and the file server.

1. On a node that can access both the internet and the file server, run the following command to create a script file named yum.sh:

    vi yum.sh

    Then press the i key to enter insert mode and enter the following content:

export TARGET_KERNEL_VERSION=$1

# Generate the script that runs inside the container and downloads the kernel repo packages.
cat >> run.sh << \EOF
#!/bin/bash
echo "start install kernel repo"
echo ${KERNEL_VERSION}
mkdir centos-base

if [ "$OS" -eq 7 ]; then
    yum install -y --downloadonly --downloaddir=./centos-base perl
    yum install -y --downloadonly --downloaddir=./centos-base elfutils-libelf.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base elfutils-libelf-devel.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base kernel-headers-${KERNEL_VERSION}.el7.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base kernel-devel-${KERNEL_VERSION}.el7.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base kernel-${KERNEL_VERSION}.el7.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base groff-base
elif [ "$OS" -eq 8 ]; then
    yum install -y --downloadonly --downloaddir=./centos-base perl
    yum install -y --downloadonly --downloaddir=./centos-base elfutils-libelf.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base elfutils-libelf-devel.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base kernel-headers-${KERNEL_VERSION}.el8.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base kernel-devel-${KERNEL_VERSION}.el8.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base kernel-${KERNEL_VERSION}.el8.x86_64
    yum install -y --downloadonly --downloaddir=./centos-base groff-base
else
    echo "Error os version"
fi

# Index the downloaded packages and archive the resulting repo directory.
createrepo centos-base/
ls -lh centos-base/
tar -zcf centos-base.tar.gz centos-base/
echo "end install kernel repo"
EOF

# Build a throwaway image that runs the script, then copy the result out of the container.
cat >> Dockerfile << EOF
FROM centos:7
ENV KERNEL_VERSION=""
ENV OS=7
RUN yum install -y createrepo
COPY run.sh .
ENTRYPOINT ["/bin/bash","run.sh"]
EOF

docker build -t test:v1 -f Dockerfile .
docker run -e KERNEL_VERSION=$TARGET_KERNEL_VERSION --name centos7.9 test:v1
docker cp centos7.9:/centos-base.tar.gz .
tar -xzf centos-base.tar.gz

Press the esc key to exit insert mode, then type :wq to save and exit.

2. Run the yum.sh file:

    bash -x yum.sh TARGET_KERNEL_VERSION

    The TARGET_KERNEL_VERSION parameter specifies the kernel version of the cluster nodes. Note that the distribution identifier (for example .el7.x86_64 ) must not be included. For example:

    bash -x yum.sh 3.10.0-1160.95.1

At this point, you have generated an offline yum source, centos-base, for kernel 3.10.0-1160.95.1.el7.x86_64.
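
Optionally, you can sanity-check the generated source before uploading it; a minimal sketch (the repodata/ directory is what createrepo produced inside the container):

    # The repo metadata must exist for yum clients to consume the directory.
    ls centos-base/repodata/repomd.xml
    # The downloaded kernel and dependency rpm packages should be listed here.
    ls -lh centos-base/*.rpm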

                                                              "},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#yum_1","title":"\u4e0a\u4f20\u79bb\u7ebf yum \u6e90\u5230\u6587\u4ef6\u670d\u52a1\u5668","text":"

Perform the following operations on a node that can access both the internet and the file server. This step uploads the yum source generated in the previous section to a file server that the cluster where the GPU Operator is to be deployed can reach. The file server can be Nginx, MinIO, or any other file server that supports the HTTP protocol.

This example uses the built-in MinIO on the seed node of the Suanova AI computing platform as the file server. The MinIO details are as follows:

• Access address: http://10.5.14.200:9000 (usually {seed node IP} + {port 9000})
• Login username: rootuser
• Login password: rootpass123

• In the node's current path, run the following command to connect the node's local mc command-line tool to the MinIO server:

    mc config host add minio http://10.5.14.200:9000 rootuser rootpass123

    The expected output is as follows:

    Added `minio` successfully.

    The mc command-line tool is the client provided by the MinIO file server; for details, see MinIO Client.

• In the node's current path, create a bucket named centos-base:

    mc mb -p minio/centos-base

    The expected output is as follows:

    Bucket created successfully `minio/centos-base`.
• Set the access policy of the centos-base bucket to allow public download, so that it can be accessed later during the gpu-operator installation:

    mc anonymous set download minio/centos-base

    The expected output is as follows:

    Access permission for `minio/centos-base` is set to `download`
• In the node's current path, copy the offline yum source directory centos-base generated in the previous section into the minio/centos-base bucket on the MinIO server:

    mc cp centos-base minio/centos-base --recursive
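
Optionally, verify the upload and the public-download policy; a minimal sketch assuming the MinIO address used above:

    # List the uploaded objects.
    mc ls --recursive minio/centos-base | head
    # Fetch the repo metadata anonymously over HTTP, the same way cluster nodes will.
    curl -sI http://10.5.14.200:9000/centos-base/centos-base/repodata/repomd.xml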
                                                              "},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#yum_2","title":"\u5728\u96c6\u7fa4\u521b\u5efa\u914d\u7f6e\u9879\u7528\u6765\u4fdd\u5b58 yum \u6e90\u4fe1\u606f","text":"

                                                              \u5728\u5f85\u90e8\u7f72 GPU Operator \u96c6\u7fa4\u7684\u63a7\u5236\u8282\u70b9\u4e0a\u8fdb\u884c\u64cd\u4f5c\u3002

1. Run the following command to create a file named CentOS-Base.repo, which specifies the configuration of the yum source:

    # The file must be named CentOS-Base.repo, otherwise it cannot be recognized during gpu-operator installation
    cat > CentOS-Base.repo << EOF
    [extension-0]
    baseurl = http://10.5.14.200:9000/centos-base/centos-base # address of the file server hosting the yum source from the previous section
    gpgcheck = 0
    name = kubean extension 0

    [extension-1]
    baseurl = http://10.5.14.200:9000/centos-base/centos-base # address of the file server hosting the yum source from the previous section
    gpgcheck = 0
    name = kubean extension 1
    EOF
2. Based on the created CentOS-Base.repo file, create a ConfigMap named local-repo-config in the gpu-operator namespace:

    kubectl create configmap local-repo-config -n gpu-operator --from-file=CentOS-Base.repo=./CentOS-Base.repo

    The expected output is as follows:

    configmap/local-repo-config created

    The local-repo-config ConfigMap supplies the value of the RepoConfig.ConfigMapName parameter when installing the gpu-operator; the ConfigMap name can be customized by the user.

3. View the content of the local-repo-config ConfigMap:

    kubectl get configmap local-repo-config -n gpu-operator -oyaml

    The expected output is as follows:

    apiVersion: v1
    data:
      CentOS-Base.repo: "[extension-0]\nbaseurl = http://10.6.232.5:32618/centos-base # file server path hosting the yum source\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.6.232.5:32618/centos-base # file server path hosting the yum source\ngpgcheck = 0\nname = kubean extension 1\n"
    kind: ConfigMap
    metadata:
      creationTimestamp: "2023-10-18T01:59:02Z"
      name: local-repo-config
      namespace: gpu-operator
      resourceVersion: "59445080"
      uid: c5f0ebab-046f-442c-b932-f9003e014387

At this point, you have created the offline yum source configuration file for the cluster where the GPU Operator is to be deployed. It is consumed by passing the RepoConfig.ConfigMapName parameter during the offline installation of the GPU Operator.
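
For reference only: if you install the NVIDIA GPU Operator directly with Helm, the same ConfigMap is usually wired in through the chart's driver.repoConfig.configMapName value; a minimal sketch (the release name, chart reference, and namespace are assumptions for illustration):

    # Point the driver container at the offline repo definition stored in the ConfigMap.
    helm upgrade --install gpu-operator nvidia/gpu-operator \
      -n gpu-operator \
      --set driver.repoConfig.configMapName=local-repo-config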

                                                              "},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html","title":"\u6784\u5efa Red Hat 8.4 \u79bb\u7ebf yum \u6e90","text":"

                                                              \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9884\u7f6e\u4e86 CentOS 7.9\uff0c\u5185\u6838\u4e3a 3.10.0-1160 \u7684 GPU operator \u79bb\u7ebf\u5305\u3002\u5176\u5b83 OS \u7c7b\u578b\u7684\u8282\u70b9\u6216\u5185\u6838\u9700\u8981\u7528\u6237\u624b\u52a8\u6784\u5efa\u79bb\u7ebf yum \u6e90\u3002

                                                              \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u57fa\u4e8e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4efb\u610f\u8282\u70b9\u6784\u5efa Red Hat 8.4 \u79bb\u7ebf yum \u6e90\u5305\uff0c\u5e76\u5728\u5b89\u88c5 Gpu Operator \u65f6\uff0c\u901a\u8fc7 RepoConfig.ConfigMapName \u53c2\u6570\u6765\u4f7f\u7528\u3002

                                                              "},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              1. \u7528\u6237\u5df2\u7ecf\u5728\u5e73\u53f0\u4e0a\u5b89\u88c5\u4e86 v0.12.0 \u53ca\u4ee5\u4e0a\u7248\u672c\u7684 addon \u79bb\u7ebf\u5305\u3002
                                                              2. \u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u8282\u70b9 OS \u5fc5\u987b\u4e3a Red Hat 8.4\uff0c\u4e14\u5185\u6838\u7248\u672c\u5b8c\u5168\u4e00\u81f4\u3002
                                                              3. \u51c6\u5907\u4e00\u4e2a\u80fd\u591f\u548c\u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u7f51\u7edc\u80fd\u591f\u8054\u901a\u7684\u6587\u4ef6\u670d\u52a1\u5668\uff0c\u5982 nginx \u6216 minio\u3002
                                                              4. \u51c6\u5907\u4e00\u4e2a\u80fd\u591f\u8bbf\u95ee\u4e92\u8054\u7f51\u3001\u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u548c\u6587\u4ef6\u670d\u52a1\u5668\u7684\u8282\u70b9\uff0c\u4e14\u8282\u70b9\u4e0a\u5df2\u7ecf\u5b8c\u6210 Docker \u7684\u5b89\u88c5\u3002
                                                              5. \u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u8282\u70b9\u5fc5\u987b\u4e3a Red Hat 8.4 4.18.0-305.el8.x86_64\u3002
                                                              "},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

This guide uses a Red Hat 8.4 4.18.0-305.el8.x86_64 node as an example to explain how to build an offline yum source package for Red Hat 8.4 on any node of the global service cluster and use it via the RepoConfig.ConfigMapName parameter when installing the GPU Operator.

                                                              "},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#yum","title":"\u4e0b\u8f7d\u706b\u79cd\u8282\u70b9\u4e2d\u7684 yum \u6e90","text":"

                                                              \u4ee5\u4e0b\u64cd\u4f5c\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684 master \u8282\u70b9\u4e0a\u6267\u884c\u3002

1. Use ssh or another method to log in to any node in the global service cluster and run:

    cat /etc/yum.repos.d/extension.repo  # view the content of extension.repo

    The expected output is as follows:

    [extension-0]
    baseurl = http://10.5.14.200:9000/kubean/redhat/$releasever/os/$basearch
    gpgcheck = 0
    name = kubean extension 0

    [extension-1]
    baseurl = http://10.5.14.200:9000/kubean/redhat-iso/$releasever/os/$basearch/AppStream
    gpgcheck = 0
    name = kubean extension 1

    [extension-2]
    baseurl = http://10.5.14.200:9000/kubean/redhat-iso/$releasever/os/$basearch/BaseOS
    gpgcheck = 0
    name = kubean extension 2
2. Create a folder named redhat-base-repo in the root user's home directory:

    mkdir redhat-base-repo

3. Install the tooling used to download the rpm packages from the yum source:

    yum install -y yum-utils

4. Download the rpm packages from extension-1:

    reposync -p redhat-base-repo -n --repoid=extension-1

5. Download the rpm packages from extension-2:

    reposync -p redhat-base-repo -n --repoid=extension-2
                                                              "},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#elfutils-libelf-devel-0187-4el8x86_64rpm","title":"\u4e0b\u8f7d elfutils-libelf-devel-0.187-4.el8.x86_64.rpm \u5305","text":"

                                                              \u4ee5\u4e0b\u64cd\u4f5c\u5728\u8054\u7f51\u8282\u70b9\u6267\u884c\u64cd\u4f5c\uff0c\u5728\u64cd\u4f5c\u524d\uff0c\u60a8\u9700\u8981\u4fdd\u8bc1\u8054\u7f51\u8282\u70b9\u548c\u5168\u5c40\u670d\u52a1\u96c6\u7fa4 master \u8282\u70b9\u95f4\u7684\u7f51\u7edc\u8054\u901a\u6027\u3002

1. On the internet-connected node, run the following command to download the elfutils-libelf-devel-0.187-4.el8.x86_64.rpm package:

    wget https://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/elfutils-libelf-devel-0.187-4.el8.x86_64.rpm

2. From the current directory, transfer the elfutils-libelf-devel-0.187-4.el8.x86_64.rpm package to the master node used in step 1:

    scp elfutils-libelf-devel-0.187-4.el8.x86_64.rpm user@ip:~/redhat-base-repo/extension-2/Packages/

    For example:

    scp elfutils-libelf-devel-0.187-4.el8.x86_64.rpm root@10.6.175.10:~/redhat-base-repo/extension-2/Packages/
                                                              "},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#yum-repo","title":"\u751f\u6210\u672c\u5730 yum repo","text":"

                                                              \u4ee5\u4e0b\u64cd\u4f5c\u5728\u6b65\u9aa4\u4e00\u4e2d\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684 master \u8282\u70b9\u4e0a\u6267\u884c\u3002

1. Enter each of the yum repo directories in turn; the index in the next step must be generated in both of them:

    cd ~/redhat-base-repo/extension-1/Packages
    cd ~/redhat-base-repo/extension-2/Packages

2. Generate the repo index (run this in each of the two Packages directories):

    yum install -y createrepo_c  # skip this step if createrepo is already installed
    createrepo_c ./

At this point, you have generated an offline yum source, redhat-base-repo, for kernel 4.18.0-305.el8.x86_64.
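
Optionally, you can check the freshly indexed repo before uploading it; a minimal sketch using dnf (the default package manager on Red Hat 8.4):

    # Treat the local directory as a throwaway repo and list a few packages from it.
    dnf --disablerepo="*" \
        --repofrompath=local-ext2,$HOME/redhat-base-repo/extension-2/Packages \
        list available | head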

                                                              "},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#yum-repo_1","title":"\u5c06\u672c\u5730\u751f\u6210\u7684 yum repo \u4e0a\u4f20\u81f3\u6587\u4ef6\u670d\u52a1\u5668","text":"

                                                              \u672c\u64cd\u4f5c\u793a\u4f8b\u91c7\u7528\u7684\u662f\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u706b\u79cd\u8282\u70b9\u5185\u7f6e\u7684 Minio \u4f5c\u4e3a\u6587\u4ef6\u670d\u52a1\u5668\uff0c\u7528\u6237\u53ef\u57fa\u4e8e\u81ea\u8eab\u60c5\u51b5\u9009\u62e9\u6587\u4ef6\u670d\u52a1\u5668\u3002Minio \u76f8\u5173\u4fe1\u606f\u5982\u4e0b\uff1a

                                                              • \u8bbf\u95ee\u5730\u5740\uff1a http://10.5.14.200:9000\uff08\u4e00\u822c\u4e3a{\u706b\u79cd\u8282\u70b9 IP} + {9000 \u7aef\u53e3}\uff09
                                                              • \u767b\u5f55\u7528\u6237\u540d\uff1arootuser
                                                              • \u767b\u5f55\u5bc6\u7801\uff1arootpass123

• In the node's current path, run the following command to connect the node's local mc command-line tool to the MinIO server:

    mc config host add minio <file-server-address> <username> <password>

    For example:

    mc config host add minio http://10.5.14.200:9000 rootuser rootpass123

    The expected output is as follows:

    Added `minio` successfully.

    The mc command-line tool is the client provided by the MinIO file server; for details, see MinIO Client.

• In the node's current path, create a bucket named redhat-base:

    mc mb -p minio/redhat-base

    The expected output is as follows:

    Bucket created successfully `minio/redhat-base`.
• Set the access policy of the redhat-base bucket to allow public download, so that it can be accessed later during the gpu-operator installation:

    mc anonymous set download minio/redhat-base

    The expected output is as follows:

    Access permission for `minio/redhat-base` is set to `download`
• In the node's current path, copy the offline yum source directory redhat-base-repo generated in the previous section into the minio/redhat-base bucket on the MinIO server:

    mc cp redhat-base-repo minio/redhat-base --recursive
                                                              "},{"location":"admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#yum_1","title":"\u5728\u96c6\u7fa4\u521b\u5efa\u914d\u7f6e\u9879\u7528\u6765\u4fdd\u5b58 yum \u6e90\u4fe1\u606f","text":"

                                                              \u672c\u6b65\u9aa4\u5728\u5f85\u90e8\u7f72 GPU Operator \u96c6\u7fa4\u7684\u63a7\u5236\u8282\u70b9\u4e0a\u8fdb\u884c\u64cd\u4f5c\u3002

1. Run the following command to create a file named redhat.repo, which specifies the configuration of the yum source:

    # The file must be named redhat.repo, otherwise it cannot be recognized during gpu-operator installation
    cat > redhat.repo << EOF
    [extension-0]
    baseurl = http://10.5.14.200:9000/redhat-base/redhat-base-repo/Packages # address of the file server hosting the yum source from step 1
    gpgcheck = 0
    name = kubean extension 0

    [extension-1]
    baseurl = http://10.5.14.200:9000/redhat-base/redhat-base-repo/Packages # address of the file server hosting the yum source from step 1
    gpgcheck = 0
    name = kubean extension 1
    EOF
2. Based on the created redhat.repo file, create a ConfigMap named local-repo-config in the gpu-operator namespace:

    kubectl create configmap local-repo-config -n gpu-operator --from-file=./redhat.repo

    The expected output is as follows:

    configmap/local-repo-config created

    The local-repo-config ConfigMap supplies the value of the RepoConfig.ConfigMapName parameter when installing the gpu-operator; the ConfigMap name can be customized by the user.

3. View the content of the local-repo-config ConfigMap:

    kubectl get configmap local-repo-config -n gpu-operator -oyaml

At this point, you have created the offline yum source configuration file for the cluster where the GPU Operator is to be deployed. It is consumed by passing the RepoConfig.ConfigMapName parameter during the offline installation of the GPU Operator.

                                                              "},{"location":"admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html","title":"\u6784\u5efa Red Hat 7.9 \u79bb\u7ebf yum \u6e90","text":""},{"location":"admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#_1","title":"\u4f7f\u7528\u573a\u666f\u4ecb\u7ecd","text":"

                                                              \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9884\u7f6e\u4e86 CentOS 7.9\uff0c\u5185\u6838\u4e3a 3.10.0-1160 \u7684 GPU Operator \u79bb\u7ebf\u5305\u3002\u5176\u5b83 OS \u7c7b\u578b\u7684\u8282\u70b9\u6216\u5185\u6838\u9700\u8981\u7528\u6237\u624b\u52a8\u6784\u5efa\u79bb\u7ebf yum \u6e90\u3002

                                                              \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u57fa\u4e8e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4efb\u610f\u8282\u70b9\u6784\u5efa Red Hat 7.9 \u79bb\u7ebf yum \u6e90\u5305\uff0c\u5e76\u5728\u5b89\u88c5 Gpu Operator \u65f6\u4f7f\u7528 RepoConfig.ConfigMapName \u53c2\u6570\u3002

                                                              "},{"location":"admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              1. \u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u8282\u70b9 OS \u5fc5\u987b\u4e3a Red Hat 7.9\uff0c\u4e14\u5185\u6838\u7248\u672c\u5b8c\u5168\u4e00\u81f4
                                                              2. \u51c6\u5907\u4e00\u4e2a\u80fd\u591f\u4e0e\u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u7f51\u7edc\u8054\u901a\u7684\u6587\u4ef6\u670d\u52a1\u5668\uff0c\u5982 nginx \u6216 minio
                                                              3. \u51c6\u5907\u4e00\u4e2a\u80fd\u591f\u8bbf\u95ee\u4e92\u8054\u7f51\u3001\u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u548c\u6587\u4ef6\u670d\u52a1\u5668\u7684\u8282\u70b9\uff0c \u4e14\u8282\u70b9\u4e0a\u5df2\u7ecf\u5b8c\u6210 Docker \u7684\u5b89\u88c5
                                                              4. \u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u8282\u70b9\u5fc5\u987b\u4e3a Red Hat 7.9
                                                              "},{"location":"admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#1-yum","title":"1. \u6784\u5efa\u76f8\u5173\u5185\u6838\u7248\u672c\u7684\u79bb\u7ebf Yum \u6e90","text":"
1. Download the rhel7.9 ISO

2. Download the rhel7.9 ospackage that matches your Kubean version

    In Container Management, open Helm Apps in the global service cluster and search for kubean to find kubean's version number.

    Download the rhel7.9 ospackage for that version from kubean's code repository.

3. Import the offline resources via the installer

    See the Importing Offline Resources documentation.

                                                              "},{"location":"admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#2-red-hat-79-os","title":"2. \u4e0b\u8f7d Red Hat 7.9 OS \u7684\u79bb\u7ebf\u9a71\u52a8\u955c\u50cf","text":"

                                                              \u70b9\u51fb\u67e5\u770b\u4e0b\u8f7d\u5730\u5740\u3002

                                                              "},{"location":"admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#3-red-hat-gpu-opreator","title":"3. \u5411\u706b\u79cd\u8282\u70b9\u4ed3\u5e93\u4e0a\u4f20 Red Hat GPU Opreator \u79bb\u7ebf\u955c\u50cf","text":"

                                                              \u53c2\u8003\u5411\u706b\u79cd\u8282\u70b9\u4ed3\u5e93\u4e0a\u4f20 Red Hat GPU Opreator \u79bb\u7ebf\u955c\u50cf\u3002

                                                              Note

This reference uses rhel8.4 as an example; be sure to change it to rhel7.9.

                                                              "},{"location":"admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#4-yum","title":"4. \u5728\u96c6\u7fa4\u521b\u5efa\u914d\u7f6e\u9879\u7528\u6765\u4fdd\u5b58 Yum \u6e90\u4fe1\u606f","text":"

                                                              \u5728\u5f85\u90e8\u7f72 GPU Operator \u96c6\u7fa4\u7684\u63a7\u5236\u8282\u70b9\u4e0a\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\u3002

                                                              1. \u6267\u884c\u5982\u4e0b\u547d\u4ee4\u521b\u5efa\u540d\u4e3a CentOS-Base.repo \u7684\u6587\u4ef6\uff0c\u7528\u6765\u6307\u5b9a yum \u6e90\u5b58\u50a8\u7684\u914d\u7f6e\u4fe1\u606f\u3002

                                                                # \u6587\u4ef6\u540d\u79f0\u5fc5\u987b\u4e3a CentOS-Base.repo\uff0c\u5426\u5219\u5b89\u88c5 gpu-operator \u65f6\u65e0\u6cd5\u88ab\u8bc6\u522b\ncat > CentOS-Base.repo <<  EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # \u706b\u79cd\u8282\u70b9\u7684\u7684\u6587\u4ef6\u670d\u52a1\u5668\u5730\u5740\uff0c\u4e00\u822c\u4e3a{\u706b\u79cd\u8282\u70b9 IP} + {9000 \u7aef\u53e3}\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # \u706b\u79cd\u8282\u70b9\u7684\u7684\u6587\u4ef6\u670d\u52a1\u5668\u5730\u5740\uff0c\u4e00\u822c\u4e3a{\u706b\u79cd\u8282\u70b9 IP} + {9000 \u7aef\u53e3}\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
2. Based on the created CentOS-Base.repo file, create a ConfigMap named local-repo-config in the gpu-operator namespace:

    kubectl create configmap local-repo-config -n gpu-operator --from-file=CentOS-Base.repo=./CentOS-Base.repo

    The expected output is as follows:

    configmap/local-repo-config created

    The local-repo-config ConfigMap supplies the value of the RepoConfig.ConfigMapName parameter when installing the gpu-operator; the ConfigMap name can be customized by the user.

3. View the content of the local-repo-config ConfigMap:

    kubectl get configmap local-repo-config -n gpu-operator -oyaml

    The expected output is as follows:

                                                                local-repo-config.yaml
apiVersion: v1
data:
  CentOS-Base.repo: "[extension-0]\nbaseurl = http://10.6.232.5:32618/centos-base # file server path hosting the yum source (step 2)\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.6.232.5:32618/centos-base # file server path hosting the yum source (step 2)\ngpgcheck = 0\nname = kubean extension 1\n"
kind: ConfigMap
metadata:
  creationTimestamp: "2023-10-18T01:59:02Z"
  name: local-repo-config
  namespace: gpu-operator
  resourceVersion: "59445080"
  uid: c5f0ebab-046f-442c-b932-f9003e014387

At this point, you have created the offline yum source configuration file for the cluster where the GPU Operator is to be deployed; it is consumed through the RepoConfig.ConfigMapName parameter during the offline installation of the GPU Operator.

                                                              "},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html","title":"GPU \u544a\u8b66\u89c4\u5219","text":"

                                                              \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8bbe\u7f6e GPU \u76f8\u5173\u7684\u544a\u8b66\u89c4\u5219\u3002

                                                              "},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                              • \u96c6\u7fa4\u8282\u70b9\u4e0a\u5df2\u6b63\u786e\u5b89\u88c5 GPU \u8bbe\u5907
                                                              • \u96c6\u7fa4\u4e2d\u5df2\u6b63\u786e\u5b89\u88c5 gpu-operator \u7ec4\u4ef6
                                                              • \u5982\u679c\u7528\u5230\u4e86 vGPU \u8fd8\u9700\u8981\u5728\u96c6\u7fa4\u4e2d\u5b89\u88c5 Nvidia-vgpu \u7ec4\u4ef6\uff0c\u5e76\u4e14\u5f00\u542f servicemonitor
                                                              • \u96c6\u7fa4\u6b63\u786e\u5b89\u88c5\u4e86 insight-agent \u7ec4\u4ef6
                                                              "},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#gpu_1","title":"\u544a\u8b66\u5e38\u7528 GPU \u6307\u6807","text":"

                                                              \u672c\u8282\u4ecb\u7ecd GPU \u544a\u8b66\u5e38\u7528\u7684\u6307\u6807\uff0c\u5206\u4e3a\u4e24\u4e2a\u90e8\u5206\uff1a

                                                              • GPU \u5361\u7eac\u5ea6\u7684\u6307\u6807\uff0c\u4e3b\u8981\u53cd\u5e94\u5355\u4e2a GPU \u8bbe\u5907\u7684\u8fd0\u884c\u72b6\u6001\u3002
                                                              • \u5e94\u7528\u7eac\u5ea6\u7684\u6307\u6807\uff0c\u4e3b\u8981\u53cd\u5e94 Pod \u5728 GPU \u4e0a\u7684\u8fd0\u884c\u72b6\u6001\u3002
                                                              "},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#gpu_2","title":"GPU \u5361\u6307\u6807","text":"\u6307\u6807\u540d\u79f0 \u6307\u6807\u5355\u4f4d \u8bf4\u660e DCGM_FI_DEV_GPU_UTIL % GPU \u5229\u7528\u7387 DCGM_FI_DEV_MEM_COPY_UTIL % \u663e\u5b58\u5229\u7528\u7387 DCGM_FI_DEV_ENC_UTIL % \u7f16\u7801\u5668\u5229\u7528\u7387 DCGM_FI_DEV_DEC_UTIL % \u89e3\u7801\u5668\u5229\u7528\u7387 DCGM_FI_DEV_FB_FREE MB \u8868\u793a\u663e\u5b58\u5269\u4f59\u91cf DCGM_FI_DEV_FB_USED MB \u8868\u793a\u663e\u5b58\u4f7f\u7528\u91cf DCGM_FI_DEV_GPU_TEMP \u6444\u6c0f\u5ea6 \u8868\u793a\u5f53\u524d GPU \u7684\u6e29\u5ea6\u5ea6\u6570 DCGM_FI_DEV_POWER_USAGE W \u8bbe\u5907\u7535\u6e90\u4f7f\u7528\u60c5\u51b5 DCGM_FI_DEV_XID_ERRORS - \u8868\u793a\u4e00\u6bb5\u65f6\u95f4\u5185\uff0c\u6700\u540e\u53d1\u751f\u7684 XID \u9519\u8bef\u53f7\u3002XID \u63d0\u4f9b GPU \u786c\u4ef6\u3001NVIDIA \u8f6f\u4ef6\u6216\u5e94\u7528\u4e2d\u7684\u9519\u8bef\u7c7b\u578b\u3001\u9519\u8bef\u4f4d\u7f6e\u3001\u9519\u8bef\u4ee3\u7801\u7b49\u4fe1\u606f\uff0c\u66f4\u591a XID \u4fe1\u606f"},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#_2","title":"\u5e94\u7528\u7ef4\u5ea6\u7684\u6307\u6807","text":"\u6307\u6807\u540d\u79f0 \u6307\u6807\u5355\u4f4d \u8bf4\u660e kpanda_gpu_pod_utilization % \u8868\u793a Pod \u5bf9 GPU \u7684\u4f7f\u7528\u7387 kpanda_gpu_mem_pod_usage MB \u8868\u793a Pod \u5bf9 GPU \u663e\u5b58\u7684\u4f7f\u7528\u91cf kpanda_gpu_mem_pod_utilization % \u8868\u793a Pod \u5bf9 GPU \u663e\u5b58\u7684\u4f7f\u7528\u7387"},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#_3","title":"\u8bbe\u7f6e\u544a\u8b66\u89c4\u5219","text":"

This section shows how to configure a GPU alert rule, using the GPU card utilization metric as an example; choose metrics and write the PromQL according to your actual business scenario.

Goal: fire an alert when a GPU card's utilization stays at 80% continuously for five seconds.
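
A minimal PromQL sketch for this goal, assuming the DCGM_FI_DEV_GPU_UTIL metric from the table above is scraped by the platform's Prometheus (the endpoint address is a placeholder); min_over_time over a 5s window only stays at or above 80 when every sample in that window is at least 80%:

    # Evaluate the alert expression against a Prometheus instance.
    curl -s 'http://prometheus:9090/api/v1/query' \
      --data-urlencode 'query=min_over_time(DCGM_FI_DEV_GPU_UTIL[5s]) >= 80'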

1. On the observability page, click Alerts -> Alert Policies -> Create Alert Policy

2. Fill in the basic information

3. Add a rule

4. Choose a notification method

5. Once configured, you will receive an alert like the following whenever a GPU holds 80% utilization for 5 seconds.

                                                              "},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-metrics.html","title":"GPU \u76d1\u63a7\u6307\u6807","text":"

                                                              \u672c\u9875\u5217\u51fa\u4e00\u4e9b\u5e38\u7528\u7684 GPU \u76d1\u63a7\u6307\u6807\u3002

                                                              "},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-metrics.html#_1","title":"\u96c6\u7fa4\u7ef4\u5ea6","text":"\u6307\u6807\u540d\u79f0 \u63cf\u8ff0 GPU \u5361\u6570 \u96c6\u7fa4\u4e0b\u6240\u6709\u7684 GPU \u5361\u6570\u91cf GPU \u5e73\u5747\u4f7f\u7528\u7387 \u96c6\u7fa4\u4e0b\u6240\u6709 GPU \u5361\u7684\u5e73\u5747\u7b97\u529b\u4f7f\u7528\u7387 GPU \u5e73\u5747\u663e\u5b58\u4f7f\u7528\u7387 \u96c6\u7fa4\u4e0b\u6240\u6709 GPU \u5361\u7684\u5e73\u5747\u663e\u5b58\u4f7f\u7528\u7387 GPU \u5361\u529f\u7387 \u96c6\u7fa4\u4e0b\u6240\u6709 GPU \u5361\u7684\u529f\u7387 GPU \u5361\u6e29\u5ea6 \u96c6\u7fa4\u4e0b\u6240\u6709 GPU \u5361\u7684\u6e29\u5ea6 GPU \u7b97\u529b\u4f7f\u7528\u7387\u7ec6\u8282 24 \u5c0f\u65f6\u5185\uff0c\u96c6\u7fa4\u4e0b\u6240\u6709 GPU \u5361\u7684\u4f7f\u7528\u7387\u7ec6\u8282\uff08\u5305\u542b max\u3001avg\u3001current\uff09 GPU \u663e\u5b58\u4f7f\u7528\u91cf\u7ec6\u8282 24 \u5c0f\u65f6\u5185\uff0c\u96c6\u7fa4\u4e0b\u6240\u6709 GPU \u5361\u7684\u663e\u5b58\u4f7f\u7528\u91cf\u7ec6\u8282\uff08\u5305\u542b min\u3001max\u3001avg\u3001current\uff09 GPU \u663e\u5b58\u5e26\u5bbd\u4f7f\u7528\u7387 \u8868\u793a\u5185\u5b58\u5e26\u5bbd\u5229\u7528\u7387\u3002\u4ee5 Nvidia GPU V100 \u4e3a\u4f8b\uff0c\u5176\u6700\u5927\u5185\u5b58\u5e26\u5bbd\u4e3a 900 GB/sec\uff0c\u5982\u679c\u5f53\u524d\u7684\u5185\u5b58\u5e26\u5bbd\u4e3a 450 GB/sec\uff0c\u5219\u5185\u5b58\u5e26\u5bbd\u5229\u7528\u7387\u4e3a 50%"},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-metrics.html#_2","title":"\u8282\u70b9\u7ef4\u5ea6","text":"\u6307\u6807\u540d\u79f0 \u63cf\u8ff0 GPU \u6a21\u5f0f \u8282\u70b9\u4e0a GPU \u5361\u7684\u4f7f\u7528\u6a21\u5f0f\uff0c\u5305\u542b\u6574\u5361\u6a21\u5f0f\u3001MIG \u6a21\u5f0f\u3001vGPU \u6a21\u5f0f GPU \u7269\u7406\u5361\u6570 \u8282\u70b9\u4e0a\u6240\u6709\u7684 GPU \u5361\u6570\u91cf GPU \u865a\u62df\u5361\u6570 \u8282\u70b9\u4e0a\u5df2\u7ecf\u88ab\u521b\u5efa\u51fa\u6765\u7684 vGPU \u8bbe\u5907\u6570\u91cf GPU MIG \u5b9e\u4f8b\u6570 \u8282\u70b9\u4e0a\u5df2\u7ecf\u88ab\u521b\u5efa\u51fa\u6765\u7684 MIG \u5b9e\u4f8b\u6570 GPU \u663e\u5b58\u5206\u914d\u7387 \u8282\u70b9\u4e0a\u6240\u6709 GPU \u5361\u7684\u663e\u5b58\u5206\u914d\u7387 GPU \u7b97\u529b\u5e73\u5747\u4f7f\u7528\u7387 \u8282\u70b9\u4e0a\u6240\u6709 GPU \u5361\u7684\u7b97\u529b\u5e73\u5747\u4f7f\u7528\u7387 GPU \u663e\u5b58\u5e73\u5747\u4f7f\u7528\u7387 \u8282\u70b9\u4e0a\u6240\u6709 GPU \u5361\u7684\u5e73\u5747\u663e\u5b58\u4f7f\u7528\u7387 GPU \u9a71\u52a8\u7248\u672c \u8282\u70b9\u4e0a GPU \u5361\u9a71\u52a8\u7684\u7248\u672c\u4fe1\u606f GPU \u7b97\u529b\u4f7f\u7528\u7387\u7ec6\u8282 24 \u5c0f\u65f6\u5185\uff0c\u8282\u70b9\u4e0a\u6bcf\u5f20 GPU \u5361\u7684\u7b97\u529b\u4f7f\u7528\u7387\u7ec6\u8282\uff08\u5305\u542b max\u3001avg\u3001current\uff09 GPU \u663e\u5b58\u4f7f\u7528\u91cf 24 \u5c0f\u65f6\u5185\uff0c\u8282\u70b9\u4e0a\u6bcf\u5f20 GPU \u5361\u7684\u663e\u5b58\u4f7f\u7528\u91cf\u7ec6\u8282\uff08\u5305\u542b min\u3001max\u3001avg\u3001current\uff09

Troubleshooting GPU issues based on XID status

XID messages are error reports printed by the NVIDIA driver to the operating system's kernel log or event log. They identify GPU error events and provide the error type, location, and error code for faults in GPU hardware, NVIDIA software, or applications. If the XID check item on a GPU node is empty, there are no XID messages; if it is not, you can troubleshoot and resolve the issue yourself using the table below (a quick log check is sketched after this paragraph), or look up more XID messages.
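
A quick way to look for XID events directly on a GPU node is to grep the kernel log, where the NVIDIA driver tags them with an "NVRM: Xid" prefix; a minimal sketch:

    # Show any XID error reports the NVIDIA driver has logged since boot.
    dmesg -T | grep -i "NVRM: Xid"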

| XID message | Description |
| --- | --- |
| 13 | Graphics Engine Exception. Usually an array out-of-bounds access or an invalid instruction; a small chance of a hardware problem. |
| 31 | GPU memory page fault. Usually an illegal address access by the application; a very small chance of a driver or hardware problem. |
| 32 | Invalid or corrupted push buffer stream. The event is reported by the DMA controller on the PCIe bus that manages communication between the NVIDIA driver and the GPU; usually caused by PCI quality issues rather than by your program. |
| 38 | Driver firmware error. Usually a driver firmware error rather than a hardware problem. |
| 43 | GPU stopped processing. Usually an error in your own application rather than a hardware problem. |
| 45 | Preemptive cleanup, due to previous errors -- Most likely to see when running multiple cuda applications and hitting a DBE. Usually a GPU application exit caused by a manual kill or another fault (hardware, resource limits, etc.); XID 45 only reports the outcome, and the specific cause usually requires further log analysis. |
| 48 | Double Bit ECC Error (DBE). Reported when the GPU hits an uncorrectable error; the error is also reported to your application. Usually the GPU must be reset or the node rebooted to clear this error. |
| 61 | Internal micro-controller breakpoint/warning. An internal GPU engine has stopped working and your workload has already been affected. |
| 62 | Internal micro-controller halt. Similar trigger scenario to XID 61. |
| 63 | ECC page retirement or row remapping recording event. When an application hits a GPU memory hardware error, NVIDIA's self-correction mechanism retires or remaps the faulty memory region; the retirement or remapping information must be recorded to the infoROM to take permanent effect. Volta architecture: an ECC page retirement event was successfully recorded to the infoROM. Ampere architecture: a row remapping event was successfully recorded to the infoROM. |
| 64 | ECC page retirement or row remapper recording failure. Similar trigger scenario to XID 63, but XID 63 means the information was successfully recorded to the infoROM, while XID 64 means the recording failed. |
| 68 | NVDEC0 Exception. Usually a hardware or driver problem. |
| 74 | NVLINK Error. An XID raised by an NVLink hardware error, indicating the GPU has a serious hardware fault and must be taken offline for repair. |
| 79 | GPU has fallen off the bus. The GPU hardware is detected as having dropped off the bus and can no longer be found on it, indicating a serious hardware fault that requires offline repair. |
| 92 | High single-bit ECC error rate. A hardware or driver fault. |
| 94 | Contained ECC error. When an application hits an uncorrectable GPU memory ECC error, NVIDIA's error-containment mechanism tries to confine the error to the application that hit the hardware fault, so that other applications running on the GPU node are not affected. This event is raised when containment succeeds; only applications that hit the uncorrectable ECC error are affected. |
| 95 | Uncontained ECC error. Similar trigger scenario to XID 94, but XID 94 means containment succeeded while XID 95 means it failed, indicating that all applications running on that GPU have been affected. |

"},{"location":"admin/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-metrics.html#pod","title":"Pod dimension","text":"

| Category | Metric | Description |
| --- | --- | --- |
| Application overview: GPU card - compute & memory | Pod GPU compute utilization | Compute utilization of the GPU cards used by the current pod |
| | Pod GPU memory utilization | Memory utilization of the GPU cards used by the current pod |
| | Pod GPU memory usage | GPU memory used by the current pod |
| | GPU memory allocation | GPU memory allocated to the current pod |
| | Pod GPU memory copy utilization | Memory copy ratio of the GPU cards used by the current pod |
| GPU card - engine overview | GPU graphics engine activity percentage | Fraction of time within a monitoring cycle during which the Graphics or Compute engine is Active |
| | GPU memory bandwidth utilization | Fraction of cycles during which data is sent to or received from device memory (Memory BW Utilization). The value is the average over the interval, not an instantaneous value; a higher value indicates heavier use of device memory. A value of 1 (100%) corresponds to one DRAM instruction per cycle over the whole interval (in practice a peak of about 0.8 (80%) is the achievable maximum). A value of 0.2 (20%) means 20% of the cycles in the interval read from or wrote to device memory. |
| | Tensor core engine utilization | Fraction of time within a monitoring cycle during which the Tensor Core pipe is Active |
| | FP16 engine utilization | Fraction of time within a monitoring cycle during which the FP16 pipe is Active |
| | FP32 engine utilization | Fraction of time within a monitoring cycle during which the FP32 pipe is Active |
| | FP64 engine utilization | Fraction of time within a monitoring cycle during which the FP64 pipe is Active |
| | GPU decode utilization | Decoder engine ratio of the GPU card |
| | GPU encode utilization | Encoder engine ratio of the GPU card |
| GPU card - temperature & power | GPU temperature | Temperature of all GPU cards in the cluster |
| | GPU power | Power draw of all GPU cards in the cluster |
| GPU card - total energy | GPU total energy consumption | Total energy consumed by the GPU card |
| GPU card - Clock | GPU memory clock | Memory clock frequency |
| | GPU application SM clock | SM clock frequency of the application |
| | GPU application memory clock | Memory clock frequency of the application |
| | GPU video engine clock | Video engine clock frequency |
| | GPU throttle reasons | Reasons for clock throttling |
| GPU card - other details | Graphics engine active | Fraction of time during which any portion of the graphics or compute engine was active. The graphics engine is active when a graphics/compute context is bound and the graphics/compute pipe is busy. The value is the average over the interval, not an instantaneous value. |
| | SM active | Fraction of time during which at least one warp was active on a multiprocessor, averaged over all multiprocessors. Note that "active" does not necessarily mean a warp is actively computing; a warp waiting on a memory request, for example, counts as active. The value is the average over the interval, not an instantaneous value. A value of 0.8 or greater is necessary, but not sufficient, for effective GPU use; a value below 0.5 likely indicates inefficient GPU use. In a simplified view of the GPU architecture, if a GPU has N SMs, a kernel that uses N blocks and runs for the entire interval corresponds to an activity of 1 (100%); a kernel that uses N/5 blocks over the entire interval corresponds to 0.2 (20%); a kernel that uses N blocks but runs for only one fifth of the interval, with the SMs otherwise idle, also corresponds to 0.2 (20%). The value is independent of the number of threads per block (see DCGM_FI_PROF_SM_OCCUPANCY). |
| | SM occupancy | Ratio of warps resident on a multiprocessor to the maximum number of concurrent warps supported by the multiprocessor. The value is the average over the interval, not an instantaneous value. Higher occupancy does not necessarily indicate higher GPU utilization. For workloads limited by GPU memory bandwidth (see DCGM_FI_PROF_DRAM_ACTIVE), higher occupancy does indicate heavier GPU use; but if the workload is compute-bound (i.e., not limited by GPU memory bandwidth or latency), higher occupancy does not necessarily correlate with heavier use. Computing occupancy is not simple: it depends on GPU properties, threads per block, registers per thread, and shared memory per block, among other factors. Use the CUDA Occupancy Calculator to explore various occupancy scenarios. |
| | Tensor activity | Fraction of cycles during which the tensor (HMMA / IMMA) pipe was active. The value is the average over the interval, not an instantaneous value; a higher value indicates heavier use of the Tensor Cores. An activity of 1 (100%) corresponds to issuing one tensor instruction every other cycle over the entire interval. An activity of 0.2 (20%) could mean 20% of the SMs at 100% utilization over the entire period, 100% of the SMs at 20% utilization over the entire period, 100% of the SMs at 100% utilization for 20% of the period, or any combination in between (see DCGM_FI_PROF_SM_ACTIVE to help disambiguate these possibilities). |
| | FP64 engine activity | Fraction of cycles during which the FP64 (double precision) pipe was active. The value is the average over the interval, not an instantaneous value; a higher value indicates heavier use of the FP64 cores. An activity of 1 (100%) corresponds to one FP64 instruction on every SM every fourth cycle on Volta over the entire interval. An activity of 0.2 (20%) could mean 20% of the SMs at 100% utilization over the entire period, 100% of the SMs at 20% utilization over the entire period, 100% of the SMs at 100% utilization for 20% of the period, or any combination in between (see DCGM_FI_PROF_SM_ACTIVE to help disambiguate these possibilities). |
| | FP32 engine activity | Fraction of cycles during which the FMA (FP32 single precision and integer) pipe was active. The value is the average over the interval, not an instantaneous value; a higher value indicates heavier use of the FP32 cores. An activity of 1 (100%) corresponds to one FP32 instruction every other cycle over the entire interval. An activity of 0.2 (20%) could mean 20% of the SMs at 100% utilization over the entire period, 100% of the SMs at 20% utilization over the entire period, 100% of the SMs at 100% utilization for 20% of the period, or any combination in between (see DCGM_FI_PROF_SM_ACTIVE to help disambiguate these possibilities). |
| | FP16 engine activity | Fraction of cycles during which the FP16 (half precision) pipe was active. The value is the average over the interval, not an instantaneous value; a higher value indicates heavier use of the FP16 cores. An activity of 1 (100%) corresponds to one FP16 instruction every other cycle over the entire interval. An activity of 0.2 (20%) could mean 20% of the SMs at 100% utilization over the entire period, 100% of the SMs at 20% utilization over the entire period, 100% of the SMs at 100% utilization for 20% of the period, or any combination in between (see DCGM_FI_PROF_SM_ACTIVE to help disambiguate these possibilities). |
| | Memory bandwidth utilization | Fraction of cycles during which data was sent to or received from device memory. The value is the average over the interval, not an instantaneous value; a higher value indicates heavier use of device memory. An activity of 1 (100%) corresponds to one DRAM instruction per cycle over the entire interval (in practice a peak of about 0.8 (80%) is the achievable maximum). An activity of 0.2 (20%) means 20% of the cycles in the interval read from or wrote to device memory. |
| | NVLink bandwidth | Rate of data transmitted/received over NVLink, excluding protocol headers, in bytes per second. The value is the average over a period of time, not an instantaneous value. For example, if 1 GB of data is transferred within 1 second, the rate is 1 GB/s regardless of whether the data was transferred at a constant rate or in bursts. The theoretical maximum NVLink Gen2 bandwidth is 25 GB/s per link per direction. |
| | PCIe bandwidth | Rate of data transmitted/received over the PCIe bus, including protocol headers and data payload, in bytes per second. The value is the average over a period of time, not an instantaneous value. For example, if 1 GB of data is transferred within 1 second, the rate is 1 GB/s regardless of whether the data was transferred at a constant rate or in bursts. The theoretical maximum PCIe Gen3 bandwidth is 985 MB/s per lane. |
| | PCIe transmit rate | Rate at which the node's GPU card transmits data over the PCIe bus |
| | PCIe receive rate | Rate at which the node's GPU card receives data over the PCIe bus |

"},{"location":"admin/kpanda/gpu/nvidia/mig/index.html","title":"NVIDIA Multi-Instance GPU (MIG) Overview","text":""},{"location":"admin/kpanda/gpu/nvidia/mig/index.html#mig","title":"MIG Scenarios","text":"
• Multi-tenant cloud environments

  MIG allows cloud service providers to partition one physical GPU into multiple independent GPU instances, each of which can be allocated to a different tenant. This provides resource isolation and independence, satisfying multiple tenants' demand for GPU compute.

• Containerized applications

  MIG enables finer-grained GPU resource management in containerized environments. By partitioning a physical GPU into multiple MIG instances, each container can be allocated independent GPU compute resources, giving better performance isolation and resource utilization.

• Batch jobs

  For batch jobs that need large-scale parallel computation, MIG offers higher compute performance and larger memory capacity. Each MIG instance can draw on a portion of the physical GPU's compute resources, accelerating large-scale computing tasks.

• AI/machine learning training

  MIG provides greater compute power and memory capacity for training large deep learning models. With the physical GPU partitioned into multiple MIG instances, each instance can train a model independently, improving training efficiency and throughput.

Overall, NVIDIA MIG suits scenarios that require finer-grained allocation and management of GPU resources: it isolates resources, raises performance utilization, and satisfies the GPU compute demands of multiple users or applications.

                                                              "},{"location":"admin/kpanda/gpu/nvidia/mig/index.html#mig_1","title":"MIG \u6982\u8ff0","text":"

                                                              NVIDIA \u591a\u5b9e\u4f8b GPU\uff08Multi-Instance GPU\uff0c\u7b80\u79f0 MIG\uff09\u662f NVIDIA \u5728 H100\uff0cA100\uff0cA30 \u7cfb\u5217 GPU \u5361\u4e0a\u63a8\u51fa\u7684\u4e00\u9879\u65b0\u7279\u6027\uff0c \u65e8\u5728\u5c06\u4e00\u5757\u7269\u7406 GPU \u5206\u5272\u4e3a\u591a\u4e2a GPU \u5b9e\u4f8b\uff0c\u4ee5\u63d0\u4f9b\u66f4\u7ec6\u7c92\u5ea6\u7684\u8d44\u6e90\u5171\u4eab\u548c\u9694\u79bb\u3002MIG \u6700\u591a\u53ef\u5c06\u4e00\u5757 GPU \u5212\u5206\u6210\u4e03\u4e2a GPU \u5b9e\u4f8b\uff0c \u4f7f\u5f97\u4e00\u4e2a \u7269\u7406 GPU \u5361\u53ef\u4e3a\u591a\u4e2a\u7528\u6237\u63d0\u4f9b\u5355\u72ec\u7684 GPU \u8d44\u6e90\uff0c\u4ee5\u5b9e\u73b0\u6700\u4f73 GPU \u5229\u7528\u7387\u3002

                                                              \u8fd9\u4e2a\u529f\u80fd\u4f7f\u5f97\u591a\u4e2a\u5e94\u7528\u7a0b\u5e8f\u6216\u7528\u6237\u53ef\u4ee5\u540c\u65f6\u5171\u4eabGPU\u8d44\u6e90\uff0c\u63d0\u9ad8\u4e86\u8ba1\u7b97\u8d44\u6e90\u7684\u5229\u7528\u7387\uff0c\u5e76\u589e\u52a0\u4e86\u7cfb\u7edf\u7684\u53ef\u6269\u5c55\u6027\u3002

                                                              \u901a\u8fc7 MIG\uff0c\u6bcf\u4e2a GPU \u5b9e\u4f8b\u7684\u5904\u7406\u5668\u5728\u6574\u4e2a\u5185\u5b58\u7cfb\u7edf\u4e2d\u5177\u6709\u72ec\u7acb\u4e14\u9694\u79bb\u7684\u8def\u5f84\u2014\u2014\u82af\u7247\u4e0a\u7684\u4ea4\u53c9\u5f00\u5173\u7aef\u53e3\u3001L2 \u9ad8\u901f\u7f13\u5b58\u7ec4\u3001\u5185\u5b58\u63a7\u5236\u5668\u548c DRAM \u5730\u5740\u603b\u7ebf\u90fd\u552f\u4e00\u5206\u914d\u7ed9\u5355\u4e2a\u5b9e\u4f8b\u3002

                                                              \u8fd9\u786e\u4fdd\u4e86\u5355\u4e2a\u7528\u6237\u7684\u5de5\u4f5c\u8d1f\u8f7d\u80fd\u591f\u4ee5\u53ef\u9884\u6d4b\u7684\u541e\u5410\u91cf\u548c\u5ef6\u8fdf\u8fd0\u884c\uff0c\u5e76\u5177\u6709\u76f8\u540c\u7684\u4e8c\u7ea7\u7f13\u5b58\u5206\u914d\u548c DRAM \u5e26\u5bbd\u3002 MIG \u53ef\u4ee5\u5212\u5206\u53ef\u7528\u7684 GPU \u8ba1\u7b97\u8d44\u6e90\uff08\u5305\u62ec\u6d41\u591a\u5904\u7406\u5668\u6216 SM \u548c GPU \u5f15\u64ce\uff0c\u5982\u590d\u5236\u5f15\u64ce\u6216\u89e3\u7801\u5668\uff09\u8fdb\u884c\u5206\u533a\uff0c \u4ee5\u4fbf\u4e3a\u4e0d\u540c\u7684\u5ba2\u6237\u7aef\uff08\u5982\u4e91\u4e3b\u673a\u3001\u5bb9\u5668\u6216\u8fdb\u7a0b\uff09\u63d0\u4f9b\u5b9a\u4e49\u7684\u670d\u52a1\u8d28\u91cf\uff08QoS\uff09\u548c\u6545\u969c\u9694\u79bb\uff09\u3002 MIG \u4f7f\u591a\u4e2a GPU \u5b9e\u4f8b\u80fd\u591f\u5728\u5355\u4e2a\u7269\u7406 GPU \u4e0a\u5e76\u884c\u8fd0\u884c\u3002

                                                              MIG \u5141\u8bb8\u591a\u4e2a vGPU\uff08\u4ee5\u53ca\u4e91\u4e3b\u673a\uff09\u5728\u5355\u4e2a GPU \u5b9e\u4f8b\u4e0a\u5e76\u884c\u8fd0\u884c\uff0c\u540c\u65f6\u4fdd\u7559 vGPU \u63d0\u4f9b\u7684\u9694\u79bb\u4fdd\u8bc1\u3002 \u6709\u5173\u4f7f\u7528 vGPU \u548c MIG \u8fdb\u884c GPU \u5206\u533a\u7684\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u53c2\u9605 NVIDIA Multi-Instance GPU and NVIDIA Virtual Compute Server\u3002

                                                              "},{"location":"admin/kpanda/gpu/nvidia/mig/index.html#mig_2","title":"MIG \u67b6\u6784","text":"

                                                              \u5982\u4e0b\u662f\u4e00\u4e2a MIG \u7684\u6982\u8ff0\u56fe\uff0c\u53ef\u4ee5\u770b\u51fa MIG \u5c06\u4e00\u5f20\u7269\u7406 GPU \u5361\u865a\u62df\u5316\u6210\u4e86 7 \u4e2a GPU \u5b9e\u4f8b\uff0c\u8fd9\u4e9b GPU \u5b9e\u4f8b\u80fd\u591f\u53ef\u4ee5\u88ab\u591a\u4e2a User \u4f7f\u7528\u3002

                                                              "},{"location":"admin/kpanda/gpu/nvidia/mig/index.html#_1","title":"\u91cd\u8981\u6982\u5ff5","text":"
                                                              • SM \uff1a\u6d41\u5f0f\u591a\u5904\u7406\u5668\uff08Streaming Multiprocessor\uff09\uff0cGPU \u7684\u6838\u5fc3\u8ba1\u7b97\u5355\u5143\uff0c\u8d1f\u8d23\u6267\u884c\u56fe\u5f62\u6e32\u67d3\u548c\u901a\u7528\u8ba1\u7b97\u4efb\u52a1\u3002 \u6bcf\u4e2a SM \u5305\u542b\u4e00\u7ec4 CUDA \u6838\u5fc3\uff0c\u4ee5\u53ca\u5171\u4eab\u5185\u5b58\u3001\u5bc4\u5b58\u5668\u6587\u4ef6\u548c\u5176\u4ed6\u8d44\u6e90\uff0c\u53ef\u4ee5\u540c\u65f6\u6267\u884c\u591a\u4e2a\u7ebf\u7a0b\u3002 \u6bcf\u4e2a MIG \u5b9e\u4f8b\u90fd\u62e5\u6709\u4e00\u5b9a\u6570\u91cf\u7684 SM \u548c\u5176\u4ed6\u76f8\u5173\u8d44\u6e90\uff0c\u4ee5\u53ca\u88ab\u5212\u5206\u51fa\u6765\u7684\u663e\u5b58\u3002
                                                              • GPU Memory Slice \uff1aGPU \u5185\u5b58\u5207\u7247\uff0cGPU \u5185\u5b58\u5207\u7247\u662f GPU \u5185\u5b58\u7684\u6700\u5c0f\u90e8\u5206\uff0c\u5305\u62ec\u76f8\u5e94\u7684\u5185\u5b58\u63a7\u5236\u5668\u548c\u7f13\u5b58\u3002 GPU \u5185\u5b58\u5207\u7247\u5927\u7ea6\u662f GPU \u5185\u5b58\u8d44\u6e90\u603b\u91cf\u7684\u516b\u5206\u4e4b\u4e00\uff0c\u5305\u62ec\u5bb9\u91cf\u548c\u5e26\u5bbd\u3002
                                                              • GPU SM Slice \uff1aGPU SM \u5207\u7247\u662f GPU \u4e0a SM \u7684\u6700\u5c0f\u8ba1\u7b97\u5355\u4f4d\u3002\u5728 MIG \u6a21\u5f0f\u4e0b\u914d\u7f6e\u65f6\uff0c GPU SM \u5207\u7247\u5927\u7ea6\u662f GPU \u4e2d\u53ef\u7528 SMS \u603b\u6570\u7684\u4e03\u5206\u4e4b\u4e00\u3002
                                                              • GPU Slice \uff1aGPU \u5207\u7247\u662f GPU \u4e2d\u7531\u5355\u4e2a GPU \u5185\u5b58\u5207\u7247\u548c\u5355\u4e2a GPU SM \u5207\u7247\u7ec4\u5408\u5728\u4e00\u8d77\u7684\u6700\u5c0f\u90e8\u5206\u3002
                                                              • GPU Instance \uff1aGPU \u5b9e\u4f8b \uff08GI\uff09 \u662f GPU \u5207\u7247\u548c GPU \u5f15\u64ce\uff08DMA\u3001NVDEC \u7b49\uff09\u7684\u7ec4\u5408\u3002 GPU \u5b9e\u4f8b\u4e2d\u7684\u4efb\u4f55\u5185\u5bb9\u59cb\u7ec8\u5171\u4eab\u6240\u6709 GPU \u5185\u5b58\u5207\u7247\u548c\u5176\u4ed6 GPU \u5f15\u64ce\uff0c\u4f46\u5b83\u7684 SM \u5207\u7247\u53ef\u4ee5\u8fdb\u4e00\u6b65\u7ec6\u5206\u4e3a\u8ba1\u7b97\u5b9e\u4f8b\uff08CI\uff09\u3002 GPU \u5b9e\u4f8b\u63d0\u4f9b\u5185\u5b58 QoS\u3002\u6bcf\u4e2a GPU \u5207\u7247\u90fd\u5305\u542b\u4e13\u7528\u7684 GPU \u5185\u5b58\u8d44\u6e90\uff0c\u8fd9\u4e9b\u8d44\u6e90\u4f1a\u9650\u5236\u53ef\u7528\u5bb9\u91cf\u548c\u5e26\u5bbd\uff0c\u5e76\u63d0\u4f9b\u5185\u5b58 QoS\u3002 \u6bcf\u4e2a GPU \u5185\u5b58\u5207\u7247\u83b7\u5f97\u603b GPU \u5185\u5b58\u8d44\u6e90\u7684\u516b\u5206\u4e4b\u4e00\uff0c\u6bcf\u4e2a GPU SM \u5207\u7247\u83b7\u5f97 SM \u603b\u6570\u7684\u4e03\u5206\u4e4b\u4e00\u3002
                                                              • Compute Instance \uff1aGPU \u5b9e\u4f8b\u7684\u8ba1\u7b97\u5207\u7247\u53ef\u4ee5\u8fdb\u4e00\u6b65\u7ec6\u5206\u4e3a\u591a\u4e2a\u8ba1\u7b97\u5b9e\u4f8b \uff08CI\uff09\uff0c\u5176\u4e2d CI \u5171\u4eab\u7236 GI \u7684\u5f15\u64ce\u548c\u5185\u5b58\uff0c\u4f46\u6bcf\u4e2a CI \u90fd\u6709\u4e13\u7528\u7684 SM \u8d44\u6e90\u3002
                                                              "},{"location":"admin/kpanda/gpu/nvidia/mig/index.html#gpu-gi","title":"GPU \u5b9e\u4f8b\uff08GI\uff09","text":"

                                                              \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728 GPU \u4e0a\u521b\u5efa\u5404\u79cd\u5206\u533a\u3002\u5c06\u4f7f\u7528 A100-40GB \u4f5c\u4e3a\u793a\u4f8b\u6f14\u793a\u5982\u4f55\u5bf9\u5355\u4e2a GPU \u7269\u7406\u5361\u4e0a\u8fdb\u884c\u5206\u533a\u3002

                                                              GPU \u7684\u5206\u533a\u662f\u4f7f\u7528\u5185\u5b58\u5207\u7247\u8fdb\u884c\u7684\uff0c\u56e0\u6b64\u53ef\u4ee5\u8ba4\u4e3a A100-40GB GPU \u5177\u6709 8x5GB \u5185\u5b58\u5207\u7247\u548c 7 \u4e2a GPU SM \u5207\u7247\uff0c\u5982\u4e0b\u56fe\u6240\u793a\uff0c\u5c55\u793a\u4e86 A100 \u4e0a\u53ef\u7528\u7684\u5185\u5b58\u5207\u7247\u3002

                                                              \u5982\u4e0a\u6240\u8ff0\uff0c\u521b\u5efa GPU \u5b9e\u4f8b \uff08GI\uff09 \u9700\u8981\u5c06\u4e00\u5b9a\u6570\u91cf\u7684\u5185\u5b58\u5207\u7247\u4e0e\u4e00\u5b9a\u6570\u91cf\u7684\u8ba1\u7b97\u5207\u7247\u76f8\u7ed3\u5408\u3002 \u5728\u4e0b\u56fe\u4e2d\uff0c\u4e00\u4e2a 5GB \u5185\u5b58\u5207\u7247\u4e0e 1 \u4e2a\u8ba1\u7b97\u5207\u7247\u76f8\u7ed3\u5408\uff0c\u4ee5\u521b\u5efa 1g.5gb GI \u914d\u7f6e\u6587\u4ef6\uff1a

                                                              \u540c\u6837\uff0c4x5GB \u5185\u5b58\u5207\u7247\u53ef\u4ee5\u4e0e 4x1 \u8ba1\u7b97\u5207\u7247\u7ed3\u5408\u4f7f\u7528\u4ee5\u521b\u5efa 4g.20gb \u7684 GI \u914d\u7f6e\u6587\u4ef6\uff1a
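For illustration only, a minimal sketch of creating these GI profiles with nvidia-smi, assuming GPU 0 is an A100-40GB with MIG mode already enabled:

sudo nvidia-smi mig -lgip                  # list the GI profiles this GPU supports
sudo nvidia-smi mig -cgi 1g.5gb -i 0       # create one 1g.5gb GPU Instance on GPU 0
sudo nvidia-smi mig -cgi 4g.20gb -i 0      # create one 4g.20gb GPU Instance on GPU 0
sudo nvidia-smi mig -lgi                   # confirm the created GPU Instances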

                                                              "},{"location":"admin/kpanda/gpu/nvidia/mig/index.html#ci","title":"\u8ba1\u7b97\u5b9e\u4f8b\uff08CI\uff09","text":"

                                                              GPU \u5b9e\u4f8b\u7684\u8ba1\u7b97\u5207\u7247(GI)\u53ef\u4ee5\u8fdb\u4e00\u6b65\u7ec6\u5206\u4e3a\u591a\u4e2a\u8ba1\u7b97\u5b9e\u4f8b\uff08CI\uff09\uff0c\u5176\u4e2d CI \u5171\u4eab\u7236 GI \u7684\u5f15\u64ce\u548c\u5185\u5b58\uff0c \u4f46\u6bcf\u4e2a CI \u90fd\u6709\u4e13\u7528\u7684 SM \u8d44\u6e90\u3002\u4f7f\u7528\u4e0a\u9762\u7684\u76f8\u540c 4g.20gb \u793a\u4f8b\uff0c\u53ef\u4ee5\u521b\u5efa\u4e00\u4e2a CI \u4ee5\u4ec5\u4f7f\u7528\u7b2c\u4e00\u4e2a\u8ba1\u7b97\u5207\u7247\u7684 1c.4g.20gb \u8ba1\u7b97\u914d\u7f6e\uff0c\u5982\u4e0b\u56fe\u84dd\u8272\u90e8\u5206\u6240\u793a\uff1a

                                                              \u5728\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u53ef\u4ee5\u901a\u8fc7\u9009\u62e9\u4efb\u4f55\u8ba1\u7b97\u5207\u7247\u6765\u521b\u5efa 4 \u4e2a\u4e0d\u540c\u7684 CI\u3002\u8fd8\u53ef\u4ee5\u5c06\u4e24\u4e2a\u8ba1\u7b97\u5207\u7247\u7ec4\u5408\u5728\u4e00\u8d77\u4ee5\u521b\u5efa 2c.4g.20gb \u7684\u8ba1\u7b97\u914d\u7f6e\uff09\uff1a

                                                              \u9664\u6b64\u4e4b\u5916\uff0c\u8fd8\u53ef\u4ee5\u7ec4\u5408 3 \u4e2a\u8ba1\u7b97\u5207\u7247\u4ee5\u521b\u5efa\u8ba1\u7b97\u914d\u7f6e\u6587\u4ef6\uff0c\u6216\u8005\u53ef\u4ee5\u7ec4\u5408\u6240\u6709 4 \u4e2a\u8ba1\u7b97\u5207\u7247\u4ee5\u521b\u5efa 3c.4g.20gb \u3001 4c.4g.20gb \u8ba1\u7b97\u914d\u7f6e\u6587\u4ef6\u3002 \u5408\u5e76\u6240\u6709 4 \u4e2a\u8ba1\u7b97\u5207\u7247\u65f6\uff0c\u914d\u7f6e\u6587\u4ef6\u7b80\u79f0\u4e3a 4g.20gb \u3002
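A hedged sketch of carving CIs out of a 4g.20gb GI; the GI ID below is a placeholder for the ID that nvidia-smi mig -lgi reports:

sudo nvidia-smi mig -lcip -gi <GI_ID>            # list CI profiles available on that GI
sudo nvidia-smi mig -cci 1c.4g.20gb -gi <GI_ID>  # create a 1c.4g.20gb Compute Instance
sudo nvidia-smi mig -cci 2c.4g.20gb -gi <GI_ID>  # or combine two compute slices instead
sudo nvidia-smi mig -lci                         # confirm the created Compute Instances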

                                                              "},{"location":"admin/kpanda/gpu/nvidia/mig/create_mig.html","title":"\u5f00\u542f MIG \u529f\u80fd","text":"

                                                              \u672c\u7ae0\u8282\u4ecb\u7ecd\u5982\u4f55\u5f00\u542f NVIDIA MIG \u529f\u80fd\u65b9\u5f0f\uff0cNVIDIA \u5f53\u524d\u63d0\u4f9b\u4e24\u79cd\u5728 Kubernetes \u8282\u70b9\u4e0a\u516c\u5f00 MIG \u8bbe\u5907\u7684\u7b56\u7565\uff1a

                                                              • Single \u6a21\u5f0f\uff0c\u8282\u70b9\u4ec5\u5728\u5176\u6240\u6709 GPU \u4e0a\u516c\u5f00\u5355\u4e00\u7c7b\u578b\u7684 MIG \u8bbe\u5907\u3002
                                                              • Mixed \u6a21\u5f0f\uff0c\u8282\u70b9\u5728\u5176\u6240\u6709 GPU \u4e0a\u516c\u5f00\u6df7\u5408 MIG \u8bbe\u5907\u7c7b\u578b\u3002

                                                              \u8be6\u60c5\u53c2\u8003 NVIDIA GPU \u5361\u4f7f\u7528\u6a21\u5f0f\u3002

                                                              "},{"location":"admin/kpanda/gpu/nvidia/mig/create_mig.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              • \u5f85\u5b89\u88c5 GPU \u9a71\u52a8\u8282\u70b9\u7cfb\u7edf\u8981\u6c42\u8bf7\u53c2\u8003\uff1aGPU \u652f\u6301\u77e9\u9635
                                                              • \u786e\u8ba4\u96c6\u7fa4\u8282\u70b9\u4e0a\u5177\u6709\u5bf9\u5e94\u578b\u53f7\u7684 GPU \u5361\uff08NVIDIA H100\u3001 A100 \u548c A30 Tensor Core GPU\uff09\uff0c \u8be6\u60c5\u53c2\u8003 GPU \u652f\u6301\u77e9\u9635\u3002
                                                              • \u8282\u70b9\u4e0a\u7684\u6240\u6709 GPU \u5fc5\u987b\uff1a\u5c5e\u4e8e\u540c\u4e00\u4ea7\u54c1\u7ebf\uff08\u4f8b\u5982 A100-SXM-40GB\uff09
                                                              "},{"location":"admin/kpanda/gpu/nvidia/mig/create_mig.html#gpu-operator-addon","title":"\u5b89\u88c5 gpu-operator Addon","text":""},{"location":"admin/kpanda/gpu/nvidia/mig/create_mig.html#_2","title":"\u53c2\u6570\u914d\u7f6e","text":"

                                                              \u5b89\u88c5 Operator \u65f6\u9700\u8981\u5bf9\u5e94\u8bbe\u7f6e MigManager Config \u53c2\u6570\uff0c \u9ed8\u8ba4\u4e3a default-mig-parted-config \uff0c\u540c\u65f6\u4e5f\u53ef\u4ee5\u81ea\u5b9a\u4e49\u5207\u5206\u7b56\u7565\u914d\u7f6e\u6587\u4ef6\uff1a

                                                              "},{"location":"admin/kpanda/gpu/nvidia/mig/create_mig.html#_3","title":"\u81ea\u5b9a\u4e49\u5207\u5206\u7b56\u7565","text":"
                                                                ## \u81ea\u5b9a\u4e49\u5207\u5206 GI \u5b9e\u4f8b\u914d\u7f6e\n  all-disabled:\n    - devices: all\n      mig-enabled: false\n  all-enabled:\n    - devices: all\n      mig-enabled: true\n      mig-devices: {}\n  all-1g.10gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.5gb: 7\n  all-1g.10gb.me:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb+me: 1\n  all-1g.20gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.20gb: 4\n  all-2g.20gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        2g.20gb: 3\n  all-3g.40gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        3g.40gb: 2\n  all-4g.40gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        4g.40gb: 1\n  all-7g.80gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        7g.80gb: 1\n  all-balanced:\n    - device-filter: [\"0x233110DE\", \"0x232210DE\", \"0x20B210DE\", \"0x20B510DE\", \"0x20F310DE\", \"0x20F510DE\"]\n      devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb: 2\n        2g.20gb: 1\n        3g.40gb: 1\n  # \u8bbe\u7f6e\u540e\u4f1a\u6309\u7167\u8bbe\u7f6e\u89c4\u683c\u5207\u5206 CI \u5b9e\u4f8b\n  custom-config:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        3g.40gb: 2\n

                                                              \u5728\u4e0a\u8ff0\u7684 YAML \u4e2d\u8bbe\u7f6e custom-config \uff0c\u8bbe\u7f6e\u540e\u4f1a\u6309\u7167\u89c4\u683c\u5207\u5206 CI \u5b9e\u4f8b\u3002

                                                              custom-config:\n  - devices: all\n    mig-enabled: true\n    mig-devices:\n      1c.3g.40gb: 6\n

                                                              \u8bbe\u7f6e\u5b8c\u6210\u540e\uff0c\u5728\u786e\u8ba4\u90e8\u7f72\u5e94\u7528\u65f6\u5373\u53ef\u4f7f\u7528 GPU MIG \u8d44\u6e90\u3002
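To verify that the partitioned MIG resources are exposed on the node, a hedged sketch (the node name is a placeholder, and the profile names shown depend on your partitioning policy):

# List the extended resources the node advertises; with the mixed strategy you
# should see entries such as nvidia.com/mig-1c.3g.40gb
kubectl get node <node-name> -o jsonpath='{.status.allocatable}'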

                                                              "},{"location":"admin/kpanda/gpu/nvidia/mig/create_mig.html#gpu","title":"\u5207\u6362\u8282\u70b9 GPU \u6a21\u5f0f","text":"

                                                              Note

                                                              \u5207\u6362 GPU \u6a21\u5f0f\u6216\u8005\u4fee\u6539\u5207\u5206\u89c4\u683c\u540e\u9700\u8981\u91cd\u542f nvidia-mig-manager\u3002

                                                              \u5f53\u6211\u4eec\u6210\u529f\u5b89\u88c5 gpu-operator \u4e4b\u540e\uff0c\u8282\u70b9\u9ed8\u8ba4\u662f\u6574\u5361\u6a21\u5f0f\uff0c\u5728\u8282\u70b9\u7ba1\u7406\u9875\u9762\u4f1a\u6709\u6807\u8bc6\uff0c\u5982\u4e0b\u56fe\u6240\u793a\uff1a

                                                              \u70b9\u51fb\u8282\u70b9\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u9009\u62e9 GPU \u6a21\u5f0f\u5207\u6362 \uff0c\u7136\u540e\u9009\u62e9\u5bf9\u5e94\u7684 MIG \u6a21\u5f0f\u4ee5\u53ca\u5207\u5206\u7684\u7b56\u7565\uff0c\u8fd9\u91cc\u4ee5 MIXED \u6a21\u5f0f\u4e3a\u4f8b\uff1a

                                                              \u8fd9\u91cc\u4e00\u5171\u6709\u4e24\u4e2a\u914d\u7f6e\uff1a

                                                              1. MIg \u7b56\u7565\uff1aMixed \u4ee5\u53ca Single \u3002
                                                              2. \u5207\u5206\u7b56\u7565\uff1a\u8fd9\u91cc\u7684\u7b56\u7565\u9700\u8981\u4e0e default-mig-parted-config \uff08\u6216\u8005\u7528\u6237\u81ea\u5b9a\u4e49\u7684\u5207\u5206\u7b56\u7565\uff09\u914d\u7f6e\u6587\u4ef6\u4e2d\u7684 key \u4fdd\u6301\u4e00\u81f4\u3002

                                                              \u70b9\u51fb \u786e\u5b9a \u6309\u94ae\u540e\uff0c\u7b49\u5f85\u7ea6\u4e00\u5206\u949f\u5de6\u53f3\u5237\u65b0\u9875\u9762\uff0cMIG \u6a21\u5f0f\u5207\u6362\u6210\uff1a

                                                              "},{"location":"admin/kpanda/gpu/nvidia/mig/mig_command.html","title":"MIG \u76f8\u5173\u547d\u4ee4","text":"

                                                              GI \u76f8\u5173\u547d\u540d\uff1a

                                                              \u5b50\u547d\u4ee4 \u8bf4\u660e nvidia-smi mig -lgi \u67e5\u770b\u521b\u5efa GI \u5b9e\u4f8b\u5217\u8868 nvidia-smi mig -dgi -gi \u5220\u9664\u6307\u5b9a\u7684 GI \u5b9e\u4f8b nvidia-smi mig -lgip \u67e5\u770b GI \u7684 profile nvidia-smi mig -cgi \u901a\u8fc7\u6307\u5b9a profile \u7684 ID \u521b\u5efa GI

                                                              CI \u76f8\u5173\u547d\u4ee4\uff1a

                                                              \u5b50\u547d\u4ee4 \u8bf4\u660e nvidia-smi mig -lcip { -gi {gi Instance ID}} \u67e5\u770b CI \u7684 profile \uff0c\u6307\u5b9a -gi \u53ef\u4ee5\u67e5\u770b\u7279\u5b9a GI \u5b9e\u4f8b\u53ef\u4ee5\u521b\u5efa\u7684 CI nvidia-smi mig -lci \u67e5\u770b\u521b\u5efa\u7684 CI \u5b9e\u4f8b\u5217\u8868 nvidia-smi mig -cci {profile id} -gi {gi instance id} \u6307\u5b9a\u7684 GI \u521b\u5efa CI \u5b9e\u4f8b nvidia-smi mig -dci -ci \u5220\u9664\u6307\u5b9a CI \u5b9e\u4f8b

                                                              GI+CI \u76f8\u5173\u547d\u4ee4\uff1a

                                                              \u5b50\u547d\u4ee4 \u8bf4\u660e nvidia-smi mig -i 0 -cgi {gi profile id} -C {ci profile id} \u76f4\u63a5\u521b\u5efa GI + CI \u5b9e\u4f8b"},{"location":"admin/kpanda/gpu/nvidia/mig/mig_usage.html","title":"\u4f7f\u7528 MIG GPU \u8d44\u6e90","text":"

This section describes how applications can use MIG GPU resources.

"},{"location":"admin/kpanda/gpu/nvidia/mig/mig_usage.html#_1","title":"Prerequisites","text":"
• The 算丰 AI 算力平台 container management platform is deployed and running normally.
• The container management module has been connected to a Kubernetes cluster, or a Kubernetes cluster has been created, and the cluster's UI is accessible.
• GPU Operator is installed.
• The cluster nodes have GPU cards of the corresponding models.
"},{"location":"admin/kpanda/gpu/nvidia/mig/mig_usage.html#ui-mig-gpu","title":"Using MIG GPUs from the UI","text":"
1. Confirm whether the cluster has recognized the GPU card type.

  Go to Cluster Details -> Node Management and check whether the node has been correctly recognized as MIG mode.

2. Deploy an application from an image; NVIDIA MIG resources can be selected and used.

  • Example of MIG Single mode (used the same way as a whole GPU):

    Note

    The MIG single policy lets users request and consume GPU resources in the same way as whole GPUs (nvidia.com/gpu); the difference is that these resources can be a portion of a GPU (a MIG device) rather than the entire GPU. Learn more in the GPU MIG mode design.

  • Example of MIG Mixed mode:

                                                              "},{"location":"admin/kpanda/gpu/nvidia/mig/mig_usage.html#yaml-mig","title":"YAML \u914d\u7f6e\u4f7f\u7528 MIG","text":"

                                                              MIG Single \u6a21\u5f0f\uff1a

                                                              apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mig-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mig-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mig-demo\n    spec:\n      containers:\n        - name: mig-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/gpu: 2 # (1)!\n          imagePullPolicy: Always\n      restartPolicy: Always\n
                                                              1. \u7533\u8bf7 MIG GPU \u7684\u6570\u91cf

                                                              MIG Mixed \u6a21\u5f0f\uff1a

                                                              apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mig-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mig-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mig-demo\n    spec:\n      containers:\n        - name: mig-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/mig-4g.20gb: 1 # (1)!\n          imagePullPolicy: Always\n      restartPolicy: Always\n
                                                              1. \u901a\u8fc7 nvidia.com/mig-g.gb \u7684\u8d44\u6e90\u7c7b\u578b\u516c\u5f00\u5404\u4e2a MIG \u8bbe\u5907

                                                              \u8fdb\u5165\u5bb9\u5668\u540e\u53ef\u4ee5\u67e5\u770b\u53ea\u4f7f\u7528\u4e86\u4e00\u4e2a MIG \u8bbe\u5907\u3002
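One way to confirm this from outside the container, a sketch in which the pod name is a placeholder:

# List the devices visible inside the container; only the assigned MIG device
# should appear, rather than the full GPU
kubectl exec -it <mig-demo-pod> -- nvidia-smi -L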

                                                              "},{"location":"admin/kpanda/gpu/nvidia/vgpu/hami.html","title":"\u6784\u5efa vGPU \u663e\u5b58\u8d85\u914d\u955c\u50cf","text":"

                                                              Hami \u9879\u76ee\u4e2d vGPU \u663e\u5b58\u8d85\u914d\u7684\u529f\u80fd\u5df2\u7ecf\u4e0d\u5b58\u5728\uff0c\u76ee\u524d\u4f7f\u7528\u6709\u663e\u5b58\u8d85\u914d\u7684 libvgpu.so \u6587\u4ef6\u91cd\u65b0\u6784\u5efa\u3002

                                                              Dockerfile
                                                              FROM docker.m.daocloud.io/projecthami/hami:v2.3.11\nCOPY libvgpu.so /k8s-vgpu/lib/nvidia/\n

                                                              \u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u6784\u5efa\u955c\u50cf\uff1a

                                                              docker build -t release.daocloud.io/projecthami/hami:v2.3.11 -f Dockerfile .\n

                                                              \u7136\u540e\u628a\u955c\u50cf push \u5230 release.daocloud.io \u4e2d\u3002
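For example, assuming you have push access to that registry:

docker push release.daocloud.io/projecthami/hami:v2.3.11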

                                                              "},{"location":"admin/kpanda/gpu/nvidia/vgpu/vgpu_addon.html","title":"\u5b89\u88c5 NVIDIA vGPU Addon","text":"

                                                              \u5982\u9700\u5c06\u4e00\u5f20 NVIDIA \u865a\u62df\u5316\u6210\u591a\u4e2a\u865a\u62df GPU\uff0c\u5e76\u5c06\u5176\u5206\u914d\u7ed9\u4e0d\u540c\u7684\u4e91\u4e3b\u673a\u6216\u7528\u6237\uff0c\u60a8\u53ef\u4ee5\u4f7f\u7528 NVIDIA \u7684 vGPU \u80fd\u529b\u3002 \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u5b89\u88c5 vGPU \u63d2\u4ef6\uff0c\u8fd9\u662f\u4f7f\u7528 NVIDIA vGPU \u80fd\u529b\u7684\u524d\u63d0\u3002

                                                              "},{"location":"admin/kpanda/gpu/nvidia/vgpu/vgpu_addon.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              • \u53c2\u8003 GPU \u652f\u6301\u77e9\u9635 \u786e\u8ba4\u96c6\u7fa4\u8282\u70b9\u4e0a\u5177\u6709\u5bf9\u5e94\u578b\u53f7\u7684 GPU \u5361\u3002
                                                              • \u5f53\u524d\u96c6\u7fa4\u5df2\u901a\u8fc7 Operator \u90e8\u7f72 NVIDIA \u9a71\u52a8\uff0c\u5177\u4f53\u53c2\u8003 GPU Operator \u79bb\u7ebf\u5b89\u88c5\u3002
                                                              "},{"location":"admin/kpanda/gpu/nvidia/vgpu/vgpu_addon.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u529f\u80fd\u6a21\u5757\u8def\u5f84\uff1a \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u7ba1\u7406 \uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u6a21\u677f -> \u641c\u7d22 nvidia-vgpu \u3002

                                                              2. \u5728\u5b89\u88c5 vGPU \u7684\u8fc7\u7a0b\u4e2d\u63d0\u4f9b\u4e86\u51e0\u4e2a\u57fa\u672c\u4fee\u6539\u7684\u53c2\u6570\uff0c\u5982\u679c\u9700\u8981\u4fee\u6539\u9ad8\u7ea7\u53c2\u6570\u70b9\u51fb YAML \u5217\u8fdb\u884c\u4fee\u6539\uff1a

                                                                • deviceCoreScaling \uff1aNVIDIA \u88c5\u7f6e\u7b97\u529b\u4f7f\u7528\u6bd4\u4f8b\uff0c\u9884\u8bbe\u503c\u662f 1\u3002\u53ef\u4ee5\u5927\u4e8e 1\uff08\u542f\u7528\u865a\u62df\u7b97\u529b\uff0c\u5b9e\u9a8c\u529f\u80fd\uff09\u3002\u5982\u679c\u6211\u4eec\u914d\u7f6e devicePlugin.deviceCoreScaling \u53c2\u6570\u4e3a S\uff0c\u5728\u90e8\u7f72\u4e86\u6211\u4eec\u88c5\u7f6e\u63d2\u4ef6\u7684 Kubernetes \u96c6\u7fa4\u4e2d\uff0c\u8fd9\u5f20 GPU \u5206\u51fa\u7684 vGPU \u5c06\u603b\u5171\u5305\u542b S * 100% \u7b97\u529b\u3002

                                                                • deviceMemoryScaling \uff1aNVIDIA \u88c5\u7f6e\u663e\u5b58\u4f7f\u7528\u6bd4\u4f8b\uff0c\u9884\u8bbe\u503c\u662f 1\u3002\u53ef\u4ee5\u5927\u4e8e 1\uff08\u542f\u7528\u865a\u62df\u663e\u5b58\uff0c\u5b9e\u9a8c\u529f\u80fd\uff09\u3002 \u5bf9\u4e8e\u6709 M \u663e\u5b58\u5927\u5c0f\u7684 NVIDIA GPU\uff0c\u5982\u679c\u6211\u4eec\u914d\u7f6e devicePlugin.deviceMemoryScaling \u53c2\u6570\u4e3a S\uff0c \u5728\u90e8\u7f72\u4e86\u6211\u4eec\u88c5\u7f6e\u63d2\u4ef6\u7684 Kubernetes \u96c6\u7fa4\u4e2d\uff0c\u8fd9\u5f20 GPU \u5206\u51fa\u7684 vGPU \u5c06\u603b\u5171\u5305\u542b S * M \u663e\u5b58\u3002

                                                                • deviceSplitCount \uff1a\u6574\u6570\u7c7b\u578b\uff0c\u9884\u8bbe\u503c\u662f 10\u3002GPU \u7684\u5206\u5272\u6570\uff0c\u6bcf\u4e00\u5f20 GPU \u90fd\u4e0d\u80fd\u5206\u914d\u8d85\u8fc7\u5176\u914d\u7f6e\u6570\u76ee\u7684\u4efb\u52a1\u3002 \u82e5\u5176\u914d\u7f6e\u4e3a N \u7684\u8bdd\uff0c\u6bcf\u4e2a GPU \u4e0a\u6700\u591a\u53ef\u4ee5\u540c\u65f6\u5b58\u5728 N \u4e2a\u4efb\u52a1\u3002

                                                                • Resources \uff1a\u5c31\u662f\u5bf9\u5e94 vgpu-device-plugin \u548c vgpu-schedule pod \u7684\u8d44\u6e90\u4f7f\u7528\u91cf\u3002

                                                                • ServiceMonitor \uff1a\u9ed8\u8ba4\u4e0d\u5f00\u542f\uff0c\u5f00\u542f\u540e\u53ef\u524d\u5f80\u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u67e5\u770b vGPU \u76f8\u5173\u76d1\u63a7\u3002\u5982\u9700\u5f00\u542f\uff0c\u8bf7\u786e\u4fdd insight-agent \u5df2\u5b89\u88c5\u5e76\u5904\u4e8e\u8fd0\u884c\u72b6\u6001\uff0c\u5426\u5219\u5c06\u5bfc\u81f4 NVIDIA vGPU Addon \u5b89\u88c5\u5931\u8d25\u3002
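As a rough illustration only, a hypothetical values fragment that nests these keys under devicePlugin as the parameter names suggest; the actual chart structure may differ:

# Hypothetical values sketch for the nvidia-vgpu Helm chart
devicePlugin:
  deviceCoreScaling: 1      # vGPUs from one card together expose 1 * 100% of its compute
  deviceMemoryScaling: 2    # overcommit: expose 2 * M memory for a card with M memory (experimental)
  deviceSplitCount: 10      # at most 10 concurrent tasks per physical GPU
serviceMonitor:
  enabled: false            # enabling requires a running insight-agent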

3. After a successful installation, the following two types of pods appear in the chosen namespace, indicating that the NVIDIA vGPU plugin was installed successfully:

After installation succeeds, applications can be deployed using vGPU resources.

Note

The NVIDIA vGPU Addon does not support upgrading directly from the old version v2.0.0 to the latest v2.0.0+1; to upgrade, uninstall the old version and reinstall.

                                                              "},{"location":"admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html","title":"\u5e94\u7528\u4f7f\u7528 Nvidia vGPU","text":"

                                                              \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528 vGPU \u80fd\u529b\u3002

                                                              "},{"location":"admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              • \u96c6\u7fa4\u8282\u70b9\u4e0a\u5177\u6709\u5bf9\u5e94\u578b\u53f7\u7684 GPU \u5361
                                                              • \u5df2\u6210\u529f\u5b89\u88c5 vGPU Addon\uff0c\u8be6\u60c5\u53c2\u8003 GPU Addon \u5b89\u88c5
                                                              • \u5df2\u5b89\u88c5 GPU Operator\uff0c\u5e76\u5df2 \u5173\u95ed Nvidia.DevicePlugin \u80fd\u529b\uff0c\u53ef\u53c2\u8003 GPU Operator \u79bb\u7ebf\u5b89\u88c5
                                                              "},{"location":"admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html#vgpu","title":"\u754c\u9762\u4f7f\u7528 vGPU","text":"
                                                              1. \u786e\u8ba4\u96c6\u7fa4\u662f\u5426\u5df2\u68c0\u6d4b GPU \u5361\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u81ea\u52a8\u542f\u7528\u5e76\u81ea\u52a8\u68c0\u6d4b\u5bf9\u5e94 GPU \u7c7b\u578b\u3002 \u76ee\u524d\u96c6\u7fa4\u4f1a\u81ea\u52a8\u542f\u7528 GPU \uff0c\u5e76\u4e14\u8bbe\u7f6e GPU \u7c7b\u578b\u4e3a Nvidia vGPU \u3002

                                                              2. \u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u9009\u62e9\u7c7b\u578b\uff08Nvidia vGPU\uff09\u4e4b\u540e\uff0c\u4f1a\u81ea\u52a8\u51fa\u73b0\u5982\u4e0b\u51e0\u4e2a\u53c2\u6570\u9700\u8981\u586b\u5199\uff1a

                                                                • \u7269\u7406\u5361\u6570\u91cf\uff08nvidia.com/vgpu\uff09\uff1a\u8868\u793a\u5f53\u524d Pod \u9700\u8981\u6302\u8f7d\u51e0\u5f20\u7269\u7406\u5361\uff0c\u8f93\u5165\u503c\u5fc5\u987b\u4e3a\u6574\u6570\u4e14 \u5c0f\u4e8e\u7b49\u4e8e \u5bbf\u4e3b\u673a\u4e0a\u7684\u5361\u6570\u91cf\u3002
                                                                • GPU \u7b97\u529b\uff08nvidia.com/gpucores\uff09: \u8868\u793a\u6bcf\u5f20\u5361\u5360\u7528\u7684 GPU \u7b97\u529b\uff0c\u503c\u8303\u56f4\u4e3a 0-100\uff1b \u5982\u679c\u914d\u7f6e\u4e3a 0\uff0c \u5219\u8ba4\u4e3a\u4e0d\u5f3a\u5236\u9694\u79bb\uff1b\u914d\u7f6e\u4e3a100\uff0c\u5219\u8ba4\u4e3a\u72ec\u5360\u6574\u5f20\u5361\u3002
                                                                • GPU \u663e\u5b58\uff08nvidia.com/gpumem\uff09: \u8868\u793a\u6bcf\u5f20\u5361\u5360\u7528\u7684 GPU \u663e\u5b58\uff0c\u503c\u5355\u4f4d\u4e3a MB\uff0c\u6700\u5c0f\u503c\u4e3a 1\uff0c\u6700\u5927\u503c\u4e3a\u6574\u5361\u7684\u663e\u5b58\u503c\u3002

                                                                \u5982\u679c\u4e0a\u8ff0\u503c\u914d\u7f6e\u7684\u6709\u95ee\u9898\u5219\u4f1a\u51fa\u73b0\u8c03\u5ea6\u5931\u8d25\uff0c\u8d44\u6e90\u5206\u914d\u4e0d\u4e86\u7684\u60c5\u51b5\u3002

                                                              "},{"location":"admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html#yaml-vgpu","title":"YAML \u914d\u7f6e\u4f7f\u7528 vGPU","text":"

                                                              \u53c2\u8003\u5982\u4e0b\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\uff0c\u5728\u8d44\u6e90\u7533\u8bf7\u548c\u9650\u5236\u914d\u7f6e\u4e2d\u589e\u52a0 nvidia.com/vgpu: '1' \u53c2\u6570\u6765\u914d\u7f6e\u5e94\u7528\u4f7f\u7528\u7269\u7406\u5361\u7684\u6570\u91cf\u3002

                                                              apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-vgpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-vgpu-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: full-vgpu-demo\n    spec:\n      containers:\n        - name: full-vgpu-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/gpucores: '20'   # \u7533\u8bf7\u6bcf\u5f20\u5361\u5360\u7528 20% \u7684 GPU \u7b97\u529b\n              nvidia.com/gpumem: '200'   # \u7533\u8bf7\u6bcf\u5f20\u5361\u5360\u7528 200MB \u7684\u663e\u5b58\n              nvidia.com/vgpu: '1'   # \u7533\u8bf7GPU\u7684\u6570\u91cf\n          imagePullPolicy: Always\n      restartPolicy: Always\n
                                                              "},{"location":"admin/kpanda/gpu/volcano/drf.html","title":"DRF\uff08Dominant Resource Fairness\uff09 \u8c03\u5ea6\u7b56\u7565","text":"

                                                              DRF \u8c03\u5ea6\u7b56\u7565\u8ba4\u4e3a\u5360\u7528\u8d44\u6e90\u8f83\u5c11\u7684\u4efb\u52a1\u5177\u6709\u66f4\u9ad8\u7684\u4f18\u5148\u7ea7\u3002\u8fd9\u6837\u80fd\u591f\u6ee1\u8db3\u66f4\u591a\u7684\u4f5c\u4e1a\uff0c\u4e0d\u4f1a\u56e0\u4e3a\u4e00\u4e2a\u80d6\u4e1a\u52a1\uff0c \u997f\u6b7b\u5927\u6279\u5c0f\u4e1a\u52a1\u3002DRF \u8c03\u5ea6\u7b97\u6cd5\u80fd\u591f\u786e\u4fdd\u5728\u591a\u79cd\u7c7b\u578b\u8d44\u6e90\u5171\u5b58\u7684\u73af\u5883\u4e0b\uff0c\u5c3d\u53ef\u80fd\u6ee1\u8db3\u5206\u914d\u7684\u516c\u5e73\u539f\u5219\u3002

                                                              "},{"location":"admin/kpanda/gpu/volcano/drf.html#_1","title":"\u4f7f\u7528\u65b9\u5f0f","text":"

                                                              DRF \u8c03\u5ea6\u7b56\u7565\u9ed8\u8ba4\u5df2\u542f\u7528\uff0c\u65e0\u9700\u4efb\u4f55\u914d\u7f6e\u3002

                                                              kubectl -n volcano-system view configmaps volcano-scheduler-configmap\n
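For reference, the drf plugin typically appears in one of the plugin tiers of that ConfigMap; a trimmed sketch of what to look for (the exact layout varies across Volcano versions):

actions: "enqueue, allocate, backfill"
tiers:
  - plugins:
      - name: priority
      - name: gang
      - name: conformance
  - plugins:
      - name: drf        # Dominant Resource Fairness is enabled here
      - name: predicates
      - name: nodeorder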
                                                              "},{"location":"admin/kpanda/gpu/volcano/drf.html#_2","title":"\u4f7f\u7528\u6848\u4f8b","text":"

                                                              \u5728 AI \u8bad\u7ec3\uff0c\u6216\u5927\u6570\u636e\u8ba1\u7b97\u4e2d\uff0c\u901a\u8fc7\u6709\u9650\u8fd0\u884c\u4f7f\u7528\u8d44\u6e90\u5c11\u7684\u4efb\u52a1\uff0c\u8fd9\u6837\u53ef\u4ee5\u8ba9\u96c6\u7fa4\u8d44\u6e90\u4f7f\u7528\u7387\u66f4\u9ad8\uff0c\u800c\u4e14\u8fd8\u80fd\u907f\u514d\u5c0f\u4efb\u52a1\u88ab\u997f\u6b7b\u3002 \u5982\u4e0b\u521b\u5efa\u4e24\u4e2a Job\uff0c\u4e00\u4e2a\u662f\u5c0f\u8d44\u6e90\u9700\u6c42\uff0c\u4e00\u4e2a\u662f\u5927\u8d44\u6e90\u9700\u6c42\uff0c\u53ef\u4ee5\u770b\u51fa\u6765\u5c0f\u8d44\u6e90\u9700\u6c42\u7684 Job \u4f18\u5148\u8fd0\u884c\u8d77\u6765\u3002

                                                              cat <<EOF | kubectl apply -f -  \napiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: small-resource  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 4  \n  priorityClassName: small-resource  \n  tasks:  \n    - replicas: 4  \n      name: \"test\"  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                requests:  \n                  cpu: \"1\"  \n          restartPolicy: OnFailure  \n---  \napiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: large-resource  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 4  \n  priorityClassName: large-resource  \n  tasks:  \n    - replicas: 4  \n      name: \"test\"  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                requests:  \n                  cpu: \"2\"  \n          restartPolicy: OnFailure  \nEOF\n
                                                              "},{"location":"admin/kpanda/gpu/volcano/numa.html","title":"NUMA \u4eb2\u548c\u6027\u8c03\u5ea6","text":"

                                                              NUMA \u8282\u70b9\u662f Non-Uniform Memory Access\uff08\u975e\u7edf\u4e00\u5185\u5b58\u8bbf\u95ee\uff09\u67b6\u6784\u4e2d\u7684\u4e00\u4e2a\u57fa\u672c\u7ec4\u6210\u5355\u5143\uff0c\u4e00\u4e2a Node \u8282\u70b9\u662f\u591a\u4e2a NUMA \u8282\u70b9\u7684\u96c6\u5408\uff0c \u5728\u591a\u4e2a NUMA \u8282\u70b9\u4e4b\u95f4\u8fdb\u884c\u5185\u5b58\u8bbf\u95ee\u65f6\u4f1a\u4ea7\u751f\u5ef6\u8fdf\uff0c\u5f00\u53d1\u8005\u53ef\u4ee5\u901a\u8fc7\u4f18\u5316\u4efb\u52a1\u8c03\u5ea6\u548c\u5185\u5b58\u5206\u914d\u7b56\u7565\uff0c\u6765\u63d0\u9ad8\u5185\u5b58\u8bbf\u95ee\u6548\u7387\u548c\u6574\u4f53\u6027\u80fd\u3002

                                                              "},{"location":"admin/kpanda/gpu/volcano/numa.html#_1","title":"\u4f7f\u7528\u573a\u666f","text":"

                                                              Numa \u4eb2\u548c\u6027\u8c03\u5ea6\u7684\u5e38\u89c1\u573a\u666f\u662f\u90a3\u4e9b\u5bf9 CPU \u53c2\u6570\u654f\u611f/\u8c03\u5ea6\u5ef6\u8fdf\u654f\u611f\u7684\u8ba1\u7b97\u5bc6\u96c6\u578b\u4f5c\u4e1a\u3002\u5982\u79d1\u5b66\u8ba1\u7b97\u3001\u89c6\u9891\u89e3\u7801\u3001\u52a8\u6f2b\u52a8\u753b\u6e32\u67d3\u3001\u5927\u6570\u636e\u79bb\u7ebf\u5904\u7406\u7b49\u5177\u4f53\u573a\u666f\u3002

                                                              "},{"location":"admin/kpanda/gpu/volcano/numa.html#_2","title":"\u8c03\u5ea6\u7b56\u7565","text":"

                                                              Pod \u8c03\u5ea6\u65f6\u53ef\u4ee5\u91c7\u7528\u7684 NUMA \u653e\u7f6e\u7b56\u7565\uff0c\u5177\u4f53\u7b56\u7565\u5bf9\u5e94\u7684\u8c03\u5ea6\u884c\u4e3a\u8bf7\u53c2\u89c1 Pod \u8c03\u5ea6\u884c\u4e3a\u8bf4\u660e\u3002

                                                              • single-numa-node\uff1aPod \u8c03\u5ea6\u65f6\u4f1a\u9009\u62e9\u62d3\u6251\u7ba1\u7406\u7b56\u7565\u5df2\u7ecf\u8bbe\u7f6e\u4e3a single-numa-node \u7684\u8282\u70b9\u6c60\u4e2d\u7684\u8282\u70b9\uff0c\u4e14 CPU \u9700\u8981\u653e\u7f6e\u5728\u76f8\u540c NUMA \u4e0b\uff0c\u5982\u679c\u8282\u70b9\u6c60\u4e2d\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u8282\u70b9\uff0cPod \u5c06\u65e0\u6cd5\u88ab\u8c03\u5ea6\u3002
                                                              • restricted\uff1aPod \u8c03\u5ea6\u65f6\u4f1a\u9009\u62e9\u62d3\u6251\u7ba1\u7406\u7b56\u7565\u5df2\u7ecf\u8bbe\u7f6e\u4e3a restricted \u8282\u70b9\u6c60\u7684\u8282\u70b9\uff0c\u4e14 CPU \u9700\u8981\u653e\u7f6e\u5728\u76f8\u540c\u7684 NUMA \u96c6\u5408\u4e0b\uff0c\u5982\u679c\u8282\u70b9\u6c60\u4e2d\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u8282\u70b9\uff0cPod \u5c06\u65e0\u6cd5\u88ab\u8c03\u5ea6\u3002
                                                              • best-effort\uff1aPod \u8c03\u5ea6\u65f6\u4f1a\u9009\u62e9\u62d3\u6251\u7ba1\u7406\u7b56\u7565\u5df2\u7ecf\u8bbe\u7f6e\u4e3a best-effort \u8282\u70b9\u6c60\u7684\u8282\u70b9\uff0c\u4e14\u5c3d\u91cf\u5c06 CPU \u653e\u7f6e\u5728\u76f8\u540c NUMA \u4e0b\uff0c\u5982\u679c\u6ca1\u6709\u8282\u70b9\u6ee1\u8db3\u8fd9\u4e00\u6761\u4ef6\uff0c\u5219\u9009\u62e9\u6700\u4f18\u8282\u70b9\u8fdb\u884c\u653e\u7f6e\u3002
                                                              "},{"location":"admin/kpanda/gpu/volcano/numa.html#_3","title":"\u8c03\u5ea6\u539f\u7406","text":"

                                                              \u5f53Pod\u8bbe\u7f6e\u4e86\u62d3\u6251\u7b56\u7565\u65f6\uff0cVolcano \u4f1a\u6839\u636e Pod \u8bbe\u7f6e\u7684\u62d3\u6251\u7b56\u7565\u9884\u6d4b\u5339\u914d\u7684\u8282\u70b9\u5217\u8868\u3002 \u8c03\u5ea6\u8fc7\u7a0b\u5982\u4e0b\uff1a

                                                              1. \u6839\u636e Pod \u8bbe\u7f6e\u7684 Volcano \u62d3\u6251\u7b56\u7565\uff0c\u7b5b\u9009\u5177\u6709\u76f8\u540c\u7b56\u7565\u7684\u8282\u70b9\u3002

                                                              2. \u5728\u8bbe\u7f6e\u4e86\u76f8\u540c\u7b56\u7565\u7684\u8282\u70b9\u4e2d\uff0c\u7b5b\u9009 CPU \u62d3\u6251\u6ee1\u8db3\u8be5\u7b56\u7565\u8981\u6c42\u7684\u8282\u70b9\u8fdb\u884c\u8c03\u5ea6\u3002

                                                              Pod \u53ef\u914d\u7f6e\u7684\u62d3\u6251\u7b56\u7565 1. \u6839\u636e Pod \u8bbe\u7f6e\u7684\u62d3\u6251\u7b56\u7565\uff0c\u7b5b\u9009\u53ef\u8c03\u5ea6\u7684\u8282\u70b9 2. \u8fdb\u4e00\u6b65\u7b5b\u9009 CPU \u62d3\u6251\u6ee1\u8db3\u7b56\u7565\u7684\u8282\u70b9\u8fdb\u884c\u8c03\u5ea6 none \u9488\u5bf9\u914d\u7f6e\u4e86\u4ee5\u4e0b\u51e0\u79cd\u62d3\u6251\u7b56\u7565\u7684\u8282\u70b9\uff0c\u8c03\u5ea6\u65f6\u5747\u65e0\u7b5b\u9009\u884c\u4e3a\u3002none\uff1a\u53ef\u8c03\u5ea6\uff1bbest-effort\uff1a\u53ef\u8c03\u5ea6\uff1brestricted\uff1a\u53ef\u8c03\u5ea6\uff1bsingle-numa-node\uff1a\u53ef\u8c03\u5ea6 - best-effort \u7b5b\u9009\u62d3\u6251\u7b56\u7565\u540c\u6837\u4e3a\u201cbest-effort\u201d\u7684\u8282\u70b9\uff1anone\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bbest-effort\uff1a\u53ef\u8c03\u5ea6\uff1brestricted\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bsingle-numa-node\uff1a\u4e0d\u53ef\u8c03\u5ea6 \u5c3d\u53ef\u80fd\u6ee1\u8db3\u7b56\u7565\u8981\u6c42\u8fdb\u884c\u8c03\u5ea6\uff1a\u4f18\u5148\u8c03\u5ea6\u81f3\u5355 NUMA \u8282\u70b9\uff0c\u5982\u679c\u5355 NUMA \u8282\u70b9\u65e0\u6cd5\u6ee1\u8db3 CPU \u7533\u8bf7\u503c\uff0c\u5141\u8bb8\u8c03\u5ea6\u81f3\u591a\u4e2a NUMA \u8282\u70b9\u3002 restricted \u7b5b\u9009\u62d3\u6251\u7b56\u7565\u540c\u6837\u4e3a\u201crestricted\u201d\u7684\u8282\u70b9\uff1anone\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bbest-effort\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1brestricted\uff1a\u53ef\u8c03\u5ea6\uff1bsingle-numa-node\uff1a\u4e0d\u53ef\u8c03\u5ea6 \u4e25\u683c\u9650\u5236\u7684\u8c03\u5ea6\u7b56\u7565\uff1a\u5355 NUMA \u8282\u70b9\u7684CPU\u5bb9\u91cf\u4e0a\u9650\u5927\u4e8e\u7b49\u4e8e CPU \u7684\u7533\u8bf7\u503c\u65f6\uff0c\u4ec5\u5141\u8bb8\u8c03\u5ea6\u81f3\u5355 NUMA \u8282\u70b9\u3002\u6b64\u65f6\u5982\u679c\u5355 NUMA \u8282\u70b9\u5269\u4f59\u7684 CPU \u53ef\u4f7f\u7528\u91cf\u4e0d\u8db3\uff0c\u5219 Pod \u65e0\u6cd5\u8c03\u5ea6\u3002\u5355 NUMA \u8282\u70b9\u7684 CPU \u5bb9\u91cf\u4e0a\u9650\u5c0f\u4e8e CPU \u7684\u7533\u8bf7\u503c\u65f6\uff0c\u53ef\u5141\u8bb8\u8c03\u5ea6\u81f3\u591a\u4e2a NUMA \u8282\u70b9\u3002 single-numa-node \u7b5b\u9009\u62d3\u6251\u7b56\u7565\u540c\u6837\u4e3a\u201csingle-numa-node\u201d\u7684\u8282\u70b9\uff1anone\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bbest-effort\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1brestricted\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bsingle-numa-node\uff1a\u53ef\u8c03\u5ea6 \u4ec5\u5141\u8bb8\u8c03\u5ea6\u81f3\u5355 NUMA \u8282\u70b9\u3002"},{"location":"admin/kpanda/gpu/volcano/numa.html#numa_1","title":"\u914d\u7f6e NUMA \u4eb2\u548c\u8c03\u5ea6\u7b56\u7565","text":"
1. Configure policies in the Job:

  task:
    - replicas: 1
      name: "test-1"
      topologyPolicy: single-numa-node
    - replicas: 1
      name: "test-2"
      topologyPolicy: best-effort

2. Modify the kubelet scheduling policy by setting the --topology-manager-policy parameter (an equivalent config-file sketch follows this list). Four policies are supported:

  • none (default)
  • best-effort
  • restricted
  • single-numa-node
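A hedged sketch of setting the same policy through the kubelet configuration file instead of the command-line flag; note that topology pinning of exclusive CPUs also requires the static CPU manager policy:

apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cpuManagerPolicy: static                  # required for exclusive CPU pinning
topologyManagerPolicy: single-numa-node   # one of the four policies above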
                                                              "},{"location":"admin/kpanda/gpu/volcano/numa.html#_4","title":"\u4f7f\u7528\u6848\u4f8b","text":"
                                                              1. \u793a\u4f8b\u4e00\uff1a\u5728\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u4e2d\u914d\u7f6e NUMA \u4eb2\u548c\u6027\u3002

                                                                kind: Deployment  \napiVersion: apps/v1  \nmetadata:  \n  name: numa-tset  \nspec:  \n  replicas: 1  \n  selector:  \n    matchLabels:  \n      app: numa-tset  \n  template:  \n    metadata:  \n      labels:  \n        app: numa-tset  \n      annotations:  \n        volcano.sh/numa-topology-policy: single-numa-node    # set the topology policy  \n    spec:  \n      containers:  \n        - name: container-1  \n          image: nginx:alpine  \n          resources:  \n            requests:  \n              cpu: 2           # \u5fc5\u987b\u4e3a\u6574\u6570\uff0c\u4e14\u9700\u8981\u4e0elimits\u4e2d\u4e00\u81f4  \n              memory: 2048Mi  \n            limits:  \n              cpu: 2           # \u5fc5\u987b\u4e3a\u6574\u6570\uff0c\u4e14\u9700\u8981\u4e0erequests\u4e2d\u4e00\u81f4  \n              memory: 2048Mi  \n      imagePullSecrets:  \n      - name: default-secret\n
                                                              2. \u793a\u4f8b\u4e8c\uff1a\u521b\u5efa\u4e00\u4e2a Volcano Job\uff0c\u5e76\u4f7f\u7528 NUMA \u4eb2\u548c\u6027\u3002

                                                                apiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: vj-test  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 1  \n  tasks:  \n    - replicas: 1  \n      name: \"test\"  \n      topologyPolicy: best-effort   # set the topology policy for task  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                limits:  \n                  cpu: 20  \n                  memory: \"100Mi\"  \n          restartPolicy: OnFailure\n
                                                              "},{"location":"admin/kpanda/gpu/volcano/numa.html#numa_2","title":"NUMA \u8c03\u5ea6\u5206\u6790","text":"

                                                              \u5047\u8bbe NUMA \u8282\u70b9\u60c5\u51b5\u5982\u4e0b\uff1a

                                                              \u5de5\u4f5c\u8282\u70b9 \u8282\u70b9\u7b56\u7565\u62d3\u6251\u7ba1\u7406\u5668\u7b56\u7565 NUMA \u8282\u70b9 0 \u4e0a\u7684\u53ef\u5206\u914d CPU NUMA \u8282\u70b9 1 \u4e0a\u7684\u53ef\u5206\u914d CPU node-1 single-numa-node 16U 16U node-2 best-effort 16U 16U node-3 best-effort 20U 20U
• In Example 1, the Pod requests 2 CPU cores and the topology policy is set to "single-numa-node", so it is scheduled to node-1, which uses the same policy.
• In Example 2, the Pod requests 20 CPU cores and the topology policy is set to "best-effort"; it is scheduled to node-3, because node-3 can satisfy the Pod's CPU request within a single NUMA node, whereas node-2 would have to spread it across two NUMA nodes.
                                                              "},{"location":"admin/kpanda/gpu/volcano/numa.html#cpu","title":"\u67e5\u770b\u5f53\u524d\u8282\u70b9\u7684 CPU \u6982\u51b5","text":"

You can view the CPU overview of the current node with the lscpu command:

```sh
lscpu
...
CPU(s): 32
NUMA node(s): 2
NUMA node0 CPU(s): 0-15
NUMA node1 CPU(s): 16-31
```
                                                              "},{"location":"admin/kpanda/gpu/volcano/numa.html#cpu_1","title":"\u67e5\u770b\u5f53\u524d\u8282\u70b9\u7684 CPU \u5206\u914d","text":"

Then check the NUMA node usage:

```sh
# View the CPU allocation of the current node
cat /var/lib/kubelet/cpu_manager_state
{"policyName":"static","defaultCpuSet":"0,10-15,25-31","entries":{"777870b5-c64f-42f5-9296-688b9dc212ba":{"container-1":"16-24"},"fb15e10a-b6a5-4aaa-8fcd-76c1aa64e6fd":{"container-1":"1-9"}},"checksum":318470969}
```

The example above shows that two containers are running on the node: one occupies cores 1-9 of NUMA node0, and the other occupies cores 16-24 of NUMA node1.
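If jq happens to be available on the node, you can pretty-print the state file to make it easier to read:

```sh
cat /var/lib/kubelet/cpu_manager_state | jq .
```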

                                                              "},{"location":"admin/kpanda/gpu/volcano/volcano-gang-scheduler.html","title":"\u4f7f\u7528 Volcano \u7684 Gang Scheduler","text":"

Gang scheduling is one of the core scheduling algorithms of volcano-scheduler. It satisfies the "All or nothing" requirement during scheduling, preventing arbitrary Pod scheduling from wasting cluster resources. Concretely, the algorithm watches whether the number of scheduled Pods under a Job has reached the minimum run count; only when the Job's minimum run count is satisfied are all Pods under the Job scheduled, otherwise none are.

                                                              "},{"location":"admin/kpanda/gpu/volcano/volcano-gang-scheduler.html#_1","title":"\u4f7f\u7528\u573a\u666f","text":"

The Gang scheduling algorithm, based on the pod-group concept, is well suited to scenarios that require multi-process cooperation. AI workflows are often complex, involving data ingestion, data analysis, data splitting, training, serving, logging, and so on, and need a group of containers working together, which fits pod-group-based Gang scheduling well. Multi-threaded parallel computing and communication under the MPI framework, where master and worker processes must cooperate, is also a natural fit. Containers within a pod group are highly interdependent and may contend for resources; scheduling and allocating them as a whole effectively resolves deadlocks.

When cluster resources are scarce, Gang scheduling noticeably improves cluster resource utilization. For example, suppose the cluster can currently accommodate only 2 Pods but the minimum number of Pods to schedule is 3. All Pods of that Job then stay pending until the cluster can accommodate 3 Pods, at which point they are scheduled together. This effectively prevents the situation where some Pods are scheduled, fail to meet the requirement, yet still occupy resources and block other Jobs from running.

                                                              "},{"location":"admin/kpanda/gpu/volcano/volcano-gang-scheduler.html#_2","title":"\u6982\u5ff5\u8bf4\u660e","text":"

The Gang Scheduler is Volcano's core scheduling plugin and is enabled by default once Volcano is installed. When creating a workload, you only need to set the scheduler name to volcano, as the fragment below shows.
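A minimal sketch of the only change a workload needs:

```yaml
# Fragment of a Deployment/Job Pod template: route its Pods to volcano-scheduler.
spec:
  template:
    spec:
      schedulerName: volcano
```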

Volcano schedules in units of PodGroups. When creating a workload you do not need to create the PodGroup resource manually; Volcano creates it automatically from the workload information. Below is a PodGroup example:

```yaml
apiVersion: scheduling.volcano.sh/v1beta1
kind: PodGroup
metadata:
  name: test
  namespace: default
spec:
  minMember: 1  # (1)!
  minResources:  # (2)!
    cpu: "3"
    memory: "2048Mi"
  priorityClassName: high-priority # (3)!
  queue: default # (4)!
```
1. The minimum number of Pods or tasks that must run under this PodGroup. If the cluster resources cannot satisfy running minMember tasks, the scheduler will not schedule any task within this PodGroup.
2. The minimum resources required to run this PodGroup. If the cluster's allocatable resources do not meet minResources, the scheduler will not schedule any task within this PodGroup.
3. The priority of this PodGroup, used by the scheduler to sort all PodGroups in the queue during scheduling. system-node-critical and system-cluster-critical are two reserved values indicating the highest priority. If not explicitly specified, the default priority or zero priority is used.
4. The queue this PodGroup belongs to. The queue must already exist and be in the open state.
                                                              "},{"location":"admin/kpanda/gpu/volcano/volcano-gang-scheduler.html#_3","title":"\u4f7f\u7528\u6848\u4f8b","text":"

In MPI multi-threaded parallel computing and communication scenarios, all Pods must be scheduled successfully for the task to complete normally. Setting minAvailable to 4 means that 1 mpimaster and 3 mpiworker Pods are required to run.

```yaml
apiVersion: batch.volcano.sh/v1alpha1
kind: Job
metadata:
  name: lm-mpi-job
  labels:
    "volcano.sh/job-type": "MPI"
spec:
  minAvailable: 4
  schedulerName: volcano
  plugins:
    ssh: []
    svc: []
  policies:
    - event: PodEvicted
      action: RestartJob
  tasks:
    - replicas: 1
      name: mpimaster
      policies:
        - event: TaskCompleted
          action: CompleteJob
      template:
        spec:
          containers:
            - command:
                - /bin/sh
                - -c
                - |
                  MPI_HOST=`cat /etc/volcano/mpiworker.host | tr "\n" ","`;
                  mkdir -p /var/run/sshd; /usr/sbin/sshd;
                  mpiexec --allow-run-as-root --host ${MPI_HOST} -np 3 mpi_hello_world;
              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1
              name: mpimaster
              ports:
                - containerPort: 22
                  name: mpijob-port
              workingDir: /home
              resources:
                requests:
                  cpu: "500m"
                limits:
                  cpu: "500m"
          restartPolicy: OnFailure
          imagePullSecrets:
            - name: default-secret
    - replicas: 3
      name: mpiworker
      template:
        spec:
          containers:
            - command:
                - /bin/sh
                - -c
                - |
                  mkdir -p /var/run/sshd; /usr/sbin/sshd -D;
              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1
              name: mpiworker
              ports:
                - containerPort: 22
                  name: mpijob-port
              workingDir: /home
              resources:
                requests:
                  cpu: "1000m"
                limits:
                  cpu: "1000m"
          restartPolicy: OnFailure
          imagePullSecrets:
            - name: default-secret
```

The generated PodGroup resource:

```yaml
apiVersion: scheduling.volcano.sh/v1beta1
kind: PodGroup
metadata:
  annotations:
  creationTimestamp: "2024-05-28T09:18:50Z"
  generation: 5
  labels:
    volcano.sh/job-type: MPI
  name: lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2
  namespace: default
  ownerReferences:
  - apiVersion: batch.volcano.sh/v1alpha1
    blockOwnerDeletion: true
    controller: true
    kind: Job
    name: lm-mpi-job
    uid: 9c571015-37c7-4a1a-9604-eaa2248613f2
  resourceVersion: "25173454"
  uid: 7b04632e-7cff-4884-8e9a-035b7649d33b
spec:
  minMember: 4
  minResources:
    count/pods: "4"
    cpu: 3500m
    limits.cpu: 3500m
    pods: "4"
    requests.cpu: 3500m
  minTaskMember:
    mpimaster: 1
    mpiworker: 3
  queue: default
status:
  conditions:
  - lastTransitionTime: "2024-05-28T09:19:01Z"
    message: '3/4 tasks in gang unschedulable: pod group is not ready, 1 Succeeded,
      3 Releasing, 4 minAvailable'
    reason: NotEnoughResources
    status: "True"
    transitionID: f875efa5-0358-4363-9300-06cebc0e7466
    type: Unschedulable
  - lastTransitionTime: "2024-05-28T09:18:53Z"
    reason: tasks in gang are ready to be scheduled
    status: "True"
    transitionID: 5a7708c8-7d42-4c33-9d97-0581f7c06dab
    type: Scheduled
  phase: Pending
  succeeded: 1
```

As the PodGroup shows, it is associated with the workload through ownerReferences, and the minimum number of running Pods is set to 4.
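To inspect the automatically created PodGroup yourself (assuming the Volcano CRDs are installed and you have cluster access), something like the following works:

```sh
# List PodGroups in the Job's namespace, then inspect the one owned by lm-mpi-job.
kubectl -n default get podgroups.scheduling.volcano.sh
kubectl -n default describe podgroup lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2
```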

                                                              "},{"location":"admin/kpanda/gpu/volcano/volcano_binpack.html","title":"\u4f7f\u7528 Volcano Binpack \u8c03\u5ea6\u7b56\u7565","text":"

The goal of the Binpack scheduling algorithm is to fill already-occupied nodes as much as possible (avoiding empty nodes where it can). Concretely, the Binpack algorithm scores each candidate node, and a higher score means higher resource utilization on that node. By packing nodes as full as possible, workloads are concentrated on a subset of nodes. This minimizes resource fragmentation within nodes, reserves sufficient headroom on idle machines for Pods with larger resource requests, and maximizes the utilization of the cluster's idle resources.

                                                              "},{"location":"admin/kpanda/gpu/volcano/volcano_binpack.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"

Install the Volcano component on the Suanova AI computing platform in advance.

                                                              "},{"location":"admin/kpanda/gpu/volcano/volcano_binpack.html#binpack","title":"Binpack \u7b97\u6cd5\u539f\u7406","text":"

When scoring a node, Binpack combines the plugin's own weight with the weights configured for each resource. First, each resource type requested by the Pod is scored in turn. Taking CPU as an example, the CPU score on a candidate node is:

```
CPU.weight * (request + used) / allocatable
```

That is, the higher the CPU weight, the higher the score, and the fuller the node's resource usage, the higher the score. Memory, GPU, and other resources work the same way. Where:

• CPU.weight is the CPU weight configured by the user
• request is the amount of CPU requested by the current Pod
• used is the amount of CPU already allocated on the current node
• allocatable is the total allocatable CPU on the current node

The node's overall score under the Binpack policy is:

```
binpack.weight * (CPU.score + Memory.score + GPU.score) / (CPU.weight + Memory.weight + GPU.weight) * 100
```

That is, the larger the Binpack plugin's weight, the higher the score; and the larger a resource type's weight, the larger that resource's share of the score. Where:

• binpack.weight is the binpack scheduling policy weight configured by the user
• CPU.score is the CPU resource score; CPU.weight is the CPU weight
• Memory.score is the Memory resource score; Memory.weight is the Memory weight
• GPU.score is the GPU resource score; GPU.weight is the GPU weight

Suppose the cluster contains two nodes, Node 1 and Node 2. When scheduling a Pod, the Binpack policy scores each node separately. Assume the cluster is configured with CPU.weight = 1, Memory.weight = 1, GPU.weight = 2, and binpack.weight = 5.

1. Binpack scores the resources of Node 1; each resource is computed as follows:

• CPU Score:

  `CPU.weight * (request + used) / allocatable = 1 * (2 + 4) / 8 = 0.75`

• Memory Score:

  `Memory.weight * (request + used) / allocatable = 1 * (4 + 8) / 16 = 0.75`

• GPU Score:

  `GPU.weight * (request + used) / allocatable = 2 * (4 + 4) / 8 = 2`

2. The node's total score is computed as:

```
binpack.weight * (CPU.score + Memory.score + GPU.score) / (CPU.weight + Memory.weight + GPU.weight) * 100
```

Assuming binpack.weight is 5, Node 1's score under the Binpack policy is:

```
5 * (0.75 + 0.75 + 2) / (1 + 1 + 2) * 100 = 437.5
```
3. Binpack scores the resources of Node 2:

• CPU Score:

  `CPU.weight * (request + used) / allocatable = 1 * (2 + 6) / 8 = 1`

• Memory Score:

  `Memory.weight * (request + used) / allocatable = 1 * (4 + 8) / 16 = 0.75`

• GPU Score:

  `GPU.weight * (request + used) / allocatable = 2 * (4 + 4) / 8 = 2`

4. Node 2's score under the Binpack policy is:

```
5 * (1 + 0.75 + 2) / (1 + 1 + 2) * 100 = 468.75
```

In summary, Node 2 scores higher than Node 1, so under the Binpack policy the Pod is scheduled to Node 2 first.

                                                              "},{"location":"admin/kpanda/gpu/volcano/volcano_binpack.html#_2","title":"\u4f7f\u7528\u6848\u4f8b","text":"

The Binpack scheduling plugin is enabled by default when Volcano is installed; if no weights are configured, the following default weights are used.

```yaml
- plugins:
    - name: binpack
      arguments:
        binpack.weight: 1
        binpack.cpu: 1
        binpack.memory: 1
```

The default weight does not express a strong packing preference, so change it to binpack.weight: 10.

```sh
kubectl -n volcano-system edit configmaps volcano-scheduler-configmap
```
```yaml
- plugins:
    - name: binpack
      arguments:
        binpack.weight: 10
        binpack.cpu: 1
        binpack.memory: 1
        binpack.resources: nvidia.com/gpu, example.com/foo
        binpack.resources.nvidia.com/gpu: 2
        binpack.resources.example.com/foo: 3
```

After making the change, restart the volcano-scheduler Pod for it to take effect, e.g. as sketched below.
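One way to do this, assuming the scheduler runs as a Deployment named volcano-scheduler in the volcano-system namespace:

```sh
kubectl -n volcano-system rollout restart deployment volcano-scheduler
```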

Create the following Deployment.

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: binpack-test
  labels:
    app: binpack-test
spec:
  replicas: 2
  selector:
    matchLabels:
      app: test
  template:
    metadata:
      labels:
        app: test
    spec:
      schedulerName: volcano
      containers:
        - name: test
          image: busybox
          imagePullPolicy: IfNotPresent
          command: ["sh", "-c", 'echo "Hello, Kubernetes!" && sleep 3600']
          resources:
            requests:
              cpu: 500m
            limits:
              cpu: 500m
```

On a two-node cluster you can see that both Pods are scheduled onto the same node.
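You can verify the placement with a quick query (the app=test label comes from the Deployment above):

```sh
kubectl get pods -l app=test -o wide   # the NODE column should show the same node for both replicas
```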

                                                              "},{"location":"admin/kpanda/gpu/volcano/volcano_priority.html","title":"\u4f18\u5148\u7ea7\u62a2\u5360\uff08Preemption scheduling\uff09\u7b56\u7565","text":"

Volcano implements the priority preemption policy through the Priority plugin. When cluster resources are limited and multiple Jobs are waiting to be scheduled, the default Kubernetes scheduler may let Jobs with more Pods obtain more resources. volcano-scheduler instead provides algorithms that let different Jobs share cluster resources in a fair-share manner.

The Priority plugin lets users define custom priorities for Jobs and Tasks, and tailor scheduling policies at different levels as needed. For example, for applications with high real-time requirements, such as finance or IoT monitoring, the Priority plugin ensures they are scheduled first.

                                                              "},{"location":"admin/kpanda/gpu/volcano/volcano_priority.html#_1","title":"\u4f7f\u7528\u65b9\u5f0f","text":"

Priority is determined by the Value in the configured PriorityClass; the larger the value, the higher the priority. The plugin is enabled by default and needs no modification. You can confirm or modify it with the following command.

```sh
kubectl -n volcano-system edit configmaps volcano-scheduler-configmap
```
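For orientation, the priority plugin typically appears in the scheduler configuration roughly as follows. This is only a sketch; the defaults in your ConfigMap may differ:

```yaml
actions: "enqueue, allocate, backfill"
tiers:
  - plugins:
      - name: priority
      - name: gang
      - name: conformance
```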
                                                              "},{"location":"admin/kpanda/gpu/volcano/volcano_priority.html#_2","title":"\u4f7f\u7528\u6848\u4f8b","text":"

Assume the cluster has two idle nodes and three workloads with different priorities: high-priority, med-priority, and low-priority. After the high-priority workload is running and has filled the cluster's resources, submit the med-priority and low-priority workloads. Because all cluster resources are occupied by the higher-priority workload, the med-priority and low-priority workloads stay pending. When the high-priority workload finishes, the med-priority workload is scheduled first according to the priority scheduling principle.

1. Create three priority definitions via priority.yaml: high-priority, med-priority, and low-priority.

View priority.yaml

                                                                cat <<EOF | kubectl apply -f - \napiVersion: scheduling.k8s.io/v1 \nkind: PriorityClass \nitems: \n  - metadata: \n      name: high-priority \n    value: 100 \n    globalDefault: false \n    description: \"This priority class should be used for volcano job only.\" \n  - metadata: \n      name: med-priority \n    value: 50 \n    globalDefault: false \n    description: \"This priority class should be used for volcano job only.\" \n  - metadata: \n      name: low-priority \n    value: 10 \n    globalDefault: false \n    description: \"This priority class should be used for volcano job only.\" \nEOF\n
2. View the priority definitions.

```sh
kubectl get PriorityClass
```
```
NAME                      VALUE        GLOBAL-DEFAULT   AGE
high-priority             100          false            97s
low-priority              10           false            97s
med-priority              50           false            97s
system-cluster-critical   2000000000   false            6d6h
system-node-critical      2000001000   false            6d6h
```

3. Create the high-priority workload high-priority-job, which occupies all cluster resources.

View high-priority-job
```sh
cat <<EOF | kubectl apply -f -
apiVersion: batch.volcano.sh/v1alpha1
kind: Job
metadata:
  name: priority-high
spec:
  schedulerName: volcano
  minAvailable: 4
  priorityClassName: high-priority
  tasks:
    - replicas: 4
      name: "test"
      template:
        spec:
          containers:
            - image: alpine
              command: ["/bin/sh", "-c", "sleep 1000"]
              imagePullPolicy: IfNotPresent
              name: running
              resources:
                requests:
                  cpu: "4"
          restartPolicy: OnFailure
EOF
```

Check the Pods' status with kubectl get pod:

```sh
kubectl get pods
```
```
NAME                   READY   STATUS    RESTARTS   AGE
priority-high-test-0   1/1     Running   0          3s
priority-high-test-1   1/1     Running   0          3s
priority-high-test-2   1/1     Running   0          3s
priority-high-test-3   1/1     Running   0          3s
```

At this point, all cluster node resources are occupied.

4. Create the medium-priority workload med-priority-job and the low-priority workload low-priority-job.

                                                                med-priority-job
                                                                cat <<EOF | kubectl apply -f -  \napiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: priority-medium  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 4  \n  priorityClassName: med-priority  \n  tasks:  \n    - replicas: 4  \n      name: \"test\"  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                requests:  \n                  cpu: \"4\"  \n          restartPolicy: OnFailure  \nEOF\n
                                                                low-priority-job
                                                                cat <<EOF | kubectl apply -f -  \napiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: priority-low  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 4  \n  priorityClassName: low-priority  \n  tasks:  \n    - replicas: 4  \n      name: \"test\"  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                requests:  \n                  cpu: \"4\"  \n          restartPolicy: OnFailure  \nEOF\n

Check the Pods' status with kubectl get pod; cluster resources are insufficient, so the Pods stay Pending:

```sh
kubectl get pods
```
```
NAME                     READY   STATUS    RESTARTS   AGE
priority-high-test-0     1/1     Running   0          3m29s
priority-high-test-1     1/1     Running   0          3m29s
priority-high-test-2     1/1     Running   0          3m29s
priority-high-test-3     1/1     Running   0          3m29s
priority-low-test-0      0/1     Pending   0          2m26s
priority-low-test-1      0/1     Pending   0          2m26s
priority-low-test-2      0/1     Pending   0          2m26s
priority-low-test-3      0/1     Pending   0          2m26s
priority-medium-test-0   0/1     Pending   0          2m36s
priority-medium-test-1   0/1     Pending   0          2m36s
priority-medium-test-2   0/1     Pending   0          2m36s
priority-medium-test-3   0/1     Pending   0          2m36s
```

5. Delete the high-priority workload to release cluster resources; the med-priority workload will then be scheduled first. Run kubectl delete -f high_priority_job.yaml to release the resources, then check the Pods' scheduling status:

```sh
kubectl get pods
```
```
NAME                     READY   STATUS    RESTARTS   AGE
priority-low-test-0      0/1     Pending   0          5m18s
priority-low-test-1      0/1     Pending   0          5m18s
priority-low-test-2      0/1     Pending   0          5m18s
priority-low-test-3      0/1     Pending   0          5m18s
priority-medium-test-0   1/1     Running   0          5m28s
priority-medium-test-1   1/1     Running   0          5m28s
priority-medium-test-2   1/1     Running   0          5m28s
priority-medium-test-3   1/1     Running   0          5m28s
```

                                                              "},{"location":"admin/kpanda/gpu/volcano/volcano_user_guide.html","title":"\u5b89\u88c5 Volcano","text":"

As Kubernetes (K8s) has become the platform of choice for orchestrating and managing cloud-native applications, many applications are actively migrating to it. In AI and machine learning, where tasks typically involve heavy computation, developers tend to build AI platforms on Kubernetes to take full advantage of its strengths in resource management, application orchestration, and operations monitoring.

However, the default Kubernetes scheduler was designed mainly for long-running services and falls short for AI, big data, and other tasks that need batch and elastic scheduling. For example, under intense resource contention the default scheduler may distribute resources unevenly, affecting normal task execution.

Take a TensorFlow job as an example: it contains two roles, PS (parameter server) and Worker, which must work together to complete the job. Deploying only one role leaves the job unable to run. The default scheduler schedules Pods one by one and cannot perceive the dependency between PS and Worker in a TFJob. Under high load, this can cause multiple jobs to each receive only part of the resources they need, so none can finish, wasting resources.

                                                              "},{"location":"admin/kpanda/gpu/volcano/volcano_user_guide.html#volcano_1","title":"Volcano \u7684\u8c03\u5ea6\u7b56\u7565\u4f18\u52bf","text":"

Volcano provides multiple scheduling policies to address these challenges. Gang scheduling ensures that the multiple tasks (Pods) of a distributed machine-learning training job start simultaneously, avoiding deadlock; preemption scheduling lets high-priority jobs preempt the resources of low-priority jobs when resources are scarce, ensuring critical tasks finish first.

In addition, Volcano integrates seamlessly with mainstream computing frameworks such as Spark, TensorFlow, and PyTorch, and supports mixed scheduling of heterogeneous devices such as CPUs and GPUs, providing comprehensive optimization for AI computing tasks.

Next, we introduce how to install and use Volcano so you can take full advantage of its scheduling policies to optimize AI computing tasks.

                                                              "},{"location":"admin/kpanda/gpu/volcano/volcano_user_guide.html#volcano_2","title":"\u5b89\u88c5 Volcano","text":"
1. In Cluster Details -> Helm Apps -> Helm Templates, find Volcano and install it.

2. Verify that Volcano is installed, i.e., that the volcano-admission, volcano-controllers, and volcano-scheduler components are running normally, e.g. with the quick check below.
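A quick check, assuming Volcano was installed into the volcano-system namespace:

```sh
kubectl -n volcano-system get pods
# Expect Running Pods whose names include volcano-admission, volcano-controllers, volcano-scheduler.
```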

Volcano is usually used together with the AI Lab platform to form an effective closed loop for the whole development and training workflow: datasets, Notebooks, and training jobs.

                                                              "},{"location":"admin/kpanda/helm/index.html","title":"Helm \u6a21\u677f","text":"

Helm is the package management tool for Kubernetes, making it easy for users to quickly discover, share, and use applications built for Kubernetes. The container management module provides hundreds of Helm templates covering major scenarios such as storage, networking, monitoring, and databases. With these templates you can quickly deploy and conveniently manage Helm apps through the UI. In addition, you can add more customized templates by adding Helm repositories to meet diverse needs.

Key concepts:

When using Helm, it is important to understand the following key concepts:

• Chart: a Helm installation package containing the images, dependencies, and resource definitions needed to run an application; it may also include service definitions for a Kubernetes cluster, similar to a formula in Homebrew, a dpkg in APT, or an rpm file in Yum. In the Suanova AI computing platform, a Chart is called a Helm Template.

• Release: an instance of a Chart running on a Kubernetes cluster. A Chart can be installed multiple times in the same cluster, and each installation creates a new Release. In the Suanova AI computing platform, a Release is called a Helm App.

• Repository: a repository for publishing and storing Charts. In the Suanova AI computing platform, a Repository is called a Helm Repository.

For more details, see the Helm official website.

Related operations:

• Upload Helm templates: describes how to upload a Helm template.
• Manage Helm apps: including installing, updating, and uninstalling Helm apps, and viewing Helm operation records.
• Manage Helm repositories: including installing, updating, and deleting Helm repositories.
                                                              "},{"location":"admin/kpanda/helm/Import-addon.html","title":"\u5c06\u81ea\u5b9a\u4e49 Helm \u5e94\u7528\u5bfc\u5165\u7cfb\u7edf\u5185\u7f6e Addon","text":"

This article explains how to import Helm apps into the system's built-in addons in both offline and online environments.

                                                              "},{"location":"admin/kpanda/helm/Import-addon.html#_1","title":"\u79bb\u7ebf\u73af\u5883","text":"

An offline environment is one that cannot connect to the internet, or a closed private network.

                                                              "},{"location":"admin/kpanda/helm/Import-addon.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
• A runnable charts-syncer exists. If not, you can click to download it.
• The Helm Chart has been adapted for charts-syncer, i.e., a .relok8s-images.yaml file has been added inside the Helm Chart. The file must include all images used by the Chart, and may also include images not directly used by the Chart, such as images used by an Operator.

                                                              Note

• For how to write the file, see image-hints-file. The image's registry and repository must be specified separately, because the registry/repository needs to be replaced or modified when loading images.
• The bootstrap cluster where the installer resides has charts-syncer installed. If you are importing the custom Helm app into the installer's bootstrap cluster, you can skip the download and go straight to adaptation; if the charts-syncer binary is not installed, you can download it immediately.
                                                              "},{"location":"admin/kpanda/helm/Import-addon.html#helm-chart","title":"\u540c\u6b65 Helm Chart","text":"
1. Go to Container Management -> Helm Apps -> Helm Repositories, search for addon, and obtain the built-in repository address and username/password (the default username/password for the system built-in repository is rootuser/rootpass123).
2. Sync the Helm Chart to the container management built-in repository addon:

• Write the following configuration file, modify it as needed, and save it as sync-dao-2048.yaml.

```yaml
source:  # source Helm Charts information
  repo:
    kind: HARBOR # can also be any other supported Helm Chart repository kind, e.g. CHARTMUSEUM
    url: https://release-ci.daocloud.io/chartrepo/community #  change to the chart repo url
    #auth: # username/password; can be omitted if no password is set
      #username: "admin"
      #password: "Harbor12345"
charts:  # charts to sync
  - name: dao-2048 # helm charts information; if omitted, all charts in the source helm repo are synced
    versions:
      - 1.4.1
target:  # target Helm Charts information
  containerRegistry: 10.5.14.40 # image registry url
  repo:
    kind: CHARTMUSEUM # can also be any other supported Helm Chart repository kind, e.g. HARBOR
    url: http://10.5.14.40:8081 #  change to the correct chart repo url; you can verify the address with helm repo add $HELM-REPO
    auth: # username/password; can be omitted if no password is set
      username: "rootuser"
      password: "rootpass123"
  containers:
    # kind: HARBOR # fill in this field if the image registry is HARBOR and you want charts-syncer to automatically create the image repository
    # auth: # username/password; can be omitted if no password is set
      # username: "admin"
      # password: "Harbor12345"

# leverage .relok8s-images.yaml file inside the Charts to move the container images too
relocateContainerImages: true
```
• Run the charts-syncer command to sync the Chart and the images it contains:

```sh
charts-syncer sync --config sync-dao-2048.yaml --insecure --auto-create-repository
```

Expected output (an optional verification sketch follows the log):

```
I1222 15:01:47.119777    8743 sync.go:45] Using config file: "examples/sync-dao-2048.yaml"
W1222 15:01:47.234238    8743 syncer.go:263] Ignoring skipDependencies option as dependency sync is not supported if container image relocation is true or syncing from/to intermediate directory
I1222 15:01:47.234685    8743 sync.go:58] There is 1 chart out of sync!
I1222 15:01:47.234706    8743 sync.go:66] Syncing "dao-2048_1.4.1" chart...
.relok8s-images.yaml hints file found
Computing relocation...

Relocating dao-2048@1.4.1...
Pushing 10.5.14.40/daocloud/dao-2048:v1.4.1...
Done
Done moving /var/folders/vm/08vw0t3j68z9z_4lcqyhg8nm0000gn/T/charts-syncer869598676/dao-2048-1.4.1.tgz
```
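As an optional sanity check (the repo alias addon-test is hypothetical; the URL and credentials come from the sync configuration above), you can point a local Helm client at the target repository and search for the synced chart:

```sh
helm repo add addon-test http://10.5.14.40:8081 --username rootuser --password rootpass123
helm search repo addon-test/dao-2048 --versions
```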
3. After the previous step completes, go to Container Management -> Helm Apps -> Helm Repositories, find the corresponding addon, click Sync Repository in the action column, and then return to Helm Templates to see the uploaded Helm app.

4. You can then install, upgrade, and uninstall it as usual.

                                                              "},{"location":"admin/kpanda/helm/Import-addon.html#_3","title":"\u5728\u7ebf\u73af\u5883","text":"

In an online environment, the Helm Repo address is release.daocloud.io. If a user has no permission to add a Helm Repo, they cannot import custom Helm apps into the built-in system addons. In that case you can add your own self-hosted Helm repository, and then follow the Sync the Helm Chart steps from the offline environment to integrate your Helm repository into the platform.

                                                              "},{"location":"admin/kpanda/helm/helm-app.html","title":"\u7ba1\u7406 Helm \u5e94\u7528","text":"

The container management module supports UI-based Helm management, including creating Helm instances from Helm templates, customizing Helm instance parameters, and managing the full lifecycle of Helm instances.

This section takes cert-manager as an example to introduce how to create and manage Helm apps through the container management UI.

                                                              "},{"location":"admin/kpanda/helm/helm-app.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
• The container management module has joined a Kubernetes cluster or a Kubernetes cluster has been created, and the cluster's UI is accessible.

• A namespace and a user have been created, and the user has been granted NS Admin or higher permissions; see Namespace Authorization for details.

                                                              "},{"location":"admin/kpanda/helm/helm-app.html#helm_1","title":"\u5b89\u88c5 Helm \u5e94\u7528","text":"

Follow the steps below to install a Helm app.

1. Click a cluster name to enter Cluster Details.

2. In the left navigation bar, click Helm Apps -> Helm Templates to enter the Helm Templates page.

On the Helm Templates page, select the Helm repository named addon; the page then shows all Helm chart templates in the addon repository. Click the Chart named cert-manager.

3. On the installation page you can see the Chart's details. Select the version to install in the upper right corner and click the Install button. Here we install version v1.9.1.

4. Configure the name, namespace, and version. You can also customize parameters by editing the YAML in the parameter configuration area below. Click OK.

5. The system automatically returns to the Helm app list. The newly created Helm app's status is Installing, and after a while it changes to Running.

                                                              "},{"location":"admin/kpanda/helm/helm-app.html#helm_2","title":"\u66f4\u65b0 Helm \u5e94\u7528","text":"

After installing a Helm app through the UI, you can update it. Note: only Helm apps installed through the UI support UI-based updates.

Follow the steps below to update a Helm app.

1. Click a cluster name to enter Cluster Details.

2. In the left navigation bar, click Helm Apps to enter the Helm app list page.

On the Helm app list page, select the Helm app to update, click the ⋮ action button on the right side of the list, and choose Update from the dropdown.

3. After clicking Update, the system jumps to the update page, where you can update the Helm app as needed. Here we take updating the http port of the dao-2048 app as an example.

4. After modifying the parameters, you can click the Changes button under the parameter configuration to compare the files before and after the modification. Once confirmed, click the OK button at the bottom to finish updating the Helm app.

5. The system automatically returns to the Helm app list, and a popup in the upper right corner indicates the update succeeded.

                                                              "},{"location":"admin/kpanda/helm/helm-app.html#helm_3","title":"\u67e5\u770b Helm \u64cd\u4f5c\u8bb0\u5f55","text":"

Every installation, update, and deletion of a Helm app has detailed operation records and logs available for viewing.

1. In the left navigation bar, click Cluster Operations -> Recent Operations, then select the Helm Operations tab at the top of the page. Each record corresponds to one install/update/delete operation.

                                                              2. \u5982\u9700\u67e5\u770b\u6bcf\u4e00\u6b21\u64cd\u4f5c\u7684\u8be6\u7ec6\u65e5\u5fd7\uff1a\u5728\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u65e5\u5fd7 \u3002

                                                              3. \u6b64\u65f6\u9875\u9762\u4e0b\u65b9\u5c06\u4ee5\u63a7\u5236\u53f0\u7684\u5f62\u5f0f\u5c55\u793a\u8be6\u7ec6\u7684\u8fd0\u884c\u65e5\u5fd7\u3002
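For CLI-managed releases, Helm keeps a comparable revision history. A small sketch, assuming a release named dao-2048 in the default namespace:

```bash
# List every install/upgrade/rollback revision of the release
helm history dao-2048 -n default

# Show the status and notes of the latest revision
helm status dao-2048 -n default
```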

                                                              "},{"location":"admin/kpanda/helm/helm-app.html#helm_4","title":"\u5220\u9664 Helm \u5e94\u7528","text":"

                                                              \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u5220\u9664 Helm \u5e94\u7528\u3002

                                                              1. \u627e\u5230\u5f85\u5220\u9664\u7684 Helm \u5e94\u7528\u6240\u5728\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                                              2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb Helm \u5e94\u7528 \uff0c\u8fdb\u5165 Helm \u5e94\u7528\u5217\u8868\u9875\u9762\u3002

                                                                \u5728 Helm \u5e94\u7528\u5217\u8868\u9875\u9009\u62e9\u60a8\u9700\u8981\u5220\u9664\u7684 Helm \u5e94\u7528\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\uff0c\u5728\u4e0b\u62c9\u9009\u62e9\u4e2d\u9009\u62e9 \u5220\u9664 \u3002

                                                              3. \u5728\u5f39\u7a97\u5185\u8f93\u5165 Helm \u5e94\u7528\u7684\u540d\u79f0\u8fdb\u884c\u786e\u8ba4\uff0c\u7136\u540e\u70b9\u51fb \u5220\u9664 \u6309\u94ae\u3002
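For comparison, deleting a CLI-managed release; a minimal sketch assuming a release named dao-2048 in the default namespace:

```bash
# Remove the release and its resources; --keep-history retains the record for auditing
helm uninstall dao-2048 -n default --keep-history
```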

                                                              "},{"location":"admin/kpanda/helm/helm-repo.html","title":"\u7ba1\u7406 Helm \u4ed3\u5e93","text":"

                                                              Helm \u4ed3\u5e93\u662f\u7528\u6765\u5b58\u50a8\u548c\u53d1\u5e03 Chart \u7684\u5b58\u50a8\u5e93\u3002Helm \u5e94\u7528\u6a21\u5757\u652f\u6301\u901a\u8fc7 HTTP(s) \u534f\u8bae\u6765\u8bbf\u95ee\u5b58\u50a8\u5e93\u4e2d\u7684 Chart \u5305\u3002\u7cfb\u7edf\u9ed8\u8ba4\u5185\u7f6e\u4e86\u4e0b\u8868\u6240\u793a\u7684 4 \u4e2a Helm \u4ed3\u5e93\u4ee5\u6ee1\u8db3\u4f01\u4e1a\u751f\u4ea7\u8fc7\u7a0b\u4e2d\u7684\u5e38\u89c1\u9700\u6c42\u3002

                                                              \u4ed3\u5e93 \u63cf\u8ff0 \u793a\u4f8b partner \u7531\u751f\u6001\u5408\u4f5c\u4f19\u4f34\u6240\u63d0\u4f9b\u7684\u5404\u7c7b\u4f18\u8d28\u7279\u8272 Chart tidb system \u7cfb\u7edf\u6838\u5fc3\u529f\u80fd\u7ec4\u4ef6\u53ca\u90e8\u5206\u9ad8\u7ea7\u529f\u80fd\u6240\u5fc5\u9700\u4f9d\u8d56\u7684 Chart\uff0c\u5982\u5fc5\u9700\u5b89\u88c5 insight-agent \u624d\u80fd\u591f\u83b7\u53d6\u96c6\u7fa4\u7684\u76d1\u63a7\u4fe1\u606f Insight addon \u4e1a\u52a1\u573a\u666f\u4e2d\u5e38\u89c1\u7684 Chart cert-manager community Kubernetes \u793e\u533a\u8f83\u4e3a\u70ed\u95e8\u7684\u5f00\u6e90\u7ec4\u4ef6 Chart Istio

                                                              \u9664\u4e0a\u8ff0\u9884\u7f6e\u4ed3\u5e93\u5916\uff0c\u60a8\u4e5f\u53ef\u4ee5\u81ea\u884c\u6dfb\u52a0\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93\u3002\u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u6dfb\u52a0\u3001\u66f4\u65b0\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93\u3002

                                                              "},{"location":"admin/kpanda/helm/helm-repo.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762

                                                              • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 NS Admin \u6216\u66f4\u9ad8\u6743\u9650 \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                              • \u5982\u679c\u4f7f\u7528\u79c1\u6709\u4ed3\u5e93\uff0c\u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u62e5\u6709\u5bf9\u8be5\u79c1\u6709\u4ed3\u5e93\u7684\u8bfb\u5199\u6743\u9650\u3002

                                                              "},{"location":"admin/kpanda/helm/helm-repo.html#helm_1","title":"\u5f15\u5165\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93","text":"

                                                              \u4e0b\u9762\u4ee5 Kubevela \u516c\u5f00\u7684\u955c\u50cf\u4ed3\u5e93\u4e3a\u4f8b\uff0c\u5f15\u5165 Helm \u4ed3\u5e93\u5e76\u7ba1\u7406\u3002

                                                              1. \u627e\u5230\u9700\u8981\u5f15\u5165\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                                              2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u4f9d\u6b21\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u4ed3\u5e93 \uff0c\u8fdb\u5165 Helm \u4ed3\u5e93\u9875\u9762\u3002

                                                              3. \u5728 Helm \u4ed3\u5e93\u9875\u9762\u70b9\u51fb \u521b\u5efa\u4ed3\u5e93 \u6309\u94ae\uff0c\u8fdb\u5165\u521b\u5efa\u4ed3\u5e93\u9875\u9762\uff0c\u6309\u7167\u4e0b\u8868\u914d\u7f6e\u76f8\u5173\u53c2\u6570\u3002

                                                                • \u4ed3\u5e93\u540d\u79f0\uff1a\u8bbe\u7f6e\u4ed3\u5e93\u540d\u79f0\u3002\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26 - \uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u5e76\u7ed3\u5c3e\uff0c\u4f8b\u5982 kubevela
                                                                • \u4ed3\u5e93\u5730\u5740\uff1a\u7528\u6765\u6307\u5411\u76ee\u6807 Helm \u4ed3\u5e93\u7684 http\uff08s\uff09\u5730\u5740\u3002\u4f8b\u5982 https://charts.kubevela.net/core
                                                                • \u8df3\u8fc7 TLS \u9a8c\u8bc1: \u5982\u679c\u6dfb\u52a0\u7684 Helm \u4ed3\u5e93\u4e3a https \u5730\u5740\u4e14\u9700\u8df3\u8fc7 TLS \u9a8c\u8bc1\uff0c\u53ef\u4ee5\u52fe\u9009\u6b64\u9009\u9879\uff0c\u9ed8\u8ba4\u4e3a\u4e0d\u52fe\u9009
                                                                • \u8ba4\u8bc1\u65b9\u5f0f\uff1a\u8fde\u63a5\u4ed3\u5e93\u5730\u5740\u540e\u7528\u6765\u8fdb\u884c\u8eab\u4efd\u6821\u9a8c\u7684\u65b9\u5f0f\u3002\u5bf9\u4e8e\u516c\u5f00\u4ed3\u5e93\uff0c\u53ef\u4ee5\u9009\u62e9 None \uff0c\u79c1\u6709\u7684\u4ed3\u5e93\u9700\u8981\u8f93\u5165\u7528\u6237\u540d/\u5bc6\u7801\u4ee5\u8fdb\u884c\u8eab\u4efd\u6821\u9a8c
                                                                • \u6807\u7b7e\uff1a\u4e3a\u8be5 Helm \u4ed3\u5e93\u6dfb\u52a0\u6807\u7b7e\u3002\u4f8b\u5982 key: repo4\uff1bvalue: Kubevela
                                                                • \u6ce8\u89e3\uff1a\u4e3a\u8be5 Helm \u4ed3\u5e93\u6dfb\u52a0\u6ce8\u89e3\u3002\u4f8b\u5982 key: repo4\uff1bvalue: Kubevela
                                                                • \u63cf\u8ff0\uff1a\u4e3a\u8be5 Helm \u4ed3\u5e93\u6dfb\u52a0\u63cf\u8ff0\u3002\u4f8b\u5982\uff1a\u8fd9\u662f\u4e00\u4e2a Kubevela \u516c\u5f00 Helm \u4ed3\u5e93

                                                              4. \u70b9\u51fb \u786e\u5b9a \uff0c\u5b8c\u6210 Helm \u4ed3\u5e93\u7684\u521b\u5efa\u3002\u9875\u9762\u4f1a\u81ea\u52a8\u8df3\u8f6c\u81f3 Helm \u4ed3\u5e93\u5217\u8868\u3002
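For reference, the Helm CLI equivalent of registering the same repository; the name kubevela and the URL are taken from the example above:

```bash
# Register the repository and refresh its local index
helm repo add kubevela https://charts.kubevela.net/core
helm repo update kubevela

# Confirm that its charts are visible
helm search repo kubevela
```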

                                                              "},{"location":"admin/kpanda/helm/helm-repo.html#helm_2","title":"\u66f4\u65b0 Helm \u4ed3\u5e93","text":"

                                                              \u5f53 Helm \u4ed3\u5e93\u7684\u5730\u5740\u4fe1\u606f\u53d1\u751f\u53d8\u5316\u65f6\uff0c\u53ef\u4ee5\u66f4\u65b0 Helm \u4ed3\u5e93\u7684\u5730\u5740\u3001\u8ba4\u8bc1\u65b9\u5f0f\u3001\u6807\u7b7e\u3001\u6ce8\u89e3\u53ca\u63cf\u8ff0\u4fe1\u606f\u3002

                                                              1. \u627e\u5230\u5f85\u66f4\u65b0\u4ed3\u5e93\u6240\u5728\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                                              2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u4f9d\u6b21\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u4ed3\u5e93 \uff0c\u8fdb\u5165 Helm \u4ed3\u5e93\u5217\u8868\u9875\u9762\u3002

                                                              3. \u5728\u4ed3\u5e93\u5217\u8868\u9875\u9762\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684 Helm \u4ed3\u5e93\uff0c\u5728\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \u6309\u94ae\uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u70b9\u51fb \u66f4\u65b0 \u3002

                                                              4. \u5728 \u7f16\u8f91 Helm \u4ed3\u5e93 \u9875\u9762\u8fdb\u884c\u66f4\u65b0\uff0c\u5b8c\u6210\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                                                              5. \u8fd4\u56de Helm \u4ed3\u5e93\u5217\u8868\uff0c\u5c4f\u5e55\u63d0\u793a\u66f4\u65b0\u6210\u529f\u3002

                                                              "},{"location":"admin/kpanda/helm/helm-repo.html#helm_3","title":"\u5220\u9664 Helm \u4ed3\u5e93","text":"

                                                              \u9664\u4e86\u5f15\u5165\u3001\u66f4\u65b0\u4ed3\u5e93\u5916\uff0c\u60a8\u4e5f\u53ef\u4ee5\u5c06\u4e0d\u9700\u8981\u7684\u4ed3\u5e93\u5220\u9664\uff0c\u5305\u62ec\u7cfb\u7edf\u9884\u7f6e\u4ed3\u5e93\u548c\u7b2c\u4e09\u65b9\u4ed3\u5e93\u3002

                                                              1. \u627e\u5230\u5f85\u5220\u9664\u4ed3\u5e93\u6240\u5728\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                                              2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u4f9d\u6b21\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u4ed3\u5e93 \uff0c\u8fdb\u5165 Helm \u4ed3\u5e93\u5217\u8868\u9875\u9762\u3002

                                                              3. \u5728\u4ed3\u5e93\u5217\u8868\u9875\u9762\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684 Helm \u4ed3\u5e93\uff0c\u5728\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \u6309\u94ae\uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u70b9\u51fb \u5220\u9664 \u3002

                                                              4. \u8f93\u5165\u4ed3\u5e93\u540d\u79f0\u8fdb\u884c\u786e\u8ba4\uff0c\u70b9\u51fb \u5220\u9664 \u3002

                                                              5. \u8fd4\u56de Helm \u4ed3\u5e93\u5217\u8868\uff0c\u5c4f\u5e55\u63d0\u793a\u5220\u9664\u6210\u529f\u3002

                                                              "},{"location":"admin/kpanda/helm/multi-archi-helm.html","title":"Helm \u5e94\u7528\u591a\u67b6\u6784\u548c\u5347\u7ea7\u5bfc\u5165\u6b65\u9aa4","text":"

                                                              \u901a\u5e38\u5728\u591a\u67b6\u6784\u96c6\u7fa4\u4e2d\uff0c\u4e5f\u4f1a\u4f7f\u7528\u591a\u67b6\u6784\u7684 Helm \u5305\u6765\u90e8\u7f72\u5e94\u7528\uff0c\u4ee5\u89e3\u51b3\u67b6\u6784\u5dee\u5f02\u5e26\u6765\u7684\u90e8\u7f72\u95ee\u9898\u3002 \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u5c06\u5355\u67b6\u6784 Helm \u5e94\u7528\u878d\u5408\u4e3a\u591a\u67b6\u6784\uff0c\u4ee5\u53ca\u591a\u67b6\u6784\u4e0e\u591a\u67b6\u6784 Helm \u5e94\u7528\u7684\u76f8\u4e92\u878d\u5408\u3002

                                                              "},{"location":"admin/kpanda/helm/multi-archi-helm.html#_1","title":"\u5bfc\u5165","text":""},{"location":"admin/kpanda/helm/multi-archi-helm.html#_2","title":"\u5355\u67b6\u6784\u5bfc\u5165","text":"

                                                              \u51c6\u5907\u597d\u5f85\u5bfc\u5165\u7684\u79bb\u7ebf\u5305 addon-offline-full-package-${version}-${arch}.tar.gz \u3002 \u628a\u8def\u5f84\u586b\u5199\u81f3 clusterConfig.yml \u914d\u7f6e\u6587\u4ef6\uff0c\u4f8b\u5982\uff1a

                                                              addonPackage:\n  path: \"/home/addon-offline-full-package-v0.9.0-amd64.tar.gz\"\n

Then run the import command:

                                                              ~/dce5-installer cluster-create -c /home/dce5/sample/clusterConfig.yaml -m /home/dce5/sample/manifest.yaml -d -j13\n
                                                              "},{"location":"admin/kpanda/helm/multi-archi-helm.html#_3","title":"\u591a\u67b6\u6784\u878d\u5408","text":"

                                                              \u51c6\u5907\u597d\u5f85\u878d\u5408\u7684\u79bb\u7ebf\u5305 addon-offline-full-package-${version}-${arch}.tar.gz\u3002

                                                              \u4ee5 addon-offline-full-package-v0.9.0-arm64.tar.gz \u4e3a\u4f8b\uff0c\u6267\u884c\u5bfc\u5165\u547d\u4ee4\uff1a

                                                              ~/dce5-installer import-addon -c /home/dce5/sample/clusterConfig.yaml --addon-path=/home/addon-offline-full-package-v0.9.0-arm64.tar.gz\n
                                                              "},{"location":"admin/kpanda/helm/multi-archi-helm.html#_4","title":"\u5347\u7ea7","text":""},{"location":"admin/kpanda/helm/multi-archi-helm.html#_5","title":"\u5355\u67b6\u6784\u5347\u7ea7","text":"

                                                              \u51c6\u5907\u597d\u5f85\u5bfc\u5165\u7684\u79bb\u7ebf\u5305 addon-offline-full-package-${version}-${arch}.tar.gz\u3002

                                                              \u628a\u8def\u5f84\u586b\u5199\u81f3 clusterConfig.yml \u914d\u7f6e\u6587\u4ef6\uff0c\u4f8b\u5982\uff1a

                                                              addonPackage:\n  path: \"/home/addon-offline-full-package-v0.11.0-amd64.tar.gz\"\n

Then run the import command:

                                                              ~/dce5-installer cluster-create -c /home/dce5/sample/clusterConfig.yaml -m /home/dce5/sample/manifest.yaml -d -j13\n
                                                              "},{"location":"admin/kpanda/helm/multi-archi-helm.html#_6","title":"\u591a\u67b6\u6784\u878d\u5408","text":"

                                                              \u51c6\u5907\u597d\u5f85\u878d\u5408\u7684\u79bb\u7ebf\u5305 addon-offline-full-package-${version}-${arch}.tar.gz\u3002

                                                              \u4ee5 addon-offline-full-package-v0.11.0-arm64.tar.gz \u4e3a\u4f8b\uff0c\u6267\u884c\u5bfc\u5165\u547d\u4ee4\uff1a

                                                              ~/dce5-installer import-addon -c /home/dce5/sample/clusterConfig.yaml --addon-path=/home/addon-offline-full-package-v0.11.0-arm64.tar.gz\n
                                                              "},{"location":"admin/kpanda/helm/multi-archi-helm.html#_7","title":"\u6ce8\u610f\u4e8b\u9879","text":""},{"location":"admin/kpanda/helm/multi-archi-helm.html#_8","title":"\u78c1\u76d8\u7a7a\u95f4","text":"

                                                              \u79bb\u7ebf\u5305\u6bd4\u8f83\u5927\uff0c\u4e14\u8fc7\u7a0b\u4e2d\u9700\u8981\u89e3\u538b\u548c load \u955c\u50cf\uff0c\u9700\u8981\u9884\u7559\u5145\u8db3\u7684\u7a7a\u95f4\uff0c\u5426\u5219\u53ef\u80fd\u5728\u8fc7\u7a0b\u4e2d\u62a5 \u201cno space left\u201d \u800c\u4e2d\u65ad\u3002

                                                              "},{"location":"admin/kpanda/helm/multi-archi-helm.html#_9","title":"\u5931\u8d25\u540e\u91cd\u8bd5","text":"

                                                              \u5982\u679c\u5728\u591a\u67b6\u6784\u878d\u5408\u6b65\u9aa4\u6267\u884c\u5931\u8d25\uff0c\u91cd\u8bd5\u524d\u9700\u8981\u6e05\u7406\u4e00\u4e0b\u6b8b\u7559\uff1a

                                                              rm -rf addon-offline-target-package\n
                                                              "},{"location":"admin/kpanda/helm/multi-archi-helm.html#_10","title":"\u955c\u50cf\u7a7a\u95f4","text":"

                                                              \u5982\u679c\u878d\u5408\u7684\u79bb\u7ebf\u5305\u4e2d\u5305\u542b\u4e86\u4e0e\u5bfc\u5165\u7684\u79bb\u7ebf\u5305\u4e0d\u4e00\u81f4\u7684\u955c\u50cf\u7a7a\u95f4\uff0c\u53ef\u80fd\u4f1a\u5728\u878d\u5408\u8fc7\u7a0b\u4e2d\u56e0\u4e3a\u955c\u50cf\u7a7a\u95f4\u4e0d\u5b58\u5728\u800c\u62a5\u9519\uff1a

                                                              \u89e3\u51b3\u529e\u6cd5\uff1a\u53ea\u9700\u8981\u5728\u878d\u5408\u4e4b\u524d\u521b\u5efa\u597d\u8be5\u955c\u50cf\u7a7a\u95f4\u5373\u53ef\uff0c\u4f8b\u5982\u4e0a\u56fe\u62a5\u9519\u53ef\u901a\u8fc7\u521b\u5efa\u955c\u50cf\u7a7a\u95f4 localhost \u63d0\u524d\u907f\u514d\u3002

                                                              "},{"location":"admin/kpanda/helm/multi-archi-helm.html#_11","title":"\u67b6\u6784\u51b2\u7a81","text":"

                                                              \u5347\u7ea7\u81f3\u4f4e\u4e8e 0.12.0 \u7248\u672c\u7684 addon \u65f6\uff0c\u7531\u4e8e\u76ee\u6807\u79bb\u7ebf\u5305\u91cc\u7684 charts-syncer \u6ca1\u6709\u68c0\u67e5\u955c\u50cf\u5b58\u5728\u5219\u4e0d\u63a8\u9001\u529f\u80fd\uff0c\u56e0\u6b64\u4f1a\u5728\u5347\u7ea7\u7684\u8fc7\u7a0b\u4e2d\u4f1a\u91cd\u65b0\u628a\u591a\u67b6\u6784\u51b2\u6210\u5355\u67b6\u6784\u3002 \u4f8b\u5982\uff1a\u5728 v0.10 \u7248\u672c\u5c06 addon \u5b9e\u73b0\u4e3a\u591a\u67b6\u6784\uff0c\u6b64\u65f6\u82e5\u5347\u7ea7\u4e3a v0.11 \u7248\u672c\uff0c\u5219\u591a\u67b6\u6784 addon \u4f1a\u88ab\u8986\u76d6\u4e3a\u5355\u67b6\u6784\uff1b\u82e5\u5347\u7ea7\u4e3a 0.12.0 \u53ca\u4ee5\u4e0a\u7248\u672c\u5219\u4ecd\u80fd\u591f\u4fdd\u6301\u591a\u67b6\u6784\u3002

                                                              "},{"location":"admin/kpanda/helm/upload-helm.html","title":"\u4e0a\u4f20 Helm \u6a21\u677f","text":"

                                                              \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u4e0a\u4f20 Helm \u6a21\u677f\uff0c\u64cd\u4f5c\u6b65\u9aa4\u89c1\u4e0b\u6587\u3002

                                                              1. \u5f15\u5165 Helm \u4ed3\u5e93\uff0c\u64cd\u4f5c\u6b65\u9aa4\u53c2\u8003\u5f15\u5165\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93\u3002

                                                              2. \u4e0a\u4f20 Helm Chart \u5230 Helm \u4ed3\u5e93\u3002

Client upload

  Note

  This method applies to Harbor, ChartMuseum, and JFrog type repositories.

  1. Log in to a node that can reach the Helm repository, upload the Helm binary to the node, and install the cm-push plugin (this requires Internet access and Git installed in advance).

    For the plugin installation procedure, see Install the cm-push Plugin.

  2. Push the Helm Chart to the Helm repository by running the following command:

                                                                  helm cm-push ${charts-dir} ${HELM_REPO_URL} --username ${username} --password ${password}\n

Field descriptions (a filled-in example follows this list):

  • charts-dir: the directory of the Helm Chart, or a packaged Chart (i.e. a .tgz file).
  • HELM_REPO_URL: the URL of the Helm repository.
  • username/password: the username and password of a Helm repository user with push permission.
  • If the repository is accessed over HTTPS and certificate verification must be skipped, add the --insecure parameter.
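A filled-in example of the push command above; the chart file name, repository URL, and credentials are placeholders for illustration:

```bash
# Push a packaged chart (.tgz) to the repository; append --insecure to skip TLS verification
helm cm-push mychart-1.0.0.tgz https://charts.example.com --username admin --password 'changeMe'
```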

Page upload

  Note

  This method applies only to Harbor type repositories.

  1. Log in to the Harbor web UI; make sure the logged-in user has push permission.

  2. Go to the corresponding project, select the Helm Charts tab, and click the Upload button on the page to complete the Helm Chart upload.

3. Synchronize the remote repository data

  Manual sync

  By default, Helm repository auto-refresh is disabled for a cluster, so a manual sync is required. The rough steps are:

  Go to Helm Apps -> Helm Repositories, click the ┇ button on the right side of the repository list, and select Sync Repository to complete the data sync.

  Auto sync: to enable automatic Helm repository syncing, go to Cluster Maintenance -> Cluster Settings -> Advanced Configuration and turn on the Helm repository auto-refresh switch.

                                                              "},{"location":"admin/kpanda/inspect/index.html","title":"\u96c6\u7fa4\u5de1\u68c0","text":"

                                                              \u96c6\u7fa4\u5de1\u68c0\u53ef\u4ee5\u901a\u8fc7\u81ea\u52a8\u6216\u624b\u52a8\u65b9\u5f0f\uff0c\u5b9a\u671f\u6216\u968f\u65f6\u68c0\u67e5\u96c6\u7fa4\u7684\u6574\u4f53\u5065\u5eb7\u72b6\u6001\uff0c\u8ba9\u7ba1\u7406\u5458\u83b7\u5f97\u4fdd\u969c\u96c6\u7fa4\u5b89\u5168\u7684\u4e3b\u52a8\u6743\u3002 \u57fa\u4e8e\u5408\u7406\u7684\u5de1\u68c0\u8ba1\u5212\uff0c\u8fd9\u79cd\u4e3b\u52a8\u81ea\u53d1\u7684\u96c6\u7fa4\u68c0\u67e5\u53ef\u4ee5\u8ba9\u7ba1\u7406\u5458\u968f\u65f6\u638c\u63e1\u96c6\u7fa4\u72b6\u6001\uff0c\u6446\u8131\u4e4b\u524d\u51fa\u73b0\u6545\u969c\u65f6\u53ea\u80fd\u88ab\u52a8\u6392\u67e5\u95ee\u9898\u7684\u56f0\u5883\uff0c\u505a\u5230\u4e8b\u5148\u76d1\u63a7\u3001\u63d0\u524d\u9632\u8303\u3002

                                                              \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u63d0\u4f9b\u7684\u96c6\u7fa4\u5de1\u68c0\u529f\u80fd\uff0c\u652f\u6301\u4ece\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u5bb9\u5668\u7ec4\uff08Pod\uff09\u4e09\u4e2a\u7ef4\u5ea6\u8fdb\u884c\u81ea\u5b9a\u4e49\u5de1\u68c0\u9879\uff0c\u5de1\u68c0\u7ed3\u675f\u540e\u4f1a\u81ea\u52a8\u751f\u6210\u53ef\u89c6\u5316\u7684\u5de1\u68c0\u62a5\u544a\u3002

                                                              • \u96c6\u7fa4\u7ef4\u5ea6\uff1a\u68c0\u67e5\u96c6\u7fa4\u4e2d\u7cfb\u7edf\u7ec4\u4ef6\u7684\u8fd0\u884c\u60c5\u51b5\uff0c\u5305\u62ec\u96c6\u7fa4\u72b6\u6001\u3001\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\u4ee5\u53ca\u63a7\u5236\u8282\u70b9\u7279\u6709\u7684\u5de1\u68c0\u9879\u7b49\uff0c\u4f8b\u5982 kube-apiserver \u548c etcd \u7684\u72b6\u6001\u3002
                                                              • \u8282\u70b9\u7ef4\u5ea6\uff1a\u5305\u62ec\u63a7\u5236\u8282\u70b9\u548c\u5de5\u4f5c\u8282\u70b9\u901a\u7528\u7684\u68c0\u67e5\u9879\uff0c\u4f8b\u5982\u8282\u70b9\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\u3001\u53e5\u67c4\u6570\u3001PID \u72b6\u6001\u3001\u7f51\u7edc\u72b6\u6001\u3002
                                                              • \u5bb9\u5668\u7ec4\u7ef4\u5ea6\uff1a\u68c0\u67e5 Pod \u7684 CPU \u548c\u5185\u5b58\u4f7f\u7528\u60c5\u51b5\u3001\u8fd0\u884c\u72b6\u6001\u3001PV \u548c PVC \u7684\u72b6\u6001\u7b49\u3002
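For reference only, a few kubectl commands that spot-check similar items by hand on a kubeadm-style cluster; this is a sketch, not what the inspection feature runs internally:

```bash
# Cluster dimension: control-plane component health (kube-apiserver, etcd, ...)
kubectl get pods -n kube-system -l tier=control-plane

# Node dimension: node status, pressure conditions, and resource usage
kubectl get nodes -o wide
kubectl describe node <node-name> | grep -A 6 "Conditions:"

# Pod dimension: per-Pod CPU/memory usage (requires metrics-server)
kubectl top pods -A
```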

To learn about or run security-oriented inspections, see the security scan types supported by the 算丰 AI Computing Platform.

                                                              "},{"location":"admin/kpanda/inspect/config.html","title":"\u521b\u5efa\u5de1\u68c0\u914d\u7f6e","text":"

                                                              \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u63d0\u4f9b\u96c6\u7fa4\u5de1\u68c0\u529f\u80fd\uff0c\u652f\u6301\u4ece\u96c6\u7fa4\u7ef4\u5ea6\u3001\u8282\u70b9\u7ef4\u5ea6\u3001\u5bb9\u5668\u7ec4\u7ef4\u5ea6\u8fdb\u884c\u5de1\u68c0\u3002

                                                              • \u96c6\u7fa4\u7ef4\u5ea6\uff1a\u68c0\u67e5\u96c6\u7fa4\u4e2d\u7cfb\u7edf\u7ec4\u4ef6\u7684\u8fd0\u884c\u60c5\u51b5\uff0c\u5305\u62ec\u96c6\u7fa4\u72b6\u6001\u3001\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\uff0c\u4ee5\u53ca\u63a7\u5236\u8282\u70b9\u7279\u6709\u7684\u5de1\u68c0\u9879\u7b49\uff0c\u4f8b\u5982 kube-apiserver \u548c etcd \u7684\u72b6\u6001\u3002
                                                              • \u8282\u70b9\u7ef4\u5ea6\uff1a\u5305\u62ec\u63a7\u5236\u8282\u70b9\u548c\u5de5\u4f5c\u8282\u70b9\u901a\u7528\u7684\u68c0\u67e5\u9879\uff0c\u4f8b\u5982\u8282\u70b9\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\u3001\u53e5\u67c4\u6570\u3001PID \u72b6\u6001\u3001\u7f51\u7edc\u72b6\u6001\u3002
                                                              • \u5bb9\u5668\u7ec4\u7ef4\u5ea6\uff1a\u68c0\u67e5 Pod \u7684 CPU \u548c\u5185\u5b58\u4f7f\u7528\u60c5\u51b5\u3001\u8fd0\u884c\u72b6\u6001\u3001PV \u548c PVC \u7684\u72b6\u6001\u7b49\u3002

                                                              \u4e0b\u9762\u4ecb\u7ecd\u5982\u4f55\u521b\u5efa\u5de1\u68c0\u914d\u7f6e\u3002

                                                              "},{"location":"admin/kpanda/inspect/config.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u96c6\u7fa4
                                                              • \u6240\u9009\u96c6\u7fa4\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u4e14\u5df2\u7ecf\u5728\u96c6\u7fa4\u4e2d\u5b89\u88c5\u4e86 insight \u7ec4\u4ef6
                                                              "},{"location":"admin/kpanda/inspect/config.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u96c6\u7fa4\u5de1\u68c0 \u3002

                                                              2. \u5728\u9875\u9762\u53f3\u4fa7\u70b9\u51fb \u5de1\u68c0\u914d\u7f6e \u3002

                                                              3. \u53c2\u8003\u4ee5\u4e0b\u8bf4\u660e\u586b\u5199\u5de1\u68c0\u914d\u7f6e\uff0c\u7136\u540e\u5728\u9875\u9762\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002

                                                                • \u96c6\u7fa4\uff1a\u4e0b\u62c9\u9009\u62e9\u8981\u5bf9\u54ea\u4e9b\u96c6\u7fa4\u8fdb\u884c\u5de1\u68c0\u3002\u5982\u679c\u9009\u62e9\u591a\u4e2a\u96c6\u7fa4\uff0c\u5219\u81ea\u52a8\u751f\u6210\u591a\u4e2a\u5de1\u68c0\u914d\u7f6e\uff08\u4ec5\u5de1\u68c0\u7684\u96c6\u7fa4\u4e0d\u4e00\u81f4\uff0c\u5176\u4ed6\u914d\u7f6e\u90fd\u5b8c\u5168\u4e00\u81f4\uff09
                                                                • \u5b9a\u65f6\u5de1\u68c0\uff1a\u542f\u7528\u540e\u53ef\u6839\u636e\u4e8b\u5148\u8bbe\u7f6e\u7684\u5de1\u68c0\u9891\u7387\u5b9a\u671f\u81ea\u52a8\u6267\u884c\u96c6\u7fa4\u5de1\u68c0
                                                                • \u5de1\u68c0\u9891\u7387\uff1a\u8bbe\u7f6e\u81ea\u52a8\u5de1\u68c0\u7684\u5468\u671f\uff0c\u4f8b\u5982\u6bcf\u5468\u4e8c\u4e0a\u5348\u5341\u70b9\u3002\u652f\u6301\u81ea\u5b9a\u4e49 CronExpression\uff0c\u53ef\u53c2\u8003 Cron \u65f6\u95f4\u8868\u8bed\u6cd5
                                                                • \u5de1\u68c0\u8bb0\u5f55\u4fdd\u7559\u6761\u6570\uff1a\u7d2f\u8ba1\u6700\u591a\u4fdd\u7559\u591a\u5c11\u6761\u5de1\u68c0\u8bb0\u5f55\uff0c\u5305\u62ec\u6240\u6709\u96c6\u7fa4\u7684\u5de1\u68c0\u8bb0\u5f55
                                                                • \u53c2\u6570\u914d\u7f6e\uff1a\u53c2\u6570\u914d\u7f6e\u5206\u4e3a\u96c6\u7fa4\u7ef4\u5ea6\u3001\u8282\u70b9\u7ef4\u5ea6\u3001\u5bb9\u5668\u7ec4\u7ef4\u5ea6\u4e09\u90e8\u5206\uff0c\u53ef\u4ee5\u6839\u636e\u573a\u666f\u9700\u6c42\u542f\u7528\u6216\u7981\u7528\u67d0\u4e9b\u5de1\u68c0\u9879\u3002
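For instance, the "every Tuesday at 10 AM" frequency mentioned above corresponds to this standard five-field cron expression:

```bash
# minute hour day-of-month month day-of-week
CRON_EXPRESSION="0 10 * * 2"   # day-of-week 2 = Tuesday
```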

After an inspection configuration is created, it automatically appears in the inspection configuration list. Click the more-actions button on the right of a configuration to run an inspection immediately, modify the configuration, or delete the configuration and its inspection records.

• Click Inspect to run an inspection immediately based on this configuration.
• Click Inspection Configuration to modify the configuration.
• Click Delete to delete the configuration and its historical inspection records

Note

• After an inspection configuration is created, if Scheduled Inspection is enabled, inspections run automatically at the specified times.
• If Scheduled Inspection is not enabled, inspections must be triggered manually.
                                                              "},{"location":"admin/kpanda/inspect/inspect.html","title":"\u6267\u884c\u96c6\u7fa4\u5de1\u68c0","text":"

                                                              \u5de1\u68c0\u914d\u7f6e\u521b\u5efa\u5b8c\u6210\u540e\uff0c\u5982\u679c\u542f\u7528\u4e86 \u5b9a\u65f6\u5de1\u68c0 \u914d\u7f6e\uff0c\u5219\u4f1a\u5728\u6307\u5b9a\u65f6\u95f4\u81ea\u52a8\u6267\u884c\u5de1\u68c0\u3002\u5982\u672a\u542f\u7528 \u5b9a\u65f6\u5de1\u68c0 \u914d\u7f6e\uff0c\u5219\u9700\u8981\u624b\u52a8\u89e6\u53d1\u5de1\u68c0\u3002

                                                              \u6b64\u9875\u4ecb\u7ecd\u5982\u4f55\u624b\u52a8\u6267\u884c\u96c6\u7fa4\u5de1\u68c0\u3002

                                                              "},{"location":"admin/kpanda/inspect/inspect.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u96c6\u7fa4
                                                              • \u5df2\u521b\u5efa\u5de1\u68c0\u914d\u7f6e
                                                              • \u6240\u9009\u96c6\u7fa4\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u4e14\u5df2\u7ecf\u5728\u96c6\u7fa4\u4e2d\u5b89\u88c5\u4e86 insight \u7ec4\u4ef6
                                                              "},{"location":"admin/kpanda/inspect/inspect.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                              \u6267\u884c\u5de1\u68c0\u65f6\uff0c\u652f\u6301\u52fe\u9009\u591a\u4e2a\u96c6\u7fa4\u8fdb\u884c\u6279\u91cf\u5de1\u68c0\uff0c\u6216\u8005\u4ec5\u5bf9\u67d0\u4e00\u4e2a\u96c6\u7fa4\u8fdb\u884c\u5355\u72ec\u5de1\u68c0\u3002

                                                              \u6279\u91cf\u5de1\u68c0\u5355\u72ec\u5de1\u68c0
                                                              1. \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7684\u4e00\u7ea7\u5bfc\u822a\u680f\u70b9\u51fb \u96c6\u7fa4\u5de1\u68c0 \uff0c\u7136\u540e\u5728\u9875\u9762\u53f3\u4fa7\u70b9\u51fb \u5de1\u68c0 \u3002

                                                              2. \u52fe\u9009\u9700\u8981\u5de1\u68c0\u7684\u96c6\u7fa4\uff0c\u7136\u540e\u5728\u9875\u9762\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002

                                                                • \u82e5\u9009\u62e9\u591a\u4e2a\u96c6\u7fa4\u8fdb\u884c\u540c\u65f6\u5de1\u68c0\uff0c\u7cfb\u7edf\u5c06\u6839\u636e\u4e0d\u540c\u96c6\u7fa4\u7684\u5de1\u68c0\u914d\u7f6e\u8fdb\u884c\u5de1\u68c0\u3002
                                                                • \u5982\u672a\u8bbe\u7f6e\u96c6\u7fa4\u5de1\u68c0\u914d\u7f6e\uff0c\u5c06\u4f7f\u7528\u7cfb\u7edf\u9ed8\u8ba4\u914d\u7f6e\u3002

                                                              1. \u8fdb\u5165\u96c6\u7fa4\u5de1\u68c0\u9875\u9762\u3002
                                                              2. \u5728\u5bf9\u5e94\u5de1\u68c0\u914d\u7f6e\u7684\u53f3\u4fa7\u70b9\u51fb \u2507 \u66f4\u591a\u64cd\u4f5c\u6309\u94ae\uff0c\u7136\u540e\u5728\u5f39\u51fa\u7684\u83dc\u5355\u4e2d\u9009\u62e9 \u5de1\u68c0 \u5373\u53ef\u3002

                                                              "},{"location":"admin/kpanda/inspect/report.html","title":"\u67e5\u770b\u5de1\u68c0\u62a5\u544a","text":"

                                                              \u5de1\u68c0\u6267\u884c\u5b8c\u6210\u540e\uff0c\u53ef\u4ee5\u67e5\u770b\u5de1\u68c0\u8bb0\u5f55\u548c\u8be6\u7ec6\u7684\u5de1\u68c0\u62a5\u544a\u3002

                                                              "},{"location":"admin/kpanda/inspect/report.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              • \u5df2\u7ecf\u521b\u5efa\u4e86\u5de1\u68c0\u914d\u7f6e
                                                              • \u5df2\u7ecf\u6267\u884c\u8fc7\u81f3\u5c11\u4e00\u6b21\u5de1\u68c0
                                                              "},{"location":"admin/kpanda/inspect/report.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u8fdb\u5165\u96c6\u7fa4\u5de1\u68c0\u9875\u9762\uff0c\u70b9\u51fb\u76ee\u6807\u5de1\u68c0\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                                                              2. \u70b9\u51fb\u60f3\u8981\u67e5\u770b\u7684\u5de1\u68c0\u8bb0\u5f55\u540d\u79f0\u3002

                                                                • \u6bcf\u6267\u884c\u4e00\u6b21\u5de1\u68c0\uff0c\u5c31\u4f1a\u751f\u6210\u4e00\u6761\u5de1\u68c0\u8bb0\u5f55\u3002
                                                                • \u5f53\u5de1\u68c0\u8bb0\u5f55\u8d85\u8fc7\u5de1\u68c0\u914d\u7f6e\u4e2d\u8bbe\u7f6e\u7684\u6700\u5927\u4fdd\u7559\u6761\u6570\u65f6\uff0c\u4ece\u6267\u884c\u65f6\u95f4\u6700\u65e9\u7684\u8bb0\u5f55\u5f00\u59cb\u5220\u9664\u3002

                                                              3. \u67e5\u770b\u5de1\u68c0\u7684\u8be6\u7ec6\u4fe1\u606f\uff0c\u6839\u636e\u5de1\u68c0\u914d\u7f6e\u53ef\u80fd\u5305\u62ec\u96c6\u7fa4\u8d44\u6e90\u6982\u89c8\u3001\u7cfb\u7edf\u7ec4\u4ef6\u7684\u8fd0\u884c\u60c5\u51b5\u7b49\u3002

                                                                \u5728\u9875\u9762\u53f3\u4e0a\u89d2\u53ef\u4ee5\u4e0b\u8f7d\u5de1\u68c0\u62a5\u544a\u6216\u5220\u9664\u8be5\u9879\u5de1\u68c0\u62a5\u544a\u3002

                                                              "},{"location":"admin/kpanda/namespaces/createns.html","title":"\u547d\u540d\u7a7a\u95f4","text":"

                                                              \u547d\u540d\u7a7a\u95f4\u662f Kubernetes \u4e2d\u7528\u6765\u8fdb\u884c\u8d44\u6e90\u9694\u79bb\u7684\u4e00\u79cd\u62bd\u8c61\u3002\u4e00\u4e2a\u96c6\u7fa4\u4e0b\u53ef\u4ee5\u5305\u542b\u591a\u4e2a\u4e0d\u91cd\u540d\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u6bcf\u4e2a\u547d\u540d\u7a7a\u95f4\u4e2d\u7684\u8d44\u6e90\u76f8\u4e92\u9694\u79bb\u3002\u6709\u5173\u547d\u540d\u7a7a\u95f4\u7684\u8be6\u7ec6\u4ecb\u7ecd\uff0c\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u3002

                                                              \u672c\u6587\u5c06\u4ecb\u7ecd\u547d\u540d\u7a7a\u95f4\u7684\u76f8\u5173\u64cd\u4f5c\u3002

                                                              "},{"location":"admin/kpanda/namespaces/createns.html#_2","title":"\u521b\u5efa\u547d\u540d\u7a7a\u95f4","text":"

                                                              \u652f\u6301\u901a\u8fc7\u8868\u5355\u8f7b\u677e\u521b\u5efa\u547d\u540d\u7a7a\u95f4\uff0c\u4e5f\u652f\u6301\u901a\u8fc7\u7f16\u5199\u6216\u5bfc\u5165 YAML \u6587\u4ef6\u5feb\u901f\u521b\u5efa\u547d\u540d\u7a7a\u95f4\u3002

                                                              Note

                                                              • \u5728\u521b\u5efa\u547d\u540d\u7a7a\u95f4\u4e4b\u524d\uff0c\u9700\u8981\u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u521b\u5efa Kubernetes \u96c6\u7fa4\u3002
                                                              • \u96c6\u7fa4\u521d\u59cb\u5316\u540e\u901a\u5e38\u4f1a\u81ea\u52a8\u751f\u6210\u9ed8\u8ba4\u7684\u547d\u540d\u7a7a\u95f4 default \u3002\u4f46\u5bf9\u4e8e\u751f\u4ea7\u96c6\u7fa4\u800c\u8a00\uff0c\u4e3a\u4fbf\u4e8e\u7ba1\u7406\uff0c\u5efa\u8bae\u521b\u5efa\u5176\u4ed6\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u800c\u975e\u76f4\u63a5\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002
                                                              "},{"location":"admin/kpanda/namespaces/createns.html#_3","title":"\u8868\u5355\u521b\u5efa","text":"
                                                              1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                                                              2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u547d\u540d\u7a7a\u95f4 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae\u3002

                                                              3. \u586b\u5199\u547d\u540d\u7a7a\u95f4\u7684\u540d\u79f0\uff0c\u914d\u7f6e\u5de5\u4f5c\u7a7a\u95f4\u548c\u6807\u7b7e\uff08\u53ef\u9009\u8bbe\u7f6e\uff09\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                                                                Info

                                                                • \u547d\u540d\u7a7a\u95f4\u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4\u4e4b\u540e\uff0c\u8be5\u547d\u540d\u7a7a\u95f4\u7684\u8d44\u6e90\u5c31\u4f1a\u5171\u4eab\u7ed9\u6240\u7ed1\u5b9a\u7684\u5de5\u4f5c\u7a7a\u95f4\u3002\u6709\u5173\u5de5\u4f5c\u7a7a\u95f4\u7684\u8be6\u7ec6\u8bf4\u660e\uff0c\u53ef\u53c2\u8003\u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7\u3002

                                                                • \u547d\u540d\u7a7a\u95f4\u521b\u5efa\u5b8c\u6210\u540e\uff0c\u4ecd\u7136\u53ef\u4ee5\u7ed1\u5b9a/\u89e3\u7ed1\u5de5\u4f5c\u7a7a\u95f4\u3002

                                                              4. \u70b9\u51fb \u786e\u5b9a \uff0c\u5b8c\u6210\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3002\u5728\u547d\u540d\u7a7a\u95f4\u5217\u8868\u53f3\u4fa7\uff0c\u70b9\u51fb \u2507 \uff0c\u53ef\u4ee5\u4ece\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9\u67e5\u770b YAML\u3001\u4fee\u6539\u6807\u7b7e\u3001\u7ed1\u5b9a/\u89e3\u7ed1\u5de5\u4f5c\u7a7a\u95f4\u3001\u914d\u989d\u7ba1\u7406\u3001\u5220\u9664\u7b49\u66f4\u591a\u64cd\u4f5c\u3002

                                                              "},{"location":"admin/kpanda/namespaces/createns.html#yaml","title":"YAML \u521b\u5efa","text":"
                                                              1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                                                              2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u547d\u540d\u7a7a\u95f4 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4fa7\u7684 YAML \u521b\u5efa \u6309\u94ae\u3002

                                                              3. \u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u5185\u5bb9\uff0c\u6216\u8005\u4ece\u672c\u5730\u76f4\u63a5\u5bfc\u5165\u5df2\u6709\u7684 YAML \u6587\u4ef6\u3002

                                                                \u8f93\u5165 YAML \u5185\u5bb9\u540e\uff0c\u70b9\u51fb \u4e0b\u8f7d \u53ef\u4ee5\u5c06\u8be5 YAML \u6587\u4ef6\u4fdd\u5b58\u5230\u672c\u5730\u3002

                                                              4. \u6700\u540e\u5728\u5f39\u6846\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002
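A minimal namespace manifest of the kind that can be pasted into the YAML editor; the name and label here are made-up examples, and the same manifest can also be applied with kubectl:

```bash
# Create (or update) a namespace from an inline manifest
kubectl apply -f - <<EOF
apiVersion: v1
kind: Namespace
metadata:
  name: demo-ns          # example name; must be unique within the cluster
  labels:
    team: demo           # example label
EOF
```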

                                                              "},{"location":"admin/kpanda/namespaces/exclusive.html","title":"\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9","text":"

                                                              \u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\u6307\u5728 kubernetes \u96c6\u7fa4\u4e2d\uff0c\u901a\u8fc7\u6c61\u70b9\u548c\u6c61\u70b9\u5bb9\u5fcd\u7684\u65b9\u5f0f\u5b9e\u73b0\u7279\u5b9a\u547d\u540d\u7a7a\u95f4\u5bf9\u4e00\u4e2a\u6216\u591a\u4e2a\u8282\u70b9 CPU\u3001\u5185\u5b58\u7b49\u8d44\u6e90\u7684\u72ec\u4eab\u3002\u4e3a\u7279\u5b9a\u547d\u540d\u7a7a\u95f4\u914d\u7f6e\u72ec\u4eab\u8282\u70b9\u540e\uff0c\u5176\u5b83\u975e\u6b64\u547d\u540d\u7a7a\u95f4\u7684\u5e94\u7528\u548c\u670d\u52a1\u5747\u4e0d\u80fd\u8fd0\u884c\u5728\u88ab\u72ec\u4eab\u7684\u8282\u70b9\u4e0a\u3002\u4f7f\u7528\u72ec\u4eab\u8282\u70b9\u53ef\u4ee5\u8ba9\u91cd\u8981\u5e94\u7528\u72ec\u4eab\u4e00\u90e8\u5206\u8ba1\u7b97\u8d44\u6e90\uff0c\u4ece\u800c\u548c\u5176\u4ed6\u5e94\u7528\u5b9e\u73b0\u7269\u7406\u9694\u79bb\u3002
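Purely to illustrate the mechanism (the platform applies these settings automatically, and its exact taint keys and values are internal), reserving a node by hand would look roughly like this; the node and namespace names are hypothetical:

```bash
# Repel Pods that lack a matching toleration from node worker-1
kubectl taint nodes worker-1 ExclusiveNamespace=my-ns:NoSchedule

# Pin Pods of the namespace to the reserved node (read by the PodNodeSelector admission controller)
kubectl annotate ns my-ns scheduler.alpha.kubernetes.io/node-selector=kubernetes.io/hostname=worker-1
```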

Note

Applications and services that were already running on a node before it was made exclusive are not affected and keep running on it normally; only when those Pods are deleted or rebuilt are they scheduled onto other, non-exclusive nodes.

                                                              "},{"location":"admin/kpanda/namespaces/exclusive.html#_2","title":"\u51c6\u5907\u5de5\u4f5c","text":"

                                                              \u68c0\u67e5\u5f53\u524d\u96c6\u7fa4\u7684 kube-apiserver \u662f\u5426\u542f\u7528\u4e86 PodNodeSelector \u548c PodTolerationRestriction \u51c6\u5165\u63a7\u5236\u5668\u3002

                                                              \u4f7f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\u529f\u80fd\u9700\u8981\u7528\u6237\u542f\u7528 kube-apiserver \u4e0a\u7684 PodNodeSelector \u548c PodTolerationRestriction \u4e24\u4e2a\u7279\u6027\u51c6\u5165\u63a7\u5236\u5668\uff08Admission Controllers\uff09\uff0c\u5173\u4e8e\u51c6\u5165\u63a7\u5236\u5668\u66f4\u591a\u8bf4\u660e\u8bf7\u53c2\u9605 kubernetes Admission Controllers Reference\u3002

                                                              \u60a8\u53ef\u4ee5\u524d\u5f80\u5f53\u524d\u96c6\u7fa4\u4e0b\u4efb\u610f\u4e00\u4e2a Master \u8282\u70b9\u4e0a\u68c0\u67e5 kube-apiserver.yaml \u6587\u4ef6\u5185\u662f\u5426\u542f\u7528\u4e86\u8fd9\u4e24\u4e2a\u7279\u6027\uff0c\u4e5f\u53ef\u4ee5\u5728 Master \u8282\u70b9\u4e0a\u6267\u884c\u6267\u884c\u5982\u4e0b\u547d\u4ee4\u8fdb\u884c\u5feb\u901f\u68c0\u67e5\uff1a

                                                              ```bash\n[root@g-master1 ~]# cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep  enable-admission-plugins\n\n# \u9884\u671f\u8f93\u51fa\u5982\u4e0b\uff1a\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction\n```\n
                                                              "},{"location":"admin/kpanda/namespaces/exclusive.html#_3","title":"\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9","text":"

                                                              \u7531\u4e8e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u8fd0\u884c\u7740 kpanda\u3001ghippo\u3001insight \u7b49\u5e73\u53f0\u57fa\u7840\u7ec4\u4ef6\uff0c\u5728 Global \u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\u5c06\u53ef\u80fd\u5bfc\u81f4\u5f53\u7cfb\u7edf\u7ec4\u4ef6\u91cd\u542f\u540e\uff0c\u7cfb\u7edf\u7ec4\u4ef6\u65e0\u6cd5\u8c03\u5ea6\u5230\u88ab\u72ec\u4eab\u7684\u8282\u70b9\u4e0a\uff0c\u5f71\u54cd\u7cfb\u7edf\u7684\u6574\u4f53\u9ad8\u53ef\u7528\u80fd\u529b\u3002\u56e0\u6b64\uff0c\u901a\u5e38\u60c5\u51b5\u4e0b\uff0c\u6211\u4eec\u4e0d\u63a8\u8350\u7528\u6237\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\u7279\u6027\u3002

                                                              \u5982\u679c\u60a8\u786e\u5b9e\u9700\u8981\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\uff0c\u8bf7\u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\u8fdb\u884c\u5f00\u542f\uff1a

1. Enable the PodNodeSelector and PodTolerationRestriction admission controllers for the kube-apiserver of the global service cluster

  Note

  If the cluster already has these two admission controllers enabled, skip this step and go directly to configuring tolerations for the system components.

  Go to any master node of the current cluster and edit the kube-apiserver.yaml configuration file, or run the following command on a master node to configure it:

  [root@g-master1 ~]# vi /etc/kubernetes/manifests/kube-apiserver.yaml\n\n# The relevant content looks like this:\napiVersion: v1\nkind: Pod\nmetadata:\n    ......\nspec:\ncontainers:\n- command:\n    - kube-apiserver\n    ......\n    - --default-not-ready-toleration-seconds=300\n    - --default-unreachable-toleration-seconds=300\n    - --enable-admission-plugins=NodeRestriction   # list of enabled admission plugins\n    - --enable-aggregator-routing=False\n    - --enable-bootstrap-token-auth=true\n    - --endpoint-reconciler-type=lease\n    - --etcd-cafile=/etc/kubernetes/ssl/etcd/ca.crt\n    ......\n

  Find the --enable-admission-plugins parameter and append the PodNodeSelector and PodTolerationRestriction admission controllers (comma-separated). For reference:

  # Append PodNodeSelector and PodTolerationRestriction\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction\n
2. Add toleration annotations to the namespaces where the platform components reside

  After enabling the admission controllers, you need to add toleration annotations to the namespaces where the platform components reside, to preserve the high availability of the platform components.

  The system component namespaces of the 算丰 AI Computing Platform are currently listed below:

  | Namespace | System components included |
  | --- | --- |
  | kpanda-system | kpanda |
  | hwameiStor-system | hwameiStor |
  | istio-system | istio |
  | metallb-system | metallb |
  | cert-manager-system | cert-manager |
  | contour-system | contour |
  | kubean-system | kubean |
  | ghippo-system | ghippo |
  | kcoral-system | kcoral |
  | kcollie-system | kcollie |
  | insight-system | insight, insight-agent |
  | ipavo-system | ipavo |
  | kairship-system | kairship |
  | karmada-system | karmada |
  | amamba-system | amamba, jenkins |
  | skoala-system | skoala |
  | mspider-system | mspider |
  | mcamel-system | mcamel-rabbitmq, mcamel-elasticsearch, mcamel-mysql, mcamel-redis, mcamel-kafka, mcamel-minio, mcamel-postgresql |
  | spidernet-system | spidernet |
  | kangaroo-system | kangaroo |
  | gmagpie-system | gmagpie |
  | dowl-system | dowl |

  Check which of the namespaces above exist in the current cluster, and run the following command to add the annotation scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]' to each of them:

  kubectl annotate ns <namespace-name> scheduler.alpha.kubernetes.io/defaultTolerations='[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]'\n
  Make sure to replace <namespace-name> with the name of the platform namespace to annotate.
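To confirm the annotation landed, it can be read back; a quick check, assuming the kpanda-system namespace:

```bash
# Print the namespace's annotations; the defaultTolerations entry should appear
kubectl get ns kpanda-system -o jsonpath='{.metadata.annotations}'
```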

3. Use the UI to set exclusive nodes for a namespace

  Once you have confirmed that the PodNodeSelector and PodTolerationRestriction admission controllers are enabled on the cluster's API server, follow the steps below to set exclusive nodes for a namespace in the UI of the 算丰 AI Computing Platform.

  1. Click the cluster name on the Cluster List page, then click Namespaces in the left navigation bar.

  2. Click the namespace name, click the Exclusive Nodes tab, and click Add Node on the lower right.

  3. On the left side of the page, select which nodes the namespace should use exclusively; on the right, you can clear the selection or remove individual selected nodes; finally, click OK at the bottom.

  4. The namespace's existing exclusive nodes are shown in the list; on the right of a node you can select Cancel Exclusivity.

    After exclusivity is canceled, Pods from other namespaces can again be scheduled onto the node.

                                                              "},{"location":"admin/kpanda/namespaces/exclusive.html#_4","title":"\u5728 \u975e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9","text":"

                                                              \u5728 \u975e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\uff0c\u8bf7\u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\u8fdb\u884c\u5f00\u542f\uff1a

                                                              1. \u4e3a\u5f53\u524d\u96c6\u7fa4\u7684 kube-apiserver \u542f\u7528\u4e86 PodNodeSelector \u548c PodTolerationRestriction \u51c6\u5165\u63a7\u5236\u5668

Note

  If the cluster already has these two admission controllers enabled, skip this step and go directly to setting exclusive nodes for a namespace in the UI

  Go to any master node of the current cluster and edit the kube-apiserver.yaml configuration file, or run the following command on a master node to configure it:

  [root@g-master1 ~]# vi /etc/kubernetes/manifests/kube-apiserver.yaml\n\n# The relevant content looks like this:\napiVersion: v1\nkind: Pod\nmetadata:\n    ......\nspec:\ncontainers:\n- command:\n    - kube-apiserver\n    ......\n    - --default-not-ready-toleration-seconds=300\n    - --default-unreachable-toleration-seconds=300\n    - --enable-admission-plugins=NodeRestriction   # list of enabled admission plugins\n    - --enable-aggregator-routing=False\n    - --enable-bootstrap-token-auth=true\n    - --endpoint-reconciler-type=lease\n    - --etcd-cafile=/etc/kubernetes/ssl/etcd/ca.crt\n    ......\n

  Find the --enable-admission-plugins parameter and append the PodNodeSelector and PodTolerationRestriction admission controllers (comma-separated). For reference:

  # Append PodNodeSelector and PodTolerationRestriction\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction\n
                                                              2. \u4f7f\u7528\u754c\u9762\u4e3a\u547d\u540d\u7a7a\u95f4\u8bbe\u7f6e\u72ec\u4eab\u8282\u70b9

                                                                \u5f53\u60a8\u786e\u8ba4\u96c6\u7fa4 API \u670d\u52a1\u5668\u4e0a\u7684 PodNodeSelector \u548c PodTolerationRestriction \u4e24\u4e2a\u7279\u6027\u51c6\u5165\u63a7\u5236\u5668\u5df2\u7ecf\u5f00\u542f\u540e\uff0c\u8bf7\u53c2\u8003\u5982\u4e0b\u6b65\u9aa4\u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684 UI \u7ba1\u7406\u754c\u9762\u4e3a\u547d\u540d\u7a7a\u95f4\u8bbe\u7f6e\u72ec\u4eab\u8282\u70b9\u4e86\u3002

1. On the Cluster List page, click the cluster name, then click Namespaces in the left navigation bar.

2. Click the namespace name, then click the Exclusive Nodes tab, and click Add Node on the lower right.

3. On the left side of the page, choose which nodes the namespace should use exclusively; on the right you can clear or remove a selected node. Finally, click OK at the bottom.

4. The namespace's existing exclusive nodes are shown in the list; click Cancel Exclusivity on the right side of a node to release it.

   After exclusivity is canceled, Pods from other namespaces can also be scheduled onto this node.

3. Add a toleration annotation to the namespaces of components that require high availability (optional)

Run the following command to add the annotation scheduler.alpha.kubernetes.io/defaultTolerations: '[{"operator": "Exists", "effect": "NoSchedule", "key": "ExclusiveNamespace"}]' to the namespace where components requiring high availability reside.

```bash
kubectl annotate ns <namespace-name> scheduler.alpha.kubernetes.io/defaultTolerations='[{"operator": "Exists", "effect": "NoSchedule", "key": "ExclusiveNamespace"}]'
```

Make sure to replace <namespace-name> with the name of the platform namespace you want to annotate.
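To confirm that the annotation landed, you can inspect the namespace afterwards (an optional check, not part of the original steps):

```bash
kubectl get ns <namespace-name> -o jsonpath='{.metadata.annotations}'
```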

                                                              "},{"location":"admin/kpanda/namespaces/podsecurity.html","title":"\u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565","text":"

A Pod security policy in a Kubernetes cluster controls Pod behavior across security-related dimensions by configuring different levels and modes for a given namespace; only Pods that satisfy the conditions are accepted by the system. Three levels and three modes are defined, and you can choose the combination that best fits your needs.

Note

Only one security policy can be configured per security mode. Also, be cautious when configuring the enforce security mode for a namespace: violations will prevent Pods from being created.

This section describes how to configure Pod security policies for a namespace through the container management UI.

                                                              "},{"location":"admin/kpanda/namespaces/podsecurity.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
• The container management module has a Kubernetes cluster integrated or created, with a cluster version of v1.22 or above, and the cluster's UI is accessible.

• A namespace and a user have been created, and the user has been granted NS Admin or higher permissions; see Namespace Authorization for details.

                                                              "},{"location":"admin/kpanda/namespaces/podsecurity.html#_3","title":"\u4e3a\u547d\u540d\u7a7a\u95f4\u914d\u7f6e\u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565","text":"
1. Select the namespace for which you want to configure Pod security policies and enter its details page. On the Pod Security Policy page, click Configure Policy to enter the configuration page.

2. On the configuration page, click Add Policy; a policy consisting of a security level and a security mode appears. The levels and modes are described in detail below.

   | Security Level | Description |
   | --- | --- |
   | Privileged | Unrestricted policy that grants the widest possible range of permissions and allows known privilege escalations. |
   | Baseline | Minimally restrictive policy that forbids known privilege escalations and allows the default (minimally specified) Pod configuration. |
   | Restricted | Heavily restricted policy that follows current Pod hardening best practices. |

   | Security Mode | Description |
   | --- | --- |
   | Audit | Violations of the specified policy add a new audit event to the audit log; the Pod can still be created. |
   | Warn | Violations of the specified policy return a user-visible warning; the Pod can still be created. |
   | Enforce | Violations of the specified policy prevent the Pod from being created. |

3. Different security levels correspond to different check items. If you are unsure how to configure your namespace, click Policy Configuration Explanation in the upper right corner of the page for details.

4. Click OK. If creation succeeds, the security policy you configured appears on the page.

5. Click ┇ to edit or delete a configured security policy.
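For reference, Kubernetes Pod Security Admission represents each mode/level pair as a namespace label, so a policy like the one above presumably corresponds to labels along these lines (a sketch, not the platform's exact output; demo-ns is a placeholder):

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: demo-ns   # placeholder namespace name
  labels:
    # enforce mode at the baseline level: violating Pods are rejected
    pod-security.kubernetes.io/enforce: baseline
    # warn mode at the restricted level: violations produce a warning, Pods are still created
    pod-security.kubernetes.io/warn: restricted
```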

                                                              "},{"location":"admin/kpanda/network/create-ingress.html","title":"\u521b\u5efa\u8def\u7531\uff08Ingress\uff09","text":"

In a Kubernetes cluster, an Ingress exposes HTTP and HTTPS routes from outside the cluster to Services inside it. Traffic routing is controlled by rules defined on the Ingress resource. Below is a simple Ingress that sends all traffic to the same Service.
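A minimal sketch (test-ingress and test-service are placeholder names):

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: test-ingress        # placeholder
spec:
  defaultBackend:
    service:
      name: test-service    # placeholder Service that receives all traffic
      port:
        number: 80
```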

An Ingress is an API object that manages external access to Services in a cluster, typically over HTTP. An Ingress can provide load balancing, SSL termination, and name-based virtual hosting.

                                                              "},{"location":"admin/kpanda/network/create-ingress.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
• The container management module has a Kubernetes cluster integrated or created, and the cluster's UI is accessible.
• A namespace and a user have been created, and the user has been granted the NS Editor role; see Namespace Authorization for details.
• An Ingress instance has been created, an application workload has been deployed, and the corresponding Service has been created.
• When a single instance contains multiple containers, make sure the ports used by the containers do not conflict, or the deployment will fail.
                                                              "},{"location":"admin/kpanda/network/create-ingress.html#_2","title":"\u521b\u5efa\u8def\u7531","text":"
1. After logging in as an NS Editor user, click Cluster List in the upper left corner to enter the Cluster List page. In the cluster list, click a cluster name.

2. In the left navigation bar, click Container Network -> Ingress to enter the ingress list, then click the Create Ingress button in the upper right corner.

   Note

   You can also create an ingress via YAML.

3. Open the Create Ingress page and configure it. Two protocol types are available; refer to the following two parameter tables.

                                                              "},{"location":"admin/kpanda/network/create-ingress.html#http","title":"\u521b\u5efa HTTP \u534f\u8bae\u8def\u7531","text":"

Enter the following parameters:

• Ingress Name: required; enter the name of the new ingress.
• Namespace: required; select the namespace where the new ingress resides. For more about namespaces, see Namespace Overview.
• Routing Rules:
  • Domain: required; the domain name used to provide external access to the service. Defaults to the cluster's domain.
  • Protocol: required; the protocol authorized to reach the cluster service, either HTTP (no authentication required) or HTTPS (authentication must be configured). Here, select HTTP.
  • Forwarding Policy: optional; specifies the Ingress access policy.
  • Path: the URL path for service access; defaults to the root path.
  • Target Service: the name of the service to route to.
  • Target Service Port: the port exposed by the service.
• Load Balancer Type: required; the usage scope of the Ingress instance.
  • Platform-level load balancer: the same Ingress instance is shared within one cluster, and all Pods can receive requests distributed by this load balancer.
  • Tenant-level load balancer: the Ingress instance belongs exclusively to the current namespace, or to a workspace that contains the current namespace; Pods within that scope can receive requests distributed by this load balancer.
• Ingress Class: optional; select the corresponding Ingress instance to direct traffic to it.
  • When set to None, the default DefaultClass is used; set the DefaultClass when creating an Ingress instance. For more information, see Ingress Class.
  • If another instance (such as nginx) is selected, advanced configuration appears, where you can set Session Affinity, Path Rewrite, Redirect, and Traffic Distribution.
• Session Affinity: optional; three types are available: L4 Source Address Hash, Cookie Key, and L7 Header Name. Once enabled, sessions are maintained according to the corresponding rule (see the sketch after this list).
  • L4 Source Address Hash: when enabled, the following annotation is added by default: nginx.ingress.kubernetes.io/upstream-hash-by: "$binary_remote_addr"
  • Cookie Key: when enabled, connections from a specific client are passed to the same Pod; the following annotations are added by default: nginx.ingress.kubernetes.io/affinity: "cookie" and nginx.ingress.kubernetes.io/affinity-mode: persistent
  • L7 Header Name: when enabled, the following annotation is added by default: nginx.ingress.kubernetes.io/upstream-hash-by: "$http_x_forwarded_for"
• Path Rewrite: optional; rewrite-target. In some scenarios the URL exposed by the backend service differs from the path specified in the Ingress rule; without URL rewriting, access errors occur.
• Redirect: optional; permanent-redirect. After entering the rewrite path, access is permanently redirected to the configured address.
• Traffic Distribution: optional; once enabled and configured, traffic is distributed according to the given conditions.
  • Weight-based: after setting a weight, the following annotation is added to the created Ingress: nginx.ingress.kubernetes.io/canary-weight: "10"
  • Cookie-based: after setting a Cookie rule, traffic is distributed according to the given Cookie condition.
  • Header-based: after setting a Header rule, traffic is distributed according to the given Header condition.
• Labels: optional; add labels to the ingress.
• Annotations: optional; add annotations to the ingress.
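As a sketch of how these options materialize on the resource, an Ingress with cookie-based session affinity might carry annotations like the following (host and service names are placeholders):

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: web-ingress                 # placeholder
  annotations:
    # added by the Cookie Key session affinity option
    nginx.ingress.kubernetes.io/affinity: "cookie"
    nginx.ingress.kubernetes.io/affinity-mode: persistent
spec:
  ingressClassName: nginx
  rules:
  - host: demo.example.com          # placeholder domain
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: web-svc           # placeholder target service
            port:
              number: 80
```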
                                                              "},{"location":"admin/kpanda/network/create-ingress.html#https","title":"\u521b\u5efa HTTPS \u534f\u8bae\u8def\u7531","text":"

Enter the following parameters:

Note

Unlike the HTTP routing rules, a secret must be added to select a certificate; everything else is essentially the same.

• Protocol: required; the protocol authorized to reach the cluster service, either HTTP (no authentication required) or HTTPS (authentication must be configured). Here, select HTTPS.
• Secret: required; the HTTPS TLS certificate. Create a secret first.
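A secret of the kind referenced here can be created from an existing certificate/key pair, for example (file paths and the secret name are placeholders):

```bash
kubectl create secret tls my-tls-secret \
  --cert=path/to/tls.crt \
  --key=path/to/tls.key \
  -n <namespace-name>
```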
                                                              "},{"location":"admin/kpanda/network/create-ingress.html#_3","title":"\u5b8c\u6210\u8def\u7531\u521b\u5efa","text":"

After configuring all parameters, click the OK button; you are automatically returned to the ingress list. On the right side of the list, click ┇ to modify or delete the selected ingress.

                                                              "},{"location":"admin/kpanda/network/create-services.html","title":"\u521b\u5efa\u670d\u52a1\uff08Service\uff09","text":"

In a Kubernetes cluster, every Pod has its own internal IP address, but the Pods in a workload may be created and deleted at any time, so Pod IP addresses alone cannot provide a stable external service.

This is why you create a Service: it gives you a fixed IP address, decoupling the workload's frontend from its backend and allowing external users to access the service. A Service can also provide load balancing (LoadBalancer), enabling users to access workloads from the public network.

                                                              "},{"location":"admin/kpanda/network/create-services.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
• The container management module has a Kubernetes cluster integrated or created, and the cluster's UI is accessible.

• A namespace and a user have been created, and the user has been granted the NS Editor role; see Namespace Authorization for details.

• When a single instance contains multiple containers, make sure the ports used by the containers do not conflict, or the deployment will fail.

                                                              "},{"location":"admin/kpanda/network/create-services.html#_2","title":"\u521b\u5efa\u670d\u52a1","text":"
1. After logging in as an NS Editor user, click Cluster List in the upper left corner to enter the Cluster List page. In the cluster list, click a cluster name.

2. In the left navigation bar, click Container Network -> Services to enter the service list, then click the Create Service button in the upper right corner.

   Tip

   You can also create a service via YAML.

3. Open the Create Service page, select an access type, and refer to the following parameter tables for configuration.

                                                              "},{"location":"admin/kpanda/network/create-services.html#clusterip","title":"\u521b\u5efa ClusterIP \u670d\u52a1","text":"

Select Intra-cluster Access (ClusterIP), which exposes the service through the cluster's internal IP; a service of this type is reachable only from within the cluster. This is the default service type. Configure the parameters according to the table below.
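For reference, a minimal manifest equivalent to this form might look like the following (the name, namespace, labels, and ports are placeholders):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: svc-01           # placeholder service name
  namespace: default
spec:
  type: ClusterIP        # intra-cluster access
  selector:
    app: job01           # label selector matching the target Pods
  ports:
  - name: http           # port name
    protocol: TCP
    port: 80             # service port
    targetPort: 8080     # container port the workload listens on
```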

| Parameter | Description | Example |
| --- | --- | --- |
| Access Type | Required. Specifies how the Pod service is discovered; here select Intra-cluster Access (ClusterIP). | ClusterIP |
| Service Name | Required. The name of the new service. Note: enter a string of 4 to 63 characters that may contain lowercase letters, digits, and hyphens (-), beginning with a lowercase letter and ending with a lowercase letter or digit. | svc-01 |
| Namespace | Required. The namespace where the new service resides; for more about namespaces, see Namespace Overview. Note: same naming rule as the service name. | default |
| Label Selector | Required. Add labels; the Service selects Pods by label. Click "Add" after filling one in. You can also reference the labels of an existing workload: click Reference Workload Label and select a workload in the pop-up window; the system uses the selected workload's labels as the selector by default. | app:job01 |
| Port Configuration | Required. Add protocol ports for the service; select the port protocol type first (TCP and UDP are currently supported). Port name: enter a custom name for the port. Service port (port): the access port through which the Pod provides the service externally. Container port (targetport): the container port the workload actually listens on, used to expose the service within the cluster. | |
| Session Affinity | Optional. When enabled, requests from the same client are forwarded to the same Pod. | Enabled |
| Maximum Session Duration | Optional. The maximum hold time after session affinity is enabled; defaults to 30 seconds. | 30 seconds |
| Annotations | Optional. Add annotations to the service. | |
"},{"location":"admin/kpanda/network/create-services.html#nodeport","title":"Create a NodePort Service","text":"

Select Node Access (NodePort), which exposes the service through the IP and a static port (NodePort) on each node. A NodePort service routes to an automatically created ClusterIP service. By requesting <node IP>:<node port>, you can access a NodePort service from outside the cluster. Configure the parameters according to the table below.
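A minimal sketch of the corresponding manifest (placeholders throughout; if nodePort is omitted, one is auto-assigned from the cluster's NodePort range):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: svc-01
  namespace: default
spec:
  type: NodePort
  selector:
    app: job01
  ports:
  - name: http
    protocol: TCP
    port: 80            # service port
    targetPort: 8080    # container port
    nodePort: 30080     # must fall in the NodePort range (default 30000-32767)
```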

| Parameter | Description | Example |
| --- | --- | --- |
| Access Type | Required. Specifies how the Pod service is discovered; here select Node Access (NodePort). | NodePort |
| Service Name | Required. The name of the new service. Note: enter a string of 4 to 63 characters that may contain lowercase letters, digits, and hyphens (-), beginning with a lowercase letter and ending with a lowercase letter or digit. | svc-01 |
| Namespace | Required. The namespace where the new service resides; for more about namespaces, see Namespace Overview. Note: same naming rule as the service name. | default |
| Label Selector | Required. Add labels; the Service selects Pods by label. Click "Add" after filling one in. You can also reference the labels of an existing workload: click Reference Workload Label and select a workload in the pop-up window; the system uses the selected workload's labels as the selector by default. | |
| Port Configuration | Required. Add protocol ports for the service; select the port protocol type first (TCP and UDP are currently supported). Port name: enter a custom name for the port. Service port (port): the access port through which the Pod provides the service externally; for convenience, it defaults to the same value as the container port field. Container port (targetport): the container port the workload actually listens on. Node port (nodeport): the port on the node that receives traffic forwarded from the ClusterIP, serving as the entry point for external traffic. | |
| Session Affinity | Optional. When enabled, requests from the same client are forwarded to the same Pod, and the Service's .spec.sessionAffinity is set to ClientIP; see Session Affinity for Services for details. | Enabled |
| Maximum Session Duration | Optional. The maximum hold time after session affinity is enabled; the default timeout is 30 seconds, with .spec.sessionAffinityConfig.clientIP.timeoutSeconds set to 30 by default. | 30 seconds |
| Annotations | Optional. Add annotations to the service. | |
"},{"location":"admin/kpanda/network/create-services.html#loadbalancer","title":"Create a LoadBalancer Service","text":"

Select Load Balancing (LoadBalancer), which exposes the service externally using a cloud provider's load balancer. The external load balancer can route traffic to the automatically created NodePort and ClusterIP services. Configure the parameters according to the table below.
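A minimal sketch (placeholders throughout; with MetalLB, the external IP is typically allocated from the selected pool unless one is specified):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: svc-01
  namespace: default
spec:
  type: LoadBalancer
  externalTrafficPolicy: Cluster   # or Local, to deliver only to Pods on the receiving node
  selector:
    app: job01
  ports:
  - name: http
    protocol: TCP
    port: 80
    targetPort: 8080
```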

| Parameter | Description | Example |
| --- | --- | --- |
| Access Type | Required. Specifies how the Pod service is discovered; here select Load Balancing (LoadBalancer). | LoadBalancer |
| Service Name | Required. The name of the new service. Note: enter a string of 4 to 63 characters that may contain lowercase letters, digits, and hyphens (-), beginning with a lowercase letter and ending with a lowercase letter or digit. | svc-01 |
| Namespace | Required. The namespace where the new service resides; for more about namespaces, see Namespace Overview. Note: same naming rule as the service name. | default |
| External Traffic Policy | Required. Sets the external traffic policy. Cluster: traffic can be forwarded to Pods on any node in the cluster. Local: traffic is delivered only to Pods on the node that received it. | |
| Label Selector | Required. Add labels; the Service selects Pods by label. Click "Add" after filling one in. You can also reference the labels of an existing workload: click Reference Workload Label and select a workload in the pop-up window; the system uses the selected workload's labels as the selector by default. | |
| Load Balancer Type | Required. The load balancer type to use; MetalLB and others are currently supported. | MetalLB |
| MetalLB IP Pool | Required. When the selected load balancer type is MetalLB, the LoadBalancer Service allocates IP addresses from this pool by default and announces all IP addresses in the pool via ARP. | |
| Load Balancer Address | Required. 1. If a public cloud CloudProvider is used, fill in the load balancer address provided by the cloud vendor. 2. If MetalLB was selected as the load balancer type, the IP is taken from the pool above by default; if left empty, it is obtained automatically. | |
| Port Configuration | Required. Add protocol ports for the service; select the port protocol type first (TCP and UDP are currently supported). Port name: enter a custom name for the port. Service port (port): the access port through which the Pod provides the service externally; for convenience, it defaults to the same value as the container port field. Container port (targetport): the container port the workload actually listens on. Node port (nodeport): the port on the node that receives traffic forwarded from the ClusterIP, serving as the entry point for external traffic. | |
| Annotations | Optional. Add annotations to the service. | |
"},{"location":"admin/kpanda/network/create-services.html#externalname","title":"Create an ExternalName Service","text":"

Select External Service (ExternalName), which exposes a service by mapping it to an external domain name. A service of this type does not create the typical ClusterIP or NodePort; instead it redirects requests to an external service address via DNS name resolution. Configure the parameters according to the table below.
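A minimal sketch (demo.example.com is a placeholder external domain):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: svc-01
  namespace: default
spec:
  type: ExternalName
  externalName: demo.example.com   # DNS name the service resolves to
```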

| Parameter | Description | Example |
| --- | --- | --- |
| Access Type | Required. Specifies how the Pod service is discovered; here select External Service (ExternalName). | ExternalName |
| Service Name | Required. The name of the new service. Note: enter a string of 4 to 63 characters that may contain lowercase letters, digits, and hyphens (-), beginning with a lowercase letter and ending with a lowercase letter or digit. | svc-01 |
| Namespace | Required. The namespace where the new service resides; for more about namespaces, see Namespace Overview. Note: same naming rule as the service name. | default |
| Domain | Required. | |
"},{"location":"admin/kpanda/network/create-services.html#_3","title":"Complete the Service Creation","text":"

After configuring all parameters, click the OK button; you are automatically returned to the service list. On the right side of the list, click ┇ to modify or delete the selected service.

                                                              "},{"location":"admin/kpanda/network/network-policy.html","title":"\u7f51\u7edc\u7b56\u7565","text":"

A network policy (NetworkPolicy) controls network traffic at the IP address or port level (OSI layer 3 or 4). The container management module currently supports creating network policies based on Pods or namespaces, using label selectors to specify which traffic may enter or leave Pods with particular labels.

For more details on network policies, see the official Kubernetes documentation on Network Policies.

                                                              "},{"location":"admin/kpanda/network/network-policy.html#_2","title":"\u521b\u5efa\u7f51\u7edc\u7b56\u7565","text":"

Network policies can currently be created in two ways, via YAML or via a form; each approach has its own strengths and can satisfy different users' needs.

Creating via YAML takes fewer steps and is more efficient, but has a higher barrier to entry: you need to be familiar with the YAML configuration of network policies.

Creating via the form is more intuitive and simpler, as you just fill in the values as prompted, although the steps are more tedious.

                                                              "},{"location":"admin/kpanda/network/network-policy.html#yaml","title":"YAML \u521b\u5efa","text":"
1. In the cluster list, click the name of the target cluster, then click Container Network -> Network Policies -> Create via YAML in the left navigation bar.

2. In the pop-up dialog, enter or paste a prepared YAML file, then click OK at the bottom of the dialog.
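A minimal policy you might paste here, assuming Pods labeled app: web should accept ingress only from Pods labeled app: api in the same namespace (all names and labels are placeholders):

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-api-to-web    # placeholder
  namespace: default
spec:
  podSelector:
    matchLabels:
      app: web              # Pods this policy applies to
  policyTypes:
  - Ingress
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: api          # only these Pods may connect
    ports:
    - protocol: TCP
      port: 80
```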

                                                              "},{"location":"admin/kpanda/network/network-policy.html#_3","title":"\u8868\u5355\u521b\u5efa","text":"
1. In the cluster list, click the name of the target cluster, then click Container Network -> Network Policies -> Create Policy in the left navigation bar.

2. Fill in the basic information.

   The name and namespace cannot be changed after creation.

3. Fill in the policy configuration.

   The policy configuration consists of ingress and egress policies. For a source Pod to connect successfully to a target Pod, both the source Pod's egress policy and the target Pod's ingress policy must allow the connection; if either side disallows it, the connection fails.

   • Ingress policy: click ➕ to begin configuring a policy; multiple policies are supported. The effects of multiple network policies are cumulative: a connection is established successfully only when all network policies are satisfied at the same time.

   • Egress policy

                                                              "},{"location":"admin/kpanda/network/network-policy.html#_4","title":"\u67e5\u770b\u7f51\u7edc\u7b56\u7565","text":"
1. In the cluster list, click the name of the target cluster, then click Container Network -> Network Policies in the left navigation bar, and click the name of a network policy.

2. View the policy's basic configuration, associated instances, ingress policies, and egress policies.

Info

Under the Associated Instances tab, you can view instance monitoring, logs, the container list, the YAML file, events, and more.

                                                              "},{"location":"admin/kpanda/network/network-policy.html#_5","title":"\u66f4\u65b0\u7f51\u7edc\u7b56\u7565","text":"

There are two ways to update a network policy: via the form or via its YAML file.

• On the network policy list page, find the policy you need to update; under the actions column on the right, select Update to update it via the form, or Edit YAML to update it via YAML.

• Click the name of the network policy to enter its details page, then select Update in the upper right corner to update via the form, or Edit YAML to update via YAML.

                                                              "},{"location":"admin/kpanda/network/network-policy.html#_6","title":"\u5220\u9664\u7f51\u7edc\u7b56\u7565","text":"

There are two ways to delete a network policy: via the form or via its YAML file.

• On the network policy list page, find the policy you need to delete; under the actions column on the right, select Delete to delete it via the form, or Edit YAML to delete it via YAML.

• Click the name of the network policy to enter its details page, then select Delete in the upper right corner to delete via the form, or Edit YAML to delete via YAML.

                                                              "},{"location":"admin/kpanda/nodes/add-node.html","title":"\u96c6\u7fa4\u8282\u70b9\u6269\u5bb9","text":"

As business applications keep growing and cluster resources become increasingly strained, you can scale up the cluster's nodes based on kubean. After the scale-up, applications can run on the newly added nodes, relieving resource pressure.

Only clusters created through the container management module support node scaling; clusters integrated from outside do not support this operation. This article mainly describes scaling up the worker nodes of a worker cluster under the same architecture.

1. On the Cluster List page, click the name of the target cluster.

   If the Cluster Role includes the Integrated Cluster label, the cluster does not support node scaling.

2. Click Node Management in the left navigation bar, then click Integrate Node in the upper right corner of the page.

3. Enter the host name and node IP, then click OK.

   Click ➕ Add Worker Node to continue integrating more nodes.

Note

Integrating a node takes about 20 minutes; please be patient.

                                                              "},{"location":"admin/kpanda/nodes/add-node.html#_2","title":"\u53c2\u8003\u6587\u6863","text":"
• Scale up the control nodes of a worker cluster
• Add heterogeneous nodes to a worker cluster
• Scale up the worker nodes of the global service cluster
• Replace the first control node of a worker cluster
                                                              "},{"location":"admin/kpanda/nodes/delete-node.html","title":"\u96c6\u7fa4\u8282\u70b9\u7f29\u5bb9","text":"

After a business peak ends, you can shrink the cluster and uninstall redundant nodes to save resource costs; this is node scale-down. After a node is uninstalled, applications can no longer run on it.

                                                              "},{"location":"admin/kpanda/nodes/delete-node.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
• The current user has the Cluster Admin role.
• Only clusters created through the container management module support node scaling; clusters integrated from outside do not support this operation.
• Before uninstalling a node, you must pause scheduling on it and evict all applications on it to other nodes.
• Eviction method: log in to a controller node and evict all Pods on the node with the kubectl drain command. Safe eviction allows the containers in the Pods to terminate gracefully. See the sketch below this list.
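For example, assuming the node to be removed is named node-1 (a placeholder), the cordon-and-drain step might look like this:

```bash
# Pause scheduling so no new Pods land on the node
kubectl cordon node-1

# Evict existing Pods gracefully; DaemonSet-managed Pods are skipped
kubectl drain node-1 --ignore-daemonsets --delete-emptydir-data
```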
                                                              "},{"location":"admin/kpanda/nodes/delete-node.html#_3","title":"\u6ce8\u610f\u4e8b\u9879","text":"
1. When scaling down cluster nodes, nodes can only be uninstalled one at a time, not in batches.

2. To uninstall a cluster controller node, make sure the final number of controller nodes is odd.

3. The first controller node cannot be taken offline during cluster node scale-down. If this operation is strictly necessary, contact an after-sales engineer.

                                                              "},{"location":"admin/kpanda/nodes/delete-node.html#_4","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
1. On the Cluster List page, click the name of the target cluster.

   If the Cluster Role includes the Integrated Cluster label, the cluster does not support node scaling.

2. Click Node Management in the left navigation bar, find the node to be uninstalled, click ┇, and select Remove Node.

3. Enter the node name and click Delete to confirm.

                                                              "},{"location":"admin/kpanda/nodes/labels-annotations.html","title":"\u6807\u7b7e\u4e0e\u6ce8\u89e3","text":"

Labels are identifying key-value pairs added to Kubernetes objects such as Pods, nodes, and clusters; combined with label selectors, they can be used to find and filter Kubernetes objects that satisfy certain conditions. Each key must be unique for a given object.

Annotations, like labels, are also key/value pairs, but they do not have identifying or filtering capabilities. Annotations can attach arbitrary metadata to a node. Annotation keys usually take the form prefix(optional)/name(required), for example nfd.node.kubernetes.io/extended-resources. If the prefix is omitted, the annotation key is considered private to the user.

For more information about labels and annotations, see the official Kubernetes documentation on Labels and Selectors and on Annotations.

The steps to add or delete labels and annotations are as follows:

                                                              1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                                                              2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u8282\u70b9\u7ba1\u7406 \uff0c\u5728\u8282\u70b9\u53f3\u4fa7\u70b9\u51fb \u2507 \u64cd\u4f5c\u56fe\u6807\uff0c\u70b9\u51fb \u4fee\u6539\u6807\u7b7e \u6216 \u4fee\u6539\u6ce8\u89e3 \u3002

                                                              3. \u70b9\u51fb \u2795 \u6dfb\u52a0 \u53ef\u4ee5\u6dfb\u52a0\u6807\u7b7e\u6216\u6ce8\u89e3\uff0c\u70b9\u51fb X \u53ef\u4ee5\u5220\u9664\u6807\u7b7e\u6216\u6ce8\u89e3\uff0c\u6700\u540e\u70b9\u51fb \u786e\u5b9a \u3002
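The same operations can also be performed from the command line. A minimal kubectl sketch; the node name and keys are placeholders:

# Add a label and an annotation to a node
kubectl label node worker-01 disktype=ssd
kubectl annotate node worker-01 example.com/notes="temporary metadata"

# Remove them again (note the trailing dash)
kubectl label node worker-01 disktype-
kubectl annotate node worker-01 example.com/notes-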

                                                              "},{"location":"admin/kpanda/nodes/node-authentication.html","title":"\u8282\u70b9\u8ba4\u8bc1","text":""},{"location":"admin/kpanda/nodes/node-authentication.html#ssh","title":"\u4f7f\u7528 SSH \u5bc6\u94a5\u8ba4\u8bc1\u8282\u70b9","text":"

                                                              \u5982\u679c\u60a8\u9009\u62e9\u4f7f\u7528 SSH \u5bc6\u94a5\u4f5c\u4e3a\u5f85\u521b\u5efa\u96c6\u7fa4\u7684\u8282\u70b9\u8ba4\u8bc1\u65b9\u5f0f\uff0c\u60a8\u9700\u8981\u6309\u7167\u5982\u4e0b\u8bf4\u660e\u914d\u7f6e\u516c\u79c1\u94a5\u3002

                                                              1. \u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c\u5728 \u5f85\u5efa\u96c6\u7fa4\u7684\u7ba1\u7406\u96c6\u7fa4\u4e2d\u7684\u4efb\u610f\u8282\u70b9 \u4e0a\u751f\u6210\u516c\u79c1\u94a5\u3002

cd /root/.ssh
ssh-keygen -t rsa
2. Run the ls command to check whether the keys were created successfully on the management cluster. The correct output looks like this:

ls
id_rsa  id_rsa.pub  known_hosts

The file named id_rsa is the private key, and the file named id_rsa.pub is the public key.

3. Run the following command to load the public key file id_rsa.pub onto every node of the cluster to be created.

ssh-copy-id -i /root/.ssh/id_rsa.pub root@10.0.0.0

Replace the root@10.0.0.0 user account and node IP in the command above with the username and IP of the nodes of the cluster to be created. The same operation must be performed on every node of the cluster to be created.

4. Run the following command to view the private key file id_rsa created in step 1.

cat /root/.ssh/id_rsa

The output looks like this:

-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEA3UvyKINzY5BFuemQ+uJ6q+GqgfvnWwNC8HzZhpcMSjJy26MM
UtBEBJxy8fMi57XcjYxPibXW/wnd+32ICCycqCwByUmuXeCC1cjlCQDqjcAvXae7
Y54IXGF7wm2IsMNwf0kjFEXjuS48FLDA0mGRaN3BG+Up5geXcHckg3K5LD8kXFFx
dEmSIjdyw55NaUitmEdHzN7cIdfi6Z56jcV8dcFBgWKUx+ebiyPmZBkXToz6GnMF
rswzzZCl+G6Jb2xTGy7g7ozb4BoZd1IpSD5EhDanRrESVE0C5YuJ5zUAC0CvVd1l
v67AK8Ko6MXToHp01/bcsvlM6cqgwUFXZKVeOwIDAQABAoIBAQCO36GQlo3BEjxy
M2HvGJmqrx+unDxafliRe4nVY2AD515Qf4xNSzke4QM1QoyenMOwf446krQkJPK0
k+9nl6Xszby5gGCbK4BNFk8I6RaGPjZWeRx6zGUJf8avWJiPxx6yjz2esSC9RiR0
F0nmiiefVMyAfgv2/5++dK2WUFNNRKLgSRRpP5bRaD5wMzzxtSSXrUon6217HO8p
3RoWsI51MbVzhdVgpHUNABcoa0rpr9svT6XLKZxY8mxpKFYjM0Wv2JIDABg3kBvh
QbJ7kStCO3naZjKMU9UuSqVJs06cflGYw7Or8/tABR3LErNQKPjkhAQqt0DXw7Iw
3tKdTAJBAoGBAP687U7JAOqQkcphek2E/A/sbO/d37ix7Z3vNOy065STrA+ZWMZn
pZ6Ui1B/oJpoZssnfvIoz9sn559X0j67TljFALFd2ZGS0Fqh9KVCqDvfk+Vst1dq
+3r/yZdTOyswoccxkJiC/GDwZGK0amJWqvob39JCZhDAKIGLbGMmjdAHAoGBAN5k
m1WGnni1nZ+3dryIwgB6z1hWcnLTamzSET6KhSuo946ET0IRG9xtlheCx6dqICbr
Vk1Y4NtRZjK/p/YGx59rDWf7E3I8ZMgR7mjieOcUZ4lUlA4l7ZIlW/2WZHW+nUXO
Ti20fqJ8qSp4BUvOvuth1pz2GLUHe2/Fxjf7HIstAoGBAPHpPr9r+TfIlPsJeRj2
6lzA3G8qWFRQfGRYjv0fjv0pA+RIb1rzgP/I90g5+63G6Z+R4WdcxI/OJJNY1iuG
uw9n/pFxm7U4JC990BPE6nj5iLz+clpNGYckNDBF9VG9vFSrSDLdaYkxoVNvG/xJ
a9Na90H4lm7f3VewrPy310KvAoGAZr+mwNoEh5Kpc6xo8Gxi7aPP/mlaUVD6X7Ki
gvmu02AqmC7rC4QqEiqTaONkaSXwGusqIWxJ3yp5hELmUBYLzszAEeV/s4zRp1oZ
g133LBRSTbHFAdBmNdqK6Nu+KGRb92980UMOKvZbliKDl+W6cbfvVu+gtKrzTc3b
aevb4TUCgYEAnJAxyVYDP1nJf7bjBSHXQu1E/DMwbtrqw7dylRJ8cAzI7IxfSCez
7BYWq41PqVd9/zrb3Pbh2phiVzKe783igAIMqummcjo/kZyCwFsYBzK77max1jF5
aPQsLbRS2aDz8kIH6jHPZ/R+15EROmdtLmA7vIJZGerWWQR0dUU+XXA=

Copy the private key content and paste it into the key input box in the UI.
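Before pasting the key, it can be worth verifying that passwordless login actually works from the management cluster node. A sketch, reusing the placeholder IP from step 3:

# Should print "ok" without prompting for a password
ssh -i /root/.ssh/id_rsa root@10.0.0.0 'echo ok'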

                                                              "},{"location":"admin/kpanda/nodes/node-check.html","title":"\u521b\u5efa\u96c6\u7fa4\u8282\u70b9\u53ef\u7528\u6027\u68c0\u67e5","text":"

                                                              \u5728\u521b\u5efa\u96c6\u7fa4\u6216\u4e3a\u5df2\u6709\u96c6\u7fa4\u6dfb\u52a0\u8282\u70b9\u65f6\uff0c\u8bf7\u53c2\u9605\u4e0b\u8868\uff0c\u68c0\u67e5\u8282\u70b9\u914d\u7f6e\uff0c\u4ee5\u907f\u514d\u56e0\u8282\u70b9\u914d\u7f6e\u9519\u8bef\u5bfc\u81f4\u96c6\u7fa4\u521b\u5efa\u6216\u6269\u5bb9\u5931\u8d25\u3002

                                                              \u68c0\u67e5\u9879 \u63cf\u8ff0 \u64cd\u4f5c\u7cfb\u7edf \u53c2\u8003\u652f\u6301\u7684\u67b6\u6784\u53ca\u64cd\u4f5c\u7cfb\u7edf SELinux \u5173\u95ed \u9632\u706b\u5899 \u5173\u95ed \u67b6\u6784\u4e00\u81f4\u6027 \u8282\u70b9\u95f4 CPU \u67b6\u6784\u4e00\u81f4\uff08\u5982\u5747\u4e3a ARM \u6216 x86\uff09 \u4e3b\u673a\u65f6\u95f4 \u6240\u6709\u4e3b\u673a\u95f4\u540c\u6b65\u8bef\u5dee\u5c0f\u4e8e 10 \u79d2\u3002 \u7f51\u7edc\u8054\u901a\u6027 \u8282\u70b9\u53ca\u5176 SSH \u7aef\u53e3\u80fd\u591f\u6b63\u5e38\u88ab\u5e73\u53f0\u8bbf\u95ee\u3002 CPU \u53ef\u7528 CPU \u8d44\u6e90\u5927\u4e8e 4 Core \u5185\u5b58 \u53ef\u7528\u5185\u5b58\u8d44\u6e90\u5927\u4e8e 8 GB"},{"location":"admin/kpanda/nodes/node-check.html#_2","title":"\u652f\u6301\u7684\u67b6\u6784\u53ca\u64cd\u4f5c\u7cfb\u7edf","text":"\u67b6\u6784 \u64cd\u4f5c\u7cfb\u7edf \u5907\u6ce8 ARM Kylin Linux Advanced Server release V10 (Sword) SP2 \u63a8\u8350 ARM UOS Linux ARM openEuler x86 CentOS 7.x \u63a8\u8350 x86 Redhat 7.x \u63a8\u8350 x86 Redhat 8.x \u63a8\u8350 x86 Flatcar Container Linux by Kinvolk x86 Debian Bullseye, Buster, Jessie, Stretch x86 Ubuntu 16.04, 18.04, 20.04, 22.04 x86 Fedora 35, 36 x86 Fedora CoreOS x86 openSUSE Leap 15.x/Tumbleweed x86 Oracle Linux 7, 8, 9 x86 Alma Linux 8, 9 x86 Rocky Linux 8, 9 x86 Amazon Linux 2 x86 Kylin Linux Advanced Server release V10 (Sword) - SP2 \u6d77\u5149 x86 UOS Linux x86 openEuler"},{"location":"admin/kpanda/nodes/node-details.html","title":"\u8282\u70b9\u8be6\u60c5","text":"

After attaching or creating a cluster, you can view the information of each node in the cluster, including node status, labels, resource usage, Pods, and monitoring information.

1. On the Cluster List page, click the name of the target cluster.

2. Click Node Management in the left navigation bar to view node status, role, labels, CPU/memory usage, IP address, and creation time.

3. Click a node name to enter the node details page and view more information, including overview, pods, labels and annotations, event list, status, and so on.

   In addition, you can view the node's YAML file, monitoring information, labels, annotations, and more.

                                                              "},{"location":"admin/kpanda/nodes/schedule.html","title":"\u8282\u70b9\u8c03\u5ea6","text":"

                                                              \u652f\u6301\u5c06\u8282\u70b9\u6682\u505c\u8c03\u5ea6\u6216\u6062\u590d\u8c03\u5ea6\u3002\u6682\u505c\u8c03\u5ea6\u6307\uff0c\u505c\u6b62\u5c06 Pod \u8c03\u5ea6\u5230\u8be5\u8282\u70b9\u3002\u6062\u590d\u8c03\u5ea6\u6307\uff0c\u53ef\u4ee5\u5c06 Pod \u8c03\u5ea6\u5230\u8be5\u8282\u70b9\u3002

                                                              1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                                                              2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u8282\u70b9\u7ba1\u7406 \uff0c\u5728\u8282\u70b9\u53f3\u4fa7\u70b9\u51fb \u2507 \u64cd\u4f5c\u56fe\u6807\uff0c\u70b9\u51fb \u6682\u505c\u8c03\u5ea6 \u6309\u94ae\u5373\u53ef\u6682\u505c\u8c03\u5ea6\u8be5\u8282\u70b9\u3002

                                                              3. \u5728\u8282\u70b9\u53f3\u4fa7\u70b9\u51fb \u2507 \u64cd\u4f5c\u56fe\u6807\uff0c\u70b9\u51fb \u6062\u590d\u8c03\u5ea6 \u6309\u94ae\u5373\u53ef\u6062\u590d\u8c03\u5ea6\u8be5\u8282\u70b9\u3002

                                                              \u8282\u70b9\u8c03\u5ea6\u72b6\u6001\u53ef\u80fd\u56e0\u7f51\u7edc\u60c5\u51b5\u6709\u6240\u5ef6\u8fdf\uff0c\u70b9\u51fb\u641c\u7d22\u6846\u53f3\u4fa7\u7684\u5237\u65b0\u56fe\u6807\u53ef\u4ee5\u5237\u65b0\u8282\u70b9\u8c03\u5ea6\u72b6\u6001\u3002
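Pausing and resuming scheduling map directly onto kubectl cordon and uncordon. A sketch; the node name is a placeholder:

# Pause scheduling: the node is marked unschedulable, existing Pods keep running
kubectl cordon worker-01

# Resume scheduling: new Pods may be placed on the node again
kubectl uncordon worker-01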

                                                              "},{"location":"admin/kpanda/nodes/taints.html","title":"\u8282\u70b9\u6c61\u70b9\u7ba1\u7406","text":"

                                                              \u6c61\u70b9 (Taint) \u80fd\u591f\u4f7f\u8282\u70b9\u6392\u65a5\u67d0\u4e00\u7c7b Pod\uff0c\u907f\u514d Pod \u88ab\u8c03\u5ea6\u5230\u8be5\u8282\u70b9\u4e0a\u3002 \u6bcf\u4e2a\u8282\u70b9\u4e0a\u53ef\u4ee5\u5e94\u7528\u4e00\u4e2a\u6216\u591a\u4e2a\u6c61\u70b9\uff0c\u4e0d\u80fd\u5bb9\u5fcd\u8fd9\u4e9b\u6c61\u70b9\u7684 Pod \u5219\u4e0d\u4f1a\u88ab\u8c03\u5ea6\u8be5\u8282\u70b9\u4e0a\u3002

                                                              "},{"location":"admin/kpanda/nodes/taints.html#_2","title":"\u6ce8\u610f\u4e8b\u9879","text":"
                                                              1. \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u5907 NS Editor \u89d2\u8272\u6388\u6743\u6216\u5176\u4ed6\u66f4\u9ad8\u6743\u9650\u3002
                                                              2. \u4e3a\u8282\u70b9\u6dfb\u52a0\u6c61\u70b9\u4e4b\u540e\uff0c\u53ea\u6709\u80fd\u5bb9\u5fcd\u8be5\u6c61\u70b9\u7684 Pod \u624d\u80fd\u88ab\u8c03\u5ea6\u5230\u8be5\u8282\u70b9\u3002
                                                              "},{"location":"admin/kpanda/nodes/taints.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u627e\u5230\u76ee\u6807\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u6982\u89c8 \u9875\u9762\u3002

                                                              2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u8282\u70b9\u7ba1\u7406 \uff0c\u627e\u5230\u9700\u8981\u4fee\u6539\u6c61\u70b9\u7684\u8282\u70b9\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u56fe\u6807\u5e76\u70b9\u51fb \u4fee\u6539\u6c61\u70b9 \u6309\u94ae\u3002

                                                              3. \u5728\u5f39\u6846\u5185\u8f93\u5165\u6c61\u70b9\u7684\u952e\u503c\u4fe1\u606f\uff0c\u9009\u62e9\u6c61\u70b9\u6548\u679c\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                                                                \u70b9\u51fb \u2795 \u6dfb\u52a0 \u53ef\u4ee5\u4e3a\u8282\u70b9\u6dfb\u52a0\u591a\u4e2a\u6c61\u70b9\uff0c\u70b9\u51fb\u6c61\u70b9\u6548\u679c\u53f3\u4fa7\u7684 X \u53ef\u4ee5\u5220\u9664\u6c61\u70b9\u3002

                                                                \u76ee\u524d\u652f\u6301\u4e09\u79cd\u6c61\u70b9\u6548\u679c\uff1a

                                                                • NoSchedule\uff1a\u65b0\u7684 Pod \u4e0d\u4f1a\u88ab\u8c03\u5ea6\u5230\u5e26\u6709\u6b64\u6c61\u70b9\u7684\u8282\u70b9\u4e0a\uff0c\u9664\u975e\u65b0\u7684 Pod \u5177\u6709\u76f8\u5339\u914d\u7684\u5bb9\u5fcd\u5ea6\u3002\u5f53\u524d\u6b63\u5728\u8282\u70b9\u4e0a\u8fd0\u884c\u7684 Pod \u4e0d\u4f1a \u88ab\u9a71\u9010\u3002
                                                                • NoExecute\uff1a\u8fd9\u4f1a\u5f71\u54cd\u5df2\u5728\u8282\u70b9\u4e0a\u8fd0\u884c\u7684 Pod\uff1a
                                                                  • \u5982\u679c Pod \u4e0d\u80fd\u5bb9\u5fcd\u6b64\u6c61\u70b9\uff0c\u4f1a\u9a6c\u4e0a\u88ab\u9a71\u9010\u3002
                                                                  • \u5982\u679c Pod \u80fd\u591f\u5bb9\u5fcd\u6b64\u6c61\u70b9\uff0c\u4f46\u662f\u5728\u5bb9\u5fcd\u5ea6\u5b9a\u4e49\u4e2d\u6ca1\u6709\u6307\u5b9a tolerationSeconds\uff0c\u5219 Pod \u8fd8\u4f1a\u4e00\u76f4\u5728\u8fd9\u4e2a\u8282\u70b9\u4e0a\u8fd0\u884c\u3002
                                                                  • \u5982\u679c Pod \u80fd\u591f\u5bb9\u5fcd\u6b64\u6c61\u70b9\u800c\u4e14\u6307\u5b9a\u4e86 tolerationSeconds\uff0c\u5219 Pod \u8fd8\u80fd\u5728\u8fd9\u4e2a\u8282\u70b9\u4e0a\u7ee7\u7eed\u8fd0\u884c\u6307\u5b9a\u7684\u65f6\u957f\u3002\u8fd9\u6bb5\u65f6\u95f4\u8fc7\u53bb\u540e\uff0c\u518d\u4ece\u8282\u70b9\u4e0a\u9a71\u9664\u8fd9\u4e9b Pod\u3002
                                                                • PreferNoSchedule\uff1a\u8fd9\u662f\u201c\u8f6f\u6027\u201d\u7684 NoSchedule\u3002\u63a7\u5236\u5e73\u9762\u5c06**\u5c1d\u8bd5**\u907f\u514d\u5c06\u4e0d\u5bb9\u5fcd\u6b64\u6c61\u70b9\u7684 Pod \u8c03\u5ea6\u5230\u8282\u70b9\u4e0a\uff0c\u4f46\u4e0d\u80fd\u4fdd\u8bc1\u5b8c\u5168\u907f\u514d\u3002\u6240\u4ee5\u8981\u5c3d\u91cf\u907f\u514d\u4f7f\u7528\u6b64\u6c61\u70b9\u3002

                                                              \u6709\u5173\u6c61\u70b9\u7684\u66f4\u591a\u8be6\u60c5\uff0c\u8bf7\u53c2\u9605 Kubernetes \u5b98\u65b9\u6587\u6863\uff1a\u6c61\u70b9\u548c\u5bb9\u5fcd\u5ea6\u3002
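From the command line, the same effects can be produced with kubectl taint plus a matching toleration in the Pod spec. A minimal sketch; the node name, key, and value are placeholders:

# Apply a taint, then remove it again (note the trailing dash)
kubectl taint nodes worker-01 dedicated=gpu:NoSchedule
kubectl taint nodes worker-01 dedicated=gpu:NoSchedule-

# A Pod that tolerates the taint above
kubectl apply -f - <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: toleration-demo
spec:
  containers:
  - name: app
    image: nginx
  tolerations:
  - key: "dedicated"
    operator: "Equal"
    value: "gpu"
    effect: "NoSchedule"
EOF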

                                                              "},{"location":"admin/kpanda/olm/import-miniooperator.html","title":"\u5bfc\u5165\u79bb\u7ebf MinIo Operator","text":"

                                                              \u672c\u6587\u5c06\u4ecb\u7ecd\u5728\u79bb\u7ebf\u73af\u5883\u4e0b\u5982\u4f55\u5bfc\u5165 MinIo Operator\u3002

                                                              "},{"location":"admin/kpanda/olm/import-miniooperator.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              • \u5f53\u524d\u96c6\u7fa4\u5df2\u63a5\u5165\u5bb9\u5668\u7ba1\u7406\u4e14\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u5df2\u7ecf\u5b89\u88c5 kolm \u7ec4\u4ef6\uff08helm \u6a21\u677f\u641c\u7d22 kolm\uff09
                                                              • \u5f53\u524d\u96c6\u7fa4\u5df2\u7ecf\u5b89\u88c5 olm \u7ec4\u4ef6\u4e14\u7248\u672c >= 0.2.4 (helm \u6a21\u677f\u641c\u7d22 olm)
                                                              • \u652f\u6301\u6267\u884c Docker \u547d\u4ee4
                                                              • \u51c6\u5907\u4e00\u4e2a\u955c\u50cf\u4ed3\u5e93
                                                              "},{"location":"admin/kpanda/olm/import-miniooperator.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u5728\u6267\u884c\u73af\u5883\u4e2d\u8bbe\u7f6e\u73af\u5883\u53d8\u91cf\u5e76\u5728\u540e\u7eed\u6b65\u9aa4\u4f7f\u7528\uff0c\u6267\u884c\u547d\u4ee4\uff1a

export OPM_IMG=10.5.14.200/quay.m.daocloud.io/operator-framework/opm:v1.29.0
export BUNDLE_IMG=10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3

How to obtain the above image addresses:

Go to Container Management -> select the current cluster -> Helm Apps -> view the olm component -> Plugin Settings, and find the images of opm, minio, minio bundle, and minio operator needed in the later steps.

Using the screenshot above as an example, the four image addresses are as follows:

# opm image
10.5.14.200/quay.m.daocloud.io/operator-framework/opm:v1.29.0

# minio image
10.5.14.200/quay.m.daocloud.io/minio/minio:RELEASE.2023-03-24T21-41-23Z

# minio bundle image
10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3

# minio operator image
10.5.14.200/quay.m.daocloud.io/minio/operator:v5.0.3
2. Run the opm command to extract the operator contained in the offline bundle image.

# Create a directory to hold the operator
$ mkdir minio-operator && cd minio-operator

# Extract the operator YAML
$ docker run --user root -v $PWD/minio-operator:/minio-operator ${OPM_IMG} alpha bundle unpack --skip-tls-verify -v -d ${BUNDLE_IMG} -o ./minio-operator

# Expected result
.
└── minio-operator
    ├── manifests
    │   ├── console-env_v1_configmap.yaml
    │   ├── console-sa-secret_v1_secret.yaml
    │   ├── console_v1_service.yaml
    │   ├── minio-operator.clusterserviceversion.yaml
    │   ├── minio.min.io_tenants.yaml
    │   ├── operator_v1_service.yaml
    │   ├── sts.min.io_policybindings.yaml
    │   └── sts_v1_service.yaml
    └── metadata
        └── annotations.yaml

3 directories, 9 files
3. Replace all image addresses in the minio-operator/manifests/minio-operator.clusterserviceversion.yaml file with the images from the offline image registry.

   Before replacement:

   After replacement:

4. Generate the Dockerfile for building the bundle image.

$ docker run --user root -v $PWD:/minio-operator -w /minio-operator ${OPM_IMG} alpha bundle generate --channels stable,beta -d /minio-operator/minio-operator/manifests -e stable -p minio-operator

# Expected result
.
├── bundle.Dockerfile
└── minio-operator
    ├── manifests
    │   ├── console-env_v1_configmap.yaml
    │   ├── console-sa-secret_v1_secret.yaml
    │   ├── console_v1_service.yaml
    │   ├── minio-operator.clusterserviceversion.yaml
    │   ├── minio.min.io_tenants.yaml
    │   ├── operator_v1_service.yaml
    │   ├── sts.min.io_policybindings.yaml
    │   └── sts_v1_service.yaml
    └── metadata
        └── annotations.yaml

3 directories, 10 files
5. Run the build command to build the bundle image and push it to the offline registry.

# Set the new bundle image
export OFFLINE_BUNDLE_IMG=10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3-offline

$ docker build . -f bundle.Dockerfile -t ${OFFLINE_BUNDLE_IMG}

$ docker push ${OFFLINE_BUNDLE_IMG}
6. Generate the Dockerfile for building the catalog image.

$ docker run --user root -v $PWD:/minio-operator -w /minio-operator ${OPM_IMG} index add --bundles ${OFFLINE_BUNDLE_IMG} --generate --binary-image ${OPM_IMG} --skip-tls-verify

# Expected result
.
├── bundle.Dockerfile
├── database
│   └── index.db
├── index.Dockerfile
└── minio-operator
    ├── manifests
    │   ├── console-env_v1_configmap.yaml
    │   ├── console-sa-secret_v1_secret.yaml
    │   ├── console_v1_service.yaml
    │   ├── minio.min.io_tenants.yaml
    │   ├── minio-operator.clusterserviceversion.yaml
    │   ├── operator_v1_service.yaml
    │   ├── sts.min.io_policybindings.yaml
    │   └── sts_v1_service.yaml
    └── metadata
        └── annotations.yaml

4 directories, 12 files
7. Build the catalog image.

# Set the new catalog image
export OFFLINE_CATALOG_IMG=10.5.14.200/release.daocloud.io/operator-framework/system-operator-index:v0.1.0-offline

$ docker build . -f index.Dockerfile -t ${OFFLINE_CATALOG_IMG}

$ docker push ${OFFLINE_CATALOG_IMG}
8. Go to Container Management and update the built-in catsrc image of the olm Helm app (fill in the ${catalog-image} specified when building the catalog image).

9. After the update succeeds, the minio-operator component will appear in the Operator Hub.
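One way to double-check that the new catalog image is actually being served (an assumption: the CatalogSource resource is queryable in your installation; names and namespaces vary):

# List CatalogSources in all namespaces and confirm the image field
kubectl get catalogsource -A -o custom-columns=NS:.metadata.namespace,NAME:.metadata.name,IMAGE:.spec.image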

                                                              "},{"location":"admin/kpanda/permissions/cluster-ns-auth.html","title":"\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u6388\u6743","text":"

                                                              \u5bb9\u5668\u7ba1\u7406\u57fa\u4e8e\u5168\u5c40\u6743\u9650\u7ba1\u7406\u53ca\u5168\u5c40\u7528\u6237/\u7528\u6237\u7ec4\u7ba1\u7406\u5b9e\u73b0\u6388\u6743\uff0c\u5982\u9700\u4e3a\u7528\u6237\u6388\u4e88\u5bb9\u5668\u7ba1\u7406\u7684\u6700\u9ad8\u6743\u9650\uff08\u53ef\u4ee5\u521b\u5efa\u3001\u7ba1\u7406\u3001\u5220\u9664\u6240\u6709\u96c6\u7fa4\uff09\uff0c\u8bf7\u53c2\u89c1\u4ec0\u4e48\u662f\u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\u3002

                                                              "},{"location":"admin/kpanda/permissions/cluster-ns-auth.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u7ed9\u7528\u6237/\u7528\u6237\u7ec4\u6388\u6743\u4e4b\u524d\uff0c\u8bf7\u5b8c\u6210\u5982\u4e0b\u51c6\u5907\uff1a

                                                              • \u5df2\u5728\u5168\u5c40\u7ba1\u7406\u4e2d\u521b\u5efa\u4e86\u5f85\u6388\u6743\u7684\u7528\u6237/\u7528\u6237\u7ec4\uff0c\u8bf7\u53c2\u8003\u7528\u6237\u3002

                                                              • \u4ec5 Kpanda Owner \u53ca\u5f53\u524d\u96c6\u7fa4\u7684 Cluster Admin \u5177\u5907\u96c6\u7fa4\u6388\u6743\u80fd\u529b\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u6743\u9650\u8bf4\u660e\u3002

                                                              • \u4ec5 Kpanda Owner\u3001\u5f53\u524d\u96c6\u7fa4\u7684 Cluster Admin\uff0c\u5f53\u524d\u547d\u540d\u7a7a\u95f4\u7684 NS Admin \u5177\u5907\u547d\u540d\u7a7a\u95f4\u6388\u6743\u80fd\u529b\u3002

                                                              "},{"location":"admin/kpanda/permissions/cluster-ns-auth.html#_3","title":"\u96c6\u7fa4\u6388\u6743","text":"
                                                              1. \u7528\u6237\u767b\u5f55\u5e73\u53f0\u540e\uff0c\u70b9\u51fb\u5de6\u4fa7\u83dc\u5355\u680f \u5bb9\u5668\u7ba1\u7406 \u4e0b\u7684 \u6743\u9650\u7ba1\u7406 \uff0c\u9ed8\u8ba4\u4f4d\u4e8e \u96c6\u7fa4\u6743\u9650 \u9875\u7b7e\u3002

                                                              2. \u70b9\u51fb \u6dfb\u52a0\u6388\u6743 \u6309\u94ae\u3002

                                                              3. \u5728 \u6dfb\u52a0\u96c6\u7fa4\u6743\u9650 \u9875\u9762\u4e2d\uff0c\u9009\u62e9\u76ee\u6807\u96c6\u7fa4\u3001\u5f85\u6388\u6743\u7684\u7528\u6237/\u7528\u6237\u7ec4\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                                                                \u76ee\u524d\u4ec5\u652f\u6301\u7684\u96c6\u7fa4\u89d2\u8272\u4e3a Cluster Admin \uff0c\u8be6\u60c5\u6743\u9650\u53ef\u53c2\u8003\u6743\u9650\u8bf4\u660e\u3002\u5982\u9700\u8981\u7ed9\u591a\u4e2a\u7528\u6237/\u7528\u6237\u7ec4\u540c\u65f6\u8fdb\u884c\u6388\u6743\uff0c \u53ef\u70b9\u51fb \u6dfb\u52a0\u7528\u6237\u6743\u9650 \u8fdb\u884c\u591a\u6b21\u6dfb\u52a0\u3002

                                                              4. \u8fd4\u56de\u96c6\u7fa4\u6743\u9650\u7ba1\u7406\u9875\u9762\uff0c\u5c4f\u5e55\u51fa\u73b0\u6d88\u606f\uff1a \u6dfb\u52a0\u96c6\u7fa4\u6743\u9650\u6210\u529f \u3002

                                                              "},{"location":"admin/kpanda/permissions/cluster-ns-auth.html#_4","title":"\u547d\u540d\u7a7a\u95f4\u6388\u6743","text":"
                                                              1. \u7528\u6237\u767b\u5f55\u5e73\u53f0\u540e\uff0c\u70b9\u51fb\u5de6\u4fa7\u83dc\u5355\u680f \u5bb9\u5668\u7ba1\u7406 \u4e0b\u7684 \u6743\u9650\u7ba1\u7406 \uff0c\u70b9\u51fb \u547d\u540d\u7a7a\u95f4\u6743\u9650 \u9875\u7b7e\u3002

                                                              2. \u70b9\u51fb \u6dfb\u52a0\u6388\u6743 \u6309\u94ae\u3002\u5728 \u6dfb\u52a0\u547d\u540d\u7a7a\u95f4\u6743\u9650 \u9875\u9762\u4e2d\uff0c\u9009\u62e9\u76ee\u6807\u96c6\u7fa4\u3001\u76ee\u6807\u547d\u540d\u7a7a\u95f4\uff0c\u4ee5\u53ca\u5f85\u6388\u6743\u7684\u7528\u6237/\u7528\u6237\u7ec4\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                                                                \u76ee\u524d\u652f\u6301\u7684\u547d\u540d\u7a7a\u95f4\u89d2\u8272\u4e3a NS Admin\u3001NS Editor\u3001NS Viewer\uff0c\u8be6\u60c5\u6743\u9650\u53ef\u53c2\u8003\u6743\u9650\u8bf4\u660e\u3002\u5982\u9700\u7ed9\u591a\u4e2a\u7528\u6237/\u7528\u6237\u7ec4\u540c\u65f6\u8fdb\u884c\u6388\u6743\uff0c\u53ef\u70b9\u51fb \u6dfb\u52a0\u7528\u6237\u6743\u9650 \u8fdb\u884c\u591a\u6b21\u6dfb\u52a0\u3002\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u6743\u9650\u6388\u6743\u3002

                                                              3. \u8fd4\u56de\u547d\u540d\u7a7a\u95f4\u6743\u9650\u7ba1\u7406\u9875\u9762\uff0c\u5c4f\u5e55\u51fa\u73b0\u6d88\u606f\uff1a \u6dfb\u52a0\u96c6\u7fa4\u6743\u9650\u6210\u529f \u3002

                                                                Tip

                                                                \u540e\u7eed\u5982\u9700\u5220\u9664\u6216\u7f16\u8f91\u6743\u9650\uff0c\u53ef\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u9009\u62e9 \u7f16\u8f91 \u6216 \u5220\u9664 \u3002

                                                              "},{"location":"admin/kpanda/permissions/custom-kpanda-role.html","title":"\u589e\u52a0 Kpanda \u5185\u7f6e\u89d2\u8272\u6743\u9650\u70b9","text":"

                                                              \u8fc7\u53bb Kpanda \u5185\u7f6e\u89d2\u8272\u7684\u6743\u9650\u70b9\uff08rbac rules\uff09\u90fd\u662f\u63d0\u524d\u9884\u5b9a\u4e49\u597d\u7684\u4e14\u7528\u6237\u65e0\u6cd5\u4fee\u6539\uff0c\u56e0\u4e3a\u4ee5\u524d\u4fee\u6539\u5185\u7f6e\u89d2\u8272\u7684\u6743\u9650\u70b9\u4e4b\u540e\u4e5f\u4f1a\u88ab Kpanda \u63a7\u5236\u5668\u8fd8\u539f\u6210\u9884\u5b9a\u4e49\u7684\u6743\u9650\u70b9\u3002 \u4e3a\u4e86\u652f\u6301\u66f4\u52a0\u7075\u6d3b\u7684\u6743\u9650\u914d\u7f6e\uff0c\u6ee1\u8db3\u5bf9\u7cfb\u7edf\u89d2\u8272\u7684\u81ea\u5b9a\u4e49\u9700\u6c42\uff0c\u76ee\u524d Kpanda \u652f\u6301\u4e3a\u5185\u7f6e\u7cfb\u7edf\u89d2\u8272\uff08cluster admin\u3001ns admin\u3001ns editor\u3001ns viewer\uff09\u4fee\u6539\u6743\u9650\u70b9\u3002 \u4ee5\u4e0b\u793a\u4f8b\u6f14\u793a\u5982\u4f55\u65b0\u589e ns-viewer \u6743\u9650\u70b9\uff0c\u5c1d\u8bd5\u589e\u52a0\u53ef\u4ee5\u5220\u9664 Deployment \u7684\u6743\u9650\u3002\u5176\u4ed6\u6743\u9650\u70b9\u64cd\u4f5c\u7c7b\u4f3c\u3002

                                                              "},{"location":"admin/kpanda/permissions/custom-kpanda-role.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              • \u9002\u7528\u4e8e\u5bb9\u5668\u7ba1\u7406 v0.27.0 \u53ca\u4ee5\u4e0a\u7248\u672c\u3002
                                                              • \u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                                                              • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 NS Viewer \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                              Note

                                                              • \u53ea\u9700\u5728 Global Cluster \u589e\u52a0\u6743\u9650\u70b9\uff0cKpanda \u63a7\u5236\u5668\u4f1a\u628a Global Cluster \u589e\u52a0\u7684\u6743\u9650\u70b9\u540c\u6b65\u5230\u6240\u6709\u63a5\u5165\u5b50\u96c6\u7fa4\u4e2d\uff0c\u540c\u6b65\u9700\u4e00\u6bb5\u65f6\u95f4\u624d\u80fd\u5b8c\u6210
                                                              • \u53ea\u80fd\u5728 Global Cluster \u589e\u52a0\u6743\u9650\u70b9\uff0c\u5728\u5b50\u96c6\u7fa4\u65b0\u589e\u7684\u6743\u9650\u70b9\u4f1a\u88ab Global Cluster \u5185\u7f6e\u89d2\u8272\u6743\u9650\u70b9\u8986\u76d6
                                                              • \u53ea\u652f\u6301\u4f7f\u7528\u56fa\u5b9a Label \u7684 ClusterRole \u8ffd\u52a0\u6743\u9650\uff0c\u4e0d\u652f\u6301\u66ff\u6362\u6216\u8005\u5220\u9664\u6743\u9650\uff0c\u4e5f\u4e0d\u80fd\u4f7f\u7528 role \u8ffd\u52a0\u6743\u9650\uff0c\u5185\u7f6e\u89d2\u8272\u8ddf\u7528\u6237\u521b\u5efa\u7684 ClusterRole Label \u5bf9\u5e94\u5173\u7cfb\u5982\u4e0b

                                                                cluster-admin: rbac.kpanda.io/role-template-cluster-admin: \"true\"\ncluster-edit: rbac.kpanda.io/role-template-cluster-edit: \"true\"\ncluster-view: rbac.kpanda.io/role-template-cluster-view: \"true\"\nns-admin: rbac.kpanda.io/role-template-ns-admin: \"true\"\nns-edit: rbac.kpanda.io/role-template-ns-edit: \"true\"\nns-view: rbac.kpanda.io/role-template-ns-view: \"true\"\n
                                                              "},{"location":"admin/kpanda/permissions/custom-kpanda-role.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u4f7f\u7528 admin \u6216\u8005 cluster admin \u6743\u9650\u7684\u7528\u6237\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d

                                                              2. \u6388\u6743 ns-viewer\uff0c\u7528\u6237\u6709\u8be5 namespace ns-view \u6743\u9650

                                                              3. \u5207\u6362\u767b\u5f55\u7528\u6237\u4e3a ns-viewer\uff0c\u6253\u5f00\u63a7\u5236\u53f0\u83b7\u53d6 ns-viewer \u7528\u6237\u5bf9\u5e94\u7684 token\uff0c\u4f7f\u7528 curl \u8bf7\u6c42\u5220\u9664\u4e0a\u8ff0\u7684 deployment nginx\uff0c\u53d1\u73b0\u65e0\u5220\u9664\u6743\u9650

[root@master-01 ~]# curl -k -X DELETE  'https://${URL}/apis/kpanda.io/v1alpha1/clusters/cluster-member/namespaces/default/deployments/nginx' -H 'authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJOU044MG9BclBRMzUwZ2VVU2ZyNy1xMEREVWY4MmEtZmJqR05uRE1sd1lFIn0.eyJleHAiOjE3MTU3NjY1NzksImlhdCI6MTcxNTY4MDE3OSwiYXV0aF90aW1lIjoxNzE1NjgwMTc3LCJqdGkiOiIxZjI3MzJlNC1jYjFhLTQ4OTktYjBiZC1iN2IxZWY1MzAxNDEiLCJpc3MiOiJodHRwczovLzEwLjYuMjAxLjIwMTozMDE0Ny9hdXRoL3JlYWxtcy9naGlwcG8iLCJhdWQiOiJfX2ludGVybmFsLWdoaXBwbyIsInN1YiI6ImMxZmMxM2ViLTAwZGUtNDFiYS05ZTllLWE5OGU2OGM0MmVmMCIsInR5cCI6IklEIiwiYXpwIjoiX19pbnRlcm5hbC1naGlwcG8iLCJzZXNzaW9uX3N0YXRlIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiYXRfaGFzaCI6IlJhTHoyQjlKQ2FNc1RrbGVMR3V6blEiLCJhY3IiOiIwIiwic2lkIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJncm91cHMiOltdLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJucy12aWV3ZXIiLCJsb2NhbGUiOiIifQ.As2ipMjfvzvgONAGlc9RnqOd3zMwAj82VXlcqcR74ZK9tAq3Q4ruQ1a6WuIfqiq8Kq4F77ljwwzYUuunfBli2zhU2II8zyxVhLoCEBu4pBVBd_oJyUycXuNa6HfQGnl36E1M7-_QG8b-_T51wFxxVb5b7SEDE1AvIf54NAlAr-rhDmGRdOK1c9CohQcS00ab52MD3IPiFFZ8_Iljnii-RpXKZoTjdcULJVn_uZNk_SzSUK-7MVWmPBK15m6sNktOMSf0pCObKWRqHd15JSe-2aA2PKBo1jBH3tHbOgZyMPdsLI0QdmEnKB5FiiOeMpwn_oHnT6IjT-BZlB18VkW8rA'
{"code":7,"message":"[RBAC] delete resources(deployments: nginx) is forbidden for user(ns-viewer) in cluster(cluster-member)","details":[]}[root@master-01 ~]#
[root@master-01 ~]#
4. Create the following ClusterRole on the global service cluster:

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: append-ns-view # (1)!
  labels:
    rbac.kpanda.io/role-template-ns-view: "true" # (2)!
rules:
  - apiGroups: [ "apps" ]
    resources: [ "deployments" ]
    verbs: [ "delete" ]
1. This field value can be chosen freely, as long as it is not duplicated and complies with the Kubernetes resource naming rules.
2. Note that different labels must be applied when adding permissions to different roles.
5. Wait for the Kpanda controller to add the user-created permission to the built-in role ns-viewer, then check whether the corresponding built-in role has the permission point added in the previous step:

[root@master-01 ~]# kubectl get clusterrole role-template-ns-view -oyaml|grep deployments -C 10|tail -n 6

- apiGroups:
  - apps
  resources:
  - deployments
  verbs:
  - delete

6. Use curl again to request deletion of the deployment nginx above; this time the deletion succeeds. In other words, ns-viewer has successfully gained the permission to delete Deployments.

[root@master-01 ~]# curl -k -X DELETE  'https://${URL}/apis/kpanda.io/v1alpha1/clusters/cluster-member/namespaces/default/deployments/nginx' -H 'authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJOU044MG9BclBRMzUwZ2VVU2ZyNy1xMEREVWY4MmEtZmJqR05uRE1sd1lFIn0.eyJleHAiOjE3MTU3NjY1NzksImlhdCI6MTcxNTY4MDE3OSwiYXV0aF90aW1lIjoxNzE1NjgwMTc3LCJqdGkiOiIxZjI3MzJlNC1jYjFhLTQ4OTktYjBiZC1iN2IxZWY1MzAxNDEiLCJpc3MiOiJodHRwczovLzEwLjYuMjAxLjIwMTozMDE0Ny9hdXRoL3JlYWxtcy9naGlwcG8iLCJhdWQiOiJfX2ludGVybmFsLWdoaXBwbyIsInN1YiI6ImMxZmMxM2ViLTAwZGUtNDFiYS05ZTllLWE5OGU2OGM0MmVmMCIsInR5cCI6IklEIiwiYXpwIjoiX19pbnRlcm5hbC1naGlwcG8iLCJzZXNzaW9uX3N0YXRlIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiYXRfaGFzaCI6IlJhTHoyQjlKQ2FNc1RrbGVMR3V6blEiLCJhY3IiOiIwIiwic2lkIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJncm91cHMiOltdLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJucy12aWV3ZXIiLCJsb2NhbGUiOiIifQ.As2ipMjfvzvgONAGlc9RnqOd3zMwAj82VXlcqcR74ZK9tAq3Q4ruQ1a6WuIfqiq8Kq4F77ljwwzYUuunfBli2zhU2II8zyxVhLoCEBu4pBVBd_oJyUycXuNa6HfQGnl36E1M7-_QG8b-_T51wFxxVb5b7SEDE1AvIf54NAlAr-rhDmGRdOK1c9CohQcS00ab52MD3IPiFFZ8_Iljnii-RpXKZoTjdcULJVn_uZNk_SzSUK-7MVWmPBK15m6sNktOMSf0pCObKWRqHd15JSe-2aA2PKBo1jBH3tHbOgZyMPdsLI0QdmEnKB5FiiOeMpwn_oHnT6IjT-BZlB18VkW8rA'
                                                              "},{"location":"admin/kpanda/permissions/permission-brief.html","title":"\u5bb9\u5668\u7ba1\u7406\u6743\u9650\u8bf4\u660e","text":"

                                                              \u5bb9\u5668\u7ba1\u7406\u6743\u9650\u57fa\u4e8e\u5168\u5c40\u6743\u9650\u7ba1\u7406\u4ee5\u53ca Kubernetes RBAC \u6743\u9650\u7ba1\u7406\u6253\u9020\u7684\u591a\u7ef4\u5ea6\u6743\u9650\u7ba1\u7406\u4f53\u7cfb\u3002 \u652f\u6301\u96c6\u7fa4\u7ea7\u3001\u547d\u540d\u7a7a\u95f4\u7ea7\u7684\u6743\u9650\u63a7\u5236\uff0c\u5e2e\u52a9\u7528\u6237\u4fbf\u6377\u7075\u6d3b\u5730\u5bf9\u79df\u6237\u4e0b\u7684 IAM \u7528\u6237\u3001\u7528\u6237\u7ec4\uff08\u7528\u6237\u7684\u96c6\u5408\uff09\u8bbe\u5b9a\u4e0d\u540c\u7684\u64cd\u4f5c\u6743\u9650\u3002

                                                              "},{"location":"admin/kpanda/permissions/permission-brief.html#_2","title":"\u96c6\u7fa4\u6743\u9650","text":"

                                                              \u96c6\u7fa4\u6743\u9650\u57fa\u4e8e Kubernetes RBAC \u7684 ClusterRolebinding \u6388\u6743\uff0c\u96c6\u7fa4\u6743\u9650\u8bbe\u7f6e\u53ef\u8ba9\u7528\u6237/\u7528\u6237\u7ec4\u5177\u5907\u96c6\u7fa4\u76f8\u5173\u6743\u9650\u3002 \u76ee\u524d\u7684\u9ed8\u8ba4\u96c6\u7fa4\u89d2\u8272\u4e3a Cluster Admin \uff08\u4e0d\u5177\u5907\u96c6\u7fa4\u7684\u521b\u5efa\u3001\u5220\u9664\u6743\u9650\uff09\u3002
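Under the hood, granting a user this role therefore amounts to a ClusterRoleBinding against the role template shown below. A minimal hand-written sketch (the user name alice is hypothetical; the platform normally creates such bindings itself):

kubectl apply -f - <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: example-cluster-admin-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: role-template-cluster-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: alice
EOF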

                                                              "},{"location":"admin/kpanda/permissions/permission-brief.html#cluster-admin","title":"Cluster Admin","text":"

                                                              Cluster Admin \u5177\u6709\u4ee5\u4e0b\u6743\u9650\uff1a

                                                              • \u53ef\u7ba1\u7406\u3001\u7f16\u8f91\u3001\u67e5\u770b\u5bf9\u5e94\u96c6\u7fa4

                                                              • \u7ba1\u7406\u3001\u7f16\u8f91\u3001\u67e5\u770b \u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u6240\u6709\u5de5\u4f5c\u8d1f\u8f7d\u53ca\u96c6\u7fa4\u5185\u6240\u6709\u8d44\u6e90

                                                              • \u53ef\u6388\u6743\u7528\u6237\u4e3a\u96c6\u7fa4\u5185\u89d2\u8272 (Cluster Admin\u3001NS Admin\u3001NS Editor\u3001NS Viewer)

                                                              \u8be5\u96c6\u7fa4\u89d2\u8272\u7684 YAML \u793a\u4f8b\u5982\u4e0b\uff1a

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    kpanda.io/creator: system
  creationTimestamp: "2022-06-16T09:42:49Z"
  labels:
    iam.kpanda.io/role-template: "true"
  name: role-template-cluster-admin
  resourceVersion: "15168"
  uid: f8f86d42-d5ef-47aa-b284-097615795076
rules:
- apiGroups:
  - '*'
  resources:
  - '*'
  verbs:
  - '*'
- nonResourceURLs:
  - '*'
  verbs:
  - '*'
                                                              "},{"location":"admin/kpanda/permissions/permission-brief.html#_3","title":"\u547d\u540d\u7a7a\u95f4\u6743\u9650","text":"

                                                              \u547d\u540d\u7a7a\u95f4\u6743\u9650\u662f\u57fa\u4e8e Kubernetes RBAC \u80fd\u529b\u7684\u6388\u6743\uff0c\u53ef\u4ee5\u5b9e\u73b0\u4e0d\u540c\u7684\u7528\u6237/\u7528\u6237\u7ec4\u5bf9\u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u8d44\u6e90\u5177\u6709\u4e0d\u540c\u7684\u64cd\u4f5c\u6743\u9650(\u5305\u62ec Kubernetes API \u6743\u9650)\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\uff1aKubernetes RBAC\u3002\u76ee\u524d\u5bb9\u5668\u7ba1\u7406\u7684\u9ed8\u8ba4\u89d2\u8272\u4e3a\uff1aNS Admin\u3001NS Editor\u3001NS Viewer\u3002

                                                              "},{"location":"admin/kpanda/permissions/permission-brief.html#ns-admin","title":"NS Admin","text":"

                                                              NS Admin \u5177\u6709\u4ee5\u4e0b\u6743\u9650\uff1a

                                                              • \u53ef\u67e5\u770b\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4
                                                              • \u7ba1\u7406\u3001\u7f16\u8f91\u3001\u67e5\u770b \u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u6240\u6709\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u53ca\u81ea\u5b9a\u4e49\u8d44\u6e90
                                                              • \u53ef\u6388\u6743\u7528\u6237\u4e3a\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4\u89d2\u8272 (NS Editor\u3001NS Viewer)

                                                              \u8be5\u96c6\u7fa4\u89d2\u8272\u7684 YAML \u793a\u4f8b\u5982\u4e0b\uff1a

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    kpanda.io/creator: system
  creationTimestamp: "2022-06-16T09:42:49Z"
  labels:
    iam.kpanda.io/role-template: "true"
  name: role-template-ns-admin
  resourceVersion: "15173"
  uid: 69f64c7e-70e7-4c7c-a3e0-053f507f2bc3
rules:
- apiGroups:
  - '*'
  resources:
  - '*'
  verbs:
  - '*'
- nonResourceURLs:
  - '*'
  verbs:
  - '*'
                                                              "},{"location":"admin/kpanda/permissions/permission-brief.html#ns-editor","title":"NS Editor","text":"

                                                              NS Editor \u5177\u6709\u4ee5\u4e0b\u6743\u9650\uff1a

                                                              • \u53ef\u67e5\u770b\u5bf9\u5e94\u6709\u6743\u9650\u7684\u547d\u540d\u7a7a\u95f4
                                                              • \u7ba1\u7406\u3001\u7f16\u8f91\u3001\u67e5\u770b \u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u6240\u6709\u5de5\u4f5c\u8d1f\u8f7d
                                                              \u70b9\u51fb\u67e5\u770b\u96c6\u7fa4\u89d2\u8272\u7684 YAML \u793a\u4f8b
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    kpanda.io/creator: system
  creationTimestamp: "2022-06-16T09:42:50Z"
  labels:
    iam.kpanda.io/role-template: "true"
  name: role-template-ns-edit
  resourceVersion: "15175"
  uid: ca9e690e-96c0-4978-8915-6e4c00c748fe
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - endpoints
  - persistentvolumeclaims
  - persistentvolumeclaims/status
  - pods
  - replicationcontrollers
  - replicationcontrollers/scale
  - serviceaccounts
  - services
  - services/status
  verbs:
  - '*'
- apiGroups:
  - ""
  resources:
  - bindings
  - events
  - limitranges
  - namespaces/status
  - pods/log
  - pods/status
  - replicationcontrollers/status
  - resourcequotas
  - resourcequotas/status
  verbs:
  - '*'
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - '*'
- apiGroups:
  - apps
  resources:
  - controllerrevisions
  - daemonsets
  - daemonsets/status
  - deployments
  - deployments/scale
  - deployments/status
  - replicasets
  - replicasets/scale
  - replicasets/status
  - statefulsets
  - statefulsets/scale
  - statefulsets/status
  verbs:
  - '*'
- apiGroups:
  - autoscaling
  resources:
  - horizontalpodautoscalers
  - horizontalpodautoscalers/status
  verbs:
  - '*'
- apiGroups:
  - batch
  resources:
  - cronjobs
  - cronjobs/status
  - jobs
  - jobs/status
  verbs:
  - '*'
- apiGroups:
  - extensions
  resources:
  - daemonsets
  - daemonsets/status
  - deployments
  - deployments/scale
  - deployments/status
  - ingresses
  - ingresses/status
  - networkpolicies
  - replicasets
  - replicasets/scale
  - replicasets/status
  - replicationcontrollers/scale
  verbs:
  - '*'
- apiGroups:
  - policy
  resources:
  - poddisruptionbudgets
  - poddisruptionbudgets/status
  verbs:
  - '*'
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  - ingresses/status
  - networkpolicies
  verbs:
  - '*'
                                                              "},{"location":"admin/kpanda/permissions/permission-brief.html#ns-viewer","title":"NS Viewer","text":"

                                                              NS Viewer \u5177\u6709\u4ee5\u4e0b\u6743\u9650\uff1a

                                                              • \u53ef\u67e5\u770b\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4
                                                              • \u53ef\u67e5\u770b\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u6240\u6709\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u53ca\u81ea\u5b9a\u4e49\u8d44\u6e90
                                                              \u70b9\u51fb\u67e5\u770b\u96c6\u7fa4\u89d2\u8272\u7684 YAML \u793a\u4f8b
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    kpanda.io/creator: system
  creationTimestamp: "2022-06-16T09:42:50Z"
  labels:
    iam.kpanda.io/role-template: "true"
  name: role-template-ns-view
  resourceVersion: "15183"
  uid: 853888fd-6ee8-42ac-b91e-63923918baf8
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  - endpoints
  - persistentvolumeclaims
  - persistentvolumeclaims/status
  - pods
  - replicationcontrollers
  - replicationcontrollers/scale
  - serviceaccounts
  - services
  - services/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - bindings
  - events
  - limitranges
  - namespaces/status
  - pods/log
  - pods/status
  - replicationcontrollers/status
  - resourcequotas
  - resourcequotas/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - apps
  resources:
  - controllerrevisions
  - daemonsets
  - daemonsets/status
  - deployments
  - deployments/scale
  - deployments/status
  - replicasets
  - replicasets/scale
  - replicasets/status
  - statefulsets
  - statefulsets/scale
  - statefulsets/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - autoscaling
  resources:
  - horizontalpodautoscalers
  - horizontalpodautoscalers/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - batch
  resources:
  - cronjobs
  - cronjobs/status
  - jobs
  - jobs/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - daemonsets
  - daemonsets/status
  - deployments
  - deployments/scale
  - deployments/status
  - ingresses
  - ingresses/status
  - networkpolicies
  - replicasets
  - replicasets/scale
  - replicasets/status
  - replicationcontrollers/scale
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - policy
  resources:
  - poddisruptionbudgets
  - poddisruptionbudgets/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - networking.k8s.io
  resources:
  - ingresses
  - ingresses/status
  - networkpolicies
  verbs:
  - get
  - list
  - watch
                                                              "},{"location":"admin/kpanda/permissions/permission-brief.html#faq","title":"\u6743\u9650 FAQ","text":"
                                                              1. \u5168\u5c40\u6743\u9650\u548c\u5bb9\u5668\u7ba1\u7406\u6743\u9650\u7ba1\u7406\u7684\u5173\u7cfb\uff1f

                                                                \u7b54\uff1a\u5168\u5c40\u6743\u9650\u4ec5\u6388\u6743\u4e3a\u7c97\u7c92\u5ea6\u6743\u9650\uff0c\u53ef\u7ba1\u7406\u6240\u6709\u96c6\u7fa4\u7684\u521b\u5efa\u3001\u7f16\u8f91\u3001\u5220\u9664\uff1b\u800c\u5bf9\u4e8e\u7ec6\u7c92\u5ea6\u7684\u6743\u9650\uff0c\u5982\u5355\u4e2a\u96c6\u7fa4\u7684\u7ba1\u7406\u6743\u9650\uff0c\u5355\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u7ba1\u7406\u3001\u7f16\u8f91\u3001\u5220\u9664\u6743\u9650\uff0c\u9700\u8981\u57fa\u4e8e Kubernetes RBAC \u7684\u5bb9\u5668\u7ba1\u7406\u6743\u9650\u8fdb\u884c\u5b9e\u73b0\u3002 \u4e00\u822c\u6743\u9650\u7684\u7528\u6237\u4ec5\u9700\u8981\u5728\u5bb9\u5668\u7ba1\u7406\u4e2d\u8fdb\u884c\u6388\u6743\u5373\u53ef\u3002

                                                              2. \u76ee\u524d\u4ec5\u652f\u6301\u56db\u4e2a\u9ed8\u8ba4\u89d2\u8272\uff0c\u540e\u53f0\u81ea\u5b9a\u4e49\u89d2\u8272\u7684 RoleBinding \u4ee5\u53ca ClusterRoleBinding \uff08Kubernetes \u7ec6\u7c92\u5ea6\u7684 RBAC\uff09\u662f\u5426\u4e5f\u80fd\u751f\u6548\uff1f

                                                                \u7b54\uff1a\u76ee\u524d\u81ea\u5b9a\u4e49\u6743\u9650\u6682\u65f6\u65e0\u6cd5\u901a\u8fc7\u56fe\u5f62\u754c\u9762\u8fdb\u884c\u7ba1\u7406\uff0c\u4f46\u662f\u901a\u8fc7 kubectl \u521b\u5efa\u7684\u6743\u9650\u89c4\u5219\u540c\u6837\u80fd\u751f\u6548\u3002
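As a concrete illustration of the second answer, a custom namespace-scoped rule created directly with kubectl takes effect even though it is not shown in the UI. A minimal sketch; the role, binding, and user names are hypothetical:

kubectl create role pod-reader --verb=get,list,watch --resource=pods -n default
kubectl create rolebinding pod-reader-binding --role=pod-reader --user=bob -n default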

                                                              "},{"location":"admin/kpanda/scale/create-hpa.html","title":"\u57fa\u4e8e\u5185\u7f6e\u6307\u6807\u521b\u5efa HPA","text":"

                                                              \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301 Pod \u8d44\u6e90\u57fa\u4e8e\u6307\u6807\u8fdb\u884c\u5f39\u6027\u4f38\u7f29\uff08Horizontal Pod Autoscaling, HPA\uff09\u3002 \u7528\u6237\u53ef\u4ee5\u901a\u8fc7\u8bbe\u7f6e CPU \u5229\u7528\u7387\u3001\u5185\u5b58\u7528\u91cf\u53ca\u81ea\u5b9a\u4e49\u6307\u6807\u6307\u6807\u6765\u52a8\u6001\u8c03\u6574 Pod \u8d44\u6e90\u7684\u526f\u672c\u6570\u91cf\u3002 \u4f8b\u5982\uff0c\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u8bbe\u7f6e\u57fa\u4e8e CPU \u5229\u7528\u7387\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u540e\uff0c\u5f53 Pod \u7684 CPU \u5229\u7528\u7387\u8d85\u8fc7/\u4f4e\u4e8e\u60a8\u8bbe\u7f6e\u7684\u6307\u6807\u9600\u503c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u63a7\u5236\u5668\u5c06\u4f1a\u81ea\u52a8\u589e\u52a0/\u8f83\u5c11 Pod \u526f\u672c\u6570\u3002

                                                              \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u57fa\u4e8e\u5185\u7f6e\u6307\u6807\u7684\u5f39\u6027\u4f38\u7f29\u3002

                                                              Note

                                                              1. HPA \u4ec5\u9002\u7528\u4e8e Deployment \u548c StatefulSet\uff0c\u6bcf\u4e2a\u5de5\u4f5c\u8d1f\u8f7d\u53ea\u80fd\u521b\u5efa\u4e00\u4e2a HPA\u3002
                                                              2. \u5982\u679c\u57fa\u4e8e CPU \u5229\u7528\u7387\u521b\u5efa HPA \u7b56\u7565\uff0c\u5fc5\u987b\u9884\u5148\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u8bbe\u7f6e\u914d\u7f6e\u9650\u5236\uff08Limit\uff09\uff0c\u5426\u5219\u65e0\u6cd5\u8ba1\u7b97 CPU \u5229\u7528\u7387\u3002
                                                              3. \u5982\u679c\u540c\u65f6\u4f7f\u7528\u5185\u7f6e\u6307\u6807\u548c\u591a\u79cd\u81ea\u5b9a\u4e49\u6307\uff0cHPA \u4f1a\u6839\u636e\u591a\u9879\u6307\u6807\u5206\u522b\u8ba1\u7b97\u6240\u9700\u4f38\u7f29\u526f\u672c\u6570\uff0c\u53d6\u8f83\u5927\u503c\uff08\u4f46\u4e0d\u4f1a\u8d85\u8fc7\u8bbe\u7f6e HPA \u7b56\u7565\u65f6\u914d\u7f6e\u7684\u6700\u5927\u526f\u672c\u6570\uff09\u8fdb\u884c\u5f39\u6027\u4f38\u7f29\u3002
                                                              "},{"location":"admin/kpanda/scale/create-hpa.html#_1","title":"\u5185\u7f6e\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565","text":"

                                                              \u7cfb\u7edf\u5185\u7f6e\u4e86 CPU \u548c\u5185\u5b58\u4e24\u79cd\u5f39\u6027\u4f38\u7f29\u6307\u6807\u4ee5\u6ee1\u8db3\u7528\u6237\u7684\u57fa\u7840\u4e1a\u52a1\u4f7f\u7528\u573a\u666f\u3002
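For orientation, the UI form ultimately corresponds to an autoscaling/v2 HorizontalPodAutoscaler object. A minimal sketch targeting a hypothetical Deployment named nginx with both built-in metrics (when several metrics are configured, the largest computed replica count wins, capped by maxReplicas):

kubectl apply -f - <<EOF
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: hpa-my-dep
  namespace: default
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: nginx
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 80
  - type: Resource
    resource:
      name: memory
      target:
        type: AverageValue
        averageValue: 512Mi
EOF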

                                                              "},{"location":"admin/kpanda/scale/create-hpa.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u5728\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u5185\u7f6e\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                              • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u521b\u5efa\u6216\u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u521b\u5efa\u3002

                                                              • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                              • \u5df2\u5b8c\u6210 metrics-server \u63d2\u4ef6\u5b89\u88c5 \u3002

                                                              "},{"location":"admin/kpanda/scale/create-hpa.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                              \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u5185\u7f6e\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u3002

                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \u8fdb\u5165\u96c6\u7fa4\u5217\u8868\u9875\u9762\u3002\u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                                              2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d \u8fdb\u5165\u5de5\u4f5c\u8d1f\u8f7d\u5217\u8868\u540e\uff0c\u70b9\u51fb\u4e00\u4e2a\u8d1f\u8f7d\u540d\u79f0\uff0c\u8fdb\u5165 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5 \u9875\u9762\u3002

                                                              3. \u70b9\u51fb \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u7684\u5f39\u6027\u4f38\u7f29\u914d\u7f6e\u60c5\u51b5\u3002

                                                              4. \u786e\u8ba4\u96c6\u7fa4\u5df2\u5b89\u88c5\u4e86 metrics-server \u63d2\u4ef6\uff0c\u4e14\u63d2\u4ef6\u8fd0\u884c\u72b6\u6001\u4e3a\u6b63\u5e38\u540e\uff0c\u5373\u53ef\u70b9\u51fb \u65b0\u5efa\u4f38\u7f29 \u6309\u94ae\u3002

                                                              5. \u521b\u5efa\u81ea\u5b9a\u4e49\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u53c2\u6570\u3002

                                                                • \u7b56\u7565\u540d\u79f0\uff1a\u8f93\u5165\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u7684\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 hpa-my-dep\u3002
                                                                • \u547d\u540d\u7a7a\u95f4\uff1a\u8d1f\u8f7d\u6240\u5728\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                                                • \u5de5\u4f5c\u8d1f\u8f7d\uff1a\u6267\u884c\u5f39\u6027\u4f38\u7f29\u7684\u5de5\u4f5c\u8d1f\u8f7d\u5bf9\u8c61\u3002
                                                                • \u76ee\u6807 CPU \u5229\u7528\u7387\uff1a\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u4e0b Pod \u7684 CPU \u4f7f\u7528\u7387\u3002\u8ba1\u7b97\u65b9\u5f0f\u4e3a\uff1a\u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u7684 Pod \u8d44\u6e90 / \u5de5\u4f5c\u8d1f\u8f7d\u7684\u8bf7\u6c42\uff08request\uff09\u503c\u3002\u5f53\u5b9e\u9645 CPU \u7528\u91cf\u5927\u4e8e/\u5c0f\u4e8e\u76ee\u6807\u503c\u65f6\uff0c\u7cfb\u7edf\u81ea\u52a8\u51cf\u5c11/\u589e\u52a0 Pod \u526f\u672c\u6570\u91cf\u3002
                                                                • \u76ee\u6807\u5185\u5b58\u7528\u91cf\uff1a\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u4e0b\u7684 Pod \u7684\u5185\u5b58\u7528\u91cf\u3002\u5f53\u5b9e\u9645\u5185\u5b58\u7528\u91cf\u5927\u4e8e/\u5c0f\u4e8e\u76ee\u6807\u503c\u65f6\uff0c\u7cfb\u7edf\u81ea\u52a8\u51cf\u5c11/\u589e\u52a0 Pod \u526f\u672c\u6570\u91cf\u3002
                                                                • \u526f\u672c\u8303\u56f4\uff1aPod \u526f\u672c\u6570\u7684\u5f39\u6027\u4f38\u7f29\u8303\u56f4\u3002\u9ed8\u8ba4\u533a\u95f4\u4e3a\u4e3a 1 - 10\u3002
                                                              6. \u5b8c\u6210\u53c2\u6570\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u6309\u94ae\uff0c\u81ea\u52a8\u8fd4\u56de\u5f39\u6027\u4f38\u7f29\u8be6\u60c5\u9875\u9762\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u6267\u884c\u7f16\u8f91\u3001\u5220\u9664\u64cd\u4f5c\uff0c\u8fd8\u53ef\u4ee5\u67e5\u770b\u76f8\u5173\u4e8b\u4ef6\u3002
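The policy created above corresponds to a standard Kubernetes HorizontalPodAutoscaler object. Below is a minimal sketch of an equivalent manifest; the names and target values are illustrative, not taken from the platform:

```yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: hpa-my-dep              # illustrative policy name
  namespace: default            # namespace of the workload
spec:
  scaleTargetRef:               # the workload object to be scaled
    apiVersion: apps/v1
    kind: Deployment
    name: my-dep
  minReplicas: 1                # lower bound of the replica range
  maxReplicas: 10               # upper bound of the replica range
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 80  # target CPU utilization in percent
```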

                                                              "},{"location":"admin/kpanda/scale/create-vpa.html","title":"\u521b\u5efa VPA","text":"

                                                              \u5bb9\u5668\u5782\u76f4\u6269\u7f29\u5bb9\u7b56\u7565\uff08Vertical Pod Autoscaler, VPA\uff09\u901a\u8fc7\u76d1\u63a7 Pod \u5728\u4e00\u6bb5\u65f6\u95f4\u5185\u7684\u8d44\u6e90\u7533\u8bf7\u548c\u7528\u91cf\uff0c \u8ba1\u7b97\u51fa\u5bf9\u8be5 Pod \u800c\u8a00\u6700\u9002\u5408\u7684 CPU \u548c\u5185\u5b58\u8bf7\u6c42\u503c\u3002\u4f7f\u7528 VPA \u53ef\u4ee5\u66f4\u52a0\u5408\u7406\u5730\u4e3a\u96c6\u7fa4\u4e0b\u6bcf\u4e2a Pod \u5206\u914d\u8d44\u6e90\uff0c\u63d0\u9ad8\u96c6\u7fa4\u7684\u6574\u4f53\u8d44\u6e90\u5229\u7528\u7387\uff0c\u907f\u514d\u96c6\u7fa4\u8d44\u6e90\u6d6a\u8d39\u3002

                                                              \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u901a\u8fc7\u5bb9\u5668\u5782\u76f4\u6269\u7f29\u5bb9\u7b56\u7565\uff08Vertical Pod Autoscaler, VPA\uff09\uff0c\u57fa\u4e8e\u6b64\u529f\u80fd\u53ef\u4ee5\u6839\u636e\u5bb9\u5668\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\u52a8\u6001\u8c03\u6574 Pod \u8bf7\u6c42\u503c\u3002 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u901a\u8fc7\u624b\u52a8\u548c\u81ea\u52a8\u4e24\u79cd\u65b9\u5f0f\u6765\u4fee\u6539\u8d44\u6e90\u8bf7\u6c42\u503c\uff0c\u60a8\u53ef\u4ee5\u6839\u636e\u5b9e\u9645\u9700\u8981\u8fdb\u884c\u914d\u7f6e\u3002

                                                              \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e Pod \u5782\u76f4\u4f38\u7f29\u3002

                                                              Warning

                                                              \u4f7f\u7528 VPA \u4fee\u6539 Pod \u8d44\u6e90\u8bf7\u6c42\u4f1a\u89e6\u53d1 Pod \u91cd\u542f\u3002\u7531\u4e8e Kubernetes \u672c\u8eab\u7684\u9650\u5236\uff0c Pod \u91cd\u542f\u540e\u53ef\u80fd\u4f1a\u88ab\u8c03\u5ea6\u5230\u5176\u5b83\u8282\u70b9\u4e0a\u3002

                                                              "},{"location":"admin/kpanda/scale/create-vpa.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u5782\u76f4\u4f38\u7f29\u7b56\u7565\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                              • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u3001\u7528\u6237\u3001\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u6216\u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u3002

                                                              • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                              • \u5f53\u524d\u96c6\u7fa4\u5df2\u7ecf\u5b89\u88c5 metrics-server \u548c VPA \u63d2\u4ef6\u3002

                                                              "},{"location":"admin/kpanda/scale/create-vpa.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                              \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u5185\u7f6e\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u3002

                                                              1. \u5728 \u96c6\u7fa4\u5217\u8868 \u4e2d\u627e\u5230\u76ee\u524d\u96c6\u7fa4\uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                                                              2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u627e\u5230\u9700\u8981\u521b\u5efa VPA \u7684\u8d1f\u8f7d\uff0c\u70b9\u51fb\u8be5\u8d1f\u8f7d\u7684\u540d\u79f0\u3002

                                                                3. \u70b9\u51fb \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u7684\u5f39\u6027\u4f38\u7f29\u914d\u7f6e\uff0c\u786e\u8ba4\u5df2\u7ecf\u5b89\u88c5\u4e86\u76f8\u5173\u63d2\u4ef6\u5e76\u4e14\u63d2\u4ef6\u662f\u5426\u8fd0\u884c\u6b63\u5e38\u3002

                                                              3. \u70b9\u51fb \u65b0\u5efa\u4f38\u7f29 \u6309\u94ae\uff0c\u5e76\u914d\u7f6e VPA \u5782\u76f4\u4f38\u7f29\u7b56\u7565\u53c2\u6570\u3002

                                                                • \u7b56\u7565\u540d\u79f0\uff1a\u8f93\u5165\u5782\u76f4\u4f38\u7f29\u7b56\u7565\u7684\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 vpa-my-dep\u3002
                                                                • \u4f38\u7f29\u6a21\u5f0f\uff1a\u6267\u884c\u4fee\u6539 CPU \u548c\u5185\u5b58\u8bf7\u6c42\u503c\u7684\u65b9\u5f0f\uff0c\u76ee\u524d\u5782\u76f4\u4f38\u7f29\u652f\u6301\u624b\u52a8\u548c\u81ea\u52a8\u4e24\u79cd\u4f38\u7f29\u6a21\u5f0f\u3002
                                                                  • \u624b\u52a8\u4f38\u7f29\uff1a\u5782\u76f4\u4f38\u7f29\u7b56\u7565\u8ba1\u7b97\u51fa\u63a8\u8350\u7684\u8d44\u6e90\u914d\u7f6e\u503c\u540e\uff0c\u9700\u7528\u6237\u624b\u52a8\u4fee\u6539\u5e94\u7528\u7684\u8d44\u6e90\u914d\u989d\u3002
                                                                  • \u81ea\u52a8\u4f38\u7f29\uff1a\u5782\u76f4\u4f38\u7f29\u7b56\u7565\u81ea\u52a8\u8ba1\u7b97\u548c\u4fee\u6539\u5e94\u7528\u7684\u8d44\u6e90\u914d\u989d\u3002
                                                                • \u76ee\u6807\u5bb9\u5668\uff1a\u9009\u62e9\u9700\u8981\u8fdb\u884c\u5782\u76f4\u4f38\u7f29\u7684\u5bb9\u5668\u3002
                                                              4. \u5b8c\u6210\u53c2\u6570\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u6309\u94ae\uff0c\u81ea\u52a8\u8fd4\u56de\u5f39\u6027\u4f38\u7f29\u8be6\u60c5\u9875\u9762\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u6267\u884c\u7f16\u8f91\u3001\u5220\u9664\u64cd\u4f5c\u3002
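The manual and automatic modes above map naturally onto the updateMode field of the upstream VerticalPodAutoscaler resource. The following is a minimal sketch under that assumption; the policy name, workload, and container name are illustrative:

```yaml
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: vpa-my-dep                   # illustrative policy name
spec:
  targetRef:                         # the workload to be scaled vertically
    apiVersion: apps/v1
    kind: Deployment
    name: my-dep
  updatePolicy:
    updateMode: "Off"                # "Off" only recommends values (manual mode); "Auto" applies them
  resourcePolicy:
    containerPolicies:
    - containerName: my-container    # the target container
      controlledResources: ["cpu", "memory"]
```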

Note

By default, the value of --min-replicas is 2, meaning VPA takes effect only when the replica count is greater than 1. This default behavior can be changed by modifying the --min-replicas parameter of the updater:

```yaml
spec:
  containers:
  - name: updater
    args:
    - "--min-replicas=2"
```
                                                              "},{"location":"admin/kpanda/scale/custom-hpa.html","title":"\u57fa\u4e8e\u81ea\u5b9a\u4e49\u6307\u6807\u521b\u5efa HPA","text":"

                                                              \u5f53\u7cfb\u7edf\u5185\u7f6e\u7684 CPU \u548c\u5185\u5b58\u4e24\u79cd\u6307\u6807\u4e0d\u80fd\u6ee1\u8db3\u60a8\u4e1a\u52a1\u7684\u5b9e\u9645\u9700\u6c42\u65f6\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u914d\u7f6e ServiceMonitoring \u6765\u6dfb\u52a0\u81ea\u5b9a\u4e49\u6307\u6807\uff0c \u5e76\u57fa\u4e8e\u81ea\u5b9a\u4e49\u6307\u6807\u5b9e\u73b0\u5f39\u6027\u4f38\u7f29\u3002\u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u57fa\u4e8e\u81ea\u5b9a\u4e49\u6307\u6807\u8fdb\u884c\u5f39\u6027\u4f38\u7f29\u3002

                                                              Note

                                                              1. HPA \u4ec5\u9002\u7528\u4e8e Deployment \u548c StatefulSet\uff0c\u6bcf\u4e2a\u5de5\u4f5c\u8d1f\u8f7d\u53ea\u80fd\u521b\u5efa\u4e00\u4e2a HPA\u3002
                                                              2. \u5982\u679c\u540c\u65f6\u4f7f\u7528\u5185\u7f6e\u6307\u6807\u548c\u591a\u79cd\u81ea\u5b9a\u4e49\u6307\uff0cHPA \u4f1a\u6839\u636e\u591a\u9879\u6307\u6807\u5206\u522b\u8ba1\u7b97\u6240\u9700\u4f38\u7f29\u526f\u672c\u6570\uff0c\u53d6\u8f83\u5927\u503c\uff08\u4f46\u4e0d\u4f1a\u8d85\u8fc7\u8bbe\u7f6e HPA \u7b56\u7565\u65f6\u914d\u7f6e\u7684\u6700\u5927\u526f\u672c\u6570\uff09\u8fdb\u884c\u5f39\u6027\u4f38\u7f29\u3002
                                                              "},{"location":"admin/kpanda/scale/custom-hpa.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u5728\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u81ea\u5b9a\u4e49\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c \u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762
                                                              • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u521b\u5efa\u6216\u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u521b\u5efa
                                                              • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c \u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743
                                                              • \u5df2\u5b89\u88c5 metrics-server \u63d2\u4ef6
                                                              • \u5df2\u5b89\u88c5 insight-agent \u63d2\u4ef6
                                                              • \u5df2\u5b89\u88c5 Prometheus-adapter \u63d2\u4ef6
                                                              "},{"location":"admin/kpanda/scale/custom-hpa.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                              \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u3002

                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \u8fdb\u5165\u96c6\u7fa4\u5217\u8868\u9875\u9762\u3002\u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                                              2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d \u8fdb\u5165\u5de5\u4f5c\u8d1f\u8f7d\u5217\u8868\u540e\uff0c\u70b9\u51fb\u4e00\u4e2a\u8d1f\u8f7d\u540d\u79f0\uff0c\u8fdb\u5165 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5 \u9875\u9762\u3002

                                                              3. \u70b9\u51fb \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u7684\u5f39\u6027\u4f38\u7f29\u914d\u7f6e\u60c5\u51b5\u3002

                                                              4. \u786e\u8ba4\u96c6\u7fa4\u5df2\u5b89\u88c5\u4e86 metrics-server \u3001Insight\u3001Prometheus-adapter \u63d2\u4ef6\u4e14\u63d2\u4ef6\u8fd0\u884c\u72b6\u6001\u4e3a\u6b63\u5e38\u540e\uff0c\u5373\u53ef\u70b9\u51fb \u65b0\u5efa\u4f38\u7f29 \u6309\u94ae\u3002

                                                                Note

                                                                \u5982\u679c\u76f8\u5173\u63d2\u4ef6\u672a\u5b89\u88c5\u6216\u63d2\u4ef6\u5904\u4e8e\u5f02\u5e38\u72b6\u6001\uff0c\u60a8\u5728\u9875\u9762\u4e0a\u5c06\u65e0\u6cd5\u770b\u89c1\u521b\u5efa\u81ea\u5b9a\u4e49\u6307\u6807\u5f39\u6027\u4f38\u7f29\u5165\u53e3\u3002

                                                              5. \u521b\u5efa\u81ea\u5b9a\u4e49\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u53c2\u6570\u3002

                                                                • \u7b56\u7565\u540d\u79f0\uff1a\u8f93\u5165\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u7684\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 hpa-my-dep\u3002
                                                                • \u547d\u540d\u7a7a\u95f4\uff1a\u8d1f\u8f7d\u6240\u5728\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                                                • \u5de5\u4f5c\u8d1f\u8f7d\uff1a\u6267\u884c\u5f39\u6027\u4f38\u7f29\u7684\u5de5\u4f5c\u8d1f\u8f7d\u5bf9\u8c61\u3002
                                                                • \u8d44\u6e90\u7c7b\u578b\uff1a\u8fdb\u884c\u76d1\u63a7\u7684\u81ea\u5b9a\u4e49\u6307\u6807\u7c7b\u578b\uff0c\u5305\u542b Pod \u548c Service \u4e24\u79cd\u7c7b\u578b\u3002
                                                                • \u6307\u6807\uff1a\u4f7f\u7528 ServiceMonitoring \u521b\u5efa\u7684\u81ea\u5b9a\u4e49\u6307\u6807\u540d\u79f0\u6216\u7cfb\u7edf\u5185\u7f6e\u7684\u81ea\u5b9a\u4e49\u6307\u6807\u540d\u79f0\u3002
                                                                • \u6570\u636e\u7c7b\u578b\uff1a\u7528\u4e8e\u8ba1\u7b97\u6307\u6807\u503c\u7684\u65b9\u6cd5\uff0c\u5305\u542b\u76ee\u6807\u503c\u548c\u76ee\u6807\u5e73\u5747\u503c\u4e24\u79cd\u7c7b\u578b\uff0c\u5f53\u8d44\u6e90\u7c7b\u578b\u4e3a Pod \u65f6\uff0c\u53ea\u652f\u6301\u4f7f\u7528\u76ee\u6807\u5e73\u5747\u503c\u3002
                                                              "},{"location":"admin/kpanda/scale/custom-hpa.html#_3","title":"\u64cd\u4f5c\u793a\u4f8b","text":"

                                                              \u672c\u6848\u4f8b\u4ee5 Golang \u4e1a\u52a1\u7a0b\u5e8f\u4e3a\u4f8b\uff0c\u8be5\u793a\u4f8b\u7a0b\u5e8f\u66b4\u9732\u4e86 httpserver_requests_total \u6307\u6807\uff0c\u5e76\u8bb0\u5f55 HTTP \u7684\u8bf7\u6c42\uff0c\u901a\u8fc7\u8be5\u6307\u6807\u53ef\u4ee5\u8ba1\u7b97\u51fa\u4e1a\u52a1\u7a0b\u5e8f\u7684 QPS \u503c\u3002

                                                              "},{"location":"admin/kpanda/scale/custom-hpa.html#_4","title":"\u90e8\u7f72\u4e1a\u52a1\u7a0b\u5e8f","text":"

                                                              \u4f7f\u7528 Deployment \u90e8\u7f72\u4e1a\u52a1\u7a0b\u5e8f\uff1a

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpserver
  namespace: httpserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: httpserver
  template:
    metadata:
      labels:
        app: httpserver
    spec:
      containers:
      - name: httpserver
        image: registry.imroc.cc/test/httpserver:custom-metrics
        imagePullPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
  name: httpserver
  namespace: httpserver
  labels:
    app: httpserver
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/path: "/metrics"
    prometheus.io/port: "http"
spec:
  type: ClusterIP
  ports:
  - port: 80
    protocol: TCP
    name: http
  selector:
    app: httpserver
```
                                                              "},{"location":"admin/kpanda/scale/custom-hpa.html#prometheus","title":"Prometheus \u91c7\u96c6\u4e1a\u52a1\u76d1\u63a7","text":"

                                                              \u82e5\u5df2\u5b89\u88c5 insight-agent\uff0c\u53ef\u4ee5\u901a\u8fc7\u521b\u5efa ServiceMonitor \u7684 CRD \u5bf9\u8c61\u914d\u7f6e Prometheus\u3002

                                                              \u64cd\u4f5c\u6b65\u9aa4\uff1a\u5728 \u96c6\u7fa4\u8be6\u60c5 -> \u81ea\u5b9a\u4e49\u8d44\u6e90 \u641c\u7d22\u201cservicemonitors.monitoring.coreos.com\"\uff0c\u70b9\u51fb\u540d\u79f0\u8fdb\u5165\u8be6\u60c5\u3002 \u901a\u8fc7\u521b\u5efa YAML\uff0c\u5728\u547d\u540d\u7a7a\u95f4 httpserver \u4e0b\u521b\u5efa\u5982\u4e0b\u793a\u4f8b\u7684 CRD\uff1a

```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: httpserver
  namespace: httpserver
  labels:
    operator.insight.io/managed-by: insight
spec:
  endpoints:
  - port: http
    interval: 5s
  namespaceSelector:
    matchNames:
    - httpserver
  selector:
    matchLabels:
      app: httpserver
```

Note

If Prometheus is installed via insight, the ServiceMonitor must carry the label operator.insight.io/managed-by: insight; this label is not needed if Prometheus is installed in some other way.

                                                              "},{"location":"admin/kpanda/scale/custom-hpa.html#prometheus-adapter","title":"\u5728 prometheus-adapter \u4e2d\u914d\u7f6e\u6307\u6807\u89c4\u5219","text":"

                                                              \u64cd\u4f5c\u6b65\u9aa4\uff1a\u5728 \u96c6\u7fa4\u8be6\u60c5 -> Helm \u5e94\u7528 \u641c\u7d22 \u201cprometheus-adapter\"\uff0c\u901a\u8fc7\u64cd\u4f5c\u680f\u8fdb\u5165\u66f4\u65b0\u9875\u9762\uff0c\u5728 YAML \u4e2d\u914d\u7f6e\u81ea\u5b9a\u4e49\u6307\u6807\uff0c\u793a\u4f8b\u5982\u4e0b\uff1a

```yaml
rules:
  custom:
    - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
      name:
        as: httpserver_requests_qps
        matches: httpserver_requests_total
      resources:
        template: <<.Resource>>
      seriesQuery: httpserver_requests_total
```

                                                              "},{"location":"admin/kpanda/scale/custom-hpa.html#_5","title":"\u521b\u5efa\u81ea\u5b9a\u4e49\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u53c2\u6570","text":"

                                                              \u6309\u7167\u4e0a\u8ff0\u6b65\u9aa4\u5728 Deployment \u4e2d\u627e\u5230\u5e94\u7528\u7a0b\u5e8f httpserver \u5e76\u901a\u8fc7\u81ea\u5b9a\u4e49\u6307\u6807\u521b\u5efa\u5f39\u6027\u4f38\u7f29\u3002
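Assuming the httpserver Deployment above and the httpserver_requests_qps metric exposed through prometheus-adapter, the resulting policy corresponds to an autoscaling/v2 HPA with a Pods-type metric. A minimal sketch, with illustrative replica bounds and target value:

```yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: httpserver
  namespace: httpserver
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: httpserver
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: Pods                           # Pod resource type: only target average value is supported
    pods:
      metric:
        name: httpserver_requests_qps    # custom metric exposed via prometheus-adapter
      target:
        type: AverageValue
        averageValue: "50"               # illustrative: scale when average QPS per Pod exceeds 50
```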

                                                              "},{"location":"admin/kpanda/scale/hpa-cronhpa-compatibility-rules.html","title":"HPA \u548c CronHPA \u517c\u5bb9\u89c4\u5219","text":"

                                                              HPA \u5168\u79f0\u4e3a HorizontalPodAutoscaler\uff0c\u5373 Pod \u6c34\u5e73\u81ea\u52a8\u4f38\u7f29\u3002

                                                              CronHPA \u5168\u79f0\u4e3a Cron HorizontalPodAutoscaler\uff0c\u5373 Pod \u5b9a\u65f6\u7684\u6c34\u5e73\u81ea\u52a8\u4f38\u7f29\u3002

                                                              "},{"location":"admin/kpanda/scale/hpa-cronhpa-compatibility-rules.html#cronhpa-hpa","title":"CronHPA \u548c HPA \u517c\u5bb9\u51b2\u7a81","text":"

                                                              \u5b9a\u65f6\u4f38\u7f29 CronHPA \u901a\u8fc7\u8bbe\u7f6e\u5b9a\u65f6\u7684\u65b9\u5f0f\u89e6\u53d1\u5bb9\u5668\u7684\u6c34\u5e73\u526f\u672c\u4f38\u7f29\u3002\u4e3a\u4e86\u9632\u6b62\u7a81\u53d1\u7684\u6d41\u91cf\u51b2\u51fb\u7b49\u72b6\u51b5\uff0c \u60a8\u53ef\u80fd\u5df2\u7ecf\u914d\u7f6e HPA \u4fdd\u969c\u5e94\u7528\u7684\u6b63\u5e38\u8fd0\u884c\u3002\u5982\u679c\u540c\u65f6\u68c0\u6d4b\u5230\u4e86 HPA \u548c CronHPA \u7684\u5b58\u5728\uff0c \u7531\u4e8e CronHPA \u548c HPA \u76f8\u4e92\u72ec\u7acb\u65e0\u6cd5\u611f\u77e5\uff0c\u5c31\u4f1a\u51fa\u73b0\u4e24\u4e2a\u63a7\u5236\u5668\u5404\u81ea\u5de5\u4f5c\uff0c\u540e\u6267\u884c\u7684\u64cd\u4f5c\u4f1a\u8986\u76d6\u5148\u6267\u884c\u7684\u64cd\u4f5c\u3002

                                                              \u5bf9\u6bd4 CronHPA \u548c HPA \u7684\u5b9a\u4e49\u6a21\u677f\uff0c\u53ef\u4ee5\u89c2\u5bdf\u5230\u4ee5\u4e0b\u51e0\u70b9\uff1a

                                                              • CronHPA \u548c HPA \u90fd\u662f\u901a\u8fc7 scaleTargetRef \u5b57\u6bb5\u6765\u83b7\u53d6\u4f38\u7f29\u5bf9\u8c61\u3002
                                                              • CronHPA \u901a\u8fc7 jobs \u7684 crontab \u89c4\u5219\u5b9a\u65f6\u4f38\u7f29\u526f\u672c\u6570\u3002
                                                              • HPA \u901a\u8fc7\u8d44\u6e90\u5229\u7528\u7387\u5224\u65ad\u4f38\u7f29\u60c5\u51b5\u3002

                                                              Note

                                                              \u5982\u679c\u540c\u65f6\u8bbe\u7f6e CronHPA \u548c HPA\uff0c\u4f1a\u51fa\u73b0 CronHPA \u548c HPA \u540c\u65f6\u64cd\u4f5c\u4e00\u4e2a scaleTargetRef \u7684\u573a\u666f\u3002

                                                              "},{"location":"admin/kpanda/scale/hpa-cronhpa-compatibility-rules.html#cronhpa-hpa_1","title":"CronHPA \u548c HPA \u517c\u5bb9\u65b9\u6848","text":"

                                                              \u4ece\u4e0a\u6587\u53ef\u77e5\uff0cCronHPA \u548c HPA \u540c\u65f6\u4f7f\u7528\u4f1a\u5bfc\u81f4\u540e\u6267\u884c\u7684\u64cd\u4f5c\u8986\u76d6\u5148\u6267\u884c\u64cd\u4f5c\u7684\u672c\u8d28\u539f\u56e0\u662f\u4e24\u4e2a\u63a7\u5236\u5668\u65e0\u6cd5\u76f8\u4e92\u611f\u77e5\uff0c \u90a3\u4e48\u53ea\u9700\u8981\u8ba9 CronHPA \u611f\u77e5 HPA \u7684\u5f53\u524d\u72b6\u6001\u5c31\u80fd\u89e3\u51b3\u51b2\u7a81\u95ee\u9898\u3002

                                                              \u7cfb\u7edf\u4f1a\u5c06 HPA \u4f5c\u4e3a\u5b9a\u65f6\u4f38\u7f29 CronHPA \u7684\u6269\u7f29\u5bb9\u5bf9\u8c61\uff0c\u4ece\u800c\u5b9e\u73b0\u5bf9\u8be5 HPA \u5b9a\u4e49\u7684 Deployment \u5bf9\u8c61\u7684\u5b9a\u65f6\u6269\u7f29\u5bb9\u3002

                                                              HPA \u7684\u5b9a\u4e49\u5c06 Deployment \u914d\u7f6e\u5728 scaleTargetRef \u5b57\u6bb5\u4e0b\uff0c\u7136\u540e Deployment \u901a\u8fc7\u81ea\u8eab\u5b9a\u4e49\u67e5\u627e ReplicaSet\uff0c\u6700\u540e\u901a\u8fc7 ReplicaSet \u8c03\u6574\u771f\u5b9e\u7684\u526f\u672c\u6570\u76ee\u3002

                                                              \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5c06 CronHPA \u4e2d\u7684 scaleTargetRef \u8bbe\u7f6e\u4e3a HPA \u5bf9\u8c61\uff0c\u7136\u540e\u901a\u8fc7 HPA \u5bf9\u8c61\u6765\u5bfb\u627e\u771f\u5b9e\u7684 scaleTargetRef\uff0c\u4ece\u800c\u8ba9 CronHPA \u611f\u77e5 HPA \u7684\u5f53\u524d\u72b6\u6001\u3002

                                                              CronHPA \u4f1a\u901a\u8fc7\u8c03\u6574 HPA \u7684\u65b9\u5f0f\u611f\u77e5 HPA\u3002CronHPA \u901a\u8fc7\u8bc6\u522b\u8981\u8fbe\u5230\u7684\u526f\u672c\u6570\u4e0e\u5f53\u524d\u526f\u672c\u6570\u4e24\u8005\u95f4\u7684\u8f83\u5927\u503c\uff0c \u5224\u65ad\u662f\u5426\u9700\u8981\u6269\u7f29\u5bb9\u53ca\u4fee\u6539 HPA \u7684\u4e0a\u9650\uff1bCronHPA \u901a\u8fc7\u8bc6\u522b CronHPA \u8981\u8fbe\u5230\u7684\u526f\u672c\u6570\u4e0e HPA \u7684\u914d\u7f6e\u95f4\u7684\u8f83\u5c0f\u503c\uff0c\u5224\u65ad\u662f\u5426\u9700\u8981\u4fee\u6539 HPA \u7684\u4e0b\u9650\u3002
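A minimal sketch of this pattern, assuming the kubernetes-cronhpa-controller CRD (names, schedule, and target size are illustrative): the CronHPA's scaleTargetRef points at the HPA object instead of pointing directly at the Deployment.

```yaml
apiVersion: autoscaling.alibabacloud.com/v1beta1
kind: CronHorizontalPodAutoscaler
metadata:
  name: cronhpa-sample          # illustrative name
  namespace: default
spec:
  scaleTargetRef:               # points at the HPA, not at the Deployment
    apiVersion: autoscaling/v2
    kind: HorizontalPodAutoscaler
    name: hpa-my-dep
  jobs:
  - name: scale-up-before-peak
    schedule: "0 0 8 * * *"     # six-field crontab (with seconds): every day at 08:00:00
    targetSize: 10              # desired replica count at that time
```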

                                                              "},{"location":"admin/kpanda/scale/install-cronhpa.html","title":"\u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6","text":"

                                                              \u5bb9\u5668\u526f\u672c\u5b9a\u65f6\u6c34\u5e73\u6269\u7f29\u5bb9\u7b56\u7565\uff08CronHPA\uff09\u80fd\u591f\u4e3a\u5468\u671f\u6027\u9ad8\u5e76\u53d1\u5e94\u7528\u63d0\u4f9b\u7a33\u5b9a\u7684\u8ba1\u7b97\u8d44\u6e90\u4fdd\u969c\uff0c kubernetes-cronhpa-controller \u5219\u662f\u5b9e\u73b0 CronHPA \u7684\u5173\u952e\u7ec4\u4ef6\u3002

                                                              \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6\u3002

                                                              Note

                                                              \u4e3a\u4e86\u4f7f\u7528 CornHPA\uff0c\u4e0d\u4ec5\u9700\u8981\u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6\uff0c\u8fd8\u8981\u5b89\u88c5 metrics-server \u63d2\u4ef6\u3002

                                                              "},{"location":"admin/kpanda/scale/install-cronhpa.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                              • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u3002

                                                              • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                              "},{"location":"admin/kpanda/scale/install-cronhpa.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                              \u53c2\u8003\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6\u3002

                                                              1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u627e\u5230\u9700\u8981\u5b89\u88c5\u6b64\u63d2\u4ef6\u7684\u76ee\u6807\u96c6\u7fa4\uff0c\u70b9\u51fb\u8be5\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u70b9\u51fb \u5de5\u4f5c\u8d1f\u8f7d -> \u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d \uff0c\u70b9\u51fb\u76ee\u6807\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u3002

                                                              2. \u5728\u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u5728 CronHPA \u53f3\u4fa7\u70b9\u51fb \u5b89\u88c5 \u3002

                                                              3. \u9605\u8bfb\u8be5\u63d2\u4ef6\u7684\u76f8\u5173\u4ecb\u7ecd\uff0c\u9009\u62e9\u7248\u672c\u540e\u70b9\u51fb \u5b89\u88c5 \u6309\u94ae\u3002\u63a8\u8350\u5b89\u88c5 1.3.0 \u6216\u66f4\u9ad8\u7248\u672c\u3002

                                                              4. \u53c2\u8003\u4ee5\u4e0b\u8bf4\u660e\u914d\u7f6e\u53c2\u6570\u3002

                                                                • \u540d\u79f0\uff1a\u8f93\u5165\u63d2\u4ef6\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 kubernetes-cronhpa-controller\u3002
                                                                • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u63d2\u4ef6\u5b89\u88c5\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u6b64\u5904\u4ee5 default \u4e3a\u4f8b\u3002
                                                                • \u7248\u672c\uff1a\u63d2\u4ef6\u7684\u7248\u672c\uff0c\u6b64\u5904\u4ee5 1.3.0 \u7248\u672c\u4e3a\u4f8b\u3002
                                                                • \u5c31\u7eea\u7b49\u5f85\uff1a\u542f\u7528\u540e\uff0c\u5c06\u7b49\u5f85\u5e94\u7528\u4e0b\u7684\u6240\u6709\u5173\u8054\u8d44\u6e90\u90fd\u5904\u4e8e\u5c31\u7eea\u72b6\u6001\uff0c\u624d\u4f1a\u6807\u8bb0\u5e94\u7528\u5b89\u88c5\u6210\u529f\u3002
                                                                • \u5931\u8d25\u5220\u9664\uff1a\u5982\u679c\u63d2\u4ef6\u5b89\u88c5\u5931\u8d25\uff0c\u5219\u5220\u9664\u5df2\u7ecf\u5b89\u88c5\u7684\u5173\u8054\u8d44\u6e90\u3002\u5f00\u542f\u540e\uff0c\u5c06\u9ed8\u8ba4\u540c\u6b65\u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u3002
                                                                • \u8be6\u60c5\u65e5\u5fd7\uff1a\u5f00\u542f\u540e\uff0c\u5c06\u8bb0\u5f55\u5b89\u88c5\u8fc7\u7a0b\u7684\u8be6\u7ec6\u65e5\u5fd7\u3002

                                                                Note

                                                                \u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u548c/\u6216 \u5931\u8d25\u5220\u9664 \u540e\uff0c\u5e94\u7528\u9700\u8981\u8f83\u957f\u65f6\u95f4\u624d\u4f1a\u88ab\u6807\u8bb0\u4e3a\u201c\u8fd0\u884c\u4e2d\u201d\u72b6\u6001\u3002

                                                              5. \u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \uff0c\u7cfb\u7edf\u5c06\u81ea\u52a8\u8df3\u8f6c\u81f3 Helm \u5e94\u7528 \u5217\u8868\u9875\u9762\u3002\u7a0d\u7b49\u51e0\u5206\u949f\u540e\u5237\u65b0\u9875\u9762\u4f5c\uff0c\u5373\u53ef\u770b\u5230\u521a\u521a\u5b89\u88c5\u7684\u5e94\u7528\u3002

                                                                Warning

                                                                \u5982\u9700\u5220\u9664 kubernetes-cronhpa-controller \u63d2\u4ef6\uff0c\u5e94\u5728 Helm \u5e94\u7528 \u5217\u8868\u9875\u9762\u624d\u80fd\u5c06\u5176\u5f7b\u5e95\u5220\u9664\u3002

                                                                \u5982\u679c\u5728\u5de5\u4f5c\u8d1f\u8f7d\u7684 \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\u4e0b\u5220\u9664\u63d2\u4ef6\uff0c\u8fd9\u53ea\u662f\u5220\u9664\u4e86\u8be5\u63d2\u4ef6\u7684\u5de5\u4f5c\u8d1f\u8f7d\u526f\u672c\uff0c\u63d2\u4ef6\u672c\u8eab\u4ecd\u672a\u5220\u9664\uff0c\u540e\u7eed\u91cd\u65b0\u5b89\u88c5\u8be5\u63d2\u4ef6\u65f6\u4e5f\u4f1a\u63d0\u793a\u9519\u8bef\u3002

                                                              6. \u56de\u5230\u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\u9875\u9762\u4e0b\u7684 \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u53ef\u4ee5\u770b\u5230\u754c\u9762\u663e\u793a \u63d2\u4ef6\u5df2\u5b89\u88c5 \u3002\u73b0\u5728\u53ef\u4ee5\u5f00\u59cb\u521b\u5efa CronHPA \u7b56\u7565\u4e86\u3002

                                                              "},{"location":"admin/kpanda/scale/install-metrics-server.html","title":"\u5b89\u88c5 metrics-server \u63d2\u4ef6","text":"

                                                              metrics-server \u662f Kubernetes \u5185\u7f6e\u7684\u8d44\u6e90\u4f7f\u7528\u6307\u6807\u91c7\u96c6\u7ec4\u4ef6\u3002 \u60a8\u53ef\u4ee5\u901a\u8fc7\u914d\u7f6e\u5f39\u6027\u4f38\u7f29\uff08HPA\uff09\u7b56\u7565\u6765\u5b9e\u73b0\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u81ea\u52a8\u6c34\u5e73\u4f38\u7f29 Pod \u526f\u672c\u3002

                                                              \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5 metrics-server \u3002

                                                              "},{"location":"admin/kpanda/scale/install-metrics-server.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u5b89\u88c5 metrics-server \u63d2\u4ef6\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                              • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3002

                                                              • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                              "},{"location":"admin/kpanda/scale/install-metrics-server.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                              \u8bf7\u6267\u884c\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 metrics-server \u63d2\u4ef6\u3002

                                                              1. \u5728\u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\u4e0b\u7684\u5f39\u6027\u4f38\u7f29\u9875\u9762\uff0c\u70b9\u51fb \u53bb\u5b89\u88c5 \uff0c\u8fdb\u5165 metrics-server \u63d2\u4ef6\u5b89\u88c5\u754c\u9762\u3002

                                                              2. \u9605\u8bfb metrics-server \u63d2\u4ef6\u76f8\u5173\u4ecb\u7ecd\uff0c\u9009\u62e9\u7248\u672c\u540e\u70b9\u51fb \u5b89\u88c5 \u6309\u94ae\u3002\u672c\u6587\u5c06\u4ee5 3.8.2 \u7248\u672c\u4e3a\u4f8b\u8fdb\u884c\u5b89\u88c5\uff0c\u63a8\u8350\u60a8\u5b89\u88c5 3.8.2 \u53ca\u66f4\u9ad8\u7248\u672c\u3002

                                                              3. \u5728\u5b89\u88c5\u914d\u7f6e\u754c\u9762\u914d\u7f6e\u57fa\u672c\u53c2\u6570\u3002

                                                                • \u540d\u79f0\uff1a\u8f93\u5165\u63d2\u4ef6\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 metrics-server-01\u3002
                                                                • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u63d2\u4ef6\u5b89\u88c5\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u6b64\u5904\u4ee5 default \u4e3a\u4f8b\u3002
                                                                • \u7248\u672c\uff1a\u63d2\u4ef6\u7684\u7248\u672c\uff0c\u6b64\u5904\u4ee5 3.8.2 \u7248\u672c\u4e3a\u4f8b\u3002
                                                                • \u5c31\u7eea\u7b49\u5f85\uff1a\u542f\u7528\u540e\uff0c\u5c06\u7b49\u5f85\u5e94\u7528\u4e0b\u6240\u6709\u5173\u8054\u8d44\u6e90\u5904\u4e8e\u5c31\u7eea\u72b6\u6001\uff0c\u624d\u4f1a\u6807\u8bb0\u5e94\u7528\u5b89\u88c5\u6210\u529f\u3002
                                                                • \u5931\u8d25\u5220\u9664\uff1a\u5f00\u542f\u540e\uff0c\u5c06\u9ed8\u8ba4\u540c\u6b65\u5f00\u542f\u5c31\u7eea\u7b49\u5f85\u3002\u5982\u679c\u5b89\u88c5\u5931\u8d25\uff0c\u5c06\u5220\u9664\u5b89\u88c5\u76f8\u5173\u8d44\u6e90\u3002
                                                                • \u8be6\u60c5\u65e5\u5fd7\uff1a\u5f00\u542f\u5b89\u88c5\u8fc7\u7a0b\u65e5\u5fd7\u7684\u8be6\u7ec6\u8f93\u51fa\u3002

                                                                Note

                                                                \u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u548c/\u6216 \u5931\u8d25\u5220\u9664 \u540e\uff0c\u5e94\u7528\u9700\u8981\u7ecf\u8fc7\u8f83\u957f\u65f6\u95f4\u624d\u4f1a\u88ab\u6807\u8bb0\u4e3a \u8fd0\u884c\u4e2d \u72b6\u6001\u3002

                                                              4. \u9ad8\u7ea7\u53c2\u6570\u914d\u7f6e

                                                                • \u5982\u679c\u96c6\u7fa4\u7f51\u7edc\u65e0\u6cd5\u8bbf\u95ee k8s.gcr.io \u4ed3\u5e93\uff0c\u8bf7\u5c1d\u8bd5\u4fee\u6539 repositort \u53c2\u6570\u4e3a repository: k8s.m.daocloud.io/metrics-server/metrics-server

                                                                • \u5b89\u88c5 metrics-server \u63d2\u4ef6\u8fd8\u9700\u63d0\u4f9b SSL \u8bc1\u4e66\u3002\u5982\u9700\u7ed5\u8fc7\u8bc1\u4e66\u6821\u9a8c\uff0c\u9700\u8981\u5728 defaultArgs: \u5904\u6dfb\u52a0 - --kubelet-insecure-tls \u53c2\u6570\u3002

                                                                \u70b9\u51fb\u67e5\u770b\u63a8\u8350\u7684 YAML \u53c2\u6570
```yaml
image:
  repository: k8s.m.daocloud.io/metrics-server/metrics-server # change the repository address to k8s.m.daocloud.io
  tag: ''
  pullPolicy: IfNotPresent
imagePullSecrets: []
nameOverride: ''
fullnameOverride: ''
serviceAccount:
  create: true
  annotations: {}
  name: ''
rbac:
  create: true
  pspEnabled: false
apiService:
  create: true
podLabels: {}
podAnnotations: {}
podSecurityContext: {}
securityContext:
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000
priorityClassName: system-cluster-critical
containerPort: 4443
hostNetwork:
  enabled: false
replicas: 1
updateStrategy: {}
podDisruptionBudget:
  enabled: false
  minAvailable: null
  maxUnavailable: null
defaultArgs:
  - '--cert-dir=/tmp'
  - '--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname'
  - '--kubelet-use-node-status-port'
  - '--metric-resolution=15s'
  - --kubelet-insecure-tls # bypass certificate validation
args: []
livenessProbe:
  httpGet:
    path: /livez
    port: https
    scheme: HTTPS
  initialDelaySeconds: 0
  periodSeconds: 10
  failureThreshold: 3
readinessProbe:
  httpGet:
    path: /readyz
    port: https
    scheme: HTTPS
  initialDelaySeconds: 20
  periodSeconds: 10
  failureThreshold: 3
service:
  type: ClusterIP
  port: 443
  annotations: {}
  labels: {}
metrics:
  enabled: false
serviceMonitor:
  enabled: false
  additionalLabels: {}
  interval: 1m
  scrapeTimeout: 10s
resources: {}
extraVolumeMounts: []
extraVolumes: []
nodeSelector: {}
tolerations: []
affinity: {}
```
5. Click the OK button to complete the installation of the metrics-server plugin; the system then automatically redirects to the Helm Apps list page. Wait a few minutes and refresh the page to see the newly installed application.

Note

When deleting the metrics-server plugin, it can only be completely removed on the Helm Apps list page. If metrics-server is deleted only on the workload page, this only deletes the application's workload replica; the application itself is not deleted, and an error will be prompted when the plugin is reinstalled later.

                                                              "},{"location":"admin/kpanda/scale/install-vpa.html","title":"\u5b89\u88c5 vpa \u63d2\u4ef6","text":"

                                                              \u5bb9\u5668\u5782\u76f4\u6269\u7f29\u5bb9\u7b56\u7565\uff08Vertical Pod Autoscaler, VPA\uff09\u80fd\u591f\u8ba9\u96c6\u7fa4\u7684\u8d44\u6e90\u914d\u7f6e\u66f4\u52a0\u5408\u7406\uff0c\u907f\u514d\u96c6\u7fa4\u8d44\u6e90\u6d6a\u8d39\u3002 vpa \u5219\u662f\u5b9e\u73b0\u5bb9\u5668\u5782\u76f4\u6269\u7f29\u5bb9\u7684\u5173\u952e\u7ec4\u4ef6\u3002

                                                              \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5 vpa \u63d2\u4ef6\u3002

                                                              \u4e3a\u4e86\u4f7f\u7528 VPA \u7b56\u7565\uff0c\u4e0d\u4ec5\u9700\u8981\u5b89\u88c5 __vpa__ \u63d2\u4ef6\uff0c\u8fd8\u8981[\u5b89\u88c5 __metrics-server__ \u63d2\u4ef6](install-metrics-server.md)\u3002\n
                                                              "},{"location":"admin/kpanda/scale/install-vpa.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u5b89\u88c5 vpa \u63d2\u4ef6\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                              • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u3002

                                                              • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                              "},{"location":"admin/kpanda/scale/install-vpa.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                              \u53c2\u8003\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 vpa \u63d2\u4ef6\u3002

                                                              1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u627e\u5230\u9700\u8981\u5b89\u88c5\u6b64\u63d2\u4ef6\u7684\u76ee\u6807\u96c6\u7fa4\uff0c\u70b9\u51fb\u8be5\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u70b9\u51fb \u5de5\u4f5c\u8d1f\u8f7d -> \u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d \uff0c\u70b9\u51fb\u76ee\u6807\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u3002

                                                              2. \u5728\u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u5728 VPA \u53f3\u4fa7\u70b9\u51fb \u5b89\u88c5 \u3002

                                                                3. \u9605\u8bfb\u8be5\u63d2\u4ef6\u7684\u76f8\u5173\u4ecb\u7ecd\uff0c\u9009\u62e9\u7248\u672c\u540e\u70b9\u51fb \u5b89\u88c5 \u6309\u94ae\u3002\u63a8\u8350\u5b89\u88c5 1.5.0 \u6216\u66f4\u9ad8\u7248\u672c\u3002

                                                                4. \u67e5\u770b\u4ee5\u4e0b\u8bf4\u660e\u914d\u7f6e\u53c2\u6570\u3002

                                                                - \u540d\u79f0\uff1a\u8f93\u5165\u63d2\u4ef6\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 kubernetes-cronhpa-controller\u3002 - \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u63d2\u4ef6\u5b89\u88c5\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u6b64\u5904\u4ee5 default \u4e3a\u4f8b\u3002 - \u7248\u672c\uff1a\u63d2\u4ef6\u7684\u7248\u672c\uff0c\u6b64\u5904\u4ee5 4.5.0 \u7248\u672c\u4e3a\u4f8b\u3002 - \u5c31\u7eea\u7b49\u5f85\uff1a\u542f\u7528\u540e\uff0c\u5c06\u7b49\u5f85\u5e94\u7528\u4e0b\u7684\u6240\u6709\u5173\u8054\u8d44\u6e90\u90fd\u5904\u4e8e\u5c31\u7eea\u72b6\u6001\uff0c\u624d\u4f1a\u6807\u8bb0\u5e94\u7528\u5b89\u88c5\u6210\u529f\u3002 - \u5931\u8d25\u5220\u9664\uff1a\u5982\u679c\u63d2\u4ef6\u5b89\u88c5\u5931\u8d25\uff0c\u5219\u5220\u9664\u5df2\u7ecf\u5b89\u88c5\u7684\u5173\u8054\u8d44\u6e90\u3002\u5f00\u542f\u540e\uff0c\u5c06\u9ed8\u8ba4\u540c\u6b65\u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u3002 - \u8be6\u60c5\u65e5\u5fd7\uff1a\u5f00\u542f\u540e\uff0c\u5c06\u8bb0\u5f55\u5b89\u88c5\u8fc7\u7a0b\u7684\u8be6\u7ec6\u65e5\u5fd7\u3002

                                                                Note

                                                                \u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u548c/\u6216 \u5931\u8d25\u5220\u9664 \u540e\uff0c\u5e94\u7528\u9700\u8981\u7ecf\u8fc7\u8f83\u957f\u65f6\u95f4\u624d\u4f1a\u88ab\u6807\u8bb0\u4e3a\u201c\u8fd0\u884c\u4e2d\u201d\u72b6\u6001\u3002

                                                              3. \u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \uff0c\u7cfb\u7edf\u5c06\u81ea\u52a8\u8df3\u8f6c\u81f3 Helm \u5e94\u7528 \u5217\u8868\u9875\u9762\u3002\u7a0d\u7b49\u51e0\u5206\u949f\u540e\u5237\u65b0\u9875\u9762\u4f5c\uff0c\u5373\u53ef\u770b\u5230\u521a\u521a\u5b89\u88c5\u7684\u5e94\u7528\u3002

                                                                Warning

                                                                \u5982\u9700\u5220\u9664 vpa \u63d2\u4ef6\uff0c\u5e94\u5728 Helm \u5e94\u7528 \u5217\u8868\u9875\u9762\u624d\u80fd\u5c06\u5176\u5f7b\u5e95\u5220\u9664\u3002

                                                                \u5982\u679c\u5728\u5de5\u4f5c\u8d1f\u8f7d\u7684 \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\u4e0b\u5220\u9664\u63d2\u4ef6\uff0c\u8fd9\u53ea\u662f\u5220\u9664\u4e86\u8be5\u63d2\u4ef6\u7684\u5de5\u4f5c\u8d1f\u8f7d\u526f\u672c\uff0c\u63d2\u4ef6\u672c\u8eab\u4ecd\u672a\u5220\u9664\uff0c\u540e\u7eed\u91cd\u65b0\u5b89\u88c5\u8be5\u63d2\u4ef6\u65f6\u4e5f\u4f1a\u63d0\u793a\u9519\u8bef\u3002

                                                              4. \u56de\u5230\u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\u9875\u9762\u4e0b\u7684 \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u53ef\u4ee5\u770b\u5230\u754c\u9762\u663e\u793a \u63d2\u4ef6\u5df2\u5b89\u88c5 \u3002\u73b0\u5728\u53ef\u4ee5\u5f00\u59cb\u521b\u5efa VPA \u7b56\u7565\u4e86\u3002

                                                              "},{"location":"admin/kpanda/scale/knative/install.html","title":"\u5b89\u88c5","text":"

                                                              Knative \u662f\u4e00\u4e2a\u9762\u5411\u65e0\u670d\u52a1\u5668\u90e8\u7f72\u7684\u8de8\u5e73\u53f0\u89e3\u51b3\u65b9\u6848\u3002

                                                              1. \u767b\u5f55\u96c6\u7fa4\uff0c\u70b9\u51fb\u4fa7\u8fb9\u680f Helm \u5e94\u7528 -> Helm \u6a21\u677f \uff0c\u5728\u53f3\u4fa7\u4e0a\u65b9\u641c\u7d22\u6846\u8f93\u5165 knative \uff0c\u7136\u540e\u6309\u56de\u8f66\u952e\u641c\u7d22\u3002

                                                              2. \u70b9\u51fb\u641c\u7d22\u51fa\u7684 knative-operator \uff0c\u8fdb\u5165\u5b89\u88c5\u914d\u7f6e\u754c\u9762\u3002\u4f60\u53ef\u4ee5\u5728\u8be5\u754c\u9762\u67e5\u770b\u53ef\u7528\u7248\u672c\u4ee5\u53ca Helm values \u7684 Parameters \u53ef\u9009\u9879\u3002

                                                              3. \u70b9\u51fb\u5b89\u88c5\u6309\u94ae\u540e\uff0c\u8fdb\u5165\u5b89\u88c5\u914d\u7f6e\u754c\u9762\u3002

                                                              4. \u8f93\u5165\u540d\u79f0\uff0c\u5b89\u88c5\u79df\u6237\uff0c\u5efa\u8bae\u52fe\u9009 \u5c31\u7eea\u7b49\u5f85 \u548c \u8be6\u7ec6\u65e5\u5fd7 \u3002

                                                              5. \u5728\u4e0b\u65b9\u8bbe\u7f6e\uff0c\u53ef\u4ee5\u52fe\u9009 Serving \uff0c\u5e76\u8f93\u5165 Knative Serving \u7ec4\u4ef6\u7684\u5b89\u88c5\u79df\u6237\uff0c\u4f1a\u5728\u5b89\u88c5\u540e\u90e8\u7f72 Knative Serving \u7ec4\u4ef6\uff0c\u8be5\u7ec4\u4ef6\u7531 Knative Operator \u7ba1\u7406\u3002
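Under the hood, checking Serving corresponds to creating the operator's KnativeServing custom resource. A minimal sketch, assuming the Kourier ingress; the namespace and values are illustrative:

```yaml
apiVersion: operator.knative.dev/v1beta1
kind: KnativeServing
metadata:
  name: knative-serving
  namespace: knative-serving    # the tenant/namespace chosen at install time
spec:
  ingress:
    kourier:
      enabled: true             # use Knative's Envoy-based Kourier ingress
```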

                                                              "},{"location":"admin/kpanda/scale/knative/knative.html","title":"Kantive \u4ecb\u7ecd","text":"

                                                              Knative \u63d0\u4f9b\u4e86\u4e00\u79cd\u66f4\u9ad8\u5c42\u6b21\u7684\u62bd\u8c61\uff0c\u7b80\u5316\u5e76\u52a0\u901f\u4e86\u5728 Kubernetes \u4e0a\u6784\u5efa\u3001\u90e8\u7f72\u548c\u7ba1\u7406\u5e94\u7528\u7684\u8fc7\u7a0b\u3002\u5b83\u4f7f\u5f97\u5f00\u53d1\u4eba\u5458\u80fd\u591f\u66f4\u4e13\u6ce8\u4e8e\u4e1a\u52a1\u903b\u8f91\u7684\u5b9e\u73b0\uff0c\u800c\u5c06\u5927\u90e8\u5206\u57fa\u7840\u8bbe\u65bd\u548c\u8fd0\u7ef4\u5de5\u4f5c\u4ea4\u7ed9 Knative \u53bb\u5904\u7406\uff0c\u4ece\u800c\u663e\u8457\u63d0\u9ad8\u751f\u4ea7\u529b\u3002

                                                              "},{"location":"admin/kpanda/scale/knative/knative.html#_1","title":"\u7ec4\u4ef6","text":"

                                                              knative-operator \u8fd0\u884c\u7ec4\u4ef6\u5982\u4e0b\u3002

```
knative-operator   knative-operator-58f7d7db5c-7f6r5      1/1     Running     0     6m55s
knative-operator   operator-webhook-667dc67bc-qvrv4       1/1     Running     0     6m55s
```

The knative-serving components are as follows.

```
knative-serving        3scale-kourier-gateway-d69fbfbd-bd8d8   1/1     Running     0                 7m13s
knative-serving        activator-7c6fddd698-wdlng              1/1     Running     0                 7m3s
knative-serving        autoscaler-8f4b876bb-kd25p              1/1     Running     0                 7m17s
knative-serving        autoscaler-hpa-5f7f74679c-vkc7p         1/1     Running     0                 7m15s
knative-serving        controller-789c896c46-tfvsv             1/1     Running     0                 7m17s
knative-serving        net-kourier-controller-7db578c889-7gd5l 1/1     Running     0                 7m14s
knative-serving        webhook-5c88b94c5-78x7m                 1/1     Running     0                 7m1s
knative-serving        storage-version-migration-serving-serving-1.12.2-t7zvd   0/1  Completed   0   7m15s
```
| Component | Role |
| --- | --- |
| Activator | Queues requests (when a Knative Service has been scaled to zero). Calls the autoscaler to bring scaled-to-zero services back up and forwards the queued requests. The Activator can also act as a request buffer to absorb burst traffic. |
| Autoscaler | Scales Knative Services based on configuration, metrics, and incoming requests. |
| Controller | Manages the state of Knative CRs. It watches multiple objects, manages the lifecycle of dependent resources, and updates resource status. |
| Queue-Proxy | A sidecar container injected into every Knative Service. Collects traffic data and reports it to the Autoscaler, which then issues scale-up or scale-down requests based on this data and preset rules. |
| Webhooks | Knative Serving has several webhooks responsible for validating and mutating Knative resources. |

"},{"location":"admin/kpanda/scale/knative/knative.html#ingress","title":"Ingress Traffic Entry Options","text":"

| Option | Applicable scenario |
| --- | --- |
| Istio | If Istio is already in use, it can be chosen as the traffic entry point. |
| Contour | If Contour is already enabled in the cluster, it can be chosen as the traffic entry point. |
| Kourier | If neither of the two Ingress components above is present, Knative's Envoy-based Kourier Ingress can be used as the traffic entry point. |

"},{"location":"admin/kpanda/scale/knative/knative.html#autoscaler","title":"Autoscaler Comparison","text":"

| Autoscaler type | Core part of Knative Serving | Enabled by default | Scale-to-zero support | CPU-based autoscaling support |
| --- | --- | --- | --- | --- |
| Knative Pod Autoscaler (KPA) | Yes | Yes | Yes | No |
| Horizontal Pod Autoscaler (HPA) | No | Must be enabled after installing Knative Serving | No | Yes |

"},{"location":"admin/kpanda/scale/knative/knative.html#crd","title":"CRD","text":"

| Resource type | API name | Description |
| --- | --- | --- |
| Services | service.serving.knative.dev | Automatically manages the entire lifecycle of the workload, controls the creation of the other objects, and ensures the application has Routes, Configurations, and a new revision for every update. |
| Routes | route.serving.knative.dev | Maps a network endpoint to one or more revisions, supporting traffic splitting and version routing. |
| Configurations | configuration.serving.knative.dev | Maintains the desired state of the deployment, separating code from configuration following the Twelve-Factor App methodology; modifying a configuration creates a new revision. |
| Revisions | revision.serving.knative.dev | A point-in-time snapshot of the workload at each modification; revisions are immutable objects and can scale up and down automatically with traffic. |

"},{"location":"admin/kpanda/scale/knative/playground.html","title":"Knative in Practice","text":"

In this section, we will work through several hands-on practices to learn Knative in depth.

                                                              "},{"location":"admin/kpanda/scale/knative/playground.html#case-1-hello-world","title":"case 1 - Hello World","text":"
                                                              apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n

You can use kubectl to check the status of the deployed application. Knative automatically configures an ingress and an autoscaler for this application.

                                                              ~ kubectl get service.serving.knative.dev/hello\nNAME    URL                                              LATESTCREATED   LATESTREADY   READY   REASON\nhello   http://hello.knative-serving.knative.loulan.me   hello-00001     hello-00001   True\n

The YAML of the deployed Pod is as follows; the Pod consists of two containers: user-container and queue-proxy.

                                                              apiVersion: v1\nkind: Pod\nmetadata:\n  name: hello-00003-deployment-5fcb8ccbf-7qjfk\nspec:\n  containers:\n  - name: user-container\n  - name: queue-proxy\n
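To confirm the injected sidecar, you can list the container names in the Pod. A minimal sketch; the Pod name is the example above and will differ in your cluster.

kubectl get pod hello-00003-deployment-5fcb8ccbf-7qjfk -o jsonpath='{.spec.containers[*].name}'\n# expected output: user-container queue-proxy\n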

Request flow:

1. case 1: at low or zero traffic, requests are routed to the activator
2. case 2: under heavy traffic, requests are routed directly to the Pods only once traffic exceeds target-burst-capacity (see the annotation sketch after this list)
  1. set to 0: the activator is in the path only while scaling from zero
  2. set to -1: the activator always stays in the request path
  3. set to >0: the number of additional concurrent requests the system can absorb before scaling is triggered.
3. case 3: when traffic drops again, requests are routed back through the activator once current_demand + target-burst-capacity > (pods * concurrency-target), that is:

  total pending requests + the number of requests that can be absorbed beyond the target concurrency > target concurrency per Pod * number of Pods
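The target-burst-capacity threshold is set per revision through an annotation. A minimal sketch; the value 200 is an arbitrary example, not a platform default:

apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        # -1 keeps the activator permanently in the path; 0 uses it only when scaling from zero\n        autoscaling.knative.dev/target-burst-capacity: \"200\"\n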

                                                              "},{"location":"admin/kpanda/scale/knative/playground.html#case-2-","title":"case 2 - \u57fa\u4e8e\u5e76\u53d1\u5f39\u6027\u4f38\u7f29","text":"

First, apply the YAML definition below to the cluster.

                                                              apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"1\"\n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n

Run the command below to generate test load, and observe the scaled-out Pods with kubectl get pods -A -w.

                                                              wrk -t2 -c4 -d6s http://hello.knative-serving.knative.daocloud.io/\n
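While wrk runs, a second terminal shows the scale-out as it happens (this just expands the command mentioned above):

# new hello-... Pods appear as concurrency exceeds the target of 1\nkubectl get pods -A -w\n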
                                                              "},{"location":"admin/kpanda/scale/knative/playground.html#case-3-","title":"case 3 - \u57fa\u4e8e\u5e76\u53d1\u5f39\u6027\u4f38\u7f29\uff0c\u8fbe\u5230\u7279\u5b9a\u6bd4\u4f8b\u63d0\u524d\u6269\u5bb9","text":"

This is easy to achieve. For example, with the per-container concurrency target set to 10, adding autoscaling.knative.dev/target-utilization-percentage: 70 makes Pods start scaling out once 70% of that target is reached.

apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"10\"\n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"\n        autoscaling.knative.dev/target-utilization-percentage: \"70\"\n        autoscaling.knative.dev/metric: \"concurrency\"\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n
                                                              "},{"location":"admin/kpanda/scale/knative/playground.html#case-4-","title":"case 4 - \u7070\u5ea6\u53d1\u5e03/\u6d41\u91cf\u767e\u5206\u6bd4","text":"

Traffic to each revision can be controlled through spec.traffic.

                                                              apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"1\"  \n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"         \n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n  traffic:\n  - latestRevision: true\n    percent: 50\n  - latestRevision: false\n    percent: 50\n    revisionName: hello-00001\n
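A quick way to verify the 50/50 split (a sketch, assuming the Service above has been applied and revision hello-00001 exists):

kubectl get ksvc hello -o jsonpath='{.status.traffic[*].percent}'\n# expected output: 50 50\n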
                                                              "},{"location":"admin/kpanda/scale/knative/scene.html","title":"\u4f7f\u7528\u573a\u666f","text":""},{"location":"admin/kpanda/scale/knative/scene.html#_2","title":"\u9002\u5408\u7684\u573a\u666f","text":"
                                                              • \u77ed\u8fde\u63a5\u9ad8\u5e76\u53d1\u4e1a\u52a1
                                                              • \u9700\u8981\u5f39\u6027\u4f38\u7f29\u7684\u4e1a\u52a1
                                                              • \u5927\u91cf\u5e94\u7528\u9700\u8981\u7f29\u5bb9\u5230 0 \u63d0\u9ad8\u8d44\u6e90\u5229\u7528\u7387
                                                              • AI Serving \u670d\u52a1\uff0c\u57fa\u4e8e\u7279\u5b9a\u6307\u6807\u8fdb\u884c\u6269\u5bb9

                                                              Tip

For short-connection, high-concurrency workloads and workloads that need elastic scaling, the HPA and VPA capabilities are recommended.

                                                              "},{"location":"admin/kpanda/scale/knative/scene.html#_3","title":"\u4e0d\u9002\u5408\u7684\u573a\u666f","text":"
                                                              • \u957f\u8fde\u63a5\u4e1a\u52a1
                                                              • \u5ef6\u65f6\u654f\u611f\u4e1a\u52a1
                                                              • \u57fa\u4e8e cookie \u7684\u6d41\u91cf\u5206\u6d41
                                                              • \u57fa\u4e8e header \u7684\u6d41\u91cf\u5206\u6d41
                                                              "},{"location":"admin/kpanda/security/index.html","title":"\u5b89\u5168\u626b\u63cf\u7c7b\u578b","text":"

In a Kubernetes (K8s) environment, security scanning is one of the key measures for keeping a cluster secure. Among them, compliance scanning (based on the CIS Benchmark), permission scanning (based on the kube-audit auditing capability), and vulnerability scanning (based on kube-hunter) are three common and important approaches:

                                                              • \u5408\u89c4\u6027\u626b\u63cf\uff1a\u57fa\u4e8e CIS Benchmark \u5bf9\u96c6\u7fa4\u8282\u70b9\u8fdb\u884c\u5b89\u5168\u626b\u63cf\u3002CIS Benchmark \u662f\u4e00\u5957\u5168\u7403\u516c\u8ba4\u7684\u6700\u4f73\u5b9e\u8df5\u6807\u51c6\uff0c\u4e3a Kubernetes \u96c6\u7fa4\u63d0\u4f9b\u4e86\u8be6\u7ec6\u7684\u5b89\u5168\u914d\u7f6e\u6307\u5357\u548c\u81ea\u52a8\u5316\u68c0\u67e5\u5de5\u5177\uff08\u5982Kube-Bench\uff09\uff0c\u5e2e\u52a9\u7ec4\u7ec7\u786e\u4fdd\u5176K8s\u96c6\u7fa4\u7b26\u5408\u5b89\u5168\u57fa\u7ebf\u8981\u6c42\uff0c\u4fdd\u62a4\u7cfb\u7edf\u548c\u6570\u636e\u514d\u53d7\u5a01\u80c1\u3002

                                                              • \u6743\u9650\u626b\u63cf\uff1a\u57fa\u4e8ekube-audit\u5ba1\u8ba1\u529f\u80fd\u3002\u6743\u9650\u626b\u63cf\u4e3b\u8981\u89e3\u51b3\u96c6\u7fa4\u8bbf\u95ee\u63a7\u5236\u548c\u64cd\u4f5c\u900f\u660e\u5ea6\u7684\u95ee\u9898\u3002\u901a\u8fc7\u5ba1\u8ba1\u65e5\u5fd7\uff0c\u96c6\u7fa4\u7ba1\u7406\u5458\u80fd\u591f\u8ffd\u6eaf\u96c6\u7fa4\u8d44\u6e90\u7684\u8bbf\u95ee\u5386\u53f2\uff0c\u8bc6\u522b\u5f02\u5e38\u884c\u4e3a\uff0c\u5982\u672a\u7ecf\u6388\u6743\u7684\u8bbf\u95ee\u3001\u654f\u611f\u6570\u636e\u7684\u6cc4\u9732\u3001\u6709\u5b89\u5168\u6f0f\u6d1e\u7684\u64cd\u4f5c\u8bb0\u5f55\u7b49\u3002\u8fd9\u5bf9\u4e8e\u6545\u969c\u6392\u67e5\u3001\u5b89\u5168\u4e8b\u4ef6\u54cd\u5e94\u4ee5\u53ca\u6ee1\u8db3\u5408\u89c4\u6027\u8981\u6c42\u81f3\u5173\u91cd\u8981\u3002\u6b64\u5916\uff0c\u6743\u9650\u626b\u63cf\u8fd8\u53ef\u4ee5\u5e2e\u52a9\u7ec4\u7ec7\u53d1\u73b0\u6f5c\u5728\u7684\u6743\u9650\u6ee5\u7528\u95ee\u9898\uff0c\u53ca\u65f6\u91c7\u53d6\u63aa\u65bd\u9632\u6b62\u5b89\u5168\u4e8b\u4ef6\u7684\u53d1\u751f\u3002

                                                              • \u6f0f\u6d1e\u626b\u63cf\uff1a\u57fa\u4e8e kube-hunter\uff0c\u4e3b\u8981\u89e3\u51b3 Kubernetes \u96c6\u7fa4\u4e2d\u5b58\u5728\u7684\u5df2\u77e5\u6f0f\u6d1e\u548c\u914d\u7f6e\u9519\u8bef\u95ee\u9898\u3002kube-hunter \u901a\u8fc7\u6a21\u62df\u653b\u51fb\u884c\u4e3a\uff0c\u80fd\u591f\u8bc6\u522b\u96c6\u7fa4\u4e2d\u53ef\u88ab\u6076\u610f\u5229\u7528\u7684\u6f0f\u6d1e\uff0c\u5982\u672a\u6388\u6743\u8bbf\u95ee\u3001\u66b4\u9732\u7684\u670d\u52a1\u548cAPI\u7aef\u70b9\u3001\u914d\u7f6e\u9519\u8bef\u7684\u89d2\u8272\u548c\u7ed1\u5b9a\u7b56\u7565\u7b49\u3002\u7279\u522b\u5730\uff0ckube-hunter\u80fd\u591f\u8bc6\u522b\u5e76\u62a5\u544a CVE \u6f0f\u6d1e\uff0c\u8fd9\u4e9b\u6f0f\u6d1e\u5982\u679c\u88ab\u6076\u610f\u5229\u7528\uff0c\u53ef\u80fd\u5bfc\u81f4\u6570\u636e\u6cc4\u9732\u3001\u670d\u52a1\u4e2d\u65ad\u7b49\u4e25\u91cd\u540e\u679c\u3002CVE \u6f0f\u6d1e\u662f\u7531\u56fd\u9645\u77e5\u540d\u7684\u5b89\u5168\u7ec4\u7ec7\u5982MITRE\u6240\u5b9a\u4e49\u548c\u7ef4\u62a4\u7684\uff0cCVE\u6570\u636e\u5e93\u4e3a\u8f6f\u4ef6\u548c\u56fa\u4ef6\u4e2d\u7684\u5df2\u77e5\u6f0f\u6d1e\u63d0\u4f9b\u4e86\u552f\u4e00\u6807\u8bc6\u7b26\uff0c\u6210\u4e3a\u5168\u7403\u5b89\u5168\u793e\u533a\u5171\u540c\u9075\u5faa\u7684\u6807\u51c6\u3002kube-hunter \u901a\u8fc7\u5229\u7528 CVE \u6570\u636e\u5e93\u4e2d\u7684\u4fe1\u606f\uff0c\u80fd\u591f\u5e2e\u52a9\u7528\u6237\u5feb\u901f\u8bc6\u522b\u5e76\u54cd\u5e94Kubernetes\u96c6\u7fa4\u4e2d\u7684\u5b89\u5168\u5a01\u80c1\u3002

                                                              "},{"location":"admin/kpanda/security/index.html#_2","title":"\u5408\u89c4\u6027\u626b\u63cf","text":"

Compliance scanning targets cluster nodes. The scan results list the scan items and their outcomes, and give remediation advice for items that did not pass. For the specific security rules used during scanning, refer to the CIS Kubernetes Benchmark.

The focus of the scan differs for different node types.

                                                              • \u626b\u63cf\u63a7\u5236\u5e73\u9762\u8282\u70b9\uff08Controller\uff09

                                                                • \u5173\u6ce8 API Server \u3001 controller-manager \u3001 scheduler \u3001 kubelet \u7b49\u7cfb\u7edf\u7ec4\u4ef6\u7684\u5b89\u5168\u6027
                                                                • \u68c0\u67e5 Etcd \u6570\u636e\u5e93\u7684\u5b89\u5168\u914d\u7f6e
                                                                • \u68c0\u67e5\u96c6\u7fa4\u8eab\u4efd\u9a8c\u8bc1\u673a\u5236\u3001\u6388\u6743\u7b56\u7565\u548c\u7f51\u7edc\u5b89\u5168\u914d\u7f6e\u662f\u5426\u7b26\u5408\u5b89\u5168\u6807\u51c6
                                                              • \u626b\u63cf\u5de5\u4f5c\u8282\u70b9\uff08Worker\uff09

                                                                • \u68c0\u67e5 kubelet\u3001Docker\u7b49\u5bb9\u5668\u8fd0\u884c\u65f6\u7684\u914d\u7f6e\u5426\u7b26\u5408\u5b89\u5168\u6807\u51c6
                                                                • \u68c0\u67e5\u5bb9\u5668\u955c\u50cf\u662f\u5426\u7ecf\u8fc7\u4fe1\u4efb\u9a8c\u8bc1
                                                                • \u68c0\u67e5\u8282\u70b9\u7684\u7f51\u7edc\u5b89\u5168\u914d\u7f6e\u5426\u7b26\u5408\u5b89\u5168\u6807\u51c6

                                                              Tip

To use compliance scanning, first create a scan configuration, then create a scan policy based on that configuration. After the scan policy is executed, you can view the scan report.

                                                              "},{"location":"admin/kpanda/security/index.html#_3","title":"\u6743\u9650\u626b\u63cf","text":"

Permission scanning focuses on security vulnerabilities caused by permission issues. It helps users identify security threats in a Kubernetes cluster and flags which resources need further review and protective measures. By running these checks, users gain a clearer and more complete picture of their Kubernetes environment and can ensure the cluster follows Kubernetes best practices and security standards.

Specifically, permission scanning supports the following operations:

                                                              • \u626b\u63cf\u96c6\u7fa4\u4e2d\u7684\u6240\u6709\u8282\u70b9\u7684\u5065\u5eb7\u72b6\u6001\u3002

                                                              • \u626b\u63cf\u96c6\u7fa4\u7ec4\u4ef6\u7684\u8fd0\u884c\u72b6\u51b5\uff0c\u5982 kube-apiserver \u3001 kube-controller-manager \u3001 kube-scheduler \u7b49\u3002

                                                              • \u626b\u63cf\u5b89\u5168\u914d\u7f6e\uff1a\u68c0\u67e5 Kubernetes \u7684\u5b89\u5168\u914d\u7f6e

                                                                • API \u5b89\u5168\uff1a\u542f\u7528\u4e86\u4e0d\u5b89\u5168\u7684 API \u7248\u672c\uff0c\u662f\u5426\u8bbe\u7f6e\u4e86\u9002\u5f53\u7684 RBAC \u89d2\u8272\u548c\u6743\u9650\u9650\u5236\u7b49
                                                                • \u5bb9\u5668\u5b89\u5168\uff1a\u662f\u5426\u4f7f\u7528\u4e86\u4e0d\u5b89\u5168\u7684 Image\u3001\u662f\u5426\u5f00\u653e\u4e86\u7279\u6743\u6a21\u5f0f\uff0c\u662f\u5426\u8bbe\u7f6e\u4e86\u5408\u9002\u7684\u5b89\u5168\u4e0a\u4e0b\u6587\u7b49
                                                                • \u7f51\u7edc\u5b89\u5168\uff1a\u662f\u5426\u542f\u7528\u4e86\u5408\u9002\u7684\u7f51\u7edc\u7b56\u7565\u6765\u9650\u5236\u6d41\u91cf\uff0c\u662f\u5426\u4f7f\u7528\u4e86 TLS \u52a0\u5bc6\u7b49
                                                                • \u5b58\u50a8\u5b89\u5168\uff1a\u662f\u5426\u542f\u7528\u4e86\u9002\u5f53\u7684\u52a0\u5bc6\u3001\u8bbf\u95ee\u63a7\u5236\u7b49\u3002
                                                                • \u5e94\u7528\u7a0b\u5e8f\u5b89\u5168\uff1a\u662f\u5426\u8bbe\u7f6e\u4e86\u5fc5\u8981\u7684\u5b89\u5168\u63aa\u65bd\uff0c\u4f8b\u5982\u5bc6\u7801\u7ba1\u7406\u3001\u8de8\u7ad9\u811a\u672c\u653b\u51fb\u9632\u5fa1\u7b49\u3002
                                                              • \u63d0\u4f9b\u8b66\u544a\u548c\u5efa\u8bae\uff1a\u5efa\u8bae\u96c6\u7fa4\u7ba1\u7406\u5458\u6267\u884c\u7684\u5b89\u5168\u6700\u4f73\u5b9e\u8df5\uff0c\u4f8b\u5982\u5b9a\u671f\u8f6e\u6362\u8bc1\u4e66\u3001\u4f7f\u7528\u5f3a\u5bc6\u7801\u3001\u9650\u5236\u7f51\u7edc\u8bbf\u95ee\u7b49\u3002

                                                              Tip

To use permission scanning, first create a scan policy. After the policy is executed, you can view the scan report. For details, refer to Security Scanning.

                                                              "},{"location":"admin/kpanda/security/index.html#_4","title":"\u6f0f\u6d1e\u626b\u63cf","text":"

Vulnerability scanning focuses on potential malicious attacks and security vulnerabilities, such as remote code execution, SQL injection, and XSS attacks, as well as some Kubernetes-specific attacks. The final scan report lists the security vulnerabilities found in the cluster and offers remediation advice.
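For reference, kube-hunter can also be launched by hand inside a cluster. A minimal sketch, assuming the job.yaml manifest from the upstream aquasecurity/kube-hunter repository (the platform's vulnerability scan does not require this):

# run kube-hunter as an in-cluster Job and read the findings from its log\nkubectl create -f job.yaml\nkubectl logs job/kube-hunter\n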

                                                              Tip

To use vulnerability scanning, first create a scan policy. After the policy is executed, you can view the scan report. For details, refer to Vulnerability Scanning.

                                                              "},{"location":"admin/kpanda/security/audit.html","title":"\u6743\u9650\u626b\u63cf","text":"

To use the permission scanning feature, first create a scan policy. After the policy is executed, a scan report is generated automatically for viewing.

                                                              "},{"location":"admin/kpanda/security/audit.html#_2","title":"\u521b\u5efa\u626b\u63cf\u7b56\u7565","text":"
1. In the left navigation bar of the container management module's home page, click Security Management.

2. Click Permission Scan in the left navigation bar, click the Scan Policy tab, then click Create Scan Policy on the right.

3. Fill in the settings per the notes below, then click OK.

  • Cluster: select the cluster to scan. The selectable clusters come from those integrated into or created in the container management module; if the desired cluster is not listed, integrate or create it there first.
  • Scan type:

    • Immediate scan: runs a scan once right after the scan policy is created; the scan cannot be run again afterwards, automatically or manually.
    • Scheduled scan: set a scan cycle to run scans repeatedly on schedule.
  • Scan report retention: the maximum number of scan reports to keep. When the limit is exceeded, deletion starts from the oldest report.

                                                              "},{"location":"admin/kpanda/security/audit.html#_3","title":"\u66f4\u65b0/\u5220\u9664\u626b\u63cf\u7b56\u7565","text":"

After creating a scan policy, you can update or delete it as needed.

Under the Scan Policy tab, click the ┇ action button to the right of a policy:

• For a periodic scan policy:

  • Execute Now: run one extra scan of the cluster immediately, outside the scheduled plan
  • Disable: suspend the scan schedule; the policy resumes running on schedule only after Enable is clicked.
  • Edit: update the settings; the scan configuration, type, scan cycle, and report retention count can be updated, while the name and the target cluster to scan cannot be changed.
  • Delete: delete the policy
• For a one-off scan policy: only Delete is supported.

                                                              "},{"location":"admin/kpanda/security/audit.html#_4","title":"\u67e5\u770b\u626b\u63cf\u62a5\u544a","text":"
1. Under Security Management -> Permission Scan -> Scan Reports, click the name of a report

  Click Delete to the right of a report to delete it manually.

2. View the contents of the scan report, including:

  • the target cluster that was scanned
  • the scan policy used
  • the total number of scan items, and the numbers of warnings and errors
  • for reports generated by a periodic scan policy, the scan frequency
  • the scan start time
  • check details, such as the resources checked, resource type, scan result, error type, and error details

                                                              "},{"location":"admin/kpanda/security/hunter.html","title":"\u6f0f\u6d1e\u626b\u63cf","text":"

To use the vulnerability scanning feature, first create a scan policy. After the policy is executed, a scan report is generated automatically for viewing.

                                                              "},{"location":"admin/kpanda/security/hunter.html#_2","title":"\u521b\u5efa\u626b\u63cf\u7b56\u7565","text":"
1. In the left navigation bar of the container management module's home page, click Security Management.

2. Click Vulnerability Scan in the left navigation bar, click the Scan Policy tab, then click Create Scan Policy on the right.

3. Fill in the settings per the notes below, then click OK.

  • Cluster: select the cluster to scan. The selectable clusters come from those integrated into or created in the container management module; if the desired cluster is not listed, integrate or create it there first.
  • Scan type:

    • Immediate scan: runs a scan once right after the scan policy is created; the scan cannot be run again afterwards, automatically or manually.
    • Scheduled scan: set a scan cycle to run scans repeatedly on schedule.
  • Scan report retention: the maximum number of scan reports to keep. When the limit is exceeded, deletion starts from the oldest report.

                                                              "},{"location":"admin/kpanda/security/hunter.html#_3","title":"\u66f4\u65b0/\u5220\u9664\u626b\u63cf\u7b56\u7565","text":"

After creating a scan policy, you can update or delete it as needed.

Under the Scan Policy tab, click the ┇ action button to the right of a policy:

• For a periodic scan policy:

  • Execute Now: run one extra scan of the cluster immediately, outside the scheduled plan
  • Disable: suspend the scan schedule; the policy resumes running on schedule only after Enable is clicked.
  • Edit: update the settings; the scan configuration, type, scan cycle, and report retention count can be updated, while the name and the target cluster to scan cannot be changed.
  • Delete: delete the policy
• For a one-off scan policy: only Delete is supported.

                                                              "},{"location":"admin/kpanda/security/hunter.html#_4","title":"\u67e5\u770b\u626b\u63cf\u62a5\u544a","text":"
1. Under Security Management -> Vulnerability Scan -> Scan Reports, click the name of a report

  Click Delete to the right of a report to delete it manually.

2. View the contents of the scan report, including:

  • the target cluster that was scanned
  • the scan policy used
  • the scan frequency
  • the total number of risks, and the numbers of high, medium, and low risks
  • the scan time
  • check details, such as the vulnerability ID, vulnerability type, vulnerability name, and vulnerability description

                                                              "},{"location":"admin/kpanda/security/cis/config.html","title":"\u626b\u63cf\u914d\u7f6e","text":"

The first step in using compliance scanning is to create a scan configuration. Based on the scan configuration, you then create and execute a scan policy, and finally view the scan results.

                                                              "},{"location":"admin/kpanda/security/cis/config.html#_2","title":"\u521b\u5efa\u626b\u63cf\u914d\u7f6e","text":"

The steps for creating a scan configuration are as follows:

1. In the left navigation bar of the container management module's home page, click Security Management.

2. The Compliance Scan page opens by default; click the Scan Configuration tab, then click Create Scan Configuration in the upper-right corner.

3. Fill in the configuration name, select a configuration template, check scan items as needed, and click OK.

  Scan templates: two templates are currently provided. The kubeadm template applies to ordinary Kubernetes clusters. Building on the kubeadm template, and in line with the platform design of the Suanfeng AI computing platform, we dropped the scan items that do not apply to it.

                                                              "},{"location":"admin/kpanda/security/cis/config.html#_3","title":"\u67e5\u770b\u626b\u63cf\u914d\u7f6e","text":"

Under the Scan Configuration tab, click the name of a scan configuration to view its type, number of scan items, creation time, configuration template, and the specific scan items it enables.

                                                              "},{"location":"admin/kpanda/security/cis/config.html#_4","title":"\u66f4\u65b0/\u5220\u9664\u626b\u63cf\u914d\u7f6e","text":"

After a scan configuration is created successfully, you can update or delete it as needed.

Under the Scan Configuration tab, click the ┇ action button to the right of a configuration:

• Edit: update the settings; the description, template, and scan items can be updated, while the configuration name cannot be changed.
• Delete: delete the configuration.

                                                              "},{"location":"admin/kpanda/security/cis/policy.html","title":"\u626b\u63cf\u7b56\u7565","text":""},{"location":"admin/kpanda/security/cis/policy.html#_2","title":"\u521b\u5efa\u626b\u63cf\u7b56\u7565","text":"

After creating a scan configuration, you can create a scan policy based on it.

1. Under the Scan Policy tab of the Security Management -> Compliance Scan page, click Create Scan Policy on the right.

2. Fill in the settings per the notes below, then click OK.

  • Cluster: select the cluster to scan. The selectable clusters come from those integrated into or created in the container management module; if the desired cluster is not listed, integrate or create it there first.
  • Scan configuration: select a scan configuration created beforehand. The scan configuration determines which specific scan items are executed.
  • Scan type:

    • Immediate scan: runs a scan once right after the scan policy is created; the scan cannot be run again afterwards, automatically or manually.
    • Scheduled scan: set a scan cycle to run scans repeatedly on schedule.
  • Scan report retention: the maximum number of scan reports to keep. When the limit is exceeded, deletion starts from the oldest report.

                                                              "},{"location":"admin/kpanda/security/cis/policy.html#_3","title":"\u66f4\u65b0/\u5220\u9664\u626b\u63cf\u7b56\u7565","text":"

After creating a scan policy, you can update or delete it as needed.

Under the Scan Policy tab, click the ┇ action button to the right of a policy:

• For a periodic scan policy:

  • Execute Now: run one extra scan of the cluster immediately, outside the scheduled plan
  • Disable: suspend the scan schedule; the policy resumes running on schedule only after Enable is clicked.
  • Edit: update the settings; the scan configuration, type, scan cycle, and report retention count can be updated, while the name and the target cluster to scan cannot be changed.
  • Delete: delete the policy
• For a one-off scan policy: only Delete is supported.

                                                              "},{"location":"admin/kpanda/security/cis/report.html","title":"\u626b\u63cf\u62a5\u544a","text":"


                                                              "},{"location":"admin/kpanda/security/cis/report.html#_1","title":"\u626b\u63cf\u62a5\u544a","text":"

A scan report is generated automatically after a scan policy is executed. You can view it online or download it to view locally.

                                                              • \u4e0b\u8f7d\u67e5\u770b\u626b\u63cf\u62a5\u544a

                                                                \u5b89\u5168\u7ba1\u7406 -> \u5408\u89c4\u6027\u626b\u63cf \u9875\u9762\u7684 \u626b\u63cf\u62a5\u544a \u9875\u7b7e\u70b9\u51fb\u62a5\u544a\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u4e0b\u8f7d \u3002

                                                              • \u5728\u7ebf\u67e5\u770b\u626b\u63cf\u62a5\u544a

                                                                \u70b9\u51fb\u67d0\u4e2a\u62a5\u544a\u7684\u540d\u79f0\uff0c\u60a8\u53ef\u4ee5\u5728\u7ebf\u67e5\u770b CIS \u5408\u89c4\u6027\u626b\u63cf\u7684\u62a5\u544a\u5185\u5bb9\u3002\u5177\u4f53\u5305\u62ec\uff1a

                                                                • \u626b\u63cf\u7684\u76ee\u6807\u96c6\u7fa4
                                                                • \u4f7f\u7528\u7684\u626b\u63cf\u7b56\u7565\u548c\u626b\u63cf\u914d\u7f6e
                                                                • \u626b\u63cf\u5f00\u59cb\u65f6\u95f4
                                                                • \u626b\u63cf\u9879\u603b\u6570\u3001\u901a\u8fc7\u6570\u4e0e\u672a\u901a\u8fc7\u6570
                                                                • \u5bf9\u4e8e\u672a\u901a\u8fc7\u7684\u626b\u63cf\u9879\u7ed9\u51fa\u5bf9\u5e94\u7684\u4fee\u590d\u5efa\u8bae
                                                                • \u5bf9\u4e8e\u901a\u8fc7\u7684\u626b\u63cf\u9879\u7ed9\u51fa\u66f4\u5b89\u5168\u7684\u64cd\u4f5c\u5efa\u8bae

                                                              "},{"location":"admin/kpanda/storage/pv.html","title":"\u6570\u636e\u5377(PV)","text":"

A data volume (PersistentVolume, PV) is a piece of storage in the cluster that can be provisioned in advance by an administrator, or provisioned dynamically with a StorageClass. A PV is a cluster resource with an independent lifecycle: it is not deleted when a Pod's process ends. Mounting a PV into a workload gives the workload persistent data storage. A PV holds the data directories that containers in a Pod can access.

                                                              "},{"location":"admin/kpanda/storage/pv.html#_1","title":"\u521b\u5efa\u6570\u636e\u5377","text":"

Data volumes can currently be created in two ways, YAML and form; each has its pros and cons and suits different users.

• Creating with YAML takes fewer steps and is more efficient, but has a higher bar: you need to be familiar with the YAML configuration of data volumes.

• Creating with a form is more intuitive and simpler; just fill in the values as prompted, but the steps are more tedious.

                                                              "},{"location":"admin/kpanda/storage/pv.html#yaml","title":"YAML \u521b\u5efa","text":"
1. In the cluster list, click the name of the target cluster, then click Container Storage -> Data Volume (PV) -> Create with YAML in the left navigation bar.

2. Enter or paste a prepared YAML file in the dialog, then click OK at the bottom of the dialog.

  You can import a YAML file from local storage, or download and save the completed file locally.
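For reference, a minimal HostPath PV sketch that could be pasted into the dialog; the name, capacity, and path are placeholders rather than values required by the platform:

apiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: pv-example    # placeholder name\nspec:\n  capacity:\n    storage: 10Gi\n  accessModes:\n    - ReadWriteOnce\n  persistentVolumeReclaimPolicy: Retain\n  volumeMode: Filesystem\n  hostPath:\n    path: /data/pv-example    # placeholder directory on the node\n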

                                                              "},{"location":"admin/kpanda/storage/pv.html#_2","title":"\u8868\u5355\u521b\u5efa","text":"
1. In the cluster list, click the name of the target cluster, then click Container Storage -> Data Volume (PV) -> Create Data Volume (PV) in the left navigation bar.

2. Fill in the basic information.

  • The data volume name, data volume type, mount path, volume mode, and node affinity cannot be changed after creation.
  • Data volume type: for a detailed introduction to volume types, refer to Volumes in the official Kubernetes documentation.

  • Local: wraps the node's local storage behind the PVC interface, so containers use the PVC directly without needing to care about the underlying storage type. Local volumes do not support dynamic provisioning, but they support node affinity, which restricts the nodes from which the data volume can be accessed.

  • HostPath: uses a file or directory on the node's filesystem as the data volume; Pod scheduling based on node affinity is not supported.

  • Mount path: mounts the data volume to a specific directory in the container.

  • Access modes:

    • ReadWriteOnce: the data volume can be mounted read-write by a single node.
    • ReadWriteMany: the data volume can be mounted read-write by multiple nodes.
    • ReadOnlyMany: the data volume can be mounted read-only by multiple nodes.
    • ReadWriteOncePod: the data volume can be mounted read-write by a single Pod.
  • Reclaim policy:

    • Retain: the PV is not deleted; its status only changes to released, and the user must reclaim it manually. For how to reclaim it manually, refer to Persistent Volumes.
    • Recycle: the PV is kept but its data is cleared with a basic scrub (rm -rf /thevolume/*).
    • Delete: the PV and the data in it are deleted together.
  • Volume modes:

    • Filesystem: the data volume is mounted into a directory by the Pod. If the volume is backed by a block device and the device is currently empty, a filesystem is created on the device before the volume is mounted for the first time.
    • Block: the data volume is used as a raw block device. It is handed to the Pod as a block device without any filesystem on it, which gives the Pod faster access to the data volume.
  • Node affinity: see the Local volume sketch after this list.

                                                              "},{"location":"admin/kpanda/storage/pv.html#_3","title":"\u67e5\u770b\u6570\u636e\u5377","text":"

In the cluster list, click the name of the target cluster, then click Container Storage -> Data Volume (PV) in the left navigation bar.

• This page shows all data volumes in the current cluster, along with each volume's status, capacity, namespace, and other information.

• Data volumes can be sorted in ascending or descending order by name, status, namespace, and creation time.

• Click the name of a data volume to view its basic configuration, storage pool information, labels, annotations, and more.

                                                              "},{"location":"admin/kpanda/storage/pv.html#_4","title":"\u514b\u9686\u6570\u636e\u5377","text":"

By cloning a data volume, you can create a new data volume based on the configuration of the cloned one.

1. Open the clone page

  • On the data volume list page, find the data volume to clone, and select Clone in the action bar on the right.

    You can also click the name of the data volume and, on the details page, click the action button in the upper-right corner and select Clone.

2. Use the original configuration as-is, or modify it as needed, then click OK at the bottom of the page.

                                                              "},{"location":"admin/kpanda/storage/pv.html#_5","title":"\u66f4\u65b0\u6570\u636e\u5377","text":"

There are two ways to update a data volume, by form or by YAML file.

Note

Only the alias, capacity, access mode, reclaim policy, labels, and annotations of a data volume can be updated.

• On the data volume list page, find the data volume to update; select Update in the action bar on the right to update via the form, or select Edit YAML to update via YAML.

• Click the name of the data volume to open its details page, then select Update in the upper-right corner to update via the form, or select Edit YAML to update via YAML.

                                                              "},{"location":"admin/kpanda/storage/pv.html#_6","title":"\u5220\u9664\u6570\u636e\u5377","text":"

On the data volume list page, find the data volume to delete, and select Delete in the action bar on the right.

You can also click the name of the data volume and, on the details page, click the action button in the upper-right corner and select Delete.

                                                              "},{"location":"admin/kpanda/storage/pvc.html","title":"\u6570\u636e\u5377\u58f0\u660e(PVC)","text":"

A persistent volume claim (PersistentVolumeClaim, PVC) expresses a user's request for storage. A PVC consumes PV resources and claims a data volume of a specific size and access mode, for example requesting that a PV be mounted in ReadWriteOnce, ReadOnlyMany, or ReadWriteMany mode.

                                                              "},{"location":"admin/kpanda/storage/pvc.html#_1","title":"\u521b\u5efa\u6570\u636e\u5377\u58f0\u660e","text":"

                                                              \u76ee\u524d\u652f\u6301\u901a\u8fc7 YAML \u548c\u8868\u5355\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u6570\u636e\u5377\u58f0\u660e\uff0c\u8fd9\u4e24\u79cd\u65b9\u5f0f\u5404\u6709\u4f18\u52a3\uff0c\u53ef\u4ee5\u6ee1\u8db3\u4e0d\u540c\u7528\u6237\u7684\u4f7f\u7528\u9700\u6c42\u3002

                                                              • \u901a\u8fc7 YAML \u521b\u5efa\u6b65\u9aa4\u66f4\u5c11\u3001\u66f4\u9ad8\u6548\uff0c\u4f46\u95e8\u69db\u8981\u6c42\u8f83\u9ad8\uff0c\u9700\u8981\u719f\u6089\u6570\u636e\u5377\u58f0\u660e\u7684 YAML \u6587\u4ef6\u914d\u7f6e\u3002

                                                              • \u901a\u8fc7\u8868\u5355\u521b\u5efa\u66f4\u76f4\u89c2\u66f4\u7b80\u5355\uff0c\u6839\u636e\u63d0\u793a\u586b\u5199\u5bf9\u5e94\u7684\u503c\u5373\u53ef\uff0c\u4f46\u6b65\u9aa4\u66f4\u52a0\u7e41\u7410\u3002

                                                              "},{"location":"admin/kpanda/storage/pvc.html#yaml","title":"YAML \u521b\u5efa","text":"
                                                              1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e (PVC) -> YAML \u521b\u5efa \u3002

                                                              2. \u5728\u5f39\u6846\u4e2d\u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u7136\u540e\u5728\u5f39\u6846\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                                                                \u652f\u6301\u4ece\u672c\u5730\u5bfc\u5165 YAML \u6587\u4ef6\u6216\u5c06\u586b\u5199\u597d\u7684\u6587\u4ef6\u4e0b\u8f7d\u4fdd\u5b58\u5230\u672c\u5730\u3002

                                                              "},{"location":"admin/kpanda/storage/pvc.html#_2","title":"\u8868\u5355\u521b\u5efa","text":"
                                                              1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e (PVC) -> \u521b\u5efa\u6570\u636e\u5377\u58f0\u660e (PVC) \u3002

                                                              2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\u3002

                                                                • \u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\u3001\u547d\u540d\u7a7a\u95f4\u3001\u521b\u5efa\u65b9\u5f0f\u3001\u6570\u636e\u5377\u3001\u5bb9\u91cf\u3001\u8bbf\u95ee\u6a21\u5f0f\u5728\u521b\u5efa\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                                                • \u521b\u5efa\u65b9\u5f0f\uff1a\u5728\u5df2\u6709\u7684\u5b58\u50a8\u6c60\u6216\u8005\u6570\u636e\u5377\u4e2d\u52a8\u6001\u521b\u5efa\u65b0\u7684\u6570\u636e\u5377\u58f0\u660e\uff0c\u6216\u8005\u57fa\u4e8e\u6570\u636e\u5377\u58f0\u660e\u7684\u5feb\u7167\u521b\u5efa\u65b0\u7684\u6570\u636e\u5377\u58f0\u660e\u3002

                                                                  \u57fa\u4e8e\u5feb\u7167\u521b\u5efa\u65f6\u65e0\u6cd5\u4fee\u6539\u6570\u636e\u5377\u58f0\u660e\u7684\u5bb9\u91cf\uff0c\u53ef\u4ee5\u5728\u521b\u5efa\u5b8c\u6210\u540e\u518d\u8fdb\u884c\u4fee\u6539\u3002

                                                                • \u9009\u62e9\u521b\u5efa\u65b9\u5f0f\u4e4b\u540e\uff0c\u5728\u4e0b\u62c9\u5217\u8868\u4e2d\u9009\u62e9\u60f3\u8981\u4f7f\u7528\u7684\u5b58\u50a8\u6c60/\u6570\u636e\u5377/\u5feb\u7167\u3002

                                                                • \u8bbf\u95ee\u6a21\u5f0f\uff1a

                                                                • ReadWriteOnce\uff0c\u6570\u636e\u5377\u58f0\u660e\u53ef\u4ee5\u88ab\u4e00\u4e2a\u8282\u70b9\u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002

                                                                • ReadWriteMany\uff0c\u6570\u636e\u5377\u58f0\u660e\u53ef\u4ee5\u88ab\u591a\u4e2a\u8282\u70b9\u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002
                                                                • ReadOnlyMany\uff0c\u6570\u636e\u5377\u58f0\u660e\u53ef\u4ee5\u88ab\u591a\u4e2a\u8282\u70b9\u4ee5\u53ea\u8bfb\u65b9\u5f0f\u6302\u8f7d\u3002
                                                                • ReadWriteOncePod\uff0c\u6570\u636e\u5377\u58f0\u660e\u53ef\u4ee5\u88ab\u5355\u4e2a Pod \u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002
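
As referenced above, the snapshot-based creation method corresponds to a claim whose dataSource points at a snapshot. A minimal sketch, assuming standard CSI snapshot support and a VolumeSnapshot named pvc-demo-snap:

```yaml
# PVC created from a snapshot: dataSource references the VolumeSnapshot.
# The capacity cannot be changed at creation time; expand it afterwards.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-from-snapshot
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  dataSource:
    apiGroup: snapshot.storage.k8s.io
    kind: VolumeSnapshot
    name: pvc-demo-snap
  storageClassName: example-sc
```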

                                                              "},{"location":"admin/kpanda/storage/pvc.html#_3","title":"\u67e5\u770b\u6570\u636e\u5377\u58f0\u660e","text":"

                                                              \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e(PVC) \u3002

                                                              • \u8be5\u9875\u9762\u53ef\u4ee5\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u4e2d\u7684\u6240\u6709\u6570\u636e\u5377\u58f0\u660e\uff0c\u4ee5\u53ca\u5404\u4e2a\u6570\u636e\u5377\u58f0\u660e\u7684\u72b6\u6001\u3001\u5bb9\u91cf\u3001\u547d\u540d\u7a7a\u95f4\u7b49\u4fe1\u606f\u3002

                                                              • \u652f\u6301\u6309\u7167\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\u3001\u72b6\u6001\u3001\u547d\u540d\u7a7a\u95f4\u3001\u521b\u5efa\u65f6\u95f4\u8fdb\u884c\u987a\u5e8f\u6216\u9006\u5e8f\u6392\u5e8f\u3002

                                                              • \u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u53ef\u4ee5\u67e5\u770b\u8be5\u6570\u636e\u5377\u58f0\u660e\u7684\u57fa\u672c\u914d\u7f6e\u3001\u5b58\u50a8\u6c60\u4fe1\u606f\u3001\u6807\u7b7e\u3001\u6ce8\u89e3\u7b49\u4fe1\u606f\u3002

                                                              "},{"location":"admin/kpanda/storage/pvc.html#_4","title":"\u6269\u5bb9\u6570\u636e\u5377\u58f0\u660e","text":"
                                                              1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e(PVC) \uff0c\u627e\u5230\u60f3\u8981\u8c03\u6574\u5bb9\u91cf\u7684\u6570\u636e\u5377\u58f0\u660e\u3002

                                                              2. \u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u70b9\u51fb\u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u6269\u5bb9 \u3002

                                                              3. \u8f93\u5165\u76ee\u6807\u5bb9\u91cf\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a \u3002
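
At the Kubernetes level, the Expand action amounts to raising the claim's requested storage, as in the sketch below (names reused from the earlier example). It only succeeds when the storage pool (SC) has expansion enabled, and the size can only grow.

```yaml
# Re-apply the claim with a larger request; shrinking is not supported.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-demo
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi        # raised from the original 10Gi
  storageClassName: example-sc
```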

                                                              "},{"location":"admin/kpanda/storage/pvc.html#_5","title":"\u514b\u9686\u6570\u636e\u5377\u58f0\u660e","text":"

                                                              \u901a\u8fc7\u514b\u9686\u6570\u636e\u5377\u58f0\u660e\uff0c\u53ef\u4ee5\u57fa\u4e8e\u88ab\u514b\u9686\u6570\u636e\u5377\u58f0\u660e\u7684\u914d\u7f6e\uff0c\u91cd\u65b0\u521b\u5efa\u4e00\u4e2a\u65b0\u7684\u6570\u636e\u5377\u58f0\u660e\u3002

                                                              1. \u8fdb\u5165\u514b\u9686\u9875\u9762

                                                                • \u5728\u6570\u636e\u5377\u58f0\u660e\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u514b\u9686\u7684\u6570\u636e\u5377\u58f0\u660e\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u514b\u9686 \u3002

                                                                  \u4e5f\u53ef\u4ee5\u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u5728\u8be6\u60c5\u9875\u9762\u7684\u53f3\u4e0a\u89d2\u70b9\u51fb\u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u514b\u9686 \u3002

                                                              2. \u76f4\u63a5\u4f7f\u7528\u539f\u914d\u7f6e\uff0c\u6216\u8005\u6309\u9700\u8fdb\u884c\u4fee\u6539\uff0c\u7136\u540e\u5728\u9875\u9762\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                                                              "},{"location":"admin/kpanda/storage/pvc.html#_6","title":"\u66f4\u65b0\u6570\u636e\u5377\u58f0\u660e","text":"

                                                              \u6709\u4e24\u79cd\u9014\u5f84\u53ef\u4ee5\u66f4\u65b0\u6570\u636e\u5377\u58f0\u660e\u3002\u652f\u6301\u901a\u8fc7\u8868\u5355\u6216 YAML \u6587\u4ef6\u66f4\u65b0\u6570\u636e\u5377\u58f0\u660e\u3002

                                                              Note

                                                              \u4ec5\u652f\u6301\u66f4\u65b0\u6570\u636e\u5377\u58f0\u660e\u7684\u522b\u540d\u3001\u6807\u7b7e\u548c\u6ce8\u89e3\u3002

                                                              • \u5728\u6570\u636e\u5377\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684\u6570\u636e\u5377\u58f0\u660e\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                                                              • \u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u6570\u636e\u5377\u58f0\u660e\u7684\u8be6\u60c5\u9875\u9762\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                                                              "},{"location":"admin/kpanda/storage/pvc.html#_7","title":"\u5220\u9664\u6570\u636e\u5377\u58f0\u660e","text":"

                                                              \u5728\u6570\u636e\u5377\u58f0\u660e\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u5220\u9664\u7684\u6570\u636e\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u5220\u9664 \u3002

                                                              \u4e5f\u53ef\u4ee5\u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u5728\u8be6\u60c5\u9875\u9762\u7684\u53f3\u4e0a\u89d2\u70b9\u51fb\u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u5220\u9664 \u3002

                                                              "},{"location":"admin/kpanda/storage/pvc.html#_8","title":"\u5e38\u89c1\u95ee\u9898","text":"
                                                              1. \u5982\u679c\u5217\u8868\u4e2d\u6ca1\u6709\u53ef\u9009\u7684\u5b58\u50a8\u6c60\u6216\u6570\u636e\u5377\uff0c\u53ef\u4ee5\u521b\u5efa\u5b58\u50a8\u6c60\u6216\u521b\u5efa\u6570\u636e\u5377\u3002

                                                              2. \u5982\u679c\u5217\u8868\u4e2d\u6ca1\u6709\u53ef\u9009\u7684\u5feb\u7167\uff0c\u53ef\u4ee5\u8fdb\u5165\u6570\u636e\u5377\u58f0\u660e\u7684\u8be6\u60c5\u9875\uff0c\u5728\u53f3\u4e0a\u89d2\u5236\u4f5c\u5feb\u7167\u3002

                                                              3. \u5982\u679c\u6570\u636e\u5377\u58f0\u660e\u6240\u4f7f\u7528\u7684\u5b58\u50a8\u6c60 (SC) \u6ca1\u6709\u542f\u7528\u5feb\u7167\uff0c\u5219\u65e0\u6cd5\u5236\u4f5c\u5feb\u7167\uff0c\u9875\u9762\u4e0d\u4f1a\u663e\u793a\u201c\u5236\u4f5c\u5feb\u7167\u201d\u9009\u9879\u3002

                                                              4. \u5982\u679c\u6570\u636e\u5377\u58f0\u660e\u6240\u4f7f\u7528\u7684\u5b58\u50a8\u6c60 (SC) \u6ca1\u6709\u5f00\u542f\u6269\u5bb9\u529f\u80fd\uff0c\u5219\u8be5\u6570\u636e\u5377\u4e0d\u652f\u6301\u6269\u5bb9\uff0c\u9875\u9762\u4e0d\u4f1a\u663e\u793a\u6269\u5bb9\u9009\u9879\u3002
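
Assuming the platform's snapshot feature is backed by standard CSI snapshots, the Create Snapshot action corresponds to an object of roughly this shape; the VolumeSnapshotClass name example-snapclass is an assumption:

```yaml
# Snapshot of the claim pvc-demo; requires a CSI driver with snapshot support.
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: pvc-demo-snap
  namespace: default
spec:
  volumeSnapshotClassName: example-snapclass
  source:
    persistentVolumeClaimName: pvc-demo
```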

                                                              "},{"location":"admin/kpanda/storage/sc-share.html","title":"\u5171\u4eab\u5b58\u50a8\u6c60","text":"

                                                              \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u652f\u6301\u5c06\u4e00\u4e2a\u5b58\u50a8\u6c60\u5171\u4eab\u7ed9\u591a\u4e2a\u547d\u540d\u7a7a\u95f4\u4f7f\u7528\uff0c\u4ee5\u4fbf\u63d0\u9ad8\u8d44\u6e90\u5229\u7528\u6548\u7387\u3002

                                                              1. \u5728\u5b58\u50a8\u6c60\u5217\u8868\u4e2d\u627e\u5230\u9700\u8981\u5171\u4eab\u7684\u5b58\u50a8\u6c60\uff0c\u5728\u53f3\u4fa7\u64cd\u4f5c\u680f\u4e0b\u70b9\u51fb \u6388\u6743\u547d\u540d\u7a7a\u95f4 \u3002

                                                              2. \u70b9\u51fb \u81ea\u5b9a\u4e49\u547d\u540d\u7a7a\u95f4 \u53ef\u4ee5\u9010\u4e00\u9009\u62e9\u9700\u8981\u5c06\u6b64\u5b58\u50a8\u6c60\u5171\u4eab\u5230\u54ea\u4e9b\u547d\u540d\u7a7a\u95f4\u3002

                                                                • \u70b9\u51fb \u6388\u6743\u6240\u6709\u547d\u540d\u7a7a\u95f4 \u53ef\u4ee5\u4e00\u6b21\u6027\u5c06\u6b64\u5b58\u50a8\u6c60\u5171\u4eab\u5230\u5f53\u524d\u96c6\u7fa4\u4e0b\u7684\u6240\u6709\u547d\u540d\u7a7a\u95f4\u3002
                                                                • \u5728\u5217\u8868\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u65b9\u70b9\u51fb \u79fb\u9664\u6388\u6743 \uff0c\u53ef\u4ee5\u89e3\u9664\u6388\u6743\uff0c\u505c\u6b62\u5c06\u6b64\u5b58\u50a8\u6c60\u5171\u4eab\u5230\u8be5\u547d\u540d\u7a7a\u95f4\u3002

                                                              "},{"location":"admin/kpanda/storage/sc.html","title":"\u5b58\u50a8\u6c60(SC)","text":"

                                                              \u5b58\u50a8\u6c60\u6307\u5c06\u8bb8\u591a\u7269\u7406\u78c1\u76d8\u7ec4\u6210\u4e00\u4e2a\u5927\u578b\u5b58\u50a8\u8d44\u6e90\u6c60\uff0c\u672c\u5e73\u53f0\u652f\u6301\u63a5\u5165\u5404\u7c7b\u5b58\u50a8\u5382\u5546\u540e\u521b\u5efa\u5757\u5b58\u50a8\u6c60\u3001\u672c\u5730\u5b58\u50a8\u6c60\u3001\u81ea\u5b9a\u4e49\u5b58\u50a8\u6c60\uff0c\u7136\u540e\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u52a8\u6001\u914d\u7f6e\u6570\u636e\u5377\u3002

                                                              "},{"location":"admin/kpanda/storage/sc.html#sc_1","title":"\u521b\u5efa\u5b58\u50a8\u6c60(SC)","text":"

                                                              \u76ee\u524d\u652f\u6301\u901a\u8fc7 YAML \u548c\u8868\u5355\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u5b58\u50a8\u6c60\uff0c\u8fd9\u4e24\u79cd\u65b9\u5f0f\u5404\u6709\u4f18\u52a3\uff0c\u53ef\u4ee5\u6ee1\u8db3\u4e0d\u540c\u7528\u6237\u7684\u4f7f\u7528\u9700\u6c42\u3002

                                                              • \u901a\u8fc7 YAML \u521b\u5efa\u6b65\u9aa4\u66f4\u5c11\u3001\u66f4\u9ad8\u6548\uff0c\u4f46\u95e8\u69db\u8981\u6c42\u8f83\u9ad8\uff0c\u9700\u8981\u719f\u6089\u5b58\u50a8\u6c60\u7684 YAML \u6587\u4ef6\u914d\u7f6e\u3002

                                                              • \u901a\u8fc7\u8868\u5355\u521b\u5efa\u66f4\u76f4\u89c2\u66f4\u7b80\u5355\uff0c\u6839\u636e\u63d0\u793a\u586b\u5199\u5bf9\u5e94\u7684\u503c\u5373\u53ef\uff0c\u4f46\u6b65\u9aa4\u66f4\u52a0\u7e41\u7410\u3002

                                                              "},{"location":"admin/kpanda/storage/sc.html#yaml","title":"YAML \u521b\u5efa","text":"
                                                              1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u5b58\u50a8\u6c60(SC) -> YAML \u521b\u5efa \u3002

                                                              2. \u5728\u5f39\u6846\u4e2d\u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u7136\u540e\u5728\u5f39\u6846\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                                                                \u652f\u6301\u4ece\u672c\u5730\u5bfc\u5165 YAML \u6587\u4ef6\u6216\u5c06\u586b\u5199\u597d\u7684\u6587\u4ef6\u4e0b\u8f7d\u4fdd\u5b58\u5230\u672c\u5730\u3002

                                                              "},{"location":"admin/kpanda/storage/sc.html#_1","title":"\u8868\u5355\u521b\u5efa","text":"
                                                              1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u5b58\u50a8\u6c60(SC) -> \u521b\u5efa\u5b58\u50a8\u6c60(SC) \u3002

                                                              2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\uff0c\u7136\u540e\u5728\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                                                                \u81ea\u5b9a\u4e49\u5b58\u50a8\u7cfb\u7edf

                                                                • \u5b58\u50a8\u6c60\u540d\u79f0\u3001\u9a71\u52a8\u3001\u56de\u6536\u7b56\u7565\u5728\u521b\u5efa\u540e\u4e0d\u53ef\u4fee\u6539\u3002
                                                                • CSI \u5b58\u50a8\u9a71\u52a8\uff1a\u57fa\u4e8e\u6807\u51c6 Kubernetes \u7684\u5bb9\u5668\u5b58\u50a8\u63a5\u53e3\u63d2\u4ef6\uff0c\u9700\u9075\u5b88\u5b58\u50a8\u5382\u5546\u89c4\u5b9a\u7684\u683c\u5f0f\uff0c\u4f8b\u5982 rancher.io/local-path \u3002

                                                                  • \u6709\u5173\u5982\u4f55\u586b\u5199\u4e0d\u540c\u5382\u5546\u63d0\u4f9b\u7684 CSI \u9a71\u52a8\uff0c\u53ef\u53c2\u8003 Kubernetes \u5b98\u65b9\u6587\u6863\u5b58\u50a8\u7c7b\u3002
                                                                    • \u56de\u6536\u7b56\u7565\uff1a\u5220\u9664\u6570\u636e\u5377\u65f6\uff0c\u4fdd\u7559\u6570\u636e\u5377\u4e2d\u7684\u6570\u636e\u6216\u8005\u5220\u9664\u5176\u4e2d\u7684\u6570\u636e\u3002
                                                                    • \u5feb\u7167/\u6269\u5bb9\uff1a\u5f00\u542f\u540e\uff0c\u57fa\u4e8e\u8be5\u5b58\u50a8\u6c60\u7684\u6570\u636e\u5377/\u6570\u636e\u5377\u58f0\u660e\u624d\u80fd\u652f\u6301\u6269\u5bb9\u548c\u5feb\u7167\u529f\u80fd\uff0c\u4f46 \u524d\u63d0\u662f\u5e95\u5c42\u4f7f\u7528\u7684\u5b58\u50a8\u9a71\u52a8\u652f\u6301\u5feb\u7167\u548c\u6269\u5bb9\u529f\u80fd\u3002
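
A minimal sketch of a custom storage pool combining the fields above, using the rancher.io/local-path driver named in the example; whether a given driver actually honors expansion and snapshots depends on the vendor:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: example-sc
provisioner: rancher.io/local-path   # CSI storage driver, vendor-defined format
reclaimPolicy: Delete                # or Retain to keep volume data on deletion
allowVolumeExpansion: true           # the "expansion" switch described above
volumeBindingMode: WaitForFirstConsumer
```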

   HwameiStor storage system

   • The storage pool's name, driver, and reclaim policy cannot be modified after creation.
   • Storage system: the HwameiStor storage system.
   • Storage type: LVM and raw-disk types are supported.
     • LVM type: the approach HwameiStor recommends; it supports highly available volumes, and the corresponding CSI storage driver is lvm.hwameistor.io.
     • Raw-disk volume: suitable for non-HA scenarios, with no high-availability capability; the corresponding CSI driver is hdd.hwameistor.io.
   • HA mode: before using the high-availability capability, confirm that the DRBD component is installed. With HA mode enabled, the number of volume replicas can be set to 1 or 2; if needed, a volume's replica count can later be converted from 1 to 2.
   • Reclaim policy: when a volume is deleted, either retain the data in the volume or delete it.
   • Snapshot/expansion: once enabled, volumes/PVCs based on this storage pool can support expansion and snapshot features, provided the underlying storage driver supports them.

   Note

   HwameiStor currently supports two file systems, xfs and ext4, with xfs used by default. To switch to ext4, add csi.storage.k8s.io/fstype: ext4 to the custom parameters.
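
Putting the note together with the LVM driver named above, a sketch of an HwameiStor storage pool that switches to ext4; only the driver name and the fstype parameter come from this document, the rest is illustrative:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: hwameistor-lvm-ext4
provisioner: lvm.hwameistor.io        # HwameiStor LVM CSI driver
reclaimPolicy: Delete
allowVolumeExpansion: true
parameters:
  csi.storage.k8s.io/fstype: ext4     # default file system is xfs
```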

                                                              "},{"location":"admin/kpanda/storage/sc.html#sc_2","title":"\u66f4\u65b0\u5b58\u50a8\u6c60(SC)","text":"

                                                              \u5728\u5b58\u50a8\u6c60\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684\u5b58\u50a8\u6c60\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u7f16\u8f91 \u5373\u53ef\u901a\u8fc7\u66f4\u65b0\u5b58\u50a8\u6c60\u3002

                                                              Info

                                                              \u9009\u62e9 \u67e5\u770b YAML \u53ef\u4ee5\u67e5\u770b\u8be5\u5b58\u50a8\u6c60\u7684 YAML \u6587\u4ef6\uff0c\u4f46\u4e0d\u652f\u6301\u7f16\u8f91\u3002

                                                              "},{"location":"admin/kpanda/storage/sc.html#sc_3","title":"\u5220\u9664\u5b58\u50a8\u6c60(SC)","text":"

                                                              \u5728\u5b58\u50a8\u6c60\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u5220\u9664\u7684\u5b58\u50a8\u6c60\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u5220\u9664 \u3002

                                                              "},{"location":"admin/kpanda/workloads/create-cronjob.html","title":"\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\uff08CronJob\uff09","text":"

                                                              \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\uff08CronJob\uff09\u3002

                                                              \u5b9a\u65f6\u4efb\u52a1\uff08CronJob\uff09\u9002\u7528\u4e8e\u4e8e\u6267\u884c\u5468\u671f\u6027\u7684\u64cd\u4f5c\uff0c\u4f8b\u5982\u5907\u4efd\u3001\u62a5\u544a\u751f\u6210\u7b49\u3002\u8fd9\u4e9b\u4efb\u52a1\u53ef\u4ee5\u914d\u7f6e\u4e3a\u5468\u671f\u6027\u91cd\u590d\u7684\uff08\u4f8b\u5982\uff1a\u6bcf\u5929/\u6bcf\u5468/\u6bcf\u6708\u4e00\u6b21\uff09\uff0c\u53ef\u4ee5\u5b9a\u4e49\u4efb\u52a1\u5f00\u59cb\u6267\u884c\u7684\u65f6\u95f4\u95f4\u9694\u3002

                                                              "},{"location":"admin/kpanda/workloads/create-cronjob.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\uff08CronJob\uff09\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                              • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                                                              • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                              • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                                                              "},{"location":"admin/kpanda/workloads/create-cronjob.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

                                                              \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u5b9a\u65f6\u4efb\u52a1\u3002

                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                                              2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u5b9a\u65f6\u4efb\u52a1 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                                                              3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\u3001\u5b9a\u65f6\u4efb\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                                                                \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de \u5b9a\u65f6\u4efb\u52a1 \u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u5b9a\u65f6\u4efb\u52a1\u6267\u884c\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u3001\u91cd\u542f\u7b49\u64cd\u4f5c\u3002

                                                              "},{"location":"admin/kpanda/workloads/create-cronjob.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"

                                                              \u5728 \u521b\u5efa\u5b9a\u65f6\u4efb\u52a1 \u9875\u9762\u4e2d\uff0c\u6839\u636e\u4e0b\u8868\u8f93\u5165\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                                              • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                                              • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u5b9a\u65f6\u4efb\u52a1\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                                              • \u63cf\u8ff0\uff1a\u8f93\u5165\u5de5\u4f5c\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u91cf\u5e94\u4e0d\u8d85\u8fc7 512 \u4e2a\u3002
                                                              "},{"location":"admin/kpanda/workloads/create-cronjob.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                                                              \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                                              \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

                                                              \u57fa\u672c\u4fe1\u606f\uff08\u5fc5\u586b\uff09\u751f\u547d\u5468\u671f\uff08\u9009\u586b\uff09\u5065\u5eb7\u68c0\u67e5\uff08\u9009\u586b\uff09\u73af\u5883\u53d8\u91cf\uff08\u9009\u586b\uff09\u6570\u636e\u5b58\u50a8\uff08\u9009\u586b\uff09\u5b89\u5168\u8bbe\u7f6e\uff08\u9009\u586b\uff09

                                                              \u5728\u914d\u7f6e\u5bb9\u5668\u76f8\u5173\u53c2\u6570\u65f6\uff0c\u5fc5\u987b\u6b63\u786e\u586b\u5199\u5bb9\u5668\u7684\u540d\u79f0\u3001\u955c\u50cf\u53c2\u6570\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u8fdb\u5165\u4e0b\u4e00\u6b65\u3002\u53c2\u8003\u4ee5\u4e0b\u8981\u6c42\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u8ba4 \u3002

                                                              • \u5bb9\u5668\u7c7b\u578b\uff1a\u9ed8\u8ba4\u4e3a\u5de5\u4f5c\u5bb9\u5668\u3002\u6709\u5173\u521d\u59cb\u5316\u5bb9\u5668\uff0c\u53c2\u89c1 k8s \u5b98\u65b9\u6587\u6863\u3002
                                                              • \u5bb9\u5668\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u652f\u6301\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\u3002\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 nginx-01\u3002
                                                              • \u955c\u50cf\uff1a
                                                                • \u5bb9\u5668\u955c\u50cf\uff1a\u4ece\u5217\u8868\u4e2d\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u955c\u50cf\u3002\u8f93\u5165\u955c\u50cf\u540d\u79f0\u65f6\uff0c\u9ed8\u8ba4\u4ece\u5b98\u65b9\u7684 DockerHub \u62c9\u53d6\u955c\u50cf\u3002 \u63a5\u5165\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u955c\u50cf\u4ed3\u5e93\u6a21\u5757\u540e\uff0c\u53ef\u4ee5\u70b9\u51fb\u53f3\u4fa7\u7684 \u9009\u62e9\u955c\u50cf \u6309\u94ae\u6765\u9009\u62e9\u955c\u50cf\u3002
                                                                • \u955c\u50cf\u7248\u672c\uff1a\u4ece\u4e0b\u62c9\u5217\u8868\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u7248\u672c\u3002
                                                                • \u955c\u50cf\u62c9\u53d6\u7b56\u7565\uff1a\u52fe\u9009 \u603b\u662f\u62c9\u53d6\u955c\u50cf \u540e\uff0c\u8d1f\u8f7d\u6bcf\u6b21\u91cd\u542f/\u5347\u7ea7\u65f6\u90fd\u4f1a\u4ece\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u955c\u50cf\u3002 \u5982\u679c\u4e0d\u52fe\u9009\uff0c\u5219\u53ea\u62c9\u53d6\u672c\u5730\u955c\u50cf\uff0c\u53ea\u6709\u5f53\u955c\u50cf\u5728\u672c\u5730\u4e0d\u5b58\u5728\u65f6\u624d\u4ece\u955c\u50cf\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u3002 \u66f4\u591a\u8be6\u60c5\u53ef\u53c2\u8003\u955c\u50cf\u62c9\u53d6\u7b56\u7565\u3002
                                                                • \u955c\u50cf\u4ed3\u5e93\u5bc6\u94a5\uff1a\u53ef\u9009\u3002\u5982\u679c\u76ee\u6807\u4ed3\u5e93\u9700\u8981 Secret \u624d\u80fd\u8bbf\u95ee\uff0c\u9700\u8981\u5148\u53bb\u521b\u5efa\u4e00\u4e2a\u5bc6\u94a5\u3002
                                                              • \u7279\u6743\u5bb9\u5668\uff1a\u5bb9\u5668\u9ed8\u8ba4\u4e0d\u53ef\u4ee5\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u4efb\u4f55\u8bbe\u5907\uff0c\u5f00\u542f\u7279\u6743\u5bb9\u5668\u540e\uff0c\u5bb9\u5668\u5373\u53ef\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u6240\u6709\u8bbe\u5907\uff0c\u4eab\u6709\u5bbf\u4e3b\u673a\u4e0a\u7684\u8fd0\u884c\u8fdb\u7a0b\u7684\u6240\u6709\u6743\u9650\u3002
                                                              • CPU/\u5185\u5b58\u914d\u989d\uff1aCPU/\u5185\u5b58\u8d44\u6e90\u7684\u8bf7\u6c42\u503c\uff08\u9700\u8981\u4f7f\u7528\u7684\u6700\u5c0f\u8d44\u6e90\uff09\u548c\u9650\u5236\u503c\uff08\u5141\u8bb8\u4f7f\u7528\u7684\u6700\u5927\u8d44\u6e90\uff09\u3002\u8bf7\u6839\u636e\u9700\u8981\u4e3a\u5bb9\u5668\u914d\u7f6e\u8d44\u6e90\uff0c\u907f\u514d\u8d44\u6e90\u6d6a\u8d39\u548c\u56e0\u5bb9\u5668\u8d44\u6e90\u8d85\u989d\u5bfc\u81f4\u7cfb\u7edf\u6545\u969c\u3002\u9ed8\u8ba4\u503c\u5982\u56fe\u6240\u793a\u3002
                                                              • GPU \u914d\u7f6e\uff1a\u4e3a\u5bb9\u5668\u914d\u7f6e GPU \u7528\u91cf\uff0c \u4ec5\u652f\u6301\u8f93\u5165\u6b63\u6574\u6570\u3002
                                                                • \u6574\u5361\u6a21\u5f0f\uff1a
                                                                  • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\u3002\u914d\u7f6e\u540e\uff0c\u5bb9\u5668\u5c06\u5360\u7528\u6574\u5f20\u7269\u7406 GPU\u5361\u3002\u540c\u65f6\u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                                                                • \u865a\u62df\u5316\u6a21\u5f0f\uff1a
                                                                  • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\uff0c \u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                                                                  • GPU \u7b97\u529b\uff1a\u6bcf\u5f20\u7269\u7406 GPU \u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u7b97\u529b\u767e\u5206\u6bd4\uff0c\u6700\u591a\u4e3a100%\u3002
                                                                  • \u663e\u5b58\uff1a\u6bcf\u5f20\u7269\u7406\u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u663e\u5b58\u6570\u91cf\u3002
                                                                  • \u8c03\u5ea6\u7b56\u7565\uff08Binpack / Spread\uff09\uff1a\u652f\u6301\u57fa\u4e8e GPU \u5361\u548c\u57fa\u4e8e\u8282\u70b9\u7684\u4e24\u79cd\u7ef4\u5ea6\u7684\u8c03\u5ea6\u7b56\u7565\u3002Binpack \u662f\u96c6\u4e2d\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u540c\u4e00\u4e2a\u8282\u70b9\u7684\u540c\u4e00\u5f20 GPU \u5361\u4e0a\uff1bSpread \u662f\u5206\u6563\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u4e0d\u540c\u8282\u70b9\u7684\u4e0d\u540c GPU \u5361\u4e0a\uff0c\u6839\u636e\u5b9e\u9645\u573a\u666f\u53ef\u7ec4\u5408\u4f7f\u7528\u3002\uff08\u5f53\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u4e0e\u96c6\u7fa4\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u51b2\u7a81\u65f6\uff0c\u7cfb\u7edf\u4f18\u5148\u4f7f\u7528\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684\u8c03\u5ea6\u7b56\u7565\uff09\u3002
                                                                  • \u4efb\u52a1\u4f18\u5148\u7ea7\uff1aGPU \u7b97\u529b\u4f1a\u4f18\u5148\u4f9b\u7ed9\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u4f7f\u7528\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u51cf\u5c11\u751a\u81f3\u6682\u505c\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u76f4\u5230\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u7ed3\u675f\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u91cd\u65b0\u7ee7\u7eed\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u5e38\u7528\u4e8e\u5728\u79bb\u7ebf\u6df7\u90e8\u573a\u666f\u3002
                                                                  • \u6307\u5b9a\u578b\u53f7\uff1a\u5c06\u5de5\u4f5c\u8d1f\u8f7d\u8c03\u5ea6\u5230\u6307\u5b9a\u578b\u53f7\u7684 GPU \u5361\u4e0a\uff0c\u9002\u7528\u4e8e\u5bf9 GPU \u578b\u53f7\u6709\u7279\u6b8a\u8981\u6c42\u7684\u573a\u666f\u3002
                                                                • Mig \u6a21\u5f0f
                                                                  • \u89c4\u683c\uff1a\u5207\u5206\u540e\u7684\u7269\u7406 GPU \u5361\u89c4\u683c\u3002
                                                                  • \u6570\u91cf\uff1a\u4f7f\u7528\u8be5\u89c4\u683c\u7684\u6570\u91cf\u3002

                                                              \u8bbe\u7f6e GPU \u4e4b\u524d\uff0c\u9700\u8981\u7ba1\u7406\u5458\u9884\u5148\u5728\u96c6\u7fa4\u4e0a\u5b89\u88c5 GPU Operator \u548c nvidia-vgpu\uff08\u4ec5 vGPU \u6a21\u5f0f\u9700\u8981\u5b89\u88c5\uff09\uff0c\u5e76\u5728\u96c6\u7fa4\u8bbe\u7f6e\u4e2d\u5f00\u542f GPU \u7279\u6027\u3002
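
As referenced in the GPU list above, a hedged sketch of what whole-card mode amounts to in the pod spec, assuming the NVIDIA GPU Operator exposes the usual nvidia.com/gpu resource; the image and count are illustrative:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: gpu-demo
spec:
  containers:
    - name: cuda-demo
      image: nvidia/cuda:12.4.1-base-ubuntu22.04   # illustrative image
      resources:
        limits:
          nvidia.com/gpu: 1   # occupies one whole physical GPU card
```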

Configure the commands to run when the container starts, after it starts, and before it stops. For details, see Container Lifecycle Configuration.

Used to determine the health of the container and the application, which helps improve application availability. For details, see Container Health Check Configuration.

Configure container parameters inside the Pod, add environment variables to the Pod, pass configuration, and so on. For details, see Container Environment Variable Configuration.

Configure the settings for mounting volumes and persisting data in the container. For details, see Container Data Storage Configuration.

Containers are securely isolated through Linux's built-in account-permission isolation mechanism. You can restrict a container's privileges by using account UIDs (numeric identity tags) with different permissions. For example, entering 0 means using the privileges of the root account. (A sketch follows below.)
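
A minimal sketch of that security setting expressed as a pod-level securityContext; the UID 1000 is an illustrative non-root account:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: security-demo
spec:
  securityContext:
    runAsUser: 1000   # numeric UID for the container processes; 0 would mean root
  containers:
    - name: app
      image: nginx
```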

                                                              "},{"location":"admin/kpanda/workloads/create-cronjob.html#_5","title":"\u5b9a\u65f6\u4efb\u52a1\u914d\u7f6e","text":"
                                                              • \u5e76\u53d1\u7b56\u7565\uff1a\u662f\u5426\u5141\u8bb8\u591a\u4e2a Job \u4efb\u52a1\u5e76\u884c\u6267\u884c\u3002

                                                                • Allow \uff1a\u53ef\u4ee5\u5728\u524d\u4e00\u4e2a\u4efb\u52a1\u672a\u5b8c\u6210\u65f6\u5c31\u521b\u5efa\u65b0\u7684\u5b9a\u65f6\u4efb\u52a1\uff0c\u800c\u4e14\u591a\u4e2a\u4efb\u52a1\u53ef\u4ee5\u5e76\u884c\u3002\u4efb\u52a1\u592a\u591a\u53ef\u80fd\u62a2\u5360\u96c6\u7fa4\u8d44\u6e90\u3002
                                                                • Forbid \uff1a\u5728\u524d\u4e00\u4e2a\u4efb\u52a1\u5b8c\u6210\u4e4b\u524d\uff0c\u4e0d\u80fd\u521b\u5efa\u65b0\u4efb\u52a1\uff0c\u5982\u679c\u65b0\u4efb\u52a1\u7684\u6267\u884c\u65f6\u95f4\u5230\u4e86\u800c\u4e4b\u524d\u7684\u4efb\u52a1\u4ecd\u672a\u6267\u884c\u5b8c\uff0cCronJob \u4f1a\u5ffd\u7565\u65b0\u4efb\u52a1\u7684\u6267\u884c\u3002
                                                                • Replace \uff1a\u5982\u679c\u65b0\u4efb\u52a1\u7684\u6267\u884c\u65f6\u95f4\u5230\u4e86\uff0c\u4f46\u524d\u4e00\u4e2a\u4efb\u52a1\u8fd8\u672a\u5b8c\u6210\uff0c\u65b0\u7684\u4efb\u52a1\u4f1a\u53d6\u4ee3\u524d\u4e00\u4e2a\u4efb\u52a1\u3002

                                                                \u4e0a\u8ff0\u89c4\u5219\u4ec5\u9002\u7528\u4e8e\u540c\u4e00\u4e2a CronJob \u521b\u5efa\u7684\u591a\u4e2a\u4efb\u52a1\u3002\u591a\u4e2a CronJob \u521b\u5efa\u7684\u591a\u4e2a\u4efb\u52a1\u603b\u662f\u5141\u8bb8\u5e76\u53d1\u6267\u884c\u3002

                                                              • \u5b9a\u65f6\u89c4\u5219\uff1a\u57fa\u4e8e\u5206\u949f\u3001\u5c0f\u65f6\u3001\u5929\u3001\u5468\u3001\u6708\u8bbe\u7f6e\u4efb\u52a1\u6267\u884c\u7684\u65f6\u95f4\u5468\u671f\u3002\u652f\u6301\u7528\u6570\u5b57\u548c * \u81ea\u5b9a\u4e49 Cron \u8868\u8fbe\u5f0f\uff0c\u8f93\u5165\u8868\u8fbe\u5f0f\u540e\u4e0b\u65b9\u4f1a\u63d0\u793a\u5f53\u524d\u8868\u8fbe\u5f0f\u7684\u542b\u4e49\u3002\u6709\u5173\u8be6\u7ec6\u7684\u8868\u8fbe\u5f0f\u8bed\u6cd5\u89c4\u5219\uff0c\u53ef\u53c2\u8003 Cron \u65f6\u95f4\u8868\u8bed\u6cd5\u3002

                                                              • \u4efb\u52a1\u8bb0\u5f55\uff1a\u8bbe\u5b9a\u4fdd\u7559\u591a\u5c11\u6761\u4efb\u52a1\u6267\u884c\u6210\u529f\u6216\u5931\u8d25\u7684\u8bb0\u5f55\u3002 0 \u8868\u793a\u4e0d\u4fdd\u7559\u3002
                                                              • \u8d85\u65f6\u65f6\u95f4\uff1a\u8d85\u51fa\u8be5\u65f6\u95f4\u65f6\uff0c\u4efb\u52a1\u5c31\u4f1a\u88ab\u6807\u8bc6\u4e3a\u6267\u884c\u5931\u8d25\uff0c\u4efb\u52a1\u4e0b\u7684\u6240\u6709 Pod \u90fd\u4f1a\u88ab\u5220\u9664\u3002\u4e3a\u7a7a\u65f6\u8868\u793a\u4e0d\u8bbe\u7f6e\u8d85\u65f6\u65f6\u95f4\u3002\u9ed8\u8ba4\u503c\u4e3a 360 s\u3002
                                                              • \u91cd\u8bd5\u6b21\u6570\uff1a\u4efb\u52a1\u53ef\u91cd\u8bd5\u6b21\u6570\uff0c\u9ed8\u8ba4\u503c\u4e3a 6\u3002
                                                              • \u91cd\u542f\u7b56\u7565\uff1a\u8bbe\u7f6e\u4efb\u52a1\u5931\u8d25\u65f6\u662f\u5426\u91cd\u542f Pod\u3002
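
A sketch of how the form fields map onto manifest fields; the defaults are taken from the descriptions above, while the schedule and the container are illustrative:

```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: cronjob-demo
spec:
  schedule: "0 2 * * *"            # schedule: every day at 02:00
  concurrencyPolicy: Forbid        # concurrency policy: Allow / Forbid / Replace
  successfulJobsHistoryLimit: 3    # job history: successful runs to keep
  failedJobsHistoryLimit: 1        # job history: failed runs to keep
  jobTemplate:
    spec:
      activeDeadlineSeconds: 360   # timeout: default 360 s
      backoffLimit: 6              # retries: default 6
      template:
        spec:
          restartPolicy: Never     # restart policy on failure
          containers:
            - name: task
              image: busybox
              command: ["sh", "-c", "echo hello"]
```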
                                                              "},{"location":"admin/kpanda/workloads/create-cronjob.html#_6","title":"\u670d\u52a1\u914d\u7f6e","text":"

                                                              \u4e3a\u6709\u72b6\u6001\u8d1f\u8f7d\u914d\u7f6e\u670d\u52a1\uff08Service\uff09\uff0c\u4f7f\u6709\u72b6\u6001\u8d1f\u8f7d\u80fd\u591f\u88ab\u5916\u90e8\u8bbf\u95ee\u3002

                                                              1. \u70b9\u51fb \u521b\u5efa\u670d\u52a1 \u6309\u94ae\u3002

                                                              2. \u53c2\u8003\u521b\u5efa\u670d\u52a1\uff0c\u914d\u7f6e\u670d\u52a1\u53c2\u6570\u3002

                                                              3. \u70b9\u51fb \u786e\u5b9a \uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                                              "},{"location":"admin/kpanda/workloads/create-cronjob.html#_7","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

                                                              \u5b9a\u65f6\u4efb\u52a1\u7684\u9ad8\u7ea7\u914d\u7f6e\u4e3b\u8981\u6d89\u53ca\u6807\u7b7e\u4e0e\u6ce8\u89e3\u3002

                                                              \u53ef\u4ee5\u70b9\u51fb \u6dfb\u52a0 \u6309\u94ae\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u5b9e\u4f8b Pod \u6dfb\u52a0\u6807\u7b7e\u548c\u6ce8\u89e3\u3002

                                                              "},{"location":"admin/kpanda/workloads/create-cronjob.html#yaml","title":"YAML \u521b\u5efa","text":"

                                                              \u9664\u4e86\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u5916\uff0c\u8fd8\u53ef\u4ee5\u901a\u8fc7 YAML \u6587\u4ef6\u66f4\u5feb\u901f\u5730\u521b\u5efa\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\u3002

                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                                              2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u5b9a\u65f6\u4efb\u52a1 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 YAML \u521b\u5efa \u6309\u94ae\u3002

                                                              3. \u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u5b8c\u6210\u521b\u5efa\u3002

                                                              \u70b9\u51fb\u67e5\u770b\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\u7684 YAML \u793a\u4f8b
```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  creationTimestamp: '2022-12-26T09:45:47Z'
  generation: 1
  name: demo
  namespace: default
  resourceVersion: '92726617'
  uid: d030d8d7-a405-4dcd-b09a-176942ef36c9
spec:
  concurrencyPolicy: Allow
  failedJobsHistoryLimit: 1
  jobTemplate:
    metadata:
      creationTimestamp: null
    spec:
      activeDeadlineSeconds: 360
      backoffLimit: 6
      template:
        metadata:
          creationTimestamp: null
        spec:
          containers:
            - image: nginx
              imagePullPolicy: IfNotPresent
              lifecycle: {}
              name: container-3
              resources:
                limits:
                  cpu: 250m
                  memory: 512Mi
                requests:
                  cpu: 250m
                  memory: 512Mi
              securityContext:
                privileged: false
              terminationMessagePath: /dev/termination-log
              terminationMessagePolicy: File
          dnsPolicy: ClusterFirst
          restartPolicy: Never
          schedulerName: default-scheduler
          securityContext: {}
          terminationGracePeriodSeconds: 30
  schedule: 0 0 13 * 5
  successfulJobsHistoryLimit: 3
  suspend: false
status: {}
```
                                                              "},{"location":"admin/kpanda/workloads/create-daemonset.html","title":"\u521b\u5efa\u5b88\u62a4\u8fdb\u7a0b(DaemonSet)","text":"

                                                              \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u5b88\u62a4\u8fdb\u7a0b\uff08DaemonSet\uff09\u3002

                                                              \u5b88\u62a4\u8fdb\u7a0b\uff08DaemonSet\uff09\u901a\u8fc7\u8282\u70b9\u4eb2\u548c\u6027\u4e0e\u6c61\u70b9\u529f\u80fd\u786e\u4fdd\u5728\u5168\u90e8\u6216\u90e8\u5206\u8282\u70b9\u4e0a\u8fd0\u884c\u4e00\u4e2a Pod \u7684\u526f\u672c\u3002\u5bf9\u4e8e\u65b0\u52a0\u5165\u96c6\u7fa4\u7684\u8282\u70b9\uff0cDaemonSet \u81ea\u52a8\u5728\u65b0\u8282\u70b9\u4e0a\u90e8\u7f72\u76f8\u5e94\u7684 Pod\uff0c\u5e76\u8ddf\u8e2a Pod \u7684\u8fd0\u884c\u72b6\u6001\u3002\u5f53\u8282\u70b9\u88ab\u79fb\u9664\u65f6\uff0cDaemonSet \u5219\u5220\u9664\u5176\u521b\u5efa\u7684\u6240\u6709 Pod\u3002

                                                              \u5b88\u62a4\u8fdb\u7a0b\u7684\u5e38\u89c1\u7528\u4f8b\u5305\u62ec\uff1a

                                                              • \u5728\u6bcf\u4e2a\u8282\u70b9\u4e0a\u8fd0\u884c\u96c6\u7fa4\u5b88\u62a4\u8fdb\u7a0b\u3002

                                                              • \u5728\u6bcf\u4e2a\u8282\u70b9\u4e0a\u8fd0\u884c\u65e5\u5fd7\u6536\u96c6\u5b88\u62a4\u8fdb\u7a0b\u3002

                                                              • \u5728\u6bcf\u4e2a\u8282\u70b9\u4e0a\u8fd0\u884c\u76d1\u63a7\u5b88\u62a4\u8fdb\u7a0b\u3002

                                                              \u7b80\u5355\u8d77\u89c1\uff0c\u53ef\u4ee5\u5728\u6bcf\u4e2a\u8282\u70b9\u4e0a\u4e3a\u6bcf\u79cd\u7c7b\u578b\u7684\u5b88\u62a4\u8fdb\u7a0b\u90fd\u542f\u52a8\u4e00\u4e2a DaemonSet\u3002\u5982\u9700\u66f4\u7cbe\u7ec6\u3001\u66f4\u9ad8\u7ea7\u5730\u7ba1\u7406\u5b88\u62a4\u8fdb\u7a0b\uff0c\u4e5f\u53ef\u4ee5\u4e3a\u540c\u4e00\u79cd\u5b88\u62a4\u8fdb\u7a0b\u90e8\u7f72\u591a\u4e2a DaemonSet\u3002\u6bcf\u4e2a DaemonSet \u5177\u6709\u4e0d\u540c\u7684\u6807\u5fd7\uff0c\u5e76\u4e14\u5bf9\u4e0d\u540c\u786c\u4ef6\u7c7b\u578b\u5177\u6709\u4e0d\u540c\u7684\u5185\u5b58\u3001CPU \u8981\u6c42\u3002
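
A minimal sketch for the log-collection use case above; the fluentd image and the control-plane toleration are illustrative choices:

```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: log-collector
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: log-collector
  template:
    metadata:
      labels:
        app: log-collector
    spec:
      tolerations:
        # let the daemon also run on control-plane nodes
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
      containers:
        - name: fluentd
          image: fluent/fluentd:v1.16-1   # illustrative log-collector image
          resources:
            limits:
              memory: 200Mi
```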

                                                              "},{"location":"admin/kpanda/workloads/create-daemonset.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u521b\u5efa DaemonSet \u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                              • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                                                              • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                              • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                                                              "},{"location":"admin/kpanda/workloads/create-daemonset.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

                                                              \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u5b88\u62a4\u8fdb\u7a0b\u3002

                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                                              2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u5b88\u62a4\u8fdb\u7a0b \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                                                              3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\u3001\u670d\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                                                                \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de \u5b88\u62a4\u8fdb\u7a0b \u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u5b88\u62a4\u8fdb\u7a0b\u6267\u884c\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u3001\u91cd\u542f\u7b49\u64cd\u4f5c\u3002

                                                              "},{"location":"admin/kpanda/workloads/create-daemonset.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"

                                                              \u5728 \u521b\u5efa\u5b88\u62a4\u8fdb\u7a0b \u9875\u9762\u4e2d\uff0c\u6839\u636e\u4e0b\u8868\u8f93\u5165\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                                              • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                                              • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u5b88\u62a4\u8fdb\u7a0b\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                                              • \u63cf\u8ff0\uff1a\u8f93\u5165\u5de5\u4f5c\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u91cf\u5e94\u4e0d\u8d85\u8fc7 512 \u4e2a\u3002
                                                              "},{"location":"admin/kpanda/workloads/create-daemonset.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                                                              \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                                              \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

                                                              \u57fa\u672c\u4fe1\u606f\uff08\u5fc5\u586b\uff09\u751f\u547d\u5468\u671f\uff08\u9009\u586b\uff09\u5065\u5eb7\u68c0\u67e5\uff08\u9009\u586b\uff09\u73af\u5883\u53d8\u91cf\uff08\u9009\u586b\uff09\u6570\u636e\u5b58\u50a8\uff08\u9009\u586b\uff09\u5b89\u5168\u8bbe\u7f6e\uff08\u9009\u586b\uff09

                                                              \u5728\u914d\u7f6e\u5bb9\u5668\u76f8\u5173\u53c2\u6570\u65f6\uff0c\u5fc5\u987b\u6b63\u786e\u586b\u5199\u5bb9\u5668\u7684\u540d\u79f0\u3001\u955c\u50cf\u53c2\u6570\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u8fdb\u5165\u4e0b\u4e00\u6b65\u3002\u53c2\u8003\u4ee5\u4e0b\u8981\u6c42\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u8ba4 \u3002

                                                              • \u5bb9\u5668\u7c7b\u578b\uff1a\u9ed8\u8ba4\u4e3a\u5de5\u4f5c\u5bb9\u5668\u3002\u6709\u5173\u521d\u59cb\u5316\u5bb9\u5668\uff0c\u53c2\u89c1 k8s \u5b98\u65b9\u6587\u6863\u3002
                                                              • \u5bb9\u5668\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u652f\u6301\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\u3002\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 nginx-01\u3002
                                                              • \u955c\u50cf\uff1a
                                                                • \u5bb9\u5668\u955c\u50cf\uff1a\u4ece\u5217\u8868\u4e2d\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u955c\u50cf\u3002\u8f93\u5165\u955c\u50cf\u540d\u79f0\u65f6\uff0c\u9ed8\u8ba4\u4ece\u5b98\u65b9\u7684 DockerHub \u62c9\u53d6\u955c\u50cf\u3002 \u63a5\u5165\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u955c\u50cf\u4ed3\u5e93\u6a21\u5757\u540e\uff0c\u53ef\u4ee5\u70b9\u51fb\u53f3\u4fa7\u7684 \u9009\u62e9\u955c\u50cf \u6309\u94ae\u6765\u9009\u62e9\u955c\u50cf\u3002
                                                                • \u955c\u50cf\u7248\u672c\uff1a\u4ece\u4e0b\u62c9\u5217\u8868\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u7248\u672c\u3002
                                                                • \u955c\u50cf\u62c9\u53d6\u7b56\u7565\uff1a\u52fe\u9009 \u603b\u662f\u62c9\u53d6\u955c\u50cf \u540e\uff0c\u8d1f\u8f7d\u6bcf\u6b21\u91cd\u542f/\u5347\u7ea7\u65f6\u90fd\u4f1a\u4ece\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u955c\u50cf\u3002 \u5982\u679c\u4e0d\u52fe\u9009\uff0c\u5219\u53ea\u62c9\u53d6\u672c\u5730\u955c\u50cf\uff0c\u53ea\u6709\u5f53\u955c\u50cf\u5728\u672c\u5730\u4e0d\u5b58\u5728\u65f6\u624d\u4ece\u955c\u50cf\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u3002 \u66f4\u591a\u8be6\u60c5\u53ef\u53c2\u8003\u955c\u50cf\u62c9\u53d6\u7b56\u7565\u3002
                                                                • \u955c\u50cf\u4ed3\u5e93\u5bc6\u94a5\uff1a\u53ef\u9009\u3002\u5982\u679c\u76ee\u6807\u4ed3\u5e93\u9700\u8981 Secret \u624d\u80fd\u8bbf\u95ee\uff0c\u9700\u8981\u5148\u53bb\u521b\u5efa\u4e00\u4e2a\u5bc6\u94a5\u3002
                                                              • \u7279\u6743\u5bb9\u5668\uff1a\u5bb9\u5668\u9ed8\u8ba4\u4e0d\u53ef\u4ee5\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u4efb\u4f55\u8bbe\u5907\uff0c\u5f00\u542f\u7279\u6743\u5bb9\u5668\u540e\uff0c\u5bb9\u5668\u5373\u53ef\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u6240\u6709\u8bbe\u5907\uff0c\u4eab\u6709\u5bbf\u4e3b\u673a\u4e0a\u7684\u8fd0\u884c\u8fdb\u7a0b\u7684\u6240\u6709\u6743\u9650\u3002
                                                              • CPU/\u5185\u5b58\u914d\u989d\uff1aCPU/\u5185\u5b58\u8d44\u6e90\u7684\u8bf7\u6c42\u503c\uff08\u9700\u8981\u4f7f\u7528\u7684\u6700\u5c0f\u8d44\u6e90\uff09\u548c\u9650\u5236\u503c\uff08\u5141\u8bb8\u4f7f\u7528\u7684\u6700\u5927\u8d44\u6e90\uff09\u3002\u8bf7\u6839\u636e\u9700\u8981\u4e3a\u5bb9\u5668\u914d\u7f6e\u8d44\u6e90\uff0c\u907f\u514d\u8d44\u6e90\u6d6a\u8d39\u548c\u56e0\u5bb9\u5668\u8d44\u6e90\u8d85\u989d\u5bfc\u81f4\u7cfb\u7edf\u6545\u969c\u3002\u9ed8\u8ba4\u503c\u5982\u56fe\u6240\u793a\u3002
                                                              • GPU \u914d\u7f6e\uff1a\u4e3a\u5bb9\u5668\u914d\u7f6e GPU \u7528\u91cf\uff0c \u4ec5\u652f\u6301\u8f93\u5165\u6b63\u6574\u6570\u3002
                                                                • \u6574\u5361\u6a21\u5f0f\uff1a
                                                                  • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\u3002\u914d\u7f6e\u540e\uff0c\u5bb9\u5668\u5c06\u5360\u7528\u6574\u5f20\u7269\u7406 GPU\u5361\u3002\u540c\u65f6\u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                                                                • \u865a\u62df\u5316\u6a21\u5f0f\uff1a
                                                                  • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\uff0c \u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                                                                  • GPU \u7b97\u529b\uff1a\u6bcf\u5f20\u7269\u7406 GPU \u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u7b97\u529b\u767e\u5206\u6bd4\uff0c\u6700\u591a\u4e3a100%\u3002
                                                                  • \u663e\u5b58\uff1a\u6bcf\u5f20\u7269\u7406\u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u663e\u5b58\u6570\u91cf\u3002
                                                                  • \u8c03\u5ea6\u7b56\u7565\uff08Binpack / Spread\uff09\uff1a\u652f\u6301\u57fa\u4e8e GPU \u5361\u548c\u57fa\u4e8e\u8282\u70b9\u7684\u4e24\u79cd\u7ef4\u5ea6\u7684\u8c03\u5ea6\u7b56\u7565\u3002Binpack \u662f\u96c6\u4e2d\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u540c\u4e00\u4e2a\u8282\u70b9\u7684\u540c\u4e00\u5f20 GPU \u5361\u4e0a\uff1bSpread \u662f\u5206\u6563\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u4e0d\u540c\u8282\u70b9\u7684\u4e0d\u540c GPU \u5361\u4e0a\uff0c\u6839\u636e\u5b9e\u9645\u573a\u666f\u53ef\u7ec4\u5408\u4f7f\u7528\u3002\uff08\u5f53\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u4e0e\u96c6\u7fa4\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u51b2\u7a81\u65f6\uff0c\u7cfb\u7edf\u4f18\u5148\u4f7f\u7528\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684\u8c03\u5ea6\u7b56\u7565\uff09\u3002
                                                                  • \u4efb\u52a1\u4f18\u5148\u7ea7\uff1aGPU \u7b97\u529b\u4f1a\u4f18\u5148\u4f9b\u7ed9\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u4f7f\u7528\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u51cf\u5c11\u751a\u81f3\u6682\u505c\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u76f4\u5230\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u7ed3\u675f\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u91cd\u65b0\u7ee7\u7eed\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u5e38\u7528\u4e8e\u5728\u79bb\u7ebf\u6df7\u90e8\u573a\u666f\u3002
                                                                  • \u6307\u5b9a\u578b\u53f7\uff1a\u5c06\u5de5\u4f5c\u8d1f\u8f7d\u8c03\u5ea6\u5230\u6307\u5b9a\u578b\u53f7\u7684 GPU \u5361\u4e0a\uff0c\u9002\u7528\u4e8e\u5bf9 GPU \u578b\u53f7\u6709\u7279\u6b8a\u8981\u6c42\u7684\u573a\u666f\u3002
                                                                • Mig \u6a21\u5f0f
                                                                  • \u89c4\u683c\uff1a\u5207\u5206\u540e\u7684\u7269\u7406 GPU \u5361\u89c4\u683c\u3002
                                                                  • \u6570\u91cf\uff1a\u4f7f\u7528\u8be5\u89c4\u683c\u7684\u6570\u91cf\u3002

                                                              \u8bbe\u7f6e GPU \u4e4b\u524d\uff0c\u9700\u8981\u7ba1\u7406\u5458\u9884\u5148\u5728\u96c6\u7fa4\u4e0a\u5b89\u88c5 GPU Operator \u548c nvidia-vgpu\uff08\u4ec5 vGPU \u6a21\u5f0f\u9700\u8981\u5b89\u88c5\uff09\uff0c\u5e76\u5728\u96c6\u7fa4\u8bbe\u7f6e\u4e2d\u5f00\u542f GPU \u7279\u6027\u3002
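In whole-card mode, the GPU quantity set in the UI ultimately becomes an extended-resource request on the container. A minimal sketch, assuming the standard nvidia.com/gpu resource name exposed by the NVIDIA GPU Operator (vGPU and MIG resource names depend on the platform's device plugin):

apiVersion: v1
kind: Pod
metadata:
  name: gpu-demo
spec:
  containers:
    - name: cuda
      image: nvidia/cuda:12.2.0-base-ubuntu22.04
      resources:
        limits:
          nvidia.com/gpu: 1   # whole-card mode: integer count of physical GPU cards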

Set the commands to run when the container starts, after it starts, and before it stops. For details, see Container Lifecycle Configuration.

Health checks determine the health of the container and the application, which helps improve application availability. For details, see Container Health Check Configuration; a minimal sketch follows.
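A minimal health-check sketch; the /healthz and /ready paths, the port, and the timings are illustrative assumptions rather than platform defaults:

containers:
  - name: app
    image: nginx:1.14.2
    livenessProbe:            # restart the container when this probe fails
      httpGet:
        path: /healthz
        port: 80
      initialDelaySeconds: 10
      periodSeconds: 10
    readinessProbe:           # stop routing traffic until this probe passes
      httpGet:
        path: /ready
        port: 80
      periodSeconds: 5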

Configure container parameters within the Pod, add environment variables, or pass configuration to the Pod. For details, see Container Environment Variable Configuration.

Configure how the container mounts data volumes and persists data. For details, see Container Data Storage Configuration.

Containers are isolated for security through Linux's built-in account permission isolation mechanism. You can restrict a container's privileges by using an account UID (numeric identity) with different permissions. For example, entering 0 means using the privileges of the root account; a minimal sketch follows.
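A minimal sketch of the UID mechanism described above; UID 1000 is an arbitrary non-root example:

securityContext:
  runAsUser: 1000      # numeric UID the container process runs as; 0 would mean root
  runAsNonRoot: true   # refuse to start if the image would run as root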

                                                              "},{"location":"admin/kpanda/workloads/create-daemonset.html#_5","title":"\u670d\u52a1\u914d\u7f6e","text":"

                                                              \u4e3a\u5b88\u62a4\u8fdb\u7a0b\u521b\u5efa\u670d\u52a1\uff08Service\uff09\uff0c\u4f7f\u5b88\u62a4\u8fdb\u7a0b\u80fd\u591f\u88ab\u5916\u90e8\u8bbf\u95ee\u3002

                                                              1. \u70b9\u51fb \u521b\u5efa\u670d\u52a1 \u6309\u94ae\u3002

                                                              2. \u914d\u7f6e\u670d\u52a1\u53c2\u6570\uff0c\u8be6\u60c5\u8bf7\u53c2\u8003\u521b\u5efa\u670d\u52a1\u3002

                                                              3. \u70b9\u51fb \u786e\u5b9a \uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                                              "},{"location":"admin/kpanda/workloads/create-daemonset.html#_6","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

                                                              \u9ad8\u7ea7\u914d\u7f6e\u5305\u62ec\u8d1f\u8f7d\u7684\u7f51\u7edc\u914d\u7f6e\u3001\u5347\u7ea7\u7b56\u7565\u3001\u8c03\u5ea6\u7b56\u7565\u3001\u6807\u7b7e\u4e0e\u6ce8\u89e3\u56db\u90e8\u5206\uff0c\u53ef\u70b9\u51fb\u4e0b\u65b9\u7684\u9875\u7b7e\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                                              \u7f51\u7edc\u914d\u7f6e\u5347\u7ea7\u7b56\u7565\u8c03\u5ea6\u7b56\u7565\u6807\u7b7e\u4e0e\u6ce8\u89e3

                                                              \u5e94\u7528\u5728\u67d0\u4e9b\u573a\u666f\u4e0b\u4f1a\u51fa\u73b0\u5197\u4f59\u7684 DNS \u67e5\u8be2\u3002Kubernetes \u4e3a\u5e94\u7528\u63d0\u4f9b\u4e86\u4e0e DNS \u76f8\u5173\u7684\u914d\u7f6e\u9009\u9879\uff0c\u80fd\u591f\u5728\u67d0\u4e9b\u573a\u666f\u4e0b\u6709\u6548\u5730\u51cf\u5c11\u5197\u4f59\u7684 DNS \u67e5\u8be2\uff0c\u63d0\u5347\u4e1a\u52a1\u5e76\u53d1\u91cf\u3002

                                                              • DNS \u7b56\u7565

                                                                • Default\uff1a\u4f7f\u5bb9\u5668\u4f7f\u7528 kubelet \u7684 --resolv-conf \u53c2\u6570\u6307\u5411\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u3002\u8be5\u914d\u7f6e\u53ea\u80fd\u89e3\u6790\u6ce8\u518c\u5230\u4e92\u8054\u7f51\u4e0a\u7684\u5916\u90e8\u57df\u540d\uff0c\u65e0\u6cd5\u89e3\u6790\u96c6\u7fa4\u5185\u90e8\u57df\u540d\uff0c\u4e14\u4e0d\u5b58\u5728\u65e0\u6548\u7684 DNS \u67e5\u8be2\u3002
                                                                • ClusterFirstWithHostNet\uff1a\u5e94\u7528\u5bf9\u63a5\u4e3b\u673a\u7684\u57df\u540d\u6587\u4ef6\u3002
                                                                • ClusterFirst\uff1a\u5e94\u7528\u5bf9\u63a5 Kube-DNS/CoreDNS\u3002
                                                                • None\uff1aKubernetes v1.9\uff08Beta in v1.10\uff09\u4e2d\u5f15\u5165\u7684\u65b0\u9009\u9879\u503c\u3002\u8bbe\u7f6e\u4e3a None \u4e4b\u540e\uff0c\u5fc5\u987b\u8bbe\u7f6e dnsConfig\uff0c\u6b64\u65f6\u5bb9\u5668\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u5c06\u5b8c\u5168\u901a\u8fc7 dnsConfig \u7684\u914d\u7f6e\u6765\u751f\u6210\u3002
                                                              • \u57df\u540d\u670d\u52a1\u5668\uff1a\u586b\u5199\u57df\u540d\u670d\u52a1\u5668\u7684\u5730\u5740\uff0c\u4f8b\u5982 10.6.175.20 \u3002

                                                              • \u641c\u7d22\u57df\uff1a\u57df\u540d\u67e5\u8be2\u65f6\u7684 DNS \u641c\u7d22\u57df\u5217\u8868\u3002\u6307\u5b9a\u540e\uff0c\u63d0\u4f9b\u7684\u641c\u7d22\u57df\u5217\u8868\u5c06\u5408\u5e76\u5230\u57fa\u4e8e dnsPolicy \u751f\u6210\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u7684 search \u5b57\u6bb5\u4e2d\uff0c\u5e76\u5220\u9664\u91cd\u590d\u7684\u57df\u540d\u3002Kubernetes \u6700\u591a\u5141\u8bb8 6 \u4e2a\u641c\u7d22\u57df\u3002
                                                              • Options\uff1aDNS \u7684\u914d\u7f6e\u9009\u9879\uff0c\u5176\u4e2d\u6bcf\u4e2a\u5bf9\u8c61\u53ef\u4ee5\u5177\u6709 name \u5c5e\u6027\uff08\u5fc5\u9700\uff09\u548c value \u5c5e\u6027\uff08\u53ef\u9009\uff09\u3002\u8be5\u5b57\u6bb5\u4e2d\u7684\u5185\u5bb9\u5c06\u5408\u5e76\u5230\u57fa\u4e8e dnsPolicy \u751f\u6210\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u7684 options \u5b57\u6bb5\u4e2d\uff0cdnsConfig \u7684 options \u7684\u67d0\u4e9b\u9009\u9879\u5982\u679c\u4e0e\u57fa\u4e8e dnsPolicy \u751f\u6210\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u7684\u9009\u9879\u51b2\u7a81\uff0c\u5219\u4f1a\u88ab dnsConfig \u6240\u8986\u76d6\u3002
                                                              • \u4e3b\u673a\u522b\u540d\uff1a\u4e3a\u4e3b\u673a\u8bbe\u7f6e\u7684\u522b\u540d\u3002
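A minimal sketch of these fields on a Pod spec using the None policy; the nameserver reuses the 10.6.175.20 example above, while the search domains and options are assumptions:

dnsPolicy: "None"
dnsConfig:
  nameservers:
    - 10.6.175.20            # nameserver address from the example above
  searches:                  # merged into the search field, duplicates removed
    - ns1.svc.cluster.local
    - my.dns.search.suffix
  options:                   # merged into the options field; conflicts are overridden
    - name: ndots
      value: "2"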

• Upgrade mode: Rolling upgrade gradually replaces old-version instances with new-version instances; during the upgrade, traffic is load-balanced across both old and new instances, so the business is not interrupted. Recreate upgrade deletes the old-version instances first and then installs the specified new version; the business is interrupted during the upgrade.
• Max unavailable Pods: the maximum number or ratio of unavailable Pods during a workload update, 25% by default. If it equals the instance count, there is a risk of service interruption.
• Max surge: the maximum number or ratio of Pods exceeding the desired replica count during an update, 25% by default.
• Max retained revisions: the number of old revisions retained for rollback, 10 by default.
• Min ready time: the minimum time a Pod must be ready before it is considered available, 0 seconds by default.
• Max upgrade duration: if the deployment has not succeeded within the configured time, the workload is marked as failed, 600 seconds by default.
• Scale-down window: the execution window for pre-stop commands before the workload stops (0-9,999 seconds), 30 seconds by default.

• Tolerance time: when the node hosting a workload instance becomes unavailable, the time before the instance is rescheduled to another available node, 300 seconds by default.
• Node affinity: constrain which nodes a Pod can be scheduled onto based on node labels.
• Workload affinity: constrain which nodes a Pod can be scheduled onto based on the labels of Pods already running on those nodes.
• Workload anti-affinity: constrain which nodes a Pod cannot be scheduled onto based on the labels of Pods already running on those nodes.
• Topology key: that is, topologyKey, which designates a group of schedulable nodes. For example, kubernetes.io/os means that any node of a given operating system satisfying the labelSelector can be scheduled onto.

For details, see Scheduling Policy; a minimal sketch of these fields follows.
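A minimal sketch of how these settings appear on a Pod spec; the label keys, values, and weight are illustrative assumptions:

affinity:
  nodeAffinity:              # constrain scheduling by node labels
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/os
              operator: In
              values: ["linux"]
  podAntiAffinity:           # spread replicas across nodes by hostname
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          labelSelector:
            matchLabels:
              app: my-app    # hypothetical workload label
          topologyKey: kubernetes.io/hostname
tolerations:
  - key: node.kubernetes.io/unreachable
    operator: Exists
    effect: NoExecute
    tolerationSeconds: 300   # the 300-second tolerance time described above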

You can click the Add button to add labels and annotations for the workload and the pod.

"},{"location":"admin/kpanda/workloads/create-daemonset.html#yaml","title":"Create from YAML","text":"

Besides the image-based method, you can also create a DaemonSet more quickly from a YAML file.

1. Click Clusters in the left navigation bar, then click the name of the target cluster to enter the Cluster Details page.

2. On the cluster details page, click Workloads -> DaemonSets in the left navigation bar, then click the YAML Create button in the upper-right corner of the page.

3. Enter or paste the prepared YAML file, then click OK to finish creating.

Click to view a YAML example for creating a DaemonSet
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: hwameistor-local-disk-manager
  namespace: hwameistor
  uid: ccbdc098-7de3-4a8a-96dd-d1cee159c92b
  resourceVersion: '90999552'
  generation: 1
  creationTimestamp: '2022-12-15T09:03:44Z'
  labels:
    app.kubernetes.io/managed-by: Helm
  annotations:
    deprecated.daemonset.template.generation: '1'
    meta.helm.sh/release-name: hwameistor
    meta.helm.sh/release-namespace: hwameistor
spec:
  selector:
    matchLabels:
      app: hwameistor-local-disk-manager
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: hwameistor-local-disk-manager
    spec:
      volumes:
        - name: udev
          hostPath:
            path: /run/udev
            type: Directory
        - name: procmount
          hostPath:
            path: /proc
            type: Directory
        - name: devmount
          hostPath:
            path: /dev
            type: Directory
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/disk.hwameistor.io
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
            type: Directory
        - name: plugin-dir
          hostPath:
            path: /var/lib/kubelet/plugins
            type: DirectoryOrCreate
        - name: pods-mount-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: DirectoryOrCreate
      containers:
        - name: registrar
          image: k8s-gcr.m.daocloud.io/sig-storage/csi-node-driver-registrar:v2.5.0
          args:
            - '--v=5'
            - '--csi-address=/csi/csi.sock'
            - >-
              --kubelet-registration-path=/var/lib/kubelet/plugins/disk.hwameistor.io/csi.sock
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          resources: {}
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
          lifecycle:
            preStop:
              exec:
                command:
                  - /bin/sh
                  - '-c'
                  - >-
                    rm -rf /registration/disk.hwameistor.io
                    /registration/disk.hwameistor.io-reg.sock
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
        - name: manager
          image: ghcr.m.daocloud.io/hwameistor/local-disk-manager:v0.6.1
          command:
            - /local-disk-manager
          args:
            - '--endpoint=$(CSI_ENDPOINT)'
            - '--nodeid=$(NODENAME)'
            - '--csi-enable=true'
          env:
            - name: CSI_ENDPOINT
              value: unix://var/lib/kubelet/plugins/disk.hwameistor.io/csi.sock
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: WATCH_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: NODENAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: OPERATOR_NAME
              value: local-disk-manager
          resources: {}
          volumeMounts:
            - name: udev
              mountPath: /run/udev
            - name: procmount
              readOnly: true
              mountPath: /host/proc
            - name: devmount
              mountPath: /dev
            - name: registration-dir
              mountPath: /var/lib/kubelet/plugins_registry
            - name: plugin-dir
              mountPath: /var/lib/kubelet/plugins
              mountPropagation: Bidirectional
            - name: pods-mount-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: Bidirectional
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
          securityContext:
            privileged: true
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      serviceAccountName: hwameistor-admin
      serviceAccount: hwameistor-admin
      hostNetwork: true
      hostPID: true
      securityContext: {}
      schedulerName: default-scheduler
      tolerations:
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node.kubernetes.io/not-ready
          operator: Exists
          effect: NoSchedule
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
        - key: node.cloudprovider.kubernetes.io/uninitialized
          operator: Exists
          effect: NoSchedule
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 0
  revisionHistoryLimit: 10
status:
  currentNumberScheduled: 4
  numberMisscheduled: 0
  desiredNumberScheduled: 4
  numberReady: 4
  observedGeneration: 1
  updatedNumberScheduled: 4
  numberAvailable: 4
                                                              "},{"location":"admin/kpanda/workloads/create-deployment.html","title":"\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\uff08Deployment\uff09","text":"

                                                              \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\u3002

                                                              \u65e0\u72b6\u6001\u8d1f\u8f7d\uff08Deployment\uff09\u662f Kubernetes \u4e2d\u7684\u4e00\u79cd\u5e38\u89c1\u8d44\u6e90\uff0c\u4e3b\u8981\u4e3a Pod \u548c ReplicaSet \u63d0\u4f9b\u58f0\u660e\u5f0f\u66f4\u65b0\uff0c\u652f\u6301\u5f39\u6027\u4f38\u7f29\u3001\u6eda\u52a8\u5347\u7ea7\u3001\u7248\u672c\u56de\u9000\u7b49\u529f\u80fd\u3002\u5728 Deployment \u4e2d\u58f0\u660e\u671f\u671b\u7684 Pod \u72b6\u6001\uff0cDeployment Controller \u4f1a\u901a\u8fc7 ReplicaSet \u4fee\u6539\u5f53\u524d\u72b6\u6001\uff0c\u4f7f\u5176\u8fbe\u5230\u9884\u5148\u58f0\u660e\u7684\u671f\u671b\u72b6\u6001\u3002Deployment \u662f\u65e0\u72b6\u6001\u7684\uff0c\u4e0d\u652f\u6301\u6570\u636e\u6301\u4e45\u5316\uff0c\u9002\u7528\u4e8e\u90e8\u7f72\u65e0\u72b6\u6001\u7684\u3001\u4e0d\u9700\u8981\u4fdd\u5b58\u6570\u636e\u3001\u968f\u65f6\u53ef\u4ee5\u91cd\u542f\u56de\u6eda\u7684\u5e94\u7528\u3002

                                                              \u901a\u8fc7\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\uff0c\u53ef\u4ee5\u57fa\u4e8e\u76f8\u5e94\u7684\u89d2\u8272\u6743\u9650\u8f7b\u677e\u7ba1\u7406\u591a\u4e91\u591a\u96c6\u7fa4\u4e0a\u7684\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u5305\u62ec\u5bf9\u65e0\u72b6\u6001\u8d1f\u8f7d\u7684\u521b\u5efa\u3001\u66f4\u65b0\u3001\u5220\u9664\u3001\u5f39\u6027\u6269\u7f29\u3001\u91cd\u542f\u3001\u7248\u672c\u56de\u9000\u7b49\u5168\u751f\u547d\u5468\u671f\u7ba1\u7406\u3002

                                                              "},{"location":"admin/kpanda/workloads/create-deployment.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u5728\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                              • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                                                              • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                              • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                                                              "},{"location":"admin/kpanda/workloads/create-deployment.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

                                                              \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u65e0\u72b6\u6001\u8d1f\u8f7d\u3002

                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                                              2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u65e0\u72b6\u6001\u8d1f\u8f7d \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                                                              3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\u3001\u670d\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                                                                \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de \u65e0\u72b6\u6001\u8d1f\u8f7d \u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u8d1f\u8f7d\u6267\u884c\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u3001\u5f39\u6027\u6269\u7f29\u3001\u91cd\u542f\u3001\u7248\u672c\u56de\u9000\u7b49\u64cd\u4f5c\u3002\u5982\u679c\u8d1f\u8f7d\u72b6\u6001\u51fa\u73b0\u5f02\u5e38\uff0c\u8bf7\u67e5\u770b\u5177\u4f53\u5f02\u5e38\u4fe1\u606f\uff0c\u53ef\u53c2\u8003\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001\u3002

                                                              "},{"location":"admin/kpanda/workloads/create-deployment.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"
                                                              • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 deployment-01\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                                              • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u8d1f\u8f7d\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                                              • \u5b9e\u4f8b\u6570\uff1a\u8f93\u5165\u8d1f\u8f7d\u7684 Pod \u5b9e\u4f8b\u6570\u91cf\uff0c\u9ed8\u8ba4\u521b\u5efa 1 \u4e2a Pod \u5b9e\u4f8b\u3002
                                                              • \u63cf\u8ff0\uff1a\u8f93\u5165\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u4e0d\u8d85\u8fc7 512\u3002

                                                              "},{"location":"admin/kpanda/workloads/create-deployment.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                                                              \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                                              \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

                                                              \u57fa\u672c\u4fe1\u606f\uff08\u5fc5\u586b\uff09\u751f\u547d\u5468\u671f\uff08\u9009\u586b\uff09\u5065\u5eb7\u68c0\u67e5\uff08\u9009\u586b\uff09\u73af\u5883\u53d8\u91cf\uff08\u9009\u586b\uff09\u6570\u636e\u5b58\u50a8\uff08\u9009\u586b\uff09\u5b89\u5168\u8bbe\u7f6e\uff08\u9009\u586b\uff09

                                                              \u5728\u914d\u7f6e\u5bb9\u5668\u76f8\u5173\u53c2\u6570\u65f6\uff0c\u5fc5\u987b\u6b63\u786e\u586b\u5199\u5bb9\u5668\u7684\u540d\u79f0\u3001\u955c\u50cf\u53c2\u6570\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u8fdb\u5165\u4e0b\u4e00\u6b65\u3002\u53c2\u8003\u4ee5\u4e0b\u8981\u6c42\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u8ba4 \u3002

                                                              • \u5bb9\u5668\u7c7b\u578b\uff1a\u9ed8\u8ba4\u4e3a\u5de5\u4f5c\u5bb9\u5668\u3002\u6709\u5173\u521d\u59cb\u5316\u5bb9\u5668\uff0c\u53c2\u89c1 k8s \u5b98\u65b9\u6587\u6863\u3002
                                                              • \u5bb9\u5668\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u652f\u6301\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\u3002\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 nginx-01\u3002
                                                              • \u955c\u50cf\uff1a
                                                                • \u5bb9\u5668\u955c\u50cf\uff1a\u4ece\u5217\u8868\u4e2d\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u955c\u50cf\u3002\u8f93\u5165\u955c\u50cf\u540d\u79f0\u65f6\uff0c\u9ed8\u8ba4\u4ece\u5b98\u65b9\u7684 DockerHub \u62c9\u53d6\u955c\u50cf\u3002 \u5b89\u88c5\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u955c\u50cf\u4ed3\u5e93\u6a21\u5757\u540e\uff0c\u53ef\u4ee5\u70b9\u51fb\u53f3\u4fa7\u7684 \u9009\u62e9\u955c\u50cf \u6309\u94ae\u6765\u9009\u62e9\u955c\u50cf\u3002
                                                                • \u955c\u50cf\u7248\u672c\uff1a\u4ece\u4e0b\u62c9\u5217\u8868\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u7248\u672c\u3002
                                                                • \u955c\u50cf\u62c9\u53d6\u7b56\u7565\uff1a\u52fe\u9009 \u603b\u662f\u62c9\u53d6\u955c\u50cf \u540e\uff0c\u8d1f\u8f7d\u6bcf\u6b21\u91cd\u542f/\u5347\u7ea7\u65f6\u90fd\u4f1a\u4ece\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u955c\u50cf\u3002 \u5982\u679c\u4e0d\u52fe\u9009\uff0c\u5219\u53ea\u62c9\u53d6\u672c\u5730\u955c\u50cf\uff0c\u53ea\u6709\u5f53\u955c\u50cf\u5728\u672c\u5730\u4e0d\u5b58\u5728\u65f6\u624d\u4ece\u955c\u50cf\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u3002 \u66f4\u591a\u8be6\u60c5\u53ef\u53c2\u8003\u955c\u50cf\u62c9\u53d6\u7b56\u7565\u3002
                                                                • \u955c\u50cf\u4ed3\u5e93\u5bc6\u94a5\uff1a\u53ef\u9009\u3002\u5982\u679c\u76ee\u6807\u4ed3\u5e93\u9700\u8981 Secret \u624d\u80fd\u8bbf\u95ee\uff0c\u9700\u8981\u5148\u53bb\u521b\u5efa\u4e00\u4e2a\u5bc6\u94a5\u3002
                                                              • \u7279\u6743\u5bb9\u5668\uff1a\u5bb9\u5668\u9ed8\u8ba4\u4e0d\u53ef\u4ee5\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u4efb\u4f55\u8bbe\u5907\uff0c\u5f00\u542f\u7279\u6743\u5bb9\u5668\u540e\uff0c\u5bb9\u5668\u5373\u53ef\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u6240\u6709\u8bbe\u5907\uff0c\u4eab\u6709\u5bbf\u4e3b\u673a\u4e0a\u7684\u8fd0\u884c\u8fdb\u7a0b\u7684\u6240\u6709\u6743\u9650\u3002
                                                              • CPU/\u5185\u5b58\u914d\u989d\uff1aCPU/\u5185\u5b58\u8d44\u6e90\u7684\u8bf7\u6c42\u503c\uff08\u9700\u8981\u4f7f\u7528\u7684\u6700\u5c0f\u8d44\u6e90\uff09\u548c\u9650\u5236\u503c\uff08\u5141\u8bb8\u4f7f\u7528\u7684\u6700\u5927\u8d44\u6e90\uff09\u3002\u8bf7\u6839\u636e\u9700\u8981\u4e3a\u5bb9\u5668\u914d\u7f6e\u8d44\u6e90\uff0c\u907f\u514d\u8d44\u6e90\u6d6a\u8d39\u548c\u56e0\u5bb9\u5668\u8d44\u6e90\u8d85\u989d\u5bfc\u81f4\u7cfb\u7edf\u6545\u969c\u3002\u9ed8\u8ba4\u503c\u5982\u56fe\u6240\u793a\u3002
                                                              • GPU \u914d\u7f6e\uff1a\u4e3a\u5bb9\u5668\u914d\u7f6e GPU \u7528\u91cf\uff0c \u4ec5\u652f\u6301\u8f93\u5165\u6b63\u6574\u6570\u3002
                                                                • \u6574\u5361\u6a21\u5f0f\uff1a
                                                                  • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\u3002\u914d\u7f6e\u540e\uff0c\u5bb9\u5668\u5c06\u5360\u7528\u6574\u5f20\u7269\u7406 GPU\u5361\u3002\u540c\u65f6\u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                                                                • \u865a\u62df\u5316\u6a21\u5f0f\uff1a
                                                                  • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\uff0c \u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                                                                  • GPU \u7b97\u529b\uff1a\u6bcf\u5f20\u7269\u7406 GPU \u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u7b97\u529b\u767e\u5206\u6bd4\uff0c\u6700\u591a\u4e3a100%\u3002
                                                                  • \u663e\u5b58\uff1a\u6bcf\u5f20\u7269\u7406\u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u663e\u5b58\u6570\u91cf\u3002
                                                                  • \u8c03\u5ea6\u7b56\u7565\uff08Binpack / Spread\uff09\uff1a\u652f\u6301\u57fa\u4e8e GPU \u5361\u548c\u57fa\u4e8e\u8282\u70b9\u7684\u4e24\u79cd\u7ef4\u5ea6\u7684\u8c03\u5ea6\u7b56\u7565\u3002Binpack \u662f\u96c6\u4e2d\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u540c\u4e00\u4e2a\u8282\u70b9\u7684\u540c\u4e00\u5f20 GPU \u5361\u4e0a\uff1bSpread \u662f\u5206\u6563\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u4e0d\u540c\u8282\u70b9\u7684\u4e0d\u540c GPU \u5361\u4e0a\uff0c\u6839\u636e\u5b9e\u9645\u573a\u666f\u53ef\u7ec4\u5408\u4f7f\u7528\u3002\uff08\u5f53\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u4e0e\u96c6\u7fa4\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u51b2\u7a81\u65f6\uff0c\u7cfb\u7edf\u4f18\u5148\u4f7f\u7528\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684\u8c03\u5ea6\u7b56\u7565\uff09\u3002
                                                                  • \u4efb\u52a1\u4f18\u5148\u7ea7\uff1aGPU \u7b97\u529b\u4f1a\u4f18\u5148\u4f9b\u7ed9\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u4f7f\u7528\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u51cf\u5c11\u751a\u81f3\u6682\u505c\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u76f4\u5230\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u7ed3\u675f\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u91cd\u65b0\u7ee7\u7eed\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u5e38\u7528\u4e8e\u5728\u79bb\u7ebf\u6df7\u90e8\u573a\u666f\u3002
                                                                  • \u6307\u5b9a\u578b\u53f7\uff1a\u5c06\u5de5\u4f5c\u8d1f\u8f7d\u8c03\u5ea6\u5230\u6307\u5b9a\u578b\u53f7\u7684 GPU \u5361\u4e0a\uff0c\u9002\u7528\u4e8e\u5bf9 GPU \u578b\u53f7\u6709\u7279\u6b8a\u8981\u6c42\u7684\u573a\u666f\u3002
                                                                • Mig \u6a21\u5f0f
                                                                  • \u89c4\u683c\uff1a\u5207\u5206\u540e\u7684\u7269\u7406 GPU \u5361\u89c4\u683c\u3002
                                                                  • \u6570\u91cf\uff1a\u4f7f\u7528\u8be5\u89c4\u683c\u7684\u6570\u91cf\u3002

                                                              \u8bbe\u7f6e GPU \u4e4b\u524d\uff0c\u9700\u8981\u7ba1\u7406\u5458\u9884\u5148\u5728\u96c6\u7fa4\u4e0a\u5b89\u88c5 GPU Operator \u548c nvidia-vgpu\uff08\u4ec5 vGPU \u6a21\u5f0f\u9700\u8981\u5b89\u88c5\uff09\uff0c\u5e76\u5728\u96c6\u7fa4\u8bbe\u7f6e\u4e2d\u5f00\u542f GPU \u7279\u6027\u3002

Set the commands to run when the container starts, after it starts, and before it stops. For details, see Container Lifecycle Configuration; a minimal sketch follows.
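A minimal lifecycle sketch; the postStart and preStop commands are illustrative assumptions:

lifecycle:
  postStart:                 # runs right after the container starts
    exec:
      command: ["/bin/sh", "-c", "echo started > /tmp/started"]
  preStop:                   # runs before the container is stopped
    exec:
      command: ["/bin/sh", "-c", "nginx -s quit; sleep 5"]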

Health checks determine the health of the container and the application, which helps improve application availability. For details, see Container Health Check Configuration.

Configure container parameters within the Pod, add environment variables, or pass configuration to the Pod. For details, see Container Environment Variable Configuration; a sketch follows.
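A minimal environment-variable sketch; the ConfigMap name app-config and key db.host are hypothetical:

env:
  - name: LOG_LEVEL          # literal value
    value: "info"
  - name: DB_HOST            # injected from a ConfigMap key
    valueFrom:
      configMapKeyRef:
        name: app-config     # hypothetical ConfigMap
        key: db.host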

Configure how the container mounts data volumes and persists data. For details, see Container Data Storage Configuration; a sketch follows.
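A minimal data-storage sketch mounting a PersistentVolumeClaim for persistence; the claim name app-data-pvc and the mount path are hypothetical:

volumes:
  - name: data
    persistentVolumeClaim:
      claimName: app-data-pvc   # hypothetical pre-created PVC
containers:
  - name: app
    image: nginx:1.14.2
    volumeMounts:
      - name: data
        mountPath: /var/lib/app # where the volume appears inside the container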

Containers are isolated for security through Linux's built-in account permission isolation mechanism. You can restrict a container's privileges by using an account UID (numeric identity) with different permissions. For example, entering 0 means using the privileges of the root account.

"},{"location":"admin/kpanda/workloads/create-deployment.html#_5","title":"Service Configuration","text":"

Configure a Service for the stateless workload so that it can be accessed externally.

1. Click the Create Service button.

2. Configure the service parameters by referring to Create Service.

3. Click OK, then click Next.

"},{"location":"admin/kpanda/workloads/create-deployment.html#_6","title":"Advanced Configuration","text":"

Advanced configuration covers four parts: network configuration, upgrade policy, scheduling policy, and labels & annotations. Click the tabs below to view the configuration requirements of each part.

Network Configuration Upgrade Policy Scheduling Policy Labels & Annotations
• If the SpiderPool and Multus components are deployed in the cluster, you can configure container NICs in the network configuration.

• DNS configuration: in some scenarios an application issues redundant DNS queries. Kubernetes provides DNS-related configuration options that can effectively reduce redundant DNS queries in such scenarios and increase business throughput.

• DNS policy

  • Default: the container uses the domain name resolution file pointed to by the kubelet --resolv-conf parameter. This configuration can only resolve external domain names registered on the Internet; it cannot resolve in-cluster domain names, but it produces no invalid DNS queries.
  • ClusterFirstWithHostNet: the application uses the host's domain name resolution file.
  • ClusterFirst: the application uses Kube-DNS/CoreDNS.
  • None: an option introduced in Kubernetes v1.9 (Beta in v1.10). With None, dnsConfig must be set, and the container's domain name resolution file is then generated entirely from the dnsConfig settings.
• Nameservers: fill in the nameserver addresses, for example 10.6.175.20.

• Search domains: the DNS search domain list for name lookups. When specified, the provided list is merged into the search field of the resolution file generated from dnsPolicy, and duplicates are removed. Kubernetes allows at most 6 search domains.
• Options: DNS configuration options, where each object may have a name attribute (required) and a value attribute (optional). The contents of this field are merged into the options field of the resolution file generated from dnsPolicy; if some options in dnsConfig conflict with options in the file generated from dnsPolicy, they are overridden by dnsConfig.
• Host aliases: aliases configured for hosts.

• Upgrade mode: Rolling upgrade gradually replaces old-version instances with new-version instances; during the upgrade, traffic is load-balanced across both old and new instances, so the business is not interrupted. Recreate upgrade deletes the old-version instances first and then installs the specified new version; the business is interrupted during the upgrade.
• Max unavailable: the maximum number or ratio of unavailable Pods during a workload update, 25% by default. If it equals the instance count, there is a risk of service interruption.
• Max surge: the maximum number or ratio of Pods exceeding the desired replica count during an update, 25% by default.
• Max retained revisions: the number of old revisions retained for rollback, 10 by default.
• Min ready time: the minimum time a Pod must be ready before it is considered available, 0 seconds by default.
• Max upgrade duration: if the deployment has not succeeded within the configured time, the workload is marked as failed, 600 seconds by default.
• Scale-down window: the execution window for pre-stop commands before the workload stops (0-9,999 seconds), 30 seconds by default. A sketch of these fields on a Deployment spec follows this list.
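A minimal sketch of how these settings map onto a Deployment spec, assuming the usual Kubernetes field names (the scale-down window presumably corresponds to terminationGracePeriodSeconds on the Pod template):

spec:
  strategy:
    type: RollingUpdate           # upgrade mode: rolling upgrade
    rollingUpdate:
      maxUnavailable: 25%         # max unavailable Pods during the update
      maxSurge: 25%               # max Pods above the desired replica count
  revisionHistoryLimit: 10        # max retained revisions for rollback
  minReadySeconds: 0              # min ready time before a Pod counts as available
  progressDeadlineSeconds: 600    # max upgrade duration before marked failed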

• Tolerance time: when the node hosting a workload instance becomes unavailable, the time before the instance is rescheduled to another available node, 300 seconds by default.
• Node affinity: constrain which nodes a Pod can be scheduled onto based on node labels.
• Workload affinity: constrain which nodes a Pod can be scheduled onto based on the labels of Pods already running on those nodes.
• Workload anti-affinity: constrain which nodes a Pod cannot be scheduled onto based on the labels of Pods already running on those nodes.

For details, see Scheduling Policy.

You can click the Add button to add labels and annotations for the workload and the pod.

"},{"location":"admin/kpanda/workloads/create-deployment.html#yaml","title":"Create from YAML","text":"

Besides the image-based method, you can also create a stateless workload more quickly from a YAML file.

1. Click Clusters in the left navigation bar, then click the name of the target cluster to enter the Cluster Details page.

2. On the cluster details page, click Workloads -> Deployments in the left navigation bar, then click the YAML Create button in the upper-right corner of the page.

3. Enter or paste the prepared YAML file, then click OK to finish creating.

Click to view a YAML example for creating a stateless workload
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 2 # tells the Deployment to run 2 Pods matching this template
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
                                                              "},{"location":"admin/kpanda/workloads/create-job.html","title":"\u521b\u5efa\u4efb\u52a1\uff08Job\uff09","text":"

                                                              \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u4efb\u52a1\uff08Job\uff09\u3002

                                                              \u4efb\u52a1\uff08Job\uff09\u9002\u7528\u4e8e\u6267\u884c\u4e00\u6b21\u6027\u4efb\u52a1\u3002Job \u4f1a\u521b\u5efa\u4e00\u4e2a\u6216\u591a\u4e2a Pod\uff0cJob \u4f1a\u4e00\u76f4\u91cd\u65b0\u5c1d\u8bd5\u6267\u884c Pod\uff0c\u76f4\u5230\u6210\u529f\u7ec8\u6b62\u7684 Pod \u8fbe\u5230\u4e00\u5b9a\u6570\u91cf\u3002\u6210\u529f\u7ec8\u6b62\u7684 Pod \u8fbe\u5230\u6307\u5b9a\u7684\u6570\u91cf\u540e\uff0cJob \u4e5f\u968f\u4e4b\u7ed3\u675f\u3002\u5220\u9664 Job \u65f6\u4f1a\u4e00\u540c\u6e05\u9664\u8be5 Job \u521b\u5efa\u7684\u6240\u6709 Pod\u3002\u6682\u505c Job \u65f6\u5220\u9664\u8be5 Job \u4e2d\u7684\u6240\u6709\u6d3b\u8dc3 Pod\uff0c\u76f4\u5230 Job \u88ab\u7ee7\u7eed\u6267\u884c\u3002\u6709\u5173\u4efb\u52a1\uff08Job\uff09\u7684\u66f4\u591a\u4ecb\u7ecd\uff0c\u53ef\u53c2\u8003Job\u3002
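A minimal Job sketch illustrating the retry-until-enough-successes behavior described above, following the common pi-computation example:

apiVersion: batch/v1
kind: Job
metadata:
  name: pi
spec:
  completions: 1     # successful Pods required for the Job to finish
  parallelism: 1     # Pods allowed to run concurrently
  backoffLimit: 4    # retries before the Job is marked failed
  template:
    spec:
      restartPolicy: Never
      containers:
        - name: pi
          image: perl:5.34.0
          command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]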

                                                              "},{"location":"admin/kpanda/workloads/create-job.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                              • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                                                              • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                              • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                                                              "},{"location":"admin/kpanda/workloads/create-job.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

                                                              \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u4efb\u52a1\u3002

                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                                              2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u4efb\u52a1 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                                                              3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\u3001\u670d\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                                                                \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de \u4efb\u52a1 \u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u4efb\u52a1\u6267\u884c\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u3001\u91cd\u542f\u7b49\u64cd\u4f5c\u3002

                                                              "},{"location":"admin/kpanda/workloads/create-job.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"

                                                              \u5728 \u521b\u5efa\u4efb\u52a1 \u9875\u9762\u4e2d\uff0c\u6839\u636e\u4e0b\u8868\u8f93\u5165\u57fa\u672c\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                                              • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                                              • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u4efb\u52a1\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                                              • \u5b9e\u4f8b\u6570\uff1a\u8f93\u5165\u5de5\u4f5c\u8d1f\u8f7d\u7684 Pod \u5b9e\u4f8b\u6570\u91cf\u3002\u9ed8\u8ba4\u521b\u5efa 1 \u4e2a Pod \u5b9e\u4f8b\u3002
                                                              • \u63cf\u8ff0\uff1a\u8f93\u5165\u5de5\u4f5c\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u91cf\u5e94\u4e0d\u8d85\u8fc7 512 \u4e2a\u3002
                                                              "},{"location":"admin/kpanda/workloads/create-job.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                                                              \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                                              \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

                                                              \u57fa\u672c\u4fe1\u606f\uff08\u5fc5\u586b\uff09\u751f\u547d\u5468\u671f\uff08\u9009\u586b\uff09\u5065\u5eb7\u68c0\u67e5\uff08\u9009\u586b\uff09\u73af\u5883\u53d8\u91cf\uff08\u9009\u586b\uff09\u6570\u636e\u5b58\u50a8\uff08\u9009\u586b\uff09\u5b89\u5168\u8bbe\u7f6e\uff08\u9009\u586b\uff09

                                                              \u5728\u914d\u7f6e\u5bb9\u5668\u76f8\u5173\u53c2\u6570\u65f6\uff0c\u5fc5\u987b\u6b63\u786e\u586b\u5199\u5bb9\u5668\u7684\u540d\u79f0\u3001\u955c\u50cf\u53c2\u6570\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u8fdb\u5165\u4e0b\u4e00\u6b65\u3002\u53c2\u8003\u4ee5\u4e0b\u8981\u6c42\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u8ba4 \u3002

• Container Type: defaults to Work Container. For init containers, see the official Kubernetes documentation.
• Container Name: up to 63 characters; lowercase letters, digits, and hyphens ("-") are supported. Must start and end with a lowercase letter or digit, e.g. nginx-01.
• Image:
  • Container Image: select a suitable image from the list. When an image name is entered, the image is pulled from the official DockerHub by default. After the image registry module of the AI computing platform is integrated, you can click the Select Image button on the right to choose an image.
  • Image Version: select a suitable version from the drop-down list.
  • Image Pull Policy: if Always Pull Image is checked, the image is re-pulled from the registry every time the workload restarts or is upgraded. If unchecked, only the local image is used, and the image is re-pulled from the registry only when it does not exist locally. For more details, see Image Pull Policy.
  • Image Registry Secret: optional. If the target registry requires a Secret to be accessed, create a secret first.
• Privileged Container: by default, a container cannot access any device on the host. When privileged mode is enabled, the container can access all devices on the host and has all the privileges of processes running on the host.
• CPU/Memory Quota: the request (minimum resources needed) and limit (maximum resources allowed) for CPU/memory resources. Configure resources for the container as needed to avoid resource waste and system failures caused by the container exceeding its quota. Default values are shown in the figure.
• GPU Configuration: configure GPU usage for the container; only positive integers are supported (see the manifest sketch after the note below).
  • Whole-Card Mode:
    • Physical Card Count: the number of physical GPU cards the container can use. Once configured, the container occupies whole physical GPU cards. The physical card count must be ≤ the maximum number of GPU cards installed on a single node.
  • Virtualization Mode:
    • Physical Card Count: the number of physical GPU cards the container can use; must be ≤ the maximum number of GPU cards installed on a single node.
    • GPU Compute Power: the percentage of compute power to use on each physical GPU card, up to 100%.
    • GPU Memory: the amount of GPU memory to use on each physical card.
    • Scheduling Policy (Binpack / Spread): supports scheduling along two dimensions, per GPU card and per node. Binpack is a consolidation policy that prefers scheduling containers onto the same GPU card of the same node; Spread is a dispersal policy that prefers scheduling containers onto different GPU cards of different nodes. The two can be combined according to the actual scenario. (When the workload-level Binpack / Spread policy conflicts with the cluster-level Binpack / Spread policy, the workload-level policy takes precedence.)
    • Task Priority: GPU compute power is given first to high-priority tasks; ordinary tasks reduce or even suspend their use of GPU compute power until the high-priority task finishes, after which they resume. Commonly used in online/offline colocation scenarios.
    • Specified Model: schedule the workload onto GPU cards of a specified model, for scenarios with special requirements on the GPU model.
  • MIG Mode:
    • Specification: the specification of the partitioned physical GPU card.
    • Count: the number of instances of that specification.

Before configuring GPUs, an administrator must install the GPU Operator and nvidia-vgpu (required only for vGPU mode) on the cluster in advance, and enable the GPU feature in the cluster settings.
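As a point of reference, whole-card mode maps to requesting the standard nvidia.com/gpu extended resource exposed by the GPU Operator's device plugin; the resource names used by virtualization and MIG modes depend on the vGPU components the platform installs, so they are not shown here. A minimal sketch (the Pod name and image are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: gpu-demo                  # illustrative name
spec:
  restartPolicy: Never
  containers:
    - name: cuda-test
      image: nvidia/cuda:12.2.0-base-ubuntu22.04
      command: ["nvidia-smi"]     # print the GPUs visible to the container
      resources:
        limits:
          nvidia.com/gpu: 1       # whole-card mode: occupy one entire physical GPU
```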

Set the commands to run when the container starts, after it starts, and before it stops. For details, see Container Lifecycle Configuration.

Used to determine the health status of containers and applications, which helps improve application availability. For details, see Container Health Check Configuration.

Configure container parameters within the Pod, such as adding environment variables or passing configuration to the Pod. For details, see Container Environment Variable Configuration.

Configure the settings for mounting data volumes and persisting data in the container. For details, see Container Data Storage Configuration.

Containers are isolated for security through Linux's built-in account permission isolation mechanism. You can restrict container permissions by using account UIDs (numeric identity tokens) with different privileges. For example, entering 0 means using the privileges of the root account.
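In YAML terms, these security settings correspond to the securityContext fields of the Pod and container spec; the sketch below runs the container under an unprivileged UID (all values are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: security-demo
spec:
  securityContext:
    runAsUser: 1000      # unprivileged account UID; 0 would mean root privileges
    runAsGroup: 3000
    fsGroup: 2000        # group ownership applied to mounted volumes
  containers:
    - name: app
      image: busybox
      command: ["sh", "-c", "id && sleep 3600"]
```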

                                                              "},{"location":"admin/kpanda/workloads/create-job.html#_5","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

                                                              \u9ad8\u7ea7\u914d\u7f6e\u5305\u62ec\u4efb\u52a1\u8bbe\u7f6e\u3001\u6807\u7b7e\u4e0e\u6ce8\u89e3\u4e24\u90e8\u5206\u3002

                                                              \u4efb\u52a1\u8bbe\u7f6e\u6807\u7b7e\u4e0e\u6ce8\u89e3

                                                              • \u5e76\u884c\u6570\uff1a\u4efb\u52a1\u6267\u884c\u8fc7\u7a0b\u4e2d\u5141\u8bb8\u540c\u65f6\u521b\u5efa\u7684\u6700\u5927 Pod \u6570\uff0c\u5e76\u884c\u6570\u5e94\u4e0d\u5927\u4e8e Pod \u603b\u6570\u3002\u9ed8\u8ba4\u4e3a 1\u3002
                                                              • \u8d85\u65f6\u65f6\u95f4\uff1a\u8d85\u51fa\u8be5\u65f6\u95f4\u65f6\uff0c\u4efb\u52a1\u4f1a\u88ab\u6807\u8bc6\u4e3a\u6267\u884c\u5931\u8d25\uff0c\u4efb\u52a1\u4e0b\u7684\u6240\u6709 Pod \u90fd\u4f1a\u88ab\u5220\u9664\u3002\u4e3a\u7a7a\u65f6\u8868\u793a\u4e0d\u8bbe\u7f6e\u8d85\u65f6\u65f6\u95f4\u3002
                                                              • \u91cd\u542f\u7b56\u7565\uff1a\u8bbe\u7f6e\u5931\u8d25\u65f6\u662f\u5426\u91cd\u542f Pod\u3002
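These form fields map to the Job spec roughly as follows; a minimal sketch with illustrative values (completions is added here only to make the parallelism meaningful):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: job-settings-demo
spec:
  parallelism: 2               # Parallelism: max Pods running at the same time
  completions: 4               # total successful completions required (illustrative)
  activeDeadlineSeconds: 600   # Timeout: mark the job failed and delete its Pods after 600 s
  template:
    spec:
      restartPolicy: Never     # Restart Policy: Never or OnFailure for Jobs
      containers:
        - name: worker
          image: busybox
          command: ["sh", "-c", "echo processing && sleep 10"]
```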

You can click the Add button to add labels and annotations to the workload instance Pods.

                                                              "},{"location":"admin/kpanda/workloads/create-job.html#yaml","title":"YAML \u521b\u5efa","text":"

                                                              \u9664\u4e86\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u5916\uff0c\u8fd8\u53ef\u4ee5\u901a\u8fc7 YAML \u6587\u4ef6\u66f4\u5feb\u901f\u5730\u521b\u5efa\u521b\u5efa\u4efb\u52a1\u3002

                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                                              2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u4efb\u52a1 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 YAML \u521b\u5efa \u6309\u94ae\u3002

                                                              3. \u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u5b8c\u6210\u521b\u5efa\u3002

                                                              \u70b9\u51fb\u67e5\u770b\u521b\u5efa\u4efb\u52a1\u7684 YAML \u793a\u4f8b
                                                              kind: Job\napiVersion: batch/v1\nmetadata:\n  name: demo\n  namespace: default\n  uid: a9708239-0358-4aa1-87d3-a092c080836e\n  resourceVersion: '92751876'\n  generation: 1\n  creationTimestamp: '2022-12-26T10:52:22Z'\n  labels:\n    app: demo\n    controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n    job-name: demo\n  annotations:\n    revisions: >-\n      {\"1\":{\"status\":\"running\",\"uid\":\"a9708239-0358-4aa1-87d3-a092c080836e\",\"start-time\":\"2022-12-26T10:52:22Z\",\"completion-time\":\"0001-01-01T00:00:00Z\"}}\nspec:\n  parallelism: 1\n  backoffLimit: 6\n  selector:\n    matchLabels:\n      controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: demo\n        controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n        job-name: demo\n    spec:\n      containers:\n        - name: container-4\n          image: nginx\n          resources:\n            limits:\n              cpu: 250m\n              memory: 512Mi\n            requests:\n              cpu: 250m\n              memory: 512Mi\n          lifecycle: {}\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            privileged: false\n      restartPolicy: Never\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      securityContext: {}\n      schedulerName: default-scheduler\n  completionMode: NonIndexed\n  suspend: false\nstatus:\n  startTime: '2022-12-26T10:52:22Z'\n  active: 1\n
                                                              "},{"location":"admin/kpanda/workloads/create-statefulset.html","title":"\u521b\u5efa\u6709\u72b6\u6001\u8d1f\u8f7d\uff08StatefulSet\uff09","text":"

                                                              \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u6709\u72b6\u6001\u8d1f\u8f7d\uff08StatefulSet\uff09\u3002

                                                              \u6709\u72b6\u6001\u8d1f\u8f7d\uff08StatefulSet\uff09\u662f Kubernetes \u4e2d\u7684\u4e00\u79cd\u5e38\u89c1\u8d44\u6e90\uff0c\u548c\u65e0\u72b6\u6001\u8d1f\u8f7d\uff08Deployment\uff09\u7c7b\u4f3c\uff0c\u4e3b\u8981\u7528\u4e8e\u7ba1\u7406 Pod \u96c6\u5408\u7684\u90e8\u7f72\u548c\u4f38\u7f29\u3002\u4e8c\u8005\u7684\u4e3b\u8981\u533a\u522b\u5728\u4e8e\uff0cDeployment \u662f\u65e0\u72b6\u6001\u7684\uff0c\u4e0d\u4fdd\u5b58\u6570\u636e\uff0c\u800c StatefulSet \u662f\u6709\u72b6\u6001\u7684\uff0c\u4e3b\u8981\u7528\u4e8e\u7ba1\u7406\u6709\u72b6\u6001\u5e94\u7528\u3002\u6b64\u5916\uff0cStatefulSet \u4e2d\u7684 Pod \u5177\u6709\u6c38\u4e45\u4e0d\u53d8\u7684 ID\uff0c\u4fbf\u4e8e\u5728\u5339\u914d\u5b58\u50a8\u5377\u65f6\u8bc6\u522b\u5bf9\u5e94\u7684 Pod\u3002

                                                              \u901a\u8fc7\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\uff0c\u53ef\u4ee5\u57fa\u4e8e\u76f8\u5e94\u7684\u89d2\u8272\u6743\u9650\u8f7b\u677e\u7ba1\u7406\u591a\u4e91\u591a\u96c6\u7fa4\u4e0a\u7684\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u5305\u62ec\u5bf9\u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u521b\u5efa\u3001\u66f4\u65b0\u3001\u5220\u9664\u3001\u5f39\u6027\u6269\u7f29\u3001\u91cd\u542f\u3001\u7248\u672c\u56de\u9000\u7b49\u5168\u751f\u547d\u5468\u671f\u7ba1\u7406\u3002
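For orientation, the sketch below is a deliberately minimal StatefulSet (all names are illustrative): serviceName ties the Pods to a headless Service for stable DNS identities (web-0, web-1, ...), and volumeClaimTemplates gives each Pod its own PersistentVolumeClaim that survives rescheduling:

```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: web-headless      # headless Service providing per-Pod DNS names
  replicas: 2                    # Pods are created as web-0 and web-1
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      containers:
        - name: nginx
          image: nginx
          volumeMounts:
            - name: data
              mountPath: /usr/share/nginx/html
  volumeClaimTemplates:          # one PVC per Pod: data-web-0, data-web-1
    - metadata:
        name: data
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 1Gi
```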

                                                              "},{"location":"admin/kpanda/workloads/create-statefulset.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u5728\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u6709\u72b6\u6001\u8d1f\u8f7d\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                              • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                                                              • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                              • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                                                              "},{"location":"admin/kpanda/workloads/create-statefulset.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

                                                              \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u6709\u72b6\u6001\u8d1f\u8f7d\u3002

                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                                              2. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u6709\u72b6\u6001\u8d1f\u8f7d \uff0c\u7136\u540e\u70b9\u51fb\u53f3\u4e0a\u89d2 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                                                              3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\u3001\u670d\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                                                                \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de \u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d \u5217\u8868\uff0c\u7b49\u5f85\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001\u53d8\u4e3a \u8fd0\u884c\u4e2d \u3002\u5982\u679c\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001\u51fa\u73b0\u5f02\u5e38\uff0c\u8bf7\u67e5\u770b\u5177\u4f53\u5f02\u5e38\u4fe1\u606f\uff0c\u53ef\u53c2\u8003\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001\u3002

                                                                \u70b9\u51fb\u65b0\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u5217\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u5de5\u4f5c\u8d1f\u8f7d\u6267\u884c\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u3001\u5f39\u6027\u6269\u7f29\u3001\u91cd\u542f\u3001\u7248\u672c\u56de\u9000\u7b49\u64cd\u4f5c\u3002

                                                              "},{"location":"admin/kpanda/workloads/create-statefulset.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"
                                                              • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 deployment-01\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                                              • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u8d1f\u8f7d\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                                              • \u5b9e\u4f8b\u6570\uff1a\u8f93\u5165\u8d1f\u8f7d\u7684 Pod \u5b9e\u4f8b\u6570\u91cf\uff0c\u9ed8\u8ba4\u521b\u5efa 1 \u4e2a Pod \u5b9e\u4f8b\u3002
                                                              • \u63cf\u8ff0\uff1a\u8f93\u5165\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u4e0d\u8d85\u8fc7 512\u3002

                                                              "},{"location":"admin/kpanda/workloads/create-statefulset.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                                                              \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                                              \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

                                                              \u57fa\u672c\u4fe1\u606f\uff08\u5fc5\u586b\uff09\u751f\u547d\u5468\u671f\uff08\u9009\u586b\uff09\u5065\u5eb7\u68c0\u67e5\uff08\u9009\u586b\uff09\u73af\u5883\u53d8\u91cf\uff08\u9009\u586b\uff09\u6570\u636e\u5b58\u50a8\uff08\u9009\u586b\uff09\u5b89\u5168\u8bbe\u7f6e\uff08\u9009\u586b\uff09

                                                              \u5728\u914d\u7f6e\u5bb9\u5668\u76f8\u5173\u53c2\u6570\u65f6\uff0c\u5fc5\u987b\u6b63\u786e\u586b\u5199\u5bb9\u5668\u7684\u540d\u79f0\u3001\u955c\u50cf\u53c2\u6570\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u8fdb\u5165\u4e0b\u4e00\u6b65\u3002\u53c2\u8003\u4ee5\u4e0b\u8981\u6c42\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u8ba4 \u3002

• Container Type: defaults to Work Container. For init containers, see the official Kubernetes documentation.
• Container Name: up to 63 characters; lowercase letters, digits, and hyphens ("-") are supported. Must start and end with a lowercase letter or digit, e.g. nginx-01.
• Image:
  • Container Image: select a suitable image from the list. When an image name is entered, the image is pulled from the official DockerHub by default. After the image registry module of the AI computing platform is integrated, you can click the Select Image button on the right to choose an image.
  • Image Version: select a suitable version from the drop-down list.
  • Image Pull Policy: if Always Pull Image is checked, the image is re-pulled from the registry every time the workload restarts or is upgraded. If unchecked, only the local image is used, and the image is re-pulled from the registry only when it does not exist locally. For more details, see Image Pull Policy.
  • Image Registry Secret: optional. If the target registry requires a Secret to be accessed, create a secret first.
• Privileged Container: by default, a container cannot access any device on the host. When privileged mode is enabled, the container can access all devices on the host and has all the privileges of processes running on the host.
• CPU/Memory Quota: the request (minimum resources needed) and limit (maximum resources allowed) for CPU/memory resources. Configure resources for the container as needed to avoid resource waste and system failures caused by the container exceeding its quota. Default values are shown in the figure.
• GPU Configuration: configure GPU usage for the container; only positive integers are supported.
  • Whole-Card Mode:
    • Physical Card Count: the number of physical GPU cards the container can use. Once configured, the container occupies whole physical GPU cards. The physical card count must be ≤ the maximum number of GPU cards installed on a single node.
  • Virtualization Mode:
    • Physical Card Count: the number of physical GPU cards the container can use; must be ≤ the maximum number of GPU cards installed on a single node.
    • GPU Compute Power: the percentage of compute power to use on each physical GPU card, up to 100%.
    • GPU Memory: the amount of GPU memory to use on each physical card.
    • Scheduling Policy (Binpack / Spread): supports scheduling along two dimensions, per GPU card and per node. Binpack is a consolidation policy that prefers scheduling containers onto the same GPU card of the same node; Spread is a dispersal policy that prefers scheduling containers onto different GPU cards of different nodes. The two can be combined according to the actual scenario. (When the workload-level Binpack / Spread policy conflicts with the cluster-level Binpack / Spread policy, the workload-level policy takes precedence.)
    • Task Priority: GPU compute power is given first to high-priority tasks; ordinary tasks reduce or even suspend their use of GPU compute power until the high-priority task finishes, after which they resume. Commonly used in online/offline colocation scenarios.
    • Specified Model: schedule the workload onto GPU cards of a specified model, for scenarios with special requirements on the GPU model.
  • MIG Mode:
    • Specification: the specification of the partitioned physical GPU card.
    • Count: the number of instances of that specification.

Before configuring GPUs, an administrator must install the GPU Operator and nvidia-vgpu (required only for vGPU mode) on the cluster in advance, and enable the GPU feature in the cluster settings.

Set the commands to run when the container starts, after it starts, and before it stops. For details, see Container Lifecycle Configuration.

Used to determine the health status of containers and applications, which helps improve application availability. For details, see Container Health Check Configuration.

Configure container parameters within the Pod, such as adding environment variables or passing configuration to the Pod. For details, see Container Environment Variable Configuration.

Configure the settings for mounting data volumes and persisting data in the container. For details, see Container Data Storage Configuration.

Containers are isolated for security through Linux's built-in account permission isolation mechanism. You can restrict container permissions by using account UIDs (numeric identity tokens) with different privileges. For example, entering 0 means using the privileges of the root account.

                                                              "},{"location":"admin/kpanda/workloads/create-statefulset.html#_5","title":"\u670d\u52a1\u914d\u7f6e","text":"

                                                              \u4e3a\u6709\u72b6\u6001\u8d1f\u8f7d\u914d\u7f6e\u670d\u52a1\uff08Service\uff09\uff0c\u4f7f\u6709\u72b6\u6001\u8d1f\u8f7d\u80fd\u591f\u88ab\u5916\u90e8\u8bbf\u95ee\u3002

                                                              1. \u70b9\u51fb \u521b\u5efa\u670d\u52a1 \u6309\u94ae\u3002

                                                              2. \u53c2\u8003\u521b\u5efa\u670d\u52a1\uff0c\u914d\u7f6e\u670d\u52a1\u53c2\u6570\u3002

                                                              3. \u70b9\u51fb \u786e\u5b9a \uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002
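A StatefulSet is typically paired with a headless Service, referenced by its serviceName field, which gives each Pod a stable DNS name such as web-0.web-headless.<namespace>.svc. Continuing the illustrative names used in the sketch above, a minimal headless Service:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: web-headless
spec:
  clusterIP: None      # headless: DNS resolves to individual Pod IPs, no virtual IP
  selector:
    app: web           # must match the StatefulSet's Pod labels
  ports:
    - name: http
      port: 80
      targetPort: 80
```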

                                                              "},{"location":"admin/kpanda/workloads/create-statefulset.html#_6","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

                                                              \u9ad8\u7ea7\u914d\u7f6e\u5305\u62ec\u8d1f\u8f7d\u7684\u7f51\u7edc\u914d\u7f6e\u3001\u5347\u7ea7\u7b56\u7565\u3001\u8c03\u5ea6\u7b56\u7565\u3001\u6807\u7b7e\u4e0e\u6ce8\u89e3\u56db\u90e8\u5206\uff0c\u53ef\u70b9\u51fb\u4e0b\u65b9\u7684\u9875\u7b7e\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                                              \u7f51\u7edc\u914d\u7f6e\u5347\u7ea7\u7b56\u7565\u5bb9\u5668\u7ba1\u7406\u7b56\u7565\u8c03\u5ea6\u7b56\u7565\u6807\u7b7e\u4e0e\u6ce8\u89e3
• If the SpiderPool and Multus components are deployed in the cluster, container NICs can be configured in the network configuration.

• DNS Configuration: in some scenarios, applications generate redundant DNS queries. Kubernetes provides DNS-related configuration options for applications that can effectively reduce redundant DNS queries in such scenarios and increase business throughput.

• DNS Policy

  • Default: the container uses the domain name resolution file pointed to by the kubelet --resolv-conf parameter. This configuration can only resolve external domain names registered on the Internet, not cluster-internal domain names, and it produces no invalid DNS queries.
  • ClusterFirstWithHostNet: the application uses the host's domain name resolution file.
  • ClusterFirst: the application uses Kube-DNS/CoreDNS.
  • None: a new option value introduced in Kubernetes v1.9 (Beta in v1.10). When set to None, dnsConfig must be set; the container's domain name resolution file is then generated entirely from the dnsConfig configuration.
• Nameservers: enter the addresses of the domain name servers, e.g. 10.6.175.20.

• Search Domains: the DNS search domain list used for domain name queries. When specified, the provided search domain list is merged into the search field of the resolution file generated from dnsPolicy, and duplicate domain names are removed. Kubernetes allows at most 6 search domains.
• Options: DNS configuration options, where each object may have a name property (required) and a value property (optional). The contents of this field are merged into the options field of the resolution file generated from dnsPolicy; if any options in dnsConfig conflict with options in the file generated from dnsPolicy, they are overridden by dnsConfig.
• Host Aliases: aliases configured for the hosts (see the sketch after this list).
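Put together, these DNS options correspond to the dnsPolicy, dnsConfig, and hostAliases fields of the Pod spec; a sketch with illustrative addresses and values:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: dns-demo
spec:
  dnsPolicy: "None"              # resolution file generated entirely from dnsConfig
  dnsConfig:
    nameservers:
      - 10.6.175.20              # Nameservers
    searches:
      - ns1.svc.cluster.local    # Search Domains (at most 6)
    options:
      - name: ndots              # Options: name is required
        value: "2"               # value is optional
  hostAliases:                   # Host Aliases written into /etc/hosts (IP illustrative)
    - ip: "10.6.175.30"
      hostnames:
        - "demo.internal"
  containers:
    - name: app
      image: busybox
      command: ["sleep", "3600"]
```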

• Upgrade Mode: Rolling upgrade replaces old-version instances with new-version instances step by step; during the upgrade, business traffic is load-balanced across both old and new instances, so the business is not interrupted. Recreate upgrade deletes the old-version workload instances first and then installs the specified new version; the business is interrupted during the upgrade.
• Max Revisions Retained: the number of old revisions kept for version rollback. Defaults to 10.
• Scale-down Time Window: the execution time window for the pre-stop command before the workload stops (0-9,999 seconds). Defaults to 30 seconds.

Kubernetes v1.7 and later can set the Pod management policy via .spec.podManagementPolicy, which supports the following two modes:

• Ordered policy (OrderedReady): the default Pod management policy. Pods are deployed in order, and the StatefulSet starts deploying the next Pod only after the previous one has been deployed successfully. Pods are deleted in reverse order, with the last-created Pod deleted first.

• Parallel policy (Parallel): create or delete containers in parallel, just like Pods of the Deployment type. The StatefulSet controller starts or terminates all containers in parallel, without waiting for Pods to become Running and Ready or to terminate completely before starting or terminating other Pods. This option only affects the behavior of scaling operations, not the order during updates. (A combined sketch of the upgrade-policy and Pod-management fields follows.)
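In the StatefulSet spec, the upgrade-policy and Pod-management fields above land roughly as follows; a combined sketch with illustrative values (note that the StatefulSet API itself offers RollingUpdate and OnDelete as update strategies):

```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: web-headless
  replicas: 2
  revisionHistoryLimit: 10          # Max Revisions Retained
  updateStrategy:
    type: RollingUpdate             # rolling upgrade; OnDelete disables automatic rollout
  podManagementPolicy: Parallel     # start/terminate all Pods at once; default is OrderedReady
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      terminationGracePeriodSeconds: 30   # Scale-down Time Window (seconds)
      containers:
        - name: nginx
          image: nginx
```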

• Toleration Time: when the node where a workload instance runs becomes unavailable, the time after which the instance is rescheduled to another available node. Defaults to 300 seconds.
• Node Affinity: constrain which nodes Pods can be scheduled onto based on node labels.
• Workload Affinity: constrain which nodes Pods can be scheduled onto based on the labels of Pods already running on those nodes.
• Workload Anti-Affinity: constrain which nodes Pods cannot be scheduled onto based on the labels of Pods already running on those nodes.
• Topology Domain: i.e. topologyKey, used to specify a group of nodes that can be scheduled onto. For example, kubernetes.io/os means that as long as a node running a given operating system meets the labelSelector conditions, Pods can be scheduled to that node.

For details, see Scheduling Policy. (A manifest fragment illustrating these fields follows the figure below.)

![Scheduling Policy](../../../images/deploy15_1.png)
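The fields above translate into tolerations and affinity rules in the Pod template; the fragment below (to be placed under .spec.template.spec, with illustrative labels and keys) shows the toleration time, a node affinity rule, and a workload anti-affinity rule scoped by a topology key:

```yaml
# Pod template fragment (.spec.template.spec); values are illustrative
tolerations:
  - key: node.kubernetes.io/unreachable    # Toleration Time: evict and reschedule
    operator: Exists                       # 300 s after the node becomes unreachable
    effect: NoExecute
    tolerationSeconds: 300
affinity:
  nodeAffinity:                            # Node Affinity: constrain by node labels
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/os
              operator: In
              values: ["linux"]
  podAntiAffinity:                         # Workload Anti-Affinity: spread replicas
    requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            app: web
        topologyKey: kubernetes.io/hostname   # Topology Domain (topologyKey)
```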

You can click the Add button to add labels and annotations to workloads and pods.

                                                              "},{"location":"admin/kpanda/workloads/create-statefulset.html#yaml","title":"YAML \u521b\u5efa","text":"

                                                              \u9664\u4e86\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u5916\uff0c\u8fd8\u53ef\u4ee5\u901a\u8fc7 YAML \u6587\u4ef6\u66f4\u5feb\u901f\u5730\u521b\u5efa\u521b\u5efa\u6709\u72b6\u6001\u8d1f\u8f7d\u3002

                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                                              2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u6709\u72b6\u6001\u8d1f\u8f7d \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 YAML \u521b\u5efa \u6309\u94ae\u3002

                                                              3. \u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u5b8c\u6210\u521b\u5efa\u3002

                                                              \u70b9\u51fb\u67e5\u770b\u521b\u5efa\u6709\u72b6\u6001\u8d1f\u8f7d\u7684 YAML \u793a\u4f8b
                                                              kind: StatefulSet\napiVersion: apps/v1\nmetadata:\n  name: test-mysql-123-mysql\n  namespace: default\n  uid: d3f45527-a0ab-4b22-9013-5842a06f4e0e\n  resourceVersion: '20504385'\n  generation: 1\n  creationTimestamp: '2022-09-22T09:34:10Z'\n  ownerReferences:\n    - apiVersion: mysql.presslabs.org/v1alpha1\n      kind: MysqlCluster\n      name: test-mysql-123\n      uid: 5e877cc3-5167-49da-904e-820940cf1a6d\n      controller: true\n      blockOwnerDeletion: true\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app.kubernetes.io/managed-by: mysql.presslabs.org\n      app.kubernetes.io/name: mysql\n      mysql.presslabs.org/cluster: test-mysql-123\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app.kubernetes.io/component: database\n        app.kubernetes.io/instance: test-mysql-123\n        app.kubernetes.io/managed-by: mysql.presslabs.org\n        app.kubernetes.io/name: mysql\n        app.kubernetes.io/version: 5.7.31\n        mysql.presslabs.org/cluster: test-mysql-123\n      annotations:\n        config_rev: '13941099'\n        prometheus.io/port: '9125'\n        prometheus.io/scrape: 'true'\n        secret_rev: '13941101'\n    spec:\n      volumes:\n        - name: conf\n          emptyDir: {}\n        - name: init-scripts\n          emptyDir: {}\n        - name: config-map\n          configMap:\n            name: test-mysql-123-mysql\n            defaultMode: 420\n        - name: data\n          persistentVolumeClaim:\n            claimName: data\n      initContainers:\n        - name: init\n          image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - clone-and-init\n          envFrom:\n            - secretRef:\n                name: test-mysql-123-mysql-operated\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: BACKUP_USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: BACKUP_USER\n                  optional: true\n            - name: BACKUP_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: BACKUP_PASSWORD\n                  optional: true\n          resources: {}\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: config-map\n              mountPath: /mnt/conf\n            - name: data\n              mountPath: /var/lib/mysql\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n      containers:\n        - name: mysql\n          
image: docker.m.daocloud.io/mysql:5.7.31\n          ports:\n            - name: mysql\n              containerPort: 3306\n              protocol: TCP\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: ORCH_CLUSTER_ALIAS\n              value: test-mysql-123.default\n            - name: ORCH_HTTP_API\n              value: http://mysql-operator.mcamel-system/api\n            - name: MYSQL_ROOT_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: ROOT_PASSWORD\n                  optional: false\n            - name: MYSQL_USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: USER\n                  optional: true\n            - name: MYSQL_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: PASSWORD\n                  optional: true\n            - name: MYSQL_DATABASE\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: DATABASE\n                  optional: true\n          resources:\n            limits:\n              cpu: '1'\n              memory: 1Gi\n            requests:\n              cpu: 100m\n              memory: 512Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: data\n              mountPath: /var/lib/mysql\n          livenessProbe:\n            exec:\n              command:\n                - mysqladmin\n                - '--defaults-file=/etc/mysql/client.conf'\n                - ping\n            initialDelaySeconds: 60\n            timeoutSeconds: 5\n            periodSeconds: 5\n            successThreshold: 1\n            failureThreshold: 3\n          readinessProbe:\n            exec:\n              command:\n                - /bin/sh\n                - '-c'\n                - >-\n                  test $(mysql --defaults-file=/etc/mysql/client.conf -NB -e\n                  'SELECT COUNT(*) FROM sys_operator.status WHERE\n                  name=\"configured\" AND value=\"1\"') -eq 1\n            initialDelaySeconds: 5\n            timeoutSeconds: 5\n            periodSeconds: 2\n            successThreshold: 1\n            failureThreshold: 3\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - bash\n                  - /etc/mysql/pre-shutdown-ha.sh\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: sidecar\n          image: 
docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - config-and-serve\n          ports:\n            - name: sidecar-http\n              containerPort: 8080\n              protocol: TCP\n          envFrom:\n            - secretRef:\n                name: test-mysql-123-mysql-operated\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: XTRABACKUP_TARGET_DIR\n              value: /tmp/xtrabackup_backupfiles/\n          resources:\n            limits:\n              cpu: '1'\n              memory: 1Gi\n            requests:\n              cpu: 10m\n              memory: 64Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: data\n              mountPath: /var/lib/mysql\n          readinessProbe:\n            httpGet:\n              path: /health\n              port: 8080\n              scheme: HTTP\n            initialDelaySeconds: 30\n            timeoutSeconds: 5\n            periodSeconds: 5\n            successThreshold: 1\n            failureThreshold: 3\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: metrics-exporter\n          image: prom/mysqld-exporter:v0.13.0\n          args:\n            - '--web.listen-address=0.0.0.0:9125'\n            - '--web.telemetry-path=/metrics'\n            - '--collect.heartbeat'\n            - '--collect.heartbeat.database=sys_operator'\n          ports:\n            - name: prometheus\n              containerPort: 9125\n              protocol: TCP\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: METRICS_EXPORTER_USER\n                  optional: false\n            - name: PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: 
test-mysql-123-mysql-operated\n                  key: METRICS_EXPORTER_PASSWORD\n                  optional: false\n            - name: DATA_SOURCE_NAME\n              value: $(USER):$(PASSWORD)@(127.0.0.1:3306)/\n          resources:\n            limits:\n              cpu: 100m\n              memory: 128Mi\n            requests:\n              cpu: 10m\n              memory: 32Mi\n          livenessProbe:\n            httpGet:\n              path: /metrics\n              port: 9125\n              scheme: HTTP\n            initialDelaySeconds: 30\n            timeoutSeconds: 30\n            periodSeconds: 30\n            successThreshold: 1\n            failureThreshold: 3\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: pt-heartbeat\n          image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - pt-heartbeat\n            - '--update'\n            - '--replace'\n            - '--check-read-only'\n            - '--create-table'\n            - '--database'\n            - sys_operator\n            - '--table'\n            - heartbeat\n            - '--utc'\n            - '--defaults-file'\n            - /etc/mysql/heartbeat.conf\n            - '--fail-successive-errors=20'\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n          resources:\n            limits:\n              cpu: 100m\n              memory: 64Mi\n            requests:\n              cpu: 10m\n              memory: 32Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n      restartPolicy: Always\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      securityContext:\n        runAsUser: 999\n        fsGroup: 999\n      affinity:\n        podAntiAffinity:\n          preferredDuringSchedulingIgnoredDuringExecution:\n            - weight: 100\n              podAffinityTerm:\n                labelSelector:\n                  matchLabels:\n                    app.kubernetes.io/component: database\n                    app.kubernetes.io/instance: test-mysql-123\n                    app.kubernetes.io/managed-by: mysql.presslabs.org\n                    app.kubernetes.io/name: mysql\n                    app.kubernetes.io/version: 5.7.31\n                    mysql.presslabs.org/cluster: test-mysql-123\n                topologyKey: kubernetes.io/hostname\n      schedulerName: default-scheduler\n  volumeClaimTemplates:\n    - kind: PersistentVolumeClaim\n      apiVersion: v1\n      metadata:\n        name: data\n        
creationTimestamp: null\n        ownerReferences:\n          - apiVersion: mysql.presslabs.org/v1alpha1\n            kind: MysqlCluster\n            name: test-mysql-123\n            uid: 5e877cc3-5167-49da-904e-820940cf1a6d\n            controller: true\n      spec:\n        accessModes:\n          - ReadWriteOnce\n        resources:\n          limits:\n            storage: 1Gi\n          requests:\n            storage: 1Gi\n        storageClassName: local-path\n        volumeMode: Filesystem\n      status:\n        phase: Pending\n  serviceName: mysql\n  podManagementPolicy: OrderedReady\n  updateStrategy:\n    type: RollingUpdate\n    rollingUpdate:\n      partition: 0\n  revisionHistoryLimit: 10\nstatus:\n  observedGeneration: 1\n  replicas: 1\n  readyReplicas: 1\n  currentReplicas: 1\n  updatedReplicas: 1\n  currentRevision: test-mysql-123-mysql-6b8f5577c7\n  updateRevision: test-mysql-123-mysql-6b8f5577c7\n  collisionCount: 0\n  availableReplicas: 1\n
                                                              "},{"location":"admin/kpanda/workloads/pod-config/env-variables.html","title":"\u914d\u7f6e\u73af\u5883\u53d8\u91cf","text":"

                                                              \u73af\u5883\u53d8\u91cf\u662f\u6307\u5bb9\u5668\u8fd0\u884c\u73af\u5883\u4e2d\u8bbe\u5b9a\u7684\u4e00\u4e2a\u53d8\u91cf\uff0c\u7528\u4e8e\u7ed9 Pod \u6dfb\u52a0\u73af\u5883\u6807\u5fd7\u6216\u4f20\u9012\u914d\u7f6e\u7b49\uff0c\u652f\u6301\u901a\u8fc7\u952e\u503c\u5bf9\u7684\u5f62\u5f0f\u4e3a Pod \u914d\u7f6e\u73af\u5883\u53d8\u91cf\u3002

                                                              \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u5728\u539f\u751f Kubernetes \u7684\u57fa\u7840\u4e0a\u589e\u52a0\u4e86\u56fe\u5f62\u5316\u754c\u9762\u4e3a Pod \u914d\u7f6e\u73af\u5883\u53d8\u91cf\uff0c\u652f\u6301\u4ee5\u4e0b\u51e0\u79cd\u914d\u7f6e\u65b9\u5f0f\uff1a

                                                              • \u952e\u503c\u5bf9\uff08Key/Value Pair\uff09\uff1a\u5c06\u81ea\u5b9a\u4e49\u7684\u952e\u503c\u5bf9\u4f5c\u4e3a\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf
                                                              • \u8d44\u6e90\u5f15\u7528\uff08Resource\uff09\uff1a\u5c06 Container \u5b9a\u4e49\u7684\u5b57\u6bb5\u4f5c\u4e3a\u73af\u5883\u53d8\u91cf\u7684\u503c\uff0c\u4f8b\u5982\u5bb9\u5668\u7684\u5185\u5b58\u9650\u5236\u3001\u526f\u672c\u6570\u7b49
                                                              • \u53d8\u91cf/\u53d8\u91cf\u5f15\u7528\uff08Pod Field\uff09\uff1a\u5c06 Pod \u5b57\u6bb5\u4f5c\u4e3a\u73af\u5883\u53d8\u91cf\u7684\u503c\uff0c\u4f8b\u5982 Pod \u7684\u540d\u79f0
                                                              • \u914d\u7f6e\u9879\u952e\u503c\u5bfc\u5165\uff08ConfigMap key\uff09\uff1a\u5bfc\u5165\u914d\u7f6e\u9879\u4e2d\u67d0\u4e2a\u952e\u7684\u503c\u4f5c\u4e3a\u67d0\u4e2a\u73af\u5883\u53d8\u91cf\u7684\u503c
                                                              • \u5bc6\u94a5\u952e\u503c\u5bfc\u5165\uff08Secret Key\uff09\uff1a\u4f7f\u7528\u6765\u81ea Secret \u4e2d\u7684\u6570\u636e\u5b9a\u4e49\u73af\u5883\u53d8\u91cf\u7684\u503c
                                                              • \u5bc6\u94a5\u5bfc\u5165\uff08Secret\uff09\uff1a\u5c06 Secret \u4e2d\u7684\u6240\u6709\u952e\u503c\u90fd\u5bfc\u5165\u4e3a\u73af\u5883\u53d8\u91cf
                                                              • \u914d\u7f6e\u9879\u5bfc\u5165\uff08ConfigMap\uff09\uff1a\u5c06\u914d\u7f6e\u9879\u4e2d\u6240\u6709\u952e\u503c\u90fd\u5bfc\u5165\u4e3a\u73af\u5883\u53d8\u91cf
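A minimal sketch of how several of these options map onto a Pod spec. The ConfigMap app-config, the Secret app-secret, and all variable names are hypothetical placeholders:

apiVersion: v1
kind: Pod
metadata:
  name: env-demo
spec:
  containers:
    - name: demo
      image: busybox
      command: ["sh", "-c", "env && sleep 3600"]
      env:
        - name: GREETING              # key/value pair
          value: hello
        - name: MY_POD_NAME           # Pod field reference
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: MEM_LIMIT             # Container resource reference
          valueFrom:
            resourceFieldRef:
              containerName: demo
              resource: limits.memory
        - name: LOG_LEVEL             # value of a single ConfigMap key
          valueFrom:
            configMapKeyRef:
              name: app-config
              key: log-level
        - name: DB_PASSWORD           # value of a single Secret key
          valueFrom:
            secretKeyRef:
              name: app-secret
              key: password
      envFrom:
        - configMapRef:               # import all keys of a ConfigMap
            name: app-config
        - secretRef:                  # import all keys of a Secret
            name: app-secret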
                                                              "},{"location":"admin/kpanda/workloads/pod-config/health-check.html","title":"\u5bb9\u5668\u7684\u5065\u5eb7\u68c0\u67e5","text":"

                                                              \u5bb9\u5668\u5065\u5eb7\u68c0\u67e5\u6839\u636e\u7528\u6237\u9700\u6c42\uff0c\u68c0\u67e5\u5bb9\u5668\u7684\u5065\u5eb7\u72b6\u51b5\u3002\u914d\u7f6e\u540e\uff0c\u5bb9\u5668\u5185\u7684\u5e94\u7528\u7a0b\u5e8f\u5165\u5982\u679c\u5f02\u5e38\uff0c\u5bb9\u5668\u4f1a\u81ea\u52a8\u8fdb\u884c\u91cd\u542f\u6062\u590d\u3002Kubernetes \u63d0\u4f9b\u4e86\u5b58\u6d3b\uff08Liveness\uff09\u68c0\u67e5\u3001\u5c31\u7eea\uff08Readiness\uff09\u68c0\u67e5\u548c\u542f\u52a8\uff08Startup\uff09\u68c0\u67e5\u3002

                                                              • \u5b58\u6d3b\u68c0\u67e5\uff08LivenessProbe\uff09 \u53ef\u63a2\u6d4b\u5230\u5e94\u7528\u6b7b\u9501\uff08\u5e94\u7528\u7a0b\u5e8f\u5728\u8fd0\u884c\uff0c\u4f46\u662f\u65e0\u6cd5\u7ee7\u7eed\u6267\u884c\u540e\u9762\u7684\u6b65\u9aa4\uff09\u60c5\u51b5\u3002 \u91cd\u542f\u8fd9\u79cd\u72b6\u6001\u4e0b\u7684\u5bb9\u5668\u6709\u52a9\u4e8e\u63d0\u9ad8\u5e94\u7528\u7684\u53ef\u7528\u6027\uff0c\u5373\u4f7f\u5176\u4e2d\u5b58\u5728\u7f3a\u9677\u3002

                                                              • \u5c31\u7eea\u68c0\u67e5\uff08ReadinessProbe\uff09 \u53ef\u63a2\u77e5\u5bb9\u5668\u4f55\u65f6\u51c6\u5907\u597d\u63a5\u53d7\u8bf7\u6c42\u6d41\u91cf\uff0c\u5f53\u4e00\u4e2a Pod \u5185\u7684\u6240\u6709\u5bb9\u5668\u90fd\u5c31\u7eea\u65f6\uff0c\u624d\u80fd\u8ba4\u4e3a\u8be5 Pod \u5c31\u7eea\u3002 \u8fd9\u79cd\u4fe1\u53f7\u7684\u4e00\u4e2a\u7528\u9014\u5c31\u662f\u63a7\u5236\u54ea\u4e2a Pod \u4f5c\u4e3a Service \u7684\u540e\u7aef\u3002 \u82e5 Pod \u5c1a\u672a\u5c31\u7eea\uff0c\u4f1a\u88ab\u4ece Service \u7684\u8d1f\u8f7d\u5747\u8861\u5668\u4e2d\u5254\u9664\u3002

                                                              • \u542f\u52a8\u68c0\u67e5\uff08StartupProbe\uff09 \u53ef\u4ee5\u4e86\u89e3\u5e94\u7528\u5bb9\u5668\u4f55\u65f6\u542f\u52a8\uff0c\u914d\u7f6e\u540e\uff0c\u53ef\u63a7\u5236\u5bb9\u5668\u5728\u542f\u52a8\u6210\u529f\u540e\u518d\u8fdb\u884c\u5b58\u6d3b\u6027\u548c\u5c31\u7eea\u6001\u68c0\u67e5\uff0c \u786e\u4fdd\u8fd9\u4e9b\u5b58\u6d3b\u3001\u5c31\u7eea\u63a2\u6d4b\u5668\u4e0d\u4f1a\u5f71\u54cd\u5e94\u7528\u7684\u542f\u52a8\u3002 \u542f\u52a8\u63a2\u6d4b\u53ef\u4ee5\u7528\u4e8e\u5bf9\u6162\u542f\u52a8\u5bb9\u5668\u8fdb\u884c\u5b58\u6d3b\u6027\u68c0\u6d4b\uff0c\u907f\u514d\u5b83\u4eec\u5728\u542f\u52a8\u8fd0\u884c\u4e4b\u524d\u5c31\u88ab\u6740\u6389\u3002

                                                              "},{"location":"admin/kpanda/workloads/pod-config/health-check.html#_2","title":"\u5b58\u6d3b\u548c\u5c31\u7eea\u68c0\u67e5","text":"

                                                              \u5b58\u6d3b\u68c0\u67e5\uff08LivenessProbe\uff09\u7684\u914d\u7f6e\u548c\u5c31\u7eea\u68c0\u67e5\uff08ReadinessProbe\uff09\u7684\u914d\u7f6e\u53c2\u6570\u76f8\u4f3c\uff0c \u552f\u4e00\u533a\u522b\u662f\u8981\u4f7f\u7528 readinessProbe \u5b57\u6bb5\uff0c\u800c\u4e0d\u662f livenessProbe \u5b57\u6bb5\u3002

                                                              HTTP GET \u53c2\u6570\u8bf4\u660e\uff1a

                                                              \u53c2\u6570 \u53c2\u6570\u8bf4\u660e \u8def\u5f84\uff08 Path\uff09 \u8bbf\u95ee\u7684\u8bf7\u6c42\u8def\u5f84\u3002\u5982\uff1a \u793a\u4f8b\u4e2d\u7684 /healthz \u8def\u5f84 \u7aef\u53e3(Port) \u670d\u52a1\u76d1\u542c\u7aef\u53e3\u3002 \u5982\uff1a \u793a\u4f8b\u4e2d\u7684 8080 \u7aef\u53e3 \u534f\u8bae \u8bbf\u95ee\u534f\u8bae\uff0cHttp \u6216\u8005Https \u5ef6\u8fdf\u65f6\u95f4\uff08initialDelaySeconds\uff09 \u5ef6\u8fdf\u68c0\u67e5\u65f6\u95f4\uff0c\u5355\u4f4d\u4e3a\u79d2\uff0c\u6b64\u8bbe\u7f6e\u4e0e\u4e1a\u52a1\u7a0b\u5e8f\u6b63\u5e38\u542f\u52a8\u65f6\u95f4\u76f8\u5173\u3002\u4f8b\u5982\uff0c\u8bbe\u7f6e\u4e3a30\uff0c\u8868\u660e\u5bb9\u5668\u542f\u52a8\u540e30\u79d2\u624d\u5f00\u59cb\u5065\u5eb7\u68c0\u67e5\uff0c\u8be5\u65f6\u95f4\u662f\u9884\u7559\u7ed9\u4e1a\u52a1\u7a0b\u5e8f\u542f\u52a8\u7684\u65f6\u95f4\u3002 \u8d85\u65f6\u65f6\u95f4\uff08timeoutSeconds\uff09 \u8d85\u65f6\u65f6\u95f4\uff0c\u5355\u4f4d\u4e3a\u79d2\u3002\u4f8b\u5982\uff0c\u8bbe\u7f6e\u4e3a10\uff0c\u8868\u660e\u6267\u884c\u5065\u5eb7\u68c0\u67e5\u7684\u8d85\u65f6\u7b49\u5f85\u65f6\u95f4\u4e3a10\u79d2\uff0c\u5982\u679c\u8d85\u8fc7\u8fd9\u4e2a\u65f6\u95f4\uff0c\u672c\u6b21\u5065\u5eb7\u68c0\u67e5\u5c31\u88ab\u89c6\u4e3a\u5931\u8d25\u3002\u82e5\u8bbe\u7f6e\u4e3a0\u6216\u4e0d\u8bbe\u7f6e\uff0c\u9ed8\u8ba4\u8d85\u65f6\u7b49\u5f85\u65f6\u95f4\u4e3a1\u79d2\u3002 \u8d85\u65f6\u65f6\u95f4\uff08timeoutSeconds\uff09 \u8d85\u65f6\u65f6\u95f4\uff0c\u5355\u4f4d\u4e3a\u79d2\u3002\u4f8b\u5982\uff0c\u8bbe\u7f6e\u4e3a10\uff0c\u8868\u660e\u6267\u884c\u5065\u5eb7\u68c0\u67e5\u7684\u8d85\u65f6\u7b49\u5f85\u65f6\u95f4\u4e3a10\u79d2\uff0c\u5982\u679c\u8d85\u8fc7\u8fd9\u4e2a\u65f6\u95f4\uff0c\u672c\u6b21\u5065\u5eb7\u68c0\u67e5\u5c31\u88ab\u89c6\u4e3a\u5931\u8d25\u3002\u82e5\u8bbe\u7f6e\u4e3a0\u6216\u4e0d\u8bbe\u7f6e\uff0c\u9ed8\u8ba4\u8d85\u65f6\u7b49\u5f85\u65f6\u95f4\u4e3a1\u79d2\u3002 \u6210\u529f\u9608\u503c\uff08successThreshold\uff09 \u63a2\u6d4b\u5931\u8d25\u540e\uff0c\u88ab\u89c6\u4e3a\u6210\u529f\u7684\u6700\u5c0f\u8fde\u7eed\u6210\u529f\u6570\u3002\u9ed8\u8ba4\u503c\u662f 1\uff0c\u6700\u5c0f\u503c\u662f 1\u3002\u5b58\u6d3b\u548c\u542f\u52a8\u63a2\u6d4b\u7684\u8fd9\u4e2a\u503c\u5fc5\u987b\u662f 1\u3002 \u6700\u5927\u5931\u8d25\u6b21\u6570\uff08failureThreshold\uff09 \u5f53\u63a2\u6d4b\u5931\u8d25\u65f6\u91cd\u8bd5\u7684\u6b21\u6570\u3002\u5b58\u6d3b\u63a2\u6d4b\u60c5\u51b5\u4e0b\u7684\u653e\u5f03\u5c31\u610f\u5473\u7740\u91cd\u65b0\u542f\u52a8\u5bb9\u5668\u3002\u5c31\u7eea\u63a2\u6d4b\u60c5\u51b5\u4e0b\u7684\u653e\u5f03 Pod \u4f1a\u88ab\u6253\u4e0a\u672a\u5c31\u7eea\u7684\u6807\u7b7e\u3002\u9ed8\u8ba4\u503c\u662f 3\u3002\u6700\u5c0f\u503c\u662f 1\u3002"},{"location":"admin/kpanda/workloads/pod-config/health-check.html#http-get","title":"\u4f7f\u7528 HTTP GET \u8bf7\u6c42\u68c0\u67e5","text":"

                                                              YAML \u793a\u4f8b\uff1a

apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
  name: liveness-http
spec:
  containers:
  - name: liveness
    image: k8s.gcr.io/liveness
    args:
    - /server
    livenessProbe:
      httpGet:
        path: /healthz  # request path to access
        port: 8080  # port the service listens on
        httpHeaders:
        - name: Custom-Header
          value: Awesome
      initialDelaySeconds: 3  # kubelet waits 3 seconds before performing the first probe
      periodSeconds: 3   # kubelet performs a liveness probe every 3 seconds

Following the configured rules, the kubelet sends an HTTP GET request to the service running inside the container (listening on port 8080) to perform the probe. If the handler at the /healthz path on the server returns a success code, the kubelet considers the container healthy and alive; if it returns a failure code, the kubelet kills the container and restarts it. Any return code greater than or equal to 200 and less than 400 indicates success; any other return code indicates failure. During the first 10 seconds of the container's life, the /healthz handler returns a 200 status code; after that it returns 500.

"},{"location":"admin/kpanda/workloads/pod-config/health-check.html#tcp","title":"Checking with a TCP Port","text":"

TCP port parameters:

• Port: the port the service listens on, e.g. port 8080 in the example
• Initial delay (initialDelaySeconds): the delay before checks start, in seconds. This setting relates to the normal startup time of the application. For example, a value of 30 means health checks begin 30 seconds after the container starts; that time is reserved for the application to start up.
• Timeout (timeoutSeconds): the timeout in seconds. For example, a value of 10 means the health check waits at most 10 seconds; if that time is exceeded, the check is considered failed. If set to 0 or unset, the default timeout is 1 second.

For a container that provides TCP communication services, the cluster establishes a TCP connection to the container according to the configured rules. A successful connection means the probe succeeded; otherwise the probe failed. When choosing the TCP port probe method, you must specify the port the container listens on.

YAML example:

apiVersion: v1
kind: Pod
metadata:
  name: goproxy
  labels:
    app: goproxy
spec:
  containers:
  - name: goproxy
    image: k8s.gcr.io/goproxy:0.1
    ports:
    - containerPort: 8080
    readinessProbe:
      tcpSocket:
        port: 8080
      initialDelaySeconds: 5
      periodSeconds: 10
    livenessProbe:
      tcpSocket:
        port: 8080
      initialDelaySeconds: 15
      periodSeconds: 20

This example uses both readiness and liveness probes. The kubelet sends the first readiness probe 5 seconds after the container starts, attempting to connect to port 8080 of the goproxy container. If the probe succeeds, the Pod is marked ready and the kubelet continues running the check every 10 seconds.

In addition to the readiness probe, this configuration includes a liveness probe. The kubelet performs the first liveness probe 15 seconds after the container starts. Like the readiness probe, the liveness probe attempts to connect to port 8080 of the goproxy container. If the liveness probe fails, the container is restarted.

"},{"location":"admin/kpanda/workloads/pod-config/health-check.html#_3","title":"Checking by Executing a Command","text":"

YAML example:

apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
  name: liveness-exec
spec:
  containers:
  - name: liveness
    image: k8s.gcr.io/busybox
    args:
    - /bin/sh
    - -c
    - touch /tmp/healthy; sleep 30; rm -f /tmp/healthy; sleep 600
    livenessProbe:
      exec:
        command:
        - cat
        - /tmp/healthy
      initialDelaySeconds: 5 # kubelet waits 5 seconds before performing the first probe
      periodSeconds: 5  # kubelet performs a liveness probe every 5 seconds

The periodSeconds field specifies that the kubelet performs a liveness probe every 5 seconds, and the initialDelaySeconds field specifies that the kubelet waits 5 seconds before the first probe. Following the configured rules, the cluster probes periodically by having the kubelet execute the command cat /tmp/healthy inside the container. If the command succeeds with a return value of 0, the kubelet considers the container healthy and alive; if it returns a non-zero value, the kubelet kills the container and restarts it.

"},{"location":"admin/kpanda/workloads/pod-config/health-check.html#_4","title":"Protecting Slow-Starting Containers with a Startup Probe","text":"

Some applications need a long initialization time at startup. A startup probe can be set up with the same command as the liveness check; for HTTP or TCP checks, scenarios where startup takes a long time can be handled by setting failureThreshold * periodSeconds to a span long enough to cover them.

YAML example:

ports:
- name: liveness-port
  containerPort: 8080
  hostPort: 8080

livenessProbe:
  httpGet:
    path: /healthz
    port: liveness-port
  failureThreshold: 1
  periodSeconds: 10

startupProbe:
  httpGet:
    path: /healthz
    port: liveness-port
  failureThreshold: 30
  periodSeconds: 10

With the settings above, the application has at most 5 minutes (30 * 10 = 300s) to finish starting. Once the startup probe succeeds, the liveness probe takes over, responding quickly to container deadlock. If the startup probe never succeeds, the container is killed after 300 seconds and handled further according to its restartPolicy.

"},{"location":"admin/kpanda/workloads/pod-config/job-parameters.html","title":"Job Parameters","text":"

Based on the settings of .spec.completions and .spec.parallelism, a Job can be divided into the following types:

• Non-parallel Job: creates one Pod until its Job finishes successfully
• Parallel Job with a fixed completion count: the Job is considered complete when the number of successful Pods reaches .spec.completions
• Parallel Job: creates one or more Pods until one finishes successfully

Parameter description:

• RestartPolicy: for a Job this must be Never or OnFailure
• .spec.completions: the number of Pods that must finish successfully for the Job to complete; defaults to 1
• .spec.parallelism: the number of Pods running in parallel; defaults to 1
• spec.backoffLimit: the maximum number of retries for failed Pods; retries stop once this count is exceeded
• .spec.activeDeadlineSeconds: the running deadline of the Pods. Once this time is reached, the Job and all of its Pods stop. activeDeadlineSeconds takes precedence over backoffLimit, i.e. a Job that reaches activeDeadlineSeconds ignores the backoffLimit setting.

Below is an example Job configuration, saved in myjob.yaml, which computes π to 2000 digits and prints the output.

apiVersion: batch/v1
kind: Job            # type of the current resource
metadata:
  name: myjob
spec:
  completions: 50        # the Job needs 50 Pods to finish; in this example, π is printed 50 times
  parallelism: 5        # 5 Pods run in parallel
  backoffLimit: 5        # retry at most 5 times
  template:
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl",  "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never # restart policy

Related commands:

kubectl apply -f myjob.yaml  # start the Job
kubectl get job  # view the Job
kubectl logs myjob-1122dswzs  # view the logs of the Job's Pod
                                                              "},{"location":"admin/kpanda/workloads/pod-config/lifecycle.html","title":"\u914d\u7f6e\u5bb9\u5668\u751f\u547d\u5468\u671f","text":"

                                                              Pod \u9075\u5faa\u4e00\u4e2a\u9884\u5b9a\u4e49\u7684\u751f\u547d\u5468\u671f\uff0c\u8d77\u59cb\u4e8e Pending \u9636\u6bb5\uff0c\u5982\u679c Pod \u5185\u81f3\u5c11\u6709\u4e00\u4e2a\u5bb9\u5668\u6b63\u5e38\u542f\u52a8\uff0c\u5219\u8fdb\u5165 Running \u72b6\u6001\u3002\u5982\u679c Pod \u4e2d\u6709\u5bb9\u5668\u4ee5\u5931\u8d25\u72b6\u6001\u7ed3\u675f\uff0c\u5219\u72b6\u6001\u53d8\u4e3a Failed \u3002\u4ee5\u4e0b phase \u5b57\u6bb5\u503c\u8868\u660e\u4e86\u4e00\u4e2a Pod \u5904\u4e8e\u751f\u547d\u5468\u671f\u7684\u54ea\u4e2a\u9636\u6bb5\u3002

                                                              \u503c \u63cf\u8ff0 Pending \uff08\u60ac\u51b3\uff09 Pod \u5df2\u88ab\u7cfb\u7edf\u63a5\u53d7\uff0c\u4f46\u6709\u4e00\u4e2a\u6216\u8005\u591a\u4e2a\u5bb9\u5668\u5c1a\u672a\u521b\u5efa\u4ea6\u672a\u8fd0\u884c\u3002\u8fd9\u4e2a\u9636\u6bb5\u5305\u62ec\u7b49\u5f85 Pod \u88ab\u8c03\u5ea6\u7684\u65f6\u95f4\u548c\u901a\u8fc7\u7f51\u7edc\u4e0b\u8f7d\u955c\u50cf\u7684\u65f6\u95f4\u3002 Running \uff08\u8fd0\u884c\u4e2d\uff09 Pod \u5df2\u7ecf\u7ed1\u5b9a\u5230\u4e86\u67d0\u4e2a\u8282\u70b9\uff0cPod \u4e2d\u7684\u6240\u6709\u5bb9\u5668\u90fd\u5df2\u88ab\u521b\u5efa\u3002\u81f3\u5c11\u6709\u4e00\u4e2a\u5bb9\u5668\u4ecd\u5728\u8fd0\u884c\uff0c\u6216\u8005\u6b63\u5904\u4e8e\u542f\u52a8\u6216\u91cd\u542f\u72b6\u6001\u3002 Succeeded \uff08\u6210\u529f\uff09 Pod \u4e2d\u7684\u6240\u6709\u5bb9\u5668\u90fd\u5df2\u6210\u529f\u7ec8\u6b62\uff0c\u5e76\u4e14\u4e0d\u4f1a\u518d\u91cd\u542f\u3002 Failed \uff08\u5931\u8d25\uff09 Pod \u4e2d\u7684\u6240\u6709\u5bb9\u5668\u90fd\u5df2\u7ec8\u6b62\uff0c\u5e76\u4e14\u81f3\u5c11\u6709\u4e00\u4e2a\u5bb9\u5668\u662f\u56e0\u4e3a\u5931\u8d25\u800c\u7ec8\u6b62\u3002\u4e5f\u5c31\u662f\u8bf4\uff0c\u5bb9\u5668\u4ee5\u975e 0 \u72b6\u6001\u9000\u51fa\u6216\u8005\u88ab\u7cfb\u7edf\u7ec8\u6b62\u3002 Unknown \uff08\u672a\u77e5\uff09 \u56e0\u4e3a\u67d0\u4e9b\u539f\u56e0\u65e0\u6cd5\u53d6\u5f97 Pod \u7684\u72b6\u6001\uff0c\u8fd9\u79cd\u60c5\u51b5\u901a\u5e38\u662f\u56e0\u4e3a\u4e0e Pod \u6240\u5728\u4e3b\u673a\u901a\u4fe1\u5931\u8d25\u6240\u81f4\u3002
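A quick way to check these phases from the command line is standard kubectl; the Pod name my-pod below is a placeholder:

kubectl get pod my-pod -o jsonpath='{.status.phase}'   # prints e.g. Running
kubectl describe pod my-pod                            # full status, conditions, and events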

When creating a workload in container management on the SuanFeng AI Computing Platform, an image is usually used to specify the runtime environment in the container. By default, the Entrypoint and CMD fields defined when building the image determine the command and arguments executed when the container runs. To change the command and arguments run before container startup, after startup, or before stop, you can override the image defaults by configuring lifecycle event commands and arguments for the container.

                                                              "},{"location":"admin/kpanda/workloads/pod-config/lifecycle.html#_2","title":"\u751f\u547d\u5468\u671f\u914d\u7f6e","text":"

                                                              \u6839\u636e\u4e1a\u52a1\u9700\u8981\u5bf9\u5bb9\u5668\u7684\u542f\u52a8\u547d\u4ee4\u3001\u542f\u52a8\u540e\u547d\u4ee4\u3001\u505c\u6b62\u524d\u547d\u4ee4\u8fdb\u884c\u914d\u7f6e\u3002
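A minimal hand-written sketch of how these three settings appear in a container spec (not output generated by the platform); the image and the commands are placeholders:

spec:
  containers:
    - name: app
      image: nginx
      command: ["/bin/sh", "-c"]          # startup command
      args: ["nginx -g 'daemon off;'"]    # startup arguments
      lifecycle:
        postStart:                        # post-start command
          exec:
            command: ["/bin/sh", "-c", "echo started > /tmp/started"]
        preStop:                          # pre-stop command: drain traffic before shutdown
          exec:
            command: ["/bin/sh", "-c", "nginx -s quit; sleep 5"]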

• Startup command: [Type] optional. [Meaning] the container starts according to the startup command.
• Post-start command: [Type] optional. [Meaning] the command triggered after the container starts.
• Pre-stop command: [Type] optional. [Meaning] the command executed after the container receives a stop command, ensuring that business traffic running in the instance can be drained before an upgrade or instance deletion. Example: --

"},{"location":"admin/kpanda/workloads/pod-config/lifecycle.html#_3","title":"Startup Command","text":"

Configure the startup command according to the table below.

• Run command: [Type] required. [Meaning] enter an executable command, separating multiple commands with spaces; if a command itself contains spaces, wrap it in quotation marks (""). With multiple commands, it is recommended to use /bin/sh or another shell as the run command and pass all other commands in as arguments. Example: /run/server
• Run arguments: [Type] optional. [Meaning] enter the arguments that control the container's run command. Example: port=8080

"},{"location":"admin/kpanda/workloads/pod-config/lifecycle.html#_4","title":"Post-Start Command","text":"

The SuanFeng AI Computing Platform provides two processing types, command-line script and HTTP request, for configuring the post-start command. You can choose the configuration method that suits you from the table below.

Command-line script configuration:

• Run command: [Type] optional. [Meaning] enter an executable command, separating multiple commands with spaces; if a command itself contains spaces, wrap it in quotation marks (""). With multiple commands, it is recommended to use /bin/sh or another shell as the run command and pass all other commands in as arguments. Example: /run/server
• Run arguments: [Type] optional. [Meaning] enter the arguments that control the container's run command. Example: port=8080

"},{"location":"admin/kpanda/workloads/pod-config/lifecycle.html#_5","title":"Pre-Stop Command","text":"

The SuanFeng AI Computing Platform provides two processing types, command-line script and HTTP request, for configuring the pre-stop command. You can choose the configuration method that suits you from the table below.

HTTP request configuration:

• URL path: [Type] optional. [Meaning] the URL path of the request. Example: /run/server
• Port: [Type] required. [Meaning] the port of the request. Example: port=8080
• Node address: [Type] optional. [Meaning] the IP address of the request; defaults to the IP of the node where the container is located. Example: --

"},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html","title":"Scheduling Policy","text":"

In a Kubernetes cluster, nodes also have labels. You can add labels manually, and Kubernetes adds some standard labels to every node in the cluster as well; see Well-Known Labels, Annotations and Taints for common node labels. By adding labels to nodes, you can have Pods scheduled onto specific nodes or node groups. You can use this capability to ensure that particular Pods run only on nodes with certain isolation, security, or regulatory properties.

nodeSelector is the simplest recommended form of node selection constraint. You can add the nodeSelector field to a Pod's specification and set the node labels you want the target node to have; Kubernetes then schedules the Pod only onto nodes that have every specified label. nodeSelector is the simplest way to constrain a Pod to nodes with specific labels, while affinity and anti-affinity expand the types of constraints you can define. Some benefits of using affinity and anti-affinity are:

• The affinity and anti-affinity language is more expressive. nodeSelector can only select nodes that have all the specified labels, whereas affinity and anti-affinity give you stronger control over the selection logic.

• You can mark a rule as a "soft requirement" or "preference", so that when no matching node can be found the scheduler ignores the affinity/anti-affinity rule and still schedules the Pod successfully.

• You can constrain scheduling using the labels of other Pods running on a node (or in another topology domain), rather than only the node's own labels. This capability lets you define rules about which Pods may be placed together.

You can choose the nodes to deploy Pods onto by configuring affinity (affinity) and anti-affinity (anti-affinity).

                                                              "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_2","title":"\u5bb9\u5fcd\u65f6\u95f4","text":"

                                                              \u5f53\u5de5\u4f5c\u8d1f\u8f7d\u5b9e\u4f8b\u6240\u5728\u7684\u8282\u70b9\u4e0d\u53ef\u7528\u65f6\uff0c\u7cfb\u7edf\u5c06\u5b9e\u4f8b\u91cd\u65b0\u8c03\u5ea6\u5230\u5176\u5b83\u53ef\u7528\u8282\u70b9\u7684\u65f6\u95f4\u7a97\u3002\u9ed8\u8ba4\u4e3a 300 \u79d2\u3002
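This window corresponds to the standard Kubernetes tolerations for the not-ready and unreachable node conditions; a sketch of setting it explicitly on a Pod, with values mirroring the 300-second default:

tolerations:
  - key: node.kubernetes.io/not-ready
    operator: Exists
    effect: NoExecute
    tolerationSeconds: 300   # evict 300 s after the node becomes not-ready
  - key: node.kubernetes.io/unreachable
    operator: Exists
    effect: NoExecute
    tolerationSeconds: 300   # evict 300 s after the node becomes unreachable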

                                                              "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#nodeaffinity","title":"\u8282\u70b9\u4eb2\u548c\u6027\uff08nodeAffinity\uff09","text":"

                                                              \u8282\u70b9\u4eb2\u548c\u6027\u6982\u5ff5\u4e0a\u7c7b\u4f3c\u4e8e nodeSelector \uff0c \u5b83\u4f7f\u60a8\u53ef\u4ee5\u6839\u636e\u8282\u70b9\u4e0a\u7684\u6807\u7b7e\u6765\u7ea6\u675f Pod \u53ef\u4ee5\u8c03\u5ea6\u5230\u54ea\u4e9b\u8282\u70b9\u4e0a\u3002 \u8282\u70b9\u4eb2\u548c\u6027\u6709\u4e24\u79cd\uff1a

                                                              • \u5fc5\u987b\u6ee1\u8db3\uff1a\uff08 requiredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u53ea\u6709\u5728\u89c4\u5219\u88ab\u6ee1\u8db3\u7684\u65f6\u5019\u624d\u80fd\u6267\u884c\u8c03\u5ea6\u3002\u6b64\u529f\u80fd\u7c7b\u4f3c\u4e8e nodeSelector \uff0c \u4f46\u5176\u8bed\u6cd5\u8868\u8fbe\u80fd\u529b\u66f4\u5f3a\u3002\u60a8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002

                                                              • \u5c3d\u91cf\u6ee1\u8db3\uff1a\uff08 preferredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u4f1a\u5c1d\u8bd5\u5bfb\u627e\u6ee1\u8db3\u5bf9\u5e94\u89c4\u5219\u7684\u8282\u70b9\u3002\u5982\u679c\u627e\u4e0d\u5230\u5339\u914d\u7684\u8282\u70b9\uff0c\u8c03\u5ea6\u5668\u4ecd\u7136\u4f1a\u8c03\u5ea6\u8be5 Pod\u3002\u60a8\u8fd8\u53ef\u4e3a\u8f6f\u7ea6\u675f\u89c4\u5219\u8bbe\u5b9a\u6743\u91cd\uff0c\u5177\u4f53\u8c03\u5ea6\u65f6\uff0c\u82e5\u5b58\u5728\u591a\u4e2a\u7b26\u5408\u6761\u4ef6\u7684\u8282\u70b9\uff0c\u6743\u91cd\u6700\u5927\u7684\u8282\u70b9\u4f1a\u88ab\u4f18\u5148\u8c03\u5ea6\u3002\u540c\u65f6\u60a8\u8fd8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002
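A sketch combining both kinds of node affinity; the node label disktype: ssd is a hypothetical example, while kubernetes.io/arch is a standard node label:

affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:   # hard rule: must match
      nodeSelectorTerms:
        - matchExpressions:
            - key: disktype
              operator: In
              values: ["ssd"]
    preferredDuringSchedulingIgnoredDuringExecution:  # soft rule: preferred
      - weight: 80                                    # 1-100; larger weights win first
        preference:
          matchExpressions:
            - key: kubernetes.io/arch
              operator: In
              values: ["amd64"]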

                                                              "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_3","title":"\u6807\u7b7e\u540d","text":"

                                                              \u5bf9\u5e94\u8282\u70b9\u7684\u6807\u7b7e\uff0c\u53ef\u4ee5\u4f7f\u7528\u9ed8\u8ba4\u7684\u6807\u7b7e\u4e5f\u53ef\u4ee5\u7528\u6237\u81ea\u5b9a\u4e49\u6807\u7b7e\u3002

                                                              "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_4","title":"\u64cd\u4f5c\u7b26","text":"
                                                              • In\uff1a\u6807\u7b7e\u503c\u9700\u8981\u5728 values \u7684\u5217\u8868\u4e2d
                                                              • NotIn\uff1a\u6807\u7b7e\u7684\u503c\u4e0d\u5728\u67d0\u4e2a\u5217\u8868\u4e2d
                                                              • Exists\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                                                              • DoesNotExist\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u4e0d\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                                                              • Gt\uff1a\u6807\u7b7e\u7684\u503c\u5927\u4e8e\u67d0\u4e2a\u503c\uff08\u5b57\u7b26\u4e32\u6bd4\u8f83\uff09
                                                              • Lt\uff1a\u6807\u7b7e\u7684\u503c\u5c0f\u4e8e\u67d0\u4e2a\u503c\uff08\u5b57\u7b26\u4e32\u6bd4\u8f83\uff09
                                                              "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_5","title":"\u6743\u91cd","text":"

                                                              \u4ec5\u652f\u6301\u5728\u201c\u5c3d\u91cf\u6ee1\u8db3\u201d\u7b56\u7565\u4e2d\u6dfb\u52a0\uff0c\u53ef\u4ee5\u7406\u89e3\u4e3a\u8c03\u5ea6\u7684\u4f18\u5148\u7ea7\uff0c\u6743\u91cd\u5927\u7684\u4f1a\u88ab\u4f18\u5148\u8c03\u5ea6\u3002\u53d6\u503c\u8303\u56f4\u662f 1 \u5230 100\u3002

                                                              "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_6","title":"\u5de5\u4f5c\u8d1f\u8f7d\u4eb2\u548c\u6027","text":"

                                                              \u4e0e\u8282\u70b9\u4eb2\u548c\u6027\u7c7b\u4f3c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u7684\u4eb2\u548c\u6027\u4e5f\u6709\u4e24\u79cd\u7c7b\u578b\uff1a

                                                              • \u5fc5\u987b\u6ee1\u8db3\uff1a\uff08 requiredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u53ea\u6709\u5728\u89c4\u5219\u88ab\u6ee1\u8db3\u7684\u65f6\u5019\u624d\u80fd\u6267\u884c\u8c03\u5ea6\u3002\u6b64\u529f\u80fd\u7c7b\u4f3c\u4e8e nodeSelector \uff0c \u4f46\u5176\u8bed\u6cd5\u8868\u8fbe\u80fd\u529b\u66f4\u5f3a\u3002\u60a8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002
                                                              • \u5c3d\u91cf\u6ee1\u8db3\uff1a\uff08 preferredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u4f1a\u5c1d\u8bd5\u5bfb\u627e\u6ee1\u8db3\u5bf9\u5e94\u89c4\u5219\u7684\u8282\u70b9\u3002\u5982\u679c\u627e\u4e0d\u5230\u5339\u914d\u7684\u8282\u70b9\uff0c\u8c03\u5ea6\u5668\u4ecd\u7136\u4f1a\u8c03\u5ea6\u8be5 Pod\u3002\u60a8\u8fd8\u53ef\u4e3a\u8f6f\u7ea6\u675f\u89c4\u5219\u8bbe\u5b9a\u6743\u91cd\uff0c\u5177\u4f53\u8c03\u5ea6\u65f6\uff0c\u82e5\u5b58\u5728\u591a\u4e2a\u7b26\u5408\u6761\u4ef6\u7684\u8282\u70b9\uff0c\u6743\u91cd\u6700\u5927\u7684\u8282\u70b9\u4f1a\u88ab\u4f18\u5148\u8c03\u5ea6\u3002\u540c\u65f6\u60a8\u8fd8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002

                                                              \u5de5\u4f5c\u8d1f\u8f7d\u7684\u4eb2\u548c\u6027\u4e3b\u8981\u7528\u6765\u51b3\u5b9a\u5de5\u4f5c\u8d1f\u8f7d\u7684 Pod \u53ef\u4ee5\u548c\u54ea\u4e9b Pod\u90e8 \u7f72\u5728\u540c\u4e00\u62d3\u6251\u57df\u3002\u4f8b\u5982\uff0c\u5bf9\u4e8e\u76f8\u4e92\u901a\u4fe1\u7684\u670d\u52a1\uff0c\u53ef\u901a\u8fc7\u5e94\u7528\u4eb2\u548c\u6027\u8c03\u5ea6\uff0c\u5c06\u5176\u90e8\u7f72\u5230\u540c\u4e00\u62d3\u6251\u57df\uff08\u5982\u540c\u4e00\u53ef\u7528\u533a\uff09\u4e2d\uff0c\u51cf\u5c11\u5b83\u4eec\u4e4b\u95f4\u7684\u7f51\u7edc\u5ef6\u8fdf\u3002
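A sketch of workload (pod) affinity that co-locates a Pod with Pods labeled app: backend inside the same availability zone; the label is a hypothetical example, and topology.kubernetes.io/zone is a standard topology key:

affinity:
  podAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
            - key: app
              operator: In
              values: ["backend"]
        topologyKey: topology.kubernetes.io/zone   # co-locate within the same zone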

                                                              "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_7","title":"\u6807\u7b7e\u540d","text":"

                                                              \u5bf9\u5e94\u8282\u70b9\u7684\u6807\u7b7e\uff0c\u53ef\u4ee5\u4f7f\u7528\u9ed8\u8ba4\u7684\u6807\u7b7e\u4e5f\u53ef\u4ee5\u7528\u6237\u81ea\u5b9a\u4e49\u6807\u7b7e\u3002

                                                              "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_8","title":"\u547d\u540d\u7a7a\u95f4","text":"

                                                              \u6307\u5b9a\u8c03\u5ea6\u7b56\u7565\u751f\u6548\u7684\u547d\u540d\u7a7a\u95f4\u3002

                                                              "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_9","title":"\u64cd\u4f5c\u7b26","text":"
                                                              • In\uff1a\u6807\u7b7e\u503c\u9700\u8981\u5728 values \u7684\u5217\u8868\u4e2d
                                                              • NotIn\uff1a\u6807\u7b7e\u7684\u503c\u4e0d\u5728\u67d0\u4e2a\u5217\u8868\u4e2d
                                                              • Exists\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                                                              • DoesNotExist\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u4e0d\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                                                              "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_10","title":"\u62d3\u6251\u57df","text":"

                                                              \u6307\u5b9a\u8c03\u5ea6\u65f6\u7684\u5f71\u54cd\u8303\u56f4\u3002\u4f8b\u5982\uff0c\u5982\u679c\u6307\u5b9a\u4e3a kubernetes.io/Clustername \u8868\u793a\u4ee5 Node \u8282\u70b9\u4e3a\u533a\u5206\u8303\u56f4\u3002

                                                              "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_11","title":"\u5de5\u4f5c\u8d1f\u8f7d\u53cd\u4eb2\u548c\u6027","text":"

                                                              \u4e0e\u8282\u70b9\u4eb2\u548c\u6027\u7c7b\u4f3c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u7684\u53cd\u4eb2\u548c\u6027\u4e5f\u6709\u4e24\u79cd\u7c7b\u578b\uff1a

                                                              • \u5fc5\u987b\u6ee1\u8db3\uff1a\uff08 requiredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u53ea\u6709\u5728\u89c4\u5219\u88ab\u6ee1\u8db3\u7684\u65f6\u5019\u624d\u80fd\u6267\u884c\u8c03\u5ea6\u3002\u6b64\u529f\u80fd\u7c7b\u4f3c\u4e8e nodeSelector \uff0c \u4f46\u5176\u8bed\u6cd5\u8868\u8fbe\u80fd\u529b\u66f4\u5f3a\u3002\u60a8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002
                                                              • \u5c3d\u91cf\u6ee1\u8db3\uff1a\uff08 preferredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u4f1a\u5c1d\u8bd5\u5bfb\u627e\u6ee1\u8db3\u5bf9\u5e94\u89c4\u5219\u7684\u8282\u70b9\u3002\u5982\u679c\u627e\u4e0d\u5230\u5339\u914d\u7684\u8282\u70b9\uff0c\u8c03\u5ea6\u5668\u4ecd\u7136\u4f1a\u8c03\u5ea6\u8be5 Pod\u3002\u60a8\u8fd8\u53ef\u4e3a\u8f6f\u7ea6\u675f\u89c4\u5219\u8bbe\u5b9a\u6743\u91cd\uff0c\u5177\u4f53\u8c03\u5ea6\u65f6\uff0c\u82e5\u5b58\u5728\u591a\u4e2a\u7b26\u5408\u6761\u4ef6\u7684\u8282\u70b9\uff0c\u6743\u91cd\u6700\u5927\u7684\u8282\u70b9\u4f1a\u88ab\u4f18\u5148\u8c03\u5ea6\u3002\u540c\u65f6\u60a8\u8fd8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002

                                                              \u5de5\u4f5c\u8d1f\u8f7d\u7684\u53cd\u4eb2\u548c\u6027\u4e3b\u8981\u7528\u6765\u51b3\u5b9a\u5de5\u4f5c\u8d1f\u8f7d\u7684 Pod \u4e0d\u53ef\u4ee5\u548c\u54ea\u4e9b Pod \u90e8\u7f72\u5728\u540c\u4e00\u62d3\u6251\u57df\u3002\u4f8b\u5982\uff0c\u5c06\u4e00\u4e2a\u8d1f\u8f7d\u7684\u76f8\u540c Pod \u5206\u6563\u90e8\u7f72\u5230\u4e0d\u540c\u7684\u62d3\u6251\u57df\uff08\u4f8b\u5982\u4e0d\u540c\u4e3b\u673a\uff09\u4e2d\uff0c\u63d0\u9ad8\u8d1f\u8f7d\u672c\u8eab\u7684\u7a33\u5b9a\u6027\u3002
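A sketch of workload anti-affinity that spreads the replicas of one workload across different hosts; the label app: web is a hypothetical example:

affinity:
  podAntiAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            app: web
        topologyKey: kubernetes.io/hostname   # at most one such Pod per node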

                                                              "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_12","title":"\u6807\u7b7e\u540d","text":"

                                                              \u5bf9\u5e94\u8282\u70b9\u7684\u6807\u7b7e\uff0c\u53ef\u4ee5\u4f7f\u7528\u9ed8\u8ba4\u7684\u6807\u7b7e\u4e5f\u53ef\u4ee5\u7528\u6237\u81ea\u5b9a\u4e49\u6807\u7b7e\u3002

                                                              "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_13","title":"\u547d\u540d\u7a7a\u95f4","text":"

                                                              \u6307\u5b9a\u8c03\u5ea6\u7b56\u7565\u751f\u6548\u7684\u547d\u540d\u7a7a\u95f4\u3002

                                                              "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_14","title":"\u64cd\u4f5c\u7b26","text":"
                                                              • In\uff1a\u6807\u7b7e\u503c\u9700\u8981\u5728 values \u7684\u5217\u8868\u4e2d
                                                              • NotIn\uff1a\u6807\u7b7e\u7684\u503c\u4e0d\u5728\u67d0\u4e2a\u5217\u8868\u4e2d
                                                              • Exists\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                                                              • DoesNotExist\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u4e0d\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                                                              "},{"location":"admin/kpanda/workloads/pod-config/scheduling-policy.html#_15","title":"\u62d3\u6251\u57df","text":"

                                                              \u6307\u5b9a\u8c03\u5ea6\u65f6\u7684\u5f71\u54cd\u8303\u56f4\u3002\u4f8b\u5982\uff0c\u5982\u679c\u6307\u5b9a\u4e3a kubernetes.io/Clustername \u8868\u793a\u4ee5 Node \u8282\u70b9\u4e3a\u533a\u5206\u8303\u56f4\u3002

                                                              "},{"location":"admin/kpanda/workloads/pod-config/workload-status.html","title":"\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001","text":"

                                                              \u5de5\u4f5c\u8d1f\u8f7d\u662f\u8fd0\u884c\u5728 Kubernetes \u4e0a\u7684\u4e00\u4e2a\u5e94\u7528\u7a0b\u5e8f\uff0c\u5728 Kubernetes \u4e2d\uff0c\u65e0\u8bba\u60a8\u7684\u5e94\u7528\u7a0b\u5e8f\u662f\u7531\u5355\u4e2a\u540c\u4e00\u7ec4\u4ef6\u6216\u662f\u7531\u591a\u4e2a\u4e0d\u540c\u7684\u7ec4\u4ef6\u6784\u6210\uff0c\u90fd\u53ef\u4ee5\u4f7f\u7528\u4e00\u7ec4 Pod \u6765\u8fd0\u884c\u5b83\u3002Kubernetes \u63d0\u4f9b\u4e86\u4e94\u79cd\u5185\u7f6e\u7684\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u6765\u7ba1\u7406 Pod\uff1a

                                                              • \u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d
                                                              • \u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d
                                                              • \u5b88\u62a4\u8fdb\u7a0b
                                                              • \u4efb\u52a1
                                                              • \u5b9a\u65f6\u4efb\u52a1

                                                              \u60a8\u4e5f\u53ef\u4ee5\u901a\u8fc7\u8bbe\u7f6e\u81ea\u5b9a\u4e49\u8d44\u6e90 CRD \u6765\u5b9e\u73b0\u5bf9\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u7684\u6269\u5c55\u3002\u5728\u7b2c\u4e94\u4ee3\u5bb9\u5668\u7ba1\u7406\u4e2d\uff0c\u652f\u6301\u5bf9\u5de5\u4f5c\u8d1f\u8f7d\u8fdb\u884c\u521b\u5efa\u3001\u66f4\u65b0\u3001\u6269\u5bb9\u3001\u76d1\u63a7\u3001\u65e5\u5fd7\u3001\u5220\u9664\u3001\u7248\u672c\u7ba1\u7406\u7b49\u5168\u751f\u547d\u5468\u671f\u7ba1\u7406\u3002

                                                              "},{"location":"admin/kpanda/workloads/pod-config/workload-status.html#pod","title":"Pod \u72b6\u6001","text":"

                                                              Pod \u662f Kuberneters \u4e2d\u521b\u5efa\u548c\u7ba1\u7406\u7684\u3001\u6700\u5c0f\u7684\u8ba1\u7b97\u5355\u5143\uff0c\u5373\u4e00\u7ec4\u5bb9\u5668\u7684\u96c6\u5408\u3002\u8fd9\u4e9b\u5bb9\u5668\u5171\u4eab\u5b58\u50a8\u3001\u7f51\u7edc\u4ee5\u53ca\u7ba1\u7406\u63a7\u5236\u5bb9\u5668\u8fd0\u884c\u65b9\u5f0f\u7684\u7b56\u7565\u3002 Pod \u901a\u5e38\u4e0d\u7531\u7528\u6237\u76f4\u63a5\u521b\u5efa\uff0c\u800c\u662f\u901a\u8fc7\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u6765\u521b\u5efa\u3002 Pod \u9075\u5faa\u4e00\u4e2a\u9884\u5b9a\u4e49\u7684\u751f\u547d\u5468\u671f\uff0c\u8d77\u59cb\u4e8e Pending \u9636\u6bb5\uff0c\u5982\u679c\u81f3\u5c11\u5176\u4e2d\u6709\u4e00\u4e2a\u4e3b\u8981\u5bb9\u5668\u6b63\u5e38\u542f\u52a8\uff0c\u5219\u8fdb\u5165 Running \uff0c\u4e4b\u540e\u53d6\u51b3\u4e8e Pod \u4e2d\u662f\u5426\u6709\u5bb9\u5668\u4ee5\u5931\u8d25\u72b6\u6001\u7ed3\u675f\u800c\u8fdb\u5165 Succeeded \u6216\u8005 Failed \u9636\u6bb5\u3002

                                                              "},{"location":"admin/kpanda/workloads/pod-config/workload-status.html#_2","title":"\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001","text":"

                                                              \u7b2c\u4e94\u4ee3\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4f9d\u636e Pod \u7684\u72b6\u6001\u3001\u526f\u672c\u6570\u7b49\u56e0\u7d20\uff0c\u8bbe\u8ba1\u4e86\u4e00\u79cd\u5185\u7f6e\u7684\u5de5\u4f5c\u8d1f\u8f7d\u751f\u547d\u5468\u671f\u7684\u72b6\u6001\u96c6\uff0c\u4ee5\u8ba9\u7528\u6237\u80fd\u591f\u66f4\u52a0\u771f\u5b9e\u7684\u611f\u77e5\u5de5\u4f5c\u8d1f\u8f7d\u8fd0\u884c\u60c5\u51b5\u3002 \u7531\u4e8e\u4e0d\u540c\u7684\u5de5\u4f5c\u8d1f\u8f7d\u7c7b\u578b\uff08\u6bd4\u5982\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u548c\u4efb\u52a1\uff09\u5bf9 Pod \u7684\u7ba1\u7406\u673a\u5236\u4e0d\u4e00\u81f4\uff0c\u56e0\u6b64\uff0c\u4e0d\u540c\u7684\u5de5\u4f5c\u8d1f\u8f7d\u5728\u8fd0\u884c\u8fc7\u7a0b\u4e2d\u4f1a\u5448\u73b0\u4e0d\u540c\u7684\u751f\u547d\u5468\u671f\u72b6\u6001\uff0c\u5177\u4f53\u5982\u4e0b\u8868\uff1a

                                                              "},{"location":"admin/kpanda/workloads/pod-config/workload-status.html#_3","title":"\u65e0\u72b6\u6001\u8d1f\u8f7d\u3001\u6709\u72b6\u6001\u8d1f\u8f7d\u3001\u5b88\u62a4\u8fdb\u7a0b\u72b6\u6001","text":"\u72b6\u6001 \u63cf\u8ff0 \u7b49\u5f85\u4e2d 1. \u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u6b63\u5728\u8fdb\u884c\u4e2d\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u30022. \u89e6\u53d1\u5347\u7ea7\u6216\u8005\u56de\u6eda\u52a8\u4f5c\u540e\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u30023. \u89e6\u53d1\u6682\u505c/\u6269\u7f29\u5bb9\u7b49\u64cd\u4f5c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u5728\u6b64\u72b6\u6001\u3002 \u8fd0\u884c\u4e2d \u8d1f\u8f7d\u4e0b\u7684\u6240\u6709\u5b9e\u4f8b\u90fd\u5728\u8fd0\u884c\u4e2d\u4e14\u526f\u672c\u6570\u4e0e\u7528\u6237\u9884\u5b9a\u4e49\u7684\u6570\u91cf\u4e00\u81f4\u65f6\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5220\u9664\u4e2d \u6267\u884c\u5220\u9664\u64cd\u4f5c\u65f6\uff0c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\uff0c\u76f4\u5230\u5220\u9664\u5b8c\u6210\u3002 \u5f02\u5e38 \u56e0\u4e3a\u67d0\u4e9b\u539f\u56e0\u65e0\u6cd5\u53d6\u5f97\u5de5\u4f5c\u8d1f\u8f7d\u7684\u72b6\u6001\u3002\u8fd9\u79cd\u60c5\u51b5\u901a\u5e38\u662f\u56e0\u4e3a\u4e0e Pod \u6240\u5728\u4e3b\u673a\u901a\u4fe1\u5931\u8d25\u3002 \u672a\u5c31\u7eea \u5bb9\u5668\u5904\u4e8e\u5f02\u5e38\uff0cpending \u72b6\u6001\u65f6\uff0c\u56e0\u672a\u77e5\u9519\u8bef\u5bfc\u81f4\u8d1f\u8f7d\u65e0\u6cd5\u542f\u52a8\u65f6\u663e\u793a\u6b64\u72b6\u6001"},{"location":"admin/kpanda/workloads/pod-config/workload-status.html#_4","title":"\u4efb\u52a1\u72b6\u6001","text":"\u72b6\u6001 \u63cf\u8ff0 \u7b49\u5f85\u4e2d \u4efb\u52a1\u521b\u5efa\u6b63\u5728\u8fdb\u884c\u4e2d\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u6267\u884c\u4e2d \u4efb\u52a1\u6b63\u5728\u6267\u884c\u4e2d\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u6267\u884c\u5b8c\u6210 \u4efb\u52a1\u6267\u884c\u5b8c\u6210\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5220\u9664\u4e2d \u89e6\u53d1\u5220\u9664\u64cd\u4f5c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u5728\u6b64\u72b6\u6001\u3002 \u5f02\u5e38 \u56e0\u4e3a\u67d0\u4e9b\u539f\u56e0\u65e0\u6cd5\u53d6\u5f97 Pod \u7684\u72b6\u6001\u3002\u8fd9\u79cd\u60c5\u51b5\u901a\u5e38\u662f\u56e0\u4e3a\u4e0e Pod \u6240\u5728\u4e3b\u673a\u901a\u4fe1\u5931\u8d25\u3002"},{"location":"admin/kpanda/workloads/pod-config/workload-status.html#_5","title":"\u5b9a\u65f6\u4efb\u52a1\u72b6\u6001","text":"\u72b6\u6001 \u63cf\u8ff0 \u7b49\u5f85\u4e2d \u5b9a\u65f6\u4efb\u52a1\u521b\u5efa\u6b63\u5728\u8fdb\u884c\u4e2d\uff0c\u5b9a\u65f6\u4efb\u52a1\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5df2\u542f\u52a8 \u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\u6210\u529f\u540e\uff0c\u6b63\u5e38\u8fd0\u884c\u6216\u5c06\u5df2\u6682\u505c\u7684\u4efb\u52a1\u542f\u52a8\u65f6\u5b9a\u65f6\u4efb\u52a1\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5df2\u505c\u6b62 \u6267\u884c\u505c\u6b62\u4efb\u52a1\u64cd\u4f5c\u65f6\uff0c\u5b9a\u65f6\u4efb\u52a1\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5220\u9664\u4e2d \u89e6\u53d1\u5220\u9664\u64cd\u4f5c\uff0c\u5b9a\u65f6\u4efb\u52a1\u5904\u5728\u6b64\u72b6\u6001\u3002

When a workload is in an abnormal or not-ready state, you can hover over the workload's status value and the system will show more detailed error information in a tooltip. You can also view the logs or events to obtain runtime information about the workload, for example with the commands below.
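With standard kubectl, the usual starting points are the workload's events and logs; the resource and namespace names below are placeholders:

kubectl describe deployment my-deploy -n my-namespace        # status, conditions, and recent events
kubectl get events -n my-namespace --sort-by=.lastTimestamp  # events in the namespace, oldest first
kubectl logs my-pod -n my-namespace --previous               # logs of the previously crashed container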

                                                              "},{"location":"admin/register/index.html","title":"\u7528\u6237\u6ce8\u518c","text":"

                                                              \u65b0\u7528\u6237\u9996\u6b21\u4f7f\u7528 AI \u7b97\u529b\u5e73\u53f0\u9700\u8981\u8fdb\u884c\u6ce8\u518c\u3002

                                                              "},{"location":"admin/register/index.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                              • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                                              • \u5df2\u5f00\u542f\u90ae\u7bb1\u6ce8\u518c\u529f\u80fd
                                                              • \u6709\u4e00\u4e2a\u53ef\u7528\u7684\u90ae\u7bb1
                                                              "},{"location":"admin/register/index.html#_3","title":"\u90ae\u7bb1\u6ce8\u518c\u6b65\u9aa4","text":"
                                                              1. \u6253\u5f00 AI \u7b97\u529b\u5e73\u53f0\u9996\u9875 https://ai.isuanova.com/\uff0c\u70b9\u51fb \u6ce8\u518c

                                                              2. \u952e\u5165\u7528\u6237\u540d\u3001\u5bc6\u7801\u3001\u90ae\u7bb1\u540e\u70b9\u51fb \u6ce8\u518c

                                                              3. \u7cfb\u7edf\u63d0\u793a\u53d1\u9001\u4e86\u4e00\u5c01\u90ae\u4ef6\u5230\u60a8\u7684\u90ae\u7bb1\u3002

                                                              4. \u767b\u5f55\u81ea\u5df1\u7684\u90ae\u7bb1\uff0c\u627e\u5230\u90ae\u4ef6\uff0c\u70b9\u51fb\u94fe\u63a5\u3002

                                                              5. \u606d\u559c\uff0c\u60a8\u6210\u529f\u8fdb\u5165\u4e86 AI \u7b97\u529b\u5e73\u53f0\uff0c\u73b0\u5728\u53ef\u4ee5\u5f00\u59cb\u60a8\u7684 AI \u4e4b\u65c5\u4e86\u3002

                                                              \u4e0b\u4e00\u6b65\uff1a\u4e3a\u7528\u6237\u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4

                                                              "},{"location":"admin/register/bindws.html","title":"\u4e3a\u7528\u6237\u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4","text":"

                                                              \u7528\u6237\u6210\u529f\u6ce8\u518c\u4e4b\u540e\uff0c\u9700\u8981\u4e3a\u5176\u7ed1\u5b9a\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u3002

                                                              "},{"location":"admin/register/bindws.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                              • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                                              • \u7528\u6237\u5df2\u6210\u529f\u6ce8\u518c
                                                              • \u6709\u4e00\u4e2a\u53ef\u7528\u7684\u7ba1\u7406\u5458\u8d26\u53f7
                                                              "},{"location":"admin/register/bindws.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u4ee5\u7ba1\u7406\u5458\u8eab\u4efd\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                                                              2. \u5bfc\u822a\u5207\u6362\u81f3 \u5168\u5c40\u7ba1\u7406 -> \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \uff0c\u70b9\u51fb \u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4

                                                              3. \u8f93\u5165\u540d\u79f0\uff0c\u9009\u62e9\u6587\u4ef6\u5939\u540e\u70b9\u51fb \u786e\u5b9a \uff0c\u521b\u5efa\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4

                                                              4. \u7ed9\u5de5\u4f5c\u7a7a\u95f4\u7ed1\u5b9a\u8d44\u6e90

                                                                \u53ef\u4ee5\u5728\u8fd9\u4e2a\u754c\u9762\u4e0a\u70b9\u51fb \u521b\u5efa\u96c6\u7fa4-\u547d\u540d\u7a7a\u95f4 \u6765\u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u3002

                                                              5. \u6dfb\u52a0\u6388\u6743\uff1a\u5c06\u7528\u6237\u5206\u914d\u81f3\u5de5\u4f5c\u7a7a\u95f4

                                                              6. \u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u67e5\u770b\u662f\u5426\u5177\u6709\u5de5\u4f5c\u7a7a\u95f4\u53ca\u547d\u540d\u7a7a\u95f4\u7684\u6743\u9650\u3002 \u7ba1\u7406\u5458\u53ef\u4ee5\u901a\u8fc7\u53f3\u4fa7\u7684 \u2507 \u6267\u884c\u66f4\u591a\u64cd\u4f5c\u3002

                                                              \u4e0b\u4e00\u6b65\uff1a\u4e3a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u8d44\u6e90

                                                              "},{"location":"admin/register/wsres.html","title":"\u4e3a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u8d44\u6e90","text":"

                                                              \u5c06\u7528\u6237\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4\u540e\uff0c\u9700\u8981\u7ed9\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u5408\u9002\u7684\u8d44\u6e90\u3002

                                                              "},{"location":"admin/register/wsres.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                              • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                                              • \u6709\u4e00\u4e2a\u53ef\u7528\u7684\u7ba1\u7406\u5458\u8d26\u53f7
                                                              • \u5de5\u4f5c\u7a7a\u95f4\u5df2\u521b\u5efa\u4e14\u7ed1\u5b9a\u4e86\u547d\u540d\u7a7a\u95f4
                                                              "},{"location":"admin/register/wsres.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u4ee5\u7ba1\u7406\u5458\u8eab\u4efd\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                                                              2. \u5bfc\u822a\u5230 \u5168\u5c40\u7ba1\u7406 -> \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7\uff0c\u627e\u5230\u8981\u6dfb\u52a0\u8d44\u6e90\u7684\u5de5\u4f5c\u7a7a\u95f4\uff0c\u70b9\u51fb \u65b0\u589e\u5171\u4eab\u8d44\u6e90

                                                              3. \u9009\u62e9\u96c6\u7fa4\uff0c\u8bbe\u7f6e\u5408\u9002\u7684\u8d44\u6e90\u914d\u989d\u540e\uff0c\u70b9\u51fb \u786e\u5b9a

                                                              4. \u8fd4\u56de\u5171\u4eab\u8d44\u6e90\u9875\uff0c\u4e3a\u5de5\u4f5c\u7a7a\u95f4\u6210\u529f\u5206\u914d\u4e86\u8d44\u6e90\uff0c\u7ba1\u7406\u5458\u53ef\u4ee5\u901a\u8fc7\u53f3\u4fa7\u7684 \u2507 \u968f\u65f6\u4fee\u6539\u3002

                                                              \u4e0b\u4e00\u6b65\uff1a\u521b\u5efa\u4e91\u4e3b\u673a

                                                              "},{"location":"admin/security/index.html","title":"\u4e91\u539f\u751f\u5b89\u5168","text":"

                                                              \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9488\u5bf9\u5bb9\u5668\u3001Pod\u3001\u955c\u50cf\u3001\u8fd0\u884c\u65f6\u3001\u5fae\u670d\u52a1\u63d0\u4f9b\u4e86\u5168\u9762\u81ea\u52a8\u5316\u7684\u5b89\u5168\u5b9e\u73b0\u3002 \u4e0b\u8868\u5217\u51fa\u4e86\u4e00\u4e9b\u5df2\u5b9e\u73b0\u6216\u6b63\u5728\u5b9e\u73b0\u4e2d\u7684\u5b89\u5168\u7279\u6027\u3002

| Security Feature | Item | Description |
| --- | --- | --- |
| Image security | Trusted image distribution | To transmit images securely, key pairs and signature information are required to guarantee transport security; a key can be selected to sign the image during transmission. |
| Runtime security | Event correlation analysis | Supports correlating and risk-analyzing security events detected at runtime, improving attack traceability, converging alerts, reducing invalid alerts, and increasing incident-response efficiency. |
| - | Container honeypot repository | Ships with a container honeypot repository; common honeypots include, but are not limited to, unauthorized-access vulnerabilities, code-execution vulnerabilities, local file read vulnerabilities, and remote command execution (RCE) vulnerabilities. |
| - | Container honeypot deployment | Supports adding custom honeypot containers, with customizable service names, service locations, and so on. |
| - | Container honeypot alerts | Supports alerting on suspicious behavior in container honeypots. |
| - | Drift detection | While scanning an image, learns all binary file information in the image and builds an allowlist; after the container goes online, only binaries on the allowlist may run, ensuring the container cannot run untrusted (e.g., illegally downloaded) executables. |
| Micro-segmentation | Intelligent isolation policy recommendation | Records historical resource access traffic and can intelligently recommend policies based on that history when configuring isolation policies for a resource. |
| - | Tenant isolation | Supports isolation control of tenants within a Kubernetes cluster, with the ability to configure different network security groups for different tenants; supports tenant-level security policy settings, implementing inter-tenant network access and isolation via the different security groups and the configured policies. |
| Microservice security | Service & API security scanning | Supports automatic, manual, and periodic security scans of services and APIs in the cluster; covers all traditional web scan items, including XSS, SQL injection, command/code injection, directory enumeration, path traversal, XML entity injection, PoC, file upload, weak passwords, JSONP, SSRF, arbitrary redirects, and CRLF injection, as well as items specific to container environments; for discovered vulnerabilities, supports displaying the vulnerability type, URL, parameters, severity, test method, and so on. |

"},{"location":"admin/security/falco-exporter.html","title":"Falco-exporter","text":"

Falco-exporter is a Prometheus metrics exporter for Falco output events.

Falco-exporter is deployed as a DaemonSet on the Kubernetes cluster. If Prometheus is installed and running in the cluster, it will automatically discover the metrics provided by Falco-exporter.
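To spot-check that the metrics are actually being served, you can port-forward the exporter's Service; a sketch that assumes the chart's default Service name falco-exporter, a falco namespace, and the exporter's default port 9376 (all three depend on how the chart was installed):

```bash
# Forward the exporter's metrics port to localhost (Service name, namespace, and port are assumptions)
kubectl -n falco port-forward svc/falco-exporter 9376:9376 &

# Falco event counters should appear among the exported metrics
curl -s http://localhost:9376/metrics | grep falco_events
```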

                                                              "},{"location":"admin/security/falco-exporter.html#falco-exporter_1","title":"\u5b89\u88c5 Falco-exporter","text":"

                                                              \u672c\u9875\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5 Falco-exporter \u7ec4\u4ef6\u3002

                                                              Note

                                                              \u5728\u5b89\u88c5\u4f7f\u7528 Falco-exporter \u4e4b\u524d\uff0c\u9700\u8981\u5b89\u88c5\u5e76\u8fd0\u884c Falco\uff0c\u5e76\u542f\u7528 gRPC \u8f93\u51fa\uff08\u9ed8\u8ba4\u901a\u8fc7 Unix \u5957\u63a5\u5b57\u542f\u7528\uff09\u3002 \u5173\u4e8e\u542f\u7528 gRPC \u8f93\u51fa\u7684\u66f4\u591a\u4fe1\u606f\uff0c\u53ef\u53c2\u9605\u5728 Falco Helm Chart \u4e2d\u542f\u7528 gRPC \u8f93\u51fa\u3002
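For reference, a minimal sketch of Helm values that enable Falco's gRPC output over a Unix socket; the key names follow the upstream falcosecurity/falco chart, so verify them against the chart version you actually deploy:

```bash
# Write a values file that enables gRPC output, then install/upgrade Falco with it
cat > falco-grpc-values.yaml <<'EOF'
falco:
  grpc:
    enabled: true
    bind_address: "unix:///run/falco/falco.sock"
  grpc_output:
    enabled: true
EOF

helm upgrade --install falco falcosecurity/falco \
  --namespace falco --create-namespace \
  -f falco-grpc-values.yaml
```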

Confirm that your cluster has been successfully connected to the container management platform, then perform the following steps to install Falco-exporter.

1. In the left navigation bar, click Container Management -> Cluster List, then find the name of the cluster on which Falco-exporter is to be installed.

2. In the left navigation bar of that cluster, select Helm Apps -> Helm Charts, then find and click falco-exporter.

3. Select the version you want to install in Version and click Install.

4. On the installation screen, fill in the required parameters.

   On the screen above, fill in the application name, namespace, version, and so on.

   On the screen above, fill in the following parameters:

   • Falco Prometheus Exporter -> Image Settings -> Registry: the registry address of the falco-exporter image. A usable online registry is filled in by default; in an air-gapped environment, change it to your private registry address.
   • Falco Prometheus Exporter -> Prometheus ServiceMonitor Settings -> Repository: the falco-exporter image name.
   • Falco Prometheus Exporter -> Prometheus ServiceMonitor Settings -> Install ServiceMonitor: installs the Prometheus Operator ServiceMonitor; enabled by default.
   • Falco Prometheus Exporter -> Prometheus ServiceMonitor Settings -> Scrape Interval: a user-defined scrape interval; if not specified, the Prometheus default interval is used.
   • Falco Prometheus Exporter -> Prometheus ServiceMonitor Settings -> Scrape Timeout: a user-defined scrape timeout; if not specified, the Prometheus default scrape timeout is used.

   On the screen above, fill in the following parameters:

   • Falco Prometheus Exporter -> Prometheus prometheusRules -> Install prometheusRules: creates PrometheusRules that alert on priority events; enabled by default.
   • Falco Prometheus Exporter -> Prometheus prometheusRules -> Alerts settings: alert settings; for log events of different levels, set whether the alert is enabled, the alert interval, and the alert threshold.

5. Click the OK button at the bottom right to complete the installation.
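The same installation can also be sketched from the command line, assuming the upstream falcosecurity chart repository and its default value names (the platform's Helm form maps onto values like these; adjust registry settings for a private environment):

```bash
helm repo add falcosecurity https://falcosecurity.github.io/charts
helm repo update

# Install falco-exporter with a ServiceMonitor and alerting rules enabled
helm install falco-exporter falcosecurity/falco-exporter \
  --namespace falco --create-namespace \
  --set serviceMonitor.enabled=true \
  --set prometheusRules.enabled=true
```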

                                                              "},{"location":"admin/security/falco-install.html","title":"\u5b89\u88c5 Falco","text":"

                                                              \u8bf7\u786e\u8ba4\u60a8\u7684\u96c6\u7fa4\u5df2\u6210\u529f\u63a5\u5165\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u7136\u540e\u6267\u884c\u4ee5\u4e0b\u6b65\u9aa4\u5b89\u88c5 Falco\u3002

                                                              1. \u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb\u5bb9\u5668\u7ba1\u7406\u2014>\u96c6\u7fa4\u5217\u8868\uff0c\u7136\u540e\u627e\u5230\u51c6\u5907\u5b89\u88c5 Falco \u7684\u96c6\u7fa4\u540d\u79f0\u3002

                                                              2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u9009\u62e9 Helm \u5e94\u7528 -> Helm \u6a21\u677f\uff0c\u627e\u5230\u5e76\u70b9\u51fb Falco\u3002

                                                              3. \u5728\u7248\u672c\u9009\u62e9\u4e2d\u9009\u62e9\u5e0c\u671b\u5b89\u88c5\u7684\u7248\u672c\uff0c\u70b9\u51fb\u5b89\u88c5\u3002

                                                              4. \u5728\u5b89\u88c5\u754c\u9762\uff0c\u586b\u5199\u6240\u9700\u7684\u5b89\u88c5\u53c2\u6570\u3002

                                                                \u5728\u5982\u4e0a\u754c\u9762\u4e2d\uff0c\u586b\u5199\u5e94\u7528\u540d\u79f0\u3001\u547d\u540d\u7a7a\u95f4\u3001\u7248\u672c\u7b49\u3002

                                                                \u5728\u5982\u4e0a\u754c\u9762\u4e2d\uff0c\u586b\u5199\u4ee5\u4e0b\u53c2\u6570\uff1a

                                                                • Falco -> Image Settings -> Registry\uff1a\u8bbe\u7f6e Falco \u955c\u50cf\u7684\u4ed3\u5e93\u5730\u5740\uff0c\u5df2\u7ecf\u9ed8\u8ba4\u586b\u5199\u53ef\u7528\u7684\u5728\u7ebf\u4ed3\u5e93\u3002\u5982\u679c\u662f\u79c1\u6709\u5316\u73af\u5883\uff0c\u53ef\u4fee\u6539\u4e3a\u79c1\u6709\u4ed3\u5e93\u5730\u5740\u3002

                                                                • Falco -> Image Settings -> Repository\uff1a\u8bbe\u7f6e Falco \u955c\u50cf\u540d\u3002

                                                                • Falco -> Falco Driver -> Image Settings -> Registry\uff1a\u8bbe\u7f6e Falco Driver \u955c\u50cf\u7684\u4ed3\u5e93\u5730\u5740\uff0c\u5df2\u7ecf\u9ed8\u8ba4\u586b\u5199\u53ef\u7528\u7684\u5728\u7ebf\u4ed3\u5e93\u3002\u5982\u679c\u662f\u79c1\u6709\u5316\u73af\u5883\uff0c\u53ef\u4fee\u6539\u4e3a\u79c1\u6709\u4ed3\u5e93\u5730\u5740\u3002

                                                                • Falco -> Falco Driver -> Image Settings -> Repository\uff1a\u8bbe\u7f6e Falco Driver \u955c\u50cf\u540d\u3002

                                                                • Falco -> Falco Driver -> Image Settings -> Driver Kind\uff1a\u8bbe\u7f6e Driver Kind\uff0c\u63d0\u4f9b\u4ee5\u4e0b\u4e24\u79cd\u9009\u62e9\uff1a

                                                                  1. ebpf\uff1a\u4f7f\u7528 ebpf \u6765\u68c0\u6d4b\u4e8b\u4ef6\uff0c\u8fd9\u9700\u8981 Linux \u5185\u6838\u652f\u6301 ebpf\uff0c\u5e76\u542f\u7528 CONFIG_BPF_JIT \u548c sysctl net.core.bpf_jit_enable=1\u3002

                                                                  2. module\uff1a\u4f7f\u7528\u5185\u6838\u6a21\u5757\u68c0\u6d4b\uff0c\u652f\u6301\u6709\u9650\u7684\u64cd\u4f5c\u7cfb\u7edf\u7248\u672c\uff0c\u53c2\u8003 module \u652f\u6301\u7cfb\u7edf\u7248\u672c\u3002

                                                                • Falco -> Falco Driver -> Image Settings -> Log Level\uff1a\u8981\u5305\u542b\u5728\u65e5\u5fd7\u4e2d\u7684\u6700\u5c0f\u65e5\u5fd7\u7ea7\u522b\u3002

                                                                  \u53ef\u9009\u62e9\u503c\u4e3a\uff1aemergency, alert, critical, error, warning, notice, info, debug\u3002


5. Click the OK button at the bottom right to complete the installation.
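As referenced in the ebpf option above, a small sketch for checking the eBPF prerequisites on a node before selecting that driver kind (the kernel config path varies by distribution):

```bash
# Confirm the running kernel was built with the BPF JIT compiler
grep CONFIG_BPF_JIT /boot/config-"$(uname -r)"

# Check the JIT sysctl, and enable it if it is 0
sysctl net.core.bpf_jit_enable
sudo sysctl -w net.core.bpf_jit_enable=1
```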

                                                              "},{"location":"admin/security/falco.html","title":"\u4ec0\u4e48\u662f Falco","text":"

                                                              Falco \u662f\u4e00\u4e2a\u4e91\u539f\u751f\u8fd0\u884c\u65f6\u5b89\u5168\u5de5\u5177\uff0c\u65e8\u5728\u68c0\u6d4b\u5e94\u7528\u7a0b\u5e8f\u4e2d\u7684\u5f02\u5e38\u6d3b\u52a8\uff0c\u53ef\u7528\u4e8e\u76d1\u63a7 Kubernetes \u5e94\u7528\u7a0b\u5e8f\u548c\u5185\u90e8\u7ec4\u4ef6\u7684\u8fd0\u884c\u65f6\u5b89\u5168\u6027\u3002\u4ec5\u9700\u4e3a Falco \u64b0\u5199\u4e00\u5957\u89c4\u5219\uff0c\u5373\u53ef\u6301\u7eed\u76d1\u6d4b\u5e76\u76d1\u63a7\u5bb9\u5668\u3001\u5e94\u7528\u3001\u4e3b\u673a\u53ca\u7f51\u7edc\u7684\u5f02\u5e38\u6d3b\u52a8\u3002

                                                              "},{"location":"admin/security/falco.html#falco_1","title":"Falco \u80fd\u68c0\u6d4b\u5230\u4ec0\u4e48\uff1f","text":"

                                                              Falco \u53ef\u5bf9\u4efb\u4f55\u6d89\u53ca Linux \u7cfb\u7edf\u8c03\u7528\u7684\u884c\u4e3a\u8fdb\u884c\u68c0\u6d4b\u548c\u62a5\u8b66\u3002Falco \u7684\u8b66\u62a5\u53ef\u4ee5\u901a\u8fc7\u4f7f\u7528\u7279\u5b9a\u7684\u7cfb\u7edf\u8c03\u7528\u3001\u53c2\u6570\u4ee5\u53ca\u8c03\u7528\u8fdb\u7a0b\u7684\u5c5e\u6027\u6765\u89e6\u53d1\u3002\u4f8b\u5982\uff0cFalco \u53ef\u4ee5\u8f7b\u677e\u68c0\u6d4b\u5230\u5305\u62ec\u4f46\u4e0d\u9650\u4e8e\u4ee5\u4e0b\u4e8b\u4ef6\uff1a

                                                              • Kubernetes \u4e2d\u7684\u5bb9\u5668\u6216 pod \u5185\u6b63\u5728\u8fd0\u884c\u4e00\u4e2a shell \u3002
                                                              • \u5bb9\u5668\u4ee5\u7279\u6743\u6a21\u5f0f\u8fd0\u884c\uff0c\u6216\u4ece\u4e3b\u673a\u6302\u8f7d\u654f\u611f\u8def\u5f84\uff0c\u5982 /proc\u3002
                                                              • \u4e00\u4e2a\u670d\u52a1\u5668\u8fdb\u7a0b\u6b63\u5728\u751f\u6210\u4e00\u4e2a\u610f\u5916\u7c7b\u578b\u7684\u5b50\u8fdb\u7a0b\u3002
                                                              • \u610f\u5916\u8bfb\u53d6\u4e00\u4e2a\u654f\u611f\u6587\u4ef6\uff0c\u5982 /etc/shadow\u3002
                                                              • \u4e00\u4e2a\u975e\u8bbe\u5907\u6587\u4ef6\u88ab\u5199\u5230 /dev\u3002
                                                              • \u4e00\u4e2a\u6807\u51c6\u7684\u7cfb\u7edf\u4e8c\u8fdb\u5236\u6587\u4ef6\uff0c\u5982 ls\uff0c\u6b63\u5728\u8fdb\u884c\u4e00\u4e2a\u5916\u5411\u7684\u7f51\u7edc\u8fde\u63a5\u3002
                                                              • \u5728 Kubernetes \u96c6\u7fa4\u4e2d\u542f\u52a8\u4e00\u4e2a\u6709\u7279\u6743\u7684 Pod\u3002

                                                              \u5173\u4e8e Falco \u9644\u5e26\u7684\u66f4\u591a\u9ed8\u8ba4\u89c4\u5219\uff0c\u8bf7\u53c2\u8003 Rules \u6587\u6863\u3002

                                                              "},{"location":"admin/security/falco.html#falco_2","title":"\u4ec0\u4e48\u662f Falco \u89c4\u5219\uff1f","text":"

                                                              Falco \u89c4\u5219\u5b9a\u4e49 Falco \u5e94\u76d1\u89c6\u7684\u884c\u4e3a\u53ca\u4e8b\u4ef6\uff1b\u53ef\u4ee5\u5728 Falco \u89c4\u5219\u6587\u4ef6\u6216\u901a\u7528\u914d\u7f6e\u6587\u4ef6\u64b0\u5199\u89c4\u5219\u3002\u6709\u5173\u7f16\u5199\u3001\u7ba1\u7406\u548c\u90e8\u7f72\u89c4\u5219\u7684\u66f4\u591a\u4fe1\u606f\uff0c\u8bf7\u53c2\u9605 Falco Rules\u3002
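For illustration, a minimal sketch of a custom rules file in Falco's rule syntax; the condition fields (container.id, proc.name, and so on) are standard Falco fields, but treat the rule itself as a hypothetical example rather than a shipped default:

```bash
# Write a one-rule file and start Falco with it in addition to the default rules
cat > custom-rules.yaml <<'EOF'
- rule: Shell Spawned in Container
  desc: Detect an interactive shell started inside any container
  condition: container.id != host and proc.name in (bash, sh, zsh)
  output: "Shell in container (user=%user.name container=%container.name cmdline=%proc.cmdline)"
  priority: WARNING
EOF

falco -r /etc/falco/falco_rules.yaml -r custom-rules.yaml
```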

                                                              "},{"location":"admin/security/falco.html#falco_3","title":"\u4ec0\u4e48\u662f Falco \u8b66\u62a5\uff1f","text":"

                                                              \u8b66\u62a5\u662f\u53ef\u914d\u7f6e\u7684\u4e0b\u6e38\u64cd\u4f5c\uff0c\u53ef\u4ee5\u50cf\u8bb0\u5f55\u65e5\u5fd7\u4e00\u6837\u7b80\u5355\uff0c\u4e5f\u53ef\u4ee5\u50cf STDOUT \u5411\u5ba2\u6237\u7aef\u4f20\u9012 gRPC \u8c03\u7528\u4e00\u6837\u590d\u6742\u3002\u6709\u5173\u914d\u7f6e\u3001\u7406\u89e3\u548c\u5f00\u53d1\u8b66\u62a5\u7684\u66f4\u591a\u4fe1\u606f\uff0c\u8bf7\u53c2\u9605Falco \u8b66\u62a5\u3002Falco \u53ef\u4ee5\u5c06\u8b66\u62a5\u53d1\u9001\u81f3\uff1a

                                                              • \u6807\u51c6\u8f93\u51fa
                                                              • \u4e00\u4efd\u6587\u4ef6
                                                              • \u7cfb\u7edf\u65e5\u5fd7
                                                              • \u751f\u6210\u7684\u7a0b\u5e8f
                                                              • \u4e00\u4e2a HTTP[s] \u7aef\u70b9
                                                              • \u901a\u8fc7 gRPC API \u7684\u5ba2\u6237\u7aef
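These channels correspond to output stanzas in falco.yaml; a minimal sketch enabling three of them (the file path and URL are placeholders):

```bash
# Fragment to merge into /etc/falco/falco.yaml
cat <<'EOF'
stdout_output:
  enabled: true

file_output:
  enabled: true
  keep_alive: false
  filename: /var/log/falco_events.json

http_output:
  enabled: true
  url: http://falcosidekick.falco.svc:2801
EOF
```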
                                                              "},{"location":"admin/security/falco.html#falco_4","title":"Falco \u7531\u54ea\u4e9b\u90e8\u5206\u7ec4\u6210\uff1f","text":"

                                                              Falco \u7531\u4ee5\u4e0b\u51e0\u4e2a\u4e3b\u8981\u7ec4\u4ef6\u7ec4\u6210\uff1a

                                                              • \u7528\u6237\u7a7a\u95f4\u7a0b\u5e8f\uff1aCLI \u5de5\u5177\uff0c\u53ef\u7528\u4e8e\u4e0e Falco \u4ea4\u4e92\u3002\u7528\u6237\u7a7a\u95f4\u7a0b\u5e8f\u5904\u7406\u4fe1\u53f7\uff0c\u89e3\u6790\u6765\u81ea Falco \u9a71\u52a8\u7684\u4fe1\u606f\uff0c\u5e76\u53d1\u9001\u8b66\u62a5\u3002

                                                              • \u914d\u7f6e\uff1a\u5b9a\u4e49 Falco \u7684\u8fd0\u884c\u65b9\u5f0f\u3001\u8981\u65ad\u8a00\u7684\u89c4\u5219\u4ee5\u53ca\u5982\u4f55\u6267\u884c\u8b66\u62a5\u3002\u6709\u5173\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u53c2\u9605\u914d\u7f6e\u3002

                                                              • Driver\uff1a\u4e00\u6b3e\u9075\u5faa Falco \u9a71\u52a8\u89c4\u8303\u5e76\u53d1\u9001\u7cfb\u7edf\u8c03\u7528\u4fe1\u606f\u6d41\u7684\u8f6f\u4ef6\u3002\u5982\u679c\u4e0d\u5b89\u88c5\u9a71\u52a8\u7a0b\u5e8f\uff0c\u5c06\u65e0\u6cd5\u8fd0\u884c Falco\u3002\u76ee\u524d\uff0cFalco \u652f\u6301\u4ee5\u4e0b\u9a71\u52a8\u7a0b\u5e8f\uff1a

                                                                • \u57fa\u4e8e C++ \u5e93\u6784\u5efa libscap \u7684\u5185\u6838\u6a21\u5757 libsinsp\uff08\u9ed8\u8ba4\uff09
                                                                • \u7531\u76f8\u540c\u6a21\u5757\u6784\u5efa\u7684 BPF \u63a2\u9488
                                                                • \u7528\u6237\u7a7a\u95f4\u68c0\u6d4b

                                                                  \u6709\u5173\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u53c2\u9605 Falco \u9a71\u52a8\u7a0b\u5e8f\u3002

                                                              • \u63d2\u4ef6\uff1a\u53ef\u7528\u4e8e\u6269\u5c55 falco libraries/falco \u53ef\u6267\u884c\u6587\u4ef6\u7684\u529f\u80fd\uff0c\u6269\u5c55\u65b9\u5f0f\u662f\u901a\u8fc7\u6dfb\u52a0\u65b0\u7684\u4e8b\u4ef6\u6e90\u548c\u4ece\u4e8b\u4ef6\u4e2d\u63d0\u53d6\u4fe1\u606f\u7684\u65b0\u5b57\u6bb5\u3002 \u6709\u5173\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u53c2\u9605\u63d2\u4ef6\u3002

                                                              "},{"location":"admin/share/infer.html","title":"\u521b\u5efa\u63a8\u7406\u670d\u52a1","text":""},{"location":"admin/share/job.html","title":"\u521b\u5efa\u8bad\u7ec3\u4efb\u52a1","text":""},{"location":"admin/share/notebook.html","title":"\u4f7f\u7528 Notebook","text":"

                                                              Notebook \u901a\u5e38\u6307\u7684\u662f Jupyter Notebook \u6216\u7c7b\u4f3c\u7684\u4ea4\u4e92\u5f0f\u8ba1\u7b97\u73af\u5883\u3002 \u8fd9\u662f\u4e00\u79cd\u975e\u5e38\u6d41\u884c\u7684\u5de5\u5177\uff0c\u5e7f\u6cdb\u7528\u4e8e\u6570\u636e\u79d1\u5b66\u3001\u673a\u5668\u5b66\u4e60\u548c\u6df1\u5ea6\u5b66\u4e60\u7b49\u9886\u57df\u3002 \u672c\u9875\u8bf4\u660e\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4f7f\u7528 Notebook\u3002

                                                              "},{"location":"admin/share/notebook.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                              • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                                              • \u7528\u6237\u5df2\u6210\u529f\u6ce8\u518c
                                                              • \u7ba1\u7406\u5458\u4e3a\u7528\u6237\u5206\u914d\u4e86\u5de5\u4f5c\u7a7a\u95f4
                                                              • \u5df2\u51c6\u5907\u597d\u6570\u636e\u96c6\uff08\u4ee3\u7801\u3001\u6570\u636e\u7b49\uff09
                                                              "},{"location":"admin/share/notebook.html#notebook_1","title":"\u521b\u5efa\u548c\u4f7f\u7528 Notebook \u5b9e\u4f8b","text":"
                                                              1. \u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                                                              2. \u5bfc\u822a\u81f3 AI Lab -> \u8fd0\u7ef4\u7ba1\u7406 -> \u961f\u5217\u7ba1\u7406 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae

                                                              3. \u952e\u5165\u540d\u79f0\uff0c\u9009\u62e9\u96c6\u7fa4\u3001\u5de5\u4f5c\u7a7a\u95f4\u548c\u914d\u989d\u540e\uff0c\u70b9\u51fb \u786e\u5b9a

                                                              4. \u4ee5 \u7528\u6237\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5bfc\u822a\u81f3 AI Lab -> Notebook \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae

                                                              5. \u914d\u7f6e\u5404\u9879\u53c2\u6570\u540e\u70b9\u51fb \u786e\u5b9a

                                                                \u57fa\u672c\u4fe1\u606f\u8d44\u6e90\u914d\u7f6e\u9ad8\u7ea7\u914d\u7f6e

                                                                \u952e\u5165\u540d\u79f0\uff0c\u9009\u62e9\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\uff0c\u9009\u62e9\u521a\u521b\u5efa\u7684\u961f\u5217\uff0c\u70b9\u51fb \u4e00\u952e\u521d\u59cb\u5316

                                                                \u9009\u62e9 Notebook \u7c7b\u578b\uff0c\u914d\u7f6e\u5185\u5b58\u3001CPU\uff0c\u5f00\u542f GPU\uff0c\u521b\u5efa\u548c\u914d\u7f6e PVC\uff1a

                                                                \u5f00\u542f SSH \u5916\u7f51\u8bbf\u95ee\uff1a

                                                              6. \u81ea\u52a8\u8df3\u8f6c\u5230 Notebook \u5b9e\u4f8b\u5217\u8868\uff0c\u70b9\u51fb\u5b9e\u4f8b\u540d\u79f0

                                                              7. \u8fdb\u5165 Notebook \u5b9e\u4f8b\u8be6\u60c5\u9875\uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u6253\u5f00 \u6309\u94ae

                                                              8. \u8fdb\u5165\u4e86 Notebook \u5f00\u53d1\u73af\u5883\uff0c\u6bd4\u5982\u5728 /home/jovyan \u76ee\u5f55\u6302\u8f7d\u4e86\u6301\u4e45\u5377\uff0c\u53ef\u4ee5\u901a\u8fc7 git \u514b\u9686\u4ee3\u7801\uff0c\u901a\u8fc7 SSH \u8fde\u63a5\u540e\u4e0a\u4f20\u6570\u636e\u7b49\u3002

                                                              "},{"location":"admin/share/notebook.html#ssh-notebook","title":"\u901a\u8fc7 SSH \u8bbf\u95ee Notebook \u5b9e\u4f8b","text":"
                                                              1. \u5728\u81ea\u5df1\u7684\u7535\u8111\u4e0a\u751f\u6210 SSH \u5bc6\u94a5\u5bf9

                                                                \u5728\u81ea\u5df1\u7535\u8111\u4e0a\u6253\u5f00\u547d\u4ee4\u884c\uff0c\u6bd4\u5982\u5728 Windows \u4e0a\u6253\u5f00 git bash\uff0c\u8f93\u5165 ssh-keygen.exe -t rsa\uff0c\u7136\u540e\u4e00\u8def\u56de\u8f66\u3002

                                                              2. \u901a\u8fc7 cat ~/.ssh/id_rsa.pub \u7b49\u547d\u4ee4\u67e5\u770b\u5e76\u590d\u5236\u516c\u94a5

                                                              3. \u4ee5\u7528\u6237\u8eab\u4efd\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5728\u53f3\u4e0a\u89d2\u70b9\u51fb \u4e2a\u4eba\u4e2d\u5fc3 -> SSH \u516c\u94a5 -> \u5bfc\u5165 SSH \u516c\u94a5

                                                              4. \u8fdb\u5165 Notebook \u5b9e\u4f8b\u7684\u8be6\u60c5\u9875\uff0c\u590d\u5236 SSH \u7684\u94fe\u63a5

                                                              5. \u5728\u5ba2\u6237\u7aef\u4f7f\u7528 SSH \u8bbf\u95ee Notebook \u5b9e\u4f8b
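The copied link is an ordinary SSH command line; a sketch of what it typically looks like, with a hypothetical user, host, and port taken from the instance detail page:

```bash
# Values come from the instance's SSH link; these are placeholders
ssh -i ~/.ssh/id_rsa jovyan@notebook.example.com -p 32222

# Files can be uploaded with scp over the same connection details
scp -i ~/.ssh/id_rsa -P 32222 ./dataset.zip jovyan@notebook.example.com:/home/jovyan/
```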

Next step: Create a Training Job

                                                              "},{"location":"admin/share/quota.html","title":"\u914d\u989d\u7ba1\u7406","text":"

                                                              \u7528\u6237\u88ab\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4\u540e\uff0c\u5373\u53ef\u4e3a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u8d44\u6e90\uff0c\u7ba1\u7406\u8d44\u6e90\u914d\u989d\u3002

                                                              "},{"location":"admin/share/quota.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                              • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                                              • \u6709\u4e00\u4e2a\u53ef\u7528\u7684\u7ba1\u7406\u5458\u8d26\u53f7
                                                              "},{"location":"admin/share/quota.html#_3","title":"\u521b\u5efa\u548c\u7ba1\u7406\u914d\u989d","text":"
                                                              1. \u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                                                              2. \u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4\u548c\u547d\u540d\u7a7a\u95f4\uff0c\u5e76\u7ed1\u5b9a\u7528\u6237
                                                              3. \u4e3a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u8d44\u6e90\u914d\u989d

                                                              4. \u7ba1\u7406\u547d\u540d\u7a7a\u95f4 test-ns-1 \u7684\u8d44\u6e90\u914d\u989d\uff0c\u5176\u6570\u503c\u4e0d\u80fd\u8d85\u8fc7\u5de5\u4f5c\u7a7a\u95f4\u7684\u914d\u989d\u3002

                                                              5. \u4ee5 \u7528\u6237\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u67e5\u770b\u5176\u662f\u5426\u88ab\u5206\u914d\u4e86 test-ns-1 \u547d\u540d\u7a7a\u95f4\u3002

                                                              \u4e0b\u4e00\u6b65\uff1a\u521b\u5efa AI \u8d1f\u8f7d\u4f7f\u7528 GPU \u8d44\u6e90

                                                              "},{"location":"admin/share/workload.html","title":"\u521b\u5efa AI \u8d1f\u8f7d\u4f7f\u7528 GPU \u8d44\u6e90","text":"

                                                              \u7ba1\u7406\u5458\u4e3a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u8d44\u6e90\u914d\u989d\u540e\uff0c\u7528\u6237\u5c31\u53ef\u4ee5\u521b\u5efa AI \u5de5\u4f5c\u8d1f\u8f7d\u6765\u4f7f\u7528 GPU \u7b97\u529b\u8d44\u6e90\u3002

                                                              "},{"location":"admin/share/workload.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                              • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                                              • \u7528\u6237\u5df2\u6210\u529f\u6ce8\u518c
                                                              • \u7ba1\u7406\u5458\u4e3a\u7528\u6237\u5206\u914d\u4e86\u5de5\u4f5c\u7a7a\u95f4
                                                              • \u4e3a\u5de5\u4f5c\u7a7a\u95f4\u8bbe\u7f6e\u4e86\u8d44\u6e90\u914d\u989d
                                                              • \u5df2\u7ecf\u521b\u5efa\u4e86\u4e00\u4e2a\u96c6\u7fa4
                                                              "},{"location":"admin/share/workload.html#ai","title":"\u521b\u5efa AI \u8d1f\u8f7d\u6b65\u9aa4","text":"
                                                              1. \u4ee5\u7528\u6237\u8eab\u4efd\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                                                              2. \u5bfc\u822a\u81f3 \u5bb9\u5668\u7ba1\u7406 \uff0c\u9009\u62e9\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u70b9\u51fb \u5de5\u4f5c\u8d1f\u8f7d -> \u65e0\u72b6\u6001\u8d1f\u8f7d \uff0c \u70b9\u51fb\u53f3\u4fa7\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae

                                                              3. \u914d\u7f6e\u5404\u9879\u53c2\u6570\u540e\u70b9\u51fb \u786e\u5b9a

                                                                \u57fa\u672c\u4fe1\u606f\u5bb9\u5668\u914d\u7f6e\u5176\u4ed6

                                                                \u9009\u62e9\u81ea\u5df1\u7684\u547d\u540d\u7a7a\u95f4\u3002

                                                                \u8bbe\u7f6e\u955c\u50cf\uff0c\u914d\u7f6e CPU\u3001\u5185\u5b58\u3001GPU \u7b49\u8d44\u6e90\uff0c\u8bbe\u7f6e\u542f\u52a8\u547d\u4ee4\u3002

                                                                \u670d\u52a1\u914d\u7f6e\u548c\u9ad8\u7ea7\u914d\u7f6e\u53ef\u4ee5\u4f7f\u7528\u9ed8\u8ba4\u914d\u7f6e\u3002

                                                              4. \u81ea\u52a8\u8fd4\u56de\u65e0\u72b6\u6001\u8d1f\u8f7d\u5217\u8868\uff0c\u70b9\u51fb\u8d1f\u8f7d\u540d\u79f0

                                                              5. \u8fdb\u5165\u8be6\u60c5\u9875\uff0c\u53ef\u4ee5\u770b\u5230 GPU \u914d\u989d

                                                              6. \u4f60\u8fd8\u53ef\u4ee5\u8fdb\u5165\u63a7\u5236\u53f0\uff0c\u8fd0\u884c mx-smi \u547d\u4ee4\u67e5\u770b GPU \u8d44\u6e90
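Behind the form, the GPU request ends up in the container's resources stanza. A minimal sketch using the common nvidia.com/gpu resource name as an assumption; the actual resource key depends on the GPU vendor's device plugin (MetaX GPUs, for example, expose a vendor-specific name):

```bash
# Fragment of the Deployment's container spec as the form would render it
cat <<'EOF'
containers:
  - name: ai-workload
    image: pytorch/pytorch:latest   # placeholder image
    resources:
      limits:
        cpu: "4"
        memory: 8Gi
        nvidia.com/gpu: 1           # resource key is an assumption; vendor plugins differ
EOF
```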

Next step: Using Notebook

                                                              "},{"location":"admin/virtnest/best-practice/import-ubuntu.html","title":"\u5982\u4f55\u4ece VMWare \u5bfc\u5165\u4f20\u7edf Linux \u4e91\u4e3b\u673a\u5230\u4e91\u539f\u751f\u4e91\u4e3b\u673a\u5e73\u53f0","text":"

                                                              \u672c\u6587\u5c06\u8be6\u7ec6\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u547d\u4ee4\u884c\u5c06\u5916\u90e8\u5e73\u53f0 VMware \u4e0a\u7684 Linux \u4e91\u4e3b\u673a\u5bfc\u5165\u5230 AI \u7b97\u529b\u4e2d\u5fc3\u7684\u4e91\u4e3b\u673a\u4e2d\u3002

                                                              Info

                                                              \u672c\u6587\u6863\u5916\u90e8\u865a\u62df\u5e73\u53f0\u662f VMware vSphere Client\uff0c\u540e\u7eed\u7b80\u5199\u4e3a vSphere\u3002 \u6280\u672f\u4e0a\u662f\u4f9d\u9760 kubevirt cdi \u6765\u5b9e\u73b0\u7684\u3002\u64cd\u4f5c\u524d\uff0cvSphere \u4e0a\u88ab\u5bfc\u5165\u7684\u4e91\u4e3b\u673a\u9700\u8981\u5173\u673a\u3002 \u4ee5 Ubuntu \u64cd\u4f5c\u7cfb\u7edf\u7684\u4e91\u4e3b\u673a\u4e3a\u4f8b\u3002

                                                              "},{"location":"admin/virtnest/best-practice/import-ubuntu.html#vsphere","title":"\u83b7\u53d6 vSphere \u7684\u4e91\u4e3b\u673a\u57fa\u7840\u4fe1\u606f","text":"
                                                              • vSphere URL\uff1a\u76ee\u6807\u5e73\u53f0\u7684 URL \u5730\u5740\u4fe1\u606f

                                                              • vSphere SSL \u8bc1\u4e66\u6307\u7eb9 thumbprint\uff1a\u9700\u8981\u901a\u8fc7 openssl \u83b7\u53d6

                                                                openssl s_client -connect 10.64.56.11:443 </dev/null | openssl x509 -in /dev/stdin -fingerprint -sha1 -noout\n

                                                                \u8f93\u51fa\u7c7b\u4f3c\u4e8e\uff1a

                                                                Can't use SSL_get_servername\ndepth=0 CN = vcsa.daocloud.io\nverify error:num=20:unable to get local issuer certificate\nverify return:1\ndepth=0 CN = vcsa.daocloud.io\nverify error:num=21:unable to verify the first certificate\nverify return:1\ndepth=0 CN = vcsa.daocloud.io\nverify return:1\nDONE\nsha1 Fingerprint=C3:9D:D7:55:6A:43:11:2B:DE:BA:27:EA:3B:C2:13:AF:E4:12:62:4D  # \u6240\u9700\u503c\n
                                                              • vSphere \u8d26\u53f7\uff1a\u83b7\u53d6 vSphere \u7684\u8d26\u53f7\u4fe1\u606f\uff0c\u6ce8\u610f\u6743\u9650\u95ee\u9898

                                                              • vSphere \u5bc6\u7801\uff1a\u83b7\u53d6 vSphere \u7684\u5bc6\u7801\u4fe1\u606f

                                                              • \u9700\u8981\u5bfc\u5165\u4e91\u4e3b\u673a\u7684 UUID\uff08\u9700\u8981\u5728 vSphere \u7684 web \u9875\u9762\u83b7\u53d6\uff09

                                                                • \u8fdb\u5165 Vsphere \u9875\u9762\u4e2d\uff0c\u8fdb\u5165\u88ab\u5bfc\u5165\u4e91\u4e3b\u673a\u7684\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb \u7f16\u8f91\u914d\u7f6e \uff0c\u6b64\u65f6\u6253\u5f00\u6d4f\u89c8\u5668\u7684\u5f00\u53d1\u8005\u63a7\u5236\u53f0\uff0c \u70b9\u51fb \u7f51\u7edc \u2014> \u6807\u5934 \u627e\u5230\u5982\u4e0b\u56fe\u6240\u793a\u7684 URL\u3002

                                                                • \u70b9\u51fb \u54cd\u5e94 \uff0c\u5b9a\u4f4d\u5230 vmConfigContext \u2014> config \uff0c\u6700\u7ec8\u627e\u5230\u76ee\u6807\u503c uuid \u3002

                                                              • \u9700\u8981\u5bfc\u5165\u4e91\u4e3b\u673a\u7684 vmdk \u6587\u4ef6 path
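As referenced above, the thumbprint value can be cut straight out of the openssl output; a small sketch:

```bash
# Print only the colon-separated SHA-1 fingerprint (host and port are the vSphere endpoint)
openssl s_client -connect 10.64.56.11:443 </dev/null 2>/dev/null \
  | openssl x509 -fingerprint -sha1 -noout \
  | cut -d '=' -f 2
```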

                                                              "},{"location":"admin/virtnest/best-practice/import-ubuntu.html#_1","title":"\u7f51\u7edc\u914d\u7f6e","text":"

                                                              \u9700\u8981\u6839\u636e\u7f51\u7edc\u6a21\u5f0f\u7684\u4e0d\u540c\u914d\u7f6e\u4e0d\u540c\u7684\u4fe1\u606f\uff0c\u82e5\u6709\u56fa\u5b9a IP \u7684\u9700\u6c42\uff0c\u9700\u8981\u9009\u62e9 Bridge \u7f51\u7edc\u6a21\u5f0f

                                                              • \u521b\u5efa ovs \u7c7b\u578b\u7684 Multus CR\uff0c\u53ef\u53c2\u8003\u521b\u5efa Multus CR
                                                              • \u521b\u5efa\u5b50\u7f51\u53ca IP \u6c60\uff0c\u53c2\u8003\u521b\u5efa\u5b50\u7f51\u548c IP \u6c60

                                                                apiVersion: spiderpool.spidernet.io/v2beta1\nkind: SpiderIPPool\nmetadata:\n  name: test2\nspec:\n  ips:\n  - 10.20.3.90\n  subnet: 10.20.0.0/16\n  gateway: 10.20.0.1\n\n---\napiVersion: spiderpool.spidernet.io/v2beta1\nkind: SpiderIPPool\nmetadata:\n  name: test3\nspec:\n  ips:\n  - 10.20.240.1\n  subnet: 10.20.0.0/16\n  gateway: 10.20.0.1\n\n---\napiVersion: spiderpool.spidernet.io/v2beta1\nkind: SpiderMultusConfig\nmetadata:\n  name: test1\n  namespace: kube-system\nspec:\n  cniType: ovs\n  coordinator:\n    detectGateway: false\n    detectIPConflict: false\n    mode: auto\n    tunePodRoutes: true\n  disableIPAM: false\n  enableCoordinator: true\n  ovs:\n    bridge: br-1\n    ippools:\n    ipv4:\n    - test1\n    - test2\n
                                                              "},{"location":"admin/virtnest/best-practice/import-ubuntu.html#vsphere-secret","title":"\u83b7\u53d6 vSphere \u7684\u8d26\u53f7\u5bc6\u7801 secret","text":"
                                                              apiVersion: v1\nkind: Secret\nmetadata:\n  name: vsphere   # \u53ef\u66f4\u6539\n  labels:\n    app: containerized-data-importer  # \u8bf7\u52ff\u66f4\u6539\ntype: Opaque\ndata:\n  accessKeyId: \"username-base64\"\n  secretKey: \"password-base64\"\n
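The two data fields must be the base64-encoded vSphere username and password; a small sketch for producing them (the credentials shown are placeholders):

```bash
# -n keeps a trailing newline out of the encoded value
echo -n 'administrator@vsphere.local' | base64
echo -n 'your-password' | base64
```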
                                                              "},{"location":"admin/virtnest/best-practice/import-ubuntu.html#kubevirt-vm-yaml-vm","title":"\u7f16\u5199 kubevirt vm yaml \u521b\u5efa vm","text":"

                                                              Tip

                                                              \u82e5\u6709\u56fa\u5b9aIP\u9700\u6c42\uff0c\u5219\u8be5 yaml \u4e0e\u4f7f\u7528\u9ed8\u8ba4\u7f51\u7edc\u7684 yaml \u6709\u4e00\u4e9b\u533a\u522b\uff0c\u5df2\u6807\u6ce8\u3002

                                                              apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n    virtnest.io/alias-name: \"\"\n    virtnest.io/image-secret: \"\"\n  creationTimestamp: \"2024-05-23T06:46:28Z\"\n  finalizers:\n  - kubevirt.io/virtualMachineControllerFinalize\n  generation: 1\n  labels:\n    virtnest.io/os-family: Ubuntu\n    virtnest.io/os-version: \"22.04\"\n  name: export-ubuntu\n  namespace: default\nspec:\n  dataVolumeTemplates:\n  - metadata:\n      creationTimestamp: null\n      name: export-ubuntu-rootdisk\n      namespace: default\n    spec:\n      pvc:\n        accessModes:\n        - ReadWriteOnce\n        resources:\n          requests:\n            storage: 10Gi\n        storageClassName: local-path\n      source:\n        vddk:\n          backingFile: \"[A05-09-ShangPu-Local-DataStore] virtnest-export-ubuntu/virtnest-export-ubuntu.vmdk\"  \n          url: \"https://10.64.56.21\"                                                       \n          uuid: \"421d6135-4edb-df80-ee54-8c5b10cc4e78\"                                     \n          thumbprint: \"D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA\"            \n          secretRef: \"vsphere\"\n          initImageURL: \"release.daocloud.io/virtnest/vddk:v8\"\n  runStrategy: Manual\n  template:\n    metadata:\n      annotations:\n        ipam.spidernet.io/ippools: '[{\"cleangateway\":false,\"ipv4\":[\"test2\"]}]'  // \u8fd9\u91cc\u6dfb\u52a0 spiderpool \u7f51\u7edc\n      creationTimestamp: null\n    spec:\n      architecture: amd64\n      domain:\n        devices:\n          disks:\n          - bootOrder: 1\n            disk:\n              bus: virtio\n            name: rootdisk\n          interfaces:                                                          // \u4fee\u6539\u8fd9\u91cc\u7684\u7f51\u7edc\u914d\u7f6e\n          - bridge: {}\n            name: ovs-bridge0\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 4Gi\n      networks:                                                                // \u4fee\u6539\u8fd9\u91cc\u7684\u7f51\u7edc\u914d\u7f6e\n      - multus:\n          default: true\n          networkName: kube-system/test1\n        name: ovs-bridge0\n      volumes:\n      - dataVolume:\n          name: export-ubuntu-rootdisk\n        name: rootdisk\n
                                                              "},{"location":"admin/virtnest/best-practice/import-ubuntu.html#vnc","title":"\u8fdb\u5165 VNC \u68c0\u67e5\u662f\u5426\u6210\u529f\u8fd0\u884c","text":"
                                                              1. \u4fee\u6539\u4e91\u4e3b\u673a\u7684\u7f51\u7edc\u914d\u7f6e

                                                              2. \u67e5\u770b\u5f53\u524d\u7f51\u7edc

                                                                \u5728\u5b9e\u9645\u5bfc\u5165\u5b8c\u6210\u65f6\uff0c\u5982\u4e0b\u56fe\u6240\u793a\u7684\u914d\u7f6e\u5df2\u7ecf\u5b8c\u6210\u3002\u7136\u800c\uff0c\u9700\u8981\u6ce8\u610f\u7684\u662f\uff0cenp1s0\u63a5\u53e3\u5e76\u6ca1\u6709\u5305\u542binet\u5b57\u6bb5\uff0c\u56e0\u6b64\u65e0\u6cd5\u8fde\u63a5\u5230\u5916\u90e8\u7f51\u7edc\u3002

                                                              3. \u914d\u7f6e netplan

                                                                \u5728\u4e0a\u56fe\u6240\u793a\u7684\u914d\u7f6e\u4e2d\uff0c\u5c06 ethernets \u4e2d\u7684\u5bf9\u8c61\u66f4\u6539\u4e3a enp1s0\uff0c\u5e76\u4f7f\u7528 DHCP \u83b7\u5f97 IP \u5730\u5740\u3002

                                                              4. \u5c06 netplan \u914d\u7f6e\u5e94\u7528\u5230\u7cfb\u7edf\u7f51\u7edc\u914d\u7f6e\u4e2d

                                                                sudo netplan apply\n
                                                              5. \u5bf9\u5916\u90e8\u7f51\u7edc\u8fdb\u884c ping \u6d4b\u8bd5

                                                              6. \u901a\u8fc7 SSH \u5728\u8282\u70b9\u4e0a\u8bbf\u95ee\u4e91\u4e3b\u673a\u3002
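As referenced in step 3, a minimal netplan sketch that switches enp1s0 to DHCP; the file name is an assumption, so match whatever file already exists under /etc/netplan on your system:

```bash
sudo tee /etc/netplan/01-netcfg.yaml <<'EOF'
network:
  version: 2
  ethernets:
    enp1s0:
      dhcp4: true
EOF

sudo netplan apply
```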

                                                              "},{"location":"admin/virtnest/best-practice/import-windows.html","title":"\u5982\u4f55\u4ece VMWare \u5bfc\u5165\u4f20\u7edf Windows \u4e91\u4e3b\u673a\u5230\u4e91\u539f\u751f\u4e91\u4e3b\u673a\u5e73\u53f0","text":"

                                                              \u672c\u6587\u5c06\u8be6\u7ec6\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u547d\u4ee4\u884c\u5c06\u5916\u90e8\u5e73\u53f0 VMware \u4e0a\u7684\u4e91\u4e3b\u673a\u5bfc\u5165\u5230 AI \u7b97\u529b\u4e2d\u5fc3\u7684\u4e91\u4e3b\u673a\u4e2d\u3002

                                                              Info

                                                              \u672c\u6587\u6863\u5916\u90e8\u865a\u62df\u5e73\u53f0\u662f VMware vSphere Client\uff0c\u540e\u7eed\u7b80\u5199\u4e3a vSphere\u3002 \u6280\u672f\u4e0a\u662f\u4f9d\u9760 kubevirt cdi \u6765\u5b9e\u73b0\u7684\u3002\u64cd\u4f5c\u524d\uff0cvSphere \u4e0a\u88ab\u5bfc\u5165\u7684\u4e91\u4e3b\u673a\u9700\u8981\u5173\u673a\u3002 \u4ee5 Windows \u64cd\u4f5c\u7cfb\u7edf\u7684\u4e91\u4e3b\u673a\u4e3a\u4f8b\u3002

                                                              "},{"location":"admin/virtnest/best-practice/import-windows.html#_1","title":"\u73af\u5883\u51c6\u5907","text":"

                                                              \u5bfc\u5165\u524d\uff0c\u9700\u8981\u53c2\u8003\u7f51\u7edc\u914d\u7f6e\u51c6\u5907\u73af\u5883\u3002

                                                              "},{"location":"admin/virtnest/best-practice/import-windows.html#windows","title":"\u83b7\u53d6 Windows \u4e91\u4e3b\u673a\u7684\u4fe1\u606f","text":"

                                                              \u4e0e\u5bfc\u5165 Linux \u64cd\u4f5c\u7cfb\u7edf\u7684\u4e91\u4e3b\u673a\u7c7b\u4f3c\uff0c\u53ef\u53c2\u8003\u5982\u4f55\u4ece VMWare \u5bfc\u5165\u4f20\u7edf Linuxs \u4e91\u4e3b\u673a\u5230\u4e91\u539f\u751f\u4e91\u4e3b\u673a\u5e73\u53f0\u83b7\u53d6\u4ee5\u4e0b\u4fe1\u606f\uff1a

                                                              • \u83b7\u53d6 vSphere \u8d26\u53f7\u5bc6\u7801
                                                              • \u83b7\u53d6 vSphere \u4e91\u4e3b\u673a\u4fe1\u606f
                                                              "},{"location":"admin/virtnest/best-practice/import-windows.html#windows_1","title":"\u68c0\u67e5 Windows \u7684\u5f15\u5bfc\u7c7b\u578b","text":"

                                                              \u5c06\u5916\u90e8\u5e73\u53f0\u7684\u4e91\u4e3b\u673a\u5bfc\u5165\u5230 AI \u7b97\u529b\u4e2d\u5fc3\u7684\u865a\u62df\u5316\u5e73\u53f0\u4e2d\u65f6\uff0c\u9700\u8981\u6839\u636e\u4e91\u4e3b\u673a\u7684\u542f\u52a8\u7c7b\u578b\uff08BIOS \u6216 UEFI\uff09\u8fdb\u884c\u76f8\u5e94\u7684\u914d\u7f6e\uff0c\u4ee5\u786e\u4fdd\u4e91\u4e3b\u673a\u80fd\u591f\u6b63\u786e\u542f\u52a8\u548c\u8fd0\u884c\u3002

                                                              \u53ef\u4ee5\u901a\u8fc7\"\u7cfb\u7edf\u4fe1\u606f\"\u68c0\u67e5 Windows \u662f BIOS \u8fd8\u662f UEFI \u5f15\u5bfc\u3002\u5982\u679c\u662f UEFI \u5219\u9700\u8981\u5728 YAML \u6587\u4ef6\u4e2d\u6dfb\u52a0\u76f8\u5173\u4fe1\u606f\u3002

                                                              "},{"location":"admin/virtnest/best-practice/import-windows.html#_2","title":"\u5bfc\u5165\u8fc7\u7a0b","text":"

                                                              \u51c6\u5907 window.yaml \u6587\u4ef6\uff0c\u6ce8\u610f\u4ee5\u4e0b\u914d\u7f6e\u9879

                                                              • \u5f15\u5bfc Virtio \u9a71\u52a8\u7684 PVC
                                                              • \u78c1\u76d8\u603b\u7ebf\u7c7b\u578b\uff0c\u6839\u636e\u5f15\u5bfc\u7c7b\u578b\u8bbe\u7f6e\u4e3a sata \u6216 virtio
                                                              • \u5982\u679c\u4f7f\u7528 UEFI\uff0c\u9700\u8981\u6dfb\u52a0 UEFI \u914d\u7f6e
                                                              \u70b9\u51fb\u67e5\u770b window.yaml \u793a\u4f8b window.yaml
```yaml
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  labels:
    virtnest.io/os-family: windows
    virtnest.io/os-version: "server2019"
  name: export-window-21
  namespace: default
spec:
  dataVolumeTemplates:
    - metadata:
        name: export-window-21-rootdisk
      spec:
        pvc:
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 22Gi
          storageClassName: local-path
        source:
          vddk:
            backingFile: "[A05-09-ShangPu-Local-DataStore] virtnest-export-window/virtnest-export-window.vmdk"
            url: "https://10.64.56.21"
            uuid: "421d40f2-21a2-cfeb-d5c9-e7f8abfc2faa"
            thumbprint: "D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA"
            secretRef: "vsphere21"
            initImageURL: "release.daocloud.io/virtnest/vddk:v8"
    - metadata:
        name: export-window-21-datadisk
      spec:
        pvc:
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 1Gi
          storageClassName: local-path
        source:
          vddk:
            backingFile: "[A05-09-ShangPu-Local-DataStore] virtnest-export-window/virtnest-export-window_1.vmdk"
            url: "https://10.64.56.21"
            uuid: "421d40f2-21a2-cfeb-d5c9-e7f8abfc2faa"
            thumbprint: "D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA"
            secretRef: "vsphere21"
            initImageURL: "release.daocloud.io/virtnest/vddk:v8"
    # <1> PVC that carries the VirtIO drivers
    - metadata:
        name: virtio-disk
      spec:
        pvc:
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 10Mi
          storageClassName: local-path
        source:
          blank: {}
  running: true
  template:
    metadata:
      annotations:
        ipam.spidernet.io/ippools: '[{"cleangateway":false,"ipv4":["test86"]}]'
    spec:
      dnsConfig:
        nameservers:
        - 223.5.5.5
      domain:
        cpu:
          cores: 2
        memory:
          guest: 4Gi
        devices:
          disks:
            - bootOrder: 1
              disk:
                bus: sata   # <2> disk bus type; set to sata or virtio depending on the boot type
              name: rootdisk
            - bootOrder: 2
              disk:
                bus: sata   # <2> disk bus type; set to sata or virtio depending on the boot type
              name: datadisk
            # <1> disk entries that load the VirtIO drivers
            - bootOrder: 3
              disk:
                bus: virtio
              name: virtdisk
            - bootOrder: 4
              cdrom:
                bus: sata
              name: virtiocontainerdisk
          interfaces:
            - bridge: {}
              name: ovs-bridge0
        # <3> see "Check the Windows Boot Type" above;
        # the following must be added when UEFI is used
        features:
          smm:
            enabled: true
        firmware:
          bootloader:
            efi:
              secureBoot: false
        machine:
          type: q35
        resources:
          requests:
            memory: 4Gi
      networks:
        - multus:
            default: true
            networkName: kube-system/test1
          name: ovs-bridge0
      volumes:
        - dataVolume:
            name: export-window-21-rootdisk
          name: rootdisk
        - dataVolume:
            name: export-window-21-datadisk
          name: datadisk
        # <1> volumes that carry the VirtIO drivers
        - dataVolume:
            name: virtio-disk
          name: virtdisk
        - containerDisk:
            image: release-ci.daocloud.io/virtnest/kubevirt/virtio-win:v4.12.12-5
          name: virtiocontainerdisk
```
                                                              "},{"location":"admin/virtnest/best-practice/import-windows.html#vnc-virtio","title":"\u901a\u8fc7 VNC \u5b89\u88c5 VirtIO \u9a71\u52a8","text":"
1. Access and connect to the VM via VNC.
2. Download and install the VirtIO drivers that match the Windows version.
3. Enable Remote Desktop so that you can later connect to the VM over the Remote Desktop Protocol (RDP).
4. After the installation completes, reboot the VM and then update the YAML.
                                                              "},{"location":"admin/virtnest/best-practice/import-windows.html#yaml","title":"\u91cd\u542f\u540e\u66f4\u65b0 YAML","text":"\u70b9\u51fb\u67e5\u770b\u4fee\u6539\u540e\u7684 window.yaml \u793a\u4f8b window.yaml
# Remove the fields marked <1> and change the fields marked <2> from sata to virtio
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  labels:
    virtnest.io/os-family: windows
    virtnest.io/os-version: "server2019"
  name: export-window-21
  namespace: default
spec:
  dataVolumeTemplates:
    - metadata:
        name: export-window-21-rootdisk
      spec:
        pvc:
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 22Gi
          storageClassName: local-path
        source:
          vddk:
            backingFile: "[A05-09-ShangPu-Local-DataStore] virtnest-export-window/virtnest-export-window.vmdk"
            url: "https://10.64.56.21"
            uuid: "421d40f2-21a2-cfeb-d5c9-e7f8abfc2faa"
            thumbprint: "D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA"
            secretRef: "vsphere21"
            initImageURL: "release.daocloud.io/virtnest/vddk:v8"
    - metadata:
        name: export-window-21-datadisk
      spec:
        pvc:
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 1Gi
          storageClassName: local-path
        source:
          vddk:
            backingFile: "[A05-09-ShangPu-Local-DataStore] virtnest-export-window/virtnest-export-window_1.vmdk"
            url: "https://10.64.56.21"
            uuid: "421d40f2-21a2-cfeb-d5c9-e7f8abfc2faa"
            thumbprint: "D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA"
            secretRef: "vsphere21"
            initImageURL: "release.daocloud.io/virtnest/vddk:v8"
  running: true
  template:
    metadata:
      annotations:
        ipam.spidernet.io/ippools: '[{"cleangateway":false,"ipv4":["test86"]}]'
    spec:
      dnsConfig:
        nameservers:
        - 223.5.5.5
      domain:
        cpu:
          cores: 2
        memory:
          guest: 4Gi
        devices:
          disks:
            - bootOrder: 1
              disk:
                bus: virtio  # <2>
              name: rootdisk
            - bootOrder: 2
              disk:
                bus: virtio  # <2>
              name: datadisk
          interfaces:
            - bridge: {}
              name: ovs-bridge0
        # <3> see "Check whether Windows boots via BIOS or UEFI" above;
        # this block is only needed when UEFI is used
        features:
          smm:
            enabled: true
        firmware:
          bootloader:
            efi:
              secureBoot: false
        machine:
          type: q35
        resources:
          requests:
            memory: 4Gi
      networks:
        - multus:
            default: true
            networkName: kube-system/test1
          name: ovs-bridge0
      volumes:
        - dataVolume:
            name: export-window-21-rootdisk
          name: rootdisk
        - dataVolume:
            name: export-window-21-datadisk
          name: datadisk
                                                              "},{"location":"admin/virtnest/best-practice/import-windows.html#rdp","title":"RDP \u8bbf\u95ee\u548c\u9a8c\u8bc1","text":"
• Connect to the VM with an RDP client and log in with the default account admin and the password dangerous!123; a connection sketch follows this list.

• Verify network access and the data on the data disks.

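A quick way to test the RDP connection from a Linux workstation is FreeRDP (a sketch; replace <vm-ip> with the VM address you verified above):

    xfreerdp /v:<vm-ip> /u:admin /p:'dangerous!123'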
                                                              "},{"location":"admin/virtnest/best-practice/import-windows.html#linux-windows","title":"\u5bf9\u6bd4\u5bfc\u5165 Linux \u548c Windows \u4e91\u4e3b\u673a\u7684\u5dee\u5f02","text":"
• Windows may require the UEFI configuration.
• Windows usually requires installing VirtIO drivers.
• Importing a Windows VM with multiple disks usually does not require remounting the disks.
                                                              "},{"location":"admin/virtnest/best-practice/vm-windows.html","title":"\u521b\u5efa Windows \u4e91\u4e3b\u673a","text":"

This article describes how to create a Windows VM from the command line.

                                                              "},{"location":"admin/virtnest/best-practice/vm-windows.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
1. Before creating a Windows VM, make sure your environment is ready by following the dependencies and prerequisites for installing the VM module.
2. During creation, it is recommended to follow the official documentation: the Windows installation docs and the docs on installing Windows-related drivers.
3. Accessing Windows VMs via VNC is recommended.
                                                              "},{"location":"admin/virtnest/best-practice/vm-windows.html#iso","title":"\u5bfc\u5165 ISO \u955c\u50cf","text":"

Creating a Windows VM requires importing an ISO image mainly in order to install the Windows operating system. Unlike Linux, the Windows installation process usually boots from an installation disc or an ISO image file. Therefore, when creating a Windows VM, import the Windows installation ISO first so that the operating system can be installed normally.

Two ways to import an ISO image are described below:

1. (Recommended) Build a Docker image; see Building Images. A Dockerfile sketch follows this list.

2. (Not recommended) Use virtctl to import the image into a PVC.

You can refer to the following command:

virtctl image-upload -n <namespace> pvc <PVC name> \
  --image-path=<ISO file path> \
  --access-mode=ReadWriteOnce \
  --size=6G \
  --uploadproxy-url=<https://cdi-uploadproxy ClusterIP and port> \
  --force-bind \
  --insecure \
  --wait-secs=240 \
  --storage-class=<SC>

For example (all values below are illustrative placeholders, not taken from a real environment):

virtctl image-upload -n virtnest-demo pvc iso-win10 \
  --image-path=/data/iso/win10.iso \
  --access-mode=ReadWriteOnce \
  --size=6G \
  --uploadproxy-url=https://10.96.120.3:443 \
  --force-bind \
  --insecure \
  --wait-secs=240 \
  --storage-class=local-path
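For the recommended approach in item 1, packaging the ISO as a container image usually follows the standard KubeVirt containerDisk layout (a minimal sketch; the ISO filename, registry path, and tag are illustrative, and the Building Images doc remains the authoritative reference):

    # Place the ISO under /disk/ owned by the qemu user (uid 107) used by KubeVirt
    cat > Dockerfile <<'EOF'
    FROM scratch
    ADD --chown=107:107 win10.iso /disk/
    EOF
    docker build -t <your-registry>/virtnest/iso/win10:v1 .
    docker push <your-registry>/virtnest/iso/win10:v1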
                                                              "},{"location":"admin/virtnest/best-practice/vm-windows.html#yaml-windows","title":"YAML \u521b\u5efa Windows \u4e91\u4e3b\u673a","text":"

Creating a Windows VM from YAML is more flexible and easier to write and maintain. Three reference YAMLs are described below:

1. (Recommended) Use VirtIO drivers together with a Docker image.

• If you need storage capabilities (mounted disks), install the viostor driver.
• If you need networking capabilities, install the NetKVM driver.
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  annotations:
    kubevirt.io/latest-observed-api-version: v1
    kubevirt.io/storage-observed-api-version: v1
  labels:
    virtnest.io/os-family: Windows
    virtnest.io/os-version: '10'
  name: windows10-virtio
  namespace: default
spec:
  dataVolumeTemplates:
    - metadata:
        name: win10-system-virtio
        namespace: default
      spec:
        pvc:
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 32Gi
          storageClassName: local-path
        source:
          blank: {}
  running: true
  template:
    metadata:
      labels:
        app: windows10-virtio
        version: v1
        kubevirt.io/domain: windows10-virtio
    spec:
      architecture: amd64
      domain:
        cpu:
          cores: 8
          sockets: 1
          threads: 1
        devices:
          disks:
            - bootOrder: 1
              disk:
                bus: virtio # use virtio
              name: win10-system-virtio
            - bootOrder: 2
              cdrom:
                bus: sata # use sata for the ISO image
              name: iso-win10
            - bootOrder: 3
              cdrom:
                bus: sata # use sata for the containerdisk
              name: virtiocontainerdisk
          interfaces:
            - name: default
              masquerade: {}
        machine:
          type: q35
        resources:
          requests:
            memory: 8G
      networks:
        - name: default
          pod: {}
      volumes:
        - name: iso-win10
          persistentVolumeClaim:
            claimName: iso-win10
        - name: win10-system-virtio
          persistentVolumeClaim:
            claimName: win10-system-virtio
        - containerDisk:
            image: kubevirt/virtio-container-disk
          name: virtiocontainerdisk
2. (Not recommended) Combine VirtIO drivers with the virtctl tool to import the image into a PersistentVolumeClaim (PVC).

apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  annotations:
    kubevirt.io/latest-observed-api-version: v1
    kubevirt.io/storage-observed-api-version: v1
  labels:
    virtnest.io/os-family: Windows
    virtnest.io/os-version: '10'
  name: windows10-virtio
  namespace: default
spec:
  dataVolumeTemplates:
    - metadata:
        name: win10-system-virtio
        namespace: default
      spec:
        pvc:
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 32Gi
          storageClassName: local-path
        source:
          blank: {}
  running: true
  template:
    metadata:
      labels:
        app: windows10-virtio
        version: v1
        kubevirt.io/domain: windows10-virtio
    spec:
      architecture: amd64
      domain:
        cpu:
          cores: 8
          sockets: 1
          threads: 1
        devices:
          disks:
            # use virtio
            - bootOrder: 1
              disk:
                bus: virtio
              name: win10-system-virtio
            # use sata for the ISO image
            - bootOrder: 2
              cdrom:
                bus: sata
              name: iso-win10
            # use sata for the containerdisk
            - bootOrder: 3
              cdrom:
                bus: sata
              name: virtiocontainerdisk
          interfaces:
            - name: default
              masquerade: {}
        machine:
          type: q35
        resources:
          requests:
            memory: 8G
      networks:
        - name: default
          pod: {}
      volumes:
        - name: iso-win10
          persistentVolumeClaim:
            claimName: iso-win10
        - name: win10-system-virtio
          persistentVolumeClaim:
            claimName: win10-system-virtio
        - containerDisk:
            image: kubevirt/virtio-container-disk
          name: virtiocontainerdisk
3. (Not recommended) Without VirtIO drivers, use the virtctl tool to import the image into a PersistentVolumeClaim (PVC). The VM may use other driver types or default drivers to operate its disks and network devices.

apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  annotations:
    kubevirt.io/latest-observed-api-version: v1
    kubevirt.io/storage-observed-api-version: v1
  labels:
    virtnest.io/os-family: Windows
    virtnest.io/os-version: '10'
  name: windows10
  namespace: default
spec:
  dataVolumeTemplates:
    # Create the system disk; you can create multiple PVCs (disks)
    - metadata:
        name: win10-system
        namespace: default
      spec:
        pvc:
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 32Gi
          storageClassName: local-path
        source:
          blank: {}
  running: true
  template:
    metadata:
      labels:
        app: windows10
        version: v1
        kubevirt.io/domain: windows10
    spec:
      architecture: amd64
      domain:
        cpu:
          cores: 8
          sockets: 1
          threads: 1
        devices:
          disks:
            # no virtio driver, so use sata
            - bootOrder: 1
              disk:
                bus: sata
              name: win10-system
            # use sata for the ISO image
            - bootOrder: 2
              cdrom:
                bus: sata
              name: iso-win10
          interfaces:
            - name: default
              masquerade: {}
        machine:
          type: q35
        resources:
          requests:
            memory: 8G
      networks:
        - name: default
          pod: {}
      volumes:
        - name: iso-win10
          persistentVolumeClaim:
            claimName: iso-win10
        - name: win10-system
          persistentVolumeClaim:
            claimName: win10-system
                                                              "},{"location":"admin/virtnest/best-practice/vm-windows.html#_2","title":"\u4e91\u684c\u9762","text":"

Windows VMs usually need to be accessed via remote desktop; Microsoft Remote Desktop is recommended for controlling your VM.

                                                              Note

• Your Windows edition must support Remote Desktop in order to use Microsoft Remote Desktop.
• The Windows firewall must be turned off; a command sketch follows.
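Inside the Windows guest, the firewall can be turned off from an elevated command prompt (illustrative; the same can be done through the Control Panel):

    netsh advfirewall set allprofiles state off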
                                                              "},{"location":"admin/virtnest/best-practice/vm-windows.html#_3","title":"\u589e\u52a0\u6570\u636e\u76d8","text":"

Adding a data disk to a Windows VM works the same way as for a Linux VM. You can refer to the YAML example below:

apiVersion: kubevirt.io/v1
kind: VirtualMachine
<...>
spec:
  dataVolumeTemplates:
    # Add a data disk
    - metadata:
        name: win10-disk
        namespace: default
      spec:
        pvc:
          accessModes:
            - ReadWriteOnce
          resources:
            requests:
              storage: 16Gi
          storageClassName: hwameistor-storage-lvm-hdd
        source:
          blank: {}
  template:
    spec:
      domain:
        devices:
          disks:
            - bootOrder: 1
              disk:
                bus: virtio
              name: win10-system
            # Add a data disk
            - bootOrder: 2
              disk:
                bus: virtio
              name: win10-disk
            <....>
      volumes:
        <....>
        # Add a data disk
        - name: win10-disk
          persistentVolumeClaim:
            claimName: win10-disk
                                                              "},{"location":"admin/virtnest/best-practice/vm-windows.html#_4","title":"\u5feb\u7167\u3001\u514b\u9686\u3001\u5b9e\u65f6\u8fc1\u79fb","text":"

These capabilities are the same as for Linux VMs; refer directly to the Linux VM configuration.

                                                              "},{"location":"admin/virtnest/best-practice/vm-windows.html#windows_1","title":"\u8bbf\u95ee Windows \u4e91\u4e3b\u673a","text":"
1. After creation succeeds, go to the VM list page and confirm that the VM is running normally.

2. Click Console Access (VNC); the VM can be accessed normally.

                                                              "},{"location":"admin/virtnest/gpu/vm-gpu.html","title":"\u4e91\u4e3b\u673a\u914d\u7f6e GPU\uff08\u76f4\u901a\u6a21\u5f0f\uff09","text":"

This article describes the prerequisites for configuring a GPU when creating a VM.

The key to configuring GPUs for VMs is configuring the GPU Operator so that different software components are deployed on worker nodes, depending on the GPU workload those nodes are configured to run. Take the following three nodes as an example:

• The controller-node-1 node is configured to run containers.
• The work-node-1 node is configured to run VMs with GPU passthrough.
• The work-node-2 node is configured to run VMs with virtual vGPU.
                                                              "},{"location":"admin/virtnest/gpu/vm-gpu.html#_1","title":"\u5047\u8bbe\u3001\u9650\u5236\u548c\u4f9d\u8d56\u6027","text":"

A worker node can run GPU-accelerated containers, GPU-accelerated VMs with GPU passthrough, or GPU-accelerated VMs with vGPU, but not a combination of them.

1. Cluster administrators or developers must understand the cluster in advance and correctly label nodes to indicate the type of GPU workload they will run.
2. Worker nodes that run GPU-accelerated VMs with GPU passthrough or vGPU are assumed to be bare metal; if a worker node is itself a VM, GPU passthrough must be enabled on the VM platform, so consult your VM platform provider.
3. vGPU on Nvidia MIG is not supported.
4. The GPU Operator does not automatically install GPU drivers inside VMs.
                                                              "},{"location":"admin/virtnest/gpu/vm-gpu.html#iommu","title":"\u542f\u7528 IOMMU","text":"

To enable GPU passthrough, cluster nodes must have IOMMU enabled. See How to Enable IOMMU. If your cluster runs on VMs, consult your VM platform provider. A typical GRUB setup is sketched below.

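On a typical Intel bare-metal node this usually means adding kernel boot parameters and rebooting (a sketch under the assumption of a GRUB-based distro; use amd_iommu=on on AMD hardware, and update-grub on Debian-family systems):

    # Append to GRUB_CMDLINE_LINUX in /etc/default/grub:
    #   intel_iommu=on iommu=pt
    sudo grub2-mkconfig -o /boot/grub2/grub.cfg
    sudo reboot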
                                                              "},{"location":"admin/virtnest/gpu/vm-gpu.html#_2","title":"\u6807\u8bb0\u96c6\u7fa4\u8282\u70b9","text":"

Go to Container Management, select your worker cluster, click Modify Labels in the action bar of Node Management, and add a label to the node. Each node can have only one of these labels.

You can assign the label one of the following values: container, vm-passthrough, or vm-vgpu. An equivalent kubectl sketch follows.

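The same labels can also be applied from the command line (a sketch; the node names follow the three-node example above, and the label key nvidia.com/gpu.workload.config is the one referenced later on this page):

    kubectl label node controller-node-1 nvidia.com/gpu.workload.config=container
    kubectl label node work-node-1 nvidia.com/gpu.workload.config=vm-passthrough
    kubectl label node work-node-2 nvidia.com/gpu.workload.config=vm-vgpu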
                                                              "},{"location":"admin/virtnest/gpu/vm-gpu.html#nvidia-operator","title":"\u5b89\u88c5 Nvidia Operator","text":"
1. Go to Container Management, select your worker cluster, click Helm Apps -> Helm Templates, then select and install gpu-operator. The following fields in the YAML need to be modified (an equivalent CLI sketch follows this list):

    gpu-operator.sandboxWorkloads.enabled=true
    gpu-operator.vfioManager.enabled=true
    gpu-operator.sandboxDevicePlugin.enabled=true
    gpu-operator.sandboxDevicePlugin.version=v1.2.4   # (1)!
    gpu-operator.toolkit.version=v1.14.3-ubuntu20.04

    1. The version must be >= v1.2.4.
2. Wait for the installation to succeed.

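If you install from the command line instead of the UI, the same values can be passed with --set (a sketch; the release name and chart reference are assumptions, since this page installs through the Helm template UI):

    helm upgrade --install gpu-operator <repo>/gpu-operator \
      --set gpu-operator.sandboxWorkloads.enabled=true \
      --set gpu-operator.vfioManager.enabled=true \
      --set gpu-operator.sandboxDevicePlugin.enabled=true \
      --set gpu-operator.sandboxDevicePlugin.version=v1.2.4 \
      --set gpu-operator.toolkit.version=v1.14.3-ubuntu20.04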
                                                              "},{"location":"admin/virtnest/gpu/vm-gpu.html#virtnest-agent-cr","title":"\u5b89\u88c5 virtnest-agent \u5e76\u914d\u7f6e CR","text":"
1. Install virtnest-agent; see Install virtnest-agent.

2. Add vGPU and GPU passthrough to the Virtnest Kubevirt CR. The following example shows the key parts of the YAML after adding them:

    spec:
      configuration:
        developerConfiguration:
          featureGates:
          - GPU
          - DisableMDEVConfiguration
        permittedHostDevices: # (1)!
          mediatedDevices:            # (2)!
          - mdevNameSelector: GRID P4-1Q
            resourceName: nvidia.com/GRID_P4-1Q
          pciHostDevices:             # (3)!
          - externalResourceProvider: true
            pciVendorSelector: 10DE:1BB3
            resourceName: nvidia.com/GP104GL_TESLA_P4
    1. The information below is what needs to be filled in
    2. vGPU
    3. GPU passthrough
3. In the kubevirt CR YAML, permittedHostDevices is used to import VM devices. For vGPU, add mediatedDevices under it, structured as follows:

    mediatedDevices:
    - mdevNameSelector: GRID P4-1Q          # (1)!
      resourceName: nvidia.com/GRID_P4-1Q   # (2)!
    1. Device name
    2. The vGPU information the GPU Operator registers on the node
4. For GPU passthrough, add pciHostDevices under permittedHostDevices, structured as follows:

    pciHostDevices:
    - externalResourceProvider: true            # (1)!
      pciVendorSelector: 10DE:1BB3              # (2)!
      resourceName: nvidia.com/GP104GL_TESLA_P4 # (3)!
    1. Do not change the default
    2. The vendor ID of the current PCI device
    3. The GPU information the GPU Operator registers on the node
5. Example of getting vGPU information (applies to vGPU only): on a node labeled nvidia.com/gpu.workload.config=vm-vgpu (for example work-node-2), check the node information. nvidia.com/GRID_P4-1Q: 8 in Capacity indicates the available vGPUs:

    kubectl describe node work-node-2

    Capacity:
      cpu:                                 64
      devices.kubevirt.io/kvm:             1k
      devices.kubevirt.io/tun:             1k
      devices.kubevirt.io/vhost-net:       1k
      ephemeral-storage:                   102626232Ki
      hugepages-1Gi:                       0
      hugepages-2Mi:                       0
      memory:                              264010840Ki
      nvidia.com/GRID_P4-1Q:               8
      pods:                                110
    Allocatable:
      cpu:                                  64
      devices.kubevirt.io/kvm:              1k
      devices.kubevirt.io/tun:              1k
      devices.kubevirt.io/vhost-net:        1k
      ephemeral-storage:                    94580335255
      hugepages-1Gi:                        0
      hugepages-2Mi:                        0
      memory:                               263908440Ki
      nvidia.com/GRID_P4-1Q:                8
      pods:                                 110

    Then mdevNameSelector should be "GRID P4-1Q" and resourceName should be "GRID_P4-1Q".

6. Getting GPU passthrough information: on a node labeled nvidia.com/gpu.workload.config=vm-passthrough (work-node-1 in this example), check the node information. nvidia.com/GP104GL_TESLA_P4: 2 in Capacity is the number of available passthrough GPUs:

    kubectl describe node work-node-1

    Capacity:
      cpu:                            64
      devices.kubevirt.io/kvm:        1k
      devices.kubevirt.io/tun:        1k
      devices.kubevirt.io/vhost-net:  1k
      ephemeral-storage:              102626232Ki
      hugepages-1Gi:                  0
      hugepages-2Mi:                  0
      memory:                         264010840Ki
      nvidia.com/GP104GL_TESLA_P4:    2
      pods:                           110
    Allocatable:
      cpu:                            64
      devices.kubevirt.io/kvm:        1k
      devices.kubevirt.io/tun:        1k
      devices.kubevirt.io/vhost-net:  1k
      ephemeral-storage:              94580335255
      hugepages-1Gi:                  0
      hugepages-2Mi:                  0
      memory:                         263908440Ki
      nvidia.com/GP104GL_TESLA_P4:    2
      pods:                           110

    Then resourceName should be "GP104GL_TESLA_P4". How do you get pciVendorSelector? SSH to the target node work-node-1 and run lspci -nnk -d 10de: to get the Nvidia GPU PCI information; the bracketed vendor:device pair in the output is the pciVendorSelector value. A sample of the output follows.

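    Illustrative lspci output (the PCI address and exact model line depend on your hardware; the bracketed pair, here [10de:1bb3], is what goes into pciVendorSelector):

    3b:00.0 3D controller [0302]: NVIDIA Corporation GP104GL [Tesla P4] [10de:1bb3]
            Kernel driver in use: vfio-pci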
7. Tip for editing the kubevirt CR: if there are multiple GPUs of the same model, you only need to write one entry in the CR; there is no need to list every GPU.

    kubectl -n virtnest-system edit kubevirt kubevirt

    spec:
      configuration:
        developerConfiguration:
          featureGates:
          - GPU
          - DisableMDEVConfiguration
        permittedHostDevices: # (1)!
          mediatedDevices:                    # (2)!
          - mdevNameSelector: GRID P4-1Q
            resourceName: nvidia.com/GRID_P4-1Q
          pciHostDevices:                     # (3)!
          - externalResourceProvider: true
            pciVendorSelector: 10DE:1BB3
            resourceName: nvidia.com/GP104GL_TESLA_P4

    1. The information below is what needs to be filled in
    2. vGPU
    3. GPU passthrough; in the example above there are two TESLA P4 GPUs, but only one needs to be registered here
                                                              "},{"location":"admin/virtnest/gpu/vm-gpu.html#yaml-vm-gpu","title":"\u901a\u8fc7 YAML \u521b\u5efa VM \u5e76\u4f7f\u7528 GPU \u52a0\u901f","text":"

The only difference from an ordinary VM is adding the GPU information under devices; a verification sketch follows the YAML.

Complete YAML:
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: testvm-gpu1
  namespace: default
spec:
  dataVolumeTemplates:
  - metadata:
      creationTimestamp: null
      name: systemdisk-testvm-gpu1
      namespace: default
    spec:
      pvc:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 10Gi
        storageClassName: www
      source:
        registry:
          url: docker://release-ci.daocloud.io/virtnest/system-images/debian-12-x86_64:v1
  runStrategy: Manual
  template:
    metadata:
      creationTimestamp: null
    spec:
      domain:
        cpu:
          cores: 1
          sockets: 1
          threads: 1
        devices:
          disks:
          - bootOrder: 1
            disk:
              bus: virtio
            name: systemdisk-testvm-gpu1
          - disk:
              bus: virtio
            name: cloudinitdisk
          gpus:
          - deviceName: nvidia.com/GP104GL_TESLA_P4
            name: gpu-0-0
          - deviceName: nvidia.com/GP104GL_TESLA_P4
            name: gpu-0-1
          interfaces:
          - masquerade: {}
            name: default
        machine:
          type: q35
        resources:
          requests:
            memory: 2Gi
      networks:
      - name: default
        pod: {}
      volumes:
      - dataVolume:
          name: systemdisk-testvm-gpu1
        name: systemdisk-testvm-gpu1
      - cloudInitNoCloud:
          userDataBase64: I2Nsb3VkLWNvbmZpZwpzc2hfcHdhdXRoOiB0cnVlCmRpc2FibGVfcm9vdDogZmFsc2UKY2hwYXNzd2Q6IHsibGlzdCI6ICJyb290OmRhbmdlcm91cyIsIGV4cGlyZTogRmFsc2V9CgoKcnVuY21kOgogIC0gc2VkIC1pICIvI1w/UGVybWl0Um9vdExvZ2luL3MvXi4qJC9QZXJtaXRSb290TG9naW4geWVzL2ciIC9ldGMvc3NoL3NzaGRfY29uZmlnCiAgLSBzeXN0ZW1jdGwgcmVzdGFydCBzc2guc2VydmljZQ==
        name: cloudinitdisk
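After the VM is started, a quick way to confirm the GPU assignment (a sketch; testvm-gpu1 matches the YAML above):

    kubectl get vmi testvm-gpu1 -n default -o jsonpath='{.spec.domain.devices.gpus}'

Inside the guest, lspci should then list both Tesla P4 devices.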
                                                              "},{"location":"admin/virtnest/gpu/vm-vgpu.html","title":"\u4e91\u4e3b\u673a\u914d\u7f6e GPU\uff08vGPU\uff09","text":"

This article describes the prerequisites for configuring a GPU when creating a VM.

The key to configuring GPUs for VMs is configuring the GPU Operator so that different software components are deployed on worker nodes, depending on the GPU workload those nodes are configured to run. Take the following three nodes as an example:

• The controller-node-1 node is configured to run containers.
• The work-node-1 node is configured to run VMs with GPU passthrough.
• The work-node-2 node is configured to run VMs with virtual vGPU.
                                                              "},{"location":"admin/virtnest/gpu/vm-vgpu.html#_1","title":"\u5047\u8bbe\u3001\u9650\u5236\u548c\u4f9d\u8d56\u6027","text":"

A worker node can run GPU-accelerated containers, GPU-accelerated VMs with GPU passthrough, or GPU-accelerated VMs with vGPU, but not a combination of them.

1. A worker node can run GPU-accelerated containers, GPU-accelerated VMs with GPU passthrough, or GPU-accelerated VMs with vGPU individually; no combination of them is supported.
2. Cluster administrators or developers must understand the cluster in advance and correctly label nodes to indicate the type of GPU workload they will run.
3. Worker nodes that run GPU-accelerated VMs with GPU passthrough or vGPU are assumed to be bare metal; if a worker node is itself a VM, GPU passthrough must be enabled on the VM platform, so consult your VM platform provider.
4. vGPU on Nvidia MIG is not supported.
5. The GPU Operator does not automatically install GPU drivers inside VMs.
                                                              "},{"location":"admin/virtnest/gpu/vm-vgpu.html#iommu","title":"\u542f\u7528 IOMMU","text":"

To enable GPU passthrough, cluster nodes must have IOMMU enabled. See How to Enable IOMMU. If your cluster runs on VMs, consult your VM platform provider.

                                                              "},{"location":"admin/virtnest/gpu/vm-vgpu.html#vgpu-manager","title":"\u6784\u5efa vGPU Manager \u955c\u50cf","text":"

Note: building the vGPU Manager image is only required when using NVIDIA vGPU. If you plan to use GPU passthrough only, skip this section.

The steps to build the vGPU Manager image and push it to a registry are as follows:

1. Download the vGPU software from the NVIDIA Licensing Portal.

• Log in to the NVIDIA Licensing Portal and go to the Software Downloads page.
• The NVIDIA vGPU software is located on the Driver downloads tab of the Software Downloads page.
• In the filters, select VGPU + Linux and click Download to get the Linux KVM package. Extract the downloaded archive to obtain NVIDIA-Linux-x86_64-<version>-vgpu-kvm.run.

2. Open a terminal and clone the container-images/driver repository:

    git clone https://gitlab.com/nvidia/container-images/driver
    cd driver
3. Change to the vgpu-manager directory for your operating system:

    cd vgpu-manager/<your-os>
4. Copy the .run file extracted in step 1 into the current directory:

    cp <local-driver-download-directory>/*-vgpu-kvm.run ./
5. Set the environment variables:

    • PRIVATE_REGISTRY: the name of the private registry used to store the driver image.
    • VERSION: the version of the NVIDIA vGPU Manager downloaded from the NVIDIA software portal.
    • OS_TAG: must match the cluster node operating system version.
    • CUDA_VERSION: the CUDA base image version used to build the driver image.

    export PRIVATE_REGISTRY=my/private/registry VERSION=510.73.06 OS_TAG=ubuntu22.04 CUDA_VERSION=12.2.0
6. Build the NVIDIA vGPU Manager image:

    docker build \
      --build-arg DRIVER_VERSION=${VERSION} \
      --build-arg CUDA_VERSION=${CUDA_VERSION} \
      -t ${PRIVATE_REGISTRY}/vgpu-manager:${VERSION}-${OS_TAG} .
7. Push the NVIDIA vGPU Manager image to your registry:

    docker push ${PRIVATE_REGISTRY}/vgpu-manager:${VERSION}-${OS_TAG}
                                                              "},{"location":"admin/virtnest/gpu/vm-vgpu.html#_2","title":"\u6807\u8bb0\u96c6\u7fa4\u8282\u70b9","text":"

Go to Container Management, select your worker cluster, then click Node Management to open the list page. Click ┆ on the right side of the list, choose Modify Labels, and add a label to the node. Each node can have only one of these labels.

You can assign the label one of the following values: container, vm-passthrough, or vm-vgpu.

                                                              "},{"location":"admin/virtnest/gpu/vm-vgpu.html#nvidia-operator","title":"\u5b89\u88c5 Nvidia Operator","text":"
1. Go to Container Management, select your worker cluster, click Helm Apps -> Helm Templates, then select and install gpu-operator. The following fields in the YAML need to be modified:

    gpu-operator.sandboxWorkloads.enabled=true
    gpu-operator.vgpuManager.enabled=true
    gpu-operator.vgpuManager.repository=<your-register-url>      # (1)!
    gpu-operator.vgpuManager.image=vgpu-manager
    gpu-operator.vgpuManager.version=<your-vgpu-manager-version> # (2)!
    gpu-operator.vgpuDeviceManager.enabled=true

    1. The registry address from the "Build the vGPU Manager Image" step
    2. The VERSION from the "Build the vGPU Manager Image" step
2. Wait for the installation to succeed.

                                                              "},{"location":"admin/virtnest/gpu/vm-vgpu.html#virtnest-agent-cr","title":"\u5b89\u88c5 virtnest-agent \u5e76\u914d\u7f6e CR","text":"
1. Install virtnest-agent; see Install virtnest-agent.

2. Add vGPU and GPU passthrough to the Virtnest Kubevirt CR. The following example shows the key parts of the YAML after adding them:

    spec:
      configuration:
        developerConfiguration:
          featureGates:
          - GPU
          - DisableMDEVConfiguration
        permittedHostDevices: # (1)!
          mediatedDevices:            # (2)!
          - mdevNameSelector: GRID P4-1Q
            resourceName: nvidia.com/GRID_P4-1Q
          pciHostDevices:             # (3)!
          - externalResourceProvider: true
            pciVendorSelector: 10DE:1BB3
            resourceName: nvidia.com/GP104GL_TESLA_P4
    1. The information below is what needs to be filled in
    2. vGPU
    3. GPU passthrough
3. In the kubevirt CR YAML, permittedHostDevices is used to import VM devices. For vGPU, add mediatedDevices under it, structured as follows:

    mediatedDevices:
    - mdevNameSelector: GRID P4-1Q          # (1)!
      resourceName: nvidia.com/GRID_P4-1Q   # (2)!
    1. Device name
    2. The vGPU information the GPU Operator registers on the node
4. For GPU passthrough, add pciHostDevices under permittedHostDevices, structured as follows:

    pciHostDevices:
    - externalResourceProvider: true            # (1)!
      pciVendorSelector: 10DE:1BB3              # (2)!
      resourceName: nvidia.com/GP104GL_TESLA_P4 # (3)!
    1. Do not change the default
    2. The vendor ID of the current PCI device
    3. The GPU information the GPU Operator registers on the node
5. Example of getting vGPU information (applies to vGPU only): on a node labeled nvidia.com/gpu.workload.config=vm-vgpu (for example work-node-2), check the node information. nvidia.com/GRID_P4-1Q: 8 in Capacity indicates the available vGPUs:

    kubectl describe node work-node-2

    Capacity:
      cpu:                                 64
      devices.kubevirt.io/kvm:             1k
      devices.kubevirt.io/tun:             1k
      devices.kubevirt.io/vhost-net:       1k
      ephemeral-storage:                   102626232Ki
      hugepages-1Gi:                       0
      hugepages-2Mi:                       0
      memory:                              264010840Ki
      nvidia.com/GRID_P4-1Q:               8
      pods:                                110
    Allocatable:
      cpu:                                  64
      devices.kubevirt.io/kvm:              1k
      devices.kubevirt.io/tun:              1k
      devices.kubevirt.io/vhost-net:        1k
      ephemeral-storage:                    94580335255
      hugepages-1Gi:                        0
      hugepages-2Mi:                        0
      memory:                               263908440Ki
      nvidia.com/GRID_P4-1Q:                8
      pods:                                 110

    Then mdevNameSelector should be "GRID P4-1Q" and resourceName should be "GRID_P4-1Q".

6. Getting GPU passthrough information: on a node labeled nvidia.com/gpu.workload.config=vm-passthrough (work-node-1 in this example), check the node information. nvidia.com/GP104GL_TESLA_P4: 2 in Capacity is the number of available passthrough GPUs:

    kubectl describe node work-node-1

    Capacity:
      cpu:                            64
      devices.kubevirt.io/kvm:        1k
      devices.kubevirt.io/tun:        1k
      devices.kubevirt.io/vhost-net:  1k
      ephemeral-storage:              102626232Ki
      hugepages-1Gi:                  0
      hugepages-2Mi:                  0
      memory:                         264010840Ki
      nvidia.com/GP104GL_TESLA_P4:    2
      pods:                           110
    Allocatable:
      cpu:                            64
      devices.kubevirt.io/kvm:        1k
      devices.kubevirt.io/tun:        1k
      devices.kubevirt.io/vhost-net:  1k
      ephemeral-storage:              94580335255
      hugepages-1Gi:                  0
      hugepages-2Mi:                  0
      memory:                         263908440Ki
      nvidia.com/GP104GL_TESLA_P4:    2
      pods:                           110

    Then resourceName should be "GP104GL_TESLA_P4". How do you get pciVendorSelector? SSH to the target node work-node-1 and run lspci -nnk -d 10de: to get the Nvidia GPU PCI information; the bracketed vendor:device pair in the output (here 10de:1bb3) is the pciVendorSelector value.

7. Tip for editing the kubevirt CR: if there are multiple GPUs of the same model, you only need to write one entry in the CR; there is no need to list every GPU.

    kubectl -n virtnest-system edit kubevirt kubevirt

    spec:
      configuration:
        developerConfiguration:
          featureGates:
          - GPU
          - DisableMDEVConfiguration
        permittedHostDevices: # (1)!
          mediatedDevices:                    # (2)!
          - mdevNameSelector: GRID P4-1Q
            resourceName: nvidia.com/GRID_P4-1Q
          pciHostDevices:                     # (3)!
          - externalResourceProvider: true
            pciVendorSelector: 10DE:1BB3
            resourceName: nvidia.com/GP104GL_TESLA_P4

    1. The information below is what needs to be filled in
    2. vGPU
    3. GPU passthrough; in the example above there are two TESLA P4 GPUs, but only one needs to be registered here
                                                              "},{"location":"admin/virtnest/gpu/vm-vgpu.html#yaml-vm-gpu","title":"\u901a\u8fc7 YAML \u521b\u5efa VM \u5e76\u4f7f\u7528 GPU \u52a0\u901f","text":"

                                                              The only difference from an ordinary virtual machine is adding the GPU-related information under devices, as shown in the fragment below.
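
                                                              For reference, this is the GPU fragment (taken from the complete YAML below); deviceName must match the resourceName registered in the kubevirt CR:

                                                              devices:\n  gpus:\n  - deviceName: nvidia.com/GP104GL_TESLA_P4\n    name: gpu-0-0\n  - deviceName: nvidia.com/GP104GL_TESLA_P4\n    name: gpu-0-1\n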

                                                              Click to view the complete YAML
                                                              apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  name: testvm-gpu1\n  namespace: default\nspec:\n  dataVolumeTemplates:\n  - metadata:\n      creationTimestamp: null\n      name: systemdisk-testvm-gpu1\n      namespace: default\n    spec:\n      pvc:\n        accessModes:\n        - ReadWriteOnce\n        resources:\n          requests:\n            storage: 10Gi\n        storageClassName: www\n      source:\n        registry:\n          url: docker://release-ci.daocloud.io/virtnest/system-images/debian-12-x86_64:v1\n  runStrategy: Manual\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n          - bootOrder: 1\n            disk:\n              bus: virtio\n            name: systemdisk-testvm-gpu1\n          - disk:\n              bus: virtio\n            name: cloudinitdisk\n          gpus:\n          - deviceName: nvidia.com/GP104GL_TESLA_P4\n            name: gpu-0-0\n          - deviceName: nvidia.com/GP104GL_TESLA_P4\n            name: gpu-0-1\n          interfaces:\n          - masquerade: {}\n            name: default\n      machine:\n        type: q35\n      resources:\n        requests:\n          memory: 2Gi\n      networks:\n      - name: default\n        pod: {}\n      volumes:\n      - dataVolume:\n          name: systemdisk-testvm-gpu1\n        name: systemdisk-testvm-gpu1\n      - cloudInitNoCloud:\n          userDataBase64: I2Nsb3VkLWNvbmZpZwpzc2hfcHdhdXRoOiB0cnVlCmRpc2FibGVfcm9vdDogZmFsc2UKY2hwYXNzd2Q6IHsibGlzdCI6ICJyb290OmRhbmdlcm91cyIsIGV4cGlyZTogRmFsc2V9CgoKcnVuY21kOgogIC0gc2VkIC1pICIvI1w/UGVybWl0Um9vdExvZ2luL3MvXi4qJC9QZXJtaXRSb290TG9naW4geWVzL2ciIC9ldGMvc3NoL3NzaGRfY29uZmlnCiAgLSBzeXN0ZW1jdGwgcmVzdGFydCBzc2guc2VydmljZQ==\n        name: cloudinitdisk\n
                                                              "},{"location":"admin/virtnest/install/index.html","title":"\u5b89\u88c5\u4e91\u4e3b\u673a\u6a21\u5757","text":"

                                                              This page explains how to install the virtual machine module.

                                                              Info

                                                              The word virtnest appearing in the commands or scripts below is the internal development codename of the virtual machine module.

                                                              "},{"location":"admin/virtnest/install/index.html#virtnest-helm","title":"\u914d\u7f6e virtnest helm \u4ed3\u5e93","text":"

                                                              helm-charts repository address: https://release.daocloud.io/harbor/projects/10/helm-charts/virtnest/versions

                                                              helm repo add virtnest-release https://release.daocloud.io/chartrepo/virtnest\nhelm repo update virtnest-release\n

                                                              If you want to try the latest development build of virtnest, add the following repository address (development builds of virtnest are extremely unstable):

                                                              helm repo add virtnest-release-ci https://release-ci.daocloud.io/chartrepo/virtnest\nhelm repo update virtnest-release-ci\n
                                                              "},{"location":"admin/virtnest/install/index.html#virtnest","title":"\u9009\u62e9\u60a8\u60f3\u5b89\u88c5\u7684 virtnest \u7248\u672c","text":"

                                                              Installing the latest version is recommended.

                                                              [root@master ~]# helm search repo virtnest-release/virtnest --versions\nNAME                   CHART VERSION  APP VERSION  DESCRIPTION\nvirtnest-release/virtnest  0.6.0          v0.6.0       A Helm chart for virtnest\n
                                                              "},{"location":"admin/virtnest/install/index.html#namespace","title":"\u521b\u5efa namespace","text":"
                                                              kubectl create namespace virtnest-system\n
                                                              "},{"location":"admin/virtnest/install/index.html#_2","title":"\u6267\u884c\u5b89\u88c5\u6b65\u9aa4","text":"
                                                              helm install virtnest virtnest-release/virtnest -n virtnest-system --version 0.6.0\n
                                                              "},{"location":"admin/virtnest/install/index.html#_3","title":"\u5347\u7ea7","text":""},{"location":"admin/virtnest/install/index.html#virtnest-helm_1","title":"\u66f4\u65b0 virtnest helm \u4ed3\u5e93","text":"
                                                              helm repo update virtnest-release\n
                                                              "},{"location":"admin/virtnest/install/index.html#-set","title":"\u5907\u4efd --set \u53c2\u6570","text":"

                                                              Before upgrading the virtnest version, we recommend running the following command to back up the --set parameters of the previous release:

                                                              helm get values virtnest -n virtnest-system -o yaml > bak.yaml\n
                                                              "},{"location":"admin/virtnest/install/index.html#helm-upgrade","title":"\u6267\u884c helm upgrade","text":"
                                                              helm upgrade virtnest virtnest-release/virtnest \\\n    -n virtnest-system \\\n    -f ./bak.yaml \\\n    --version 0.6.0\n
                                                              "},{"location":"admin/virtnest/install/index.html#_4","title":"\u5378\u8f7d","text":"
                                                              helm delete virtnest -n virtnest-system\n
                                                              "},{"location":"admin/virtnest/install/install-dependency.html","title":"\u5b89\u88c5\u4f9d\u8d56\u548c\u524d\u63d0\u6761\u4ef6","text":"

                                                              This page describes the dependencies and prerequisites for installing the virtual machine module.

                                                              Info

                                                              The word virtnest appearing in the commands or scripts below is the internal development codename of the virtual machine module.

                                                              "},{"location":"admin/virtnest/install/install-dependency.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":""},{"location":"admin/virtnest/install/install-dependency.html#411","title":"\u64cd\u4f5c\u7cfb\u7edf\u5185\u6838\u7248\u672c\u9700\u8981\u5728 4.11 \u4ee5\u4e0a","text":"

                                                              The operating system kernel version on all nodes of the target cluster must be greater than 4.11 (see the kubevirt issue for details). Run the following command to check the kernel version:

                                                              uname -a\n

                                                              Example output:

                                                              Linux master 6.5.3-1.el7.elrepo.x86_64 #1 SMP PREEMPT_DYNAMIC Wed Sep 13 11:46:28 EDT 2023 x86_64 x86_64 x86_64 GNU/Linux\n
                                                              "},{"location":"admin/virtnest/install/install-dependency.html#cpu-x86-64-v2","title":"CPU \u9700\u652f\u6301 x86-64-v2 \u53ca\u4ee5\u4e0a\u7684\u6307\u4ee4\u96c6","text":"

                                                              Use the following script to check whether the current node's CPU is supported:

                                                              Note

                                                              If an error unrelated to the output appears (as shown below), you can ignore it; it does not affect the final result.

                                                              Example
                                                              $ sh detect-cpu.sh\ndetect-cpu.sh: line 3: fpu: command not found\n
                                                              cat <<EOF > detect-cpu.sh\n#!/bin/sh -eu\n\nflags=$(cat /proc/cpuinfo | grep flags | head -n 1 | cut -d: -f2)\n\nsupports_v2='awk \"/cx16/&&/lahf/&&/popcnt/&&/sse4_1/&&/sse4_2/&&/ssse3/ {found=1} END {exit !found}\"'\nsupports_v3='awk \"/avx/&&/avx2/&&/bmi1/&&/bmi2/&&/f16c/&&/fma/&&/abm/&&/movbe/&&/xsave/ {found=1} END {exit !found}\"'\nsupports_v4='awk \"/avx512f/&&/avx512bw/&&/avx512cd/&&/avx512dq/&&/avx512vl/ {found=1} END {exit !found}\"'\n\necho \"$flags\" | eval $supports_v2 || exit 2 && echo \"CPU supports x86-64-v2\"\necho \"$flags\" | eval $supports_v3 || exit 3 && echo \"CPU supports x86-64-v3\"\necho \"$flags\" | eval $supports_v4 || exit 4 && echo \"CPU supports x86-64-v4\"\nEOF\nchmod +x detect-cpu.sh\nsh detect-cpu.sh\n
                                                              "},{"location":"admin/virtnest/install/install-dependency.html#_3","title":"\u6240\u6709\u8282\u70b9\u5fc5\u987b\u542f\u7528\u786c\u4ef6\u865a\u62df\u5316\uff08\u5d4c\u5957\u865a\u62df\u5316\uff09","text":"
                                                              • Run the following command to check:

                                                                virt-host-validate qemu\n
                                                                # Success case\nQEMU: Checking for hardware virtualization                                 : PASS\nQEMU: Checking if device /dev/kvm exists                                   : PASS\nQEMU: Checking if device /dev/kvm is accessible                            : PASS\nQEMU: Checking if device /dev/vhost-net exists                             : PASS\nQEMU: Checking if device /dev/net/tun exists                               : PASS\nQEMU: Checking for cgroup 'cpu' controller support                         : PASS\nQEMU: Checking for cgroup 'cpuacct' controller support                     : PASS\nQEMU: Checking for cgroup 'cpuset' controller support                      : PASS\nQEMU: Checking for cgroup 'memory' controller support                      : PASS\nQEMU: Checking for cgroup 'devices' controller support                     : PASS\nQEMU: Checking for cgroup 'blkio' controller support                       : PASS\nQEMU: Checking for device assignment IOMMU support                         : PASS\nQEMU: Checking if IOMMU is enabled by kernel                               : PASS\nQEMU: Checking for secure guest support                                    : WARN (Unknown if this platform has Secure Guest support)\n\n# Failure case\nQEMU: Checking for hardware virtualization                                 : FAIL (Only emulated CPUs are available, performance will be significantly limited)\nQEMU: Checking if device /dev/vhost-net exists                             : PASS\nQEMU: Checking if device /dev/net/tun exists                               : PASS\nQEMU: Checking for cgroup 'memory' controller support                      : PASS\nQEMU: Checking for cgroup 'memory' controller mount-point                  : PASS\nQEMU: Checking for cgroup 'cpu' controller support                         : PASS\nQEMU: Checking for cgroup 'cpu' controller mount-point                     : PASS\nQEMU: Checking for cgroup 'cpuacct' controller support                     : PASS\nQEMU: Checking for cgroup 'cpuacct' controller mount-point                 : PASS\nQEMU: Checking for cgroup 'cpuset' controller support                      : PASS\nQEMU: Checking for cgroup 'cpuset' controller mount-point                  : PASS\nQEMU: Checking for cgroup 'devices' controller support                     : PASS\nQEMU: Checking for cgroup 'devices' controller mount-point                 : PASS\nQEMU: Checking for cgroup 'blkio' controller support                       : PASS\nQEMU: Checking for cgroup 'blkio' controller mount-point                   : PASS\nWARN (Unknown if this platform has IOMMU support)\n
                                                              • Install virt-host-validate:

                                                                Install on CentOS Install on Ubuntu
                                                                yum install -y qemu-kvm libvirt virt-install bridge-utils\n
                                                                apt install qemu-kvm libvirt-daemon-system libvirt-clients bridge-utils\n
                                                              • How to enable hardware virtualization:

                                                                The method for enabling hardware virtualization varies by platform. For vSphere, for example, refer to the official VMware documentation. A host-side check is sketched below.
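
                                                                As a hedged host-side check (assuming a Linux KVM hypervisor; the module name depends on the CPU vendor), you can verify that nested virtualization is enabled:

                                                                cat /sys/module/kvm_intel/parameters/nested   # Intel: Y or 1 means enabled\ncat /sys/module/kvm_amd/parameters/nested     # AMD\n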

                                                              "},{"location":"admin/virtnest/install/install-dependency.html#docker-engine","title":"\u5982\u679c\u4f7f\u7528 Docker Engine \u4f5c\u4e3a\u5bb9\u5668\u8fd0\u884c\u65f6","text":"

                                                              If the cluster uses Docker Engine as the container runtime, the Docker Engine version must be greater than 20.10.10. A quick version check is shown below.
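
                                                              A minimal sketch for checking the version on each node (assuming the docker CLI is available):

                                                              docker version --format '{{.Server.Version}}'\n# Example output: 24.0.7 -- must be greater than 20.10.10\n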

                                                              "},{"location":"admin/virtnest/install/install-dependency.html#iommu","title":"\u5efa\u8bae\u5f00\u542f IOMMU","text":"

                                                              To prepare for subsequent features, enabling IOMMU is recommended. A sketch of how to check and enable it follows.
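
                                                              A hedged sketch for checking and enabling IOMMU on a GRUB-based x86 host (the exact kernel parameter and GRUB config path depend on your CPU vendor and distribution):

                                                              # Check whether the kernel was booted with IOMMU enabled\ngrep -E 'intel_iommu=on|amd_iommu=on' /proc/cmdline\n\n# If not, add intel_iommu=on (Intel) or amd_iommu=on (AMD) to\n# GRUB_CMDLINE_LINUX in /etc/default/grub, then rebuild and reboot:\ngrub2-mkconfig -o /boot/grub2/grub.cfg && reboot\n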

                                                              "},{"location":"admin/virtnest/install/offline-install.html","title":"\u79bb\u7ebf\u5347\u7ea7","text":"

                                                              This page explains how to install or upgrade the virtual machine module after downloading it from the download center.

                                                              Info

                                                              The word virtnest appearing in the commands or scripts below is the internal development codename of the virtual machine module.

                                                              "},{"location":"admin/virtnest/install/offline-install.html#_2","title":"\u4ece\u5b89\u88c5\u5305\u4e2d\u52a0\u8f7d\u955c\u50cf","text":"

                                                              You can load the images in either of the two ways below. When an image registry exists in your environment, it is recommended to use chart-syncer to sync the images to the registry, which is more efficient and convenient.

                                                              "},{"location":"admin/virtnest/install/offline-install.html#chart-syncer","title":"chart-syncer \u540c\u6b65\u955c\u50cf\u5230\u955c\u50cf\u4ed3\u5e93","text":"
                                                              1. Create load-image.yaml

                                                                Note

                                                                All parameters in this YAML file are required. You need a private image registry; modify the configuration accordingly.

                                                                Chart repo installed Chart repo not installed

                                                                If a chart repo is already installed in the current environment, chart-syncer can sync the chart directly to it.

                                                                load-image.yaml
                                                                source:\n  intermediateBundlesPath: virtnest-offline # Path relative to where the charts-syncer command is executed, not the path between this YAML file and the offline bundle\ntarget:\n  containerRegistry: 10.16.10.111 # Change to your image registry url\n  containerRepository: release.daocloud.io/virtnest # Change to your image repository\n  repo:\n    kind: HARBOR # Can also be any other supported Helm Chart repository kind\n    url: http://10.16.10.111/chartrepo/release.daocloud.io # Change to your chart repo url\n    auth:\n      username: \"admin\" # Your registry username\n      password: \"Harbor12345\" # Your registry password\n  containers:\n    auth:\n      username: \"admin\" # Your registry username\n      password: \"Harbor12345\" # Your registry password\n

                                                                If no chart repo is installed in the current environment, chart-syncer also supports exporting the chart as a tgz file and storing it at a specified path.

                                                                load-image.yaml
                                                                source:\n  intermediateBundlesPath: virtnest-offline # Path relative to where the charts-syncer command is executed, not the path between this YAML file and the offline bundle\ntarget:\n  containerRegistry: 10.16.10.111 # Change to your image registry url\n  containerRepository: release.daocloud.io/virtnest # Change to your image repository\n  repo:\n    kind: LOCAL\n    path: ./local-repo # Local path for the chart\n  containers:\n    auth:\n      username: \"admin\" # Your registry username\n      password: \"Harbor12345\" # Your registry password\n
                                                              2. Run the image sync command.

                                                                charts-syncer sync --config load-image.yaml\n
                                                              "},{"location":"admin/virtnest/install/offline-install.html#docker-containerd","title":"Docker \u6216 containerd \u76f4\u63a5\u52a0\u8f7d","text":"

                                                              Extract and load the image files.

                                                              1. Extract the tar archive.

                                                                tar xvf virtnest.bundle.tar\n

                                                                After successful extraction, you get 3 files:

                                                                • hints.yaml
                                                                • images.tar
                                                                • original-chart
                                                              2. Load the images locally into Docker or containerd.

                                                                Docker containerd
                                                                docker load -i images.tar\n
                                                                ctr -n k8s.io image import images.tar\n

                                                              Note

                                                              Every node needs to perform the Docker or containerd image-loading operation. After loading, tag the images so that the Registry and Repository remain consistent with those used at installation; a sketch follows.
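
                                                              A minimal, hedged retagging example (the image name, tag, and registry addresses are purely illustrative; repeat for each image contained in images.tar):

                                                              docker tag release.daocloud.io/virtnest/virtnest:v0.6.0 10.16.10.111/virtnest/virtnest:v0.6.0\n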

                                                              "},{"location":"admin/virtnest/install/offline-install.html#_3","title":"\u5347\u7ea7","text":"

                                                              There are two upgrade methods. Based on the preceding operations, choose the corresponding upgrade plan:

                                                              Upgrade via helm repo Upgrade via chart package
                                                              1. Check whether the virtual machine helm repository exists.

                                                                helm repo list | grep virtnest\n

                                                                If the result is empty or the following message appears, proceed to the next step; otherwise skip it.

                                                                Error: no repositories to show\n
                                                              2. Add the virtual machine helm repository.

                                                                helm repo add virtnest http://{harbor url}/chartrepo/{project}\n
                                                              3. Update the virtual machine helm repository.

                                                                helm repo update virtnest # (1)\n
                                                                1. A helm version that is too old will cause this to fail; if it fails, try running helm repo update
                                                              4. Choose the virtual machine version you want to install (the latest version is recommended).

                                                                helm search repo virtnest/virtnest --versions\n
                                                                [root@master ~]# helm search repo virtnest/virtnest --versions\nNAME                   CHART VERSION  APP VERSION  DESCRIPTION\nvirtnest/virtnest  0.2.0          v0.2.0       A Helm chart for virtnest\n...\n
                                                              5. Back up the --set parameters.

                                                                Before upgrading the virtual machine version, we recommend running the following command to back up the --set parameters of the old release.

                                                                helm get values virtnest -n virtnest-system -o yaml > bak.yaml\n
                                                              6. Update the virtnest CRDs

                                                                helm pull virtnest/virtnest --version 0.2.0 && tar -zxf virtnest-0.2.0.tgz\nkubectl apply -f virtnest/crds\n
                                                              7. Run helm upgrade.

                                                                Before upgrading, we recommend overriding the global.imageRegistry field in bak.yaml with the image registry address currently in use.

                                                                export imageRegistry={your registry address}\n
                                                                helm upgrade virtnest virtnest/virtnest \\\n  -n virtnest-system \\\n  -f ./bak.yaml \\\n  --set global.imageRegistry=$imageRegistry \\\n  --version 0.2.0\n
                                                              1. Back up the --set parameters.

                                                                Before upgrading the virtual machine version, we recommend running the following command to back up the --set parameters of the old release.

                                                                helm get values virtnest -n virtnest-system -o yaml > bak.yaml\n
                                                              2. Update the virtnest CRDs

                                                                kubectl apply -f ./crds\n
                                                              3. Run helm upgrade.

                                                                Before upgrading, we recommend overriding the global.imageRegistry field in bak.yaml with the image registry address currently in use.

                                                                export imageRegistry={your registry address}\n
                                                                helm upgrade virtnest . \\\n  -n virtnest-system \\\n  -f ./bak.yaml \\\n  --set global.imageRegistry=$imageRegistry\n
                                                              "},{"location":"admin/virtnest/install/virtnest-agent.html","title":"\u5b89\u88c5 virtnest-agent","text":"

                                                              This article describes how to install virtnest-agent in a specified cluster.

                                                              "},{"location":"admin/virtnest/install/virtnest-agent.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              Before installing virtnest-agent, the following prerequisite must be met:

                                                              • The operating system kernel version must be above 4.11.
                                                              "},{"location":"admin/virtnest/install/virtnest-agent.html#_2","title":"\u5b89\u88c5\u6b65\u9aa4","text":"

                                                              A cluster must first have the virtnest-agent component installed via Helm before the virtual machine capabilities can be used.

                                                              1. Click Container Management in the left navigation bar, then click Virtual Machines. If the virtnest-agent component is not installed, the virtual machine capabilities cannot be used, and you will be prompted to install it in the required cluster.

                                                              2. Select the required cluster, click Helm Apps in the left navigation bar, then click Helm Charts to view the chart list.

                                                              3. Search for the virtnest-agent component, enter the component details, select a suitable version, and click the Install button to install it.

                                                              4. On the installation form page, fill in the basic information and click OK to complete the installation.

                                                              5. Click the Virtual Machines navigation item again; the virtual machine list now appears, and the virtual machine capabilities can be used normally. (A CLI-based sketch of the same installation follows.)
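
                                                              For reference, a hedged CLI sketch of the installation above (the chart name virtnest-agent and the repository alias are assumptions based on the naming used in this document, not a confirmed interface; verify with helm search first):

                                                              helm search repo virtnest-agent --versions   # confirm the chart name and pick a version\nhelm install virtnest-agent virtnest-release/virtnest-agent -n virtnest-system\n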

                                                              "},{"location":"admin/virtnest/quickstart/index.html","title":"\u521b\u5efa\u4e91\u4e3b\u673a","text":"

                                                              This article describes two ways to create a virtual machine: from an image and from a YAML file.

                                                              Based on KubeVirt, virtual machines are managed as cloud-native applications and integrate seamlessly with containers, allowing users to easily deploy virtual machine applications and enjoy an experience consistent with container applications.

                                                              "},{"location":"admin/virtnest/quickstart/index.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              Before creating a virtual machine, the following prerequisites must be met:

                                                              • Expose hardware-assisted virtualization to the guest operating system.
                                                              • Install virtnest-agent in the specified cluster; the operating system kernel version must be above 4.11.
                                                              • Create a namespace.
                                                              • Prepare images in advance. The platform has three built-in images (shown below); to build your own image, refer to the open-source image-building projects.
                                                              • When configuring the network, if you choose the Passt network mode, you need to upgrade to version 0.4.0 or above.
                                                              "},{"location":"admin/virtnest/quickstart/index.html#_3","title":"\u955c\u50cf\u521b\u5efa","text":"

                                                              Follow the steps below to create a virtual machine from an image.

                                                              1. Click Container Management in the left navigation bar, then click Virtual Machines to enter the Virtual Machines page.

                                                              2. On the virtual machine list page, click Create Virtual Machine -> select Create from Image.

                                                              3. On the image creation page, fill in the basic information, image configuration, storage and network, and login settings in turn, then click OK in the lower-right corner of the page to complete the creation.

                                                                The system automatically returns to the virtual machine list. Click \u2507 on the right side of the list to perform operations such as power off/on, restart, clone, update, create snapshot, console access (VNC), and delete. Clone and snapshot capabilities depend on the selected storage pool.

                                                              "},{"location":"admin/virtnest/quickstart/index.html#_4","title":"\u57fa\u672c\u4fe1\u606f","text":"

                                                              On the Create Virtual Machine page, enter the information according to the table below and click Next.

                                                              • Name: up to 63 characters; may contain only lowercase letters, digits, and hyphens (\u201c-\u201d), and must start and end with a lowercase letter or digit. Names must be unique within the same namespace and cannot be changed after the virtual machine is created. (A validation pattern is sketched after this list.)
                                                              • Alias: any characters are allowed, up to 60 characters.
                                                              • Cluster: select the cluster in which to deploy the new virtual machine. If GPU capability is required, select a cluster with GPU/vGPU cards.
                                                              • Namespace: select the namespace in which to deploy the new virtual machine. If the required namespace cannot be found, you can create a new one following the on-page prompt.
                                                              • Labels/Annotations: add the required label/annotation information to the virtual machine.
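
                                                              These name rules follow the Kubernetes DNS-1123 label convention; as a minimal sketch, a candidate name can be pre-checked with the following pattern (the sample name is hypothetical):

                                                              # Max 63 chars, lowercase alphanumerics and '-', must start/end alphanumeric\necho \"my-vm-01\" | grep -E '^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?$'\n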
                                                              "},{"location":"admin/virtnest/quickstart/index.html#_5","title":"\u955c\u50cf\u914d\u7f6e","text":"

                                                              Fill in the image-related information according to the table below, then click Next.

                                                              1. Image source: three types of sources are supported.

                                                                • Image registry: the image is stored in a container image registry and can be selected from the registry as needed;
                                                                • HTTP: the image is stored on a file server using the HTTP protocol; HTTPS:// and HTTP:// prefixes are supported;
                                                                • Object storage (S3): virtual machine images retrieved via the object storage protocol (S3) are supported. For object storage files that require no authentication, use the HTTP source.
                                                              2. The following is the platform's built-in image information, including operating system, version, and image address. Custom virtual machine images are also supported.

                                                                Operating System Version Image Address CentOS CentOS 7.9 release-ci.daocloud.io/virtnest/system-images/centos-7.9-x86_64:v1 Ubuntu Ubuntu 22.04 release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1 Debian Debian 12 release-ci.daocloud.io/virtnest/system-images/debian-12-x86_64:v1
                                                              3. Image secret: only the default (Opaque) type of secret is supported; for the specific format, refer to Creating a Secret.

                                                                The platform's built-in images are stored in the bootstrap cluster, and the bootstrap cluster's image registry is not encrypted, so no secret needs to be selected when using a built-in image.

                                                              Note

                                                              Requirements for CPU and memory hot-plug configuration: the virtnest version must be no lower than v0.10.0 and the virtnest-agent version no lower than v0.7.0; live migration is supported (ensure the PVC access mode is ReadWriteMany). The installed versions can be checked as sketched below.
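
                                                              A hedged way to check the installed chart versions (the release and namespace names follow the installation section of this document):

                                                              helm list -n virtnest-system   # the CHART/APP VERSION columns show the installed virtnest version\n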

                                                              1. Resource configuration: an integer number of CPUs is recommended; a decimal value is rounded up. Hot-plugging of CPU and memory is supported.

                                                              2. GPU configuration: enabling the GPU feature requires meeting the prerequisites; see Configuring GPU for Virtual Machines (Nvidia) for details. Virtual machines support two types, Nvidia GPU and Nvidia vGPU. After selecting the required type, select the corresponding GPU model and the number of cards.

                                                              "},{"location":"admin/virtnest/quickstart/index.html#_6","title":"\u5b58\u50a8\u4e0e\u7f51\u7edc\u914d\u7f6e","text":"
                                                              • Storage:

                                                                • Storage is closely tied to virtual machine functionality; it mainly uses Kubernetes persistent volumes and storage classes to provide flexible and scalable virtual machine storage. For example, the virtual machine image is stored in a PVC, which supports cloning and snapshotting together with other data.

                                                                • System disk: the system creates a VirtIO-type rootfs system disk by default, used to store the operating system and data.

                                                                • Data disk: a data disk is a storage device in a virtual machine used to store user data, application data, or other non-OS files. Compared with the system disk, a data disk is optional and can be added or removed dynamically as needed. Its capacity can also be configured flexibly according to requirements.

                                                                • Block storage is used by default. To use the clone and snapshot features, make sure your storage pool has a corresponding VolumeSnapshotClass; refer to the example below. To use the live migration feature, make sure your storage supports, and you have selected, the ReadWriteMany access mode.

                                                                  In most cases, such a VolumeSnapshotClass is not created automatically during storage installation, so you need to create it manually. The following is an example of creating a VolumeSnapshotClass for HwameiStor:

                                                                  kind: VolumeSnapshotClass\napiVersion: snapshot.storage.k8s.io/v1\nmetadata:\n  name: hwameistor-storage-lvm-snapshot\n  annotations:\n    snapshot.storage.kubernetes.io/is-default-class: \"true\"\nparameters:\n  snapsize: \"1073741824\"\ndriver: lvm.hwameistor.io\ndeletionPolicy: Delete\n
                                                                • Run the following command to check whether the VolumeSnapshotClass was created successfully.

                                                                  kubectl get VolumeSnapshotClass\n
                                                                • View the created SnapshotClass and confirm that its provisioner (driver) attribute matches the Driver attribute of the storage pool; a sketch of this check follows.
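
                                                                A hedged sketch of that comparison (the VolumeSnapshotClass name comes from the example above; the StorageClass name is a placeholder):

                                                                # Driver of the snapshot class\nkubectl get volumesnapshotclass hwameistor-storage-lvm-snapshot -o jsonpath='{.driver}'\n# Provisioner of the StorageClass backing the storage pool\nkubectl get storageclass <your-storageclass> -o jsonpath='{.provisioner}'\n# The two values should match, e.g. lvm.hwameistor.io\n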

                                                              • Network:

                                                                • Network configurations can be combined as needed according to the table below.

                                                                  Network Mode CNI Spiderpool Installed NIC Mode Fixed IP Live Migration Masquerade (NAT) Calico \u274c Single NIC \u274c \u2705 Cilium \u274c Single NIC \u274c \u2705 Flannel \u274c Single NIC \u274c \u2705 Bridge OVS \u2705 Multiple NICs \u2705 \u2705

                                                                • Network modes are divided into Masquerade (NAT) and Bridge; Bridge mode can only be used after the Spiderpool component has been installed.

                                                                  • The Masquerade (NAT) network mode is selected by default, using the default eth0 NIC.
                                                                  • If the Spiderpool component is installed in the cluster, the Bridge mode can be selected; Bridge mode supports multiple NICs.

                                                                • Add a NIC

                                                                  • In Passt (passthrough)/Bridge modes, NICs can be added manually. Click Add NIC to configure the NIC IP pool. Select a Multus CR matching the network mode; if none exists, create one yourself.
                                                                  • If the Use Default IP Pool switch is on, the default IP pool in the Multus CR configuration is used. If the switch is off, select an IP pool manually.

                                                              "},{"location":"admin/virtnest/quickstart/index.html#_7","title":"\u767b\u5f55\u8bbe\u7f6e","text":"
                                                              • Username/Password: you can log in to the virtual machine with a username and password.
                                                              • SSH: when the SSH login method is selected, an SSH key can be bound to the virtual machine for future logins.
                                                              "},{"location":"admin/virtnest/quickstart/index.html#yaml","title":"YAML \u521b\u5efa","text":"

                                                              In addition to the image-based method, you can create a virtual machine more quickly with a YAML file.

                                                              Go to the virtual machine list page and click the Create from YAML button.

                                                              Click to view a YAML example for creating a virtual machine
                                                              apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  name: demo\n  namespace: default\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        name: systemdisk-demo\n        namespace: default\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 10Gi\n          storageClassName: hwameistor-storage-lvm-hdd\n        source:\n          registry:\n            url: >-\n              docker://release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  runStrategy: Always\n  template:\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio\n              name: systemdisk-demo\n            - disk:\n                bus: virtio\n              name: cloudinitdisk\n          interfaces:\n            - masquerade: {}\n              name: default\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n        - name: default\n          pod: {}\n      volumes:\n        - dataVolume:\n            name: systemdisk-demo\n          name: systemdisk-demo\n        - cloudInitNoCloud:\n            userDataBase64: >-\n              I2Nsb3VkLWNvbmZpZwpzc2hfcHdhdXRoOiB0cnVlCmRpc2FibGVfcm9vdDogZmFsc2UKY2hwYXNzd2Q6IHsibGlzdCI6ICJyb290OjEyMzQ1NiIsIGV4cGlyZTogRmFsc2V9CgoKcnVuY21kOgogIC0gc2VkIC1pICIvI1w/UGVybWl0Um9vdExvZ2luL3MvXi4qJC9QZXJtaXRSb290TG9naW4geWVzL2ciIC9ldGMvc3NoL3NzaGRfY29uZmlnCiAgLSBzeXN0ZW1jdGwgcmVzdGFydCBzc2guc2VydmljZQ==\n          name: cloudinitdisk\n
                                                              "},{"location":"admin/virtnest/quickstart/access.html","title":"\u8fde\u63a5\u4e91\u4e3b\u673a","text":"

                                                              This article describes two ways to connect to a virtual machine: console access (VNC) and terminal.

                                                              "},{"location":"admin/virtnest/quickstart/access.html#_2","title":"\u7ec8\u7aef","text":"

                                                              Accessing a virtual machine through the terminal is more flexible and lightweight, but it cannot directly display a graphical interface, is less interactive, and does not support multiple simultaneous terminal sessions.

                                                              Click Container Management in the left navigation bar, then click Virtual Machines to enter the list page. Click \u2507 on the right side of the list to access the virtual machine through the terminal.

                                                              "},{"location":"admin/virtnest/quickstart/access.html#vnc","title":"\u63a7\u5236\u53f0\u8bbf\u95ee\uff08VNC\uff09","text":"

                                                              Accessing a virtual machine via VNC provides full graphical access to and control of the remote machine, allowing intuitive operation with better interactivity, but performance is somewhat affected, and multiple simultaneous sessions are not supported.

                                                              For Windows systems, choose VNC.

                                                              Click Container Management in the left navigation bar, then click Virtual Machines to enter the list page. Click \u2507 on the right side of the list to access the virtual machine through console access (VNC).

                                                              "},{"location":"admin/virtnest/quickstart/detail.html","title":"\u4e91\u4e3b\u673a\u8be6\u60c5","text":"

                                                              After a virtual machine is created successfully, you can enter its details page to view basic information, configuration information, GPU information, overview, storage, network, snapshots, events, and more.

                                                              Click Container Management in the left navigation bar, then click Clusters to enter the details of the cluster where the virtual machine resides; click the virtual machine name to view its details.

                                                              "},{"location":"admin/virtnest/quickstart/detail.html#_2","title":"\u57fa\u672c\u4fe1\u606f","text":"

                                                              The basic information of a virtual machine includes status, alias, cluster, namespace, IP, labels, node, username, password, creation time, and so on. Among these:

                                                              • Status: the current running state of the virtual machine (Running/Processing/Powered off/Error).
                                                              • IP: the IP address of the virtual machine. A virtual machine with multiple NICs is assigned multiple IP addresses.
                                                              "},{"location":"admin/virtnest/quickstart/detail.html#gpu","title":"\u914d\u7f6e\u4fe1\u606f & GPU \u914d\u7f6e","text":"

                                                              Virtual machine configuration information includes:

                                                              • Operating system: the operating system installed on the virtual machine to run programs.
                                                              • Image address: a link to a virtual disk file or operating system installation medium, used by the virtualization software to load and install the operating system.
                                                              • Network mode: the network mode configured for the virtual machine, Bridge or Masquerade (NAT).
                                                              • CPU and memory: the resources allocated to the virtual machine.

                                                              GPU configuration information includes the GPU type, GPU model, and number of cards.

                                                              "},{"location":"admin/virtnest/quickstart/detail.html#_3","title":"\u5176\u4ed6\u4fe1\u606f","text":"\u6982\u89c8\u50a8\u5b58\u7f51\u7edc\u5feb\u7167\u4e8b\u4ef6\u5217\u8868

                                                              The virtual machine overview page shows its monitoring content. Note that if the insight-agent component is not installed, monitoring information cannot be obtained.

                                                              Shows the storage used by the virtual machine, including system disk and data disk information.

                                                              Shows the network configuration of the virtual machine, including the Multus CR, NIC name, IP address, and other information.

                                                              If snapshots have been created, this page shows the virtual machine's snapshot information and supports restoring the virtual machine from a snapshot.

                                                              The event list contains the various state changes, operation records, and system messages that occur during the virtual machine's lifecycle.

                                                              "},{"location":"admin/virtnest/quickstart/nodeport.html","title":"\u901a\u8fc7 NodePort \u8bbf\u95ee\u4e91\u4e3b\u673a","text":"

                                                              This article describes how to access a virtual machine via NodePort.

                                                              "},{"location":"admin/virtnest/quickstart/nodeport.html#_1","title":"\u73b0\u6709\u8bbf\u95ee\u65b9\u5f0f\u7684\u7f3a\u9677","text":"
                                                              1. \u4e91\u4e3b\u673a\u652f\u6301\u901a\u8fc7 VNC \u6216\u8005 console \u8bbf\u95ee\uff0c\u4f46\u8fd9\u4e24\u79cd\u8bbf\u95ee\u65b9\u5f0f\u90fd\u6709\u4e00\u4e2a\u5f0a\u7aef\uff0c\u65e0\u6cd5\u591a\u7ec8\u7aef\u540c\u65f6\u5728\u7ebf\u3002

                                                              2. \u901a\u8fc7 NodePort \u5f62\u5f0f\u7684 Service\uff0c\u53ef\u4ee5\u5e2e\u52a9\u89e3\u51b3\u8fd9\u4e2a\u95ee\u9898\u3002

                                                              "},{"location":"admin/virtnest/quickstart/nodeport.html#service","title":"\u521b\u5efa service \u7684\u65b9\u5f0f","text":"
                                                              1. \u901a\u8fc7\u5bb9\u5668\u7ba1\u7406\u9875\u9762

                                                                • \u9009\u62e9\u76ee\u6807\u8bbf\u95ee\u7684\u4e91\u4e3b\u673a\u6240\u5728\u96c6\u7fa4\u9875\u9762\u521b\u5efa\u670d\u52a1\uff08Service\uff09
                                                                • \u9009\u62e9\u8bbf\u95ee\u7c7b\u578b\u4e3a\u8282\u70b9\u8bbf\u95ee\uff08NodePort\uff09
                                                                • \u9009\u62e9\u547d\u540d\u7a7a\u95f4\uff08\u4e91\u4e3b\u673a\u6240\u5728 namespace\uff09
                                                                • \u6807\u7b7e\u9009\u62e9\u5668\u586b\u5199 vm.kubevirt.io/name: you-vm-name
                                                                • \u7aef\u53e3\u914d\u7f6e\uff1a\u534f\u8bae\u9009\u62e9 TCP\uff0c\u7aef\u53e3\u540d\u79f0\u81ea\u5b9a\u4e49\uff0c\u670d\u52a1\u7aef\u53e3\u3001\u5bb9\u5668\u7aef\u53e3\u586b\u5199 22
                                                              2. \u521b\u5efa\u6210\u529f\u540e\uff0c\u5c31\u53ef\u4ee5\u901a\u8fc7 ssh username@nodeip -p port \u6765\u8bbf\u95ee\u4e91\u4e3b\u673a

                                                              "},{"location":"admin/virtnest/quickstart/nodeport.html#kubectl-svc","title":"\u901a\u8fc7 kubectl \u521b\u5efa svc","text":"
                                                              1. \u7f16\u5199 YAML \u6587\u4ef6\uff0c\u793a\u4f8b\u5982\u4e0b\uff1a

                                                                apiVersion: v1\nkind: Service\n  metadata:\n    name: test-ssh\nspec:\n  ports:\n  - name: tcp-ssh\n    nodePort: 32090\n    protocol: TCP\n    // 22 \u7aef\u53e3\uff0c\u4e0d\u8981\u66f4\u6539\n    port: 22 \n    targetPort: 22\n  selector:\n    // \u4e91\u4e3b\u673a\u7684 name\n\u00a0 \u00a0vm.kubevirt.io/name: test-image-s3\n  type: NodePort\n
                                                              2. \u6267\u884c\u4ee5\u4e0b\u547d\u4ee4

                                                                kubectl apply -f you-svc.yaml\n
                                                              3. \u521b\u5efa\u6210\u529f\u540e\uff0c\u5c31\u53ef\u4ee5\u901a\u8fc7 ssh username@nodeip -p 32090 \u6765\u8bbf\u95ee\u4e91\u4e3b\u673a
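As a quick sanity check before connecting, you can confirm the Service exposes the expected node port (a sketch; the service name, namespace, and the output shown are hypothetical):

    kubectl get svc test-ssh -n default
    # NAME       TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
    # test-ssh   NodePort   10.96.143.12   <none>        22:32090/TCP   1m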

                                                              "},{"location":"admin/virtnest/quickstart/update.html","title":"\u66f4\u65b0\u4e91\u4e3b\u673a","text":"

                                                              \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u8868\u5355\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u66f4\u65b0\u4e91\u4e3b\u673a\u3002

                                                              "},{"location":"admin/virtnest/quickstart/update.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u5f00\u673a\u72b6\u6001\u4e0b\u66f4\u65b0\u4e91\u4e3b\u673a CPU\u3001\u5185\u5b58\u3001\u6570\u636e\u76d8\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u4e91\u4e3b\u673a\u652f\u6301\u5b9e\u65f6\u8fc1\u79fb\u80fd\u529b\u3002
                                                              "},{"location":"admin/virtnest/quickstart/update.html#_3","title":"\u8868\u5355\u66f4\u65b0\u4e91\u4e3b\u673a","text":"

                                                              \u5728\u4e91\u4e3b\u673a\u5217\u8868\u9875\u9762\uff0c\u70b9\u51fb \u66f4\u65b0 \u8fdb\u5165\u4e91\u4e3b\u673a\u66f4\u65b0\u9875\u9762\u3002

                                                              \u57fa\u672c\u4fe1\u606f\u955c\u50cf\u914d\u7f6e\u5b58\u50a8\u4e0e\u7f51\u7edc\u767b\u5f55\u8bbe\u7f6e

                                                              \u57fa\u672c\u4fe1\u606f\u9875\u9762\u4e2d\uff0c \u522b\u540d \u4e0e \u6807\u7b7e\u6ce8\u89e3 \u652f\u6301\u66f4\u65b0\uff0c\u5176\u4ed6\u4fe1\u606f\u65e0\u6cd5\u66f4\u6539\u3002\u5b8c\u6210\u66f4\u65b0\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65 \u8fdb\u5165\u955c\u50cf\u914d\u7f6e\u7684\u754c\u9762\u3002

                                                              \u5728\u955c\u50cf\u914d\u7f6e\u9875\u9762\u4e2d\uff0c\u955c\u50cf\u6765\u6e90\u3001\u64cd\u4f5c\u7cfb\u7edf\u3001\u7248\u672c\u7b49\u53c2\u6570\u4e00\u65e6\u9009\u62e9\u540e\u65e0\u6cd5\u66f4\u6539\uff0c\u5141\u8bb8\u7528\u6237\u66f4\u65b0 GPU \u914d\u7f6e \uff0c \u5305\u62ec\u542f\u7528\u6216\u7981\u7528 GPU \u652f\u6301\uff0c\u9009\u62e9 GPU \u7684\u7c7b\u578b\uff0c\u6307\u5b9a\u6240\u9700\u7684\u578b\u53f7\uff0c\u4ee5\u53ca\u914d\u7f6e GPU \u5361\u7684\u6570\u91cf\uff0c\u66f4\u65b0\u540e\u9700\u8981\u91cd\u542f\u624d\u80fd\u751f\u6548\u3002 \u5b8c\u6210\u66f4\u65b0\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65 \u8fdb\u5165\u5b58\u50a8\u4e0e\u7f51\u7edc\u7684\u754c\u9762\u3002

                                                              \u5728\u5b58\u50a8\u4e0e\u7f51\u7edc\u9875\u9762\u4e2d\uff0c\u7cfb\u7edf\u76d8\u7684\u5b58\u50a8\u6c60\u548c PVC \u8bbf\u95ee\u6a21\u5f0f\u4e00\u65e6\u9009\u62e9\u540e\u65e0\u6cd5\u66f4\u6539\uff0c\u652f\u6301\u589e\u52a0\u78c1\u76d8\u5bb9\u91cf\uff0c\u4e0d\u53ef\u51cf\u5c11\u3002 \u6b64\u5916\uff0c\u7528\u6237\u53ef\u4ee5\u81ea\u7531\u6dfb\u52a0\u6216\u8005\u79fb\u9664\u6570\u636e\u76d8\u3002\u4e0d\u652f\u6301\u66f4\u65b0\u7f51\u7edc\u914d\u7f6e\u3002\u5b8c\u6210\u66f4\u65b0\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65 \u8fdb\u5165\u767b\u5f55\u8bbe\u7f6e\u7684\u754c\u9762\u3002

                                                              Note

                                                              \u5efa\u8bae\u5728\u4fee\u6539\u5b58\u50a8\u5bb9\u91cf\u6216\u589e\u52a0\u6570\u636e\u76d8\u540e\u91cd\u542f\u4e91\u4e3b\u673a\uff0c\u4ee5\u786e\u4fdd\u914d\u7f6e\u751f\u6548\u3002

                                                              \u5728\u767b\u5f55\u8bbe\u7f6e\u9875\u9762\u4e2d\uff0c\u7528\u6237\u540d\u3001\u5bc6\u7801\u4ee5\u53ca SSH \u5bc6\u94a5\u914d\u7f6e\u4e00\u65e6\u8bbe\u7f6e\uff0c\u4e0d\u5141\u8bb8\u66f4\u6539\u3002\u786e\u8ba4\u60a8\u7684\u767b\u5f55\u4fe1\u606f\u65e0\u8bef\u540e\uff0c\u70b9\u51fb\u786e\u5b9a\u6309\u94ae\u4ee5\u5b8c\u6210\u66f4\u65b0\u6d41\u7a0b\u3002

                                                              "},{"location":"admin/virtnest/quickstart/update.html#yaml","title":"\u7f16\u8f91 YAML","text":"

                                                              \u9664\u4e86\u901a\u8fc7\u8868\u5355\u65b9\u5f0f\u66f4\u65b0\u4e91\u4e3b\u673a\u5916\uff0c\u8fd8\u53ef\u4ee5\u901a\u8fc7 YAML \u6587\u4ef6\u66f4\u5feb\u901f\u5730\u66f4\u65b0\u4e91\u4e3b\u673a\u3002

                                                              \u8fdb\u5165\u4e91\u4e3b\u673a\u5217\u8868\u9875\uff0c\u70b9\u51fb \u7f16\u8f91 YAML \u6309\u94ae\u3002
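As a minimal sketch of such an edit (the VM name, namespace, and values are placeholders, and only fields the platform allows you to change should be edited), raising the VM's memory could look like this:

    kubectl edit vm test-vm -n default

    # then, inside the opened manifest, adjust the memory fields:
    spec:
      template:
        spec:
          domain:
            memory:
              guest: 4Gi        # e.g. raised from 2Gi
            resources:
              requests:
                memory: 4Gi

As noted above for the form flow, changes such as memory size take effect subject to the same constraints (e.g. a restart may be required).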

                                                              "},{"location":"admin/virtnest/template/index.html","title":"\u901a\u8fc7\u6a21\u677f\u521b\u5efa\u4e91\u4e3b\u673a","text":"

                                                              \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u6a21\u677f\u521b\u5efa\u4e91\u4e3b\u673a\u3002

                                                              \u901a\u8fc7\u5185\u7f6e\u6a21\u677f\u548c\u81ea\u5b9a\u4e49\u6a21\u677f\uff0c\u7528\u6237\u53ef\u4ee5\u8f7b\u677e\u521b\u5efa\u65b0\u7684\u4e91\u4e3b\u673a\u3002\u6b64\u5916\uff0c\u6211\u4eec\u8fd8\u63d0\u4f9b\u5c06\u73b0\u6709\u4e91\u4e3b\u673a\u8f6c\u6362\u4e3a\u4e91\u4e3b\u673a\u6a21\u677f\u7684\u529f\u80fd\uff0c\u8ba9\u7528\u6237\u80fd\u591f\u66f4\u52a0\u7075\u6d3b\u5730\u7ba1\u7406\u548c\u4f7f\u7528\u8d44\u6e90\u3002

                                                              "},{"location":"admin/virtnest/template/index.html#_2","title":"\u6a21\u677f\u521b\u5efa","text":"

                                                              \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u6a21\u677f\u521b\u5efa\u4e00\u4e2a\u4e91\u4e3b\u673a\u3002

                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u4e91\u4e3b\u673a \uff0c\u8fdb\u5165 \u4e91\u4e3b\u673a\u7ba1\u7406 \u9875\u9762\u3002\u5728\u4e91\u4e3b\u673a\u5217\u8868\u9875\u9762\uff0c\u70b9\u51fb\u521b\u5efa\u4e91\u4e3b\u673a-\u9009\u62e9\u6a21\u677f\u521b\u5efa\u4e91\u4e3b\u673a\u3002

                                                              2. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u9875\u9762\uff0c\u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u6a21\u677f\u914d\u7f6e\u3001\u5b58\u50a8\u4e0e\u7f51\u7edc\u3001\u767b\u5f55\u8bbe\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                                                                \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de\u4e91\u4e3b\u673a\u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u4e91\u4e3b\u673a\u6267\u884c\u5173\u673a/\u5f00\u542f\u3001\u91cd\u542f\u3001\u514b\u9686\u3001\u66f4\u65b0\u3001\u521b\u5efa\u5feb\u7167\u3001\u914d\u7f6e\u8f6c\u6362\u4e3a\u6a21\u677f\u3001\u63a7\u5236\u53f0\u8bbf\u95ee\uff08VNC\uff09\u3001\u5220\u9664\u7b49\u64cd\u4f5c\u3002 \u514b\u9686\u548c\u5feb\u7167\u80fd\u529b\u4f9d\u8d56\u4e8e\u5b58\u50a8\u6c60\u7684\u9009\u62e9\u3002

                                                              "},{"location":"admin/virtnest/template/index.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"

                                                              \u5728\u521b\u5efa\u4e91\u4e3b\u673a\u9875\u9762\u4e2d\uff0c\u6839\u636e\u4e0b\u8868\u8f93\u5165\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                                              • \u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\u3002 \u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u540d\u79f0\u5728\u4e91\u4e3b\u673a\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                                              • \u522b\u540d\uff1a\u5141\u8bb8\u4efb\u4f55\u5b57\u7b26\uff0c\u6700\u957f60\u4e2a\u5b57\u7b26\u3002
                                                              • \u96c6\u7fa4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u4e91\u4e3b\u673a\u90e8\u7f72\u5728\u54ea\u4e2a\u96c6\u7fa4\u5185\u3002
                                                              • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u4e91\u4e3b\u673a\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\u3002 \u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                                              "},{"location":"admin/virtnest/template/index.html#_4","title":"\u6a21\u677f\u914d\u7f6e","text":"

                                                              \u51fa\u73b0\u6a21\u677f\u5217\u8868\uff0c\u6309\u9700\u9009\u62e9\u5185\u7f6e\u6a21\u677f/\u81ea\u5b9a\u4e49\u6a21\u677f\u3002

                                                              • \u9009\u62e9\u5185\u7f6e\u6a21\u677f\uff1a\u5e73\u53f0\u5185\u7f6e\u4e862\u4e2a\u6807\u51c6\u6a21\u677f\uff0c\u4e0d\u5141\u8bb8\u7f16\u8f91\u548c\u5220\u9664\u3002\u9009\u62e9\u5185\u7f6e\u6a21\u677f\u540e\uff0c\u955c\u50cf\u6765\u6e90\u3001\u64cd\u4f5c\u7cfb\u7edf\u3001\u955c\u50cf\u5730\u5740\u7b49\u5c06\u4f7f\u7528\u6a21\u677f\u5185\u7684\u4fe1\u606f\uff0c\u65e0\u6cd5\u4fee\u6539\uff1b\u8d44\u6e90\u914d\u989d\u4e5f\u5c06\u4f7f\u7528\u6a21\u677f\u5185\u7684\u4fe1\u606f\uff0c\u5141\u8bb8\u4fee\u6539\u3002

                                                              • \u9009\u62e9\u81ea\u5b9a\u4e49\u6a21\u677f\uff1a\u7531\u4e91\u4e3b\u673a\u914d\u7f6e\u8f6c\u5316\u800c\u6765\u7684\u6a21\u677f\uff0c\u652f\u6301\u7f16\u8f91\u548c\u5220\u9664\u3002\u4f7f\u7528\u81ea\u5b9a\u4e49\u6a21\u677f\u5219\u6839\u636e\u5177\u4f53\u60c5\u51b5\u652f\u6301\u4fee\u6539\u955c\u50cf\u6765\u6e90\u7b49\u4fe1\u606f\u3002

                                                              "},{"location":"admin/virtnest/template/index.html#_5","title":"\u5b58\u50a8\u4e0e\u7f51\u7edc\u914d\u7f6e","text":"
                                                              • \u5b58\u50a8\uff1a\u7cfb\u7edf\u9ed8\u8ba4\u521b\u5efa\u4e00\u4e2a VirtIO \u7c7b\u578b\u7684 rootfs \u7cfb\u7edf\u76d8\uff0c\u7528\u4e8e\u5b58\u653e\u64cd\u4f5c\u7cfb\u7edf\u548c\u6570\u636e\u3002 \u9ed8\u8ba4\u4f7f\u7528\u5757\u5b58\u50a8\u3002\u5982\u679c\u9700\u8981\u4f7f\u7528\u514b\u9686\u548c\u5feb\u7167\u529f\u80fd\uff0c\u8bf7\u786e\u4fdd\u60a8\u7684\u5b58\u50a8\u6c60\u652f\u6301 VolumeSnapshots \u529f\u80fd\uff0c \u5e76\u5728\u5b58\u50a8\u6c60\uff08SC\uff09\u4e2d\u8fdb\u884c\u521b\u5efa\u3002\u8bf7\u6ce8\u610f\uff0c\u5b58\u50a8\u6c60\uff08SC\uff09\u8fd8\u6709\u5176\u4ed6\u4e00\u4e9b\u5148\u51b3\u6761\u4ef6\u9700\u8981\u6ee1\u8db3\u3002

                                                                • \u5148\u51b3\u6761\u4ef6\uff1a

                                                                  • KubeVirt \u5229\u7528 Kubernetes CSI \u9a71\u52a8\u7a0b\u5e8f\u7684 VolumeSnapshot\u529f\u80fd\u6765\u6355\u83b7\u6301\u4e45\u5316\u4e91\u4e3b\u673a\u72b6\u6001\u3002 \u56e0\u6b64\uff0c\u60a8\u9700\u8981\u786e\u4fdd\u60a8\u7684\u4e91\u4e3b\u673a\u4f7f\u7528\u7531\u652f\u6301 VolumeSnapshots \u7684 StorageClass \u5e76\u914d\u7f6e\u4e86\u6b63\u786e\u7684 VolumeSnapshotClass\u3002
                                                                  • \u67e5\u770b\u5df2\u521b\u5efa\u7684 Snapshotclass \uff0c\u5e76\u4e14\u786e\u8ba4 provisioner \u5c5e\u6027\u540c\u5b58\u50a8\u6c60\u4e2d\u7684 Driver \u5c5e\u6027\u4e00\u81f4\u3002
                                                                • \u652f\u6301\u6dfb\u52a0\u4e00\u5757\u7cfb\u7edf\u76d8\u548c\u591a\u5757\u6570\u636e\u76d8\u3002

                                                              • \u7f51\u7edc\uff1a\u82e5\u60a8\u4e0d\u505a\u4efb\u4f55\u914d\u7f6e\uff0c\u7cfb\u7edf\u5c06\u9ed8\u8ba4\u521b\u5efa\u4e00\u4e2a VirtIO \u7c7b\u578b\u7684\u7f51\u7edc\u3002
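The driver/provisioner match mentioned above can be verified from the command line; a sketch (class names are placeholders, and the two printed values should be identical):

    kubectl get volumesnapshotclass
    kubectl get volumesnapshotclass <snapshotclass-name> -o jsonpath='{.driver}'
    kubectl get storageclass <storageclass-name> -o jsonpath='{.provisioner}'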

                                                              "},{"location":"admin/virtnest/template/index.html#_6","title":"\u767b\u5f55\u8bbe\u7f6e","text":"
                                                              • \u7528\u6237\u540d/\u5bc6\u7801\uff1a\u53ef\u4ee5\u901a\u8fc7\u7528\u6237\u540d\u548c\u5bc6\u7801\u767b\u5f55\u81f3\u4e91\u4e3b\u673a\u3002
                                                              • SSH\uff1a\u9009\u62e9 SSH \u767b\u5f55\u65b9\u5f0f\u65f6\u53ef\u4e3a\u4e91\u4e3b\u673a\u7ed1\u5b9a SSH \u5bc6\u94a5\uff0c\u7528\u4e8e\u65e5\u540e\u767b\u5f55\u4e91\u4e3b\u673a\u3002
                                                              "},{"location":"admin/virtnest/template/tep.html","title":"\u4e91\u4e3b\u673a\u6a21\u677f","text":"

                                                              \u672c\u6587\u5c06\u4ecb\u7ecd\u5185\u7f6e\u4e91\u4e3b\u673a\u6a21\u677f\u548c\u81ea\u5b9a\u4e49\u4e91\u4e3b\u673a\u6a21\u677f\u3002

                                                              \u901a\u8fc7\u5185\u7f6e\u6a21\u677f\u548c\u81ea\u5b9a\u4e49\u6a21\u677f\uff0c\u7528\u6237\u53ef\u4ee5\u8f7b\u677e\u521b\u5efa\u65b0\u7684\u4e91\u4e3b\u673a\u3002\u6b64\u5916\uff0c\u6211\u4eec\u8fd8\u63d0\u4f9b\u5c06\u73b0\u6709\u4e91\u4e3b\u673a\u8f6c\u6362\u4e3a\u4e91\u4e3b\u673a\u6a21\u677f\u7684\u529f\u80fd\uff0c\u8ba9\u7528\u6237\u80fd\u591f\u66f4\u52a0\u7075\u6d3b\u5730\u7ba1\u7406\u548c\u4f7f\u7528\u8d44\u6e90\u3002

                                                              "},{"location":"admin/virtnest/template/tep.html#_2","title":"\u4e91\u4e3b\u673a\u6a21\u677f","text":"
                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u4e91\u4e3b\u673a\u6a21\u677f \uff0c\u8fdb\u5165 \u4e91\u4e3b\u673a\u6a21\u677f \u9875\u9762\uff0c\u82e5\u8be5\u6a21\u677f\u662f\u7531\u914d\u7f6e\u4e86 GPU \u7684\u4e91\u4e3b\u673a\u8f6c\u6362\u800c\u6765\uff0c\u6a21\u677f\u4e5f\u4f1a\u5e26\u6709 GPU \u7684\u4fe1\u606f\uff0c\u5c06\u5728\u6a21\u677f\u5217\u8868\u4e2d\u5c55\u793a\u3002

                                                              2. \u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u5185\u7f6e\u6a21\u677f\u6267\u884c\u521b\u5efa\u4e91\u4e3b\u673a\u548c\u67e5\u770b YAML \u64cd\u4f5c\uff1b\u5bf9\u81ea\u5b9a\u4e49\u6a21\u677f\u652f\u6301\u521b\u5efa\u4e91\u4e3b\u673a\u3001\u7f16\u8f91 YAML \u548c\u5220\u9664\u64cd\u4f5c\u3002

                                                              "},{"location":"admin/virtnest/template/tep.html#_3","title":"\u5185\u7f6e\u6a21\u677f","text":"
                                                              • \u5e73\u53f0\u5185\u5185\u7f6e\u4e24\u79cd\u6a21\u677f\uff0c\u5206\u522b\u662f CentOS \u548c Ubuntu\u3002

                                                              "},{"location":"admin/virtnest/template/tep.html#_4","title":"\u81ea\u5b9a\u4e49\u6a21\u677f","text":"

                                                              \u81ea\u5b9a\u4e49\u6a21\u677f\u662f\u7531\u4e91\u4e3b\u673a\u914d\u7f6e\u8f6c\u5316\u800c\u6765\u7684\u6a21\u677f\u3002\u4ee5\u4e0b\u4ecb\u7ecd\u5982\u4f55\u4ece\u4e91\u4e3b\u673a\u914d\u7f6e\u8f6c\u6362\u4e3a\u6a21\u677f\u3002

                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u4e91\u4e3b\u673a \uff0c\u8fdb\u5165\u5217\u8868\u9875\u9762\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \u652f\u6301\u5c06\u914d\u7f6e\u8f6c\u6362\u4e3a\u6a21\u677f\u3002\u53ea\u6709\u8fd0\u884c\u4e2d/\u5173\u95ed\u72b6\u6001\u4e0b\u7684\u4e91\u4e3b\u673a\u652f\u6301\u8f6c\u5316\u3002

                                                              2. \u586b\u5199\u65b0\u6a21\u677f\u7684\u540d\u79f0\uff0c\u63d0\u793a\u539f\u59cb\u4e91\u4e3b\u673a\u5c06\u4f1a\u4fdd\u7559\u5e76\u4e14\u53ef\u7528\u3002\u8f6c\u6362\u6210\u529f\u540e\uff0c\u5c06\u4f1a\u5728\u6a21\u677f\u5217\u8868\u65b0\u589e\u4e00\u6761\u6570\u636e\u3002

                                                              "},{"location":"admin/virtnest/template/tep.html#_5","title":"\u6a21\u677f\u8be6\u60c5","text":"

                                                              \u6210\u529f\u521b\u5efa\u51fa\u6765\u4e00\u4e2a\u6a21\u677f\u540e\uff0c\u70b9\u51fb\u6a21\u677f\u540d\u79f0\uff0c\u53ef\u4ee5\u67e5\u770b\u4e91\u4e3b\u673a\u8be6\u60c5\uff0c\u5305\u62ec\u57fa\u672c\u4fe1\u606f\u3001GPU \u4fe1\u606f\u3001\u5b58\u50a8\u3001\u7f51\u7edc\u7b49\u3002\u5982\u679c\u9700\u8981\u5feb\u901f\u57fa\u4e8e\u8be5\u6a21\u677f\u90e8\u7f72\u65b0\u7684\u4e91\u4e3b\u673a\uff0c\u53ea\u9700\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa\u4e91\u4e3b\u673a \u6309\u94ae\u5373\u53ef\u4fbf\u6377\u64cd\u4f5c\u3002

                                                              "},{"location":"admin/virtnest/vm/auto-migrate.html","title":"\u4e91\u4e3b\u673a\u81ea\u52a8\u6f02\u79fb","text":"

                                                              \u672c\u6587\u5c06\u4ecb\u7ecd\u5f53\u96c6\u7fa4\u5185\u67d0\u4e2a\u8282\u70b9\u56e0\u4e3a\u65ad\u7535\u6216\u7f51\u7edc\u6545\u969c\uff0c\u5bfc\u81f4\u8be5\u8282\u70b9\u4e0a\u7684\u4e91\u4e3b\u673a\u65e0\u6cd5\u8bbf\u95ee\u65f6\uff0c \u5982\u4f55\u5c06\u6b63\u5728\u8fd0\u884c\u7684\u4e91\u4e3b\u673a\u65e0\u7f1d\u8fc1\u79fb\u5230\u5176\u4ed6\u7684\u8282\u70b9\u4e0a\uff0c\u540c\u65f6\u4fdd\u8bc1\u4e1a\u52a1\u7684\u8fde\u7eed\u6027\u548c\u6570\u636e\u7684\u5b89\u5168\u6027\u3002

                                                              \u4e0e\u5b9e\u65f6\u8fc1\u79fb\u76f8\u6bd4\uff0c\u81ea\u52a8\u6f02\u79fb\u4e0d\u9700\u8981\u60a8\u5728\u754c\u9762\u4e2d\u4e3b\u52a8\u64cd\u4f5c\uff0c\u800c\u662f\u7cfb\u7edf\u81ea\u52a8\u89e6\u53d1\u8fc1\u79fb\u8fc7\u7a0b\u3002

                                                              "},{"location":"admin/virtnest/vm/auto-migrate.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u5b9e\u73b0\u81ea\u52a8\u6f02\u79fb\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u4e91\u4e3b\u673a\u672a\u8fdb\u884c\u78c1\u76d8\u843d\u76d8\u64cd\u4f5c\uff0c\u6216\u4f7f\u7528 Rook-Ceph\u3001HwameiStor HA \u6a21\u5f0f\u4f5c\u4e3a\u5b58\u50a8\u7cfb\u7edf
                                                              • \u8282\u70b9\u5931\u8054\u65f6\u95f4\u8d85\u8fc7\u4e94\u5206\u949f
                                                              • \u786e\u4fdd\u96c6\u7fa4\u5185\u81f3\u5c11\u6709\u4e24\u4e2a\u8282\u70b9\u53ef\u4f9b\u4f7f\u7528\uff0c\u5e76\u4e14\u4e91\u4e3b\u673a\u6ca1\u6709\u6307\u5b9a\u8c03\u5ea6\u8282\u70b9
                                                              • \u4e91\u4e3b\u673a\u7684 launcher pod \u5df2\u88ab\u5220\u9664
                                                              "},{"location":"admin/virtnest/vm/auto-migrate.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u68c0\u67e5\u4e91\u4e3b\u673a launcher pod \u72b6\u6001\uff1a

                                                                kubectl get pod\n

                                                                \u67e5\u770b launcher pod \u662f\u5426\u5904\u4e8e Terminating \u72b6\u6001\u3002

                                                              2. \u5f3a\u5236\u5220\u9664 launcher pod\uff1a

                                                                \u5982\u679c launcher pod \u72b6\u6001\u4e3a Terminating\uff0c\u53ef\u4ee5\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u8fdb\u884c\u5f3a\u5236\u5220\u9664\uff1a

                                                                kubectl delete <launcher pod> --force\n

                                                                \u66ff\u6362 <launcher pod> \u4e3a\u4f60\u7684 launcher pod \u540d\u79f0\u3002
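  For example (the pod name here is hypothetical; KubeVirt typically names launcher pods virt-launcher-<vm-name>-<hash>):

    kubectl delete pod virt-launcher-testvm-abcde -n default --force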

3. Wait for re-creation and check the status:

  After the deletion, the system automatically re-creates the launcher pod. Wait for its status to become Running, then refresh the VM list and check whether the VM has migrated to the new node.

                                                              "},{"location":"admin/virtnest/vm/auto-migrate.html#_4","title":"\u6ce8\u610f\u4e8b\u9879","text":"

                                                              \u5982\u679c\u4f7f\u7528 rook-ceph \u4f5c\u4e3a\u5b58\u50a8\uff0c\u9700\u8981\u914d\u7f6e\u4e3a ReadWriteOnce \u6a21\u5f0f\uff1a

                                                              1. \u5f3a\u5236\u5220\u9664 pod \u540e\uff0c\u9700\u8981\u7b49\u5f85\u5927\u7ea6\u516d\u5206\u949f\u4ee5\u8ba9 launcher pod \u542f\u52a8\uff0c\u6216\u8005\u53ef\u4ee5\u901a\u8fc7\u4ee5\u4e0b\u547d\u4ee4\u7acb\u5373\u542f\u52a8 pod\uff1a

                                                                kubectl get pv | grep <vm name>\nkubectl get VolumeAttachment | grep <pv name>\n

                                                                \u66ff\u6362 <vm name> \u548c <pv name> \u4e3a\u4f60\u7684\u4e91\u4e3b\u673a\u540d\u79f0\u548c\u6301\u4e45\u5377\u540d\u79f0\u3002

                                                              2. \u7136\u540e\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u5220\u9664\u5bf9\u5e94\u7684 VolumeAttachment\uff1a

                                                                kubectl delete VolumeAttachment <vm>\n

                                                                \u66ff\u6362 <vm> \u4e3a\u4f60\u7684\u4e91\u4e3b\u673a\u540d\u79f0\u3002

                                                              "},{"location":"admin/virtnest/vm/clone.html","title":"\u514b\u9686\u4e91\u4e3b\u673a","text":"

                                                              \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u514b\u9686\u4e00\u53f0\u65b0\u7684\u4e91\u4e3b\u673a\u3002

                                                              \u7528\u6237\u53ef\u4ee5\u514b\u9686\u4e00\u53f0\u65b0\u7684\u4e91\u4e3b\u673a\uff0c\u514b\u9686\u540e\u7684\u4e91\u4e3b\u673a\u5c06\u5177\u6709\u4e0e\u539f\u59cb\u4e91\u4e3b\u673a\u76f8\u540c\u7684\u64cd\u4f5c\u7cfb\u7edf\u548c\u7cfb\u7edf\u914d\u7f6e\uff0c\u80fd\u591f\u5b9e\u73b0\u5feb\u901f\u90e8\u7f72\u548c\u6269\u5c55\uff0c\u5feb\u901f\u521b\u5efa\u76f8\u4f3c\u914d\u7f6e\u7684\u65b0\u4e91\u4e3b\u673a\uff0c\u800c\u65e0\u9700\u4ece\u5934\u5f00\u59cb\u5b89\u88c5\u3002

                                                              "},{"location":"admin/virtnest/vm/clone.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u4f7f\u7528\u514b\u9686\u529f\u80fd\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff08\u548c\u5feb\u7167\u529f\u80fd\u7684\u524d\u63d0\u6761\u4ef6\u4e00\u81f4\uff09\uff1a

                                                              • \u53ea\u6709\u975e\u9519\u8bef\u72b6\u6001\u4e0b\u7684\u4e91\u4e3b\u673a\u624d\u80fd\u4f7f\u7528\u514b\u9686\u529f\u80fd\u3002
                                                              • \u5b89\u88c5 Snapshot CRDs\u3001Snapshot Controller\u3001CSI Driver\u3002 \u5177\u4f53\u5b89\u88c5\u6b65\u9aa4\u53ef\u53c2\u8003 CSI Snapshotter\u3002
                                                              • \u7b49\u5f85 snapshot-controller \u7ec4\u4ef6\u51c6\u5907\u5c31\u7eea, \u8be5\u7ec4\u4ef6\u4f1a\u76d1\u63a7 VolumeSnapshot \u548c VolumeSnapshotContent \u76f8\u5173\u4e8b\u4ef6\uff0c\u5e76\u89e6\u53d1\u76f8\u5173\u64cd\u4f5c\u3002
                                                              • \u7b49\u5f85 CSI Driver \u51c6\u5907\u5c31\u7eea, \u786e\u4fdd csi-snapshotter sidecar \u8dd1\u5728 CSI Driver \u91cc\uff0ccsi-snapshotter sidecar \u4f1a\u76d1\u63a7 VolumeSnapshotContent \u76f8\u5173\u4e8b\u4ef6\uff0c\u5e76\u89e6\u53d1\u76f8\u5173\u64cd\u4f5c\u3002
                                                                • \u5982\u5b58\u50a8\u662f Rook-Ceph\uff0c\u53ef\u53c2\u8003 ceph-csi-snapshot
                                                                • \u5982\u5b58\u50a8\u662f HwameiStor\uff0c\u53ef\u53c2\u8003 huameistor-snapshot
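A sketch of how to confirm these components are ready (deployment names and namespaces vary by installation):

    # the snapshot CRDs should exist
    kubectl get crd volumesnapshots.snapshot.storage.k8s.io
    # the snapshot-controller pod should be Running
    kubectl get pods -A | grep snapshot-controller
    # csi-snapshotter should appear among the CSI driver pods' containers
    kubectl get pods -A -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.spec.containers[*].name}{"\n"}{end}' | grep csi-snapshotter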
                                                              "},{"location":"admin/virtnest/vm/clone.html#_3","title":"\u514b\u9686\u4e91\u4e3b\u673a","text":"
                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u4e91\u4e3b\u673a \uff0c\u8fdb\u5165\u5217\u8868\u9875\u9762\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u975e\u9519\u8bef\u72b6\u6001\u4e0b\u7684\u4e91\u4e3b\u673a\u6267\u884c\u5feb\u7167\u64cd\u4f5c\u3002

                                                              2. \u5f39\u51fa\u5f39\u6846\uff0c\u9700\u8981\u586b\u5199\u514b\u9686\u65b0\u7684\u4e91\u4e3b\u673a\u7684\u540d\u79f0\u548c\u63cf\u8ff0\uff0c\u514b\u9686\u64cd\u4f5c\u53ef\u80fd\u9700\u8981\u4e00\u4e9b\u65f6\u95f4\uff0c\u5177\u4f53\u53d6\u51b3\u4e8e\u4e91\u4e3b\u673a\u7684\u5927\u5c0f\u548c\u5b58\u50a8\u6027\u80fd\u3002

                                                              3. \u514b\u9686\u6210\u529f\u540e\u53ef\u4ee5\u5728\u4e91\u4e3b\u673a\u5217\u8868\u5185\u67e5\u770b\u5230\u65b0\u7684\u4e91\u4e3b\u673a\uff0c\u65b0\u521b\u5efa\u51fa\u6765\u7684\u4e91\u4e3b\u673a\u5904\u4e8e\u5173\u673a\u72b6\u6001\uff0c\u82e5\u9700\u8981\u5f00\u673a\u9700\u8981\u624b\u52a8\u64cd\u4f5c\u3002

                                                              4. \u514b\u9686\u524d\u5efa\u8bae\u5bf9\u539f\u6709\u4e91\u4e3b\u673a\u8fdb\u884c\u5feb\u7167\uff0c\u5982\u679c\u514b\u9686\u8fc7\u7a0b\u4e2d\u9047\u5230\u95ee\u9898\uff0c\u8bf7\u68c0\u67e5\u5148\u51b3\u6761\u4ef6\u662f\u5426\u6ee1\u8db3\uff0c\u5e76\u5c1d\u8bd5\u91cd\u65b0\u6267\u884c\u514b\u9686\u64cd\u4f5c\u3002

                                                              "},{"location":"admin/virtnest/vm/create-secret.html","title":"\u521b\u5efa\u5bc6\u94a5","text":"

                                                              \u5f53\u521b\u5efa\u4e91\u4e3b\u673a\u4f7f\u7528\u5bf9\u8c61\u5b58\u50a8\uff08S3\uff09\u4f5c\u4e3a\u955c\u50cf\u6765\u6e90\u65f6\uff0c\u6709\u65f6\u5019\u9700\u8981\u586b\u5199\u5bc6\u94a5\u6765\u83b7\u53d6\u901a\u8fc7 S3 \u7684\u9a8c\u8bc1\u3002\u4ee5\u4e0b\u5c06\u4ecb\u7ecd\u5982\u4f55\u521b\u5efa\u7b26\u5408\u4e91\u4e3b\u673a\u8981\u6c42\u7684\u5bc6\u94a5\u3002

                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u96c6\u7fa4\u5217\u8868 \uff0c\u8fdb\u5165\u4e91\u4e3b\u673a\u6240\u5728\u96c6\u7fa4\u8be6\u60c5\uff0c\u70b9\u51fb \u914d\u7f6e\u4e0e\u5bc6\u94a5 \uff0c\u9009\u62e9 \u5bc6\u94a5 \uff0c\u70b9\u51fb \u521b\u5efa\u5bc6\u94a5 \u3002

                                                              2. \u8fdb\u5165\u521b\u5efa\u9875\u9762\uff0c\u586b\u5199\u5bc6\u94a5\u540d\u79f0\uff0c\u9009\u62e9\u548c\u4e91\u4e3b\u673a\u76f8\u540c\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u6ce8\u610f\u9700\u8981\u9009\u62e9 \u9ed8\u8ba4\uff08Opaque\uff09 \u7c7b\u578b\u3002\u5bc6\u94a5\u6570\u636e\u9700\u8981\u9075\u5faa\u4ee5\u4e0b\u539f\u5219

                                                                • accessKeyId: \u9700\u8981\u4ee5 Base64 \u7f16\u7801\u65b9\u5f0f\u8868\u793a\u7684\u6570\u636e
                                                                • secretKey: \u9700\u8981\u4ee5 Base64 \u7f16\u7801\u65b9\u5f0f\u8868\u793a\u7684\u6570\u636e
                                                              3. \u521b\u5efa\u6210\u529f\u540e\u53ef\u4ee5\u5728\u521b\u5efa\u4e91\u4e3b\u673a\u65f6\u4f7f\u7528\u6240\u9700\u5bc6\u94a5\uff0c\u6700\u540e\u901a\u8fc7\u9a8c\u8bc1\u3002
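A minimal sketch of such a Secret in YAML (the name and namespace are placeholders; encode each value with e.g. echo -n <value> | base64):

    apiVersion: v1
    kind: Secret
    metadata:
      name: s3-image-secret   # any name; referenced when creating the VM
      namespace: default      # must match the VM's namespace
    type: Opaque
    data:
      accessKeyId: <Base64-encoded-access-key-id>
      secretKey: <Base64-encoded-secret-key>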

                                                              "},{"location":"admin/virtnest/vm/cross-cluster-migrate.html","title":"\u4e91\u4e3b\u673a\u8de8\u96c6\u7fa4\u8fc1\u79fb","text":"

                                                              \u672c\u529f\u80fd\u6682\u672a\u505a UI \u754c\u9762\u80fd\u529b\uff0c\u8bf7\u53c2\u8003\u6587\u6863\u7684\u64cd\u4f5c\u6b65\u9aa4\u6267\u884c\u3002

                                                              "},{"location":"admin/virtnest/vm/cross-cluster-migrate.html#_2","title":"\u4f7f\u7528\u573a\u666f","text":"
                                                              • \u5f53\u539f\u96c6\u7fa4\u53d1\u751f\u6545\u969c\u6216\u6027\u80fd\u4e0b\u964d\u5bfc\u81f4\u8be5\u96c6\u7fa4\u4e0a\u7684\u4e91\u4e3b\u673a\u65e0\u6cd5\u8bbf\u95ee\u65f6\uff0c\u5c06\u4e91\u4e3b\u673a\u8fc1\u79fb\u5230\u5176\u4ed6\u7684\u96c6\u7fa4\u4e0a\u3002
                                                              • \u9700\u8981\u5bf9\u96c6\u7fa4\u8fdb\u884c\u8ba1\u5212\u5185\u7684\u7ef4\u62a4\u6216\u5347\u7ea7\u65f6\uff0c\u5c06\u4e91\u4e3b\u673a\u8fc1\u79fb\u5230\u5176\u4ed6\u7684\u96c6\u7fa4\u4e0a\u3002
                                                              • \u5f53\u7279\u5b9a\u5e94\u7528\u7684\u6027\u80fd\u9700\u6c42\u53d8\u5316\uff0c\u9700\u8981\u8c03\u6574\u8d44\u6e90\u5206\u914d\u65f6\uff0c\u8fc1\u79fb\u4e91\u4e3b\u673a\u5230\u5176\u4ed6\u7684\u96c6\u7fa4\u4e0a\u4ee5\u5339\u914d\u66f4\u5408\u9002\u7684\u8d44\u6e90\u914d\u7f6e\u3002
                                                              "},{"location":"admin/virtnest/vm/cross-cluster-migrate.html#_3","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u5b9e\u73b0\u4e91\u4e3b\u673a\u8de8\u96c6\u7fa4\u8fc1\u79fb\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u96c6\u7fa4\u7f51\u7edc\u4e92\u901a\uff1a\u786e\u4fdd\u539f\u6709\u96c6\u7fa4\u4e0e\u76ee\u6807\u8fc1\u79fb\u96c6\u7fa4\u4e4b\u95f4\u7684\u7f51\u7edc\u662f\u4e92\u901a\u7684
                                                              • \u76f8\u540c\u5b58\u50a8\u7c7b\u578b\uff1a\u76ee\u6807\u8fc1\u79fb\u96c6\u7fa4\u9700\u652f\u6301\u4e0e\u539f\u6709\u96c6\u7fa4\u76f8\u540c\u7684\u5b58\u50a8\u7c7b\u578b\uff08\u4f8b\u5982\uff0c\u5982\u679c\u5bfc\u51fa\u96c6\u7fa4\u4f7f\u7528 rook-ceph-block \u7c7b\u578b\u7684 StorageClass\uff0c\u5219\u5bfc\u5165\u96c6\u7fa4\u4e5f\u5fc5\u987b\u652f\u6301\u6b64\u7c7b\u578b\uff09\u3002
                                                              • \u5728\u539f\u6709\u96c6\u7fa4\u7684 KubeVirt \u4e2d\u5f00\u542f VMExport Feature Gate\u3002
                                                              "},{"location":"admin/virtnest/vm/cross-cluster-migrate.html#vmexport-feature-gate","title":"\u5f00\u542f VMExport Feature Gate","text":"

                                                              \u6fc0\u6d3b VMExport Feature Gate\uff0c\u5728\u539f\u6709\u96c6\u7fa4\u5185\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c \u53ef\u53c2\u8003How to activate a feature gate

                                                              kubectl edit kubevirt kubevirt -n virtnest-system\n

                                                              \u8fd9\u6761\u547d\u4ee4\u5c06\u4fee\u6539 featureGates \uff0c\u589e\u52a0 VMExport \u3002

                                                              apiVersion: kubevirt.io/v1\nkind: KubeVirt\nmetadata:\n  name: kubevirt\n  namespace: virtnest-system\nspec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n        - DataVolumes\n        - LiveMigration\n        - VMExport\n
                                                              "},{"location":"admin/virtnest/vm/cross-cluster-migrate.html#ingress","title":"\u914d\u7f6e\u539f\u6709\u96c6\u7fa4\u7684 Ingress","text":"

                                                              \u4ee5 Nginx Ingress \u4e3a\u4f8b\uff0c\u914d\u7f6e Ingress \u4ee5\u6307\u5411 virt-exportproxy Service\uff1a

                                                              apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: ingress-vm-export\n  namespace: virtnest-system\nspec:\n  tls:\n    - hosts:\n        - upgrade-test.com\n      secretName: nginx-tls\n  rules:\n    - host: upgrade-test.com\n      http:\n        paths:\n          - path: /\n            pathType: Prefix\n            backend:\n              service:\n                name: virt-exportproxy\n                port:\n                  number: 8443\n  ingressClassName: nginx\n
                                                              "},{"location":"admin/virtnest/vm/cross-cluster-migrate.html#_4","title":"\u8fc1\u79fb\u6b65\u9aa4","text":"
                                                              1. \u521b\u5efa VirtualMachineExport CR

                                                                • \u5982\u679c \u4e91\u4e3b\u673a\u5173\u673a\u72b6\u6001 \u4e0b\u8fdb\u884c\u8fc1\u79fb\uff08\u51b7\u8fc1\u79fb\uff09\uff1a

                                                                  apiVersion: v1\nkind: Secret\nmetadata:\n  name: example-token # \u5bfc\u51fa\u4e91\u4e3b\u673a\u6240\u7528 token\n  namespace: default # \u4e91\u4e3b\u673a\u6240\u5728\u547d\u540d\u7a7a\u95f4\nstringData:\n  token: 1234567890ab # \u5bfc\u51fa\u4f7f\u7528\u7684 token,\u53ef\u4fee\u6539\n\n---\napiVersion: export.kubevirt.io/v1alpha1\nkind: VirtualMachineExport\nmetadata:\n  name: example-export # \u5bfc\u51fa\u540d\u79f0, \u53ef\u81ea\u884c\u4fee\u6539\n  namespace: default # \u4e91\u4e3b\u673a\u6240\u5728\u547d\u540d\u7a7a\u95f4\nspec:\n  tokenSecretRef: example-token # \u548c\u4e0a\u9762\u521b\u5efa\u7684token\u540d\u79f0\u4fdd\u6301\u4e00\u81f4\n  source:\n    apiGroup: \"kubevirt.io\"\n    kind: VirtualMachine\n    name: testvm # \u4e91\u4e3b\u673a\u540d\u79f0\n
                                                                • \u5982\u679c\u8981\u5728 \u4e91\u4e3b\u673a\u4e0d\u5173\u673a \u7684\u72b6\u6001\u4e0b\uff0c\u4f7f\u7528\u4e91\u4e3b\u673a\u5feb\u7167\u8fdb\u884c\u8fc1\u79fb\uff08\u70ed\u8fc1\u79fb\uff09\uff1a

                                                                  apiVersion: v1\nkind: Secret\nmetadata:\n  name: example-token # \u5bfc\u51fa\u4e91\u4e3b\u673a\u6240\u7528 token\n  namespace: default # \u4e91\u4e3b\u673a\u6240\u5728\u547d\u540d\u7a7a\u95f4\nstringData:\n  token: 1234567890ab # \u5bfc\u51fa\u4f7f\u7528\u7684 token ,\u53ef\u4fee\u6539\n\n---\napiVersion: export.kubevirt.io/v1alpha1\nkind: VirtualMachineExport\nmetadata:\n  name: export-snapshot # \u5bfc\u51fa\u540d\u79f0, \u53ef\u81ea\u884c\u4fee\u6539\n  namespace: default # \u4e91\u4e3b\u673a\u6240\u5728\u547d\u540d\u7a7a\u95f4\nspec:\n  tokenSecretRef: export-token # \u548c\u4e0a\u9762\u521b\u5efa\u7684token\u540d\u79f0\u4fdd\u6301\u4e00\u81f4\n  source:\n    apiGroup: \"snapshot.kubevirt.io\"\n    kind: VirtualMachineSnapshot\n    name: export-snap-202407191524 # \u5bf9\u5e94\u7684\u4e91\u4e3b\u673a\u5feb\u7167\u540d\u79f0\n
2. Check whether the VirtualMachineExport is ready:

    # Replace example-export with the name of the VirtualMachineExport you created
    kubectl get VirtualMachineExport example-export -n default

    NAME             SOURCEKIND       SOURCENAME   PHASE
    example-export   VirtualMachine   testvm       Ready
3. Once the VirtualMachineExport is ready, export the VM's YAML.

  • If virtctl is installed, export the VM's YAML with the following command:

      # Replace example-export with the name of the VirtualMachineExport you created
      # Specify the namespace with -n
      virtctl vmexport download example-export --manifest --include-secret --output=manifest.yaml
  • If virtctl is not installed, export the VM's YAML with the following commands:

      # Replace example-export with the name and namespace of the VirtualMachineExport you created
      manifesturl=$(kubectl get VirtualMachineExport example-export -n default -o=jsonpath='{.status.links.internal.manifests[0].url}')
      secreturl=$(kubectl get VirtualMachineExport example-export -n default -o=jsonpath='{.status.links.internal.manifests[1].url}')
      # Replace the secret name and namespace as appropriate
      token=$(kubectl get secret example-token -n default -o=jsonpath='{.data.token}' | base64 -d)

      curl -H "Accept: application/yaml" -H "x-kubevirt-export-token: $token" --insecure $secreturl > manifest.yaml
      curl -H "Accept: application/yaml" -H "x-kubevirt-export-token: $token" --insecure $manifesturl >> manifest.yaml
4. Import the VM

  Copy the exported manifest.yaml to the target cluster and run the following command (if the namespace does not exist, create it beforehand):

    kubectl apply -f manifest.yaml
  After the resources are created, restart the VM. Once the VM runs successfully, delete the original VM in the source cluster (do not delete the original VM if the new one has not started successfully).
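  If virtctl is available on the target cluster, the imported VM can be started along these lines (the VM name and namespace are placeholders; the patch variant assumes the VM uses spec.runStrategy):

    virtctl start testvm -n default
    # or, without virtctl:
    kubectl patch vm testvm -n default --type merge -p '{"spec":{"runStrategy":"Always"}}'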

                                                              "},{"location":"admin/virtnest/vm/health-check.html","title":"\u5065\u5eb7\u68c0\u67e5","text":"

                                                              \u5f53\u914d\u7f6e\u4e91\u4e3b\u673a\u7684\u5b58\u6d3b\uff08Liveness\uff09\u548c\u5c31\u7eea\uff08Readiness\uff09\u63a2\u9488\u65f6\uff0c\u4e0e Kubernetes \u7684\u914d\u7f6e\u8fc7\u7a0b\u76f8\u4f3c\u3002\u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7 YAML \u4e3a\u4e91\u4e3b\u673a\u914d\u7f6e\u5065\u5eb7\u68c0\u67e5\u53c2\u6570\u3002

                                                              \u4f46\u662f\u9700\u8981\u6ce8\u610f\uff1a\u9700\u8981\u5728\u4e91\u4e3b\u673a\u521b\u5efa\u6210\u529f\u5e76\u4e14\u5904\u4e8e\u5173\u673a\u72b6\u6001\u4e0b\uff0c\u4fee\u6539 YAML \u8fdb\u884c\u914d\u7f6e\u3002

                                                              "},{"location":"admin/virtnest/vm/health-check.html#http-liveness-probe","title":"\u914d\u7f6e HTTP Liveness Probe","text":"
                                                              1. \u5728 spec.template.spec \u4e2d\u914d\u7f6e livenessProbe.httpGet\u3002
                                                              2. \u4fee\u6539 cloudInitNoCloud \u4ee5\u542f\u52a8\u4e00\u4e2a HTTP \u670d\u52a1\u5668\u3002

  Example YAML configuration:

  apiVersion: kubevirt.io/v1
  kind: VirtualMachine
  metadata:
    annotations:
      kubevirt.io/latest-observed-api-version: v1
      kubevirt.io/storage-observed-api-version: v1
      virtnest.io/alias-name: ''
      virtnest.io/image-secret: ''
      virtnest.io/image-source: docker
      virtnest.io/os-image: release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1
    creationTimestamp: '2024-10-15T02:39:45Z'
    finalizers:
      - kubevirt.io/virtualMachineControllerFinalize
    generation: 1
    labels:
      virtnest.io/os-family: Ubuntu
      virtnest.io/os-version: '22.04'
    name: test-probe
    namespace: amamba-team
    resourceVersion: '254032135'
    uid: 6d92779d-7415-4721-8c7b-a2dde163d758
  spec:
    dataVolumeTemplates:
      - metadata:
          creationTimestamp: null
          name: test-probe-rootdisk
          namespace: amamba-team
        spec:
          pvc:
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 10Gi
            storageClassName: hwameistor-storage-lvm-hdd
          source:
            registry:
              url: >-
                docker://release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1
    runStrategy: Halted
    template:
      metadata:
        creationTimestamp: null
      spec:
        architecture: amd64
        domain:
          cpu:
            cores: 1
            sockets: 1
            threads: 1
          devices:
            disks:
              - bootOrder: 1
                disk:
                  bus: virtio
                name: rootdisk
              - disk:
                  bus: virtio
                name: cloudinitdisk
            interfaces:
              - masquerade: {}
                name: default
          machine:
            type: q35
          memory:
            guest: 2Gi
          resources:
            requests:
              memory: 2Gi
        networks:
          - name: default
            pod: {}
        livenessProbe:
          initialDelaySeconds: 120
          periodSeconds: 20
          httpGet:
            port: 1500
          timeoutSeconds: 10
        volumes:
          - dataVolume:
              name: test-probe-rootdisk
            name: rootdisk
          - cloudInitNoCloud:
              userData: |
                #cloud-config
                ssh_pwauth: true
                disable_root: false
                chpasswd: {"list": "root:dangerous", expire: False}
                runcmd:
                  - sed -i "/#\?PermitRootLogin/s/^.*$/PermitRootLogin yes/g" /etc/ssh/sshd_config
                  - systemctl restart ssh.service
                  - dhclient -r && dhclient
                  - apt-get update && apt-get install -y ncat
                  - ["systemd-run", "--unit=httpserver", "ncat", "-klp", "1500", "-e", '/usr/bin/echo -e HTTP/1.1 200 OK\nContent-Length: 12\n\nHello World!']
            name: cloudinitdisk
3. Depending on the operating system (such as Ubuntu/Debian or CentOS), the userData configuration may differ. The main differences:

  • Package manager:

    Ubuntu/Debian uses apt-get as the package manager. CentOS uses yum as the package manager.

  • SSH service restart command:

    Ubuntu/Debian uses systemctl restart ssh.service. CentOS uses systemctl restart sshd.service (note that CentOS 7 and earlier use service sshd restart).

  • Installed packages:

    Ubuntu/Debian installs ncat. CentOS installs nmap-ncat (because ncat may not be available in CentOS's default repositories).
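  Put together, a CentOS variant of the runcmd section above might look like the following sketch (based on the differences just listed; it assumes CentOS 7+ with systemd):

    runcmd:
      - sed -i "/#\?PermitRootLogin/s/^.*$/PermitRootLogin yes/g" /etc/ssh/sshd_config
      - systemctl restart sshd.service   # 'service sshd restart' on CentOS 7 and earlier
      - dhclient -r && dhclient
      - yum install -y nmap-ncat         # the package provides the ncat binary
      - ["systemd-run", "--unit=httpserver", "ncat", "-klp", "1500", "-e", '/usr/bin/echo -e HTTP/1.1 200 OK\nContent-Length: 12\n\nHello World!']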

                                                              "},{"location":"admin/virtnest/vm/health-check.html#tcp-liveness-probe","title":"\u914d\u7f6e TCP Liveness Probe","text":"

                                                              \u5728 spec.template.spec \u4e2d\u914d\u7f6e livenessProbe.tcpSocket\u3002

                                                              \u70b9\u51fb\u67e5\u770b YAML \u793a\u4f8b\u914d\u7f6e
                                                              apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n    virtnest.io/alias-name: ''\n    virtnest.io/image-secret: ''\n    virtnest.io/image-source: docker\n    virtnest.io/os-image: release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  creationTimestamp: '2024-10-15T02:39:45Z'\n  finalizers:\n    - kubevirt.io/virtualMachineControllerFinalize\n  generation: 1\n  labels:\n    virtnest.io/os-family: Ubuntu\n    virtnest.io/os-version: '22.04'\n  name: test-probe\n  namespace: amamba-team\n  resourceVersion: '254032135'\n  uid: 6d92779d-7415-4721-8c7b-a2dde163d758\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        creationTimestamp: null\n        name: test-probe-rootdisk\n        namespace: amamba-team\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 10Gi\n          storageClassName: hwameistor-storage-lvm-hdd\n        source:\n          registry:\n            url: >-\n          docker://release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  runStrategy: Halted\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio\n              name: rootdisk\n            - disk:\n                bus: virtio\n              name: cloudinitdisk\n          interfaces:\n            - masquerade: {}\n              name: default\n        machine:\n          type: q35\n        memory:\n          guest: 2Gi\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n        - name: default\n          pod: {}\n      livenessProbe:\n        initialDelaySeconds: 120\n        periodSeconds: 20\n        tcpSocket:\n          port: 1500\n        timeoutSeconds: 10\n      volumes:\n        - dataVolume:\n            name: test-probe-rootdisk\n          name: rootdisk\n        - cloudInitNoCloud:\n            userData: |\n              #cloud-config\n              ssh_pwauth: true\n              disable_root: false\n              chpasswd: {\"list\": \"root:dangerous\", expire: False}\n              runcmd:\n                - sed -i \"/#\\?PermitRootLogin/s/^.*$/PermitRootLogin yes/g\" /etc/ssh/sshd_config\n                - systemctl restart ssh.service\n                - dhclient -r && dhclient\n                - apt-get update && apt-get install -y ncat\n                - [\"systemd-run\", \"--unit=httpserver\", \"ncat\", \"-klp\", \"1500\", \"-e\", '/usr/bin/echo -e HTTP/1.1 200 OK\\nContent-Length: 12\\n\\nHello World!']\n          name: cloudinitdisk\n
                                                              "},{"location":"admin/virtnest/vm/health-check.html#readiness-probes","title":"\u914d\u7f6e Readiness Probes","text":"

                                                              \u5728 spec.template.spec \u4e2d\u914d\u7f6e readiness\u3002

                                                              \u70b9\u51fb\u67e5\u770b YAML \u793a\u4f8b\u914d\u7f6e
                                                              apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n    virtnest.io/alias-name: ''\n    virtnest.io/image-secret: ''\n    virtnest.io/image-source: docker\n    virtnest.io/os-image: release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  creationTimestamp: '2024-10-15T02:39:45Z'\n  finalizers:\n    - kubevirt.io/virtualMachineControllerFinalize\n  generation: 1\n  labels:\n    virtnest.io/os-family: Ubuntu\n    virtnest.io/os-version: '22.04'\n  name: test-probe\n  namespace: amamba-team\n  resourceVersion: '254032135'\n  uid: 6d92779d-7415-4721-8c7b-a2dde163d758\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        creationTimestamp: null\n        name: test-probe-rootdisk\n        namespace: amamba-team\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 10Gi\n          storageClassName: hwameistor-storage-lvm-hdd\n        source:\n          registry:\n            url: >-\n          docker://release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  runStrategy: Halted\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio\n              name: rootdisk\n            - disk:\n                bus: virtio\n              name: cloudinitdisk\n          interfaces:\n            - masquerade: {}\n              name: default\n        machine:\n          type: q35\n        memory:\n          guest: 2Gi\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n        - name: default\n          pod: {}\n      readiness:\n        initialDelaySeconds: 120\n        periodSeconds: 20\n        httpGet:\n          port: 1500\n        timeoutSeconds: 10\n      volumes:\n        - dataVolume:\n            name: test-probe-rootdisk\n          name: rootdisk\n        - cloudInitNoCloud:\n            userData: |\n              #cloud-config\n              ssh_pwauth: true\n              disable_root: false\n              chpasswd: {\"list\": \"root:dangerous\", expire: False}\n              runcmd:\n                - sed -i \"/#\\?PermitRootLogin/s/^.*$/PermitRootLogin yes/g\" /etc/ssh/sshd_config\n                - systemctl restart ssh.service\n                - dhclient -r && dhclient\n                - apt-get update && apt-get install -y ncat\n                - [\"systemd-run\", \"--unit=httpserver\", \"ncat\", \"-klp\", \"1500\", \"-e\", '/usr/bin/echo -e HTTP/1.1 200 OK\\nContent-Length: 12\\n\\nHello World!']\n          name: cloudinitdisk\n
                                                              "},{"location":"admin/virtnest/vm/live-migration.html","title":"\u5b9e\u65f6\u8fc1\u79fb","text":"

                                                              \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u5c06\u4e91\u4e3b\u673a\u4ece\u4e00\u4e2a\u8282\u70b9\u79fb\u52a8\u5230\u53e6\u4e00\u4e2a\u8282\u70b9\u3002

                                                              \u5f53\u8282\u70b9\u7ef4\u62a4\u6216\u8005\u5347\u7ea7\u65f6\uff0c\u7528\u6237\u53ef\u4ee5\u5c06\u6b63\u5728\u8fd0\u884c\u7684\u4e91\u4e3b\u673a\u65e0\u7f1d\u8fc1\u79fb\u5230\u5176\u4ed6\u7684\u8282\u70b9\u4e0a\uff0c\u540c\u65f6\u53ef\u4ee5\u4fdd\u8bc1\u4e1a\u52a1\u7684\u8fde\u7eed\u6027\u548c\u6570\u636e\u7684\u5b89\u5168\u6027\u3002

                                                              "},{"location":"admin/virtnest/vm/live-migration.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u4f7f\u7528\u5b9e\u65f6\u8fc1\u79fb\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u4e91\u4e3b\u673a\u5fc5\u987b\u5904\u4e8e\u8fd0\u884c\u72b6\u6001\u624d\u80fd\u8fdb\u884c\u5b9e\u65f6\u8fc1\u79fb\u3002
                                                              • \u786e\u4fdd\u60a8\u7684 PVC \u8bbf\u95ee\u6a21\u5f0f\u4e3a ReadWriteMany\uff0c\u4ee5\u4fbf\u4f7f\u7528\u5b9e\u65f6\u8fc1\u79fb\u529f\u80fd\u3002
                                                              • \u786e\u4fdd\u96c6\u7fa4\u5185\u81f3\u5c11\u6709\u4e24\u4e2a\u8282\u70b9\u53ef\u4f9b\u4f7f\u7528\u3002
                                                              "},{"location":"admin/virtnest/vm/live-migration.html#_3","title":"\u5b9e\u65f6\u8fc1\u79fb","text":"
                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u4e91\u4e3b\u673a \uff0c\u8fdb\u5165\u5217\u8868\u9875\u9762\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u8fd0\u884c\u72b6\u6001\u4e0b\u7684\u4e91\u4e3b\u673a\u8fdb\u884c\u8fc1\u79fb\u52a8\u4f5c\u3002\u76ee\u524d\u4e91\u4e3b\u673a\u6240\u5728\u8282\u70b9\u4e3a controller-node-3 \u3002

                                                              2. \u5f39\u51fa\u5f39\u6846\uff0c\u63d0\u793a\u5728\u5b9e\u65f6\u8fc1\u79fb\u671f\u95f4\uff0c\u6b63\u5728\u8fd0\u884c\u7684\u4e91\u4e3b\u673a\u5b9e\u4f8b\u4f1a\u79fb\u52a8\u5230\u53e6\u4e00\u4e2a\u8282\u70b9\uff0c\u53ef\u4ee5\u9009\u62e9\u6307\u5b9a\u8282\u70b9\u8fc1\u79fb\uff0c\u4e5f\u53ef\u4ee5\u968f\u673a\u8fc1\u79fb\uff0c\u8bf7\u786e\u4fdd\u5176\u4ed6\u8282\u70b9\u8d44\u6e90\u5145\u8db3\u3002

                                                              3. \u8fc1\u79fb\u9700\u8981\u4e00\u6bb5\u65f6\u95f4\uff0c\u8bf7\u8010\u5fc3\u7b49\u5f85\uff0c\u6210\u529f\u540e\u53ef\u4ee5\u5728\u4e91\u4e3b\u673a\u5217\u8868\u5185\u67e5\u770b\u8282\u70b9\u4fe1\u606f\uff0c\u6b64\u65f6\u8282\u70b9\u8fc1\u79fb\u5230 controller-node-1 \u3002
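Outside the UI, the same migration can be triggered against the KubeVirt API by creating a VirtualMachineInstanceMigration object. A minimal sketch; the VM name test-probe and namespace amamba-team are taken from the probe example above and stand in for your own VM:

    apiVersion: kubevirt.io/v1
    kind: VirtualMachineInstanceMigration
    metadata:
      name: migrate-test-probe   # hypothetical name for the migration object
      namespace: amamba-team     # namespace of the VM
    spec:
      vmiName: test-probe        # the running VMI to migrate

The scheduler picks the target node; progress can be followed in the VM list or in the migration object's status.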

                                                              "},{"location":"admin/virtnest/vm/migratiom.html","title":"\u96c6\u7fa4\u5185\u51b7\u8fc1\u79fb","text":"

                                                              \u672c\u6587\u5c06\u4ecb\u7ecd\u5728\u5173\u673a\u72b6\u6001\u4e0b\u5982\u4f55\u5c06\u4e91\u4e3b\u673a\u5728\u540c\u4e00\u96c6\u7fa4\u5185\u4ece\u4e00\u4e2a\u8282\u70b9\u79fb\u52a8\u5230\u53e6\u4e00\u4e2a\u8282\u70b9\u3002

                                                              \u51b7\u8fc1\u79fb\u7684\u4e3b\u8981\u7279\u70b9\u662f\uff0c\u4e91\u4e3b\u673a\u5728\u8fc1\u79fb\u8fc7\u7a0b\u4e2d\u4f1a\u5904\u4e8e\u79bb\u7ebf\u72b6\u6001\uff0c\u8fd9\u53ef\u80fd\u4f1a\u5bf9\u4e1a\u52a1\u8fde\u7eed\u6027\u4ea7\u751f\u5f71\u54cd\u3002\u56e0\u6b64\uff0c \u5728\u5b9e\u65bd\u51b7\u8fc1\u79fb\u65f6\u9700\u8981\u4ed4\u7ec6\u89c4\u5212\u8fc1\u79fb\u65f6\u95f4\u7a97\u53e3\uff0c\u5e76\u8003\u8651\u4e1a\u52a1\u9700\u6c42\u548c\u7cfb\u7edf\u53ef\u7528\u6027\u3002\u901a\u5e38\uff0c\u51b7\u8fc1\u79fb\u9002\u7528\u4e8e\u5bf9\u505c\u673a\u65f6\u95f4\u8981\u6c42\u4e0d\u662f\u975e\u5e38\u4e25\u683c\u7684\u573a\u666f\u3002

                                                              "},{"location":"admin/virtnest/vm/migratiom.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u4f7f\u7528\u51b7\u8fc1\u79fb\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u4e91\u4e3b\u673a\u5fc5\u987b\u5904\u4e8e\u5173\u673a\u72b6\u6001\u624d\u80fd\u8fdb\u884c\u51b7\u8fc1\u79fb\u3002
                                                              "},{"location":"admin/virtnest/vm/migratiom.html#_3","title":"\u51b7\u8fc1\u79fb","text":"
                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u4e91\u4e3b\u673a \uff0c\u8fdb\u5165\u5217\u8868\u9875\u9762\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c \u53ef\u4ee5\u5bf9\u5173\u673a\u72b6\u6001\u4e0b\u7684\u4e91\u4e3b\u673a\u8fdb\u884c\u8fc1\u79fb\u52a8\u4f5c\u3002\u4e91\u4e3b\u673a\u5728\u5173\u673a\u72b6\u6001\u4e0b\u65f6\u65e0\u6cd5\u67e5\u770b\u6240\u5728\u8282\u70b9\uff0c\u9700\u8981\u63d0\u524d\u89c4\u5212\u6216\u8005\u5f00\u673a\u67e5\u8be2\u3002

                                                                Note

                                                                \u5982\u679c\u60a8\u5728\u539f\u59cb\u8282\u70b9\u7684\u5b58\u50a8\u6c60\u4e2d\u4f7f\u7528\u4e86 local-path\uff0c\u8de8\u8282\u70b9\u8fc1\u79fb\u65f6\u53ef\u80fd\u51fa\u73b0\u95ee\u9898\uff0c\u8bf7\u8c28\u614e\u9009\u62e9\u3002

                                                              2. \u70b9\u51fb\u8fc1\u79fb\u540e\uff0c\u63d0\u793a\u5728\u8fc1\u79fb\u671f\u95f4\uff0c\u53ef\u4ee5\u9009\u62e9\u6307\u5b9a\u8282\u70b9\u8fc1\u79fb\uff0c\u4e5f\u53ef\u4ee5\u968f\u673a\u8fc1\u79fb\uff0c\u82e5\u9700\u8981\u4fee\u6539\u5b58\u50a8\u6c60\uff0c \u9700\u8981\u786e\u4fdd\u76ee\u6807\u8282\u70b9\u5185\u6709\u53ef\u7528\u5b58\u50a8\u6c60\u3002\u540c\u65f6\u9700\u8981\u76ee\u6807\u8282\u70b9\u8d44\u6e90\u5145\u8db3\uff0c\u8fc1\u79fb\u8fc7\u7a0b\u8017\u8d39\u65f6\u95f4\u8f83\u957f\uff0c\u8bf7\u8010\u5fc3\u7b49\u5f85\u3002

                                                              3. \u8fc1\u79fb\u9700\u8981\u4e00\u6bb5\u65f6\u95f4\uff0c\u8bf7\u8010\u5fc3\u7b49\u5f85\uff0c\u6210\u529f\u540e\u9700\u8981\u91cd\u542f\u67e5\u770b\u662f\u5426\u8fc1\u79fb\u6210\u529f\u3002\u672c\u793a\u4f8b\u5df2\u7ecf\u5f00\u673a\u67e5\u770b\u8fc1\u79fb\u6548\u679c\u3002
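One way to confirm placement after powering the VM back on is to query the VirtualMachineInstance, whose NODENAME column shows where it landed. A sketch; <vm-namespace> is a placeholder for the namespace holding your VM:

    kubectl get vmi -n <vm-namespace> -o wide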

                                                              "},{"location":"admin/virtnest/vm/monitor.html","title":"\u4e91\u4e3b\u673a\u76d1\u63a7","text":"

                                                              \u4e91\u4e3b\u673a\u57fa\u4e8e Kubevirt \u5f00\u6e90\u7684 Grafana Dashboard\uff0c\u4e3a\u4e86\u6bcf\u4e00\u4e2a\u4e91\u4e3b\u673a\u751f\u6210\u4e86\u76d1\u63a7\u770b\u677f

                                                              \u4e91\u4e3b\u673a\u7684\u76d1\u63a7\u4fe1\u606f\u53ef\u4ee5\u66f4\u597d\u7684\u4e86\u89e3\u4e91\u4e3b\u673a\u7684\u8d44\u6e90\u6d88\u8017\u60c5\u51b5\uff0c\u6bd4\u5982 CPU\u3001\u5185\u5b58\u3001\u5b58\u50a8\u548c\u7f51\u7edc\u7b49\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\uff0c \u4ece\u800c\u8fdb\u884c\u8d44\u6e90\u7684\u4f18\u5316\u548c\u89c4\u5212\uff0c\u63d0\u5347\u6574\u4f53\u7684\u8d44\u6e90\u5229\u7528\u6548\u7387\u3002

                                                              "},{"location":"admin/virtnest/vm/monitor.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u67e5\u770b\u4e91\u4e3b\u673a\u76d1\u63a7\u7684\u76f8\u5173\u4fe1\u606f\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u5728\u4e91\u4e3b\u673a\u6240\u5728\u7684\u540c\u4e00\u96c6\u7fa4\u5185\u5b89\u88c5 Insight-agent \u7ec4\u4ef6\uff0c\u5e76\u4e14\u4fdd\u8bc1 Insight-agent \u7ec4\u4ef6\u6b63\u5e38\u53ef\u7528\u3002
                                                              "},{"location":"admin/virtnest/vm/monitor.html#_3","title":"\u4e91\u4e3b\u673a\u76d1\u63a7","text":"

                                                              \u8fdb\u5165\u4e91\u4e3b\u673a\u7684\u8be6\u7ec6\u4fe1\u606f\u5e76\u70b9\u51fb \u6982\u89c8 \uff0c\u5373\u53ef\u67e5\u770b\u4e91\u4e3b\u673a\u7684\u76d1\u63a7\u5185\u5bb9\u3002\u8bf7\u6ce8\u610f\uff0c\u82e5\u672a\u5b89\u88c5 Insight-agent \u7ec4\u4ef6\uff0c\u5219\u65e0\u6cd5\u83b7\u53d6\u76d1\u63a7\u4fe1\u606f\u3002\u4ee5\u4e0b\u662f\u8be6\u7ec6\u4fe1\u606f\uff1a

                                                              • CPU \u603b\u91cf\u3001CPU \u4f7f\u7528\u91cf\u3001\u5185\u5b58\u603b\u91cf\u3001\u5185\u5b58\u4f7f\u7528\u91cf\u3002

                                                              • CPU \u4f7f\u7528\u7387\uff1a\u6307\u5f53\u524d\u4e91\u4e3b\u673a\u6b63\u5728\u4f7f\u7528\u7684 CPU \u8d44\u6e90\u7684\u767e\u5206\u6bd4\uff1b

                                                              • \u5185\u5b58\u4f7f\u7528\u7387\uff1a\u6307\u5f53\u524d\u4e91\u4e3b\u673a\u6b63\u5728\u4f7f\u7528\u7684\u5185\u5b58\u8d44\u6e90\u5360\u603b\u53ef\u7528\u5185\u5b58\u7684\u767e\u5206\u6bd4\u3002

                                                              • \u7f51\u7edc\u6d41\u91cf\uff1a\u6307\u4e91\u4e3b\u673a\u5728\u7279\u5b9a\u65f6\u95f4\u6bb5\u5185\u53d1\u9001\u548c\u63a5\u6536\u7684\u7f51\u7edc\u6570\u636e\u91cf\uff1b

                                                              • \u7f51\u7edc\u4e22\u5305\u7387\uff1a\u6307\u5728\u6570\u636e\u4f20\u8f93\u8fc7\u7a0b\u4e2d\u4e22\u5931\u7684\u6570\u636e\u5305\u5360\u603b\u53d1\u9001\u6570\u636e\u5305\u6570\u91cf\u7684\u6bd4\u4f8b\u3002

                                                              • \u7f51\u7edc\u9519\u8bef\u7387\uff1a\u6307\u5728\u7f51\u7edc\u4f20\u8f93\u8fc7\u7a0b\u4e2d\u53d1\u751f\u7684\u9519\u8bef\u7684\u6bd4\u7387\uff1b

                                                              • \u78c1\u76d8\u541e\u5410\uff1a\u6307\u4e91\u4e3b\u673a\u7cfb\u7edf\u5728\u4e00\u5b9a\u65f6\u95f4\u5185\u8bfb\u53d6\u548c\u5199\u5165\u78c1\u76d8\u7684\u901f\u5ea6\u548c\u80fd\u529b\u3002

                                                              • IOPS\uff1a\u6307\u7684\u662f\u5728\u4e00\u79d2\u949f\u5185\u4e91\u4e3b\u673a\u7cfb\u7edf\u8fdb\u884c\u7684\u8f93\u5165/\u8f93\u51fa\u64cd\u4f5c\u7684\u6b21\u6570\u3002\u78c1\u76d8\u5ef6\u8fdf\uff1a\u6307\u4e91\u4e3b\u673a\u7cfb\u7edf\u5728\u8fdb\u884c\u78c1\u76d8\u8bfb\u5199\u64cd\u4f5c\u65f6\u6240\u7ecf\u5386\u7684\u65f6\u95f4\u5ef6\u8fdf\u3002

                                                              "},{"location":"admin/virtnest/vm/scheduled-snapshot.html","title":"\u5b9a\u65f6\u5feb\u7167","text":"

                                                              \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u4e3a\u4e91\u4e3b\u673a\u5b9a\u65f6\u521b\u5efa\u5feb\u7167\u3002

                                                              \u7528\u6237\u53ef\u4ee5\u4e3a\u4e91\u4e3b\u673a\u5b9a\u65f6\u521b\u5efa\u5feb\u7167\uff0c\u80fd\u591f\u4e3a\u6570\u636e\u63d0\u4f9b\u6301\u7eed\u7684\u4fdd\u62a4\uff0c\u786e\u4fdd\u5728\u53d1\u751f\u6570\u636e\u4e22\u5931\u3001\u635f\u574f\u6216\u5220\u9664\u7684\u60c5\u51b5\u4e0b\u53ef\u4ee5\u8fdb\u884c\u6709\u6548\u7684\u6570\u636e\u6062\u590d\u3002

                                                              "},{"location":"admin/virtnest/vm/scheduled-snapshot.html#_2","title":"\u5b9a\u65f6\u5feb\u7167\u6b65\u9aa4","text":"
                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \uff0c\u5728\u5217\u8868\u9875\u9762\uff0c\u9009\u62e9\u76ee\u6807\u4e91\u4e3b\u673a\u6240\u5728\u7684\u96c6\u7fa4\u3002 \u8fdb\u5165\u96c6\u7fa4\u540e\uff0c\u70b9\u51fb \u5de5\u4f5c\u8d1f\u8f7d -> \u5b9a\u65f6\u4efb\u52a1 \uff0c\u9009\u62e9 YAML \u521b\u5efa \u5b9a\u65f6\u4efb\u52a1\uff0c\u53c2\u8003\u4ee5\u4e0b YAML \u793a\u4f8b\u53ef\u4e3a\u6307\u5b9a\u4e91\u4e3b\u673a\u5b9a\u65f6\u521b\u5efa\u5feb\u7167\u3002

                                                                \u70b9\u51fb\u67e5\u770b\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\u7684 YAML \u793a\u4f8b
apiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: xxxxx-xxxxx-cronjob # CronJob name; customizable\n  namespace: virtnest-system # do not change this namespace\nspec:\n  schedule: \"5 * * * *\" # adjust the execution schedule as needed\n  concurrencyPolicy: Allow\n  suspend: false\n  successfulJobsHistoryLimit: 10\n  failedJobsHistoryLimit: 3\n  startingDeadlineSeconds: 60\n  jobTemplate:\n    spec:\n      template:\n        metadata:\n          labels:\n            virtnest.io/vm: xxxx # change to the name of the VM to snapshot\n            virtnest.io/namespace: xxxx # change to the namespace where the VM lives\n        spec:\n          serviceAccountName: kubevirt-operator\n          containers:\n            - name: snapshot-job\n              image: release.daocloud.io/virtnest/tools:v0.1.5 # in an offline environment, change the registry to the corresponding seed (bootstrap) cluster registry address\n              imagePullPolicy: IfNotPresent\n              env:\n                - name: NS\n                  valueFrom:\n                    fieldRef:\n                      fieldPath: metadata.labels['virtnest.io/namespace']\n                - name: VM\n                  valueFrom:\n                    fieldRef:\n                      fieldPath: metadata.labels['virtnest.io/vm']\n              command:\n                - /bin/sh\n                - -c\n                - |\n                  export SUFFIX=$(date +\"%Y%m%d-%H%M%S\")\n                  cat <<EOF | kubectl apply -f -\n                  apiVersion: snapshot.kubevirt.io/v1alpha1\n                  kind: VirtualMachineSnapshot\n                  metadata:\n                    name: $(VM)-snapshot-$SUFFIX\n                    namespace: $(NS)\n                  spec:\n                    source:\n                      apiGroup: kubevirt.io\n                      kind: VirtualMachine\n                      name: $(VM)\n                  EOF\n          restartPolicy: OnFailure\n
2. Once the CronJob is created and runs successfully, click Cloud VMs, pick the target VM on the list page, and open its details to view the snapshot list. The snapshots can also be listed from the command line, as sketched below.
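As a quick check that the CronJob is producing snapshots, the VirtualMachineSnapshot objects it creates can be listed directly. A sketch; <vm-namespace> is a placeholder for the namespace holding your VM:

    kubectl get virtualmachinesnapshot -n <vm-namespace>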

                                                              "},{"location":"admin/virtnest/vm/snapshot.html","title":"\u5feb\u7167\u7ba1\u7406","text":"

                                                              \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u4e3a\u4e91\u4e3b\u673a\u521b\u5efa\u5feb\u7167\uff0c\u5e76\u4ece\u5feb\u7167\u4e2d\u6062\u590d\u7684\u3002

                                                              \u7528\u6237\u53ef\u4ee5\u4e3a\u4e91\u4e3b\u673a\u521b\u5efa\u5feb\u7167\uff0c\u4fdd\u5b58\u4e91\u4e3b\u673a\u5f53\u4e0b\u7684\u72b6\u6001\uff0c\u4e00\u4e2a\u5feb\u7167\u53ef\u4ee5\u652f\u6301\u591a\u6b21\u6062\u590d\uff0c\u6bcf\u6b21\u6062\u590d\u65f6\uff0c \u4e91\u4e3b\u673a\u5c06\u88ab\u8fd8\u539f\u5230\u5feb\u7167\u521b\u5efa\u65f6\u7684\u72b6\u6001\u3002\u901a\u5e38\u53ef\u4ee5\u7528\u4e8e\u5907\u4efd\u3001\u6062\u590d\u3001\u56de\u6eda\u7b49\u573a\u666f\u3002

                                                              "},{"location":"admin/virtnest/vm/snapshot.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u4f7f\u7528\u5feb\u7167\u529f\u80fd\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u53ea\u6709\u975e\u9519\u8bef\u72b6\u6001\u4e0b\u7684\u4e91\u4e3b\u673a\u624d\u80fd\u4f7f\u7528\u5feb\u7167\u529f\u80fd\u3002
                                                              • \u5b89\u88c5 Snapshot CRDs\u3001Snapshot Controller\u3001CSI Driver\u3002 \u5177\u4f53\u5b89\u88c5\u6b65\u9aa4\u53ef\u53c2\u8003 CSI Snapshotter\u3002
                                                              • \u7b49\u5f85 snapshot-controller \u7ec4\u4ef6\u51c6\u5907\u5c31\u7eea, \u8be5\u7ec4\u4ef6\u4f1a\u76d1\u63a7 VolumeSnapshot \u548c VolumeSnapshotContent \u76f8\u5173\u4e8b\u4ef6\uff0c\u5e76\u89e6\u53d1\u76f8\u5173\u64cd\u4f5c\u3002
                                                              • \u7b49\u5f85 CSI Driver \u51c6\u5907\u5c31\u7eea, \u786e\u4fdd csi-snapshotter sidecar \u8dd1\u5728 CSI Driver \u91cc\uff0ccsi-snapshotter sidecar \u4f1a\u76d1\u63a7 VolumeSnapshotContent \u76f8\u5173\u4e8b\u4ef6\uff0c\u5e76\u89e6\u53d1\u76f8\u5173\u64cd\u4f5c\u3002\u5982 POC \u4f7f\u7528\u7684\u5b58\u50a8\u662f rook-ceph\uff0c\u53ef\u53c2\u8003 ceph-csi-snapshot
                                                                • \u5982\u5b58\u50a8\u662f Rook-Ceph\uff0c\u53ef\u53c2\u8003 ceph-csi-snapshot
                                                                • \u5982\u5b58\u50a8\u662f HwameiStor\uff0c\u53ef\u53c2\u8003 huameistor-snapshot
                                                              "},{"location":"admin/virtnest/vm/snapshot.html#_3","title":"\u521b\u5efa\u5feb\u7167","text":"
                                                              1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u5bb9\u5668\u7ba1\u7406 \uff0c\u7136\u540e\u70b9\u51fb \u4e91\u4e3b\u673a \uff0c\u8fdb\u5165\u5217\u8868\u9875\u9762\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u975e\u9519\u8bef\u72b6\u6001\u4e0b\u7684\u4e91\u4e3b\u673a\u6267\u884c\u5feb\u7167\u64cd\u4f5c\u3002

                                                              2. \u5f39\u51fa\u5f39\u6846\uff0c\u9700\u8981\u586b\u5199\u5feb\u7167\u7684\u540d\u79f0\u548c\u63cf\u8ff0\uff0c\u521b\u5efa\u5feb\u7167\u5927\u6982\u9700\u8981\u51e0\u5206\u949f\u7684\u65f6\u95f4\uff0c\u5728\u6b64\u671f\u95f4\u65e0\u6cd5\u5bf9\u4e91\u4e3b\u673a\u505a\u4efb\u4f55\u64cd\u4f5c\u3002

                                                              3. \u521b\u5efa\u6210\u529f\u540e\u53ef\u4ee5\u5728\u4e91\u4e3b\u673a\u8be6\u60c5\u5185\u67e5\u770b\u5feb\u7167\u4fe1\u606f\uff0c\u652f\u6301\u7f16\u8f91\u63cf\u8ff0\u3001\u4ece\u5feb\u7167\u4e2d\u6062\u590d\u3001\u5220\u9664\u7b49\u64cd\u4f5c\u3002

                                                              "},{"location":"admin/virtnest/vm/snapshot.html#_4","title":"\u4ece\u5feb\u7167\u4e2d\u6062\u590d","text":"
                                                              1. \u70b9\u51fb \u4ece\u5feb\u7167\u6062\u590d \uff0c\u9700\u8981\u586b\u5199\u4e91\u4e3b\u673a\u6062\u590d\u8bb0\u5f55\u7684\u540d\u79f0\uff0c\u540c\u65f6\u6062\u590d\u64cd\u4f5c\u53ef\u80fd\u9700\u8981\u4e00\u4e9b\u65f6\u95f4\u6765\u5b8c\u6210\uff0c\u5177\u4f53\u53d6\u51b3\u4e8e\u5feb\u7167\u7684\u5927\u5c0f\u548c\u5176\u4ed6\u56e0\u7d20\u3002\u6062\u590d\u6210\u529f\u540e\uff0c\u4e91\u4e3b\u673a\u5c06\u56de\u5230\u5feb\u7167\u521b\u5efa\u65f6\u7684\u72b6\u6001\u3002

                                                              2. \u4e00\u6bb5\u65f6\u95f4\u540e\uff0c\u4e0b\u62c9\u5feb\u7167\u4fe1\u606f\uff0c\u53ef\u4ee5\u67e5\u770b\u5f53\u524d\u5feb\u7167\u7684\u6240\u6709\u6062\u590d\u8bb0\u5f55\uff0c\u5e76\u4e14\u652f\u6301\u5c55\u793a\u5b9a\u4f4d\u6062\u590d\u7684\u4f4d\u7f6e\u3002
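Under the hood, a restore corresponds to a VirtualMachineRestore object in KubeVirt's snapshot API. A minimal sketch, assuming a VM named test-probe and a snapshot named test-probe-snapshot (both hypothetical):

    apiVersion: snapshot.kubevirt.io/v1alpha1
    kind: VirtualMachineRestore
    metadata:
      name: restore-test-probe        # hypothetical name of the restore record
      namespace: amamba-team          # namespace of the VM
    spec:
      target:
        apiGroup: kubevirt.io
        kind: VirtualMachine
        name: test-probe              # VM to roll back
      virtualMachineSnapshotName: test-probe-snapshot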

                                                              "},{"location":"admin/virtnest/vm/vm-network.html","title":"\u4e91\u4e3b\u673a\u7f51\u7edc","text":"

                                                              \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u5728\u521b\u5efa\u4e91\u4e3b\u673a\u65f6\uff0c\u914d\u7f6e\u7f51\u7edc\u4fe1\u606f\u3002

                                                              \u5728\u4e91\u4e3b\u673a\u4e2d\uff0c\u7f51\u7edc\u7ba1\u7406\u662f\u4e00\u4e2a\u5173\u952e\u7684\u90e8\u5206\uff0c\u5b83\u4f7f\u5f97\u6211\u4eec\u80fd\u591f\u5728 Kubernetes \u73af\u5883\u4e2d\u7ba1\u7406\u548c\u914d\u7f6e\u4e91\u4e3b\u673a\u7684\u7f51\u7edc\u8fde\u63a5\uff0c\u53ef\u4ee5\u6839\u636e\u4e0d\u540c\u7684\u9700\u6c42\u548c\u573a\u666f\u6765\u8fdb\u884c\u914d\u7f6e\uff0c\u5b9e\u73b0\u66f4\u7075\u6d3b\u548c\u591a\u6837\u5316\u7684\u7f51\u7edc\u67b6\u6784\u3002

                                                              1. \u5355\u7f51\u5361\u573a\u666f\uff1a\u5bf9\u4e8e\u4e00\u4e9b\u7b80\u5355\u7684\u53ea\u9700\u8981\u57fa\u672c\u7f51\u7edc\u8fde\u63a5\u7684\u5e94\u7528\uff0c\u6216\u8005\u5b58\u5728\u8d44\u6e90\u9650\u5236\u7684\u65f6\u5019\uff0c\u4f7f\u7528\u5355\u7f51\u5361\u53ef\u4ee5\u8282\u7ea6\u7f51\u7edc\u8d44\u6e90\uff0c\u5e76\u907f\u514d\u8d44\u6e90\u7684\u6d6a\u8d39\u3002
                                                              2. \u591a\u7f51\u5361\u573a\u666f\uff1a\u5f53\u9700\u8981\u5b9e\u73b0\u4e0d\u540c\u7f51\u7edc\u73af\u5883\u4e4b\u95f4\u7684\u5b89\u5168\u9694\u79bb\u65f6\uff0c\u53ef\u4ee5\u4f7f\u7528\u591a\u7f51\u5361\u6765\u5212\u5206\u4e0d\u540c\u7684\u7f51\u7edc\u533a\u57df\u3002\u540c\u65f6\u4e5f\u53ef\u4ee5\u5bf9\u63a7\u5236\u548c\u6d41\u91cf\u8fdb\u884c\u7ba1\u7406\u3002
                                                              "},{"location":"admin/virtnest/vm/vm-network.html#_2","title":"\u7f51\u7edc\u914d\u7f6e\u524d\u63d0","text":"

                                                              \u5728\u4f7f\u7528\u4e91\u4e3b\u673a\u7f51\u7edc\u529f\u80fd\u4e4b\u524d\uff0c\u9700\u8981\u6839\u636e\u7f51\u7edc\u6a21\u5f0f\u7684\u4e0d\u540c\u914d\u7f6e\u4e0d\u540c\u7684\u4fe1\u606f\uff1a

                                                              • \u9009\u62e9 Bridge \u7f51\u7edc\u6a21\u5f0f\u65f6\u9700\u8981\u63d0\u524d\u914d\u7f6e\u4e00\u4e9b\u4fe1\u606f\uff1a

                                                                • \u5728\u4e3b\u673a\u8282\u70b9\u4e0a\u5b89\u88c5\u5e76\u8fd0\u884c Open vSwitch, \u53ef\u53c2\u8003\u8fd9\u91cc
                                                                • \u5728\u4e3b\u673a\u8282\u70b9\u4e0a\u914d\u7f6e Open vSwitch \u7f51\u6865, \u53ef\u53c2\u8003\u8fd9\u91cc
                                                                • \u5b89\u88c5 Spiderpool\uff0c\u53ef\u53c2\u8003\u5b89\u88c5 Spiderpool, Spiderpool \u9ed8\u8ba4\u4f1a\u628a Multus CNI \u548c Ovs CNI \u90fd\u88c5\u4e0a
                                                                • \u521b\u5efa ovs \u7c7b\u578b\u7684 Multus CR\uff0c\u53ef\u53c2\u8003\u754c\u9762\u521b\u5efa Multus CR \u6216 YAML \u521b\u5efa Multus CR
                                                                • \u521b\u5efa\u5b50\u7f51\u53ca IP \u6c60\uff0c\u53c2\u8003\u521b\u5efa\u5b50\u7f51\u548c IP \u6c60
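For reference, an ovs-type network attachment typically looks like the following NetworkAttachmentDefinition. This is a minimal sketch, not the exact CR the platform generates; the names ovs-vlan100 and br1 are assumptions for an existing Open vSwitch bridge:

    apiVersion: k8s.cni.cncf.io/v1
    kind: NetworkAttachmentDefinition
    metadata:
      name: ovs-vlan100        # hypothetical name
      namespace: kube-system
    spec:
      # "bridge" must match the OVS bridge configured on the host nodes
      config: |
        {
          "cniVersion": "0.3.1",
          "type": "ovs",
          "bridge": "br1"
        }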
                                                              "},{"location":"admin/virtnest/vm/vm-network.html#_3","title":"\u7f51\u7edc\u914d\u7f6e","text":"
                                                              1. \u914d\u7f6e\u4e91\u4e3b\u673a\u7684\u7f51\u7edc\u914d\u7f6e\uff0c\u53ef\u4ee5\u6839\u636e\u8868\u683c\u4fe1\u606f\u6309\u9700\u7ec4\u5408\u3002

                                                                \u7f51\u7edc\u6a21\u5f0f CNI \u662f\u5426\u5b89\u88c5 Spiderpool \u7f51\u5361\u6a21\u5f0f \u56fa\u5b9a IP \u5b9e\u65f6\u8fc1\u79fb Masquerade\uff08NAT\uff09 Calico \u274c \u5355\u7f51\u5361 \u274c \u2705 Cilium \u274c \u5355\u7f51\u5361 \u274c \u2705 Flannel \u274c \u5355\u7f51\u5361 \u274c \u2705 Bridge\uff08\u6865\u63a5\uff09 OVS \u2705 \u591a\u7f51\u5361 \u2705 \u2705

2. Network mode: either Masquerade (NAT) or Bridge; Bridge mode becomes available only after the Spiderpool component is installed.

  1. Masquerade (NAT) is the default network mode, using the default eth0 NIC.

  2. If the Spiderpool component is installed in the cluster, Bridge mode can be selected; it supports multiple NICs.

    Selecting Bridge mode requires the prerequisites listed above.

3. Add a NIC

  1. In Bridge mode, NICs can be added manually. Click Add NIC and configure the NIC's IP pool. Pick a Multus CR matching the network mode; if none exists, create one yourself.

  2. With the Use Default IP Pool switch on, the default IP pool from the Multus CR configuration is used. With the switch off, pick an IP pool manually.

                                                              "},{"location":"admin/virtnest/vm/vm-sc.html","title":"\u4e91\u4e3b\u673a\u5b58\u50a8","text":"

                                                              \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u5728\u521b\u5efa\u4e91\u4e3b\u673a\u65f6\uff0c\u914d\u7f6e\u5b58\u50a8\u4fe1\u606f\u3002

                                                              \u5b58\u50a8\u548c\u4e91\u4e3b\u673a\u7684\u529f\u80fd\u606f\u606f\u76f8\u5173\uff0c\u4e3b\u8981\u662f\u901a\u8fc7\u4f7f\u7528 Kubernetes \u7684\u6301\u4e45\u5377\u548c\u5b58\u50a8\u7c7b\uff0c\u63d0\u4f9b\u4e86\u7075\u6d3b\u4e14\u53ef\u6269\u5c55\u7684\u4e91\u4e3b\u673a\u5b58\u50a8\u80fd\u529b\u3002 \u6bd4\u5982\u4e91\u4e3b\u673a\u955c\u50cf\u5b58\u50a8\u5728 PVC \u91cc\uff0c\u652f\u6301\u548c\u5176\u4ed6\u6570\u636e\u4e00\u8d77\u514b\u9686\u3001\u5feb\u7167\u7b49

                                                              "},{"location":"admin/virtnest/vm/vm-sc.html#_2","title":"\u90e8\u7f72\u4e0d\u540c\u7684\u5b58\u50a8","text":"

                                                              \u5728\u4f7f\u7528\u4e91\u4e3b\u673a\u5b58\u50a8\u529f\u80fd\u4e4b\u524d\uff0c\u9700\u8981\u6839\u636e\u9700\u8981\u90e8\u7f72\u4e0d\u540c\u7684\u5b58\u50a8\uff1a

                                                              1. \u53c2\u8003\u90e8\u7f72 hwameistor\uff0c \u6216\u8005\u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7684 Helm \u6a21\u677f\u4e2d\u5b89\u88c5 hwameistor-operator\u3002
                                                              2. \u53c2\u8003\u90e8\u7f72 rook-ceph
                                                              3. \u90e8\u7f72 localpath\uff0c\u4f7f\u7528\u547d\u4ee4 kubectl apply -f \u521b\u5efa\u4ee5\u4e0b YAML\uff1a
                                                              \u70b9\u51fb\u67e5\u770b\u5b8c\u6574 YAML
                                                              ---\napiVersion: v1\nkind: Namespace\nmetadata:\n  name: local-path-storage\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: local-path-provisioner-service-account\n  namespace: local-path-storage\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: local-path-provisioner-role\nrules:\n- apiGroups: [\"\"]\n  resources: [\"nodes\", \"persistentvolumeclaims\", \"configmaps\"]\n  verbs: [\"get\", \"list\", \"watch\"]\n- apiGroups: [\"\"]\n  resources: [\"endpoints\", \"persistentvolumes\", \"pods\"]\n  verbs: [\"*\"]\n- apiGroups: [\"\"]\n  resources: [\"events\"]\n  verbs: [\"create\", \"patch\"]\n- apiGroups: [\"storage.k8s.io\"]\n  resources: [\"storageclasses\"]\n  verbs: [\"get\", \"list\", \"watch\"]\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: local-path-provisioner-bind\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: local-path-provisioner-role\nsubjects:\n- kind: ServiceAccount\n  name: local-path-provisioner-service-account\n  namespace: local-path-storage\n\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: local-path-provisioner\n  namespace: local-path-storage\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: local-path-provisioner\n  template:\n    metadata:\n      labels:\n        app: local-path-provisioner\n    spec:\n      serviceAccountName: local-path-provisioner-service-account\n      containers:\n      - name: local-path-provisioner\n        image: rancher/local-path-provisioner:v0.0.22\n        imagePullPolicy: IfNotPresent\n        command:\n        - local-path-provisioner\n        - --debug\n        - start\n        - --config\n        - /etc/config/config.json\n        volumeMounts:\n        - name: config-volume\n          mountPath: /etc/config/\n        env:\n        - name: POD_NAMESPACE\n          valueFrom:\n            fieldRef:\n              fieldPath: metadata.namespace\n      volumes:\n      - name: config-volume\n        configMap:\n          name: local-path-config\n\n---\napiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: local-path\nprovisioner: rancher.io/local-path\nvolumeBindingMode: WaitForFirstConsumer\nreclaimPolicy: Delete\n\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: local-path-config\n  namespace: local-path-storage\ndata:\n  config.json: |-\n    {\n      \"nodePathMap\": [\n        {\n          \"node\": \"DEFAULT_PATH_FOR_NON_LISTED_NODES\",\n          \"paths\": [\"/opt/local-path-provisioner\"]\n        }\n      ]\n    }\n  setup: |-\n    #!/bin/sh\n    set -eu\n    mkdir -m 0777 -p \"$VOL_DIR\"\n  teardown: |-\n    #!/bin/sh\n    set -eu\n    rm -rf \"$VOL_DIR\"\n  helperPod.yaml: |-\n    apiVersion: v1\n    kind: Pod\n    metadata:\n      name: helper-pod\n    spec:\n      containers:\n      - name: helper-pod\n        image: busybox\n        imagePullPolicy: IfNotPresent\n
                                                              "},{"location":"admin/virtnest/vm/vm-sc.html#_3","title":"\u4e91\u4e3b\u673a\u5b58\u50a8","text":"
                                                              1. \u7cfb\u7edf\u76d8\uff1a\u7cfb\u7edf\u9ed8\u8ba4\u521b\u5efa\u4e00\u4e2a VirtIO \u7c7b\u578b\u7684 rootfs \u7cfb\u7edf\u76d8\uff0c\u7528\u4e8e\u5b58\u653e\u64cd\u4f5c\u7cfb\u7edf\u548c\u6570\u636e\u3002

                                                              2. \u6570\u636e\u76d8\uff1a\u6570\u636e\u76d8\u662f\u4e91\u4e3b\u673a\u4e2d\u7528\u4e8e\u5b58\u50a8\u7528\u6237\u6570\u636e\u3001\u5e94\u7528\u7a0b\u5e8f\u6570\u636e\u6216\u5176\u4ed6\u975e\u64cd\u4f5c\u7cfb\u7edf\u76f8\u5173\u6587\u4ef6\u7684\u5b58\u50a8\u8bbe\u5907\u3002\u4e0e\u7cfb\u7edf\u76d8\u76f8\u6bd4\uff0c\u6570\u636e\u76d8\u662f\u975e\u5fc5\u9009\u7684\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u8981\u52a8\u6001\u6dfb\u52a0\u6216\u79fb\u9664\u3002\u6570\u636e\u76d8\u7684\u5bb9\u91cf\u4e5f\u53ef\u4ee5\u6839\u636e\u9700\u6c42\u8fdb\u884c\u7075\u6d3b\u914d\u7f6e\u3002

                                                                \u9ed8\u8ba4\u4f7f\u7528\u5757\u5b58\u50a8\u3002\u5982\u679c\u9700\u8981\u4f7f\u7528\u514b\u9686\u548c\u5feb\u7167\u529f\u80fd\uff0c\u8bf7\u786e\u4fdd\u60a8\u7684\u5b58\u50a8\u6c60\u5df2\u7ecf\u521b\u5efa\u4e86\u5bf9\u5e94\u7684 VolumeSnapshotClass\uff0c \u53ef\u4ee5\u53c2\u8003\u4ee5\u4e0b\u793a\u4f8b\u3002\u5982\u679c\u9700\u8981\u4f7f\u7528\u5b9e\u65f6\u8fc1\u79fb\u529f\u80fd\uff0c\u8bf7\u786e\u4fdd\u60a8\u7684\u5b58\u50a8\u652f\u6301\u5e76\u9009\u62e9\u4e86 ReadWriteMany \u7684\u8bbf\u95ee\u6a21\u5f0f \u3002

                                                                \u5927\u591a\u6570\u60c5\u51b5\u4e0b\uff0c\u5b58\u50a8\u5728\u5b89\u88c5\u8fc7\u7a0b\u4e2d\u4e0d\u4f1a\u81ea\u52a8\u521b\u5efa\u8fd9\u6837\u7684 VolumeSnapshotClass\uff0c\u56e0\u6b64\u60a8\u9700\u8981\u624b\u52a8\u521b\u5efa VolumeSnapshotClass\u3002 \u4ee5\u4e0b\u662f\u4e00\u4e2a HwameiStor \u521b\u5efa VolumeSnapshotClass \u7684\u793a\u4f8b\uff1a

                                                                kind: VolumeSnapshotClass\napiVersion: snapshot.storage.k8s.io/v1\nmetadata:\n  name: hwameistor-storage-lvm-snapshot\n  annotations:\n    snapshot.storage.kubernetes.io/is-default-class: \"true\"\nparameters:\n  snapsize: \"1073741824\"\ndriver: lvm.hwameistor.io\ndeletionPolicy: Delete\n
• Run the following command to check whether the VolumeSnapshotClass was created successfully.

  kubectl get VolumeSnapshotClass\n
• Inspect the created SnapshotClass and confirm that its provisioner matches the Driver attribute of the storage pool; one way to check is sketched below.
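Assuming the hwameistor-storage-lvm-snapshot class from the example above, the driver field can be read directly and compared against the storage pool:

    kubectl get volumesnapshotclass hwameistor-storage-lvm-snapshot -o jsonpath='{.driver}'

For the example class this should print lvm.hwameistor.io, which must match the storage pool's driver.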

                                                              "},{"location":"admin/virtnest/vm-image/index.html","title":"\u6784\u5efa\u4e91\u4e3b\u673a\u955c\u50cf","text":"

                                                              \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u6784\u5efa\u9700\u8981\u7684\u4e91\u4e3b\u673a\u955c\u50cf\u3002

                                                              \u4e91\u4e3b\u673a\u955c\u50cf\u5176\u5b9e\u5c31\u662f\u526f\u672c\u6587\u4ef6\uff0c\u662f\u5b89\u88c5\u6709\u64cd\u4f5c\u7cfb\u7edf\u7684\u4e00\u4e2a\u78c1\u76d8\u5206\u533a\u3002\u5e38\u89c1\u7684\u955c\u50cf\u6587\u4ef6\u683c\u5f0f\u5305\u62ec raw\u3001qcow2\u3001vmdk\u7b49\u3002

                                                              "},{"location":"admin/virtnest/vm-image/index.html#_2","title":"\u6784\u5efa\u955c\u50cf","text":"

                                                              \u4e0b\u9762\u662f\u6784\u5efa\u4e91\u4e3b\u673a\u955c\u50cf\u7684\u4e00\u4e9b\u8be6\u7ec6\u6b65\u9aa4\uff1a

                                                              1. \u4e0b\u8f7d\u7cfb\u7edf\u955c\u50cf

                                                                \u5728\u6784\u5efa\u4e91\u4e3b\u673a\u955c\u50cf\u4e4b\u524d\uff0c\u60a8\u9700\u8981\u4e0b\u8f7d\u6240\u9700\u7684\u7cfb\u7edf\u955c\u50cf\u3002\u6211\u4eec\u63a8\u8350\u4f7f\u7528 qcow2\u3001raw \u6216 vmdk \u683c\u5f0f\u7684\u955c\u50cf\u3002\u53ef\u4ee5\u8bbf\u95ee\u4ee5\u4e0b\u94fe\u63a5\u83b7\u53d6 CentOS \u548c Fedora \u7684\u955c\u50cf\uff1a

                                                                • CentOS Cloud Images\uff1a\u652f\u6301\u4ece\u5b98\u65b9 CentOS \u9879\u76ee\u6216\u5176\u4ed6\u8d44\u6e90\u4e2d\u83b7\u53d6 CentOS \u955c\u50cf\u3002\u8bf7\u786e\u4fdd\u9009\u62e9\u4e0e\u60a8\u7684\u865a\u62df\u5316\u5e73\u53f0\u517c\u5bb9\u7684\u7248\u672c\u3002
                                                                • Fedora Cloud Images\uff1a \u652f\u6301\u4ece\u5b98\u65b9 Fedora \u9879\u76ee\u83b7\u53d6\u955c\u50cf\u3002\u6839\u636e\u60a8\u7684\u9700\u6c42\u9009\u62e9\u5408\u9002\u7684\u7248\u672c\u3002
                                                              2. \u6784\u5efa Docker \u955c\u50cf\u5e76\u63a8\u9001\u5230\u5bb9\u5668\u955c\u50cf\u4ed3\u5e93

                                                                \u5728\u6b64\u6b65\u9aa4\u4e2d\uff0c\u6211\u4eec\u5c06\u4f7f\u7528 Docker \u6784\u5efa\u4e00\u4e2a\u955c\u50cf\uff0c\u5e76\u5c06\u5176\u63a8\u9001\u5230\u5bb9\u5668\u955c\u50cf\u4ed3\u5e93\uff0c\u4ee5\u4fbf\u5728\u9700\u8981\u65f6\u80fd\u591f\u65b9\u4fbf\u5730\u90e8\u7f72\u548c\u4f7f\u7528\u3002

                                                                • \u521b\u5efa Dockerfile \u6587\u4ef6

                                                                  FROM scratch\nADD --chown=107:107 CentOS-7-x86_64-GenericCloud.qcow2 /disk/\n

                                                                  \u5411\u57fa\u4e8e\u7a7a\u767d\u955c\u50cf\u6784\u5efa\u7684\u955c\u50cf\u4e2d\u6dfb\u52a0\u540d\u4e3a CentOS-7-x86_64-GenericCloud.qcow2 \u7684\u6587\u4ef6\uff0c\u5e76\u5c06\u5176\u653e\u7f6e\u5728\u955c\u50cf\u4e2d\u7684 /disk/ \u76ee\u5f55\u4e0b\u3002\u901a\u8fc7\u8fd9\u4e2a\u64cd\u4f5c\uff0c\u955c\u50cf\u5c31\u5305\u542b\u4e86\u8fd9\u4e2a\u6587\u4ef6\uff0c\u53ef\u4ee5\u5728\u521b\u5efa\u4e91\u4e3b\u673a\u65f6\u4f7f\u7528\u5b83\u6765\u63d0\u4f9b CentOS 7 x86_64 \u7684\u64cd\u4f5c\u7cfb\u7edf\u73af\u5883\u3002

                                                                • \u6784\u5efa\u955c\u50cf

                                                                  docker build -t release-ci.daocloud.io/ghippo/kubevirt-demo/centos7:v1 .\n

                                                                  \u4e0a\u8ff0\u547d\u4ee4\u5c06\u4f7f\u7528 Dockerfile \u4e2d\u7684\u6307\u4ee4\u6784\u5efa\u4e00\u4e2a\u540d\u4e3a release-ci.daocloud.io/ghippo/kubevirt-demo/centos7:v1 \u7684\u955c\u50cf\u3002\u60a8\u53ef\u4ee5\u6839\u636e\u9879\u76ee\u9700\u6c42\u4fee\u6539\u955c\u50cf\u540d\u79f0\u3002

                                                                • \u63a8\u9001\u955c\u50cf\u81f3\u5bb9\u5668\u955c\u50cf\u4ed3\u5e93

                                                                  \u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u5c06\u6784\u5efa\u597d\u7684\u955c\u50cf\u63a8\u9001\u5230\u540d\u4e3a release-ci.daocloud.io \u7684\u955c\u50cf\u4ed3\u5e93\uff0c\u60a8\u8fd8\u53ef\u4ee5\u6839\u636e\u9700\u8981\u4fee\u6539\u955c\u50cf\u4ed3\u5e93\u7684\u540d\u79f0\u548c\u5730\u5740\u3002

                                                                  docker push release-ci.daocloud.io/ghippo/kubevirt-demo/centos7:v1\n

                                                              \u4ee5\u4e0a\u662f\u6784\u5efa\u4e91\u4e3b\u673a\u955c\u50cf\u7684\u8be6\u7ec6\u6b65\u9aa4\u548c\u8bf4\u660e\u3002\u901a\u8fc7\u6309\u7167\u8fd9\u4e9b\u6b65\u9aa4\u64cd\u4f5c\uff0c\u60a8\u5c06\u80fd\u591f\u6210\u529f\u6784\u5efa\u5e76\u63a8\u9001\u7528\u4e8e\u4e91\u4e3b\u673a\u7684\u955c\u50cf\uff0c\u4ee5\u6ee1\u8db3\u60a8\u7684\u4f7f\u7528\u9700\u6c42\u3002
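For context, an image pushed this way is referenced from the dataVolumeTemplates of a VirtualMachine, just like the Ubuntu image in the probe example earlier. A minimal sketch of the relevant fragment; the disk name and PVC sizing are illustrative:

    dataVolumeTemplates:
      - metadata:
          name: centos7-rootdisk        # hypothetical disk name
        spec:
          pvc:
            accessModes:
              - ReadWriteOnce
            resources:
              requests:
                storage: 10Gi
          source:
            registry:
              url: >-
                docker://release-ci.daocloud.io/ghippo/kubevirt-demo/centos7:v1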

                                                              "},{"location":"end-user/index.html","title":"\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 - \u7ec8\u7aef\u7528\u6237","text":"

                                                              \u8fd9\u662f\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9762\u5411\u7ec8\u7aef\u7528\u6237\u7684\u4f7f\u7528\u6587\u6863\u3002

                                                              • \u7528\u6237\u6ce8\u518c

                                                                \u7528\u6237\u6ce8\u518c\u662f\u4f7f\u7528 AI \u7b97\u529b\u5e73\u53f0\u7684\u7b2c\u4e00\u6b65\u3002

                                                                • \u7528\u6237\u6ce8\u518c
                                                              • \u4e91\u4e3b\u673a

                                                                \u4e91\u4e3b\u673a\u662f\u90e8\u7f72\u5728\u4e91\u7aef\u7684\u865a\u62df\u673a\u3002

                                                                • \u521b\u5efa\u4e91\u4e3b\u673a
                                                                • \u4f7f\u7528\u4e91\u4e3b\u673a
                                                              • \u5bb9\u5668\u7ba1\u7406

                                                                \u5bb9\u5668\u7ba1\u7406\u662f AI \u7b97\u529b\u4e2d\u5fc3\u7684\u6838\u5fc3\u6a21\u5757\u3002

                                                                • \u4e91\u4e0a K8s \u96c6\u7fa4
                                                                • \u8282\u70b9\u7ba1\u7406
                                                                • \u5de5\u4f5c\u8d1f\u8f7d
                                                                • Helm \u5e94\u7528\u548c\u6a21\u677f
                                                              • \u7b97\u6cd5\u5f00\u53d1

                                                                \u7ba1\u7406\u6570\u636e\u96c6\uff0c\u6267\u884c AI \u8bad\u7ec3\u548c\u63a8\u7406\u4efb\u52a1\u3002

                                                                • \u521b\u5efa AI \u5de5\u4f5c\u8d1f\u8f7d
                                                                • \u4f7f\u7528 Notebook
                                                                • \u521b\u5efa\u8bad\u7ec3\u4efb\u52a1
                                                                • \u521b\u5efa\u63a8\u7406\u670d\u52a1
                                                              • \u53ef\u89c2\u6d4b\u6027

                                                                \u901a\u8fc7\u4eea\u8868\u76d8\u76d1\u63a7\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u51b5\u3002

                                                                • \u76d1\u63a7\u96c6\u7fa4/\u8282\u70b9
                                                                • \u6307\u6807
                                                                • \u65e5\u5fd7
                                                                • \u94fe\u8def\u8ffd\u8e2a
                                                              • \u4e2a\u4eba\u4e2d\u5fc3

                                                                \u5728\u4e2a\u4eba\u4e2d\u5fc3\u8bbe\u7f6e\u5bc6\u7801\u3001\u5bc6\u94a5\u548c\u8bed\u8a00\u3002

                                                                • \u5b89\u5168\u8bbe\u7f6e
                                                                • \u8bbf\u95ee\u5bc6\u94a5
                                                                • \u8bed\u8a00\u8bbe\u7f6e
                                                              "},{"location":"end-user/baize/dataset/create-use-delete.html","title":"\u6570\u636e\u96c6\u5217\u8868","text":"

                                                              AI Lab \u63d0\u4f9b\u6a21\u578b\u5f00\u53d1\u3001\u8bad\u7ec3\u4ee5\u53ca\u63a8\u7406\u8fc7\u7a0b\u6240\u6709\u9700\u8981\u7684\u6570\u636e\u96c6\u7ba1\u7406\u529f\u80fd\u3002\u76ee\u524d\u652f\u6301\u5c06\u591a\u79cd\u6570\u636e\u6e90\u7edf\u4e00\u63a5\u5165\u80fd\u529b\u3002

                                                              \u901a\u8fc7\u7b80\u5355\u914d\u7f6e\u5373\u53ef\u5c06\u6570\u636e\u6e90\u63a5\u5165\u5230 AI Lab \u4e2d\uff0c\u5b9e\u73b0\u6570\u636e\u7684\u7edf\u4e00\u7eb3\u7ba1\u3001\u9884\u70ed\u3001\u6570\u636e\u96c6\u7ba1\u7406\u7b49\u529f\u80fd\u3002

                                                              "},{"location":"end-user/baize/dataset/create-use-delete.html#_2","title":"\u521b\u5efa\u6570\u636e\u96c6","text":"
                                                              1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u6570\u636e\u7ba1\u7406 -> \u6570\u636e\u96c6\u5217\u8868 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae\u3002

                                                              2. \u9009\u62e9\u6570\u636e\u96c6\u5f52\u5c5e\u7684\u5de5\u4f5c\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4 \u4e0b\u4e00\u6b65 \u3002

                                                              3. \u914d\u7f6e\u76ee\u6807\u6570\u636e\u7684\u6570\u636e\u6e90\u7c7b\u578b\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                                                                \u76ee\u524d\u652f\u6301\u8fd9\u51e0\u79cd\u6570\u636e\u6e90\uff1a

                                                                • GIT\uff1a\u652f\u6301 GitHub\u3001GitLab\u3001Gitee \u7b49\u4ed3\u5e93
                                                                • S3\uff1a\u652f\u6301 Amazon \u4e91\u7b49\u5bf9\u8c61\u5b58\u50a8
                                                                • HTTP\uff1a\u76f4\u63a5\u8f93\u5165\u4e00\u4e2a\u6709\u6548\u7684 HTTP \u7f51\u5740
                                                                • PVC\uff1a\u652f\u6301\u9884\u5148\u521b\u5efa\u7684 Kubernetes PersistentVolumeClaim
                                                                • NFS\uff1a\u652f\u6301 NFS \u5171\u4eab\u5b58\u50a8
                                                              4. \u6570\u636e\u96c6\u521b\u5efa\u6210\u529f\u5c06\u8fd4\u56de\u6570\u636e\u96c6\u5217\u8868\u3002\u4f60\u53ef\u4ee5\u901a\u8fc7\u53f3\u4fa7\u7684 \u2507 \u6267\u884c\u66f4\u591a\u64cd\u4f5c\u3002

                                                              Info

                                                              \u7cfb\u7edf\u81ea\u52a8\u4f1a\u5728\u6570\u636e\u96c6\u521b\u5efa\u6210\u529f\u540e\uff0c\u7acb\u5373\u8fdb\u884c\u4e00\u6b21\u6027\u7684\u6570\u636e\u9884\u52a0\u8f7d\uff1b\u5728\u9884\u52a0\u8f7d\u5b8c\u6210\u4e4b\u524d\uff0c\u6570\u636e\u96c6\u4e0d\u53ef\u4ee5\u4f7f\u7528\u3002

                                                              "},{"location":"end-user/baize/dataset/create-use-delete.html#_3","title":"\u6570\u636e\u96c6\u4f7f\u7528","text":"

                                                              \u6570\u636e\u96c6\u521b\u5efa\u6210\u529f\u540e\uff0c\u53ef\u4ee5\u5728\u6a21\u578b\u8bad\u7ec3\u3001\u63a8\u7406\u7b49\u4efb\u52a1\u4e2d\u4f7f\u7528\u3002

                                                              "},{"location":"end-user/baize/dataset/create-use-delete.html#notebook","title":"\u5728 Notebook \u4e2d\u4f7f\u7528","text":"

                                                              \u5728\u521b\u5efa Notebook \u4e2d\uff0c\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u6570\u636e\u96c6\uff1b\u4f7f\u7528\u65b9\u5f0f\u5982\u4e0b\uff1a

                                                              • \u4f7f\u7528\u6570\u636e\u96c6\u505a\u8bad\u7ec3\u6570\u636e\u6302\u8f7d
                                                              • \u4f7f\u7528\u6570\u636e\u96c6\u505a\u4ee3\u7801\u6302\u8f7d

                                                              "},{"location":"end-user/baize/dataset/create-use-delete.html#_4","title":"\u5728 \u8bad\u7ec3\u4efb\u52a1 \u4e2d\u4f7f\u7528","text":"
                                                              • \u4f7f\u7528\u6570\u636e\u96c6\u6307\u5b9a\u4efb\u52a1\u8f93\u51fa
                                                              • \u4f7f\u7528\u6570\u636e\u96c6\u6307\u5b9a\u4efb\u52a1\u8f93\u5165
                                                              • \u4f7f\u7528\u6570\u636e\u96c6\u6307\u5b9a TensorBoard \u8f93\u51fa
                                                              "},{"location":"end-user/baize/dataset/create-use-delete.html#_5","title":"\u5728\u63a8\u7406\u670d\u52a1 \u4e2d\u4f7f\u7528","text":"
                                                              • \u4f7f\u7528\u6570\u636e\u96c6\u6302\u8f7d\u6a21\u578b
                                                              "},{"location":"end-user/baize/dataset/create-use-delete.html#_6","title":"\u5220\u9664\u6570\u636e\u96c6","text":"

                                                              \u5982\u679c\u53d1\u73b0\u6570\u636e\u96c6\u5197\u4f59\u3001\u8fc7\u671f\u6216\u56e0\u5176\u4ed6\u7f18\u6545\u4e0d\u518d\u9700\u8981\uff0c\u53ef\u4ee5\u4ece\u6570\u636e\u96c6\u5217\u8868\u4e2d\u5220\u9664\u3002

                                                              1. \u5728\u6570\u636e\u96c6\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u5220\u9664 \u3002

                                                              2. \u5728\u5f39\u7a97\u4e2d\u786e\u8ba4\u8981\u5220\u9664\u7684\u6570\u636e\u96c6\uff0c\u8f93\u5165\u6570\u636e\u96c6\u540d\u79f0\u540e\u70b9\u51fb \u5220\u9664 \u3002

                                                              3. \u5c4f\u5e55\u63d0\u793a\u5220\u9664\u6210\u529f\uff0c\u8be5\u6570\u636e\u96c6\u4ece\u5217\u8868\u4e2d\u6d88\u5931\u3002

                                                              Caution

                                                              \u6570\u636e\u96c6\u4e00\u65e6\u5220\u9664\u5c06\u4e0d\u53ef\u6062\u590d\uff0c\u8bf7\u8c28\u614e\u64cd\u4f5c\u3002

                                                              "},{"location":"end-user/baize/dataset/environments.html","title":"\u7ba1\u7406\u73af\u5883","text":"

                                                              \u672c\u6587\u8bf4\u660e\u5982\u4f55\u5728 AI Lab \u4e2d\u7ba1\u7406\u4f60\u7684\u73af\u5883\u4f9d\u8d56\u5e93\uff0c\u4ee5\u4e0b\u662f\u5177\u4f53\u64cd\u4f5c\u6b65\u9aa4\u548c\u6ce8\u610f\u4e8b\u9879\u3002

                                                              1. \u73af\u5883\u7ba1\u7406\u6982\u8ff0
                                                              2. \u521b\u5efa\u65b0\u73af\u5883
                                                              3. \u914d\u7f6e\u73af\u5883
                                                              4. \u6545\u969c\u6392\u9664
                                                              "},{"location":"end-user/baize/dataset/environments.html#_2","title":"\u73af\u5883\u7ba1\u7406\u6982\u8ff0","text":"

                                                              \u4f20\u7edf\u65b9\u5f0f\uff0c\u4e00\u822c\u4f1a\u5c06 Python \u73af\u5883\u4f9d\u8d56\u5728\u955c\u50cf\u4e2d\u6784\u5efa\uff0c\u955c\u50cf\u5e26\u6709 Python \u7248\u672c\u548c\u4f9d\u8d56\u5305\u7684\u955c\u50cf\uff0c\u7ef4\u62a4\u6210\u672c\u8f83\u9ad8\u4e14\u66f4\u65b0\u4e0d\u65b9\u4fbf\uff0c\u5f80\u5f80\u9700\u8981\u91cd\u65b0\u6784\u5efa\u955c\u50cf\u3002

                                                              \u800c\u5728 AI Lab \u4e2d\uff0c\u7528\u6237\u53ef\u4ee5\u901a\u8fc7 \u73af\u5883\u7ba1\u7406 \u6a21\u5757\u6765\u7ba1\u7406\u7eaf\u7cb9\u7684\u73af\u5883\u4f9d\u8d56\uff0c\u5c06\u8fd9\u90e8\u5206\u4ece\u955c\u50cf\u4e2d\u89e3\u8026\uff0c\u5e26\u6765\u7684\u4f18\u52bf\u6709\uff1a

                                                              • \u4e00\u4efd\u73af\u5883\u591a\u5904\u4f7f\u7528\uff0c\u540c\u65f6\u53ef\u4ee5\u5728 Notebook\u3001\u5206\u5e03\u5f0f\u8bad\u7ec3\u4efb\u52a1\u3001\u4e43\u81f3\u63a8\u7406\u670d\u52a1\u4e2d\u4f7f\u7528\u3002
                                                              • \u66f4\u65b0\u4f9d\u8d56\u5305\u66f4\u52a0\u65b9\u4fbf\uff0c\u53ea\u9700\u8981\u66f4\u65b0\u73af\u5883\u4f9d\u8d56\u5373\u53ef\uff0c\u65e0\u9700\u91cd\u65b0\u6784\u5efa\u955c\u50cf\u3002

                                                              \u4ee5\u4e0b\u4e3a\u73af\u5883\u7ba1\u7406\u7684\u4e3b\u8981\u7ec4\u6210\u90e8\u5206\uff1a

                                                              • \u96c6\u7fa4 \uff1a\u9009\u62e9\u9700\u8981\u64cd\u4f5c\u7684\u96c6\u7fa4\u3002
                                                              • \u547d\u540d\u7a7a\u95f4 \uff1a\u9009\u62e9\u547d\u540d\u7a7a\u95f4\u4ee5\u9650\u5b9a\u64cd\u4f5c\u8303\u56f4\u3002
                                                              • \u73af\u5883\u5217\u8868 \uff1a\u5c55\u793a\u5f53\u524d\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u6240\u6709\u73af\u5883\u53ca\u5176\u72b6\u6001\u3002

                                                              \u5b57\u6bb5 \u63cf\u8ff0 \u4e3e\u4f8b\u503c \u540d\u79f0 \u73af\u5883\u7684\u540d\u79f0 my-environment \u72b6\u6001 \u73af\u5883\u5f53\u524d\u7684\u72b6\u6001\uff08\u6b63\u5e38\u6216\u5931\u8d25\uff09\uff0c\u65b0\u521b\u5efa\u73af\u5883\u6709\u4e00\u4e2a\u9884\u70ed\u8fc7\u7a0b\uff0c\u9884\u70ed\u6210\u529f\u540e\u5373\u53ef\u5728\u5176\u4ed6\u4efb\u52a1\u4e2d\u4f7f\u7528 \u6b63\u5e38 \u521b\u5efa\u65f6\u95f4 \u73af\u5883\u521b\u5efa\u7684\u65f6\u95f4 2023-10-01 10:00:00"},{"location":"end-user/baize/dataset/environments.html#_3","title":"\u521b\u5efa\u65b0\u73af\u5883","text":"

On the Environment Management page, click the Create button in the upper right corner to start the environment-creation flow.

| Field | Description | Example |
| --- | --- | --- |
| Name | Enter the environment's name, 2-63 characters, starting and ending with a lowercase letter or digit | my-environment |
| Deployment Location | Cluster: select the cluster to deploy to | gpu-cluster |
| | Namespace: select the namespace | default |
| Notes | Fill in remarks | This is a test environment |
| Labels | Add labels to the environment | env:test |
| Annotations | Add annotations to the environment. When done, click Next to move on to environment configuration | Annotation example |
"},{"location":"end-user/baize/dataset/environments.html#_4","title":"Configuring the Environment","text":"

In the environment-configuration step, configure the Python version and the dependency package manager, as in the table below; an example dependency list follows the table.

| Field | Description | Example |
| --- | --- | --- |
| Python Version | Select the Python version you need | 3.12.3 |
| Package Manager | Select the package management tool, either PIP or CONDA | PIP |
| Environment Data | If PIP is selected: enter a dependency list in requirements.txt format in the editor below | numpy==1.21.0 |
| | If CONDA is selected: enter a dependency list in environment.yaml format in the editor below | |
| Other Options | pip extra index URL: configure an extra pip index; useful for enterprises with their own private repository or a PIP mirror | https://pypi.example.com |
| | GPU configuration: enable or disable GPU; some GPU-related dependency packages need GPU resources during preloading | Enabled |
| | Associated storage: select the associated storage configuration; environment dependency packages are stored there. Note: the storage must support ReadWriteMany | my-storage-config |
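For reference, a minimal environment.yaml in the CONDA format might look as follows; the environment name and the package pins are purely illustrative:

    name: my-environment        # hypothetical name
    channels:
      - defaults
    dependencies:
      - python=3.12
      - numpy=1.21.0
      - pip
      - pip:
          - requests==2.31.0    # illustrative pip-installed package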

After configuration, click the Create button and the system automatically creates and configures the new Python environment.

"},{"location":"end-user/baize/dataset/environments.html#_5","title":"Troubleshooting","text":"
• If creating the environment fails:

  • Check that the network connection is healthy.
  • Confirm that the Python version and package manager configuration are correct.
  • Make sure the selected cluster and namespace are available.
• If preloading the dependencies fails:

  • Check that the requirements.txt or environment.yaml file is correctly formatted.
  • Confirm that the package names and versions are correct. For any other problem, contact the platform administrator or consult the platform help documentation for more support.

These are the basic steps and caveats for managing Python dependencies in AI Lab.

                                                              "},{"location":"end-user/baize/inference/models.html","title":"\u4e86\u89e3\u6a21\u578b\u652f\u6301\u60c5\u51b5","text":"

                                                              \u968f\u7740 AI Lab \u7684\u5feb\u901f\u8fed\u4ee3\uff0c\u6211\u4eec\u5df2\u7ecf\u652f\u6301\u4e86\u591a\u79cd\u6a21\u578b\u7684\u63a8\u7406\u670d\u52a1\uff0c\u60a8\u53ef\u4ee5\u5728\u8fd9\u91cc\u770b\u5230\u6240\u652f\u6301\u7684\u6a21\u578b\u4fe1\u606f\u3002

                                                              • AI Lab v0.3.0 \u4e0a\u7ebf\u4e86\u6a21\u578b\u63a8\u7406\u670d\u52a1\uff0c\u9488\u5bf9\u4f20\u7edf\u7684\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\uff0c\u65b9\u4fbf\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528AI Lab \u7684\u63a8\u7406\u670d\u52a1\uff0c\u65e0\u9700\u5173\u5fc3\u6a21\u578b\u7684\u90e8\u7f72\u548c\u7ef4\u62a4\u3002
                                                              • AI Lab v0.6.0 \u652f\u6301\u4e86\u5b8c\u6574\u7248\u672c\u7684 vLLM \u63a8\u7406\u80fd\u529b\uff0c\u652f\u6301\u8bf8\u591a\u5927\u8bed\u8a00\u6a21\u578b\uff0c\u5982 LLama\u3001Qwen\u3001ChatGLM \u7b49\u3002

                                                              \u60a8\u53ef\u4ee5\u5728 AI Lab \u4e2d\u4f7f\u7528\u7ecf\u8fc7\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9a8c\u8bc1\u8fc7\u7684 GPU \u7c7b\u578b\uff1b \u66f4\u591a\u7ec6\u8282\u53c2\u9605 GPU \u652f\u6301\u77e9\u9635\u3002

                                                              "},{"location":"end-user/baize/inference/models.html#triton-inference-server","title":"Triton Inference Server","text":"

                                                              \u901a\u8fc7 Triton Inference Server \u53ef\u4ee5\u5f88\u597d\u7684\u652f\u6301\u4f20\u7edf\u7684\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\uff0c\u6211\u4eec\u76ee\u524d\u652f\u6301\u4e3b\u6d41\u7684\u63a8\u7406\u540e\u7aef\u670d\u52a1\uff1a

                                                              Backend \u652f\u6301\u6a21\u578b\u683c\u5f0f \u4ecb\u7ecd pytorch TorchScript\u3001PyTorch 2.0 \u683c\u5f0f\u7684\u6a21\u578b triton-inference-server/pytorch_backend tensorflow TensorFlow 2.x triton-inference-server/tensorflow_backend vLLM(Deprecated) \u4e0e vLLM \u4e00\u81f4 \u652f\u6301\u7684\u6a21\u578b\u548c vLLM support Model \u4e00\u81f4

                                                              Danger

                                                              \u4f7f\u7528 Triton \u7684 Backend vLLM \u7684\u65b9\u5f0f\u5df2\u88ab\u5f03\u7528\uff0c\u63a8\u8350\u4f7f\u7528\u6700\u65b0\u652f\u6301 vLLM \u6765\u90e8\u7f72\u60a8\u7684\u5927\u8bed\u8a00\u6a21\u578b\u3002

                                                              "},{"location":"end-user/baize/inference/models.html#vllm","title":"vLLM","text":"

                                                              \u901a\u8fc7 vLLM \u6211\u4eec\u53ef\u4ee5\u5f88\u5feb\u7684\u4f7f\u7528\u5927\u8bed\u8a00\u6a21\u578b\uff0c\u60a8\u53ef\u4ee5\u5728\u8fd9\u91cc\u770b\u5230\u6211\u4eec\u652f\u6301\u7684\u6a21\u578b\u5217\u8868\uff0c\u8fd9\u901a\u5e38\u548c vLLM Support Models \u4fdd\u6301\u4e00\u81f4\u3002

                                                              • HuggingFace \u6a21\u578b\uff1a\u6211\u4eec\u652f\u6301\u4e86 HuggingFace \u7684\u5927\u90e8\u5206\u6a21\u578b\uff0c\u60a8\u53ef\u4ee5\u5728 HuggingFace Model Hub \u67e5\u770b\u66f4\u591a\u6a21\u578b\u3002
                                                              • vLLM \u652f\u6301\u6a21\u578b\u5217\u51fa\u4e86\u652f\u6301\u7684\u5927\u8bed\u8a00\u6a21\u578b\u548c\u89c6\u89c9\u8bed\u8a00\u6a21\u578b\u3002
                                                              • \u4f7f\u7528 vLLM \u652f\u6301\u6846\u67b6\u7684\u6a21\u578b\u8fdb\u884c\u5fae\u8c03\u540e\u7684\u6a21\u578b\u3002
                                                              "},{"location":"end-user/baize/inference/models.html#vllm_1","title":"vLLM \u65b0\u7279\u6027","text":"

                                                              \u76ee\u524d\uff0cAI Lab \u8fd8\u652f\u6301\u5728\u4f7f\u7528 vLLM \u4f5c\u4e3a\u63a8\u7406\u5de5\u5177\u65f6\u7684\u4e00\u4e9b\u65b0\u7279\u6027\uff1a

                                                              • \u5728\u63a8\u7406\u6a21\u578b\u65f6\uff0c\u542f\u7528 Lora Adapter \u6765\u4f18\u5316\u6a21\u578b\u63a8\u7406\u670d\u52a1
                                                              • \u63d0\u4f9b\u517c\u5bb9 OpenAI \u7684 OpenAPI \u63a5\u53e3\uff0c\u65b9\u4fbf\u7528\u6237\u5207\u6362\u5230\u672c\u5730\u63a8\u7406\u670d\u52a1\u65f6\uff0c\u53ef\u4ee5\u4f4e\u6210\u672c\u7684\u5feb\u901f\u5207\u6362
                                                              "},{"location":"end-user/baize/inference/models.html#_2","title":"\u4e0b\u4e00\u6b65","text":"
                                                              • \u521b\u5efa Triton \u63a8\u7406\u670d\u52a1
                                                              • \u521b\u5efa vLLM \u63a8\u7406\u670d\u52a1
                                                              "},{"location":"end-user/baize/inference/triton-inference.html","title":"\u521b\u5efa Triton \u63a8\u7406\u670d\u52a1","text":"

                                                              AI Lab \u76ee\u524d\u63d0\u4f9b\u4ee5 Triton\u3001vLLM \u4f5c\u4e3a\u63a8\u7406\u6846\u67b6\uff0c\u7528\u6237\u53ea\u9700\u7b80\u5355\u914d\u7f6e\u5373\u53ef\u5feb\u901f\u542f\u52a8\u4e00\u4e2a\u9ad8\u6027\u80fd\u7684\u63a8\u7406\u670d\u52a1\u3002

                                                              Danger

                                                              \u4f7f\u7528 Triton \u7684 Backend vLLM \u7684\u65b9\u5f0f\u5df2\u88ab\u5f03\u7528\uff0c\u63a8\u8350\u4f7f\u7528\u6700\u65b0\u652f\u6301 vLLM \u6765\u90e8\u7f72\u60a8\u7684\u5927\u8bed\u8a00\u6a21\u578b\u3002

                                                              "},{"location":"end-user/baize/inference/triton-inference.html#triton_1","title":"Triton\u4ecb\u7ecd","text":"

                                                              Triton \u662f\u7531 NVIDIA \u5f00\u53d1\u7684\u4e00\u4e2a\u5f00\u6e90\u63a8\u7406\u670d\u52a1\u5668\uff0c\u65e8\u5728\u7b80\u5316\u673a\u5668\u5b66\u4e60\u6a21\u578b\u7684\u90e8\u7f72\u548c\u63a8\u7406\u670d\u52a1\u3002\u5b83\u652f\u6301\u591a\u79cd\u6df1\u5ea6\u5b66\u4e60\u6846\u67b6\uff0c\u5305\u62ec TensorFlow\u3001PyTorch \u7b49\uff0c\u4f7f\u5f97\u7528\u6237\u80fd\u591f\u8f7b\u677e\u7ba1\u7406\u548c\u90e8\u7f72\u4e0d\u540c\u7c7b\u578b\u7684\u6a21\u578b\u3002

                                                              "},{"location":"end-user/baize/inference/triton-inference.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u51c6\u5907\u6a21\u578b\u6570\u636e\uff1a\u5728\u6570\u636e\u96c6\u7ba1\u7406\u4e2d\u7eb3\u7ba1\u6a21\u578b\u4ee3\u7801\uff0c\u5e76\u4fdd\u8bc1\u6570\u636e\u6210\u529f\u9884\u52a0\u8f7d\uff0c\u4e0b\u9762\u4ee5 mnist \u624b\u5199\u6570\u5b57\u8bc6\u522b\u7684 PyTorch \u6a21\u578b\u4e3a\u4f8b\u3002

                                                              Note

                                                              \u5f85\u63a8\u7406\u7684\u6a21\u578b\u5728\u6570\u636e\u96c6\u4e2d\u9700\u8981\u9075\u4ee5\u4e0b\u76ee\u5f55\u683c\u5f0f\uff1a

                                                                <model-repository-name>\n  \u2514\u2500\u2500 <model-name>\n     \u2514\u2500\u2500 <version>\n        \u2514\u2500\u2500 <model-definition-file>\n

The directory layout in this example is:

                                                                  model-repo\n    \u2514\u2500\u2500 mnist-cnn\n        \u2514\u2500\u2500 1\n            \u2514\u2500\u2500 model.pt\n
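For reference, the model.pt file in this layout is a TorchScript artifact, which is what Triton's pytorch_backend loads. Below is a minimal sketch of producing one; the MnistCNN class and its 1x32x32 input assumption are hypothetical stand-ins for your actual trained model.

    import torch
    import torch.nn as nn

    # Hypothetical MNIST CNN; replace with your actual trained model.
    class MnistCNN(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(1, 16, kernel_size=3, padding=1)
            self.fc = nn.Linear(16 * 32 * 32, 10)

        def forward(self, x):
            x = torch.relu(self.conv(x))
            return self.fc(x.flatten(1))

    model = MnistCNN().eval()  # in practice, load trained weights before exporting
    # Convert to TorchScript and save into the Triton model repository layout.
    scripted = torch.jit.script(model)
    scripted.save("model-repo/mnist-cnn/1/model.pt")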
                                                              "},{"location":"end-user/baize/inference/triton-inference.html#_2","title":"\u521b\u5efa\u63a8\u7406\u670d\u52a1","text":"

                                                              \u76ee\u524d\u5df2\u7ecf\u652f\u6301\u8868\u5355\u521b\u5efa\uff0c\u53ef\u4ee5\u754c\u9762\u5b57\u6bb5\u63d0\u793a\uff0c\u8fdb\u884c\u670d\u52a1\u521b\u5efa\u3002

                                                              "},{"location":"end-user/baize/inference/triton-inference.html#_3","title":"\u914d\u7f6e\u6a21\u578b\u8def\u5f84","text":"

                                                              \u6a21\u578b\u8def\u5f84 model-repo/mnist-cnn/1/model.pt \u9700\u8981\u548c\u6570\u636e\u96c6\u4e2d\u7684\u6a21\u578b\u76ee\u5f55\u683c\u5f0f\u4e00\u81f4\u3002

                                                              "},{"location":"end-user/baize/inference/triton-inference.html#_4","title":"\u6a21\u578b\u914d\u7f6e","text":""},{"location":"end-user/baize/inference/triton-inference.html#_5","title":"\u914d\u7f6e\u8f93\u5165\u548c\u8f93\u51fa\u53c2\u6570","text":"

                                                              Note

                                                              \u8f93\u5165\u548c\u8f93\u51fa\u53c2\u6570\u7684\u7b2c\u4e00\u4e2a\u7ef4\u5ea6\u9ed8\u8ba4\u4e3a batchsize \u7684\u5927\u5c0f\uff0c\u8bbe\u7f6e\u4e3a -1 \u53ef\u4ee5\u6839\u636e\u8f93\u5165\u7684\u63a8\u7406\u6570\u636e\u81ea\u52a8\u8ba1\u7b97 batchsize\u3002\u53c2\u6570\u5176\u4f59\u7ef4\u5ea6\u548c\u6570\u636e\u7c7b\u578b\u9700\u8981\u4e0e\u6a21\u578b\u8f93\u5165\u5339\u914d\u3002

                                                              "},{"location":"end-user/baize/inference/triton-inference.html#_6","title":"\u914d\u7f6e\u73af\u5883","text":"

                                                              \u53ef\u4ee5\u5bfc\u5165 \u73af\u5883\u7ba1\u7406 \u4e2d\u521b\u5efa\u7684\u73af\u5883\u4f5c\u4e3a\u63a8\u7406\u65f6\u7684\u8fd0\u884c\u73af\u5883\u3002

                                                              "},{"location":"end-user/baize/inference/triton-inference.html#_7","title":"\u9ad8\u7ea7\u914d\u7f6e","text":""},{"location":"end-user/baize/inference/triton-inference.html#_8","title":"\u914d\u7f6e\u8ba4\u8bc1\u7b56\u7565","text":"

                                                              \u652f\u6301 API key \u7684\u8bf7\u6c42\u65b9\u5f0f\u8ba4\u8bc1\uff0c\u7528\u6237\u53ef\u4ee5\u81ea\u5b9a\u4e49\u589e\u52a0\u8ba4\u8bc1\u53c2\u6570\u3002

                                                              "},{"location":"end-user/baize/inference/triton-inference.html#_9","title":"\u4eb2\u548c\u6027\u8c03\u5ea6","text":"

                                                              \u652f\u6301 \u6839\u636e GPU \u8d44\u6e90\u7b49\u8282\u70b9\u914d\u7f6e\u5b9e\u73b0\u81ea\u52a8\u5316\u7684\u4eb2\u548c\u6027\u8c03\u5ea6\uff0c\u540c\u65f6\u4e5f\u65b9\u4fbf\u7528\u6237\u81ea\u5b9a\u4e49\u8c03\u5ea6\u7b56\u7565\u3002

                                                              "},{"location":"end-user/baize/inference/triton-inference.html#_10","title":"\u8bbf\u95ee","text":""},{"location":"end-user/baize/inference/triton-inference.html#api","title":"API \u8bbf\u95ee","text":"
                                                              • Triton \u63d0\u4f9b\u4e86\u4e00\u4e2a\u57fa\u4e8e REST \u7684 API\uff0c\u5141\u8bb8\u5ba2\u6237\u7aef\u901a\u8fc7 HTTP POST \u8bf7\u6c42\u8fdb\u884c\u6a21\u578b\u63a8\u7406\u3002
                                                              • \u5ba2\u6237\u7aef\u53ef\u4ee5\u53d1\u9001 JSON \u683c\u5f0f\u7684\u8bf7\u6c42\u4f53\uff0c\u5176\u4e2d\u5305\u542b\u8f93\u5165\u6570\u636e\u548c\u76f8\u5173\u7684\u5143\u6570\u636e\u3002
                                                              "},{"location":"end-user/baize/inference/triton-inference.html#http","title":"HTTP \u8bbf\u95ee","text":"
                                                              1. \u53d1\u9001 HTTP POST \u8bf7\u6c42\uff1a\u4f7f\u7528\u5de5\u5177\u5982 curl \u6216 HTTP \u5ba2\u6237\u7aef\u5e93\uff08\u5982 Python \u7684 requests \u5e93\uff09\u5411 Triton Server \u53d1\u9001 POST \u8bf7\u6c42\u3002

                                                              2. \u8bbe\u7f6e HTTP \u5934\uff1a\u6839\u636e\u7528\u6237\u914d\u7f6e\u9879\u81ea\u52a8\u751f\u6210\u7684\u914d\u7f6e\uff0c\u5305\u542b\u6a21\u578b\u8f93\u5165\u548c\u8f93\u51fa\u7684\u5143\u6570\u636e\u3002

                                                              3. \u6784\u5efa\u8bf7\u6c42\u4f53\uff1a\u8bf7\u6c42\u4f53\u901a\u5e38\u5305\u542b\u8981\u8fdb\u884c\u63a8\u7406\u7684\u8f93\u5165\u6570\u636e\uff0c\u4ee5\u53ca\u6a21\u578b\u7279\u5b9a\u7684\u5143\u6570\u636e\u3002

                                                              "},{"location":"end-user/baize/inference/triton-inference.html#curl","title":"\u793a\u4f8b curl \u547d\u4ee4","text":"
                                                                curl -X POST \"http://<ip>:<port>/v2/models/<inference-name>/infer\" \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\n    \"inputs\": [\n      {\n        \"name\": \"model_input\",            \n        \"shape\": [1, 1, 32, 32],          \n        \"datatype\": \"FP32\",               \n        \"data\": [\n          [0.1234, 0.5678, 0.9101, ... ]  \n        ]\n      }\n    ]\n  }'\n
• <ip> is the host address where the Triton Inference Server is running.
• <port> is the host port where the Triton Inference Server is listening.
• <inference-name> is the name of the inference service you created.
• "name" must match the name of the input parameter in the model configuration.
• "shape" must match the dims of the input parameter in the model configuration.
• "datatype" must match the Data Type of the input parameter in the model configuration.
• "data" should be replaced with the actual inference data.

Note that the example above must be adapted to your specific model and environment, and the format and content of the input data must also meet the model's requirements.
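Equivalently, the same request can be sent from Python with the requests library mentioned above. This is a minimal sketch using the same placeholder values; the zero-filled input and the commented API-key header (relevant only if you configured the authentication policy, and whose exact header name depends on your configuration) are assumptions.

    import requests

    # Placeholder endpoint values; substitute your own <ip>, <port>, <inference-name>.
    url = "http://<ip>:<port>/v2/models/<inference-name>/infer"

    payload = {
        "inputs": [
            {
                "name": "model_input",          # must match the input name in the model configuration
                "shape": [1, 1, 32, 32],        # must match the configured dims
                "datatype": "FP32",             # must match the configured data type
                "data": [[[[0.0] * 32] * 32]],  # dummy data; replace with real inference data
            }
        ]
    }

    headers = {"Content-Type": "application/json"}
    # headers["Authorization"] = "Bearer <api-key>"  # assumption: only if API-key auth is configured

    resp = requests.post(url, json=payload, headers=headers)
    resp.raise_for_status()
    print(resp.json()["outputs"])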

                                                              "},{"location":"end-user/baize/inference/vllm-inference.html","title":"\u521b\u5efa vLLM \u63a8\u7406\u670d\u52a1","text":"

                                                              AI Lab \u652f\u6301\u4ee5 vLLM \u4f5c\u4e3a\u63a8\u7406\u670d\u52a1\uff0c\u63d0\u4f9b\u5168\u90e8 vLLM \u7684\u80fd\u529b\uff0c\u540c\u65f6\u63d0\u4f9b\u4e86\u5b8c\u5168\u9002\u914d OpenAI \u63a5\u53e3\u5b9a\u4e49\u3002

                                                              "},{"location":"end-user/baize/inference/vllm-inference.html#vllm_1","title":"vLLM \u4ecb\u7ecd","text":"

                                                              vLLM \u662f\u4e00\u4e2a\u5feb\u901f\u4e14\u6613\u4e8e\u4f7f\u7528\u7684\u7528\u4e8e\u63a8\u7406\u548c\u670d\u52a1\u7684\u5e93\uff0cvLLM \u65e8\u5728\u6781\u5927\u5730\u63d0\u5347\u5b9e\u65f6\u573a\u666f\u4e0b\u7684\u8bed\u8a00\u6a21\u578b\u670d\u52a1\u7684\u541e\u5410\u4e0e\u5185\u5b58\u4f7f\u7528\u6548\u7387\u3002vLLM \u5728\u901f\u5ea6\u3001\u7075\u6d3b\u6027\u65b9\u9762\u5177\u6709\u4ee5\u4e0b\u90e8\u5206\u7279\u70b9\uff1a

                                                              • \u8fde\u7eed\u6279\u5904\u7406\u4f20\u5165\u8bf7\u6c42\uff1b
                                                              • \u4f7f\u7528 PagedAttention \u9ad8\u6548\u7ba1\u7406\u6ce8\u610f\u529b\u952e\u548c\u503c\u5185\u5b58\uff1b
                                                              • \u4e0e\u6d41\u884c\u7684 HuggingFace \u578b\u53f7\u65e0\u7f1d\u96c6\u6210\uff1b
                                                              • \u517c\u5bb9 OpenAI \u7684 API \u670d\u52a1\u5668\u3002
                                                              "},{"location":"end-user/baize/inference/vllm-inference.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u51c6\u5907\u6a21\u578b\u6570\u636e\uff1a\u5728\u6570\u636e\u96c6\u7ba1\u7406\u4e2d\u7eb3\u7ba1\u6a21\u578b\u4ee3\u7801\uff0c\u5e76\u4fdd\u8bc1\u6570\u636e\u6210\u529f\u9884\u52a0\u8f7d\u3002

                                                              "},{"location":"end-user/baize/inference/vllm-inference.html#_2","title":"\u521b\u5efa\u63a8\u7406\u670d\u52a1","text":"
                                                              1. \u9009\u62e9 vLLM \u63a8\u7406\u6846\u67b6\uff0c\u5e76\u5728\u9009\u62e9\u6a21\u578b\u6a21\u5757\u9009\u62e9\u63d0\u524d\u521b\u5efa\u597d\u7684\u6a21\u578b\u6570\u636e\u96c6 hdd-models \u5e76\u586b\u5199\u6570\u636e\u96c6\u4e2d\u6a21\u578b\u6240\u5728\u7684\u8def\u5f84\u4fe1\u606f\u3002

                                                                \u672c\u6587\u63a8\u7406\u670d\u52a1\u7684\u521b\u5efa\u4f7f\u7528 ChatGLM3 \u6a21\u578b\u3002

                                                              2. \u914d\u7f6e\u63a8\u7406\u670d\u52a1\u7684\u8d44\u6e90\uff0c\u5e76\u8c03\u6574\u63a8\u7406\u670d\u52a1\u8fd0\u884c\u7684\u53c2\u6570\u3002

                                                                \u53c2\u6570\u540d \u63cf\u8ff0 GPU \u8d44\u6e90 \u6839\u636e\u6a21\u578b\u89c4\u6a21\u4ee5\u53ca\u96c6\u7fa4\u8d44\u6e90\u53ef\u4ee5\u4e3a\u63a8\u7406\u914d\u7f6e GPU \u8d44\u6e90\u3002 \u5141\u8bb8\u8fdc\u7a0b\u4ee3\u7801 \u63a7\u5236 vLLM \u662f\u5426\u4fe1\u4efb\u5e76\u6267\u884c\u6765\u81ea\u8fdc\u7a0b\u6e90\u7684\u4ee3\u7801 LoRA LoRA \u662f\u4e00\u79cd\u9488\u5bf9\u6df1\u5ea6\u5b66\u4e60\u6a21\u578b\u7684\u53c2\u6570\u9ad8\u6548\u8c03\u6574\u6280\u672f\u3002\u5b83\u901a\u8fc7\u5c06\u539f\u59cb\u6a21\u578b\u53c2\u6570\u77e9\u9635\u5206\u89e3\u4e3a\u4f4e\u79e9\u77e9\u9635\uff0c\u4ece\u800c\u51cf\u5c11\u53c2\u6570\u6570\u91cf\u548c\u8ba1\u7b97\u590d\u6742\u5ea6\u3002 1. --lora-modules\uff1a\u7528\u6765\u6307\u5b9a\u7279\u5b9a\u6a21\u5757\u6216\u5c42\u8fdb\u884c\u4f4e\u79e9\u8fd1\u4f3c 2. max_loras_rank\uff1a\u7528\u6765\u6307\u5b9a LoRA \u6a21\u578b\u4e2d\u6bcf\u4e2a\u9002\u914d\u5c42\u7684\u6700\u5927\u79e9\uff0c\u5bf9\u4e8e\u7b80\u5355\u7684\u4efb\u52a1\uff0c\u53ef\u4ee5\u9009\u62e9\u8f83\u5c0f\u7684\u79e9\u503c\uff0c\u800c\u5bf9\u4e8e\u590d\u6742\u4efb\u52a1\uff0c\u53ef\u80fd\u9700\u8981\u8f83\u5927\u7684\u79e9\u503c\u6765\u4fdd\u8bc1\u6a21\u578b\u6027\u80fd\u3002 3. max_loras\uff1a\u8868\u793a\u6a21\u578b\u4e2d\u53ef\u4ee5\u5305\u542b\u7684 LoRA \u5c42\u7684\u6700\u5927\u6570\u91cf\uff0c\u6839\u636e\u6a21\u578b\u5927\u5c0f\u3001\u63a8\u7406\u590d\u6742\u5ea6\u7b49\u56e0\u7d20\u81ea\u5b9a 4. max_cpu_loras\uff1a\u7528\u4e8e\u6307\u5b9a\u5728 CPU \u73af\u5883\u4e2d\u53ef\u4ee5\u5904\u7406\u7684 LoRA \u5c42\u7684\u6700\u5927\u6570\u3002 \u5173\u8054\u73af\u5883 \u901a\u8fc7\u9009\u62e9\u73af\u5883\u9884\u5b9a\u4e49\u63a8\u7406\u65f6\u6240\u9700\u7684\u73af\u5883\u4f9d\u8d56\u3002

                                                                Info

                                                                \u652f\u6301\u914d\u7f6e LoRA \u53c2\u6570\u7684\u6a21\u578b\u53ef\u53c2\u8003 vLLM \u652f\u6301\u7684\u6a21\u578b\u3002

                                                              3. \u5728 \u9ad8\u7ea7\u914d\u7f6e \u4e2d\uff0c\u652f\u6301\u6839\u636e GPU \u8d44\u6e90\u7b49\u8282\u70b9\u914d\u7f6e\u5b9e\u73b0\u81ea\u52a8\u5316\u7684\u4eb2\u548c\u6027\u8c03\u5ea6\uff0c\u540c\u65f6\u4e5f\u65b9\u4fbf\u7528\u6237\u81ea\u5b9a\u4e49\u8c03\u5ea6\u7b56\u7565\u3002

                                                              "},{"location":"end-user/baize/inference/vllm-inference.html#_3","title":"\u9a8c\u8bc1\u63a8\u7406\u670d\u52a1","text":"

                                                              \u63a8\u7406\u670d\u52a1\u521b\u5efa\u5b8c\u6210\u4e4b\u540e\uff0c\u70b9\u51fb\u63a8\u7406\u670d\u52a1\u540d\u79f0\u8fdb\u5165\u8be6\u60c5\uff0c\u67e5\u770b API \u8c03\u7528\u65b9\u6cd5\u3002\u901a\u8fc7\u4f7f\u7528 Curl\u3001Python\u3001Nodejs \u7b49\u65b9\u5f0f\u9a8c\u8bc1\u6267\u884c\u7ed3\u679c\u3002

                                                              \u62f7\u8d1d\u8be6\u60c5\u4e2d\u7684 curl \u547d\u4ee4\uff0c\u5e76\u5728\u7ec8\u7aef\u4e2d\u6267\u884c\u547d\u4ee4\u53d1\u9001\u4e00\u6761\u6a21\u578b\u63a8\u7406\u8bf7\u6c42\uff0c\u9884\u671f\u8f93\u51fa\uff1a
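Because the service exposes an OpenAI-compatible API, it can also be verified from Python. The sketch below is illustrative only: the base-URL placeholder and the model name chatglm3 are assumptions; use the values shown on the service details page.

    import requests

    # Placeholder endpoint; copy the real base URL from the inference service details page.
    base_url = "http://<ip>:<port>/v1"

    payload = {
        "model": "chatglm3",  # assumption: the model name configured for this service
        "messages": [{"role": "user", "content": "Hello, who are you?"}],
        "max_tokens": 64,
    }

    resp = requests.post(f"{base_url}/chat/completions", json=payload)
    resp.raise_for_status()
    print(resp.json()["choices"][0]["message"]["content"])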

                                                              "},{"location":"end-user/baize/jobs/create.html","title":"\u521b\u5efa\u4efb\u52a1\uff08Job\uff09","text":"

                                                              \u4efb\u52a1\u7ba1\u7406\u662f\u6307\u901a\u8fc7\u4f5c\u4e1a\u8c03\u5ea6\u548c\u7ba1\u63a7\u7ec4\u4ef6\u6765\u521b\u5efa\u548c\u7ba1\u7406\u4efb\u52a1\u751f\u547d\u5468\u671f\u7684\u529f\u80fd\u3002

                                                              AI Lab \u91c7\u7528 Kubernetes \u7684 Job \u673a\u5236\u6765\u8c03\u5ea6\u5404\u9879 AI \u63a8\u7406\u3001\u8bad\u7ec3\u4efb\u52a1\u3002

                                                              "},{"location":"end-user/baize/jobs/create.html#_1","title":"\u901a\u7528\u6b65\u9aa4","text":"
                                                              1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u4efb\u52a1\u4e2d\u5fc3 -> \u8bad\u7ec3\u4efb\u52a1 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae\u3002

                                                              2. \u7cfb\u7edf\u4f1a\u9884\u5148\u586b\u5145\u57fa\u7840\u914d\u7f6e\u6570\u636e\uff0c\u5305\u62ec\u8981\u90e8\u7f72\u7684\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u3001\u4efb\u52a1\u7c7b\u578b\u3001\u961f\u5217\u3001\u4f18\u5148\u7ea7\u7b49\u3002 \u8c03\u6574\u8fd9\u4e9b\u53c2\u6570\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                                              3. \u914d\u7f6e\u955c\u50cf\u5730\u5740\u3001\u8fd0\u884c\u53c2\u6570\u4ee5\u53ca\u5173\u8054\u7684\u6570\u636e\u96c6\u3001\u73af\u5883\u548c\u8d44\u6e90\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                                              4. \u6309\u9700\u6dfb\u52a0\u6807\u7b7e\u3001\u6ce8\u89e3\u3001\u73af\u5883\u53d8\u91cf\u7b49\u4efb\u52a1\u53c2\u6570\uff0c\u9009\u62e9\u8c03\u5ea6\u7b56\u7565\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                                                              5. \u4efb\u52a1\u521b\u5efa\u6210\u529f\u540e\uff0c\u4f1a\u6709\u51e0\u79cd\u8fd0\u884c\u72b6\u6001\uff1a

                                                                • \u8fd0\u884c\u4e2d
                                                                • \u6392\u961f\u4e2d
                                                                • \u63d0\u4ea4\u6210\u529f\u3001\u63d0\u4ea4\u5931\u8d25
                                                                • \u4efb\u52a1\u6210\u529f\u3001\u4efb\u52a1\u5931\u8d25
                                                              "},{"location":"end-user/baize/jobs/create.html#_2","title":"\u521b\u5efa\u7279\u5b9a\u4efb\u52a1","text":"
                                                              • \u521b\u5efa Pytorch \u4efb\u52a1
                                                              • \u521b\u5efa TensorFlow \u4efb\u52a1
                                                              • \u521b\u5efa MPI \u4efb\u52a1
                                                              • \u521b\u5efa MXNet \u4efb\u52a1
                                                              • \u521b\u5efa PaddlePaddle \u4efb\u52a1
                                                              "},{"location":"end-user/baize/jobs/delete.html","title":"\u5220\u9664\u4efb\u52a1\uff08Job\uff09","text":"

                                                              \u5982\u679c\u53d1\u73b0\u4efb\u52a1\u5197\u4f59\u3001\u8fc7\u671f\u6216\u56e0\u5176\u4ed6\u7f18\u6545\u4e0d\u518d\u9700\u8981\uff0c\u53ef\u4ee5\u4ece\u8bad\u7ec3\u4efb\u52a1\u5217\u8868\u4e2d\u5220\u9664\u3002

                                                              1. \u5728\u8bad\u7ec3\u4efb\u52a1\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u5220\u9664 \u3002

                                                              2. \u5728\u5f39\u7a97\u4e2d\u786e\u8ba4\u8981\u5220\u9664\u7684\u4efb\u52a1\uff0c\u8f93\u5165\u4efb\u52a1\u540d\u79f0\u540e\u70b9\u51fb \u5220\u9664 \u3002

                                                              3. \u5c4f\u5e55\u63d0\u793a\u5220\u9664\u6210\u529f\uff0c\u8be5\u4efb\u52a1\u4ece\u5217\u8868\u4e2d\u6d88\u5931\u3002

                                                              Caution

                                                              \u4efb\u52a1\u4e00\u65e6\u5220\u9664\u5c06\u4e0d\u53ef\u6062\u590d\uff0c\u8bf7\u8c28\u614e\u64cd\u4f5c\u3002

                                                              "},{"location":"end-user/baize/jobs/mpi.html","title":"MPI \u4efb\u52a1","text":"

                                                              MPI\uff08Message Passing Interface\uff09\u662f\u4e00\u79cd\u7528\u4e8e\u5e76\u884c\u8ba1\u7b97\u7684\u901a\u4fe1\u534f\u8bae\uff0c\u5b83\u5141\u8bb8\u591a\u4e2a\u8ba1\u7b97\u8282\u70b9\u4e4b\u95f4\u8fdb\u884c\u6d88\u606f\u4f20\u9012\u548c\u534f\u4f5c\u3002 MPI \u4efb\u52a1\u662f\u4f7f\u7528 MPI \u534f\u8bae\u8fdb\u884c\u5e76\u884c\u8ba1\u7b97\u7684\u4efb\u52a1\uff0c\u9002\u7528\u4e8e\u9700\u8981\u5927\u89c4\u6a21\u5e76\u884c\u5904\u7406\u7684\u5e94\u7528\u573a\u666f\uff0c\u4f8b\u5982\u5206\u5e03\u5f0f\u8bad\u7ec3\u3001\u79d1\u5b66\u8ba1\u7b97\u7b49\u3002

                                                              \u5728 AI Lab \u4e2d\uff0c\u6211\u4eec\u63d0\u4f9b\u4e86 MPI \u4efb\u52a1\u7684\u652f\u6301\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u754c\u9762\u5316\u64cd\u4f5c\uff0c\u5feb\u901f\u521b\u5efa MPI \u4efb\u52a1\uff0c\u8fdb\u884c\u9ad8\u6027\u80fd\u7684\u5e76\u884c\u8ba1\u7b97\u3002 \u672c\u6559\u7a0b\u5c06\u6307\u5bfc\u60a8\u5982\u4f55\u5728 AI Lab \u4e2d\u521b\u5efa\u548c\u8fd0\u884c\u4e00\u4e2a MPI \u4efb\u52a1\u3002

                                                              "},{"location":"end-user/baize/jobs/mpi.html#_1","title":"\u4efb\u52a1\u914d\u7f6e\u4ecb\u7ecd","text":"
                                                              • \u4efb\u52a1\u7c7b\u578b \uff1aMPI\uff0c\u7528\u4e8e\u8fd0\u884c\u5e76\u884c\u8ba1\u7b97\u4efb\u52a1\u3002
                                                              • \u8fd0\u884c\u73af\u5883 \uff1a\u9009\u7528\u9884\u88c5\u4e86 MPI \u73af\u5883\u7684\u955c\u50cf\uff0c\u6216\u8005\u5728\u4efb\u52a1\u4e2d\u6307\u5b9a\u5b89\u88c5\u5fc5\u8981\u7684\u4f9d\u8d56\u3002
                                                              • MPIJob \u914d\u7f6e \uff1a\u7406\u89e3\u5e76\u914d\u7f6e MPIJob \u7684\u5404\u9879\u53c2\u6570\uff0c\u5982\u526f\u672c\u6570\u3001\u8d44\u6e90\u8bf7\u6c42\u7b49\u3002
                                                              "},{"location":"end-user/baize/jobs/mpi.html#_2","title":"\u4efb\u52a1\u8fd0\u884c\u73af\u5883","text":"

                                                              \u5728\u8fd9\u91cc\u6211\u4eec\u4f7f\u7528 baize-notebook \u57fa\u7840\u955c\u50cf\u548c \u5173\u8054\u73af\u5883 \u7684\u65b9\u5f0f\u6765\u4f5c\u4e3a\u4efb\u52a1\u7684\u57fa\u7840\u8fd0\u884c\u73af\u5883\u3002 \u786e\u4fdd\u8fd0\u884c\u73af\u5883\u4e2d\u5305\u542b MPI \u53ca\u76f8\u5173\u5e93\uff0c\u5982 OpenMPI\u3001mpi4py \u7b49\u3002

                                                              \u6ce8\u610f \uff1a\u4e86\u89e3\u5982\u4f55\u521b\u5efa\u73af\u5883\uff0c\u8bf7\u53c2\u8003\u73af\u5883\u5217\u8868\u3002
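A quick way to confirm that OpenMPI and mpi4py work in the chosen environment is a minimal hello-world run; the file name verify_mpi.py is just an example.

    # verify_mpi.py - launch with: mpirun -np 2 python verify_mpi.py
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()  # this process's index within the communicator
    size = comm.Get_size()  # total number of MPI processes
    print(f"rank {rank} of {size} on {MPI.Get_processor_name()}")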

                                                              "},{"location":"end-user/baize/jobs/mpi.html#mpi_1","title":"\u521b\u5efa MPI \u4efb\u52a1","text":""},{"location":"end-user/baize/jobs/mpi.html#mpi_2","title":"MPI \u4efb\u52a1\u521b\u5efa\u6b65\u9aa4","text":"
                                                              1. \u767b\u5f55\u5e73\u53f0 \uff1a\u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3\uff0c\u8fdb\u5165 \u8bad\u7ec3\u4efb\u52a1 \u9875\u9762\u3002
                                                              2. \u521b\u5efa\u4efb\u52a1 \uff1a\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                                                              3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b \uff1a\u5728\u5f39\u51fa\u7684\u7a97\u53e3\u4e2d\uff0c\u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a MPI\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002
                                                              4. \u586b\u5199\u4efb\u52a1\u4fe1\u606f \uff1a\u586b\u5199\u4efb\u52a1\u540d\u79f0\u548c\u63cf\u8ff0\uff0c\u4f8b\u5982 \u201cbenchmarks-mpi\u201d\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002
                                                              5. \u914d\u7f6e\u4efb\u52a1\u53c2\u6570 \uff1a\u6839\u636e\u60a8\u7684\u9700\u6c42\uff0c\u914d\u7f6e\u4efb\u52a1\u7684\u8fd0\u884c\u53c2\u6570\u3001\u955c\u50cf\u3001\u8d44\u6e90\u7b49\u4fe1\u606f\u3002
                                                              "},{"location":"end-user/baize/jobs/mpi.html#_3","title":"\u8fd0\u884c\u53c2\u6570","text":"
                                                              • \u542f\u52a8\u547d\u4ee4 \uff1a\u4f7f\u7528 mpirun\uff0c\u8fd9\u662f\u8fd0\u884c MPI \u7a0b\u5e8f\u7684\u547d\u4ee4\u3002
                                                              • \u547d\u4ee4\u53c2\u6570 \uff1a\u8f93\u5165\u60a8\u8981\u8fd0\u884c\u7684 MPI \u7a0b\u5e8f\u7684\u53c2\u6570\u3002

                                                              \u793a\u4f8b\uff1a\u8fd0\u884c TensorFlow Benchmarks

                                                              \u5728\u672c\u793a\u4f8b\u4e2d\uff0c\u6211\u4eec\u5c06\u8fd0\u884c\u4e00\u4e2a TensorFlow \u7684\u57fa\u51c6\u6d4b\u8bd5\u7a0b\u5e8f\uff0c\u4f7f\u7528 Horovod \u8fdb\u884c\u5206\u5e03\u5f0f\u8bad\u7ec3\u3002 \u9996\u5148\uff0c\u786e\u4fdd\u60a8\u4f7f\u7528\u7684\u955c\u50cf\u4e2d\u5305\u542b\u6240\u9700\u7684\u4f9d\u8d56\u9879\uff0c\u4f8b\u5982 TensorFlow\u3001Horovod\u3001Open MPI \u7b49\u3002

                                                              \u955c\u50cf\u9009\u62e9 \uff1a\u4f7f\u7528\u5305\u542b TensorFlow \u548c MPI \u7684\u955c\u50cf\uff0c\u4f8b\u5982 mai.daocloud.io/docker.io/mpioperator/tensorflow-benchmarks:latest\u3002

                                                              \u547d\u4ee4\u53c2\u6570 \uff1a

                                                              mpirun --allow-run-as-root -np 2 -bind-to none -map-by slot \\\n  -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH -x PATH \\\n  -mca pml ob1 -mca btl ^openib \\\n  python scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py \\\n  --model=resnet101 --batch_size=64 --variable_update=horovod\n

Explanation:

• mpirun: the MPI launch command.
• --allow-run-as-root: allows running as the root user (containers usually run as root).
• -np 2: sets the number of processes to 2.
• -bind-to none, -map-by slot: MPI process binding and mapping settings.
• -x NCCL_DEBUG=INFO: sets the debug log level for NCCL (NVIDIA Collective Communication Library).
• -x LD_LIBRARY_PATH, -x PATH: passes the necessary environment variables into the MPI environment.
• -mca pml ob1 -mca btl ^openib: MPI configuration parameters selecting the messaging (pml) and transport (btl) protocols, here excluding openib.
• python scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py: runs the TensorFlow benchmark script.
• --model=resnet101, --batch_size=64, --variable_update=horovod: TensorFlow script arguments specifying the model, the batch size, and Horovod for parameter updates.
                                                              "},{"location":"end-user/baize/jobs/mpi.html#_4","title":"\u8d44\u6e90\u914d\u7f6e","text":"

                                                              \u5728\u4efb\u52a1\u914d\u7f6e\u4e2d\uff0c\u9700\u8981\u4e3a\u6bcf\u4e2a\u8282\u70b9\uff08Launcher \u548c Worker\uff09\u5206\u914d\u9002\u5f53\u7684\u8d44\u6e90\uff0c\u4f8b\u5982 CPU\u3001\u5185\u5b58\u548c GPU\u3002

                                                              \u8d44\u6e90\u793a\u4f8b \uff1a

                                                              • Launcher\uff08\u542f\u52a8\u5668\uff09 \uff1a

                                                                • \u526f\u672c\u6570 \uff1a1
                                                                • \u8d44\u6e90\u8bf7\u6c42 \uff1a
                                                                  • CPU\uff1a2 \u6838
                                                                  • \u5185\u5b58\uff1a4 GiB
                                                              • Worker\uff08\u5de5\u4f5c\u8282\u70b9\uff09 \uff1a

                                                                • \u526f\u672c\u6570 \uff1a2
                                                                • \u8d44\u6e90\u8bf7\u6c42 \uff1a
                                                                  • CPU\uff1a2 \u6838
                                                                  • \u5185\u5b58\uff1a4 GiB
                                                                  • GPU\uff1a\u6839\u636e\u9700\u6c42\u5206\u914d
                                                              "},{"location":"end-user/baize/jobs/mpi.html#mpijob","title":"\u5b8c\u6574\u7684 MPIJob \u914d\u7f6e\u793a\u4f8b","text":"

                                                              \u4ee5\u4e0b\u662f\u5b8c\u6574\u7684 MPIJob \u914d\u7f6e\u793a\u4f8b\uff0c\u4f9b\u60a8\u53c2\u8003\u3002

                                                              apiVersion: kubeflow.org/v1\nkind: MPIJob\nmetadata:\n  name: tensorflow-benchmarks\nspec:\n  slotsPerWorker: 1\n  runPolicy:\n    cleanPodPolicy: Running\n  mpiReplicaSpecs:\n    Launcher:\n      replicas: 1\n      template:\n        spec:\n          containers:\n            - name: tensorflow-benchmarks\n              image: mai.daocloud.io/docker.io/mpioperator/tensorflow-benchmarks:latest\n              command:\n                - mpirun\n                - --allow-run-as-root\n                - -np\n                - \"2\"\n                - -bind-to\n                - none\n                - -map-by\n                - slot\n                - -x\n                - NCCL_DEBUG=INFO\n                - -x\n                - LD_LIBRARY_PATH\n                - -x\n                - PATH\n                - -mca\n                - pml\n                - ob1\n                - -mca\n                - btl\n                - ^openib\n                - python\n                - scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py\n                - --model=resnet101\n                - --batch_size=64\n                - --variable_update=horovod\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n    Worker:\n      replicas: 2\n      template:\n        spec:\n          containers:\n            - name: tensorflow-benchmarks\n              image: mai.daocloud.io/docker.io/mpioperator/tensorflow-benchmarks:latest\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpumem: 1k\n                  nvidia.com/vgpu: \"1\"\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n

Configuration breakdown:

• apiVersion and kind: the API version and type of the resource; MPIJob is a custom resource defined by Kubeflow for creating MPI-type jobs.
• metadata: metadata such as the job name.
• spec: the detailed job configuration.
  • slotsPerWorker: the number of slots per Worker node, usually set to 1.
  • runPolicy: the run policy, for example whether to clean up Pods after the job finishes.
  • mpiReplicaSpecs: the replica configuration of the MPI job.
    • Launcher: the launcher, responsible for starting the MPI job.
      • replicas: the replica count, usually 1.
      • template: the Pod template defining the container image, command, resources, and so on.
    • Worker: the worker nodes that actually perform the computation.
      • replicas: the replica count, set to 2 here according to the required parallelism.
      • template: the Pod template, likewise defining the container's runtime environment and resources.
"},{"location":"end-user/baize/jobs/mpi.html#_5","title":"Set the Job Replica Count","text":"

When creating an MPI job, the job replica count must be set correctly based on the replica counts configured in mpiReplicaSpecs.

• Total replicas = Launcher replicas + Worker replicas
• In this example:

  • Launcher replicas: 1
  • Worker replicas: 2
  • Total replicas: 1 + 2 = 3

Therefore, set the job replica count to 3 in the job configuration.

                                                              "},{"location":"end-user/baize/jobs/mpi.html#_6","title":"\u63d0\u4ea4\u4efb\u52a1","text":"

                                                              \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u63d0\u4ea4 \u6309\u94ae\uff0c\u5f00\u59cb\u8fd0\u884c MPI \u4efb\u52a1\u3002

                                                              "},{"location":"end-user/baize/jobs/mpi.html#_7","title":"\u67e5\u770b\u8fd0\u884c\u7ed3\u679c","text":"

                                                              \u4efb\u52a1\u63d0\u4ea4\u6210\u529f\u540e\uff0c\u60a8\u53ef\u4ee5\u8fdb\u5165 \u4efb\u52a1\u8be6\u60c5 \u9875\u9762\uff0c\u67e5\u770b\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\u548c\u4efb\u52a1\u7684\u8fd0\u884c\u72b6\u6001\u3002 \u4ece\u53f3\u4e0a\u89d2\u8fdb\u5165 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\uff0c\u53ef\u4ee5\u67e5\u770b\u8fd0\u884c\u8fc7\u7a0b\u4e2d\u6bcf\u4e2a\u8282\u70b9\u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                                                              \u793a\u4f8b\u8f93\u51fa\uff1a

                                                              TensorFlow:  1.13\nModel:       resnet101\nMode:        training\nBatch size:  64\n...\n\nTotal images/sec: 125.67\n

This indicates that the MPI job ran successfully and the TensorFlow benchmark completed its distributed training.

"},{"location":"end-user/baize/jobs/mpi.html#_8","title":"Summary","text":"

In this tutorial you learned how to create and run an MPI job on the AI Lab platform. We covered in detail how to configure an MPIJob and how to specify the command to run and the resource requirements in the job. We hope this tutorial helps; if you have any questions, consult the other documentation the platform provides or contact technical support.

Appendix:

• If your runtime environment does not have the required libraries preinstalled (such as mpi4py or Horovod), add install commands to the job, or use an image with the dependencies preinstalled.
• In practice you can modify the MPIJob configuration as needed, for example changing the image, command arguments, or resource requests.
                                                              "},{"location":"end-user/baize/jobs/mxnet.html","title":"MXNet \u4efb\u52a1","text":"

                                                              Warning

                                                              \u7531\u4e8e Apache MXNet \u9879\u76ee\u5df2\u5b58\u6863\uff0c\u56e0\u6b64 Kubeflow MXJob \u5c06\u5728\u672a\u6765\u7684 Training Operator 1.9 \u7248\u672c\u4e2d\u5f03\u7528\u548c\u5220\u9664\u3002

                                                              Apache MXNet \u662f\u4e00\u4e2a\u9ad8\u6027\u80fd\u7684\u6df1\u5ea6\u5b66\u4e60\u6846\u67b6\uff0c\u652f\u6301\u591a\u79cd\u7f16\u7a0b\u8bed\u8a00\u3002MXNet \u4efb\u52a1\u53ef\u4ee5\u4f7f\u7528\u591a\u79cd\u65b9\u5f0f\u8fdb\u884c\u8bad\u7ec3\uff0c\u5305\u62ec\u5355\u673a\u6a21\u5f0f\u548c\u5206\u5e03\u5f0f\u6a21\u5f0f\u3002\u5728 AI Lab \u4e2d\uff0c\u6211\u4eec\u63d0\u4f9b\u4e86\u5bf9 MXNet \u4efb\u52a1\u7684\u652f\u6301\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u754c\u9762\u5316\u64cd\u4f5c\uff0c\u5feb\u901f\u521b\u5efa MXNet \u4efb\u52a1\uff0c\u8fdb\u884c\u6a21\u578b\u8bad\u7ec3\u3002

                                                              \u672c\u6559\u7a0b\u5c06\u6307\u5bfc\u60a8\u5982\u4f55\u5728 AI Lab \u5e73\u53f0\u4e0a\u521b\u5efa\u548c\u8fd0\u884c MXNet \u7684\u5355\u673a\u548c\u5206\u5e03\u5f0f\u4efb\u52a1\u3002

                                                              "},{"location":"end-user/baize/jobs/mxnet.html#_1","title":"\u4efb\u52a1\u914d\u7f6e\u4ecb\u7ecd","text":"
                                                              • \u4efb\u52a1\u7c7b\u578b\uff1aMXNet\uff0c\u652f\u6301\u5355\u673a\u548c\u5206\u5e03\u5f0f\u4e24\u79cd\u6a21\u5f0f\u3002
                                                              • \u8fd0\u884c\u73af\u5883\uff1a\u9009\u62e9\u5305\u542b MXNet \u6846\u67b6\u7684\u955c\u50cf\uff0c\u6216\u5728\u4efb\u52a1\u4e2d\u5b89\u88c5\u5fc5\u8981\u7684\u4f9d\u8d56\u3002
                                                              "},{"location":"end-user/baize/jobs/mxnet.html#_2","title":"\u4efb\u52a1\u8fd0\u884c\u73af\u5883","text":"

                                                              \u6211\u4eec\u4f7f\u7528 release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest \u955c\u50cf\u4f5c\u4e3a\u4efb\u52a1\u7684\u57fa\u7840\u8fd0\u884c\u73af\u5883\u3002\u8be5\u955c\u50cf\u9884\u88c5\u4e86 MXNet \u53ca\u5176\u76f8\u5173\u4f9d\u8d56\uff0c\u652f\u6301 GPU \u52a0\u901f\u3002

                                                              \u6ce8\u610f\uff1a\u4e86\u89e3\u5982\u4f55\u521b\u5efa\u548c\u7ba1\u7406\u73af\u5883\uff0c\u8bf7\u53c2\u8003 \u73af\u5883\u5217\u8868\u3002

                                                              "},{"location":"end-user/baize/jobs/mxnet.html#mxnet_1","title":"\u521b\u5efa MXNet \u4efb\u52a1","text":""},{"location":"end-user/baize/jobs/mxnet.html#mxnet_2","title":"MXNet \u5355\u673a\u4efb\u52a1","text":""},{"location":"end-user/baize/jobs/mxnet.html#_3","title":"\u521b\u5efa\u6b65\u9aa4","text":"
                                                              1. \u767b\u5f55\u5e73\u53f0\uff1a\u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3\uff0c\u8fdb\u5165 \u8bad\u7ec3\u4efb\u52a1 \u9875\u9762\u3002
                                                              2. \u521b\u5efa\u4efb\u52a1\uff1a\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                                                              3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\uff1a\u5728\u5f39\u51fa\u7684\u7a97\u53e3\u4e2d\uff0c\u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a MXNet\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002
                                                              4. \u586b\u5199\u4efb\u52a1\u4fe1\u606f\uff1a\u586b\u5199\u4efb\u52a1\u540d\u79f0\u548c\u63cf\u8ff0\uff0c\u4f8b\u5982 \u201cMXNet \u5355\u673a\u8bad\u7ec3\u4efb\u52a1\u201d\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a\u3002
                                                              5. \u914d\u7f6e\u4efb\u52a1\u53c2\u6570\uff1a\u6839\u636e\u60a8\u7684\u9700\u6c42\uff0c\u914d\u7f6e\u4efb\u52a1\u7684\u8fd0\u884c\u53c2\u6570\u3001\u955c\u50cf\u3001\u8d44\u6e90\u7b49\u4fe1\u606f\u3002
                                                              "},{"location":"end-user/baize/jobs/mxnet.html#_4","title":"\u8fd0\u884c\u53c2\u6570","text":"
                                                              • \u542f\u52a8\u547d\u4ee4\uff1apython3
                                                              • \u547d\u4ee4\u53c2\u6570\uff1a

                                                                /mxnet/mxnet/example/gluon/mnist/mnist.py --epochs 10 --cuda\n

Explanation:

• /mxnet/mxnet/example/gluon/mnist/mnist.py: the MNIST handwritten digit recognition example script shipped with MXNet.
• --epochs 10: sets the number of training epochs to 10.
• --cuda: uses CUDA for GPU acceleration.
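For orientation, the Gluon example reduces to a standard training loop along the following lines. This is a simplified sketch, not the actual contents of mnist.py, and the small two-layer network is hypothetical.

    import mxnet as mx
    from mxnet import autograd, gluon
    from mxnet.gluon import nn

    # Pick a GPU context when --cuda is in effect, otherwise fall back to CPU.
    ctx = mx.gpu() if mx.context.num_gpus() > 0 else mx.cpu()

    # Hypothetical small network standing in for the example's model.
    net = nn.Sequential()
    net.add(nn.Dense(128, activation="relu"), nn.Dense(10))
    net.initialize(mx.init.Xavier(), ctx=ctx)

    trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.1})
    loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
    train_data = gluon.data.DataLoader(
        gluon.data.vision.MNIST(train=True).transform_first(
            gluon.data.vision.transforms.ToTensor()),
        batch_size=64, shuffle=True)

    for epoch in range(10):  # mirrors --epochs 10
        for data, label in train_data:
            data, label = data.as_in_context(ctx), label.as_in_context(ctx)
            with autograd.record():
                loss = loss_fn(net(data), label)
            loss.backward()
            trainer.step(batch_size=data.shape[0])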
                                                              "},{"location":"end-user/baize/jobs/mxnet.html#_5","title":"\u8d44\u6e90\u914d\u7f6e","text":"
                                                              • \u526f\u672c\u6570\uff1a1\uff08\u5355\u673a\u4efb\u52a1\uff09
                                                              • \u8d44\u6e90\u8bf7\u6c42\uff1a
                                                                • CPU\uff1a2 \u6838
                                                                • \u5185\u5b58\uff1a4 GiB
                                                                • GPU\uff1a1 \u5757
                                                              "},{"location":"end-user/baize/jobs/mxnet.html#mxjob","title":"\u5b8c\u6574\u7684 MXJob \u914d\u7f6e\u793a\u4f8b","text":"

                                                              \u4ee5\u4e0b\u662f\u5355\u673a MXJob \u7684 YAML \u914d\u7f6e\uff1a

                                                              apiVersion: \"kubeflow.org/v1\"\nkind: \"MXJob\"\nmetadata:\n  name: \"mxnet-single-job\"\nspec:\n  jobMode: MXTrain\n  mxReplicaSpecs:\n    Worker:\n      replicas: 1\n      restartPolicy: Never\n      template:\n        spec:\n          containers:\n            - name: mxnet\n              image: release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest\n              command: [\"python3\"]\n              args:\n                [\n                  \"/mxnet/mxnet/example/gluon/mnist/mnist.py\",\n                  \"--epochs\",\n                  \"10\",\n                  \"--cuda\",\n                ]\n              ports:\n                - containerPort: 9991\n                  name: mxjob-port\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n

Configuration breakdown:

• apiVersion and kind: the API version and type of the resource, here MXJob.
• metadata: metadata such as the job name.
• spec: the detailed job configuration.
  • jobMode: set to MXTrain, indicating a training job.
  • mxReplicaSpecs: the replica configuration of the MXNet job.
    • Worker: the worker-node configuration.
      • replicas: the replica count, 1 here.
      • restartPolicy: the restart policy, set to Never, meaning the job is not restarted on failure.
      • template: the Pod template defining the container's runtime environment and resources.
        • containers: the container list.
          • name: the container name.
          • image: the image to use.
          • command and args: the startup command and its arguments.
          • ports: the container port configuration.
          • resources: resource requests and limits.
                                                              "},{"location":"end-user/baize/jobs/mxnet.html#_6","title":"\u63d0\u4ea4\u4efb\u52a1","text":"

                                                              \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u63d0\u4ea4 \u6309\u94ae\uff0c\u5f00\u59cb\u8fd0\u884c MXNet \u5355\u673a\u4efb\u52a1\u3002

                                                              "},{"location":"end-user/baize/jobs/mxnet.html#_7","title":"\u67e5\u770b\u8fd0\u884c\u7ed3\u679c","text":"

                                                              \u4efb\u52a1\u63d0\u4ea4\u6210\u529f\u540e\uff0c\u60a8\u53ef\u4ee5\u8fdb\u5165 \u4efb\u52a1\u8be6\u60c5 \u9875\u9762\uff0c\u67e5\u770b\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\u548c\u4efb\u52a1\u7684\u8fd0\u884c\u72b6\u6001\u3002\u4ece\u53f3\u4e0a\u89d2\u8fdb\u5165 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\uff0c\u53ef\u4ee5\u67e5\u770b\u8fd0\u884c\u8fc7\u7a0b\u4e2d\u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                                                              \u793a\u4f8b\u8f93\u51fa\uff1a

                                                              Epoch 1: accuracy=0.95\nEpoch 2: accuracy=0.97\n...\nEpoch 10: accuracy=0.98\nTraining completed.\n

This indicates that the single-node MXNet job ran successfully and model training completed.

                                                              "},{"location":"end-user/baize/jobs/mxnet.html#mxnet_3","title":"MXNet \u5206\u5e03\u5f0f\u4efb\u52a1","text":"

                                                              \u5728\u5206\u5e03\u5f0f\u6a21\u5f0f\u4e0b\uff0cMXNet \u4efb\u52a1\u53ef\u4ee5\u4f7f\u7528\u591a\u53f0\u8ba1\u7b97\u8282\u70b9\u5171\u540c\u5b8c\u6210\u8bad\u7ec3\uff0c\u63d0\u9ad8\u8bad\u7ec3\u6548\u7387\u3002

                                                              "},{"location":"end-user/baize/jobs/mxnet.html#_8","title":"\u521b\u5efa\u6b65\u9aa4","text":"
                                                              1. \u767b\u5f55\u5e73\u53f0\uff1a\u540c\u4e0a\u3002
                                                              2. \u521b\u5efa\u4efb\u52a1\uff1a\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                                                              3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\uff1a\u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a MXNet\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002
                                                              4. \u586b\u5199\u4efb\u52a1\u4fe1\u606f\uff1a\u586b\u5199\u4efb\u52a1\u540d\u79f0\u548c\u63cf\u8ff0\uff0c\u4f8b\u5982 \u201cMXNet \u5206\u5e03\u5f0f\u8bad\u7ec3\u4efb\u52a1\u201d\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a\u3002
                                                              5. \u914d\u7f6e\u4efb\u52a1\u53c2\u6570\uff1a\u6839\u636e\u9700\u6c42\uff0c\u914d\u7f6e\u8fd0\u884c\u53c2\u6570\u3001\u955c\u50cf\u3001\u8d44\u6e90\u7b49\u3002
                                                              "},{"location":"end-user/baize/jobs/mxnet.html#_9","title":"\u8fd0\u884c\u53c2\u6570","text":"
                                                              • \u542f\u52a8\u547d\u4ee4\uff1apython3
                                                              • \u547d\u4ee4\u53c2\u6570\uff1a

                                                                /mxnet/mxnet/example/image-classification/train_mnist.py --num-epochs 10 --num-layers 2 --kv-store dist_device_sync --gpus 0\n

Explanation:

• /mxnet/mxnet/example/image-classification/train_mnist.py: the image classification example script shipped with MXNet.
• --num-epochs 10: sets the number of training epochs to 10.
• --num-layers 2: sets the number of layers in the model to 2.
• --kv-store dist_device_sync: uses the distributed device-synchronization mode.
• --gpus 0: uses GPU 0 for acceleration.
                                                              "},{"location":"end-user/baize/jobs/mxnet.html#_10","title":"\u8d44\u6e90\u914d\u7f6e","text":"
                                                              • \u4efb\u52a1\u526f\u672c\u6570\uff1a3\uff08\u5305\u62ec Scheduler\u3001Server \u548c Worker\uff09
                                                              • \u5404\u89d2\u8272\u8d44\u6e90\u8bf7\u6c42\uff1a
                                                                • Scheduler\uff08\u8c03\u5ea6\u5668\uff09\uff1a
                                                                  • \u526f\u672c\u6570\uff1a1
                                                                  • \u8d44\u6e90\u8bf7\u6c42\uff1a
                                                                    • CPU\uff1a2 \u6838
                                                                    • \u5185\u5b58\uff1a4 GiB
                                                                    • GPU\uff1a1 \u5757
                                                                • Server\uff08\u53c2\u6570\u670d\u52a1\u5668\uff09\uff1a
                                                                  • \u526f\u672c\u6570\uff1a1
                                                                  • \u8d44\u6e90\u8bf7\u6c42\uff1a
                                                                    • CPU\uff1a2 \u6838
                                                                    • \u5185\u5b58\uff1a4 GiB
                                                                    • GPU\uff1a1 \u5757
                                                                • Worker\uff08\u5de5\u4f5c\u8282\u70b9\uff09\uff1a
                                                                  • \u526f\u672c\u6570\uff1a1
                                                                  • \u8d44\u6e90\u8bf7\u6c42\uff1a
                                                                    • CPU\uff1a2 \u6838
                                                                    • \u5185\u5b58\uff1a4 GiB
                                                                    • GPU\uff1a1 \u5757
                                                              "},{"location":"end-user/baize/jobs/mxnet.html#mxjob_1","title":"\u5b8c\u6574\u7684 MXJob \u914d\u7f6e\u793a\u4f8b","text":"

                                                              \u4ee5\u4e0b\u662f\u5206\u5e03\u5f0f MXJob \u7684 YAML \u914d\u7f6e\uff1a

                                                              apiVersion: \"kubeflow.org/v1\"\nkind: \"MXJob\"\nmetadata:\n  name: \"mxnet-job\"\nspec:\n  jobMode: MXTrain\n  mxReplicaSpecs:\n    Scheduler:\n      replicas: 1\n      restartPolicy: Never\n      template:\n        spec:\n          containers:\n            - name: mxnet\n              image: release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest\n              ports:\n                - containerPort: 9991\n                  name: mxjob-port\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n    Server:\n      replicas: 1\n      restartPolicy: Never\n      template:\n        spec:\n          containers:\n            - name: mxnet\n              image: release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest\n              ports:\n                - containerPort: 9991\n                  name: mxjob-port\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n    Worker:\n      replicas: 1\n      restartPolicy: Never\n      template:\n        spec:\n          containers:\n            - name: mxnet\n              image: release-ci.daocloud.io/baize/kubeflow/mxnet-gpu:latest\n              command: [\"python3\"]\n              args:\n                [\n                  \"/mxnet/mxnet/example/image-classification/train_mnist.py\",\n                  \"--num-epochs\",\n                  \"10\",\n                  \"--num-layers\",\n                  \"2\",\n                  \"--kv-store\",\n                  \"dist_device_sync\",\n                  \"--gpus\",\n                  \"0\",\n                ]\n              ports:\n                - containerPort: 9991\n                  name: mxjob-port\n              resources:\n                limits:\n                  cpu: \"2\"\n                  memory: 4Gi\n                  nvidia.com/gpu: 1\n                requests:\n                  cpu: \"2\"\n                  memory: 4Gi\n

Configuration breakdown:

• Scheduler: coordinates job scheduling across the nodes in the cluster.
• Server (parameter server): stores and updates model parameters to synchronize them across workers.
• Worker: performs the actual training.
• Resource configuration: give each role appropriate resources so the job runs smoothly (the sketch below shows how each Pod learns its role).
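
How does each Pod know whether it is the Scheduler, a Server, or a Worker? Assuming a Kubeflow-style mxnet-operator underneath (as the MXJob resource suggests), the controller injects DMLC_* environment variables into every Pod and MXNet's runtime reads them at startup. A quick way to inspect them:

import os\n\n# Sketch: print the DMLC_* variables the MXJob controller injects into each Pod\n# (assumption: a Kubeflow-style mxnet-operator, as implied by the MXJob resource).\nfor var in ('DMLC_ROLE', 'DMLC_NUM_SERVER', 'DMLC_NUM_WORKER',\n            'DMLC_PS_ROOT_URI', 'DMLC_PS_ROOT_PORT'):\n    print(var, '=', os.environ.get(var))\n
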
                                                              "},{"location":"end-user/baize/jobs/mxnet.html#_11","title":"\u8bbe\u7f6e\u4efb\u52a1\u526f\u672c\u6570","text":"

                                                              \u5728\u521b\u5efa MXNet \u5206\u5e03\u5f0f\u4efb\u52a1\u65f6\uff0c\u9700\u8981\u6839\u636e mxReplicaSpecs \u4e2d\u914d\u7f6e\u7684\u526f\u672c\u6570\uff0c\u6b63\u786e\u8bbe\u7f6e \u4efb\u52a1\u526f\u672c\u6570\u3002

                                                              • \u603b\u526f\u672c\u6570 = Scheduler \u526f\u672c\u6570 + Server \u526f\u672c\u6570 + Worker \u526f\u672c\u6570
                                                              • \u672c\u793a\u4f8b\u4e2d\uff1a
                                                                • Scheduler \u526f\u672c\u6570\uff1a1
                                                                • Server \u526f\u672c\u6570\uff1a1
                                                                • Worker \u526f\u672c\u6570\uff1a1
                                                                • \u603b\u526f\u672c\u6570\uff1a1 + 1 + 1 = 3

                                                              \u56e0\u6b64\uff0c\u5728\u4efb\u52a1\u914d\u7f6e\u4e2d\uff0c\u9700\u8981\u5c06 \u4efb\u52a1\u526f\u672c\u6570 \u8bbe\u7f6e\u4e3a 3\u3002

                                                              "},{"location":"end-user/baize/jobs/mxnet.html#_12","title":"\u63d0\u4ea4\u4efb\u52a1","text":"

                                                              \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u63d0\u4ea4 \u6309\u94ae\uff0c\u5f00\u59cb\u8fd0\u884c MXNet \u5206\u5e03\u5f0f\u4efb\u52a1\u3002

                                                              "},{"location":"end-user/baize/jobs/mxnet.html#_13","title":"\u67e5\u770b\u8fd0\u884c\u7ed3\u679c","text":"

                                                              \u8fdb\u5165 \u4efb\u52a1\u8be6\u60c5 \u9875\u9762\uff0c\u67e5\u770b\u4efb\u52a1\u7684\u8fd0\u884c\u72b6\u6001\u548c\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\u3002\u60a8\u53ef\u4ee5\u67e5\u770b\u6bcf\u4e2a\u89d2\u8272\uff08Scheduler\u3001Server\u3001Worker\uff09\u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                                                              \u793a\u4f8b\u8f93\u51fa\uff1a

                                                              INFO:root:Epoch[0] Batch [50]     Speed: 1000 samples/sec   accuracy=0.85\nINFO:root:Epoch[0] Batch [100]    Speed: 1200 samples/sec   accuracy=0.87\n...\nINFO:root:Epoch[9] Batch [100]    Speed: 1300 samples/sec   accuracy=0.98\nTraining completed.\n

This indicates that the distributed MXNet job ran successfully and model training completed.

                                                              "},{"location":"end-user/baize/jobs/mxnet.html#_14","title":"\u5c0f\u7ed3","text":"

                                                              \u901a\u8fc7\u672c\u6559\u7a0b\uff0c\u60a8\u5b66\u4e60\u4e86\u5982\u4f55\u5728 AI Lab \u5e73\u53f0\u4e0a\u521b\u5efa\u548c\u8fd0\u884c MXNet \u7684\u5355\u673a\u548c\u5206\u5e03\u5f0f\u4efb\u52a1\u3002\u6211\u4eec\u8be6\u7ec6\u4ecb\u7ecd\u4e86 MXJob \u7684\u914d\u7f6e\u65b9\u5f0f\uff0c\u4ee5\u53ca\u5982\u4f55\u5728\u4efb\u52a1\u4e2d\u6307\u5b9a\u8fd0\u884c\u7684\u547d\u4ee4\u548c\u8d44\u6e90\u9700\u6c42\u3002\u5e0c\u671b\u672c\u6559\u7a0b\u5bf9\u60a8\u6709\u6240\u5e2e\u52a9\uff0c\u5982\u6709\u4efb\u4f55\u95ee\u9898\uff0c\u8bf7\u53c2\u8003\u5e73\u53f0\u63d0\u4f9b\u7684\u5176\u4ed6\u6587\u6863\u6216\u8054\u7cfb\u6280\u672f\u652f\u6301\u3002

                                                              "},{"location":"end-user/baize/jobs/mxnet.html#_15","title":"\u9644\u5f55","text":"
                                                              • \u6ce8\u610f\u4e8b\u9879\uff1a

                                                                • \u786e\u4fdd\u60a8\u4f7f\u7528\u7684\u955c\u50cf\u5305\u542b\u6240\u9700\u7684 MXNet \u7248\u672c\u548c\u4f9d\u8d56\u3002
                                                                • \u6839\u636e\u5b9e\u9645\u9700\u6c42\u8c03\u6574\u8d44\u6e90\u914d\u7f6e\uff0c\u907f\u514d\u8d44\u6e90\u4e0d\u8db3\u6216\u6d6a\u8d39\u3002
                                                                • \u5982\u9700\u4f7f\u7528\u81ea\u5b9a\u4e49\u7684\u8bad\u7ec3\u811a\u672c\uff0c\u8bf7\u4fee\u6539\u542f\u52a8\u547d\u4ee4\u548c\u53c2\u6570\u3002
                                                              • \u53c2\u8003\u6587\u6863\uff1a

                                                                • MXNet \u5b98\u65b9\u6587\u6863
                                                                • Kubeflow MXJob \u6307\u5357
                                                              "},{"location":"end-user/baize/jobs/paddle.html","title":"PaddlePaddle \u4efb\u52a1","text":"

                                                              PaddlePaddle\uff08\u98de\u6868\uff09\u662f\u767e\u5ea6\u5f00\u6e90\u7684\u6df1\u5ea6\u5b66\u4e60\u5e73\u53f0\uff0c\u652f\u6301\u4e30\u5bcc\u7684\u795e\u7ecf\u7f51\u7edc\u6a21\u578b\u548c\u5206\u5e03\u5f0f\u8bad\u7ec3\u65b9\u5f0f\u3002PaddlePaddle \u4efb\u52a1\u53ef\u4ee5\u901a\u8fc7\u5355\u673a\u6216\u5206\u5e03\u5f0f\u6a21\u5f0f\u8fdb\u884c\u8bad\u7ec3\u3002\u5728 AI Lab \u5e73\u53f0\u4e2d\uff0c\u6211\u4eec\u63d0\u4f9b\u4e86\u5bf9 PaddlePaddle \u4efb\u52a1\u7684\u652f\u6301\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u754c\u9762\u5316\u64cd\u4f5c\uff0c\u5feb\u901f\u521b\u5efa PaddlePaddle \u4efb\u52a1\uff0c\u8fdb\u884c\u6a21\u578b\u8bad\u7ec3\u3002

                                                              \u672c\u6559\u7a0b\u5c06\u6307\u5bfc\u60a8\u5982\u4f55\u5728 AI Lab \u5e73\u53f0\u4e0a\u521b\u5efa\u548c\u8fd0\u884c PaddlePaddle \u7684\u5355\u673a\u548c\u5206\u5e03\u5f0f\u4efb\u52a1\u3002

                                                              "},{"location":"end-user/baize/jobs/paddle.html#_1","title":"\u4efb\u52a1\u914d\u7f6e\u4ecb\u7ecd","text":"
                                                              • \u4efb\u52a1\u7c7b\u578b\uff1aPaddlePaddle\uff0c\u652f\u6301\u5355\u673a\u548c\u5206\u5e03\u5f0f\u4e24\u79cd\u6a21\u5f0f\u3002
                                                              • \u8fd0\u884c\u73af\u5883\uff1a\u9009\u62e9\u5305\u542b PaddlePaddle \u6846\u67b6\u7684\u955c\u50cf\uff0c\u6216\u5728\u4efb\u52a1\u4e2d\u5b89\u88c5\u5fc5\u8981\u7684\u4f9d\u8d56\u3002
                                                              "},{"location":"end-user/baize/jobs/paddle.html#_2","title":"\u4efb\u52a1\u8fd0\u884c\u73af\u5883","text":"

                                                              \u6211\u4eec\u4f7f\u7528 registry.baidubce.com/paddlepaddle/paddle:2.4.0rc0-cpu \u955c\u50cf\u4f5c\u4e3a\u4efb\u52a1\u7684\u57fa\u7840\u8fd0\u884c\u73af\u5883\u3002\u8be5\u955c\u50cf\u9884\u88c5\u4e86 PaddlePaddle \u6846\u67b6\uff0c\u9002\u7528\u4e8e CPU \u8ba1\u7b97\u3002\u5982\u679c\u9700\u8981\u4f7f\u7528 GPU\uff0c\u8bf7\u9009\u62e9\u5bf9\u5e94\u7684 GPU \u7248\u672c\u955c\u50cf\u3002

                                                              \u6ce8\u610f\uff1a\u4e86\u89e3\u5982\u4f55\u521b\u5efa\u548c\u7ba1\u7406\u73af\u5883\uff0c\u8bf7\u53c2\u8003 \u73af\u5883\u5217\u8868\u3002

                                                              "},{"location":"end-user/baize/jobs/paddle.html#paddlepaddle_1","title":"\u521b\u5efa PaddlePaddle \u4efb\u52a1","text":""},{"location":"end-user/baize/jobs/paddle.html#paddlepaddle_2","title":"PaddlePaddle \u5355\u673a\u8bad\u7ec3\u4efb\u52a1","text":""},{"location":"end-user/baize/jobs/paddle.html#_3","title":"\u521b\u5efa\u6b65\u9aa4","text":"
                                                              1. \u767b\u5f55\u5e73\u53f0\uff1a\u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3\uff0c\u8fdb\u5165 \u8bad\u7ec3\u4efb\u52a1 \u9875\u9762\u3002
                                                              2. \u521b\u5efa\u4efb\u52a1\uff1a\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                                                              3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\uff1a\u5728\u5f39\u51fa\u7684\u7a97\u53e3\u4e2d\uff0c\u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a PaddlePaddle\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002
                                                              4. \u586b\u5199\u4efb\u52a1\u4fe1\u606f\uff1a\u586b\u5199\u4efb\u52a1\u540d\u79f0\u548c\u63cf\u8ff0\uff0c\u4f8b\u5982 \u201cPaddlePaddle \u5355\u673a\u8bad\u7ec3\u4efb\u52a1\u201d\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a\u3002
                                                              5. \u914d\u7f6e\u4efb\u52a1\u53c2\u6570\uff1a\u6839\u636e\u60a8\u7684\u9700\u6c42\uff0c\u914d\u7f6e\u4efb\u52a1\u7684\u8fd0\u884c\u53c2\u6570\u3001\u955c\u50cf\u3001\u8d44\u6e90\u7b49\u4fe1\u606f\u3002
                                                              "},{"location":"end-user/baize/jobs/paddle.html#_4","title":"\u8fd0\u884c\u53c2\u6570","text":"
                                                              • \u542f\u52a8\u547d\u4ee4\uff1apython
                                                              • \u547d\u4ee4\u53c2\u6570\uff1a

                                                                -m paddle.distributed.launch run_check\n

Explanation:

• -m paddle.distributed.launch: PaddlePaddle's distributed launch module. It also works in single-node mode, which makes a later move to distributed training easier.
• run_check: a test script provided by PaddlePaddle to check whether the distributed environment works.
                                                              "},{"location":"end-user/baize/jobs/paddle.html#_5","title":"\u8d44\u6e90\u914d\u7f6e","text":"
                                                              • \u526f\u672c\u6570\uff1a1\uff08\u5355\u673a\u4efb\u52a1\uff09
                                                              • \u8d44\u6e90\u8bf7\u6c42\uff1a
                                                                • CPU\uff1a\u6839\u636e\u9700\u6c42\u8bbe\u7f6e\uff0c\u5efa\u8bae\u81f3\u5c11 1 \u6838
                                                                • \u5185\u5b58\uff1a\u6839\u636e\u9700\u6c42\u8bbe\u7f6e\uff0c\u5efa\u8bae\u81f3\u5c11 2 GiB
                                                                • GPU\uff1a\u5982\u679c\u9700\u8981\u4f7f\u7528 GPU\uff0c\u9009\u62e9 GPU \u7248\u672c\u7684\u955c\u50cf\uff0c\u5e76\u5206\u914d\u76f8\u5e94\u7684 GPU \u8d44\u6e90
                                                              "},{"location":"end-user/baize/jobs/paddle.html#paddlejob","title":"\u5b8c\u6574\u7684 PaddleJob \u914d\u7f6e\u793a\u4f8b","text":"

                                                              \u4ee5\u4e0b\u662f\u5355\u673a PaddleJob \u7684 YAML \u914d\u7f6e\uff1a

                                                              apiVersion: kubeflow.org/v1\nkind: PaddleJob\nmetadata:\n    name: paddle-simple-cpu\n    namespace: kubeflow\nspec:\n    paddleReplicaSpecs:\n        Worker:\n            replicas: 1\n            restartPolicy: OnFailure\n            template:\n                spec:\n                    containers:\n                        - name: paddle\n                          image: registry.baidubce.com/paddlepaddle/paddle:2.4.0rc0-cpu\n                          command:\n                              [\n                                  'python',\n                                  '-m',\n                                  'paddle.distributed.launch',\n                                  'run_check',\n                              ]\n

Configuration breakdown:

• apiVersion and kind: the API version and resource type; here it is a PaddleJob.
• metadata: metadata such as the job name and namespace.
• spec: the detailed job configuration.
  • paddleReplicaSpecs: the replica configuration of the PaddlePaddle job.
    • Worker: the worker configuration.
      • replicas: the replica count; 1 here means single-node training.
      • restartPolicy: the restart policy; OnFailure restarts the job automatically on failure.
      • template: the Pod template defining the container runtime environment and resources.
        • containers: the container list.
          • name: the container name.
          • image: the image to use.
          • command: the start command and its arguments.
                                                              "},{"location":"end-user/baize/jobs/paddle.html#_6","title":"\u63d0\u4ea4\u4efb\u52a1","text":"

                                                              \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u63d0\u4ea4 \u6309\u94ae\uff0c\u5f00\u59cb\u8fd0\u884c PaddlePaddle \u5355\u673a\u4efb\u52a1\u3002

                                                              "},{"location":"end-user/baize/jobs/paddle.html#_7","title":"\u67e5\u770b\u8fd0\u884c\u7ed3\u679c","text":"

                                                              \u4efb\u52a1\u63d0\u4ea4\u6210\u529f\u540e\uff0c\u60a8\u53ef\u4ee5\u8fdb\u5165 \u4efb\u52a1\u8be6\u60c5 \u9875\u9762\uff0c\u67e5\u770b\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\u548c\u4efb\u52a1\u7684\u8fd0\u884c\u72b6\u6001\u3002\u4ece\u53f3\u4e0a\u89d2\u8fdb\u5165 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\uff0c\u53ef\u4ee5\u67e5\u770b\u8fd0\u884c\u8fc7\u7a0b\u4e2d\u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                                                              \u793a\u4f8b\u8f93\u51fa\uff1a

                                                              run check success, PaddlePaddle is installed correctly on this node :)\n

This indicates that the single-node PaddlePaddle job ran successfully and the environment is configured correctly.

                                                              "},{"location":"end-user/baize/jobs/paddle.html#paddlepaddle_3","title":"PaddlePaddle \u5206\u5e03\u5f0f\u8bad\u7ec3\u4efb\u52a1","text":"

                                                              \u5728\u5206\u5e03\u5f0f\u6a21\u5f0f\u4e0b\uff0cPaddlePaddle \u4efb\u52a1\u53ef\u4ee5\u4f7f\u7528\u591a\u53f0\u8ba1\u7b97\u8282\u70b9\u5171\u540c\u5b8c\u6210\u8bad\u7ec3\uff0c\u63d0\u9ad8\u8bad\u7ec3\u6548\u7387\u3002

                                                              "},{"location":"end-user/baize/jobs/paddle.html#_8","title":"\u521b\u5efa\u6b65\u9aa4","text":"
                                                              1. \u767b\u5f55\u5e73\u53f0\uff1a\u540c\u4e0a\u3002
                                                              2. \u521b\u5efa\u4efb\u52a1\uff1a\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                                                              3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\uff1a\u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a PaddlePaddle\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002
                                                              4. \u586b\u5199\u4efb\u52a1\u4fe1\u606f\uff1a\u586b\u5199\u4efb\u52a1\u540d\u79f0\u548c\u63cf\u8ff0\uff0c\u4f8b\u5982 \u201cPaddlePaddle \u5206\u5e03\u5f0f\u8bad\u7ec3\u4efb\u52a1\u201d\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a\u3002
                                                              5. \u914d\u7f6e\u4efb\u52a1\u53c2\u6570\uff1a\u6839\u636e\u9700\u6c42\uff0c\u914d\u7f6e\u8fd0\u884c\u53c2\u6570\u3001\u955c\u50cf\u3001\u8d44\u6e90\u7b49\u3002
                                                              "},{"location":"end-user/baize/jobs/paddle.html#_9","title":"\u8fd0\u884c\u53c2\u6570","text":"
                                                              • \u542f\u52a8\u547d\u4ee4\uff1apython
                                                              • \u547d\u4ee4\u53c2\u6570\uff1a

                                                                -m paddle.distributed.launch train.py --epochs=10\n

Explanation:

• -m paddle.distributed.launch: PaddlePaddle's distributed launch module.
• train.py: your training script, which must be baked into the image or mounted into the container (a minimal sketch follows this list).
• --epochs=10: train for 10 epochs.
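
The platform does not ship a train.py; the following is only a minimal sketch of what such a script might look like so that it runs under paddle.distributed.launch and accepts the --epochs flag used above. The model, data, and hyperparameters are all illustrative placeholders:

import argparse\nimport paddle\nimport paddle.distributed as dist\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--epochs', type=int, default=10)\n    args = parser.parse_args()\n\n    # Initialize the distributed environment prepared by paddle.distributed.launch\n    dist.init_parallel_env()\n\n    # Toy model and data; replace with your real network and dataset\n    model = paddle.DataParallel(paddle.nn.Linear(10, 1))\n    loss_fn = paddle.nn.MSELoss()\n    opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())\n\n    x = paddle.randn([100, 10])\n    y = paddle.randn([100, 1])\n\n    for epoch in range(args.epochs):\n        loss = loss_fn(model(x), y)\n        loss.backward()\n        opt.step()\n        opt.clear_grad()\n        print(f'Epoch {epoch}, Loss: {float(loss):.4f}')\n\nif __name__ == '__main__':\n    main()\n
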
                                                              "},{"location":"end-user/baize/jobs/paddle.html#_10","title":"\u8d44\u6e90\u914d\u7f6e","text":"
                                                              • \u4efb\u52a1\u526f\u672c\u6570\uff1a\u6839\u636e Worker \u526f\u672c\u6570\u8bbe\u7f6e\uff0c\u8fd9\u91cc\u4e3a 2\u3002
                                                              • \u8d44\u6e90\u8bf7\u6c42\uff1a
                                                                • CPU\uff1a\u6839\u636e\u9700\u6c42\u8bbe\u7f6e\uff0c\u5efa\u8bae\u81f3\u5c11 1 \u6838
                                                                • \u5185\u5b58\uff1a\u6839\u636e\u9700\u6c42\u8bbe\u7f6e\uff0c\u5efa\u8bae\u81f3\u5c11 2 GiB
                                                                • GPU\uff1a\u5982\u679c\u9700\u8981\u4f7f\u7528 GPU\uff0c\u9009\u62e9 GPU \u7248\u672c\u7684\u955c\u50cf\uff0c\u5e76\u5206\u914d\u76f8\u5e94\u7684 GPU \u8d44\u6e90
                                                              "},{"location":"end-user/baize/jobs/paddle.html#paddlejob_1","title":"\u5b8c\u6574\u7684 PaddleJob \u914d\u7f6e\u793a\u4f8b","text":"

                                                              \u4ee5\u4e0b\u662f\u5206\u5e03\u5f0f PaddleJob \u7684 YAML \u914d\u7f6e\uff1a

                                                              apiVersion: kubeflow.org/v1\nkind: PaddleJob\nmetadata:\n    name: paddle-distributed-job\n    namespace: kubeflow\nspec:\n    paddleReplicaSpecs:\n        Worker:\n            replicas: 2\n            restartPolicy: OnFailure\n            template:\n                spec:\n                    containers:\n                        - name: paddle\n                          image: registry.baidubce.com/paddlepaddle/paddle:2.4.0rc0-cpu\n                          command:\n                              [\n                                  'python',\n                                  '-m',\n                                  'paddle.distributed.launch',\n                                  'train.py',\n                              ]\n                          args:\n                              - '--epochs=10'\n

Configuration breakdown:

• Worker:
  • replicas: set to 2, meaning two worker nodes train in parallel.
  • The remaining configuration is the same as in single-node mode.
                                                              "},{"location":"end-user/baize/jobs/paddle.html#_11","title":"\u8bbe\u7f6e\u4efb\u52a1\u526f\u672c\u6570","text":"

                                                              \u5728\u521b\u5efa PaddlePaddle \u5206\u5e03\u5f0f\u4efb\u52a1\u65f6\uff0c\u9700\u8981\u6839\u636e paddleReplicaSpecs \u4e2d\u914d\u7f6e\u7684\u526f\u672c\u6570\uff0c\u6b63\u786e\u8bbe\u7f6e \u4efb\u52a1\u526f\u672c\u6570\u3002

                                                              • \u603b\u526f\u672c\u6570 = Worker \u526f\u672c\u6570
                                                              • \u672c\u793a\u4f8b\u4e2d\uff1a
                                                                • Worker \u526f\u672c\u6570\uff1a2
                                                                • \u603b\u526f\u672c\u6570\uff1a2

                                                              \u56e0\u6b64\uff0c\u5728\u4efb\u52a1\u914d\u7f6e\u4e2d\uff0c\u9700\u8981\u5c06 \u4efb\u52a1\u526f\u672c\u6570 \u8bbe\u7f6e\u4e3a 2\u3002

                                                              "},{"location":"end-user/baize/jobs/paddle.html#_12","title":"\u63d0\u4ea4\u4efb\u52a1","text":"

                                                              \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u63d0\u4ea4 \u6309\u94ae\uff0c\u5f00\u59cb\u8fd0\u884c PaddlePaddle \u5206\u5e03\u5f0f\u4efb\u52a1\u3002

                                                              "},{"location":"end-user/baize/jobs/paddle.html#_13","title":"\u67e5\u770b\u8fd0\u884c\u7ed3\u679c","text":"

                                                              \u8fdb\u5165 \u4efb\u52a1\u8be6\u60c5 \u9875\u9762\uff0c\u67e5\u770b\u4efb\u52a1\u7684\u8fd0\u884c\u72b6\u6001\u548c\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\u3002\u60a8\u53ef\u4ee5\u67e5\u770b\u6bcf\u4e2a\u5de5\u4f5c\u8282\u70b9\u7684\u65e5\u5fd7\u8f93\u51fa\uff0c\u786e\u8ba4\u5206\u5e03\u5f0f\u8bad\u7ec3\u662f\u5426\u6b63\u5e38\u8fd0\u884c\u3002

                                                              \u793a\u4f8b\u8f93\u51fa\uff1a

                                                              Worker 0: Epoch 1, Batch 100, Loss 0.5\nWorker 1: Epoch 1, Batch 100, Loss 0.6\n...\nTraining completed.\n

This indicates that the distributed PaddlePaddle job ran successfully and model training completed.

                                                              "},{"location":"end-user/baize/jobs/paddle.html#_14","title":"\u5c0f\u7ed3","text":"

                                                              \u901a\u8fc7\u672c\u6559\u7a0b\uff0c\u60a8\u5b66\u4e60\u4e86\u5982\u4f55\u5728 AI Lab \u5e73\u53f0\u4e0a\u521b\u5efa\u548c\u8fd0\u884c PaddlePaddle \u7684\u5355\u673a\u548c\u5206\u5e03\u5f0f\u4efb\u52a1\u3002\u6211\u4eec\u8be6\u7ec6\u4ecb\u7ecd\u4e86 PaddleJob \u7684\u914d\u7f6e\u65b9\u5f0f\uff0c\u4ee5\u53ca\u5982\u4f55\u5728\u4efb\u52a1\u4e2d\u6307\u5b9a\u8fd0\u884c\u7684\u547d\u4ee4\u548c\u8d44\u6e90\u9700\u6c42\u3002\u5e0c\u671b\u672c\u6559\u7a0b\u5bf9\u60a8\u6709\u6240\u5e2e\u52a9\uff0c\u5982\u6709\u4efb\u4f55\u95ee\u9898\uff0c\u8bf7\u53c2\u8003\u5e73\u53f0\u63d0\u4f9b\u7684\u5176\u4ed6\u6587\u6863\u6216\u8054\u7cfb\u6280\u672f\u652f\u6301\u3002

                                                              "},{"location":"end-user/baize/jobs/paddle.html#_15","title":"\u9644\u5f55","text":"
                                                              • \u6ce8\u610f\u4e8b\u9879\uff1a

                                                                • \u8bad\u7ec3\u811a\u672c\uff1a\u786e\u4fdd train.py\uff08\u6216\u5176\u4ed6\u8bad\u7ec3\u811a\u672c\uff09\u5728\u5bb9\u5668\u5185\u5b58\u5728\u3002\u60a8\u53ef\u4ee5\u901a\u8fc7\u81ea\u5b9a\u4e49\u955c\u50cf\u3001\u6302\u8f7d\u6301\u4e45\u5316\u5b58\u50a8\u7b49\u65b9\u5f0f\u5c06\u811a\u672c\u653e\u5165\u5bb9\u5668\u3002
                                                                • \u955c\u50cf\u9009\u62e9\uff1a\u6839\u636e\u60a8\u7684\u9700\u6c42\u9009\u62e9\u5408\u9002\u7684\u955c\u50cf\uff0c\u4f8b\u5982\u4f7f\u7528 GPU \u65f6\u9009\u62e9 paddle:2.4.0rc0-gpu \u7b49\u3002
                                                                • \u53c2\u6570\u8c03\u6574\uff1a\u53ef\u4ee5\u901a\u8fc7\u4fee\u6539 command \u548c args \u6765\u4f20\u9012\u4e0d\u540c\u7684\u8bad\u7ec3\u53c2\u6570\u3002
                                                              • \u53c2\u8003\u6587\u6863\uff1a

                                                                • PaddlePaddle \u5b98\u65b9\u6587\u6863
                                                                • Kubeflow PaddleJob \u6307\u5357
                                                              "},{"location":"end-user/baize/jobs/pytorch.html","title":"Pytorch \u4efb\u52a1","text":"

                                                              Pytorch \u662f\u4e00\u4e2a\u5f00\u6e90\u7684\u6df1\u5ea6\u5b66\u4e60\u6846\u67b6\uff0c\u5b83\u63d0\u4f9b\u4e86\u4e00\u4e2a\u7075\u6d3b\u7684\u8bad\u7ec3\u548c\u90e8\u7f72\u73af\u5883\u3002 Pytorch \u4efb\u52a1\u662f\u4e00\u4e2a\u4f7f\u7528 Pytorch \u6846\u67b6\u7684\u4efb\u52a1\u3002

                                                              \u5728 AI Lab \u4e2d\uff0c\u6211\u4eec\u63d0\u4f9b\u4e86 Pytorch \u4efb\u52a1\u652f\u6301\u548c\u9002\u914d\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u754c\u9762\u5316\u64cd\u4f5c\uff0c \u5feb\u901f\u521b\u5efa Pytorch \u4efb\u52a1\uff0c\u8fdb\u884c\u6a21\u578b\u8bad\u7ec3\u3002

                                                              "},{"location":"end-user/baize/jobs/pytorch.html#_1","title":"\u4efb\u52a1\u914d\u7f6e\u4ecb\u7ecd","text":"
                                                              • \u4efb\u52a1\u7c7b\u578b\u540c\u65f6\u652f\u6301 Pytorch \u5355\u673a \u548c Pytorch \u5206\u5e03\u5f0f \u4e24\u79cd\u6a21\u5f0f\u3002
                                                              • \u8fd0\u884c\u955c\u50cf\u5185\u5df2\u7ecf\u9ed8\u8ba4\u652f\u6301 Pytorch \u6846\u67b6\uff0c\u65e0\u9700\u989d\u5916\u5b89\u88c5\u3002
                                                              "},{"location":"end-user/baize/jobs/pytorch.html#_2","title":"\u4efb\u52a1\u8fd0\u884c\u73af\u5883","text":"

                                                              \u5728\u8fd9\u91cc\u6211\u4eec\u4f7f\u7528 baize-notebook \u57fa\u7840\u955c\u50cf \u548c \u5173\u8054\u73af\u5883 \u7684\u65b9\u5f0f\u6765\u4f5c\u4e3a\u4efb\u52a1\u57fa\u7840\u8fd0\u884c\u73af\u5883\u3002

                                                              \u4e86\u89e3\u5982\u4f55\u521b\u5efa\u73af\u5883\uff0c\u8bf7\u53c2\u8003\u73af\u5883\u5217\u8868\u3002

                                                              "},{"location":"end-user/baize/jobs/pytorch.html#_3","title":"\u521b\u5efa\u4efb\u52a1","text":""},{"location":"end-user/baize/jobs/pytorch.html#pytorch_1","title":"Pytorch \u5355\u673a\u4efb\u52a1","text":"
                                                              1. \u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3 \uff0c\u8fdb\u5165 \u8bad\u7ec3\u4efb\u52a1 \u9875\u9762\u3002
                                                              2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                                                              3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a Pytorch \u5355\u673a\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002
                                                              4. \u586b\u5199\u4efb\u52a1\u540d\u79f0\u3001\u63cf\u8ff0\u540e\u70b9\u51fb \u786e\u5b9a \u3002
                                                              "},{"location":"end-user/baize/jobs/pytorch.html#_4","title":"\u8fd0\u884c\u53c2\u6570","text":"
                                                              • \u542f\u52a8\u547d\u4ee4 \u4f7f\u7528 bash
                                                              • \u547d\u4ee4\u53c2\u6570\u4f7f\u7528
import torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n# Define a simple neural network\nclass SimpleNet(nn.Module):\n    def __init__(self):\n        super(SimpleNet, self).__init__()\n        self.fc = nn.Linear(10, 1)\n\n    def forward(self, x):\n        return self.fc(x)\n\n# Create the model, loss function, and optimizer\nmodel = SimpleNet()\ncriterion = nn.MSELoss()\noptimizer = optim.SGD(model.parameters(), lr=0.01)\n\n# Generate some random data\nx = torch.randn(100, 10)\ny = torch.randn(100, 1)\n\n# Train the model\nfor epoch in range(100):\n    # Forward pass\n    outputs = model(x)\n    loss = criterion(outputs, y)\n\n    # Backward pass and optimization\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n\n    if (epoch + 1) % 10 == 0:\n        print(f'Epoch [{epoch+1}/100], Loss: {loss.item():.4f}')\n\nprint('Training finished.')\n
                                                              "},{"location":"end-user/baize/jobs/pytorch.html#_5","title":"\u8fd0\u884c\u7ed3\u679c","text":"

                                                              \u4efb\u52a1\u63d0\u4ea4\u6210\u529f\uff0c\u6211\u4eec\u53ef\u4ee5\u8fdb\u5165\u4efb\u52a1\u8be6\u60c5\u67e5\u770b\u5230\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\uff0c\u4ece\u53f3\u4e0a\u89d2\u53bb\u5f80 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5 \uff0c\u53ef\u4ee5\u67e5\u770b\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u7684\u65e5\u5fd7\u8f93\u51fa

                                                              [HAMI-core Warn(1:140244541377408:utils.c:183)]: get default cuda from (null)\n[HAMI-core Msg(1:140244541377408:libvgpu.c:855)]: Initialized\nEpoch [10/100], Loss: 1.1248\nEpoch [20/100], Loss: 1.0486\nEpoch [30/100], Loss: 0.9969\nEpoch [40/100], Loss: 0.9611\nEpoch [50/100], Loss: 0.9360\nEpoch [60/100], Loss: 0.9182\nEpoch [70/100], Loss: 0.9053\nEpoch [80/100], Loss: 0.8960\nEpoch [90/100], Loss: 0.8891\nEpoch [100/100], Loss: 0.8841\nTraining finished.\n[HAMI-core Msg(1:140244541377408:multiprocess_memory_limit.c:468)]: Calling exit handler 1\n
                                                              "},{"location":"end-user/baize/jobs/pytorch.html#pytorch_2","title":"Pytorch \u5206\u5e03\u5f0f\u4efb\u52a1","text":"
                                                              1. \u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3 \uff0c\u8fdb\u5165 \u4efb\u52a1\u5217\u8868 \u9875\u9762\u3002
                                                              2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                                                              3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a Pytorch \u5206\u5e03\u5f0f\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002
                                                              4. \u586b\u5199\u4efb\u52a1\u540d\u79f0\u3001\u63cf\u8ff0\u540e\u70b9\u51fb \u786e\u5b9a \u3002
                                                              "},{"location":"end-user/baize/jobs/pytorch.html#_6","title":"\u8fd0\u884c\u53c2\u6570","text":"
                                                              • \u542f\u52a8\u547d\u4ee4 \u4f7f\u7528 bash
                                                              • \u547d\u4ee4\u53c2\u6570\u4f7f\u7528
import os\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nclass SimpleModel(nn.Module):\n    def __init__(self):\n        super(SimpleModel, self).__init__()\n        self.fc = nn.Linear(10, 1)\n\n    def forward(self, x):\n        return self.fc(x)\n\ndef train():\n    # Print environment info\n    print(f'PyTorch version: {torch.__version__}')\n    print(f'CUDA available: {torch.cuda.is_available()}')\n    if torch.cuda.is_available():\n        print(f'CUDA version: {torch.version.cuda}')\n        print(f'CUDA device count: {torch.cuda.device_count()}')\n\n    rank = int(os.environ.get('RANK', '0'))\n    world_size = int(os.environ.get('WORLD_SIZE', '1'))\n\n    print(f'Rank: {rank}, World Size: {world_size}')\n\n    # Initialize the distributed environment\n    try:\n        if world_size > 1:\n            dist.init_process_group('nccl')\n            print('Distributed process group initialized successfully')\n        else:\n            print('Running in non-distributed mode')\n    except Exception as e:\n        print(f'Error initializing process group: {e}')\n        return\n\n    # Select the device\n    try:\n        if torch.cuda.is_available():\n            device = torch.device(f'cuda:{rank % torch.cuda.device_count()}')\n            print(f'Using CUDA device: {device}')\n        else:\n            device = torch.device('cpu')\n            print('CUDA not available, using CPU')\n    except Exception as e:\n        print(f'Error setting device: {e}')\n        device = torch.device('cpu')\n        print('Falling back to CPU')\n\n    try:\n        model = SimpleModel().to(device)\n        print('Model moved to device successfully')\n    except Exception as e:\n        print(f'Error moving model to device: {e}')\n        return\n\n    try:\n        if world_size > 1:\n            ddp_model = DDP(model, device_ids=[rank % torch.cuda.device_count()] if torch.cuda.is_available() else None)\n            print('DDP model created successfully')\n        else:\n            ddp_model = model\n            print('Using non-distributed model')\n    except Exception as e:\n        print(f'Error creating DDP model: {e}')\n        return\n\n    loss_fn = nn.MSELoss()\n    optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)\n\n    # Generate some random data\n    try:\n        data = torch.randn(100, 10, device=device)\n        labels = torch.randn(100, 1, device=device)\n        print('Data generated and moved to device successfully')\n    except Exception as e:\n        print(f'Error generating or moving data to device: {e}')\n        return\n\n    for epoch in range(10):\n        try:\n            ddp_model.train()\n            outputs = ddp_model(data)\n            loss = loss_fn(outputs, labels)\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n\n            if rank == 0:\n                print(f'Epoch {epoch}, Loss: {loss.item():.4f}')\n        except Exception as e:\n            print(f'Error during training epoch {epoch}: {e}')\n            break\n\n    if world_size > 1:\n        dist.destroy_process_group()\n\nif __name__ == '__main__':\n    train()\n
                                                              "},{"location":"end-user/baize/jobs/pytorch.html#_7","title":"\u4efb\u52a1\u526f\u672c\u6570","text":"

                                                              \u6ce8\u610f Pytorch \u5206\u5e03\u5f0f \u8bad\u7ec3\u4efb\u52a1\u4f1a\u521b\u5efa\u4e00\u7ec4 Master \u548c Worker \u7684\u8bad\u7ec3 Pod\uff0c Master \u8d1f\u8d23\u534f\u8c03\u8bad\u7ec3\u4efb\u52a1\uff0cWorker \u8d1f\u8d23\u5b9e\u9645\u7684\u8bad\u7ec3\u5de5\u4f5c\u3002

                                                              Note

                                                              \u672c\u6b21\u6f14\u793a\u4e2d\uff1aMaster \u526f\u672c\u6570\u4e3a 1\uff0cWorker \u526f\u672c\u6570\u4e3a 2\uff1b \u6240\u4ee5\u6211\u4eec\u9700\u8981\u5728 \u4efb\u52a1\u914d\u7f6e \u4e2d\u8bbe\u7f6e\u526f\u672c\u6570\u4e3a 3\uff0c\u5373 Master \u526f\u672c\u6570 + Worker \u526f\u672c\u6570\u3002 Pytorch \u4f1a\u81ea\u52a8\u8c03\u8c10 Master \u548c Worker \u7684\u89d2\u8272\u3002
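
Under the hood (assuming a Kubeflow-style PyTorchJob, consistent with the Master/Worker model described above), the controller injects the rendezvous settings into every Pod as environment variables, which is exactly where the script above reads RANK and WORLD_SIZE from. A quick way to inspect them:

import os\n\n# Sketch: print the rendezvous variables the PyTorchJob controller injects into each Pod\nfor var in ('MASTER_ADDR', 'MASTER_PORT', 'RANK', 'WORLD_SIZE'):\n    print(var, '=', os.environ.get(var))\n
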

                                                              "},{"location":"end-user/baize/jobs/pytorch.html#_8","title":"\u8fd0\u884c\u7ed3\u679c","text":"

                                                              \u540c\u6837\uff0c\u6211\u4eec\u53ef\u4ee5\u8fdb\u5165\u4efb\u52a1\u8be6\u60c5\uff0c\u67e5\u770b\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\uff0c\u4ee5\u53ca\u6bcf\u4e2a Pod \u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                                                              "},{"location":"end-user/baize/jobs/tensorboard.html","title":"\u4efb\u52a1\u5206\u6790\u4ecb\u7ecd","text":"

                                                              \u5728 AI Lab \u6a21\u5757\u4e2d\uff0c\u63d0\u4f9b\u4e86\u6a21\u578b\u5f00\u53d1\u8fc7\u7a0b\u91cd\u8981\u7684\u53ef\u89c6\u5316\u5206\u6790\u5de5\u5177\uff0c\u7528\u4e8e\u5c55\u793a\u673a\u5668\u5b66\u4e60\u6a21\u578b\u7684\u8bad\u7ec3\u8fc7\u7a0b\u548c\u7ed3\u679c\u3002 \u672c\u6587\u5c06\u4ecb\u7ecd \u4efb\u52a1\u5206\u6790\uff08Tensorboard\uff09\u7684\u57fa\u672c\u6982\u5ff5\u3001\u5728 AI Lab \u7cfb\u7edf\u4e2d\u7684\u4f7f\u7528\u65b9\u6cd5\uff0c\u4ee5\u53ca\u5982\u4f55\u914d\u7f6e\u6570\u636e\u96c6\u7684\u65e5\u5fd7\u5185\u5bb9\u3002

                                                              Note

                                                              Tensorboard \u662f TensorFlow \u63d0\u4f9b\u7684\u4e00\u4e2a\u53ef\u89c6\u5316\u5de5\u5177\uff0c\u7528\u4e8e\u5c55\u793a\u673a\u5668\u5b66\u4e60\u6a21\u578b\u7684\u8bad\u7ec3\u8fc7\u7a0b\u548c\u7ed3\u679c\u3002 \u5b83\u53ef\u4ee5\u5e2e\u52a9\u5f00\u53d1\u8005\u66f4\u76f4\u89c2\u5730\u7406\u89e3\u6a21\u578b\u7684\u8bad\u7ec3\u52a8\u6001\uff0c\u5206\u6790\u6a21\u578b\u6027\u80fd\uff0c\u8c03\u8bd5\u6a21\u578b\u95ee\u9898\u7b49\u3002

                                                              Tensorboard \u5728\u6a21\u578b\u5f00\u53d1\u8fc7\u7a0b\u4e2d\u7684\u4f5c\u7528\u53ca\u4f18\u52bf\uff1a

                                                              • \u53ef\u89c6\u5316\u8bad\u7ec3\u8fc7\u7a0b\uff1a\u901a\u8fc7\u56fe\u8868\u5c55\u793a\u8bad\u7ec3\u548c\u9a8c\u8bc1\u7684\u635f\u5931\u3001\u7cbe\u5ea6\u7b49\u6307\u6807\uff0c\u5e2e\u52a9\u5f00\u53d1\u8005\u76f4\u89c2\u5730\u89c2\u5bdf\u6a21\u578b\u7684\u8bad\u7ec3\u6548\u679c\u3002
                                                              • \u8c03\u8bd5\u548c\u4f18\u5316\u6a21\u578b\uff1a\u901a\u8fc7\u67e5\u770b\u4e0d\u540c\u5c42\u7684\u6743\u91cd\u3001\u68af\u5ea6\u5206\u5e03\u7b49\uff0c\u5e2e\u52a9\u5f00\u53d1\u8005\u53d1\u73b0\u548c\u4fee\u6b63\u6a21\u578b\u4e2d\u7684\u95ee\u9898\u3002
                                                              • \u5bf9\u6bd4\u4e0d\u540c\u5b9e\u9a8c\uff1a\u53ef\u4ee5\u540c\u65f6\u5c55\u793a\u591a\u4e2a\u5b9e\u9a8c\u7684\u7ed3\u679c\uff0c\u65b9\u4fbf\u5f00\u53d1\u8005\u5bf9\u6bd4\u4e0d\u540c\u6a21\u578b\u548c\u8d85\u53c2\u6570\u914d\u7f6e\u7684\u6548\u679c\u3002
                                                              • \u8ffd\u8e2a\u8bad\u7ec3\u6570\u636e\uff1a\u8bb0\u5f55\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u4f7f\u7528\u7684\u6570\u636e\u96c6\u548c\u53c2\u6570\uff0c\u786e\u4fdd\u5b9e\u9a8c\u7684\u53ef\u590d\u73b0\u6027\u3002
                                                              "},{"location":"end-user/baize/jobs/tensorboard.html#tensorboard","title":"\u5982\u4f55\u521b\u5efa Tensorboard","text":"

                                                              \u5728 AI Lab \u7cfb\u7edf\u4e2d\uff0c\u6211\u4eec\u63d0\u4f9b\u4e86\u4fbf\u6377\u7684\u65b9\u5f0f\u6765\u521b\u5efa\u548c\u7ba1\u7406 Tensorboard\u3002\u4ee5\u4e0b\u662f\u5177\u4f53\u6b65\u9aa4\uff1a

                                                              "},{"location":"end-user/baize/jobs/tensorboard.html#notebook-tensorboard","title":"\u5728\u521b\u5efa\u65f6 Notebook \u542f\u7528 Tensorboard","text":"
                                                              1. \u521b\u5efa Notebook\uff1a\u5728 AI Lab \u5e73\u53f0\u4e0a\u521b\u5efa\u4e00\u4e2a\u65b0\u7684 Notebook\u3002
                                                              2. \u542f\u7528 Tensorboard\uff1a\u5728\u521b\u5efa Notebook \u7684\u9875\u9762\u4e2d\uff0c\u542f\u7528 Tensorboard \u9009\u9879\uff0c\u5e76\u6307\u5b9a\u6570\u636e\u96c6\u548c\u65e5\u5fd7\u8def\u5f84\u3002

                                                              "},{"location":"end-user/baize/jobs/tensorboard.html#tensorboard_1","title":"\u5728\u5206\u5e03\u5f0f\u4efb\u52a1\u521b\u5efa\u53ca\u5b8c\u6210\u540e\u542f\u7528 Tensorboard","text":"
                                                              1. \u521b\u5efa\u5206\u5e03\u5f0f\u4efb\u52a1\uff1a\u5728 AI Lab \u5e73\u53f0\u4e0a\u521b\u5efa\u4e00\u4e2a\u65b0\u7684\u5206\u5e03\u5f0f\u8bad\u7ec3\u4efb\u52a1\u3002
                                                              2. \u914d\u7f6e Tensorboard\uff1a\u5728\u4efb\u52a1\u914d\u7f6e\u9875\u9762\u4e2d\uff0c\u542f\u7528 Tensorboard \u9009\u9879\uff0c\u5e76\u6307\u5b9a\u6570\u636e\u96c6\u548c\u65e5\u5fd7\u8def\u5f84\u3002
                                                              3. \u4efb\u52a1\u5b8c\u6210\u540e\u67e5\u770b Tensorboard\uff1a\u4efb\u52a1\u5b8c\u6210\u540e\uff0c\u53ef\u4ee5\u5728\u4efb\u52a1\u8be6\u60c5\u9875\u9762\u4e2d\u67e5\u770b Tensorboard \u7684\u94fe\u63a5\uff0c\u70b9\u51fb\u94fe\u63a5\u5373\u53ef\u67e5\u770b\u8bad\u7ec3\u8fc7\u7a0b\u7684\u53ef\u89c6\u5316\u7ed3\u679c\u3002

                                                              "},{"location":"end-user/baize/jobs/tensorboard.html#notebook-tensorboard_1","title":"\u5728 Notebook \u4e2d\u76f4\u63a5\u5f15\u7528 Tensorboard","text":"

                                                              \u5728 Notebook \u4e2d\uff0c\u53ef\u4ee5\u901a\u8fc7\u4ee3\u7801\u76f4\u63a5\u542f\u52a8 Tensorboard\u3002\u4ee5\u4e0b\u662f\u4e00\u4e2a\u793a\u4f8b\u4ee3\u7801\uff1a

# Import the required libraries\nimport tensorflow as tf\nimport datetime\n\n# Load and normalize the MNIST dataset (added so the example runs end to end)\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\n# Define the log directory\nlog_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n# Create the Tensorboard callback\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n\n# Build and compile the model\nmodel = tf.keras.models.Sequential([\n    tf.keras.layers.Flatten(input_shape=(28, 28)),\n    tf.keras.layers.Dense(512, activation='relu'),\n    tf.keras.layers.Dropout(0.2),\n    tf.keras.layers.Dense(10, activation='softmax')\n])\n\nmodel.compile(optimizer='adam',\n              loss='sparse_categorical_crossentropy',\n              metrics=['accuracy'])\n\n# Train the model with the Tensorboard callback enabled\nmodel.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test), callbacks=[tensorboard_callback])\n
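
The snippet above only writes the logs; the platform-managed Tensorboard instance then reads them. If you prefer to serve Tensorboard from the same Notebook yourself, the tensorboard package can also be started programmatically. A minimal sketch (the logdir value assumes the log_dir layout from the previous snippet):

from tensorboard import program\n\n# Start a Tensorboard server over the logs written above\ntb = program.TensorBoard()\ntb.configure(argv=[None, \"--logdir\", \"logs/fit\"])\nurl = tb.launch()\nprint(f\"Tensorboard listening on {url}\")\n
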
                                                              "},{"location":"end-user/baize/jobs/tensorboard.html#_2","title":"\u5982\u4f55\u914d\u7f6e\u6570\u636e\u96c6\u7684\u65e5\u5fd7\u5185\u5bb9","text":"

                                                              \u5728\u4f7f\u7528 Tensorboard \u65f6\uff0c\u53ef\u4ee5\u8bb0\u5f55\u548c\u914d\u7f6e\u4e0d\u540c\u7684\u6570\u636e\u96c6\u548c\u65e5\u5fd7\u5185\u5bb9\u3002\u4ee5\u4e0b\u662f\u4e00\u4e9b\u5e38\u89c1\u7684\u914d\u7f6e\u65b9\u5f0f\uff1a

                                                              "},{"location":"end-user/baize/jobs/tensorboard.html#_3","title":"\u914d\u7f6e\u8bad\u7ec3\u548c\u9a8c\u8bc1\u6570\u636e\u96c6\u7684\u65e5\u5fd7","text":"

                                                              \u5728\u8bad\u7ec3\u6a21\u578b\u65f6\uff0c\u53ef\u4ee5\u901a\u8fc7 TensorFlow \u7684 tf.summary API \u6765\u8bb0\u5f55\u8bad\u7ec3\u548c\u9a8c\u8bc1\u6570\u636e\u96c6\u7684\u65e5\u5fd7\u3002\u4ee5\u4e0b\u662f\u4e00\u4e2a\u793a\u4f8b\u4ee3\u7801\uff1a

# Import the required libraries\nimport tensorflow as tf\n\n# Create the log directories\ntrain_log_dir = 'logs/gradient_tape/train'\nval_log_dir = 'logs/gradient_tape/val'\ntrain_summary_writer = tf.summary.create_file_writer(train_log_dir)\nval_summary_writer = tf.summary.create_file_writer(val_log_dir)\n\n# Train the model and record logs\n# (EPOCHS, the datasets, train_step/val_step, and the tf.keras.metrics objects\n# train_loss/train_accuracy/val_loss/val_accuracy are assumed to be defined elsewhere)\nfor epoch in range(EPOCHS):\n    for (x_train, y_train) in train_dataset:\n        # Training step\n        train_step(x_train, y_train)\n        with train_summary_writer.as_default():\n            tf.summary.scalar('loss', train_loss.result(), step=epoch)\n            tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)\n\n    for (x_val, y_val) in val_dataset:\n        # Validation step\n        val_step(x_val, y_val)\n        with val_summary_writer.as_default():\n            tf.summary.scalar('loss', val_loss.result(), step=epoch)\n            tf.summary.scalar('accuracy', val_accuracy.result(), step=epoch)\n
                                                              "},{"location":"end-user/baize/jobs/tensorboard.html#_4","title":"\u914d\u7f6e\u81ea\u5b9a\u4e49\u65e5\u5fd7","text":"

                                                              \u9664\u4e86\u8bad\u7ec3\u548c\u9a8c\u8bc1\u6570\u636e\u96c6\u7684\u65e5\u5fd7\u5916\uff0c\u8fd8\u53ef\u4ee5\u8bb0\u5f55\u5176\u4ed6\u81ea\u5b9a\u4e49\u7684\u65e5\u5fd7\u5185\u5bb9\uff0c\u4f8b\u5982\u5b66\u4e60\u7387\u3001\u68af\u5ea6\u5206\u5e03\u7b49\u3002\u4ee5\u4e0b\u662f\u4e00\u4e2a\u793a\u4f8b\u4ee3\u7801\uff1a

# Record custom logs\n# (learning_rate, gradients, and epoch are assumed to be defined by the training loop)\nwith train_summary_writer.as_default():\n    tf.summary.scalar('learning_rate', learning_rate, step=epoch)\n    tf.summary.histogram('gradients', gradients, step=epoch)\n
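
One way to obtain the gradients logged above is TensorFlow's GradientTape. The following sketch assumes model, loss_fn, a data batch (x, y), and the train_summary_writer from the earlier snippet are already defined:

import tensorflow as tf\n\n# Compute gradients for one batch with GradientTape, then log one histogram per variable\nwith tf.GradientTape() as tape:\n    loss = loss_fn(model(x), y)\ngradients = tape.gradient(loss, model.trainable_variables)\n\nwith train_summary_writer.as_default():\n    for var, grad in zip(model.trainable_variables, gradients):\n        tf.summary.histogram(f'gradients/{var.name}', grad, step=epoch)\n
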
                                                              "},{"location":"end-user/baize/jobs/tensorboard.html#tensorboard_2","title":"Tensorboard \u7ba1\u7406","text":"

                                                              \u5728 AI Lab \u4e2d\uff0c\u901a\u8fc7\u5404\u79cd\u65b9\u5f0f\u521b\u5efa\u51fa\u6765\u7684 Tensorboard \u4f1a\u7edf\u4e00\u5c55\u793a\u5728\u4efb\u52a1\u5206\u6790\u7684\u9875\u9762\u4e2d\uff0c\u65b9\u4fbf\u7528\u6237\u67e5\u770b\u548c\u7ba1\u7406\u3002

                                                              \u7528\u6237\u53ef\u4ee5\u5728\u4efb\u52a1\u5206\u6790\u9875\u9762\u4e2d\u67e5\u770b Tensorboard \u7684\u94fe\u63a5\u3001\u72b6\u6001\u3001\u521b\u5efa\u65f6\u95f4\u7b49\u4fe1\u606f\uff0c\u5e76\u901a\u8fc7\u94fe\u63a5\u76f4\u63a5\u8bbf\u95ee Tensorboard \u7684\u53ef\u89c6\u5316\u7ed3\u679c\u3002

                                                              "},{"location":"end-user/baize/jobs/tensorflow.html","title":"Tensorflow \u4efb\u52a1","text":"

                                                              Tensorflow \u662f\u9664\u4e86 Pytorch \u53e6\u5916\u4e00\u4e2a\u975e\u5e38\u6d3b\u8dc3\u7684\u5f00\u6e90\u7684\u6df1\u5ea6\u5b66\u4e60\u6846\u67b6\uff0c\u5b83\u63d0\u4f9b\u4e86\u4e00\u4e2a\u7075\u6d3b\u7684\u8bad\u7ec3\u548c\u90e8\u7f72\u73af\u5883\u3002

                                                              \u5728 AI Lab \u4e2d\uff0c\u6211\u4eec\u540c\u6837\u63d0\u4f9b\u4e86 Tensorflow \u6846\u67b6\u7684\u652f\u6301\u548c\u9002\u914d\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u754c\u9762\u5316\u64cd\u4f5c\uff0c\u5feb\u901f\u521b\u5efa Tensorflow \u4efb\u52a1\uff0c\u8fdb\u884c\u6a21\u578b\u8bad\u7ec3\u3002

                                                              "},{"location":"end-user/baize/jobs/tensorflow.html#_1","title":"\u4efb\u52a1\u914d\u7f6e\u4ecb\u7ecd","text":"
                                                              • \u4efb\u52a1\u7c7b\u578b\u540c\u65f6\u652f\u6301 Tensorflow \u5355\u673a \u548c Tensorflow \u5206\u5e03\u5f0f \u4e24\u79cd\u6a21\u5f0f\u3002
                                                              • \u8fd0\u884c\u955c\u50cf\u5185\u5df2\u7ecf\u9ed8\u8ba4\u652f\u6301 Tensorflow \u6846\u67b6\uff0c\u65e0\u9700\u989d\u5916\u5b89\u88c5\u3002
                                                              "},{"location":"end-user/baize/jobs/tensorflow.html#_2","title":"\u4efb\u52a1\u8fd0\u884c\u73af\u5883","text":"

                                                              \u5728\u8fd9\u91cc\u6211\u4eec\u4f7f\u7528 baize-notebook \u57fa\u7840\u955c\u50cf \u548c \u5173\u8054\u73af\u5883 \u7684\u65b9\u5f0f\u6765\u4f5c\u4e3a\u4efb\u52a1\u57fa\u7840\u8fd0\u884c\u73af\u5883\u3002

                                                              \u4e86\u89e3\u5982\u4f55\u521b\u5efa\u73af\u5883\uff0c\u8bf7\u53c2\u8003\u73af\u5883\u5217\u8868\u3002

                                                              "},{"location":"end-user/baize/jobs/tensorflow.html#_3","title":"\u521b\u5efa\u4efb\u52a1","text":""},{"location":"end-user/baize/jobs/tensorflow.html#tfjob","title":"\u793a\u4f8b TFJob \u5355\u673a\u4efb\u52a1","text":"
                                                              1. \u767b\u5f55 AI Lab \u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3 \uff0c\u8fdb\u5165 \u8bad\u7ec3\u4efb\u52a1 \u9875\u9762\u3002
                                                              2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                                                              3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a Tensorflow \u5355\u673a\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002
                                                              4. \u586b\u5199\u4efb\u52a1\u540d\u79f0\u3001\u63cf\u8ff0\u540e\u70b9\u51fb \u786e\u5b9a \u3002
                                                              "},{"location":"end-user/baize/jobs/tensorflow.html#_4","title":"\u63d0\u524d\u9884\u70ed\u4ee3\u7801\u4ed3\u5e93","text":"

                                                              \u4f7f\u7528 AI Lab -> \u6570\u636e\u96c6\u5217\u8868 \uff0c\u521b\u5efa\u4e00\u4e2a\u6570\u636e\u96c6\uff0c\u5e76\u5c06\u8fdc\u7aef Github \u7684\u4ee3\u7801\u62c9\u53d6\u5230\u6570\u636e\u96c6\u4e2d\uff0c \u8fd9\u6837\u5728\u521b\u5efa\u4efb\u52a1\u65f6\uff0c\u53ef\u4ee5\u76f4\u63a5\u9009\u62e9\u6570\u636e\u96c6\uff0c\u5c06\u4ee3\u7801\u6302\u8f7d\u5230\u4efb\u52a1\u4e2d\u3002

                                                              \u6f14\u793a\u4ee3\u7801\u4ed3\u5e93\u5730\u5740\uff1ahttps://github.com/d-run/training-sample-code/

                                                              "},{"location":"end-user/baize/jobs/tensorflow.html#_5","title":"\u8fd0\u884c\u53c2\u6570","text":"
                                                              • \u542f\u52a8\u547d\u4ee4 \u4f7f\u7528 bash
                                                              • \u547d\u4ee4\u53c2\u6570\u4f7f\u7528 python /code/tensorflow/tf-single.py
                                                              \"\"\"\n  pip install tensorflow numpy\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\n# \u521b\u5efa\u4e00\u4e9b\u968f\u673a\u6570\u636e\nx = np.random.rand(100, 1)\ny = 2 * x + 1 + np.random.rand(100, 1) * 0.1\n\n# \u521b\u5efa\u4e00\u4e2a\u7b80\u5355\u7684\u6a21\u578b\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Dense(1, input_shape=(1,))\n])\n\n# \u7f16\u8bd1\u6a21\u578b\nmodel.compile(optimizer='adam', loss='mse')\n\n# \u8bad\u7ec3\u6a21\u578b\uff0c\u5c06 epochs \u6539\u4e3a 10\nhistory = model.fit(x, y, epochs=10, verbose=1)\n\n# \u6253\u5370\u6700\u7ec8\u635f\u5931\nprint('Final loss: {' + str(history.history['loss'][-1]) +'}')\n\n# \u4f7f\u7528\u6a21\u578b\u8fdb\u884c\u9884\u6d4b\ntest_x = np.array([[0.5]])\nprediction = model.predict(test_x)\nprint(f'Prediction for x=0.5: {prediction[0][0]}')\n
                                                              "},{"location":"end-user/baize/jobs/tensorflow.html#_6","title":"\u8fd0\u884c\u7ed3\u679c","text":"

                                                              \u4efb\u52a1\u63d0\u4ea4\u6210\u529f\u540e\uff0c\u53ef\u4ee5\u8fdb\u5165\u4efb\u52a1\u8be6\u60c5\u67e5\u770b\u5230\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\uff0c\u4ece\u53f3\u4e0a\u89d2\u53bb\u5f80 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5 \uff0c\u53ef\u4ee5\u67e5\u770b\u8bad\u7ec3\u8fc7\u7a0b\u4e2d\u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                                                              "},{"location":"end-user/baize/jobs/tensorflow.html#tfjob_1","title":"TFJob \u5206\u5e03\u5f0f\u4efb\u52a1","text":"
                                                              1. \u767b\u5f55 AI Lab \uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u7684 \u4efb\u52a1\u4e2d\u5fc3 \uff0c\u8fdb\u5165 \u4efb\u52a1\u5217\u8868 \u9875\u9762\u3002
                                                              2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa \u6309\u94ae\uff0c\u8fdb\u5165\u4efb\u52a1\u521b\u5efa\u9875\u9762\u3002
                                                              3. \u9009\u62e9\u4efb\u52a1\u7c7b\u578b\u4e3a Tensorflow \u5206\u5e03\u5f0f\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002
                                                              4. \u586b\u5199\u4efb\u52a1\u540d\u79f0\u3001\u63cf\u8ff0\u540e\u70b9\u51fb \u786e\u5b9a \u3002
                                                              "},{"location":"end-user/baize/jobs/tensorflow.html#_7","title":"\u793a\u4f8b\u4efb\u52a1\u4ecb\u7ecd","text":"

                                                              \u672c\u6b21\u5305\u542b\u4e86\u4e09\u79cd\u89d2\u8272\uff1aChief\u3001Worker \u548c Parameter Server (PS)\u3002

                                                              • Chief: \u4e3b\u8981\u8d1f\u8d23\u534f\u8c03\u8bad\u7ec3\u8fc7\u7a0b\u548c\u6a21\u578b\u68c0\u67e5\u70b9\u7684\u4fdd\u5b58\u3002
                                                              • Worker: \u6267\u884c\u5b9e\u9645\u7684\u6a21\u578b\u8bad\u7ec3\u3002
                                                              • PS: \u5728\u5f02\u6b65\u8bad\u7ec3\u4e2d\u7528\u4e8e\u5b58\u50a8\u548c\u66f4\u65b0\u6a21\u578b\u53c2\u6570\u3002

                                                              \u4e3a\u4e0d\u540c\u7684\u89d2\u8272\u5206\u914d\u4e86\u4e0d\u540c\u7684\u8d44\u6e90\u3002Chief \u548c Worker \u4f7f\u7528 GPU\uff0c\u800c PS \u4f7f\u7528 CPU \u548c\u8f83\u5927\u7684\u5185\u5b58\u3002
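At runtime each replica learns its own role from the TF_CONFIG environment variable that the training operator injects into its Pod. Below is a minimal sketch of what the first Worker replica of such a job might receive; the host names and replica counts are hypothetical and depend on how you configure the job, but the "cluster"/"task" structure is TensorFlow's standard format.

import json
import os

# Hypothetical TF_CONFIG as injected into the first Worker replica.
# "cluster" lists every replica by role; "task" identifies this replica.
tf_config = {
    "cluster": {
        "chief": ["demo-tfjob-chief-0:2222"],
        "worker": ["demo-tfjob-worker-0:2222", "demo-tfjob-worker-1:2222"],
        "ps": ["demo-tfjob-ps-0:2222"],
    },
    "task": {"type": "worker", "index": 0},
}
os.environ["TF_CONFIG"] = json.dumps(tf_config)

# A training script can then branch on the role, as the example below does.
role = json.loads(os.environ["TF_CONFIG"])["task"]["type"]
print(f"This replica runs as: {role}")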

                                                              "},{"location":"end-user/baize/jobs/tensorflow.html#_8","title":"\u8fd0\u884c\u53c2\u6570","text":"
                                                              • \u542f\u52a8\u547d\u4ee4 \u4f7f\u7528 bash
                                                              • \u547d\u4ee4\u53c2\u6570\u4f7f\u7528 python /code/tensorflow/tensorflow-distributed.py
import os
import json
import tensorflow as tf

class SimpleModel(tf.keras.Model):
    def __init__(self):
        super(SimpleModel, self).__init__()
        self.fc = tf.keras.layers.Dense(1, input_shape=(10,))

    def call(self, x):
        return self.fc(x)

def train():
    # Print environment information
    print(f"TensorFlow version: {tf.__version__}")
    print(f"GPU available: {tf.test.is_gpu_available()}")
    if tf.test.is_gpu_available():
        print(f"GPU device count: {len(tf.config.list_physical_devices('GPU'))}")

    # Get distributed training information
    tf_config = json.loads(os.environ.get('TF_CONFIG') or '{}')
    task_type = tf_config.get('task', {}).get('type')
    task_id = tf_config.get('task', {}).get('index')

    print(f"Task type: {task_type}, Task ID: {task_id}")

    # Set up the distribution strategy
    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()

    with strategy.scope():
        model = SimpleModel()
        loss_fn = tf.keras.losses.MeanSquaredError()
        optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)

    # Generate some random data
    data = tf.random.normal((100, 10))
    labels = tf.random.normal((100, 1))

    @tf.function
    def train_step(inputs, labels):
        with tf.GradientTape() as tape:
            predictions = model(inputs)
            loss = loss_fn(labels, predictions)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        return loss

    for epoch in range(10):
        loss = train_step(data, labels)
        if task_type == 'chief':
            print(f'Epoch {epoch}, Loss: {loss.numpy():.4f}')

if __name__ == '__main__':
    train()
                                                              "},{"location":"end-user/baize/jobs/tensorflow.html#_9","title":"\u8fd0\u884c\u7ed3\u679c","text":"

                                                              \u540c\u6837\uff0c\u6211\u4eec\u53ef\u4ee5\u8fdb\u5165\u4efb\u52a1\u8be6\u60c5\uff0c\u67e5\u770b\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\uff0c\u4ee5\u53ca\u6bcf\u4e2a Pod \u7684\u65e5\u5fd7\u8f93\u51fa\u3002

                                                              "},{"location":"end-user/baize/jobs/view.html","title":"\u67e5\u770b\u4efb\u52a1\uff08Job\uff09\u5de5\u4f5c\u8d1f\u8f7d","text":"

                                                              \u4efb\u52a1\u521b\u5efa\u597d\u540e\uff0c\u90fd\u4f1a\u663e\u793a\u5728\u8bad\u7ec3\u4efb\u52a1\u5217\u8868\u4e2d\u3002

                                                              1. \u5728\u8bad\u7ec3\u8bad\u7ec3\u4efb\u52a1\u5217\u8868\u4e2d\uff0c\u70b9\u51fb\u67d0\u4e2a\u4efb\u52a1\u53f3\u4fa7\u7684 \u2507 -> \u4efb\u52a1\u8d1f\u8f7d\u8be6\u60c5 \u3002

                                                              2. \u51fa\u73b0\u4e00\u4e2a\u5f39\u7a97\u9009\u62e9\u8981\u67e5\u770b\u54ea\u4e2a Pod \u540e\uff0c\u70b9\u51fb \u8fdb\u5165 \u3002

                                                              3. \u8df3\u8f6c\u5230\u5bb9\u5668\u7ba1\u7406\u754c\u9762\uff0c\u53ef\u4ee5\u67e5\u770b\u5bb9\u5668\u7684\u5de5\u4f5c\u72b6\u6001\u3001\u6807\u7b7e\u4e0e\u6ce8\u89e3\u4ee5\u53ca\u53d1\u751f\u7684\u4e8b\u4ef6\u3002

                                                              4. \u4f60\u8fd8\u53ef\u4ee5\u67e5\u770b\u5f53\u524d Pod \u6700\u8fd1\u4e00\u6bb5\u65f6\u95f4\u7684\u8be6\u7ec6\u65e5\u5fd7\u3002 \u6b64\u5904\u9ed8\u8ba4\u5c55\u793a 100 \u884c\u65e5\u5fd7\uff0c\u5982\u679c\u8981\u67e5\u770b\u66f4\u8be6\u7ec6\u7684\u65e5\u5fd7\u6d3b\u4e0b\u8f7d\u65e5\u5fd7\uff0c\u8bf7\u70b9\u51fb\u9876\u90e8\u7684\u84dd\u8272 \u53ef\u89c2\u6d4b\u6027 \u6587\u5b57\u3002

                                                              5. \u5f53\u7136\u4f60\u8fd8\u53ef\u4ee5\u901a\u8fc7\u53f3\u4e0a\u89d2\u7684 ... \uff0c\u67e5\u770b\u5f53\u524d Pod \u7684 YAML\u3001\u4e0a\u4f20\u548c\u4e0b\u8f7d\u6587\u4ef6\u3002 \u4ee5\u4e0b\u662f\u4e00\u4e2a Pod \u7684 YAML \u793a\u4f8b\u3002

kind: Pod
apiVersion: v1
metadata:
  name: neko-tensorboard-job-test-202404181843-skxivllb-worker-0
  namespace: default
  uid: ddedb6ff-c278-47eb-ae1e-0de9b7c62f8c
  resourceVersion: '41092552'
  creationTimestamp: '2024-04-18T10:43:36Z'
  labels:
    training.kubeflow.org/job-name: neko-tensorboard-job-test-202404181843-skxivllb
    training.kubeflow.org/operator-name: pytorchjob-controller
    training.kubeflow.org/replica-index: '0'
    training.kubeflow.org/replica-type: worker
  annotations:
    cni.projectcalico.org/containerID: 0cfbb9af257d5e69027c603c6cb2d3890a17c4ae1a145748d5aef73a10d7fbe1
    cni.projectcalico.org/podIP: ''
    cni.projectcalico.org/podIPs: ''
    hami.io/bind-phase: success
    hami.io/bind-time: '1713437016'
    hami.io/vgpu-devices-allocated: GPU-29d5fa0d-935b-2966-aff8-483a174d61d1,NVIDIA,1024,20:;
    hami.io/vgpu-devices-to-allocate: ;
    hami.io/vgpu-node: worker-a800-1
    hami.io/vgpu-time: '1713437016'
    k8s.v1.cni.cncf.io/network-status: |-
      [{
          "name": "kube-system/calico",
          "ips": [
              "10.233.97.184"
          ],
          "default": true,
          "dns": {}
      }]
    k8s.v1.cni.cncf.io/networks-status: |-
      [{
          "name": "kube-system/calico",
          "ips": [
              "10.233.97.184"
          ],
          "default": true,
          "dns": {}
      }]
  ownerReferences:
    - apiVersion: kubeflow.org/v1
      kind: PyTorchJob
      name: neko-tensorboard-job-test-202404181843-skxivllb
      uid: e5a8b05d-1f03-4717-8e1c-4ec928014b7b
      controller: true
      blockOwnerDeletion: true
spec:
  volumes:
    - name: 0-dataset-pytorch-examples
      persistentVolumeClaim:
        claimName: pytorch-examples
    - name: kube-api-access-wh9rh
      projected:
        sources:
          - serviceAccountToken:
              expirationSeconds: 3607
              path: token
          - configMap:
              name: kube-root-ca.crt
              items:
                - key: ca.crt
                  path: ca.crt
          - downwardAPI:
              items:
                - path: namespace
                  fieldRef:
                    apiVersion: v1
                    fieldPath: metadata.namespace
        defaultMode: 420
  containers:
    - name: pytorch
      image: m.daocloud.io/docker.io/pytorch/pytorch
      command:
        - bash
      args:
        - '-c'
        - >-
          ls -la /root && which pip && pip install pytorch_lightning tensorboard
          && python /root/Git/pytorch/examples/mnist/main.py
      ports:
        - name: pytorchjob-port
          containerPort: 23456
          protocol: TCP
      env:
        - name: PYTHONUNBUFFERED
          value: '1'
        - name: PET_NNODES
          value: '1'
      resources:
        limits:
          cpu: '4'
          memory: 8Gi
          nvidia.com/gpucores: '20'
          nvidia.com/gpumem: '1024'
          nvidia.com/vgpu: '1'
        requests:
          cpu: '4'
          memory: 8Gi
          nvidia.com/gpucores: '20'
          nvidia.com/gpumem: '1024'
          nvidia.com/vgpu: '1'
      volumeMounts:
        - name: 0-dataset-pytorch-examples
          mountPath: /root/Git/pytorch/examples
        - name: kube-api-access-wh9rh
          readOnly: true
          mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      terminationMessagePath: /dev/termination-log
      terminationMessagePolicy: File
      imagePullPolicy: Always
  restartPolicy: Never
  terminationGracePeriodSeconds: 30
  dnsPolicy: ClusterFirst
  serviceAccountName: default
  serviceAccount: default
  nodeName: worker-a800-1
  securityContext: {}
  affinity: {}
  schedulerName: hami-scheduler
  tolerations:
    - key: node.kubernetes.io/not-ready
      operator: Exists
      effect: NoExecute
      tolerationSeconds: 300
    - key: node.kubernetes.io/unreachable
      operator: Exists
      effect: NoExecute
      tolerationSeconds: 300
  priorityClassName: baize-high-priority
  priority: 100000
  enableServiceLinks: true
  preemptionPolicy: PreemptLowerPriority
status:
  phase: Succeeded
  conditions:
    - type: Initialized
      status: 'True'
      lastProbeTime: null
      lastTransitionTime: '2024-04-18T10:43:36Z'
      reason: PodCompleted
    - type: Ready
      status: 'False'
      lastProbeTime: null
      lastTransitionTime: '2024-04-18T10:46:34Z'
      reason: PodCompleted
    - type: ContainersReady
      status: 'False'
      lastProbeTime: null
      lastTransitionTime: '2024-04-18T10:46:34Z'
      reason: PodCompleted
    - type: PodScheduled
      status: 'True'
      lastProbeTime: null
      lastTransitionTime: '2024-04-18T10:43:36Z'
  hostIP: 10.20.100.211
  podIP: 10.233.97.184
  podIPs:
    - ip: 10.233.97.184
  startTime: '2024-04-18T10:43:36Z'
  containerStatuses:
    - name: pytorch
      state:
        terminated:
          exitCode: 0
          reason: Completed
          startedAt: '2024-04-18T10:43:39Z'
          finishedAt: '2024-04-18T10:46:34Z'
          containerID: >-
            containerd://09010214bcf3315e81d38fba50de3943c9d2b48f50a6cc2e83f8ef0e5c6eeec1
      lastState: {}
      ready: false
      restartCount: 0
      image: m.daocloud.io/docker.io/pytorch/pytorch:latest
      imageID: >-
        m.daocloud.io/docker.io/pytorch/pytorch@sha256:11691e035a3651d25a87116b4f6adc113a27a29d8f5a6a583f8569e0ee5ff897
      containerID: >-
        containerd://09010214bcf3315e81d38fba50de3943c9d2b48f50a6cc2e83f8ef0e5c6eeec1
      started: false
  qosClass: Guaranteed
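If you prefer to retrieve the same YAML outside the UI, a minimal sketch with the official kubernetes Python client could look like the following. It assumes your local kubeconfig can reach the cluster; the pod name is the one from the example above.

from kubernetes import client, config
import yaml

# Load credentials from the local kubeconfig (assumes cluster access).
config.load_kube_config()

v1 = client.CoreV1Api()
pod = v1.read_namespaced_pod(
    name="neko-tensorboard-job-test-202404181843-skxivllb-worker-0",
    namespace="default",
)

# Serialize the API object back to YAML, similar to `kubectl get pod -o yaml`.
print(yaml.safe_dump(client.ApiClient().sanitize_for_serialization(pod)))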
                                                              "},{"location":"end-user/ghippo/personal-center/accesstoken.html","title":"\u8bbf\u95ee\u5bc6\u94a5","text":"

                                                              \u8bbf\u95ee\u5bc6\u94a5\uff08Access Key\uff09\u53ef\u7528\u4e8e\u8bbf\u95ee\u5f00\u653e API \u548c\u6301\u7eed\u53d1\u5e03\uff0c\u7528\u6237\u53ef\u5728\u4e2a\u4eba\u4e2d\u5fc3\u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u83b7\u53d6\u5bc6\u94a5\u5e76\u8bbf\u95ee API\u3002

                                                              "},{"location":"end-user/ghippo/personal-center/accesstoken.html#_2","title":"\u83b7\u53d6\u5bc6\u94a5","text":"

                                                              \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5728\u53f3\u4e0a\u89d2\u7684\u4e0b\u62c9\u83dc\u5355\u4e2d\u627e\u5230 \u4e2a\u4eba\u4e2d\u5fc3 \uff0c\u53ef\u4ee5\u5728 \u8bbf\u95ee\u5bc6\u94a5 \u9875\u9762\u7ba1\u7406\u8d26\u53f7\u7684\u8bbf\u95ee\u5bc6\u94a5\u3002

                                                              Info

                                                              \u8bbf\u95ee\u5bc6\u94a5\u4fe1\u606f\u4ec5\u663e\u793a\u4e00\u6b21\u3002\u5982\u679c\u60a8\u5fd8\u8bb0\u4e86\u8bbf\u95ee\u5bc6\u94a5\u4fe1\u606f\uff0c\u60a8\u9700\u8981\u91cd\u65b0\u521b\u5efa\u65b0\u7684\u8bbf\u95ee\u5bc6\u94a5\u3002

                                                              "},{"location":"end-user/ghippo/personal-center/accesstoken.html#api","title":"\u4f7f\u7528\u5bc6\u94a5\u8bbf\u95ee API","text":"

                                                              \u5728\u8bbf\u95ee\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0openAPI \u65f6\uff0c\u5728\u8bf7\u6c42\u4e2d\u52a0\u4e0a\u8bf7\u6c42\u5934 Authorization:Bearer ${token} \u4ee5\u6807\u8bc6\u8bbf\u95ee\u8005\u7684\u8eab\u4efd\uff0c \u5176\u4e2d ${token} \u662f\u4e0a\u4e00\u6b65\u4e2d\u83b7\u53d6\u5230\u7684\u5bc6\u94a5\uff0c\u5177\u4f53\u63a5\u53e3\u4fe1\u606f\u53c2\u89c1 OpenAPI \u63a5\u53e3\u6587\u6863\u3002

                                                              \u8bf7\u6c42\u793a\u4f8b

                                                              curl -X GET -H 'Authorization:Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IkRKVjlBTHRBLXZ4MmtQUC1TQnVGS0dCSWc1cnBfdkxiQVVqM2U3RVByWnMiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE2NjE0MTU5NjksImlhdCI6MTY2MDgxMTE2OSwiaXNzIjoiZ2hpcHBvLmlvIiwic3ViIjoiZjdjOGIxZjUtMTc2MS00NjYwLTg2MWQtOWI3MmI0MzJmNGViIiwicHJlZmVycmVkX3VzZXJuYW1lIjoiYWRtaW4iLCJncm91cHMiOltdfQ.RsUcrAYkQQ7C6BxMOrdD3qbBRUt0VVxynIGeq4wyIgye6R8Ma4cjxG5CbU1WyiHKpvIKJDJbeFQHro2euQyVde3ygA672ozkwLTnx3Tu-_mB1BubvWCBsDdUjIhCQfT39rk6EQozMjb-1X1sbLwzkfzKMls-oxkjagI_RFrYlTVPwT3Oaw-qOyulRSw7Dxd7jb0vINPq84vmlQIsI3UuTZSNO5BCgHpubcWwBss-Aon_DmYA-Et_-QtmPBA3k8E2hzDSzc7eqK0I68P25r9rwQ3DeKwD1dbRyndqWORRnz8TLEXSiCFXdZT2oiMrcJtO188Ph4eLGut1-4PzKhwgrQ' https://demo-dev.daocloud.io/apis/ghippo.io/v1alpha1/users?page=1&pageSize=10 -k\n

Response result

{
    "items": [
        {
            "id": "a7cfd010-ebbe-4601-987f-d098d9ef766e",
            "name": "a",
            "email": "",
            "description": "",
            "firstname": "",
            "lastname": "",
            "source": "locale",
            "enabled": true,
            "createdAt": "1660632794800",
            "updatedAt": "0",
            "lastLoginAt": ""
        }
    ],
    "pagination": {
        "page": 1,
        "pageSize": 10,
        "total": 1
    }
}
                                                              "},{"location":"end-user/ghippo/personal-center/language.html","title":"\u8bed\u8a00\u8bbe\u7f6e","text":"

                                                              \u672c\u8282\u8bf4\u660e\u5982\u4f55\u8bbe\u7f6e\u754c\u9762\u8bed\u8a00\u3002\u76ee\u524d\u652f\u6301\u4e2d\u6587\u3001English \u4e24\u4e2a\u8bed\u8a00\u3002

                                                              \u8bed\u8a00\u8bbe\u7f6e\u662f\u5e73\u53f0\u63d0\u4f9b\u591a\u8bed\u8a00\u670d\u52a1\u7684\u5165\u53e3\uff0c\u5e73\u53f0\u9ed8\u8ba4\u663e\u793a\u4e3a\u4e2d\u6587\uff0c\u7528\u6237\u53ef\u6839\u636e\u9700\u8981\u9009\u62e9\u82f1\u8bed\u6216\u81ea\u52a8\u68c0\u6d4b\u6d4f\u89c8\u5668\u8bed\u8a00\u9996\u9009\u9879\u7684\u65b9\u5f0f\u6765\u5207\u6362\u5e73\u53f0\u8bed\u8a00\u3002 \u6bcf\u4e2a\u7528\u6237\u7684\u591a\u8bed\u8a00\u670d\u52a1\u662f\u76f8\u4e92\u72ec\u7acb\u7684\uff0c\u5207\u6362\u540e\u4e0d\u4f1a\u5f71\u54cd\u5176\u4ed6\u7528\u6237\u3002

                                                              \u5e73\u53f0\u63d0\u4f9b\u4e09\u79cd\u5207\u6362\u8bed\u8a00\u65b9\u5f0f\uff1a\u4e2d\u6587\u3001\u82f1\u8bed-English\u3001\u81ea\u52a8\u68c0\u6d4b\u60a8\u7684\u6d4f\u89c8\u5668\u8bed\u8a00\u9996\u9009\u9879\u3002

                                                              \u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b\u3002

                                                              1. \u4f7f\u7528\u60a8\u7684\u7528\u6237\u540d/\u5bc6\u7801\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\u3002\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u5e95\u90e8\u7684 \u5168\u5c40\u7ba1\u7406 \u3002

                                                              2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684\u7528\u6237\u540d\u4f4d\u7f6e\uff0c\u9009\u62e9 \u4e2a\u4eba\u4e2d\u5fc3 \u3002

                                                              3. \u70b9\u51fb \u8bed\u8a00\u8bbe\u7f6e \u9875\u7b7e\u3002

                                                              4. \u5207\u6362\u8bed\u8a00\u9009\u9879\u3002

                                                              "},{"location":"end-user/ghippo/personal-center/security-setting.html","title":"\u5b89\u5168\u8bbe\u7f6e","text":"

                                                              \u529f\u80fd\u8bf4\u660e\uff1a\u7528\u4e8e\u586b\u5199\u90ae\u7bb1\u5730\u5740\u548c\u4fee\u6539\u767b\u5f55\u5bc6\u7801\u3002

                                                              • \u90ae\u7bb1\uff1a\u5f53\u7ba1\u7406\u5458\u914d\u7f6e\u90ae\u7bb1\u670d\u52a1\u5668\u5730\u5740\u4e4b\u540e\uff0c\u7528\u6237\u80fd\u591f\u901a\u8fc7\u767b\u5f55\u9875\u7684\u5fd8\u8bb0\u5bc6\u7801\u6309\u94ae\uff0c\u586b\u5199\u8be5\u5904\u7684\u90ae\u7bb1\u5730\u5740\u4ee5\u627e\u56de\u5bc6\u7801\u3002
                                                              • \u5bc6\u7801\uff1a\u7528\u4e8e\u767b\u5f55\u5e73\u53f0\u7684\u5bc6\u7801\uff0c\u5efa\u8bae\u5b9a\u671f\u4fee\u6539\u5bc6\u7801\u3002

                                                              \u5177\u4f53\u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b\uff1a

                                                              1. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684\u7528\u6237\u540d\u4f4d\u7f6e\uff0c\u9009\u62e9 \u4e2a\u4eba\u4e2d\u5fc3 \u3002

                                                              2. \u70b9\u51fb \u5b89\u5168\u8bbe\u7f6e \u9875\u7b7e\u3002\u586b\u5199\u60a8\u7684\u90ae\u7bb1\u5730\u5740\u6216\u4fee\u6539\u767b\u5f55\u5bc6\u7801\u3002

                                                              "},{"location":"end-user/ghippo/personal-center/ssh-key.html","title":"\u914d\u7f6e SSH \u516c\u94a5","text":"

                                                              \u672c\u6587\u8bf4\u660e\u5982\u4f55\u914d\u7f6e SSH \u516c\u94a5\u3002

                                                              "},{"location":"end-user/ghippo/personal-center/ssh-key.html#1-ssh","title":"\u6b65\u9aa4 1\uff1a\u67e5\u770b\u5df2\u5b58\u5728\u7684 SSH \u5bc6\u94a5","text":"

                                                              \u5728\u751f\u6210\u65b0\u7684 SSH \u5bc6\u94a5\u524d\uff0c\u8bf7\u5148\u786e\u8ba4\u662f\u5426\u9700\u8981\u4f7f\u7528\u672c\u5730\u5df2\u751f\u6210\u7684 SSH \u5bc6\u94a5\uff0cSSH \u5bc6\u94a5\u5bf9\u4e00\u822c\u5b58\u653e\u5728\u672c\u5730\u7528\u6237\u7684\u6839\u76ee\u5f55\u4e0b\u3002 Linux\u3001Mac \u8bf7\u76f4\u63a5\u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u67e5\u770b\u5df2\u5b58\u5728\u7684\u516c\u94a5\uff0cWindows \u7528\u6237\u5728 WSL\uff08\u9700\u8981 Windows 10 \u6216\u4ee5\u4e0a\uff09\u6216 Git Bash \u4e0b\u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u67e5\u770b\u5df2\u751f\u6210\u7684\u516c\u94a5\u3002

                                                              • ED25519 \u7b97\u6cd5\uff1a

                                                                cat ~/.ssh/id_ed25519.pub\n
                                                              • RSA \u7b97\u6cd5\uff1a

                                                                cat ~/.ssh/id_rsa.pub\n

                                                              \u5982\u679c\u8fd4\u56de\u4e00\u957f\u4e32\u4ee5 ssh-ed25519 \u6216 ssh-rsa \u5f00\u5934\u7684\u5b57\u7b26\u4e32\uff0c\u8bf4\u660e\u5df2\u5b58\u5728\u672c\u5730\u516c\u94a5\uff0c \u60a8\u53ef\u4ee5\u8df3\u8fc7\u6b65\u9aa4 2 \u751f\u6210 SSH \u5bc6\u94a5\uff0c\u76f4\u63a5\u64cd\u4f5c\u6b65\u9aa4 3\u3002

                                                              "},{"location":"end-user/ghippo/personal-center/ssh-key.html#2-ssh","title":"\u6b65\u9aa4 2\uff1a\u751f\u6210 SSH \u5bc6\u94a5","text":"

                                                              \u82e5\u6b65\u9aa4 1 \u672a\u8fd4\u56de\u6307\u5b9a\u7684\u5185\u5bb9\u5b57\u7b26\u4e32\uff0c\u8868\u793a\u672c\u5730\u6682\u65e0\u53ef\u7528 SSH \u5bc6\u94a5\uff0c\u9700\u8981\u751f\u6210\u65b0\u7684 SSH \u5bc6\u94a5\uff0c\u8bf7\u6309\u5982\u4e0b\u6b65\u9aa4\u64cd\u4f5c\uff1a

                                                              1. \u8bbf\u95ee\u7ec8\u7aef\uff08Windows \u8bf7\u4f7f\u7528 WSL \u6216 Git Bash\uff09\uff0c \u8fd0\u884c ssh-keygen -t\u3002

                                                              2. \u8f93\u5165\u5bc6\u94a5\u7b97\u6cd5\u7c7b\u578b\u548c\u53ef\u9009\u7684\u6ce8\u91ca\u3002

                                                                \u6ce8\u91ca\u4f1a\u51fa\u73b0\u5728 .pub \u6587\u4ef6\u4e2d\uff0c\u4e00\u822c\u53ef\u4f7f\u7528\u90ae\u7bb1\u4f5c\u4e3a\u6ce8\u91ca\u5185\u5bb9\u3002

                                                                • \u57fa\u4e8e ED25519 \u7b97\u6cd5\uff0c\u751f\u6210\u5bc6\u94a5\u5bf9\u547d\u4ee4\u5982\u4e0b\uff1a

                                                                  ssh-keygen -t ed25519 -C \"<\u6ce8\u91ca\u5185\u5bb9>\"\n
                                                                • \u57fa\u4e8e RSA \u7b97\u6cd5\uff0c\u751f\u6210\u5bc6\u94a5\u5bf9\u547d\u4ee4\u5982\u4e0b\uff1a

                                                                  ssh-keygen -t rsa -C \"<\u6ce8\u91ca\u5185\u5bb9>\"\n
                                                              3. \u70b9\u51fb\u56de\u8f66\uff0c\u9009\u62e9 SSH \u5bc6\u94a5\u751f\u6210\u8def\u5f84\u3002

                                                                \u4ee5 ED25519 \u7b97\u6cd5\u4e3a\u4f8b\uff0c\u9ed8\u8ba4\u8def\u5f84\u5982\u4e0b\uff1a

                                                                Generating public/private ed25519 key pair.\nEnter file in which to save the key (/home/user/.ssh/id_ed25519):\n

                                                                \u5bc6\u94a5\u9ed8\u8ba4\u751f\u6210\u8def\u5f84\uff1a/home/user/.ssh/id_ed25519\uff0c\u516c\u94a5\u4e0e\u4e4b\u5bf9\u5e94\u4e3a\uff1a/home/user/.ssh/id_ed25519.pub\u3002

                                                              4. \u8bbe\u7f6e\u4e00\u4e2a\u5bc6\u94a5\u53e3\u4ee4\u3002

                                                                Enter passphrase (empty for no passphrase):\nEnter same passphrase again:\n

                                                                \u53e3\u4ee4\u9ed8\u8ba4\u4e3a\u7a7a\uff0c\u60a8\u53ef\u4ee5\u9009\u62e9\u4f7f\u7528\u53e3\u4ee4\u4fdd\u62a4\u79c1\u94a5\u6587\u4ef6\u3002 \u5982\u679c\u60a8\u4e0d\u60f3\u5728\u6bcf\u6b21\u4f7f\u7528 SSH \u534f\u8bae\u8bbf\u95ee\u4ed3\u5e93\u65f6\uff0c\u90fd\u8981\u8f93\u5165\u7528\u4e8e\u4fdd\u62a4\u79c1\u94a5\u6587\u4ef6\u7684\u53e3\u4ee4\uff0c\u53ef\u4ee5\u5728\u521b\u5efa\u5bc6\u94a5\u65f6\uff0c\u8f93\u5165\u7a7a\u53e3\u4ee4\u3002

                                                              5. \u70b9\u51fb\u56de\u8f66\uff0c\u5b8c\u6210\u5bc6\u94a5\u5bf9\u521b\u5efa\u3002

                                                              "},{"location":"end-user/ghippo/personal-center/ssh-key.html#3","title":"\u6b65\u9aa4 3\uff1a\u62f7\u8d1d\u516c\u94a5","text":"

                                                              \u9664\u4e86\u5728\u547d\u4ee4\u884c\u6253\u5370\u51fa\u5df2\u751f\u6210\u7684\u516c\u94a5\u4fe1\u606f\u624b\u52a8\u590d\u5236\u5916\uff0c\u53ef\u4ee5\u4f7f\u7528\u547d\u4ee4\u62f7\u8d1d\u516c\u94a5\u5230\u7c98\u8d34\u677f\u4e0b\uff0c\u8bf7\u53c2\u8003\u64cd\u4f5c\u7cfb\u7edf\u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u8fdb\u884c\u62f7\u8d1d\u3002

                                                              • Windows\uff08\u5728 WSL \u6216 Git Bash \u4e0b\uff09\uff1a

                                                                cat ~/.ssh/id_ed25519.pub | clip\n
                                                              • Mac\uff1a

                                                                tr -d '\\n'< ~/.ssh/id_ed25519.pub | pbcopy\n
                                                              • GNU/Linux (requires xclip):

                                                                xclip -sel clip < ~/.ssh/id_ed25519.pub\n
                                                              "},{"location":"end-user/ghippo/personal-center/ssh-key.html#4-ai","title":"\u6b65\u9aa4 4\uff1a\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e0a\u8bbe\u7f6e\u516c\u94a5","text":"
                                                              1. \u767b\u5f55\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0UI \u9875\u9762\uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u9009\u62e9 \u4e2a\u4eba\u4e2d\u5fc3 -> SSH \u516c\u94a5 \u3002

                                                              2. \u6dfb\u52a0\u751f\u6210\u7684 SSH \u516c\u94a5\u4fe1\u606f\u3002

                                                                1. SSH \u516c\u94a5\u5185\u5bb9\u3002

                                                                2. \u516c\u94a5\u6807\u9898\uff1a\u652f\u6301\u81ea\u5b9a\u4e49\u516c\u94a5\u540d\u79f0\uff0c\u7528\u4e8e\u533a\u5206\u7ba1\u7406\u3002

                                                                3. \u8fc7\u671f\u65f6\u95f4\uff1a\u8bbe\u7f6e\u516c\u94a5\u8fc7\u671f\u65f6\u95f4\uff0c\u5230\u671f\u540e\u516c\u94a5\u5c06\u81ea\u52a8\u5931\u6548\uff0c\u4e0d\u53ef\u4f7f\u7528\uff1b\u5982\u679c\u4e0d\u8bbe\u7f6e\uff0c\u5219\u6c38\u4e45\u6709\u6548\u3002

                                                              "},{"location":"end-user/ghippo/workspace/folder-permission.html","title":"\u6587\u4ef6\u5939\u6743\u9650\u8bf4\u660e","text":"

                                                              \u6587\u4ef6\u5939\u5177\u6709\u6743\u9650\u6620\u5c04\u80fd\u529b\uff0c\u80fd\u591f\u5c06\u7528\u6237/\u7528\u6237\u7ec4\u5728\u672c\u6587\u4ef6\u5939\u7684\u6743\u9650\u6620\u5c04\u5230\u5176\u4e0b\u7684\u5b50\u6587\u4ef6\u5939\u3001\u5de5\u4f5c\u7a7a\u95f4\u4ee5\u53ca\u8d44\u6e90\u4e0a\u3002

                                                              \u82e5\u7528\u6237/\u7528\u6237\u7ec4\u5728\u672c\u6587\u4ef6\u5939\u662f Folder Admin \u89d2\u8272\uff0c\u6620\u5c04\u5230\u5b50\u6587\u4ef6\u5939\u4ecd\u4e3a Folder Admin \u89d2\u8272\uff0c\u6620\u5c04\u5230\u5176\u4e0b\u7684\u5de5\u4f5c\u7a7a\u95f4\u5219\u4e3a Workspace Admin\uff1b \u82e5\u5728 \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 -> \u8d44\u6e90\u7ec4 \u4e2d\u7ed1\u5b9a\u4e86 Namespace\uff0c\u5219\u6620\u5c04\u540e\u8be5\u7528\u6237/\u7528\u6237\u7ec4\u540c\u65f6\u8fd8\u662f Namespace Admin\u3002

                                                              Note

                                                              \u6587\u4ef6\u5939\u7684\u6743\u9650\u6620\u5c04\u80fd\u529b\u4e0d\u4f1a\u4f5c\u7528\u5230\u5171\u4eab\u8d44\u6e90\u4e0a\uff0c\u56e0\u4e3a\u5171\u4eab\u662f\u5c06\u96c6\u7fa4\u7684\u4f7f\u7528\u6743\u9650\u5171\u4eab\u7ed9\u591a\u4e2a\u5de5\u4f5c\u7a7a\u95f4\uff0c\u800c\u4e0d\u662f\u5c06\u7ba1\u7406\u6743\u9650\u53d7\u8ba9\u7ed9\u5de5\u4f5c\u7a7a\u95f4\uff0c\u56e0\u6b64\u4e0d\u4f1a\u5b9e\u73b0\u6743\u9650\u7ee7\u627f\u548c\u89d2\u8272\u6620\u5c04\u3002
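To make the inheritance rule concrete, here is a minimal, hypothetical sketch in Python encoding only the mappings stated above. The role and scope names are illustrative, not a platform API.

# Hypothetical illustration of the folder role-mapping rules described above.
# Child scope kinds: "subfolder", "workspace", "namespace" (bound via resource groups).
ROLE_MAPPING = {
    "Folder Admin": {
        "subfolder": "Folder Admin",
        "workspace": "Workspace Admin",
        "namespace": "Namespace Admin",  # only when a Namespace is bound in a resource group
    },
    # Editor/Viewer inherit analogously down the hierarchy; see the
    # application scenarios and permission table below.
}

def mapped_role(folder_role: str, child_kind: str):
    """Return the role inherited on a child scope, or None if nothing is mapped."""
    return ROLE_MAPPING.get(folder_role, {}).get(child_kind)

print(mapped_role("Folder Admin", "workspace"))  # -> Workspace Admin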

                                                              "},{"location":"end-user/ghippo/workspace/folder-permission.html#_2","title":"\u5e94\u7528\u573a\u666f","text":"

                                                              \u6587\u4ef6\u5939\u5177\u6709\u5c42\u7ea7\u80fd\u529b\uff0c\u56e0\u6b64\u5c06\u6587\u4ef6\u5939\u5bf9\u5e94\u4e8e\u4f01\u4e1a\u4e2d\u7684\u90e8\u95e8/\u4f9b\u5e94\u5546/\u9879\u76ee\u7b49\u5c42\u7ea7\u65f6\uff0c

                                                              • \u82e5\u7528\u6237/\u7528\u6237\u7ec4\u5728\u4e00\u7ea7\u90e8\u95e8\u5177\u6709\u7ba1\u7406\u6743\u9650\uff08Admin\uff09\uff0c\u5176\u4e0b\u7684\u4e8c\u7ea7\u3001\u4e09\u7ea7\u3001\u56db\u7ea7\u90e8\u95e8\u6216\u9879\u76ee\u540c\u6837\u5177\u6709\u7ba1\u7406\u6743\u9650\uff1b
                                                              • \u82e5\u7528\u6237/\u7528\u6237\u7ec4\u5728\u4e00\u7ea7\u90e8\u95e8\u5177\u6709\u4f7f\u7528\u6743\u9650\uff08Editor\uff09\uff0c\u5176\u4e0b\u7684\u4e8c\u7ea7\u3001\u4e09\u7ea7\u3001\u56db\u7ea7\u90e8\u95e8\u6216\u9879\u76ee\u540c\u6837\u5177\u6709\u4f7f\u7528\u6743\u9650\uff1b
                                                              • \u82e5\u7528\u6237/\u7528\u6237\u7ec4\u5728\u4e00\u7ea7\u90e8\u95e8\u5177\u6709\u53ea\u8bfb\u6743\u9650\uff08Viewer\uff09\uff0c\u5176\u4e0b\u7684\u4e8c\u7ea7\u3001\u4e09\u7ea7\u3001\u56db\u7ea7\u90e8\u95e8\u6216\u9879\u76ee\u540c\u6837\u5177\u6709\u53ea\u8bfb\u6743\u9650\u3002
                                                              \u5bf9\u8c61 \u64cd\u4f5c Folder Admin Folder Editor Folder Viewer \u5bf9\u6587\u4ef6\u5939\u672c\u8eab \u67e5\u770b \u2713 \u2713 \u2713 \u6388\u6743 \u2713 \u2717 \u2717 \u4fee\u6539\u522b\u540d \u2713 \u2717 \u2717 \u5bf9\u5b50\u6587\u4ef6\u5939 \u521b\u5efa \u2713 \u2717 \u2717 \u67e5\u770b \u2713 \u2713 \u2713 \u6388\u6743 \u2713 \u2717 \u2717 \u4fee\u6539\u522b\u540d \u2713 \u2717 \u2717 \u5bf9\u5176\u4e0b\u7684\u5de5\u4f5c\u7a7a\u95f4 \u521b\u5efa \u2713 \u2717 \u2717 \u67e5\u770b \u2713 \u2713 \u2713 \u6388\u6743 \u2713 \u2717 \u2717 \u4fee\u6539\u522b\u540d \u2713 \u2717 \u2717 \u5bf9\u5176\u4e0b\u7684\u5de5\u4f5c\u7a7a\u95f4 - \u8d44\u6e90\u7ec4 \u67e5\u770b \u2713 \u2713 \u2713 \u8d44\u6e90\u7ed1\u5b9a \u2713 \u2717 \u2717 \u89e3\u9664\u7ed1\u5b9a \u2713 \u2717 \u2717 \u5bf9\u5176\u4e0b\u7684\u5de5\u4f5c\u7a7a\u95f4 - \u5171\u4eab\u8d44\u6e90 \u67e5\u770b \u2713 \u2713 \u2713 \u65b0\u589e\u5171\u4eab \u2713 \u2717 \u2717 \u89e3\u9664\u5171\u4eab \u2713 \u2717 \u2717 \u8d44\u6e90\u9650\u989d \u2713 \u2717 \u2717"},{"location":"end-user/ghippo/workspace/folders.html","title":"\u521b\u5efa/\u5220\u9664\u6587\u4ef6\u5939","text":"

Folders have permission-mapping capability: a user's/user group's permissions on a folder can be mapped onto its subfolders, workspaces, and resources.

Follow the steps below to create a folder.

1. Log in to the AI computing platform as a user with the admin/folder admin role, and click Global Management -> Workspace and Hierarchy at the bottom of the left navigation bar.

2. Click the Create Folder button in the upper-right corner.

3. Fill in the folder name, parent folder, and other information, then click OK to finish creating the folder.

Tip

After successful creation, the folder name is displayed in the tree structure on the left, with different icons representing workspaces and folders.

Note

Select a folder or workspace and click the ⋮ on its right to edit or delete it.

• When resources exist in the folder's resource groups or shared resources, the folder cannot be deleted; unbind all resources before deleting it.

• When the microservice engine module has connected registry resources under the folder, the folder cannot be deleted; remove all connected registries before deleting the folder.

                                                              "},{"location":"end-user/ghippo/workspace/quota.html","title":"\u8d44\u6e90\u914d\u989d\uff08Quota\uff09","text":"

                                                              \u5171\u4eab\u8d44\u6e90\u5e76\u975e\u610f\u5473\u7740\u88ab\u5171\u4eab\u8005\u53ef\u4ee5\u65e0\u9650\u5236\u5730\u4f7f\u7528\u88ab\u5171\u4eab\u7684\u8d44\u6e90\u3002 Admin\u3001Kpanda Owner \u548c Workspace Admin \u53ef\u4ee5\u901a\u8fc7\u5171\u4eab\u8d44\u6e90\u4e2d\u7684 \u8d44\u6e90\u914d\u989d \u529f\u80fd\u9650\u5236\u67d0\u4e2a\u7528\u6237\u7684\u6700\u5927\u4f7f\u7528\u989d\u5ea6\u3002 \u82e5\u4e0d\u9650\u5236\uff0c\u5219\u8868\u793a\u53ef\u4ee5\u65e0\u9650\u5236\u4f7f\u7528\u3002

                                                              • CPU \u8bf7\u6c42\uff08Core\uff09
                                                              • CPU \u9650\u5236\uff08Core\uff09
                                                              • \u5185\u5b58\u8bf7\u6c42\uff08MB\uff09
                                                              • \u5185\u5b58\u9650\u5236\uff08MB\uff09
                                                              • \u5b58\u50a8\u8bf7\u6c42\u603b\u91cf\uff08GB\uff09
                                                              • \u5b58\u50a8\u5377\u58f0\u660e\uff08\u4e2a\uff09
                                                              • GPU \u7c7b\u578b\u3001\u89c4\u683c\u3001\u6570\u91cf\uff08\u5305\u62ec\u4f46\u4e0d\u9650\u4e8e Nvidia\u3001Ascend\u3001lluvatar\u7b49GPU\u5361\u7c7b\u578b\uff09

                                                              \u4e00\u4e2a\u8d44\u6e90\uff08\u96c6\u7fa4\uff09\u53ef\u4ee5\u88ab\u591a\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u5171\u4eab\uff0c\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u4e5f\u53ef\u4ee5\u540c\u65f6\u4f7f\u7528\u591a\u4e2a\u5171\u4eab\u96c6\u7fa4\u4e2d\u7684\u8d44\u6e90\u3002

                                                              "},{"location":"end-user/ghippo/workspace/quota.html#_1","title":"\u8d44\u6e90\u7ec4\u548c\u5171\u4eab\u8d44\u6e90","text":"

                                                              \u5171\u4eab\u8d44\u6e90\u548c\u8d44\u6e90\u7ec4\u4e2d\u7684\u96c6\u7fa4\u8d44\u6e90\u5747\u6765\u81ea\u5bb9\u5668\u7ba1\u7406\uff0c\u4f46\u662f\u96c6\u7fa4\u7ed1\u5b9a\u548c\u5171\u4eab\u7ed9\u540c\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u5c06\u4f1a\u4ea7\u751f\u4e24\u79cd\u622a\u7136\u4e0d\u540c\u7684\u6548\u679c\u3002

                                                              1. \u7ed1\u5b9a\u8d44\u6e90

                                                                \u4f7f\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u7684\u7528\u6237/\u7528\u6237\u7ec4\u5177\u6709\u8be5\u96c6\u7fa4\u7684\u5168\u90e8\u7ba1\u7406\u548c\u4f7f\u7528\u6743\u9650\uff0cWorkspace Admin \u5c06\u88ab\u6620\u5c04\u4e3a Cluster Admin\u3002 Workspace Admin \u80fd\u591f\u8fdb\u5165\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7ba1\u7406\u8be5\u96c6\u7fa4\u3002

                                                                Note

                                                                \u5f53\u524d\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u6682\u65e0 Cluster Editor \u548c Cluster Viewer \u89d2\u8272\uff0c\u56e0\u6b64 Workspace Editor\u3001Workspace Viewer \u8fd8\u65e0\u6cd5\u6620\u5c04\u3002

                                                              2. \u65b0\u589e\u5171\u4eab\u8d44\u6e90

                                                                \u4f7f\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u7684\u7528\u6237/\u7528\u6237\u7ec4\u5177\u6709\u8be5\u96c6\u7fa4\u8d44\u6e90\u7684\u4f7f\u7528\u6743\u9650\uff0c\u8fd9\u4e9b\u8d44\u6e90\u53ef\u4ee5\u5728\u521b\u5efa\u547d\u540d\u7a7a\u95f4\uff08Namespace\uff09\u65f6\u4f7f\u7528\u3002

                                                                \u4e0e\u8d44\u6e90\u7ec4\u4e0d\u540c\uff0c\u5c06\u96c6\u7fa4\u5171\u4eab\u5230\u5de5\u4f5c\u7a7a\u95f4\u65f6\uff0c\u7528\u6237\u5728\u5de5\u4f5c\u7a7a\u95f4\u7684\u89d2\u8272\u4e0d\u4f1a\u6620\u5c04\u5230\u8d44\u6e90\u4e0a\uff0c\u56e0\u6b64 Workspace Admin \u4e0d\u4f1a\u88ab\u6620\u5c04\u4e3a Cluster admin\u3002

                                                              \u672c\u8282\u5c55\u793a 3 \u4e2a\u4e0e\u8d44\u6e90\u914d\u989d\u6709\u5173\u7684\u573a\u666f\u3002

                                                              "},{"location":"end-user/ghippo/workspace/quota.html#_2","title":"\u521b\u5efa\u547d\u540d\u7a7a\u95f4","text":"

                                                              \u521b\u5efa\u547d\u540d\u7a7a\u95f4\u65f6\u4f1a\u6d89\u53ca\u5230\u8d44\u6e90\u914d\u989d\u3002

                                                              1. \u5728\u5de5\u4f5c\u7a7a\u95f4 ws01 \u65b0\u589e\u4e00\u4e2a\u5171\u4eab\u96c6\u7fa4\u3002

                                                              2. \u5728\u5e94\u7528\u5de5\u4f5c\u53f0\u9009\u62e9\u5de5\u4f5c\u7a7a\u95f4 ws01 \u548c\u5171\u4eab\u96c6\u7fa4\uff0c\u521b\u5efa\u547d\u540d\u7a7a\u95f4 ns01\u3002

                                                                • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u4e2d\u672a\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5219\u521b\u5efa\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4e0d\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\u3002
                                                                • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u4e2d\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff08\u4f8b\u5982 CPU \u8bf7\u6c42 = 100 core\uff09\uff0c\u5219\u521b\u5efa\u547d\u540d\u7a7a\u95f4\u65f6 CPU \u8bf7\u6c42 \u2264 100 core \u3002
                                                              "},{"location":"end-user/ghippo/workspace/quota.html#_3","title":"\u547d\u540d\u7a7a\u95f4\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4","text":"

                                                              \u524d\u63d0\uff1a\u5de5\u4f5c\u7a7a\u95f4 ws01 \u5df2\u65b0\u589e\u5171\u4eab\u96c6\u7fa4\uff0c\u64cd\u4f5c\u8005\u4e3a Workspace Admin + Kpanda Owner \u6216 Admin \u89d2\u8272\u3002

                                                              \u4ee5\u4e0b\u4e24\u79cd\u7ed1\u5b9a\u65b9\u5f0f\u7684\u6548\u679c\u76f8\u540c\u3002

                                                              • \u5728\u5bb9\u5668\u7ba1\u7406\u4e2d\u5c06\u521b\u5efa\u7684\u547d\u540d\u7a7a\u95f4 ns01 \u7ed1\u5b9a\u5230 ws01

                                                                • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u672a\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5219\u547d\u540d\u7a7a\u95f4 ns01 \u65e0\u8bba\u662f\u5426\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5747\u53ef\u6210\u529f\u7ed1\u5b9a\u3002
                                                                • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d CPU \u8bf7\u6c42 = 100 core \uff0c\u5219\u547d\u540d\u7a7a\u95f4 ns01 \u5fc5\u987b\u6ee1\u8db3 CPU \u8bf7\u6c42 \u2264 100 core \u624d\u80fd\u7ed1\u5b9a\u6210\u529f\u3002
                                                              • \u5728\u5168\u5c40\u7ba1\u7406\u4e2d\uff0c\u5c06\u547d\u540d\u7a7a\u95f4 ns01 \u7ed1\u5b9a\u5230 ws01

                                                                • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u672a\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5219\u547d\u540d\u7a7a\u95f4 ns01 \u65e0\u8bba\u662f\u5426\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5747\u53ef\u6210\u529f\u7ed1\u5b9a\u3002
                                                                • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d CPU \u8bf7\u6c42 = 100 core \uff0c\u5219\u547d\u540d\u7a7a\u95f4 ns01 \u5fc5\u987b\u6ee1\u8db3 CPU \u8bf7\u6c42 \u2264 100 core \u624d\u80fd\u7ed1\u5b9a\u6210\u529f\u3002
                                                              "},{"location":"end-user/ghippo/workspace/quota.html#_4","title":"\u4ece\u5de5\u4f5c\u7a7a\u95f4\u89e3\u7ed1\u547d\u540d\u7a7a\u95f4","text":"

                                                              \u4ee5\u4e0b\u4e24\u79cd\u89e3\u7ed1\u65b9\u5f0f\u7684\u6548\u679c\u76f8\u540c\u3002

                                                              • \u5728\u5bb9\u5668\u7ba1\u7406\u4e2d\u5c06\u547d\u540d\u7a7a\u95f4 ns01 \u4ece\u5de5\u4f5c\u7a7a\u95f4 ws01 \u89e3\u7ed1

                                                                • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u4e2d\u672a\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5219\u547d\u540d\u7a7a\u95f4 ns01 \u65e0\u8bba\u662f\u5426\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u89e3\u7ed1\u540e\u5747\u4e0d\u4f1a\u5bf9\u8d44\u6e90\u914d\u989d\u4ea7\u751f\u5f71\u54cd\u3002
                                                                • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d CPU \u8bf7\u6c42 = 100 core \uff0c\u547d\u540d\u7a7a\u95f4 ns01 \u4e5f\u8bbe\u7f6e\u4e86\u8d44\u6e90\u914d\u989d\uff0c\u5219\u89e3\u7ed1\u540e\u5c06\u91ca\u653e\u76f8\u5e94\u7684\u8d44\u6e90\u989d\u5ea6\u3002
                                                              • \u5728\u5168\u5c40\u7ba1\u7406\u4e2d\u5c06\u547d\u540d\u7a7a\u95f4 ns01 \u4ece\u5de5\u4f5c\u7a7a\u95f4 ws01 \u89e3\u7ed1

                                                                • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u672a\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u5219\u547d\u540d\u7a7a\u95f4 ns01 \u65e0\u8bba\u662f\u5426\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d\uff0c\u89e3\u7ed1\u540e\u5747\u4e0d\u4f1a\u5bf9\u8d44\u6e90\u914d\u989d\u4ea7\u751f\u5f71\u54cd\u3002
                                                                • \u82e5\u5728\u5171\u4eab\u96c6\u7fa4\u5df2\u8bbe\u7f6e\u8d44\u6e90\u914d\u989d CPU \u8bf7\u6c42 = 100 core \uff0c\u547d\u540d\u7a7a\u95f4 ns01 \u4e5f\u8bbe\u7f6e\u4e86\u8d44\u6e90\u914d\u989d\uff0c\u5219\u89e3\u7ed1\u540e\u5c06\u91ca\u653e\u76f8\u5e94\u7684\u8d44\u6e90\u989d\u5ea6\u3002
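The behavior above reduces to simple admission arithmetic: a namespace binds only while the sum of namespace requests stays within the shared quota, and unbinding releases its share. The following minimal, hypothetical Python sketch models this; the class and method names are illustrative, not a platform API.

class SharedClusterQuota:
    """Toy model of a shared cluster's CPU-request quota for one workspace."""

    def __init__(self, cpu_request_limit):
        self.limit = cpu_request_limit  # None means no quota was set (unlimited)
        self.allocated = {}  # namespace name -> CPU requests (cores)

    def bind(self, namespace, cpu_request):
        # With no quota set, any namespace binds successfully.
        if self.limit is not None and sum(self.allocated.values()) + cpu_request > self.limit:
            return False  # would exceed the workspace's quota on this cluster
        self.allocated[namespace] = cpu_request
        return True

    def unbind(self, namespace):
        # Unbinding releases the namespace's share of the quota.
        self.allocated.pop(namespace, None)

quota = SharedClusterQuota(cpu_request_limit=100)
print(quota.bind("ns01", 80))   # True: 80 <= 100
print(quota.bind("ns02", 30))   # False: 80 + 30 > 100
quota.unbind("ns01")            # releases 80 cores
print(quota.bind("ns02", 30))   # True now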
                                                              "},{"location":"end-user/ghippo/workspace/res-gp-and-shared-res.html","title":"\u8d44\u6e90\u7ec4\u4e0e\u5171\u4eab\u8d44\u6e90\u7684\u533a\u522b","text":"

                                                              \u8d44\u6e90\u7ec4\u4e0e\u5171\u4eab\u8d44\u6e90\u5747\u652f\u6301\u7ed1\u5b9a\u96c6\u7fa4\uff0c\u4f46\u4f7f\u7528\u4e0a\u5b58\u5728\u5f88\u5927\u533a\u522b\u3002

                                                              "},{"location":"end-user/ghippo/workspace/res-gp-and-shared-res.html#_2","title":"\u4f7f\u7528\u573a\u666f\u533a\u522b","text":"
                                                              • \u8d44\u6e90\u7ec4\u7ed1\u5b9a\u96c6\u7fa4\uff1a\u8d44\u6e90\u7ec4\u7ed1\u5b9a\u96c6\u7fa4\u901a\u5e38\u88ab\u7528\u6765\u6279\u91cf\u6388\u6743\u3002\u8d44\u6e90\u7ec4\u7ed1\u5b9a\u96c6\u7fa4\u540e\uff0c \u5de5\u4f5c\u7a7a\u95f4\u7ba1\u7406\u5458\u5c06\u88ab\u6620\u5c04\u4e3a\u96c6\u7fa4\u7ba1\u7406\u5458\uff0c\u80fd\u591f\u7ba1\u7406\u5e76\u4f7f\u7528\u96c6\u7fa4\u8d44\u6e90\u3002
                                                              • \u5171\u4eab\u8d44\u6e90\u7ed1\u5b9a\u96c6\u7fa4\uff1a\u8d44\u6e90\u5171\u4eab\u7ed1\u5b9a\u96c6\u7fa4\u901a\u5e38\u88ab\u7528\u6765\u505a\u8d44\u6e90\u9650\u989d\u3002 \u5178\u578b\u7684\u573a\u666f\u662f\u5e73\u53f0\u7ba1\u7406\u5458\u5c06\u96c6\u7fa4\u5206\u914d\u7ed9\u4e00\u7ea7\u4f9b\u5e94\u5546\u540e\uff0c\u518d\u7531\u4e00\u7ea7\u4f9b\u5e94\u5546\u5206\u914d\u7ed9\u4e8c\u7ea7\u4f9b\u5e94\u5546\u5e76\u5bf9\u4e8c\u7ea7\u4f9b\u5e94\u5546\u8fdb\u884c\u8d44\u6e90\u9650\u989d\u3002

                                                              \u8bf4\u660e\uff1a\u5728\u8be5\u573a\u666f\u4e2d\uff0c\u9700\u8981\u5e73\u53f0\u7ba1\u7406\u5458\u5bf9\u4e8c\u7ea7\u4f9b\u5e94\u5546\u8fdb\u884c\u8d44\u6e90\u9650\u5236\uff0c\u6682\u65f6\u8fd8\u4e0d\u652f\u6301\u4e00\u7ea7\u4f9b\u5e94\u5546\u9650\u5236\u4e8c\u7ea7\u4f9b\u5e94\u5546\u7684\u96c6\u7fa4\u989d\u5ea6\u3002

                                                              "},{"location":"end-user/ghippo/workspace/res-gp-and-shared-res.html#_3","title":"\u96c6\u7fa4\u989d\u5ea6\u7684\u4f7f\u7528\u533a\u522b","text":"
                                                              • \u8d44\u6e90\u7ec4\u7ed1\u5b9a\u96c6\u7fa4\uff1a\u5de5\u4f5c\u7a7a\u95f4\u7684\u7ba1\u7406\u5458\u5c06\u88ab\u6620\u5c04\u4e3a\u8be5\u96c6\u7fa4\u7684\u7ba1\u7406\u5458\uff0c\u76f8\u5f53\u4e8e\u5728\u5bb9\u5668\u7ba1\u7406-\u6743\u9650\u7ba1\u7406\u4e2d\u88ab\u6388\u4e88 Cluster Admin \u89d2\u8272\uff0c \u80fd\u591f\u65e0\u9650\u5236\u652f\u914d\u8be5\u96c6\u7fa4\u8d44\u6e90\uff0c\u7ba1\u7406\u8282\u70b9\u7b49\u91cd\u8981\u5185\u5bb9\uff0c\u4e14\u8d44\u6e90\u7ec4\u4e0d\u80fd\u591f\u88ab\u8d44\u6e90\u9650\u989d\u3002
                                                              • \u5171\u4eab\u8d44\u6e90\u7ed1\u5b9a\u8d44\u6e90\uff1a\u5de5\u4f5c\u7a7a\u95f4\u7ba1\u7406\u5458\u4ec5\u80fd\u591f\u4f7f\u7528\u96c6\u7fa4\u4e2d\u7684\u989d\u5ea6\u5728\u5e94\u7528\u5de5\u4f5c\u53f0\u521b\u5efa\u547d\u540d\u7a7a\u95f4\uff0c\u4e0d\u5177\u5907\u96c6\u7fa4\u7684\u7ba1\u7406\u6743\u9650\u3002 \u82e5\u5bf9\u8be5\u5de5\u4f5c\u7a7a\u95f4\u9650\u5236\u989d\u5ea6\uff0c\u5219\u5de5\u4f5c\u7a7a\u95f4\u7ba1\u7406\u4ec5\u80fd\u591f\u5728\u989d\u5ea6\u8303\u56f4\u5185\u521b\u5efa\u5e76\u4f7f\u7528\u547d\u540d\u7a7a\u95f4\u3002
                                                              "},{"location":"end-user/ghippo/workspace/res-gp-and-shared-res.html#_4","title":"\u8d44\u6e90\u7c7b\u578b\u7684\u533a\u522b","text":"
                                                              • \u8d44\u6e90\u7ec4\uff1a\u80fd\u591f\u7ed1\u5b9a\u96c6\u7fa4\u3001\u96c6\u7fa4-\u547d\u540d\u7a7a\u95f4\u3001\u591a\u4e91\u3001\u591a\u4e91-\u547d\u540d\u7a7a\u95f4\u3001\u7f51\u683c\u3001\u7f51\u683c-\u547d\u540d\u7a7a\u95f4
                                                              • \u5171\u4eab\u8d44\u6e90\uff1a\u4ec5\u80fd\u591f\u7ed1\u5b9a\u96c6\u7fa4
                                                              "},{"location":"end-user/ghippo/workspace/res-gp-and-shared-res.html#_5","title":"\u8d44\u6e90\u7ec4\u4e0e\u5171\u4eab\u8d44\u6e90\u7684\u76f8\u540c\u70b9","text":"

                                                              \u5728\u8d44\u6e90\u7ec4/\u5171\u4eab\u8d44\u6e90\u7ed1\u5b9a\u96c6\u7fa4\u540e\u90fd\u53ef\u4ee5\u524d\u5f80\u5e94\u7528\u5de5\u4f5c\u53f0\u521b\u5efa\u547d\u540d\u7a7a\u95f4\uff0c\u521b\u5efa\u540e\u547d\u540d\u7a7a\u95f4\u5c06\u81ea\u52a8\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4\u3002

                                                              "},{"location":"end-user/ghippo/workspace/workspace.html","title":"\u521b\u5efa/\u5220\u9664\u5de5\u4f5c\u7a7a\u95f4","text":"

                                                              \u5de5\u4f5c\u7a7a\u95f4\u662f\u4e00\u79cd\u8d44\u6e90\u8303\u7574\uff0c\u4ee3\u8868\u4e00\u79cd\u8d44\u6e90\u5c42\u7ea7\u5173\u7cfb\u3002 \u5de5\u4f5c\u7a7a\u95f4\u53ef\u4ee5\u5305\u542b\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u3001\u6ce8\u518c\u4e2d\u5fc3\u7b49\u8d44\u6e90\u3002 \u901a\u5e38\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u5bf9\u5e94\u4e00\u4e2a\u9879\u76ee\uff0c\u53ef\u4ee5\u4e3a\u6bcf\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u4e0d\u540c\u7684\u8d44\u6e90\uff0c\u6307\u6d3e\u4e0d\u540c\u7684\u7528\u6237\u548c\u7528\u6237\u7ec4\u3002

                                                              \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u521b\u5efa\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u3002

                                                              1. \u4f7f\u7528 admin/folder admin \u89d2\u8272\u7684\u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u5e95\u90e8\u7684 \u5168\u5c40\u7ba1\u7406 -> \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \u3002

                                                              2. \u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4 \u6309\u94ae\u3002

                                                              3. \u586b\u5199\u5de5\u4f5c\u7a7a\u95f4\u540d\u79f0\u3001\u6240\u5c5e\u6587\u4ef6\u5939\u7b49\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \uff0c\u5b8c\u6210\u521b\u5efa\u5de5\u4f5c\u7a7a\u95f4\u3002

                                                              Tip

                                                              \u521b\u5efa\u6210\u529f\u540e\u5de5\u4f5c\u7a7a\u95f4\u540d\u79f0\u5c06\u663e\u793a\u5728\u5de6\u4fa7\u7684\u6811\u72b6\u7ed3\u6784\u4e2d\uff0c\u4ee5\u4e0d\u540c\u7684\u56fe\u6807\u8868\u793a\u6587\u4ef6\u5939\u548c\u5de5\u4f5c\u7a7a\u95f4\u3002

                                                              Note

                                                              \u9009\u4e2d\u67d0\u4e00\u4e2a\u5de5\u4f5c\u7a7a\u95f4\u6216\u6587\u4ef6\u5939\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 ... \u53ef\u4ee5\u8fdb\u884c\u7f16\u8f91\u6216\u5220\u9664\u3002

                                                              • \u5f53\u8be5\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u8d44\u6e90\u7ec4\u3001\u5171\u4eab\u8d44\u6e90\u4e2d\u5b58\u5728\u8d44\u6e90\u65f6\uff0c\u8be5\u5de5\u4f5c\u7a7a\u95f4\u65e0\u6cd5\u88ab\u5220\u9664\uff0c\u9700\u8981\u5c06\u6240\u6709\u8d44\u6e90\u89e3\u7ed1\u540e\u518d\u5220\u9664\u3002
                                                              • \u5f53\u5fae\u670d\u52a1\u5f15\u64ce\u6a21\u5757\u5728\u8be5\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u5b58\u5728\u63a5\u5165\u6ce8\u518c\u4e2d\u5fc3\u8d44\u6e90\u65f6\uff0c\u8be5\u5de5\u4f5c\u7a7a\u95f4\u65e0\u6cd5\u88ab\u5220\u9664\uff0c\u9700\u8981\u5c06\u6240\u6709\u63a5\u5165\u6ce8\u518c\u4e2d\u5fc3\u79fb\u9664\u540e\u518d\u5220\u9664\u5de5\u4f5c\u7a7a\u95f4\u3002
                                                              • \u5f53\u955c\u50cf\u4ed3\u5e93\u6a21\u5757\u5728\u8be5\u5de5\u4f5c\u7a7a\u95f4\u4e0b\u5b58\u5728\u955c\u50cf\u7a7a\u95f4\u6216\u96c6\u6210\u4ed3\u5e93\u65f6\uff0c\u8be5\u5de5\u4f5c\u7a7a\u95f4\u65e0\u6cd5\u88ab\u5220\u9664\uff0c\u9700\u8981\u5c06\u955c\u50cf\u7a7a\u95f4\u89e3\u7ed1\uff0c\u5c06\u4ed3\u5e93\u96c6\u6210\u5220\u9664\u540e\u518d\u5220\u9664\u5de5\u4f5c\u7a7a\u95f4\u3002
                                                              "},{"location":"end-user/ghippo/workspace/ws-folder.html","title":"\u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7","text":"

                                                              \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \u662f\u4e00\u4e2a\u5177\u6709\u5c42\u7ea7\u7684\u8d44\u6e90\u9694\u79bb\u548c\u8d44\u6e90\u5206\u7ec4\u7279\u6027\uff0c\u4e3b\u8981\u89e3\u51b3\u8d44\u6e90\u7edf\u4e00\u6388\u6743\u3001\u8d44\u6e90\u5206\u7ec4\u4ee5\u53ca\u8d44\u6e90\u9650\u989d\u95ee\u9898\u3002

                                                              \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7 \u6709\u4e24\u4e2a\u6982\u5ff5\uff1a\u5de5\u4f5c\u7a7a\u95f4\u548c\u6587\u4ef6\u5939\u3002

                                                              "},{"location":"end-user/ghippo/workspace/ws-folder.html#_2","title":"\u5de5\u4f5c\u7a7a\u95f4","text":"

                                                              \u5de5\u4f5c\u7a7a\u95f4\u53ef\u901a\u8fc7 \u6388\u6743 \u3001 \u8d44\u6e90\u7ec4 \u548c \u5171\u4eab\u8d44\u6e90 \u6765\u7ba1\u7406\u8d44\u6e90\uff0c\u4f7f\u7528\u6237\uff08\u7528\u6237\u7ec4\uff09\u4e4b\u95f4\u80fd\u591f\u5171\u4eab\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u7684\u8d44\u6e90\u3002

• Resource

  Resources sit at the lowest level of the resource management hierarchy and include Clusters, Namespaces, Pipelines, gateways, and more. The parent of all these resources can only be a workspace, and a workspace serves as a resource container, i.e., a unit of resource grouping.

• Workspace

  A workspace usually refers to a project or an environment; the resources in each workspace are logically isolated from those in other workspaces. Through authorization within a workspace, you can grant users (user groups) different access permissions to the same set of resources.

  Counting from the bottom of the hierarchy, workspaces sit at the first level and contain resources. Except for shared resources, every resource has exactly one parent, and every workspace has exactly one parent folder.

  Resources are grouped through workspaces, and a workspace offers two grouping modes: Resource Groups and Shared Resources.

• Resource group

  A resource can join only one resource group, and resource groups map one-to-one to workspaces. Once a resource is added to a resource group, the Workspace Admin gains management permission over it, effectively becoming the resource's owner.

• Shared resource

  With shared resources, multiple workspaces can share one or more resources. A resource owner can choose to share a resource they own with workspaces, and when doing so the owner usually limits the resource quota the receiving workspace may use. After a resource is shared, the Workspace Admin only has usage permission within that quota and can neither manage the resource nor adjust the amount the workspace may use.

  Sharing also places requirements on the resource itself: only Cluster resources can be shared. A Cluster Admin can share a Cluster with different workspaces and limit each workspace's quota on that Cluster.

  Within the quota, a Workspace Admin can create multiple Namespaces, but the sum of the Namespace quotas cannot exceed the Cluster's quota for that workspace. For Kubernetes resources, Cluster is currently the only type that can be shared.

                                                              "},{"location":"end-user/ghippo/workspace/ws-folder.html#_3","title":"\u6587\u4ef6\u5939","text":"

                                                              \u6587\u4ef6\u5939\u53ef\u7528\u4e8e\u6784\u5efa\u4f01\u4e1a\u4e1a\u52a1\u5c42\u7ea7\u5173\u7cfb\u3002

                                                              • \u6587\u4ef6\u5939\u662f\u5728\u5de5\u4f5c\u7a7a\u95f4\u57fa\u7840\u4e4b\u4e0a\u7684\u8fdb\u4e00\u6b65\u5206\u7ec4\u673a\u5236\uff0c\u5177\u6709\u5c42\u7ea7\u7ed3\u6784\u3002 \u4e00\u4e2a\u6587\u4ef6\u5939\u53ef\u4ee5\u5305\u542b\u5de5\u4f5c\u7a7a\u95f4\u3001\u5176\u4ed6\u6587\u4ef6\u5939\u6216\u4e24\u8005\u7684\u7ec4\u5408\uff0c\u80fd\u591f\u5f62\u6210\u6811\u72b6\u7684\u7ec4\u7ec7\u5173\u7cfb\u3002

                                                              • \u501f\u52a9\u6587\u4ef6\u5939\u60a8\u53ef\u4ee5\u6620\u5c04\u4f01\u4e1a\u4e1a\u52a1\u5c42\u7ea7\u5173\u7cfb\uff0c\u6309\u7167\u90e8\u95e8\u5bf9\u5de5\u4f5c\u7a7a\u95f4\u8fdb\u884c\u5206\u7ec4\u3002 \u6587\u4ef6\u5939\u4e0d\u76f4\u63a5\u4e0e\u8d44\u6e90\u6302\u94a9\uff0c\u800c\u662f\u901a\u8fc7\u5de5\u4f5c\u7a7a\u95f4\u95f4\u63a5\u5b9e\u73b0\u8d44\u6e90\u5206\u7ec4\u3002

                                                              • \u6587\u4ef6\u5939\u6709\u4e14\u4ec5\u6709\u4e00\u4e2a\u7236\u7ea7\u6587\u4ef6\u5939\uff0c\u800c\u6839\u6587\u4ef6\u5939\u662f\u5c42\u6b21\u7ed3\u6784\u7684\u6700\u9ad8\u5c42\u7ea7\u3002 \u6839\u6587\u4ef6\u5939\u6ca1\u6709\u7236\u7ea7\uff0c\u6587\u4ef6\u5939\u548c\u5de5\u4f5c\u7a7a\u95f4\u5747\u6302\u9760\u5230\u6839\u6587\u4ef6\u5939\u4e0b\u3002

                                                              \u53e6\u5916\uff0c\u7528\u6237\uff08\u7528\u6237\u7ec4\uff09\u5728\u6587\u4ef6\u5939\u4e2d\u80fd\u591f\u901a\u8fc7\u5c42\u7ea7\u7ed3\u6784\u7ee7\u627f\u6765\u81ea\u7236\u9879\u7684\u6743\u9650\u3002 \u7528\u6237\u5728\u5c42\u6b21\u7ed3\u6784\u4e2d\u7684\u6743\u9650\u6765\u81ea\u5f53\u524d\u5c42\u7ea7\u7684\u6743\u9650\u4ee5\u53ca\u7ee7\u627f\u5176\u7236\u9879\u6743\u9650\u7684\u7ec4\u5408\u7ed3\u679c\uff0c\u6743\u9650\u4e4b\u95f4\u662f\u52a0\u5408\u5173\u7cfb\u4e0d\u5b58\u5728\u4e92\u65a5\u3002

                                                              "},{"location":"end-user/ghippo/workspace/ws-permission.html","title":"\u5de5\u4f5c\u7a7a\u95f4\u6743\u9650\u8bf4\u660e","text":"

                                                              \u5de5\u4f5c\u7a7a\u95f4\u5177\u6709\u6743\u9650\u6620\u5c04\u548c\u8d44\u6e90\u9694\u79bb\u80fd\u529b\uff0c\u80fd\u591f\u5c06\u7528\u6237/\u7528\u6237\u7ec4\u5728\u5de5\u4f5c\u7a7a\u95f4\u7684\u6743\u9650\u6620\u5c04\u5230\u5176\u4e0b\u7684\u8d44\u6e90\u4e0a\u3002 \u82e5\u7528\u6237/\u7528\u6237\u7ec4\u5728\u5de5\u4f5c\u7a7a\u95f4\u662f Workspace Admin \u89d2\u8272\uff0c\u540c\u65f6\u5de5\u4f5c\u7a7a\u95f4-\u8d44\u6e90\u7ec4\u4e2d\u7ed1\u5b9a\u4e86\u8d44\u6e90 Namespace\uff0c\u5219\u6620\u5c04\u540e\u8be5\u7528\u6237/\u7528\u6237\u7ec4\u5c06\u6210\u4e3a Namespace Admin\u3002

                                                              Note

                                                              \u5de5\u4f5c\u7a7a\u95f4\u7684\u6743\u9650\u6620\u5c04\u80fd\u529b\u4e0d\u4f1a\u4f5c\u7528\u5230\u5171\u4eab\u8d44\u6e90\u4e0a\uff0c\u56e0\u4e3a\u5171\u4eab\u662f\u5c06\u96c6\u7fa4\u7684\u4f7f\u7528\u6743\u9650\u5171\u4eab\u7ed9\u591a\u4e2a\u5de5\u4f5c\u7a7a\u95f4\uff0c\u800c\u4e0d\u662f\u5c06\u7ba1\u7406\u6743\u9650\u53d7\u8ba9\u7ed9\u5de5\u4f5c\u7a7a\u95f4\uff0c\u56e0\u6b64\u4e0d\u4f1a\u5b9e\u73b0\u6743\u9650\u7ee7\u627f\u548c\u89d2\u8272\u6620\u5c04\u3002

                                                              "},{"location":"end-user/ghippo/workspace/ws-permission.html#_2","title":"\u5e94\u7528\u573a\u666f","text":"

                                                              \u901a\u8fc7\u5c06\u8d44\u6e90\u7ed1\u5b9a\u5230\u4e0d\u540c\u7684\u5de5\u4f5c\u7a7a\u95f4\u80fd\u591f\u5b9e\u73b0\u8d44\u6e90\u9694\u79bb\u3002 \u56e0\u6b64\u501f\u52a9\u6743\u9650\u6620\u5c04\u3001\u8d44\u6e90\u9694\u79bb\u548c\u5171\u4eab\u8d44\u6e90\u80fd\u529b\u80fd\u591f\u5c06\u8d44\u6e90\u7075\u6d3b\u5206\u914d\u7ed9\u5404\u4e2a\u5de5\u4f5c\u7a7a\u95f4\uff08\u79df\u6237\uff09\u3002

                                                              \u901a\u5e38\u9002\u7528\u4e8e\u4ee5\u4e0b\u4e24\u4e2a\u573a\u666f\uff1a

                                                              • \u96c6\u7fa4\u4e00\u5bf9\u4e00

                                                                \u666e\u901a\u96c6\u7fa4 \u90e8\u95e8/\u79df\u6237\uff08\u5de5\u4f5c\u7a7a\u95f4\uff09 \u7528\u9014 \u96c6\u7fa4 01 A \u7ba1\u7406\u548c\u4f7f\u7528 \u96c6\u7fa4 02 B \u7ba1\u7406\u548c\u4f7f\u7528
                                                              • \u96c6\u7fa4\u4e00\u5bf9\u591a

                                                                \u96c6\u7fa4 \u90e8\u95e8/\u79df\u6237\uff08\u5de5\u4f5c\u7a7a\u95f4\uff09 \u8d44\u6e90\u9650\u989d \u96c6\u7fa4 01 A 100 \u6838 CPU B 50 \u6838 CPU
                                                              "},{"location":"end-user/ghippo/workspace/ws-permission.html#_3","title":"\u6743\u9650\u8bf4\u660e","text":"\u64cd\u4f5c\u5bf9\u8c61 \u64cd\u4f5c Workspace Admin Workspace Editor Workspace Viewer \u672c\u8eab \u67e5\u770b \u2713 \u2713 \u2713 - \u6388\u6743 \u2713 \u2717 \u2717 - \u4fee\u6539\u522b\u540d \u2713 \u2713 \u2717 \u8d44\u6e90\u7ec4 \u67e5\u770b \u2713 \u2713 \u2713 - \u8d44\u6e90\u7ed1\u5b9a \u2713 \u2717 \u2717 - \u89e3\u9664\u7ed1\u5b9a \u2713 \u2717 \u2717 \u5171\u4eab\u8d44\u6e90 \u67e5\u770b \u2713 \u2713 \u2713 - \u65b0\u589e\u5171\u4eab \u2713 \u2717 \u2717 - \u89e3\u9664\u5171\u4eab \u2713 \u2717 \u2717 - \u8d44\u6e90\u9650\u989d \u2713 \u2717 \u2717 - \u4f7f\u7528\u5171\u4eab\u8d44\u6e90 1 \u2713 \u2717 \u2717
                                                              1. \u6388\u6743\u7528\u6237\u53ef\u524d\u5f80\u5e94\u7528\u5de5\u4f5c\u53f0\u3001\u5fae\u670d\u52a1\u5f15\u64ce\u3001\u4e2d\u95f4\u4ef6\u3001\u591a\u4e91\u7f16\u6392\u3001\u670d\u52a1\u7f51\u683c\u7b49\u6a21\u5757\u4f7f\u7528\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u7684\u8d44\u6e90\u3002 \u6709\u5173 Workspace Admin\u3001Workspace Editor\u3001Workspace Viewer \u89d2\u8272\u5728\u5404\u4ea7\u54c1\u6a21\u5757\u7684\u64cd\u4f5c\u8303\u56f4\uff0c\u8bf7\u67e5\u9605\u5404\u6a21\u5757\u7684\u6743\u9650\u8bf4\u660e\uff1a

                                                                • \u5e94\u7528\u5de5\u4f5c\u53f0\u6743\u9650\u8bf4\u660e
                                                                • \u670d\u52a1\u7f51\u683c\u6743\u9650\u8bf4\u660e
                                                                • \u4e2d\u95f4\u4ef6\u6743\u9650\u8bf4\u660e
                                                                • \u5fae\u670d\u52a1\u5f15\u64ce\u6743\u9650\u8bf4\u660e
                                                                • \u5bb9\u5668\u7ba1\u7406\u6743\u9650\u8bf4\u660e

                                                                \u21a9

                                                              "},{"location":"end-user/ghippo/workspace/wsbind-permission.html","title":"\u8d44\u6e90\u7ed1\u5b9a\u6743\u9650\u8bf4\u660e","text":"

                                                              \u5047\u5982\u7528\u6237\u5c0f\u660e\uff08\u201c\u5c0f\u660e\u201d\u4ee3\u8868\u4efb\u4f55\u6709\u8d44\u6e90\u7ed1\u5b9a\u9700\u6c42\u7684\u7528\u6237\uff09\u5df2\u7ecf\u5177\u5907\u4e86 Workspace Admin \u89d2\u8272\u6216\u5df2\u901a\u8fc7\u81ea\u5b9a\u4e49\u89d2\u8272\u6388\u6743\uff0c \u540c\u65f6\u81ea\u5b9a\u4e49\u89d2\u8272\u4e2d\u5305\u542b\u5de5\u4f5c\u7a7a\u95f4\u7684\u201c\u8d44\u6e90\u7ed1\u5b9a\u201d\u6743\u9650\uff0c\u5e0c\u671b\u5c06\u67d0\u4e2a\u96c6\u7fa4\u6216\u8005\u67d0\u4e2a\u547d\u540d\u7a7a\u95f4\u7ed1\u5b9a\u5230\u5176\u6240\u5728\u7684\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u3002

                                                              \u8981\u5c06\u96c6\u7fa4/\u547d\u540d\u7a7a\u95f4\u8d44\u6e90\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4\uff0c\u4e0d\u4ec5\u9700\u8981\u8be5\u5de5\u4f5c\u7a7a\u95f4\u7684\u201c\u8d44\u6e90\u7ed1\u5b9a\u201d\u6743\u9650\uff0c\u8fd8\u9700\u8981 Cluster Admin \u7684\u8d44\u6e90\u6743\u9650\u3002

                                                              "},{"location":"end-user/ghippo/workspace/wsbind-permission.html#_2","title":"\u7ed9\u5c0f\u660e\u6388\u6743","text":"
                                                              1. \u4f7f\u7528\u5e73\u53f0 Admin \u89d2\u8272\uff0c \u5728 \u5de5\u4f5c\u7a7a\u95f4 -> \u6388\u6743 \u9875\u9762\u7ed9\u5c0f\u660e\u6388\u4e88 Workspace Admin \u89d2\u8272\u3002

                                                              2. \u7136\u540e\u5728 \u5bb9\u5668\u7ba1\u7406 -> \u6743\u9650\u7ba1\u7406 \u9875\u9762\uff0c\u901a\u8fc7 \u6dfb\u52a0\u6388\u6743 \u5c06\u5c0f\u660e\u6388\u6743\u4e3a Cluster Admin\u3002

                                                              "},{"location":"end-user/ghippo/workspace/wsbind-permission.html#_3","title":"\u7ed1\u5b9a\u5230\u5de5\u4f5c\u7a7a\u95f4","text":"

                                                              \u4f7f\u7528\u5c0f\u660e\u7684\u8d26\u53f7\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5728 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \u9875\u9762\uff0c\u901a\u8fc7 \u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4 \u529f\u80fd\uff0c \u5c0f\u660e\u53ef\u4ee5\u5c06\u6307\u5b9a\u96c6\u7fa4\u7ed1\u5b9a\u5230\u81ea\u5df1\u7684\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u3002

                                                              Note

                                                              \u5c0f\u660e\u80fd\u4e14\u53ea\u80fd\u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5c06\u96c6\u7fa4\u6216\u8005\u8be5\u96c6\u7fa4\u4e0b\u7684\u547d\u540d\u7a7a\u95f4\u7ed1\u5b9a\u5230\u67d0\u4e2a\u5de5\u4f5c\u7a7a\u95f4\uff0c\u65e0\u6cd5\u5728\u5168\u5c40\u7ba1\u7406\u6a21\u5757\u5b8c\u6210\u6b64\u64cd\u4f5c\u3002

                                                              \u7ed1\u5b9a\u547d\u540d\u7a7a\u95f4\u5230\u5de5\u4f5c\u7a7a\u95f4\u4e5f\u81f3\u5c11\u9700\u8981 Workspace Admin + Cluster Admin \u6743\u9650\u3002

                                                              "},{"location":"end-user/host/createhost.html","title":"\u521b\u5efa\u548c\u542f\u52a8\u4e91\u4e3b\u673a","text":"

                                                              \u7528\u6237\u5b8c\u6210\u6ce8\u518c\uff0c\u4e3a\u5176\u5206\u914d\u4e86\u5de5\u4f5c\u7a7a\u95f4\u3001\u547d\u540d\u7a7a\u95f4\u548c\u8d44\u6e90\u540e\uff0c\u5373\u53ef\u4ee5\u521b\u5efa\u5e76\u542f\u52a8\u4e91\u4e3b\u673a\u3002

                                                              "},{"location":"end-user/host/createhost.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                              • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                                              • \u7528\u6237\u5df2\u6210\u529f\u6ce8\u518c
                                                              • \u7ba1\u7406\u5458\u4e3a\u7528\u6237\u7ed1\u5b9a\u4e86\u5de5\u4f5c\u7a7a\u95f4
                                                              • \u7ba1\u7406\u5458\u4e3a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u4e86\u8d44\u6e90
                                                              "},{"location":"end-user/host/createhost.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u4ece\u5bfc\u822a\u680f\u8fdb\u5165 \u4e91\u4e3b\u673a
                                                              2. \u70b9\u51fb \u521b\u5efa\u4e91\u4e3b\u673a -> \u901a\u8fc7\u6a21\u677f\u521b\u5efa

                                                              3. \u5b9a\u4e49\u7684\u4e91\u4e3b\u673a\u5404\u9879\u914d\u7f6e\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65

                                                                \u57fa\u672c\u914d\u7f6e\u6a21\u677f\u914d\u7f6e\u5b58\u50a8\u4e0e\u7f51\u7edc

                                                              4. \u914d\u7f6e root \u5bc6\u7801\u6216 ssh \u5bc6\u94a5\u540e\u70b9\u51fb \u786e\u5b9a

                                                              5. \u8fd4\u56de\u4e3b\u673a\u5217\u8868\uff0c\u7b49\u5f85\u72b6\u6001\u53d8\u4e3a \u8fd0\u884c\u4e2d \u4e4b\u540e\uff0c\u53ef\u4ee5\u901a\u8fc7\u53f3\u4fa7\u7684 \u2507 \u542f\u52a8\u4e3b\u673a\u3002

                                                              \u4e0b\u4e00\u6b65\uff1a\u4f7f\u7528\u4e91\u4e3b\u673a

                                                              "},{"location":"end-user/host/usehost.html","title":"\u4f7f\u7528\u4e91\u4e3b\u673a","text":"

                                                              \u521b\u5efa\u5e76\u542f\u52a8\u4e91\u4e3b\u673a\u4e4b\u540e\uff0c\u7528\u6237\u5c31\u53ef\u4ee5\u5f00\u59cb\u4f7f\u7528\u4e91\u4e3b\u673a\u3002

                                                              "},{"location":"end-user/host/usehost.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                              • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                                              • \u7528\u6237\u5df2\u521b\u5efa\u5e76\u542f\u52a8\u4e91\u4e3b\u673a
                                                              "},{"location":"end-user/host/usehost.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u4ee5\u7ba1\u7406\u5458\u8eab\u4efd\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                                                              2. \u5bfc\u822a\u5230 \u5bb9\u5668\u7ba1\u7406 -> \u5bb9\u5668\u7f51\u7edc -> \u670d\u52a1 \uff0c\u70b9\u51fb\u670d\u52a1\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u670d\u52a1\u8be6\u60c5\u9875\uff0c\u5728\u53f3\u4e0a\u89d2\u70b9\u51fb \u66f4\u65b0

                                                              3. \u66f4\u6539\u7aef\u53e3\u8303\u56f4\u4e3a 30900-30999\uff0c\u4f46\u4e0d\u80fd\u51b2\u7a81\u3002

                                                              4. \u4ee5\u7ec8\u7aef\u7528\u6237\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5bfc\u822a\u5230\u5bf9\u5e94\u7684\u670d\u52a1\uff0c\u67e5\u770b\u8bbf\u95ee\u7aef\u53e3\u3002

                                                              5. \u5728\u5916\u7f51\u4f7f\u7528 SSH \u5ba2\u6237\u7aef\u767b\u5f55\u4e91\u4e3b\u673a

                                                              6. \u81f3\u6b64\uff0c\u4f60\u53ef\u4ee5\u5728\u4e91\u4e3b\u673a\u4e0a\u6267\u884c\u5404\u9879\u64cd\u4f5c\u3002

                                                              \u4e0b\u4e00\u6b65\uff1a\u4f7f\u7528 Notebook

                                                              "},{"location":"end-user/insight/alert-center/index.html","title":"\u544a\u8b66\u4e2d\u5fc3","text":"

                                                              \u544a\u8b66\u4e2d\u5fc3\u662f AI \u7b97\u529b\u5e73\u53f0 \u63d0\u4f9b\u7684\u4e00\u4e2a\u91cd\u8981\u529f\u80fd\uff0c\u5b83\u8ba9\u7528\u6237\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u754c\u9762\u65b9\u4fbf\u5730\u6309\u7167\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u67e5\u770b\u6240\u6709\u6d3b\u52a8\u548c\u5386\u53f2\u544a\u8b66\uff0c \u5e76\u6839\u636e\u544a\u8b66\u7ea7\u522b\uff08\u7d27\u6025\u3001\u8b66\u544a\u3001\u63d0\u793a\uff09\u6765\u641c\u7d22\u544a\u8b66\u3002

                                                              \u6240\u6709\u544a\u8b66\u90fd\u662f\u57fa\u4e8e\u9884\u8bbe\u7684\u544a\u8b66\u89c4\u5219\u8bbe\u5b9a\u7684\u9608\u503c\u6761\u4ef6\u89e6\u53d1\u7684\u3002\u5728 AI \u7b97\u529b\u5e73\u53f0\u4e2d\uff0c\u5185\u7f6e\u4e86\u4e00\u4e9b\u5168\u5c40\u544a\u8b66\u7b56\u7565\uff0c\u540c\u65f6\u60a8\u4e5f\u53ef\u4ee5\u968f\u65f6\u521b\u5efa\u3001\u5220\u9664\u544a\u8b66\u7b56\u7565\uff0c\u5bf9\u4ee5\u4e0b\u6307\u6807\u8fdb\u884c\u8bbe\u7f6e\uff1a

                                                              • CPU \u4f7f\u7528\u91cf
                                                              • \u5185\u5b58\u4f7f\u7528\u91cf
                                                              • \u78c1\u76d8\u4f7f\u7528\u91cf
                                                              • \u78c1\u76d8\u6bcf\u79d2\u8bfb\u6b21\u6570
                                                              • \u78c1\u76d8\u6bcf\u79d2\u5199\u6b21\u6570
                                                              • \u96c6\u7fa4\u78c1\u76d8\u8bfb\u53d6\u541e\u5410\u91cf
                                                              • \u96c6\u7fa4\u78c1\u76d8\u5199\u5165\u541e\u5410\u91cf
                                                              • \u7f51\u7edc\u53d1\u9001\u901f\u7387
                                                              • \u7f51\u7edc\u63a5\u6536\u901f\u7387

                                                              \u8fd8\u53ef\u4ee5\u4e3a\u544a\u8b66\u89c4\u5219\u6dfb\u52a0\u6807\u7b7e\u548c\u6ce8\u89e3\u3002\u544a\u8b66\u89c4\u5219\u5206\u4e3a\u6d3b\u8dc3\u548c\u8fc7\u671f\u89c4\u5219\uff0c\u652f\u6301\u542f\u7528/\u7981\u7528\u67d0\u4e9b\u89c4\u5219\u6765\u5b9e\u73b0\u544a\u8b66\u9759\u9ed8\u3002

                                                              \u5f53\u8fbe\u5230\u9608\u503c\u6761\u4ef6\u540e\uff0c\u53ef\u4ee5\u914d\u7f6e\u544a\u8b66\u901a\u77e5\u65b9\u5f0f\uff0c\u5305\u62ec\u90ae\u4ef6\u3001\u9489\u9489\u3001\u4f01\u4e1a\u5fae\u4fe1\u3001Webhook \u548c\u77ed\u4fe1\u901a\u77e5\u3002 \u6240\u6709\u901a\u77e5\u7684\u6d88\u606f\u6a21\u677f\u90fd\u53ef\u4ee5\u81ea\u5b9a\u4e49\uff0c\u540c\u65f6\u8fd8\u652f\u6301\u6309\u8bbe\u5b9a\u7684\u95f4\u9694\u65f6\u95f4\u53d1\u9001\u901a\u77e5\u3002

                                                              \u6b64\u5916\uff0c\u544a\u8b66\u4e2d\u5fc3\u8fd8\u652f\u6301\u901a\u8fc7\u963f\u91cc\u4e91\u3001\u817e\u8baf\u4e91\u7b49\u63d0\u4f9b\u7684\u77ed\u4fe1\u670d\u52a1\u5c06\u544a\u8b66\u6d88\u606f\u53d1\u9001\u7ed9\u6307\u5b9a\u7528\u6237\uff0c\u5b9e\u73b0\u591a\u79cd\u65b9\u5f0f\u7684\u544a\u8b66\u901a\u77e5\u3002

                                                              AI \u7b97\u529b\u5e73\u53f0 \u544a\u8b66\u4e2d\u5fc3\u662f\u4e00\u4e2a\u529f\u80fd\u5f3a\u5927\u7684\u544a\u8b66\u7ba1\u7406\u5e73\u53f0\uff0c\u53ef\u5e2e\u52a9\u7528\u6237\u53ca\u65f6\u53d1\u73b0\u548c\u89e3\u51b3\u96c6\u7fa4\u4e2d\u51fa\u73b0\u7684\u95ee\u9898\uff0c \u63d0\u9ad8\u4e1a\u52a1\u7a33\u5b9a\u6027\u548c\u53ef\u7528\u6027\uff0c\u4fbf\u4e8e\u96c6\u7fa4\u5de1\u68c0\u548c\u6545\u969c\u6392\u67e5\u3002

                                                              "},{"location":"end-user/insight/alert-center/alert-policy.html","title":"\u544a\u8b66\u7b56\u7565","text":"

                                                              \u544a\u8b66\u7b56\u7565\u662f\u5728\u53ef\u89c2\u6d4b\u6027\u7cfb\u7edf\u4e2d\u5b9a\u4e49\u7684\u4e00\u7ec4\u89c4\u5219\u548c\u6761\u4ef6\uff0c\u7528\u4e8e\u68c0\u6d4b\u548c\u89e6\u53d1\u8b66\u62a5\uff0c\u4ee5\u4fbf\u5728\u7cfb\u7edf\u51fa\u73b0\u5f02\u5e38\u6216\u8fbe\u5230\u9884\u5b9a\u7684\u9608\u503c\u65f6\u53ca\u65f6\u901a\u77e5\u76f8\u5173\u4eba\u5458\u6216\u7cfb\u7edf\u3002

                                                              \u6bcf\u6761\u544a\u8b66\u7b56\u7565\u662f\u4e00\u7ec4\u544a\u8b66\u89c4\u5219\u7684\u96c6\u5408\uff0c\u652f\u6301\u5bf9\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u5de5\u4f5c\u8d1f\u8f7d\u7b49\u8d44\u6e90\u3001\u65e5\u5fd7\u3001\u4e8b\u4ef6\u8bbe\u7f6e\u544a\u8b66\u89c4\u5219\u3002\u5f53\u544a\u8b66\u5bf9\u8c61\u8fbe\u5230\u7b56\u7565\u4e0b\u4efb\u4e00\u89c4\u5219\u8bbe\u5b9a\u7684\u9608\u503c\uff0c\u5219\u4f1a\u81ea\u52a8\u89e6\u53d1\u544a\u8b66\u5e76\u53d1\u9001\u901a\u77e5\u3002

                                                              "},{"location":"end-user/insight/alert-center/alert-policy.html#_2","title":"\u67e5\u770b\u544a\u8b66\u7b56\u7565","text":"
                                                              1. \u70b9\u51fb\u4e00\u7ea7\u5bfc\u822a\u680f\u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027\u3002
                                                              2. \u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9 \u544a\u8b66\u4e2d\u5fc3 -> \u544a\u8b66\u7b56\u7565\u3002

                                                                • \u96c6\u7fa4\uff1a\u5355\u51fb\u96c6\u7fa4\u4e0b\u62c9\u6846\u53ef\u5207\u6362\u96c6\u7fa4\uff1b
                                                                • \u547d\u540d\u7a7a\u95f4\uff1a\u5355\u51fb\u547d\u540d\u7a7a\u95f4\u5207\u6362\u4e0b\u62c9\u6846\u3002

                                                              3. \u70b9\u51fb\u544a\u8b66\u7b56\u7565\u540d\u79f0\u53ef\u67e5\u770b\u7b56\u7565\u7684\u57fa\u672c\u4fe1\u606f\u3001\u89c4\u5219\u4ee5\u53ca\u901a\u77e5\u914d\u7f6e\u3002

                                                                1. \u5728\u89c4\u5219\u5217\u8868\u4e2d\u53ef\u67e5\u770b\u89c4\u5219\u7c7b\u578b\u3001\u89c4\u5219\u7684\u8868\u8fbe\u5f0f\u3001\u7ea7\u522b\u3001\u72b6\u6001\u7b49\u4fe1\u606f\u3002
                                                                2. \u8fdb\u5165\u7b56\u7565\u8be6\u60c5\uff0c\u53ef\u4ee5\u6dfb\u52a0\u3001\u7f16\u8f91\u3001\u5220\u9664\u5176\u4e0b\u7684\u544a\u8b66\u89c4\u5219\u3002

                                                              "},{"location":"end-user/insight/alert-center/alert-policy.html#_3","title":"\u521b\u5efa\u544a\u8b66\u7b56\u7565","text":"
                                                              1. \u586b\u5199\u57fa\u672c\u4fe1\u606f\uff0c\u9009\u62e9\u4e00\u4e2a\u6216\u591a\u4e2a\u96c6\u7fa4\u3001\u8282\u70b9\u6216\u5de5\u4f5c\u8d1f\u8f7d\u4e3a\u544a\u8b66\u5bf9\u8c61\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65\u3002

                                                                Note

                                                                • \u9009\u62e9\u5168\u90e8\u96c6\u7fa4\u3001\u8282\u70b9\u6216\u5de5\u4f5c\u8d1f\u8f7d\uff1a\u521b\u5efa\u7684\u544a\u8b66\u89c4\u5219\u5bf9\u6240\u6709\u5df2\u5b89\u88c5 insight-agent \u7684\u96c6\u7fa4\u751f\u6548\u3002
                                                                • \u9009\u62e9\u5355\u4e2a\u6216\u591a\u4e2a\u96c6\u7fa4\u96c6\u7fa4\u3001\u8282\u70b9\u6216\u5de5\u4f5c\u8d1f\u8f7d\uff1a\u521b\u5efa\u7684\u544a\u8b66\u89c4\u5219\u4ec5\u5bf9\u6240\u9009\u7684\u8d44\u6e90\u5bf9\u8c61\u751f\u6548\u3002
                                                                • \u540c\u65f6\uff0c\u7528\u6237\u53ea\u80fd\u5bf9\u5df2\u6743\u9650\u7684\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u8bbe\u7f6e\u544a\u8b66\u89c4\u5219\u3002
                                                              "},{"location":"end-user/insight/alert-center/alert-policy.html#_4","title":"\u624b\u52a8\u6dfb\u52a0\u89c4\u5219","text":"
                                                              1. \u5728\u521b\u5efa\u544a\u8b66\u7b56\u7565\u7684\u7b2c\u4e8c\u90e8\u4e2d\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4e0a\u89d2\u7684\u6dfb\u52a0\u89c4\u5219\u3002

                                                              2. \u5728\u5f39\u7a97\u4e2d\u521b\u5efa\u544a\u8b66\u89c4\u5219\uff0c\u586b\u5199\u5404\u9879\u53c2\u6570\u540e\u70b9\u51fb \u786e\u5b9a\u3002

                                                                • \u6a21\u677f\u89c4\u5219\uff1a\u9884\u5b9a\u4e49\u4e86\u57fa\u7840\u6307\u6807\uff0c\u53ef\u4ee5\u6309 CPU\u3001\u5185\u5b58\u3001\u78c1\u76d8\u3001\u7f51\u7edc\u8bbe\u5b9a\u8981\u76d1\u63a7\u7684\u6307\u6807\u3002
                                                                • PromQL \u89c4\u5219\uff1a\u8f93\u5165\u4e00\u4e2a PromQL \u8868\u8fbe\u5f0f\uff0c\u5177\u4f53\u8bf7\u67e5\u8be2 Prometheus \u8868\u8fbe\u5f0f\u3002
                                                                • \u6301\u7eed\u65f6\u957f\uff1a\u544a\u8b66\u88ab\u89e6\u53d1\u4e14\u6301\u7eed\u65f6\u95f4\u8fbe\u5230\u8be5\u8bbe\u5b9a\u503c\u540e\uff0c\u544a\u8b66\u7b56\u7565\u5c06\u53d8\u4e3a\u89e6\u53d1\u4e2d\u72b6\u6001\u3002
                                                                • \u544a\u8b66\u7ea7\u522b\uff1a\u5305\u542b\u7d27\u6025\u3001\u8b66\u544a\u3001\u4fe1\u606f\u4e09\u79cd\u7ea7\u522b\u3002
                                                                • \u9ad8\u7ea7\u8bbe\u7f6e\uff1a\u53ef\u4ee5\u81ea\u5b9a\u4e49\u6807\u7b7e\u548c\u6ce8\u89e3\u3002

                                                                Info

                                                                \u7cfb\u7edf\u5b9a\u4e49\u4e86\u5185\u7f6e\u6807\u7b7e\uff0c\u82e5\u81ea\u5b9a\u4e49\u6807\u7b7e\u4e0e\u5185\u7f6e\u6807\u7b7e\u7684\u952e\u503c\u76f8\u540c\uff0c\u5219\u81ea\u5b9a\u4e49\u6807\u7b7e\u4e0d\u751f\u6548\u3002 \u5185\u7f6e\u6807\u7b7e\u6709\uff1aseverity\u3001rule_id\uff0csource\u3001cluster_name\u3001group_id\u3001 target_type \u548c target\u3002
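For illustration only, a minimal sketch of what a PromQL rule's expression and duration might look like for node memory usage. The metrics are standard node_exporter series; the threshold and duration are arbitrary examples, not platform defaults:

```yaml
# illustrative PromQL rule fields; threshold and duration are examples only
expr: '(1 - node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes) > 0.9'
for: 5m   # the Duration field: how long the condition must hold before firing
```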

                                                              "},{"location":"end-user/insight/alert-center/alert-policy.html#_5","title":"\u521b\u5efa\u65e5\u5fd7\u89c4\u5219","text":"

                                                              \u5b8c\u6210\u57fa\u672c\u4fe1\u606f\u7684\u586b\u5199\u540e\uff0c\u70b9\u51fb \u6dfb\u52a0\u89c4\u5219\uff0c\u89c4\u5219\u7c7b\u578b\u9009\u62e9 \u65e5\u5fd7\u89c4\u5219\u3002

                                                              Note

                                                              \u4ec5\u5f53\u8d44\u6e90\u5bf9\u8c61\u9009\u62e9\u8282\u70b9\u6216\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u652f\u6301\u521b\u5efa\u65e5\u5fd7\u89c4\u5219\u3002

                                                              \u5b57\u6bb5\u8bf4\u660e\uff1a

                                                              • \u8fc7\u6ee4\u6761\u4ef6\uff1a\u67e5\u8be2\u65e5\u5fd7\u5185\u5bb9\u7684\u5b57\u6bb5\uff0c\u652f\u6301\u4e0e\u3001\u6216\u3001\u6b63\u5219\u5339\u914d\u3001\u6a21\u7cca\u5339\u914d\u56db\u79cd\u8fc7\u6ee4\u6761\u4ef6\u3002
                                                              • \u5224\u65ad\u6761\u4ef6\uff1a\u6839\u636e \u8fc7\u6ee4\u6761\u4ef6\uff0c\u8f93\u5165\u5173\u952e\u5b57\u6216\u5339\u914d\u6761\u4ef6\u3002
                                                              • \u65f6\u95f4\u8303\u56f4\uff1a\u65e5\u5fd7\u67e5\u8be2\u7684\u65f6\u95f4\u8303\u56f4\u3002
                                                              • \u9608\u503c\u6761\u4ef6\uff1a\u5728\u8f93\u5165\u6846\u4e2d\u8f93\u5165\u544a\u8b66\u9608\u503c\u3002\u5f53\u8fbe\u5230\u8bbe\u7f6e\u7684\u9608\u503c\u65f6\uff0c\u5219\u89e6\u53d1\u544a\u8b66\u3002\u652f\u6301\u7684\u6bd4\u8f83\u8fd0\u7b97\u7b26\u6709\uff1a >\u3001\u2265\u3001=\u3001\u2264\u3001<\u3002
                                                              • \u544a\u8b66\u7ea7\u522b\uff1a\u9009\u62e9\u544a\u8b66\u7ea7\u522b\uff0c\u7528\u4e8e\u8868\u793a\u544a\u8b66\u7684\u4e25\u91cd\u7a0b\u5ea6\u3002
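The sketch below merely restates the fields above in one place; the YAML keys and values are hypothetical and do not come from the platform's API:

```yaml
# hypothetical keys mirroring the log rule form fields
logRule:
  filterCondition: fuzzy          # one of: and / or / regex / fuzzy
  keyword: "connection refused"   # the judging condition
  timeRange: 5m
  threshold: "> 10"               # operators: > / >= / = / <= / <
  severity: warning
```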
                                                              "},{"location":"end-user/insight/alert-center/alert-policy.html#_6","title":"\u521b\u5efa\u4e8b\u4ef6\u89c4\u5219","text":"

                                                              \u5b8c\u6210\u57fa\u672c\u4fe1\u606f\u7684\u586b\u5199\u540e\uff0c\u70b9\u51fb \u6dfb\u52a0\u89c4\u5219\uff0c\u89c4\u5219\u7c7b\u578b\u9009\u62e9 \u4e8b\u4ef6\u89c4\u5219\u3002

                                                              Note

                                                              \u4ec5\u5f53\u8d44\u6e90\u5bf9\u8c61\u9009\u62e9\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u652f\u6301\u521b\u5efa\u4e8b\u4ef6\u89c4\u5219\u3002

                                                              \u5b57\u6bb5\u8bf4\u660e\uff1a

                                                              • \u4e8b\u4ef6\u89c4\u5219\uff1a\u4ec5\u652f\u6301\u8d44\u6e90\u5bf9\u8c61\u9009\u62e9\u5de5\u4f5c\u8d1f\u8f7d
                                                              • \u4e8b\u4ef6\u539f\u56e0\uff1a\u4e0d\u540c\u7684\u5de5\u4f5c\u8d1f\u8f7d\u7c7b\u578b\u7684\u4e8b\u4ef6\u539f\u56e0\u4e0d\u540c\uff0c\u4e8b\u4ef6\u539f\u56e0\u4e4b\u95f4\u662f\u201c\u548c\u201d\u7684\u5173\u7cfb\u3002
                                                              • \u65f6\u95f4\u8303\u56f4\uff1a\u68c0\u6d4b\u8be5\u65f6\u95f4\u8303\u56f4\u5185\u4ea7\u751f\u6570\u636e\uff0c\u82e5\u8fbe\u5230\u8bbe\u7f6e\u7684\u9608\u503c\u6761\u4ef6\uff0c\u5219\u89e6\u53d1\u544a\u8b66\u4e8b\u4ef6\u3002
                                                              • \u9608\u503c\u6761\u4ef6\uff1a\u5f53\u4ea7\u751f\u7684\u4e8b\u4ef6\u8fbe\u5230\u8bbe\u7f6e\u7684\u9608\u503c\u65f6\uff0c\u5219\u89e6\u53d1\u544a\u8b66\u4e8b\u4ef6\u3002
                                                              • \u8d8b\u52bf\u56fe\uff1a\u9ed8\u8ba4\u67e5\u8be2 10 \u5206\u949f\u5185\u7684\u4e8b\u4ef6\u53d8\u5316\u8d8b\u52bf\uff0c\u6bcf\u4e2a\u70b9\u7684\u6570\u503c\u7edf\u8ba1\u7684\u662f\u5f53\u524d\u65f6\u95f4\u70b9\u5230\u4e4b\u524d\u7684\u67d0\u6bb5\u65f6\u95f4\uff08\u65f6\u95f4\u8303\u56f4\uff09\u5185\u53d1\u751f\u7684\u603b\u6b21\u6570\u3002
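As with log rules, the following only restates the form fields; the YAML keys, workload name, and event reasons are hypothetical:

```yaml
# hypothetical keys mirroring the event rule form fields
eventRule:
  target: deployment/my-app   # event rules apply to workloads only (name is hypothetical)
  eventReasons:               # multiple reasons are ANDed together
    - BackOff
    - FailedScheduling
  timeRange: 10m
  threshold: ">= 3"
  severity: critical
```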
                                                              "},{"location":"end-user/insight/alert-center/alert-policy.html#_7","title":"\u5bfc\u5165\u89c4\u5219\u6a21\u677f","text":"
                                                              1. \u53ef\u70b9\u51fb \u6a21\u677f\u5bfc\u5165\uff0c\u9009\u62e9\u5e73\u53f0\u7ba1\u7406\u5458\u5df2\u521b\u5efa\u597d\u7684\u544a\u8b66\u6a21\u677f\u6279\u91cf\u5bfc\u5165\u544a\u8b66\u89c4\u5219\u3002

                                                              2. \u70b9\u51fb \u4e0b\u4e00\u6b65 \u540e\u914d\u7f6e\u901a\u77e5\u3002

                                                              3. \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u6309\u94ae\uff0c\u8fd4\u56de\u544a\u8b66\u7b56\u7565\u5217\u8868\u3002

                                                              Tip

                                                              \u65b0\u5efa\u7684\u544a\u8b66\u7b56\u7565\u4e3a \u672a\u89e6\u53d1 \u72b6\u6001\u3002\u4e00\u65e6\u6ee1\u8db3\u89c4\u5219\u4e2d\u7684\u9608\u503c\u6761\u4ef6\u548c\u6301\u7eed\u65f6\u95f4\u540e\uff0c\u5c06\u53d8\u4e3a \u89e6\u53d1\u4e2d \u72b6\u6001\u3002

                                                              Warning

                                                              \u5220\u9664\u540e\u7684\u544a\u8b66\u7b56\u7565\u5c06\u5b8c\u5168\u6d88\u5931\uff0c\u8bf7\u8c28\u614e\u64cd\u4f5c\u3002

                                                              "},{"location":"end-user/insight/alert-center/alert-policy.html#yaml","title":"\u901a\u8fc7 YAML \u5bfc\u5165\u544a\u8b66\u7b56\u7565","text":"
                                                              1. \u8fdb\u5165\u544a\u8b66\u7b56\u7565\u5217\u8868\uff0c\u70b9\u51fb YAML \u521b\u5efa\u3002

                                                              2. \u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u7684\u9009\u62e9\u662f\u4e3a\u4e86\u544a\u8b66\u7b56\u7565\u7684\u7ba1\u7406\u6743\u9650\u3002

                                                              3. YAML \u7f16\u8f91\u5668\u4e2d\u8bf7\u586b\u5199 spec \u53ca\u5176\u4e2d\u7684\u5185\u5bb9\uff0c\u4ec5\u652f\u6301\u5bfc\u5165\u4e00\u4e2a group\u3002
                                                              4. \u544a\u8b66\u89c4\u5219\u540d\u79f0 \u9700\u8981\u7b26\u5408\u89c4\u8303\uff1a\u540d\u79f0\u53ea\u80fd\u5305\u542b\u5927\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u3001\u4e0b\u5212\u7ebf\uff08_\uff09\u548c\u8fde\u5b57\u7b26\uff08-\uff09\uff0c\u5fc5\u987b\u4ee5\u5b57\u6bcd\u5f00\u5934\uff0c\u6700\u957f 63 \u4e2a\u5b57\u7b26\u3002
                                                              5. \u5fc5\u586b severity \u4e14\u7b26\u5408\u89c4\u8303\uff1acritical\u3001warning\u3001info\u3002
                                                              6. \u5fc5\u586b\u8868\u8fbe\u5f0f expr\u3002

                                                              7. \u5bfc\u5165 YAML \u6587\u4ef6\u540e\uff0c\u70b9\u51fb \u9884\u89c8\uff0c\u53ef\u4ee5\u5bf9\u5bfc\u5165\u7684 YAML \u683c\u5f0f\u8fdb\u884c\u9a8c\u8bc1\uff0c\u5e76\u5feb\u901f\u786e\u8ba4\u5bfc\u5165\u7684\u544a\u8b66\u89c4\u5219\u3002
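A minimal sketch of such a spec, assuming the Prometheus-style rule-group layout implied by the constraints above; the group name, rule name, expression, and annotation are all hypothetical:

```yaml
spec:
  groups:                           # only one group may be imported
    - name: example-group           # hypothetical group name
      rules:
        - alert: node-memory-high   # letters, digits, _ and - only; starts with a letter; <= 63 chars
          expr: '(1 - node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes) > 0.9'
          for: 5m
          labels:
            severity: warning       # required: critical / warning / info
          annotations:
            description: Node memory usage has stayed above 90% for 5 minutes
```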

                                                              "},{"location":"end-user/insight/alert-center/alert-template.html","title":"\u544a\u8b66\u6a21\u677f","text":"

                                                              \u544a\u8b66\u6a21\u677f\u53ef\u652f\u6301\u5e73\u53f0\u7ba1\u7406\u5458\u521b\u5efa\u544a\u8b66\u6a21\u677f\u53ca\u89c4\u5219\uff0c\u4e1a\u52a1\u4fa7\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u544a\u8b66\u6a21\u677f\u521b\u5efa\u544a\u8b66\u7b56\u7565\u3002 \u8fd9\u4e2a\u529f\u80fd\u53ef\u4ee5\u51cf\u5c11\u4e1a\u52a1\u4eba\u5458\u5bf9\u544a\u8b66\u89c4\u5219\u7684\u7ba1\u7406\uff0c\u4e14\u53ef\u4ee5\u6839\u636e\u73af\u5883\u5b9e\u9645\u60c5\u51b5\u81ea\u884c\u4fee\u6539\u544a\u8b66\u9608\u503c\u3002

                                                              "},{"location":"end-user/insight/alert-center/alert-template.html#_2","title":"\u521b\u5efa\u544a\u8b66\u6a21\u677f","text":"
                                                              1. \u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9\u00a0\u544a\u8b66\u4e2d\u5fc3\u00a0->\u00a0\u544a\u8b66\u7b56\u7565\uff0c\u5355\u51fb\u9876\u90e8\u7684 \u544a\u8b66\u6a21\u677f \u3002

                                                              2. \u70b9\u51fb \u521b\u5efa\u544a\u8b66\u6a21\u677f \uff0c\u8bbe\u7f6e\u544a\u8b66\u6a21\u677f\u7684\u540d\u79f0\u3001\u63cf\u8ff0\u7b49\u4fe1\u606f\u3002

                                                                \u53c2\u6570 \u8bf4\u660e \u6a21\u677f\u540d\u79f0 \u540d\u79f0\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u548c\u8fde\u5b57\u7b26\uff08-\uff09\uff0c\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u548c\u7ed3\u5c3e\uff0c\u6700\u957f 63 \u4e2a\u5b57\u7b26\u3002 \u63cf\u8ff0 \u63cf\u8ff0\u53ef\u5305\u542b\u4efb\u610f\u5b57\u7b26\uff0c\u6700\u957f 256 \u4e2a\u5b57\u7b26\u3002 \u8d44\u6e90\u7c7b\u578b \u7528\u4e8e\u6307\u5b9a\u544a\u8b66\u6a21\u677f\u7684\u5339\u914d\u7c7b\u578b\u3002 \u544a\u8b66\u89c4\u5219 \u652f\u6301\u9884\u5b9a\u4e49\u591a\u4e2a\u544a\u8b66\u89c4\u5219\uff0c\u53ef\u6dfb\u52a0\u6a21\u677f\u89c4\u5219\u3001PromQL \u89c4\u5219\u3002
                                                              3. \u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u540e\u8fd4\u56de\u544a\u8b66\u6a21\u677f\u5217\u8868\uff0c\u70b9\u51fb\u6a21\u677f\u540d\u79f0\u540e\u53ef\u67e5\u770b\u6a21\u677f\u8be6\u60c5\u3002

                                                              "},{"location":"end-user/insight/alert-center/alert-template.html#_3","title":"\u7f16\u8f91\u544a\u8b66\u6a21\u677f","text":"

                                                              \u70b9\u51fb\u76ee\u6807\u89c4\u5219\u540e\u7684 \u2507 \uff0c\u70b9\u51fb \u7f16\u8f91\uff0c\u8fdb\u5165\u6291\u5236\u89c4\u5219\u7684\u7f16\u8f91\u9875\u3002

                                                              "},{"location":"end-user/insight/alert-center/alert-template.html#_4","title":"\u5220\u9664\u544a\u8b66\u6a21\u677f","text":"

                                                              \u70b9\u51fb\u76ee\u6807\u6a21\u677f\u540e\u4fa7\u7684 \u2507 \uff0c\u70b9\u51fb \u5220\u9664\uff0c\u5728\u8f93\u5165\u6846\u4e2d\u8f93\u5165\u544a\u8b66\u6a21\u677f\u7684\u540d\u79f0\u5373\u53ef\u5220\u9664\u3002

                                                              "},{"location":"end-user/insight/alert-center/inhibition.html","title":"\u544a\u8b66\u6291\u5236","text":"

                                                              \u544a\u8b66\u6291\u5236\u4e3b\u8981\u662f\u5bf9\u4e8e\u67d0\u4e9b\u4e0d\u9700\u8981\u7acb\u5373\u5173\u6ce8\u7684\u544a\u8b66\u8fdb\u884c\u4e34\u65f6\u9690\u85cf\u6216\u8005\u964d\u4f4e\u5176\u4f18\u5148\u7ea7\u7684\u4e00\u79cd\u673a\u5236\u3002\u8fd9\u4e2a\u529f\u80fd\u7684\u76ee\u7684\u662f\u4e3a\u4e86\u51cf\u5c11\u4e0d\u5fc5\u8981\u7684\u544a\u8b66\u4fe1\u606f\u5bf9\u8fd0\u7ef4\u4eba\u5458\u7684\u5e72\u6270\uff0c\u4f7f\u4ed6\u4eec\u80fd\u591f\u96c6\u4e2d\u7cbe\u529b\u5904\u7406\u66f4\u91cd\u8981\u7684\u95ee\u9898\u3002

                                                              \u544a\u8b66\u6291\u5236\u901a\u8fc7\u5b9a\u4e49\u4e00\u7ec4\u89c4\u5219\u6765\u8bc6\u522b\u548c\u5ffd\u7565\u67d0\u4e9b\u544a\u8b66\uff0c\u5f53\u5b83\u4eec\u5728\u7279\u5b9a\u6761\u4ef6\u4e0b\u53d1\u751f\u65f6\u3002\u4e3b\u8981\u6709\u4ee5\u4e0b\u51e0\u79cd\u60c5\u51b5\uff1a

                                                              • \u7236\u5b50\u5173\u7cfb\u6291\u5236\uff1a\u5f53\u4e00\u4e2a\u7236\u544a\u8b66\uff08\u4f8b\u5982\u67d0\u4e2a\u8282\u70b9\u7684\u5d29\u6e83\uff09\u89e6\u53d1\u65f6\uff0c\u53ef\u4ee5\u6291\u5236\u6240\u6709\u7531\u6b64\u5f15\u8d77\u7684\u5b50\u544a\u8b66\uff08\u4f8b\u5982\u8be5\u8282\u70b9\u4e0a\u8fd0\u884c\u7684\u5bb9\u5668\u5d29\u6e83\uff09\u3002
                                                              • \u76f8\u4f3c\u544a\u8b66\u6291\u5236\uff1a\u5f53\u591a\u4e2a\u544a\u8b66\u5177\u6709\u76f8\u540c\u7684\u7279\u5f81\uff08\u4f8b\u5982\u540c\u4e00\u5b9e\u4f8b\u4e0a\u7684\u76f8\u540c\u95ee\u9898\uff09\u65f6\uff0c\u53ef\u4ee5\u6291\u5236\u91cd\u590d\u7684\u544a\u8b66\u901a\u77e5\u3002
                                                              "},{"location":"end-user/insight/alert-center/inhibition.html#_2","title":"\u521b\u5efa\u6291\u5236\u89c4\u5219","text":"
                                                              1. \u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9\u00a0\u544a\u8b66\u4e2d\u5fc3\u00a0->\u00a0\u544a\u8b66\u964d\u566a\uff0c\u5355\u51fb\u9876\u90e8\u7684 \u544a\u8b66\u6291\u5236 \u3002

                                                              2. \u70b9\u51fb \u65b0\u5efa\u6291\u5236\u89c4\u5219 \uff0c\u8bbe\u7f6e\u6291\u5236\u89c4\u5219\u7684\u540d\u79f0\u3001\u89c4\u5219\u7b49\u3002

                                                                Note

                                                                \u901a\u8fc7\u89c4\u5219\u6807\u7b7e\u548c\u544a\u8b66\u6807\u7b7e\u5b9a\u4e49\u4e00\u7ec4\u89c4\u5219\u6765\u8bc6\u522b\u548c\u5ffd\u7565\u67d0\u4e9b\u544a\u8b66\uff0c\u8fbe\u5230\u907f\u514d\u540c\u4e00\u95ee\u9898\u53ef\u80fd\u4f1a\u89e6\u53d1\u591a\u4e2a\u76f8\u4f3c\u6216\u76f8\u5173\u7684\u544a\u8b66\u7684\u95ee\u9898\u3002

                                                                \u53c2\u6570\u65f6\u95f4 \u8bf4\u660e \u6291\u5236\u89c4\u5219\u540d\u79f0 \u6291\u5236\u89c4\u5219\u540d\u79f0\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u548c\u8fde\u5b57\u7b26\uff08-\uff09\uff0c\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u548c\u7ed3\u5c3e\uff0c\u6700\u957f 63 \u4e2a\u5b57\u7b26\u3002 \u63cf\u8ff0 \u63cf\u8ff0\u53ef\u5305\u542b\u4efb\u610f\u5b57\u7b26\uff0c\u6700\u957f 256 \u4e2a\u5b57\u7b26\u3002 \u96c6\u7fa4 \u8be5\u6291\u5236\u89c4\u5219\u4f5c\u7528\u7684\u96c6\u7fa4\u3002 \u547d\u540d\u7a7a\u95f4 \u8be5\u6291\u5236\u89c4\u5219\u4f5c\u7528\u7684\u547d\u540d\u7a7a\u95f4\u3002 \u6839\u6e90\u544a\u8b66 \u901a\u8fc7\u586b\u5199\u7684\u6807\u7b7e\u6761\u4ef6\u5339\u914d\u544a\u8b66\uff0c\u4f1a\u5c06\u7b26\u5408\u6240\u6709\u6807\u7b7e\u6761\u4ef6\u7684\u544a\u8b66\u4e0e\u7b26\u5408\u6291\u5236\u6761\u4ef6\u7684\u8fdb\u884c\u5bf9\u6bd4\uff0c\u4e0d\u7b26\u5408\u6291\u5236\u6761\u4ef6\u7684\u544a\u8b66\u5c06\u7167\u5e38\u53d1\u9001\u6d88\u606f\u7ed9\u7528\u6237\u3002 \u53d6\u503c\u8303\u56f4\u8bf4\u660e\uff1a - \u544a\u8b66\u7ea7\u522b\uff1a\u6307\u6807\u6216\u4e8b\u4ef6\u544a\u8b66\u7684\u7ea7\u522b\uff0c\u53ef\u4ee5\u8bbe\u7f6e\u4e3a\uff1a\u7d27\u6025\u3001\u91cd\u8981\u3001\u63d0\u793a\u3002 - \u8d44\u6e90\u7c7b\u578b\uff1a\u544a\u8b66\u5bf9\u8c61\u6240\u5bf9\u5e94\u7684\u8d44\u6e90\u7c7b\u578b\uff0c\u53ef\u4ee5\u8bbe\u7f6e\u4e3a\uff1a\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u65e0\u72b6\u6001\u8d1f\u8f7d\u3001\u6709\u72b6\u5bb9\u8d1f\u8f7d\u3001\u5b88\u62a4\u8fdb\u7a0b\u3001\u5bb9\u5668\u7ec4\u3002 - \u6807\u7b7e\uff1a\u544a\u8b66\u6807\u8bc6\u5c5e\u6027\uff0c\u7531\u6807\u7b7e\u540d\u548c\u6807\u7b7e\u503c\u6784\u6210\uff0c\u652f\u6301\u7528\u6237\u81ea\u5b9a\u4e49\u3002 \u6291\u5236\u544a\u8b66 \u7528\u4e8e\u6307\u5b9a\u76ee\u6807\u8b66\u62a5\uff08\u5c06\u88ab\u6291\u5236\u7684\u8b66\u62a5\uff09\u7684\u5339\u914d\u6761\u4ef6\uff0c\u7b26\u5408\u6240\u6709\u6807\u7b7e\u6761\u4ef6\u7684\u544a\u8b66\u5c06\u4e0d\u4f1a\u518d\u53d1\u9001\u6d88\u606f\u7ed9\u7528\u6237\u3002 \u5339\u914d\u6807\u7b7e \u7528\u4e8e\u6307\u5b9a\u5e94\u8be5\u6bd4\u8f83\u7684\u6807\u7b7e\u5217\u8868\uff0c\u4ee5\u786e\u5b9a\u6e90\u8b66\u62a5\u548c\u76ee\u6807\u8b66\u62a5\u662f\u5426\u5339\u914d\u3002\u53ea\u6709\u5728\u00a0equal\u00a0\u4e2d\u6307\u5b9a\u7684\u6807\u7b7e\u5728\u6e90\u548c\u76ee\u6807\u8b66\u62a5\u4e2d\u7684\u503c\u5b8c\u5168\u76f8\u540c\u7684\u60c5\u51b5\u4e0b\uff0c\u624d\u4f1a\u89e6\u53d1\u6291\u5236\u3002equal\u00a0\u5b57\u6bb5\u662f\u53ef\u9009\u7684\u3002\u5982\u679c\u7701\u7565\u00a0equal\u00a0\u5b57\u6bb5\uff0c\u5219\u4f1a\u5c06\u6240\u6709\u6807\u7b7e\u7528\u4e8e\u5339\u914d
                                                              3. \u70b9\u51fb**\u786e\u5b9a**\u5b8c\u6210\u521b\u5efa\u540e\u8fd4\u56de\u544a\u8b66\u6291\u5236\u5217\u8868\uff0c\u70b9\u51fb\u544a\u8b66\u6291\u5236\u540d\u79f0\u540e\u53ef\u67e5\u770b\u6291\u5236\u89c4\u5219\u8be6\u60c5\u3002
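Conceptually this mirrors Alertmanager's inhibit_rules; a minimal sketch, with hypothetical label values:

```yaml
inhibit_rules:
  - source_matchers:          # the "source alert" conditions
      - alertname = NodeDown
      - severity = critical
    target_matchers:          # the "inhibited alert" conditions
      - severity = warning
    equal:                    # "match labels": both alerts must agree on these
      - cluster
      - node
```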

                                                              "},{"location":"end-user/insight/alert-center/inhibition.html#_3","title":"\u67e5\u770b\u89c4\u5219\u6807\u7b7e","text":"
                                                              1. \u70b9\u51fb\u53f3\u4fa7\u5bfc\u822a\u680f\u9009\u62e9\u00a0\u544a\u8b66\u4e2d\u5fc3\u00a0->\u00a0\u544a\u8b66\u7b56\u7565 \uff0c\u70b9\u51fb\u89c4\u5219\u6240\u5728\u7684\u7b56\u7565\u8be6\u60c5\u3002
                                                              2. \u70b9\u51fb\u76ee\u6807\u89c4\u5219\u540d\u79f0\uff0c\u67e5\u770b\u89c4\u5219\u8be6\u60c5\uff0c\u67e5\u770b\u5bf9\u5e94\u544a\u8b66\u89c4\u5219\u7684\u6807\u7b7e\u3002

                                                                Note

                                                                \u5728\u6dfb\u52a0\u89c4\u5219\u65f6\u53ef\u6dfb\u52a0\u81ea\u5b9a\u4e49\u6807\u7b7e\u3002

                                                              "},{"location":"end-user/insight/alert-center/inhibition.html#_4","title":"\u67e5\u770b\u544a\u8b66\u6807\u7b7e","text":"
                                                              1. \u70b9\u51fb\u53f3\u4fa7\u5bfc\u822a\u680f\u9009\u62e9\u00a0\u544a\u8b66\u4e2d\u5fc3\u00a0->\u00a0\u544a\u8b66\u5217\u8868 \uff0c\u70b9\u51fb\u544a\u8b66\u6240\u5728\u884c\u67e5\u770b\u544a\u8b66\u8be6\u60c5\u3002

                                                                Note

                                                                \u544a\u8b66\u6807\u7b7e\u7528\u4e8e\u63cf\u8ff0\u544a\u8b66\u7684\u8be6\u7ec6\u4fe1\u606f\u548c\u5c5e\u6027\uff0c\u53ef\u4ee5\u7528\u6765\u521b\u5efa\u6291\u5236\u89c4\u5219\u3002

                                                              "},{"location":"end-user/insight/alert-center/inhibition.html#_5","title":"\u7f16\u8f91\u6291\u5236\u89c4\u5219","text":"
                                                              1. \u70b9\u51fb\u76ee\u6807\u89c4\u5219\u540e\u4fa7\u7684 \u2507 \uff0c\u70b9\u51fb \u7f16\u8f91\uff0c\u8fdb\u5165\u6291\u5236\u89c4\u5219\u7684\u7f16\u8f91\u9875\u3002

                                                              "},{"location":"end-user/insight/alert-center/inhibition.html#_6","title":"\u5220\u9664\u6291\u5236\u89c4\u5219","text":"

                                                              \u70b9\u51fb\u76ee\u6807\u89c4\u5219\u540e\u4fa7\u7684 \u2507 \uff0c\u70b9\u51fb \u5220\u9664\uff0c\u5728\u8f93\u5165\u6846\u4e2d\u8f93\u5165\u6291\u5236\u89c4\u5219\u7684\u540d\u79f0\u5373\u53ef\u5220\u9664\u3002

                                                              "},{"location":"end-user/insight/alert-center/message.html","title":"\u901a\u77e5\u914d\u7f6e","text":"

                                                              \u5728 \u901a\u77e5\u914d\u7f6e \u9875\u9762\uff0c\u53ef\u4ee5\u914d\u7f6e\u901a\u8fc7\u90ae\u4ef6\u3001\u4f01\u4e1a\u5fae\u4fe1\u3001\u9489\u9489\u3001Webhook \u548c\u77ed\u4fe1\u7b49\u65b9\u5f0f\u5411\u7528\u6237\u53d1\u9001\u6d88\u606f\u3002

                                                              "},{"location":"end-user/insight/alert-center/message.html#_2","title":"\u90ae\u4ef6\u7ec4","text":"
                                                              1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u540e\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e\uff0c\u9ed8\u8ba4\u4f4d\u4e8e\u90ae\u4ef6\u901a\u77e5\u5bf9\u8c61\u3002

                                                              2. \u70b9\u51fb \u6dfb\u52a0\u90ae\u7bb1\u7ec4\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u90ae\u4ef6\u5730\u5740\u3002

                                                              3. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u90ae\u7bb1\u7ec4\u3002

                                                              "},{"location":"end-user/insight/alert-center/message.html#_3","title":"\u4f01\u4e1a\u5fae\u4fe1","text":"
                                                              1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u4f01\u4e1a\u5fae\u4fe1\u3002

                                                                \u6709\u5173\u4f01\u4e1a\u5fae\u4fe1\u7fa4\u673a\u5668\u4eba\u7684 URL\uff0c\u8bf7\u53c2\u9605\u4f01\u4e1a\u5fae\u4fe1\u5b98\u65b9\u6587\u6863\uff1a\u5982\u4f55\u4f7f\u7528\u7fa4\u673a\u5668\u4eba\u3002

                                                              2. \u70b9\u51fb \u6dfb\u52a0\u7fa4\u673a\u5668\u4eba\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u7fa4\u673a\u5668\u4eba\u3002

                                                              3. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u9009\u62e9 \u53d1\u9001\u6d4b\u8bd5\u4fe1\u606f\uff0c\u8fd8\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u7fa4\u673a\u5668\u4eba\u3002

                                                              "},{"location":"end-user/insight/alert-center/message.html#_4","title":"\u9489\u9489","text":"
                                                              1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u9489\u9489\uff0c\u70b9\u51fb \u6dfb\u52a0\u7fa4\u673a\u5668\u4eba\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u7fa4\u673a\u5668\u4eba\u3002

                                                                \u6709\u5173\u9489\u9489\u7fa4\u673a\u5668\u4eba\u7684 URL\uff0c\u8bf7\u53c2\u9605\u9489\u9489\u5b98\u65b9\u6587\u6863\uff1a\u81ea\u5b9a\u4e49\u673a\u5668\u4eba\u63a5\u5165\u3002

                                                                Note

                                                                \u52a0\u7b7e\u7684\u65b9\u5f0f\u662f\u9489\u9489\u673a\u5668\u4eba\u4e0e\u5f00\u53d1\u8005\u53cc\u5411\u8fdb\u884c\u5b89\u5168\u8ba4\u8bc1\uff0c\u82e5\u5728\u521b\u5efa\u9489\u9489\u673a\u5668\u4eba\u65f6\u5f00\u542f\u4e86\u52a0\u7b7e\uff0c\u5219\u9700\u8981\u5728\u6b64\u5904\u8f93\u5165\u9489\u9489\u751f\u6210\u7684\u5bc6\u94a5\u3002 \u53ef\u53c2\u8003\u9489\u9489\u81ea\u5b9a\u4e49\u673a\u5668\u4eba\u5b89\u5168\u8bbe\u7f6e\u3002

                                                              2. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u9009\u62e9 \u53d1\u9001\u6d4b\u8bd5\u4fe1\u606f\uff0c\u8fd8\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u7fa4\u673a\u5668\u4eba\u3002

                                                              "},{"location":"end-user/insight/alert-center/message.html#_5","title":"\u98de\u4e66","text":"
                                                              1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u98de\u4e66\uff0c\u70b9\u51fb \u6dfb\u52a0\u7fa4\u673a\u5668\u4eba\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u7fa4\u673a\u5668\u4eba\u3002

                                                                Note

                                                                \u5f53\u98de\u4e66\u7684\u7fa4\u673a\u5668\u4eba\u5f00\u542f\u7b7e\u540d\u6821\u9a8c\u65f6\uff0c\u6dfb\u52a0\u98de\u4e66\u901a\u77e5\u65f6\u9700\u8981\u586b\u5199\u5bf9\u5e94\u7684\u7b7e\u540d\u5bc6\u94a5\u3002\u8bf7\u67e5\u9605 \u81ea\u5b9a\u4e49\u673a\u5668\u4eba\u4f7f\u7528\u6307\u5357\u3002

                                                              2. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u9009\u62e9 \u53d1\u9001\u6d4b\u8bd5\u4fe1\u606f\uff0c\u8fd8\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u7fa4\u673a\u5668\u4eba\u3002

                                                              "},{"location":"end-user/insight/alert-center/message.html#webhook","title":"Webhook","text":"
                                                              1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> Webhook\u3002

                                                                \u6709\u5173 Webhook URL \u53ca\u66f4\u591a\u914d\u7f6e\u65b9\u5f0f\uff0c\u8bf7\u53c2\u9605 webhook \u6587\u6863\u3002

                                                              2. \u70b9\u51fb \u65b0\u5efa Webhook\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a Webhook\u3002

                                                                HTTP Headers\uff1a\u975e\u5fc5\u586b\uff0c\u8bbe\u7f6e\u8bf7\u6c42\u5934\u3002\u53ef\u4ee5\u6dfb\u52a0\u591a\u4e2a Headers\u3002

                                                                Note

                                                                \u6709\u5173 Webhook URL \u53ca\u66f4\u591a\u914d\u7f6e\u65b9\u5f0f\uff0c\u8bf7\u53c2\u9605 webhook \u6587\u6863\u3002

                                                              3. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u9009\u62e9 \u53d1\u9001\u6d4b\u8bd5\u4fe1\u606f\uff0c\u8fd8\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664 Webhook\u3002
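Purely to illustrate what a webhook entry with custom headers captures; the keys and values below are hypothetical and are not the platform's API:

```yaml
# hypothetical sketch of a webhook notification target
webhook:
  url: https://hooks.example.com/alerts   # hypothetical receiver endpoint
  httpHeaders:                            # the optional HTTP Headers above
    Content-Type: application/json
    Authorization: Bearer <token>         # e.g. for receivers requiring auth
```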

                                                              "},{"location":"end-user/insight/alert-center/message.html#_6","title":"\u7ad9\u5185\u4fe1","text":"

                                                              Note

                                                              \u544a\u8b66\u6d88\u606f\u53d1\u9001\u81f3\u7528\u6237\u4e2a\u4eba\u7684\u7ad9\u5185\u4fe1\uff0c\u70b9\u51fb\u9876\u90e8\u7684 \ud83d\udd14 \u7b26\u53f7\u53ef\u4ee5\u67e5\u770b\u901a\u77e5\u6d88\u606f\u3002

                                                              1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u7ad9\u5185\u4fe1\uff0c\u70b9\u51fb\u521b\u5efa\u3002

                                                                • \u7ad9\u5185\u4fe1\u901a\u77e5\u5141\u8bb8\u6dfb\u52a0\u591a\u4e2a\u7528\u6237\u3002

                                                              2. \u914d\u7f6e\u5b8c\u6210\u540e\u81ea\u52a8\u8fd4\u56de \u7ad9\u5185\u4fe1\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u9009\u62e9 \u53d1\u9001\u6d4b\u8bd5\u4fe1\u606f\u3002

                                                              "},{"location":"end-user/insight/alert-center/message.html#_7","title":"\u77ed\u4fe1\u7ec4","text":"
                                                              1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\u70b9\u51fb \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u77ed\u4fe1\uff0c\u70b9\u51fb \u6dfb\u52a0\u77ed\u4fe1\u7ec4\uff0c\u6dfb\u52a0\u4e00\u4e2a\u6216\u591a\u4e2a\u77ed\u4fe1\u7ec4\u3002

                                                              2. \u5728\u5f39\u7a97\u4e2d\u8f93\u5165\u540d\u79f0\u3001\u63a5\u6536\u77ed\u4fe1\u7684\u5bf9\u8c61\u3001\u624b\u673a\u53f7\u4ee5\u53ca\u901a\u77e5\u670d\u52a1\u5668\u3002

                                                                \u901a\u77e5\u670d\u52a1\u5668\u9700\u8981\u9884\u5148\u5728 \u901a\u77e5\u914d\u7f6e -> \u901a\u77e5\u670d\u52a1\u5668 \u4e2d\u6dfb\u52a0\u521b\u5efa\u3002\u76ee\u524d\u652f\u6301\u963f\u91cc\u4e91\u3001\u817e\u8baf\u4e91\u4e24\u79cd\u4e91\u670d\u52a1\u5668\uff0c\u5177\u4f53\u914d\u7f6e\u7684\u53c2\u6570\u8bf7\u53c2\u9605\u81ea\u5df1\u7684\u4e91\u670d\u52a1\u5668\u4fe1\u606f\u3002

                                                              3. \u77ed\u4fe1\u7ec4\u6dfb\u52a0\u6210\u529f\u540e\uff0c\u81ea\u52a8\u8fd4\u56de\u901a\u77e5\u5217\u8868\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507\uff0c\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u77ed\u4fe1\u7ec4\u3002

                                                              "},{"location":"end-user/insight/alert-center/msg-template.html","title":"\u6d88\u606f\u6a21\u677f","text":"

                                                              \u53ef\u89c2\u6d4b\u6027\u63d0\u4f9b\u81ea\u5b9a\u4e49\u6d88\u606f\u6a21\u677f\u5185\u5bb9\u7684\u80fd\u529b\uff0c\u652f\u6301\u90ae\u4ef6\u3001\u4f01\u4e1a\u5fae\u4fe1\u3001\u9489\u9489\u3001Webhook\u3001\u98de\u4e66\u3001\u7ad9\u5185\u4fe1\u7b49\u4e0d\u540c\u7684\u901a\u77e5\u5bf9\u8c61\u5b9a\u4e49\u4e0d\u540c\u7684\u6d88\u606f\u901a\u77e5\u5185\u5bb9\u3002

                                                              "},{"location":"end-user/insight/alert-center/msg-template.html#_2","title":"\u521b\u5efa\u6d88\u606f\u6a21\u677f","text":"
                                                              1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9 \u544a\u8b66\u4e2d\u5fc3 -> \u6d88\u606f\u6a21\u677f\u3002

                                                                Insight \u9ed8\u8ba4\u5185\u7f6e\u4e2d\u82f1\u6587\u4e24\u4e2a\u6a21\u677f\uff0c\u4ee5\u4fbf\u7528\u6237\u4f7f\u7528\u3002

                                                              2. \u70b9\u51fb \u65b0\u5efa\u6d88\u606f\u6a21\u677f \u6309\u94ae\uff0c\u586b\u5199\u6a21\u677f\u5185\u5bb9\u3002

                                                              Info

                                                              Observability provides preset message templates. To define your own template content, refer to Configure Notification Templates.

                                                              "},{"location":"end-user/insight/alert-center/msg-template.html#_3","title":"\u6d88\u606f\u6a21\u677f\u8be6\u60c5","text":"

                                                              Click the name of a message template; a slide-out panel on the right shows the template details.
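
                                                              As a brief sketch, a template body might combine the variables listed in the table below (the exact wording here is illustrative, not one of the built-in templates):

                                                              Rule: {{ .Labels.alertname }} ({{ .Labels.severity }})\nCluster/Namespace: {{ .Labels.cluster }}/{{ .Labels.namespace }}\nStarted at: {{ .StartsAt }}\nDescription: {{ .Annotations.description }}\n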

                                                              Parameter / Variable / Description: Rule Name / {{ .Labels.alertname }} / name of the rule that fired the alert; Policy Name / {{ .Labels.alertgroup }} / name of the alert policy the rule belongs to; Severity / {{ .Labels.severity }} / severity of the fired alert; Cluster / {{ .Labels.cluster }} / cluster where the resource that fired the alert resides; Namespace / {{ .Labels.namespace }} / namespace where the resource that fired the alert resides; Node / {{ .Labels.node }} / node where the resource that fired the alert resides; Resource Type / {{ .Labels.target_type }} / resource type of the alert target; Resource Name / {{ .Labels.target }} / name of the object that fired the alert; Trigger Value / {{ .Annotations.value }} / metric value at the moment the alert notification fired; Start Time / {{ .StartsAt }} / time the alert started; End Time / {{ .EndsAT }} / time the alert ended; Description / {{ .Annotations.description }} / detailed description of the alert; Labels / {{ for .labels}} {{end}} / all labels of the alert; the for function iterates over the labels list to get the full label content."},{"location":"end-user/insight/alert-center/msg-template.html#_4","title":"Edit or Delete a Message Template","text":"

                                                              Click \u2507 on the right side of the list and select Edit or Delete from the pop-up menu to modify or delete a message template.

                                                              Warning

                                                              Note that a deleted template cannot be recovered, so proceed with caution.

                                                              "},{"location":"end-user/insight/alert-center/silent.html","title":"\u544a\u8b66\u9759\u9ed8","text":"

                                                              \u544a\u8b66\u9759\u9ed8\u662f\u6307\u5728\u7279\u5b9a\u7684\u65f6\u95f4\u8303\u56f4\u5185\uff0c\u6839\u636e\u5b9a\u4e49\u597d\u7684\u89c4\u5219\u5bf9\u7b26\u5408\u6761\u4ef6\u7684\u544a\u8b66\u4e0d\u518d\u53d1\u9001\u544a\u8b66\u901a\u77e5\u3002\u8be5\u529f\u80fd\u53ef\u4ee5\u5e2e\u52a9\u8fd0\u7ef4\u4eba\u5458\u907f\u514d\u5728\u67d0\u4e9b\u64cd\u4f5c\u6216\u4e8b\u4ef6\u671f\u95f4\u63a5\u6536\u5230\u8fc7\u591a\u7684\u566a\u58f0\u544a\u8b66\uff0c\u540c\u65f6\u4fbf\u4e8e\u66f4\u52a0\u7cbe\u786e\u5730\u5904\u7406\u771f\u6b63\u9700\u8981\u89e3\u51b3\u7684\u95ee\u9898\u3002

                                                              \u5728\u544a\u8b66\u9759\u9ed8\u9875\u9762\u4e0a\uff0c\u7528\u6237\u53ef\u4ee5\u770b\u5230\u4e24\u4e2a\u9875\u7b7e\uff1a\u6d3b\u8dc3\u89c4\u5219\u548c\u8fc7\u671f\u89c4\u5219\u3002 \u5176\u4e2d\uff0c\u6d3b\u8dc3\u89c4\u5219\u8868\u793a\u76ee\u524d\u6b63\u5728\u751f\u6548\u7684\u89c4\u5219\uff0c\u800c\u8fc7\u671f\u89c4\u5219\u5219\u662f\u4ee5\u524d\u5b9a\u4e49\u8fc7\u4f46\u5df2\u7ecf\u8fc7\u671f\uff08\u6216\u8005\u7528\u6237\u4e3b\u52a8\u5220\u9664\uff09\u7684\u89c4\u5219\u3002

                                                              "},{"location":"end-user/insight/alert-center/silent.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9 \u544a\u8b66\u4e2d\u5fc3 -> \u544a\u8b66\u9759\u9ed8 ,\u70b9\u51fb \u65b0\u5efa\u9759\u9ed8\u89c4\u5219 \u6309\u94ae\u3002

                                                              2. \u586b\u5199\u9759\u9ed8\u89c4\u5219\u7684\u5404\u9879\u53c2\u6570\uff0c\u5982\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u3001\u6807\u7b7e\u3001\u65f6\u95f4\u7b49\uff0c\u4ee5\u5b9a\u4e49\u8fd9\u6761\u89c4\u5219\u7684\u4f5c\u7528\u8303\u56f4\u548c\u751f\u6548\u65f6\u95f4\u3002

                                                              3. \u8fd4\u56de\u89c4\u5219\u5217\u8868\uff0c\u5728\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \uff0c\u53ef\u4ee5\u7f16\u8f91\u6216\u5220\u9664\u9759\u9ed8\u89c4\u5219\u3002

                                                              \u901a\u8fc7\u544a\u8b66\u9759\u9ed8\u529f\u80fd\uff0c\u60a8\u53ef\u4ee5\u7075\u6d3b\u5730\u63a7\u5236\u54ea\u4e9b\u544a\u8b66\u9700\u8981\u88ab\u5ffd\u7565\uff0c\u5728\u4ec0\u4e48\u65f6\u95f4\u6bb5\u5185\u751f\u6548\uff0c\u4ece\u800c\u63d0\u9ad8\u8fd0\u7ef4\u6548\u7387\uff0c\u51cf\u5c11\u8bef\u62a5\u7684\u53ef\u80fd\u6027\u3002

                                                              "},{"location":"end-user/insight/alert-center/sms-provider.html","title":"\u914d\u7f6e\u901a\u77e5\u670d\u52a1\u5668","text":"

                                                              \u53ef\u89c2\u6d4b\u6027 Insight \u652f\u6301\u77ed\u4fe1\u901a\u77e5\uff0c\u76ee\u524d\u901a\u8fc7\u96c6\u6210\u963f\u91cc\u4e91\u3001\u817e\u8baf\u4e91\u7684\u77ed\u4fe1\u670d\u52a1\u53d1\u9001\u544a\u8b66\u6d88\u606f\u3002\u672c\u6587\u4ecb\u7ecd\u4e86\u5982\u4f55\u5728 insight \u4e2d\u914d\u7f6e\u77ed\u4fe1\u901a\u77e5\u7684\u670d\u52a1\u5668\u3002\u77ed\u4fe1\u7b7e\u540d\u4e2d\u652f\u6301\u7684\u53d8\u91cf\u4e3a\u6d88\u606f\u6a21\u677f\u4e2d\u7684\u9ed8\u8ba4\u53d8\u91cf\uff0c\u540c\u65f6\u7531\u4e8e\u77ed\u4fe1\u5b57\u6570\u6709\u9650\uff0c\u5efa\u8bae\u9009\u62e9\u8f83\u4e3a\u660e\u786e\u7684\u53d8\u91cf\u3002

                                                              \u5982\u4f55\u914d\u7f6e\u77ed\u4fe1\u63a5\u6536\u4eba\u53ef\u53c2\u8003\u6587\u6863\uff1a\u914d\u7f6e\u77ed\u4fe1\u901a\u77e5\u7ec4\u3002

                                                              "},{"location":"end-user/insight/alert-center/sms-provider.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u8fdb\u5165 \u544a\u8b66\u4e2d\u5fc3 -> \u901a\u77e5\u914d\u7f6e -> \u901a\u77e5\u670d\u52a1\u5668 \u3002

                                                              2. \u70b9\u51fb \u6dfb\u52a0\u901a\u77e5\u670d\u52a1\u5668 \u3002

                                                                1. \u914d\u7f6e\u963f\u91cc\u4e91\u670d\u52a1\u5668\u3002

                                                                  \u7533\u8bf7\u963f\u91cc\u4e91\u77ed\u4fe1\u670d\u52a1\uff0c\u8bf7\u53c2\u8003\u963f\u91cc\u4e91\u77ed\u4fe1\u670d\u52a1\u3002

                                                                  \u5b57\u6bb5\u8bf4\u660e\uff1a

                                                                  • AccessKey ID \uff1a\u963f\u91cc\u4e91\u7528\u4e8e\u6807\u8bc6\u7528\u6237\u7684\u53c2\u6570\u3002
                                                                  • AccessKey Secret \uff1a\u963f\u91cc\u4e91\u7528\u4e8e\u9a8c\u8bc1\u7528\u6237\u7684\u5bc6\u94a5\u3002AccessKey Secret \u5fc5\u987b\u4fdd\u5bc6\u3002
                                                                  • \u77ed\u4fe1\u7b7e\u540d \uff1a\u77ed\u4fe1\u670d\u52a1\u652f\u6301\u6839\u636e\u7528\u6237\u9700\u6c42\u521b\u5efa\u7b26\u5408\u8981\u6c42\u7684\u7b7e\u540d\u3002\u53d1\u9001\u77ed\u4fe1\u65f6\uff0c\u77ed\u4fe1\u5e73\u53f0\u4f1a\u5c06\u5df2\u5ba1\u6838\u901a\u8fc7\u7684\u77ed\u4fe1\u7b7e\u540d\u6dfb\u52a0\u5230\u77ed\u4fe1\u5185\u5bb9\u4e2d\uff0c\u518d\u53d1\u9001\u7ed9\u77ed\u4fe1\u63a5\u6536\u65b9\u3002
                                                                  • \u6a21\u677f CODE \uff1a\u77ed\u4fe1\u6a21\u677f\u662f\u53d1\u9001\u77ed\u4fe1\u7684\u5177\u4f53\u5185\u5bb9\u3002
                                                                  • \u53c2\u6570\u6a21\u677f \uff1a\u77ed\u4fe1\u6b63\u6587\u6a21\u677f\u53ef\u4ee5\u5305\u542b\u53d8\u91cf\uff0c\u7528\u6237\u53ef\u901a\u8fc7\u53d8\u91cf\u5b9e\u73b0\u81ea\u5b9a\u4e49\u77ed\u4fe1\u5185\u5bb9\u3002

                                                                  \u8bf7\u53c2\u8003\u963f\u91cc\u4e91\u53d8\u91cf\u89c4\u8303\u3002

                                                                  Note

                                                                  Example: the template content defined in Alibaba Cloud is of the form "${severity}: ... was triggered at ${startat}". Refer to the figure above for the parameter template configuration.
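
                                                                  As a sketch of the parameter template (the JSON mapping format is an assumption; check the Alibaba Cloud console for the exact syntax), each SMS variable is mapped to an Insight message-template variable:

                                                                  {\"severity\": \"{{ .Labels.severity }}\", \"startat\": \"{{ .StartsAt }}\"}\n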

                                                                2. Configure the Tencent Cloud server.

                                                                  To apply for the Tencent Cloud SMS service, refer to Tencent Cloud SMS.

                                                                  Field descriptions:

                                                                  • Secret ID: the parameter Tencent Cloud uses to identify the API caller.
                                                                  • SecretKey: the parameter Tencent Cloud uses to authenticate the API caller's identity.
                                                                  • SMS Template ID: the SMS template ID, generated automatically by the Tencent Cloud system.
                                                                  • Signature Content: the SMS signature content, i.e. the full or abbreviated name of the actual website defined in the Tencent Cloud SMS signature.
                                                                  • SdkAppId: the SMS SdkAppId, the actual SdkAppId generated after adding an application in the Tencent Cloud SMS console.
                                                                  • Parameter Template: the SMS body template can contain variables, which users can use to customize the message content. Refer to: Tencent Cloud variable specification.

                                                                  Note

                                                                  Example: the template content defined in Tencent Cloud is: {1}: {2} was triggered at {3}. Refer to the figure above for the parameter template configuration.

                                                              "},{"location":"end-user/insight/collection-manag/agent-status.html","title":"insight-agent \u7ec4\u4ef6\u72b6\u6001\u8bf4\u660e","text":"

                                                              \u5728 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u53ef\u89c2\u6d4b\u6027 Insight \u4f5c\u4e3a\u591a\u96c6\u7fa4\u89c2\u6d4b\u4ea7\u54c1\uff0c\u4e3a\u4e86\u5b9e\u73b0\u591a\u96c6\u7fa4\u89c2\u6d4b\u6570\u636e\u7684\u7edf\u4e00\u91c7\u96c6\uff0c\u9700\u8981\u7528\u6237\u5b89\u88c5 Helm \u5e94\u7528 insight-agent \uff08\u9ed8\u8ba4\u5b89\u88c5\u5728 insight-system \u547d\u540d\u7a7a\u95f4\uff09\u3002\u53c2\u9605\u5982\u4f55\u5b89\u88c5 insight-agent \u3002

                                                              "},{"location":"end-user/insight/collection-manag/agent-status.html#_1","title":"\u72b6\u6001\u8bf4\u660e","text":"

                                                              \u5728 \u53ef\u89c2\u6d4b\u6027 -> \u91c7\u96c6\u7ba1\u7406 \u90e8\u5206\u53ef\u67e5\u770b\u5404\u96c6\u7fa4\u5b89\u88c5 insight-agent \u7684\u60c5\u51b5\u3002

                                                              • \u672a\u5b89\u88c5 \uff1a\u8be5\u96c6\u7fa4\u4e2d\u672a\u5728 insight-system \u547d\u540d\u7a7a\u95f4\u4e0b\u5b89\u88c5 insight-agent
                                                              • \u8fd0\u884c\u4e2d \uff1a\u8be5\u96c6\u7fa4\u4e2d\u6210\u529f\u5b89\u88c5 insight-agent \uff0c\u4e14\u90e8\u7f72\u7684\u6240\u6709\u7ec4\u4ef6\u5747\u5904\u4e8e\u8fd0\u884c\u4e2d\u72b6\u6001
                                                              • \u5f02\u5e38 \uff1a\u82e5 insight-agent \u5904\u4e8e\u6b64\u72b6\u6001\uff0c\u8bf4\u660e helm \u90e8\u7f72\u5931\u8d25\u6216\u5b58\u5728\u90e8\u7f72\u7684\u7ec4\u4ef6\u5904\u4e8e\u975e\u8fd0\u884c\u4e2d\u72b6\u6001

                                                              \u53ef\u901a\u8fc7\u4ee5\u4e0b\u65b9\u5f0f\u6392\u67e5\uff1a

                                                              1. \u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\uff0c\u82e5\u72b6\u6001\u4e3a deployed \uff0c\u5219\u6267\u884c\u4e0b\u4e00\u6b65\u3002\u82e5\u4e3a failed \uff0c\u7531\u4e8e\u4f1a\u5f71\u54cd\u5e94\u7528\u7684\u5347\u7ea7\uff0c\u5efa\u8bae\u5728 \u5bb9\u5668\u7ba1\u7406 -> helm \u5e94\u7528 \u5378\u8f7d\u540e\u91cd\u65b0\u5b89\u88c5 :

                                                                helm list -n insight-system\n
                                                              2. Run the following command, or check the status of the components deployed in the cluster in Observability -> Collection Management. If any pods are not in the Running state, restart the abnormal pods.

                                                                kubectl get pods -n insight-system\n
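
                                                                A minimal sketch of restarting an abnormal pod (the pod name is a placeholder; a pod managed by a Deployment or DaemonSet is recreated automatically after deletion):

                                                                kubectl delete pod <pod-name> -n insight-system\n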
                                                              "},{"location":"end-user/insight/collection-manag/agent-status.html#_2","title":"\u8865\u5145\u8bf4\u660e","text":"
                                                              1. insight-agent \u4e2d\u6307\u6807\u91c7\u96c6\u7ec4\u4ef6 Prometheus \u7684\u8d44\u6e90\u6d88\u8017\u4e0e\u96c6\u7fa4\u4e2d\u8fd0\u884c\u7684\u5bb9\u5668\u7ec4\u6570\u91cf\u5b58\u5728\u6b63\u6bd4\u5173\u7cfb\uff0c \u8bf7\u6839\u636e\u96c6\u7fa4\u89c4\u6a21\u8c03\u6574 Prometheus \u7684\u8d44\u6e90\uff0c\u8bf7\u53c2\u8003\uff1aPrometheus \u8d44\u6e90\u89c4\u5212

                                                              2. \u7531\u4e8e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e2d\u6307\u6807\u5b58\u50a8\u7ec4\u4ef6 vmstorage \u7684\u5b58\u50a8\u5bb9\u91cf\u4e0e\u5404\u4e2a\u96c6\u7fa4\u5bb9\u5668\u7ec4\u6570\u91cf\u603b\u548c\u5b58\u5728\u6b63\u6bd4\u5173\u7cfb\u3002

                                                                • \u8bf7\u8054\u7cfb\u5e73\u53f0\u7ba1\u7406\u5458\u6839\u636e\u96c6\u7fa4\u89c4\u6a21\u8c03\u6574 vmstorage \u7684\u78c1\u76d8\u5bb9\u91cf\uff0c\u53c2\u9605 vmstorage \u78c1\u76d8\u5bb9\u91cf\u89c4\u5212
                                                                • \u6839\u636e\u591a\u96c6\u7fa4\u89c4\u6a21\u8c03\u6574 vmstorage \u78c1\u76d8\uff0c\u53c2\u9605 vmstorge \u78c1\u76d8\u6269\u5bb9
                                                              "},{"location":"end-user/insight/collection-manag/collection-manag.html","title":"\u91c7\u96c6\u7ba1\u7406","text":"

                                                              \u91c7\u96c6\u7ba1\u7406 \u4e3b\u8981\u662f\u96c6\u4e2d\u7ba1\u7406\u3001\u5c55\u793a\u96c6\u7fa4\u5b89\u88c5\u91c7\u96c6\u63d2\u4ef6 insight-agent \u7684\u5165\u53e3\uff0c\u5e2e\u52a9\u7528\u6237\u5feb\u901f\u7684\u67e5\u770b\u96c6\u7fa4\u91c7\u96c6\u63d2\u4ef6\u7684\u5065\u5eb7\u72b6\u6001\uff0c\u5e76\u63d0\u4f9b\u4e86\u5feb\u6377\u5165\u53e3\u914d\u7f6e\u91c7\u96c6\u89c4\u5219\u3002

                                                              \u5177\u4f53\u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b\uff1a

                                                              1. \u70b9\u51fb\u5de6\u4e0a\u89d2\u7684\uff0c\u9009\u62e9 \u53ef\u89c2\u6d4b\u6027 \u3002

                                                              2. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u91c7\u96c6\u7ba1\u7406 \uff0c\u67e5\u770b\u5168\u90e8\u96c6\u7fa4\u91c7\u96c6\u63d2\u4ef6\u7684\u72b6\u6001\u3002

                                                              3. \u96c6\u7fa4\u63a5\u5165 insight-agent \u4e14\u5904\u4e8e\u8fd0\u884c\u4e2d\u72b6\u6001\u65f6\uff0c\u70b9\u51fb\u67d0\u4e2a\u96c6\u7fa4\u540d\u79f0\u8fdb\u5165\u8be6\u60c5\u3002

                                                              4. \u5728 \u670d\u52a1\u76d1\u63a7 \u9875\u7b7e\u4e2d\uff0c\u70b9\u51fb\u5feb\u6377\u94fe\u63a5\u8df3\u8f6c\u5230 \u5bb9\u5668\u7ba1\u7406 -> \u81ea\u5b9a\u4e49\u8d44\u6e90 \u6dfb\u52a0\u670d\u52a1\u53d1\u73b0\u89c4\u5219\u3002

                                                              "},{"location":"end-user/insight/collection-manag/metric-collect.html","title":"\u6307\u6807\u6293\u53d6\u65b9\u5f0f","text":"

                                                              Prometheus \u4e3b\u8981\u901a\u8fc7 Pull \u7684\u65b9\u5f0f\u6765\u6293\u53d6\u76ee\u6807\u670d\u52a1\u66b4\u9732\u51fa\u6765\u7684\u76d1\u63a7\u63a5\u53e3\uff0c\u56e0\u6b64\u9700\u8981\u914d\u7f6e\u5bf9\u5e94\u7684\u6293\u53d6\u4efb\u52a1\u6765\u8bf7\u6c42\u76d1\u63a7\u6570\u636e\u5e76\u5199\u5165\u5230 Prometheus \u63d0\u4f9b\u7684\u5b58\u50a8\u4e2d\uff0c\u76ee\u524d Prometheus \u670d\u52a1\u63d0\u4f9b\u4e86\u5982\u4e0b\u51e0\u4e2a\u4efb\u52a1\u7684\u914d\u7f6e\uff1a

                                                              • \u539f\u751f Job \u914d\u7f6e\uff1a\u63d0\u4f9b Prometheus \u539f\u751f\u6293\u53d6 Job \u7684\u914d\u7f6e\u3002
                                                              • Pod Monitor\uff1a\u5728 K8S \u751f\u6001\u4e0b\uff0c\u57fa\u4e8e Prometheus Operator \u6765\u6293\u53d6 Pod \u4e0a\u5bf9\u5e94\u7684\u76d1\u63a7\u6570\u636e\u3002
                                                              • Service Monitor\uff1a\u5728 K8S \u751f\u6001\u4e0b\uff0c\u57fa\u4e8e Prometheus Operator \u6765\u6293\u53d6 Service \u5bf9\u5e94 Endpoints \u4e0a\u7684\u76d1\u63a7\u6570\u636e\u3002

                                                              Note

                                                              Configuration items in [ ] are optional.

                                                              "},{"location":"end-user/insight/collection-manag/metric-collect.html#job","title":"\u539f\u751f Job \u914d\u7f6e","text":"

                                                              \u76f8\u5e94\u914d\u7f6e\u9879\u8bf4\u660e\u5982\u4e0b\uff1a

                                                              # Name of the scrape job; a label (job=job_name) is also added to the scraped metrics\njob_name: <job_name>\n\n# Scrape interval\n[ scrape_interval: <duration> | default = <global_config.scrape_interval> ]\n\n# Scrape request timeout\n[ scrape_timeout: <duration> | default = <global_config.scrape_timeout> ]\n\n# URI path of the scrape request\n[ metrics_path: <path> | default = /metrics ]\n\n# How to handle conflicts between scraped labels and labels added by the backend Prometheus.\n# true: keep the scraped labels and ignore the conflicting backend Prometheus labels;\n# false: prefix conflicting scraped labels with exported_<original-label> and add the labels from the backend Prometheus;\n[ honor_labels: <boolean> | default = false ]\n\n# Whether to use the timestamps generated on the scraped target.\n# true: if the target provides timestamps, use them;\n# false: ignore the timestamps on the target;\n[ honor_timestamps: <boolean> | default = true ]\n\n# Scrape protocol: http or https\n[ scheme: <scheme> | default = http ]\n\n# URL parameters of the scrape request\nparams:\n  [ <string>: [<string>, ...] ]\n\n# Set the `Authorization` header of the scrape request via basic auth. password/password_file are mutually exclusive; password_file takes precedence.\nbasic_auth:\n  [ username: <string> ]\n  [ password: <secret> ]\n  [ password_file: <string> ]\n\n# Set the `Authorization` header of the scrape request via bearer token. bearer_token/bearer_token_file are mutually exclusive; bearer_token takes precedence.\n[ bearer_token: <secret> ]\n\n# Set the `Authorization` header of the scrape request via a bearer token file. bearer_token/bearer_token_file are mutually exclusive; bearer_token takes precedence.\n[ bearer_token_file: <filename> ]\n\n# Whether the scrape connection goes through a TLS secure channel; configure the corresponding TLS parameters\ntls_config:\n  [ <tls_config> ]\n\n# Scrape the metrics on the target through a proxy service; fill in the proxy service address.\n[ proxy_url: <string> ]\n\n# Specify targets via static configuration; see the description below.\nstatic_configs:\n  [ - <static_config> ... ]\n\n# CVM service discovery configuration; see the description below.\ncvm_sd_configs:\n  [ - <cvm_sd_config> ... ]\n\n# After scraping, rewrite the labels on the target via the relabel mechanism; multiple relabel rules are executed in order.\n# See the description of relabel_config below.\nrelabel_configs:\n  [ - <relabel_config> ... ]\n\n# Before the scraped data is written, rewrite the label values via the relabel mechanism; multiple relabel rules are executed in order.\n# See the description of relabel_config below.\nmetric_relabel_configs:\n  [ - <relabel_config> ... ]\n\n# Limit on the number of data points per scrape; 0 means no limit, default 0\n[ sample_limit: <int> | default = 0 ]\n\n# Limit on the number of targets per scrape; 0 means no limit, default 0\n[ target_limit: <int> | default = 0 ]\n
                                                              "},{"location":"end-user/insight/collection-manag/metric-collect.html#pod-monitor","title":"Pod Monitor","text":"

                                                              The corresponding configuration items are described below:

                                                              # Prometheus Operator CRD version\napiVersion: monitoring.coreos.com/v1\n# The corresponding Kubernetes resource type, here Pod Monitor\nkind: PodMonitor\n# The corresponding Kubernetes Metadata; only name needs attention. If jobLabel is not specified, the value of the job label in the scraped metrics is <namespace>/<name>\nmetadata:\n  name: redis-exporter # fill in a unique name\n  namespace: cm-prometheus  # the namespace is fixed, do not modify it\n# Describes the selection of target Pods and the configuration of the scrape job\n  label:\n    operator.insight.io/managed-by: insight # label identifying management by Insight\nspec:\n  # Fill in the label of the target Pod; the pod monitor uses its value as the job label value.\n  # If looking at the Pod YAML, take the value from pod.metadata.labels.\n  # If looking at a Deployment/Daemonset/Statefulset, take spec.template.metadata.labels.\n  [ jobLabel: string ]\n  # Adds the labels on the corresponding Pod to the Target's labels\n  [ podTargetLabels: []string ]\n  # Limit on the number of data points per scrape; 0 means no limit, default 0\n  [ sampleLimit: uint64 ]\n  # Limit on the number of targets per scrape; 0 means no limit, default 0\n  [ targetLimit: uint64 ]\n  # Configure the exposed Prometheus HTTP endpoints to scrape; multiple Endpoints can be configured\n  podMetricsEndpoints:\n  [ - <endpoint_config> ... ] # see the endpoint description below\n  # Select the namespaces of the Pods to monitor; if omitted, all namespaces are selected\n  [ namespaceSelector: ]\n    # Whether to select all namespaces\n    [ any: bool ]\n    # List of namespaces to select\n    [ matchNames: []string ]\n  # Fill in the label values of the Pods to monitor, to locate the target Pods [K8S metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)\n  selector:\n    [ matchExpressions: array ]\n      [ example: - {key: tier, operator: In, values: [cache]} ]\n    [ matchLabels: object ]\n      [ example: k8s-app: redis-exporter ]\n
                                                              "},{"location":"end-user/insight/collection-manag/metric-collect.html#1","title":"\u4e3e\u4f8b 1","text":"
                                                              apiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: redis-exporter # fill in a unique name\n  namespace: cm-prometheus # the namespace is fixed, do not modify it\n  label:\n    operator.insight.io/managed-by: insight  # label identifying management by Insight, required.\nspec:\n  podMetricsEndpoints:\n    - interval: 30s\n      port: metric-port # fill in the name of the port corresponding to the Prometheus Exporter in the pod YAML\n      path: /metrics # fill in the path of the Prometheus Exporter; defaults to /metrics if omitted\n      relabelings:\n        - action: replace\n          sourceLabels:\n            - instance\n          regex: (.*)\n          targetLabel: instance\n          replacement: \"crs-xxxxxx\" # change to the corresponding Redis instance ID\n        - action: replace\n          sourceLabels:\n            - instance\n          regex: (.*)\n          targetLabel: ip\n          replacement: \"1.x.x.x\" # change to the corresponding Redis instance IP\n  namespaceSelector: # select the namespaces of the Pods to monitor\n    matchNames:\n      - redis-test\n  selector: # fill in the label values of the Pods to monitor, to locate the target pods\n    matchLabels:\n      k8s-app: redis-exporter\n
                                                              "},{"location":"end-user/insight/collection-manag/metric-collect.html#2","title":"\u4e3e\u4f8b 2","text":"
                                                              job_name: prometheus\nscrape_interval: 30s\nstatic_configs:\n- targets:\n  - 127.0.0.1:9090\n
                                                              "},{"location":"end-user/insight/collection-manag/metric-collect.html#service-monitor","title":"Service Monitor","text":"

                                                              The corresponding configuration items are described below:

                                                              # Prometheus Operator CRD version\napiVersion: monitoring.coreos.com/v1\n# The corresponding Kubernetes resource type, here Service Monitor\nkind: ServiceMonitor\n# The corresponding Kubernetes Metadata; only name needs attention. If jobLabel is not specified, the value of the job label in the scraped metrics is the Service name.\nmetadata:\n  name: redis-exporter # fill in a unique name\n  namespace: cm-prometheus  # the namespace is fixed, do not modify it\n# Describes the selection of target Pods and the configuration of the scrape job\n  label:\n    operator.insight.io/managed-by: insight  # label identifying management by Insight, required.\nspec:\n  # Fill in the label (metadata/labels) of the target Pod; the service monitor uses its value as the job label value\n  [ jobLabel: string ]\n  # Adds the labels on the corresponding service to the Target's labels\n  [ targetLabels: []string ]\n  # Adds the labels on the corresponding Pod to the Target's labels\n  [ podTargetLabels: []string ]\n  # Limit on the number of data points per scrape; 0 means no limit, default 0\n  [ sampleLimit: uint64 ]\n  # Limit on the number of targets per scrape; 0 means no limit, default 0\n  [ targetLimit: uint64 ]\n  # Configure the exposed Prometheus HTTP endpoints to scrape; multiple Endpoints can be configured\n  endpoints:\n  [ - <endpoint_config> ... ] # see the endpoint description below\n  # Select the namespaces of the Pods to monitor; if omitted, all namespaces are selected\n  [ namespaceSelector: ]\n    # Whether to select all namespaces\n    [ any: bool ]\n    # List of namespaces to select\n    [ matchNames: []string ]\n  # Fill in the label values of the Pods to monitor, to locate the target Pods [K8S metav1.LabelSelector](https://v1-17.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#labelselector-v1-meta)\n  selector:\n    [ matchExpressions: array ]\n      [ example: - {key: tier, operator: In, values: [cache]} ]\n    [ matchLabels: object ]\n      [ example: k8s-app: redis-exporter ]\n
                                                              "},{"location":"end-user/insight/collection-manag/metric-collect.html#_2","title":"\u4e3e\u4f8b","text":"
                                                              apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: go-demo # fill in a unique name\n  namespace: cm-prometheus # the namespace is fixed, do not modify it\n  label:\n    operator.insight.io/managed-by: insight  # label identifying management by Insight, required.\nspec:\n  endpoints:\n    - interval: 30s\n      # fill in the name of the Port corresponding to the Prometheus Exporter in the service YAML\n      port: 8080-8080-tcp\n      # fill in the Path of the Prometheus Exporter; defaults to /metrics if omitted\n      path: /metrics\n      relabelings:\n        # ** there must be a label named application; here we assume Kubernetes carries a label named app,\n        # which we rewrite to application using the replace action of relabel\n        - action: replace\n          sourceLabels: [__meta_kubernetes_pod_label_app]\n          targetLabel: application\n  # select the namespace where the Service to monitor lives\n  namespaceSelector:\n    matchNames:\n      - golang-demo\n  # fill in the label values of the Service to monitor, to locate the target service\n  selector:\n    matchLabels:\n      app: golang-app-demo\n
                                                              "},{"location":"end-user/insight/collection-manag/metric-collect.html#endpoint_config","title":"endpoint_config","text":"

                                                              The corresponding configuration items are described below:

                                                              # The name of the corresponding port. Note that this is not the port number. Default: 80. The value comes from:\n# ServiceMonitor: the corresponding Service>spec/ports/name;\n# PodMonitor: as follows:\n#   If looking at the Pod YAML, take the value from pod.spec.containers.ports.name.\n#   If looking at a Deployment/Daemonset/Statefulset, take spec.template.spec.containers.ports.name\n[ port: string | default = 80]\n# URI path of the scrape request\n[ path: string | default = /metrics ]\n# Scrape protocol: http or https\n[ scheme: string | default = http]\n# URL parameters of the scrape request\n[ params: map[string][]string]\n# Scrape interval\n[ interval: string | default = 30s ]\n# Scrape timeout\n[ scrapeTimeout: string | default = 30s]\n# Whether the scrape connection goes through a TLS secure channel; configure the corresponding TLS parameters\n[ tlsConfig: TLSConfig ]\n# Read the bearer token value from the given file and put it into the header of the scrape request\n[ bearerTokenFile: string ]\n# Read the bearer token from the given Kubernetes secret key; note the secret namespace must match the PodMonitor/ServiceMonitor\n[ bearerTokenSecret: string ]\n# How to handle conflicts between scraped labels and labels added by the backend Prometheus.\n# true: keep the scraped labels and ignore the conflicting backend Prometheus labels;\n# false: prefix conflicting scraped labels with exported_<original-label> and add the labels from the backend Prometheus;\n[ honorLabels: bool | default = false ]\n# Whether to use the timestamps generated on the scraped target.\n# true: if the target provides timestamps, use them;\n# false: ignore the timestamps on the target;\n[ honorTimestamps: bool | default = true ]\n# Basic auth credentials; fill username/password with the values of the corresponding Kubernetes secret keys; note the secret namespace must match the PodMonitor/ServiceMonitor.\n[ basicAuth: BasicAuth ]\n# Scrape the metrics on the target through a proxy service; fill in the proxy service address\n[ proxyUrl: string ]\n# After scraping, rewrite the labels on the target via the relabel mechanism; multiple relabel rules are executed in order.\n# See the description of relabel_config below\nrelabelings:\n[ - <relabel_config> ...]\n# Before the scraped data is written, rewrite the label values via the relabel mechanism; multiple relabel rules are executed in order.\n# See the description of relabel_config below\nmetricRelabelings:\n[ - <relabel_config> ...]\n
                                                              "},{"location":"end-user/insight/collection-manag/metric-collect.html#relabel_config","title":"relabel_config","text":"

                                                              The corresponding configuration items are described below:

                                                              # Which labels to take from the original labels for relabeling; the extracted values are concatenated using the character defined in separator.\n# For PodMonitor/ServiceMonitor the corresponding configuration item is sourceLabels\n[ source_labels: '[' <labelname> [, ...] ']' ]\n# The character used to concatenate the label values to relabel, default ';'\n[ separator: <string> | default = ; ]\n\n# When action is replace/hashmod, target_label specifies the corresponding label name.\n# For PodMonitor/ServiceMonitor the corresponding configuration item is targetLabel\n[ target_label: <labelname> ]\n\n# Regular expression matched against the values of the source labels\n[ regex: <regex> | default = (.*) ]\n\n# Used when action is hashmod; the md5 of the source label values is taken modulo this value\n[ modulus: <int> ]\n\n# When action is replace, replacement defines the expression to substitute once regex has matched; regex capture groups can be referenced\n[ replacement: <string> | default = $1 ]\n\n# The operation performed based on the regex match; the available actions are as follows, default replace:\n# replace: if regex matches, substitute the value defined in replacement, setting and adding the label specified by target_label\n# keep: if regex does not match, discard\n# drop: if regex matches, discard\n# hashmod: take the md5 of the source labels modulo the value specified by modulus\n# and add a new label whose name is specified by target_label\n# labelmap: if regex matches, replace the matching label names using replacement\n# labeldrop: if regex matches, delete the corresponding label\n# labelkeep: if regex does not match, delete the corresponding label\n[ action: <relabel_action> | default = replace ]\n
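
                                                              As a brief sketch (the metric name pattern go_gc_.* is purely illustrative), a rule inside a scrape job that drops metrics whose name matches a regex before they are written could look like this:

                                                              metric_relabel_configs:\n- source_labels: [__name__]\n  regex: go_gc_.*\n  action: drop\n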
                                                              "},{"location":"end-user/insight/collection-manag/probe-module.html","title":"\u81ea\u5b9a\u4e49\u63a2\u6d4b\u65b9\u5f0f","text":"

                                                              Insight uses the Blackbox Exporter provided officially by Prometheus as its blackbox monitoring solution; it can probe target instances over HTTP, HTTPS, DNS, ICMP, TCP, and gRPC. It can be used in the following scenarios:

                                                              • HTTP/HTTPS: URL/API availability checks
                                                              • ICMP: host liveness checks
                                                              • TCP: port liveness checks
                                                              • DNS: domain name resolution

                                                              In this article, we describe how to configure custom probes in the existing Blackbox ConfigMap.

                                                              Insight does not enable the ICMP probe by default because ICMP requires higher privileges. We therefore use the ICMP and HTTP probes as examples to show how to modify the ConfigMap to implement custom ICMP and HTTP probing.

                                                              "},{"location":"end-user/insight/collection-manag/probe-module.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 \u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u70b9\u51fb\u8fdb\u5165\u76ee\u6807\u96c6\u7fa4\u7684\u8be6\u60c5\uff1b
                                                              2. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\uff0c\u9009\u62e9 \u914d\u7f6e\u4e0e\u5bc6\u94a5 -> \u914d\u7f6e\u9879 \uff1b
                                                              3. \u627e\u5230\u540d\u4e3a insight-agent-prometheus-blackbox-exporter \u7684\u914d\u7f6e\u9879\uff0c\u70b9\u51fb \u7f16\u8f91 YAML\uff1b

                                                                \u5728 modules \u4e0b\u6dfb\u52a0\u81ea\u5b9a\u4e49\u63a2\u6d4b\u65b9\u5f0f\uff1a

                                                              HTTP probe:
                                                              module:\n  http_2xx:\n    prober: http\n    timeout: 5s\n    http:\n      valid_http_versions: [HTTP/1.1, HTTP/2]\n      valid_status_codes: []  # Defaults to 2xx\n      method: GET\n

                                                              ICMP probe:

                                                              module:\n  ICMP: # example ICMP probe configuration\n    prober: icmp\n    timeout: 5s\n    icmp:\n      preferred_ip_protocol: ip4\nicmp_example: # example ICMP probe configuration 2\n  prober: icmp\n  timeout: 5s\n  icmp:\n    preferred_ip_protocol: \"ip4\"\n    source_ip_address: \"127.0.0.1\"\n
                                                              Since ICMP requires higher privileges, we also need to elevate the Pod's privileges; otherwise an operation not permitted error occurs. There are two ways to elevate privileges:

                                                              • Option 1: edit the Blackbox Exporter Deployment directly to enable it

                                                                apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: insight-agent-prometheus-blackbox-exporter\n  namespace: insight-system\nspec:\n  template:\n    spec:\n      containers:\n        - name: blackbox-exporter\n          image: # ... (image, args, ports, etc. remain unchanged)\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            allowPrivilegeEscalation: false\n            capabilities:\n              add:\n              - NET_RAW\n              drop:\n              - ALL\n            readOnlyRootFilesystem: true\n            runAsGroup: 0\n            runAsNonRoot: false\n            runAsUser: 0\n
                                                              • Option 2: elevate privileges via Helm Upgrade

                                                                prometheus-blackbox-exporter:\n  enabled: true\n  securityContext:\n    runAsUser: 0\n    runAsGroup: 0\n    readOnlyRootFilesystem: true\n    runAsNonRoot: false\n    allowPrivilegeEscalation: false\n    capabilities:\n      add: [\"NET_RAW\"]\n
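
                                                                A sketch of applying these values (the release name insight-agent and the chart reference are assumptions; adjust them to your environment):

                                                                helm upgrade insight-agent <insight-agent-chart> -n insight-system --reuse-values -f values.yaml\n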

                                                              Info

                                                              For more probe methods, refer to the blackbox_exporter Configuration.

                                                              "},{"location":"end-user/insight/collection-manag/probe-module.html#_3","title":"\u5176\u4ed6\u53c2\u8003","text":"

                                                              \u4ee5\u4e0b YAML \u6587\u4ef6\u4e2d\u5305\u542b\u4e86 HTTP\u3001TCP\u3001SMTP\u3001ICMP\u3001DNS \u7b49\u591a\u79cd\u63a2\u6d4b\u65b9\u5f0f\uff0c\u53ef\u6839\u636e\u9700\u6c42\u81ea\u884c\u4fee\u6539 insight-agent-prometheus-blackbox-exporter \u7684\u914d\u7f6e\u6587\u4ef6\u3002

                                                              \u70b9\u51fb\u67e5\u770b\u5b8c\u6574\u7684 YAML \u6587\u4ef6
                                                              kind: ConfigMap\napiVersion: v1\nmetadata:\n  name: insight-agent-prometheus-blackbox-exporter\n  namespace: insight-system\n  labels:\n    app.kubernetes.io/instance: insight-agent\n    app.kubernetes.io/managed-by: Helm\n    app.kubernetes.io/name: prometheus-blackbox-exporter\n    app.kubernetes.io/version: v0.24.0\n    helm.sh/chart: prometheus-blackbox-exporter-8.8.0\n  annotations:\n    meta.helm.sh/release-name: insight-agent\n    meta.helm.sh/release-namespace: insight-system\ndata:\n  blackbox.yaml: |\n    modules:\n      HTTP_GET:\n        prober: http\n        timeout: 5s\n        http:\n          method: GET\n          valid_http_versions: [\"HTTP/1.1\", \"HTTP/2.0\"]\n          follow_redirects: true\n          preferred_ip_protocol: \"ip4\"\n      HTTP_POST:\n        prober: http\n        timeout: 5s\n        http:\n          method: POST\n          body_size_limit: 1MB\n      TCP:\n        prober: tcp\n        timeout: 5s\n      # disabled by default:\n      # ICMP:\n      #   prober: icmp\n      #   timeout: 5s\n      #   icmp:\n      #     preferred_ip_protocol: ip4\n      SSH:\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n          - expect: \"^SSH-2.0-\"\n      POP3S:\n        prober: tcp\n        tcp:\n          query_response:\n          - expect: \"^+OK\"\n          tls: true\n          tls_config:\n            insecure_skip_verify: false\n      http_2xx_example:               # example http probe\n        prober: http\n        timeout: 5s                   # probe timeout\n        http:\n          valid_http_versions: [\"HTTP/1.1\", \"HTTP/2.0\"]                   # accepted HTTP versions in the response; the default is usually fine\n          valid_status_codes: []  # Defaults to 2xx; the probe succeeds if the response code falls within this range\n          method: GET                 # request method\n          headers:                    # request headers\n            Host: vhost.example.com\n            Accept-Language: en-US\n            Origin: example.com\n          no_follow_redirects: false  # whether redirects are followed\n          fail_if_ssl: false\n          fail_if_not_ssl: false\n          fail_if_body_matches_regexp:\n            - \"Could not connect to database\"\n          fail_if_body_not_matches_regexp:\n            - \"Download the latest version here\"\n          fail_if_header_matches: # Verifies that no cookies are set\n            - header: Set-Cookie\n              allow_missing: true\n              regexp: '.*'\n          fail_if_header_not_matches:\n            - header: Access-Control-Allow-Origin\n              regexp: '(\\*|example\\.com)'\n          tls_config:                  # TLS configuration for https requests\n            insecure_skip_verify: false\n          preferred_ip_protocol: \"ip4\" # defaults to \"ip6\"                 # preferred IP protocol version\n          ip_protocol_fallback: false  # no fallback to \"ip6\"\n      http_post_2xx:                   # example http probe with a body\n        prober: http\n        timeout: 5s\n        http:\n          method: POST                 # request method used by the probe\n          headers:\n            Content-Type: application/json\n          body: '{\"username\":\"admin\",\"password\":\"123456\"}'                   # body carried by the probe\n      http_basic_auth_example:         # example probe with username and password\n        prober: http\n        timeout: 5s\n        http:\n          method: POST\n          headers:\n            Host: \"login.example.com\"\n          basic_auth:                  # username and password added for the probe\n            username: \"username\"\n            password: \"mysecret\"\n      http_custom_ca_example:\n        prober: http\n        http:\n          method: GET\n          tls_config:                  # root certificate used for the probe\n            ca_file: \"/certs/my_cert.crt\"\n      http_gzip:\n        prober: http\n        http:\n          method: GET\n          compression: gzip            # compression method used by the probe\n      http_gzip_with_accept_encoding:\n        prober: http\n        http:\n          method: GET\n          compression: gzip\n          headers:\n            Accept-Encoding: gzip\n      tls_connect:                     # example TCP probe\n        prober: tcp\n        timeout: 5s\n        tcp:\n          tls: true                    # whether to use TLS\n      tcp_connect_example:\n        prober: tcp\n        timeout: 5s\n      imap_starttls:                   # example configuration for probing an IMAP mail server\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - expect: \"OK.*STARTTLS\"\n            - send: \". STARTTLS\"\n            - expect: \"OK\"\n            - starttls: true\n            - send: \". capability\"\n            - expect: \"CAPABILITY IMAP4rev1\"\n      smtp_starttls:                   # example configuration for probing an SMTP mail server\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - expect: \"^220 ([^ ]+) ESMTP (.+)$\"\n            - send: \"EHLO prober\\r\"\n            - expect: \"^250-STARTTLS\"\n            - send: \"STARTTLS\\r\"\n            - expect: \"^220\"\n            - starttls: true\n            - send: \"EHLO prober\\r\"\n            - expect: \"^250-AUTH\"\n            - send: \"QUIT\\r\"\n      irc_banner_example:\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - send: \"NICK prober\"\n            - send: \"USER prober prober prober :prober\"\n            - expect: \"PING :([^ ]+)\"\n              send: \"PONG ${1}\"\n            - expect: \"^:[^ ]+ 001\"\n      # icmp_example:                    # example ICMP probe configuration\n      #   prober: icmp\n      #   timeout: 5s\n      #   icmp:\n      #     preferred_ip_protocol: \"ip4\"\n      #     source_ip_address: \"127.0.0.1\"\n      dns_udp_example:                 # example DNS query over UDP\n        prober: dns\n        timeout: 5s\n        dns:\n          query_name: \"www.prometheus.io\"                 # domain name to resolve\n          query_type: \"A\"              # record type to query for the domain\n          valid_rcodes:\n          - NOERROR\n          validate_answer_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n            fail_if_all_match_regexp:\n            - \".*127.0.0.1\"\n            fail_if_not_matches_regexp:\n            - \"www.prometheus.io.\\t300\\tIN\\tA\\t127.0.0.1\"\n            fail_if_none_matches_regexp:\n            - \"127.0.0.1\"\n          validate_authority_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n          validate_additional_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n      dns_soa:\n        prober: dns\n        dns:\n          query_name: \"prometheus.io\"\n          query_type: \"SOA\"\n      dns_tcp_example:               # example DNS query over TCP\n        prober: dns\n        dns:\n          transport_protocol: \"tcp\" # defaults to \"udp\"\n          preferred_ip_protocol: \"ip4\" # defaults to \"ip6\"\n          query_name: \"www.prometheus.io\"\n
                                                              "},{"location":"end-user/insight/collection-manag/service-monitor.html","title":"\u914d\u7f6e\u670d\u52a1\u53d1\u73b0\u89c4\u5219","text":"

                                                              \u53ef\u89c2\u6d4b Insight \u652f\u6301\u901a\u8fc7 \u5bb9\u5668\u7ba1\u7406 \u521b\u5efa CRD ServiceMonitor \u7684\u65b9\u5f0f\u6765\u6ee1\u8db3\u60a8\u81ea\u5b9a\u4e49\u670d\u52a1\u53d1\u73b0\u7684\u91c7\u96c6\u9700\u6c42\u3002 \u7528\u6237\u53ef\u4ee5\u901a\u8fc7\u4f7f\u7528 ServiceMonitor \u81ea\u884c\u5b9a\u4e49 Pod \u53d1\u73b0\u7684 Namespace \u8303\u56f4\u4ee5\u53ca\u901a\u8fc7 matchLabel \u6765\u9009\u62e9\u76d1\u542c\u7684 Service\u3002

                                                              "},{"location":"end-user/insight/collection-manag/service-monitor.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u96c6\u7fa4\u5df2\u5b89\u88c5 Helm \u5e94\u7528 insight-agent \u4e14\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u3002

                                                              "},{"location":"end-user/insight/collection-manag/service-monitor.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u91c7\u96c6\u7ba1\u7406 \uff0c\u67e5\u770b\u5168\u90e8\u96c6\u7fa4\u91c7\u96c6\u63d2\u4ef6\u7684\u72b6\u6001\u3002

                                                              2. \u70b9\u51fb\u5217\u8868\u4e2d\u7684\u67d0\u4e2a\u96c6\u7fa4\u540d\u79f0\u8fdb\u5165\u91c7\u96c6\u914d\u7f6e\u8be6\u60c5\u3002

                                                              3. \u70b9\u51fb\u94fe\u63a5\u8df3\u8f6c\u5230 \u5bb9\u5668\u7ba1\u7406 \u4e2d\u521b\u5efa Service Monitor\u3002

apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: micrometer-demo # (1)\n  namespace: insight-system # (2)\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  endpoints: # (3)\n    - honorLabels: true\n      interval: 15s\n      path: /actuator/prometheus\n      port: http\n  namespaceSelector: # (4)\n    matchNames:\n      - insight-system # (5)\n  selector: # (6)\n    matchLabels:\n      micrometer-prometheus-discovery: \"true\"\n
1. Specify the name of the ServiceMonitor
2. Specify the namespace of the ServiceMonitor
3. This is the service endpoint, i.e. the address from which Prometheus scrapes metrics. endpoints is an array, and multiple endpoints can be created at once. Each endpoint contains three fields, with the following meanings:

  • interval: the interval at which Prometheus scrapes the current endpoint, in seconds; in this example it is set to 15s.
  • path: the scrape path used by Prometheus. In this example it is /actuator/prometheus.
  • port: the port used for scraping; its value is the name configured on the port of the Service being scraped.
4. This is the scope of Services to be discovered. namespaceSelector contains two mutually exclusive fields, with the following meanings:

  • any: has one and only one value, true. When this field is set, changes to all Services matching the Selector filter are watched.
  • matchNames: an array specifying the namespaces to watch. For example, to watch Services only in the default and insight-system namespaces, set matchNames as follows:

                                                                    namespaceSelector:\n  matchNames:\n    - default\n    - insight-system\n
5. The namespace matched here is the namespace of the application that exposes the metrics

6. Used to select the Service; a sketch of a matching Service follows below
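For the ServiceMonitor above to pick anything up, the target Service must carry the matching label and a named port. A minimal sketch (the Service name and pod selector are illustrative, not from the original):

apiVersion: v1
kind: Service
metadata:
  name: micrometer-demo            # illustrative name
  namespace: insight-system
  labels:
    micrometer-prometheus-discovery: "true"   # matched by spec.selector.matchLabels above
spec:
  selector:
    app: micrometer-demo           # illustrative pod selector
  ports:
    - name: http                   # must match spec.endpoints[].port in the ServiceMonitor
      port: 8080
      targetPort: 8080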
                                                              "},{"location":"end-user/insight/dashboard/dashboard.html","title":"\u4eea\u8868\u76d8","text":"

                                                              Grafana \u662f\u4e00\u79cd\u5f00\u6e90\u7684\u6570\u636e\u53ef\u89c6\u5316\u548c\u76d1\u63a7\u5e73\u53f0\uff0c\u5b83\u63d0\u4f9b\u4e86\u4e30\u5bcc\u7684\u56fe\u8868\u548c\u9762\u677f\uff0c\u7528\u4e8e\u5b9e\u65f6\u76d1\u63a7\u3001\u5206\u6790\u548c\u53ef\u89c6\u5316\u5404\u79cd\u6570\u636e\u6e90\u7684\u6307\u6807\u548c\u65e5\u5fd7\u3002\u53ef\u89c2\u6d4b\u6027 Insight \u4f7f\u7528\u5f00\u6e90 Grafana \u63d0\u4f9b\u76d1\u63a7\u670d\u52a1\uff0c\u652f\u6301\u4ece\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u547d\u540d\u7a7a\u95f4\u7b49\u591a\u7ef4\u5ea6\u67e5\u770b\u8d44\u6e90\u6d88\u8017\u60c5\u51b5\uff0c

                                                              \u5173\u4e8e\u5f00\u6e90 Grafana \u7684\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u53c2\u89c1 Grafana \u5b98\u65b9\u6587\u6863\u3002

                                                              "},{"location":"end-user/insight/dashboard/dashboard.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u9009\u62e9 \u4eea\u8868\u76d8 \u3002

                                                                • \u5728 Insight /\u6982\u89c8 \u4eea\u8868\u76d8\u4e2d\uff0c\u53ef\u67e5\u770b\u591a\u9009\u96c6\u7fa4\u7684\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\uff0c\u5e76\u4ee5\u547d\u540d\u7a7a\u95f4\u3001\u5bb9\u5668\u7ec4\u7b49\u591a\u4e2a\u7ef4\u5ea6\u5206\u6790\u4e86\u8d44\u6e90\u4f7f\u7528\u3001\u7f51\u7edc\u3001\u5b58\u50a8\u7b49\u60c5\u51b5\u3002

                                                                • \u70b9\u51fb\u4eea\u8868\u76d8\u5de6\u4e0a\u4fa7\u7684\u4e0b\u62c9\u6846\u53ef\u5207\u6362\u96c6\u7fa4\u3002

                                                                • \u70b9\u51fb\u4eea\u8868\u76d8\u53f3\u4e0b\u4fa7\u53ef\u5207\u6362\u67e5\u8be2\u7684\u65f6\u95f4\u8303\u56f4\u3002

                                                              2. Insight \u7cbe\u9009\u591a\u4e2a\u793e\u533a\u63a8\u8350\u4eea\u8868\u76d8\uff0c\u53ef\u4ece\u8282\u70b9\u3001\u547d\u540d\u7a7a\u95f4\u3001\u5de5\u4f5c\u8d1f\u8f7d\u7b49\u591a\u4e2a\u7ef4\u5ea6\u8fdb\u884c\u76d1\u63a7\u3002\u70b9\u51fb insight-system / Insight /\u6982\u89c8 \u533a\u57df\u5207\u6362\u4eea\u8868\u76d8\u3002

                                                              Note

1. To access the Grafana UI, refer to Log in to Grafana as an Administrator.

2. To import custom dashboards, refer to Import Custom Dashboards.

                                                              "},{"location":"end-user/insight/dashboard/import-dashboard.html","title":"\u5bfc\u5165\u81ea\u5b9a\u4e49\u4eea\u8868\u76d8","text":"

                                                              \u901a\u8fc7\u4f7f\u7528 Grafana CRD\uff0c\u53ef\u4ee5\u5c06\u4eea\u8868\u677f\u7684\u7ba1\u7406\u548c\u90e8\u7f72\u7eb3\u5165\u5230 Kubernetes \u7684\u751f\u547d\u5468\u671f\u7ba1\u7406\u4e2d\uff0c\u5b9e\u73b0\u4eea\u8868\u677f\u7684\u7248\u672c\u63a7\u5236\u3001\u81ea\u52a8\u5316\u90e8\u7f72\u548c\u96c6\u7fa4\u7ea7\u7684\u7ba1\u7406\u3002\u672c\u9875\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7 CRD \u548c UI \u754c\u9762\u5bfc\u5165\u81ea\u5b9a\u4e49\u7684\u4eea\u8868\u76d8\u3002

                                                              "},{"location":"end-user/insight/dashboard/import-dashboard.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0 \u5e73\u53f0\uff0c\u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 \uff0c\u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u9009\u62e9 kpanda-global-cluster \u3002

                                                              2. \u9009\u62e9\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u81ea\u5b9a\u4e49\u8d44\u6e90 \uff0c\u5728\u5217\u8868\u4e2d\u67e5\u627e grafanadashboards.integreatly.org \u6587\u4ef6\uff0c\u8fdb\u5165\u8be6\u60c5\u3002

                                                              3. \u70b9\u51fb Yaml \u521b\u5efa \uff0c\u4f7f\u7528\u4ee5\u4e0b\u6a21\u677f\uff0c\u5728 Json \u5b57\u6bb5\u4e2d\u66ff\u6362\u4eea\u8868\u76d8 JSON\u3002

                                                                • namespace \uff1a\u586b\u5199\u76ee\u6807\u547d\u540d\u7a7a\u95f4\uff1b
                                                                • name \uff1a\u586b\u5199\u4eea\u8868\u76d8\u7684\u540d\u79f0\u3002
                                                                • label \uff1a\u5fc5\u586b\uff0c operator.insight.io/managed-by: insight \u3002
                                                                apiVersion: integreatly.org/v1alpha1\nkind: GrafanaDashboard\nmetadata:\n  labels:\n    app: insight-grafana-operator\n    operator.insight.io/managed-by: insight\n  name: sample-dashboard\n  namespace: insight-system\nspec:\n  json: >\n    {\n      \"id\": null,\n      \"title\": \"Simple Dashboard\",\n      \"tags\": [],\n      \"style\": \"dark\",\n      \"timezone\": \"browser\",\n      \"editable\": true,\n      \"hideControls\": false,\n      \"graphTooltip\": 1,\n      \"panels\": [],\n      \"time\": {\n        \"from\": \"now-6h\",\n        \"to\": \"now\"\n      },\n      \"timepicker\": {\n        \"time_options\": [],\n        \"refresh_intervals\": []\n      },\n      \"templating\": {\n        \"list\": []\n      },\n      \"annotations\": {\n        \"list\": []\n      },\n      \"refresh\": \"5s\",\n      \"schemaVersion\": 17,\n      \"version\": 0,\n      \"links\": []\n    }\n
4. After clicking Confirm, wait a moment and you can view the newly imported dashboard under Dashboard.

                                                              Info

To design a custom dashboard, refer to Add Dashboard Panels.

                                                              "},{"location":"end-user/insight/dashboard/login-grafana.html","title":"\u8bbf\u95ee\u539f\u751f Grafana","text":"

                                                              Insight \u501f\u52a9 Grafana \u63d0\u4f9b\u4e86\u4e30\u5bcc\u7684\u53ef\u89c6\u5316\u80fd\u529b\uff0c\u540c\u65f6\u4fdd\u7559\u4e86\u8bbf\u95ee\u539f\u751f Grafana \u7684\u5165\u53e3\u3002

                                                              "},{"location":"end-user/insight/dashboard/login-grafana.html#_1","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u767b\u5f55\u6d4f\u89c8\u5668\uff0c\u5728\u6d4f\u89c8\u5668\u4e2d\u8f93\u5165 Grafana \u5730\u5740\u3002

                                                                \u8bbf\u95ee\u5730\u5740\uff1a http://ip:\u8bbf\u95ee\u7aef\u53e3/ui/insight-grafana/login

                                                                \u4f8b\u5982\uff1a http://10.6.10.233:30209/ui/insight-grafana/login

                                                              2. \u70b9\u51fb\u53f3\u4e0b\u89d2\u7684\u767b\u5f55\uff0c\u4f7f\u7528\u9ed8\u8ba4\u7528\u6237\u540d\u3001\u5bc6\u7801\uff08admin/admin\uff09\u8fdb\u884c\u767b\u5f55\u3002

                                                              3. \u70b9\u51fb Log in \u5b8c\u6210\u767b\u5f55\u3002

                                                              "},{"location":"end-user/insight/dashboard/overview.html","title":"\u6982\u89c8","text":"

                                                              \u6982\u7387 \u4ec5\u7edf\u8ba1\u5df2\u5b89\u88c5 insight-agent \u4e14\u5176\u8fd0\u884c\u72b6\u6001\u4e3a\u6b63\u5e38\u7684\u96c6\u7fa4\u6570\u636e\u3002\u53ef\u5728\u6982\u89c8\u4e2d\u591a\u96c6\u7fa4\u7684\u8d44\u6e90\u6982\u51b5\uff1a

                                                              • \u544a\u8b66\u7edf\u8ba1\uff1a\u53ef\u67e5\u770b\u6240\u6709\u96c6\u7fa4\u7684\u6b63\u5728\u544a\u8b66\u7684\u7edf\u8ba1\u6570\u636e\u3002
                                                              • \u8d44\u6e90\u6d88\u8017\uff1a\u53ef\u6309 CPU \u4f7f\u7528\u7387\u3001\u5185\u5b58\u4f7f\u7528\u7387\u548c\u78c1\u76d8\u4f7f\u7528\u7387\u5206\u522b\u67e5\u770b\u8fd1\u4e00\u5c0f\u65f6 TOP5 \u96c6\u7fa4\u3001\u8282\u70b9\u7684\u8d44\u6e90\u53d8\u5316\u8d8b\u52bf\u3002
                                                              • \u9ed8\u8ba4\u6309\u7167\u6839\u636e CPU \u4f7f\u7528\u7387\u6392\u5e8f\u3002\u60a8\u53ef\u5207\u6362\u6307\u6807\u5207\u6362\u96c6\u7fa4\u3001\u8282\u70b9\u7684\u6392\u5e8f\u65b9\u5f0f\u3002
                                                              • \u8d44\u6e90\u53d8\u5316\u8d8b\u52bf\uff1a\u53ef\u67e5\u770b\u8fd1 15 \u5929\u7684\u8282\u70b9\u4e2a\u6570\u8d8b\u52bf\u4ee5\u53ca\u4e00\u5c0f\u65f6 Pod \u7684\u8fd0\u884c\u8d8b\u52bf\u3002
                                                              • \u670d\u52a1\u8bf7\u6c42\u6392\u884c\uff1a\u53ef\u67e5\u770b\u591a\u96c6\u7fa4\u4e2d\u8bf7\u6c42\u5ef6\u65f6\u3001\u9519\u8bef\u7387\u6392\u884c TOP5 \u7684\u670d\u52a1\u53ca\u6240\u5728\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u3002
                                                              "},{"location":"end-user/insight/dashboard/overview.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                              \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u6982\u89c8 \u3002

                                                              "},{"location":"end-user/insight/data-query/log.html","title":"\u65e5\u5fd7\u67e5\u8be2","text":"

                                                              Insight \u9ed8\u8ba4\u91c7\u96c6\u8282\u70b9\u65e5\u5fd7\u3001\u5bb9\u5668\u65e5\u5fd7\u4ee5\u53ca kubernetes \u5ba1\u8ba1\u65e5\u5fd7\u3002\u5728\u65e5\u5fd7\u67e5\u8be2\u9875\u9762\u4e2d\uff0c\u53ef\u67e5\u8be2\u767b\u5f55\u8d26\u53f7\u6743\u9650\u5185\u7684\u6807\u51c6\u8f93\u51fa (stdout) \u65e5\u5fd7\uff0c\u5305\u62ec\u8282\u70b9\u65e5\u5fd7\u3001\u4ea7\u54c1\u65e5\u5fd7\u3001Kubenetes \u5ba1\u8ba1\u65e5\u5fd7\u7b49\uff0c\u5feb\u901f\u5728\u5927\u91cf\u65e5\u5fd7\u4e2d\u67e5\u8be2\u5230\u6240\u9700\u7684\u65e5\u5fd7\uff0c\u540c\u65f6\u7ed3\u5408\u65e5\u5fd7\u7684\u6765\u6e90\u4fe1\u606f\u548c\u4e0a\u4e0b\u6587\u539f\u59cb\u6570\u636e\u8f85\u52a9\u5b9a\u4f4d\u95ee\u9898\u3002

                                                              "},{"location":"end-user/insight/data-query/log.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u70b9\u51fb\u4e00\u7ea7\u5bfc\u822a\u680f\u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u3002
                                                              2. \u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9 \u65e5\u5fd7 \u3002

                                                                • \u9ed8\u8ba4\u67e5\u8be2\u6700\u8fd1 24 \u5c0f\u65f6\uff1b
                                                                • \u7b2c\u4e00\u6b21\u8fdb\u5165\u65f6\uff0c\u9ed8\u8ba4\u6839\u636e\u767b\u5f55\u8d26\u53f7\u6743\u9650\u67e5\u8be2\u6709\u6743\u9650\u7684\u96c6\u7fa4\u6216\u547d\u540d\u7a7a\u95f4\u7684\u5bb9\u5668\u65e5\u5fd7\uff1b

                                                              3. \u9876\u90e8 Tab \u9ed8\u8ba4\u8fdb\u5165 \u666e\u901a\u67e5\u8be2 \u3002

                                                                1. \u70b9\u51fb \u7b5b\u9009 \u5c55\u5f00\u8fc7\u6ee4\u9762\u677f\uff0c\u53ef\u5207\u6362\u65e5\u5fd7\u641c\u7d22\u6761\u4ef6\u548c\u7c7b\u578b\u3002
                                                                2. \u65e5\u5fd7\u7c7b\u578b\uff1a

                                                                  • \u5bb9\u5668\u65e5\u5fd7 \uff1a\u8bb0\u5f55\u96c6\u7fa4\u4e2d\u5bb9\u5668\u5185\u90e8\u7684\u6d3b\u52a8\u548c\u4e8b\u4ef6\uff0c\u5305\u62ec\u5e94\u7528\u7a0b\u5e8f\u7684\u8f93\u51fa\u3001\u9519\u8bef\u6d88\u606f\u3001\u8b66\u544a\u548c\u8c03\u8bd5\u4fe1\u606f\u7b49\u3002\u652f\u6301\u901a\u8fc7\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u3001\u5bb9\u5668\u7ec4\u3001\u5bb9\u5668\u8fc7\u6ee4\u65e5\u5fd7\u3002
                                                                  • \u8282\u70b9\u65e5\u5fd7 \uff1a\u8bb0\u5f55\u96c6\u7fa4\u4e2d\u6bcf\u4e2a\u8282\u70b9\u7684\u7cfb\u7edf\u7ea7\u522b\u65e5\u5fd7\u3002\u8fd9\u4e9b\u65e5\u5fd7\u5305\u542b\u8282\u70b9\u7684\u64cd\u4f5c\u7cfb\u7edf\u3001\u5185\u6838\u3001\u670d\u52a1\u548c\u7ec4\u4ef6\u7684\u76f8\u5173\u4fe1\u606f\u3002\u652f\u6301\u901a\u8fc7\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u6587\u4ef6\u8def\u5f84\u8fc7\u6ee4\u65e5\u5fd7\u3002
                                                                3. \u652f\u6301\u5bf9\u5355\u4e2a\u5173\u952e\u5b57\u8fdb\u884c\u6a21\u7cca\u641c\u7d22\u3002

                                                              4. \u9876\u90e8\u5207\u6362 Tab \u9009\u62e9 Lucene \u8bed\u6cd5\u67e5\u8be2 \u3002

                                                                \u7b2c\u4e00\u6b21\u8fdb\u5165\u65f6\uff0c\u9ed8\u8ba4\u9009\u62e9\u767b\u5f55\u8d26\u53f7\u6743\u9650\u67e5\u8be2\u6709\u6743\u9650\u7684\u96c6\u7fa4\u6216\u547d\u540d\u7a7a\u95f4\u7684\u5bb9\u5668\u65e5\u5fd7\u3002

                                                                Lucene \u8bed\u6cd5\u8bf4\u660e\uff1a

                                                                1. \u4f7f\u7528 \u903b\u8f91\u64cd\u4f5c\u7b26\uff08AND\u3001OR\u3001NOT\u3001\"\" \uff09\u7b26\u67e5\u8be2\u591a\u4e2a\u5173\u952e\u5b57\uff0c\u4f8b\u5982\uff1akeyword1 AND (keyword2 OR keyword3) NOT keyword4\u3002
                                                                2. \u4f7f\u7528\u6ce2\u6d6a\u53f7 (~) \u5b9e\u73b0\u6a21\u7cca\u67e5\u8be2\uff0c\u5728 \"~\" \u540e\u53ef\u6307\u5b9a\u53ef\u9009\u7684\u53c2\u6570\uff0c\u7528\u4e8e\u63a7\u5236\u6a21\u7cca\u67e5\u8be2\u7684\u76f8\u4f3c\u5ea6\uff0c\u4e0d\u6307\u5b9a\u5219\u9ed8\u8ba4\u4f7f\u7528 0.5\u3002\u4f8b\u5982\uff1aerror~\u3002
                                                                3. \u4f7f\u7528\u901a\u914d\u7b26 (*\u3001?) \u7528\u4f5c\u5355\u5b57\u7b26\u901a\u914d\u7b26\uff0c\u8868\u793a\u5339\u914d\u4efb\u610f\u4e00\u4e2a\u5b57\u7b26\u3002
                                                                4. \u4f7f\u7528\u65b9\u62ec\u53f7 [ ] \u6216\u82b1\u62ec\u53f7 { } \u6765\u67e5\u8be2\u8303\u56f4\uff0c\u65b9\u62ec\u53f7\u00a0[ ]\u00a0\u8868\u793a\u95ed\u533a\u95f4\uff0c\u5305\u542b\u8fb9\u754c\u503c\u3002\u82b1\u62ec\u53f7\u00a0{ }\u00a0\u8868\u793a\u5f00\u533a\u95f4\uff0c\u6392\u9664\u8fb9\u754c\u503c\u3002\u8303\u56f4\u67e5\u8be2\u53ea\u9002\u7528\u4e8e\u80fd\u591f\u8fdb\u884c\u6392\u5e8f\u7684\u5b57\u6bb5\u7c7b\u578b\uff0c\u5982\u6570\u5b57\u3001\u65e5\u671f\u7b49\u3002\u4f8b\u5982\uff1atimestamp:[2022-01-01 TO 2022-01-31]\u3002
                                                                5. \u66f4\u591a\u7528\u6cd5\u8bf7\u67e5\u770b\uff1aLucene \u8bed\u6cd5\u8bf4\u660e\u3002
                                                              "},{"location":"end-user/insight/data-query/log.html#_3","title":"\u5176\u4ed6\u64cd\u4f5c","text":""},{"location":"end-user/insight/data-query/log.html#_4","title":"\u67e5\u770b\u65e5\u5fd7\u4e0a\u4e0b\u6587","text":"

                                                              \u70b9\u51fb\u65e5\u5fd7\u540e\u7684\u6309\u94ae\uff0c\u5728\u53f3\u4fa7\u5212\u51fa\u9762\u677f\u4e2d\u53ef\u67e5\u770b\u8be5\u6761\u65e5\u5fd7\u7684\u9ed8\u8ba4 100 \u6761\u4e0a\u4e0b\u6587\u3002\u53ef\u5207\u6362 \u663e\u793a\u884c\u6570 \u67e5\u770b\u66f4\u591a\u4e0a\u4e0b\u6587\u5185\u5bb9\u3002

                                                              "},{"location":"end-user/insight/data-query/log.html#_5","title":"\u5bfc\u51fa\u65e5\u5fd7\u6570\u636e","text":"

                                                              \u70b9\u51fb\u5217\u8868\u53f3\u4e0a\u4fa7\u7684\u4e0b\u8f7d\u6309\u94ae\u3002

                                                              • \u652f\u6301\u914d\u7f6e\u5bfc\u51fa\u7684\u65e5\u5fd7\u5b57\u6bb5\uff0c\u6839\u636e\u65e5\u5fd7\u7c7b\u578b\u53ef\u914d\u7f6e\u7684\u5b57\u6bb5\u4e0d\u540c\uff0c\u5176\u4e2d \u65e5\u5fd7\u5185\u5bb9 \u5b57\u6bb5\u4e3a\u5fc5\u9009\u3002
                                                              • \u652f\u6301\u5c06\u65e5\u5fd7\u67e5\u8be2\u7ed3\u679c\u5bfc\u51fa\u4e3a .txt \u6216 .csv \u683c\u5f0f\u3002

                                                              Note

                                                              \u82e5\u9700\u6307\u5b9a\u4e0d\u91c7\u96c6\u67d0\u4e00\u4e9b\u5bb9\u5668\u7ec4\u7684\u65e5\u5fd7\uff0c\u53ef\u53c2\u8003\uff1a\u5bb9\u5668\u65e5\u5fd7\u9ed1\u540d\u5355\u3002

                                                              "},{"location":"end-user/insight/data-query/metric.html","title":"\u6307\u6807\u67e5\u8be2","text":"

                                                              \u6307\u6807\u67e5\u8be2\u652f\u6301\u67e5\u8be2\u5bb9\u5668\u5404\u8d44\u6e90\u7684\u6307\u6807\u6570\u636e\uff0c\u53ef\u67e5\u770b\u76d1\u63a7\u6307\u6807\u7684\u8d8b\u52bf\u53d8\u5316\u3002\u540c\u65f6\uff0c\u9ad8\u7ea7\u67e5\u8be2\u652f\u6301\u539f\u751f PromQL \u8bed\u53e5\u8fdb\u884c\u6307\u6807\u67e5\u8be2\u3002

                                                              "},{"location":"end-user/insight/data-query/metric.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              • \u96c6\u7fa4\u4e2d\u5df2\u5b89\u88c5 insight-agent \u4e14\u5e94\u7528\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u3002
                                                              "},{"location":"end-user/insight/data-query/metric.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u70b9\u51fb\u4e00\u7ea7\u5bfc\u822a\u680f\u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u3002

                                                              2. \u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u9009\u62e9 \u6307\u6807 \u3002

                                                              3. \u9009\u62e9\u96c6\u7fa4\u3001\u7c7b\u578b\u3001\u8282\u70b9\u3001\u6307\u6807\u540d\u79f0\u67e5\u8be2\u6761\u4ef6\u540e\uff0c\u70b9\u51fb \u641c\u7d22 \uff0c\u5c4f\u5e55\u53f3\u4fa7\u5c06\u663e\u793a\u5bf9\u5e94\u6307\u6807\u56fe\u8868\u53ca\u6570\u636e\u8be6\u60c5\u3002

                                                              4. \u652f\u6301\u81ea\u5b9a\u4e49\u65f6\u95f4\u8303\u56f4\u3002\u53ef\u624b\u52a8\u70b9\u51fb \u5237\u65b0 \u56fe\u6807\u6216\u9009\u62e9\u9ed8\u8ba4\u65f6\u95f4\u95f4\u9694\u8fdb\u884c\u5237\u65b0\u3002

                                                              5. \u70b9\u51fb \u9ad8\u7ea7\u67e5\u8be2 \u9875\u7b7e\u901a\u8fc7\u539f\u751f\u7684 PromQL \u67e5\u8be2\u3002

                                                              Note

See the PromQL syntax.

                                                              "},{"location":"end-user/insight/infra/cluster.html","title":"\u96c6\u7fa4\u76d1\u63a7","text":"

                                                              \u901a\u8fc7\u96c6\u7fa4\u76d1\u63a7\uff0c\u4f60\u53ef\u4ee5\u67e5\u770b\u96c6\u7fa4\u7684\u57fa\u672c\u4fe1\u606f\u3001\u8be5\u96c6\u7fa4\u4e2d\u7684\u8d44\u6e90\u6d88\u8017\u4ee5\u53ca\u4e00\u6bb5\u65f6\u95f4\u7684\u8d44\u6e90\u6d88\u8017\u53d8\u5316\u8d8b\u52bf\u7b49\u3002

                                                              "},{"location":"end-user/insight/infra/cluster.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u96c6\u7fa4\u4e2d\u5df2\u5b89\u88c5 insight-agent \u4e14\u5e94\u7528\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u3002

                                                              "},{"location":"end-user/insight/infra/cluster.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u4ea7\u54c1\u6a21\u5757\u3002

                                                              2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u57fa\u7840\u8bbe\u65bd -> \u96c6\u7fa4 \u3002\u5728\u8be5\u9875\u9762\u53ef\u67e5\u770b\u4ee5\u4e0b\u4fe1\u606f\uff1a

                                                                • \u8d44\u6e90\u6982\u89c8 \uff1a\u591a\u9009\u96c6\u7fa4\u4e2d\u7684\u8282\u70b9\u3001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u6b63\u5e38\u548c\u5168\u90e8\u7684\u6570\u91cf\u7edf\u8ba1\uff1b
                                                                • \u6545\u969c \uff1a\u7edf\u8ba1\u5f53\u524d\u96c6\u7fa4\u4ea7\u751f\u7684\u544a\u8b66\u6570\u91cf\uff1b
                                                                • \u8d44\u6e90\u6d88\u8017 \uff1a\u6240\u9009\u96c6\u7fa4\u7684 CPU\u3001\u5185\u5b58\u3001\u78c1\u76d8\u7684\u5b9e\u9645\u4f7f\u7528\u91cf\u548c\u603b\u91cf\uff1b
                                                                • \u6307\u6807\u8bf4\u660e \uff1a\u6240\u9009\u96c6\u7fa4\u7684 CPU\u3001\u5185\u5b58\u3001\u78c1\u76d8\u8bfb\u5199\u3001\u7f51\u7edc\u63a5\u6536\u53d1\u9001\u7684\u53d8\u5316\u8d8b\u52bf\u3002

                                                              3. \u5207\u6362\u5230 \u8d44\u6e90\u6c34\u4f4d\u7ebf\u76d1\u63a7 \u9875\u7b7e\uff0c\u53ef\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u7684\u66f4\u591a\u76d1\u63a7\u6570\u636e\u3002

                                                              "},{"location":"end-user/insight/infra/cluster.html#_4","title":"\u53c2\u8003\u6307\u6807\u8bf4\u660e","text":"\u6307\u6807\u540d \u8bf4\u660e CPU \u4f7f\u7528\u7387 \u8be5\u6307\u6807\u662f\u6307\u96c6\u7fa4\u4e2d\u6240\u6709 Pod \u8d44\u6e90\u7684\u5b9e\u9645 CPU \u7528\u91cf\u4e0e\u6240\u6709\u8282\u70b9\u7684 CPU \u603b\u91cf\u7684\u6bd4\u7387\u3002 CPU \u5206\u914d\u7387 \u8be5\u6307\u6807\u662f\u6307\u96c6\u7fa4\u4e2d\u6240\u6709 Pod \u7684 CPU \u8bf7\u6c42\u91cf\u7684\u603b\u548c\u4e0e\u6240\u6709\u8282\u70b9\u7684 CPU \u603b\u91cf\u7684\u6bd4\u7387\u3002 \u5185\u5b58\u4f7f\u7528\u7387 \u8be5\u6307\u6807\u662f\u6307\u96c6\u7fa4\u4e2d\u6240\u6709 Pod \u8d44\u6e90\u7684\u5b9e\u9645\u5185\u5b58\u7528\u91cf\u4e0e\u6240\u6709\u8282\u70b9\u7684\u5185\u5b58\u603b\u91cf\u7684\u6bd4\u7387\u3002 \u5185\u5b58\u5206\u914d\u7387 \u8be5\u6307\u6807\u662f\u6307\u96c6\u7fa4\u4e2d\u6240\u6709 Pod \u7684\u5185\u5b58\u8bf7\u6c42\u91cf\u7684\u603b\u548c\u4e0e\u6240\u6709\u8282\u70b9\u7684\u5185\u5b58\u603b\u91cf\u7684\u6bd4\u7387\u3002"},{"location":"end-user/insight/infra/container.html","title":"\u5bb9\u5668\u76d1\u63a7","text":"

Container monitoring is the monitoring of workloads in cluster management. In the list you can view a workload's basic information and status. On the workload details page, you can view the number of active alerts and the trends of CPU, memory, and other resource consumption.

"},{"location":"end-user/insight/infra/container.html#_2","title":"Prerequisites","text":"

insight-agent is installed in the cluster, and all of its pods are in the Running state.

• To install insight-agent, refer to Install insight-agent Online or Upgrade insight-agent Offline.
"},{"location":"end-user/insight/infra/container.html#_3","title":"Steps","text":"

Follow these steps to view service monitoring metrics:

1. Go to the Observability product module.

2. Select Infrastructure -> Workloads in the left navigation bar.

3. Switch the top tabs to view data of different workload types.

4. Click the name of a target workload to view its details.

  1. Faults: the fault card shows the total number of active alerts of the workload.
  2. Resource consumption: this card shows the CPU, memory, and network usage of the workload.
  3. Monitoring metrics: view the CPU, memory, network, and disk trends of the workload over the default 1-hour window.

5. Switch to the Pod List tab to view the status, node, restart count, and other information of each pod of the workload.

6. Switch to the JVM Monitoring tab to view the JVM metrics of each pod.

  Note

  1. JVM monitoring supports only the Java language.
  2. To enable JVM monitoring, refer to Start Monitoring Java Applications.
                                                              "},{"location":"end-user/insight/infra/container.html#_4","title":"\u6307\u6807\u53c2\u8003\u8bf4\u660e","text":"\u6307\u6807\u540d\u79f0 \u8bf4\u660e CPU \u4f7f\u7528\u91cf \u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u5bb9\u5668\u7ec4\u7684 CPU \u4f7f\u7528\u91cf\u4e4b\u548c\u3002 CPU \u8bf7\u6c42\u91cf \u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u5bb9\u5668\u7ec4\u7684 CPU \u8bf7\u6c42\u91cf\u4e4b\u548c\u3002 CPU \u9650\u5236\u91cf \u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u5bb9\u5668\u7ec4\u7684 CPU \u9650\u5236\u91cf\u4e4b\u548c\u3002 \u5185\u5b58\u4f7f\u7528\u91cf \u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u5bb9\u5668\u7ec4\u7684\u5185\u5b58\u4f7f\u7528\u91cf\u4e4b\u548c\u3002 \u5185\u5b58\u8bf7\u6c42\u91cf \u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u5bb9\u5668\u7ec4\u7684\u5185\u5b58\u4f7f\u7528\u91cf\u4e4b\u548c\u3002 \u5185\u5b58\u9650\u5236\u91cf \u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u5bb9\u5668\u7ec4\u7684\u5185\u5b58\u9650\u5236\u91cf\u4e4b\u548c\u3002 \u78c1\u76d8\u8bfb\u5199\u901f\u7387 \u6307\u5b9a\u65f6\u95f4\u8303\u56f4\u5185\u78c1\u76d8\u6bcf\u79d2\u8fde\u7eed\u8bfb\u53d6\u548c\u5199\u5165\u7684\u603b\u548c\uff0c\u8868\u793a\u78c1\u76d8\u6bcf\u79d2\u8bfb\u53d6\u548c\u5199\u5165\u64cd\u4f5c\u6570\u7684\u6027\u80fd\u5ea6\u91cf\u3002 \u7f51\u7edc\u53d1\u9001\u63a5\u6536\u901f\u7387 \u6307\u5b9a\u65f6\u95f4\u8303\u56f4\u5185\uff0c\u6309\u5de5\u4f5c\u8d1f\u8f7d\u7edf\u8ba1\u7684\u7f51\u7edc\u6d41\u91cf\u7684\u6d41\u5165\u3001\u6d41\u51fa\u901f\u7387\u3002"},{"location":"end-user/insight/infra/event.html","title":"\u4e8b\u4ef6\u67e5\u8be2","text":"

The AI platform's Insight supports querying events by cluster and namespace, and provides an event status distribution chart with statistics on important events.

"},{"location":"end-user/insight/infra/event.html#_2","title":"Steps","text":"
1. Click Observability in the primary navigation bar.
2. In the left navigation bar, select Infrastructure > Events.

"},{"location":"end-user/insight/infra/event.html#_3","title":"Event Status Distribution","text":"

By default, events from the last 12 hours are displayed. You can select a different time range at the upper right to view a longer or shorter period, and customize the sampling interval from 1 minute to 5 hours.

The event status distribution chart gives you an intuitive view of how dense or scattered events are, which helps you assess subsequent cluster operations and prepare and schedule work accordingly. If events cluster in a particular period, you may need to allocate more resources or take corresponding measures to ensure cluster stability and high availability. If events are scattered, you can reasonably schedule other operations work during that period, such as system optimization, upgrades, or other tasks.

By considering both the event status distribution chart and the time range, you can better plan and manage cluster operations and ensure system stability and reliability.

"},{"location":"end-user/insight/infra/event.html#_4","title":"Event Count and Statistics","text":"

The important-event statistics let you readily see the number of image pull failures, health check failures, pod run failures, pod scheduling failures, container OOM (out-of-memory) events, volume mount failures, and the total count of all events. These events are usually classified as Warning and Normal.

"},{"location":"end-user/insight/infra/event.html#_5","title":"Event List","text":"

The event list presents events as a stream along a timeline. You can sort by Last Occurred and Level.

Click the ⚙️ icon on the right to customize the displayed columns according to your preferences and needs.

When needed, you can also click the refresh icon to update the current event list.

"},{"location":"end-user/insight/infra/event.html#_6","title":"Other Operations","text":"
1. Click the icon in the action column of the event list to view the metadata of an event.

2. Click the Context tab at the top to view the historical event records of the resource associated with the event.

"},{"location":"end-user/insight/infra/event.html#_7","title":"Reference","text":"

For the detailed meanings of the system's built-in Events, refer to the Kubernetes API event list.

                                                              "},{"location":"end-user/insight/infra/namespace.html","title":"\u547d\u540d\u7a7a\u95f4\u76d1\u63a7","text":"

                                                              \u4ee5\u547d\u540d\u7a7a\u95f4\u4e3a\u7ef4\u5ea6\uff0c\u5feb\u901f\u67e5\u8be2\u547d\u540d\u7a7a\u95f4\u5185\u7684\u8d44\u6e90\u6d88\u8017\u548c\u53d8\u5316\u8d8b\u52bf\u3002

                                                              "},{"location":"end-user/insight/infra/namespace.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u96c6\u7fa4\u4e2d\u5df2\u5b89\u88c5 insight-agent \u4e14\u5e94\u7528\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u3002

                                                              "},{"location":"end-user/insight/infra/namespace.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u4ea7\u54c1\u6a21\u5757\u3002

                                                              2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u57fa\u7840\u8bbe\u65bd > \u547d\u540d\u7a7a\u95f4 \u3002\u5728\u8be5\u9875\u9762\u53ef\u67e5\u770b\u4ee5\u4e0b\u4fe1\u606f\uff1a

                                                                1. \u5207\u6362\u547d\u540d\u7a7a\u95f4\uff1a\u5728\u9876\u90e8\u5207\u6362\u96c6\u7fa4\u6216\u547d\u540d\u7a7a\u95f4\uff1b
                                                                2. \u8d44\u6e90\u6982\u89c8\uff1a\u7edf\u8ba1\u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u6b63\u5e38\u548c\u5168\u90e8\u5de5\u4f5c\u8d1f\u8f7d\u7684\u6570\u91cf\uff1b
                                                                3. \u6545\u969c\uff1a\u7edf\u8ba1\u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e0b\u4ea7\u751f\u7684\u544a\u8b66\u6570\u91cf\uff1b
                                                                4. \u4e8b\u4ef6\uff1a\u7edf\u8ba1\u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e0b 24 \u5c0f\u65f6\u5185 Warning \u7ea7\u522b\u7684\u4e8b\u4ef6\u6570\u91cf\uff1b
                                                                5. \u8d44\u6e90\u6d88\u8017\uff1a\u7edf\u8ba1\u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e0b\u5bb9\u5668\u7ec4\u7684 CPU\u3001\u5185\u5b58\u4f7f\u7528\u91cf\u4e4b\u548c \u53ca CPU\u3001\u5185\u5b58\u914d\u989d\u60c5\u51b5\u3002

                                                              "},{"location":"end-user/insight/infra/namespace.html#_4","title":"\u6307\u6807\u8bf4\u660e","text":"\u6307\u6807\u540d \u8bf4\u660e CPU \u4f7f\u7528\u91cf \u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e2d\u5bb9\u5668\u7ec4\u7684 CPU \u4f7f\u7528\u91cf\u4e4b\u548c \u5185\u5b58\u4f7f\u7528\u91cf \u6240\u9009\u547d\u540d\u7a7a\u95f4\u4e2d\u5bb9\u5668\u7ec4\u7684\u5185\u5b58\u4f7f\u7528\u91cf\u4e4b\u548c \u5bb9\u5668\u7ec4 CPU \u4f7f\u7528\u91cf \u547d\u540d\u7a7a\u95f4\u4e2d\u5404\u5bb9\u5668\u7ec4\u7684 CPU \u4f7f\u7528\u91cf \u5bb9\u5668\u7ec4\u5185\u5b58\u4f7f\u7528\u91cf \u547d\u540d\u7a7a\u95f4\u4e2d\u5404\u5bb9\u5668\u7ec4\u7684\u5185\u5b58\u4f7f\u7528\u91cf"},{"location":"end-user/insight/infra/node.html","title":"\u8282\u70b9\u76d1\u63a7","text":"

With node monitoring, you get an overview of the current health of the nodes in the selected cluster and the number of abnormal pods on them. On the node details page, you can view the number of active alerts and trend charts of CPU, memory, disk, and other resource consumption.

"},{"location":"end-user/insight/infra/node.html#_2","title":"Prerequisites","text":"

insight-agent is installed in the cluster and the application is in the Running state.

"},{"location":"end-user/insight/infra/node.html#_3","title":"Steps","text":"
1. Go to the Observability product module.

2. Select Infrastructure -> Nodes in the left navigation bar. On this page you can view the following information:

  • Cluster switch: use the drop-down box at the top to switch clusters;
  • Node list: the list of nodes in the selected cluster; click a node to switch to it.
  • Faults: the number of alerts generated by the current cluster;
  • Resource consumption: the actual usage and totals of CPU, memory, and disk for the selected node;
  • Metrics: trends of CPU, memory, disk read/write, and network receive/send for the selected node.

3. Switch to the Resource Watermark Monitoring tab to view more monitoring data of the current node.

                                                              "},{"location":"end-user/insight/infra/probe.html","title":"\u62e8\u6d4b","text":"

                                                              \u62e8\u6d4b\uff08Probe\uff09\u6307\u7684\u662f\u57fa\u4e8e\u9ed1\u76d2\u76d1\u63a7\uff0c\u5b9a\u671f\u901a\u8fc7 HTTP\u3001TCP \u7b49\u65b9\u5f0f\u5bf9\u76ee\u6807\u8fdb\u884c\u8fde\u901a\u6027\u6d4b\u8bd5\uff0c\u5feb\u901f\u53d1\u73b0\u6b63\u5728\u53d1\u751f\u7684\u6545\u969c\u3002

                                                              Insight \u57fa\u4e8e Prometheus Blackbox Exporter \u5de5\u5177\u901a\u8fc7 HTTP\u3001HTTPS\u3001DNS\u3001TCP \u548c ICMP \u7b49\u534f\u8bae\uff0c\u5bf9\u7f51\u7edc\u8fdb\u884c\u63a2\u6d4b\u5e76\u8fd4\u56de\u63a2\u6d4b\u7ed3\u679c\u4ee5\u4fbf\u4e86\u89e3\u7f51\u7edc\u72b6\u6001\u3002
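For reference, probe methods correspond to Blackbox Exporter modules, like the configuration example earlier on this page. A minimal sketch of an HTTP module in the Blackbox Exporter configuration (the module name http_2xx is a common convention, not something mandated by Insight):

modules:
  http_2xx:
    prober: http          # probe over HTTP
    timeout: 5s           # fail the probe after 5 seconds
    http:
      preferred_ip_protocol: "ip4"
      valid_status_codes: [200]   # only a 200 response counts as success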

                                                              "},{"location":"end-user/insight/infra/probe.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u76ee\u6807\u96c6\u7fa4\u4e2d\u5df2\u6210\u529f\u90e8\u7f72 insight-agent\uff0c\u4e14\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u3002

                                                              "},{"location":"end-user/insight/infra/probe.html#_3","title":"\u67e5\u770b\u62e8\u6d4b\u4efb\u52a1","text":"
                                                              1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u4ea7\u54c1\u6a21\u5757\uff1b
                                                              2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u57fa\u7840\u8bbe\u65bd -> \u62e8\u6d4b\u3002

                                                                • \u70b9\u51fb\u8868\u683c\u4e2d\u7684\u96c6\u7fa4\u6216\u547d\u540d\u7a7a\u95f4\u4e0b\u62c9\u6846\uff0c\u53ef\u5207\u6362\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4
                                                                • \u4f60\u53ef\u4ee5\u70b9\u51fb\u53f3\u4fa7\u7684 \u2699\ufe0f \u4fee\u6539\u663e\u793a\u7684\u5217\uff0c\u9ed8\u8ba4\u4e3a\u62e8\u6d4b\u540d\u79f0\u3001\u63a2\u6d4b\u65b9\u5f0f\u3001\u63a2\u6d4b\u76ee\u6807\u3001\u8fde\u901a\u72b6\u6001\u3001\u521b\u5efa\u65f6\u95f4
                                                                • \u8fde\u901a\u72b6\u6001\u6709 3 \u79cd\uff1a
                                                                  • \u6b63\u5e38\uff1aProbe \u6210\u529f\u8fde\u63a5\u5230\u4e86\u76ee\u6807\uff0c\u76ee\u6807\u8fd4\u56de\u4e86\u9884\u671f\u7684\u54cd\u5e94
                                                                  • \u5f02\u5e38\uff1aProbe \u65e0\u6cd5\u8fde\u63a5\u5230\u76ee\u6807\uff0c\u6216\u76ee\u6807\u6ca1\u6709\u8fd4\u56de\u9884\u671f\u7684\u54cd\u5e94
                                                                  • Pending\uff1aProbe \u6b63\u5728\u5c1d\u8bd5\u8fde\u63a5\u76ee\u6807
                                                                • \u4f60\u53ef\u4ee5\u5728 \ud83d\udd0d \u641c\u7d22\u6846\u4e2d\u952e\u5165\u540d\u79f0\uff0c\u6a21\u7cca\u641c\u7d22\u67d0\u4e9b\u62e8\u6d4b\u4efb\u52a1

                                                              "},{"location":"end-user/insight/infra/probe.html#_4","title":"\u521b\u5efa\u62e8\u6d4b\u4efb\u52a1","text":"
                                                              1. \u70b9\u51fb \u521b\u5efa\u62e8\u6d4b\u4efb\u52a1\u3002
                                                              2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65

                                                                • \u96c6\u7fa4\uff1a\u9009\u62e9\u9700\u8981\u62e8\u6d4b\u7684\u96c6\u7fa4
                                                                • \u547d\u540d\u7a7a\u95f4\uff1a\u62e8\u6d4b\u6240\u5728\u7684\u547d\u540d\u7a7a\u95f4

                                                              3. \u914d\u7f6e\u63a2\u6d4b\u53c2\u6570\u3002

                                                                • Blackbox \u5b9e\u4f8b\uff1a\u9009\u62e9\u8d1f\u8d23\u63a2\u6d4b\u7684 blackbox \u5b9e\u4f8b
                                                                • \u63a2\u6d4b\u65b9\u5f0f\uff1a
                                                                  • HTTP\uff1a\u901a\u8fc7\u53d1\u9001 HTTP \u6216 HTTPS \u8bf7\u6c42\u5230\u76ee\u6807 URL\uff0c\u68c0\u6d4b\u5176\u8fde\u901a\u6027\u548c\u54cd\u5e94\u65f6\u95f4\uff0c\u8fd9\u53ef\u4ee5\u7528\u4e8e\u76d1\u6d4b\u7f51\u7ad9\u6216 Web \u5e94\u7528\u7684\u53ef\u7528\u6027\u548c\u6027\u80fd
                                                                  • TCP\uff1a\u901a\u8fc7\u5efa\u7acb\u5230\u76ee\u6807\u4e3b\u673a\u548c\u7aef\u53e3\u7684 TCP \u8fde\u63a5\uff0c\u68c0\u6d4b\u5176\u8fde\u901a\u6027\u548c\u54cd\u5e94\u65f6\u95f4\u3002\u8fd9\u53ef\u4ee5\u7528\u4e8e\u76d1\u6d4b\u57fa\u4e8e TCP \u7684\u670d\u52a1\uff0c\u5982 Web \u670d\u52a1\u5668\u3001\u6570\u636e\u5e93\u670d\u52a1\u5668\u7b49
                                                                  • \u5176\u4ed6\uff1a\u652f\u6301\u901a\u8fc7\u914d\u7f6e ConfigMap \u81ea\u5b9a\u4e49\u63a2\u6d4b\u65b9\u5f0f\uff0c\u53ef\u53c2\u8003\u81ea\u5b9a\u4e49\u62e8\u6d4b\u65b9\u5f0f
                                                                • \u63a2\u6d4b\u76ee\u6807\uff1a\u63a2\u6d4b\u7684\u76ee\u6807\u5730\u5740\uff0c\u652f\u6301\u57df\u540d\u6216 IP \u5730\u5740\u7b49
                                                                • \u6807\u7b7e\uff1a\u81ea\u5b9a\u4e49\u6807\u7b7e\uff0c\u8be5\u6807\u7b7e\u4f1a\u81ea\u52a8\u6dfb\u52a0\u5230 Prometheus \u7684 Label \u4e2d
                                                                • \u63a2\u6d4b\u95f4\u9694\uff1a\u63a2\u6d4b\u95f4\u9694\u65f6\u95f4
                                                                • \u63a2\u6d4b\u8d85\u65f6\uff1a\u63a2\u6d4b\u76ee\u6807\u65f6\u7684\u6700\u957f\u7b49\u5f85\u65f6\u95f4

                                                              4. \u914d\u7f6e\u5b8c\u6210\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u5b8c\u6210\u521b\u5efa\u3002
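For readers who manage probes declaratively, the parameters above map naturally onto the prometheus-operator Probe CRD that Blackbox-Exporter-based probing is commonly built on. A minimal sketch, assuming such a CRD is in use; the resource name, blackbox address, and target below are illustrative, not from the original:

apiVersion: monitoring.coreos.com/v1
kind: Probe
metadata:
  name: probe-example              # illustrative name
  namespace: insight-system
spec:
  jobName: http-probe
  interval: 30s                    # probe interval
  scrapeTimeout: 10s               # probe timeout
  module: http_2xx                 # probe method: HTTP expecting a 2xx response
  prober:
    url: blackbox-exporter.insight-system:9115   # illustrative blackbox instance address
  targets:
    staticConfig:
      static:
        - https://example.com      # probe target
      labels:
        env: demo                  # custom label added to the Prometheus labels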

                                                              Warning

After a probe task is created, it takes roughly 3 minutes to synchronize the configuration. During this period no probing is performed and no probe results are available.

                                                              "},{"location":"end-user/insight/infra/probe.html#_5","title":"\u7f16\u8f91\u62e8\u6d4b\u4efb\u52a1","text":"

                                                              \u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 -> \u7f16\u8f91\uff0c\u5b8c\u6210\u7f16\u8f91\u540e\u70b9\u51fb \u786e\u5b9a\u3002

                                                              "},{"location":"end-user/insight/infra/probe.html#_6","title":"\u67e5\u770b\u76d1\u63a7\u9762\u677f","text":"

                                                              \u70b9\u51fb\u62e8\u6d4b\u540d\u79f0 \u67e5\u770b\u62e8\u6d4b\u4efb\u52a1\u4e2d\u6bcf\u4e2a\u76ee\u6807\u7684\u76d1\u63a7\u72b6\u6001\uff0c\u4ee5\u56fe\u8868\u65b9\u5f0f\u663e\u793a\u9488\u5bf9\u7f51\u7edc\u72b6\u51b5\u7684\u63a2\u6d4b\u7ed3\u679c\u3002

Current Status Response: the response status code of the HTTP probe request.
Ping Status: whether the probe request succeeded; 1 means success, 0 means failure.
IP Protocol: the IP protocol version used by the probe request.
SSL Expiry: the earliest expiration time of the SSL/TLS certificate.
DNS Response (Latency): the duration of the entire probe process, in seconds.
HTTP Duration: the total time from sending the request to receiving the complete response.
"},{"location":"end-user/insight/infra/probe.html#_7","title":"Delete a Probe Task","text":"

Click ⋮ -> Delete on the right side of the list, and click OK after confirming.

Caution

Deletion cannot be undone; proceed with caution.

                                                              "},{"location":"end-user/insight/quickstart/install/index.html","title":"\u5f00\u59cb\u89c2\u6d4b","text":"

                                                              AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\u5b9e\u73b0\u4e86\u5bf9\u591a\u4e91\u591a\u96c6\u7fa4\u7684\u7eb3\u7ba1\uff0c\u5e76\u652f\u6301\u521b\u5efa\u96c6\u7fa4\u3002\u5728\u6b64\u57fa\u7840\u4e0a\uff0c\u53ef\u89c2\u6d4b\u6027 Insight \u4f5c\u4e3a\u591a\u96c6\u7fa4\u7edf\u4e00\u89c2\u6d4b\u65b9\u6848\uff0c\u901a\u8fc7\u90e8\u7f72 insight-agent \u63d2\u4ef6\u5b9e\u73b0\u5bf9\u591a\u96c6\u7fa4\u89c2\u6d4b\u6570\u636e\u7684\u91c7\u96c6\uff0c\u5e76\u652f\u6301\u901a\u8fc7 AI \u7b97\u529b\u4e2d\u5fc3 \u53ef\u89c2\u6d4b\u6027\u4ea7\u54c1\u5b9e\u73b0\u5bf9\u6307\u6807\u3001\u65e5\u5fd7\u3001\u94fe\u8def\u6570\u636e\u7684\u67e5\u8be2\u3002

                                                              insight-agent \u662f\u53ef\u89c2\u6d4b\u6027\u5b9e\u73b0\u5bf9\u591a\u96c6\u7fa4\u6570\u636e\u91c7\u96c6\u7684\u5de5\u5177\uff0c\u5b89\u88c5\u540e\u65e0\u9700\u4efb\u4f55\u4fee\u6539\uff0c\u5373\u53ef\u5b9e\u73b0\u5bf9\u6307\u6807\u3001\u65e5\u5fd7\u4ee5\u53ca\u94fe\u8def\u6570\u636e\u7684\u81ea\u52a8\u5316\u91c7\u96c6\u3002

                                                              \u901a\u8fc7 \u5bb9\u5668\u7ba1\u7406 \u521b\u5efa\u7684\u96c6\u7fa4\u9ed8\u8ba4\u4f1a\u5b89\u88c5 insight-agent\uff0c\u6545\u5728\u6b64\u4ec5\u9488\u5bf9\u63a5\u5165\u7684\u96c6\u7fa4\u5982\u4f55\u5f00\u542f\u89c2\u6d4b\u80fd\u529b\u63d0\u4f9b\u6307\u5bfc\u3002

                                                              • \u5728\u7ebf\u5b89\u88c5 insight-agent

                                                              \u53ef\u89c2\u6d4b\u6027 Insight \u4f5c\u4e3a\u591a\u96c6\u7fa4\u7684\u7edf\u4e00\u89c2\u6d4b\u5e73\u53f0\uff0c\u5176\u90e8\u5206\u7ec4\u4ef6\u7684\u8d44\u6e90\u6d88\u8017\u4e0e\u521b\u5efa\u96c6\u7fa4\u7684\u6570\u636e\u3001\u63a5\u5165\u96c6\u7fa4\u7684\u6570\u91cf\u606f\u606f\u76f8\u5173\uff0c\u5728\u5b89\u88c5 insight-agent \u65f6\uff0c\u9700\u8981\u6839\u636e\u96c6\u7fa4\u89c4\u6a21\u5bf9\u76f8\u5e94\u7ec4\u4ef6\u7684\u8d44\u6e90\u8fdb\u884c\u8c03\u6574\u3002

                                                              1. \u6839\u636e\u521b\u5efa\u96c6\u7fa4\u7684\u89c4\u6a21\u6216\u63a5\u5165\u96c6\u7fa4\u7684\u89c4\u6a21\uff0c\u8c03\u6574 insight-agent \u4e2d\u91c7\u96c6\u7ec4\u4ef6 Prometheus \u7684 CPU \u548c\u5185\u5b58\uff0c\u8bf7\u53c2\u8003: Prometheus \u8d44\u6e90\u89c4\u5212

                                                              2. \u7531\u4e8e\u591a\u96c6\u7fa4\u7684\u6307\u6807\u6570\u636e\u4f1a\u7edf\u4e00\u5b58\u50a8\uff0c\u5219\u9700\u8981 AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\u7ba1\u7406\u5458\u6839\u636e\u521b\u5efa\u96c6\u7fa4\u7684\u89c4\u6a21\u3001\u63a5\u5165\u96c6\u7fa4\u7684\u89c4\u6a21\u5bf9\u5e94\u8c03\u6574 vmstorage \u7684\u78c1\u76d8\uff0c\u8bf7\u53c2\u8003\uff1avmstorage \u78c1\u76d8\u5bb9\u91cf\u89c4\u5212\u3002

                                                              3. \u5982\u4f55\u8c03\u6574 vmstorage \u7684\u78c1\u76d8\uff0c\u8bf7\u53c2\u8003\uff1avmstorge \u78c1\u76d8\u6269\u5bb9\u3002
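A minimal sketch of what the Prometheus resource adjustment from step 1 could look like as Helm values. The key paths below are hypothetical and only illustrate the shape of such an override; take the actual keys and sizing from the Prometheus Resource Planning document:

# hypothetical insight-agent values override; key paths are illustrative
prometheus:
  resources:
    requests:
      cpu: "1"        # baseline for a small cluster
      memory: 2Gi
    limits:
      cpu: "2"
      memory: 4Gi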

Because the AI platform supports managing multiple clouds and clusters, insight-agent has so far completed only partial validation. Conflicts among monitoring components cause problems when installing insight-agent on AI platform 4.0 clusters and OpenShift 4.x clusters. If you encounter the same problems, refer to the following documents:

• Install insight-agent on OpenShift 4.x
                                                              "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html","title":"\u5f00\u542f\u5927\u65e5\u5fd7\u548c\u5927\u94fe\u8def\u6a21\u5f0f","text":"

                                                              \u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u4e3a\u4e86\u63d0\u9ad8\u5927\u89c4\u6a21\u73af\u5883\u4e0b\u7684\u6570\u636e\u5199\u5165\u80fd\u529b\uff0c\u652f\u6301\u5c06\u65e5\u5fd7\u5207\u6362\u4e3a \u5927\u65e5\u5fd7 \u6a21\u5f0f\u3001\u5c06\u94fe\u8def\u5207\u6362\u4e3a \u5927\u94fe\u8def \u6a21\u5f0f\u3002\u672c\u6587\u5c06\u4ecb\u7ecd\u4ee5\u4e0b\u51e0\u79cd\u5f00\u542f\u65b9\u5f0f\uff1a

• Enable via the installer during install or upgrade (controlled by the same parameter value in manifest.yaml)
• Enable Big Log and Big Trace modes manually via Helm commands
"},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_2","title":"Logs","text":"

This section explains the difference between the normal log mode and the Big Log mode.

                                                              "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_3","title":"\u65e5\u5fd7\u6a21\u5f0f","text":"

Components: Fluentbit + Elasticsearch

This mode is referred to as ES mode for short; the data flow is shown below:

                                                              "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_4","title":"\u5927\u65e5\u5fd7\u6a21\u5f0f","text":"

Components: Fluentbit + Kafka + Vector + Elasticsearch

This mode is referred to as Kafka mode for short; the data flow is shown below:

                                                              "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_5","title":"\u94fe\u8def","text":"

This section explains the difference between the normal trace mode and the Big Trace mode.

                                                              "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_6","title":"\u94fe\u8def\u6a21\u5f0f","text":"

Components: Agent opentelemetry-collector + Global opentelemetry-collector + Jaeger-collector + Elasticsearch

This mode is referred to as OTLP mode for short; the data flow is shown below:

                                                              "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_7","title":"\u5927\u94fe\u8def\u6a21\u5f0f","text":"

Components: Agent opentelemetry-collector + Kafka + Global opentelemetry-collector + Jaeger-collector + Elasticsearch

This mode is referred to as Kafka mode for short; the data flow is shown below:

                                                              "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_8","title":"\u901a\u8fc7\u5b89\u88c5\u5668\u5f00\u542f","text":"

The manifest.yaml used when deploying or upgrading AI 算力中心 via the installer contains the infrastructures.kafka field. To enable the observability Big Log and Big Trace modes, kafka must be enabled:

                                                              manifest.yaml
apiVersion: manifest.daocloud.io/v1alpha1\nkind: DCEManifest\n...\ninfrastructures:\n  ...\n  kafka:\n    enable: true # default is false\n    cpuLimit: 1\n    memLimit: 2Gi\n    pvcSize: 15Gi\n
                                                              "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_9","title":"\u5f00\u542f","text":"

If the installation uses a manifest.yaml with kafka enabled, the kafka middleware is installed by default, and Big Log and Big Trace modes are enabled by default when Insight is installed. The install command is:

                                                              ./dce5-installer cluster-create -c clusterConfig.yaml -m manifest.yaml\n
                                                              "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_10","title":"\u5347\u7ea7","text":"

An upgrade likewise modifies the kafka field. Note, however, that because the old environment was installed with kafka: false, there is no kafka in the environment. In this case the upgrade must explicitly include the middleware component so that the kafka middleware is installed alongside. The upgrade command is:

                                                              ./dce5-installer cluster-create -c clusterConfig.yaml -m manifest.yaml -u gproduct,middleware\n

                                                              Note

After the upgrade completes, the following components must be restarted manually (a restart sketch follows this list):

                                                              • insight-agent-fluent-bit
                                                              • insight-agent-opentelemetry-collector
                                                              • insight-opentelemetry-collector
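The restarts can be done with kubectl rollout restart. A minimal sketch, assuming insight-agent-fluent-bit runs as a DaemonSet and the two collectors as Deployments in the insight-system namespace (verify the actual workload kinds with kubectl get ds,deploy -n insight-system):

# Restart the log and trace pipeline components after the upgrade\nkubectl -n insight-system rollout restart daemonset insight-agent-fluent-bit\nkubectl -n insight-system rollout restart deployment insight-agent-opentelemetry-collector\nkubectl -n insight-system rollout restart deployment insight-opentelemetry-collector\n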
                                                              "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#helm","title":"\u901a\u8fc7 Helm \u547d\u4ee4\u5f00\u542f","text":"

Prerequisite: a usable kafka must exist and its address must be reachable.
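A simple way to check reachability is a TCP probe against the broker address; a minimal sketch using the hypothetical broker address from the examples below:

# A successful TCP connection confirms the kafka broker address is reachable\nnc -zv 10.6.216.111 30592\n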

Get the values of the currently deployed insight and insight-agent releases with the following commands (making a backup is recommended):

                                                              helm get values insight -n insight-system -o yaml > insight.yaml\nhelm get values insight-agent -n insight-system -o yaml > insight-agent.yaml\n
                                                              "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_11","title":"\u5f00\u542f\u5927\u65e5\u5fd7","text":"

There are the following ways to enable or upgrade to Big Log mode:

• Use --set in the helm upgrade command
• Modify the YAML and then run helm upgrade
• Upgrade via the Container Management UI

First run the following insight upgrade command, making sure the kafka brokers address is correct:

                                                              helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --set global.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.kafka.enabled=true \\\n  --set vector.enabled=true \\\n  --version 0.30.1\n

Then run the following insight-agent upgrade command, making sure the kafka brokers address is correct:

                                                              helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --set global.exporters.logging.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.exporters.logging.output=kafka \\\n  --version 0.30.1\n

Modify the YAML as described in the following steps, then run the helm upgrade command:

1. Modify insight.yaml

                                                                insight.yaml
                                                                global:\n  ...\n  kafka:\n    brokers: 10.6.216.111:30592\n    enabled: true\n...\nvector:\n  enabled: true\n
2. Upgrade the insight component:

                                                                helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --version 0.30.1\n
3. Modify insight-agent.yaml

                                                                insight-agent.yaml
                                                                global:\n  ...\n  exporters:\n    ...\n    logging:\n      ...\n      kafka:\n        brokers: 10.6.216.111:30592\n      output: kafka\n
4. Upgrade insight-agent:

                                                                helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --version 0.30.1\n

In the Container Management module, locate the corresponding cluster, select Helm Apps from the left navigation bar, then find and update insight-agent.

In Logging Settings, select kafka for output and fill in the correct brokers address.

Note that after the upgrade completes, the insight-agent-fluent-bit component must be restarted manually.

                                                              "},{"location":"end-user/insight/quickstart/install/big-log-and-trace.html#_12","title":"\u5f00\u542f\u5927\u94fe\u8def","text":"

There are the following ways to enable or upgrade to Big Trace mode:

• Use --set in the helm upgrade command
• Modify the YAML and then run helm upgrade
• Upgrade via the Container Management UI

First run the following insight upgrade command, making sure the kafka brokers address is correct:

                                                              helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --set global.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.kafka.enabled=true \\\n  --set global.tracing.kafkaReceiver.enabled=true \\\n  --version 0.30.1\n

Then run the following insight-agent upgrade command, making sure the kafka brokers address is correct:

                                                              helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --set global.exporters.trace.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.exporters.trace.output=kafka \\\n  --version 0.30.1\n

Modify the YAML as described in the following steps, then run the helm upgrade command:

1. Modify insight.yaml

                                                                insight.yaml
                                                                global:\n  ...\n  kafka:\n    brokers: 10.6.216.111:30592\n    enabled: true\n...\ntracing:\n  ...\n  kafkaReceiver:\n    enabled: true\n
2. Upgrade the insight component:

                                                                helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --version 0.30.1\n
3. Modify insight-agent.yaml

                                                                insight-agent.yaml
                                                                global:\n  ...\n  exporters:\n    ...\n    trace:\n      ...\n      kafka:\n        brokers: 10.6.216.111:30592\n      output: kafka\n
4. Upgrade insight-agent:

                                                                helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --version 0.30.1\n

In the Container Management module, locate the corresponding cluster, select Helm Apps from the left navigation bar, then find and update insight-agent.

In Trace Settings, select kafka for output and fill in the correct brokers address.

Note that after the upgrade completes, the insight-agent-opentelemetry-collector and insight-opentelemetry-collector components must be restarted manually.

                                                              "},{"location":"end-user/insight/quickstart/install/component-scheduling.html","title":"\u81ea\u5b9a\u4e49 Insight \u7ec4\u4ef6\u8c03\u5ea6\u7b56\u7565","text":"

When deploying the Insight observability platform to a Kubernetes environment, proper resource management and optimization are crucial. Insight includes several core components such as Prometheus, OpenTelemetry, FluentBit, Vector, and Elasticsearch. While running, these components may negatively affect the performance of other Pods in the cluster because of their resource usage. To manage resources effectively and keep the cluster running optimally, node affinity becomes an important configuration option.

This document focuses on how to use taint and node affinity configuration policies so that each component runs on an appropriate node and avoids resource competition or contention, thereby ensuring the stability and efficiency of the entire Kubernetes cluster.

                                                              "},{"location":"end-user/insight/quickstart/install/component-scheduling.html#insight_1","title":"\u901a\u8fc7\u6c61\u70b9\u4e3a Insight \u914d\u7f6e\u4e13\u6709\u8282\u70b9","text":"

Because Insight Agent includes DaemonSet components, the configuration described in this section makes every component except the Insight DaemonSets run on dedicated nodes.

This is achieved by adding taints to the dedicated nodes and pairing them with tolerations. See the official Kubernetes documentation for more details.

You can use the following commands to add and remove a node taint:

# Add the taint\nkubectl taint nodes worker1 node.daocloud.io=insight-only:NoSchedule\n\n# Remove the taint\nkubectl taint nodes worker1 node.daocloud.io:NoSchedule-\n

There are two ways to schedule Insight components onto dedicated nodes:

                                                              "},{"location":"end-user/insight/quickstart/install/component-scheduling.html#1","title":"1. \u4e3a\u6bcf\u4e2a\u7ec4\u4ef6\u6dfb\u52a0\u6c61\u70b9\u5bb9\u5fcd\u5ea6","text":"

Configure the insight-server and insight-agent Charts separately:

insight-server Chart configuration (first block below) and insight-agent Chart configuration (second block below)
                                                              server:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nui:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nrunbook:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\n# mysql:\nvictoria-metrics-k8s-stack:\n  victoria-metrics-operator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  vmcluster:\n    spec:\n      vmstorage:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n      vmselect:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n      vminsert:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n  vmalert:\n    spec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n  alertmanager:\n    spec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n\njaeger:\n  collector:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  query:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n\nopentelemetry-collector-aggregator:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nopentelemetry-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\ngrafana-operator:\n  operator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  grafana:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\nkibana:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nelastic-alert:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nvector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n
                                                              kube-prometheus-stack:\n  prometheus:\n    prometheusSpec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n  prometheus-node-exporter:\n    tolerations:\n      - effect: NoSchedule\n        operator: Exists\n  prometheusOperator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n\nkube-state-metrics:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-operator:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\ntailing-sidecar-operator:\n  operator:\n    tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-kubernetes-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nprometheus-blackbox-exporter:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\netcd-exporter:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\" \n
                                                              "},{"location":"end-user/insight/quickstart/install/component-scheduling.html#2","title":"2. \u901a\u8fc7\u547d\u540d\u7a7a\u95f4\u7ea7\u522b\u914d\u7f6e","text":"

Make all Pods in the insight-system namespace tolerate the node.daocloud.io=insight-only taint.

1. Adjust the apiserver configuration file /etc/kubernetes/manifests/kube-apiserver.yaml to enable the PodTolerationRestriction and PodNodeSelector admission plugins (a sketch of the flag follows this list):

2. Add an annotation to the insight-system namespace:

                                                                apiVersion: v1\nkind: Namespace\nmetadata:\n  name: insight-system\n  annotations:\n    scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Equal\", \"effect\": \"NoSchedule\", \"key\": \"node.daocloud.io\", \"value\": \"insight-only\"}]'\n
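For step 1, both plugins are appended to the apiserver's --enable-admission-plugins flag. A minimal sketch of the relevant fragment of /etc/kubernetes/manifests/kube-apiserver.yaml, assuming NodeRestriction is already in your list (keep whatever plugins you currently have):

spec:\n  containers:\n    - command:\n        - kube-apiserver\n        # Append the two plugins to the existing comma-separated list\n        - --enable-admission-plugins=NodeRestriction,PodTolerationRestriction,PodNodeSelector\n        # ... other flags unchanged\n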

Restart the components under the insight-system namespace, and the Pods in insight-system will then be scheduled with this toleration applied normally.
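To confirm the annotation is in place before restarting, it can be read back with kubectl; a minimal verification sketch:

kubectl get namespace insight-system -o jsonpath='{.metadata.annotations.scheduler\.alpha\.kubernetes\.io/defaultTolerations}'\n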

                                                              "},{"location":"end-user/insight/quickstart/install/component-scheduling.html#label","title":"\u4e3a\u8282\u70b9\u6dfb\u52a0 Label \u548c\u8282\u70b9\u4eb2\u548c\u6027\u6765\u7ba1\u7406\u7ec4\u4ef6\u8c03\u5ea6","text":"

                                                              Info

Node affinity is conceptually similar to nodeSelector: it lets you constrain which nodes a Pod can be scheduled onto based on node labels. There are two kinds of node affinity:

1. requiredDuringSchedulingIgnoredDuringExecution: the scheduler only schedules the Pod when the rule is satisfied. This works like nodeSelector but with a more expressive syntax.
2. preferredDuringSchedulingIgnoredDuringExecution: the scheduler tries to find nodes that satisfy the rule. If no matching node is found, the Pod is still scheduled.

For more details, see the official Kubernetes documentation.

To give different users flexibility in scheduling Insight components, Insight provides fairly fine-grained labels for per-component scheduling policies. The relationship between labels and components is as follows:

• node.daocloud.io/insight-any (any value, true recommended): all Insight components prefer nodes carrying this label
• node.daocloud.io/insight-prometheus (any value, true recommended): specifically targets the Prometheus component
• node.daocloud.io/insight-vmstorage (any value, true recommended): specifically targets the VictoriaMetrics vmstorage component
• node.daocloud.io/insight-vector (any value, true recommended): specifically targets the Vector component
• node.daocloud.io/insight-otel-col (any value, true recommended): specifically targets the OpenTelemetry component

You can use the following commands to add and remove a node label:

# Label node8 so that insight-prometheus is scheduled to node8 first\nkubectl label nodes node8 node.daocloud.io/insight-prometheus=true\n\n# Remove the node.daocloud.io/insight-prometheus label from node8\nkubectl label nodes node8 node.daocloud.io/insight-prometheus-\n

The following is the default affinity preference of the insight-prometheus component at deployment time:

                                                              affinity:\n  nodeAffinity:\n    preferredDuringSchedulingIgnoredDuringExecution:\n    - preference:\n        matchExpressions:\n        - key: node-role.kubernetes.io/control-plane\n          operator: DoesNotExist\n      weight: 1\n    - preference:\n        matchExpressions:\n        - key: node.daocloud.io/insight-prometheus # (1)!\n          operator: Exists\n      weight: 2\n    - preference:\n        matchExpressions:\n        - key: node.daocloud.io/insight-any\n          operator: Exists\n      weight: 3\n    podAntiAffinity:\n      preferredDuringSchedulingIgnoredDuringExecution:\n        - weight: 1\n          podAffinityTerm:\n            topologyKey: kubernetes.io/hostname\n            labelSelector:\n              matchExpressions:\n                - key: app.kubernetes.io/instance\n                  operator: In\n                  values:\n                    - insight-agent-kube-prometh-prometheus\n
1. Schedule insight-prometheus first onto nodes carrying the node.daocloud.io/insight-prometheus label
                                                              "},{"location":"end-user/insight/quickstart/install/gethosturl.html","title":"\u83b7\u53d6\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u6570\u636e\u5b58\u50a8\u5730\u5740","text":"

Insight is a product for unified multi-cluster observability. To store and query observability data from multiple clusters in one place, sub-clusters need to report the collected data to the global service cluster for centralized storage. This document provides the storage component addresses that must be filled in when installing the collection component insight-agent.

                                                              "},{"location":"end-user/insight/quickstart/install/gethosturl.html#insight-agent","title":"\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u5b89\u88c5 insight-agent","text":"

If insight-agent is installed in the global service cluster, it is recommended to access the cluster via service domain names:

export vminsert_host=\"vminsert-insight-victoria-metrics-k8s-stack.insight-system.svc.cluster.local\" # metrics\nexport es_host=\"insight-es-master.insight-system.svc.cluster.local\" # logs\nexport otel_col_host=\"insight-opentelemetry-collector.insight-system.svc.cluster.local\" # traces\n
                                                              "},{"location":"end-user/insight/quickstart/install/gethosturl.html#insight-agent_1","title":"\u5728\u5176\u4ed6\u96c6\u7fa4\u5b89\u88c5 insight-agent","text":""},{"location":"end-user/insight/quickstart/install/gethosturl.html#insight-server","title":"\u901a\u8fc7 Insight Server \u63d0\u4f9b\u7684\u63a5\u53e3\u83b7\u53d6\u5730\u5740","text":"
1. The management cluster exposes services with the default LoadBalancer method

Log in to the console of the global service cluster and run the following commands:

                                                                export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam'\n

                                                                Note

Replace the ${INSIGHT_SERVER_IP} parameter in the command.

You will get the following response:

                                                                {\n  \"values\": {\n    \"global\": {\n      \"exporters\": {\n        \"logging\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"metric\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"auditLog\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"trace\": {\n          \"host\": \"10.6.182.32\"\n        }\n      }\n    },\n    \"opentelemetry-operator\": {\n      \"enabled\": true\n    },\n    \"opentelemetry-collector\": {\n      \"enabled\": true\n    }\n  }\n}\n
• global.exporters.logging.host is the logging service address; there is no need to set the corresponding service ports, the defaults will be used
• global.exporters.metric.host is the metrics service address
• global.exporters.trace.host is the tracing service address
• global.exporters.auditLog.host is the audit log service address (same service as tracing but a different port)
2. The management cluster has LoadBalancer disabled

When calling the API, you must additionally pass an externally reachable node IP of the cluster; that IP is used to assemble the complete access address of each service.

                                                                export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam' --data '{\"extra\": {\"EXPORTER_EXTERNAL_IP\": \"10.5.14.51\"}}'\n

You will get the following response:

                                                                {\n  \"values\": {\n    \"global\": {\n      \"exporters\": {\n        \"logging\": {\n          \"scheme\": \"https\",\n          \"host\": \"10.5.14.51\",\n          \"port\": 32007,\n          \"user\": \"elastic\",\n          \"password\": \"j8V1oVoM1184HvQ1F3C8Pom2\"\n        },\n        \"metric\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30683\n        },\n        \"auditLog\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30884\n        },\n        \"trace\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30274\n        }\n      }\n    },\n    \"opentelemetry-operator\": {\n      \"enabled\": true\n    },\n    \"opentelemetry-collector\": {\n      \"enabled\": true\n    }\n  }\n}\n
• global.exporters.logging.host is the logging service address
• global.exporters.logging.port is the NodePort exposed by the logging service
• global.exporters.metric.host is the metrics service address
• global.exporters.metric.port is the NodePort exposed by the metrics service
• global.exporters.trace.host is the tracing service address
• global.exporters.trace.port is the NodePort exposed by the tracing service
• global.exporters.auditLog.host is the audit log service address (same service as tracing but a different port)
• global.exporters.auditLog.port is the NodePort exposed by the audit log service
                                                              "},{"location":"end-user/insight/quickstart/install/gethosturl.html#loadbalancer","title":"\u901a\u8fc7 LoadBalancer \u8fde\u63a5","text":"
1. If LoadBalancer is enabled in the cluster and a VIP is set for Insight, you can also manually run the following command to get the address information of vminsert and opentelemetry-collector:

                                                                $ kubectl get service -n insight-system | grep lb\nlb-insight-opentelemetry-collector               LoadBalancer   10.233.23.12    <pending>     4317:31286/TCP,8006:31351/TCP  24d\nlb-vminsert-insight-victoria-metrics-k8s-stack   LoadBalancer   10.233.63.67    <pending>     8480:31629/TCP                 24d\n
• lb-vminsert-insight-victoria-metrics-k8s-stack is the address of the metrics service
• lb-insight-opentelemetry-collector is the address of the tracing service
2. Run the following command to get the elasticsearch address information:

                                                                $ kubectl get service -n mcamel-system | grep es\nmcamel-common-es-cluster-masters-es-http               NodePort    10.233.16.120   <none>        9200:30465/TCP               47d\n

mcamel-common-es-cluster-masters-es-http is the address of the logging service

                                                              "},{"location":"end-user/insight/quickstart/install/gethosturl.html#nodeport","title":"\u901a\u8fc7 NodePort \u8fde\u63a5","text":"

The LB feature is disabled in the global service cluster

In this case, the LoadBalancer resources above are not created by default, and the corresponding service names are:

• vminsert-insight-victoria-metrics-k8s-stack (metrics service)
• common-es (logging service)
• insight-opentelemetry-collector (tracing service)
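The NodePort assigned to each of these services can be read with kubectl; a minimal sketch, assuming the service names and namespaces shown in this document (the Elasticsearch service observed earlier lives in mcamel-system):

kubectl -n insight-system get svc vminsert-insight-victoria-metrics-k8s-stack -o jsonpath='{.spec.ports[0].nodePort}'\nkubectl -n mcamel-system get svc mcamel-common-es-cluster-masters-es-http -o jsonpath='{.spec.ports[0].nodePort}'\nkubectl -n insight-system get svc insight-opentelemetry-collector -o jsonpath='{.spec.ports[?(@.port==4317)].nodePort}'\n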

After obtaining the corresponding port information of these services in either of the two cases above, make the following settings:

                                                              --set global.exporters.logging.host=  # (1)!\n--set global.exporters.logging.port=  # (2)!\n--set global.exporters.metric.host=   # (3)!\n--set global.exporters.metric.port=   # (4)!\n--set global.exporters.trace.host=    # (5)!\n--set global.exporters.trace.port=    # (6)!\n--set global.exporters.auditLog.host= # (7)!\n
1. Externally reachable NodeIP of the management cluster
2. NodePort corresponding to port 9200 of the logging service
3. Externally reachable NodeIP of the management cluster
4. NodePort corresponding to port 8480 of the metrics service
5. Externally reachable NodeIP of the management cluster
6. NodePort corresponding to port 4317 of the tracing service
7. Externally reachable NodeIP of the management cluster
                                                              "},{"location":"end-user/insight/quickstart/install/helm-installagent.html","title":"\u901a\u8fc7 Helm \u90e8\u7f72 Insight Agent","text":"

This document describes the steps to install the community edition of Insight Agent from the command line with Helm.

                                                              "},{"location":"end-user/insight/quickstart/install/helm-installagent.html#insight-agent","title":"\u5b89\u88c5 Insight Agent","text":"
                                                              1. \u4f7f\u7528\u4ee5\u4e0b\u547d\u4ee4\u6dfb\u52a0\u955c\u50cf\u4ed3\u5e93\u7684\u5730\u5740

helm repo add insight https://release.daocloud.io/chartrepo/insight\nhelm repo update\nhelm search repo insight/insight-agent --versions\n
2. Installing Insight Agent requires that the Insight Server in the global service cluster is running properly. Run the following command to install the community edition of Insight Agent; this configuration does not enable the Tracing feature:

helm upgrade --install --create-namespace --cleanup-on-fail \\\n    --version ${version} \\      # Specify the version to deploy\n    insight-agent insight/insight-agent \\\n    --set global.exporters.logging.elasticsearch.host=10.10.10.x \\    # Replace "10.10.10.x" with the address of Elasticsearch in the global service cluster or of the external Elasticsearch\n    --set global.exporters.logging.elasticsearch.port=32517 \\     # Replace "32517" with the port exposed by that Elasticsearch\n    --set global.exporters.logging.elasticsearch.user=elastic \\     # Replace "elastic" with the Elasticsearch username\n    --set global.exporters.logging.elasticsearch.password=dangerous \\  # Replace "dangerous" with the Elasticsearch password\n    --set global.exporters.metric.host=${vminsert_address} \\    # Replace with the address of vminsert in the global service cluster\n    --set global.exporters.metric.port=${vminsert_port} \\    # Replace with the port of vminsert in the global service cluster\n    --set global.exporters.auditLog.host=${opentelemetry-collector address} \\     # Replace with the address of opentelemetry-collector in the global service cluster\n    --set global.exporters.auditLog.port=${otel_col_auditlog_port}\\   # Replace with the externally reachable port of the service that maps to container port 8006 of opentelemetry-collector\n    -n insight-system\n

                                                                Info

You can refer to How to Get Connection Addresses for the address information.

3. Run the following commands to confirm the installation status:

                                                                helm list -A\nkubectl get pods -n insight-system\n
                                                              "},{"location":"end-user/insight/quickstart/install/helm-installagent.html#_1","title":"\u5982\u4f55\u83b7\u53d6\u8fde\u63a5\u5730\u5740","text":""},{"location":"end-user/insight/quickstart/install/helm-installagent.html#insight-agent_1","title":"\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u5b89\u88c5 Insight Agent","text":"

If the Agent is installed in the management cluster, it is recommended to access the cluster via service domain names:

export vminsert_host=\"vminsert-insight-victoria-metrics-k8s-stack.insight-system.svc.cluster.local\" # metrics\nexport es_host=\"insight-es-master.insight-system.svc.cluster.local\" # logs\nexport otel_col_host=\"insight-opentelemetry-collector.insight-system.svc.cluster.local\" # traces\n
                                                              "},{"location":"end-user/insight/quickstart/install/helm-installagent.html#insight-agent_2","title":"\u5728\u5de5\u4f5c\u96c6\u7fa4\u5b89\u88c5 Insight Agent","text":"\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4f7f\u7528\u9ed8\u8ba4\u7684 LoadBalancer\u767b\u5f55\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u63a7\u5236\u53f0\u64cd\u4f5c\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4f7f\u7528 Nodeport

When the global service cluster exposes services with the default LoadBalancer method, log in to the console of the global service cluster and run the following commands:

                                                              export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam'\n

You will get the following response:

                                                              {\"global\":{\"exporters\":{\"logging\":{\"output\":\"elasticsearch\",\"elasticsearch\":{\"host\":\"10.6.182.32\"},\"kafka\":{},\"host\":\"10.6.182.32\"},\"metric\":{\"host\":\"10.6.182.32\"},\"auditLog\":    {\"host\":\"10.6.182.32\"}}},\"opentelemetry-operator\":{\"enabled\":true},\"opentelemetry-collector\":{\"enabled\":true}}\n

Where:

• global.exporters.logging.elasticsearch.host is the logging service address (there is no need to set the corresponding service ports, the defaults will be used);
• global.exporters.metric.host is the metrics service address;
• global.exporters.trace.host is the tracing service address;
• global.exporters.auditLog.host is the audit log service address (same service as tracing but a different port);

Log in to the console of the global service cluster and run the following commands:

                                                              kubectl get service -n insight-system | grep lb\nkubectl get service -n mcamel-system | grep es\n

Where:

• lb-vminsert-insight-victoria-metrics-k8s-stack is the address of the metrics service;
• lb-insight-opentelemetry-collector is the address of the tracing service;
• mcamel-common-es-cluster-masters-es-http is the address of the logging service;

When the global service cluster exposes services with the NodePort method, log in to the console of the global service cluster and run the following commands:

                                                              kubectl get service -n insight-system\nkubectl get service -n mcamel-system\n

Where:

• vminsert-insight-victoria-metrics-k8s-stack is the address of the metrics service;
• insight-opentelemetry-collector is the address of the tracing service;
• mcamel-common-es-cluster-masters-es-http is the address of the logging service;
                                                              "},{"location":"end-user/insight/quickstart/install/helm-installagent.html#insight-agent_3","title":"\u5347\u7ea7 Insight Agent","text":"
                                                              1. \u767b\u5f55\u76ee\u6807\u96c6\u7fa4\u7684\u63a7\u5236\u53f0\uff0c\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u5907\u4efd --set \u53c2\u6570\u3002

                                                                helm get values insight-agent -n insight-system -o yaml > insight-agent.yaml\n
2. Run the following command to update the repository.

helm repo update\n
3. Run the following command to perform the upgrade.

helm upgrade insight-agent insight/insight-agent \\\n-n insight-system \\\n-f ./insight-agent.yaml \\\n--version ${version}   # Specify the upgrade version\n
4. Run the following command to confirm the installation status:

                                                                kubectl get pods -n insight-system\n
                                                              "},{"location":"end-user/insight/quickstart/install/helm-installagent.html#insight-agent_4","title":"\u5378\u8f7d Insight Agent","text":"
                                                              helm uninstall insight-agent -n insight-system --timeout 10m\n
                                                              "},{"location":"end-user/insight/quickstart/install/install-agent.html","title":"\u5728\u7ebf\u5b89\u88c5 insight-agent","text":"

insight-agent is the plugin for collecting cluster observability data, supporting unified observation of metrics, traces, and logs. This document describes how to install insight-agent for an integrated cluster in an online environment.

                                                              "},{"location":"end-user/insight/quickstart/install/install-agent.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              • \u96c6\u7fa4\u5df2\u6210\u529f\u63a5\u5165 \u5bb9\u5668\u7ba1\u7406 \u6a21\u5757\u3002\u5982\u4f55\u63a5\u5165\u96c6\u7fa4\uff0c\u8bf7\u53c2\u8003\uff1a\u63a5\u5165\u96c6\u7fa4
                                                              "},{"location":"end-user/insight/quickstart/install/install-agent.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 \u6a21\u5757\uff0c\u5728 \u96c6\u7fa4\u5217\u8868 \u4e2d\u627e\u5230\u8981\u5b89\u88c5 insight-agent \u7684\u96c6\u7fa4\u540d\u79f0\u3002

2. Choose Install Now to jump there, or click the cluster, click Helm Apps -> Helm Charts in the left navigation bar, search for insight-agent, and click the card to enter its details.

3. Review the installation page of insight-agent and click Install to proceed to the next step.

4. Select the version to install, fill in the addresses of the corresponding data storage components in the global service cluster in the form below, confirm that the information is correct, then click OK.

• insight-agent is deployed in the cluster's insight-system namespace by default.
• Installing the latest version of insight-agent is recommended.
• The system pre-fills the addresses of the components to which data is reported; please still verify them before clicking OK to install. To change the data reporting addresses, see: Get the Data Reporting Addresses.

5. The system automatically returns to the Helm Apps list. When the status of the insight-agent application changes from Not Ready to Deployed and all component statuses are Running, the installation has succeeded. After waiting a while, the cluster's data can be viewed in the Observability module.

                                                              Note

• Click ┇ on the far right to perform more operations from the pop-up menu, such as Update, View YAML, and Delete.
                                                              "},{"location":"end-user/insight/quickstart/install/knownissues.html","title":"\u5df2\u77e5\u95ee\u9898","text":"

This page lists some issues related to installing and uninstalling Insight Agent, along with their solutions.

                                                              "},{"location":"end-user/insight/quickstart/install/knownissues.html#v0230","title":"v0.23.0","text":""},{"location":"end-user/insight/quickstart/install/knownissues.html#insight-agent","title":"Insight Agent","text":""},{"location":"end-user/insight/quickstart/install/knownissues.html#insight-agent_1","title":"Insight Agent \u5378\u8f7d\u5931\u8d25","text":"

When you run the following command to uninstall Insight Agent:

helm uninstall insight-agent -n insight-system

the tls secret used by otel-operator is not removed.

In the "reuse tls secret" logic defined by otel-operator, it checks whether the otel-operator MutationConfiguration exists and reuses the CA cert bound in that MutationConfiguration. But because helm uninstall has already removed the MutationConfiguration, a null value results.

Therefore, please delete the corresponding secret manually, using either of the following two methods:

• Delete via the command line: log in to the console of the target cluster and run the following command:

  kubectl -n insight-system delete secret insight-agent-opentelemetry-operator-controller-manager-service-cert

• Delete via the UI: log in to AI platform Container Management, select the target cluster, go to Secrets from the left navigation, enter insight-agent-opentelemetry-operator-controller-manager-service-cert, and delete it.

                                                              "},{"location":"end-user/insight/quickstart/install/knownissues.html#v0220","title":"v0.22.0","text":""},{"location":"end-user/insight/quickstart/install/knownissues.html#insight-agent_2","title":"Insight Agent","text":""},{"location":"end-user/insight/quickstart/install/knownissues.html#insight-agent_3","title":"\u5347\u7ea7 Insight Agent \u65f6\u66f4\u65b0\u65e5\u5fd7\u6536\u96c6\u7aef\uff0c\u672a\u751f\u6548","text":"

                                                              \u66f4\u65b0 insight-agent \u65e5\u5fd7\u914d\u7f6e\u4ece elasticsearch \u6539\u4e3a kafka \u6216\u8005\u4ece kafka \u6539\u4e3a elasticsearch\uff0c\u5b9e\u9645\u4e0a\u90fd\u672a\u751f\u6548\uff0c\u8fd8\u662f\u4f7f\u7528\u66f4\u65b0\u524d\u914d\u7f6e\u3002

Solution:

Manually restart fluentbit in the cluster.
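A sketch of such a restart, assuming the Fluent Bit workload deployed by insight-agent is a DaemonSet named insight-agent-fluent-bit (verify the actual name in your cluster first):

# Find the Fluent Bit workload deployed by insight-agent
kubectl -n insight-system get daemonset | grep -i fluent

# Restart it so the new log export configuration is picked up
kubectl -n insight-system rollout restart daemonset/insight-agent-fluent-bit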

                                                              "},{"location":"end-user/insight/quickstart/install/knownissues.html#v0210","title":"v0.21.0","text":""},{"location":"end-user/insight/quickstart/install/knownissues.html#insight-agent_4","title":"Insight Agent","text":""},{"location":"end-user/insight/quickstart/install/knownissues.html#podmonitor-jvm","title":"PodMonitor \u91c7\u96c6\u591a\u4efd JVM \u6307\u6807\u6570\u636e","text":"
                                                              1. \u8fd9\u4e2a\u7248\u672c\u7684 PodMonitor/insight-kubernetes-pod \u5b58\u5728\u7f3a\u9677\uff1a\u4f1a\u9519\u8bef\u5730\u521b\u5efa Job \u53bb\u91c7\u96c6\u6807\u8bb0\u4e86 insight.opentelemetry.io/metric-scrape=true \u7684 Pod \u7684\u6240\u6709 container\uff1b\u800c\u5b9e\u9645\u4e0a\u53ea\u9700\u91c7\u96c6 insight.opentelemetry.io/metric-port \u6240\u5bf9\u5e94 container \u7684\u7aef\u53e3\u3002

                                                              2. \u56e0\u4e3a PodMonitor \u58f0\u660e\u4e4b\u540e\uff0cPromethuesOperator \u4f1a\u9884\u8bbe\u7f6e\u4e00\u4e9b\u670d\u52a1\u53d1\u73b0\u914d\u7f6e\u3002 \u518d\u8003\u8651\u5230 CRD \u7684\u517c\u5bb9\u6027\u7684\u95ee\u9898\u3002\u56e0\u6b64\uff0c\u653e\u5f03\u901a\u8fc7 PodMonitor \u6765\u914d\u7f6e\u901a\u8fc7 annotation \u521b\u5efa\u91c7\u96c6\u4efb\u52a1\u7684\u673a\u5236\u3002

                                                              3. \u901a\u8fc7 Prometheus \u81ea\u5e26\u7684 additional scrape config \u673a\u5236\uff0c\u5c06\u670d\u52a1\u53d1\u73b0\u89c4\u5219\u914d\u7f6e\u5728 secret \u4e2d\uff0c\u5728\u5f15\u5165 Prometheus \u91cc\u3002

                                                              \u7efc\u4e0a\uff1a

                                                              1. \u5220\u9664\u8fd9\u4e2a PodMonitor \u7684\u5f53\u524d insight-kubernetes-pod
                                                              2. \u4f7f\u7528\u65b0\u7684\u89c4\u5219
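For step 1, a minimal command sketch (assuming the PodMonitor was created in the insight-system namespace):

kubectl -n insight-system delete podmonitor insight-kubernetes-pod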

In the new rule, action: keepequal is used to compare source_labels with target_label to decide whether a scrape job should be created for a given container port. Note that this capability is only available in Prometheus 2.41.0 (2022-12-20) and later.

+    - source_labels: [__meta_kubernetes_pod_annotation_insight_opentelemetry_io_metric_port]
+      separator: ;
+      target_label: __meta_kubernetes_pod_container_port_number
+      action: keepequal
                                                              "},{"location":"end-user/insight/quickstart/install/upgrade-note.html","title":"\u5347\u7ea7\u6ce8\u610f\u4e8b\u9879","text":"

                                                              \u672c\u9875\u4ecb\u7ecd\u4e00\u4e9b\u5347\u7ea7 insight-server \u548c insight-agent \u7684\u6ce8\u610f\u4e8b\u9879\u3002

                                                              "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#insight-agent","title":"insight-agent","text":""},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v028x-v029x","title":"\u4ece v0.28.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.29.x","text":"

                                                              \u7531\u4e8e v0.29.0 \u5347\u7ea7\u4e86 Opentelemetry \u793e\u533a\u7684 operator chart \u7248\u672c\uff0cvalues \u4e2d\u7684 featureGates \u7684\u652f\u6301\u7684\u503c\u6709\u6240\u53d8\u5316\uff0c\u56e0\u6b64\uff0c\u5728 upgrade \u4e4b\u524d\uff0c\u9700\u8981\u5c06 featureGates \u7684\u503c\u8bbe\u7f6e\u4e3a\u7a7a, \u5373\uff1a

                                                              -  --set opentelemetry-operator.manager.featureGates=\"+operator.autoinstrumentation.go,+operator.autoinstrumentation.multi-instrumentation,+operator.autoinstrumentation.nginx\" \\\n+  --set opentelemetry-operator.manager.featureGates=\"\"\n
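In the context of a full upgrade command this looks roughly as follows; this is a sketch only, and the repository/chart name insight-release/insight-agent is an assumption that depends on your environment:

helm upgrade insight-agent insight-release/insight-agent \
  -n insight-system \
  --set opentelemetry-operator.manager.featureGates=""   # keep your other --set values unchanged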
                                                              "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#insight-server","title":"insight-server","text":""},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v026x-v027x","title":"\u4ece v0.26.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.27.x \u6216\u66f4\u9ad8\u7248\u672c","text":"

                                                              \u5728 v0.27.x \u7248\u672c\u4e2d\u5c06 vector \u7ec4\u4ef6\u7684\u5f00\u5173\u5355\u72ec\u62bd\u51fa\u3002\u6545\u539f\u6709\u73af\u5883\u5f00\u542f\u4e86 vector\uff0c\u90a3\u5728\u5347\u7ea7 insight-server \u65f6\uff0c\u9700\u8981\u6307\u5b9a --set vector.enabled=true \u3002
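For example, a sketch (the repository/chart name insight-release/insight is an assumption; keep your other values as they are):

helm upgrade insight insight-release/insight \
  -n insight-system \
  --set vector.enabled=true   # required if vector was enabled before v0.27.x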

                                                              "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v019x-020x","title":"\u4ece v0.19.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 0.20.x","text":"

                                                              \u5728\u5347\u7ea7 Insight \u4e4b\u524d\uff0c\u60a8\u9700\u8981\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u624b\u52a8\u5220\u9664 jaeger-collector \u548c jaeger-query \u90e8\u7f72\uff1a

                                                              kubectl -n insight-system delete deployment insight-jaeger-collector\nkubectl -n insight-system delete deployment insight-jaeger-query\n
                                                              "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v017x-v018x","title":"\u4ece v0.17.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.18.x","text":"

                                                              \u7531\u4e8e 0.18.x \u4e2d\u66f4\u65b0\u4e86 Jaeger \u76f8\u5173\u90e8\u7f72\u6587\u4ef6\uff0c\u56e0\u6b64\u9700\u8981\u5728\u5347\u7ea7 insight-server \u524d\u624b\u52a8\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff1a

                                                              kubectl -n insight-system delete deployment insight-jaeger-collector\nkubectl -n insight-system delete deployment insight-jaeger-query\n

                                                              \u7531\u4e8e 0.18.x \u4e2d\u6307\u6807\u540d\u4ea7\u751f\u4e86\u53d8\u52a8\uff0c\u56e0\u6b64\uff0c\u9700\u8981\u5728\u5347\u7ea7 insight-server \u4e4b\u540e\uff0cinsight-agent \u4e5f\u5e94\u8be5\u505a\u5347\u7ea7\u3002

                                                              \u6b64\u5916\uff0c\u8c03\u6574\u4e86\u5f00\u542f\u94fe\u8def\u6a21\u5757\u7684\u53c2\u6570\uff0c\u4ee5\u53ca ElasticSearch \u8fde\u63a5\u8c03\u6574\u3002\u5177\u4f53\u53c2\u8003\u4ee5\u4e0b\u53c2\u6570\uff1a

                                                              +  --set global.tracing.enable=true \\\n-  --set jaeger.collector.enabled=true \\\n-  --set jaeger.query.enabled=true \\\n+  --set global.elasticsearch.scheme=${your-external-elasticsearch-scheme} \\\n+  --set global.elasticsearch.host=${your-external-elasticsearch-host} \\\n+  --set global.elasticsearch.port=${your-external-elasticsearch-port} \\\n+  --set global.elasticsearch.user=${your-external-elasticsearch-username} \\\n+  --set global.elasticsearch.password=${your-external-elasticsearch-password} \\\n-  --set jaeger.storage.elasticsearch.scheme=${your-external-elasticsearch-scheme} \\\n-  --set jaeger.storage.elasticsearch.host=${your-external-elasticsearch-host} \\\n-  --set jaeger.storage.elasticsearch.port=${your-external-elasticsearch-port} \\\n-  --set jaeger.storage.elasticsearch.user=${your-external-elasticsearch-username} \\\n-  --set jaeger.storage.elasticsearch.password=${your-external-elasticsearch-password} \\\n
                                                              "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v015x-v016x","title":"\u4ece v0.15.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.16.x","text":"

                                                              \u7531\u4e8e 0.16.x \u4e2d\u4f7f\u7528\u4e86 vmalertmanagers CRD \u7684\u65b0\u7279\u6027\u53c2\u6570 disableRouteContinueEnforce\uff0c \u56e0\u6b64\u9700\u8981\u5728\u5347\u7ea7 insight-server \u524d\u624b\u52a8\u6267\u884c\u5982\u4e0b\u547d\u4ee4\u3002

                                                              kubectl apply --server-side -f https://raw.githubusercontent.com/VictoriaMetrics/operator/v0.33.0/config/crd/bases/operator.victoriametrics.com_vmalertmanagers.yaml --force-conflicts\n

                                                              Note

For offline installations, after extracting the Insight offline package, run the following command to update the CRD:

kubectl apply --server-side -f insight/dependency-crds --force-conflicts
                                                              "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#insight-agent_1","title":"insight-agent","text":""},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v023x-v024x","title":"\u4ece v0.23.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.24.x","text":"

                                                              \u7531\u4e8e 0.24.x \u7248\u672c\u4e2d OTEL operator chart \u4e2d\u65b0\u589e\u4e86 CRD\uff0c\u4f46\u7531\u4e8e Helm Upgrade \u65f6\u5e76\u4e0d\u4f1a\u66f4\u65b0 CRD\uff0c\u56e0\u6b64\uff0c\u9700\u8981\u624b\u52a8\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a

                                                              kubectl apply -f https://raw.githubusercontent.com/open-telemetry/opentelemetry-helm-charts/main/charts/opentelemetry-operator/crds/crd-opentelemetry.io_opampbridges.yaml\n

                                                              \u5982\u60a8\u662f\u79bb\u7ebf\u5b89\u88c5\uff0c\u53ef\u4ee5\u5728\u89e3\u538b insight-agent \u79bb\u7ebf\u5305\u540e\u53ef\u627e\u5230\u4e0a\u8ff0 CRD \u7684 yaml\uff0c\u89e3\u538b Insight-Agent Chart \u4e4b\u540e\u624b\u52a8\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a

                                                              kubectl apply -f charts/agent/crds/crd-opentelemetry.io_opampbridges.yaml\n
                                                              "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v019x-v020x","title":"\u4ece v0.19.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.20.x","text":"

                                                              \u7531\u4e8e 0.20.x \u4e2d\u589e\u52a0\u4e86 Kafka \u65e5\u5fd7\u5bfc\u51fa\u914d\u7f6e\uff0c\u65e5\u5fd7\u5bfc\u51fa\u914d\u7f6e\u505a\u4e86\u4e00\u4e9b\u8c03\u6574\u3002\u5347\u7ea7 insight-agent \u4e4b\u524d\u9700\u8981\u6ce8\u610f\u53c2\u6570\u53d8\u5316\uff0c \u5373\u539f\u6765 logging \u7684\u914d\u7f6e\u5df2\u7ecf\u79fb\u5230\u4e86\u914d\u7f6e\u4e2d logging.elasticsearch\uff1a

                                                              -  --set global.exporters.logging.host \\\n-  --set global.exporters.logging.port \\\n+  --set global.exporters.logging.elasticsearch.host \\\n+  --set global.exporters.logging.elasticsearch.port \\\n
                                                              "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v017x-v018x_1","title":"\u4ece v0.17.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.18.x","text":"

                                                              \u7531\u4e8e 0.18.x \u4e2d\u66f4\u65b0\u4e86 Jaeger \u76f8\u5173\u90e8\u7f72\u6587\u4ef6\uff0c\u56e0\u6b64\u9700\u8981\u5728\u5347\u7ea7 insight-agent \u524d\u9700\u8981\u6ce8\u610f\u53c2\u6570\u7684\u6539\u52a8\u3002

                                                              +  --set global.exporters.trace.enable=true \\\n-  --set opentelemetry-collector.enabled=true \\\n-  --set opentelemetry-operator.enabled=true \\\n
                                                              "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v016x-v017x","title":"\u4ece v0.16.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.17.x","text":"

                                                              \u5728 v0.17.x \u7248\u672c\u4e2d\u5c06 kube-prometheus-stack chart \u7248\u672c\u4ece 41.9.1 \u5347\u7ea7\u81f3 45.28.1, \u5176\u4e2d\u4f7f\u7528\u7684 CRD \u4e5f\u5b58\u5728\u4e00\u4e9b\u5b57\u6bb5\u7684\u5347\u7ea7\uff0c\u5982 servicemonitor \u7684 attachMetadata \u5b57\u6bb5\uff0c\u56e0\u6b64\u9700\u8981\u5728\u5347\u7ea7 insight-agent \u524d\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff1a

                                                              kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --force-conflicts\n

                                                              \u5982\u60a8\u662f\u79bb\u7ebf\u5b89\u88c5\uff0c\u53ef\u4ee5\u5728\u89e3\u538b insight-agent \u79bb\u7ebf\u5305\u540e\uff0c\u5728 insight-agent/dependency-crds \u4e2d\u627e\u5230\u4e0a\u8ff0 CRD \u7684 yaml\u3002
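Applying the CRDs from there mirrors the online command above, for example:

kubectl apply --server-side -f insight-agent/dependency-crds --force-conflicts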

                                                              "},{"location":"end-user/insight/quickstart/install/upgrade-note.html#v011x-v012x","title":"\u4ece v0.11.x\uff08\u6216\u66f4\u4f4e\u7248\u672c\uff09\u5347\u7ea7\u5230 v0.12.x","text":"

                                                              \u5728 v0.12.x \u5c06 kube-prometheus-stack chart \u4ece 39.6.0 \u5347\u7ea7\u5230 41.9.1\uff0c\u5176\u4e2d\u5305\u62ec prometheus-operator \u5347\u7ea7\u5230 v0.60.1, prometheus-node-exporter chart \u5347\u7ea7\u5230 4.3.0 \u7b49\u3002 prometheus-node-exporter \u5347\u7ea7\u540e\u4f7f\u7528\u4e86 Kubernetes \u63a8\u8350 label\uff0c\u56e0\u6b64\u9700\u8981\u5728\u5347\u7ea7\u524d\u5220\u9664 node-exporter \u7684 DaemonSet\u3002 prometheus-operator \u66f4\u65b0\u4e86 CRD\uff0c\u56e0\u6b64\u9700\u8981\u5728\u5347\u7ea7 insight-agent \u524d\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff1a

                                                              kubectl delete daemonset insight-agent-prometheus-node-exporter -n insight-system\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml --force-conflicts\n

                                                              Note

For offline installations, after extracting the insight-agent offline package, run the following command to update the CRDs:

kubectl apply --server-side -f insight-agent/dependency-crds --force-conflicts
                                                              "},{"location":"end-user/insight/quickstart/otel/operator.html","title":"\u901a\u8fc7 Operator \u5b9e\u73b0\u5e94\u7528\u7a0b\u5e8f\u65e0\u4fb5\u5165\u589e\u5f3a","text":"

                                                              \u76ee\u524d\u53ea\u6709 Java\u3001NodeJs\u3001Python\u3001.Net\u3001Golang \u652f\u6301 Operator \u7684\u65b9\u5f0f\u65e0\u4fb5\u5165\u63a5\u5165\u3002

                                                              "},{"location":"end-user/insight/quickstart/otel/operator.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u8bf7\u786e\u4fdd insight-agent \u5df2\u7ecf\u5c31\u7eea\u3002\u5982\u82e5\u6ca1\u6709\uff0c\u8bf7\u53c2\u8003\u5b89\u88c5 insight-agent \u91c7\u96c6\u6570\u636e\u5e76\u786e\u4fdd\u4ee5\u4e0b\u4e09\u9879\u5c31\u7eea\uff1a

                                                              • \u4e3a insight-agent \u5f00\u542f trace \u529f\u80fd
                                                              • trace \u6570\u636e\u7684\u5730\u5740\u4ee5\u53ca\u7aef\u53e3\u662f\u5426\u586b\u5199\u6b63\u786e
                                                              • deployment/insight-agent-opentelemetry-operator \u548c deployment/insight-agent-opentelemetry-collector \u5bf9\u5e94\u7684 Pod \u5df2\u7ecf\u51c6\u5907\u5c31\u7eea
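A quick command-line check for the third item, using the deployment names listed above (both should report their replicas as ready):

kubectl -n insight-system get deployment insight-agent-opentelemetry-operator insight-agent-opentelemetry-collector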
                                                              "},{"location":"end-user/insight/quickstart/otel/operator.html#instrumentation-cr","title":"\u5b89\u88c5 Instrumentation CR","text":"

                                                              Tip

Starting from Insight v0.22.0, manually installing the Instrumentation CR is no longer required.

Install it in the insight-system namespace. There are some minor differences between versions:

Insight v0.21.x | Insight v0.20.x | Insight v0.18.x | Insight v0.17.x | Insight v0.16.x
                                                              K8S_CLUSTER_UID=$(kubectl get namespace kube-system -o jsonpath='{.metadata.uid}')\nkubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/openinsight-proj/autoinstrumentation-java:1.31.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n      - name: OTEL_K8S_CLUSTER_UID\n        value: $K8S_CLUSTER_UID\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
                                                              kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.29.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0-rc.2\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
                                                              kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.25.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.37.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.38b0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.1-alpha\nEOF\n
                                                              kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.23.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.34.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.33b0\nEOF\n
                                                              kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.23.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.34.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.33b0\nEOF\n
                                                              "},{"location":"end-user/insight/quickstart/otel/operator.html#_2","title":"\u4e0e\u670d\u52a1\u7f51\u683c\u94fe\u8def\u4e32\u8054\u573a\u666f","text":"

                                                              \u5982\u679c\u60a8\u5f00\u542f\u4e86\u670d\u52a1\u7f51\u683c\u7684\u94fe\u8def\u8ffd\u8e2a\u80fd\u529b\uff0c\u9700\u8981\u989d\u5916\u589e\u52a0\u4e00\u4e2a\u73af\u5883\u53d8\u91cf\u6ce8\u5165\u7684\u914d\u7f6e\uff1a

                                                              "},{"location":"end-user/insight/quickstart/otel/operator.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4\u5982\u4e0b","text":"
                                                              1. \u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3.0\uff0c\u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 \u540e\u9009\u62e9\u8fdb\u5165\u76ee\u6807\u96c6\u7fa4\uff0c
                                                              2. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u9009\u62e9 \u81ea\u5b9a\u4e49\u8d44\u6e90 \uff0c\u627e\u5230 instrumentations.opentelemetry.io \u540e\u8fdb\u5165\u8be6\u60c5\u9875\u3002
                                                              3. \u9009\u62e9 insight-system \u547d\u540d\u7a7a\u95f4\u540e\uff0c\u7f16\u8f91 insight-opentelemetry-autoinstrumentation \uff0c\u5728 spec:env: \u4e0b\u6dfb\u52a0\u4ee5\u4e0b\u5185\u5bb9\uff1a

    - name: OTEL_SERVICE_NAME
      valueFrom:
        fieldRef:
          fieldPath: metadata.labels['app']

The complete command is as follows (for Insight v0.21.x):

                                                                K8S_CLUSTER_UID=$(kubectl get namespace kube-system -o jsonpath='{.metadata.uid}')\nkubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n    - name: OTEL_SERVICE_NAME\n      valueFrom:\n        fieldRef:\n          fieldPath: metadata.labels['app'] \n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/openinsight-proj/autoinstrumentation-java:1.31.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n      - name: OTEL_K8S_CLUSTER_UID\n        value: $K8S_CLUSTER_UID\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
                                                              "},{"location":"end-user/insight/quickstart/otel/operator.html#_4","title":"\u6dfb\u52a0\u6ce8\u89e3\uff0c\u81ea\u52a8\u63a5\u5165\u94fe\u8def","text":"

                                                              \u4ee5\u4e0a\u5c31\u7eea\u4e4b\u540e\uff0c\u60a8\u5c31\u53ef\u4ee5\u901a\u8fc7\u6ce8\u89e3\uff08Annotation\uff09\u65b9\u5f0f\u4e3a\u5e94\u7528\u7a0b\u5e8f\u63a5\u5165\u94fe\u8def\u8ffd\u8e2a\u4e86\uff0cOTel \u76ee\u524d\u652f\u6301\u901a\u8fc7\u6ce8\u89e3\u7684\u65b9\u5f0f\u63a5\u5165\u94fe\u8def\u3002 \u6839\u636e\u670d\u52a1\u8bed\u8a00\uff0c\u9700\u8981\u6dfb\u52a0\u4e0a\u4e0d\u540c\u7684 pod annotations\u3002\u6bcf\u4e2a\u670d\u52a1\u53ef\u6dfb\u52a0\u4e24\u7c7b\u6ce8\u89e3\u4e4b\u4e00\uff1a

• Env-var-only injection annotation

  There is only one annotation of this class. It adds the OTel-related environment variables, such as the trace reporting address, the ID of the cluster the container runs in, the namespace, and so on (this annotation is very useful when the application's language does not support an auto-instrumentation probe).

                                                                instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

The value is split by / into two parts: the first value (insight-system) is the namespace of the CR installed in the previous step, and the second value (insight-opentelemetry-autoinstrumentation) is the name of that CR.

• Auto-instrumentation plus env-var injection annotations

  There are currently four annotations of this class, corresponding to four programming languages: java, nodejs, python, and dotnet. Using one of them injects the auto-instrumentation probe and the default OTel environment variables into the first container under spec.pod:

  Java apps | NodeJS apps | Python apps | .NET apps | Golang apps
                                                                instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                                                instrumentation.opentelemetry.io/inject-nodejs: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                                                instrumentation.opentelemetry.io/inject-python: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                                                instrumentation.opentelemetry.io/inject-dotnet: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

Because Go auto-instrumentation requires OTEL_GO_AUTO_TARGET_EXE to be set, you must provide a valid executable path via the annotation or the Instrumentation resource. Failing to set this value aborts the Go auto-instrumentation injection, which means trace integration fails.

instrumentation.opentelemetry.io/inject-go: "insight-system/insight-opentelemetry-autoinstrumentation"
instrumentation.opentelemetry.io/otel-go-auto-target-exe: "/path/to/container/executable"

Go auto-instrumentation also requires elevated privileges. The following permissions are set automatically and are required:

securityContext:
  privileged: true
  runAsUser: 0
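Either class of annotation can also be added to an existing workload without editing its manifest by hand. A sketch using a hypothetical Deployment named my-app; note that the annotation must land on the Pod template, which also triggers a rollout so the injection takes effect:

kubectl patch deployment my-app --type merge -p '{
  "spec": {
    "template": {
      "metadata": {
        "annotations": {
          "instrumentation.opentelemetry.io/inject-java": "insight-system/insight-opentelemetry-autoinstrumentation"
        }
      }
    }
  }
}'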

                                                              Tip

When injecting the probe, the OpenTelemetry Operator automatically adds some OTel-related environment variables, and it also supports overriding them. The override precedence of these environment variables is:

                                                              original container env vars -> language specific env vars -> common env vars -> instrument spec configs' vars\n

However, avoid manually overriding OTEL_RESOURCE_ATTRIBUTES_NODE_NAME. The Operator uses it internally as a marker for whether a Pod has already been injected; if it is added manually, the probe may fail to be injected.

                                                              "},{"location":"end-user/insight/quickstart/otel/operator.html#demo","title":"\u81ea\u52a8\u6ce8\u5165\u793a\u4f8b Demo","text":"

                                                              \u6ce8\u610f\u8fd9\u4e2a annotations \u662f\u52a0\u5728 spec.annotations \u4e0b\u7684\u3002

                                                              apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-app\n  labels:\n    app: my-app\nspec:\n  selector:\n    matchLabels:\n      app: my-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-app\n      annotations:\n        instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n    spec:\n      containers:\n      - name: myapp\n        image: jaegertracing/vertx-create-span:operator-e2e-tests\n        ports:\n          - containerPort: 8080\n            protocol: TCP\n

The final generated Pod YAML looks like this:

                                                              apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-deployment-with-sidecar-565bd877dd-nqkk6\n  generateName: my-deployment-with-sidecar-565bd877dd-\n  namespace: default\n  uid: aa89ca0d-620c-4d20-8bc1-37d67bad4ea4\n  resourceVersion: '2668986'\n  creationTimestamp: '2022-04-08T05:58:48Z'\n  labels:\n    app: my-pod-with-sidecar\n    pod-template-hash: 565bd877dd\n  annotations:\n    cni.projectcalico.org/containerID: 234eae5e55ea53db2a4bc2c0384b9a1021ed3908f82a675e4a92a49a7e80dd61\n    cni.projectcalico.org/podIP: 192.168.134.133/32\n    cni.projectcalico.org/podIPs: 192.168.134.133/32\n    instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\nspec:\n  volumes:\n    - name: kube-api-access-sp2mz\n      projected:\n        sources:\n          - serviceAccountToken:\n              expirationSeconds: 3607\n              path: token\n          - configMap:\n              name: kube-root-ca.crt\n              items:\n                - key: ca.crt\n                  path: ca.crt\n          - downwardAPI:\n              items:\n                - path: namespace\n                  fieldRef:\n                    apiVersion: v1\n                    fieldPath: metadata.namespace\n        defaultMode: 420\n    - name: opentelemetry-auto-instrumentation\n      emptyDir: {}\n  initContainers:\n    - name: opentelemetry-auto-instrumentation\n      image: >-\n        ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java\n      command:\n        - cp\n        - /javaagent.jar\n        - /otel-auto-instrumentation/javaagent.jar\n      resources: {}\n      volumeMounts:\n        - name: opentelemetry-auto-instrumentation\n          mountPath: /otel-auto-instrumentation\n        - name: kube-api-access-sp2mz\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  containers:\n    - name: myapp\n      image: ghcr.io/pavolloffay/spring-petclinic:latest\n      env:\n        - name: OTEL_JAVAAGENT_DEBUG\n          value: 'true'\n        - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n          value: 'true'\n        - name: SPLUNK_PROFILER_ENABLED\n          value: 'false'\n        - name: JAVA_TOOL_OPTIONS\n          value: ' -javaagent:/otel-auto-instrumentation/javaagent.jar'\n        - name: OTEL_TRACES_EXPORTER\n          value: otlp\n        - name: OTEL_EXPORTER_OTLP_ENDPOINT\n          value: http://insight-agent-opentelemetry-collector.svc.cluster.local:4317\n        - name: OTEL_EXPORTER_OTLP_TIMEOUT\n          value: '20'\n        - name: OTEL_TRACES_SAMPLER\n          value: parentbased_traceidratio\n        - name: OTEL_TRACES_SAMPLER_ARG\n          value: '0.85'\n        - name: SPLUNK_TRACE_RESPONSE_HEADER_ENABLED\n          value: 'true'\n        - name: OTEL_SERVICE_NAME\n          value: my-deployment-with-sidecar\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.name\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_UID\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.uid\n        - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: 
spec.nodeName\n        - name: OTEL_RESOURCE_ATTRIBUTES\n          value: >-\n            k8s.container.name=myapp,k8s.deployment.name=my-deployment-with-sidecar,k8s.deployment.uid=8de6929d-dda0-436c-bca1-604e9ca7ea4e,k8s.namespace.name=default,k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME),k8s.pod.uid=$(OTEL_RESOURCE_ATTRIBUTES_POD_UID),k8s.replicaset.name=my-deployment-with-sidecar-565bd877dd,k8s.replicaset.uid=190d5f6e-ba7f-4794-b2e6-390b5879a6c4\n        - name: OTEL_PROPAGATORS\n          value: jaeger,b3\n      resources: {}\n      volumeMounts:\n        - name: kube-api-access-sp2mz\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n        - name: opentelemetry-auto-instrumentation\n          mountPath: /otel-auto-instrumentation\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  restartPolicy: Always\n  terminationGracePeriodSeconds: 30\n  dnsPolicy: ClusterFirst\n  serviceAccountName: default\n  serviceAccount: default\n  nodeName: k8s-master3\n  securityContext:\n    runAsUser: 1000\n    runAsGroup: 3000\n    fsGroup: 2000\n  schedulerName: default-scheduler\n  tolerations:\n    - key: node.kubernetes.io/not-ready\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n    - key: node.kubernetes.io/unreachable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n  priority: 0\n  enableServiceLinks: true\n  preemptionPolicy: PreemptLowerPriority\n
                                                              "},{"location":"end-user/insight/quickstart/otel/operator.html#_5","title":"\u94fe\u8def\u67e5\u8be2","text":"

                                                              \u5982\u4f55\u67e5\u8be2\u5df2\u7ecf\u63a5\u5165\u7684\u670d\u52a1\uff0c\u53c2\u8003\u94fe\u8def\u67e5\u8be2\u3002

                                                              "},{"location":"end-user/insight/quickstart/otel/otel.html","title":"\u4f7f\u7528 OTel \u8d4b\u4e88\u5e94\u7528\u53ef\u89c2\u6d4b\u6027","text":"

                                                              \u589e\u5f3a\u662f\u4f7f\u5e94\u7528\u7a0b\u5e8f\u4ee3\u7801\u80fd\u591f\u751f\u6210\u9065\u6d4b\u6570\u636e\u7684\u8fc7\u7a0b\u3002\u5373\u4e00\u4e9b\u53ef\u4ee5\u5e2e\u52a9\u60a8\u76d1\u89c6\u6216\u6d4b\u91cf\u5e94\u7528\u7a0b\u5e8f\u7684\u6027\u80fd\u548c\u72b6\u6001\u7684\u4e1c\u897f\u3002

                                                              OpenTelemetry \u662f\u9886\u5148\u7684\u5f00\u6e90\u9879\u76ee\uff0c\u4e3a\u4e3b\u8981\u7f16\u7a0b\u8bed\u8a00\u548c\u6d41\u884c\u6846\u67b6\u63d0\u4f9b\u68c0\u6d4b\u5e93\u3002\u5b83\u662f\u4e91\u539f\u751f\u8ba1\u7b97\u57fa\u91d1\u4f1a\u4e0b\u7684\u4e00\u4e2a\u9879\u76ee\uff0c\u5f97\u5230\u4e86\u793e\u533a\u5e9e\u5927\u8d44\u6e90\u7684\u652f\u6301\u3002 \u5b83\u4e3a\u91c7\u96c6\u7684\u6570\u636e\u63d0\u4f9b\u6807\u51c6\u5316\u7684\u6570\u636e\u683c\u5f0f\uff0c\u65e0\u9700\u96c6\u6210\u7279\u5b9a\u7684\u4f9b\u5e94\u5546\u3002

                                                              Insight \u652f\u6301\u7528\u4e8e\u68c0\u6d4b\u5e94\u7528\u7a0b\u5e8f\u7684 OpenTelemetry \u6765\u589e\u5f3a\u60a8\u7684\u5e94\u7528\u7a0b\u5e8f\u3002

                                                              \u672c\u6307\u5357\u4ecb\u7ecd\u4e86\u4f7f\u7528 OpenTelemetry \u8fdb\u884c\u9065\u6d4b\u589e\u5f3a\u7684\u57fa\u672c\u6982\u5ff5\u3002 OpenTelemetry \u8fd8\u6709\u4e00\u4e2a\u7531\u5e93\u3001\u63d2\u4ef6\u3001\u96c6\u6210\u548c\u5176\u4ed6\u6709\u7528\u5de5\u5177\u7ec4\u6210\u7684\u751f\u6001\u7cfb\u7edf\u6765\u6269\u5c55\u5b83\u3002 \u60a8\u53ef\u4ee5\u5728 Otel Registry \u4e2d\u627e\u5230\u8fd9\u4e9b\u8d44\u6e90\u3002

                                                              \u60a8\u53ef\u4ee5\u4f7f\u7528\u4efb\u4f55\u5f00\u653e\u6807\u51c6\u5e93\u8fdb\u884c\u9065\u6d4b\u589e\u5f3a\uff0c\u5e76\u4f7f\u7528 Insight \u4f5c\u4e3a\u53ef\u89c2\u5bdf\u6027\u540e\u7aef\u6765\u6444\u53d6\u3001\u5206\u6790\u548c\u53ef\u89c6\u5316\u6570\u636e\u3002

                                                              \u4e3a\u4e86\u589e\u5f3a\u60a8\u7684\u4ee3\u7801\uff0c\u60a8\u53ef\u4ee5\u4f7f\u7528 OpenTelemetry \u4e3a\u7279\u5b9a\u8bed\u8a00\u63d0\u4f9b\u7684\u589e\u5f3a\u64cd\u4f5c\uff1a

                                                              Insight \u76ee\u524d\u63d0\u4f9b\u4e86\u4f7f\u7528 OpenTelemetry \u589e\u5f3a .Net NodeJS\u3001Java\u3001Python \u548c Golang \u5e94\u7528\u7a0b\u5e8f\u7684\u7b80\u5355\u65b9\u6cd5\u3002\u8bf7\u9075\u5faa\u4ee5\u4e0b\u6307\u5357\u3002

                                                              "},{"location":"end-user/insight/quickstart/otel/otel.html#_1","title":"\u94fe\u8def\u589e\u5f3a","text":"
                                                              • \u94fe\u8def\u63a5\u5165\u7684\u6700\u4f73\u5b9e\u8df5\uff1a\u901a\u8fc7 Operator \u5b9e\u73b0\u5e94\u7528\u7a0b\u5e8f\u65e0\u4fb5\u5165\u589e\u5f3a
                                                              • \u4ee5 Go \u8bed\u8a00\u4e3a\u4f8b\u7684\u624b\u52a8\u57cb\u70b9\u63a5\u5165\uff1a\u4f7f\u7528 OpenTelemetry SDK \u589e\u5f3a Go \u5e94\u7528\u7a0b\u5e8f
                                                              • \u5229\u7528 ebpf \u5b9e\u73b0 Go \u8bed\u8a00\u65e0\u4fb5\u5165\u63a2\u9488\uff08\u5b9e\u9a8c\u6027\u529f\u80fd\uff09
                                                              "},{"location":"end-user/insight/quickstart/otel/send_tracing_to_insight.html","title":"\u5411 Insight \u53d1\u9001\u94fe\u8def\u6570\u636e","text":"

                                                              \u6b64\u6587\u6863\u4e3b\u8981\u63cf\u8ff0\u5ba2\u6237\u5e94\u7528\u5982\u4f55\u81ea\u884c\u5c06\u94fe\u8def\u6570\u636e\u4e0a\u62a5\u7ed9 Insight\u3002\u4e3b\u8981\u5305\u542b\u5982\u4e0b\u4e24\u79cd\u573a\u666f\uff1a

                                                              1. \u5ba2\u6237\u5e94\u7528\u901a\u8fc7 OTEL Agent/SDK \u4e0a\u62a5\u94fe\u8def\u7ed9 Insight
                                                              2. \u901a\u8fc7 Opentelemtry Collector(\u7b80\u79f0 OTEL COL) \u5c06\u94fe\u8def\u8f6c\u53d1\u7ed9 Insight

                                                              \u5728\u6bcf\u4e2a\u5df2\u5b89\u88c5 Insight Agent \u7684\u96c6\u7fa4\u4e2d\u90fd\u6709 insight-agent-otel-col \u7ec4\u4ef6\u7528\u4e8e\u7edf\u4e00\u63a5\u6536\u8be5\u96c6\u7fa4\u7684\u94fe\u8def\u6570\u636e\u3002 \u56e0\u6b64\uff0c\u8be5\u7ec4\u4ef6\u4f5c\u4e3a\u7528\u6237\u63a5\u5165\u4fa7\u7684\u5165\u53e3\uff0c\u9700\u8981\u5148\u83b7\u53d6\u8be5\u5730\u5740\u3002\u53ef\u4ee5\u901a\u8fc7 AI \u7b97\u529b\u4e2d\u5fc3 \u754c\u9762\u83b7\u53d6\u8be5\u96c6\u7fa4 Opentelemtry Collector \u7684\u5730\u5740\uff0c \u6bd4\u5982 insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317 \uff1a
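If you prefer the command line to the interface, the address can also be confirmed directly; a sketch using the default names from this page:

kubectl -n insight-system get service insight-agent-opentelemetry-collector
# OTLP gRPC listens on port 4317, so the in-cluster address is:
# insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317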

Apart from this, there are some subtle differences depending on the reporting method:

                                                              "},{"location":"end-user/insight/quickstart/otel/send_tracing_to_insight.html#otel-agentsdk-insight-agent-opentelemtry-collector","title":"\u5ba2\u6237\u5e94\u7528\u901a\u8fc7 OTel Agent/SDK \u4e0a\u62a5\u94fe\u8def\u7ed9 Insight Agent Opentelemtry Collector","text":"

                                                              \u4e3a\u4e86\u80fd\u591f\u5c06\u94fe\u8def\u6570\u636e\u6b63\u5e38\u4e0a\u62a5\u81f3 Insight \u5e76\u80fd\u591f\u5728 Insight \u6b63\u5e38\u5c55\u793a\uff0c\u9700\u8981\u5e76\u5efa\u8bae\u901a\u8fc7\u5982\u4e0b\u73af\u5883\u53d8\u91cf\u63d0\u4f9b OTLP \u6240\u9700\u7684\u5143\u6570\u636e (Resource Attribute)\uff0c\u6709\u4e24\u79cd\u65b9\u5f0f\u53ef\u5b9e\u73b0\uff1a

                                                              • \u5728\u90e8\u7f72\u6587\u4ef6 YAML \u4e2d\u624b\u52a8\u6dfb\u52a0\uff0c\u4f8b\u5982\uff1a

                                                                ...\n- name: OTEL_EXPORTER_OTLP_ENDPOINT\n  value: \"http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\"\n- name: \"OTEL_SERVICE_NAME\"\n  value: my-java-app-name\n- name: \"OTEL_K8S_NAMESPACE\"\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: metadata.namespace\n- name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: spec.nodeName\n- name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: metadata.name\n- name: OTEL_RESOURCE_ATTRIBUTES\n  value: \"k8s.namespace.name=$(OTEL_K8S_NAMESPACE),k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME)\"\n
• Use Insight Agent's ability to automatically inject the metadata (Resource Attributes) above

  After making sure Insight Agent is working properly and installing the Instrumentation CR, simply add the following annotation to the Pod:

                                                                instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

For example:

                                                                apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-with-aotu-instrumentation\nspec:\n  selector:\n    matchLabels:\n      app.kubernetes.io/name: my-deployment-with-aotu-instrumentation-kuberntes\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: my-deployment-with-aotu-instrumentation-kuberntes\n      annotations:\n        sidecar.opentelemetry.io/inject: \"false\"\n        instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                                              "},{"location":"end-user/insight/quickstart/otel/send_tracing_to_insight.html#opentelemtry-collector-insight-agent-opentelemtry-collector","title":"\u901a\u8fc7 Opentelemtry Collector \u5c06\u94fe\u8def\u8f6c\u53d1\u7ed9 Insight Agent Opentelemtry Collector","text":"

                                                              \u5728\u4fdd\u8bc1\u5e94\u7528\u6dfb\u52a0\u4e86\u5982\u4e0a\u5143\u6570\u636e\u4e4b\u540e\uff0c\u53ea\u9700\u5728\u5ba2\u6237 Opentelemtry Collector \u91cc\u9762\u65b0\u589e\u4e00\u4e2a OTLP Exporter \u5c06\u94fe\u8def\u6570\u636e\u8f6c\u53d1\u7ed9 Insight Agent Opentelemtry Collector \u5373\u53ef\uff0c\u5982\u4e0b Opentelemtry Collector \u914d\u7f6e\u6587\u4ef6\u6240\u793a\uff1a

...
exporters:
  otlp/insight:
    endpoint: insight-opentelemetry-collector.insight-system.svc.cluster.local:4317
service:
...
pipelines:
...
traces:
  exporters:
    - otlp/insight
                                                              "},{"location":"end-user/insight/quickstart/otel/send_tracing_to_insight.html#_1","title":"\u53c2\u8003","text":"
                                                              • \u901a\u8fc7 Operator \u5b9e\u73b0\u5e94\u7528\u7a0b\u5e8f\u65e0\u4fb5\u5165\u589e\u5f3a
                                                              • \u4f7f\u7528 OTel \u8d4b\u4e88\u5e94\u7528\u53ef\u89c2\u6d4b\u6027
                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html","title":"\u4f7f\u7528 OTel SDK \u589e\u5f3a Go \u5e94\u7528\u7a0b\u5e8f","text":"

                                                              Golang \u65e0\u4fb5\u5165\u5f0f\u63a5\u5165\u94fe\u8def\u8bf7\u53c2\u8003 \u901a\u8fc7 Operator \u5b9e\u73b0\u5e94\u7528\u7a0b\u5e8f\u65e0\u4fb5\u5165\u589e\u5f3a \u6587\u6863\uff0c\u901a\u8fc7\u6ce8\u89e3\u5b9e\u73b0\u81ea\u52a8\u63a5\u5165\u94fe\u8def\u3002

                                                              OpenTelemetry \u4e5f\u7b80\u79f0\u4e3a OTel\uff0c\u662f\u4e00\u4e2a\u5f00\u6e90\u7684\u53ef\u89c2\u6d4b\u6027\u6846\u67b6\uff0c\u53ef\u4ee5\u5e2e\u52a9\u5728 Go \u5e94\u7528\u7a0b\u5e8f\u4e2d\u751f\u6210\u548c\u6536\u96c6\u9065\u6d4b\u6570\u636e\uff1a\u94fe\u8def\u3001\u6307\u6807\u548c\u65e5\u5fd7\u3002

                                                              \u672c\u6587\u4e3b\u8981\u8bb2\u89e3\u5982\u4f55\u5728 Go \u5e94\u7528\u7a0b\u5e8f\u4e2d\u901a\u8fc7 OpenTelemetry Go SDK \u589e\u5f3a\u5e76\u63a5\u5165\u94fe\u8def\u76d1\u63a7\u3002

                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#otel-sdk-go_1","title":"\u4f7f\u7528 OTel SDK \u589e\u5f3a Go \u5e94\u7528","text":""},{"location":"end-user/insight/quickstart/otel/golang/golang.html#_1","title":"\u5b89\u88c5\u76f8\u5173\u4f9d\u8d56","text":"

You must first install the dependencies for the OpenTelemetry exporter and SDK. If you are using a different request router, see Request Routing below. Change into the application's source folder and run the following command:

                                                              go get go.opentelemetry.io/otel@v1.19.0 \\\n  go.opentelemetry.io/otel/trace@v1.19.0 \\\n  go.opentelemetry.io/otel/sdk@v1.19.0 \\\n  go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin@v0.46.1 \\\n  go.opentelemetry.io/otel/exporters/otlp/otlptrace@v1.19.0 \\\n  go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc@v1.19.0\n
                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#otel-sdk","title":"\u4f7f\u7528 OTel SDK \u521b\u5efa\u521d\u59cb\u5316\u51fd\u6570","text":"

To let the application send data, you need a function that initializes OpenTelemetry. Add the following code snippet to the main.go file:

                                                              import (\n    \"context\"\n    \"os\"\n    \"time\"\n\n    \"go.opentelemetry.io/otel\"\n    \"go.opentelemetry.io/otel/exporters/otlp/otlptrace\"\n    \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc\"\n    \"go.opentelemetry.io/otel/propagation\"\n    \"go.opentelemetry.io/otel/sdk/resource\"\n    sdktrace \"go.opentelemetry.io/otel/sdk/trace\"\n    semconv \"go.opentelemetry.io/otel/semconv/v1.7.0\"\n    \"go.uber.org/zap\"\n    \"google.golang.org/grpc\"\n)\n\nvar tracerExp *otlptrace.Exporter\n\nfunc retryInitTracer() func() {\n    var shutdown func()\n    go func() {\n        for {\n            // otel will reconnected and re-send spans when otel col recover. so, we don't need to re-init tracer exporter.\n            if tracerExp == nil {\n                shutdown = initTracer()\n            } else {\n                break\n            }\n            time.Sleep(time.Minute * 5)\n        }\n    }()\n    return shutdown\n}\n\nfunc initTracer() func() {\n    // temporarily set timeout to 10s\n    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n    defer cancel()\n\n    serviceName, ok := os.LookupEnv(\"OTEL_SERVICE_NAME\")\n    if !ok {\n        serviceName = \"server_name\"\n        os.Setenv(\"OTEL_SERVICE_NAME\", serviceName)\n    }\n    otelAgentAddr, ok := os.LookupEnv(\"OTEL_EXPORTER_OTLP_ENDPOINT\")\n    if !ok {\n        otelAgentAddr = \"http://localhost:4317\"\n        os.Setenv(\"OTEL_EXPORTER_OTLP_ENDPOINT\", otelAgentAddr)\n    }\n    zap.S().Infof(\"OTLP Trace connect to: %s with service name: %s\", otelAgentAddr, serviceName)\n\n    traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure(), otlptracegrpc.WithDialOption(grpc.WithBlock()))\n    if err != nil {\n        handleErr(err, \"OTLP Trace gRPC Creation\")\n        return nil\n    }\n\n    tracerProvider := sdktrace.NewTracerProvider(\n        sdktrace.WithBatcher(traceExporter),\n        sdktrace.WithSampler(sdktrace.AlwaysSample()),\n    sdktrace.WithResource(resource.NewWithAttributes(semconv.SchemaURL)))\n\n    otel.SetTracerProvider(tracerProvider)\n    otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))\n\n    tracerExp = traceExporter\n    return func() {\n        // Shutdown will flush any remaining spans and shut down the exporter.\n        handleErr(tracerProvider.Shutdown(ctx), \"failed to shutdown TracerProvider\")\n    }\n}\n\nfunc handleErr(err error, message string) {\n    if err != nil {\n        zap.S().Errorf(\"%s: %v\", message, err)\n    }\n}\n
                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#maingo","title":"\u5728 main.go \u4e2d\u521d\u59cb\u5316\u8ddf\u8e2a\u5668","text":"

Modify the main function to initialize the tracer in main.go. In addition, when your service shuts down, you should call TracerProvider.Shutdown() to ensure all spans are exported. The service makes this call as a deferred function in main:

func main() {\n    // start otel tracing\n    if shutdown := retryInitTracer(); shutdown != nil {\n        defer shutdown()\n    }\n    ......\n}\n
                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#otel-gin","title":"\u4e3a\u5e94\u7528\u6dfb\u52a0 OTel Gin \u4e2d\u95f4\u4ef6","text":"

Configure Gin to use the middleware by adding the following lines to main.go:

                                                              import (\n    ....\n  \"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin\"\n)\n\nfunc main() {\n    ......\n    r := gin.Default()\n    r.Use(otelgin.Middleware(\"my-app\"))\n    ......\n}\n
                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#_2","title":"\u8fd0\u884c\u5e94\u7528\u7a0b\u5e8f","text":"
• Running locally for debugging

Note: this step is only for local development and debugging; in production, the Operator automatically injects the following environment variables.

The steps above complete the SDK initialization. If you now want to debug during local development, you first need the address of insight-agent-opentelemetry-collector in the insight-system namespace; assume it is insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317.

Therefore, add the following environment variables when starting the application locally:

                                                                OTEL_SERVICE_NAME=my-golang-app OTEL_EXPORTER_OTLP_ENDPOINT=http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317 go run main.go...\n
• Running in production

Refer to the Inject environment variables only annotation described in Non-intrusive application instrumentation via the Operator, and add the annotation to the deployment YAML:

                                                                instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

If you cannot use annotations, you can manually add the following environment variables to the deployment YAML:

\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\nenv:\n  - name: OTEL_EXPORTER_OTLP_ENDPOINT\n    value: 'http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317'\n  - name: OTEL_SERVICE_NAME\n    value: \"your deployment name\" # (1)!\n  - name: OTEL_K8S_NAMESPACE\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: metadata.namespace\n  - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: spec.nodeName\n  - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: metadata.name\n  - name: OTEL_RESOURCE_ATTRIBUTES\n    value: 'k8s.namespace.name=$(OTEL_K8S_NAMESPACE),k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME)'\n\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n
1. Change this value
                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#_3","title":"\u8bf7\u6c42\u8def\u7531","text":""},{"location":"end-user/insight/quickstart/otel/golang/golang.html#opentelemetry-gingonic","title":"OpenTelemetry gin/gonic \u589e\u5f3a","text":"
                                                              # Add one line to your import() stanza depending upon your request router:\nmiddleware \"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin\"\n

Then inject the OpenTelemetry middleware:

                                                              router.Use(middleware.Middleware(\"my-app\"))\n
                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#opentelemetry-gorillamux","title":"OpenTelemetry gorillamux \u589e\u5f3a","text":"
                                                              # Add one line to your import() stanza depending upon your request router:\nmiddleware \"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux\"\n

Then inject the OpenTelemetry middleware:

                                                              router.Use(middleware.Middleware(\"my-app\"))\n
                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#grpc","title":"gRPC \u589e\u5f3a","text":"

Likewise, OpenTelemetry can help you automatically instrument gRPC requests. To instrument any gRPC server you own, add the interceptors when instantiating the server.

                                                              import (\n  grpcotel \"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc\"\n)\nfunc main() {\n  [...]\n\n    s := grpc.NewServer(\n        grpc.UnaryInterceptor(grpcotel.UnaryServerInterceptor()),\n        grpc.StreamInterceptor(grpcotel.StreamServerInterceptor()),\n    )\n}\n

Note that if your program uses a gRPC client to call third-party services, you also need to add interceptors to the gRPC client:

                                                                  [...]\n\n    conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()),\n        grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()),\n        grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()),\n    )\n
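
For completeness, the following is a minimal, self-contained sketch (not taken from the original example; the target address \"localhost:50051\" is illustrative) of instantiating a gRPC client with the OpenTelemetry interceptors:

package main\n\nimport (\n    grpcotel \"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc\"\n    \"google.golang.org/grpc\"\n    \"google.golang.org/grpc/credentials/insecure\"\n)\n\nfunc main() {\n    // Dial a third-party service with tracing interceptors attached,\n    // so outgoing calls are recorded as client spans.\n    conn, err := grpc.Dial(\"localhost:50051\",\n        grpc.WithTransportCredentials(insecure.NewCredentials()),\n        grpc.WithUnaryInterceptor(grpcotel.UnaryClientInterceptor()),\n        grpc.WithStreamInterceptor(grpcotel.StreamClientInterceptor()),\n    )\n    if err != nil {\n        panic(err)\n    }\n    defer conn.Close()\n}\n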
                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#_4","title":"\u5982\u679c\u4e0d\u4f7f\u7528\u8bf7\u6c42\u8def\u7531","text":"
                                                              import (\n  \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\"\n)\n

Everywhere you pass an http.Handler to a ServeMux, wrap the handler function. For example, make the following replacements:

                                                              - mux.Handle(\"/path\", h)\n+ mux.Handle(\"/path\", otelhttp.NewHandler(h, \"description of path\"))\n---\n- mux.Handle(\"/path\", http.HandlerFunc(f))\n+ mux.Handle(\"/path\", otelhttp.NewHandler(http.HandlerFunc(f), \"description of path\"))\n

In this way, every handler wrapped with otelhttp automatically collects its metadata and starts the corresponding trace.
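
As a concrete illustration, here is a minimal runnable sketch (not from the original text; the \"/hello\" route and port 8080 are invented for the example) of a net/http server whose handler is wrapped with otelhttp:

package main\n\nimport (\n    \"fmt\"\n    \"net/http\"\n\n    \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\"\n)\n\nfunc main() {\n    mux := http.NewServeMux()\n    hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n        fmt.Fprintln(w, \"hello\")\n    })\n    // Wrapping the handler makes every incoming request start its own span.\n    mux.Handle(\"/hello\", otelhttp.NewHandler(hello, \"hello\"))\n    _ = http.ListenAndServe(\":8080\", mux)\n}\n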

                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#_5","title":"\u6570\u636e\u5e93\u8bbf\u95ee\u589e\u5f3a","text":""},{"location":"end-user/insight/quickstart/otel/golang/golang.html#golang-gorm","title":"Golang Gorm","text":"

The OpenTelemetry community has also developed middleware for database access libraries, such as Gorm:

                                                              import (\n    \"github.com/uptrace/opentelemetry-go-extra/otelgorm\"\n    \"gorm.io/driver/sqlite\"\n    \"gorm.io/gorm\"\n)\n\ndb, err := gorm.Open(sqlite.Open(\"file::memory:?cache=shared\"), &gorm.Config{})\nif err != nil {\n    panic(err)\n}\n\notelPlugin := otelgorm.NewPlugin(otelgorm.WithDBName(\"mydb\"), # \u7f3a\u5931\u4f1a\u5bfc\u81f4\u6570\u636e\u5e93\u76f8\u5173\u62d3\u6251\u5c55\u793a\u4e0d\u5b8c\u6574\n    otelgorm.WithAttributes(semconv.ServerAddress(\"memory\"))) # \u7f3a\u5931\u4f1a\u5bfc\u81f4\u6570\u636e\u5e93\u76f8\u5173\u62d3\u6251\u5c55\u793a\u4e0d\u5b8c\u6574\nif err := db.Use(otelPlugin); err != nil {\n    panic(err)\n}\n

                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#span","title":"\u81ea\u5b9a\u4e49 Span","text":"

Often the middleware provided by OpenTelemetry cannot record additional internally called functions; in those cases we need to create custom spans to record them:

                                                               \u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n    _, span := otel.Tracer(\"GetServiceDetail\").Start(ctx,\n        \"spanMetricDao.GetServiceDetail\",\n        trace.WithSpanKind(trace.SpanKindInternal))\n    defer span.End()\n  \u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n
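
For reference, a self-contained sketch of the same pattern (the getServiceDetail function and tracer name are illustrative, not from the original text):

import (\n    \"context\"\n\n    \"go.opentelemetry.io/otel\"\n    \"go.opentelemetry.io/otel/trace\"\n)\n\n// getServiceDetail is a hypothetical internal function recorded with a custom span.\nfunc getServiceDetail(ctx context.Context) {\n    ctx, span := otel.Tracer(\"GetServiceDetail\").Start(ctx,\n        \"spanMetricDao.GetServiceDetail\",\n        trace.WithSpanKind(trace.SpanKindInternal))\n    defer span.End()\n\n    // ... business logic should receive ctx so that child spans nest correctly ...\n    _ = ctx\n}\n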
                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#span_1","title":"\u5411 span \u6dfb\u52a0\u81ea\u5b9a\u4e49\u5c5e\u6027\u548c\u4e8b\u4ef6","text":"

You can also set custom attributes or tags on a span. To add custom attributes and events, follow these steps:

                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#_6","title":"\u5bfc\u5165\u8ddf\u8e2a\u548c\u5c5e\u6027\u5e93","text":"
                                                              import (\n    ...\n    \"go.opentelemetry.io/otel/attribute\"\n    \"go.opentelemetry.io/otel/trace\"\n)\n
                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#span_2","title":"\u4ece\u4e0a\u4e0b\u6587\u4e2d\u83b7\u53d6\u5f53\u524d Span","text":"
                                                              span := trace.SpanFromContext(c.Request.Context())\n
                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#span_3","title":"\u5728\u5f53\u524d Span \u4e2d\u8bbe\u7f6e\u5c5e\u6027","text":"
                                                              span.SetAttributes(attribute.String(\"controller\", \"books\"))\n
                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#span-event","title":"\u4e3a\u5f53\u524d Span \u6dfb\u52a0 Event","text":"

Adding a span event is done with AddEvent on the span object.

                                                              span.AddEvent(msg)\n
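
Events can also carry attributes; a hedged one-line sketch (the event name \"cache.miss\" and the attribute key/value are illustrative) is:

span.AddEvent(\"cache.miss\", trace.WithAttributes(attribute.String(\"cache.key\", \"user:42\")))\n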
                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#_7","title":"\u8bb0\u5f55\u9519\u8bef\u548c\u5f02\u5e38","text":"
                                                              import \"go.opentelemetry.io/otel/codes\"\n\n// \u83b7\u53d6\u5f53\u524d span\nspan := trace.SpanFromContext(ctx)\n\n// RecordError \u4f1a\u81ea\u52a8\u5c06\u4e00\u4e2a\u9519\u8bef\u8f6c\u6362\u6210 span even\nspan.RecordError(err)\n\n// \u6807\u8bb0\u8fd9\u4e2a span \u9519\u8bef\nspan.SetStatus(codes.Error, \"internal error\")\n
                                                              "},{"location":"end-user/insight/quickstart/otel/golang/golang.html#_8","title":"\u53c2\u8003","text":"

For demos, see: - opentelemetry-demo/productcatalogservice - opentelemetry-collector-contrib/demo

                                                              "},{"location":"end-user/insight/quickstart/otel/golang/meter.html","title":"\u4f7f\u7528 OTel SDK \u4e3a\u5e94\u7528\u7a0b\u5e8f\u66b4\u9732\u6307\u6807","text":"

This article is intended only for users who want to evaluate or explore the OTLP metrics still under development.

The OpenTelemetry project requires the APIs and SDKs of each language to emit their data using the OpenTelemetry Protocol (OTLP).

                                                              "},{"location":"end-user/insight/quickstart/otel/golang/meter.html#golang","title":"\u9488\u5bf9 Golang \u5e94\u7528\u7a0b\u5e8f","text":"

Golang can expose runtime metrics through the SDK; specifically, add the following method to your application to enable the metrics exporter:

                                                              "},{"location":"end-user/insight/quickstart/otel/golang/meter.html#_1","title":"\u5b89\u88c5\u76f8\u5173\u4f9d\u8d56","text":"

Change into the application's source folder and run the following command:

                                                              go get go.opentelemetry.io/otel \\\n  go.opentelemetry.io/otel/attribute \\\n  go.opentelemetry.io/otel/exporters/prometheus \\\n  go.opentelemetry.io/otel/metric/global \\\n  go.opentelemetry.io/otel/metric/instrument \\\n  go.opentelemetry.io/otel/sdk/metric\n
                                                              "},{"location":"end-user/insight/quickstart/otel/golang/meter.html#otel-sdk_1","title":"\u4f7f\u7528 OTel SDK \u521b\u5efa\u521d\u59cb\u5316\u51fd\u6570","text":"
                                                              import (\n    .....\n\n    \"go.opentelemetry.io/otel/attribute\"\n    otelPrometheus \"go.opentelemetry.io/otel/exporters/prometheus\"\n    \"go.opentelemetry.io/otel/metric/global\"\n    \"go.opentelemetry.io/otel/metric/instrument\"\n    \"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram\"\n    controller \"go.opentelemetry.io/otel/sdk/metric/controller/basic\"\n    \"go.opentelemetry.io/otel/sdk/metric/export/aggregation\"\n    processor \"go.opentelemetry.io/otel/sdk/metric/processor/basic\"\n    selector \"go.opentelemetry.io/otel/sdk/metric/selector/simple\"\n)\nfunc (s *insightServer) initMeter() *otelPrometheus.Exporter {\n    s.meter = global.Meter(\"xxx\")\n\n    config := otelPrometheus.Config{\n        DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50},\n        Gatherer:                   prometheus.DefaultGatherer,\n        Registry:                   prometheus.NewRegistry(),\n        Registerer:                 prometheus.DefaultRegisterer,\n    }\n\n    c := controller.New(\n        processor.NewFactory(\n            selector.NewWithHistogramDistribution(\n                histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),\n            ),\n            aggregation.CumulativeTemporalitySelector(),\n            processor.WithMemory(true),\n        ),\n    )\n\n    exporter, err := otelPrometheus.New(config, c)\n    if err != nil {\n        zap.S().Panicf(\"failed to initialize prometheus exporter %v\", err)\n    }\n\n    global.SetMeterProvider(exporter.MeterProvider())\n\n    http.HandleFunc(\"/metrics\", exporter.ServeHTTP)\n\n    go func() {\n        _ = http.ListenAndServe(fmt.Sprintf(\":%d\", 8888), nil)\n    }()\n\n    zap.S().Info(\"Prometheus server running on \", fmt.Sprintf(\":%d\", port))\n    return exporter\n}\n

The method above exposes a metrics endpoint for your application: http://localhost:8888/metrics

Then initialize it in main.go:

                                                              func main() {\n\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n    tp := initMeter()\n\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n}\n

In addition, if you want to add custom metrics, you can refer to:

// exposeLoggingMetric exposes a metric like \"insight_logging_count{} 1\"\nfunc (s *insightServer) exposeLoggingMetric(lserver *log.LogService) {\n    s.meter = global.Meter(\"insight.io/basic\")\n\n    var lock sync.Mutex\n    logCounter, err := s.meter.AsyncFloat64().Counter(\"insight_log_total\")\n    if err != nil {\n        zap.S().Panicf(\"failed to initialize instrument: %v\", err)\n    }\n\n    _ = s.meter.RegisterCallback([]instrument.Asynchronous{logCounter}, func(ctx context.Context) {\n        lock.Lock()\n        defer lock.Unlock()\n        count, err := lserver.Count(ctx)\n        if err == nil && count != -1 { // only observe valid counts\n            logCounter.Observe(ctx, float64(count))\n        }\n    })\n}\n

Then call this method in main.go:

                                                              \u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\ns.exposeLoggingMetric(lservice)\n\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n

You can check whether your metrics are working correctly by visiting http://localhost:8888/metrics.

                                                              "},{"location":"end-user/insight/quickstart/otel/golang/meter.html#java","title":"\u9488\u5bf9 Java \u5e94\u7528\u7a0b\u5e8f","text":"

For Java, on top of the automatic trace integration done with the OTel agent, add the environment variable:

                                                              OTEL_METRICS_EXPORTER=prometheus\n

to directly expose JVM metrics. You can check whether your metrics are working correctly by visiting http://localhost:8888/metrics.

Then, combined with a Prometheus serviceMonitor, the metrics integration is complete. If you want to expose custom metrics, see opentelemetry-java-docs/prometheus.

This mainly involves the following two steps:

• Create a meter provider and specify Prometheus as the exporter.

                                                                /*\n* Copyright The OpenTelemetry Authors\n* SPDX-License-Identifier: Apache-2.0\n*/\n\npackage io.opentelemetry.example.prometheus;\n\nimport io.opentelemetry.api.metrics.MeterProvider;\nimport io.opentelemetry.exporter.prometheus.PrometheusHttpServer;\nimport io.opentelemetry.sdk.metrics.SdkMeterProvider;\nimport io.opentelemetry.sdk.metrics.export.MetricReader;\n\npublic final class ExampleConfiguration {\n\n  /**\n  * Initializes the Meter SDK and configures the prometheus collector with all default settings.\n  *\n  * @param prometheusPort the port to open up for scraping.\n  * @return A MeterProvider for use in instrumentation.\n  */\n  static MeterProvider initializeOpenTelemetry(int prometheusPort) {\n    MetricReader prometheusReader = PrometheusHttpServer.builder().setPort(prometheusPort).build();\n\n    return SdkMeterProvider.builder().registerMetricReader(prometheusReader).build();\n  }\n}\n
• Define a custom meter and start the HTTP server

                                                                package io.opentelemetry.example.prometheus;\n\nimport io.opentelemetry.api.common.Attributes;\nimport io.opentelemetry.api.metrics.Meter;\nimport io.opentelemetry.api.metrics.MeterProvider;\nimport java.util.concurrent.ThreadLocalRandom;\n\n/**\n* Example of using the PrometheusHttpServer to convert OTel metrics to Prometheus format and expose\n* these to a Prometheus instance via a HttpServer exporter.\n*\n* <p>A Gauge is used to periodically measure how many incoming messages are awaiting processing.\n* The Gauge callback gets executed every collection interval.\n*/\npublic final class PrometheusExample {\n  private long incomingMessageCount;\n\n  public PrometheusExample(MeterProvider meterProvider) {\n    Meter meter = meterProvider.get(\"PrometheusExample\");\n    meter\n        .gaugeBuilder(\"incoming.messages\")\n        .setDescription(\"No of incoming messages awaiting processing\")\n        .setUnit(\"message\")\n        .buildWithCallback(result -> result.record(incomingMessageCount, Attributes.empty()));\n  }\n\n  void simulate() {\n    for (int i = 500; i > 0; i--) {\n      try {\n        System.out.println(\n            i + \" Iterations to go, current incomingMessageCount is:  \" + incomingMessageCount);\n        incomingMessageCount = ThreadLocalRandom.current().nextLong(100);\n        Thread.sleep(1000);\n      } catch (InterruptedException e) {\n        // ignored here\n      }\n    }\n  }\n\n  public static void main(String[] args) {\n    int prometheusPort = 8888;\n\n    // it is important to initialize the OpenTelemetry SDK as early as possible in your process.\n    MeterProvider meterProvider = ExampleConfiguration.initializeOpenTelemetry(prometheusPort);\n\n    PrometheusExample prometheusExample = new PrometheusExample(meterProvider);\n\n    prometheusExample.simulate();\n\n    System.out.println(\"Exiting\");\n  }\n}\n

Then, once the Java application is running, you can check whether your metrics are working correctly by visiting http://localhost:8888/metrics.

                                                              "},{"location":"end-user/insight/quickstart/otel/golang/meter.html#insight","title":"Insight \u91c7\u96c6\u6307\u6807","text":"

Finally and importantly, you have exposed metrics in your application; now Insight needs to collect them.

The recommended way to expose metrics is via a servicemonitor or podmonitor.

                                                              "},{"location":"end-user/insight/quickstart/otel/golang/meter.html#servicemonitorpodmonitor","title":"\u521b\u5efa servicemonitor/podmonitor","text":"

The servicemonitor/podmonitor you add must carry the label \"operator.insight.io/managed-by\": \"insight\" to be recognized by the Operator:

                                                              apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: example-app\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  selector:\n    matchLabels:\n      app: example-app\n  endpoints:\n  - port: web\n  namespaceSelector:\n    any: true\n
                                                              "},{"location":"end-user/insight/quickstart/otel/java/index.html","title":"\u5f00\u59cb\u76d1\u63a7 Java \u5e94\u7528","text":"
1. For Java application trace integration and monitoring, see Non-intrusive application instrumentation via the Operator, which enables automatic trace integration through annotations.

2. Monitoring the JVM of Java applications: how Java applications that already expose JVM metrics, and those that do not yet, connect to Insight observability.

• If your Java application does not yet expose JVM metrics, you can refer to the following documents:

• Exposing JVM monitoring metrics with JMX Exporter
• Exposing JVM monitoring metrics with the OpenTelemetry Java Agent
• If your Java application already exposes JVM metrics, you can refer to the following document:

• Connecting Java applications with existing JVM metrics to observability
3. Write the TraceId and SpanId into Java application logs to correlate trace data with log data

                                                              "},{"location":"end-user/insight/quickstart/otel/java/mdc.html","title":"\u5c06 TraceId \u548c SpanId \u5199\u5165 Java \u5e94\u7528\u65e5\u5fd7","text":"

This article describes how to use OpenTelemetry to automatically write the TraceId and SpanId into Java application logs. Once the TraceId and SpanId are written into the logs, you can correlate distributed tracing data with log data for more efficient fault diagnosis and performance analysis.

                                                              "},{"location":"end-user/insight/quickstart/otel/java/mdc.html#_1","title":"\u652f\u6301\u7684\u65e5\u5fd7\u5e93","text":"

For more information, see Logger MDC auto-instrumentation.

Logging framework | Versions supporting auto-instrumentation | Dependency required for manual instrumentation
Log4j 1 | 1.2+ | none
Log4j 2 | 2.7+ | opentelemetry-log4j-context-data-2.17-autoconfigure
Logback | 1.0+ | opentelemetry-logback-mdc-1.0"},{"location":"end-user/insight/quickstart/otel/java/mdc.html#logbackspringboot","title":"Using Logback (Spring Boot Projects)","text":"

Spring Boot projects have a built-in logging framework and use Logback as the default logging implementation. If your Java project is a Spring Boot project, only a small amount of configuration is needed to write the TraceId into the logs.

Set logging.pattern.level in application.properties, adding %mdc{trace_id} and %mdc{span_id} to the log pattern.

logging.pattern.level=trace_id=%mdc{trace_id} span_id=%mdc{span_id} %5p ....omitted...\n

The following is an example log:

                                                              2024-06-26 10:56:31.200 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.a.c.c.C.[Tomcat].[localhost].[/]       : Initializing Spring DispatcherServlet 'dispatcherServlet'\n2024-06-26 10:56:31.201 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.s.web.servlet.DispatcherServlet        : Initializing Servlet 'dispatcherServlet'\n2024-06-26 10:56:31.209 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.s.web.servlet.DispatcherServlet        : Completed initialization in 8 ms\n2024-06-26 10:56:31.296 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=5743699405074f4e  INFO 53724 --- [nio-8081-exec-1] com.example.httpserver.ot.OTServer       : hello world\n
                                                              "},{"location":"end-user/insight/quickstart/otel/java/mdc.html#log4j2","title":"\u4f7f\u7528 Log4j2","text":"
1. Add the OpenTelemetry Log4j2 dependency in pom.xml:

                                                                Tip

Replace OPENTELEMETRY_VERSION with the latest version.

                                                                <dependencies>\n  <dependency>\n    <groupId>io.opentelemetry.instrumentation</groupId>\n    <artifactId>opentelemetry-log4j-context-data-2.17-autoconfigure</artifactId>\n    <version>OPENTELEMETRY_VERSION</version>\n    <scope>runtime</scope>\n  </dependency>\n</dependencies>\n
2. Modify the log4j2.xml configuration, adding %X{trace_id} and %X{span_id} to the pattern to automatically write the TraceId and SpanId into the logs:

                                                                <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Configuration>\n  <Appenders>\n    <Console name=\"Console\" target=\"SYSTEM_OUT\">\n      <PatternLayout\n          pattern=\"%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} trace_id=%X{trace_id} span_id=%X{span_id} trace_flags=%X{trace_flags} - %msg%n\"/>\n    </Console>\n  </Appenders>\n  <Loggers>\n    <Root>\n      <AppenderRef ref=\"Console\" level=\"All\"/>\n    </Root>\n  </Loggers>\n</Configuration>\n
3. If using Logback, add the OpenTelemetry Logback dependency in pom.xml.

                                                                Tip

Replace OPENTELEMETRY_VERSION with the latest version.

                                                                <dependencies>\n  <dependency>\n    <groupId>io.opentelemetry.instrumentation</groupId>\n    <artifactId>opentelemetry-logback-mdc-1.0</artifactId>\n    <version>OPENTELEMETRY_VERSION</version>\n  </dependency>\n</dependencies>\n
4. Modify the logback.xml configuration, adding %X{trace_id} and %X{span_id} to the pattern to automatically write the TraceId and SpanId into the logs:

                                                                <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<configuration>\n  <appender name=\"CONSOLE\" class=\"ch.qos.logback.core.ConsoleAppender\">\n    <encoder>\n      <pattern>%d{HH:mm:ss.SSS} trace_id=%X{trace_id} span_id=%X{span_id} trace_flags=%X{trace_flags} %msg%n</pattern>\n    </encoder>\n  </appender>\n\n  <!-- Just wrap your logging appender, for example ConsoleAppender, with OpenTelemetryAppender -->\n  <appender name=\"OTEL\" class=\"io.opentelemetry.instrumentation.logback.mdc.v1_0.OpenTelemetryAppender\">\n    <appender-ref ref=\"CONSOLE\"/>\n  </appender>\n\n  <!-- Use the wrapped \"OTEL\" appender instead of the original \"CONSOLE\" one -->\n  <root level=\"INFO\">\n    <appender-ref ref=\"OTEL\"/>\n  </root>\n\n</configuration>\n
                                                              "},{"location":"end-user/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html","title":"\u4f7f\u7528 JMX Exporter \u66b4\u9732 JVM \u76d1\u63a7\u6307\u6807","text":"

JMX Exporter offers two usage modes:

1. Start a standalone process. Specify parameters at JVM startup to expose JMX's RMI interface; JMX Exporter calls RMI to obtain the JVM runtime state data, converts it to Prometheus metrics format, and exposes a port for Prometheus to scrape.
2. Start in-process within the JVM. Specify parameters at JVM startup to run the JMX Exporter JAR as a javaagent; it reads the JVM runtime state data in-process, converts it to Prometheus metrics format, and exposes a port for Prometheus to scrape.

                                                              Note

The first mode is not officially recommended: the configuration is complex, and it requires a separate process whose own monitoring becomes a new problem. This article therefore focuses on the second mode, explaining how to use JMX Exporter to expose JVM monitoring metrics in a Kubernetes environment.

With the second mode, you need to specify the JMX Exporter JAR file and configuration file when starting the JVM. The JAR is a binary file that is awkward to mount via a ConfigMap, and the configuration file rarely needs modification, so the recommendation is to package both the JMX Exporter JAR and its configuration file directly into the business container image.

For this mode, you can either place the JMX Exporter JAR file inside the business application image, or mount it at deployment time. Here is an introduction to each approach:

                                                              "},{"location":"end-user/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html#jmx-exporter-jar","title":"\u65b9\u5f0f\u4e00\uff1a\u5c06 JMX Exporter JAR \u6587\u4ef6\u6784\u5efa\u81f3\u4e1a\u52a1\u955c\u50cf\u4e2d","text":"

The content of prometheus-jmx-config.yaml is as follows:

                                                              prometheus-jmx-config.yaml
                                                              ...\nssl: false\nlowercaseOutputName: false\nlowercaseOutputLabelNames: false\nrules:\n- pattern: \".*\"\n

                                                              Note

For more configuration options, see the introduction at the bottom or the official Prometheus documentation.

Then prepare the JAR file. You can find the latest JAR download link on the jmx_exporter GitHub page and refer to the following Dockerfile:

                                                              FROM openjdk:11.0.15-jre\nWORKDIR /app/\nCOPY target/my-app.jar ./\nCOPY prometheus-jmx-config.yaml ./\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\nENV JAVA_TOOL_OPTIONS=-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\nEXPOSE 8081 8999 8080 8888\nENTRYPOINT java $JAVA_OPTS -jar my-app.jar\n

Note:

• Startup parameter format: -javaagent:&lt;jar-path&gt;=&lt;port&gt;:&lt;config-file-path&gt;
• Port 8088 is used here to expose the JVM monitoring metrics; if it conflicts with your Java application, change it as needed
                                                              "},{"location":"end-user/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html#init-container","title":"\u65b9\u5f0f\u4e8c\uff1a\u901a\u8fc7 init container \u5bb9\u5668\u6302\u8f7d","text":"

We first need to build the JMX Exporter into a Docker image; the following Dockerfile is for reference only:

FROM alpine/curl:3.14\nWORKDIR /app/\n# copy the config file created earlier into the image\nCOPY prometheus-jmx-config.yaml ./\n# download the jmx prometheus javaagent jar\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\n

Build the image from the Dockerfile above: docker build -t my-jmx-exporter .

Add the following init container to the Java application's deployment YAML:

Click to expand the YAML file
apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-demo-app\n  labels:\n    app: my-demo-app\nspec:\n  selector:\n    matchLabels:\n      app: my-demo-app\n  template:\n    metadata:\n      labels:\n        app: my-demo-app\n    spec:\n      imagePullSecrets:\n      - name: registry-pull\n      initContainers:\n      - name: jmx-sidecar\n        image: my-jmx-exporter\n        command: [\"cp\", \"-r\", \"/app/jmx_prometheus_javaagent-0.17.2.jar\", \"/target/jmx_prometheus_javaagent-0.17.2.jar\"]  \u278a\n        volumeMounts:\n        - name: sidecar\n          mountPath: /target\n      containers:\n      - image: my-demo-app-image\n        name: my-demo-app\n        resources:\n          requests:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n          limits:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n        ports:\n        - containerPort: 18083\n        env:\n        - name: JAVA_TOOL_OPTIONS\n          value: \"-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\" \u278b\n        volumeMounts:\n        - name: host-time\n          mountPath: /etc/localtime\n          readOnly: true\n        - name: sidecar\n          mountPath: /sidecar\n      volumes:\n      - name: host-time\n        hostPath:\n          path: /etc/localtime\n      - name: sidecar  # shared agent folder\n        emptyDir: {}\n      restartPolicy: Always\n

After the modifications above, the sample application my-demo-app is able to expose JVM metrics. After running the service, we can access the Prometheus-format metrics it exposes at http://localhost:8088.

Next, you can refer to Connecting Java applications with existing JVM metrics to observability.

                                                              "},{"location":"end-user/insight/quickstart/otel/java/jvm-monitor/legacy-jvm.html","title":"\u5df2\u6709 JVM \u6307\u6807\u7684 Java \u5e94\u7528\u5bf9\u63a5\u53ef\u89c2\u6d4b\u6027","text":"

If your Java application exposes JVM monitoring metrics by other means (such as Spring Boot Actuator), the monitoring data needs to be collected. You can have Insight collect the existing JVM metrics by adding Kubernetes annotations to the workload:

annotations: \n  insight.opentelemetry.io/metric-scrape: \"true\" # whether to scrape\n  insight.opentelemetry.io/metric-path: \"/\"      # path to scrape metrics from\n  insight.opentelemetry.io/metric-port: \"9464\"   # port to scrape metrics from\n

For example, add annotations to my-deployment-app:

apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-app\nspec:\n  selector:\n    matchLabels:\n      app: my-deployment-app\n      app.kubernetes.io/name: my-deployment-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-deployment-app\n        app.kubernetes.io/name: my-deployment-app\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\" # whether to scrape\n        insight.opentelemetry.io/metric-path: \"/\"      # path to scrape metrics from\n        insight.opentelemetry.io/metric-port: \"9464\"   # port to scrape metrics from\n

The following is a complete example:

---\napiVersion: v1\nkind: Service\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  type: NodePort\n  selector:\n    #app: my-deployment-with-auto-instrumentation-app\n    app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  ports:\n    - name: http\n      port: 8080\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  selector:\n    matchLabels:\n      #app: my-deployment-with-auto-instrumentation-app\n      app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\" # whether to scrape\n        insight.opentelemetry.io/metric-path: \"/actuator/prometheus\"      # path to scrape metrics from\n        insight.opentelemetry.io/metric-port: \"8080\"   # port to scrape metrics from\n    spec:\n      containers:\n        - name: myapp\n          image: docker.m.daocloud.io/wutang/spring-boot-actuator-prometheus-metrics-demo\n          ports:\n            - name: http\n              containerPort: 8080\n          resources:\n            limits:\n              cpu: 500m\n              memory: 800Mi\n            requests:\n              cpu: 200m\n              memory: 400Mi\n

In the example above, Insight scrapes the Prometheus metrics exposed via Spring Boot Actuator at :8080/actuator/prometheus.

                                                              "},{"location":"end-user/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html","title":"\u4f7f\u7528 OpenTelemetry Java Agent \u66b4\u9732 JVM \u76d1\u63a7\u6307\u6807","text":"

In OpenTelemetry Agent v1.20.0 and above, the agent includes a JMX Metric Insight module. If your application is already integrated with the OpenTelemetry Agent to collect traces, you no longer need to introduce another agent to expose JMX metrics for the application; the OpenTelemetry Agent collects and exposes metrics by detecting the metrics published by the MBeans locally available in the application.

The OpenTelemetry Agent also ships with built-in monitoring samples for common Java servers and frameworks; see the predefined metrics.

Using the OpenTelemetry Java Agent likewise requires deciding how to mount the JAR into the container. Besides the JAR-mounting approach described above for JMX Exporter, we can also leverage the Operator provided by OpenTelemetry to automatically enable JVM metrics exposure for our application:

With the agent in place, no additional agent is needed: the OpenTelemetry Agent detects the metrics published by the locally available MBeans in the application and can now collect them locally and expose a metrics endpoint.

However, as of the current version, you still need to manually add the corresponding annotations to the application before the JVM data is collected by Insight; for the annotation details, see Connecting Java applications with existing JVM metrics to observability.

                                                              "},{"location":"end-user/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html#java","title":"\u4e3a Java \u4e2d\u95f4\u4ef6\u66b4\u9732\u6307\u6807","text":"

The OpenTelemetry Agent also has built-in samples for middleware monitoring; see the predefined metrics.

No target type is specified by default; you need to specify it via the -Dotel.jmx.target.system JVM option, for example -Dotel.jmx.target.system=jetty,kafka-broker.

                                                              "},{"location":"end-user/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html#_1","title":"\u53c2\u8003","text":"
                                                              • Gaining JMX Metric Insights with the OpenTelemetry Java Agent

                                                              • Otel jmx metrics

                                                              "},{"location":"end-user/insight/quickstart/other/install-agent-on-ocp.html","title":"OpenShift \u5b89\u88c5 Insight Agent","text":"

Although OpenShift ships with its own monitoring system, we still install Insight Agent because of certain conventions around data collection.

In addition to the basic installation configuration, the following parameters need to be added during helm install:

## parameters for fluent-bit\n--set fluent-bit.ocp.enabled=true \\\n--set fluent-bit.serviceAccount.create=false \\\n--set fluent-bit.securityContext.runAsUser=0 \\\n--set fluent-bit.securityContext.seLinuxOptions.type=spc_t \\\n--set fluent-bit.securityContext.readOnlyRootFilesystem=false \\\n--set fluent-bit.securityContext.allowPrivilegeEscalation=false \\\n\n## enable the Prometheus (CR) adaptation for OpenShift 4.x\n--set compatibility.openshift.prometheus.enabled=true \\\n\n## disable the higher-version Prometheus instance\n--set kube-prometheus-stack.prometheus.enabled=false \\\n--set kube-prometheus-stack.kubeApiServer.enabled=false \\\n--set kube-prometheus-stack.kubelet.enabled=false \\\n--set kube-prometheus-stack.kubeControllerManager.enabled=false \\\n--set kube-prometheus-stack.coreDns.enabled=false \\\n--set kube-prometheus-stack.kubeDns.enabled=false \\\n--set kube-prometheus-stack.kubeEtcd.enabled=false \\\n--set kube-prometheus-stack.kubeScheduler.enabled=false \\\n--set kube-prometheus-stack.kubeStateMetrics.enabled=false \\\n--set kube-prometheus-stack.nodeExporter.enabled=false \\\n\n## restrict the namespaces handled by PrometheusOperator to avoid competing with OpenShift's built-in PrometheusOperator\n--set kube-prometheus-stack.prometheusOperator.kubeletService.namespace=\"insight-system\" \\\n--set kube-prometheus-stack.prometheusOperator.prometheusInstanceNamespaces=\"insight-system\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[0]=\"openshift-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[1]=\"openshift-user-workload-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[2]=\"openshift-customer-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[3]=\"openshift-route-monitor-operator\" \\\n
                                                              "},{"location":"end-user/insight/quickstart/other/install-agent-on-ocp.html#openshift-prometheus","title":"\u901a\u8fc7 OpenShift \u81ea\u8eab\u673a\u5236\uff0c\u5c06\u7cfb\u7edf\u76d1\u63a7\u6570\u636e\u5199\u5165 Prometheus \u4e2d","text":"
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: cluster-monitoring-config
  namespace: openshift-monitoring
data:
  config.yaml: |
    prometheusK8s:
      remoteWrite:
        - queueConfig:
            batchSendDeadline: 60s
            maxBackoff: 5s
            minBackoff: 30ms
            minShards: 1
            capacity: 5000
            maxSamplesPerSend: 1000
            maxShards: 100
          remoteTimeout: 30s
          url: http://insight-agent-prometheus.insight-system.svc.cluster.local:9090/api/v1/write
          writeRelabelConfigs:
            - action: keep
              regex: etcd|kubelet|node-exporter|apiserver|kube-state-metrics
              sourceLabels:
                - job
```
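Saving the manifest above to a file and applying it with kubectl (standard usage) makes OpenShift's cluster monitoring remote-write the kept series to the Insight Agent Prometheus:

```sh
# Save the manifest above as cluster-monitoring-config.yaml, then:
kubectl apply -f cluster-monitoring-config.yaml
```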
                                                              "},{"location":"end-user/insight/quickstart/res-plan/index.html","title":"\u90e8\u7f72\u5bb9\u91cf\u89c4\u5212","text":"

                                                              \u9ed8\u8ba4\u60c5\u51b5\u4e0b\uff0c\u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u4e3a\u4e86\u907f\u514d\u6d88\u8017\u8fc7\u591a\u8d44\u6e90\uff0c\u5df2\u7ecf\u8bbe\u7f6e\u4e86\u8d44\u6e90\u4e0a\u7ebf\uff08resource limit\uff09\uff0c\u53ef\u89c2\u6d4b\u7cfb\u7edf\u9700\u8981\u5904\u7406\u5927\u91cf\u7684\u6570\u636e\uff0c\u5982\u679c\u5bb9\u91cf\u89c4\u5212\u4e0d\u5408\u7406\uff0c\u53ef\u80fd\u4f1a\u5bfc\u81f4\u7cfb\u7edf\u8d1f\u8f7d\u8fc7\u9ad8\uff0c\u5f71\u54cd\u7a33\u5b9a\u6027\u548c\u53ef\u9760\u6027\u3002

                                                              "},{"location":"end-user/insight/quickstart/res-plan/index.html#_2","title":"\u89c2\u6d4b\u7ec4\u4ef6\u7684\u8d44\u6e90\u89c4\u5212","text":"

                                                              \u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u5305\u542b Insight \u548c Insight Agent\u3002\u5176\u4e2d\uff0cInsight \u4e3b\u8981\u8d1f\u8d23\u89c2\u6d4b\u6570\u636e\u7684\u5b58\u50a8\uff0c\u5206\u6790\u4e0e\u5c55\u793a\u3002\u800c Insight Agent \u5305\u542b\u4e86\u6570\u636e\u91c7\u96c6\u3001\u6570\u636e\u5904\u7406\u3001\u6570\u636e\u4e0a\u4f20\u7b49\u529f\u80fd\u3002

                                                              "},{"location":"end-user/insight/quickstart/res-plan/index.html#_3","title":"\u5b58\u50a8\u7ec4\u4ef6\u7684\u5bb9\u91cf\u89c4\u5212","text":"

                                                              Insight \u7684\u5b58\u50a8\u7ec4\u4ef6\u4e3b\u8981\u5305\u62ec ElasticSearch \u548c VictoriaMetrics. \u5176\u4e2d\uff0cElasticSearch \u4e3b\u8981\u8d1f\u8d23\u5b58\u50a8\u548c\u67e5\u8be2\u65e5\u5fd7\u4e0e\u94fe\u8def\u6570\u636e\uff0cVictoriaMetrics \u4e3b\u8981\u8d1f\u8d23\u5b58\u50a8\u548c\u67e5\u8be2\u6307\u6807\u6570\u636e\u3002

• VictoriaMetrics: its disk usage depends on the metrics stored. Estimate the capacity according to the vmstorage disk planning, then adjust the vmstorage disk accordingly.
                                                              "},{"location":"end-user/insight/quickstart/res-plan/index.html#_4","title":"\u91c7\u96c6\u5668\u7684\u8d44\u6e90\u89c4\u5212","text":"

                                                              Insight Agent \u7684\u91c7\u96c6\u5668\u4e2d\u5305\u542b Proemtheus\uff0c\u867d\u7136 Prometheus \u672c\u8eab\u662f\u4e00\u4e2a\u72ec\u7acb\u7684\u7ec4\u4ef6\uff0c\u4f46\u662f\u5728 Insight Agent \u4e2d\uff0cPrometheus \u4f1a\u88ab\u7528\u4e8e\u91c7\u96c6\u6570\u636e\uff0c\u56e0\u6b64\u9700\u8981\u5bf9 Prometheus \u7684\u8d44\u6e90\u8fdb\u884c\u89c4\u5212\u3002

• Prometheus: its resource usage depends on the volume of metrics collected; refer to Prometheus Resource Planning to adjust it.
                                                              "},{"location":"end-user/insight/quickstart/res-plan/modify-vms-disk.html","title":"vmstorge \u78c1\u76d8\u6269\u5bb9","text":"

                                                              \u672c\u6587\u63cf\u8ff0\u4e86 vmstorge \u78c1\u76d8\u6269\u5bb9\u7684\u65b9\u6cd5\uff0c vmstorge \u78c1\u76d8\u89c4\u8303\u8bf7\u53c2\u8003 vmstorage \u78c1\u76d8\u5bb9\u91cf\u89c4\u5212\u3002

                                                              "},{"location":"end-user/insight/quickstart/res-plan/modify-vms-disk.html#_1","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"end-user/insight/quickstart/res-plan/modify-vms-disk.html#_2","title":"\u5f00\u542f\u5b58\u50a8\u6c60\u6269\u5bb9","text":"
                                                              1. \u4ee5\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7ba1\u7406\u5458\u6743\u9650\u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\uff0c\u70b9\u51fb \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \uff0c\u70b9\u51fb kpanda-global-cluster \u96c6\u7fa4\u3002

2. In the left navigation, select Container Storage -> PersistentVolumeClaims (PVC) and find the PVC bound to vmstorage.

3. Click one of the vmstorage PVCs to open its details and confirm which storage pool the PVC is bound to.

4. In the left navigation, select Container Storage -> Storage Pools (SC), find local-path, click ┇ on the right of the target, and choose Edit from the pop-up menu.

5. Enable Expansion, then click OK.

                                                              "},{"location":"end-user/insight/quickstart/res-plan/modify-vms-disk.html#vmstorage","title":"\u66f4\u6539 vmstorage \u7684\u78c1\u76d8\u5bb9\u91cf","text":"
                                                              1. \u4ee5\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7ba1\u7406\u5458\u6743\u9650\u767b\u5f55 AI \u7b97\u529b\u4e2d\u5fc3 \u5e73\u53f0\uff0c\u8fdb\u5165 kpanda-global-cluster \u96c6\u7fa4\u8be6\u60c5\u3002

2. In the left navigation, select Custom Resources and find the vmcluster custom resource.

3. Click the vmcluster custom resource to open its detail page, switch to the insight-system namespace, and choose Edit YAML from the menu to the right of insight-victoria-metrics-k8s-stack.

4. Modify it as illustrated, then click OK.

5. In the left navigation, select Container Storage -> PersistentVolumeClaims (PVC) again, find the PVC bound to vmstorage, and confirm the change has taken effect. On a PVC's detail page, click its associated persistent volume (PV).

6. Open the volume's detail page and click the Update button in the upper right corner.

7. Modify the Capacity and click OK, then wait a moment for the expansion to complete.

                                                              "},{"location":"end-user/insight/quickstart/res-plan/modify-vms-disk.html#_3","title":"\u514b\u9686\u5b58\u50a8\u5377","text":"

                                                              \u82e5\u5b58\u50a8\u5377\u6269\u5bb9\u5931\u8d25\uff0c\u53ef\u53c2\u8003\u4ee5\u4e0b\u65b9\u6cd5\u514b\u9686\u5b58\u50a8\u5377\u3002

1. Log in to the AI computing center platform with global service cluster administrator privileges and enter the kpanda-global-cluster cluster details.

2. In the left navigation, select Workloads -> StatefulSets, find the vmstorage StatefulSet, click ┇ on the right of the target, and choose Status -> Stop -> OK from the pop-up menu.

3. Log in to a master node of the kpanda-global-cluster cluster from the command line, then run the following command to copy the vm-data directory out of the vmstorage container and save the metric data locally:

```sh
kubectl cp -n insight-system vmstorage-insight-victoria-metrics-k8s-stack-1:vm-data ./vm-data
```
4. Log in to the AI computing center platform, enter the kpanda-global-cluster cluster details, select Container Storage -> PersistentVolumes (PV) in the left navigation, click Clone in the upper right corner, and modify the volume's capacity.

5. Delete the previous vmstorage volume.

6. Wait a moment; once the PVC has bound to the cloned volume, run the following command to import the data exported in step 3 back into the corresponding container, then start the previously stopped vmstorage.

```sh
kubectl cp -n insight-system ./vm-data vmstorage-insight-victoria-metrics-k8s-stack-1:vm-data
```
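If you prefer the command line to the UI for the stop/start steps, scaling the StatefulSet is one possible equivalent. This is only a sketch: the VictoriaMetrics operator may reconcile the replica count back, in which case use the UI stop/start instead.

```sh
# Stop vmstorage before exporting the data (step 2):
kubectl scale statefulset vmstorage-insight-victoria-metrics-k8s-stack \
  -n insight-system --replicas=0
# Start it again after importing the data (step 6):
kubectl scale statefulset vmstorage-insight-victoria-metrics-k8s-stack \
  -n insight-system --replicas=1
```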
                                                              "},{"location":"end-user/insight/quickstart/res-plan/prometheus-res.html","title":"Prometheus \u8d44\u6e90\u89c4\u5212","text":"

                                                              Prometheus \u5728\u5b9e\u9645\u4f7f\u7528\u8fc7\u7a0b\u4e2d\uff0c\u53d7\u5230\u96c6\u7fa4\u5bb9\u5668\u6570\u91cf\u4ee5\u53ca\u5f00\u542f Istio \u7684\u5f71\u54cd\uff0c\u4f1a\u5bfc\u81f4 Prometheus \u7684 CPU\u3001\u5185\u5b58\u7b49\u8d44\u6e90\u4f7f\u7528\u91cf\u8d85\u51fa\u8bbe\u5b9a\u7684\u8d44\u6e90\u3002

                                                              \u4e3a\u4e86\u4fdd\u8bc1\u4e0d\u540c\u89c4\u6a21\u96c6\u7fa4\u4e0b Prometheus \u7684\u6b63\u5e38\u8fd0\u884c\uff0c\u9700\u8981\u6839\u636e\u96c6\u7fa4\u7684\u5b9e\u9645\u89c4\u6a21\u5bf9 Prometheus \u8fdb\u884c\u8d44\u6e90\u8c03\u6574\u3002

                                                              "},{"location":"end-user/insight/quickstart/res-plan/prometheus-res.html#_1","title":"\u53c2\u8003\u8d44\u6e90\u89c4\u5212","text":"

                                                              \u5728\u672a\u5f00\u542f\u7f51\u683c\u60c5\u51b5\u4e0b\uff0c\u6d4b\u8bd5\u60c5\u51b5\u7edf\u8ba1\u51fa\u7cfb\u7edf Job \u6307\u6807\u91cf\u4e0e Pod \u7684\u5173\u7cfb\u4e3a Series \u6570\u91cf = 800 * Pod \u6570\u91cf

                                                              \u5728\u5f00\u542f\u670d\u52a1\u7f51\u683c\u65f6\uff0c\u5f00\u542f\u529f\u80fd\u540e Pod \u4ea7\u751f\u7684 Istio \u76f8\u5173\u6307\u6807\u6570\u91cf\u7ea7\u4e3a Series \u6570\u91cf = 768 * Pod \u6570\u91cf

                                                              "},{"location":"end-user/insight/quickstart/res-plan/prometheus-res.html#_2","title":"\u5f53\u672a\u5f00\u542f\u670d\u52a1\u7f51\u683c\u65f6","text":"

                                                              \u4ee5\u4e0b\u8d44\u6e90\u89c4\u5212\u4e3a \u672a\u5f00\u542f\u670d\u52a1\u7f51\u683c \u573a\u666f\u4e0b\uff0cPrometheus \u7684\u8d44\u6e90\u89c4\u5212\u63a8\u8350\uff1a

| Cluster size (Pods) | Metric series (mesh disabled) | CPU (cores) | Memory (GB) |
|---|---|---|---|
| 100 | 80k | Request: 0.5, Limit: 1 | Request: 2GB, Limit: 4GB |
| 200 | 160k | Request: 1, Limit: 1.5 | Request: 3GB, Limit: 6GB |
| 300 | 240k | Request: 1, Limit: 2 | Request: 3GB, Limit: 6GB |
| 400 | 320k | Request: 1, Limit: 2 | Request: 4GB, Limit: 8GB |
| 500 | 400k | Request: 1.5, Limit: 3 | Request: 5GB, Limit: 10GB |
| 800 | 640k | Request: 2, Limit: 4 | Request: 8GB, Limit: 16GB |
| 1000 | 800k | Request: 2.5, Limit: 5 | Request: 9GB, Limit: 18GB |
| 2000 | 1.6M | Request: 3.5, Limit: 7 | Request: 20GB, Limit: 40GB |
| 3000 | 2.4M | Request: 4, Limit: 8 | Request: 33GB, Limit: 66GB |

"},{"location":"end-user/insight/quickstart/res-plan/prometheus-res.html#_3","title":"When the Service Mesh Is Enabled","text":"

The following is the recommended Prometheus resource plan for the scenario where the service mesh is enabled:

| Cluster size (Pods) | Metric series (mesh enabled) | CPU (cores) | Memory (GB) |
|---|---|---|---|
| 100 | 150k | Request: 1, Limit: 2 | Request: 3GB, Limit: 6GB |
| 200 | 310k | Request: 2, Limit: 3 | Request: 5GB, Limit: 10GB |
| 300 | 460k | Request: 2, Limit: 4 | Request: 6GB, Limit: 12GB |
| 400 | 620k | Request: 2, Limit: 4 | Request: 8GB, Limit: 16GB |
| 500 | 780k | Request: 3, Limit: 6 | Request: 10GB, Limit: 20GB |
| 800 | 1.25M | Request: 4, Limit: 8 | Request: 15GB, Limit: 30GB |
| 1000 | 1.56M | Request: 5, Limit: 10 | Request: 18GB, Limit: 36GB |
| 2000 | 3.12M | Request: 7, Limit: 14 | Request: 40GB, Limit: 80GB |
| 3000 | 4.68M | Request: 8, Limit: 16 | Request: 65GB, Limit: 130GB |

                                                              Note

1. The Pod count in the table refers to Pods running stably in the cluster. If a large number of Pods restart, the metric volume spikes for a short period and resources should be scaled up accordingly.
2. Prometheus keeps two hours of data in memory by default, and enabling Remote Write in the cluster consumes additional memory; an overcommit ratio of 2 is recommended.
3. The values in the table are recommendations for the general case. If your environment has precise resource requirements, check the actual resource usage of the corresponding Prometheus after the cluster has run for a while and tune the configuration precisely.
                                                              "},{"location":"end-user/insight/quickstart/res-plan/vms-res-plan.html","title":"vmstorage \u78c1\u76d8\u5bb9\u91cf\u89c4\u5212","text":"

                                                              vmstorage \u662f\u8d1f\u8d23\u5b58\u50a8\u53ef\u89c2\u6d4b\u6027\u591a\u96c6\u7fa4\u6307\u6807\u3002 \u4e3a\u4fdd\u8bc1 vmstorage \u7684\u7a33\u5b9a\u6027\uff0c\u9700\u8981\u6839\u636e\u96c6\u7fa4\u6570\u91cf\u53ca\u96c6\u7fa4\u89c4\u6a21\u8c03\u6574 vmstorage \u7684\u78c1\u76d8\u5bb9\u91cf\u3002 \u66f4\u591a\u8d44\u6599\u8bf7\u53c2\u8003 vmstorage \u4fdd\u7559\u671f\u4e0e\u78c1\u76d8\u7a7a\u95f4\u3002

                                                              "},{"location":"end-user/insight/quickstart/res-plan/vms-res-plan.html#_1","title":"\u6d4b\u8bd5\u7ed3\u679c","text":"

                                                              \u7ecf\u8fc7 14 \u5929\u5bf9\u4e0d\u540c\u89c4\u6a21\u7684\u96c6\u7fa4\u7684 vmstorage \u7684\u78c1\u76d8\u89c2\u6d4b\uff0c \u6211\u4eec\u53d1\u73b0 vmstorage \u7684\u78c1\u76d8\u7528\u91cf\u4e0e\u5176\u5b58\u50a8\u7684\u6307\u6807\u91cf\u548c\u5355\u4e2a\u6570\u636e\u70b9\u5360\u7528\u78c1\u76d8\u6b63\u76f8\u5173\u3002

1. Volume of metrics stored at an instant: increase(vm_rows{type != "indexdb"}[30s]) gives the number of metrics added within 30s.
2. Disk usage per data point: sum(vm_data_size_bytes{type!="indexdb"}) / sum(vm_rows{type != "indexdb"})
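As a sketch, these expressions can be evaluated against the Prometheus-compatible query API of vmselect; the service name and port below are assumptions inferred from the component list in these docs, so verify them in your environment:

```sh
# Evaluate the per-datapoint expression via the Prometheus-compatible API
# (assumed in-cluster vmselect endpoint; adjust to your deployment):
curl -s 'http://vmselect-insight-victoria-metrics-k8s-stack.insight-system.svc:8481/select/0/prometheus/api/v1/query' \
  --data-urlencode 'query=sum(vm_data_size_bytes{type!="indexdb"}) / sum(vm_rows{type!="indexdb"})'
```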
                                                              "},{"location":"end-user/insight/quickstart/res-plan/vms-res-plan.html#_2","title":"\u8ba1\u7b97\u65b9\u6cd5","text":"

                                                              \u78c1\u76d8\u7528\u91cf = \u77ac\u65f6\u6307\u6807\u91cf x 2 x \u5355\u4e2a\u6570\u636e\u70b9\u7684\u5360\u7528\u78c1\u76d8 x 60 x 24 x \u5b58\u50a8\u65f6\u95f4 (\u5929)

                                                              \u53c2\u6570\u8bf4\u660e\uff1a

1. Disk usage is measured in bytes.
2. Retention period (days) × 60 × 24 converts the retention from days into minutes for the disk usage calculation.
3. Prometheus in Insight Agent scrapes every 30s by default, so it produces twice the metric volume per minute.
4. The default retention period in vmstorage is 1 month; to change it, see Modify System Configuration.
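A minimal sketch of the formula in shell, using the figures from the worked example later on this page (1,584,000 instantaneous series, 0.9 bytes per data point, 31 days):

```sh
# Disk usage = series x 2 x bytes-per-datapoint x 60 x 24 x retention-days
awk 'BEGIN { b = 1584000*2*0.9*60*24*31; printf "%.0f bytes (~%.0f GiB)\n", b, b/2^30 }'
# => 127277568000 bytes (~119 GiB)
```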

                                                              Warning

This formula is a general-purpose estimate; it is advisable to reserve extra disk capacity on top of the calculated result to keep vmstorage running normally.

                                                              "},{"location":"end-user/insight/quickstart/res-plan/vms-res-plan.html#_3","title":"\u53c2\u8003\u5bb9\u91cf","text":"

                                                              \u8868\u683c\u4e2d\u6570\u636e\u662f\u6839\u636e\u9ed8\u8ba4\u5b58\u50a8\u65f6\u95f4\u4e3a\u4e00\u4e2a\u6708 (30 \u5929)\uff0c\u5355\u4e2a\u6570\u636e\u70b9 (datapoint) \u7684\u5360\u7528\u78c1\u76d8\u53d6 0.9 \u8ba1\u7b97\u6240\u5f97\u7ed3\u679c\u3002 \u591a\u96c6\u7fa4\u573a\u666f\u4e0b\uff0cPod \u6570\u91cf\u8868\u793a\u591a\u96c6\u7fa4 Pod \u6570\u91cf\u7684\u603b\u548c\u3002

                                                              "},{"location":"end-user/insight/quickstart/res-plan/vms-res-plan.html#_4","title":"\u5f53\u672a\u5f00\u542f\u670d\u52a1\u7f51\u683c\u65f6","text":"\u96c6\u7fa4\u89c4\u6a21 (Pod \u6570) \u6307\u6807\u91cf \u78c1\u76d8\u5bb9\u91cf 100 8w 6 GiB 200 16w 12 GiB 300 24w 18 GiB 400 32w 24 GiB 500 40w 30 GiB 800 64w 48 GiB 1000 80w 60 GiB 2000 160w 120 GiB 3000 240w 180 GiB"},{"location":"end-user/insight/quickstart/res-plan/vms-res-plan.html#_5","title":"\u5f53\u5f00\u542f\u670d\u52a1\u7f51\u683c\u65f6","text":"\u96c6\u7fa4\u89c4\u6a21 (Pod \u6570) \u6307\u6807\u91cf \u78c1\u76d8\u5bb9\u91cf 100 15w 12 GiB 200 31w 24 GiB 300 46w 36 GiB 400 62w 48 GiB 500 78w 60 GiB 800 125w 94 GiB 1000 156w 120 GiB 2000 312w 235 GiB 3000 468w 350 GiB"},{"location":"end-user/insight/quickstart/res-plan/vms-res-plan.html#_6","title":"\u4e3e\u4f8b\u8bf4\u660e","text":"

The AI computing center platform has two clusters: the global service cluster (service mesh enabled) runs 500 Pods, and the worker cluster (service mesh disabled) runs 1000 Pods; metrics are expected to be kept for 30 days.

• Metric volume in the global service cluster: 800 × 500 + 768 × 500 = 784,000
• Metric volume in the worker cluster: 800 × 1000 = 800,000

The vmstorage disk capacity should therefore be set to (784,000 + 800,000) × 2 × 0.9 × 60 × 24 × 31 = 127,277,568,000 bytes ≈ 119 GiB.

                                                              Note

For the relationship between a cluster's metric volume and its Pod count, see Prometheus Resource Planning.

                                                              "},{"location":"end-user/insight/system-config/modify-config.html","title":"\u4fee\u6539\u7cfb\u7edf\u914d\u7f6e","text":"

                                                              \u53ef\u89c2\u6d4b\u6027\u4f1a\u9ed8\u8ba4\u6301\u4e45\u5316\u4fdd\u5b58\u6307\u6807\u3001\u65e5\u5fd7\u3001\u94fe\u8def\u7684\u6570\u636e\uff0c\u60a8\u53ef\u53c2\u9605\u672c\u6587\u4fee\u6539\u7cfb\u7edf\u914d\u7f6e\u3002\u8be5\u6587\u6863\u4ec5\u9002\u7528\u4e8e\u5185\u7f6e\u90e8\u7f72\u7684 Elasticsearch\uff0c\u82e5\u4f7f\u7528\u5916\u90e8 Elasticsearch \u53ef\u81ea\u884c\u8c03\u6574\u3002

                                                              "},{"location":"end-user/insight/system-config/modify-config.html#_2","title":"\u5982\u4f55\u4fee\u6539\u6307\u6807\u6570\u636e\u4fdd\u7559\u671f\u9650","text":"

                                                              \u5148 ssh \u767b\u5f55\u5230\u5bf9\u5e94\u7684\u8282\u70b9\uff0c\u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\u4fee\u6539\u6307\u6807\u6570\u636e\u4fdd\u7559\u671f\u9650\u3002

                                                              1. \u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a

```sh
kubectl edit vmcluster insight-victoria-metrics-k8s-stack -n insight-system
```
2. In the YAML, the default value of retentionPeriod is 14, in days. Modify the parameter as needed.

```yaml
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMCluster
metadata:
  annotations:
    meta.helm.sh/release-name: insight
    meta.helm.sh/release-namespace: insight-system
  creationTimestamp: "2022-08-25T04:31:02Z"
  finalizers:
  - apps.victoriametrics.com/finalizer
  generation: 2
  labels:
    app.kubernetes.io/instance: insight
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: victoria-metrics-k8s-stack
    app.kubernetes.io/version: 1.77.2
    helm.sh/chart: victoria-metrics-k8s-stack-0.9.3
  name: insight-victoria-metrics-k8s-stack
  namespace: insight-system
  resourceVersion: "123007381"
  uid: 55cee8d6-c651-404b-b2c9-50603b405b54
spec:
  replicationFactor: 1
  retentionPeriod: "14"
  vminsert:
    extraArgs:
      maxLabelsPerTimeseries: "45"
    image:
      repository: docker.m.daocloud.io/victoriametrics/vminsert
      tag: v1.80.0-cluster
    replicaCount: 1
```
3. After saving the change, the pods of the component that stores metrics restart automatically; wait a moment for this to complete.
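As a non-interactive alternative to kubectl edit, a merge patch like the following sketch should achieve the same change (the 30-day value is just an example):

```sh
kubectl patch vmcluster insight-victoria-metrics-k8s-stack -n insight-system \
  --type merge -p '{"spec":{"retentionPeriod":"30"}}'
```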

                                                              "},{"location":"end-user/insight/system-config/modify-config.html#_3","title":"\u5982\u4f55\u4fee\u6539\u65e5\u5fd7\u6570\u636e\u5b58\u50a8\u65f6\u957f","text":"

                                                              \u5148 ssh \u767b\u5f55\u5230\u5bf9\u5e94\u7684\u8282\u70b9\uff0c\u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\u4fee\u6539\u65e5\u5fd7\u6570\u636e\u4fdd\u7559\u671f\u9650\uff1a

                                                              "},{"location":"end-user/insight/system-config/modify-config.html#json","title":"\u65b9\u6cd5\u4e00\uff1a\u4fee\u6539 Json \u6587\u4ef6","text":"
                                                              1. \u4fee\u6539\u4ee5\u4e0b\u6587\u4ef6\u4e2d rollover \u5b57\u6bb5\u4e2d\u7684 max_age \u53c2\u6570\uff0c\u5e76\u8bbe\u7f6e\u4fdd\u7559\u671f\u9650\uff0c\u9ed8\u8ba4\u5b58\u50a8\u65f6\u957f\u4e3a 7d \u3002\u6ce8\u610f\u9700\u8981\u4fee\u6539\u7b2c\u4e00\u884c\u4e2d\u7684 Elastic \u7528\u6237\u540d\u548c\u5bc6\u7801\u3001IP \u5730\u5740\u548c\u7d22\u5f15\u3002

```sh
curl --insecure --location -u"elastic:amyVt4o826e322TUVi13Ezw6" -X PUT "https://172.30.47.112:30468/_ilm/policy/insight-es-k8s-logs-policy?pretty" -H 'Content-Type: application/json' -d'
{
    "policy": {
        "phases": {
            "hot": {
                "min_age": "0ms",
                "actions": {
                    "set_priority": {
                        "priority": 100
                    },
                    "rollover": {
                        "max_age": "8d",
                        "max_size": "10gb"
                    }
                }
            },
            "warm": {
                "min_age": "10d",
                "actions": {
                    "forcemerge": {
                        "max_num_segments": 1
                    }
                }
            },
            "delete": {
                "min_age": "30d",
                "actions": {
                    "delete": {}
                }
            }
        }
    }
}'
```
2. After making the changes, run the command above. If it prints the following, the modification succeeded:

                                                                {\n\"acknowledged\" : true\n}\n
                                                              "},{"location":"end-user/insight/system-config/modify-config.html#ui","title":"\u65b9\u6cd5\u4e8c\uff1a\u4ece UI \u4fee\u6539","text":"
                                                              1. \u767b\u5f55 kibana \uff0c\u9009\u62e9\u5de6\u4fa7\u5bfc\u822a\u680f Stack Management \u3002

2. In the left navigation, select Index Lifecycle Policies, find the policy insight-es-k8s-logs-policy, and click it to open the details.

3. Expand the Hot phase configuration panel, modify the Maximum age parameter, and set the retention period; the default is 7d.

4. After making the changes, click Save policy at the bottom of the page to apply them.

                                                              "},{"location":"end-user/insight/system-config/modify-config.html#_4","title":"\u5982\u4f55\u4fee\u6539\u94fe\u8def\u6570\u636e\u5b58\u50a8\u65f6\u957f","text":"

                                                              \u5148 ssh \u767b\u5f55\u5230\u5bf9\u5e94\u7684\u8282\u70b9\uff0c\u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\u4fee\u6539\u94fe\u8def\u6570\u636e\u4fdd\u7559\u671f\u9650\uff1a

                                                              "},{"location":"end-user/insight/system-config/modify-config.html#json_1","title":"\u65b9\u6cd5\u4e00\uff1a\u4fee\u6539 Json \u6587\u4ef6","text":"
                                                              1. \u4fee\u6539\u4ee5\u4e0b\u6587\u4ef6\u4e2d rollover \u5b57\u6bb5\u4e2d\u7684 max_age \u53c2\u6570\uff0c\u5e76\u8bbe\u7f6e\u4fdd\u7559\u671f\u9650\uff0c\u9ed8\u8ba4\u5b58\u50a8\u65f6\u957f\u4e3a 7d \u3002\u6ce8\u610f\u9700\u8981\u4fee\u6539\u7b2c\u4e00\u884c\u4e2d\u7684 Elastic \u7528\u6237\u540d\u548c\u5bc6\u7801\u3001IP \u5730\u5740\u548c\u7d22\u5f15\u3002

```sh
curl --insecure --location -u"elastic:amyVt4o826e322TUVi13Ezw6" -X PUT "https://172.30.47.112:30468/_ilm/policy/jaeger-ilm-policy?pretty" -H 'Content-Type: application/json' -d'
{
    "policy": {
        "phases": {
            "hot": {
                "min_age": "0ms",
                "actions": {
                    "set_priority": {
                        "priority": 100
                    },
                    "rollover": {
                        "max_age": "6d",
                        "max_size": "10gb"
                    }
                }
            },
            "warm": {
                "min_age": "10d",
                "actions": {
                    "forcemerge": {
                        "max_num_segments": 1
                    }
                }
            },
            "delete": {
                "min_age": "30d",
                "actions": {
                    "delete": {}
                }
            }
        }
    }
}'
```
2. After making the changes, run the command above in the console. If it prints the following, the modification succeeded:

                                                                {\n\"acknowledged\" : true\n}\n
                                                              "},{"location":"end-user/insight/system-config/modify-config.html#ui_1","title":"\u65b9\u6cd5\u4e8c\uff1a\u4ece UI \u4fee\u6539","text":"
                                                              1. \u767b\u5f55 kibana \uff0c\u9009\u62e9\u5de6\u4fa7\u5bfc\u822a\u680f Stack Management \u3002

2. In the left navigation, select Index Lifecycle Policies, find the policy jaeger-ilm-policy, and click it to open the details.

3. Expand the Hot phase configuration panel, modify the Maximum age parameter, and set the retention period; the default is 7d.

4. After making the changes, click Save policy at the bottom of the page to apply them.

                                                              "},{"location":"end-user/insight/system-config/system-component.html","title":"\u7cfb\u7edf\u7ec4\u4ef6","text":"

                                                              \u5728\u7cfb\u7edf\u7ec4\u4ef6\u9875\u9762\u53ef\u5feb\u901f\u7684\u67e5\u770b\u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u4e2d\u7cfb\u7edf\u7ec4\u4ef6\u7684\u8fd0\u884c\u72b6\u6001\uff0c\u5f53\u7cfb\u7528\u7ec4\u4ef6\u53d1\u751f\u6545\u969c\u65f6\uff0c\u4f1a\u5bfc\u81f4\u53ef\u89c2\u6d4b\u6a21\u5757\u4e2d\u7684\u90e8\u5206\u529f\u80fd\u4e0d\u53ef\u7528\u3002

                                                              1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u4ea7\u54c1\u6a21\u5757\uff0c
                                                              2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u7cfb\u7edf\u7ba1\u7406 -> \u7cfb\u7edf\u7ec4\u4ef6 \u3002

                                                              "},{"location":"end-user/insight/system-config/system-component.html#_2","title":"\u7ec4\u4ef6\u8bf4\u660e","text":"\u6a21\u5757 \u7ec4\u4ef6\u540d\u79f0 \u8bf4\u660e \u6307\u6807 vminsert-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u5c06\u5404\u96c6\u7fa4\u4e2d Prometheus \u91c7\u96c6\u5230\u7684\u6307\u6807\u6570\u636e\u5199\u5165\u5b58\u50a8\u7ec4\u4ef6\u3002\u8be5\u7ec4\u4ef6\u5f02\u5e38\u4f1a\u5bfc\u81f4\u65e0\u6cd5\u5199\u5165\u5de5\u4f5c\u96c6\u7fa4\u7684\u6307\u6807\u6570\u636e\u3002 \u6307\u6807 vmalert-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u751f\u6548 VM Rule \u4e2d\u914d\u7f6e\u7684 recording \u548c Alert \u89c4\u5219\uff0c\u5e76\u5c06\u89e6\u53d1\u7684\u544a\u8b66\u89c4\u5219\u53d1\u9001\u7ed9 alertmanager\u3002 \u6307\u6807 vmalertmanager-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u5728\u544a\u8b66\u89e6\u65f6\u53d1\u9001\u6d88\u606f\u3002\u8be5\u7ec4\u4ef6\u5f02\u5e38\u4f1a\u5bfc\u81f4\u65e0\u6cd5\u53d1\u9001\u544a\u8b66\u4fe1\u606f\u3002 \u6307\u6807 vmselect-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u67e5\u8be2\u6307\u6807\u6570\u636e\u3002\u8be5\u7ec4\u4ef6\u5f02\u5e38\u4f1a\u5bfc\u81f4\u65e0\u6cd5\u67e5\u8be2\u6307\u6807\u3002 \u6307\u6807 vmstorage-insight-victoria-metrics-k8s-stack \u8d1f\u8d23\u5b58\u50a8\u591a\u96c6\u7fa4\u7684\u6307\u6807\u6570\u636e\u3002 \u4eea\u8868\u76d8 grafana-deployment \u63d0\u4f9b\u76d1\u63a7\u9762\u677f\u80fd\u529b\u3002\u8be5\u7ec4\u4ef6\u5f02\u5e38\u4f1a\u5bfc\u81f4\u65e0\u6cd5\u67e5\u770b\u5185\u7f6e\u7684\u4eea\u8868\u76d8\u3002 \u94fe\u8def insight-jaeger-collector \u8d1f\u8d23\u63a5\u6536\u00a0opentelemetry-collector\u00a0\u4e2d\u94fe\u8def\u6570\u636e\u5e76\u5c06\u5176\u8fdb\u884c\u5b58\u50a8\u3002 \u94fe\u8def insight-jaeger-query \u8d1f\u8d23\u67e5\u8be2\u5404\u96c6\u7fa4\u4e2d\u91c7\u96c6\u5230\u7684\u94fe\u8def\u6570\u636e\u3002 \u94fe\u8def insight-opentelemetry-collector \u8d1f\u8d23\u63a5\u6536\u5404\u5b50\u96c6\u7fa4\u8f6c\u53d1\u7684\u94fe\u8def\u6570\u636e \u65e5\u5fd7 elasticsearch \u8d1f\u8d23\u5b58\u50a8\u5404\u96c6\u7fa4\u7684\u65e5\u5fd7\u6570\u636e\u3002

                                                              Note

If an external Elasticsearch is used, some data may be unavailable, so the Elasticsearch information may appear empty.

                                                              "},{"location":"end-user/insight/system-config/system-config.html","title":"\u7cfb\u7edf\u914d\u7f6e","text":"

                                                              \u7cfb\u7edf\u914d\u7f6e \u5c55\u793a\u6307\u6807\u3001\u65e5\u5fd7\u3001\u94fe\u8def\u9ed8\u8ba4\u7684\u4fdd\u5b58\u65f6\u957f\u4ee5\u53ca\u9ed8\u8ba4\u7684 Apdex \u9608\u503c\u3002

                                                              1. \u70b9\u51fb\u53f3\u4fa7\u5bfc\u822a\u680f\uff0c\u9009\u62e9 \u7cfb\u7edf\u914d\u7f6e\u3002

2. To modify the retention period for historical alerts, click Edit and enter the target duration.

   When the retention period is set to "0", historical alerts are never cleared.

3. To modify the default topology rendering configuration, click Edit and define the topology thresholds as required.

   Thresholds must be greater than 0, and each threshold must be smaller than the one after it; all values must fall within the minimum-maximum range.

                                                              Note

To modify other configurations, see How to modify system configuration?

                                                              "},{"location":"end-user/insight/trace/service.html","title":"\u670d\u52a1\u76d1\u63a7","text":"

                                                              \u5728 \u53ef\u89c2\u6d4b\u6027 Insight \u4e2d\u670d\u52a1\u662f\u6307\u4f7f\u7528 Opentelemtry SDK \u63a5\u5165\u94fe\u8def\u6570\u636e\uff0c\u670d\u52a1\u76d1\u63a7\u80fd\u591f\u8f85\u52a9\u8fd0\u7ef4\u8fc7\u7a0b\u4e2d\u89c2\u5bdf\u5e94\u7528\u7a0b\u5e8f\u7684\u6027\u80fd\u548c\u72b6\u6001\u3002

                                                              \u5982\u4f55\u4f7f\u7528 OpenTelemetry \u8bf7\u53c2\u8003\u4f7f\u7528 OTel \u8d4b\u4e88\u5e94\u7528\u53ef\u89c2\u6d4b\u6027\u3002

                                                              "},{"location":"end-user/insight/trace/service.html#_2","title":"\u540d\u8bcd\u89e3\u91ca","text":"
                                                              • \u670d\u52a1 \uff1a\u670d\u52a1\u8868\u793a\u4e3a\u4f20\u5165\u8bf7\u6c42\u63d0\u4f9b\u76f8\u540c\u884c\u4e3a\u7684\u4e00\u7ec4\u5de5\u4f5c\u8d1f\u8f7d\u3002\u60a8\u53ef\u4ee5\u5728\u4f7f\u7528 OpenTelemetry SDK \u65f6\u5b9a\u4e49\u670d\u52a1\u540d\u79f0\u6216\u4f7f\u7528 Istio \u4e2d\u5b9a\u4e49\u7684\u540d\u79f0\u3002
                                                              • \u64cd\u4f5c \uff1a\u64cd\u4f5c\u662f\u6307\u4e00\u4e2a\u670d\u52a1\u5904\u7406\u7684\u7279\u5b9a\u8bf7\u6c42\u6216\u64cd\u4f5c\uff0c\u6bcf\u4e2a Span \u90fd\u6709\u4e00\u4e2a\u64cd\u4f5c\u540d\u79f0\u3002
                                                              • \u51fa\u53e3\u6d41\u91cf \uff1a\u51fa\u53e3\u6d41\u91cf\u662f\u6307\u5f53\u524d\u670d\u52a1\u53d1\u8d77\u8bf7\u6c42\u7684\u6240\u6709\u6d41\u91cf\u3002
                                                              • \u5165\u53e3\u6d41\u91cf \uff1a\u5165\u53e3\u6d41\u91cf\u662f\u6307\u4e0a\u6e38\u670d\u52a1\u5bf9\u5f53\u524d\u670d\u52a1\u53d1\u8d77\u8bf7\u6c42\u7684\u6240\u6709\u6d41\u91cf\u3002
                                                              "},{"location":"end-user/insight/trace/service.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                              \u670d\u52a1\u5217\u8868\u9875\u9762\u5c55\u793a\u4e86\u96c6\u7fa4\u4e2d\u6240\u6709\u5df2\u63a5\u5165\u94fe\u8def\u6570\u636e\u7684\u670d\u52a1\u7684\u541e\u5410\u7387\u3001\u9519\u8bef\u7387\u3001\u8bf7\u6c42\u5ef6\u65f6\u7b49\u5173\u952e\u6307\u6807\u3002 \u60a8\u53ef\u4ee5\u6839\u636e\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\u5bf9\u670d\u52a1\u8fdb\u884c\u8fc7\u6ee4\uff0c\u4e5f\u53ef\u4ee5\u6309\u7167\u541e\u5410\u7387\u3001\u9519\u8bef\u7387\u3001\u8bf7\u6c42\u5ef6\u65f6\u5bf9\u8be5\u5217\u8868\u8fdb\u884c\u6392\u5e8f\u3002\u5217\u8868\u4e2d\u7684\u6307\u6807\u6570\u636e\u9ed8\u8ba4\u65f6\u95f4\u4e3a 1 \u5c0f\u65f6\uff0c\u60a8\u53ef\u4ee5\u81ea\u5b9a\u4e49\u65f6\u95f4\u8303\u56f4\u3002

Follow the steps below to view service monitoring metrics:

1. Enter the Observability product module.

2. In the left navigation bar, select Tracing -> Services.

                                                                Attention

  1. If a service's namespace appears as unknown in the list, the service was not onboarded correctly; onboarding it again is recommended.
  2. If several onboarded services share the same name and none of them sets the namespace environment variable correctly, the monitoring data shown in the list and on the service detail page is an aggregate across those services.
3. Click a service name (take insight-server as an example) to open the service detail page and view the service's detailed metrics and its operation metrics.

  1. In the service topology module, you can view the service topology one layer upstream and downstream of the selected service; hover over a node to see its information.
  2. In the traffic metrics module, you can view the monitoring metrics of all requests (inbound and outbound traffic) for the service over the default one-hour window.
  3. Use the time selector in the upper right corner to quickly pick a time range, or set a custom one.
  4. In the Associated Containers module, click a pod name to jump to the pod's detail page.

4. Switch to the Operation Metrics tab to query aggregated traffic metrics for the same operation across multiple selected services.

  1. Operation metrics such as throughput, error rate, and request latency can be sorted.
  2. Click the icon after an individual operation to jump to Traces and quickly query the related traces.

                                                              "},{"location":"end-user/insight/trace/service.html#_4","title":"\u670d\u52a1\u6307\u6807\u8bf4\u660e","text":"\u53c2\u6570 \u8bf4\u660e \u541e\u5410\u7387 \u5355\u4f4d\u65f6\u95f4\u5185\u5904\u7406\u8bf7\u6c42\u7684\u6570\u91cf\u3002 \u9519\u8bef\u7387 \u67e5\u8be2\u65f6\u95f4\u8303\u56f4\u5185\u9519\u8bef\u8bf7\u6c42\u4e0e\u8bf7\u6c42\u603b\u6570\u7684\u6bd4\u503c\u3002 P50 \u8bf7\u6c42\u5ef6\u65f6 \u5728\u6240\u6709\u7684\u8bf7\u6c42\u4e2d\uff0c\u6709 50% \u7684\u8bf7\u6c42\u54cd\u5e94\u65f6\u95f4\u5c0f\u4e8e\u6216\u7b49\u4e8e\u8be5\u503c\u3002 P95 \u8bf7\u6c42\u5ef6\u65f6 \u5728\u6240\u6709\u7684\u8bf7\u6c42\u4e2d\uff0c\u6709 95% \u7684\u8bf7\u6c42\u54cd\u5e94\u65f6\u95f4\u5c0f\u4e8e\u6216\u7b49\u4e8e\u8be5\u503c\u3002 P99 \u8bf7\u6c42\u5ef6\u65f6 \u5728\u6240\u6709\u7684\u8bf7\u6c42\u4e2d\uff0c\u6709 95% \u7684\u8bf7\u6c42\u54cd\u5e94\u65f6\u95f4\u5c0f\u4e8e\u6216\u7b49\u4e8e\u8be5\u503c\u3002"},{"location":"end-user/insight/trace/topology.html","title":"\u670d\u52a1\u62d3\u6251","text":"

A service topology map is a visual representation of the connections, communication, and dependencies between services. It lets you understand the call relationships between services and view their calls and performance over a specified time window. An edge between two nodes in the topology indicates that a call relationship existed between the two services within the query time range.

                                                              "},{"location":"end-user/insight/trace/topology.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              1. \u96c6\u7fa4\u4e2d\u5df2\u5b89\u88c5 insight-agent \u4e14\u5e94\u7528\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u3002
                                                              2. \u670d\u52a1\u5df2\u901a\u8fc7 Operator \u6216 Opentelemetry SDK \u7684\u65b9\u5f0f\u63a5\u5165\u94fe\u8def\u3002
                                                              "},{"location":"end-user/insight/trace/topology.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                              1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u6a21\u5757
                                                              2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u94fe\u8def\u8ffd\u8e2a -> \u670d\u52a1\u62d3\u6251
                                                              3. \u5728\u62d3\u6251\u56fe\u4e2d\uff0c\u60a8\u53ef\u6309\u9700\u6267\u884c\u4ee5\u4e0b\u64cd\u4f5c\uff1a

  • Click a node to slide out the service's details on the right, including its request latency, throughput, and error rate metrics. Click the service name to jump to that service's detail page.
  • Hover over an edge to view the traffic metrics for requests between the two services.
  • In the Display Settings module, configure which elements are shown in the topology map.

4. Click Legend in the lower right corner to adjust the rendering thresholds of the current topology via Temporary Configuration; the configuration is lost as soon as you navigate away or close the page.

   Thresholds must be greater than 0, and each threshold must be smaller than the one after it; all values must fall within the minimum-maximum range.

                                                              "},{"location":"end-user/insight/trace/topology.html#_4","title":"\u5176\u4ed6\u8282\u70b9","text":"

                                                              \u5728\u670d\u52a1\u62d3\u6251\u4e2d\u4f1a\u5b58\u5728\u6e38\u79bb\u5728\u96c6\u7fa4\u4e4b\u5916\u7684\u8282\u70b9\uff0c\u8fd9\u4e9b\u6e38\u79bb\u5728\u5916\u7684\u8282\u70b9\u53ef\u5206\u6210\u4e09\u7c7b\uff1a

• Databases
• Message queues
• Virtual nodes

• When a service makes requests to a database or message queue, those two kinds of nodes are shown in the topology by default. A virtual service indicates that an in-cluster service called a node outside the cluster or a service that has not been onboarded for tracing; virtual services are not shown in the topology by default.

• When a service calls MySQL, PostgreSQL, or Oracle Database, the specific database type is visible in the topology.

                                                              "},{"location":"end-user/insight/trace/topology.html#_5","title":"\u5f00\u542f\u865a\u62df\u8282\u70b9","text":"
                                                              1. \u66f4\u65b0 insight-server chart \u7684 values\uff0c\u627e\u5230\u4e0b\u56fe\u6240\u793a\u53c2\u6570\uff0c\u5c06 false \u6539\u4e3a true\u3002

2. In the service topology's display settings, check Virtual Services. A command-line sketch of step 1 follows below.
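This page does not name the exact values key (it appears only in the original figure), so the key below is hypothetical and must be replaced with the real parameter from the chart's values:

```sh
# Hypothetical key: look up the real parameter in the insight-server chart's
# values before running this.
helm upgrade insight-server insight/insight-server -n insight-system \
  --reuse-values --set virtualService.enabled=true
```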

                                                              "},{"location":"end-user/insight/trace/trace.html","title":"\u94fe\u8def\u67e5\u8be2","text":"

                                                              \u5728\u94fe\u8def\u67e5\u8be2\u9875\u9762\uff0c\u60a8\u53ef\u4ee5\u8fc7 TraceID \u6216\u7cbe\u786e\u67e5\u8be2\u8c03\u7528\u94fe\u8def\u8be6\u7ec6\u60c5\u51b5\u6216\u7ed3\u5408\u591a\u79cd\u6761\u4ef6\u7b5b\u9009\u67e5\u8be2\u8c03\u7528\u94fe\u8def\u3002

                                                              "},{"location":"end-user/insight/trace/trace.html#_2","title":"\u540d\u8bcd\u89e3\u91ca","text":"
                                                              • TraceID\uff1a\u7528\u4e8e\u6807\u8bc6\u4e00\u4e2a\u5b8c\u6574\u7684\u8bf7\u6c42\u8c03\u7528\u94fe\u8def\u3002
                                                              • \u64cd\u4f5c\uff1a\u63cf\u8ff0 Span \u6240\u4ee3\u8868\u7684\u5177\u4f53\u64cd\u4f5c\u6216\u4e8b\u4ef6\u3002
                                                              • \u5165\u53e3 Span\uff1a\u5165\u53e3 Span \u4ee3\u8868\u4e86\u6574\u4e2a\u8bf7\u6c42\u7684\u7b2c\u4e00\u4e2a\u8bf7\u6c42\u3002
                                                              • \u5ef6\u65f6\uff1a\u6574\u4e2a\u8c03\u7528\u94fe\u4ece\u5f00\u59cb\u63a5\u6536\u8bf7\u6c42\u5230\u5b8c\u6210\u54cd\u5e94\u7684\u6301\u7eed\u65f6\u95f4\u3002
                                                              • Span\uff1a\u6574\u4e2a\u94fe\u8def\u4e2d\u5305\u542b\u7684 Span \u4e2a\u6570\u3002
                                                              • \u53d1\u751f\u65f6\u95f4\uff1a\u5f53\u524d\u94fe\u8def\u5f00\u59cb\u7684\u65f6\u95f4\u3002
                                                              • Tag\uff1a\u4e00\u7ec4\u952e\u503c\u5bf9\u6784\u6210\u7684 Span \u6807\u7b7e\u96c6\u5408\uff0cTag \u662f\u7528\u6765\u5bf9 Span \u8fdb\u884c\u7b80\u5355\u7684\u6ce8\u89e3\u548c\u8865\u5145\uff0c\u6bcf\u4e2a Span \u53ef\u4ee5\u6709\u591a\u4e2a\u7b80\u76f4\u5bf9\u5f62\u5f0f\u7684 Tag\u3002
                                                              "},{"location":"end-user/insight/trace/trace.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                              \u8bf7\u6309\u7167\u4ee5\u4e0b\u6b65\u9aa4\u67e5\u8be2\u94fe\u8def\uff1a

                                                              1. \u8fdb\u5165 \u53ef\u89c2\u6d4b\u6027 \u4ea7\u54c1\u6a21\u5757\uff0c
                                                              2. \u5728\u5de6\u8fb9\u5bfc\u822a\u680f\u9009\u62e9 \u94fe\u8def\u8ffd\u8e2a -> \u8c03\u7528\u94fe\u3002

                                                                Note

The list supports sorting by span count, latency, and start time.

3. Click TraceID Search in the filter bar to switch to searching traces by TraceID.

4. When searching by TraceID, enter the complete TraceID.

                                                              "},{"location":"end-user/insight/trace/trace.html#_4","title":"\u5176\u4ed6\u64cd\u4f5c","text":""},{"location":"end-user/insight/trace/trace.html#_5","title":"\u67e5\u770b\u94fe\u8def\u8be6\u60c5","text":"
                                                              1. \u70b9\u51fb\u94fe\u8def\u5217\u8868\u4e2d\u7684\u67d0\u4e00\u94fe\u8def\u7684 TraceID\uff0c\u53ef\u67e5\u770b\u8be5\u94fe\u8def\u7684\u8be6\u60c5\u8c03\u7528\u60c5\u51b5\u3002

                                                              "},{"location":"end-user/insight/trace/trace.html#_6","title":"\u67e5\u770b\u5173\u8054\u65e5\u5fd7","text":"
                                                              1. \u70b9\u51fb\u94fe\u8def\u6570\u636e\u53f3\u4fa7\u7684\u56fe\u6807\uff0c\u53ef\u67e5\u8be2\u8be5\u94fe\u8def\u7684\u5173\u8054\u65e5\u5fd7\u3002

  • By default, this queries log data for the duration of the trace plus one minute after it ends.
  • The queried logs include log lines whose text contains the trace's TraceID, plus container logs related to the trace's call path.
2. Click View More to jump to the Log Query page with the conditions carried over.

3. All logs are searched by default, but you can use the drop-down to filter by the trace's TraceID or by the container logs related to the trace's call path.

                                                                Note

Because a trace can span clusters or namespaces, a user with insufficient permissions cannot query the logs associated with that trace.

                                                              "},{"location":"end-user/k8s/add-node.html","title":"\u6dfb\u52a0\u5de5\u4f5c\u8282\u70b9","text":"

                                                              \u5982\u679c\u8282\u70b9\u4e0d\u591f\u7528\u4e86\uff0c\u53ef\u4ee5\u6dfb\u52a0\u66f4\u591a\u8282\u70b9\u5230\u96c6\u7fa4\u4e2d\u3002

                                                              "},{"location":"end-user/k8s/add-node.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                              • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                                              • \u6709\u4e00\u4e2a\u7ba1\u7406\u5458\u5e10\u53f7
                                                              • \u5df2\u521b\u5efa\u5e26 GPU \u8282\u70b9\u7684\u96c6\u7fa4
                                                              • \u51c6\u5907\u4e00\u53f0\u4e91\u4e3b\u673a
                                                              "},{"location":"end-user/k8s/add-node.html#_3","title":"\u6dfb\u52a0\u6b65\u9aa4","text":"
                                                              1. \u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                                                              2. \u5bfc\u822a\u81f3 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0

                                                              3. \u8fdb\u5165\u96c6\u7fa4\u6982\u89c8\u9875\uff0c\u70b9\u51fb \u8282\u70b9\u7ba1\u7406 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u63a5\u5165\u8282\u70b9 \u6309\u94ae

                                                              4. \u6309\u7167\u5411\u5bfc\uff0c\u586b\u5199\u5404\u9879\u53c2\u6570\u540e\u70b9\u51fb \u786e\u5b9a

                                                                \u57fa\u672c\u4fe1\u606f\u53c2\u6570\u914d\u7f6e

                                                              5. \u5728\u5f39\u7a97\u4e2d\u70b9\u51fb \u786e\u5b9a

                                                              6. \u8fd4\u56de\u8282\u70b9\u5217\u8868\uff0c\u65b0\u63a5\u5165\u7684\u8282\u70b9\u72b6\u6001\u4e3a \u63a5\u5165\u4e2d \uff0c\u7b49\u5f85\u51e0\u5206\u949f\u540e\u72b6\u6001\u53d8\u4e3a \u5065\u5eb7 \u5219\u8868\u793a\u63a5\u5165\u6210\u529f\u3002

                                                              Tip

                                                              \u5bf9\u4e8e\u521a\u63a5\u5165\u6210\u529f\u7684\u8282\u70b9\uff0c\u53ef\u80fd\u8fd8\u8981\u7b49 2-3 \u5206\u949f\u624d\u80fd\u8bc6\u522b\u51fa GPU\u3002

                                                              "},{"location":"end-user/k8s/create-k8s.html","title":"\u521b\u5efa\u4e91\u4e0a Kubernetes \u96c6\u7fa4","text":"

                                                              \u90e8\u7f72 Kubernetes \u96c6\u7fa4\u662f\u4e3a\u4e86\u652f\u6301\u9ad8\u6548\u7684 AI \u7b97\u529b\u8c03\u5ea6\u548c\u7ba1\u7406\uff0c\u5b9e\u73b0\u5f39\u6027\u4f38\u7f29\uff0c\u63d0\u4f9b\u9ad8\u53ef\u7528\u6027\uff0c\u4ece\u800c\u4f18\u5316\u6a21\u578b\u8bad\u7ec3\u548c\u63a8\u7406\u8fc7\u7a0b\u3002

                                                              "},{"location":"end-user/k8s/create-k8s.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                              • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0\u5df2
                                                              • \u6709\u4e00\u4e2a\u7ba1\u7406\u5458\u6743\u9650\u7684\u8d26\u53f7
                                                              • \u51c6\u5907\u4e00\u53f0\u5e26 GPU \u7684\u7269\u7406\u673a
                                                              • \u5206\u914d\u4e24\u6bb5 IP \u5730\u5740\uff08Pod CIDR 18 \u4f4d\u3001SVC CIDR 18 \u4f4d\uff0c\u4e0d\u80fd\u4e0e\u73b0\u6709\u7f51\u6bb5\u51b2\u7a81\uff09
                                                              "},{"location":"end-user/k8s/create-k8s.html#_2","title":"\u521b\u5efa\u6b65\u9aa4","text":"
                                                              1. \u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                                                              2. \u521b\u5efa\u5e76\u542f\u52a8 3 \u53f0\u4e0d\u5e26 GPU \u7684\u4e91\u4e3b\u673a\u7528\u4f5c\u96c6\u7fa4\u7684 Master \u8282\u70b9

                                                                • \u914d\u7f6e\u8d44\u6e90\uff0cCPU 16 \u6838\uff0c\u5185\u5b58 32 GB\uff0c\u7cfb\u7edf\u76d8 200 GB\uff08ReadWriteOnce\uff09
                                                                • \u7f51\u7edc\u6a21\u5f0f\u9009\u62e9 Bridge\uff08\u6865\u63a5\uff09
                                                                • \u8bbe\u7f6e root \u5bc6\u7801\u6216\u6dfb\u52a0 SSH \u516c\u94a5\uff0c\u65b9\u4fbf\u4ee5 SSH \u8fde\u63a5
                                                                • \u8bb0\u5f55\u597d 3 \u53f0\u4e3b\u673a\u7684 IP
                                                              3. \u5bfc\u822a\u81f3 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa\u96c6\u7fa4 \u6309\u94ae

                                                              4. \u6309\u7167\u5411\u5bfc\uff0c\u914d\u7f6e\u96c6\u7fa4\u7684\u5404\u9879\u53c2\u6570

                                                                \u57fa\u672c\u4fe1\u606f\u8282\u70b9\u914d\u7f6e\u7f51\u7edc\u914d\u7f6eAddon \u914d\u7f6e\u9ad8\u7ea7\u914d\u7f6e

                                                                \u914d\u7f6e\u5b8c\u8282\u70b9\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u5f00\u59cb\u68c0\u67e5 \uff0c

                                                                \u6bcf\u4e2a\u8282\u70b9\u9ed8\u8ba4\u53ef\u8fd0\u884c 110 \u4e2a Pod\uff08\u5bb9\u5668\u7ec4\uff09\uff0c\u5982\u679c\u8282\u70b9\u914d\u7f6e\u6bd4\u8f83\u9ad8\uff0c\u53ef\u4ee5\u8c03\u6574\u5230 200 \u6216 300 \u4e2a Pod\u3002
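Under the hood, the per-node Pod capacity corresponds to the kubelet's maxPods setting. A minimal sketch of the equivalent KubeletConfiguration fragment, shown only for orientation (the platform wizard applies this for you; editing the kubelet config by hand is an assumption, not a platform step):

apiVersion: kubelet.config.k8s.io/v1beta1\nkind: KubeletConfiguration\n# raise the default per-node Pod limit of 110\nmaxPods: 200\n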

5. Wait for the cluster creation to complete.

6. In the cluster list, find the newly created cluster, click its name, navigate to Helm Apps -> Helm Templates, search for metax-gpu-extensions in the search box, and click its card

7. Click the Install button on the right to start installing the GPU plugin

  Application Settings / Kubernetes Orchestration / Confirmation

  Enter a name, select a namespace, and modify the image address in the YAML:

8. You are returned to the Helm app list automatically; wait for the metax-gpu-extensions status to change to Deployed

9. The cluster is now created successfully. You can view the nodes it contains, and go on to create AI workloads that use the GPU.

Next step: Create an AI workload

                                                              "},{"location":"end-user/k8s/remove-node.html","title":"\u79fb\u9664 GPU \u5de5\u4f5c\u8282\u70b9","text":"

                                                              GPU \u8d44\u6e90\u7684\u6210\u672c\u76f8\u5bf9\u8f83\u9ad8\uff0c\u5982\u679c\u6682\u65f6\u7528\u4e0d\u5230 GPU\uff0c\u53ef\u4ee5\u5c06\u5e26 GPU \u7684\u5de5\u4f5c\u8282\u70b9\u79fb\u9664\u3002 \u4ee5\u4e0b\u6b65\u9aa4\u4e5f\u540c\u6837\u9002\u7528\u4e8e\u79fb\u9664\u666e\u901a\u5de5\u4f5c\u8282\u70b9\u3002

                                                              "},{"location":"end-user/k8s/remove-node.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                              • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                                              • \u6709\u4e00\u4e2a\u7ba1\u7406\u5458\u5e10\u53f7
                                                              • \u5df2\u521b\u5efa\u5e26 GPU \u8282\u70b9\u7684\u96c6\u7fa4
                                                              "},{"location":"end-user/k8s/remove-node.html#_2","title":"\u79fb\u9664\u6b65\u9aa4","text":"
                                                              1. \u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                                                              2. \u5bfc\u822a\u81f3 \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u5217\u8868 \uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0

                                                              3. \u8fdb\u5165\u96c6\u7fa4\u6982\u89c8\u9875\uff0c\u70b9\u51fb \u8282\u70b9\u7ba1\u7406 \uff0c\u627e\u5230\u8981\u79fb\u9664\u7684\u8282\u70b9\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9 \u79fb\u9664\u8282\u70b9

                                                              4. \u5728\u5f39\u6846\u4e2d\u8f93\u5165\u8282\u70b9\u540d\u79f0\uff0c\u786e\u8ba4\u65e0\u8bef\u540e\u70b9\u51fb \u5220\u9664

                                                              5. \u81ea\u52a8\u8fd4\u56de\u8282\u70b9\u5217\u8868\uff0c\u72b6\u6001\u4e3a \u79fb\u9664\u4e2d \uff0c\u51e0\u5206\u949f\u540e\u5237\u65b0\u9875\u9762\uff0c\u8282\u70b9\u4e0d\u5728\u4e86\uff0c\u8bf4\u660e\u8282\u70b9\u88ab\u6210\u529f\u79fb\u9664

                                                              6. \u4ece UI \u5217\u8868\u79fb\u9664\u8282\u70b9\u540e\uff0c\u901a\u8fc7 SSH \u767b\u5f55\u5230\u5df2\u79fb\u9664\u7684\u8282\u70b9\u4e3b\u673a\uff0c\u6267\u884c\u5173\u673a\u547d\u4ee4\u3002
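A typical shutdown command on the removed host (assuming sudo privileges):

sudo shutdown -h now\n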

Tip

After the node is removed in the UI and shut down, the data on it is not deleted immediately; the node's data is retained for a period of time.

                                                              "},{"location":"end-user/kpanda/backup/index.html","title":"\u5907\u4efd\u6062\u590d","text":"

                                                              \u5907\u4efd\u6062\u590d\u5206\u4e3a\u5907\u4efd\u548c\u6062\u590d\u4e24\u65b9\u9762\uff0c\u5b9e\u9645\u5e94\u7528\u65f6\u9700\u8981\u5148\u5907\u4efd\u7cfb\u7edf\u5728\u67d0\u4e00\u65f6\u70b9\u7684\u6570\u636e\uff0c\u7136\u540e\u5b89\u5168\u5b58\u50a8\u5730\u5907\u4efd\u6570\u636e\u3002\u540e\u7eed\u5982\u679c\u51fa\u73b0\u6570\u636e\u635f\u574f\u3001\u4e22\u5931\u3001\u8bef\u5220\u7b49\u4e8b\u6545\uff0c\u5c31\u53ef\u4ee5\u57fa\u4e8e\u4e4b\u524d\u7684\u6570\u636e\u5907\u4efd\u5feb\u901f\u8fd8\u539f\u7cfb\u7edf\uff0c\u7f29\u77ed\u6545\u969c\u65f6\u95f4\uff0c\u51cf\u5c11\u635f\u5931\u3002

                                                              • \u5728\u771f\u5b9e\u7684\u751f\u4ea7\u73af\u5883\u4e2d\uff0c\u670d\u52a1\u53ef\u80fd\u5206\u5e03\u5f0f\u5730\u90e8\u7f72\u5728\u4e0d\u540c\u7684\u4e91\u3001\u4e0d\u540c\u533a\u57df\u6216\u53ef\u7528\u533a\uff0c\u5982\u679c\u67d0\u4e00\u4e2a\u57fa\u7840\u8bbe\u65bd\u81ea\u8eab\u51fa\u73b0\u6545\u969c\uff0c\u4f01\u4e1a\u9700\u8981\u5728\u5176\u4ed6\u53ef\u7528\u73af\u5883\u4e2d\u5feb\u901f\u6062\u590d\u5e94\u7528\u3002\u5728\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u8de8\u4e91/\u8de8\u96c6\u7fa4\u7684\u5907\u4efd\u6062\u590d\u663e\u5f97\u975e\u5e38\u91cd\u8981\u3002
                                                              • \u5728\u5927\u89c4\u6a21\u7cfb\u7edf\u4e2d\u5f80\u5f80\u6709\u5f88\u591a\u89d2\u8272\u548c\u7528\u6237\uff0c\u6743\u9650\u7ba1\u7406\u4f53\u7cfb\u590d\u6742\uff0c\u64cd\u4f5c\u8005\u4f17\u591a\uff0c\u96be\u514d\u6709\u4eba\u8bef\u64cd\u4f5c\u5bfc\u81f4\u7cfb\u7edf\u6545\u969c\u3002\u5728\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u4e5f\u9700\u8981\u80fd\u591f\u901a\u8fc7\u4e4b\u524d\u5907\u4efd\u7684\u6570\u636e\u5feb\u901f\u56de\u6eda\u7cfb\u7edf\uff0c\u5426\u5219\u5982\u679c\u4f9d\u8d56\u4eba\u4e3a\u6392\u67e5\u6545\u969c\u3001\u4fee\u590d\u6545\u969c\u3001\u6062\u590d\u7cfb\u7edf\u5c31\u4f1a\u8017\u8d39\u5927\u91cf\u65f6\u95f4\uff0c\u7cfb\u7edf\u4e0d\u53ef\u7528\u65f6\u95f4\u8d8a\u957f\uff0c\u4f01\u4e1a\u7684\u635f\u5931\u8d8a\u5927\u3002
                                                              • \u6b64\u5916\uff0c\u8fd8\u6709\u7f51\u7edc\u653b\u51fb\u3001\u81ea\u7136\u707e\u5bb3\u3001\u8bbe\u5907\u6545\u969c\u7b49\u5404\u79cd\u56e0\u7d20\u4e5f\u53ef\u80fd\u5bfc\u81f4\u6570\u636e\u4e8b\u6545

                                                              \u56e0\u6b64\uff0c\u5907\u4efd\u6062\u590d\u975e\u5e38\u91cd\u8981\uff0c\u53ef\u4ee5\u89c6\u4e4b\u4e3a\u7ef4\u62a4\u7cfb\u7edf\u7a33\u5b9a\u548c\u6570\u636e\u5b89\u5168\u7684\u6700\u540e\u4e00\u9053\u4fdd\u9669\u3002

                                                              \u5907\u4efd\u901a\u5e38\u5206\u4e3a\u5168\u91cf\u5907\u4efd\u3001\u589e\u91cf\u5907\u4efd\u3001\u5dee\u5f02\u5907\u4efd\u4e09\u79cd\u3002\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u76ee\u524d\u652f\u6301\u5168\u91cf\u5907\u4efd\u548c\u589e\u91cf\u5907\u4efd\u3002

                                                              \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u63d0\u4f9b\u7684\u5907\u4efd\u6062\u590d\u53ef\u4ee5\u5206\u4e3a \u5e94\u7528\u5907\u4efd \u548c ETCD \u5907\u4efd \u4e24\u79cd\uff0c\u652f\u6301\u624b\u52a8\u5907\u4efd\uff0c\u6216\u57fa\u4e8e CronJob \u5b9a\u65f6\u81ea\u52a8\u5907\u4efd\u3002

                                                              • \u5e94\u7528\u5907\u4efd

                                                                \u5e94\u7528\u5907\u4efd\u6307\uff0c\u5907\u4efd\u96c6\u7fa4\u4e2d\u7684\u67d0\u4e2a\u5de5\u4f5c\u8d1f\u8f7d\u7684\u6570\u636e\uff0c\u7136\u540e\u5c06\u8be5\u5de5\u4f5c\u8d1f\u8f7d\u7684\u6570\u636e\u6062\u590d\u5230\u672c\u96c6\u7fa4\u6216\u8005\u5176\u4ed6\u96c6\u7fa4\u3002\u652f\u6301\u5907\u4efd\u6574\u4e2a\u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u6240\u6709\u8d44\u6e90\uff0c\u4e5f\u652f\u6301\u901a\u8fc7\u6807\u7b7e\u9009\u62e9\u5668\u8fc7\u6ee4\uff0c\u4ec5\u5907\u4efd\u5e26\u6709\u7279\u5b9a\u6807\u7b7e\u7684\u8d44\u6e90\u3002

                                                                \u5e94\u7528\u5907\u4efd\u652f\u6301\u8de8\u96c6\u7fa4\u5907\u4efd\u6709\u72b6\u6001\u5e94\u7528\uff0c\u5177\u4f53\u6b65\u9aa4\u53ef\u53c2\u8003MySQL \u5e94\u7528\u53ca\u6570\u636e\u7684\u8de8\u96c6\u7fa4\u5907\u4efd\u6062\u590d\u3002

                                                              • ETCD \u5907\u4efd

                                                                etcd \u662f Kubernetes \u7684\u6570\u636e\u5b58\u50a8\u7ec4\u4ef6\uff0cKubernetes \u5c06\u81ea\u8eab\u7684\u7ec4\u4ef6\u6570\u636e\u548c\u5176\u4e2d\u7684\u5e94\u7528\u6570\u636e\u90fd\u5b58\u50a8\u5728 etcd \u4e2d\u3002\u56e0\u6b64\uff0c\u5907\u4efd etcd \u5c31\u76f8\u5f53\u4e8e\u5907\u4efd\u6574\u4e2a\u96c6\u7fa4\u7684\u6570\u636e\uff0c\u53ef\u4ee5\u5728\u6545\u969c\u65f6\u5feb\u901f\u5c06\u96c6\u7fa4\u6062\u590d\u5230\u4e4b\u524d\u67d0\u4e00\u65f6\u70b9\u7684\u72b6\u6001\u3002

                                                                \u9700\u8981\u6ce8\u610f\u7684\u662f\uff0c\u76ee\u524d\u4ec5\u652f\u6301\u5c06 etcd \u5907\u4efd\u6570\u636e\u6062\u590d\u5230\u540c\u4e00\u96c6\u7fa4\uff08\u539f\u96c6\u7fa4\uff09\u3002

                                                              "},{"location":"end-user/kpanda/backup/deployment.html","title":"\u5e94\u7528\u5907\u4efd","text":"

                                                              \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4e3a\u5e94\u7528\u505a\u5907\u4efd\uff0c\u672c\u6559\u7a0b\u4e2d\u4f7f\u7528\u7684\u6f14\u793a\u5e94\u7528\u540d\u4e3a dao-2048 \uff0c\u5c5e\u4e8e\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u3002

                                                              "},{"location":"end-user/kpanda/backup/deployment.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u5728\u5bf9\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u8fdb\u884c\u5907\u4efd\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                              • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                                                              • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                              • \u5b89\u88c5 velero \u7ec4\u4ef6\uff0c\u4e14 velero \u7ec4\u4ef6\u8fd0\u884c\u6b63\u5e38\u3002

                                                              • \u521b\u5efa\u4e00\u4e2a\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\uff08\u672c\u6559\u7a0b\u4e2d\u7684\u8d1f\u8f7d\u540d\u4e3a dao-2048 \uff09\uff0c\u5e76\u4e3a\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u6253\u4e0a app: dao-2048 \u7684\u6807\u7b7e\u3002

                                                              "},{"location":"end-user/kpanda/backup/deployment.html#_3","title":"\u5907\u4efd\u5de5\u4f5c\u8d1f\u8f7d","text":"

                                                              \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u5907\u4efd\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d dao-2048 \u3002

                                                              1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c \u70b9\u51fb \u5bb9\u5668\u7ba1\u7406 -> \u5907\u4efd\u6062\u590d \u3002

                                                              2. \u8fdb\u5165 \u5e94\u7528\u5907\u4efd \u5217\u8868\u9875\u9762\uff0c\u4ece\u96c6\u7fa4\u4e0b\u62c9\u5217\u8868\u4e2d\u9009\u62e9\u5df2\u5b89\u88c5\u4e86 velero \u548c dao-2048 \u7684\u96c6\u7fa4\u3002 \u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa\u5907\u4efd\u8ba1\u5212 \u6309\u94ae\u3002

                                                              3. \u53c2\u8003\u4e0b\u65b9\u8bf4\u660e\u586b\u5199\u5907\u4efd\u914d\u7f6e\u3002

                                                              4. \u53c2\u8003\u4e0b\u65b9\u8bf4\u660e\u8bbe\u7f6e\u5907\u4efd\u6267\u884c\u9891\u7387\uff0c\u7136\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                                                • \u5907\u4efd\u9891\u7387\uff1a\u57fa\u4e8e\u5206\u949f\u3001\u5c0f\u65f6\u3001\u5929\u3001\u5468\u3001\u6708\u8bbe\u7f6e\u4efb\u52a1\u6267\u884c\u7684\u65f6\u95f4\u5468\u671f\u3002\u652f\u6301\u7528\u6570\u5b57\u548c * \u81ea\u5b9a\u4e49 Cron \u8868\u8fbe\u5f0f\uff0c\u8f93\u5165\u8868\u8fbe\u5f0f\u540e\u4e0b\u65b9\u4f1a\u63d0\u793a\u5f53\u524d\u8868\u8fbe\u5f0f\u7684\u542b\u4e49 \u3002\u6709\u5173\u8be6\u7ec6\u7684\u8868\u8fbe\u5f0f\u8bed\u6cd5\u89c4\u5219\uff0c\u53ef\u53c2\u8003 Cron \u65f6\u95f4\u8868\u8bed\u6cd5\u3002
                                                                • \u7559\u5b58\u65f6\u957f\uff08\u5929\uff09\uff1a\u8bbe\u7f6e\u5907\u4efd\u8d44\u6e90\u4fdd\u5b58\u7684\u65f6\u95f4\uff0c\u9ed8\u8ba4\u4e3a 30 \u5929\uff0c\u8fc7\u671f\u540e\u5c06\u4f1a\u88ab\u5220\u9664\u3002
                                                                • \u5907\u4efd\u6570\u636e\u5377\uff08PV\uff09\uff1a\u662f\u5426\u5907\u4efd\u6570\u636e\u5377\uff08PV\uff09\u4e2d\u7684\u6570\u636e\uff0c\u652f\u6301\u76f4\u63a5\u590d\u5236\u548c\u4f7f\u7528 CSI \u5feb\u7167\u4e24\u79cd\u65b9\u5f0f\u3002
                                                                  • \u76f4\u63a5\u590d\u5236\uff1a\u76f4\u63a5\u590d\u5236\u6570\u636e\u5377\uff08PV\uff09\u4e2d\u7684\u6570\u636e\u7528\u4e8e\u5907\u4efd\uff1b
                                                                  • \u4f7f\u7528 CSI \u5feb\u7167\uff1a\u4f7f\u7528 CSI \u5feb\u7167\u6765\u5907\u4efd\u6570\u636e\u5377\uff08PV\uff09\u3002\u9700\u8981\u96c6\u7fa4\u4e2d\u6709\u53ef\u7528\u4e8e\u5907\u4efd\u7684 CSI \u5feb\u7167\u7c7b\u578b\u3002
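For example, a hypothetical Cron expression for a backup that runs every day at 02:00 (fields: minute, hour, day of month, month, day of week):

0 2 * * *\n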

5. Click OK; the page automatically returns to the application backup plan list. Find the newly created dao-2048 backup plan, click ┇ on its right, and select Run Now to start the backup.

6. The cluster's Last Execution Status then changes to Backing Up. After the backup completes, click the backup plan's name to view its details.

Note

Workloads of type Job whose status is Completed cannot be backed up.
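The UI steps above drive velero underneath. For reference only, a roughly equivalent ad-hoc backup via the velero CLI might look like the sketch below; the namespace default and the backup name dao-2048-backup are illustrative assumptions:

velero backup create dao-2048-backup --include-namespaces default --selector app=dao-2048\nvelero backup describe dao-2048-backup   # inspect progress and status\n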

                                                              "},{"location":"end-user/kpanda/backup/etcd-backup.html","title":"etcd \u5907\u4efd","text":"

                                                              etcd \u5907\u4efd\u662f\u4ee5\u96c6\u7fa4\u6570\u636e\u4e3a\u6838\u5fc3\u7684\u5907\u4efd\u3002\u5728\u786c\u4ef6\u8bbe\u5907\u635f\u574f\uff0c\u5f00\u53d1\u6d4b\u8bd5\u914d\u7f6e\u9519\u8bef\u7b49\u573a\u666f\u4e2d\uff0c\u53ef\u4ee5\u901a\u8fc7 etcd \u5907\u4efd\u6062\u590d\u96c6\u7fa4\u6570\u636e\u3002

                                                              \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u4e3a\u96c6\u7fa4\u5236\u4f5c etcd \u5907\u4efd\u3002
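For context, the full backup the platform performs corresponds conceptually to an etcd snapshot. A minimal manual sketch, assuming etcdctl v3 on a control-plane node, the certificate paths shown later in this article, and ${node_ip} as a placeholder for a real node IP:

ETCDCTL_API=3 etcdctl snapshot save /tmp/etcd-snapshot.db --endpoints=https://${node_ip}:2379 --cacert=/etc/kubernetes/ssl/etcd/ca.crt --cert=/etc/kubernetes/ssl/apiserver-etcd-client.crt --key=/etc/kubernetes/ssl/apiserver-etcd-client.key\n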

                                                              "},{"location":"end-user/kpanda/backup/etcd-backup.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                              • \u63a5\u5165\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                              • \u521b\u5efa\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 NS Admin \u6216\u66f4\u9ad8\u6743\u9650\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                              • \u51c6\u5907\u4e00\u4e2a MinIO \u5b9e\u4f8b\u3002

                                                              "},{"location":"end-user/kpanda/backup/etcd-backup.html#etcd_1","title":"\u521b\u5efa etcd \u5907\u4efd","text":"

                                                              \u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u521b\u5efa etcd \u5907\u4efd\u3002

                                                              1. \u8fdb\u5165 \u5bb9\u5668\u7ba1\u7406 -> \u5907\u4efd\u6062\u590d -> etcd \u5907\u4efd \uff0c\u70b9\u51fb \u5907\u4efd\u7b56\u7565 \u9875\u7b7e\uff0c\u7136\u540e\u5728\u53f3\u4fa7\u70b9\u51fb \u521b\u5efa\u5907\u4efd\u7b56\u7565 \u3002

                                                              2. \u53c2\u8003\u4ee5\u4e0b\u8bf4\u660e\u586b\u5199 \u57fa\u672c\u4fe1\u606f \u3002\u586b\u5199\u5b8c\u6bd5\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65 \uff0c\u7cfb\u7edf\u5c06\u81ea\u52a8\u6821\u9a8c etcd \u7684\u8054\u901a\u6027\uff0c\u6821\u9a8c\u901a\u8fc7\u4e4b\u540e\u53ef\u4ee5\u8fdb\u884c\u4e0b\u4e00\u6b65\u3002

                                                                • \u5907\u4efd\u96c6\u7fa4\uff1a\u9009\u62e9\u9700\u8981\u5907\u4efd\u54ea\u4e2a\u96c6\u7fa4\u7684 etcd \u6570\u636e\uff0c\u5e76\u5728\u7ec8\u7aef\u767b\u5f55
                                                                • etcd \u5730\u5740\uff1a\u683c\u5f0f\u4e3a https://${\u8282\u70b9IP}:${\u7aef\u53e3\u53f7}

                                                                  • \u5728\u6807\u51c6 Kubernetes \u96c6\u7fa4\u4e2d\uff0cetcd \u7684\u9ed8\u8ba4\u7aef\u53e3\u53f7\u4e3a 2379
                                                                  • \u5728\u516c\u6709\u4e91\u6258\u7ba1\u96c6\u7fa4\u4e2d\uff0c\u9700\u8981\u8054\u7cfb\u76f8\u5173\u5f00\u53d1\u4eba\u5458\u83b7\u53d6 etcd \u7684\u7aef\u53e3\u53f7\u3002 \u8fd9\u662f\u56e0\u4e3a\u516c\u6709\u4e91\u96c6\u7fa4\u7684\u63a7\u5236\u9762\u7ec4\u4ef6\u7531\u4e91\u670d\u52a1\u63d0\u4f9b\u5546\u7ef4\u62a4\u548c\u7ba1\u7406\uff0c\u7528\u6237\u65e0\u6cd5\u76f4\u63a5\u8bbf\u95ee\u6216\u67e5\u770b\u8fd9\u4e9b\u7ec4\u4ef6\uff0c \u4e5f\u65e0\u6cd5\u901a\u8fc7\u5e38\u89c4\u547d\u4ee4\uff08\u5982 kubectl\uff09\u65e0\u6cd5\u83b7\u53d6\u5230\u63a7\u5236\u9762\u7684\u7aef\u53e3\u7b49\u4fe1\u606f\u3002
                                                                  \u83b7\u53d6\u7aef\u53e3\u53f7\u7684\u65b9\u5f0f
                                                                  1. \u5728 kube-system \u547d\u540d\u7a7a\u95f4\u4e0b\u67e5\u627e etcd Pod

                                                                    kubectl get po -n kube-system | grep etcd\n
2. Get the port from the listen-client-urls of the etcd Pod

                                                                    kubectl get po -n kube-system ${etcd_pod_name} -oyaml | grep listen-client-urls # (1)!\n
1. Replace etcd_pod_name with the actual Pod name

  The expected output is as follows; the number after the node IP is the port:

                                                                    - --listen-client-urls=https://127.0.0.1:2379,https://10.6.229.191:2379\n
• CA certificate: view the certificate with the following command, then copy and paste its content into the corresponding field:

                                                                  cat /etc/kubernetes/ssl/etcd/ca.crt\n
• Cert certificate: view the certificate with the following command, then copy and paste its content into the corresponding field:

                                                                  cat /etc/kubernetes/ssl/apiserver-etcd-client.crt\n
• Key: view it with the following command, then copy and paste its content into the corresponding field:

                                                                  cat /etc/kubernetes/ssl/apiserver-etcd-client.key\n
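With the address and credentials above, you can optionally verify connectivity yourself before relying on the automatic check. A minimal sketch, again assuming etcdctl v3 and ${node_ip} as a placeholder for a real node IP:

ETCDCTL_API=3 etcdctl endpoint health --endpoints=https://${node_ip}:2379 --cacert=/etc/kubernetes/ssl/etcd/ca.crt --cert=/etc/kubernetes/ssl/apiserver-etcd-client.crt --key=/etc/kubernetes/ssl/apiserver-etcd-client.key\n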

Note

Click How to Obtain below an input field to see, on the UI page, how to obtain the corresponding information.

3. Fill in the Backup Policy with reference to the following.

  • Backup method: choose manual backup or scheduled backup

    • Manual backup: immediately runs a one-off full backup of the etcd data based on the backup configuration.
    • Scheduled backup: runs periodic full backups of the etcd data at the configured frequency.
  • Backup chain length: the maximum number of backups to retain; the default is 30.

  • Backup frequency: hourly, daily, weekly, and monthly schedules as well as custom schedules are supported.

4. Fill in the Storage Location with reference to the following.

  • Storage provider: S3 storage is selected by default
  • Object storage endpoint: the access address of MinIO
  • Bucket: create a bucket in MinIO and enter its name
  • Username: the MinIO login username
  • Password: the MinIO login password

5. After you click OK, the page jumps to the backup policy list, where you can view all policies created so far.

  • Click the ┇ action button on the right of a policy to view its logs, view its YAML, update it, stop it, run it immediately, and so on.
  • When the backup method is manual, you can click Run Now to perform a backup.
  • When the backup method is scheduled, backups run at the configured times.

"},{"location":"end-user/kpanda/backup/etcd-backup.html#_2","title":"View Backup Policy Logs","text":"

Click Logs to view the log content; 100 lines are shown by default. To view more log information or download the logs, follow the prompt above the logs to the Observability module.

"},{"location":"end-user/kpanda/backup/etcd-backup.html#_3","title":"View Backup Policy Details","text":"

Go to Container Management -> Backup & Restore -> etcd Backup, click the Backup Policy tab, then click a policy's name to view its details.

"},{"location":"end-user/kpanda/backup/etcd-backup.html#_4","title":"View Backup Points","text":"
1. Go to Container Management -> Backup & Restore -> etcd Backup and click the Backup Point tab.
2. After selecting a target cluster, you can view all backups for that cluster.

  Each backup run generates a corresponding backup point; an application can be restored quickly from a backup point in the Success state.

                                                              "},{"location":"end-user/kpanda/backup/install-velero.html","title":"\u5b89\u88c5 velero \u63d2\u4ef6","text":"

                                                              velero \u662f\u4e00\u4e2a\u5907\u4efd\u548c\u6062\u590d Kubernetes \u96c6\u7fa4\u8d44\u6e90\u7684\u5f00\u6e90\u5de5\u5177\u3002\u5b83\u53ef\u4ee5\u5c06 Kubernetes \u96c6\u7fa4\u4e2d\u7684\u8d44\u6e90\u5907\u4efd\u5230\u4e91\u5b58\u50a8\u670d\u52a1\u3001\u672c\u5730\u5b58\u50a8\u6216\u5176\u4ed6\u4f4d\u7f6e\uff0c\u5e76\u4e14\u53ef\u4ee5\u5728\u9700\u8981\u65f6\u5c06\u8fd9\u4e9b\u8d44\u6e90\u6062\u590d\u5230\u540c\u4e00\u6216\u4e0d\u540c\u7684\u96c6\u7fa4\u4e2d\u3002

                                                              \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4f7f\u7528 Helm \u5e94\u7528 \u90e8\u7f72 velero \u63d2\u4ef6\u3002

                                                              "},{"location":"end-user/kpanda/backup/install-velero.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                              \u5b89\u88c5 velero \u63d2\u4ef6\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                              • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                              • \u521b\u5efa velero \u547d\u540d\u7a7a\u95f4\u3002

                                                              • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                              "},{"location":"end-user/kpanda/backup/install-velero.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                              \u8bf7\u6267\u884c\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 velero \u63d2\u4ef6\u3002

                                                              1. \u5728\u96c6\u7fa4\u5217\u8868\u9875\u9762\u627e\u5230\u9700\u8981\u5b89\u88c5 velero \u63d2\u4ef6\u7684\u76ee\u6807\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4f9d\u6b21\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u6a21\u677f \uff0c\u5728\u641c\u7d22\u680f\u8f93\u5165 velero \u8fdb\u884c\u641c\u7d22\u3002

                                                              2. \u9605\u8bfb velero \u63d2\u4ef6\u76f8\u5173\u4ecb\u7ecd\uff0c\u9009\u62e9\u7248\u672c\u540e\u70b9\u51fb \u5b89\u88c5 \u6309\u94ae\u3002\u672c\u6587\u5c06\u4ee5 4.0.2 \u7248\u672c\u4e3a\u4f8b\u8fdb\u884c\u5b89\u88c5\uff0c\u63a8\u8350\u5b89\u88c5 4.0.2 \u6216\u66f4\u9ad8\u7248\u672c\u3002

                                                              3. \u586b\u5199\u548c\u914d\u7f6e\u53c2\u6570\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65

                                                                \u57fa\u672c\u53c2\u6570\u53c2\u6570\u914d\u7f6e

                                                                • \u540d\u79f0\uff1a\u5fc5\u586b\u53c2\u6570\uff0c\u8f93\u5165\u63d2\u4ef6\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09,\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 metrics-server-01\u3002
                                                                • \u547d\u540d\u7a7a\u95f4\uff1a\u63d2\u4ef6\u5b89\u88c5\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4e3a velero \u547d\u540d\u7a7a\u95f4\u3002
                                                                • \u7248\u672c\uff1a\u63d2\u4ef6\u7684\u7248\u672c\uff0c\u6b64\u5904\u4ee5 4.0.2 \u7248\u672c\u4e3a\u4f8b\u3002
                                                                • \u5c31\u7eea\u7b49\u5f85\uff1a\u53ef\u9009\u53c2\u6570\uff0c\u542f\u7528\u540e\uff0c\u5c06\u7b49\u5f85\u5e94\u7528\u4e0b\u6240\u6709\u5173\u8054\u8d44\u6e90\u5904\u4e8e\u5c31\u7eea\u72b6\u6001\uff0c\u624d\u4f1a\u6807\u8bb0\u5e94\u7528\u5b89\u88c5\u6210\u529f\u3002
                                                                • \u5931\u8d25\u5220\u9664\uff1a\u53ef\u9009\u53c2\u6570\uff0c\u5f00\u542f\u540e\uff0c\u5c06\u9ed8\u8ba4\u540c\u6b65\u5f00\u542f\u5c31\u7eea\u7b49\u5f85\u3002\u5982\u679c\u5b89\u88c5\u5931\u8d25\uff0c\u5c06\u5220\u9664\u5b89\u88c5\u76f8\u5173\u8d44\u6e90\u3002
                                                                • \u8be6\u60c5\u65e5\u5fd7\uff1a\u53ef\u9009\u53c2\u6570\uff0c\u5f00\u542f\u540e\u5c06\u8f93\u51fa\u5b89\u88c5\u8fc7\u7a0b\u7684\u8be6\u7ec6\u65e5\u5fd7\u3002

                                                                Note

                                                                \u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u548c/\u6216 \u5931\u8d25\u5220\u9664 \u540e\uff0c\u5e94\u7528\u9700\u8981\u7ecf\u8fc7\u8f83\u957f\u65f6\u95f4\u624d\u4f1a\u88ab\u6807\u8bb0\u4e3a \u8fd0\u884c\u4e2d \u72b6\u6001\u3002

                                                                • S3 Credentials\uff1a

                                                                  • Use secret \uff1a\u4fdd\u6301\u9ed8\u8ba4\u914d\u7f6e true \u3002
                                                                  • Secret name \uff1a\u4fdd\u6301\u9ed8\u8ba4\u914d\u7f6e velero-s3-credential \u3002
                                                                  • SecretContents.aws_access_key_id = \uff1a\u914d\u7f6e\u8bbf\u95ee\u5bf9\u8c61\u5b58\u50a8\u7684\u7528\u6237\u540d\uff0c\u66ff\u6362 \u4e3a\u771f\u5b9e\u53c2\u6570\u3002
                                                                  • SecretContents.aws_secret_access_key = \uff1a\u914d\u7f6e\u8bbf\u95ee\u5bf9\u8c61\u5b58\u50a8\u7684\u5bc6\u7801\uff0c\u66ff\u6362 \u4e3a\u771f\u5b9e\u53c2\u6570\u3002

                                                                    config \"SecretContents \u6837\u4f8b\" [default] aws_access_key_id = minio aws_secret_access_key = minio123

                                                                  • Velero Configuration\uff1a

                                                                    • Backupstoragelocation \uff1avelero \u5907\u4efd\u6570\u636e\u5b58\u50a8\u7684\u4f4d\u7f6e
                                                                    • S3 bucket \uff1a\u7528\u4e8e\u4fdd\u5b58\u5907\u4efd\u6570\u636e\u7684\u5b58\u50a8\u6876\u540d\u79f0(\u9700\u4e3a minio \u5df2\u7ecf\u5b58\u5728\u7684\u771f\u5b9e\u5b58\u50a8\u6876)
                                                                    • Is default BackupStorage \uff1a\u4fdd\u6301\u9ed8\u8ba4\u914d\u7f6e true
                                                                    • S3 access mode \uff1avelero \u5bf9\u6570\u636e\u7684\u8bbf\u95ee\u6a21\u5f0f\uff0c\u53ef\u4ee5\u9009\u62e9
                                                                      • ReadWrite \uff1a\u5141\u8bb8 velero \u8bfb\u5199\u5907\u4efd\u6570\u636e
                                                                      • ReadOnly \uff1a\u5141\u8bb8 velero \u8bfb\u53d6\u5907\u4efd\u6570\u636e\uff0c\u4e0d\u80fd\u4fee\u6539\u5907\u4efd\u6570\u636e
                                                                      • WriteOnly \uff1a\u53ea\u5141\u8bb8 velero \u5199\u5165\u5907\u4efd\u6570\u636e\uff0c\u4e0d\u80fd\u8bfb\u53d6\u5907\u4efd\u6570\u636e
                                                                    • S3 Configs \uff1aS3 \u5b58\u50a8\uff08minio\uff09\u7684\u8be6\u7ec6\u914d\u7f6e
                                                                    • S3 region \uff1a\u4e91\u5b58\u50a8\u7684\u5730\u7406\u533a\u57df\u3002\u9ed8\u8ba4\u4f7f\u7528 us-east-1 \u53c2\u6570\uff0c\u7531\u7cfb\u7edf\u7ba1\u7406\u5458\u63d0\u4f9b
                                                                    • S3 force path style \uff1a\u4fdd\u6301\u9ed8\u8ba4\u914d\u7f6e true
                                                                    • S3 server URL \uff1a\u5bf9\u8c61\u5b58\u50a8\uff08minio\uff09\u7684\u63a7\u5236\u53f0\u8bbf\u95ee\u5730\u5740\uff0cminio \u4e00\u822c\u63d0\u4f9b\u4e86 UI \u8bbf\u95ee\u548c\u63a7\u5236\u53f0\u8bbf\u95ee\u4e24\u4e2a\u670d\u52a1\uff0c\u6b64\u5904\u8bf7\u4f7f\u7528\u63a7\u5236\u53f0\u8bbf\u95ee\u7684\u5730\u5740

                                                                    Note

                                                                    \u8bf7\u786e\u4fdd s3 \u5b58\u50a8\u670d\u52a1\u65f6\u95f4\u8ddf\u5907\u4efd\u8fd8\u539f\u96c6\u7fa4\u65f6\u95f4\u5dee\u572810\u5206\u949f\u4ee5\u5185\uff0c\u6700\u597d\u662f\u65f6\u95f4\u4fdd\u6301\u540c\u6b65\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u6267\u884c\u5907\u4efd\u64cd\u4f5c\u3002

                                                                  • migration plugin configuration\uff1a\u542f\u7528\u4e4b\u540e\uff0c\u5c06\u5728\u4e0b\u4e00\u6b65\u7684 YAML \u4ee3\u7801\u6bb5\u4e2d\u65b0\u589e\uff1a

                                                                    ...\ninitContainers:\n  - image: 'release.daocloud.io/kcoral/velero-plugin-for-migration:v0.3.0'\n    imagePullPolicy: IfNotPresent\n    name: velero-plugin-for-migration\n    volumeMounts:\n      - mountPath: /target\n        name: plugins\n  - image: 'docker.m.daocloud.io/velero/velero-plugin-for-csi:v0.7.0'\n    imagePullPolicy: IfNotPresent\n    name: velero-plugin-for-csi\n    volumeMounts:\n      - mountPath: /target\n        name: plugins\n  - image: 'docker.m.daocloud.io/velero/velero-plugin-for-aws:v1.9.0'\n    imagePullPolicy: IfNotPresent\n    name: velero-plugin-for-aws\n    volumeMounts:\n      - mountPath: /target\n        name: plugins\n...\n
• After confirming the YAML is correct, click OK to complete the installation of the velero plugin. The system then jumps to the Helm Apps list page automatically; wait a few minutes, refresh the page, and the newly installed app appears.
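For reference only, installing velero with the Helm CLI rather than the platform UI might look like the sketch below. The upstream vmware-tanzu chart repository and a values.yaml holding the S3 settings above are assumptions; the chart shipped in the platform's Helm template may differ:

helm repo add vmware-tanzu https://vmware-tanzu.github.io/helm-charts\nhelm install velero vmware-tanzu/velero --namespace velero --version 4.0.2 -f values.yaml\n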

                                                                  • "},{"location":"end-user/kpanda/clusterops/cluster-oversold.html","title":"\u96c6\u7fa4\u52a8\u6001\u8d44\u6e90\u8d85\u5356","text":"

                                                                    \u76ee\u524d\uff0c\u8bb8\u591a\u4e1a\u52a1\u5b58\u5728\u5cf0\u503c\u548c\u4f4e\u8c37\u7684\u73b0\u8c61\u3002\u4e3a\u4e86\u786e\u4fdd\u670d\u52a1\u7684\u6027\u80fd\u548c\u7a33\u5b9a\u6027\uff0c\u5728\u90e8\u7f72\u670d\u52a1\u65f6\uff0c\u901a\u5e38\u4f1a\u6839\u636e\u5cf0\u503c\u9700\u6c42\u6765\u7533\u8bf7\u8d44\u6e90\u3002 \u7136\u800c\uff0c\u5cf0\u503c\u671f\u53ef\u80fd\u975e\u5e38\u77ed\u6682\uff0c\u5bfc\u81f4\u5728\u975e\u5cf0\u503c\u671f\u65f6\u8d44\u6e90\u88ab\u6d6a\u8d39\u3002 \u96c6\u7fa4\u8d44\u6e90\u8d85\u5356 \u5c31\u662f\u5c06\u8fd9\u4e9b\u7533\u8bf7\u4e86\u800c\u672a\u4f7f\u7528\u7684\u8d44\u6e90\uff08\u5373\u7533\u8bf7\u91cf\u4e0e\u4f7f\u7528\u91cf\u7684\u5dee\u503c\uff09\u5229\u7528\u8d77\u6765\uff0c\u4ece\u800c\u63d0\u5347\u96c6\u7fa4\u8d44\u6e90\u5229\u7528\u7387\uff0c\u51cf\u5c11\u8d44\u6e90\u6d6a\u8d39\u3002

                                                                    \u672c\u6587\u4e3b\u8981\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528\u96c6\u7fa4\u52a8\u6001\u8d44\u6e90\u8d85\u5356\u529f\u80fd\u3002

                                                                    "},{"location":"end-user/kpanda/clusterops/cluster-oversold.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                    • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                                                                    • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 Cluster Admin \uff0c \u8be6\u60c5\u53ef\u53c2\u8003\u96c6\u7fa4\u6388\u6743\u3002
                                                                    "},{"location":"end-user/kpanda/clusterops/cluster-oversold.html#_3","title":"\u5f00\u542f\u96c6\u7fa4\u8d85\u5356","text":"
                                                                    1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762

                                                                    2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u96c6\u7fa4\u8fd0\u7ef4 -> \u96c6\u7fa4\u8bbe\u7f6e \uff0c\u7136\u540e\u9009\u62e9 \u9ad8\u7ea7\u914d\u7f6e \u9875\u7b7e

                                                                    3. \u6253\u5f00\u96c6\u7fa4\u8d85\u5356\uff0c\u8bbe\u7f6e\u8d85\u5356\u6bd4

                                                                      • \u82e5\u672a\u5b89\u88c5 cro-operator \u63d2\u4ef6\uff0c\u70b9\u51fb \u7acb\u5373\u5b89\u88c5 \u6309\u94ae\uff0c\u5b89\u88c5\u6d41\u7a0b\u53c2\u8003\u7ba1\u7406 Helm \u5e94\u7528
                                                                      • \u82e5\u5df2\u5b89\u88c5 cro-operator \u63d2\u4ef6\uff0c\u6253\u5f00\u96c6\u7fa4\u8d85\u5356\u5f00\u5173\uff0c\u5219\u53ef\u4ee5\u5f00\u59cb\u4f7f\u7528\u96c6\u7fa4\u8d85\u5356\u529f\u80fd\u3002

                                                                      Note

                                                                      \u9700\u8981\u5728\u96c6\u7fa4\u4e0b\u5bf9\u5e94\u7684 namespace \u6253\u4e0a\u5982\u4e0b\u6807\u7b7e\uff0c\u96c6\u7fa4\u8d85\u5356\u7b56\u7565\u624d\u80fd\u751f\u6548\u3002

                                                                      clusterresourceoverrides.admission.autoscaling.openshift.io/enabled: \"true\"\n
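For example, the label can be applied with kubectl; the namespace name demo-ns is a hypothetical stand-in:

kubectl label namespace demo-ns clusterresourceoverrides.admission.autoscaling.openshift.io/enabled=true\n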

                                                                    "},{"location":"end-user/kpanda/clusterops/cluster-oversold.html#_4","title":"\u4f7f\u7528\u96c6\u7fa4\u8d85\u5356","text":"

                                                                    \u8bbe\u7f6e\u597d\u96c6\u7fa4\u52a8\u6001\u8d44\u6e90\u8d85\u5356\u6bd4\u540e\uff0c\u4f1a\u5728\u5de5\u4f5c\u8d1f\u8f7d\u8fd0\u884c\u65f6\u751f\u6548\u3002\u4e0b\u6587\u4ee5 niginx \u4e3a\u4f8b\uff0c\u9a8c\u8bc1\u4f7f\u7528\u8d44\u6e90\u8d85\u5356\u80fd\u529b\u3002

                                                                    1. \u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d nginx \u5e76\u8bbe\u7f6e\u5bf9\u5e94\u7684\u8d44\u6e90\u9650\u5236\u503c\uff0c\u521b\u5efa\u6d41\u7a0b\u53c2\u8003\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\uff08Deployment\uff09

                                                                    2. \u67e5\u770b\u5de5\u4f5c\u8d1f\u8f7d\u7684 Pod \u8d44\u6e90\u7533\u8bf7\u503c\u4e0e\u9650\u5236\u503c\u7684\u6bd4\u503c\u662f\u5426\u7b26\u5408\u8d85\u552e\u6bd4
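A minimal way to inspect the requests and limits actually applied to the Pod, assuming the nginx workload runs in a hypothetical namespace demo-ns and carries the label app=nginx:

kubectl get pod -n demo-ns -l app=nginx -o jsonpath='{.items[0].spec.containers[0].resources}'\n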

                                                                    "},{"location":"end-user/kpanda/clusterops/cluster-settings.html","title":"\u96c6\u7fa4\u8bbe\u7f6e","text":"

                                                                    \u96c6\u7fa4\u8bbe\u7f6e\u7528\u4e8e\u4e3a\u60a8\u7684\u96c6\u7fa4\u81ea\u5b9a\u4e49\u9ad8\u7ea7\u7279\u6027\u8bbe\u7f6e\uff0c\u5305\u62ec\u662f\u5426\u542f\u7528 GPU\u3001Helm \u4ed3\u5e93\u5237\u65b0\u5468\u671f\u3001Helm \u64cd\u4f5c\u8bb0\u5f55\u4fdd\u7559\u7b49\u3002

                                                                    • \u542f\u7528 GPU\uff1a\u9700\u8981\u9884\u5148\u5728\u96c6\u7fa4\u4e0a\u5b89\u88c5 GPU \u5361\u53ca\u5bf9\u5e94\u9a71\u52a8\u63d2\u4ef6\u3002

                                                                      \u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u6700\u8fd1\u64cd\u4f5c -> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \u3002

                                                                    • Helm \u64cd\u4f5c\u57fa\u7840\u955c\u50cf\u3001\u4ed3\u5e93\u5237\u65b0\u5468\u671f\u3001\u64cd\u4f5c\u8bb0\u5f55\u4fdd\u7559\u6761\u6570\u3001\u662f\u5426\u5f00\u542f\u96c6\u7fa4\u5220\u9664\u4fdd\u62a4\uff08\u5f00\u542f\u540e\u96c6\u7fa4\u5c06\u4e0d\u80fd\u76f4\u63a5\u5378\u8f7d\uff09

                                                                    "},{"location":"end-user/kpanda/clusterops/latest-operations.html","title":"\u6700\u8fd1\u64cd\u4f5c","text":"

                                                                    \u5728\u8be5\u9875\u9762\u53ef\u4ee5\u67e5\u770b\u6700\u8fd1\u7684\u96c6\u7fa4\u64cd\u4f5c\u8bb0\u5f55\u548c Helm \u64cd\u4f5c\u8bb0\u5f55\uff0c\u4ee5\u53ca\u5404\u9879\u64cd\u4f5c\u7684 YAML \u6587\u4ef6\u548c\u65e5\u5fd7\uff0c\u4e5f\u53ef\u4ee5\u5220\u9664\u67d0\u4e00\u6761\u8bb0\u5f55\u3002

                                                                    \u8bbe\u7f6e Helm \u64cd\u4f5c\u7684\u4fdd\u7559\u6761\u6570\uff1a

                                                                    \u7cfb\u7edf\u9ed8\u8ba4\u4fdd\u7559\u6700\u8fd1 100 \u6761 Helm \u64cd\u4f5c\u8bb0\u5f55\u3002\u82e5\u4fdd\u7559\u6761\u6570\u592a\u591a\uff0c\u53ef\u80fd\u4f1a\u9020\u6210\u6570\u636e\u5197\u4f59\uff0c\u4fdd\u7559\u6761\u6570\u592a\u5c11\u53ef\u80fd\u4f1a\u9020\u6210\u60a8\u6240\u9700\u8981\u7684\u5173\u952e\u64cd\u4f5c\u8bb0\u5f55\u7684\u7f3a\u5931\u3002\u9700\u8981\u6839\u636e\u5b9e\u9645\u60c5\u51b5\u8bbe\u7f6e\u5408\u7406\u7684\u4fdd\u7559\u6570\u91cf\u3002\u5177\u4f53\u6b65\u9aa4\u5982\u4e0b\uff1a

                                                                    1. \u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u6700\u8fd1\u64cd\u4f5c -> Helm \u64cd\u4f5c -> \u8bbe\u7f6e\u4fdd\u7559\u6761\u6570 \u3002

                                                                    2. \u8bbe\u7f6e\u9700\u8981\u4fdd\u7559\u591a\u5c11\u6761 Helm \u64cd\u4f5c\u8bb0\u5f55\uff0c\u5e76\u70b9\u51fb \u786e\u5b9a \u3002

                                                                    "},{"location":"end-user/kpanda/clusters/access-cluster.html","title":"\u8bbf\u95ee\u96c6\u7fa4","text":"

                                                                    \u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\u63a5\u5165\u6216\u521b\u5efa\u7684\u96c6\u7fa4\uff0c\u4e0d\u4ec5\u53ef\u4ee5\u901a\u8fc7 UI \u754c\u9762\u76f4\u63a5\u8bbf\u95ee\uff0c\u4e5f\u53ef\u4ee5\u901a\u8fc7\u5176\u4ed6\u4e24\u79cd\u65b9\u5f0f\u8fdb\u884c\u8bbf\u95ee\u63a7\u5236\uff1a

                                                                    • \u901a\u8fc7 CloudShell \u5728\u7ebf\u8bbf\u95ee
                                                                    • \u4e0b\u8f7d\u96c6\u7fa4\u8bc1\u4e66\u540e\u901a\u8fc7 kubectl \u8fdb\u884c\u8bbf\u95ee

                                                                    Note

                                                                    \u8bbf\u95ee\u96c6\u7fa4\u65f6\uff0c\u7528\u6237\u5e94\u5177\u6709 Cluster Admin \u6743\u9650\u6216\u66f4\u9ad8\u6743\u9650\u3002

                                                                    "},{"location":"end-user/kpanda/clusters/access-cluster.html#cloudshell","title":"\u901a\u8fc7 CloudShell \u8bbf\u95ee","text":"
                                                                    1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9009\u62e9\u9700\u8981\u901a\u8fc7 CloudShell \u8bbf\u95ee\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u56fe\u6807\u5e76\u5728\u4e0b\u62c9\u5217\u8868\u4e2d\u70b9\u51fb \u63a7\u5236\u53f0 \u3002

                                                                    2. \u5728 CloudShell \u63a7\u5236\u53f0\u6267\u884c kubectl get node \u547d\u4ee4\uff0c\u9a8c\u8bc1 CloudShell \u4e0e\u96c6\u7fa4\u7684\u8fde\u901a\u6027\u3002\u5982\u56fe\uff0c\u63a7\u5236\u53f0\u5c06\u8fd4\u56de\u96c6\u7fa4\u4e0b\u7684\u8282\u70b9\u4fe1\u606f\u3002

                                                                    \u73b0\u5728\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7 CloudShell \u6765\u8bbf\u95ee\u5e76\u7ba1\u7406\u8be5\u96c6\u7fa4\u4e86\u3002

                                                                    "},{"location":"end-user/kpanda/clusters/access-cluster.html#kubectl","title":"\u901a\u8fc7 kubectl \u8bbf\u95ee","text":"

                                                                    \u901a\u8fc7\u672c\u5730\u8282\u70b9\u8bbf\u95ee\u5e76\u7ba1\u7406\u4e91\u7aef\u96c6\u7fa4\u65f6\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u6761\u4ef6\uff1a

                                                                    • \u672c\u5730\u8282\u70b9\u548c\u4e91\u7aef\u96c6\u7fa4\u7684\u7f51\u7edc\u4e92\u8054\u4e92\u901a\u3002
                                                                    • \u5df2\u7ecf\u5c06\u96c6\u7fa4\u8bc1\u4e66\u4e0b\u8f7d\u5230\u4e86\u672c\u5730\u8282\u70b9\u3002
                                                                    • \u672c\u5730\u8282\u70b9\u5df2\u7ecf\u5b89\u88c5\u4e86 kubectl \u5de5\u5177\u3002\u5173\u4e8e\u8be6\u7ec6\u7684\u5b89\u88c5\u65b9\u5f0f\uff0c\u8bf7\u53c2\u9605\u5b89\u88c5 kubectl\u3002

                                                                    \u6ee1\u8db3\u4e0a\u8ff0\u6761\u4ef6\u540e\uff0c\u6309\u7167\u4e0b\u65b9\u6b65\u9aa4\u4ece\u672c\u5730\u8bbf\u95ee\u4e91\u7aef\u96c6\u7fa4\uff1a

                                                                    1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9009\u62e9\u9700\u8981\u4e0b\u8f7d\u8bc1\u4e66\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \uff0c\u5e76\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u70b9\u51fb \u8bc1\u4e66\u83b7\u53d6 \u3002

                                                                    2. \u9009\u62e9\u8bc1\u4e66\u6709\u6548\u671f\u5e76\u70b9\u51fb \u4e0b\u8f7d\u8bc1\u4e66 \u3002

                                                                    3. \u6253\u5f00\u4e0b\u8f7d\u597d\u7684\u96c6\u7fa4\u8bc1\u4e66\uff0c\u5c06\u8bc1\u4e66\u5185\u5bb9\u590d\u5236\u81f3\u672c\u5730\u8282\u70b9\u7684 config \u6587\u4ef6\u3002

                                                                      kubectl \u5de5\u5177\u9ed8\u8ba4\u4f1a\u4ece\u672c\u5730\u8282\u70b9\u7684 $HOME/.kube \u76ee\u5f55\u4e0b\u67e5\u627e\u540d\u4e3a config \u7684\u6587\u4ef6\u3002\u8be5\u6587\u4ef6\u5b58\u50a8\u4e86\u76f8\u5173\u96c6\u7fa4\u7684\u8bbf\u95ee\u51ed\u8bc1\uff0ckubectl \u53ef\u4ee5\u51ed\u8be5\u914d\u7f6e\u6587\u4ef6\u8fde\u63a5\u81f3\u96c6\u7fa4\u3002
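A minimal sketch of this step, assuming the downloaded certificate was saved as a hypothetical file named cluster.kubeconfig:

mkdir -p $HOME/.kube\ncp ./cluster.kubeconfig $HOME/.kube/config\n# alternatively, point kubectl at the file without replacing the default config:\nkubectl get node --kubeconfig ./cluster.kubeconfig\n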

4. Run the following command on the local node to verify connectivity to the cluster:

                                                                      kubectl get pod -n default\n

The expected output is similar to:

                                                                      NAME                            READY   STATUS      RESTARTS    AGE\ndao-2048-2048-58c7f7fc5-mq7h4   1/1     Running     0           30h\n

You can now access and manage the cluster locally via kubectl.

                                                                    "},{"location":"end-user/kpanda/clusters/cluster-role.html","title":"\u96c6\u7fa4\u89d2\u8272","text":"

                                                                    \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u57fa\u4e8e\u96c6\u7fa4\u7684\u4e0d\u540c\u529f\u80fd\u5b9a\u4f4d\u5bf9\u96c6\u7fa4\u8fdb\u884c\u4e86\u89d2\u8272\u5206\u7c7b\uff0c\u5e2e\u52a9\u7528\u6237\u66f4\u597d\u5730\u7ba1\u7406 IT \u57fa\u7840\u8bbe\u65bd\u3002

                                                                    "},{"location":"end-user/kpanda/clusters/cluster-role.html#_2","title":"\u5168\u5c40\u670d\u52a1\u96c6\u7fa4","text":"

                                                                    \u6b64\u96c6\u7fa4\u7528\u4e8e\u8fd0\u884c\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7ec4\u4ef6\uff0c\u4f8b\u5982\u5bb9\u5668\u7ba1\u7406\u3001\u5168\u5c40\u7ba1\u7406\u3001\u53ef\u89c2\u6d4b\u6027\u3001\u955c\u50cf\u4ed3\u5e93\u7b49\u3002 \u4e00\u822c\u4e0d\u627f\u8f7d\u4e1a\u52a1\u8d1f\u8f7d\u3002

                                                                    \u652f\u6301\u7684\u529f\u80fd \u63cf\u8ff0 K8s \u7248\u672c 1.22+ \u64cd\u4f5c\u7cfb\u7edf RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86\uff1bUbuntu 18.04 x86, Ubuntu 20.04 x86\uff1bCentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD \u96c6\u7fa4\u5168\u751f\u547d\u5468\u671f\u7ba1\u7406 \u652f\u6301 K8s \u8d44\u6e90\u7ba1\u7406 \u652f\u6301 \u4e91\u539f\u751f\u5b58\u50a8 \u652f\u6301 \u4e91\u539f\u751f\u7f51\u7edc Calico\u3001Cillium\u3001Multus \u548c\u5176\u5b83 CNI \u7b56\u7565\u7ba1\u7406 \u652f\u6301\u7f51\u7edc\u7b56\u7565\u3001\u914d\u989d\u7b56\u7565\u3001\u8d44\u6e90\u9650\u5236\u3001\u707e\u5907\u7b56\u7565\u3001\u5b89\u5168\u7b56\u7565"},{"location":"end-user/kpanda/clusters/cluster-role.html#_3","title":"\u7ba1\u7406\u96c6\u7fa4","text":"

                                                                    \u6b64\u96c6\u7fa4\u7528\u4e8e\u7ba1\u7406\u5de5\u4f5c\u96c6\u7fa4\uff0c\u4e00\u822c\u4e0d\u627f\u8f7d\u4e1a\u52a1\u8d1f\u8f7d\u3002

                                                                    • \u7ecf\u5178\u6a21\u5f0f\u5c06\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u548c\u7ba1\u7406\u96c6\u7fa4\u90e8\u7f72\u5728\u4e0d\u540c\u7684\u96c6\u7fa4\uff0c\u9002\u7528\u4e8e\u4f01\u4e1a\u591a\u6570\u636e\u4e2d\u5fc3\u3001\u591a\u67b6\u6784\u7684\u573a\u666f\u3002
                                                                    • \u7b80\u7ea6\u6a21\u5f0f\u5c06\u7ba1\u7406\u96c6\u7fa4\u548c\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u90e8\u7f72\u5728\u540c\u4e00\u4e2a\u96c6\u7fa4\u5185\u3002
                                                                    \u652f\u6301\u7684\u529f\u80fd \u63cf\u8ff0 K8s \u7248\u672c 1.22+ \u64cd\u4f5c\u7cfb\u7edf RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86\uff1bUbuntu 18.04 x86, Ubuntu 20.04 x86\uff1bCentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD \u96c6\u7fa4\u5168\u751f\u547d\u5468\u671f\u7ba1\u7406 \u652f\u6301 K8s \u8d44\u6e90\u7ba1\u7406 \u652f\u6301 \u4e91\u539f\u751f\u5b58\u50a8 \u652f\u6301 \u4e91\u539f\u751f\u7f51\u7edc Calico\u3001Cillium\u3001Multus \u548c\u5176\u5b83 CNI \u7b56\u7565\u7ba1\u7406 \u652f\u6301\u7f51\u7edc\u7b56\u7565\u3001\u914d\u989d\u7b56\u7565\u3001\u8d44\u6e90\u9650\u5236\u3001\u707e\u5907\u7b56\u7565\u3001\u5b89\u5168\u7b56\u7565"},{"location":"end-user/kpanda/clusters/cluster-role.html#_4","title":"\u5de5\u4f5c\u96c6\u7fa4","text":"

This cluster is created through container management and is mainly used to host business workloads. It is managed by the management cluster.

Supported features and descriptions: K8s version: K8s 1.22 and above | Operating systems: RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD | Full cluster lifecycle management: supported | K8s resource management: supported | Cloud-native storage: supported | Cloud-native networking: Calico, Cilium, Multus, and other CNIs | Policy management: network policies, quota policies, resource limits, disaster recovery policies, and security policies"},{"location":"end-user/kpanda/clusters/cluster-role.html#_5","title":"Integrated Cluster","text":"

This cluster type integrates existing standard K8s clusters, including but not limited to self-built clusters in local data centers, clusters provided by public or private cloud vendors, edge clusters, Xinchuang (IT application innovation) clusters, and heterogeneous clusters. It is mainly used to host business workloads.

Supported features and descriptions: K8s version: 1.18+ | Supported vendors: VMware Tanzu, Amazon EKS, Redhat Openshift, SUSE Rancher, Alibaba ACK, Huawei CCE, Tencent TKE, standard K8s clusters, Suanfeng AI Computing Platform | Full cluster lifecycle management: not supported | K8s resource management: supported | Cloud-native storage: supported | Cloud-native networking: depends on the network mode of the integrated cluster's distribution | Policy management: network policies, quota policies, resource limits, disaster recovery policies, and security policies

                                                                    Note

A cluster can hold multiple roles at once; for example, one cluster can be a global service cluster as well as a management cluster or worker cluster.

"},{"location":"end-user/kpanda/clusters/cluster-scheduler-plugin.html","title":"How to Deploy a Second Scheduler (scheduler-plugins) in a Cluster","text":"

This article explains how to deploy a second scheduler, scheduler-plugins, in a cluster.

"},{"location":"end-user/kpanda/clusters/cluster-scheduler-plugin.html#scheduler-plugins_1","title":"Why scheduler-plugins?","text":"

Clusters created through the platform install the native K8s scheduler, but the native scheduler has many limitations:

• When the native scheduler cannot meet your scheduling needs, you can use scheduler-plugins such as CoScheduling and CapacityScheduling.
• In special scenarios, a new scheduler is needed to handle scheduling tasks without affecting the native scheduler's workflow.
• Schedulers with different capabilities can be distinguished, and different scheduling scenarios can be realized by switching the scheduler name (see the sketch below).
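
As a sketch of the last point, a Pod opts into a specific scheduler by setting spec.schedulerName; Pods that omit the field keep using the native default scheduler (the Pod below is hypothetical):

apiVersion: v1\nkind: Pod\nmetadata:\n  name: demo   # hypothetical Pod\nspec:\n  schedulerName: scheduler-plugins-scheduler   # route this Pod to the second scheduler\n  containers:\n    - name: app\n      image: nginx\n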

This article takes the scenario of using the vgpu scheduler together with the coscheduling plugin capability of scheduler-plugins as an example to explain how to install and use scheduler-plugins.

"},{"location":"end-user/kpanda/clusters/cluster-scheduler-plugin.html#scheduler-plugins_2","title":"Installing scheduler-plugins","text":""},{"location":"end-user/kpanda/clusters/cluster-scheduler-plugin.html#_1","title":"Prerequisites","text":"
• kubean is a new feature introduced in v0.13.0; when selecting a management cluster, make sure its version is not lower than this.
• The installed scheduler-plugins version is v0.27.8; make sure your cluster version is compatible with it. See the Compatibility Matrix documentation.
"},{"location":"end-user/kpanda/clusters/cluster-scheduler-plugin.html#_2","title":"Installation Procedure","text":"
1. Add the scheduler-plugins parameters under Create Cluster -> Advanced Settings -> Custom Parameters:

scheduler_plugins_enabled: true\nscheduler_plugins_plugin_config:\n  - name: Coscheduling\n    args:\n      permitWaitingTimeSeconds: 10 # default is 60\n

Parameter description:

• Setting scheduler_plugins_enabled to true enables the scheduler-plugins capability.
• You can enable or disable specific plugins via the scheduler_plugins_enabled_plugins or scheduler_plugins_disabled_plugins options. See the official K8s plugin names.
• To set parameters for a particular plugin, configure scheduler_plugins_plugin_config, for example the permitWaitingTimeSeconds parameter of coscheduling (see the combined sketch after this list). See the official K8s plugin configuration options.
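
A combined sketch of these custom parameters (the plugin selection and values are illustrative, not defaults):

scheduler_plugins_enabled: true\nscheduler_plugins_enabled_plugins:\n  - Coscheduling\n  - CapacityScheduling\nscheduler_plugins_plugin_config:\n  - name: Coscheduling\n    args:\n      permitWaitingTimeSeconds: 10 # default is 60\n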
2. After the cluster is created successfully, the system automatically installs the scheduler-plugins and controller component workloads; you can check their status among the stateless workloads of the corresponding cluster.

                                                                    "},{"location":"end-user/kpanda/clusters/cluster-scheduler-plugin.html#scheduler-plugins_3","title":"\u4f7f\u7528 scheduler-plugins","text":"

                                                                    \u4ee5\u4e0b\u4ee5\u4f7f\u7528 vgpu \u8c03\u5ea6\u5668\u7684\u540c\u65f6\uff0c\u60f3\u7ed3\u5408 scheduler-plugins \u7684 coscheduling \u63d2\u4ef6\u80fd\u529b\u573a\u666f\u4e3a\u793a\u4f8b\uff0c\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528 scheduler-plugins\u3002

                                                                    1. \u5728 Helm \u6a21\u677f\u4e2d\u5b89\u88c5 vgpu\uff0c\u8bbe\u7f6e values.yaml \u53c2\u6570\u3002

• schedulerName: scheduler-plugins-scheduler — this is the scheduler name of the scheduler-plugins instance installed by kubean by default, and it currently cannot be changed.
• scheduler.kubeScheduler.enabled: false — do not install kube-scheduler; use vgpu-scheduler as a standalone extender.
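
A minimal values.yaml sketch of the two settings above (the exact key paths depend on the version of the vgpu chart):

schedulerName: scheduler-plugins-scheduler   # must match the kubean-installed scheduler name\nscheduler:\n  kubeScheduler:\n    enabled: false   # run vgpu-scheduler as a standalone extender\n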
2. Extend vgpu-scheduler on scheduler-plugins. First view the current scheduler configuration:

                                                                      [root@master01 charts]# kubectl get cm -n scheduler-plugins scheduler-config -ojsonpath=\"{.data.scheduler-config\\.yaml}\"\n
                                                                      apiVersion: kubescheduler.config.k8s.io/v1\nkind: KubeSchedulerConfiguration\nleaderElection:\n  leaderElect: false\nprofiles:\n  # Compose all plugins in one profile\n  - schedulerName: scheduler-plugins-scheduler\n    plugins:\n      multiPoint:\n        enabled:\n          - name: Coscheduling\n          - name: CapacityScheduling\n          - name: NodeResourceTopologyMatch\n          - name: NodeResourcesAllocatable\n        disabled:\n          - name: PrioritySort\npluginConfig:\n  - args:\n      permitWaitingTimeSeconds: 10\n    name: Coscheduling\n

Modify the scheduler-config ConfigMap of scheduler-plugins as follows:

                                                                      [root@master01 charts]# kubectl get cm -n scheduler-plugins scheduler-config -ojsonpath=\"{.data.scheduler-config\\.yaml}\"\n
                                                                      apiVersion: kubescheduler.config.k8s.io/v1\nkind: KubeSchedulerConfiguration\nleaderElection:\n  leaderElect: false\nprofiles:\n  # Compose all plugins in one profile\n  - schedulerName: scheduler-plugins-scheduler\n    plugins:\n      multiPoint:\n        enabled:\n          - name: Coscheduling\n          - name: CapacityScheduling\n          - name: NodeResourceTopologyMatch\n          - name: NodeResourcesAllocatable\n        disabled:\n          - name: PrioritySort\npluginConfig:\n  - args:\n      permitWaitingTimeSeconds: 10\n    name: Coscheduling\nextenders:\n  - urlPrefix: \"${urlPrefix}\"\n    filterVerb: filter\n    bindVerb: bind\n    nodeCacheCapable: true\n    ignorable: true\n    httpTimeout: 30s\n    weight: 1\n    enableHTTPS: true\n    tlsConfig:\n      insecure: true\n    managedResources:\n      - name: nvidia.com/vgpu\n        ignoredByScheduler: true\n      - name: nvidia.com/gpumem\n        ignoredByScheduler: true\n      - name: nvidia.com/gpucores\n        ignoredByScheduler: true\n      - name: nvidia.com/gpumem-percentage\n        ignoredByScheduler: true\n      - name: nvidia.com/priority\n        ignoredByScheduler: true\n      - name: cambricon.com/mlunum\n        ignoredByScheduler: true\n
3. After vgpu-scheduler is installed, the system automatically creates a Service (svc); urlPrefix specifies the URL of that svc.

                                                                      Note

• svc refers to the Pod service workload. In the namespace where the nvidia-vgpu plugin is installed, you can get the external access information corresponding to port 443 with the following command:

                                                                        kubectl get svc -n ${namespace} \n
• The urlPrefix format is https://${IP address}:${port} (an illustrative example follows).
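
For example (the namespace, node address, and port below are hypothetical): if the vgpu scheduler Service in namespace gpu-system maps port 443 to NodePort 31998 on node 10.0.0.10, the extender address would be urlPrefix: "https://10.0.0.10:31998".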

4. Restart the scheduler Pod of scheduler-plugins to load the new configuration file (a restart sketch follows the note below).

                                                                      Note

When creating a vgpu application, you do not need to specify a scheduler name; the vgpu-scheduler webhook automatically changes the scheduler name to scheduler-plugins-scheduler, so no manual specification is required.
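
A minimal sketch for the restart in step 4, assuming the scheduler runs as a Deployment named scheduler-plugins-scheduler in the scheduler-plugins namespace (names may differ in your installation):

kubectl -n scheduler-plugins rollout restart deployment scheduler-plugins-scheduler\nkubectl -n scheduler-plugins rollout status deployment scheduler-plugins-scheduler   # wait for the new config to be loaded\n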

                                                                    "},{"location":"end-user/kpanda/clusters/cluster-status.html","title":"\u96c6\u7fa4\u72b6\u6001","text":"

                                                                    \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u652f\u6301\u7eb3\u7ba1\u4e24\u79cd\u7c7b\u578b\u7684\u96c6\u7fa4\uff1a\u63a5\u5165\u96c6\u7fa4\u548c\u81ea\u5efa\u96c6\u7fa4\u3002 \u5173\u4e8e\u96c6\u7fa4\u7eb3\u7ba1\u7c7b\u578b\u7684\u66f4\u591a\u4fe1\u606f\uff0c\u8bf7\u53c2\u89c1\u96c6\u7fa4\u89d2\u8272\u3002

                                                                    \u8fd9\u4e24\u79cd\u96c6\u7fa4\u7684\u72b6\u6001\u5982\u4e0b\u6240\u8ff0\u3002

                                                                    "},{"location":"end-user/kpanda/clusters/cluster-status.html#_2","title":"\u63a5\u5165\u96c6\u7fa4","text":"\u72b6\u6001 \u63cf\u8ff0 \u63a5\u5165\u4e2d\uff08Joining\uff09 \u96c6\u7fa4\u6b63\u5728\u63a5\u5165 \u89e3\u9664\u63a5\u5165\u4e2d\uff08Removing\uff09 \u96c6\u7fa4\u6b63\u5728\u89e3\u9664\u63a5\u5165 \u8fd0\u884c\u4e2d\uff08Running\uff09 \u96c6\u7fa4\u6b63\u5e38\u8fd0\u884c \u672a\u77e5\uff08Unknown\uff09 \u96c6\u7fa4\u5df2\u5931\u8054\uff0c\u7cfb\u7edf\u5c55\u793a\u6570\u636e\u4e3a\u5931\u8054\u524d\u7f13\u5b58\u6570\u636e\uff0c\u4e0d\u4ee3\u8868\u771f\u5b9e\u6570\u636e\uff0c\u540c\u65f6\u5931\u8054\u72b6\u6001\u4e0b\u6267\u884c\u7684\u4efb\u4f55\u64cd\u4f5c\u90fd\u5c06\u4e0d\u751f\u6548\uff0c\u8bf7\u68c0\u67e5\u96c6\u7fa4\u7f51\u7edc\u8fde\u901a\u6027\u6216\u4e3b\u673a\u72b6\u6001\u3002"},{"location":"end-user/kpanda/clusters/cluster-status.html#_3","title":"\u81ea\u5efa\u96c6\u7fa4","text":"\u72b6\u6001 \u63cf\u8ff0 \u521b\u5efa\u4e2d\uff08Creating\uff09 \u96c6\u7fa4\u6b63\u5728\u521b\u5efa \u66f4\u65b0\u4e2d\uff08Updating\uff09 \u66f4\u65b0\u96c6\u7fa4 Kubernetes \u7248\u672c \u5220\u9664\u4e2d\uff08Deleting\uff09 \u96c6\u7fa4\u6b63\u5728\u5220\u9664 \u8fd0\u884c\u4e2d\uff08Running\uff09 \u96c6\u7fa4\u6b63\u5e38\u8fd0\u884c \u672a\u77e5\uff08Unknown\uff09 \u96c6\u7fa4\u5df2\u5931\u8054\uff0c\u7cfb\u7edf\u5c55\u793a\u6570\u636e\u4e3a\u5931\u8054\u524d\u7f13\u5b58\u6570\u636e\uff0c\u4e0d\u4ee3\u8868\u771f\u5b9e\u6570\u636e\uff0c\u540c\u65f6\u5931\u8054\u72b6\u6001\u4e0b\u6267\u884c\u7684\u4efb\u4f55\u64cd\u4f5c\u90fd\u5c06\u4e0d\u751f\u6548\uff0c\u8bf7\u68c0\u67e5\u96c6\u7fa4\u7f51\u7edc\u8fde\u901a\u6027\u6216\u4e3b\u673a\u72b6\u6001\u3002 \u521b\u5efa\u5931\u8d25\uff08Failed\uff09 \u96c6\u7fa4\u521b\u5efa\u5931\u8d25\uff0c\u8bf7\u67e5\u770b\u65e5\u5fd7\u4ee5\u83b7\u53d6\u8be6\u7ec6\u5931\u8d25\u539f\u56e0"},{"location":"end-user/kpanda/clusters/cluster-version.html","title":"\u96c6\u7fa4\u7248\u672c\u652f\u6301\u8303\u56f4","text":"

In the Suanfeng AI Computing Platform, integrated clusters and self-built clusters use different version support mechanisms.

This article mainly describes the version support mechanism for self-built clusters.

The Kubernetes community supports a range of 3 versions, such as 1.26, 1.27, and 1.28. When the community releases a new version, the supported range moves forward. For example, once the latest community version 1.29 is released, the community-supported range becomes 1.27, 1.28, and 1.29.

For example, if the community-supported range is 1.25, 1.26, and 1.27, the version range for creating worker clusters through the UI in the Suanfeng AI Computing Platform is 1.24, 1.25, and 1.26, and a stable version such as 1.24.7 is recommended to users.

In addition, the version range for creating worker clusters through the UI in the Suanfeng AI Computing Platform stays closely in sync with the community: when the community range moves forward, the platform's UI range for creating worker clusters also moves forward by one version.

                                                                    "},{"location":"end-user/kpanda/clusters/cluster-version.html#kubernetes","title":"Kubernetes \u7248\u672c\u652f\u6301\u8303\u56f4","text":"Kubernetes \u793e\u533a\u7248\u672c\u8303\u56f4 \u81ea\u5efa\u5de5\u4f5c\u96c6\u7fa4\u7248\u672c\u8303\u56f4 \u81ea\u5efa\u5de5\u4f5c\u96c6\u7fa4\u63a8\u8350\u7248\u672c \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5b89\u88c5\u5668 \u53d1\u5e03\u65f6\u95f4
                                                                    • 1.26
                                                                    • 1.27
                                                                    • 1.28
                                                                    • 1.25
                                                                    • 1.26
                                                                    • 1.27
                                                                    1.27.5 v0.13.0 2023.11.30"},{"location":"end-user/kpanda/clusters/create-cluster.html","title":"\u521b\u5efa\u5de5\u4f5c\u96c6\u7fa4","text":"

In the container management module of the Suanfeng AI Computing Platform, clusters have four roles: global service cluster, management cluster, worker cluster, and integrated cluster. Integrated clusters can only be brought in from third-party vendors; see Integrating a Cluster.

This page explains how to create a worker cluster. By default, the OS type and CPU architecture of a new worker cluster's worker nodes must match those of the global service cluster. To create a cluster with nodes whose OS or architecture differs from the global service cluster, see Creating an Ubuntu Worker Cluster on a CentOS Management Platform.

It is recommended to create clusters on operating systems supported by the Suanfeng AI Computing Platform. If your local nodes are outside the supported range, see Creating a Cluster on a Non-mainstream Operating System.

"},{"location":"end-user/kpanda/clusters/create-cluster.html#_2","title":"Prerequisites","text":"

The following prerequisites must be met before creating a cluster:

• Prepare a sufficient number of nodes according to business needs, with a consistent OS type and CPU architecture.
• Kubernetes 1.29.5 is recommended; for the specific version range, see the Suanfeng AI Computing Platform cluster version support policy. The platform currently supports self-built worker clusters in the range v1.28.0-v1.30.2. To create a cluster with a lower version, see the Cluster Version Support Range and Deploying and Upgrading Kubean with Backward-compatible Versions.
• The target hosts must allow IPv4 forwarding. If Pods and Services use IPv6, the target servers must also allow IPv6 forwarding.
• The Suanfeng AI Computing Platform does not currently provide firewall management; define the target hosts' firewall rules yourself in advance. To avoid problems during cluster creation, it is recommended to disable the firewalls on the target hosts.
• See Node Availability Check.
                                                                    "},{"location":"end-user/kpanda/clusters/create-cluster.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                                    1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u4e2d\uff0c\u70b9\u51fb \u521b\u5efa\u96c6\u7fa4 \u6309\u94ae\u3002

                                                                    2. \u53c2\u8003\u4e0b\u5217\u8981\u6c42\u586b\u5199\u96c6\u7fa4\u57fa\u672c\u4fe1\u606f\uff0c\u5e76\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                                                      • \u96c6\u7fa4\u540d\u79f0\uff1a\u540d\u79f0\u53ea\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u548c\u8fde\u5b57\u7b26\uff08\"-\"\uff09\uff0c\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u8005\u6570\u5b57\u5f00\u5934\u548c\u7ed3\u5c3e\uff0c\u6700\u957f 63 \u4e2a\u5b57\u7b26\u3002
                                                                      • \u88ab\u7eb3\u7ba1\uff1a\u9009\u62e9\u7531\u54ea\u4e2a\u96c6\u7fa4\u6765\u7ba1\u7406\u6b64\u96c6\u7fa4\uff0c\u4f8b\u5982\u5728\u96c6\u7fa4\u751f\u547d\u5468\u671f\u4e2d\u521b\u5efa\u3001\u5347\u7ea7\u3001\u8282\u70b9\u6269\u7f29\u5bb9\u3001\u5220\u9664\u96c6\u7fa4\u7b49\u3002
                                                                      • \u8fd0\u884c\u65f6\uff1a\u9009\u62e9\u96c6\u7fa4\u7684\u8fd0\u884c\u65f6\u73af\u5883\uff0c\u76ee\u524d\u652f\u6301 containerd \u548c docker\uff0c\u5982\u4f55\u9009\u62e9\u5bb9\u5668\u8fd0\u884c\u65f6\u3002
                                                                      • Kubernetes \u7248\u672c\uff1a\u652f\u6301 3 \u4e2a\u7248\u672c\u8de8\u5ea6\uff0c\u5177\u4f53\u53d6\u51b3\u4e8e\u88ab\u7eb3\u7ba1\u96c6\u7fa4\u6240\u652f\u6301\u7684\u7248\u672c\u3002

3. Fill in the node configuration information, then click Next.

• High availability: when enabled, at least 3 controller nodes must be provided; when disabled, 1 controller node is enough.

High availability mode is recommended for production environments.

• Authentication method: choose whether to access nodes via username/password or public/private key.

If nodes are accessed with public/private keys, configure the nodes' SSH keys in advance. See Authenticating Nodes with SSH Keys.

• Use a unified password: when enabled, all nodes in the cluster share the same access password, entered below. When disabled, a separate username and password can be set for each node.

• Node information: fill in the node names and IP addresses.

• Custom parameters: set variables that control how Ansible interacts with the remote hosts. For the variables that can be set, see Connecting to Hosts: Behavioral Inventory Parameters.
• NTP time synchronization: when enabled, time is synchronized automatically across the nodes; an NTP server address must be provided.

4. Click Node Check at the bottom of the page. If the check passes, continue to the next step; if it fails, update the node information and run the check again.

5. Fill in the network configuration information, then click Next.

• Network plugin: provides network services for the Pods in the cluster and cannot be changed after the cluster is created. cilium and calico are supported; selecting none means no network plugin is installed for now.

• Container CIDR: the address range used by containers in the cluster, which determines the upper limit on the number of containers. It cannot be modified after creation.

• Service CIDR: the address range used by Service resources for communication between containers within the same cluster, which determines the upper limit on Service resources. It cannot be modified after creation. (A worked sizing example follows this list.)
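
As a worked example with illustrative values: a container CIDR of 10.244.0.0/16 provides 2^(32-16) = 65,536 Pod addresses, while a service CIDR of 10.96.0.0/18 provides 2^(32-18) = 16,384 Service addresses. Since neither range can be enlarged after creation, size them for future growth rather than current usage.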

6. Fill in the plugin configuration information, then click Next.

7. Fill in the advanced configuration information, then click OK.

• kubelet_max_pods: sets the maximum number of Pods per node; the default is 110.
• hostname_overide: overrides the hostname; keeping the default, i.e. the system-generated name, is recommended.
• kubernetes_audit: Kubernetes audit logging, enabled by default.
• auto_renew_certificate: automatically renews the Kubernetes control plane certificates on the first Monday of each month, enabled by default.
• disable_firewalld&ufw: disables the firewall so that nodes remain reachable during installation.
• Insecure_registries: private image registry configuration. When creating a cluster with a private image registry, fill in the registry address here so that image pulls bypass the container engine's certificate verification and are not rejected due to certificate problems.
• yum_repos: the Yum repository address. In offline environments, the default address is for reference only; fill it in according to your actual situation. (A combined sketch of these fields follows this list.)
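
A combined sketch of how these advanced fields might be filled in (all addresses are placeholders):

kubelet_max_pods: 110\nkubernetes_audit: true\nauto_renew_certificate: true\n"disable_firewalld&ufw": true\nInsecure_registries:\n  - registry.example.com:5000   # hypothetical private registry\nyum_repos: http://10.0.0.20/repo   # hypothetical offline Yum mirror\n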

                                                                    Success

• After the information is filled in correctly and the above steps are completed, the page indicates that the cluster is being created.
• Creating a cluster takes quite a while, so please be patient. In the meantime, you can click the Back to Cluster List button to let the installation run in the background.
• To view the current status, click Live Logs.

                                                                    Note

• When a cluster is in the Unknown state, it has lost contact with the platform.
• The data displayed by the system is cached from before the disconnection and does not reflect the real state.
• Any operation performed while disconnected will not take effect; check the cluster's network connectivity or host status.

                                                                    "},{"location":"end-user/kpanda/clusters/delete-cluster.html","title":"\u5378\u8f7d/\u89e3\u9664\u63a5\u5165\u96c6\u7fa4","text":"

                                                                    \u901a\u8fc7\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0 \u521b\u5efa\u7684\u96c6\u7fa4 \u652f\u6301 \u5378\u8f7d\u96c6\u7fa4 \u6216 \u89e3\u9664\u63a5\u5165 \u64cd\u4f5c\uff0c\u4ece\u5176\u4ed6\u73af\u5883\u76f4\u63a5 \u63a5\u5165\u7684\u96c6\u7fa4 \u4ec5\u652f\u6301 \u89e3\u9664\u63a5\u5165 \u64cd\u4f5c\u3002

                                                                    Info

                                                                    \u5982\u679c\u60f3\u5f7b\u5e95\u5220\u9664\u4e00\u4e2a\u63a5\u5165\u7684\u96c6\u7fa4\uff0c\u9700\u8981\u524d\u5f80\u521b\u5efa\u8be5\u96c6\u7fa4\u7684\u539f\u59cb\u5e73\u53f0\u64cd\u4f5c\u3002\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e0d\u652f\u6301\u5220\u9664\u63a5\u5165\u7684\u96c6\u7fa4\u3002

                                                                    \u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\uff0c \u5378\u8f7d\u96c6\u7fa4 \u548c \u89e3\u9664\u63a5\u5165 \u7684\u533a\u522b\u5728\u4e8e\uff1a

                                                                    • \u5378\u8f7d\u96c6\u7fa4 \u64cd\u4f5c\u4f1a\u9500\u6bc1\u8be5\u96c6\u7fa4\uff0c\u5e76\u91cd\u7f6e\u96c6\u7fa4\u4e0b\u6240\u6709\u8282\u70b9\u7684\u6570\u636e\u3002\u6240\u6709\u6570\u636e\u90fd\u5c06\u88ab\u9500\u6bc1\uff0c\u5efa\u8bae\u505a\u597d\u5907\u4efd\u3002\u540e\u671f\u9700\u8981\u65f6\u5fc5\u987b\u91cd\u65b0\u521b\u5efa\u4e00\u4e2a\u96c6\u7fa4\u3002
                                                                    • \u89e3\u9664\u63a5\u5165 \u64cd\u4f5c\u4f1a\u5c06\u5f53\u524d\u96c6\u7fa4\u4ece\u5e73\u53f0\u4e2d\u79fb\u9664\uff0c\u4e0d\u4f1a\u6467\u6bc1\u96c6\u7fa4\uff0c\u4e5f\u4e0d\u4f1a\u9500\u6bc1\u6570\u636e\u3002
                                                                    "},{"location":"end-user/kpanda/clusters/delete-cluster.html#_2","title":"\u5378\u8f7d\u96c6\u7fa4","text":"

                                                                    Note

                                                                    • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u5907 Admin \u6216 Kpanda Owner \u6743\u9650\u624d\u80fd\u6267\u884c\u5378\u8f7d\u96c6\u7fa4\u7684\u64cd\u4f5c\u3002
                                                                    • \u5378\u8f7d\u96c6\u7fa4\u4e4b\u524d\uff0c\u5e94\u8be5\u5148\u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u67d0\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u5728 \u96c6\u7fa4\u8fd0\u7ef4 -> \u96c6\u7fa4\u8bbe\u7f6e -> \u9ad8\u7ea7\u914d\u7f6e \u4e2d\u5173\u95ed \u96c6\u7fa4\u5220\u9664\u4fdd\u62a4 \uff0c \u5426\u5219\u4e0d\u663e\u793a \u5378\u8f7d\u96c6\u7fa4 \u7684\u9009\u9879\u3002
                                                                    • \u5168\u5c40\u670d\u52a1\u96c6\u7fa4 \u4e0d\u652f\u6301\u5378\u8f7d\u6216\u79fb\u9664\u64cd\u4f5c\u3002
                                                                    1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u627e\u5230\u9700\u8981\u5378\u8f7d\u96c6\u7fa4\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \u5e76\u5728\u4e0b\u62c9\u5217\u8868\u4e2d\u70b9\u51fb \u5378\u8f7d\u96c6\u7fa4 \u3002

                                                                    2. \u8f93\u5165\u96c6\u7fa4\u540d\u79f0\u8fdb\u884c\u786e\u8ba4\uff0c\u7136\u540e\u70b9\u51fb \u5220\u9664 \u3002

                                                                      \u5982\u679c\u63d0\u793a\u96c6\u7fa4\u4e2d\u8fd8\u6709\u4e00\u4e9b\u6b8b\u7559\u7684\u8d44\u6e90\uff0c\u5219\u9700\u8981\u6309\u63d0\u793a\u5220\u9664\u76f8\u5173\u8d44\u6e90\u540e\u624d\u80fd\u6267\u884c\u5378\u8f7d\u64cd\u4f5c\u3002

                                                                    3. \u8fd4\u56de \u96c6\u7fa4\u5217\u8868 \u9875\u53ef\u4ee5\u770b\u5230\u8be5\u96c6\u7fa4\u7684\u72b6\u6001\u5df2\u7ecf\u53d8\u6210 \u5220\u9664\u4e2d \u3002\u5378\u8f7d\u96c6\u7fa4\u53ef\u80fd\u9700\u8981\u4e00\u6bb5\u65f6\u95f4\uff0c\u8bf7\u60a8\u8010\u5fc3\u7b49\u5019\u3002

                                                                    "},{"location":"end-user/kpanda/clusters/delete-cluster.html#_3","title":"\u89e3\u9664\u63a5\u5165\u96c6\u7fa4","text":"

                                                                    Note

                                                                    • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u5907 Admin \u6216 Kpanda Owner \u6743\u9650\u624d\u80fd\u6267\u884c\u89e3\u9664\u63a5\u5165\u7684\u64cd\u4f5c\u3002
                                                                    • \u5168\u5c40\u670d\u52a1\u96c6\u7fa4 \u4e0d\u652f\u6301\u89e3\u9664\u63a5\u5165\u3002
                                                                    1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u627e\u5230\u9700\u8981\u5378\u8f7d\u96c6\u7fa4\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \u5e76\u5728\u4e0b\u62c9\u5217\u8868\u4e2d\u70b9\u51fb \u89e3\u9664\u63a5\u5165 \u3002

                                                                    2. \u8f93\u5165\u96c6\u7fa4\u540d\u79f0\u8fdb\u884c\u786e\u8ba4\uff0c\u7136\u540e\u70b9\u51fb \u89e3\u9664\u63a5\u5165 \u3002

                                                                      \u5982\u679c\u63d0\u793a\u96c6\u7fa4\u4e2d\u8fd8\u6709\u4e00\u4e9b\u6b8b\u7559\u7684\u8d44\u6e90\uff0c\u5219\u9700\u8981\u6309\u63d0\u793a\u5220\u9664\u76f8\u5173\u8d44\u6e90\u540e\u624d\u80fd\u89e3\u9664\u63a5\u5165\u3002

                                                                    "},{"location":"end-user/kpanda/clusters/delete-cluster.html#_4","title":"\u6e05\u7406\u89e3\u9664\u63a5\u5165\u96c6\u7fa4\u914d\u7f6e\u6570\u636e","text":"

                                                                    \u96c6\u7fa4\u88ab\u79fb\u9664\u540e\uff0c\u96c6\u7fa4\u4e2d\u539f\u6709\u7684\u7ba1\u7406\u5e73\u53f0\u6570\u636e\u4e0d\u4f1a\u88ab\u81ea\u52a8\u6e05\u9664\uff0c\u5982\u9700\u5c06\u96c6\u7fa4\u63a5\u5165\u81f3\u65b0\u7ba1\u7406\u5e73\u53f0\u5219\u9700\u8981\u624b\u52a8\u6267\u884c\u5982\u4e0b\u64cd\u4f5c\uff1a

                                                                    \u5220\u9664 kpanda-system\u3001insight-system \u547d\u540d\u7a7a\u95f4

                                                                    kubectl delete ns kpanda-system insight-system\n
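
To confirm the cleanup, a quick check (both namespaces should come back as NotFound once deletion finishes):

kubectl get ns kpanda-system insight-system\n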
                                                                    "},{"location":"end-user/kpanda/clusters/integrate-cluster.html","title":"\u63a5\u5165\u96c6\u7fa4","text":"

                                                                    \u901a\u8fc7\u63a5\u5165\u96c6\u7fa4\u64cd\u4f5c\uff0c\u80fd\u591f\u5bf9\u4f17\u591a\u4e91\u670d\u52a1\u5e73\u53f0\u96c6\u7fa4\u548c\u672c\u5730\u79c1\u6709\u7269\u7406\u96c6\u7fa4\u8fdb\u884c\u7edf\u4e00\u7eb3\u7ba1\uff0c\u5f62\u6210\u7edf\u4e00\u6cbb\u7406\u5e73\u53f0\uff0c\u6709\u6548\u907f\u514d\u4e86\u88ab\u5382\u5546\u9501\u5b9a\u98ce\u9669\uff0c\u52a9\u529b\u4f01\u4e1a\u4e1a\u52a1\u5b89\u5168\u4e0a\u4e91\u3002

                                                                    \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u652f\u6301\u63a5\u5165\u591a\u79cd\u4e3b\u6d41\u7684\u5bb9\u5668\u96c6\u7fa4\uff0c\u4f8b\u5982 Redhat Openshift, SUSE Rancher, VMware Tanzu, Amazon EKS, Aliyun ACK, Huawei CCE, Tencent TKE, \u6807\u51c6 Kubernetes \u96c6\u7fa4\u3002

                                                                    "},{"location":"end-user/kpanda/clusters/integrate-cluster.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                    • \u51c6\u5907\u4e00\u4e2a\u5f85\u63a5\u5165\u7684\u96c6\u7fa4\uff0c\u786e\u4fdd\u5bb9\u5668\u7ba1\u7406\u96c6\u7fa4\u548c\u5f85\u63a5\u5165\u96c6\u7fa4\u4e4b\u95f4\u7f51\u7edc\u901a\u7545\uff0c\u5e76\u4e14\u96c6\u7fa4\u7684 Kubernetes \u7248\u672c 1.22+\u3002
                                                                    • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 Kpanda Owner \u6216\u66f4\u9ad8\u6743\u9650\u3002
                                                                    "},{"location":"end-user/kpanda/clusters/integrate-cluster.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                                    1. \u8fdb\u5165 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u63a5\u5165\u96c6\u7fa4 \u6309\u94ae\u3002

                                                                    2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\u3002

                                                                      • \u96c6\u7fa4\u540d\u79f0\uff1a\u540d\u79f0\u5e94\u5177\u6709\u552f\u4e00\u6027\uff0c\u8bbe\u7f6e\u540e\u4e0d\u53ef\u66f4\u6539\u3002\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26(\"-\")\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\u3002
                                                                      • \u96c6\u7fa4\u522b\u540d\uff1a\u53ef\u8f93\u5165\u4efb\u610f\u5b57\u7b26\uff0c\u4e0d\u8d85\u8fc7 60 \u4e2a\u5b57\u7b26\u3002
                                                                      • \u53d1\u884c\u7248\uff1a\u96c6\u7fa4\u7684\u53d1\u884c\u5382\u5546\uff0c\u5305\u62ec\u5e02\u573a\u4e3b\u6d41\u4e91\u5382\u5546\u548c\u672c\u5730\u79c1\u6709\u7269\u7406\u96c6\u7fa4\u3002
                                                                    3. \u586b\u5199\u76ee\u6807\u96c6\u7fa4\u7684 KubeConfig\uff0c\u70b9\u51fb \u9a8c\u8bc1 Config \uff0c\u9a8c\u8bc1\u901a\u8fc7\u540e\u624d\u80fd\u6210\u529f\u63a5\u5165\u96c6\u7fa4\u3002

                                                                      \u5982\u679c\u4e0d\u77e5\u9053\u5982\u4f55\u83b7\u53d6\u96c6\u7fa4\u7684 KubeConfig \u6587\u4ef6\uff0c\u53ef\u4ee5\u5728\u8f93\u5165\u6846\u53f3\u4e0a\u89d2\u70b9\u51fb \u5982\u4f55\u83b7\u53d6 kubeConfig \u67e5\u770b\u5bf9\u5e94\u6b65\u9aa4\u3002

                                                                    4. \u786e\u8ba4\u6240\u6709\u53c2\u6570\u586b\u5199\u6b63\u786e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u3002

                                                                    Note

• A newly integrated cluster starts in the Joining status and changes to Running after the integration succeeds.
• If the cluster status stays at Joining, check whether the integration script executed successfully on the target cluster. For more details on cluster statuses, see Cluster Status.
                                                                    "},{"location":"end-user/kpanda/clusters/integrate-rancher-cluster.html","title":"\u63a5\u5165 rancher \u96c6\u7fa4","text":"

                                                                    \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u63a5\u5165 rancher \u96c6\u7fa4\u3002

                                                                    "},{"location":"end-user/kpanda/clusters/integrate-rancher-cluster.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                    • \u51c6\u5907\u4e00\u4e2a\u5177\u6709\u7ba1\u7406\u5458\u6743\u9650\u7684\u5f85\u63a5\u5165 ranhcer \u96c6\u7fa4\uff0c\u786e\u4fdd\u5bb9\u5668\u7ba1\u7406\u96c6\u7fa4\u548c\u5f85\u63a5\u5165\u96c6\u7fa4\u4e4b\u95f4\u7f51\u7edc\u901a\u7545\u3002
                                                                    • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 Kpanda Owner \u6216\u66f4\u9ad8\u6743\u9650\u3002
                                                                    "},{"location":"end-user/kpanda/clusters/integrate-rancher-cluster.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"end-user/kpanda/clusters/integrate-rancher-cluster.html#rancher-serviceaccount","title":"\u6b65\u9aa4\u4e00\uff1a\u5728 rancher \u96c6\u7fa4\u521b\u5efa\u5177\u6709\u7ba1\u7406\u5458\u6743\u9650\u7684 ServiceAccount \u7528\u6237","text":"
                                                                    1. \u4f7f\u7528\u5177\u6709\u7ba1\u7406\u5458\u6743\u9650\u7684\u89d2\u8272\u8fdb\u5165 rancher \u96c6\u7fa4\uff0c\u5e76\u4f7f\u7528\u7ec8\u7aef\u65b0\u5efa\u4e00\u4e2a\u540d\u4e3a sa.yaml \u7684\u6587\u4ef6\u3002

                                                                      vi sa.yaml\n

Then press the i key to enter insert mode and enter the following content:

                                                                      sa.yaml
apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: rancher-rke\nrules:\n  - apiGroups:\n      - '*'\n    resources:\n      - '*'\n    verbs:\n      - '*'\n  - nonResourceURLs:\n      - '*'\n    verbs:\n      - '*'\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: rancher-rke\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: rancher-rke\nsubjects:\n  - kind: ServiceAccount\n    name: rancher-rke\n    namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: rancher-rke\n  namespace: kube-system\n

Press the esc key to exit insert mode, then type :wq to save and exit.

2. Run the following command in the current directory to create a ServiceAccount named rancher-rke (hereafter SA):

                                                                      kubectl apply -f sa.yaml\n

The expected output is as follows:

                                                                      clusterrole.rbac.authorization.k8s.io/rancher-rke created\nclusterrolebinding.rbac.authorization.k8s.io/rancher-rke created\nserviceaccount/rancher-rke created\n
3. Create a secret named rancher-rke-secret and bind it to the rancher-rke SA:

kubectl apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: rancher-rke-secret\n  namespace: kube-system\n  annotations:\n    kubernetes.io/service-account.name: rancher-rke\ntype: kubernetes.io/service-account-token\nEOF\n

The expected output is as follows:

                                                                      secret/rancher-rke-secret created\n

                                                                      Note

If your cluster version is lower than 1.24, skip this step and go directly to the next one.

4. Find the secret of the rancher-rke SA:

                                                                      kubectl -n kube-system get secret | grep rancher-rke | awk '{print $1}'\n

Expected output:

                                                                      rancher-rke-secret\n

View the details of the rancher-rke-secret secret:

                                                                      kubectl -n kube-system describe secret rancher-rke-secret\n

Expected output:

                                                                      Name:         rancher-rke-secret\nNamespace:    kube-system\nLabels:       <none>\nAnnotations:  kubernetes.io/service-account.name: rancher-rke\n            kubernetes.io/service-account.uid: d83df5d9-bd7d-488d-a046-b740618a0174\n\nType:  kubernetes.io/service-account-token\n\nData\n====\nca.crt:     570 bytes\nnamespace:  11 bytes\ntoken:      eyJhbGciOiJSUzI1NiIsImtpZCI6IjUtNE9nUWZLRzVpbEJORkZaNmtCQXhqVzRsZHU4MHhHcDBfb0VCaUo0V1kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJyYW5jaGVyLXJrZS1zZWNyZXQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoicmFuY2hlci1ya2UiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkODNkZjVkOS1iZDdkLTQ4OGQtYTA0Ni1iNzQwNjE4YTAxNzQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06cmFuY2hlci1ya2UifQ.VNsMtPEFOdDDeGt_8VHblcMRvjOwPXMM-79o9UooHx6q-VkHOcIOp3FOT2hnEdNnIsyODZVKCpEdCgyozX-3y5x2cZSZpocnkMcBbQm-qfTyUcUhAY7N5gcYUtHUhvRAsNWJcsDCn6d96gT_qo-ddo_cT8Ri39Lc123FDYOnYG-YGFKSgRQVy7Vyv34HIajZCCjZzy7i--eE_7o4DXeTjNqAFMFstUxxHBOXI3Rdn1zKQKqh5Jhg4ES7X-edSviSUfJUX-QV_LlAw5DuAyGPH7bDH4QaQ5k-p6cIctmpWZE-9wRDlKA4LYRblKE7MJcI6OmM4ldlMM0Jc8N-gCtl4w\n
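
As an alternative to copying the token out of the describe output, it can be extracted directly (a sketch; requires a POSIX shell and base64):

TOKEN=$(kubectl -n kube-system get secret rancher-rke-secret -o jsonpath='{.data.token}' | base64 -d)\necho $TOKEN   # use this value in the next step\n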
                                                                    "},{"location":"end-user/kpanda/clusters/integrate-rancher-cluster.html#rancher-rke-sa-kubeconfig","title":"\u6b65\u9aa4\u4e8c\uff1a\u5728\u672c\u5730\u4f7f\u7528 rancher-rke SA \u7684\u8ba4\u8bc1\u4fe1\u606f\u66f4\u65b0 kubeconfig \u6587\u4ef6","text":"

                                                                    \u5728\u4efb\u610f\u4e00\u53f0\u5b89\u88c5\u4e86 kubelet \u7684\u672c\u5730\u8282\u70b9\u6267\u884c\u5982\u4e0b\u64cd\u4f5c\uff1a

                                                                    1. \u914d\u7f6e kubelet token\uff1a

kubectl config set-credentials rancher-rke --token=<token value from rancher-rke-secret>\n

For example:

                                                                      kubectl config set-credentials eks-admin --token=eyJhbGciOiJSUzI1NiIsImtpZCI6IjUtNE9nUWZLRzVpbEJORkZaNmtCQXhqVzRsZHU4MHhHcDBfb0VCaUo0V1kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJyYW5jaGVyLXJrZS1zZWNyZXQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoicmFuY2hlci1ya2UiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkODNkZjVkOS1iZDdkLTQ4OGQtYTA0Ni1iNzQwNjE4YTAxNzQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06cmFuY2hlci1ya2UifQ.VNsMtPEFOdDDeGt_8VHblcMRvjOwPXMM-79o9UooHx6q-VkHOcIOp3FOT2hnEdNnIsyODZVKCpEdCgyozX-3y5x2cZSZpocnkMcBbQm-qfTyUcUhAY7N5gcYUtHUhvRAsNWJcsDCn6d96gT_qo-ddo_cT8Ri39Lc123FDYOnYG-YGFKSgRQVy7Vyv34HIajZCCjZzy7i--eE_7o4DXeTjNqAFMFstUxxHBOXI3Rdn1zKQKqh5Jhg4ES7X-edSviSUfJUX-QV_LlAw5DuAyGPH7bDH4QaQ5k-p6cIctmpWZE-9wRDlKA4LYRblKE7MJcI6OmM4ldlMM0Jc8N-gCtl4w\n
2. Configure the kubectl APIServer information:

kubectl config set-cluster {cluster-name} --insecure-skip-tls-verify=true --server={APIServer}\n
• {cluster-name}: the name of the Rancher cluster.
• {APIServer}: the cluster's access address, usually the control node IP plus port 6443, for example https://10.X.X.X:6443.

For example:

                                                                      kubectl config set-cluster rancher-rke --insecure-skip-tls-verify=true --server=https://10.X.X.X:6443\n
3. Configure the kubectl context:

kubectl config set-context {context-name} --cluster={cluster-name} --user={SA-username}\n

                                                                      \u4f8b\u5982\uff1a

                                                                      kubectl config set-context rancher-rke-context --cluster=rancher-rke --user=rancher-rke\n
4. Switch kubectl to the newly created context rancher-rke-context:

   kubectl config use-context rancher-rke-context
5. Retrieve the kubeconfig information for the context rancher-rke-context:

   kubectl config view --minify --flatten --raw

   Expected output:

   apiVersion: v1
   clusters:
   - cluster:
       insecure-skip-tls-verify: true
       server: https://77C321BCF072682C70C8665ED4BFA10D.gr7.ap-southeast-1.eks.amazonaws.com
     name: joincluster
   contexts:
   - context:
       cluster: joincluster
       user: eks-admin
     name: ekscontext
   current-context: ekscontext
   kind: Config
   preferences: {}
   users:
   - name: eks-admin
     user:
       token: eyJhbGciOiJSUzI1NiIsImtpZCI6ImcxTjJwNkktWm5IbmRJU1RFRExvdWY1TGFWVUtGQ3VIejFtNlFQcUNFalEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2V
                                                                    "},{"location":"end-user/kpanda/clusters/integrate-rancher-cluster.html#ai","title":"\u6b65\u9aa4\u4e09\uff1a\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u754c\u9762\u63a5\u5165\u96c6\u7fa4","text":"

Using the kubeconfig file just obtained, refer to the Integrate Cluster documentation to integrate the rancher cluster into the global service cluster.

                                                                    "},{"location":"end-user/kpanda/clusters/k8s-cert.html","title":"Kubernetes \u96c6\u7fa4\u8bc1\u4e66\u66f4\u65b0","text":"

To secure communication between Kubernetes components, calls between components are authenticated with TLS, which requires the cluster PKI certificates to be configured.

Cluster certificates are valid for one year. To avoid service interruptions caused by an expired certificate, renew the certificates in time.

This article describes how to renew the certificates manually.

                                                                    "},{"location":"end-user/kpanda/clusters/k8s-cert.html#_1","title":"\u68c0\u67e5\u8bc1\u4e66\u662f\u5426\u8fc7\u671f","text":"

You can check whether the certificates have expired with the following command:

kubeadm certs check-expiration

The output is similar to the following:

CERTIFICATE                EXPIRES                  RESIDUAL TIME   CERTIFICATE AUTHORITY   EXTERNALLY MANAGED
admin.conf                 Dec 14, 2024 07:26 UTC   204d                                    no
apiserver                  Dec 14, 2024 07:26 UTC   204d            ca                      no
apiserver-etcd-client      Dec 14, 2024 07:26 UTC   204d            etcd-ca                 no
apiserver-kubelet-client   Dec 14, 2024 07:26 UTC   204d            ca                      no
controller-manager.conf    Dec 14, 2024 07:26 UTC   204d                                    no
etcd-healthcheck-client    Dec 14, 2024 07:26 UTC   204d            etcd-ca                 no
etcd-peer                  Dec 14, 2024 07:26 UTC   204d            etcd-ca                 no
etcd-server                Dec 14, 2024 07:26 UTC   204d            etcd-ca                 no
front-proxy-client         Dec 14, 2024 07:26 UTC   204d            front-proxy-ca          no
scheduler.conf             Dec 14, 2024 07:26 UTC   204d                                    no

CERTIFICATE AUTHORITY   EXPIRES                  RESIDUAL TIME   EXTERNALLY MANAGED
ca                      Dec 12, 2033 07:26 UTC   9y              no
etcd-ca                 Dec 12, 2033 07:26 UTC   9y              no
front-proxy-ca          Dec 12, 2033 07:26 UTC   9y              no
                                                                    "},{"location":"end-user/kpanda/clusters/k8s-cert.html#_2","title":"\u624b\u52a8\u66f4\u65b0\u8bc1\u4e66","text":"

You can renew the certificates manually with the following commands, adding the appropriate command-line options. Back up the current certificates before renewing.

Renew a specific certificate:

kubeadm certs renew <certificate-name>

Renew all certificates:

kubeadm certs renew all

The renewed certificates are located in the /etc/kubernetes/pki directory, with their validity extended by one year. The following corresponding configuration files are updated as well:

• /etc/kubernetes/admin.conf
• /etc/kubernetes/controller-manager.conf
• /etc/kubernetes/scheduler.conf

Note

• If you are running a highly available cluster, this command must be executed on all control-plane nodes.
• This command performs the renewal using the CA (or front-proxy-CA) certificate and key stored in /etc/kubernetes/pki.
                                                                    "},{"location":"end-user/kpanda/clusters/k8s-cert.html#_3","title":"\u91cd\u542f\u670d\u52a1","text":"

After the renewal, you need to restart the control-plane Pods. This is required because dynamic certificate reload is currently not supported by all components and certificates.

Static Pods are managed by the local kubelet rather than the API server, so kubectl cannot be used to delete or restart them.

To restart a static Pod, temporarily move its manifest file out of /etc/kubernetes/manifests/ and wait 20 seconds (see the fileCheckFrequency value in the KubeletConfiguration struct). If the Pod is no longer in the manifest directory, kubelet terminates it. After another fileCheckFrequency period you can move the file back; kubelet recreates the Pod, and the certificate renewal for the component takes effect.

mv ./manifests/* ./temp/
# wait at least 20 seconds before moving the manifests back
mv ./temp/* ./manifests/

Note

If the container service is Docker, you can restart the services involved with the certificates with the following command so the renewal takes effect:

docker ps | grep -E 'k8s_kube-apiserver|k8s_kube-controller-manager|k8s_kube-scheduler|k8s_etcd_etcd' | awk -F ' ' '{print $1}' | xargs docker restart
                                                                    "},{"location":"end-user/kpanda/clusters/k8s-cert.html#kubeconfig","title":"\u66f4\u65b0 KubeConfig","text":"

When the cluster is built, admin.conf is usually copied to $HOME/.kube/config. To update the content of $HOME/.kube/config after admin.conf has been renewed, run the following commands:

sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
                                                                    "},{"location":"end-user/kpanda/clusters/k8s-cert.html#kubelet","title":"\u4e3a kubelet \u914d\u7f6e\u8bc1\u4e66\u8f6e\u6362","text":"

After the steps above, all cluster certificates have been renewed, except kubelet's.

Kubernetes includes a kubelet certificate rotation feature: when the current certificate is about to expire, kubelet automatically generates a new key and requests a new certificate from the Kubernetes API. Once the new certificate is available, it is used to authenticate the connection with the Kubernetes API.

Note

This feature is available in Kubernetes 1.8.0 and later.

To enable client certificate rotation, configure the following parameters:

• The kubelet process accepts the --rotate-certificates flag, which controls whether kubelet automatically requests a new certificate when the certificate currently in use is about to expire.

• The kube-controller-manager process accepts the --cluster-signing-duration flag (--experimental-cluster-signing-duration before version 1.19), which controls the validity period of the certificates it signs.

For more details, refer to Configure certificate rotation for kubelet.
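As a minimal sketch of checking these two settings on a kubeadm-deployed cluster (the file paths are assumptions of that deployment layout, not part of the original instructions):

```bash
# Check whether kubelet certificate rotation is already enabled
# (rotateCertificates defaults to true on kubeadm clusters)
grep rotateCertificates /var/lib/kubelet/config.yaml

# Check the signing duration currently set on kube-controller-manager
grep cluster-signing-duration /etc/kubernetes/manifests/kube-controller-manager.yaml
```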

                                                                    "},{"location":"end-user/kpanda/clusters/k8s-cert.html#_4","title":"\u81ea\u52a8\u66f4\u65b0\u8bc1\u4e66","text":"

To handle expired or soon-to-expire Kubernetes cluster certificates more efficiently and conveniently, refer to Cluster Certificate Update via a k8s Version Upgrade.

                                                                    "},{"location":"end-user/kpanda/clusters/runtime.html","title":"\u5982\u4f55\u9009\u62e9\u5bb9\u5668\u8fd0\u884c\u65f6","text":"

The container runtime is an important Kubernetes component that manages the lifecycle of containers and container images. Kubernetes made containerd the default container runtime in version 1.19 and removed support for the Dockershim component in version 1.24.

Therefore, compared with the Docker runtime, we recommend the lightweight containerd as your container runtime, as it has become the mainstream runtime choice.

Besides that, some operating system vendors offer less-than-friendly compatibility with the Docker runtime. Runtime support across operating systems is listed in the table below:

                                                                    "},{"location":"end-user/kpanda/clusters/runtime.html#_2","title":"\u4e0d\u540c\u64cd\u4f5c\u7cfb\u7edf\u548c\u63a8\u8350\u7684\u8fd0\u884c\u65f6\u7248\u672c\u5bf9\u5e94\u5173\u7cfb","text":"\u64cd\u4f5c\u7cfb\u7edf \u63a8\u8350\u7684 containerd \u7248\u672c \u63a8\u8350\u7684 Docker \u7248\u672c CentOS 1.7.5 20.10 RedHatOS 1.7.5 20.10 KylinOS 1.7.5 19.03\uff08\u4ec5 ARM \u67b6\u6784\u652f\u6301 \uff0c\u5728 x86 \u67b6\u6784\u4e0b\u4e0d\u652f\u6301\u4f7f\u7528 Docker \u4f5c\u4e3a\u8fd0\u884c\u65f6\uff09

For more information about supported runtime versions, refer to the runtime versions supported by RedHatOS and the runtime versions supported by KylinOS.

Note

In offline installation mode, you need to prepare the runtime offline packages for the relevant operating system in advance.

                                                                    "},{"location":"end-user/kpanda/clusters/upgrade-cluster.html","title":"\u96c6\u7fa4\u5347\u7ea7","text":"

The Kubernetes community releases a minor version every quarter, and each version is maintained for only about nine months. Once maintenance for a version ends, it no longer receives fixes for major or security vulnerabilities. Upgrading a cluster manually is tedious and places a heavy burden on administrators.

This section describes how to upgrade the Kubernetes version of a worker cluster online with one click through the Web UI. To upgrade the Kubernetes version of a worker cluster offline, refer to the worker cluster offline upgrade guide.

Danger

After the version upgrade you cannot roll back to the previous version; proceed with caution.

Note

• Kubernetes versions are denoted x.y.z, where x is the major version, y the minor version, and z the patch version.
• Upgrading a cluster across minor versions is not allowed; for example, you cannot upgrade directly from 1.23 to 1.25.
• Integrated clusters do not support version upgrades. If there is no Cluster Upgrade entry in the left navigation bar, check whether the cluster is an integrated cluster.
• The global service cluster can only be upgraded through the terminal.
• When upgrading a worker cluster, the management cluster of that worker cluster should already be integrated into the container management module and be running normally.
• If you need to modify cluster parameters, you can do so by upgrading to the same version; see the steps below.
1. In the cluster list, click the name of the target cluster.

2. Then, in the left navigation bar, click Cluster Operations -> Cluster Upgrade, and click Version Upgrade in the upper-right corner of the page.

3. Select an available version and enter the cluster name to confirm.

   Note

   If you want to modify cluster parameters through an upgrade, follow these steps:

   1. Find the ConfigMap corresponding to the cluster. You can log in to a control node and run the following command to find the ConfigMap name in varsConfRef.

      kubectl get cluster.kubean.io <clustername> -o yaml

   2. Modify the parameter information in the ConfigMap as needed (see the sketch after these steps).

   3. Select the same version here and perform the upgrade; once the upgrade completes, the cluster parameters are updated.

4. After you click OK, you can see the upgrade progress of the cluster.

5. The cluster upgrade is expected to take 30 minutes; you can click the Real-time Logs button to view the detailed upgrade logs.
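A minimal sketch of editing that ConfigMap from a terminal (the name and namespace placeholders must be taken from the varsConfRef field found in the note's step 1; they are not fixed values):

```bash
# open the vars ConfigMap referenced by varsConfRef in an editor
kubectl edit configmap <varsconf-name> -n <varsconf-namespace>
```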

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/configmap-hot-loading.html","title":"configmap/secret \u70ed\u52a0\u8f7d","text":"

Hot reloading of a configmap/secret means that when the configmap/secret is mounted in a container as a data volume and the configuration changes, the container automatically reads the updated configmap/secret configuration without the Pod being restarted.

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/configmap-hot-loading.html#_1","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                                    1. \u53c2\u8003\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d - \u5bb9\u5668\u914d\u7f6e\uff0c\u914d\u7f6e\u5bb9\u5668\u6570\u636e\u5b58\u50a8\uff0c\u9009\u62e9 Configmap \u3001 Configmap Key \u3001 Secret \u3001 Secret Key \u4f5c\u4e3a\u6570\u636e\u5377\u6302\u8f7d\u81f3\u5bb9\u5668\u3002

                                                                      Note

                                                                      \u4f7f\u7528\u5b50\u8def\u5f84\uff08SubPath\uff09\u65b9\u5f0f\u6302\u8f7d\u7684\u914d\u7f6e\u6587\u4ef6\u4e0d\u652f\u6301\u70ed\u52a0\u8f7d\u3002

                                                                    2. \u8fdb\u5165\u3010\u914d\u7f6e\u4e0e\u5bc6\u94a5\u3011\u9875\u9762\uff0c\u8fdb\u5165\u914d\u7f6e\u9879\u8be6\u60c5\u9875\u9762\uff0c\u5728\u3010\u5173\u8054\u8d44\u6e90\u3011\u4e2d\u627e\u5230\u5bf9\u5e94\u7684 container \u8d44\u6e90\uff0c\u70b9\u51fb \u7acb\u5373\u52a0\u8f7d \u6309\u94ae\uff0c\u8fdb\u5165\u914d\u7f6e\u70ed\u52a0\u8f7d\u9875\u9762\u3002

                                                                      Note

                                                                      \u5982\u679c\u60a8\u7684\u5e94\u7528\u652f\u6301\u81ea\u52a8\u8bfb\u53d6 configmap/secret \u66f4\u65b0\u540e\u7684\u914d\u7f6e\uff0c\u5219\u65e0\u9700\u624b\u52a8\u6267\u884c\u70ed\u52a0\u8f7d\u64cd\u4f5c\u3002

                                                                    3. \u5728\u70ed\u52a0\u8f7d\u914d\u7f6e\u5f39\u7a97\u4e2d\uff0c\u8f93\u5165\u8fdb\u5165\u5bb9\u5668\u5185\u7684 \u6267\u884c\u547d\u4ee4 \u5e76\u70b9\u51fb \u786e\u5b9a \u6309\u94ae\uff0c\u4ee5\u91cd\u8f7d\u914d\u7f6e\u3002\u4f8b\u5982\uff0c\u5728 nginx \u5bb9\u5668\u4e2d\uff0c\u4ee5 root \u7528\u6237\u6743\u9650\uff0c\u6267\u884c nginx -s reload \u547d\u4ee4\u6765\u91cd\u8f7d\u914d\u7f6e\u3002

                                                                    4. \u5728\u754c\u9762\u5f39\u51fa\u7684 web \u7ec8\u7aef\u4e2d\u67e5\u770b\u5e94\u7528\u91cd\u8f7d\u60c5\u51b5\u3002
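The same reload can be triggered from a terminal, as in this minimal sketch (the Deployment name my-nginx is a hypothetical example):

```bash
# run the reload command inside the nginx workload's container
kubectl exec deploy/my-nginx -- nginx -s reload
```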

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/create-configmap.html","title":"\u521b\u5efa\u914d\u7f6e\u9879","text":"

A ConfigMap stores non-confidential data in key-value pairs, decoupling configuration data from application code. ConfigMaps can be used as container environment variables, command-line parameters, or configuration files in a storage volume.

Note

• The data stored in a configmap must not exceed 1 MiB. If you need to store larger data, mount a storage volume or use an independent database or file service.

• ConfigMaps do not provide confidentiality or encryption. To store encrypted data, use a Secret or another third-party tool to keep the data private.

Two creation methods are supported:

• Graphical form creation
• YAML creation
                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/create-configmap.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                    • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762

                                                                    • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u5c06\u7528\u6237\u6388\u6743\u4e3a NS Editor \u89d2\u8272 \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/create-configmap.html#_3","title":"\u56fe\u5f62\u5316\u8868\u5355\u521b\u5efa","text":"
                                                                    1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u67d0\u4e2a\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                                                    2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u914d\u7f6e\u4e0e\u5bc6\u94a5 -> \u914d\u7f6e\u9879 \uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 \u521b\u5efa\u914d\u7f6e\u9879 \u6309\u94ae\u3002

                                                                    3. \u5728 \u521b\u5efa\u914d\u7f6e\u9879 \u9875\u9762\u4e2d\u586b\u5199\u914d\u7f6e\u4fe1\u606f\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                                                                      Note

                                                                      \u70b9\u51fb \u4e0a\u4f20\u6587\u4ef6 \u53ef\u4ee5\u4ece\u672c\u5730\u5bfc\u5165\u5df2\u6709\u7684\u6587\u4ef6\uff0c\u5feb\u901f\u521b\u5efa\u914d\u7f6e\u9879\u3002

                                                                    4. \u521b\u5efa\u5b8c\u6210\u540e\u5728\u914d\u7f6e\u9879\u53f3\u4fa7\u70b9\u51fb\u66f4\u591a\u53ef\u4ee5\uff0c\u53ef\u4ee5\u7f16\u8f91 YAML\u3001\u66f4\u65b0\u3001\u5bfc\u51fa\u3001\u5220\u9664\u7b49\u64cd\u4f5c\u3002

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/create-configmap.html#yaml","title":"YAML \u521b\u5efa","text":"
                                                                    1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u67d0\u4e2a\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                                                    2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u914d\u7f6e\u4e0e\u5bc6\u94a5 -> \u914d\u7f6e\u9879 \uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 YAML \u521b\u5efa \u6309\u94ae\u3002

                                                                    3. \u586b\u5199\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684\u914d\u7f6e\u6587\u4ef6\uff0c\u7136\u540e\u5728\u5f39\u6846\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u3002

                                                                      Note

                                                                      • \u70b9\u51fb \u5bfc\u5165 \u53ef\u4ee5\u4ece\u672c\u5730\u5bfc\u5165\u5df2\u6709\u7684\u6587\u4ef6\uff0c\u5feb\u901f\u521b\u5efa\u914d\u7f6e\u9879\u3002
                                                                      • \u586b\u5199\u6570\u636e\u4e4b\u540e\u70b9\u51fb \u4e0b\u8f7d \u53ef\u4ee5\u5c06\u914d\u7f6e\u6587\u4ef6\u4fdd\u5b58\u5728\u672c\u5730\u3002

                                                                    4. \u521b\u5efa\u5b8c\u6210\u540e\u5728\u914d\u7f6e\u9879\u53f3\u4fa7\u70b9\u51fb\u66f4\u591a\u53ef\u4ee5\uff0c\u53ef\u4ee5\u7f16\u8f91 YAML\u3001\u66f4\u65b0\u3001\u5bfc\u51fa\u3001\u5220\u9664\u7b49\u64cd\u4f5c\u3002

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/create-configmap.html#yaml_1","title":"\u914d\u7f6e\u9879 YAML \u793a\u4f8b","text":"
                                                                    ```yaml\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: kube-root-ca.crt\n  namespace: default\n  annotations:\ndata:\n  version: '1.0'\n```\n
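An equivalent object can also be created directly from the command line, as in this minimal sketch (the name demo-config is a hypothetical example to avoid clashing with existing objects):

```bash
# create the ConfigMap without writing YAML by hand
kubectl create configmap demo-config --namespace default --from-literal=version=1.0
```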

Next step: Use ConfigMaps

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/create-secret.html","title":"\u521b\u5efa\u5bc6\u94a5","text":"

A secret is a resource object used to store and manage sensitive information such as passwords, OAuth tokens, SSH and TLS credentials. Using secrets means you do not need to include sensitive confidential data in your application code.

Secret usage scenarios:

• Used as container environment variables, providing necessary information the container needs at runtime.
• Used as a Pod data volume.
• Used as registry authentication credentials when kubelet pulls container images.

Two creation methods are supported:

• Graphical form creation
• YAML creation
                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/create-secret.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                    • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762

                                                                    • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u5c06\u7528\u6237\u6388\u6743\u4e3a NS Editor \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/create-secret.html#_3","title":"\u56fe\u5f62\u5316\u8868\u5355\u521b\u5efa","text":"
                                                                    1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u67d0\u4e2a\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                                                    2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u914d\u7f6e\u4e0e\u5bc6\u94a5 -> \u5bc6\u94a5 \uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 \u521b\u5efa\u5bc6\u94a5 \u6309\u94ae\u3002

                                                                    3. \u5728 \u521b\u5efa\u5bc6\u94a5 \u9875\u9762\u4e2d\u586b\u5199\u914d\u7f6e\u4fe1\u606f\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                                                                      \u586b\u5199\u914d\u7f6e\u65f6\u9700\u8981\u6ce8\u610f\uff1a

                                                                      • \u5bc6\u94a5\u7684\u540d\u79f0\u5728\u540c\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u4e2d\u5fc5\u987b\u5177\u6709\u552f\u4e00\u6027
                                                                      • \u5bc6\u94a5\u7c7b\u578b\uff1a
                                                                        • \u9ed8\u8ba4\uff08Opaque\uff09\uff1aKubernetes \u9ed8\u8ba4\u7684\u5bc6\u94a5\u7c7b\u578b\uff0c\u652f\u6301\u7528\u6237\u5b9a\u4e49\u7684\u4efb\u610f\u6570\u636e\u3002
                                                                        • TLS (kubernetes.io/tls)\uff1a\u7528\u4e8e TLS \u5ba2\u6237\u7aef\u6216\u8005\u670d\u52a1\u5668\u7aef\u6570\u636e\u8bbf\u95ee\u7684\u51ed\u8bc1\u3002
                                                                        • \u955c\u50cf\u4ed3\u5e93\u4fe1\u606f (kubernetes.io/dockerconfigjson)\uff1a\u7528\u4e8e\u955c\u50cf\u4ed3\u5e93\u8bbf\u95ee\u7684\u51ed\u8bc1\u3002
                                                                        • \u7528\u6237\u540d\u548c\u5bc6\u7801\uff08kubernetes.io/basic-auth\uff09\uff1a\u7528\u4e8e\u57fa\u672c\u8eab\u4efd\u8ba4\u8bc1\u7684\u51ed\u8bc1\u3002
                                                                        • \u81ea\u5b9a\u4e49\uff1a\u7528\u6237\u6839\u636e\u4e1a\u52a1\u9700\u8981\u81ea\u5b9a\u4e49\u7684\u7c7b\u578b\u3002
                                                                      • \u5bc6\u94a5\u6570\u636e\uff1a\u5bc6\u94a5\u6240\u5b58\u50a8\u7684\u6570\u636e\uff0c\u4e0d\u540c\u6570\u636e\u9700\u8981\u586b\u5199\u7684\u53c2\u6570\u6709\u6240\u4e0d\u540c
                                                                        • \u5f53\u5bc6\u94a5\u7c7b\u578b\u4e3a\u9ed8\u8ba4\uff08Opaque\uff09/\u81ea\u5b9a\u4e49\uff1a\u53ef\u4ee5\u586b\u5165\u591a\u4e2a\u952e\u503c\u5bf9\u6570\u636e\u3002
                                                                        • \u5f53\u5bc6\u94a5\u7c7b\u578b\u4e3a TLS (kubernetes.io/tls)\uff1a\u9700\u8981\u586b\u5165\u8bc1\u4e66\u51ed\u8bc1\u548c\u79c1\u94a5\u6570\u636e\u3002\u8bc1\u4e66\u662f\u81ea\u7b7e\u540d\u6216 CA \u7b7e\u540d\u8fc7\u7684\u51ed\u636e\uff0c\u7528\u6765\u8fdb\u884c\u8eab\u4efd\u8ba4\u8bc1\u3002\u8bc1\u4e66\u8bf7\u6c42\u662f\u5bf9\u7b7e\u540d\u7684\u8bf7\u6c42\uff0c\u9700\u8981\u4f7f\u7528\u79c1\u94a5\u8fdb\u884c\u7b7e\u540d\u3002
                                                                        • \u5f53\u5bc6\u94a5\u7c7b\u578b\u4e3a\u955c\u50cf\u4ed3\u5e93\u4fe1\u606f (kubernetes.io/dockerconfigjson)\uff1a\u9700\u8981\u586b\u5165\u79c1\u6709\u955c\u50cf\u4ed3\u5e93\u7684\u8d26\u53f7\u548c\u5bc6\u7801\u3002
                                                                        • \u5f53\u5bc6\u94a5\u7c7b\u578b\u4e3a\u7528\u6237\u540d\u548c\u5bc6\u7801\uff08kubernetes.io/basic-auth\uff09\uff1a\u9700\u8981\u6307\u5b9a\u7528\u6237\u540d\u548c\u5bc6\u7801\u3002
                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/create-secret.html#yaml","title":"YAML \u521b\u5efa","text":"
                                                                    1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u67d0\u4e2a\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                                                    2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u914d\u7f6e\u4e0e\u5bc6\u94a5 -> \u5bc6\u94a5 \uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 YAML \u521b\u5efa \u6309\u94ae\u3002

                                                                    3. \u5728 YAML \u521b\u5efa \u9875\u9762\u4e2d\u586b\u5199 YAML \u914d\u7f6e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                                                                      \u652f\u6301\u4ece\u672c\u5730\u5bfc\u5165 YAML \u6587\u4ef6\u6216\u5c06\u586b\u5199\u597d\u7684\u6587\u4ef6\u4e0b\u8f7d\u4fdd\u5b58\u5230\u672c\u5730\u3002

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/create-secret.html#yaml_1","title":"\u5bc6\u94a5 YAML \u793a\u4f8b","text":"
                                                                    ```yaml\napiVersion: v1\nkind: Secret\nmetadata:\n  name: secretdemo\ntype: Opaque\ndata:\n  username: ******\n  password: ******\n```\n
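A minimal command-line sketch that produces an equivalent object (the credential values are placeholders; kubectl handles the base64 encoding for you):

```bash
# create an Opaque secret from literal values
kubectl create secret generic secretdemo \
  --from-literal=username=admin \
  --from-literal=password='S3cr3t!'
```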

Next step: Use Secrets

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/use-configmap.html","title":"\u4f7f\u7528\u914d\u7f6e\u9879","text":"

                                                                    \u914d\u7f6e\u9879\uff08ConfigMap\uff09\u662f Kubernetes \u7684\u4e00\u79cd API \u5bf9\u8c61\uff0c\u7528\u6765\u5c06\u975e\u673a\u5bc6\u6027\u7684\u6570\u636e\u4fdd\u5b58\u5230\u952e\u503c\u5bf9\u4e2d\uff0c\u53ef\u4ee5\u5b58\u50a8\u5176\u4ed6\u5bf9\u8c61\u6240\u9700\u8981\u4f7f\u7528\u7684\u914d\u7f6e\u3002 \u4f7f\u7528\u65f6\uff0c \u5bb9\u5668\u53ef\u4ee5\u5c06\u5176\u7528\u4f5c\u73af\u5883\u53d8\u91cf\u3001\u547d\u4ee4\u884c\u53c2\u6570\u6216\u8005\u5b58\u50a8\u5377\u4e2d\u7684\u914d\u7f6e\u6587\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528\u914d\u7f6e\u9879\uff0c\u80fd\u591f\u5c06\u914d\u7f6e\u6570\u636e\u548c\u5e94\u7528\u7a0b\u5e8f\u4ee3\u7801\u5206\u5f00\uff0c\u4e3a\u5e94\u7528\u914d\u7f6e\u7684\u4fee\u6539\u63d0\u4f9b\u66f4\u52a0\u7075\u6d3b\u7684\u9014\u5f84\u3002

Note

ConfigMaps do not provide confidentiality or encryption. If the data to be stored is confidential, use a Secret or another third-party tool to keep the data private instead of a configmap. Also, when a configmap is used inside a container, the container and the configmap must be in the same namespace of the same cluster.

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/use-configmap.html#_2","title":"\u4f7f\u7528\u573a\u666f","text":"

                                                                    \u60a8\u53ef\u4ee5\u5728 Pod \u4e2d\u4f7f\u7528\u914d\u7f6e\u9879\uff0c\u6709\u591a\u79cd\u4f7f\u7528\u573a\u666f\uff0c\u4e3b\u8981\u5305\u62ec\uff1a

                                                                    • \u4f7f\u7528\u914d\u7f6e\u9879\u8bbe\u7f6e\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf

                                                                    • \u4f7f\u7528\u914d\u7f6e\u9879\u8bbe\u7f6e\u5bb9\u5668\u7684\u547d\u4ee4\u884c\u53c2\u6570

                                                                    • \u4f7f\u7528\u914d\u7f6e\u9879\u4f5c\u4e3a\u5bb9\u5668\u7684\u6570\u636e\u5377

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/use-configmap.html#_3","title":"\u8bbe\u7f6e\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf","text":"

                                                                    \u60a8\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u5316\u754c\u9762\u6216\u8005\u7ec8\u7aef\u547d\u4ee4\u884c\u6765\u4f7f\u7528\u914d\u7f6e\u9879\u4f5c\u4e3a\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u3002

Note

ConfigMap import uses the whole configmap as the value of environment variables; configmap key import uses a single parameter in the configmap as the value of an environment variable.

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/use-configmap.html#_4","title":"\u56fe\u5f62\u5316\u754c\u9762\u64cd\u4f5c","text":"

                                                                    \u901a\u8fc7\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u53ef\u4ee5\u5728 \u73af\u5883\u53d8\u91cf \u754c\u9762\u901a\u8fc7\u9009\u62e9 \u914d\u7f6e\u9879\u5bfc\u5165 \u6216 \u914d\u7f6e\u9879\u952e\u503c\u5bfc\u5165 \u4e3a\u5bb9\u5668\u8bbe\u7f6e\u73af\u5883\u53d8\u91cf\u3002

                                                                    1. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u9875\u9762\u4e2d\uff0c\u5728 \u5bb9\u5668\u914d\u7f6e \u8fd9\u4e00\u6b65\u4e2d\uff0c\u9009\u62e9 \u73af\u5883\u53d8\u91cf \u914d\u7f6e\uff0c\u70b9\u51fb \u6dfb\u52a0\u73af\u5883\u53d8\u91cf \u6309\u94ae\u3002

                                                                    2. \u5728\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u5904\u9009\u62e9 \u914d\u7f6e\u9879\u5bfc\u5165 \u6216 \u914d\u7f6e\u9879\u952e\u503c\u5bfc\u5165 \u3002

                                                                      • \u5f53\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u9009\u62e9\u4e3a \u914d\u7f6e\u9879\u5bfc\u5165 \u65f6\uff0c\u4f9d\u6b21\u8f93\u5165 \u53d8\u91cf\u540d \u3001 \u524d\u7f00 \u540d\u79f0\u3001 \u914d\u7f6e\u9879 \u7684\u540d\u79f0\u3002

                                                                      • \u5f53\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u9009\u62e9\u4e3a \u914d\u7f6e\u9879\u952e\u503c\u5bfc\u5165 \u65f6\uff0c\u4f9d\u6b21\u8f93\u5165 \u53d8\u91cf\u540d \u3001 \u914d\u7f6e\u9879 \u540d\u79f0\u3001 \u952e \u7684\u540d\u79f0\u3002

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/use-configmap.html#_5","title":"\u547d\u4ee4\u884c\u64cd\u4f5c","text":"

                                                                    \u60a8\u53ef\u4ee5\u5728\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\u5c06\u914d\u7f6e\u9879\u8bbe\u7f6e\u4e3a\u73af\u5883\u53d8\u91cf\uff0c\u4f7f\u7528 valueFrom \u53c2\u6570\u5f15\u7528 ConfigMap \u4e2d\u7684 Key/Value\u3002

apiVersion: v1
kind: Pod
metadata:
  name: configmap-pod-1
spec:
  containers:
    - name: test-container
      image: busybox
      command: [ "/bin/sh", "-c", "env" ]
      env:
        - name: SPECIAL_LEVEL_KEY
          valueFrom:                  # (1)!
            configMapKeyRef:
              name: kpanda-configmap  # (2)!
              key: SPECIAL_LEVEL      # (3)!
  restartPolicy: Never

1. Use valueFrom to have the env entry reference a value from the configmap
2. The name of the referenced configmap
3. The key of the referenced configmap
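For the whole-ConfigMap import described in the graphical section, a command-line sketch looks like this (the Deployment name my-app and the prefix CONF_ are illustrative assumptions):

```bash
# import every key of the ConfigMap as environment variables, with an optional prefix
kubectl set env deployment/my-app --from=configmap/kpanda-configmap --prefix=CONF_
```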
                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/use-configmap.html#_6","title":"\u8bbe\u7f6e\u5bb9\u5668\u7684\u547d\u4ee4\u884c\u53c2\u6570","text":"

                                                                    \u60a8\u53ef\u4ee5\u4f7f\u7528\u914d\u7f6e\u9879\u8bbe\u7f6e\u5bb9\u5668\u4e2d\u7684\u547d\u4ee4\u6216\u8005\u53c2\u6570\u503c\uff0c\u4f7f\u7528\u73af\u5883\u53d8\u91cf\u66ff\u6362\u8bed\u6cd5 $(VAR_NAME) \u6765\u8fdb\u884c\u3002\u5982\u4e0b\u6240\u793a\u3002

apiVersion: v1
kind: Pod
metadata:
  name: configmap-pod-3
spec:
  containers:
    - name: test-container
      image: busybox
      command: [ "/bin/sh", "-c", "echo $(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY)" ]
      env:
        - name: SPECIAL_LEVEL_KEY
          valueFrom:
            configMapKeyRef:
              name: kpanda-configmap
              key: SPECIAL_LEVEL
        - name: SPECIAL_TYPE_KEY
          valueFrom:
            configMapKeyRef:
              name: kpanda-configmap
              key: SPECIAL_TYPE
  restartPolicy: Never

After this Pod runs, it prints the following output (assuming the configmap stores SPECIAL_LEVEL=Hello and SPECIAL_TYPE=Kpanda):

Hello Kpanda
                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/use-configmap.html#_7","title":"\u7528\u4f5c\u5bb9\u5668\u6570\u636e\u5377","text":"

                                                                    \u60a8\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u5316\u754c\u9762\u6216\u8005\u7ec8\u7aef\u547d\u4ee4\u884c\u6765\u4f7f\u7528\u914d\u7f6e\u9879\u4f5c\u4e3a\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u3002

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/use-configmap.html#_8","title":"\u56fe\u5f62\u5316\u64cd\u4f5c","text":"

                                                                    \u5728\u901a\u8fc7\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u5728 \u6570\u636e\u5b58\u50a8 \u754c\u9762\u9009\u62e9\u5b58\u50a8\u7c7b\u578b\u4e3a \u914d\u7f6e\u9879 \uff0c\u5c06\u914d\u7f6e\u9879\u4f5c\u4e3a\u5bb9\u5668\u7684\u6570\u636e\u5377\u3002

                                                                    1. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u9875\u9762\u4e2d\uff0c\u5728 \u5bb9\u5668\u914d\u7f6e \u8fd9\u4e00\u6b65\u4e2d\uff0c\u9009\u62e9 \u6570\u636e\u5b58\u50a8 \u914d\u7f6e\uff0c\u5728 \u8282\u70b9\u8def\u5f84\u6620\u5c04 \u5217\u8868\u70b9\u51fb \u6dfb\u52a0 \u6309\u94ae\u3002

                                                                    2. \u5728\u5b58\u50a8\u7c7b\u578b\u5904\u9009\u62e9 \u914d\u7f6e\u9879 \uff0c\u5e76\u4f9d\u6b21\u8f93\u5165 \u5bb9\u5668\u8def\u5f84 \u3001 \u5b50\u8def\u5f84 \u7b49\u4fe1\u606f\u3002

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/use-configmap.html#_9","title":"\u547d\u4ee4\u884c\u64cd\u4f5c","text":"

                                                                    \u8981\u5728\u4e00\u4e2a Pod \u7684\u5b58\u50a8\u5377\u4e2d\u4f7f\u7528 ConfigMap\u3002

                                                                    \u4e0b\u9762\u662f\u4e00\u4e2a\u5c06 ConfigMap \u4ee5\u5377\u7684\u5f62\u5f0f\u8fdb\u884c\u6302\u8f7d\u7684 Pod \u793a\u4f8b\uff1a

apiVersion: v1
kind: Pod
metadata:
  name: mypod
spec:
  containers:
  - name: mypod
    image: redis
    volumeMounts:
    - name: foo
      mountPath: "/etc/foo"
      readOnly: true
  volumes:
  - name: foo
    configMap:
      name: myconfigmap

If there are multiple containers in the Pod, each container needs its own volumeMounts block, but only one spec.volumes block is needed per ConfigMap.

Note

When a configmap is mounted as a container data volume, it can only be read as read-only files.

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html","title":"\u4f7f\u7528\u5bc6\u94a5","text":"

                                                                    \u5bc6\u94a5\u662f\u4e00\u79cd\u7528\u4e8e\u5b58\u50a8\u548c\u7ba1\u7406\u5bc6\u7801\u3001OAuth \u4ee4\u724c\u3001SSH\u3001TLS \u51ed\u636e\u7b49\u654f\u611f\u4fe1\u606f\u7684\u8d44\u6e90\u5bf9\u8c61\u3002\u4f7f\u7528\u5bc6\u94a5\u610f\u5473\u7740\u60a8\u4e0d\u9700\u8981\u5728\u5e94\u7528\u7a0b\u5e8f\u4ee3\u7801\u4e2d\u5305\u542b\u654f\u611f\u7684\u673a\u5bc6\u6570\u636e\u3002

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html#_2","title":"\u4f7f\u7528\u573a\u666f","text":"

                                                                    \u60a8\u53ef\u4ee5\u5728 Pod \u4e2d\u4f7f\u7528\u5bc6\u94a5\uff0c\u6709\u591a\u79cd\u4f7f\u7528\u573a\u666f\uff0c\u4e3b\u8981\u5305\u62ec\uff1a

                                                                    • \u4f5c\u4e3a\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u4f7f\u7528\uff0c\u63d0\u4f9b\u5bb9\u5668\u8fd0\u884c\u8fc7\u7a0b\u4e2d\u6240\u9700\u7684\u4e00\u4e9b\u5fc5\u8981\u4fe1\u606f\u3002
                                                                    • \u4f7f\u7528\u5bc6\u94a5\u4f5c\u4e3a Pod \u7684\u6570\u636e\u5377\u3002
                                                                    • \u5728 kubelet \u62c9\u53d6\u5bb9\u5668\u955c\u50cf\u65f6\u7528\u4f5c\u955c\u50cf\u4ed3\u5e93\u7684\u8eab\u4efd\u8ba4\u8bc1\u51ed\u8bc1\u4f7f\u7528\u3002
                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html#_3","title":"\u4f7f\u7528\u5bc6\u94a5\u8bbe\u7f6e\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf","text":"

                                                                    \u60a8\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u5316\u754c\u9762\u6216\u8005\u7ec8\u7aef\u547d\u4ee4\u884c\u6765\u4f7f\u7528\u5bc6\u94a5\u4f5c\u4e3a\u5bb9\u5668\u7684\u73af\u5883\u53d8\u91cf\u3002

Note

Secret import uses the whole secret as the value of environment variables; secret key import uses a single parameter in the secret as the value of an environment variable.

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html#_4","title":"\u56fe\u5f62\u754c\u9762\u64cd\u4f5c","text":"

                                                                    \u5728\u901a\u8fc7\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u60a8\u53ef\u4ee5\u5728 \u73af\u5883\u53d8\u91cf \u754c\u9762\u901a\u8fc7\u9009\u62e9 \u5bc6\u94a5\u5bfc\u5165 \u6216 \u5bc6\u94a5\u952e\u503c\u5bfc\u5165 \u4e3a\u5bb9\u5668\u8bbe\u7f6e\u73af\u5883\u53d8\u91cf\u3002

                                                                    1. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u9875\u9762\u3002

                                                                    2. \u5728 \u5bb9\u5668\u914d\u7f6e \u9009\u62e9 \u73af\u5883\u53d8\u91cf \u914d\u7f6e\uff0c\u70b9\u51fb \u6dfb\u52a0\u73af\u5883\u53d8\u91cf \u6309\u94ae\u3002

                                                                    3. \u5728\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u5904\u9009\u62e9 \u5bc6\u94a5\u5bfc\u5165 \u6216 \u5bc6\u94a5\u952e\u503c\u5bfc\u5165 \u3002

                                                                      • \u5f53\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u9009\u62e9\u4e3a \u5bc6\u94a5\u5bfc\u5165 \u65f6\uff0c\u4f9d\u6b21\u8f93\u5165 \u53d8\u91cf\u540d \u3001 \u524d\u7f00 \u3001 \u5bc6\u94a5 \u7684\u540d\u79f0\u3002

                                                                      • \u5f53\u73af\u5883\u53d8\u91cf\u7c7b\u578b\u9009\u62e9\u4e3a \u5bc6\u94a5\u952e\u503c\u5bfc\u5165 \u65f6\uff0c\u4f9d\u6b21\u8f93\u5165 \u53d8\u91cf\u540d \u3001 \u5bc6\u94a5 \u3001 \u952e \u7684\u540d\u79f0\u3002

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html#_5","title":"\u547d\u4ee4\u884c\u64cd\u4f5c","text":"

                                                                    \u5982\u4e0b\u4f8b\u6240\u793a\uff0c\u60a8\u53ef\u4ee5\u5728\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\u5c06\u5bc6\u94a5\u8bbe\u7f6e\u4e3a\u73af\u5883\u53d8\u91cf\uff0c\u4f7f\u7528 valueFrom \u53c2\u6570\u5f15\u7528 Secret \u4e2d\u7684 Key/Value\u3002

apiVersion: v1
kind: Pod
metadata:
  name: secret-env-pod
spec:
  containers:
  - name: mycontainer
    image: redis
    env:
      - name: SECRET_USERNAME
        valueFrom:
          secretKeyRef:
            name: mysecret
            key: username
            optional: false # (1)!
      - name: SECRET_PASSWORD
        valueFrom:
          secretKeyRef:
            name: mysecret
            key: password
            optional: false # (2)!

1. The default value; it means "mysecret" must exist and contain a key named "username"
2. The default value; it means "mysecret" must exist and contain a key named "password"
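For the whole-Secret import described in the graphical section, a command-line sketch looks like this (the Deployment name my-app is an illustrative assumption):

```bash
# import every key of the Secret as environment variables
kubectl set env deployment/my-app --from=secret/mysecret
```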
                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html#pod","title":"\u4f7f\u7528\u5bc6\u94a5\u4f5c\u4e3a Pod \u7684\u6570\u636e\u5377","text":""},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html#_6","title":"\u56fe\u5f62\u754c\u9762\u64cd\u4f5c","text":"

When creating a workload from an image, you can use a secret as the container's data volume by selecting Secret as the storage type on the Data Storage page.

1. Go to the create-workload-from-image page.

2. In Container Configuration, select the Data Storage configuration and click the Add button in the Node Path Mapping list.

3. For the storage type select Secret, then enter the container path, subpath, and other information.

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html#_7","title":"\u547d\u4ee4\u884c\u64cd\u4f5c","text":"

                                                                    \u4e0b\u9762\u662f\u4e00\u4e2a\u901a\u8fc7\u6570\u636e\u5377\u6765\u6302\u8f7d\u540d\u4e3a mysecret \u7684 Secret \u7684 Pod \u793a\u4f8b\uff1a

                                                                    apiVersion: v1\nkind: Pod\nmetadata:\n  name: mypod\nspec:\n  containers:\n  - name: mypod\n    image: redis\n    volumeMounts:\n    - name: foo\n      mountPath: \"/etc/foo\"\n      readOnly: true\n  volumes:\n  - name: foo\n    secret:\n      secretName: mysecret\n      optional: false # (1)!\n
1. The default setting; it means \"mysecret\" must already exist

If a Pod contains multiple containers, each container needs its own volumeMounts block, but only one .spec.volumes entry is required per Secret.
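The following minimal sketch illustrates this point: a hypothetical two-container Pod (the names mypod-multi, app, and sidecar are for illustration only) in which each container declares its own volumeMounts block, while the Secret is declared only once under .spec.volumes.

apiVersion: v1\nkind: Pod\nmetadata:\n  name: mypod-multi # hypothetical name for illustration\nspec:\n  containers:\n  - name: app\n    image: redis\n    volumeMounts: # each container needs its own volumeMounts block\n    - name: foo\n      mountPath: \"/etc/foo\"\n      readOnly: true\n  - name: sidecar\n    image: busybox\n    command: [\"sleep\", \"3600\"]\n    volumeMounts: # the second container repeats the mount\n    - name: foo\n      mountPath: \"/etc/foo\"\n      readOnly: true\n  volumes: # only one volumes entry is needed for the Secret\n  - name: foo\n    secret:\n      secretName: mysecret\n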

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html#kubelet","title":"\u5728 kubelet \u62c9\u53d6\u5bb9\u5668\u955c\u50cf\u65f6\u7528\u4f5c\u955c\u50cf\u4ed3\u5e93\u7684\u8eab\u4efd\u8ba4\u8bc1\u51ed\u8bc1","text":"

                                                                    \u60a8\u53ef\u4ee5\u901a\u8fc7\u56fe\u5f62\u5316\u754c\u9762\u6216\u8005\u7ec8\u7aef\u547d\u4ee4\u884c\u6765\u4f7f\u7528\u5bc6\u94a5\u4f5c\u4e3a\u955c\u50cf\u4ed3\u5e93\u8eab\u4efd\u8ba4\u8bc1\u51ed\u8bc1\u3002

                                                                    "},{"location":"end-user/kpanda/configmaps-secrets/use-secret.html#_8","title":"\u56fe\u5f62\u5316\u64cd\u4f5c","text":"

                                                                    \u5728\u901a\u8fc7\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u65f6\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u5728 \u6570\u636e\u5b58\u50a8 \u754c\u9762\u9009\u62e9\u5b58\u50a8\u7c7b\u578b\u4e3a \u5bc6\u94a5 \uff0c\u5c06\u5bc6\u94a5\u4f5c\u4e3a\u5bb9\u5668\u7684\u6570\u636e\u5377\u3002

                                                                    1. \u8fdb\u5165\u955c\u50cf\u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u9875\u9762\u3002

                                                                    2. \u5728\u7b2c\u4e8c\u6b65 \u5bb9\u5668\u914d\u7f6e \u65f6\u9009\u62e9 \u57fa\u672c\u4fe1\u606f \u914d\u7f6e\uff0c\u70b9\u51fb \u9009\u62e9\u955c\u50cf \u6309\u94ae\u3002

                                                                    3. \u5728\u5f39\u6846\u7684 \u955c\u50cf\u4ed3\u5e93 \u4e0b\u62c9\u9009\u62e9\u79c1\u6709\u955c\u50cf\u4ed3\u5e93\u540d\u79f0\u3002\u5173\u4e8e\u79c1\u6709\u955c\u50cf\u5bc6\u94a5\u521b\u5efa\u8bf7\u67e5\u770b\u521b\u5efa\u5bc6\u94a5\u4e86\u89e3\u8be6\u60c5\u3002

                                                                    4. \u8f93\u5165\u79c1\u6709\u4ed3\u5e93\u5185\u7684\u955c\u50cf\u540d\u79f0\uff0c\u70b9\u51fb \u786e\u5b9a \uff0c\u5b8c\u6210\u955c\u50cf\u9009\u62e9\u3002

Note

When creating the secret, make sure to enter the correct image registry address, username, and password, and to select the correct image name; otherwise the images in the registry cannot be pulled.
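For reference, the command-line equivalent of this GUI flow is a Secret of type kubernetes.io/dockerconfigjson referenced by the Pod through imagePullSecrets. A minimal sketch follows; the registry address, credentials, and resource names are placeholders for illustration only.

apiVersion: v1\nkind: Secret\nmetadata:\n  name: my-registry-secret # placeholder name\ntype: kubernetes.io/dockerconfigjson\nstringData:\n  .dockerconfigjson: '{\"auths\":{\"registry.example.com\":{\"username\":\"user\",\"password\":\"pass\",\"auth\":\"dXNlcjpwYXNz\"}}}' # auth is base64 of user:pass\n---\napiVersion: v1\nkind: Pod\nmetadata:\n  name: private-image-pod # placeholder name\nspec:\n  containers:\n  - name: app\n    image: registry.example.com/myapp:latest # image in the private registry\n  imagePullSecrets:\n  - name: my-registry-secret # kubelet uses this secret when pulling the image\n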

                                                                    "},{"location":"end-user/kpanda/custom-resources/create.html","title":"\u521b\u5efa\u81ea\u5b9a\u4e49\u8d44\u6e90 (CRD)","text":"

                                                                    \u5728 Kubernetes \u4e2d\u4e00\u5207\u5bf9\u8c61\u90fd\u88ab\u62bd\u8c61\u4e3a\u8d44\u6e90\uff0c\u5982 Pod\u3001Deployment\u3001Service\u3001Volume \u7b49\u662f Kubernetes \u63d0\u4f9b\u7684\u9ed8\u8ba4\u8d44\u6e90\uff0c \u8fd9\u4e3a\u6211\u4eec\u7684\u65e5\u5e38\u8fd0\u7ef4\u548c\u7ba1\u7406\u5de5\u4f5c\u63d0\u4f9b\u4e86\u91cd\u8981\u652f\u6491\uff0c\u4f46\u662f\u5728\u4e00\u4e9b\u7279\u6b8a\u7684\u573a\u666f\u4e2d\uff0c\u73b0\u6709\u7684\u9884\u7f6e\u8d44\u6e90\u5e76\u4e0d\u80fd\u6ee1\u8db3\u4e1a\u52a1\u7684\u9700\u8981\uff0c \u56e0\u6b64\u6211\u4eec\u5e0c\u671b\u53bb\u6269\u5c55 Kubernetes API \u7684\u80fd\u529b\uff0c\u81ea\u5b9a\u4e49\u8d44\u6e90\uff08CustomResourceDefinition, CRD\uff09\u6b63\u662f\u57fa\u4e8e\u8fd9\u6837\u7684\u9700\u6c42\u5e94\u8fd0\u800c\u751f\u3002

                                                                    \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u652f\u6301\u5bf9\u81ea\u5b9a\u4e49\u8d44\u6e90\u7684\u754c\u9762\u5316\u7ba1\u7406\uff0c\u4e3b\u8981\u529f\u80fd\u5982\u4e0b\uff1a

                                                                    • \u83b7\u53d6\u96c6\u7fa4\u4e0b\u81ea\u5b9a\u4e49\u8d44\u6e90\u5217\u8868\u548c\u8be6\u7ec6\u4fe1\u606f
                                                                    • \u57fa\u4e8e YAML \u521b\u5efa\u81ea\u5b9a\u8d44\u6e90
                                                                    • \u57fa\u4e8e YAML \u521b\u5efa\u81ea\u5b9a\u4e49\u8d44\u6e90\u793a\u4f8b CR\uff08Custom Resource\uff09
                                                                    • \u5220\u9664\u81ea\u5b9a\u4e49\u8d44\u6e90
                                                                    "},{"location":"end-user/kpanda/custom-resources/create.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                    • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762

                                                                    • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u5c06\u7528\u6237\u6388\u6743\u4e3a Cluster Admin \u89d2\u8272 \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u6388\u6743

                                                                    "},{"location":"end-user/kpanda/custom-resources/create.html#yaml","title":"\u901a\u8fc7 YAML \u521b\u5efa\u81ea\u5b9a\u4e49\u8d44\u6e90","text":"
                                                                    1. \u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                                                    2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u81ea\u5b9a\u4e49\u8d44\u6e90 \uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 YAML \u521b\u5efa \u6309\u94ae\u3002

                                                                    3. \u5728 YAML \u521b\u5efa \u9875\u9762\u4e2d\uff0c\u586b\u5199 YAML \u8bed\u53e5\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                                                                    4. \u8fd4\u56de\u81ea\u5b9a\u4e49\u8d44\u6e90\u5217\u8868\u9875\uff0c\u5373\u53ef\u67e5\u770b\u521a\u521a\u521b\u5efa\u7684\u540d\u4e3a crontabs.stable.example.com \u7684\u81ea\u5b9a\u4e49\u8d44\u6e90\u3002

                                                                    \u81ea\u5b9a\u4e49\u8d44\u6e90\u793a\u4f8b\uff1a

                                                                    CRD example
                                                                    apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  name: crontabs.stable.example.com\nspec:\n  group: stable.example.com\n  versions:\n    - name: v1\n      served: true\n      storage: true\n      schema:\n        openAPIV3Schema:\n          type: object\n          properties:\n            spec:\n              type: object\n              properties:\n                cronSpec:\n                  type: string\n                image:\n                  type: string\n                replicas:\n                  type: integer\n  scope: Namespaced\n  names:\n    plural: crontabs\n    singular: crontab\n    kind: CronTab\n    shortNames:\n    - ct\n
                                                                    "},{"location":"end-user/kpanda/custom-resources/create.html#yaml_1","title":"\u901a\u8fc7 YAML \u521b\u5efa\u81ea\u5b9a\u4e49\u8d44\u6e90\u793a\u4f8b","text":"
                                                                    1. \u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                                                    2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u81ea\u5b9a\u4e49\u8d44\u6e90 \uff0c\u8fdb\u5165\u81ea\u5b9a\u4e49\u8d44\u6e90\u5217\u8868\u9875\u9762\u3002

                                                                    3. \u70b9\u51fb\u540d\u4e3a crontabs.stable.example.com \u7684\u81ea\u5b9a\u4e49\u8d44\u6e90\uff0c\u8fdb\u5165\u8be6\u60c5\uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 YAML \u521b\u5efa \u6309\u94ae\u3002

                                                                    4. \u5728 YAML \u521b\u5efa \u9875\u9762\u4e2d\uff0c\u586b\u5199 YAML \u8bed\u53e5\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                                                                    5. \u8fd4\u56de crontabs.stable.example.com \u7684\u8be6\u60c5\u9875\u9762\uff0c\u5373\u53ef\u67e5\u770b\u521a\u521a\u521b\u5efa\u7684\u540d\u4e3a my-new-cron-object \u7684\u81ea\u5b9a\u4e49\u8d44\u6e90\u3002

                                                                    CR \u793a\u4f8b\uff1a

                                                                    CR example
                                                                    apiVersion: \"stable.example.com/v1\"\nkind: CronTab\nmetadata:\n  name: my-new-cron-object\nspec:\n  cronSpec: \"* * * * */5\"\n  image: my-awesome-cron-image\n
                                                                    "},{"location":"end-user/kpanda/gpu/index.html","title":"GPU \u7ba1\u7406\u6982\u8ff0","text":"

                                                                    \u672c\u6587\u4ecb\u7ecd \u7b97\u4e30 AI \u7b97\u529b\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\u5bf9 GPU\u4e3a\u4ee3\u8868\u7684\u5f02\u6784\u8d44\u6e90\u7edf\u4e00\u8fd0\u7ef4\u7ba1\u7406\u80fd\u529b\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/index.html#_1","title":"\u80cc\u666f","text":"

                                                                    \u968f\u7740 AI \u5e94\u7528\u3001\u5927\u6a21\u578b\u3001\u4eba\u5de5\u667a\u80fd\u3001\u81ea\u52a8\u9a7e\u9a76\u7b49\u65b0\u5174\u6280\u672f\u7684\u5feb\u901f\u53d1\u5c55\uff0c\u4f01\u4e1a\u9762\u4e34\u7740\u8d8a\u6765\u8d8a\u591a\u7684\u8ba1\u7b97\u5bc6\u96c6\u578b\u4efb\u52a1\u548c\u6570\u636e\u5904\u7406\u9700\u6c42\u3002 \u4ee5 CPU \u4e3a\u4ee3\u8868\u7684\u4f20\u7edf\u8ba1\u7b97\u67b6\u6784\u5df2\u65e0\u6cd5\u6ee1\u8db3\u4f01\u4e1a\u65e5\u76ca\u589e\u957f\u7684\u8ba1\u7b97\u9700\u6c42\u3002\u6b64\u65f6\uff0c\u4ee5 GPU \u4e3a\u4ee3\u8868\u7684\u5f02\u6784\u8ba1\u7b97\u56e0\u5728\u5904\u7406\u5927\u89c4\u6a21\u6570\u636e\u3001\u8fdb\u884c\u590d\u6742\u8ba1\u7b97\u548c\u5b9e\u65f6\u56fe\u5f62\u6e32\u67d3\u65b9\u9762\u5177\u6709\u72ec\u7279\u7684\u4f18\u52bf\u88ab\u5e7f\u6cdb\u5e94\u7528\u3002

                                                                    \u4e0e\u6b64\u540c\u65f6\uff0c\u7531\u4e8e\u7f3a\u4e4f\u5f02\u6784\u8d44\u6e90\u8c03\u5ea6\u7ba1\u7406\u7b49\u65b9\u9762\u7684\u7ecf\u9a8c\u548c\u4e13\u4e1a\u7684\u89e3\u51b3\u65b9\u6848\uff0c\u5bfc\u81f4\u4e86 GPU \u8bbe\u5907\u7684\u8d44\u6e90\u5229\u7528\u7387\u6781\u4f4e\uff0c\u7ed9\u4f01\u4e1a\u5e26\u6765\u4e86\u9ad8\u6602\u7684 AI \u751f\u4ea7\u6210\u672c\u3002 \u5982\u4f55\u964d\u672c\u589e\u6548\uff0c\u63d0\u9ad8 GPU \u7b49\u5f02\u6784\u8d44\u6e90\u7684\u5229\u7528\u6548\u7387\uff0c\u6210\u4e3a\u4e86\u5f53\u524d\u4f17\u591a\u4f01\u4e1a\u4e9f\u9700\u8de8\u8d8a\u7684\u4e00\u9053\u96be\u9898\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/index.html#gpu_1","title":"GPU \u80fd\u529b\u4ecb\u7ecd","text":"

                                                                    \u7b97\u4e30 AI \u7b97\u529b\u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\u652f\u6301\u5bf9 GPU\u3001NPU \u7b49\u5f02\u6784\u8d44\u6e90\u8fdb\u884c\u7edf\u4e00\u8c03\u5ea6\u548c\u8fd0\u7ef4\u7ba1\u7406\uff0c\u5145\u5206\u91ca\u653e GPU \u8d44\u6e90\u7b97\u529b\uff0c\u52a0\u901f\u4f01\u4e1a AI \u7b49\u65b0\u5174\u5e94\u7528\u53d1\u5c55\u3002GPU \u7ba1\u7406\u80fd\u529b\u5982\u4e0b\uff1a

                                                                    • \u652f\u6301\u7edf\u4e00\u7eb3\u7ba1 NVIDIA\u3001\u534e\u4e3a\u6607\u817e\u3001\u5929\u6570\u7b49\u56fd\u5185\u5916\u5382\u5546\u7684\u5f02\u6784\u8ba1\u7b97\u8d44\u6e90\u3002
                                                                    • \u652f\u6301\u540c\u4e00\u96c6\u7fa4\u591a\u5361\u5f02\u6784\u8c03\u5ea6\uff0c\u5e76\u652f\u6301\u96c6\u7fa4 GPU \u5361\u81ea\u52a8\u8bc6\u522b\u3002
                                                                    • \u652f\u6301 NVIDIA GPU\u3001vGPU\u3001MIG \u7b49 GPU \u539f\u751f\u7ba1\u7406\u65b9\u6848\uff0c\u5e76\u63d0\u4f9b\u4e91\u539f\u751f\u80fd\u529b\u3002
                                                                    • \u652f\u6301\u5355\u5757\u7269\u7406\u5361\u5207\u5206\u7ed9\u4e0d\u540c\u7684\u79df\u6237\u4f7f\u7528\uff0c\u5e76\u652f\u6301\u5bf9\u79df\u6237\u548c\u5bb9\u5668\u4f7f\u7528 GPU \u8d44\u6e90\u6309\u7167\u7b97\u529b\u3001\u663e\u5b58\u8fdb\u884c GPU \u8d44\u6e90\u914d\u989d\u3002
                                                                    • \u652f\u6301\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u5e94\u7528\u7b49\u591a\u7ef4\u5ea6 GPU \u8d44\u6e90\u76d1\u63a7\uff0c\u5e2e\u52a9\u8fd0\u7ef4\u4eba\u5458\u7ba1\u7406 GPU \u8d44\u6e90\u3002
                                                                    • \u517c\u5bb9 TensorFlow\u3001pytorch \u7b49\u591a\u79cd\u8bad\u7ec3\u6846\u67b6\u3002
                                                                    "},{"location":"end-user/kpanda/gpu/index.html#gpu-operator","title":"GPU Operator \u4ecb\u7ecd","text":"

                                                                    \u540c\u666e\u901a\u8ba1\u7b97\u673a\u786c\u4ef6\u4e00\u6837\uff0cNVIDIA GPU \u5361\u4f5c\u4e3a\u7269\u7406\u786c\u4ef6\uff0c\u5fc5\u987b\u5b89\u88c5 NVIDIA GPU \u9a71\u52a8\u540e\u624d\u80fd\u4f7f\u7528\u3002 \u4e3a\u4e86\u964d\u4f4e\u7528\u6237\u5728 kuberneets \u4e0a\u4f7f\u7528 GPU \u7684\u6210\u672c\uff0cNVIDIA \u5b98\u65b9\u63d0\u4f9b\u4e86 NVIDIA GPU Operator \u7ec4\u4ef6\u6765\u7ba1\u7406\u4f7f\u7528 NVIDIA GPU \u6240\u4f9d\u8d56\u7684\u5404\u79cd\u7ec4\u4ef6\u3002 \u8fd9\u4e9b\u7ec4\u4ef6\u5305\u62ec NVIDIA \u9a71\u52a8\u7a0b\u5e8f\uff08\u7528\u4e8e\u542f\u7528 CUDA\uff09\u3001NVIDIA \u5bb9\u5668\u8fd0\u884c\u65f6\u3001GPU \u8282\u70b9\u6807\u8bb0\u3001\u57fa\u4e8e DCGM \u7684\u76d1\u63a7\u7b49\u3002 \u7406\u8bba\u4e0a\u6765\u8bf4\u7528\u6237\u53ea\u9700\u8981\u5c06 GPU \u5361\u63d2\u5728\u5df2\u7ecf\u88ab kubernetes \u6240\u7eb3\u7ba1\u7684\u8ba1\u7b97\u8bbe\u5907\u4e0a\uff0c\u7136\u540e\u901a\u8fc7 GPU Operator \u5c31\u80fd\u4f7f\u7528 NVIDIA GPU \u7684\u6240\u6709\u80fd\u529b\u4e86\u3002 \u4e86\u89e3\u66f4\u591a NVIDIA GPU Operator \u76f8\u5173\u4fe1\u606f\uff0c\u8bf7\u53c2\u8003 NVIDIA \u5b98\u65b9\u6587\u6863\u3002 \u5982\u4f55\u90e8\u7f72\u8bf7\u53c2\u8003 GPU Operator \u79bb\u7ebf\u5b89\u88c5

                                                                    NVIDIA GPU Operator \u67b6\u6784\u56fe\uff1a

                                                                    "},{"location":"end-user/kpanda/gpu/FAQ.html","title":"GPU \u76f8\u5173 FAQ","text":""},{"location":"end-user/kpanda/gpu/FAQ.html#pod-nvidia-smi-gpu","title":"Pod \u5185 nvidia-smi \u770b\u4e0d\u5230 GPU \u8fdb\u7a0b","text":"

                                                                    Q: \u5728\u4f7f\u7528 GPU \u7684 Pod \u5185\u6267\u884c nvidia-smi \u547d\u4ee4\u770b\u4e0d\u5230\u4f7f\u7528 GPU \u7684\u8fdb\u7a0b\u4fe1\u606f\uff0c\u5305\u62ec\u6574\u5361\u6a21\u5f0f\u3001vGPU \u6a21\u5f0f\u7b49\u3002

                                                                    A: \u56e0\u4e3a\u6709 PID namespace \u9694\u79bb\uff0c\u5bfc\u81f4\u5728 Pod \u5185\u67e5\u770b\u4e0d\u5230 GPU \u8fdb\u7a0b\uff0c\u5982\u679c\u8981\u67e5\u770b GPU \u8fdb\u7a0b\u6709\u5982\u4e0b\u51e0\u79cd\u65b9\u6cd5\uff1a

                                                                    • \u5728\u4f7f\u7528 GPU \u7684\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e hostPID: true\uff0c\u4f7f\u5176\u53ef\u4ee5\u67e5\u770b\u5230\u5bbf\u4e3b\u673a\u4e0a\u7684 PID
                                                                    • \u5728 gpu-operator \u7684 driver Pod \u4e2d\u6267\u884c nvidia-smi \u547d\u4ee4\u67e5\u770b\u8fdb\u7a0b
                                                                    • \u5728\u5bbf\u4e3b\u673a\u4e0a\u6267\u884c chroot /run/nvidia/driver nvidia-smi \u547d\u4ee4\u67e5\u770b\u8fdb\u7a0b
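As a minimal sketch of the first method, the Pod below shares the host PID namespace so that nvidia-smi run inside it can see host GPU processes; the Pod name is hypothetical and any CUDA-capable image would work.

apiVersion: v1\nkind: Pod\nmetadata:\n  name: gpu-debug-pod # hypothetical name for illustration\nspec:\n  hostPID: true # share the host PID namespace so nvidia-smi can see GPU processes\n  containers:\n  - name: cuda\n    image: nvidia/cuda:12.2.0-base-ubuntu22.04\n    command: [\"sleep\", \"infinity\"]\n    resources:\n      limits:\n        nvidia.com/gpu: 1\n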
                                                                    "},{"location":"end-user/kpanda/gpu/Iluvatar_usage.html","title":"App \u4f7f\u7528\u5929\u6570\u667a\u82af\uff08Iluvatar\uff09GPU","text":"

                                                                    \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528\u5929\u6570\u667a\u82af\u865a\u62df GPU\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/Iluvatar_usage.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                    • \u5df2\u7ecf\u90e8\u7f72 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 \u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u4e14\u5e73\u53f0\u8fd0\u884c\u6b63\u5e38\u3002
                                                                    • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                                                                    • \u5f53\u524d\u96c6\u7fa4\u5df2\u5b89\u88c5\u5929\u6570\u667a\u82af GPU \u9a71\u52a8\uff0c\u9a71\u52a8\u5b89\u88c5\u8bf7\u53c2\u8003\u5929\u6570\u667a\u82af\u5b98\u65b9\u6587\u6863\u3002
                                                                    • \u5f53\u524d\u96c6\u7fa4\u5185 GPU \u5361\u672a\u8fdb\u884c\u4efb\u4f55\u865a\u62df\u5316\u64cd\u4f5c\u4e14\u672a\u88ab\u5176\u5b83 App \u5360\u7528\u3002
                                                                    "},{"location":"end-user/kpanda/gpu/Iluvatar_usage.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"end-user/kpanda/gpu/Iluvatar_usage.html#_3","title":"\u4f7f\u7528\u754c\u9762\u914d\u7f6e","text":"
                                                                    1. \u786e\u8ba4\u96c6\u7fa4\u662f\u5426\u5df2\u68c0\u6d4b GPU \u5361\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u81ea\u52a8\u542f\u7528\u5e76\u81ea\u52a8\u68c0\u6d4b\u5bf9\u5e94 GPU \u7c7b\u578b\u3002 \u76ee\u524d\u96c6\u7fa4\u4f1a\u81ea\u52a8\u542f\u7528 GPU \uff0c\u5e76\u4e14\u8bbe\u7f6e GPU \u7c7b\u578b\u4e3a Iluvatar \u3002

                                                                    2. \u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u9009\u62e9\u7c7b\u578b\uff08Iluvatar\uff09\u4e4b\u540e\uff0c\u9700\u8981\u914d\u7f6e App \u4f7f\u7528\u7684 GPU \u8d44\u6e90\uff1a

                                                                      • \u7269\u7406\u5361\u6570\u91cf\uff08iluvatar.ai/vcuda-core\uff09\uff1a\u8868\u793a\u5f53\u524d Pod \u9700\u8981\u6302\u8f7d\u51e0\u5f20\u7269\u7406\u5361\uff0c\u8f93\u5165\u503c\u5fc5\u987b\u4e3a\u6574\u6570\u4e14 \u5c0f\u4e8e\u7b49\u4e8e \u5bbf\u4e3b\u673a\u4e0a\u7684\u5361\u6570\u91cf\u3002
                                                                      • \u663e\u5b58\u4f7f\u7528\u6570\u91cf\uff08iluvatar.ai/vcuda-memory\uff09\uff1a\u8868\u793a\u6bcf\u5f20\u5361\u5360\u7528\u7684 GPU \u663e\u5b58\uff0c\u503c\u5355\u4f4d\u4e3a MB\uff0c\u6700\u5c0f\u503c\u4e3a 1\uff0c\u6700\u5927\u503c\u4e3a\u6574\u5361\u7684\u663e\u5b58\u503c\u3002

                                                                      \u5982\u679c\u4e0a\u8ff0\u503c\u914d\u7f6e\u7684\u6709\u95ee\u9898\u5219\u4f1a\u51fa\u73b0\u8c03\u5ea6\u5931\u8d25\uff0c\u8d44\u6e90\u5206\u914d\u4e0d\u4e86\u7684\u60c5\u51b5\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/Iluvatar_usage.html#yaml","title":"\u4f7f\u7528 YAML \u914d\u7f6e","text":"

                                                                    \u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u7533\u8bf7 GPU \u8d44\u6e90\uff0c\u5728\u8d44\u6e90\u7533\u8bf7\u548c\u9650\u5236\u914d\u7f6e\u4e2d\u589e\u52a0iluvatar.ai/vcuda-core: 1\u3001iluvatar.ai/vcuda-memory: 200 \u53c2\u6570\uff0c\u914d\u7f6e App \u4f7f\u7528\u7269\u7406\u5361\u7684\u8d44\u6e90\u3002

                                                                    apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-iluvatar-gpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-iluvatar-gpu-demo\n  template:\n    metadata:\n      labels:\n        app: full-iluvatar-gpu-demo\n    spec:\n      containers:\n      - image: nginx:perl\n        name: container-0\n        resources:\n          limits:\n            cpu: 250m\n            iluvatar.ai/vcuda-core: '1'\n            iluvatar.ai/vcuda-memory: '200'\n            memory: 512Mi\n          requests:\n            cpu: 250m\n            memory: 512Mi\n      imagePullSecrets:\n      - name: default-secret\n
                                                                    "},{"location":"end-user/kpanda/gpu/dynamic-regulation.html","title":"GPU \u8d44\u6e90\u52a8\u6001\u8c03\u8282","text":"

                                                                    \u63d0\u4f9b GPU \u8d44\u6e90\u52a8\u6001\u8c03\u6574\u529f\u80fd\uff0c\u5141\u8bb8\u60a8\u5728\u65e0\u9700\u91cd\u65b0\u52a0\u8f7d\u3001\u91cd\u7f6e\u6216\u91cd\u542f\u6574\u4e2a\u8fd0\u884c\u73af\u5883\u7684\u60c5\u51b5\u4e0b\uff0c\u5bf9\u5df2\u7ecf\u5206\u914d\u7684 vGPU \u8d44\u6e90\u8fdb\u884c\u5b9e\u65f6\u3001\u52a8\u6001\u7684\u8c03\u6574\u3002 \u8fd9\u4e00\u529f\u80fd\u65e8\u5728\u6700\u5927\u7a0b\u5ea6\u5730\u51cf\u5c11\u5bf9\u4e1a\u52a1\u8fd0\u884c\u7684\u5f71\u54cd\uff0c\u786e\u4fdd\u60a8\u7684\u4e1a\u52a1\u80fd\u591f\u6301\u7eed\u7a33\u5b9a\u5730\u8fd0\u884c\uff0c\u540c\u65f6\u6839\u636e\u5b9e\u9645\u9700\u6c42\u7075\u6d3b\u8c03\u6574 GPU \u8d44\u6e90\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/dynamic-regulation.html#_1","title":"\u4f7f\u7528\u573a\u666f","text":"
                                                                    • \u5f39\u6027\u8d44\u6e90\u5206\u914d \uff1a\u5f53\u4e1a\u52a1\u9700\u6c42\u6216\u5de5\u4f5c\u8d1f\u8f7d\u53d1\u751f\u53d8\u5316\u65f6\uff0c\u53ef\u4ee5\u5feb\u901f\u8c03\u6574 GPU \u8d44\u6e90\u4ee5\u6ee1\u8db3\u65b0\u7684\u6027\u80fd\u8981\u6c42\u3002
                                                                    • \u5373\u65f6\u54cd\u5e94 \uff1a\u5728\u9762\u5bf9\u7a81\u53d1\u7684\u9ad8\u8d1f\u8f7d\u6216\u4e1a\u52a1\u9700\u6c42\u65f6\uff0c\u53ef\u4ee5\u8fc5\u901f\u589e\u52a0 GPU \u8d44\u6e90\u800c\u65e0\u9700\u4e2d\u65ad\u4e1a\u52a1\u8fd0\u884c\uff0c\u4ee5\u786e\u4fdd\u670d\u52a1\u7684\u7a33\u5b9a\u6027\u548c\u6027\u80fd\u3002
                                                                    "},{"location":"end-user/kpanda/gpu/dynamic-regulation.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                                    \u4ee5\u4e0b\u662f\u4e00\u4e2a\u5177\u4f53\u7684\u64cd\u4f5c\u793a\u4f8b\uff0c\u5c55\u793a\u5982\u4f55\u5728\u4e0d\u91cd\u542f vGPU Pod \u7684\u60c5\u51b5\u4e0b\u52a8\u6001\u8c03\u6574 vGPU \u7684\u7b97\u529b\u548c\u663e\u5b58\u8d44\u6e90\uff1a

                                                                    "},{"location":"end-user/kpanda/gpu/dynamic-regulation.html#vgpu-pod","title":"\u521b\u5efa\u4e00\u4e2a vGPU Pod","text":"

                                                                    \u9996\u5148\uff0c\u6211\u4eec\u4f7f\u7528\u4ee5\u4e0b YAML \u521b\u5efa\u4e00\u4e2a vGPU Pod\uff0c\u5176\u7b97\u529b\u521d\u59cb\u4e0d\u9650\u5236\uff0c\u663e\u5b58\u9650\u5236\u4e3a 200Mb\u3002

                                                                    kind: Deployment\napiVersion: apps/v1\nmetadata:\n  name: gpu-burn-test\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: gpu-burn-test\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: gpu-burn-test\n    spec:\n      containers:\n        - name: container-1\n          image: docker.io/chrstnhntschl/gpu_burn:latest\n          command:\n            - sleep\n            - '100000'\n          resources:\n            limits:\n              cpu: 1m\n              memory: 1Gi\n              nvidia.com/gpucores: '0'\n              nvidia.com/gpumem: '200'\n              nvidia.com/vgpu: '1'\n

Before the adjustment, check the GPU resources allocated to the Pod:

"},{"location":"end-user/kpanda/gpu/dynamic-regulation.html#_3","title":"Dynamically Adjust Compute Power","text":"

To change the compute power limit to 10%, follow these steps:

1. Enter the container:

                                                                      kubectl exec -it <pod-name> -- /bin/bash\n
2. Run:

                                                                      export CUDA_DEVICE_SM_LIMIT=10\n
3. Run directly in the current terminal:

                                                                      ./gpu_burn 60\n

The new limit takes effect for this run. Note that you must not exit the current Bash terminal.

                                                                    "},{"location":"end-user/kpanda/gpu/dynamic-regulation.html#_4","title":"\u52a8\u6001\u8c03\u6574\u663e\u5b58","text":"

                                                                    \u5982\u679c\u9700\u8981\u4fee\u6539\u663e\u5b58\u4e3a 300 MB\uff0c\u53ef\u4ee5\u6309\u7167\u4ee5\u4e0b\u6b65\u9aa4\u64cd\u4f5c\uff1a

                                                                    1. \u8fdb\u5165\u5bb9\u5668\uff1a

                                                                      kubectl exec -it <pod-name> -- /bin/bash\n
2. Run the following commands to set the memory limit:

                                                                      export CUDA_DEVICE_MEMORY_LIMIT_0=300m\nexport CUDA_DEVICE_MEMORY_SHARED_CACHE=/usr/local/vgpu/d.cache\n

Note

Each time you change the memory size, the file name d.cache must be changed as well, for example to a.cache, 1.cache, and so on, to avoid cache conflicts.

3. Run directly in the current terminal:

                                                                      ./gpu_burn 60\n

The new limit takes effect for this run. Likewise, do not exit the current Bash terminal.

After the adjustment, check the GPU resources allocated to the Pod:

Through the steps above, you can dynamically adjust a vGPU Pod's compute power and memory resources without restarting it, meeting business needs more flexibly and optimizing resource utilization.

                                                                    "},{"location":"end-user/kpanda/gpu/gpu_matrix.html","title":"GPU \u652f\u6301\u77e9\u9635","text":"

                                                                    \u672c\u9875\u8bf4\u660e\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u7684 GPU \u53ca\u64cd\u4f5c\u7cfb\u7edf\u6240\u5bf9\u5e94\u7684\u77e9\u9635\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/gpu_matrix.html#nvidia-gpu","title":"NVIDIA GPU","text":"GPU \u5382\u5546\u53ca\u7c7b\u578b \u652f\u6301 GPU \u578b\u53f7 \u9002\u914d\u7684\u64cd\u4f5c\u7cfb\u7edf\uff08\u5728\u7ebf\uff09 \u63a8\u8350\u5185\u6838 \u63a8\u8350\u7684\u64cd\u4f5c\u7cfb\u7edf\u53ca\u5185\u6838 \u5b89\u88c5\u6587\u6863 NVIDIA GPU\uff08\u6574\u5361/vGPU\uff09 NVIDIA Fermi (2.1) \u67b6\u6784 CentOS 7 Kernel 3.10.0-123 ~ 3.10.0-1160\u5185\u6838\u53c2\u8003\u6587\u6863\u5efa\u8bae\u4f7f\u7528\u64cd\u4f5c\u7cfb\u7edf\u5bf9\u5e94 Kernel \u7248\u672c \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a 3.10.0-1160 GPU Operator \u79bb\u7ebf\u5b89\u88c5 NVIDIA GeForce 400 \u7cfb\u5217 CentOS 8 Kernel 4.18.0-80 ~ 4.18.0-348 NVIDIA Quadro 4000 \u7cfb\u5217 Ubuntu 20.04 Kernel 5.4 NVIDIA Tesla 20 \u7cfb\u5217 Ubuntu 22.04 Kernel 5.19 NVIDIA Ampere \u67b6\u6784\u7cfb\u5217(A100;A800;H100) RHEL 7 Kernel 3.10.0-123 ~ 3.10.0-1160 RHEL 8 Kernel 4.18.0-80 ~ 4.18.0-348 NVIDIA MIG NVIDIA Ampere \u67b6\u6784\u7cfb\u5217\uff08A100\u3001A800\u3001H100\uff09 CentOS 7 Kernel 3.10.0-123 ~ 3.10.0-1160 \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a3.10.0-1160 GPU Operator \u79bb\u7ebf\u5b89\u88c5 CentOS 8 Kernel 4.18.0-80 ~ 4.18.0-348 Ubuntu 20.04 Kernel 5.4 Ubuntu 22.04 Kernel 5.19 RHEL 7 Kernel 3.10.0-123 ~ 3.10.0-1160 RHEL 8 Kernel 4.18.0-80 ~ 4.18.0-348"},{"location":"end-user/kpanda/gpu/gpu_matrix.html#ascendnpu","title":"\u6607\u817e\uff08Ascend\uff09NPU","text":"GPU \u5382\u5546\u53ca\u7c7b\u578b \u652f\u6301 NPU \u578b\u53f7 \u9002\u914d\u7684\u64cd\u4f5c\u7cfb\u7edf\uff08\u5728\u7ebf\uff09 \u63a8\u8350\u5185\u6838 \u63a8\u8350\u7684\u64cd\u4f5c\u7cfb\u7edf\u53ca\u5185\u6838 \u5b89\u88c5\u6587\u6863 \u6607\u817e\uff08Ascend 310\uff09 Ascend 310 Ubuntu 20.04 \u8be6\u60c5\u53c2\u8003\uff1a\u5185\u6838\u7248\u672c\u8981\u6c42 \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a3.10.0-1160 300 \u548c 310P \u9a71\u52a8\u6587\u6863 Ascend 310P\uff1b CentOS 7.6 CentOS 8.2 KylinV10SP1 \u64cd\u4f5c\u7cfb\u7edf openEuler \u64cd\u4f5c\u7cfb\u7edf \u6607\u817e\uff08Ascend 910\uff09 Ascend 910B Ubuntu 20.04 \u8be6\u60c5\u53c2\u8003\u5185\u6838\u7248\u672c\u8981\u6c42 \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a3.10.0-1160 910 \u9a71\u52a8\u6587\u6863 CentOS 7.6 CentOS 8.2 KylinV10SP1 \u64cd\u4f5c\u7cfb\u7edf openEuler \u64cd\u4f5c\u7cfb\u7edf"},{"location":"end-user/kpanda/gpu/gpu_matrix.html#iluvatargpu","title":"\u5929\u6570\u667a\u82af\uff08Iluvatar\uff09GPU","text":"GPU \u5382\u5546\u53ca\u7c7b\u578b \u652f\u6301\u7684 GPU \u578b\u53f7 \u9002\u914d\u7684\u64cd\u4f5c\u7cfb\u7edf\uff08\u5728\u7ebf\uff09 \u63a8\u8350\u5185\u6838 \u63a8\u8350\u7684\u64cd\u4f5c\u7cfb\u7edf\u53ca\u5185\u6838 \u5b89\u88c5\u6587\u6863 \u5929\u6570\u667a\u82af(Iluvatar vGPU) BI100 CentOS 7 Kernel 3.10.0-957.el7.x86_64 ~ 3.10.0-1160.42.2.el7.x86_64 \u64cd\u4f5c\u7cfb\u7edf\uff1aCentOS 7.9\uff1b\u5185\u6838\u7248\u672c\uff1a 3.10.0-1160 \u8865\u5145\u4e2d MR100\uff1b CentOS 8 Kernel 4.18.0-80.el8.x86_64 ~ 4.18.0-305.19.1.el8_4.x86_64 Ubuntu 20.04 Kernel 4.15.0-20-generic ~ 4.15.0-160-generic Kernel 5.4.0-26-generic ~ 5.4.0-89-generic Kernel 5.8.0-23-generic ~ 5.8.0-63-generic Ubuntu 21.04 Kernel 4.15.0-20-generic ~ 4.15.0-160-generic Kernel 5.4.0-26-generic ~ 5.4.0-89-generic Kernel 5.8.0-23-generic ~ 5.8.0-63-generic openEuler 22.03 LTS Kernel 
\u7248\u672c\u5927\u4e8e\u7b49\u4e8e 5.1 \u4e14\u5c0f\u4e8e\u7b49\u4e8e 5.10"},{"location":"end-user/kpanda/gpu/gpu_matrix.html#metaxgpu","title":"\u6c90\u66e6\uff08Metax\uff09GPU","text":"GPU \u5382\u5546\u53ca\u7c7b\u578b \u652f\u6301\u7684 GPU \u578b\u53f7 \u9002\u914d\u7684\u64cd\u4f5c\u7cfb\u7edf\uff08\u5728\u7ebf\uff09 \u63a8\u8350\u5185\u6838 \u63a8\u8350\u7684\u64cd\u4f5c\u7cfb\u7edf\u53ca\u5185\u6838 \u5b89\u88c5\u6587\u6863 \u6c90\u66e6Metax\uff08\u6574\u5361/vGPU\uff09 \u66e6\u4e91 C500 \u6c90\u66e6 GPU \u5b89\u88c5\u4f7f\u7528"},{"location":"end-user/kpanda/gpu/gpu_scheduler_config.html","title":"GPU \u8c03\u5ea6\u914d\u7f6e\uff08Binpack \u548c Spread \uff09","text":"

This article describes how, when using NVIDIA vGPU, the Binpack and Spread GPU scheduling configurations can reduce GPU resource fragmentation, prevent single points of failure, and enable advanced vGPU scheduling. The 算丰 AI computing platform provides Binpack and Spread scheduling policies at both the cluster and workload levels, meeting the needs of different scenarios.

"},{"location":"end-user/kpanda/gpu/gpu_scheduler_config.html#_1","title":"Prerequisites","text":"
• GPU devices are correctly installed on the cluster nodes.
• The gpu-operator component and the Nvidia-vgpu component are correctly installed in the cluster.
• The NVIDIA-vGPU type appears under GPU mode in the cluster's node list.
"},{"location":"end-user/kpanda/gpu/gpu_scheduler_config.html#_2","title":"Use Cases","text":"
• Scheduling policies based on the GPU card dimension

  • Binpack: prefer the same GPU card on a node, improving GPU utilization and reducing resource fragmentation.
  • Spread: distribute Pods across different GPU cards on a node, suitable for high-availability scenarios to avoid single-card failures.
• Scheduling policies based on the node dimension (see the sketch after this list)

  • Binpack: prefer the same node for multiple Pods, improving GPU utilization and reducing resource fragmentation.
  • Spread: distribute Pods across different nodes, suitable for high-availability scenarios to avoid single-node failures.
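For reference, schedulers in this family usually also accept the policy per workload through Pod annotations. The sketch below assumes a HAMi-style vGPU scheduler; the annotation keys are assumptions and may differ on your platform, so treat this as an illustration rather than the platform's documented API.

apiVersion: v1\nkind: Pod\nmetadata:\n  name: vgpu-policy-demo # hypothetical name\n  annotations:\n    hami.io/node-scheduler-policy: \"binpack\" # assumed key: node-dimension policy\n    hami.io/gpu-scheduler-policy: \"spread\" # assumed key: GPU-card-dimension policy\nspec:\n  containers:\n  - name: cuda\n    image: nvidia/cuda:12.2.0-base-ubuntu22.04\n    command: [\"sleep\", \"infinity\"]\n    resources:\n      limits:\n        nvidia.com/vgpu: 1\n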
                                                                    "},{"location":"end-user/kpanda/gpu/gpu_scheduler_config.html#binpack-spread","title":"\u96c6\u7fa4\u7ef4\u5ea6\u4f7f\u7528 Binpack \u548c Spread \u8c03\u5ea6\u914d\u7f6e","text":"

                                                                    Note

                                                                    \u9ed8\u8ba4\u60c5\u51b5\u4e0b\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u4f1a\u9075\u5faa\u96c6\u7fa4\u7ea7\u522b\u7684 Binpack \u548c Spread \u8c03\u5ea6\u914d\u7f6e\u3002 \u82e5\u5de5\u4f5c\u8d1f\u8f7d\u5355\u72ec\u8bbe\u7f6e\u4e86\u4e0e\u96c6\u7fa4\u4e0d\u4e00\u81f4\u7684 Binpack \u548c Spread \u8c03\u5ea6\u7b56\u7565\uff0c\u5219\u8be5\u5de5\u4f5c\u8d1f\u8f7d\u4f18\u5148\u9075\u5faa\u5176\u672c\u8eab\u7684\u8c03\u5ea6\u7b56\u7565\u3002

                                                                    1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9009\u62e9\u9700\u8981\u8c03\u6574 Binpack \u548c Spread \u8c03\u5ea6\u7b56\u7565\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u56fe\u6807\u5e76\u5728\u4e0b\u62c9\u5217\u8868\u4e2d\u70b9\u51fb GPU \u8c03\u5ea6\u914d\u7f6e \u3002

                                                                    2. \u6839\u636e\u4e1a\u52a1\u573a\u666f\u8c03\u6574 GPU \u8c03\u5ea6\u914d\u7f6e\uff0c\u5e76\u70b9\u51fb \u786e\u5b9a \u540e\u4fdd\u5b58\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/gpu_scheduler_config.html#binpack-spread_1","title":"\u5de5\u4f5c\u8d1f\u8f7d\u7ef4\u5ea6\u4f7f\u7528 Binpack \u548c Spread \u8c03\u5ea6\u914d\u7f6e","text":"

                                                                    Note

                                                                    \u5f53\u5de5\u4f5c\u8d1f\u8f7d\u7ef4\u5ea6\u7684 Binpack \u548c Spread \u8c03\u5ea6\u7b56\u7565\u4e0e\u96c6\u7fa4\u7ea7\u522b\u7684\u914d\u7f6e\u51b2\u7a81\u65f6\uff0c\u4f18\u5148\u9075\u5faa\u5de5\u4f5c\u8d1f\u8f7d\u7ef4\u5ea6\u7684\u914d\u7f6e\u3002

                                                                    \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u65e0\u72b6\u6001\u8d1f\u8f7d\uff0c\u5e76\u5728\u5de5\u4f5c\u8d1f\u8f7d\u4e2d\u914d\u7f6e Binpack \u548c Spread \u8c03\u5ea6\u7b56\u7565 \u3002

                                                                    1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                                                    2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u65e0\u72b6\u6001\u8d1f\u8f7d \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                                                                    3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\uff0c\u5e76\u5728 \u5bb9\u5668\u914d\u7f6e \u4e2d\u542f\u7528 GPU \u914d\u7f6e\uff0c\u9009\u62e9 GPU \u7c7b\u578b\u4e3a NVIDIA vGPU\uff0c \u70b9\u51fb \u9ad8\u7ea7\u8bbe\u7f6e \uff0c\u542f\u7528 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\uff0c\u6839\u636e\u4e1a\u52a1\u573a\u666f\u8c03\u6574 GPU \u8c03\u5ea6\u914d\u7f6e\u3002\u914d\u7f6e\u5b8c\u6210\u540e\u70b9\u51fb \u4e0b\u4e00\u6b65 \uff0c \u8fdb\u5165 \u670d\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/vgpu_quota.html","title":"GPU \u914d\u989d\u7ba1\u7406","text":"

                                                                    \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528 vGPU \u80fd\u529b\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/vgpu_quota.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                                    \u5f53\u524d\u96c6\u7fa4\u5df2\u901a\u8fc7 Operator \u6216\u624b\u52a8\u65b9\u5f0f\u90e8\u7f72\u5bf9\u5e94\u7c7b\u578b GPU \u9a71\u52a8\uff08NVIDIA GPU\u3001NVIDIA MIG\u3001\u5929\u6570\u3001\u6607\u817e\uff09

                                                                    "},{"location":"end-user/kpanda/gpu/vgpu_quota.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                                    1. \u8fdb\u5165 Namespaces \u4e2d\uff0c\u70b9\u51fb \u914d\u989d\u7ba1\u7406 \u53ef\u4ee5\u914d\u7f6e\u5f53\u524d Namespace \u53ef\u4ee5\u4f7f\u7528\u7684 GPU \u8d44\u6e90\u3002

                                                                    2. \u5f53\u524d\u547d\u540d\u7a7a\u95f4\u914d\u989d\u7ba1\u7406\u8986\u76d6\u7684\u5361\u7c7b\u578b\u4e3a\uff1aNVIDIA vGPU\u3001NVIDIA MIG\u3001\u5929\u6570\u3001\u6607\u817e\u3002

                                                                      NVIDIA vGPU \u914d\u989d\u7ba1\u7406 \uff1a\u914d\u7f6e\u5177\u4f53\u53ef\u4ee5\u4f7f\u7528\u7684\u914d\u989d\uff0c\u4f1a\u521b\u5efa ResourcesQuota CR\uff1a

                                                                      • \u7269\u7406\u5361\u6570\u91cf\uff08nvidia.com/vgpu\uff09\uff1a\u8868\u793a\u5f53\u524d POD \u9700\u8981\u6302\u8f7d\u51e0\u5f20\u7269\u7406\u5361\uff0c\u5e76\u4e14\u8981 \u5c0f\u4e8e\u7b49\u4e8e \u5bbf\u4e3b\u673a\u4e0a\u7684\u5361\u6570\u91cf\u3002
                                                                      • GPU \u7b97\u529b\uff08nvidia.com/gpucores\uff09\uff1a\u8868\u793a\u6bcf\u5f20\u5361\u5360\u7528\u7684 GPU \u7b97\u529b\uff0c\u503c\u8303\u56f4\u4e3a 0-100\uff1b\u5982\u679c\u914d\u7f6e\u4e3a 0\uff0c\u5219\u8ba4\u4e3a\u4e0d\u5f3a\u5236\u9694\u79bb\uff1b\u914d\u7f6e\u4e3a 100\uff0c\u5219\u8ba4\u4e3a\u72ec\u5360\u6574\u5f20\u5361\u3002
                                                                      • GPU \u663e\u5b58\uff08nvidia.com/gpumem\uff09\uff1a\u8868\u793a\u6bcf\u5f20\u5361\u5360\u7528\u7684 GPU \u663e\u5b58\uff0c\u503c\u5355\u4f4d\u4e3a MB\uff0c\u6700\u5c0f\u503c\u4e3a 1\uff0c\u6700\u5927\u503c\u4e3a\u6574\u5361\u7684\u663e\u5b58\u503c\u3002
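As a minimal sketch of what these quota dimensions correspond to at the workload level, a Pod requests them as extended resources; the name and values below are for illustration only.

apiVersion: v1\nkind: Pod\nmetadata:\n  name: vgpu-quota-demo # hypothetical name\nspec:\n  containers:\n  - name: cuda\n    image: nvidia/cuda:12.2.0-base-ubuntu22.04\n    command: [\"sleep\", \"infinity\"]\n    resources:\n      limits:\n        nvidia.com/vgpu: 1 # number of physical cards to mount\n        nvidia.com/gpucores: 50 # 50% of each card's compute power\n        nvidia.com/gpumem: 4096 # 4096 MB of GPU memory per card\n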

                                                                    "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html","title":"\u6607\u817e NPU \u7ec4\u4ef6\u5b89\u88c5","text":"

                                                                    \u672c\u7ae0\u8282\u63d0\u4f9b\u6607\u817e NPU \u9a71\u52a8\u3001Device Plugin\u3001NPU-Exporter \u7b49\u7ec4\u4ef6\u7684\u5b89\u88c5\u6307\u5bfc\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                    1. \u5b89\u88c5\u524d\u8bf7\u786e\u8ba4\u652f\u6301\u7684 NPU \u578b\u53f7\uff0c\u8be6\u60c5\u8bf7\u53c2\u8003\u6607\u817e NPU \u77e9\u9635
                                                                    2. \u8bf7\u786e\u8ba4 \u5bf9\u5e94 NPU \u578b\u53f7\u6240\u8981\u6c42\u7684\u5185\u6838\u7248\u672c\u662f\u5426\u5339\u914d\uff0c\u8be6\u60c5\u8bf7\u53c2\u8003\u6607\u817e NPU \u77e9\u9635
                                                                    3. \u51c6\u5907 Kubernetes \u57fa\u7840\u73af\u5883
                                                                    "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html#_2","title":"\u5b89\u88c5\u6b65\u9aa4","text":"

                                                                    \u4f7f\u7528 NPU \u8d44\u6e90\u4e4b\u524d\uff0c\u9700\u8981\u5b8c\u6210\u56fa\u4ef6\u5b89\u88c5\u3001NPU \u9a71\u52a8\u5b89\u88c5\u3001 Docker Runtime \u5b89\u88c5\u3001\u7528\u6237\u521b\u5efa\u3001\u65e5\u5fd7\u76ee\u5f55\u521b\u5efa\u4ee5\u53ca NPU Device Plugin \u5b89\u88c5\uff0c\u8be6\u60c5\u53c2\u8003\u5982\u4e0b\u6b65\u9aa4\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html#_3","title":"\u5b89\u88c5\u56fa\u4ef6","text":"
                                                                    1. \u5b89\u88c5\u524d\u8bf7\u786e\u8ba4\u5185\u6838\u7248\u672c\u5728\u201c\u4e8c\u8fdb\u5236\u5b89\u88c5\u201d\u5b89\u88c5\u65b9\u5f0f\u5bf9\u5e94\u7684\u7248\u672c\u8303\u56f4\u5185\uff0c\u5219\u53ef\u4ee5\u76f4\u63a5\u5b89\u88c5NPU\u9a71\u52a8\u56fa\u4ef6\u3002
                                                                    2. \u56fa\u4ef6\u4e0e\u9a71\u52a8\u4e0b\u8f7d\u8bf7\u53c2\u8003\u56fa\u4ef6\u4e0b\u8f7d\u5730\u5740
                                                                    3. \u56fa\u4ef6\u5b89\u88c5\u8bf7\u53c2\u8003\u5b89\u88c5 NPU \u9a71\u52a8\u56fa\u4ef6
                                                                    "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html#npu_1","title":"\u5b89\u88c5 NPU \u9a71\u52a8","text":"
                                                                    1. \u5982\u9a71\u52a8\u672a\u5b89\u88c5\uff0c\u8bf7\u53c2\u8003\u6607\u817e\u5b98\u65b9\u6587\u6863\u8fdb\u884c\u5b89\u88c5\u3002\u4f8b\u5982 Ascend910\uff0c\u53c2\u8003 910 \u9a71\u52a8\u5b89\u88c5\u6587\u6863\u3002
                                                                    2. \u8fd0\u884c npu-smi info \u547d\u4ee4\uff0c\u5e76\u4e14\u80fd\u591f\u6b63\u5e38\u8fd4\u56de NPU \u4fe1\u606f\uff0c\u8868\u793a NPU \u9a71\u52a8\u4e0e\u56fa\u4ef6\u5df2\u5c31\u7eea\u3002
                                                                    "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html#docker-runtime","title":"\u5b89\u88c5 Docker Runtime","text":"
                                                                    1. \u4e0b\u8f7d Ascend Docker Runtime

                                                                      \u793e\u533a\u7248\u4e0b\u8f7d\u5730\u5740\uff1ahttps://www.hiascend.com/zh/software/mindx-dl/community

                                                                      wget -c https://mindx.obs.cn-south-1.myhuaweicloud.com/OpenSource/MindX/MindX%205.0.RC2/MindX%20DL%205.0.RC2/Ascend-docker-runtime_5.0.RC2_linux-x86_64.run\n

Install it to a specified path by running the following two commands in turn; the parameter is the specified installation path:

                                                                      chmod u+x Ascend-docker-runtime_5.0.RC2_linux-x86_64.run \n./Ascend-docker-runtime_{version}_linux-{arch}.run --install --install-path=<path>\n
2. Modify the containerd configuration file

  If containerd has no default configuration file, run the following three commands in turn to create one:

                                                                      mkdir /etc/containerd \ncontainerd config default > /etc/containerd/config.toml \nvim /etc/containerd/config.toml\n

If containerd already has a configuration file:

                                                                      vim /etc/containerd/config.toml\n

Modify the runtime installation path according to your actual situation, mainly the runtime field:

                                                                      ... \n[plugins.\"io.containerd.monitor.v1.cgroups\"]\n   no_prometheus = false  \n[plugins.\"io.containerd.runtime.v1.linux\"]\n   shim = \"containerd-shim\"\n   runtime = \"/usr/local/Ascend/Ascend-Docker-Runtime/ascend-docker-runtime\"\n   runtime_root = \"\"\n   no_shim = false\n   shim_debug = false\n [plugins.\"io.containerd.runtime.v2.task\"]\n   platforms = [\"linux/amd64\"]\n...\n

Run the following command to restart containerd:

                                                                      systemctl restart containerd\n
                                                                    "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html#_4","title":"\u7528\u6237\u521b\u5efa","text":"

                                                                    \u5728\u5bf9\u5e94\u7ec4\u4ef6\u5b89\u88c5\u7684\u8282\u70b9\u4e0a\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\u521b\u5efa\u7528\u6237\u3002

# Ubuntu operating system\nuseradd -d /home/hwMindX -u 9000 -m -s /usr/sbin/nologin hwMindX\nusermod -a -G HwHiAiUser hwMindX\n# CentOS operating system\nuseradd -d /home/hwMindX -u 9000 -m -s /sbin/nologin hwMindX\nusermod -a -G HwHiAiUser hwMindX\n
                                                                    "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html#_5","title":"\u65e5\u5fd7\u76ee\u5f55\u521b\u5efa","text":"

                                                                    \u5728\u5bf9\u5e94\u8282\u70b9\u521b\u5efa\u7ec4\u4ef6\u65e5\u5fd7\u7236\u76ee\u5f55\u548c\u5404\u7ec4\u4ef6\u7684\u65e5\u5fd7\u76ee\u5f55\uff0c\u5e76\u8bbe\u7f6e\u76ee\u5f55\u5bf9\u5e94\u5c5e\u4e3b\u548c\u6743\u9650\u3002\u6267\u884c\u4e0b\u8ff0\u547d\u4ee4\uff0c\u521b\u5efa\u7ec4\u4ef6\u65e5\u5fd7\u7236\u76ee\u5f55\u3002

                                                                    mkdir -m 755 /var/log/mindx-dl\nchown root:root /var/log/mindx-dl\n

Run the following command to create the log directory for the Device Plugin component.

                                                                    mkdir -m 750 /var/log/mindx-dl/devicePlugin\nchown root:root /var/log/mindx-dl/devicePlugin\n

Note

Please create the corresponding log directory for each required component; in this example, only the Device Plugin component is needed. If other components are required, see the official documentation.

                                                                    "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html#label","title":"\u521b\u5efa\u8282\u70b9 Label","text":"

                                                                    \u53c2\u8003\u4e0b\u8ff0\u547d\u4ee4\u5728\u5bf9\u5e94\u8282\u70b9\u4e0a\u521b\u5efa Label\uff1a

# Create this label on compute nodes where the driver is installed\nkubectl label node {nodename} huawei.com.ascend/Driver=installed\nkubectl label node {nodename} node-role.kubernetes.io/worker=worker\nkubectl label node {nodename} workerselector=dls-worker-node\nkubectl label node {nodename} host-arch=huawei-arm // or host-arch=huawei-x86, depending on the actual situation\nkubectl label node {nodename} accelerator=huawei-Ascend910 // choose according to the actual situation\n# Create this label on the control plane node\nkubectl label node {nodename} masterselector=dls-master-node\n
                                                                    "},{"location":"end-user/kpanda/gpu/ascend/ascend_driver_install.html#device-plugin-npuexporter","title":"\u5b89\u88c5 Device Plugin \u548c NpuExporter","text":"

                                                                    \u529f\u80fd\u6a21\u5757\u8def\u5f84\uff1a \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u7ba1\u7406 \uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u6a21\u677f -> \u641c\u7d22 ascend-mindxdl \u3002

                                                                    • DevicePlugin \uff1a\u901a\u8fc7\u63d0\u4f9b\u901a\u7528\u8bbe\u5907\u63d2\u4ef6\u673a\u5236\u548c\u6807\u51c6\u7684\u8bbe\u5907API\u63a5\u53e3\uff0c\u4f9bKubernetes\u4f7f\u7528\u8bbe\u5907\u3002\u5efa\u8bae\u4f7f\u7528\u9ed8\u8ba4\u7684\u955c\u50cf\u53ca\u7248\u672c\u3002
                                                                    • NpuExporter \uff1a\u57fa\u4e8ePrometheus/Telegraf\u751f\u6001\uff0c\u8be5\u7ec4\u4ef6\u63d0\u4f9b\u63a5\u53e3\uff0c\u5e2e\u52a9\u7528\u6237\u80fd\u591f\u5173\u6ce8\u5230\u6607\u817e\u7cfb\u5217AI\u5904\u7406\u5668\u4ee5\u53ca\u5bb9\u5668\u7ea7\u5206\u914d\u72b6\u6001\u3002\u5efa\u8bae\u4f7f\u7528\u9ed8\u8ba4\u7684\u955c\u50cf\u53ca\u7248\u672c\u3002
                                                                    • ServiceMonitor \uff1a\u9ed8\u8ba4\u4e0d\u5f00\u542f\uff0c\u5f00\u542f\u540e\u53ef\u524d\u5f80\u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u67e5\u770b NPU \u76f8\u5173\u76d1\u63a7\u3002\u5982\u9700\u5f00\u542f\uff0c\u8bf7\u786e\u4fdd insight-agent \u5df2\u5b89\u88c5\u5e76\u5904\u4e8e\u8fd0\u884c\u72b6\u6001\uff0c\u5426\u5219\u5c06\u5bfc\u81f4 ascend-mindxdl \u5b89\u88c5\u5931\u8d25\u3002
                                                                    • isVirtualMachine \uff1a\u9ed8\u8ba4\u4e0d\u5f00\u542f\uff0c\u5982\u679c NPU \u8282\u70b9\u4e3a\u4e91\u4e3b\u673a\u573a\u666f\uff0c\u8bf7\u5f00\u542f\u00a0isVirtualMachine \u53c2\u6570\u3002
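A minimal values sketch for the Helm installation, assuming hypothetical key names that mirror the options above; check the actual ascend-mindxdl template for the real keys.

# values.yaml sketch for the ascend-mindxdl Helm template; the key names are assumptions\nserviceMonitor:\n  enabled: false # enable only if insight-agent is installed and running\nisVirtualMachine: false # set to true when NPU nodes are cloud virtual machines\n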

After a successful installation, two components appear under the corresponding namespace, as shown below:

The corresponding NPU information also appears in the node information:

Once everything is ready, you can select the corresponding NPU device when creating a workload through the page, as shown below:

Note

For detailed usage steps, see Using Ascend NPUs in Applications.

                                                                    "},{"location":"end-user/kpanda/gpu/ascend/ascend_usage.html","title":"\u5e94\u7528\u4f7f\u7528\u6607\u817e\uff08Ascend\uff09NPU","text":"

                                                                    \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528\u6607\u817e GPU\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/ascend/ascend_usage.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                    • \u5f53\u524d NPU \u8282\u70b9\u5df2\u5b89\u88c5\u6607\u817e \uff08Ascend\uff09\u9a71\u52a8\u3002
                                                                    • \u5f53\u524d NPU \u8282\u70b9\u5df2\u5b89\u88c5 Ascend-Docker-Runtime \u7ec4\u4ef6\u3002
                                                                    • \u5f53\u524d\u96c6\u7fa4\u5df2\u5b89\u88c5 NPU MindX DL \u5957\u4ef6\u3002
                                                                    • \u5f53\u524d\u96c6\u7fa4\u5185 NPU \u5361\u672a\u8fdb\u884c\u4efb\u4f55\u865a\u62df\u5316\u64cd\u4f5c\u6216\u88ab\u5176\u5b83\u5e94\u7528\u5360\u7528\u3002

                                                                    \u8bf7\u53c2\u8003\u6607\u817e NPU \u7ec4\u4ef6\u5b89\u88c5\u6587\u6863\u5b89\u88c5\u57fa\u7840\u73af\u5883\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/ascend/ascend_usage.html#_2","title":"\u5feb\u901f\u4f7f\u7528","text":"

                                                                    \u672c\u6587\u4f7f\u7528\u6607\u817e\u793a\u4f8b\u5e93\u4e2d\u7684 AscentCL \u56fe\u7247\u5206\u7c7b\u5e94\u7528\u793a\u4f8b\u3002

                                                                    1. \u4e0b\u8f7d\u6607\u817e\u4ee3\u7801\u5e93

                                                                      \u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\u4e0b\u8f7d\u6607\u817e Demo \u793a\u4f8b\u4ee3\u7801\u5e93\uff0c\u5e76\u4e14\u8bf7\u8bb0\u4f4f\u4ee3\u7801\u5b58\u653e\u7684\u4f4d\u7f6e\uff0c\u540e\u7eed\u9700\u8981\u4f7f\u7528\u3002

                                                                      git clone https://gitee.com/ascend/samples.git\n
2. Prepare the base image

   This example uses the ascend-pytorch base image, which can be obtained from the Ascend image repository.

3. Prepare the YAML

   ascend-demo.yaml
                                                                      apiVersion: batch/v1\nkind: Job\nmetadata:\n  name: resnetinfer1-1-1usoc\nspec:\n  template:\n    spec:\n      containers:\n        - image: ascendhub.huawei.com/public-ascendhub/ascend-pytorch:23.0.RC2-ubuntu18.04 # Inference image name\n          imagePullPolicy: IfNotPresent\n          name: resnet50infer\n          securityContext:\n            runAsUser: 0\n          command:\n            - \"/bin/bash\"\n            - \"-c\"\n            - |\n              source /usr/local/Ascend/ascend-toolkit/set_env.sh &&\n              TEMP_DIR=/root/samples_copy_$(date '+%Y%m%d_%H%M%S_%N') &&\n              cp -r /root/samples \"$TEMP_DIR\" &&\n              cd \"$TEMP_DIR\"/inference/modelInference/sampleResnetQuickStart/python/model &&\n              wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/resnet50/resnet50.onnx &&\n              atc --model=resnet50.onnx --framework=5 --output=resnet50 --input_shape=\"actual_input_1:1,3,224,224\"  --soc_version=Ascend910 &&\n              cd ../data &&\n              wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg &&\n              cd ../scripts &&\n              bash sample_run.sh\n          resources:\n            requests:\n              huawei.com/Ascend910: 1 # Number of the Ascend 910 Processors\n            limits:\n              huawei.com/Ascend910: 1 # The value should be the same as that of requests\n          volumeMounts:\n            - name: hiai-driver\n              mountPath: /usr/local/Ascend/driver\n              readOnly: true\n            - name: slog\n              mountPath: /var/log/npu/conf/slog/slog.conf\n            - name: localtime # The container time must be the same as the host time\n              mountPath: /etc/localtime\n            - name: dmp\n              mountPath: /var/dmp_daemon\n            - name: slogd\n              mountPath: /var/slogd\n            - name: hbasic\n              mountPath: /etc/hdcBasic.cfg\n            - name: sys-version\n              mountPath: /etc/sys_version.conf\n            - name: aicpu\n              mountPath: /usr/lib64/aicpu_kernels\n            - name: tfso\n              mountPath: /usr/lib64/libtensorflow.so\n            - name: sample-path\n              mountPath: /root/samples\n      volumes:\n        - name: hiai-driver\n          hostPath:\n            path: /usr/local/Ascend/driver\n        - name: slog\n          hostPath:\n            path: /var/log/npu/conf/slog/slog.conf\n        - name: localtime\n          hostPath:\n            path: /etc/localtime\n        - name: dmp\n          hostPath:\n            path: /var/dmp_daemon\n        - name: slogd\n          hostPath:\n            path: /var/slogd\n        - name: hbasic\n          hostPath:\n            path: /etc/hdcBasic.cfg\n        - name: sys-version\n          hostPath:\n            path: /etc/sys_version.conf\n        - name: aicpu\n          hostPath:\n            path: /usr/lib64/aicpu_kernels\n        - name: tfso\n          hostPath:\n            path: /usr/lib64/libtensorflow.so\n        - name: sample-path\n          hostPath:\n            path: /root/samples\n      restartPolicy: OnFailure\n

Some fields in the above YAML need to be modified to match your environment:

  1. atc ... --soc_version=Ascend910 uses Ascend910; set it to match your actual hardware. You can run the npu-smi info command to check the card model and add the Ascend prefix to it.
  2. samples-path: set according to the actual path.
  3. resources: set according to the actual resources.
4. Deploy the Job and check the result

   Create the Job with the following command:

                                                                      kubectl apply -f ascend-demo.yaml\n

Check the Pod status:
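A minimal command-line check (a sketch; it assumes the Job name resnetinfer1-1-1usoc from the YAML above and the default namespace):

kubectl get pods -l job-name=resnetinfer1-1-1usoc -w   # -w watches until the Pod completes\n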

After the Pod has run successfully, check the log output. The key prompt information on screen looks like the figure below: Label is the category identifier, Conf is the maximum confidence of the classification, and Class is the category name. These values may vary with versions and environments, so use your actual results:
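A sketch for fetching those logs, under the same Job-name assumption:

kubectl logs -f job/resnetinfer1-1-1usoc   # follow the inference output until the Job finishes\n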

The resulting image:

"},{"location":"end-user/kpanda/gpu/ascend/ascend_usage.html#_3","title":"Using the UI","text":"
1. Check whether the cluster has detected the GPU card. Click the corresponding Cluster -> Cluster Settings -> Addons to check whether the corresponding GPU type has been automatically enabled and detected. Currently the cluster automatically enables GPU and sets the GPU type to Ascend.

2. Deploy a workload. Click the corresponding Cluster -> Workloads, deploy the workload from an image, and after selecting the type (Ascend), configure the number of physical cards used by the application:

   Physical card count (huawei.com/Ascend910): how many physical cards the current Pod needs to mount; the value must be an integer and **less than or equal to** the number of cards on the host.

   If this value is misconfigured, scheduling fails and the resources cannot be allocated. A YAML equivalent is sketched below.
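For reference, the same request expressed as a YAML fragment (a sketch mirroring the huawei.com/Ascend910 resource key above; as in the UI, requests must equal limits and must not exceed the cards on the host):

......\nresources:\n  requests:\n    huawei.com/Ascend910: 1\n  limits:\n    huawei.com/Ascend910: 1\n......\n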

                                                                    "},{"location":"end-user/kpanda/gpu/ascend/vnpu.html","title":"\u542f\u7528\u6607\u817e\u865a\u62df\u5316","text":"

                                                                    \u6607\u817e\u865a\u62df\u5316\u5206\u4e3a\u52a8\u6001\u865a\u62df\u5316\u548c\u9759\u6001\u865a\u62df\u5316\uff0c\u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5f00\u542f\u5e76\u4f7f\u7528\u6607\u817e\u9759\u6001\u865a\u62df\u5316\u80fd\u529b\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/ascend/vnpu.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                    • Kubernetes \u96c6\u7fa4\u73af\u5883\u642d\u5efa\u3002
                                                                    • \u5f53\u524d NPU \u8282\u70b9\u5df2\u5b89\u88c5\u6607\u817e \uff08Ascend\uff09\u9a71\u52a8\u3002
                                                                    • \u5f53\u524d NPU \u8282\u70b9\u5df2\u5b89\u88c5 Ascend-Docker-Runtime \u7ec4\u4ef6\u3002
                                                                    • \u5f53\u524d\u96c6\u7fa4\u5df2\u5b89\u88c5 NPU MindX DL \u5957\u4ef6\u3002
                                                                    • \u652f\u6301\u7684 NPU \u5361\u578b\u53f7\uff1a

                                                                      • Ascend 310P\uff0c\u5df2\u9a8c\u8bc1
                                                                      • Ascend 910b\uff0820 \u6838\uff09\uff0c\u5df2\u9a8c\u8bc1
                                                                      • Ascend 910\uff0832 \u6838\uff09\uff0c\u5b98\u65b9\u4ecb\u7ecd\u652f\u6301\uff0c\u672a\u5b9e\u9645\u9a8c\u8bc1
                                                                      • Ascend 910\uff0830 \u6838\uff09\uff0c\u5b98\u65b9\u4ecb\u7ecd\u652f\u6301\uff0c\u672a\u5b9e\u9645\u9a8c\u8bc1

                                                                      \u66f4\u591a\u7ec6\u8282\u53c2\u9605\u5b98\u65b9\u865a\u62df\u5316\u786c\u4ef6\u8bf4\u660e\u3002

                                                                    \u8bf7\u53c2\u8003\u6607\u817e NPU \u7ec4\u4ef6\u5b89\u88c5\u6587\u6863\u5b89\u88c5\u57fa\u7840\u73af\u5883\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/ascend/vnpu.html#_3","title":"\u5f00\u542f\u865a\u62df\u5316\u80fd\u529b","text":"

                                                                    \u5f00\u542f\u865a\u62df\u5316\u80fd\u529b\u9700\u8981\u624b\u52a8\u4fee\u6539\u00a0ascend-device-plugin-daemonset \u7ec4\u4ef6\u7684\u542f\u52a8\u53c2\u6570\uff0c\u53c2\u8003\u4e0b\u8ff0\u547d\u4ee4\uff1a

                                                                    - device-plugin -useAscendDocker=true -volcanoType=false -presetVirtualDevice=true\n- logFile=/var/log/mindx-dl/devicePlugin/devicePlugin.log -logLevel=0\n
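One way to apply this change is to edit the DaemonSet in place (a sketch; it assumes the component runs in the mindx-dl namespace, so adjust to your installation):

kubectl -n mindx-dl edit daemonset ascend-device-plugin-daemonset   # update the container args as shown above\n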
                                                                    "},{"location":"end-user/kpanda/gpu/ascend/vnpu.html#vnpu","title":"\u5207\u5206 VNPU \u5b9e\u4f8b","text":"

                                                                    \u9759\u6001\u865a\u62df\u5316\u9700\u8981\u624b\u52a8\u5bf9 VNPU \u5b9e\u4f8b\u7684\u5207\u5206\uff0c\u8bf7\u53c2\u8003\u4e0b\u8ff0\u547d\u4ee4\uff1a

                                                                    npu-smi set -t create-vnpu -i 13 -c 0 -f vir02\n
• i is the card id
• c is the chip id
• vir02 is the partition-specification template

The card id and chip id can be queried with npu-smi info, and the partition specifications can be looked up in the official Ascend templates.

After partitioning an instance, query the result with the following command:

                                                                    npu-smi info -t info-vnpu -i 13 -c 0\n

The query result looks like this:

"},{"location":"end-user/kpanda/gpu/ascend/vnpu.html#ascend-device-plugin-daemonset","title":"Restart ascend-device-plugin-daemonset","text":"

After partitioning the instances, manually restart the device-plugin Pod (a restart sketch follows), then use the kubectl describe command to check the resources registered on the node:
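A minimal restart sketch (again assuming the mindx-dl namespace; rollout restart recreates the DaemonSet's Pods):

kubectl -n mindx-dl rollout restart daemonset ascend-device-plugin-daemonset\n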

kubectl describe node {nodename}\n

                                                                    "},{"location":"end-user/kpanda/gpu/ascend/vnpu.html#_4","title":"\u5982\u4f55\u4f7f\u7528\u8bbe\u5907","text":"

                                                                    \u5728\u521b\u5efa\u5e94\u7528\u65f6\uff0c\u6307\u5b9a\u8d44\u6e90 key\uff0c\u53c2\u8003\u4e0b\u8ff0 YAML\uff1a

                                                                    ......\nresources:\n  requests:\n    huawei.com/Ascend310P-2c: 1\n  limits:\n    huawei.com/Ascend310P-2c: 1\n......\n
                                                                    "},{"location":"end-user/kpanda/gpu/metax/usemetax.html","title":"\u6c90\u66e6 GPU \u7ec4\u4ef6\u5b89\u88c5\u4e0e\u4f7f\u7528","text":"

                                                                    \u672c\u7ae0\u8282\u63d0\u4f9b\u6c90\u66e6 gpu-extensions\u3001gpu-operator \u7b49\u7ec4\u4ef6\u7684\u5b89\u88c5\u6307\u5bfc\u548c\u6c90\u66e6 GPU \u6574\u5361\u548c vGPU \u4e24\u79cd\u6a21\u5f0f\u7684\u4f7f\u7528\u65b9\u6cd5\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/metax/usemetax.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                    1. \u5df2\u5728\u6c90\u66e6\u8f6f\u4ef6\u4e2d\u5fc3\u4e0b\u8f7d\u5e76\u5b89\u88c5\u6240\u9700\u7684 tar \u5305\uff0c \u672c\u6587\u4ee5 metax-gpu-k8s-package.0.7.10.tar.gz \u4e3a\u4f8b\u3002
                                                                    2. \u51c6\u5907 Kubernetes \u57fa\u7840\u73af\u5883
                                                                    "},{"location":"end-user/kpanda/gpu/metax/usemetax.html#_2","title":"\u7ec4\u4ef6\u4ecb\u7ecd","text":"

                                                                    Metax \u63d0\u4f9b\u4e86\u4e24\u4e2a helm-chart \u5305\uff0c\u4e00\u4e2a\u662f metax-extensions\uff0c\u4e00\u4e2a\u662f gpu-operator\uff0c\u6839\u636e\u4f7f\u7528\u573a\u666f\u53ef\u9009\u62e9\u5b89\u88c5\u4e0d\u540c\u7684\u7ec4\u4ef6\u3002

                                                                    1. Metax-extensions\uff1a\u5305\u542b gpu-device \u548c gpu-label \u4e24\u4e2a\u7ec4\u4ef6\u3002\u5728\u4f7f\u7528 Metax-extensions \u65b9\u6848\u65f6\uff0c\u7528\u6237\u7684\u5e94\u7528\u5bb9\u5668\u955c\u50cf\u9700\u8981\u57fa\u4e8e MXMACA\u00ae \u57fa\u7840\u955c\u50cf\u6784\u5efa\u3002\u4e14 Metax-extensions \u4ec5\u9002\u7528\u4e8e GPU \u6574\u5361\u4f7f\u7528\u573a\u666f\u3002
                                                                    2. gpu-operator\uff1a\u5305\u542b gpu-device\u3001gpu-label\u3001driver-manager\u3001container-runtime\u3001operator-controller \u8fd9\u4e9b\u7ec4\u4ef6\u3002 \u4f7f\u7528 gpu-operator \u65b9\u6848\u65f6\uff0c\u7528\u6237\u53ef\u9009\u62e9\u5236\u4f5c\u4e0d\u5305\u542b MXMACA\u00ae SDK \u7684\u5e94\u7528\u5bb9\u5668\u955c\u50cf\u3002gpu-operator \u9002\u7528\u4e8e GPU \u6574\u5361\u548c vGPU \u573a\u666f\u3002
                                                                    "},{"location":"end-user/kpanda/gpu/metax/usemetax.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                                    1. \u4ece /home/metax/metax-docs/k8s/metax-gpu-k8s-package.0.7.10.tar.gz \u6587\u4ef6\u4e2d\u89e3\u538b\u51fa

                                                                      • deploy-gpu-extensions.yaml # \u90e8\u7f72yaml
                                                                      • metax-gpu-extensions-0.7.10.tgz\u3001metax-operator-0.7.10.tgz # helm chart\u6587\u4ef6
                                                                      • metax-k8s-images.0.7.10.run # \u79bb\u7ebf\u955c\u50cf
                                                                    2. \u67e5\u770b\u7cfb\u7edf\u662f\u5426\u5b89\u88c5\u9a71\u52a8

                                                                      $ lsmod | grep metax \nmetax 1605632 0 \nttm 86016 3 drm_vram_helper,metax,drm_ttm_helper \ndrm 618496 7 drm_kms_helper,drm_vram_helper,ast,metax,drm_ttm_helper,ttm\n
  • If nothing is shown, the software package has not been installed; if output appears, the software package has been installed.
  • When using metax-operator, preinstalling the MXMACA kernel-mode driver on worker nodes is not recommended; if it is already installed, there is no need to uninstall it.
3. Install the driver

                                                                    "},{"location":"end-user/kpanda/gpu/metax/usemetax.html#gpu-extensions","title":"gpu-extensions","text":"
1. Push the images:

                                                                      tar -xf metax-gpu-k8s-package.0.7.10.tar.gz\n./metax-k8s-images.0.7.10.run push {registry}/metax\n
2. Push the Helm Charts:

                                                                      helm plugin install https://github.com/chartmuseum/helm-push\nhelm repo add  --username rootuser --password rootpass123  metax http://172.16.16.5:8081\nhelm cm-push metax-operator-0.7.10.tgz metax\nhelm cm-push metax-gpu-extensions-0.7.10.tgz metax\n
3. Install metax-gpu-extensions on the 算丰 AI computing platform.

   After a successful deployment, the resources can be seen on the node.

4. After the modification succeeds, the Metax GPU label is visible on the node; a quick check is sketched below.
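A command-line verification sketch (substitute your node name):

kubectl get node {nodename} --show-labels | grep -i metax\n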

                                                                    "},{"location":"end-user/kpanda/gpu/metax/usemetax.html#gpu-operator","title":"gpu-operator","text":"

Known issues when installing gpu-operator:

1. The images of the metax-operator, gpu-label, gpu-device, and container-runtime components must carry the amd64 suffix.

2. The image of the metax-maca component is not included in the metax-k8s-images.0.7.13.run package; images such as maca-mxc500-2.23.0.23-ubuntu20.04-x86_64.tar.xz must be downloaded separately and loaded, after which the metax-maca component's image must be updated.

3. The image of the metax-driver component requires downloading the k8s-driver-image.2.23.0.25.run file from https://pub-docstore.metax-tech.com:7001 and then running the k8s-driver-image.2.23.0.25.run push {registry}/metax command to push the image to the image registry. After pushing, update the image address of the metax-driver component; the steps are sketched below.
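Put together, the driver-image step looks like this (a sketch that restates the commands above; the chmod line is an assumption for a freshly downloaded .run file, and {registry} is your own registry address):

# download k8s-driver-image.2.23.0.25.run from https://pub-docstore.metax-tech.com:7001 first\nchmod +x k8s-driver-image.2.23.0.25.run\n./k8s-driver-image.2.23.0.25.run push {registry}/metax\n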

                                                                    "},{"location":"end-user/kpanda/gpu/metax/usemetax.html#gpu_1","title":"\u4f7f\u7528 GPU","text":"

                                                                    \u5b89\u88c5\u540e\u53ef\u5728\u5de5\u4f5c\u8d1f\u8f7d\u4e2d\u4f7f\u7528\u6c90\u66e6 GPU\u3002\u6ce8\u610f\u542f\u7528 GPU \u540e\uff0c\u9700\u9009\u62e9GPU\u7c7b\u578b\u4e3a Metax GPU

                                                                    \u8fdb\u5165\u5bb9\u5668\uff0c\u6267\u884c mx-smi \u53ef\u67e5\u770b GPU \u7684\u4f7f\u7528\u60c5\u51b5.
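A sketch (substitute the name of a Pod that has a MetaX GPU attached):

kubectl exec -it {podname} -- mx-smi\n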

                                                                    "},{"location":"end-user/kpanda/gpu/mlu/use-mlu.html","title":"\u4f7f\u7528\u5bd2\u6b66\u7eaa GPU","text":"

                                                                    \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4f7f\u7528\u5bd2\u6b66\u7eaa GPU\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/mlu/use-mlu.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                                    • \u5df2\u7ecf\u90e8\u7f72 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 \u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u4e14\u5e73\u53f0\u8fd0\u884c\u6b63\u5e38\u3002
                                                                    • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                                                                    • \u5f53\u524d\u96c6\u7fa4\u5df2\u5b89\u88c5\u5bd2\u6b66\u7eaa\u56fa\u4ef6\u3001\u9a71\u52a8\u4ee5\u53caDevicePlugin\u7ec4\u4ef6\uff0c\u5b89\u88c5\u8be6\u60c5\u8bf7\u53c2\u8003\u5b98\u65b9\u6587\u6863\uff1a
                                                                      • \u9a71\u52a8\u56fa\u4ef6\u5b89\u88c5
                                                                      • DevicePlugin \u5b89\u88c5

                                                                    \u5728\u5b89\u88c5 DevicePlugin \u65f6\u8bf7\u5173\u95ed --enable-device-type \u53c2\u6570\uff0c\u5426\u5219\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5c06\u65e0\u6cd5\u6b63\u786e\u8bc6\u522b\u5bd2\u6b66\u7eaa GPU\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/mlu/use-mlu.html#gpu_1","title":"\u5bd2\u6b66\u7eaa GPU \u6a21\u5f0f\u4ecb\u7ecd","text":"

                                                                    \u5bd2\u6b66\u7eaa GPU \u6709\u4ee5\u4e0b\u51e0\u79cd\u6a21\u5f0f\uff1a

                                                                    • \u6574\u5361\u6a21\u5f0f\uff1a\u5c06\u5bd2\u6b66\u7eaaGPU\u4ee5\u6574\u5361\u7684\u65b9\u5f0f\u6ce8\u518c\u5230\u96c6\u7fa4\u5f53\u4e2d\u8fdb\u884c\u4f7f\u7528\u3002
                                                                    • Share \u6a21\u5f0f\uff1a\u53ef\u4ee5\u5c06\u4e00\u5f20\u5bd2\u6b66\u7eaaGPU\u5171\u4eab\u7ed9\u591a\u4e2a Pod \u8fdb\u884c\u4f7f\u7528\uff0c\u53ef\u4ee5\u901a\u8fc7 virtualization-num \u53c2\u6570\u8fdb\u884c\u8bbe\u7f6e\u53ef\u5171\u4eab\u5bb9\u5668\u7684\u6570\u91cf\u3002
                                                                    • Dynamic smlu \u6a21\u5f0f\uff1a\u8fdb\u4e00\u6b65\u5bf9\u8d44\u6e90\u8fdb\u884c\u4e86\u7ec6\u5316\uff0c\u53ef\u4ee5\u63a7\u5236\u5206\u914d\u7ed9\u5bb9\u5668\u7684\u663e\u5b58\u3001\u7b97\u529b\u7684\u5927\u5c0f\u3002
                                                                    • Mim \u6a21\u5f0f\uff1a\u53ef\u4ee5\u5c06\u5bd2\u6b66\u7eaa GPU \u6309\u7167\u56fa\u5b9a\u7684\u89c4\u683c\u5207\u5206\u6210\u591a\u5f20 GPU \u8fdb\u884c\u4f7f\u7528\u3002
                                                                    "},{"location":"end-user/kpanda/gpu/mlu/use-mlu.html#ai","title":"\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528\u5bd2\u6b66\u7eaa","text":"

                                                                    \u8fd9\u91cc\u4ee5 Dynamic smlu \u6a21\u5f0f\u4e3a\u4f8b\uff1a

                                                                    1. \u5728\u6b63\u786e\u5b89\u88c5 DevicePlugin \u7b49\u7ec4\u4ef6\u540e\uff0c\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u96c6\u7fa4\u8fd0\u7ef4-> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u81ea\u52a8\u542f\u7528\u5e76\u81ea\u52a8\u68c0\u6d4b\u5bf9\u5e94 GPU \u7c7b\u578b\u3002

                                                                    2. \u70b9\u51fb\u8282\u70b9\u7ba1\u7406\u9875\u9762\uff0c\u67e5\u770b\u8282\u70b9\u662f\u5426\u5df2\u7ecf\u6b63\u786e\u8bc6\u522b\u5230\u5bf9\u5e94\u7684GPU\u7c7b\u578b\u3002

                                                                    3. \u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u9009\u62e9\u7c7b\u578b\uff08MLU VGPU\uff09\u4e4b\u540e\uff0c\u9700\u8981\u914d\u7f6e App \u4f7f\u7528\u7684 GPU \u8d44\u6e90\uff1a

                                                                      • GPU \u7b97\u529b\uff08cambricon.com/mlu.smlu.vcore\uff09\uff1a\u8868\u793a\u5f53\u524d Pod \u9700\u8981\u4f7f\u7528\u6838\u5fc3\u7684\u767e\u5206\u6bd4\u6570\u91cf\u3002
                                                                      • GPU \u663e\u5b58\uff08cambricon.com/mlu.smlu.vmemory\uff09\uff1a\u8868\u793a\u5f53\u524dPod\u9700\u8981\u4f7f\u7528\u663e\u5b58\u7684\u5927\u5c0f\uff0c\u5355\u4f4d\u662fMB\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/mlu/use-mlu.html#yaml","title":"\u4f7f\u7528 YAML \u914d\u7f6e","text":"

                                                                    \u53c2\u8003 YAML \u6587\u4ef6\u5982\u4e0b\uff1a

apiVersion: v1\nkind: Pod\nmetadata:\n  name: pod1\nspec:\n  restartPolicy: OnFailure\n  containers:\n    - image: ubuntu:16.04\n      name: pod1-ctr\n      command: [\"sleep\"]\n      args: [\"100000\"]\n      resources:\n        limits:\n          cambricon.com/mlu: \"1\" # use this when device type is not enabled, else delete this line.\n          #cambricon.com/mlu: \"1\" # uncomment to use when device type is enabled\n          #cambricon.com/mlu.share: \"1\" # uncomment to use device with env-share mode\n          #cambricon.com/mlu.mim-2m.8gb: \"1\" # uncomment to use device with mim mode\n          #cambricon.com/mlu.smlu.vcore: \"100\" # uncomment to use device with dynamic smlu mode\n          #cambricon.com/mlu.smlu.vmemory: \"1024\" # uncomment to use device with dynamic smlu mode\n
                                                                    "},{"location":"end-user/kpanda/gpu/nvidia/index.html","title":"NVIDIA GPU \u5361\u4f7f\u7528\u6a21\u5f0f","text":"

                                                                    NVIDIA \u4f5c\u4e3a\u4e1a\u5185\u77e5\u540d\u7684\u56fe\u5f62\u8ba1\u7b97\u4f9b\u5e94\u5546\uff0c\u4e3a\u7b97\u529b\u7684\u63d0\u5347\u63d0\u4f9b\u4e86\u8bf8\u591a\u8f6f\u786c\u4ef6\u89e3\u51b3\u65b9\u6848\uff0c\u5176\u4e2d NVIDIA \u5728 GPU \u7684\u4f7f\u7528\u65b9\u5f0f\u4e0a\u63d0\u4f9b\u4e86\u5982\u4e0b\u4e09\u79cd\u89e3\u51b3\u65b9\u6848\uff1a

                                                                    "},{"location":"end-user/kpanda/gpu/nvidia/index.html#full-gpu","title":"\u6574\u5361\uff08Full GPU\uff09","text":"

                                                                    \u6574\u5361\u662f\u6307\u5c06\u6574\u4e2a NVIDIA GPU \u5206\u914d\u7ed9\u5355\u4e2a\u7528\u6237\u6216\u5e94\u7528\u7a0b\u5e8f\u3002\u5728\u8fd9\u79cd\u914d\u7f6e\u4e0b\uff0c\u5e94\u7528\u53ef\u4ee5\u5b8c\u5168\u5360\u7528 GPU \u7684\u6240\u6709\u8d44\u6e90\uff0c \u5e76\u83b7\u5f97\u6700\u5927\u7684\u8ba1\u7b97\u6027\u80fd\u3002\u6574\u5361\u9002\u7528\u4e8e\u9700\u8981\u5927\u91cf\u8ba1\u7b97\u8d44\u6e90\u548c\u5185\u5b58\u7684\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u5982\u6df1\u5ea6\u5b66\u4e60\u8bad\u7ec3\u3001\u79d1\u5b66\u8ba1\u7b97\u7b49\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/nvidia/index.html#vgpuvirtual-gpu","title":"vGPU\uff08Virtual GPU\uff09","text":"

                                                                    vGPU \u662f\u4e00\u79cd\u865a\u62df\u5316\u6280\u672f\uff0c\u5141\u8bb8\u5c06\u4e00\u4e2a\u7269\u7406 GPU \u5212\u5206\u4e3a\u591a\u4e2a\u865a\u62df GPU\uff0c\u6bcf\u4e2a\u865a\u62df GPU \u5206\u914d\u7ed9\u4e0d\u540c\u7684\u4e91\u4e3b\u673a\u6216\u7528\u6237\u3002 vGPU \u4f7f\u591a\u4e2a\u7528\u6237\u53ef\u4ee5\u5171\u4eab\u540c\u4e00\u53f0\u7269\u7406 GPU\uff0c\u5e76\u5728\u5404\u81ea\u7684\u865a\u62df\u73af\u5883\u4e2d\u72ec\u7acb\u4f7f\u7528 GPU \u8d44\u6e90\u3002 \u6bcf\u4e2a\u865a\u62df GPU \u53ef\u4ee5\u83b7\u5f97\u4e00\u5b9a\u7684\u8ba1\u7b97\u80fd\u529b\u548c\u663e\u5b58\u5bb9\u91cf\u3002vGPU \u9002\u7528\u4e8e\u865a\u62df\u5316\u73af\u5883\u548c\u4e91\u8ba1\u7b97\u573a\u666f\uff0c\u53ef\u4ee5\u63d0\u4f9b\u66f4\u9ad8\u7684\u8d44\u6e90\u5229\u7528\u7387\u548c\u7075\u6d3b\u6027\u3002

                                                                    "},{"location":"end-user/kpanda/gpu/nvidia/index.html#migmulti-instance-gpu","title":"MIG\uff08Multi-Instance GPU\uff09","text":"

                                                                    MIG \u662f NVIDIA Ampere \u67b6\u6784\u5f15\u5165\u7684\u4e00\u9879\u529f\u80fd\uff0c\u5b83\u5141\u8bb8\u5c06\u4e00\u4e2a\u7269\u7406 GPU \u5212\u5206\u4e3a\u591a\u4e2a\u7269\u7406 GPU \u5b9e\u4f8b\uff0c\u6bcf\u4e2a\u5b9e\u4f8b\u53ef\u4ee5\u72ec\u7acb\u5206\u914d\u7ed9\u4e0d\u540c\u7684\u7528\u6237\u6216\u5de5\u4f5c\u8d1f\u8f7d\u3002 \u6bcf\u4e2a MIG \u5b9e\u4f8b\u5177\u6709\u81ea\u5df1\u7684\u8ba1\u7b97\u8d44\u6e90\u3001\u663e\u5b58\u548c PCIe \u5e26\u5bbd\uff0c\u5c31\u50cf\u4e00\u4e2a\u72ec\u7acb\u7684\u865a\u62df GPU\u3002 MIG \u63d0\u4f9b\u4e86\u66f4\u7ec6\u7c92\u5ea6\u7684 GPU \u8d44\u6e90\u5206\u914d\u548c\u7ba1\u7406\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u6c42\u52a8\u6001\u8c03\u6574\u5b9e\u4f8b\u7684\u6570\u91cf\u548c\u5927\u5c0f\u3002 MIG \u9002\u7528\u4e8e\u591a\u79df\u6237\u73af\u5883\u3001\u5bb9\u5668\u5316\u5e94\u7528\u7a0b\u5e8f\u548c\u6279\u5904\u7406\u4f5c\u4e1a\u7b49\u573a\u666f\u3002

Whether using vGPU in a virtualized environment or MIG on a physical GPU, NVIDIA gives users more choices and more ways to optimize GPU resources. The 算丰 AI computing container management platform is fully compatible with the NVIDIA capabilities above; through simple UI operations, users can obtain the full computing power of NVIDIA GPUs, improving resource utilization and reducing cost.

• Single mode: the node exposes a single type of MIG device on all of its GPUs. All GPUs on the node must:
  • Belong to the same model (for example A100-SXM-40GB); only GPUs of the same model share the same MIG profiles
  • Have MIG enabled, which requires a machine reboot to take effect
  • Create identical GIs and CIs so that identical MIG device types are exposed across all products
• Mixed mode: the node exposes a mix of MIG device types on all of its GPUs. Requesting a specific MIG device type requires specifying the number of compute slices and the total memory provided by that device type.
  • All GPUs on the node must belong to the same product line (for example A100-SXM-40GB)
  • Each GPU may have MIG enabled or disabled, and may be freely configured with any mix of the available MIG device types.
  • The k8s-device-plugin running on the node will:
    • Expose any GPU that is not in MIG mode using the traditional nvidia.com/gpu resource type
    • Expose individual MIG devices using resource types of the form nvidia.com/mig-<slice>g.<memory>gb

For details on enabling this configuration, refer to GPU Operator Offline Installation.

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/index.html#_1","title":"\u5982\u4f55\u4f7f\u7528","text":"

                                                                          \u60a8\u53ef\u4ee5\u53c2\u8003\u4ee5\u4e0b\u94fe\u63a5\uff0c\u5feb\u901f\u4f7f\u7528\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5173\u4e8e NVIDIA GPU \u5361\u7684\u7ba1\u7406\u80fd\u529b\u3002

                                                                          • NVIDIA GPU \u6574\u5361\u4f7f\u7528
                                                                          • NVIDIA vGPU \u4f7f\u7528
                                                                          • NVIDIA MIG \u4f7f\u7528
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/full_gpu_userguide.html","title":"\u5e94\u7528\u4f7f\u7528 GPU \u6574\u5361","text":"

                                                                          \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5c06\u6574\u4e2a NVIDIA GPU \u5361\u5206\u914d\u7ed9\u5355\u4e2a\u5e94\u7528\u3002

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/full_gpu_userguide.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          • \u5df2\u7ecf\u90e8\u7f72 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 \u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u4e14\u5e73\u53f0\u8fd0\u884c\u6b63\u5e38\u3002
                                                                          • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                                                                          • \u5f53\u524d\u96c6\u7fa4\u5df2\u79bb\u7ebf\u5b89\u88c5 GPU Operator \u5e76\u5df2\u542f\u7528 NVIDIA DevicePlugin \uff0c\u53ef\u53c2\u8003 GPU Operator \u79bb\u7ebf\u5b89\u88c5\u3002
                                                                          • \u5f53\u524d\u96c6\u7fa4\u5185 GPU \u5361\u672a\u8fdb\u884c\u4efb\u4f55\u865a\u62df\u5316\u64cd\u4f5c\u6216\u88ab\u5176\u5b83\u5e94\u7528\u5360\u7528\u3002
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/full_gpu_userguide.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"end-user/kpanda/gpu/nvidia/full_gpu_userguide.html#ui","title":"\u4f7f\u7528 UI \u754c\u9762\u914d\u7f6e","text":"
                                                                          1. \u786e\u8ba4\u96c6\u7fa4\u662f\u5426\u5df2\u68c0\u6d4b GPU \u5361\u3002\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u96c6\u7fa4\u8bbe\u7f6e -> Addon \u63d2\u4ef6 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u81ea\u52a8\u542f\u7528\u5e76\u81ea\u52a8\u68c0\u6d4b\u5bf9\u5e94 GPU \u7c7b\u578b\u3002 \u76ee\u524d\u96c6\u7fa4\u4f1a\u81ea\u52a8\u542f\u7528 GPU \uff0c\u5e76\u4e14\u8bbe\u7f6e GPU \u7c7b\u578b\u4e3a Nvidia GPU \u3002

                                                                          2. \u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u70b9\u51fb\u5bf9\u5e94 \u96c6\u7fa4 -> \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u90e8\u7f72\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u9009\u62e9\u7c7b\u578b\uff08Nvidia GPU\uff09\u4e4b\u540e\uff0c\u9700\u8981\u914d\u7f6e\u5e94\u7528\u4f7f\u7528\u7684\u7269\u7406\u5361\u6570\u91cf\uff1a

                                                                            \u7269\u7406\u5361\u6570\u91cf\uff08nvidia.com/gpu\uff09 \uff1a\u8868\u793a\u5f53\u524d Pod \u9700\u8981\u6302\u8f7d\u51e0\u5f20\u7269\u7406\u5361\uff0c\u8f93\u5165\u503c\u5fc5\u987b\u4e3a\u6574\u6570\u4e14 \u5c0f\u4e8e\u7b49\u4e8e \u5bbf\u4e3b\u673a\u4e0a\u7684\u5361\u6570\u91cf\u3002

                                                                            \u5982\u679c\u4e0a\u8ff0\u503c\u914d\u7f6e\u7684\u6709\u95ee\u9898\u5219\u4f1a\u51fa\u73b0\u8c03\u5ea6\u5931\u8d25\uff0c\u8d44\u6e90\u5206\u914d\u4e0d\u4e86\u7684\u60c5\u51b5\u3002

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/full_gpu_userguide.html#yaml","title":"\u4f7f\u7528 YAML \u914d\u7f6e","text":"

                                                                          \u521b\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u7533\u8bf7 GPU \u8d44\u6e90\uff0c\u5728\u8d44\u6e90\u7533\u8bf7\u548c\u9650\u5236\u914d\u7f6e\u4e2d\u589e\u52a0 nvidia.com/gpu: 1 \u53c2\u6570\u914d\u7f6e\u5e94\u7528\u4f7f\u7528\u7269\u7406\u5361\u7684\u6570\u91cf\u3002

apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-gpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-gpu-demo\n  template:\n    metadata:\n      labels:\n        app: full-gpu-demo\n    spec:\n      containers:\n      - image: chrstnhntschl/gpu_burn\n        name: container-0\n        resources:\n          requests:\n            cpu: 250m\n            memory: 512Mi\n            nvidia.com/gpu: 1   # number of GPUs requested\n          limits:\n            cpu: 250m\n            memory: 512Mi\n            nvidia.com/gpu: 1   # upper limit on the number of GPUs used\n      imagePullSecrets:\n      - name: default-secret\n

                                                                          Note

When the nvidia.com/gpu parameter is used to specify the number of GPUs, the requests and limits values must be identical.
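To confirm that the card is registered as allocatable before scheduling, one option is the sketch below (the dot in the resource name must be escaped in custom-columns):

kubectl get nodes -o custom-columns='NAME:.metadata.name,GPU:.status.allocatable.nvidia\.com/gpu'\n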

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html","title":"GPU Operator \u79bb\u7ebf\u5b89\u88c5","text":"

                                                                          \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9884\u7f6e\u4e86 Ubuntu22.04\u3001Ubuntu20.04\u3001CentOS 7.9 \u8fd9\u4e09\u4e2a\u64cd\u4f5c\u7cfb\u7edf\u7684 Driver \u955c\u50cf\uff0c\u9a71\u52a8\u7248\u672c\u662f 535.104.12\uff1b \u5e76\u4e14\u5185\u7f6e\u4e86\u5404\u64cd\u4f5c\u7cfb\u7edf\u6240\u9700\u7684 Toolkit \u955c\u50cf\uff0c\u7528\u6237\u4e0d\u518d\u9700\u8981\u624b\u52a8\u79bb\u7ebf Toolkit \u955c\u50cf\u3002

                                                                          \u672c\u6587\u4f7f\u7528 AMD \u67b6\u6784\u7684 CentOS 7.9\uff083.10.0-1160\uff09\u8fdb\u884c\u6f14\u793a\u3002\u5982\u9700\u4f7f\u7528 Red Hat 8.4 \u90e8\u7f72\uff0c \u8bf7\u53c2\u8003\u5411\u706b\u79cd\u8282\u70b9\u4ed3\u5e93\u4e0a\u4f20 Red Hat GPU Opreator \u79bb\u7ebf\u955c\u50cf\u548c\u6784\u5efa Red Hat 8.4 \u79bb\u7ebf yum \u6e90\u3002

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          • \u5f85\u90e8\u7f72 gpu-operator \u7684\u96c6\u7fa4\u8282\u70b9\u5185\u6838\u7248\u672c\u5fc5\u987b\u5b8c\u5168\u4e00\u81f4\u3002\u8282\u70b9\u6240\u5728\u7684\u53d1\u884c\u7248\u548c GPU \u5361\u578b\u53f7\u5728 GPU \u652f\u6301\u77e9\u9635\u7684\u8303\u56f4\u5185\u3002
                                                                          • \u5b89\u88c5 gpu-operator \u65f6\u9009\u62e9 v23.9.0+2 \u53ca\u4ee5\u4e0a\u7248\u672c
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                                          \u53c2\u8003\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 gpu-operator \u63d2\u4ef6\u3002

1. Log in to the platform and go to Container Management -> the cluster where gpu-operator is to be installed -> cluster details.

2. On the Helm Templates page, select All Repositories and search for gpu-operator.

3. Select gpu-operator and click Install.

4. Configure the gpu-operator installation parameters as described below to complete the installation.

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_3","title":"\u53c2\u6570\u914d\u7f6e","text":"
                                                                          • systemOS \uff1a\u9009\u62e9\u673a\u5668\u7684\u64cd\u4f5c\u7cfb\u7edf\uff0c\u5f53\u524d\u5185\u7f6e\u4e86 Ubuntu 22.04\u3001Ubuntu20.04\u3001Centos7.9 \u3001other \u56db\u4e2a\u9009\u9879\uff0c\u8bf7\u6b63\u786e\u7684\u9009\u62e9\u64cd\u4f5c\u7cfb\u7edf\u3002
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_4","title":"\u57fa\u672c\u53c2\u6570\u914d\u7f6e","text":"
                                                                          • \u540d\u79f0 \uff1a\u8f93\u5165\u63d2\u4ef6\u540d\u79f0\u3002
                                                                          • \u547d\u540d\u7a7a\u95f4 \uff1a\u9009\u62e9\u5c06\u63d2\u4ef6\u5b89\u88c5\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                                                          • \u7248\u672c \uff1a\u63d2\u4ef6\u7684\u7248\u672c\uff0c\u6b64\u5904\u4ee5 v23.9.0+2 \u7248\u672c\u4e3a\u4f8b\u3002
                                                                          • \u5931\u8d25\u5220\u9664 \uff1a\u5b89\u88c5\u5931\u8d25\uff0c\u5219\u5220\u9664\u5df2\u7ecf\u5b89\u88c5\u7684\u5173\u8054\u8d44\u6e90\u3002\u5f00\u542f\u540e\uff0c\u5c06\u9ed8\u8ba4\u540c\u6b65\u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u3002
                                                                          • \u5c31\u7eea\u7b49\u5f85 \uff1a\u542f\u7528\u540e\uff0c\u6240\u6709\u5173\u8054\u8d44\u6e90\u90fd\u5904\u4e8e\u5c31\u7eea\u72b6\u6001\uff0c\u624d\u4f1a\u6807\u8bb0\u5e94\u7528\u5b89\u88c5\u6210\u529f\u3002
                                                                          • \u8be6\u60c5\u65e5\u5fd7 \uff1a\u5f00\u542f\u540e\uff0c\u5c06\u8bb0\u5f55\u5b89\u88c5\u8fc7\u7a0b\u7684\u8be6\u7ec6\u65e5\u5fd7\u3002
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_5","title":"\u9ad8\u7ea7\u53c2\u6570\u914d\u7f6e","text":""},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#operator","title":"Operator \u53c2\u6570\u914d\u7f6e","text":"
                                                                          • InitContainer.image \uff1a\u914d\u7f6e CUDA \u955c\u50cf\uff0c\u63a8\u8350\u9ed8\u8ba4\u955c\u50cf\uff1a nvidia/cuda
                                                                          • InitContainer.repository \uff1aCUDA \u955c\u50cf\u6240\u5728\u7684\u955c\u50cf\u4ed3\u5e93\uff0c\u9ed8\u8ba4\u4e3a nvcr.m.daocloud.io \u4ed3\u5e93
                                                                          • InitContainer.version : CUDA \u955c\u50cf\u7684\u7248\u672c\uff0c\u8bf7\u4f7f\u7528\u9ed8\u8ba4\u53c2\u6570
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#driver","title":"Driver \u53c2\u6570\u914d\u7f6e","text":"
                                                                          • Driver.enable \uff1a\u914d\u7f6e\u662f\u5426\u5728\u8282\u70b9\u4e0a\u90e8\u7f72 NVIDIA \u9a71\u52a8\uff0c\u9ed8\u8ba4\u5f00\u542f\uff0c\u5982\u679c\u60a8\u5728\u4f7f\u7528 GPU Operator \u90e8\u7f72\u524d\uff0c\u5df2\u7ecf\u5728\u8282\u70b9\u4e0a\u90e8\u7f72\u4e86 NVIDIA \u9a71\u52a8\u7a0b\u5e8f\uff0c\u8bf7\u5173\u95ed\u3002\uff08\u82e5\u624b\u52a8\u90e8\u7f72\u9a71\u52a8\u7a0b\u5e8f\u9700\u8981\u5173\u6ce8 CUDA Toolkit \u4e0e Toolkit Driver Version \u7684\u9002\u914d\u5173\u7cfb\uff0c\u901a\u8fc7 GPU operator \u5b89\u88c5\u5219\u65e0\u9700\u5173\u6ce8\uff09\u3002
                                                                          • Driver.usePrecompiled \uff1a\u542f\u7528\u9884\u7f16\u8bd1\u7684GPU\u9a71\u52a8
                                                                          • Driver.image \uff1a\u914d\u7f6e GPU \u9a71\u52a8\u955c\u50cf\uff0c\u63a8\u8350\u9ed8\u8ba4\u955c\u50cf\uff1a nvidia/driver \u3002
                                                                          • Driver.repository \uff1aGPU \u9a71\u52a8\u955c\u50cf\u6240\u5728\u7684\u955c\u50cf\u4ed3\u5e93\uff0c\u9ed8\u8ba4\u4e3a nvidia \u7684 nvcr.io \u4ed3\u5e93\u3002
                                                                          • Driver.usePrecompiled \uff1a\u5f00\u542f\u9884\u7f16\u8bd1\u6a21\u5f0f\u5b89\u88c5\u9a71\u52a8\u3002
                                                                          • Driver.version \uff1aGPU \u9a71\u52a8\u955c\u50cf\u7684\u7248\u672c\uff0c\u79bb\u7ebf\u90e8\u7f72\u8bf7\u4f7f\u7528\u9ed8\u8ba4\u53c2\u6570\uff0c\u4ec5\u5728\u7ebf\u5b89\u88c5\u65f6\u9700\u914d\u7f6e\u3002\u4e0d\u540c\u7c7b\u578b\u64cd\u4f5c\u7cfb\u7edf\u7684 Driver \u955c\u50cf\u7684\u7248\u672c\u5b58\u5728\u5982\u4e0b\u5dee\u5f02\uff0c \u8be6\u60c5\u53ef\u53c2\u8003\uff1aNvidia GPU Driver \u7248\u672c\u3002 \u5982\u4e0b\u4e0d\u540c\u64cd\u4f5c\u7cfb\u7edf\u7684 Driver Version \u793a\u4f8b\uff1a

                                                                            Note

There is no need to modify the image version when using a built-in operating system version; for other operating system versions, refer to Uploading Images to the Bootstrap Node Repository. Note that no operating system name such as Ubuntu, CentOS, or Red Hat should be appended after the version number; if the official image carries an operating system suffix, remove it manually.

  • For Red Hat systems, for example 525.105.17
  • For Ubuntu systems, for example 535-5.15.0-1043-nvidia
  • For CentOS systems, for example 525.147.05
• Driver.RepoConfig.ConfigMapName: records the name of the offline yum source configuration file for the GPU Operator. When using the preset offline packages, refer to the following documents for the respective operating system types.

  • Building a CentOS 7.9 offline yum source
  • Building a Red Hat 8.4 offline yum source
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#toolkit","title":"Toolkit \u914d\u7f6e\u53c2\u6570","text":"

                                                                          Toolkit.enable \uff1a\u9ed8\u8ba4\u5f00\u542f\uff0c\u8be5\u7ec4\u4ef6\u8ba9 conatainerd/docker \u652f\u6301\u8fd0\u884c\u9700\u8981 GPU \u7684\u5bb9\u5668\u3002

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#mig","title":"MIG \u914d\u7f6e\u53c2\u6570","text":"

                                                                          \u8be6\u7ec6\u914d\u7f6e\u65b9\u5f0f\u8bf7\u53c2\u8003\u5f00\u542f MIG \u529f\u80fd

                                                                          MigManager.Config.name \uff1aMIG \u7684\u5207\u5206\u914d\u7f6e\u6587\u4ef6\u540d\uff0c\u7528\u4e8e\u5b9a\u4e49 MIG \u7684\uff08GI, CI\uff09\u5207\u5206\u7b56\u7565\u3002 \u9ed8\u8ba4\u4e3a default-mig-parted-config \u3002\u81ea\u5b9a\u4e49\u53c2\u6570\u53c2\u8003\u5f00\u542f MIG \u529f\u80fd\u3002

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#_6","title":"\u4e0b\u4e00\u6b65\u64cd\u4f5c","text":"

                                                                          \u5b8c\u6210\u4e0a\u8ff0\u76f8\u5173\u53c2\u6570\u914d\u7f6e\u548c\u521b\u5efa\u540e\uff1a

                                                                          • \u5982\u679c\u4f7f\u7528 \u6574\u5361\u6a21\u5f0f\uff0c\u5e94\u7528\u521b\u5efa\u65f6\u53ef\u4f7f\u7528 GPU \u8d44\u6e90

                                                                          • \u5982\u679c\u4f7f\u7528 vGPU \u6a21\u5f0f \uff0c\u5b8c\u6210\u4e0a\u8ff0\u76f8\u5173\u53c2\u6570\u914d\u7f6e\u548c\u521b\u5efa\u540e\uff0c\u4e0b\u4e00\u6b65\u8bf7\u5b8c\u6210 vGPU Addon \u5b89\u88c5

                                                                          • \u5982\u679c\u4f7f\u7528 MIG \u6a21\u5f0f\uff0c\u5e76\u4e14\u9700\u8981\u7ed9\u4e2a\u522b GPU \u8282\u70b9\u6309\u7167\u67d0\u79cd\u5207\u5206\u89c4\u683c\u8fdb\u884c\u4f7f\u7528\uff0c \u5426\u5219\u6309\u7167 MigManager.Config \u4e2d\u7684 default \u503c\u8fdb\u884c\u5207\u5206\u3002

                                                                            • single \u6a21\u5f0f\u8bf7\u7ed9\u5bf9\u5e94\u8282\u70b9\u6253\u4e0a\u5982\u4e0b Label\uff1a

                                                                              kubectl label nodes {node} nvidia.com/mig.config=\"all-1g.10gb\" --overwrite\n
• For mixed mode, apply the following label to the corresponding node:

                                                                              kubectl label nodes {node} nvidia.com/mig.config=\"custom-config\" --overwrite\n

After partitioning, applications can use MIG GPU resources; a verification sketch follows.
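To check which MIG resources a node now exposes (substitute your node name):

kubectl describe node {node} | grep nvidia.com/mig\n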

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/push_image_to_repo.html","title":"\u5411\u706b\u79cd\u8282\u70b9\u4ed3\u5e93\u4e0a\u4f20 Red Hat GPU Opreator \u79bb\u7ebf\u955c\u50cf","text":"

                                                                          \u672c\u6587\u4ee5 Red Hat 8.4 \u7684 nvcr.io/nvidia/driver:525.105.17-rhel8.4 \u79bb\u7ebf\u9a71\u52a8\u955c\u50cf\u4e3a\u4f8b\uff0c\u4ecb\u7ecd\u5982\u4f55\u5411\u706b\u79cd\u8282\u70b9\u4ed3\u5e93\u4e0a\u4f20\u79bb\u7ebf\u955c\u50cf\u3002

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/push_image_to_repo.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          1. \u706b\u79cd\u8282\u70b9\u53ca\u5176\u7ec4\u4ef6\u72b6\u6001\u8fd0\u884c\u6b63\u5e38\u3002
                                                                          2. \u51c6\u5907\u4e00\u4e2a\u80fd\u591f\u8bbf\u95ee\u4e92\u8054\u7f51\u548c\u706b\u79cd\u8282\u70b9\u7684\u8282\u70b9\uff0c\u4e14\u8282\u70b9\u4e0a\u5df2\u7ecf\u5b8c\u6210 Docker \u7684\u5b89\u88c5\u3002
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/push_image_to_repo.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"end-user/kpanda/gpu/nvidia/push_image_to_repo.html#_3","title":"\u5728\u8054\u7f51\u8282\u70b9\u83b7\u53d6\u79bb\u7ebf\u955c\u50cf","text":"

                                                                          \u4ee5\u4e0b\u64cd\u4f5c\u5728\u8054\u7f51\u8282\u70b9\u4e0a\u8fdb\u884c\u3002

                                                                          1. On the Internet-connected machine, pull the nvcr.io/nvidia/driver:525.105.17-rhel8.4 offline driver image:

                                                                            docker pull nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
                                                                          2. Once the image is pulled, package it into an archive named nvidia-driver.tar:

                                                                            docker save nvcr.io/nvidia/driver:525.105.17-rhel8.4 > nvidia-driver.tar\n
                                                                          3. Copy the nvidia-driver.tar image archive to the bootstrap node:

                                                                            scp  nvidia-driver.tar user@ip:/root\n

                                                                            For example:

                                                                            scp  nvidia-driver.tar root@10.6.175.10:/root\n
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/push_image_to_repo.html#_4","title":"\u63a8\u9001\u955c\u50cf\u5230\u706b\u79cd\u8282\u70b9\u4ed3\u5e93","text":"

                                                                          Perform the following operations on the bootstrap node.

                                                                          1. Log in to the bootstrap node and load the image archive nvidia-driver.tar copied from the Internet-connected node:

                                                                            docker load -i nvidia-driver.tar\n
                                                                          2. View the image you just loaded:

                                                                            docker images -a |grep nvidia\n

                                                                            Expected output:

                                                                            nvcr.io/nvidia/driver    525.105.17-rhel8.4    e3ed7dee73e9   1 day ago   1.02GB\n
                                                                          3. Retag the image so that it corresponds to the target repository in the remote registry:

                                                                            docker tag <image-name> <registry-url>/<repository-name>:<tag>\n
                                                                            • <image-name> is the name of the nvidia image from the previous step,
                                                                            • <registry-url> is the address of the Registry service on the bootstrap node,
                                                                            • <repository-name> is the name of the repository you are pushing to,
                                                                            • <tag> is the tag you assign to the image.

                                                                            For example:

                                                                            docker tag nvcr.io/nvidia/driver:525.105.17-rhel8.4 10.6.10.5/nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
                                                                          4. Push the image to the bootstrap node image registry; you can then verify the push as sketched below:

                                                                            docker push {ip}/nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
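
                                                                          As a hedged verification step (this assumes the bootstrap registry exposes the standard Docker Registry v2 HTTP API without authentication), list the tags of the repository you just pushed:

                                                                          curl http://{ip}/v2/nvcr.io/nvidia/driver/tags/list\n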
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/push_image_to_repo.html#_5","title":"\u63a5\u4e0b\u6765","text":"

                                                                          Refer to Building a Red Hat 8.4 Offline Yum Repository and Installing GPU Operator Offline to deploy the GPU Operator for your cluster.

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/rhel9.2_offline_install_driver.html","title":"RHEL 9.2 \u79bb\u7ebf\u5b89\u88c5 gpu-operator \u9a71\u52a8","text":"

                                                                          Prerequisite: gpu-operator v23.9.0+2 or later is installed.

                                                                          The RHEL 9.2 driver image cannot be installed directly because the official driver script has a defect. Until it is fixed upstream, use the following steps to install the driver offline.

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/rhel9.2_offline_install_driver.html#nouveau","title":"\u7981\u7528nouveau\u9a71\u52a8","text":"

                                                                          RHEL 9.2 ships with nouveau, an unofficial NVIDIA driver, so it must be disabled first.

                                                                          # Create a new file\nsudo vi /etc/modprobe.d/blacklist-nouveau.conf\n# Add the following two lines:\nblacklist nouveau\noptions nouveau modeset=0\n# Disable nouveau\nsudo dracut --force\n# Reboot the VM\nsudo reboot\n# Check that nouveau was successfully disabled\nlsmod | grep nouveau\n
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/rhel9.2_offline_install_driver.html#_1","title":"\u81ea\u5b9a\u4e49\u9a71\u52a8\u955c\u50cf","text":"

                                                                          First, create the nvidia-driver file locally:

                                                                          Click to view the complete content of the nvidia-driver file
                                                                          #! /bin/bash -x\n# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.\n\nset -eu\n\nRUN_DIR=/run/nvidia\nPID_FILE=${RUN_DIR}/${0##*/}.pid\nDRIVER_VERSION=${DRIVER_VERSION:?\"Missing DRIVER_VERSION env\"}\nKERNEL_UPDATE_HOOK=/run/kernel/postinst.d/update-nvidia-driver\nNUM_VGPU_DEVICES=0\nNVIDIA_MODULE_PARAMS=()\nNVIDIA_UVM_MODULE_PARAMS=()\nNVIDIA_MODESET_MODULE_PARAMS=()\nNVIDIA_PEERMEM_MODULE_PARAMS=()\nTARGETARCH=${TARGETARCH:?\"Missing TARGETARCH env\"}\nUSE_HOST_MOFED=\"${USE_HOST_MOFED:-false}\"\nDNF_RELEASEVER=${DNF_RELEASEVER:-\"\"}\nRHEL_VERSION=${RHEL_VERSION:-\"\"}\nRHEL_MAJOR_VERSION=9\n\nOPEN_KERNEL_MODULES_ENABLED=${OPEN_KERNEL_MODULES_ENABLED:-false}\n[[ \"${OPEN_KERNEL_MODULES_ENABLED}\" == \"true\" ]] && KERNEL_TYPE=kernel-open || KERNEL_TYPE=kernel\n\nDRIVER_ARCH=${TARGETARCH/amd64/x86_64} && DRIVER_ARCH=${DRIVER_ARCH/arm64/aarch64}\necho \"DRIVER_ARCH is $DRIVER_ARCH\"\n\nSCRIPT_DIR=$( cd -- \"$( dirname -- \"${BASH_SOURCE[0]}\" )\" &> /dev/null && pwd )\nsource $SCRIPT_DIR/common.sh\n\n_update_package_cache() {\n    if [ \"${PACKAGE_TAG:-}\" != \"builtin\" ]; then\n        echo \"Updating the package cache...\"\n        if ! yum -q makecache; then\n            echo \"FATAL: failed to reach RHEL package repositories. \"\\\n                 \"Ensure that the cluster can access the proper networks.\"\n            exit 1\n        fi\n    fi\n}\n\n_cleanup_package_cache() {\n    if [ \"${PACKAGE_TAG:-}\" != \"builtin\" ]; then\n        echo \"Cleaning up the package cache...\"\n        rm -rf /var/cache/yum/*\n    fi\n}\n\n_get_rhel_version_from_kernel() {\n    local rhel_version_underscore rhel_version_arr\n    rhel_version_underscore=$(echo \"${KERNEL_VERSION}\" | sed 's/.*el\\([0-9]\\+_[0-9]\\+\\).*/\\1/g')\n    # For e.g. :- from the kernel version 4.18.0-513.9.1.el8_9, we expect to extract the string \"8_9\"\n    if [[ ! 
${rhel_version_underscore} =~ ^[0-9]+_[0-9]+$ ]]; then\n        echo \"Unable to resolve RHEL version from kernel version\" >&2\n        return 1\n    fi\n    IFS='_' read -r -a rhel_version_arr <<< \"$rhel_version_underscore\"\n    if [[ ${#rhel_version_arr[@]} -ne 2 ]]; then\n        echo \"Unable to resolve RHEL version from kernel version\" >&2\n        return 1\n    fi\n    RHEL_VERSION=\"${rhel_version_arr[0]}.${rhel_version_arr[1]}\"\n    echo \"RHEL VERSION successfully resolved from kernel: ${RHEL_VERSION}\"\n    return 0\n}\n\n_resolve_rhel_version() {\n    _get_rhel_version_from_kernel || RHEL_VERSION=\"${RHEL_MAJOR_VERSION}\"\n    # set dnf release version as rhel version by default\n    if [[ -z \"${DNF_RELEASEVER}\" ]]; then\n        DNF_RELEASEVER=\"${RHEL_VERSION}\"\n    fi\n    return 0\n}\n\n# Resolve the kernel version to the form major.minor.patch-revision.\n_resolve_kernel_version() {\n    echo \"Resolving Linux kernel version...\"\n    local version=$(yum -q list available --showduplicates kernel-headers |\n      awk -v arch=$(uname -m) 'NR>1 {print $2\".\"arch}' | tac | grep -E -m1 \"^${KERNEL_VERSION/latest/.*}\")\n\n    if [ -z \"${version}\" ]; then\n        echo \"Could not resolve Linux kernel version\" >&2\n        return 1\n    fi\n    KERNEL_VERSION=\"${version}\"\n    echo \"Proceeding with Linux kernel version ${KERNEL_VERSION}\"\n    return 0\n}\n\n# Install the kernel modules header/builtin/order files and generate the kernel version string.\n_install_prerequisites() (\n    local tmp_dir=$(mktemp -d)\n\n    trap \"rm -rf ${tmp_dir}\" EXIT\n    cd ${tmp_dir}\n\n    echo \"Installing elfutils...\"\n    if ! dnf install -q -y elfutils-libelf.$DRIVER_ARCH; then\n        echo \"FATAL: failed to install elfutils packages. RHEL entitlement may be improperly deployed.\"\n        exit 1\n    fi\n    if ! dnf install -q -y elfutils-libelf-devel.$DRIVER_ARCH; then\n        echo \"FATAL: failed to install elfutils packages. RHEL entitlement may be improperly deployed.\"\n        exit 1\n    fi    \n\n    rm -rf /lib/modules/${KERNEL_VERSION}\n    mkdir -p /lib/modules/${KERNEL_VERSION}/proc\n\n    echo \"Enabling RHOCP and EUS RPM repos...\"\n    if [ -n \"${OPENSHIFT_VERSION:-}\" ]; then\n        dnf config-manager --set-enabled rhocp-${OPENSHIFT_VERSION}-for-rhel-9-$DRIVER_ARCH-rpms || true\n        if ! dnf makecache --releasever=${DNF_RELEASEVER}; then\n            dnf config-manager --set-disabled rhocp-${OPENSHIFT_VERSION}-for-rhel-9-$DRIVER_ARCH-rpms || true\n        fi\n    fi\n\n    dnf config-manager --set-enabled rhel-9-for-$DRIVER_ARCH-baseos-eus-rpms  || true\n    if ! dnf makecache --releasever=${DNF_RELEASEVER}; then\n            dnf config-manager --set-disabled rhel-9-for-$DRIVER_ARCH-baseos-eus-rpms || true\n    fi\n\n    # try with EUS disabled, if it does not work, then try just major version\n    if ! dnf makecache --releasever=${DNF_RELEASEVER}; then\n      # If pointing to DNF_RELEASEVER does not work, we point to the RHEL_MAJOR_VERSION as a last resort\n      if ! 
dnf makecache --releasever=${RHEL_MAJOR_VERSION}; then\n        echo \"FATAL: failed to update the dnf metadata cache after multiple attempts with releasevers ${DNF_RELEASEVER}, ${RHEL_MAJOR_VERSION}\"\n        exit 1\n      else\n        DNF_RELEASEVER=${RHEL_MAJOR_VERSION}\n      fi\n    fi\n\n    echo \"Installing Linux kernel headers...\"\n    dnf -q -y --releasever=${DNF_RELEASEVER} install kernel-headers-${KERNEL_VERSION} kernel-devel-${KERNEL_VERSION} --allowerasing > /dev/null\n    ln -s /usr/src/kernels/${KERNEL_VERSION} /lib/modules/${KERNEL_VERSION}/build\n\n    echo \"Installing Linux kernel module files...\"\n    dnf -q -y --releasever=${DNF_RELEASEVER} install kernel-core-${KERNEL_VERSION} > /dev/null\n\n    # Prevent depmod from giving a WARNING about missing files\n    touch /lib/modules/${KERNEL_VERSION}/modules.order\n    touch /lib/modules/${KERNEL_VERSION}/modules.builtin\n\n    depmod ${KERNEL_VERSION}\n\n    echo \"Generating Linux kernel version string...\"\n    if [ \"$TARGETARCH\" = \"arm64\" ]; then\n        gunzip -c /lib/modules/${KERNEL_VERSION}/vmlinuz | strings | grep -E '^Linux version' | sed 's/^\\(.*\\)\\s\\+(.*)$/\\1/' > version\n    else\n        extract-vmlinux /lib/modules/${KERNEL_VERSION}/vmlinuz | strings | grep -E '^Linux version' | sed 's/^\\(.*\\)\\s\\+(.*)$/\\1/' > version\n    fi\n    if [ -z \"$(<version)\" ]; then\n        echo \"Could not locate Linux kernel version string\" >&2\n        return 1\n    fi\n    mv version /lib/modules/${KERNEL_VERSION}/proc\n\n    # Parse gcc version\n    # gcc_version is expected to match x.y.z\n    # current_gcc is expected to match 'gcc-x.y.z-rel.el8.x86_64\n    local gcc_version=$(cat /lib/modules/${KERNEL_VERSION}/proc/version | grep -Eo \"gcc \\(GCC\\) ([0-9\\.]+)\" | grep -Eo \"([0-9\\.]+)\")\n    local current_gcc=$(rpm -qa gcc)\n    echo \"kernel requires gcc version: 'gcc-${gcc_version}', current gcc version is '${current_gcc}'\"\n\n    if ! [[ \"${current_gcc}\" =~ \"gcc-${gcc_version}\"-.* ]]; then\n        dnf install -q -y --releasever=${DNF_RELEASEVER} \"gcc-${gcc_version}\"\n    fi\n)\n\n# Cleanup the prerequisites installed above.\n_remove_prerequisites() {\n    true\n    if [ \"${PACKAGE_TAG:-}\" != \"builtin\" ]; then\n        dnf -q -y remove kernel-headers-${KERNEL_VERSION} kernel-devel-${KERNEL_VERSION} > /dev/null\n        # TODO remove module files not matching an existing driver package.\n    fi\n}\n\n# Check if the kernel version requires a new precompiled driver packages.\n_kernel_requires_package() {\n    local proc_mount_arg=\"\"\n\n    echo \"Checking NVIDIA driver packages...\"\n\n    [[ ! 
-d /usr/src/nvidia-${DRIVER_VERSION}/${KERNEL_TYPE} ]] && return 0\n    cd /usr/src/nvidia-${DRIVER_VERSION}/${KERNEL_TYPE}\n\n    proc_mount_arg=\"--proc-mount-point /lib/modules/${KERNEL_VERSION}/proc\"\n    for pkg_name in $(ls -d -1 precompiled/** 2> /dev/null); do\n        is_match=$(../mkprecompiled --match ${pkg_name} ${proc_mount_arg})\n        if [ \"${is_match}\" == \"kernel interface matches.\" ]; then\n            echo \"Found NVIDIA driver package ${pkg_name##*/}\"\n            return 1\n        fi\n    done\n    return 0\n}\n\n# Compile the kernel modules, optionally sign them, and generate a precompiled package for use by the nvidia-installer.\n_create_driver_package() (\n    local pkg_name=\"nvidia-modules-${KERNEL_VERSION%%-*}${PACKAGE_TAG:+-${PACKAGE_TAG}}\"\n    local nvidia_sign_args=\"\"\n    local nvidia_modeset_sign_args=\"\"\n    local nvidia_uvm_sign_args=\"\"\n\n    trap \"make -s -j ${MAX_THREADS} SYSSRC=/lib/modules/${KERNEL_VERSION}/build clean > /dev/null\" EXIT\n\n    echo \"Compiling NVIDIA driver kernel modules...\"\n    cd /usr/src/nvidia-${DRIVER_VERSION}/${KERNEL_TYPE}\n\n    if _gpu_direct_rdma_enabled; then\n        ln -s /run/mellanox/drivers/usr/src/ofa_kernel /usr/src/\n        # if arch directory exists(MOFED >=5.5) then create a symlink as expected by GPU driver installer\n        # This is required as currently GPU driver installer doesn't expect headers in x86_64 folder, but only in either default or kernel-version folder.\n        # ls -ltr /usr/src/ofa_kernel/\n        # lrwxrwxrwx 1 root root   36 Dec  8 20:10 default -> /etc/alternatives/ofa_kernel_headers\n        # drwxr-xr-x 4 root root 4096 Dec  8 20:14 x86_64\n        # lrwxrwxrwx 1 root root   44 Dec  9 19:05 5.4.0-90-generic -> /usr/src/ofa_kernel/x86_64/5.4.0-90-generic/\n        if [[ -d \"/run/mellanox/drivers/usr/src/ofa_kernel/$(uname -m)/$(uname -r)\" ]]; then\n            if [[ ! 
-e \"/usr/src/ofa_kernel/$(uname -r)\" ]]; then\n                ln -s \"/run/mellanox/drivers/usr/src/ofa_kernel/$(uname -m)/$(uname -r)\" /usr/src/ofa_kernel/\n            fi\n        fi\n    fi\n\n    make -s -j ${MAX_THREADS} SYSSRC=/lib/modules/${KERNEL_VERSION}/build nv-linux.o nv-modeset-linux.o > /dev/null\n\n    echo \"Relinking NVIDIA driver kernel modules...\"\n    rm -f nvidia.ko nvidia-modeset.ko\n    ld -d -r -o nvidia.ko ./nv-linux.o ./nvidia/nv-kernel.o_binary\n    ld -d -r -o nvidia-modeset.ko ./nv-modeset-linux.o ./nvidia-modeset/nv-modeset-kernel.o_binary\n\n    if [ -n \"${PRIVATE_KEY}\" ]; then\n        echo \"Signing NVIDIA driver kernel modules...\"\n        donkey get ${PRIVATE_KEY} sh -c \"PATH=${PATH}:/usr/src/linux-headers-${KERNEL_VERSION}/scripts && \\\n          sign-file sha512 \\$DONKEY_FILE pubkey.x509 nvidia.ko nvidia.ko.sign &&                          \\\n          sign-file sha512 \\$DONKEY_FILE pubkey.x509 nvidia-modeset.ko nvidia-modeset.ko.sign &&          \\\n          sign-file sha512 \\$DONKEY_FILE pubkey.x509 nvidia-uvm.ko\"\n        nvidia_sign_args=\"--linked-module nvidia.ko --signed-module nvidia.ko.sign\"\n        nvidia_modeset_sign_args=\"--linked-module nvidia-modeset.ko --signed-module nvidia-modeset.ko.sign\"\n        nvidia_uvm_sign_args=\"--signed\"\n    fi\n\n    echo \"Building NVIDIA driver package ${pkg_name}...\"\n    ../mkprecompiled --pack ${pkg_name} --description ${KERNEL_VERSION}                              \\\n                                        --proc-mount-point /lib/modules/${KERNEL_VERSION}/proc       \\\n                                        --driver-version ${DRIVER_VERSION}                           \\\n                                        --kernel-interface nv-linux.o                                \\\n                                        --linked-module-name nvidia.ko                               \\\n                                        --core-object-name nvidia/nv-kernel.o_binary                 \\\n                                        ${nvidia_sign_args}                                          \\\n                                        --target-directory .                                         \\\n                                        --kernel-interface nv-modeset-linux.o                        \\\n                                        --linked-module-name nvidia-modeset.ko                       \\\n                                        --core-object-name nvidia-modeset/nv-modeset-kernel.o_binary \\\n                                        ${nvidia_modeset_sign_args}                                  \\\n                                        --target-directory .                                         
\\\n                                        --kernel-module nvidia-uvm.ko                                \\\n                                        ${nvidia_uvm_sign_args}                                      \\\n                                        --target-directory .\n    mkdir -p precompiled\n    mv ${pkg_name} precompiled\n)\n\n_assert_nvswitch_system() {\n    [ -d /proc/driver/nvidia-nvswitch ] || return 1\n    entries=$(ls -1 /proc/driver/nvidia-nvswitch/devices/*)\n    if [ -z \"${entries}\" ]; then\n        return 1\n    fi\n    return 0\n}\n\n# For each kernel module configuration file mounted into the container,\n# parse the file contents and extract the custom module parameters that\n# are to be passed as input to 'modprobe'.\n#\n# Assumptions:\n# - Configuration files are named <module-name>.conf (i.e. nvidia.conf, nvidia-uvm.conf).\n# - Configuration files are mounted inside the container at /drivers.\n# - Each line in the file contains at least one parameter, where parameters on the same line\n#   are space delimited. It is up to the user to properly format the file to ensure\n#   the correct set of parameters are passed to 'modprobe'.\n_get_module_params() {\n    local base_path=\"/drivers\"\n    # nvidia\n    if [ -f \"${base_path}/nvidia.conf\" ]; then\n       while IFS=\"\" read -r param || [ -n \"$param\" ]; do\n           NVIDIA_MODULE_PARAMS+=(\"$param\")\n       done <\"${base_path}/nvidia.conf\"\n       echo \"Module parameters provided for nvidia: ${NVIDIA_MODULE_PARAMS[@]}\"\n    fi\n    # nvidia-uvm\n    if [ -f \"${base_path}/nvidia-uvm.conf\" ]; then\n       while IFS=\"\" read -r param || [ -n \"$param\" ]; do\n           NVIDIA_UVM_MODULE_PARAMS+=(\"$param\")\n       done <\"${base_path}/nvidia-uvm.conf\"\n       echo \"Module parameters provided for nvidia-uvm: ${NVIDIA_UVM_MODULE_PARAMS[@]}\"\n    fi\n    # nvidia-modeset\n    if [ -f \"${base_path}/nvidia-modeset.conf\" ]; then\n       while IFS=\"\" read -r param || [ -n \"$param\" ]; do\n           NVIDIA_MODESET_MODULE_PARAMS+=(\"$param\")\n       done <\"${base_path}/nvidia-modeset.conf\"\n       echo \"Module parameters provided for nvidia-modeset: ${NVIDIA_MODESET_MODULE_PARAMS[@]}\"\n    fi\n    # nvidia-peermem\n    if [ -f \"${base_path}/nvidia-peermem.conf\" ]; then\n       while IFS=\"\" read -r param || [ -n \"$param\" ]; do\n           NVIDIA_PEERMEM_MODULE_PARAMS+=(\"$param\")\n       done <\"${base_path}/nvidia-peermem.conf\"\n       echo \"Module parameters provided for nvidia-peermem: ${NVIDIA_PEERMEM_MODULE_PARAMS[@]}\"\n    fi\n}\n\n# Load the kernel modules and start persistenced.\n_load_driver() {\n    echo \"Parsing kernel module parameters...\"\n    _get_module_params\n\n    local nv_fw_search_path=\"$RUN_DIR/driver/lib/firmware\"\n    local set_fw_path=\"true\"\n    local fw_path_config_file=\"/sys/module/firmware_class/parameters/path\"\n    for param in \"${NVIDIA_MODULE_PARAMS[@]}\"; do\n        if [[ \"$param\" == \"NVreg_EnableGpuFirmware=0\" ]]; then\n          set_fw_path=\"false\"\n        fi\n    done\n\n    if [[ \"$set_fw_path\" == \"true\" ]]; then\n        echo \"Configuring the following firmware search path in '$fw_path_config_file': $nv_fw_search_path\"\n        if [[ ! 
-z $(grep '[^[:space:]]' $fw_path_config_file) ]]; then\n            echo \"WARNING: A search path is already configured in $fw_path_config_file\"\n            echo \"         Retaining the current configuration\"\n        else\n            echo -n \"$nv_fw_search_path\" > $fw_path_config_file || echo \"WARNING: Failed to configure the firmware search path\"\n        fi\n    fi\n\n    echo \"Loading ipmi and i2c_core kernel modules...\"\n    modprobe -a i2c_core ipmi_msghandler ipmi_devintf\n\n    echo \"Loading NVIDIA driver kernel modules...\"\n    set -o xtrace +o nounset\n    modprobe nvidia \"${NVIDIA_MODULE_PARAMS[@]}\"\n    modprobe nvidia-uvm \"${NVIDIA_UVM_MODULE_PARAMS[@]}\"\n    modprobe nvidia-modeset \"${NVIDIA_MODESET_MODULE_PARAMS[@]}\"\n    set +o xtrace -o nounset\n\n    if _gpu_direct_rdma_enabled; then\n        echo \"Loading NVIDIA Peer Memory kernel module...\"\n        set -o xtrace +o nounset\n        modprobe -a nvidia-peermem \"${NVIDIA_PEERMEM_MODULE_PARAMS[@]}\"\n        set +o xtrace -o nounset\n    fi\n\n    echo \"Starting NVIDIA persistence daemon...\"\n    nvidia-persistenced --persistence-mode\n\n    if [ \"${DRIVER_TYPE}\" = \"vgpu\" ]; then\n        echo \"Copying gridd.conf...\"\n        cp /drivers/gridd.conf /etc/nvidia/gridd.conf\n        if [ \"${VGPU_LICENSE_SERVER_TYPE}\" = \"NLS\" ]; then\n            echo \"Copying ClientConfigToken...\"\n            mkdir -p  /etc/nvidia/ClientConfigToken/\n            cp /drivers/ClientConfigToken/* /etc/nvidia/ClientConfigToken/\n        fi\n\n        echo \"Starting nvidia-gridd..\"\n        LD_LIBRARY_PATH=/usr/lib64/nvidia/gridd nvidia-gridd\n\n        # Start virtual topology daemon\n        _start_vgpu_topology_daemon\n    fi\n\n    if _assert_nvswitch_system; then\n        echo \"Starting NVIDIA fabric manager daemon...\"\n        nv-fabricmanager -c /usr/share/nvidia/nvswitch/fabricmanager.cfg\n    fi\n}\n\n# Stop persistenced and unload the kernel modules if they are currently loaded.\n_unload_driver() {\n    local rmmod_args=()\n    local nvidia_deps=0\n    local nvidia_refs=0\n    local nvidia_uvm_refs=0\n    local nvidia_modeset_refs=0\n    local nvidia_peermem_refs=0\n\n    echo \"Stopping NVIDIA persistence daemon...\"\n    if [ -f /var/run/nvidia-persistenced/nvidia-persistenced.pid ]; then\n        local pid=$(< /var/run/nvidia-persistenced/nvidia-persistenced.pid)\n\n        kill -SIGTERM \"${pid}\"\n        for i in $(seq 1 50); do\n            kill -0 \"${pid}\" 2> /dev/null || break\n            sleep 0.1\n        done\n        if [ $i -eq 50 ]; then\n            echo \"Could not stop NVIDIA persistence daemon\" >&2\n            return 1\n        fi\n    fi\n\n    if [ -f /var/run/nvidia-gridd/nvidia-gridd.pid ]; then\n        echo \"Stopping NVIDIA grid daemon...\"\n        local pid=$(< /var/run/nvidia-gridd/nvidia-gridd.pid)\n\n        kill -SIGTERM \"${pid}\"\n        for i in $(seq 1 10); do\n            kill -0 \"${pid}\" 2> /dev/null || break\n            sleep 0.1\n        done\n        if [ $i -eq 10 ]; then\n            echo \"Could not stop NVIDIA Grid daemon\" >&2\n            return 1\n        fi\n    fi\n\n    if [ -f /var/run/nvidia-fabricmanager/nv-fabricmanager.pid ]; then\n        echo \"Stopping NVIDIA fabric manager daemon...\"\n        local pid=$(< /var/run/nvidia-fabricmanager/nv-fabricmanager.pid)\n\n        kill -SIGTERM \"${pid}\"\n        for i in $(seq 1 50); do\n            kill -0 \"${pid}\" 2> /dev/null || break\n            sleep 0.1\n        done\n        if 
[ $i -eq 50 ]; then\n            echo \"Could not stop NVIDIA fabric manager daemon\" >&2\n            return 1\n        fi\n    fi\n\n    echo \"Unloading NVIDIA driver kernel modules...\"\n    if [ -f /sys/module/nvidia_modeset/refcnt ]; then\n        nvidia_modeset_refs=$(< /sys/module/nvidia_modeset/refcnt)\n        rmmod_args+=(\"nvidia-modeset\")\n        ((++nvidia_deps))\n    fi\n    if [ -f /sys/module/nvidia_uvm/refcnt ]; then\n        nvidia_uvm_refs=$(< /sys/module/nvidia_uvm/refcnt)\n        rmmod_args+=(\"nvidia-uvm\")\n        ((++nvidia_deps))\n    fi\n    if [ -f /sys/module/nvidia/refcnt ]; then\n        nvidia_refs=$(< /sys/module/nvidia/refcnt)\n        rmmod_args+=(\"nvidia\")\n    fi\n    if [ -f /sys/module/nvidia_peermem/refcnt ]; then\n        nvidia_peermem_refs=$(< /sys/module/nvidia_peermem/refcnt)\n        rmmod_args+=(\"nvidia-peermem\")\n        ((++nvidia_deps))\n    fi\n    if [ ${nvidia_refs} -gt ${nvidia_deps} ] || [ ${nvidia_uvm_refs} -gt 0 ] || [ ${nvidia_modeset_refs} -gt 0 ] || [ ${nvidia_peermem_refs} -gt 0 ]; then\n        echo \"Could not unload NVIDIA driver kernel modules, driver is in use\" >&2\n        return 1\n    fi\n\n    if [ ${#rmmod_args[@]} -gt 0 ]; then\n        rmmod ${rmmod_args[@]}\n    fi\n    return 0\n}\n\n# Link and install the kernel modules from a precompiled package using the nvidia-installer.\n_install_driver() {\n    local install_args=()\n\n    echo \"Installing NVIDIA driver kernel modules...\"\n    cd /usr/src/nvidia-${DRIVER_VERSION}\n    rm -rf /lib/modules/${KERNEL_VERSION}/video\n\n    if [ \"${ACCEPT_LICENSE}\" = \"yes\" ]; then\n        install_args+=(\"--accept-license\")\n    fi\n    IGNORE_CC_MISMATCH=1 nvidia-installer --kernel-module-only --no-drm --ui=none --no-nouveau-check -m=${KERNEL_TYPE} ${install_args[@]+\"${install_args[@]}\"}\n    # May need to add no-cc-check for Rhel, otherwise it complains about cc missing in path\n    # /proc/version and lib/modules/KERNEL_VERSION/proc are different, by default installer looks at /proc/ so, added the proc-mount-point\n    # TODO: remove the -a flag. its not needed. in the new driver version, license-acceptance is implicit\n    #nvidia-installer --kernel-module-only --no-drm --ui=none --no-nouveau-check --no-cc-version-check --proc-mount-point /lib/modules/${KERNEL_VERSION}/proc ${install_args[@]+\"${install_args[@]}\"}\n}\n\n# Mount the driver rootfs into the run directory with the exception of sysfs.\n_mount_rootfs() {\n    echo \"Mounting NVIDIA driver rootfs...\"\n    mount --make-runbindable /sys\n    mount --make-private /sys\n    mkdir -p ${RUN_DIR}/driver\n    mount --rbind / ${RUN_DIR}/driver\n\n    echo \"Check SELinux status\"\n    if [ -e /sys/fs/selinux ]; then\n        echo \"SELinux is enabled\"\n        echo \"Change device files security context for selinux compatibility\"\n        chcon -R -t container_file_t ${RUN_DIR}/driver/dev\n    else\n        echo \"SELinux is disabled, skipping...\"\n    fi\n}\n\n# Unmount the driver rootfs from the run directory.\n_unmount_rootfs() {\n    echo \"Unmounting NVIDIA driver rootfs...\"\n    if findmnt -r -o TARGET | grep \"${RUN_DIR}/driver\" > /dev/null; then\n        umount -l -R ${RUN_DIR}/driver\n    fi\n}\n\n# Write a kernel postinst.d script to automatically precompile packages on kernel update (similar to DKMS).\n_write_kernel_update_hook() {\n    if [ ! 
-d ${KERNEL_UPDATE_HOOK%/*} ]; then\n        return\n    fi\n\n    echo \"Writing kernel update hook...\"\n    cat > ${KERNEL_UPDATE_HOOK} <<'EOF'\n#!/bin/bash\n\nset -eu\ntrap 'echo \"ERROR: Failed to update the NVIDIA driver\" >&2; exit 0' ERR\n\nNVIDIA_DRIVER_PID=$(< /run/nvidia/nvidia-driver.pid)\n\nexport \"$(grep -z DRIVER_VERSION /proc/${NVIDIA_DRIVER_PID}/environ)\"\nnsenter -t \"${NVIDIA_DRIVER_PID}\" -m -- nvidia-driver update --kernel \"$1\"\nEOF\n    chmod +x ${KERNEL_UPDATE_HOOK}\n}\n\n_shutdown() {\n    if _unload_driver; then\n        _unmount_rootfs\n        rm -f ${PID_FILE} ${KERNEL_UPDATE_HOOK}\n        return 0\n    fi\n    return 1\n}\n\n_find_vgpu_driver_version() {\n    local count=\"\"\n    local version=\"\"\n    local drivers_path=\"/drivers\"\n\n    if [ \"${DISABLE_VGPU_VERSION_CHECK}\" = \"true\" ]; then\n        echo \"vgpu version compatibility check is disabled\"\n        return 0\n    fi\n    # check if vgpu devices are present\n    count=$(vgpu-util count)\n    if [ $? -ne 0 ]; then\n         echo \"cannot find vgpu devices on host, pleae check /var/log/vgpu-util.log for more details...\"\n         return 0\n    fi\n    NUM_VGPU_DEVICES=$(echo \"$count\" | awk -F= '{print $2}')\n    if [ $NUM_VGPU_DEVICES -eq 0 ]; then\n        # no vgpu devices found, treat as passthrough\n        return 0\n    fi\n    echo \"found $NUM_VGPU_DEVICES vgpu devices on host\"\n\n    # find compatible guest driver using driver catalog\n    if [ -d \"/mnt/shared-nvidia-driver-toolkit/drivers\" ]; then\n        drivers_path=\"/mnt/shared-nvidia-driver-toolkit/drivers\"\n    fi\n    version=$(vgpu-util match -i \"${drivers_path}\" -c \"${drivers_path}/vgpuDriverCatalog.yaml\")\n    if [ $? -ne 0 ]; then\n        echo \"cannot find match for compatible vgpu driver from available list, please check /var/log/vgpu-util.log for more details...\"\n        return 1\n    fi\n    DRIVER_VERSION=$(echo \"$version\" | awk -F= '{print $2}')\n    echo \"vgpu driver version selected: ${DRIVER_VERSION}\"\n    return 0\n}\n\n_start_vgpu_topology_daemon() {\n    type nvidia-topologyd > /dev/null 2>&1 || return 0\n    echo \"Starting nvidia-topologyd..\"\n    nvidia-topologyd\n}\n\n_prepare() {\n    if [ \"${DRIVER_TYPE}\" = \"vgpu\" ]; then\n        _find_vgpu_driver_version || exit 1\n    fi\n\n    # Install the userspace components and copy the kernel module sources.\n    sh NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION.run -x && \\\n        cd NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION && \\\n        sh /tmp/install.sh nvinstall && \\\n        mkdir -p /usr/src/nvidia-$DRIVER_VERSION && \\\n        mv LICENSE mkprecompiled ${KERNEL_TYPE} /usr/src/nvidia-$DRIVER_VERSION && \\\n        sed '9,${/^\\(kernel\\|LICENSE\\)/!d}' .manifest > /usr/src/nvidia-$DRIVER_VERSION/.manifest\n\n    echo -e \"\\n========== NVIDIA Software Installer ==========\\n\"\n    echo -e \"Starting installation of NVIDIA driver version ${DRIVER_VERSION} for Linux kernel version ${KERNEL_VERSION}\\n\"\n}\n\n_prepare_exclusive() {\n    _prepare\n\n    exec 3> ${PID_FILE}\n    if ! 
flock -n 3; then\n        echo \"An instance of the NVIDIA driver is already running, aborting\"\n        exit 1\n    fi\n    echo $$ >&3\n\n    trap \"echo 'Caught signal'; exit 1\" HUP INT QUIT PIPE TERM\n    trap \"_shutdown\" EXIT\n\n    _unload_driver || exit 1\n    _unmount_rootfs\n}\n\n_build() {\n    # Install dependencies\n    if _kernel_requires_package; then\n        _update_package_cache\n        _install_prerequisites\n        _create_driver_package\n        #_remove_prerequisites\n        _cleanup_package_cache\n    fi\n\n    # Build the driver\n    _install_driver\n}\n\n_load() {\n    _load_driver\n    _mount_rootfs\n    _write_kernel_update_hook\n\n    echo \"Done, now waiting for signal\"\n    sleep infinity &\n    trap \"echo 'Caught signal'; _shutdown && { kill $!; exit 0; }\" HUP INT QUIT PIPE TERM\n    trap - EXIT\n    while true; do wait $! || continue; done\n    exit 0\n}\n\ninit() {\n    _prepare_exclusive\n\n    _build\n\n    _load\n}\n\nbuild() {\n    _prepare\n\n    _build\n}\n\nload() {\n    _prepare_exclusive\n\n    _load\n}\n\nupdate() {\n    exec 3>&2\n    if exec 2> /dev/null 4< ${PID_FILE}; then\n        if ! flock -n 4 && read pid <&4 && kill -0 \"${pid}\"; then\n            exec > >(tee -a \"/proc/${pid}/fd/1\")\n            exec 2> >(tee -a \"/proc/${pid}/fd/2\" >&3)\n        else\n            exec 2>&3\n        fi\n        exec 4>&-\n    fi\n    exec 3>&-\n\n    # vgpu driver version is chosen dynamically during runtime, so pre-compile modules for\n    # only non-vgpu driver types\n    if [ \"${DRIVER_TYPE}\" != \"vgpu\" ]; then\n        # Install the userspace components and copy the kernel module sources.\n        if [ ! -e /usr/src/nvidia-${DRIVER_VERSION}/mkprecompiled ]; then\n            sh NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION.run -x && \\\n                cd NVIDIA-Linux-$DRIVER_ARCH-$DRIVER_VERSION && \\\n                sh /tmp/install.sh nvinstall && \\\n                mkdir -p /usr/src/nvidia-$DRIVER_VERSION && \\\n                mv LICENSE mkprecompiled ${KERNEL_TYPE} /usr/src/nvidia-$DRIVER_VERSION && \\\n                sed '9,${/^\\(kernel\\|LICENSE\\)/!d}' .manifest > /usr/src/nvidia-$DRIVER_VERSION/.manifest\n        fi\n    fi\n\n    echo -e \"\\n========== NVIDIA Software Updater ==========\\n\"\n    echo -e \"Starting update of NVIDIA driver version ${DRIVER_VERSION} for Linux kernel version ${KERNEL_VERSION}\\n\"\n\n    trap \"echo 'Caught signal'; exit 1\" HUP INT QUIT PIPE TERM\n\n    _update_package_cache\n    _resolve_kernel_version || exit 1\n    _install_prerequisites\n    if _kernel_requires_package; then\n        _create_driver_package\n    fi\n    _remove_prerequisites\n    _cleanup_package_cache\n\n    echo \"Done\"\n    exit 0\n}\n\n# Wait for MOFED drivers to be loaded and load nvidia-peermem whenever it gets unloaded during MOFED driver updates\nreload_nvidia_peermem() {\n    if [ \"$USE_HOST_MOFED\" = \"true\" ]; then\n        until  lsmod | grep mlx5_core > /dev/null 2>&1 && [ -f /run/nvidia/validations/.driver-ctr-ready ];\n        do\n            echo \"waiting for mellanox ofed and nvidia drivers to be installed\"\n            sleep 10\n        done\n    else\n        # use driver readiness flag created by MOFED container\n        until  [ -f /run/mellanox/drivers/.driver-ready ] && [ -f /run/nvidia/validations/.driver-ctr-ready ];\n        do\n            echo \"waiting for mellanox ofed and nvidia drivers to be installed\"\n            sleep 10\n        done\n    fi\n    # get any parameters provided for 
nvidia-peermem\n    _get_module_params && set +o nounset\n    if chroot /run/nvidia/driver modprobe nvidia-peermem \"${NVIDIA_PEERMEM_MODULE_PARAMS[@]}\"; then\n        if [ -f /sys/module/nvidia_peermem/refcnt ]; then\n            echo \"successfully loaded nvidia-peermem module, now waiting for signal\"\n            sleep inf\n            trap \"echo 'Caught signal'; exit 1\" HUP INT QUIT PIPE TERM\n        fi\n    fi\n    echo \"failed to load nvidia-peermem module\"\n    exit 1\n}\n\n# probe by gpu-operator for liveness/startup checks for nvidia-peermem module to be loaded when MOFED drivers are ready\nprobe_nvidia_peermem() {\n    if lsmod | grep mlx5_core > /dev/null 2>&1; then\n        if [ ! -f /sys/module/nvidia_peermem/refcnt ]; then\n            echo \"nvidia-peermem module is not loaded\"\n            return 1\n        fi\n    else\n        echo \"MOFED drivers are not ready, skipping probe to avoid container restarts...\"\n    fi\n    return 0\n}\n\nusage() {\n    cat >&2 <<EOF\nUsage: $0 COMMAND [ARG...]\n\nCommands:\n  init   [-a | --accept-license] [-m | --max-threads MAX_THREADS]\n  build  [-a | --accept-license] [-m | --max-threads MAX_THREADS]\n  load\n  update [-k | --kernel VERSION] [-s | --sign KEYID] [-t | --tag TAG] [-m | --max-threads MAX_THREADS]\nEOF\n    exit 1\n}\n\nif [ $# -eq 0 ]; then\n    usage\nfi\ncommand=$1; shift\ncase \"${command}\" in\n    init) options=$(getopt -l accept-license,max-threads: -o am: -- \"$@\") ;;\n    build) options=$(getopt -l accept-license,tag:,max-threads: -o a:t:m: -- \"$@\") ;;\n    load) options=\"\" ;;\n    update) options=$(getopt -l kernel:,sign:,tag:,max-threads: -o k:s:t:m: -- \"$@\") ;;\n    reload_nvidia_peermem) options=\"\" ;;\n    probe_nvidia_peermem) options=\"\" ;;\n    *) usage ;;\nesac\nif [ $? -ne 0 ]; then\n    usage\nfi\neval set -- \"${options}\"\n\nACCEPT_LICENSE=\"\"\nMAX_THREADS=\"\"\nKERNEL_VERSION=$(uname -r)\nPRIVATE_KEY=\"\"\nPACKAGE_TAG=\"\"\n\nfor opt in ${options}; do\n    case \"$opt\" in\n    -a | --accept-license) ACCEPT_LICENSE=\"yes\"; shift 1 ;;\n    -k | --kernel) KERNEL_VERSION=$2; shift 2 ;;\n    -m | --max-threads) MAX_THREADS=$2; shift 2 ;;\n    -s | --sign) PRIVATE_KEY=$2; shift 2 ;;\n    -t | --tag) PACKAGE_TAG=$2; shift 2 ;;\n    --) shift; break ;;\n    esac\ndone\nif [ $# -ne 0 ]; then\n    usage\nfi\n\n_resolve_rhel_version || exit 1\n\n$command\n

                                                                          Build a custom image on top of the official image. Below is the content of the Dockerfile:

                                                                          FROM nvcr.io/nvidia/driver:535.183.06-rhel9.2\nCOPY nvidia-driver /usr/local/bin\nRUN chmod +x /usr/local/bin/nvidia-driver\nCMD [\"/bin/bash\", \"-c\"]\n

                                                                          Build the image and push it to the bootstrap cluster:

                                                                          docker build -t {bootstrap-registry}/nvcr.m.daocloud.io/nvidia/driver:535.183.06-01-rhel9.2 -f Dockerfile .\ndocker push {bootstrap-registry}/nvcr.m.daocloud.io/nvidia/driver:535.183.06-01-rhel9.2\n
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/rhel9.2_offline_install_driver.html#_2","title":"\u5b89\u88c5\u9a71\u52a8","text":"
                                                                          1. Install the gpu-operator addon.
                                                                          2. Set driver.version=535.183.06-01 (see the sketch below).
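
                                                                          For reference, a minimal sketch of the equivalent settings when installing through the GPU Operator Helm chart (the addon form exposes the same values; the repository path assumes the custom image pushed above):

                                                                          helm upgrade --install gpu-operator nvidia/gpu-operator -n gpu-operator --create-namespace \\\n  --set driver.repository={bootstrap-registry}/nvcr.m.daocloud.io/nvidia \\\n  --set driver.version=535.183.06-01\n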
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html","title":"Ubuntu22.04 \u79bb\u7ebf\u5b89\u88c5 gpu-operator \u9a71\u52a8","text":"

                                                                          Prerequisite: gpu-operator v23.9.0+2 or later is installed.

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html#_1","title":"\u51c6\u5907\u79bb\u7ebf\u955c\u50cf","text":"
                                                                          1. Check the kernel version:

                                                                            $ uname -r\n5.15.0-78-generic\n
                                                                          2. Look up the GPU driver image version matching this kernel at https://catalog.ngc.nvidia.com/orgs/nvidia/containers/driver/tags, pull the image for that kernel, and save it with ctr export:

                                                                            ctr i pull nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04\nctr i export --all-platforms driver.tar.gz nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 \n
                                                                          3. Import the image into the image registry of the bootstrap cluster:

                                                                            ctr i import driver.tar.gz\nctr i tag nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 {bootstrap-registry}/nvcr.m.daocloud.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04\nctr i push {bootstrap-registry}/nvcr.m.daocloud.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 --skip-verify=true\n
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html#_2","title":"\u5b89\u88c5\u9a71\u52a8","text":"
                                                                          1. Install the gpu-operator addon.
                                                                          2. If you use precompiled mode, set driver.usePrecompiled=true and driver.version=535. Note that the value is 535, not 535.104.12; see the sketch below. (In non-precompiled mode, skip this step and install directly.)
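
                                                                          Again as a hedged sketch of the same values through the Helm chart (the names mirror the addon parameters above):

                                                                          helm upgrade --install gpu-operator nvidia/gpu-operator -n gpu-operator --create-namespace \\\n  --set driver.usePrecompiled=true \\\n  --set driver.version=535   # major version only, not 535.104.12\n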
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html","title":"\u6784\u5efa CentOS 7.9 \u79bb\u7ebf yum \u6e90","text":""},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#_1","title":"\u4f7f\u7528\u573a\u666f\u4ecb\u7ecd","text":"

                                                                          When the kernel version or OS type of a worker node differs from that of the control nodes in the global service cluster, you need to build an offline yum repository manually.

                                                                          This article describes how to build an offline yum repository and use it through the RepoConfig.ConfigMapName parameter when installing the GPU Operator.

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          1. The addon offline package v0.12.0 or later is already installed on the platform.
                                                                          2. Prepare a file server, such as nginx or minio, that is reachable from the network of the cluster where the GPU Operator will be deployed.
                                                                          3. Prepare a node that can reach the Internet, the cluster where the GPU Operator will be deployed, and the file server, with Docker already installed on it.
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                                          This article uses a CentOS 7.9 node with kernel version 3.10.0-1160.95.1.el7.x86_64 as an example to describe how to build the yum repository for the GPU Operator offline package.

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#os","title":"\u68c0\u67e5\u96c6\u7fa4\u8282\u70b9\u7684 OS \u548c\u5185\u6838\u7248\u672c","text":"

                                                                          Run the following commands on both a control node of the global service cluster and the node where the GPU Operator will be deployed. If the OS and kernel versions of the two nodes match, there is no need to build a yum repository; you can install directly by following Installing GPU Operator Offline. If the OS or kernel versions differ, proceed to the next step.

                                                                          1. Run the following command to check the distribution name and version of the node in the cluster where the GPU Operator will be deployed.

                                                                            cat /etc/redhat-release\n

                                                                            Expected output:

                                                                            CentOS Linux release 7.9 (Core)\n

                                                                            The output shows that the OS version of the current node is CentOS 7.9.

                                                                          2. Run the following command to check the kernel version of the node in the cluster where the GPU Operator will be deployed.

                                                                            uname -a\n

                                                                            Expected output:

                                                                            Linux localhost.localdomain 3.10.0-1160.95.1.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux\n

                                                                            The output shows that the kernel version of the current node is 3.10.0-1160.95.1.el7.x86_64.

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#yum","title":"\u5236\u4f5c\u79bb\u7ebf yum \u6e90","text":"

                                                                          Perform the following operations on a node that can reach both the Internet and the file server.

                                                                          1. Run the following command to create a script file named yum.sh.

                                                                            vi yum.sh\n

                                                                            Then press the i key to enter insert mode and enter the following content:

                                                                            export TARGET_KERNEL_VERSION=$1\n\ncat >> run.sh << \\EOF\n#! /bin/bash\necho \"start install kernel repo\"\necho ${KERNEL_VERSION}\nmkdir centos-base\n\nif [ \"$OS\" -eq 7 ]; then\n    yum install --downloadonly --downloaddir=./centos-base perl\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf.x86_64\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf-devel.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-headers-${KERNEL_VERSION}.el7.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-devel-${KERNEL_VERSION}.el7.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-${KERNEL_VERSION}.el7.x86_64\n    yum install  -y --downloadonly --downloaddir=./centos-base groff-base\nelif [ \"$OS\" -eq 8 ]; then\n    yum install --downloadonly --downloaddir=./centos-base perl\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf.x86_64\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf-devel.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-headers-${KERNEL_VERSION}.el8.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-devel-${KERNEL_VERSION}.el8.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-${KERNEL_VERSION}.el8.x86_64\n    yum install  -y --downloadonly --downloaddir=./centos-base groff-base\nelse\n    echo \"Error os version\"\nfi\n\ncreaterepo centos-base/\nls -lh centos-base/\ntar -zcf centos-base.tar.gz centos-base/\necho \"end install kernel repo\"\nEOF\n\ncat >> Dockerfile << EOF\nFROM centos:7\nENV KERNEL_VERSION=\"\"\nENV OS=7\nRUN yum install -y createrepo\nCOPY run.sh .\nENTRYPOINT [\"/bin/bash\",\"run.sh\"]\nEOF\n\ndocker build -t test:v1 -f Dockerfile .\ndocker run -e KERNEL_VERSION=$TARGET_KERNEL_VERSION --name centos7.9 test:v1\ndocker cp centos7.9:/centos-base.tar.gz .\ntar -xzf centos-base.tar.gz\n

                                                                            Press the esc key to exit insert mode, then type :wq to save and exit.

                                                                          2. Run the yum.sh file:

                                                                            bash -x yum.sh TARGET_KERNEL_VERSION\n

                                                                            The TARGET_KERNEL_VERSION parameter specifies the kernel version of the cluster nodes. Note: do not include the distribution suffix (such as .el7.x86_64). For example:

                                                                            bash -x yum.sh 3.10.0-1160.95.1\n

                                                                          At this point, you have generated an offline yum repository named centos-base for kernel 3.10.0-1160.95.1.el7.x86_64.
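
                                                                          As a quick, hedged sanity check before uploading (the exact file list varies by package set), confirm that createrepo generated the repository metadata:

                                                                          ls centos-base/repodata/\n# repomd.xml plus the primary/filelists/other metadata files should be present\n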

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#yum_1","title":"\u4e0a\u4f20\u79bb\u7ebf yum \u6e90\u5230\u6587\u4ef6\u670d\u52a1\u5668","text":"

                                                                          Perform the following operations on a node that can reach both the Internet and the file server. This step uploads the yum repository generated in the previous step to a file server that can be reached by the cluster where the GPU Operator will be deployed. The file server can be Nginx, MinIO, or any other file server that supports the HTTP protocol.

                                                                          This example uses the MinIO service built into the bootstrap node of the 算丰 AI computing platform as the file server. The MinIO details are as follows:

                                                                          • Access address: http://10.5.14.200:9000 (usually {bootstrap node IP} + port 9000)
                                                                          • Username: rootuser
                                                                          • Password: rootpass123

                                                                          • In the current directory on the node, run the following command to link the node-local mc command-line tool to the MinIO server.

                                                                            mc config host add minio http://10.5.14.200:9000 rootuser rootpass123\n

                                                                            Expected output:

                                                                            Added `minio` successfully.\n

                                                                            The mc command-line tool is the client-side CLI provided by the MinIO file server. For details, see MinIO Client.

                                                                          • In the current directory on the node, create a bucket named centos-base.

                                                                            mc mb -p minio/centos-base\n

                                                                            Expected output:

                                                                            Bucket created successfully `minio/centos-base`.\n
                                                                          • Set the access policy of the centos-base bucket to allow public download, so that it can be accessed later when installing the GPU Operator.

                                                                            mc anonymous set download minio/centos-base\n

                                                                            Expected output:

                                                                            Access permission for `minio/centos-base` is set to `download` \n
                                                                          • In the current directory on the node, copy the offline yum repository directory centos-base generated in the previous step to the minio/centos-base bucket on the MinIO server; you can verify the copy as sketched below.

                                                                            mc cp centos-base minio/centos-base --recursive\n
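
                                                                          As a hedged check using the same mc alias configured above, list what landed in the bucket:

                                                                          mc ls --recursive minio/centos-base\n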
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#yum_2","title":"\u5728\u96c6\u7fa4\u521b\u5efa\u914d\u7f6e\u9879\u7528\u6765\u4fdd\u5b58 yum \u6e90\u4fe1\u606f","text":"

                                                                          Perform the following operations on a control node of the cluster where the GPU Operator will be deployed.

                                                                          1. Run the following command to create a file named CentOS-Base.repo that specifies the configuration of the yum repository.

                                                                            # The file name must be CentOS-Base.repo, otherwise it will not be recognized when installing gpu-operator\ncat > CentOS-Base.repo << EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # address of the file server hosting the yum repository uploaded in the previous step\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # address of the file server hosting the yum repository uploaded in the previous step\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
2. Based on the CentOS-Base.repo file just created, create a ConfigMap named local-repo-config in the gpu-operator namespace:

    kubectl create configmap local-repo-config -n gpu-operator --from-file=CentOS-Base.repo=/etc/yum.repos.d/extension.repo

   Expected output:

    configmap/local-repo-config created

   The local-repo-config ConfigMap supplies the value of the RepoConfig.ConfigMapName parameter when installing gpu-operator; the ConfigMap name can be customized by the user.

3. Check the contents of the local-repo-config ConfigMap:

    kubectl get configmap local-repo-config -n gpu-operator -oyaml

   Expected output:

    apiVersion: v1
    data:
      CentOS-Base.repo: "[extension-0]\nbaseurl = http://10.6.232.5:32618/centos-base # file server path where the yum source was placed in step 2\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.6.232.5:32618/centos-base # file server path where the yum source was placed in step 2\ngpgcheck = 0\nname = kubean extension 1\n"
    kind: ConfigMap
    metadata:
      creationTimestamp: "2023-10-18T01:59:02Z"
      name: local-repo-config
      namespace: gpu-operator
      resourceVersion: "59445080"
      uid: c5f0ebab-046f-442c-b932-f9003e014387

At this point, you have successfully created the offline yum source configuration file for the cluster where the GPU Operator is to be deployed. Use it by passing the RepoConfig.ConfigMapName parameter during the offline installation of the GPU Operator, as sketched below.
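
For illustration, when installing with the NVIDIA GPU Operator Helm chart directly, the equivalent of the platform's RepoConfig.ConfigMapName parameter is the chart's driver repoConfig value. A minimal sketch, assuming a Helm-based offline install (the repository alias and chart version are placeholders for your environment):

    # Hypothetical invocation; adjust repo, chart, and version to your offline environment
    helm install gpu-operator nvidia/gpu-operator \
      -n gpu-operator \
      --set driver.repoConfig.configMapName=local-repo-config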

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html","title":"\u6784\u5efa Red Hat 8.4 \u79bb\u7ebf yum \u6e90","text":"

                                                                          \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9884\u7f6e\u4e86 CentOS 7.9\uff0c\u5185\u6838\u4e3a 3.10.0-1160 \u7684 GPU operator \u79bb\u7ebf\u5305\u3002\u5176\u5b83 OS \u7c7b\u578b\u7684\u8282\u70b9\u6216\u5185\u6838\u9700\u8981\u7528\u6237\u624b\u52a8\u6784\u5efa\u79bb\u7ebf yum \u6e90\u3002

                                                                          \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u57fa\u4e8e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4efb\u610f\u8282\u70b9\u6784\u5efa Red Hat 8.4 \u79bb\u7ebf yum \u6e90\u5305\uff0c\u5e76\u5728\u5b89\u88c5 Gpu Operator \u65f6\uff0c\u901a\u8fc7 RepoConfig.ConfigMapName \u53c2\u6570\u6765\u4f7f\u7528\u3002

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          1. \u7528\u6237\u5df2\u7ecf\u5728\u5e73\u53f0\u4e0a\u5b89\u88c5\u4e86 v0.12.0 \u53ca\u4ee5\u4e0a\u7248\u672c\u7684 addon \u79bb\u7ebf\u5305\u3002
                                                                          2. \u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u8282\u70b9 OS \u5fc5\u987b\u4e3a Red Hat 8.4\uff0c\u4e14\u5185\u6838\u7248\u672c\u5b8c\u5168\u4e00\u81f4\u3002
                                                                          3. \u51c6\u5907\u4e00\u4e2a\u80fd\u591f\u548c\u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u7f51\u7edc\u80fd\u591f\u8054\u901a\u7684\u6587\u4ef6\u670d\u52a1\u5668\uff0c\u5982 nginx \u6216 minio\u3002
                                                                          4. \u51c6\u5907\u4e00\u4e2a\u80fd\u591f\u8bbf\u95ee\u4e92\u8054\u7f51\u3001\u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u548c\u6587\u4ef6\u670d\u52a1\u5668\u7684\u8282\u70b9\uff0c\u4e14\u8282\u70b9\u4e0a\u5df2\u7ecf\u5b8c\u6210 Docker \u7684\u5b89\u88c5\u3002
                                                                          5. \u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u8282\u70b9\u5fc5\u987b\u4e3a Red Hat 8.4 4.18.0-305.el8.x86_64\u3002
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

Taking a Red Hat 8.4 4.18.0-305.el8.x86_64 node as an example, this article describes how to build an offline yum source package for Red Hat 8.4 on any node of the global service cluster, and how to use it via the RepoConfig.ConfigMapName parameter when installing the GPU Operator.

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#yum","title":"\u4e0b\u8f7d\u706b\u79cd\u8282\u70b9\u4e2d\u7684 yum \u6e90","text":"

                                                                          \u4ee5\u4e0b\u64cd\u4f5c\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684 master \u8282\u70b9\u4e0a\u6267\u884c\u3002

                                                                          1. \u4f7f\u7528 ssh \u6216\u5176\u5b83\u65b9\u5f0f\u8fdb\u5165\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u5185\u4efb\u4e00\u8282\u70b9\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff1a

                                                                            cat /etc/yum.repos.d/extension.repo #\u67e5\u770b extension.repo \u4e2d\u7684\u5185\u5bb9\n

                                                                            \u9884\u671f\u8f93\u51fa\u5982\u4e0b\uff1a

                                                                            [extension-0]\nbaseurl = http://10.5.14.200:9000/kubean/redhat/$releasever/os/$basearch\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/kubean/redhat-iso/$releasever/os/$basearch/AppStream\ngpgcheck = 0\nname = kubean extension 1\n\n[extension-2]\nbaseurl = http://10.5.14.200:9000/kubean/redhat-iso/$releasever/os/$basearch/BaseOS\ngpgcheck = 0\nname = kubean extension 2\n
2. Create a folder named redhat-base-repo under the root path:

    mkdir redhat-base-repo

3. Install yum-utils, which provides the reposync tool used to download the rpm packages locally:

    yum install yum-utils

4. Download the rpm packages from extension-1:

    reposync -p redhat-base-repo -n --repoid=extension-1

5. Download the rpm packages from extension-2 (a quick verification sketch follows these steps):

    reposync -p redhat-base-repo -n --repoid=extension-2
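
Optionally, sanity-check that reposync actually downloaded packages before continuing; a minimal sketch, assuming reposync placed the rpm files under a Packages subdirectory as referenced later in this article:

    # Count the rpm packages downloaded for each repoid
    ls redhat-base-repo/extension-1/Packages | wc -l
    ls redhat-base-repo/extension-2/Packages | wc -l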
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#elfutils-libelf-devel-0187-4el8x86_64rpm","title":"\u4e0b\u8f7d elfutils-libelf-devel-0.187-4.el8.x86_64.rpm \u5305","text":"

                                                                          \u4ee5\u4e0b\u64cd\u4f5c\u5728\u8054\u7f51\u8282\u70b9\u6267\u884c\u64cd\u4f5c\uff0c\u5728\u64cd\u4f5c\u524d\uff0c\u60a8\u9700\u8981\u4fdd\u8bc1\u8054\u7f51\u8282\u70b9\u548c\u5168\u5c40\u670d\u52a1\u96c6\u7fa4 master \u8282\u70b9\u95f4\u7684\u7f51\u7edc\u8054\u901a\u6027\u3002

                                                                          1. \u5728\u8054\u7f51\u8282\u70b9\u6267\u884c\u5982\u4e0b\u547d\u4ee4\uff0c\u4e0b\u8f7d elfutils-libelf-devel-0.187-4.el8.x86_64.rpm \u5305\uff1a

                                                                            wget https://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/elfutils-libelf-devel-0.187-4.el8.x86_64.rpm\n
                                                                          2. \u5728\u5f53\u524d\u76ee\u5f55\u4e0b\u5c06 elfutils-libelf-devel-0.187-4.el8.x86_64.rpm \u5305\u4f20\u8f93\u81f3\u6b65\u9aa4\u4e00\u4e2d\u7684\u8282\u70b9\u4e0a\uff1a

                                                                            scp  elfutils-libelf-devel-0.187-4.el8.x86_64.rpm user@ip:~/redhat-base-repo/extension-2/Packages/\n

                                                                            \u4f8b\u5982\uff1a

                                                                            scp  elfutils-libelf-devel-0.187-4.el8.x86_64.rpm root@10.6.175.10:~/redhat-base-repo/extension-2/Packages/\n
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#yum-repo","title":"\u751f\u6210\u672c\u5730 yum repo","text":"

                                                                          \u4ee5\u4e0b\u64cd\u4f5c\u5728\u6b65\u9aa4\u4e00\u4e2d\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684 master \u8282\u70b9\u4e0a\u6267\u884c\u3002

                                                                          1. \u8fdb\u5165 yum repo \u76ee\u5f55\uff1a

                                                                            cd ~/redhat-base-repo/extension-1/Packages\ncd ~/redhat-base-repo/extension-2/Packages\n
                                                                          2. \u751f\u6210\u76ee\u5f55 repo \u7d22\u5f15\uff1a

                                                                            yum install createrepo -y  # \u82e5\u5df2\u5b89\u88c5 createrepo \u53ef\u7701\u7565\u6b64\u6b65\u9aa4\ncreaterepo_c ./\n

                                                                          \u81f3\u6b64\uff0c\u60a8\u5df2\u7ecf\u751f\u6210\u4e86\u5185\u6838\u4e3a 4.18.0-305.el8.x86_64 \u7684\u79bb\u7ebf\u7684 yum \u6e90\uff1a redhat-base-repo \u3002
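
A quick way to confirm the index was generated is to check for the repodata directory in each Packages path; a minimal sketch:

    # createrepo_c writes its index into ./repodata in each directory it was run in
    ls ~/redhat-base-repo/extension-1/Packages/repodata/repomd.xml
    ls ~/redhat-base-repo/extension-2/Packages/repodata/repomd.xml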

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#yum-repo_1","title":"\u5c06\u672c\u5730\u751f\u6210\u7684 yum repo \u4e0a\u4f20\u81f3\u6587\u4ef6\u670d\u52a1\u5668","text":"

                                                                          \u672c\u64cd\u4f5c\u793a\u4f8b\u91c7\u7528\u7684\u662f\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u706b\u79cd\u8282\u70b9\u5185\u7f6e\u7684 Minio \u4f5c\u4e3a\u6587\u4ef6\u670d\u52a1\u5668\uff0c\u7528\u6237\u53ef\u57fa\u4e8e\u81ea\u8eab\u60c5\u51b5\u9009\u62e9\u6587\u4ef6\u670d\u52a1\u5668\u3002Minio \u76f8\u5173\u4fe1\u606f\u5982\u4e0b\uff1a

                                                                          • \u8bbf\u95ee\u5730\u5740\uff1a http://10.5.14.200:9000\uff08\u4e00\u822c\u4e3a{\u706b\u79cd\u8282\u70b9 IP} + {9000 \u7aef\u53e3}\uff09
                                                                          • \u767b\u5f55\u7528\u6237\u540d\uff1arootuser
                                                                          • \u767b\u5f55\u5bc6\u7801\uff1arootpass123

• In the current path on the node, run the following command to link the node-local mc command-line tool to the MinIO server:

    mc config host add minio <file server access address> <username> <password>

  For example:

    mc config host add minio http://10.5.14.200:9000 rootuser rootpass123

  Expected output:

    Added `minio` successfully.

  The mc command-line tool is the client CLI provided by the MinIO file server. For details, see: MinIO Client.

• In the current path on the node, create a bucket named redhat-base:

    mc mb -p minio/redhat-base

  Expected output:

    Bucket created successfully `minio/redhat-base`.

• Set the access policy of the redhat-base bucket to allow public download, so that it can be accessed later when installing the GPU Operator:

    mc anonymous set download minio/redhat-base

  Expected output:

    Access permission for `minio/redhat-base` is set to `download`

• In the current path on the node, copy the offline yum source directory redhat-base-repo generated in step 2 into the minio/redhat-base bucket on the MinIO server (a verification sketch follows this list):

    mc cp redhat-base-repo minio/redhat-base --recursive
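
To verify the upload, list the bucket recursively with the `minio` alias configured above; a minimal verification sketch:

    # Confirm the rpm packages and repodata landed in the bucket
    mc ls --recursive minio/redhat-base | head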
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#yum_1","title":"\u5728\u96c6\u7fa4\u521b\u5efa\u914d\u7f6e\u9879\u7528\u6765\u4fdd\u5b58 yum \u6e90\u4fe1\u606f","text":"

                                                                          \u672c\u6b65\u9aa4\u5728\u5f85\u90e8\u7f72 GPU Operator \u96c6\u7fa4\u7684\u63a7\u5236\u8282\u70b9\u4e0a\u8fdb\u884c\u64cd\u4f5c\u3002

                                                                          1. \u6267\u884c\u5982\u4e0b\u547d\u4ee4\u521b\u5efa\u540d\u4e3a redhat.repo \u7684\u6587\u4ef6\uff0c\u7528\u6765\u6307\u5b9a yum \u6e90\u5b58\u50a8\u7684\u914d\u7f6e\u4fe1\u606f\u3002

                                                                            # \u6587\u4ef6\u540d\u79f0\u5fc5\u987b\u4e3a redhat.repo\uff0c\u5426\u5219\u5b89\u88c5 gpu-operator \u65f6\u65e0\u6cd5\u88ab\u8bc6\u522b\ncat > redhat.repo << EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/redhat-base/redhat-base-repo/Packages #\u6b65\u9aa4\u4e00\u4e2d\uff0c\u653e\u7f6e yum \u6e90\u7684\u6587\u4ef6\u670d\u52a1\u5668\u5730\u5740\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/redhat-base/redhat-base-repo/Packages #\u6b65\u9aa4\u4e00\u4e2d\uff0c\u653e\u7f6e yum \u6e90\u7684\u6587\u4ef6\u670d\u52a1\u5668\u5730\u5740\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
2. Based on the redhat.repo file just created, create a ConfigMap named local-repo-config in the gpu-operator namespace:

    kubectl create configmap local-repo-config -n gpu-operator --from-file=./redhat.repo

   Expected output:

    configmap/local-repo-config created

   The local-repo-config ConfigMap supplies the value of the RepoConfig.ConfigMapName parameter when installing gpu-operator; the ConfigMap name can be customized by the user.

3. Check the contents of the local-repo-config ConfigMap:

    kubectl get configmap local-repo-config -n gpu-operator -oyaml

At this point, you have successfully created the offline yum source configuration file for the cluster where the GPU Operator is to be deployed. Use it by passing the RepoConfig.ConfigMapName parameter during the offline installation of the GPU Operator.

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html","title":"\u6784\u5efa Red Hat 7.9 \u79bb\u7ebf yum \u6e90","text":""},{"location":"end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#_1","title":"\u4f7f\u7528\u573a\u666f\u4ecb\u7ecd","text":"

                                                                          \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u9884\u7f6e\u4e86 CentOS 7.9\uff0c\u5185\u6838\u4e3a 3.10.0-1160 \u7684 GPU Operator \u79bb\u7ebf\u5305\u3002\u5176\u5b83 OS \u7c7b\u578b\u7684\u8282\u70b9\u6216\u5185\u6838\u9700\u8981\u7528\u6237\u624b\u52a8\u6784\u5efa\u79bb\u7ebf yum \u6e90\u3002

                                                                          \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u57fa\u4e8e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4efb\u610f\u8282\u70b9\u6784\u5efa Red Hat 7.9 \u79bb\u7ebf yum \u6e90\u5305\uff0c\u5e76\u5728\u5b89\u88c5 Gpu Operator \u65f6\u4f7f\u7528 RepoConfig.ConfigMapName \u53c2\u6570\u3002

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          1. \u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u8282\u70b9 OS \u5fc5\u987b\u4e3a Red Hat 7.9\uff0c\u4e14\u5185\u6838\u7248\u672c\u5b8c\u5168\u4e00\u81f4
                                                                          2. \u51c6\u5907\u4e00\u4e2a\u80fd\u591f\u4e0e\u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u7f51\u7edc\u8054\u901a\u7684\u6587\u4ef6\u670d\u52a1\u5668\uff0c\u5982 nginx \u6216 minio
                                                                          3. \u51c6\u5907\u4e00\u4e2a\u80fd\u591f\u8bbf\u95ee\u4e92\u8054\u7f51\u3001\u5f85\u90e8\u7f72 GPU Operator \u7684\u96c6\u7fa4\u548c\u6587\u4ef6\u670d\u52a1\u5668\u7684\u8282\u70b9\uff0c \u4e14\u8282\u70b9\u4e0a\u5df2\u7ecf\u5b8c\u6210 Docker \u7684\u5b89\u88c5
                                                                          4. \u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u8282\u70b9\u5fc5\u987b\u4e3a Red Hat 7.9
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#1-yum","title":"1. \u6784\u5efa\u76f8\u5173\u5185\u6838\u7248\u672c\u7684\u79bb\u7ebf Yum \u6e90","text":"
1. Download the rhel7.9 ISO.

2. Download the rhel7.9 ospackage that matches your Kubean version.

   In Container Management, open Helm Apps in the global service cluster and search for kubean to check the kubean version number.

   Download the rhel7.9 ospackage for that version from the kubean code repository.

3. Import the offline resources via the installer.

   See the document on importing offline resources.

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#2-red-hat-79-os","title":"2. \u4e0b\u8f7d Red Hat 7.9 OS \u7684\u79bb\u7ebf\u9a71\u52a8\u955c\u50cf","text":"

                                                                          \u70b9\u51fb\u67e5\u770b\u4e0b\u8f7d\u5730\u5740\u3002

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#3-red-hat-gpu-opreator","title":"3. \u5411\u706b\u79cd\u8282\u70b9\u4ed3\u5e93\u4e0a\u4f20 Red Hat GPU Opreator \u79bb\u7ebf\u955c\u50cf","text":"

                                                                          \u53c2\u8003\u5411\u706b\u79cd\u8282\u70b9\u4ed3\u5e93\u4e0a\u4f20 Red Hat GPU Opreator \u79bb\u7ebf\u955c\u50cf\u3002

                                                                          Note

                                                                          \u6b64\u53c2\u8003\u4ee5 rhel8.4 \u4e3a\u4f8b\uff0c\u8bf7\u6ce8\u610f\u4fee\u6539\u6210 rhel7.9\u3002

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#4-yum","title":"4. \u5728\u96c6\u7fa4\u521b\u5efa\u914d\u7f6e\u9879\u7528\u6765\u4fdd\u5b58 Yum \u6e90\u4fe1\u606f","text":"

                                                                          \u5728\u5f85\u90e8\u7f72 GPU Operator \u96c6\u7fa4\u7684\u63a7\u5236\u8282\u70b9\u4e0a\u8fd0\u884c\u4ee5\u4e0b\u547d\u4ee4\u3002

                                                                          1. \u6267\u884c\u5982\u4e0b\u547d\u4ee4\u521b\u5efa\u540d\u4e3a CentOS-Base.repo \u7684\u6587\u4ef6\uff0c\u7528\u6765\u6307\u5b9a yum \u6e90\u5b58\u50a8\u7684\u914d\u7f6e\u4fe1\u606f\u3002

                                                                            # \u6587\u4ef6\u540d\u79f0\u5fc5\u987b\u4e3a CentOS-Base.repo\uff0c\u5426\u5219\u5b89\u88c5 gpu-operator \u65f6\u65e0\u6cd5\u88ab\u8bc6\u522b\ncat > CentOS-Base.repo <<  EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # \u706b\u79cd\u8282\u70b9\u7684\u7684\u6587\u4ef6\u670d\u52a1\u5668\u5730\u5740\uff0c\u4e00\u822c\u4e3a{\u706b\u79cd\u8282\u70b9 IP} + {9000 \u7aef\u53e3}\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # \u706b\u79cd\u8282\u70b9\u7684\u7684\u6587\u4ef6\u670d\u52a1\u5668\u5730\u5740\uff0c\u4e00\u822c\u4e3a{\u706b\u79cd\u8282\u70b9 IP} + {9000 \u7aef\u53e3}\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
2. Based on the CentOS-Base.repo file just created, create a ConfigMap named local-repo-config in the gpu-operator namespace:

    kubectl create configmap local-repo-config -n gpu-operator --from-file=CentOS-Base.repo=/etc/yum.repos.d/extension.repo

   Expected output:

    configmap/local-repo-config created

   The local-repo-config ConfigMap supplies the value of the RepoConfig.ConfigMapName parameter when installing gpu-operator; the ConfigMap name can be customized by the user.

3. Check the contents of the local-repo-config ConfigMap:

    kubectl get configmap local-repo-config -n gpu-operator -oyaml

   Expected output:

    local-repo-config.yaml

    apiVersion: v1
    data:
      CentOS-Base.repo: "[extension-0]\nbaseurl = http://10.6.232.5:32618/centos-base # file server path where the yum source was placed in step 2\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.6.232.5:32618/centos-base # file server path where the yum source was placed in step 2\ngpgcheck = 0\nname = kubean extension 1\n"
    kind: ConfigMap
    metadata:
      creationTimestamp: "2023-10-18T01:59:02Z"
      name: local-repo-config
      namespace: gpu-operator
      resourceVersion: "59445080"
      uid: c5f0ebab-046f-442c-b932-f9003e014387

At this point, you have successfully created the offline yum source configuration file for the cluster where the GPU Operator is to be deployed; it was used via the RepoConfig.ConfigMapName parameter during the offline installation of the GPU Operator.

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html","title":"GPU \u544a\u8b66\u89c4\u5219","text":"

                                                                          \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u8bbe\u7f6e GPU \u76f8\u5173\u7684\u544a\u8b66\u89c4\u5219\u3002

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                                          • \u96c6\u7fa4\u8282\u70b9\u4e0a\u5df2\u6b63\u786e\u5b89\u88c5 GPU \u8bbe\u5907
                                                                          • \u96c6\u7fa4\u4e2d\u5df2\u6b63\u786e\u5b89\u88c5 gpu-operator \u7ec4\u4ef6
                                                                          • \u5982\u679c\u7528\u5230\u4e86 vGPU \u8fd8\u9700\u8981\u5728\u96c6\u7fa4\u4e2d\u5b89\u88c5 Nvidia-vgpu \u7ec4\u4ef6\uff0c\u5e76\u4e14\u5f00\u542f servicemonitor
                                                                          • \u96c6\u7fa4\u6b63\u786e\u5b89\u88c5\u4e86 insight-agent \u7ec4\u4ef6
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#gpu_1","title":"\u544a\u8b66\u5e38\u7528 GPU \u6307\u6807","text":"

                                                                          \u672c\u8282\u4ecb\u7ecd GPU \u544a\u8b66\u5e38\u7528\u7684\u6307\u6807\uff0c\u5206\u4e3a\u4e24\u4e2a\u90e8\u5206\uff1a

                                                                          • GPU \u5361\u7eac\u5ea6\u7684\u6307\u6807\uff0c\u4e3b\u8981\u53cd\u5e94\u5355\u4e2a GPU \u8bbe\u5907\u7684\u8fd0\u884c\u72b6\u6001\u3002
                                                                          • \u5e94\u7528\u7eac\u5ea6\u7684\u6307\u6807\uff0c\u4e3b\u8981\u53cd\u5e94 Pod \u5728 GPU \u4e0a\u7684\u8fd0\u884c\u72b6\u6001\u3002
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#gpu_2","title":"GPU \u5361\u6307\u6807","text":"\u6307\u6807\u540d\u79f0 \u6307\u6807\u5355\u4f4d \u8bf4\u660e DCGM_FI_DEV_GPU_UTIL % GPU \u5229\u7528\u7387 DCGM_FI_DEV_MEM_COPY_UTIL % \u663e\u5b58\u5229\u7528\u7387 DCGM_FI_DEV_ENC_UTIL % \u7f16\u7801\u5668\u5229\u7528\u7387 DCGM_FI_DEV_DEC_UTIL % \u89e3\u7801\u5668\u5229\u7528\u7387 DCGM_FI_DEV_FB_FREE MB \u8868\u793a\u663e\u5b58\u5269\u4f59\u91cf DCGM_FI_DEV_FB_USED MB \u8868\u793a\u663e\u5b58\u4f7f\u7528\u91cf DCGM_FI_DEV_GPU_TEMP \u6444\u6c0f\u5ea6 \u8868\u793a\u5f53\u524d GPU \u7684\u6e29\u5ea6\u5ea6\u6570 DCGM_FI_DEV_POWER_USAGE W \u8bbe\u5907\u7535\u6e90\u4f7f\u7528\u60c5\u51b5 DCGM_FI_DEV_XID_ERRORS - \u8868\u793a\u4e00\u6bb5\u65f6\u95f4\u5185\uff0c\u6700\u540e\u53d1\u751f\u7684 XID \u9519\u8bef\u53f7\u3002XID \u63d0\u4f9b GPU \u786c\u4ef6\u3001NVIDIA \u8f6f\u4ef6\u6216\u5e94\u7528\u4e2d\u7684\u9519\u8bef\u7c7b\u578b\u3001\u9519\u8bef\u4f4d\u7f6e\u3001\u9519\u8bef\u4ee3\u7801\u7b49\u4fe1\u606f\uff0c\u66f4\u591a XID \u4fe1\u606f"},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#_2","title":"\u5e94\u7528\u7ef4\u5ea6\u7684\u6307\u6807","text":"\u6307\u6807\u540d\u79f0 \u6307\u6807\u5355\u4f4d \u8bf4\u660e kpanda_gpu_pod_utilization % \u8868\u793a Pod \u5bf9 GPU \u7684\u4f7f\u7528\u7387 kpanda_gpu_mem_pod_usage MB \u8868\u793a Pod \u5bf9 GPU \u663e\u5b58\u7684\u4f7f\u7528\u91cf kpanda_gpu_mem_pod_utilization % \u8868\u793a Pod \u5bf9 GPU \u663e\u5b58\u7684\u4f7f\u7528\u7387"},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-alarm.html#_3","title":"\u8bbe\u7f6e\u544a\u8b66\u89c4\u5219","text":"

This section describes how to set a GPU alert rule, using the GPU card utilization metric as an example; choose metrics and write PromQL according to your actual business scenario.

Goal: fire an alert when a GPU card's utilization stays at 80% for five consecutive seconds.

1. On the observability page, click Alerts -> Alert Policies -> Create Alert Policy.

2. Fill in the basic information.

3. Add a rule.

4. Select a notification method.

5. Once set up, you will receive an alert message like the following when a GPU holds 80% utilization for 5 seconds (a PromQL sketch for this condition is shown after these steps).
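
For reference, the rule in this example boils down to a threshold on DCGM_FI_DEV_GPU_UTIL from the metric table above. A hedged sketch of checking the condition against the Prometheus HTTP API (the address is a placeholder; the five-second hold is configured as the alert rule's duration rather than in the expression):

    # Hypothetical Prometheus address; the platform's actual endpoint may differ.
    # Returns the GPUs currently at or above 80% utilization.
    curl -s 'http://<prometheus-address>:9090/api/v1/query' \
      --data-urlencode 'query=DCGM_FI_DEV_GPU_UTIL >= 80'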

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-metrics.html","title":"GPU \u76d1\u63a7\u6307\u6807","text":"

                                                                          \u672c\u9875\u5217\u51fa\u4e00\u4e9b\u5e38\u7528\u7684 GPU \u76d1\u63a7\u6307\u6807\u3002

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-metrics.html#_1","title":"\u96c6\u7fa4\u7ef4\u5ea6","text":"\u6307\u6807\u540d\u79f0 \u63cf\u8ff0 GPU \u5361\u6570 \u96c6\u7fa4\u4e0b\u6240\u6709\u7684 GPU \u5361\u6570\u91cf GPU \u5e73\u5747\u4f7f\u7528\u7387 \u96c6\u7fa4\u4e0b\u6240\u6709 GPU \u5361\u7684\u5e73\u5747\u7b97\u529b\u4f7f\u7528\u7387 GPU \u5e73\u5747\u663e\u5b58\u4f7f\u7528\u7387 \u96c6\u7fa4\u4e0b\u6240\u6709 GPU \u5361\u7684\u5e73\u5747\u663e\u5b58\u4f7f\u7528\u7387 GPU \u5361\u529f\u7387 \u96c6\u7fa4\u4e0b\u6240\u6709 GPU \u5361\u7684\u529f\u7387 GPU \u5361\u6e29\u5ea6 \u96c6\u7fa4\u4e0b\u6240\u6709 GPU \u5361\u7684\u6e29\u5ea6 GPU \u7b97\u529b\u4f7f\u7528\u7387\u7ec6\u8282 24 \u5c0f\u65f6\u5185\uff0c\u96c6\u7fa4\u4e0b\u6240\u6709 GPU \u5361\u7684\u4f7f\u7528\u7387\u7ec6\u8282\uff08\u5305\u542b max\u3001avg\u3001current\uff09 GPU \u663e\u5b58\u4f7f\u7528\u91cf\u7ec6\u8282 24 \u5c0f\u65f6\u5185\uff0c\u96c6\u7fa4\u4e0b\u6240\u6709 GPU \u5361\u7684\u663e\u5b58\u4f7f\u7528\u91cf\u7ec6\u8282\uff08\u5305\u542b min\u3001max\u3001avg\u3001current\uff09 GPU \u663e\u5b58\u5e26\u5bbd\u4f7f\u7528\u7387 \u8868\u793a\u5185\u5b58\u5e26\u5bbd\u5229\u7528\u7387\u3002\u4ee5 Nvidia GPU V100 \u4e3a\u4f8b\uff0c\u5176\u6700\u5927\u5185\u5b58\u5e26\u5bbd\u4e3a 900 GB/sec\uff0c\u5982\u679c\u5f53\u524d\u7684\u5185\u5b58\u5e26\u5bbd\u4e3a 450 GB/sec\uff0c\u5219\u5185\u5b58\u5e26\u5bbd\u5229\u7528\u7387\u4e3a 50%"},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-metrics.html#_2","title":"\u8282\u70b9\u7ef4\u5ea6","text":"\u6307\u6807\u540d\u79f0 \u63cf\u8ff0 GPU \u6a21\u5f0f \u8282\u70b9\u4e0a GPU \u5361\u7684\u4f7f\u7528\u6a21\u5f0f\uff0c\u5305\u542b\u6574\u5361\u6a21\u5f0f\u3001MIG \u6a21\u5f0f\u3001vGPU \u6a21\u5f0f GPU \u7269\u7406\u5361\u6570 \u8282\u70b9\u4e0a\u6240\u6709\u7684 GPU \u5361\u6570\u91cf GPU \u865a\u62df\u5361\u6570 \u8282\u70b9\u4e0a\u5df2\u7ecf\u88ab\u521b\u5efa\u51fa\u6765\u7684 vGPU \u8bbe\u5907\u6570\u91cf GPU MIG \u5b9e\u4f8b\u6570 \u8282\u70b9\u4e0a\u5df2\u7ecf\u88ab\u521b\u5efa\u51fa\u6765\u7684 MIG \u5b9e\u4f8b\u6570 GPU \u663e\u5b58\u5206\u914d\u7387 \u8282\u70b9\u4e0a\u6240\u6709 GPU \u5361\u7684\u663e\u5b58\u5206\u914d\u7387 GPU \u7b97\u529b\u5e73\u5747\u4f7f\u7528\u7387 \u8282\u70b9\u4e0a\u6240\u6709 GPU \u5361\u7684\u7b97\u529b\u5e73\u5747\u4f7f\u7528\u7387 GPU \u663e\u5b58\u5e73\u5747\u4f7f\u7528\u7387 \u8282\u70b9\u4e0a\u6240\u6709 GPU \u5361\u7684\u5e73\u5747\u663e\u5b58\u4f7f\u7528\u7387 GPU \u9a71\u52a8\u7248\u672c \u8282\u70b9\u4e0a GPU \u5361\u9a71\u52a8\u7684\u7248\u672c\u4fe1\u606f GPU \u7b97\u529b\u4f7f\u7528\u7387\u7ec6\u8282 24 \u5c0f\u65f6\u5185\uff0c\u8282\u70b9\u4e0a\u6bcf\u5f20 GPU \u5361\u7684\u7b97\u529b\u4f7f\u7528\u7387\u7ec6\u8282\uff08\u5305\u542b max\u3001avg\u3001current\uff09 GPU \u663e\u5b58\u4f7f\u7528\u91cf 24 \u5c0f\u65f6\u5185\uff0c\u8282\u70b9\u4e0a\u6bcf\u5f20 GPU \u5361\u7684\u663e\u5b58\u4f7f\u7528\u91cf\u7ec6\u8282\uff08\u5305\u542b min\u3001max\u3001avg\u3001current\uff09

Troubleshooting GPU issues based on XID status

XID messages are error reports printed by the NVIDIA driver to the operating system's kernel log or event log. An XID message identifies a GPU error event and provides information such as the error type, location, and error code for faults in GPU hardware, NVIDIA software, or applications. If the XID check item on a GPU node is empty, no XID message has occurred; if it is not, you can troubleshoot using the table below, or consult the full list of XID messages.

• XID 13: Graphics Engine Exception. Usually an array out-of-bounds access or an instruction error; a hardware problem with small probability.
• XID 31: GPU memory page fault. Usually an illegal address access by the application; a driver or hardware problem with very small probability.
• XID 32: Invalid or corrupted push buffer stream. Reported by the DMA controller on the PCIe bus that manages communication between the NVIDIA driver and the GPU; usually caused by PCI quality problems rather than by your program.
• XID 38: Driver firmware error. Usually a driver firmware error rather than a hardware problem.
• XID 43: GPU stopped processing. Usually an error in your own application rather than a hardware problem.
• XID 45: Preemptive cleanup, due to previous errors -- Most likely to see when running multiple cuda applications and hitting a DBE. Usually the GPU application exited because of a manual kill or another fault (hardware, resource limits, etc.); XID 45 only reports the outcome, and the root cause usually requires further log analysis.
• XID 48: Double Bit ECC Error (DBE). Reported when the GPU hits an uncorrectable error; the error is also reported to your application. Resetting the GPU or rebooting the node is usually required to clear this error.
• XID 61: Internal micro-controller breakpoint/warning. An internal GPU engine has stopped working and your workload is already affected.
• XID 62: Internal micro-controller halt. Triggered in scenarios similar to XID 61.
• XID 63: ECC page retirement or row remapping recording event. When an application hits a GPU memory hardware error, NVIDIA's self-correction mechanism retires or remaps the faulty memory region; retirement and remapping information must be recorded in the infoROM to take effect permanently. Volta architecture: an ECC page retirement event was successfully recorded to the infoROM. Ampere architecture: a row remapping event was successfully recorded to the infoROM.
• XID 64: ECC page retirement or row remapper recording failure. Triggered in scenarios similar to XID 63, but XID 63 means the retirement or remapping information was successfully recorded to the infoROM, while XID 64 means that recording failed.
• XID 68: NVDEC0 Exception. Usually a hardware or driver problem.
• XID 74: NVLINK Error. An XID raised by an NVLink hardware error, indicating that the GPU has hit a serious hardware fault and needs to be taken offline for repair.
• XID 79: GPU has fallen off the bus. The GPU hardware has dropped off the bus and can no longer be detected on it, indicating a serious hardware fault that requires offline repair.
• XID 92: High single-bit ECC error rate. A hardware or driver fault.
• XID 94: Contained ECC error. When an application hits an uncorrectable GPU memory ECC error, NVIDIA's error containment mechanism tries to contain the error within the application that hit the hardware fault, so that other applications running on the GPU node are unaffected. This event is raised when containment succeeds; only applications hitting the uncorrectable ECC error are affected.
• XID 95: Uncontained ECC error. Triggered in scenarios similar to XID 94, but XID 94 means containment succeeded, while XID 95 means it failed, indicating that all applications running on that GPU are affected.

"},{"location":"end-user/kpanda/gpu/nvidia/gpu-monitoring-alarm/gpu-metrics.html#pod","title":"Pod Dimension","text":"

Application overview, GPU card - compute & memory:

• Pod GPU compute utilization: compute utilization of the GPU cards used by the current Pod
• Pod GPU memory utilization: memory utilization of the GPU cards used by the current Pod
• Pod GPU memory usage: amount of GPU memory used by the current Pod
• GPU memory allocation: amount of GPU memory allocated to the current Pod
• Pod GPU memory copy utilization: memory copy ratio of the GPU cards used by the current Pod

GPU card - engine overview:

• GPU graphics engine activity percentage: the fraction of time within a monitoring cycle during which the Graphics or Compute engine is Active
• GPU memory bandwidth utilization (Memory BW Utilization): the fraction of cycles spent sending data to or receiving data from device memory. The value is an average over the interval, not an instantaneous value; higher values indicate heavier use of device memory. A value of 1 (100%) corresponds to a DRAM instruction executing every cycle throughout the interval (in practice a peak of about 0.8 (80%) is the achievable maximum). A value of 0.2 (20%) indicates that device memory is being read or written during 20% of the cycles in the interval.
• Tensor core engine utilization: the fraction of time within a monitoring cycle during which the Tensor Core pipe is Active
• FP16 engine utilization: the fraction of time within a monitoring cycle during which the FP16 pipe is Active
• FP32 engine utilization: the fraction of time within a monitoring cycle during which the FP32 pipe is Active
• FP64 engine utilization: the fraction of time within a monitoring cycle during which the FP64 pipe is Active
• GPU decode utilization: decode engine ratio of the GPU card
• GPU encode utilization: encode engine ratio of the GPU card

GPU card - temperature & power:

• GPU card temperature: temperature of all GPU cards in the cluster
• GPU card power: power of all GPU cards in the cluster
• GPU card total energy consumption: total energy consumed by the GPU card

GPU card - clock:

• GPU card memory clock: memory frequency
• GPU card application SM clock: the application's SM clock frequency
• GPU card application memory clock: the application's memory frequency
• GPU card video engine clock: video engine frequency
• GPU card throttling reasons: reasons for clock throttling

GPU card - other details:

• Graphics engine activity: the fraction of time during which any part of the graphics or compute engine is active. The graphics engine is active if a graphics/compute context is bound and the graphics/compute pipe is busy. The value is an average over the interval, not an instantaneous value.
• SM activity: the fraction of time during which at least one warp is active on a multiprocessor, averaged over all multiprocessors. Note that "active" does not necessarily mean a warp is actively computing; for example, a warp waiting on a memory request counts as active. The value is an average over the interval, not an instantaneous value. A value of 0.8 or higher is necessary but not sufficient for effective GPU use; a value below 0.5 likely indicates inefficient GPU usage. In a simplified view of the GPU architecture, if the GPU has N SMs, a kernel that uses N blocks and runs for the entire interval corresponds to activity 1 (100%); a kernel that uses N/5 blocks and runs for the entire interval corresponds to activity 0.2 (20%); a kernel that uses N blocks but runs for only one fifth of the interval, with the SMs otherwise idle, also corresponds to activity 0.2 (20%). The value is independent of the number of threads per block (see DCGM_FI_PROF_SM_OCCUPANCY).
• SM occupancy: the fraction of warps resident on a multiprocessor, relative to the maximum number of concurrent warps the multiprocessor supports. The value is an average over the interval, not an instantaneous value. Higher occupancy does not necessarily indicate higher GPU utilization. For workloads limited by GPU memory bandwidth (see DCGM_FI_PROF_DRAM_ACTIVE), higher occupancy does indicate higher GPU utilization; for compute-limited workloads (i.e., not limited by GPU memory bandwidth or latency), higher occupancy does not necessarily correlate with higher GPU utilization. Computing occupancy is not simple: it depends on GPU properties, the number of threads per block, registers per thread, shared memory per block, and other factors. Use the CUDA Occupancy Calculator to explore occupancy scenarios.
• Tensor activity: the fraction of cycles during which the tensor (HMMA/IMMA) pipe is active. The value is an average over the interval, not an instantaneous value; higher values indicate heavier use of the tensor cores. Activity 1 (100%) corresponds to issuing one tensor instruction every other cycle throughout the interval. Activity 0.2 (20%) could mean 20% of the SMs at 100% utilization over the whole period, 100% of the SMs at 20% utilization over the whole period, 100% of the SMs at 100% utilization for 20% of the period, or any combination in between (see DCGM_FI_PROF_SM_ACTIVE to help disambiguate these possibilities).
• FP64 engine activity: the fraction of cycles during which the FP64 (double-precision) pipe is active. The value is an average over the interval, not an instantaneous value; higher values indicate heavier use of the FP64 cores. Activity 1 (100%) corresponds to executing one FP64 instruction on every SM every four cycles on Volta throughout the interval. Activity 0.2 (20%) could mean 20% of the SMs at 100% utilization over the whole period, 100% of the SMs at 20% utilization over the whole period, 100% of the SMs at 100% utilization for 20% of the period, or any combination in between (see DCGM_FI_PROF_SM_ACTIVE to help disambiguate these possibilities).
• FP32 engine activity: the fraction of cycles during which the FMA (FP32 single-precision and integer) pipe is active. The value is an average over the interval, not an instantaneous value; higher values indicate heavier use of the FP32 cores. Activity 1 (100%) corresponds to executing one FP32 instruction every other cycle throughout the interval. Activity 0.2 (20%) could mean 20% of the SMs at 100% utilization over the whole period, 100% of the SMs at 20% utilization over the whole period, 100% of the SMs at 100% utilization for 20% of the period, or any combination in between (see DCGM_FI_PROF_SM_ACTIVE to help disambiguate these possibilities).
• FP16 engine activity: the fraction of cycles during which the FP16 (half-precision) pipe is active. The value is an average over the interval, not an instantaneous value; higher values indicate heavier use of the FP16 cores. Activity 1 (100%) corresponds to executing one FP16 instruction every other cycle throughout the interval. Activity 0.2 (20%) could mean 20% of the SMs at 100% utilization over the whole period, 100% of the SMs at 20% utilization over the whole period, 100% of the SMs at 100% utilization for 20% of the period, or any combination in between (see DCGM_FI_PROF_SM_ACTIVE to help disambiguate these possibilities).
• Memory bandwidth utilization: the fraction of cycles spent sending data to or receiving data from device memory. The value is an average over the interval, not an instantaneous value; higher values indicate heavier use of device memory. Activity 1 (100%) corresponds to a DRAM instruction executing every cycle throughout the interval (in practice a peak of about 0.8 (80%) is the achievable maximum). Activity 0.2 (20%) indicates that device memory is being read or written during 20% of the cycles in the interval.
• NVLink bandwidth: the rate of data transmitted/received over NVLink (excluding protocol headers), in bytes per second. The value is an average over a period, not an instantaneous value. For example, if 1 GB of data is transferred within 1 second, the rate is 1 GB/s whether the data is transferred at a constant rate or in bursts. The theoretical maximum NVLink Gen2 bandwidth is 25 GB/s per link per direction.
• PCIe bandwidth: the rate of data transmitted/received over the PCIe bus, including protocol headers and data payloads, in bytes per second. The value is an average over a period, not an instantaneous value. For example, if 1 GB of data is transferred within 1 second, the rate is 1 GB/s whether the data is transferred at a constant rate or in bursts. The theoretical maximum PCIe Gen3 bandwidth is 985 MB/s per lane.
• PCIe transmit rate: the rate at which the node's GPU cards transmit data over the PCIe bus
• PCIe receive rate: the rate at which the node's GPU cards receive data over the PCIe bus

"},{"location":"end-user/kpanda/gpu/nvidia/mig/index.html","title":"NVIDIA Multi-Instance GPU (MIG) Overview","text":""},{"location":"end-user/kpanda/gpu/nvidia/mig/index.html#mig","title":"MIG Scenarios","text":"
• Multi-tenant cloud environments

  MIG allows cloud service providers to partition one physical GPU into multiple independent GPU instances, each of which can be allocated to a different tenant. This achieves resource isolation and independence, satisfying multiple tenants' demands for GPU compute capacity.

• Containerized applications

  MIG enables finer-grained GPU resource management in containerized environments. By partitioning a physical GPU into multiple MIG instances, each container can be assigned independent GPU compute resources, providing better performance isolation and resource utilization.

• Batch processing jobs

  For batch jobs that require large-scale parallel computing, MIG provides higher compute performance and larger GPU memory capacity. Each MIG instance can use part of the physical GPU's compute resources, accelerating large-scale computing tasks.

• AI/machine learning training

  MIG provides greater compute capacity and GPU memory for training large deep learning models. With the physical GPU partitioned into multiple MIG instances, each instance can run model training independently, improving training efficiency and throughput.

Overall, NVIDIA MIG suits scenarios that need finer-grained GPU resource allocation and management: it isolates resources, improves performance utilization, and satisfies the GPU compute needs of multiple users or applications.

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/mig/index.html#mig_1","title":"MIG \u6982\u8ff0","text":"

NVIDIA Multi-Instance GPU (MIG) is a feature introduced on NVIDIA's H100, A100, and A30 series GPUs that partitions one physical GPU into multiple GPU instances to provide finer-grained resource sharing and isolation. MIG can split a GPU into as many as seven GPU instances, allowing a single physical GPU card to serve multiple users with separate GPU resources for optimal GPU utilization.

This feature lets multiple applications or users share GPU resources at the same time, improving the utilization of compute resources and increasing system scalability.

With MIG, each GPU instance's processors have separate and isolated paths through the entire memory system: the on-chip crossbar ports, L2 cache banks, memory controllers, and DRAM address buses are all uniquely assigned to an individual instance.

This ensures that an individual user's workload runs with predictable throughput and latency, and with the same L2 cache allocation and DRAM bandwidth. MIG can partition the available GPU compute resources (including streaming multiprocessors, or SMs, and GPU engines such as copy engines or decoders) to provide a defined quality of service (QoS) and fault isolation for different clients such as virtual machines, containers, or processes. MIG enables multiple GPU instances to run in parallel on a single physical GPU.

MIG allows multiple vGPUs (and therefore virtual machines) to run in parallel on a single GPU, while preserving the isolation guarantees that vGPU provides. For details on partitioning GPUs with vGPU and MIG, see NVIDIA Multi-Instance GPU and NVIDIA Virtual Compute Server.

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/mig/index.html#mig_2","title":"MIG \u67b6\u6784","text":"

                                                                          \u5982\u4e0b\u662f\u4e00\u4e2a MIG \u7684\u6982\u8ff0\u56fe\uff0c\u53ef\u4ee5\u770b\u51fa MIG \u5c06\u4e00\u5f20\u7269\u7406 GPU \u5361\u865a\u62df\u5316\u6210\u4e86 7 \u4e2a GPU \u5b9e\u4f8b\uff0c\u8fd9\u4e9b GPU \u5b9e\u4f8b\u80fd\u591f\u53ef\u4ee5\u88ab\u591a\u4e2a User \u4f7f\u7528\u3002

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/mig/index.html#_1","title":"\u91cd\u8981\u6982\u5ff5","text":"
• SM: Streaming Multiprocessor, the core compute unit of a GPU, responsible for executing graphics rendering and general-purpose computing tasks. Each SM contains a set of CUDA cores along with shared memory, register files, and other resources, and can execute multiple threads concurrently. Each MIG instance owns a certain number of SMs, other related resources, and its partitioned share of GPU memory.
• GPU Memory Slice: the smallest portion of GPU memory, including the corresponding memory controllers and cache. A GPU memory slice is roughly one eighth of the GPU's total memory resources in both capacity and bandwidth.
• GPU SM Slice: the smallest unit of SM compute on the GPU. When configured in MIG mode, a GPU SM slice is roughly one seventh of the total number of SMs available in the GPU.
• GPU Slice: the smallest portion of the GPU, combining a single GPU memory slice with a single GPU SM slice.
• GPU Instance: a GPU instance (GI) is the combination of GPU slices and GPU engines (DMA, NVDEC, etc.). Anything within a GPU instance always shares all of its GPU memory slices and the other GPU engines, but its SM slices can be further subdivided into compute instances (CIs). A GPU instance provides memory QoS: each GPU slice contains dedicated GPU memory resources that limit the available capacity and bandwidth and provide memory QoS. Each GPU memory slice gets one eighth of the total GPU memory resources, and each GPU SM slice gets one seventh of the total number of SMs.
• Compute Instance: the compute slices of a GPU instance can be further subdivided into multiple compute instances (CIs). The CIs share the parent GI's engines and memory, but each CI has dedicated SM resources.
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/mig/index.html#gpu-gi","title":"GPU \u5b9e\u4f8b\uff08GI\uff09","text":"

                                                                          \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728 GPU \u4e0a\u521b\u5efa\u5404\u79cd\u5206\u533a\u3002\u5c06\u4f7f\u7528 A100-40GB \u4f5c\u4e3a\u793a\u4f8b\u6f14\u793a\u5982\u4f55\u5bf9\u5355\u4e2a GPU \u7269\u7406\u5361\u4e0a\u8fdb\u884c\u5206\u533a\u3002

                                                                          GPU \u7684\u5206\u533a\u662f\u4f7f\u7528\u5185\u5b58\u5207\u7247\u8fdb\u884c\u7684\uff0c\u56e0\u6b64\u53ef\u4ee5\u8ba4\u4e3a A100-40GB GPU \u5177\u6709 8x5GB \u5185\u5b58\u5207\u7247\u548c 7 \u4e2a GPU SM \u5207\u7247\uff0c\u5982\u4e0b\u56fe\u6240\u793a\uff0c\u5c55\u793a\u4e86 A100 \u4e0a\u53ef\u7528\u7684\u5185\u5b58\u5207\u7247\u3002

                                                                          \u5982\u4e0a\u6240\u8ff0\uff0c\u521b\u5efa GPU \u5b9e\u4f8b \uff08GI\uff09 \u9700\u8981\u5c06\u4e00\u5b9a\u6570\u91cf\u7684\u5185\u5b58\u5207\u7247\u4e0e\u4e00\u5b9a\u6570\u91cf\u7684\u8ba1\u7b97\u5207\u7247\u76f8\u7ed3\u5408\u3002 \u5728\u4e0b\u56fe\u4e2d\uff0c\u4e00\u4e2a 5GB \u5185\u5b58\u5207\u7247\u4e0e 1 \u4e2a\u8ba1\u7b97\u5207\u7247\u76f8\u7ed3\u5408\uff0c\u4ee5\u521b\u5efa 1g.5gb GI \u914d\u7f6e\u6587\u4ef6\uff1a

                                                                          \u540c\u6837\uff0c4x5GB \u5185\u5b58\u5207\u7247\u53ef\u4ee5\u4e0e 4x1 \u8ba1\u7b97\u5207\u7247\u7ed3\u5408\u4f7f\u7528\u4ee5\u521b\u5efa 4g.20gb \u7684 GI \u914d\u7f6e\u6587\u4ef6\uff1a

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/mig/index.html#ci","title":"\u8ba1\u7b97\u5b9e\u4f8b\uff08CI\uff09","text":"

                                                                          GPU \u5b9e\u4f8b\u7684\u8ba1\u7b97\u5207\u7247(GI)\u53ef\u4ee5\u8fdb\u4e00\u6b65\u7ec6\u5206\u4e3a\u591a\u4e2a\u8ba1\u7b97\u5b9e\u4f8b\uff08CI\uff09\uff0c\u5176\u4e2d CI \u5171\u4eab\u7236 GI \u7684\u5f15\u64ce\u548c\u5185\u5b58\uff0c \u4f46\u6bcf\u4e2a CI \u90fd\u6709\u4e13\u7528\u7684 SM \u8d44\u6e90\u3002\u4f7f\u7528\u4e0a\u9762\u7684\u76f8\u540c 4g.20gb \u793a\u4f8b\uff0c\u53ef\u4ee5\u521b\u5efa\u4e00\u4e2a CI \u4ee5\u4ec5\u4f7f\u7528\u7b2c\u4e00\u4e2a\u8ba1\u7b97\u5207\u7247\u7684 1c.4g.20gb \u8ba1\u7b97\u914d\u7f6e\uff0c\u5982\u4e0b\u56fe\u84dd\u8272\u90e8\u5206\u6240\u793a\uff1a

                                                                          \u5728\u8fd9\u79cd\u60c5\u51b5\u4e0b\uff0c\u53ef\u4ee5\u901a\u8fc7\u9009\u62e9\u4efb\u4f55\u8ba1\u7b97\u5207\u7247\u6765\u521b\u5efa 4 \u4e2a\u4e0d\u540c\u7684 CI\u3002\u8fd8\u53ef\u4ee5\u5c06\u4e24\u4e2a\u8ba1\u7b97\u5207\u7247\u7ec4\u5408\u5728\u4e00\u8d77\u4ee5\u521b\u5efa 2c.4g.20gb \u7684\u8ba1\u7b97\u914d\u7f6e\uff09\uff1a

                                                                          \u9664\u6b64\u4e4b\u5916\uff0c\u8fd8\u53ef\u4ee5\u7ec4\u5408 3 \u4e2a\u8ba1\u7b97\u5207\u7247\u4ee5\u521b\u5efa\u8ba1\u7b97\u914d\u7f6e\u6587\u4ef6\uff0c\u6216\u8005\u53ef\u4ee5\u7ec4\u5408\u6240\u6709 4 \u4e2a\u8ba1\u7b97\u5207\u7247\u4ee5\u521b\u5efa 3c.4g.20gb \u3001 4c.4g.20gb \u8ba1\u7b97\u914d\u7f6e\u6587\u4ef6\u3002 \u5408\u5e76\u6240\u6709 4 \u4e2a\u8ba1\u7b97\u5207\u7247\u65f6\uff0c\u914d\u7f6e\u6587\u4ef6\u7b80\u79f0\u4e3a 4g.20gb \u3002

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/mig/create_mig.html","title":"\u5f00\u542f MIG \u529f\u80fd","text":"

                                                                          \u672c\u7ae0\u8282\u4ecb\u7ecd\u5982\u4f55\u5f00\u542f NVIDIA MIG \u529f\u80fd\u65b9\u5f0f\uff0cNVIDIA \u5f53\u524d\u63d0\u4f9b\u4e24\u79cd\u5728 Kubernetes \u8282\u70b9\u4e0a\u516c\u5f00 MIG \u8bbe\u5907\u7684\u7b56\u7565\uff1a

                                                                          • Single \u6a21\u5f0f\uff0c\u8282\u70b9\u4ec5\u5728\u5176\u6240\u6709 GPU \u4e0a\u516c\u5f00\u5355\u4e00\u7c7b\u578b\u7684 MIG \u8bbe\u5907\u3002
                                                                          • Mixed \u6a21\u5f0f\uff0c\u8282\u70b9\u5728\u5176\u6240\u6709 GPU \u4e0a\u516c\u5f00\u6df7\u5408 MIG \u8bbe\u5907\u7c7b\u578b\u3002

                                                                          \u8be6\u60c5\u53c2\u8003 NVIDIA GPU \u5361\u4f7f\u7528\u6a21\u5f0f\u3002
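To see which strategy a node ends up using, a quick check is to inspect its allocatable resources (a sketch; the exact resource names depend on the configured profiles):

kubectl get node <node-name> -o jsonpath='{.status.allocatable}'\n# Single mode advertises whole-GPU-style resources, e.g. \"nvidia.com/gpu\": \"7\"\n# Mixed mode advertises per-profile resources, e.g. \"nvidia.com/mig-1g.5gb\": \"7\"\n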

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/mig/create_mig.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          • \u5f85\u5b89\u88c5 GPU \u9a71\u52a8\u8282\u70b9\u7cfb\u7edf\u8981\u6c42\u8bf7\u53c2\u8003\uff1aGPU \u652f\u6301\u77e9\u9635
                                                                          • \u786e\u8ba4\u96c6\u7fa4\u8282\u70b9\u4e0a\u5177\u6709\u5bf9\u5e94\u578b\u53f7\u7684 GPU \u5361\uff08NVIDIA H100\u3001 A100 \u548c A30 Tensor Core GPU\uff09\uff0c \u8be6\u60c5\u53c2\u8003 GPU \u652f\u6301\u77e9\u9635\u3002
                                                                          • \u8282\u70b9\u4e0a\u7684\u6240\u6709 GPU \u5fc5\u987b\uff1a\u5c5e\u4e8e\u540c\u4e00\u4ea7\u54c1\u7ebf\uff08\u4f8b\u5982 A100-SXM-40GB\uff09
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/mig/create_mig.html#gpu-operator-addon","title":"\u5b89\u88c5 gpu-operator Addon","text":""},{"location":"end-user/kpanda/gpu/nvidia/mig/create_mig.html#_2","title":"\u53c2\u6570\u914d\u7f6e","text":"

                                                                          \u5b89\u88c5 Operator \u65f6\u9700\u8981\u5bf9\u5e94\u8bbe\u7f6e MigManager Config \u53c2\u6570\uff0c \u9ed8\u8ba4\u4e3a default-mig-parted-config \uff0c\u540c\u65f6\u4e5f\u53ef\u4ee5\u81ea\u5b9a\u4e49\u5207\u5206\u7b56\u7565\u914d\u7f6e\u6587\u4ef6\uff1a
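For example, pointing the Operator at a custom policy file might look like the following values snippet (a sketch; it assumes the gpu-operator chart's migManager.config.name key and a ConfigMap named custom-mig-parted-config that already exists in the Operator's namespace):

migManager:\n  config:\n    name: custom-mig-parted-config   # default: default-mig-parted-config\n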

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/mig/create_mig.html#_3","title":"\u81ea\u5b9a\u4e49\u5207\u5206\u7b56\u7565","text":"
                                                                            ## \u81ea\u5b9a\u4e49\u5207\u5206 GI \u5b9e\u4f8b\u914d\u7f6e\n  all-disabled:\n    - devices: all\n      mig-enabled: false\n  all-enabled:\n    - devices: all\n      mig-enabled: true\n      mig-devices: {}\n  all-1g.10gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.5gb: 7\n  all-1g.10gb.me:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb+me: 1\n  all-1g.20gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.20gb: 4\n  all-2g.20gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        2g.20gb: 3\n  all-3g.40gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        3g.40gb: 2\n  all-4g.40gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        4g.40gb: 1\n  all-7g.80gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        7g.80gb: 1\n  all-balanced:\n    - device-filter: [\"0x233110DE\", \"0x232210DE\", \"0x20B210DE\", \"0x20B510DE\", \"0x20F310DE\", \"0x20F510DE\"]\n      devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb: 2\n        2g.20gb: 1\n        3g.40gb: 1\n  # \u8bbe\u7f6e\u540e\u4f1a\u6309\u7167\u8bbe\u7f6e\u89c4\u683c\u5207\u5206 CI \u5b9e\u4f8b\n  custom-config:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        3g.40gb: 2\n

In the YAML above, set custom-config; once it is set, CI instances are partitioned according to the specified profile.

custom-config:\n  - devices: all\n    mig-enabled: true\n    mig-devices:\n      1c.3g.40gb: 6\n

Once the configuration is complete, you can use GPU MIG resources when confirming the deployment of an application.

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/mig/create_mig.html#gpu","title":"\u5207\u6362\u8282\u70b9 GPU \u6a21\u5f0f","text":"

                                                                          Note

                                                                          \u5207\u6362 GPU \u6a21\u5f0f\u6216\u8005\u4fee\u6539\u5207\u5206\u89c4\u683c\u540e\u9700\u8981\u91cd\u542f nvidia-mig-manager\u3002

                                                                          \u5f53\u6211\u4eec\u6210\u529f\u5b89\u88c5 gpu-operator \u4e4b\u540e\uff0c\u8282\u70b9\u9ed8\u8ba4\u662f\u6574\u5361\u6a21\u5f0f\uff0c\u5728\u8282\u70b9\u7ba1\u7406\u9875\u9762\u4f1a\u6709\u6807\u8bc6\uff0c\u5982\u4e0b\u56fe\u6240\u793a\uff1a

                                                                          \u70b9\u51fb\u8282\u70b9\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u9009\u62e9 GPU \u6a21\u5f0f\u5207\u6362 \uff0c\u7136\u540e\u9009\u62e9\u5bf9\u5e94\u7684 MIG \u6a21\u5f0f\u4ee5\u53ca\u5207\u5206\u7684\u7b56\u7565\uff0c\u8fd9\u91cc\u4ee5 MIXED \u6a21\u5f0f\u4e3a\u4f8b\uff1a

                                                                          \u8fd9\u91cc\u4e00\u5171\u6709\u4e24\u4e2a\u914d\u7f6e\uff1a

                                                                          1. MIg \u7b56\u7565\uff1aMixed \u4ee5\u53ca Single \u3002
                                                                          2. \u5207\u5206\u7b56\u7565\uff1a\u8fd9\u91cc\u7684\u7b56\u7565\u9700\u8981\u4e0e default-mig-parted-config \uff08\u6216\u8005\u7528\u6237\u81ea\u5b9a\u4e49\u7684\u5207\u5206\u7b56\u7565\uff09\u914d\u7f6e\u6587\u4ef6\u4e2d\u7684 key \u4fdd\u6301\u4e00\u81f4\u3002

                                                                          \u70b9\u51fb \u786e\u5b9a \u6309\u94ae\u540e\uff0c\u7b49\u5f85\u7ea6\u4e00\u5206\u949f\u5de6\u53f3\u5237\u65b0\u9875\u9762\uff0cMIG \u6a21\u5f0f\u5207\u6362\u6210\uff1a

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/mig/mig_command.html","title":"MIG \u76f8\u5173\u547d\u4ee4","text":"

                                                                          GI \u76f8\u5173\u547d\u540d\uff1a

                                                                          \u5b50\u547d\u4ee4 \u8bf4\u660e nvidia-smi mig -lgi \u67e5\u770b\u521b\u5efa GI \u5b9e\u4f8b\u5217\u8868 nvidia-smi mig -dgi -gi \u5220\u9664\u6307\u5b9a\u7684 GI \u5b9e\u4f8b nvidia-smi mig -lgip \u67e5\u770b GI \u7684 profile nvidia-smi mig -cgi \u901a\u8fc7\u6307\u5b9a profile \u7684 ID \u521b\u5efa GI

                                                                          CI \u76f8\u5173\u547d\u4ee4\uff1a

                                                                          \u5b50\u547d\u4ee4 \u8bf4\u660e nvidia-smi mig -lcip { -gi {gi Instance ID}} \u67e5\u770b CI \u7684 profile \uff0c\u6307\u5b9a -gi \u53ef\u4ee5\u67e5\u770b\u7279\u5b9a GI \u5b9e\u4f8b\u53ef\u4ee5\u521b\u5efa\u7684 CI nvidia-smi mig -lci \u67e5\u770b\u521b\u5efa\u7684 CI \u5b9e\u4f8b\u5217\u8868 nvidia-smi mig -cci {profile id} -gi {gi instance id} \u6307\u5b9a\u7684 GI \u521b\u5efa CI \u5b9e\u4f8b nvidia-smi mig -dci -ci \u5220\u9664\u6307\u5b9a CI \u5b9e\u4f8b
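A minimal sketch of a typical workflow with these commands (the profile and instance IDs below are hypothetical and vary by GPU model, so list them first):

nvidia-smi mig -lgip                # list the GI profiles supported by this GPU\nnvidia-smi mig -i 0 -cgi 9          # create a GI on GPU 0 using profile ID 9 (e.g. 3g.20gb on an A100-40GB)\nnvidia-smi mig -lgi                 # confirm the GI and note its instance ID (e.g. 1)\nnvidia-smi mig -lcip -gi 1          # list the CI profiles available on GI 1\nnvidia-smi mig -cci 0 -gi 1         # create a CI on GI 1 using CI profile ID 0\nnvidia-smi mig -lci                 # confirm the CI was created\n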

GI+CI commands:

Subcommand Description nvidia-smi mig -i 0 -cgi {gi profile id} -C {ci profile id} Create GI + CI instances in one step"},{"location":"end-user/kpanda/gpu/nvidia/mig/mig_usage.html","title":"Using MIG GPU Resources","text":"

This section describes how applications use MIG GPU resources.

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/mig/mig_usage.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          • \u5df2\u7ecf\u90e8\u7f72 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0 \u5bb9\u5668\u7ba1\u7406\u5e73\u53f0\uff0c\u4e14\u5e73\u53f0\u8fd0\u884c\u6b63\u5e38\u3002
                                                                          • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                                                                          • \u5df2\u5b89\u88c5 GPU Operator\u3002
                                                                          • \u96c6\u7fa4\u8282\u70b9\u4e0a\u5177\u6709\u5bf9\u5e94\u578b\u53f7\u7684 GPU \u5361
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/mig/mig_usage.html#ui-mig-gpu","title":"UI \u754c\u9762\u4f7f\u7528 MIG GPU","text":"
                                                                          1. \u786e\u8ba4\u96c6\u7fa4\u662f\u5426\u5df2\u8bc6\u522b GPU \u5361\u7c7b\u578b

                                                                            \u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 -> \u8282\u70b9\u7ba1\u7406 \uff0c\u67e5\u770b\u662f\u5426\u5df2\u6b63\u786e\u8bc6\u522b\u4e3a MIG \u6a21\u5f0f\u3002

                                                                          2. \u901a\u8fc7\u955c\u50cf\u90e8\u7f72\u5e94\u7528\uff0c\u53ef\u9009\u62e9\u5e76\u4f7f\u7528 NVIDIA MIG \u8d44\u6e90\u3002

                                                                            • MIG Single \u6a21\u5f0f\u793a\u4f8b\uff08\u4e0e\u6574\u5361\u4f7f\u7528\u65b9\u5f0f\u76f8\u540c\uff09\uff1a

                                                                              Note

                                                                              MIG single \u7b56\u7565\u5141\u8bb8\u7528\u6237\u4ee5\u4e0e GPU \u6574\u5361\u76f8\u540c\u7684\u65b9\u5f0f\uff08nvidia.com/gpu\uff09\u8bf7\u6c42\u548c\u4f7f\u7528GPU\u8d44\u6e90\uff0c\u4e0d\u540c\u7684\u662f\u8fd9\u4e9b\u8d44\u6e90\u53ef\u4ee5\u662f GPU \u7684\u4e00\u90e8\u5206\uff08MIG\u8bbe\u5907\uff09\uff0c\u800c\u4e0d\u662f\u6574\u4e2aGPU\u3002\u4e86\u89e3\u66f4\u591a GPU MIG \u6a21\u5f0f\u8bbe\u8ba1

                                                                            • MIG Mixed \u6a21\u5f0f\u793a\u4f8b\uff1a

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/mig/mig_usage.html#yaml-mig","title":"YAML \u914d\u7f6e\u4f7f\u7528 MIG","text":"

                                                                          MIG Single \u6a21\u5f0f\uff1a

apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mig-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mig-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mig-demo\n    spec:\n      containers:\n        - name: mig-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/gpu: 2 # (1)!\n          imagePullPolicy: Always\n      restartPolicy: Always\n
1. Number of MIG GPUs requested

MIG Mixed mode:

apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mig-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mig-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mig-demo\n    spec:\n      containers:\n        - name: mig-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/mig-4g.20gb: 1 # (1)!\n          imagePullPolicy: Always\n      restartPolicy: Always\n
1. Expose each MIG device through a resource type of the form nvidia.com/mig-<g>g.<gb>gb (for example, nvidia.com/mig-4g.20gb above)

After entering the container, you can verify that only one MIG device is in use.
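A quick way to check this from inside the Pod (a sketch; it assumes nvidia-smi is present in the image, and the UUIDs shown are placeholders):

kubectl exec -it deploy/mig-demo -- nvidia-smi -L\n# GPU 0: NVIDIA A100-SXM4-40GB (UUID: GPU-xxxx)\n#   MIG 4g.20gb Device 0: (UUID: MIG-xxxx)\n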

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/vgpu/hami.html","title":"\u6784\u5efa vGPU \u663e\u5b58\u8d85\u914d\u955c\u50cf","text":"

                                                                          Hami \u9879\u76ee\u4e2d vGPU \u663e\u5b58\u8d85\u914d\u7684\u529f\u80fd\u5df2\u7ecf\u4e0d\u5b58\u5728\uff0c\u76ee\u524d\u4f7f\u7528\u6709\u663e\u5b58\u8d85\u914d\u7684 libvgpu.so \u6587\u4ef6\u91cd\u65b0\u6784\u5efa\u3002

Dockerfile
FROM docker.m.daocloud.io/projecthami/hami:v2.3.11\nCOPY libvgpu.so /k8s-vgpu/lib/nvidia/\n

Run the following command to build the image:

docker build -t release.daocloud.io/projecthami/hami:v2.3.11 -f Dockerfile .\n

Then push the image to release.daocloud.io.
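For example (assuming you have push access to the registry):

docker push release.daocloud.io/projecthami/hami:v2.3.11\n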

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/vgpu/vgpu_addon.html","title":"\u5b89\u88c5 NVIDIA vGPU Addon","text":"

                                                                          \u5982\u9700\u5c06\u4e00\u5f20 NVIDIA \u865a\u62df\u5316\u6210\u591a\u4e2a\u865a\u62df GPU\uff0c\u5e76\u5c06\u5176\u5206\u914d\u7ed9\u4e0d\u540c\u7684\u4e91\u4e3b\u673a\u6216\u7528\u6237\uff0c\u60a8\u53ef\u4ee5\u4f7f\u7528 NVIDIA \u7684 vGPU \u80fd\u529b\u3002 \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u5b89\u88c5 vGPU \u63d2\u4ef6\uff0c\u8fd9\u662f\u4f7f\u7528 NVIDIA vGPU \u80fd\u529b\u7684\u524d\u63d0\u3002

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/vgpu/vgpu_addon.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          • \u53c2\u8003 GPU \u652f\u6301\u77e9\u9635 \u786e\u8ba4\u96c6\u7fa4\u8282\u70b9\u4e0a\u5177\u6709\u5bf9\u5e94\u578b\u53f7\u7684 GPU \u5361\u3002
                                                                          • \u5f53\u524d\u96c6\u7fa4\u5df2\u901a\u8fc7 Operator \u90e8\u7f72 NVIDIA \u9a71\u52a8\uff0c\u5177\u4f53\u53c2\u8003 GPU Operator \u79bb\u7ebf\u5b89\u88c5\u3002
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/vgpu/vgpu_addon.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                                          1. \u529f\u80fd\u6a21\u5757\u8def\u5f84\uff1a \u5bb9\u5668\u7ba1\u7406 -> \u96c6\u7fa4\u7ba1\u7406 \uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u4ece\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u6a21\u677f -> \u641c\u7d22 nvidia-vgpu \u3002

                                                                          2. \u5728\u5b89\u88c5 vGPU \u7684\u8fc7\u7a0b\u4e2d\u63d0\u4f9b\u4e86\u51e0\u4e2a\u57fa\u672c\u4fee\u6539\u7684\u53c2\u6570\uff0c\u5982\u679c\u9700\u8981\u4fee\u6539\u9ad8\u7ea7\u53c2\u6570\u70b9\u51fb YAML \u5217\u8fdb\u884c\u4fee\u6539\uff1a

                                                                            • deviceCoreScaling \uff1aNVIDIA \u88c5\u7f6e\u7b97\u529b\u4f7f\u7528\u6bd4\u4f8b\uff0c\u9884\u8bbe\u503c\u662f 1\u3002\u53ef\u4ee5\u5927\u4e8e 1\uff08\u542f\u7528\u865a\u62df\u7b97\u529b\uff0c\u5b9e\u9a8c\u529f\u80fd\uff09\u3002\u5982\u679c\u6211\u4eec\u914d\u7f6e devicePlugin.deviceCoreScaling \u53c2\u6570\u4e3a S\uff0c\u5728\u90e8\u7f72\u4e86\u6211\u4eec\u88c5\u7f6e\u63d2\u4ef6\u7684 Kubernetes \u96c6\u7fa4\u4e2d\uff0c\u8fd9\u5f20 GPU \u5206\u51fa\u7684 vGPU \u5c06\u603b\u5171\u5305\u542b S * 100% \u7b97\u529b\u3002

                                                                            • deviceMemoryScaling \uff1aNVIDIA \u88c5\u7f6e\u663e\u5b58\u4f7f\u7528\u6bd4\u4f8b\uff0c\u9884\u8bbe\u503c\u662f 1\u3002\u53ef\u4ee5\u5927\u4e8e 1\uff08\u542f\u7528\u865a\u62df\u663e\u5b58\uff0c\u5b9e\u9a8c\u529f\u80fd\uff09\u3002 \u5bf9\u4e8e\u6709 M \u663e\u5b58\u5927\u5c0f\u7684 NVIDIA GPU\uff0c\u5982\u679c\u6211\u4eec\u914d\u7f6e devicePlugin.deviceMemoryScaling \u53c2\u6570\u4e3a S\uff0c \u5728\u90e8\u7f72\u4e86\u6211\u4eec\u88c5\u7f6e\u63d2\u4ef6\u7684 Kubernetes \u96c6\u7fa4\u4e2d\uff0c\u8fd9\u5f20 GPU \u5206\u51fa\u7684 vGPU \u5c06\u603b\u5171\u5305\u542b S * M \u663e\u5b58\u3002

                                                                            • deviceSplitCount \uff1a\u6574\u6570\u7c7b\u578b\uff0c\u9884\u8bbe\u503c\u662f 10\u3002GPU \u7684\u5206\u5272\u6570\uff0c\u6bcf\u4e00\u5f20 GPU \u90fd\u4e0d\u80fd\u5206\u914d\u8d85\u8fc7\u5176\u914d\u7f6e\u6570\u76ee\u7684\u4efb\u52a1\u3002 \u82e5\u5176\u914d\u7f6e\u4e3a N \u7684\u8bdd\uff0c\u6bcf\u4e2a GPU \u4e0a\u6700\u591a\u53ef\u4ee5\u540c\u65f6\u5b58\u5728 N \u4e2a\u4efb\u52a1\u3002

                                                                            • Resources \uff1a\u5c31\u662f\u5bf9\u5e94 vgpu-device-plugin \u548c vgpu-schedule pod \u7684\u8d44\u6e90\u4f7f\u7528\u91cf\u3002

                                                                            • ServiceMonitor \uff1a\u9ed8\u8ba4\u4e0d\u5f00\u542f\uff0c\u5f00\u542f\u540e\u53ef\u524d\u5f80\u53ef\u89c2\u6d4b\u6027\u6a21\u5757\u67e5\u770b vGPU \u76f8\u5173\u76d1\u63a7\u3002\u5982\u9700\u5f00\u542f\uff0c\u8bf7\u786e\u4fdd insight-agent \u5df2\u5b89\u88c5\u5e76\u5904\u4e8e\u8fd0\u884c\u72b6\u6001\uff0c\u5426\u5219\u5c06\u5bfc\u81f4 NVIDIA vGPU Addon \u5b89\u88c5\u5931\u8d25\u3002

3. After a successful installation, the following two types of Pods appear under the specified namespace, indicating that the NVIDIA vGPU plugin has been installed successfully:

After a successful installation, applications can be deployed using vGPU resources.
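As a reference, the devicePlugin parameters described above might look like this in the YAML editor (a sketch with illustrative values only; with deviceMemoryScaling: 2, a 16 GiB card would advertise 32 GiB of vGPU memory in total):

devicePlugin:\n  deviceCoreScaling: 1\n  deviceMemoryScaling: 2    # enable 2x device memory overcommit (experimental)\n  deviceSplitCount: 10      # at most 10 concurrent tasks per GPU\n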

Note

The NVIDIA vGPU Addon does not support a direct upgrade from the older v2.0.0 to the latest v2.0.0+1; to upgrade, uninstall the old version and then reinstall.

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html","title":"\u5e94\u7528\u4f7f\u7528 Nvidia vGPU","text":"

                                                                          \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4f7f\u7528 vGPU \u80fd\u529b\u3002

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          • \u96c6\u7fa4\u8282\u70b9\u4e0a\u5177\u6709\u5bf9\u5e94\u578b\u53f7\u7684 GPU \u5361
                                                                          • \u5df2\u6210\u529f\u5b89\u88c5 vGPU Addon\uff0c\u8be6\u60c5\u53c2\u8003 GPU Addon \u5b89\u88c5
                                                                          • \u5df2\u5b89\u88c5 GPU Operator\uff0c\u5e76\u5df2 \u5173\u95ed Nvidia.DevicePlugin \u80fd\u529b\uff0c\u53ef\u53c2\u8003 GPU Operator \u79bb\u7ebf\u5b89\u88c5
                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":""},{"location":"end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html#vgpu","title":"\u754c\u9762\u4f7f\u7528 vGPU","text":"
1. Confirm whether the cluster has detected GPU cards. Click the corresponding Cluster -> Cluster Settings -> Addon Plugins and check whether GPU has been enabled and the GPU type detected automatically. Currently the cluster enables GPU automatically and sets the GPU type to Nvidia vGPU.

2. Deploy a workload: click the corresponding Cluster -> Workloads and deploy a workload from an image. After selecting the type (Nvidia vGPU), the following parameters appear:

  • Physical card count (nvidia.com/vgpu): the number of physical cards the Pod needs to mount; the value must be an integer and less than or equal to the number of cards on the host.
  • GPU compute (nvidia.com/gpucores): the percentage of GPU compute each card provides to the Pod, ranging from 0 to 100; 0 means no enforced isolation, and 100 means exclusive use of the entire card.
  • GPU memory (nvidia.com/gpumem): the amount of GPU memory each card provides to the Pod, in MB; the minimum is 1 and the maximum is the full memory of the card.

  If any of the above values are misconfigured, scheduling will fail and the resources cannot be allocated.

                                                                          "},{"location":"end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html#yaml-vgpu","title":"YAML \u914d\u7f6e\u4f7f\u7528 vGPU","text":"

                                                                          \u53c2\u8003\u5982\u4e0b\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\uff0c\u5728\u8d44\u6e90\u7533\u8bf7\u548c\u9650\u5236\u914d\u7f6e\u4e2d\u589e\u52a0 nvidia.com/vgpu: '1' \u53c2\u6570\u6765\u914d\u7f6e\u5e94\u7528\u4f7f\u7528\u7269\u7406\u5361\u7684\u6570\u91cf\u3002

apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-vgpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-vgpu-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: full-vgpu-demo\n    spec:\n      containers:\n        - name: full-vgpu-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/gpucores: '20'   # request 20% of each card's GPU compute\n              nvidia.com/gpumem: '200'   # request 200 MB of GPU memory per card\n              nvidia.com/vgpu: '1'   # number of GPUs requested\n          imagePullPolicy: Always\n      restartPolicy: Always\n
                                                                          "},{"location":"end-user/kpanda/gpu/volcano/drf.html","title":"DRF\uff08Dominant Resource Fairness\uff09 \u8c03\u5ea6\u7b56\u7565","text":"

The DRF scheduling policy considers tasks that occupy fewer resources to have higher priority. This allows more jobs to be served and prevents one resource-hungry job from starving a large number of small jobs. The DRF scheduling algorithm ensures allocation fairness, as far as possible, in environments where multiple resource types coexist.
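A small worked example of the idea: in a cluster with 100 CPUs and 400 GiB of memory, a job requesting 20 CPUs and 40 GiB has a dominant share of max(20/100, 40/400) = 20% (CPU-dominant), while a job requesting 5 CPUs and 100 GiB has a dominant share of max(5/100, 100/400) = 25% (memory-dominant). DRF favors the first job because its dominant share is lower.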

                                                                          "},{"location":"end-user/kpanda/gpu/volcano/drf.html#_1","title":"\u4f7f\u7528\u65b9\u5f0f","text":"

                                                                          DRF \u8c03\u5ea6\u7b56\u7565\u9ed8\u8ba4\u5df2\u542f\u7528\uff0c\u65e0\u9700\u4efb\u4f55\u914d\u7f6e\u3002

                                                                          kubectl -n volcano-system view configmaps volcano-scheduler-configmap\n
                                                                          "},{"location":"end-user/kpanda/gpu/volcano/drf.html#_2","title":"\u4f7f\u7528\u6848\u4f8b","text":"

                                                                          \u5728 AI \u8bad\u7ec3\uff0c\u6216\u5927\u6570\u636e\u8ba1\u7b97\u4e2d\uff0c\u901a\u8fc7\u6709\u9650\u8fd0\u884c\u4f7f\u7528\u8d44\u6e90\u5c11\u7684\u4efb\u52a1\uff0c\u8fd9\u6837\u53ef\u4ee5\u8ba9\u96c6\u7fa4\u8d44\u6e90\u4f7f\u7528\u7387\u66f4\u9ad8\uff0c\u800c\u4e14\u8fd8\u80fd\u907f\u514d\u5c0f\u4efb\u52a1\u88ab\u997f\u6b7b\u3002 \u5982\u4e0b\u521b\u5efa\u4e24\u4e2a Job\uff0c\u4e00\u4e2a\u662f\u5c0f\u8d44\u6e90\u9700\u6c42\uff0c\u4e00\u4e2a\u662f\u5927\u8d44\u6e90\u9700\u6c42\uff0c\u53ef\u4ee5\u770b\u51fa\u6765\u5c0f\u8d44\u6e90\u9700\u6c42\u7684 Job \u4f18\u5148\u8fd0\u884c\u8d77\u6765\u3002

cat <<EOF | kubectl apply -f -  \napiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: small-resource  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 4  \n  priorityClassName: small-resource  \n  tasks:  \n    - replicas: 4  \n      name: \"test\"  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                requests:  \n                  cpu: \"1\"  \n          restartPolicy: OnFailure  \n---  \napiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: large-resource  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 4  \n  priorityClassName: large-resource  \n  tasks:  \n    - replicas: 4  \n      name: \"test\"  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                requests:  \n                  cpu: \"2\"  \n          restartPolicy: OnFailure  \nEOF\n
                                                                          "},{"location":"end-user/kpanda/gpu/volcano/numa.html","title":"NUMA \u4eb2\u548c\u6027\u8c03\u5ea6","text":"

                                                                          NUMA \u8282\u70b9\u662f Non-Uniform Memory Access\uff08\u975e\u7edf\u4e00\u5185\u5b58\u8bbf\u95ee\uff09\u67b6\u6784\u4e2d\u7684\u4e00\u4e2a\u57fa\u672c\u7ec4\u6210\u5355\u5143\uff0c\u4e00\u4e2a Node \u8282\u70b9\u662f\u591a\u4e2a NUMA \u8282\u70b9\u7684\u96c6\u5408\uff0c \u5728\u591a\u4e2a NUMA \u8282\u70b9\u4e4b\u95f4\u8fdb\u884c\u5185\u5b58\u8bbf\u95ee\u65f6\u4f1a\u4ea7\u751f\u5ef6\u8fdf\uff0c\u5f00\u53d1\u8005\u53ef\u4ee5\u901a\u8fc7\u4f18\u5316\u4efb\u52a1\u8c03\u5ea6\u548c\u5185\u5b58\u5206\u914d\u7b56\u7565\uff0c\u6765\u63d0\u9ad8\u5185\u5b58\u8bbf\u95ee\u6548\u7387\u548c\u6574\u4f53\u6027\u80fd\u3002

                                                                          "},{"location":"end-user/kpanda/gpu/volcano/numa.html#_1","title":"\u4f7f\u7528\u573a\u666f","text":"

                                                                          Numa \u4eb2\u548c\u6027\u8c03\u5ea6\u7684\u5e38\u89c1\u573a\u666f\u662f\u90a3\u4e9b\u5bf9 CPU \u53c2\u6570\u654f\u611f/\u8c03\u5ea6\u5ef6\u8fdf\u654f\u611f\u7684\u8ba1\u7b97\u5bc6\u96c6\u578b\u4f5c\u4e1a\u3002\u5982\u79d1\u5b66\u8ba1\u7b97\u3001\u89c6\u9891\u89e3\u7801\u3001\u52a8\u6f2b\u52a8\u753b\u6e32\u67d3\u3001\u5927\u6570\u636e\u79bb\u7ebf\u5904\u7406\u7b49\u5177\u4f53\u573a\u666f\u3002

                                                                          "},{"location":"end-user/kpanda/gpu/volcano/numa.html#_2","title":"\u8c03\u5ea6\u7b56\u7565","text":"

                                                                          Pod \u8c03\u5ea6\u65f6\u53ef\u4ee5\u91c7\u7528\u7684 NUMA \u653e\u7f6e\u7b56\u7565\uff0c\u5177\u4f53\u7b56\u7565\u5bf9\u5e94\u7684\u8c03\u5ea6\u884c\u4e3a\u8bf7\u53c2\u89c1 Pod \u8c03\u5ea6\u884c\u4e3a\u8bf4\u660e\u3002

                                                                          • single-numa-node\uff1aPod \u8c03\u5ea6\u65f6\u4f1a\u9009\u62e9\u62d3\u6251\u7ba1\u7406\u7b56\u7565\u5df2\u7ecf\u8bbe\u7f6e\u4e3a single-numa-node \u7684\u8282\u70b9\u6c60\u4e2d\u7684\u8282\u70b9\uff0c\u4e14 CPU \u9700\u8981\u653e\u7f6e\u5728\u76f8\u540c NUMA \u4e0b\uff0c\u5982\u679c\u8282\u70b9\u6c60\u4e2d\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u8282\u70b9\uff0cPod \u5c06\u65e0\u6cd5\u88ab\u8c03\u5ea6\u3002
                                                                          • restricted\uff1aPod \u8c03\u5ea6\u65f6\u4f1a\u9009\u62e9\u62d3\u6251\u7ba1\u7406\u7b56\u7565\u5df2\u7ecf\u8bbe\u7f6e\u4e3a restricted \u8282\u70b9\u6c60\u7684\u8282\u70b9\uff0c\u4e14 CPU \u9700\u8981\u653e\u7f6e\u5728\u76f8\u540c\u7684 NUMA \u96c6\u5408\u4e0b\uff0c\u5982\u679c\u8282\u70b9\u6c60\u4e2d\u6ca1\u6709\u6ee1\u8db3\u6761\u4ef6\u7684\u8282\u70b9\uff0cPod \u5c06\u65e0\u6cd5\u88ab\u8c03\u5ea6\u3002
                                                                          • best-effort\uff1aPod \u8c03\u5ea6\u65f6\u4f1a\u9009\u62e9\u62d3\u6251\u7ba1\u7406\u7b56\u7565\u5df2\u7ecf\u8bbe\u7f6e\u4e3a best-effort \u8282\u70b9\u6c60\u7684\u8282\u70b9\uff0c\u4e14\u5c3d\u91cf\u5c06 CPU \u653e\u7f6e\u5728\u76f8\u540c NUMA \u4e0b\uff0c\u5982\u679c\u6ca1\u6709\u8282\u70b9\u6ee1\u8db3\u8fd9\u4e00\u6761\u4ef6\uff0c\u5219\u9009\u62e9\u6700\u4f18\u8282\u70b9\u8fdb\u884c\u653e\u7f6e\u3002
                                                                          "},{"location":"end-user/kpanda/gpu/volcano/numa.html#_3","title":"\u8c03\u5ea6\u539f\u7406","text":"

                                                                          \u5f53Pod\u8bbe\u7f6e\u4e86\u62d3\u6251\u7b56\u7565\u65f6\uff0cVolcano \u4f1a\u6839\u636e Pod \u8bbe\u7f6e\u7684\u62d3\u6251\u7b56\u7565\u9884\u6d4b\u5339\u914d\u7684\u8282\u70b9\u5217\u8868\u3002 \u8c03\u5ea6\u8fc7\u7a0b\u5982\u4e0b\uff1a

                                                                          1. \u6839\u636e Pod \u8bbe\u7f6e\u7684 Volcano \u62d3\u6251\u7b56\u7565\uff0c\u7b5b\u9009\u5177\u6709\u76f8\u540c\u7b56\u7565\u7684\u8282\u70b9\u3002

                                                                          2. \u5728\u8bbe\u7f6e\u4e86\u76f8\u540c\u7b56\u7565\u7684\u8282\u70b9\u4e2d\uff0c\u7b5b\u9009 CPU \u62d3\u6251\u6ee1\u8db3\u8be5\u7b56\u7565\u8981\u6c42\u7684\u8282\u70b9\u8fdb\u884c\u8c03\u5ea6\u3002

                                                                          Pod \u53ef\u914d\u7f6e\u7684\u62d3\u6251\u7b56\u7565 1. \u6839\u636e Pod \u8bbe\u7f6e\u7684\u62d3\u6251\u7b56\u7565\uff0c\u7b5b\u9009\u53ef\u8c03\u5ea6\u7684\u8282\u70b9 2. \u8fdb\u4e00\u6b65\u7b5b\u9009 CPU \u62d3\u6251\u6ee1\u8db3\u7b56\u7565\u7684\u8282\u70b9\u8fdb\u884c\u8c03\u5ea6 none \u9488\u5bf9\u914d\u7f6e\u4e86\u4ee5\u4e0b\u51e0\u79cd\u62d3\u6251\u7b56\u7565\u7684\u8282\u70b9\uff0c\u8c03\u5ea6\u65f6\u5747\u65e0\u7b5b\u9009\u884c\u4e3a\u3002none\uff1a\u53ef\u8c03\u5ea6\uff1bbest-effort\uff1a\u53ef\u8c03\u5ea6\uff1brestricted\uff1a\u53ef\u8c03\u5ea6\uff1bsingle-numa-node\uff1a\u53ef\u8c03\u5ea6 - best-effort \u7b5b\u9009\u62d3\u6251\u7b56\u7565\u540c\u6837\u4e3a\u201cbest-effort\u201d\u7684\u8282\u70b9\uff1anone\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bbest-effort\uff1a\u53ef\u8c03\u5ea6\uff1brestricted\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bsingle-numa-node\uff1a\u4e0d\u53ef\u8c03\u5ea6 \u5c3d\u53ef\u80fd\u6ee1\u8db3\u7b56\u7565\u8981\u6c42\u8fdb\u884c\u8c03\u5ea6\uff1a\u4f18\u5148\u8c03\u5ea6\u81f3\u5355 NUMA \u8282\u70b9\uff0c\u5982\u679c\u5355 NUMA \u8282\u70b9\u65e0\u6cd5\u6ee1\u8db3 CPU \u7533\u8bf7\u503c\uff0c\u5141\u8bb8\u8c03\u5ea6\u81f3\u591a\u4e2a NUMA \u8282\u70b9\u3002 restricted \u7b5b\u9009\u62d3\u6251\u7b56\u7565\u540c\u6837\u4e3a\u201crestricted\u201d\u7684\u8282\u70b9\uff1anone\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bbest-effort\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1brestricted\uff1a\u53ef\u8c03\u5ea6\uff1bsingle-numa-node\uff1a\u4e0d\u53ef\u8c03\u5ea6 \u4e25\u683c\u9650\u5236\u7684\u8c03\u5ea6\u7b56\u7565\uff1a\u5355 NUMA \u8282\u70b9\u7684CPU\u5bb9\u91cf\u4e0a\u9650\u5927\u4e8e\u7b49\u4e8e CPU \u7684\u7533\u8bf7\u503c\u65f6\uff0c\u4ec5\u5141\u8bb8\u8c03\u5ea6\u81f3\u5355 NUMA \u8282\u70b9\u3002\u6b64\u65f6\u5982\u679c\u5355 NUMA \u8282\u70b9\u5269\u4f59\u7684 CPU \u53ef\u4f7f\u7528\u91cf\u4e0d\u8db3\uff0c\u5219 Pod \u65e0\u6cd5\u8c03\u5ea6\u3002\u5355 NUMA \u8282\u70b9\u7684 CPU \u5bb9\u91cf\u4e0a\u9650\u5c0f\u4e8e CPU \u7684\u7533\u8bf7\u503c\u65f6\uff0c\u53ef\u5141\u8bb8\u8c03\u5ea6\u81f3\u591a\u4e2a NUMA \u8282\u70b9\u3002 single-numa-node \u7b5b\u9009\u62d3\u6251\u7b56\u7565\u540c\u6837\u4e3a\u201csingle-numa-node\u201d\u7684\u8282\u70b9\uff1anone\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bbest-effort\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1brestricted\uff1a\u4e0d\u53ef\u8c03\u5ea6\uff1bsingle-numa-node\uff1a\u53ef\u8c03\u5ea6 \u4ec5\u5141\u8bb8\u8c03\u5ea6\u81f3\u5355 NUMA \u8282\u70b9\u3002"},{"location":"end-user/kpanda/gpu/volcano/numa.html#numa_1","title":"\u914d\u7f6e NUMA \u4eb2\u548c\u8c03\u5ea6\u7b56\u7565","text":"
1. Configure policies in the Job:

task: \n  - replicas: 1 \n    name: \"test-1\" \n    topologyPolicy: single-numa-node \n  - replicas: 1 \n    name: \"test-2\" \n    topologyPolicy: best-effort \n
2. Modify the kubelet scheduling policy by setting the --topology-manager-policy parameter (see the configuration sketch after this list). Four policies are supported:

  • none (default)
  • best-effort
  • restricted
  • single-numa-node
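A minimal sketch of the corresponding kubelet configuration (assuming the file-based KubeletConfiguration is used; topology alignment of CPUs also requires the static CPU manager policy):

# /var/lib/kubelet/config.yaml (excerpt)\nkind: KubeletConfiguration\napiVersion: kubelet.config.k8s.io/v1beta1\ncpuManagerPolicy: static\ntopologyManagerPolicy: single-numa-node\n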
                                                                          "},{"location":"end-user/kpanda/gpu/volcano/numa.html#_4","title":"\u4f7f\u7528\u6848\u4f8b","text":"
                                                                          1. \u793a\u4f8b\u4e00\uff1a\u5728\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u4e2d\u914d\u7f6e NUMA \u4eb2\u548c\u6027\u3002

kind: Deployment  \napiVersion: apps/v1  \nmetadata:  \n  name: numa-tset  \nspec:  \n  replicas: 1  \n  selector:  \n    matchLabels:  \n      app: numa-tset  \n  template:  \n    metadata:  \n      labels:  \n        app: numa-tset  \n      annotations:  \n        volcano.sh/numa-topology-policy: single-numa-node    # set the topology policy  \n    spec:  \n      containers:  \n        - name: container-1  \n          image: nginx:alpine  \n          resources:  \n            requests:  \n              cpu: 2           # must be an integer and must match limits  \n              memory: 2048Mi  \n            limits:  \n              cpu: 2           # must be an integer and must match requests  \n              memory: 2048Mi  \n      imagePullSecrets:  \n      - name: default-secret\n
2. Example 2: create a Volcano Job that uses NUMA affinity.

apiVersion: batch.volcano.sh/v1alpha1  \nkind: Job  \nmetadata:  \n  name: vj-test  \nspec:  \n  schedulerName: volcano  \n  minAvailable: 1  \n  tasks:  \n    - replicas: 1  \n      name: \"test\"  \n      topologyPolicy: best-effort   # set the topology policy for task  \n      template:  \n        spec:  \n          containers:  \n            - image: alpine  \n              command: [\"/bin/sh\", \"-c\", \"sleep 1000\"]  \n              imagePullPolicy: IfNotPresent  \n              name: running  \n              resources:  \n                limits:  \n                  cpu: 20  \n                  memory: \"100Mi\"  \n          restartPolicy: OnFailure\n
                                                                          "},{"location":"end-user/kpanda/gpu/volcano/numa.html#numa_2","title":"NUMA \u8c03\u5ea6\u5206\u6790","text":"

                                                                          \u5047\u8bbe NUMA \u8282\u70b9\u60c5\u51b5\u5982\u4e0b\uff1a

                                                                          \u5de5\u4f5c\u8282\u70b9 \u8282\u70b9\u7b56\u7565\u62d3\u6251\u7ba1\u7406\u5668\u7b56\u7565 NUMA \u8282\u70b9 0 \u4e0a\u7684\u53ef\u5206\u914d CPU NUMA \u8282\u70b9 1 \u4e0a\u7684\u53ef\u5206\u914d CPU node-1 single-numa-node 16U 16U node-2 best-effort 16U 16U node-3 best-effort 20U 20U
                                                                          • \u793a\u4f8b\u4e00\u4e2d\uff0cPod \u7684 CPU \u7533\u8bf7\u503c\u4e3a 2U\uff0c\u8bbe\u7f6e\u62d3\u6251\u7b56\u7565\u4e3a\u201csingle-numa-node\u201d\uff0c\u56e0\u6b64\u4f1a\u88ab\u8c03\u5ea6\u5230\u76f8\u540c\u7b56\u7565\u7684 node-1\u3002
                                                                          • \u793a\u4f8b\u4e8c\u4e2d\uff0cPod \u7684 CPU \u7533\u8bf7\u503c\u4e3a20U\uff0c\u8bbe\u7f6e\u62d3\u6251\u7b56\u7565\u4e3a\u201cbest-effort\u201d\uff0c\u5b83\u5c06\u88ab\u8c03\u5ea6\u5230 node-3\uff0c \u56e0\u4e3a node-3 \u53ef\u4ee5\u5728\u5355\u4e2a NUMA \u8282\u70b9\u4e0a\u5206\u914d Pod \u7684 CPU \u8bf7\u6c42\uff0c\u800c node-2 \u9700\u8981\u5728\u4e24\u4e2a NUMA \u8282\u70b9\u4e0a\u6267\u884c\u6b64\u64cd\u4f5c\u3002
                                                                          "},{"location":"end-user/kpanda/gpu/volcano/numa.html#cpu","title":"\u67e5\u770b\u5f53\u524d\u8282\u70b9\u7684 CPU \u6982\u51b5","text":"

                                                                          \u60a8\u53ef\u4ee5\u901a\u8fc7 lscpu \u547d\u4ee4\u67e5\u770b\u5f53\u524d\u8282\u70b9\u7684 CPU \u6982\u51b5\uff1a

lscpu \n... \nCPU(s): 32 \nNUMA node(s): 2 \nNUMA node0 CPU(s): 0-15 \nNUMA node1 CPU(s): 16-31\n
                                                                          "},{"location":"end-user/kpanda/gpu/volcano/numa.html#cpu_1","title":"\u67e5\u770b\u5f53\u524d\u8282\u70b9\u7684 CPU \u5206\u914d","text":"

                                                                          \u7136\u540e\u67e5\u770b NUMA \u8282\u70b9\u4f7f\u7528\u60c5\u51b5\uff1a

# View the current node's CPU allocation\ncat /var/lib/kubelet/cpu_manager_state\n{\"policyName\":\"static\",\"defaultCpuSet\":\"0,10-15,25-31\",\"entries\":{\"777870b5-c64f-42f5-9296-688b9dc212ba\":{\"container-1\":\"16-24\"},\"fb15e10a-b6a5-4aaa-8fcd-76c1aa64e6fd\":{\"container-1\":\"1-9\"}},\"checksum\":318470969}\n

The example above shows two containers running on this node: one occupies cores 1-9 of NUMA node0, and the other occupies cores 16-24 of NUMA node1.

                                                                          "},{"location":"end-user/kpanda/gpu/volcano/volcano-gang-scheduler.html","title":"\u4f7f\u7528 Volcano \u7684 Gang Scheduler","text":"

                                                                          Gang \u8c03\u5ea6\u7b56\u7565\u662f volcano-scheduler \u7684\u6838\u5fc3\u8c03\u5ea6\u7b97\u6cd5\u4e4b\u4e00\uff0c\u5b83\u6ee1\u8db3\u4e86\u8c03\u5ea6\u8fc7\u7a0b\u4e2d\u7684 \u201cAll or nothing\u201d \u7684\u8c03\u5ea6\u9700\u6c42\uff0c \u907f\u514d Pod \u7684\u4efb\u610f\u8c03\u5ea6\u5bfc\u81f4\u96c6\u7fa4\u8d44\u6e90\u7684\u6d6a\u8d39\u3002\u5177\u4f53\u7b97\u6cd5\u662f\uff0c\u89c2\u5bdf Job \u4e0b\u7684 Pod \u5df2\u8c03\u5ea6\u6570\u91cf\u662f\u5426\u6ee1\u8db3\u4e86\u6700\u5c0f\u8fd0\u884c\u6570\u91cf\uff0c \u5f53 Job \u7684\u6700\u5c0f\u8fd0\u884c\u6570\u91cf\u5f97\u5230\u6ee1\u8db3\u65f6\uff0c\u4e3a Job \u4e0b\u7684\u6240\u6709 Pod \u6267\u884c\u8c03\u5ea6\u52a8\u4f5c\uff0c\u5426\u5219\uff0c\u4e0d\u6267\u884c\u3002

                                                                          "},{"location":"end-user/kpanda/gpu/volcano/volcano-gang-scheduler.html#_1","title":"\u4f7f\u7528\u573a\u666f","text":"

The Gang scheduling algorithm, built on the pod-group concept, is well suited to scenarios that require multi-process cooperation. AI workloads often involve complex pipelines such as Data Ingestion, Data Analysis, Data Splitting, Trainer, Serving, and Logging, which need a group of containers working together, making them a natural fit for pod-group-based Gang scheduling. Multi-threaded parallel computing and communication under the MPI framework, where master and worker processes must cooperate, is also a strong match. Containers within a pod group are highly interdependent and may contend for resources; scheduling and allocating them as a whole effectively avoids deadlocks.

When cluster resources are scarce, Gang scheduling noticeably improves resource utilization. For example, if the cluster can currently accommodate only 2 Pods but the minimum number of Pods to schedule is 3, all Pods of the Job remain pending until the cluster can accommodate 3 Pods, at which point they are scheduled together. This prevents the situation where some Pods are scheduled, fail to meet the requirement, yet still occupy resources and block other Jobs from running.

                                                                          "},{"location":"end-user/kpanda/gpu/volcano/volcano-gang-scheduler.html#_2","title":"\u6982\u5ff5\u8bf4\u660e","text":"

Gang Scheduler is Volcano's core scheduling plugin and is enabled by default once Volcano is installed. When creating a workload, you only need to set the scheduler name to volcano, as in the sketch below.
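For example, a minimal sketch of a workload handed to Volcano (the Deployment name and image here are illustrative):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: gang-demo
spec:
  replicas: 3
  selector:
    matchLabels:
      app: gang-demo
  template:
    metadata:
      labels:
        app: gang-demo
    spec:
      schedulerName: volcano   # hand Pod scheduling to volcano-scheduler instead of the default scheduler
      containers:
        - name: demo
          image: busybox
          command: ["sh", "-c", "sleep 3600"]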

Volcano schedules in units of PodGroups. You do not need to create PodGroup resources manually when creating a workload; Volcano creates them automatically based on the workload's information. Here is an example of a PodGroup:

apiVersion: scheduling.volcano.sh/v1beta1
kind: PodGroup
metadata:
  name: test
  namespace: default
spec:
  minMember: 1  # (1)!
  minResources:  # (2)!
    cpu: "3"
    memory: "2048Mi"
  priorityClassName: high-priority # (3)!
  queue: default # (4)!
1. The minimum number of Pods or tasks that must run under this PodGroup. If the cluster's resources cannot satisfy running minMember tasks, the scheduler will not schedule any task within this PodGroup.
2. The minimum resources required to run this PodGroup. If the cluster's allocatable resources do not satisfy minResources, the scheduler will not schedule any task within this PodGroup.
3. The priority of this PodGroup, used by the scheduler to order all PodGroups in the queue during scheduling. system-node-critical and system-cluster-critical are two reserved values indicating the highest priority. When not explicitly specified, the default or zero priority is used.
4. The queue this PodGroup belongs to. The queue must already exist and be in the open state.
                                                                          "},{"location":"end-user/kpanda/gpu/volcano/volcano-gang-scheduler.html#_3","title":"\u4f7f\u7528\u6848\u4f8b","text":"

In a multi-threaded parallel computing and communication scenario under the MPI framework, we must ensure that all Pods can be scheduled successfully for the task to complete normally. Setting minAvailable to 4 requires that 1 mpimaster and 3 mpiworker Pods can run.

apiVersion: batch.volcano.sh/v1alpha1
kind: Job
metadata:
  name: lm-mpi-job
  labels:
    "volcano.sh/job-type": "MPI"
spec:
  minAvailable: 4
  schedulerName: volcano
  plugins:
    ssh: []
    svc: []
  policies:
    - event: PodEvicted
      action: RestartJob
  tasks:
    - replicas: 1
      name: mpimaster
      policies:
        - event: TaskCompleted
          action: CompleteJob
      template:
        spec:
          containers:
            - command:
                - /bin/sh
                - -c
                - |
                  MPI_HOST=`cat /etc/volcano/mpiworker.host | tr "\n" ","`;
                  mkdir -p /var/run/sshd; /usr/sbin/sshd;
                  mpiexec --allow-run-as-root --host ${MPI_HOST} -np 3 mpi_hello_world;
              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1
              name: mpimaster
              ports:
                - containerPort: 22
                  name: mpijob-port
              workingDir: /home
              resources:
                requests:
                  cpu: "500m"
                limits:
                  cpu: "500m"
          restartPolicy: OnFailure
          imagePullSecrets:
            - name: default-secret
    - replicas: 3
      name: mpiworker
      template:
        spec:
          containers:
            - command:
                - /bin/sh
                - -c
                - |
                  mkdir -p /var/run/sshd; /usr/sbin/sshd -D;
              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1
              name: mpiworker
              ports:
                - containerPort: 22
                  name: mpijob-port
              workingDir: /home
              resources:
                requests:
                  cpu: "1000m"
                limits:
                  cpu: "1000m"
          restartPolicy: OnFailure
          imagePullSecrets:
            - name: default-secret

The PodGroup resource that gets generated:

apiVersion: scheduling.volcano.sh/v1beta1
kind: PodGroup
metadata:
  annotations:
  creationTimestamp: "2024-05-28T09:18:50Z"
  generation: 5
  labels:
    volcano.sh/job-type: MPI
  name: lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2
  namespace: default
  ownerReferences:
  - apiVersion: batch.volcano.sh/v1alpha1
    blockOwnerDeletion: true
    controller: true
    kind: Job
    name: lm-mpi-job
    uid: 9c571015-37c7-4a1a-9604-eaa2248613f2
  resourceVersion: "25173454"
  uid: 7b04632e-7cff-4884-8e9a-035b7649d33b
spec:
  minMember: 4
  minResources:
    count/pods: "4"
    cpu: 3500m
    limits.cpu: 3500m
    pods: "4"
    requests.cpu: 3500m
  minTaskMember:
    mpimaster: 1
    mpiworker: 3
  queue: default
status:
  conditions:
  - lastTransitionTime: "2024-05-28T09:19:01Z"
    message: '3/4 tasks in gang unschedulable: pod group is not ready, 1 Succeeded,
      3 Releasing, 4 minAvailable'
    reason: NotEnoughResources
    status: "True"
    transitionID: f875efa5-0358-4363-9300-06cebc0e7466
    type: Unschedulable
  - lastTransitionTime: "2024-05-28T09:18:53Z"
    reason: tasks in gang are ready to be scheduled
    status: "True"
    transitionID: 5a7708c8-7d42-4c33-9d97-0581f7c06dab
    type: Scheduled
  phase: Pending
  succeeded: 1

As the PodGroup shows, it is linked to the workload through ownerReferences, and its minimum number of running Pods is set to 4.
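To inspect the automatically created PodGroup yourself, you can query the CRD directly; the resource name below is the one generated in this example:

kubectl -n default get podgroup lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2 -o yaml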

                                                                          "},{"location":"end-user/kpanda/gpu/volcano/volcano_binpack.html","title":"\u4f7f\u7528 Volcano Binpack \u8c03\u5ea6\u7b56\u7565","text":"

The goal of the Binpack scheduling algorithm is to fill up already-occupied nodes as much as possible (avoiding allocation to empty nodes). Concretely, Binpack scores candidate nodes; the higher the score, the higher the node's resource utilization. By packing workloads onto a subset of nodes, this algorithm minimizes resource fragmentation within nodes and reserves enough space on idle machines for Pods with larger resource requests, maximizing the utilization of the cluster's idle resources.

                                                                          "},{"location":"end-user/kpanda/gpu/volcano/volcano_binpack.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"

Install the Volcano component on the 算丰 AI computing platform in advance.

                                                                          "},{"location":"end-user/kpanda/gpu/volcano/volcano_binpack.html#binpack","title":"Binpack \u7b97\u6cd5\u539f\u7406","text":"

When scoring a node, Binpack computes a weighted score from the Binpack plugin's own weight and the weight configured for each resource. First, each resource type requested by the Pod is scored in turn. Taking CPU as an example, the CPU score on a candidate node is:

CPU.weight * (request + used) / allocatable

That is, the higher the CPU weight, the higher the score; and the fuller the node's resource usage, the higher the score. Memory, GPU, and other resources work the same way. Where:

• CPU.weight is the CPU weight configured by the user
• request is the amount of CPU requested by the current Pod
• used is the amount of CPU already allocated on the current node
• allocatable is the total allocatable CPU on the current node

The node's total score under the Binpack policy is computed as:

binpack.weight * (CPU.score + Memory.score + GPU.score) / (CPU.weight + Memory.weight + GPU.weight) * 100

That is, the larger the Binpack plugin's weight, the higher the score; and the larger a resource's weight, the larger that resource's share in the score. Where:

• binpack.weight is the binpack scheduling policy weight set by the user
• CPU.score is the CPU resource score; CPU.weight is the CPU weight
• Memory.score is the Memory resource score; Memory.weight is the Memory weight
• GPU.score is the GPU resource score; GPU.weight is the GPU weight

As shown in the figure, the cluster has two nodes, Node 1 and Node 2. When scheduling a Pod, the Binpack policy scores each node separately. Assume CPU.weight is configured as 1, Memory.weight as 1, GPU.weight as 2, and binpack.weight as 5.
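Expressed in the scheduler ConfigMap format shown later in this article, these assumed weights would correspond to a binpack plugin section like this sketch:

- plugins:
    - name: binpack
      arguments:
        binpack.weight: 5
        binpack.cpu: 1
        binpack.memory: 1
        binpack.resources: nvidia.com/gpu
        binpack.resources.nvidia.com/gpu: 2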

1. Binpack scores the resources of Node 1. Each resource is scored as follows:

  • CPU Score:

    CPU.weight * (request + used) / allocatable = 1 * (2 + 4) / 8 = 0.75

  • Memory Score:

    Memory.weight * (request + used) / allocatable = 1 * (4 + 8) / 16 = 0.75

  • GPU Score:

    GPU.weight * (request + used) / allocatable = 2 * (4 + 4) / 8 = 2

2. The node's total score is computed as:

  binpack.weight * (CPU.score + Memory.score + GPU.score) / (CPU.weight + Memory.weight + GPU.weight) * 100

  With binpack.weight configured as 5, Node 1's score under the Binpack policy is:

  5 * (0.75 + 0.75 + 2) / (1 + 1 + 2) * 100 = 437.5
3. Binpack scores the resources of Node 2:

  • CPU Score:

    CPU.weight * (request + used) / allocatable = 1 * (2 + 6) / 8 = 1

  • Memory Score:

    Memory.weight * (request + used) / allocatable = 1 * (4 + 8) / 16 = 0.75

  • GPU Score:

    GPU.weight * (request + used) / allocatable = 2 * (4 + 4) / 8 = 2

4. Node 2's score under the Binpack policy is:

  5 * (1 + 0.75 + 2) / (1 + 1 + 2) * 100 = 468.75

In summary, Node 2 scores higher than Node 1, so under the Binpack policy the Pod is preferentially scheduled to Node 2.

                                                                          "},{"location":"end-user/kpanda/gpu/volcano/volcano_binpack.html#_2","title":"\u4f7f\u7528\u6848\u4f8b","text":"

The Binpack scheduling plugin is enabled by default when Volcano is installed. If you do not configure weights, the following default weights are used.

- plugins:
    - name: binpack
      arguments:
        binpack.weight: 1
        binpack.cpu: 1
        binpack.memory: 1

The default weights do not produce a noticeable packing (stacking) behavior, so change binpack.weight to 10.

kubectl -n volcano-system edit configmaps volcano-scheduler-configmap
- plugins:
    - name: binpack
      arguments:
        binpack.weight: 10
        binpack.cpu: 1
        binpack.memory: 1
        binpack.resources: nvidia.com/gpu, example.com/foo
        binpack.resources.nvidia.com/gpu: 2
        binpack.resources.example.com/foo: 3

After making the change, restart the volcano-scheduler Pod for it to take effect.
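One way to do this, assuming the default deployment name used by a standard Volcano installation:

kubectl -n volcano-system rollout restart deployment volcano-scheduler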

Create the following Deployment.

apiVersion: apps/v1
kind: Deployment
metadata:
  name: binpack-test
  labels:
    app: binpack-test
spec:
  replicas: 2
  selector:
    matchLabels:
      app: test
  template:
    metadata:
      labels:
        app: test
    spec:
      schedulerName: volcano
      containers:
        - name: test
          image: busybox
          imagePullPolicy: IfNotPresent
          command: ["sh", "-c", 'echo "Hello, Kubernetes!" && sleep 3600']
          resources:
            requests:
              cpu: 500m
            limits:
              cpu: 500m

On a two-node cluster you can observe that both Pods are scheduled onto the same node.
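You can confirm the placement from the NODE column of the Pod listing, using the label defined in the Deployment above:

kubectl get pods -l app=test -o wide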

                                                                          "},{"location":"end-user/kpanda/gpu/volcano/volcano_priority.html","title":"\u4f18\u5148\u7ea7\u62a2\u5360\uff08Preemption scheduling\uff09\u7b56\u7565","text":"

Volcano implements the priority preemption policy, i.e. preemption scheduling, through the Priority plugin. When cluster resources are limited and multiple Jobs are waiting to be scheduled, the default Kubernetes scheduler may let Jobs with more Pods grab more resources. Volcano-scheduler instead provides algorithms that let different Jobs share cluster resources in a fair-share manner.

The Priority plugin lets users customize the priority of Jobs and Tasks and tailor scheduling policies at different levels as needed. For example, for latency-sensitive applications such as finance or IoT monitoring scenarios, the Priority plugin ensures they are scheduled first.

                                                                          "},{"location":"end-user/kpanda/gpu/volcano/volcano_priority.html#_1","title":"\u4f7f\u7528\u65b9\u5f0f","text":"

Priority is determined by the Value field of the configured PriorityClass; the larger the value, the higher the priority. The plugin is enabled by default and requires no changes. You can confirm or modify the configuration with the following command.

                                                                          kubectl -n volcano-system edit configmaps volcano-scheduler-configmap\n
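Inside that ConfigMap, the priority plugin sits in the first plugin tier of the scheduler configuration. As a rough sketch of the default layout (the actions and neighboring plugins can differ across Volcano versions):

actions: "enqueue, allocate, backfill"
tiers:
- plugins:
  - name: priority
  - name: gang
  - name: conformance
- plugins:
  - name: drf
  - name: predicates
  - name: nodeorder
  - name: binpack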
                                                                          "},{"location":"end-user/kpanda/gpu/volcano/volcano_priority.html#_2","title":"\u4f7f\u7528\u6848\u4f8b","text":"

Suppose the cluster has two idle nodes and three workloads of different priorities: high-priority, med-priority, and low-priority. After the high-priority workload runs and fills up the cluster resources, the med-priority and low-priority workloads are submitted. Because all cluster resources are occupied by the higher-priority workload, the med-priority and low-priority workloads remain pending. When the high-priority workload finishes, the med-priority workload is scheduled first, following the priority scheduling principle.

1. Create three priority definitions via priority.yaml: high-priority, med-priority, and low-priority.

  View priority.yaml

cat <<EOF | kubectl apply -f -
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: high-priority
value: 100
globalDefault: false
description: "This priority class should be used for volcano job only."
---
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: med-priority
value: 50
globalDefault: false
description: "This priority class should be used for volcano job only."
---
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: low-priority
value: 10
globalDefault: false
description: "This priority class should be used for volcano job only."
EOF
2. View the priority definitions.

                                                                            kubectl get PriorityClass\n
NAME                      VALUE        GLOBAL-DEFAULT   AGE
high-priority             100          false            97s
low-priority              10           false            97s
med-priority              50           false            97s
system-cluster-critical   2000000000   false            6d6h
system-node-critical      2000001000   false            6d6h

3. Create the high-priority workload high-priority-job, which occupies all of the cluster's resources.

  View high-priority-job
  cat <<EOF | kubectl apply -f -
apiVersion: batch.volcano.sh/v1alpha1
kind: Job
metadata:
  name: priority-high
spec:
  schedulerName: volcano
  minAvailable: 4
  priorityClassName: high-priority
  tasks:
    - replicas: 4
      name: "test"
      template:
        spec:
          containers:
            - image: alpine
              command: ["/bin/sh", "-c", "sleep 1000"]
              imagePullPolicy: IfNotPresent
              name: running
              resources:
                requests:
                  cpu: "4"
          restartPolicy: OnFailure
EOF

Check the running Pods via kubectl get pod:

                                                                            kubectl get pods\n
NAME                   READY   STATUS    RESTARTS   AGE
priority-high-test-0   1/1     Running   0          3s
priority-high-test-1   1/1     Running   0          3s
priority-high-test-2   1/1     Running   0          3s
priority-high-test-3   1/1     Running   0          3s

At this point, all cluster node resources are occupied.

4. Create the medium-priority workload med-priority-job and the low-priority workload low-priority-job.

                                                                            med-priority-job
  cat <<EOF | kubectl apply -f -
apiVersion: batch.volcano.sh/v1alpha1
kind: Job
metadata:
  name: priority-medium
spec:
  schedulerName: volcano
  minAvailable: 4
  priorityClassName: med-priority
  tasks:
    - replicas: 4
      name: "test"
      template:
        spec:
          containers:
            - image: alpine
              command: ["/bin/sh", "-c", "sleep 1000"]
              imagePullPolicy: IfNotPresent
              name: running
              resources:
                requests:
                  cpu: "4"
          restartPolicy: OnFailure
EOF
                                                                            low-priority-job
  cat <<EOF | kubectl apply -f -
apiVersion: batch.volcano.sh/v1alpha1
kind: Job
metadata:
  name: priority-low
spec:
  schedulerName: volcano
  minAvailable: 4
  priorityClassName: low-priority
  tasks:
    - replicas: 4
      name: "test"
      template:
        spec:
          containers:
            - image: alpine
              command: ["/bin/sh", "-c", "sleep 1000"]
              imagePullPolicy: IfNotPresent
              name: running
              resources:
                requests:
                  cpu: "4"
          restartPolicy: OnFailure
EOF

Check the Pods via kubectl get pod. Cluster resources are insufficient, so the Pods stay in the Pending state:

                                                                            kubectl get pods\n
NAME                     READY   STATUS    RESTARTS   AGE
priority-high-test-0     1/1     Running   0          3m29s
priority-high-test-1     1/1     Running   0          3m29s
priority-high-test-2     1/1     Running   0          3m29s
priority-high-test-3     1/1     Running   0          3m29s
priority-low-test-0      0/1     Pending   0          2m26s
priority-low-test-1      0/1     Pending   0          2m26s
priority-low-test-2      0/1     Pending   0          2m26s
priority-low-test-3      0/1     Pending   0          2m26s
priority-medium-test-0   0/1     Pending   0          2m36s
priority-medium-test-1   0/1     Pending   0          2m36s
priority-medium-test-2   0/1     Pending   0          2m36s
priority-medium-test-3   0/1     Pending   0          2m36s

5. Delete the high-priority workload to release cluster resources; med-priority-job will then be scheduled first. Run kubectl delete -f high_priority_job.yaml (if you saved the manifest to that file; otherwise delete the priority-high Job directly) to release the resources, then check the Pods' scheduling status:

                                                                            kubectl get pods\n
NAME                     READY   STATUS    RESTARTS   AGE
priority-low-test-0      0/1     Pending   0          5m18s
priority-low-test-1      0/1     Pending   0          5m18s
priority-low-test-2      0/1     Pending   0          5m18s
priority-low-test-3      0/1     Pending   0          5m18s
priority-medium-test-0   1/1     Running   0          5m28s
priority-medium-test-1   1/1     Running   0          5m28s
priority-medium-test-2   1/1     Running   0          5m28s
priority-medium-test-3   1/1     Running   0          5m28s

                                                                          "},{"location":"end-user/kpanda/gpu/volcano/volcano_user_guide.html","title":"\u5b89\u88c5 Volcano","text":"

As Kubernetes (K8s) has become the platform of choice for cloud-native application orchestration and management, many applications are actively migrating to it. In AI and machine learning, where tasks typically involve heavy computation, developers tend to build AI platforms on Kubernetes to take full advantage of its strengths in resource management, application orchestration, and operational monitoring.

However, the default Kubernetes scheduler was designed primarily for long-running services and falls short for AI, big data, and other tasks that require batch and elastic scheduling. For example, under intense resource contention, the default scheduler may distribute resources unevenly, affecting normal task execution.

Take a TensorFlow job as an example: it includes two roles, PS (parameter server) and Worker, which must cooperate to complete the job. If only one role is deployed, the job cannot run. The default scheduler schedules Pods one by one and cannot perceive the dependency between PS and Worker in a TFJob. Under high load, this can leave multiple jobs each holding part of the resources they need but none able to finish, wasting resources.

                                                                          "},{"location":"end-user/kpanda/gpu/volcano/volcano_user_guide.html#volcano_1","title":"Volcano \u7684\u8c03\u5ea6\u7b56\u7565\u4f18\u52bf","text":"

Volcano offers multiple scheduling policies to address these challenges. Gang scheduling ensures that the multiple tasks (Pods) of a distributed machine learning training job start simultaneously, avoiding deadlock; preemption scheduling allows high-priority jobs to preempt the resources of low-priority jobs when resources are insufficient, ensuring critical tasks complete first.

In addition, Volcano integrates seamlessly with mainstream computing frameworks such as Spark, TensorFlow, and PyTorch, and supports mixed scheduling of heterogeneous devices including CPUs and GPUs, providing comprehensive optimization for AI computing tasks.

Next, we will walk through installing and using Volcano so you can take full advantage of its scheduling policies to optimize AI computing tasks.

                                                                          "},{"location":"end-user/kpanda/gpu/volcano/volcano_user_guide.html#volcano_2","title":"\u5b89\u88c5 Volcano","text":"
1. Find Volcano under Cluster Details -> Helm Apps -> Helm Templates and install it.

2. Check and confirm that Volcano is installed, i.e. that the volcano-admission, volcano-controllers, and volcano-scheduler components are running normally.
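   A quick way to check, assuming the components were installed into the volcano-system namespace:

   kubectl -n volcano-system get pods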

Volcano is typically used together with the AI Lab platform to form an effective closed loop for the entire development and training workflow across datasets, Notebooks, and training jobs.

                                                                          "},{"location":"end-user/kpanda/helm/index.html","title":"Helm \u6a21\u677f","text":"

Helm is the package management tool for Kubernetes, making it easy for users to quickly discover, share, and use applications built for Kubernetes. The container management module provides hundreds of Helm templates covering major scenarios such as storage, networking, monitoring, and databases. With these templates, you can quickly deploy and conveniently manage Helm apps through the UI. In addition, you can add more customized templates via Add Helm Repository to meet diverse needs.

Key concepts:

You need to understand the following key concepts when using Helm:

• Chart: a Helm installation package containing the images, dependencies, and resource definitions needed to run an application; it may also contain service definitions for a Kubernetes cluster, similar to a formula in Homebrew, a dpkg in APT, or an rpm file in Yum. A Chart is called a Helm Template on the 算丰 AI computing platform.

• Release: an instance of a Chart running on a Kubernetes cluster. A Chart can be installed multiple times in the same cluster, and each installation creates a new Release. A Release is called a Helm App on the 算丰 AI computing platform.

• Repository: a repository for publishing and storing Charts. A Repository is called a Helm Repository on the 算丰 AI computing platform.

For more details, visit the Helm official website.
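For orientation, the three concepts above map directly onto the Helm CLI. The repository name, URL, and chart/release names in this sketch are hypothetical:

# Repository: register a chart repository
helm repo add myrepo https://example.com/charts
# Chart: search the repository for installable charts
helm search repo myrepo
# Release: each install of a chart creates a named release
helm install my-release myrepo/mychart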

Related operations:

• Upload Helm Template: describes how to upload a Helm template.
• Manage Helm Apps: including installing, updating, and uninstalling Helm apps, viewing Helm operation records, etc.
• Manage Helm Repositories: including adding, updating, and deleting Helm repositories.
                                                                          "},{"location":"end-user/kpanda/helm/Import-addon.html","title":"\u5c06\u81ea\u5b9a\u4e49 Helm \u5e94\u7528\u5bfc\u5165\u7cfb\u7edf\u5185\u7f6e Addon","text":"

This article explains, for both offline and online environments, how to import a Helm app into the system's built-in addons.

                                                                          "},{"location":"end-user/kpanda/helm/Import-addon.html#_1","title":"\u79bb\u7ebf\u73af\u5883","text":"

An offline environment is a private network environment that cannot connect to the Internet or is otherwise closed off.

                                                                          "},{"location":"end-user/kpanda/helm/Import-addon.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
• A working charts-syncer binary is available. If not, you can click to download it.
• The Helm Chart has been adapted for charts-syncer, i.e. a .relok8s-images.yaml file has been added inside the Helm Chart. This file must list all images used by the Chart; it may also list images that the Chart does not reference directly, such as images used by an Operator. (A sketch of this file appears after the note below.)

                                                                          Note

• For how to write the Chart, refer to image-hints-file. The image's registry and repository must be written separately, because the registry/repository has to be replaced or modified when the images are loaded.
• charts-syncer is already installed on the seed cluster hosting the installer. If you are importing the custom Helm app into the installer's seed cluster, you can skip the download and proceed directly to adaptation; if the charts-syncer binary is not installed, you can download it immediately.
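For reference, a .relok8s-images.yaml file is a list of image templates resolved against the Chart's values. A minimal sketch, where the value paths are illustrative:

# .relok8s-images.yaml
- "{{ .image.registry }}/{{ .image.repository }}:{{ .image.tag }}"
- "{{ .operator.image.registry }}/{{ .operator.image.repository }}:{{ .operator.image.tag }}"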
                                                                          "},{"location":"end-user/kpanda/helm/Import-addon.html#helm-chart","title":"\u540c\u6b65 Helm Chart","text":"
1. Go to Container Management -> Helm Apps -> Helm Repositories, search for addon, and obtain the built-in repository address and username/password (the default username/password of the system built-in repository is rootuser/rootpass123).
2. Sync the Helm Chart to the container management built-in repository addon:

  • Write the following configuration file, adjusting it to your setup as needed, and save it as sync-dao-2048.yaml.

source:  # helm charts source information
  repo:
    kind: HARBOR # can also be any other supported Helm Chart repository kind, such as CHARTMUSEUM
    url: https://release-ci.daocloud.io/chartrepo/community # change to your chart repo url
    #auth: # username/password; can be omitted if no password is set
      #username: "admin"
      #password: "Harbor12345"
charts:  # charts to sync
  - name: dao-2048 # helm charts information; if omitted, all charts in the source helm repo are synced
    versions:
      - 1.4.1
target:  # helm charts target information
  containerRegistry: 10.5.14.40 # image registry url
  repo:
    kind: CHARTMUSEUM # can also be any other supported Helm Chart repository kind, such as HARBOR
    url: http://10.5.14.40:8081 # change to the correct chart repo url; you can verify the address with helm repo add $HELM-REPO
    auth: # username/password; can be omitted if no password is set
      username: "rootuser"
      password: "rootpass123"
  containers:
    # kind: HARBOR # fill in this field if the image registry is HARBOR and you want charts-syncer to auto-create the image repository
    # auth: # username/password; can be omitted if no password is set
      # username: "admin"
      # password: "Harbor12345"

# leverage .relok8s-images.yaml file inside the Charts to move the container images too
relocateContainerImages: true
  • Run the charts-syncer command to sync the Chart and the images it contains:

                                                                              charts-syncer sync --config sync-dao-2048.yaml --insecure --auto-create-repository\n

    Expected output:

I1222 15:01:47.119777    8743 sync.go:45] Using config file: "examples/sync-dao-2048.yaml"
W1222 15:01:47.234238    8743 syncer.go:263] Ignoring skipDependencies option as dependency sync is not supported if container image relocation is true or syncing from/to intermediate directory
I1222 15:01:47.234685    8743 sync.go:58] There is 1 chart out of sync!
I1222 15:01:47.234706    8743 sync.go:66] Syncing "dao-2048_1.4.1" chart...
.relok8s-images.yaml hints file found
Computing relocation...

Relocating dao-2048@1.4.1...
Pushing 10.5.14.40/daocloud/dao-2048:v1.4.1...
Done
Done moving /var/folders/vm/08vw0t3j68z9z_4lcqyhg8nm0000gn/T/charts-syncer869598676/dao-2048-1.4.1.tgz
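    As the configuration comments note, you can verify that the Chart reached the target repository with the standard Helm CLI, using the repository URL and credentials from the example above:

    helm repo add addon http://10.5.14.40:8081 --username rootuser --password rootpass123
    helm repo update
    helm search repo dao-2048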
3. After the previous step completes, go to Container Management -> Helm Apps -> Helm Repositories, find the corresponding addon, click Sync Repository in the action column, then return to Helm Templates, where you can see the uploaded Helm app.

4. Afterwards, you can install, upgrade, and uninstall it as usual.

                                                                          "},{"location":"end-user/kpanda/helm/Import-addon.html#_3","title":"\u5728\u7ebf\u73af\u5883","text":"

The Helm repo address in the online environment is release.daocloud.io. If the user has no permission to add a Helm repo, the custom Helm app cannot be imported into the built-in system addons. In that case, you can add your own self-hosted Helm repository, then follow the steps for syncing a Helm Chart in the offline environment to integrate your Helm repository into the platform.

                                                                          "},{"location":"end-user/kpanda/helm/helm-app.html","title":"\u7ba1\u7406 Helm \u5e94\u7528","text":"

The container management module supports UI-based management of Helm, including creating Helm instances from Helm templates, customizing Helm instance parameters, and managing the full lifecycle of Helm instances.

This section uses cert-manager as an example to describe how to create and manage a Helm app through the container management UI.

                                                                          "},{"location":"end-user/kpanda/helm/helm-app.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
• The container management module has connected to a Kubernetes cluster or a Kubernetes cluster has been created, and the cluster's UI is accessible.

• A namespace and a user have been created, and the user has been granted NS Admin or higher permissions; see Namespace Authorization for details.

                                                                          "},{"location":"end-user/kpanda/helm/helm-app.html#helm_1","title":"\u5b89\u88c5 Helm \u5e94\u7528","text":"

Follow the steps below to install a Helm app.

1. Click a cluster name to enter Cluster Details.

2. In the left navigation bar, click Helm Apps -> Helm Templates to enter the Helm template page.

   On the Helm template page, select the Helm repository named addon; the page then lists all Helm chart templates in the addon repository. Click the Chart named cert-manager.

3. On the installation page you can see the Chart's details. Select the version to install in the upper-right corner and click the Install button. Here version v1.9.1 is selected for installation.

4. Configure the Name, Namespace, and Version. You can also customize parameters by editing the YAML in the Parameter Configuration area below. Click OK.

5. The system automatically returns to the Helm app list, where the newly created Helm app is in the Installing state; after a while, its status changes to Running.

                                                                          "},{"location":"end-user/kpanda/helm/helm-app.html#helm_2","title":"\u66f4\u65b0 Helm \u5e94\u7528","text":"

After a Helm app has been installed through the UI, you can update it. Note: only Helm apps installed through the UI support UI-based updates.

Follow the steps below to update a Helm app.

1. Click a cluster name to enter Cluster Details.

2. In the left navigation bar, click Helm Apps to enter the Helm app list page.

   On the Helm app list page, find the Helm app to update, click the ⋮ action button on the right side of the list, and choose Update from the drop-down menu.

3. After you click Update, the system jumps to the update page, where you can update the Helm app as needed. Here, updating the http port of the dao-2048 app is used as an example.

4. After modifying the parameters, you can click the Changes button under Parameter Configuration to compare the file before and after the modification. Once everything looks correct, click the OK button at the bottom to complete the update.

5. The system automatically returns to the Helm app list, and a pop-up in the upper-right corner shows Update Succeeded.

                                                                          "},{"location":"end-user/kpanda/helm/helm-app.html#helm_3","title":"\u67e5\u770b Helm \u64cd\u4f5c\u8bb0\u5f55","text":"

Every installation, update, or deletion of a Helm app has detailed operation records and logs available for review.

1. In the left navigation bar, click Cluster Operations -> Recent Operations, then select the Helm Operations tab at the top of the page. Each record corresponds to one install/update/delete operation.

2. To view the detailed log of an operation, click ⋮ on the right side of the list and select Logs from the pop-up menu.

3. The detailed run logs are then displayed at the bottom of the page in console form.

                                                                          "},{"location":"end-user/kpanda/helm/helm-app.html#helm_4","title":"\u5220\u9664 Helm \u5e94\u7528","text":"

Follow the steps below to delete a Helm app.

1. Find the cluster where the Helm app to be deleted resides, click the cluster name, and enter Cluster Details.

                                                                          2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb Helm \u5e94\u7528 \uff0c\u8fdb\u5165 Helm \u5e94\u7528\u5217\u8868\u9875\u9762\u3002

                                                                            \u5728 Helm \u5e94\u7528\u5217\u8868\u9875\u9009\u62e9\u60a8\u9700\u8981\u5220\u9664\u7684 Helm \u5e94\u7528\uff0c\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\uff0c\u5728\u4e0b\u62c9\u9009\u62e9\u4e2d\u9009\u62e9 \u5220\u9664 \u3002

                                                                          3. \u5728\u5f39\u7a97\u5185\u8f93\u5165 Helm \u5e94\u7528\u7684\u540d\u79f0\u8fdb\u884c\u786e\u8ba4\uff0c\u7136\u540e\u70b9\u51fb \u5220\u9664 \u6309\u94ae\u3002

                                                                          "},{"location":"end-user/kpanda/helm/helm-repo.html","title":"\u7ba1\u7406 Helm \u4ed3\u5e93","text":"

                                                                          Helm \u4ed3\u5e93\u662f\u7528\u6765\u5b58\u50a8\u548c\u53d1\u5e03 Chart \u7684\u5b58\u50a8\u5e93\u3002Helm \u5e94\u7528\u6a21\u5757\u652f\u6301\u901a\u8fc7 HTTP(s) \u534f\u8bae\u6765\u8bbf\u95ee\u5b58\u50a8\u5e93\u4e2d\u7684 Chart \u5305\u3002\u7cfb\u7edf\u9ed8\u8ba4\u5185\u7f6e\u4e86\u4e0b\u8868\u6240\u793a\u7684 4 \u4e2a Helm \u4ed3\u5e93\u4ee5\u6ee1\u8db3\u4f01\u4e1a\u751f\u4ea7\u8fc7\u7a0b\u4e2d\u7684\u5e38\u89c1\u9700\u6c42\u3002

                                                                          \u4ed3\u5e93 \u63cf\u8ff0 \u793a\u4f8b partner \u7531\u751f\u6001\u5408\u4f5c\u4f19\u4f34\u6240\u63d0\u4f9b\u7684\u5404\u7c7b\u4f18\u8d28\u7279\u8272 Chart tidb system \u7cfb\u7edf\u6838\u5fc3\u529f\u80fd\u7ec4\u4ef6\u53ca\u90e8\u5206\u9ad8\u7ea7\u529f\u80fd\u6240\u5fc5\u9700\u4f9d\u8d56\u7684 Chart\uff0c\u5982\u5fc5\u9700\u5b89\u88c5 insight-agent \u624d\u80fd\u591f\u83b7\u53d6\u96c6\u7fa4\u7684\u76d1\u63a7\u4fe1\u606f Insight addon \u4e1a\u52a1\u573a\u666f\u4e2d\u5e38\u89c1\u7684 Chart cert-manager community Kubernetes \u793e\u533a\u8f83\u4e3a\u70ed\u95e8\u7684\u5f00\u6e90\u7ec4\u4ef6 Chart Istio

                                                                          \u9664\u4e0a\u8ff0\u9884\u7f6e\u4ed3\u5e93\u5916\uff0c\u60a8\u4e5f\u53ef\u4ee5\u81ea\u884c\u6dfb\u52a0\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93\u3002\u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u6dfb\u52a0\u3001\u66f4\u65b0\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93\u3002

                                                                          "},{"location":"end-user/kpanda/helm/helm-repo.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762

                                                                          • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 NS Admin \u6216\u66f4\u9ad8\u6743\u9650 \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                                          • \u5982\u679c\u4f7f\u7528\u79c1\u6709\u4ed3\u5e93\uff0c\u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u62e5\u6709\u5bf9\u8be5\u79c1\u6709\u4ed3\u5e93\u7684\u8bfb\u5199\u6743\u9650\u3002

                                                                          "},{"location":"end-user/kpanda/helm/helm-repo.html#helm_1","title":"\u5f15\u5165\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93","text":"

                                                                          \u4e0b\u9762\u4ee5 Kubevela \u516c\u5f00\u7684\u955c\u50cf\u4ed3\u5e93\u4e3a\u4f8b\uff0c\u5f15\u5165 Helm \u4ed3\u5e93\u5e76\u7ba1\u7406\u3002

                                                                          1. \u627e\u5230\u9700\u8981\u5f15\u5165\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                                                          2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u4f9d\u6b21\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u4ed3\u5e93 \uff0c\u8fdb\u5165 Helm \u4ed3\u5e93\u9875\u9762\u3002

                                                                          3. \u5728 Helm \u4ed3\u5e93\u9875\u9762\u70b9\u51fb \u521b\u5efa\u4ed3\u5e93 \u6309\u94ae\uff0c\u8fdb\u5165\u521b\u5efa\u4ed3\u5e93\u9875\u9762\uff0c\u6309\u7167\u4e0b\u8868\u914d\u7f6e\u76f8\u5173\u53c2\u6570\u3002

                                                                            • \u4ed3\u5e93\u540d\u79f0\uff1a\u8bbe\u7f6e\u4ed3\u5e93\u540d\u79f0\u3002\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26 - \uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u5e76\u7ed3\u5c3e\uff0c\u4f8b\u5982 kubevela
                                                                            • \u4ed3\u5e93\u5730\u5740\uff1a\u7528\u6765\u6307\u5411\u76ee\u6807 Helm \u4ed3\u5e93\u7684 http\uff08s\uff09\u5730\u5740\u3002\u4f8b\u5982 https://charts.kubevela.net/core
                                                                            • \u8df3\u8fc7 TLS \u9a8c\u8bc1: \u5982\u679c\u6dfb\u52a0\u7684 Helm \u4ed3\u5e93\u4e3a https \u5730\u5740\u4e14\u9700\u8df3\u8fc7 TLS \u9a8c\u8bc1\uff0c\u53ef\u4ee5\u52fe\u9009\u6b64\u9009\u9879\uff0c\u9ed8\u8ba4\u4e3a\u4e0d\u52fe\u9009
                                                                            • \u8ba4\u8bc1\u65b9\u5f0f\uff1a\u8fde\u63a5\u4ed3\u5e93\u5730\u5740\u540e\u7528\u6765\u8fdb\u884c\u8eab\u4efd\u6821\u9a8c\u7684\u65b9\u5f0f\u3002\u5bf9\u4e8e\u516c\u5f00\u4ed3\u5e93\uff0c\u53ef\u4ee5\u9009\u62e9 None \uff0c\u79c1\u6709\u7684\u4ed3\u5e93\u9700\u8981\u8f93\u5165\u7528\u6237\u540d/\u5bc6\u7801\u4ee5\u8fdb\u884c\u8eab\u4efd\u6821\u9a8c
                                                                            • \u6807\u7b7e\uff1a\u4e3a\u8be5 Helm \u4ed3\u5e93\u6dfb\u52a0\u6807\u7b7e\u3002\u4f8b\u5982 key: repo4\uff1bvalue: Kubevela
                                                                            • \u6ce8\u89e3\uff1a\u4e3a\u8be5 Helm \u4ed3\u5e93\u6dfb\u52a0\u6ce8\u89e3\u3002\u4f8b\u5982 key: repo4\uff1bvalue: Kubevela
                                                                            • \u63cf\u8ff0\uff1a\u4e3a\u8be5 Helm \u4ed3\u5e93\u6dfb\u52a0\u63cf\u8ff0\u3002\u4f8b\u5982\uff1a\u8fd9\u662f\u4e00\u4e2a Kubevela \u516c\u5f00 Helm \u4ed3\u5e93

                                                                          4. \u70b9\u51fb \u786e\u5b9a \uff0c\u5b8c\u6210 Helm \u4ed3\u5e93\u7684\u521b\u5efa\u3002\u9875\u9762\u4f1a\u81ea\u52a8\u8df3\u8f6c\u81f3 Helm \u4ed3\u5e93\u5217\u8868\u3002
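
For reference, the same repository can also be registered from a workstation with the Helm CLI; this is a CLI-side equivalent of the UI flow above, not a required step:

```bash
# Register the Kubevela repository locally and fetch its chart index
helm repo add kubevela https://charts.kubevela.net/core
helm repo update
```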

                                                                          "},{"location":"end-user/kpanda/helm/helm-repo.html#helm_2","title":"\u66f4\u65b0 Helm \u4ed3\u5e93","text":"

                                                                          \u5f53 Helm \u4ed3\u5e93\u7684\u5730\u5740\u4fe1\u606f\u53d1\u751f\u53d8\u5316\u65f6\uff0c\u53ef\u4ee5\u66f4\u65b0 Helm \u4ed3\u5e93\u7684\u5730\u5740\u3001\u8ba4\u8bc1\u65b9\u5f0f\u3001\u6807\u7b7e\u3001\u6ce8\u89e3\u53ca\u63cf\u8ff0\u4fe1\u606f\u3002

                                                                          1. \u627e\u5230\u5f85\u66f4\u65b0\u4ed3\u5e93\u6240\u5728\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                                                          2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u4f9d\u6b21\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u4ed3\u5e93 \uff0c\u8fdb\u5165 Helm \u4ed3\u5e93\u5217\u8868\u9875\u9762\u3002

                                                                          3. \u5728\u4ed3\u5e93\u5217\u8868\u9875\u9762\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684 Helm \u4ed3\u5e93\uff0c\u5728\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \u6309\u94ae\uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u70b9\u51fb \u66f4\u65b0 \u3002

                                                                          4. \u5728 \u7f16\u8f91 Helm \u4ed3\u5e93 \u9875\u9762\u8fdb\u884c\u66f4\u65b0\uff0c\u5b8c\u6210\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                                                                          5. \u8fd4\u56de Helm \u4ed3\u5e93\u5217\u8868\uff0c\u5c4f\u5e55\u63d0\u793a\u66f4\u65b0\u6210\u529f\u3002
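
For comparison, with a local Helm CLI the equivalent of changing a repository's address is to re-register it under the same name; the repo name and URL below are assumptions:

```bash
# Overwrite the existing local repo entry with a new URL, then refresh the index
helm repo add kubevela https://charts.kubevela.net/core --force-update
helm repo update
```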

                                                                          "},{"location":"end-user/kpanda/helm/helm-repo.html#helm_3","title":"\u5220\u9664 Helm \u4ed3\u5e93","text":"

                                                                          \u9664\u4e86\u5f15\u5165\u3001\u66f4\u65b0\u4ed3\u5e93\u5916\uff0c\u60a8\u4e5f\u53ef\u4ee5\u5c06\u4e0d\u9700\u8981\u7684\u4ed3\u5e93\u5220\u9664\uff0c\u5305\u62ec\u7cfb\u7edf\u9884\u7f6e\u4ed3\u5e93\u548c\u7b2c\u4e09\u65b9\u4ed3\u5e93\u3002

                                                                          1. \u627e\u5230\u5f85\u5220\u9664\u4ed3\u5e93\u6240\u5728\u7684\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                                                          2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u4f9d\u6b21\u70b9\u51fb Helm \u5e94\u7528 -> Helm \u4ed3\u5e93 \uff0c\u8fdb\u5165 Helm \u4ed3\u5e93\u5217\u8868\u9875\u9762\u3002

                                                                          3. \u5728\u4ed3\u5e93\u5217\u8868\u9875\u9762\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684 Helm \u4ed3\u5e93\uff0c\u5728\u5217\u8868\u53f3\u4fa7\u70b9\u51fb \u2507 \u6309\u94ae\uff0c\u5728\u5f39\u51fa\u83dc\u5355\u4e2d\u70b9\u51fb \u5220\u9664 \u3002

                                                                          4. \u8f93\u5165\u4ed3\u5e93\u540d\u79f0\u8fdb\u884c\u786e\u8ba4\uff0c\u70b9\u51fb \u5220\u9664 \u3002

                                                                          5. \u8fd4\u56de Helm \u4ed3\u5e93\u5217\u8868\uff0c\u5c4f\u5e55\u63d0\u793a\u5220\u9664\u6210\u529f\u3002

                                                                          "},{"location":"end-user/kpanda/helm/multi-archi-helm.html","title":"Helm \u5e94\u7528\u591a\u67b6\u6784\u548c\u5347\u7ea7\u5bfc\u5165\u6b65\u9aa4","text":"

                                                                          \u901a\u5e38\u5728\u591a\u67b6\u6784\u96c6\u7fa4\u4e2d\uff0c\u4e5f\u4f1a\u4f7f\u7528\u591a\u67b6\u6784\u7684 Helm \u5305\u6765\u90e8\u7f72\u5e94\u7528\uff0c\u4ee5\u89e3\u51b3\u67b6\u6784\u5dee\u5f02\u5e26\u6765\u7684\u90e8\u7f72\u95ee\u9898\u3002 \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u5c06\u5355\u67b6\u6784 Helm \u5e94\u7528\u878d\u5408\u4e3a\u591a\u67b6\u6784\uff0c\u4ee5\u53ca\u591a\u67b6\u6784\u4e0e\u591a\u67b6\u6784 Helm \u5e94\u7528\u7684\u76f8\u4e92\u878d\u5408\u3002

                                                                          "},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_1","title":"\u5bfc\u5165","text":""},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_2","title":"\u5355\u67b6\u6784\u5bfc\u5165","text":"

                                                                          \u51c6\u5907\u597d\u5f85\u5bfc\u5165\u7684\u79bb\u7ebf\u5305 addon-offline-full-package-${version}-${arch}.tar.gz \u3002 \u628a\u8def\u5f84\u586b\u5199\u81f3 clusterConfig.yml \u914d\u7f6e\u6587\u4ef6\uff0c\u4f8b\u5982\uff1a

```yaml
addonPackage:
  path: "/home/addon-offline-full-package-v0.9.0-amd64.tar.gz"
```

Then run the import command:

```bash
~/dce5-installer cluster-create -c /home/dce5/sample/clusterConfig.yaml -m /home/dce5/sample/manifest.yaml -d -j13
```
                                                                          "},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_3","title":"\u591a\u67b6\u6784\u878d\u5408","text":"

                                                                          \u51c6\u5907\u597d\u5f85\u878d\u5408\u7684\u79bb\u7ebf\u5305 addon-offline-full-package-${version}-${arch}.tar.gz\u3002

                                                                          \u4ee5 addon-offline-full-package-v0.9.0-arm64.tar.gz \u4e3a\u4f8b\uff0c\u6267\u884c\u5bfc\u5165\u547d\u4ee4\uff1a

```bash
~/dce5-installer import-addon -c /home/dce5/sample/clusterConfig.yaml --addon-path=/home/addon-offline-full-package-v0.9.0-arm64.tar.gz
```
                                                                          "},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_4","title":"\u5347\u7ea7","text":""},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_5","title":"\u5355\u67b6\u6784\u5347\u7ea7","text":"

                                                                          \u51c6\u5907\u597d\u5f85\u5bfc\u5165\u7684\u79bb\u7ebf\u5305 addon-offline-full-package-${version}-${arch}.tar.gz\u3002

                                                                          \u628a\u8def\u5f84\u586b\u5199\u81f3 clusterConfig.yml \u914d\u7f6e\u6587\u4ef6\uff0c\u4f8b\u5982\uff1a

```yaml
addonPackage:
  path: "/home/addon-offline-full-package-v0.11.0-amd64.tar.gz"
```

Then run the import command:

```bash
~/dce5-installer cluster-create -c /home/dce5/sample/clusterConfig.yaml -m /home/dce5/sample/manifest.yaml -d -j13
```
                                                                          "},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_6","title":"\u591a\u67b6\u6784\u878d\u5408","text":"

                                                                          \u51c6\u5907\u597d\u5f85\u878d\u5408\u7684\u79bb\u7ebf\u5305 addon-offline-full-package-${version}-${arch}.tar.gz\u3002

                                                                          \u4ee5 addon-offline-full-package-v0.11.0-arm64.tar.gz \u4e3a\u4f8b\uff0c\u6267\u884c\u5bfc\u5165\u547d\u4ee4\uff1a

```bash
~/dce5-installer import-addon -c /home/dce5/sample/clusterConfig.yaml --addon-path=/home/addon-offline-full-package-v0.11.0-arm64.tar.gz
```
                                                                          "},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_7","title":"\u6ce8\u610f\u4e8b\u9879","text":""},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_8","title":"\u78c1\u76d8\u7a7a\u95f4","text":"

                                                                          \u79bb\u7ebf\u5305\u6bd4\u8f83\u5927\uff0c\u4e14\u8fc7\u7a0b\u4e2d\u9700\u8981\u89e3\u538b\u548c load \u955c\u50cf\uff0c\u9700\u8981\u9884\u7559\u5145\u8db3\u7684\u7a7a\u95f4\uff0c\u5426\u5219\u53ef\u80fd\u5728\u8fc7\u7a0b\u4e2d\u62a5 \u201cno space left\u201d \u800c\u4e2d\u65ad\u3002
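
A quick pre-flight check can avoid an interrupted run. This is a minimal sketch, assuming the package sits under /home and that container images are stored under /var/lib (the usual location for containerd/docker data; your hosts may differ):

```bash
# Size of the offline package to be imported
du -sh /home/addon-offline-full-package-*.tar.gz

# Free space on the filesystems that will hold the extracted files and loaded images
df -h /home /var/lib
```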

                                                                          "},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_9","title":"\u5931\u8d25\u540e\u91cd\u8bd5","text":"

                                                                          \u5982\u679c\u5728\u591a\u67b6\u6784\u878d\u5408\u6b65\u9aa4\u6267\u884c\u5931\u8d25\uff0c\u91cd\u8bd5\u524d\u9700\u8981\u6e05\u7406\u4e00\u4e0b\u6b8b\u7559\uff1a

```bash
rm -rf addon-offline-target-package
```
                                                                          "},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_10","title":"\u955c\u50cf\u7a7a\u95f4","text":"

                                                                          \u5982\u679c\u878d\u5408\u7684\u79bb\u7ebf\u5305\u4e2d\u5305\u542b\u4e86\u4e0e\u5bfc\u5165\u7684\u79bb\u7ebf\u5305\u4e0d\u4e00\u81f4\u7684\u955c\u50cf\u7a7a\u95f4\uff0c\u53ef\u80fd\u4f1a\u5728\u878d\u5408\u8fc7\u7a0b\u4e2d\u56e0\u4e3a\u955c\u50cf\u7a7a\u95f4\u4e0d\u5b58\u5728\u800c\u62a5\u9519\uff1a

                                                                          \u89e3\u51b3\u529e\u6cd5\uff1a\u53ea\u9700\u8981\u5728\u878d\u5408\u4e4b\u524d\u521b\u5efa\u597d\u8be5\u955c\u50cf\u7a7a\u95f4\u5373\u53ef\uff0c\u4f8b\u5982\u4e0a\u56fe\u62a5\u9519\u53ef\u901a\u8fc7\u521b\u5efa\u955c\u50cf\u7a7a\u95f4 localhost \u63d0\u524d\u907f\u514d\u3002

                                                                          "},{"location":"end-user/kpanda/helm/multi-archi-helm.html#_11","title":"\u67b6\u6784\u51b2\u7a81","text":"

                                                                          \u5347\u7ea7\u81f3\u4f4e\u4e8e 0.12.0 \u7248\u672c\u7684 addon \u65f6\uff0c\u7531\u4e8e\u76ee\u6807\u79bb\u7ebf\u5305\u91cc\u7684 charts-syncer \u6ca1\u6709\u68c0\u67e5\u955c\u50cf\u5b58\u5728\u5219\u4e0d\u63a8\u9001\u529f\u80fd\uff0c\u56e0\u6b64\u4f1a\u5728\u5347\u7ea7\u7684\u8fc7\u7a0b\u4e2d\u4f1a\u91cd\u65b0\u628a\u591a\u67b6\u6784\u51b2\u6210\u5355\u67b6\u6784\u3002 \u4f8b\u5982\uff1a\u5728 v0.10 \u7248\u672c\u5c06 addon \u5b9e\u73b0\u4e3a\u591a\u67b6\u6784\uff0c\u6b64\u65f6\u82e5\u5347\u7ea7\u4e3a v0.11 \u7248\u672c\uff0c\u5219\u591a\u67b6\u6784 addon \u4f1a\u88ab\u8986\u76d6\u4e3a\u5355\u67b6\u6784\uff1b\u82e5\u5347\u7ea7\u4e3a 0.12.0 \u53ca\u4ee5\u4e0a\u7248\u672c\u5219\u4ecd\u80fd\u591f\u4fdd\u6301\u591a\u67b6\u6784\u3002

                                                                          "},{"location":"end-user/kpanda/helm/upload-helm.html","title":"\u4e0a\u4f20 Helm \u6a21\u677f","text":"

                                                                          \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u4e0a\u4f20 Helm \u6a21\u677f\uff0c\u64cd\u4f5c\u6b65\u9aa4\u89c1\u4e0b\u6587\u3002

                                                                          1. \u5f15\u5165 Helm \u4ed3\u5e93\uff0c\u64cd\u4f5c\u6b65\u9aa4\u53c2\u8003\u5f15\u5165\u7b2c\u4e09\u65b9 Helm \u4ed3\u5e93\u3002

                                                                          2. \u4e0a\u4f20 Helm Chart \u5230 Helm \u4ed3\u5e93\u3002

                                                                            \u5ba2\u6237\u7aef\u4e0a\u4f20\u9875\u9762\u4e0a\u4f20

   Note

   This method applies to Harbor, ChartMuseum, and JFrog type repositories.

   1. Log in to a node that can access the Helm repository, upload the Helm binary to the node, and install the cm-push plugin (internet access and a pre-installed Git are required).

      For the plugin installation procedure, refer to Installing the cm-push Plugin.

   2. Push the Helm Chart to the Helm repository by running the following command:

```bash
helm cm-push ${charts-dir} ${HELM_REPO_URL} --username ${username} --password ${password}
```

   Field descriptions:

   • charts-dir: the directory of the Helm Chart, or a packaged Chart (i.e. a .tgz file).
   • HELM_REPO_URL: the URL of the Helm repository.
   • username/password: the username and password of a Helm repository account with push permissions.
   • If the repository is accessed over https and certificate verification should be skipped, add the --insecure flag. A filled-in example follows below.
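
A filled-in invocation might look like the following; the chart path, repository URL, and credentials are all hypothetical placeholders:

```bash
# Push a packaged chart to a ChartMuseum-compatible repository
helm cm-push ./mychart-0.1.0.tgz https://charts.example.com/myrepo \
  --username demo-user --password demo-pass
```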

   Upload via web page

   Note

   This method applies only to Harbor type repositories.

   1. Log in to the Harbor web UI, making sure the logged-in user has push permissions.

   2. Go to the corresponding project, select the Helm Charts tab, and click the Upload button on the page to complete the Helm Chart upload.

3. Sync the remote repository data.

   Manual sync

   By default, Helm repository auto-refresh is not enabled for the cluster, so a manual sync is required. The rough steps are:

   Go to Helm Apps -> Helm Repositories, click the ⋮ button on the right side of the repository list, and select Sync Repository to complete the data sync.

   Auto sync

   To enable automatic Helm repository sync, go to Cluster Operations -> Cluster Settings -> Advanced Configuration and turn on the Helm repository auto-refresh switch.

                                                                          "},{"location":"end-user/kpanda/inspect/index.html","title":"\u96c6\u7fa4\u5de1\u68c0","text":"

                                                                          \u96c6\u7fa4\u5de1\u68c0\u53ef\u4ee5\u901a\u8fc7\u81ea\u52a8\u6216\u624b\u52a8\u65b9\u5f0f\uff0c\u5b9a\u671f\u6216\u968f\u65f6\u68c0\u67e5\u96c6\u7fa4\u7684\u6574\u4f53\u5065\u5eb7\u72b6\u6001\uff0c\u8ba9\u7ba1\u7406\u5458\u83b7\u5f97\u4fdd\u969c\u96c6\u7fa4\u5b89\u5168\u7684\u4e3b\u52a8\u6743\u3002 \u57fa\u4e8e\u5408\u7406\u7684\u5de1\u68c0\u8ba1\u5212\uff0c\u8fd9\u79cd\u4e3b\u52a8\u81ea\u53d1\u7684\u96c6\u7fa4\u68c0\u67e5\u53ef\u4ee5\u8ba9\u7ba1\u7406\u5458\u968f\u65f6\u638c\u63e1\u96c6\u7fa4\u72b6\u6001\uff0c\u6446\u8131\u4e4b\u524d\u51fa\u73b0\u6545\u969c\u65f6\u53ea\u80fd\u88ab\u52a8\u6392\u67e5\u95ee\u9898\u7684\u56f0\u5883\uff0c\u505a\u5230\u4e8b\u5148\u76d1\u63a7\u3001\u63d0\u524d\u9632\u8303\u3002

                                                                          \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u63d0\u4f9b\u7684\u96c6\u7fa4\u5de1\u68c0\u529f\u80fd\uff0c\u652f\u6301\u4ece\u96c6\u7fa4\u3001\u8282\u70b9\u3001\u5bb9\u5668\u7ec4\uff08Pod\uff09\u4e09\u4e2a\u7ef4\u5ea6\u8fdb\u884c\u81ea\u5b9a\u4e49\u5de1\u68c0\u9879\uff0c\u5de1\u68c0\u7ed3\u675f\u540e\u4f1a\u81ea\u52a8\u751f\u6210\u53ef\u89c6\u5316\u7684\u5de1\u68c0\u62a5\u544a\u3002

                                                                          • \u96c6\u7fa4\u7ef4\u5ea6\uff1a\u68c0\u67e5\u96c6\u7fa4\u4e2d\u7cfb\u7edf\u7ec4\u4ef6\u7684\u8fd0\u884c\u60c5\u51b5\uff0c\u5305\u62ec\u96c6\u7fa4\u72b6\u6001\u3001\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\u4ee5\u53ca\u63a7\u5236\u8282\u70b9\u7279\u6709\u7684\u5de1\u68c0\u9879\u7b49\uff0c\u4f8b\u5982 kube-apiserver \u548c etcd \u7684\u72b6\u6001\u3002
                                                                          • \u8282\u70b9\u7ef4\u5ea6\uff1a\u5305\u62ec\u63a7\u5236\u8282\u70b9\u548c\u5de5\u4f5c\u8282\u70b9\u901a\u7528\u7684\u68c0\u67e5\u9879\uff0c\u4f8b\u5982\u8282\u70b9\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\u3001\u53e5\u67c4\u6570\u3001PID \u72b6\u6001\u3001\u7f51\u7edc\u72b6\u6001\u3002
                                                                          • \u5bb9\u5668\u7ec4\u7ef4\u5ea6\uff1a\u68c0\u67e5 Pod \u7684 CPU \u548c\u5185\u5b58\u4f7f\u7528\u60c5\u51b5\u3001\u8fd0\u884c\u72b6\u6001\u3001PV \u548c PVC \u7684\u72b6\u6001\u7b49\u3002

                                                                          \u5982\u9700\u4e86\u89e3\u6216\u6267\u884c\u5b89\u5168\u65b9\u9762\u7684\u5de1\u68c0\uff0c\u53ef\u53c2\u8003\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u7684\u5b89\u5168\u626b\u63cf\u7c7b\u578b\u3002

                                                                          "},{"location":"end-user/kpanda/inspect/config.html","title":"\u521b\u5efa\u5de1\u68c0\u914d\u7f6e","text":"

                                                                          \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u63d0\u4f9b\u96c6\u7fa4\u5de1\u68c0\u529f\u80fd\uff0c\u652f\u6301\u4ece\u96c6\u7fa4\u7ef4\u5ea6\u3001\u8282\u70b9\u7ef4\u5ea6\u3001\u5bb9\u5668\u7ec4\u7ef4\u5ea6\u8fdb\u884c\u5de1\u68c0\u3002

                                                                          • \u96c6\u7fa4\u7ef4\u5ea6\uff1a\u68c0\u67e5\u96c6\u7fa4\u4e2d\u7cfb\u7edf\u7ec4\u4ef6\u7684\u8fd0\u884c\u60c5\u51b5\uff0c\u5305\u62ec\u96c6\u7fa4\u72b6\u6001\u3001\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\uff0c\u4ee5\u53ca\u63a7\u5236\u8282\u70b9\u7279\u6709\u7684\u5de1\u68c0\u9879\u7b49\uff0c\u4f8b\u5982 kube-apiserver \u548c etcd \u7684\u72b6\u6001\u3002
                                                                          • \u8282\u70b9\u7ef4\u5ea6\uff1a\u5305\u62ec\u63a7\u5236\u8282\u70b9\u548c\u5de5\u4f5c\u8282\u70b9\u901a\u7528\u7684\u68c0\u67e5\u9879\uff0c\u4f8b\u5982\u8282\u70b9\u8d44\u6e90\u4f7f\u7528\u60c5\u51b5\u3001\u53e5\u67c4\u6570\u3001PID \u72b6\u6001\u3001\u7f51\u7edc\u72b6\u6001\u3002
                                                                          • \u5bb9\u5668\u7ec4\u7ef4\u5ea6\uff1a\u68c0\u67e5 Pod \u7684 CPU \u548c\u5185\u5b58\u4f7f\u7528\u60c5\u51b5\u3001\u8fd0\u884c\u72b6\u6001\u3001PV \u548c PVC \u7684\u72b6\u6001\u7b49\u3002

                                                                          \u4e0b\u9762\u4ecb\u7ecd\u5982\u4f55\u521b\u5efa\u5de1\u68c0\u914d\u7f6e\u3002

                                                                          "},{"location":"end-user/kpanda/inspect/config.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u96c6\u7fa4
                                                                          • \u6240\u9009\u96c6\u7fa4\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u4e14\u5df2\u7ecf\u5728\u96c6\u7fa4\u4e2d\u5b89\u88c5\u4e86 insight \u7ec4\u4ef6
                                                                          "},{"location":"end-user/kpanda/inspect/config.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                                          1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u96c6\u7fa4\u5de1\u68c0 \u3002

                                                                          2. \u5728\u9875\u9762\u53f3\u4fa7\u70b9\u51fb \u5de1\u68c0\u914d\u7f6e \u3002

                                                                          3. \u53c2\u8003\u4ee5\u4e0b\u8bf4\u660e\u586b\u5199\u5de1\u68c0\u914d\u7f6e\uff0c\u7136\u540e\u5728\u9875\u9762\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002

                                                                            • \u96c6\u7fa4\uff1a\u4e0b\u62c9\u9009\u62e9\u8981\u5bf9\u54ea\u4e9b\u96c6\u7fa4\u8fdb\u884c\u5de1\u68c0\u3002\u5982\u679c\u9009\u62e9\u591a\u4e2a\u96c6\u7fa4\uff0c\u5219\u81ea\u52a8\u751f\u6210\u591a\u4e2a\u5de1\u68c0\u914d\u7f6e\uff08\u4ec5\u5de1\u68c0\u7684\u96c6\u7fa4\u4e0d\u4e00\u81f4\uff0c\u5176\u4ed6\u914d\u7f6e\u90fd\u5b8c\u5168\u4e00\u81f4\uff09
                                                                            • \u5b9a\u65f6\u5de1\u68c0\uff1a\u542f\u7528\u540e\u53ef\u6839\u636e\u4e8b\u5148\u8bbe\u7f6e\u7684\u5de1\u68c0\u9891\u7387\u5b9a\u671f\u81ea\u52a8\u6267\u884c\u96c6\u7fa4\u5de1\u68c0
                                                                            • \u5de1\u68c0\u9891\u7387\uff1a\u8bbe\u7f6e\u81ea\u52a8\u5de1\u68c0\u7684\u5468\u671f\uff0c\u4f8b\u5982\u6bcf\u5468\u4e8c\u4e0a\u5348\u5341\u70b9\u3002\u652f\u6301\u81ea\u5b9a\u4e49 CronExpression\uff0c\u53ef\u53c2\u8003 Cron \u65f6\u95f4\u8868\u8bed\u6cd5
                                                                            • \u5de1\u68c0\u8bb0\u5f55\u4fdd\u7559\u6761\u6570\uff1a\u7d2f\u8ba1\u6700\u591a\u4fdd\u7559\u591a\u5c11\u6761\u5de1\u68c0\u8bb0\u5f55\uff0c\u5305\u62ec\u6240\u6709\u96c6\u7fa4\u7684\u5de1\u68c0\u8bb0\u5f55
                                                                            • \u53c2\u6570\u914d\u7f6e\uff1a\u53c2\u6570\u914d\u7f6e\u5206\u4e3a\u96c6\u7fa4\u7ef4\u5ea6\u3001\u8282\u70b9\u7ef4\u5ea6\u3001\u5bb9\u5668\u7ec4\u7ef4\u5ea6\u4e09\u90e8\u5206\uff0c\u53ef\u4ee5\u6839\u636e\u573a\u666f\u9700\u6c42\u542f\u7528\u6216\u7981\u7528\u67d0\u4e9b\u5de1\u68c0\u9879\u3002
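
For instance, "every Tuesday at 10 AM" expressed as a standard five-field cron expression (assuming the platform follows the standard syntax that the Cron reference above describes) is:

```
0 10 * * 2    # minute hour day-of-month month day-of-week (2 = Tuesday)
```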

Once created, the inspection configuration automatically appears in the inspection configuration list. Click the more-actions button on the right of a configuration to run an inspection immediately, modify the configuration, or delete the configuration and its inspection records.

• Click Inspect to run an inspection immediately based on this configuration.
• Click Inspection Configuration to modify the configuration.
• Click Delete to delete the configuration and its historical inspection records.

Note

• If the Scheduled inspection setting is enabled in the configuration, inspections run automatically at the specified time.
• If Scheduled inspection is not enabled, inspections must be triggered manually.
                                                                          "},{"location":"end-user/kpanda/inspect/inspect.html","title":"\u6267\u884c\u96c6\u7fa4\u5de1\u68c0","text":"

                                                                          \u5de1\u68c0\u914d\u7f6e\u521b\u5efa\u5b8c\u6210\u540e\uff0c\u5982\u679c\u542f\u7528\u4e86 \u5b9a\u65f6\u5de1\u68c0 \u914d\u7f6e\uff0c\u5219\u4f1a\u5728\u6307\u5b9a\u65f6\u95f4\u81ea\u52a8\u6267\u884c\u5de1\u68c0\u3002\u5982\u672a\u542f\u7528 \u5b9a\u65f6\u5de1\u68c0 \u914d\u7f6e\uff0c\u5219\u9700\u8981\u624b\u52a8\u89e6\u53d1\u5de1\u68c0\u3002

                                                                          \u6b64\u9875\u4ecb\u7ecd\u5982\u4f55\u624b\u52a8\u6267\u884c\u96c6\u7fa4\u5de1\u68c0\u3002

                                                                          "},{"location":"end-user/kpanda/inspect/inspect.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u96c6\u7fa4
                                                                          • \u5df2\u521b\u5efa\u5de1\u68c0\u914d\u7f6e
                                                                          • \u6240\u9009\u96c6\u7fa4\u5904\u4e8e \u8fd0\u884c\u4e2d \u72b6\u6001\u4e14\u5df2\u7ecf\u5728\u96c6\u7fa4\u4e2d\u5b89\u88c5\u4e86 insight \u7ec4\u4ef6
                                                                          "},{"location":"end-user/kpanda/inspect/inspect.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                                          \u6267\u884c\u5de1\u68c0\u65f6\uff0c\u652f\u6301\u52fe\u9009\u591a\u4e2a\u96c6\u7fa4\u8fdb\u884c\u6279\u91cf\u5de1\u68c0\uff0c\u6216\u8005\u4ec5\u5bf9\u67d0\u4e00\u4e2a\u96c6\u7fa4\u8fdb\u884c\u5355\u72ec\u5de1\u68c0\u3002

                                                                          \u6279\u91cf\u5de1\u68c0\u5355\u72ec\u5de1\u68c0
                                                                          1. \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7684\u4e00\u7ea7\u5bfc\u822a\u680f\u70b9\u51fb \u96c6\u7fa4\u5de1\u68c0 \uff0c\u7136\u540e\u5728\u9875\u9762\u53f3\u4fa7\u70b9\u51fb \u5de1\u68c0 \u3002

                                                                          2. \u52fe\u9009\u9700\u8981\u5de1\u68c0\u7684\u96c6\u7fa4\uff0c\u7136\u540e\u5728\u9875\u9762\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002

                                                                            • \u82e5\u9009\u62e9\u591a\u4e2a\u96c6\u7fa4\u8fdb\u884c\u540c\u65f6\u5de1\u68c0\uff0c\u7cfb\u7edf\u5c06\u6839\u636e\u4e0d\u540c\u96c6\u7fa4\u7684\u5de1\u68c0\u914d\u7f6e\u8fdb\u884c\u5de1\u68c0\u3002
                                                                            • \u5982\u672a\u8bbe\u7f6e\u96c6\u7fa4\u5de1\u68c0\u914d\u7f6e\uff0c\u5c06\u4f7f\u7528\u7cfb\u7edf\u9ed8\u8ba4\u914d\u7f6e\u3002

                                                                          1. \u8fdb\u5165\u96c6\u7fa4\u5de1\u68c0\u9875\u9762\u3002
                                                                          2. \u5728\u5bf9\u5e94\u5de1\u68c0\u914d\u7f6e\u7684\u53f3\u4fa7\u70b9\u51fb \u2507 \u66f4\u591a\u64cd\u4f5c\u6309\u94ae\uff0c\u7136\u540e\u5728\u5f39\u51fa\u7684\u83dc\u5355\u4e2d\u9009\u62e9 \u5de1\u68c0 \u5373\u53ef\u3002

                                                                          "},{"location":"end-user/kpanda/inspect/report.html","title":"\u67e5\u770b\u5de1\u68c0\u62a5\u544a","text":"

                                                                          \u5de1\u68c0\u6267\u884c\u5b8c\u6210\u540e\uff0c\u53ef\u4ee5\u67e5\u770b\u5de1\u68c0\u8bb0\u5f55\u548c\u8be6\u7ec6\u7684\u5de1\u68c0\u62a5\u544a\u3002

                                                                          "},{"location":"end-user/kpanda/inspect/report.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          • \u5df2\u7ecf\u521b\u5efa\u4e86\u5de1\u68c0\u914d\u7f6e
                                                                          • \u5df2\u7ecf\u6267\u884c\u8fc7\u81f3\u5c11\u4e00\u6b21\u5de1\u68c0
                                                                          "},{"location":"end-user/kpanda/inspect/report.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                                          1. \u8fdb\u5165\u96c6\u7fa4\u5de1\u68c0\u9875\u9762\uff0c\u70b9\u51fb\u76ee\u6807\u5de1\u68c0\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                                                                          2. \u70b9\u51fb\u60f3\u8981\u67e5\u770b\u7684\u5de1\u68c0\u8bb0\u5f55\u540d\u79f0\u3002

                                                                            • \u6bcf\u6267\u884c\u4e00\u6b21\u5de1\u68c0\uff0c\u5c31\u4f1a\u751f\u6210\u4e00\u6761\u5de1\u68c0\u8bb0\u5f55\u3002
                                                                            • \u5f53\u5de1\u68c0\u8bb0\u5f55\u8d85\u8fc7\u5de1\u68c0\u914d\u7f6e\u4e2d\u8bbe\u7f6e\u7684\u6700\u5927\u4fdd\u7559\u6761\u6570\u65f6\uff0c\u4ece\u6267\u884c\u65f6\u95f4\u6700\u65e9\u7684\u8bb0\u5f55\u5f00\u59cb\u5220\u9664\u3002

                                                                          3. \u67e5\u770b\u5de1\u68c0\u7684\u8be6\u7ec6\u4fe1\u606f\uff0c\u6839\u636e\u5de1\u68c0\u914d\u7f6e\u53ef\u80fd\u5305\u62ec\u96c6\u7fa4\u8d44\u6e90\u6982\u89c8\u3001\u7cfb\u7edf\u7ec4\u4ef6\u7684\u8fd0\u884c\u60c5\u51b5\u7b49\u3002

                                                                            \u5728\u9875\u9762\u53f3\u4e0a\u89d2\u53ef\u4ee5\u4e0b\u8f7d\u5de1\u68c0\u62a5\u544a\u6216\u5220\u9664\u8be5\u9879\u5de1\u68c0\u62a5\u544a\u3002

                                                                          "},{"location":"end-user/kpanda/namespaces/createns.html","title":"\u547d\u540d\u7a7a\u95f4","text":"

                                                                          \u547d\u540d\u7a7a\u95f4\u662f Kubernetes \u4e2d\u7528\u6765\u8fdb\u884c\u8d44\u6e90\u9694\u79bb\u7684\u4e00\u79cd\u62bd\u8c61\u3002\u4e00\u4e2a\u96c6\u7fa4\u4e0b\u53ef\u4ee5\u5305\u542b\u591a\u4e2a\u4e0d\u91cd\u540d\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u6bcf\u4e2a\u547d\u540d\u7a7a\u95f4\u4e2d\u7684\u8d44\u6e90\u76f8\u4e92\u9694\u79bb\u3002\u6709\u5173\u547d\u540d\u7a7a\u95f4\u7684\u8be6\u7ec6\u4ecb\u7ecd\uff0c\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u3002

                                                                          \u672c\u6587\u5c06\u4ecb\u7ecd\u547d\u540d\u7a7a\u95f4\u7684\u76f8\u5173\u64cd\u4f5c\u3002

                                                                          "},{"location":"end-user/kpanda/namespaces/createns.html#_2","title":"\u521b\u5efa\u547d\u540d\u7a7a\u95f4","text":"

                                                                          \u652f\u6301\u901a\u8fc7\u8868\u5355\u8f7b\u677e\u521b\u5efa\u547d\u540d\u7a7a\u95f4\uff0c\u4e5f\u652f\u6301\u901a\u8fc7\u7f16\u5199\u6216\u5bfc\u5165 YAML \u6587\u4ef6\u5feb\u901f\u521b\u5efa\u547d\u540d\u7a7a\u95f4\u3002

                                                                          Note

                                                                          • \u5728\u521b\u5efa\u547d\u540d\u7a7a\u95f4\u4e4b\u524d\uff0c\u9700\u8981\u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\u3002
                                                                          • \u96c6\u7fa4\u521d\u59cb\u5316\u540e\u901a\u5e38\u4f1a\u81ea\u52a8\u751f\u6210\u9ed8\u8ba4\u7684\u547d\u540d\u7a7a\u95f4 default \u3002\u4f46\u5bf9\u4e8e\u751f\u4ea7\u96c6\u7fa4\u800c\u8a00\uff0c\u4e3a\u4fbf\u4e8e\u7ba1\u7406\uff0c\u5efa\u8bae\u521b\u5efa\u5176\u4ed6\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u800c\u975e\u76f4\u63a5\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002
                                                                          "},{"location":"end-user/kpanda/namespaces/createns.html#_3","title":"\u8868\u5355\u521b\u5efa","text":"
                                                                          1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                                                                          2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u547d\u540d\u7a7a\u95f4 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae\u3002

                                                                          3. \u586b\u5199\u547d\u540d\u7a7a\u95f4\u7684\u540d\u79f0\uff0c\u914d\u7f6e\u5de5\u4f5c\u7a7a\u95f4\u548c\u6807\u7b7e\uff08\u53ef\u9009\u8bbe\u7f6e\uff09\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                                                                            Info

                                                                            • \u547d\u540d\u7a7a\u95f4\u7ed1\u5b9a\u5de5\u4f5c\u7a7a\u95f4\u4e4b\u540e\uff0c\u8be5\u547d\u540d\u7a7a\u95f4\u7684\u8d44\u6e90\u5c31\u4f1a\u5171\u4eab\u7ed9\u6240\u7ed1\u5b9a\u7684\u5de5\u4f5c\u7a7a\u95f4\u3002\u6709\u5173\u5de5\u4f5c\u7a7a\u95f4\u7684\u8be6\u7ec6\u8bf4\u660e\uff0c\u53ef\u53c2\u8003\u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7\u3002

                                                                            • \u547d\u540d\u7a7a\u95f4\u521b\u5efa\u5b8c\u6210\u540e\uff0c\u4ecd\u7136\u53ef\u4ee5\u7ed1\u5b9a/\u89e3\u7ed1\u5de5\u4f5c\u7a7a\u95f4\u3002

                                                                          4. \u70b9\u51fb \u786e\u5b9a \uff0c\u5b8c\u6210\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3002\u5728\u547d\u540d\u7a7a\u95f4\u5217\u8868\u53f3\u4fa7\uff0c\u70b9\u51fb \u2507 \uff0c\u53ef\u4ee5\u4ece\u5f39\u51fa\u83dc\u5355\u4e2d\u9009\u62e9\u67e5\u770b YAML\u3001\u4fee\u6539\u6807\u7b7e\u3001\u7ed1\u5b9a/\u89e3\u7ed1\u5de5\u4f5c\u7a7a\u95f4\u3001\u914d\u989d\u7ba1\u7406\u3001\u5220\u9664\u7b49\u66f4\u591a\u64cd\u4f5c\u3002

                                                                          "},{"location":"end-user/kpanda/namespaces/createns.html#yaml","title":"YAML \u521b\u5efa","text":"
                                                                          1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                                                                          2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u547d\u540d\u7a7a\u95f4 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4fa7\u7684 YAML \u521b\u5efa \u6309\u94ae\u3002

                                                                          3. \u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u5185\u5bb9\uff0c\u6216\u8005\u4ece\u672c\u5730\u76f4\u63a5\u5bfc\u5165\u5df2\u6709\u7684 YAML \u6587\u4ef6\u3002

                                                                            \u8f93\u5165 YAML \u5185\u5bb9\u540e\uff0c\u70b9\u51fb \u4e0b\u8f7d \u53ef\u4ee5\u5c06\u8be5 YAML \u6587\u4ef6\u4fdd\u5b58\u5230\u672c\u5730\u3002

                                                                          4. \u6700\u540e\u5728\u5f39\u6846\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002
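
A minimal sketch of the YAML content you might paste; the namespace name demo-ns and the label value are placeholders:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: demo-ns           # placeholder name
  labels:
    environment: test     # optional label, placeholder value
```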

                                                                          "},{"location":"end-user/kpanda/namespaces/exclusive.html","title":"\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9","text":"

                                                                          \u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\u6307\u5728 kubernetes \u96c6\u7fa4\u4e2d\uff0c\u901a\u8fc7\u6c61\u70b9\u548c\u6c61\u70b9\u5bb9\u5fcd\u7684\u65b9\u5f0f\u5b9e\u73b0\u7279\u5b9a\u547d\u540d\u7a7a\u95f4\u5bf9\u4e00\u4e2a\u6216\u591a\u4e2a\u8282\u70b9 CPU\u3001\u5185\u5b58\u7b49\u8d44\u6e90\u7684\u72ec\u4eab\u3002\u4e3a\u7279\u5b9a\u547d\u540d\u7a7a\u95f4\u914d\u7f6e\u72ec\u4eab\u8282\u70b9\u540e\uff0c\u5176\u5b83\u975e\u6b64\u547d\u540d\u7a7a\u95f4\u7684\u5e94\u7528\u548c\u670d\u52a1\u5747\u4e0d\u80fd\u8fd0\u884c\u5728\u88ab\u72ec\u4eab\u7684\u8282\u70b9\u4e0a\u3002\u4f7f\u7528\u72ec\u4eab\u8282\u70b9\u53ef\u4ee5\u8ba9\u91cd\u8981\u5e94\u7528\u72ec\u4eab\u4e00\u90e8\u5206\u8ba1\u7b97\u8d44\u6e90\uff0c\u4ece\u800c\u548c\u5176\u4ed6\u5e94\u7528\u5b9e\u73b0\u7269\u7406\u9694\u79bb\u3002

                                                                          Note

                                                                          \u5728\u8282\u70b9\u88ab\u8bbe\u7f6e\u4e3a\u72ec\u4eab\u8282\u70b9\u524d\u5df2\u7ecf\u8fd0\u884c\u5728\u6b64\u8282\u70b9\u4e0a\u7684\u5e94\u7528\u548c\u670d\u52a1\u5c06\u4e0d\u4f1a\u53d7\u5f71\u54cd\uff0c\u4f9d\u7136\u4f1a\u6b63\u5e38\u8fd0\u884c\u5728\u8be5\u8282\u70b9\u4e0a\uff0c\u4ec5\u5f53\u8fd9\u4e9b Pod \u88ab\u5220\u9664\u6216\u91cd\u5efa\u65f6\uff0c\u624d\u4f1a\u8c03\u5ea6\u5230\u5176\u5b83\u975e\u72ec\u4eab\u8282\u70b9\u4e0a\u3002

                                                                          "},{"location":"end-user/kpanda/namespaces/exclusive.html#_2","title":"\u51c6\u5907\u5de5\u4f5c","text":"

                                                                          \u68c0\u67e5\u5f53\u524d\u96c6\u7fa4\u7684 kube-apiserver \u662f\u5426\u542f\u7528\u4e86 PodNodeSelector \u548c PodTolerationRestriction \u51c6\u5165\u63a7\u5236\u5668\u3002

                                                                          \u4f7f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\u529f\u80fd\u9700\u8981\u7528\u6237\u542f\u7528 kube-apiserver \u4e0a\u7684 PodNodeSelector \u548c PodTolerationRestriction \u4e24\u4e2a\u7279\u6027\u51c6\u5165\u63a7\u5236\u5668\uff08Admission Controllers\uff09\uff0c\u5173\u4e8e\u51c6\u5165\u63a7\u5236\u5668\u66f4\u591a\u8bf4\u660e\u8bf7\u53c2\u9605 kubernetes Admission Controllers Reference\u3002

                                                                          \u60a8\u53ef\u4ee5\u524d\u5f80\u5f53\u524d\u96c6\u7fa4\u4e0b\u4efb\u610f\u4e00\u4e2a Master \u8282\u70b9\u4e0a\u68c0\u67e5 kube-apiserver.yaml \u6587\u4ef6\u5185\u662f\u5426\u542f\u7528\u4e86\u8fd9\u4e24\u4e2a\u7279\u6027\uff0c\u4e5f\u53ef\u4ee5\u5728 Master \u8282\u70b9\u4e0a\u6267\u884c\u6267\u884c\u5982\u4e0b\u547d\u4ee4\u8fdb\u884c\u5feb\u901f\u68c0\u67e5\uff1a

```bash
[root@g-master1 ~]# cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep enable-admission-plugins

# Expected output:
- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction
```
                                                                          "},{"location":"end-user/kpanda/namespaces/exclusive.html#_3","title":"\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9","text":"

                                                                          \u7531\u4e8e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u8fd0\u884c\u7740 kpanda\u3001ghippo\u3001insight \u7b49\u5e73\u53f0\u57fa\u7840\u7ec4\u4ef6\uff0c\u5728 Global \u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\u5c06\u53ef\u80fd\u5bfc\u81f4\u5f53\u7cfb\u7edf\u7ec4\u4ef6\u91cd\u542f\u540e\uff0c\u7cfb\u7edf\u7ec4\u4ef6\u65e0\u6cd5\u8c03\u5ea6\u5230\u88ab\u72ec\u4eab\u7684\u8282\u70b9\u4e0a\uff0c\u5f71\u54cd\u7cfb\u7edf\u7684\u6574\u4f53\u9ad8\u53ef\u7528\u80fd\u529b\u3002\u56e0\u6b64\uff0c\u901a\u5e38\u60c5\u51b5\u4e0b\uff0c\u6211\u4eec\u4e0d\u63a8\u8350\u7528\u6237\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\u7279\u6027\u3002

                                                                          \u5982\u679c\u60a8\u786e\u5b9e\u9700\u8981\u5728\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\uff0c\u8bf7\u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\u8fdb\u884c\u5f00\u542f\uff1a

                                                                          1. \u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684 kube-apiserver \u542f\u7528\u4e86 PodNodeSelector \u548c PodTolerationRestriction \u51c6\u5165\u63a7\u5236\u5668

                                                                            Note

                                                                            \u5982\u679c\u96c6\u7fa4\u5df2\u542f\u7528\u4e86\u4e0a\u8ff0\u7684\u4e24\u4e2a\u51c6\u5165\u63a7\u5236\u5668\uff0c\u8bf7\u8df3\u8fc7\u6b64\u6b65\uff0c\u76f4\u63a5\u524d\u5f80\u914d\u7f6e\u7cfb\u7edf\u7ec4\u4ef6\u5bb9\u5fcd\u3002

                                                                            \u524d\u5f80\u5f53\u524d\u96c6\u7fa4\u4e0b\u4efb\u610f\u4e00\u4e2a Master \u8282\u70b9\u4e0a\u4fee\u6539 kube-apiserver.yaml \u914d\u7f6e\u6587\u4ef6\uff0c\u4e5f\u53ef\u4ee5\u5728 Master \u8282\u70b9\u4e0a\u6267\u884c\u6267\u884c\u5982\u4e0b\u547d\u4ee4\u8fdb\u884c\u914d\u7f6e\uff1a

```bash
[root@g-master1 ~]# vi /etc/kubernetes/manifests/kube-apiserver.yaml

# The relevant part of the file looks like this:
apiVersion: v1
kind: Pod
metadata:
    ......
spec:
  containers:
  - command:
    - kube-apiserver
    ......
    - --default-not-ready-toleration-seconds=300
    - --default-unreachable-toleration-seconds=300
    - --enable-admission-plugins=NodeRestriction   # list of enabled admission controllers
    - --enable-aggregator-routing=False
    - --enable-bootstrap-token-auth=true
    - --endpoint-reconciler-type=lease
    - --etcd-cafile=/etc/kubernetes/ssl/etcd/ca.crt
    ......
```

Find the --enable-admission-plugins parameter and add the PodNodeSelector and PodTolerationRestriction admission controllers (comma-separated). For example:

```bash
# Add PodNodeSelector and PodTolerationRestriction to the list
- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction
```
2. Add toleration annotations to the namespaces hosting platform components

   After enabling the admission controllers, you need to add toleration annotations to the namespaces where platform components reside, to preserve the high availability of the platform components.

   The system component namespaces of the 算丰 AI computing platform are currently as follows:

   | Namespace | System components included |
   | --- | --- |
   | kpanda-system | kpanda |
   | hwameiStor-system | hwameiStor |
   | istio-system | istio |
   | metallb-system | metallb |
   | cert-manager-system | cert-manager |
   | contour-system | contour |
   | kubean-system | kubean |
   | ghippo-system | ghippo |
   | kcoral-system | kcoral |
   | kcollie-system | kcollie |
   | insight-system | insight, insight-agent |
   | ipavo-system | ipavo |
   | kairship-system | kairship |
   | karmada-system | karmada |
   | amamba-system | amamba, jenkins |
   | skoala-system | skoala |
   | mspider-system | mspider |
   | mcamel-system | mcamel-rabbitmq, mcamel-elasticsearch, mcamel-mysql, mcamel-redis, mcamel-kafka, mcamel-minio, mcamel-postgresql |
   | spidernet-system | spidernet |
   | kangaroo-system | kangaroo |
   | gmagpie-system | gmagpie |
   | dowl-system | dowl |

   Check whether all of the namespaces above exist in the current cluster, and run the following command to add the annotation scheduler.alpha.kubernetes.io/defaultTolerations: '[{"operator": "Exists", "effect": "NoSchedule", "key": "ExclusiveNamespace"}]' to each of them:

```bash
kubectl annotate ns <namespace-name> scheduler.alpha.kubernetes.io/defaultTolerations='[{"operator": "Exists", "effect": "NoSchedule", "key": "ExclusiveNamespace"}]'
```
Make sure to replace <namespace-name> with the name of the platform namespace to annotate.
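
To confirm the annotation was applied, you can read it back with standard kubectl; the namespace name is a placeholder:

```bash
kubectl get ns <namespace-name> \
  -o jsonpath='{.metadata.annotations.scheduler\.alpha\.kubernetes\.io/defaultTolerations}'
```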

3. Use the UI to set exclusive nodes for a namespace

   Once you have confirmed that the PodNodeSelector and PodTolerationRestriction admission controllers are enabled on the cluster's API server, follow the steps below to set exclusive nodes for a namespace using the UI management interface of the 算丰 AI computing platform.

   1. On the cluster list page, click the cluster name, then click Namespaces in the left navigation bar.

   2. Click the namespace name, then click the Exclusive Nodes tab and click Add Node on the right below.

   3. On the left side of the page, select which nodes this namespace should have exclusively; on the right side you can clear the selection or remove an individual selected node; finally, click OK at the bottom.

   4. The namespace's existing exclusive nodes are shown in the list; on the right of a node you can choose Cancel Exclusivity.

      After exclusivity is cancelled, Pods from other namespaces can also be scheduled onto that node.
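
Since exclusivity is implemented through taints and tolerations (as described in the introduction), you can verify the effect on a node with standard kubectl; the node name is a placeholder, and expecting a taint with the key ExclusiveNamespace is an inference from the toleration annotation above:

```bash
kubectl describe node <node-name> | grep -i taint
```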

                                                                          "},{"location":"end-user/kpanda/namespaces/exclusive.html#_4","title":"\u5728 \u975e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9","text":"

                                                                          \u5728 \u975e\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u4e0a\u542f\u7528\u547d\u540d\u7a7a\u95f4\u72ec\u4eab\u8282\u70b9\uff0c\u8bf7\u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\u8fdb\u884c\u5f00\u542f\uff1a

                                                                          1. \u4e3a\u5f53\u524d\u96c6\u7fa4\u7684 kube-apiserver \u542f\u7528\u4e86 PodNodeSelector \u548c PodTolerationRestriction \u51c6\u5165\u63a7\u5236\u5668

                                                                            Note

                                                                            \u5982\u679c\u96c6\u7fa4\u5df2\u542f\u7528\u4e86\u4e0a\u8ff0\u7684\u4e24\u4e2a\u51c6\u5165\u63a7\u5236\u5668\uff0c\u8bf7\u8df3\u8fc7\u6b64\u6b65\uff0c\u76f4\u63a5\u524d\u5f80\u754c\u9762\u4e3a\u547d\u540d\u7a7a\u95f4\u8bbe\u7f6e\u72ec\u4eab\u8282\u70b9

                                                                            \u524d\u5f80\u5f53\u524d\u96c6\u7fa4\u4e0b\u4efb\u610f\u4e00\u4e2a Master \u8282\u70b9\u4e0a\u4fee\u6539 kube-apiserver.yaml \u914d\u7f6e\u6587\u4ef6\uff0c\u4e5f\u53ef\u4ee5\u5728 Master \u8282\u70b9\u4e0a\u6267\u884c\u6267\u884c\u5982\u4e0b\u547d\u4ee4\u8fdb\u884c\u914d\u7f6e\uff1a

```bash
[root@g-master1 ~]# vi /etc/kubernetes/manifests/kube-apiserver.yaml

# The relevant part of the file looks like this:
apiVersion: v1
kind: Pod
metadata:
    ......
spec:
  containers:
  - command:
    - kube-apiserver
    ......
    - --default-not-ready-toleration-seconds=300
    - --default-unreachable-toleration-seconds=300
    - --enable-admission-plugins=NodeRestriction   # list of enabled admission controllers
    - --enable-aggregator-routing=False
    - --enable-bootstrap-token-auth=true
    - --endpoint-reconciler-type=lease
    - --etcd-cafile=/etc/kubernetes/ssl/etcd/ca.crt
    ......
```

                                                                            \u627e\u5230 --enable-admission-plugins \u53c2\u6570\uff0c\u52a0\u5165\uff08\u4ee5\u82f1\u6587\u9017\u53f7\u5206\u9694\u7684\uff09 PodNodeSelector \u548c PodTolerationRestriction \u51c6\u5165\u63a7\u5236\u5668\u3002\u53c2\u8003\u5982\u4e0b\uff1a

                                                                            # \u52a0\u5165 __ ,PodNodeSelector,PodTolerationRestriction__ \n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction \n
2. Set exclusive nodes for the namespace via the UI

   Once you have confirmed that the PodNodeSelector and PodTolerationRestriction admission controllers are enabled on the cluster's API server, follow the steps below to set exclusive nodes for a namespace in the AI computing platform's management UI.

   1. On the Clusters page, click the cluster name, then click Namespaces in the left navigation bar.

   2. Click the namespace name, click the Exclusive Nodes tab, then click Add Node on the lower right.

   3. On the left side of the page, select which nodes this namespace will use exclusively. On the right you can clear the selection or remove an individual selected node. Finally, click OK at the bottom.

   4. The namespace's existing exclusive nodes appear in the list; click Cancel Exclusivity on the right of a node to release it.

      Once exclusivity is cancelled, Pods from other namespaces can be scheduled onto that node as well.

3. Add a toleration annotation to the namespaces of components that require high availability (optional)

   Run the following command to add the annotation scheduler.alpha.kubernetes.io/defaultTolerations: '[{"operator": "Exists", "effect": "NoSchedule", "key": "ExclusiveNamespace"}]' to the namespaces where components requiring high availability live:

   kubectl annotate ns <namespace-name> scheduler.alpha.kubernetes.io/defaultTolerations='[{"operator": "Exists", "effect": "NoSchedule", "key": "ExclusiveNamespace"}]'

   Make sure to replace <namespace-name> with the name of the platform namespace to annotate.
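The same default-toleration annotation can also be declared directly in a Namespace manifest instead of with kubectl annotate; a minimal sketch, assuming a hypothetical namespace named mysql-ha:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: mysql-ha   # hypothetical namespace of a component that needs high availability
  annotations:
    # PodTolerationRestriction injects this toleration into Pods created in the namespace
    scheduler.alpha.kubernetes.io/defaultTolerations: '[{"operator": "Exists", "effect": "NoSchedule", "key": "ExclusiveNamespace"}]'
```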

                                                                          "},{"location":"end-user/kpanda/namespaces/podsecurity.html","title":"\u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565","text":"

                                                                          \u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565\u6307\u5728 kubernetes \u96c6\u7fa4\u4e2d\uff0c\u901a\u8fc7\u4e3a\u6307\u5b9a\u547d\u540d\u7a7a\u95f4\u914d\u7f6e\u4e0d\u540c\u7684\u7b49\u7ea7\u548c\u6a21\u5f0f\uff0c\u5b9e\u73b0\u5728\u5b89\u5168\u7684\u5404\u4e2a\u65b9\u9762\u63a7\u5236 Pod \u7684\u884c\u4e3a\uff0c\u53ea\u6709\u6ee1\u8db3\u4e00\u5b9a\u7684\u6761\u4ef6\u7684 Pod \u624d\u4f1a\u88ab\u7cfb\u7edf\u63a5\u53d7\u3002\u5b83\u8bbe\u7f6e\u4e09\u4e2a\u7b49\u7ea7\u548c\u4e09\u79cd\u6a21\u5f0f\uff0c\u7528\u6237\u53ef\u4ee5\u6839\u636e\u81ea\u5df1\u7684\u9700\u6c42\u9009\u62e9\u66f4\u52a0\u5408\u9002\u7684\u65b9\u6848\u6765\u8bbe\u7f6e\u9650\u5236\u7b56\u7565\u3002

                                                                          Note

                                                                          \u4e00\u6761\u5b89\u5168\u6a21\u5f0f\u4ec5\u80fd\u914d\u7f6e\u4e00\u6761\u5b89\u5168\u7b56\u7565\u3002\u540c\u65f6\u8bf7\u8c28\u614e\u4e3a\u547d\u540d\u7a7a\u95f4\u914d\u7f6e enforce \u7684\u5b89\u5168\u6a21\u5f0f\uff0c\u8fdd\u53cd\u540e\u5c06\u4f1a\u5bfc\u81f4 Pod \u65e0\u6cd5\u521b\u5efa\u3002

                                                                          \u672c\u8282\u5c06\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u5bb9\u5668\u7ba1\u7406\u754c\u9762\u4e3a\u547d\u540d\u7a7a\u95f4\u914d\u7f6e\u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565\u3002

                                                                          "},{"location":"end-user/kpanda/namespaces/podsecurity.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u96c6\u7fa4\u7684\u7248\u672c\u9700\u8981\u5728 v1.22 \u4ee5\u4e0a\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                                          • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 NS Admin \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                                          "},{"location":"end-user/kpanda/namespaces/podsecurity.html#_3","title":"\u4e3a\u547d\u540d\u7a7a\u95f4\u914d\u7f6e\u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565","text":"
                                                                          1. \u9009\u62e9\u9700\u8981\u914d\u7f6e\u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u8fdb\u5165\u8be6\u60c5\u9875\u3002\u5728 \u5bb9\u5668\u7ec4\u5b89\u5168\u7b56\u7565 \u9875\u9762\u70b9\u51fb \u914d\u7f6e\u7b56\u7565 \uff0c\u8fdb\u5165\u914d\u7f6e\u9875\u3002

                                                                          2. \u5728\u914d\u7f6e\u9875\u70b9\u51fb \u6dfb\u52a0\u7b56\u7565 \uff0c\u5219\u4f1a\u51fa\u73b0\u4e00\u6761\u7b56\u7565\uff0c\u5305\u62ec\u5b89\u5168\u7ea7\u522b\u548c\u5b89\u5168\u6a21\u5f0f\uff0c\u4ee5\u4e0b\u662f\u5bf9\u5b89\u5168\u7ea7\u522b\u548c\u5b89\u5168\u7b56\u7565\u7684\u8be6\u7ec6\u4ecb\u7ecd\u3002

                                                                            \u5b89\u5168\u7ea7\u522b \u63cf\u8ff0 Privileged \u4e0d\u53d7\u9650\u5236\u7684\u7b56\u7565\uff0c\u63d0\u4f9b\u6700\u5927\u53ef\u80fd\u8303\u56f4\u7684\u6743\u9650\u8bb8\u53ef\u3002\u6b64\u7b56\u7565\u5141\u8bb8\u5df2\u77e5\u7684\u7279\u6743\u63d0\u5347\u3002 Baseline \u9650\u5236\u6027\u6700\u5f31\u7684\u7b56\u7565\uff0c\u7981\u6b62\u5df2\u77e5\u7684\u7b56\u7565\u63d0\u5347\u3002\u5141\u8bb8\u4f7f\u7528\u9ed8\u8ba4\u7684\uff08\u89c4\u5b9a\u6700\u5c11\uff09Pod \u914d\u7f6e\u3002 Restricted \u9650\u5236\u6027\u975e\u5e38\u5f3a\u7684\u7b56\u7565\uff0c\u9075\u5faa\u5f53\u524d\u7684\u4fdd\u62a4 Pod \u7684\u6700\u4f73\u5b9e\u8df5\u3002 \u5b89\u5168\u6a21\u5f0f \u63cf\u8ff0 Audit \u8fdd\u53cd\u6307\u5b9a\u7b56\u7565\u4f1a\u5728\u5ba1\u8ba1\u65e5\u5fd7\u4e2d\u6dfb\u52a0\u65b0\u7684\u5ba1\u8ba1\u4e8b\u4ef6\uff0cPod \u53ef\u4ee5\u88ab\u521b\u5efa\u3002 Warn \u8fdd\u53cd\u6307\u5b9a\u7b56\u7565\u4f1a\u8fd4\u56de\u7528\u6237\u53ef\u89c1\u7684\u544a\u8b66\u4fe1\u606f\uff0cPod \u53ef\u4ee5\u88ab\u521b\u5efa\u3002 Enforce \u8fdd\u53cd\u6307\u5b9a\u7b56\u7565\u4f1a\u5bfc\u81f4 Pod \u65e0\u6cd5\u521b\u5efa\u3002

                                                                          3. \u4e0d\u540c\u7684\u5b89\u5168\u7ea7\u522b\u5bf9\u5e94\u4e0d\u540c\u7684\u68c0\u67e5\u9879\uff0c\u82e5\u60a8\u4e0d\u77e5\u9053\u8be5\u5982\u4f55\u4e3a\u60a8\u7684\u547d\u540d\u7a7a\u95f4\u914d\u7f6e\uff0c\u53ef\u4ee5\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u7b56\u7565\u914d\u7f6e\u9879\u8bf4\u660e \u67e5\u770b\u8be6\u7ec6\u4fe1\u606f\u3002

                                                                          4. \u70b9\u51fb\u786e\u5b9a\uff0c\u82e5\u521b\u5efa\u6210\u529f\uff0c\u5219\u9875\u9762\u4e0a\u5c06\u51fa\u73b0\u60a8\u914d\u7f6e\u7684\u5b89\u5168\u7b56\u7565\u3002

                                                                          5. \u70b9\u51fb \u2507 \u8fd8\u53ef\u4ee5\u7f16\u8f91\u6216\u8005\u5220\u9664\u60a8\u914d\u7f6e\u7684\u5b89\u5168\u7b56\u7565\u3002
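As referenced above, a level/mode pair chosen here corresponds to the standard Pod Security Admission labels on the namespace. A minimal sketch, assuming a hypothetical namespace my-ns that enforces the baseline level while only warning about restricted-level violations:

```yaml
apiVersion: v1
kind: Namespace
metadata:
  name: my-ns   # hypothetical namespace
  labels:
    # enforce mode at level "baseline": violating Pods are rejected
    pod-security.kubernetes.io/enforce: baseline
    # warn mode at level "restricted": violations only return a user-visible warning
    pod-security.kubernetes.io/warn: restricted
```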

                                                                          "},{"location":"end-user/kpanda/network/create-ingress.html","title":"\u521b\u5efa\u8def\u7531\uff08Ingress\uff09","text":"

                                                                          \u5728 Kubernetes \u96c6\u7fa4\u4e2d\uff0cIngress \u516c\u5f00\u4ece\u96c6\u7fa4\u5916\u90e8\u5230\u96c6\u7fa4\u5185\u670d\u52a1\u7684 HTTP \u548c HTTPS \u8def\u7531\u3002 \u6d41\u91cf\u8def\u7531\u7531 Ingress \u8d44\u6e90\u4e0a\u5b9a\u4e49\u7684\u89c4\u5219\u63a7\u5236\u3002\u4e0b\u9762\u662f\u4e00\u4e2a\u5c06\u6240\u6709\u6d41\u91cf\u90fd\u53d1\u9001\u5230\u540c\u4e00 Service \u7684\u7b80\u5355 Ingress \u793a\u4f8b\uff1a

                                                                          Ingress \u662f\u5bf9\u96c6\u7fa4\u4e2d\u670d\u52a1\u7684\u5916\u90e8\u8bbf\u95ee\u8fdb\u884c\u7ba1\u7406\u7684 API \u5bf9\u8c61\uff0c\u5178\u578b\u7684\u8bbf\u95ee\u65b9\u5f0f\u662f HTTP\u3002Ingress \u53ef\u4ee5\u63d0\u4f9b\u8d1f\u8f7d\u5747\u8861\u3001SSL \u7ec8\u7ed3\u548c\u57fa\u4e8e\u540d\u79f0\u7684\u865a\u62df\u6258\u7ba1\u3002

                                                                          "},{"location":"end-user/kpanda/network/create-ingress.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                                                                          • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u5c06\u7528\u6237\u6388\u6743\u4e3a NS Editor \u89d2\u8272 \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002
                                                                          • \u5df2\u7ecf\u5b8c\u6210 Ingress \u5b9e\u4f8b\u7684\u521b\u5efa\uff0c\u5df2\u90e8\u7f72\u5e94\u7528\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u5e76\u4e14\u5df2\u521b\u5efa\u5bf9\u5e94 Service
                                                                          • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002
                                                                          "},{"location":"end-user/kpanda/network/create-ingress.html#_2","title":"\u521b\u5efa\u8def\u7531","text":"
                                                                          1. \u4ee5 NS Editor \u7528\u6237\u6210\u529f\u767b\u5f55\u540e\uff0c\u70b9\u51fb\u5de6\u4e0a\u89d2\u7684 \u96c6\u7fa4\u5217\u8868 \u8fdb\u5165 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u3002\u5728\u96c6\u7fa4\u5217\u8868\u4e2d\uff0c\u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\u3002

                                                                          2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u70b9\u51fb \u5bb9\u5668\u7f51\u7edc -> \u8def\u7531 \u8fdb\u5165\u670d\u52a1\u5217\u8868\uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 \u521b\u5efa\u8def\u7531 \u6309\u94ae\u3002

                                                                            Note

                                                                            \u4e5f\u53ef\u4ee5\u901a\u8fc7 YAML \u521b\u5efa \u4e00\u4e2a\u8def\u7531\u3002

                                                                          3. \u6253\u5f00 \u521b\u5efa\u8def\u7531 \u9875\u9762\uff0c\u8fdb\u884c\u914d\u7f6e\u3002\u53ef\u9009\u62e9\u4e24\u79cd\u534f\u8bae\u7c7b\u578b\uff0c\u53c2\u8003\u4ee5\u4e0b\u4e24\u4e2a\u53c2\u6570\u8868\u8fdb\u884c\u914d\u7f6e\u3002

                                                                          "},{"location":"end-user/kpanda/network/create-ingress.html#http","title":"\u521b\u5efa HTTP \u534f\u8bae\u8def\u7531","text":"

                                                                          \u8f93\u5165\u5982\u4e0b\u53c2\u6570\uff1a

                                                                          • \u8def\u7531\u540d\u79f0 \uff1a\u5fc5\u586b\uff0c\u8f93\u5165\u65b0\u5efa\u8def\u7531\u7684\u540d\u79f0\u3002
                                                                          • \u547d\u540d\u7a7a\u95f4 \uff1a\u5fc5\u586b\uff0c\u9009\u62e9\u65b0\u5efa\u670d\u52a1\u6240\u5728\u7684\u547d\u540d\u7a7a\u95f4\u3002\u5173\u4e8e\u547d\u540d\u7a7a\u95f4\u66f4\u591a\u4fe1\u606f\u8bf7\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6982\u8ff0\u3002
                                                                          • \u8bbe\u7f6e\u8def\u7531\u89c4\u5219 \uff1a
                                                                            • \u57df\u540d \uff1a\u5fc5\u586b\uff0c\u4f7f\u7528\u57df\u540d\u5bf9\u5916\u63d0\u4f9b\u8bbf\u95ee\u670d\u52a1\u3002\u9ed8\u8ba4\u4e3a\u96c6\u7fa4\u7684\u57df\u540d\u3002
                                                                            • \u534f\u8bae \uff1a\u5fc5\u586b\uff0c\u6307\u6388\u6743\u5165\u7ad9\u5230\u8fbe\u96c6\u7fa4\u670d\u52a1\u7684\u534f\u8bae\uff0c\u652f\u6301 HTTP \uff08\u4e0d\u9700\u8981\u8eab\u4efd\u8ba4\u8bc1\uff09\u6216 HTTPS\uff08\u9700\u9700\u8981\u914d\u7f6e\u8eab\u4efd\u8ba4\u8bc1\uff09 \u534f\u8bae\u3002 \u8fd9\u91cc\u9009\u62e9 HTTP \u534f\u8bae\u7684\u8def\u7531\u3002
                                                                            • \u8f6c\u53d1\u7b56\u7565 \uff1a\u9009\u586b\uff0c\u6307\u5b9a Ingress \u7684\u8bbf\u95ee\u7b56\u7565
                                                                            • \u8def\u5f84 \uff1a\u6307\u5b9a\u670d\u52a1\u8bbf\u95ee\u7684URL\u8def\u5f84\uff0c\u9ed8\u8ba4\u4e3a\u6839\u8def\u5f84
                                                                            • \u76ee\u6807\u670d\u52a1 \uff1a\u8fdb\u884c\u8def\u7531\u7684\u670d\u52a1\u540d\u79f0
                                                                            • \u76ee\u6807\u670d\u52a1\u7aef\u53e3 \uff1a\u670d\u52a1\u5bf9\u5916\u66b4\u9732\u7684\u7aef\u53e3
                                                                          • \u8d1f\u8f7d\u5747\u8861\u5668\u7c7b\u578b \uff1a\u5fc5\u586b\uff0cIngress \u5b9e\u4f8b\u7684\u4f7f\u7528\u8303\u56f4
                                                                            • \u5e73\u53f0\u7ea7\u8d1f\u8f7d\u5747\u8861\u5668 \uff1a\u540c\u4e00\u4e2a\u96c6\u7fa4\u5185\uff0c\u5171\u4eab\u540c\u4e00\u4e2a Ingress \u5b9e\u4f8b\uff0c\u5176\u4e2d Pod \u90fd\u53ef\u4ee5\u63a5\u6536\u5230\u7531\u8be5\u8d1f\u8f7d\u5747\u8861\u5206\u53d1\u7684\u8bf7\u6c42
                                                                            • \u79df\u6237\u7ea7\u8d1f\u8f7d\u5747\u8861\u5668 \uff1a\u79df\u6237\u8d1f\u8f7d\u5747\u8861\u5668\uff0cIngress \u5b9e\u4f8b\u72ec\u5c5e\u4e8e\u5f53\u524d\u547d\u540d\u7a7a\uff0c\u6216\u8005\u72ec\u5c5e\u4e8e\u67d0\u4e00\u5de5\u4f5c\u7a7a\u95f4\uff0c \u5e76\u4e14\u8bbe\u7f6e\u7684\u5de5\u4f5c\u7a7a\u95f4\u4e2d\u5305\u542b\u5f53\u524d\u547d\u540d\u7a7a\u95f4\uff0c\u5176\u4e2d Pod \u90fd\u53ef\u4ee5\u63a5\u6536\u5230\u7531\u8be5\u8d1f\u8f7d\u5747\u8861\u5206\u53d1\u7684\u8bf7\u6c42
                                                                          • Ingress Class \uff1a\u9009\u586b\uff0c\u9009\u62e9\u5bf9\u5e94\u7684 Ingress \u5b9e\u4f8b\uff0c\u9009\u62e9\u540e\u5c06\u6d41\u91cf\u5bfc\u5165\u5230\u6307\u5b9a\u7684 Ingress \u5b9e\u4f8b\u3002
                                                                            • \u4e3a None \u65f6\u4f7f\u7528\u9ed8\u8ba4\u7684 DefaultClass\uff0c\u8bf7\u5728\u521b\u5efa Ingress \u5b9e\u4f8b\u65f6\u8bbe\u7f6e DefaultClass\uff0c \u66f4\u591a\u4fe1\u606f\u8bf7\u53c2\u8003 Ingress Class
                                                                            • \u82e5\u9009\u62e9\u5176\u4ed6\u5b9e\u4f8b\uff08\u5982 ngnix \uff09\uff0c\u5219\u4f1a\u51fa\u73b0\u9ad8\u7ea7\u914d\u7f6e\uff0c\u53ef\u8bbe\u7f6e \u4f1a\u8bdd\u4fdd\u6301 \u3001 \u8def\u5f84\u91cd\u5199 \u3001 \u91cd\u5b9a\u5411 \u548c \u6d41\u91cf\u5206\u53d1 \u3002
                                                                          • \u4f1a\u8bdd\u4fdd\u6301 \uff1a\u9009\u586b\uff0c\u4f1a\u8bdd\u4fdd\u6301\u5206\u4e3a \u4e09\u79cd\u7c7b\u578b\uff1a L4 \u6e90\u5730\u5740\u54c8\u5e0c \u3001 Cookie Key \u3001 L7 Header Name \uff0c\u5f00\u542f\u540e\u6839\u636e\u5bf9\u5e94\u89c4\u5219\u8fdb\u884c\u4f1a\u8bdd\u4fdd\u6301\u3002
                                                                            • L4 \u6e90\u5730\u5740\u54c8\u5e0c \uff1a\u5f00\u542f\u540e\u9ed8\u8ba4\u5728 Annotation \u4e2d\u52a0\u5165\u5982\u4e0b\u6807\u7b7e\uff1a nginx.ingress.kubernetes.io/upstream-hash-by: \"$binary_remote_addr\"
                                                                            • Cookie Key \uff1a\u5f00\u542f\u540e\u6765\u81ea\u7279\u5b9a\u5ba2\u6237\u7aef\u7684\u8fde\u63a5\u5c06\u4f20\u9012\u81f3\u76f8\u540c Pod\uff0c\u5f00\u542f\u540e \u9ed8\u8ba4\u5728 Annotation \u4e2d\u589e\u52a0\u5982\u4e0b\u53c2\u6570\uff1a nginx.ingress.kubernetes.io/affinity: \"cookie\"\u3002nginx.ingress.kubernetes.io/affinity-mode: persistent
                                                                            • L7 Header Name \uff1a\u5f00\u542f\u540e\u9ed8\u8ba4\u5728 Annotation \u4e2d\u52a0\u5165\u5982\u4e0b\u6807\u7b7e\uff1a nginx.ingress.kubernetes.io/upstream-hash-by: \"$http_x_forwarded_for\"
                                                                          • \u8def\u5f84\u91cd\u5199 \uff1a\u9009\u586b\uff0c rewrite-target \uff0c\u67d0\u4e9b\u573a\u666f\u4e2d\u540e\u7aef\u670d\u52a1\u66b4\u9732\u7684URL\u4e0eIngress\u89c4\u5219\u4e2d\u6307\u5b9a\u7684\u8def\u5f84\u4e0d\u540c\uff0c\u5982\u679c\u4e0d\u8fdb\u884cURL\u91cd\u5199\u914d\u7f6e\uff0c\u8bbf\u95ee\u4f1a\u51fa\u73b0\u9519\u8bef\u3002
                                                                          • \u91cd\u5b9a\u5411 \uff1a\u9009\u586b\uff0c permanent-redirect \uff0c\u6c38\u4e45\u91cd\u5b9a\u5411\uff0c\u8f93\u5165\u91cd\u5199\u8def\u5f84\u540e\uff0c\u8bbf\u95ee\u8def\u5f84\u91cd\u5b9a\u5411\u81f3\u8bbe\u7f6e\u7684\u5730\u5740\u3002
                                                                          • \u6d41\u91cf\u5206\u53d1 \uff1a\u9009\u586b\uff0c\u5f00\u542f\u540e\u5e76\u8bbe\u7f6e\u540e\uff0c\u6839\u636e\u8bbe\u5b9a\u6761\u4ef6\u8fdb\u884c\u6d41\u91cf\u5206\u53d1\u3002
                                                                            • \u57fa\u4e8e\u6743\u91cd \uff1a\u8bbe\u5b9a\u6743\u91cd\u540e\uff0c\u5728\u521b\u5efa\u7684 Ingress \u6dfb\u52a0\u5982\u4e0b Annotation\uff1a nginx.ingress.kubernetes.io/canary-weight: \"10\"
                                                                            • \u57fa\u4e8e Cookie \uff1a\u8bbe\u5b9a Cookie \u89c4\u5219\u540e\uff0c\u6d41\u91cf\u6839\u636e\u8bbe\u5b9a\u7684 Cookie \u6761\u4ef6\u8fdb\u884c\u6d41\u91cf\u5206\u53d1
                                                                            • \u57fa\u4e8e Header \uff1a \u8bbe\u5b9a Header \u89c4\u5219\u540e\uff0c\u6d41\u91cf\u6839\u636e\u8bbe\u5b9a\u7684 Header \u6761\u4ef6\u8fdb\u884c\u6d41\u91cf\u5206\u53d1
                                                                          • \u6807\u7b7e \uff1a\u9009\u586b\uff0c\u4e3a\u8def\u7531\u6dfb\u52a0\u6807\u7b7e
                                                                          • \u6ce8\u89e3 \uff1a\u9009\u586b\uff0c\u4e3a\u8def\u7531\u6dfb\u52a0\u6ce8\u89e3
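For reference, the parameters above map onto a standard Ingress resource. A minimal sketch of an HTTP route, with hypothetical names (demo-ingress, demo-svc, demo.example.com) and cookie-based session affinity enabled:

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: demo-ingress            # hypothetical route name
  namespace: default
  annotations:
    # Cookie-based session affinity, as described in the list above
    nginx.ingress.kubernetes.io/affinity: "cookie"
    nginx.ingress.kubernetes.io/affinity-mode: "persistent"
spec:
  ingressClassName: nginx       # omit to fall back to the cluster's DefaultClass
  rules:
    - host: demo.example.com    # hypothetical domain
      http:
        paths:
          - path: /             # root path by default
            pathType: Prefix
            backend:
              service:
                name: demo-svc  # target service (hypothetical)
                port:
                  number: 80    # target service port
```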
                                                                          "},{"location":"end-user/kpanda/network/create-ingress.html#https","title":"\u521b\u5efa HTTPS \u534f\u8bae\u8def\u7531","text":"

                                                                          \u8f93\u5165\u5982\u4e0b\u53c2\u6570\uff1a

                                                                          Note

                                                                          \u6ce8\u610f\uff1a\u4e0e HTTP \u534f\u8bae \u8bbe\u7f6e\u8def\u7531\u89c4\u5219 \u4e0d\u540c\uff0c\u589e\u52a0\u5bc6\u94a5\u9009\u62e9\u8bc1\u4e66\uff0c\u5176\u4ed6\u57fa\u672c\u4e00\u81f4\u3002

                                                                          • \u534f\u8bae \uff1a\u5fc5\u586b\u6307\u6388\u6743\u5165\u7ad9\u5230\u8fbe\u96c6\u7fa4\u670d\u52a1\u7684\u534f\u8bae\uff0c\u652f\u6301 HTTP \uff08\u4e0d\u9700\u8981\u8eab\u4efd\u8ba4\u8bc1\uff09\u6216 HTTPS\uff08\u9700\u9700\u8981\u914d\u7f6e\u8eab\u4efd\u8ba4\u8bc1\uff09 \u534f\u8bae\u3002\u8fd9\u91cc\u9009\u62e9 HTTPS \u534f\u8bae\u7684\u8def\u7531\u3002
                                                                          • \u5bc6\u94a5 \uff1a\u5fc5\u586b\uff0cHttps TLS \u8bc1\u4e66\uff0c\u521b\u5efa\u79d8\u94a5\u3002
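The HTTPS variant only adds a tls section referencing the certificate secret. A minimal sketch, assuming a hypothetical kubernetes.io/tls Secret named demo-tls already exists in the same namespace:

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: demo-ingress-https      # hypothetical route name
  namespace: default
spec:
  tls:
    - hosts:
        - demo.example.com      # hypothetical domain
      secretName: demo-tls      # the TLS certificate secret selected above
  rules:
    - host: demo.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: demo-svc  # hypothetical target service
                port:
                  number: 80
```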
                                                                          "},{"location":"end-user/kpanda/network/create-ingress.html#_3","title":"\u5b8c\u6210\u8def\u7531\u521b\u5efa","text":"

                                                                          \u914d\u7f6e\u5b8c\u6240\u6709\u53c2\u6570\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u6309\u94ae\uff0c\u81ea\u52a8\u8fd4\u56de\u8def\u7531\u5217\u8868\u3002\u5728\u5217\u8868\u53f3\u4fa7\uff0c\u70b9\u51fb \u2507 \uff0c\u53ef\u4ee5\u4fee\u6539\u6216\u5220\u9664\u6240\u9009\u8def\u7531\u3002

                                                                          "},{"location":"end-user/kpanda/network/create-services.html","title":"\u521b\u5efa\u670d\u52a1\uff08Service\uff09","text":"

                                                                          \u5728 Kubernetes \u96c6\u7fa4\u4e2d\uff0c\u6bcf\u4e2a Pod \u90fd\u6709\u4e00\u4e2a\u5185\u90e8\u72ec\u7acb\u7684 IP \u5730\u5740\uff0c\u4f46\u662f\u5de5\u4f5c\u8d1f\u8f7d\u4e2d\u7684 Pod \u53ef\u80fd\u4f1a\u88ab\u968f\u65f6\u521b\u5efa\u548c\u5220\u9664\uff0c\u76f4\u63a5\u4f7f\u7528 Pod IP \u5730\u5740\u5e76\u4e0d\u80fd\u5bf9\u5916\u63d0\u4f9b\u670d\u52a1\u3002

                                                                          \u8fd9\u5c31\u9700\u8981\u521b\u5efa\u670d\u52a1\uff0c\u901a\u8fc7\u670d\u52a1\u60a8\u4f1a\u83b7\u5f97\u4e00\u4e2a\u56fa\u5b9a\u7684 IP \u5730\u5740\uff0c\u4ece\u800c\u5b9e\u73b0\u5de5\u4f5c\u8d1f\u8f7d\u524d\u7aef\u548c\u540e\u7aef\u7684\u89e3\u8026\uff0c\u8ba9\u5916\u90e8\u7528\u6237\u80fd\u591f\u8bbf\u95ee\u670d\u52a1\u3002\u540c\u65f6\uff0c\u670d\u52a1\u8fd8\u63d0\u4f9b\u4e86\u8d1f\u8f7d\u5747\u8861\uff08LoadBalancer\uff09\u529f\u80fd\uff0c\u4f7f\u7528\u6237\u80fd\u4ece\u516c\u7f51\u8bbf\u95ee\u5230\u5de5\u4f5c\u8d1f\u8f7d\u3002

                                                                          "},{"location":"end-user/kpanda/network/create-services.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                                          • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u5c06\u7528\u6237\u6388\u6743\u4e3a NS Editor \u89d2\u8272 \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                                          • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                                                                          "},{"location":"end-user/kpanda/network/create-services.html#_2","title":"\u521b\u5efa\u670d\u52a1","text":"
                                                                          1. \u4ee5 NS Editor \u7528\u6237\u6210\u529f\u767b\u5f55\u540e\uff0c\u70b9\u51fb\u5de6\u4e0a\u89d2\u7684 \u96c6\u7fa4\u5217\u8868 \u8fdb\u5165 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u3002\u5728\u96c6\u7fa4\u5217\u8868\u4e2d\uff0c\u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\u3002

                                                                          2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u4e2d\uff0c\u70b9\u51fb \u5bb9\u5668\u7f51\u7edc -> \u670d\u52a1 \u8fdb\u5165\u670d\u52a1\u5217\u8868\uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2 \u521b\u5efa\u670d\u52a1 \u6309\u94ae\u3002

                                                                            Tip

                                                                            \u4e5f\u53ef\u4ee5\u901a\u8fc7 YAML \u521b\u5efa \u4e00\u4e2a\u670d\u52a1\u3002

                                                                          3. \u6253\u5f00 \u521b\u5efa\u670d\u52a1 \u9875\u9762\uff0c\u9009\u62e9\u4e00\u79cd\u8bbf\u95ee\u7c7b\u578b\uff0c\u53c2\u8003\u4ee5\u4e0b\u51e0\u4e2a\u53c2\u6570\u8868\u8fdb\u884c\u914d\u7f6e\u3002

                                                                          "},{"location":"end-user/kpanda/network/create-services.html#clusterip","title":"\u521b\u5efa ClusterIP \u670d\u52a1","text":"

                                                                          \u70b9\u9009 \u96c6\u7fa4\u5185\u8bbf\u95ee\uff08ClusterIP\uff09 \uff0c\u8fd9\u662f\u6307\u901a\u8fc7\u96c6\u7fa4\u7684\u5185\u90e8 IP \u66b4\u9732\u670d\u52a1\uff0c\u9009\u62e9\u6b64\u9879\u7684\u670d\u52a1\u53ea\u80fd\u5728\u96c6\u7fa4\u5185\u90e8\u8bbf\u95ee\u3002\u8fd9\u662f\u9ed8\u8ba4\u7684\u670d\u52a1\u7c7b\u578b\u3002\u53c2\u8003\u4e0b\u8868\u914d\u7f6e\u53c2\u6570\u3002

| Parameter | Description | Example |
| --- | --- | --- |
| Access Type | Required. Specifies how the Pod service is discovered; here, select Intra-Cluster Access (ClusterIP). | ClusterIP |
| Service Name | Required. The name of the new service. Enter a string of 4 to 63 characters that may contain lowercase letters, digits, and hyphens (-), beginning with a lowercase letter and ending with a lowercase letter or digit. | svc-01 |
| Namespace | Required. The namespace the new service belongs to; for more information about namespaces, see Namespace Overview. The same character rules as the service name apply. | default |
| Label Selector | Required. Add labels; the Service selects Pods by label. Click "Add" after filling one in. You can also reference the labels of an existing workload: click Reference Workload Label, select a workload in the popup, and the system uses the selected workload's labels as the selector by default. | app:job01 |
| Port Configuration | Required. Add protocol ports for the service; first select the protocol type (TCP and UDP are currently supported). Port name: a custom name for the port. Service port (port): the access port the Pod exposes externally. Container port (targetPort): the container port the workload actually listens on, used to expose the service inside the cluster. | |
| Session Affinity | Optional. When enabled, requests from the same client are forwarded to the same Pod. | Enabled |
| Maximum Session Sticky Time | Optional. After session affinity is enabled, the maximum time the session is kept; 30 seconds by default. | 30 seconds |
| Annotations | Optional. Add annotations to the service. | |
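A minimal manifest sketch of the ClusterIP service described by the table, using the example selector app: job01 and an assumed container port 8080:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: svc-01            # service name
  namespace: default
spec:
  type: ClusterIP         # the default service type
  selector:
    app: job01            # label selector matching the workload's Pods
  ports:
    - name: http          # port name
      protocol: TCP
      port: 80            # service port exposed by the Service
      targetPort: 8080    # container port the workload actually listens on (assumed)
  sessionAffinity: ClientIP          # optional session affinity
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 30             # maximum session sticky time (default)
```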

"},{"location":"end-user/kpanda/network/create-services.html#nodeport","title":"Create a NodePort Service","text":"

Select Node Access (NodePort), which exposes the service through the IP and a static port (NodePort) on each node. A NodePort service routes to an automatically created ClusterIP service. By requesting <node IP>:<node port>, you can access a NodePort service from outside the cluster. Configure the parameters in the table below (a minimal manifest sketch follows the table).

| Parameter | Description | Example |
| --- | --- | --- |
| Access Type | Required. Specifies how the Pod service is discovered; here, select Node Access (NodePort). | NodePort |
| Service Name | Required. The name of the new service. Enter a string of 4 to 63 characters that may contain lowercase letters, digits, and hyphens (-), beginning with a lowercase letter and ending with a lowercase letter or digit. | svc-01 |
| Namespace | Required. The namespace the new service belongs to; for more information about namespaces, see Namespace Overview. The same character rules as the service name apply. | default |
| Label Selector | Required. Add labels; the Service selects Pods by label. Click "Add" after filling one in. You can also reference the labels of an existing workload: click Reference Workload Label, select a workload in the popup, and the system uses the selected workload's labels as the selector by default. | |
| Port Configuration | Required. Add protocol ports for the service; first select the protocol type (TCP and UDP are currently supported). Port name: a custom name for the port. Service port (port): the access port the Pod exposes externally; by default, for convenience, it is set to the same value as the container port. Container port (targetPort): the container port the workload actually listens on. Node port (nodePort): the port on the node that receives traffic forwarded from the ClusterIP; it serves as the entry point for external traffic. | |
| Session Affinity | Optional. When enabled, requests from the same client are forwarded to the same Pod; when enabled, the Service's .spec.sessionAffinity is ClientIP. For details, see Session Affinity for Services. | Enabled |
| Maximum Session Sticky Time | Optional. After session affinity is enabled, the maximum time the session is kept; .spec.sessionAffinityConfig.clientIP.timeoutSeconds defaults to 30 seconds. | 30 seconds |
| Annotations | Optional. Add annotations to the service. | |
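A minimal manifest sketch of the NodePort variant; the nodePort value 30080 is hypothetical and must fall within the cluster's NodePort range (30000-32767 by default):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: svc-01
  namespace: default
spec:
  type: NodePort
  selector:
    app: job01          # label selector matching the workload's Pods
  ports:
    - name: http
      protocol: TCP
      port: 80          # service port
      targetPort: 8080  # container port (assumed)
      nodePort: 30080   # node port; the entry point for external traffic
```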

"},{"location":"end-user/kpanda/network/create-services.html#loadbalancer","title":"Create a LoadBalancer Service","text":"

Select Load Balancer (LoadBalancer), which exposes the service externally through a cloud provider's load balancer. The external load balancer routes traffic to the automatically created NodePort and ClusterIP services. Configure the parameters in the table below (a minimal manifest sketch follows the table).

| Parameter | Description | Example |
| --- | --- | --- |
| Access Type | Required. Specifies how the Pod service is discovered; here, select Load Balancer (LoadBalancer). | LoadBalancer |
| Service Name | Required. The name of the new service. Enter a string of 4 to 63 characters that may contain lowercase letters, digits, and hyphens (-), beginning with a lowercase letter and ending with a lowercase letter or digit. | svc-01 |
| Namespace | Required. The namespace the new service belongs to; for more information about namespaces, see Namespace Overview. The same character rules as the service name apply. | default |
| External Traffic Policy | Required. Sets the external traffic policy. Cluster: traffic can be forwarded to Pods on any node in the cluster. Local: traffic is only sent to Pods on the node that received it. | |
| Label Selector | Required. Add labels; the Service selects Pods by label. Click "Add" after filling one in. You can also reference the labels of an existing workload: click Reference Workload Label, select a workload in the popup, and the system uses the selected workload's labels as the selector by default. | |
| Load Balancer Type | Required. The type of load balancer to use; MetalLB and others are currently supported. | MetalLB |
| MetalLB IP Pool | Required. When the selected load balancer type is MetalLB, the LoadBalancer Service allocates IP addresses from this pool by default and announces all the IPs in the pool via ARP. | |
| Load Balancer Address | Required. 1. If you use a public cloud CloudProvider, fill in the load balancer address provided by the cloud vendor. 2. If the load balancer type above is MetalLB, an IP is taken from the IP pool above by default; if left empty, one is obtained automatically. | |
| Port Configuration | Required. Add protocol ports for the service; first select the protocol type (TCP and UDP are currently supported). Port name: a custom name for the port. Service port (port): the access port the Pod exposes externally; by default, for convenience, it is set to the same value as the container port. Container port (targetPort): the container port the workload actually listens on. Node port (nodePort): the port on the node that receives traffic forwarded from the ClusterIP; it serves as the entry point for external traffic. | |
| Annotations | Optional. Add annotations to the service. | |
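A minimal manifest sketch of the LoadBalancer variant; the MetalLB address-pool annotation shown is an assumption (the pool name default-pool is hypothetical) and can be omitted to let the controller pick a pool automatically:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: svc-01
  namespace: default
  annotations:
    metallb.universe.tf/address-pool: default-pool  # assumed MetalLB pool selection
spec:
  type: LoadBalancer
  externalTrafficPolicy: Local  # or Cluster, as described in the table above
  selector:
    app: job01                  # label selector matching the workload's Pods
  ports:
    - name: http
      protocol: TCP
      port: 80
      targetPort: 8080          # container port (assumed)
```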

"},{"location":"end-user/kpanda/network/create-services.html#externalname","title":"Create an ExternalName Service","text":"

Select External Service (ExternalName), which exposes the service by mapping it to an external domain name. A service of this type does not create a typical ClusterIP or NodePort; instead, requests are redirected to the external service address through DNS resolution. Configure the parameters in the table below (a minimal manifest sketch follows the table).

| Parameter | Description | Example |
| --- | --- | --- |
| Access Type | Required. Specifies how the Pod service is discovered; here, select External Service (ExternalName). | ExternalName |
| Service Name | Required. The name of the new service. Enter a string of 4 to 63 characters that may contain lowercase letters, digits, and hyphens (-), beginning with a lowercase letter and ending with a lowercase letter or digit. | svc-01 |
| Namespace | Required. The namespace the new service belongs to; for more information about namespaces, see Namespace Overview. The same character rules as the service name apply. | default |
| Domain | Required. | |
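A minimal manifest sketch of an ExternalName service; the external domain is hypothetical:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: svc-01
  namespace: default
spec:
  type: ExternalName
  externalName: db.example.com  # hypothetical external domain, resolved via a DNS CNAME record
```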

"},{"location":"end-user/kpanda/network/create-services.html#_3","title":"Complete the Service Creation","text":"

After configuring all the parameters, click OK; you will return to the service list automatically. On the right side of the list, click ┇ to modify or delete the selected service.

                                                                          "},{"location":"end-user/kpanda/network/network-policy.html","title":"\u7f51\u7edc\u7b56\u7565","text":"

                                                                          \u7f51\u7edc\u7b56\u7565\uff08NetworkPolicy\uff09\u53ef\u4ee5\u5728 IP \u5730\u5740\u6216\u7aef\u53e3\u5c42\u9762\uff08OSI \u7b2c 3 \u5c42\u6216\u7b2c 4 \u5c42\uff09\u63a7\u5236\u7f51\u7edc\u6d41\u91cf\u3002\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u76ee\u524d\u652f\u6301\u521b\u5efa\u57fa\u4e8e Pod \u6216\u547d\u540d\u7a7a\u95f4\u7684\u7f51\u7edc\u7b56\u7565\uff0c\u652f\u6301\u901a\u8fc7\u6807\u7b7e\u9009\u62e9\u5668\u6765\u8bbe\u5b9a\u54ea\u4e9b\u6d41\u91cf\u53ef\u4ee5\u8fdb\u5165\u6216\u79bb\u5f00\u5e26\u6709\u7279\u5b9a\u6807\u7b7e\u7684 Pod\u3002

                                                                          \u6709\u5173\u7f51\u7edc\u7b56\u7565\u7684\u66f4\u591a\u8be6\u60c5\uff0c\u53ef\u53c2\u8003 Kubernetes \u5b98\u65b9\u6587\u6863\u7f51\u7edc\u7b56\u7565\u3002

                                                                          "},{"location":"end-user/kpanda/network/network-policy.html#_2","title":"\u521b\u5efa\u7f51\u7edc\u7b56\u7565","text":"

                                                                          \u76ee\u524d\u652f\u6301\u901a\u8fc7 YAML \u548c\u8868\u5355\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u7f51\u7edc\u7b56\u7565\uff0c\u8fd9\u4e24\u79cd\u65b9\u5f0f\u5404\u6709\u4f18\u52a3\uff0c\u53ef\u4ee5\u6ee1\u8db3\u4e0d\u540c\u7528\u6237\u7684\u4f7f\u7528\u9700\u6c42\u3002

                                                                          \u901a\u8fc7 YAML \u521b\u5efa\u6b65\u9aa4\u66f4\u5c11\u3001\u66f4\u9ad8\u6548\uff0c\u4f46\u95e8\u69db\u8981\u6c42\u8f83\u9ad8\uff0c\u9700\u8981\u719f\u6089\u7f51\u7edc\u7b56\u7565\u7684 YAML \u6587\u4ef6\u914d\u7f6e\u3002

                                                                          \u901a\u8fc7\u8868\u5355\u521b\u5efa\u66f4\u76f4\u89c2\u66f4\u7b80\u5355\uff0c\u6839\u636e\u63d0\u793a\u586b\u5199\u5bf9\u5e94\u7684\u503c\u5373\u53ef\uff0c\u4f46\u6b65\u9aa4\u66f4\u52a0\u7e41\u7410\u3002

                                                                          "},{"location":"end-user/kpanda/network/network-policy.html#yaml","title":"YAML \u521b\u5efa","text":"
                                                                          1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u7f51\u7edc -> \u7f51\u7edc\u7b56\u7565 -> YAML \u521b\u5efa \u3002

                                                                          2. \u5728\u5f39\u6846\u4e2d\u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u7136\u540e\u5728\u5f39\u6846\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002
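A minimal example of a NetworkPolicy you might paste here, assuming hypothetical labels app: backend and app: frontend; it only admits ingress traffic to the backend Pods from frontend Pods on TCP port 8080:

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-frontend        # hypothetical policy name
  namespace: default
spec:
  podSelector:
    matchLabels:
      app: backend            # the Pods this policy protects
  policyTypes:
    - Ingress
  ingress:
    - from:
        - podSelector:
            matchLabels:
              app: frontend   # only frontend Pods may connect
      ports:
        - protocol: TCP
          port: 8080
```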

                                                                          "},{"location":"end-user/kpanda/network/network-policy.html#_3","title":"\u8868\u5355\u521b\u5efa","text":"
                                                                          1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u7f51\u7edc -> \u7f51\u7edc\u7b56\u7565 -> \u521b\u5efa\u7b56\u7565 \u3002

                                                                          2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\u3002

                                                                            \u540d\u79f0\u548c\u547d\u540d\u7a7a\u95f4\u5728\u521b\u5efa\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002

                                                                          3. \u586b\u5199\u7b56\u7565\u914d\u7f6e\u3002

                                                                            \u7b56\u7565\u914d\u7f6e\u5206\u4e3a\u5165\u6d41\u91cf\u7b56\u7565\u548c\u51fa\u6d41\u91cf\u7b56\u7565\u3002\u5982\u679c\u6e90 Pod \u60f3\u8981\u6210\u529f\u8fde\u63a5\u5230\u76ee\u6807 Pod\uff0c\u6e90 Pod \u7684\u51fa\u6d41\u91cf\u7b56\u7565\u548c\u76ee\u6807 Pod \u7684\u5165\u6d41\u91cf\u7b56\u7565\u90fd\u9700\u8981\u5141\u8bb8\u8fde\u63a5\u3002\u5982\u679c\u4efb\u4f55\u4e00\u65b9\u4e0d\u5141\u8bb8\u8fde\u63a5\uff0c\u90fd\u4f1a\u5bfc\u81f4\u8fde\u63a5\u5931\u8d25\u3002

                                                                            • \u5165\u6d41\u91cf\u7b56\u7565\uff1a\u70b9\u51fb \u2795 \u5f00\u59cb\u914d\u7f6e\u7b56\u7565\uff0c\u652f\u6301\u914d\u7f6e\u591a\u6761\u7b56\u7565\u3002\u591a\u6761\u7f51\u7edc\u7b56\u7565\u7684\u6548\u679c\u76f8\u4e92\u53e0\u52a0\uff0c\u53ea\u6709\u540c\u65f6\u6ee1\u8db3\u6240\u6709\u7f51\u7edc\u7b56\u7565\uff0c\u624d\u80fd\u6210\u529f\u5efa\u7acb\u8fde\u63a5\u3002

                                                                            • \u51fa\u6d41\u91cf\u7b56\u7565

                                                                          "},{"location":"end-user/kpanda/network/network-policy.html#_4","title":"\u67e5\u770b\u7f51\u7edc\u7b56\u7565","text":"
                                                                          1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u7f51\u7edc -> \u7f51\u7edc\u7b56\u7565 \uff0c\u70b9\u51fb\u7f51\u7edc\u7b56\u7565\u7684\u540d\u79f0\u3002

                                                                          2. \u67e5\u770b\u8be5\u7b56\u7565\u7684\u57fa\u672c\u914d\u7f6e\u3001\u5173\u8054\u5b9e\u4f8b\u4fe1\u606f\u3001\u5165\u6d41\u91cf\u7b56\u7565\u3001\u51fa\u6d41\u91cf\u7b56\u7565\u3002

                                                                          Info

                                                                          \u5728\u5173\u8054\u5b9e\u4f8b\u9875\u7b7e\u4e0b\uff0c\u652f\u6301\u67e5\u770b\u5b9e\u4f8b\u76d1\u63a7\u3001\u65e5\u5fd7\u3001\u5bb9\u5668\u5217\u8868\u3001YAML \u6587\u4ef6\u3001\u4e8b\u4ef6\u7b49\u3002

                                                                          "},{"location":"end-user/kpanda/network/network-policy.html#_5","title":"\u66f4\u65b0\u7f51\u7edc\u7b56\u7565","text":"

                                                                          \u6709\u4e24\u79cd\u9014\u5f84\u53ef\u4ee5\u66f4\u65b0\u7f51\u7edc\u7b56\u7565\u3002\u652f\u6301\u901a\u8fc7\u8868\u5355\u6216 YAML \u6587\u4ef6\u66f4\u65b0\u7f51\u7edc\u7b56\u7565\u3002

                                                                          • \u5728\u7f51\u7edc\u7b56\u7565\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684\u7b56\u7565\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                                                                          • \u70b9\u51fb\u7f51\u7edc\u7b56\u7565\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u7f51\u7edc\u7b56\u7565\u7684\u8be6\u60c5\u9875\u9762\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                                                                          "},{"location":"end-user/kpanda/network/network-policy.html#_6","title":"\u5220\u9664\u7f51\u7edc\u7b56\u7565","text":"

                                                                          \u6709\u4e24\u79cd\u9014\u5f84\u53ef\u4ee5\u5220\u9664\u7f51\u7edc\u7b56\u7565\u3002\u652f\u6301\u901a\u8fc7\u8868\u5355\u6216 YAML \u6587\u4ef6\u66f4\u65b0\u7f51\u7edc\u7b56\u7565\u3002

                                                                          • \u5728\u7f51\u7edc\u7b56\u7565\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684\u7b56\u7565\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u5220\u9664\u3002

                                                                          • \u70b9\u51fb\u7f51\u7edc\u7b56\u7565\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u7f51\u7edc\u7b56\u7565\u7684\u8be6\u60c5\u9875\u9762\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u5220\u9664\u3002

                                                                          "},{"location":"end-user/kpanda/nodes/add-node.html","title":"\u96c6\u7fa4\u8282\u70b9\u6269\u5bb9","text":"

                                                                          \u968f\u7740\u4e1a\u52a1\u5e94\u7528\u4e0d\u65ad\u589e\u957f\uff0c\u96c6\u7fa4\u8d44\u6e90\u65e5\u8d8b\u7d27\u5f20\uff0c\u8fd9\u65f6\u53ef\u4ee5\u57fa\u4e8e kubean \u5bf9\u96c6\u7fa4\u8282\u70b9\u8fdb\u884c\u6269\u5bb9\u3002\u6269\u5bb9\u540e\uff0c\u5e94\u7528\u53ef\u4ee5\u8fd0\u884c\u5728\u65b0\u589e\u7684\u8282\u70b9\u4e0a\uff0c\u7f13\u89e3\u8d44\u6e90\u538b\u529b\u3002

                                                                          \u53ea\u6709\u901a\u8fc7\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u521b\u5efa\u7684\u96c6\u7fa4\u624d\u652f\u6301\u8282\u70b9\u6269\u7f29\u5bb9\uff0c\u4ece\u5916\u90e8\u63a5\u5165\u7684\u96c6\u7fa4\u4e0d\u652f\u6301\u6b64\u64cd\u4f5c\u3002\u672c\u6587\u4e3b\u8981\u4ecb\u7ecd\u540c\u79cd\u67b6\u6784\u4e0b\u5de5\u4f5c\u96c6\u7fa4\u7684 \u5de5\u4f5c\u8282\u70b9 \u6269\u5bb9\u3002

                                                                          1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                                                                            \u82e5 \u96c6\u7fa4\u89d2\u8272 \u4e2d\u5e26\u6709 \u63a5\u5165\u96c6\u7fa4 \u7684\u6807\u7b7e\uff0c\u5219\u8bf4\u660e\u8be5\u96c6\u7fa4\u4e0d\u652f\u6301\u8282\u70b9\u6269\u7f29\u5bb9\u3002

                                                                          2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u8282\u70b9\u7ba1\u7406 \uff0c\u7136\u540e\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u70b9\u51fb \u63a5\u5165\u8282\u70b9 \u3002

                                                                          3. \u8f93\u5165\u4e3b\u673a\u540d\u79f0\u548c\u8282\u70b9 IP \u5e76\u70b9\u51fb \u786e\u5b9a \u3002

                                                                            \u70b9\u51fb \u2795 \u6dfb\u52a0\u5de5\u4f5c\u8282\u70b9 \u53ef\u4ee5\u7ee7\u7eed\u63a5\u5165\u66f4\u591a\u8282\u70b9\u3002

                                                                          Note

                                                                          \u63a5\u5165\u8282\u70b9\u5927\u7ea6\u9700\u8981 20 \u5206\u949f\uff0c\u8bf7\u60a8\u8010\u5fc3\u7b49\u5f85\u3002

                                                                          "},{"location":"end-user/kpanda/nodes/add-node.html#_2","title":"\u53c2\u8003\u6587\u6863","text":"
                                                                          • \u5bf9\u5de5\u4f5c\u96c6\u7fa4\u7684\u63a7\u5236\u8282\u70b9\u6269\u5bb9
                                                                          • \u4e3a\u5de5\u4f5c\u96c6\u7fa4\u6dfb\u52a0\u5f02\u6784\u8282\u70b9
                                                                          • \u4e3a\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u7684\u5de5\u4f5c\u8282\u70b9\u6269\u5bb9
                                                                          • \u66ff\u6362\u5de5\u4f5c\u96c6\u7fa4\u7684\u9996\u4e2a\u63a7\u5236\u8282\u70b9
                                                                          "},{"location":"end-user/kpanda/nodes/delete-node.html","title":"\u96c6\u7fa4\u8282\u70b9\u7f29\u5bb9","text":"

                                                                          \u5f53\u4e1a\u52a1\u9ad8\u5cf0\u671f\u7ed3\u675f\u4e4b\u540e\uff0c\u4e3a\u4e86\u8282\u7701\u8d44\u6e90\u6210\u672c\uff0c\u53ef\u4ee5\u7f29\u5c0f\u96c6\u7fa4\u89c4\u6a21\uff0c\u5378\u8f7d\u5197\u4f59\u7684\u8282\u70b9\uff0c\u5373\u8282\u70b9\u7f29\u5bb9\u3002\u8282\u70b9\u5378\u8f7d\u540e\uff0c\u5e94\u7528\u65e0\u6cd5\u7ee7\u7eed\u8fd0\u884c\u5728\u8be5\u8282\u70b9\u4e0a\u3002

                                                                          "},{"location":"end-user/kpanda/nodes/delete-node.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5177\u6709 Cluster Admin \u89d2\u8272\u6388\u6743 \u3002
                                                                          • \u53ea\u6709\u901a\u8fc7\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u521b\u5efa\u7684\u96c6\u7fa4\u624d\u652f\u6301\u8282\u70b9\u6269\u7f29\u5bb9\uff0c\u4ece\u5916\u90e8\u63a5\u5165\u7684\u96c6\u7fa4\u4e0d\u652f\u6301\u6b64\u64cd\u4f5c\u3002
                                                                          • \u5378\u8f7d\u8282\u70b9\u4e4b\u524d\uff0c\u9700\u8981\u6682\u505c\u8c03\u5ea6\u8be5\u8282\u70b9\uff0c\u5e76\u4e14\u5c06\u8be5\u8282\u70b9\u4e0a\u7684\u5e94\u7528\u90fd\u9a71\u9010\u81f3\u5176\u4ed6\u8282\u70b9\u3002
                                                                          • \u9a71\u9010\u65b9\u5f0f\uff1a\u767b\u5f55\u63a7\u5236\u5668\u8282\u70b9\uff0c\u901a\u8fc7 kubectl drain \u547d\u4ee4\u9a71\u9010\u8282\u70b9\u4e0a\u6240\u6709 Pod\u3002\u5b89\u5168\u9a71\u9010\u7684\u65b9\u5f0f\u53ef\u4ee5\u5141\u8bb8\u5bb9\u5668\u7ec4\u91cc\u9762\u7684\u5bb9\u5668\u4f18\u96c5\u5730\u4e2d\u6b62\u3002
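A minimal sketch of that eviction step, assuming the node is named node-01 (adjust the flags to your workloads):

# Mark the node unschedulable so no new Pods land on it\nkubectl cordon node-01\n\n# Evict all Pods; DaemonSet Pods are skipped and emptyDir data is discarded\nkubectl drain node-01 --ignore-daemonsets --delete-emptydir-data\n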
                                                                          "},{"location":"end-user/kpanda/nodes/delete-node.html#_3","title":"\u6ce8\u610f\u4e8b\u9879","text":"
                                                                          1. \u96c6\u7fa4\u8282\u70b9\u7f29\u5bb9\u65f6\uff0c\u53ea\u80fd\u9010\u4e2a\u8fdb\u884c\u5378\u8f7d\uff0c\u65e0\u6cd5\u6279\u91cf\u5378\u8f7d\u3002

                                                                          2. \u5982\u9700\u5378\u8f7d\u96c6\u7fa4\u63a7\u5236\u5668\u8282\u70b9\uff0c\u9700\u8981\u786e\u4fdd\u6700\u7ec8\u63a7\u5236\u5668\u8282\u70b9\u6570\u4e3a \u5947\u6570\u3002

                                                                          3. \u96c6\u7fa4\u8282\u70b9\u7f29\u5bb9\u65f6\u4e0d\u53ef\u4e0b\u7ebf \u7b2c\u4e00\u4e2a\u63a7\u5236\u5668 \u8282\u70b9\u3002\u5982\u679c\u5fc5\u987b\u6267\u884c\u6b64\u64cd\u4f5c\uff0c\u8bf7\u8054\u7cfb\u552e\u540e\u5de5\u7a0b\u5e08\u3002

                                                                          "},{"location":"end-user/kpanda/nodes/delete-node.html#_4","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                                          1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                                                                            \u82e5 \u96c6\u7fa4\u89d2\u8272 \u4e2d\u5e26\u6709 \u63a5\u5165\u96c6\u7fa4 \u7684\u6807\u7b7e\uff0c\u5219\u8bf4\u660e\u8be5\u96c6\u7fa4\u4e0d\u652f\u6301\u8282\u70b9\u6269\u7f29\u5bb9\u3002

                                                                          2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u8282\u70b9\u7ba1\u7406 \uff0c\u627e\u5230\u9700\u8981\u5378\u8f7d\u7684\u8282\u70b9\uff0c\u70b9\u51fb \u2507 \u9009\u62e9 \u79fb\u9664\u8282\u70b9 \u3002

                                                                          3. \u8f93\u5165\u8282\u70b9\u540d\u79f0\uff0c\u5e76\u70b9\u51fb \u5220\u9664 \u8fdb\u884c\u786e\u8ba4\u3002

                                                                          "},{"location":"end-user/kpanda/nodes/labels-annotations.html","title":"\u6807\u7b7e\u4e0e\u6ce8\u89e3","text":"

                                                                          \u6807\u7b7e\uff08Labels\uff09\u662f\u4e3a Pod\u3001\u8282\u70b9\u3001\u96c6\u7fa4\u7b49 Kubernetes \u5bf9\u8c61\u6dfb\u52a0\u7684\u6807\u8bc6\u6027\u952e\u503c\u5bf9\uff0c\u53ef\u7ed3\u5408\u6807\u7b7e\u9009\u62e9\u5668\u67e5\u627e\u5e76\u7b5b\u9009\u6ee1\u8db3\u67d0\u4e9b\u6761\u4ef6\u7684 Kubernetes \u5bf9\u8c61\u3002\u6bcf\u4e2a\u952e\u5bf9\u4e8e\u7ed9\u5b9a\u5bf9\u8c61\u5fc5\u987b\u662f\u552f\u4e00\u7684\u3002

                                                                          \u6ce8\u89e3\uff08Annotations\uff09\u548c\u6807\u7b7e\u4e00\u6837\uff0c\u4e5f\u662f\u952e/\u503c\u5bf9\uff0c\u4f46\u4e0d\u5177\u5907\u6807\u8bc6\u6216\u7b5b\u9009\u529f\u80fd\u3002 \u4f7f\u7528\u6ce8\u89e3\u53ef\u4ee5\u4e3a\u8282\u70b9\u6dfb\u52a0\u4efb\u610f\u7684\u5143\u6570\u636e\u3002 \u6ce8\u89e3\u7684\u952e\u901a\u5e38\u4f7f\u7528\u7684\u683c\u5f0f\u4e3a \u524d\u7f00\uff08\u53ef\u9009\uff09/\u540d\u79f0\uff08\u5fc5\u586b\uff09 \uff0c\u4f8b\u5982 nfd.node.kubernetes.io/extended-resources \u3002 \u5982\u679c\u7701\u7565\u524d\u7f00\uff0c\u8868\u793a\u8be5\u6ce8\u89e3\u952e\u662f\u7528\u6237\u79c1\u6709\u7684\u3002

                                                                          \u6709\u5173\u6807\u7b7e\u548c\u6ce8\u89e3\u7684\u66f4\u591a\u4fe1\u606f\uff0c\u53ef\u53c2\u8003 Kubernetes \u7684\u5b98\u65b9\u6587\u6863\u6807\u7b7e\u548c\u9009\u62e9\u7b97\u7b26\u6216\u6ce8\u89e3\u3002
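For reference, the same can also be done from the command line; a minimal kubectl sketch (the node name node-01 and the keys are placeholders):

# Add or update a label on a node\nkubectl label node node-01 disktype=ssd --overwrite\n\n# Add an annotation, then remove it again (a trailing "-" deletes the key)\nkubectl annotate node node-01 example.com/owner=team-a\nkubectl annotate node node-01 example.com/owner-\n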

The steps to add or delete labels and annotations in the UI are as follows:

1. On the Cluster List page, click the name of the target cluster.

2. In the left navigation bar, click Node Management, click the ┇ action icon on the right side of the node, then click Edit Labels or Edit Annotations.

3. Click ➕ Add to add a label or annotation, click X to delete one, and finally click OK.

                                                                          "},{"location":"end-user/kpanda/nodes/node-authentication.html","title":"\u4f7f\u7528 SSH \u5bc6\u94a5\u8ba4\u8bc1\u8282\u70b9","text":"

If you choose an SSH key as the node authentication method for the cluster to be created, configure the public and private keys as follows.

1. Run the following command on any node in the management cluster of the cluster to be created to generate the key pair.

                                                                            cd /root/.ssh\nssh-keygen -t rsa\n
2. Run the ls command to check whether the keys were created successfully on the management cluster. The expected output is as follows:

                                                                            ls\nid_rsa  id_rsa.pub  known_hosts\n

The file named id_rsa is the private key, and the file named id_rsa.pub is the public key.

3. Run the following command to load the public key file id_rsa.pub onto every node of the cluster to be created.

                                                                            ssh-copy-id -i /root/.ssh/id_rsa.pub root@10.0.0.0\n

Replace the root@10.0.0.0 user account and node IP in the command above with the username and IP of the node of the cluster to be created. The same operation must be performed on every node of the cluster to be created.

4. Run the following command to view the private key file id_rsa created in step 1.

                                                                            cat /root/.ssh/id_rsa\n

The output looks like this:

                                                                            -----BEGIN RSA PRIVATE KEY-----\nMIIEpQIBAAKCAQEA3UvyKINzY5BFuemQ+uJ6q+GqgfvnWwNC8HzZhpcMSjJy26MM\nUtBEBJxy8fMi57XcjYxPibXW/wnd+32ICCycqCwByUmuXeCC1cjlCQDqjcAvXae7\nY54IXGF7wm2IsMNwf0kjFEXjuS48FLDA0mGRaN3BG+Up5geXcHckg3K5LD8kXFFx\ndEmSIjdyw55NaUitmEdHzN7cIdfi6Z56jcV8dcFBgWKUx+ebiyPmZBkXToz6GnMF\nrswzzZCl+G6Jb2xTGy7g7ozb4BoZd1IpSD5EhDanRrESVE0C5YuJ5zUAC0CvVd1l\nv67AK8Ko6MXToHp01/bcsvlM6cqgwUFXZKVeOwIDAQABAoIBAQCO36GQlo3BEjxy\nM2HvGJmqrx+unDxafliRe4nVY2AD515Qf4xNSzke4QM1QoyenMOwf446krQkJPK0\nk+9nl6Xszby5gGCbK4BNFk8I6RaGPjZWeRx6zGUJf8avWJiPxx6yjz2esSC9RiR0\nF0nmiiefVMyAfgv2/5++dK2WUFNNRKLgSRRpP5bRaD5wMzzxtSSXrUon6217HO8p\n3RoWsI51MbVzhdVgpHUNABcoa0rpr9svT6XLKZxY8mxpKFYjM0Wv2JIDABg3kBvh\nQbJ7kStCO3naZjKMU9UuSqVJs06cflGYw7Or8/tABR3LErNQKPjkhAQqt0DXw7Iw\n3tKdTAJBAoGBAP687U7JAOqQkcphek2E/A/sbO/d37ix7Z3vNOy065STrA+ZWMZn\npZ6Ui1B/oJpoZssnfvIoz9sn559X0j67TljFALFd2ZGS0Fqh9KVCqDvfk+Vst1dq\n+3r/yZdTOyswoccxkJiC/GDwZGK0amJWqvob39JCZhDAKIGLbGMmjdAHAoGBAN5k\nm1WGnni1nZ+3dryIwgB6z1hWcnLTamzSET6KhSuo946ET0IRG9xtlheCx6dqICbr\nVk1Y4NtRZjK/p/YGx59rDWf7E3I8ZMgR7mjieOcUZ4lUlA4l7ZIlW/2WZHW+nUXO\nTi20fqJ8qSp4BUvOvuth1pz2GLUHe2/Fxjf7HIstAoGBAPHpPr9r+TfIlPsJeRj2\n6lzA3G8qWFRQfGRYjv0fjv0pA+RIb1rzgP/I90g5+63G6Z+R4WdcxI/OJJNY1iuG\nuw9n/pFxm7U4JC990BPE6nj5iLz+clpNGYckNDBF9VG9vFSrSDLdaYkxoVNvG/xJ\na9Na90H4lm7f3VewrPy310KvAoGAZr+mwNoEh5Kpc6xo8Gxi7aPP/mlaUVD6X7Ki\ngvmu02AqmC7rC4QqEiqTaONkaSXwGusqIWxJ3yp5hELmUBYLzszAEeV/s4zRp1oZ\ng133LBRSTbHFAdBmNdqK6Nu+KGRb92980UMOKvZbliKDl+W6cbfvVu+gtKrzTc3b\naevb4TUCgYEAnJAxyVYDP1nJf7bjBSHXQu1E/DMwbtrqw7dylRJ8cAzI7IxfSCez\n7BYWq41PqVd9/zrb3Pbh2phiVzKe783igAIMqummcjo/kZyCwFsYBzK77max1jF5\naPQsLbRS2aDz8kIH6jHPZ/R+15EROmdtLmA7vIJZGerWWQR0dUU+XXA=\n

Copy the private key content and paste it into the key input box in the UI.

                                                                          "},{"location":"end-user/kpanda/nodes/node-check.html","title":"\u521b\u5efa\u96c6\u7fa4\u8282\u70b9\u53ef\u7528\u6027\u68c0\u67e5","text":"

When creating a cluster or adding nodes to an existing cluster, check the node configuration against the table below to avoid cluster creation or scale-out failures caused by node misconfiguration.

| Check Item | Description |
| --- | --- |
| Operating system | See Supported Architectures and Operating Systems |
| SELinux | Disabled |
| Firewall | Disabled |
| Architecture consistency | Consistent CPU architecture across nodes (e.g., all ARM or all x86) |
| Host time | Time synchronization error across all hosts under 10 seconds |
| Network connectivity | The node and its SSH port are reachable from the platform |
| CPU | More than 4 cores of available CPU |
| Memory | More than 8 GB of available memory |
"},{"location":"end-user/kpanda/nodes/node-check.html#_2","title":"Supported Architectures and Operating Systems","text":"
| Architecture | Operating System | Remarks |
| --- | --- | --- |
| ARM | Kylin Linux Advanced Server release V10 (Sword) SP2 | Recommended |
| ARM | UOS Linux | |
| ARM | openEuler | |
| x86 | CentOS 7.x | Recommended |
| x86 | Redhat 7.x | Recommended |
| x86 | Redhat 8.x | Recommended |
| x86 | Flatcar Container Linux by Kinvolk | |
| x86 | Debian Bullseye, Buster, Jessie, Stretch | |
| x86 | Ubuntu 16.04, 18.04, 20.04, 22.04 | |
| x86 | Fedora 35, 36 | |
| x86 | Fedora CoreOS | |
| x86 | openSUSE Leap 15.x/Tumbleweed | |
| x86 | Oracle Linux 7, 8, 9 | |
| x86 | Alma Linux 8, 9 | |
| x86 | Rocky Linux 8, 9 | |
| x86 | Amazon Linux 2 | |
| x86 | Kylin Linux Advanced Server release V10 (Sword) - SP2 | Hygon |
| x86 | UOS Linux | |
| x86 | openEuler | |
"},{"location":"end-user/kpanda/nodes/node-details.html","title":"Node Details","text":"

After attaching or creating a cluster, you can view information about each node in the cluster, including node status, labels, resource usage, Pods, and monitoring data.

1. On the Cluster List page, click the name of the target cluster.

2. In the left navigation bar, click Node Management to view node status, roles, labels, CPU/memory usage, IP addresses, and creation time.

3. Click a node name to open the node details page and view more information, including overview, Pods, labels and annotations, event list, and status.

   You can also view the node's YAML file, monitoring data, labels, annotations, and more.

                                                                          "},{"location":"end-user/kpanda/nodes/schedule.html","title":"\u8282\u70b9\u8c03\u5ea6","text":"

A node's scheduling can be paused or resumed. Pausing scheduling means new Pods will no longer be scheduled onto the node; resuming scheduling means Pods can be scheduled onto the node again.

1. On the Cluster List page, click the name of the target cluster.

2. In the left navigation bar, click Node Management, click the ┇ action icon on the right side of the node, then click the Pause Scheduling button to pause scheduling on the node.

3. Click the ┇ action icon on the right side of the node, then click the Resume Scheduling button to resume scheduling on the node.

The node's scheduling status may lag due to network conditions; click the refresh icon on the right side of the search box to refresh it.
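Pausing and resuming scheduling correspond to cordoning and uncordoning the node in Kubernetes; a minimal command-line equivalent (the node name node-01 is a placeholder):

# Pause scheduling: mark the node unschedulable\nkubectl cordon node-01\n\n# Resume scheduling: mark the node schedulable again\nkubectl uncordon node-01\n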

                                                                          "},{"location":"end-user/kpanda/nodes/taints.html","title":"\u8282\u70b9\u6c61\u70b9\u7ba1\u7406","text":"

A taint allows a node to repel a certain class of Pods and prevents them from being scheduled onto it. One or more taints can be applied to each node; Pods that do not tolerate those taints will not be scheduled onto the node.

                                                                          "},{"location":"end-user/kpanda/nodes/taints.html#_2","title":"\u6ce8\u610f\u4e8b\u9879","text":"
                                                                          1. \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u5907 NS Editor \u89d2\u8272\u6388\u6743\u6216\u5176\u4ed6\u66f4\u9ad8\u6743\u9650\u3002
                                                                          2. \u4e3a\u8282\u70b9\u6dfb\u52a0\u6c61\u70b9\u4e4b\u540e\uff0c\u53ea\u6709\u80fd\u5bb9\u5fcd\u8be5\u6c61\u70b9\u7684 Pod \u624d\u80fd\u88ab\u8c03\u5ea6\u5230\u8be5\u8282\u70b9\u3002
                                                                          "},{"location":"end-user/kpanda/nodes/taints.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                                          1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u627e\u5230\u76ee\u6807\u96c6\u7fa4\uff0c\u70b9\u51fb\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u6982\u89c8 \u9875\u9762\u3002

                                                                          2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\uff0c\u70b9\u51fb \u8282\u70b9\u7ba1\u7406 \uff0c\u627e\u5230\u9700\u8981\u4fee\u6539\u6c61\u70b9\u7684\u8282\u70b9\uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u56fe\u6807\u5e76\u70b9\u51fb \u4fee\u6539\u6c61\u70b9 \u6309\u94ae\u3002

                                                                          3. \u5728\u5f39\u6846\u5185\u8f93\u5165\u6c61\u70b9\u7684\u952e\u503c\u4fe1\u606f\uff0c\u9009\u62e9\u6c61\u70b9\u6548\u679c\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                                                                            \u70b9\u51fb \u2795 \u6dfb\u52a0 \u53ef\u4ee5\u4e3a\u8282\u70b9\u6dfb\u52a0\u591a\u4e2a\u6c61\u70b9\uff0c\u70b9\u51fb\u6c61\u70b9\u6548\u679c\u53f3\u4fa7\u7684 X \u53ef\u4ee5\u5220\u9664\u6c61\u70b9\u3002

  Three taint effects are currently supported:

  • NoSchedule: new Pods are not scheduled onto a node with this taint unless they have a matching toleration. Pods already running on the node are not evicted.
  • NoExecute: this affects Pods already running on the node:
    • Pods that do not tolerate the taint are evicted immediately.
    • Pods that tolerate the taint but do not specify tolerationSeconds in their toleration stay bound to the node indefinitely.
    • Pods that tolerate the taint and specify tolerationSeconds stay on the node for the specified duration, after which they are evicted.
  • PreferNoSchedule: a "soft" version of NoSchedule. The control plane tries to avoid placing Pods that do not tolerate the taint onto the node, but this is not guaranteed. Avoid this effect where possible.

For more details about taints, see the official Kubernetes documentation: Taints and Tolerations.
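Taints can also be managed with kubectl; a minimal sketch (the node name and the dedicated=gpu key/value are placeholders):

# Add a taint with the NoSchedule effect\nkubectl taint node node-01 dedicated=gpu:NoSchedule\n\n# Remove the same taint (a trailing "-" deletes it)\nkubectl taint node node-01 dedicated=gpu:NoSchedule-\n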

                                                                          "},{"location":"end-user/kpanda/olm/import-miniooperator.html","title":"\u5bfc\u5165\u79bb\u7ebf MinIo Operator","text":"

                                                                          \u672c\u6587\u5c06\u4ecb\u7ecd\u5728\u79bb\u7ebf\u73af\u5883\u4e0b\u5982\u4f55\u5bfc\u5165 MinIo Operator\u3002

                                                                          "},{"location":"end-user/kpanda/olm/import-miniooperator.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          • \u5f53\u524d\u96c6\u7fa4\u5df2\u63a5\u5165\u5bb9\u5668\u7ba1\u7406\u4e14\u5168\u5c40\u670d\u52a1\u96c6\u7fa4\u5df2\u7ecf\u5b89\u88c5 kolm \u7ec4\u4ef6\uff08helm \u6a21\u677f\u641c\u7d22 kolm\uff09
                                                                          • \u5f53\u524d\u96c6\u7fa4\u5df2\u7ecf\u5b89\u88c5 olm \u7ec4\u4ef6\u4e14\u7248\u672c >= 0.2.4 (helm \u6a21\u677f\u641c\u7d22 olm)
                                                                          • \u652f\u6301\u6267\u884c Docker \u547d\u4ee4
                                                                          • \u51c6\u5907\u4e00\u4e2a\u955c\u50cf\u4ed3\u5e93
                                                                          "},{"location":"end-user/kpanda/olm/import-miniooperator.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                                          1. \u5728\u6267\u884c\u73af\u5883\u4e2d\u8bbe\u7f6e\u73af\u5883\u53d8\u91cf\u5e76\u5728\u540e\u7eed\u6b65\u9aa4\u4f7f\u7528\uff0c\u6267\u884c\u547d\u4ee4\uff1a

                                                                            export OPM_IMG=10.5.14.200/quay.m.daocloud.io/operator-framework/opm:v1.29.0 \nexport BUNDLE_IMG=10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3 \n

  How to obtain the above image addresses:

  Go to Container Management -> select the current cluster -> Helm Apps -> view the olm component -> Plugin Settings, and find the opm, minio, minio bundle, and minio operator images needed in the following steps.

  Using the plugin settings above as an example, the four image addresses are as follows:\n\n# opm image\n10.5.14.200/quay.m.daocloud.io/operator-framework/opm:v1.29.0\n\n# minio image\n10.5.14.200/quay.m.daocloud.io/minio/minio:RELEASE.2023-03-24T21-41-23Z\n\n# minio bundle image\n10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3\n\n# minio operator image\n10.5.14.200/quay.m.daocloud.io/minio/operator:v5.0.3\n
2. Run the opm command to extract the operator contained in the offline bundle image.

  # Create a directory to hold the operator\n$ mkdir minio-operator && cd minio-operator\n\n# Extract the operator YAML from the bundle image\n$ docker run --user root -v $PWD/minio-operator:/minio-operator ${OPM_IMG} alpha bundle unpack --skip-tls-verify -v -d ${BUNDLE_IMG} -o ./minio-operator\n\n# Expected result\n.\n└── minio-operator\n    ├── manifests\n    │   ├── console-env_v1_configmap.yaml\n    │   ├── console-sa-secret_v1_secret.yaml\n    │   ├── console_v1_service.yaml\n    │   ├── minio-operator.clusterserviceversion.yaml\n    │   ├── minio.min.io_tenants.yaml\n    │   ├── operator_v1_service.yaml\n    │   ├── sts.min.io_policybindings.yaml\n    │   └── sts_v1_service.yaml\n    └── metadata\n        └── annotations.yaml\n\n3 directories, 9 files\n
3. Replace all image addresses in the minio-operator/manifests/minio-operator.clusterserviceversion.yaml file with the addresses from the offline image registry.

  (The original document shows screenshots of the image addresses before and after the replacement; a scripted version of the replacement is sketched below.)
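  A hedged sketch of that replacement, assuming the upstream images are hosted on quay.io and the offline registry prefix is the 10.5.14.200/quay.m.daocloud.io used above (verify the resulting file manually):

  # Rewrite every quay.io image reference to the offline registry prefix (assumption: all images come from quay.io)\nsed -i 's#quay.io/#10.5.14.200/quay.m.daocloud.io/#g' minio-operator/manifests/minio-operator.clusterserviceversion.yaml\n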

4. Generate a Dockerfile for building the bundle image.

  $ docker run --user root -v $PWD:/minio-operator -w /minio-operator ${OPM_IMG} alpha bundle generate --channels stable,beta -d /minio-operator/minio-operator/manifests -e stable -p minio-operator\n\n# Expected result\n.\n├── bundle.Dockerfile\n└── minio-operator\n    ├── manifests\n    │   ├── console-env_v1_configmap.yaml\n    │   ├── console-sa-secret_v1_secret.yaml\n    │   ├── console_v1_service.yaml\n    │   ├── minio-operator.clusterserviceversion.yaml\n    │   ├── minio.min.io_tenants.yaml\n    │   ├── operator_v1_service.yaml\n    │   ├── sts.min.io_policybindings.yaml\n    │   └── sts_v1_service.yaml\n    └── metadata\n        └── annotations.yaml\n\n3 directories, 10 files\n
5. Run the build command to build the bundle image and push it to the offline registry.

  # Set the new bundle image\nexport OFFLINE_BUNDLE_IMG=10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3-offline\n\n$ docker build . -f bundle.Dockerfile -t ${OFFLINE_BUNDLE_IMG}\n\n$ docker push ${OFFLINE_BUNDLE_IMG}\n
6. Generate a Dockerfile for building the catalog image.

  $ docker run --user root -v $PWD:/minio-operator -w /minio-operator ${OPM_IMG} index add --bundles ${OFFLINE_BUNDLE_IMG} --generate --binary-image ${OPM_IMG} --skip-tls-verify\n\n# Expected result\n.\n├── bundle.Dockerfile\n├── database\n│   └── index.db\n├── index.Dockerfile\n└── minio-operator\n    ├── manifests\n    │   ├── console-env_v1_configmap.yaml\n    │   ├── console-sa-secret_v1_secret.yaml\n    │   ├── console_v1_service.yaml\n    │   ├── minio.min.io_tenants.yaml\n    │   ├── minio-operator.clusterserviceversion.yaml\n    │   ├── operator_v1_service.yaml\n    │   ├── sts.min.io_policybindings.yaml\n    │   └── sts_v1_service.yaml\n    └── metadata\n        └── annotations.yaml\n\n4 directories, 12 files\n
7. Build the catalog image.

  # Set the new catalog image\nexport OFFLINE_CATALOG_IMG=10.5.14.200/release.daocloud.io/operator-framework/system-operator-index:v0.1.0-offline\n\n$ docker build . -f index.Dockerfile -t ${OFFLINE_CATALOG_IMG}\n\n$ docker push ${OFFLINE_CATALOG_IMG}\n
8. Go to Container Management and update the built-in catsrc image of the olm Helm app (fill in the ${catalog-image} specified when building the catalog image).

9. After the update succeeds, the minio-operator component appears in the Operator Hub.

                                                                          "},{"location":"end-user/kpanda/permissions/cluster-ns-auth.html","title":"\u96c6\u7fa4\u548c\u547d\u540d\u7a7a\u95f4\u6388\u6743","text":"

                                                                          \u5bb9\u5668\u7ba1\u7406\u57fa\u4e8e\u5168\u5c40\u6743\u9650\u7ba1\u7406\u53ca\u5168\u5c40\u7528\u6237/\u7528\u6237\u7ec4\u7ba1\u7406\u5b9e\u73b0\u6388\u6743\uff0c\u5982\u9700\u4e3a\u7528\u6237\u6388\u4e88\u5bb9\u5668\u7ba1\u7406\u7684\u6700\u9ad8\u6743\u9650\uff08\u53ef\u4ee5\u521b\u5efa\u3001\u7ba1\u7406\u3001\u5220\u9664\u6240\u6709\u96c6\u7fa4\uff09\u3002

                                                                          "},{"location":"end-user/kpanda/permissions/cluster-ns-auth.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                                          \u7ed9\u7528\u6237/\u7528\u6237\u7ec4\u6388\u6743\u4e4b\u524d\uff0c\u8bf7\u5b8c\u6210\u5982\u4e0b\u51c6\u5907\uff1a

                                                                          • \u5df2\u5728\u5168\u5c40\u7ba1\u7406\u4e2d\u521b\u5efa\u4e86\u5f85\u6388\u6743\u7684\u7528\u6237/\u7528\u6237\u7ec4\uff0c\u8bf7\u53c2\u8003\u7528\u6237\u3002

                                                                          • \u4ec5 Kpanda Owner\u53ca\u5f53\u524d\u96c6\u7fa4\u7684 Cluster Admin \u5177\u5907\u96c6\u7fa4\u6388\u6743\u80fd\u529b\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u6743\u9650\u8bf4\u660e\u3002

                                                                          • \u4ec5 Kpanda Owner\u3001\u5f53\u524d\u96c6\u7fa4\u7684 Cluster Admin\uff0c\u5f53\u524d\u547d\u540d\u7a7a\u95f4\u7684 NS Admin \u5177\u5907\u547d\u540d\u7a7a\u95f4\u6388\u6743\u80fd\u529b\u3002

                                                                          "},{"location":"end-user/kpanda/permissions/cluster-ns-auth.html#_3","title":"\u96c6\u7fa4\u6388\u6743","text":"
                                                                          1. \u7528\u6237\u767b\u5f55\u5e73\u53f0\u540e\uff0c\u70b9\u51fb\u5de6\u4fa7\u83dc\u5355\u680f \u5bb9\u5668\u7ba1\u7406 \u4e0b\u7684 \u6743\u9650\u7ba1\u7406 \uff0c\u9ed8\u8ba4\u4f4d\u4e8e \u96c6\u7fa4\u6743\u9650 \u9875\u7b7e\u3002

                                                                          2. \u70b9\u51fb \u6dfb\u52a0\u6388\u6743 \u6309\u94ae\u3002

                                                                          3. \u5728 \u6dfb\u52a0\u96c6\u7fa4\u6743\u9650 \u9875\u9762\u4e2d\uff0c\u9009\u62e9\u76ee\u6807\u96c6\u7fa4\u3001\u5f85\u6388\u6743\u7684\u7528\u6237/\u7528\u6237\u7ec4\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                                                                            \u76ee\u524d\u4ec5\u652f\u6301\u7684\u96c6\u7fa4\u89d2\u8272\u4e3a Cluster Admin \uff0c\u8be6\u60c5\u6743\u9650\u53ef\u53c2\u8003\u6743\u9650\u8bf4\u660e\u3002\u5982\u9700\u8981\u7ed9\u591a\u4e2a\u7528\u6237/\u7528\u6237\u7ec4\u540c\u65f6\u8fdb\u884c\u6388\u6743\uff0c \u53ef\u70b9\u51fb \u6dfb\u52a0\u7528\u6237\u6743\u9650 \u8fdb\u884c\u591a\u6b21\u6dfb\u52a0\u3002

                                                                          4. \u8fd4\u56de\u96c6\u7fa4\u6743\u9650\u7ba1\u7406\u9875\u9762\uff0c\u5c4f\u5e55\u51fa\u73b0\u6d88\u606f\uff1a \u6dfb\u52a0\u96c6\u7fa4\u6743\u9650\u6210\u529f \u3002

                                                                          "},{"location":"end-user/kpanda/permissions/cluster-ns-auth.html#_4","title":"\u547d\u540d\u7a7a\u95f4\u6388\u6743","text":"
                                                                          1. \u7528\u6237\u767b\u5f55\u5e73\u53f0\u540e\uff0c\u70b9\u51fb\u5de6\u4fa7\u83dc\u5355\u680f \u5bb9\u5668\u7ba1\u7406 \u4e0b\u7684 \u6743\u9650\u7ba1\u7406 \uff0c\u70b9\u51fb \u547d\u540d\u7a7a\u95f4\u6743\u9650 \u9875\u7b7e\u3002

                                                                          2. \u70b9\u51fb \u6dfb\u52a0\u6388\u6743 \u6309\u94ae\u3002\u5728 \u6dfb\u52a0\u547d\u540d\u7a7a\u95f4\u6743\u9650 \u9875\u9762\u4e2d\uff0c\u9009\u62e9\u76ee\u6807\u96c6\u7fa4\u3001\u76ee\u6807\u547d\u540d\u7a7a\u95f4\uff0c\u4ee5\u53ca\u5f85\u6388\u6743\u7684\u7528\u6237/\u7528\u6237\u7ec4\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u3002

                                                                            \u76ee\u524d\u652f\u6301\u7684\u547d\u540d\u7a7a\u95f4\u89d2\u8272\u4e3a NS Admin\u3001NS Editor\u3001NS Viewer\uff0c\u8be6\u60c5\u6743\u9650\u53ef\u53c2\u8003\u6743\u9650\u8bf4\u660e\u3002\u5982\u9700\u7ed9\u591a\u4e2a\u7528\u6237/\u7528\u6237\u7ec4\u540c\u65f6\u8fdb\u884c\u6388\u6743\uff0c\u53ef\u70b9\u51fb \u6dfb\u52a0\u7528\u6237\u6743\u9650 \u8fdb\u884c\u591a\u6b21\u6dfb\u52a0\u3002\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u6743\u9650\u6388\u6743\u3002

                                                                          3. \u8fd4\u56de\u547d\u540d\u7a7a\u95f4\u6743\u9650\u7ba1\u7406\u9875\u9762\uff0c\u5c4f\u5e55\u51fa\u73b0\u6d88\u606f\uff1a \u6dfb\u52a0\u96c6\u7fa4\u6743\u9650\u6210\u529f \u3002

                                                                            Tip

                                                                            \u540e\u7eed\u5982\u9700\u5220\u9664\u6216\u7f16\u8f91\u6743\u9650\uff0c\u53ef\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u9009\u62e9 \u7f16\u8f91 \u6216 \u5220\u9664 \u3002

                                                                          "},{"location":"end-user/kpanda/permissions/custom-kpanda-role.html","title":"\u589e\u52a0 Kpanda \u5185\u7f6e\u89d2\u8272\u6743\u9650\u70b9","text":"

                                                                          \u8fc7\u53bb Kpanda \u5185\u7f6e\u89d2\u8272\u7684\u6743\u9650\u70b9\uff08rbac rules\uff09\u90fd\u662f\u63d0\u524d\u9884\u5b9a\u4e49\u597d\u7684\u4e14\u7528\u6237\u65e0\u6cd5\u4fee\u6539\uff0c\u56e0\u4e3a\u4ee5\u524d\u4fee\u6539\u5185\u7f6e\u89d2\u8272\u7684\u6743\u9650\u70b9\u4e4b\u540e\u4e5f\u4f1a\u88ab Kpanda \u63a7\u5236\u5668\u8fd8\u539f\u6210\u9884\u5b9a\u4e49\u7684\u6743\u9650\u70b9\u3002 \u4e3a\u4e86\u652f\u6301\u66f4\u52a0\u7075\u6d3b\u7684\u6743\u9650\u914d\u7f6e\uff0c\u6ee1\u8db3\u5bf9\u7cfb\u7edf\u89d2\u8272\u7684\u81ea\u5b9a\u4e49\u9700\u6c42\uff0c\u76ee\u524d Kpanda \u652f\u6301\u4e3a\u5185\u7f6e\u7cfb\u7edf\u89d2\u8272\uff08cluster admin\u3001ns admin\u3001ns editor\u3001ns viewer\uff09\u4fee\u6539\u6743\u9650\u70b9\u3002 \u4ee5\u4e0b\u793a\u4f8b\u6f14\u793a\u5982\u4f55\u65b0\u589e ns-viewer \u6743\u9650\u70b9\uff0c\u5c1d\u8bd5\u589e\u52a0\u53ef\u4ee5\u5220\u9664 Deployment \u7684\u6743\u9650\u3002\u5176\u4ed6\u6743\u9650\u70b9\u64cd\u4f5c\u7c7b\u4f3c\u3002

                                                                          "},{"location":"end-user/kpanda/permissions/custom-kpanda-role.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          • \u9002\u7528\u4e8e\u5bb9\u5668\u7ba1\u7406 v0.27.0 \u53ca\u4ee5\u4e0a\u7248\u672c\u3002
                                                                          • \u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002
                                                                          • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u7528\u6237\u7684\u521b\u5efa\uff0c\u5e76\u4e3a\u7528\u6237\u6388\u4e88 NS Viewer \uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                                          Note

                                                                          • \u53ea\u9700\u5728 Global Cluster \u589e\u52a0\u6743\u9650\u70b9\uff0cKpanda \u63a7\u5236\u5668\u4f1a\u628a Global Cluster \u589e\u52a0\u7684\u6743\u9650\u70b9\u540c\u6b65\u5230\u6240\u6709\u63a5\u5165\u5b50\u96c6\u7fa4\u4e2d\uff0c\u540c\u6b65\u9700\u4e00\u6bb5\u65f6\u95f4\u624d\u80fd\u5b8c\u6210
                                                                          • \u53ea\u80fd\u5728 Global Cluster \u589e\u52a0\u6743\u9650\u70b9\uff0c\u5728\u5b50\u96c6\u7fa4\u65b0\u589e\u7684\u6743\u9650\u70b9\u4f1a\u88ab Global Cluster \u5185\u7f6e\u89d2\u8272\u6743\u9650\u70b9\u8986\u76d6
                                                                          • \u53ea\u652f\u6301\u4f7f\u7528\u56fa\u5b9a Label \u7684 ClusterRole \u8ffd\u52a0\u6743\u9650\uff0c\u4e0d\u652f\u6301\u66ff\u6362\u6216\u8005\u5220\u9664\u6743\u9650\uff0c\u4e5f\u4e0d\u80fd\u4f7f\u7528 role \u8ffd\u52a0\u6743\u9650\uff0c\u5185\u7f6e\u89d2\u8272\u8ddf\u7528\u6237\u521b\u5efa\u7684 ClusterRole Label \u5bf9\u5e94\u5173\u7cfb\u5982\u4e0b

                                                                            cluster-admin: rbac.kpanda.io/role-template-cluster-admin: \"true\"\ncluster-edit: rbac.kpanda.io/role-template-cluster-edit: \"true\"\ncluster-view: rbac.kpanda.io/role-template-cluster-view: \"true\"\nns-admin: rbac.kpanda.io/role-template-ns-admin: \"true\"\nns-edit: rbac.kpanda.io/role-template-ns-edit: \"true\"\nns-view: rbac.kpanda.io/role-template-ns-view: \"true\"\n
                                                                          "},{"location":"end-user/kpanda/permissions/custom-kpanda-role.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"
                                                                          1. \u4f7f\u7528 admin \u6216\u8005 cluster admin \u6743\u9650\u7684\u7528\u6237\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d

                                                                          2. \u6388\u6743 ns-viewer\uff0c\u7528\u6237\u6709\u8be5 namespace ns-view \u6743\u9650

                                                                          3. \u5207\u6362\u767b\u5f55\u7528\u6237\u4e3a ns-viewer\uff0c\u6253\u5f00\u63a7\u5236\u53f0\u83b7\u53d6 ns-viewer \u7528\u6237\u5bf9\u5e94\u7684 token\uff0c\u4f7f\u7528 curl \u8bf7\u6c42\u5220\u9664\u4e0a\u8ff0\u7684 deployment nginx\uff0c\u53d1\u73b0\u65e0\u5220\u9664\u6743\u9650

                                                                            [root@master-01 ~]# curl -k -X DELETE  'https://${URL}/apis/kpanda.io/v1alpha1/clusters/cluster-member/namespaces/default/deployments/nginx' -H 'authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJOU044MG9BclBRMzUwZ2VVU2ZyNy1xMEREVWY4MmEtZmJqR05uRE1sd1lFIn0.eyJleHAiOjE3MTU3NjY1NzksImlhdCI6MTcxNTY4MDE3OSwiYXV0aF90aW1lIjoxNzE1NjgwMTc3LCJqdGkiOiIxZjI3MzJlNC1jYjFhLTQ4OTktYjBiZC1iN2IxZWY1MzAxNDEiLCJpc3MiOiJodHRwczovLzEwLjYuMjAxLjIwMTozMDE0Ny9hdXRoL3JlYWxtcy9naGlwcG8iLCJhdWQiOiJfX2ludGVybmFsLWdoaXBwbyIsInN1YiI6ImMxZmMxM2ViLTAwZGUtNDFiYS05ZTllLWE5OGU2OGM0MmVmMCIsInR5cCI6IklEIiwiYXpwIjoiX19pbnRlcm5hbC1naGlwcG8iLCJzZXNzaW9uX3N0YXRlIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiYXRfaGFzaCI6IlJhTHoyQjlKQ2FNc1RrbGVMR3V6blEiLCJhY3IiOiIwIiwic2lkIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJncm91cHMiOltdLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJucy12aWV3ZXIiLCJsb2NhbGUiOiIifQ.As2ipMjfvzvgONAGlc9RnqOd3zMwAj82VXlcqcR74ZK9tAq3Q4ruQ1a6WuIfqiq8Kq4F77ljwwzYUuunfBli2zhU2II8zyxVhLoCEBu4pBVBd_oJyUycXuNa6HfQGnl36E1M7-_QG8b-_T51wFxxVb5b7SEDE1AvIf54NAlAr-rhDmGRdOK1c9CohQcS00ab52MD3IPiFFZ8_Iljnii-RpXKZoTjdcULJVn_uZNk_SzSUK-7MVWmPBK15m6sNktOMSf0pCObKWRqHd15JSe-2aA2PKBo1jBH3tHbOgZyMPdsLI0QdmEnKB5FiiOeMpwn_oHnT6IjT-BZlB18VkW8rA'\n{\"code\":7,\"message\":\"[RBAC] delete resources(deployments: nginx) is forbidden for user(ns-viewer) in cluster(cluster-member)\",\"details\":[]}[root@master-01 ~]#\n[root@master-01 ~]#\n
4. Create the following ClusterRole on the global service cluster:

                                                                            apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: append-ns-view # (1)!\n  labels:\n    rbac.kpanda.io/role-template-ns-view: \"true\" # (2)!\nrules:\n  - apiGroups: [ \"apps\" ]\n    resources: [ \"deployments\" ]\n    verbs: [ \"delete\" ]\n
  1. This field can be set to any value, as long as it is unique and complies with the Kubernetes resource naming rules.
  2. Note that different labels must be applied when adding permissions to different roles.
5. Wait for the Kpanda controller to add the user-created permission to the built-in role ns-viewer, then check whether the built-in role now carries the permission point added in the previous step:

                                                                            [root@master-01 ~]# kubectl get clusterrole role-template-ns-view -oyaml|grep deployments -C 10|tail -n 6\n
                                                                            - apiGroups:\n  - apps\n  resources:\n  - deployments\n  verbs:\n  - delete\n

6. Use curl again to request deletion of the nginx Deployment. This time the deletion succeeds, i.e. ns-viewer has gained the permission to delete Deployments:

                                                                            [root@master-01 ~]# curl -k -X DELETE  'https://${URL}/apis/kpanda.io/v1alpha1/clusters/cluster-member/namespaces/default/deployments/nginx' -H 'authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJOU044MG9BclBRMzUwZ2VVU2ZyNy1xMEREVWY4MmEtZmJqR05uRE1sd1lFIn0.eyJleHAiOjE3MTU3NjY1NzksImlhdCI6MTcxNTY4MDE3OSwiYXV0aF90aW1lIjoxNzE1NjgwMTc3LCJqdGkiOiIxZjI3MzJlNC1jYjFhLTQ4OTktYjBiZC1iN2IxZWY1MzAxNDEiLCJpc3MiOiJodHRwczovLzEwLjYuMjAxLjIwMTozMDE0Ny9hdXRoL3JlYWxtcy9naGlwcG8iLCJhdWQiOiJfX2ludGVybmFsLWdoaXBwbyIsInN1YiI6ImMxZmMxM2ViLTAwZGUtNDFiYS05ZTllLWE5OGU2OGM0MmVmMCIsInR5cCI6IklEIiwiYXpwIjoiX19pbnRlcm5hbC1naGlwcG8iLCJzZXNzaW9uX3N0YXRlIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiYXRfaGFzaCI6IlJhTHoyQjlKQ2FNc1RrbGVMR3V6blEiLCJhY3IiOiIwIiwic2lkIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJncm91cHMiOltdLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJucy12aWV3ZXIiLCJsb2NhbGUiOiIifQ.As2ipMjfvzvgONAGlc9RnqOd3zMwAj82VXlcqcR74ZK9tAq3Q4ruQ1a6WuIfqiq8Kq4F77ljwwzYUuunfBli2zhU2II8zyxVhLoCEBu4pBVBd_oJyUycXuNa6HfQGnl36E1M7-_QG8b-_T51wFxxVb5b7SEDE1AvIf54NAlAr-rhDmGRdOK1c9CohQcS00ab52MD3IPiFFZ8_Iljnii-RpXKZoTjdcULJVn_uZNk_SzSUK-7MVWmPBK15m6sNktOMSf0pCObKWRqHd15JSe-2aA2PKBo1jBH3tHbOgZyMPdsLI0QdmEnKB5FiiOeMpwn_oHnT6IjT-BZlB18VkW8rA'\n
                                                                          "},{"location":"end-user/kpanda/permissions/permission-brief.html","title":"\u5bb9\u5668\u7ba1\u7406\u6743\u9650\u8bf4\u660e","text":"

                                                                          \u5bb9\u5668\u7ba1\u7406\u6743\u9650\u57fa\u4e8e\u5168\u5c40\u6743\u9650\u7ba1\u7406\u4ee5\u53ca Kubernetes RBAC \u6743\u9650\u7ba1\u7406\u6253\u9020\u7684\u591a\u7ef4\u5ea6\u6743\u9650\u7ba1\u7406\u4f53\u7cfb\u3002 \u652f\u6301\u96c6\u7fa4\u7ea7\u3001\u547d\u540d\u7a7a\u95f4\u7ea7\u7684\u6743\u9650\u63a7\u5236\uff0c\u5e2e\u52a9\u7528\u6237\u4fbf\u6377\u7075\u6d3b\u5730\u5bf9\u79df\u6237\u4e0b\u7684 IAM \u7528\u6237\u3001\u7528\u6237\u7ec4\uff08\u7528\u6237\u7684\u96c6\u5408\uff09\u8bbe\u5b9a\u4e0d\u540c\u7684\u64cd\u4f5c\u6743\u9650\u3002

                                                                          "},{"location":"end-user/kpanda/permissions/permission-brief.html#_2","title":"\u96c6\u7fa4\u6743\u9650","text":"

                                                                          \u96c6\u7fa4\u6743\u9650\u57fa\u4e8e Kubernetes RBAC \u7684 ClusterRolebinding \u6388\u6743\uff0c\u96c6\u7fa4\u6743\u9650\u8bbe\u7f6e\u53ef\u8ba9\u7528\u6237/\u7528\u6237\u7ec4\u5177\u5907\u96c6\u7fa4\u76f8\u5173\u6743\u9650\u3002 \u76ee\u524d\u7684\u9ed8\u8ba4\u96c6\u7fa4\u89d2\u8272\u4e3a Cluster Admin \uff08\u4e0d\u5177\u5907\u96c6\u7fa4\u7684\u521b\u5efa\u3001\u5220\u9664\u6743\u9650\uff09\u3002

                                                                          "},{"location":"end-user/kpanda/permissions/permission-brief.html#cluster-admin","title":"Cluster Admin","text":"

                                                                          Cluster Admin \u5177\u6709\u4ee5\u4e0b\u6743\u9650\uff1a

                                                                          • \u53ef\u7ba1\u7406\u3001\u7f16\u8f91\u3001\u67e5\u770b\u5bf9\u5e94\u96c6\u7fa4

                                                                          • \u7ba1\u7406\u3001\u7f16\u8f91\u3001\u67e5\u770b \u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u6240\u6709\u5de5\u4f5c\u8d1f\u8f7d\u53ca\u96c6\u7fa4\u5185\u6240\u6709\u8d44\u6e90

                                                                          • \u53ef\u6388\u6743\u7528\u6237\u4e3a\u96c6\u7fa4\u5185\u89d2\u8272 (Cluster Admin\u3001NS Admin\u3001NS Editor\u3001NS Viewer)

                                                                          \u8be5\u96c6\u7fa4\u89d2\u8272\u7684 YAML \u793a\u4f8b\u5982\u4e0b\uff1a

                                                                          apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:49Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-cluster-admin\n  resourceVersion: \"15168\"\n  uid: f8f86d42-d5ef-47aa-b284-097615795076\nrules:\n- apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n- nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'\n
                                                                          "},{"location":"end-user/kpanda/permissions/permission-brief.html#_3","title":"\u547d\u540d\u7a7a\u95f4\u6743\u9650","text":"

                                                                          \u547d\u540d\u7a7a\u95f4\u6743\u9650\u662f\u57fa\u4e8e Kubernetes RBAC \u80fd\u529b\u7684\u6388\u6743\uff0c\u53ef\u4ee5\u5b9e\u73b0\u4e0d\u540c\u7684\u7528\u6237/\u7528\u6237\u7ec4\u5bf9\u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u8d44\u6e90\u5177\u6709\u4e0d\u540c\u7684\u64cd\u4f5c\u6743\u9650(\u5305\u62ec Kubernetes API \u6743\u9650)\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\uff1aKubernetes RBAC\u3002\u76ee\u524d\u5bb9\u5668\u7ba1\u7406\u7684\u9ed8\u8ba4\u89d2\u8272\u4e3a\uff1aNS Admin\u3001NS Editor\u3001NS Viewer\u3002

                                                                          "},{"location":"end-user/kpanda/permissions/permission-brief.html#ns-admin","title":"NS Admin","text":"

                                                                          NS Admin \u5177\u6709\u4ee5\u4e0b\u6743\u9650\uff1a

                                                                          • \u53ef\u67e5\u770b\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4
                                                                          • \u7ba1\u7406\u3001\u7f16\u8f91\u3001\u67e5\u770b \u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u6240\u6709\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u53ca\u81ea\u5b9a\u4e49\u8d44\u6e90
                                                                          • \u53ef\u6388\u6743\u7528\u6237\u4e3a\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4\u89d2\u8272 (NS Editor\u3001NS Viewer)

                                                                          \u8be5\u96c6\u7fa4\u89d2\u8272\u7684 YAML \u793a\u4f8b\u5982\u4e0b\uff1a

                                                                          apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:49Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-admin\n  resourceVersion: \"15173\"\n  uid: 69f64c7e-70e7-4c7c-a3e0-053f507f2bc3\nrules:\n- apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n- nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'    \n
                                                                          "},{"location":"end-user/kpanda/permissions/permission-brief.html#ns-editor","title":"NS Editor","text":"

                                                                          NS Editor \u5177\u6709\u4ee5\u4e0b\u6743\u9650\uff1a

                                                                          • \u53ef\u67e5\u770b\u5bf9\u5e94\u6709\u6743\u9650\u7684\u547d\u540d\u7a7a\u95f4
                                                                          • \u7ba1\u7406\u3001\u7f16\u8f91\u3001\u67e5\u770b \u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u6240\u6709\u5de5\u4f5c\u8d1f\u8f7d
                                                                          \u70b9\u51fb\u67e5\u770b\u96c6\u7fa4\u89d2\u8272\u7684 YAML \u793a\u4f8b
                                                                          apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:50Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-edit\n  resourceVersion: \"15175\"\n  uid: ca9e690e-96c0-4978-8915-6e4c00c748fe\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - configmaps\n  - endpoints\n  - persistentvolumeclaims\n  - persistentvolumeclaims/status\n  - pods\n  - replicationcontrollers\n  - replicationcontrollers/scale\n  - serviceaccounts\n  - services\n  - services/status\n  verbs:\n  - '*'\n- apiGroups:\n  - \"\"\n  resources:\n  - bindings\n  - events\n  - limitranges\n  - namespaces/status\n  - pods/log\n  - pods/status\n  - replicationcontrollers/status\n  - resourcequotas\n  - resourcequotas/status\n  verbs:\n  - '*'\n- apiGroups:\n  - \"\"\n  resources:\n  - namespaces\n  verbs:\n  - '*'\n- apiGroups:\n  - apps\n  resources:\n  - controllerrevisions\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - statefulsets\n  - statefulsets/scale\n  - statefulsets/status\n  verbs:\n  - '*'\n- apiGroups:\n  - autoscaling\n  resources:\n  - horizontalpodautoscalers\n  - horizontalpodautoscalers/status\n  verbs:\n  - '*'\n- apiGroups:\n  - batch\n  resources:\n  - cronjobs\n  - cronjobs/status\n  - jobs\n  - jobs/status\n  verbs:\n  - '*'\n- apiGroups:\n  - extensions\n  resources:\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - replicationcontrollers/scale\n  verbs:\n  - '*'\n- apiGroups:\n  - policy\n  resources:\n  - poddisruptionbudgets\n  - poddisruptionbudgets/status\n  verbs:\n  - '*'\n- apiGroups:\n  - networking.k8s.io\n  resources:\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  verbs:\n  - '*'      \n
                                                                          "},{"location":"end-user/kpanda/permissions/permission-brief.html#ns-viewer","title":"NS Viewer","text":"

                                                                          NS Viewer \u5177\u6709\u4ee5\u4e0b\u6743\u9650\uff1a

                                                                          • \u53ef\u67e5\u770b\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4
                                                                          • \u53ef\u67e5\u770b\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4\u4e0b\u7684\u6240\u6709\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u53ca\u81ea\u5b9a\u4e49\u8d44\u6e90
                                                                          \u70b9\u51fb\u67e5\u770b\u96c6\u7fa4\u89d2\u8272\u7684 YAML \u793a\u4f8b
                                                                          apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:50Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-view\n  resourceVersion: \"15183\"\n  uid: 853888fd-6ee8-42ac-b91e-63923918baf8\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - configmaps\n  - endpoints\n  - persistentvolumeclaims\n  - persistentvolumeclaims/status\n  - pods\n  - replicationcontrollers\n  - replicationcontrollers/scale\n  - serviceaccounts\n  - services\n  - services/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - \"\"\n  resources:\n  - bindings\n  - events\n  - limitranges\n  - namespaces/status\n  - pods/log\n  - pods/status\n  - replicationcontrollers/status\n  - resourcequotas\n  - resourcequotas/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - \"\"\n  resources:\n  - namespaces\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - apps\n  resources:\n  - controllerrevisions\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - statefulsets\n  - statefulsets/scale\n  - statefulsets/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - autoscaling\n  resources:\n  - horizontalpodautoscalers\n  - horizontalpodautoscalers/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - batch\n  resources:\n  - cronjobs\n  - cronjobs/status\n  - jobs\n  - jobs/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - extensions\n  resources:\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - replicationcontrollers/scale\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - policy\n  resources:\n  - poddisruptionbudgets\n  - poddisruptionbudgets/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - networking.k8s.io\n  resources:\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  verbs:\n  - get\n  - list\n  - watch \n
                                                                          "},{"location":"end-user/kpanda/permissions/permission-brief.html#faq","title":"\u6743\u9650 FAQ","text":"
                                                                          1. \u5168\u5c40\u6743\u9650\u548c\u5bb9\u5668\u7ba1\u7406\u6743\u9650\u7ba1\u7406\u7684\u5173\u7cfb\uff1f

                                                                            \u7b54\uff1a\u5168\u5c40\u6743\u9650\u4ec5\u6388\u6743\u4e3a\u7c97\u7c92\u5ea6\u6743\u9650\uff0c\u53ef\u7ba1\u7406\u6240\u6709\u96c6\u7fa4\u7684\u521b\u5efa\u3001\u7f16\u8f91\u3001\u5220\u9664\uff1b\u800c\u5bf9\u4e8e\u7ec6\u7c92\u5ea6\u7684\u6743\u9650\uff0c\u5982\u5355\u4e2a\u96c6\u7fa4\u7684\u7ba1\u7406\u6743\u9650\uff0c\u5355\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u7ba1\u7406\u3001\u7f16\u8f91\u3001\u5220\u9664\u6743\u9650\uff0c\u9700\u8981\u57fa\u4e8e Kubernetes RBAC \u7684\u5bb9\u5668\u7ba1\u7406\u6743\u9650\u8fdb\u884c\u5b9e\u73b0\u3002 \u4e00\u822c\u6743\u9650\u7684\u7528\u6237\u4ec5\u9700\u8981\u5728\u5bb9\u5668\u7ba1\u7406\u4e2d\u8fdb\u884c\u6388\u6743\u5373\u53ef\u3002

                                                                          2. \u76ee\u524d\u4ec5\u652f\u6301\u56db\u4e2a\u9ed8\u8ba4\u89d2\u8272\uff0c\u540e\u53f0\u81ea\u5b9a\u4e49\u89d2\u8272\u7684 RoleBinding \u4ee5\u53ca ClusterRoleBinding \uff08Kubernetes \u7ec6\u7c92\u5ea6\u7684 RBAC\uff09\u662f\u5426\u4e5f\u80fd\u751f\u6548\uff1f

                                                                            \u7b54\uff1a\u76ee\u524d\u81ea\u5b9a\u4e49\u6743\u9650\u6682\u65f6\u65e0\u6cd5\u901a\u8fc7\u56fe\u5f62\u754c\u9762\u8fdb\u884c\u7ba1\u7406\uff0c\u4f46\u662f\u901a\u8fc7 kubectl \u521b\u5efa\u7684\u6743\u9650\u89c4\u5219\u540c\u6837\u80fd\u751f\u6548\u3002
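
For instance, a rule like the following, applied with kubectl rather than through the UI, takes effect in the cluster as normal. This is a minimal sketch: the role name, namespace, and user below are illustrative, not values from the platform.

```yaml
# Apply with: kubectl apply -f custom-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: pod-reader          # illustrative role name
  namespace: demo-ns        # illustrative namespace
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: pod-reader-binding
  namespace: demo-ns
subjects:
- kind: User
  name: example-user        # illustrative user name
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: pod-reader
  apiGroup: rbac.authorization.k8s.io
```

The binding grants example-user read-only access to Pods in demo-ns; it simply will not be visible or editable in the graphical interface.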

                                                                          "},{"location":"end-user/kpanda/scale/create-hpa.html","title":"\u57fa\u4e8e\u5185\u7f6e\u6307\u6807\u521b\u5efa HPA","text":"

                                                                          \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301 Pod \u8d44\u6e90\u57fa\u4e8e\u6307\u6807\u8fdb\u884c\u5f39\u6027\u4f38\u7f29\uff08Horizontal Pod Autoscaling, HPA\uff09\u3002 \u7528\u6237\u53ef\u4ee5\u901a\u8fc7\u8bbe\u7f6e CPU \u5229\u7528\u7387\u3001\u5185\u5b58\u7528\u91cf\u53ca\u81ea\u5b9a\u4e49\u6307\u6807\u6307\u6807\u6765\u52a8\u6001\u8c03\u6574 Pod \u8d44\u6e90\u7684\u526f\u672c\u6570\u91cf\u3002 \u4f8b\u5982\uff0c\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u8bbe\u7f6e\u57fa\u4e8e CPU \u5229\u7528\u7387\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u540e\uff0c\u5f53 Pod \u7684 CPU \u5229\u7528\u7387\u8d85\u8fc7/\u4f4e\u4e8e\u60a8\u8bbe\u7f6e\u7684\u6307\u6807\u9600\u503c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u63a7\u5236\u5668\u5c06\u4f1a\u81ea\u52a8\u589e\u52a0/\u8f83\u5c11 Pod \u526f\u672c\u6570\u3002

                                                                          \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u57fa\u4e8e\u5185\u7f6e\u6307\u6807\u7684\u5f39\u6027\u4f38\u7f29\u3002

                                                                          Note

                                                                          1. HPA \u4ec5\u9002\u7528\u4e8e Deployment \u548c StatefulSet\uff0c\u6bcf\u4e2a\u5de5\u4f5c\u8d1f\u8f7d\u53ea\u80fd\u521b\u5efa\u4e00\u4e2a HPA\u3002
                                                                          2. \u5982\u679c\u57fa\u4e8e CPU \u5229\u7528\u7387\u521b\u5efa HPA \u7b56\u7565\uff0c\u5fc5\u987b\u9884\u5148\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u8bbe\u7f6e\u914d\u7f6e\u9650\u5236\uff08Limit\uff09\uff0c\u5426\u5219\u65e0\u6cd5\u8ba1\u7b97 CPU \u5229\u7528\u7387\u3002
                                                                          3. \u5982\u679c\u540c\u65f6\u4f7f\u7528\u5185\u7f6e\u6307\u6807\u548c\u591a\u79cd\u81ea\u5b9a\u4e49\u6307\uff0cHPA \u4f1a\u6839\u636e\u591a\u9879\u6307\u6807\u5206\u522b\u8ba1\u7b97\u6240\u9700\u4f38\u7f29\u526f\u672c\u6570\uff0c\u53d6\u8f83\u5927\u503c\uff08\u4f46\u4e0d\u4f1a\u8d85\u8fc7\u8bbe\u7f6e HPA \u7b56\u7565\u65f6\u914d\u7f6e\u7684\u6700\u5927\u526f\u672c\u6570\uff09\u8fdb\u884c\u5f39\u6027\u4f38\u7f29\u3002
                                                                          "},{"location":"end-user/kpanda/scale/create-hpa.html#_1","title":"\u5185\u7f6e\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565","text":"

                                                                          \u7cfb\u7edf\u5185\u7f6e\u4e86 CPU \u548c\u5185\u5b58\u4e24\u79cd\u5f39\u6027\u4f38\u7f29\u6307\u6807\u4ee5\u6ee1\u8db3\u7528\u6237\u7684\u57fa\u7840\u4e1a\u52a1\u4f7f\u7528\u573a\u666f\u3002

                                                                          "},{"location":"end-user/kpanda/scale/create-hpa.html#_2","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                                          \u5728\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u5185\u7f6e\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                                          • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                                          • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u521b\u5efa\u6216\u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u521b\u5efa\u3002

                                                                          • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                                          • \u5df2\u5b8c\u6210 metrics-server \u63d2\u4ef6\u5b89\u88c5 \u3002

                                                                          "},{"location":"end-user/kpanda/scale/create-hpa.html#_3","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                                          \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u5185\u7f6e\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u3002

                                                                          1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \u8fdb\u5165\u96c6\u7fa4\u5217\u8868\u9875\u9762\u3002\u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                                                          2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d \u8fdb\u5165\u5de5\u4f5c\u8d1f\u8f7d\u5217\u8868\u540e\uff0c\u70b9\u51fb\u4e00\u4e2a\u8d1f\u8f7d\u540d\u79f0\uff0c\u8fdb\u5165 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5 \u9875\u9762\u3002

                                                                          3. \u70b9\u51fb \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u7684\u5f39\u6027\u4f38\u7f29\u914d\u7f6e\u60c5\u51b5\u3002

                                                                          4. \u786e\u8ba4\u96c6\u7fa4\u5df2\u5b89\u88c5\u4e86 metrics-server \u63d2\u4ef6\uff0c\u4e14\u63d2\u4ef6\u8fd0\u884c\u72b6\u6001\u4e3a\u6b63\u5e38\u540e\uff0c\u5373\u53ef\u70b9\u51fb \u65b0\u5efa\u4f38\u7f29 \u6309\u94ae\u3002

                                                                          5. \u521b\u5efa\u81ea\u5b9a\u4e49\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u53c2\u6570\u3002

                                                                            • \u7b56\u7565\u540d\u79f0\uff1a\u8f93\u5165\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u7684\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 hpa-my-dep\u3002
                                                                            • \u547d\u540d\u7a7a\u95f4\uff1a\u8d1f\u8f7d\u6240\u5728\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                                                            • \u5de5\u4f5c\u8d1f\u8f7d\uff1a\u6267\u884c\u5f39\u6027\u4f38\u7f29\u7684\u5de5\u4f5c\u8d1f\u8f7d\u5bf9\u8c61\u3002
                                                                            • \u76ee\u6807 CPU \u5229\u7528\u7387\uff1a\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u4e0b Pod \u7684 CPU \u4f7f\u7528\u7387\u3002\u8ba1\u7b97\u65b9\u5f0f\u4e3a\uff1a\u5de5\u4f5c\u8d1f\u8f7d\u4e0b\u6240\u6709\u7684 Pod \u8d44\u6e90 / \u5de5\u4f5c\u8d1f\u8f7d\u7684\u8bf7\u6c42\uff08request\uff09\u503c\u3002\u5f53\u5b9e\u9645 CPU \u7528\u91cf\u5927\u4e8e/\u5c0f\u4e8e\u76ee\u6807\u503c\u65f6\uff0c\u7cfb\u7edf\u81ea\u52a8\u51cf\u5c11/\u589e\u52a0 Pod \u526f\u672c\u6570\u91cf\u3002
                                                                            • \u76ee\u6807\u5185\u5b58\u7528\u91cf\uff1a\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u4e0b\u7684 Pod \u7684\u5185\u5b58\u7528\u91cf\u3002\u5f53\u5b9e\u9645\u5185\u5b58\u7528\u91cf\u5927\u4e8e/\u5c0f\u4e8e\u76ee\u6807\u503c\u65f6\uff0c\u7cfb\u7edf\u81ea\u52a8\u51cf\u5c11/\u589e\u52a0 Pod \u526f\u672c\u6570\u91cf\u3002
                                                                            • \u526f\u672c\u8303\u56f4\uff1aPod \u526f\u672c\u6570\u7684\u5f39\u6027\u4f38\u7f29\u8303\u56f4\u3002\u9ed8\u8ba4\u533a\u95f4\u4e3a\u4e3a 1 - 10\u3002
                                                                          6. \u5b8c\u6210\u53c2\u6570\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u6309\u94ae\uff0c\u81ea\u52a8\u8fd4\u56de\u5f39\u6027\u4f38\u7f29\u8be6\u60c5\u9875\u9762\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u6267\u884c\u7f16\u8f91\u3001\u5220\u9664\u64cd\u4f5c\uff0c\u8fd8\u53ef\u4ee5\u67e5\u770b\u76f8\u5173\u4e8b\u4ef6\u3002
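
For reference, the policy configured in the steps above corresponds to a standard Kubernetes HorizontalPodAutoscaler object. The following is a minimal sketch assuming a Deployment named my-dep in the default namespace and a 60% target CPU utilization; all names and values are illustrative:

```yaml
apiVersion: autoscaling/v2          # autoscaling/v2 is available on Kubernetes 1.23+
kind: HorizontalPodAutoscaler
metadata:
  name: hpa-my-dep
  namespace: default                # the workload's namespace
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: my-dep                    # illustrative workload name
  minReplicas: 1                    # lower bound of the replica range
  maxReplicas: 10                   # upper bound of the replica range
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 60      # target CPU utilization in percent
```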

                                                                          "},{"location":"end-user/kpanda/scale/create-vpa.html","title":"\u521b\u5efa VPA","text":"

                                                                          \u5bb9\u5668\u5782\u76f4\u6269\u7f29\u5bb9\u7b56\u7565\uff08Vertical Pod Autoscaler, VPA\uff09\u901a\u8fc7\u76d1\u63a7 Pod \u5728\u4e00\u6bb5\u65f6\u95f4\u5185\u7684\u8d44\u6e90\u7533\u8bf7\u548c\u7528\u91cf\uff0c \u8ba1\u7b97\u51fa\u5bf9\u8be5 Pod \u800c\u8a00\u6700\u9002\u5408\u7684 CPU \u548c\u5185\u5b58\u8bf7\u6c42\u503c\u3002\u4f7f\u7528 VPA \u53ef\u4ee5\u66f4\u52a0\u5408\u7406\u5730\u4e3a\u96c6\u7fa4\u4e0b\u6bcf\u4e2a Pod \u5206\u914d\u8d44\u6e90\uff0c\u63d0\u9ad8\u96c6\u7fa4\u7684\u6574\u4f53\u8d44\u6e90\u5229\u7528\u7387\uff0c\u907f\u514d\u96c6\u7fa4\u8d44\u6e90\u6d6a\u8d39\u3002

                                                                          \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u901a\u8fc7\u5bb9\u5668\u5782\u76f4\u6269\u7f29\u5bb9\u7b56\u7565\uff08Vertical Pod Autoscaler, VPA\uff09\uff0c\u57fa\u4e8e\u6b64\u529f\u80fd\u53ef\u4ee5\u6839\u636e\u5bb9\u5668\u8d44\u6e90\u7684\u4f7f\u7528\u60c5\u51b5\u52a8\u6001\u8c03\u6574 Pod \u8bf7\u6c42\u503c\u3002 \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u652f\u6301\u901a\u8fc7\u624b\u52a8\u548c\u81ea\u52a8\u4e24\u79cd\u65b9\u5f0f\u6765\u4fee\u6539\u8d44\u6e90\u8bf7\u6c42\u503c\uff0c\u60a8\u53ef\u4ee5\u6839\u636e\u5b9e\u9645\u9700\u8981\u8fdb\u884c\u914d\u7f6e\u3002

                                                                          \u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e Pod \u5782\u76f4\u4f38\u7f29\u3002

                                                                          Warning

                                                                          \u4f7f\u7528 VPA \u4fee\u6539 Pod \u8d44\u6e90\u8bf7\u6c42\u4f1a\u89e6\u53d1 Pod \u91cd\u542f\u3002\u7531\u4e8e Kubernetes \u672c\u8eab\u7684\u9650\u5236\uff0c Pod \u91cd\u542f\u540e\u53ef\u80fd\u4f1a\u88ab\u8c03\u5ea6\u5230\u5176\u5b83\u8282\u70b9\u4e0a\u3002

                                                                          "},{"location":"end-user/kpanda/scale/create-vpa.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                                          \u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u5782\u76f4\u4f38\u7f29\u7b56\u7565\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                                          • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                                          • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u3001\u7528\u6237\u3001\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u6216\u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u3002

                                                                          • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                                          • \u5f53\u524d\u96c6\u7fa4\u5df2\u7ecf\u5b89\u88c5 metrics-server \u548c VPA \u63d2\u4ef6\u3002

                                                                          "},{"location":"end-user/kpanda/scale/create-vpa.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                                          \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u5185\u7f6e\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u3002

                                                                          1. \u5728 \u96c6\u7fa4\u5217\u8868 \u4e2d\u627e\u5230\u76ee\u524d\u96c6\u7fa4\uff0c\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\u3002

                                                                          2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5de5\u4f5c\u8d1f\u8f7d \uff0c\u627e\u5230\u9700\u8981\u521b\u5efa VPA \u7684\u8d1f\u8f7d\uff0c\u70b9\u51fb\u8be5\u8d1f\u8f7d\u7684\u540d\u79f0\u3002

                                                                            3. \u70b9\u51fb \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u7684\u5f39\u6027\u4f38\u7f29\u914d\u7f6e\uff0c\u786e\u8ba4\u5df2\u7ecf\u5b89\u88c5\u4e86\u76f8\u5173\u63d2\u4ef6\u5e76\u4e14\u63d2\u4ef6\u662f\u5426\u8fd0\u884c\u6b63\u5e38\u3002

                                                                          3. \u70b9\u51fb \u65b0\u5efa\u4f38\u7f29 \u6309\u94ae\uff0c\u5e76\u914d\u7f6e VPA \u5782\u76f4\u4f38\u7f29\u7b56\u7565\u53c2\u6570\u3002

                                                                            • \u7b56\u7565\u540d\u79f0\uff1a\u8f93\u5165\u5782\u76f4\u4f38\u7f29\u7b56\u7565\u7684\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 vpa-my-dep\u3002
                                                                            • \u4f38\u7f29\u6a21\u5f0f\uff1a\u6267\u884c\u4fee\u6539 CPU \u548c\u5185\u5b58\u8bf7\u6c42\u503c\u7684\u65b9\u5f0f\uff0c\u76ee\u524d\u5782\u76f4\u4f38\u7f29\u652f\u6301\u624b\u52a8\u548c\u81ea\u52a8\u4e24\u79cd\u4f38\u7f29\u6a21\u5f0f\u3002
                                                                              • \u624b\u52a8\u4f38\u7f29\uff1a\u5782\u76f4\u4f38\u7f29\u7b56\u7565\u8ba1\u7b97\u51fa\u63a8\u8350\u7684\u8d44\u6e90\u914d\u7f6e\u503c\u540e\uff0c\u9700\u7528\u6237\u624b\u52a8\u4fee\u6539\u5e94\u7528\u7684\u8d44\u6e90\u914d\u989d\u3002
                                                                              • \u81ea\u52a8\u4f38\u7f29\uff1a\u5782\u76f4\u4f38\u7f29\u7b56\u7565\u81ea\u52a8\u8ba1\u7b97\u548c\u4fee\u6539\u5e94\u7528\u7684\u8d44\u6e90\u914d\u989d\u3002
                                                                            • \u76ee\u6807\u5bb9\u5668\uff1a\u9009\u62e9\u9700\u8981\u8fdb\u884c\u5782\u76f4\u4f38\u7f29\u7684\u5bb9\u5668\u3002
                                                                          4. \u5b8c\u6210\u53c2\u6570\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u5b9a \u6309\u94ae\uff0c\u81ea\u52a8\u8fd4\u56de\u5f39\u6027\u4f38\u7f29\u8be6\u60c5\u9875\u9762\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u6267\u884c\u7f16\u8f91\u3001\u5220\u9664\u64cd\u4f5c\u3002
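
For reference, the policy configured above corresponds to a VerticalPodAutoscaler object from the VPA plugin. The following is a minimal sketch; the workload and container names are illustrative, and the manual mode in the UI roughly corresponds to updateMode: "Off", under which the VPA only publishes recommendations:

```yaml
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: vpa-my-dep
  namespace: default
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: my-dep                  # illustrative workload name
  updatePolicy:
    updateMode: "Auto"            # "Off" computes recommendations without applying them
  resourcePolicy:
    containerPolicies:
    - containerName: my-container # illustrative target container
```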

                                                                          Note

By default, the value of --min-replicas is 2, meaning the VPA only takes effect when the replica count is greater than 1. You can change this default behavior by modifying the --min-replicas argument of the updater.

```yaml
spec:
  containers:
  - name: updater
    args:
    - "--min-replicas=2"
```
                                                                          "},{"location":"end-user/kpanda/scale/custom-hpa.html","title":"\u57fa\u4e8e\u81ea\u5b9a\u4e49\u6307\u6807\u521b\u5efa HPA","text":"

                                                                          \u5f53\u7cfb\u7edf\u5185\u7f6e\u7684 CPU \u548c\u5185\u5b58\u4e24\u79cd\u6307\u6807\u4e0d\u80fd\u6ee1\u8db3\u60a8\u4e1a\u52a1\u7684\u5b9e\u9645\u9700\u6c42\u65f6\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u914d\u7f6e ServiceMonitoring \u6765\u6dfb\u52a0\u81ea\u5b9a\u4e49\u6307\u6807\uff0c \u5e76\u57fa\u4e8e\u81ea\u5b9a\u4e49\u6307\u6807\u5b9e\u73b0\u5f39\u6027\u4f38\u7f29\u3002\u672c\u6587\u5c06\u4ecb\u7ecd\u5982\u4f55\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u57fa\u4e8e\u81ea\u5b9a\u4e49\u6307\u6807\u8fdb\u884c\u5f39\u6027\u4f38\u7f29\u3002

                                                                          Note

                                                                          1. HPA \u4ec5\u9002\u7528\u4e8e Deployment \u548c StatefulSet\uff0c\u6bcf\u4e2a\u5de5\u4f5c\u8d1f\u8f7d\u53ea\u80fd\u521b\u5efa\u4e00\u4e2a HPA\u3002
                                                                          2. \u5982\u679c\u540c\u65f6\u4f7f\u7528\u5185\u7f6e\u6307\u6807\u548c\u591a\u79cd\u81ea\u5b9a\u4e49\u6307\uff0cHPA \u4f1a\u6839\u636e\u591a\u9879\u6307\u6807\u5206\u522b\u8ba1\u7b97\u6240\u9700\u4f38\u7f29\u526f\u672c\u6570\uff0c\u53d6\u8f83\u5927\u503c\uff08\u4f46\u4e0d\u4f1a\u8d85\u8fc7\u8bbe\u7f6e HPA \u7b56\u7565\u65f6\u914d\u7f6e\u7684\u6700\u5927\u526f\u672c\u6570\uff09\u8fdb\u884c\u5f39\u6027\u4f38\u7f29\u3002
                                                                          "},{"location":"end-user/kpanda/scale/custom-hpa.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                                          \u5728\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u81ea\u5b9a\u4e49\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                                          • \u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c \u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762
                                                                          • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3001\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u521b\u5efa\u6216\u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u521b\u5efa
                                                                          • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c \u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743
                                                                          • \u5df2\u5b89\u88c5 metrics-server \u63d2\u4ef6
                                                                          • \u5df2\u5b89\u88c5 insight-agent \u63d2\u4ef6
                                                                          • \u5df2\u5b89\u88c5 Prometheus-adapter \u63d2\u4ef6
                                                                          "},{"location":"end-user/kpanda/scale/custom-hpa.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                                          \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u914d\u7f6e\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u3002

                                                                          1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \u8fdb\u5165\u96c6\u7fa4\u5217\u8868\u9875\u9762\u3002\u70b9\u51fb\u4e00\u4e2a\u96c6\u7fa4\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                                                          2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d \u8fdb\u5165\u5de5\u4f5c\u8d1f\u8f7d\u5217\u8868\u540e\uff0c\u70b9\u51fb\u4e00\u4e2a\u8d1f\u8f7d\u540d\u79f0\uff0c\u8fdb\u5165 \u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5 \u9875\u9762\u3002

                                                                          3. \u70b9\u51fb \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u7684\u5f39\u6027\u4f38\u7f29\u914d\u7f6e\u60c5\u51b5\u3002

                                                                          4. \u786e\u8ba4\u96c6\u7fa4\u5df2\u5b89\u88c5\u4e86 metrics-server \u3001Insight\u3001Prometheus-adapter \u63d2\u4ef6\u4e14\u63d2\u4ef6\u8fd0\u884c\u72b6\u6001\u4e3a\u6b63\u5e38\u540e\uff0c\u5373\u53ef\u70b9\u51fb \u65b0\u5efa\u4f38\u7f29 \u6309\u94ae\u3002

                                                                            Note

                                                                            \u5982\u679c\u76f8\u5173\u63d2\u4ef6\u672a\u5b89\u88c5\u6216\u63d2\u4ef6\u5904\u4e8e\u5f02\u5e38\u72b6\u6001\uff0c\u60a8\u5728\u9875\u9762\u4e0a\u5c06\u65e0\u6cd5\u770b\u89c1\u521b\u5efa\u81ea\u5b9a\u4e49\u6307\u6807\u5f39\u6027\u4f38\u7f29\u5165\u53e3\u3002

                                                                          5. \u521b\u5efa\u81ea\u5b9a\u4e49\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u53c2\u6570\u3002

                                                                            • \u7b56\u7565\u540d\u79f0\uff1a\u8f93\u5165\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u7684\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 hpa-my-dep\u3002
                                                                            • \u547d\u540d\u7a7a\u95f4\uff1a\u8d1f\u8f7d\u6240\u5728\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                                                            • \u5de5\u4f5c\u8d1f\u8f7d\uff1a\u6267\u884c\u5f39\u6027\u4f38\u7f29\u7684\u5de5\u4f5c\u8d1f\u8f7d\u5bf9\u8c61\u3002
                                                                            • \u8d44\u6e90\u7c7b\u578b\uff1a\u8fdb\u884c\u76d1\u63a7\u7684\u81ea\u5b9a\u4e49\u6307\u6807\u7c7b\u578b\uff0c\u5305\u542b Pod \u548c Service \u4e24\u79cd\u7c7b\u578b\u3002
                                                                            • \u6307\u6807\uff1a\u4f7f\u7528 ServiceMonitoring \u521b\u5efa\u7684\u81ea\u5b9a\u4e49\u6307\u6807\u540d\u79f0\u6216\u7cfb\u7edf\u5185\u7f6e\u7684\u81ea\u5b9a\u4e49\u6307\u6807\u540d\u79f0\u3002
                                                                            • \u6570\u636e\u7c7b\u578b\uff1a\u7528\u4e8e\u8ba1\u7b97\u6307\u6807\u503c\u7684\u65b9\u6cd5\uff0c\u5305\u542b\u76ee\u6807\u503c\u548c\u76ee\u6807\u5e73\u5747\u503c\u4e24\u79cd\u7c7b\u578b\uff0c\u5f53\u8d44\u6e90\u7c7b\u578b\u4e3a Pod \u65f6\uff0c\u53ea\u652f\u6301\u4f7f\u7528\u76ee\u6807\u5e73\u5747\u503c\u3002
                                                                          "},{"location":"end-user/kpanda/scale/custom-hpa.html#_3","title":"\u64cd\u4f5c\u793a\u4f8b","text":"

                                                                          \u672c\u6848\u4f8b\u4ee5 Golang \u4e1a\u52a1\u7a0b\u5e8f\u4e3a\u4f8b\uff0c\u8be5\u793a\u4f8b\u7a0b\u5e8f\u66b4\u9732\u4e86 httpserver_requests_total \u6307\u6807\uff0c\u5e76\u8bb0\u5f55 HTTP \u7684\u8bf7\u6c42\uff0c\u901a\u8fc7\u8be5\u6307\u6807\u53ef\u4ee5\u8ba1\u7b97\u51fa\u4e1a\u52a1\u7a0b\u5e8f\u7684 QPS \u503c\u3002

                                                                          "},{"location":"end-user/kpanda/scale/custom-hpa.html#_4","title":"\u90e8\u7f72\u4e1a\u52a1\u7a0b\u5e8f","text":"

                                                                          \u4f7f\u7528 Deployment \u90e8\u7f72\u4e1a\u52a1\u7a0b\u5e8f\uff1a

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: httpserver
  namespace: httpserver
spec:
  replicas: 1
  selector:
    matchLabels:
      app: httpserver
  template:
    metadata:
      labels:
        app: httpserver
    spec:
      containers:
      - name: httpserver
        image: registry.imroc.cc/test/httpserver:custom-metrics
        imagePullPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
  name: httpserver
  namespace: httpserver
  labels:
    app: httpserver
  annotations:
    prometheus.io/scrape: "true"
    prometheus.io/path: "/metrics"
    prometheus.io/port: "http"
spec:
  type: ClusterIP
  ports:
  - port: 80
    protocol: TCP
    name: http
  selector:
    app: httpserver
```
                                                                          "},{"location":"end-user/kpanda/scale/custom-hpa.html#prometheus","title":"Prometheus \u91c7\u96c6\u4e1a\u52a1\u76d1\u63a7","text":"

                                                                          \u82e5\u5df2\u5b89\u88c5 insight-agent\uff0c\u53ef\u4ee5\u901a\u8fc7\u521b\u5efa ServiceMonitor \u7684 CRD \u5bf9\u8c61\u914d\u7f6e Prometheus\u3002

                                                                          \u64cd\u4f5c\u6b65\u9aa4\uff1a\u5728 \u96c6\u7fa4\u8be6\u60c5 -> \u81ea\u5b9a\u4e49\u8d44\u6e90 \u641c\u7d22\u201cservicemonitors.monitoring.coreos.com\"\uff0c\u70b9\u51fb\u540d\u79f0\u8fdb\u5165\u8be6\u60c5\u3002 \u901a\u8fc7\u521b\u5efa YAML\uff0c\u5728\u547d\u540d\u7a7a\u95f4 httpserver \u4e0b\u521b\u5efa\u5982\u4e0b\u793a\u4f8b\u7684 CRD\uff1a

```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: httpserver
  namespace: httpserver
  labels:
    operator.insight.io/managed-by: insight
spec:
  endpoints:
  - port: http
    interval: 5s
  namespaceSelector:
    matchNames:
    - httpserver
  selector:
    matchLabels:
      app: httpserver
```

                                                                          Note

If Prometheus was installed via insight, the ServiceMonitor must carry the label operator.insight.io/managed-by: insight; the label is not needed if Prometheus was installed another way.

                                                                          "},{"location":"end-user/kpanda/scale/custom-hpa.html#prometheus-adapter","title":"\u5728 prometheus-adapter \u4e2d\u914d\u7f6e\u6307\u6807\u89c4\u5219","text":"

                                                                          \u64cd\u4f5c\u6b65\u9aa4\uff1a\u5728 \u96c6\u7fa4\u8be6\u60c5 -> Helm \u5e94\u7528 \u641c\u7d22 \u201cprometheus-adapter\"\uff0c\u901a\u8fc7\u64cd\u4f5c\u680f\u8fdb\u5165\u66f4\u65b0\u9875\u9762\uff0c\u5728 YAML \u4e2d\u914d\u7f6e\u81ea\u5b9a\u4e49\u6307\u6807\uff0c\u793a\u4f8b\u5982\u4e0b\uff1a

```yaml
rules:
  custom:
    - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
      name:
        as: httpserver_requests_qps
        matches: httpserver_requests_total
      resources:
        template: <<.Resource>>
      seriesQuery: httpserver_requests_total
```

                                                                          "},{"location":"end-user/kpanda/scale/custom-hpa.html#_5","title":"\u521b\u5efa\u81ea\u5b9a\u4e49\u6307\u6807\u5f39\u6027\u4f38\u7f29\u7b56\u7565\u53c2\u6570","text":"

                                                                          \u6309\u7167\u4e0a\u8ff0\u6b65\u9aa4\u5728 Deployment \u4e2d\u627e\u5230\u5e94\u7528\u7a0b\u5e8f httpserver \u5e76\u901a\u8fc7\u81ea\u5b9a\u4e49\u6307\u6807\u521b\u5efa\u5f39\u6027\u4f38\u7f29\u3002
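
The resulting policy is equivalent to an HPA that consumes the metric exposed by prometheus-adapter. The following is a minimal sketch; the replica bounds and the 50 QPS per-Pod target are illustrative:

```yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: httpserver
  namespace: httpserver
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: httpserver
  minReplicas: 1
  maxReplicas: 10
  metrics:
  - type: Pods
    pods:
      metric:
        name: httpserver_requests_qps   # exposed by the prometheus-adapter rule above
      target:
        type: AverageValue              # the Pod resource type supports target average value only
        averageValue: "50"              # illustrative target QPS per Pod
```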

                                                                          "},{"location":"end-user/kpanda/scale/hpa-cronhpa-compatibility-rules.html","title":"HPA \u548c CronHPA \u517c\u5bb9\u89c4\u5219","text":"

                                                                          HPA \u5168\u79f0\u4e3a HorizontalPodAutoscaler\uff0c\u5373 Pod \u6c34\u5e73\u81ea\u52a8\u4f38\u7f29\u3002

                                                                          CronHPA \u5168\u79f0\u4e3a Cron HorizontalPodAutoscaler\uff0c\u5373 Pod \u5b9a\u65f6\u7684\u6c34\u5e73\u81ea\u52a8\u4f38\u7f29\u3002

                                                                          "},{"location":"end-user/kpanda/scale/hpa-cronhpa-compatibility-rules.html#cronhpa-hpa","title":"CronHPA \u548c HPA \u517c\u5bb9\u51b2\u7a81","text":"

                                                                          \u5b9a\u65f6\u4f38\u7f29 CronHPA \u901a\u8fc7\u8bbe\u7f6e\u5b9a\u65f6\u7684\u65b9\u5f0f\u89e6\u53d1\u5bb9\u5668\u7684\u6c34\u5e73\u526f\u672c\u4f38\u7f29\u3002\u4e3a\u4e86\u9632\u6b62\u7a81\u53d1\u7684\u6d41\u91cf\u51b2\u51fb\u7b49\u72b6\u51b5\uff0c \u60a8\u53ef\u80fd\u5df2\u7ecf\u914d\u7f6e HPA \u4fdd\u969c\u5e94\u7528\u7684\u6b63\u5e38\u8fd0\u884c\u3002\u5982\u679c\u540c\u65f6\u68c0\u6d4b\u5230\u4e86 HPA \u548c CronHPA \u7684\u5b58\u5728\uff0c \u7531\u4e8e CronHPA \u548c HPA \u76f8\u4e92\u72ec\u7acb\u65e0\u6cd5\u611f\u77e5\uff0c\u5c31\u4f1a\u51fa\u73b0\u4e24\u4e2a\u63a7\u5236\u5668\u5404\u81ea\u5de5\u4f5c\uff0c\u540e\u6267\u884c\u7684\u64cd\u4f5c\u4f1a\u8986\u76d6\u5148\u6267\u884c\u7684\u64cd\u4f5c\u3002

                                                                          \u5bf9\u6bd4 CronHPA \u548c HPA \u7684\u5b9a\u4e49\u6a21\u677f\uff0c\u53ef\u4ee5\u89c2\u5bdf\u5230\u4ee5\u4e0b\u51e0\u70b9\uff1a

                                                                          • CronHPA \u548c HPA \u90fd\u662f\u901a\u8fc7 scaleTargetRef \u5b57\u6bb5\u6765\u83b7\u53d6\u4f38\u7f29\u5bf9\u8c61\u3002
                                                                          • CronHPA \u901a\u8fc7 jobs \u7684 crontab \u89c4\u5219\u5b9a\u65f6\u4f38\u7f29\u526f\u672c\u6570\u3002
                                                                          • HPA \u901a\u8fc7\u8d44\u6e90\u5229\u7528\u7387\u5224\u65ad\u4f38\u7f29\u60c5\u51b5\u3002

                                                                          Note

                                                                          \u5982\u679c\u540c\u65f6\u8bbe\u7f6e CronHPA \u548c HPA\uff0c\u4f1a\u51fa\u73b0 CronHPA \u548c HPA \u540c\u65f6\u64cd\u4f5c\u4e00\u4e2a scaleTargetRef \u7684\u573a\u666f\u3002

                                                                          "},{"location":"end-user/kpanda/scale/hpa-cronhpa-compatibility-rules.html#cronhpa-hpa_1","title":"CronHPA \u548c HPA \u517c\u5bb9\u65b9\u6848","text":"

                                                                          \u4ece\u4e0a\u6587\u53ef\u77e5\uff0cCronHPA \u548c HPA \u540c\u65f6\u4f7f\u7528\u4f1a\u5bfc\u81f4\u540e\u6267\u884c\u7684\u64cd\u4f5c\u8986\u76d6\u5148\u6267\u884c\u64cd\u4f5c\u7684\u672c\u8d28\u539f\u56e0\u662f\u4e24\u4e2a\u63a7\u5236\u5668\u65e0\u6cd5\u76f8\u4e92\u611f\u77e5\uff0c \u90a3\u4e48\u53ea\u9700\u8981\u8ba9 CronHPA \u611f\u77e5 HPA \u7684\u5f53\u524d\u72b6\u6001\u5c31\u80fd\u89e3\u51b3\u51b2\u7a81\u95ee\u9898\u3002

                                                                          \u7cfb\u7edf\u4f1a\u5c06 HPA \u4f5c\u4e3a\u5b9a\u65f6\u4f38\u7f29 CronHPA \u7684\u6269\u7f29\u5bb9\u5bf9\u8c61\uff0c\u4ece\u800c\u5b9e\u73b0\u5bf9\u8be5 HPA \u5b9a\u4e49\u7684 Deployment \u5bf9\u8c61\u7684\u5b9a\u65f6\u6269\u7f29\u5bb9\u3002

                                                                          HPA \u7684\u5b9a\u4e49\u5c06 Deployment \u914d\u7f6e\u5728 scaleTargetRef \u5b57\u6bb5\u4e0b\uff0c\u7136\u540e Deployment \u901a\u8fc7\u81ea\u8eab\u5b9a\u4e49\u67e5\u627e ReplicaSet\uff0c\u6700\u540e\u901a\u8fc7 ReplicaSet \u8c03\u6574\u771f\u5b9e\u7684\u526f\u672c\u6570\u76ee\u3002

                                                                          \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5c06 CronHPA \u4e2d\u7684 scaleTargetRef \u8bbe\u7f6e\u4e3a HPA \u5bf9\u8c61\uff0c\u7136\u540e\u901a\u8fc7 HPA \u5bf9\u8c61\u6765\u5bfb\u627e\u771f\u5b9e\u7684 scaleTargetRef\uff0c\u4ece\u800c\u8ba9 CronHPA \u611f\u77e5 HPA \u7684\u5f53\u524d\u72b6\u6001\u3002

                                                                          CronHPA \u4f1a\u901a\u8fc7\u8c03\u6574 HPA \u7684\u65b9\u5f0f\u611f\u77e5 HPA\u3002CronHPA \u901a\u8fc7\u8bc6\u522b\u8981\u8fbe\u5230\u7684\u526f\u672c\u6570\u4e0e\u5f53\u524d\u526f\u672c\u6570\u4e24\u8005\u95f4\u7684\u8f83\u5927\u503c\uff0c \u5224\u65ad\u662f\u5426\u9700\u8981\u6269\u7f29\u5bb9\u53ca\u4fee\u6539 HPA \u7684\u4e0a\u9650\uff1bCronHPA \u901a\u8fc7\u8bc6\u522b CronHPA \u8981\u8fbe\u5230\u7684\u526f\u672c\u6570\u4e0e HPA \u7684\u914d\u7f6e\u95f4\u7684\u8f83\u5c0f\u503c\uff0c\u5224\u65ad\u662f\u5426\u9700\u8981\u4fee\u6539 HPA \u7684\u4e0b\u9650\u3002
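
As a sketch of this pattern, a CronHPA can point its scaleTargetRef at the HPA instead of the Deployment. The manifest below assumes the CRD shape shipped with the kubernetes-cronhpa-controller plugin; the API group, the six-field cron schedule format, and the sizes are illustrative and may differ between plugin versions:

```yaml
apiVersion: autoscaling.alibabacloud.com/v1beta1   # assumed CRD group of kubernetes-cronhpa-controller
kind: CronHorizontalPodAutoscaler
metadata:
  name: cronhpa-sample
  namespace: default
spec:
  scaleTargetRef:
    apiVersion: autoscaling/v2     # the CronHPA scales the HPA itself, not the Deployment
    kind: HorizontalPodAutoscaler
    name: hpa-my-dep               # illustrative HPA name
  jobs:
  - name: scale-up-morning
    schedule: "0 0 8 * * *"        # second minute hour day month week
    targetSize: 10                 # desired replicas at the scheduled time
```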

                                                                          "},{"location":"end-user/kpanda/scale/install-cronhpa.html","title":"\u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6","text":"

                                                                          \u5bb9\u5668\u526f\u672c\u5b9a\u65f6\u6c34\u5e73\u6269\u7f29\u5bb9\u7b56\u7565\uff08CronHPA\uff09\u80fd\u591f\u4e3a\u5468\u671f\u6027\u9ad8\u5e76\u53d1\u5e94\u7528\u63d0\u4f9b\u7a33\u5b9a\u7684\u8ba1\u7b97\u8d44\u6e90\u4fdd\u969c\uff0c kubernetes-cronhpa-controller \u5219\u662f\u5b9e\u73b0 CronHPA \u7684\u5173\u952e\u7ec4\u4ef6\u3002

                                                                          \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6\u3002

                                                                          Note

                                                                          \u4e3a\u4e86\u4f7f\u7528 CornHPA\uff0c\u4e0d\u4ec5\u9700\u8981\u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6\uff0c\u8fd8\u8981\u5b89\u88c5 metrics-server \u63d2\u4ef6\u3002

                                                                          "},{"location":"end-user/kpanda/scale/install-cronhpa.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                                          \u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                                          • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                                          • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u3002

                                                                          • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                                          "},{"location":"end-user/kpanda/scale/install-cronhpa.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                                          \u53c2\u8003\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 kubernetes-cronhpa-controller \u63d2\u4ef6\u3002

                                                                          1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u627e\u5230\u9700\u8981\u5b89\u88c5\u6b64\u63d2\u4ef6\u7684\u76ee\u6807\u96c6\u7fa4\uff0c\u70b9\u51fb\u8be5\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u70b9\u51fb \u5de5\u4f5c\u8d1f\u8f7d -> \u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d \uff0c\u70b9\u51fb\u76ee\u6807\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u3002

                                                                          2. \u5728\u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u5728 CronHPA \u53f3\u4fa7\u70b9\u51fb \u5b89\u88c5 \u3002

                                                                          3. \u9605\u8bfb\u8be5\u63d2\u4ef6\u7684\u76f8\u5173\u4ecb\u7ecd\uff0c\u9009\u62e9\u7248\u672c\u540e\u70b9\u51fb \u5b89\u88c5 \u6309\u94ae\u3002\u63a8\u8350\u5b89\u88c5 1.3.0 \u6216\u66f4\u9ad8\u7248\u672c\u3002

                                                                          4. \u53c2\u8003\u4ee5\u4e0b\u8bf4\u660e\u914d\u7f6e\u53c2\u6570\u3002

                                                                            • \u540d\u79f0\uff1a\u8f93\u5165\u63d2\u4ef6\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 kubernetes-cronhpa-controller\u3002
                                                                            • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u63d2\u4ef6\u5b89\u88c5\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u6b64\u5904\u4ee5 default \u4e3a\u4f8b\u3002
                                                                            • \u7248\u672c\uff1a\u63d2\u4ef6\u7684\u7248\u672c\uff0c\u6b64\u5904\u4ee5 1.3.0 \u7248\u672c\u4e3a\u4f8b\u3002
                                                                            • \u5c31\u7eea\u7b49\u5f85\uff1a\u542f\u7528\u540e\uff0c\u5c06\u7b49\u5f85\u5e94\u7528\u4e0b\u7684\u6240\u6709\u5173\u8054\u8d44\u6e90\u90fd\u5904\u4e8e\u5c31\u7eea\u72b6\u6001\uff0c\u624d\u4f1a\u6807\u8bb0\u5e94\u7528\u5b89\u88c5\u6210\u529f\u3002
                                                                            • \u5931\u8d25\u5220\u9664\uff1a\u5982\u679c\u63d2\u4ef6\u5b89\u88c5\u5931\u8d25\uff0c\u5219\u5220\u9664\u5df2\u7ecf\u5b89\u88c5\u7684\u5173\u8054\u8d44\u6e90\u3002\u5f00\u542f\u540e\uff0c\u5c06\u9ed8\u8ba4\u540c\u6b65\u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u3002
                                                                            • \u8be6\u60c5\u65e5\u5fd7\uff1a\u5f00\u542f\u540e\uff0c\u5c06\u8bb0\u5f55\u5b89\u88c5\u8fc7\u7a0b\u7684\u8be6\u7ec6\u65e5\u5fd7\u3002

                                                                            Note

                                                                            \u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u548c/\u6216 \u5931\u8d25\u5220\u9664 \u540e\uff0c\u5e94\u7528\u9700\u8981\u8f83\u957f\u65f6\u95f4\u624d\u4f1a\u88ab\u6807\u8bb0\u4e3a\u201c\u8fd0\u884c\u4e2d\u201d\u72b6\u6001\u3002

                                                                          5. \u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \uff0c\u7cfb\u7edf\u5c06\u81ea\u52a8\u8df3\u8f6c\u81f3 Helm \u5e94\u7528 \u5217\u8868\u9875\u9762\u3002\u7a0d\u7b49\u51e0\u5206\u949f\u540e\u5237\u65b0\u9875\u9762\u4f5c\uff0c\u5373\u53ef\u770b\u5230\u521a\u521a\u5b89\u88c5\u7684\u5e94\u7528\u3002

                                                                            Warning

                                                                            \u5982\u9700\u5220\u9664 kubernetes-cronhpa-controller \u63d2\u4ef6\uff0c\u5e94\u5728 Helm \u5e94\u7528 \u5217\u8868\u9875\u9762\u624d\u80fd\u5c06\u5176\u5f7b\u5e95\u5220\u9664\u3002

                                                                            \u5982\u679c\u5728\u5de5\u4f5c\u8d1f\u8f7d\u7684 \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\u4e0b\u5220\u9664\u63d2\u4ef6\uff0c\u8fd9\u53ea\u662f\u5220\u9664\u4e86\u8be5\u63d2\u4ef6\u7684\u5de5\u4f5c\u8d1f\u8f7d\u526f\u672c\uff0c\u63d2\u4ef6\u672c\u8eab\u4ecd\u672a\u5220\u9664\uff0c\u540e\u7eed\u91cd\u65b0\u5b89\u88c5\u8be5\u63d2\u4ef6\u65f6\u4e5f\u4f1a\u63d0\u793a\u9519\u8bef\u3002

                                                                          6. \u56de\u5230\u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\u9875\u9762\u4e0b\u7684 \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u53ef\u4ee5\u770b\u5230\u754c\u9762\u663e\u793a \u63d2\u4ef6\u5df2\u5b89\u88c5 \u3002\u73b0\u5728\u53ef\u4ee5\u5f00\u59cb\u521b\u5efa CronHPA \u7b56\u7565\u4e86\u3002

                                                                          "},{"location":"end-user/kpanda/scale/install-metrics-server.html","title":"\u5b89\u88c5 metrics-server \u63d2\u4ef6","text":"

                                                                          metrics-server \u662f Kubernetes \u5185\u7f6e\u7684\u8d44\u6e90\u4f7f\u7528\u6307\u6807\u91c7\u96c6\u7ec4\u4ef6\u3002 \u60a8\u53ef\u4ee5\u901a\u8fc7\u914d\u7f6e\u5f39\u6027\u4f38\u7f29\uff08HPA\uff09\u7b56\u7565\u6765\u5b9e\u73b0\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u81ea\u52a8\u6c34\u5e73\u4f38\u7f29 Pod \u526f\u672c\u3002

                                                                          \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5 metrics-server \u3002

                                                                          "},{"location":"end-user/kpanda/scale/install-metrics-server.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                                          \u5b89\u88c5 metrics-server \u63d2\u4ef6\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                                          • \u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u5df2\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u5df2\u521b\u5efa Kubernetes \u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                                          • \u5df2\u5b8c\u6210\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u7684\u521b\u5efa\u3002

                                                                          • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                                          "},{"location":"end-user/kpanda/scale/install-metrics-server.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                                          \u8bf7\u6267\u884c\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 metrics-server \u63d2\u4ef6\u3002

                                                                          1. \u5728\u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\u4e0b\u7684\u5f39\u6027\u4f38\u7f29\u9875\u9762\uff0c\u70b9\u51fb \u53bb\u5b89\u88c5 \uff0c\u8fdb\u5165 metrics-server \u63d2\u4ef6\u5b89\u88c5\u754c\u9762\u3002

                                                                          2. \u9605\u8bfb metrics-server \u63d2\u4ef6\u76f8\u5173\u4ecb\u7ecd\uff0c\u9009\u62e9\u7248\u672c\u540e\u70b9\u51fb \u5b89\u88c5 \u6309\u94ae\u3002\u672c\u6587\u5c06\u4ee5 3.8.2 \u7248\u672c\u4e3a\u4f8b\u8fdb\u884c\u5b89\u88c5\uff0c\u63a8\u8350\u60a8\u5b89\u88c5 3.8.2 \u53ca\u66f4\u9ad8\u7248\u672c\u3002

                                                                          3. \u5728\u5b89\u88c5\u914d\u7f6e\u754c\u9762\u914d\u7f6e\u57fa\u672c\u53c2\u6570\u3002

                                                                            • \u540d\u79f0\uff1a\u8f93\u5165\u63d2\u4ef6\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 metrics-server-01\u3002
                                                                            • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u63d2\u4ef6\u5b89\u88c5\u7684\u547d\u540d\u7a7a\u95f4\uff0c\u6b64\u5904\u4ee5 default \u4e3a\u4f8b\u3002
                                                                            • \u7248\u672c\uff1a\u63d2\u4ef6\u7684\u7248\u672c\uff0c\u6b64\u5904\u4ee5 3.8.2 \u7248\u672c\u4e3a\u4f8b\u3002
                                                                            • \u5c31\u7eea\u7b49\u5f85\uff1a\u542f\u7528\u540e\uff0c\u5c06\u7b49\u5f85\u5e94\u7528\u4e0b\u6240\u6709\u5173\u8054\u8d44\u6e90\u5904\u4e8e\u5c31\u7eea\u72b6\u6001\uff0c\u624d\u4f1a\u6807\u8bb0\u5e94\u7528\u5b89\u88c5\u6210\u529f\u3002
                                                                            • \u5931\u8d25\u5220\u9664\uff1a\u5f00\u542f\u540e\uff0c\u5c06\u9ed8\u8ba4\u540c\u6b65\u5f00\u542f\u5c31\u7eea\u7b49\u5f85\u3002\u5982\u679c\u5b89\u88c5\u5931\u8d25\uff0c\u5c06\u5220\u9664\u5b89\u88c5\u76f8\u5173\u8d44\u6e90\u3002
                                                                            • \u8be6\u60c5\u65e5\u5fd7\uff1a\u5f00\u542f\u5b89\u88c5\u8fc7\u7a0b\u65e5\u5fd7\u7684\u8be6\u7ec6\u8f93\u51fa\u3002

                                                                            Note

                                                                            \u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u548c/\u6216 \u5931\u8d25\u5220\u9664 \u540e\uff0c\u5e94\u7528\u9700\u8981\u7ecf\u8fc7\u8f83\u957f\u65f6\u95f4\u624d\u4f1a\u88ab\u6807\u8bb0\u4e3a \u8fd0\u884c\u4e2d \u72b6\u6001\u3002

4. Advanced parameter configuration

   • If the cluster network cannot reach the k8s.gcr.io registry, try changing the repository parameter to repository: k8s.m.daocloud.io/metrics-server/metrics-server.

   • Installing the metrics-server plugin also requires providing an SSL certificate. To bypass certificate verification, add the - --kubelet-insecure-tls argument under defaultArgs:.

   Click to view the recommended YAML parameters
```yaml
image:
  repository: k8s.m.daocloud.io/metrics-server/metrics-server # change the registry source to k8s.m.daocloud.io
  tag: ''
  pullPolicy: IfNotPresent
imagePullSecrets: []
nameOverride: ''
fullnameOverride: ''
serviceAccount:
  create: true
  annotations: {}
  name: ''
rbac:
  create: true
  pspEnabled: false
apiService:
  create: true
podLabels: {}
podAnnotations: {}
podSecurityContext: {}
securityContext:
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1000
priorityClassName: system-cluster-critical
containerPort: 4443
hostNetwork:
  enabled: false
replicas: 1
updateStrategy: {}
podDisruptionBudget:
  enabled: false
  minAvailable: null
  maxUnavailable: null
defaultArgs:
  - '--cert-dir=/tmp'
  - '--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname'
  - '--kubelet-use-node-status-port'
  - '--metric-resolution=15s'
  - --kubelet-insecure-tls # bypass certificate verification
args: []
livenessProbe:
  httpGet:
    path: /livez
    port: https
    scheme: HTTPS
  initialDelaySeconds: 0
  periodSeconds: 10
  failureThreshold: 3
readinessProbe:
  httpGet:
    path: /readyz
    port: https
    scheme: HTTPS
  initialDelaySeconds: 20
  periodSeconds: 10
  failureThreshold: 3
service:
  type: ClusterIP
  port: 443
  annotations: {}
  labels: {}
metrics:
  enabled: false
serviceMonitor:
  enabled: false
  additionalLabels: {}
  interval: 1m
  scrapeTimeout: 10s
resources: {}
extraVolumeMounts: []
extraVolumes: []
nodeSelector: {}
tolerations: []
affinity: {}
```
5. Click OK to finish installing the metrics-server plugin. The system then automatically redirects to the Helm Apps list page; wait a few minutes, refresh the page, and you can see the newly installed application.

Note

To remove the metrics-server plugin completely, it must be deleted from the Helm Apps list page. Deleting metrics-server only from the workload page just deletes the application's workload replica; the application itself is not deleted, and an error will be reported when you reinstall the plugin later.

                                                                          "},{"location":"end-user/kpanda/scale/install-vpa.html","title":"\u5b89\u88c5 vpa \u63d2\u4ef6","text":"

                                                                          \u5bb9\u5668\u5782\u76f4\u6269\u7f29\u5bb9\u7b56\u7565\uff08Vertical Pod Autoscaler, VPA\uff09\u80fd\u591f\u8ba9\u96c6\u7fa4\u7684\u8d44\u6e90\u914d\u7f6e\u66f4\u52a0\u5408\u7406\uff0c\u907f\u514d\u96c6\u7fa4\u8d44\u6e90\u6d6a\u8d39\u3002 vpa \u5219\u662f\u5b9e\u73b0\u5bb9\u5668\u5782\u76f4\u6269\u7f29\u5bb9\u7684\u5173\u952e\u7ec4\u4ef6\u3002

                                                                          \u672c\u8282\u4ecb\u7ecd\u5982\u4f55\u5b89\u88c5 vpa \u63d2\u4ef6\u3002

                                                                          \u4e3a\u4e86\u4f7f\u7528 VPA \u7b56\u7565\uff0c\u4e0d\u4ec5\u9700\u8981\u5b89\u88c5 __vpa__ \u63d2\u4ef6\uff0c\u8fd8\u8981[\u5b89\u88c5 __metrics-server__ \u63d2\u4ef6](install-metrics-server.md)\u3002\n
                                                                          "},{"location":"end-user/kpanda/scale/install-vpa.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                                          \u5b89\u88c5 vpa \u63d2\u4ef6\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                                          • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                                          • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u3002

                                                                          • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                                          "},{"location":"end-user/kpanda/scale/install-vpa.html#_2","title":"\u64cd\u4f5c\u6b65\u9aa4","text":"

                                                                          \u53c2\u8003\u5982\u4e0b\u6b65\u9aa4\u4e3a\u96c6\u7fa4\u5b89\u88c5 vpa \u63d2\u4ef6\u3002

                                                                          1. \u5728 \u96c6\u7fa4\u5217\u8868 \u9875\u9762\u627e\u5230\u9700\u8981\u5b89\u88c5\u6b64\u63d2\u4ef6\u7684\u76ee\u6807\u96c6\u7fa4\uff0c\u70b9\u51fb\u8be5\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u70b9\u51fb \u5de5\u4f5c\u8d1f\u8f7d -> \u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d \uff0c\u70b9\u51fb\u76ee\u6807\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u3002

                                                                          2. \u5728\u5de5\u4f5c\u8d1f\u8f7d\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\uff0c\u5728 VPA \u53f3\u4fa7\u70b9\u51fb \u5b89\u88c5 \u3002

                                                                            3. \u9605\u8bfb\u8be5\u63d2\u4ef6\u7684\u76f8\u5173\u4ecb\u7ecd\uff0c\u9009\u62e9\u7248\u672c\u540e\u70b9\u51fb \u5b89\u88c5 \u6309\u94ae\u3002\u63a8\u8350\u5b89\u88c5 1.5.0 \u6216\u66f4\u9ad8\u7248\u672c\u3002

                                                                            4. \u67e5\u770b\u4ee5\u4e0b\u8bf4\u660e\u914d\u7f6e\u53c2\u6570\u3002

                                                                            - \u540d\u79f0\uff1a\u8f93\u5165\u63d2\u4ef6\u540d\u79f0\uff0c\u8bf7\u6ce8\u610f\u540d\u79f0\u6700\u957f 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 kubernetes-cronhpa-controller\u3002 - \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u63d2\u4ef6\u5b89\u88c5\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u6b64\u5904\u4ee5 default \u4e3a\u4f8b\u3002 - \u7248\u672c\uff1a\u63d2\u4ef6\u7684\u7248\u672c\uff0c\u6b64\u5904\u4ee5 4.5.0 \u7248\u672c\u4e3a\u4f8b\u3002 - \u5c31\u7eea\u7b49\u5f85\uff1a\u542f\u7528\u540e\uff0c\u5c06\u7b49\u5f85\u5e94\u7528\u4e0b\u7684\u6240\u6709\u5173\u8054\u8d44\u6e90\u90fd\u5904\u4e8e\u5c31\u7eea\u72b6\u6001\uff0c\u624d\u4f1a\u6807\u8bb0\u5e94\u7528\u5b89\u88c5\u6210\u529f\u3002 - \u5931\u8d25\u5220\u9664\uff1a\u5982\u679c\u63d2\u4ef6\u5b89\u88c5\u5931\u8d25\uff0c\u5219\u5220\u9664\u5df2\u7ecf\u5b89\u88c5\u7684\u5173\u8054\u8d44\u6e90\u3002\u5f00\u542f\u540e\uff0c\u5c06\u9ed8\u8ba4\u540c\u6b65\u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u3002 - \u8be6\u60c5\u65e5\u5fd7\uff1a\u5f00\u542f\u540e\uff0c\u5c06\u8bb0\u5f55\u5b89\u88c5\u8fc7\u7a0b\u7684\u8be6\u7ec6\u65e5\u5fd7\u3002

                                                                            Note

                                                                            \u5f00\u542f \u5c31\u7eea\u7b49\u5f85 \u548c/\u6216 \u5931\u8d25\u5220\u9664 \u540e\uff0c\u5e94\u7528\u9700\u8981\u7ecf\u8fc7\u8f83\u957f\u65f6\u95f4\u624d\u4f1a\u88ab\u6807\u8bb0\u4e3a\u201c\u8fd0\u884c\u4e2d\u201d\u72b6\u6001\u3002

                                                                          3. \u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \uff0c\u7cfb\u7edf\u5c06\u81ea\u52a8\u8df3\u8f6c\u81f3 Helm \u5e94\u7528 \u5217\u8868\u9875\u9762\u3002\u7a0d\u7b49\u51e0\u5206\u949f\u540e\u5237\u65b0\u9875\u9762\u4f5c\uff0c\u5373\u53ef\u770b\u5230\u521a\u521a\u5b89\u88c5\u7684\u5e94\u7528\u3002

                                                                            Warning

                                                                            \u5982\u9700\u5220\u9664 vpa \u63d2\u4ef6\uff0c\u5e94\u5728 Helm \u5e94\u7528 \u5217\u8868\u9875\u9762\u624d\u80fd\u5c06\u5176\u5f7b\u5e95\u5220\u9664\u3002

                                                                            \u5982\u679c\u5728\u5de5\u4f5c\u8d1f\u8f7d\u7684 \u5f39\u6027\u4f38\u7f29 \u9875\u7b7e\u4e0b\u5220\u9664\u63d2\u4ef6\uff0c\u8fd9\u53ea\u662f\u5220\u9664\u4e86\u8be5\u63d2\u4ef6\u7684\u5de5\u4f5c\u8d1f\u8f7d\u526f\u672c\uff0c\u63d2\u4ef6\u672c\u8eab\u4ecd\u672a\u5220\u9664\uff0c\u540e\u7eed\u91cd\u65b0\u5b89\u88c5\u8be5\u63d2\u4ef6\u65f6\u4e5f\u4f1a\u63d0\u793a\u9519\u8bef\u3002

4. Go back to the Auto Scaling tab on the workload details page; the page now shows Plugin Installed. You can now create VPA policies.
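For reference, a minimal VPA policy manifest might look like the sketch below (it assumes the VPA plugin installed above; the policy and workload names are hypothetical examples):

apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: nginx-vpa            # hypothetical policy name
  namespace: default
spec:
  targetRef:                 # the workload whose resource requests VPA manages
    apiVersion: apps/v1
    kind: Deployment
    name: nginx-deploy       # hypothetical workload name
  updatePolicy:
    updateMode: "Auto"       # VPA may evict Pods to apply new requests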

                                                                          "},{"location":"end-user/kpanda/scale/knative/install.html","title":"\u5b89\u88c5","text":"

Knative is a cross-platform solution for serverless deployment.

                                                                          "},{"location":"end-user/kpanda/scale/knative/install.html#_2","title":"\u6b65\u9aa4","text":"
                                                                          1. \u767b\u5f55\u96c6\u7fa4\uff0c\u70b9\u51fb\u4fa7\u8fb9\u680f Helm \u5e94\u7528 -> Helm \u6a21\u677f \uff0c\u5728\u53f3\u4fa7\u4e0a\u65b9\u641c\u7d22\u6846\u8f93\u5165 knative \uff0c\u7136\u540e\u6309\u56de\u8f66\u952e\u641c\u7d22\u3002

2. Click knative-operator in the search results to enter the installation configuration page. There you can view the available versions and the Parameters options of the Helm values.

3. Click the Install button to enter the installation configuration page.

4. Enter a name and the installation tenant; checking Ready Wait and Detailed Logs is recommended.

5. In the settings below, you can check Serving and enter the installation tenant for the Knative Serving component. The component is then deployed after installation and managed by the Knative Operator, which reconciles it through a KnativeServing custom resource (see the sketch below).
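A minimal hand-written equivalent of that custom resource might look like this sketch (an illustration only; the namespace follows the operator's conventional knative-serving layout, and Kourier is assumed as the ingress here):

apiVersion: operator.knative.dev/v1beta1
kind: KnativeServing
metadata:
  name: knative-serving
  namespace: knative-serving   # tenant/namespace chosen at install time
spec:
  ingress:
    kourier:
      enabled: true            # use the Envoy-based Kourier ingress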

                                                                          "},{"location":"end-user/kpanda/scale/knative/knative.html","title":"Kantive \u4ecb\u7ecd","text":"

Knative provides a higher-level abstraction that simplifies and accelerates building, deploying, and managing applications on Kubernetes. It lets developers focus on implementing business logic while leaving most of the infrastructure and operations work to Knative, significantly improving productivity.

                                                                          "},{"location":"end-user/kpanda/scale/knative/knative.html#_1","title":"\u7ec4\u4ef6","text":"

The knative-operator runs the following components.

knative-operator   knative-operator-58f7d7db5c-7f6r5      1/1     Running     0     6m55s
knative-operator   operator-webhook-667dc67bc-qvrv4       1/1     Running     0     6m55s

The knative-serving components are as follows.

knative-serving        3scale-kourier-gateway-d69fbfbd-bd8d8   1/1     Running     0                 7m13s
knative-serving        activator-7c6fddd698-wdlng              1/1     Running     0                 7m3s
knative-serving        autoscaler-8f4b876bb-kd25p              1/1     Running     0                 7m17s
knative-serving        autoscaler-hpa-5f7f74679c-vkc7p         1/1     Running     0                 7m15s
knative-serving        controller-789c896c46-tfvsv             1/1     Running     0                 7m17s
knative-serving        net-kourier-controller-7db578c889-7gd5l 1/1     Running     0                 7m14s
knative-serving        webhook-5c88b94c5-78x7m                 1/1     Running     0                 7m1s
knative-serving        storage-version-migration-serving-serving-1.12.2-t7zvd   0/1  Completed   0   7m15s
| Component | Role |
| --- | --- |
| Activator | Queues requests (when a Knative Service has already scaled to zero). Calls the autoscaler to bring scaled-to-zero services back up and forwards the queued requests. The Activator can also act as a request buffer to handle traffic bursts. |
| Autoscaler | Scales Knative services based on configuration, metrics, and incoming requests. |
| Controller | Manages the state of Knative CRs. It watches multiple objects, manages the lifecycle of dependent resources, and updates resource status. |
| Queue-Proxy | A sidecar container injected into every Knative Service. It collects traffic data and reports it to the Autoscaler, which issues scale-up or scale-down requests based on this data and preset rules. |
| Webhooks | Knative Serving has several webhooks responsible for validating and mutating Knative resources. |

"},{"location":"end-user/kpanda/scale/knative/knative.html#ingress","title":"Ingress Traffic Entry Solutions","text":"

| Solution | Applicable scenario |
| --- | --- |
| Istio | If Istio is already in use, it can be chosen as the traffic entry solution. |
| Contour | If Contour is already enabled in the cluster, it can be chosen as the traffic entry solution. |
| Kourier | When neither of the above two Ingress components is present, Knative's Envoy-based Kourier Ingress can be used as the traffic entry. |

"},{"location":"end-user/kpanda/scale/knative/knative.html#autoscaler","title":"Autoscaler Comparison","text":"

| Autoscaler type | Core part of Knative Serving | Enabled by default | Scale-to-zero support | CPU-based autoscaling support |
| --- | --- | --- | --- | --- |
| Knative Pod Autoscaler (KPA) | Yes | Yes | Yes | No |
| Horizontal Pod Autoscaler (HPA) | No | Enabled after installing Knative Serving | No | Yes |

"},{"location":"end-user/kpanda/scale/knative/knative.html#crd","title":"CRD","text":"

| Resource type | API name | Description |
| --- | --- | --- |
| Services | service.serving.knative.dev | Automatically manages the entire lifecycle of the workload and controls the creation of the other objects, ensuring the application has Routes, Configurations, and a new revision on every update. |
| Routes | route.serving.knative.dev | Maps a network endpoint to one or more revisions, supporting traffic splitting and revision routing. |
| Configurations | configuration.serving.knative.dev | Maintains the desired state of the deployment, providing separation between code and configuration following the Twelve-Factor App methodology; modifying the configuration creates a new revision. |
| Revisions | revision.serving.knative.dev | A point-in-time snapshot of the workload at each modification; an immutable object that can scale up and down automatically based on traffic. |

"},{"location":"end-user/kpanda/scale/knative/playground.html","title":"Knative in Practice","text":"

In this section, we will dig into Knative through several hands-on exercises.

                                                                          "},{"location":"end-user/kpanda/scale/knative/playground.html#case-1-hello-world","title":"case 1 - Hello World","text":"
apiVersion: serving.knative.dev/v1
kind: Service
metadata:
  name: hello
spec:
  template:
    spec:
      containers:
        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest
          ports:
            - containerPort: 8080
          env:
            - name: TARGET
              value: "World"

You can use kubectl to check the status of the deployed application; Knative automatically configured an ingress and an autoscaler for it.

~ kubectl get service.serving.knative.dev/hello
NAME    URL                                              LATESTCREATED   LATESTREADY   READY   REASON
hello   http://hello.knative-serving.knative.loulan.me   hello-00001     hello-00001   True
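Assuming the URL above resolves from your network, the service can be exercised with a plain HTTP request; the helloworld-go sample should reply with a greeting built from the TARGET environment variable, something like:

curl http://hello.knative-serving.knative.loulan.me
Hello World!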

The YAML of the deployed Pod is as follows; the Pod consists of two containers: user-container and queue-proxy.

apiVersion: v1
kind: Pod
metadata:
  name: hello-00003-deployment-5fcb8ccbf-7qjfk
spec:
  containers:
  - name: user-container
  - name: queue-proxy

Request flow:

1. Case 1: at low or zero traffic, requests are routed to the activator.
2. Case 2: under heavy traffic, requests are routed directly to the Pods only once traffic exceeds target-burst-capacity (see the annotation sketch below):
   1. set to 0, the activator is in the request path only while scaling from zero;
   2. set to -1, the activator always stays in the request path;
   3. set to >0, the value is the number of additional concurrent requests the system can absorb before scaling is triggered.
3. Case 3: when traffic drops again, requests are routed back to the activator once current_demand + target-burst-capacity > (pods * concurrency-target), that is:

   total pending requests + the number of requests above the target concurrency that can be absorbed > target concurrency per Pod * number of Pods
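As a sketch of how that threshold is tuned (it assumes the hello Service from case 1; the value 200 is a placeholder), target-burst-capacity is set with a revision annotation:

apiVersion: serving.knative.dev/v1
kind: Service
metadata:
  name: hello
spec:
  template:
    metadata:
      annotations:
        # keep the activator in the path until ~200 extra concurrent
        # requests can be absorbed; "-1" would keep it there always
        autoscaling.knative.dev/target-burst-capacity: "200"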

                                                                          "},{"location":"end-user/kpanda/scale/knative/playground.html#case-2-","title":"case 2 - \u57fa\u4e8e\u5e76\u53d1\u5f39\u6027\u4f38\u7f29","text":"

First, apply the YAML definition below in the cluster.

apiVersion: serving.knative.dev/v1
kind: Service
metadata:
  name: hello
spec:
  template:
    metadata:
      annotations:
        autoscaling.knative.dev/target: "1"
        autoscaling.knative.dev/class: "kpa.autoscaling.knative.dev"
    spec:
      containers:
        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest
          ports:
            - containerPort: 8080
          env:
            - name: TARGET
              value: "World"

Run the command below to generate test load, and watch the Pods scale up with kubectl get pods -A -w.

wrk -t2 -c4 -d6s http://hello.knative-serving.knative.daocloud.io/
                                                                          "},{"location":"end-user/kpanda/scale/knative/playground.html#case-3-","title":"case 3 - \u57fa\u4e8e\u5e76\u53d1\u5f39\u6027\u4f38\u7f29\uff0c\u8fbe\u5230\u7279\u5b9a\u6bd4\u4f8b\u63d0\u524d\u6269\u5bb9","text":"

This is easy to achieve. For example, with per-container concurrency limited to 10, setting autoscaling.knative.dev/target-utilization-percentage: 70 makes Pods start scaling out once 70% utilization is reached.

apiVersion: serving.knative.dev/v1
kind: Service
metadata:
  name: hello
spec:
  template:
    metadata:
      annotations:
        autoscaling.knative.dev/target: "10"
        autoscaling.knative.dev/class: "kpa.autoscaling.knative.dev"
        autoscaling.knative.dev/target-utilization-percentage: "70"
        autoscaling.knative.dev/metric: "concurrency"
    spec:
      containers:
        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest
          ports:
            - containerPort: 8080
          env:
            - name: TARGET
              value: "World"
                                                                          "},{"location":"end-user/kpanda/scale/knative/playground.html#case-4-","title":"case 4 - \u7070\u5ea6\u53d1\u5e03/\u6d41\u91cf\u767e\u5206\u6bd4","text":"

We can control the traffic sent to each revision through spec.traffic.

apiVersion: serving.knative.dev/v1
kind: Service
metadata:
  name: hello
spec:
  template:
    metadata:
      annotations:
        autoscaling.knative.dev/target: "1"
        autoscaling.knative.dev/class: "kpa.autoscaling.knative.dev"
    spec:
      containers:
        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest
          ports:
            - containerPort: 8080
          env:
            - name: TARGET
              value: "World"
  traffic:
  - latestRevision: true
    percent: 50
  - latestRevision: false
    percent: 50
    revisionName: hello-00001
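After applying it, the 50/50 split can be checked from the Service status (a usage sketch; ksvc is the shortname for Knative Services):

kubectl get ksvc hello -o jsonpath='{.status.traffic}'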
                                                                          "},{"location":"end-user/kpanda/scale/knative/scene.html","title":"\u4f7f\u7528\u573a\u666f","text":""},{"location":"end-user/kpanda/scale/knative/scene.html#_2","title":"\u9002\u5408\u7684\u573a\u666f","text":"
• Short-connection, high-concurrency workloads
• Workloads that need elastic scaling
• Large numbers of applications that should scale down to zero to improve resource utilization
• AI serving workloads that scale out based on specific metrics

                                                                          Tip

For short-connection, high-concurrency workloads and workloads that need elastic scaling, the HPA and VPA capabilities are recommended.

                                                                          "},{"location":"end-user/kpanda/scale/knative/scene.html#_3","title":"\u4e0d\u9002\u5408\u7684\u573a\u666f","text":"
                                                                          • \u957f\u8fde\u63a5\u4e1a\u52a1
                                                                          • \u5ef6\u65f6\u654f\u611f\u4e1a\u52a1
                                                                          • \u57fa\u4e8e cookie \u7684\u6d41\u91cf\u5206\u6d41
                                                                          • \u57fa\u4e8e header \u7684\u6d41\u91cf\u5206\u6d41
                                                                          "},{"location":"end-user/kpanda/security/index.html","title":"\u5b89\u5168\u626b\u63cf\u7c7b\u578b","text":"

In a Kubernetes (K8s) environment, security scanning is one of the key measures for keeping a cluster secure. Compliance scanning (based on the CIS Benchmark), permission scanning (based on the kube-audit auditing capability), and vulnerability scanning (based on kube-hunter) are three common and important kinds of security scan:

• Compliance scan: scans cluster nodes against the CIS Benchmark. The CIS Benchmark is a set of globally recognized best-practice standards that provides detailed security configuration guidance and automated checking tools (such as Kube-Bench) for Kubernetes clusters, helping organizations keep their K8s clusters in line with security baseline requirements and protect systems and data from threats (a standalone Kube-Bench run is sketched after this list).

• Permission scan: based on the kube-audit auditing capability. Permission scanning mainly addresses cluster access control and operational transparency. Through audit logs, cluster administrators can trace the access history of cluster resources and identify anomalous behavior such as unauthorized access, leaks of sensitive data, and records of operations with security flaws. This is critical for troubleshooting, security incident response, and meeting compliance requirements. Permission scanning also helps organizations discover potential privilege abuse and take timely measures to prevent security incidents (a minimal audit policy is sketched after this list).

• Vulnerability scan: based on kube-hunter, it mainly addresses known vulnerabilities and misconfigurations in a Kubernetes cluster. By simulating attack behavior, kube-hunter can identify exploitable weaknesses in the cluster, such as unauthorized access, exposed services and API endpoints, and misconfigured roles and binding policies. In particular, kube-hunter can identify and report CVE vulnerabilities which, if exploited, could lead to serious consequences such as data leaks or service outages. CVEs are defined and maintained by internationally recognized security organizations such as MITRE; the CVE database provides unique identifiers for known vulnerabilities in software and firmware and is a standard followed by the global security community. By drawing on the CVE database, kube-hunter helps users quickly identify and respond to security threats in their Kubernetes clusters (typical invocations are sketched below).
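The sketches below show how each of the three tools is typically driven when run standalone; all flags, rule sets, and addresses are illustrative assumptions, not the platform's own wiring.

A Kube-Bench run against the CIS Benchmark (assuming a recent kube-bench release executed directly on the node being checked):

kube-bench run --targets master   # check control-plane components against CIS
kube-bench run --targets node     # check kubelet and worker-node settings

Audit-based permission scanning relies on the API server emitting audit events according to an audit policy; a minimal, illustrative policy:

apiVersion: audit.k8s.io/v1
kind: Policy
rules:
  - level: Metadata                  # record who touched Secrets, without request bodies
    resources:
      - group: ""
        resources: ["secrets"]
  - level: RequestResponse           # full payloads for RBAC changes
    resources:
      - group: "rbac.authorization.k8s.io"
        resources: ["clusterrolebindings", "rolebindings"]

Typical standalone kube-hunter invocations (the node IP is a placeholder):

pip install kube-hunter          # or run the aquasec/kube-hunter container image
kube-hunter --remote 10.0.0.1    # probe a specific node from the outside
kube-hunter --pod                # assess what an attacker inside a pod could reach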

                                                                          "},{"location":"end-user/kpanda/security/index.html#_2","title":"\u5408\u89c4\u6027\u626b\u63cf","text":"

Compliance scans target cluster nodes. The scan results list each check item and its outcome, and give remediation suggestions for items that did not pass. For the specific security rules used during scanning, refer to the CIS Kubernetes Benchmark.

The emphasis of the scan differs depending on the type of node being checked.

• Scanning control plane nodes (Controller)

  • Focuses on the security of system components such as the API Server, controller-manager, scheduler, and kubelet
  • Checks the security configuration of the Etcd database
  • Checks whether the cluster's authentication mechanisms, authorization policies, and network security configuration meet security standards
• Scanning worker nodes (Worker)

  • Checks whether the configuration of the kubelet, Docker, and other container runtimes meets security standards
  • Checks whether container images have been trust-verified
  • Checks whether the node's network security configuration meets security standards

                                                                          Tip

To use compliance scanning, first create a scan configuration, then create a scan policy based on it. After the scan policy has run, you can view the scan report.

                                                                          "},{"location":"end-user/kpanda/security/index.html#_3","title":"\u6743\u9650\u626b\u63cf","text":"

Permission scanning focuses on security flaws caused by permission issues. It helps users identify security threats in a Kubernetes cluster and flags which resources need further review and protection. By running these checks, users gain a clearer, more complete picture of their Kubernetes environment and can ensure it complies with Kubernetes best practices and security standards.

Specifically, permission scanning supports the following:

• Scan the health status of all nodes in the cluster.

• Scan the running status of cluster components such as kube-apiserver, kube-controller-manager, and kube-scheduler.

• Scan security configuration: check the Kubernetes security configuration

  • API security: whether insecure API versions are enabled, whether appropriate RBAC roles and permission restrictions are set, etc.
  • Container security: whether insecure images are used, whether privileged mode is enabled, whether appropriate security contexts are set, etc.
  • Network security: whether suitable network policies are enabled to restrict traffic, whether TLS encryption is used, etc.
  • Storage security: whether appropriate encryption, access controls, and so on are enabled.
  • Application security: whether necessary security measures are in place, such as password management and defenses against cross-site scripting attacks.
• Provide warnings and recommendations: security best practices for cluster administrators, such as regularly rotating certificates, using strong passwords, and restricting network access.

                                                                          Tip

To use permission scanning, you first need to create a scan policy. After the policy has run, you can view the scan report. For details, refer to Security Scanning.

                                                                          "},{"location":"end-user/kpanda/security/index.html#_4","title":"\u6f0f\u6d1e\u626b\u63cf","text":"

Vulnerability scanning focuses on potential malicious attacks and security flaws, such as remote code execution, SQL injection, and XSS attacks, as well as some Kubernetes-specific attacks. The final scan report lists the security vulnerabilities present in the cluster and suggests fixes.

                                                                          Tip

To use vulnerability scanning, you first need to create a scan policy. After the policy has run, you can view the scan report. For details, refer to Vulnerability Scanning.

                                                                          "},{"location":"end-user/kpanda/security/audit.html","title":"\u6743\u9650\u626b\u63cf","text":"

To use the permission scan feature, you first need to create a scan policy. After the policy runs, a scan report is generated automatically for viewing.

                                                                          "},{"location":"end-user/kpanda/security/audit.html#_2","title":"\u521b\u5efa\u626b\u63cf\u7b56\u7565","text":"
                                                                          1. \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7684\u9996\u9875\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5b89\u5168\u7ba1\u7406 \u3002

                                                                          2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u6743\u9650\u626b\u63cf \uff0c\u70b9\u51fb \u626b\u63cf\u7b56\u7565 \u9875\u7b7e\uff0c\u5728\u53f3\u4fa7\u70b9\u51fb \u521b\u5efa\u626b\u63cf\u7b56\u7565 \u3002

                                                                          3. \u53c2\u8003\u4e0b\u5217\u8bf4\u660e\u586b\u5199\u914d\u7f6e\uff0c\u6700\u540e\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002

                                                                            • \u96c6\u7fa4\uff1a\u9009\u62e9\u9700\u8981\u626b\u63cf\u54ea\u4e2a\u96c6\u7fa4\u3002\u53ef\u9009\u7684\u96c6\u7fa4\u5217\u8868\u6765\u81ea\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u7684\u96c6\u7fa4\u3002\u5982\u679c\u6ca1\u6709\u60f3\u9009\u7684\u96c6\u7fa4\uff0c\u53ef\u4ee5\u53bb\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u96c6\u7fa4\u3002
                                                                            • \u626b\u63cf\u7c7b\u578b\uff1a

                                                                              • \u7acb\u5373\u626b\u63cf\uff1a\u5728\u626b\u63cf\u7b56\u7565\u521b\u5efa\u597d\u4e4b\u540e\u7acb\u5373\u6267\u884c\u4e00\u6b21\u626b\u63cf\uff0c\u540e\u7eed\u4e0d\u53ef\u4ee5\u81ea\u52a8/\u624b\u52a8\u518d\u6b21\u6267\u884c\u626b\u63cf\u3002
                                                                              • \u5b9a\u65f6\u626b\u63cf\uff1a\u901a\u8fc7\u8bbe\u7f6e\u626b\u63cf\u5468\u671f\uff0c\u81ea\u52a8\u6309\u65f6\u91cd\u590d\u6267\u884c\u626b\u63cf\u3002
                                                                            • \u626b\u63cf\u62a5\u544a\u4fdd\u7559\u6570\u91cf\uff1a\u8bbe\u7f6e\u6700\u591a\u4fdd\u7559\u591a\u5c11\u626b\u63cf\u62a5\u544a\u3002\u8d85\u8fc7\u6307\u5b9a\u7684\u4fdd\u7559\u6570\u91cf\u65f6\uff0c\u4ece\u6700\u65e9\u7684\u62a5\u544a\u5f00\u59cb\u5220\u9664\u3002

                                                                          "},{"location":"end-user/kpanda/security/audit.html#_3","title":"\u66f4\u65b0/\u5220\u9664\u626b\u63cf\u7b56\u7565","text":"

                                                                          \u521b\u5efa\u626b\u63cf\u7b56\u7565\u4e4b\u540e\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u8981\u66f4\u65b0\u6216\u5220\u9664\u626b\u63cf\u7b56\u7565\u3002

                                                                          \u5728 \u626b\u63cf\u7b56\u7565 \u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u914d\u7f6e\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\uff1a

                                                                          • \u5bf9\u4e8e\u5468\u671f\u6027\u7684\u626b\u63cf\u7b56\u7565\uff1a

                                                                            • \u9009\u62e9 \u7acb\u5373\u6267\u884c \u610f\u5473\u7740\uff0c\u5728\u5468\u671f\u8ba1\u5212\u4e4b\u5916\u7acb\u5373\u518d\u626b\u63cf\u4e00\u6b21\u96c6\u7fa4
                                                                            • \u9009\u62e9 \u7981\u7528 \u4f1a\u4e2d\u65ad\u626b\u63cf\u8ba1\u5212\uff0c\u76f4\u5230\u70b9\u51fb \u542f\u7528 \u624d\u53ef\u4ee5\u7ee7\u7eed\u6839\u636e\u5468\u671f\u8ba1\u5212\u6267\u884c\u8be5\u626b\u63cf\u7b56\u7565\u3002
                                                                            • \u9009\u62e9 \u7f16\u8f91 \u53ef\u4ee5\u66f4\u65b0\u914d\u7f6e\uff0c\u652f\u6301\u66f4\u65b0\u626b\u63cf\u914d\u7f6e\u3001\u7c7b\u578b\u3001\u626b\u63cf\u5468\u671f\u3001\u62a5\u544a\u4fdd\u7559\u6570\u91cf\uff0c\u4e0d\u53ef\u66f4\u6539\u914d\u7f6e\u540d\u79f0\u548c\u9700\u8981\u626b\u63cf\u7684\u76ee\u6807\u96c6\u7fa4\u3002
                                                                            • \u9009\u62e9 \u5220\u9664 \u53ef\u4ee5\u5220\u9664\u8be5\u914d\u7f6e
                                                                          • \u5bf9\u4e8e\u4e00\u6b21\u6027\u7684\u626b\u63cf\u7b56\u7565\uff1a\u4ec5\u652f\u6301 \u5220\u9664 \u64cd\u4f5c\u3002

                                                                          "},{"location":"end-user/kpanda/security/audit.html#_4","title":"\u67e5\u770b\u626b\u63cf\u62a5\u544a","text":"
                                                                          1. \u5728 \u5b89\u5168\u7ba1\u7406 -> \u6743\u9650\u626b\u63cf -> \u626b\u63cf\u62a5\u544a \u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u62a5\u544a\u540d\u79f0

                                                                            \u5728\u62a5\u544a\u53f3\u4fa7\u70b9\u51fb \u5220\u9664 \u53ef\u4ee5\u624b\u52a8\u5220\u9664\u62a5\u544a\u3002

                                                                          2. \u67e5\u770b\u626b\u63cf\u62a5\u544a\u5185\u5bb9\uff0c\u5305\u62ec\uff1a

                                                                            • \u626b\u63cf\u7684\u76ee\u6807\u96c6\u7fa4
                                                                            • \u4f7f\u7528\u7684\u626b\u63cf\u7b56\u7565
                                                                            • \u626b\u63cf\u9879\u603b\u6570\u3001\u8b66\u544a\u6570\u3001\u9519\u8bef\u6570
                                                                            • \u5728\u5468\u671f\u6027\u626b\u63cf\u7b56\u7565\u751f\u6210\u7684\u626b\u63cf\u62a5\u544a\u4e2d\uff0c\u8fd8\u53ef\u4ee5\u67e5\u770b\u626b\u63cf\u9891\u7387
                                                                            • \u626b\u63cf\u5f00\u59cb\u7684\u65f6\u95f4
                                                                            • \u68c0\u67e5\u8be6\u60c5\uff0c\u4f8b\u5982\u88ab\u68c0\u67e5\u7684\u8d44\u6e90\u3001\u8d44\u6e90\u7c7b\u578b\u3001\u626b\u63cf\u7ed3\u679c\u3001\u9519\u8bef\u7c7b\u578b\u3001\u9519\u8bef\u8be6\u60c5

                                                                          "},{"location":"end-user/kpanda/security/hunter.html","title":"\u6f0f\u6d1e\u626b\u63cf","text":"

To use the vulnerability scan feature, you first need to create a scan policy. After the policy runs, a scan report is generated automatically for viewing.

                                                                          "},{"location":"end-user/kpanda/security/hunter.html#_2","title":"\u521b\u5efa\u626b\u63cf\u7b56\u7565","text":"
                                                                          1. \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7684\u9996\u9875\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5b89\u5168\u7ba1\u7406 \u3002

                                                                          2. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u6f0f\u6d1e\u626b\u63cf \uff0c\u70b9\u51fb \u626b\u63cf\u7b56\u7565 \u9875\u7b7e\uff0c\u5728\u53f3\u4fa7\u70b9\u51fb \u521b\u5efa\u626b\u63cf\u7b56\u7565 \u3002

                                                                          3. \u53c2\u8003\u4e0b\u5217\u8bf4\u660e\u586b\u5199\u914d\u7f6e\uff0c\u6700\u540e\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u3002

                                                                            • \u96c6\u7fa4\uff1a\u9009\u62e9\u9700\u8981\u626b\u63cf\u54ea\u4e2a\u96c6\u7fa4\u3002\u53ef\u9009\u7684\u96c6\u7fa4\u5217\u8868\u6765\u81ea\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u7684\u96c6\u7fa4\u3002\u5982\u679c\u6ca1\u6709\u60f3\u9009\u7684\u96c6\u7fa4\uff0c\u53ef\u4ee5\u53bb\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165\u6216\u521b\u5efa\u96c6\u7fa4\u3002
                                                                            • \u626b\u63cf\u7c7b\u578b\uff1a

                                                                              • \u7acb\u5373\u626b\u63cf\uff1a\u5728\u626b\u63cf\u7b56\u7565\u521b\u5efa\u597d\u4e4b\u540e\u7acb\u5373\u6267\u884c\u4e00\u6b21\u626b\u63cf\uff0c\u540e\u7eed\u4e0d\u53ef\u4ee5\u81ea\u52a8/\u624b\u52a8\u518d\u6b21\u6267\u884c\u626b\u63cf\u3002
                                                                              • \u5b9a\u65f6\u626b\u63cf\uff1a\u901a\u8fc7\u8bbe\u7f6e\u626b\u63cf\u5468\u671f\uff0c\u81ea\u52a8\u6309\u65f6\u91cd\u590d\u6267\u884c\u626b\u63cf\u3002
                                                                            • \u626b\u63cf\u62a5\u544a\u4fdd\u7559\u6570\u91cf\uff1a\u8bbe\u7f6e\u6700\u591a\u4fdd\u7559\u591a\u5c11\u626b\u63cf\u62a5\u544a\u3002\u8d85\u8fc7\u6307\u5b9a\u7684\u4fdd\u7559\u6570\u91cf\u65f6\uff0c\u4ece\u6700\u65e9\u7684\u62a5\u544a\u5f00\u59cb\u5220\u9664\u3002

                                                                          "},{"location":"end-user/kpanda/security/hunter.html#_3","title":"\u66f4\u65b0/\u5220\u9664\u626b\u63cf\u7b56\u7565","text":"

                                                                          \u521b\u5efa\u626b\u63cf\u7b56\u7565\u4e4b\u540e\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u8981\u66f4\u65b0\u6216\u5220\u9664\u626b\u63cf\u7b56\u7565\u3002

                                                                          \u5728 \u626b\u63cf\u7b56\u7565 \u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u914d\u7f6e\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\uff1a

                                                                          • \u5bf9\u4e8e\u5468\u671f\u6027\u7684\u626b\u63cf\u7b56\u7565\uff1a

                                                                            • \u9009\u62e9 \u7acb\u5373\u6267\u884c \u610f\u5473\u7740\uff0c\u5728\u5468\u671f\u8ba1\u5212\u4e4b\u5916\u7acb\u5373\u518d\u626b\u63cf\u4e00\u6b21\u96c6\u7fa4
                                                                            • \u9009\u62e9 \u7981\u7528 \u4f1a\u4e2d\u65ad\u626b\u63cf\u8ba1\u5212\uff0c\u76f4\u5230\u70b9\u51fb \u542f\u7528 \u624d\u53ef\u4ee5\u7ee7\u7eed\u6839\u636e\u5468\u671f\u8ba1\u5212\u6267\u884c\u8be5\u626b\u63cf\u7b56\u7565\u3002
                                                                            • \u9009\u62e9 \u7f16\u8f91 \u53ef\u4ee5\u66f4\u65b0\u914d\u7f6e\uff0c\u652f\u6301\u66f4\u65b0\u626b\u63cf\u914d\u7f6e\u3001\u7c7b\u578b\u3001\u626b\u63cf\u5468\u671f\u3001\u62a5\u544a\u4fdd\u7559\u6570\u91cf\uff0c\u4e0d\u53ef\u66f4\u6539\u914d\u7f6e\u540d\u79f0\u548c\u9700\u8981\u626b\u63cf\u7684\u76ee\u6807\u96c6\u7fa4\u3002
                                                                            • \u9009\u62e9 \u5220\u9664 \u53ef\u4ee5\u5220\u9664\u8be5\u914d\u7f6e
                                                                          • \u5bf9\u4e8e\u4e00\u6b21\u6027\u7684\u626b\u63cf\u7b56\u7565\uff1a\u4ec5\u652f\u6301 \u5220\u9664 \u64cd\u4f5c\u3002

                                                                          "},{"location":"end-user/kpanda/security/hunter.html#_4","title":"\u67e5\u770b\u626b\u63cf\u62a5\u544a","text":"
                                                                          1. \u5728 \u5b89\u5168\u7ba1\u7406 -> \u6743\u9650\u626b\u63cf -> \u626b\u63cf\u62a5\u544a \u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u62a5\u544a\u540d\u79f0

                                                                            \u5728\u62a5\u544a\u53f3\u4fa7\u70b9\u51fb \u5220\u9664 \u53ef\u4ee5\u624b\u52a8\u5220\u9664\u62a5\u544a\u3002

                                                                          2. \u67e5\u770b\u626b\u63cf\u62a5\u544a\u5185\u5bb9\uff0c\u5305\u62ec\uff1a

                                                                            • \u626b\u63cf\u7684\u76ee\u6807\u96c6\u7fa4
                                                                            • \u4f7f\u7528\u7684\u626b\u63cf\u7b56\u7565
                                                                            • \u626b\u63cf\u9891\u7387
                                                                            • \u98ce\u9669\u603b\u6570\u3001\u9ad8\u98ce\u9669\u6570\u3001\u4e2d\u98ce\u9669\u6570\u3001\u4f4e\u98ce\u9669\u6570
                                                                            • \u626b\u63cf\u65f6\u95f4
                                                                            • \u68c0\u67e5\u8be6\u60c5\uff0c\u4f8b\u5982\u6f0f\u6d1e ID\u3001\u6f0f\u6d1e\u7c7b\u578b\u3001\u6f0f\u6d1e\u540d\u79f0\u3001\u6f0f\u6d1e\u63cf\u8ff0\u7b49

                                                                          "},{"location":"end-user/kpanda/security/cis/config.html","title":"\u626b\u63cf\u914d\u7f6e","text":"

The first step in using compliance scanning is to create a scan configuration. Based on the scan configuration, you then create and run a scan policy, and finally view the scan results.

                                                                          "},{"location":"end-user/kpanda/security/cis/config.html#_2","title":"\u521b\u5efa\u626b\u63cf\u914d\u7f6e","text":"

                                                                          \u521b\u5efa\u626b\u63cf\u914d\u7f6e\u7684\u6b65\u9aa4\u5982\u4e0b\uff1a

                                                                          1. \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u7684\u9996\u9875\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5b89\u5168\u7ba1\u7406 \u3002

                                                                          2. \u9ed8\u8ba4\u8fdb\u5165 \u5408\u89c4\u6027\u626b\u63cf \u9875\u9762\uff0c\u70b9\u51fb \u626b\u63cf\u914d\u7f6e \u9875\u7b7e\uff0c\u7136\u540e\u5728\u53f3\u4e0a\u89d2\u70b9\u51fb \u521b\u5efa\u626b\u63cf\u914d\u7f6e \u3002

                                                                          3. \u586b\u5199\u914d\u7f6e\u540d\u79f0\u3001\u9009\u62e9\u914d\u7f6e\u6a21\u677f\u3001\u6309\u9700\u52fe\u9009\u626b\u63cf\u9879\uff0c\u6700\u540e\u70b9\u51fb \u786e\u5b9a \u3002

                                                                            \u626b\u63cf\u6a21\u677f\uff1a\u76ee\u524d\u63d0\u4f9b\u4e86\u4e24\u4e2a\u6a21\u677f\u3002 kubeadm \u6a21\u677f\u9002\u7528\u4e8e\u4e00\u822c\u60c5\u51b5\u4e0b\u7684 Kubernetes \u96c6\u7fa4\u3002 \u6211\u4eec\u5728 kubeadm \u6a21\u677f\u57fa\u7840\u4e0a\uff0c\u7ed3\u5408\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u5e73\u53f0\u8bbe\u8ba1\u5ffd\u7565\u4e86\u4e0d\u9002\u7528\u4e8e\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u626b\u63cf\u9879\u3002

                                                                          "},{"location":"end-user/kpanda/security/cis/config.html#_3","title":"\u67e5\u770b\u626b\u63cf\u914d\u7f6e","text":"

                                                                          \u5728\u626b\u63cf\u914d\u7f6e\u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u626b\u63cf\u914d\u7f6e\u7684\u540d\u79f0\uff0c\u53ef\u4ee5\u67e5\u770b\u8be5\u914d\u7f6e\u7684\u7c7b\u578b\u3001\u626b\u63cf\u9879\u6570\u91cf\u3001\u521b\u5efa\u65f6\u95f4\u3001\u914d\u7f6e\u6a21\u677f\uff0c\u4ee5\u53ca\u8be5\u914d\u7f6e\u542f\u7528\u7684\u5177\u4f53\u626b\u63cf\u9879\u3002

                                                                          "},{"location":"end-user/kpanda/security/cis/config.html#_4","title":"\u66f4\u65b0/\u5220\u9664\u626b\u63cf\u914d\u7f6e","text":"

                                                                          \u626b\u63cf\u914d\u7f6e\u521b\u5efa\u6210\u529f\u4e4b\u540e\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u6c42\u66f4\u65b0\u914d\u7f6e\u6216\u5220\u9664\u8be5\u914d\u7f6e\u3002

                                                                          \u5728\u626b\u63cf\u914d\u7f6e\u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u914d\u7f6e\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\uff1a

                                                                          • \u9009\u62e9 \u7f16\u8f91 \u53ef\u4ee5\u66f4\u65b0\u914d\u7f6e\uff0c\u652f\u6301\u66f4\u65b0\u63cf\u8ff0\u3001\u6a21\u677f\u548c\u626b\u63cf\u9879\u3002\u4e0d\u53ef\u66f4\u6539\u914d\u7f6e\u540d\u79f0\u3002
                                                                          • \u9009\u62e9 \u5220\u9664 \u53ef\u4ee5\u5220\u9664\u8be5\u914d\u7f6e\u3002

                                                                          "},{"location":"end-user/kpanda/security/cis/policy.html","title":"\u626b\u63cf\u7b56\u7565","text":""},{"location":"end-user/kpanda/security/cis/policy.html#_2","title":"\u521b\u5efa\u626b\u63cf\u7b56\u7565","text":"

After creating a scan configuration, you can create a scan policy based on it.

1. On the Scan Policy tab of the Security Management -> Compliance Scan page, click Create Scan Policy on the right.

2. Fill in the configuration per the descriptions below, then click OK.

   • Cluster: select the cluster to scan. The list of selectable clusters comes from the clusters integrated into or created in the Container Management module. If the cluster you want is not there, go to the Container Management module to integrate or create it.
   • Scan configuration: select a previously created scan configuration. The scan configuration determines which specific scan items are executed.
   • Scan type:

     • Immediate scan: runs a scan once, right after the scan policy is created; the scan cannot be run again afterward, automatically or manually.
     • Scheduled scan: runs the scan repeatedly on schedule according to the configured scan period.
   • Number of scan reports to keep: the maximum number of scan reports to retain. When the limit is exceeded, the oldest reports are deleted first.

                                                                          "},{"location":"end-user/kpanda/security/cis/policy.html#_3","title":"\u66f4\u65b0/\u5220\u9664\u626b\u63cf\u7b56\u7565","text":"

                                                                          \u521b\u5efa\u626b\u63cf\u7b56\u7565\u4e4b\u540e\uff0c\u53ef\u4ee5\u6839\u636e\u9700\u8981\u66f4\u65b0\u6216\u5220\u9664\u626b\u63cf\u7b56\u7565\u3002

                                                                          \u5728 \u626b\u63cf\u7b56\u7565 \u9875\u7b7e\u4e0b\uff0c\u70b9\u51fb\u914d\u7f6e\u53f3\u4fa7\u7684 \u2507 \u64cd\u4f5c\u6309\u94ae\uff1a

                                                                          • \u5bf9\u4e8e\u5468\u671f\u6027\u7684\u626b\u63cf\u7b56\u7565\uff1a

                                                                            • \u9009\u62e9 \u7acb\u5373\u6267\u884c \u610f\u5473\u7740\uff0c\u5728\u5468\u671f\u8ba1\u5212\u4e4b\u5916\u7acb\u5373\u518d\u626b\u63cf\u4e00\u6b21\u96c6\u7fa4
                                                                            • \u9009\u62e9 \u7981\u7528 \u4f1a\u4e2d\u65ad\u626b\u63cf\u8ba1\u5212\uff0c\u76f4\u5230\u70b9\u51fb \u542f\u7528 \u624d\u53ef\u4ee5\u7ee7\u7eed\u6839\u636e\u5468\u671f\u8ba1\u5212\u6267\u884c\u8be5\u626b\u63cf\u7b56\u7565\u3002
                                                                            • \u9009\u62e9 \u7f16\u8f91 \u53ef\u4ee5\u66f4\u65b0\u914d\u7f6e\uff0c\u652f\u6301\u66f4\u65b0\u626b\u63cf\u914d\u7f6e\u3001\u7c7b\u578b\u3001\u626b\u63cf\u5468\u671f\u3001\u62a5\u544a\u4fdd\u7559\u6570\u91cf\uff0c\u4e0d\u53ef\u66f4\u6539\u914d\u7f6e\u540d\u79f0\u548c\u9700\u8981\u626b\u63cf\u7684\u76ee\u6807\u96c6\u7fa4\u3002
                                                                            • \u9009\u62e9 \u5220\u9664 \u53ef\u4ee5\u5220\u9664\u8be5\u914d\u7f6e
                                                                          • \u5bf9\u4e8e\u4e00\u6b21\u6027\u7684\u626b\u63cf\u7b56\u7565\uff1a\u4ec5\u652f\u6301 \u5220\u9664 \u64cd\u4f5c\u3002

                                                                          "},{"location":"end-user/kpanda/security/cis/report.html","title":"\u626b\u63cf\u62a5\u544a","text":"


                                                                          "},{"location":"end-user/kpanda/security/cis/report.html#_1","title":"\u626b\u63cf\u62a5\u544a","text":"

A scan report is generated automatically after a scan policy runs. You can view the report online or download it and view it locally.

• Download a scan report

  On the Scan Report tab of the Security Management -> Compliance Scan page, click the ┇ action button to the right of a report and select Download.

• View a scan report online

  Click the name of a report to view the contents of the CIS compliance scan report online, including:

  • the target cluster scanned
  • the scan policy and scan configuration used
  • the time the scan started
  • the total number of scan items, the number passed, and the number failed
  • remediation suggestions for failed scan items
  • suggestions for safer operation for passed scan items

                                                                          "},{"location":"end-user/kpanda/storage/pv.html","title":"\u6570\u636e\u5377(PV)","text":"

A data volume (PersistentVolume, PV) is a piece of storage in the cluster, provisioned in advance by an administrator or provisioned dynamically through a storage class (Storage Class). A PV is a cluster resource, but it has an independent lifecycle and is not deleted when the Pod process ends. Mounting a PV into a workload makes the workload's data persistent. A PV holds a data directory that the containers in a Pod can access.

                                                                          "},{"location":"end-user/kpanda/storage/pv.html#_1","title":"\u521b\u5efa\u6570\u636e\u5377","text":"

Data volumes can currently be created in two ways, via YAML or via a form. Each has its pros and cons and suits different users' needs.

• Creating via YAML takes fewer steps and is more efficient, but the bar is higher: you need to be familiar with the YAML file configuration of data volumes.

• Creating via a form is more intuitive and simpler; you just fill in the values as prompted, but the steps are more tedious.

                                                                          "},{"location":"end-user/kpanda/storage/pv.html#yaml","title":"YAML \u521b\u5efa","text":"
                                                                          1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377(PV) -> YAML \u521b\u5efa \u3002

                                                                          2. \u5728\u5f39\u6846\u4e2d\u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u7136\u540e\u5728\u5f39\u6846\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                                                                            \u652f\u6301\u4ece\u672c\u5730\u5bfc\u5165 YAML \u6587\u4ef6\u6216\u5c06\u586b\u5199\u597d\u7684\u6587\u4ef6\u4e0b\u8f7d\u4fdd\u5b58\u5230\u672c\u5730\u3002
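For reference, a minimal PersistentVolume manifest of the kind you might paste into the dialog. The name, capacity, and hostPath path are illustrative assumptions, not values mandated by the platform:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-demo                    # example name, choose your own
spec:
  capacity:
    storage: 10Gi                  # capacity of the volume (illustrative)
  accessModes:
    - ReadWriteOnce                # mountable read-write by a single node
  persistentVolumeReclaimPolicy: Retain
  volumeMode: Filesystem
  hostPath:
    path: /data/pv-demo            # directory on the node's filesystem (illustrative)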

                                                                          "},{"location":"end-user/kpanda/storage/pv.html#_2","title":"\u8868\u5355\u521b\u5efa","text":"
                                                                          1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377(PV) -> \u521b\u5efa\u6570\u636e\u5377(PV) \u3002

                                                                          2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\u3002

                                                                            • \u6570\u636e\u5377\u540d\u79f0\u3001\u6570\u636e\u5377\u7c7b\u578b\u3001\u6302\u8f7d\u8def\u5f84\u3001\u5377\u6a21\u5f0f\u3001\u8282\u70b9\u4eb2\u548c\u6027\u5728\u521b\u5efa\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                                                            • \u6570\u636e\u5377\u7c7b\u578b\uff1a\u6709\u5173\u5377\u7c7b\u578b\u7684\u8be6\u7ec6\u4ecb\u7ecd\uff0c\u53ef\u53c2\u8003 Kubernetes \u5b98\u65b9\u6587\u6863\u5377\u3002

                                                                            • Local\uff1a\u5c06 Node \u8282\u70b9\u7684\u672c\u5730\u5b58\u50a8\u5305\u88c5\u6210 PVC \u63a5\u53e3\uff0c\u5bb9\u5668\u76f4\u63a5\u4f7f\u7528 PVC \u800c\u65e0\u9700\u5173\u6ce8\u5e95\u5c42\u7684\u5b58\u50a8\u7c7b\u578b\u3002Local \u5377\u4e0d\u652f\u6301\u52a8\u6001\u914d\u7f6e\u6570\u636e\u5377\uff0c\u4f46\u652f\u6301\u914d\u7f6e\u8282\u70b9\u4eb2\u548c\u6027\uff0c\u53ef\u4ee5\u9650\u5236\u80fd\u4ece\u54ea\u4e9b\u8282\u70b9\u4e0a\u8bbf\u95ee\u8be5\u6570\u636e\u5377\u3002

                                                                            • HostPath\uff1a\u4f7f\u7528 Node \u8282\u70b9\u7684\u6587\u4ef6\u7cfb\u7edf\u4e0a\u7684\u6587\u4ef6\u6216\u76ee\u5f55\u4f5c\u4e3a\u6570\u636e\u5377\uff0c\u4e0d\u652f\u6301\u57fa\u4e8e\u8282\u70b9\u4eb2\u548c\u6027\u7684 Pod \u8c03\u5ea6\u3002

                                                                            • \u6302\u8f7d\u8def\u5f84\uff1a\u5c06\u6570\u636e\u5377\u6302\u8f7d\u5230\u5bb9\u5668\u4e2d\u7684\u67d0\u4e2a\u5177\u4f53\u76ee\u5f55\u4e0b\u3002

                                                                            • \u8bbf\u95ee\u6a21\u5f0f\uff1a

                                                                              • ReadWriteOnce\uff1a\u6570\u636e\u5377\u53ef\u4ee5\u88ab\u4e00\u4e2a\u8282\u70b9\u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002
                                                                              • ReadWriteMany\uff1a\u6570\u636e\u5377\u53ef\u4ee5\u88ab\u591a\u4e2a\u8282\u70b9\u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002
                                                                              • ReadOnlyMany\uff1a\u6570\u636e\u5377\u53ef\u4ee5\u88ab\u591a\u4e2a\u8282\u70b9\u4ee5\u53ea\u8bfb\u65b9\u5f0f\u6302\u8f7d\u3002
                                                                              • ReadWriteOncePod\uff1a\u6570\u636e\u5377\u53ef\u4ee5\u88ab\u5355\u4e2a Pod \u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002
                                                                            • \u56de\u6536\u7b56\u7565\uff1a

                                                                              • Retain\uff1a\u4e0d\u5220\u9664 PV\uff0c\u4ec5\u5c06\u5176\u72b6\u6001\u53d8\u4e3a released \uff0c\u9700\u8981\u7528\u6237\u624b\u52a8\u56de\u6536\u3002\u6709\u5173\u5982\u4f55\u624b\u52a8\u56de\u6536\uff0c\u53ef\u53c2\u8003\u6301\u4e45\u5377\u3002
                                                                              • Recycle\uff1a\u4fdd\u7559 PV \u4f46\u6e05\u7a7a\u5176\u4e2d\u7684\u6570\u636e\uff0c\u6267\u884c\u57fa\u672c\u7684\u64e6\u9664\u64cd\u4f5c\uff08 rm -rf /thevolume/* \uff09\u3002
                                                                              • Delete\uff1a\u5220\u9664 PV \u65f6\u53ca\u5176\u4e2d\u7684\u6570\u636e\u3002
                                                                            • \u5377\u6a21\u5f0f\uff1a

                                                                              • \u6587\u4ef6\u7cfb\u7edf\uff1a\u6570\u636e\u5377\u5c06\u88ab Pod \u6302\u8f7d\u5230\u67d0\u4e2a\u76ee\u5f55\u3002\u5982\u679c\u6570\u636e\u5377\u7684\u5b58\u50a8\u6765\u81ea\u67d0\u5757\u8bbe\u5907\u800c\u8be5\u8bbe\u5907\u76ee\u524d\u4e3a\u7a7a\uff0c\u7b2c\u4e00\u6b21\u6302\u8f7d\u5377\u4e4b\u524d\u4f1a\u5728\u8bbe\u5907\u4e0a\u521b\u5efa\u6587\u4ef6\u7cfb\u7edf\u3002
                                                                              • \u5757\uff1a\u5c06\u6570\u636e\u5377\u4f5c\u4e3a\u539f\u59cb\u5757\u8bbe\u5907\u6765\u4f7f\u7528\u3002\u8fd9\u7c7b\u5377\u4ee5\u5757\u8bbe\u5907\u7684\u65b9\u5f0f\u4ea4\u7ed9 Pod \u4f7f\u7528\uff0c\u5176\u4e0a\u6ca1\u6709\u4efb\u4f55\u6587\u4ef6\u7cfb\u7edf\uff0c\u53ef\u4ee5\u8ba9 Pod \u66f4\u5feb\u5730\u8bbf\u95ee\u6570\u636e\u5377\u3002
                                                                            • \u8282\u70b9\u4eb2\u548c\u6027\uff1a
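For illustration, a minimal Local PV that combines the fields above. The node name node-1 and the path /mnt/disks/ssd1 are hypothetical:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: local-pv-demo
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  volumeMode: Filesystem
  local:
    path: /mnt/disks/ssd1          # local disk path on the node (hypothetical)
  nodeAffinity:                    # required for Local volumes
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - node-1           # only this node may access the volume (hypothetical)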

                                                                          "},{"location":"end-user/kpanda/storage/pv.html#_3","title":"\u67e5\u770b\u6570\u636e\u5377","text":"

                                                                          \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377(PV) \u3002

                                                                          • \u8be5\u9875\u9762\u53ef\u4ee5\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u4e2d\u7684\u6240\u6709\u6570\u636e\u5377\uff0c\u4ee5\u53ca\u5404\u4e2a\u6570\u636e\u5377\u7684\u72b6\u6001\u3001\u5bb9\u91cf\u3001\u547d\u540d\u7a7a\u95f4\u7b49\u4fe1\u606f\u3002

                                                                          • \u652f\u6301\u6309\u7167\u6570\u636e\u5377\u7684\u540d\u79f0\u3001\u72b6\u6001\u3001\u547d\u540d\u7a7a\u95f4\u3001\u521b\u5efa\u65f6\u95f4\u8fdb\u884c\u987a\u5e8f\u6216\u9006\u5e8f\u6392\u5e8f\u3002

                                                                          • \u70b9\u51fb\u6570\u636e\u5377\u7684\u540d\u79f0\uff0c\u53ef\u4ee5\u67e5\u770b\u8be5\u6570\u636e\u5377\u7684\u57fa\u672c\u914d\u7f6e\u3001\u5b58\u50a8\u6c60\u4fe1\u606f\u3001\u6807\u7b7e\u3001\u6ce8\u89e3\u7b49\u4fe1\u606f\u3002

                                                                          "},{"location":"end-user/kpanda/storage/pv.html#_4","title":"\u514b\u9686\u6570\u636e\u5377","text":"

                                                                          \u901a\u8fc7\u514b\u9686\u6570\u636e\u5377\uff0c\u53ef\u4ee5\u57fa\u4e8e\u88ab\u514b\u9686\u6570\u636e\u5377\u7684\u914d\u7f6e\uff0c\u91cd\u65b0\u521b\u5efa\u4e00\u4e2a\u65b0\u7684\u6570\u636e\u5377\u3002

                                                                          1. \u8fdb\u5165\u514b\u9686\u9875\u9762

                                                                            • \u5728\u6570\u636e\u5377\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u514b\u9686\u7684\u6570\u636e\u5377\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u514b\u9686 \u3002

                                                                              \u4e5f\u53ef\u4ee5\u70b9\u51fb\u6570\u636e\u5377\u7684\u540d\u79f0\uff0c\u5728\u8be6\u60c5\u9875\u9762\u7684\u53f3\u4e0a\u89d2\u70b9\u51fb\u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u514b\u9686 \u3002

                                                                          2. \u76f4\u63a5\u4f7f\u7528\u539f\u914d\u7f6e\uff0c\u6216\u8005\u6309\u9700\u8fdb\u884c\u4fee\u6539\uff0c\u7136\u540e\u5728\u9875\u9762\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                                                                          "},{"location":"end-user/kpanda/storage/pv.html#_5","title":"\u66f4\u65b0\u6570\u636e\u5377","text":"

                                                                          \u6709\u4e24\u79cd\u9014\u5f84\u53ef\u4ee5\u66f4\u65b0\u6570\u636e\u5377\u3002\u652f\u6301\u901a\u8fc7\u8868\u5355\u6216 YAML \u6587\u4ef6\u66f4\u65b0\u6570\u636e\u5377\u3002

                                                                          Note

                                                                          \u4ec5\u652f\u6301\u66f4\u65b0\u6570\u636e\u5377\u7684\u522b\u540d\u3001\u5bb9\u91cf\u3001\u8bbf\u95ee\u6a21\u5f0f\u3001\u56de\u6536\u7b56\u7565\u3001\u6807\u7b7e\u548c\u6ce8\u89e3\u3002

                                                                          • \u5728\u6570\u636e\u5377\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684\u6570\u636e\u5377\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                                                                          • \u70b9\u51fb\u6570\u636e\u5377\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u6570\u636e\u5377\u7684\u8be6\u60c5\u9875\u9762\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                                                                          "},{"location":"end-user/kpanda/storage/pv.html#_6","title":"\u5220\u9664\u6570\u636e\u5377","text":"

                                                                          \u5728\u6570\u636e\u5377\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u5220\u9664\u7684\u6570\u636e\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u5220\u9664 \u3002

                                                                          \u4e5f\u53ef\u4ee5\u70b9\u51fb\u6570\u636e\u5377\u7684\u540d\u79f0\uff0c\u5728\u8be6\u60c5\u9875\u9762\u7684\u53f3\u4e0a\u89d2\u70b9\u51fb\u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u5220\u9664 \u3002

                                                                          "},{"location":"end-user/kpanda/storage/pvc.html","title":"\u6570\u636e\u5377\u58f0\u660e(PVC)","text":"

                                                                          \u6301\u4e45\u5377\u58f0\u660e\uff08PersistentVolumeClaim\uff0cPVC\uff09\u8868\u8fbe\u7684\u662f\u7528\u6237\u5bf9\u5b58\u50a8\u7684\u8bf7\u6c42\u3002PVC \u6d88\u8017 PV \u8d44\u6e90\uff0c\u7533\u9886\u4f7f\u7528\u7279\u5b9a\u5927\u5c0f\u3001\u7279\u5b9a\u8bbf\u95ee\u6a21\u5f0f\u7684\u6570\u636e\u5377\uff0c\u4f8b\u5982\u8981\u6c42 PV \u5377\u4ee5 ReadWriteOnce\u3001ReadOnlyMany \u6216 ReadWriteMany \u7b49\u6a21\u5f0f\u6765\u6302\u8f7d\u3002

                                                                          "},{"location":"end-user/kpanda/storage/pvc.html#_1","title":"\u521b\u5efa\u6570\u636e\u5377\u58f0\u660e","text":"

                                                                          \u76ee\u524d\u652f\u6301\u901a\u8fc7 YAML \u548c\u8868\u5355\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u6570\u636e\u5377\u58f0\u660e\uff0c\u8fd9\u4e24\u79cd\u65b9\u5f0f\u5404\u6709\u4f18\u52a3\uff0c\u53ef\u4ee5\u6ee1\u8db3\u4e0d\u540c\u7528\u6237\u7684\u4f7f\u7528\u9700\u6c42\u3002

                                                                          • \u901a\u8fc7 YAML \u521b\u5efa\u6b65\u9aa4\u66f4\u5c11\u3001\u66f4\u9ad8\u6548\uff0c\u4f46\u95e8\u69db\u8981\u6c42\u8f83\u9ad8\uff0c\u9700\u8981\u719f\u6089\u6570\u636e\u5377\u58f0\u660e\u7684 YAML \u6587\u4ef6\u914d\u7f6e\u3002

                                                                          • \u901a\u8fc7\u8868\u5355\u521b\u5efa\u66f4\u76f4\u89c2\u66f4\u7b80\u5355\uff0c\u6839\u636e\u63d0\u793a\u586b\u5199\u5bf9\u5e94\u7684\u503c\u5373\u53ef\uff0c\u4f46\u6b65\u9aa4\u66f4\u52a0\u7e41\u7410\u3002

                                                                          "},{"location":"end-user/kpanda/storage/pvc.html#yaml","title":"YAML \u521b\u5efa","text":"
                                                                          1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e (PVC) -> YAML \u521b\u5efa \u3002

                                                                          2. \u5728\u5f39\u6846\u4e2d\u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u7136\u540e\u5728\u5f39\u6846\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                                                                            \u652f\u6301\u4ece\u672c\u5730\u5bfc\u5165 YAML \u6587\u4ef6\u6216\u5c06\u586b\u5199\u597d\u7684\u6587\u4ef6\u4e0b\u8f7d\u4fdd\u5b58\u5230\u672c\u5730\u3002
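For reference, a minimal PersistentVolumeClaim manifest. The name, namespace, requested size, and storage class are illustrative assumptions:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-demo
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi                # requested capacity (illustrative)
  storageClassName: sc-demo        # storage pool (SC) to provision from (hypothetical name)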

                                                                          "},{"location":"end-user/kpanda/storage/pvc.html#_2","title":"\u8868\u5355\u521b\u5efa","text":"
                                                                          1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e (PVC) -> \u521b\u5efa\u6570\u636e\u5377\u58f0\u660e (PVC) \u3002

                                                                          2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\u3002

                                                                            • \u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\u3001\u547d\u540d\u7a7a\u95f4\u3001\u521b\u5efa\u65b9\u5f0f\u3001\u6570\u636e\u5377\u3001\u5bb9\u91cf\u3001\u8bbf\u95ee\u6a21\u5f0f\u5728\u521b\u5efa\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                                                            • \u521b\u5efa\u65b9\u5f0f\uff1a\u5728\u5df2\u6709\u7684\u5b58\u50a8\u6c60\u6216\u8005\u6570\u636e\u5377\u4e2d\u52a8\u6001\u521b\u5efa\u65b0\u7684\u6570\u636e\u5377\u58f0\u660e\uff0c\u6216\u8005\u57fa\u4e8e\u6570\u636e\u5377\u58f0\u660e\u7684\u5feb\u7167\u521b\u5efa\u65b0\u7684\u6570\u636e\u5377\u58f0\u660e\u3002

                                                                              \u57fa\u4e8e\u5feb\u7167\u521b\u5efa\u65f6\u65e0\u6cd5\u4fee\u6539\u6570\u636e\u5377\u58f0\u660e\u7684\u5bb9\u91cf\uff0c\u53ef\u4ee5\u5728\u521b\u5efa\u5b8c\u6210\u540e\u518d\u8fdb\u884c\u4fee\u6539\u3002

                                                                            • \u9009\u62e9\u521b\u5efa\u65b9\u5f0f\u4e4b\u540e\uff0c\u5728\u4e0b\u62c9\u5217\u8868\u4e2d\u9009\u62e9\u60f3\u8981\u4f7f\u7528\u7684\u5b58\u50a8\u6c60/\u6570\u636e\u5377/\u5feb\u7167\u3002

                                                                            • \u8bbf\u95ee\u6a21\u5f0f\uff1a

                                                                            • ReadWriteOnce\uff0c\u6570\u636e\u5377\u58f0\u660e\u53ef\u4ee5\u88ab\u4e00\u4e2a\u8282\u70b9\u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002

                                                                            • ReadWriteMany\uff0c\u6570\u636e\u5377\u58f0\u660e\u53ef\u4ee5\u88ab\u591a\u4e2a\u8282\u70b9\u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002
                                                                            • ReadOnlyMany\uff0c\u6570\u636e\u5377\u58f0\u660e\u53ef\u4ee5\u88ab\u591a\u4e2a\u8282\u70b9\u4ee5\u53ea\u8bfb\u65b9\u5f0f\u6302\u8f7d\u3002
                                                                            • ReadWriteOncePod\uff0c\u6570\u636e\u5377\u58f0\u660e\u53ef\u4ee5\u88ab\u5355\u4e2a Pod \u4ee5\u8bfb\u5199\u65b9\u5f0f\u6302\u8f7d\u3002

                                                                          "},{"location":"end-user/kpanda/storage/pvc.html#_3","title":"\u67e5\u770b\u6570\u636e\u5377\u58f0\u660e","text":"

                                                                          \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e(PVC) \u3002

                                                                          • \u8be5\u9875\u9762\u53ef\u4ee5\u67e5\u770b\u5f53\u524d\u96c6\u7fa4\u4e2d\u7684\u6240\u6709\u6570\u636e\u5377\u58f0\u660e\uff0c\u4ee5\u53ca\u5404\u4e2a\u6570\u636e\u5377\u58f0\u660e\u7684\u72b6\u6001\u3001\u5bb9\u91cf\u3001\u547d\u540d\u7a7a\u95f4\u7b49\u4fe1\u606f\u3002

                                                                          • \u652f\u6301\u6309\u7167\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\u3001\u72b6\u6001\u3001\u547d\u540d\u7a7a\u95f4\u3001\u521b\u5efa\u65f6\u95f4\u8fdb\u884c\u987a\u5e8f\u6216\u9006\u5e8f\u6392\u5e8f\u3002

                                                                          • \u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u53ef\u4ee5\u67e5\u770b\u8be5\u6570\u636e\u5377\u58f0\u660e\u7684\u57fa\u672c\u914d\u7f6e\u3001\u5b58\u50a8\u6c60\u4fe1\u606f\u3001\u6807\u7b7e\u3001\u6ce8\u89e3\u7b49\u4fe1\u606f\u3002

                                                                          "},{"location":"end-user/kpanda/storage/pvc.html#_4","title":"\u6269\u5bb9\u6570\u636e\u5377\u58f0\u660e","text":"
                                                                          1. \u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u6570\u636e\u5377\u58f0\u660e(PVC) \uff0c\u627e\u5230\u60f3\u8981\u8c03\u6574\u5bb9\u91cf\u7684\u6570\u636e\u5377\u58f0\u660e\u3002

                                                                          2. \u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u70b9\u51fb\u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u6269\u5bb9 \u3002

                                                                          3. \u8f93\u5165\u76ee\u6807\u5bb9\u91cf\uff0c\u7136\u540e\u70b9\u51fb \u786e\u5b9a \u3002
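Under the hood, expansion amounts to raising spec.resources.requests.storage on the claim, and it only works when the claim's storage pool has expansion enabled (see the FAQ below). A sketch of the resulting manifest, with illustrative names and sizes:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-demo
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: sc-demo        # this SC must have expansion enabled
  resources:
    requests:
      storage: 20Gi                # raised from 10Gi; the only field expansion changes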

                                                                          "},{"location":"end-user/kpanda/storage/pvc.html#_5","title":"\u514b\u9686\u6570\u636e\u5377\u58f0\u660e","text":"

                                                                          \u901a\u8fc7\u514b\u9686\u6570\u636e\u5377\u58f0\u660e\uff0c\u53ef\u4ee5\u57fa\u4e8e\u88ab\u514b\u9686\u6570\u636e\u5377\u58f0\u660e\u7684\u914d\u7f6e\uff0c\u91cd\u65b0\u521b\u5efa\u4e00\u4e2a\u65b0\u7684\u6570\u636e\u5377\u58f0\u660e\u3002

                                                                          1. \u8fdb\u5165\u514b\u9686\u9875\u9762

                                                                            • \u5728\u6570\u636e\u5377\u58f0\u660e\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u514b\u9686\u7684\u6570\u636e\u5377\u58f0\u660e\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u514b\u9686 \u3002

                                                                              \u4e5f\u53ef\u4ee5\u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u5728\u8be6\u60c5\u9875\u9762\u7684\u53f3\u4e0a\u89d2\u70b9\u51fb\u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u514b\u9686 \u3002

                                                                          2. \u76f4\u63a5\u4f7f\u7528\u539f\u914d\u7f6e\uff0c\u6216\u8005\u6309\u9700\u8fdb\u884c\u4fee\u6539\uff0c\u7136\u540e\u5728\u9875\u9762\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                                                                          "},{"location":"end-user/kpanda/storage/pvc.html#_6","title":"\u66f4\u65b0\u6570\u636e\u5377\u58f0\u660e","text":"

                                                                          \u6709\u4e24\u79cd\u9014\u5f84\u53ef\u4ee5\u66f4\u65b0\u6570\u636e\u5377\u58f0\u660e\u3002\u652f\u6301\u901a\u8fc7\u8868\u5355\u6216 YAML \u6587\u4ef6\u66f4\u65b0\u6570\u636e\u5377\u58f0\u660e\u3002

                                                                          Note

                                                                          \u4ec5\u652f\u6301\u66f4\u65b0\u6570\u636e\u5377\u58f0\u660e\u7684\u522b\u540d\u3001\u6807\u7b7e\u548c\u6ce8\u89e3\u3002

                                                                          • \u5728\u6570\u636e\u5377\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684\u6570\u636e\u5377\u58f0\u660e\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                                                                          • \u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u8fdb\u5165\u6570\u636e\u5377\u58f0\u660e\u7684\u8be6\u60c5\u9875\u9762\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0a\u89d2\u9009\u62e9 \u66f4\u65b0 \u5373\u53ef\u901a\u8fc7\u8868\u5355\u66f4\u65b0\uff0c\u9009\u62e9 \u7f16\u8f91 YAML \u5373\u53ef\u901a\u8fc7 YAML \u66f4\u65b0\u3002

                                                                          "},{"location":"end-user/kpanda/storage/pvc.html#_7","title":"\u5220\u9664\u6570\u636e\u5377\u58f0\u660e","text":"

                                                                          \u5728\u6570\u636e\u5377\u58f0\u660e\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u5220\u9664\u7684\u6570\u636e\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u5220\u9664 \u3002

                                                                          \u4e5f\u53ef\u4ee5\u70b9\u51fb\u6570\u636e\u5377\u58f0\u660e\u7684\u540d\u79f0\uff0c\u5728\u8be6\u60c5\u9875\u9762\u7684\u53f3\u4e0a\u89d2\u70b9\u51fb\u64cd\u4f5c\u6309\u94ae\u9009\u62e9 \u5220\u9664 \u3002

                                                                          "},{"location":"end-user/kpanda/storage/pvc.html#_8","title":"\u5e38\u89c1\u95ee\u9898","text":"
                                                                          1. \u5982\u679c\u5217\u8868\u4e2d\u6ca1\u6709\u53ef\u9009\u7684\u5b58\u50a8\u6c60\u6216\u6570\u636e\u5377\uff0c\u53ef\u4ee5\u521b\u5efa\u5b58\u50a8\u6c60\u6216\u521b\u5efa\u6570\u636e\u5377\u3002

                                                                          2. \u5982\u679c\u5217\u8868\u4e2d\u6ca1\u6709\u53ef\u9009\u7684\u5feb\u7167\uff0c\u53ef\u4ee5\u8fdb\u5165\u6570\u636e\u5377\u58f0\u660e\u7684\u8be6\u60c5\u9875\uff0c\u5728\u53f3\u4e0a\u89d2\u5236\u4f5c\u5feb\u7167\u3002

                                                                          3. \u5982\u679c\u6570\u636e\u5377\u58f0\u660e\u6240\u4f7f\u7528\u7684\u5b58\u50a8\u6c60 (SC) \u6ca1\u6709\u542f\u7528\u5feb\u7167\uff0c\u5219\u65e0\u6cd5\u5236\u4f5c\u5feb\u7167\uff0c\u9875\u9762\u4e0d\u4f1a\u663e\u793a\u201c\u5236\u4f5c\u5feb\u7167\u201d\u9009\u9879\u3002

                                                                          4. \u5982\u679c\u6570\u636e\u5377\u58f0\u660e\u6240\u4f7f\u7528\u7684\u5b58\u50a8\u6c60 (SC) \u6ca1\u6709\u5f00\u542f\u6269\u5bb9\u529f\u80fd\uff0c\u5219\u8be5\u6570\u636e\u5377\u4e0d\u652f\u6301\u6269\u5bb9\uff0c\u9875\u9762\u4e0d\u4f1a\u663e\u793a\u6269\u5bb9\u9009\u9879\u3002

                                                                          "},{"location":"end-user/kpanda/storage/sc-share.html","title":"\u5171\u4eab\u5b58\u50a8\u6c60","text":"

                                                                          \u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u652f\u6301\u5c06\u4e00\u4e2a\u5b58\u50a8\u6c60\u5171\u4eab\u7ed9\u591a\u4e2a\u547d\u540d\u7a7a\u95f4\u4f7f\u7528\uff0c\u4ee5\u4fbf\u63d0\u9ad8\u8d44\u6e90\u5229\u7528\u6548\u7387\u3002

                                                                          1. \u5728\u5b58\u50a8\u6c60\u5217\u8868\u4e2d\u627e\u5230\u9700\u8981\u5171\u4eab\u7684\u5b58\u50a8\u6c60\uff0c\u5728\u53f3\u4fa7\u64cd\u4f5c\u680f\u4e0b\u70b9\u51fb \u6388\u6743\u547d\u540d\u7a7a\u95f4 \u3002

                                                                          2. \u70b9\u51fb \u81ea\u5b9a\u4e49\u547d\u540d\u7a7a\u95f4 \u53ef\u4ee5\u9010\u4e00\u9009\u62e9\u9700\u8981\u5c06\u6b64\u5b58\u50a8\u6c60\u5171\u4eab\u5230\u54ea\u4e9b\u547d\u540d\u7a7a\u95f4\u3002

                                                                            • \u70b9\u51fb \u6388\u6743\u6240\u6709\u547d\u540d\u7a7a\u95f4 \u53ef\u4ee5\u4e00\u6b21\u6027\u5c06\u6b64\u5b58\u50a8\u6c60\u5171\u4eab\u5230\u5f53\u524d\u96c6\u7fa4\u4e0b\u7684\u6240\u6709\u547d\u540d\u7a7a\u95f4\u3002
                                                                            • \u5728\u5217\u8868\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u65b9\u70b9\u51fb \u79fb\u9664\u6388\u6743 \uff0c\u53ef\u4ee5\u89e3\u9664\u6388\u6743\uff0c\u505c\u6b62\u5c06\u6b64\u5b58\u50a8\u6c60\u5171\u4eab\u5230\u8be5\u547d\u540d\u7a7a\u95f4\u3002

                                                                          "},{"location":"end-user/kpanda/storage/sc.html","title":"\u5b58\u50a8\u6c60(SC)","text":"

                                                                          \u5b58\u50a8\u6c60\u6307\u5c06\u8bb8\u591a\u7269\u7406\u78c1\u76d8\u7ec4\u6210\u4e00\u4e2a\u5927\u578b\u5b58\u50a8\u8d44\u6e90\u6c60\uff0c\u672c\u5e73\u53f0\u652f\u6301\u63a5\u5165\u5404\u7c7b\u5b58\u50a8\u5382\u5546\u540e\u521b\u5efa\u5757\u5b58\u50a8\u6c60\u3001\u672c\u5730\u5b58\u50a8\u6c60\u3001\u81ea\u5b9a\u4e49\u5b58\u50a8\u6c60\uff0c\u7136\u540e\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u52a8\u6001\u914d\u7f6e\u6570\u636e\u5377\u3002

                                                                          "},{"location":"end-user/kpanda/storage/sc.html#sc_1","title":"\u521b\u5efa\u5b58\u50a8\u6c60(SC)","text":"

                                                                          \u76ee\u524d\u652f\u6301\u901a\u8fc7 YAML \u548c\u8868\u5355\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u5b58\u50a8\u6c60\uff0c\u8fd9\u4e24\u79cd\u65b9\u5f0f\u5404\u6709\u4f18\u52a3\uff0c\u53ef\u4ee5\u6ee1\u8db3\u4e0d\u540c\u7528\u6237\u7684\u4f7f\u7528\u9700\u6c42\u3002

                                                                          • \u901a\u8fc7 YAML \u521b\u5efa\u6b65\u9aa4\u66f4\u5c11\u3001\u66f4\u9ad8\u6548\uff0c\u4f46\u95e8\u69db\u8981\u6c42\u8f83\u9ad8\uff0c\u9700\u8981\u719f\u6089\u5b58\u50a8\u6c60\u7684 YAML \u6587\u4ef6\u914d\u7f6e\u3002

                                                                          • \u901a\u8fc7\u8868\u5355\u521b\u5efa\u66f4\u76f4\u89c2\u66f4\u7b80\u5355\uff0c\u6839\u636e\u63d0\u793a\u586b\u5199\u5bf9\u5e94\u7684\u503c\u5373\u53ef\uff0c\u4f46\u6b65\u9aa4\u66f4\u52a0\u7e41\u7410\u3002

                                                                          "},{"location":"end-user/kpanda/storage/sc.html#yaml","title":"YAML \u521b\u5efa","text":"
                                                                          1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u5b58\u50a8\u6c60(SC) -> YAML \u521b\u5efa \u3002

                                                                          2. \u5728\u5f39\u6846\u4e2d\u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u7136\u540e\u5728\u5f39\u6846\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                                                                            \u652f\u6301\u4ece\u672c\u5730\u5bfc\u5165 YAML \u6587\u4ef6\u6216\u5c06\u586b\u5199\u597d\u7684\u6587\u4ef6\u4e0b\u8f7d\u4fdd\u5b58\u5230\u672c\u5730\u3002
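For reference, a minimal StorageClass manifest using the rancher.io/local-path driver mentioned in the form section below; the name and policy values are illustrative:

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: sc-demo
provisioner: rancher.io/local-path     # CSI driver in the vendor-specified format
reclaimPolicy: Delete                  # keep (Retain) or delete the data with the volume
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true             # needed for volumes from this pool to support expansion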

                                                                          "},{"location":"end-user/kpanda/storage/sc.html#_1","title":"\u8868\u5355\u521b\u5efa","text":"
                                                                          1. \u5728\u96c6\u7fa4\u5217\u8868\u4e2d\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u7136\u540e\u5728\u5de6\u4fa7\u5bfc\u822a\u680f\u70b9\u51fb \u5bb9\u5668\u5b58\u50a8 -> \u5b58\u50a8\u6c60(SC) -> \u521b\u5efa\u5b58\u50a8\u6c60(SC) \u3002

                                                                          2. \u586b\u5199\u57fa\u672c\u4fe1\u606f\uff0c\u7136\u540e\u5728\u5e95\u90e8\u70b9\u51fb \u786e\u5b9a \u3002

                                                                            \u81ea\u5b9a\u4e49\u5b58\u50a8\u7cfb\u7edf

                                                                            • \u5b58\u50a8\u6c60\u540d\u79f0\u3001\u9a71\u52a8\u3001\u56de\u6536\u7b56\u7565\u5728\u521b\u5efa\u540e\u4e0d\u53ef\u4fee\u6539\u3002
                                                                            • CSI \u5b58\u50a8\u9a71\u52a8\uff1a\u57fa\u4e8e\u6807\u51c6 Kubernetes \u7684\u5bb9\u5668\u5b58\u50a8\u63a5\u53e3\u63d2\u4ef6\uff0c\u9700\u9075\u5b88\u5b58\u50a8\u5382\u5546\u89c4\u5b9a\u7684\u683c\u5f0f\uff0c\u4f8b\u5982 rancher.io/local-path \u3002

                                                                              • \u6709\u5173\u5982\u4f55\u586b\u5199\u4e0d\u540c\u5382\u5546\u63d0\u4f9b\u7684 CSI \u9a71\u52a8\uff0c\u53ef\u53c2\u8003 Kubernetes \u5b98\u65b9\u6587\u6863\u5b58\u50a8\u7c7b\u3002
                                                                                • \u56de\u6536\u7b56\u7565\uff1a\u5220\u9664\u6570\u636e\u5377\u65f6\uff0c\u4fdd\u7559\u6570\u636e\u5377\u4e2d\u7684\u6570\u636e\u6216\u8005\u5220\u9664\u5176\u4e2d\u7684\u6570\u636e\u3002
                                                                                • \u5feb\u7167/\u6269\u5bb9\uff1a\u5f00\u542f\u540e\uff0c\u57fa\u4e8e\u8be5\u5b58\u50a8\u6c60\u7684\u6570\u636e\u5377/\u6570\u636e\u5377\u58f0\u660e\u624d\u80fd\u652f\u6301\u6269\u5bb9\u548c\u5feb\u7167\u529f\u80fd\uff0c\u4f46 \u524d\u63d0\u662f\u5e95\u5c42\u4f7f\u7528\u7684\u5b58\u50a8\u9a71\u52a8\u652f\u6301\u5feb\u7167\u548c\u6269\u5bb9\u529f\u80fd\u3002

   HwameiStor storage system

   • The storage pool name, driver, and reclaim policy cannot be modified after creation.
   • Storage system: the HwameiStor storage system.
   • Storage type: LVM and raw-disk types are supported.
     • LVM type: recommended by HwameiStor; it supports highly available data volumes, and the corresponding CSI storage driver is lvm.hwameistor.io.
     • Raw-disk data volume: suitable for scenarios that do not need high availability; it has no HA capability, and the corresponding CSI driver is hdd.hwameistor.io.
   • High availability mode: before using the HA capability, confirm that the DRBD component is installed. With HA mode enabled, the number of volume replicas can be set to 1 or 2; if needed, a volume's replica count can be converted from 1 to 2.
   • Reclaim policy: when a data volume is deleted, either keep the data in it or delete it.
   • Snapshot/expansion: once enabled, data volumes and claims based on this storage pool can support expansion and snapshots, provided the underlying storage driver supports those features.

   Note

   HwameiStor currently supports two filesystems, xfs and ext4, with xfs used by default. To switch to ext4, add csi.storage.k8s.io/fstype: ext4 to the custom parameters, as sketched below.
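A hedged sketch of an HwameiStor LVM StorageClass reflecting the options above. The driver name and the fstype parameter come from this page; the replicaNumber key is an assumption drawn from typical HwameiStor usage, not from this page:

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: hwameistor-lvm-demo
provisioner: lvm.hwameistor.io         # documented CSI driver for the LVM type
reclaimPolicy: Retain
allowVolumeExpansion: true
parameters:
  replicaNumber: "2"                   # assumed key: 1 or 2 replicas with HA mode enabled
  csi.storage.k8s.io/fstype: ext4      # documented custom parameter to switch from xfs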

                                                                          "},{"location":"end-user/kpanda/storage/sc.html#sc_2","title":"\u66f4\u65b0\u5b58\u50a8\u6c60(SC)","text":"

                                                                          \u5728\u5b58\u50a8\u6c60\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u66f4\u65b0\u7684\u5b58\u50a8\u6c60\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u7f16\u8f91 \u5373\u53ef\u901a\u8fc7\u66f4\u65b0\u5b58\u50a8\u6c60\u3002

                                                                          Info

                                                                          \u9009\u62e9 \u67e5\u770b YAML \u53ef\u4ee5\u67e5\u770b\u8be5\u5b58\u50a8\u6c60\u7684 YAML \u6587\u4ef6\uff0c\u4f46\u4e0d\u652f\u6301\u7f16\u8f91\u3002

                                                                          "},{"location":"end-user/kpanda/storage/sc.html#sc_3","title":"\u5220\u9664\u5b58\u50a8\u6c60(SC)","text":"

                                                                          \u5728\u5b58\u50a8\u6c60\u5217\u8868\u9875\u9762\uff0c\u627e\u5230\u9700\u8981\u5220\u9664\u7684\u5b58\u50a8\u6c60\uff0c\u5728\u53f3\u4fa7\u7684\u64cd\u4f5c\u680f\u4e0b\u9009\u62e9 \u5220\u9664 \u3002

                                                                          "},{"location":"end-user/kpanda/workloads/create-cronjob.html","title":"\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\uff08CronJob\uff09","text":"

                                                                          \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\uff08CronJob\uff09\u3002

                                                                          \u5b9a\u65f6\u4efb\u52a1\uff08CronJob\uff09\u9002\u7528\u4e8e\u4e8e\u6267\u884c\u5468\u671f\u6027\u7684\u64cd\u4f5c\uff0c\u4f8b\u5982\u5907\u4efd\u3001\u62a5\u544a\u751f\u6210\u7b49\u3002\u8fd9\u4e9b\u4efb\u52a1\u53ef\u4ee5\u914d\u7f6e\u4e3a\u5468\u671f\u6027\u91cd\u590d\u7684\uff08\u4f8b\u5982\uff1a\u6bcf\u5929/\u6bcf\u5468/\u6bcf\u6708\u4e00\u6b21\uff09\uff0c\u53ef\u4ee5\u5b9a\u4e49\u4efb\u52a1\u5f00\u59cb\u6267\u884c\u7684\u65f6\u95f4\u95f4\u9694\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/create-cronjob.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                                          \u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\uff08CronJob\uff09\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                                          • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                                          • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                                                                          • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                                          • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/create-cronjob.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

                                                                          \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u5b9a\u65f6\u4efb\u52a1\u3002

                                                                          1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                                                          2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u5b9a\u65f6\u4efb\u52a1 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                                                                          3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\u3001\u5b9a\u65f6\u4efb\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                                                                            \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de \u5b9a\u65f6\u4efb\u52a1 \u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u5b9a\u65f6\u4efb\u52a1\u6267\u884c\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u3001\u91cd\u542f\u7b49\u64cd\u4f5c\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/create-cronjob.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"

                                                                          \u5728 \u521b\u5efa\u5b9a\u65f6\u4efb\u52a1 \u9875\u9762\u4e2d\uff0c\u6839\u636e\u4e0b\u8868\u8f93\u5165\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                                                          • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                                                          • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u5b9a\u65f6\u4efb\u52a1\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                                                          • \u63cf\u8ff0\uff1a\u8f93\u5165\u5de5\u4f5c\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u91cf\u5e94\u4e0d\u8d85\u8fc7 512 \u4e2a\u3002
                                                                          "},{"location":"end-user/kpanda/workloads/create-cronjob.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                                                                          \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                                                          \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

                                                                          \u57fa\u672c\u4fe1\u606f\uff08\u5fc5\u586b\uff09\u751f\u547d\u5468\u671f\uff08\u9009\u586b\uff09\u5065\u5eb7\u68c0\u67e5\uff08\u9009\u586b\uff09\u73af\u5883\u53d8\u91cf\uff08\u9009\u586b\uff09\u6570\u636e\u5b58\u50a8\uff08\u9009\u586b\uff09\u5b89\u5168\u8bbe\u7f6e\uff08\u9009\u586b\uff09

When configuring container parameters, the container name and image must be filled in correctly; otherwise you cannot proceed to the next step. Fill in the configuration according to the following requirements, then click Confirm.

• Container type: defaults to a work container. For init containers, see the official Kubernetes documentation.
• Container name: at most 63 characters; lowercase letters, digits, and hyphens ("-") are supported. It must start and end with a lowercase letter or digit, for example nginx-01.
• Image:
  • Container image: select a suitable image from the list. When you type an image name, the image is pulled from the official DockerHub by default. After the image registry module of the Suanova AI computing platform has been connected, you can click the Select Image button on the right to choose an image.
  • Image version: select a suitable version from the drop-down list.
  • Image pull policy: if Always pull image is checked, the image is re-pulled from the registry every time the workload restarts or upgrades. If unchecked, only the local image is used, and the image is pulled from the registry only when it does not exist locally. For more details, refer to Image Pull Policy.
  • Image registry secret: optional. If the target registry requires a secret for access, create one first.
• Privileged container: by default a container cannot access any device on the host. With privileged mode enabled, the container can access all devices on the host and has all the privileges of processes running on the host.
• CPU/memory quota: the request (minimum resources needed) and limit (maximum resources allowed) for CPU/memory. Configure resources for the container as needed, to avoid both waste and system failures caused by containers exceeding their quotas. Defaults are as shown.
• GPU configuration: configure GPU usage for the container; only positive integers can be entered.
  • Whole-card mode:
    • Number of physical cards: how many physical GPU cards the container can use. Once configured, the container occupies whole physical GPU cards, and the number must be ≤ the maximum number of GPU cards installed on a single node.
  • Virtualization mode:
    • Number of physical cards: how many physical GPU cards the container can use; the number must be ≤ the maximum number of GPU cards installed on a single node.
    • GPU compute power: the percentage of compute power to use on each physical GPU card, at most 100%.
    • GPU memory: the amount of GPU memory to use on each physical card.
    • Scheduling policy (Binpack / Spread): two scheduling dimensions are supported, per GPU card and per node. Binpack is a consolidating policy that prefers to schedule containers onto the same GPU card of the same node; Spread is a dispersing policy that prefers to schedule containers onto different GPU cards of different nodes. They can be combined according to the actual scenario. (When the workload-level Binpack/Spread policy conflicts with the cluster-level one, the workload-level policy takes precedence.)
    • Task priority: GPU compute power is given preferentially to high-priority tasks; ordinary tasks reduce or even pause their use of GPU compute power until the high-priority task finishes, after which they resume. This is commonly used in online/offline colocation scenarios.
    • Specified model: schedule the workload onto GPU cards of a specified model, for scenarios with special requirements on the GPU model.
  • MIG mode:
    • Spec: the spec of the partitioned physical GPU card.
    • Quantity: how many instances of that spec to use.

Before configuring GPUs, the administrator needs to have installed the GPU Operator, and nvidia-vgpu for vGPU mode, on the cluster in advance, and to have enabled the GPU feature in the cluster settings. (A sketch of what whole-card mode looks like on the container spec follows.)
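As a rough illustration, in whole-card mode the request ends up as an extended resource on the container. The nvidia.com/gpu key below is the upstream GPU Operator convention and is an assumption here; the platform's virtualization and MIG modes may expose different resource names:

# container spec fragment: requests one whole physical GPU
resources:
  limits:
    cpu: 250m
    memory: 512Mi
    nvidia.com/gpu: 1                  # whole-card mode; positive integers only (assumed key)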

Set the commands to run when the container starts, after it starts, and before it stops. For details, refer to Container Lifecycle Configuration.

Health checks determine the health of the container and the application, which helps improve availability. For details, refer to Container Health Check Configuration.

Configure container parameters within the Pod, add environment variables, pass configuration, and so on. For details, refer to Container Environment Variable Configuration.

Configure how the container mounts data volumes and persists data. For details, refer to Container Data Storage Configuration.

Containers are isolated through Linux's built-in account-permission isolation mechanism. You can limit a container's permissions by running it under account UIDs (numeric identity tags) with different permissions; for example, entering 0 means running with the root account's permissions. A minimal sketch follows.
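For instance, a UID entered in the security settings corresponds to runAsUser in the container's securityContext; the UID 1000 below is an illustrative non-root choice:

# container spec fragment: run as a non-root user to limit permissions
securityContext:
  runAsUser: 1000                      # UID 1000 instead of 0 (root); illustrative value
  privileged: false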

                                                                          "},{"location":"end-user/kpanda/workloads/create-cronjob.html#_5","title":"\u5b9a\u65f6\u4efb\u52a1\u914d\u7f6e","text":"
                                                                          • \u5e76\u53d1\u7b56\u7565\uff1a\u662f\u5426\u5141\u8bb8\u591a\u4e2a Job \u4efb\u52a1\u5e76\u884c\u6267\u884c\u3002

                                                                            • Allow \uff1a\u53ef\u4ee5\u5728\u524d\u4e00\u4e2a\u4efb\u52a1\u672a\u5b8c\u6210\u65f6\u5c31\u521b\u5efa\u65b0\u7684\u5b9a\u65f6\u4efb\u52a1\uff0c\u800c\u4e14\u591a\u4e2a\u4efb\u52a1\u53ef\u4ee5\u5e76\u884c\u3002\u4efb\u52a1\u592a\u591a\u53ef\u80fd\u62a2\u5360\u96c6\u7fa4\u8d44\u6e90\u3002
                                                                            • Forbid \uff1a\u5728\u524d\u4e00\u4e2a\u4efb\u52a1\u5b8c\u6210\u4e4b\u524d\uff0c\u4e0d\u80fd\u521b\u5efa\u65b0\u4efb\u52a1\uff0c\u5982\u679c\u65b0\u4efb\u52a1\u7684\u6267\u884c\u65f6\u95f4\u5230\u4e86\u800c\u4e4b\u524d\u7684\u4efb\u52a1\u4ecd\u672a\u6267\u884c\u5b8c\uff0cCronJob \u4f1a\u5ffd\u7565\u65b0\u4efb\u52a1\u7684\u6267\u884c\u3002
                                                                            • Replace \uff1a\u5982\u679c\u65b0\u4efb\u52a1\u7684\u6267\u884c\u65f6\u95f4\u5230\u4e86\uff0c\u4f46\u524d\u4e00\u4e2a\u4efb\u52a1\u8fd8\u672a\u5b8c\u6210\uff0c\u65b0\u7684\u4efb\u52a1\u4f1a\u53d6\u4ee3\u524d\u4e00\u4e2a\u4efb\u52a1\u3002

                                                                            \u4e0a\u8ff0\u89c4\u5219\u4ec5\u9002\u7528\u4e8e\u540c\u4e00\u4e2a CronJob \u521b\u5efa\u7684\u591a\u4e2a\u4efb\u52a1\u3002\u591a\u4e2a CronJob \u521b\u5efa\u7684\u591a\u4e2a\u4efb\u52a1\u603b\u662f\u5141\u8bb8\u5e76\u53d1\u6267\u884c\u3002

                                                                          • \u5b9a\u65f6\u89c4\u5219\uff1a\u57fa\u4e8e\u5206\u949f\u3001\u5c0f\u65f6\u3001\u5929\u3001\u5468\u3001\u6708\u8bbe\u7f6e\u4efb\u52a1\u6267\u884c\u7684\u65f6\u95f4\u5468\u671f\u3002\u652f\u6301\u7528\u6570\u5b57\u548c * \u81ea\u5b9a\u4e49 Cron \u8868\u8fbe\u5f0f\uff0c\u8f93\u5165\u8868\u8fbe\u5f0f\u540e\u4e0b\u65b9\u4f1a\u63d0\u793a\u5f53\u524d\u8868\u8fbe\u5f0f\u7684\u542b\u4e49\u3002\u6709\u5173\u8be6\u7ec6\u7684\u8868\u8fbe\u5f0f\u8bed\u6cd5\u89c4\u5219\uff0c\u53ef\u53c2\u8003 Cron \u65f6\u95f4\u8868\u8bed\u6cd5\u3002

                                                                          • \u4efb\u52a1\u8bb0\u5f55\uff1a\u8bbe\u5b9a\u4fdd\u7559\u591a\u5c11\u6761\u4efb\u52a1\u6267\u884c\u6210\u529f\u6216\u5931\u8d25\u7684\u8bb0\u5f55\u3002 0 \u8868\u793a\u4e0d\u4fdd\u7559\u3002
                                                                          • \u8d85\u65f6\u65f6\u95f4\uff1a\u8d85\u51fa\u8be5\u65f6\u95f4\u65f6\uff0c\u4efb\u52a1\u5c31\u4f1a\u88ab\u6807\u8bc6\u4e3a\u6267\u884c\u5931\u8d25\uff0c\u4efb\u52a1\u4e0b\u7684\u6240\u6709 Pod \u90fd\u4f1a\u88ab\u5220\u9664\u3002\u4e3a\u7a7a\u65f6\u8868\u793a\u4e0d\u8bbe\u7f6e\u8d85\u65f6\u65f6\u95f4\u3002\u9ed8\u8ba4\u503c\u4e3a 360 s\u3002
                                                                          • \u91cd\u8bd5\u6b21\u6570\uff1a\u4efb\u52a1\u53ef\u91cd\u8bd5\u6b21\u6570\uff0c\u9ed8\u8ba4\u503c\u4e3a 6\u3002
                                                                          • \u91cd\u542f\u7b56\u7565\uff1a\u8bbe\u7f6e\u4efb\u52a1\u5931\u8d25\u65f6\u662f\u5426\u91cd\u542f Pod\u3002
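A hedged sketch of how the form fields above correspond to fields in the CronJob spec; the mapping follows upstream Kubernetes, and the values mirror the defaults mentioned above:

# CronJob spec fragment: form fields -> Kubernetes fields
spec:
  schedule: "0 0 * * *"                # schedule rule: daily at midnight (example)
  concurrencyPolicy: Forbid            # concurrency policy: Allow / Forbid / Replace
  successfulJobsHistoryLimit: 3        # job records kept for successful runs
  failedJobsHistoryLimit: 1            # job records kept for failed runs
  jobTemplate:
    spec:
      activeDeadlineSeconds: 360       # timeout, default 360 s
      backoffLimit: 6                  # retries, default 6
      template:
        spec:
          restartPolicy: Never         # restart policy when the task fails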
                                                                          "},{"location":"end-user/kpanda/workloads/create-cronjob.html#_6","title":"\u670d\u52a1\u914d\u7f6e","text":"

                                                                          \u4e3a\u6709\u72b6\u6001\u8d1f\u8f7d\u914d\u7f6e\u670d\u52a1\uff08Service\uff09\uff0c\u4f7f\u6709\u72b6\u6001\u8d1f\u8f7d\u80fd\u591f\u88ab\u5916\u90e8\u8bbf\u95ee\u3002

                                                                          1. \u70b9\u51fb \u521b\u5efa\u670d\u52a1 \u6309\u94ae\u3002

                                                                          2. \u53c2\u8003\u521b\u5efa\u670d\u52a1\uff0c\u914d\u7f6e\u670d\u52a1\u53c2\u6570\u3002

                                                                          3. \u70b9\u51fb \u786e\u5b9a \uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                                                          "},{"location":"end-user/kpanda/workloads/create-cronjob.html#_7","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

                                                                          \u5b9a\u65f6\u4efb\u52a1\u7684\u9ad8\u7ea7\u914d\u7f6e\u4e3b\u8981\u6d89\u53ca\u6807\u7b7e\u4e0e\u6ce8\u89e3\u3002

                                                                          \u53ef\u4ee5\u70b9\u51fb \u6dfb\u52a0 \u6309\u94ae\u4e3a\u5de5\u4f5c\u8d1f\u8f7d\u5b9e\u4f8b Pod \u6dfb\u52a0\u6807\u7b7e\u548c\u6ce8\u89e3\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/create-cronjob.html#yaml","title":"YAML \u521b\u5efa","text":"

                                                                          \u9664\u4e86\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u5916\uff0c\u8fd8\u53ef\u4ee5\u901a\u8fc7 YAML \u6587\u4ef6\u66f4\u5feb\u901f\u5730\u521b\u5efa\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\u3002

                                                                          1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                                                          2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u5b9a\u65f6\u4efb\u52a1 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 YAML \u521b\u5efa \u6309\u94ae\u3002

                                                                          3. \u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u5b8c\u6210\u521b\u5efa\u3002

                                                                          \u70b9\u51fb\u67e5\u770b\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\u7684 YAML \u793a\u4f8b
apiVersion: batch/v1
kind: CronJob
metadata:
  creationTimestamp: '2022-12-26T09:45:47Z'
  generation: 1
  name: demo
  namespace: default
  resourceVersion: '92726617'
  uid: d030d8d7-a405-4dcd-b09a-176942ef36c9
spec:
  concurrencyPolicy: Allow
  failedJobsHistoryLimit: 1
  jobTemplate:
    metadata:
      creationTimestamp: null
    spec:
      activeDeadlineSeconds: 360
      backoffLimit: 6
      template:
        metadata:
          creationTimestamp: null
        spec:
          containers:
            - image: nginx
              imagePullPolicy: IfNotPresent
              lifecycle: {}
              name: container-3
              resources:
                limits:
                  cpu: 250m
                  memory: 512Mi
                requests:
                  cpu: 250m
                  memory: 512Mi
              securityContext:
                privileged: false
              terminationMessagePath: /dev/termination-log
              terminationMessagePolicy: File
          dnsPolicy: ClusterFirst
          restartPolicy: Never
          schedulerName: default-scheduler
          securityContext: {}
          terminationGracePeriodSeconds: 30
  schedule: 0 0 13 * 5
  successfulJobsHistoryLimit: 3
  suspend: false
status: {}
                                                                          "},{"location":"end-user/kpanda/workloads/create-daemonset.html","title":"\u521b\u5efa\u5b88\u62a4\u8fdb\u7a0b(DaemonSet)","text":"

                                                                          \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u5b88\u62a4\u8fdb\u7a0b\uff08DaemonSet\uff09\u3002

                                                                          \u5b88\u62a4\u8fdb\u7a0b\uff08DaemonSet\uff09\u901a\u8fc7\u8282\u70b9\u4eb2\u548c\u6027\u4e0e\u6c61\u70b9\u529f\u80fd\u786e\u4fdd\u5728\u5168\u90e8\u6216\u90e8\u5206\u8282\u70b9\u4e0a\u8fd0\u884c\u4e00\u4e2a Pod \u7684\u526f\u672c\u3002\u5bf9\u4e8e\u65b0\u52a0\u5165\u96c6\u7fa4\u7684\u8282\u70b9\uff0cDaemonSet \u81ea\u52a8\u5728\u65b0\u8282\u70b9\u4e0a\u90e8\u7f72\u76f8\u5e94\u7684 Pod\uff0c\u5e76\u8ddf\u8e2a Pod \u7684\u8fd0\u884c\u72b6\u6001\u3002\u5f53\u8282\u70b9\u88ab\u79fb\u9664\u65f6\uff0cDaemonSet \u5219\u5220\u9664\u5176\u521b\u5efa\u7684\u6240\u6709 Pod\u3002

                                                                          \u5b88\u62a4\u8fdb\u7a0b\u7684\u5e38\u89c1\u7528\u4f8b\u5305\u62ec\uff1a

                                                                          • \u5728\u6bcf\u4e2a\u8282\u70b9\u4e0a\u8fd0\u884c\u96c6\u7fa4\u5b88\u62a4\u8fdb\u7a0b\u3002

                                                                          • \u5728\u6bcf\u4e2a\u8282\u70b9\u4e0a\u8fd0\u884c\u65e5\u5fd7\u6536\u96c6\u5b88\u62a4\u8fdb\u7a0b\u3002

                                                                          • \u5728\u6bcf\u4e2a\u8282\u70b9\u4e0a\u8fd0\u884c\u76d1\u63a7\u5b88\u62a4\u8fdb\u7a0b\u3002

                                                                          \u7b80\u5355\u8d77\u89c1\uff0c\u53ef\u4ee5\u5728\u6bcf\u4e2a\u8282\u70b9\u4e0a\u4e3a\u6bcf\u79cd\u7c7b\u578b\u7684\u5b88\u62a4\u8fdb\u7a0b\u90fd\u542f\u52a8\u4e00\u4e2a DaemonSet\u3002\u5982\u9700\u66f4\u7cbe\u7ec6\u3001\u66f4\u9ad8\u7ea7\u5730\u7ba1\u7406\u5b88\u62a4\u8fdb\u7a0b\uff0c\u4e5f\u53ef\u4ee5\u4e3a\u540c\u4e00\u79cd\u5b88\u62a4\u8fdb\u7a0b\u90e8\u7f72\u591a\u4e2a DaemonSet\u3002\u6bcf\u4e2a DaemonSet \u5177\u6709\u4e0d\u540c\u7684\u6807\u5fd7\uff0c\u5e76\u4e14\u5bf9\u4e0d\u540c\u786c\u4ef6\u7c7b\u578b\u5177\u6709\u4e0d\u540c\u7684\u5185\u5b58\u3001CPU \u8981\u6c42\u3002
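For example, a minimal DaemonSet for a log collection daemon might look like the following sketch (the names and image are illustrative; a complete real-world example appears at the end of this page):

kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: log-agent            # illustrative name
  namespace: default
spec:
  selector:
    matchLabels:
      app: log-agent
  template:
    metadata:
      labels:
        app: log-agent
    spec:
      containers:
        - name: agent
          image: fluentd     # illustrative log collector image
      tolerations:           # also run the Pod on control-plane nodes
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule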

                                                                          "},{"location":"end-user/kpanda/workloads/create-daemonset.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                                          \u521b\u5efa DaemonSet \u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                                          • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                                          • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                                                                          • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                                          • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/create-daemonset.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

                                                                          \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u5b88\u62a4\u8fdb\u7a0b\u3002

                                                                          1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                                                          2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u5b88\u62a4\u8fdb\u7a0b \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                                                                          3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\u3001\u670d\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                                                                            \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de \u5b88\u62a4\u8fdb\u7a0b \u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u5b88\u62a4\u8fdb\u7a0b\u6267\u884c\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u3001\u91cd\u542f\u7b49\u64cd\u4f5c\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/create-daemonset.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"

                                                                          \u5728 \u521b\u5efa\u5b88\u62a4\u8fdb\u7a0b \u9875\u9762\u4e2d\uff0c\u6839\u636e\u4e0b\u8868\u8f93\u5165\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                                                          • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                                                          • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u5b88\u62a4\u8fdb\u7a0b\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                                                          • \u63cf\u8ff0\uff1a\u8f93\u5165\u5de5\u4f5c\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u91cf\u5e94\u4e0d\u8d85\u8fc7 512 \u4e2a\u3002
                                                                          "},{"location":"end-user/kpanda/workloads/create-daemonset.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                                                                          \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                                                          \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

                                                                          \u57fa\u672c\u4fe1\u606f\uff08\u5fc5\u586b\uff09\u751f\u547d\u5468\u671f\uff08\u9009\u586b\uff09\u5065\u5eb7\u68c0\u67e5\uff08\u9009\u586b\uff09\u73af\u5883\u53d8\u91cf\uff08\u9009\u586b\uff09\u6570\u636e\u5b58\u50a8\uff08\u9009\u586b\uff09\u5b89\u5168\u8bbe\u7f6e\uff08\u9009\u586b\uff09

                                                                          \u5728\u914d\u7f6e\u5bb9\u5668\u76f8\u5173\u53c2\u6570\u65f6\uff0c\u5fc5\u987b\u6b63\u786e\u586b\u5199\u5bb9\u5668\u7684\u540d\u79f0\u3001\u955c\u50cf\u53c2\u6570\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u8fdb\u5165\u4e0b\u4e00\u6b65\u3002\u53c2\u8003\u4ee5\u4e0b\u8981\u6c42\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u8ba4 \u3002

                                                                          • \u5bb9\u5668\u7c7b\u578b\uff1a\u9ed8\u8ba4\u4e3a\u5de5\u4f5c\u5bb9\u5668\u3002\u6709\u5173\u521d\u59cb\u5316\u5bb9\u5668\uff0c\u53c2\u89c1 k8s \u5b98\u65b9\u6587\u6863\u3002
                                                                          • \u5bb9\u5668\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u652f\u6301\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\u3002\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 nginx-01\u3002
                                                                          • \u955c\u50cf\uff1a
                                                                            • \u5bb9\u5668\u955c\u50cf\uff1a\u4ece\u5217\u8868\u4e2d\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u955c\u50cf\u3002\u8f93\u5165\u955c\u50cf\u540d\u79f0\u65f6\uff0c\u9ed8\u8ba4\u4ece\u5b98\u65b9\u7684 DockerHub \u62c9\u53d6\u955c\u50cf\u3002 \u63a5\u5165\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u955c\u50cf\u4ed3\u5e93\u6a21\u5757\u540e\uff0c\u53ef\u4ee5\u70b9\u51fb\u53f3\u4fa7\u7684 \u9009\u62e9\u955c\u50cf \u6309\u94ae\u6765\u9009\u62e9\u955c\u50cf\u3002
                                                                            • \u955c\u50cf\u7248\u672c\uff1a\u4ece\u4e0b\u62c9\u5217\u8868\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u7248\u672c\u3002
                                                                            • \u955c\u50cf\u62c9\u53d6\u7b56\u7565\uff1a\u52fe\u9009 \u603b\u662f\u62c9\u53d6\u955c\u50cf \u540e\uff0c\u8d1f\u8f7d\u6bcf\u6b21\u91cd\u542f/\u5347\u7ea7\u65f6\u90fd\u4f1a\u4ece\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u955c\u50cf\u3002 \u5982\u679c\u4e0d\u52fe\u9009\uff0c\u5219\u53ea\u62c9\u53d6\u672c\u5730\u955c\u50cf\uff0c\u53ea\u6709\u5f53\u955c\u50cf\u5728\u672c\u5730\u4e0d\u5b58\u5728\u65f6\u624d\u4ece\u955c\u50cf\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u3002 \u66f4\u591a\u8be6\u60c5\u53ef\u53c2\u8003\u955c\u50cf\u62c9\u53d6\u7b56\u7565\u3002
                                                                            • \u955c\u50cf\u4ed3\u5e93\u5bc6\u94a5\uff1a\u53ef\u9009\u3002\u5982\u679c\u76ee\u6807\u4ed3\u5e93\u9700\u8981 Secret \u624d\u80fd\u8bbf\u95ee\uff0c\u9700\u8981\u5148\u53bb\u521b\u5efa\u4e00\u4e2a\u5bc6\u94a5\u3002
                                                                          • \u7279\u6743\u5bb9\u5668\uff1a\u5bb9\u5668\u9ed8\u8ba4\u4e0d\u53ef\u4ee5\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u4efb\u4f55\u8bbe\u5907\uff0c\u5f00\u542f\u7279\u6743\u5bb9\u5668\u540e\uff0c\u5bb9\u5668\u5373\u53ef\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u6240\u6709\u8bbe\u5907\uff0c\u4eab\u6709\u5bbf\u4e3b\u673a\u4e0a\u7684\u8fd0\u884c\u8fdb\u7a0b\u7684\u6240\u6709\u6743\u9650\u3002
                                                                          • CPU/\u5185\u5b58\u914d\u989d\uff1aCPU/\u5185\u5b58\u8d44\u6e90\u7684\u8bf7\u6c42\u503c\uff08\u9700\u8981\u4f7f\u7528\u7684\u6700\u5c0f\u8d44\u6e90\uff09\u548c\u9650\u5236\u503c\uff08\u5141\u8bb8\u4f7f\u7528\u7684\u6700\u5927\u8d44\u6e90\uff09\u3002\u8bf7\u6839\u636e\u9700\u8981\u4e3a\u5bb9\u5668\u914d\u7f6e\u8d44\u6e90\uff0c\u907f\u514d\u8d44\u6e90\u6d6a\u8d39\u548c\u56e0\u5bb9\u5668\u8d44\u6e90\u8d85\u989d\u5bfc\u81f4\u7cfb\u7edf\u6545\u969c\u3002\u9ed8\u8ba4\u503c\u5982\u56fe\u6240\u793a\u3002
                                                                          • GPU \u914d\u7f6e\uff1a\u4e3a\u5bb9\u5668\u914d\u7f6e GPU \u7528\u91cf\uff0c \u4ec5\u652f\u6301\u8f93\u5165\u6b63\u6574\u6570\u3002
                                                                            • \u6574\u5361\u6a21\u5f0f\uff1a
                                                                              • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\u3002\u914d\u7f6e\u540e\uff0c\u5bb9\u5668\u5c06\u5360\u7528\u6574\u5f20\u7269\u7406 GPU\u5361\u3002\u540c\u65f6\u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                                                                            • \u865a\u62df\u5316\u6a21\u5f0f\uff1a
                                                                              • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\uff0c \u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                                                                              • GPU \u7b97\u529b\uff1a\u6bcf\u5f20\u7269\u7406 GPU \u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u7b97\u529b\u767e\u5206\u6bd4\uff0c\u6700\u591a\u4e3a100%\u3002
                                                                              • \u663e\u5b58\uff1a\u6bcf\u5f20\u7269\u7406\u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u663e\u5b58\u6570\u91cf\u3002
                                                                              • \u8c03\u5ea6\u7b56\u7565\uff08Binpack / Spread\uff09\uff1a\u652f\u6301\u57fa\u4e8e GPU \u5361\u548c\u57fa\u4e8e\u8282\u70b9\u7684\u4e24\u79cd\u7ef4\u5ea6\u7684\u8c03\u5ea6\u7b56\u7565\u3002Binpack \u662f\u96c6\u4e2d\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u540c\u4e00\u4e2a\u8282\u70b9\u7684\u540c\u4e00\u5f20 GPU \u5361\u4e0a\uff1bSpread \u662f\u5206\u6563\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u4e0d\u540c\u8282\u70b9\u7684\u4e0d\u540c GPU \u5361\u4e0a\uff0c\u6839\u636e\u5b9e\u9645\u573a\u666f\u53ef\u7ec4\u5408\u4f7f\u7528\u3002\uff08\u5f53\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u4e0e\u96c6\u7fa4\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u51b2\u7a81\u65f6\uff0c\u7cfb\u7edf\u4f18\u5148\u4f7f\u7528\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684\u8c03\u5ea6\u7b56\u7565\uff09\u3002
                                                                              • \u4efb\u52a1\u4f18\u5148\u7ea7\uff1aGPU \u7b97\u529b\u4f1a\u4f18\u5148\u4f9b\u7ed9\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u4f7f\u7528\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u51cf\u5c11\u751a\u81f3\u6682\u505c\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u76f4\u5230\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u7ed3\u675f\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u91cd\u65b0\u7ee7\u7eed\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u5e38\u7528\u4e8e\u5728\u79bb\u7ebf\u6df7\u90e8\u573a\u666f\u3002
                                                                              • \u6307\u5b9a\u578b\u53f7\uff1a\u5c06\u5de5\u4f5c\u8d1f\u8f7d\u8c03\u5ea6\u5230\u6307\u5b9a\u578b\u53f7\u7684 GPU \u5361\u4e0a\uff0c\u9002\u7528\u4e8e\u5bf9 GPU \u578b\u53f7\u6709\u7279\u6b8a\u8981\u6c42\u7684\u573a\u666f\u3002
                                                                            • Mig \u6a21\u5f0f
                                                                              • \u89c4\u683c\uff1a\u5207\u5206\u540e\u7684\u7269\u7406 GPU \u5361\u89c4\u683c\u3002
                                                                              • \u6570\u91cf\uff1a\u4f7f\u7528\u8be5\u89c4\u683c\u7684\u6570\u91cf\u3002
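As promised above, a minimal sketch of how the basic-information fields land in the container entry of the Pod template (all values are illustrative):

containers:
  - name: nginx-01                # container name
    image: nginx:1.25             # container image and tag
    imagePullPolicy: Always       # "Always pull image" checked; otherwise IfNotPresent
    securityContext:
      privileged: false           # privileged container switch
    resources:
      requests:                   # minimum resources required
        cpu: 250m
        memory: 512Mi
      limits:                     # maximum resources allowed
        cpu: 250m
        memory: 512Mi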

Before configuring GPUs, the administrator must install the GPU Operator (and nvidia-vgpu, needed only for vGPU mode) on the cluster in advance, and enable the GPU feature in the cluster settings.
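For reference, in whole-card mode the configured card count is ultimately expressed as an extended resource on the container. The exact resource name depends on the installed device plugin; with the standard NVIDIA GPU Operator it is typically nvidia.com/gpu (the resource names used by virtualization and MIG modes depend on the installed components), e.g.:

resources:
  limits:
    nvidia.com/gpu: 1   # whole-card mode: one physical GPU card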

Set the commands to run when the container starts, after it has started, and before it stops. See Container Lifecycle Configuration for details.
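These three hooks correspond to the container's command/args and lifecycle fields; a sketch with illustrative commands:

containers:
  - name: main
    image: nginx
    command: ["/bin/sh", "-c", "nginx -g 'daemon off;'"]   # command run at startup
    lifecycle:
      postStart:                  # runs right after the container starts
        exec:
          command: ["/bin/sh", "-c", "echo started"]
      preStop:                    # runs before the container stops
        exec:
          command: ["/bin/sh", "-c", "nginx -s quit"]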

Health checks determine the health of the container and the application, which helps improve application availability. See Container Health Check Configuration for details.
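A typical health check maps to the container's liveness and readiness probes, for example:

livenessProbe:            # container is restarted when this fails
  httpGet:
    path: /healthz
    port: 80
  initialDelaySeconds: 10
  periodSeconds: 10
readinessProbe:           # Pod is removed from Service endpoints when this fails
  tcpSocket:
    port: 80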

Configure container parameters within the Pod, such as adding environment variables or passing configuration to the Pod. See Container Environment Variable Configuration for details.
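Environment variables can be set literally or injected from a ConfigMap or Secret; a sketch with illustrative names:

env:
  - name: LOG_LEVEL          # literal value
    value: info
  - name: DB_PASSWORD        # injected from a Secret named db-secret
    valueFrom:
      secretKeyRef:
        name: db-secret
        key: password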

Configure the volumes the container mounts and the data persistence settings. See Container Data Storage Configuration for details.
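A data storage entry maps to a volume plus a volumeMount in the Pod spec, e.g. mounting a PersistentVolumeClaim (names are illustrative):

volumes:
  - name: data
    persistentVolumeClaim:
      claimName: my-pvc        # illustrative PVC name
containers:
  - name: main
    image: nginx
    volumeMounts:
      - name: data
        mountPath: /var/lib/data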

Containers are isolated for security through Linux's built-in account permission isolation mechanism. You can restrict a container's permissions by using account UIDs (numeric identity markers) with different privileges. For example, entering 0 means using the permissions of the root account.
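The UID entered here becomes the container's runAsUser, for example:

securityContext:
  runAsUser: 1000   # run as a non-root account; 0 would mean root privileges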

                                                                          "},{"location":"end-user/kpanda/workloads/create-daemonset.html#_5","title":"\u670d\u52a1\u914d\u7f6e","text":"

                                                                          \u4e3a\u5b88\u62a4\u8fdb\u7a0b\u521b\u5efa\u670d\u52a1\uff08Service\uff09\uff0c\u4f7f\u5b88\u62a4\u8fdb\u7a0b\u80fd\u591f\u88ab\u5916\u90e8\u8bbf\u95ee\u3002

                                                                          1. \u70b9\u51fb \u521b\u5efa\u670d\u52a1 \u6309\u94ae\u3002

                                                                          2. \u914d\u7f6e\u670d\u52a1\u53c2\u6570\uff0c\u8be6\u60c5\u8bf7\u53c2\u8003\u521b\u5efa\u670d\u52a1\u3002

                                                                          3. \u70b9\u51fb \u786e\u5b9a \uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                                                          "},{"location":"end-user/kpanda/workloads/create-daemonset.html#_6","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

                                                                          \u9ad8\u7ea7\u914d\u7f6e\u5305\u62ec\u8d1f\u8f7d\u7684\u7f51\u7edc\u914d\u7f6e\u3001\u5347\u7ea7\u7b56\u7565\u3001\u8c03\u5ea6\u7b56\u7565\u3001\u6807\u7b7e\u4e0e\u6ce8\u89e3\u56db\u90e8\u5206\uff0c\u53ef\u70b9\u51fb\u4e0b\u65b9\u7684\u9875\u7b7e\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                                                          \u7f51\u7edc\u914d\u7f6e\u5347\u7ea7\u7b56\u7565\u8c03\u5ea6\u7b56\u7565\u6807\u7b7e\u4e0e\u6ce8\u89e3

                                                                          \u5e94\u7528\u5728\u67d0\u4e9b\u573a\u666f\u4e0b\u4f1a\u51fa\u73b0\u5197\u4f59\u7684 DNS \u67e5\u8be2\u3002Kubernetes \u4e3a\u5e94\u7528\u63d0\u4f9b\u4e86\u4e0e DNS \u76f8\u5173\u7684\u914d\u7f6e\u9009\u9879\uff0c\u80fd\u591f\u5728\u67d0\u4e9b\u573a\u666f\u4e0b\u6709\u6548\u5730\u51cf\u5c11\u5197\u4f59\u7684 DNS \u67e5\u8be2\uff0c\u63d0\u5347\u4e1a\u52a1\u5e76\u53d1\u91cf\u3002

                                                                          • DNS \u7b56\u7565

                                                                            • Default\uff1a\u4f7f\u5bb9\u5668\u4f7f\u7528 kubelet \u7684 --resolv-conf \u53c2\u6570\u6307\u5411\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u3002\u8be5\u914d\u7f6e\u53ea\u80fd\u89e3\u6790\u6ce8\u518c\u5230\u4e92\u8054\u7f51\u4e0a\u7684\u5916\u90e8\u57df\u540d\uff0c\u65e0\u6cd5\u89e3\u6790\u96c6\u7fa4\u5185\u90e8\u57df\u540d\uff0c\u4e14\u4e0d\u5b58\u5728\u65e0\u6548\u7684 DNS \u67e5\u8be2\u3002
                                                                            • ClusterFirstWithHostNet\uff1a\u5e94\u7528\u5bf9\u63a5\u4e3b\u673a\u7684\u57df\u540d\u6587\u4ef6\u3002
                                                                            • ClusterFirst\uff1a\u5e94\u7528\u5bf9\u63a5 Kube-DNS/CoreDNS\u3002
                                                                            • None\uff1aKubernetes v1.9\uff08Beta in v1.10\uff09\u4e2d\u5f15\u5165\u7684\u65b0\u9009\u9879\u503c\u3002\u8bbe\u7f6e\u4e3a None \u4e4b\u540e\uff0c\u5fc5\u987b\u8bbe\u7f6e dnsConfig\uff0c\u6b64\u65f6\u5bb9\u5668\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u5c06\u5b8c\u5168\u901a\u8fc7 dnsConfig \u7684\u914d\u7f6e\u6765\u751f\u6210\u3002
                                                                          • \u57df\u540d\u670d\u52a1\u5668\uff1a\u586b\u5199\u57df\u540d\u670d\u52a1\u5668\u7684\u5730\u5740\uff0c\u4f8b\u5982 10.6.175.20 \u3002

                                                                          • \u641c\u7d22\u57df\uff1a\u57df\u540d\u67e5\u8be2\u65f6\u7684 DNS \u641c\u7d22\u57df\u5217\u8868\u3002\u6307\u5b9a\u540e\uff0c\u63d0\u4f9b\u7684\u641c\u7d22\u57df\u5217\u8868\u5c06\u5408\u5e76\u5230\u57fa\u4e8e dnsPolicy \u751f\u6210\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u7684 search \u5b57\u6bb5\u4e2d\uff0c\u5e76\u5220\u9664\u91cd\u590d\u7684\u57df\u540d\u3002Kubernetes \u6700\u591a\u5141\u8bb8 6 \u4e2a\u641c\u7d22\u57df\u3002
                                                                          • Options\uff1aDNS \u7684\u914d\u7f6e\u9009\u9879\uff0c\u5176\u4e2d\u6bcf\u4e2a\u5bf9\u8c61\u53ef\u4ee5\u5177\u6709 name \u5c5e\u6027\uff08\u5fc5\u9700\uff09\u548c value \u5c5e\u6027\uff08\u53ef\u9009\uff09\u3002\u8be5\u5b57\u6bb5\u4e2d\u7684\u5185\u5bb9\u5c06\u5408\u5e76\u5230\u57fa\u4e8e dnsPolicy \u751f\u6210\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u7684 options \u5b57\u6bb5\u4e2d\uff0cdnsConfig \u7684 options \u7684\u67d0\u4e9b\u9009\u9879\u5982\u679c\u4e0e\u57fa\u4e8e dnsPolicy \u751f\u6210\u7684\u57df\u540d\u89e3\u6790\u6587\u4ef6\u7684\u9009\u9879\u51b2\u7a81\uff0c\u5219\u4f1a\u88ab dnsConfig \u6240\u8986\u76d6\u3002
                                                                          • \u4e3b\u673a\u522b\u540d\uff1a\u4e3a\u4e3b\u673a\u8bbe\u7f6e\u7684\u522b\u540d\u3002
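Together, these options map to the Pod's dnsPolicy and dnsConfig fields; a sketch with illustrative values:

dnsPolicy: None               # resolution file generated entirely from dnsConfig
dnsConfig:
  nameservers:
    - 10.6.175.20             # name server
  searches:
    - ns1.svc.cluster.local   # search domain (illustrative)
  options:
    - name: ndots
      value: '2'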

• Upgrade mode: Rolling upgrade replaces old-version instances with new-version instances step by step; during the upgrade, traffic is load-balanced across both old and new instances, so the service is not interrupted. Recreate upgrade first deletes the old-version workload instances and then installs the specified new version; the service is interrupted during the upgrade. (See the sketch after this list.)
• Max unavailable Pods: the maximum number or percentage of unavailable Pods during the workload update; the default is 25%. If it equals the instance count, there is a risk of service interruption.
• Max surge: the maximum number or percentage by which the total number of Pods may exceed the desired replica count while Pods are being updated. The default is 25%.
• Max revisions kept: the number of old revisions kept for version rollback. The default is 10.
• Min ready seconds: the minimum time a Pod must be ready; only after this time has passed is the Pod considered available. The default is 0 seconds.
• Max upgrade duration: if the deployment has still not succeeded after the configured time, the workload is marked as failed. The default is 600 seconds.
• Scale-down window: the execution window for the pre-stop command before the workload stops (0-9,999 seconds); the default is 30 seconds.
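For a DaemonSet, these settings correspond to the updateStrategy and revisionHistoryLimit fields (a maxSurge knob also exists when maxUnavailable is 0); a sketch:

updateStrategy:
  type: RollingUpdate
  rollingUpdate:
    maxUnavailable: 25%   # max unavailable Pods during the update
revisionHistoryLimit: 10  # old revisions kept for rollback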

• Toleration time: when the node hosting a workload instance becomes unavailable, the time after which the instance is rescheduled to another available node; the default is 300 seconds.
• Node affinity: constrains which nodes a Pod can be scheduled onto, based on labels on the nodes.
• Workload affinity: constrains which nodes a Pod can be scheduled onto, based on the labels of Pods already running on those nodes.
• Workload anti-affinity: constrains which nodes a Pod cannot be scheduled onto, based on the labels of Pods already running on those nodes.
• Topology domain: i.e. topologyKey, specifies a group of nodes eligible for scheduling. For example, kubernetes.io/os means that as long as a node of a given operating system satisfies the labelSelector conditions, Pods can be scheduled onto it.

For details, see Scheduling Policy.
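As a sketch, node affinity and toleration time appear in the Pod template roughly as follows (the label key and value are illustrative):

spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: kubernetes.io/os     # node label used as the constraint
                operator: In
                values:
                  - linux
  tolerations:
    - key: node.kubernetes.io/unreachable
      operator: Exists
      effect: NoExecute
      tolerationSeconds: 300              # toleration time before rescheduling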

You can click the Add button to add labels and annotations to the workload and its Pods.

"},{"location":"end-user/kpanda/workloads/create-daemonset.html#yaml","title":"Create from YAML","text":"

In addition to the image-based method, you can create a DaemonSet more quickly from a YAML file.

1. Click Clusters in the left navigation bar, then click the name of the target cluster to go to the Cluster Details page.

2. On the cluster details page, click Workloads -> DaemonSets in the left navigation bar, then click the YAML Create button in the upper-right corner of the page.

3. Enter or paste the YAML file you prepared in advance, then click OK to finish the creation.

Click to view a YAML example for creating a DaemonSet
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: hwameistor-local-disk-manager
  namespace: hwameistor
  uid: ccbdc098-7de3-4a8a-96dd-d1cee159c92b
  resourceVersion: '90999552'
  generation: 1
  creationTimestamp: '2022-12-15T09:03:44Z'
  labels:
    app.kubernetes.io/managed-by: Helm
  annotations:
    deprecated.daemonset.template.generation: '1'
    meta.helm.sh/release-name: hwameistor
    meta.helm.sh/release-namespace: hwameistor
spec:
  selector:
    matchLabels:
      app: hwameistor-local-disk-manager
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: hwameistor-local-disk-manager
    spec:
      volumes:
        - name: udev
          hostPath:
            path: /run/udev
            type: Directory
        - name: procmount
          hostPath:
            path: /proc
            type: Directory
        - name: devmount
          hostPath:
            path: /dev
            type: Directory
        - name: socket-dir
          hostPath:
            path: /var/lib/kubelet/plugins/disk.hwameistor.io
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry/
            type: Directory
        - name: plugin-dir
          hostPath:
            path: /var/lib/kubelet/plugins
            type: DirectoryOrCreate
        - name: pods-mount-dir
          hostPath:
            path: /var/lib/kubelet/pods
            type: DirectoryOrCreate
      containers:
        - name: registrar
          image: k8s-gcr.m.daocloud.io/sig-storage/csi-node-driver-registrar:v2.5.0
          args:
            - '--v=5'
            - '--csi-address=/csi/csi.sock'
            - >-
              --kubelet-registration-path=/var/lib/kubelet/plugins/disk.hwameistor.io/csi.sock
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          resources: {}
          volumeMounts:
            - name: socket-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
          lifecycle:
            preStop:
              exec:
                command:
                  - /bin/sh
                  - '-c'
                  - >-
                    rm -rf /registration/disk.hwameistor.io
                    /registration/disk.hwameistor.io-reg.sock
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
        - name: manager
          image: ghcr.m.daocloud.io/hwameistor/local-disk-manager:v0.6.1
          command:
            - /local-disk-manager
          args:
            - '--endpoint=$(CSI_ENDPOINT)'
            - '--nodeid=$(NODENAME)'
            - '--csi-enable=true'
          env:
            - name: CSI_ENDPOINT
              value: unix://var/lib/kubelet/plugins/disk.hwameistor.io/csi.sock
            - name: NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: WATCH_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: NODENAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
            - name: OPERATOR_NAME
              value: local-disk-manager
          resources: {}
          volumeMounts:
            - name: udev
              mountPath: /run/udev
            - name: procmount
              readOnly: true
              mountPath: /host/proc
            - name: devmount
              mountPath: /dev
            - name: registration-dir
              mountPath: /var/lib/kubelet/plugins_registry
            - name: plugin-dir
              mountPath: /var/lib/kubelet/plugins
              mountPropagation: Bidirectional
            - name: pods-mount-dir
              mountPath: /var/lib/kubelet/pods
              mountPropagation: Bidirectional
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
          securityContext:
            privileged: true
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      serviceAccountName: hwameistor-admin
      serviceAccount: hwameistor-admin
      hostNetwork: true
      hostPID: true
      securityContext: {}
      schedulerName: default-scheduler
      tolerations:
        - key: CriticalAddonsOnly
          operator: Exists
        - key: node.kubernetes.io/not-ready
          operator: Exists
          effect: NoSchedule
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule
        - key: node.cloudprovider.kubernetes.io/uninitialized
          operator: Exists
          effect: NoSchedule
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
      maxSurge: 0
  revisionHistoryLimit: 10
status:
  currentNumberScheduled: 4
  numberMisscheduled: 0
  desiredNumberScheduled: 4
  numberReady: 4
  observedGeneration: 1
  updatedNumberScheduled: 4
  numberAvailable: 4
                                                                          "},{"location":"end-user/kpanda/workloads/create-deployment.html","title":"\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\uff08Deployment\uff09","text":"

                                                                          \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u65e0\u72b6\u6001\u8d1f\u8f7d\u3002

                                                                          \u65e0\u72b6\u6001\u8d1f\u8f7d\uff08Deployment\uff09\u662f Kubernetes \u4e2d\u7684\u4e00\u79cd\u5e38\u89c1\u8d44\u6e90\uff0c\u4e3b\u8981\u4e3a Pod \u548c ReplicaSet \u63d0\u4f9b\u58f0\u660e\u5f0f\u66f4\u65b0\uff0c\u652f\u6301\u5f39\u6027\u4f38\u7f29\u3001\u6eda\u52a8\u5347\u7ea7\u3001\u7248\u672c\u56de\u9000\u7b49\u529f\u80fd\u3002\u5728 Deployment \u4e2d\u58f0\u660e\u671f\u671b\u7684 Pod \u72b6\u6001\uff0cDeployment Controller \u4f1a\u901a\u8fc7 ReplicaSet \u4fee\u6539\u5f53\u524d\u72b6\u6001\uff0c\u4f7f\u5176\u8fbe\u5230\u9884\u5148\u58f0\u660e\u7684\u671f\u671b\u72b6\u6001\u3002Deployment \u662f\u65e0\u72b6\u6001\u7684\uff0c\u4e0d\u652f\u6301\u6570\u636e\u6301\u4e45\u5316\uff0c\u9002\u7528\u4e8e\u90e8\u7f72\u65e0\u72b6\u6001\u7684\u3001\u4e0d\u9700\u8981\u4fdd\u5b58\u6570\u636e\u3001\u968f\u65f6\u53ef\u4ee5\u91cd\u542f\u56de\u6eda\u7684\u5e94\u7528\u3002
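For example, a minimal Deployment declaring three replicas of an nginx Pod (the names are illustrative); the controller continually reconciles the actual state toward these three replicas:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3                # desired Pod count; the controller reconciles toward it
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.25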

Through the container management module of the 算丰 AI computing platform, you can easily manage workloads across multiple clouds and clusters based on the corresponding role permissions, including full lifecycle management of Deployments: creation, update, deletion, elastic scaling, restart, version rollback, and more.

"},{"location":"end-user/kpanda/workloads/create-deployment.html#_1","title":"Prerequisites","text":"

Before creating a Deployment from an image, the following prerequisites must be met:

• A Kubernetes cluster has been integrated in the container management module, or the administrator has created a cluster for the user, and the cluster's UI is accessible.

• A namespace and a user have been created.

• The current user has NS Editor or higher permissions; see Namespace Authorization for details.

• When a single instance contains multiple containers, make sure the ports used by the containers do not conflict, otherwise the deployment will fail.

"},{"location":"end-user/kpanda/workloads/create-deployment.html#_2","title":"Create from Image","text":"

Follow the steps below to create a Deployment from an image.

1. Click Clusters in the left navigation bar, then click the name of the target cluster to go to the Cluster Details page.

2. On the cluster details page, click Workloads -> Deployments in the left navigation bar, then click the Create from Image button in the upper-right corner of the page.

3. Fill in the basic information, container configuration, service configuration, and advanced configuration in turn, then click OK in the lower-right corner of the page to finish the creation.

The system automatically returns to the Deployments list. Click ⋮ on the right side of the list to update, delete, scale, restart, or roll back the workload. If the workload status is abnormal, check the specific error message; see Workload Status for reference.

"},{"location":"end-user/kpanda/workloads/create-deployment.html#_3","title":"Basic Information","text":"
• Workload name: up to 63 characters; only lowercase letters, digits, and hyphens ("-") are allowed, and it must start and end with a lowercase letter or digit, for example deployment-01. Within a namespace, names of workloads of the same type must be unique, and the name cannot be changed after the workload is created.
• Namespace: select the namespace in which to deploy the new workload; the default namespace is used by default. If you cannot find the namespace you need, you can create a new one following the on-page prompt.
• Instances: the number of Pod instances for the workload; 1 Pod instance is created by default.
• Description: a free-form description of the workload, up to 512 characters.

                                                                          "},{"location":"end-user/kpanda/workloads/create-deployment.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                                                                          \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                                                          \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

                                                                          \u57fa\u672c\u4fe1\u606f\uff08\u5fc5\u586b\uff09\u751f\u547d\u5468\u671f\uff08\u9009\u586b\uff09\u5065\u5eb7\u68c0\u67e5\uff08\u9009\u586b\uff09\u73af\u5883\u53d8\u91cf\uff08\u9009\u586b\uff09\u6570\u636e\u5b58\u50a8\uff08\u9009\u586b\uff09\u5b89\u5168\u8bbe\u7f6e\uff08\u9009\u586b\uff09

                                                                          \u5728\u914d\u7f6e\u5bb9\u5668\u76f8\u5173\u53c2\u6570\u65f6\uff0c\u5fc5\u987b\u6b63\u786e\u586b\u5199\u5bb9\u5668\u7684\u540d\u79f0\u3001\u955c\u50cf\u53c2\u6570\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u8fdb\u5165\u4e0b\u4e00\u6b65\u3002\u53c2\u8003\u4ee5\u4e0b\u8981\u6c42\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u8ba4 \u3002

                                                                          • \u5bb9\u5668\u7c7b\u578b\uff1a\u9ed8\u8ba4\u4e3a\u5de5\u4f5c\u5bb9\u5668\u3002\u6709\u5173\u521d\u59cb\u5316\u5bb9\u5668\uff0c\u53c2\u89c1 k8s \u5b98\u65b9\u6587\u6863\u3002
                                                                          • \u5bb9\u5668\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u652f\u6301\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\u3002\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 nginx-01\u3002
                                                                          • \u955c\u50cf\uff1a
                                                                            • \u5bb9\u5668\u955c\u50cf\uff1a\u4ece\u5217\u8868\u4e2d\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u955c\u50cf\u3002\u8f93\u5165\u955c\u50cf\u540d\u79f0\u65f6\uff0c\u9ed8\u8ba4\u4ece\u5b98\u65b9\u7684 DockerHub \u62c9\u53d6\u955c\u50cf\u3002 \u5b89\u88c5\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u955c\u50cf\u4ed3\u5e93\u6a21\u5757\u540e\uff0c\u53ef\u4ee5\u70b9\u51fb\u53f3\u4fa7\u7684 \u9009\u62e9\u955c\u50cf \u6309\u94ae\u6765\u9009\u62e9\u955c\u50cf\u3002
                                                                            • \u955c\u50cf\u7248\u672c\uff1a\u4ece\u4e0b\u62c9\u5217\u8868\u9009\u62e9\u4e00\u4e2a\u5408\u9002\u7684\u7248\u672c\u3002
                                                                            • \u955c\u50cf\u62c9\u53d6\u7b56\u7565\uff1a\u52fe\u9009 \u603b\u662f\u62c9\u53d6\u955c\u50cf \u540e\uff0c\u8d1f\u8f7d\u6bcf\u6b21\u91cd\u542f/\u5347\u7ea7\u65f6\u90fd\u4f1a\u4ece\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u955c\u50cf\u3002 \u5982\u679c\u4e0d\u52fe\u9009\uff0c\u5219\u53ea\u62c9\u53d6\u672c\u5730\u955c\u50cf\uff0c\u53ea\u6709\u5f53\u955c\u50cf\u5728\u672c\u5730\u4e0d\u5b58\u5728\u65f6\u624d\u4ece\u955c\u50cf\u4ed3\u5e93\u91cd\u65b0\u62c9\u53d6\u3002 \u66f4\u591a\u8be6\u60c5\u53ef\u53c2\u8003\u955c\u50cf\u62c9\u53d6\u7b56\u7565\u3002
                                                                            • \u955c\u50cf\u4ed3\u5e93\u5bc6\u94a5\uff1a\u53ef\u9009\u3002\u5982\u679c\u76ee\u6807\u4ed3\u5e93\u9700\u8981 Secret \u624d\u80fd\u8bbf\u95ee\uff0c\u9700\u8981\u5148\u53bb\u521b\u5efa\u4e00\u4e2a\u5bc6\u94a5\u3002
                                                                          • \u7279\u6743\u5bb9\u5668\uff1a\u5bb9\u5668\u9ed8\u8ba4\u4e0d\u53ef\u4ee5\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u4efb\u4f55\u8bbe\u5907\uff0c\u5f00\u542f\u7279\u6743\u5bb9\u5668\u540e\uff0c\u5bb9\u5668\u5373\u53ef\u8bbf\u95ee\u5bbf\u4e3b\u673a\u4e0a\u7684\u6240\u6709\u8bbe\u5907\uff0c\u4eab\u6709\u5bbf\u4e3b\u673a\u4e0a\u7684\u8fd0\u884c\u8fdb\u7a0b\u7684\u6240\u6709\u6743\u9650\u3002
                                                                          • CPU/\u5185\u5b58\u914d\u989d\uff1aCPU/\u5185\u5b58\u8d44\u6e90\u7684\u8bf7\u6c42\u503c\uff08\u9700\u8981\u4f7f\u7528\u7684\u6700\u5c0f\u8d44\u6e90\uff09\u548c\u9650\u5236\u503c\uff08\u5141\u8bb8\u4f7f\u7528\u7684\u6700\u5927\u8d44\u6e90\uff09\u3002\u8bf7\u6839\u636e\u9700\u8981\u4e3a\u5bb9\u5668\u914d\u7f6e\u8d44\u6e90\uff0c\u907f\u514d\u8d44\u6e90\u6d6a\u8d39\u548c\u56e0\u5bb9\u5668\u8d44\u6e90\u8d85\u989d\u5bfc\u81f4\u7cfb\u7edf\u6545\u969c\u3002\u9ed8\u8ba4\u503c\u5982\u56fe\u6240\u793a\u3002
                                                                          • GPU \u914d\u7f6e\uff1a\u4e3a\u5bb9\u5668\u914d\u7f6e GPU \u7528\u91cf\uff0c \u4ec5\u652f\u6301\u8f93\u5165\u6b63\u6574\u6570\u3002
                                                                            • \u6574\u5361\u6a21\u5f0f\uff1a
                                                                              • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\u3002\u914d\u7f6e\u540e\uff0c\u5bb9\u5668\u5c06\u5360\u7528\u6574\u5f20\u7269\u7406 GPU\u5361\u3002\u540c\u65f6\u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                                                                            • \u865a\u62df\u5316\u6a21\u5f0f\uff1a
                                                                              • \u7269\u7406\u5361\u6570\u91cf\uff1a\u5bb9\u5668\u80fd\u591f\u4f7f\u7528\u7684\u7269\u7406 GPU \u5361\u6570\u91cf\uff0c \u7269\u7406\u5361\u6570\u91cf\u9700\u8981 \u2264 \u5355\u8282\u70b9\u63d2\u5165\u7684\u6700\u5927 GPU \u5361\u6570\u3002
                                                                              • GPU \u7b97\u529b\uff1a\u6bcf\u5f20\u7269\u7406 GPU \u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u7b97\u529b\u767e\u5206\u6bd4\uff0c\u6700\u591a\u4e3a100%\u3002
                                                                              • \u663e\u5b58\uff1a\u6bcf\u5f20\u7269\u7406\u5361\u4e0a\u9700\u8981\u4f7f\u7528\u7684\u663e\u5b58\u6570\u91cf\u3002
                                                                              • \u8c03\u5ea6\u7b56\u7565\uff08Binpack / Spread\uff09\uff1a\u652f\u6301\u57fa\u4e8e GPU \u5361\u548c\u57fa\u4e8e\u8282\u70b9\u7684\u4e24\u79cd\u7ef4\u5ea6\u7684\u8c03\u5ea6\u7b56\u7565\u3002Binpack \u662f\u96c6\u4e2d\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u540c\u4e00\u4e2a\u8282\u70b9\u7684\u540c\u4e00\u5f20 GPU \u5361\u4e0a\uff1bSpread \u662f\u5206\u6563\u5f0f\u8c03\u5ea6\u7b56\u7565\uff0c\u4f18\u5148\u5c06\u5bb9\u5668\u8c03\u5ea6\u5230\u4e0d\u540c\u8282\u70b9\u7684\u4e0d\u540c GPU \u5361\u4e0a\uff0c\u6839\u636e\u5b9e\u9645\u573a\u666f\u53ef\u7ec4\u5408\u4f7f\u7528\u3002\uff08\u5f53\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u4e0e\u96c6\u7fa4\u7ea7\u522b\u7684 Binpack / Spread \u8c03\u5ea6\u7b56\u7565\u51b2\u7a81\u65f6\uff0c\u7cfb\u7edf\u4f18\u5148\u4f7f\u7528\u5de5\u4f5c\u8d1f\u8f7d\u7ea7\u522b\u7684\u8c03\u5ea6\u7b56\u7565\uff09\u3002
                                                                              • \u4efb\u52a1\u4f18\u5148\u7ea7\uff1aGPU \u7b97\u529b\u4f1a\u4f18\u5148\u4f9b\u7ed9\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u4f7f\u7528\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u51cf\u5c11\u751a\u81f3\u6682\u505c\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u76f4\u5230\u9ad8\u4f18\u5148\u7ea7\u4efb\u52a1\u7ed3\u675f\uff0c\u666e\u901a\u4efb\u52a1\u4f1a\u91cd\u65b0\u7ee7\u7eed\u4f7f\u7528 GPU \u7b97\u529b\uff0c\u5e38\u7528\u4e8e\u5728\u79bb\u7ebf\u6df7\u90e8\u573a\u666f\u3002
                                                                              • \u6307\u5b9a\u578b\u53f7\uff1a\u5c06\u5de5\u4f5c\u8d1f\u8f7d\u8c03\u5ea6\u5230\u6307\u5b9a\u578b\u53f7\u7684 GPU \u5361\u4e0a\uff0c\u9002\u7528\u4e8e\u5bf9 GPU \u578b\u53f7\u6709\u7279\u6b8a\u8981\u6c42\u7684\u573a\u666f\u3002
                                                                            • Mig \u6a21\u5f0f
                                                                              • \u89c4\u683c\uff1a\u5207\u5206\u540e\u7684\u7269\u7406 GPU \u5361\u89c4\u683c\u3002
                                                                              • \u6570\u91cf\uff1a\u4f7f\u7528\u8be5\u89c4\u683c\u7684\u6570\u91cf\u3002

                                                                          \u8bbe\u7f6e GPU \u4e4b\u524d\uff0c\u9700\u8981\u7ba1\u7406\u5458\u9884\u5148\u5728\u96c6\u7fa4\u4e0a\u5b89\u88c5 GPU Operator \u548c nvidia-vgpu\uff08\u4ec5 vGPU \u6a21\u5f0f\u9700\u8981\u5b89\u88c5\uff09\uff0c\u5e76\u5728\u96c6\u7fa4\u8bbe\u7f6e\u4e2d\u5f00\u542f GPU \u7279\u6027\u3002

                                                                          \u8bbe\u7f6e\u5bb9\u5668\u542f\u52a8\u65f6\u3001\u542f\u52a8\u540e\u3001\u505c\u6b62\u524d\u9700\u8981\u6267\u884c\u7684\u547d\u4ee4\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u5bb9\u5668\u751f\u547d\u5468\u671f\u914d\u7f6e\u3002

                                                                          \u7528\u4e8e\u5224\u65ad\u5bb9\u5668\u548c\u5e94\u7528\u7684\u5065\u5eb7\u72b6\u6001\uff0c\u6709\u52a9\u4e8e\u63d0\u9ad8\u5e94\u7528\u7684\u53ef\u7528\u6027\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u5bb9\u5668\u5065\u5eb7\u68c0\u67e5\u914d\u7f6e\u3002

                                                                          \u914d\u7f6e Pod \u5185\u7684\u5bb9\u5668\u53c2\u6570\uff0c\u4e3a Pod \u6dfb\u52a0\u73af\u5883\u53d8\u91cf\u6216\u4f20\u9012\u914d\u7f6e\u7b49\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u5bb9\u5668\u73af\u5883\u53d8\u91cf\u914d\u7f6e\u3002

                                                                          \u914d\u7f6e\u5bb9\u5668\u6302\u8f7d\u6570\u636e\u5377\u548c\u6570\u636e\u6301\u4e45\u5316\u7684\u8bbe\u7f6e\u3002\u8be6\u60c5\u53ef\u53c2\u8003\u5bb9\u5668\u6570\u636e\u5b58\u50a8\u914d\u7f6e\u3002

                                                                          \u901a\u8fc7 Linux \u5185\u7f6e\u7684\u8d26\u53f7\u6743\u9650\u9694\u79bb\u673a\u5236\u6765\u5bf9\u5bb9\u5668\u8fdb\u884c\u5b89\u5168\u9694\u79bb\u3002\u60a8\u53ef\u4ee5\u901a\u8fc7\u4f7f\u7528\u4e0d\u540c\u6743\u9650\u7684\u8d26\u53f7 UID\uff08\u6570\u5b57\u8eab\u4efd\u6807\u8bb0\uff09\u6765\u9650\u5236\u5bb9\u5668\u7684\u6743\u9650\u3002\u4f8b\u5982\uff0c\u8f93\u5165 0 \u8868\u793a\u4f7f\u7528 root \u8d26\u53f7\u7684\u6743\u9650\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/create-deployment.html#_5","title":"\u670d\u52a1\u914d\u7f6e","text":"

Configure a Service for the stateless workload so that it can be accessed externally.

1. Click the Create Service button.

2. Refer to Create a Service to configure the service parameters.

3. Click OK, then click Next.
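A minimal sketch of such a Service, assuming the workload's Pods carry the label app: nginx and that a NodePort type is used for external access (names and ports are hypothetical):

apiVersion: v1
kind: Service
metadata:
  name: nginx-service              # hypothetical Service name
spec:
  type: NodePort                   # exposes the Service on every node's IP
  selector:
    app: nginx                     # must match the Pod template labels
  ports:
  - port: 80                       # port the Service listens on
    targetPort: 80                 # containerPort of the workload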

                                                                          "},{"location":"end-user/kpanda/workloads/create-deployment.html#_6","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

Advanced configuration consists of four parts: network configuration, upgrade strategy, scheduling strategy, and labels & annotations. Click the tabs below to view the configuration requirements of each part.

Network Configuration / Upgrade Strategy / Scheduling Strategy / Labels & Annotations
• If the SpiderPool and Multus components are deployed in the cluster, container NICs can be configured in the network configuration.

• DNS configuration: in some scenarios an application generates redundant DNS queries. Kubernetes provides DNS-related configuration options that can effectively reduce redundant DNS queries in such scenarios and increase business concurrency.

• DNS policy

  • Default: the container uses the domain name resolution file pointed to by the kubelet's --resolv-conf parameter. This configuration can only resolve external domain names registered on the Internet; it cannot resolve cluster-internal domain names, and it produces no invalid DNS queries.
  • ClusterFirstWithHostNet: the application uses the host's domain name resolution file.
  • ClusterFirst: the application connects to Kube-DNS/CoreDNS.
  • None: a new option value introduced in Kubernetes v1.9 (Beta in v1.10). After setting it to None, dnsConfig must be set; the container's domain name resolution file is then generated entirely from the dnsConfig configuration.
• Nameservers: fill in the addresses of the domain name servers, e.g. 10.6.175.20.

• Search domains: the list of DNS search domains for domain name queries. When specified, the provided list is merged into the search field of the resolution file generated from dnsPolicy, and duplicate domain names are removed. Kubernetes allows at most 6 search domains.
• Options: DNS configuration options, where each object may have a name attribute (required) and a value attribute (optional). The content of this field is merged into the options field of the resolution file generated from dnsPolicy; if some options in dnsConfig conflict with options in the file generated from dnsPolicy, they are overridden by dnsConfig.
• Host aliases: aliases set for the hosts (see the Pod spec sketch below).
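A minimal sketch of how these options land in the Pod spec, assuming dnsPolicy: None with the nameserver address above (the search domain, option, and host alias values are hypothetical):

spec:
  dnsPolicy: "None"                # resolution file generated entirely from dnsConfig
  dnsConfig:
    nameservers:
    - 10.6.175.20                  # domain name server address
    searches:
    - svc.cluster.local            # hypothetical search domain
    options:
    - name: ndots                  # merged into / overrides generated options
      value: "2"
  hostAliases:
  - ip: "10.6.175.21"              # hypothetical alias target
    hostnames:
    - "registry.local"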

• Upgrade mode: Rolling upgrade gradually replaces old-version instances with new-version instances; during the upgrade, business traffic is load-balanced across both old and new instances, so the business is not interrupted. Recreate upgrade first deletes the old-version workload instances and then installs the specified new version; the business is interrupted during the upgrade.
• Max unavailable: the maximum number or ratio of Pods that may be unavailable during the update; defaults to 25%. If it equals the number of instances, there is a risk of service interruption.
• Max surge: the maximum number or ratio by which the total number of Pods may exceed the desired replica count while Pods are being updated. Defaults to 25%.
• Max revisions retained: the number of old revisions kept for version rollback. Defaults to 10.
• Min ready time: the minimum time a Pod must be ready before it is considered available; only after this time has elapsed is the Pod considered available. Defaults to 0 seconds.
• Max upgrade duration: if the deployment has still not succeeded after the set time, the workload is marked as failed. Defaults to 600 seconds.
• Graceful scale-down window: the execution window for the pre-stop command before the workload stops (0-9,999 seconds); defaults to 30 seconds. These fields map onto the Deployment spec as shown in the sketch below.
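A minimal sketch of where these fields sit in a Deployment spec, using the default values described above:

spec:
  revisionHistoryLimit: 10             # max revisions retained for rollback
  minReadySeconds: 0                   # min ready time before a Pod counts as available
  progressDeadlineSeconds: 600         # max upgrade duration before marking as failed
  strategy:
    type: RollingUpdate                # or Recreate, which interrupts the business
    rollingUpdate:
      maxUnavailable: 25%              # max unavailable Pods during the update
      maxSurge: 25%                    # max Pods above the desired replica count
  template:
    spec:
      terminationGracePeriodSeconds: 30   # graceful scale-down window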

• Toleration time: when the node hosting a workload instance becomes unavailable, the time after which the instance is rescheduled to another available node; defaults to 300 seconds.
• Node affinity: constrains which nodes Pods can be scheduled onto, based on node labels.
• Workload affinity: constrains which nodes Pods can be scheduled onto, based on the labels of Pods already running on those nodes.
• Workload anti-affinity: constrains which nodes Pods cannot be scheduled onto, based on the labels of Pods already running on those nodes.

For details, see Scheduling Strategy; a minimal sketch follows below.
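A minimal sketch of a toleration time and node affinity in the Pod template, assuming a hypothetical disktype=ssd node label:

spec:
  tolerations:
  - key: "node.kubernetes.io/unreachable"
    operator: "Exists"
    effect: "NoExecute"
    tolerationSeconds: 300             # reschedule 300s after the node becomes unavailable
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: disktype              # hypothetical node label
            operator: In
            values:
            - ssd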

You can click the Add button to add labels and annotations for the workload and the Pods.

                                                                          "},{"location":"end-user/kpanda/workloads/create-deployment.html#yaml","title":"YAML \u521b\u5efa","text":"

In addition to creating by image, you can also create stateless workloads more quickly through YAML files.

1. Click Cluster List in the left navigation bar, then click the name of the target cluster to enter the Cluster Details page.

2. On the cluster details page, click Workloads -> Deployments in the left navigation bar, then click the YAML Create button in the upper-right corner of the page.

3. Enter or paste a YAML file prepared in advance, then click OK to complete the creation.

Click to view a YAML example for creating a stateless workload
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 2 # tells the Deployment to run 2 Pods matching this template
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
                                                                          "},{"location":"end-user/kpanda/workloads/create-job.html","title":"\u521b\u5efa\u4efb\u52a1\uff08Job\uff09","text":"

This article explains how to create a Job in two ways: by image and by YAML file.

A Job is suited to one-off tasks. A Job creates one or more Pods and keeps retrying execution of its Pods until a specified number of Pods terminate successfully; once that number is reached, the Job ends. Deleting a Job cleans up all Pods the Job created. Suspending a Job deletes all of its active Pods until the Job is resumed. For more about Jobs, see Job.

                                                                          "},{"location":"end-user/kpanda/workloads/create-job.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"
                                                                          • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                                          • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                                                                          • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                                          • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/create-job.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

                                                                          \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u4efb\u52a1\u3002

                                                                          1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                                                          2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u4efb\u52a1 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                                                                          3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\u3001\u670d\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                                                                            \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de \u4efb\u52a1 \u5217\u8868\u3002\u70b9\u51fb\u5217\u8868\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u4efb\u52a1\u6267\u884c\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u3001\u91cd\u542f\u7b49\u64cd\u4f5c\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/create-job.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"

                                                                          \u5728 \u521b\u5efa\u4efb\u52a1 \u9875\u9762\u4e2d\uff0c\u6839\u636e\u4e0b\u8868\u8f93\u5165\u57fa\u672c\u4fe1\u606f\u540e\uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002

                                                                          • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                                                          • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u4efb\u52a1\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                                                          • \u5b9e\u4f8b\u6570\uff1a\u8f93\u5165\u5de5\u4f5c\u8d1f\u8f7d\u7684 Pod \u5b9e\u4f8b\u6570\u91cf\u3002\u9ed8\u8ba4\u521b\u5efa 1 \u4e2a Pod \u5b9e\u4f8b\u3002
                                                                          • \u63cf\u8ff0\uff1a\u8f93\u5165\u5de5\u4f5c\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u91cf\u5e94\u4e0d\u8d85\u8fc7 512 \u4e2a\u3002
                                                                          "},{"location":"end-user/kpanda/workloads/create-job.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                                                                          \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                                                          \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

                                                                          \u57fa\u672c\u4fe1\u606f\uff08\u5fc5\u586b\uff09\u751f\u547d\u5468\u671f\uff08\u9009\u586b\uff09\u5065\u5eb7\u68c0\u67e5\uff08\u9009\u586b\uff09\u73af\u5883\u53d8\u91cf\uff08\u9009\u586b\uff09\u6570\u636e\u5b58\u50a8\uff08\u9009\u586b\uff09\u5b89\u5168\u8bbe\u7f6e\uff08\u9009\u586b\uff09

                                                                          \u5728\u914d\u7f6e\u5bb9\u5668\u76f8\u5173\u53c2\u6570\u65f6\uff0c\u5fc5\u987b\u6b63\u786e\u586b\u5199\u5bb9\u5668\u7684\u540d\u79f0\u3001\u955c\u50cf\u53c2\u6570\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u8fdb\u5165\u4e0b\u4e00\u6b65\u3002\u53c2\u8003\u4ee5\u4e0b\u8981\u6c42\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u8ba4 \u3002

• Container type: defaults to work container. For init containers, see the official Kubernetes documentation.
• Container name: at most 63 characters; lowercase letters, digits, and separators ("-") are supported. It must start and end with a lowercase letter or digit, e.g. nginx-01.
• Image:
  • Container image: select a suitable image from the list. When an image name is typed in, the image is pulled from the official DockerHub by default. After integrating the image registry module of the 算丰 AI 算力平台, you can click the Select Image button on the right to choose an image.
  • Image version: select a suitable version from the drop-down list.
  • Image pull policy: with Always pull image checked, the workload pulls the image from the registry on every restart/upgrade. If unchecked, only the local image is used, and the image is pulled from the registry only when it does not exist locally. For more details, see Image Pull Policy.
  • Image registry secret: optional. If the target registry requires a Secret for access, create a secret first.
• Privileged container: by default a container cannot access any device on the host; with privileged mode enabled, the container can access all devices on the host and holds all the permissions of processes running on the host.
• CPU/memory quota: the request (minimum resources needed) and limit (maximum resources allowed) for CPU/memory. Configure resources for the container as needed, to avoid resource waste and system failures caused by containers exceeding their quota. Default values are as shown in the figure.
• GPU configuration: configure GPU usage for the container; only positive integers may be entered.
  • Whole-card mode:
    • Physical card count: the number of physical GPU cards the container can use. After configuration, the container occupies whole physical GPU cards. The physical card count must be ≤ the maximum number of GPU cards installed in a single node.
  • Virtualization mode:
    • Physical card count: the number of physical GPU cards the container can use; must be ≤ the maximum number of GPU cards installed in a single node.
    • GPU compute: the percentage of compute power to use on each physical GPU card; at most 100%.
    • GPU memory: the amount of GPU memory to use on each physical card.
    • Scheduling policy (Binpack / Spread): scheduling is supported along two dimensions, GPU card and node. Binpack is a consolidation policy that prefers scheduling containers onto the same GPU card of the same node; Spread is a dispersion policy that prefers scheduling containers onto different GPU cards of different nodes. The two can be combined according to the actual scenario. (When the workload-level Binpack / Spread policy conflicts with the cluster-level policy, the workload-level policy takes precedence.)
    • Task priority: GPU compute is given to high-priority tasks first; ordinary tasks reduce or even pause their GPU usage until the high-priority task finishes, after which they resume. Commonly used in online/offline colocation scenarios.
    • Specified model: schedule the workload onto GPU cards of the specified model, for scenarios with special requirements on the GPU model.
  • MIG mode
    • Spec: the spec of the partitioned physical GPU card.
    • Count: the number of instances of that spec to use.

Before configuring GPUs, the administrator must install the GPU Operator and nvidia-vgpu (required only for vGPU mode) on the cluster in advance, and enable the GPU feature in the cluster settings.

Set the commands to be executed when the container starts, after it starts, and before it stops. For details, see Container Lifecycle Configuration.

Used to determine the health status of the container and the application, which helps improve application availability. For details, see Container Health Check Configuration.

Configure container parameters within the Pod, add environment variables or pass configuration to the Pod, and so on. For details, see Container Environment Variable Configuration.

Configure the settings for mounting data volumes and persisting data in the container. For details, see Container Data Storage Configuration.

Containers are securely isolated through Linux's built-in account permission isolation mechanism. You can restrict a container's permissions by using account UIDs (numeric identity tags) with different permissions. For example, entering 0 means using the permissions of the root account.

                                                                          "},{"location":"end-user/kpanda/workloads/create-job.html#_5","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

                                                                          \u9ad8\u7ea7\u914d\u7f6e\u5305\u62ec\u4efb\u52a1\u8bbe\u7f6e\u3001\u6807\u7b7e\u4e0e\u6ce8\u89e3\u4e24\u90e8\u5206\u3002

                                                                          \u4efb\u52a1\u8bbe\u7f6e\u6807\u7b7e\u4e0e\u6ce8\u89e3

                                                                          • \u5e76\u884c\u6570\uff1a\u4efb\u52a1\u6267\u884c\u8fc7\u7a0b\u4e2d\u5141\u8bb8\u540c\u65f6\u521b\u5efa\u7684\u6700\u5927 Pod \u6570\uff0c\u5e76\u884c\u6570\u5e94\u4e0d\u5927\u4e8e Pod \u603b\u6570\u3002\u9ed8\u8ba4\u4e3a 1\u3002
                                                                          • \u8d85\u65f6\u65f6\u95f4\uff1a\u8d85\u51fa\u8be5\u65f6\u95f4\u65f6\uff0c\u4efb\u52a1\u4f1a\u88ab\u6807\u8bc6\u4e3a\u6267\u884c\u5931\u8d25\uff0c\u4efb\u52a1\u4e0b\u7684\u6240\u6709 Pod \u90fd\u4f1a\u88ab\u5220\u9664\u3002\u4e3a\u7a7a\u65f6\u8868\u793a\u4e0d\u8bbe\u7f6e\u8d85\u65f6\u65f6\u95f4\u3002
                                                                          • \u91cd\u542f\u7b56\u7565\uff1a\u8bbe\u7f6e\u5931\u8d25\u65f6\u662f\u5426\u91cd\u542f Pod\u3002
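A minimal sketch mapping these form fields onto the Job spec, with hypothetical values (the timeout corresponds to activeDeadlineSeconds):

apiVersion: batch/v1
kind: Job
metadata:
  name: demo-job                       # hypothetical Job name
spec:
  parallelism: 1                       # max Pods created simultaneously
  completions: 1                       # Pods that must terminate successfully
  activeDeadlineSeconds: 600           # timeout; omit to set no timeout
  template:
    spec:
      restartPolicy: Never             # or OnFailure, to restart the Pod on failure
      containers:
      - name: worker
        image: busybox
        command: ["sh", "-c", "echo done"]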

You can click the Add button to add labels and annotations for the workload's instance Pods.

                                                                          "},{"location":"end-user/kpanda/workloads/create-job.html#yaml","title":"YAML \u521b\u5efa","text":"

                                                                          \u9664\u4e86\u901a\u8fc7\u955c\u50cf\u65b9\u5f0f\u5916\uff0c\u8fd8\u53ef\u4ee5\u901a\u8fc7 YAML \u6587\u4ef6\u66f4\u5feb\u901f\u5730\u521b\u5efa\u521b\u5efa\u4efb\u52a1\u3002

                                                                          1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u9875\u9762\u3002

                                                                          2. \u5728\u96c6\u7fa4\u8be6\u60c5\u9875\u9762\uff0c\u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u4efb\u52a1 \uff0c\u7136\u540e\u70b9\u51fb\u9875\u9762\u53f3\u4e0a\u89d2\u7684 YAML \u521b\u5efa \u6309\u94ae\u3002

                                                                          3. \u8f93\u5165\u6216\u7c98\u8d34\u4e8b\u5148\u51c6\u5907\u597d\u7684 YAML \u6587\u4ef6\uff0c\u70b9\u51fb \u786e\u5b9a \u5373\u53ef\u5b8c\u6210\u521b\u5efa\u3002

                                                                          \u70b9\u51fb\u67e5\u770b\u521b\u5efa\u4efb\u52a1\u7684 YAML \u793a\u4f8b
kind: Job
apiVersion: batch/v1
metadata:
  name: demo
  namespace: default
  uid: a9708239-0358-4aa1-87d3-a092c080836e
  resourceVersion: '92751876'
  generation: 1
  creationTimestamp: '2022-12-26T10:52:22Z'
  labels:
    app: demo
    controller-uid: a9708239-0358-4aa1-87d3-a092c080836e
    job-name: demo
  annotations:
    revisions: >-
      {"1":{"status":"running","uid":"a9708239-0358-4aa1-87d3-a092c080836e","start-time":"2022-12-26T10:52:22Z","completion-time":"0001-01-01T00:00:00Z"}}
spec:
  parallelism: 1
  backoffLimit: 6
  selector:
    matchLabels:
      controller-uid: a9708239-0358-4aa1-87d3-a092c080836e
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: demo
        controller-uid: a9708239-0358-4aa1-87d3-a092c080836e
        job-name: demo
    spec:
      containers:
        - name: container-4
          image: nginx
          resources:
            limits:
              cpu: 250m
              memory: 512Mi
            requests:
              cpu: 250m
              memory: 512Mi
          lifecycle: {}
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
          securityContext:
            privileged: false
      restartPolicy: Never
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      schedulerName: default-scheduler
  completionMode: NonIndexed
  suspend: false
status:
  startTime: '2022-12-26T10:52:22Z'
  active: 1
                                                                          "},{"location":"end-user/kpanda/workloads/create-statefulset.html","title":"\u521b\u5efa\u6709\u72b6\u6001\u8d1f\u8f7d\uff08StatefulSet\uff09","text":"

                                                                          \u672c\u6587\u4ecb\u7ecd\u5982\u4f55\u901a\u8fc7\u955c\u50cf\u548c YAML \u6587\u4ef6\u4e24\u79cd\u65b9\u5f0f\u521b\u5efa\u6709\u72b6\u6001\u8d1f\u8f7d\uff08StatefulSet\uff09\u3002

                                                                          \u6709\u72b6\u6001\u8d1f\u8f7d\uff08StatefulSet\uff09\u662f Kubernetes \u4e2d\u7684\u4e00\u79cd\u5e38\u89c1\u8d44\u6e90\uff0c\u548c\u65e0\u72b6\u6001\u8d1f\u8f7d\uff08Deployment\uff09\u7c7b\u4f3c\uff0c\u4e3b\u8981\u7528\u4e8e\u7ba1\u7406 Pod \u96c6\u5408\u7684\u90e8\u7f72\u548c\u4f38\u7f29\u3002\u4e8c\u8005\u7684\u4e3b\u8981\u533a\u522b\u5728\u4e8e\uff0cDeployment \u662f\u65e0\u72b6\u6001\u7684\uff0c\u4e0d\u4fdd\u5b58\u6570\u636e\uff0c\u800c StatefulSet \u662f\u6709\u72b6\u6001\u7684\uff0c\u4e3b\u8981\u7528\u4e8e\u7ba1\u7406\u6709\u72b6\u6001\u5e94\u7528\u3002\u6b64\u5916\uff0cStatefulSet \u4e2d\u7684 Pod \u5177\u6709\u6c38\u4e45\u4e0d\u53d8\u7684 ID\uff0c\u4fbf\u4e8e\u5728\u5339\u914d\u5b58\u50a8\u5377\u65f6\u8bc6\u522b\u5bf9\u5e94\u7684 Pod\u3002

                                                                          \u901a\u8fc7\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u7684\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\uff0c\u53ef\u4ee5\u57fa\u4e8e\u76f8\u5e94\u7684\u89d2\u8272\u6743\u9650\u8f7b\u677e\u7ba1\u7406\u591a\u4e91\u591a\u96c6\u7fa4\u4e0a\u7684\u5de5\u4f5c\u8d1f\u8f7d\uff0c\u5305\u62ec\u5bf9\u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u7684\u521b\u5efa\u3001\u66f4\u65b0\u3001\u5220\u9664\u3001\u5f39\u6027\u6269\u7f29\u3001\u91cd\u542f\u3001\u7248\u672c\u56de\u9000\u7b49\u5168\u751f\u547d\u5468\u671f\u7ba1\u7406\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/create-statefulset.html#_1","title":"\u524d\u63d0\u6761\u4ef6","text":"

                                                                          \u5728\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u6709\u72b6\u6001\u8d1f\u8f7d\u4e4b\u524d\uff0c\u9700\u8981\u6ee1\u8db3\u4ee5\u4e0b\u524d\u63d0\u6761\u4ef6\uff1a

                                                                          • \u5728\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4e2d\u63a5\u5165 Kubernetes \u96c6\u7fa4\u6216\u8005\u7ba1\u7406\u5458\u5df2\u4e3a\u7528\u6237\u521b\u5efa\u4e86\u96c6\u7fa4\uff0c\u4e14\u80fd\u591f\u8bbf\u95ee\u96c6\u7fa4\u7684 UI \u754c\u9762\u3002

                                                                          • \u521b\u5efa\u4e00\u4e2a\u547d\u540d\u7a7a\u95f4\u548c\u7528\u6237\u3002

                                                                          • \u5f53\u524d\u64cd\u4f5c\u7528\u6237\u5e94\u5177\u6709 NS Editor \u6216\u66f4\u9ad8\u6743\u9650\uff0c\u8be6\u60c5\u53ef\u53c2\u8003\u547d\u540d\u7a7a\u95f4\u6388\u6743\u3002

                                                                          • \u5355\u4e2a\u5b9e\u4f8b\u4e2d\u6709\u591a\u4e2a\u5bb9\u5668\u65f6\uff0c\u8bf7\u786e\u4fdd\u5bb9\u5668\u4f7f\u7528\u7684\u7aef\u53e3\u4e0d\u51b2\u7a81\uff0c\u5426\u5219\u90e8\u7f72\u4f1a\u5931\u6548\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/create-statefulset.html#_2","title":"\u955c\u50cf\u521b\u5efa","text":"

                                                                          \u53c2\u8003\u4ee5\u4e0b\u6b65\u9aa4\uff0c\u4f7f\u7528\u955c\u50cf\u521b\u5efa\u4e00\u4e2a\u6709\u72b6\u6001\u8d1f\u8f7d\u3002

                                                                          1. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u4e0a\u7684 \u96c6\u7fa4\u5217\u8868 \uff0c\u7136\u540e\u70b9\u51fb\u76ee\u6807\u96c6\u7fa4\u7684\u540d\u79f0\uff0c\u8fdb\u5165 \u96c6\u7fa4\u8be6\u60c5 \u3002

                                                                          2. \u70b9\u51fb\u5de6\u4fa7\u5bfc\u822a\u680f\u7684 \u5de5\u4f5c\u8d1f\u8f7d -> \u6709\u72b6\u6001\u8d1f\u8f7d \uff0c\u7136\u540e\u70b9\u51fb\u53f3\u4e0a\u89d2 \u955c\u50cf\u521b\u5efa \u6309\u94ae\u3002

                                                                          3. \u4f9d\u6b21\u586b\u5199\u57fa\u672c\u4fe1\u606f\u3001\u5bb9\u5668\u914d\u7f6e\u3001\u670d\u52a1\u914d\u7f6e\u3001\u9ad8\u7ea7\u914d\u7f6e\u540e\uff0c\u5728\u9875\u9762\u53f3\u4e0b\u89d2\u70b9\u51fb \u786e\u5b9a \u5b8c\u6210\u521b\u5efa\u3002

                                                                            \u7cfb\u7edf\u5c06\u81ea\u52a8\u8fd4\u56de \u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d \u5217\u8868\uff0c\u7b49\u5f85\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001\u53d8\u4e3a \u8fd0\u884c\u4e2d \u3002\u5982\u679c\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001\u51fa\u73b0\u5f02\u5e38\uff0c\u8bf7\u67e5\u770b\u5177\u4f53\u5f02\u5e38\u4fe1\u606f\uff0c\u53ef\u53c2\u8003\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001\u3002

                                                                            \u70b9\u51fb\u65b0\u5efa\u5de5\u4f5c\u8d1f\u8f7d\u5217\u53f3\u4fa7\u7684 \u2507 \uff0c\u53ef\u4ee5\u5bf9\u5de5\u4f5c\u8d1f\u8f7d\u6267\u884c\u6267\u884c\u66f4\u65b0\u3001\u5220\u9664\u3001\u5f39\u6027\u6269\u7f29\u3001\u91cd\u542f\u3001\u7248\u672c\u56de\u9000\u7b49\u64cd\u4f5c\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/create-statefulset.html#_3","title":"\u57fa\u672c\u4fe1\u606f","text":"
                                                                          • \u8d1f\u8f7d\u540d\u79f0\uff1a\u6700\u591a\u5305\u542b 63 \u4e2a\u5b57\u7b26\uff0c\u53ea\u80fd\u5305\u542b\u5c0f\u5199\u5b57\u6bcd\u3001\u6570\u5b57\u53ca\u5206\u9694\u7b26\uff08\u201c-\u201d\uff09\uff0c\u4e14\u5fc5\u987b\u4ee5\u5c0f\u5199\u5b57\u6bcd\u6216\u6570\u5b57\u5f00\u5934\u53ca\u7ed3\u5c3e\uff0c\u4f8b\u5982 deployment-01\u3002\u540c\u4e00\u547d\u540d\u7a7a\u95f4\u5185\u540c\u4e00\u7c7b\u578b\u5de5\u4f5c\u8d1f\u8f7d\u7684\u540d\u79f0\u4e0d\u5f97\u91cd\u590d\uff0c\u800c\u4e14\u8d1f\u8f7d\u540d\u79f0\u5728\u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u597d\u4e4b\u540e\u4e0d\u53ef\u66f4\u6539\u3002
                                                                          • \u547d\u540d\u7a7a\u95f4\uff1a\u9009\u62e9\u5c06\u65b0\u5efa\u7684\u8d1f\u8f7d\u90e8\u7f72\u5728\u54ea\u4e2a\u547d\u540d\u7a7a\u95f4\uff0c\u9ed8\u8ba4\u4f7f\u7528 default \u547d\u540d\u7a7a\u95f4\u3002\u627e\u4e0d\u5230\u6240\u9700\u7684\u547d\u540d\u7a7a\u95f4\u65f6\u53ef\u4ee5\u6839\u636e\u9875\u9762\u63d0\u793a\u53bb\u521b\u5efa\u65b0\u7684\u547d\u540d\u7a7a\u95f4\u3002
                                                                          • \u5b9e\u4f8b\u6570\uff1a\u8f93\u5165\u8d1f\u8f7d\u7684 Pod \u5b9e\u4f8b\u6570\u91cf\uff0c\u9ed8\u8ba4\u521b\u5efa 1 \u4e2a Pod \u5b9e\u4f8b\u3002
                                                                          • \u63cf\u8ff0\uff1a\u8f93\u5165\u8d1f\u8f7d\u7684\u63cf\u8ff0\u4fe1\u606f\uff0c\u5185\u5bb9\u81ea\u5b9a\u4e49\u3002\u5b57\u7b26\u6570\u4e0d\u8d85\u8fc7 512\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/create-statefulset.html#_4","title":"\u5bb9\u5668\u914d\u7f6e","text":"

                                                                          \u5bb9\u5668\u914d\u7f6e\u5206\u4e3a\u57fa\u672c\u4fe1\u606f\u3001\u751f\u547d\u5468\u671f\u3001\u5065\u5eb7\u68c0\u67e5\u3001\u73af\u5883\u53d8\u91cf\u3001\u6570\u636e\u5b58\u50a8\u3001\u5b89\u5168\u8bbe\u7f6e\u516d\u90e8\u5206\uff0c\u70b9\u51fb\u4e0b\u65b9\u7684\u76f8\u5e94\u9875\u7b7e\u53ef\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                                                          \u5bb9\u5668\u914d\u7f6e\u4ec5\u9488\u5bf9\u5355\u4e2a\u5bb9\u5668\u8fdb\u884c\u914d\u7f6e\uff0c\u5982\u9700\u5728\u4e00\u4e2a\u5bb9\u5668\u7ec4\u4e2d\u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\uff0c\u53ef\u70b9\u51fb\u53f3\u4fa7\u7684 + \u6dfb\u52a0\u591a\u4e2a\u5bb9\u5668\u3002

                                                                          \u57fa\u672c\u4fe1\u606f\uff08\u5fc5\u586b\uff09\u751f\u547d\u5468\u671f\uff08\u9009\u586b\uff09\u5065\u5eb7\u68c0\u67e5\uff08\u9009\u586b\uff09\u73af\u5883\u53d8\u91cf\uff08\u9009\u586b\uff09\u6570\u636e\u5b58\u50a8\uff08\u9009\u586b\uff09\u5b89\u5168\u8bbe\u7f6e\uff08\u9009\u586b\uff09

                                                                          \u5728\u914d\u7f6e\u5bb9\u5668\u76f8\u5173\u53c2\u6570\u65f6\uff0c\u5fc5\u987b\u6b63\u786e\u586b\u5199\u5bb9\u5668\u7684\u540d\u79f0\u3001\u955c\u50cf\u53c2\u6570\uff0c\u5426\u5219\u5c06\u65e0\u6cd5\u8fdb\u5165\u4e0b\u4e00\u6b65\u3002\u53c2\u8003\u4ee5\u4e0b\u8981\u6c42\u586b\u5199\u914d\u7f6e\u540e\uff0c\u70b9\u51fb \u786e\u8ba4 \u3002

• Container type: defaults to work container. For init containers, see the official Kubernetes documentation.
• Container name: at most 63 characters; lowercase letters, digits, and separators ("-") are supported. It must start and end with a lowercase letter or digit, e.g. nginx-01.
• Image:
  • Container image: select a suitable image from the list. When an image name is typed in, the image is pulled from the official DockerHub by default. After integrating the image registry module of the 算丰 AI 算力平台, you can click the Select Image button on the right to choose an image.
  • Image version: select a suitable version from the drop-down list.
  • Image pull policy: with Always pull image checked, the workload pulls the image from the registry on every restart/upgrade. If unchecked, only the local image is used, and the image is pulled from the registry only when it does not exist locally. For more details, see Image Pull Policy.
  • Image registry secret: optional. If the target registry requires a Secret for access, create a secret first.
• Privileged container: by default a container cannot access any device on the host; with privileged mode enabled, the container can access all devices on the host and holds all the permissions of processes running on the host.
• CPU/memory quota: the request (minimum resources needed) and limit (maximum resources allowed) for CPU/memory. Configure resources for the container as needed, to avoid resource waste and system failures caused by containers exceeding their quota. Default values are as shown in the figure.
• GPU configuration: configure GPU usage for the container; only positive integers may be entered.
  • Whole-card mode:
    • Physical card count: the number of physical GPU cards the container can use. After configuration, the container occupies whole physical GPU cards. The physical card count must be ≤ the maximum number of GPU cards installed in a single node.
  • Virtualization mode:
    • Physical card count: the number of physical GPU cards the container can use; must be ≤ the maximum number of GPU cards installed in a single node.
    • GPU compute: the percentage of compute power to use on each physical GPU card; at most 100%.
    • GPU memory: the amount of GPU memory to use on each physical card.
    • Scheduling policy (Binpack / Spread): scheduling is supported along two dimensions, GPU card and node. Binpack is a consolidation policy that prefers scheduling containers onto the same GPU card of the same node; Spread is a dispersion policy that prefers scheduling containers onto different GPU cards of different nodes. The two can be combined according to the actual scenario. (When the workload-level Binpack / Spread policy conflicts with the cluster-level policy, the workload-level policy takes precedence.)
    • Task priority: GPU compute is given to high-priority tasks first; ordinary tasks reduce or even pause their GPU usage until the high-priority task finishes, after which they resume. Commonly used in online/offline colocation scenarios.
    • Specified model: schedule the workload onto GPU cards of the specified model, for scenarios with special requirements on the GPU model.
  • MIG mode
    • Spec: the spec of the partitioned physical GPU card.
    • Count: the number of instances of that spec to use.

Before configuring GPUs, the administrator must install the GPU Operator and nvidia-vgpu (required only for vGPU mode) on the cluster in advance, and enable the GPU feature in the cluster settings.

Set the commands to be executed when the container starts, after it starts, and before it stops. For details, see Container Lifecycle Configuration.

Used to determine the health status of the container and the application, which helps improve application availability. For details, see Container Health Check Configuration.

Configure container parameters within the Pod, add environment variables or pass configuration to the Pod, and so on. For details, see Container Environment Variable Configuration.

Configure the settings for mounting data volumes and persisting data in the container. For details, see Container Data Storage Configuration.

Containers are securely isolated through Linux's built-in account permission isolation mechanism. You can restrict a container's permissions by using account UIDs (numeric identity tags) with different permissions. For example, entering 0 means using the permissions of the root account.

                                                                          "},{"location":"end-user/kpanda/workloads/create-statefulset.html#_5","title":"\u670d\u52a1\u914d\u7f6e","text":"

                                                                          \u4e3a\u6709\u72b6\u6001\u8d1f\u8f7d\u914d\u7f6e\u670d\u52a1\uff08Service\uff09\uff0c\u4f7f\u6709\u72b6\u6001\u8d1f\u8f7d\u80fd\u591f\u88ab\u5916\u90e8\u8bbf\u95ee\u3002

                                                                          1. \u70b9\u51fb \u521b\u5efa\u670d\u52a1 \u6309\u94ae\u3002

                                                                          2. \u53c2\u8003\u521b\u5efa\u670d\u52a1\uff0c\u914d\u7f6e\u670d\u52a1\u53c2\u6570\u3002

                                                                          3. \u70b9\u51fb \u786e\u5b9a \uff0c\u70b9\u51fb \u4e0b\u4e00\u6b65 \u3002
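StatefulSets are commonly paired with a headless Service so that each Pod gets a stable network identity. A minimal sketch, assuming the Pods carry the hypothetical label app: web:

apiVersion: v1
kind: Service
metadata:
  name: web-headless                   # hypothetical Service name
spec:
  clusterIP: None                      # headless: per-Pod DNS records instead of a cluster IP
  selector:
    app: web                           # must match the StatefulSet Pod labels
  ports:
  - port: 80

The StatefulSet then references this Service through its spec.serviceName field.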

                                                                          "},{"location":"end-user/kpanda/workloads/create-statefulset.html#_6","title":"\u9ad8\u7ea7\u914d\u7f6e","text":"

                                                                          \u9ad8\u7ea7\u914d\u7f6e\u5305\u62ec\u8d1f\u8f7d\u7684\u7f51\u7edc\u914d\u7f6e\u3001\u5347\u7ea7\u7b56\u7565\u3001\u8c03\u5ea6\u7b56\u7565\u3001\u6807\u7b7e\u4e0e\u6ce8\u89e3\u56db\u90e8\u5206\uff0c\u53ef\u70b9\u51fb\u4e0b\u65b9\u7684\u9875\u7b7e\u67e5\u770b\u5404\u90e8\u5206\u7684\u914d\u7f6e\u8981\u6c42\u3002

                                                                          \u7f51\u7edc\u914d\u7f6e\u5347\u7ea7\u7b56\u7565\u5bb9\u5668\u7ba1\u7406\u7b56\u7565\u8c03\u5ea6\u7b56\u7565\u6807\u7b7e\u4e0e\u6ce8\u89e3
• If the SpiderPool and Multus components are deployed in the cluster, container NICs can be configured in the network configuration.

• DNS configuration: in some scenarios an application generates redundant DNS queries. Kubernetes provides DNS-related configuration options that can effectively reduce redundant DNS queries in such scenarios and increase business concurrency.

• DNS policy

  • Default: the container uses the domain name resolution file pointed to by the kubelet's --resolv-conf parameter. This configuration can only resolve external domain names registered on the Internet; it cannot resolve cluster-internal domain names, and it produces no invalid DNS queries.
  • ClusterFirstWithHostNet: the application uses the host's domain name resolution file.
  • ClusterFirst: the application connects to Kube-DNS/CoreDNS.
  • None: a new option value introduced in Kubernetes v1.9 (Beta in v1.10). After setting it to None, dnsConfig must be set; the container's domain name resolution file is then generated entirely from the dnsConfig configuration.
• Nameservers: fill in the addresses of the domain name servers, e.g. 10.6.175.20.

• Search domains: the list of DNS search domains for domain name queries. When specified, the provided list is merged into the search field of the resolution file generated from dnsPolicy, and duplicate domain names are removed. Kubernetes allows at most 6 search domains.
• Options: DNS configuration options, where each object may have a name attribute (required) and a value attribute (optional). The content of this field is merged into the options field of the resolution file generated from dnsPolicy; if some options in dnsConfig conflict with options in the file generated from dnsPolicy, they are overridden by dnsConfig.
• Host aliases: aliases set for the hosts.

• Upgrade mode: Rolling upgrade gradually replaces old-version instances with new-version instances; during the upgrade, business traffic is load-balanced across both old and new instances, so the business is not interrupted. Recreate upgrade first deletes the old-version workload instances and then installs the specified new version; the business is interrupted during the upgrade.
• Max revisions retained: the number of old revisions kept for version rollback. Defaults to 10.
• Graceful scale-down window: the execution window for the pre-stop command before the workload stops (0-9,999 seconds); defaults to 30 seconds.

Kubernetes v1.7 and later can set the Pod management policy via .spec.podManagementPolicy, which supports the following two approaches (a minimal sketch follows this list):

• OrderedReady: the default Pod management policy. Pods are deployed in order; the stateful workload starts deploying the next Pod only after the previous one has been deployed successfully. Deletion uses the reverse order: the Pod created last is deleted first.

• Parallel: create or delete Pods in parallel, just like Pods of the Deployment type. The StatefulSet controller starts or terminates all Pods in parallel, without waiting for a Pod to become Running and Ready, or to stop completely, before starting or terminating another. This option only affects the behavior of scale operations; it does not affect the order during updates.
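A minimal StatefulSet sketch (hypothetical names, assuming a plain nginx workload) that switches the management policy to Parallel:

apiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: web                       # hypothetical name\nspec:\n  serviceName: nginx\n  replicas: 3\n  podManagementPolicy: Parallel   # replicas start and terminate in parallel; update order is unaffected\n  selector:\n    matchLabels:\n      app: nginx\n  template:\n    metadata:\n      labels:\n        app: nginx\n    spec:\n      containers:\n        - name: nginx\n          image: nginx\n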

• Toleration Time: when the node hosting the workload instances becomes unavailable, the time after which the instances are rescheduled to other available nodes. Defaults to 300 seconds.
• Node Affinity: constrains which nodes a Pod can be scheduled to, based on the labels on the nodes.
• Workload Affinity: constrains which nodes a Pod can be scheduled to, based on the labels of Pods already running on those nodes.
• Workload Anti-Affinity: constrains which nodes a Pod cannot be scheduled to, based on the labels of Pods already running on those nodes.
• Topology Key: i.e. topologyKey, specifies a group of nodes that can be scheduled to. For example, kubernetes.io/os means that as long as a node of some operating system satisfies the labelSelector conditions, Pods can be scheduled to that node.

For details, refer to Scheduling Policy.

![Scheduling policy](../../../images/deploy15_1.png)\n

You can click the Add button to add labels and annotations to the workload and its Pods.

                                                                          "},{"location":"end-user/kpanda/workloads/create-statefulset.html#yaml","title":"YAML \u521b\u5efa","text":"

Besides the image-based method, you can also create stateful workloads more quickly via a YAML file.

1. Click Cluster List in the left navigation bar, then click the name of the target cluster to enter the Cluster Details page.

2. On the cluster details page, click Workloads -> StatefulSets in the left navigation bar, then click the YAML Create button in the upper-right corner of the page.

3. Enter or paste the prepared YAML file, then click OK to finish the creation.

Click to view a YAML example of creating a stateful workload
                                                                          kind: StatefulSet\napiVersion: apps/v1\nmetadata:\n  name: test-mysql-123-mysql\n  namespace: default\n  uid: d3f45527-a0ab-4b22-9013-5842a06f4e0e\n  resourceVersion: '20504385'\n  generation: 1\n  creationTimestamp: '2022-09-22T09:34:10Z'\n  ownerReferences:\n    - apiVersion: mysql.presslabs.org/v1alpha1\n      kind: MysqlCluster\n      name: test-mysql-123\n      uid: 5e877cc3-5167-49da-904e-820940cf1a6d\n      controller: true\n      blockOwnerDeletion: true\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app.kubernetes.io/managed-by: mysql.presslabs.org\n      app.kubernetes.io/name: mysql\n      mysql.presslabs.org/cluster: test-mysql-123\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app.kubernetes.io/component: database\n        app.kubernetes.io/instance: test-mysql-123\n        app.kubernetes.io/managed-by: mysql.presslabs.org\n        app.kubernetes.io/name: mysql\n        app.kubernetes.io/version: 5.7.31\n        mysql.presslabs.org/cluster: test-mysql-123\n      annotations:\n        config_rev: '13941099'\n        prometheus.io/port: '9125'\n        prometheus.io/scrape: 'true'\n        secret_rev: '13941101'\n    spec:\n      volumes:\n        - name: conf\n          emptyDir: {}\n        - name: init-scripts\n          emptyDir: {}\n        - name: config-map\n          configMap:\n            name: test-mysql-123-mysql\n            defaultMode: 420\n        - name: data\n          persistentVolumeClaim:\n            claimName: data\n      initContainers:\n        - name: init\n          image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - clone-and-init\n          envFrom:\n            - secretRef:\n                name: test-mysql-123-mysql-operated\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: BACKUP_USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: BACKUP_USER\n                  optional: true\n            - name: BACKUP_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: BACKUP_PASSWORD\n                  optional: true\n          resources: {}\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: config-map\n              mountPath: /mnt/conf\n            - name: data\n              mountPath: /var/lib/mysql\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n      containers:\n        - name: mysql\n 
         image: docker.m.daocloud.io/mysql:5.7.31\n          ports:\n            - name: mysql\n              containerPort: 3306\n              protocol: TCP\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: ORCH_CLUSTER_ALIAS\n              value: test-mysql-123.default\n            - name: ORCH_HTTP_API\n              value: http://mysql-operator.mcamel-system/api\n            - name: MYSQL_ROOT_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: ROOT_PASSWORD\n                  optional: false\n            - name: MYSQL_USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: USER\n                  optional: true\n            - name: MYSQL_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: PASSWORD\n                  optional: true\n            - name: MYSQL_DATABASE\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: DATABASE\n                  optional: true\n          resources:\n            limits:\n              cpu: '1'\n              memory: 1Gi\n            requests:\n              cpu: 100m\n              memory: 512Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: data\n              mountPath: /var/lib/mysql\n          livenessProbe:\n            exec:\n              command:\n                - mysqladmin\n                - '--defaults-file=/etc/mysql/client.conf'\n                - ping\n            initialDelaySeconds: 60\n            timeoutSeconds: 5\n            periodSeconds: 5\n            successThreshold: 1\n            failureThreshold: 3\n          readinessProbe:\n            exec:\n              command:\n                - /bin/sh\n                - '-c'\n                - >-\n                  test $(mysql --defaults-file=/etc/mysql/client.conf -NB -e\n                  'SELECT COUNT(*) FROM sys_operator.status WHERE\n                  name=\"configured\" AND value=\"1\"') -eq 1\n            initialDelaySeconds: 5\n            timeoutSeconds: 5\n            periodSeconds: 2\n            successThreshold: 1\n            failureThreshold: 3\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - bash\n                  - /etc/mysql/pre-shutdown-ha.sh\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: sidecar\n          image: 
docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - config-and-serve\n          ports:\n            - name: sidecar-http\n              containerPort: 8080\n              protocol: TCP\n          envFrom:\n            - secretRef:\n                name: test-mysql-123-mysql-operated\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: XTRABACKUP_TARGET_DIR\n              value: /tmp/xtrabackup_backupfiles/\n          resources:\n            limits:\n              cpu: '1'\n              memory: 1Gi\n            requests:\n              cpu: 10m\n              memory: 64Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: data\n              mountPath: /var/lib/mysql\n          readinessProbe:\n            httpGet:\n              path: /health\n              port: 8080\n              scheme: HTTP\n            initialDelaySeconds: 30\n            timeoutSeconds: 5\n            periodSeconds: 5\n            successThreshold: 1\n            failureThreshold: 3\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: metrics-exporter\n          image: prom/mysqld-exporter:v0.13.0\n          args:\n            - '--web.listen-address=0.0.0.0:9125'\n            - '--web.telemetry-path=/metrics'\n            - '--collect.heartbeat'\n            - '--collect.heartbeat.database=sys_operator'\n          ports:\n            - name: prometheus\n              containerPort: 9125\n              protocol: TCP\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: METRICS_EXPORTER_USER\n                  optional: false\n            - name: PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: 
test-mysql-123-mysql-operated\n                  key: METRICS_EXPORTER_PASSWORD\n                  optional: false\n            - name: DATA_SOURCE_NAME\n              value: $(USER):$(PASSWORD)@(127.0.0.1:3306)/\n          resources:\n            limits:\n              cpu: 100m\n              memory: 128Mi\n            requests:\n              cpu: 10m\n              memory: 32Mi\n          livenessProbe:\n            httpGet:\n              path: /metrics\n              port: 9125\n              scheme: HTTP\n            initialDelaySeconds: 30\n            timeoutSeconds: 30\n            periodSeconds: 30\n            successThreshold: 1\n            failureThreshold: 3\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: pt-heartbeat\n          image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - pt-heartbeat\n            - '--update'\n            - '--replace'\n            - '--check-read-only'\n            - '--create-table'\n            - '--database'\n            - sys_operator\n            - '--table'\n            - heartbeat\n            - '--utc'\n            - '--defaults-file'\n            - /etc/mysql/heartbeat.conf\n            - '--fail-successive-errors=20'\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n          resources:\n            limits:\n              cpu: 100m\n              memory: 64Mi\n            requests:\n              cpu: 10m\n              memory: 32Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n      restartPolicy: Always\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      securityContext:\n        runAsUser: 999\n        fsGroup: 999\n      affinity:\n        podAntiAffinity:\n          preferredDuringSchedulingIgnoredDuringExecution:\n            - weight: 100\n              podAffinityTerm:\n                labelSelector:\n                  matchLabels:\n                    app.kubernetes.io/component: database\n                    app.kubernetes.io/instance: test-mysql-123\n                    app.kubernetes.io/managed-by: mysql.presslabs.org\n                    app.kubernetes.io/name: mysql\n                    app.kubernetes.io/version: 5.7.31\n                    mysql.presslabs.org/cluster: test-mysql-123\n                topologyKey: kubernetes.io/hostname\n      schedulerName: default-scheduler\n  volumeClaimTemplates:\n    - kind: PersistentVolumeClaim\n      apiVersion: v1\n      metadata:\n        name: data\n        
creationTimestamp: null\n        ownerReferences:\n          - apiVersion: mysql.presslabs.org/v1alpha1\n            kind: MysqlCluster\n            name: test-mysql-123\n            uid: 5e877cc3-5167-49da-904e-820940cf1a6d\n            controller: true\n      spec:\n        accessModes:\n          - ReadWriteOnce\n        resources:\n          limits:\n            storage: 1Gi\n          requests:\n            storage: 1Gi\n        storageClassName: local-path\n        volumeMode: Filesystem\n      status:\n        phase: Pending\n  serviceName: mysql\n  podManagementPolicy: OrderedReady\n  updateStrategy:\n    type: RollingUpdate\n    rollingUpdate:\n      partition: 0\n  revisionHistoryLimit: 10\nstatus:\n  observedGeneration: 1\n  replicas: 1\n  readyReplicas: 1\n  currentReplicas: 1\n  updatedReplicas: 1\n  currentRevision: test-mysql-123-mysql-6b8f5577c7\n  updateRevision: test-mysql-123-mysql-6b8f5577c7\n  collisionCount: 0\n  availableReplicas: 1\n
                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/env-variables.html","title":"\u914d\u7f6e\u73af\u5883\u53d8\u91cf","text":"

An environment variable is a variable set in the container's runtime environment, used to add environment flags to a Pod or to pass configuration, etc. Configuring environment variables for Pods as key-value pairs is supported.

算丰 AI 算力平台 container management adds a graphical interface on top of native Kubernetes for configuring environment variables for Pods, and supports the following configuration methods (a minimal sketch follows this list):

• Key/Value Pair: use a custom key-value pair as an environment variable of the container
• Resource: use a field defined on the Container as the value of an environment variable, such as the container's memory limit, number of replicas, etc.
• Variable/Variable Reference (Pod Field): use a Pod field as the value of an environment variable, such as the Pod's name
• ConfigMap key: import the value of a key in a ConfigMap as the value of an environment variable
• Secret Key: define the value of an environment variable using data from a Secret
• Secret: import all key-value pairs in a Secret as environment variables
• ConfigMap: import all key-value pairs in a ConfigMap as environment variables
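A minimal container sketch (the container, ConfigMap, and Secret names are hypothetical) covering the main methods in one env/envFrom block:

containers:\n  - name: app                        # hypothetical container\n    image: nginx\n    env:\n      - name: GREETING               # key/value pair\n        value: hello\n      - name: POD_NAME               # Pod field\n        valueFrom:\n          fieldRef:\n            fieldPath: metadata.name\n      - name: MEM_LIMIT              # resource reference\n        valueFrom:\n          resourceFieldRef:\n            containerName: app\n            resource: limits.memory\n      - name: DB_HOST                # ConfigMap key\n        valueFrom:\n          configMapKeyRef:\n            name: app-config         # hypothetical ConfigMap\n            key: db_host\n      - name: DB_PASSWORD            # Secret key\n        valueFrom:\n          secretKeyRef:\n            name: app-secret         # hypothetical Secret\n            key: password\n    envFrom:\n      - configMapRef:\n          name: app-config           # ConfigMap import: all key-value pairs become variables\n      - secretRef:\n          name: app-secret           # Secret import: all key-value pairs become variables\n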
                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/health-check.html","title":"\u5bb9\u5668\u7684\u5065\u5eb7\u68c0\u67e5","text":"

Container health checks examine the health of a container according to user requirements. After configuration, if the application inside the container becomes abnormal, the container is automatically restarted to recover. Kubernetes provides Liveness checks, Readiness checks, and Startup checks.

• LivenessProbe detects application deadlock (the application is running but cannot proceed with subsequent steps). Restarting a container in such a state helps improve the application's availability, even if the application contains defects.

• ReadinessProbe detects when a container is ready to accept request traffic; a Pod is considered ready only when all of its containers are ready. One use of this signal is to control which Pods serve as backends for a Service: a Pod that is not yet ready is removed from the Service's load balancer.

• StartupProbe detects when an application container has started. Once configured, it keeps the liveness and readiness checks from running until the container has started successfully, ensuring those probes do not interfere with the application's startup. Startup probes can be used for liveness detection of slow-starting containers, preventing them from being killed before they are up and running.

                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/health-check.html#_2","title":"\u5b58\u6d3b\u548c\u5c31\u7eea\u68c0\u67e5","text":"

The configuration parameters of the liveness check (LivenessProbe) and the readiness check (ReadinessProbe) are similar; the only difference is that the readinessProbe field is used instead of the livenessProbe field.

HTTP GET parameter description:

| Parameter | Description |
| --- | --- |
| Path | The request path to access, such as the /healthz path in the example |
| Port | The port the service listens on, such as port 8080 in the example |
| Protocol | The access protocol, HTTP or HTTPS |
| Delay (initialDelaySeconds) | The delay before checking, in seconds. This setting relates to how long the business program normally takes to start. For example, a value of 30 means the health check starts 30 seconds after the container starts; this time is reserved for the business program to start. |
| Timeout (timeoutSeconds) | The timeout, in seconds. For example, a value of 10 means a health check must complete within 10 seconds; beyond that, this health check is considered failed. If set to 0 or unset, the default timeout is 1 second. |
| Success Threshold (successThreshold) | The minimum number of consecutive successes after a failed probe for the probe to be considered successful again. The default is 1 and the minimum is 1. For liveness and startup probes this value must be 1. |
| Max Failures (failureThreshold) | The number of retries when a probe fails. Giving up on a liveness probe means restarting the container; giving up on a readiness probe marks the Pod as not ready. The default is 3 and the minimum is 1. |

"},{"location":"end-user/kpanda/workloads/pod-config/health-check.html#http-get","title":"Check Using HTTP GET Requests","text":"

YAML example:

apiVersion: v1\nkind: Pod\nmetadata:\n  labels:\n    test: liveness\n  name: liveness-http\nspec:\n  containers:\n  - name: liveness\n    image: k8s.gcr.io/liveness\n    args:\n    - /server\n    livenessProbe:\n      httpGet:\n        path: /healthz  # the request path to access\n        port: 8080  # the port the service listens on\n        httpHeaders:\n        - name: Custom-Header\n          value: Awesome\n      initialDelaySeconds: 3  # the kubelet waits 3 seconds before performing the first probe\n      periodSeconds: 3   # the kubelet performs a liveness probe every 3 seconds\n

Following the configured rules, the kubelet sends an HTTP GET request to the service running inside the container (listening on port 8080) to perform the probe. If the handler for the /healthz path on the server returns a success code, the kubelet considers the container healthy and alive. If the handler returns a failure code, the kubelet kills the container and restarts it. Any code greater than or equal to 200 and less than 400 indicates success; any other code indicates failure. For the first 10 seconds of the container's life, the /healthz handler returns a status code of 200; after that, it returns a status code of 500.

                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/health-check.html#tcp","title":"\u4f7f\u7528 TCP \u7aef\u53e3\u68c0\u67e5","text":"

TCP port parameter description:

| Parameter | Description |
| --- | --- |
| Port | The port the service listens on, such as port 8080 in the example |
| Delay (initialDelaySeconds) | The delay before checking, in seconds. This setting relates to how long the business program normally takes to start. For example, a value of 30 means the health check starts 30 seconds after the container starts; this time is reserved for the business program to start. |
| Timeout (timeoutSeconds) | The timeout, in seconds. For example, a value of 10 means a health check must complete within 10 seconds; beyond that, this health check is considered failed. If set to 0 or unset, the default timeout is 1 second. |

For a container that provides TCP communication services, based on this configuration the cluster establishes a TCP connection to the container according to the configured rules. If the connection succeeds, the probe succeeds; otherwise it fails. When choosing the TCP port probe method, you must specify the port the container listens on.

YAML example:

                                                                          apiVersion: v1\nkind: Pod\nmetadata:\n  name: goproxy\n  labels:\n    app: goproxy\nspec:\n  containers:\n  - name: goproxy\n    image: k8s.gcr.io/goproxy:0.1\n    ports:\n    - containerPort: 8080\n    readinessProbe:\n      tcpSocket:\n        port: 8080\n      initialDelaySeconds: 5\n      periodSeconds: 10\n    livenessProbe:\n      tcpSocket:\n        port: 8080\n      initialDelaySeconds: 15\n      periodSeconds: 20\n

This example uses both readiness and liveness probes. The kubelet sends the first readiness probe 5 seconds after the container starts, attempting to connect to port 8080 of the goproxy container. If the probe succeeds, the Pod is marked ready, and the kubelet continues to run the check every 10 seconds.

Besides the readiness probe, this configuration includes a liveness probe. The kubelet performs the first liveness probe 15 seconds after the container starts. Like the readiness probe, the liveness probe attempts to connect to port 8080 of the goproxy container. If the liveness probe fails, the container is restarted.

                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/health-check.html#_3","title":"\u6267\u884c\u547d\u4ee4\u68c0\u67e5","text":"

YAML example:

apiVersion: v1\nkind: Pod\nmetadata:\n  labels:\n    test: liveness\n  name: liveness-exec\nspec:\n  containers:\n  - name: liveness\n    image: k8s.gcr.io/busybox\n    args:\n    - /bin/sh\n    - -c\n    - touch /tmp/healthy; sleep 30; rm -f /tmp/healthy; sleep 600\n    livenessProbe:\n      exec:\n        command:\n        - cat\n        - /tmp/healthy\n      initialDelaySeconds: 5 # the kubelet waits 5 seconds before performing the first probe\n      periodSeconds: 5  # the kubelet performs a liveness probe every 5 seconds\n

The periodSeconds field specifies that the kubelet performs a liveness probe every 5 seconds, and the initialDelaySeconds field specifies that the kubelet waits 5 seconds before performing the first probe. Following the configured rules, the cluster periodically has the kubelet execute the command cat /tmp/healthy inside the container to perform the probe. If the command succeeds with a return value of 0, the kubelet considers the container healthy and alive. If the command returns a non-zero value, the kubelet kills the container and restarts it.

                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/health-check.html#_4","title":"\u4f7f\u7528\u542f\u52a8\u524d\u68c0\u67e5\u4fdd\u62a4\u6162\u542f\u52a8\u5bb9\u5668","text":"

Some applications need a long initialization time at startup. In that case, set up a startup probe using the same command; for HTTP or TCP checks, you can handle long startup times by setting the failureThreshold * periodSeconds parameters to a long enough duration.

YAML example:

                                                                          ports:\n- name: liveness-port\n  containerPort: 8080\n  hostPort: 8080\n\nlivenessProbe:\n  httpGet:\n    path: /healthz\n    port: liveness-port\n  failureThreshold: 1\n  periodSeconds: 10\n\nstartupProbe:\n  httpGet:\n    path: /healthz\n    port: liveness-port\n  failureThreshold: 30\n  periodSeconds: 10\n

With the settings above, the application has at most 5 minutes (30 * 10 = 300s) to complete its startup process. Once the startup probe succeeds, the liveness probe takes over probing the container, responding quickly to container deadlock. If the startup probe never succeeds, the container is killed after 300 seconds and handled further according to the restartPolicy.

                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/job-parameters.html","title":"\u4efb\u52a1\u53c2\u6570\u8bf4\u660e","text":"

Based on the settings of .spec.completions and .spec.parallelism, a Job can be divided into the following types:

| Job Type | Description |
| --- | --- |
| Non-parallel Job | Creates one Pod until its Job ends successfully |
| Parallel Job with a fixed completion count | The Job is considered complete when the number of successful Pods reaches .spec.completions |
| Parallel Job | Creates one or more Pods until one of them ends successfully |

Parameter description:

| Parameter | Description |
| --- | --- |
| RestartPolicy | Creates a Pod until it ends successfully |
| .spec.completions | The number of Pods that must finish successfully for the Job to end; defaults to 1 |
| .spec.parallelism | The number of Pods that run in parallel; defaults to 1 |
| spec.backoffLimit | The maximum number of retries for failed Pods; no further retries happen beyond this count |
| .spec.activeDeadlineSeconds | The Pod running time; once this time is reached, the Job and all of its Pods stop. activeDeadlineSeconds has higher priority than backoffLimit: a Job that reaches activeDeadlineSeconds ignores the backoffLimit setting |

Below is a Job configuration example, saved in myjob.yaml, which computes π to 2000 digits and prints the output.

apiVersion: batch/v1\nkind: Job            # the type of the current resource\nmetadata:\n  name: myjob\nspec:\n  completions: 50        # the Job needs 50 Pods to run to completion; in this example, printing π 50 times\n  parallelism: 5        # 5 Pods in parallel\n  backoffLimit: 5        # retry at most 5 times\n  template:\n    spec:\n      containers:\n      - name: pi\n        image: perl\n        command: [\"perl\",  \"-Mbignum=bpi\", \"-wle\", \"print bpi(2000)\"]\n      restartPolicy: Never # restart policy\n

Related commands:

kubectl apply -f myjob.yaml  # start the Job\nkubectl get job # view this Job\nkubectl logs myjob-1122dswzs # view the logs of the Job Pod\n
                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/lifecycle.html","title":"\u914d\u7f6e\u5bb9\u5668\u751f\u547d\u5468\u671f","text":"

A Pod follows a predefined lifecycle, starting in the Pending phase and entering the Running state if at least one container in the Pod starts normally. If any container in the Pod ends in a failed state, the state becomes Failed. The following phase field values indicate which lifecycle phase a Pod is in.

| Value | Description |
| --- | --- |
| Pending | The Pod has been accepted by the system, but one or more of its containers have not yet been created or run. This phase includes the time spent waiting for the Pod to be scheduled and the time spent downloading images over the network. |
| Running | The Pod has been bound to a node and all of its containers have been created. At least one container is still running, or is starting or restarting. |
| Succeeded | All containers in the Pod have terminated successfully and will not be restarted. |
| Failed | All containers in the Pod have terminated, and at least one container terminated due to failure, i.e. it exited with a non-zero status or was terminated by the system. |
| Unknown | The Pod's status cannot be obtained for some reason, usually because communication with the Pod's host failed. |

When creating a workload in 算丰 AI 算力平台 container management, an image is usually used to specify the runtime environment inside the container. By default, when building an image, the Entrypoint and CMD fields define the commands and arguments executed when the container runs. To change the commands and arguments run before startup, after startup, or before stopping, you can override the image's defaults by setting the container's lifecycle event commands and arguments, as the sketch below illustrates.
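A minimal sketch (hypothetical names; the commands stand in for real business logic) that overrides the image's Entrypoint/CMD and sets post-start and pre-stop commands; the HTTP request type described below corresponds to an httpGet handler in the same lifecycle block:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: lifecycle-demo                 # hypothetical name\nspec:\n  containers:\n    - name: app\n      image: nginx\n      command: ['nginx']               # overrides the image's Entrypoint (startup command)\n      args: ['-g', 'daemon off;']      # overrides the image's CMD (run arguments)\n      lifecycle:\n        postStart:                     # post-start command\n          exec:\n            command: ['/bin/sh', '-c', 'echo started > /tmp/started']\n        preStop:                       # pre-stop command: drain the business before the instance stops\n          exec:\n            command: ['/usr/sbin/nginx', '-s', 'quit']\n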

                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/lifecycle.html#_2","title":"\u751f\u547d\u5468\u671f\u914d\u7f6e","text":"

Configure the container's startup command, post-start command, and pre-stop command according to business needs.

| Parameter | Description | Example Value |
| --- | --- | --- |
| Startup Command | [Type] Optional [Meaning] The container starts according to the startup command. | |
| Post-Start Command | [Type] Optional [Meaning] The command triggered after the container starts. | |
| Pre-Stop Command | [Type] Optional [Meaning] The command executed after the container receives the stop command. Ensures that the business running in an instance can be drained in advance when the instance is upgraded or deleted. | |

"},{"location":"end-user/kpanda/workloads/pod-config/lifecycle.html#_3","title":"Startup Command","text":"

Configure the startup command according to the table below.

| Parameter | Description | Example Value |
| --- | --- | --- |
| Run Command | [Type] Required [Meaning] Enter an executable command, separating multiple commands with spaces; if a command itself contains spaces, wrap it in quotation marks (""). [Meaning] With multiple commands, it is recommended to use /bin/sh or another shell as the run command, passing all the other commands in as arguments. | /run/server |
| Run Arguments | [Type] Optional [Meaning] Enter the arguments of the container's run command. | port=8080 |

"},{"location":"end-user/kpanda/workloads/pod-config/lifecycle.html#_4","title":"Post-Start Command","text":"

算丰 AI 算力平台 provides two processing types, command-line script and HTTP request, for configuring the post-start command. You can choose the configuration method that suits you from the table below.

Command-line script configuration:

| Parameter | Description | Example Value |
| --- | --- | --- |
| Run Command | [Type] Optional [Meaning] Enter an executable command, separating multiple commands with spaces; if a command itself contains spaces, wrap it in quotation marks (""). [Meaning] With multiple commands, it is recommended to use /bin/sh or another shell as the run command, passing all the other commands in as arguments. | /run/server |
| Run Arguments | [Type] Optional [Meaning] Enter the arguments of the container's run command. | port=8080 |

"},{"location":"end-user/kpanda/workloads/pod-config/lifecycle.html#_5","title":"Pre-Stop Command","text":"

算丰 AI 算力平台 provides two processing types, command-line script and HTTP request, for configuring the pre-stop command. You can choose the configuration method that suits you from the table below.

HTTP request configuration:

| Parameter | Description | Example Value |
| --- | --- | --- |
| URL Path | [Type] Optional [Meaning] The URL path of the request. | /run/server |
| Port | [Type] Required [Meaning] The port of the request. | port=8080 |
| Node Address | [Type] Optional [Meaning] The IP address of the request; defaults to the IP of the node where the container is located. | |

"},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html","title":"Scheduling Policy","text":"

In a Kubernetes cluster, nodes also have labels. You can add labels manually, and Kubernetes also adds some standard labels to every node in the cluster. See Well-Known Labels, Annotations and Taints for common node labels. By adding labels to nodes, you can have Pods scheduled to specific nodes or node groups, and use this to ensure that specific Pods only run on nodes with certain isolation, security, or regulatory properties.

nodeSelector is the simplest recommended form of node selection constraint: add the nodeSelector field to the Pod's spec and set the node labels you want the target node to have, and Kubernetes only schedules the Pod to nodes that carry every specified label. Affinity and anti-affinity expand the types of constraints you can define. Some benefits of using affinity and anti-affinity are:

• The affinity and anti-affinity language is more expressive. nodeSelector can only select nodes that have all the specified labels; affinity and anti-affinity give you stronger control over the selection logic.

• You can mark a rule as a "soft requirement" or "preference", so that when no matching node can be found, the scheduler ignores the affinity/anti-affinity rule and still schedules the Pod successfully.

• You can enforce scheduling constraints using the labels of other Pods running on a node (or in another topology domain), rather than only the node's own labels. This capability lets you define rules for which Pods may be placed together.

You can choose the nodes a Pod is deployed to by setting affinity and anti-affinity (a minimal nodeSelector sketch follows).
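A minimal nodeSelector sketch (the disktype label is hypothetical); the Pod is only scheduled to nodes that carry every listed label:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: nginx\nspec:\n  nodeSelector:\n    disktype: ssd                  # hypothetical node label\n  containers:\n    - name: nginx\n      image: nginx\n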

                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_2","title":"\u5bb9\u5fcd\u65f6\u95f4","text":"

When the node hosting the workload instances becomes unavailable, the time window within which the system reschedules the instances to other available nodes. Defaults to 300 seconds (see the sketch below).
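In native Kubernetes this window corresponds to tolerationSeconds on the built-in not-ready and unreachable taints; a hedged sketch mirroring the 300-second default:

tolerations:\n  - key: node.kubernetes.io/not-ready\n    operator: Exists\n    effect: NoExecute\n    tolerationSeconds: 300         # reschedule 300 seconds after the node becomes not ready\n  - key: node.kubernetes.io/unreachable\n    operator: Exists\n    effect: NoExecute\n    tolerationSeconds: 300\n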

                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#nodeaffinity","title":"\u8282\u70b9\u4eb2\u548c\u6027\uff08nodeAffinity\uff09","text":"

Node affinity is conceptually similar to nodeSelector: it lets you constrain which nodes a Pod can be scheduled to, based on the labels on the nodes. There are two kinds of node affinity:

• Required ( requiredDuringSchedulingIgnoredDuringExecution ): the scheduler can only schedule when the rule is satisfied. This works like nodeSelector, but with more expressive syntax. You can define multiple hard constraint rules, only one of which needs to be satisfied.

• Preferred ( preferredDuringSchedulingIgnoredDuringExecution ): the scheduler tries to find nodes that satisfy the corresponding rule. If no matching node is found, the scheduler still schedules the Pod. You can also assign weights to soft constraint rules; during scheduling, if multiple nodes qualify, the node with the largest weight is scheduled first. Again, you can define multiple hard constraint rules, only one of which needs to be satisfied.

                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_3","title":"\u6807\u7b7e\u540d","text":"

                                                                          \u5bf9\u5e94\u8282\u70b9\u7684\u6807\u7b7e\uff0c\u53ef\u4ee5\u4f7f\u7528\u9ed8\u8ba4\u7684\u6807\u7b7e\u4e5f\u53ef\u4ee5\u7528\u6237\u81ea\u5b9a\u4e49\u6807\u7b7e\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_4","title":"\u64cd\u4f5c\u7b26","text":"
                                                                          • In\uff1a\u6807\u7b7e\u503c\u9700\u8981\u5728 values \u7684\u5217\u8868\u4e2d
                                                                          • NotIn\uff1a\u6807\u7b7e\u7684\u503c\u4e0d\u5728\u67d0\u4e2a\u5217\u8868\u4e2d
                                                                          • Exists\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                                                                          • DoesNotExist\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u4e0d\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                                                                          • Gt\uff1a\u6807\u7b7e\u7684\u503c\u5927\u4e8e\u67d0\u4e2a\u503c\uff08\u5b57\u7b26\u4e32\u6bd4\u8f83\uff09
                                                                          • Lt\uff1a\u6807\u7b7e\u7684\u503c\u5c0f\u4e8e\u67d0\u4e2a\u503c\uff08\u5b57\u7b26\u4e32\u6bd4\u8f83\uff09
                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_5","title":"\u6743\u91cd","text":"

                                                                          \u4ec5\u652f\u6301\u5728\u201c\u5c3d\u91cf\u6ee1\u8db3\u201d\u7b56\u7565\u4e2d\u6dfb\u52a0\uff0c\u53ef\u4ee5\u7406\u89e3\u4e3a\u8c03\u5ea6\u7684\u4f18\u5148\u7ea7\uff0c\u6743\u91cd\u5927\u7684\u4f1a\u88ab\u4f18\u5148\u8c03\u5ea6\u3002\u53d6\u503c\u8303\u56f4\u662f 1 \u5230 100\u3002

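Putting the two rule types, label name, operator, and weight together, a minimal nodeAffinity sketch might look as follows (the disktype and zone labels are illustrative, not platform defaults):

affinity:\n  nodeAffinity:\n    requiredDuringSchedulingIgnoredDuringExecution:  # hard rule: must match\n      nodeSelectorTerms:\n      - matchExpressions:\n        - key: disktype\n          operator: In\n          values:\n          - ssd\n    preferredDuringSchedulingIgnoredDuringExecution:  # soft rule: best effort\n    - weight: 80  # 1 to 100; the highest-weight matching node wins\n      preference:\n        matchExpressions:\n        - key: zone\n          operator: In\n          values:\n          - zone-a\n
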
                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_6","title":"\u5de5\u4f5c\u8d1f\u8f7d\u4eb2\u548c\u6027","text":"

                                                                          \u4e0e\u8282\u70b9\u4eb2\u548c\u6027\u7c7b\u4f3c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u7684\u4eb2\u548c\u6027\u4e5f\u6709\u4e24\u79cd\u7c7b\u578b\uff1a

                                                                          • \u5fc5\u987b\u6ee1\u8db3\uff1a\uff08 requiredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u53ea\u6709\u5728\u89c4\u5219\u88ab\u6ee1\u8db3\u7684\u65f6\u5019\u624d\u80fd\u6267\u884c\u8c03\u5ea6\u3002\u6b64\u529f\u80fd\u7c7b\u4f3c\u4e8e nodeSelector \uff0c \u4f46\u5176\u8bed\u6cd5\u8868\u8fbe\u80fd\u529b\u66f4\u5f3a\u3002\u60a8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002
                                                                          • \u5c3d\u91cf\u6ee1\u8db3\uff1a\uff08 preferredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u4f1a\u5c1d\u8bd5\u5bfb\u627e\u6ee1\u8db3\u5bf9\u5e94\u89c4\u5219\u7684\u8282\u70b9\u3002\u5982\u679c\u627e\u4e0d\u5230\u5339\u914d\u7684\u8282\u70b9\uff0c\u8c03\u5ea6\u5668\u4ecd\u7136\u4f1a\u8c03\u5ea6\u8be5 Pod\u3002\u60a8\u8fd8\u53ef\u4e3a\u8f6f\u7ea6\u675f\u89c4\u5219\u8bbe\u5b9a\u6743\u91cd\uff0c\u5177\u4f53\u8c03\u5ea6\u65f6\uff0c\u82e5\u5b58\u5728\u591a\u4e2a\u7b26\u5408\u6761\u4ef6\u7684\u8282\u70b9\uff0c\u6743\u91cd\u6700\u5927\u7684\u8282\u70b9\u4f1a\u88ab\u4f18\u5148\u8c03\u5ea6\u3002\u540c\u65f6\u60a8\u8fd8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002

                                                                          \u5de5\u4f5c\u8d1f\u8f7d\u7684\u4eb2\u548c\u6027\u4e3b\u8981\u7528\u6765\u51b3\u5b9a\u5de5\u4f5c\u8d1f\u8f7d\u7684 Pod \u53ef\u4ee5\u548c\u54ea\u4e9b Pod\u90e8 \u7f72\u5728\u540c\u4e00\u62d3\u6251\u57df\u3002\u4f8b\u5982\uff0c\u5bf9\u4e8e\u76f8\u4e92\u901a\u4fe1\u7684\u670d\u52a1\uff0c\u53ef\u901a\u8fc7\u5e94\u7528\u4eb2\u548c\u6027\u8c03\u5ea6\uff0c\u5c06\u5176\u90e8\u7f72\u5230\u540c\u4e00\u62d3\u6251\u57df\uff08\u5982\u540c\u4e00\u53ef\u7528\u533a\uff09\u4e2d\uff0c\u51cf\u5c11\u5b83\u4eec\u4e4b\u95f4\u7684\u7f51\u7edc\u5ef6\u8fdf\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_7","title":"\u6807\u7b7e\u540d","text":"

                                                                          \u5bf9\u5e94\u8282\u70b9\u7684\u6807\u7b7e\uff0c\u53ef\u4ee5\u4f7f\u7528\u9ed8\u8ba4\u7684\u6807\u7b7e\u4e5f\u53ef\u4ee5\u7528\u6237\u81ea\u5b9a\u4e49\u6807\u7b7e\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_8","title":"\u547d\u540d\u7a7a\u95f4","text":"

                                                                          \u6307\u5b9a\u8c03\u5ea6\u7b56\u7565\u751f\u6548\u7684\u547d\u540d\u7a7a\u95f4\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_9","title":"\u64cd\u4f5c\u7b26","text":"
                                                                          • In\uff1a\u6807\u7b7e\u503c\u9700\u8981\u5728 values \u7684\u5217\u8868\u4e2d
                                                                          • NotIn\uff1a\u6807\u7b7e\u7684\u503c\u4e0d\u5728\u67d0\u4e2a\u5217\u8868\u4e2d
                                                                          • Exists\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                                                                          • DoesNotExist\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u4e0d\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_10","title":"\u62d3\u6251\u57df","text":"

                                                                          \u6307\u5b9a\u8c03\u5ea6\u65f6\u7684\u5f71\u54cd\u8303\u56f4\u3002\u4f8b\u5982\uff0c\u5982\u679c\u6307\u5b9a\u4e3a kubernetes.io/Clustername \u8868\u793a\u4ee5 Node \u8282\u70b9\u4e3a\u533a\u5206\u8303\u56f4\u3002

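As a hedged sketch, the label name, namespace, operator, and topology domain described above map onto the standard Kubernetes podAffinity fields roughly like this (the app=backend label and production namespace are illustrative):

affinity:\n  podAffinity:\n    requiredDuringSchedulingIgnoredDuringExecution:\n    - labelSelector:\n        matchExpressions:\n        - key: app\n          operator: In\n          values:\n          - backend\n      namespaces:\n      - production  # namespace in which the rule takes effect\n      topologyKey: topology.kubernetes.io/zone  # co-locate within one zone\n
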
                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_11","title":"\u5de5\u4f5c\u8d1f\u8f7d\u53cd\u4eb2\u548c\u6027","text":"

                                                                          \u4e0e\u8282\u70b9\u4eb2\u548c\u6027\u7c7b\u4f3c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u7684\u53cd\u4eb2\u548c\u6027\u4e5f\u6709\u4e24\u79cd\u7c7b\u578b\uff1a

                                                                          • \u5fc5\u987b\u6ee1\u8db3\uff1a\uff08 requiredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u53ea\u6709\u5728\u89c4\u5219\u88ab\u6ee1\u8db3\u7684\u65f6\u5019\u624d\u80fd\u6267\u884c\u8c03\u5ea6\u3002\u6b64\u529f\u80fd\u7c7b\u4f3c\u4e8e nodeSelector \uff0c \u4f46\u5176\u8bed\u6cd5\u8868\u8fbe\u80fd\u529b\u66f4\u5f3a\u3002\u60a8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002
                                                                          • \u5c3d\u91cf\u6ee1\u8db3\uff1a\uff08 preferredDuringSchedulingIgnoredDuringExecution \uff09 \u8c03\u5ea6\u5668\u4f1a\u5c1d\u8bd5\u5bfb\u627e\u6ee1\u8db3\u5bf9\u5e94\u89c4\u5219\u7684\u8282\u70b9\u3002\u5982\u679c\u627e\u4e0d\u5230\u5339\u914d\u7684\u8282\u70b9\uff0c\u8c03\u5ea6\u5668\u4ecd\u7136\u4f1a\u8c03\u5ea6\u8be5 Pod\u3002\u60a8\u8fd8\u53ef\u4e3a\u8f6f\u7ea6\u675f\u89c4\u5219\u8bbe\u5b9a\u6743\u91cd\uff0c\u5177\u4f53\u8c03\u5ea6\u65f6\uff0c\u82e5\u5b58\u5728\u591a\u4e2a\u7b26\u5408\u6761\u4ef6\u7684\u8282\u70b9\uff0c\u6743\u91cd\u6700\u5927\u7684\u8282\u70b9\u4f1a\u88ab\u4f18\u5148\u8c03\u5ea6\u3002\u540c\u65f6\u60a8\u8fd8\u53ef\u4ee5\u5b9a\u4e49\u591a\u6761\u786c\u7ea6\u675f\u89c4\u5219\uff0c\u4f46\u53ea\u9700\u6ee1\u8db3\u5176\u4e2d\u4e00\u6761\u3002

                                                                          \u5de5\u4f5c\u8d1f\u8f7d\u7684\u53cd\u4eb2\u548c\u6027\u4e3b\u8981\u7528\u6765\u51b3\u5b9a\u5de5\u4f5c\u8d1f\u8f7d\u7684 Pod \u4e0d\u53ef\u4ee5\u548c\u54ea\u4e9b Pod \u90e8\u7f72\u5728\u540c\u4e00\u62d3\u6251\u57df\u3002\u4f8b\u5982\uff0c\u5c06\u4e00\u4e2a\u8d1f\u8f7d\u7684\u76f8\u540c Pod \u5206\u6563\u90e8\u7f72\u5230\u4e0d\u540c\u7684\u62d3\u6251\u57df\uff08\u4f8b\u5982\u4e0d\u540c\u4e3b\u673a\uff09\u4e2d\uff0c\u63d0\u9ad8\u8d1f\u8f7d\u672c\u8eab\u7684\u7a33\u5b9a\u6027\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_12","title":"\u6807\u7b7e\u540d","text":"

                                                                          \u5bf9\u5e94\u8282\u70b9\u7684\u6807\u7b7e\uff0c\u53ef\u4ee5\u4f7f\u7528\u9ed8\u8ba4\u7684\u6807\u7b7e\u4e5f\u53ef\u4ee5\u7528\u6237\u81ea\u5b9a\u4e49\u6807\u7b7e\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_13","title":"\u547d\u540d\u7a7a\u95f4","text":"

                                                                          \u6307\u5b9a\u8c03\u5ea6\u7b56\u7565\u751f\u6548\u7684\u547d\u540d\u7a7a\u95f4\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_14","title":"\u64cd\u4f5c\u7b26","text":"
                                                                          • In\uff1a\u6807\u7b7e\u503c\u9700\u8981\u5728 values \u7684\u5217\u8868\u4e2d
                                                                          • NotIn\uff1a\u6807\u7b7e\u7684\u503c\u4e0d\u5728\u67d0\u4e2a\u5217\u8868\u4e2d
                                                                          • Exists\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                                                                          • DoesNotExist\uff1a\u5224\u65ad\u67d0\u4e2a\u6807\u7b7e\u662f\u4e0d\u5b58\u5728\uff0c\u65e0\u9700\u8bbe\u7f6e\u6807\u7b7e\u503c
                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/scheduling-policy.html#_15","title":"\u62d3\u6251\u57df","text":"

                                                                          \u6307\u5b9a\u8c03\u5ea6\u65f6\u7684\u5f71\u54cd\u8303\u56f4\u3002\u4f8b\u5982\uff0c\u5982\u679c\u6307\u5b9a\u4e3a kubernetes.io/Clustername \u8868\u793a\u4ee5 Node \u8282\u70b9\u4e3a\u533a\u5206\u8303\u56f4\u3002

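A corresponding hedged sketch for anti-affinity, spreading replicas of one workload across hosts (the app=web label is illustrative):

affinity:\n  podAntiAffinity:\n    requiredDuringSchedulingIgnoredDuringExecution:\n    - labelSelector:\n        matchExpressions:\n        - key: app\n          operator: In\n          values:\n          - web\n      topologyKey: kubernetes.io/hostname  # never place two such Pods on the same node\n
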
                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/workload-status.html","title":"\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001","text":"

                                                                          \u5de5\u4f5c\u8d1f\u8f7d\u662f\u8fd0\u884c\u5728 Kubernetes \u4e0a\u7684\u4e00\u4e2a\u5e94\u7528\u7a0b\u5e8f\uff0c\u5728 Kubernetes \u4e2d\uff0c\u65e0\u8bba\u60a8\u7684\u5e94\u7528\u7a0b\u5e8f\u662f\u7531\u5355\u4e2a\u540c\u4e00\u7ec4\u4ef6\u6216\u662f\u7531\u591a\u4e2a\u4e0d\u540c\u7684\u7ec4\u4ef6\u6784\u6210\uff0c\u90fd\u53ef\u4ee5\u4f7f\u7528\u4e00\u7ec4 Pod \u6765\u8fd0\u884c\u5b83\u3002Kubernetes \u63d0\u4f9b\u4e86\u4e94\u79cd\u5185\u7f6e\u7684\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u6765\u7ba1\u7406 Pod\uff1a

                                                                          • \u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d
                                                                          • \u6709\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d
                                                                          • \u5b88\u62a4\u8fdb\u7a0b
                                                                          • \u4efb\u52a1
                                                                          • \u5b9a\u65f6\u4efb\u52a1

                                                                          \u60a8\u4e5f\u53ef\u4ee5\u901a\u8fc7\u8bbe\u7f6e\u81ea\u5b9a\u4e49\u8d44\u6e90 CRD \u6765\u5b9e\u73b0\u5bf9\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u7684\u6269\u5c55\u3002\u5728\u7b2c\u4e94\u4ee3\u5bb9\u5668\u7ba1\u7406\u4e2d\uff0c\u652f\u6301\u5bf9\u5de5\u4f5c\u8d1f\u8f7d\u8fdb\u884c\u521b\u5efa\u3001\u66f4\u65b0\u3001\u6269\u5bb9\u3001\u76d1\u63a7\u3001\u65e5\u5fd7\u3001\u5220\u9664\u3001\u7248\u672c\u7ba1\u7406\u7b49\u5168\u751f\u547d\u5468\u671f\u7ba1\u7406\u3002

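As a minimal, hedged sketch of such an extension, a CRD declares a new workload-like resource type; every name below is illustrative rather than a platform API:

apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  name: trainsets.example.com  # illustrative, not a real platform CRD\nspec:\n  group: example.com\n  names:\n    kind: TrainSet\n    plural: trainsets\n  scope: Namespaced\n  versions:\n  - name: v1\n    served: true\n    storage: true\n    schema:\n      openAPIV3Schema:\n        type: object\n        x-kubernetes-preserve-unknown-fields: true\n
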
                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/workload-status.html#pod","title":"Pod \u72b6\u6001","text":"

                                                                          Pod \u662f Kuberneters \u4e2d\u521b\u5efa\u548c\u7ba1\u7406\u7684\u3001\u6700\u5c0f\u7684\u8ba1\u7b97\u5355\u5143\uff0c\u5373\u4e00\u7ec4\u5bb9\u5668\u7684\u96c6\u5408\u3002\u8fd9\u4e9b\u5bb9\u5668\u5171\u4eab\u5b58\u50a8\u3001\u7f51\u7edc\u4ee5\u53ca\u7ba1\u7406\u63a7\u5236\u5bb9\u5668\u8fd0\u884c\u65b9\u5f0f\u7684\u7b56\u7565\u3002 Pod \u901a\u5e38\u4e0d\u7531\u7528\u6237\u76f4\u63a5\u521b\u5efa\uff0c\u800c\u662f\u901a\u8fc7\u5de5\u4f5c\u8d1f\u8f7d\u8d44\u6e90\u6765\u521b\u5efa\u3002 Pod \u9075\u5faa\u4e00\u4e2a\u9884\u5b9a\u4e49\u7684\u751f\u547d\u5468\u671f\uff0c\u8d77\u59cb\u4e8e Pending \u9636\u6bb5\uff0c\u5982\u679c\u81f3\u5c11\u5176\u4e2d\u6709\u4e00\u4e2a\u4e3b\u8981\u5bb9\u5668\u6b63\u5e38\u542f\u52a8\uff0c\u5219\u8fdb\u5165 Running \uff0c\u4e4b\u540e\u53d6\u51b3\u4e8e Pod \u4e2d\u662f\u5426\u6709\u5bb9\u5668\u4ee5\u5931\u8d25\u72b6\u6001\u7ed3\u675f\u800c\u8fdb\u5165 Succeeded \u6216\u8005 Failed \u9636\u6bb5\u3002

                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/workload-status.html#_2","title":"\u5de5\u4f5c\u8d1f\u8f7d\u72b6\u6001","text":"

                                                                          \u7b2c\u4e94\u4ee3\u5bb9\u5668\u7ba1\u7406\u6a21\u5757\u4f9d\u636e Pod \u7684\u72b6\u6001\u3001\u526f\u672c\u6570\u7b49\u56e0\u7d20\uff0c\u8bbe\u8ba1\u4e86\u4e00\u79cd\u5185\u7f6e\u7684\u5de5\u4f5c\u8d1f\u8f7d\u751f\u547d\u5468\u671f\u7684\u72b6\u6001\u96c6\uff0c\u4ee5\u8ba9\u7528\u6237\u80fd\u591f\u66f4\u52a0\u771f\u5b9e\u7684\u611f\u77e5\u5de5\u4f5c\u8d1f\u8f7d\u8fd0\u884c\u60c5\u51b5\u3002 \u7531\u4e8e\u4e0d\u540c\u7684\u5de5\u4f5c\u8d1f\u8f7d\u7c7b\u578b\uff08\u6bd4\u5982\u65e0\u72b6\u6001\u5de5\u4f5c\u8d1f\u8f7d\u548c\u4efb\u52a1\uff09\u5bf9 Pod \u7684\u7ba1\u7406\u673a\u5236\u4e0d\u4e00\u81f4\uff0c\u56e0\u6b64\uff0c\u4e0d\u540c\u7684\u5de5\u4f5c\u8d1f\u8f7d\u5728\u8fd0\u884c\u8fc7\u7a0b\u4e2d\u4f1a\u5448\u73b0\u4e0d\u540c\u7684\u751f\u547d\u5468\u671f\u72b6\u6001\uff0c\u5177\u4f53\u5982\u4e0b\u8868\uff1a

                                                                          "},{"location":"end-user/kpanda/workloads/pod-config/workload-status.html#_3","title":"\u65e0\u72b6\u6001\u8d1f\u8f7d\u3001\u6709\u72b6\u6001\u8d1f\u8f7d\u3001\u5b88\u62a4\u8fdb\u7a0b\u72b6\u6001","text":"\u72b6\u6001 \u63cf\u8ff0 \u7b49\u5f85\u4e2d 1. \u5de5\u4f5c\u8d1f\u8f7d\u521b\u5efa\u6b63\u5728\u8fdb\u884c\u4e2d\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u30022. \u89e6\u53d1\u5347\u7ea7\u6216\u8005\u56de\u6eda\u52a8\u4f5c\u540e\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u30023. \u89e6\u53d1\u6682\u505c/\u6269\u7f29\u5bb9\u7b49\u64cd\u4f5c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u5728\u6b64\u72b6\u6001\u3002 \u8fd0\u884c\u4e2d \u8d1f\u8f7d\u4e0b\u7684\u6240\u6709\u5b9e\u4f8b\u90fd\u5728\u8fd0\u884c\u4e2d\u4e14\u526f\u672c\u6570\u4e0e\u7528\u6237\u9884\u5b9a\u4e49\u7684\u6570\u91cf\u4e00\u81f4\u65f6\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5220\u9664\u4e2d \u6267\u884c\u5220\u9664\u64cd\u4f5c\u65f6\uff0c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\uff0c\u76f4\u5230\u5220\u9664\u5b8c\u6210\u3002 \u5f02\u5e38 \u56e0\u4e3a\u67d0\u4e9b\u539f\u56e0\u65e0\u6cd5\u53d6\u5f97\u5de5\u4f5c\u8d1f\u8f7d\u7684\u72b6\u6001\u3002\u8fd9\u79cd\u60c5\u51b5\u901a\u5e38\u662f\u56e0\u4e3a\u4e0e Pod \u6240\u5728\u4e3b\u673a\u901a\u4fe1\u5931\u8d25\u3002 \u672a\u5c31\u7eea \u5bb9\u5668\u5904\u4e8e\u5f02\u5e38\uff0cpending \u72b6\u6001\u65f6\uff0c\u56e0\u672a\u77e5\u9519\u8bef\u5bfc\u81f4\u8d1f\u8f7d\u65e0\u6cd5\u542f\u52a8\u65f6\u663e\u793a\u6b64\u72b6\u6001"},{"location":"end-user/kpanda/workloads/pod-config/workload-status.html#_4","title":"\u4efb\u52a1\u72b6\u6001","text":"\u72b6\u6001 \u63cf\u8ff0 \u7b49\u5f85\u4e2d \u4efb\u52a1\u521b\u5efa\u6b63\u5728\u8fdb\u884c\u4e2d\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u6267\u884c\u4e2d \u4efb\u52a1\u6b63\u5728\u6267\u884c\u4e2d\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u6267\u884c\u5b8c\u6210 \u4efb\u52a1\u6267\u884c\u5b8c\u6210\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5220\u9664\u4e2d \u89e6\u53d1\u5220\u9664\u64cd\u4f5c\uff0c\u5de5\u4f5c\u8d1f\u8f7d\u5904\u5728\u6b64\u72b6\u6001\u3002 \u5f02\u5e38 \u56e0\u4e3a\u67d0\u4e9b\u539f\u56e0\u65e0\u6cd5\u53d6\u5f97 Pod \u7684\u72b6\u6001\u3002\u8fd9\u79cd\u60c5\u51b5\u901a\u5e38\u662f\u56e0\u4e3a\u4e0e Pod \u6240\u5728\u4e3b\u673a\u901a\u4fe1\u5931\u8d25\u3002"},{"location":"end-user/kpanda/workloads/pod-config/workload-status.html#_5","title":"\u5b9a\u65f6\u4efb\u52a1\u72b6\u6001","text":"\u72b6\u6001 \u63cf\u8ff0 \u7b49\u5f85\u4e2d \u5b9a\u65f6\u4efb\u52a1\u521b\u5efa\u6b63\u5728\u8fdb\u884c\u4e2d\uff0c\u5b9a\u65f6\u4efb\u52a1\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5df2\u542f\u52a8 \u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\u6210\u529f\u540e\uff0c\u6b63\u5e38\u8fd0\u884c\u6216\u5c06\u5df2\u6682\u505c\u7684\u4efb\u52a1\u542f\u52a8\u65f6\u5b9a\u65f6\u4efb\u52a1\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5df2\u505c\u6b62 \u6267\u884c\u505c\u6b62\u4efb\u52a1\u64cd\u4f5c\u65f6\uff0c\u5b9a\u65f6\u4efb\u52a1\u5904\u4e8e\u6b64\u72b6\u6001\u3002 \u5220\u9664\u4e2d \u89e6\u53d1\u5220\u9664\u64cd\u4f5c\uff0c\u5b9a\u65f6\u4efb\u52a1\u5904\u5728\u6b64\u72b6\u6001\u3002

                                                                          \u5f53\u5de5\u4f5c\u8d1f\u8f7d\u5904\u4e8e\u5f02\u5e38\u6216\u672a\u5c31\u7eea\u72b6\u6001\u65f6\uff0c\u60a8\u53ef\u4ee5\u901a\u8fc7\u5c06\u9f20\u6807\u79fb\u52a8\u5230\u8d1f\u8f7d\u7684\u72b6\u6001\u503c\u4e0a\uff0c\u7cfb\u7edf\u5c06\u901a\u8fc7\u63d0\u793a\u6846\u5c55\u793a\u66f4\u52a0\u8be6\u7ec6\u7684\u9519\u8bef\u4fe1\u606f\u3002\u60a8\u4e5f\u53ef\u4ee5\u901a\u8fc7\u67e5\u770b\u65e5\u5fd7\u6216\u4e8b\u4ef6\u6765\u83b7\u53d6\u5de5\u4f5c\u8d1f\u8f7d\u7684\u76f8\u5173\u8fd0\u884c\u4fe1\u606f\u3002

                                                                          "},{"location":"end-user/register/index.html","title":"\u7528\u6237\u6ce8\u518c","text":"

                                                                          \u65b0\u7528\u6237\u9996\u6b21\u4f7f\u7528 AI \u7b97\u529b\u5e73\u53f0\u9700\u8981\u8fdb\u884c\u6ce8\u518c\u3002

                                                                          "},{"location":"end-user/register/index.html#_2","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                                          • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                                                          • \u5df2\u5f00\u542f\u90ae\u7bb1\u6ce8\u518c\u529f\u80fd
                                                                          • \u6709\u4e00\u4e2a\u53ef\u7528\u7684\u90ae\u7bb1
                                                                          "},{"location":"end-user/register/index.html#_3","title":"\u90ae\u7bb1\u6ce8\u518c\u6b65\u9aa4","text":"
                                                                          1. \u6253\u5f00 AI \u7b97\u529b\u5e73\u53f0\u9996\u9875 https://ai.isuanova.com/\uff0c\u70b9\u51fb \u6ce8\u518c

                                                                          2. \u952e\u5165\u7528\u6237\u540d\u3001\u5bc6\u7801\u3001\u90ae\u7bb1\u540e\u70b9\u51fb \u6ce8\u518c

                                                                          3. \u7cfb\u7edf\u63d0\u793a\u53d1\u9001\u4e86\u4e00\u5c01\u90ae\u4ef6\u5230\u60a8\u7684\u90ae\u7bb1\u3002

                                                                          4. \u767b\u5f55\u81ea\u5df1\u7684\u90ae\u7bb1\uff0c\u627e\u5230\u90ae\u4ef6\uff0c\u70b9\u51fb\u94fe\u63a5\u3002

                                                                          5. \u606d\u559c\uff0c\u60a8\u6210\u529f\u8fdb\u5165\u4e86 AI \u7b97\u529b\u5e73\u53f0\uff0c\u73b0\u5728\u53ef\u4ee5\u5f00\u59cb\u60a8\u7684 AI \u4e4b\u65c5\u4e86\u3002

                                                                          "},{"location":"end-user/share/notebook.html","title":"\u4f7f\u7528 Notebook","text":"

                                                                          Notebook \u901a\u5e38\u6307\u7684\u662f Jupyter Notebook \u6216\u7c7b\u4f3c\u7684\u4ea4\u4e92\u5f0f\u8ba1\u7b97\u73af\u5883\u3002 \u8fd9\u662f\u4e00\u79cd\u975e\u5e38\u6d41\u884c\u7684\u5de5\u5177\uff0c\u5e7f\u6cdb\u7528\u4e8e\u6570\u636e\u79d1\u5b66\u3001\u673a\u5668\u5b66\u4e60\u548c\u6df1\u5ea6\u5b66\u4e60\u7b49\u9886\u57df\u3002 \u672c\u9875\u8bf4\u660e\u5982\u4f55\u5728\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0\u4e2d\u4f7f\u7528 Notebook\u3002

                                                                          "},{"location":"end-user/share/notebook.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                                          • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                                                          • \u7528\u6237\u5df2\u6210\u529f\u6ce8\u518c
                                                                          • \u7ba1\u7406\u5458\u4e3a\u7528\u6237\u5206\u914d\u4e86\u5de5\u4f5c\u7a7a\u95f4
                                                                          • \u5df2\u51c6\u5907\u597d\u6570\u636e\u96c6\uff08\u4ee3\u7801\u3001\u6570\u636e\u7b49\uff09
                                                                          "},{"location":"end-user/share/notebook.html#notebook_1","title":"\u521b\u5efa\u548c\u4f7f\u7528 Notebook \u5b9e\u4f8b","text":"
                                                                          1. \u4ee5 \u7ba1\u7406\u5458\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0
                                                                          2. \u5bfc\u822a\u81f3 AI Lab -> \u8fd0\u7ef4\u7ba1\u7406 -> \u961f\u5217\u7ba1\u7406 \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae

                                                                          3. \u952e\u5165\u540d\u79f0\uff0c\u9009\u62e9\u96c6\u7fa4\u3001\u5de5\u4f5c\u7a7a\u95f4\u548c\u914d\u989d\u540e\uff0c\u70b9\u51fb \u786e\u5b9a

                                                                          4. \u4ee5 \u7528\u6237\u8eab\u4efd \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5bfc\u822a\u81f3 AI Lab -> Notebook \uff0c\u70b9\u51fb\u53f3\u4fa7\u7684 \u521b\u5efa \u6309\u94ae

                                                                          5. \u914d\u7f6e\u5404\u9879\u53c2\u6570\u540e\u70b9\u51fb \u786e\u5b9a

                                                                            \u57fa\u672c\u4fe1\u606f\u8d44\u6e90\u914d\u7f6e\u9ad8\u7ea7\u914d\u7f6e

                                                                            \u952e\u5165\u540d\u79f0\uff0c\u9009\u62e9\u96c6\u7fa4\u3001\u547d\u540d\u7a7a\u95f4\uff0c\u9009\u62e9\u521a\u521b\u5efa\u7684\u961f\u5217\uff0c\u70b9\u51fb \u4e00\u952e\u521d\u59cb\u5316

                                                                            \u9009\u62e9 Notebook \u7c7b\u578b\uff0c\u914d\u7f6e\u5185\u5b58\u3001CPU\uff0c\u5f00\u542f GPU\uff0c\u521b\u5efa\u548c\u914d\u7f6e PVC\uff1a

                                                                            \u5f00\u542f SSH \u5916\u7f51\u8bbf\u95ee\uff1a

                                                                          6. \u81ea\u52a8\u8df3\u8f6c\u5230 Notebook \u5b9e\u4f8b\u5217\u8868\uff0c\u70b9\u51fb\u5b9e\u4f8b\u540d\u79f0

                                                                          7. \u8fdb\u5165 Notebook \u5b9e\u4f8b\u8be6\u60c5\u9875\uff0c\u70b9\u51fb\u53f3\u4e0a\u89d2\u7684 \u6253\u5f00 \u6309\u94ae

                                                                          8. \u8fdb\u5165\u4e86 Notebook \u5f00\u53d1\u73af\u5883\uff0c\u6bd4\u5982\u5728 /home/jovyan \u76ee\u5f55\u6302\u8f7d\u4e86\u6301\u4e45\u5377\uff0c\u53ef\u4ee5\u901a\u8fc7 git \u514b\u9686\u4ee3\u7801\uff0c\u901a\u8fc7 SSH \u8fde\u63a5\u540e\u4e0a\u4f20\u6570\u636e\u7b49\u3002

                                                                          "},{"location":"end-user/share/notebook.html#ssh-notebook","title":"\u901a\u8fc7 SSH \u8bbf\u95ee Notebook \u5b9e\u4f8b","text":"
                                                                          1. \u5728\u81ea\u5df1\u7684\u7535\u8111\u4e0a\u751f\u6210 SSH \u5bc6\u94a5\u5bf9

                                                                            \u5728\u81ea\u5df1\u7535\u8111\u4e0a\u6253\u5f00\u547d\u4ee4\u884c\uff0c\u6bd4\u5982\u5728 Windows \u4e0a\u6253\u5f00 git bash\uff0c\u8f93\u5165 ssh-keygen.exe -t rsa\uff0c\u7136\u540e\u4e00\u8def\u56de\u8f66\u3002

                                                                          2. \u901a\u8fc7 cat ~/.ssh/id_rsa.pub \u7b49\u547d\u4ee4\u67e5\u770b\u5e76\u590d\u5236\u516c\u94a5

                                                                          3. \u4ee5\u7528\u6237\u8eab\u4efd\u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5728\u53f3\u4e0a\u89d2\u70b9\u51fb \u4e2a\u4eba\u4e2d\u5fc3 -> SSH \u516c\u94a5 -> \u5bfc\u5165 SSH \u516c\u94a5

                                                                          4. \u8fdb\u5165 Notebook \u5b9e\u4f8b\u7684\u8be6\u60c5\u9875\uff0c\u590d\u5236 SSH \u7684\u94fe\u63a5

                                                                          5. \u5728\u5ba2\u6237\u7aef\u4f7f\u7528 SSH \u8bbf\u95ee Notebook \u5b9e\u4f8b

                                                                          \u4e0b\u4e00\u6b65\uff1a\u521b\u5efa\u8bad\u7ec3\u4efb\u52a1

                                                                          "},{"location":"end-user/share/workload.html","title":"\u521b\u5efa AI \u8d1f\u8f7d\u4f7f\u7528 GPU \u8d44\u6e90","text":"

                                                                          \u7ba1\u7406\u5458\u4e3a\u5de5\u4f5c\u7a7a\u95f4\u5206\u914d\u8d44\u6e90\u914d\u989d\u540e\uff0c\u7528\u6237\u5c31\u53ef\u4ee5\u521b\u5efa AI \u5de5\u4f5c\u8d1f\u8f7d\u6765\u4f7f\u7528 GPU \u7b97\u529b\u8d44\u6e90\u3002

                                                                          "},{"location":"end-user/share/workload.html#_1","title":"\u524d\u7f6e\u6761\u4ef6","text":"
                                                                          • \u5df2\u5b89\u88c5 AI \u7b97\u529b\u5e73\u53f0
                                                                          • \u7528\u6237\u5df2\u6210\u529f\u6ce8\u518c
                                                                          • \u7ba1\u7406\u5458\u4e3a\u7528\u6237\u5206\u914d\u4e86\u5de5\u4f5c\u7a7a\u95f4
                                                                          • \u7ba1\u7406\u5458\u4e3a\u5de5\u4f5c\u7a7a\u95f4\u8bbe\u7f6e\u4e86\u8d44\u6e90\u914d\u989d
                                                                          • \u7ba1\u7406\u5458\u5df2\u7ecf\u4e3a\u7528\u6237\u5206\u914d\u4e86\u4e00\u4e2a\u96c6\u7fa4
                                                                          "},{"location":"end-user/share/workload.html#ai","title":"\u521b\u5efa AI \u8d1f\u8f7d\u6b65\u9aa4","text":"
1. Log in to the AI platform as a user
2. Navigate to Container Management, select a namespace, click Workloads -> Deployments, and click the Create from Image button on the right

3. Configure the parameters, then click OK

  Basic Information / Container Configuration / Others

  Select your own namespace.

  Set the image; configure CPU, memory, GPU, and other resources; and set the startup command (see the YAML sketch after these steps).

  The service and advanced configurations can be left at their defaults.

4. You are automatically returned to the deployment list; click the workload name

5. On the detail page, you can see the GPU quota

6. You can also open the console and run the mx-smi command to inspect GPU resources

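For reference, a hedged sketch of the container resource section behind step 3; the GPU resource name is vendor-specific (nvidia.com/gpu is shown as an assumption, and vendors such as MetaX expose their own resource names):

containers:\n- name: ai-workload\n  image: pytorch/pytorch:latest  # illustrative image\n  command:\n  - python\n  - train.py\n  resources:\n    limits:\n      cpu: 4\n      memory: 16Gi\n      nvidia.com/gpu: 1  # vendor-specific GPU resource name\n
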
Next step: Use Notebook

                                                                          "},{"location":"openapi/index.html","title":"OpenAPI \u6587\u6863","text":"

                                                                          \u8fd9\u662f\u9762\u5411\u5f00\u53d1\u8005\u7684\u4e00\u4e9b OpenAPI \u6587\u6863\u3002

                                                                          • \u4e91\u4e3b\u673a OpenAPI \u6587\u6863
                                                                          • AI Lab OpenAPI \u6587\u6863
                                                                          • \u5bb9\u5668\u7ba1\u7406 OpenAPI \u6587\u6863
                                                                          • \u53ef\u89c2\u6d4b\u6027 OpenAPI \u6587\u6863
                                                                          • \u5168\u5c40\u7ba1\u7406 OpenAPI \u6587\u6863
                                                                          "},{"location":"openapi/index.html#openapi_1","title":"\u83b7\u53d6 OpenAPI \u8bbf\u95ee\u5bc6\u94a5","text":"

                                                                          \u8bbf\u95ee\u5bc6\u94a5\uff08Access Key\uff09\u53ef\u7528\u4e8e\u8bbf\u95ee OpenAPI \u548c\u6301\u7eed\u53d1\u5e03\uff0c\u7528\u6237\u53ef\u5728\u4e2a\u4eba\u4e2d\u5fc3\u53c2\u7167\u4ee5\u4e0b\u6b65\u9aa4\u83b7\u53d6\u5bc6\u94a5\u5e76\u8bbf\u95ee API\u3002

                                                                          \u767b\u5f55 AI \u7b97\u529b\u5e73\u53f0\uff0c\u5728\u53f3\u4e0a\u89d2\u7684\u4e0b\u62c9\u83dc\u5355\u4e2d\u627e\u5230 \u4e2a\u4eba\u4e2d\u5fc3 \uff0c\u53ef\u4ee5\u5728 \u8bbf\u95ee\u5bc6\u94a5 \u9875\u9762\u7ba1\u7406\u8d26\u53f7\u7684\u8bbf\u95ee\u5bc6\u94a5\u3002

                                                                          Info

The access key information is displayed only once. If you forget your access key, you will need to create a new one.

                                                                          "},{"location":"openapi/index.html#api","title":"\u4f7f\u7528\u5bc6\u94a5\u8bbf\u95ee API","text":"

                                                                          \u5728\u8bbf\u95ee\u7b97\u4e30 AI \u7b97\u529b\u5e73\u53f0openAPI \u65f6\uff0c\u5728\u8bf7\u6c42\u4e2d\u52a0\u4e0a\u8bf7\u6c42\u5934 Authorization:Bearer ${token} \u4ee5\u6807\u8bc6\u8bbf\u95ee\u8005\u7684\u8eab\u4efd\uff0c \u5176\u4e2d ${token} \u662f\u4e0a\u4e00\u6b65\u4e2d\u83b7\u53d6\u5230\u7684\u5bc6\u94a5\u3002

                                                                          \u8bf7\u6c42\u793a\u4f8b

                                                                          curl -X GET -H 'Authorization:Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IkRKVjlBTHRBLXZ4MmtQUC1TQnVGS0dCSWc1cnBfdkxiQVVqM2U3RVByWnMiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE2NjE0MTU5NjksImlhdCI6MTY2MDgxMTE2OSwiaXNzIjoiZ2hpcHBvLmlvIiwic3ViIjoiZjdjOGIxZjUtMTc2MS00NjYwLTg2MWQtOWI3MmI0MzJmNGViIiwicHJlZmVycmVkX3VzZXJuYW1lIjoiYWRtaW4iLCJncm91cHMiOltdfQ.RsUcrAYkQQ7C6BxMOrdD3qbBRUt0VVxynIGeq4wyIgye6R8Ma4cjxG5CbU1WyiHKpvIKJDJbeFQHro2euQyVde3ygA672ozkwLTnx3Tu-_mB1BubvWCBsDdUjIhCQfT39rk6EQozMjb-1X1sbLwzkfzKMls-oxkjagI_RFrYlTVPwT3Oaw-qOyulRSw7Dxd7jb0vINPq84vmlQIsI3UuTZSNO5BCgHpubcWwBss-Aon_DmYA-Et_-QtmPBA3k8E2hzDSzc7eqK0I68P25r9rwQ3DeKwD1dbRyndqWORRnz8TLEXSiCFXdZT2oiMrcJtO188Ph4eLGut1-4PzKhwgrQ' https://demo-dev.daocloud.io/apis/ghippo.io/v1alpha1/users?page=1&pageSize=10 -k\n

Response example

                                                                          {\n    \"items\": [\n        {\n            \"id\": \"a7cfd010-ebbe-4601-987f-d098d9ef766e\",\n            \"name\": \"a\",\n            \"email\": \"\",\n            \"description\": \"\",\n            \"firstname\": \"\",\n            \"lastname\": \"\",\n            \"source\": \"locale\",\n            \"enabled\": true,\n            \"createdAt\": \"1660632794800\",\n            \"updatedAt\": \"0\",\n            \"lastLoginAt\": \"\"\n        }\n    ],\n    \"pagination\": {\n        \"page\": 1,\n        \"pageSize\": 10,\n        \"total\": 1\n    }\n}\n
                                                                          "},{"location":"openapi/baize/index.html","title":"AI Lab OpenAPI \u6587\u6863","text":""},{"location":"openapi/ghippo/index.html","title":"\u5168\u5c40\u7ba1\u7406 OpenAPI \u6587\u6863","text":""},{"location":"openapi/insight/index.html","title":"\u53ef\u89c2\u6d4b\u6027 OpenAPI \u6587\u6863","text":""},{"location":"openapi/kpanda/index.html","title":"\u5bb9\u5668\u7ba1\u7406 OpenAPI \u6587\u6863","text":""},{"location":"openapi/virtnest/index.html","title":"\u4e91\u4e3b\u673a OpenAPI \u6587\u6863","text":""},{"location":"stylesheets/tags.html","title":"Tags","text":"

                                                                          Following is a list of relevant tags:

                                                                          [TAGS]

                                                                          "},{"location":"en/index.html","title":"Suanova Website for AI Platform","text":"

                                                                          This is the website for the Suanova AI Platform.

                                                                          • User Manual: Develop AI algorithms, build training and inference jobs using cloud hosts in a containerized environment.
                                                                          • Administrator Manual: Ensure smooth and efficient operation of the platform for containerized end users.
                                                                          • Developer Manual: A compilation of OpenAPI manuals for five modules.

                                                                          "},{"location":"en/admin/index.html","title":"Suanova AI Platform - Administrator","text":"

                                                                          This is the operation and maintenance documentation for the Suanova AI Platform aimed at administrators.

                                                                          • Cloud Host

                                                                            A cloud host is a virtual machine deployed in the cloud.

                                                                            • Manage Cloud Hosts
                                                                            • Cloud Host vGPU
                                                                            • Cloud Host Templates
• Import Cloud Hosts from VMware
                                                                          • Container Management

                                                                            Manage K8s clusters, nodes, applications, resources, and permissions.

                                                                            • Create Cluster
                                                                            • Add Worker Nodes
                                                                            • Manage Helm Apps
                                                                            • HPA Horizontal Scaling
                                                                          • AI Lab

                                                                            Manage AI resources and queues.

                                                                            • Manage Resources
                                                                            • Manage Queues
                                                                            • Best Practices for AI Training and Deployment
                                                                            • AI Lab Troubleshooting
                                                                          • Insight

                                                                            Understand Insight resources, configuration, and troubleshooting.

                                                                            • Resource Planning Deployment
                                                                            • Install and Upgrade
                                                                            • Compatibility Test
                                                                            • Frequently Asked Questions
                                                                          • Global Management

                                                                            Control access permissions for users, user groups, workspaces, resources, etc.

                                                                            • Bind Workspace
                                                                            • Allocate Resources to Workspaces
                                                                            • Audit Logs
                                                                            • Platform Settings

                                                                          "},{"location":"en/admin/baize/best-practice/add-scheduler.html","title":"Add Job Scheduler","text":"

                                                                          AI Lab provides a job scheduler to help you better manage jobs. In addition to the basic scheduler, it also supports custom schedulers.

                                                                          "},{"location":"en/admin/baize/best-practice/add-scheduler.html#introduction-to-job-scheduler","title":"Introduction to Job Scheduler","text":"

                                                                          In Kubernetes, the job scheduler is responsible for deciding which node to assign a Pod to. It considers various factors such as resource requirements, hardware/software constraints, affinity/anti-affinity rules, and data locality.

                                                                          The default scheduler is a core component in a Kubernetes cluster that decides which node a Pod should run on. Let's delve into its working principles, features, and configuration methods.

                                                                          "},{"location":"en/admin/baize/best-practice/add-scheduler.html#scheduler-workflow","title":"Scheduler Workflow","text":"

                                                                          The workflow of the default scheduler can be divided into two main phases: filtering and scoring.

                                                                          "},{"location":"en/admin/baize/best-practice/add-scheduler.html#filtering-phase","title":"Filtering Phase","text":"

                                                                          The scheduler traverses all nodes and excludes those that do not meet the Pod's requirements, considering factors such as:

                                                                          • Resource requirements
                                                                          • Node selectors
                                                                          • Node affinity
                                                                          • Taints and tolerations

                                                                          These parameters can be set through advanced configurations when creating a job.

                                                                          "},{"location":"en/admin/baize/best-practice/add-scheduler.html#scoring-phase","title":"Scoring Phase","text":"

                                                                          The scheduler scores the nodes that passed the filtering phase and selects the highest-scoring node to run the Pod. Factors considered include:

                                                                          • Resource utilization
                                                                          • Pod affinity/anti-affinity
                                                                          • Node affinity
                                                                          "},{"location":"en/admin/baize/best-practice/add-scheduler.html#scheduler-plugins","title":"Scheduler Plugins","text":"

In addition to basic job scheduling capabilities, we also support Scheduler Plugins from Kubernetes SIG Scheduling, which maintains a set of scheduler plugins including Coscheduling (gang scheduling) and more.

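For example, the Coscheduling plugin adds a PodGroup resource so that a job's Pods are scheduled all-or-nothing; a hedged sketch of the upstream scheduler-plugins API (member Pods join the group via the scheduling.x-k8s.io/pod-group label):

apiVersion: scheduling.x-k8s.io/v1alpha1\nkind: PodGroup\nmetadata:\n  name: distributed-train  # illustrative name\nspec:\n  minMember: 4  # schedule only when all 4 Pods can be placed together\n
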
                                                                          "},{"location":"en/admin/baize/best-practice/add-scheduler.html#deploy-scheduler-plugins","title":"Deploy Scheduler Plugins","text":"

                                                                          To deploy a secondary scheduler plugin in a worker cluster, refer to Deploying Secondary Scheduler Plugin.

                                                                          "},{"location":"en/admin/baize/best-practice/add-scheduler.html#enable-scheduler-plugins-in-ai-lab","title":"Enable Scheduler Plugins in AI Lab","text":"

                                                                          Danger

                                                                          Improper operations when adding scheduler plugins may affect the stability of the entire cluster. It is recommended to test in a test environment or contact our technical support team.

                                                                          Note that if you wish to use more scheduler plugins in training jobs, you need to manually install them successfully in the worker cluster first. Then, when deploying the baize-agent in the cluster, add the proper scheduler plugin configuration.

Through the Helm Apps UI in container management, you can easily deploy scheduler plugins in the cluster.

Then click Install in the top-right corner (if the baize-agent has already been deployed, update it from the Helm App list instead) and add the scheduler.

Pay attention to the parameter hierarchy of the scheduler. After adding it, click OK.

                                                                          Note: Do not omit this configuration when updating the baize-agent in the future.

                                                                          "},{"location":"en/admin/baize/best-practice/add-scheduler.html#specify-scheduler-when-creating-a-job","title":"Specify Scheduler When Creating a Job","text":"

                                                                          Once you have successfully deployed the proper scheduler plugin in the cluster and correctly added the proper scheduler configuration in the baize-agent, you can specify the scheduler when creating a job.

                                                                          If everything is set up correctly, you will see the scheduler plugin you deployed in the scheduler dropdown menu.

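Under the hood, the dropdown selection maps to the standard schedulerName field in the Pod spec; a hedged sketch, where the scheduler name depends on how your plugin was deployed:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: train-worker-0\nspec:\n  schedulerName: scheduler-plugins-scheduler  # assumption: the name your deployment registered\n  containers:\n  - name: worker\n    image: pytorch/pytorch:latest  # illustrative image\n
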
                                                                          This concludes the instructions for configuring and using the scheduler options in AI Lab.

                                                                          "},{"location":"en/admin/baize/best-practice/change-notebook-image.html","title":"Update Built-in Notebook Images","text":"

The Notebook provides multiple base images by default for developers to choose from; in most cases these meet developers' needs.

                                                                          DaoCloud provides a default Notebook image that contains all necessary development tools and resources.

                                                                          baize/baize-notebook\n

                                                                          This Notebook includes basic development tools. Taking baize-notebook:v0.5.0 (May 30, 2024) as an example, the relevant dependencies and versions are as follows:

| Dependency | Version | Description |
| --- | --- | --- |
| Ubuntu | 22.04.3 | Default OS |
| Python | 3.11.6 | Default Python version |
| pip | 23.3.1 | |
| conda (mamba) | 23.3.1 | |
| jupyterlab | 3.6.6 | JupyterLab image, providing a complete Notebook experience |
| codeserver | v4.89.1 | Mainstream Code development tool for a familiar experience |
| *baizectl | v0.5.0 | DaoCloud built-in CLI task management tool |
| *SSH | - | Supports local SSH direct access to the Notebook container |
| *kubectl | v1.27 | Kubernetes CLI for managing container resources within Notebook |

                                                                          Note

With each version iteration, the AI platform proactively maintains and updates this image.

                                                                          However, sometimes users may need custom images. This page explains how to update images and add them to the Notebook creation interface for selection.

                                                                          "},{"location":"en/admin/baize/best-practice/change-notebook-image.html#build-custom-images-for-reference-only","title":"Build Custom Images (For Reference Only)","text":"

                                                                          Note

                                                                          Building a new image requires using baize-notebook as the base image to ensure the Notebook runs properly.

                                                                          When building a custom image, it is recommended to first understand the Dockerfile of the baize-notebook image to better understand how to build a custom image.

                                                                          "},{"location":"en/admin/baize/best-practice/change-notebook-image.html#dockerfile-for-baize-notebook","title":"Dockerfile for baize-notebook","text":"
ARG BASE_IMG=docker.m.daocloud.io/kubeflownotebookswg/jupyter:v1.8.0\n\nFROM $BASE_IMG\n\nUSER root\n\n# install - useful linux packages\nRUN export DEBIAN_FRONTEND=noninteractive \\\n && apt-get -yq update \\\n && apt-get -yq install --no-install-recommends \\\n    openssh-server git git-lfs bash-completion \\\n && apt-get clean \\\n && rm -rf /var/lib/apt/lists/*\n\n# remove default s6 jupyterlab run script\nRUN rm -rf /etc/services.d/jupyterlab\n\n# install - useful jupyter plugins\nRUN mamba install -n base -y jupyterlab-language-pack-zh-cn \\\n  && mamba clean --all -y\n\nARG CODESERVER_VERSION=4.89.1\nARG TARGETARCH\n\nRUN curl -fsSL \"https://github.com/coder/code-server/releases/download/v$CODESERVER_VERSION/code-server_${CODESERVER_VERSION}_$TARGETARCH.deb\" -o /tmp/code-server.deb \\\n  && dpkg -i /tmp/code-server.deb \\\n  && rm -f /tmp/code-server.deb\n\nARG CODESERVER_PYTHON_VERSION=2024.4.1\nARG CODESERVER_JUPYTER_VERSION=2024.3.1\nARG CODESERVER_LANGUAGE_PACK_ZH_CN=1.89.0\nARG CODESERVER_YAML=1.14.0\nARG CODESERVER_DOTENV=1.0.1\nARG CODESERVER_EDITORCONFIG=0.16.6\nARG CODESERVER_TOML=0.19.1\nARG CODESERVER_GITLENS=15.0.4\n\n# configure for code-server extensions\n# # https://github.com/kubeflow/kubeflow/blob/709254159986d2cc99e675d0fad5a128ddeb0917/components/example-notebook-servers/codeserver-python/Dockerfile\n# # and\n# # https://github.com/kubeflow/kubeflow/blob/709254159986d2cc99e675d0fad5a128ddeb0917/components/example-notebook-servers/codeserver/Dockerfile\nRUN code-server --list-extensions --show-versions \\\n  && code-server --list-extensions --show-versions \\\n  && code-server \\\n    --install-extension MS-CEINTL.vscode-language-pack-zh-hans@$CODESERVER_LANGUAGE_PACK_ZH_CN \\\n    --install-extension ms-python.python@$CODESERVER_PYTHON_VERSION \\\n    --install-extension ms-toolsai.jupyter@$CODESERVER_JUPYTER_VERSION \\\n    --install-extension redhat.vscode-yaml@$CODESERVER_YAML \\\n    --install-extension mikestead.dotenv@$CODESERVER_DOTENV \\\n    --install-extension EditorConfig.EditorConfig@$CODESERVER_EDITORCONFIG \\\n    --install-extension tamasfe.even-better-toml@$CODESERVER_TOML \\\n    --install-extension eamodio.gitlens@$CODESERVER_GITLENS \\\n    --install-extension catppuccin.catppuccin-vsc-pack \\\n    --force \\\n  && code-server --list-extensions --show-versions\n\n# configure for code-server\nRUN mkdir -p /home/${NB_USER}/.local/share/code-server/User \\\n  && chown -R ${NB_USER}:users /home/${NB_USER} \\\n  && cat <<EOF > /home/${NB_USER}/.local/share/code-server/User/settings.json\n{\n  \"gitlens.showWelcomeOnInstall\": false,\n  \"workbench.colorTheme\": \"Catppuccin Mocha\",\n}\nEOF\n\nRUN mkdir -p /tmp_home/${NB_USER}/.local/share \\\n  && mv /home/${NB_USER}/.local/share/code-server /tmp_home/${NB_USER}/.local/share\n\n# set ssh configuration\nRUN mkdir -p /run/sshd \\\n && chown -R ${NB_USER}:users /etc/ssh \\\n && chown -R ${NB_USER}:users /run/sshd \\\n && sed -i \"/#\\?Port/s/^.*$/Port 2222/g\" /etc/ssh/sshd_config \\\n && sed -i \"/#\\?PasswordAuthentication/s/^.*$/PasswordAuthentication no/g\" /etc/ssh/sshd_config \\\n && sed -i \"/#\\?PubkeyAuthentication/s/^.*$/PubkeyAuthentication yes/g\" /etc/ssh/sshd_config \\\n && rclone_version=v1.65.0 && \\\n       arch=$(uname -m | sed -E 's/x86_64/amd64/g;s/aarch64/arm64/g') && \\\n       filename=rclone-${rclone_version}-linux-${arch} && \\\n       curl -fsSL https://github.com/rclone/rclone/releases/download/${rclone_version}/${filename}.zip -o ${filename}.zip && \\\n       unzip ${filename}.zip && mv ${filename}/rclone /usr/local/bin && rm -rf ${filename} ${filename}.zip\n\n# Init mamba\nRUN mamba init --system\n\n# init baize-base environment for essential python packages\nRUN mamba create -n baize-base -y python \\\n  && /opt/conda/envs/baize-base/bin/pip install tensorboard \\\n  && mamba clean --all -y \\\n  && ln -s /opt/conda/envs/baize-base/bin/tensorboard /usr/local/bin/tensorboard\n\n# prepare baize-runtime-env directory\nRUN mkdir -p /opt/baize-runtime-env \\\n  && chown -R ${NB_USER}:users /opt/baize-runtime-env\n\nARG APP\nARG PROD_NAME\nARG TARGETOS\n\nCOPY out/$TARGETOS/$TARGETARCH/data-loader /usr/local/bin/\nCOPY out/$TARGETOS/$TARGETARCH/baizectl /usr/local/bin/\n\nRUN chmod +x /usr/local/bin/baizectl /usr/local/bin/data-loader && \\\n    echo \"source /etc/bash_completion\" >> /opt/conda/etc/profile.d/conda.sh && \\\n    echo \"source <(baizectl completion bash)\" >> /opt/conda/etc/profile.d/conda.sh && \\\n    echo \"source <(kubectl completion bash)\" >> /opt/conda/etc/profile.d/conda.sh && \\\n    echo '[ -f /run/baize-env ] && export $(cat /run/baize-env | xargs)' >> /opt/conda/etc/profile.d/conda.sh && \\\n    echo 'alias conda=\"mamba\"' >> /opt/conda/etc/profile.d/conda.sh\n\nUSER ${NB_UID}\n
                                                                          "},{"location":"en/admin/baize/best-practice/change-notebook-image.html#build-your-image","title":"Build Your Image","text":"
                                                                          ARG BASE_IMG=release.daocloud.io/baize/baize-notebook:v0.5.0\n\nFROM $BASE_IMG\nUSER root\n\n# Do Customization\nRUN mamba install -n baize-base -y pytorch torchvision torchaudio cpuonly -c pytorch \\\n && mamba install -n baize-base -y tensorflow \\\n && mamba clean --all -y\n\nUSER ${NB_UID}\n
                                                                          "},{"location":"en/admin/baize/best-practice/change-notebook-image.html#add-to-the-notebook-image-list-helm","title":"Add to the Notebook Image List (Helm)","text":"

                                                                          Warning

                                                                          Note that this must be done by the platform administrator. Be cautious with changes.

                                                                          Currently, the image selector needs to be modified by updating the Helm parameters of baize. The specific steps are as follows:

                                                                          In the Helm Apps list of the kpanda-global-cluster global management cluster, find baize, enter the update page, and modify the Notebook image in the YAML parameters:

                                                                          Note the parameter modification path global.config.notebook_images:

                                                                          ...\nglobal:\n  ...\n  config:\n    notebook_images:\n      ...\n      names: release.daocloud.io/baize/baize-notebook:v0.5.0\n      # Add your image information here\n

                                                                          After the update is completed and the Helm App restarts successfully, you can see the new image in the Notebook creation interface image selection.

                                                                          "},{"location":"en/admin/baize/best-practice/checkpoint.html","title":"Checkpoint Mechanism and Usage","text":"

In practical deep learning scenarios, model training typically runs for an extended period, which places high demands on the stability and efficiency of distributed training tasks. An unexpected interruption during training can lose the model state and force the training process to start over. This wastes time and resources, which is especially costly in LLM training, and can also hurt the quality of the resulting model.

                                                                          The ability to save the model state during training, so that it can be restored in case of an interruption, becomes crucial. Checkpointing is the mainstream solution to this problem. This article will introduce the basic concepts of the Checkpoint mechanism and its usage in PyTorch and TensorFlow.

                                                                          "},{"location":"en/admin/baize/best-practice/checkpoint.html#what-is-a-checkpoint","title":"What is a Checkpoint?","text":"

                                                                          A checkpoint is a mechanism for saving the state of a model during training. By periodically saving checkpoints, you can restore the model in the following situations:

• Training is interrupted (e.g., a system crash or manual stop)
• You need to evaluate the model at a certain stage of training
• You want to reuse the model in different experiments
                                                                          "},{"location":"en/admin/baize/best-practice/checkpoint.html#pytorch","title":"PyTorch","text":"

                                                                          In PyTorch, torch.save and torch.load are the basic functions used for saving and loading models.

                                                                          "},{"location":"en/admin/baize/best-practice/checkpoint.html#save-checkpoints-in-pytorch","title":"Save Checkpoints in PyTorch","text":"

                                                                          In PyTorch, the state_dict is typically used to save the model's parameters. Here is a simple example:

                                                                          import torch\nimport torch.nn as nn\n\n# Assume you have a simple neural network\nclass SimpleModel(nn.Module):\n    def __init__(self):\n        super(SimpleModel, self).__init__()\n        self.fc = nn.Linear(10, 2)\n\n    def forward(self, x):\n        return self.fc(x)\n\n# Initialize model and optimizer\nmodel = SimpleModel()\noptimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n\n# Train the model...\n# Save checkpoint\ncheckpoint_path = 'model_checkpoint.pth'\ntorch.save({\n    'epoch': 10,\n    'model_state_dict': model.state_dict(),\n    'optimizer_state_dict': optimizer.state_dict(),\n    'loss': 0.02,\n}, checkpoint_path)\n
                                                                          "},{"location":"en/admin/baize/best-practice/checkpoint.html#restore-checkpoints-in-pytorch","title":"Restore Checkpoints in PyTorch","text":"

                                                                          When loading the model, you need to restore the model parameters and optimizer state, and then continue training or inference:

                                                                          # Restore checkpoint\ncheckpoint = torch.load('model_checkpoint.pth')\nmodel.load_state_dict(checkpoint['model_state_dict'])\noptimizer.load_state_dict(checkpoint['optimizer_state_dict'])\nepoch = checkpoint['epoch']\nloss = checkpoint['loss']\n\n# Continue training or inference...\n
                                                                          • model_state_dict: Model parameters
                                                                          • optimizer_state_dict: Optimizer state
                                                                          • epoch: Current training epoch
                                                                          • loss: Loss value
                                                                          • learning_rate: Learning rate
                                                                          • best_accuracy: Best accuracy
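A minimal sketch of adding such optional fields to the saved dictionary (the learning_rate expression is the standard PyTorch idiom for reading the current rate; best_accuracy is a hypothetical metric tracked in your own training loop):

torch.save({\n    'epoch': 10,\n    'model_state_dict': model.state_dict(),\n    'optimizer_state_dict': optimizer.state_dict(),\n    'loss': 0.02,\n    'learning_rate': optimizer.param_groups[0]['lr'],  # current learning rate\n    'best_accuracy': 0.91,  # hypothetical best validation accuracy\n}, 'model_checkpoint.pth')\n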
                                                                          "},{"location":"en/admin/baize/best-practice/checkpoint.html#tensorflow","title":"TensorFlow","text":"

                                                                          TensorFlow provides the tf.train.Checkpoint class to manage the saving and restoring of models and optimizers.

                                                                          "},{"location":"en/admin/baize/best-practice/checkpoint.html#save-checkpoints-in-tensorflow","title":"Save Checkpoints in TensorFlow","text":"

                                                                          Here is an example of saving a checkpoint in TensorFlow:

                                                                          import tensorflow as tf\n\n# Assume you have a simple model\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Dense(2, input_shape=(10,))\n])\noptimizer = tf.keras.optimizers.Adam(learning_rate=0.001)\n\n# Define checkpoint\ncheckpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)\ncheckpoint_dir = './checkpoints'\ncheckpoint_prefix = f'{checkpoint_dir}/ckpt'\n\n# Train the model...\n# Save checkpoint\ncheckpoint.save(file_prefix=checkpoint_prefix)\n

                                                                          Note

                                                                          Users of AI Lab can directly mount high-performance storage as the checkpoint directory to improve the speed of saving and restoring checkpoints.

                                                                          "},{"location":"en/admin/baize/best-practice/checkpoint.html#restore-checkpoints-in-tensorflow","title":"Restore Checkpoints in TensorFlow","text":"

                                                                          Load the checkpoint and restore the model and optimizer state:

                                                                          # Restore checkpoint\nlatest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)\ncheckpoint.restore(latest_checkpoint)\n\n# Continue training or inference...\n
                                                                          "},{"location":"en/admin/baize/best-practice/checkpoint.html#manage-checkpoints-in-distributed-training-with-tensorflow","title":"Manage Checkpoints in Distributed Training with TensorFlow","text":"

                                                                          In distributed training, TensorFlow manages checkpoints primarily through the following methods:

                                                                          • Using tf.train.Checkpoint and tf.train.CheckpointManager

                                                                            checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)\nmanager = tf.train.CheckpointManager(checkpoint, directory='/tmp/model', max_to_keep=3)\n
                                                                          • Saving checkpoints within a distributed strategy

                                                                            strategy = tf.distribute.MirroredStrategy()\nwith strategy.scope():\n    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)\n    manager = tf.train.CheckpointManager(checkpoint, directory='/tmp/model', max_to_keep=3)\n
                                                                          • Saving checkpoints only on the chief worker node

                                                                            if strategy.cluster_resolver.task_type == 'chief':\n    manager.save()\n
                                                                          • Special handling when using MultiWorkerMirroredStrategy

                                                                            strategy = tf.distribute.MultiWorkerMirroredStrategy()\nwith strategy.scope():\n    # Define model\n    ...\n    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)\n    manager = tf.train.CheckpointManager(checkpoint, '/tmp/model', max_to_keep=3)\n\ndef _chief_worker(task_type, task_id):\n    return task_type is None or task_type == 'chief' or (task_type == 'worker' and task_id == 0)\n\nif _chief_worker(strategy.cluster_resolver.task_type, strategy.cluster_resolver.task_id):\n    manager.save()\n
                                                                          • Using a distributed file system

                                                                            Ensure all worker nodes can access the same checkpoint directory, typically using a distributed file system such as HDFS or GCS.

• Controlling saving frequency

  Use tf.keras.callbacks.ModelCheckpoint and set the save_freq parameter to control how often checkpoints are written during training (see the sketch after this list).

                                                                          • Checkpoint restoration

                                                                            status = checkpoint.restore(manager.latest_checkpoint)\nstatus.assert_consumed()  # (1)!\n
                                                                            1. Ensure all variables are restored
                                                                          • Performance optimization

                                                                            • Enable mixed precision training using tf.train.experimental.enable_mixed_precision_graph_rewrite()
                                                                            • Adjust saving frequency to avoid too frequent I/O operations
                                                                            • Consider using tf.saved_model.save() to save the entire model, not just the weights
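As a concrete illustration of the save_freq option mentioned above, here is a minimal Keras sketch (a hedged example: the filepath pattern is an assumption, and save_freq=500 writes a checkpoint every 500 batches rather than every epoch):

import tensorflow as tf\n\n# Write a weights-only checkpoint every 500 batches\ncheckpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n    filepath='./checkpoints/ckpt-{epoch:02d}',\n    save_weights_only=True,\n    save_freq=500\n)\n\n# model.fit(train_dataset, epochs=10, callbacks=[checkpoint_callback])\n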
                                                                          "},{"location":"en/admin/baize/best-practice/checkpoint.html#considerations","title":"Considerations","text":"
                                                                          1. Regular Saving : Determine a suitable saving frequency based on training time and resource consumption, such as every epoch or every few training steps.

2. Save Multiple Checkpoints : Keep the latest few checkpoints to guard against file corruption or an unusable latest checkpoint (a rotation sketch follows this list).

                                                                          3. Record Metadata : Save additional information in the checkpoint, such as the epoch number and loss value, to better restore the training state.

                                                                          4. Use Version Control : Save checkpoints for different experiments to facilitate comparison and reuse.

                                                                          5. Validation and Testing : Use checkpoints for validation and testing at different training stages to ensure model performance and stability.
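As a sketch of point 2 above, a simple way to rotate PyTorch checkpoints so only the most recent few are kept (the directory layout, filename pattern, and max_to_keep value are illustrative assumptions):

import os\nimport glob\nimport torch\n\ndef save_with_rotation(state, checkpoint_dir, epoch, max_to_keep=3):\n    os.makedirs(checkpoint_dir, exist_ok=True)\n    torch.save(state, os.path.join(checkpoint_dir, f'ckpt-{epoch:04d}.pth'))\n    # Remove the oldest checkpoints beyond the retention limit\n    ckpts = sorted(glob.glob(os.path.join(checkpoint_dir, 'ckpt-*.pth')))\n    for old in ckpts[:-max_to_keep]:\n        os.remove(old)\n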

                                                                          "},{"location":"en/admin/baize/best-practice/checkpoint.html#conclusion","title":"Conclusion","text":"

                                                                          The checkpoint mechanism plays a crucial role in deep learning training. By effectively using the checkpoint features in PyTorch and TensorFlow, you can significantly improve the reliability and efficiency of training. The methods and best practices described in this article should help you better manage the training process of deep learning models.

                                                                          "},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html","title":"Deploy NFS for Preloading Dataset","text":"

                                                                          A Network File System (NFS) allows remote hosts to mount file systems over a network and interact with those file systems as though they are mounted locally. This enables system administrators to consolidate resources onto centralized servers on the network.

Dataset is a core feature provided by AI Lab. By abstracting data dependencies across the entire MLOps lifecycle into datasets, users can manage many types of data in one place, and training tasks can consume that data directly.

                                                                          When remote data is not within the worker cluster, datasets provide the capability to automatically preheat data, supporting data preloading from sources such as Git, S3, and HTTP to the local cluster.

                                                                          A storage service supporting the ReadWriteMany mode is needed for preloading remote data for the dataset, and it is recommended to deploy NFS within the cluster.

                                                                          This article mainly introduces how to quickly deploy an NFS service and add it as a StorageClass for the cluster.

                                                                          "},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html#preparation","title":"Preparation","text":"
• NFS uses the node's local storage as its data caching point by default, so make sure the disk has enough free space.
• Installation uses Helm and kubectl; please make sure both are already installed.
                                                                          "},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html#deployment-steps","title":"Deployment Steps","text":"

                                                                          Several components need to be installed:

                                                                          • NFS Server
                                                                          • csi-driver-nfs
                                                                          • StorageClass
                                                                          "},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html#initialize-namespace","title":"Initialize Namespace","text":"

                                                                          All system components will be installed in the nfs namespace, so it is necessary to create this namespace first.

                                                                          kubectl create namespace nfs\n
                                                                          "},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html#install-nfs-server","title":"Install NFS Server","text":"

                                                                          Here is a simple YAML deployment file that can be used directly.

                                                                          Note

Be sure to check the image: field and replace it with a mirror suited to your cluster's network location.

                                                                          nfs-server.yaml
                                                                          ---\nkind: Service\napiVersion: v1\nmetadata:\n  name: nfs-server\n  namespace: nfs\n  labels:\n    app: nfs-server\nspec:\n  type: ClusterIP\n  selector:\n    app: nfs-server\n  ports:\n    - name: tcp-2049\n      port: 2049\n      protocol: TCP\n    - name: udp-111\n      port: 111\n      protocol: UDP\n---\nkind: Deployment\napiVersion: apps/v1\nmetadata:\n  name: nfs-server\n  namespace: nfs\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: nfs-server\n  template:\n    metadata:\n      name: nfs-server\n      labels:\n        app: nfs-server\n    spec:\n      nodeSelector:\n        \"kubernetes.io/os\": linux\n      containers:\n        - name: nfs-server\n          image: itsthenetwork/nfs-server-alpine:latest\n          env:\n            - name: SHARED_DIRECTORY\n              value: \"/exports\"\n          volumeMounts:\n            - mountPath: /exports\n              name: nfs-vol\n          securityContext:\n            privileged: true\n          ports:\n            - name: tcp-2049\n              containerPort: 2049\n              protocol: TCP\n            - name: udp-111\n              containerPort: 111\n              protocol: UDP\n      volumes:\n        - name: nfs-vol\n          hostPath:\n            path: /nfsdata  # (1)!\n            type: DirectoryOrCreate\n
                                                                          1. Modify this to specify another path to store NFS shared data

                                                                          Save the above YAML as nfs-server.yaml, then run the following commands for deployment:

                                                                          kubectl -n nfs apply -f nfs-server.yaml\n\n# Check the deployment result\nkubectl -n nfs get pod,svc\n
                                                                          "},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html#install-csi-driver-nfs","title":"Install csi-driver-nfs","text":"

                                                                          Installing csi-driver-nfs requires the use of Helm, please ensure it is installed beforehand.

                                                                          # Add Helm repository\nhelm repo add csi-driver-nfs https://mirror.ghproxy.com/https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts\nhelm repo update csi-driver-nfs\n\n# Deploy csi-driver-nfs\n# The parameters here mainly optimize the image address to accelerate downloads in China\nhelm upgrade --install csi-driver-nfs csi-driver-nfs/csi-driver-nfs \\\n    --set image.nfs.repository=k8s.m.daocloud.io/sig-storage/nfsplugin \\\n    --set image.csiProvisioner.repository=k8s.m.daocloud.io/sig-storage/csi-provisioner \\\n    --set image.livenessProbe.repository=k8s.m.daocloud.io/sig-storage/livenessprobe \\\n    --set image.nodeDriverRegistrar.repository=k8s.m.daocloud.io/sig-storage/csi-node-driver-registrar \\\n    --namespace nfs \\\n    --version v4.5.0\n

                                                                          Warning

                                                                          Not all images of csi-nfs-controller support helm parameters, so the image field of the deployment needs to be manually modified. Change image: registry.k8s.io to image: k8s.dockerproxy.com to accelerate downloads in China.

                                                                          "},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html#create-storageclass","title":"Create StorageClass","text":"

                                                                          Save the following YAML as nfs-sc.yaml:

                                                                          nfs-sc.yaml
                                                                          apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: nfs-csi\nprovisioner: nfs.csi.k8s.io\nparameters:\n  server: nfs-server.nfs.svc.cluster.local\n  share: /\n  # csi.storage.k8s.io/provisioner-secret is only needed for providing mountOptions in DeleteVolume\n  # csi.storage.k8s.io/provisioner-secret-name: \"mount-options\"\n  # csi.storage.k8s.io/provisioner-secret-namespace: \"default\"\nreclaimPolicy: Delete\nvolumeBindingMode: Immediate\nmountOptions:\n  - nfsvers=4.1\n

Then run the following command:

                                                                          kubectl apply -f nfs-sc.yaml\n
                                                                          "},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html#test","title":"Test","text":"

Create a dataset, set its associated storage class to the NFS StorageClass created above, and configure a preloading method to preheat remote data into the cluster.

                                                                          After the dataset is successfully created, you can see that the dataset's status is preloading, and you can start using it after the preloading is completed.

                                                                          "},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html#faqs","title":"FAQs","text":""},{"location":"en/admin/baize/best-practice/deploy-nfs-in-worker.html#missing-necessary-nfs-client-software-sbinmount","title":"Missing Necessary NFS Client Software /sbin/mount","text":"
                                                                          bad option; for several filesystems (e.g. nfs, cifs) you might need a /sbin/mount.<type> helper program.\n

                                                                          On the nodes running Kubernetes, ensure that the NFS client is installed:


On Ubuntu/Debian, run the following commands to install the NFS client:

                                                                          sudo apt-get update\nsudo apt-get install nfs-common\n

On CentOS/RHEL, run the following command to install the NFS client:

                                                                          sudo yum install nfs-utils\n

                                                                          Check the NFS server configuration to ensure that the NFS server is running and configured correctly. You can try mounting manually to test:

                                                                          sudo mkdir -p /mnt/test\nsudo mount -t nfs <nfs-server>:/nfsdata /mnt/test\n
                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html","title":"Fine-tune the ChatGLM3 Model by Using AI Lab","text":"

                                                                          This page uses the ChatGLM3 model as an example to demonstrate how to use LoRA (Low-Rank Adaptation) to fine-tune the ChatGLM3 model within the AI Lab environment. The demo program is from the ChatGLM3 official example.

The general process of fine-tuning is: prepare the data, set up the environment, fine-tune and test in a Notebook, submit the fine-tuning task, and run inference with the resulting model.

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#environment-requirements","title":"Environment Requirements","text":"
                                                                          • GPU with at least 20GB memory, recommended RTX4090 or NVIDIA A/H series
                                                                          • At least 200GB of available disk space
                                                                          • At least 8-core CPU, recommended 16-core
                                                                          • 64GB RAM, recommended 128GB

                                                                          Info

                                                                          Before starting, ensure AI platform and AI Lab are correctly installed, GPU queue resources are successfully initialized, and computing resources are sufficient.

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#prepare-data","title":"Prepare Data","text":"

Use the dataset management feature provided by AI Lab to quickly preheat and persist the data required for fine-tuning large models, reducing the GPU time spent on data preparation and improving resource utilization.

                                                                          Create the required data resources on the dataset list page. These resources include the ChatGLM3 code and data files, all of which can be managed uniformly through the dataset list.

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#code-and-model-files","title":"Code and Model Files","text":"

                                                                          ChatGLM3 is a dialogue pre-training model jointly released by zhipuai.cn and Tsinghua University KEG Lab.

                                                                          First, pull the ChatGLM3 code repository and download the pre-training model for subsequent fine-tuning tasks.

                                                                          AI Lab will automatically preheat the data in the background to ensure quick data access for subsequent tasks.

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#advertisegen-dataset","title":"AdvertiseGen Dataset","text":"

In China, the data can be obtained directly from Tsinghua Cloud using the HTTP data source method.

                                                                          After creation, wait for the dataset to be preheated, which is usually quick and depends on your network conditions.

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#fine-tune-output-data","title":"Fine-tune Output Data","text":"

                                                                          You also need to prepare an empty dataset to store the model files output after the fine-tuning task is completed. Here, create an empty dataset, using PVC as an example.

                                                                          Warning

                                                                          Ensure to use a storage type that supports ReadWriteMany to allow quick access to resources for subsequent tasks.

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#set-up-environment","title":"Set up Environment","text":"

For model developers, preparing the Python dependencies required for model development is crucial. Traditionally, dependencies are either baked directly into the development tool's image or installed in the local environment, which leads to inconsistent environments and makes dependencies hard to manage and update.

                                                                          AI Lab provides environment management capabilities, decoupling Python environment dependency package management from development tools and task images, solving dependency management chaos and environment inconsistency issues.

                                                                          Here, use the environment management feature provided by AI Lab to create the environment required for ChatGLM3 fine-tuning for subsequent use.

                                                                          Warning

                                                                          1. The ChatGLM repository contains a requirements.txt file that includes the environment dependencies required for ChatGLM3 fine-tuning.
                                                                          2. This fine-tuning does not use the deepspeed and mpi4py packages. It is recommended to comment them out in the requirements.txt file to avoid compilation failures.

                                                                          In the environment management list, you can quickly create a Python environment and complete the environment creation through a simple form configuration; a Python 3.11.x environment is required here.

                                                                          Since CUDA is required for this experiment, GPU resources need to be configured here to preheat the necessary resource dependencies.

Creating the environment involves downloading a series of Python dependencies, and download speeds vary by network location; using a local mirror can speed up the download.

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#use-notebook-as-ide","title":"Use Notebook as IDE","text":"

                                                                          AI Lab provides Notebook as an IDE feature, allowing users to write, run, and view code results directly in the browser. This is very suitable for development in data analysis, machine learning, and deep learning fields.

                                                                          You can use the JupyterLab Notebook provided by AI Lab for the ChatGLM3 fine-tuning task.

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#create-jupyterlab-notebook","title":"Create JupyterLab Notebook","text":"

                                                                          In the Notebook list, you can create a Notebook according to the page operation guide. Note that you need to configure the proper Notebook resource parameters according to the resource requirements mentioned earlier to avoid resource issues affecting the fine-tuning process.

                                                                          Note

                                                                          When creating a Notebook, you can directly mount the preloaded model code dataset and environment, greatly saving data preparation time.

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#mount-dataset-and-code","title":"Mount Dataset and Code","text":"

                                                                          Note: The ChatGLM3 code files are mounted to the /home/jovyan/ChatGLM3 directory, and you also need to mount the AdvertiseGen dataset to the /home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen directory to allow the fine-tuning task to access the data.

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#mount-pvc-to-model-output-folder","title":"Mount PVC to Model Output Folder","text":"

                                                                          The model output location used this time is the /home/jovyan/ChatGLM3/finetune_demo/output directory. You can mount the previously created PVC dataset to this directory, so the trained model can be saved to the dataset for subsequent inference tasks.

                                                                          After creation, you can see the Notebook interface where you can write, run, and view code results directly in the Notebook.

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#fine-tune-chatglm3","title":"Fine-tune ChatGLM3","text":"

                                                                          Once in the Notebook, you can find the previously mounted dataset and code in the File Browser option in the Notebook sidebar. Locate the ChatGLM3 folder.

                                                                          You will find the fine-tuning code for ChatGLM3 in the finetune_demo folder. Open the lora_finetune.ipynb file, which contains the fine-tuning code for ChatGLM3.

                                                                          First, follow the instructions in the README.md file to understand the entire fine-tuning process. It is recommended to read it thoroughly to ensure that the basic environment dependencies and data preparation work are completed.

                                                                          Open the terminal and use conda to switch to the preheated environment, ensuring consistency with the JupyterLab Kernel for subsequent code execution.

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#preprocess-data","title":"Preprocess Data","text":"

First, preprocess the AdvertiseGen dataset, reformatting the data to meet the LoRA fine-tuning input format. Save the processed data to the AdvertiseGen_fix folder.

import json\nfrom typing import Union\nfrom pathlib import Path\n\ndef _resolve_path(path: Union[str, Path]) -> Path:\n    return Path(path).expanduser().resolve()\n\ndef _mkdir(dir_name: Union[str, Path]):\n    dir_name = _resolve_path(dir_name)\n    if not dir_name.is_dir():\n        dir_name.mkdir(parents=True, exist_ok=False)\n\ndef convert_adgen(data_dir: Union[str, Path], save_dir: Union[str, Path]):\n    def _convert(in_file: Path, out_file: Path):\n        _mkdir(out_file.parent)\n        with open(in_file, encoding='utf-8') as fin:\n            with open(out_file, 'wt', encoding='utf-8') as fout:\n                for line in fin:\n                    dct = json.loads(line)\n                    sample = {'conversations': [{'role': 'user', 'content': dct['content']},\n                                                {'role': 'assistant', 'content': dct['summary']}]}\n                    fout.write(json.dumps(sample, ensure_ascii=False) + '\\n')\n\n    data_dir = _resolve_path(data_dir)\n    save_dir = _resolve_path(save_dir)\n\n    train_file = data_dir / 'train.json'\n    if train_file.is_file():\n        out_file = save_dir / train_file.relative_to(data_dir)\n        _convert(train_file, out_file)\n\n    dev_file = data_dir / 'dev.json'\n    if dev_file.is_file():\n        out_file = save_dir / dev_file.relative_to(data_dir)\n        _convert(dev_file, out_file)\n\nconvert_adgen('data/AdvertiseGen', 'data/AdvertiseGen_fix')\n

To save debugging time, you can reduce the number of entries in /home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen_fix/dev.json to 50. The data is in JSON Lines format (one JSON object per line), making it easy to process, as in the sketch below.
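For example, a small sketch that keeps only the first 50 lines of the file (the path matches the note above):

from pathlib import Path\n\ndev_file = Path('/home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen_fix/dev.json')\nlines = dev_file.read_text(encoding='utf-8').splitlines(keepends=True)[:50]\ndev_file.write_text(''.join(lines), encoding='utf-8')\n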

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#local-lora-fine-tuning-test","title":"Local LoRA Fine-tuning Test","text":"

After preprocessing the data, you can proceed with the fine-tuning test. Configure the fine-tuning parameters in the /home/jovyan/ChatGLM3/finetune_demo/configs/lora.yaml file and review the key training parameters before starting.

                                                                          Open a new terminal window and use the following command for local fine-tuning testing. Ensure that the parameter configurations and paths are correct:

                                                                          !CUDA_VISIBLE_DEVICES=0 NCCL_P2P_DISABLE=\"1\" NCCL_IB_DISABLE=\"1\" python finetune_hf.py data/AdvertiseGen_fix ./chatglm3-6b configs/lora.yaml\n

                                                                          In this command:

                                                                          • finetune_hf.py is the fine-tuning script in the ChatGLM3 code
                                                                          • data/AdvertiseGen_fix is your preprocessed dataset
                                                                          • ./chatglm3-6b is your pre-trained model path
                                                                          • configs/lora.yaml is the fine-tuning configuration file

During fine-tuning, you can use the nvidia-smi command to check GPU memory usage.
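A minimal polling sketch (assuming nvidia-smi is available inside the Notebook container; the 10-second interval and iteration count are arbitrary):

import subprocess\nimport time\n\n# Print used/total GPU memory once every 10 seconds\nfor _ in range(6):\n    result = subprocess.run(\n        ['nvidia-smi', '--query-gpu=memory.used,memory.total', '--format=csv,noheader'],\n        capture_output=True, text=True\n    )\n    print(result.stdout.strip())\n    time.sleep(10)\n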

                                                                          After fine-tuning is complete, an output directory will be generated in the finetune_demo directory, containing the fine-tuned model files. This way, the fine-tuned model files are saved to the previously created PVC dataset.

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#submit-fine-tuning-tasks","title":"Submit Fine-tuning Tasks","text":"

                                                                          After completing the local fine-tuning test and ensuring that your code and data are correct, you can submit the fine-tuning task to the AI Lab for large-scale training and fine-tuning tasks.

                                                                          Note

                                                                          This is the recommended model development and fine-tuning process: first, conduct local fine-tuning tests to ensure that the code and data are correct.

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#submit-fine-tuning-tasks-via-ui","title":"Submit Fine-tuning Tasks via UI","text":"

Use PyTorch to create a fine-tuning task. Select the cluster resources to use based on your actual situation, making sure they meet the resource requirements mentioned earlier.

• Image: You can directly use the image provided by baize.
                                                                          • Startup command: Based on your experience using LoRA fine-tuning in the Notebook, the code files and data are in the /home/jovyan/ChatGLM3/finetune_demo directory, so you can directly use this path:

                                                                            bash -c \"cd /home/jovyan/ChatGLM3/finetune_demo && CUDA_VISIBLE_DEVICES=0 NCCL_P2P_DISABLE=\"1\" NCCL_IB_DISABLE=\"1\" python finetune_hf.py data/AdvertiseGen_fix ./chatglm3-6b configs/lora.yaml\"\n
                                                                          • Mount environment: This way, the preloaded environment dependencies can be used not only in the Notebook but also in the tasks.

                                                                          • Dataset: Use the preheated dataset
                                                                            • Set the model output path to the previously created PVC dataset
                                                                            • Mount the AdvertiseGen dataset to the /home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen directory
                                                                          • Configure sufficient GPU resources to ensure the fine-tuning task runs smoothly
                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#check-task-status","title":"Check Task Status","text":"

                                                                          After successfully submitting the task, you can view the training progress of the task in real-time in the task list. You can see the task status, resource usage, logs, and other information.

                                                                          View task logs

                                                                          After the task is completed, you can view the fine-tuned model files in the data output dataset for subsequent inference tasks.

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#submit-tasks-via-baizectl","title":"Submit Tasks via baizectl","text":"

                                                                          AI Lab's Notebook supports using the baizectl command-line tool without authentication. If you prefer using CLI, you can directly use the baizectl command-line tool to submit tasks.

baizectl job submit --name finetunel-chatglm3 -t PYTORCH \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --resources cpu=8,memory=16Gi,nvidia.com/gpu=1 \\\n    --workers 1 \\\n    --queue default \\\n    --working-dir /home/jovyan/ChatGLM3 \\\n    --datasets AdvertiseGen:/home/jovyan/ChatGLM3/finetune_demo/data/AdvertiseGen  \\\n    --datasets output:/home/jovyan/ChatGLM3/finetune_demo/output  \\\n    --labels job_type=pytorch \\\n    --restart-policy on-failure \\\n    -- bash -c \"cd /home/jovyan/ChatGLM3/finetune_demo && CUDA_VISIBLE_DEVICES=0 NCCL_P2P_DISABLE=1 NCCL_IB_DISABLE=1 python finetune_hf.py data/AdvertiseGen_fix ./chatglm3-6b configs/lora.yaml\"\n

                                                                          For more information on using baizectl, refer to the baizectl Usage Documentation.

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#model-inference","title":"Model Inference","text":"

                                                                          After completing the fine-tuning task, you can use the fine-tuned model for inference tasks. Here, you can use the inference service provided by AI Lab to create an inference service with the output model.

                                                                          In the inference service list, you can create a new inference service. When selecting the model, choose the previously output dataset and configure the model path.

                                                                          Regarding model resource requirements and GPU resource requirements for inference services, configure them based on the model size and inference concurrency. Refer to the resource configuration of the previous fine-tuning tasks.

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#configure-model-runtime","title":"Configure Model Runtime","text":"

                                                                          Configuring the model runtime is crucial. Currently, AI Lab supports vLLM as the model inference service runtime, which can be directly selected.

                                                                          Tip

                                                                          vLLM supports a wide range of large language models. Visit vLLM for more information. These models can be easily used within AI Lab.

                                                                          After creation, you can see the created inference service in the inference service list. The model service list allows you to get the model's access address directly.

                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#test-the-model-service","title":"Test the Model Service","text":"

                                                                          Try using the curl command in the terminal to test the model service. Here, you can see the returned results, enabling you to use the model service for inference tasks.

curl -X POST http://10.20.100.210:31118/v2/models/chatglm3-6b/generate \\\n  -d '{\"text_input\": \"hello\", \"stream\": false, \"sampling_parameters\": \"{\\\"temperature\\\": 0.7, \\\"top_p\\\": 0.95, \\\"max_tokens\\\": 1024}\"}'\n
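The same request can also be issued from Python, shown here as a hedged sketch using the requests library (the service address and model name are taken from the curl example above; the service expects sampling_parameters as a JSON string, matching the curl payload):

import json\nimport requests\n\nresp = requests.post(\n    'http://10.20.100.210:31118/v2/models/chatglm3-6b/generate',\n    json={\n        'text_input': 'hello',\n        'stream': False,\n        # sampling_parameters is passed as a JSON-encoded string\n        'sampling_parameters': json.dumps({'temperature': 0.7, 'top_p': 0.95, 'max_tokens': 1024}),\n    },\n)\nprint(resp.json())\n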
                                                                          "},{"location":"en/admin/baize/best-practice/finetunel-llm.html#wrap-up","title":"Wrap up","text":"

                                                                          This page used ChatGLM3 as an example to quickly introduce and get you started with the AI Lab for model fine-tuning, using LoRA to fine-tune the ChatGLM3 model.

                                                                          AI Lab provides a wealth of features to help model developers quickly conduct model development, fine-tuning, and inference tasks. It also offers rich OpenAPI interfaces, facilitating integration with third-party application ecosystems.

                                                                          "},{"location":"en/admin/baize/best-practice/label-studio.html","title":"Deploy Label Studio","text":"

                                                                          Note

                                                                          Refer to the video tutorial: Data Labeling and Dataset Usage Instructions

                                                                          Label Studio is an open-source data labeling tool used for various machine learning and artificial intelligence jobs. Here is a brief introduction to Label Studio:

                                                                          • Supports labeling of various data types including images, audio, video, and text
                                                                          • Can be used for jobs such as object detection, image classification, speech transcription, and named entity recognition
                                                                          • Provides a customizable labeling interface
                                                                          • Supports various labeling formats and export options

                                                                          Label Studio offers a powerful data labeling solution for data scientists and machine learning engineers due to its flexibility and rich features.

                                                                          "},{"location":"en/admin/baize/best-practice/label-studio.html#deploy-to-ai-platform","title":"Deploy to AI platform","text":"

                                                                          To use Label Studio in AI Lab, it needs to be deployed to the Global Service Cluster. You can quickly deploy it using Helm.

                                                                          Note

                                                                          For more deployment details, refer to Deploy Label Studio on Kubernetes.

                                                                          1. Enter the Global Service Cluster, find Helm Apps -> Helm Repositories from the left navigation bar, click the Create Repository button, and fill in the following parameters:

                                                                          2. After successfully adding the repository, click the \u2507 on the right side of the list and select Sync Repository. Wait a moment to complete the synchronization. (This sync operation will also be used for future updates of Label Studio).

                                                                          3. Then navigate to the Helm Charts page, search for label-studio, and click the card.

                                                                          4. Choose the latest version and configure the installation parameters as shown below, naming it label-studio. It is recommended to create a new namespace. Switch the parameters to YAML and modify the configuration according to the instructions.

                                                                            global:\n  image:\n    repository: heartexlabs/label-studio   # Configure proxy address here if docker.io is inaccessible\n  extraEnvironmentVars:\n    LABEL_STUDIO_HOST: https://{Access_Address}/label-studio    # Use the AI platform login address, refer to the current webpage URL\n    LABEL_STUDIO_USERNAME: {User_Email}    # Must be an email, replace with your own\n    LABEL_STUDIO_PASSWORD: {User_Password}\napp:\n  nginx:\n    livenessProbe:\n      path: /label-studio/nginx_health\n    readinessProbe:\n      path: /label-studio/version\n

                                                                          At this point, the installation of Label Studio is complete.

                                                                          Warning

                                                                          By default, PostgreSQL will be installed as the data service middleware. If the image pull fails, it may be because docker.io is inaccessible. Ensure to switch to an available proxy.

                                                                          If you have your own PostgreSQL data service middleware, you can use the following parameters:

                                                                          global:\n  image:\n    repository: heartexlabs/label-studio   # Configure proxy address here if docker.io is inaccessible\n  extraEnvironmentVars:\n    LABEL_STUDIO_HOST: https://{Access_Address}/label-studio    # Use the AI platform login address, refer to the current webpage URL\n    LABEL_STUDIO_USERNAME: {User_Email}    # Must be an email, replace with your own\n    LABEL_STUDIO_PASSWORD: {User_Password}\napp:\n  nginx:\n    livenessProbe:\n      path: /label-studio/nginx_health\n    readinessProbe:\n      path: /label-studio/version\npostgresql:\n  enabled: false  # Disable the built-in PostgreSQL\nexternalPostgresql:\n  host: \"postgres-postgresql\"  # PostgreSQL address\n  port: 5432\n  username: \"label_studio\"  # PostgreSQL username\n  password: \"your_label_studio_password\"  # PostgreSQL password\n  database: \"label_studio\"  # PostgreSQL database name\n
                                                                          "},{"location":"en/admin/baize/best-practice/label-studio.html#add-gproduct-to-navigation-bar","title":"Add GProduct to Navigation Bar","text":"

                                                                          To add Label Studio to the AI platform navigation bar, you can refer to the method in Global Management OEM IN. The following example shows how to add it to the secondary navigation of AI Lab.

                                                                          "},{"location":"en/admin/baize/best-practice/label-studio.html#add-proxy-access","title":"Add Proxy Access","text":"
                                                                          apiVersion: ghippo.io/v1alpha1\nkind: GProductProxy\nmetadata:\n  name: label-studio\nspec:\n  gproduct: label-studio\n  proxies:\n  - authnCheck: false\n    destination:\n      host: label-studio-ls-app.label-studio.svc.cluster.local\n      port: 80\n    match:\n      uri:\n        prefix: /label-studio\n
                                                                          "},{"location":"en/admin/baize/best-practice/label-studio.html#add-to-ai-lab","title":"Add to AI Lab","text":"

Modify the GProductNavigator CR baize and make the following changes:

apiVersion: ghippo.io/v1alpha1\nkind: GProductNavigator\nmetadata:\n  annotations:\n    meta.helm.sh/release-name: baize\n    meta.helm.sh/release-namespace: baize-system\n  labels:\n    app.kubernetes.io/managed-by: Helm\n    gProductName: baize\n  name: baize\nspec:\n  category: cloudnativeai\n  gproduct: baize\n  iconUrl: ./ui/baize/logo.svg\n  isCustom: false\n  localizedName:\n    en-US: AI Lab\n    zh-CN: AI Lab\n  menus:\n    - iconUrl: ''\n      isCustom: false\n      localizedName:\n        en-US: AI Lab\n        zh-CN: AI Lab\n      name: workspace-view\n      order: 1\n      url: ./baize\n      visible: true\n    - iconUrl: ''\n      isCustom: false\n      localizedName:\n        en-US: Operator\n        zh-CN: \u8fd0\u7ef4\u7ba1\u7406\n      name: admin-view\n      order: 1\n      url: ./baize/admin\n      visible: true\n    # Start adding\n    - iconUrl: ''\n      localizedName:\n        en-US: Data Labeling\n        zh-CN: \u6570\u636e\u6807\u6ce8\n      name: label-studio\n      order: 1\n      target: blank    # Open in a new blank page\n      url: https://{Access_Address}/label-studio    # URL to access\n      visible: true\n    # End adding\n  name: AI Lab\n  order: 10\n  url: ./baize\n  visible: true\n
                                                                          "},{"location":"en/admin/baize/best-practice/label-studio.html#adding-effect","title":"Adding Effect","text":""},{"location":"en/admin/baize/best-practice/label-studio.html#conclusion","title":"Conclusion","text":"

The above describes how to deploy Label Studio and integrate it as a labeling component of AI Lab. By labeling the datasets in AI Lab, you can tie data labeling into your algorithm development workflow. For further usage, refer to the relevant documentation.

                                                                          "},{"location":"en/admin/baize/best-practice/train-with-deepspeed.html","title":"Submit a DeepSpeed Training Task","text":"

According to the DeepSpeed official documentation, it is recommended to modify your code to implement the training task.

                                                                          Specifically, you can use deepspeed.init_distributed() instead of torch.distributed.init_process_group(...). Then run the command using torchrun to submit it as a PyTorch distributed task, which will allow you to run a DeepSpeed task.
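A minimal sketch of that substitution (only the initialization call changes; rank and world size are read from the environment variables that torchrun sets):

import deepspeed\n\n# Instead of: torch.distributed.init_process_group(backend='nccl')\ndeepspeed.init_distributed()\n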

You can use torchrun to run your DeepSpeed training script. torchrun is a utility provided by PyTorch for distributed training, and you can combine it with the DeepSpeed API to start your training task.

                                                                          Below is an example of running a DeepSpeed training script using torchrun:

                                                                          1. Write the training script:

                                                                            train.py
import torch\nimport deepspeed\nfrom torch.utils.data import DataLoader\n\n# Load model and data (placeholders for your own model and dataset)\nmodel = YourModel()\ntrain_dataset = YourDataset()\ntrain_dataloader = DataLoader(train_dataset, batch_size=32)\ncriterion = torch.nn.MSELoss()  # choose a loss function fitting your task\n\n# Configure file path\ndeepspeed_config = \"deepspeed_config.json\"\n\n# Create DeepSpeed training engine\nmodel_engine, optimizer, _, _ = deepspeed.initialize(\n    model=model,\n    model_parameters=model.parameters(),\n    config_params=deepspeed_config\n)\n\n# Training loop\nfor inputs, labels in train_dataloader:\n    outputs = model_engine(inputs)\n    loss = criterion(outputs, labels)\n    model_engine.backward(loss)\n    model_engine.step()\n
                                                                          2. Create the DeepSpeed configuration file:

                                                                            deepspeed_config.json
                                                                            {\n  \"train_batch_size\": 32,\n  \"gradient_accumulation_steps\": 1,\n  \"fp16\": {\n    \"enabled\": true,\n    \"loss_scale\": 0\n  },\n  \"optimizer\": {\n    \"type\": \"Adam\",\n    \"params\": {\n      \"lr\": 0.00015,\n      \"betas\": [0.9, 0.999],\n      \"eps\": 1e-08,\n      \"weight_decay\": 0\n    }\n  }\n}\n
                                                                          3. Run the training script using torchrun or baizectl:

                                                                            torchrun train.py\n

                                                                            In this way, you can combine PyTorch's distributed training capabilities with DeepSpeed's optimization technologies for more efficient training. You can use the baizectl command to submit a job in a notebook:

                                                                            baizectl job submit --pytorch --workers 2 -- torchrun train.py\n
                                                                          "},{"location":"en/admin/baize/developer/index.html","title":"Developer Console","text":"

The developer console is where developers carry out day-to-day work such as running AI inference and training large models.

                                                                          "},{"location":"en/admin/baize/developer/quick-start.html","title":"Quick Start","text":"

This document provides a quick-start guide to the full development and training workflow on the AI Lab platform: preparing datasets, working in Notebooks, and running training jobs.

                                                                          1. Click Data Management -> Datasets in the navigation bar, then click Create. Create three datasets as follows:

                                                                            • Code: https://github.com/d-run/drun-samples
                                                                              • For faster access in China, use Gitee: https://gitee.com/samzong_lu/training-sample-code.git
                                                                            • Data: https://github.com/zalandoresearch/fashion-mnist
                                                                              • For faster access in China, use Gitee: https://gitee.com/samzong_lu/fashion-mnist.git
                                                                            • Empty PVC: Create an empty PVC to output the trained model and logs after training.

                                                                            Note

Currently, only StorageClasses with ReadWriteMany access mode are supported. Please use NFS or the recommended JuiceFS.

                                                                          2. Prepare the development environment by clicking Notebooks in the navigation bar, then click Create. Associate the three datasets created in the previous step and fill in the mount paths as shown in the image below:

                                                                          3. Wait for the Notebook to be created successfully, click the access link in the list to enter the Notebook. Execute the following command in the Notebook terminal to start the job training.

                                                                            python /home/jovyan/code/tensorflow/tf-fashion-mnist-sample/train.py\n
4. Click Job Center -> Jobs in the navigation bar, and create a TensorFlow Single job. Refer to the image below for the job configuration and enable the Job Analysis (Tensorboard) feature. Click Create and wait for the job status to become completed.

                                                                            • Image address: release.daocloud.io/baize/jupyter-tensorflow-full:v1.8.0-baize
                                                                            • Command: python
                                                                            • Arguments: /home/jovyan/code/tensorflow/tf-fashion-mnist-sample/train.py

                                                                            Note

                                                                            For large datasets or models, it is recommended to enable GPU configuration in the resource configuration step.

5. In the job created in the previous step, you can click its job analysis to view the job status and optimize the training.

                                                                          "},{"location":"en/admin/baize/developer/dataset/create-use-delete.html","title":"Create, Use and Delete Datasets","text":"

                                                                          AI Lab provides comprehensive dataset management functions needed for model development, training, and inference processes. Currently, it supports unified access to various data sources.

                                                                          With simple configurations, you can connect data sources to AI Lab, achieving unified data management, preloading, dataset management, and other functionalities.

                                                                          "},{"location":"en/admin/baize/developer/dataset/create-use-delete.html#create-a-dataset","title":"Create a Dataset","text":"
                                                                          1. In the left navigation bar, click Data Management -> Dataset List, and then click the Create button on the right.

                                                                          2. Select the worker cluster and namespace to which the dataset belongs, then click Next.

                                                                          3. Configure the data source type for the target data, then click OK.

                                                                            Currently supported data sources include:

                                                                            • GIT: Supports repositories such as GitHub, GitLab, and Gitee
• S3: Supports object storage services such as Amazon S3
                                                                            • HTTP: Directly input a valid HTTP URL
                                                                            • PVC: Supports pre-created Kubernetes PersistentVolumeClaim
                                                                            • NFS: Supports NFS shared storage
4. Upon successful creation, you will be returned to the dataset list. You can perform more actions by clicking \u2507 on the right.

                                                                          Info

                                                                          The system will automatically perform a one-time data preloading after the dataset is successfully created; the dataset cannot be used until the preloading is complete.

                                                                          "},{"location":"en/admin/baize/developer/dataset/create-use-delete.html#use-a-dataset","title":"Use a Dataset","text":"

                                                                          Once the dataset is successfully created, it can be used in tasks such as model training and inference.

                                                                          "},{"location":"en/admin/baize/developer/dataset/create-use-delete.html#use-in-notebook","title":"Use in Notebook","text":"

When creating a Notebook, you can use the dataset directly in the following ways:

                                                                          • Use the dataset as training data mount
                                                                          • Use the dataset as code mount

                                                                          "},{"location":"en/admin/baize/developer/dataset/create-use-delete.html#use-in-training-obs","title":"Use in Training obs","text":"
                                                                          • Use the dataset to specify job output
                                                                          • Use the dataset to specify job input
                                                                          • Use the dataset to specify TensorBoard output
                                                                          "},{"location":"en/admin/baize/developer/dataset/create-use-delete.html#use-in-inference-services","title":"Use in Inference Services","text":"
                                                                          • Use the dataset to mount a model
                                                                          "},{"location":"en/admin/baize/developer/dataset/create-use-delete.html#delete-a-dataset","title":"Delete a Dataset","text":"

                                                                          If you find a dataset to be redundant, expired, or no longer needed, you can delete it from the dataset list.

                                                                          1. Click the \u2507 on the right side of the dataset list, then choose Delete from the dropdown menu.

                                                                          2. In the pop-up window, confirm the dataset you want to delete, enter the dataset name, and then click Delete.

                                                                          3. A confirmation message will appear indicating successful deletion, and the dataset will disappear from the list.

                                                                          Caution

                                                                          Once a dataset is deleted, it cannot be recovered, so please proceed with caution.

                                                                          "},{"location":"en/admin/baize/developer/dataset/environments.html","title":"Manage Python Environment Dependencies","text":"

This document guides users through managing environment dependencies on the AI platform. Below are the specific steps and considerations.

                                                                          1. Overview of Environment Management
                                                                          2. Create New Environment
                                                                          3. Configure Environment
                                                                          4. Troubleshooting
                                                                          "},{"location":"en/admin/baize/developer/dataset/environments.html#overview","title":"Overview","text":"

                                                                          Traditionally, Python environment dependencies are built into an image, which includes the Python version and dependency packages. This approach has high maintenance costs and is inconvenient to update, often requiring a complete rebuild of the image.

                                                                          In AI Lab, users can manage pure environment dependencies through the Environment Management module, decoupling this part from the image. The advantages include:

                                                                          • One environment can be used in multiple places, such as in Notebooks, distributed training tasks, and even inference services.
                                                                          • Updating dependency packages is more convenient; you only need to update the environment dependencies without rebuilding the image.

                                                                          The main components of the environment management are:

                                                                          • Cluster : Select the cluster to operate on.
                                                                          • Namespace : Select the namespace to limit the scope of operations.
                                                                          • Environment List : Displays all environments and their statuses under the current cluster and namespace.

                                                                          "},{"location":"en/admin/baize/developer/dataset/environments.html#explanation-of-environment-list-fields","title":"Explanation of Environment List Fields","text":"
                                                                          • Name : The name of the environment.
                                                                          • Status : The current status of the environment (normal or failed). New environments undergo a warming-up process, after which they can be used in other tasks.
                                                                          • Creation Time : The time the environment was created.
                                                                          "},{"location":"en/admin/baize/developer/dataset/environments.html#creat-new-environment","title":"Creat New Environment","text":"

                                                                          On the Environment Management interface, click the Create button at the top right to enter the environment creation process.

                                                                          Fill in the following basic information:

                                                                          • Name : Enter the environment name, with a length of 2-63 characters, starting and ending with lowercase letters or numbers.
                                                                          • Deployment Location:
                                                                            • Cluster : Select the cluster to deploy, such as gpu-cluster.
                                                                            • Namespace : Select the namespace, such as default.
                                                                          • Remarks (optional): Enter remarks.
                                                                          • Labels (optional): Add labels to the environment.
• Annotations (optional): Add annotations to the environment.

After completing the information, click Next to proceed to environment configuration.
                                                                          "},{"location":"en/admin/baize/developer/dataset/environments.html#configure-environment","title":"Configure Environment","text":"

                                                                          In the environment configuration step, users need to configure the Python version and dependency management tool.

                                                                          "},{"location":"en/admin/baize/developer/dataset/environments.html#environment-settings","title":"Environment Settings","text":"
                                                                          • Python Version : Select the required Python version, such as 3.12.3.
                                                                          • Package Manager : Choose the package management tool, either PIP or CONDA.
                                                                          • Environment Data :
• If PIP is selected: Enter the dependency package list in requirements.txt format in the editor below (see the sample after this list).
                                                                            • If CONDA is selected: Enter the dependency package list in environment.yaml format in the editor below.
                                                                          • Other Options (optional):
                                                                            • Additional pip Index URLs : Configure additional pip index URLs; suitable for internal enterprise private repositories or PIP acceleration sites.
                                                                            • GPU Configuration : Enable or disable GPU configuration; some GPU-related dependency packages need GPU resources configured during preloading.
                                                                            • Associated Storage : Select the associated storage configuration; environment dependency packages will be stored in the associated storage. Note: Storage must support ReadWriteMany.
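For illustration, a minimal PIP-style dependency list might look like the following; the package names and versions are examples only:

# requirements.txt format: one package per line, optionally version-pinned\ntorch==2.1.2\nnumpy>=1.24\npandas\n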

                                                                          After configuration, click the Create button, and the system will automatically create and configure the new Python environment.

                                                                          "},{"location":"en/admin/baize/developer/dataset/environments.html#troubleshooting","title":"Troubleshooting","text":"
                                                                          • If environment creation fails:

                                                                            • Check if the network connection is normal.
                                                                            • Verify that the Python version and package manager configuration are correct.
                                                                            • Ensure the selected cluster and namespace are available.
                                                                          • If dependency preloading fails:

                                                                            • Check if the requirements.txt or environment.yaml file format is correct.
• Verify that the dependency package names and versions are correct.

If other issues arise, contact the platform administrator or refer to the platform help documentation for more support.

                                                                          These are the basic steps and considerations for managing Python dependencies in AI Lab.

                                                                          "},{"location":"en/admin/baize/developer/inference/models.html","title":"Model Support","text":"

                                                                          With the rapid iteration of AI Lab, we have now supported various model inference services. Here, you can see information about the supported models.

• AI Lab v0.3.0 launched model inference services for traditional deep learning models, allowing users to consume them directly without worrying about model deployment and maintenance.
• AI Lab v0.6.0 supports the complete vLLM inference capability, covering many large language models such as Llama, Qwen, ChatGLM, and more.

                                                                          Note

                                                                          The support for inference capabilities is related to the version of AI Lab.

                                                                          You can use GPU types that have been verified by AI platform in AI Lab. For more details, refer to the GPU Support Matrix.

                                                                          "},{"location":"en/admin/baize/developer/inference/models.html#triton-inference-server","title":"Triton Inference Server","text":"

                                                                          Through the Triton Inference Server, traditional deep learning models can be well supported. Currently, AI Lab supports mainstream inference backend services:

| Backend | Supported Model Formats | Description |
| --- | --- | --- |
| pytorch | TorchScript, PyTorch 2.0 formats | triton-inference-server/pytorch_backend |
| tensorflow | TensorFlow 2.x | triton-inference-server/tensorflow_backend |
| vLLM (Deprecated) | Models supported by vLLM | triton-inference-server/vllm_backend |

                                                                          Danger

                                                                          The use of Triton's Backend vLLM method has been deprecated. It is recommended to use the latest support for vLLM to deploy your large language models.

                                                                          "},{"location":"en/admin/baize/developer/inference/models.html#vllm","title":"vLLM","text":"

With vLLM, we can quickly use large language models. Here, you can see the list of models we support, which generally aligns with the vLLM Supported Models list.

                                                                          • HuggingFace Models: We support most of HuggingFace's models. You can see more models at the HuggingFace Model Hub.
                                                                          • The vLLM Supported Models list includes supported large language models and vision-language models.
                                                                          • Models fine-tuned using the vLLM support framework.
                                                                          "},{"location":"en/admin/baize/developer/inference/models.html#new-features-of-vllm","title":"New Features of vLLM","text":"

                                                                          Currently, AI Lab also supports some new features when using vLLM as an inference tool:

                                                                          • Enable Lora Adapter to optimize model inference services during inference.
• Provide an OpenAI-compatible API, making it easy for users to switch to local inference services at low cost and transition quickly.
                                                                          "},{"location":"en/admin/baize/developer/inference/triton-inference.html","title":"Create Inference Service Using Triton Framework","text":"

                                                                          The AI Lab currently offers Triton and vLLM as inference frameworks. Users can quickly start a high-performance inference service with simple configurations.

                                                                          Danger

                                                                          The use of Triton's Backend vLLM method has been deprecated. It is recommended to use the latest support for vLLM to deploy your large language models.

                                                                          "},{"location":"en/admin/baize/developer/inference/triton-inference.html#introduction-to-triton","title":"Introduction to Triton","text":"

                                                                          Triton is an open-source inference server developed by NVIDIA, designed to simplify the deployment and inference of machine learning models. It supports a variety of deep learning frameworks, including TensorFlow and PyTorch, enabling users to easily manage and deploy different types of models.

                                                                          "},{"location":"en/admin/baize/developer/inference/triton-inference.html#prerequisites","title":"Prerequisites","text":"

Prepare model data: Manage the model code in dataset management and ensure that the data is successfully preloaded. The following example uses a PyTorch model for MNIST handwritten digit recognition.

                                                                          Note

                                                                          The model to be inferred must adhere to the following directory structure within the dataset:

                                                                            <model-repository-name>\n  \u2514\u2500\u2500 <model-name>\n     \u2514\u2500\u2500 <version>\n        \u2514\u2500\u2500 <model-definition-file>\n

                                                                          The directory structure in this example is as follows:

                                                                              model-repo\n    \u2514\u2500\u2500 mnist-cnn\n        \u2514\u2500\u2500 1\n            \u2514\u2500\u2500 model.pt\n
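As a hedged sketch, a PyTorch model can be exported into this layout with TorchScript, assuming model is your trained torch.nn.Module:

import os\nimport torch\n\n# Serialize the trained model with TorchScript and save it into the\n# <model-repository>/<model-name>/<version>/ layout that Triton expects.\nscripted = torch.jit.script(model)  # or torch.jit.trace(model, example_input)\nos.makedirs(\"model-repo/mnist-cnn/1\", exist_ok=True)\nscripted.save(\"model-repo/mnist-cnn/1/model.pt\")\n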
                                                                          "},{"location":"en/admin/baize/developer/inference/triton-inference.html#create-inference-service","title":"Create Inference Service","text":"

                                                                          Currently, form-based creation is supported, allowing you to create services with field prompts in the interface.

                                                                          "},{"location":"en/admin/baize/developer/inference/triton-inference.html#configure-model-path","title":"Configure Model Path","text":"

                                                                          The model path model-repo/mnist-cnn/1/model.pt must be consistent with the directory structure of the dataset.

                                                                          "},{"location":"en/admin/baize/developer/inference/triton-inference.html#model-configuration","title":"Model Configuration","text":""},{"location":"en/admin/baize/developer/inference/triton-inference.html#configure-input-and-output-parameters","title":"Configure Input and Output Parameters","text":"

                                                                          Note

The first dimension of the input and output parameters defaults to batchsize; setting it to -1 allows the batchsize to be calculated automatically from the input inference data. The remaining dimensions and the data type must match the model's input.

                                                                          "},{"location":"en/admin/baize/developer/inference/triton-inference.html#configure-environment","title":"Configure Environment","text":"

                                                                          You can import the environment created in Manage Python Environment Dependencies to serve as the runtime environment for inference.

                                                                          "},{"location":"en/admin/baize/developer/inference/triton-inference.html#advanced-settings","title":"Advanced Settings","text":""},{"location":"en/admin/baize/developer/inference/triton-inference.html#configure-authentication-policy","title":"Configure Authentication Policy","text":"

                                                                          Supports API key-based request authentication. Users can customize and add authentication parameters.

                                                                          "},{"location":"en/admin/baize/developer/inference/triton-inference.html#affinity-scheduling","title":"Affinity Scheduling","text":"

                                                                          Supports automated affinity scheduling based on GPU resources and other node configurations. It also allows users to customize scheduling policies.

                                                                          "},{"location":"en/admin/baize/developer/inference/triton-inference.html#access","title":"Access","text":""},{"location":"en/admin/baize/developer/inference/triton-inference.html#api-access","title":"API Access","text":"
                                                                          • Triton provides a REST-based API, allowing clients to perform model inference via HTTP POST requests.
                                                                          • Clients can send requests with JSON-formatted bodies containing input data and related metadata.
                                                                          "},{"location":"en/admin/baize/developer/inference/triton-inference.html#http-access","title":"HTTP Access","text":"
                                                                          1. Send HTTP POST Request: Use tools like curl or HTTP client libraries (e.g., Python's requests library) to send POST requests to the Triton Server.

2. Set HTTP Headers: The configuration is generated automatically based on user settings; include metadata about the model inputs and outputs in the HTTP headers.

                                                                          3. Construct Request Body: The request body usually contains the input data for inference and model-specific metadata.

                                                                          "},{"location":"en/admin/baize/developer/inference/triton-inference.html#example-curl-command","title":"Example curl Command","text":"
                                                                            curl -X POST \"http://<ip>:<port>/v2/models/<inference-name>/infer\" \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\n    \"inputs\": [\n      {\n        \"name\": \"model_input\",            \n        \"shape\": [1, 1, 32, 32],          \n        \"datatype\": \"FP32\",               \n        \"data\": [\n          [0.1234, 0.5678, 0.9101, ... ]  \n        ]\n      }\n    ]\n  }'\n
                                                                          • <ip> is the host address where the Triton Inference Server is running.
                                                                          • <port> is the port where the Triton Inference Server is running.
                                                                          • <inference-name> is the name of the inference service that has been created.
                                                                          • \"name\" must match the name of the input parameter in the model configuration.
                                                                          • \"shape\" must match the dims of the input parameter in the model configuration.
                                                                          • \"datatype\" must match the Data Type of the input parameter in the model configuration.
                                                                          • \"data\" should be replaced with the actual inference data.

                                                                          Please note that the above example code needs to be adjusted according to your specific model and environment. The format and content of the input data must also comply with the model's requirements.
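Equivalently, here is a minimal Python sketch using the requests library; it makes the same assumptions as the curl example (replace the placeholders, and the tensor name, shape, and datatype must match your model configuration):

import requests\n\n# Same inference request as the curl example above; replace the placeholders first.\nurl = \"http://<ip>:<port>/v2/models/<inference-name>/infer\"\npayload = {\n    \"inputs\": [\n        {\n            \"name\": \"model_input\",             # must match the input name in the model configuration\n            \"shape\": [1, 1, 32, 32],           # must match the configured dims\n            \"datatype\": \"FP32\",                # must match the configured Data Type\n            \"data\": [0.0] * (1 * 1 * 32 * 32)  # replace with real inference data\n        }\n    ]\n}\n\n# If an authentication policy is enabled, also add your custom authentication\n# parameters (for example an API key header) to the request headers.\nresponse = requests.post(url, json=payload, timeout=30)\nresponse.raise_for_status()\nprint(response.json())\n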

                                                                          "},{"location":"en/admin/baize/developer/inference/vllm-inference.html","title":"Create Inference Service Using vLLM Framework","text":"

                                                                          AI Lab supports using vLLM as an inference service, offering all the capabilities of vLLM while fully adapting to the OpenAI interface definition.

                                                                          "},{"location":"en/admin/baize/developer/inference/vllm-inference.html#introduction-to-vllm","title":"Introduction to vLLM","text":"

                                                                          vLLM is a fast and easy-to-use library for inference and services. It aims to significantly improve the throughput and memory efficiency of language model services in real-time scenarios. vLLM boasts several features in terms of speed and flexibility:

                                                                          • Continuous batching of incoming requests.
                                                                          • Efficiently manages attention keys and values memory using PagedAttention.
                                                                          • Seamless integration with popular HuggingFace models.
                                                                          • Compatible with OpenAI's API server.
                                                                          "},{"location":"en/admin/baize/developer/inference/vllm-inference.html#prerequisites","title":"Prerequisites","text":"

                                                                          Prepare model data: Manage the model code in dataset management and ensure that the data is successfully preloaded.

                                                                          "},{"location":"en/admin/baize/developer/inference/vllm-inference.html#create-inference-service","title":"Create Inference Service","text":"
                                                                          1. Select the vLLM inference framework. In the model module selection, choose the pre-created model dataset hdd-models and fill in the path information where the model is located within the dataset.

                                                                            This guide uses the ChatGLM3 model for creating the inference service.

                                                                          2. Configure the resources for the inference service and adjust the parameters for running the inference service.

| Parameter Name | Description |
| --- | --- |
| GPU Resources | Configure GPU resources for inference based on the model scale and cluster resources. |
| Allow Remote Code | Controls whether vLLM trusts and executes code from remote sources. |
| LoRA | LoRA is a parameter-efficient fine-tuning technique for deep learning models. It reduces the number of parameters and computational complexity by decomposing the original model parameter matrix into low-rank matrices. 1. --lora-modules: Specifies specific modules or layers for low-rank approximation. 2. max_lora_rank: Specifies the maximum rank for each adapter layer in the LoRA model. For simpler tasks, a smaller rank value can be chosen, while more complex tasks may require a larger rank value to ensure model performance. 3. max_loras: Indicates the maximum number of LoRA adapters that can be included in the model, customized based on model size and inference complexity. 4. max_cpu_loras: Specifies the maximum number of LoRA adapters that can be handled in a CPU environment. (See the sketch after these steps.) |
| Associated Environment | Selects predefined environment dependencies required for inference. |

                                                                            Info

                                                                            For models that support LoRA parameters, refer to vLLM Supported Models.

                                                                          3. In the Advanced Configuration , support is provided for automated affinity scheduling based on GPU resources and other node configurations. Users can also customize scheduling policies.
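As a non-authoritative sketch of how the LoRA parameters above map to vLLM's Python API (the model and adapter paths are hypothetical):

from vllm import LLM, SamplingParams\nfrom vllm.lora.request import LoRARequest\n\n# Engine-level LoRA settings, mirroring the parameters in the table above.\nllm = LLM(\n    model=\"/data/models/chatglm3-6b\",  # hypothetical model path inside the dataset\n    enable_lora=True,\n    max_lora_rank=16,  # maximum rank of each adapter\n    max_loras=2,       # maximum adapters served concurrently\n    max_cpu_loras=4,   # maximum adapters cached in CPU memory\n)\n\n# Attach a specific adapter to a request.\nlora = LoRARequest(\"my-adapter\", 1, \"/data/loras/my-adapter\")  # hypothetical adapter path\noutputs = llm.generate([\"Hello\"], SamplingParams(max_tokens=32), lora_request=lora)\nprint(outputs[0].outputs[0].text)\n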

                                                                          "},{"location":"en/admin/baize/developer/inference/vllm-inference.html#verify-inference-service","title":"Verify Inference Service","text":"

                                                                          Once the inference service is created, click the name of the inference service to enter the details and view the API call methods. Verify the execution results using Curl, Python, and Node.js.

Copy the curl command from the details and execute it in the terminal to send a model inference request and check the expected output.
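Because the service exposes an OpenAI-compatible API, you can also verify it from Python. Below is a minimal sketch using the openai client; the base URL, API key, and model name are placeholders to be replaced with the values shown on the details page:

from openai import OpenAI\n\n# Point the standard OpenAI client at the inference service endpoint.\nclient = OpenAI(base_url=\"http://<ip>:<port>/v1\", api_key=\"<api-key>\")\n\nresp = client.chat.completions.create(\n    model=\"<inference-name>\",\n    messages=[{\"role\": \"user\", \"content\": \"Hello!\"}],\n)\nprint(resp.choices[0].message.content)\n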

                                                                          "},{"location":"en/admin/baize/developer/jobs/create.html","title":"Create Job","text":"

                                                                          Job management refers to the functionality of creating and managing job lifecycles through job scheduling and control components.

                                                                          AI platform Smart Computing Capability adopts Kubernetes' Job mechanism to schedule various AI inference and training jobs.

                                                                          1. Click Job Center -> Jobs in the left navigation bar to enter the job list. Click the Create button on the right.

                                                                          2. The system will pre-fill basic configuration data, including the cluster, namespace, type, queue, and priority. Adjust these parameters and click Next.

                                                                          3. Configure the URL, runtime parameters, and associated datasets, then click Next.

4. Optionally add labels, annotations, runtime environment variables, and other job parameters. Select a scheduling policy and click Confirm.

                                                                          5. After the job is successfully created, it will have several running statuses:

                                                                            • Running
                                                                            • Queued
                                                                            • Submission successful, Submission failed
                                                                            • Successful, Failed
                                                                          "},{"location":"en/admin/baize/developer/jobs/create.html#next-steps","title":"Next Steps","text":"
                                                                          • View Job Load
                                                                          • Delete Job
                                                                          "},{"location":"en/admin/baize/developer/jobs/delete.html","title":"Delete Job","text":"

                                                                          If you find a job to be redundant, expired, or no longer needed for any other reason, you can delete it from the job list.

                                                                          1. Click the \u2507 on the right side of the job in the job list, then choose Delete from the dropdown menu.

                                                                          2. In the pop-up window, confirm the job you want to delete, enter the job name, and then click Delete.

                                                                          3. A confirmation message will appear indicating successful deletion, and the job will disappear from the list.

                                                                          Caution

                                                                          Once a job is deleted, it cannot be recovered, so please proceed with caution.

                                                                          "},{"location":"en/admin/baize/developer/jobs/pytorch.html","title":"Pytorch Jobs","text":"

PyTorch is an open-source deep learning framework that provides a flexible environment for training and deployment. A PyTorch job is a job that uses the PyTorch framework.

                                                                          In the AI Lab platform, we provide support and adaptation for Pytorch jobs. Through a graphical interface, you can quickly create Pytorch jobs and perform model training.

                                                                          "},{"location":"en/admin/baize/developer/jobs/pytorch.html#job-configuration","title":"Job Configuration","text":"
                                                                          • Job types support both Pytorch Single and Pytorch Distributed modes.
                                                                          • The runtime image already supports the Pytorch framework by default, so no additional installation is required.
                                                                          "},{"location":"en/admin/baize/developer/jobs/pytorch.html#job-runtime-environment","title":"Job Runtime Environment","text":"

                                                                          Here we use the baize-notebook base image and the associated environment as the basic runtime environment for the job.

                                                                          To learn how to create an environment, refer to Environments.

                                                                          "},{"location":"en/admin/baize/developer/jobs/pytorch.html#create-jobs","title":"Create Jobs","text":""},{"location":"en/admin/baize/developer/jobs/pytorch.html#pytorch-single-jobs","title":"Pytorch Single Jobs","text":"
                                                                          1. Log in to the AI Lab platform, click Job Center in the left navigation bar to enter the Jobs page.
                                                                          2. Click the Create button in the upper right corner to enter the job creation page.
                                                                          3. Select the job type as Pytorch Single and click Next .
                                                                          4. Fill in the job name and description, then click OK .
                                                                          "},{"location":"en/admin/baize/developer/jobs/pytorch.html#parameters","title":"Parameters","text":"
                                                                          • Start command: bash
                                                                          • Command parameters:
                                                                          import torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n# Define a simple neural network\nclass SimpleNet(nn.Module):\n    def __init__(self):\n        super(SimpleNet, self).__init__()\n        self.fc = nn.Linear(10, 1)\n\n    def forward(self, x):\n        return self.fc(x)\n\n# Create model, loss function, and optimizer\nmodel = SimpleNet()\ncriterion = nn.MSELoss()\noptimizer = optim.SGD(model.parameters(), lr=0.01)\n\n# Generate some random data\nx = torch.randn(100, 10)\ny = torch.randn(100, 1)\n\n# Train the model\nfor epoch in range(100):\n    # Forward pass\n    outputs = model(x)\n    loss = criterion(outputs, y)\n\n    # Backward pass and optimization\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n\n    if (epoch + 1) % 10 == 0:\n        print(f'Epoch [{epoch+1}/100], Loss: {loss.item():.4f}')\n\nprint('Training finished.')\n
                                                                          "},{"location":"en/admin/baize/developer/jobs/pytorch.html#results","title":"Results","text":"

                                                                          Once the job is successfully submitted, we can enter the job details to see the resource usage. From the upper right corner, go to Workload Details to view the log output during the training process.

                                                                          [HAMI-core Warn(1:140244541377408:utils.c:183)]: get default cuda from (null)\n[HAMI-core Msg(1:140244541377408:libvgpu.c:855)]: Initialized\nEpoch [10/100], Loss: 1.1248\nEpoch [20/100], Loss: 1.0486\nEpoch [30/100], Loss: 0.9969\nEpoch [40/100], Loss: 0.9611\nEpoch [50/100], Loss: 0.9360\nEpoch [60/100], Loss: 0.9182\nEpoch [70/100], Loss: 0.9053\nEpoch [80/100], Loss: 0.8960\nEpoch [90/100], Loss: 0.8891\nEpoch [100/100], Loss: 0.8841\nTraining finished.\n[HAMI-core Msg(1:140244541377408:multiprocess_memory_limit.c:468)]: Calling exit handler 1\n
                                                                          "},{"location":"en/admin/baize/developer/jobs/pytorch.html#pytorch-distributed-jobs","title":"Pytorch Distributed Jobs","text":"
                                                                          1. Log in to the AI Lab platform, click Job Center in the left navigation bar to enter the Jobs page.
                                                                          2. Click the Create button in the upper right corner to enter the job creation page.
                                                                          3. Select the job type as Pytorch Distributed and click Next.
                                                                          4. Fill in the job name and description, then click OK.
                                                                          "},{"location":"en/admin/baize/developer/jobs/pytorch.html#parameters_1","title":"Parameters","text":"
                                                                          • Start command: bash
                                                                          • Command parameters:
                                                                          import os\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nclass SimpleModel(nn.Module):\n    def __init__(self):\n        super(SimpleModel, self).__init__()\n        self.fc = nn.Linear(10, 1)\n\n    def forward(self, x):\n        return self.fc(x)\n\ndef train():\n    # Print environment information\n    print(f'PyTorch version: {torch.__version__}')\n    print(f'CUDA available: {torch.cuda.is_available()}')\n    if torch.cuda.is_available():\n        print(f'CUDA version: {torch.version.cuda}')\n        print(f'CUDA device count: {torch.cuda.device_count()}')\n\n    rank = int(os.environ.get('RANK', '0'))\n    world_size = int(os.environ.get('WORLD_SIZE', '1'))\n\n    print(f'Rank: {rank}, World Size: {world_size}')\n\n    # Initialize distributed environment\n    try:\n        if world_size > 1:\n            dist.init_process_group('nccl')\n            print('Distributed process group initialized successfully')\n        else:\n            print('Running in non-distributed mode')\n    except Exception as e:\n        print(f'Error initializing process group: {e}')\n        return\n\n    # Set device\n    try:\n        if torch.cuda.is_available():\n            device = torch.device(f'cuda:{rank % torch.cuda.device_count()}')\n            print(f'Using CUDA device: {device}')\n        else:\n            device = torch.device('cpu')\n            print('CUDA not available, using CPU')\n    except Exception as e:\n        print(f'Error setting device: {e}')\n        device = torch.device('cpu')\n        print('Falling back to CPU')\n\n    try:\n        model = SimpleModel().to(device)\n        print('Model moved to device successfully')\n    except Exception as e:\n        print(f'Error moving model to device: {e}')\n        return\n\n    try:\n        if world_size > 1:\n            ddp_model = DDP(model, device_ids=[rank % torch.cuda.device_count()] if torch.cuda.is_available() else None)\n            print('DDP model created successfully')\n        else:\n            ddp_model = model\n            print('Using non-distributed model')\n    except Exception as e:\n        print(f'Error creating DDP model: {e}')\n        return\n\n    loss_fn = nn.MSELoss()\n    optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)\n\n    # Generate some random data\n    try:\n        data = torch.randn(100, 10, device=device)\n        labels = torch.randn(100, 1, device=device)\n        print('Data generated and moved to device successfully')\n    except Exception as e:\n        print(f'Error generating or moving data to device: {e}')\n        return\n\n    for epoch in range(10):\n        try:\n            ddp_model.train()\n            outputs = ddp_model(data)\n            loss = loss_fn(outputs, labels)\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n\n            if rank == 0:\n                print(f'Epoch {epoch}, Loss: {loss.item():.4f}')\n        except Exception as e:\n            print(f'Error during training epoch {epoch}: {e}')\n            break\n\n    if world_size > 1:\n        dist.destroy_process_group()\n\nif __name__ == '__main__':\n    train()\n
                                                                          "},{"location":"en/admin/baize/developer/jobs/pytorch.html#number-of-job-replicas","title":"Number of Job Replicas","text":"

                                                                          Note that Pytorch Distributed training jobs will create a group of Master and Worker training Pods, where the Master is responsible for coordinating the training job, and the Worker is responsible for the actual training work.

                                                                          Note

In this demonstration, the Master replica count is 1 and the Worker replica count is 2. Therefore, we need to set the replica count to 3 in the Job Configuration, which is the sum of the Master and Worker replica counts. PyTorch will automatically assign the Master and Worker roles.

                                                                          "},{"location":"en/admin/baize/developer/jobs/pytorch.html#results_1","title":"Results","text":"

                                                                          Similarly, we can enter the job details to view the resource usage and the log output of each Pod.

                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorboard.html","title":"Job Analysis","text":"

AI Lab provides important visualization and analysis tools for the model development process, used to display the training process and results of machine learning models. This document introduces the basic concepts of Job Analysis (Tensorboard), its usage in the AI Lab system, and how to configure the log content of datasets.

                                                                          Note

                                                                          Tensorboard is a visualization tool provided by TensorFlow, used to display the training process and results of machine learning models. It can help developers more intuitively understand the training dynamics of their models, analyze model performance, debug issues, and more.

                                                                          The role and advantages of Tensorboard in the model development process:

                                                                          • Visualize Training Process : Display metrics such as training and validation loss, and accuracy through charts, helping developers intuitively observe the training effects of the model.
                                                                          • Debug and Optimize Models : By viewing the weights and gradient distributions of different layers, help developers discover and fix issues in the model.
                                                                          • Compare Different Experiments : Simultaneously display the results of multiple experiments, making it convenient for developers to compare the effects of different models and hyperparameter configurations.
                                                                          • Track Training Data : Record the datasets and parameters used during training to ensure the reproducibility of experiments.
                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorboard.html#how-to-create-tensorboard","title":"How to Create Tensorboard","text":"

                                                                          In the AI Lab system, we provide a convenient way to create and manage Tensorboard. Here are the specific steps:

                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorboard.html#enable-tensorboard-when-creating-a-notebook","title":"Enable Tensorboard When Creating a Notebook","text":"
                                                                          1. Create a Notebook : Create a new Notebook on the AI Lab platform.
                                                                          2. Enable Tensorboard : On the Notebook creation page, enable the Tensorboard option and specify the dataset and log path.

                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorboard.html#enable-tensorboard-after-creating-and-completing-a-distributed-job","title":"Enable Tensorboard After Creating and Completing a Distributed Job","text":"
                                                                          1. Create a Distributed Job : Create a new distributed training job on the AI Lab platform.
                                                                          2. Configure Tensorboard : On the job configuration page, enable the Tensorboard option and specify the dataset and log path.
                                                                          3. View Tensorboard After Job Completion : After the job is completed, you can view the Tensorboard link on the job details page. Click the link to see the visualized results of the training process.

                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorboard.html#directly-reference-tensorboard-in-a-notebook","title":"Directly Reference Tensorboard in a Notebook","text":"

                                                                          In a Notebook, you can directly start Tensorboard through code. Here is a sample code snippet:

# Import necessary libraries\nimport tensorflow as tf\nimport datetime\n\n# Load and normalize the MNIST dataset so the snippet is runnable end to end\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\n# Define log directory\nlog_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n# Create Tensorboard callback\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n\n# Build and compile model\nmodel = tf.keras.models.Sequential([\n    tf.keras.layers.Flatten(input_shape=(28, 28)),\n    tf.keras.layers.Dense(512, activation='relu'),\n    tf.keras.layers.Dropout(0.2),\n    tf.keras.layers.Dense(10, activation='softmax')\n])\n\nmodel.compile(optimizer='adam',\n              loss='sparse_categorical_crossentropy',\n              metrics=['accuracy'])\n\n# Train model and enable Tensorboard callback\nmodel.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test), callbacks=[tensorboard_callback])\n
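The logs written to log_dir can then be visualized by pointing Tensorboard at that directory. Assuming the tensorboard command-line tool is available in your environment, a typical invocation from a terminal looks like this:

# Start Tensorboard against the log directory used above (6006 is the common default port)\ntensorboard --logdir logs/fit --port 6006\n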
                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorboard.html#how-to-configure-dataset-log-content","title":"How to Configure Dataset Log Content","text":"

                                                                          When using Tensorboard, you can record and configure different datasets and log content. Here are some common configuration methods:

                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorboard.html#configure-training-and-validation-dataset-logs","title":"Configure Training and Validation Dataset Logs","text":"

                                                                          While training the model, you can use TensorFlow's tf.summary API to record logs for the training and validation datasets. Here is a sample code snippet:

# Import necessary libraries\nimport tensorflow as tf\n\n# This snippet assumes that EPOCHS, train_dataset, val_dataset, the\n# train_step/val_step functions, and the metric objects (train_loss,\n# train_accuracy, val_loss, val_accuracy) are defined elsewhere in your script.\n\n# Create log directories\ntrain_log_dir = 'logs/gradient_tape/train'\nval_log_dir = 'logs/gradient_tape/val'\ntrain_summary_writer = tf.summary.create_file_writer(train_log_dir)\nval_summary_writer = tf.summary.create_file_writer(val_log_dir)\n\n# Train model and record logs\nfor epoch in range(EPOCHS):\n    for (x_train, y_train) in train_dataset:\n        # Training step\n        train_step(x_train, y_train)\n        with train_summary_writer.as_default():\n            tf.summary.scalar('loss', train_loss.result(), step=epoch)\n            tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)\n\n    for (x_val, y_val) in val_dataset:\n        # Validation step\n        val_step(x_val, y_val)\n        with val_summary_writer.as_default():\n            tf.summary.scalar('loss', val_loss.result(), step=epoch)\n            tf.summary.scalar('accuracy', val_accuracy.result(), step=epoch)\n
                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorboard.html#configure-custom-logs","title":"Configure Custom Logs","text":"

                                                                          In addition to logs for training and validation datasets, you can also record other custom log content such as learning rate and gradient distribution. Here is a sample code snippet:

# Record custom logs (learning_rate and gradients are assumed to be\n# produced by your training loop)\nwith train_summary_writer.as_default():\n    tf.summary.scalar('learning_rate', learning_rate, step=epoch)\n    tf.summary.histogram('gradients', gradients, step=epoch)\n
                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorboard.html#tensorboard-management","title":"Tensorboard Management","text":"

                                                                          In AI Lab, Tensorboards created through various methods are uniformly displayed on the job analysis page, making it convenient for users to view and manage.

                                                                          Users can view information such as the link, status, and creation time of Tensorboard on the job analysis page and directly access the visualized results of Tensorboard through the link.

                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorflow.html","title":"Tensorflow Jobs","text":"

Tensorflow, along with PyTorch, is a highly active open-source deep learning framework that provides a flexible environment for training and deployment.

                                                                          AI Lab provides support and adaptation for the Tensorflow framework. You can quickly create Tensorflow jobs and conduct model training through graphical operations.

                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#job-configuration","title":"Job Configuration","text":"
• Supported job types include both Tensorflow Single and Tensorflow Distributed modes.
                                                                          • The runtime image already supports the Tensorflow framework by default, so no additional installation is required.
                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#job-runtime-environment","title":"Job Runtime Environment","text":"

                                                                          Here, we use the baize-notebook base image and the associated environment as the basic runtime environment for jobs.

                                                                          For information on how to create an environment, refer to Environment List.

                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#creating-a-job","title":"Creating a Job","text":""},{"location":"en/admin/baize/developer/jobs/tensorflow.html#example-tfjob-single","title":"Example TFJob Single","text":"
                                                                          1. Log in to the AI Lab platform and click Job Center in the left navigation bar to enter the Jobs page.
                                                                          2. Click the Create button in the upper right corner to enter the job creation page.
3. Select the job type as Tensorflow Single and click Next.
4. Fill in the job name and description, then click OK.
                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#pre-warming-the-code-repository","title":"Pre-warming the Code Repository","text":"

                                                                          Use AI Lab -> Dataset List to create a dataset and pull the code from a remote GitHub repository into the dataset. This way, when creating a job, you can directly select the dataset and mount the code into the job.

                                                                          Demo code repository address: https://github.com/d-run/training-sample-code/

                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#parameters","title":"Parameters","text":"
                                                                          • Launch command: Use bash
                                                                          • Command parameters: Use python /code/tensorflow/tf-single.py
                                                                          \"\"\"\n  pip install tensorflow numpy\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\n# Create some random data\nx = np.random.rand(100, 1)\ny = 2 * x + 1 + np.random.rand(100, 1) * 0.1\n\n# Create a simple model\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Dense(1, input_shape=(1,))\n])\n\n# Compile the model\nmodel.compile(optimizer='adam', loss='mse')\n\n# Train the model, setting epochs to 10\nhistory = model.fit(x, y, epochs=10, verbose=1)\n\n# Print the final loss\nprint('Final loss: {' + str(history.history['loss'][-1]) +'}')\n\n# Use the model to make predictions\ntest_x = np.array([[0.5]])\nprediction = model.predict(test_x)\nprint(f'Prediction for x=0.5: {prediction[0][0]}')\n
                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#results","title":"Results","text":"

                                                                          After the job is successfully submitted, you can enter the job details to see the resource usage. From the upper right corner, navigate to Workload Details to view log outputs during the training process.

                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#tfjob-distributed-job","title":"TFJob Distributed Job","text":"
                                                                          1. Log in to AI Lab and click Job Center in the left navigation bar to enter the Jobs page.
                                                                          2. Click the Create button in the upper right corner to enter the job creation page.
                                                                          3. Select the job type as Tensorflow Distributed and click Next.
                                                                          4. Fill in the job name and description, then click OK.
                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#example-job-introduction","title":"Example Job Introduction","text":"

                                                                          This job includes three roles: Chief, Worker, and Parameter Server (PS).

                                                                          • Chief: Responsible for coordinating the training process and saving model checkpoints.
                                                                          • Worker: Executes the actual model training.
                                                                          • PS: Used in asynchronous training to store and update model parameters.

Different resources are allocated to different roles: Chief and Worker use GPUs, while the PS uses CPUs and more memory.
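For reference, the training operator passes this topology to each Pod through the TF_CONFIG environment variable, which the sample script below reads via os.environ.get('TF_CONFIG'). A TF_CONFIG for one worker in a Chief/Worker/PS topology might look roughly like the following sketch (host names and ports are illustrative; in practice the operator sets this variable automatically):

# Illustrative TF_CONFIG for worker 0 in a Chief/Worker/PS topology\nexport TF_CONFIG='{\"cluster\": {\"chief\": [\"demo-chief-0:2222\"], \"worker\": [\"demo-worker-0:2222\", \"demo-worker-1:2222\"], \"ps\": [\"demo-ps-0:2222\"]}, \"task\": {\"type\": \"worker\", \"index\": 0}}'\n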

                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#parameters_1","title":"Parameters","text":"
                                                                          • Launch command: Use bash
                                                                          • Command parameters: Use python /code/tensorflow/tensorflow-distributed.py
                                                                          import os\nimport json\nimport tensorflow as tf\n\nclass SimpleModel(tf.keras.Model):\n    def __init__(self):\n        super(SimpleModel, self).__init__()\n        self.fc = tf.keras.layers.Dense(1, input_shape=(10,))\n\n    def call(self, x):\n        return self.fc(x)\n\ndef train():\n    # Print environment information\n    print(f\"TensorFlow version: {tf.__version__}\")\n    print(f\"GPU available: {tf.test.is_gpu_available()}\")\n    if tf.test.is_gpu_available():\n        print(f\"GPU device count: {len(tf.config.list_physical_devices('GPU'))}\")\n\n    # Retrieve distributed training information\n    tf_config = json.loads(os.environ.get('TF_CONFIG') or '{}')\n    job_type = tf_config.get('job', {}).get('type')\n    job_id = tf_config.get('job', {}).get('index')\n\n    print(f\"Job type: {job_type}, Job ID: {job_id}\")\n\n    # Set up distributed strategy\n    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()\n\n    with strategy.scope():\n        model = SimpleModel()\n        loss_fn = tf.keras.losses.MeanSquaredError()\n        optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)\n\n    # Generate some random data\n    data = tf.random.normal((100, 10))\n    labels = tf.random.normal((100, 1))\n\n    @tf.function\n    def train_step(inputs, labels):\n        with tf.GradientTape() as tape:\n            predictions = model(inputs)\n            loss = loss_fn(labels, predictions)\n        gradients = tape.gradient(loss, model.trainable_variables)\n        optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n        return loss\n\n    for epoch in range(10):\n        loss = train_step(data, labels)\n        if job_type == 'chief':\n            print(f'Epoch {epoch}, Loss: {loss.numpy():.4f}')\n\nif __name__ == '__main__':\n    train()\n
                                                                          "},{"location":"en/admin/baize/developer/jobs/tensorflow.html#results_1","title":"Results","text":"

                                                                          Similarly, you can enter the job details to view the resource usage and log outputs of each Pod.

                                                                          "},{"location":"en/admin/baize/developer/jobs/view.html","title":"View Job Workloads","text":"

                                                                          Once a job is created, it will be displayed in the job list.

1. In the job list, click the \u2507 on the right side of a job and select Job Workload Details.

2. A pop-up window will appear asking you to choose which Pod to view. Click Enter.

                                                                          3. You will be redirected to the container management interface, where you can view the container\u2019s working status, labels and annotations, and any events that have occurred.

                                                                          4. You can also view detailed logs of the current Pod for the recent period. By default, 100 lines of logs are displayed. To view more detailed logs or to download logs, click the blue Insight text at the top.

                                                                          5. Additionally, you can use the ... in the upper right corner to view the current Pod's YAML, and to upload or download files. Below is an example of a Pod's YAML.

                                                                          kind: Pod\napiVersion: v1\nmetadata:\n  name: neko-tensorboard-job-test-202404181843-skxivllb-worker-0\n  namespace: default\n  uid: ddedb6ff-c278-47eb-ae1e-0de9b7c62f8c\n  resourceVersion: '41092552'\n  creationTimestamp: '2024-04-18T10:43:36Z'\n  labels:\n    training.kubeflow.org/job-name: neko-tensorboard-job-test-202404181843-skxivllb\n    training.kubeflow.org/operator-name: pytorchjob-controller\n    training.kubeflow.org/replica-index: '0'\n    training.kubeflow.org/replica-type: worker\n  annotations:\n    cni.projectcalico.org/containerID: 0cfbb9af257d5e69027c603c6cb2d3890a17c4ae1a145748d5aef73a10d7fbe1\n    cni.projectcalico.org/podIP: ''\n    cni.projectcalico.org/podIPs: ''\n    hami.io/bind-phase: success\n    hami.io/bind-time: '1713437016'\n    hami.io/vgpu-devices-allocated: GPU-29d5fa0d-935b-2966-aff8-483a174d61d1,NVIDIA,1024,20:;\n    hami.io/vgpu-devices-to-allocate: ;\n    hami.io/vgpu-node: worker-a800-1\n    hami.io/vgpu-time: '1713437016'\n    k8s.v1.cni.cncf.io/network-status: |-\n      [{\n          \"name\": \"kube-system/calico\",\n          \"ips\": [\n              \"10.233.97.184\"\n          ],\n          \"default\": true,\n          \"dns\": {}\n      }]\n    k8s.v1.cni.cncf.io/networks-status: |-\n      [{\n          \"name\": \"kube-system/calico\",\n          \"ips\": [\n              \"10.233.97.184\"\n          ],\n          \"default\": true,\n          \"dns\": {}\n      }]\n  ownerReferences:\n    - apiVersion: kubeflow.org/v1\n      kind: PyTorchJob\n      name: neko-tensorboard-job-test-202404181843-skxivllb\n      uid: e5a8b05d-1f03-4717-8e1c-4ec928014b7b\n      controller: true\n      blockOwnerDeletion: true\nspec:\n  volumes:\n    - name: 0-dataset-pytorch-examples\n      persistentVolumeClaim:\n        claimName: pytorch-examples\n    - name: kube-api-access-wh9rh\n      projected:\n        sources:\n          - serviceAccountToken:\n              expirationSeconds: 3607\n              path: token\n          - configMap:\n              name: kube-root-ca.crt\n              items:\n                - key: ca.crt\n                  path: ca.crt\n          - downwardAPI:\n              items:\n                - path: namespace\n                  fieldRef:\n                    apiVersion: v1\n                    fieldPath: metadata.namespace\n        defaultMode: 420\n  containers:\n    - name: pytorch\n      image: m.daocloud.io/docker.io/pytorch/pytorch\n      command:\n        - bash\n      args:\n        - '-c'\n        - >-\n          ls -la /root && which pip && pip install pytorch_lightning tensorboard\n          && python /root/Git/pytorch/examples/mnist/main.py\n      ports:\n        - name: pytorchjob-port\n          containerPort: 23456\n          protocol: TCP\n      env:\n        - name: PYTHONUNBUFFERED\n          value: '1'\n        - name: PET_NNODES\n          value: '1'\n      resources:\n        limits:\n          cpu: '4'\n          memory: 8Gi\n          nvidia.com/gpucores: '20'\n          nvidia.com/gpumem: '1024'\n          nvidia.com/vgpu: '1'\n        requests:\n          cpu: '4'\n          memory: 8Gi\n          nvidia.com/gpucores: '20'\n          nvidia.com/gpumem: '1024'\n          nvidia.com/vgpu: '1'\n      volumeMounts:\n        - name: 0-dataset-pytorch-examples\n          mountPath: /root/Git/pytorch/examples\n        - name: kube-api-access-wh9rh\n          readOnly: true\n          mountPath: 
/var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  restartPolicy: Never\n  terminationGracePeriodSeconds: 30\n  dnsPolicy: ClusterFirst\n  serviceAccountName: default\n  serviceAccount: default\n  nodeName: worker-a800-1\n  securityContext: {}\n  affinity: {}\n  schedulerName: hami-scheduler\n  tolerations:\n    - key: node.kubernetes.io/not-ready\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n    - key: node.kubernetes.io/unreachable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n  priorityClassName: baize-high-priority\n  priority: 100000\n  enableServiceLinks: true\n  preemptionPolicy: PreemptLowerPriority\nstatus:\n  phase: Succeeded\n  conditions:\n    - type: Initialized\n      status: 'True'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:43:36Z'\n      reason: PodCompleted\n    - type: Ready\n      status: 'False'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:46:34Z'\n      reason: PodCompleted\n    - type: ContainersReady\n      status: 'False'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:46:34Z'\n      reason: PodCompleted\n    - type: PodScheduled\n      status: 'True'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:43:36Z'\n  hostIP: 10.20.100.211\n  podIP: 10.233.97.184\n  podIPs:\n    - ip: 10.233.97.184\n  startTime: '2024-04-18T10:43:36Z'\n  containerStatuses:\n    - name: pytorch\n      state:\n        terminated:\n          exitCode: 0\n          reason: Completed\n          startedAt: '2024-04-18T10:43:39Z'\n          finishedAt: '2024-04-18T10:46:34Z'\n          containerID: >-\n            containerd://09010214bcf3315e81d38fba50de3943c9d2b48f50a6cc2e83f8ef0e5c6eeec1\n      lastState: {}\n      ready: false\n      restartCount: 0\n      image: m.daocloud.io/docker.io/pytorch/pytorch:latest\n      imageID: >-\n        m.daocloud.io/docker.io/pytorch/pytorch@sha256:11691e035a3651d25a87116b4f6adc113a27a29d8f5a6a583f8569e0ee5ff897\n      containerID: >-\n        containerd://09010214bcf3315e81d38fba50de3943c9d2b48f50a6cc2e83f8ef0e5c6eeec1\n      started: false\n  qosClass: Guaranteed\n
                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html","title":"baizectl CLI Usage Guide","text":"

                                                                          baizectl is a command line tool specifically designed for model developers and data scientists within the AI Lab module. It provides a series of commands to help users manage distributed training jobs, check job statuses, manage datasets, and more. It also supports connecting to Kubernetes worker clusters and AI platform workspaces, aiding users in efficiently using and managing Kubernetes platform resources.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#installation","title":"Installation","text":"

                                                                          Currently, baizectl is integrated within AI Lab. Once you create a Notebook, you can directly use baizectl within it.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#getting-started","title":"Getting Started","text":""},{"location":"en/admin/baize/developer/notebooks/baizectl.html#basic-information","title":"Basic Information","text":"

                                                                          The basic format of the baizectl command is as follows:

                                                                          jovyan@19d0197587cc:/$ baizectl\nAI platform management tool\n\nUsage:\n  baizectl [command]\n\nAvailable Commands:\n  completion  Generate the autocompletion script for the specified shell\n  data        Management datasets\n  help        Help about any command\n  job         Manage jobs\n  login       Login to the platform\n  version     Show cli version\n\nFlags:\n      --cluster string     Cluster name to operate\n  -h, --help               help for baizectl\n      --mode string        Connection mode: auto, api, notebook (default \"auto\")\n  -n, --namespace string   Namespace to use for the operation. If not set, the default Namespace will be used.\n  -s, --server string      access base url\n      --skip-tls-verify    Skip TLS certificate verification\n      --token string       access token\n  -w, --workspace int32    Workspace ID to use for the operation\n\nUse \"baizectl [command] --help\" for more information about a command.\n

The above provides basic information about baizectl. Users can view general help using baizectl --help, or help for a specific command using baizectl [command] --help.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#view-versions","title":"View Versions","text":"

                                                                          baizectl supports viewing version information using the version command.

                                                                          (base) jovyan@den-0:~$ baizectl version \nbaizectl version: v0.5.0, commit sha: ac0837c4\n
                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#command-format","title":"Command Format","text":"

                                                                          The basic format of the baizectl command is as follows:

                                                                          baizectl [command] [flags]\n

                                                                          Here, [command] refers to the specific operation command, such as data and job, and [flags] are optional parameters used to specify detailed information about the operation.
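For example, a command can be combined with the output-format and paging flags shown in the job management section below:

# List jobs as JSON, 10 per page\nbaizectl job ls -o json --page 1 --page-size 10\n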

                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#common-options","title":"Common Options","text":"
                                                                          • --cluster string: Specify the name of the cluster to operate on.
                                                                          • -h, --help: Display help information.
                                                                          • --mode string: Connection mode, optional values are auto, api, notebook (default value is auto).
                                                                          • -n, --namespace string: Specify the namespace for the operation. If not set, the default namespace will be used.
                                                                          • -s, --server string: Base URL
                                                                          • --skip-tls-verify: Skip TLS certificate verification.
                                                                          • --token string: Access token
                                                                          • -w, --workspace int32: Specify the workspace ID for the operation.
                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#features","title":"Features","text":""},{"location":"en/admin/baize/developer/notebooks/baizectl.html#job-management","title":"Job Management","text":"

                                                                          baizectl provides a series of commands to manage distributed training jobs, including viewing job lists, submitting jobs, viewing logs, restarting jobs, deleting jobs, and more.

                                                                          jovyan@19d0197587cc:/$ baizectl job\nManage jobs\n\nUsage:\n  baizectl job [command]\n\nAvailable Commands:\n  delete      Delete a job\n  logs        Show logs of a job\n  ls          List jobs\n  restart     restart a job\n  submit      Submit a job\n\nFlags:\n  -h, --help            help for job\n  -o, --output string   Output format. One of: table, json, yaml (default \"table\")\n      --page int        Page number (default 1)\n      --page-size int   Page size (default -1)\n      --search string   Search query\n      --sort string     Sort order\n      --truncate int    Truncate output to the given length, 0 means no truncation (default 50)\n\nUse \"baizectl job [command] --help\" for more information about a command.\n
                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#submit-training-jobs","title":"Submit Training Jobs","text":"

                                                                          baizectl supports submitting a job using the submit command. You can view detailed information by using baizectl job submit --help.

                                                                          (base) jovyan@den-0:~$ baizectl job submit --help\nSubmit a job\n\nUsage:\n  baizectl job submit [flags] -- command ...\n\nAliases:\n  submit, create\n\nExamples:\n# Submit a job to run the command \"torchrun python train.py\"\nbaizectl job submit -- torchrun python train.py\n# Submit a job with 2 workers(each pod use 4 gpus) to run the command \"torchrun python train.py\" and use the image \"pytorch/pytorch:1.8.1-cuda11.1-cudnn8-runtime\"\nbaizectl job submit --image pytorch/pytorch:1.8.1-cuda11.1-cudnn8-runtime --workers 2 --resources nvidia.com/gpu=4 -- torchrun python train.py\n# Submit a tensorflow job to run the command \"python train.py\"\nbaizectl job submit --tensorflow -- python train.py\n\n\nFlags:\n      --annotations stringArray                       The annotations of the job, the format is key=value\n      --auto-load-env                                 It only takes effect when executed in Notebook, the environment variables of the current environment will be automatically read and set to the environment variables of the Job, the specific environment variables to be read can be specified using the BAIZE_MAPPING_ENVS environment variable, the default is PATH,CONDA_*,*PYTHON*,NCCL_*, if set to false, the environment variables of the current environment will not be read. (default true)\n      --commands stringArray                          The default command of the job\n  -d, --datasets stringArray                          The dataset bind to the job, the format is datasetName:mountPath, e.g. mnist:/data/mnist\n  -e, --envs stringArray                              The environment variables of the job, the format is key=value\n  -x, --from-notebook string                          Define whether to read the configuration of the current Notebook and directly create tasks, including images, resources, and dataset.\n                                                      auto: Automatically determine the mode according to the current environment. If the current environment is a Notebook, it will be set to notebook mode.\n                                                      false: Do not read the configuration of the current Notebook.\n                                                      true: Read the configuration of the current Notebook. 
(default \"auto\")\n  -h, --help                                          help for submit\n      --image string                                  The image of the job, it must be specified if fromNotebook is false.\n  -t, --job-type string                               Job type: PYTORCH, TENSORFLOW, PADDLE (default \"PYTORCH\")\n      --labels stringArray                            The labels of the job, the format is key=value\n      --max-retries int32                             number of retries before marking this job failed\n      --max-run-duration int                          Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it\n      --name string                                   The name of the job, if empty, the name will be generated automatically.\n      --paddle                                        PaddlePaddle Job, has higher priority than --job-type\n      --priority string                               The priority of the job, current support baize-medium-priority, baize-low-priority, baize-high-priority\n      --pvcs stringArray                              The pvcs bind to the job, the format is pvcName:mountPath, e.g. mnist:/data/mnist\n      --pytorch                                       Pytorch Job, has higher priority than --job-type\n      --queue string                                  The queue to used\n      --requests-resources stringArray                Similar to resources, but sets the resources of requests\n      --resources stringArray                         The resources of the job, it is a string in the format of cpu=1,memory=1Gi,nvidia.com/gpu=1, it will be set to the limits and requests of the container.\n      --restart-policy string                         The job restart policy (default \"on-failure\")\n      --runtime-envs baizectl data ls --runtime-env   The runtime environment to use for the job, you can use baizectl data ls --runtime-env to get the runtime environment\n      --shm-size int32                                The shared memory size of the job, default is 0, which means no shared memory, if set to more than 0, the job will use the shared memory, the unit is MiB\n      --tensorboard-log-dir string                    The tensorboard log directory, if set, the job will automatically start tensorboard, else not. The format is /path/to/log, you can use relative path in notebook.\n      --tensorflow                                    Tensorflow Job, has higher priority than --job-type\n      --workers int                                   The workers of the job, default is 1, which means single worker, if set to more than 1, the job will be distributed. (default 1)\n      --working-dir string                            The working directory of job container, if in notebook mode, the default is the directory of the current file\n

                                                                          Note

                                                                          Explanation of command parameters for submitting jobs:

• --name: Job name. If empty, it will be auto-generated.
• --image: Image name. Required unless the configuration is read from a Notebook (see --from-notebook).
• --priority: Job priority, supporting high=baize-high-priority, medium=baize-medium-priority, low=baize-low-priority.
• --resources: Job resources, formatted as cpu=1,memory=1Gi,nvidia.com/gpu=1.
• --workers: Number of job worker nodes. The default is 1; when set to greater than 1, the job will run in a distributed manner.
• --queue: Job queue. Queue resources need to be created in advance.
• --working-dir: Working directory. In Notebook mode, the directory of the current file is used by default.
• --datasets: Dataset to mount, formatted as datasetName:mountPath, for example mnist:/data/mnist.
• --shm-size: Shared memory size in MiB. Setting a value greater than 0 enables shared memory, which can benefit distributed training jobs.
• --labels: Job labels, formatted as key=value.
• --max-retries: Maximum number of times to restart the job upon failure. Unlimited by default.
• --max-run-duration: Maximum run duration in seconds; the system terminates the job if it runs longer than this. Unlimited by default.
• --restart-policy: Restart policy, supporting on-failure, never, always. The default is on-failure.
• --from-notebook: Whether to read configurations from the Notebook. Supports auto, true, false, with the default being auto.
                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#example-of-a-pytorch-single-node-job","title":"Example of a PyTorch Single-Node Job","text":"

Example of submitting a training job; users can modify the parameters based on their actual needs. Below is an example of creating a single-node PyTorch job:

                                                                          baizectl job submit --name demojob-v2 -t PYTORCH \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --resources cpu=1,memory=1Gi \\\n    --workers 1 \\\n    --queue default \\\n    --working-dir /data \\\n    --datasets fashion-mnist:/data/mnist \\\n    --labels job_type=pytorch \\\n    --max-retries 3 \\\n    --max-run-duration 60 \\\n    --restart-policy on-failure \\\n    -- sleep 1000\n
                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#pytorch","title":"PyTorch \u5206\u5e03\u5f0f\u4efb\u52a1\u793a\u4f8b","text":"

                                                                          \u63d0\u4ea4\u8bad\u7ec3\u4efb\u52a1\u793a\u4f8b\uff0c\u7528\u6237\u53ef\u4ee5\u6839\u636e\u5b9e\u9645\u9700\u6c42\u4fee\u6539\u53c2\u6570\uff0c\u4ee5\u4e0b\u4e3a\u521b\u5efa\u4e00\u4e2a PyTorch \u4efb\u52a1\u7684\u793a\u4f8b\uff1a

                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#example-of-a-distributed-pytorch-job","title":"Example of a Distributed PyTorch Job","text":"

Example of submitting a training job; you can modify the parameters based on your actual needs. Setting --workers to 2 or more automatically creates a distributed job. Below is an example of creating a distributed PyTorch job:

baizectl job submit --name demojob-v2 -t PYTORCH \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --resources cpu=1,memory=1Gi \\\n    --workers 2 \\\n    --shm-size 1024 \\\n    --queue default \\\n    --working-dir /data \\\n    --datasets fashion-mnist:/data/mnist \\\n    --labels job_type=pytorch \\\n    --max-retries 3 \\\n    --max-run-duration 60 \\\n    --restart-policy on-failure \\\n    -- sleep 1000\n
                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#example-of-a-tensorflow-job","title":"Example of a TensorFlow Job","text":"

                                                                          Use the -t parameter to specify the job type. Below is an example of creating a TensorFlow job:

baizectl job submit --name demojob-v2 -t TENSORFLOW \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --from-notebook auto \\\n    --workers 1 \\\n    --queue default \\\n    --working-dir /data \\\n    --datasets fashion-mnist:/data/mnist \\\n    --labels job_type=tensorflow \\\n    --max-retries 3 \\\n    --max-run-duration 60 \\\n    --restart-policy on-failure \\\n    -- sleep 1000\n

                                                                          You can also use the --job-type or --tensorflow parameter to specify the job type.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#example-of-a-paddle-job","title":"Example of a Paddle Job","text":"
baizectl job submit --name demojob-v2 -t PADDLE \\\n    --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --priority baize-high-priority \\\n    --queue default \\\n    --working-dir /data \\\n    --datasets fashion-mnist:/data/mnist \\\n    --labels job_type=paddle \\\n    --max-retries 3 \\\n    --max-run-duration 60 \\\n    --restart-policy on-failure \\\n    -- sleep 1000\n
                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#view-job-list","title":"View Job List","text":"

                                                                          baizectl job supports viewing the job list using the ls command. By default, it displays pytorch jobs, but users can specify the job type using the -t parameter.

                                                                          (base) jovyan@den-0:~$ baizectl job ls  # View pytorch jobs by default\n NAME        TYPE     PHASE      DURATION  COMMAND    \n demong      PYTORCH  SUCCEEDED  1m2s      sleep 60   \n demo-sleep  PYTORCH  RUNNING    1h25m28s  sleep 7200 \n(base) jovyan@den-0:~$ baizectl job ls demo-sleep  # View a specific job\n NAME        TYPE     PHASE      DURATION  COMMAND     \n demo-sleep  PYTORCH  RUNNING    1h25m28s  sleep 7200 \n(base) jovyan@den-0:~$ baizectl job ls -t TENSORFLOW   # View tensorflow jobs\n NAME       TYPE        PHASE    DURATION  COMMAND    \n demotfjob  TENSORFLOW  CREATED  0s        sleep 1000 \n

                                                                          The job list uses table as the default display format. If you want to view more information, you can use the json or yaml format, which can be specified using the -o parameter.

                                                                          (base) jovyan@den-0:~$ baizectl job ls -t TENSORFLOW -o yaml\n- baseConfig:\n    args:\n    - sleep\n    - \"1000\"\n    image: release.daocloud.io/baize/baize-notebook:v0.5.0\n    labels:\n      app: den\n    podConfig:\n      affinity: {}\n      kubeEnvs:\n      - name: CONDA_EXE\n        value: /opt/conda/bin/conda\n      - name: CONDA_PREFIX\n        value: /opt/conda\n      - name: CONDA_PROMPT_MODIFIER\n        value: '(base) '\n      - name: CONDA_SHLVL\n        value: \"1\"\n      - name: CONDA_DIR\n        value: /opt/conda\n      - name: CONDA_PYTHON_EXE\n        value: /opt/conda/bin/python\n      - name: CONDA_PYTHON_EXE\n        value: /opt/conda/bin/python\n      - name: CONDA_DEFAULT_ENV\n        value: base\n      - name: PATH\n        value: /opt/conda/bin:/opt/conda/condabin:/command:/opt/conda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n      priorityClass: baize-high-priority\n      queue: default\n  creationTimestamp: \"2024-06-16T07:47:27Z\"\n  jobSpec:\n    runPolicy:\n      suspend: true\n    tfReplicaSpecs:\n      Worker:\n        replicas: 1\n        restartPolicy: OnFailure\n        template:\n          metadata:\n            creationTimestamp: null\n          spec:\n            affinity: {}\n            containers:\n            - args:\n              - sleep\n              - \"1000\"\n              env:\n              - name: CONDA_EXE\n                value: /opt/conda/bin/conda\n              - name: CONDA_PREFIX\n                value: /opt/conda\n              - name: CONDA_PROMPT_MODIFIER\n                value: '(base) '\n              - name: CONDA_SHLVL\n                value: \"1\"\n              - name: CONDA_DIR\n                value: /opt/conda\n              - name: CONDA_PYTHON_EXE\n                value: /opt/conda/bin/python\n              - name: CONDA_PYTHON_EXE\n                value: /opt/conda/bin/python\n              - name: CONDA_DEFAULT_ENV\n                value: base\n              - name: PATH\n                value: /opt/conda/bin:/opt/conda/condabin:/command:/opt/conda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\n              image: release.daocloud.io/baize/baize-notebook:v0.5.0\n              name: tensorflow\n              resources:\n                limits:\n                  memory: 1Gi\n                requests:\n                  cpu: \"1\"\n                  memory: 2Gi\n              workingDir: /home/jovyan\n            priorityClassName: baize-high-priority\n  name: demotfjob\n  namespace: ns-chuanjia-ndx\n  phase: CREATED\n  roleConfig:\n    TF_WORKER:\n      replicas: 1\n      resources:\n        limits:\n          memory: 1Gi\n        requests:\n          cpu: \"1\"\n          memory: 2Gi\n  totalResources:\n    limits:\n      memory: \"1073741824\"\n    requests:\n      cpu: \"1\"\n      memory: \"2147483648\"\n  trainingConfig:\n    restartPolicy: RESTART_POLICY_ON_FAILURE\n  trainingMode: SINGLE\n  type: TENSORFLOW\n
                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#view-job-logs","title":"View Job Logs","text":"

                                                                          baizectl job supports viewing job logs using the logs command. You can view detailed information by using baizectl job logs --help.

                                                                          (base) jovyan@den-0:~$ baizectl job logs --help\nShow logs of a job\n\nUsage:\n  baizectl job logs <job-name> [pod-name] [flags]\n\nAliases:\n  logs, log\n\nFlags:\n  -f, --follow            Specify if the logs should be streamed.\n  -h, --help              help for logs\n  -t, --job-type string   Job type: PYTORCH, TENSORFLOW, PADDLE (default \"PYTORCH\")\n      --paddle            PaddlePaddle Job, has higher priority than --job-type\n      --pytorch           Pytorch Job, has higher priority than --job-type\n      --tail int          Lines of recent log file to display.\n      --tensorflow        Tensorflow Job, has higher priority than --job-type\n      --timestamps        Show timestamps\n

                                                                          Note

                                                                          • The --follow parameter allows for real-time log viewing.
                                                                          • The --tail parameter specifies the number of log lines to view, with a default of 50 lines.
                                                                          • The --timestamps parameter displays timestamps.

                                                                          Example of viewing job logs:

                                                                          (base) jovyan@den-0:~$ baizectl job log -t TENSORFLOW tf-sample-job-v2-202406161632-evgrbrhn -f\n2024-06-16 08:33:06.083766: I tensorflow/core/util/port.cc:110] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n2024-06-16 08:33:06.086189: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.\n2024-06-16 08:33:06.132416: I tensorflow/tsl/cuda/cudart_stub.cc:28] Could not find cuda drivers on your machine, GPU will not be used.\n2024-06-16 08:33:06.132903: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\nTo enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n2024-06-16 08:33:07.223046: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\nModel: \"sequential\"\n_________________________________________________________________\n Layer (type)                Output Shape              Param #   \n=================================================================\n Conv1 (Conv2D)              (None, 13, 13, 8)         80        \n\n flatten (Flatten)           (None, 1352)              0         \n\n Softmax (Dense)             (None, 10)                13530     \n\n=================================================================\nTotal params: 13610 (53.16 KB)\nTrainable params: 13610 (53.16 KB)\nNon-trainable params: 0 (0.00 Byte)\n...\n
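The --tail and --timestamps flags can be combined in the same way; for example, for the demo-sleep job shown earlier:

# Show the last 100 log lines with timestamps (PYTORCH is the default job type)\nbaizectl job logs demo-sleep --tail 100 --timestamps\n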
                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#delete-jobs","title":"Delete Jobs","text":"

                                                                          baizectl job supports deleting jobs using the delete command and also supports deleting multiple jobs simultaneously.

                                                                          (base) jovyan@den-0:~$ baizectl job delete --help\nDelete a job\n\nUsage:\n  baizectl job delete [flags]\n\nAliases:\n  delete, del, remove, rm\n\nFlags:\n  -h, --help              help for delete\n  -t, --job-type string   Job type: PYTORCH, TENSORFLOW, PADDLE (default \"PYTORCH\")\n      --paddle            PaddlePaddle Job, has higher priority than --job-type\n      --pytorch           Pytorch Job, has higher priority than --job-type\n      --tensorflow        Tensorflow Job, has higher priority than --job-type\n

Here is an example of deleting jobs:

                                                                          (base) jovyan@den-0:~$ baizectl job ls\n NAME        TYPE     PHASE      DURATION  COMMAND    \n demong      PYTORCH  SUCCEEDED  1m2s      sleep 60   \n demo-sleep  PYTORCH  RUNNING    1h20m51s  sleep 7200 \n demojob     PYTORCH  FAILED     16m46s    sleep 1000 \n demojob-v2  PYTORCH  RUNNING    3m13s     sleep 1000 \n demojob-v3  PYTORCH  CREATED    0s        sleep 1000 \n(base) jovyan@den-0:~$ baizectl job delete demojob      # delete a job\nDelete job demojob in ns-chuanjia-ndx successfully\n(base) jovyan@den-0:~$ baizectl job delete demojob-v2 demojob-v3     # delete several jobs\nDelete job demojob-v2 in ns-chuanjia-ndx successfully\nDelete job demojob-v3 in ns-chuanjia-ndx successfully\n
                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#restart-jobs","title":"Restart Jobs","text":"

                                                                          baizectl job supports restarting jobs using the restart command. You can view detailed information by using baizectl job restart --help.

                                                                          (base) jovyan@den-0:~$ baizectl job restart --help\nrestart a job\n\nUsage:\n  baizectl job restart [flags] job\n\nAliases:\n  restart, rerun\n\nFlags:\n  -h, --help              help for restart\n  -t, --job-type string   Job type: PYTORCH, TENSORFLOW, PADDLE (default \"PYTORCH\")\n      --paddle            PaddlePaddle Job, has higher priority than --job-type\n      --pytorch           Pytorch Job, has higher priority than --job-type\n      --tensorflow        Tensorflow Job, has higher priority than --job-type\n
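The command takes the job name as its argument. For example, to rerun the demo-sleep job from the earlier list:

# Restart (rerun) a job by name; add -t to target a non-PyTorch job type\nbaizectl job restart demo-sleep\n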
                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#dataset-management","title":"Dataset Management","text":"

                                                                          baizectl supports managing datasets. Currently, it supports viewing the dataset list, making it convenient to quickly bind datasets during job training.

                                                                          (base) jovyan@den-0:~$ baizectl data \nManagement datasets\n\nUsage:\n  baizectl data [flags]\n  baizectl data [command]\n\nAliases:\n  data, dataset, datasets, envs, runtime-envs\n\nAvailable Commands:\n  ls          List datasets\n\nFlags:\n  -h, --help            help for data\n  -o, --output string   Output format. One of: table, json, yaml (default \"table\")\n      --page int        Page number (default 1)\n      --page-size int   Page size (default -1)\n      --search string   Search query\n      --sort string     Sort order\n      --truncate int    Truncate output to the given length, 0 means no truncation (default 50)\n\nUse \"baizectl data [command] --help\" for more information about a command.\n
                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#view-datasets","title":"View Datasets","text":"

                                                                          baizectl data supports viewing the datasets using the ls command. By default, it displays in table format, but users can specify the output format using the -o parameter.

                                                                          (base) jovyan@den-0:~$ baizectl data ls\n NAME             TYPE  URI                                                    PHASE \n fashion-mnist    GIT   https://gitee.com/samzong_lu/fashion-mnist.git         READY \n sample-code      GIT   https://gitee.com/samzong_lu/training-sample-code....  READY \n training-output  PVC   pvc://training-output                                  READY \n

                                                                          When submitting a training job, you can specify the dataset using the -d or --datasets parameter, for example:

                                                                          baizectl job submit --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --datasets sample-code:/home/jovyan/code \\\n    -- sleep 1000\n

                                                                          To mount multiple datasets simultaneously, you can use the following format:

                                                                          baizectl job submit --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --datasets sample-code:/home/jovyan/code fashion-mnist:/home/jovyan/data \\\n    -- sleep 1000\n
                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#view-dependencies-environment","title":"View Dependencies (Environment)","text":"

                                                                          The environment runtime-env is a unique environment management capability of Suanova. By decoupling the dependencies required for model development, training tasks, and inference, it offers a more flexible way to manage dependencies without the need to repeatedly build complex Docker images. You simply need to select the appropriate environment.

                                                                          Additionally, runtime-env supports hot updates and dynamic upgrades, allowing you to update environment dependencies without rebuilding the image.

baizectl data supports viewing the environment list using the ls command with the --runtime-env flag. By default, it displays in table format, but users can specify the output format using the -o parameter.

                                                                          (base) jovyan@den-0:~$ baizectl data ls --runtime-env \n NAME               TYPE   URI                                                    PHASE      \n fashion-mnist      GIT    https://gitee.com/samzong_lu/fashion-mnist.git         READY      \n sample-code        GIT    https://gitee.com/samzong_lu/training-sample-code....  READY      \n training-output    PVC    pvc://training-output                                  READY      \n tensorflow-sample  CONDA  conda://python?version=3.12.3                          PROCESSING \n

                                                                          When submitting a training job, you can specify the environment using the --runtime-env parameter:

                                                                          baizectl job submit --image release.daocloud.io/baize/baize-notebook:v0.5.0 \\\n    --runtime-env tensorflow-sample \\\n    -- sleep 1000\n
                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#advanced-usage","title":"Advanced Usage","text":"

                                                                          baizectl supports more advanced usage, such as generating auto-completion scripts, using specific clusters and namespaces, and using specific workspaces.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#generating-auto-completion-scripts","title":"Generating Auto-Completion Scripts","text":"
                                                                          baizectl completion bash > /etc/bash_completion.d/baizectl\n

The above command generates an auto-completion script for bash and saves it as /etc/bash_completion.d/baizectl. You can load it by running source /etc/bash_completion.d/baizectl.
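
If you only need completion for the current shell session, the script can also be loaded directly (a common pattern for CLIs that provide a completion subcommand):

source <(baizectl completion bash)\n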

                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#using-specific-clusters-and-namespaces","title":"Using Specific Clusters and Namespaces","text":"
                                                                          baizectl job ls --cluster my-cluster --namespace my-namespace\n

                                                                          This command will list all jobs in the my-namespace namespace within the my-cluster cluster.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#using-specific-workspaces","title":"Using Specific Workspaces","text":"
baizectl job ls --workspace 123\n

This command will list all jobs under the workspace with ID 123.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#frequently-asked-questions","title":"Frequently Asked Questions","text":"
                                                                          • Question: Why can't I connect to the server?

  Solution: Check if the --server parameter is set correctly and ensure that the network connection is stable. If the server uses a self-signed certificate, you can use --skip-tls-verify to skip TLS certificate verification (see the example after this list).

                                                                          • Question: How can I resolve insufficient permissions issues?

                                                                            Solution: Ensure that you are using the correct --token parameter to log in and check if the current user has the necessary permissions for the operation.

                                                                          • Question: Why can't I list the datasets?

                                                                            Solution: Check if the namespace and workspace are set correctly and ensure that the current user has permission to access these resources.
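
Returning to the first question above, the connection flags can be combined like this (a hedged sketch: it assumes --server, --token, and --skip-tls-verify are global flags of baizectl; the server address and token are placeholders):

baizectl job ls --server https://ai.example.com --token <your-token> --skip-tls-verify\n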

                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizectl.html#conclusion","title":"Conclusion","text":"

                                                                          With this guide, you can quickly get started with baizectl commands and efficiently manage AI platform resources in practical applications. If you have any questions or issues, it is recommended to use baizectl [command] --help to check more detailed information.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizess.html","title":"baizess Source Switch Tool Usage Guide","text":"

baizess is a built-in, out-of-the-box source switching tool available in the Notebooks of the AI Lab module. It provides a streamlined command-line interface for managing package sources across various programming environments. With baizess, you can easily switch sources for commonly used package managers, ensuring seamless access to the latest libraries and dependencies while simplifying package source management for developers and data scientists.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizess.html#installation","title":"Installation","text":"

                                                                          Currently, baizess is integrated within AI Lab. Once you create a Notebook, you can directly use baizess within it.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizess.html#getting-started","title":"Getting Started","text":""},{"location":"en/admin/baize/developer/notebooks/baizess.html#basic-information","title":"Basic Information","text":"

                                                                          The basic information of the baizess command is as follows:

                                                                          jovyan@19d0197587cc:/$ baizess\nsource switch tool\n\nUsage:\n  baizess [command] [package-manager]\n\nAvailable Commands:\n  set     Switch the source of specified package manager to current fastest source\n  reset   Reset the source of specified package manager to default source\n\nAvailable Package-managers:\n  apt     (require root privilege)\n  conda\n  pip\n
                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizess.html#command-format","title":"Command Format","text":"

                                                                          The basic format of the baizess command is as follows:

                                                                          baizess [command] [package-manager]\n

Here, [command] refers to the specific operation command, and [package-manager] specifies the package manager the operation applies to.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizess.html#command","title":"Command","text":"
• set: Back up the current source, run a speed test, and switch the specified package manager's source to the fastest domestic source based on the result.
• reset: Reset the specified package manager to its default source.
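
For example, following the command format above, you can switch pip to the fastest source and later restore the default:

baizess set pip\nbaizess reset pip\n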
                                                                          "},{"location":"en/admin/baize/developer/notebooks/baizess.html#currently-supported-package-manager","title":"Currently supported package-manager","text":"
• apt (source switch and reset require root privileges; the original source is backed up in /etc/apt/backup/)
• conda (the updated source is written to ~/.condarc)
• pip
                                                                          "},{"location":"en/admin/baize/developer/notebooks/create.html","title":"Create Notebook","text":"

Notebook provides an interactive web-based programming environment, making it convenient for developers to quickly conduct data science and machine learning experiments.

                                                                          Upon entering the developer console, developers can create and manage Notebooks in different clusters and namespaces.

                                                                          1. Click Notebooks in the left navigation bar to enter the Notebook list. Click the Create button on the right.

                                                                          2. The system will pre-fill basic configuration data, including the cluster, namespace, queue, priority, resources, and job arguments. Adjust these arguments and click OK.

                                                                          3. The newly created Notebook will initially be in the Pending state, and will change to Running after a moment, with the latest one appearing at the top of the list by default.

                                                                          4. Click the \u2507 on the right side to perform more actions: update arguments, start/stop, clone Notebook, view workload details, and delete.

                                                                          Note

If you select CPU-only resources but find that all GPUs on the node are mounted into the Notebook, you can add the following container environment variable to resolve the issue:

                                                                          NVIDIA_VISIBLE_DEVICES=\"\"\n
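
If you prefer to patch the workload directly, the same variable can be set with kubectl (a hedged example: Notebook workloads run as StatefulSets, and the workload name and namespace below are placeholders):

kubectl set env statefulset/<notebook-name> NVIDIA_VISIBLE_DEVICES=\"\" -n <namespace>\n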
                                                                          "},{"location":"en/admin/baize/developer/notebooks/delete.html","title":"Delete Notebook","text":"

                                                                          If you find a Notebook to be redundant, expired, or no longer needed for any other reason, you can delete it from the Notebook list.

                                                                          1. Click the \u2507 on the right side of the Notebook in the Notebook list, then choose Delete from the dropdown menu.

                                                                          2. In the pop-up window, confirm the Notebook you want to delete, enter the Notebook name, and then click Delete.

                                                                          3. A confirmation message will appear indicating successful deletion, and the Notebook will disappear from the list.

                                                                          Caution

                                                                          Once a Notebook is deleted, it cannot be recovered, so please proceed with caution.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-auto-close.html","title":"Automatic Shutdown of Idle Notebooks","text":"

                                                                          To optimize resource usage, the smart computing system automatically shuts down idle notebooks after a period of inactivity. This helps free up resources when a notebook is not in use.

                                                                          • Advantages: This feature significantly reduces resource waste from long periods of inactivity, enhancing overall efficiency.
                                                                          • Disadvantages: Without proper backup strategies in place, this may lead to potential data loss.

                                                                          Note

                                                                          This feature is enabled by default at the cluster level, with a default timeout of 30 minutes.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-auto-close.html#change-configurations","title":"Change Configurations","text":"

                                                                          Currently, configuration changes must be made manually, but more convenient options will be available in the future.

                                                                          To modify the deployment parameters of baize-agent in your worker cluster, update the Helm App.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-auto-close.html#modify-on-ui","title":"Modify on UI","text":"
1. On the Clusters page, locate your worker cluster, go to its details, select Helm Apps, and find baize-agent under the baize-system namespace. Then click Update in the upper right corner.

                                                                          2. Adjust YAML as shown below:

                                                                            ...\nnotebook-controller:\n  culling_enabled: false\n  cull_idle_time: 120\n  idleness_check_period: 1\n...\n
                                                                          3. After confirming the changes, click Next and OK .

                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-auto-close.html#modify-on-cli","title":"Modify on CLI","text":"

                                                                          In the console, use the helm upgrade command to change the configuration:

# Set the version number\nexport VERSION=0.8.0\n\n# Update the Helm chart.\n# culling_enabled: enable automatic shutdown (default: true)\n# cull_idle_time: idle timeout in minutes (default: 30 minutes)\n# idleness_check_period: check interval in minutes (default: 1 minute)\nhelm upgrade --install baize-agent baize/baize-agent \\\n    --namespace baize-system \\\n    --create-namespace \\\n    --set global.imageRegistry=release.daocloud.io \\\n    --set notebook-controller.culling_enabled=true \\\n    --set notebook-controller.cull_idle_time=120 \\\n    --set notebook-controller.idleness_check_period=1 \\\n    --version=$VERSION\n
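
To confirm that the new values took effect, you can inspect the release afterward (standard Helm usage):

helm get values baize-agent -n baize-system\n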

                                                                          Note

                                                                          To prevent data loss after an automatic shutdown, upgrade to v0.8.0 or higher and enable the auto-save feature in your notebook configuration.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-with-envs.html","title":"Use Environments in Notebooks","text":"

Environment management is one of the key features of AI Lab. By associating an environment with a Notebook, you can quickly switch between different environments, making it easier to develop and debug.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-with-envs.html#select-an-environment-when-creating-a-notebook","title":"Select an Environment When Creating a Notebook","text":"

                                                                          When creating a Notebook, you can select one or more environments. If there isn\u2019t a suitable environment, you can create a new one in Environments .

                                                                          For instructions on how to create an environment, refer to Environments.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-with-envs.html#use-environments-in-notebooks_1","title":"Use Environments in Notebooks","text":"

                                                                          Note

In the Notebook, both conda and mamba are provided as environment management tools. You can choose the appropriate tool based on your needs.

                                                                          In AI Lab, you can use the conda environment management tool. You can view the list of current environments in the Notebook by using the command !conda env list.

                                                                          (base) jovyan@chuanjia-jupyter-0:~/yolov8$ conda env list\n# conda environments:\n#\ndkj-python312-pure       /opt/baize-runtime-env/dkj-python312-pure/conda/envs/dkj-python312-pure\npython-3.10              /opt/baize-runtime-env/python-3.10/conda/envs/python-3.10\ntorch-smaple             /opt/baize-runtime-env/torch-smaple/conda/envs/torch-smaple\nbase                  *  /opt/conda     # Currently activated environment\nbaize-base               /opt/conda/envs/baize-base\n

                                                                          This command lists all conda environments and adds an asterisk (*) before the currently activated environment.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-with-envs.html#manage-kernel-environment-in-jupyterlab","title":"Manage Kernel Environment in JupyterLab","text":"

In JupyterLab, the environments associated with the Notebook are automatically bound to the Kernel list, allowing you to quickly switch environments through the Kernel.

With this method, you can write and debug algorithms against different environments simultaneously in a single Notebook.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-with-envs.html#switch-environments-in-a-terminal","title":"Switch Environments in a Terminal","text":"

AI Lab Notebooks now also support VSCode.

                                                                          If you prefer managing and switching environments in the Terminal, you can follow these steps:

                                                                          Upon first starting and using the Notebook, you need to execute conda init, and then run conda activate <env_name> to switch to the proper environment.

                                                                          (base) jovyan@chuanjia-jupyter-0:~/yolov8$ conda init bash  # Initialize bash environment, only needed for the first use\nno change     /opt/conda/condabin/conda\n change     /opt/conda/bin/conda\n change     /opt/conda/bin/conda-env\n change     /opt/conda/bin/activate\n change     /opt/conda/bin/deactivate\n change     /opt/conda/etc/profile.d/conda.sh\n change     /opt/conda/etc/fish/conf.d/conda.fish\n change     /opt/conda/shell/condabin/Conda.psm1\n change     /opt/conda/shell/condabin/conda-hook.ps1\n change     /opt/conda/lib/python3.11/site-packages/xontrib/conda.xsh\n change     /opt/conda/etc/profile.d/conda.csh\n change     /home/jovyan/.bashrc\n action taken.\nAdded mamba to /home/jovyan/.bashrc\n\n==> For changes to take effect, close and re-open your current shell. <==\n\n(base) jovyan@chuanjia-jupyter-0:~/yolov8$ source ~/.bashrc  # Reload bash environment\n(base) jovyan@chuanjia-jupyter-0:~/yolov8$ conda activate python-3.10   # Switch to python-3.10 environment\n(python-3.10) jovyan@chuanjia-jupyter-0:~/yolov8$ conda env list\n\n              mamba version : 1.5.1\n# conda environments:\n#\ndkj-python312-pure       /opt/baize-runtime-env/dkj-python312-pure/conda/envs/dkj-python312-pure\npython-3.10           *  /opt/baize-runtime-env/python-3.10/conda/envs/python-3.10    # Currently activated environment\ntorch-smaple             /opt/baize-runtime-env/torch-smaple/conda/envs/torch-smaple\nbase                     /opt/conda\nbaize-base               /opt/conda/envs/baize-base\n

                                                                          If you prefer to use mamba, you will need to use mamba init and mamba activate <env_name>.
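
A minimal sketch mirroring the conda steps above (the environment name python-3.10 comes from the earlier listing):

mamba init bash   # only needed on first use\nsource ~/.bashrc   # reload the shell configuration\nmamba activate python-3.10\n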

                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-with-envs.html#view-packages-in-environment","title":"View Packages in Environment","text":"

An important benefit of environment management is that you can use different sets of packages simply by switching environments within a Notebook.

                                                                          You can use the command below to view all packages in the current environment using conda.

                                                                          (python-3.10) jovyan@chuanjia-jupyter-0:~/yolov8$ conda list\n# packages in environment at /opt/baize-runtime-env/python-3.10/conda/envs/python-3.10:\n#\n# Name                    Version                   Build  Channel\n_libgcc_mutex             0.1                        main    defaults\n_openmp_mutex             5.1                       1_gnu    defaults\n... # Output truncated\nidna                      3.7             py310h06a4308_0    defaults\nipykernel                 6.28.0          py310h06a4308_0    defaults\nipython                   8.20.0          py310h06a4308_0    defaults\nipython_genutils          0.2.0              pyhd3eb1b0_1    defaults\njedi                      0.18.1          py310h06a4308_1    defaults\njinja2                    3.1.4           py310h06a4308_0    defaults\njsonschema                4.19.2          py310h06a4308_0    defaults\njsonschema-specifications 2023.7.1        py310h06a4308_0    defaults\njupyter_client            7.4.9           py310h06a4308_0    defaults\njupyter_core              5.5.0           py310h06a4308_0    defaults\njupyter_events            0.8.0           py310h06a4308_0    defaults\njupyter_server            2.10.0          py310h06a4308_0    defaults\njupyter_server_terminals  0.4.4           py310h06a4308_1    defaults\njupyterlab_pygments       0.2.2           py310h06a4308_0    defaults\n... # Output truncated\nxz                        5.4.6                h5eee18b_1    defaults\nyaml                      0.2.5                h7b6447c_0    defaults\nzeromq                    4.3.5                h6a678d5_0    defaults\nzlib                      1.2.13               h5eee18b_1    defaults\n
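
To check whether a specific package is installed without reading the full list, you can filter the output with standard shell piping (jupyter is just an example pattern):

conda list | grep jupyter\n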
                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-with-envs.html#update-packages-in-environment","title":"Update Packages in Environment","text":"

                                                                          Currently, you can update the packages in the environment through the Environment Management UI in AI Lab.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html","title":"Notebook SSH Guide","text":"

Notebooks provided by AI Lab support access from your local machine via SSH.

                                                                          With simple configuration, you can use SSH to access the Jupyter Notebook. Whether you are using Windows, Mac, or Linux operating systems, you can follow the steps below.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#configure-ssh-credentials","title":"Configure SSH Credentials","text":""},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#generate-ssh-key-pair","title":"Generate SSH Key Pair","text":"

                                                                          First, you need to generate an SSH public and private key pair on your computer. This key pair will be used for the authentication process to ensure secure access.

                                                                          Mac/LinuxWindows
                                                                          1. Open the terminal.
                                                                          2. Enter the command:

                                                                            ssh-keygen -t rsa -b 4096\n
                                                                          3. When prompted with \u201cEnter a file in which to save the key,\u201d you can press Enter to use the default path or specify a new path.

                                                                          4. Next, you will be prompted to enter a passphrase (optional), which adds an extra layer of security. If you choose to enter a passphrase, remember it as you will need it each time you use the key.
                                                                          1. Install Git Bash (if you haven't already).
                                                                          2. Open Git Bash.
                                                                          3. Enter the command:

                                                                            ssh-keygen -t rsa -b 4096\n
                                                                          4. Follow the same steps as Mac/Linux.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#add-ssh-public-key-to-personal-center-optional","title":"Add SSH Public Key to Personal Center (Optional)","text":"
                                                                          1. Open the generated public key file, usually located at ~/.ssh/id_rsa.pub (if you did not change the default path).
2. Copy the public key content (you can print it with the command shown after this list).
                                                                          3. Log in to the system's personal center.
                                                                          4. Look for the SSH public key configuration area and paste the copied public key into the designated location.
                                                                          5. Save the changes.
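
To print the public key for copying, you can run (standard OpenSSH usage; adjust the path if you saved the key elsewhere):

cat ~/.ssh/id_rsa.pub\n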
                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#enable-ssh-in-notebook","title":"Enable SSH in Notebook","text":"
                                                                          1. Log in to the Jupyter Notebook web interface.
                                                                          2. Find the Notebook for which you want to enable SSH.
                                                                          3. In the Notebook's settings or details page, find the option Enable SSH and enable it.
                                                                          4. Record or copy the displayed SSH access command. This command will be used in subsequent steps for SSH connection.
                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#ssh-in-different-environments","title":"SSH in Different Environments","text":""},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#example","title":"Example","text":"

                                                                          Assume the SSH command you obtained is as follows:

                                                                          ssh username@mockhost -p 2222\n

                                                                          Replace username with your username, mockhost with the actual hostname, and 2222 with the actual port number.
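
Optionally, to avoid retyping these parameters, you can save them as an entry in ~/.ssh/config (standard OpenSSH client configuration; the host alias ai-lab-notebook is illustrative):

Host ai-lab-notebook\n    HostName mockhost\n    Port 2222\n    User username\n

After saving, ssh ai-lab-notebook is equivalent to the full command above.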

                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#windows","title":"Windows","text":"

                                                                          It is recommended to use PuTTY or Git Bash for SSH connection.

                                                                          PuTTYGit Bash
                                                                          1. Open PuTTY.
                                                                          2. In the Host Name (or IP address) field, enter mockhost (the actual hostname).
                                                                          3. Enter the port number 2222 (the actual port number).
                                                                          4. Click Open to start the connection.
                                                                          5. On the first connection, you may be prompted to verify the server's identity. Click Yes .
                                                                          1. Open Git Bash.
                                                                          2. Enter the ssh command to access your machine:

                                                                            ssh username@mockhost -p 2222\n
                                                                          3. Press Enter.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#maclinux","title":"Mac/Linux","text":"
                                                                          1. Open the terminal.
2. Enter the ssh command to access your machine:

                                                                            ssh username@mockhost -p 2222\n
                                                                          3. If prompted to accept the host's identity, type yes.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#remote-development-with-ide","title":"Remote Development with IDE","text":"

                                                                          In addition to using command line tools for SSH connection, you can also utilize modern IDEs such as Visual Studio Code (VSCode) and PyCharm's SSH remote connection feature to develop locally while utilizing remote server resources.

                                                                          Using SSH in VSCodeUsing SSH in PyCharm

                                                                          VSCode supports SSH remote connection through the Remote - SSH extension, allowing you to edit files on the remote server directly in the local VSCode environment and run commands.

                                                                          Steps:

                                                                          1. Ensure you have installed VSCode and the Remote - SSH extension.
                                                                          2. Open VSCode and click the remote resource manager icon at the bottom of the left activity bar.
                                                                          3. Select Remote-SSH: Connect to Host... and then click + Add New SSH Host...
                                                                          4. Enter the SSH connection command, for example:

                                                                            ssh username@mockhost -p 2222\n
                                                                          5. Press Enter. Replace username, mockhost, and 2222 with your actual username, hostname, and port number.

                                                                          6. Select a configuration file to save this SSH host, usually the default is fine.

                                                                          After completing, your SSH host will be added to the SSH target list. Click your host to connect. If it's your first connection, you may be prompted to verify the host's fingerprint. After accepting, you will be asked to enter the passphrase (if the SSH key has a passphrase). Once connected successfully, you can edit remote files in VSCode and utilize remote resources just as if you were developing locally.

                                                                          PyCharm Professional Edition supports connecting to remote servers via SSH and directly developing in the local PyCharm.

                                                                          Steps:

                                                                          1. Open PyCharm and open or create a project.
                                                                          2. Select File -> Settings (on Mac, it's PyCharm -> Preferences).
                                                                          3. In the settings window, navigate to Project: YourProjectName -> Python Interpreter.
                                                                          4. Click the gear icon in the upper right corner and select Add...

                                                                            • In the pop-up window, select SSH Interpreter.
                                                                            • Enter the remote host information: hostname (mockhost), port number (2222), username (username). Replace these placeholders with your actual information.
                                                                            • Click Next. PyCharm will attempt to connect to the remote server. If the connection is successful, you will be asked to enter the passphrase or select the private key file.
                                                                          5. Once configured, click Finish. Now, your PyCharm will use the Python interpreter on the remote server.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/notebook-with-ssh.html#security-restrictions","title":"Security Restrictions","text":"

                                                                          Within the same Workspace, any user can log in to a Notebook with SSH enabled using their own SSH credentials. This means that as long as users have configured their SSH public key in the personal center and the Notebook has enabled SSH, they can use SSH for a secure connection.

                                                                          Note that permissions for different users may vary depending on the Workspace configuration. Ensure you understand and comply with your organization's security and access policies.

                                                                          By following the above steps, you should be able to successfully configure and use SSH to access the Jupyter Notebook. If you encounter any issues, refer to the system help documentation or contact the system administrator.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/start-pause.html","title":"Start and Stop Notebook","text":"

                                                                          After a Notebook is successfully created, it typically has several states:

                                                                          • Pending
                                                                          • Running
                                                                          • Stopped

                                                                          If a Notebook is in the Stopped state, click the \u2507 on the right side in the list, then choose Start from the dropdown menu.

                                                                          This Notebook will move into the running queue, and its status will change to Pending. If everything is normal, its status will change to Running after a moment.

                                                                          If you have finished using the Notebook, you can choose Stop from the menu to change its status to Stopped.

                                                                          "},{"location":"en/admin/baize/developer/notebooks/view.html","title":"View Notebook Workload","text":"

                                                                          If you want to view the workload of a specific Notebook, you can follow these steps:

                                                                          1. Click the \u2507 on the right side of the Notebook in the Notebook list, then choose Workload Details from the dropdown menu.

                                                                          2. You will be directed to the StatefulSet list, where you can view:

                                                                            • The running status, IP address, resource requests, and usage of the Pod containers
                                                                            • Container configuration information
                                                                            • Access methods: ClusterIP, NodePort
                                                                            • Scheduling strategies: node and workload affinity, anti-affinity
                                                                            • Labels and annotations: key-value pairs of labels and annotations for the workload and Pods
                                                                            • Autoscaling: support for HPA, CronHPA, and VPA
                                                                            • Event list: warnings, notifications, and other messages
                                                                          3. In the StatefulSet list, click the \u2507 on the right side to perform more actions specific to the Pods.

                                                                          "},{"location":"en/admin/baize/oam/index.html","title":"Operator","text":"

Operator is the role for IT operations personnel who manage IT resources on a daily basis and handle workspace tasks.

                                                                          Here, you can visually understand the current usage status of resources such as clusters, nodes, CPUs, GPUs, and vGPUs.

                                                                          "},{"location":"en/admin/baize/oam/index.html#glossary","title":"Glossary","text":"
                                                                          • GPU Allocated: Statistics on the GPU allocation status of all unfinished tasks in the current cluster, calculating the ratio between requested GPUs (Request) and total resources (Total).
                                                                          • GPU Utilization: Statistics on the actual resource utilization of all running tasks in the current cluster, calculating the ratio between the GPUs actually used (Usage) and the total resources (Total).
                                                                          "},{"location":"en/admin/baize/oam/resource.html","title":"GPU Management","text":"

GPU Management automatically consolidates GPU resource information across the entire platform, provides a detailed display of GPU device information, and lets you view workload statistics and task execution information for each kind of GPU.

                                                                          After entering Operator, click Resource Management -> GPU Management in the left navigation bar to view GPU and task information.

                                                                          "},{"location":"en/admin/baize/oam/queue/create.html","title":"Create Queue","text":"

                                                                          In the Operator mode, queues can be used to schedule and optimize batch job workloads, effectively managing multiple tasks running on a cluster and optimizing resource utilization through a queue system.

                                                                          1. Click Queue Management in the left navigation bar, then click the Create button on the right.

                                                                          2. The system will pre-fill basic setup data, including the cluster to deploy to, workspace, and queuing policy. Click OK after adjusting these parameters.

                                                                          3. A confirmation message will appear upon creation, returning you to the queue management list. Click the \u2507 on the right side of the list to perform additional operations such as update or delete.

                                                                          "},{"location":"en/admin/baize/oam/queue/delete.html","title":"Delete Queue","text":"

                                                                          In the Operator mode, if you find a queue to be redundant, expired, or no longer needed for any other reason, you can delete it from the queue list.

                                                                          1. Click the \u2507 on the right side of the queue in the queue list, then choose Delete from the dropdown menu.

                                                                          2. In the pop-up window, confirm the queue you want to delete, enter the queue name, and then click Delete.

                                                                          3. A confirmation message will appear indicating successful deletion, and the queue will disappear from the list.

                                                                          Caution

                                                                          Once a queue is deleted, it cannot be recovered, so please proceed with caution.

                                                                          "},{"location":"en/admin/baize/troubleshoot/index.html","title":"Troubleshooting","text":"

This document continuously compiles and organizes errors that may arise from environmental issues or improper operations when using AI Lab, and provides analysis and solutions for them.

                                                                          Warning

This documentation is only applicable to the AI platform. If you encounter issues when using AI Lab, please refer to this troubleshooting guide first.

                                                                          In AI platform, the module name for AI Lab is baize, which offers one-stop solutions for model training, inference, model management, and more.

                                                                          "},{"location":"en/admin/baize/troubleshoot/index.html#common-troubleshooting-cases","title":"Common Troubleshooting Cases","text":"
                                                                          • Cluster Not Found in Drop-Down List
                                                                          • Notebook Not Controlled by Queue Quotas
                                                                          • Queue Initialization Failed
                                                                          "},{"location":"en/admin/baize/troubleshoot/cluster-not-found.html","title":"Cluster Not Found in Drop-Down List","text":""},{"location":"en/admin/baize/troubleshoot/cluster-not-found.html#symptom","title":"Symptom","text":"

In the AI Lab Developer and Operator UI, the desired cluster cannot be found in the cluster drop-down list.

                                                                          "},{"location":"en/admin/baize/troubleshoot/cluster-not-found.html#analysis","title":"Analysis","text":"

                                                                          If the desired cluster is missing from the cluster drop-down list in AI Lab, it could be due to the following reasons:

                                                                          • The baize-agent is not installed or failed to install, causing AI Lab to be unable to retrieve cluster information.
                                                                          • The cluster name was not configured when installing baize-agent, causing AI Lab to be unable to retrieve cluster information.
                                                                          • Observable components within the worker cluster are abnormal, leading to the inability to collect metrics information from the cluster.
                                                                          "},{"location":"en/admin/baize/troubleshoot/cluster-not-found.html#solution","title":"Solution","text":""},{"location":"en/admin/baize/troubleshoot/cluster-not-found.html#baize-agent-not-installed-or-failed-to-install","title":"baize-agent not installed or failed to install","text":"

AI Lab requires some basic components to be installed in each worker cluster. If the baize-agent is not installed in a worker cluster, selecting that cluster via the UI might lead to unexpected errors.

                                                                          Therefore, to ensure a good user experience, the selectable cluster range only includes clusters where the baize-agent has been successfully installed.

                                                                          If the issue is due to the baize-agent not being installed or installation failure, use the following steps:

                                                                          Container Management -> Clusters -> Helm Apps -> Helm Charts , find baize-agent and install it.

                                                                          Note

You can jump directly to this address: https://<host>/kpanda/clusters/<cluster_name>/helm/charts/addon/baize-agent. Be sure to replace <host> with your actual console address and <cluster_name> with the actual cluster name.

                                                                          "},{"location":"en/admin/baize/troubleshoot/cluster-not-found.html#cluster-name-not-configured-in-the-process-of-installing-baize-agent","title":"Cluster name not configured in the process of installing baize-agent","text":"

When installing baize-agent, be sure to configure the cluster name. This name is used for Insight metrics collection; it is empty by default and must be configured manually.

                                                                          "},{"location":"en/admin/baize/troubleshoot/cluster-not-found.html#insight-components-in-the-worker-cluster-are-abnormal","title":"Insight components in the worker cluster are abnormal","text":"

                                                                          If the Insight components in the cluster are abnormal, it might cause AI Lab to be unable to retrieve cluster information. Check if the platform's Insight services are running and configured correctly.

                                                                          • Check if the insight-server component is running properly in the Global Service Cluster.
                                                                          • Check if the insight-agent component is running properly in the worker cluster.
                                                                          "},{"location":"en/admin/baize/troubleshoot/local-queue-initialization-failed.html","title":"Local Queue Initialization Failed","text":""},{"location":"en/admin/baize/troubleshoot/local-queue-initialization-failed.html#issue-description","title":"Issue Description","text":"

                                                                          When creating a Notebook, training task, or inference service, if the queue is being used for the first time in that namespace, there will be a prompt to initialize the queue with one click. However, the initialization fails.

                                                                          "},{"location":"en/admin/baize/troubleshoot/local-queue-initialization-failed.html#issue-analysis","title":"Issue Analysis","text":"

                                                                          In the AI Lab environment, the queue management capability is provided by Kueue. Kueue provides two types of queue management resources:

                                                                          • ClusterQueue: A cluster-level queue mainly used to manage resource quotas within the queue, including CPU, memory, and GPU.
                                                                          • LocalQueue: A namespace-level queue that needs to point to a ClusterQueue for resource allocation within the queue.

                                                                          In the AI Lab environment, if a service is created and the specified namespace does not have a LocalQueue, there will be a prompt to initialize the queue.

                                                                          In rare cases, the LocalQueue initialization might fail due to special reasons.

                                                                          "},{"location":"en/admin/baize/troubleshoot/local-queue-initialization-failed.html#solution","title":"Solution","text":"

Check whether Kueue is running normally. You can check the status of the kueue-controller-manager with the following command:

                                                                          kubectl get deploy kueue-controller-manager -n baize-system\n

                                                                          If the kueue-controller-manager is not running properly, fix Kueue first.
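
If it is not running, you can inspect the deployment and its logs with standard kubectl commands (same namespace as above) to find the cause:

kubectl describe deploy kueue-controller-manager -n baize-system\nkubectl logs deploy/kueue-controller-manager -n baize-system --tail=100\n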

                                                                          "},{"location":"en/admin/baize/troubleshoot/local-queue-initialization-failed.html#references","title":"References","text":"
                                                                          • ClusterQueue
                                                                          • LocalQueue
                                                                          "},{"location":"en/admin/baize/troubleshoot/notebook-not-controlled-by-quotas.html","title":"Notebook Not Controlled by Queue Quota","text":"

                                                                          In the AI Lab module, when users create a Notebook, they find that even if the selected queue lacks resources, the Notebook can still be created successfully.

                                                                          "},{"location":"en/admin/baize/troubleshoot/notebook-not-controlled-by-quotas.html#issue-01-unsupported-kubernetes-version","title":"Issue 01: Unsupported Kubernetes Version","text":"
                                                                          • Analysis:

                                                                            The queue management capability in AI Lab is provided by Kueue, and the Notebook service is provided through JupyterHub. JupyterHub has high requirements for the Kubernetes version. For versions below v1.27, even if queue quotas are set in AI platform, and users select the quota when creating a Notebook, the Notebook will not actually be restricted by the queue quota.

                                                                          • Solution: Plan in advance. It is recommended to use Kubernetes version v1.27 or above in the production environment.

                                                                          • Reference: Jupyter Notebook Documentation

                                                                          "},{"location":"en/admin/baize/troubleshoot/notebook-not-controlled-by-quotas.html#issue-02-configuration-not-enabled","title":"Issue 02: Configuration Not Enabled","text":"
                                                                          • Analysis:

                                                                            When the Kubernetes cluster version is greater than v1.27, the Notebook still cannot be restricted by the queue quota.

This is because Kueue's enablePlainPod support must be enabled for queue quotas to take effect on the Notebook service.

                                                                          • Solution: When deploying baize-agent in the worker cluster, enable Kueue support for enablePlainPod.

                                                                          • Reference: Run Plain Pods as a Kueue-Managed Job

                                                                          "},{"location":"en/admin/ghippo/password.html","title":"Reset Password","text":"

                                                                          If you forget your password, you can reset it by following the instructions on this page.

                                                                          "},{"location":"en/admin/ghippo/password.html#steps-to-reset-password","title":"Steps to Reset Password","text":"

When an administrator creates a user, they set a username and password for that user. After logging in, the user should fill in an email address and change the password in the Personal Center. If the user has not set an email address, they can only contact the administrator to reset the password.

                                                                          1. If you forget your password, you can click Forgot your password? on the login interface.

                                                                          2. Enter your login email and click Submit .

3. Find the password reset email in your mailbox and click the link in the email. The link is valid for 5 minutes.

4. Install an application that supports 2FA dynamic password generation (such as Google Authenticator) on your mobile phone or another device. Set up a dynamic password to activate your account, then click Submit .

                                                                          5. Set a new password and click Submit . The requirements for setting a new password are consistent with the password rules when creating an account.

6. Once the password is successfully reset, you will be taken directly to the home page.

                                                                          "},{"location":"en/admin/ghippo/password.html#reset-password-process","title":"Reset password process","text":"

                                                                          The flow of the password reset process is as follows.

graph TB\n\npass[Forgot password] --> usern[Enter username]\n--> button[Click button to send an email] --> judge1[Check whether the username is correct]\n\n    judge1 -.Correct.-> judge2[Check whether an email is bound]\n    judge1 -.Wrong.-> tip1[Incorrect username error]\n\n        judge2 -.An email is bound.-> send[Send a reset email]\n        judge2 -.No email bound.-> tip2[No email bound<br>Contact admin to reset password]\n\nsend --> click[Click the link in the email] --> config[Configure dynamic password] --> reset[Reset password]\n--> success[Successfully reset]\n\nclassDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;\nclassDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;\nclassDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n\nclass pass,usern,button,tip1,send,tip2,click,config,reset,success plain;\nclass judge1,judge2 k8s
                                                                          "},{"location":"en/admin/ghippo/access-control/custom-role.html","title":"Custom Roles","text":"

                                                                          AI platform supports the creation of three scopes of custom roles:

• The permissions of a platform role take effect on all relevant resources of the platform
• The permissions of a workspace role take effect on the resources under the workspace where the user is located
• The permissions of a folder role take effect on the folder where the user is located, its subfolders, and the workspace resources under them
                                                                          "},{"location":"en/admin/ghippo/access-control/custom-role.html#create-a-platform-role","title":"Create a platform role","text":"

A platform role is a role that can operate on features of a particular AI platform module (such as container management, microservice engine, Multicloud Management, service mesh, container registry, Workbench, and global management).

                                                                          1. From the left navigation bar, click Global Management -> Access Control -> Roles , and click Create Custom Role .

                                                                          2. Enter the name and description, select Platform Role , check the role permissions and click OK .

                                                                          3. Return to the role list, search for the custom role you just created, and click \u2507 on the right to perform operations such as copying, editing, and deleting.

                                                                          4. After the platform role is successfully created, you can go to User/group to add users and groups for this role.

                                                                          "},{"location":"en/admin/ghippo/access-control/custom-role.html#create-a-workspace-role","title":"Create a workspace role","text":"

                                                                          A workspace role refers to a role that can manipulate features related to a module (such as container management, microservice engine, Multicloud Management, service mesh, container registry, Workbench, and global management) according to the workspace.

                                                                          1. From the left navigation bar, click Global Management -> Access Control -> Roles , and click Create Custom Role .

                                                                          2. Enter the name and description, select Workspace role , check the role permissions and click OK .

                                                                          3. Return to the role list, search for the custom role you just created, and click \u2507 on the right to perform operations such as copying, editing, and deleting.

                                                                          4. After the workspace role is successfully created, you can go to Workspace to authorize and set which workspaces this role can manage.

                                                                          "},{"location":"en/admin/ghippo/access-control/custom-role.html#create-folder-role","title":"Create Folder Role","text":"

A folder role is a role that can operate on features of an AI platform module (such as container management, microservice engine, Multicloud Management, service mesh, container registry, Workbench, and global management) within the scope of a folder and its subfolders.

                                                                          1. From the left navigation bar, click Global Management -> Access Control -> Roles , and click Create Custom Role .

                                                                          2. Enter the name and description, select Folder Role , check the role permissions and click OK .

                                                                          3. Return to the role list, search for the custom role you just created, and click \u2507 on the right to perform operations such as copying, editing, and deleting.

                                                                          4. After the folder role is successfully created, you can go to Folder to authorize and set which folders this role can manage.

                                                                          "},{"location":"en/admin/ghippo/access-control/docking.html","title":"Docking Portal","text":"

                                                                          When two or more platforms need to integrate or embed with each other, user system integration is usually required. During the process of user system integration, the Docking Portal mainly provides SSO (Single Sign-On) capability. If you want to integrate AI platform as a user source into a client platform, you can achieve it by docking a product through Docking Portal .

                                                                          "},{"location":"en/admin/ghippo/access-control/docking.html#docking-a-product","title":"Docking a product","text":"

                                                                          Prerequisite: Administrator privileges for the platform or IAM Owner privileges for access control.

1. Log in as an admin, navigate to Access Control , select Docking Portal , enter the Docking Portal list, and click Create SSO Profile in the upper right corner.

                                                                          2. On the Create SSO Profile page, fill in the Client ID.

3. After the SSO profile is created, click the newly created Client ID in the Docking Portal list to enter its details, copy the Client ID, Secret Key, and Single Sign-On URL, and fill them into the client platform to complete the user system integration.
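For illustration only, here is a minimal sketch of how a client platform might use the copied values to build a standard OIDC login redirect. The endpoint layout and parameter names follow generic OIDC conventions, and all values are placeholders rather than guaranteed AI platform specifics:

```python
from urllib.parse import urlencode

# Placeholders for the values copied from the Docking Portal details page.
CLIENT_ID = "my-client"                  # Client ID
CLIENT_SECRET = "<secret-key>"           # Secret Key; keep it server-side only
SSO_URL = "https://ai.example.com/auth"  # Single Sign-On URL

def build_login_url(redirect_uri: str, state: str) -> str:
    """Build an OIDC authorization-code login URL from the SSO endpoint."""
    params = {
        "client_id": CLIENT_ID,
        "response_type": "code",
        "scope": "openid profile email",
        "redirect_uri": redirect_uri,
        "state": state,  # anti-CSRF value, verified on the callback
    }
    return f"{SSO_URL}?{urlencode(params)}"

print(build_login_url("https://client.example.com/callback", "xyz123"))
```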

                                                                          "},{"location":"en/admin/ghippo/access-control/global.html","title":"System Roles","text":""},{"location":"en/admin/ghippo/access-control/global.html#use-cases","title":"Use cases","text":"

                                                                          AI platform provides predefined system roles to help users simplify the process of role permission usage.

                                                                          Note

                                                                          AI platform provides three types of system roles: platform role, workspace role, and folder role.

• Platform role: has the corresponding permissions for all related resources on the platform. Please go to the user/group page for authorization.
• Workspace role: has the corresponding permissions for a specific workspace. Please go to the specific workspace page for authorization.
• Folder role: has the corresponding permissions for a specific folder, its subfolders, and the resources under their workspaces. Please go to the specific folder page for authorization.
                                                                          "},{"location":"en/admin/ghippo/access-control/global.html#platform-roles","title":"Platform Roles","text":"

Five system roles are predefined in Access Control: Admin, IAM Owner, Audit Owner, Kpanda Owner, and Workspace and Folder Owner. These five roles are created by the system and cannot be modified by users. The corresponding permissions of each role are as follows:

• Admin (system role, module: All): Platform administrator, manages all platform resources, represents the highest authority of the platform.
• IAM Owner (system role, module: Access Control): Administrator of Access Control, has all permissions under this service, such as managing users/groups and authorization.
• Audit Owner (system role, module: Audit Log): Administrator of Audit Log, has all permissions under this service, such as setting audit log policies and exporting audit logs.
• Kpanda Owner (system role, module: Container Management): Administrator of Container Management, has all permissions under this service, such as creating/accessing clusters, deploying applications, granting cluster/namespace-related permissions to users/groups.
• Workspace and Folder Owner (system role, module: Workspace and Folder): Administrator of Workspace and Folder, has all permissions under this service, such as creating folders/workspaces, authorizing folder/workspace-related permissions to users/groups, using features such as Workbench and microservice engine under the workspace.
"},{"location":"en/admin/ghippo/access-control/global.html#workspace-roles","title":"Workspace Roles","text":"

Three system roles are predefined in Access Control: Workspace Admin, Workspace Editor, and Workspace Viewer. These three roles are created by the system and cannot be modified by users. The corresponding permissions of each role are as follows:

• Workspace Admin (system role, module: Workspace): Administrator of a workspace, with management permission of the workspace.
• Workspace Editor (system role, module: Workspace): Editor of a workspace, with editing permission of the workspace.
• Workspace Viewer (system role, module: Workspace): Viewer of a workspace, with readonly permission of the workspace.
"},{"location":"en/admin/ghippo/access-control/global.html#folder-roles","title":"Folder Roles","text":"

Three system roles are predefined in Access Control: Folder Admin, Folder Editor, and Folder Viewer. These three roles are created by the system and cannot be modified by users. The corresponding permissions of each role are as follows:

• Folder Admin (system role, module: Workspace): Administrator of a folder and its subfolders/workspaces, with management permission.
• Folder Editor (system role, module: Workspace): Editor of a folder and its subfolders/workspaces, with editing permission.
• Folder Viewer (system role, module: Workspace): Viewer of a folder and its subfolders/workspaces, with readonly permission.
"},{"location":"en/admin/ghippo/access-control/group.html","title":"Group","text":"

A group is a collection of users. By joining a group, a user inherits the group's role permissions. By authorizing users in batches through groups, you can better manage users and their permissions.

                                                                          "},{"location":"en/admin/ghippo/access-control/group.html#use-cases","title":"Use cases","text":"

When a user's permissions change, you only need to move the user to the appropriate group; other users are not affected.

When a group's permissions change, you only need to modify the group's role permissions, and the change applies to all users in the group.

                                                                          "},{"location":"en/admin/ghippo/access-control/group.html#create-group","title":"Create group","text":"

                                                                          Prerequisite: Admin or IAM Owner.

1. Enter Access Control , select Groups , enter the group list, and click Create a group on the upper right.

                                                                          2. Fill in the group information on the Create group page.

3. Click OK . The group is created successfully, and you return to the group list page, where the first row is the newly created group.

                                                                          "},{"location":"en/admin/ghippo/access-control/group.html#add-permissions-to-a-group","title":"Add permissions to a group","text":"

                                                                          Prerequisite: The group already exists.

1. Enter Access Control , select Groups , enter the group list, and click \u2507 -> Add permissions .

                                                                          2. On the Add permissions page, check the required role permissions (multiple choices are allowed).

3. Click OK to add the permissions to the group. You automatically return to the group list; click a group to view the permissions granted to it.

                                                                          "},{"location":"en/admin/ghippo/access-control/group.html#add-users-to-a-group","title":"Add users to a group","text":"
1. Enter Access Control , select Groups to display the group list, and on the right side of a group, click \u2507 -> Add Members .

2. On the Add Group Members page, select the users to add (multiple choices are allowed). If no suitable user exists, click Create a new user to create one first, then return to this page and click the refresh icon to display the newly created user.

                                                                          3. Click OK to finish adding users to the group.

                                                                          Note

                                                                          Users in the group will inherit the permissions of the group; users who join the group can be viewed in the group details.

                                                                          "},{"location":"en/admin/ghippo/access-control/group.html#delete-group","title":"Delete group","text":"


1. The administrator enters Access Control , selects Groups to enter the group list, and on the right side of a group, clicks \u2507 -> Delete .

                                                                          2. Click Delete to delete the group.

3. You return to the group list, and a prompt on the screen indicates that the deletion was successful.

                                                                          Note

                                                                          Deleting a group will not delete the users in the group, but the users in the group will no longer be able to inherit the permissions from the group.

                                                                          "},{"location":"en/admin/ghippo/access-control/iam.html","title":"What is IAM","text":"

IAM (Identity and Access Management) is an important module of global management. Through the access control module you can create, manage, and delete users (and groups), and use system roles and custom roles to control other users' access to the AI platform.

                                                                          "},{"location":"en/admin/ghippo/access-control/iam.html#benefits","title":"Benefits","text":"
                                                                          • Simple and smooth

Structures and roles within an enterprise can be complex, and the management of projects, working groups, and mandates changes constantly. Access control presents the authorization relationships among users, groups, and roles on a clear and tidy page, so that users (and groups) can be authorized in the fewest possible steps.

                                                                          • Appropriate role

Access control predefines an administrator role for each submodule, so no role maintenance is needed: you can directly grant the platform's predefined system roles to users to achieve modular management of the platform. For fine-grained permissions, please refer to Permission Management.

                                                                          • Enterprise-grade access control

When you want your company's employees to use the company's internal authentication system to log in to the AI platform without creating corresponding users on the AI platform, you can use the identity provider feature of access control to establish a trust relationship between your company and Suanova. Through federated authentication, employees can then log in to the AI platform directly with their existing enterprise accounts, achieving single sign-on.

                                                                          "},{"location":"en/admin/ghippo/access-control/iam.html#usage-process","title":"Usage Process","text":"

                                                                          Here is a typical process to perform access control.

                                                                          graph TD\n    login[Login] --> user[Create User]\n    user --> auth[Authorize User]\n    auth --> group[Create Group]\n    group --> role[Create Custom Role]\n    role --> id[Create Identity Provider]\n\n classDef plain fill:#ddd,stroke:#fff,stroke-width:4px,color:#000;\n classDef k8s fill:#326ce5,stroke:#fff,stroke-width:4px,color:#fff;\n classDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n class login,user,auth,group,role,id cluster;\n\nclick login \"https://docs.daocloud.io/en/ghippo/install/login.html\"\nclick user \"https://docs.daocloud.io/en/ghippo/access-control/user.html\"\nclick auth \"https://docs.daocloud.io/en/ghippo/access-control/role.html\"\nclick group \"https://docs.daocloud.io/en/ghippo/access-control/group.html\"\nclick role \"https://docs.daocloud.io/en/ghippo/access-control/custom-role.html\"\nclick id \"https://docs.daocloud.io/en/ghippo/access-control/idprovider.html\"
                                                                          "},{"location":"en/admin/ghippo/access-control/idprovider.html","title":"Identity provider","text":"

Global management supports single sign-on based on the LDAP and OIDC protocols. If your enterprise or organization has its own account system and you want members of the organization to use AI platform resources, you can use the identity provider feature provided by global management instead of creating a username/password for every organization member in AI platform, and grant these external user identities permission to use AI platform resources.

                                                                          "},{"location":"en/admin/ghippo/access-control/idprovider.html#basic-concept","title":"Basic concept","text":"
                                                                          • Identity Provider (IdP for short)

The identity provider collects and stores user identity information, usernames, and passwords, and is responsible for authenticating users when they log in. In the identity authentication process between an enterprise and AI platform, the identity provider is the enterprise's own identity provider.

                                                                          • Service Provider (SP)

The service provider establishes a trust relationship with the identity provider (IdP) and uses the user information provided by the IdP to offer specific services to users. In the authentication process between an enterprise and AI platform, the service provider is AI platform.

                                                                          • LDAP

LDAP stands for Lightweight Directory Access Protocol and is often used for single sign-on, that is, a user can log in to multiple services with one account and password. Global management supports LDAP for identity authentication, so an enterprise IdP that establishes identity authentication with AI platform through the LDAP protocol must support LDAP. For a detailed description of LDAP, please refer to: Welcome to LDAP.

                                                                          • OIDC

OIDC, short for OpenID Connect, is an identity authentication standard protocol based on the OAuth 2.0 protocol. Global management supports OIDC for identity authentication, so an enterprise IdP that establishes identity authentication with AI platform through OIDC must support the OIDC protocol. For a detailed description of OIDC, please refer to: Welcome to OpenID Connect.

                                                                          • OAuth 2.0

OAuth 2.0, short for Open Authorization 2.0, is an open authorization protocol; the framework allows third-party applications to obtain access permissions in their own name.

                                                                          "},{"location":"en/admin/ghippo/access-control/idprovider.html#features","title":"Features","text":"
                                                                          • Administrators do not need to recreate AI platform users

Before using an identity provider for identity authentication, the administrator needs to create an account for each user in both the enterprise management system and AI platform; afterwards, the enterprise administrator only needs to create an account in the enterprise management system, and the user can access both systems, reducing personnel management costs.

                                                                          • Users do not need to remember two sets of platform accounts

Before using an identity provider for identity authentication, users need to log in with separate accounts to access the enterprise management system and AI platform; afterwards, users can access both systems by logging in to the enterprise management system once.

                                                                          "},{"location":"en/admin/ghippo/access-control/ldap.html","title":"LDAP","text":"

The full name of LDAP is Lightweight Directory Access Protocol, an open, vendor-neutral, industry-standard application protocol that provides access control and maintains directory information for distributed systems over IP.

If your enterprise or organization has its own account system and your enterprise user management system supports the LDAP protocol, you can use the LDAP-based identity provider feature provided by Global Management instead of creating a username/password for each member in AI platform, and grant these external user identities permission to use AI platform resources.

                                                                          In Global Management, the operation steps are as follows:

                                                                          1. Log in to AI platform as a user with admin role. Click Global Management -> Access Control in the lower left corner of the left navigation bar.

2. Click Identity Provider on the left nav bar, then click the Create an Identity Provider button.

                                                                          3. In the LDAP tab, fill in the following fields and click Save to establish a trust relationship with the identity provider and a user mapping relationship.

• Vendor: Supports LDAP (Lightweight Directory Access Protocol) and AD (Active Directory)
• Identity Provider Name (UI display name): Used to distinguish different identity providers
• Connection URL: The address and port number of the LDAP service, e.g., ldap://10.6.165.2:30061
• Bind DN: The DN of the LDAP administrator, which Keycloak will use to access the LDAP server
• Bind credentials: The password of the LDAP administrator. This field can retrieve its value from a vault using the ${vault.ID} format.
• Users DN: The full DN of the LDAP tree where your users are located. This DN is the parent of the LDAP users. For example, if the DN of a typical user is similar to \u201cuid='john',ou=users,dc=example,dc=com\u201d, it can be \u201cou=users,dc=example,dc=com\u201d.
• User Object Classes: All values of the LDAP objectClass attribute for users in LDAP, separated by commas. For example: \u201cinetOrgPerson,organizationalPerson\u201d. New Keycloak users will be written to LDAP with all of these object classes, and existing LDAP user records will be found if they contain all of these object classes.
• Enable StartTLS: Encrypts the connection between AI platform and LDAP when enabled
• Default Permission: Users/groups have no permissions by default after synchronization
• Full name mapping: Corresponding First name and Last Name
• User Name Mapping: The unique username for the user
• Mailbox Mapping: User email

                                                                            Advanced Config

• Enable or not: Enabled by default. When disabled, this LDAP configuration will not take effect.
• Periodic full sync: Disabled by default. When enabled, a sync period can be configured, such as syncing once every hour.
• Edit mode: Read-only mode will not modify the source data in LDAP. Write mode will sync data back to LDAP after user information is edited on the platform.
• Read timeout: Adjusting this value can effectively avoid interface timeouts when the amount of LDAP data is large.
• User LDAP filter: An additional LDAP filter used to filter the search for users. Leave it empty if no additional filter is needed. Ensure it starts with \u201c(\u201d and ends with \u201c)\u201d.
• Username LDAP attribute: The name of the LDAP attribute that maps to the Keycloak username. For many LDAP server vendors, it can be \u201cuid\u201d. For Active Directory, it can be \u201csAMAccountName\u201d or \u201ccn\u201d. This attribute should be filled in for all LDAP user records you want to import into Keycloak.
• RDN LDAP attribute: The name of the LDAP attribute that serves as the RDN (top-level attribute) of the typical user DN. It is usually the same as the Username LDAP attribute, but this is not required. For example, for Active Directory, when the username attribute might be \u201csAMAccountName\u201d, \u201ccn\u201d is often used as the RDN attribute.
• UUID LDAP attribute: The name of the LDAP attribute used as the unique object identifier (UUID) for objects in LDAP. For many LDAP server vendors, it is \u201centryUUID\u201d; however, some may differ. For example, for Active Directory, it should be \u201cobjectGUID\u201d. If your LDAP server does not support the UUID concept, you can use any other attribute that should be unique among LDAP users in the tree, such as \u201cuid\u201d or \u201centryDN\u201d.
                                                                          4. On the Sync Groups tab, fill in the following fields to configure the mapping relationship of groups, and click Save again.

• base DN: Location of the group in the LDAP tree. Example: ou=groups,dc=example,dc=org
• Usergroup Object Filter: Object classes for usergroups, separated by commas if more classes are required. In a typical LDAP deployment this is usually \"groupOfNames\"; the system fills it in automatically, and you can edit it if needed. * means all. Example: *
• group name: cn. Unchangeable.
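Before clicking Save , it can be useful to verify the Connection URL, Bind DN, and Users DN from outside the platform. Below is a minimal sketch using the third-party Python ldap3 library, with placeholder values mirroring the field examples above:

```python
from ldap3 import ALL, Connection, Server

# Placeholders mirroring the form fields above.
server = Server("ldap://10.6.165.2:30061", get_info=ALL)  # Connection URL
conn = Connection(
    server,
    user="cn=admin,dc=example,dc=com",  # Bind DN
    password="admin-password",          # Bind credentials
)
if not conn.bind():
    raise SystemExit(f"Bind failed: {conn.result}")

# Search under the Users DN; user filters must start with "(" and end with ")".
conn.search(
    search_base="ou=users,dc=example,dc=com",     # Users DN
    search_filter="(objectClass=inetOrgPerson)",  # matches the User Object Classes
    attributes=["uid", "mail"],
)
for entry in conn.entries:
    print(entry.entry_dn)
conn.unbind()
```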

                                                                          Note

1. After establishing a trust relationship between the enterprise user management system and AI platform through the LDAP protocol, you can synchronize users or groups from the enterprise user management system to AI platform in one batch via automatic or manual synchronization.
2. After synchronization, the administrator can authorize users/groups in batches, and users can log in to AI platform with the account/password from the enterprise user management system.
                                                                          3. See the LDAP Operations Demo Video for a hands-on tutorial.
                                                                          "},{"location":"en/admin/ghippo/access-control/oauth2.0.html","title":"OAuth 2.0 - WeCom","text":"

                                                                          If all members in your enterprise or organization are managed in WeCom, you can use the identity provider feature based on the OAuth 2.0 protocol provided by Global Management, without the need to create a username/password for each organization member in AI platform. You can grant these external user identities permission to use AI platform resources.

                                                                          "},{"location":"en/admin/ghippo/access-control/oauth2.0.html#steps","title":"Steps","text":"
                                                                          1. Log in to AI platform with a user who has the admin role. Click Global Management -> Access Control at the bottom of the left navigation bar.

                                                                          2. Select Identity Providers on the left navigation bar, and click the OAuth 2.0 tab. Fill in the form fields and establish a trust relationship with WeCom, then click Save.

                                                                          "},{"location":"en/admin/ghippo/access-control/oauth2.0.html#proper-fields-in-wecom","title":"proper fields in WeCom","text":"

                                                                          Note

Before integration, you need to create a custom application in the WeCom management console; refer to How to create a custom application.

• Corp ID: ID of WeCom
• Agent ID: ID of the custom application
• ClientSecret: Secret of the custom application

                                                                          WeCom ID:

                                                                          Agent ID and ClientSecret:
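For a rough sense of how these fields are used, WeCom's server API exchanges the Corp ID and application Secret for an access token. A minimal sketch against WeCom's public gettoken endpoint, with placeholder credentials:

```python
import requests

CORP_ID = "ww1234567890abcdef"   # Corp ID (placeholder)
CORP_SECRET = "<client-secret>"  # ClientSecret of the custom application (placeholder)

resp = requests.get(
    "https://qyapi.weixin.qq.com/cgi-bin/gettoken",
    params={"corpid": CORP_ID, "corpsecret": CORP_SECRET},
    timeout=10,
)
data = resp.json()
if data.get("errcode") != 0:
    raise SystemExit(f"WeCom error: {data}")
print("access_token:", data["access_token"])
```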

                                                                          "},{"location":"en/admin/ghippo/access-control/oidc.html","title":"Create and Manage OIDC","text":"

OIDC (OpenID Connect) is an identity authentication standard protocol built as an identity layer on top of OAuth 2.0.

If your enterprise or organization already has its own account system, and your enterprise user management system supports the OIDC protocol, you can use the OIDC-based identity provider feature provided by Global Management instead of creating a username/password for each member in AI platform, and grant these external user identities permission to use AI platform resources.

                                                                          The specific operation steps are as follows.

                                                                          1. Log in to AI platform as a user with admin role. Click Global Management -> Access Control at the bottom of the left navigation bar.

2. On the left nav bar, select Identity Provider , then click OIDC -> Create an Identity Provider .

                                                                          3. After completing the form fields and establishing a trust relationship with the identity provider, click Save .

• Provider Name: Displayed on the login page and is the entry point for the identity provider
• Authentication Method: Client authentication method. If the JWT is signed with a private key, select JWT signed with private key from the dropdown. For details, refer to Client Authentication.
• Client ID: Client ID
• Client Secret: Client Secret
• Client URL: One-click access to the login URL, Token URL, user information URL, and logout URL through the identity provider's well-known interface
• Auto-associate: After it is turned on, when the identity provider username/email is duplicated with the AI platform username/email, the two will be automatically associated

                                                                          Note

                                                                          1. After the user completes the first login to AI platform through the enterprise user management system, the user information will be synchronized to Access Control -> User List of AI platform.
                                                                          2. Users who log in for the first time will not be given any default permissions and need to be authorized by an administrator (the administrator can be a platform administrator, submodule administrator or resource administrator).
                                                                          3. For practical tutorials, please refer to OIDC Operation Video Tutorials, or refer to Azure OpenID Connect (OIDC) Access Process.
                                                                          "},{"location":"en/admin/ghippo/access-control/oidc.html#user-identity-authentication-interaction-process","title":"User identity authentication interaction process","text":"

                                                                          The interactive process of user authentication is as follows:

1. Use a browser to initiate a single sign-on request to AI platform.
2. Based on the information carried in the login link, AI platform finds the corresponding configuration in Global Management -> Access Control -> Identity Provider , constructs an OIDC Authorization Request, and sends it to the browser.
3. After receiving the request, the browser forwards the OIDC Authorization Request to the enterprise IdP.
4. The user enters the username and password on the enterprise IdP's login page. The enterprise IdP verifies the provided identity information, constructs an ID Token carrying the user information, and sends an OIDC Authorization Response to the browser.
5. After receiving the response, the browser forwards the OIDC Authorization Response to AI platform.
6. AI platform takes the ID Token from the OIDC Authorization Response, maps it to a specific user list according to the configured identity conversion rules, and issues a Token.
7. Single sign-on is complete, and the user can access AI platform.
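To make steps 2 and 6 concrete, the sketch below shows the general shape of an OIDC authorization request and the code-for-token exchange. The endpoints and parameter names are generic OIDC conventions (normally discovered via the IdP's /.well-known/openid-configuration document), not AI platform specifics:

```python
import requests
from urllib.parse import urlencode

AUTH_ENDPOINT = "https://idp.example.com/oauth2/authorize"  # placeholder
TOKEN_ENDPOINT = "https://idp.example.com/oauth2/token"     # placeholder
REDIRECT_URI = "https://ai.example.com/callback"            # placeholder

# Step 2: the authorization request the browser is redirected to.
auth_request = AUTH_ENDPOINT + "?" + urlencode({
    "client_id": "ai-platform",
    "response_type": "code",
    "scope": "openid profile email",
    "redirect_uri": REDIRECT_URI,
    "state": "random-anti-csrf-value",
})

# Step 6: after the IdP redirects back with ?code=..., exchange it for tokens.
def exchange_code(code: str) -> dict:
    resp = requests.post(TOKEN_ENDPOINT, data={
        "grant_type": "authorization_code",
        "code": code,
        "redirect_uri": REDIRECT_URI,
        "client_id": "ai-platform",
        "client_secret": "<client-secret>",
    })
    resp.raise_for_status()
    return resp.json()  # includes id_token, a JWT carrying the user claims
```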
                                                                          "},{"location":"en/admin/ghippo/access-control/role.html","title":"Role and Permission Management","text":"

                                                                          A role corresponds to a set of permissions that determine the actions that can be performed on resources. Granting a user a role means granting all the permissions included in that role.

AI platform provides three levels of roles, which effectively address your permission-related needs:

                                                                          • Platform Roles
                                                                          • Workspace Roles
                                                                          • Folder Roles
                                                                          "},{"location":"en/admin/ghippo/access-control/role.html#platform-roles","title":"Platform Roles","text":"

Platform roles are coarse-grained permissions that grant the corresponding permissions for all relevant resources on the platform. By assigning platform roles, users can have permissions to create, delete, modify, and view all clusters and workspaces, but not permissions scoped to a particular cluster or workspace. AI platform provides 5 pre-defined platform roles that users can directly use:

                                                                          • Admin
                                                                          • Kpanda Owner
                                                                          • Workspace and Folder Owner
                                                                          • IAM Owner
                                                                          • Audit Owner

                                                                          Additionally, AI platform supports the creation of custom platform roles with customized content as needed. For example, creating a platform role that includes all functional permissions in the Workbench. Since the Workbench depends on workspaces, the platform will automatically select the \"view\" permission for workspaces by default. Please do not manually deselect it. If User A is granted this Workbench role, they will automatically have all functional permissions related to the Workbench in all workspaces.

                                                                          "},{"location":"en/admin/ghippo/access-control/role.html#platform-role-authorization-methods","title":"Platform Role Authorization Methods","text":"

                                                                          There are three ways to authorize platform roles:

                                                                          • In the Global Management -> Access Control -> Users section, find the user in the user list, click ... , select Authorization , and grant platform role permissions to the user.

                                                                          • In the Global Management -> Access Control -> Groups section, create a group in the group list, add the user to the group, and grant authorization to the group (the specific operation is: find the group in the group list, click ... , select Add Permissions , and grant platform roles to the group).

• In the Global Management -> Access Control -> Roles section, find the corresponding platform role in the role list, click the role name to access details, click the Related Members button, select the user or group, and click OK .

                                                                          "},{"location":"en/admin/ghippo/access-control/role.html#workspace-roles","title":"Workspace Roles","text":"

                                                                          Workspace roles are fine-grained roles that grant users management permissions, view permissions, or Workbench-related permissions for a specific workspace. Users with these roles can only manage the assigned workspace and cannot access other workspaces. AI platform provides 3 pre-defined workspace roles that users can directly use:

                                                                          • Workspace Admin
                                                                          • Workspace Editor
                                                                          • Workspace Viewer

                                                                          Moreover, AI platform supports the creation of custom workspace roles with customized content as needed. For example, creating a workspace role that includes all functional permissions in the Workbench. Since the Workbench depends on workspaces, the platform will automatically select the \"view\" permission for workspaces by default. Please do not manually deselect it. If User A is granted this role in Workspace 01, they will have all functional permissions related to the Workbench in Workspace 01.

                                                                          Note

                                                                          Unlike platform roles, workspace roles need to be used within the workspace. Once authorized, users will only have the functional permissions of that role within the assigned workspace.

                                                                          "},{"location":"en/admin/ghippo/access-control/role.html#workspace-role-authorization-methods","title":"Workspace Role Authorization Methods","text":"

                                                                          In the Global Management -> Workspace and Folder list, find the workspace, click Authorization , and grant workspace role permissions to the user.

                                                                          "},{"location":"en/admin/ghippo/access-control/role.html#folder-roles","title":"Folder Roles","text":"

Folder roles have a permission granularity between platform roles and workspace roles. They grant users management permissions and view permissions for a specific folder and its subfolders, as well as all workspaces within that folder. Folder roles are commonly used in departmental scenarios in enterprises. For example, User B is the leader of a first-level department and usually has management permissions over the first-level department, all second-level departments under it, and projects within those departments. In this scenario, User B is granted admin permissions for the first-level folder, which also grants the corresponding permissions for the second-level folders and workspaces below them. AI platform provides 3 pre-defined folder roles that users can directly use:

                                                                          • Folder Admin
                                                                          • Folder Editor
                                                                          • Folder Viewer

                                                                          Additionally, AI platform supports the creation of custom folder roles with customized content as needed. For example, creating a folder role that includes all functional permissions in the Workbench. If User A is granted this role in Folder 01, they will have all functional permissions related to the Workbench in all workspaces within Folder 01.

                                                                          Note

                                                                          The functionality of modules depends on workspaces, and folders provide further grouping mechanisms with permission inheritance capabilities. Therefore, folder permissions not only include the folder itself but also its sub-folders and workspaces.

                                                                          "},{"location":"en/admin/ghippo/access-control/role.html#folder-role-authorization-methods","title":"Folder Role Authorization Methods","text":"

                                                                          In the Global Management -> Workspace and Folder list, find the folder, click Authorization , and grant folder role permissions to the user.

                                                                          "},{"location":"en/admin/ghippo/access-control/user.html","title":"User","text":"

                                                                          A user refers to a user created by the platform administrator Admin or the access control administrator IAM Owner on the Global Management -> Access Control -> Users page, or a user connected through LDAP / OIDC . The username represents the account, and the user logs in to the Suanova Enterprise platform through the username and password.

Having a user account is a prerequisite for accessing the platform. A newly created user has no permissions by default, so you need to assign the corresponding role permissions, for example by granting submodule administrator permissions in User List or User Details . A submodule administrator has the highest authority within that submodule and can create, manage, and delete all resources of the module. If a user needs permission for a specific resource, such as the permission to use a certain resource, see the Resource Authorization Description.

                                                                          This page introduces operations such as creating, authorizing, disabling, enabling, and deleting users.

                                                                          "},{"location":"en/admin/ghippo/access-control/user.html#create-user","title":"Create user","text":"

Prerequisite: You have the platform administrator Admin permission or the access control administrator IAM Owner permission.

                                                                          1. The administrator enters Access Control , selects Users , enters the user list, and clicks Create User on the upper right.

2. Fill in the username and login password on the Create User page. To create multiple users at once, click Create User to create them in batches; up to 5 users can be created at a time. Decide, based on your actual situation, whether to require the user to reset the password at first login.

3. Click OK . The user is created successfully, and you return to the user list page.

                                                                          Note

                                                                          The username and password set here will be used to log in to the platform.

                                                                          "},{"location":"en/admin/ghippo/access-control/user.html#authorize-for-user","title":"Authorize for User","text":"

                                                                          Prerequisite: The user already exists.

                                                                          1. The administrator enters Access Control , selects Users , enters the user list, and clicks \u2507 -> Authorization .

                                                                          2. On the Authorization page, check the required role permissions (multiple choices are allowed).

                                                                          3. Click OK to complete the authorization for the user.

                                                                          Note

                                                                          In the user list, click a user to enter the user details page.

                                                                          "},{"location":"en/admin/ghippo/access-control/user.html#add-user-to-group","title":"Add user to group","text":"
                                                                          1. The administrator enters Access Control , selects Users , enters the user list, and clicks \u2507 -> Add to Group .

2. On the Add to Group page, select the groups to join (multiple choices are allowed). If no suitable group exists, click Create a new group to create one, then return to this page and click the Refresh button to display the newly created group.

                                                                          3. Click OK to add the user to the group.

                                                                          Note

                                                                          The user will inherit the permissions of the group, and you can view the groups that the user has joined in User Details .

                                                                          "},{"location":"en/admin/ghippo/access-control/user.html#enabledisable-user","title":"Enable/Disable user","text":"

Once a user is disabled, that user can no longer access the platform. Unlike a deleted user, a disabled user can be re-enabled as needed. It is recommended to disable a user before deleting them, to ensure that no critical service is still using a key created by that user.

                                                                          1. The administrator enters Access Control , selects Users , enters the user list, and clicks a username to enter user details.

2. Click Edit in the upper right and turn off the status toggle so that the button becomes gray and inactive.

                                                                          3. Click OK to finish disabling the user.

                                                                          "},{"location":"en/admin/ghippo/access-control/user.html#forgot-password","title":"Forgot password","text":"

Prerequisite: the user's email address must be set. There are two ways to set a user's email address.

                                                                          • On the user details page, the administrator clicks Edit , enters the user's email address in the pop-up box, and clicks OK to complete the email setting.

                                                                          • Users can also enter the Personal Center and set the email address on the Security Settings page.

                                                                          If the user forgets the password when logging in, please refer to Reset Password.

                                                                          "},{"location":"en/admin/ghippo/access-control/user.html#delete-users","title":"Delete users","text":"

                                                                          Warning

After a user is deleted, the user can no longer access platform resources in any way, so delete with caution. Before deleting a user, make sure your key programs no longer use keys created by that user. If you are unsure, it is recommended to disable the user before deleting. If you delete a user and then create a new user with the same name, the new user is considered a new, separate identity that does not inherit the deleted user's roles.

                                                                          1. The administrator enters Access Control , selects Users , enters the user list, and clicks \u2507 -> Delete .

                                                                          2. Click Delete to finish deleting the user.

                                                                          "},{"location":"en/admin/ghippo/access-control/webhook.html","title":"Webhook Message Notification","text":"

                                                                          With AI platform integrated into the client's system, you can create Webhooks to send message notifications when users are created, updated, deleted, logged in, or logged out.

                                                                          Webhook is a mechanism for implementing real-time event notifications. It allows an application to push data or events to another application without the need for polling or continuous querying. By configuring Webhooks, you can specify that the target application receives and processes notifications when a certain event occurs.

                                                                          The working principle of Webhook is as follows:

                                                                          1. The source application (AI platform) performs a specific operation or event.
                                                                          2. The source application packages the relevant data and information into an HTTP request and sends it to the URL specified by the target application (e.g., enterprise WeChat group robot).
                                                                          3. The target application receives the request and processes it based on the data and information provided.
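As a rough illustration of step 3, the target application is just an HTTP endpoint that accepts the pushed request. Below is a minimal receiver sketch using the third-party Flask library; the URL path and payload keys are hypothetical, since the actual body depends on your Webhook configuration:

```python
from flask import Flask, request

app = Flask(__name__)

# Hypothetical path; use whatever URL you register in the Webhook form.
@app.post("/hooks/ai-platform-users")
def handle_user_event():
    event = request.get_json(force=True)
    # Illustrative keys only; the real shape follows your configured message body.
    print("received:", event.get("action"), event.get("user"))
    return {"ok": True}, 200

if __name__ == "__main__":
    app.run(port=8080)
```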

                                                                          By using Webhooks, you can achieve the following functionalities:

                                                                          • Real-time notification: Notify other applications in a timely manner when a specific event occurs.
                                                                          • Automation: The target application can automatically trigger predefined operations based on the received Webhook requests, eliminating the need for manual intervention.
                                                                          • Data synchronization: Use Webhooks to pass data from one application to another, enabling synchronized updates.

                                                                          Common use cases include:

                                                                          • Version control systems (e.g., GitHub, GitLab): Automatically trigger build and deployment operations when code repositories change.
                                                                          • E-commerce platforms: Send update notifications to logistics systems when order statuses change.
                                                                          • Chatbot platforms: Push messages to target servers via Webhooks for processing when user messages are received.
                                                                          "},{"location":"en/admin/ghippo/access-control/webhook.html#configuration-steps","title":"Configuration Steps","text":"

                                                                          The steps to configure Webhooks in AI platform are as follows:

                                                                          1. On the left nav, click Global Management -> Access Control -> Docking Portal , create a client ID.

                                                                          2. Click a client ID to enter the details page, then click the Create Webhook button.

                                                                          3. Fill in the field information in the popup window and click OK .

                                                                            • Object: Currently only supports the User object.
                                                                            • Action: Send Webhook messages when users are created/updated/deleted/logged in or out.
                                                                            • URL: The address to receive the messages.
                                                                            • Method: Choose the appropriate method as required, e.g., for enterprise WeChat, POST is recommended.
                                                                            • Advanced Configuration: You can write the message body in JSON format. For enterprise WeChat groups, refer to the Group Robot configuration guide.

                                                                          4. A screen prompt indicates that the Webhook was created successfully.

                                                                          5. Now try creating a user.

6. Once the user is created successfully, you can see that the enterprise WeChat group receives a message.

                                                                          "},{"location":"en/admin/ghippo/access-control/webhook.html#advanced-configuration-example","title":"Advanced Configuration Example","text":"

                                                                          Default Message Body

                                                                          AI platform predefines some variables that you can use in the message body based on your needs.

                                                                          {\n  \"id\": \"{{$$.ID$$}}\",\n  \"email\": \"{{$$.Email$$}}\",\n  \"username\": \"{{$$.Name$$}}\",\n  \"last_name\": \"{{$$.LastName$$}}\",\n  \"first_name\": \"{{$$.FirstName$$}}\",\n  \"created_at\": \"{{$$.CreatedAt$$}}\",\n  \"enabled\": \"{{$$.Enabled$$}}\"\n}\n

                                                                          Message Body for WeCom Group Robot

                                                                          {\n    \"msgtype\": \"text\",\n    \"text\": {\n      \"content\": \"{{$$.Name$$}} hello world\"\n    }\n}\n
                                                                          "},{"location":"en/admin/ghippo/audit/audit-log.html","title":"Audit log","text":"

                                                                          Audit logs help you monitor and record the activities of each user, and provide features for collecting, storing and querying security-related records arranged in chronological order. With the audit log service, you can continuously monitor and retain user behaviors in the Global Management module, including but not limited to user creation, user login/logout, user authorization, and user operations related to Kubernetes.

                                                                          "},{"location":"en/admin/ghippo/audit/audit-log.html#features","title":"Features","text":"

                                                                          The audit log feature has the following characteristics:

                                                                          • Out of the box: When installing and using the platform, the audit log feature will be enabled by default, automatically recording various user-related actions, such as creating users, authorization, and login/logout. By default, 365 days of user behavior can be viewed within the platform.

• Security analysis: The audit log records user operations in detail and provides an export function. Through these events, you can determine whether an account is at risk.

• Real-time recording: Operation events are collected quickly and can be traced back in the audit log list immediately after a user operation, so suspicious behavior can be spotted at any time.

                                                                          • Convenient and reliable: The audit log supports manual cleaning and automatic cleaning, and the cleaning policy can be configured according to your storage size.

                                                                          "},{"location":"en/admin/ghippo/audit/audit-log.html#view-audit-logs","title":"View Audit Logs","text":"
                                                                          1. Log in to AI platform with a user account that has the admin or Audit Owner role.

                                                                          2. At the bottom of the left navigation bar, click Global Management -> Audit Logs .

                                                                          "},{"location":"en/admin/ghippo/audit/audit-log.html#user-operations","title":"User operations","text":"

                                                                          On the User operations tab, you can search for user operation events by time range, or by using fuzzy or exact search.

                                                                          Click the \u2507 icon on the right side of an event to view its details.

                                                                          The event details are shown in the following figure.

Click Export in the upper right corner to export the user operation logs within the selected time range in CSV or Excel format.

                                                                          "},{"location":"en/admin/ghippo/audit/audit-log.html#system-operations","title":"System operations","text":"

                                                                          On the System operations tab, you can search for system operation events by time range, or by using fuzzy or exact search.

                                                                          Similarly, click the \u2507 icon on the right side of an event to view its details.

Click Export in the upper right corner to export the system operation logs within the selected time range in CSV or Excel format.

                                                                          "},{"location":"en/admin/ghippo/audit/audit-log.html#settings","title":"Settings","text":"

                                                                          On the Settings tab, you can clean up audit logs for user operations and system operations.

                                                                          You can manually clean up the logs, but it is recommended to export and save them before cleaning. You can also set the maximum retention time for the logs to automatically clean them up.

                                                                          Note

                                                                          The audit logs related to Kubernetes in the auditing module are provided by the Insight module. To reduce the storage pressure of the audit logs, Global Management by default does not collect Kubernetes-related logs. If you need to record them, please refer to Enabling K8s Audit Logs. Once enabled, the cleanup function is consistent with the Global Management cleanup function, but they do not affect each other.

                                                                          "},{"location":"en/admin/ghippo/audit/open-audit.html","title":"Enable/Disable collection of audit logs","text":"
                                                                          • Kubernetes Audit Logs: Kubernetes itself generates audit logs. When this feature is enabled, audit log files for Kubernetes will be created in the specified directory.
• Collecting Kubernetes Audit Logs: The log files mentioned above are collected by the Insight Agent. The prerequisites for collecting Kubernetes audit logs are that the cluster has enabled Kubernetes audit logs, the export of audit logs is allowed, and the collection of audit logs is turned on.
                                                                          "},{"location":"en/admin/ghippo/audit/open-audit.html#ai-platform-installation-status","title":"AI platform Installation Status","text":"
• For AI Community installations, the Kubernetes audit log switch is not modified during the management cluster installation process.
                                                                          • For AI platform Enterprise installations, the Kubernetes audit log switch is enabled by default.
• To turn it off by default, you can modify the installer's clusterConfig.yaml file (set logPath to the empty string \"\").
                                                                          • The collection of Kubernetes audit logs switch is disabled by default for the management cluster.
• This default setting does not support configuration.
                                                                          "},{"location":"en/admin/ghippo/audit/open-audit.html#management-cluster-collection-of-kubernetes-audit-logs-switch","title":"Management Cluster Collection of Kubernetes Audit Logs Switch","text":""},{"location":"en/admin/ghippo/audit/open-audit.html#ai-platform-enterprise-installation-environment","title":"AI platform Enterprise Installation Environment","text":""},{"location":"en/admin/ghippo/audit/open-audit.html#confirm-enabling-kubernetes-audit-logs","title":"Confirm Enabling Kubernetes Audit Logs","text":"

                                                                          Run the following command to check if audit logs are generated under the /var/log/kubernetes/audit directory. If they exist, it means that Kubernetes audit logs are successfully enabled.

                                                                          ls /var/log/kubernetes/audit\n

                                                                          If they are not enabled, please refer to the documentation on enabling/disabling Kubernetes audit logs.

                                                                          "},{"location":"en/admin/ghippo/audit/open-audit.html#enable-collection-of-kubernetes-audit-logs-process","title":"Enable Collection of Kubernetes Audit Logs Process","text":"
                                                                          1. Add ChartMuseum to the helm repo.

                                                                            helm repo add chartmuseum http://10.5.14.30:8081\n

                                                                            Modify the IP address in this command to the IP address of the Spark node.

                                                                            Note

                                                                            If using a self-built Harbor repository, please modify the chart repo URL in the first step to the insight-agent chart URL of the self-built repository.

                                                                          2. Save the current Insight Agent helm values.

                                                                            helm get values insight-agent -n insight-system -o yaml > insight-agent-values-bak.yaml\n
                                                                          3. Get the current version number ${insight_version_code}.

                                                                            insight_version_code=`helm list -n insight-system |grep insight-agent | awk {'print $10'}`\n
                                                                          4. Update the helm value configuration.

                                                                            helm upgrade --install --create-namespace --version ${insight_version_code} --cleanup-on-fail insight-agent chartmuseum/insight-agent -n insight-system -f insight-agent-values-bak.yaml --set global.exporters.auditLog.kubeAudit.enabled=true\n
                                                                          5. Restart all fluentBit pods under the insight-system namespace.

                                                                            fluent_pod=`kubectl get pod -n insight-system | grep insight-agent-fluent-bit | awk {'print $1'} | xargs`\nkubectl delete pod ${fluent_pod} -n insight-system\n
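To confirm that the new value has taken effect, you can read back the deployed values; a quick sketch:

# Check that kubeAudit is now enabled in the release values\nhelm get values insight-agent -n insight-system -o yaml | grep -B 2 -A 2 kubeAudit\n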
                                                                          "},{"location":"en/admin/ghippo/audit/open-audit.html#disable-collection-of-kubernetes-audit-logs","title":"Disable Collection of Kubernetes Audit Logs","text":"

The steps are the same as those for enabling the collection of Kubernetes audit logs; only step 4 in the previous section, updating the helm value configuration, changes:

                                                                          helm upgrade --install --create-namespace --version ${insight_version_code} --cleanup-on-fail insight-agent chartmuseum/insight-agent -n insight-system -f insight-agent-values-bak.yaml --set global.exporters.auditLog.kubeAudit.enabled=false\n
                                                                          "},{"location":"en/admin/ghippo/audit/open-audit.html#ai-community-online-installation-environment","title":"AI Community Online Installation Environment","text":"

                                                                          Note

                                                                          If installing AI Community in a Kind cluster, perform the following steps inside the Kind container.

                                                                          "},{"location":"en/admin/ghippo/audit/open-audit.html#confirm-enabling-kubernetes-audit-logs_1","title":"Confirm Enabling Kubernetes Audit Logs","text":"

                                                                          Run the following command to check if audit logs are generated under the /var/log/kubernetes/audit directory. If they exist, it means that Kubernetes audit logs are successfully enabled.

                                                                          ls /var/log/kubernetes/audit\n

                                                                          If they are not enabled, please refer to the documentation on enabling/disabling Kubernetes audit logs.

                                                                          "},{"location":"en/admin/ghippo/audit/open-audit.html#enable-collection-of-kubernetes-audit-logs-process_1","title":"Enable Collection of Kubernetes Audit Logs Process","text":"
                                                                          1. Save the current values.

                                                                            helm get values insight-agent -n insight-system -o yaml > insight-agent-values-bak.yaml\n
2. Get the current version number ${insight_version_code}.

                                                                            insight_version_code=`helm list -n insight-system |grep insight-agent | awk {'print $10'}`\n
                                                                          3. Update the helm value configuration.

                                                                            helm upgrade --install --create-namespace --version ${insight_version_code} --cleanup-on-fail insight-agent insight-release/insight-agent -n insight-system -f insight-agent-values-bak.yaml --set global.exporters.auditLog.kubeAudit.enabled=true\n

If the upgrade fails due to an unsupported version, check whether the helm repo used in the command has that version. If not, update the helm repo and retry.

                                                                            helm repo update insight-release\n
                                                                          4. Restart all fluentBit pods under the insight-system namespace.

                                                                            fluent_pod=`kubectl get pod -n insight-system | grep insight-agent-fluent-bit | awk {'print $1'} | xargs`\nkubectl delete pod ${fluent_pod} -n insight-system\n
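After the pods are deleted, their controller recreates them automatically; you can confirm they are Running again before relying on the new configuration. A sketch:

kubectl get pod -n insight-system | grep insight-agent-fluent-bit\n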
                                                                          "},{"location":"en/admin/ghippo/audit/open-audit.html#disable-collection-of-kubernetes-audit-logs_1","title":"Disable Collection of Kubernetes Audit Logs","text":"

The steps are the same as those for enabling the collection of Kubernetes audit logs; only step 3 in the previous section, updating the helm value configuration, changes:

                                                                          helm upgrade --install --create-namespace --version ${insight_version_code} --cleanup-on-fail insight-agent insight-release/insight-agent -n insight-system -f insight-agent-values-bak.yaml --set global.exporters.auditLog.kubeAudit.enabled=false\n
                                                                          "},{"location":"en/admin/ghippo/audit/open-audit.html#change-worker-cluster","title":"Change Worker Cluster","text":"

Each worker cluster is independent, and audit log collection can be turned on for each cluster as needed.

                                                                          "},{"location":"en/admin/ghippo/audit/open-audit.html#steps-to-enable-audit-log-collection-when-creating-a-cluster","title":"Steps to Enable Audit Log Collection When Creating a Cluster","text":"

                                                                          By default, the collection of K8s audit logs is turned off. If you need to enable it, you can follow these steps:

When creating a worker cluster via AI platform, set the K8s audit log switch for the cluster to the enabled state ('true'), so that the created worker cluster has audit logs enabled.

                                                                          After the cluster creation is successful, the K8s audit logs for that worker cluster will be collected.

                                                                          "},{"location":"en/admin/ghippo/audit/open-audit.html#steps-to-enabledisable-after-accessing-or-creating-the-cluster","title":"Steps to Enable/Disable After Accessing or Creating the Cluster","text":""},{"location":"en/admin/ghippo/audit/open-audit.html#confirm-enabling-k8s-audit-logs","title":"Confirm Enabling K8s Audit Logs","text":"

                                                                          Run the following command to check if audit logs are generated under the /var/log/kubernetes/audit directory. If they exist, it means that K8s audit logs are successfully enabled.

                                                                          ls /var/log/kubernetes/audit\n

                                                                          If they are not enabled, please refer to the documentation on enabling/disabling K8s audit logs.

                                                                          "},{"location":"en/admin/ghippo/audit/open-audit.html#enable-collection-of-k8s-audit-logs","title":"Enable Collection of K8s Audit Logs","text":"

                                                                          The collection of K8s audit logs is disabled by default. To enable it, follow these steps:

                                                                          1. Select the cluster that has been accessed and needs to enable the collection of K8s audit logs.

                                                                          2. Go to the Helm App management page and update the insight-agent configuration (if insight-agent is not installed, you can install it).

                                                                          3. Enable/Disable the collection of K8s audit logs switch.

                                                                          4. After enabling/disabling the switch, the fluent-bit pod needs to be restarted for the changes to take effect.

                                                                          "},{"location":"en/admin/ghippo/audit/open-k8s-audit.html","title":"Generate K8s Audit Logs","text":"

                                                                          By default, the Kubernetes cluster does not generate audit log information. Through the following configuration, you can enable the audit log feature of Kubernetes.

                                                                          Note

In a public cloud environment, it may not be possible to control whether Kubernetes audit logs are output or where they are written.

                                                                          1. Prepare the Policy file for the audit log
                                                                          2. Configure the API server, and enable audit logs
                                                                          3. Reboot and verify
                                                                          "},{"location":"en/admin/ghippo/audit/open-k8s-audit.html#prepare-audit-log-policy-file","title":"Prepare audit log Policy file","text":"Click to view Policy YAML for audit log policy.yaml
apiVersion: audit.k8s.io/v1\nkind: Policy\nrules:\n# The following requests were manually identified as high-volume and low-risk,\n# so drop them.\n- level: None\n  users: [\"system:kube-proxy\"]\n  verbs: [\"watch\"]\n  resources:\n  - group: \"\" # core\n    resources: [\"endpoints\", \"services\", \"services/status\"]\n- level: None\n  # Ingress controller reads `configmaps/ingress-uid` through the unsecured port.\n  # TODO(#46983): Change this to the ingress controller service account.\n  users: [\"system:unsecured\"]\n  namespaces: [\"kube-system\"]\n  verbs: [\"get\"]\n  resources:\n  - group: \"\" # core\n    resources: [\"configmaps\"]\n- level: None\n  users: [\"kubelet\"] # legacy kubelet identity\n  verbs: [\"get\"]\n  resources:\n  - group: \"\" # core\n    resources: [\"nodes\", \"nodes/status\"]\n- level: None\n  userGroups: [\"system:nodes\"]\n  verbs: [\"get\"]\n  resources:\n  - group: \"\" # core\n    resources: [\"nodes\", \"nodes/status\"]\n- level: None\n  users:\n  - system:kube-controller-manager\n  - system:kube-scheduler\n  - system:serviceaccount:kube-system:endpoint-controller\n  verbs: [\"get\", \"update\"]\n  namespaces: [\"kube-system\"]\n  resources:\n  - group: \"\" # core\n    resources: [\"endpoints\"]\n- level: None\n  users: [\"system:apiserver\"]\n  verbs: [\"get\"]\n  resources:\n  - group: \"\" # core\n    resources: [\"namespaces\", \"namespaces/status\", \"namespaces/finalize\"]\n# Don't log HPA fetching metrics.\n- level: None\n  users:\n  - system:kube-controller-manager\n  verbs: [\"get\", \"list\"]\n  resources:\n  - group: \"metrics.k8s.io\"\n# Don't log these read-only URLs.\n- level: None\n  nonResourceURLs:\n  - /healthz*\n  - /version\n  - /swagger*\n# Don't log events requests.\n- level: None\n  resources:\n  - group: \"\" # core\n    resources: [\"events\"]\n# Secrets, ConfigMaps, TokenRequest and TokenReviews can contain sensitive & binary data,\n# so only log at the Metadata level.\n- level: Metadata\n  resources:\n  - group: \"\" # core\n    resources: [\"secrets\", \"configmaps\", \"serviceaccounts/token\"]\n  - group: authentication.k8s.io\n    resources: [\"tokenreviews\"]\n  omitStages:\n  - \"RequestReceived\"\n# Get responses can be large; skip them.\n- level: Request\n  verbs: [\"get\", \"list\", \"watch\"]\n  resources:\n  - group: \"\" # core\n  - group: \"admissionregistration.k8s.io\"\n  - group: \"apiextensions.k8s.io\"\n  - group: \"apiregistration.k8s.io\"\n  - group: \"apps\"\n  - group: \"authentication.k8s.io\"\n  - group: \"authorization.k8s.io\"\n  - group: \"autoscaling\"\n  - group: \"batch\"\n  - group: \"certificates.k8s.io\"\n  - group: \"extensions\"\n  - group: \"metrics.k8s.io\"\n  - group: \"networking.k8s.io\"\n  - group: \"policy\"\n  - group: \"rbac.authorization.k8s.io\"\n  - group: \"settings.k8s.io\"\n  - group: \"storage.k8s.io\"\n  omitStages:\n  - \"RequestReceived\"\n# Default level for known APIs\n- level: RequestResponse\n  resources:\n  - group: \"\" # core\n  - group: \"admissionregistration.k8s.io\"\n  - group: \"apiextensions.k8s.io\"\n  - group: \"apiregistration.k8s.io\"\n  - group: \"apps\"\n  - group: \"authentication.k8s.io\"\n  - group: \"authorization.k8s.io\"\n  - group: \"autoscaling\"\n  - group: \"batch\"\n  - group: \"certificates.k8s.io\"\n  - group: \"extensions\"\n  - group: \"metrics.k8s.io\"\n  - group: \"networking.k8s.io\"\n  - group: \"policy\"\n  - group: \"rbac.authorization.k8s.io\"\n  - group: \"settings.k8s.io\"\n  - group: \"storage.k8s.io\"\n  omitStages:\n  - \"RequestReceived\"\n# Default level for all other requests.\n- level: Metadata\n  omitStages:\n  - \"RequestReceived\"\n

Put the above audit log policy file in the /etc/kubernetes/audit-policy/ folder and name it apiserver-audit-policy.yaml .
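For example, assuming the policy above was saved locally as policy.yaml, a sketch of placing it:

mkdir -p /etc/kubernetes/audit-policy\ncp policy.yaml /etc/kubernetes/audit-policy/apiserver-audit-policy.yaml\n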

                                                                          "},{"location":"en/admin/ghippo/audit/open-k8s-audit.html#configure-the-api-server","title":"Configure the API server","text":"

                                                                          Open the configuration file kube-apiserver.yaml of the API server, usually in the /etc/kubernetes/manifests/ folder, and add the following configuration information:

Please back up kube-apiserver.yaml before this step. The backup file must not be placed in /etc/kubernetes/manifests/ ; it is recommended to place it in /etc/kubernetes/tmp .
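A sketch of the recommended backup, using the paths mentioned above:

mkdir -p /etc/kubernetes/tmp\ncp /etc/kubernetes/manifests/kube-apiserver.yaml /etc/kubernetes/tmp/kube-apiserver.yaml.bak\n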

1. Add the following parameters under spec.containers.command :

                                                                            --audit-log-maxage=30\n--audit-log-maxbackup=10\n--audit-log-maxsize=100\n--audit-log-path=/var/log/audit/kube-apiserver-audit.log\n--audit-policy-file=/etc/kubernetes/audit-policy/apiserver-audit-policy.yaml\n
2. Add the following mounts under spec.containers.volumeMounts :

                                                                            - mountPath: /var/log/audit\n  name: audit-logs\n- mountPath: /etc/kubernetes/audit-policy\n  name: audit-policy\n
3. Add the following volumes under spec.volumes :

- hostPath:\n    path: /var/log/kubernetes/audit\n    type: \"\"\n  name: audit-logs\n- hostPath:\n    path: /etc/kubernetes/audit-policy\n    type: \"\"\n  name: audit-policy\n
                                                                          "},{"location":"en/admin/ghippo/audit/open-k8s-audit.html#test-and-verify","title":"Test and verify","text":"

After a while, the API server will restart automatically. Run the following command to check whether audit logs are generated in the /var/log/kubernetes/audit directory. If so, the K8s audit log is successfully enabled.

                                                                          ls /var/log/kubernetes/audit\n

If you want to disable the audit logs, just remove the relevant parameters from spec.containers.command .
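To spot-check the events being recorded, you can tail the log file on the host; a sketch, assuming the file name set by --audit-log-path above (surfaced on the host through the hostPath mount):

tail -n 5 /var/log/kubernetes/audit/kube-apiserver-audit.log\n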

                                                                          "},{"location":"en/admin/ghippo/audit/source-ip.html","title":"Get Source IP in Audit Logs","text":"

The source IP in audit logs plays a critical role in system and network management. It helps track activities, maintain security, resolve issues, and ensure system compliance. However, obtaining the source IP incurs some performance overhead, so it is not always enabled in AI platform. Whether the source IP is enabled by default, and how to enable it, depends on the installation mode. The following sections explain the default behavior and the steps to enable the source IP in audit logs for each installation mode.

                                                                          Note

Enabling this feature will modify the replica count of the istio-ingressgateway, which introduces a certain performance overhead. It also requires disabling LoadBalance of kube-proxy and Topology Aware Routing, which can affect cluster performance. After enabling it, make sure an istio-ingressgateway pod exists on the node corresponding to the access IP. If the istio-ingressgateway drifts away from that node due to node health or other issues, it must be manually rescheduled back; otherwise, the normal operation of AI platform will be affected.

                                                                          "},{"location":"en/admin/ghippo/audit/source-ip.html#determine-the-installation-mode","title":"Determine the Installation Mode","text":"
                                                                          kubectl get pod -n metallb-system\n

                                                                          Run the above command in the cluster. If the result is as follows, it means that the cluster is not in the MetalLB installation mode:

No resources found in metallb-system namespace.\n
                                                                          "},{"location":"en/admin/ghippo/audit/source-ip.html#nodeport-installation-mode","title":"NodePort Installation Mode","text":"

                                                                          In this mode, the source IP in audit logs is disabled by default. The steps to enable it are as follows:

                                                                          1. Set the minimum replica count of the istio-ingressgateway HPA to be equal to the number of control plane nodes

# Count control plane nodes; wc -l includes the kubectl header line, so subtract 1\ncount=$(kubectl get nodes --selector=node-role.kubernetes.io/control-plane | wc -l)\ncount=$((count-1))\n\n# Set the HPA minimum replicas to the control plane node count\nkubectl patch hpa istio-ingressgateway -n istio-system -p '{\"spec\":{\"minReplicas\":'$count'}}'\n
2. Modify the externalTrafficPolicy and internalTrafficPolicy values of the istio-ingressgateway service to \"Local\"

                                                                            kubectl patch svc istio-ingressgateway -n istio-system -p '{\"spec\":{\"externalTrafficPolicy\":\"Local\",\"internalTrafficPolicy\":\"Local\"}}'\n
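To verify that both fields were patched, a quick read-back; a sketch:

kubectl get svc istio-ingressgateway -n istio-system -o jsonpath='{.spec.externalTrafficPolicy} {.spec.internalTrafficPolicy}'\n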
                                                                          "},{"location":"en/admin/ghippo/audit/source-ip.html#metallb-installation-mode","title":"MetalLB Installation Mode","text":"

In this mode, the source IP in audit logs is available by default after installation. For more information, refer to MetalLB Source IP.

                                                                          "},{"location":"en/admin/ghippo/audit/gproduct-audit/ghippo.html","title":"Audit Items of Global Management","text":"Events Resource Type Notes UpdateEmail-Account Account UpdatePassword-Account Account CreateAccessKeys-Account Account UpdateAccessKeys-Account Account DeleteAccessKeys-Account Account Create-User User Delete-User User Update-User User UpdateRoles-User User UpdatePassword-User User CreateAccessKeys-User User UpdateAccessKeys-User User DeleteAccessKeys-User User Create-Group Group Delete-Group Group Update-Group Group AddUserTo-Group Group RemoveUserFrom-Group Group UpdateRoles-Group Group UpdateRoles-User User Create-LADP LADP Update-LADP LADP Delete-LADP LADP Unable to audit through API server for OIDC Login-User User Logout-User User UpdatePassword-SecurityPolicy SecurityPolicy UpdateSessionTimeout-SecurityPolicy SecurityPolicy UpdateAccountLockout-SecurityPolicy SecurityPolicy UpdateLogout-SecurityPolicy SecurityPolicy MailServer-SecurityPolicy SecurityPolicy CustomAppearance-SecurityPolicy SecurityPolicy OfficialAuthz-SecurityPolicy SecurityPolicy Create-Workspace Workspace Delete-Workspace Workspace BindResourceTo-Workspace Workspace UnBindResource-Workspace Workspace BindShared-Workspace Workspace SetQuota-Workspace Workspace Authorize-Workspace Workspace DeAuthorize-Workspace Workspace UpdateDeAuthorize-Workspace Workspace Update-Workspace Workspace Create-Folder Folder Delete-Folder Folder UpdateAuthorize-Folder Folder Update-Folder Folder Authorize-Folder Folder DeAuthorize-Folder Folder AutoCleanup-Audit Audit ManualCleanup-Audit Audit Export-Audit Audit"},{"location":"en/admin/ghippo/audit/gproduct-audit/insight.html","title":"Insight Audit Items","text":"Events Resource Type Notes Create-ProbeJob ProbeJob Update-ProbeJob ProbeJob Delete-ProbeJob ProbeJob Create-AlertPolicy AlertPolicy Update-AlertPolicy AlertPolicy Delete-AlertPolicy AlertPolicy Import-AlertPolicy AlertPolicy Create-AlertRule AlertRule Update-AlertRule AlertRule Delete-AlertRule AlertRule Create-RuleTemplate RuleTemplate Update-RuleTemplate RuleTemplate Delete-RuleTemplate RuleTemplate Create-email email Update-email email Delete-Receiver Receiver Create-dingtalk dingtalk Update-dingtalk dingtalk Delete-Receiver Receiver Create-wecom wecom Update-wecom wecom Delete-Receiver Receiver Create-webhook webhook Update-webhook webhook Delete-Receiver Receiver Create-sms sms Update-sms sms Delete-Receiver Receiver Create-aliyun(tencent,custom) aliyun, tencent, custom Update-aliyun(tencent,custom) aliyun, tencent, custom Delete-SMSserver SMSserver Create-MessageTemplate MessageTemplate Update-MessageTemplate MessageTemplate Delete-MessageTemplate MessageTemplate Create-AlertSilence AlertSilence Update-AlertSilence AlertSilence Delete-AlertSilence AlertSilence Create-AlertInhibition AlertInhibition Update-AlertInhibition AlertInhibition Delete-AlertInhibition AlertInhibition Update-SystemSettings SystemSettings"},{"location":"en/admin/ghippo/audit/gproduct-audit/kpanda.html","title":"Audit Items of Container Management","text":"Events Resource Types Create-Cluster Cluster Delete-Cluster Cluster Integrate-Cluster Cluster Remove-Cluster Cluster Upgrade-Cluster Cluster Integrate-Node Node Remove-Node Node Update-NodeGPUMode NodeGPUMode Create-HelmRepo HelmRepo Create-HelmApp HelmApp Delete-HelmApp HelmApp Create-Deployment Deployment Delete-Deployment Deployment Create-DaemonSet DaemonSet Delete-DaemonSet DaemonSet Create-StatefulSet 
StatefulSet Delete-StatefulSet StatefulSet Create-Job Job Delete-Job Job Create-CronJob CronJob Delete-CronJob CronJob Delete-Pod Pod Create-Service Service Delete-Service Service Create-Ingress Ingress Delete-Ingress Ingress Create-StorageClass StorageClass Delete-StorageClass StorageClass Create-PersistentVolume PersistentVolume Delete-PersistentVolume PersistentVolume Create-PersistentVolumeClaim PersistentVolumeClaim Delete-PersistentVolumeClaim PersistentVolumeClaim Delete-ReplicaSet ReplicaSet BindResourceTo-Workspace Workspace UnBindResource-Workspace Workspace BindResourceTo-Workspace Workspace UnBindResource-Workspace Workspace Create-CloudShell CloudShell Delete-CloudShell CloudShell"},{"location":"en/admin/ghippo/audit/gproduct-audit/virtnest.html","title":"Audit Items of Virtual Machine","text":"Events Resource Type Notes Restart-VMs VM ConvertToTemplate-VMs VM Edit-VMs VM Update-VMs VM Restore-VMs VM Power on-VMs VM LiveMigrate-VMs VM Delete-VMs VM Delete-VM Template VM Template Create-VMs VM CreateSnapshot-VMs VM Power off-VMs VM Clone-VMs VM"},{"location":"en/admin/ghippo/best-practice/authz-plan.html","title":"Ordinary user authorization plan","text":"

Ordinary users are those who can use most product modules and features (except management features), have certain operation permissions on resources within their authorized scope, and can independently use resources to deploy applications.

                                                                          The authorization and resource planning process for such users is shown in the following figure.

                                                                          graph TB\n\n    start([Start]) --> user[1. Create User]\n    user --> ns[2. Prepare Kubernetes Namespace]\n    ns --> ws[3. Prepare Workspace]\n    ws --> ws-to-ns[4. Bind a workspace to namespace]\n    ws-to-ns --> authu[5. Authorize a user with Workspace Editor]\n    authu --> complete([End])\n\nclick user \"https://docs.daocloud.io/en/ghippo/access-control/user/\"\nclick ns \"https://docs.daocloud.io/en/kpanda/namespaces/createns/\"\nclick ws \"https://docs.daocloud.io/en/ghippo/workspace/workspace/\"\nclick ws-to-ns \"https://docs.daocloud.io/en/ghippo/workspace/ws-to-ns-across-clus/\"\nclick authu \"https://docs.daocloud.io/en/ghippo/workspace/wspermission/\"\n\n classDef plain fill:#ddd,stroke:#fff,stroke-width:4px,color:#000;\n classDef k8s fill:#326ce5,stroke:#fff,stroke-width:4px,color:#fff;\n classDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n class user,ns,ws,ws-to-ns,authu cluster;\n class start,complete plain;
                                                                          "},{"location":"en/admin/ghippo/best-practice/cluster-for-multiws.html","title":"Assign a Cluster to Multiple Workspaces (Tenants)","text":"

Cluster resources are typically managed by operations personnel. When allocating resources, they need to create namespaces to isolate resources and set resource quotas. This approach has a drawback: if the enterprise's business volume is large, manually allocating resources requires significant effort, and flexibly adjusting resource quotas can be challenging.

To address this, the AI platform introduces the concept of workspaces. By sharing resources, workspaces can provide higher-dimensional resource quota capabilities, allowing workspaces (tenants) to create Kubernetes namespaces on their own within their resource quotas.

                                                                          For example, if you want several departments to share different clusters:

| Department (Workspace) | Cluster01 (Normal) | Cluster02 (High Availability) |
| --- | --- | --- |
| Department (Workspace) A | 50 quota | 10 quota |
| Department (Workspace) B | 100 quota | 20 quota |

                                                                          You can follow the process below to share clusters with multiple departments/workspaces/tenants:

                                                                          graph TB\n\npreparews[Prepare Workspace] --> preparecs[Prepare Cluster]\n--> share[Share Cluster to Workspace]\n--> judge([Judge Workspace Remaining Quota])\njudge -.Greater than remaining quota.->modifyns[Modify Namespace Quota]\njudge -.Less than remaining quota.->createns[Create Namespace]\n\nclassDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;\nclassDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;\nclassDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n\nclass preparews,preparecs,share, cluster;\nclass judge plain\nclass modifyns,createns k8s\n\nclick preparews \"https://docs.daocloud.io/en/ghippo/workspace/cluster-for-multiws/#prepare-a-workspace\"\nclick preparecs \"https://docs.daocloud.io/en/ghippo/workspace/cluster-for-multiws/#prepare-a-cluster\"\nclick share \"https://docs.daocloud.io/en/ghippo/workspace/cluster-for-multiws/#add-a-cluster-to-the-workspace\"
                                                                          "},{"location":"en/admin/ghippo/best-practice/cluster-for-multiws.html#prepare-a-workspace","title":"Prepare a Workspace","text":"

                                                                          Workspaces are designed to meet multi-tenant usage scenarios, forming isolated resource environments based on clusters, cluster namespaces, meshes, mesh namespaces, multicloud, multicloud namespaces, and other resources. Workspaces can be mapped to various concepts such as projects, tenants, enterprises, and suppliers.

                                                                          1. Log in to AI platform with a user having the admin/folder admin role and click Global Management at the bottom of the left navigation bar.

                                                                          2. Click Workspaces and Folders in the left navigation bar, then click the Create Workspace button at the top right.

                                                                          3. Fill in the workspace name, folder, and other information, then click OK to complete the creation of the workspace.

                                                                          "},{"location":"en/admin/ghippo/best-practice/cluster-for-multiws.html#prepare-a-cluster","title":"Prepare a Cluster","text":"


                                                                          Follow these steps to prepare a cluster.

                                                                          1. Click Container Management at the bottom of the left navigation bar, then select Clusters .

                                                                          2. Click Create Cluster to create a cluster or click Integrate Cluster to integrate a cluster.

                                                                          "},{"location":"en/admin/ghippo/best-practice/cluster-for-multiws.html#add-cluster-to-workspace","title":"Add Cluster to Workspace","text":"

                                                                          Return to Global Management to add clusters to the workspace.

                                                                          1. Click Global Management -> Workspaces and Folders -> Shared Resources, then click a workspace name and click the New Shared Resource button.

                                                                          2. Select the cluster, fill in the resource quota, and click OK .

                                                                          "},{"location":"en/admin/ghippo/best-practice/folder-practice.html","title":"Folder Best Practices","text":"

                                                                          A folder represents an organizational unit (such as a department) and is a node in the resource hierarchy.

A folder can contain workspaces, subfolders, or a combination of both. It provides identity management and multi-level permission mapping, and can map the role of a user/group in a folder to its subfolders, workspaces, and resources. Therefore, with the help of folders, enterprise managers can centrally manage and control all resources.

                                                                          1. Build corporate hierarchy

First, build a folder hierarchy that mirrors the existing enterprise hierarchy. AI platform supports up to 5 levels of folders, which can be freely combined according to the actual situation of the enterprise; folders and workspaces map to entities such as departments, projects, and suppliers.

                                                                            Folders are not directly linked to resources, but indirectly achieve resource grouping through workspaces.

                                                                          2. User identity management

Folders provide three roles: Folder Admin, Folder Editor, and Folder Viewer (see role permissions for details). You can grant different roles to users/groups in the same folder through Authorization.

                                                                          3. Role and permission mapping

Enterprise administrator: Grant the Folder Admin role on the root folder. This user then has administrative authority over all departments, projects, and their resources.

                                                                            Department manager: grant separate management rights to each subfolder and workspace.

                                                                            Project members: Grant management rights separately at the workspace and resource levels.

                                                                          "},{"location":"en/admin/ghippo/best-practice/super-group.html","title":"Architecture Management of Large Enterprises","text":"

                                                                          With the continuous scaling of business, the company's scale continues to grow, subsidiaries and branches are established one after another, and some subsidiaries even further establish subsidiaries. The original large departments are gradually subdivided into multiple smaller departments, leading to an increasing number of hierarchical levels in the organizational structure. This organizational structure change also affects the IT governance architecture.

                                                                          The specific operational steps are as follows:

                                                                          1. Enable Isolation Mode between Folder/WS

                                                                            Please refer to Enable Isolation Mode between Folder/WS.

                                                                          2. Plan Enterprise Architecture according to the Actual Situation

                                                                            Under a multi-level organizational structure, it is recommended to use the second-level folder as an isolation unit to isolate users/user groups/resources between \"sub-companies\". After isolation, users/user groups/resources between \"sub-companies\" are not visible to each other.

                                                                          3. Create Users/Integrate User Systems

                                                                            The main platform administrator Admin can create users on the platform or integrate users through LDAP/OIDC/OAuth2.0 and other identity providers to AI platform.

                                                                          4. Create Folder Roles

In the isolation mode of Folder/WS, the platform administrator Admin must first authorize users to invite them into the various sub-companies, so that the sub-company administrators (Folder Admin) can then manage those users, for example through secondary authorization or permission edits. To simplify the Admin's work, it is recommended to create a role without actual permissions to assist in inviting users into sub-companies through \"authorization\"; the actual permissions of sub-company users are then delegated to the sub-company administrators (Folder Admin) to manage independently. (The following demonstrates how to create a resource-bound role without actual permissions, i.e., minirole.)

                                                                            Note

Resource-bound permissions have no effect when used alone. This satisfies the requirement that users are invited into sub-companies through \"authorization\" and are then managed by the sub-company administrators (Folder Admin).

                                                                          5. Authorize Users

                                                                            The platform administrator invites users to various sub-companies according to the actual situation and appoints sub-company administrators.

                                                                            Authorize sub-company regular users as \"minirole\" (1), and authorize sub-company administrators as Folder Admin.

                                                                            1. Refers to the role without actual permissions created in step 4
                                                                          6. Sub-company Administrators Manage Users/User Groups Independently

                                                                            Sub-company administrator Folder Admin can only see their own \"Sub-company 2\" after logging into the platform, and can adjust the architecture by creating folders, creating workspaces, and assigning other permissions to users in Sub-company 2 through adding authorization/edit permissions.

                                                                            When adding authorization, sub-company administrator Folder Admin can only see users invited by the platform administrator through \"authorization\", and cannot see all users on the platform, thus achieving user isolation between Folder/WS, and the same applies to user groups (the platform administrator can see and authorize all users and user groups on the platform).

                                                                          Note

The main difference between large enterprises and small/medium-sized enterprises lies in whether users/user groups in folders and workspaces are visible to each other. In large enterprises, users/user groups of different subsidiaries are not visible to each other and permissions are isolated; in small/medium-sized enterprises, users of different departments are visible to each other while permissions remain isolated.

                                                                          "},{"location":"en/admin/ghippo/best-practice/system-message.html","title":"System Messages","text":"

                                                                          System messages are used to notify all users, similar to system announcements, and will be displayed at the top bar of the AI platform UI at specific times.

                                                                          "},{"location":"en/admin/ghippo/best-practice/system-message.html#configure-system-messages","title":"Configure System Messages","text":"

You can create a system message by applying a YAML for the system message in the Global Service Cluster. The display time of the message is determined by the time fields in the YAML. System messages are displayed only within the time range configured by the start and end fields.

1. In the Clusters page, click the name of the Global Service Cluster to enter the Global Service Cluster.

                                                                          2. Select CRDs from the left navigation bar, search for ghippoconfig, and click the ghippoconfigs.ghippo.io that appears in the search results.

                                                                          3. Click Create from YAML or modify an existing YAML.

                                                                          A sample YAML is as follows:

                                                                          apiVersion: ghippo.io/v1alpha1\nkind: GhippoConfig\nmetadata:\n  name: system-message\nspec:\n  message: \"this is a message\"\n  start: 2024-01-02T15:04:05+08:00\n  end: 2024-07-24T17:26:05+08:00\n
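If you prefer the command line over the UI, the same YAML can also be applied with kubectl against the Global Service Cluster; a sketch, assuming the sample above is saved as system-message.yaml:

kubectl apply -f system-message.yaml\n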
                                                                          "},{"location":"en/admin/ghippo/best-practice/ws-best-practice.html","title":"Workspace Best Practices","text":"

A workspace is a resource grouping unit, and most resources can be bound to a workspace. Through authorization and resource binding, a workspace establishes the relationship between users and roles and applies it to all resources in the workspace at once.

                                                                          Through the workspace, you can easily manage teams and resources, and solve cross-module and cross-cluster resource authorization issues.

                                                                          "},{"location":"en/admin/ghippo/best-practice/ws-best-practice.html#workspace-features","title":"Workspace features","text":"

A workspace provides three features: authorization, resource groups, and shared resources. It mainly solves the problems of unified resource authorization, resource grouping, and resource quotas.

                                                                          1. Authorization: Grant users/groups different roles in the workspace, and apply the roles to the resources in the workspace.

Best practice: When ordinary users want to use the Workbench, microservice engine, service mesh, or middleware module features, or need permission to use container management and some service mesh resources, an administrator needs to grant them workspace permissions (Workspace Admin, Workspace Edit, or Workspace View). The administrator here can be the platform Admin role, the Workspace Admin role of the workspace, or a Folder Admin role above the workspace. See Relationship between Folder and Workspace.

2. Resource group: Resource groups and shared resources are two resource management modes of the workspace.

Resource groups support four resource types: Cluster, Cluster-Namespace (cross-cluster), Mesh, and Mesh-Namespace. A resource can only be bound to one resource group. Once a resource is bound to a resource group, the owner of the workspace gains full management rights over it, equivalent to being the owner of the resource, and is therefore not limited by resource quotas.

Best practice: The workspace can grant different role permissions to department members through the \"authorization\" feature, and apply the authorization relationship between people and roles to all resources in the workspace at once. Therefore, operations staff only need to bind resources to resource groups and add members of different department roles to the proper resource groups to ensure resource permissions are assigned correctly.

Department | Role | Cluster | Cross-cluster Cluster-Namespace | Mesh | Mesh-Namespace
---|---|---|---|---|---
Department Admin | Workspace Admin | \u2713 | \u2713 | \u2713 | \u2713
Department Core Members | Workspace Edit | \u2713 | \u2717 | \u2713 | \u2717
Other Members | Workspace View | \u2713 | \u2717 | \u2717 | \u2717
                                                                          3. Shared resources: The shared resource feature is mainly for cluster resources.

                                                                            A cluster can be shared by multiple workspaces (referring to the shared resource feature in the workspace); a workspace can also use the resources of multiple clusters at the same time. However, resource sharing does not mean that the sharer (workspace) can use the shared resource (cluster) without restriction, so the resource quota that the sharer (workspace) can use is usually limited.

At the same time, unlike resource groups, workspace members are only users of the shared resources: they can use cluster resources within the resource quota, for example creating namespaces and deploying applications in Workbench, but they do not have management authority over the cluster. With the quota in place, the total resource quota of the namespaces created/bound under this workspace cannot exceed the resources the cluster has allotted to this workspace.

Best practice: The operations department has a high-availability cluster 01 and wants to allocate it to department A (workspace A) and department B (workspace B), giving department A 50 CPU cores and department B 100 CPU cores. You can use the shared resources concept: share cluster 01 with department A and department B respectively, limiting department A's CPU quota to 50 cores and department B's to 100 cores. The administrator of department A (workspace A Admin) can then create and use namespaces in Workbench as long as the sum of the namespace quotas does not exceed 50 cores, and the administrator of department B (workspace B Admin) can do the same within 100 cores. Namespaces created by the administrators of departments A and B are automatically bound to their department, and other members of the department get the Namespace Admin, Namespace Edit, or Namespace View role for those namespaces (a department here refers to a workspace; a workspace can also be mapped to other concepts such as an organization or a supplier). The whole process is as follows (a conceptual quota sketch follows the table):

Department | Role | Cluster | Resource Quota
---|---|---|---
Department Administrator A | Workspace Admin | CPU 50 cores | CPU 50 cores
Department Administrator B | Workspace Admin | CPU 100 cores | CPU 100 cores
Other Members of the Department | Namespace Admin / Namespace Edit / Namespace View | Assign as needed | Assign as needed
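Under the hood, each namespace-level quota corresponds to a standard Kubernetes ResourceQuota object. The following is a conceptual sketch only, assuming a hypothetical namespace dept-a-ns01 belonging to department A; the platform creates and manages these objects itself, and all names and values here are illustrative:

apiVersion: v1\nkind: ResourceQuota\nmetadata:\n  name: dept-a-quota  # illustrative name\n  namespace: dept-a-ns01  # illustrative namespace of department A\nspec:\n  hard:\n    # Each namespace gets a slice of department A's 50-core budget;\n    # the sum across all of the workspace's namespaces may not exceed 50\n    requests.cpu: \"10\"\n    limits.cpu: \"10\"\n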
                                                                          "},{"location":"en/admin/ghippo/best-practice/ws-best-practice.html#the-effect-of-the-workspace-on-the-ai-platfrom","title":"The effect of the workspace on the AI platfrom","text":"

                                                                          Module name: Container Management

Due to the particularities of its functional modules, resources created in the container management module are not automatically bound to a workspace.

If you need unified authorization management of people and resources through workspaces, you can manually bind the required resources to a workspace, so that the users' roles in this workspace apply to those resources (the resources can come from different clusters).

In addition, container management and service mesh differ slightly in their resource binding entries: the workspace provides binding entries for the Cluster and Cluster-Namespace resources of container management, but has not yet opened binding entries for the Mesh and Mesh-Namespace resources of service mesh.

                                                                          For Mesh and Mesh-Namespace resources, you can manually bind them in the resource list of the service mesh.

                                                                          "},{"location":"en/admin/ghippo/best-practice/ws-best-practice.html#use-cases-of-workspace","title":"Use Cases of Workspace","text":"
• Map workspaces to concepts such as different departments, projects, and organizations; the Workspace Admin, Workspace Edit, and Workspace View roles in a workspace can be mapped to corresponding roles in those departments, projects, and organizations
• Add resources for different purposes to different workspaces for separate management and use
• Set up completely independent administrators for different workspaces to manage users and permissions within the scope of the workspace
• Share resources with different workspaces while limiting the maximum amount of resources each workspace can use
                                                                          "},{"location":"en/admin/ghippo/best-practice/ws-to-ns.html","title":"Workspaces (tenants) bind namespaces across clusters","text":"

Namespaces from different clusters can be bound under a workspace (tenant), enabling the workspace (tenant) to flexibly manage Kubernetes namespaces in any cluster on the platform. The platform also provides permission mapping, which maps a user's permissions in the workspace to the bound namespaces.

When one or more cross-cluster namespaces are bound under a workspace (tenant), the administrator does not need to authorize the workspace members again. The members' workspace roles are automatically mapped according to the following relationship to complete the authorization, avoiding repeated authorization operations:

                                                                          • Workspace Admin corresponds to Namespace Admin
                                                                          • Workspace Editor corresponds to Namespace Editor
                                                                          • Workspace Viewer corresponds to Namespace Viewer

                                                                          Here is an example:

User | Workspace | Role
---|---|---
User A | Workspace01 | Workspace Admin

                                                                          After binding a namespace to a workspace:

User | Category | Role
---|---|---
User A | Workspace01 | Workspace Admin
User A | Namespace01 | Namespace Admin"},{"location":"en/admin/ghippo/best-practice/ws-to-ns.html#implementation-plan","title":"Implementation plan","text":"

Bind different namespaces from different clusters to the same workspace (tenant); the usage flow for members under the workspace (tenant) is shown in the figure.

graph TB\n\npreparews[prepare workspace] --> preparens[prepare namespace] --> judge([whether the namespace is bound to another workspace])\njudge -.unbound.->nstows[bind namespace to workspace] -->wsperm[manage workspace access]\njudge -.bound.->createns[Create a new namespace]\n\nclassDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;\nclassDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;\nclassDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n\nclass preparews,preparens,createns,nstows,wsperm cluster;\nclass judge plain\n\nclick preparews \"https://docs.daocloud.io/ghippo/workspace/ws-to-ns-across-clus/#_3\"\nclick preparens \"https://docs.daocloud.io/ghippo/workspace/ws-to-ns-across-clus/#_4\"\nclick nstows \"https://docs.daocloud.io/ghippo/workspace/ws-to-ns-across-clus/#_5\"\nclick wsperm \"https://docs.daocloud.io/ghippo/workspace/ws-to-ns-across-clus/#_6\"\nclick createns \"https://docs.daocloud.io/ghippo/workspace/ws-to-ns-across-clus/#_4\"

                                                                          Tip

A namespace can only be bound to one workspace.

                                                                          "},{"location":"en/admin/ghippo/best-practice/ws-to-ns.html#prepare-workspace","title":"Prepare workspace","text":"

To meet multi-tenant use cases, the workspace forms isolated resource environments based on resources such as clusters, cluster namespaces, meshes, mesh namespaces, multicloud instances, and multicloud namespaces. Workspaces can be mapped to concepts such as projects, tenants, enterprises, and suppliers.

                                                                          1. Log in to AI platform as a user with the admin/folder admin role, and click Global Management at the bottom of the left navigation bar.

                                                                          2. Click Workspace and Folder in the left navigation bar, and click the Create Workspace button in the upper right corner.

                                                                          3. After filling in the workspace name, folder and other information, click OK to complete the creation of the workspace.

Tip: If the namespace to be bound already exists on the platform, click the workspace, then under the Resource Group tab click Bind Resource to bind the namespace directly.

                                                                          "},{"location":"en/admin/ghippo/best-practice/ws-to-ns.html#prepare-the-namespace","title":"Prepare the namespace","text":"

                                                                          A namespace is a smaller unit of resource isolation that can be managed and used by members of a workspace after it is bound to a workspace.

                                                                          Follow the steps below to prepare a namespace that is not yet bound to any workspace.

                                                                          1. Click Container Management at the bottom of the left navigation bar.

                                                                          2. Click the name of the target cluster to enter Cluster Details .

                                                                          3. Click Namespace on the left navigation bar to enter the namespace management page, and click the Create button on the right side of the page.

                                                                          4. Fill in the name of the namespace, configure the workspace and tags (optional settings), and click OK .

                                                                            Info

Workspaces are primarily used to group resources and grant users (user groups) different access rights to those resources. For a detailed description of the workspace, please refer to Workspace and Folder.

5. Click OK to complete the creation of the namespace. Then, on the right side of the namespace list, click \u2507 and select Bind Workspace from the pop-up menu (a CLI alternative is sketched below).
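If you prefer the CLI, an unbound namespace can also be created directly in the target cluster with kubectl and bound to a workspace afterwards. A minimal sketch (the namespace name is illustrative):

# Create a namespace that is not yet bound to any workspace\nkubectl create namespace ns01\n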

                                                                          "},{"location":"en/admin/ghippo/best-practice/ws-to-ns.html#bind-the-namespace-to-the-workspace","title":"Bind the namespace to the workspace","text":"

In addition to binding in the namespace list, you can also return to Global Management and follow the steps below to bind the namespace to a workspace.

                                                                          1. Click Global Management -> Workspace and Folder -> Resource Group , click a workspace name, and click the Bind Resource button.

2. Select the namespaces to be bound (multiple selections allowed), and click OK to complete the binding.

                                                                          "},{"location":"en/admin/ghippo/best-practice/ws-to-ns.html#add-members-to-the-workspace-and-authorize","title":"Add members to the workspace and authorize","text":"
                                                                          1. In Workspace and Folder -> Authorization , click the name of a workspace, and click the Add Authorization button.

                                                                          2. After selecting the User/group and Role to be authorized, click OK to complete the authorization.

                                                                          "},{"location":"en/admin/ghippo/best-practice/gproduct/intro.html","title":"How GProduct connects to global management","text":"

GProduct is the general term for all modules in AI platform other than global management. These modules must be docked with global management before they can be added to AI platform.

                                                                          "},{"location":"en/admin/ghippo/best-practice/gproduct/intro.html#what-to-be-docking","title":"What to be docking","text":"
                                                                          • Docking Navigation Bar

                                                                            The entrances are unified on the left navigation bar.

                                                                          • Access Routing and AuthN

Unify the IP or domain name, and route all entries through the Istio Gateway managed by global management.

                                                                          • Unified login / unified AuthN authentication

The login page is unified to use the global management (Keycloak) login page, and API authn token verification goes through the Istio Gateway. After GProduct is docked with global management, it does not need to care about how login and authentication are implemented.

                                                                          "},{"location":"en/admin/ghippo/best-practice/gproduct/nav.html","title":"Docking navigation bar","text":"

Take container management (codename kpanda) as an example of docking to the navigation bar.

                                                                          The expected effect after docking is as follows:

                                                                          "},{"location":"en/admin/ghippo/best-practice/gproduct/nav.html#docking-method","title":"Docking method","text":"

                                                                          Refer to the following steps to dock the GProduct:

                                                                          1. Register all kpanda (container management) features to the nav bar via GProductNavigator CR.

                                                                            apiVersion: ghippo.io/v1alpha1\nkind: GProductNavigator\nmetadata:\n  name: kpanda\nspec:\n  gproduct: kpanda\n  name: \u5bb9\u5668\u7ba1\u7406\n  localizedName:\n    zh-CN: \u5bb9\u5668\u7ba1\u7406\n    en-US: Container Management\n  url: /kpanda\n  category: \u5bb9\u5668  # (1)\n  iconUrl: /kpanda/nav-icon.png\n  order: 10 # (2)\n  menus:\n  - name: \u5907\u4efd\u7ba1\u7406\n    localizedName:\n      zh-CN: \u5907\u4efd\u7ba1\u7406\n      en-US: Backup Management\n    iconUrl: /kpanda/bkup-icon.png\n    url: /kpanda/backup\n
1. Only one of overview, workbench, container, microservice, data service, and management is supported
2. The larger the number, the higher the menu is ranked

                                                                            The configuration for the global management navigation bar category is stored in a ConfigMap and cannot be added through registration at present. Please contact the global management team to add it.

2. The kpanda frontend is integrated into the AI platform parent application Anakin as a micro-frontend.

The AI platform frontend uses qiankun to connect sub-application UIs. See getting started.

After registering the GProductNavigator CR, the corresponding registration information will be generated for the frontend parent application. For example, kpanda generates the following registration information:

                                                                            {\n  \"id\": \"kpanda\",\n  \"title\": \"\u5bb9\u5668\u7ba1\u7406\",\n  \"url\": \"/kpanda\",\n  \"uiAssetsUrl\": \"/ui/kpanda/\", // The trailing / is required\n  \"needImportLicense\": false\n},\n

The correspondence between the above registration information and the qiankun sub-application fields is:

                                                                            {\n    name: id,\n    entry: uiAssetsUrl,\n    container: '#container',\n    activeRule: url, \n    loader,\n    props: globalProps,\n}\n

container and loader are provided by the frontend parent application; the sub-application does not need to concern itself with them. props provides a Pinia store containing basic user information and sub-product registration information.

                                                                            qiankun will use the following parameters on startup:

                                                                            start({\n  sandbox: {\n    experimentalStyleIsolation: true,\n  },\n  // Remove the favicon in the sub-application to prevent it from overwriting the parent application's favicon in Firefox\n  getTemplate: (template) => template.replaceAll(/<link\\s* rel=\"[\\w\\s]*icon[\\w\\s]*\"\\s*( href=\".*?\")?\\s*\\/?>/g, ''),\n});\n

Refer to the GProduct docking demo tar package provided by the frontend team.

                                                                          "},{"location":"en/admin/ghippo/best-practice/gproduct/route-auth.html","title":"Access routing and login authentication","text":"

After docking, login and password verification are unified. The effect is as follows:

API bearer token verification for each GProduct module goes through the Istio Gateway.

                                                                          The routing map after access is as follows:

                                                                          "},{"location":"en/admin/ghippo/best-practice/gproduct/route-auth.html#docking-method","title":"Docking method","text":"

                                                                          Take kpanda as an example to register GProductProxy CR.

                                                                          # GProductProxy CR example, including routing and login authentication\n\n# spec.proxies: The route written later cannot be a subset of the route written first, and vice versa\n# spec.proxies.match.uri.prefix: If it is a backend api, it is recommended to add \"/\" at the end of the prefix to indicate the end of this path (special requirements can not be added)\n# spec.proxies.match.uri: supports prefix and exact modes; Prefix and Exact can only choose 1 out of 2; Prefix has a higher priority than Exact\n\napiVersion: ghippo.io/v1alpha1\nkind: GProductProxy\nmetadata:\n  name: kpanda  # (1)\nspec:\n  gproduct: kpanda  # (2)\n  proxies:\n  - labels:\n      kind: UIEntry\n    match:\n      uri:\n        prefix: /kpanda # (3)\n    rewrite:\n      uri: /index.html\n    destination:\n      host: ghippo-anakin.ghippo-system.svc.cluster.local\n      port: 80\n    authnCheck: false  # (4)\n  - labels:\n      kind: UIAssets\n    match:\n      uri:\n        prefix: /ui/kpanda/ # (5)\n    destination:\n      host: kpanda-ui.kpanda-system.svc.cluster.local\n      port: 80\n    authnCheck: false\n  - match:\n      uri:\n        prefix: /apis/kpanda.io/v1/a\n    destination:\n      host: kpanda-service.kpanda-system.svc.cluster.local\n      port: 80\n    authnCheck: false\n  - match:\n      uri:\n        prefix: /apis/kpanda.io/v1 # (6)\n    destination:\n      host: kpanda-service.kpanda-system.svc.cluster.local\n      port: 80\n    authnCheck: true\n
1. This is a cluster-scoped CRD
2. Specify the GProduct name in lowercase
3. exact is also supported
4. Whether the istio-gateway should perform AuthN token authentication for this route; false means authentication is skipped
5. For UIAssets, it is recommended to add / at the end to indicate the end of the path (otherwise the frontend may run into problems)
6. A route written later cannot be a subset of a route written earlier, and vice versa
                                                                          "},{"location":"en/admin/ghippo/best-practice/menu/menu-display-or-hiding.html","title":"Display/Hide Navigation Bar Menu Based on Permissions","text":"

Under the current permission system, global management can control the visibility of navigation bar menus according to user permissions. However, because Container Management's authorization information is not synchronized to Global Management, Global Management cannot accurately determine whether to display the Container Management menu.

Through the configuration in this document, the menus for Container Management and Insight are hidden by default wherever Global Management cannot make a judgment, and a whitelist authorization strategy is used to manage their visibility. (Global Management cannot perceive or judge cluster or namespace permissions granted through the Container Management pages.)

                                                                          For example, if User A holds the Cluster Admin role for cluster A in Container Management, Global Management cannot determine whether to display the Container Management menu. After the configuration described in this document, User A will not see the Container Management menu by default. They will need to have explicit permission in Global Management to access the Container Management menu.

                                                                          "},{"location":"en/admin/ghippo/best-practice/menu/menu-display-or-hiding.html#prerequisites","title":"Prerequisites","text":"

                                                                          The feature to show/hide menus based on permissions must be enabled. The methods to enable this are as follows:

• For new installation environments, add the --set global.navigatorVisibleDependency=true parameter when running helm install.
• For existing environments, back up the current values with helm get values ghippo -n ghippo-system -o yaml > bak.yaml, then edit bak.yaml and add global.navigatorVisibleDependency: true.

                                                                          Then upgrade the Global Management using the following command:

helm upgrade ghippo ghippo-release/ghippo \\\n  -n ghippo-system \\\n  -f ./bak.yaml \\\n  --version ${version}\n
                                                                          "},{"location":"en/admin/ghippo/best-practice/menu/menu-display-or-hiding.html#configure-the-navigation-bar","title":"Configure the Navigation Bar","text":"

                                                                          Apply the following YAML in kpanda-global-cluster:

                                                                          apiVersion: ghippo.io/v1alpha1  \nkind: GProductNavigator  \nmetadata:  \n  name: kpanda-menus-custom  \nspec:  \n  category: container  \n  gproduct: kpanda  \n  iconUrl: ./ui/kpanda/kpanda.svg  \n  isCustom: true  \n  localizedName:  \n    en-US: Container Management  \n    zh-CN: \u5bb9\u5668\u7ba1\u7406  \n  menus:  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Clusters  \n        zh-CN: \u96c6\u7fa4\u5217\u8868  \n      name: Clusters  \n      order: 80  \n      url: ./kpanda/clusters  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Namespaces  \n        zh-CN: \u547d\u540d\u7a7a\u95f4  \n      name: Namespaces  \n      order: 70  \n      url: ./kpanda/namespaces  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Workloads  \n        zh-CN: \u5de5\u4f5c\u8d1f\u8f7d  \n      name: Workloads  \n      order: 60  \n      url: ./kpanda/workloads/deployments  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Permissions  \n        zh-CN: \u6743\u9650\u7ba1\u7406  \n      name: Permissions  \n      order: 10  \n      url: ./kpanda/rbac/content/cluster  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n  name: Container Management \n  order: 50  \n  url: ./kpanda/clusters  \n  visible: true  \n\n---\napiVersion: ghippo.io/v1alpha1  \nkind: GProductNavigator  \nmetadata:  \n  name: insight-menus-custom  \nspec:  \n  category: microservice  \n  gproduct: insight  \n  iconUrl: ./ui/insight/logo.svg  \n  isCustom: true  \n  localizedName:  \n    en-US: Insight  \n    zh-CN: \u53ef\u89c2\u6d4b\u6027  \n  menus:  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Overview  \n        zh-CN: \u6982\u89c8  \n      name: Overview  \n      order: 9  \n      url: ./insight/overview  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Dashboard  \n        zh-CN: \u4eea\u8868\u76d8  \n      name: Dashboard  \n      order: 8  \n      url: ./insight/dashboard  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Infrastructure  \n        zh-CN: \u57fa\u7840\u8bbe\u65bd  \n      name: Infrastructure  \n      order: 7  \n      url: ./insight/clusters  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Metrics  \n        zh-CN: \u6307\u6807  \n      name: Metrics  \n      order: 6  \n      url: ./insight/metric/basic  \n      visible: true  \n      
visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Logs  \n        zh-CN: \u65e5\u5fd7  \n      name: Logs  \n      order: 5  \n      url: ./insight/logs  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Trace Tracking  \n        zh-CN: \u94fe\u8def\u8ffd\u8e2a  \n      name: Trace Tracking  \n      order: 4  \n      url: ./insight/topology  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Alerts  \n        zh-CN: \u544a\u8b66  \n      name: Alerts  \n      order: 3  \n      url: ./insight/alerts/active/metrics  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: Collect Management  \n        zh-CN: \u91c7\u96c6\u7ba1\u7406  \n      name: Collect Management  \n      order: 2  \n      url: ./insight/agents  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n    - iconUrl: ''  \n      isCustom: true  \n      localizedName:  \n        en-US: System Management  \n        zh-CN: \u7cfb\u7edf\u7ba1\u7406  \n      name: System Management  \n      order: 1  \n      url: ./insight/system-components  \n      visible: true  \n      visibleDependency:  \n        permissions:  \n          - kpanda.cluster.*  \n          - kpanda.menu.get  \n  name: Insight \n  order: 30  \n  url: ./insight  \n  visible: true  \n\n---\napiVersion: ghippo.io/v1alpha1  \nkind: GProductResourcePermissions  \nmetadata:  \n  name: kpanda  \nspec:  \n  actions:  \n    - localizedName:  \n        en-US: Create  \n        zh-CN: \u521b\u5efa  \n      name: create  \n    - localizedName:  \n        en-US: Delete  \n        zh-CN: \u5220\u9664  \n      name: delete  \n    - localizedName:  \n        en-US: Update  \n        zh-CN: \u7f16\u8f91  \n      name: update  \n    - localizedName:  \n        en-US: Get  \n        zh-CN: \u67e5\u770b  \n      name: get  \n    - localizedName:  \n        en-US: Admin  \n        zh-CN: \u7ba1\u7406  \n      name: admin  \n  authScopes:  \n    - resourcePermissions:  \n        - actions:  \n            - name: get  \n            - dependPermissions:  \n                - action: get  \n              name: create  \n            - dependPermissions:  \n                - action: get  \n              name: update  \n            - dependPermissions:  \n                - action: get  \n              name: delete  \n          resourceType: cluster  \n        - actions:  \n            - name: get  \n          resourceType: menu  \n      scope: platform  \n    - resourcePermissions:  \n        - actions:  \n            - name: admin  \n              tips:  \n                - en-US: >-  \n                    If the workspace is bound to a cluster, it will be assigned  \n                    the Cluster Admin role upon authorization.  
\n                  zh-CN: \u82e5\u5de5\u4f5c\u7a7a\u95f4\u7ed1\u5b9a\u4e86\u96c6\u7fa4\uff0c\u6388\u6743\u540e\u8fd8\u5c06\u88ab\u6620\u5c04\u4e3a\u5bf9\u5e94\u96c6\u7fa4\u7684 Cluster Admin \u89d2\u8272  \n          resourceType: cluster  \n        - actions:  \n            - name: get  \n              tips:  \n                - en-US: >-  \n                    If the workspace is bound to a namespace, it will be  \n                    assigned the NS View role upon authorization.  \n                  zh-CN: \u82e5\u5de5\u4f5c\u7a7a\u95f4\u7ed1\u5b9a\u4e86\u547d\u540d\u7a7a\u95f4\uff0c\u6388\u6743\u540e\u8fd8\u5c06\u88ab\u6620\u5c04\u4e3a\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4\u7684 NS View \u89d2\u8272  \n            - name: update  \n              tips:  \n                - en-US: >-  \n                    If the workspace is bound to a namespace, it will be  \n                    assigned the NS Edit role upon authorization.  \n                  zh-CN: \u82e5\u5de5\u4f5c\u7a7a\u95f4\u7ed1\u5b9a\u4e86\u547d\u540d\u7a7a\u95f4\uff0c\u6388\u6743\u540e\u8fd8\u5c06\u88ab\u6620\u5c04\u4e3a\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4\u7684 NS  Edit \u89d2\u8272  \n            - name: admin  \n              tips:  \n                - en-US: >-  \n                    If the workspace is bound to a namespace, it will be  \n                    assigned the NS Admin role upon authorization.  \n                  zh-CN: \u82e5\u5de5\u4f5c\u7a7a\u95f4\u7ed1\u5b9a\u4e86\u547d\u540d\u7a7a\u95f4\uff0c\u6388\u6743\u540e\u8fd8\u5c06\u88ab\u6620\u5c04\u4e3a\u5bf9\u5e94\u547d\u540d\u7a7a\u95f4\u7684 NS Admin \u89d2\u8272  \n          resourceType: namespace  \n      scope: workspace  \n  gproduct: kpanda  \n  resourceTypes:  \n    - localizedName:  \n        en-US: Cluster Management  \n        zh-CN: \u96c6\u7fa4\u7ba1\u7406  \n      name: cluster  \n    - localizedName:  \n        en-US: Menu  \n        zh-CN: \u83dc\u5355  \n      name: menu  \n    - localizedName:  \n        en-US: Namespace Management  \n        zh-CN: \u547d\u540d\u7a7a\u95f4  \n      name: namespace\n
                                                                          "},{"location":"en/admin/ghippo/best-practice/menu/menu-display-or-hiding.html#achieve-the-above-effect-through-custom-roles","title":"Achieve the Above Effect Through Custom Roles","text":"

                                                                          Note

Only the Container Management module's menus need separate menu-permission configuration; other modules show or hide automatically based on user permissions.

Create a custom role that includes the permission to view the Container Management menu, and then grant this role to users who need access to the Container Management menu.

After authorization, these users can see the navigation bar menus for Container Management and Insight. The result is as follows:

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/custom-idp.html","title":"Customizing AI platform Integration with IdP","text":"

Identity Provider (IdP): In AI platform, when a client system is used as the user source and user authentication is performed through that system's login interface, the client system is referred to as the Identity Provider of AI platform.

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/custom-idp.html#use-cases","title":"Use Cases","text":"

If you have highly customized requirements for the Ghippo login IdP, such as supporting WeCom, WeChat, or other social login methods, refer to this document for implementation.

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/custom-idp.html#supported-versions","title":"Supported Versions","text":"

                                                                          Ghippo v0.15.0 and above.

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/custom-idp.html#specific-steps","title":"Specific Steps","text":""},{"location":"en/admin/ghippo/best-practice/oem/custom-idp.html#customizing-ghippo-keycloak-plugin","title":"Customizing Ghippo Keycloak Plugin","text":"
                                                                          1. Customize the plugin

                                                                            Refer to the official keycloak documentation and customizing Keycloak IdP for development.

                                                                          2. Build the image

# Build on an empty base image\nFROM scratch\n\n# Copy the custom IdP plugin jar(s) into the image\nCOPY ./xxx-jar-with-dependencies.jar /plugins/\n
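A hypothetical build sketch for the image above; the image name and tag are placeholders and should match the global.idpPlugin.image.repository / global.idpPlugin.image.tag values passed to helm later:

# Image name and tag are placeholders for your own registry\ndocker build -t chenyang-idp:v0.0.1 .\ndocker push chenyang-idp:v0.0.1\n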

                                                                          Note

                                                                          If you need two customized IdPs, you need to copy two jar packages.

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/custom-idp.html#deploying-ghippo-keycloak-plugin-steps","title":"Deploying Ghippo Keycloak Plugin Steps","text":"
                                                                          1. Upgrade Ghippo to v0.15.0 or above. You can also directly install and deploy Ghippo v0.15.0, but make sure to manually record the following information.

                                                                            helm -n ghippo-system get values ghippo -o yaml\n
                                                                            apiserver:\n  image:\n    repository: release.daocloud.io/ghippo-ci/ghippo-apiserver\n    tag: v0.4.2-test-3-gaba5ec2\ncontrollermanager:\n  image:\n    repository: release.daocloud.io/ghippo-ci/ghippo-apiserver\n    tag: v0.4.2-test-3-gaba5ec2\nglobal:\n  database:\n    builtIn: true\n  reverseProxy: http://192.168.31.10:32628\n
2. After a successful upgrade, manually run a helm upgrade command. The --set parameter values should be taken from the content saved above, plus the following additional parameters:

                                                                            • global.idpPlugin.enabled: Whether to enable the custom plugin, default is disabled.
                                                                            • global.idpPlugin.image.repository: The image address used by the initContainer to initialize the custom plugin.
                                                                            • global.idpPlugin.image.tag: The image tag used by the initContainer to initialize the custom plugin.
                                                                            • global.idpPlugin.path: The directory file of the custom plugin within the above image.

                                                                            Here is an example:

                                                                            helm upgrade \\\n    ghippo \\\n    ghippo-release/ghippo \\\n    --version v0.4.2-test-3-gaba5ec2 \\\n    -n ghippo-system \\\n    --set apiserver.image.repository=release.daocloud.io/ghippo-ci/ghippo-apiserver \\\n    --set apiserver.image.tag=v0.4.2-test-3-gaba5ec2 \\\n    --set controllermanager.image.repository=release.daocloud.io/ghippo-ci/ghippo-apiserver \\\n    --set controllermanager.image.tag=v0.4.2-test-3-gaba5ec2 \\\n    --set global.reverseProxy=http://192.168.31.10:32628 \\\n    --set global.database.builtIn=true \\\n    --set global.idpPlugin.enabled=true \\\n    --set global.idpPlugin.image.repository=chenyang-idp \\\n    --set global.idpPlugin.image.tag=v0.0.1 \\\n    --set global.idpPlugin.path=/plugins/.\n
                                                                          3. Select the desired plugin on the Keycloak administration page.

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/demo.html","title":"gproduct-demo","text":""},{"location":"en/admin/ghippo/best-practice/oem/demo.html#environment-setup","title":"Environment setup","text":"
                                                                          npm install\n

                                                                          Compile and hot-reload for development:

                                                                          npm run serve\n

                                                                          Compile and build:

                                                                          npm run build\n

                                                                          Fix linting issues:

                                                                          npm run lint\n
                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/demo.html#custom-configuration","title":"Custom Configuration","text":"

                                                                          Refer to the Configuration Reference for customization options.

                                                                          Build the image:

                                                                          docker build -t release.daocloud.io/henry/gproduct-demo .\n

                                                                          Run on Kubernetes:

                                                                          kubectl apply -f demo.yaml\n
                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/keycloak-idp.html","title":"Customizing Keycloak Identity Provider (IdP)","text":"

                                                                          Requirements: keycloak >= v20

Known issue: in Keycloak >= v21, support for old-version themes has been removed, which may be fixed in v22. See Issue #15344.

                                                                          This demo uses Keycloak v20.0.5.

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/keycloak-idp.html#source-based-development","title":"Source-based Development","text":""},{"location":"en/admin/ghippo/best-practice/oem/keycloak-idp.html#configure-the-environment","title":"Configure the Environment","text":"

                                                                          Refer to keycloak/building.md for environment configuration.

                                                                          Run the following commands based on keycloak/README.md:

                                                                          cd quarkus\nmvn -f ../pom.xml clean install -DskipTestsuite -DskipExamples -DskipTests\n
                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/keycloak-idp.html#run-from-ide","title":"Run from IDE","text":""},{"location":"en/admin/ghippo/best-practice/oem/keycloak-idp.html#add-service-code","title":"Add Service Code","text":""},{"location":"en/admin/ghippo/best-practice/oem/keycloak-idp.html#if-inheriting-some-functionality-from-keycloak","title":"If inheriting some functionality from Keycloak","text":"

Add files under the directory services/src/main/java/org/keycloak/broker:

The file names should be xxxProvider.java and xxxProviderFactory.java.

xxxProviderFactory.java example:

Pay attention to the variable PROVIDER_ID = \"oauth\"; it will be used in the HTML definition later.

xxxProvider.java example:

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/keycloak-idp.html#if-unable-to-inherit-functionality-from-keycloak","title":"If unable to inherit functionality from Keycloak","text":"

                                                                          Refer to the three files in the image below to write your own code:

                                                                          Add xxxProviderFactory to resource service

                                                                          Add xxxProviderFactory to services/src/main/resources/META-INF/services/org.keycloak.broker.provider.IdentityProviderFactory so that the newly added code can work:
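This is the standard Java ServiceLoader registration: the file contains the fully qualified class name of each factory, one per line. A hypothetical example, assuming your factory class lives in the org.keycloak.broker.oauth package:

# services/src/main/resources/META-INF/services/org.keycloak.broker.provider.IdentityProviderFactory\norg.keycloak.broker.oauth.OAuthIdentityProviderFactory\n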

                                                                          Add HTML file

                                                                          Copy the file themes/src/main/resources/theme/base/admin/resources/partials/realm-identity-provider-oidc.html and rename it as realm-identity-provider-oauth.html (remember the variable to pay attention to from earlier).

                                                                          Place the copied file in themes/src/main/resources/theme/base/admin/resources/partials/realm-identity-provider-oauth.html .

                                                                          All the necessary files have been added. Now you can start debugging the functionality.

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/keycloak-idp.html#packaging-as-a-jar-plugin","title":"Packaging as a JAR Plugin","text":"

                                                                          Create a new Java project and copy the above code into the project, as shown below:

                                                                          Refer to pom.xml.

                                                                          Run mvn clean package to package the code, resulting in the xxx-jar-with-dependencies.jar file.

                                                                          Download Keycloak Release 20.0.5 zip package and extract it.

                                                                          Copy the xxx-jar-with-dependencies.jar file to the keycloak-20.0.5/providers directory.
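For example, assuming the Maven build placed the artifact under target/ (paths are illustrative):

cp target/xxx-jar-with-dependencies.jar keycloak-20.0.5/providers/\n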

                                                                          Run the following command to check if the functionality is working correctly:

                                                                          bin/kc.sh start-dev\n
                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/oem-in.html","title":"Integrating Customer Systems into AI platform (OEM IN)","text":"

OEM IN means the partner's platform is embedded as a submodule of AI platform and appears in AI platform's primary navigation bar; users log in and manage it uniformly through AI platform. The implementation of OEM IN is divided into 5 steps:

                                                                          1. Unify Domain
                                                                          2. Integrate User Systems
                                                                          3. Integrate Navigation Bar
                                                                          4. Customize Appearance
                                                                          5. Integrate Permission System (Optional)

                                                                          For specific operational demonstrations, refer to the OEM IN Best Practices Video Tutorial.

                                                                          Note

The open source software Label Studio is used for the nested demonstrations below. In real scenarios, you need to solve the following issue in the customer system:

The customer system needs to add a Subpath to distinguish which services belong to AI platform and which belong to the customer system.

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/oem-in.html#environment-preparation","title":"Environment Preparation","text":"
                                                                          1. Deploy the AI platform environment:

                                                                            https://10.6.202.177:30443 as AI platform

                                                                          2. Deploy the customer system environment:

                                                                            http://10.6.202.177:30123 as the customer system

                                                                            Adjust the operations on the customer system during the application according to the actual situation.

3. Plan the customer system's Subpath: http://10.6.202.177:30123/label-studio (a recognizable name is recommended as the Subpath; it must not conflict with any HTTP route of the main AI platform). Ensure that users can access the customer system through http://10.6.202.177:30123/label-studio.

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/oem-in.html#unify-domain-name-and-port","title":"Unify Domain Name and Port","text":"
                                                                          1. SSH into the AI platform server.

                                                                            ssh root@10.6.202.177\n
                                                                          2. Create the label-studio.yaml file using the vim command.

                                                                            vim label-studio.yaml\n
                                                                            label-studio.yaml
                                                                            apiVersion: networking.istio.io/v1beta1\nkind: ServiceEntry\nmetadata:\n  name: label-studio\n  namespace: ghippo-system\nspec:\n  exportTo:\n  - \"*\"\n  hosts:\n  - label-studio.svc.external\n  ports:\n  # Add a virtual port\n  - number: 80\n    name: http\n    protocol: HTTP\n  location: MESH_EXTERNAL\n  resolution: STATIC\n  endpoints:\n  # Change to the domain name (or IP) of the customer system\n  - address: 10.6.202.177\n    ports:\n      # Change to the port number of the customer system\n      http: 30123\n---\napiVersion: networking.istio.io/v1alpha3\nkind: VirtualService\nmetadata:\n  # Change to the name of the customer system\n  name: label-studio\n  namespace: ghippo-system\nspec:\n  exportTo:\n  - \"*\"\n  hosts:\n  - \"*\"\n  gateways:\n  - ghippo-gateway\n  http:\n  - match:\n      - uri:\n          exact: /label-studio # Change to the routing address of the customer system in the Web UI entry\n      - uri:\n          prefix: /label-studio/ # Change to the routing address of the customer system in the Web UI entry\n    route:\n    - destination:\n        # Change to the value of spec.hosts in the ServiceEntry above\n        host: label-studio.svc.external\n        port:\n          # Change to the value of spec.ports in the ServiceEntry above\n          number: 80\n---\napiVersion: security.istio.io/v1beta1\nkind: AuthorizationPolicy\nmetadata:\n  # Change to the name of the customer system\n  name: label-studio\n  namespace: istio-system\nspec:\n  action: ALLOW\n  selector:\n    matchLabels:\n      app: istio-ingressgateway\n  rules:\n  - from:\n    - source:\n        requestPrincipals:\n        - '*'\n  - to:\n    - operation:\n        paths:\n        - /label-studio # Change to the value of spec.http.match.uri.prefix in VirtualService\n        - /label-studio/* # Change to the value of spec.http.match.uri.prefix in VirtualService (Note: add \"*\" at the end)\n
                                                                          3. Apply the label-studio.yaml using the kubectl command:

                                                                            kubectl apply -f label-studio.yaml\n
                                                                          4. Verify that the Label Studio UI is now accessible through the AI platform's IP and port:
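                                                                            As a quick check (a sketch, assuming the AI platform gateway listens on https://10.6.202.177:30443 as in the navigation bar example below), verify that the route resolves through the platform entry:

                                                                            curl -k -I https://10.6.202.177:30443/label-studio\n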

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/oem-in.html#integrate-user-systems","title":"Integrate User Systems","text":"

                                                                          Integrate the customer system with the AI platform through protocols like OIDC/OAuth, allowing users to enter the customer system without logging in again after logging into the AI platform.

                                                                          1. Taking the docking of two AI platforms as an example, you can create SSO access through Global Management -> Access Control -> Docking Portal.

                                                                          2. After creation, fill in the Client ID, Client Secret, and Login URL in the customer system's Global Management -> Access Control -> Identity Provider -> OIDC to complete user integration.

                                                                          3. After integration, the customer system login page will display an OIDC (Custom) option. Select it to log in via OIDC the first time you enter the customer system from the AI platform; subsequent visits will enter the customer system directly without selecting it again.
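                                                                          Before docking, you can optionally confirm that the OIDC issuer on the AI platform side is reachable by querying its discovery endpoint (a sketch; the /auth/realms/ghippo path follows Keycloak's convention and is an assumption that may differ in your deployment):

                                                                            curl -k https://10.6.202.177:30443/auth/realms/ghippo/.well-known/openid-configuration\n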

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/oem-in.html#integrate-navigation-bar","title":"Integrate Navigation Bar","text":"

                                                                          Refer to the tar package at the bottom of the document to implement an empty frontend sub-application, and embed the customer system into this empty shell application as an iframe.

                                                                          1. Download the gproduct-demo-main.tar.gz file and change the value of the src attribute in App-iframe.vue under the src folder (the entry through which users access the customer system):

                                                                            • The absolute address: src=\"https://10.6.202.177:30443/label-studio\" (AI platform address + Subpath)
                                                                            • The relative address, such as src=\"./external-anyproduct/insight\"
                                                                            App-iframe.vue
                                                                            <template>\n  <iframe\n    src=\"https://daocloud.io\"\n    title=\"demo\"\n    class=\"iframe-container\"\n  ></iframe>\n</template>\n\n<style lang=\"scss\">\nhtml,\nbody {\n  height: 100%;\n}\n\n#app {\n  display: flex;\n  height: 100%;\n  .iframe-container {\n    border: 0;\n    flex: 1 1 0;\n  }\n}\n</style>\n
                                                                          2. Delete the App.vue and main.ts files under the src folder, and rename:

                                                                            • Rename App-iframe.vue to App.vue
                                                                            • Rename main-iframe.ts to main.ts
                                                                          3. Build the image following the steps in the README (Note: before executing the last step, replace the image address in demo.yaml with your built image address)

                                                                            demo.yaml
                                                                            kind: Namespace\napiVersion: v1\nmetadata:\n  name: gproduct-demo\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: gproduct-demo\n  namespace: gproduct-demo\n  labels:\n    app: gproduct-demo\nspec:\n  selector:\n    matchLabels:\n      app: gproduct-demo\n  template:\n    metadata:\n      name: gproduct-demo\n      labels:\n        app: gproduct-demo\n    spec:\n      containers:\n      - name: gproduct-demo\n        image: release.daocloud.io/gproduct-demo # Modify this image address\n        ports:\n        - containerPort: 80\n---\napiVersion: v1\nkind: Service\n...\n
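                                                                            A condensed sketch of the build-and-deploy flow (release.daocloud.io/gproduct-demo is the placeholder from demo.yaml; substitute your own registry address):

                                                                            docker build -t release.daocloud.io/gproduct-demo:v0.0.1 .\ndocker push release.daocloud.io/gproduct-demo:v0.0.1\n# Update the image address in demo.yaml accordingly, then:\nkubectl apply -f demo.yaml\n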

                                                                          After integration, the customer system will appear in the primary navigation bar of the AI platform; clicking it allows users to enter the customer system.

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/oem-in.html#customize-appearance","title":"Customize Appearance","text":"

                                                                          Note

                                                                          AI platform supports customizing the appearance by writing CSS. How the customer system implements appearance customization in practice must be handled according to its own situation.

                                                                          Log in to the customer system, and through Global Management -> Settings -> Appearance, you can customize the platform background color, logo, and name. For specific operations, please refer to Appearance Customization.

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/oem-in.html#integrate-permission-system-optional","title":"Integrate Permission System (Optional)","text":"

                                                                          Method One:

                                                                          The customer team implements a custom module: AI platform notifies the module of every user login event via webhook, and the module then calls the OpenAPIs of AnyProduct and AI platform to synchronize the user's permission information.

                                                                          Method Two:

                                                                          Notify AnyProduct of every authorization change via webhook (this can be implemented later if required).

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/oem-in.html#use-other-capabilities-of-ai-platform-in-anyproduct-optional","title":"Use Other Capabilities of AI platform in AnyProduct (Optional)","text":"

                                                                          Call the AI platform OpenAPI from AnyProduct.

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/oem-in.html#references","title":"References","text":"
                                                                          • Refer to OEM OUT Document
                                                                          • Download the tar package for gProduct-demo-main integration
                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/oem-out.html","title":"Integrate AI platform into Customer System (OEM OUT)","text":"

                                                                          OEM OUT refers to integrating AI platform as a sub-module into other products, where it appears in their menus. After logging into the other product, you can access AI platform directly without logging in again. The OEM OUT integration involves 5 steps:

                                                                          1. Unify domain name
                                                                          2. User system integration
                                                                          3. Navigation bar integration
                                                                          4. Customize appearance
                                                                          5. Permission system integration (optional)
                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/oem-out.html#unify-domain-name","title":"Unify Domain Name","text":"
                                                                          1. Deploy AI platform (Assuming the access address after deployment is https://10.6.8.2:30343/).

                                                                          2. To achieve cross-domain access between the customer system and AI platform, you can use an nginx reverse proxy. Use the following example configuration in vi /etc/nginx/conf.d/default.conf (a note on the $connection_upgrade variable follows the configuration):

                                                                            server {\n    listen       80;\n    server_name  localhost;\n\n    location /dce5/ {\n      proxy_pass https://10.6.8.2:30343/;\n      proxy_http_version 1.1;\n      proxy_read_timeout 300s; # This line is required for using kpanda cloudtty, otherwise it can be removed\n      proxy_send_timeout 300s; # This line is required for using kpanda cloudtty, otherwise it can be removed\n\n      proxy_set_header Host $host;\n      proxy_set_header X-Real-IP $remote_addr;\n      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n\n      proxy_set_header Upgrade $http_upgrade; # This line is required for using kpanda cloudtty, otherwise it can be removed\n      proxy_set_header Connection $connection_upgrade; # This line is required for using kpanda cloudtty, otherwise it can be removed\n    }\n\n    location / {\n        proxy_pass https://10.6.165.50:30443/; # Assuming this is the customer system address (e.g., Yiyun)\n        proxy_http_version 1.1;\n\n        proxy_set_header Host $host;\n        proxy_set_header X-Real-IP $remote_addr;\n        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n    }\n}\n
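                                                                            Note that the $connection_upgrade variable referenced above is not built into nginx. If your configuration does not already define it, add a standard WebSocket-upgrade map block in the http context, for example:

                                                                            map $http_upgrade $connection_upgrade {\n    default upgrade;\n    ''      close;\n}\n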
                                                                          3. Assuming the nginx entry address is 10.6.165.50, follow the Customize AI platform Reverse Proxy Server Address to set the AI_PROXY reverse proxy as http://10.6.165.50/dce5. Ensure that AI platform can be accessed via http://10.6.165.50/dce5. The customer system also needs to configure the reverse proxy based on its specific requirements.
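                                                                            To sanity-check the proxy chain after configuration, a simple request should return the platform's response (a minimal sketch):

                                                                            curl -I http://10.6.165.50/dce5/\n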

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/oem-out.html#user-system-integration","title":"User System Integration","text":"

                                                                          Integrate the customer system with AI platform using protocols like OIDC/OAuth, allowing users to access AI platform without logging in again after logging into the customer system. Fill in the OIDC information of the customer system in Global Management -> Access Control -> Identity Provider.

                                                                          After integration, the AI platform login page will display the OIDC (custom) option. When accessing AI platform from the customer system for the first time, select OIDC login, and subsequent logins will directly enter AI platform without needing to choose again.

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/oem-out.html#navigation-bar-integration","title":"Navigation Bar Integration","text":"

                                                                          Navigation bar integration means adding AI platform to the menu of the customer system. You can directly access AI platform by clicking the proper menu item. The navigation bar integration depends on the customer system and needs to be handled based on specific circumstances.

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/oem-out.html#customizie-appearance","title":"Customizie Appearance","text":"

                                                                          Use Global Management -> Settings -> Appearance to customize the platform's background color, logo, and name. For detailed instructions, refer to Appearance Customization.

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/oem-out.html#permission-system-integration-optional","title":"Permission System Integration (optional)","text":"

                                                                          Permission system integration is complex. If you have such requirements, please contact the Global Management team.

                                                                          "},{"location":"en/admin/ghippo/best-practice/oem/oem-out.html#reference","title":"Reference","text":"
                                                                          • OEM IN
                                                                          "},{"location":"en/admin/ghippo/install/gm-gateway.html","title":"Use Guomi Gateway to proxy AI platform","text":"

                                                                          Follow the steps below to configure the Guomi Gateway for AI platform.

                                                                          "},{"location":"en/admin/ghippo/install/gm-gateway.html#software-introduction","title":"Software Introduction","text":"

                                                                          Tengine: Tengine is a web server project initiated by taobao.com. Based on Nginx, it adds many advanced features tailored to the needs of high-traffic websites.

                                                                          Tongsuo: Formerly known as BabaSSL, Tongsuo is an open-source cryptographic library that offers a range of modern cryptographic algorithms and secure communication protocols. It is designed to support a variety of use cases, including storage, network security, key management, and privacy computing. By providing foundational cryptographic capabilities, Tongsuo ensures the privacy, integrity, and authenticity of data during transmission, storage, and usage. It also enhances security throughout the data lifecycle, offering robust privacy protection and security features.

                                                                          "},{"location":"en/admin/ghippo/install/gm-gateway.html#preparation","title":"Preparation","text":"

                                                                          A Linux host with Docker installed and internet access.

                                                                          "},{"location":"en/admin/ghippo/install/gm-gateway.html#compile-and-install-tengine-tongsuo","title":"Compile and install Tengine & Tongsuo","text":"

                                                                          Note

                                                                          This configuration is for reference only.

                                                                          FROM docker.m.daocloud.io/debian:11.3\n\n# Version\nENV TENGINE_VERSION=\"2.3.4\" \\\n    TONGSUO_VERSION=\"8.3.2\"\n\n# Install required system packages and dependencies\nRUN apt update && \\\n    apt -y install \\\n    wget \\\n    gcc \\\n    make \\\n    libpcre3 \\\n    libpcre3-dev \\\n    zlib1g-dev \\\n    perl \\\n    && apt clean\n\n# Build tengine\nRUN mkdir -p /tmp/pkg/cache/ && cd /tmp/pkg/cache/ \\\n    && wget https://github.com/alibaba/tengine/archive/refs/tags/${TENGINE_VERSION}.tar.gz -O tengine-${TENGINE_VERSION}.tar.gz \\\n    && tar zxvf tengine-${TENGINE_VERSION}.tar.gz \\\n    && wget https://github.com/Tongsuo-Project/Tongsuo/archive/refs/tags/${TONGSUO_VERSION}.tar.gz -O Tongsuo-${TONGSUO_VERSION}.tar.gz \\\n    && tar zxvf Tongsuo-${TONGSUO_VERSION}.tar.gz \\\n    && cd tengine-${TENGINE_VERSION} \\\n    && ./configure \\\n        --add-module=modules/ngx_openssl_ntls \\\n        --with-openssl=/tmp/pkg/cache/Tongsuo-${TONGSUO_VERSION} \\\n        --with-openssl-opt=\"--strict-warnings enable-ntls\" \\\n        --with-http_ssl_module --with-stream \\\n        --with-stream_ssl_module --with-stream_sni \\\n    && make \\\n    && make install \\\n    && ln -s /usr/local/nginx/sbin/nginx /usr/sbin/ \\\n    && rm -rf /tmp/pkg/cache\n\nEXPOSE 80 443\nSTOPSIGNAL SIGTERM\nCMD [\"nginx\", \"-g\", \"daemon off;\"]\n
                                                                          docker build -t tengine:0.0.1 .\n
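                                                                          You can then start the gateway from this image (a sketch; mount your own configuration and certificates as needed):

                                                                          docker run -d --name tengine -p 80:80 -p 443:443 tengine:0.0.1\n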
                                                                          "},{"location":"en/admin/ghippo/install/gm-gateway.html#generate-sm2-and-rsa-tls-certificates","title":"Generate SM2 and RSA TLS Certificates","text":"

                                                                          Here's how to generate SM2 and RSA TLS certificates and configure the Guomi gateway.

                                                                          "},{"location":"en/admin/ghippo/install/gm-gateway.html#sm2-tls-certificate","title":"SM2 TLS Certificate","text":"

                                                                          Note

                                                                          This certificate is only for testing purposes.

                                                                          You can refer to the Tongsuo official documentation to use OpenSSL to generate SM2 certificates, or visit Guomi SSL Laboratory to apply for SM2 certificates.
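                                                                          For a rough idea, a self-signed SM2 key and certificate can be produced with a Tongsuo-built openssl along these lines (a test-only sketch and an assumption; production setups need separate signing and encryption certificate pairs, and additional SM2-specific options may be required, so treat the Tongsuo documentation as authoritative):

                                                                          # Generate an SM2 private key and a self-signed test certificate (test only)\nopenssl ecparam -genkey -name SM2 -out sm2.test.key.pem\nopenssl req -new -x509 -key sm2.test.key.pem -out sm2.test.crt.pem -days 365 -subj \"/CN=sm2.test\"\n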

                                                                          In the end, we will get the following files:

                                                                          -rw-r--r-- 1 root root  749 Dec  8 02:59 sm2.*.enc.crt.pem\n-rw-r--r-- 1 root root  258 Dec  8 02:59 sm2.*.enc.key.pem\n-rw-r--r-- 1 root root  749 Dec  8 02:59 sm2.*.sig.crt.pem\n-rw-r--r-- 1 root root  258 Dec  8 02:59 sm2.*.sig.key.pem\n
                                                                          "},{"location":"en/admin/ghippo/install/gm-gateway.html#rsa-tls-certificate","title":"RSA TLS Certificate","text":"
                                                                          -rw-r--r-- 1 root root  216 Dec  8 03:21 rsa.*.crt.pem\n-rw-r--r-- 1 root root 4096 Dec  8 02:59 rsa.*.key.pem\n
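                                                                          The RSA fallback pair can be generated with a single self-signed command (test only; replace the subject with your own domain):

                                                                          openssl req -x509 -newkey rsa:2048 -nodes -keyout rsa.test.key.pem -out rsa.test.crt.pem -days 365 -subj \"/CN=rsa.test\"\n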
                                                                          "},{"location":"en/admin/ghippo/install/gm-gateway.html#configure-sm2-and-rsa-tls-certificates-for-the-guomi-gateway","title":"Configure SM2 and RSA TLS Certificates for the Guomi Gateway","text":"

                                                                          The Guomi gateway used in this article supports SM2 and RSA TLS certificates. The advantage of dual certificates is that when the browser does not support SM2 TLS certificates, it automatically switches to RSA TLS certificates.

                                                                          For more detailed configurations, please refer to the Tongsuo official documentation.

                                                                          We enter the Tengine container:

                                                                          # Go to the nginx configuration file directory\ncd /usr/local/nginx/conf\n\n# Create the cert folder to store TLS certificates\nmkdir cert\n\n# Copy the SM2 and RSA TLS certificates to the `/usr/local/nginx/conf/cert` directory\ncp sm2.*.enc.crt.pem sm2.*.enc.key.pem  sm2.*.sig.crt.pem  sm2.*.sig.key.pem /usr/local/nginx/conf/cert\ncp rsa.*.crt.pem  rsa.*.key.pem /usr/local/nginx/conf/cert\n\n# Edit the nginx.conf configuration\nvim nginx.conf\n...\nserver {\n  listen 443          ssl;\n  proxy_http_version  1.1;\n  # Enable Guomi function to support SM2 TLS certificates\n  enable_ntls         on;\n\n  # RSA certificate\n  # If your browser does not support Guomi certificates, you can enable this option, and Tengine will automatically recognize the user's browser and use RSA certificates for fallback\n  ssl_certificate                 /usr/local/nginx/conf/cert/rsa.*.crt.pem;\n  ssl_certificate_key             /usr/local/nginx/conf/cert/rsa.*.key.pem;\n\n  # Configure two pairs of SM2 certificates for encryption and signature\n  # SM2 signature certificate\n  ssl_sign_certificate            /usr/local/nginx/conf/cert/sm2.*.sig.crt.pem;\n  ssl_sign_certificate_key        /usr/local/nginx/conf/cert/sm2.*.sig.key.pem;\n  # SM2 encryption certificate\n  ssl_enc_certificate             /usr/local/nginx/conf/cert/sm2.*.enc.crt.pem;\n  ssl_enc_certificate_key         /usr/local/nginx/conf/cert/sm2.*.enc.key.pem;\n  ssl_protocols                   TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;\n\n  location / {\n    proxy_set_header Host $http_host;\n    proxy_set_header X-Real-IP $remote_addr;\n    proxy_set_header REMOTE-HOST $remote_addr;\n    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n    # You need to modify the address here to the address of the Istio ingress gateway\n    # For example, proxy_pass https://istio-ingressgateway.istio-system.svc.cluster.local\n    # Or proxy_pass https://demo-dev.daocloud.io\n    proxy_pass https://istio-ingressgateway.istio-system.svc.cluster.local;\n  }\n}\n
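                                                                          Before reloading, it is worth validating the edited configuration:

                                                                          nginx -t\n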
                                                                          "},{"location":"en/admin/ghippo/install/gm-gateway.html#reload-the-configuration-of-the-guomi-gateway","title":"Reload the Configuration of the Guomi Gateway","text":"
                                                                          nginx -s reload\n
                                                                          "},{"location":"en/admin/ghippo/install/gm-gateway.html#next-steps","title":"Next Steps","text":"

                                                                          After successfully deploying the Guomi gateway, customize the AI platform reverse proxy server address.

                                                                          "},{"location":"en/admin/ghippo/install/gm-gateway.html#verification","title":"Verification","text":"

                                                                          You can deploy a web browser that supports Guomi certificates, such as Samarium Browser, and then access the UI through Tengine to verify that the Guomi certificate takes effect.

                                                                          "},{"location":"en/admin/ghippo/install/login.html","title":"Login","text":"

                                                                          When a user first uses a new system, the system holds no data about them and cannot identify them. To identify the user and bind user data, the user needs an account that uniquely identifies their identity.

                                                                          AI platform assigns the user an account with certain permissions when an administrator creates a new user in User and Access Control. All actions performed by this user are associated with their own account.

                                                                          The user logs in with the account and password, and the system verifies the identity. If the verification succeeds, the user is logged in.

                                                                          Note

                                                                          If the user performs no operation within 24 hours after logging in, the login session is automatically logged out. As long as the logged-in user remains active, the session persists.

                                                                          The simple process of user login is shown in the figure below.

                                                                          graph TB\n\nuser[Input username] --> pass[Input password] --> judge([Click Login and verify username and password])\njudge -.Correct.->success[Success]\njudge -.Incorrect.->fail[Fail]\n\nclassDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;\nclassDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;\nclassDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n\nclass user,pass cluster;\nclass judge plain\nclass success,fail k8s

                                                                          The user login screen is as shown in the figure below. For the specific login screen, please refer to the actual product.

                                                                          "},{"location":"en/admin/ghippo/install/reverse-proxy.html","title":"Customize AI platform Reverse Proxy Server Address","text":"

                                                                          The specific setup steps are as follows:

                                                                          1. Check if the global management Helm repository exists.

                                                                            helm repo list | grep ghippo\n

                                                                            If the result is empty or shows the following error, proceed to the next step; otherwise, skip the next step.

                                                                            Error: no repositories to show\n
                                                                          2. Add and update the global management Helm repository.

                                                                            helm repo add ghippo http://{harbor url}/chartrepo/{project}\nhelm repo update ghippo\n
                                                                          3. Set environment variables for easier use in the following steps.

                                                                            # Your reverse proxy address, for example `export Suanova_PROXY=\"https://demo-alpha.daocloud.io\"` \nexport Suanova_PROXY=\"https://domain:port\"\n\n# Helm --set parameter backup file\nexport GHIPPO_VALUES_BAK=\"ghippo-values-bak.yaml\"\n\n# Get the current version of ghippo\nexport GHIPPO_HELM_VERSION=$(helm get notes ghippo -n ghippo-system | grep \"Chart Version\" | awk -F ': ' '{ print $2 }')\n
                                                                          4. Backup the --set parameters.

                                                                            helm get values ghippo -n ghippo-system -o yaml > ${GHIPPO_VALUES_BAK}\n
                                                                          5. Add your reverse proxy address.

                                                                            Note

                                                                            • If possible, you can use the yq command:

                                                                              yq -i \".global.reverseProxy = \\\"${Suanova_PROXY}\\\"\" ${GHIPPO_VALUES_BAK}\n
                                                                            • Or you can use the vim command to edit and save:

                                                                              vim ${GHIPPO_VALUES_BAK}\n\nUSER-SUPPLIED VALUES:\n...\nglobal:\n  ...\n  reverseProxy: ${Suanova_PROXY} # Only need to modify this line\n
                                                                          6. Run helm upgrade to apply the configuration.

                                                                            helm upgrade ghippo ghippo/ghippo \\\n  -n ghippo-system \\\n  -f ${GHIPPO_VALUES_BAK} \\\n  --version ${GHIPPO_HELM_VERSION}\n
                                                                          7. Use kubectl to restart the global management Pod to apply the configuration.

                                                                            kubectl rollout restart deploy/ghippo-apiserver -n ghippo-system\nkubectl rollout restart statefulset/ghippo-keycloakx -n ghippo-system\n
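                                                                            Optionally, confirm that the restart has completed:

                                                                            kubectl rollout status deploy/ghippo-apiserver -n ghippo-system\nkubectl rollout status statefulset/ghippo-keycloakx -n ghippo-system\n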
                                                                          "},{"location":"en/admin/ghippo/install/user-isolation.html","title":"Customize AI platform Reverse Proxy Server Address","text":"

                                                                          The specific setup steps are as follows:

                                                                          1. Check if the global management Helm repository exists.

                                                                            helm repo list | grep ghippo\n

                                                                            If the result is empty or shows the following error, proceed to the next step; otherwise, skip the next step.

                                                                            Error: no repositories to show\n
                                                                          2. Add and update the global management Helm repository.

                                                                            helm repo add ghippo http://{harbor url}/chartrepo/{project}\nhelm repo update ghippo\n
                                                                          3. Set environment variables for easier use in the following steps.

                                                                            # Your reverse proxy address, for example `export Suanova_PROXY=\"https://demo-alpha.daocloud.io\"` \nexport Suanova_PROXY=\"https://domain:port\"\n\n# Helm --set parameter backup file\nexport GHIPPO_VALUES_BAK=\"ghippo-values-bak.yaml\"\n\n# Get the current version of ghippo\nexport GHIPPO_HELM_VERSION=$(helm get notes ghippo -n ghippo-system | grep \"Chart Version\" | awk -F ': ' '{ print $2 }')\n
                                                                          4. Backup the --set parameters.

                                                                            helm get values ghippo -n ghippo-system -o yaml > ${GHIPPO_VALUES_BAK}\n
                                                                          5. Add your reverse proxy address.

                                                                            Note

                                                                            • If possible, you can use the yq command:

                                                                              yq -i \".apiserver.userIsolationMode = \\\"Folder\\\"\" ${GHIPPO_VALUES_BAK}\n
                                                                            • Or you can use the vim command to edit and save:

                                                                              vim ${GHIPPO_VALUES_BAK}\n\nUSER-SUPPLIED VALUES:\n...\n# Just add the following two lines\napiserver:\n  userIsolationMode: Folder\n
                                                                          6. Run helm upgrade to apply the configuration.

                                                                            helm upgrade ghippo ghippo/ghippo \\\n  -n ghippo-system \\\n  -f ${GHIPPO_VALUES_BAK} \\\n  --version ${GHIPPO_HELM_VERSION}\n
                                                                          7. Use kubectl to restart the global management Pod to apply the configuration.

                                                                            kubectl rollout restart deploy/ghippo-apiserver -n ghippo-system\n
                                                                          "},{"location":"en/admin/ghippo/permissions/baize.html","title":"AI Lab Permissions","text":"

                                                                          AI Lab supports four user roles:

                                                                          • Admin / Baize Owner: Has full permissions (create, read, update, delete) for all features in both the Developer and Operator views.
                                                                          • Workspace Admin: Has full permissions (create, read, update, delete) for all Developer features within authorized workspaces.
                                                                          • Workspace Editor: Has update and read permissions for all Developer features within authorized workspaces.
                                                                          • Workspace Viewer: Has read permissions for all Developer features within authorized workspaces.

                                                                          Each role has different permissions, as detailed below.

                                                                          Menu Object Operation Admin / Baize Owner Workspace Admin Workspace Editor Workspace Viewer Developer Overview View Overview \u2713 \u2713 \u2713 \u2713 Notebooks View Notebooks \u2713 \u2713 \u2713 \u2713 View Notebooks Details \u2713 \u2713 \u2713 \u2717 Create Notebooks \u2713 \u2713 \u2717 \u2717 Update Notebooks \u2713 \u2713 \u2713 \u2717 Clone Notebooks \u2713 \u2713 \u2717 \u2717 Stop Notebooks \u2713 \u2713 \u2713 \u2717 Start Notebooks \u2713 \u2713 \u2713 \u2717 Delete Notebooks \u2713 \u2713 \u2717 \u2717 Jobs View Jobs \u2713 \u2713 \u2713 \u2713 View Job Details \u2713 \u2713 \u2713 \u2713 Create Job \u2713 \u2713 \u2717 \u2717 Clone Job \u2713 \u2713 \u2717 \u2717 View Job Load Details \u2713 \u2713 \u2713 \u2717 Delete Job \u2713 \u2713 \u2717 \u2717 Job Analysis View Job Analysis \u2713 \u2713 \u2713 \u2713 View Job Analysis Details \u2713 \u2713 \u2713 \u2713 Delete Job Analysis \u2713 \u2713 \u2717 \u2717 Datasets View Datasets \u2713 \u2713 \u2713 \u2717 Create Dataset \u2713 \u2713 \u2717 \u2717 Resync Dataset \u2713 \u2713 \u2713 \u2717 Update Credentials \u2713 \u2713 \u2713 \u2717 Delete Dataset \u2713 \u2713 \u2717 \u2717 Runtime Env View Runtime Env \u2713 \u2713 \u2713 \u2713 Create Runtime Env \u2713 \u2713 \u2717 \u2717 Update Runtime Env \u2713 \u2713 \u2713 \u2717 Delete Runtime Env \u2713 \u2713 \u2717 \u2717 Inference Services View Inference Services \u2713 \u2713 \u2713 \u2713 View Inference Services Details \u2713 \u2713 \u2713 \u2713 Create Inference Service \u2713 \u2713 \u2717 \u2717 Update Inference Service \u2713 \u2713 \u2713 \u2717 Stop Inference Service \u2713 \u2713 \u2713 \u2717 Start Inference Service \u2713 \u2713 \u2713 \u2717 Delete Inference Service \u2713 \u2713 \u2717 \u2717 Operator Overview View Overview \u2713 \u2717 \u2717 \u2717 GPU Management View GPU Management \u2713 \u2717 \u2717 \u2717 Queue Management View Queue Management \u2713 \u2717 \u2717 \u2717 View Queue Details \u2713 \u2717 \u2717 \u2717 Create Queue \u2713 \u2717 \u2717 \u2717 Update Queue \u2713 \u2717 \u2717 \u2717 Delete Queue \u2713 \u2717 \u2717 \u2717"},{"location":"en/admin/ghippo/permissions/kpanda.html","title":"Container Management Permissions","text":"

                                                                          The container management module uses the following roles:

                                                                          • Admin / Kpanda Owner
                                                                          • Cluster Admin
                                                                          • NS Admin
                                                                          • NS Editor
                                                                          • NS Viewer

                                                                          Note

                                                                          • For more information about permissions, please refer to the Container Management Permission System Description.
                                                                          • For creating, managing, and deleting roles, please refer to Role and Permission Management.
                                                                          • The permissions of Cluster Admin , NS Admin , NS Editor , NS Viewer only take effect within the current cluster or namespace.

                                                                          The permissions granted to each role are as follows:

                                                                          Primary Function Secondary Function Permission Cluster Admin Ns Admin Ns Editor NS Viewer Cluster Clusters View Clusters \u2714 \u2714 \u2714 \u2714 Access Cluster \u2718 \u2718 \u2718 \u2718 Create Cluster \u2718 \u2718 \u2718 \u2718 Cluster Operations Enter Console \u2714 \u2714 (only in the list) \u2714 \u2718 View Monitoring \u2714 \u2718 \u2718 \u2718 Edit Basic Configuration \u2714 \u2718 \u2718 \u2718 Download kubeconfig \u2714 \u2714 (with ns permission) \u2714 (with ns permission) \u2714 (with ns permission) Disconnect Cluster \u2718 \u2718 \u2718 \u2718 View Logs \u2714 \u2718 \u2718 \u2718 Retry \u2718 \u2718 \u2718 \u2718 Uninstall Cluster \u2718 \u2718 \u2718 \u2718 Cluster Overview View Cluster Overview \u2714 \u2718 \u2718 \u2718 Node Management Access Node \u2718 \u2718 \u2718 \u2718 View Node List \u2714 \u2718 \u2718 \u2718 View Node Details \u2714 \u2718 \u2718 \u2718 View YAML \u2714 \u2718 \u2718 \u2718 Pause Scheduling \u2714 \u2718 \u2718 \u2718 Modify Labels \u2714 \u2718 \u2718 \u2718 Modify Annotations \u2714 \u2718 \u2718 \u2718 Modify Taints \u2714 \u2718 \u2718 \u2718 Remove Node \u2718 \u2718 \u2718 \u2718 Deployment View List \u2714 \u2714 \u2714 \u2714 View/Manage Details \u2714 \u2714 \u2714 \u2714 (view only) Create by YAML \u2714 \u2714 \u2714 \u2718 Create by image \u2714 \u2714 \u2714 \u2718 Select an instance in ws bound to ns Select image \u2714 \u2714 \u2714 \u2718 View IP Pool \u2714 \u2714 \u2714 \u2718 Edit Network Interface \u2714 \u2714 \u2714 \u2718 Enter Console \u2714 \u2714 \u2714 \u2718 View Monitoring \u2714 \u2714 \u2714 \u2714 View Logs \u2714 \u2714 \u2714 \u2714 Load Balancer Scaling \u2714 \u2714 \u2714 \u2718 Edit YAML \u2714 \u2714 \u2714 \u2718 Update \u2714 \u2714 \u2714 \u2718 Status - Pause Upgrade \u2714 \u2714 \u2714 \u2718 Status - Stop \u2714 \u2714 \u2714 \u2718 Status - Restart \u2714 \u2714 \u2714 \u2718 Delete \u2714 \u2714 \u2714 \u2718 StatefulSet View List \u2714 \u2714 \u2714 \u2714 View/Manage Details \u2714 \u2714 \u2714 \u2714 (view only) Create by YAML \u2714 \u2714 \u2714 \u2718 Create by image \u2714 \u2714 \u2714 \u2718 Select an instance in ws bound to ns Select image \u2714 \u2714 \u2714 \u2718 Enter Console \u2714 \u2714 \u2714 \u2718 View Monitoring \u2714 \u2714 \u2714 \u2714 View Logs \u2714 \u2714 \u2714 \u2714 Load Balancer Scaling \u2714 \u2714 \u2714 \u2718 Edit YAML \u2714 \u2714 \u2714 \u2718 Update \u2714 \u2714 \u2714 \u2718 Status - Stop \u2714 \u2714 \u2714 \u2718 Status - Restart \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 DaemonSet View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Create by YAML \u2713 \u2713 \u2713 \u2717 Create by image \u2713 \u2713 \u2713 \u2717 Select an instance in ws bound to ns Select image \u2713 \u2713 \u2713 \u2717 Go to console \u2713 \u2713 \u2713 \u2717 Check monitor \u2713 \u2713 \u2713 \u2713 View logs \u2713 \u2713 \u2713 \u2713 Edit YAML \u2713 \u2713 \u2713 \u2717 Update \u2713 \u2713 \u2713 \u2717 Status - restart \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 Job View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Create by YAML \u2713 \u2713 \u2713 \u2717 Create by image \u2713 \u2713 \u2713 \u2717 Instance list \u2713 \u2713 \u2713 \u2713 Select an instance in ws bound to ns Select image \u2713 \u2713 \u2713 \u2717 Go to console \u2713 
\u2713 \u2713 \u2717 View logs \u2713 \u2713 \u2713 \u2713 View YAML \u2713 \u2713 \u2713 \u2713 Restart \u2713 \u2713 \u2713 \u2717 View event \u2713 \u2713 \u2713 \u2713 Delete \u2713 \u2713 \u2713 \u2717 CronJob View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Create by YAML \u2713 \u2713 \u2713 \u2717 Create by image \u2713 \u2713 \u2713 \u2717 Select an instance in ws bound to ns Select image \u2713 \u2713 \u2713 \u2717 Edit YAML \u2713 \u2713 \u2713 \u2717 Stop \u2713 \u2713 \u2713 \u2717 View jobs \u2713 \u2713 \u2713 \u2713 View event \u2713 \u2713 \u2713 \u2713 Delete \u2713 \u2713 \u2713 \u2717 Pod View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Go to console \u2713 \u2713 \u2713 \u2717 Check monitor \u2713 \u2713 \u2713 \u2713 View logs \u2713 \u2713 \u2713 \u2713 View YAML \u2713 \u2713 \u2713 \u2713 Upload file \u2713 \u2713 \u2713 \u2717 Download file \u2713 \u2713 \u2713 \u2717 View containers \u2713 \u2713 \u2713 \u2713 View event \u2713 \u2713 \u2713 \u2713 Delete \u2713 \u2713 \u2713 \u2717 ReplicaSet View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Go to console \u2713 \u2713 \u2713 \u2717 Check monitor \u2713 \u2713 \u2713 \u2713 View logs \u2713 \u2713 \u2713 \u2713 View YAML \u2713 \u2713 \u2713 \u2713 Delete \u2713 \u2713 \u2713 \u2717 Helm app View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Update \u2713 \u2713 \u2713 \u2717 View YAML \u2713 \u2713 \u2713 \u2713 Delete \u2713 \u2713 \u2713 \u2717 Helm chart View list \u2713 \u2713 \u2713 \u2713 View details \u2713 \u2713 \u2713 \u2713 Install chart \u2713 \u2713 (Fine for ns level) \u2717 \u2717 Download chart \u2713 \u2713 \u2713 (Consistent with viewing interface) \u2713 Helm repo View list \u2713 \u2713 \u2713 \u2713 Create repo \u2713 \u2717 \u2717 \u2717 Update repo \u2713 \u2717 \u2717 \u2717 Clone repo \u2713 \u2717 \u2717 \u2717 Refresh repo \u2713 \u2717 \u2717 \u2717 Modify label \u2713 \u2717 \u2717 \u2717 Modify annotation \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 Service View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Create by YAML \u2713 \u2713 \u2713 \u2717 Create \u2713 \u2713 \u2713 \u2717 Update \u2713 \u2713 \u2713 \u2717 View event \u2713 \u2713 \u2713 \u2713 Edit YAML \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 Ingress View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Create by YAML \u2713 \u2713 \u2713 \u2717 Create \u2713 \u2713 \u2713 \u2717 Update \u2713 \u2713 \u2713 \u2717 View event \u2713 \u2713 \u2713 \u2713 Edit YAML \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 Network policy View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2717 Create by YAML \u2713 \u2713 \u2713 \u2717 Create \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 Network config Config \u2713 \u2713 \u2713 \u2717 CRD View list \u2713 \u2717 \u2717 \u2717 View/Manage details \u2713 \u2717 \u2717 \u2717 Create by YAML \u2713 \u2717 \u2717 \u2717 Edit YAML \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 PVC View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Create \u2713 \u2713 \u2713 \u2717 Select sc \u2713 \u2713 \u2713 \u2717 Create by YAML \u2713 \u2713 \u2713 \u2717 
Edit YAML \u2713 \u2713 \u2713 \u2717 Clone \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 PV View list \u2713 \u2717 \u2717 \u2717 View/Manage details \u2713 \u2717 \u2717 \u2717 Create by YAML \u2713 \u2717 \u2717 \u2717 Create \u2713 \u2717 \u2717 \u2717 Edit YAML \u2713 \u2717 \u2717 \u2717 Update \u2713 \u2717 \u2717 \u2717 Clone \u2713 \u2717 \u2717 \u2717 Modify label \u2713 \u2717 \u2717 \u2717 Modify annotation \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 SC View list \u2713 \u2717 \u2717 \u2717 Create by YAML \u2713 \u2717 \u2717 \u2717 Create \u2713 \u2717 \u2717 \u2717 View YAML \u2713 \u2717 \u2717 \u2717 Update \u2713 \u2717 \u2717 \u2717 Authorize NS \u2713 \u2717 \u2717 \u2717 Deauthorize \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 ConfigMap View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Create by YAML \u2713 \u2713 \u2713 \u2717 Create \u2713 \u2713 \u2713 \u2717 Edit YAML \u2713 \u2713 \u2713 \u2717 Update \u2713 \u2713 \u2713 \u2717 Export ConfigMap \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 Secret View list \u2713 \u2713 \u2713 \u2717 View/Manage details \u2713 \u2713 \u2713 \u2717 Create by YAML \u2713 \u2713 \u2713 \u2717 Create \u2713 \u2713 \u2713 \u2717 Edit YAML \u2713 \u2713 \u2713 \u2717 Update \u2713 \u2713 \u2713 \u2717 Export secret \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 Namespace View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Create by YAML \u2713 \u2717 \u2717 \u2717 Create \u2713 \u2717 \u2717 \u2717 View YAML \u2713 \u2713 \u2713 \u2717 Modify label \u2713 \u2713 \u2717 \u2717 Unbind WS \u2717 \u2717 \u2717 \u2717 Bind WS \u2717 \u2717 \u2717 \u2717 Quotas \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 Cluster operation View list \u2713 \u2717 \u2717 \u2717 View YAML \u2713 \u2717 \u2717 \u2717 View logs \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 Helm operation Set preserved entries \u2713 \u2717 \u2717 \u2717 View YAML \u2713 \u2713 \u2717 \u2717 View logs \u2713 \u2713 \u2717 \u2717 Delete \u2713 \u2713 \u2717 \u2717 Cluster upgrade View details \u2713 \u2717 \u2717 \u2717 Upgrade \u2717 \u2717 \u2717 \u2717 Cluster settings Addon config \u2713 \u2717 \u2717 \u2717 Advanced config \u2713 \u2717 \u2717 \u2717 Namespace View list \u2713 \u2713 \u2713 \u2713 Create \u2713 \u2717 \u2717 \u2717 View/Manage details \u2713 \u2713 \u2713 \u2713 View YAML \u2713 \u2713 \u2713 \u2717 Modify label \u2713 \u2713 \u2717 \u2717 Bind WS \u2713 \u2717 \u2717 \u2717 Quotas \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 Workload Deployment View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Go to console \u2713 \u2713 \u2713 \u2717 Check monitor \u2713 \u2713 \u2713 \u2713 View logs \u2713 \u2713 \u2713 \u2713 Workload scaling \u2713 \u2713 \u2713 \u2717 Edit YAML \u2713 \u2713 \u2713 \u2717 Update \u2713 \u2713 \u2713 \u2717 Status - Pause Upgrade \u2713 \u2713 \u2713 \u2717 Status - Stop \u2713 \u2713 \u2713 \u2717 Status - restart \u2713 \u2713 \u2713 \u2717 Revert \u2713 \u2713 \u2713 \u2717 Modify label and annotation \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 StatefulSet View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Go to console \u2713 \u2713 \u2713 \u2717 Check monitor \u2713 \u2713 \u2713 \u2713 View 
logs \u2713 \u2713 \u2713 \u2713 Workload scaling \u2713 \u2713 \u2713 \u2717 Edit YAML \u2713 \u2713 \u2713 \u2717 Update \u2713 \u2713 \u2713 \u2717 Status - Stop \u2713 \u2713 \u2713 \u2717 Status - restart \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 DaemonSet View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Go to console \u2713 \u2713 \u2713 \u2717 Check monitor \u2713 \u2713 \u2713 \u2713 View logs \u2713 \u2713 \u2713 \u2713 Edit YAML \u2713 \u2713 \u2713 \u2717 Update \u2713 \u2713 \u2713 \u2717 Status - restart \u2713 \u2713 \u2713 \u2717 Delete \u2713 \u2713 \u2713 \u2717 Job View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Go to console \u2713 \u2713 \u2713 \u2717 View logs \u2713 \u2713 \u2713 \u2713 View YAML \u2713 \u2713 \u2713 \u2717 Restart \u2713 \u2713 \u2713 \u2717 View event \u2713 \u2713 \u2713 \u2713 Delete \u2713 \u2713 \u2713 \u2717 CronJob View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) View event \u2713 \u2713 \u2713 \u2713 Delete \u2713 \u2713 \u2713 \u2717 Pod View list \u2713 \u2713 \u2713 \u2713 View/Manage details \u2713 \u2713 \u2713 \u2713 (Only view) Go to console \u2713 \u2713 \u2713 \u2717 Check monitor \u2713 \u2713 \u2713 \u2713 View logs \u2713 \u2713 \u2713 \u2713 View YAML \u2713 \u2713 \u2713 \u2713 Upload file \u2713 \u2713 \u2713 \u2717 Download file \u2713 \u2713 \u2713 \u2717 View containers \u2713 \u2713 \u2713 \u2713 View event \u2713 \u2713 \u2713 \u2713 Delete \u2713 \u2713 \u2713 \u2717 Backup and Restore App backup View list \u2713 \u2717 \u2717 \u2717 View/Manage details \u2713 \u2717 \u2717 \u2717 Create backup schedule \u2713 \u2717 \u2717 \u2717 View YAML \u2713 \u2717 \u2717 \u2717 Update Schedule \u2713 \u2717 \u2717 \u2717 Pause \u2713 \u2717 \u2717 \u2717 Run now \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 Resume backup View list \u2713 \u2717 \u2717 \u2717 View/Manage details \u2713 \u2717 \u2717 \u2717 Resume backup \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 Backup point View list \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 Object storage View list \u2713 \u2717 \u2717 \u2717 etcd backup View backup policies \u2713 \u2717 \u2717 \u2717 Create backup policies \u2713 \u2717 \u2717 \u2717 View logs \u2713 \u2717 \u2717 \u2717 View YAML \u2713 \u2717 \u2717 \u2717 Update backup policy \u2713 \u2717 \u2717 \u2717 Stop/Start \u2713 \u2717 \u2717 \u2717 Run now \u2713 \u2717 \u2717 \u2717 View/Manage details \u2713 \u2717 \u2717 \u2717 Delete backup records \u2713 \u2717 \u2717 \u2717 View backup points \u2713 \u2717 \u2717 \u2717 Cluster inspection Cluster inspection View list \u2713 \u2717 \u2717 \u2717 View/Manage details \u2713 \u2717 \u2717 \u2717 Cluster inspection \u2713 \u2717 \u2717 \u2717 Settings \u2713 \u2717 \u2717 \u2717 Permissions Permissions View list \u2713 \u2717 \u2717 \u2717 Grant to cluster admin \u2713 \u2717 \u2717 \u2717 Delete \u2713 \u2717 \u2717 \u2717 NS permissions View list \u2713 \u2713 \u2717 \u2717 Grant to ns admin \u2713 \u2713 \u2717 \u2717 Grant to ns editor \u2713 \u2713 \u2717 \u2717 Grant to ns viewer \u2713 \u2713 \u2717 \u2717 Edit permissions \u2713 \u2713 \u2717 \u2717 Delete \u2713 \u2713 \u2717 \u2717 Security Compliance scanning View scanning report \u2713 \u2717 \u2717 \u2717 View scanning report details \u2713 \u2717 \u2717 \u2717 Download scanning report \u2713 
\u2717 \u2717 \u2717 Delete scanning report \u2713 \u2717 \u2717 \u2717 View scanning policies \u2713 \u2717 \u2717 \u2717 Create scanning policy \u2713 \u2717 \u2717 \u2717 Delete scanning policy \u2713 \u2717 \u2717 \u2717 View scanning config list \u2713 \u2717 \u2717 \u2717 View scanning config details \u2713 \u2717 \u2717 \u2717 Delete scanning config \u2713 \u2717 \u2717 \u2717 Scan permission View scanning reports \u2713 \u2717 \u2717 \u2717 View scanning report details \u2713 \u2717 \u2717 \u2717 Delete scanning report \u2713 \u2717 \u2717 \u2717 View scanning policies \u2713 \u2717 \u2717 \u2717 Create scanning policy \u2713 \u2717 \u2717 \u2717 Delete scanning policy \u2713 \u2717 \u2717 \u2717 Scan vulnerability View scanning reports \u2713 \u2717 \u2717 \u2717 View scanning report detail \u2713 \u2717 \u2717 \u2717 Delete scanning report \u2713 \u2717 \u2717 \u2717 View scanning policies \u2713 \u2717 \u2717 \u2717 Create scanning policy \u2713 \u2717 \u2717 \u2717 Delete scanning policy \u2713 \u2717 \u2717 \u2717"},{"location":"en/admin/ghippo/personal-center/accesstoken.html","title":"Access key","text":"

                                                                          The access key can be used to access the OpenAPI and for continuous delivery. You can obtain a key and access the API by following the steps below in the Personal Center.

                                                                          "},{"location":"en/admin/ghippo/personal-center/accesstoken.html#get-key","title":"Get key","text":"

                                                                          Log in to AI platform, find Personal Center in the drop-down menu in the upper right corner, and manage the account's access keys on the Access Keys page.

                                                                          Info

                                                                          The access key is displayed only once. If you forget your access key, you will need to create a new one.

                                                                          "},{"location":"en/admin/ghippo/personal-center/accesstoken.html#use-the-key-to-access-api","title":"Use the key to access API","text":"

                                                                          When accessing the AI platform OpenAPI, add the header Authorization:Bearer ${token} to the request to identify the visitor, where ${token} is the key obtained in the previous step. Request Example

                                                                          curl -X GET -H 'Authorization:Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IkRKVjlBTHRBLXZ4MmtQUC1TQnVGS0dCSWc1cnBfdkxiQVVqM2U3RVByWnMiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE2NjE0MTU5NjksImlhdCI6MTY2MDgxMTE2OSwiaXNzIjoiZ2hpcHBvLmlvIiwic3ViIjoiZjdjOGIxZjUtMTc2MS00NjYwLTg2MWQtOWI3MmI0MzJmNGViIiwicHJlZmVycmVkX3VzZXJuYW1lIjoiYWRtaW4iLCJncm91cHMiOltdfQ.RsUcrAYkQQ7C6BxMOrdD3qbBRUt0VVxynIGeq4wyIgye6R8Ma4cjxG5CbU1WyiHKpvIKJDJbeFQHro2euQyVde3ygA672ozkwLTnx3Tu-_mB1BubvWCBsDdUjIhCQfT39rk6EQozMjb-1X1sbLwzkfzKMls-oxkjagI_RFrYlTVPwT3Oaw-qOyulRSw7Dxd7jb0vINPq84vmlQIsI3UuTZSNO5BCgHpubcWwBss-Aon_DmYA-Et_-QtmPBA3k8E2hzDSzc7eqK0I68P25r9rwQ3DeKwD1dbRyndqWORRnz8TLEXSiCFXdZT2oiMrcJtO188Ph4eLGut1-4PzKhwgrQ' https://demo-dev.daocloud.io/apis/ghippo.io/v1alpha1/users?page=1&pageSize=10 -k\n

                                                                          Request result

                                                                          {\n    \"items\": [\n        {\n            \"id\": \"a7cfd010-ebbe-4601-987f-d098d9ef766e\",\n            \"name\": \"a\",\n            \"email\": \"\",\n            \"description\": \"\",\n            \"firstname\": \"\",\n            \"lastname\": \"\",\n            \"source\": \"locale\",\n            \"enabled\": true,\n            \"createdAt\": \"1660632794800\",\n            \"updatedAt\": \"0\",\n            \"lastLoginAt\": \"\"\n        }\n    ],\n    \"pagination\": {\n        \"page\": 1,\n        \"pageSize\": 10,\n        \"total\": 1\n    }\n}\n
                                                                          "},{"location":"en/admin/ghippo/personal-center/language.html","title":"language settings","text":"

                                                                          This section explains how to set the interface language. Currently, Chinese and English are supported.

                                                                          Language setting is the portal through which the platform provides multilingual services. The platform is displayed in Chinese by default. Users can switch the platform language by selecting English or by letting it automatically detect the browser language preference, according to their needs. Each user's language setting is independent, and switching it does not affect other users.

                                                                          The platform provides three language options: Chinese, English, and automatic detection of your browser language preference.

                                                                          The operation steps are as follows.

                                                                          1. Log in to the AI platform with your username/password. Click Global Management at the bottom of the left navigation bar.

                                                                          2. Click the username in the upper right corner and select Personal Center .

                                                                          3. Click the Language Settings tab.

                                                                          4. Toggle the language option.

                                                                          "},{"location":"en/admin/ghippo/personal-center/security-setting.html","title":"Security Settings","text":"

                                                                          Function description: this page is used to fill in an email address and change the login password.

                                                                          • Email: After the administrator configures the email server address, users can click the Forgot Password button on the login page and enter their email address there to retrieve the password.
                                                                          • Password: The password used to log in to the platform. It is recommended to change the password regularly.

                                                                          The specific operation steps are as follows:

                                                                          1. Click the username in the upper right corner and select Personal Center .

                                                                          2. Click the Security Settings tab. Fill in your email address or change the login password.

                                                                          "},{"location":"en/admin/ghippo/personal-center/ssh-key.html","title":"Configuring SSH Public Key","text":"

                                                                          This article explains how to configure an SSH public key.

                                                                          "},{"location":"en/admin/ghippo/personal-center/ssh-key.html#step-1-view-existing-ssh-keys","title":"Step 1. View Existing SSH Keys","text":"

                                                                          Before generating a new SSH key, check whether you can use an existing SSH key stored in your user's home directory. On Linux and Mac, use the following commands to view existing public keys. Windows users can run the same commands in WSL (requires Windows 10 or above) or Git Bash.

                                                                          • ED25519 Algorithm:

                                                                            cat ~/.ssh/id_ed25519.pub\n
                                                                          • RSA Algorithm:

                                                                            cat ~/.ssh/id_rsa.pub\n

                                                                          If a long string starting with ssh-ed25519 or ssh-rsa is returned, it means that a local public key already exists. You can skip Step 2 Generate SSH Key and proceed directly to Step 3.

                                                                          "},{"location":"en/admin/ghippo/personal-center/ssh-key.html#step-2-generate-ssh-key","title":"Step 2. Generate SSH Key","text":"

                                                                          If Step 1 returns no such string, there is no usable SSH key on the local machine and a new one needs to be generated. Follow these steps:

                                                                          1. Open a terminal (Windows users should use WSL or Git Bash) and run the ssh-keygen command with the -t option.

                                                                          2. Enter the key algorithm type and an optional comment.

                                                                            The comment will appear in the .pub file and can generally use the email address as the comment content.

                                                                            • To generate a key pair based on the ED25519 algorithm, use the following command:

                                                                              ssh-keygen -t ed25519 -C \"<comment>\"\n
                                                                            • To generate a key pair based on the RSA algorithm, use the following command:

                                                                              ssh-keygen -t rsa -C \"<comment>\"\n
                                                                          3. Press Enter to confirm the path where the SSH key will be saved.

                                                                            Taking the ED25519 algorithm as an example, the default path is as follows:

                                                                            Generating public/private ed25519 key pair.\nEnter file in which to save the key (/home/user/.ssh/id_ed25519):\n

                                                                            The default key generation path is /home/user/.ssh/id_ed25519, and the corresponding public key is /home/user/.ssh/id_ed25519.pub.

                                                                          4. Set a passphrase for the key.

                                                                            Enter passphrase (empty for no passphrase):\nEnter same passphrase again:\n

                                                                            The passphrase is empty by default, and you can choose to use a passphrase to protect the private key file. If you do not want to enter a passphrase every time you access the repository using the SSH protocol, you can enter an empty passphrase when creating the key.

                                                                          5. Press Enter to complete the key pair creation.
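                                                                          If you set a passphrase, you can avoid retyping it for every connection by loading the key into ssh-agent. This is standard OpenSSH tooling, not a platform-specific step; a minimal sketch:

                                                                          eval \"$(ssh-agent -s)\"       # start the agent in the current shell\nssh-add ~/.ssh/id_ed25519    # cache the decrypted private key for this session\n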

                                                                          "},{"location":"en/admin/ghippo/personal-center/ssh-key.html#step-3-copy-the-public-key","title":"Step 3. Copy the Public Key","text":"

                                                                          In addition to manually copying the generated public key information printed on the command line, you can use the following commands to copy the public key to the clipboard, depending on the operating system.

                                                                          • Windows (in WSL or Git Bash):

                                                                            cat ~/.ssh/id_ed25519.pub | clip\n
                                                                          • Mac:

                                                                            tr -d '\\n'< ~/.ssh/id_ed25519.pub | pbcopy\n
                                                                          • GNU/Linux (requires xclip):

                                                                            xclip -sel clip < ~/.ssh/id_ed25519.pub\n
                                                                          "},{"location":"en/admin/ghippo/personal-center/ssh-key.html#step-4-set-the-public-key-on-ai-platform-platform","title":"Step 4. Set the Public Key on AI platform Platform","text":"
                                                                          1. Log in to the AI platform UI page and select Profile -> SSH Public Key in the upper right corner of the page.

                                                                          2. Add the generated SSH public key information.

                                                                            1. SSH public key content: paste the single-line public key copied in Step 3 (see the example after this list).

                                                                            2. Public key title: a custom name for the public key, used to distinguish keys.

                                                                            3. Expiration: the validity period of the public key. After it expires, the public key is automatically invalidated and can no longer be used. If not set, it is permanently valid.
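                                                                          What gets pasted as the public key content is a single line such as the following (a hypothetical, truncated ED25519 key; yours will be longer):

                                                                          ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA...<truncated>... your-email@example.com\n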

                                                                          "},{"location":"en/admin/ghippo/platform-setting/about.html","title":"About","text":"

                                                                          The About page primarily showcases the latest versions of each module, highlights the open source software used, and expresses gratitude to the technical team via an animated video.

                                                                          Steps to view are as follows:

                                                                          1. Log in to AI platform as a user with Admin role. Click Global Management at the bottom of the left navigation bar.

                                                                          2. Click Settings , select About , and check the product version, open source software statement, and development teams.

                                                                            License Statement

                                                                            Technical Team

                                                                          "},{"location":"en/admin/ghippo/platform-setting/appearance.html","title":"Customize Appearance","text":"

                                                                          In AI platform, you have the option to customize the appearance of the login page, top navigation bar, bottom copyright and ICP registration to enhance your product recognition.

                                                                          "},{"location":"en/admin/ghippo/platform-setting/appearance.html#customizing-login-page-and-top-navigation-bar","title":"Customizing Login Page and Top Navigation Bar","text":"
                                                                          1. To get started, log in to AI platform as a user with the admin role and navigate to Global Management -> Settings found at the bottom of the left navigation bar.

                                                                          2. Select Appearance . On the Custom your login page tab, modify the icon and text of the login page as needed, then click Save .

                                                                          3. Log out and refresh the login page to see the configured effect.

                                                                          4. On the Advanced customization tab, you can modify the login page, navigation bar, copyright, and ICP registration with CSS.

                                                                          Note

                                                                          If you wish to restore the default settings, simply click Revert . This action will discard all customized settings.

                                                                          "},{"location":"en/admin/ghippo/platform-setting/appearance.html#advanced-customization","title":"Advanced Customization","text":"

                                                                          Advanced customization allows you to modify the color, font spacing, and font size of the entire container platform using CSS styles. Please note that familiarity with CSS syntax is required.

                                                                          To reset any advanced customizations, delete the contents of the black input box or click the Revert button.

                                                                          Sample CSS for Login Page Customization:

                                                                          .test {\n  width: 12px;\n}\n\n#kc-login {\n /* color: red!important; */\n}\n

                                                                          CSS sample for page customization after login:

                                                                          .dao-icon.dao-iconfont.icon-service-global.dao-nav__head-icon {\n   color: red!important;\n}\n.ghippo-header-logo {\n  background-color: green!important;\n}\n.ghippo-header {\n  background-color: rgb(128, 115, 0)!important;\n}\n.ghippo-header-nav-main {\n  background-color: rgb(0, 19, 128)!important;\n}\n.ghippo-header-sub-nav-main .dao-popper-inner {\n  background-color: rgb(231, 82, 13) !important;\n}\n

                                                                          CSS sample for custom footer (including copyright, filing, and other information at the bottom)

                                                                          <div class=\"footer-content\">\n  <span class=\"footer-item\">Copyright \u00a9 2024 Suanova</span>\n  <a class=\"footer-item\" href=\"https://beian.miit.gov.cn/\" target=\"_blank\" rel=\"noopener noreferrer\">\u6caa ICP \u5907 14048409 \u53f7 - 1</a>\n  <a class=\"footer-item\" href=\"https://beian.miit.gov.cn/\" target=\"_blank\" rel=\"noopener noreferrer\">\u6caa ICP \u5907 14048409 \u53f7 - 2</a>\n</div>\n<div class=\"footer-content\">\n  <img class=\"gongan-icon\" src=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABIAAAASCAYAAABWzo5XAAAACXBIWXMAAAsTAAALEwEAmpwYAAAKTWlDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVN3WJP3Fj7f92UPVkLY8LGXbIEAIiOsCMgQWaIQkgBhhBASQMWFiApWFBURnEhVxILVCkidiOKgKLhnQYqIWotVXDjuH9yntX167+3t+9f7vOec5/zOec8PgBESJpHmomoAOVKFPDrYH49PSMTJvYACFUjgBCAQ5svCZwXFAADwA3l4fnSwP/wBr28AAgBw1S4kEsfh/4O6UCZXACCRAOAiEucLAZBSAMguVMgUAMgYALBTs2QKAJQAAGx5fEIiAKoNAOz0ST4FANipk9wXANiiHKkIAI0BAJkoRyQCQLsAYFWBUiwCwMIAoKxAIi4EwK4BgFm2MkcCgL0FAHaOWJAPQGAAgJlCLMwAIDgCAEMeE80DIEwDoDDSv+CpX3CFuEgBAMDLlc2XS9IzFLiV0Bp38vDg4iHiwmyxQmEXKRBmCeQinJebIxNI5wNMzgwAABr50cH+OD+Q5+bk4eZm52zv9MWi/mvwbyI+IfHf/ryMAgQAEE7P79pf5eXWA3DHAbB1v2upWwDaVgBo3/ldM9sJoFoK0Hr5i3k4/EAenqFQyDwdHAoLC+0lYqG9MOOLPv8z4W/gi372/EAe/tt68ABxmkCZrcCjg/1xYW52rlKO58sEQjFu9+cj/seFf/2OKdHiNLFcLBWK8ViJuFAiTcd5uVKRRCHJleIS6X8y8R+W/QmTdw0ArIZPwE62B7XLbMB+7gECiw5Y0nYAQH7zLYwaC5EAEGc0Mnn3AACTv/mPQCsBAM2XpOMAALzoGFyolBdMxggAAESggSqwQQcMwRSswA6cwR28wBcCYQZEQAwkwDwQQgbkgBwKoRiWQRlUwDrYBLWwAxqgEZrhELTBMTgN5+ASXIHrcBcGYBiewhi8hgkEQcgIE2EhOogRYo7YIs4IF5mOBCJhSDSSgKQg6YgUUSLFyHKkAqlCapFdSCPyLXIUOY1cQPqQ28ggMor8irxHMZSBslED1AJ1QLmoHxqKxqBz0XQ0D12AlqJr0Rq0Hj2AtqKn0UvodXQAfYqOY4DRMQ5mjNlhXIyHRWCJWBomxxZj5Vg1Vo81Yx1YN3YVG8CeYe8IJAKLgBPsCF6EEMJsgpCQR1hMWEOoJewjtBK6CFcJg4Qxwicik6hPtCV6EvnEeGI6sZBYRqwm7iEeIZ4lXicOE1+TSCQOyZLkTgohJZAySQtJa0jbSC2kU6Q+0hBpnEwm65Btyd7kCLKArCCXkbeQD5BPkvvJw+S3FDrFiOJMCaIkUqSUEko1ZT/lBKWfMkKZoKpRzame1AiqiDqfWkltoHZQL1OHqRM0dZolzZsWQ8ukLaPV0JppZ2n3aC/pdLoJ3YMeRZfQl9Jr6Afp5+mD9HcMDYYNg8dIYigZaxl7GacYtxkvmUymBdOXmchUMNcyG5lnmA+Yb1VYKvYqfBWRyhKVOpVWlX6V56pUVXNVP9V5qgtUq1UPq15WfaZGVbNQ46kJ1Bar1akdVbupNq7OUndSj1DPUV+jvl/9gvpjDbKGhUaghkijVGO3xhmNIRbGMmXxWELWclYD6yxrmE1iW7L57Ex2Bfsbdi97TFNDc6pmrGaRZp3mcc0BDsax4PA52ZxKziHODc57LQMtPy2x1mqtZq1+rTfaetq+2mLtcu0W7eva73VwnUCdLJ31Om0693UJuja6UbqFutt1z+o+02PreekJ9cr1Dund0Uf1bfSj9Rfq79bv0R83MDQINpAZbDE4Y/DMkGPoa5hpuNHwhOGoEctoupHEaKPRSaMnuCbuh2fjNXgXPmasbxxirDTeZdxrPGFiaTLbpMSkxeS+Kc2Ua5pmutG003TMzMgs3KzYrMnsjjnVnGueYb7ZvNv8jYWlRZzFSos2i8eW2pZ8ywWWTZb3rJhWPlZ5VvVW16xJ1lzrLOtt1ldsUBtXmwybOpvLtqitm63Edptt3xTiFI8p0in1U27aMez87ArsmuwG7Tn2YfYl9m32zx3MHBId1jt0O3xydHXMdmxwvOuk4TTDqcSpw+lXZxtnoXOd8zUXpkuQyxKXdpcXU22niqdun3rLleUa7rrStdP1o5u7m9yt2W3U3cw9xX2r+00umxvJXcM970H08PdY4nHM452nm6fC85DnL152Xlle+70eT7OcJp7WMG3I28Rb4L3Le2A6Pj1l+s7pAz7GPgKfep+Hvqa+It89viN+1n6Zfgf8nvs7+sv9j/i/4XnyFvFOBWABwQHlAb2BGoGzA2sDHwSZBKUHNQWNBbsGLww+FUIMCQ1ZH3KTb8AX8hv5YzPcZyya0RXKCJ0VWhv6MMwmTB7WEY6GzwjfEH5vpvlM6cy2CIjgR2yIuB9pGZkX+X0UKSoyqi7qUbRTdHF09yzWrORZ+2e9jvGPqYy5O9tqtnJ2Z6xqbFJsY+ybuIC4qriBeIf4RfGXEnQTJAntieTE2MQ9ieNzAudsmjOc5JpUlnRjruXcorkX5unOy553PFk1WZB8OIWYEpeyP+WDIEJQLxhP5aduTR0T8oSbhU9FvqKNolGxt7hKPJLmnVaV9jjdO31D+miGT0Z1xjMJT1IreZEZkrkj801WRNberM/ZcdktOZSclJyjUg1plrQr1zC3KLdPZisrkw3keeZtyhuTh8r35CP5c/PbFWyFTNGjtFKuUA4WTC+oK3hbGFt4uEi9SFrUM99m/ur5IwuCFny9kLBQuLCz2Lh4WfHgIr9FuxYji1MXdy4xXVK6ZHhp8NJ9y2jLspb9UOJYUlXyannc8o5Sg9KlpUMrglc0lamUycturvRauWMVYZVkVe9ql9VbVn8qF5VfrHCsqK74sEa45uJXTl/VfPV5bdra3kq3yu3rSOuk626s91m/r0q9akHV0IbwDa0b8Y3lG19tSt50
oXpq9Y7NtM3KzQM1YTXtW8y2rNvyoTaj9nqdf13LVv2tq7e+2Sba1r/dd3vzDoMdFTve75TsvLUreFdrvUV99W7S7oLdjxpiG7q/5n7duEd3T8Wej3ulewf2Re/ranRvbNyvv7+yCW1SNo0eSDpw5ZuAb9qb7Zp3tXBaKg7CQeXBJ9+mfHvjUOihzsPcw83fmX+39QjrSHkr0jq/dawto22gPaG97+iMo50dXh1Hvrf/fu8x42N1xzWPV56gnSg98fnkgpPjp2Snnp1OPz3Umdx590z8mWtdUV29Z0PPnj8XdO5Mt1/3yfPe549d8Lxw9CL3Ytslt0utPa49R35w/eFIr1tv62X3y+1XPK509E3rO9Hv03/6asDVc9f41y5dn3m978bsG7duJt0cuCW69fh29u0XdwruTNxdeo94r/y+2v3qB/oP6n+0/rFlwG3g+GDAYM/DWQ/vDgmHnv6U/9OH4dJHzEfVI0YjjY+dHx8bDRq98mTOk+GnsqcTz8p+Vv9563Or59/94vtLz1j82PAL+YvPv655qfNy76uprzrHI8cfvM55PfGm/K3O233vuO+638e9H5ko/ED+UPPR+mPHp9BP9z7nfP78L/eE8/sl0p8zAAAAIGNIUk0AAHolAACAgwAA+f8AAIDpAAB1MAAA6mAAADqYAAAXb5JfxUYAAAQjSURBVHjaVNNZbFRlGIDh95w525zpdGa6TVtbykBbyiICxQY0AhYTJUCiiYqGqEEiJhKQmBg0ESPeeCGRENEYb4jhBr0gNQrRlCBiSgyLaSlSaKEs3Wemy+xnzuqFYdD/6rt6ku/N9wue55EcPwWArCgIgkx5ZRuYVxsnJ801Z05f3jY1MRnb/HxHV+uSph9RKq4mhkdwbZVgdQ2SHkPTwgj/h1QUWWi8/tfg/hM/XN/Y2zfaZnkSnuRDtLMsXhBOvrJtya/LlrcdMs1Qb1lVRQmSAEDAsU1kxpgamXp3y+azu1esreK9dyRqs9PIjkW6OsLx7lTV1ld/237s8HRV57MbnvO8CA+e9GCQFTk6Mza+4/0P+t9a9VSEI3uyTH/eR27aB2Ed31Q/Hx1sI6BHOPT13c5Frd0HW9p3HPUQEwAigJW9RDp+bstrOy981nVGLN/7RpHUV70YfXnEAtjxFPasxPDBQXatjzNTdOQXtg983H/51AFFy1KCIg2bNIdC+8270NwmUmelsXqSqHkDK5PDl8iCW0QcnEW+lqCjvcjQuMZ4YnQRTkotQUZu4GkjcfZNv19G011kXw4vayNYNvqCCvSVTciOgABgeuhBGwhgz5zbkI2ff7HUqJiNR2QktbbSYnBYYqbMT/ilKI4SIbT/GcRylbnvLmJ2X8N7tJ7rR8OE/BbliqEYea81WIotmOs02WFpc55Lf0f5/mSI3dsamOgxSX7ZjaALuBmB6M6FnB+S+POCwmOLk1QFFAqZyQWl1YrpiRZJLvDkygyC5NJ1XCax7xYNiTQVEYVIuUulayIcGeLkpw6WK7GuPY/fb2CkhleXIFFe8XPGaKBj9QxLW1Ik0bg8EuT2zRCJYZvZIYepe0EGbvi4bQUJVZhs2phADFYj+df0lBqJUnaekS4SUHXe3jrOnoE2PhSewHfRpfZGgcryIvfHdQruQlLo7Ns6QizqkJ31CIUlqwQJXuWUpDXj6qOsW32HT3YNImll9FwJsb4jyaLmWQ4fa6a+2sQw0ry8YZSiHcPxxXBtMfCv4XkUCrfliWs/fTE31rtTVfv9vsIorvQIniMhqXM4popVcJFVMHMpfMEaLPdxR1Tnna1b1vl6tGntpAjgCTNWONZyIFBR8Ydtr6EgrCI3VySfzZPLBDHyIq5gkpmzcOUmTGMF+bh7M9LYulfWzMmHBzk7Fpq9deWEYxjrtaCMXjWfstp6BCGNXZzBdYqYhogWqkMum4+oBVD0YnP63u/fFqbv1D+M7VSlBbmmK5uYaLYLYwslfwFVAyXQiOfcx3XyyGIM8DDn0lgWyGokHogu/0UJxpL/+f2e569s/CZQZ53OpzJr0+NXludUfb5jVdf7VUGXJUPIZast1S9PeII6jFDT5xMjFwO1S4c8zwTgnwEAxufYSzA67PMAAAAASUVORK5CYII=\" >\n  <a class=\"footer-item\" href=\"http://www.beian.gov.cn/portal/registerSystemInfo\">\u6caa\u516c\u7f51\u5b89\u5907 12345678912345\u53f7</a>\n</div>\n<style>\n.footer-content {\n  display: flex;\n  flex-wrap: wrap;\n  align-items: center;\n  justify-content: center;\n}\n.footer-content + .footer-content {\n  margin-top: 8px;\n}\n.login-pf .footer-item {\n  color: white;\n}\n.footer-item {\n  color: var(--dao-gray-010);\n  text-decoration: none;\n}\n.footer-item + .footer-item {\n  margin-left: 8px;\n}\n.gongan-icon {\n  width: 18px;\n  height: 18px;\n  margin-right: 4px;\n}\n</style>\n
                                                                          "},{"location":"en/admin/ghippo/platform-setting/mail-server.html","title":"Mail Server","text":"

                                                                          If a user forgets their password, AI platform sends a verification email to the user's address to confirm that the request was made in person. Before AI platform can send email, you must first provide your mail server address.

                                                                          The specific operation steps are as follows:

                                                                          1. Log in to AI platform as a user with admin role. Click Global Management at the bottom of the left navigation bar.

                                                                          2. Click Settings , select Mail Server Settings .

                                                                            Complete the following fields to configure the mail server:

                                                                            | Field | Description | Example |
                                                                            | ----- | ----------- | ------- |
                                                                            | SMTP server address | SMTP server address that can provide mail service | smtp.163.com |
                                                                            | SMTP server port | Port for sending mail | 25 |
                                                                            | Username | Name of the SMTP user | test@163.com |
                                                                            | Password | Password for the SMTP account | 123456 |
                                                                            | Sender's email address | Sender's email address | test@163.com |
                                                                            | Use SSL secure connection | SSL encrypts emails to improve the security of transmitted information; the mail server usually needs a certificate configured | Disable |
                                                                          3. After the configuration is complete, click Save , and click Test Mail Server .

                                                                          4. A message in the upper right corner of the screen confirming that the mail was sent indicates that the mail server has been set up successfully.

                                                                          "},{"location":"en/admin/ghippo/platform-setting/mail-server.html#common-problem","title":"Common problem","text":"

                                                                          Q: Why can a user still not retrieve their password after the mail server is set up?

                                                                          A: The user may not have an email address on file, or may have registered a wrong one. In that case, a user with the admin role can find the user by username in Global Management -> Access Control and set a new login password for the user.

                                                                          If the mail server is not connected, please check whether the mail server address, username and password are correct.
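                                                                          As a quick connectivity check, you can attempt an authenticated SMTP session from a terminal with curl (a hedged sketch using the example values from the table above; substitute your own server, credentials, and recipient):

                                                                          # Sends an empty test message, verifying DNS resolution, connectivity, and credentials\ncurl -v smtp://smtp.163.com:25 --user 'test@163.com:123456' --mail-from 'test@163.com' --mail-rcpt 'recipient@example.com' --upload-file /dev/null\n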

                                                                          "},{"location":"en/admin/ghippo/platform-setting/security.html","title":"Security Policy","text":"

                                                                          AI platform offers robust security measures, including password policies and access control for the graphical interface.

                                                                          "},{"location":"en/admin/ghippo/platform-setting/security.html#password-policy","title":"Password Policy","text":"
                                                                          • New passwords must differ from the most recent historical password.
                                                                          • Users are required to change their passwords upon expiration.
                                                                          • Passwords must not match the username.
                                                                          • Passwords cannot be the same as the user's email address.
                                                                          • Customizable password rules.
                                                                          • Customizable minimum password length.
                                                                          "},{"location":"en/admin/ghippo/platform-setting/security.html#access-control-policy","title":"Access Control Policy","text":"
                                                                          • Session Timeout Policy: Users will be automatically logged out after a period of inactivity lasting x hours.
                                                                          • Account Lockout Policy: Accounts will be locked after multiple failed login attempts within a specified time frame.
                                                                          • Login/Logout Policy: Users will be logged out when closing the browser.

                                                                          To configure the password and access control policies, navigate to global management, then click Settings -> Security Policy in the left navigation bar.

                                                                          "},{"location":"en/admin/ghippo/report-billing/index.html","title":"Operation Management","text":"

                                                                          Operation Management provides a visual representation of the total usage and utilization rates of CPU, memory, storage and GPU across various dimensions such as cluster, node, namespace, pod, and workspace within a specified time range on the platform. It also automatically calculates platform consumption information based on usage, usage time, and unit price. By default, the module enables all report statistics, but platform administrators can manually enable or disable individual reports. After enabling or disabling, the platform will start or stop collecting report data within a maximum of 20 minutes. Previously collected data will still be displayed normally. Operation Management data can be retained on the platform for up to 365 days. Statistical data exceeding this retention period will be automatically deleted. You can also download reports in CSV or Excel format for further statistics and analysis.

                                                                          Operation Management is available only for the Standard Edition and above. It is not supported in the Community Edition.

                                                                          You must install or upgrade the Operations Management module first; after that, you can use the report management and accounting & billing features.

                                                                          "},{"location":"en/admin/ghippo/report-billing/index.html#report-management","title":"Report Management","text":"

                                                                          Report Management provides data statistics for cluster, node, pods, workspace, and namespace across five dimensions: CPU Utilization, Memory Utilization, Storage Utilization, GPU Utilization, and GPU Memory Utilization. It also integrates with the audit and alert modules to support the statistical management of audit and alert data, supporting a total of seven types of reports.

                                                                          "},{"location":"en/admin/ghippo/report-billing/index.html#accounting-billing","title":"Accounting & Billing","text":"

                                                                          Accounting & Billing provides billing statistics for clusters, nodes, pods, namespaces, and workspaces on the platform. It calculates the consumption for each resource during the statistical period based on the usage of CPU, memory, storage and GPU, as well as user-configured prices and currency units. Depending on the selected time span, such as monthly, quarterly, or annually, it can quickly calculate the actual consumption for that period.

                                                                          "},{"location":"en/admin/ghippo/report-billing/billing.html","title":"Accounting & Billing","text":"

                                                                          Accounting and billing further processes the usage data from reports. You can manually set the unit price and currency for CPU, memory, GPU, and storage. The system then automatically calculates the expenses of clusters, nodes, pods, namespaces, and workspaces over a period. You can adjust the period freely and export billing reports in Excel or CSV format after filtering by week, month, quarter, or year.

                                                                          "},{"location":"en/admin/ghippo/report-billing/billing.html#billing-rules-and-effective-time","title":"Billing Rules and Effective Time","text":"
                                                                          • Billing Rules: By default, billing is based on the maximum of the resource request and the actual usage (for example, a pod that requests 4 CPU cores but actually uses 6 cores is billed for 6 cores).
                                                                          • Effective Time: Price changes take effect the next day; the fees incurred on a given day are calculated from the unit price and quantity obtained at midnight of the following day.
                                                                          "},{"location":"en/admin/ghippo/report-billing/billing.html#features","title":"Features","text":"
                                                                          • Support customizing the billing unit for CPU, memory, storage and GPU, as well as the currency unit.
                                                                          • Support custom querying of billing data within a year, automatically calculating the billing situation for the selected time period.
                                                                          • Support exporting billing reports in CSV and Excel formats.
                                                                          • Support enabling/disabling individual billing reports. After enabling/disabling, the platform will start/stop collecting data within 20 minutes, and past collected data will still be displayed normally.
                                                                          • Support selective display of billing data for CPU, total memory, storage, GPU and total.
                                                                          "},{"location":"en/admin/ghippo/report-billing/billing.html#report-dimensions","title":"Report Dimensions","text":"

                                                                          Currently, the following reports are supported:

                                                                          • Cluster Billing Report: Displays the CPU billing, memory billing, storage billing, GPU billing and overall billing situation for all clusters within a certain period, as well as the number of nodes in that cluster. By clicking the number of nodes, you can quickly enter the node billing report and view the billing situation of nodes in that cluster during that time period.
                                                                          • Node Billing Report: Displays the CPU billing, memory billing, storage billing, GPU billing and overall billing situation for all nodes within a certain period, as well as the IP, type, and belonging cluster of nodes.
                                                                          • Pod Report: Displays the CPU billing, memory billing, storage billing, GPU billing and overall billing situation for all pods within a certain period, as well as the namespace, cluster, and workspace to which the pod belongs.
                                                                          • Workspace Billing Report: Displays the CPU billing, memory billing, storage billing, GPU billing and overall billing situation for all workspaces within a certain period, as well as the number of namespaces and pods. By clicking the number of namespaces, you can quickly enter the namespace billing report and view the billing situation of namespaces in that workspace during that time period; the same method can be used to view the billing situation of pods in that workspace during that time period.
                                                                          • Namespace Billing Report: Displays the CPU billing, memory billing, storage billing, GPU billing and overall billing situation for all namespaces within a certain period, as well as the number of pods, the belonging cluster, and workspace. By clicking the number of pods, you can quickly enter the pod billing report and view the billing situation of pods in that namespace during that time period.
                                                                          "},{"location":"en/admin/ghippo/report-billing/billing.html#operating-steps","title":"Operating Steps","text":"
                                                                          1. Log in to AI platform as a user with the admin role. Click Global Management -> Operations Management at the bottom of the left navigation bar.

                                                                          2. After entering the Operations Management , switch to different menus to view billing reports for clusters, nodes, and pods.

                                                                          "},{"location":"en/admin/ghippo/report-billing/report.html","title":"Report Management","text":"

                                                                          Report management visually displays statistical data across clusters, nodes, pods, workspaces, namespaces, audits, and alarms. This data provides a reliable foundation for platform billing and utilization optimization.

                                                                          "},{"location":"en/admin/ghippo/report-billing/report.html#features","title":"Features","text":"
                                                                          • Supports custom queries for statistical data within a year
                                                                          • Allows exporting reports in CSV and Excel formats
                                                                          • Supports enabling/disabling individual reports; once toggled, the platform will start/stop data collection within 20 minutes, but previously collected data will still be displayed.
                                                                          • Displays maximum, minimum, and average values for CPU utilization, memory utilization, storage utilization, and GPU memory utilization
                                                                          "},{"location":"en/admin/ghippo/report-billing/report.html#report-dimensions","title":"Report Dimensions","text":"

                                                                          Currently, the following reports are supported:

                                                                          • Cluster Report: Displays the maximum, minimum, and average values of CPU utilization, memory utilization, storage utilization, and GPU memory utilization for all clusters during a specific time period, as well as the number of nodes under the cluster. You can quickly access the node report by clicking on the node count and view the utilization of nodes under the cluster during that period.
                                                                          • Node Report: Displays the maximum, minimum, and average values of CPU utilization, memory utilization, storage utilization, and GPU memory utilization for all nodes during a specific time period, along with the node's IP, type, and affiliated cluster.
                                                                          • Pod Report: Shows the maximum, minimum, and average values of CPU utilization, memory utilization, storage utilization, and GPU memory utilization for all pods during a specific time period, as well as the pod's namespace, affiliated cluster, and workspace.
                                                                          • Workspace Report: Displays the maximum, minimum, and average values of CPU utilization, memory utilization, storage utilization, and GPU memory utilization for all workspaces during a specific time period, along with the number of namespaces and pods. You can quickly access the namespace report by clicking on the namespace count and view the utilization of namespaces under the workspace during that period; similarly, you can view the utilization of pods under the workspace.
                                                                          • Namespace Report: Displays the maximum, minimum, and average values of CPU utilization, memory utilization, storage utilization, and GPU memory utilization for all namespaces during a specific time period, as well as the number of pods, affiliated clusters, and workspaces. You can quickly access the pod report by clicking on the pod count and view the utilization of pods within the namespace during that period.
                                                                          • Audit Report: Divided into user actions and resource operations. The user action report counts the number of operations by a single user during a period, including successful and failed attempts; the resource operation report counts the number of operations on each type of resource by all users.
                                                                          • Alarm Report: Displays the number of alarms for all nodes during a specific period, including the occurrences of fatal, severe, and warning alarms.
                                                                          "},{"location":"en/admin/ghippo/report-billing/report.html#steps","title":"Steps","text":"
                                                                          1. Log in to AI platform as a user with the Admin role. Click Global Management -> Operations Management at the bottom of the left sidebar.

                                                                          2. After entering Operations Management, switch between different menus to view reports on clusters, nodes, and pods.

                                                                          "},{"location":"en/admin/ghippo/troubleshooting/ghippo01.html","title":"Unable to start istio-ingressgateway when restarting the cluster (virtual machine)?","text":"

                                                                          The error message is as shown in the following image:

                                                                          Possible cause: The jwtsUri address of the RequestAuthentication CR cannot be accessed, causing istiod to be unable to push the configuration to istio-ingressgateway (This bug can be avoided in Istio 1.15: https://github.com/istio/istio/pull/39341/).

                                                                          Solution:

                                                                          1. Backup the RequestAuthentication ghippo CR.

                                                                            kubectl get RequestAuthentication ghippo -n istio-system -o yaml > ghippo-ra.yaml\n
                                                                          2. Delete the RequestAuthentication ghippo CR.

                                                                            kubectl delete RequestAuthentication ghippo -n istio-system\n
                                                                          3. Restart Istio.

                                                                            kubectl rollout restart deploy/istiod -n istio-system\nkubectl rollout restart deploy/istio-ingressgateway -n istio-system\n
                                                                          4. Reapply the RequestAuthentication ghippo CR.

                                                                            kubectl apply -f ghippo-ra.yaml\n

                                                                            Note

                                                                            Before applying the RequestAuthentication ghippo CR, make sure that ghippo-apiserver and ghippo-keycloak are started correctly.
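                                                                            A quick way to confirm both components are running, assuming they are deployed in the ghippo-system namespace (as in the helm commands used elsewhere in this troubleshooting guide):

                                                                            kubectl -n ghippo-system get pods | grep -E 'ghippo-apiserver|ghippo-keycloak'\n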

                                                                          "},{"location":"en/admin/ghippo/troubleshooting/ghippo02.html","title":"Login loop with error 401 or 403","text":"

                                                                          This issue occurs when the MySQL database connected to ghippo-keycloak encounters a failure, causing the OIDC Public keys to be reset.

                                                                          For Global Management version 0.11.1 and above, you can follow these steps to restore normal operation by updating the Global Management configuration file using helm .

                                                                          # Update helm repository\nhelm repo update ghippo\n\n# Backup ghippo parameters\nhelm get values ghippo -n ghippo-system -o yaml > ghippo-values-bak.yaml\n\n# Get the current deployed ghippo version\nversion=$(helm get notes ghippo -n ghippo-system | grep \"Chart Version\" | awk -F ': ' '{ print $2 }')\n\n# Perform the update operation to make the configuration file take effect\nhelm upgrade ghippo ghippo/ghippo \\\n-n ghippo-system \\\n-f ./ghippo-values-bak.yaml \\\n--version ${version}\n
                                                                          "},{"location":"en/admin/ghippo/troubleshooting/ghippo03.html","title":"Keycloak Unable to Start","text":""},{"location":"en/admin/ghippo/troubleshooting/ghippo03.html#common-issues","title":"Common Issues","text":""},{"location":"en/admin/ghippo/troubleshooting/ghippo03.html#symptoms","title":"Symptoms","text":"

                                                                          MySQL is ready with no errors, but after installing Global Management, Keycloak fails to start (restarting more than 10 times).

                                                                          "},{"location":"en/admin/ghippo/troubleshooting/ghippo03.html#checklist","title":"Checklist","text":"
                                                                          • If the database is MySQL, check if the Keycloak database encoding is UTF8.
                                                                          • Check the network connection from Keycloak to the database, ensure the database resources are sufficient, including but not limited to resource limits, storage space, and physical machine resources.
                                                                          "},{"location":"en/admin/ghippo/troubleshooting/ghippo03.html#troubleshooting-steps","title":"Troubleshooting Steps","text":"
                                                                          1. Check if MySQL resource usage has reached its limit.
                                                                          2. Check whether the number of tables in the keycloak MySQL database is 95. (The number of tables may vary across Keycloak versions; compare against the Keycloak database of the same version in a development or testing environment.) If the number is lower, database table initialization may have failed (check the table count with show tables;). See the sketch after this list.
                                                                          3. Delete and recreate the Keycloak database with the command CREATE DATABASE IF NOT EXISTS keycloak CHARACTER SET utf8.
                                                                          4. Restart the Keycloak Pod to resolve the issue
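                                                                          A minimal sketch of steps 2 to 4 above, assuming the MySQL client is reachable with root credentials and Keycloak runs in the ghippo-system namespace (adjust host, user, and pod names to your environment):

                                                                          # Step 2: count the tables in the keycloak database\nmysql -u root -p -e \"SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = 'keycloak';\"\n\n# Step 3: recreate the database with UTF8 encoding if initialization failed\nmysql -u root -p -e \"DROP DATABASE IF EXISTS keycloak; CREATE DATABASE IF NOT EXISTS keycloak CHARACTER SET utf8;\"\n\n# Step 4: restart the Keycloak pod; find its name with kubectl get pods\nkubectl -n ghippo-system delete pod <keycloak-pod-name>\n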
                                                                          "},{"location":"en/admin/ghippo/troubleshooting/ghippo03.html#symptoms_1","title":"Symptoms","text":"

                                                                          Keycloak cannot start normally: the Keycloak pod is in the CrashLoopBackOff state, and the Keycloak log shows:

                                                                          "},{"location":"en/admin/ghippo/troubleshooting/ghippo03.html#checklist_1","title":"Checklist","text":"

                                                                          Run the following script to check the supported CPU types:

                                                                          cat <<\"EOF\" > detect-cpu.sh\n#!/bin/sh -eu\n\nflags=$(cat /proc/cpuinfo | grep flags | head -n 1 | cut -d: -f2)\n\nsupports_v2='awk \"/cx16/&&/lahf/&&/popcnt/&&/sse4_1/&&/sse4_2/&&/ssse3/ {found=1} END {exit !found}\"'\nsupports_v3='awk \"/avx/&&/avx2/&&/bmi1/&&/bmi2/&&/f16c/&&/fma/&&/abm/&&/movbe/&&/xsave/ {found=1} END {exit !found}\"'\nsupports_v4='awk \"/avx512f/&&/avx512bw/&&/avx512cd/&&/avx512dq/&&/avx512vl/ {found=1} END {exit !found}\"'\n\necho \"$flags\" | eval $supports_v2 || exit 2 && echo \"CPU supports x86-64-v2\"\necho \"$flags\" | eval $supports_v3 || exit 3 && echo \"CPU supports x86-64-v3\"\necho \"$flags\" | eval $supports_v4 || exit 4 && echo \"CPU supports x86-64-v4\"\nEOF\n\nchmod +x detect-cpu.sh\nsh detect-cpu.sh\n

                                                                          Execute the command below to check the current CPU features. If the output contains sse4_2, it indicates that your processor supports SSE 4.2.

                                                                          lscpu | grep sse4_2\n
                                                                          "},{"location":"en/admin/ghippo/troubleshooting/ghippo03.html#solution","title":"Solution","text":"

                                                                          You need to upgrade your virtual machine or physical machine CPU to support x86-64-v2 and above, ensuring that the x86 CPU instruction set supports SSE4.2. For details on how to upgrade, you should consult your virtual machine platform provider or your physical machine provider.

                                                                          For more information, see: https://github.com/keycloak/keycloak/issues/17290

                                                                          "},{"location":"en/admin/ghippo/troubleshooting/ghippo04.html","title":"Failure to Upgrade Global Management Separately","text":"

                                                                          If the upgrade fails with the following message, refer to the Offline Upgrade section and complete the CRD installation by following the steps for updating the ghippo CRDs.

                                                                          ensure CRDs are installed first\n
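                                                                          One possible way to install the CRDs by hand, assuming Helm >= 3.7 and the ghippo repository configured as in the login-loop fix above (a sketch, not the documented procedure; verify the chart name in your environment):

                                                                          helm show crds ghippo/ghippo | kubectl apply -f -\n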
                                                                          "},{"location":"en/admin/ghippo/workspace/folder-permission.html","title":"Description of folder permissions","text":"

                                                                          Folders have permission mapping capabilities, which can map the permissions of users/groups in this folder to subfolders, workspaces and resources under it.

                                                                          If a user/group has the Folder Admin role in this folder, it still has the Folder Admin role when mapped to a subfolder, and is mapped to Workspace Admin in the workspaces under it; if a namespace is bound under Workspace and Folder -> Resource Group , the user/group also becomes Namespace Admin after mapping.

                                                                          Note

                                                                          Folder permission mapping does not apply to shared resources: sharing grants usage permissions on a cluster to multiple workspaces rather than assigning management permissions to them, so permission inheritance and role mapping are not performed.

                                                                          "},{"location":"en/admin/ghippo/workspace/folder-permission.html#use-cases","title":"Use cases","text":"

                                                                          Folders are hierarchical, so when folders are mapped to departments/suppliers/projects in an enterprise:

                                                                          • If a user/group has administrative authority (Admin) in the first-level department, the second-level, third-level, and fourth-level departments or projects under it also have administrative authority;
                                                                          • If a user/group has access rights (Editor) in the first-level department, the second-, third-, and fourth-level departments or projects under it also have access rights;
                                                                          • If a user/group has read-only permission (Viewer) in the first-level department, the second-level, third-level, and fourth-level departments or projects under it also have read-only permission.
                                                                          | Objects | Actions | Folder Admin | Folder Editor | Folder Viewer |
                                                                          | ------- | ------- | ------------ | ------------- | ------------- |
                                                                          | The folder itself | View | \u2713 | \u2713 | \u2713 |
                                                                          | The folder itself | Authorization | \u2713 | \u2717 | \u2717 |
                                                                          | The folder itself | Modify alias | \u2713 | \u2717 | \u2717 |
                                                                          | Subfolders | Create | \u2713 | \u2717 | \u2717 |
                                                                          | Subfolders | View | \u2713 | \u2713 | \u2713 |
                                                                          | Subfolders | Authorization | \u2713 | \u2717 | \u2717 |
                                                                          | Subfolders | Modify alias | \u2713 | \u2717 | \u2717 |
                                                                          | Workspaces under it | Create | \u2713 | \u2717 | \u2717 |
                                                                          | Workspaces under it | View | \u2713 | \u2713 | \u2713 |
                                                                          | Workspaces under it | Authorization | \u2713 | \u2717 | \u2717 |
                                                                          | Workspaces under it | Modify alias | \u2713 | \u2717 | \u2717 |
                                                                          | Workspaces under it - Resource Group | View | \u2713 | \u2713 | \u2713 |
                                                                          | Workspaces under it - Resource Group | Resource binding | \u2713 | \u2717 | \u2717 |
                                                                          | Workspaces under it - Resource Group | Unbind | \u2713 | \u2717 | \u2717 |
                                                                          | Workspaces under it - Shared Resources | View | \u2713 | \u2713 | \u2713 |
                                                                          | Workspaces under it - Shared Resources | New share | \u2713 | \u2717 | \u2717 |
                                                                          | Workspaces under it - Shared Resources | Unshare | \u2713 | \u2717 | \u2717 |
                                                                          | Workspaces under it - Shared Resources | Resource Quota | \u2713 | \u2717 | \u2717 |
                                                                          "},{"location":"en/admin/ghippo/workspace/folders.html","title":"Create/Delete Folders","text":"

                                                                          Folders have the capability to map permissions, allowing users/user groups to have their permissions in the folder mapped to its sub-folders, workspaces, and resources.

                                                                          Follow the steps below to create a folder:

                                                                          1. Log in to AI platform with a user account having the admin/folder admin role. Click Global Management -> Workspace and Folder at the bottom of the left navigation bar.

                                                                          2. Click the Create Folder button in the top right corner.

                                                                          3. Fill in the folder name, parent folder, and other information, then click OK to complete creating the folder.

                                                                          Tip

                                                                          After successful creation, the folder name will be displayed in the left tree structure, represented by different icons for workspaces and folders.

                                                                          Note

                                                                          To edit or delete a specific folder, select it and click \u2507 on the right side.

                                                                          • If there are resources bound to the resource group or shared resources within the folder, the folder cannot be deleted. All resources need to be unbound before deleting.

                                                                          • If there are registry resources accessed by the microservice engine module within the folder, the folder cannot be deleted. All access to the registry needs to be removed before deleting the folder.

                                                                          "},{"location":"en/admin/ghippo/workspace/quota.html","title":"Resource Quota","text":"

                                                                          Sharing a resource does not mean the shared users can use it without restriction. Admin, Kpanda Owner, and Workspace Admin can limit a user's maximum usage through the Resource Quota feature in shared resources. If no limit is set, usage is unrestricted.

                                                                          • CPU Request (Core)
                                                                          • CPU Limit (Core)
                                                                          • Memory Request (MB)
                                                                          • Memory Limit (MB)
                                                                          • Total Storage Request (GB)
                                                                          • Persistent Volume Claims (PVC)
                                                                          • GPU Type, Spec, Quantity (including but not limited to Nvidia, Ascend, ILLUVATAR, and other GPUs)

                                                                          A resource (cluster) can be shared among multiple workspaces, and a workspace can use resources from multiple shared clusters simultaneously.
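                                                                          Conceptually, these workspace quota fields map onto a Kubernetes ResourceQuota in the bound namespace. The sketch below is purely illustrative (hypothetical names and values; not necessarily how the platform stores quotas):

                                                                          kubectl apply -f - <<EOF\napiVersion: v1\nkind: ResourceQuota\nmetadata:\n  name: ws-quota        # hypothetical name\n  namespace: ns01       # a namespace bound to the workspace\nspec:\n  hard:\n    requests.cpu: \"100\"\n    limits.cpu: \"200\"\n    requests.memory: 100Gi\n    limits.memory: 200Gi\n    requests.storage: 500Gi\n    persistentvolumeclaims: \"10\"\nEOF\n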

                                                                          "},{"location":"en/admin/ghippo/workspace/quota.html#resource-groups-and-shared-resources","title":"Resource Groups and Shared Resources","text":"

                                                                          Cluster resources in both shared resources and resource groups are derived from Container Management. However, different effects will occur when binding a cluster to a workspace or sharing it with a workspace.

                                                                          1. Binding Resources

                                                                            Users/User groups in the workspace will have full management and usage permissions for the cluster. Workspace Admin will be mapped as Cluster Admin. Workspace Admin can access the Container Management module to manage the cluster.

                                                                            Note

                                                                            As of now, there are no Cluster Editor and Cluster Viewer roles in the Container Management module. Therefore, Workspace Editor and Workspace Viewer cannot be mapped.

                                                                          2. Adding Shared Resources

                                                                            Users/User groups in the workspace will have usage permissions for the cluster resources.

                                                                            Unlike resource groups, when sharing a cluster with a workspace, the roles of the users in the workspace will not be mapped to the resources. Therefore, Workspace Admin will not be mapped as Cluster Admin.

                                                                          This section demonstrates three scenarios related to resource quotas.

                                                                          "},{"location":"en/admin/ghippo/workspace/quota.html#create-namespaces","title":"Create Namespaces","text":"

                                                                          Creating a namespace involves resource quotas.

                                                                          1. Add a shared cluster to workspace ws01 .

                                                                          2. Select workspace ws01 and the shared cluster in Workbench, and create a namespace ns01 .

                                                                            • If no resource quotas are set in the shared cluster, there is no need to set resource quotas when creating the namespace.
                                                                            • If resource quotas are set in the shared cluster (e.g., CPU Request = 100 cores), the CPU request for the namespace must be less than or equal to 100 cores (CPU Request \u2264 100 cores) for creation to succeed.
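                                                                          After the namespace is created, you can inspect how much of the quota it consumes from the cluster side (a hedged sketch; assumes kubectl access to the shared cluster):

                                                                          kubectl describe resourcequota -n ns01   # shows Used vs. Hard for CPU, memory, and storage\n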
                                                                          "},{"location":"en/admin/ghippo/workspace/quota.html#bind-namespace-to-workspace","title":"Bind Namespace to Workspace","text":"

                                                                          Prerequisite: Workspace ws01 has added a shared cluster, and the operator has the Workspace Admin + Kpanda Owner or Admin role.

                                                                          The two methods of binding have the same effect.

                                                                          • Bind the created namespace ns01 to ws01 in Container Management.

  • If no resource quotas are set in the shared cluster, the namespace ns01 can be bound successfully regardless of whether it has resource quotas of its own.
  • If resource quotas are set in the shared cluster (e.g., CPU Request = 100 cores), the namespace ns01 must satisfy CPU Request \u2264 100 cores for binding to succeed.
                                                                          • Bind the namespace ns01 to ws01 in Global Management.

  • If no resource quotas are set in the shared cluster, the namespace ns01 can be bound successfully regardless of whether it has resource quotas of its own.
  • If resource quotas are set in the shared cluster (e.g., CPU Request = 100 cores), the namespace ns01 must satisfy CPU Request \u2264 100 cores for binding to succeed.
                                                                          "},{"location":"en/admin/ghippo/workspace/quota.html#unbind-namespace-from-workspace","title":"Unbind Namespace from Workspace","text":"

                                                                          The two methods of unbinding have the same effect.

                                                                          • Unbind the namespace ns01 from workspace ws01 in Container Management.

                                                                            • If no resource quotas are set in the shared cluster, unbinding the namespace ns01 will not affect the resource quotas, regardless of whether resource quotas were set for the namespace.
  • If resource quotas (CPU Request = 100 cores) are set in the shared cluster and the namespace ns01 has its own resource quotas, unbinding will release the corresponding resource quota.
                                                                          • Unbind the namespace ns01 from workspace ws01 in Global Management.

                                                                            • If no resource quotas are set in the shared cluster, unbinding the namespace ns01 will not affect the resource quotas, regardless of whether resource quotas were set for the namespace.
  • If resource quotas (CPU Request = 100 cores) are set in the shared cluster and the namespace ns01 has its own resource quotas, unbinding will release the corresponding resource quota.
                                                                          "},{"location":"en/admin/ghippo/workspace/res-gp-and-shared-res.html","title":"Differences between Resource Groups and Shared Resources","text":"

                                                                          Both resource groups and shared resources support cluster binding, but they have significant differences in usage.

                                                                          "},{"location":"en/admin/ghippo/workspace/res-gp-and-shared-res.html#differences-in-usage-scenarios","title":"Differences in Usage Scenarios","text":"
• Cluster Binding for Resource Groups: Resource groups are usually used for batch authorization. After a resource group is bound to a cluster, the workspace administrator is mapped as a cluster administrator and can manage and use cluster resources.
• Cluster Binding for Shared Resources: Shared resources are usually used for resource quotas. A typical scenario: the platform administrator assigns a cluster to a primary supplier, who in turn assigns the cluster to a secondary supplier and sets resource quotas for that secondary supplier.

Note: In this scenario, the platform administrator needs to impose resource restrictions on secondary suppliers directly; a primary supplier cannot currently limit the cluster quota of its secondary suppliers.

                                                                          "},{"location":"en/admin/ghippo/workspace/res-gp-and-shared-res.html#differences-in-cluster-quota-usage","title":"Differences in Cluster Quota Usage","text":"
• Cluster Binding for Resource Groups: The workspace administrator is mapped as the administrator of the cluster, equivalent to being granted the Cluster Admin role in Container Management - Permission Management. They have unrestricted access to cluster resources, can manage critical components such as management nodes, and are not subject to resource quotas.
                                                                          • Cluster Binding for Shared Resources: The workspace administrator can only use the quota in the cluster to create namespaces in the Workbench and does not have cluster management permissions. If the workspace is restricted by a quota, the workspace administrator can only create and use namespaces within the quota range.
                                                                          "},{"location":"en/admin/ghippo/workspace/res-gp-and-shared-res.html#differences-in-resource-types","title":"Differences in Resource Types","text":"
• Resource Groups: Can bind to clusters, cluster-namespaces, multiclouds, multicloud namespaces, meshes, and mesh-namespaces.
                                                                          • Shared Resources: Can only bind to clusters.
                                                                          "},{"location":"en/admin/ghippo/workspace/res-gp-and-shared-res.html#similarities-between-resource-groups-and-shared-resources","title":"Similarities between Resource Groups and Shared Resources","text":"

After binding to a cluster, both resource groups and shared resources allow users to go to the Workbench and create namespaces there; the namespaces are automatically bound to the workspace.

                                                                          "},{"location":"en/admin/ghippo/workspace/workspace.html","title":"Creating/Deleting Workspaces","text":"

A workspace is a resource category that represents a hierarchical relationship of resources. A workspace can contain resources such as clusters, namespaces, and registries. Typically, each workspace corresponds to a project; different resources can be allocated to it, and different users and user groups can be assigned to it.

                                                                          Follow the steps below to create a workspace:

                                                                          1. Log in to AI platform with a user account having the admin/folder admin role. Click Global Management -> Workspace and Folder at the bottom of the left navigation bar.

                                                                          2. Click the Create Workspace button in the top right corner.

                                                                          3. Fill in the workspace name, folder assignment, and other information, then click OK to complete creating the workspace.

                                                                          Tip

                                                                          After successful creation, the workspace name will be displayed in the left tree structure, represented by different icons for folders and workspaces.

                                                                          Note

                                                                          To edit or delete a specific workspace or folder, select it and click ... on the right side.

• If resource groups or shared resources hold resources under the workspace, the workspace cannot be deleted. Unbind all resources before deleting the workspace.

• If Microservices Engine has an Integrated Registry under the workspace, the workspace cannot be deleted. Remove the Integrated Registry before deleting the workspace.

• If Container Registry has a Registry Space or an Integrated Registry under the workspace, the workspace cannot be deleted. Remove the Registry Space and delete the Integrated Registry before deleting the workspace.

                                                                          "},{"location":"en/admin/ghippo/workspace/ws-folder.html","title":"Workspace and Folder","text":"

                                                                          Workspace and Folder is a feature that provides resource isolation and grouping, addressing issues related to unified authorization, resource grouping, and resource quotas.

                                                                          Workspace and Folder involves two concepts: workspaces and folders.

                                                                          "},{"location":"en/admin/ghippo/workspace/ws-folder.html#workspaces","title":"Workspaces","text":"

                                                                          Workspaces allow the management of resources through Authorization , Resource Group , and Shared Resource , enabling users (and user groups) to share resources within the workspace.

                                                                          • Resources

                                                                            Resources are at the lowest level of the hierarchy in the resource management module. They include clusters, namespaces, pipelines, gateways, and more. All these resources can only have workspaces as their parent level. Workspaces act as containers for grouping resources.

                                                                          • Workspace

                                                                            A workspace usually refers to a project or environment, and the resources in each workspace are logically isolated from those in other workspaces. You can grant users (groups of users) different access rights to the same set of resources through authorization in the workspace.

Workspaces are the first grouping level above resources in the hierarchy and contain resources. All resources except shared resources have one and only one parent; likewise, every workspace has one and only one parent folder.

                                                                            Resources are grouped by workspace, and there are two grouping modes in workspace, namely Resource Group and Shared Resource .

                                                                          • Resource group

A resource can only be added to one resource group, and resource groups correspond one-to-one with workspaces. After a resource is added to a resource group, the Workspace Admin obtains management authority over the resource, equivalent to being its owner.

• Shared resource

For shared resources, multiple workspaces can share one or more resources. Resource owners can choose to share their own resources with workspaces. Generally, the resource owner limits the amount of resources the receiving workspace can use when sharing. After resources are shared, the Workspace Admin has only usage rights within the resource limit and cannot manage the resources or adjust the amount the workspace can use.

At the same time, shared resources place requirements on the resources themselves: only cluster resources can be shared. A Cluster Admin can share a cluster with different workspaces and limit how much of the cluster each workspace may use.

A Workspace Admin can create multiple namespaces within the resource quota, but the sum of the namespaces' resource quotas cannot exceed the resource quota of the cluster in the workspace; a toy sketch of this constraint follows. For Kubernetes resources, the only resource type that can currently be shared is Cluster.

                                                                          "},{"location":"en/admin/ghippo/workspace/ws-folder.html#folder","title":"Folder","text":"

                                                                          Folders can be used to build enterprise business hierarchy relationships.

                                                                          • Folders are a further grouping mechanism based on workspaces and have a hierarchical structure. A folder can contain workspaces, other folders, or a combination of both, forming a tree-like organizational relationship.

                                                                          • Folders allow you to map your business hierarchy and group workspaces by department. Folders are not directly linked to resources, but indirectly achieve resource grouping through workspaces.

                                                                          • A folder has one and only one parent folder, and the root folder is the highest level of the hierarchy. The root folder has no parent, and folders and workspaces are attached to the root folder.

                                                                          In addition, users (groups) in folders can inherit permissions from their parents through a hierarchical structure. The permissions of the user in the hierarchical structure come from the combination of the permissions of the current level and the permissions inherited from its parents. The permissions are additive and there is no mutual exclusion.

                                                                          "},{"location":"en/admin/ghippo/workspace/ws-permission.html","title":"Description of workspace permissions","text":"

                                                                          The workspace has permission mapping and resource isolation capabilities, and can map the permissions of users/groups in the workspace to the resources under it. If the user/group has the Workspace Admin role in the workspace and the resource Namespace is bound to the workspace-resource group, the user/group will become Namespace Admin after mapping.

                                                                          Note

The permission mapping capability of the workspace does not apply to shared resources, because sharing grants cluster usage permissions to multiple workspaces rather than management permissions; therefore, permission inheritance and role mapping are not implemented.

                                                                          "},{"location":"en/admin/ghippo/workspace/ws-permission.html#use-cases","title":"Use cases","text":"

                                                                          Resource isolation is achieved by binding resources to different workspaces. Therefore, resources can be flexibly allocated to each workspace (tenant) with the help of permission mapping, resource isolation, and resource sharing capabilities.

                                                                          Generally applicable to the following two use cases:

                                                                          • Cluster one-to-one

| Ordinary Cluster | Department/Tenant (Workspace) | Purpose |
| --- | --- | --- |
| Cluster 01 | A | Administration and Usage |
| Cluster 02 | B | Administration and Usage |
                                                                          • Cluster one-to-many

| Cluster | Department/Tenant (Workspace) | Resource Quota |
| --- | --- | --- |
| Cluster 01 | A | 100-core CPU |
| Cluster 01 | B | 50-core CPU |
                                                                          "},{"location":"en/admin/ghippo/workspace/ws-permission.html#permission-description","title":"Permission description","text":"Action Objects Operations Workspace Admin Workspace Editor Workspace Viewer itself view \u2713 \u2713 \u2713 - Authorization \u2713 \u2717 \u2717 - Modify Alias \u2713 \u2713 \u2717 Resource Group View \u2713 \u2713 \u2713 - resource binding \u2713 \u2717 \u2717 - unbind \u2713 \u2717 \u2717 Shared Resources View \u2713 \u2713 \u2713 - Add Share \u2713 \u2717 \u2717 - Unshare \u2713 \u2717 \u2717 - Resource Quota \u2713 \u2717 \u2717 - Using Shared Resources 1 \u2713 \u2717 \u2717
1. Authorized users can go to modules such as Workbench, Microservice Engine, Middleware, Multicloud Orchestration, and Service Mesh to use the resources in the workspace. For the operation scope of the Workspace Admin, Workspace Editor, and Workspace Viewer roles in each module, refer to the permission descriptions:

                                                                            • Container Management Permissions


                                                                          "},{"location":"en/admin/ghippo/workspace/wsbind-permission.html","title":"Resource Binding Permission Instructions","text":"

Suppose a user, John ("John" stands for any user who needs to bind resources), has been assigned the Workspace Admin role or has been granted the workspace's "Resource Binding" permission through a custom role, and wants to bind a specific cluster or namespace to the workspace.

To bind cluster or namespace resources to a workspace, John needs not only the workspace's "Resource Binding" permission but also Cluster Admin permissions.

                                                                          "},{"location":"en/admin/ghippo/workspace/wsbind-permission.html#granting-authorization-to-john","title":"Granting Authorization to John","text":"
                                                                          1. Using the Platform Admin Role, grant John the role of Workspace Admin on the Workspace -> Authorization page.

                                                                          2. Then, on the Container Management -> Permissions page, authorize John as a Cluster Admin by Add Permission.

                                                                          "},{"location":"en/admin/ghippo/workspace/wsbind-permission.html#binding-to-workspace","title":"Binding to Workspace","text":"

Log in to the AI platform with John's account. On the Container Management -> Clusters page, John can bind the specified cluster to his own workspace using the Bind Workspace button.

                                                                          Note

                                                                          John can only bind clusters or namespaces to a specific workspace in the Container Management module, and cannot perform this operation in the Global Management module.

                                                                          To bind a namespace to a workspace, you must have at least Workspace Admin and Cluster Admin permissions.

                                                                          "},{"location":"en/admin/host/createhost.html","title":"Create and Start a Cloud Host","text":"

                                                                          After the user completes registration and is assigned a workspace, namespace, and resources, they can create and start a cloud host.

                                                                          "},{"location":"en/admin/host/createhost.html#prerequisites","title":"Prerequisites","text":"
                                                                          • AI platform installed
                                                                          • User has successfully registered
                                                                          • Workspace has been bound to the user
                                                                          • Resources have been allocated to the workspace
                                                                          "},{"location":"en/admin/host/createhost.html#steps","title":"Steps","text":"
                                                                          1. User logs into the AI platform.
2. Click Create Cloud Host -> Create from Template.

3. After defining all configurations for the cloud host, click Next.

   The configuration is split across the Basic Configuration, Template Configuration, and Storage and Network tabs.

4. After configuring the root password or SSH key, click Confirm.

5. Return to the host list and wait for the status to change to Running. After that, you can start the host by clicking the \u2507 on the right side.

                                                                          Next step: Use the Cloud Host

                                                                          "},{"location":"en/admin/host/usehost.html","title":"Using Cloud Host","text":"

                                                                          After creating and starting the cloud host, users can begin using it.

                                                                          "},{"location":"en/admin/host/usehost.html#prerequisites","title":"Prerequisites","text":"
                                                                          • AI platform is installed
                                                                          • User has created and started a cloud host
                                                                          "},{"location":"en/admin/host/usehost.html#steps-to-follow","title":"Steps to Follow","text":"
                                                                          1. Log in to the AI platform as an administrator.
                                                                          2. Navigate to Container Management -> Container Network -> Services, click the service name to enter the service details page, and click Update at the top right corner.

                                                                          3. Change the port range to 30900-30999, ensuring there are no conflicts.

4. Log in to the AI platform as an end user, navigate to the corresponding service, and check the access port.

5. Use an SSH client to log in to the cloud host from the external network (see the sketch after these steps).

                                                                          6. At this point, you can perform various operations on the cloud host.

                                                                          Next step: Cloud Resource Sharing: Quota Management

                                                                          "},{"location":"en/admin/insight/alert-center/index.html","title":"Alert Center","text":"

                                                                          The Alert Center is an important feature provided by AI platform that allows users to easily view all active and historical alerts by cluster and namespace through a graphical interface, and search alerts based on severity level (critical, warning, info).

                                                                          All alerts are triggered based on the threshold conditions set in the preset alert rules. In AI platform, some global alert policies are built-in, but users can also create or delete alert policies at any time, and set thresholds for the following metrics:

                                                                          • CPU usage
                                                                          • Memory usage
                                                                          • Disk usage
                                                                          • Disk reads per second
                                                                          • Disk writes per second
                                                                          • Cluster disk read throughput
                                                                          • Cluster disk write throughput
                                                                          • Network send rate
                                                                          • Network receive rate

Users can also add labels and annotations to alert rules. Alert rules can be classified as active or expired, and individual rules can be enabled or disabled to silence their alerts.

                                                                          When the threshold condition is met, users can configure how they want to be notified, including email, DingTalk, WeCom, webhook, and SMS notifications. All notification message templates can be customized and all messages are sent at specified intervals.

                                                                          In addition, the Alert Center also supports sending alert messages to designated users through short message services provided by Alibaba Cloud, Tencent Cloud, and more platforms that will be added soon, enabling multiple ways of alert notification.

                                                                          AI platform Alert Center is a powerful alert management platform that helps users quickly detect and resolve problems in the cluster, improve business stability and availability, and facilitate cluster inspection and troubleshooting.

                                                                          "},{"location":"en/admin/insight/alert-center/alert-policy.html","title":"Alert Policies","text":"

                                                                          In addition to the built-in alert policies, AI platform allows users to create custom alert policies. Each alert policy is a collection of alert rules that can be set for clusters, nodes, and workloads. When an alert object reaches the threshold set by any of the rules in the policy, an alert is automatically triggered and a notification is sent.

Taking the built-in alerts as an example, click the first alert policy, alertmanager.rules.

                                                                          You can see that some alert rules have been set under it. You can add more rules under this policy, or edit or delete them at any time. You can also view the historical and active alerts related to this alert policy and edit the notification configuration.

                                                                          "},{"location":"en/admin/insight/alert-center/alert-policy.html#create-alert-policies","title":"Create Alert Policies","text":"
1. Select Alert Center -> Alert Policies, and click the Create Alert Policy button.

2. Fill in the basic information, select one or more clusters, nodes, or workloads as the alert objects, and click Next.

3. The policy must contain at least one rule; if the list is empty, click Add Rule.

   Create an alert rule in the pop-up window, fill in the parameters, and click OK.

   • Template rules: pre-defined basic metrics that can monitor CPU, memory, disk, and network.
   • PromQL rules: enter a PromQL expression; see Querying Prometheus for the expression syntax. A sketch follows these steps.
   • Duration: the alert policy enters the Triggered state only after the threshold condition has persisted for the set duration.
   • Alert level: emergency, warning, or information.
   • Advanced settings: custom tags and annotations.

4. After clicking Next, configure notifications.

5. After the configuration is complete, click the OK button to return to the Alert Policy list.

                                                                          Tip

                                                                          The newly created alert policy is in the Not Triggered state. Once the threshold conditions and duration specified in the rules are met, it will change to the Triggered state.

                                                                          "},{"location":"en/admin/insight/alert-center/alert-policy.html#create-log-rules","title":"Create Log Rules","text":"

                                                                          After filling in the basic information, click Add Rule and select Log Rule as the rule type.

Log rules can be created only when a node or workload is selected as the resource object.

                                                                          Field Explanation:

• Filter Condition: the field used to query log content; four filter modes are supported: AND, OR, regular expression matching, and fuzzy matching.
• Condition: keywords or matching conditions entered according to the filter condition.
• Time Range: the time range for log queries.
• Threshold Condition: the alert threshold; when the set threshold is reached, an alert is triggered. Supported comparison operators: >, \u2265, =, \u2264, <.
• Alert Level: the severity of the alert.

A toy sketch of how these fields combine follows.
                                                                          "},{"location":"en/admin/insight/alert-center/alert-policy.html#create-event-rules","title":"Create Event Rules","text":"

                                                                          After filling in the basic information, click Add Rule and select Event Rule as the rule type.

Event rules can be created only when a workload is selected as the resource object.

                                                                          Field Explanation:

• Event Rule: only workloads can be selected as the resource object.
• Event Reason: event reasons differ by workload type; multiple event reasons are combined with an AND relationship.
• Time Range: data generated within this time range is examined; if the threshold condition is reached, an alert event is triggered.
• Threshold Condition: an alert event is triggered when the number of generated events reaches the set threshold.
• Trend Chart: by default, shows the trend of event changes over the last 10 minutes; each point represents the total number of occurrences within the time range ending at that point.
                                                                          "},{"location":"en/admin/insight/alert-center/alert-policy.html#other-operations","title":"Other Operations","text":"

                                                                          Click \u2507 at the right side of the list, then choose Delete from the pop-up menu to delete an alert policy. By clicking on the policy name, you can enter the policy details where you can add, edit, or delete the alert rules under it.

                                                                          Warning

Deleted alert policies are permanently removed, so please proceed with caution.

                                                                          "},{"location":"en/admin/insight/alert-center/alert-template.html","title":"Alert Template","text":"

Alert templates allow platform administrators to create templates with preset rules, and business units can use these templates directly to create alert policies. This reduces the effort business personnel spend managing alert rules while still allowing alert thresholds to be adjusted to the actual environment.

                                                                          "},{"location":"en/admin/insight/alert-center/alert-template.html#create-alert-template","title":"Create Alert Template","text":"
                                                                          1. In the navigation bar, select Alert -> Alert Policy, and click Alert Template at the top.

                                                                          2. Click Create Alert Template, and set the name, description, and other information for the Alert template.

| Parameter | Description |
| --- | --- |
| Template Name | The name can only contain lowercase letters, numbers, and hyphens (-), must start and end with a lowercase letter or number, and can be up to 63 characters long. |
| Description | The description can contain any characters and can be up to 256 characters long. |
| Resource Type | Used to specify the matching type of the alert template. |
| Alert Rule | Supports pre-defining multiple alert rules, including template rules and PromQL rules. |
                                                                          3. Click OK to complete the creation and return to the Alert template list. Click the template name to view the template details.

                                                                          "},{"location":"en/admin/insight/alert-center/alert-template.html#edit-alert-template","title":"Edit Alert Template","text":"

Click \u2507 next to the target template, then click Edit to enter the editing page for the alert template.

                                                                          "},{"location":"en/admin/insight/alert-center/alert-template.html#delete-alert-template","title":"Delete Alert Template","text":"

                                                                          Click \u2507 next to the target template, then click Delete. Enter the name of the Alert template in the input box to confirm deletion.

                                                                          "},{"location":"en/admin/insight/alert-center/inhibition.html","title":"Alert Inhibition","text":"

                                                                          Alert Inhibition is mainly a mechanism for temporarily hiding or reducing the priority of alerts that do not need immediate attention. The purpose of this feature is to reduce unnecessary alert information that may disturb operations personnel, allowing them to focus on more critical issues.

Alert inhibition identifies and ignores certain alerts through a set of rules defined for specific conditions. The main cases are:

• Parent-child inhibition: when a parent alert (for example, a node crash) fires, all child alerts caused by it (for example, crashes of containers running on that node) are inhibited.
• Similar alert inhibition: when alerts share the same characteristics (for example, the same problem on the same instance), duplicate alerts are inhibited.
                                                                          "},{"location":"en/admin/insight/alert-center/inhibition.html#create-inhibition","title":"Create Inhibition","text":"
                                                                          1. In the left navigation bar, select Alert -> Noise Reduction, and click Inhibition at the top.

                                                                          2. Click Create Inhibition, and set the name and rules for the inhibition.

                                                                            Note

Inhibition avoids floods of similar or related alerts triggered by the same issue: a set of rules, configured via Rule Details and Alert Details, identifies and ignores certain alerts.

| Parameter | Description |
| --- | --- |
| Name | The name can only contain lowercase letters, numbers, and hyphens (-), must start and end with a lowercase letter or number, and can be up to 63 characters long. |
| Description | The description can contain any characters and can be up to 256 characters long. |
| Cluster | The cluster where the inhibition rule applies. |
| Namespace | The namespace where the inhibition rule applies. |
| Source Alert | Matches alerts by label conditions. Alerts that meet all label conditions are compared with those that meet the inhibition conditions; alerts that do not meet the inhibition conditions are sent to the user as usual. Value ranges: Alert Level (the level of metric or event alerts: Critical, Major, Minor); Resource Type (the resource type of the alert object: Cluster, Node, StatefulSet, Deployment, DaemonSet, Pod); Labels (alert identification attributes consisting of a label name and a label value; user-defined values are supported). |
| Inhibition | Specifies the matching conditions for the target alert (the alert to be inhibited). Alerts that meet all the conditions are no longer sent to the user. |
| Equal | Specifies the list of labels compared to determine whether the source and target alerts match. Inhibition is triggered only when the values of the labels specified in equal are exactly the same in both alerts. The equal field is optional; if omitted, all labels are used for matching. |

A toy sketch of the equal matching logic appears after these steps.
3. Click OK to complete the creation and return to the inhibition list. Click the inhibition rule name to view the rule details.

                                                                          "},{"location":"en/admin/insight/alert-center/inhibition.html#view-rule-details","title":"View Rule Details","text":"

                                                                          In the left navigation bar, select Alert -> Alert Policy, and click the policy name to view the rule details.

Note

You can add custom tags when adding rules.
                                                                          "},{"location":"en/admin/insight/alert-center/inhibition.html#view-alert-details","title":"View Alert Details","text":"

                                                                          In the left navigation bar, select Alert -> Alerts, and click the policy name to view details.

Note

Alert details show information and settings for creating inhibitions.
                                                                          "},{"location":"en/admin/insight/alert-center/inhibition.html#edit-inhibition-rule","title":"Edit Inhibition Rule","text":"

                                                                          Click \u2507 next to the target rule, then click Edit to enter the editing page for the inhibition rule.

                                                                          "},{"location":"en/admin/insight/alert-center/inhibition.html#delete-inhibition-rule","title":"Delete Inhibition Rule","text":"

                                                                          Click \u2507 next to the target rule, then click Delete. Enter the name of the inhibition rule in the input box to confirm deletion.

                                                                          "},{"location":"en/admin/insight/alert-center/message.html","title":"Notification Settings","text":"

                                                                          On the Notification Settings page, you can configure how to send messages to users through email, WeCom, DingTalk, Webhook, and SMS.

                                                                          "},{"location":"en/admin/insight/alert-center/message.html#email-group","title":"Email Group","text":"
1. After entering Insight, click Alert Center -> Notification Settings in the left navigation bar. By default, the email notification object is selected. Click Add email group and add one or more email addresses.

                                                                          2. Multiple email addresses can be added.

3. After the configuration is complete, you will automatically return to the notification list. Click \u2507 on the right side of the list to edit or delete the email group.

                                                                          "},{"location":"en/admin/insight/alert-center/message.html#wecom","title":"WeCom","text":"
1. In the left navigation bar, click Alert Center -> Notification Settings -> WeCom. Click Add Group Robot and add one or more group robots.

                                                                            For the URL of the WeCom group robot, please refer to the official document of WeCom: How to use group robots.

2. After the configuration is complete, you will automatically return to the notification list. Click \u2507 on the right side of the list, select Send Test Information; you can also edit or delete the group robot.

                                                                          "},{"location":"en/admin/insight/alert-center/message.html#dingtalk","title":"DingTalk","text":"
1. In the left navigation bar, click Alert Center -> Notification Settings -> DingTalk. Click Add Group Robot and add one or more group robots.

                                                                            For the URL of the DingTalk group robot, please refer to the official document of DingTalk: Custom Robot Access.

2. After the configuration is complete, you will automatically return to the notification list. Click \u2507 on the right side of the list, select Send Test Information; you can also edit or delete the group robot.

                                                                          "},{"location":"en/admin/insight/alert-center/message.html#lark","title":"Lark","text":"
1. In the left navigation bar, click Alert Center -> Notification Settings -> Lark. Click Add Group Bot and add one or more group bots.

                                                                            Note

                                                                            When signature verification is required in Lark's group bot, you need to fill in the specific signature key when enabling notifications. Refer to Customizing Bot User Guide.

2. After configuration, you will be automatically redirected to the list page. Click \u2507 on the right side of the list and select Send Test Message. You can edit or delete group bots.

                                                                          "},{"location":"en/admin/insight/alert-center/message.html#webhook","title":"Webhook","text":"
1. In the left navigation bar, click Alert Center -> Notification Settings -> Webhook. Click New Webhook and add one or more Webhooks.

                                                                            For the Webhook URL and more configuration methods, please refer to the webhook document.

2. After the configuration is complete, you will automatically return to the notification list. Click \u2507 on the right side of the list, select Send Test Information; you can also edit or delete the Webhook. A sketch of a minimal receiver follows.

                                                                          "},{"location":"en/admin/insight/alert-center/message.html#message","title":"Message","text":"

                                                                          Note

Alert messages are sent to the user's personal Message section, and notifications can be viewed by clicking \ud83d\udd14 at the top.

1. In the left navigation bar, click Alert Center -> Notification Settings -> Message, then click Create Message.

                                                                            You can add and notify multiple users for a message.

2. After configuration, you will be automatically redirected to the list page. Click \u2507 on the right side of the list and select Send Test Message.

                                                                          "},{"location":"en/admin/insight/alert-center/message.html#sms-group","title":"SMS Group","text":"
1. In the left navigation bar, click Alert Center -> Notification Settings -> SMS. Click Add SMS Group and add one or more SMS groups.

2. Enter the name, the recipients of the message, their phone numbers, and the notification server in the pop-up window.

The notification server needs to be created in advance under Notification Settings -> Notification Server. Currently, two cloud SMS providers are supported: Alibaba Cloud and Tencent Cloud. Refer to your own cloud account for the specific configuration parameters.

3. After the SMS group is successfully added, you will automatically return to the notification list. Click \u2507 on the right side of the list to edit or delete the SMS group.

                                                                          "},{"location":"en/admin/insight/alert-center/msg-template.html","title":"Message Templates","text":"

                                                                          The message template feature supports customizing the content of message templates and can notify specified objects in the form of email, WeCom, DingTalk, Webhook, and SMS.

                                                                          "},{"location":"en/admin/insight/alert-center/msg-template.html#creating-a-message-template","title":"Creating a Message Template","text":"
1. In the left navigation bar, select Alert -> Message Template.

                                                                            Insight comes with two default built-in templates in both Chinese and English for user convenience.

                                                                          2. Fill in the template content.

                                                                          Info

                                                                          Observability comes with predefined message templates. If you need to define the content of the templates, refer to Configure Notification Templates.

                                                                          "},{"location":"en/admin/insight/alert-center/msg-template.html#message-template-details","title":"Message Template Details","text":"

                                                                          Click the name of a message template to view the details of the message template in the right slider.

| Parameter | Variable | Description |
| --- | --- | --- |
| ruleName | {{ .Labels.alertname }} | The name of the rule that triggered the alert |
| groupName | {{ .Labels.alertgroup }} | The name of the alert policy to which the alert rule belongs |
| severity | {{ .Labels.severity }} | The level of the alert that was triggered |
| cluster | {{ .Labels.cluster }} | The cluster where the resource that triggered the alert is located |
| namespace | {{ .Labels.namespace }} | The namespace where the resource that triggered the alert is located |
| node | {{ .Labels.node }} | The node where the resource that triggered the alert is located |
| targetType | {{ .Labels.target_type }} | The resource type of the alert target |
| target | {{ .Labels.target }} | The name of the object that triggered the alert |
| value | {{ .Annotations.value }} | The metric value at the time the alert notification was triggered |
| startsAt | {{ .StartsAt }} | The time when the alert started to occur |
| endsAt | {{ .EndsAt }} | The time when the alert ended |
| description | {{ .Annotations.description }} | A detailed description of the alert |
| labels | {{ for .labels }} {{ end }} | All labels of the alert; use the for function to iterate through the labels list to get all label contents |

"},{"location":"en/admin/insight/alert-center/msg-template.html#editing-or-deleting-a-message-template","title":"Editing or Deleting a Message Template","text":"

                                                                          Click \u2507 on the right side of the list and select Edit or Delete from the pop-up menu to modify or delete the message template.

                                                                          Warning

                                                                          Once a template is deleted, it cannot be recovered, so please use caution when deleting templates.

                                                                          "},{"location":"en/admin/insight/alert-center/silent.html","title":"Alert Silence","text":"

                                                                          Alert silence is a feature that allows alerts meeting certain criteria to be temporarily disabled from sending notifications within a specific time range. This feature helps operations personnel avoid receiving too many noisy alerts during certain operations or events, while also allowing for more precise handling of real issues that need to be addressed.

                                                                          On the Alert Silence page, you can see two tabs: Active Rule and Expired Rule. The former presents the rules currently in effect, while the latter presents those that were defined in the past but have now expired (or have been deleted by the user).

                                                                          "},{"location":"en/admin/insight/alert-center/silent.html#creating-a-silent-rule","title":"Creating a Silent Rule","text":"
1. In the left navigation bar, select Alert -> Noise Reduction -> Alert Silence, and click the Create Silence Rule button.

2. Fill in the parameters of the silence rule, such as cluster, namespace, tags, and time, to define its scope and effective time, and then click OK.

3. Return to the rule list; on the right side of the list, click \u2507 to edit or delete a silence rule.

                                                                          Through the Alert Silence feature, you can flexibly control which alerts should be ignored and when they should be effective, thereby improving operational efficiency and reducing the possibility of false alerts.

                                                                          "},{"location":"en/admin/insight/alert-center/sms-provider.html","title":"Configure Notification Server","text":"

                                                                          Insight supports SMS notifications and currently sends alert messages using integrated Alibaba Cloud and Tencent Cloud SMS services. This article explains how to configure the SMS notification server in Insight. The variables supported in the SMS signature are the default variables in the message template. As the number of SMS characters is limited, it is recommended to choose more explicit variables.

                                                                          For information on how to configure SMS recipients, refer to the document: Configure SMS Notification Group.

                                                                          "},{"location":"en/admin/insight/alert-center/sms-provider.html#procedure","title":"Procedure","text":"
                                                                          1. Go to Alert Center -> Notification Settings -> Notification Server .

                                                                          2. Click Add Notification Server .

                                                                            • Configure Alibaba Cloud server.

                                                                              To apply for Alibaba Cloud SMS service, please refer to Alibaba Cloud SMS Service.

                                                                              Field descriptions:

                                                                              • AccessKey ID : Parameter used by Alibaba Cloud to identify the user.
                                                                              • AccessKey Secret : Key used by Alibaba Cloud to authenticate the user. AccessKey Secret must be kept confidential.
                                                                              • SMS Signature : The SMS service supports creating signatures that meet the requirements according to user needs. When sending SMS, the SMS platform will add the approved SMS signature to the SMS content before sending it to the SMS recipient.
• Template CODE : The code of the SMS template, which determines the specific content of the SMS to be sent.
                                                                              • Parameter Template : The SMS body template can contain variables. Users can use variables to customize the SMS content.

                                                                              Please refer to Alibaba Cloud Variable Specification.

                                                                              Note

Example: The template content defined in Alibaba Cloud is: ${severity}: ${alertname} triggered at ${startat}. Refer to the configuration in the parameter template (see the sketch after this list).

                                                                            • Configure Tencent Cloud server.

                                                                              To apply for Tencent Cloud SMS service, please refer to Tencent Cloud SMS.

                                                                              Field descriptions:

                                                                              • Secret ID : Parameter used by Tencent Cloud to identify the API caller.
                                                                              • SecretKey : Parameter used by Tencent Cloud to authenticate the API caller.
                                                                              • SMS Template ID : The SMS template ID automatically generated by Tencent Cloud system.
                                                                              • Signature Content : The SMS signature content, which is the full name or abbreviation of the actual website name defined in the Tencent Cloud SMS signature.
                                                                              • SdkAppId : SMS SdkAppId, the actual SdkAppId generated after adding the application in the Tencent Cloud SMS console.
                                                                              • Parameter Template : The SMS body template can contain variables. Users can use variables to customize the SMS content. Please refer to: Tencent Cloud Variable Specification.

                                                                              Note

Example: The template content defined in Tencent Cloud is: {1}: {2} triggered at {3}. Refer to the configuration in the parameter template (see the sketch below).
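For a concrete illustration, the parameter templates for the two examples above might be filled in as follows. This is a minimal sketch assuming the default message-template variables listed earlier; the exact input format expected by Insight's Parameter Template field may differ, so treat the mapping as illustrative:

Alibaba Cloud (named placeholders ${severity}, ${alertname}, ${startat}):

{
  "severity": "{{ .Labels.severity }}",
  "alertname": "{{ .Labels.alertname }}",
  "startat": "{{ .StartsAt }}"
}

Tencent Cloud (positional placeholders {1}, {2}, {3}, filled in order):

["{{ .Labels.severity }}", "{{ .Labels.alertname }}", "{{ .StartsAt }}"]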

                                                                          "},{"location":"en/admin/insight/best-practice/debug-log.html","title":"Log Collection Troubleshooting Guide","text":"

After installing insight-agent in the cluster, Fluent Bit in insight-agent collects logs in the cluster by default, including Kubernetes event logs, node logs, and container logs. Fluent Bit comes preconfigured with various log collection plugins, related filter plugins, and log output plugins. The working status of these plugins determines whether log collection works normally. Insight provides a Fluent Bit dashboard that monitors the working condition of each Fluent Bit instance in the cluster and the collection, processing, and export of logs by each plugin.

1. Log in to the AI platform, enter Insight , and select Dashboard in the left navigation bar.

                                                                          2. Click the dashboard title Overview .

                                                                          3. Switch to the insight-system -> Fluent Bit dashboard.

4. Several selection boxes above the Fluent Bit dashboard let you choose the input plugin, filter plugin, output plugin, and the cluster where it is located.

                                                                          "},{"location":"en/admin/insight/best-practice/debug-log.html#plugin-description","title":"Plugin Description","text":"

                                                                          Here are some plugins for Fluent Bit .

                                                                          Log Collection Plugin

Input Plugin | Plugin Description | Collection Directory
tail.kube | Collect container logs | /var/log/containers/*.log
tail.kubeevent | Collect Kubernetes event logs | /var/log/containers/-kubernetes-event-exporter.log
tail.syslog.dmesg | Collect host dmesg logs | /var/log/dmesg
tail.syslog.messages | Collect frequently used host logs | /var/log/secure, /var/log/messages, /var/log/syslog, /var/log/auth.log
syslog.syslog.RSyslog | Collect RSyslog logs | -
systemd.syslog.systemd | Collect Journald daemon logs | -
tail.audit_log.k8s | Collect Kubernetes audit logs | /var/log//audit/.log
tail.audit_log.ghippo | Collect global management audit logs | /var/log/containers/_ghippo-system_audit-log.log
tail.skoala-gw | Collect microservice gateway logs | /var/log/containers/_skoala-gw.log
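For reference, a tail-type input such as tail.kube corresponds to a Fluent Bit configuration block roughly like the one below. This is a minimal sketch, not the exact configuration shipped with insight-agent; the Tag, Parser, and buffer settings here are assumptions:

[INPUT]
    Name              tail
    Alias             tail.kube
    Path              /var/log/containers/*.log
    Tag               kube.*        # assumed tag; insight-agent may use a different scheme
    Parser            cri           # assumed parser for containerd/CRI-O log format
    Mem_Buf_Limit     64MB
    Skip_Long_Lines   On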

                                                                          Log Filter Plugin

Filter Plugin | Plugin Description
Lua.audit_log.k8s | Use Lua to filter Kubernetes audit logs that meet certain conditions

                                                                          Note

Fluent Bit uses more filter plugins than just Lua.audit_log.k8s; only the filters that may discard logs are introduced here.

                                                                          Log Output Plugin

Output Plugin | Plugin Description
es.kube.kubeevent.syslog | Write Kubernetes audit logs, event logs, and syslog logs to the Elasticsearch cluster
forward.audit_log | Send Kubernetes audit logs and global management audit logs to Global Management
es.skoala | Write request logs and instance logs of the microservice gateway to the Elasticsearch cluster
"},{"location":"en/admin/insight/best-practice/debug-trace.html","title":"Trace Collection Troubleshooting Guide","text":"

                                                                          Before attempting to troubleshoot issues with trace data collection, you need to understand the transmission path of trace data. The following is a schematic diagram of the transmission of trace data:

graph TB\n\nsdk[Language probe / SDK] --> workload[Workload cluster otel collector]\n--> otel[Global cluster otel collector]\n--> jaeger[Global cluster jaeger collector]\n--> es[Elasticsearch cluster]\n\nclassDef plain fill:#ddd,stroke:#fff,stroke-width:1px,color:#000;\nclassDef k8s fill:#326ce5,stroke:#fff,stroke-width:1px,color:#fff;\nclassDef cluster fill:#fff,stroke:#bbb,stroke-width:1px,color:#326ce5;\n\nclass sdk,workload,otel,jaeger,es cluster

As shown in the figure above, a transmission failure at any step makes trace data impossible to query. If you find there is no trace data after completing application trace enhancement, perform the following steps:

1. Log in to the AI platform, enter Insight , and select Dashboard in the left navigation bar.

                                                                          2. Click the dashboard title Overview .

                                                                          3. Switch to the insight-system -> insight tracing debug dashboard.

4. This dashboard consists of three blocks, each monitoring the data transmission of different clusters and components. Use the generated time series charts to check whether trace data transmission has problems.

                                                                            • workload opentelemetry collector
                                                                            • global opentelemetry collector
                                                                            • global jaeger collector

                                                                          "},{"location":"en/admin/insight/best-practice/debug-trace.html#block-introduction","title":"Block Introduction","text":"
                                                                          1. workload opentelemetry collector

Displays the opentelemetry collector in each worker cluster receiving trace data from language probes/SDKs and sending aggregated trace data. You can select the cluster via the Cluster selection box in the upper left corner.

                                                                            Note

                                                                            Based on these four time series charts, you can determine whether the opentelemetry collector in this cluster is running normally.

                                                                          2. global opentelemetry collector

Displays the opentelemetry collector in the Global Service Cluster receiving trace data from the worker clusters' opentelemetry collectors and sending aggregated trace data.

                                                                            Note

The opentelemetry collector in the Global Management Cluster is also responsible for sending the global management module's audit logs from all worker clusters, as well as Kubernetes audit logs (not collected by default), to the audit server component of the global management module.

                                                                          3. global jaeger collector

Displays the jaeger collector in the Global Management Cluster receiving data from the otel collector in the Global Management Cluster and sending trace data to the Elasticsearch cluster.

                                                                          "},{"location":"en/admin/insight/best-practice/find_root_cause.html","title":"Troubleshooting Service Issues with Insight","text":"

                                                                          This article serves as a guide on using Insight to identify and analyze abnormal components in AI platform and determine the root causes of component exceptions.

Please note that this article assumes you have a basic understanding of Insight's product features and concepts.

                                                                          "},{"location":"en/admin/insight/best-practice/find_root_cause.html#service-map-identifying-abnormalities-on-a-macro-level","title":"Service Map - Identifying Abnormalities on a Macro Level","text":"

                                                                          In enterprise microservice architectures, managing a large number of services with complex interdependencies can be challenging. Insight offers service map monitoring, allowing users to gain a high-level overview of the running microservices in the system.

In the example below, the node insight-server is highlighted in red/yellow on the service map. Hovering over the node shows its error rate. To investigate further and understand why the error rate is not 0, you can explore more detailed information:

                                                                          Alternatively, clicking on the service name at the top will take you to the service's overview UI:

                                                                          "},{"location":"en/admin/insight/best-practice/find_root_cause.html#service-overview-delving-into-detailed-analysis","title":"Service Overview - Delving into Detailed Analysis","text":"

When it becomes necessary to analyze inbound and outbound traffic separately, you can use the filter in the upper right corner to refine the data. After applying the filter, you can observe that the service has multiple operations with a non-zero error rate. To investigate further, inspect the traces generated by these operations during a specific time period by clicking "View Traces":

                                                                          "},{"location":"en/admin/insight/best-practice/find_root_cause.html#trace-details-identifying-and-eliminating-root-causes-of-errors","title":"Trace Details - Identifying and Eliminating Root Causes of Errors","text":"

In the trace list, you can easily identify traces marked as error (circled in red in the figure above) and examine their details by clicking on the corresponding trace. The following figure illustrates the trace details:

                                                                          Within the trace diagram, you can quickly locate the last piece of data in an error state. Expanding the associated logs section reveals the cause of the request error:

                                                                          Following the above analysis method, you can also identify traces related to other operation errors:

                                                                          "},{"location":"en/admin/insight/best-practice/find_root_cause.html#lets-get-started-with-your-analysis","title":"Let's Get Started with Your Analysis!","text":""},{"location":"en/admin/insight/best-practice/insight-kafka.html","title":"Kafka + Elasticsearch Stream Architecture for Handling Large-Scale Logs","text":"

As businesses grow, the amount of log data generated by applications increases significantly. To ensure that systems can properly collect and analyze massive amounts of log data, it is common practice to introduce a streaming architecture based on Kafka for asynchronous collection. The collected log data flows through Kafka and is consumed by the corresponding components, which then store the data into Elasticsearch for visualization and analysis using Insight.

                                                                          This article will introduce two solutions:

                                                                          • Fluentbit + Kafka + Logstash + Elasticsearch
                                                                          • Fluentbit + Kafka + Vector + Elasticsearch

                                                                          Once we integrate Kafka into the logging system, the data flow diagram looks as follows:

                                                                          Both solutions share similarities but differ in the component used to consume Kafka data. To ensure compatibility with Insight's data analysis, the format of the data consumed from Kafka and written into Elasticsearch should be consistent with the data directly written by Fluentbit to Elasticsearch.

                                                                          Let's first see how Fluentbit writes logs to Kafka:

                                                                          "},{"location":"en/admin/insight/best-practice/insight-kafka.html#modifying-fluentbit-output-configuration","title":"Modifying Fluentbit Output Configuration","text":"

Once the Kafka cluster is ready, we need to modify the Fluent Bit output configuration in the ConfigMap in the insight-system namespace. We will add three Kafka outputs and comment out the original three Elasticsearch outputs:

                                                                          Assuming the Kafka Brokers address is: insight-kafka.insight-system.svc.cluster.local:9092

[OUTPUT]\n        Name        kafka\n        Match_Regex (?:kube|syslog)\\.(.*)\n        Brokers     insight-kafka.insight-system.svc.cluster.local:9092\n        Topics      insight-logs\n        format      json\n        timestamp_key @timestamp\n        rdkafka.batch.size 65536\n        rdkafka.compression.level 6\n        rdkafka.compression.type lz4\n        rdkafka.linger.ms 0\n        rdkafka.log.connection.close false\n        rdkafka.message.max.bytes 2097152\n        rdkafka.request.required.acks 1\n    [OUTPUT]\n        Name        kafka\n        Match_Regex (?:skoala-gw)\\.(.*)\n        Brokers     insight-kafka.insight-system.svc.cluster.local:9092\n        Topics      insight-gw-skoala\n        format      json\n        timestamp_key @timestamp\n        rdkafka.batch.size 65536\n        rdkafka.compression.level 6\n        rdkafka.compression.type lz4\n        rdkafka.linger.ms 0\n        rdkafka.log.connection.close false\n        rdkafka.message.max.bytes 2097152\n        rdkafka.request.required.acks 1\n    [OUTPUT]\n        Name        kafka\n        Match_Regex (?:kubeevent)\\.(.*)\n        Brokers     insight-kafka.insight-system.svc.cluster.local:9092\n        Topics      insight-event\n        format      json\n        timestamp_key @timestamp\n        rdkafka.batch.size 65536\n        rdkafka.compression.level 6\n        rdkafka.compression.type lz4\n        rdkafka.linger.ms 0\n        rdkafka.log.connection.close false\n        rdkafka.message.max.bytes 2097152\n        rdkafka.request.required.acks 1\n

                                                                          Next, let's discuss the subtle differences in consuming Kafka data and writing it to Elasticsearch. As mentioned at the beginning of this article, we will explore Logstash and Vector as two ways to consume Kafka data.

                                                                          "},{"location":"en/admin/insight/best-practice/insight-kafka.html#consuming-kafka-and-writing-to-elasticsearch","title":"Consuming Kafka and Writing to Elasticsearch","text":"

                                                                          Assuming the Elasticsearch address is: https://mcamel-common-es-cluster-es-http.mcamel-system:9200

                                                                          "},{"location":"en/admin/insight/best-practice/insight-kafka.html#using-logstash-for-consumption","title":"Using Logstash for Consumption","text":"

                                                                          If you are familiar with the Logstash technology stack, you can continue using this approach.

                                                                          When deploying Logstash via Helm, you can add the following pipeline in the logstashPipeline section:

replicas: 3\nresources:\n  requests:\n    cpu: 100m\n    memory: 1536Mi\n  limits:\n    cpu: 1000m\n    memory: 1536Mi\nlogstashConfig:\n  logstash.yml: |\n    http.host: 0.0.0.0\n    xpack.monitoring.enabled: false\nlogstashPipeline:\n  insight-event.conf: |\n    input {\n      kafka {\n        add_field => {\"kafka_topic\" => \"insight-event\"}\n        topics => [\"insight-event\"]         \n        bootstrap_servers => \"172.30.120.189:32082\" # Kafka IP and port\n        enable_auto_commit => true\n        consumer_threads => 1                       # matches the number of partitions\n        decorate_events => true\n        codec => \"plain\"\n      }\n    }\n\n    filter {\n      mutate { gsub => [ \"message\", \"@timestamp\", \"_@timestamp\"] }\n      json {source => \"message\"}\n      date {\n        match => [ \"_@timestamp\", \"UNIX\" ]\n        remove_field => \"_@timestamp\"\n        remove_tag => \"_timestampparsefailure\"\n      }\n      mutate {\n        remove_field => [\"event\", \"message\"]\n      }\n    }\n\n    output {\n      if [kafka_topic] == \"insight-event\" {\n        elasticsearch {\n          hosts => [\"https://172.30.120.201:32427\"] # Elasticsearch address\n          user => 'elastic'                         # Elasticsearch username\n          ssl => 'true'\n          password => '0OWj4D54GTH3xK06f9Gg01Zk'    # Elasticsearch password\n          ssl_certificate_verification => 'false'\n          action => \"create\"\n          index => \"insight-es-k8s-event-logs-alias\"\n          data_stream => \"false\"\n        }\n      }\n    }\n  insight-gw-skoala.conf: |\n    input {\n      kafka {\n        add_field => {\"kafka_topic\" => \"insight-gw-skoala\"}\n        topics => [\"insight-gw-skoala\"]         \n        bootstrap_servers => \"172.30.120.189:32082\"\n        enable_auto_commit => true\n        consumer_threads => 1\n        decorate_events => true\n        codec => \"plain\"\n      }\n    }\n\n    filter {\n      mutate { gsub => [ \"message\", \"@timestamp\", \"_@timestamp\"] }\n      json {source => \"message\"}\n      date {\n        match => [ \"_@timestamp\", \"UNIX\" ]\n        remove_field => \"_@timestamp\"\n        remove_tag => \"_timestampparsefailure\"\n      }\n      mutate {\n        remove_field => [\"event\", \"message\"]\n      }\n    }\n\n    output {\n      if [kafka_topic] == \"insight-gw-skoala\" {\n        elasticsearch {\n          hosts => [\"https://172.30.120.201:32427\"]\n          user => 'elastic'\n          ssl => 'true'\n          password => '0OWj4D54GTH3xK06f9Gg01Zk'\n          ssl_certificate_verification => 'false'\n          action => \"create\"\n          index => \"skoala-gw-alias\"\n          data_stream => \"false\"\n        }\n      }\n    }\n  insight-logs.conf: |\n    input {\n      kafka {\n        add_field => {\"kafka_topic\" => \"insight-logs\"}\n        topics => [\"insight-logs\"]         \n        bootstrap_servers => \"172.30.120.189:32082\"   \n        enable_auto_commit => true\n        consumer_threads => 1\n        decorate_events => true\n        codec => \"plain\"\n      }\n    }\n\n    filter {\n      mutate { gsub => [ \"message\", \"@timestamp\", \"_@timestamp\"] }\n      json {source => \"message\"}\n      date {\n        match => [ \"_@timestamp\", \"UNIX\" ]\n        remove_field => \"_@timestamp\"\n        remove_tag => \"_timestampparsefailure\"\n      }\n      mutate 
{\n        remove_field => [\"event\", \"message\"]\n      }\n    }\n\n    output {\n      if [kafka_topic] == \"insight-logs\" {\n        elasticsearch {\n          hosts => [\"https://172.30.120.201:32427\"]\n          user => 'elastic'\n          ssl => 'true'\n          password => '0OWj4D54GTH3xK06f9Gg01Zk'\n          ssl_certificate_verification => 'false'\n          action => \"create\"\n          index => \"insight-es-k8s-logs-alias\"\n          data_stream => \"false\"\n        }\n      }\n    }\n
                                                                          "},{"location":"en/admin/insight/best-practice/insight-kafka.html#consumption-with-vector","title":"Consumption with Vector","text":"

                                                                          If you are familiar with the Vector technology stack, you can continue using this approach.

                                                                          When deploying Vector via Helm, you can reference a ConfigMap with the following rules:

metadata:\n  name: vector\napiVersion: v1\nkind: ConfigMap\ndata:\n  aggregator.yaml: |\n    api:\n      enabled: true\n      address: '0.0.0.0:8686'\n    sources:\n      insight_logs_kafka:\n        type: kafka\n        bootstrap_servers: 'insight-kafka.insight-system.svc.cluster.local:9092'\n        group_id: consumer-group-insight\n        topics:\n          - insight-logs\n      insight_event_kafka:\n        type: kafka\n        bootstrap_servers: 'insight-kafka.insight-system.svc.cluster.local:9092'\n        group_id: consumer-group-insight\n        topics:\n          - insight-event\n      insight_gw_skoala_kafka:\n        type: kafka\n        bootstrap_servers: 'insight-kafka.insight-system.svc.cluster.local:9092'\n        group_id: consumer-group-insight\n        topics:\n          - insight-gw-skoala\n    transforms:\n      insight_logs_remap:\n        type: remap\n        inputs:\n          - insight_logs_kafka\n        source: |2\n              . = parse_json!(string!(.message))\n              .@timestamp = now()\n      insight_event_kafka_remap:\n        type: remap\n        inputs:\n          - insight_event_kafka\n        source: |2\n              . = parse_json!(string!(.message))\n              .@timestamp = now()\n      insight_gw_skoala_kafka_remap:\n        type: remap\n        inputs:\n          - insight_gw_skoala_kafka\n        source: |2\n              . = parse_json!(string!(.message))\n              .@timestamp = now()\n    sinks:\n      insight_es_logs:\n        type: elasticsearch\n        inputs:\n          - insight_logs_remap\n        api_version: auto\n        auth:\n          strategy: basic\n          user: elastic\n          password: 8QZJ656ax3TXZqQh205l3Ee0\n        bulk:\n          index: insight-es-k8s-logs-alias-1418\n        endpoints:\n          - 'https://mcamel-common-es-cluster-es-http.mcamel-system:9200'\n        tls:\n          verify_certificate: false\n          verify_hostname: false\n      insight_es_event:\n        type: elasticsearch\n        inputs:\n          - insight_event_kafka_remap\n        api_version: auto\n        auth:\n          strategy: basic\n          user: elastic\n          password: 8QZJ656ax3TXZqQh205l3Ee0\n        bulk:\n          index: insight-es-k8s-event-logs-alias-1418\n        endpoints:\n          - 'https://mcamel-common-es-cluster-es-http.mcamel-system:9200'\n        tls:\n          verify_certificate: false\n          verify_hostname: false\n      insight_es_gw_skoala:\n        type: elasticsearch\n        inputs:\n          - insight_gw_skoala_kafka_remap\n        api_version: auto\n        auth:\n          strategy: basic\n          user: elastic\n          password: 8QZJ656ax3TXZqQh205l3Ee0\n        bulk:\n          index: skoala-gw-alias-1418\n        endpoints:\n          - 'https://mcamel-common-es-cluster-es-http.mcamel-system:9200'\n        tls:\n          verify_certificate: false\n          verify_hostname: false\n
                                                                          "},{"location":"en/admin/insight/best-practice/insight-kafka.html#checking-if-its-working-properly","title":"Checking if it's Working Properly","text":"

You can verify whether the configuration succeeded by checking for new data in the Insight log query interface, or by observing an increase in the number of indices in Elasticsearch, as sketched below.
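On the Elasticsearch side, a quick check is to list the relevant indices and watch their document counts grow. A sketch using Elasticsearch's _cat API, assuming the address and elastic user from the examples above (substitute your real password):

# List the Insight-related indices and their document counts
curl -k -u elastic:<password> \
  "https://mcamel-common-es-cluster-es-http.mcamel-system:9200/_cat/indices/insight-es-*,skoala-gw-*?v"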

                                                                          "},{"location":"en/admin/insight/best-practice/insight-kafka.html#references","title":"References","text":"
                                                                          • Logstash Helm Chart
                                                                          • Vector Helm Chart
                                                                          • Vector Practices
• Vector Performance
                                                                          "},{"location":"en/admin/insight/best-practice/integration_deepflow.html","title":"Integrate DeepFlow","text":"

                                                                          DeepFlow is an observability product based on eBPF. Its community edition has been integrated into Insight. The following is the integration process.

                                                                          "},{"location":"en/admin/insight/best-practice/integration_deepflow.html#prerequisites","title":"Prerequisites","text":"
• Insight is installed in your global service cluster
• The Insight version is v0.23.0 or later
• Understand and meet the DeepFlow runtime permissions and kernel requirements
• Storage volumes are ready
                                                                          "},{"location":"en/admin/insight/best-practice/integration_deepflow.html#install-deepflow-and-configure-insight","title":"Install DeepFlow and Configure Insight","text":"

                                                                          Installing DeepFlow components requires two charts:

                                                                          • deepflow: includes components such as deepflow-app, deepflow-server, deepflow-clickhouse, and deepflow-agent. Generally, deepflow is deployed in the global service cluster, so it also installs deepflow-agent together.
                                                                          • deepflow-agent: only includes the deepflow-agent component, used to collect eBPF data and send it to deepflow-server.
                                                                          "},{"location":"en/admin/insight/best-practice/integration_deepflow.html#install-deepflow","title":"Install DeepFlow","text":"

                                                                          DeepFlow needs to be installed in the global service cluster.

                                                                          1. Go to the kpanda-global-cluster cluster and click Helm Apps -> Helm Charts in the left navigation bar, select community as the repository, and search for deepflow in the search box:

                                                                          2. Click the deepflow card to enter the details page:

                                                                          3. Click Install to enter the installation page:

4. Most parameters have default values. ClickHouse and MySQL require storage volumes, each 10Gi by default. You can search for the relevant settings with the persistence keyword and modify them.

                                                                          5. After configuring, click OK to start the installation.

                                                                          "},{"location":"en/admin/insight/best-practice/integration_deepflow.html#configure-insight","title":"Configure Insight","text":"

                                                                          After installing DeepFlow, you also need to enable the related feature switches in Insight.

1. Click ConfigMaps & Keys -> ConfigMaps in the left navigation bar, search for insight-server-config in the search box, and edit it:

                                                                          2. In the YAML, find the eBPF Flow feature switch and enable it:

                                                                          3. Save the changes and restart insight-server. The Insight main page will display Network Observability :

                                                                          "},{"location":"en/admin/insight/best-practice/integration_deepflow.html#install-deepflow-agent","title":"Install DeepFlow Agent","text":"

                                                                          DeepFlow Agent is installed in the sub-cluster using the deepflow-agent chart. It is used to collect eBPF observability data from the sub-cluster and report it to the global service cluster. Similar to installing deepflow, go to Helm Apps -> Helm Charts, select community as the repository, and search for deepflow-agent in the search box. Follow the process to enter the installation page.

Parameter Explanation (a values sketch follows this list):

                                                                          • DeployComponent : deployment mode, default is daemonset.
                                                                          • timezone : timezone, default is Asia/Shanghai.
                                                                          • DeepflowServerNodeIPS : addresses of the nodes where deepflow server is installed.
                                                                          • deepflowK8sClusterID : cluster UUID.
                                                                          • agentGroupID : agent group ID.
                                                                          • controllerPort : data reporting port of deepflow server, can be left blank, default is 30035.
                                                                          • clusterNAME : cluster name.
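The parameters above map onto the chart's values. The following is a minimal values sketch for illustration only: the names come from the list above, but the actual nesting and defaults depend on the deepflow-agent chart version, and every value shown is a placeholder:

# Illustrative deepflow-agent values; replace all placeholders
DeployComponent: daemonset              # deployment mode
timezone: "Asia/Shanghai"               # timezone
DeepflowServerNodeIPS:                  # node addresses where deepflow server runs
  - "10.0.0.1"                          # placeholder
deepflowK8sClusterID: "<cluster-uuid>"  # placeholder cluster UUID
agentGroupID: "<agent-group-id>"        # placeholder agent group ID
controllerPort: 30035                   # default data reporting port
clusterNAME: "<cluster-name>"           # placeholder cluster name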

                                                                          After configuring, click OK to complete the installation.

                                                                          "},{"location":"en/admin/insight/best-practice/integration_deepflow.html#usage","title":"Usage","text":"

                                                                          After correctly installing DeepFlow, click Network Observability to enter the DeepFlow Grafana UI. It contains a large number of dashboards for viewing and helping analyze issues. Click DeepFlow Templates to browse all available dashboards:

                                                                          "},{"location":"en/admin/insight/best-practice/sw-to-otel.html","title":"Simplifying Trace Data Integration with OpenTelemetry and SkyWalking","text":"

                                                                          This article explains how to seamlessly integrate trace data from SkyWalking into the Insight platform, using OpenTelemetry. With zero code modification required, you can transform your existing SkyWalking trace data and leverage Insight's capabilities.

                                                                          "},{"location":"en/admin/insight/best-practice/sw-to-otel.html#understanding-the-code","title":"Understanding the Code","text":"

                                                                          To ensure compatibility with different distributed tracing implementations, OpenTelemetry provides a way to incorporate components that standardize data processing and output to various backends. While Jaeger and Zipkin are already available, we have contributed the SkyWalkingReceiver to the OpenTelemetry community. This receiver has been refined and is now suitable for use in production environments without any modifications to your application's code.

                                                                          Although SkyWalking and OpenTelemetry share similarities, such as using Trace to define a trace and Span to mark the smallest granularity, there are differences in certain details and implementations:

Aspect | SkyWalking | OpenTelemetry
Data Structure | Span -> Segment -> Trace | Span -> Trace
Attribute Information | Tags | Attributes
Application Time | Logs | Events
Reference Relationship | References | Links

                                                                          Now, let's discuss the steps involved in converting SkyWalking Trace to OpenTelemetry Trace. The main tasks include:

                                                                          1. Constructing OpenTelemetry's TraceId and SpanId

                                                                          2. Constructing OpenTelemetry's ParentSpanId

                                                                          3. Retaining SkyWalking's original TraceId, SegmentId, and SpanId in OpenTelemetry Spans

                                                                          First, let's look at how to construct the TraceId and SpanId for OpenTelemetry. Both SkyWalking and OpenTelemetry use TraceId to connect distributed service calls and use SpanId to mark each Span, but there are significant differences in the implementation specifications:

                                                                          Info

View GitHub for code implementation:

                                                                          1. Skywalking Receiver
                                                                          2. PR: Create skywalking component folder/structure
                                                                          3. PR: add Skywalking tracing receiver impl

                                                                          Specifically, the possible formats for SkyWalking TraceId and SegmentId are as follows:

                                                                          In the OpenTelemetry protocol, a Span is unique across all Traces, while in SkyWalking, a Span is only unique within each Segment. This means that to uniquely identify a Span in SkyWalking, it is necessary to combine the SegmentId and SpanId, and convert it to the SpanId in OpenTelemetry.

                                                                          Info

View GitHub for code implementation:

                                                                          1. Skywalking Receiver
                                                                          2. PR: Fix skywalking traceid and spanid convertion

                                                                          Next, let's see how to construct the ParentSpanId for OpenTelemetry. Within a Segment, the ParentSpanId field in SkyWalking can be directly used to construct the ParentSpanId field in OpenTelemetry. However, when a Trace spans multiple Segments, SkyWalking uses the association information represented by ParentTraceSegmentId and ParentSpanId in the Reference. In this case, the ParentSpanId in OpenTelemetry needs to be constructed using the information in the Reference.

                                                                          Code implementation can be found on GitHub: Skywalking Receiver

Finally, let's see how to preserve the original TraceId, SegmentId, and SpanId from SkyWalking in the OpenTelemetry Span. We carry this original information along to associate the OpenTelemetry TraceId and SpanId displayed in the distributed tracing backend with the SkyWalking TraceId, SegmentId, and SpanId in the application logs. We choose to carry the original SkyWalking TraceId, SegmentId, and ParentSegmentId in the OpenTelemetry Attributes.

                                                                          Info

View GitHub for code implementation:

                                                                          1. Skywalking Receiver
                                                                          2. Add extra link attributes from skywalking ref

                                                                          After this series of conversions, we have fully transformed the SkyWalking Segment Object into an OpenTelemetry Trace, as shown in the following diagram:

                                                                          "},{"location":"en/admin/insight/best-practice/sw-to-otel.html#deploying-the-demo","title":"Deploying the Demo","text":"

                                                                          To demonstrate the complete process of collecting and displaying SkyWalking tracing data using OpenTelemetry, we will use a demo application.

                                                                          First, deploy the OpenTelemetry Agent and enable the following configuration to ensure compatibility with the SkyWalking protocol:

                                                                          # otel-agent config\nreceivers:\n  skywalking:\n    protocols:\n      grpc:\n        endpoint: 0.0.0.0:11800 # Receive trace data reported by the SkyWalking Agent\n      http: \n        endpoint: 0.0.0.0:12800 # Receive trace data reported from the front-end / nginx or other HTTP protocols\nservice: \n  pipelines: \n    traces:      \n      receivers: [skywalking]\n\n# otel-agent service yaml\nspec:\n  ports: \n    - name: sw-http\n      port: 12800    \n      protocol: TCP    \n      targetPort: 12800 \n    - name: sw-grpc     \n      port: 11800 \n      protocol: TCP  \n      targetPort: 11800\n

Next, modify the connection of your business application from the SkyWalking OAP Service (e.g., oap:11800) to the OpenTelemetry Agent Service (e.g., otel-agent:11800), so that you start receiving trace data from the SkyWalking probe via OpenTelemetry.
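For a workload instrumented with the SkyWalking Java agent, this switch usually only requires repointing the agent's backend address, for example via an environment variable in the workload spec. A minimal sketch, assuming the otel-agent Service runs in the insight-system namespace (adjust the address to your deployment):

# Kubernetes container env snippet: point the SkyWalking Java agent
# at the OpenTelemetry Agent instead of the SkyWalking OAP
env:
  - name: SW_AGENT_COLLECTOR_BACKEND_SERVICES
    value: "otel-agent.insight-system:11800"   # assumed namespace; previously oap:11800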

                                                                          To demonstrate the entire process, we will use the SkyWalking-showcase Demo. This demo utilizes the SkyWalking Agent for tracing, and after being processed by OpenTelemetry, the final results are presented using Jaeger:

                                                                          From the architecture diagram of the SkyWalking Showcase, we can observe that the data remains intact even after standardization by OpenTelemetry. In this trace, the request starts from app/homepage, then two requests /rcmd and /songs/top are initiated simultaneously within the app, distributed to the recommendation and songs services, and finally reach the database for querying, completing the entire request chain.

                                                                          Additionally, you can view the original SkyWalking Id information on the Jaeger page, which facilitates correlation with application logs:

                                                                          By following these steps, you can seamlessly integrate SkyWalking trace data into OpenTelemetry and leverage the capabilities of the Insight platform.

                                                                          "},{"location":"en/admin/insight/best-practice/tail-based-sampling.html","title":"About Trace Sampling and Configuration","text":"

                                                                          Using distributed tracing, you can observe how requests flow through various systems in a distributed system. Undeniably, it is very useful for understanding service connections, diagnosing latency issues, and providing many other benefits.

However, if most of your requests are successful and complete with no unacceptable latency or errors, do you really need all this data? You can usually obtain the right insights through appropriate data sampling rather than by collecting a large amount of, or complete, data.

                                                                          The idea behind sampling is to control the traces sent to the observability collector, thereby reducing collection costs. Different organizations have different reasons for sampling, including why they want to sample and what types of data they wish to sample. Therefore, we need to customize the sampling strategy:

                                                                          • Cost Management: If a large amount of telemetry data needs to be stored, it incurs higher computational and storage costs.
                                                                          • Focus on Interesting Traces: Different organizations prioritize different data types.
                                                                          • Filter Out Noise: For example, you may want to filter out health checks.

                                                                          It is important to use consistent terminology when discussing sampling. A Trace or Span is considered sampled or unsampled:

                                                                          • Sampled: A Trace or Span that is processed and stored. It is chosen by the sampler to represent the overall data, so it is considered sampled.
                                                                          • Unsampled: A Trace or Span that is not processed or stored. Because it was not selected by the sampler, it is considered unsampled.
                                                                          "},{"location":"en/admin/insight/best-practice/tail-based-sampling.html#what-are-the-sampling-options","title":"What Are the Sampling Options?","text":""},{"location":"en/admin/insight/best-practice/tail-based-sampling.html#head-sampling","title":"Head Sampling","text":"

                                                                          Head sampling is a sampling technique used to make a sampling decision as early as possible. A decision to sample or drop a span or trace is not made by inspecting the trace as a whole.

For example, the most common form of head sampling is Consistent Probability Sampling. This is also referred to as Deterministic Sampling. In this case, a sampling decision is made based on the trace ID and the desired percentage of traces to sample. This ensures that whole traces are sampled - no missing spans - at a consistent rate, such as 5% of all traces.
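To make this concrete, head sampling of this kind is available in the OpenTelemetry Collector as the probabilistic sampler processor. A minimal sketch mirroring the 5% example above; the receiver and exporter names are placeholders for whatever your pipeline already uses:

processors:
  probabilistic_sampler:
    sampling_percentage: 5   # keep roughly 5% of traces, decided from the trace ID

service:
  pipelines:
    traces:
      receivers: [otlp]                    # placeholder
      processors: [probabilistic_sampler]
      exporters: [otlp]                    # placeholder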

The upsides to head sampling are:

• Easy to understand
• Easy to configure
• Efficient
• Can be done at any point in the trace collection pipeline

The primary downside to head sampling is that it is not possible to make a sampling decision based on data from the entire trace. This means that while head sampling is effective as a blunt instrument, it is completely insufficient for sampling strategies that must consider information from the entire system. For example, you cannot ensure that all traces containing an error are sampled with head sampling alone. For this situation and many others, you need tail sampling.

                                                                          "},{"location":"en/admin/insight/best-practice/tail-based-sampling.html#tail-sampling-recommended","title":"Tail Sampling (Recommended)","text":"

                                                                          Tail sampling is where the decision to sample a trace takes place by considering all or most of the spans within the trace. Tail Sampling gives you the option to sample your traces based on specific criteria derived from different parts of a trace, which isn\u2019t an option with Head Sampling.

                                                                          Some examples of how to use tail sampling include:

                                                                          • Always sampling traces that contain an error
                                                                          • Sampling traces based on overall latency
                                                                          • Sampling traces based on the presence or value of specific attributes on one or more spans in a trace; for example, sampling more traces originating from a newly deployed service
                                                                          • Applying different sampling rates to traces based on certain criteria, such as when traces only come from low-volume services versus traces with high-volume services

                                                                          As you can see, tail sampling allows for a much higher degree of sophistication in how you sample data. For larger systems that must sample telemetry, it is almost always necessary to use Tail Sampling to balance data volume with the usefulness of that data.

                                                                          There are three primary downsides to tail sampling today:

                                                                          • Tail sampling can be difficult to implement. Depending on the kind of sampling techniques available to you, it is not always a \u201cset and forget\u201d kind of thing. As your systems change, so too will your sampling strategies. For a large and sophisticated distributed system, rules that implement sampling strategies can also be large and sophisticated.
                                                                          • Tail sampling can be difficult to operate. The component(s) that implement tail sampling must be stateful systems that can accept and store a large amount of data. Depending on traffic patterns, this can require dozens or even hundreds of compute nodes that all utilize resources differently. Furthermore, a tail sampler might need to \u201cfall back\u201d to less computationally intensive sampling techniques if it is unable to keep up with the volume of data it is receiving. Because of these factors, it is critical to monitor tail-sampling components to ensure that they have the resources they need to make the correct sampling decisions.
                                                                          • Tail samplers often end up as vendor-specific technology today. If you\u2019re using a paid vendor for Observability, the most effective tail sampling options available to you might be limited to what the vendor offers.

                                                                          Finally, for some systems, tail sampling might be used in conjunction with Head Sampling. For example, a set of services that produce an extremely high volume of trace data might first use head sampling to sample only a small percentage of traces, and then later in the telemetry pipeline use tail sampling to make more sophisticated sampling decisions before exporting to a backend. This is often done in the interest of protecting the telemetry pipeline from being overloaded.

                                                                          Insight currently recommends using tail sampling and prioritizes support for tail sampling.

                                                                          The tail sampling processor samples traces based on a defined set of strategies. However, all spans of a trace must be received by the same collector instance to make effective sampling decisions.

                                                                          Therefore, adjustments need to be made to the Global OpenTelemetry Collector architecture of Insight to implement the tail sampling strategy.

                                                                          "},{"location":"en/admin/insight/best-practice/tail-based-sampling.html#specific-changes-to-insight","title":"Specific Changes to Insight","text":"

                                                                          Introduce an Opentelemetry Collector Gateway component with load balancing capabilities in front of the insight-opentelemetry-collector in the Global cluster, allowing the same group of Traces to be routed to the same Opentelemetry Collector instance based on the TraceID.

                                                                          1. Deploy an OTEL COL Gateway component with load balancing capabilities.

                                                                            If you are using Insight V0.25.x, you can quickly enable this by using the Helm Upgrade parameter --set opentelemetry-collector-gateway.enabled=true, thereby skipping the deployment process described below.
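  A command sketch for this quick path, assuming the release is named insight and installed in the insight-system namespace (substitute your actual release name and chart reference):

  helm upgrade insight <insight-chart> \
    -n insight-system \
    --reuse-values \
    --set opentelemetry-collector-gateway.enabled=true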

                                                                            Refer to the following YAML to deploy the component.

                                                                            Click to view deployment configuration
                                                                            kind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: insight-otel-collector-gateway\nrules:\n- apiGroups: [\"\"]\n  resources: [\"endpoints\"]\n  verbs: [\"get\", \"watch\", \"list\"]\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: insight-otel-collector-gateway\n  namespace: insight-system\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: insight-otel-collector-gateway\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: insight-otel-collector-gateway\nsubjects:\n- kind: ServiceAccount\n  name: insight-otel-collector-gateway\n  namespace: insight-system\n---\nkind: ConfigMap\nmetadata:\n  labels:\n    app.kubernetes.io/component: opentelemetry-collector\n    app.kubernetes.io/instance: insight-otel-collector-gateway\n    app.kubernetes.io/name: insight-otel-collector-gateway\n  name: insight-otel-collector-gateway-collector\n  namespace: insight-system\napiVersion: v1\ndata:\n  collector.yaml: |\n    receivers:\n      otlp:\n        protocols:\n          grpc:\n          http:\n      jaeger:\n        protocols:\n          grpc:\n    processors:\n\n    extensions:\n      health_check:\n      pprof:\n        endpoint: :1888\n      zpages:\n        endpoint: :55679\n    exporters:\n      logging:\n      loadbalancing:\n        routing_key: \"traceID\"\n        protocol:\n          otlp:\n            # all options from the OTLP exporter are supported\n            # except the endpoint\n            timeout: 1s\n            tls:\n              insecure: true\n        resolver:\n          k8s:\n            service: insight-opentelemetry-collector\n            ports:\n              - 4317\n    service:\n      extensions: [pprof, zpages, health_check]\n      pipelines:\n        traces:\n          receivers: [otlp, jaeger]\n          exporters: [loadbalancing]\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app.kubernetes.io/component: opentelemetry-collector\n    app.kubernetes.io/instance: insight-otel-collector-gateway\n    app.kubernetes.io/name: insight-otel-collector-gateway\n  name: insight-otel-collector-gateway\n  namespace: insight-system\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      app.kubernetes.io/component: opentelemetry-collector\n      app.kubernetes.io/instance: insight-otel-collector-gateway\n      app.kubernetes.io/name: insight-otel-collector-gateway\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/component: opentelemetry-collector\n        app.kubernetes.io/instance: insight-otel-collector-gateway\n        app.kubernetes.io/name: insight-otel-collector-gateway\n    spec:\n      containers:\n      - args:\n        - --config=/conf/collector.yaml\n        env:\n        - name: POD_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.name\n        image: ghcr.m.daocloud.io/openinsight-proj/opentelemetry-collector-contrib:5baef686672cfe5551e03b5c19d3072c432b6f33\n        imagePullPolicy: IfNotPresent\n        livenessProbe:\n          failureThreshold: 3\n          httpGet:\n            path: /\n            port: 13133\n            scheme: HTTP\n          periodSeconds: 10\n          successThreshold: 1\n          timeoutSeconds: 1\n        name: otc-container\n        resources:\n          limits:\n            cpu: '1'\n            memory: 2Gi\n          requests:\n            
cpu: 100m\n            memory: 400Mi\n        ports:\n        - containerPort: 14250\n          name: jaeger-grpc\n          protocol: TCP\n        - containerPort: 8888\n          name: metrics\n          protocol: TCP\n        - containerPort: 4317\n          name: otlp-grpc\n          protocol: TCP\n        - containerPort: 4318\n          name: otlp-http\n          protocol: TCP\n        - containerPort: 55679\n          name: zpages\n          protocol: TCP\n\n        volumeMounts:\n        - mountPath: /conf\n          name: otc-internal\n\n      serviceAccount: insight-otel-collector-gateway\n      serviceAccountName: insight-otel-collector-gateway\n      volumes:\n      - configMap:\n          defaultMode: 420\n          items:\n          - key: collector.yaml\n            path: collector.yaml\n          name: insight-otel-collector-gateway-collector\n        name: otc-internal\n---\nkind: Service\napiVersion: v1\nmetadata:\n  name: insight-opentelemetry-collector-gateway\n  namespace: insight-system\n  labels:\n    app.kubernetes.io/component: opentelemetry-collector\n    app.kubernetes.io/instance: insight-otel-collector-gateway\n    app.kubernetes.io/name: insight-otel-collector-gateway\nspec:\n  ports:\n    - name: fluentforward\n      protocol: TCP\n      port: 8006\n      targetPort: 8006\n    - name: jaeger-compact\n      protocol: UDP\n      port: 6831\n      targetPort: 6831\n    - name: jaeger-grpc\n      protocol: TCP\n      port: 14250\n      targetPort: 14250\n    - name: jaeger-thrift\n      protocol: TCP\n      port: 14268\n      targetPort: 14268\n    - name: metrics\n      protocol: TCP\n      port: 8888\n      targetPort: 8888\n    - name: otlp\n      protocol: TCP\n      appProtocol: grpc\n      port: 4317\n      targetPort: 4317\n    - name: otlp-http\n      protocol: TCP\n      port: 4318\n      targetPort: 4318\n    - name: zipkin\n      protocol: TCP\n      port: 9411\n      targetPort: 9411\n    - name: zpages\n      protocol: TCP\n      port: 55679\n      targetPort: 55679\n  selector:\n    app.kubernetes.io/component: opentelemetry-collector\n    app.kubernetes.io/instance: insight-otel-collector-gateway\n    app.kubernetes.io/name: insight-otel-collector-gateway\n
                                                                          2. Configure Tail Sampling Rules

                                                                            Note

                                                                            Tail sampling rules must be added to the existing insight-otel-collector-config ConfigMap.

                                                                          3. Add the following content under the processors section, adjusting the rules as needed; refer to the official OTel tail sampling example.

                                                                            ........\ntail_sampling:\n  decision_wait: 10s # Wait for 10 seconds, traces older than 10 seconds will no longer be processed\n  num_traces: 1500000  # Number of traces saved in memory, assuming 1000 traces per second, should not be less than 1000 * decision_wait * 2;\n                       # Setting it too large may consume too much memory resources, setting it too small may cause some traces to be dropped\n  expected_new_traces_per_sec: 10\n  policies: # Reporting policies\n    [\n        {\n          name: latency-policy,\n          type: latency,  # Report traces that exceed 500ms\n          latency: {threshold_ms: 500}\n        },\n        {\n          name: status_code-policy,\n          type: status_code,  # Report traces with ERROR status code\n          status_code: {status_codes: [ ERROR ]}\n        }\n    ]\n......\ntail_sampling: # Composite sampling\n  decision_wait: 10s # Wait for 10 seconds, traces older than 10 seconds will no longer be processed\n  num_traces: 1500000  # Number of traces saved in memory, assuming 1000 traces per second, should not be less than 1000 * decision_wait * 2;\n                       # Setting it too large may consume too much memory resources, setting it too small may cause some traces to be dropped\n  expected_new_traces_per_sec: 10\n  policies: [\n      {\n        name: debug-worker-cluster-sample-policy,\n        type: and,\n        and:\n          {\n            and_sub_policy:\n              [\n                {\n                  name: service-name-policy,\n                  type: string_attribute,\n                  string_attribute:\n                    { key: k8s.cluster.id, values: [xxxxxxx] },\n                },\n                {\n                  name: trace-status-policy,\n                  type: status_code,\n                  status_code: { status_codes: [ERROR] },\n                },\n                {\n                  name: probabilistic-policy,\n                  type: probabilistic,\n                  probabilistic: { sampling_percentage: 1 },\n                }\n              ]\n          }\n      }\n    ]\n
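                                                                            For orientation, here is a minimal hedged sketch of where tail_sampling sits inside the collector configuration; the surrounding processors and the policy values are illustrative only:
 
                                                                            processors:\n  # ...existing processors such as memory_limiter and batch stay as they are...\n  tail_sampling:\n    decision_wait: 10s\n    num_traces: 1500000\n    expected_new_traces_per_sec: 10\n    policies:\n      [\n        {\n          name: latency-policy,\n          type: latency,\n          latency: {threshold_ms: 500}\n        }\n      ]\n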
                                                                          4. Enable the processor in the traces pipeline of the OTel collector within the insight-otel-collector-config ConfigMap:

                                                                            traces:\n  exporters:\n    - servicegraph\n    - otlp/jaeger\n  processors:\n    - memory_limiter\n    - tail_sampling # \ud83d\udc48\n    - batch\n  receivers:\n    - otlp\n
                                                                          5. Restart the insight-opentelemetry-collector component.

                                                                          6. When deploying insight-agent, change the reporting address for trace data to the port 4317 address of the otel-col LB.

                                                                            ....\n    exporters:\n      otlp/global:\n        endpoint: insight-opentelemetry-collector-lb.insight-system.svc.cluster.local:4317  # \ud83d\udc48 Modify to lb address\n
                                                                          "},{"location":"en/admin/insight/best-practice/tail-based-sampling.html#reference","title":"Reference","text":"
                                                                          • sampling
                                                                          "},{"location":"en/admin/insight/collection-manag/agent-status.html","title":"insight-agent Component Status Explanation","text":"

                                                                          In AI platform, Insight acts as a multi-cluster observability product. To achieve unified data collection across multiple clusters, users need to install the Helm App insight-agent (installed by default in the insight-system namespace). Refer to How to Install insight-agent .

                                                                          "},{"location":"en/admin/insight/collection-manag/agent-status.html#status-explanation","title":"Status Explanation","text":"

                                                                          In the \"Observability\" -> \"Collection Management\" section, you can view the installation status of insight-agent in each cluster.

                                                                          • Not Installed : insight-agent is not installed in the insight-system namespace of the cluster.
                                                                          • Running : insight-agent is successfully installed in the cluster, and all deployed components are running.
                                                                          • Error : If insight-agent is in this state, the Helm deployment failed or some deployed components are not in a running state.

                                                                          You can troubleshoot using the following steps:

                                                                          1. Run the following command. If the status is deployed , proceed to the next step. If it is failed , uninstall and reinstall the release from Container Management -> Helm Apps , since a failed release may interfere with application upgrades:

                                                                            helm list -n insight-system\n
                                                                          2. Run the following command, or check the status of the deployed components in Insight -> Data Collection . If any Pods are not in the Running state, restart the abnormal containers.

                                                                            kubectl get pods -n insight-system\n
                                                                          "},{"location":"en/admin/insight/collection-manag/agent-status.html#additional-notes","title":"Additional Notes","text":"
                                                                          1. The resource consumption of the Prometheus metric collection component in insight-agent is directly proportional to the number of Pods running in the cluster. Please adjust the resources for Prometheus according to the cluster size. Refer to Prometheus Resource Planning.

                                                                          2. The storage capacity of the vmstorage metric storage component in the global service cluster is directly proportional to the total number of Pods in the clusters.

                                                                            • Please contact the platform administrator to adjust the disk capacity of vmstorage based on the cluster size. Refer to vmstorage Disk Capacity Planning.
                                                                            • Adjust the vmstorage disk based on multi-cluster scale; a hedged sketch follows this list. Refer to vmstorage Disk Expansion.
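                                                                            As an illustration only, expansion is usually performed by enlarging the PersistentVolumeClaim that backs vmstorage; the PVC name and target size below are hypothetical, and the StorageClass must support volume expansion:
 
                                                                            apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: vmstorage-insight-victoria-metrics-k8s-stack-0 # hypothetical PVC name\n  namespace: insight-system\nspec:\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 100Gi # raise to the planned capacity\n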
                                                                          "},{"location":"en/admin/insight/collection-manag/collection-manag.html","title":"Data Collection","text":"

                                                                          Data Collection centrally manages and displays the installation status of the collection plug-in insight-agent across clusters, helping users quickly check the health of each cluster's collection plug-in and providing a quick entry for configuring collection rules.

                                                                          The specific operation steps are as follows:

                                                                          1. Click in the upper left corner and select Insight -> Data Collection .

                                                                          2. You can view the status of all cluster collection plug-ins.

                                                                          3. When the cluster is connected to insight-agent and it is running, click a cluster name to enter the details.

                                                                          4. In the Service Monitor tab, click the shortcut link to jump to Container Management -> CRD to add service discovery rules.

                                                                          "},{"location":"en/admin/insight/collection-manag/metric-collect.html","title":"Metrics Retrieval Methods","text":"

                                                                          Prometheus primarily uses the Pull approach to retrieve monitoring metrics from the endpoints exposed by target services. Therefore, it requires configuring appropriate scraping jobs to request monitoring data and write it into the storage provided by Prometheus. Currently, Prometheus offers several configurations for these jobs:

                                                                          • Native Job Configuration: This provides native Prometheus job configuration for scraping.
                                                                          • Pod Monitor: In the Kubernetes ecosystem, it allows scraping of monitoring data from Pods using Prometheus Operator.
                                                                          • Service Monitor: In the Kubernetes ecosystem, it allows scraping monitoring data from Endpoints of Services using Prometheus Operator.

                                                                          Note

                                                                          [ ] indicates an optional configuration item.

                                                                          "},{"location":"en/admin/insight/collection-manag/metric-collect.html#native-job-configuration","title":"Native Job Configuration","text":"

                                                                          The corresponding configuration items are explained as follows:

                                                                          # Name of the scraping job, also adds a label (job=job_name) to the scraped metrics\njob_name: <job_name>\n\n# Time interval between scrapes\n[ scrape_interval: <duration> | default = <global_config.scrape_interval> ]\n\n# Timeout for scrape requests\n[ scrape_timeout: <duration> | default = <global_config.scrape_timeout> ]\n\n# URI path for the scrape request\n[ metrics_path: <path> | default = /metrics ]\n\n# Handling of label conflicts between scraped labels and labels added by the backend Prometheus.\n# true: Retains the scraped labels and ignores conflicting labels from the backend Prometheus.\n# false: Adds an \"exported_<original-label>\" prefix to the scraped labels and includes the additional labels added by the backend Prometheus.\n[ honor_labels: <boolean> | default = false ]\n\n# Whether to use the timestamp generated by the target being scraped.\n# true: Uses the timestamp from the target if available.\n# false: Ignores the timestamp from the target.\n[ honor_timestamps: <boolean> | default = true ]\n\n# Protocol for the scrape request: http or https\n[ scheme: <scheme> | default = http ]\n\n# URL parameters for the scrape request\nparams:\n  [ <string>: [<string>, ...] ]\n\n# Set the value of the `Authorization` header in the scrape request through basic authentication. password/password_file are mutually exclusive, with password_file taking precedence.\nbasic_auth:\n  [ username: <string> ]\n  [ password: <secret> ]\n  [ password_file: <string> ]\n\n# Set the value of the `Authorization` header in the scrape request through bearer token authentication. bearer_token/bearer_token_file are mutually exclusive, with bearer_token taking precedence.\n[ bearer_token: <secret> ]\n\n# Set the value of the `Authorization` header in the scrape request through bearer token authentication. bearer_token/bearer_token_file are mutually exclusive, with bearer_token taking precedence.\n[ bearer_token_file: <filename> ]\n\n# Whether the scrape connection should use a TLS secure channel, configure the proper TLS parameters\ntls_config:\n  [ <tls_config> ]\n\n# Use a proxy service to scrape the metrics from the target, specify the address of the proxy service.\n[ proxy_url: <string> ]\n\n# Specify the targets using static configuration, see explanation below.\nstatic_configs:\n  [ - <static_config> ... ]\n\n# CVM service discovery configuration, see explanation below.\ncvm_sd_configs:\n  [ - <cvm_sd_config> ... ]\n\n# After scraping the data, rewrite the labels of the proper target using the relabel mechanism. Executes multiple relabel rules in order.\n# See explanation below for relabel_config.\nrelabel_configs:\n  [ - <relabel_config> ... ]\n\n# Before writing the scraped data, rewrite the values of the labels using the relabel mechanism. Executes multiple relabel rules in order.\n# See explanation below for relabel_config.\nmetric_relabel_configs:\n  [ - <relabel_config> ... ]\n\n# Limit the number of data points per scrape, 0: no limit, default is 0\n[ sample_limit: <int> | default = 0 ]\n\n# Limit the number of targets per scrape, 0: no limit, default is 0\n[ target_limit: <int> | default = 0 ]\n
                                                                          "},{"location":"en/admin/insight/collection-manag/metric-collect.html#pod-monitor","title":"Pod Monitor","text":"

                                                                          The corresponding configuration items are explained as follows:

                                                                          # Prometheus Operator CRD version\napiVersion: monitoring.coreos.com/v1\n# proper Kubernetes resource type, here it is PodMonitor\nkind: PodMonitor\n# proper Kubernetes Metadata, only the name needs to be concerned. If jobLabel is not specified, the value of the job label in the scraped metrics will be <namespace>/<name>\nmetadata:\n  name: redis-exporter # Specify a unique name\n  namespace: cm-prometheus  # Fixed namespace, no need to modify\n# Describes the selection and configuration of the target Pods to be scraped\n  labels:\n    operator.insight.io/managed-by: insight # Label indicating managed by Insight\nspec:\n  # Specify the label of the proper Pod, pod monitor will use this value as the job label value.\n  # If viewing the Pod YAML, use the values in pod.metadata.labels.\n  # If viewing Deployment/Daemonset/Statefulset, use spec.template.metadata.labels.\n  [ jobLabel: string ]\n  # Adds the proper Pod's Labels to the Target's Labels\n  [ podTargetLabels: []string ]\n  # Limit the number of data points per scrape, 0: no limit, default is 0\n  [ sampleLimit: uint64 ]\n  # Limit the number of targets per scrape, 0: no limit, default is 0\n  [ targetLimit: uint64 ]\n  # Configure the Prometheus HTTP endpoints that need to be scraped and exposed. Multiple endpoints can be configured.\n  podMetricsEndpoints:\n  [ - <endpoint_config> ... ] # See explanation below for endpoint\n  # Select the namespaces where the monitored Pods are located. Leave it blank to select all namespaces.\n  [ namespaceSelector: ]\n    # Select all namespaces\n    [ any: bool ]\n    # Specify the list of namespaces to be selected\n    [ matchNames: []string ]\n  # Specify the Label values of the Pods to be monitored in order to locate the target Pods [K8S metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)\n  selector:\n    [ matchExpressions: array ]\n      [ example: - {key: tier, operator: In, values: [cache]} ]\n    [ matchLabels: object ]\n      [ example: k8s-app: redis-exporter ]\n
                                                                          "},{"location":"en/admin/insight/collection-manag/metric-collect.html#example-1","title":"Example 1","text":"
                                                                          apiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: redis-exporter # Specify a unique name\n  namespace: cm-prometheus # Fixed namespace, do not modify\n  labels:\n    operator.insight.io/managed-by: insight  # Label indicating managed by Insight, required.\nspec:\n  podMetricsEndpoints:\n    - interval: 30s\n      port: metric-port # Specify the Port Name proper to Prometheus Exporter in the pod YAML\n      path: /metrics # Specify the value of the Path proper to Prometheus Exporter, if not specified, default is /metrics\n      relabelings:\n        - action: replace\n          sourceLabels:\n            - instance\n          regex: (.*)\n          targetLabel: instance\n          replacement: \"crs-xxxxxx\" # Adjust to the proper Redis instance ID\n        - action: replace\n          sourceLabels:\n            - instance\n          regex: (.*)\n          targetLabel: ip\n          replacement: \"1.x.x.x\" # Adjust to the proper Redis instance IP\n  namespaceSelector: # Select the namespaces where the monitored Pods are located\n    matchNames:\n      - redis-test\n  selector: # Specify the Label values of the Pods to be monitored in order to locate the target pods\n    matchLabels:\n      k8s-app: redis-exporter\n
                                                                          "},{"location":"en/admin/insight/collection-manag/metric-collect.html#example-2","title":"Example 2","text":"
                                                                          job_name: prometheus\nscrape_interval: 30s\nstatic_configs:\n- targets:\n  - 127.0.0.1:9090\n
                                                                          "},{"location":"en/admin/insight/collection-manag/metric-collect.html#service-monitor","title":"Service Monitor","text":"

                                                                          The corresponding configuration items are explained as follows:

                                                                          # Prometheus Operator CRD version\napiVersion: monitoring.coreos.com/v1\n# proper Kubernetes resource type, here it is ServiceMonitor\nkind: ServiceMonitor\n# proper Kubernetes Metadata, only the name needs to be concerned. If jobLabel is not specified, the value of the job label in the scraped metrics will be the name of the Service.\nmetadata:\n  name: redis-exporter # Specify a unique name\n  namespace: cm-prometheus  # Fixed namespace, no need to modify\n# Describes the selection and configuration of the target Pods to be scraped\n  labels:\n    operator.insight.io/managed-by: insight # Label indicating managed by Insight, required.\nspec:\n  # Specify the label(metadata/labels) of the proper Pod, service monitor will use this value as the job label value.\n  [ jobLabel: string ]\n  # Adds the Labels of the proper service to the Target's Labels\n  [ targetLabels: []string ]\n  # Adds the Labels of the proper Pod to the Target's Labels\n  [ podTargetLabels: []string ]\n  # Limit the number of data points per scrape, 0: no limit, default is 0\n  [ sampleLimit: uint64 ]\n  # Limit the number of targets per scrape, 0: no limit, default is 0\n  [ targetLimit: uint64 ]\n  # Configure the Prometheus HTTP endpoints that need to be scraped and exposed. Multiple endpoints can be configured.\n  endpoints:\n  [ - <endpoint_config> ... ] # See explanation below for endpoint\n  # Select the namespaces where the monitored Pods are located. Leave it blank to select all namespaces.\n  [ namespaceSelector: ]\n    # Select all namespaces\n    [ any: bool ]\n    # Specify the list of namespaces to be selected\n    [ matchNames: []string ]\n  # Specify the Label values of the Pods to be monitored in order to locate the target Pods [K8S metav1.LabelSelector](https://v1-17.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#labelselector-v1-meta)\n  selector:\n    [ matchExpressions: array ]\n      [ example: - {key: tier, operator: In, values: [cache]} ]\n    [ matchLabels: object ]\n      [ example: k8s-app: redis-exporter ]\n
                                                                          "},{"location":"en/admin/insight/collection-manag/metric-collect.html#example","title":"Example","text":"
                                                                          apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: go-demo # Specify a unique name\n  namespace: cm-prometheus # Fixed namespace, do not modify\n  labels:\n    operator.insight.io/managed-by: insight  # Label indicating managed by Insight, required.\nspec:\n  endpoints:\n    - interval: 30s\n      # Specify the Port Name proper to Prometheus Exporter in the service YAML\n      port: 8080-8080-tcp\n      # Specify the value of the Path proper to Prometheus Exporter, if not specified, default is /metrics\n      path: /metrics\n      relabelings:\n        # ** There must be a label named 'application', assuming there is a label named 'app' in k8s,\n        # we replace it with 'application' using the relabel 'replace' action\n        - action: replace\n          sourceLabels: [__meta_kubernetes_pod_label_app]\n          targetLabel: application\n  # Select the namespace where the monitored service is located\n  namespaceSelector:\n    matchNames:\n      - golang-demo\n  # Specify the Label values of the service to be monitored in order to locate the target service\n  selector:\n    matchLabels:\n      app: golang-app-demo\n
                                                                          "},{"location":"en/admin/insight/collection-manag/metric-collect.html#endpoint_config","title":"endpoint_config","text":"

                                                                          The corresponding configuration items are explained as follows:

                                                                          # The name of the proper port. Please note that it's not the actual port number.\n# Default: 80. Possible values are as follows:\n# ServiceMonitor: corresponds to Service>spec/ports/name;\n# PodMonitor: explained as follows:\n#   If viewing the Pod YAML, take the value from pod.spec.containers.ports.name.\n#   If viewing Deployment/DaemonSet/StatefulSet, take the value from spec.template.spec.containers.ports.name.\n[ port: string | default = 80]\n# The URI path for the scrape request.\n[ path: string | default = /metrics ]\n# The protocol for the scrape: http or https.\n[ scheme: string | default = http]\n# URL parameters for the scrape request.\n[ params: map[string][]string]\n# The interval between scrape requests.\n[ interval: string | default = 30s ]\n# The timeout for the scrape request.\n[ scrapeTimeout: string | default = 30s]\n# Whether the scrape connection should be made over a secure TLS channel, and the TLS configuration.\n[ tlsConfig: TLSConfig ]\n# Read the bearer token value from the specified file and include it in the headers of the scrape request.\n[ bearerTokenFile: string ]\n# Read the bearer token from the specified K8S secret key. Note that the secret namespace must match the PodMonitor/ServiceMonitor.\n[ bearerTokenSecret: string ]\n# Handling conflicts when scraped labels conflict with labels added by the backend Prometheus.\n# true: Keep the scraped labels and ignore the conflicting labels from the backend Prometheus.\n# false: For conflicting labels, prefix the scraped label with 'exported_<original-label>' and add the labels added by the backend Prometheus.\n[ honorLabels: bool | default = false ]\n# Whether to use the timestamp generated on the target during the scrape.\n# true: Use the timestamp on the target if available.\n# false: Ignore the timestamp on the target.\n[ honorTimestamps: bool | default = true ]\n# Basic authentication credentials. Fill in the values of username/password from the proper K8S secret key. Note that the secret namespace must match the PodMonitor/ServiceMonitor.\n[ basicAuth: BasicAuth ]\n# Scrape the metrics from the target through a proxy server. Specify the address of the proxy server.\n[ proxyUrl: string ]\n# After scraping the data, rewrite the values of the labels on the target using the relabeling mechanism. Multiple relabel rules are executed in order.\n# See explanation below for relabel_config\nrelabelings:\n[ - <relabel_config> ...]\n# Before writing the scraped data, rewrite the values of the proper labels on the target using the relabeling mechanism. Multiple relabel rules are executed in order.\n# See explanation below for relabel_config\nmetricRelabelings:\n[ - <relabel_config> ...]\n
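                                                                          To make the shape concrete, a minimal hedged endpoint sketch; the port name, path, and intervals are illustrative and must match your own Service or Pod definition:
 
                                                                          endpoints:\n  - port: metrics # name of the port, not the port number\n    path: /metrics\n    interval: 30s\n    scrapeTimeout: 10s\n    honorLabels: false\n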
                                                                          "},{"location":"en/admin/insight/collection-manag/metric-collect.html#relabel_config","title":"relabel_config","text":"

                                                                          The corresponding configuration items are explained as follows:

                                                                          # Specifies which labels to take from the original labels for relabeling. The values taken are concatenated using the separator defined in the configuration.\n# For PodMonitor/ServiceMonitor, the proper configmap is sourceLabels.\n[ source_labels: '[' <labelname> [, ...] ']' ]\n# Defines the character used to concatenate the values of the labels to be relabeled. Default is ';'.\n[ separator: <string> | default = ; ]\n\n# When the action is replace/hashmod, target_label is used to specify the proper label name.\n# For PodMonitor/ServiceMonitor, the proper configmap is targetLabel.\n[ target_label: <labelname> ]\n\n# Regular expression used to match the values of the source labels.\n[ regex: <regex> | default = (.*) ]\n\n# Used when action is hashmod, it takes the modulus value based on the MD5 hash of the source label's value.\n[ modulus: <int> ]\n\n# Used when action is replace, it defines the expression to replace when the regex matches. It can use regular expression replacement with regex.\n[ replacement: <string> | default = $1 ]\n\n# Actions performed based on the matched values of regex. The available actions are as follows, with replace being the default:\n# replace: If the regex matches, replace the proper value with the value defined in replacement. Set the value using target_label and add the proper label.\n# keep: If the regex doesn't match, discard the value.\n# drop: If the regex matches, discard the value.\n# hashmod: Take the modulus of the MD5 hash of the source label's value based on the value specified in modulus.\n# Add a new label with a label name specified by target_label.\n# labelmap: If the regex matches, replace the proper label name with the value specified in replacement.\n# labeldrop: If the regex matches, delete the proper label.\n# labelkeep: If the regex doesn't match, delete the proper label.\n[ action: <relabel_action> | default = replace ]\n
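                                                                          As an example, here is a hedged relabeling rule (in PodMonitor/ServiceMonitor spelling) that keeps only targets whose app label matches a given exporter; the label value is a placeholder:
 
                                                                          relabelings:\n  - action: keep # drop any target whose app label does not match\n    sourceLabels: [__meta_kubernetes_pod_label_app]\n    regex: redis-exporter\n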
                                                                          "},{"location":"en/admin/insight/collection-manag/probe-module.html","title":"Custom probers","text":"

                                                                          Insight uses the Blackbox Exporter provided by Prometheus as a blackbox monitoring solution, allowing detection of target instances via HTTP, HTTPS, DNS, ICMP, TCP, and gRPC. It can be used in the following scenarios:

                                                                          • HTTP/HTTPS: URL/API availability monitoring
                                                                          • ICMP: Host availability monitoring
                                                                          • TCP: Port availability monitoring
                                                                          • DNS: Domain name resolution

                                                                          In this page, we will explain how to configure custom probers in an existing Blackbox ConfigMap.

                                                                          The ICMP prober is not enabled by default in Insight because it requires elevated permissions. Therefore, we will use the HTTP prober as an example to demonstrate how to modify the ConfigMap to achieve custom HTTP probing.

                                                                          "},{"location":"en/admin/insight/collection-manag/probe-module.html#procedure","title":"Procedure","text":"
                                                                          1. Go to Clusters in Container Management and enter the details of the target cluster.
                                                                          2. Click the left navigation bar and select ConfigMaps & Secrets -> ConfigMaps .
                                                                          3. Find the ConfigMap named insight-agent-prometheus-blackbox-exporter and click Edit YAML .

                                                                            Add custom probers under modules :

                                                                          HTTP Prober:
                                                                          modules:\n  http_2xx:\n    prober: http\n    timeout: 5s\n    http:\n      valid_http_versions: [\"HTTP/1.1\", \"HTTP/2.0\"]\n      valid_status_codes: []  # Defaults to 2xx\n      method: GET\n
                                                                          ICMP Prober:
                                                                          modules:\n  ICMP: # Example 1 of ICMP prober configuration\n    prober: icmp\n    timeout: 5s\n    icmp:\n      preferred_ip_protocol: ip4\n  icmp_example: # Example 2 of ICMP prober configuration\n    prober: icmp\n    timeout: 5s\n    icmp:\n      preferred_ip_protocol: \"ip4\"\n      source_ip_address: \"127.0.0.1\"\n
                                                                          Since ICMP requires higher permissions, we also need to elevate the pod permissions. Otherwise, an operation not permitted error will occur. There are two ways to elevate permissions:

                                                                          • Directly edit the BlackBox Exporter deployment file to enable it

                                                                            apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: insight-agent-prometheus-blackbox-exporter\n  namespace: insight-system\nspec:\n  template:\n    spec:\n      containers:\n        - name: blackbox-exporter\n          image: # ... (image, args, ports, etc. remain unchanged)\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            allowPrivilegeEscalation: false\n            capabilities:\n              add:\n              - NET_RAW\n              drop:\n              - ALL\n            readOnlyRootFilesystem: true\n            runAsGroup: 0\n            runAsNonRoot: false\n            runAsUser: 0\n
                                                                          • Elevate permissions via helm upgrade

                                                                            prometheus-blackbox-exporter:\n  enabled: true\n  securityContext:\n    runAsUser: 0\n    runAsGroup: 0\n    readOnlyRootFilesystem: true\n    runAsNonRoot: false\n    allowPrivilegeEscalation: false\n    capabilities:\n      add: [\"NET_RAW\"]\n

                                                                          Info

                                                                          For more probers, refer to blackbox_exporter Configuration.

                                                                          "},{"location":"en/admin/insight/collection-manag/probe-module.html#other-references","title":"Other References","text":"

                                                                          The following YAML file contains various probers such as HTTP, TCP, SMTP, ICMP, and DNS. You can modify the configuration file of insight-agent-prometheus-blackbox-exporter according to your needs.

                                                                          Click to view the complete YAML file
                                                                          kind: ConfigMap\napiVersion: v1\nmetadata:\n  name: insight-agent-prometheus-blackbox-exporter\n  namespace: insight-system\n  labels:\n    app.kubernetes.io/instance: insight-agent\n    app.kubernetes.io/managed-by: Helm\n    app.kubernetes.io/name: prometheus-blackbox-exporter\n    app.kubernetes.io/version: v0.24.0\n    helm.sh/chart: prometheus-blackbox-exporter-8.8.0\n  annotations:\n    meta.helm.sh/release-name: insight-agent\n    meta.helm.sh/release-namespace: insight-system\ndata:\n  blackbox.yaml: |\n    modules:\n      HTTP_GET:\n        prober: http\n        timeout: 5s\n        http:\n          method: GET\n          valid_http_versions: [\"HTTP/1.1\", \"HTTP/2.0\"]\n          follow_redirects: true\n          preferred_ip_protocol: \"ip4\"\n      HTTP_POST:\n        prober: http\n        timeout: 5s\n        http:\n          method: POST\n          body_size_limit: 1MB\n      TCP:\n        prober: tcp\n        timeout: 5s\n      # Not enabled by default:\n      # ICMP:\n      #   prober: icmp\n      #   timeout: 5s\n      #   icmp:\n      #     preferred_ip_protocol: ip4\n      SSH:\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n          - expect: \"^SSH-2.0-\"\n      POP3S:\n        prober: tcp\n        tcp:\n          query_response:\n          - expect: \"^+OK\"\n          tls: true\n          tls_config:\n            insecure_skip_verify: false\n      http_2xx_example:               # http prober example\n        prober: http\n        timeout: 5s                   # probe timeout\n        http:\n          valid_http_versions: [\"HTTP/1.1\", \"HTTP/2.0\"]                   # Version in the response, usually default\n          valid_status_codes: []  # Defaults to 2xx                       # Valid range of response codes, probe successful if within this range\n          method: GET                 # request method\n          headers:                    # request headers\n            Host: vhost.example.com\n            Accept-Language: en-US\n            Origin: example.com\n          no_follow_redirects: false  # allow redirects\n          fail_if_ssl: false   \n          fail_if_not_ssl: false\n          fail_if_body_matches_regexp:\n            - \"Could not connect to database\"\n          fail_if_body_not_matches_regexp:\n            - \"Download the latest version here\"\n          fail_if_header_matches: # Verifies that no cookies are set\n            - header: Set-Cookie\n              allow_missing: true\n              regexp: '.*'\n          fail_if_header_not_matches:\n            - header: Access-Control-Allow-Origin\n              regexp: '(\\*|example\\.com)'\n          tls_config:                  # tls configuration for https requests\n            insecure_skip_verify: false\n          preferred_ip_protocol: \"ip4\" # defaults to \"ip6\"                 # Preferred IP protocol version\n          ip_protocol_fallback: false  # no fallback to \"ip6\"            \n      http_post_2xx:                   # http prober example with body\n        prober: http\n        timeout: 5s\n        http:\n          method: POST                 # probe request method\n          headers:\n            Content-Type: application/json\n          body: '{\"username\":\"admin\",\"password\":\"123456\"}'                   # body carried during probe\n      http_basic_auth_example:         # prober example with username and password\n        prober: http\n        timeout: 
5s\n        http:\n          method: POST\n          headers:\n            Host: \"login.example.com\"\n          basic_auth:                  # username and password to be added during probe\n            username: \"username\"\n            password: \"mysecret\"\n      http_custom_ca_example:\n        prober: http\n        http:\n          method: GET\n          tls_config:                  # root certificate used during probe\n            ca_file: \"/certs/my_cert.crt\"\n      http_gzip:\n        prober: http\n        http:\n          method: GET\n          compression: gzip            # compression method used during probe\n      http_gzip_with_accept_encoding:\n        prober: http\n        http:\n          method: GET\n          compression: gzip\n          headers:\n            Accept-Encoding: gzip\n      tls_connect:                     # TCP prober example\n        prober: tcp\n        timeout: 5s\n        tcp:\n          tls: true                    # use TLS\n      tcp_connect_example:\n        prober: tcp\n        timeout: 5s\n      imap_starttls:                   # IMAP email server probe configuration example\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - expect: \"OK.*STARTTLS\"\n            - send: \". STARTTLS\"\n            - expect: \"OK\"\n            - starttls: true\n            - send: \". capability\"\n            - expect: \"CAPABILITY IMAP4rev1\"\n      smtp_starttls:                   # SMTP email server probe configuration example\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - expect: \"^220 ([^ ]+) ESMTP (.+)$\"\n            - send: \"EHLO prober\\r\"\n            - expect: \"^250-STARTTLS\"\n            - send: \"STARTTLS\\r\"\n            - expect: \"^220\"\n            - starttls: true\n            - send: \"EHLO prober\\r\"\n            - expect: \"^250-AUTH\"\n            - send: \"QUIT\\r\"\n      irc_banner_example:\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - send: \"NICK prober\"\n            - send: \"USER prober prober prober :prober\"\n            - expect: \"PING :([^ ]+)\"\n              send: \"PONG ${1}\"\n            - expect: \"^:[^ ]+ 001\"\n      # icmp_example:                    # ICMP prober configuration example\n      #  prober: icmp\n      #  timeout: 5s\n      #  icmp:\n      #    preferred_ip_protocol: \"ip4\"\n      #    source_ip_address: \"127.0.0.1\"\n      dns_udp_example:                 # DNS query example using UDP\n        prober: dns\n        timeout: 5s\n        dns:\n          query_name: \"www.prometheus.io\"                 # domain name to resolve\n          query_type: \"A\"              # type proper to this domain\n          valid_rcodes:\n          - NOERROR\n          validate_answer_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n            fail_if_all_match_regexp:\n            - \".*127.0.0.1\"\n            fail_if_not_matches_regexp:\n            - \"www.prometheus.io.\\t300\\tIN\\tA\\t127.0.0.1\"\n            fail_if_none_matches_regexp:\n            - \"127.0.0.1\"\n          validate_authority_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n          validate_additional_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n      dns_soa:\n        prober: dns\n        dns:\n          query_name: \"prometheus.io\"\n          query_type: \"SOA\"\n      dns_tcp_example:               # 
DNS query example using TCP\n        prober: dns\n        dns:\n          transport_protocol: \"tcp\" # defaults to \"udp\"\n          preferred_ip_protocol: \"ip4\" # defaults to \"ip6\"\n          query_name: \"www.prometheus.io\"\n
                                                                          "},{"location":"en/admin/insight/collection-manag/service-monitor.html","title":"Configure service discovery rules","text":"

                                                                          Insight supports creating the ServiceMonitor CRD through Container Management to meet custom service discovery and collection requirements. Users can use a ServiceMonitor to define the namespace scope for Pod discovery and select the monitored Services through matchLabel .

                                                                          "},{"location":"en/admin/insight/collection-manag/service-monitor.html#prerequisites","title":"Prerequisites","text":"

                                                                          The cluster has the Helm App insight-agent installed and in the running state.

                                                                          "},{"location":"en/admin/insight/collection-manag/service-monitor.html#steps","title":"Steps","text":"
                                                                          1. Select Data Collection on the left navigation bar to view the status of all cluster collection plug-ins.

                                                                          2. Click a cluster name to enter the collection configuration details.

                                                                          3. Click the link to jump to Container Management to create a Service Monitor.

                                                                            apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: micrometer-demo # (1)\n  namespace: insight-system # (2)\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  endpoints: # (3)\n    - honorLabels: true\n      interval: 15s\n      path: /actuator/prometheus\n      port: http\n  namespaceSelector: # (4)\n    matchNames:\n      - insight-system  # (5)\n  selector: # (6)\n    matchLabels:\n      micrometer-prometheus-discovery: \"true\"\n
                                                                            1. Specify the name of the ServiceMonitor.
                                                                            2. Specify the namespace of the ServiceMonitor.
                                                                            3. This is the service endpoint, which represents the address where Prometheus collects Metrics. endpoints is an array, and multiple endpoints can be created at the same time. Each endpoint contains three fields, and the meaning of each field is as follows:

                                                                              • interval : Specifies Prometheus's collection interval for the current endpoint , set to 15s in this example.
                                                                              • path : Specifies the collection path of Prometheus. In this example, it is specified as /actuator/prometheus .
                                                                              • port : Specifies the port used for collection; its value is the name of the port defined in the Service being collected.
                                                                            4. This defines the discovery scope for Services. namespaceSelector contains two mutually exclusive fields, whose meanings are as follows:

                                                                              • any : Accepts only the value true . When this field is set, changes to all Services matching the selector are watched across all namespaces.
                                                                              • matchNames : An array that specifies the namespaces to be monitored. For example, to monitor only the Services in the default and insight-system namespaces, set matchNames as follows:

                                                                                namespaceSelector:\n  matchNames:\n    - default\n    - insight-system\n
                                                                            5. The namespace where the application that needs to expose metrics is located

                                                                            6. Used to select the monitored Service by its labels; a hypothetical matching Service is sketched below.
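                                                                            For reference, a hedged sketch of a Service that the ServiceMonitor above would select; the Service name, port number, and Pod selector are illustrative:
 
                                                                            apiVersion: v1\nkind: Service\nmetadata:\n  name: micrometer-demo # hypothetical Service name\n  namespace: insight-system # must fall within namespaceSelector.matchNames\n  labels:\n    micrometer-prometheus-discovery: \"true\" # matched by selector.matchLabels\nspec:\n  ports:\n    - name: http # matched by the port field of the endpoint\n      port: 8080\n      targetPort: 8080\n  selector:\n    app: micrometer-demo\n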
                                                                          "},{"location":"en/admin/insight/compati-test/k8s-compatibility.html","title":"Kubernetes Cluster Compatibility Test","text":"

                                                                          \u2705: Test passed; \u274c: Test failed; No Value: Test not conducted.

                                                                          "},{"location":"en/admin/insight/compati-test/k8s-compatibility.html#kubernetes-compatibility-testing-for-insight-server","title":"Kubernetes Compatibility Testing for Insight Server","text":"Scenario Testing Method K8s 1.31 K8s 1.30 K8s 1.29 K8s 1.28 K8s 1.27 K8s 1.26 k8s 1.25.0 k8s 1.24 k8s 1.23 k8s 1.22 Baseline Scenario E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 Metrics Query E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 Logs Query E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 Traces Query E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 Alert Center E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 Topology Query E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705"},{"location":"en/admin/insight/compati-test/k8s-compatibility.html#kubernetes-compatibility-testing-for-insight-agent","title":"Kubernetes Compatibility Testing for Insight-agent","text":"Scenario Testing Method K8s 1.31 K8s 1.30 K8s 1.29 K8s 1.28 K8s 1.27 K8s 1.26 k8s 1.25 k8s 1.24 k8s 1.23 k8s 1.22 k8s 1.21 k8s 1.20 k8s 1.19 k8s 1.18 k8s 1.17 k8s 1.16 Baseline Scenario E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c Metrics Query E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c Logs Query E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c Traces Query E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c Alert Center E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c Topology Query E2E \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u2705 \u274c \u274c \u274c

                                                                          Note

                                                                          Insight-agent Version Compatibility History:

                                                                          1. Insight Agent is not compatible with k8s v1.16.15 starting from v0.16.x.
                                                                          2. Insight Agent v0.20.0 is compatible with k8s v1.18.20.
                                                                          3. Insight Agent v0.19.2/v0.18.2/v0.17.x is not compatible with k8s v1.18.20.
                                                                          4. Insight Agent v0.30.1 is compatible with k8s v1.18.x and below versions.
                                                                          "},{"location":"en/admin/insight/compati-test/ocp-compatibility.html","title":"Openshift 4.x Cluster Compatibility Test","text":"

                                                                          \u2705: Test passed; \u274c: Test failed.

                                                                          Note

                                                                          The table does not cover all test scenarios.

                                                                          Test Case Test Method OCP 4.10 (K8s 1.23.0) Remarks Collect and query web application metrics Manual \u2705 - Add custom metric collection Manual \u2705 - Query real-time metrics Manual \u2705 - Instantaneous index query Manual \u2705 - Instantaneous metric API field verification Manual \u2705 - Query metrics over a period of time Manual \u2705 - Metric API field verification within a period of time Manual \u2705 - Batch query cluster CPU, memory usage, total cluster CPU, cluster memory usage, total number of cluster nodes Manual \u2705 - Batch query node CPU, memory usage, total node CPU, node memory usage Manual \u2705 - Batch query cluster metrics within a period of time Manual \u2705 - Metric API field verification within a period of time Manual \u2705 - Query Pod log Manual \u2705 - Query SVC log Manual \u2705 - Query statefulset logs Manual \u2705 - Query Deployment Logs Manual \u2705 - Query NPD log Manual \u2705 - Log Filtering Manual \u2705 - Log fuzzy query - workloadSearch Manual \u2705 - Log fuzzy query - podSearch Manual \u2705 - Log fuzzy query - containerSearch Manual \u2705 - Log Accurate Query - cluster Manual \u2705 - Log Accurate Query - namespace Manual \u2705 - Log query API field verification Manual \u2705 - Alert Rule - CRUD operations Manual \u2705 - Alert Template - CRUD operations Manual \u2705 - Notification Method - CRUD operations Manual \u2705 - Link Query Manual \u2705 - Topology Query Manual \u2705 -

                                                                          The table above shows the OpenShift 4.x cluster compatibility test. It includes the test cases, their corresponding test method (manual), and the test results for OCP 4.10 (Kubernetes 1.23.0).

                                                                          Please note that this is not an exhaustive list, and additional test scenarios may exist.

                                                                          "},{"location":"en/admin/insight/compati-test/rancher-compatibility.html","title":"Rancher Cluster Compatibility Test","text":"

                                                                          \u2705: Test passed; \u274c: Test failed.

                                                                          Note

                                                                          The table does not cover all test scenarios.

                                                                          Test Scenario Test Method Rancher rke2c1 (K8s 1.24.11) Notes Collect and query web application metrics Manual \u2705 - Add custom metric collection Manual \u2705 - Query real-time metrics Manual \u2705 - Instantaneous index query Manual \u2705 - Instantaneous metric API field verification Manual \u2705 - Query metrics over a period of time Manual \u2705 - Metric API field verification within a period of time Manual \u2705 - Batch query cluster CPU, memory usage, total cluster CPU, cluster memory usage, total number of cluster nodes Manual \u2705 - Batch query node CPU, memory usage, total node CPU, node memory usage Manual \u2705 - Batch query cluster metrics within a period of time Manual \u2705 - Metric API field verification within a period of time Manual \u2705 - Query Pod log Manual \u2705 - Query SVC log Manual \u2705 - Query statefulset logs Manual \u2705 - Query Deployment Logs Manual \u2705 - Query NPD log Manual \u2705 - Log Filtering Manual \u2705 - Log fuzzy query - workloadSearch Manual \u2705 - Log fuzzy query - podSearch Manual \u2705 - Log fuzzy query - containerSearch Manual \u2705 - Log Accurate Query - cluster Manual \u2705 - Log Accurate Query - namespace Manual \u2705 - Log query API field verification Manual \u2705 - Alert Rule - CRUD operations Manual \u2705 - Alert Template - CRUD operations Manual \u2705 - Notification Method - CRUD operations Manual \u2705 - Link Query Manual \u2705 - Topology Query Manual \u2705 -"},{"location":"en/admin/insight/dashboard/dashboard.html","title":"Dashboard","text":"

                                                                          Grafana is a cross-platform open source visual analysis tool. Insight uses open source Grafana to provide monitoring services, and supports viewing resource consumption from multiple dimensions such as clusters, nodes, and namespaces.

                                                                          For more information on open source Grafana, see Grafana Official Documentation.

                                                                          "},{"location":"en/admin/insight/dashboard/dashboard.html#steps","title":"Steps","text":"
                                                                          1. Select Dashboard from the left navigation bar .

                                                                            • In the Insight / Overview dashboard, you can view the resource usage of multiple clusters and analyze resource usage, network, storage, and more based on dimensions such as namespaces and Pods.

                                                                            • Click the dropdown menu in the upper-left corner of the dashboard to switch between clusters.

                                                                            • Click the lower-right corner of the dashboard to switch the time range for queries.

                                                                          2. Insight provides several recommended dashboards that allow monitoring from different dimensions such as nodes, namespaces, and workloads. Switch between dashboards by clicking the insight-system / Insight / Overview section.

                                                                          Note

                                                                          1. For accessing Grafana UI, refer to Access Native Grafana.

                                                                          2. For importing custom dashboards, refer to Importing Custom Dashboards.

                                                                          "},{"location":"en/admin/insight/dashboard/import-dashboard.html","title":"Import Custom Dashboards","text":"

By using the Grafana CRD, you can bring the management and deployment of dashboards into the Kubernetes lifecycle. This enables version control, automated deployment, and cluster-level management of dashboards. This page describes how to import custom dashboards using the CRD and the UI.

                                                                          "},{"location":"en/admin/insight/dashboard/import-dashboard.html#steps","title":"Steps","text":"
1. Log in to the AI platform and go to Container Management. Select kpanda-global-cluster from the cluster list.

2. Choose Custom Resources from the left navigation bar. Find the grafanadashboards.integreatly.org CRD in the list and click it to view the details.

3. Click YAML Create and use the following template, replacing the dashboard JSON in the json field.

  • namespace: Specify the target namespace.
  • name: Provide a name for the dashboard.
  • label: Mandatory. Set the label to operator.insight.io/managed-by: insight.
                                                                            apiVersion: integreatly.org/v1alpha1\nkind: GrafanaDashboard\nmetadata:\n  labels:\n    app: insight-grafana-operator\n    operator.insight.io/managed-by: insight\n  name: sample-dashboard\n  namespace: insight-system\nspec:\n  json: >\n    {\n      \"id\": null,\n      \"title\": \"Simple Dashboard\",\n      \"tags\": [],\n      \"style\": \"dark\",\n      \"timezone\": \"browser\",\n      \"editable\": true,\n      \"hideControls\": false,\n      \"graphTooltip\": 1,\n      \"panels\": [],\n      \"time\": {\n        \"from\": \"now-6h\",\n        \"to\": \"now\"\n      },\n      \"timepicker\": {\n        \"time_options\": [],\n        \"refresh_intervals\": []\n      },\n      \"templating\": {\n        \"list\": []\n      },\n      \"annotations\": {\n        \"list\": []\n      },\n      \"refresh\": \"5s\",\n      \"schemaVersion\": 17,\n      \"version\": 0,\n      \"links\": []\n    }\n
4. After clicking OK, wait a moment, then view the newly imported dashboard under Dashboard.
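
If you prefer the command line, the same manifest can be applied with kubectl; a minimal sketch, assuming the template above is saved as sample-dashboard.yaml:

# Apply the GrafanaDashboard custom resource (sample-dashboard.yaml is the template above)\nkubectl apply -f sample-dashboard.yaml\n# Confirm the resource exists in the insight-system namespace\nkubectl get grafanadashboards.integreatly.org -n insight-system\n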

                                                                          Info

                                                                          If you need to customize the dashboard, refer to Add Dashboard Panel.

                                                                          "},{"location":"en/admin/insight/dashboard/login-grafana.html","title":"Access Native Grafana","text":"

Please make sure that the Insight Helm app in your global management cluster is in the Running state.

                                                                          The specific operation steps are as follows:

                                                                          1. Log in to the console to access native Grafana.

                                                                            Access address: http://ip:port/ui/insight-grafana

                                                                            For example: http://10.6.10.233:30209/ui/insight-grafana

                                                                          2. Click Login in the lower right corner, and use the default username and password to log in.

                                                                            • Default username: admin

                                                                            • Default password: admin

                                                                          3. Click Log in to complete the login.

                                                                          "},{"location":"en/admin/insight/dashboard/overview.html","title":"Overview","text":"

Insight only collects data from clusters that have insight-agent installed and running in a normal state. The Overview page summarizes resources across multiple clusters:

                                                                          • Alert Statistics: Provides statistics on active alerts across all clusters.
                                                                          • Resource Consumption: Displays the resource usage trends for the top 5 clusters and nodes in the past hour, based on CPU usage, memory usage, and disk usage.
                                                                          • By default, the sorting is based on CPU usage. You can switch the metric to sort clusters and nodes.
                                                                          • Resource Trends: Shows the trends in the number of nodes over the past 15 days and the running trend of pods in the last hour.
                                                                          • Service Requests Ranking: Displays the top 5 services with the highest request latency and error rates, along with their respective clusters and namespaces in the multi-cluster environment.
                                                                          "},{"location":"en/admin/insight/dashboard/overview.html#operation-procedure","title":"Operation procedure","text":"

                                                                          Select Overview in the left navigation bar to enter the details page.

                                                                          "},{"location":"en/admin/insight/data-query/log.html","title":"Log query","text":"

                                                                          By default, Insight collects node logs, container logs, and Kubernetes audit logs. In the log query page, you can search for standard output (stdout) logs within the permissions of your login account. This includes node logs, product logs, and Kubernetes audit logs. You can quickly find the desired logs among a large volume of logs. Additionally, you can use the source information and contextual raw data of the logs to assist in troubleshooting and issue resolution.

                                                                          "},{"location":"en/admin/insight/data-query/log.html#prerequisites","title":"Prerequisites","text":"

The cluster has insight-agent installed and the application is in the Running state.

                                                                          "},{"location":"en/admin/insight/data-query/log.html#query-log","title":"Query log","text":"
1. In the left navigation bar, select Data Query -> Log Query.

2. After selecting the query criteria, click Search; the log records will be displayed along with a histogram, with the most recent logs on top.

                                                                          3. In the Filter panel, switch Type and select Node to check the logs of all nodes in the cluster.

                                                                          4. In the Filter panel, switch Type and select Event to view the logs generated by all Kubernetes events in the cluster.

                                                                          Lucene Syntax Explanation:

                                                                          1. Use logical operators (AND, OR, NOT, \"\") to query multiple keywords. For example: keyword1 AND (keyword2 OR keyword3) NOT keyword4.
                                                                          2. Use a tilde (~) for fuzzy queries. You can optionally specify a parameter after the \"~\" to control the similarity of the fuzzy query. If not specified, it defaults to 0.5. For example: error~.
                                                                          3. Use wildcards (*, ?) as single-character placeholders to match any character.
                                                                          4. Use square brackets [ ] or curly braces { } for range queries. Square brackets [ ] represent a closed interval and include the boundary values. Curly braces { } represent an open interval and exclude the boundary values. Range queries are applicable only to fields that can be sorted, such as numeric fields and date fields. For example timestamp:[2022-01-01 TO 2022-01-31].
                                                                          5. For more information, please refer to the Lucene Syntax Explanation.
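
For instance, the rules above can be combined in a single query; a hypothetical example that fuzzily matches \"error\" together with either \"timeout\" or the exact phrase \"connection refused\", while excluding \"debug\":

error~ AND (timeout OR \"connection refused\") NOT debug\n
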
                                                                          "},{"location":"en/admin/insight/data-query/log.html#view-log-context","title":"View log context","text":"

Clicking the button next to a log entry slides out a panel on the right where you can view 100 lines of context for that log by default. You can change the Display Rows option to view more context.

                                                                          "},{"location":"en/admin/insight/data-query/log.html#export-log","title":"Export log","text":"

                                                                          Click the download button located in the upper right corner of the list.

                                                                          • You can configure the exported log fields. The available fields may vary depending on the log type, with the Log Content field being mandatory.
                                                                          • You can export the log query results in .txt or .csv format.

                                                                          "},{"location":"en/admin/insight/data-query/metric.html","title":"Metric query","text":"

Metric query supports querying the metric data of each container resource, so you can view the trends of monitoring metrics over time. Advanced query additionally supports native PromQL statements.

                                                                          "},{"location":"en/admin/insight/data-query/metric.html#prerequisites","title":"Prerequisites","text":"
• The cluster has insight-agent installed and the application is in the Running state.
                                                                          "},{"location":"en/admin/insight/data-query/metric.html#common-query","title":"Common query","text":"
1. In the left navigation bar, click Data Query -> Metrics.

2. After selecting query conditions such as cluster, type, node, and metric name, click Search, and the corresponding metric chart and data details will be displayed on the right side of the screen.

                                                                          Tip

Custom time ranges are supported. You can click the Refresh icon manually or select a preset time interval to refresh automatically.

                                                                          "},{"location":"en/admin/insight/data-query/metric.html#advanced-search","title":"Advanced Search","text":"
1. In the left navigation bar, click Data Query -> Metrics, then click the Advanced Query tab to switch to the advanced query page.

2. Enter a PromQL statement (see PromQL Syntax), click Query, and the metric chart and data details will be displayed.
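
As an illustration, a PromQL statement like the following (a hypothetical example) returns the per-pod CPU usage rate over the last 5 minutes for the insight-system namespace:

sum(rate(container_cpu_usage_seconds_total{namespace=\"insight-system\"}[5m])) by (pod)\n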

                                                                          "},{"location":"en/admin/insight/faq/expand-once-es-full.html","title":"What to Do When ElasticSearch is Full?","text":"

When ElasticSearch storage is full, you can either scale up or delete data to resolve the issue:

You can run the following command to check the disk usage of the ES data nodes.

                                                                          kubectl get pod -n mcamel-system | grep common-es-cluster-masters-es | awk '{print $1}' | xargs -I {} kubectl exec {} -n mcamel-system -c elasticsearch -- df -h | grep /usr/share/elasticsearch/data\n
                                                                          "},{"location":"en/admin/insight/faq/expand-once-es-full.html#scale-up","title":"Scale Up","text":"

                                                                          If the host still has available resources, scaling up is a common solution, which involves increasing the PVC capacity.

1. First, run the following command to get the PVC configuration of the es-data-0 node. Adjust the PVC name to match your environment.

                                                                            kubectl edit -n mcamel-system pvc elasticsearch-data-mcamel-common-es-cluster-masters-es-data-0\n
2. Then modify the storage field below (the StorageClass you use must support volume expansion; a non-interactive alternative is sketched after these steps):

                                                                            spec:\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 35Gi # (1)!\n
                                                                            1. Adjust this value as needed.
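
Alternatively, the same change can be applied without opening an editor; a sketch using kubectl patch, assuming a target size of 50Gi (adjust as needed):

# Expand the PVC non-interactively (requires an expandable StorageClass)\nkubectl patch -n mcamel-system pvc elasticsearch-data-mcamel-common-es-cluster-masters-es-data-0 \\\n  -p '{\"spec\":{\"resources\":{\"requests\":{\"storage\":\"50Gi\"}}}}'\n
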
                                                                          "},{"location":"en/admin/insight/faq/expand-once-es-full.html#delete-data","title":"Delete Data","text":"

When ElasticSearch storage is full, you can also delete index data to free up resources.

                                                                          You can follow the steps below to access the Kibana page and manually delete data.

                                                                          1. First, ensure that the Kibana Pod exists and is running normally:

                                                                            kubectl get po -n mcamel-system | grep mcamel-common-es-cluster-masters-kb\n
2. If it does not exist, manually set the replicas to 1 and wait for the service to run normally. If it exists, skip this step.

                                                                            kubectl scale -n mcamel-system deployment mcamel-common-es-cluster-masters-kb --replicas 1\n
                                                                          3. Modify the Kibana Service to be exposed as a NodePort for access:

                                                                            kubectl patch svc -n mcamel-system mcamel-common-es-cluster-masters-kb-http -p '{\"spec\":{\"type\":\"NodePort\"}}'\n\n# After modification, check the NodePort. For example, if the port is 30128, the access URL will be https://{NodeIP in the cluster}:30128\n[root@insight-master1 ~]# kubectl get svc -n mcamel-system | grep mcamel-common-es-cluster-masters-kb-http\nmcamel-common-es-cluster-masters-kb-http   NodePort    10.233.51.174   <none>   5601:30128/TCP    108m\n
                                                                          4. Retrieve the ElasticSearch Secret to log in to Kibana (username is elastic):

                                                                            kubectl get secrets -n mcamel-system mcamel-common-es-cluster-masters-es-elastic-user -o jsonpath=\"{.data.elastic}\" | base64 -d\n
5. Go to Kibana -> Stack Management -> Index Management and enable the Include hidden indices option to see all indices. Based on the index sequence numbers, keep the indices with higher numbers (newer data) and delete those with lower numbers (older data).
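
If you prefer not to use Kibana, old indices can also be deleted directly through the ElasticSearch REST API; a sketch, assuming the pod name below matches your environment and using the placeholder index name insight-es-k8s-logs-000001 (replace <password> with the secret retrieved in step 4):

# Delete one old index via the ES API from inside an ES pod (index name is illustrative)\nkubectl exec -n mcamel-system mcamel-common-es-cluster-masters-es-data-0 -c elasticsearch -- \\\n  curl -s -k -u \"elastic:<password>\" -X DELETE \"https://localhost:9200/insight-es-k8s-logs-000001\"\n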

                                                                          "},{"location":"en/admin/insight/faq/traceclockskew.html","title":"Clock offset in trace data","text":"

In a distributed system, clock skew causes time drift between hosts: at any given moment, the system clocks of different hosts deviate slightly from one another.

A tracing system is a typical distributed system and is affected by this phenomenon in its timing data. For example, within a single trace, the server-side span may appear to start earlier than the client-side span. This is logically impossible, but because of clock skew between the hosts at the moment each service records its trace data, the effect shown in the following figure can occur:

The phenomenon in the above figure cannot be eliminated in theory. However, it is rare, and even when it occurs, it does not affect the recorded calling relationships between services.

Insight currently uses the Jaeger UI to display trace data, and the UI shows a reminder when it encounters such a trace:

The Jaeger community is currently working to mitigate this problem at the UI level.

                                                                          For more information, refer to:

                                                                          • Clock Skew Adjuster considered harmful
                                                                          • Add ability to display unadjusted trace in the UI
                                                                          • Clock Skew Adjustment
                                                                          "},{"location":"en/admin/insight/infra/cluster.html","title":"Cluster Monitoring","text":"

Through cluster monitoring, you can view basic cluster information, current resource consumption, and resource consumption trends over time.

                                                                          "},{"location":"en/admin/insight/infra/cluster.html#prerequisites","title":"Prerequisites","text":"

The cluster has insight-agent installed and the application is in the Running state.

                                                                          "},{"location":"en/admin/insight/infra/cluster.html#steps","title":"Steps","text":"
                                                                          1. Go to the Insight product module.

                                                                          2. Select Infrastructure > Clusters from the left navigation bar. On this page, you can view the following information:

                                                                            • Resource Overview: Provides statistics on the number of normal/all nodes and workloads across multiple clusters.
                                                                            • Fault: Displays the number of alerts generated in the current cluster.
                                                                            • Resource Consumption: Shows the actual usage and total capacity of CPU, memory, and disk for the selected cluster.
                                                                            • Metric Explanations: Describes the trends in CPU, memory, disk I/O, and network bandwidth.

3. Click Resource Level Monitor to view more metrics for the current cluster.
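
The metrics in the Metric Explanations table below are ratios; expressed in PromQL they might look like the following sketch (Insight's actual recording rules may differ):

# CPU Usage: actual pod CPU usage / total CPU capacity of all nodes (a sketch)\nsum(rate(container_cpu_usage_seconds_total{container!=\"\"}[5m])) / sum(machine_cpu_cores)\n\n# CPU Allocation: sum of pod CPU requests / total CPU capacity of all nodes\nsum(kube_pod_container_resource_requests{resource=\"cpu\"}) / sum(machine_cpu_cores)\n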

                                                                          "},{"location":"en/admin/insight/infra/cluster.html#metric-explanations","title":"Metric Explanations","text":"Metric Name Description CPU Usage The ratio of the actual CPU usage of all pod resources in the cluster to the total CPU capacity of all nodes. CPU Allocation The ratio of the sum of CPU requests of all pods in the cluster to the total CPU capacity of all nodes. Memory Usage The ratio of the actual memory usage of all pod resources in the cluster to the total memory capacity of all nodes. Memory Allocation The ratio of the sum of memory requests of all pods in the cluster to the total memory capacity of all nodes."},{"location":"en/admin/insight/infra/container.html","title":"Container Insight","text":"

                                                                          Container insight is the process of monitoring workloads in cluster management. In the list, you can view basic information and status of workloads. On the Workloads details page, you can see the number of active alerts and the trend of resource consumption such as CPU and memory.

                                                                          "},{"location":"en/admin/insight/infra/container.html#prerequisites","title":"Prerequisites","text":"
                                                                          • The cluster has insight-agent installed, and all pods are in the Running state.

                                                                          • To install insight-agent, please refer to: Installing insight-agent online or Offline upgrade of insight-agent.

                                                                          "},{"location":"en/admin/insight/infra/container.html#steps","title":"Steps","text":"

                                                                          Follow these steps to view service monitoring metrics:

                                                                          1. Go to the Insight product module.

                                                                          2. Select Infrastructure > Workloads from the left navigation bar.

                                                                          3. Switch between tabs at the top to view data for different types of workloads.

                                                                          4. Click the target workload name to view the details.

                                                                            1. Faults: Displays the total number of active alerts for the workload.
                                                                            2. Resource Consumption: Shows the CPU, memory, and network usage of the workload.
                                                                            3. Monitoring Metrics: Provides the trends of CPU, Memory, Network, and disk usage for the workload over the past hour.

                                                                          5. Switch to the Pods tab to view the status of various pods for the workload, including their nodes, restart counts, and other information.

6. Switch to the JVM monitor tab to view the JVM metrics for each pod.

                                                                            Note

                                                                            1. The JVM monitoring feature only supports the Java language.
                                                                            2. To enable the JVM monitoring feature, refer to Getting Started with Monitoring Java Applications.
                                                                          "},{"location":"en/admin/insight/infra/container.html#metric-explanations","title":"Metric Explanations","text":"Metric Name Description CPU Usage The sum of CPU usage for all pods under the workload. CPU Requests The sum of CPU requests for all pods under the workload. CPU Limits The sum of CPU limits for all pods under the workload. Memory Usage The sum of memory usage for all pods under the workload. Memory Requests The sum of memory requests for all pods under the workload. Memory Limits The sum of memory limits for all pods under the workload. Disk Read/Write Rate The total number of continuous disk reads and writes per second within the specified time range, representing a performance measure of the number of read and write operations per second on the disk. Network Send/Receive Rate The incoming and outgoing rates of network traffic, aggregated by workload, within the specified time range."},{"location":"en/admin/insight/infra/event.html","title":"Event Query","text":"

                                                                          AI platform Insight supports event querying by cluster and namespace.

                                                                          "},{"location":"en/admin/insight/infra/event.html#event-status-distribution","title":"Event Status Distribution","text":"

                                                                          By default, the events that occurred within the last 12 hours are displayed. You can select a different time range in the upper right corner to view longer or shorter periods. You can also customize the sampling interval from 1 minute to 5 hours.

                                                                          The event status distribution chart provides a visual representation of the intensity and dispersion of events. This helps in evaluating and preparing for subsequent cluster operations and maintenance tasks. If events are densely concentrated during specific time periods, you may need to allocate more resources or take proper measures to ensure cluster stability and high availability. On the other hand, if events are dispersed, you can effectively schedule other maintenance tasks such as system optimization, upgrades, or handling other tasks during this period.

                                                                          By considering the event status distribution chart and the selected time range, you can better plan and manage your cluster operations and maintenance work, ensuring system stability and reliability.

                                                                          "},{"location":"en/admin/insight/infra/event.html#event-count-and-statistics","title":"Event Count and Statistics","text":"

                                                                          Through important event statistics, you can easily understand the number of image pull failures, health check failures, Pod execution failures, Pod scheduling failures, container OOM (Out-of-Memory) occurrences, volume mounting failures, and the total count of all events. These events are typically categorized as \"Warning\" and \"Normal\".

                                                                          "},{"location":"en/admin/insight/infra/event.html#event-list","title":"Event List","text":"

The event list is presented in chronological order. You can sort the events by Last Occurred At and Type.

                                                                          By clicking on the \u2699\ufe0f icon on the right side, you can customize the displayed columns according to your preferences and needs.

                                                                          Additionally, you can click the refresh icon to update the current event list when needed.

                                                                          In the operation column on the right, clicking the icon allows you to view the history of a specific event.

                                                                          "},{"location":"en/admin/insight/infra/event.html#reference","title":"Reference","text":"

                                                                          For detailed meanings of the built-in Events in the system, refer to the Kubernetes API Event List.

                                                                          "},{"location":"en/admin/insight/infra/namespace.html","title":"Namespace Monitoring","text":"

                                                                          With namespaces as the dimension, you can quickly query resource consumption and trends within a namespace.

                                                                          "},{"location":"en/admin/insight/infra/namespace.html#prerequisites","title":"Prerequisites","text":"
                                                                          • Insight Agent is installed in the cluster and the applications are in the Running state.
                                                                          "},{"location":"en/admin/insight/infra/namespace.html#steps","title":"Steps","text":"
                                                                          1. Go to the Insight product module.

                                                                          2. Select Infrastructure -> Namespaces from the left navigation bar. On this page, you can view the following information:

                                                                            1. Switch Namespace: Switch between clusters or namespaces at the top.
                                                                            2. Resource Overview: Provides statistics on the number of normal and total workloads within the selected namespace.
                                                                            3. Incidents: Displays the number of alerts generated within the selected namespace.
                                                                            4. Events: Shows the number of Warning level events within the selected namespace in the past 24 hours.
                                                                            5. Resource Consumption: Provides the sum of CPU and memory usage for Pods within the selected namespace, along with the CPU and memory quota information.
                                                                          "},{"location":"en/admin/insight/infra/namespace.html#metric-explanations","title":"Metric Explanations","text":"Metric Name Description CPU Usage The sum of CPU usage for Pods within the selected namespace. Memory Usage The sum of memory usage for Pods within the selected namespace. Pod CPU Usage The CPU usage for each Pod within the selected namespace. Pod Memory Usage The memory usage for each Pod within the selected namespace."},{"location":"en/admin/insight/infra/node.html","title":"Node Monitoring","text":"

Through node monitoring, you can get an overview of the current health status of the nodes in the selected cluster and the number of abnormal pods. On a node's details page, you can view the number of alerts and the trends of resource consumption such as CPU, memory, and disk.

                                                                          "},{"location":"en/admin/insight/infra/node.html#prerequisites","title":"Prerequisites","text":"
• The cluster has insight-agent installed and the application is in the Running state.
                                                                          "},{"location":"en/admin/insight/infra/node.html#steps","title":"Steps","text":"
                                                                          1. Go to the Insight product module.

                                                                          2. Select Infrastructure -> Nodes from the left navigation bar. On this page, you can view the following information:

                                                                            • Cluster: Uses the dropdown at the top to switch between clusters.
                                                                            • Nodes: Shows a list of nodes within the selected cluster. Click a specific node to view detailed information.
                                                                            • Alert: Displays the number of alerts generated in the current cluster.
                                                                            • Resource Consumption: Shows the actual usage and total capacity of CPU, memory, and disk for the selected node.
                                                                            • Metric Explanations: Describes the trends in CPU, memory, disk I/O, and network traffic for the selected node.

3. Click Resource Level Monitor to view more metrics for the current node.

                                                                          "},{"location":"en/admin/insight/infra/probe.html","title":"Probe","text":"

                                                                          Probe refers to the use of black-box monitoring to regularly test the connectivity of targets through HTTP, TCP, and other methods, enabling quick detection of ongoing faults.

                                                                          Insight uses the Prometheus Blackbox Exporter tool to probe the network using protocols such as HTTP, HTTPS, DNS, TCP, and ICMP, and returns the probe results to understand the network status.

                                                                          "},{"location":"en/admin/insight/infra/probe.html#prerequisites","title":"Prerequisites","text":"

                                                                          The insight-agent has been successfully deployed in the target cluster and is in the Running state.

                                                                          "},{"location":"en/admin/insight/infra/probe.html#view-probes","title":"View Probes","text":"
                                                                          1. Go to the Insight product module.
                                                                          2. Select Infrastructure -> Probes in the left navigation bar.

                                                                            • Click the cluster or namespace dropdown in the table to switch between clusters and namespaces.
                                                                            • The list displays the name, probe method, probe target, connectivity status, and creation time of the probes by default.
                                                                            • The connectivity status can be:
                                                                              • Normal: The probe successfully connects to the target, and the target returns the expected response.
                                                                              • Abnormal: The probe fails to connect to the target, or the target does not return the expected response.
                                                                              • Pending: The probe is attempting to connect to the target.
                                                                            • Supports fuzzy search of probe names.
                                                                          "},{"location":"en/admin/insight/infra/probe.html#create-a-probe","title":"Create a Probe","text":"
1. Click Create Probe.
2. Fill in the basic information and click Next.

                                                                            • Name: The name can only contain lowercase letters, numbers, and hyphens (-), and must start and end with a lowercase letter or number, with a maximum length of 63 characters.
                                                                            • Cluster: Select the cluster for the probe task.
                                                                            • Namespace: The namespace where the probe task is located.
                                                                          3. Configure the probe parameters.

                                                                            • Blackbox Instance: Select the blackbox instance responsible for the probe.
                                                                            • Probe Method:
                                                                              • HTTP: Sends HTTP or HTTPS requests to the target URL to check its connectivity and response time. This can be used to monitor the availability and performance of websites or web applications.
                                                                              • TCP: Establishes a TCP connection to the target host and port to check its connectivity and response time. This can be used to monitor TCP-based services such as web servers and database servers.
    • Other: Supports custom probe methods configured via a ConfigMap. For more information, refer to: Custom Probe Methods (a configuration sketch follows after these steps).
                                                                            • Probe Target: The target address of the probe, supports domain names or IP addresses.
                                                                            • Labels: Custom labels that will be automatically added to Prometheus' labels.
                                                                            • Probe Interval: The interval between probes.
                                                                            • Probe Timeout: The maximum waiting time when probing the target.
                                                                          4. After configuring, click OK to complete the creation.
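
For the Other probe method, the ConfigMap carries extra Blackbox Exporter modules; a minimal sketch of what one module definition might look like (the module name and fields are illustrative, and the exact ConfigMap to edit is described in Custom Probe Methods):

modules:\n  icmp:\n    prober: icmp\n    timeout: 5s\n    icmp:\n      preferred_ip_protocol: \"ip4\"\n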

                                                                          Warning

                                                                          After the probe task is created, it takes about 3 minutes to synchronize the configuration. During this period, no probes will be performed, and probe results cannot be viewed.

                                                                          "},{"location":"en/admin/insight/infra/probe.html#view-monitoring-dashboards","title":"View Monitoring Dashboards","text":"

Click \u2507 in the operations column and click View Monitoring Dashboard.

                                                                          Metric Name Description Current Status Response Represents the response status code of the HTTP probe request. Ping Status Indicates whether the probe request was successful. 1 indicates a successful probe request, and 0 indicates a failed probe request. IP Protocol Indicates the IP protocol version used in the probe request. SSL Expiry Represents the earliest expiration time of the SSL/TLS certificate. DNS Response (Latency) Represents the duration of the entire probe process in seconds. HTTP Duration Represents the duration of the entire process from sending the request to receiving the complete response."},{"location":"en/admin/insight/infra/probe.html#edit-a-probe","title":"Edit a Probe","text":"

Click \u2507 in the operations column and click Edit.

                                                                          "},{"location":"en/admin/insight/infra/probe.html#delete-a-probe","title":"Delete a Probe","text":"

Click \u2507 in the operations column and click Delete.

                                                                          "},{"location":"en/admin/insight/quickstart/install/index.html","title":"Start Observing","text":"

The AI platform enables the creation and management of multicloud and multiple clusters. Building on this capability, Insight serves as a unified observability solution for multiple clusters. It collects observability data by deploying the insight-agent plugin to each cluster and allows querying of metrics, logs, and trace data through AI platform Insight.

                                                                          insight-agent is a tool that facilitates the collection of observability data from multiple clusters. Once installed, it automatically collects metrics, logs, and trace data without any modifications.

                                                                          Clusters created through Container Management come pre-installed with insight-agent. Hence, this guide specifically provides instructions on enabling observability for integrated clusters.

                                                                          • Install insight-agent online

As a unified observability platform for multiple clusters, the resource consumption of certain Insight components is closely related to the size of the created clusters and the number of integrated clusters. When installing insight-agent, adjust the resources of the corresponding components based on the cluster size.

                                                                          1. Adjust the CPU and memory resources of the Prometheus collection component in insight-agent according to the size of the cluster created or integrated. Please refer to Prometheus resource planning.

2. As the metric data from multiple clusters is stored centrally, AI platform administrators need to adjust the disk space of vmstorage based on the cluster size. Please refer to vmstorage disk capacity planning.

                                                                          3. For instructions on adjusting the disk space of vmstorage, please refer to Expanding vmstorage disk.

                                                                          Since AI platform supports the management of multicloud and multiple clusters, insight-agent has undergone partial verification. However, there are known conflicts with monitoring components when installing insight-agent in Suanova 4.0 clusters and Openshift 4.x clusters. If you encounter similar issues, please refer to the following documents:

                                                                          • Install insight-agent in Suanova 4.0.x
                                                                          • Install insight-agent in Openshift 4.x

                                                                          Currently, the insight-agent collection component has undergone functional testing for popular versions of Kubernetes. Please refer to:

                                                                          • Kubernetes cluster compatibility testing
                                                                          • Openshift 4.x cluster compatibility testing
                                                                          • Rancher cluster compatibility testing
                                                                          "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html","title":"Enable Big Log and Big Trace Modes","text":"

The Insight module supports switching logs to Big Log mode and traces to Big Trace mode to enhance data-writing capability in large-scale environments. This page introduces the following methods for enabling these modes:

                                                                          • Enable or upgrade to Big Log and Big Trace modes through the installer (controlled by the same parameter value in manifest.yaml)
                                                                          • Manually enable Big Log and Big Trace modes through Helm commands
                                                                          "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#logs","title":"Logs","text":"

                                                                          This section explains the differences between the normal log mode and the Big Log mode.

                                                                          "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#log-mode","title":"Log Mode","text":"

                                                                          Components: Fluentbit + Elasticsearch

                                                                          This mode is referred to as the ES mode, and the data flow diagram is shown below:

                                                                          "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#big-log-mode","title":"Big Log Mode","text":"

                                                                          Components: Fluentbit + Kafka + Vector + Elasticsearch

                                                                          This mode is referred to as the Kafka mode, and the data flow diagram is shown below:

                                                                          "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#traces","title":"Traces","text":"

                                                                          This section explains the differences between the normal trace mode and the Big Trace mode.

                                                                          "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#trace-mode","title":"Trace Mode","text":"

                                                                          Components: Agent opentelemetry-collector + Global opentelemetry-collector + Jaeger-collector + Elasticsearch

This mode is referred to as the OTLP mode, and the data flow diagram is shown below:

                                                                          "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#big-trace-mode","title":"Big Trace Mode","text":"

                                                                          Components: Agent opentelemetry-collector + Kafka + Global opentelemetry-collector + Jaeger-collector + Elasticsearch

                                                                          This mode is referred to as the Kafka mode, and the data flow diagram is shown below:

                                                                          "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#enabling-via-installer","title":"Enabling via Installer","text":"

When deploying/upgrading AI platform using the installer, the manifest.yaml file includes the infrastructures.kafka field. To enable the Big Log and Big Trace observability modes, Kafka must be enabled:

                                                                          manifest.yaml
                                                                          apiVersion: manifest.daocloud.io/v1alpha1\nkind: SuanovaManifest\n...\ninfrastructures:\n  ...\n  kafka:\n    enable: true # Default is false\n    cpuLimit: 1\n    memLimit: 2Gi\n    pvcSize: 15Gi\n
                                                                          "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#enable","title":"Enable","text":"

                                                                          When using a manifest.yaml that enables kafka during installation, Kafka middleware will be installed by default, and Big Log and Big Trace modes will be enabled automatically. The installation command is:

                                                                          ./dce5-installer cluster-create -c clusterConfig.yaml -m manifest.yaml\n
                                                                          "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#upgrade","title":"Upgrade","text":"

The upgrade also involves modifying the kafka field. However, note that since the old environment was installed with kafka: false, Kafka is not present in the environment. Therefore, you need to include middleware in the upgrade scope so that the Kafka middleware is installed at the same time. The upgrade command is:

                                                                          ./dce5-installer cluster-create -c clusterConfig.yaml -m manifest.yaml -u gproduct,middleware\n

                                                                          Note

                                                                          After the upgrade is complete, you need to manually restart the following components:

                                                                          • insight-agent-fluent-bit
                                                                          • insight-agent-opentelemetry-collector
                                                                          • insight-opentelemetry-collector
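
A sketch of how these restarts might be performed with kubectl, assuming insight-agent-fluent-bit runs as a DaemonSet and the collectors as Deployments in the insight-system namespace:

kubectl -n insight-system rollout restart daemonset/insight-agent-fluent-bit\nkubectl -n insight-system rollout restart deployment/insight-agent-opentelemetry-collector\nkubectl -n insight-system rollout restart deployment/insight-opentelemetry-collector\n
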
                                                                          "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#enabling-via-helm-commands","title":"Enabling via Helm Commands","text":"

Prerequisite: Ensure that a usable Kafka instance is available and that its address is reachable.

Use the following commands to retrieve the Helm values of the existing Insight and insight-agent releases (backing them up is recommended):

                                                                          helm get values insight -n insight-system -o yaml > insight.yaml\nhelm get values insight-agent -n insight-system -o yaml > insight-agent.yaml\n
                                                                          "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#enabling-big-log","title":"Enabling Big Log","text":"

                                                                          There are several ways to enable or upgrade to Big Log mode:

• Use --set in the helm upgrade command
• Modify YAML and run helm upgrade
• Upgrade via Container Management UI

                                                                          First, run the following Insight upgrade command, ensuring the Kafka brokers address is correct:

                                                                          helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --set global.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.kafka.enabled=true \\\n  --set vector.enabled=true \\\n  --version 0.30.1\n

                                                                          Then, run the following insight-agent upgrade command, ensuring the Kafka brokers address is correct:

                                                                          helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --set global.exporters.logging.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.exporters.logging.output=kafka \\\n  --version 0.30.1\n

                                                                          Follow these steps to modify the YAML and then run the helm upgrade command:

                                                                          1. Modify insight.yaml

                                                                            insight.yaml
                                                                            global:\n  ...\n  kafka:\n    brokers: 10.6.216.111:30592\n    enabled: true\n...\nvector:\n  enabled: true\n
                                                                          2. Upgrade the Insight component:

                                                                            helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --version 0.30.1\n
                                                                          3. Modify insight-agent.yaml

                                                                            insight-agent.yaml
                                                                            global:\n  ...\n  exporters:\n    ...\n    logging:\n      ...\n      kafka:\n        brokers: 10.6.216.111:30592\n      output: kafka\n
                                                                          4. Upgrade the insight-agent:

                                                                            helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --version 0.30.1\n

                                                                          In the Container Management module, find the cluster, select Helm Apps from the left navigation bar, and find and update the insight-agent.

                                                                          In Logging Settings, select kafka for output and fill in the correct brokers address.

                                                                          Note that after the upgrade is complete, you need to manually restart the insight-agent-fluent-bit component.

                                                                          "},{"location":"en/admin/insight/quickstart/install/big-log-and-trace.html#enabling-big-trace","title":"Enabling Big Trace","text":"

                                                                          There are several ways to enable or upgrade to Big Trace mode:

• Use --set in the helm upgrade command
• Modify YAML and run helm upgrade
• Upgrade via Container Management UI

                                                                          First, run the following Insight upgrade command, ensuring the Kafka brokers address is correct:

                                                                          helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --set global.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.kafka.enabled=true \\\n  --set global.tracing.kafkaReceiver.enabled=true \\\n  --version 0.30.1\n

                                                                          Then, run the following insight-agent upgrade command, ensuring the Kafka brokers address is correct:

                                                                          helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --set global.exporters.trace.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.exporters.trace.output=kafka \\\n  --version 0.30.1\n

                                                                          Follow these steps to modify the YAML and then run the helm upgrade command:

                                                                          1. Modify insight.yaml

                                                                            insight.yaml
                                                                            global:\n  ...\n  kafka:\n    brokers: 10.6.216.111:30592\n    enabled: true\n...\ntracing:\n  ...\n  kafkaReceiver:\n    enabled: true\n
                                                                          2. Upgrade the Insight component:

                                                                            helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --version 0.30.1\n
                                                                          3. Modify insight-agent.yaml

                                                                            insight-agent.yaml
                                                                            global:\n  ...\n  exporters:\n    ...\n    trace:\n      ...\n      kafka:\n        brokers: 10.6.216.111:30592\n      output: kafka\n
                                                                          4. Upgrade the insight-agent:

                                                                            helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --version 0.30.1\n

                                                                          In the Container Management module, find the cluster, select Helm Apps from the left navigation bar, and find and update the insight-agent.

                                                                          In Trace Settings, select kafka for output and fill in the correct brokers address.

                                                                          Note that after the upgrade is complete, you need to manually restart the insight-agent-opentelemetry-collector and insight-opentelemetry-collector components.
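A minimal restart sketch, assuming both collectors are deployed as Deployments in the insight-system namespace:

kubectl -n insight-system rollout restart deployment insight-agent-opentelemetry-collector\nkubectl -n insight-system rollout restart deployment insight-opentelemetry-collector\n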

                                                                          "},{"location":"en/admin/insight/quickstart/install/component-scheduling.html","title":"Custom Insight Component Scheduling Policy","text":"

When deploying Insight to a Kubernetes environment, proper resource management and optimization are crucial. Insight includes several core components such as Prometheus, OpenTelemetry, FluentBit, Vector, and Elasticsearch. While running, these components may degrade the performance of other pods in the cluster through resource contention. To manage resources effectively and keep the cluster running smoothly, node affinity becomes an important option.

This page describes how to use taints and node affinity to ensure that each component runs on the appropriate nodes, avoiding resource competition or contention and thereby guaranteeing the stability and efficiency of the entire Kubernetes cluster.

                                                                          "},{"location":"en/admin/insight/quickstart/install/component-scheduling.html#configure-dedicated-nodes-for-insight-using-taints","title":"Configure dedicated nodes for Insight using taints","text":"

                                                                          Since the Insight Agent includes DaemonSet components, the configuration method described in this section is to have all components except the Insight DaemonSet run on dedicated nodes.

                                                                          This is achieved by adding taints to the dedicated nodes and using tolerations to match them. More details can be found in the Kubernetes official documentation.

                                                                          You can refer to the following commands to add and remove taints on nodes:

                                                                          # Add taint\nkubectl taint nodes worker1 node.daocloud.io=insight-only:NoSchedule\n\n# Remove taint\nkubectl taint nodes worker1 node.daocloud.io:NoSchedule-\n
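To confirm the taint took effect, you can inspect the node (a quick check, assuming the node name worker1 from above):

kubectl describe node worker1 | grep Taints\n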

                                                                          There are two ways to schedule Insight components to dedicated nodes:

                                                                          "},{"location":"en/admin/insight/quickstart/install/component-scheduling.html#1-add-tolerations-for-each-component","title":"1. Add tolerations for each component","text":"

                                                                          Configure the tolerations for the insight-server and insight-agent Charts respectively:

insight-server Chart | insight-agent Chart
                                                                          server:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nui:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nrunbook:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\n# mysql:\nvictoria-metrics-k8s-stack:\n  victoria-metrics-operator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  vmcluster:\n    spec:\n      vmstorage:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n      vmselect:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n      vminsert:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n  vmalert:\n    spec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n  alertmanager:\n    spec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n\njaeger:\n  collector:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  query:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n\nopentelemetry-collector-aggregator:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nopentelemetry-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\ngrafana-operator:\n  operator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  grafana:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\nkibana:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nelastic-alert:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nvector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n
                                                                          kube-prometheus-stack:\n  prometheus:\n    prometheusSpec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n  prometheus-node-exporter:\n    tolerations:\n      - effect: NoSchedule\n        operator: Exists\n  prometheusOperator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n\nkube-state-metrics:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-operator:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\ntailing-sidecar-operator:\n  operator:\n    tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-kubernetes-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nprometheus-blackbox-exporter:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\netcd-exporter:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\" \n
                                                                          "},{"location":"en/admin/insight/quickstart/install/component-scheduling.html#2-configure-at-the-namespace-level","title":"2. Configure at the namespace level","text":"

                                                                          Allow pods in the insight-system namespace to tolerate the node.daocloud.io=insight-only taint.

1. Adjust the apiserver configuration file /etc/kubernetes/manifests/kube-apiserver.yaml to enable the PodTolerationRestriction and PodNodeSelector admission plugins (a flag excerpt is sketched after this list).

                                                                          2. Add an annotation to the insight-system namespace:

                                                                            apiVersion: v1\nkind: Namespace\nmetadata:\n  name: insight-system\n  annotations:\n    scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Equal\", \"effect\": \"NoSchedule\", \"key\": \"node.daocloud.io\", \"value\": \"insight-only\"}]'\n
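For step 1, the admission plugins are enabled via the --enable-admission-plugins flag in the static pod manifest. A sketch of the relevant excerpt (the existing plugin list varies by cluster; NodeRestriction here is just a common default):

# /etc/kubernetes/manifests/kube-apiserver.yaml (excerpt)\nspec:\n  containers:\n    - command:\n        - kube-apiserver\n        - --enable-admission-plugins=NodeRestriction,PodTolerationRestriction,PodNodeSelector\n        # ... other flags unchanged\n

For step 2, instead of applying the full Namespace manifest, the annotation can also be added in place (an equivalent sketch):

kubectl annotate namespace insight-system \\\n  scheduler.alpha.kubernetes.io/defaultTolerations='[{\"operator\": \"Equal\", \"effect\": \"NoSchedule\", \"key\": \"node.daocloud.io\", \"value\": \"insight-only\"}]'\n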

Restart the components under the insight-system namespace so that their pods pick up the default toleration and are scheduled normally.
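A minimal sketch for triggering that restart (this restarts every Deployment in the namespace; StatefulSets and DaemonSets can be restarted the same way):

kubectl -n insight-system rollout restart deployment\n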

                                                                          "},{"location":"en/admin/insight/quickstart/install/component-scheduling.html#use-node-labels-and-node-affinity-to-manage-component-scheduling","title":"Use node labels and node affinity to manage component scheduling","text":"

                                                                          Info

                                                                          Node affinity is conceptually similar to nodeSelector, allowing you to constrain which nodes a pod can be scheduled on based on labels on the nodes. There are two types of node affinity:

                                                                          1. requiredDuringSchedulingIgnoredDuringExecution: The scheduler will only schedule the pod if the rules are met. This feature is similar to nodeSelector but has more expressive syntax.
                                                                          2. preferredDuringSchedulingIgnoredDuringExecution: The scheduler will try to find nodes that meet the rules. If no matching nodes are found, the scheduler will still schedule the Pod.

                                                                          For more details, please refer to the Kubernetes official documentation.

                                                                          To meet different user needs for scheduling Insight components, Insight provides fine-grained labels for different components' scheduling policies. Below is a description of the labels and their associated components:

• node.daocloud.io/insight-any (any value, true recommended): all Insight components prefer nodes with this label
• node.daocloud.io/insight-prometheus (any value, true recommended): specifically for Prometheus components
• node.daocloud.io/insight-vmstorage (any value, true recommended): specifically for VictoriaMetrics vmstorage components
• node.daocloud.io/insight-vector (any value, true recommended): specifically for Vector components
• node.daocloud.io/insight-otel-col (any value, true recommended): specifically for OpenTelemetry components

                                                                          You can refer to the following commands to add and remove labels on nodes:

                                                                          # Add label to node8, prioritizing scheduling insight-prometheus to node8 \nkubectl label nodes node8 node.daocloud.io/insight-prometheus=true\n\n# Remove the node.daocloud.io/insight-prometheus label from node8\nkubectl label nodes node8 node.daocloud.io/insight-prometheus-\n
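To see which nodes currently carry a given scheduling label (a quick check):

kubectl get nodes -l node.daocloud.io/insight-prometheus=true\n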

                                                                          Below is the default affinity preference for the insight-prometheus component during deployment:

                                                                          affinity:\n  nodeAffinity:\n    preferredDuringSchedulingIgnoredDuringExecution:\n    - preference:\n        matchExpressions:\n        - key: node-role.kubernetes.io/control-plane\n          operator: DoesNotExist\n      weight: 1\n    - preference:\n        matchExpressions:\n        - key: node.daocloud.io/insight-prometheus # (1)!\n          operator: Exists\n      weight: 2\n    - preference:\n        matchExpressions:\n        - key: node.daocloud.io/insight-any\n          operator: Exists\n      weight: 3\n    podAntiAffinity:\n      preferredDuringSchedulingIgnoredDuringExecution:\n        - weight: 1\n          podAffinityTerm:\n            topologyKey: kubernetes.io/hostname\n            labelSelector:\n              matchExpressions:\n                - key: app.kubernetes.io/instance\n                  operator: In\n                  values:\n                    - insight-agent-kube-prometh-prometheus\n
                                                                          1. Prioritize scheduling insight-prometheus to nodes with the node.daocloud.io/insight-prometheus label
                                                                          "},{"location":"en/admin/insight/quickstart/install/gethosturl.html","title":"Get Data Storage Address of Global Service Cluster","text":"

Insight is a product for unified observation of multiple clusters. To achieve unified storage and querying of observation data across clusters, sub-clusters need to report the collected observation data to the global service cluster for centralized storage. This document provides the storage component addresses required when installing the collection component insight-agent.

                                                                          "},{"location":"en/admin/insight/quickstart/install/gethosturl.html#install-insight-agent-in-global-service-cluster","title":"Install insight-agent in Global Service Cluster","text":"

If you are installing insight-agent in the global service cluster, it is recommended to access the storage components via their in-cluster domain names:

                                                                          export vminsert_host=\"vminsert-insight-victoria-metrics-k8s-stack.insight-system.svc.cluster.local\" # (1)!\nexport es_host=\"insight-es-master.insight-system.svc.cluster.local\" # (2)!\nexport otel_col_host=\"insight-opentelemetry-collector.insight-system.svc.cluster.local\" # (3)!\n
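A sketch of how these variables might then be passed when installing insight-agent (assuming the insight-release repo name used elsewhere in this document; ports are omitted because the in-cluster defaults apply):

helm upgrade --install insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  --set global.exporters.metric.host=${vminsert_host} \\\n  --set global.exporters.logging.host=${es_host} \\\n  --set global.exporters.trace.host=${otel_col_host}\n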
                                                                          "},{"location":"en/admin/insight/quickstart/install/gethosturl.html#install-insight-agent-in-other-clusters","title":"Install insight-agent in Other Clusters","text":""},{"location":"en/admin/insight/quickstart/install/gethosturl.html#get-address-via-interface-provided-by-insight-server","title":"Get Address via Interface Provided by Insight Server","text":"
                                                                          1. The management cluster uses the default LoadBalancer mode for exposure.

                                                                            Log in to the console of the global service cluster and run the following command:

                                                                            export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam'\n

                                                                            Note

Make sure ${INSIGHT_SERVER_IP} in the command is set correctly; the first line above exports it from the clusterIP of the insight-server Service.

                                                                            You will get the following response:

                                                                            {\n  \"values\": {\n    \"global\": {\n      \"exporters\": {\n        \"logging\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"metric\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"auditLog\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"trace\": {\n          \"host\": \"10.6.182.32\"\n        }\n      }\n    },\n    \"opentelemetry-operator\": {\n      \"enabled\": true\n    },\n    \"opentelemetry-collector\": {\n      \"enabled\": true\n    }\n  }\n}\n
• global.exporters.logging.host is the log service address; there is no need to set the corresponding service port, as the default value will be used.
                                                                            • global.exporters.metric.host is the metrics service address.
                                                                            • global.exporters.trace.host is the trace service address.
                                                                            • global.exporters.auditLog.host is the audit log service address (same service as trace but different port).
                                                                          2. Management cluster disables LoadBalancer

When calling the interface, you need to additionally pass an externally accessible node IP from the cluster, which will be used to construct the complete access address of the corresponding service.

                                                                            export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam' --data '{\"extra\": {\"EXPORTER_EXTERNAL_IP\": \"10.5.14.51\"}}'\n

                                                                            You will get the following response:

                                                                            {\n  \"values\": {\n    \"global\": {\n      \"exporters\": {\n        \"logging\": {\n          \"scheme\": \"https\",\n          \"host\": \"10.5.14.51\",\n          \"port\": 32007,\n          \"user\": \"elastic\",\n          \"password\": \"j8V1oVoM1184HvQ1F3C8Pom2\"\n        },\n        \"metric\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30683\n        },\n        \"auditLog\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30884\n        },\n        \"trace\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30274\n        }\n      }\n    },\n    \"opentelemetry-operator\": {\n      \"enabled\": true\n    },\n    \"opentelemetry-collector\": {\n      \"enabled\": true\n    }\n  }\n}\n
                                                                            • global.exporters.logging.host is the log service address.
                                                                            • global.exporters.logging.port is the NodePort exposed by the log service.
                                                                            • global.exporters.metric.host is the metrics service address.
                                                                            • global.exporters.metric.port is the NodePort exposed by the metrics service.
                                                                            • global.exporters.trace.host is the trace service address.
                                                                            • global.exporters.trace.port is the NodePort exposed by the trace service.
                                                                            • global.exporters.auditLog.host is the audit log service address (same service as trace but different port).
                                                                            • global.exporters.auditLog.port is the NodePort exposed by the audit log service.
                                                                          "},{"location":"en/admin/insight/quickstart/install/gethosturl.html#connect-via-loadbalancer","title":"Connect via LoadBalancer","text":"
                                                                          1. If LoadBalancer is enabled in the cluster and a VIP is set for Insight, you can manually execute the following command to obtain the address information for vminsert and opentelemetry-collector:

                                                                            $ kubectl get service -n insight-system | grep lb\nlb-insight-opentelemetry-collector               LoadBalancer   10.233.23.12    <pending>     4317:31286/TCP,8006:31351/TCP  24d\nlb-vminsert-insight-victoria-metrics-k8s-stack   LoadBalancer   10.233.63.67    <pending>     8480:31629/TCP                 24d\n
                                                                            • lb-vminsert-insight-victoria-metrics-k8s-stack is the address for the metrics service.
                                                                            • lb-insight-opentelemetry-collector is the address for the tracing service.
                                                                          2. Execute the following command to obtain the address information for elasticsearch:

                                                                            $ kubectl get service -n mcamel-system | grep es\nmcamel-common-es-cluster-masters-es-http               NodePort    10.233.16.120   <none>        9200:30465/TCP               47d\n

                                                                            mcamel-common-es-cluster-masters-es-http is the address for the logging service.
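Once the LoadBalancers receive external IPs (the example above still shows <pending>), the reporting addresses can be composed from the service ports shown, e.g. 8480 for vminsert and 4317 for the collector. A sketch with a hypothetical VIP of 10.6.216.120:

--set global.exporters.metric.host=10.6.216.120 \\\n--set global.exporters.metric.port=8480 \\\n--set global.exporters.trace.host=10.6.216.120 \\\n--set global.exporters.trace.port=4317 \\\n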

                                                                          "},{"location":"en/admin/insight/quickstart/install/gethosturl.html#connect-via-nodeport","title":"Connect via NodePort","text":"

If the LoadBalancer feature is disabled in the global service cluster, the LoadBalancer resources mentioned above will not be created by default. The relevant service names are:

                                                                          • vminsert-insight-victoria-metrics-k8s-stack (metrics service)
                                                                          • common-es (logging service)
                                                                          • insight-opentelemetry-collector (tracing service)

After obtaining the corresponding address and port information for the services in the two scenarios above, make the following settings:

                                                                          --set global.exporters.logging.host=  # (1)!\n--set global.exporters.logging.port=  # (2)!\n--set global.exporters.metric.host=   # (3)!\n--set global.exporters.metric.port=   # (4)!\n--set global.exporters.trace.host=    # (5)!\n--set global.exporters.trace.port=    # (6)!\n--set global.exporters.auditLog.host= # (7)!\n
                                                                          1. NodeIP of the externally accessible management cluster
                                                                          2. NodePort of the logging service port 9200
                                                                          3. NodeIP of the externally accessible management cluster
                                                                          4. NodePort of the metrics service port 8480
                                                                          5. NodeIP of the externally accessible management cluster
                                                                          6. NodePort of the tracing service port 4317
                                                                          7. NodeIP of the externally accessible management cluster
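Putting it together, a sketch with hypothetical values (the node IP 10.5.14.51 and the NodePorts from the interface response example earlier on this page):

--set global.exporters.logging.host=10.5.14.51 \\\n--set global.exporters.logging.port=32007 \\\n--set global.exporters.metric.host=10.5.14.51 \\\n--set global.exporters.metric.port=30683 \\\n--set global.exporters.trace.host=10.5.14.51 \\\n--set global.exporters.trace.port=30274 \\\n--set global.exporters.auditLog.host=10.5.14.51 \\\n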
                                                                          "},{"location":"en/admin/insight/quickstart/install/install-agent.html","title":"Install insight-agent","text":"

insight-agent is a plugin for collecting Insight data, supporting unified observation of metrics, traces, and logs. This article describes how to install insight-agent in an online environment for an integrated cluster.

                                                                          "},{"location":"en/admin/insight/quickstart/install/install-agent.html#prerequisites","title":"Prerequisites","text":"

                                                                          Please confirm that your cluster has successfully connected to the container management platform. You can refer to Integrate Clusters for details.

                                                                          "},{"location":"en/admin/insight/quickstart/install/install-agent.html#steps","title":"Steps","text":"
1. Enter Container Management from the left navigation bar, then go to Clusters. Find the cluster where you want to install insight-agent.

2. Click Install now to jump there directly, or click the cluster, select Helm Apps -> Helm Templates from the left navigation bar, search for insight-agent in the search box, and click it for details.

3. Select the appropriate version and click Install.

4. Fill in the name, select the namespace and version, and fill in the reporting addresses for logging, metric, audit, and trace data in the YAML file. The system pre-fills the default component addresses for data reporting; please verify them before clicking OK to install.

  If you need to modify the data reporting address, please refer to Get Data Reporting Address.

5. The system will automatically return to Helm Apps. When the application status changes from Unknown to Deployed, insight-agent has been installed successfully.

                                                                            Note

• Click ┇ on the far right to perform more operations such as Update, View YAML, and Delete in the pop-up menu.
                                                                            • For a practical installation demo, watch Video demo of installing insight-agent
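Besides checking the status in the UI, a quick CLI check (a sketch, assuming the default insight-system namespace) can confirm the release and its pods:

helm -n insight-system status insight-agent\nkubectl -n insight-system get pods\n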
                                                                          "},{"location":"en/admin/insight/quickstart/install/knownissues.html","title":"Known Issues","text":"

                                                                          This page lists some issues related to the installation and uninstallation of Insight Agent and their workarounds.

                                                                          "},{"location":"en/admin/insight/quickstart/install/knownissues.html#uninstallation-failure-of-insight-agent","title":"Uninstallation Failure of Insight Agent","text":"

                                                                          When you run the following command to uninstall Insight Agent,

helm uninstall insight-agent -n insight-system\n

the TLS secret used by otel-operator fails to be uninstalled.

This is due to the "reuse TLS secret" logic in otel-operator: it checks whether the MutatingWebhookConfiguration exists and reuses the CA certificate bound to it. However, since helm uninstall has already removed the MutatingWebhookConfiguration, this results in a null value.

Therefore, please manually delete the corresponding secret using one of the following methods:

                                                                          • Delete via command line: Log in to the console of the target cluster and run the following command:

                                                                            kubectl -n insight-system delete secret insight-agent-opentelemetry-operator-controller-manager-service-cert\n
                                                                          • Delete via UI: Log in to AI platform container management, select the target cluster, select Secret from the left menu, input insight-agent-opentelemetry-operator-controller-manager-service-cert, then select Delete.

                                                                          "},{"location":"en/admin/insight/quickstart/install/knownissues.html#insight-agent_1","title":"Insight Agent","text":""},{"location":"en/admin/insight/quickstart/install/knownissues.html#log-collection-endpoint-not-updated-when-upgrading-insight-agent","title":"Log Collection Endpoint Not Updated When Upgrading Insight Agent","text":"

                                                                          When updating the log configuration of the insight-agent from Elasticsearch to Kafka or from Kafka to Elasticsearch, the changes do not take effect and the agent continues to use the previous configuration.

                                                                          Solution :

                                                                          Manually restart Fluent Bit in the cluster.

                                                                          "},{"location":"en/admin/insight/quickstart/install/knownissues.html#podmonitor-collects-multiple-sets-of-jvm-metrics","title":"PodMonitor Collects Multiple Sets of JVM Metrics","text":"
1. In this version, there is a defect in PodMonitor/insight-kubernetes-pod: it incorrectly creates scrape jobs for all containers in Pods annotated with insight.opentelemetry.io/metric-scrape=true, instead of only the containers corresponding to insight.opentelemetry.io/metric-port.

2. After a PodMonitor is declared, PrometheusOperator pre-configures some service discovery configurations. For CRD compatibility reasons, the approach of configuring collection tasks through annotations was abandoned.

                                                                          3. Use the additional scrape config mechanism provided by Prometheus to configure the service discovery rules in a secret and introduce them into Prometheus.

                                                                          Therefore:

                                                                          1. Delete the current PodMonitor for insight-kubernetes-pod
                                                                          2. Use a new rule

In the new rule, action: keepequal compares source_labels with target_label for equality to determine whether to create collection tasks for a container's ports. Note that this feature is only available in Prometheus v2.41.0 (released 2022-12-20) and higher.

                                                                          +    - source_labels: [__meta_kubernetes_pod_annotation_insight_opentelemetry_io_metric_port]\n+      separator: ;\n+      target_label: __meta_kubernetes_pod_container_port_number\n+      action: keepequal\n
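For context, a sketch of where this rule sits inside an additional scrape config (the job name and the keep rule on the scrape annotation are illustrative assumptions):

scrape_configs:\n  - job_name: insight-kubernetes-pod\n    kubernetes_sd_configs:\n      - role: pod\n    relabel_configs:\n      - source_labels: [__meta_kubernetes_pod_annotation_insight_opentelemetry_io_metric_scrape]\n        action: keep\n        regex: \"true\"\n      - source_labels: [__meta_kubernetes_pod_annotation_insight_opentelemetry_io_metric_port]\n        separator: ;\n        target_label: __meta_kubernetes_pod_container_port_number\n        action: keepequal\n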
                                                                          "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html","title":"Upgrade Notes","text":"

                                                                          This page provides some considerations for upgrading insight-server and insight-agent.

                                                                          "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v028x-or-lower-to-v029x","title":"Upgrade from v0.28.x (or lower) to v0.29.x","text":"

                                                                          Due to the upgrade of the Opentelemetry community operator chart version in v0.29.0, the supported values for featureGates in the values file have changed. Therefore, before upgrading, you need to set the value of featureGates to empty, as follows:

                                                                          -  --set opentelemetry-operator.manager.featureGates=\"+operator.autoinstrumentation.go,+operator.autoinstrumentation.multi-instrumentation,+operator.autoinstrumentation.nginx\" \\\n+  --set opentelemetry-operator.manager.featureGates=\"\"\n
                                                                          "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v026x-or-lower-to-v027x-or-higher","title":"Upgrade from v0.26.x (or lower) to v0.27.x or higher","text":"

In v0.27.x, the switch for the vector component has been split out into its own setting. If the existing environment has vector enabled, you need to specify --set vector.enabled=true when upgrading insight-server.

                                                                          "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v019x-or-lower-to-020x","title":"Upgrade from v0.19.x (or lower) to 0.20.x","text":"

Before upgrading Insight, you need to manually delete the jaeger-collector and jaeger-query deployments by running the following commands:

                                                                          kubectl -n insight-system delete deployment insight-jaeger-collector\nkubectl -n insight-system delete deployment insight-jaeger-query\n
                                                                          "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v017x-or-lower-to-v018x","title":"Upgrade from v0.17.x (or lower) to v0.18.x","text":"

                                                                          In v0.18.x, there have been updates to the Jaeger-related deployment files, so you need to manually run the following commands before upgrading insight-server:

                                                                          kubectl -n insight-system delete deployment insight-jaeger-collector\nkubectl -n insight-system delete deployment insight-jaeger-query\n

                                                                          There have been changes to metric names in v0.18.x, so after upgrading insight-server, insight-agent should also be upgraded.

                                                                          In addition, the parameters for enabling the tracing module and adjusting the ElasticSearch connection have been modified. Refer to the following parameters:

                                                                          +  --set global.tracing.enable=true \\\n-  --set jaeger.collector.enabled=true \\\n-  --set jaeger.query.enabled=true \\\n+  --set global.elasticsearch.scheme=${your-external-elasticsearch-scheme} \\\n+  --set global.elasticsearch.host=${your-external-elasticsearch-host} \\\n+  --set global.elasticsearch.port=${your-external-elasticsearch-port} \\\n+  --set global.elasticsearch.user=${your-external-elasticsearch-username} \\\n+  --set global.elasticsearch.password=${your-external-elasticsearch-password} \\\n-  --set jaeger.storage.elasticsearch.scheme=${your-external-elasticsearch-scheme} \\\n-  --set jaeger.storage.elasticsearch.host=${your-external-elasticsearch-host} \\\n-  --set jaeger.storage.elasticsearch.port=${your-external-elasticsearch-port} \\\n-  --set jaeger.storage.elasticsearch.user=${your-external-elasticsearch-username} \\\n-  --set jaeger.storage.elasticsearch.password=${your-external-elasticsearch-password} \\\n
                                                                          "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v015x-or-lower-to-v016x","title":"Upgrade from v0.15.x (or lower) to v0.16.x","text":"

                                                                          In v0.16.x, a new feature parameter disableRouteContinueEnforce in the vmalertmanagers CRD is used. Therefore, you need to manually run the following command before upgrading insight-server:

                                                                          kubectl apply --server-side -f https://raw.githubusercontent.com/VictoriaMetrics/operator/v0.33.0/config/crd/bases/operator.victoriametrics.com_vmalertmanagers.yaml --force-conflicts\n

                                                                          Note

                                                                          If you are performing an offline installation, after extracting the insight offline package, please run the following command to update CRDs.

                                                                          kubectl apply --server-side -f insight/dependency-crds --force-conflicts \n
                                                                          "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v023x-or-lower-to-v024x","title":"Upgrade from v0.23.x (or lower) to v0.24.x","text":"

                                                                          In v0.24.x, CRDs have been added to the OTEL operator chart. However, helm upgrade does not update CRDs, so you need to manually run the following command:

                                                                          kubectl apply -f https://raw.githubusercontent.com/open-telemetry/opentelemetry-helm-charts/main/charts/opentelemetry-operator/crds/crd-opentelemetry.io_opampbridges.yaml\n

                                                                          If you are performing an offline installation, you can find the above CRD yaml file after extracting the insight-agent offline package. After extracting the insight-agent Chart, manually run the following command:

                                                                          kubectl apply -f charts/agent/crds/crd-opentelemetry.io_opampbridges.yaml\n
                                                                          "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v019x-or-lower-to-v020x","title":"Upgrade from v0.19.x (or lower) to v0.20.x","text":"

In v0.20.x, Kafka log export configuration has been added, and the log export configuration has been adjusted accordingly. Before upgrading insight-agent, please note the parameter changes: the previous logging configuration has been moved under logging.elasticsearch:

                                                                          -  --set global.exporters.logging.host \\\n-  --set global.exporters.logging.port \\\n+  --set global.exporters.logging.elasticsearch.host \\\n+  --set global.exporters.logging.elasticsearch.port \\\n
                                                                          "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v017x-or-lower-to-v018x_1","title":"Upgrade from v0.17.x (or lower) to v0.18.x","text":"

Due to the updated Jaeger deployment files in v0.18.x, note the following parameter changes before upgrading insight-agent.

                                                                          +  --set global.exporters.trace.enable=true \\\n-  --set opentelemetry-collector.enabled=true \\\n-  --set opentelemetry-operator.enabled=true \\\n
                                                                          "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v016x-or-lower-to-v017x","title":"Upgrade from v0.16.x (or lower) to v0.17.x","text":"

In v0.17.x, the kube-prometheus-stack chart version was upgraded from 41.9.1 to 45.28.1, and some fields in the CRDs it uses were upgraded as well, such as the attachMetadata field of ServiceMonitor. Therefore, the following command needs to be run before upgrading the insight-agent:

                                                                          kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --force-conflicts\n

                                                                          If you are performing an offline installation, you can find the yaml for the above CRD in insight-agent/dependency-crds after extracting the insight-agent offline package.

                                                                          "},{"location":"en/admin/insight/quickstart/install/upgrade-note.html#upgrade-from-v011x-or-earlier-to-v012x","title":"Upgrade from v0.11.x (or earlier) to v0.12.x","text":"

v0.12.x upgrades the kube-prometheus-stack chart from 39.6.0 to 41.9.1, which includes prometheus-operator v0.60.1 and the prometheus-node-exporter chart v4.3.0. prometheus-node-exporter uses the Kubernetes recommended labels after the upgrade, so you need to delete the node-exporter DaemonSet. prometheus-operator has updated its CRDs, so you need to run the following commands before upgrading the insight-agent:

kubectl delete daemonset insight-agent-prometheus-node-exporter -n insight-system\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml --force-conflicts\n
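Equivalently, the CRD updates can be applied in a loop (a sketch covering the same eight CRDs):

for crd in alertmanagerconfigs alertmanagers podmonitors probes prometheuses prometheusrules servicemonitors thanosrulers; do\n  kubectl apply --server-side --force-conflicts \\\n    -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_${crd}.yaml\ndone\n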

                                                                          Note

                                                                          If you are installing offline, you can run the following command to update the CRD after decompressing the insight-agent offline package.

                                                                          kubectl apply --server-side -f insight-agent/dependency-crds --force-conflicts\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/operator.html","title":"Enhance Applications Non-Intrusively with Operators","text":"

                                                                          Currently, only Java, Node.js, Python, .NET, and Golang support non-intrusive integration through the Operator approach.
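Once the Operator and Instrumentation CR are in place, instrumentation is switched on per workload via pod annotations. A minimal sketch for a Java Deployment (the annotation value points at the Instrumentation CR installed later on this page; my-java-app and its image are hypothetical names):

apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-java-app\nspec:\n  selector:\n    matchLabels:\n      app: my-java-app\n  template:\n    metadata:\n      labels:\n        app: my-java-app\n      annotations:\n        # Enable Java auto-instrumentation using the CR in insight-system\n        instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n    spec:\n      containers:\n        - name: app\n          image: my-registry/my-java-app:latest # hypothetical image\n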

                                                                          "},{"location":"en/admin/insight/quickstart/otel/operator.html#prerequisites","title":"Prerequisites","text":"

                                                                          Please ensure that the insight-agent is ready. If not, please refer to Install insight-agent for data collection and make sure the following three items are ready:

                                                                          • Enable trace functionality for insight-agent
• Check that the address and port for trace data are correctly filled in
• Ensure that the Pods corresponding to deployment/insight-agent-opentelemetry-operator and deployment/insight-agent-opentelemetry-collector are ready (a quick check is sketched below)
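A quick readiness check for the last item (a sketch):

kubectl -n insight-system get deployment insight-agent-opentelemetry-operator insight-agent-opentelemetry-collector\n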
                                                                          "},{"location":"en/admin/insight/quickstart/otel/operator.html#install-instrumentation-cr","title":"Install Instrumentation CR","text":"

                                                                          Tip

                                                                          Starting from Insight v0.22.0, there is no longer a need to manually install the Instrumentation CR.

                                                                          Install it in the insight-system namespace. There are some minor differences between different versions.

Insight v0.21.x | Insight v0.20.x | Insight v0.18.x | Insight v0.17.x | Insight v0.16.x
                                                                          K8S_CLUSTER_UID=$(kubectl get namespace kube-system -o jsonpath='{.metadata.uid}')\nkubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/openinsight-proj/autoinstrumentation-java:1.31.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n      - name: OTEL_K8S_CLUSTER_UID\n        value: $K8S_CLUSTER_UID\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
                                                                          kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.29.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0-rc.2\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
                                                                          kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.25.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.37.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.38b0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.1-alpha\nEOF\n
                                                                          kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.23.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.34.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.33b0\nEOF\n
                                                                          kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.23.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.34.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.33b0\nEOF\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/operator.html#works-with-the-service-mesh-product-mspider","title":"Works with the Service Mesh Product (Mspider)","text":"

If you enable the tracing capability of Mspider (Service Mesh), you need to add an additional environment variable injection configuration:

                                                                          "},{"location":"en/admin/insight/quickstart/otel/operator.html#the-operation-steps-are-as-follows","title":"The operation steps are as follows","text":"
1. Log in to the AI platform, then enter Container Management and select the target cluster.
2. Click CRDs in the left navigation bar, find instrumentations.opentelemetry.io, and enter the details page.
3. Select the insight-system namespace, then edit insight-opentelemetry-autoinstrumentation and add the following content under spec.env:

                                                                                - name: OTEL_SERVICE_NAME\n      valueFrom:\n        fieldRef:\n          fieldPath: metadata.labels['app'] \n

                                                                            The complete example (for Insight v0.21.x) is as follows:

                                                                            K8S_CLUSTER_UID=$(kubectl get namespace kube-system -o jsonpath='{.metadata.uid}')\nkubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n    - name: OTEL_SERVICE_NAME\n      valueFrom:\n        fieldRef:\n          fieldPath: metadata.labels['app'] \n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/openinsight-proj/autoinstrumentation-java:1.31.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n      - name: OTEL_K8S_CLUSTER_UID\n        value: $K8S_CLUSTER_UID\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/operator.html#add-annotations-to-automatically-access-traces","title":"Add annotations to automatically access traces","text":"

After the above is ready, you can enable tracing for an application through pod annotations. OTel currently supports trace integration via annotations. Depending on the service language, different pod annotations need to be added. Each service can use one of two types of annotations:

                                                                          • Only inject environment variable annotations

There is only one annotation of this type. It adds OTel-related environment variables, such as the trace reporting endpoint, the ID of the cluster where the container is located, and the namespace. This annotation is very useful when the application's language has no automatic probe support.

                                                                            instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

The value is split into two parts by /: the first part (insight-system) is the namespace of the CR installed in the previous step, and the second part (insight-opentelemetry-autoinstrumentation) is the name of the CR. A quick way to verify that this CR exists is shown after this list.

                                                                          • Automatic probe injection and environment variable injection annotations

There are currently four such annotations, corresponding to four different programming languages: java, nodejs, python, and dotnet. After one is added, an automatic probe and the default OTel environment variables are injected into the first container of the pod spec:

Java application / NodeJs application / Python application / Dotnet application / Golang application
                                                                            instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                                                            instrumentation.opentelemetry.io/inject-nodejs: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                                                            instrumentation.opentelemetry.io/inject-python: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                                                            instrumentation.opentelemetry.io/inject-dotnet: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

Since Go's auto-instrumentation requires OTEL_GO_AUTO_TARGET_EXE to be set, you must provide a valid executable path through annotations or the Instrumentation resource. Failure to set this value aborts Go's auto-instrumentation injection, and traces will not be reported.

                                                                            instrumentation.opentelemetry.io/inject-go: \"insight-system/insight-opentelemetry-autoinstrumentation\"\ninstrumentation.opentelemetry.io/otel-go-auto-target-exe: \"/path/to/container/executable\"\n

Go auto-instrumentation also requires elevated permissions. The following permissions are set automatically and are necessary.

                                                                            securityContext:\n  privileged: true\n  runAsUser: 0\n
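Before rolling out annotated workloads, you can verify that the Instrumentation CR referenced by these annotations exists. A quick sanity check (the resource and namespace names follow the default installation described above):

kubectl get instrumentations.opentelemetry.io -n insight-system\n# Expect insight-opentelemetry-autoinstrumentation in the output;\n# if it is missing, revisit the Instrumentation CR installation step above.\n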

                                                                          Tip

                                                                          The OpenTelemetry Operator automatically adds some OTel-related environment variables when injecting probes and also supports overriding these variables. The priority order for overriding these environment variables is as follows:

                                                                          original container env vars -> language specific env vars -> common env vars -> instrument spec configs' vars\n

However, it is important to avoid manually overriding OTEL_RESOURCE_ATTRIBUTES_NODE_NAME. This variable serves as an identifier within the operator to determine whether a pod has already been injected with a probe; manually adding it may prevent the probe from being injected successfully.
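For example, to override the sampler for a single workload, set the variable directly on the container; per the priority order above, original container env vars win over the injected defaults. A minimal sketch (the workload and values are illustrative):

containers:\n  - name: myapp\n    env:\n      # Takes priority over the sampler injected from the Instrumentation CR\n      - name: OTEL_TRACES_SAMPLER\n        value: \"parentbased_traceidratio\"\n      - name: OTEL_TRACES_SAMPLER_ARG\n        value: \"0.5\"\n      # Do NOT set OTEL_RESOURCE_ATTRIBUTES_NODE_NAME manually (see above)\n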

                                                                          "},{"location":"en/admin/insight/quickstart/otel/operator.html#automatic-injection-demo","title":"Automatic injection Demo","text":"

Note that the annotation is added to the pod template, i.e. under spec.template.metadata.annotations.

                                                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-app\n  labels:\n    app: my-app\nspec:\n  selector:\n    matchLabels:\n      app: my-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-app\n      annotations:\n        instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n    spec:\n      containers:\n      - name: myapp\n        image: jaegertracing/vertx-create-span:operator-e2e-tests\n        ports:\n          - containerPort: 8080\n            protocol: TCP\n

                                                                          The final generated YAML is as follows:

                                                                          apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-deployment-with-sidecar-565bd877dd-nqkk6\n  generateName: my-deployment-with-sidecar-565bd877dd-\n  namespace: default\n  uid: aa89ca0d-620c-4d20-8bc1-37d67bad4ea4\n  resourceVersion: '2668986'\n  creationTimestamp: '2022-04-08T05:58:48Z'\n  labels:\n    app: my-pod-with-sidecar\n    pod-template-hash: 565bd877dd\n  annotations:\n    cni.projectcalico.org/containerID: 234eae5e55ea53db2a4bc2c0384b9a1021ed3908f82a675e4a92a49a7e80dd61\n    cni.projectcalico.org/podIP: 192.168.134.133/32\n    cni.projectcalico.org/podIPs: 192.168.134.133/32\n    instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\nspec:\n  volumes:\n    - name: kube-api-access-sp2mz\n      projected:\n        sources:\n          - serviceAccountToken:\n              expirationSeconds: 3607\n              path: token\n          - configMap:\n              name: kube-root-ca.crt\n              items:\n                - key: ca.crt\n                  path: ca.crt\n          - downwardAPI:\n              items:\n                - path: namespace\n                  fieldRef:\n                    apiVersion: v1\n                    fieldPath: metadata.namespace\n        defaultMode: 420\n    - name: opentelemetry-auto-instrumentation\n      emptyDir: {}\n  initContainers:\n    - name: opentelemetry-auto-instrumentation\n      image: >-\n        ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java\n      command:\n        - cp\n        - /javaagent.jar\n        - /otel-auto-instrumentation/javaagent.jar\n      resources: {}\n      volumeMounts:\n        - name: opentelemetry-auto-instrumentation\n          mountPath: /otel-auto-instrumentation\n        - name: kube-api-access-sp2mz\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  containers:\n    - name: myapp\n      image: ghcr.io/pavolloffay/spring-petclinic:latest\n      env:\n        - name: OTEL_JAVAAGENT_DEBUG\n          value: 'true'\n        - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n          value: 'true'\n        - name: SPLUNK_PROFILER_ENABLED\n          value: 'false'\n        - name: JAVA_TOOL_OPTIONS\n          value: ' -javaagent:/otel-auto-instrumentation/javaagent.jar'\n        - name: OTEL_TRACES_EXPORTER\n          value: otlp\n        - name: OTEL_EXPORTER_OTLP_ENDPOINT\n          value: http://insight-agent-opentelemetry-collector.svc.cluster.local:4317\n        - name: OTEL_EXPORTER_OTLP_TIMEOUT\n          value: '20'\n        - name: OTEL_TRACES_SAMPLER\n          value: parentbased_traceidratio\n        - name: OTEL_TRACES_SAMPLER_ARG\n          value: '0.85'\n        - name: SPLUNK_TRACE_RESPONSE_HEADER_ENABLED\n          value: 'true'\n        - name: OTEL_SERVICE_NAME\n          value: my-deployment-with-sidecar\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.name\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_UID\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.uid\n        - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: 
spec.nodeName\n        - name: OTEL_RESOURCE_ATTRIBUTES\n          value: >-\n            k8s.container.name=myapp,k8s.deployment.name=my-deployment-with-sidecar,k8s.deployment.uid=8de6929d-dda0-436c-bca1-604e9ca7ea4e,k8s.namespace.name=default,k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME),k8s.pod.uid=$(OTEL_RESOURCE_ATTRIBUTES_POD_UID),k8s.replicaset.name=my-deployment-with-sidecar-565bd877dd,k8s.replicaset.uid=190d5f6e-ba7f-4794-b2e6-390b5879a6c4\n        - name: OTEL_PROPAGATORS\n          value: jaeger,b3\n      resources: {}\n      volumeMounts:\n        - name: kube-api-access-sp2mz\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n        - name: opentelemetry-auto-instrumentation\n          mountPath: /otel-auto-instrumentation\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  restartPolicy: Always\n  terminationGracePeriodSeconds: 30\n  dnsPolicy: ClusterFirst\n  serviceAccountName: default\n  serviceAccount: default\n  nodeName: k8s-master3\n  securityContext:\n    runAsUser: 1000\n    runAsGroup: 3000\n    fsGroup: 2000\n  schedulerName: default-scheduler\n  tolerations:\n    - key: node.kubernetes.io/not-ready\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n    - key: node.kubernetes.io/unreachable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n  priority: 0\n  enableServiceLinks: true\n  preemptionPolicy: PreemptLowerPriority\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/operator.html#trace-query","title":"Trace query","text":"

For how to query the services that have been connected, refer to Trace Query.

                                                                          "},{"location":"en/admin/insight/quickstart/otel/otel.html","title":"Use OTel to provide the application observability","text":"

Instrumentation (enhancement) is the process of enabling application code to generate telemetry data, i.e., data that helps you monitor or measure the performance and status of your application.

OpenTelemetry is a leading open source project providing instrumentation libraries for major programming languages and popular frameworks. It is a project under the Cloud Native Computing Foundation and is backed by the vast resources of the community. It provides a standardized data format for collected data without tying you to a specific vendor.

                                                                          Insight supports OpenTelemetry for application instrumentation to enhance your applications.

                                                                          This guide introduces the basic concepts of telemetry enhancement using OpenTelemetry. OpenTelemetry also has an ecosystem of libraries, plugins, integrations, and other useful tools to extend it. You can find these resources at the OTel Registry.

                                                                          You can use any open standard library for telemetry enhancement and use Insight as an observability backend to ingest, analyze, and visualize data.

To instrument your code, you can use the language-specific instrumentation provided by OpenTelemetry:

Insight currently provides an easy way to instrument .NET, NodeJS, Java, Python, and Golang applications with OpenTelemetry. Please follow the guidelines below.

                                                                          "},{"location":"en/admin/insight/quickstart/otel/otel.html#trace-enhancement","title":"Trace Enhancement","text":"
• Best practices for integrating traces: Application Non-Intrusive Enhancement via Operator
• Manual instrumentation, with the Go language as an example: Enhance Go applications with the OpenTelemetry SDK
• Using eBPF to implement non-intrusive auto-instrumentation in Go (experimental feature)
                                                                          "},{"location":"en/admin/insight/quickstart/otel/send_tracing_to_insight.html","title":"Sending Trace Data to Insight","text":"

                                                                          This document describes how customers can send trace data to Insight on their own. It mainly includes the following two scenarios:

1. Customer apps report traces to Insight through the OTel Agent/SDK
2. Traces are forwarded to Insight through the OpenTelemetry Collector (OTel COL)

In each cluster where Insight Agent is installed, there is an insight-agent-otel-col component that receives trace data for that cluster. This component is therefore the entry point for user access, and you need to obtain its address first. You can get the address of the OpenTelemetry Collector in the cluster through the AI platform interface, for example insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317:
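If you prefer the command line, the address can also be derived from the collector Service. A sketch, assuming the default Insight Agent installation:

kubectl get svc insight-agent-opentelemetry-collector -n insight-system\n# The in-cluster gRPC address is then:\n# insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n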

                                                                          In addition, there are some slight differences for different reporting methods:

                                                                          "},{"location":"en/admin/insight/quickstart/otel/send_tracing_to_insight.html#customer-apps-report-traces-to-insight-through-otel-agentsdk","title":"Customer apps report traces to Insight through OTEL Agent/SDK","text":"

                                                                          To successfully report trace data to Insight and display it properly, it is recommended to provide the required metadata (Resource Attributes) for OTLP through the following environment variables. There are two ways to achieve this:

                                                                          • Manually add them to the deployment YAML file, for example:

                                                                            ...\n- name: OTEL_EXPORTER_OTLP_ENDPOINT\n  value: \"http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\"\n- name: \"OTEL_SERVICE_NAME\"\n  value: my-java-app-name\n- name: \"OTEL_K8S_NAMESPACE\"\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: metadata.namespace\n- name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: spec.nodeName\n- name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: metadata.name\n- name: OTEL_RESOURCE_ATTRIBUTES\n  value: \"k8s.namespace.name=$(OTEL_K8S_NAMESPACE),k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME)\"\n
                                                                          • Use the automatic injection capability of Insight Agent to inject the metadata (Resource Attributes)

Ensure that Insight Agent is working properly and that the Instrumentation CR has been installed; then you only need to add the following annotation to the Pod:

                                                                            instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

                                                                            For example:

apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-with-auto-instrumentation\nspec:\n  selector:\n    matchLabels:\n      app.kubernetes.io/name: my-deployment-with-auto-instrumentation-kubernetes\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: my-deployment-with-auto-instrumentation-kubernetes\n      annotations:\n        sidecar.opentelemetry.io/inject: \"false\"\n        instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/send_tracing_to_insight.html#forwarding-traces-to-insight-through-opentelemetry-collector","title":"Forwarding traces to Insight through Opentelemetry Collector","text":"

After ensuring that the application has the metadata mentioned above, you only need to add an OTLP exporter to your own OpenTelemetry Collector configuration to forward the trace data to the Insight Agent OpenTelemetry Collector. Below is an example OpenTelemetry Collector configuration file:

...\nexporters:\n  otlp/insight:\n    endpoint: insight-opentelemetry-collector.insight-system.svc.cluster.local:4317\nservice:\n  ...\n  pipelines:\n    ...\n    traces:\n      exporters:\n        - otlp/insight\n
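For reference, a more complete minimal configuration might look like the following. This is a sketch only; the otlp receiver and its port are the upstream defaults and may differ in your setup:

receivers:\n  otlp:\n    protocols:\n      grpc:\n        endpoint: 0.0.0.0:4317\nexporters:\n  otlp/insight:\n    endpoint: insight-opentelemetry-collector.insight-system.svc.cluster.local:4317\n    tls:\n      insecure: true\nservice:\n  pipelines:\n    traces:\n      receivers:\n        - otlp\n      exporters:\n        - otlp/insight\n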
                                                                          "},{"location":"en/admin/insight/quickstart/otel/send_tracing_to_insight.html#references","title":"References","text":"
                                                                          • Enhancing Applications Non-intrusively with the Operator
                                                                          • Achieving Observability with OTel
                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html","title":"Enhance Go applications with OTel SDK","text":"

                                                                          This page contains instructions on how to set up OpenTelemetry enhancements in a Go application.

                                                                          OpenTelemetry, also known simply as OTel, is an open-source observability framework that helps generate and collect telemetry data: traces, metrics, and logs in Go apps.

                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#enhance-go-apps-with-the-opentelemetry-sdk","title":"Enhance Go apps with the OpenTelemetry SDK","text":""},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#install-related-dependencies","title":"Install related dependencies","text":"

Dependencies related to the OpenTelemetry exporter and SDK must be installed first. If you are using another request router, refer to request routing. After changing into the application source folder, run the following command:

                                                                          go get go.opentelemetry.io/otel@v1.8.0 \\\n  go.opentelemetry.io/otel/trace@v1.8.0 \\\n  go.opentelemetry.io/otel/sdk@v1.8.0 \\\n  go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin@v0.33.0 \\\n  go.opentelemetry.io/otel/exporters/otlp/otlptrace@v1.7.0 \\\n  go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc@v1.4.1\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#create-an-initialization-feature-using-the-opentelemetry-sdk","title":"Create an initialization feature using the OpenTelemetry SDK","text":"

In order for an application to send data, a function is required to initialize OpenTelemetry. Add the following code snippet to the main.go file:

import (\n    \"context\"\n    \"os\"\n    \"time\"\n\n    \"go.opentelemetry.io/otel\"\n    \"go.opentelemetry.io/otel/exporters/otlp/otlptrace\"\n    \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc\"\n    \"go.opentelemetry.io/otel/propagation\"\n    \"go.opentelemetry.io/otel/sdk/resource\"\n    sdktrace \"go.opentelemetry.io/otel/sdk/trace\"\n    semconv \"go.opentelemetry.io/otel/semconv/v1.7.0\"\n    \"go.uber.org/zap\"\n    \"google.golang.org/grpc\"\n)\n\nvar tracerExp *otlptrace.Exporter\n\nfunc retryInitTracer() func() {\n    var shutdown func()\n    go func() {\n        for {\n            // otel will reconnect and re-send spans when the otel collector recovers,\n            // so we don't need to re-init the tracer exporter.\n            if tracerExp == nil {\n                shutdown = initTracer()\n            } else {\n                break\n            }\n            time.Sleep(time.Minute * 5)\n        }\n    }()\n    return shutdown\n}\n\nfunc initTracer() func() {\n    // temporarily set the timeout to 10s\n    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n    defer cancel()\n\n    serviceName, ok := os.LookupEnv(\"OTEL_SERVICE_NAME\")\n    if !ok {\n        serviceName = \"server_name\"\n        os.Setenv(\"OTEL_SERVICE_NAME\", serviceName)\n    }\n    otelAgentAddr, ok := os.LookupEnv(\"OTEL_EXPORTER_OTLP_ENDPOINT\")\n    if !ok {\n        otelAgentAddr = \"http://localhost:4317\"\n        os.Setenv(\"OTEL_EXPORTER_OTLP_ENDPOINT\", otelAgentAddr)\n    }\n    zap.S().Infof(\"OTLP Trace connect to: %s with service name: %s\", otelAgentAddr, serviceName)\n\n    traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure(), otlptracegrpc.WithDialOption(grpc.WithBlock()))\n    if err != nil {\n        handleErr(err, \"OTLP Trace gRPC Creation\")\n        return nil\n    }\n\n    tracerProvider := sdktrace.NewTracerProvider(\n        sdktrace.WithBatcher(traceExporter),\n        sdktrace.WithSampler(sdktrace.AlwaysSample()),\n        sdktrace.WithResource(resource.NewWithAttributes(semconv.SchemaURL)),\n    )\n\n    otel.SetTracerProvider(tracerProvider)\n    otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))\n\n    tracerExp = traceExporter\n    return func() {\n        // Shutdown will flush any remaining spans and shut down the exporter.\n        handleErr(tracerProvider.Shutdown(ctx), \"failed to shutdown TracerProvider\")\n    }\n}\n\nfunc handleErr(err error, message string) {\n    if err != nil {\n        zap.S().Errorf(\"%s: %v\", message, err)\n    }\n}\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#initialize-tracker-in-maingo","title":"Initialize tracker in main.go","text":"

Modify the main function in main.go to initialize the tracer. When your service shuts down, you should call TracerProvider.Shutdown() to ensure all spans are exported. The service makes this call as a deferred function in main:

func main() {\n    // start otel tracing\n    if shutdown := retryInitTracer(); shutdown != nil {\n        defer shutdown()\n    }\n    ......\n}\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#add-opentelemetry-gin-middleware-to-the-application","title":"Add OpenTelemetry Gin middleware to the application","text":"

Configure Gin to use the middleware by adding the following lines to main.go:

                                                                          import (\n    ....\n  \"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin\"\n)\n\nfunc main() {\n    ......\n    r := gin.Default()\n    r.Use(otelgin.Middleware(\"my-app\"))\n    ......\n}\n
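With the middleware in place, the server span lives in the request context, so child spans created inside a handler nest under it automatically. A minimal sketch (the route, tracer name, and span name are illustrative):

r.GET(\"/books\", func(c *gin.Context) {\n    // Start a child span from the request context populated by otelgin\n    ctx, span := otel.Tracer(\"my-app\").Start(c.Request.Context(), \"listBooks\")\n    defer span.End()\n\n    _ = ctx // pass ctx into downstream calls (DB, gRPC, HTTP) to continue the trace\n    c.JSON(200, gin.H{\"status\": \"ok\"})\n})\n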
                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#run-the-application","title":"Run the application","text":"
                                                                          • Local debugging and running

                                                                            Note: This step is only used for local development and debugging. In the production environment, the Operator will automatically complete the injection of the following environment variables.

The above steps complete the SDK initialization. If you now need to develop and debug locally, you first need to obtain the address of insight-agent-opentelemetry-collector in the insight-system namespace, for example: insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317.

                                                                            Therefore, you can add the following environment variables when you start the application locally:

                                                                            OTEL_SERVICE_NAME=my-golang-app OTEL_EXPORTER_OTLP_ENDPOINT=http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317 go run main.go...\n
                                                                          • Running in a production environment

Please refer to the Only inject environment variable annotations section in Achieving Non-Intrusive Enhancement of Applications through the Operator to add the annotation to the deployment YAML:

                                                                            instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

If you cannot use annotations, you can manually add the following environment variables to the deployment YAML:

...\nenv:\n  - name: OTEL_EXPORTER_OTLP_ENDPOINT\n    value: 'http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317'\n  - name: OTEL_SERVICE_NAME\n    value: \"your deployment name\" # modify it\n  - name: OTEL_K8S_NAMESPACE\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: metadata.namespace\n  - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: spec.nodeName\n  - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: metadata.name\n  - name: OTEL_RESOURCE_ATTRIBUTES\n    value: 'k8s.namespace.name=$(OTEL_K8S_NAMESPACE),k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME)'\n...\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#request-routing","title":"Request Routing","text":""},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#opentelemetry-gingonic-enhancements","title":"OpenTelemetry gin/gonic enhancements","text":"
// Add one line to your import() stanza depending upon your request router:\nmiddleware \"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin\"\n

                                                                          Then inject the OpenTelemetry middleware:

router.Use(middleware.Middleware(\"my-app\"))\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#opentelemetry-gorillamux-enhancements","title":"OpenTelemetry gorillamux enhancements","text":"
// Add one line to your import() stanza depending upon your request router:\nmiddleware \"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux\"\n

                                                                          Then inject the OpenTelemetry middleware:

router.Use(middleware.Middleware(\"my-app\"))\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#grpc-enhancements","title":"gRPC enhancements","text":"

Likewise, OpenTelemetry can automatically instrument gRPC requests. To instrument any gRPC server you have, add the interceptors to the server's instantiation.

                                                                          import (\n  grpcotel \"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc\"\n)\nfunc main() {\n  [...]\n\n    s := grpc.NewServer(\n        grpc.UnaryInterceptor(grpcotel.UnaryServerInterceptor()),\n        grpc.StreamInterceptor(grpcotel.StreamServerInterceptor()),\n    )\n}\n

Note that if your program uses a gRPC client to call third-party services, you also need to add the interceptors to the gRPC client:

                                                                              [...]\n\n    conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()),\n        grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()),\n        grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()),\n    )\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#if-not-using-request-routing","title":"If not using request routing","text":"
                                                                          import (\n  \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\"\n)\n

Everywhere you pass an http.Handler to a ServeMux, wrap the handler function. For example, the following replacements would be made:

                                                                          - mux.Handle(\"/path\", h)\n+ mux.Handle(\"/path\", otelhttp.NewHandler(h, \"description of path\"))\n---\n- mux.Handle(\"/path\", http.HandlerFunc(f))\n+ mux.Handle(\"/path\", otelhttp.NewHandler(http.HandlerFunc(f), \"description of path\"))\n

In this way, you can ensure that each handler wrapped with otelhttp automatically collects its metadata and starts the proper trace.
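Putting this together, a minimal runnable sketch looks like the following. It assumes the tracer provider is initialized elsewhere (for example by the initTracer function above); the route and description are illustrative:

package main\n\nimport (\n    \"fmt\"\n    \"net/http\"\n\n    \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\"\n)\n\nfunc main() {\n    mux := http.NewServeMux()\n    hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n        fmt.Fprintln(w, \"hello\")\n    })\n    // Each request through the wrapped handler starts a server span\n    mux.Handle(\"/hello\", otelhttp.NewHandler(hello, \"GET /hello\"))\n    http.ListenAndServe(\":8080\", mux)\n}\n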

                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#database-enhancements","title":"database enhancements","text":""},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#golang-gorm","title":"Golang Gorm","text":"

                                                                          The OpenTelemetry community has also developed middleware for database access libraries, such as Gorm:

import (\n    \"github.com/uptrace/opentelemetry-go-extra/otelgorm\"\n    \"gorm.io/driver/sqlite\"\n    \"gorm.io/gorm\"\n)\n\ndb, err := gorm.Open(sqlite.Open(\"file::memory:?cache=shared\"), &gorm.Config{})\nif err != nil {\n    panic(err)\n}\n\n// Omitting WithDBName or WithAttributes can lead to an incomplete display of the database topology\notelPlugin := otelgorm.NewPlugin(otelgorm.WithDBName(\"mydb\"),\n    otelgorm.WithAttributes(semconv.ServerAddress(\"memory\")))\nif err := db.Use(otelPlugin); err != nil {\n    panic(err)\n}\n
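For the Gorm spans to attach to the active request trace, pass the request context into queries; otelgorm reads the parent span from it. A sketch (the Book model and the Gin request context are illustrative):

var books []Book\n// WithContext propagates the incoming span so the SQL span nests under it\nif err := db.WithContext(c.Request.Context()).Find(&books).Error; err != nil {\n    // handle the error\n}\n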

                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#custom-span","title":"Custom Span","text":"

In many cases, the middleware provided by OpenTelemetry cannot record internally called functions for us, and we need to create custom spans to record them:

......\n    _, span := otel.Tracer(\"GetServiceDetail\").Start(ctx,\n        \"spanMetricDao.GetServiceDetail\",\n        trace.WithSpanKind(trace.SpanKindInternal))\n    defer span.End()\n......\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#add-custom-properties-and-custom-events-to-span","title":"Add custom properties and custom events to span","text":"

It is also possible to set custom attributes or tags on a span. To add custom attributes and events, follow these steps:

                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#import-tracking-and-property-libraries","title":"Import Tracking and Property Libraries","text":"
                                                                          import (\n    ...\n    \"go.opentelemetry.io/otel/attribute\"\n    \"go.opentelemetry.io/otel/trace\"\n)\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#get-the-current-span-from-the-context","title":"Get the current Span from the context","text":"
                                                                          span := trace.SpanFromContext(c.Request.Context())\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#set-properties-in-the-current-span","title":"Set properties in the current Span","text":"
span.SetAttributes(attribute.String(\"controller\", \"books\"))\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#add-an-event-to-the-current-span","title":"Add an Event to the current Span","text":"

                                                                          Adding span events is done using AddEvent on the span object.

                                                                          span.AddEvent(msg)\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#log-errors-and-exceptions","title":"Log errors and exceptions","text":"
                                                                          import \"go.opentelemetry.io/otel/codes\"\n\n// Get the current span\nspan := trace.SpanFromContext(ctx)\n\n// RecordError will automatically convert an error into a span even\nspan.RecordError(err)\n\n// Flag this span as an error\nspan.SetStatus(codes.Error, \"internal error\")\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/golang.html#references","title":"References","text":"

For demo examples, please refer to:

                                                                          • otel-grpc-examples
                                                                          • opentelemetry-demo/productcatalogservice
                                                                          • opentelemetry-collector-contrib/demo
                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/meter.html","title":"Exposing Metrics for Applications Using OpenTelemetry SDK","text":"

This article is intended for users who wish to evaluate or explore the evolving OTLP metrics.

                                                                          The OpenTelemetry project requires that APIs and SDKs must emit data in the OpenTelemetry Protocol (OTLP) for supported languages.

                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/meter.html#for-golang-applications","title":"For Golang Applications","text":"

A Golang application can expose runtime metrics through the SDK by adding the following code to enable the metrics exporter within the application:

                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/meter.html#install-required-dependencies","title":"Install Required Dependencies","text":"

                                                                          Navigate to your application\u2019s source folder and run the following command:

                                                                          go get go.opentelemetry.io/otel \\\n  go.opentelemetry.io/otel/attribute \\\n  go.opentelemetry.io/otel/exporters/prometheus \\\n  go.opentelemetry.io/otel/metric/global \\\n  go.opentelemetry.io/otel/metric/instrument \\\n  go.opentelemetry.io/otel/sdk/metric\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/meter.html#create-an-initialization-function-using-otel-sdk","title":"Create an Initialization Function Using OTel SDK","text":"
                                                                          import (\n    .....\n\n    \"go.opentelemetry.io/otel/attribute\"\n    otelPrometheus \"go.opentelemetry.io/otel/exporters/prometheus\"\n    \"go.opentelemetry.io/otel/metric/global\"\n    \"go.opentelemetry.io/otel/metric/instrument\"\n    \"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram\"\n    controller \"go.opentelemetry.io/otel/sdk/metric/controller/basic\"\n    \"go.opentelemetry.io/otel/sdk/metric/export/aggregation\"\n    processor \"go.opentelemetry.io/otel/sdk/metric/processor/basic\"\n    selector \"go.opentelemetry.io/otel/sdk/metric/selector/simple\"\n)\n\nfunc (s *insightServer) initMeter() *otelPrometheus.Exporter {\n    s.meter = global.Meter(\"xxx\")\n\n    config := otelPrometheus.Config{\n        DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50},\n        Gatherer:                   prometheus.DefaultGatherer,\n        Registry:                   prometheus.NewRegistry(),\n        Registerer:                 prometheus.DefaultRegisterer,\n    }\n\n    c := controller.New(\n        processor.NewFactory(\n            selector.NewWithHistogramDistribution(\n                histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),\n            ),\n            aggregation.CumulativeTemporalitySelector(),\n            processor.WithMemory(true),\n        ),\n    )\n\n    exporter, err := otelPrometheus.New(config, c)\n    if err != nil {\n        zap.S().Panicf(\"failed to initialize prometheus exporter %v\", err)\n    }\n\n    global.SetMeterProvider(exporter.MeterProvider())\n\n    http.HandleFunc(\"/metrics\", exporter.ServeHTTP)\n\n    go func() {\n        _ = http.ListenAndServe(fmt.Sprintf(\":%d\", 8888), nil)\n    }()\n\n    zap.S().Info(\"Prometheus server running on \", fmt.Sprintf(\":%d\", port))\n    return exporter\n}\n

                                                                          The above method will expose a metrics endpoint for your application at: http://localhost:8888/metrics.

                                                                          Next, initialize it in main.go:

                                                                          func main() {\n    // ...\n    tp := initMeter()\n    // ...\n}\n

                                                                          If you want to add custom metrics, you can refer to the following:

// exposeLoggingMetric exposes a metric like \"insight_logging_count{} 1\"\nfunc (s *insightServer) exposeLoggingMetric(lserver *log.LogService) {\n    s.meter = global.Meter(\"insight.io/basic\")\n\n    var lock sync.Mutex\n    logCounter, err := s.meter.AsyncFloat64().Counter(\"insight_log_total\")\n    if err != nil {\n        zap.S().Panicf(\"failed to initialize instrument: %v\", err)\n    }\n\n    _ = s.meter.RegisterCallback([]instrument.Asynchronous{logCounter}, func(ctx context.Context) {\n        lock.Lock()\n        defer lock.Unlock()\n        count, err := lserver.Count(ctx)\n        if err == nil && count != -1 {\n            logCounter.Observe(ctx, float64(count))\n        }\n    })\n}\n

                                                                          Then, call this method in main.go:

                                                                          // ...\ns.exposeLoggingMetric(lservice)\n// ...\n

                                                                          You can check if your metrics are working correctly by visiting http://localhost:8888/metrics.
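For example, you might verify the custom counter from the command line (the metric name follows the snippet above; the exact output depends on your SDK version):

curl -s http://localhost:8888/metrics | grep insight_log_total\n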

                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/meter.html#for-java-applications","title":"For Java Applications","text":"

                                                                          For Java applications, you can directly expose JVM-related metrics by using the OpenTelemetry agent with the following environment variable:

                                                                          OTEL_METRICS_EXPORTER=prometheus\n
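A local launch might look like the following sketch; the agent and application jar paths are placeholders, and OTEL_EXPORTER_PROMETHEUS_PORT is the standard autoconfiguration variable for the exporter's listening port:

OTEL_METRICS_EXPORTER=prometheus \\\nOTEL_EXPORTER_PROMETHEUS_PORT=9464 \\\njava -javaagent:/path/to/opentelemetry-javaagent.jar -jar my-app.jar\n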

You can then check your metrics at http://localhost:9464/metrics (9464 is the default port of the Java agent's Prometheus exporter).

                                                                          Next, combine it with a Prometheus ServiceMonitor to complete the metrics integration. If you want to expose custom metrics, please refer to opentelemetry-java-docs/prometheus.

                                                                          The process is mainly divided into two steps:

                                                                          • Create a meter provider and specify Prometheus as the exporter.
                                                                          /*\n * Copyright The OpenTelemetry Authors\n * SPDX-License-Identifier: Apache-2.0\n */\n\npackage io.opentelemetry.example.prometheus;\n\nimport io.opentelemetry.api.metrics.MeterProvider;\nimport io.opentelemetry.exporter.prometheus.PrometheusHttpServer;\nimport io.opentelemetry.sdk.metrics.SdkMeterProvider;\nimport io.opentelemetry.sdk.metrics.export.MetricReader;\n\npublic final class ExampleConfiguration {\n\n  /**\n   * Initializes the Meter SDK and configures the Prometheus collector with all default settings.\n   *\n   * @param prometheusPort the port to open up for scraping.\n   * @return A MeterProvider for use in instrumentation.\n   */\n  static MeterProvider initializeOpenTelemetry(int prometheusPort) {\n    MetricReader prometheusReader = PrometheusHttpServer.builder().setPort(prometheusPort).build();\n\n    return SdkMeterProvider.builder().registerMetricReader(prometheusReader).build();\n  }\n}\n
                                                                          • Create a custom meter and start the HTTP server.
                                                                          package io.opentelemetry.example.prometheus;\n\nimport io.opentelemetry.api.common.Attributes;\nimport io.opentelemetry.api.metrics.Meter;\nimport io.opentelemetry.api.metrics.MeterProvider;\nimport java.util.concurrent.ThreadLocalRandom;\n\n/**\n * Example of using the PrometheusHttpServer to convert OTel metrics to Prometheus format and expose\n * these to a Prometheus instance via a HttpServer exporter.\n *\n * <p>A Gauge is used to periodically measure how many incoming messages are awaiting processing.\n * The Gauge callback gets executed every collection interval.\n */\npublic final class PrometheusExample {\n  private long incomingMessageCount;\n\n  public PrometheusExample(MeterProvider meterProvider) {\n    Meter meter = meterProvider.get(\"PrometheusExample\");\n    meter\n        .gaugeBuilder(\"incoming.messages\")\n        .setDescription(\"No of incoming messages awaiting processing\")\n        .setUnit(\"message\")\n        .buildWithCallback(result -> result.record(incomingMessageCount, Attributes.empty()));\n  }\n\n  void simulate() {\n    for (int i = 500; i > 0; i--) {\n      try {\n        System.out.println(\n            i + \" Iterations to go, current incomingMessageCount is:  \" + incomingMessageCount);\n        incomingMessageCount = ThreadLocalRandom.current().nextLong(100);\n        Thread.sleep(1000);\n      } catch (InterruptedException e) {\n        // ignored here\n      }\n    }\n  }\n\n  public static void main(String[] args) {\n    int prometheusPort = 8888;\n\n    // It is important to initialize the OpenTelemetry SDK as early as possible in your process.\n    MeterProvider meterProvider = ExampleConfiguration.initializeOpenTelemetry(prometheusPort);\n\n    PrometheusExample prometheusExample = new PrometheusExample(meterProvider);\n\n    prometheusExample.simulate();\n\n    System.out.println(\"Exiting\");\n  }\n}\n

                                                                          After running the Java application, you can check if your metrics are working correctly by visiting http://localhost:8888/metrics.

                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/meter.html#insight-collecting-metrics","title":"Insight Collecting Metrics","text":"

Lastly, note that you have exposed metrics in your application; now you need Insight to collect those metrics.

                                                                          The recommended way to expose metrics is via ServiceMonitor or PodMonitor.

                                                                          "},{"location":"en/admin/insight/quickstart/otel/golang/meter.html#creating-servicemonitorpodmonitor","title":"Creating ServiceMonitor/PodMonitor","text":"

                                                                          The added ServiceMonitor/PodMonitor needs to have the label operator.insight.io/managed-by: insight for the Operator to recognize it:

                                                                          apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: example-app\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  selector:\n    matchLabels:\n      app: example-app\n  endpoints:\n  - port: web\n  namespaceSelector:\n    any: true\n
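A PodMonitor works the same way and must also carry the operator.insight.io/managed-by: insight label. A sketch (the port name and pod labels are illustrative):

apiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: example-app\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  selector:\n    matchLabels:\n      app: example-app\n  podMetricsEndpoints:\n  - port: metrics\n  namespaceSelector:\n    any: true\n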
                                                                          "},{"location":"en/admin/insight/quickstart/otel/java/index.html","title":"Start Monitoring Java Applications","text":"
1. For integrating and monitoring traces of Java applications, please refer to the document Implementing Non-Intrusive Enhancements for Applications via Operator, which explains how to automatically integrate traces through annotations.

2. Monitoring the JVM of Java applications: how Java applications that have already exposed JVM metrics, and those that have not, can connect with Insight observability.

                                                                          3. If your Java application has not yet started exposing JVM metrics, you can refer to the following documents:

                                                                            • Exposing JVM Monitoring Metrics Using JMX Exporter
                                                                            • Exposing JVM Monitoring Metrics Using OpenTelemetry Java Agent
                                                                          4. If your Java application has already exposed JVM metrics, you can refer to the following document:

                                                                            • Connecting Existing JVM Metrics of Java Applications to Observability
5. Writing TraceId and SpanId into Java Application Logs to correlate trace data with log data.

                                                                          "},{"location":"en/admin/insight/quickstart/otel/java/mdc.html","title":"Writing TraceId and SpanId into Java Application Logs","text":"

                                                                          This article explains how to automatically write TraceId and SpanId into Java application logs using OpenTelemetry. By including TraceId and SpanId in your logs, you can correlate distributed tracing data with log data, enabling more efficient fault diagnosis and performance analysis.

                                                                          "},{"location":"en/admin/insight/quickstart/otel/java/mdc.html#supported-logging-libraries","title":"Supported Logging Libraries","text":"

                                                                          For more information, please refer to the Logger MDC auto-instrumentation.

Logging Framework | Supported Automatic Instrumentation Versions | Dependencies Required for Manual Instrumentation
--- | --- | ---
Log4j 1 | 1.2+ | None
Log4j 2 | 2.7+ | opentelemetry-log4j-context-data-2.17-autoconfigure
Logback | 1.0+ | opentelemetry-logback-mdc-1.0

"},{"location":"en/admin/insight/quickstart/otel/java/mdc.html#using-logback-spring-boot-project","title":"Using Logback (Spring Boot Project)","text":"

                                                                          Spring Boot projects come with a built-in logging framework and use Logback as the default logging implementation. If your Java project is a Spring Boot project, you can write TraceId into logs with minimal configuration.

                                                                          Set logging.pattern.level in application.properties, adding %mdc{trace_id} and %mdc{span_id} to the logs.

logging.pattern.level=trace_id=%mdc{trace_id} span_id=%mdc{span_id} %5p ....omitted...\n

                                                                          Here is an example of the logs:

                                                                          2024-06-26 10:56:31.200 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.a.c.c.C.[Tomcat].[localhost].[/]       : Initializing Spring DispatcherServlet 'dispatcherServlet'\n2024-06-26 10:56:31.201 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.s.web.servlet.DispatcherServlet        : Initializing Servlet 'dispatcherServlet'\n2024-06-26 10:56:31.209 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.s.web.servlet.DispatcherServlet        : Completed initialization in 8 ms\n2024-06-26 10:56:31.296 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=5743699405074f4e  INFO 53724 --- [nio-8081-exec-1] com.example.httpserver.ot.OTServer       : hello world\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/java/mdc.html#using-log4j2","title":"Using Log4j2","text":"
                                                                          1. Add OpenTelemetry Log4j2 dependency in pom.xml:

                                                                            Tip

                                                                            Please replace OPENTELEMETRY_VERSION with the latest version.

                                                                            <dependencies>\n  <dependency>\n    <groupId>io.opentelemetry.instrumentation</groupId>\n    <artifactId>opentelemetry-log4j-context-data-2.17-autoconfigure</artifactId>\n    <version>OPENTELEMETRY_VERSION</version>\n    <scope>runtime</scope>\n  </dependency>\n</dependencies>\n
                                                                          2. Modify the log4j2.xml configuration, adding %X{trace_id} and %X{span_id} in the pattern to automatically write TraceId and SpanId into the logs:

                                                                            <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Configuration>\n  <Appenders>\n    <Console name=\"Console\" target=\"SYSTEM_OUT\">\n      <PatternLayout\n          pattern=\"%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} trace_id=%X{trace_id} span_id=%X{span_id} trace_flags=%X{trace_flags} - %msg%n\"/>\n    </Console>\n  </Appenders>\n  <Loggers>\n    <Root>\n      <AppenderRef ref=\"Console\" level=\"All\"/>\n    </Root>\n  </Loggers>\n</Configuration>\n
                                                                          3. If using Logback, add OpenTelemetry Logback dependency in pom.xml.

                                                                            Tip

                                                                            Please replace OPENTELEMETRY_VERSION with the latest version.

                                                                            <dependencies>\n  <dependency>\n    <groupId>io.opentelemetry.instrumentation</groupId>\n    <artifactId>opentelemetry-logback-mdc-1.0</artifactId>\n    <version>OPENTELEMETRY_VERSION</version>\n  </dependency>\n</dependencies>\n
4. Modify the logback.xml configuration, adding %X{trace_id} and %X{span_id} in the pattern to automatically write TraceId and SpanId into the logs:

                                                                            <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<configuration>\n  <appender name=\"CONSOLE\" class=\"ch.qos.logback.core.ConsoleAppender\">\n    <encoder>\n      <pattern>%d{HH:mm:ss.SSS} trace_id=%X{trace_id} span_id=%X{span_id} trace_flags=%X{trace_flags} %msg%n</pattern>\n    </encoder>\n  </appender>\n\n  <!-- Just wrap your logging appender, for example ConsoleAppender, with OpenTelemetryAppender -->\n  <appender name=\"OTEL\" class=\"io.opentelemetry.instrumentation.logback.mdc.v1_0.OpenTelemetryAppender\">\n    <appender-ref ref=\"CONSOLE\"/>\n  </appender>\n\n  <!-- Use the wrapped \"OTEL\" appender instead of the original \"CONSOLE\" one -->\n  <root level=\"INFO\">\n    <appender-ref ref=\"OTEL\"/>\n  </root>\n\n</configuration>\n
                                                                          "},{"location":"en/admin/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html","title":"Exposing JVM Monitoring Metrics Using JMX Exporter","text":"

                                                                          JMX Exporter provides two usage methods:

                                                                          1. Standalone Process: Specify parameters when starting the JVM to expose a JMX RMI interface. The JMX Exporter calls RMI to obtain the JVM runtime state data, converts it into Prometheus metrics format, and exposes a port for Prometheus to scrape.
                                                                          2. In-Process (JVM process): Specify parameters when starting the JVM to run the JMX Exporter jar file as a javaagent. This method reads the JVM runtime state data in-process, converts it into Prometheus metrics format, and exposes a port for Prometheus to scrape.

                                                                          Note

                                                                          The official recommendation is not to use the first method due to its complex configuration and the requirement for a separate process, which introduces additional monitoring challenges. Therefore, this article focuses on the second method, detailing how to use JMX Exporter to expose JVM monitoring metrics in a Kubernetes environment.

                                                                          In this method, you need to specify the JMX Exporter jar file and configuration file when starting the JVM. Since the jar file is a binary file that is not ideal for mounting via a configmap, and the configuration file typically does not require modifications, it is recommended to package both the JMX Exporter jar file and the configuration file directly into the business container image.

                                                                          For the second method, you can choose to include the JMX Exporter jar file in the application image or mount it during deployment. Below are explanations for both approaches:

                                                                          "},{"location":"en/admin/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html#method-1-building-jmx-exporter-jar-file-into-the-business-image","title":"Method 1: Building JMX Exporter JAR File into the Business Image","text":"

                                                                          The content of prometheus-jmx-config.yaml is as follows:

                                                                          prometheus-jmx-config.yaml
                                                                          ...\nssl: false\nlowercaseOutputName: false\nlowercaseOutputLabelNames: false\nrules:\n- pattern: \".*\"\n

                                                                          Note

                                                                          For more configuration options, please refer to the introduction at the bottom or Prometheus official documentation.

                                                                          Next, prepare the jar file. You can find the latest jar download link on the jmx_exporter GitHub page and refer to the following Dockerfile:

                                                                          FROM openjdk:11.0.15-jre\nWORKDIR /app/\nCOPY target/my-app.jar ./\nCOPY prometheus-jmx-config.yaml ./\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\nENV JAVA_TOOL_OPTIONS=-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\nEXPOSE 8081 8999 8080 8888\nENTRYPOINT java $JAVA_OPTS -jar my-app.jar\n

                                                                          Note:

• The format for the startup parameter is: -javaagent:<jmx-exporter-jar-path>=<port>:<config-file-path>
                                                                          • Here, port 8088 is used to expose JVM monitoring metrics; you may change it if it conflicts with the Java application.
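
As a quick local check, you can build and run the image and then fetch the metrics endpoint; the image tag below is illustrative:

docker build -t my-app-with-jmx .\ndocker run -d -p 8088:8088 my-app-with-jmx\n# The javaagent serves Prometheus metrics over HTTP on the configured port\ncurl http://localhost:8088/metrics\n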
                                                                          "},{"location":"en/admin/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html#method-2-mounting-via-init-container","title":"Method 2: Mounting via Init Container","text":"

                                                                          First, we need to create a Docker image for the JMX Exporter. The following Dockerfile is for reference:

                                                                          FROM alpine/curl:3.14\nWORKDIR /app/\n# Copy the previously created config file into the image\nCOPY prometheus-jmx-config.yaml ./\n# Download the jmx prometheus javaagent jar online\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\n

                                                                          Build the image using the above Dockerfile: docker build -t my-jmx-exporter .

                                                                          Add the following init container to the Java application deployment YAML:

                                                                          Click to expand YAML file
                                                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-demo-app\n  labels:\n    app: my-demo-app\nspec:\n  selector:\n    matchLabels:\n      app: my-demo-app\n  template:\n    metadata:\n      labels:\n        app: my-demo-app\n    spec:\n      imagePullSecrets:\n      - name: registry-pull\n      initContainers:\n      - name: jmx-sidecar\n        image: my-jmx-exporter\n        command: [\"cp\", \"-r\", \"/app/jmx_prometheus_javaagent-0.17.2.jar\", \"/target/jmx_prometheus_javaagent-0.17.2.jar\"]  \u278a\n        volumeMounts:\n        - name: sidecar\n          mountPath: /target\n      containers:\n      - image: my-demo-app-image\n        name: my-demo-app\n        resources:\n          requests:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n          limits:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n        ports:\n        - containerPort: 18083\n        env:\n        - name: JAVA_TOOL_OPTIONS\n          value: \"-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\" \u278b\n        volumeMounts:\n        - name: host-time\n          mountPath: /etc/localtime\n          readOnly: true\n        - name: sidecar\n          mountPath: /sidecar\n      volumes:\n      - name: host-time\n        hostPath:\n          path: /etc/localtime\n      - name: sidecar  # Shared agent folder\n        emptyDir: {}\n      restartPolicy: Always\n

With the above modifications, the example application my-demo-app can now expose JVM metrics. Once the service is running, you can access the Prometheus-formatted metrics at http://localhost:8088.
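
To verify from your workstation, you can port-forward the workload and query the endpoint; this is a sketch assuming the deployment name used above:

kubectl port-forward deploy/my-demo-app 8088:8088\n# In another terminal, fetch the JVM metrics\ncurl http://localhost:8088/metrics\n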

                                                                          Next, you can refer to Connecting Existing JVM Metrics of Java Applications to Observability.

                                                                          "},{"location":"en/admin/insight/quickstart/otel/java/jvm-monitor/legacy-jvm.html","title":"Integrating Existing JVM Metrics of Java Applications with Observability","text":"

                                                                          If your Java application exposes JVM monitoring metrics through other means (such as Spring Boot Actuator), you will need to ensure that the monitoring data is collected. You can achieve this by adding annotations (Kubernetes Annotations) to your workload to allow Insight to scrape the existing JVM metrics:

                                                                          annotations: \n  insight.opentelemetry.io/metric-scrape: \"true\"  # Whether to scrape\n  insight.opentelemetry.io/metric-path: \"/\"         # Path to scrape metrics\n  insight.opentelemetry.io/metric-port: \"9464\"      # Port to scrape metrics\n

                                                                          For example, to add annotations to the my-deployment-app:

                                                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-app\nspec:\n  selector:\n    matchLabels:\n      app: my-deployment-app\n      app.kubernetes.io/name: my-deployment-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-deployment-app\n        app.kubernetes.io/name: my-deployment-app\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\"  # Whether to scrape\n        insight.opentelemetry.io/metric-path: \"/\"         # Path to scrape metrics\n        insight.opentelemetry.io/metric-port: \"9464\"      # Port to scrape metrics\n

                                                                          Here is a complete example:

                                                                          ---\napiVersion: v1\nkind: Service\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  type: NodePort\n  selector:\n    app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  ports:\n    - name: http\n      port: 8080\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  selector:\n    matchLabels:\n      app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\"  # Whether to scrape\n        insight.opentelemetry.io/metric-path: \"/actuator/prometheus\"  # Path to scrape metrics\n        insight.opentelemetry.io/metric-port: \"8080\"      # Port to scrape metrics\n    spec:\n      containers:\n        - name: myapp\n          image: docker.m.daocloud.io/wutang/spring-boot-actuator-prometheus-metrics-demo\n          ports:\n            - name: http\n              containerPort: 8080\n          resources:\n            limits:\n              cpu: 500m\n              memory: 800Mi\n            requests:\n              cpu: 200m\n              memory: 400Mi\n

                                                                          In the above example, Insight will scrape the Prometheus metrics exposed through Spring Boot Actuator via http://<service-ip>:8080/actuator/prometheus.
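
Note that Spring Boot only serves /actuator/prometheus when the Micrometer Prometheus registry is on the classpath and the endpoint is exposed. A typical application.properties sketch (requires the micrometer-registry-prometheus dependency):

# Expose the Prometheus endpoint via Spring Boot Actuator\nmanagement.endpoints.web.exposure.include=health,prometheus\n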

                                                                          "},{"location":"en/admin/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html","title":"Exposing JVM Metrics Using OpenTelemetry Java Agent","text":"

Starting from v1.20.0, the OpenTelemetry Agent has introduced the JMX Metric Insight module. If your application is already integrated with the OpenTelemetry Agent for tracing, you no longer need to introduce another agent to expose JMX metrics. The OpenTelemetry Agent collects and exposes metrics by detecting the locally available MBeans in the application.

                                                                          The OpenTelemetry Agent also provides built-in monitoring examples for common Java servers or frameworks. Please refer to the Predefined Metrics.

                                                                          When using the OpenTelemetry Java Agent, you also need to consider how to mount the JAR into the container. In addition to the methods for mounting the JAR file as described with the JMX Exporter, you can leverage the capabilities provided by the OpenTelemetry Operator to automatically enable JVM metrics exposure for your application.

                                                                          However, as of the current version, you still need to manually add the appropriate annotations to your application for the JVM data to be collected by Insight. For specific annotation content, please refer to Integrating Existing JVM Metrics of Java Applications with Observability.

                                                                          "},{"location":"en/admin/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html#exposing-metrics-for-java-middleware","title":"Exposing Metrics for Java Middleware","text":"

                                                                          The OpenTelemetry Agent also includes built-in examples for monitoring middleware. Please refer to the Predefined Metrics.

By default, no specific middleware types are designated; you need to specify them using the -Dotel.jmx.target.system JVM option, for example, -Dotel.jmx.target.system=jetty,kafka-broker.
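
For example, a startup command might look like the following; the agent jar path and application jar name are placeholders:

# Attach the OpenTelemetry Java Agent and enable JMX metrics for Jetty and Kafka broker\njava -javaagent:/app/opentelemetry-javaagent.jar \\\n     -Dotel.jmx.target.system=jetty,kafka-broker \\\n     -jar my-app.jar\n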

                                                                          "},{"location":"en/admin/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html#references","title":"References","text":"
                                                                          • Gaining JMX Metric Insights with the OpenTelemetry Java Agent

                                                                          • Otel JMX Metrics

                                                                          "},{"location":"en/admin/insight/quickstart/other/install-agent-on-ocp.html","title":"OpenShift Install Insight Agent","text":"

Although OpenShift comes with its own monitoring system, we still install Insight Agent because of certain rules in its data collection conventions.

In addition to the basic installation configuration, the following parameters need to be added during helm install:

## Parameters related to fluent-bit\n--set fluent-bit.ocp.enabled=true \\\n--set fluent-bit.serviceAccount.create=false \\\n--set fluent-bit.securityContext.runAsUser=0 \\\n--set fluent-bit.securityContext.seLinuxOptions.type=spc_t \\\n--set fluent-bit.securityContext.readOnlyRootFilesystem=false \\\n--set fluent-bit.securityContext.allowPrivilegeEscalation=false \\\n\n## Enable Prometheus (CR) compatibility for OpenShift 4.x\n--set compatibility.openshift.prometheus.enabled=true \\\n\n## Disable the Prometheus instance shipped with kube-prometheus-stack\n--set kube-prometheus-stack.prometheus.enabled=false \\\n--set kube-prometheus-stack.kubeApiServer.enabled=false \\\n--set kube-prometheus-stack.kubelet.enabled=false \\\n--set kube-prometheus-stack.kubeControllerManager.enabled=false \\\n--set kube-prometheus-stack.coreDns.enabled=false \\\n--set kube-prometheus-stack.kubeDns.enabled=false \\\n--set kube-prometheus-stack.kubeEtcd.enabled=false \\\n--set kube-prometheus-stack.kubeScheduler.enabled=false \\\n--set kube-prometheus-stack.kubeStateMetrics.enabled=false \\\n--set kube-prometheus-stack.nodeExporter.enabled=false \\\n\n## Limit the namespaces handled by PrometheusOperator to avoid competing with OpenShift's own PrometheusOperator\n--set kube-prometheus-stack.prometheusOperator.kubeletService.namespace=\"insight-system\" \\\n--set kube-prometheus-stack.prometheusOperator.prometheusInstanceNamespaces=\"insight-system\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[0]=\"openshift-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[1]=\"openshift-user-workload-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[2]=\"openshift-customer-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[3]=\"openshift-route-monitor-operator\" \\\n
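
These flags are appended to the regular installation command. A sketch of the full invocation, assuming the usual insight-agent chart and namespace (adjust the release name, chart reference, and values file to your environment):

helm upgrade --install insight-agent insight/insight-agent \\\n  --namespace insight-system --create-namespace \\\n  --set fluent-bit.ocp.enabled=true \\\n  --set compatibility.openshift.prometheus.enabled=true\n  # ...plus the remaining --set flags listed above\n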
                                                                          "},{"location":"en/admin/insight/quickstart/other/install-agent-on-ocp.html#write-system-monitoring-data-into-prometheus-through-openshifts-own-mechanism","title":"Write system monitoring data into Prometheus through OpenShift's own mechanism","text":"
                                                                          apiVersion: v1\nkind: ConfigMap\nmetadata:\n   name: cluster-monitoring-config\n   namespace: openshift-monitoring\ndata:\n   config.yaml: |\n     prometheusK8s:\n       remoteWrite:\n         - queueConfig:\n             batchSendDeadline: 60s\n             maxBackoff: 5s\n             minBackoff: 30ms\n             minShards: 1\n             capacity: 5000\n             maxSamplesPerSend: 1000\n             maxShards: 100\n           remoteTimeout: 30s\n           url: http://insight-agent-prometheus.insight-system.svc.cluster.local:9090/api/v1/write\n           writeRelabelConfigs:\n             - action: keep\n               regex: etcd|kubelet|node-exporter|apiserver|kube-state-metrics\n               sourceLabels:\n                 - job\n
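
A sketch of applying this configuration (the file name is illustrative):

# Apply the remote-write configuration to the OpenShift monitoring stack\nkubectl apply -f cluster-monitoring-config.yaml\n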
                                                                          "},{"location":"en/admin/insight/quickstart/res-plan/modify-vms-disk.html","title":"vmstorage Disk Expansion","text":"

                                                                          This article describes the method for expanding the vmstorage disk. Please refer to the vmstorage disk capacity planning for the specifications of the vmstorage disk.

                                                                          "},{"location":"en/admin/insight/quickstart/res-plan/modify-vms-disk.html#procedure","title":"Procedure","text":""},{"location":"en/admin/insight/quickstart/res-plan/modify-vms-disk.html#enable-storageclass-expansion","title":"Enable StorageClass expansion","text":"
1. Log in to the AI platform as a global service cluster administrator. Click Container Management -> Clusters and go to the details of the kpanda-global-cluster cluster.

                                                                          2. Select the left navigation menu Container Storage -> PVCs and find the PVC bound to the vmstorage.

                                                                          3. Click a vmstorage PVC to enter the details of the volume claim for vmstorage and confirm the StorageClass that the PVC is bound to.

                                                                          4. Select the left navigation menu Container Storage -> Storage Class and find local-path . Click the \u2507 on the right side of the target and select Edit in the popup menu.

                                                                          5. Enable Scale Up and click OK .

                                                                          "},{"location":"en/admin/insight/quickstart/res-plan/modify-vms-disk.html#modify-the-disk-capacity-of-vmstorage","title":"Modify the disk capacity of vmstorage","text":"
1. Log in to the AI platform as a global service cluster administrator and go to the details of the kpanda-global-cluster cluster.

                                                                          2. Select the left navigation menu CRDs and find the custom resource for vmcluster .

                                                                          3. Click the custom resource for vmcluster to enter the details page, switch to the insight-system namespace, and select Edit YAML from the right menu of insight-victoria-metrics-k8s-stack .

4. Modify the capacity as illustrated and click OK .

                                                                          5. Select the left navigation menu Container Storage -> PVCs again and find the volume claim bound to vmstorage. Confirm that the modification has taken effect. In the details page of a PVC, click the associated storage source (PV).

                                                                          6. Open the volume details page and click the Update button in the upper right corner.

                                                                          7. After modifying the Capacity , click OK and wait for a moment until the expansion is successful.

                                                                          "},{"location":"en/admin/insight/quickstart/res-plan/modify-vms-disk.html#clone-the-storage-volume","title":"Clone the storage volume","text":"

                                                                          If the storage volume expansion fails, you can refer to the following method to clone the storage volume.

1. Log in to the AI platform as a global service cluster administrator and go to the details of the kpanda-global-cluster cluster.

                                                                          2. Select the left navigation menu Workloads -> StatefulSets and find the statefulset for vmstorage . Click the \u2507 on the right side of the target and select Status -> Stop -> OK in the popup menu.

                                                                          3. After logging into the master node of the kpanda-global-cluster cluster in the command line, run the following command to copy the vm-data directory in the vmstorage container to store the metric information locally:

                                                                            kubectl cp -n insight-system vmstorage-insight-victoria-metrics-k8s-stack-1:vm-data ./vm-data\n
4. Log in to the AI platform and go to the details of the kpanda-global-cluster cluster. Select the left navigation menu Container Storage -> PVs , click Clone in the upper right corner, and modify the capacity of the volume.

                                                                          5. Delete the previous data volume of vmstorage.

                                                                          6. Wait for a moment until the volume claim is bound to the cloned data volume, then run the following command to import the exported data from step 3 into the proper container, and then start the previously paused vmstorage .

                                                                            kubectl cp -n insight-system ./vm-data vmstorage-insight-victoria-metrics-k8s-stack-1:vm-data\n
                                                                          "},{"location":"en/admin/insight/quickstart/res-plan/prometheus-res.html","title":"Prometheus Resource Planning","text":"

In actual use, the CPU, memory, and other resource usage of Prometheus depends on the number of containers in the cluster and on whether Istio is enabled, and may exceed the configured resources.

                                                                          In order to ensure the normal operation of Prometheus in clusters of different sizes, it is necessary to adjust the resources of Prometheus according to the actual size of the cluster.

                                                                          "},{"location":"en/admin/insight/quickstart/res-plan/prometheus-res.html#reference-resource-planning","title":"Reference resource planning","text":"

When the service mesh is not enabled, test statistics show that the relationship between the series generated by system jobs and the pod count is: Series count = 800 * pod count

When the service mesh is enabled, the Istio-related metrics additionally generated by pods follow: Series count = 768 * pod count
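
For example, a 400-pod cluster with the service mesh enabled would carry roughly 800 x 400 + 768 x 400 = 627,200 series in total (the tables below round these values).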

                                                                          "},{"location":"en/admin/insight/quickstart/res-plan/prometheus-res.html#when-the-service-mesh-is-not-enabled","title":"When the service mesh is not enabled","text":"

The following resource planning is recommended for Prometheus when the service mesh is not enabled:

Cluster size (pod count) | Metrics (service mesh not enabled) | CPU (core) | Memory (GB)
--- | --- | --- | ---
100 | 80,000 | Request: 0.5 / Limit: 1 | Request: 2GB / Limit: 4GB
200 | 160,000 | Request: 1 / Limit: 1.5 | Request: 3GB / Limit: 6GB
300 | 240,000 | Request: 1 / Limit: 2 | Request: 3GB / Limit: 6GB
400 | 320,000 | Request: 1 / Limit: 2 | Request: 4GB / Limit: 8GB
500 | 400,000 | Request: 1.5 / Limit: 3 | Request: 5GB / Limit: 10GB
800 | 640,000 | Request: 2 / Limit: 4 | Request: 8GB / Limit: 16GB
1000 | 800,000 | Request: 2.5 / Limit: 5 | Request: 9GB / Limit: 18GB
2000 | 1,600,000 | Request: 3.5 / Limit: 7 | Request: 20GB / Limit: 40GB
3000 | 2,400,000 | Request: 4 / Limit: 8 | Request: 33GB / Limit: 66GB

"},{"location":"en/admin/insight/quickstart/res-plan/prometheus-res.html#when-the-service-mesh-feature-is-enabled","title":"When the service mesh feature is enabled","text":"

The following resource planning is recommended for Prometheus when the service mesh is enabled:

Cluster size (pod count) | Metrics (service mesh enabled) | CPU (core) | Memory (GB)
--- | --- | --- | ---
100 | 150,000 | Request: 1 / Limit: 2 | Request: 3GB / Limit: 6GB
200 | 310,000 | Request: 2 / Limit: 3 | Request: 5GB / Limit: 10GB
300 | 460,000 | Request: 2 / Limit: 4 | Request: 6GB / Limit: 12GB
400 | 620,000 | Request: 2 / Limit: 4 | Request: 8GB / Limit: 16GB
500 | 780,000 | Request: 3 / Limit: 6 | Request: 10GB / Limit: 20GB
800 | 1,250,000 | Request: 4 / Limit: 8 | Request: 15GB / Limit: 30GB
1000 | 1,560,000 | Request: 5 / Limit: 10 | Request: 18GB / Limit: 36GB
2000 | 3,120,000 | Request: 7 / Limit: 14 | Request: 40GB / Limit: 80GB
3000 | 4,680,000 | Request: 8 / Limit: 16 | Request: 65GB / Limit: 130GB

                                                                          Note

1. The pod count in the table refers to pods running stably in the cluster. If a large number of pods restart, the metric count will surge in a short period, and resources need to be adjusted accordingly.
2. Prometheus keeps two hours of data in memory by default, and when the Remote Write feature is enabled in the cluster it consumes additional memory; sizing resources with a surge ratio of 2 is recommended.
3. The values in the table are recommendations for general situations. If the environment has precise resource requirements, check the actual resource usage of the corresponding Prometheus after the cluster has been running for a period of time and configure it precisely.
                                                                          "},{"location":"en/admin/insight/quickstart/res-plan/vms-res-plan.html","title":"vmstorage disk capacity planning","text":"

                                                                          vmstorage is responsible for storing multicluster metrics for observability. In order to ensure the stability of vmstorage, it is necessary to adjust the disk capacity of vmstorage according to the number of clusters and the size of the cluster. For more information, please refer to vmstorage retention period and disk space.

                                                                          "},{"location":"en/admin/insight/quickstart/res-plan/vms-res-plan.html#test-results","title":"Test Results","text":"

After observing the vmstorage disks of clusters of different sizes for 14 days, we found that vmstorage disk usage is positively correlated with the amount of metrics it stores and the disk usage of individual data points.

1. Instantaneous metric volume: use increase(vm_rows{type != \"indexdb\"}[30s]) to obtain the number of metrics added within 30s
                                                                          2. Disk usage of a single data point: sum(vm_data_size_bytes{type!=\"indexdb\"}) / sum(vm_rows{type != \"indexdb\"})
                                                                          "},{"location":"en/admin/insight/quickstart/res-plan/vms-res-plan.html#calculation-method","title":"calculation method","text":"

Disk usage = instantaneous metric volume x 2 x disk usage per data point x 60 x 24 x retention period (days)

                                                                          Parameter Description:

1. The unit of disk usage is bytes.
2. Retention period (days) x 60 x 24 converts the number of days into minutes for the disk usage calculation.
3. The default scrape interval of Prometheus in Insight Agent is 30s, so twice the metric volume is generated within 1 minute.
4. The default retention period in vmstorage is 1 month; please refer to Modify System Configuration to change it.

                                                                          Warning

This formula is a general solution; it is recommended to reserve redundant disk capacity beyond the calculated result to ensure the normal operation of vmstorage.

                                                                          "},{"location":"en/admin/insight/quickstart/res-plan/vms-res-plan.html#reference-capacity","title":"reference capacity","text":"

The data in the table is calculated based on the default retention period of one month (30 days), and the disk usage of a single data point (datapoint) is calculated as 0.9 bytes. In a multicluster scenario, the number of Pods represents the total across all clusters.

                                                                          "},{"location":"en/admin/insight/quickstart/res-plan/vms-res-plan.html#when-the-service-mesh-is-not-enabled","title":"When the service mesh is not enabled","text":"Cluster size (number of Pods) Metrics Disk capacity 100 8W 6 GiB 200 16W 12 GiB 300 24w 18 GiB 400 32w 24 GiB 500 40w 30 GiB 800 64w 48 GiB 1000 80W 60 GiB 2000 160w 120 GiB 3000 240w 180 GiB"},{"location":"en/admin/insight/quickstart/res-plan/vms-res-plan.html#when-the-service-mesh-is-enabled","title":"When the service mesh is enabled","text":"Cluster size (number of Pods) Metrics Disk capacity 100 15W 12 GiB 200 31w 24 GiB 300 46w 36 GiB 400 62w 48 GiB 500 78w 60 GiB 800 125w 94 GiB 1000 156w 120 GiB 2000 312w 235 GiB 3000 468w 350 GiB"},{"location":"en/admin/insight/quickstart/res-plan/vms-res-plan.html#example","title":"Example","text":"

There are two clusters in the AI platform: the global management cluster (service mesh enabled) runs 500 Pods, the worker cluster (service mesh not enabled) runs 1000 Pods, and metrics are expected to be stored for 30 days.

                                                                          • The number of metrics in the global management cluster is 800x500 + 768x500 = 784000
                                                                          • Worker cluster metrics are 800x1000 = 800000

Then the vmstorage disk capacity should be set to (784000+800000) x 2 x 0.9 x 60 x 24 x 30 = 123171840000 bytes ≈ 115 GiB

                                                                          Note

                                                                          For the relationship between the number of metrics and the number of Pods in the cluster, please refer to Prometheus Resource Planning.

                                                                          "},{"location":"en/admin/insight/reference/alertnotification.html","title":"Alert Notification Process Description","text":"

When configuring an alert policy in Insight, you can set different notification intervals for alerts of different severities within the same policy. However, due to the group_interval and repeat_interval parameters in the native Alertmanager configuration, the actual intervals at which alert notifications are sent may deviate from what you configure.

                                                                          "},{"location":"en/admin/insight/reference/alertnotification.html#parameter-configuration","title":"Parameter Configuration","text":"

                                                                          In the Alertmanager configuration, set the following parameters:

                                                                          route:  \n  group_by: [\"rulename\"]\n  group_wait: 30s\n  group_interval: 5m\n  repeat_interval: 1h\n

                                                                          Parameter descriptions:

• group_wait : Specifies how long to wait before sending the first notification for a new alert group. When Alertmanager receives the first alert of a group, it waits for the group_wait duration to collect additional alerts with the same labels and content, then includes all qualifying alerts in the same notification.

• group_interval : Determines the minimum waiting time between two notifications for the same group. New alerts that arrive for an already-notified group are batched, and a notification containing them is sent once group_interval has elapsed.

                                                                          • repeat_interval : Sets the interval for resending alert notifications. After Alertmanager sends an alert notification to a receiver, if it continues to receive alerts with the same labels and content within the duration specified by repeat_interval , Alertmanager will resend the alert notification.

                                                                          When the group_wait , group_interval , and repeat_interval parameters are set simultaneously, Alertmanager handles alert notifications under the same group as follows:

                                                                          1. When Alertmanager receives qualifying alerts, it waits for at least the duration specified in the group_wait parameter to collect additional alerts with the same labels and content. It includes all qualifying alerts in the same notification.

                                                                          2. If no further alerts are received during the group_wait duration, Alertmanager sends all received alerts to the receiver after that time. If additional qualifying alerts arrive during this period, Alertmanager continues to wait until all alerts are collected or a timeout occurs.

                                                                          3. If more alerts with the same labels and content are received within the group_interval parameter, these new alerts are merged into the previous notification and sent together. If there are still unsent alerts after the group_interval duration, Alertmanager starts a new timing cycle and waits for more alerts until the group_interval duration is reached again or new alerts are received.

                                                                          4. If Alertmanager keeps receiving alerts with the same labels and content within the duration specified by repeat_interval , it will resend the previously sent alert notifications. When resending alert notifications, Alertmanager does not wait for group_wait or group_interval , but sends notifications repeatedly according to the time interval specified by repeat_interval .

                                                                          5. If there are still unsent alerts after the repeat_interval duration, Alertmanager starts a new timing cycle and continues to wait for new alerts with the same labels and content. This process continues until there are no new alerts or Alertmanager is stopped.

                                                                          "},{"location":"en/admin/insight/reference/alertnotification.html#example","title":"Example","text":"

In the following example, a Prometheus rule group named critical_alerts fires an alert whenever CPU usage on an instance exceeds the threshold, and Alertmanager groups and routes these alerts with the settings shown:

# Prometheus alerting rule that fires when CPU usage exceeds the threshold\ngroups:\n- name: critical_alerts\n  rules:\n  - alert: HighCPUUsage\n    expr: 100 - avg by (instance) (rate(node_cpu_seconds_total{mode=\"idle\"}[5m])) * 100 > 50\n    for: 5m\n    labels:\n      severity: critical\n    annotations:\n      summary: \"High CPU usage detected on instance {{ $labels.instance }}\"\n\n# Alertmanager route applied to these alerts\nroute:\n  group_by: [rulename]\n  group_wait: 30s\n  group_interval: 5m\n  repeat_interval: 1h\n

                                                                          In this case:

                                                                          • When Alertmanager receives an alert, it waits for at least 30 seconds to collect additional alerts with the same labels and content, and includes them in the same notification.

• If more alerts with the same labels and content are received within 5 minutes, these new alerts are merged into the previous notification and sent together. If there are still unsent alerts after 5 minutes, Alertmanager starts a new timing cycle and waits for more alerts until 5 minutes have passed or new alerts are received.

                                                                          • If Alertmanager continues to receive alerts with the same labels and content within 1 hour, it will resend the previously sent alert notifications.

                                                                          "},{"location":"en/admin/insight/reference/lucene.html","title":"Lucene Syntax Usage","text":""},{"location":"en/admin/insight/reference/lucene.html#introduction-to-lucene","title":"Introduction to Lucene","text":"

                                                                          Lucene is a subproject of Apache Software Foundation's Jakarta project and is an open-source full-text search engine toolkit. The purpose of Lucene is to provide software developers with a simple and easy-to-use toolkit for implementing full-text search functionality in their target systems.

                                                                          "},{"location":"en/admin/insight/reference/lucene.html#lucene-syntax","title":"Lucene Syntax","text":"

                                                                          Lucene's syntax allows you to construct search queries in a flexible way to meet different search requirements. Here is a detailed explanation of Lucene's syntax:

                                                                          "},{"location":"en/admin/insight/reference/lucene.html#keyword-queries","title":"Keyword Queries","text":"

                                                                          To perform searches with multiple keywords using Lucene syntax, you can use Boolean logical operators to combine multiple keywords. Lucene supports the following operators:

                                                                          1. AND operator

                                                                            • Use AND or && to represent the logical AND relationship.
                                                                            • Example: term1 AND term2 or term1 && term2
                                                                          2. OR operator

                                                                            • Use OR or || to represent the logical OR relationship.
                                                                            • Example: term1 OR term2 or term1 || term2
                                                                          3. NOT operator

                                                                            • Use NOT or - to represent the logical NOT relationship.
                                                                            • Example: term1 NOT term2 or term1 -term2
                                                                          4. Quotes

                                                                            • You can enclose a phrase in quotes for exact matching.
                                                                            • Example: \"exact phrase\"
                                                                          "},{"location":"en/admin/insight/reference/lucene.html#examples","title":"Examples","text":"
                                                                          1. Specify fields

                                                                            field1:keyword1 AND (field2:keyword2 OR field3:keyword3) NOT field4:keyword4\n

                                                                            Explanation:

                                                                            • The query field field1 must contain the keyword keyword1 .
                                                                            • Additionally, either the field field2 must contain the keyword keyword2 or the field field3 must contain the keyword keyword3 .
                                                                            • Finally, the field field4 must not contain the keyword keyword4 .
                                                                          2. Not specify fields

                                                                            keyword1 AND (keyword2 OR keyword3) NOT keyword4\n

                                                                            Explanation:

                                                                            • The query keyword keyword1 must exist in any searchable field.
                                                                            • Additionally, either the keyword keyword2 must exist or the keyword keyword3 must exist in any searchable field.
                                                                            • Finally, the keyword keyword4 must not exist in any searchable field.
                                                                          "},{"location":"en/admin/insight/reference/lucene.html#fuzzy-queries","title":"Fuzzy Queries","text":"

                                                                          In Lucene, fuzzy queries can be performed using the tilde ( ~ ) operator for approximate matching. You can specify an edit distance to limit the degree of similarity in the matches.

                                                                          term~\n

                                                                          In the above example, term is the keyword to perform a fuzzy match on.

                                                                          Please note the following:

                                                                          • After the tilde ( ~ ), you can optionally specify a parameter to control the similarity of the fuzzy query.
                                                                          • The parameter value ranges from 0 to 2, where 0 represents an exact match, 1 allows for one edit operation (such as adding, deleting, or replacing characters) to match, and 2 allows for two edit operations to match.
                                                                          • If no parameter value is specified, the default similarity threshold used is 0.5.
                                                                          • Fuzzy queries will return documents that are similar to the given keyword but may incur some performance overhead, especially for larger indexes.
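
For example, a fuzzy query against a log field might look like this; the field and value are illustrative:

log:eror~1\n

Here eror~1 matches terms within one edit of eror, such as error.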
                                                                          "},{"location":"en/admin/insight/reference/lucene.html#wildcards","title":"Wildcards","text":"

                                                                          Lucene supports the following wildcard queries:

                                                                          1. * wildcard: Used to match zero or more characters.

                                                                            For example, te*t can match \"test\", \"text\", and \"tempest\".

                                                                          2. ? wildcard: Used to match a single character.

                                                                            For example, te?t can match \"test\" and \"text\".

                                                                          "},{"location":"en/admin/insight/reference/lucene.html#example","title":"Example","text":"
                                                                          te?t\n

                                                                          In the above example, te?t represents a word that starts with \"te\", followed by any single character, and ends with \"t\". This query can match words like \"test\", \"text\", and \"tent\".

                                                                          It is important to note that the question mark ( ? ) represents only a single character. If you want to match multiple characters or varying lengths of characters, you can use the asterisk ( * ) for multi-character wildcard matching. Additionally, the question mark will not match an empty string.

                                                                          To summarize, in Lucene syntax, the question mark ( ? ) is used as a single-character wildcard to match any single character. By using the question mark in your search keywords, you can perform more flexible and specific pattern matching.

                                                                          "},{"location":"en/admin/insight/reference/lucene.html#range-queries","title":"Range Queries","text":"

                                                                          Lucene syntax supports range queries, where you can use square brackets [ ] or curly braces { } to represent a range. Here are examples of range queries:

                                                                          1. Inclusive boundary range query:

                                                                            • Square brackets [ ] indicate a closed interval that includes the boundary values.
                                                                            • Example: field:[value1 TO value2] represents the range of values for field , including both value1 and value2 .
                                                                          2. Exclusive boundary range query:

                                                                            • Curly braces { } indicate an open interval that excludes the boundary values.
                                                                            • Example: field:{value1 TO value2} represents the range of values for field between value1 and value2 , excluding both.
3. Open-ended range query:

  • You can use the wildcard * in place of a boundary value to specify an unbounded range.
  • Example: field:[value TO *] represents the range of values for field from value to positive infinity, and field:[* TO value] represents the range of values for field from negative infinity to value .

                                                                            Note

                                                                            Please note that range queries are applicable only to fields that can be sorted, such as numeric fields and date fields. Also, ensure that you correctly specify the boundary values as the actual value type of the field in your query. If you want to perform a range query across the entire index without specifying a specific field, you can use the wildcard query * instead of a field name.

                                                                          "},{"location":"en/admin/insight/reference/lucene.html#examples_1","title":"Examples","text":"
                                                                          1. Specifying a field

                                                                            timestamp:[2022-01-01 TO 2022-01-31]\n

                                                                            This will retrieve data where the timestamp field falls within the range from January 1, 2022, to January 31, 2022.

                                                                          2. Without specifying a field

                                                                            *:[value1 TO value2]\n

                                                                            This will search the entire index for documents with values ranging from value1 to value2 .

                                                                          "},{"location":"en/admin/insight/reference/lucene.html#insight-common-keywords","title":"Insight Common Keywords","text":""},{"location":"en/admin/insight/reference/lucene.html#container-logs","title":"Container Logs","text":"
                                                                          • kubernetes.container_image: Container image name
                                                                          • kubernetes.container_name: Container name
                                                                          • kubernetes.namespace_name: Namespace name
                                                                          • kubernetes.pod_name: Pod name
                                                                          • log: Log content
                                                                          • time: Log timestamp
                                                                          "},{"location":"en/admin/insight/reference/lucene.html#host-logs","title":"Host Logs","text":"
                                                                          • syslog.file: Log file name
                                                                          • syslog.host: Host name
                                                                          • log: Log content

                                                                          If you want to exactly match a specific value, add the .keyword suffix after the keyword, e.g. kubernetes.container_name.keyword.

                                                                          "},{"location":"en/admin/insight/reference/lucene.html#examples_2","title":"Examples","text":"
                                                                          1. Query container logs of the specified container in the specified Pod

                                                                            kubernetes.pod_name.keyword:nginx-pod AND kubernetes.container_name.keyword:nginx\n
                                                                            2. Query container logs whose Pod name contains nginx-pod

                                                                            kubernetes.pod_name:nginx-pod\n
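
                                                                          3. Several conditions can also be combined with boolean operators (an illustrative query, not part of the original examples):

                                                                            kubernetes.namespace_name.keyword:default AND kubernetes.pod_name:nginx-pod AND log:error\n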
                                                                          "},{"location":"en/admin/insight/reference/notify-helper.html","title":"Configure Notification Templates","text":""},{"location":"en/admin/insight/reference/notify-helper.html#template-syntax-go-template-description","title":"Template Syntax (Go Template) Description","text":"

                                                                          The alert notification template uses Go Template syntax to render the template.

                                                                          The template will be rendered based on the following data.

                                                                          {\n    \"status\": \"firing\",\n    \"labels\": {\n        \"alertgroup\": \"test-group\",           // Alert policy name\n        \"alertname\": \"test-rule\",          // Alert rule name\n        \"cluster\": \"35b54a48-b66c-467b-a8dc-503c40826330\",\n        \"customlabel1\": \"v1\",\n        \"customlabel2\": \"v2\",\n        \"endpoint\": \"https\",\n        \"group_id\": \"01gypg06fcdf7rmqc4ksv97646\",\n        \"instance\": \"10.6.152.85:6443\",\n        \"job\": \"apiserver\",\n        \"namespace\": \"default\",\n        \"prometheus\": \"insight-system/insight-agent-kube-prometh-prometheus\",\n        \"prometheus_replica\": \"prometheus-insight-agent-kube-prometh-prometheus-0\",\n        \"rule_id\": \"01gypg06fcyn2g9zyehbrvcdfn\",\n        \"service\": \"kubernetes\",\n        \"severity\": \"critical\",\n        \"target\": \"35b54a48-b66c-467b-a8dc-503c40826330\",\n        \"target_type\": \"cluster\"\n   },\n    \"annotations\": {\n        \"customanno1\": \"v1\",\n        \"customanno2\": \"v2\",\n        \"description\": \"This is a test rule, 10.6.152.85:6443 down\",\n        \"value\": \"1\"\n    },\n    \"startsAt\": \"2023-04-20T07:53:54.637363473Z\",\n    \"endsAt\": \"0001-01-01T00:00:00Z\",\n    \"generatorURL\": \"http://vmalert-insight-victoria-metrics-k8s-stack-df987997b-npsl9:8080/vmalert/alert?group_id=16797738747470868115&alert_id=10071735367745833597\",\n    \"fingerprint\": \"25c8d93d5bf58ac4\"\n}\n
                                                                          "},{"location":"en/admin/insight/reference/notify-helper.html#instructions-for-use","title":"Instructions for Use","text":"
                                                                          1. The . character

                                                                            Renders the specified object in the current scope.

                                                                            Example: render the entire top-level scope, that is, all of the context data in the sample above.

                                                                            {{ . }}\n
                                                                          2. Conditional statement if / else

                                                                            Use if to test the data; when the condition is not met, the else branch (if present) runs instead.

                                                                            {{if .Labels.namespace }}Namespace: {{ .Labels.namespace }} \\n{{ end }}\n
                                                                          3. Loop action range

                                                                            The range action iterates over a list or map to repeat a block of content.

                                                                            Example: iterate over the labels of the alert to output every label.

                                                                            {{ range .Labels }} \\n {{ end }}\n
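
                                                                          As a further sketch based on the sample data above, range also supports a two-variable form that exposes both keys and values, and if supports an explicit else branch:

                                                                            {{ range $key, $value := .Labels }}{{ $key }}: {{ $value }} \\n{{ end }}\n

                                                                            {{ if .Labels.node }}Node: {{ .Labels.node }}{{ else }}Node: N/A{{ end }}\n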
                                                                          "},{"location":"en/admin/insight/reference/notify-helper.html#functions","title":"FUNCTIONS","text":"

                                                                          Insight's \"notification templates\" and \"SMS templates\" support over 70 sprig functions, as well as custom functions.

                                                                          "},{"location":"en/admin/insight/reference/notify-helper.html#sprig-functions","title":"Sprig Functions","text":"

                                                                          Sprig provides over 70 built-in template functions to assist in rendering data. The following are some commonly used functions:

                                                                          • Date operations
                                                                          • String operations
                                                                          • Type conversion operations
                                                                          • Mathematical calculations with integers

                                                                          For more details, you can refer to the official documentation.

                                                                          "},{"location":"en/admin/insight/reference/notify-helper.html#custom-functions","title":"Custom Functions","text":""},{"location":"en/admin/insight/reference/notify-helper.html#toclustername","title":"toClusterName","text":"

                                                                          The toClusterName function looks up the cluster name by the cluster's unique identifier (ID). If no matching cluster is found, it returns the passed-in identifier unchanged.

                                                                          func toClusterName(id string) (string, error)\n

                                                                          Example:

                                                                          {{ toClusterName \"clusterId\" }}\n{{ \"clusterId\" | toClusterName }}\n
                                                                          "},{"location":"en/admin/insight/reference/notify-helper.html#toclusterid","title":"toClusterId","text":"

                                                                          The toClusterId function looks up the cluster's unique identifier (ID) by the cluster name. If no matching cluster is found, it returns the passed-in name unchanged.

                                                                          func toClusterId(name string) (string, error)\n

                                                                          Example:

                                                                          {{ toClusterId \"clusterName\" }}\n{{ \"clusterName\" | toClusterId }}\n
                                                                          "},{"location":"en/admin/insight/reference/notify-helper.html#todateinzone","title":"toDateInZone","text":"

                                                                          The toDateInZone function converts a string date into the desired time format and applies the specified time zone.

                                                                          func toDateInZone(fmt string, date interface{}, zone string) string\n

                                                                          Example 1:

                                                                          {{ toDateInZone \"2006-01-02T15:04:05\" \"2022-08-15T05:59:08.064449533Z\" \"Asia/Shanghai\" }}\n

                                                                          This will return 2022-08-15T13:59:08. Additionally, you can achieve the same effect as toDateInZone using the built-in functions provided by sprig:

                                                                          {{ dateInZone \"2006-01-02T15:04:05\" (toDate \"2006-01-02T15:04:05Z07:00\" .StartsAt) \"Asia/Shanghai\" }}\n

                                                                          Example 2:

                                                                          {{ toDateInZone \"2006-01-02T15:04:05\" .StartsAt \"Asia/Shanghai\" }}\n\n## Threshold Template Description\n\nThe built-in webhook alert template in Insight is as follows. Other contents such as email and WeCom are the same, only proper adjustments are made for line breaks.\n\n```text\nRule Name: {{ .Labels.alertname }} \\n\nPolicy Name: {{ .Labels.alertgroup }} \\n\nAlert level: {{ .Labels.severity }} \\n\nCluster: {{ .Labels.cluster }} \\n\n{{if .Labels.namespace }}Namespace: {{ .Labels.namespace }} \\n{{ end }}\n{{if .Labels.node }}Node: {{ .Labels.node }} \\n{{ end }}\nResource Type: {{ .Labels.target_type }} \\n\n{{if .Labels.target }}Resource Name: {{ .Labels.target }} \\n{{ end }}\nTrigger Value: {{ .Annotations.value }} \\n\nOccurred Time: {{ .StartsAt }} \\n\n{{if ne \"0001-01-01T00:00:00Z\" .EndsAt }}End Time: {{ .EndsAt }} \\n{{ end }}\nDescription: {{ .Annotations.description }} \\n\n
                                                                          "},{"location":"en/admin/insight/reference/notify-helper.html#email-subject-parameters","title":"Email Subject Parameters","text":"

                                                                          When sending alert messages, Insight combines messages generated by the same rule at the same time, so email subjects differ from the templates above and only use the commonLabels of the alert message to render the template. The default template is as follows:

                                                                          [{{ .status }}] [{{ .severity }}] Alert: {{ .alertname }}\n

                                                                          Other fields that can be used as email subjects are as follows:

                                                                          {{ .status }} Triggering status of the alert message\n{{ .alertgroup }} Name of the policy to which the alert belongs\n{{ .alertname }} Name of the rule to which the alert belongs\n{{ .severity }} Severity level of the alert\n{{ .target_type }} Type of resource for which the alert is raised\n{{ .target }} Resource object for which the alert is raised\n{{ .Custom label key for other rules }}\n
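
                                                                          For example, a customized subject that also carries the resource type and name could look like the following sketch, which uses only the fields listed above:

                                                                            [{{ .status }}] [{{ .severity }}] [{{ .target_type }}] Alert: {{ .alertname }} - {{ .target }}\n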
                                                                          "},{"location":"en/admin/insight/reference/tailing-sidecar.html","title":"Collecting Container Logs through Sidecar","text":"

                                                                          Tailing Sidecar is a Kubernetes cluster-level logging proxy that acts as a streaming sidecar container. It allows automatic collection and summarization of log files within containers, even when the container cannot write to standard output or standard error streams.

                                                                          Insight supports log collection through the Sidecar mode, which involves running a Sidecar container alongside each Pod to output log data to the standard output stream. This enables FluentBit to collect container logs effectively.

                                                                          The Insight Agent comes with the tailing-sidecar operator installed by default. To enable file log collection within a container, you can add annotations to the Pod, which will automatically inject the Tailing Sidecar container. The injected Sidecar container reads the files in the business container and outputs them to the standard output stream.

                                                                          Here are the specific steps to follow:

                                                                          1. Modify the YAML file of the Pod and add the following parameters in the annotation field:

                                                                            metadata:\n  annotations:\n    tailing-sidecar: <sidecar-name-0>:<volume-name-0>:<path-to-tail-0>;<sidecar-name-1>:<volume-name-1>:<path-to-tail-1>\n

                                                                            Field description:

                                                                            • sidecar-name-0 : Name for the Tailing Sidecar container (optional; a container name will be created automatically if not specified, starting with the prefix \"tailing-sidecar\").
                                                                            • volume-name-0 : Name of the storage volume.
                                                                            • path-to-tail-0 : File path to tail.

                                                                            Note

                                                                            Each Pod can run multiple sidecar containers, with the configurations separated by ; . This allows different sidecar containers to collect multiple files and store them in various volumes.

                                                                          2. Restart the Pod. Once the Pod's status changes to Running , you can use the Log Query interface to search for logs within the container of the Pod.
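
                                                                          For reference, below is a minimal Pod sketch with the annotation applied; the names sample-app, app-logs, and the log path are hypothetical:

                                                                            apiVersion: v1\nkind: Pod\nmetadata:\n  name: sample-app\n  annotations:\n    tailing-sidecar: \"sidecar0:app-logs:/var/log/app/app.log\"\nspec:\n  containers:\n    - name: app\n      image: nginx:latest\n      volumeMounts:\n        - name: app-logs\n          mountPath: /var/log/app\n  volumes:\n    - name: app-logs\n      emptyDir: {}\n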

                                                                          "},{"location":"en/admin/insight/reference/used-metric-in-insight.html","title":"Insight Reference Metric","text":"

                                                                          The metrics in this article are organized based on the community's kube-prometheus framework. Currently, it covers metrics from multiple levels, including Cluster, Node, Namespace, and Workload. This article lists some commonly used metrics, their descriptions, and units for easy reference.

                                                                          "},{"location":"en/admin/insight/reference/used-metric-in-insight.html#cluster","title":"Cluster","text":"Metric Name Description Unit cluster_cpu_utilization Cluster CPU Utilization cluster_cpu_total Total CPU in Cluster Core cluster_cpu_usage CPU Used in Cluster Core cluster_cpu_requests_commitment CPU Allocation Rate in Cluster cluster_memory_utilization Cluster Memory Utilization cluster_memory_usage Memory Usage in Cluster Byte cluster_memory_available Available Memory in Cluster Byte cluster_memory_requests_commitment Memory Allocation Rate in Cluster cluster_memory_total Total Memory in Cluster Byte cluster_net_utilization Network Data Transfer Rate in Cluster Byte/s cluster_net_bytes_transmitted Network Data Transmitted in Cluster (Upstream) Byte/s cluster_net_bytes_received Network Data Received in Cluster (Downstream) Byte/s cluster_disk_read_iops Disk Read IOPS in Cluster times/s cluster_disk_write_iops Disk Write IOPS in Cluster times/s cluster_disk_read_throughput Disk Read Throughput in Cluster Byte/s cluster_disk_write_throughput Disk Write Throughput in Cluster Byte/s cluster_disk_size_capacity Total Disk Capacity in Cluster Byte cluster_disk_size_available Available Disk Size in Cluster Byte cluster_disk_size_usage Disk Usage in Cluster Byte cluster_disk_size_utilization Disk Utilization in Cluster cluster_node_total Total Nodes in Cluster units cluster_node_online Online Nodes in Cluster units cluster_node_offline_count Count of Offline Nodes in Cluster units cluster_pod_count Total Pods in Cluster units cluster_pod_running_count Count of Running Pods in Cluster units cluster_pod_abnormal_count Count of Abnormal Pods in Cluster units cluster_deployment_count Total Deployments in Cluster units cluster_deployment_normal_count Count of Normal Deployments in Cluster units cluster_deployment_abnormal_count Count of Abnormal Deployments in Cluster units cluster_statefulset_count Count of StatefulSets in Cluster units cluster_statefulset_normal_count Count of Normal StatefulSets in Cluster units cluster_statefulset_abnormal_count Count of Abnormal StatefulSets in Cluster units cluster_daemonset_count Count of DaemonSets in Cluster units cluster_daemonset_normal_count Count of Normal DaemonSets in Cluster units cluster_daemonset_abnormal_count Count of Abnormal DaemonSets in Cluster units cluster_job_count Total Jobs in Cluster units cluster_job_normal_count Count of Normal Jobs in Cluster units cluster_job_abnormal_count Count of Abnormal Jobs in Cluster units

                                                                          Tip

                                                                          Utilization is generally a number in the range (0,1] (e.g., 0.21, not 21%)

                                                                          "},{"location":"en/admin/insight/reference/used-metric-in-insight.html#node","title":"Node","text":"Metric Name Description Unit node_cpu_utilization Node CPU Utilization node_cpu_total Total CPU in Node Core node_cpu_usage CPU Usage in Node Core node_cpu_requests_commitment CPU Allocation Rate in Node node_memory_utilization Node Memory Utilization node_memory_usage Memory Usage in Node Byte node_memory_requests_commitment Memory Allocation Rate in Node node_memory_available Available Memory in Node Byte node_memory_total Total Memory in Node Byte node_net_utilization Network Data Transfer Rate in Node Byte/s node_net_bytes_transmitted Network Data Transmitted in Node (Upstream) Byte/s node_net_bytes_received Network Data Received in Node (Downstream) Byte/s node_disk_read_iops Disk Read IOPS in Node times/s node_disk_write_iops Disk Write IOPS in Node times/s node_disk_read_throughput Disk Read Throughput in Node Byte/s node_disk_write_throughput Disk Write Throughput in Node Byte/s node_disk_size_capacity Total Disk Capacity in Node Byte node_disk_size_available Available Disk Size in Node Byte node_disk_size_usage Disk Usage in Node Byte node_disk_size_utilization Disk Utilization in Node"},{"location":"en/admin/insight/reference/used-metric-in-insight.html#workload","title":"Workload","text":"

                                                                          The currently supported workload types include: Deployment, StatefulSet, DaemonSet, Job, and CronJob.

                                                                          Metric Name Description Unit workload_cpu_usage Workload CPU Usage Core workload_cpu_limits Workload CPU Limit Core workload_cpu_requests Workload CPU Requests Core workload_cpu_utilization Workload CPU Utilization workload_memory_usage Workload Memory Usage Byte workload_memory_limits Workload Memory Limit Byte workload_memory_requests Workload Memory Requests Byte workload_memory_utilization Workload Memory Utilization workload_memory_usage_cached Workload Memory Usage (including cache) Byte workload_net_bytes_transmitted Workload Network Data Transmitted Rate Byte/s workload_net_bytes_received Workload Network Data Received Rate Byte/s workload_disk_read_throughput Workload Disk Read Throughput Byte/s workload_disk_write_throughput Workload Disk Write Throughput Byte/s
                                                                          1. Workload metrics are calculated as totals over the workload's Pods.
                                                                          2. Metrics can be queried with label selectors, e.g. workload_cpu_usage{workload_type=\"deployment\", workload=\"prometheus\"}.
                                                                          3. workload_pod_utilization is calculated as workload_pod_usage / workload_pod_request.
                                                                          "},{"location":"en/admin/insight/reference/used-metric-in-insight.html#pod","title":"Pod","text":"Metric Name Description Unit pod_cpu_usage Pod CPU Usage Core pod_cpu_limits Pod CPU Limit Core pod_cpu_requests Pod CPU Requests Core pod_cpu_utilization Pod CPU Utilization pod_memory_usage Pod Memory Usage Byte pod_memory_limits Pod Memory Limit Byte pod_memory_requests Pod Memory Requests Byte pod_memory_utilization Pod Memory Utilization pod_memory_usage_cached Pod Memory Usage (including cache) Byte pod_net_bytes_transmitted Pod Network Data Transmitted Rate Byte/s pod_net_bytes_received Pod Network Data Received Rate Byte/s pod_disk_read_throughput Pod Disk Read Throughput Byte/s pod_disk_write_throughput Pod Disk Write Throughput Byte/s

                                                                          You can obtain the CPU usage of all Pods belonging to the Deployment named prometheus by using pod_cpu_usage{workload_type=\"deployment\", workload=\"prometheus\"}.

                                                                          "},{"location":"en/admin/insight/reference/used-metric-in-insight.html#span-metrics","title":"Span Metrics","text":"Metric Name Description Unit calls_total Total Service Requests duration_milliseconds_bucket Service Latency Histogram duration_milliseconds_sum Total Service Latency ms duration_milliseconds_count Number of Latency Records otelcol_processor_groupbytrace_spans_released Number of Collected Spans otelcol_processor_groupbytrace_traces_released Number of Collected Traces traces_service_graph_request_total Total Service Requests (Topology Feature) traces_service_graph_request_server_seconds_sum Total Latency (Topology Feature) ms traces_service_graph_request_server_seconds_bucket Service Latency Histogram (Topology Feature) traces_service_graph_request_server_seconds_count Total Service Requests (Topology Feature)"},{"location":"en/admin/insight/system-config/modify-config.html","title":"Modify system configuration","text":"

                                                                          Observability persists metric, log, and trace data by default. You can modify the system configuration by following this page.

                                                                          "},{"location":"en/admin/insight/system-config/modify-config.html#how-to-modify-the-metric-data-retention-period","title":"How to modify the metric data retention period","text":"

                                                                          Refer to the following steps to modify the metric data retention period.

                                                                          1. Run the following command:

                                                                            kubectl edit vmcluster insight-victoria-metrics-k8s-stack -n insight-system\n
                                                                          2. In the YAML file, the default value of retentionPeriod is 14 , and the unit is days. Modify the parameter according to your needs.

                                                                            apiVersion: operator.victoriametrics.com/v1beta1\nkind: VMCluster\nmetadata:\n  annotations:\n    meta.helm.sh/release-name: insight\n    meta.helm.sh/release-namespace: insight-system\n  creationTimestamp: \"2022-08-25T04:31:02Z\"\n  finalizers:\n  - apps.victoriametrics.com/finalizer\n  generation: 2\n  labels:\n    app.kubernetes.io/instance: insight\n    app.kubernetes.io/managed-by: Helm\n    app.kubernetes.io/name: victoria-metrics-k8s-stack\n    app.kubernetes.io/version: 1.77.2\n    helm.sh/chart: victoria-metrics-k8s-stack-0.9.3\n  name: insight-victoria-metrics-k8s-stack\n  namespace: insight-system\n  resourceVersion: \"123007381\"\n  uid: 55cee8d6-c651-404b-b2c9-50603b405b54\nspec:\n  replicationFactor: 1\n  retentionPeriod: \"14\"\n  vminsert:\n    extraArgs:\n      maxLabelsPerTimeseries: \"45\"\n    image:\n      repository: docker.m.daocloud.io/victoriametrics/vminsert\n      tag: v1.80.0-cluster\n    replicaCount: 1\n
                                                                          3. After saving the change, the Pods of the component responsible for storing metric data will restart automatically; just wait a moment.
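
                                                                          Equivalently, the retention period can be changed non-interactively with a merge patch (a sketch; adjust the value as needed):

                                                                            kubectl -n insight-system patch vmcluster insight-victoria-metrics-k8s-stack --type merge -p '{\"spec\":{\"retentionPeriod\":\"30\"}}'\n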

                                                                          "},{"location":"en/admin/insight/system-config/modify-config.html#how-to-modify-the-log-data-storage-duration","title":"How to modify the log data storage duration","text":"

                                                                          Refer to the following steps to modify the log data retention period:

                                                                          "},{"location":"en/admin/insight/system-config/modify-config.html#method-1-modify-the-json-file","title":"Method 1: Modify the Json file","text":"
                                                                          1. Modify the max_age parameter in the rollover field of the following request and set the retention period. The default storage period is 7d . Replace http://localhost:9200 with the access address of Elasticsearch.

                                                                            curl -X PUT \"http://localhost:9200/_ilm/policy/insight-es-k8s-logs-policy?pretty\" -H 'Content-Type: application/json' -d'\n{\n\"policy\": {\n    \"phases\": {\n        \"hot\": {\n            \"min_age\": \"0ms\",\n            \"actions\": {\n            \"set_priority\": {\n                \"priority\": 100\n            },\n            \"rollover\": {\n                \"max_age\": \"7d\",\n                \"max_size\": \"10gb\"\n            }\n            }\n        },\n    \"warm\": {\n        \"min_age\": \"10d\",\n        \"actions\": {\n        \"forcemerge\": {\n            \"max_num_segments\": 1\n        }\n        }\n    },\n    \"delete\": {\n        \"min_age\": \"30d\",\n        \"actions\": {\n        \"delete\": {}\n        }\n    }\n    }\n}\n}\n
                                                                          2. After the modification, run the above command. If it prints the content shown below, the modification was successful.

                                                                            {\n\"acknowledged\": true\n}\n
                                                                          "},{"location":"en/admin/insight/system-config/modify-config.html#method-2-modify-from-the-ui","title":"Method 2: Modify from the UI","text":"
                                                                          1. Log in to Kibana and select Stack Management in the left navigation bar.

                                                                          2. Select Index Lifecycle Policies in the left navigation, find the policy insight-es-k8s-logs-policy , and click it to enter the details.

                                                                          3. Expand the Hot phase configuration panel, modify the Maximum age parameter, and set the retention period. The default storage period is 7d .

                                                                          4. After modification, click Save policy at the bottom of the page to complete the modification.

                                                                          "},{"location":"en/admin/insight/system-config/modify-config.html#how-to-modify-the-trace-data-storage-duration","title":"How to modify the trace data storage duration","text":"

                                                                          Refer to the following steps to modify the trace data retention period:

                                                                          "},{"location":"en/admin/insight/system-config/modify-config.html#method-1-modify-the-json-file_1","title":"Method 1: Modify the Json file","text":"
                                                                          1. Modify the max_age parameter in the rollover field of the following request and set the retention period. The default storage period is 7d . Likewise, replace http://localhost:9200 with the access address of Elasticsearch.

                                                                            curl -X PUT \"http://localhost:9200/_ilm/policy/jaeger-ilm-policy?pretty\" -H 'Content-Type: application/json' -d'\n{\n\"policy\": {\n    \"phases\": {\n        \"hot\": {\n            \"min_age\": \"0ms\",\n            \"actions\": {\n            \"set_priority\": {\n                \"priority\": 100\n            },\n            \"rollover\": {\n                \"max_age\": \"7d\",\n                \"max_size\": \"10gb\"\n            }\n            }\n        },\n    \"warm\": {\n        \"min_age\": \"10d\",\n        \"actions\": {\n        \"forcemerge\": {\n            \"max_num_segments\": 1\n        }\n        }\n    },\n    \"delete\": {\n        \"min_age\": \"30d\",\n        \"actions\": {\n        \"delete\": {}\n        }\n    }\n    }\n}\n}\n
                                                                          2. After the modification, run the above command on the console. If it prints the content shown below, the modification was successful.

                                                                            {\n\"acknowledged\": true\n}\n
                                                                          "},{"location":"en/admin/insight/system-config/modify-config.html#method-2-modify-from-the-ui_1","title":"Method 2: Modify from the UI","text":"
                                                                          1. Log in to Kibana and select Stack Management in the left navigation bar.

                                                                          2. Select Index Lifecycle Policies in the left navigation, find the policy jaeger-ilm-policy , and click it to enter the details.

                                                                          3. Expand the Hot phase configuration panel, modify the Maximum age parameter, and set the retention period. The default storage period is 7d .

                                                                          4. After modification, click Save policy at the bottom of the page to complete the modification.

                                                                          "},{"location":"en/admin/insight/system-config/system-component.html","title":"System Components","text":"

                                                                          On the system component page, you can quickly view the running status of the system components in Insight. When a system component fails, some features in Insight will be unavailable.

                                                                          1. Go to the Insight product module.
                                                                          2. In the left navigation bar, select System Management -> System Components .
                                                                          "},{"location":"en/admin/insight/system-config/system-component.html#component-description","title":"Component description","text":"Module Component Name Description Metrics vminsert-insight-victoria-metrics-k8s-stack Responsible for writing the metric data collected by Prometheus in each cluster to the storage component. If this component is abnormal, the metric data of the worker cluster cannot be written. Metrics vmalert-insight-victoria-metrics-k8s-stack Responsible for taking effect of the recording and alert rules configured in the VM Rule, and sending the triggered alert rules to alertmanager. Metrics vmalertmanager-insight-victoria-metrics-k8s-stack is responsible for sending messages when alerts are triggered. If this component is abnormal, the alert information cannot be sent. Metrics vmselect-insight-victoria-metrics-k8s-stack Responsible for querying metrics data. If this component is abnormal, the metric cannot be queried. Metrics vmstorage-insight-victoria-metrics-k8s-stack Responsible for storing multicluster metrics data. Dashboard grafana-deployment Provide monitoring panel capability. The exception of this component will make it impossible to view the built-in dashboard. Link insight-jaeger-collector Responsible for receiving trace data in opentelemetry-collector and storing it. Link insight-jaeger-query Responsible for querying the trace data collected in each cluster. Link insight-opentelemetry-collector Responsible for receiving trace data forwarded by each sub-cluster Log elasticsearch Responsible for storing the log data of each cluster."},{"location":"en/admin/insight/system-config/system-config.html","title":"System Settings","text":"

                                                                          System Settings displays the default storage time of metrics, logs, traces and the default Apdex threshold.

                                                                          1. Click the right navigation bar and select System Settings .

                                                                          2. Currently, only the storage duration of historical alerts can be modified. Click Edit and enter the target duration.

                                                                            When the storage duration is set to \"0\", the historical alerts will not be cleared.

                                                                          Note

                                                                          To modify other settings, please refer to How to modify the system settings?

                                                                          "},{"location":"en/admin/insight/trace/service.html","title":"Service Insight","text":"

                                                                          In Insight , a service refers to a group of workloads that provide the same behavior for incoming requests. Service insight uses the OpenTelemetry SDK to observe the performance and status of applications at runtime.

                                                                          For how to use OpenTelemetry, please refer to: Using OTel to give your application insight.

                                                                          "},{"location":"en/admin/insight/trace/service.html#glossary","title":"Glossary","text":"
                                                                          • Service: A service represents a group of workloads that provide the same behavior for incoming requests. You can define the service name when using the OpenTelemetry SDK or use the name defined in Istio.
                                                                          • Operation: An operation refers to a specific request or action handled by a service. Each span has an operation name.
                                                                          • Outbound Traffic: Outbound traffic refers to all the traffic generated by the current service when making requests.
                                                                          • Inbound Traffic: Inbound traffic refers to all the traffic initiated by the upstream service targeting the current service.
                                                                          "},{"location":"en/admin/insight/trace/service.html#steps","title":"Steps","text":"

                                                                          The Services List page displays key metrics such as throughput rate, error rate, and request latency for all services that have been instrumented with distributed tracing. You can filter services based on clusters or namespaces and sort the list by throughput rate, error rate, or request latency. By default, the data displayed in the list is for the last hour, but you can customize the time range.

                                                                          Follow these steps to view service insight metrics:

                                                                          1. Go to the Insight product module.

                                                                          2. Select Trace Tracking -> Services from the left navigation bar.

                                                                            Attention

                                                                            1. If the namespace of a service in the list is unknown , it means that the service has not been properly instrumented. We recommend reconfiguring the instrumentation.
                                                                            2. If multiple services have the same name and none of them have the correct Namespace environment variable configured, the metrics displayed in the list and service details page will be aggregated for all those services.
                                                                          3. Click a service name (taking insight-system as an example) to view the detailed metrics and operation metrics for that service.

                                                                            1. In the Service Topology section, you can view the service topology one layer above or below the current service. When you hover over a node, you can see its information.
                                                                            2. In the Traffic Metrics section, you can view the monitoring metrics for all requests to the service within the past hour (including inbound and outbound traffic).
                                                                            3. You can use the time selector in the upper right corner to quickly select a time range or specify a custom time range.
                                                                            4. Sorting is available for throughput, error rate, and request latency in the operation metrics.
                                                                            5. Clicking on the icon next to an individual operation will take you to the Traces page to quickly search for related traces.

                                                                          "},{"location":"en/admin/insight/trace/service.html#service-metric-explanations","title":"Service Metric Explanations","text":"Metric Description Throughput Rate The number of requests processed within a unit of time. Error Rate The ratio of erroneous requests to the total number of requests within the specified time range. P50 Request Latency The response time within which 50% of requests complete. P95 Request Latency The response time within which 95% of requests complete. P99 Request Latency The response time within which 99% of requests complete."},{"location":"en/admin/insight/trace/topology.html","title":"Service Map","text":"

                                                                          Service map is a visual representation of the connections, communication, and dependencies between services. It provides insights into the service-to-service interactions, allowing you to view the calls and performance of services within a specified time range. The connections between nodes in the topology map represent the existence of service-to-service calls during the queried time period.

                                                                          "},{"location":"en/admin/insight/trace/topology.html#prerequisites","title":"Prerequisites","text":"
                                                                          1. Insight Agent is installed in the cluster and the applications are in the Running state.
                                                                          2. Services have been instrumented for distributed tracing using Operator or OpenTelemetry SDK.
                                                                          "},{"location":"en/admin/insight/trace/topology.html#steps","title":"Steps","text":"
                                                                          1. Go to the Insight product module.

                                                                          2. Select Tracing -> Service Map from the left navigation bar.

                                                                          3. In the Service Map, you can perform the following actions:

                                                                            • Click a node to slide out the details of the service on the right side. Here, you can view metrics such as request latency, throughput, and error rate for the service. Clicking on the service name takes you to the service details page.
                                                                            • Hover over the connections to view the traffic metrics between the two services.
                                                                            • Click Display Settings to configure which elements are displayed in the service map.

                                                                          "},{"location":"en/admin/insight/trace/topology.html#other-nodes","title":"Other Nodes","text":"

                                                                          In the Service Map, there can be nodes that are not part of the cluster. These external nodes can be categorized into three types:

                                                                          • Database
                                                                          • Message Queue
                                                                          • Virtual Node

                                                                          • If a service makes a request to a Database or Message Queue, these two types of nodes are displayed in the topology map by default. Virtual Nodes, however, represent services outside the cluster or services that are not instrumented for tracing, and they are not displayed by default.

                                                                          • When a service makes a request to MySQL, PostgreSQL, or Oracle Database, the detailed database type can be seen in the map.

                                                                          "},{"location":"en/admin/insight/trace/topology.html#enabling-virtual-nodes","title":"Enabling Virtual Nodes","text":"
                                                                          1. Update the insight-server chart values, locate the parameter shown in the image below, and change false to true.
                                                                          1. In the display settings of the service map, check the Virtual Services option to enable it.
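
                                                                          The chart value can also be flipped from the command line; the following is only a sketch, where <release>, <chart>, and <virtual-node-flag> are placeholders for your actual release name, chart reference, and the parameter referred to above:

                                                                            helm -n insight-system upgrade <release> <chart> --reuse-values --set <virtual-node-flag>=true\n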
                                                                          "},{"location":"en/admin/insight/trace/trace.html","title":"Trace Query","text":"

                                                                          On the trace query page, you can query detailed information about a call trace by TraceID or filter call traces based on various conditions.

                                                                          "},{"location":"en/admin/insight/trace/trace.html#glossary","title":"Glossary","text":"
                                                                          • TraceID: Used to identify a complete request call trace.
                                                                          • Operation: Describes the specific operation or event represented by a Span.
                                                                          • Entry Span: The entry Span represents the first request of the entire call.
                                                                          • Latency: The duration from receiving the request to completing the response for the entire call trace.
                                                                          • Span: The number of Spans included in the entire trace.
                                                                          • Start Time: The time when the current trace starts.
                                                                          • Tag: A collection of key-value pairs that constitute Span tags. Tags are used to annotate and supplement Spans, and each Span can have multiple key-value tag pairs.
                                                                          "},{"location":"en/admin/insight/trace/trace.html#steps","title":"Steps","text":"

                                                                          Please follow these steps to search for a trace:

                                                                          1. Go to the Insight product module.
                                                                          2. Select Tracing -> Traces from the left navigation bar.

                                                                            Note

                                                                            Sorting by Span, Latency, and Start At is supported in the list.

                                                                          3. Click the TraceID Query in the filter bar to switch to TraceID search.

                                                                          4. To search using TraceID, please enter the complete TraceID.

                                                                          "},{"location":"en/admin/insight/trace/trace.html#other-operations","title":"Other Operations","text":""},{"location":"en/admin/insight/trace/trace.html#view-trace-details","title":"View Trace Details","text":"
                                                                          1. Click the TraceID of a trace in the trace list to view its detailed call information.

                                                                          "},{"location":"en/admin/insight/trace/trace.html#associated-logs","title":"Associated Logs","text":"
                                                                          1. Click the icon on the right side of the trace data to search for associated logs.

                                                                            • By default, it queries the log data within the duration of the trace and one minute after its completion.
                                                                            • The queried logs include those with the trace's TraceID in their log text and container logs related to the trace invocation process.
                                                                          2. Click View More to jump to the Associated Log page with the filter conditions applied.

                                                                          3. By default, all logs are searched, but you can filter by the TraceID or the relevant container logs from the trace call process using the dropdown.

                                                                            Note

                                                                            Since a trace may span clusters or namespaces, users without sufficient permissions will be unable to query the associated logs for that trace.

                                                                          "},{"location":"en/admin/k8s/add-node.html","title":"Adding Worker Nodes","text":"

                                                                          If there are not enough nodes, you can add more nodes to the cluster.

                                                                          "},{"location":"en/admin/k8s/add-node.html#prerequisites","title":"Prerequisites","text":"
                                                                          • AI platform is installed
                                                                          • An administrator account is available
                                                                          • A cluster with GPU nodes has been created
                                                                          • A cloud host has been prepared
                                                                          "},{"location":"en/admin/k8s/add-node.html#steps-to-add-nodes","title":"Steps to Add Nodes","text":"
                                                                          1. Log in to the AI platform as an administrator.
                                                                          2. Navigate to Container Management -> Clusters, and click the name of the target cluster.

                                                                          3. On the cluster overview page, click Node Management, and then click the Add Node button on the right side.

                                                                          4. Follow the wizard, fill in the required parameters, and then click OK.

                                                                            Basic InformationParameter Configuration

                                                                          5. Click OK in the popup window.

                                                                          6. Return to the node list. The status of the newly added node will be Pending. After a few minutes, if the status changes to Running, it indicates that the node has been successfully added.

                                                                          Tip

                                                                          For nodes that have just been successfully added, it may take an additional 2-3 minutes for the GPU to be recognized.
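
                                                                          You can verify from the command line that the GPU has been registered as an allocatable resource (a sketch; the resource name, e.g. nvidia.com/gpu, depends on the vendor's device plugin):

                                                                            kubectl get node <node-name> -o jsonpath='{.status.allocatable}'\n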

                                                                          "},{"location":"en/admin/k8s/create-k8s.html","title":"Creating a Kubernetes Cluster on the Cloud","text":"

                                                                          Deploying a Kubernetes cluster is aimed at supporting efficient AI computing resource scheduling and management, achieving elastic scalability, providing high availability, and optimizing the model training and inference processes.

                                                                          "},{"location":"en/admin/k8s/create-k8s.html#prerequisites","title":"Prerequisites","text":"
                                                                          • An AI platform is installed
                                                                          • An administrator account is available
                                                                          • A physical machine with a GPU is prepared
                                                                          • Two IP address ranges are allocated (Pod CIDR /18, SVC CIDR /18; they must not conflict with existing network segments)
                                                                          "},{"location":"en/admin/k8s/create-k8s.html#steps-to-create-the-cluster","title":"Steps to Create the Cluster","text":"
                                                                          1. Log in to the AI platform as an administrator.
                                                                          2. Create and launch 3 cloud hosts without GPUs to serve as the Master nodes for the cluster.

                                                                            • Configure resources: 16 CPU cores, 32 GB memory, 200 GB system disk (ReadWriteOnce)
                                                                            • Select Bridge network mode
                                                                            • Set the root password or add an SSH public key for SSH connection
                                                                            • Take note of the IP addresses of the 3 hosts
                                                                          3. Navigate to Container Management -> Clusters, and click the Create Cluster button on the right side.

                                                                          4. Follow the wizard to configure the various parameters of the cluster.

                                                                            Basic InformationNode ConfigurationNetwork ConfigurationAddon ConfigurationAdvanced Configuration

                                                                            After configuring the node information, click Start Check.

                                                                            By default, each node can run 110 Pods (container groups). If the node has a higher configuration, this can be raised to 200 or 300 Pods.

                                                                          5. Wait for the cluster creation to complete.

                                                                          6. In the cluster list, find the newly created cluster, click the cluster name, navigate to Helm Apps -> Helm Charts, and search for metax-gpu-extensions in the search box, then click the card.

                                                                          7. Click the Install button on the right to begin installing the GPU plugin.

                                                                            Application SettingsKubernetes Orchestration Confirmation

                                                                            Enter a name, select a namespace, and modify the image address in the YAML:

                                                                          8. You will automatically return to the Helm App list. Wait for the status of metax-gpu-extensions to change to Deployed.

9. The cluster has been created successfully. You can now view the nodes in the cluster, create AI workloads, and use the GPU.
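A minimal verification sketch from a terminal with kubectl access to the new cluster (the node name placeholder is yours to fill in; the exact GPU resource name exposed by metax-gpu-extensions is plugin-dependent and not assumed here):

kubectl get nodes -o wide\nkubectl get node <node-name> -o jsonpath='{.status.allocatable.pods}'   # per-node Pod capacity\n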

                                                                          Next step: Create AI Workloads

                                                                          "},{"location":"en/admin/k8s/remove-node.html","title":"Removing GPU Worker Nodes","text":"

                                                                          The cost of GPU resources is relatively high. If you temporarily do not need a GPU, you can remove the worker nodes with GPUs. The following steps are also applicable for removing regular worker nodes.

                                                                          "},{"location":"en/admin/k8s/remove-node.html#prerequisites","title":"Prerequisites","text":"
                                                                          • AI platform installed
                                                                          • An administrator account
                                                                          • A cluster with GPU nodes created
                                                                          "},{"location":"en/admin/k8s/remove-node.html#removal-steps","title":"Removal Steps","text":"
                                                                          1. Log in to the AI platform as an administrator.
                                                                          2. Navigate to Container Management -> Clusters, and click the name of the target cluster.

                                                                          3. On the cluster overview page, click Nodes, find the node you want to remove, click the \u2507 on the right side of the list, and select Remove Node from the pop-up menu.

                                                                          4. In the pop-up window, enter the node name, and after confirming it is correct, click Delete.

                                                                          5. You will automatically return to the node list, and the status will be Removing. After a few minutes, refresh the page; if the node is no longer there, it indicates that the node has been successfully removed.

6. After the node is removed from the UI list, log in to the host of the removed node via SSH and execute the shutdown command.
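A minimal sketch of this step, assuming root SSH access to the removed node's IP:

ssh root@<removed-node-ip>\nshutdown -h now\n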

                                                                          Tip

After the node is removed from the UI and shut down, its data is not deleted immediately; the node's data is retained for a period of time.

                                                                          "},{"location":"en/admin/kpanda/backup/index.html","title":"Backup and Restore","text":"

                                                                          Backup and restore are essential aspects of system management. In practice, it is important to first back up the data of the system at a specific point in time and securely store the backup. In case of incidents such as data corruption, loss, or accidental deletion, the system can be quickly restored based on the previous backup data, reducing downtime and minimizing losses.

                                                                          • In real production environments, services may be deployed across different clouds, regions, or availability zones. If one infrastructure faces a failure, organizations need to quickly restore applications in other available environments. In such cases, cross-cloud or cross-cluster backup and restore become crucial.
                                                                          • Large-scale systems often involve multiple roles and users with complex permission management systems. With many operators involved, accidents caused by human error can lead to system failures. In such scenarios, the ability to roll back the system quickly using previously backed-up data is necessary. Relying solely on manual troubleshooting, fault repair, and system recovery can be time-consuming, resulting in prolonged system unavailability and increased losses for organizations.
                                                                          • Additionally, factors like network attacks, natural disasters, and equipment malfunctions can also cause data accidents.

                                                                          Therefore, backup and restore are vital as the last line of defense for maintaining system stability and ensuring data security.

                                                                          Backups are typically classified into three types: full backups, incremental backups, and differential backups. Currently, AI platform supports full backups and incremental backups.

                                                                          The backup and restore provided by AI platform can be divided into two categories: Application Backup and ETCD Backup. It supports both manual backups and scheduled automatic backups using CronJobs.

                                                                          • Application Backup

                                                                            Application backup refers to backing up data of a specific workload in the cluster and then restoring that data either within the same cluster or in another cluster. It supports backing up all resources under a namespace or filtering resources by specific labels.

                                                                            Application backup also supports cross-cluster backup of stateful applications. For detailed steps, refer to the Backup and Restore MySQL Applications and Data Across Clusters guide.

                                                                          • etcd Backup

etcd is the data storage component of Kubernetes. Kubernetes stores its own components' data and application data in etcd. Therefore, backing up etcd is equivalent to backing up the entire cluster's data, allowing quick restoration of the cluster to a previous state in case of failures.

                                                                            It's worth noting that currently, restoring etcd backup data is only supported within the same cluster (the original cluster). To learn more about related best practices, refer to the ETCD Backup and Restore guide.
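For context, the platform automates etcd backups, but a manual full backup amounts to an etcdctl snapshot (a sketch only, using the standard certificate paths shown later in this guide):

ETCDCTL_API=3 etcdctl --endpoints=https://127.0.0.1:2379 --cacert=/etc/kubernetes/ssl/etcd/ca.crt --cert=/etc/kubernetes/ssl/apiserver-etcd-client.crt --key=/etc/kubernetes/ssl/apiserver-etcd-client.key snapshot save /tmp/etcd-snapshot.db\n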

                                                                          "},{"location":"en/admin/kpanda/backup/deployment.html","title":"Application Backup","text":"

This article explains how to back up applications in AI platform. The demo application used in this tutorial is dao-2048 , a deployment.

                                                                          "},{"location":"en/admin/kpanda/backup/deployment.html#prerequisites","title":"Prerequisites","text":"

                                                                          Before backing up a deployment, the following prerequisites must be met:

• Integrate a Kubernetes cluster or create a Kubernetes cluster in the Container Management module, and ensure that you can access the cluster's UI.

                                                                          • Create a Namespace and a user.

• The current operating user should have NS Editor or higher permissions. For details, refer to Namespace Authorization.

• Install the velero component and ensure it is running properly.

• Create a deployment (named dao-2048 in this tutorial) and label it with app: dao-2048 .

                                                                          "},{"location":"en/admin/kpanda/backup/deployment.html#backup-workload","title":"Backup workload","text":"

Follow the steps below to back up the deployment dao-2048 .

                                                                          1. Enter the Container Management module, click Backup Recovery -> Application Backup on the left navigation bar, and enter the Application Backup list page.

2. On the Application Backup list page, select the cluster where velero and the dao-2048 application are installed. Click Backup Plan in the upper right corner to create a new backup plan.

                                                                          3. Refer to the instructions below to fill in the backup configuration.

                                                                            • Name: The name of the new backup plan.
                                                                            • Source Cluster: The cluster where the application backup plan is to be executed.
                                                                            • Object Storage Location: The access path of the object storage configured when installing velero on the source cluster.
• Namespace: The namespaces to back up; multiple selections are supported.
• Advanced Configuration: Use resource labels to back up only specific resources in a namespace (such as a single application), or to exclude specific resources from the backup (see the CLI sketch at the end of this section).

                                                                          4. Refer to the instructions below to set the backup execution frequency, and then click Next .

• Backup Frequency: Set the time period for task execution in minutes, hours, days, weeks, or months. Custom Cron expressions using numbers and * are supported; after you enter an expression, its meaning is displayed. For detailed syntax rules, refer to Cron Schedule Syntax.
• Retention Time (days): Set how long backup resources are stored. The default is 30 days; backups are deleted after expiration.
• Backup Data Volume (PV): Whether to back up the data in data volumes (PVs); both direct copy and CSI snapshots are supported.

  • Direct Replication: directly copies the data in the data volume (PV) for backup.
  • Use CSI Snapshots: uses CSI snapshots to back up data volumes (PVs); requires a CSI snapshot type available in the cluster.

5. Click OK . The page will automatically return to the application backup plan list. Find the newly created dao-2048 backup plan and perform the Immediate Execution operation.

6. At this point, the Last Execution State of the backup plan will change to in progress . After the backup completes, you can click the name of the backup plan to view its details.
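For reference, the label-filtered backup configured above corresponds roughly to the following velero CLI call (a sketch, not how the platform invokes velero internally; the namespace placeholder is yours to fill in):

velero backup create dao-2048-backup --include-namespaces <namespace> --selector app=dao-2048\n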

                                                                          "},{"location":"en/admin/kpanda/backup/etcd-backup.html","title":"etcd backup","text":"

etcd backup takes the cluster's data as its core. In cases such as hardware failure or configuration errors in development and testing, cluster data can be restored from an etcd backup.

This section introduces how to create etcd backups for clusters. Also see etcd Backup and Restore Best Practices.

                                                                          "},{"location":"en/admin/kpanda/backup/etcd-backup.html#prerequisites","title":"Prerequisites","text":"
• Integrated or created a Kubernetes cluster, with access to the cluster's UI.

• Created a namespace and a user, and granted the user NS Admin or higher permissions. For details, refer to Namespace Authorization.

                                                                          • Prepared a MinIO instance. It is recommended to create it through AI platform's MinIO middleware. For specific steps, refer to MinIO Object Storage.

                                                                          "},{"location":"en/admin/kpanda/backup/etcd-backup.html#create-etcd-backup","title":"Create etcd backup","text":"

                                                                          Follow the steps below to create an etcd backup.

                                                                          1. Enter Container Management -> Backup Recovery -> etcd Backup page, you can see all the current backup policies. Click Create Backup Policy on the right.

                                                                          2. Fill in the Basic Information. Then, click Next to automatically verify the connectivity of etcd. If the verification passes, proceed to the next step.

• First select the backup cluster and log in to its terminal
• Enter the etcd address in the format https://${NodeIP}:${Port}.

                                                                              • In a standard Kubernetes cluster, the default port for etcd is 2379.
                                                                              • In a Suanova 4.0 cluster, the default port for etcd is 12379.
                                                                              • In a public cloud managed cluster, you need to contact the relevant developers to obtain the etcd port number. This is because the control plane components of public cloud clusters are maintained and managed by the cloud service provider. Users cannot directly access or view these components, nor can they obtain control plane port information through regular commands (such as kubectl).
Ways to obtain the port number
                                                                              1. Find the etcd Pod in the kube-system namespace

                                                                                kubectl get po -n kube-system | grep etcd\n
                                                                              2. Get the port number from the listen-client-urls of the etcd Pod

                                                                                kubectl get po -n kube-system ${etcd_pod_name} -oyaml | grep listen-client-urls # (1)!\n
                                                                                1. Replace etcd_pod_name with the actual Pod name

                                                                                The expected output is as follows, where the number after the node IP is the port number:

                                                                                - --listen-client-urls=https://127.0.0.1:2379,https://10.6.229.191:2379\n
• Fill in the CA certificate. You can use the following command to view the certificate content, then copy and paste it into the proper location:

Standard Kubernetes Cluster / Suanova 4.0 Cluster
                                                                              cat /etc/kubernetes/ssl/etcd/ca.crt\n
                                                                              cat /etc/daocloud/dce/certs/ca.crt\n
• Fill in the Cert certificate. You can use the following command to view the certificate content, then copy and paste it into the proper location:

Standard Kubernetes Cluster / Suanova 4.0 Cluster
                                                                              cat /etc/kubernetes/ssl/apiserver-etcd-client.crt\n
                                                                              cat /etc/daocloud/dce/certs/etcd/server.crt\n
• Fill in the Key. You can use the following command to view its content, then copy and paste it into the proper location:

Standard Kubernetes Cluster / Suanova 4.0 Cluster
                                                                              cat /etc/kubernetes/ssl/apiserver-etcd-client.key\n
                                                                              cat /etc/daocloud/dce/certs/etcd/server.key\n
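Before pasting, you can optionally confirm that a certificate is readable and still valid with openssl (a sketch using the standard cluster paths above; adjust for Suanova 4.0):

openssl x509 -in /etc/kubernetes/ssl/etcd/ca.crt -noout -subject -dates\nopenssl x509 -in /etc/kubernetes/ssl/apiserver-etcd-client.crt -noout -subject -dates\n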

                                                                            Note

Click How to get below the input box to see how to obtain this information from the UI.

                                                                          3. Refer to the following information to fill in the Backup Policy.

                                                                            • Backup Method: Choose either manual backup or scheduled backup

                                                                              • Manual Backup: Immediately perform a full backup of etcd data based on the backup configuration.
                                                                              • Scheduled Backup: Periodically perform full backups of etcd data according to the set backup frequency.
• Backup Chain Length: the maximum number of backups to retain. The default is 30.

• Backup Frequency: hourly, daily, weekly, or monthly; a custom schedule is also supported.
                                                                          4. Refer to the following information to fill in the Storage Path.

• Storage Provider: Default is S3 storage
• Object Storage Access Address: The access address of MinIO
• Bucket: Create a bucket in MinIO and fill in its name here (a CLI sketch for creating a bucket follows these steps)
• Username: The login username for MinIO
• Password: The login password for MinIO
5. After clicking OK , the page automatically redirects to the backup policy list, where you can view all currently created policies.

                                                                            • Click the \u2507 action button on the right side of the policy to view logs, view YAML, update the policy, stop the policy, or execute the policy immediately.
                                                                            • When the backup method is manual, you can click Execute Now to perform the backup.
                                                                            • When the backup method is scheduled, the backup will be performed according to the configured time.
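The bucket referenced in the storage path must already exist. A minimal sketch for creating one with the MinIO client mc (the alias name is hypothetical; substitute the address and credentials from step 4):

mc alias set myminio <minio-access-address> <username> <password>\nmc mb myminio/<bucket-name>\n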
                                                                          "},{"location":"en/admin/kpanda/backup/etcd-backup.html#view-backup-policy-logs","title":"View Backup Policy Logs","text":"

                                                                          Click Logs to view the log content. By default, 100 lines are displayed. If you want to see more log information or download the logs, you can follow the prompts above the logs to go to the observability module.

                                                                          "},{"location":"en/admin/kpanda/backup/etcd-backup.html#view-backup-policy-details","title":"View Backup POlicy Details","text":"

                                                                          Go to Container Management -> Backup Recovery -> etcd Backup , click the Backup Policy tab, and then click the policy to view the details.

                                                                          "},{"location":"en/admin/kpanda/backup/etcd-backup.html#view-recovery-point","title":"View Recovery Point","text":"
                                                                          1. Go to Container Management -> Backup Recovery -> etcd Backup, and click the Recovery Point tab.
                                                                          2. After selecting the target cluster, you can view all the backup information under that cluster.

Each time a backup is executed, a corresponding recovery point is generated; the application can be quickly restored from any successful recovery point.

                                                                          "},{"location":"en/admin/kpanda/backup/install-velero.html","title":"Install the Velero Plugin","text":"

                                                                          velero is an open source tool for backing up and restoring Kubernetes cluster resources. It can back up resources in a Kubernetes cluster to cloud storage services, local storage, or other locations, and restore those resources to the same or a different cluster when needed.

This section introduces how to deploy the Velero plugin in AI platform using Helm Apps.

                                                                          "},{"location":"en/admin/kpanda/backup/install-velero.html#prerequisites","title":"Prerequisites","text":"

                                                                          Before installing the velero plugin, the following prerequisites need to be met:

• Integrated or created a Kubernetes cluster, with access to the cluster's UI.
                                                                          • Created a velero namespace.
                                                                          • You should have permissions not lower than NS Editor. For details, refer to Namespace Authorization.
                                                                          "},{"location":"en/admin/kpanda/backup/install-velero.html#steps","title":"Steps","text":"

                                                                          Please perform the following steps to install the velero plugin for your cluster.

1. On the cluster list page, find the target cluster that needs the velero plugin, click its name, click Helm Apps -> Helm chart in the left navigation bar, and enter velero in the search bar to search.

2. Read the introduction of the velero plugin, select a version, and click the Install button. This page uses version 5.2.0 as an example; installing version 5.2.0 or later is recommended.

3. Configure the basic info.

• Name: Enter the plugin name. Note that the name can contain up to 63 characters, may only include lowercase letters, numbers, and separators ("-"), and must start and end with a lowercase letter or number, for example metrics-server-01.
• Namespace: Select the namespace for plugin installation; it must be the velero namespace.
• Version: The version of the plugin; 5.2.0 is used as an example here.
• Wait: When enabled, the installation is marked successful only after all associated resources under the application are ready.
• Deletion Failed: When enabled (waiting for readiness is then enabled by default), installation-related resources are removed if the installation fails.
• Detailed Logs: Enable verbose logging of the installation process.

                                                                            Note

After enabling Wait and/or Deletion Failed , it may take a long time for the app to be marked as Running .

4. Configure the Velero chart Parameter Settings according to the following instructions.

                                                                            • S3 Credentials: Configure the authentication information of object storage (minio).

                                                                              • Use secret: Keep the default configuration true.
                                                                              • Secret name: Keep the default configuration velero-s3-credential.
• SecretContents.aws_access_key_id = : Configure the username for accessing object storage; replace it with the actual value.
• SecretContents.aws_secret_access_key = : Configure the password for accessing object storage; replace it with the actual value.

An example of the Use existing secret parameter is as follows:

                                                                                [default]\naws_access_key_id = minio\naws_secret_access_key = minio123\n
                                                                                • BackupStorageLocation: The location where Velero backs up data.

                                                                                  • S3 bucket: The name of the storage bucket used to save backup data (must be a real storage bucket that already exists in minio).
                                                                                  • Is default BackupStorage: Keep the default configuration true.
• S3 access mode: Velero's access mode to the data. One of:
• ReadWrite: Allow Velero to read and write backup data;
• ReadOnly: Allow Velero to read backup data, but not modify it;
• WriteOnly: Allow Velero to write backup data only, without reading it.
                                                                                • S3 Configs: Detailed configuration of S3 storage (minio).

• S3 region: The geographical region of the cloud storage. The default is us-east-1, provided by the system administrator.
                                                                                  • S3 force path style: Keep the default configuration true.
• S3 server URL: The console access address of the object storage (MinIO). MinIO generally provides two services, UI access and console access; use the console access address here.

• Click the OK button to complete the installation of the Velero plugin. The system automatically redirects to the Helm Apps list page. After a few minutes, refresh the page and you will see the newly installed application.
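To confirm the deployment from the command line (a sketch; if the S3 settings are correct, the BackupStorageLocation should report an Available phase):

kubectl get pods -n velero\nkubectl get backupstoragelocation -n velero\n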

                                                                              • "},{"location":"en/admin/kpanda/best-practice/add-master-node.html","title":"Scaling Controller Nodes in a Worker Cluster","text":"

                                                                                This article provides a step-by-step guide on how to manually scale the control nodes in a worker cluster to achieve high availability for self-built clusters.

                                                                                Note

                                                                                It is recommended to enable high availability mode when creating the worker cluster in the interface. Manually scaling the control nodes of the worker cluster involves certain operational risks, so please proceed with caution.

                                                                                "},{"location":"en/admin/kpanda/best-practice/add-master-node.html#prerequisites","title":"Prerequisites","text":"
• A worker cluster has been created on the AI platform. You can refer to the documentation on Creating a Worker Cluster.
                                                                                • The managed cluster associated with the worker cluster exists in the current platform and is running normally.

                                                                                Note

                                                                                Managed cluster refers to the cluster specified during the creation of the worker cluster, which provides capabilities such as Kubernetes version upgrades, node scaling, uninstallation, and operation records for the current cluster.

                                                                                "},{"location":"en/admin/kpanda/best-practice/add-master-node.html#modify-the-host-manifest","title":"Modify the Host manifest","text":"
                                                                                1. Log in to the container management platform and go to the overview page of the cluster where you want to scale the control nodes. In the Basic Information section, locate the Managed Cluster of the current cluster and click its name to enter the overview page.

                                                                                2. In the overview page of the managed cluster, click Console to open the cloud terminal console. Run the following command to find the host manifest of the worker cluster that needs to be scaled.

                                                                                  kubectl get cm -n kubean-system ${ClusterName}-hosts-conf -oyaml\n

                                                                                  ${ClusterName} is the name of the worker cluster to be scaled.

                                                                                3. Modify the host manifest file based on the example below and add information for the controller nodes.

Before Modification / After Modification
                                                                                  apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: tanghai-dev-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      hosts:\n        node1:\n          ip: 10.6.175.10 \n          access_ip: 10.6.175.10\n          ansible_host: 10.6.175.10 \n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: password01\n      children:\n        kube_control_plane:\n          hosts:\n            node1:\n        kube_node:\n          hosts:\n            node1:\n        etcd:\n          hosts:\n            node1:\n        k8s_cluster:\n          children:\n            kube_control_plane:\n            kube_node:\n        calico_rr:\n          hosts: {}\n......\n
                                                                                  apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: tanghai-dev-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      hosts:\n        node1:\n          ip: 10.6.175.10\n          access_ip: 10.6.175.10 \n          ansible_host: 10.6.175.10\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: password01\n        node2: # Add controller node2\n          ip: 10.6.175.20\n          access_ip: 10.6.175.20\n          ansible_host: 10.6.175.20\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: password01\n        node3:\n          ip: 10.6.175.30 \n          access_ip: 10.6.175.30\n          ansible_host: 10.6.175.30 \n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: password01\n      children:\n        kube_control_plane:\n          hosts:\n            node1:\n            node2: # Add controller node2\n            node3: # Add controller node3\n        kube_node:\n          hosts:\n            node1:\n            node2: # Add controller node2\n            node3: # Add controller node3\n        etcd:\n          hosts:\n            node1:\n            node2: # Add controller node2\n            node3: # Add controller node3\n        k8s_cluster:\n          children:\n            kube_control_plane:\n            kube_node:\n        calico_rr:\n          hosts: {}\n

                                                                                Important Parameters:

                                                                                • all.hosts.node1: Existing master node in the original cluster
                                                                                • all.hosts.node2, all.hosts.node3: Control nodes to be added during cluster scaling
                                                                                • all.children.kube_control_plane.hosts: Control plane group in the cluster
                                                                                • all.children.kube_node.hosts: Worker node group in the cluster
                                                                                • all.children.etcd.hosts: ETCD node group in the cluster
                                                                                "},{"location":"en/admin/kpanda/best-practice/add-master-node.html#add-expansion-task-scale-master-node-opsyaml-using-the-clusteroperationyml-template","title":"Add Expansion Task \"scale-master-node-ops.yaml\" using the ClusterOperation.yml Template","text":"

                                                                                Use the following ClusterOperation.yml template to add a cluster control node expansion task called \"scale-master-node-ops.yaml\".

                                                                                ClusterOperation.yml
                                                                                apiVersion: kubean.io/v1alpha1\nkind: ClusterOperation\nmetadata:\n  name: cluster1-online-install-ops\nspec:\n  cluster: ${cluster-name} # Specify cluster name\n  image: ghcr.m.daocloud.io/kubean-io/spray-job:v0.18.0 # Specify the image for the kubean job\n  actionType: playbook\n  action: cluster.yml\n  extraArgs: --limit=etcd,kube_control_plane -e ignore_assert_errors=yes\n  preHook:\n    - actionType: playbook\n      action: ping.yml\n    - actionType: playbook\n      action: disable-firewalld.yml\n    - actionType: playbook\n      action: enable-repo.yml  # In an offline environment, you need to add this yaml and\n      # set the correct repo-list (for installing operating system packages).\n      # The following parameter values are for reference only.\n      extraArgs: |\n        -e \"{repo_list: ['http://172.30.41.0:9000/kubean/centos/\\$releasever/os/\\$basearch','http://172.30.41.0:9000/kubean/centos-iso/\\$releasever/os/\\$basearch']}\"\n  postHook:\n    - actionType: playbook\n      action: upgrade-cluster.yml\n      extraArgs: --limit=etcd,kube_control_plane -e ignore_assert_errors=yes\n    - actionType: playbook\n      action: kubeconfig.yml\n    - actionType: playbook\n      action: cluster-info.yml\n

                                                                                Note

• spec.image: The image address should be consistent with the image of the job that was previously deployed
• spec.action: set to cluster.yml. If three or more master (etcd) nodes are added at once, append the parameter -e etcd_retries=10 to spec.extraArgs to increase the etcd node join retries (as sketched below)
• spec.extraArgs: set to --limit=etcd,kube_control_plane -e ignore_assert_errors=yes
• In an offline environment, spec.preHook needs to include enable-repo.yml, and its extraArgs must provide the correct repo_list for the relevant OS
• spec.postHook.action: should include upgrade-cluster.yml, with extraArgs set to --limit=etcd,kube_control_plane -e ignore_assert_errors=yes
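For example, with the retry parameter added, the relevant lines of the manifest would look like this (a sketch based on the template above):

  action: cluster.yml\n  extraArgs: --limit=etcd,kube_control_plane -e ignore_assert_errors=yes -e etcd_retries=10\n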

                                                                                Create and deploy scale-master-node-ops.yaml based on the above configuration.

                                                                                # Copy the above manifest\nvi scale-master-node-ops.yaml\nkubectl apply -f scale-master-node-ops.yaml -n kubean-system\n
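You can watch the operation's progress before checking the nodes (a sketch; the resource name matches metadata.name in your manifest, and status columns vary by kubean version):

kubectl get clusteroperation <operation-name> -o wide\nkubectl get pods -n kubean-system | grep <operation-name>   # the spray-job pod that runs the playbook\n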

Run the following command to verify the result.

                                                                                kubectl get node\n
                                                                                "},{"location":"en/admin/kpanda/best-practice/add-worker-node-on-global.html","title":"Scaling the Worker Nodes of the Global Service Cluster","text":"

                                                                                This page introduces how to manually scale the worker nodes of the global service cluster in offline mode. By default, it is not recommended to scale the global service cluster after deploying AI platform. Please ensure proper resource planning before deploying AI platform.

                                                                                Note

The controller nodes of the global service cluster do not support scaling.

                                                                                "},{"location":"en/admin/kpanda/best-practice/add-worker-node-on-global.html#prerequisites","title":"Prerequisites","text":"
• The AI platform deployment has been completed through the bootstrap node, and the kind cluster on the bootstrap node is running normally.
                                                                                • You must log in with a user account that has admin privileges on the platform.
                                                                                "},{"location":"en/admin/kpanda/best-practice/add-worker-node-on-global.html#get-kubeconfig-for-the-kind-cluster-on-the-bootstrap-node","title":"Get kubeconfig for the kind cluster on the bootstrap node","text":"
                                                                                1. Run the following command to log in to the bootstrap node:

                                                                                  ssh root@bootstrap-node-ip-address\n
                                                                                2. On the bootstrap node, run the following command to get the CONTAINER ID of the kind cluster:

                                                                                  [root@localhost ~]# podman ps\n\n# Expected output:\nCONTAINER ID  IMAGE                                      COMMAND     CREATED      STATUS      PORTS                                                                                                         NAMES\n220d662b1b6a  docker.m.daocloud.io/kindest/node:v1.26.2              2 weeks ago  Up 2 weeks  0.0.0.0:443->30443/tcp, 0.0.0.0:8081->30081/tcp, 0.0.0.0:9000-9001->32000-32001/tcp, 0.0.0.0:36674->6443/tcp  my-cluster-installer-control-plane\n
                                                                                3. Run the following command to enter a container in the kind cluster:

                                                                                  podman exec -it {CONTAINER ID} bash\n

                                                                                  Replace {CONTAINER ID} with your actual container ID.

                                                                                4. Inside the container of the kind cluster, run the following command to get the kubeconfig information for the kind cluster:

                                                                                  kubectl config view --minify --flatten --raw\n

                                                                                  After the console output, copy the kubeconfig information of the kind cluster for the next step.

                                                                                "},{"location":"en/admin/kpanda/best-practice/add-worker-node-on-global.html#create-clusterkubeanio-resources-in-the-kind-cluster-on-the-bootstrap-node","title":"Create cluster.kubean.io resources in the kind cluster on the bootstrap node","text":"
                                                                                1. Use the command podman exec -it {CONTAINER ID} bash to enter the kind cluster container.

                                                                                2. Inside the kind cluster container, run the following command to get the kind cluster name:

                                                                                  kubectl get clusters\n
                                                                                3. Copy and run the following command within the kind cluster to create the cluster.kubean.io resource:

                                                                                  kubectl apply -f - <<EOF\napiVersion: kubean.io/v1alpha1\nkind: Cluster\nmetadata:\n  labels:\n    clusterName: kpanda-global-cluster\n  name: kpanda-global-cluster\nspec:\n  hostsConfRef:\n    name: my-cluster-hosts-conf\n    namespace: kubean-system\n  kubeconfRef:\n    name: my-cluster-kubeconf\n    namespace: kubean-system\n  varsConfRef:\n    name: my-cluster-vars-conf\n    namespace: kubean-system\nEOF\n

                                                                                  Note

                                                                                  The default cluster name for spec.hostsConfRef.name, spec.kubeconfRef.name, and spec.varsConfRef.name is my-cluster. Please replace it with the kind cluster name obtained in the previous step.

                                                                                4. Run the following command in the kind cluster to verify if the cluster.kubean.io resource is created successfully:

                                                                                  kubectl get clusters\n

                                                                                  Expected output is:

                                                                                  NAME                    AGE\nkpanda-global-cluster   3s\nmy-cluster              16d\n
                                                                                "},{"location":"en/admin/kpanda/best-practice/add-worker-node-on-global.html#update-the-containerd-configuration-in-the-kind-cluster-on-the-bootstrap-node","title":"Update the containerd configuration in the kind cluster on the bootstrap node","text":"
                                                                                1. Run the following command to log in to one of the controller nodes of the global service cluster:

                                                                                  ssh root@<global-service-cluster-controller-node-IP>\n
                                                                                2. On the global service cluster controller node, run the following command to copy the containerd configuration file config.toml from the controller node to the bootstrap node:

                                                                                  scp /etc/containerd/config.toml root@<bootstrap-node-IP>:/root\n
                                                                                3. On the bootstrap node, select the insecure registry section from the containerd configuration file config.toml that was copied from the controller node, and add it to the config.toml in the kind cluster.

                                                                                  An example of the insecure registry section is as follows:

                                                                                  [plugins.\"io.containerd.grpc.v1.cri\".registry]\n  [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors]\n    [plugins.\"io.containerd.grpc.v1.cri\".registry.mirrors.\"10.6.202.20\"]\n      endpoint = [\"https://10.6.202.20\"]\n    [plugins.\"io.containerd.grpc.v1.cri\".registry.configs.\"10.6.202.20\".tls]\n      insecure_skip_verify = true\n

                                                                                  Note

                                                                                  Since the config.toml file in the kind cluster cannot be modified directly, you can first copy the file out to modify it and then copy it back to the kind cluster. The steps are as follows:

                                                                                  1. Run the following command on the bootstrap node to copy the file out:

                                                                                    podman cp {CONTAINER ID}:/etc/containerd/config.toml ./config.toml.kind\n
                                                                                  2. Run the following command to edit the config.toml file:

                                                                                    vim ./config.toml.kind\n
                                                                                  3. After modifying the file, copy it back to the kind cluster by running the following command:

                                                                                    podman cp ./config.toml.kind {CONTAINER ID}:/etc/containerd/config.toml\n

                                                                                    {CONTAINER ID} should be replaced with your actual container ID.

                                                                                4. Run the following command within the kind cluster to restart the containerd service:

                                                                                  systemctl restart containerd\n
                                                                                "},{"location":"en/admin/kpanda/best-practice/add-worker-node-on-global.html#integrate-a-kind-cluster-into-the-ai-platform-cluster-list","title":"Integrate a Kind cluster into the AI platform cluster list","text":"
                                                                                1. Log in to AI platform, navigate to Container Management, and on the right side of the cluster list, click the Integrate Cluster button.

                                                                                2. In the integration configuration section, fill in and edit the kubeconfig of the Kind cluster.

apiVersion: v1\nclusters:\n- cluster:\n    insecure-skip-tls-verify: true # (1)!\n    certificate-authority-data: LS0TLSCFDFWEFEWFEWFGGEWGFWFEWGWEGFEWGEWGSDGFSDSD\n    server: https://my-cluster-installer-control-plane:6443 # (2)!\n  name: my-cluster-installer\ncontexts:\n- context:\n    cluster: my-cluster-installer\n    user: kubernetes-admin\n  name: kubernetes-admin@my-cluster-installer\ncurrent-context: kubernetes-admin@my-cluster-installer\nkind: Config\npreferences: {}\nusers:\n
                                                                                  1. Skip TLS verification; this line needs to be added manually.
                                                                                  2. Replace it with the IP of the Kind node, and change port 6443 to the port mapped to the node (you can run the command podman ps|grep 6443 to check the mapped port).

3. Click OK to complete the integration of the Kind cluster.

                                                                                "},{"location":"en/admin/kpanda/best-practice/add-worker-node-on-global.html#add-labels-to-the-global-service-cluster","title":"Add Labels to the Global Service Cluster","text":"
1. Log in to AI platform, navigate to Container Management, find the kpanda-global-cluster , and on the right side, find the Basic Configuration menu option.

2. In the Basic Configuration page, add the label kpanda.io/managed-by=my-cluster for the global service cluster.

                                                                                Note

                                                                                The value in the label kpanda.io/managed-by=my-cluster corresponds to the name of the cluster specified during the integration process, which defaults to my-cluster. Please adjust this according to your actual situation.

                                                                                "},{"location":"en/admin/kpanda/best-practice/add-worker-node-on-global.html#add-nodes-to-the-global-service-cluster","title":"Add nodes to the global service cluster","text":"
                                                                                1. Go to the node list page of the global service cluster, find the Integrate Node button on the right side of the node list, and click to enter the node configuration page.

                                                                                2. After filling in the IP and authentication information of the node to be integrated, click Start Check . Once the node check is completed, click Next .

                                                                                3. Add the following custom parameters in the Custom Parameters section:

                                                                                  download_run_once: false\ndownload_container: false\ndownload_force_cache: false\ndownload_localhost: false\n

                                                                                4. Click the OK button and wait for the node to be added.

                                                                                "},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html","title":"Cross-Cluster Backup and Recovery of MySQL Application and Data","text":"

This demonstration shows how to use the application backup feature in AI platform to perform a cross-cluster backup and migration of a stateful application.

                                                                                Note

The current operator should have admin privileges on the AI platform.

                                                                                "},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html#prepare-the-demonstration-environment","title":"Prepare the Demonstration Environment","text":""},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html#prepare-two-clusters","title":"Prepare Two Clusters","text":"

                                                                                main-cluster will be the source cluster for backup data, and recovery-cluster will be the target cluster for data recovery.

Cluster | IP | Nodes
main-cluster | 10.6.175.100 | 1 node
recovery-cluster | 10.6.175.110 | 1 node
"},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html#set-up-minio-configuration","title":"Set Up MinIO Configuration","text":"
MinIO Server Address | Bucket | Username | Password
http://10.7.209.110:9000 | mysql-demo | root | dangerous
"},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html#deploy-nfs-storage-service-in-both-clusters","title":"Deploy NFS Storage Service in Both Clusters","text":"

                                                                                Note

                                                                                NFS storage service needs to be deployed on all nodes in both the source and target clusters.

1. Install the dependencies required by NFS on all nodes in both clusters.

  yum install nfs-utils iscsi-initiator-utils -y\n


                                                                                2. Prepare NFS storage service for the MySQL application.

Log in to any control node of each cluster ( main-cluster and recovery-cluster ). Use the command vi nfs.yaml to create a file named nfs.yaml on the node, and copy the following YAML content into the nfs.yaml file.

                                                                                <details>\n<summary>nfs.yaml</summary>\n```yaml\nkind: ClusterRole\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\nname: nfs-provisioner-runner\nnamespace: nfs-system\nrules:\n- apiGroups: [\"\"]\n    resources: [\"persistentvolumes\"]\n    verbs: [\"get\", \"list\", \"watch\", \"create\", \"delete\"]\n- apiGroups: [\"\"]\n    resources: [\"persistentvolumeclaims\"]\n    verbs: [\"get\", \"list\", \"watch\", \"update\"]\n- apiGroups: [\"storage.k8s.io\"]\n    resources: [\"storageclasses\"]\n    verbs: [\"get\", \"list\", \"watch\"]\n- apiGroups: [\"\"]\n    resources: [\"events\"]\n    verbs: [\"create\", \"update\", \"patch\"]\n- apiGroups: [\"\"]\n    resources: [\"services\", \"endpoints\"]\n    verbs: [\"get\"]\n- apiGroups: [\"extensions\"]\n    resources: [\"podsecuritypolicies\"]\n    resourceNames: [\"nfs-provisioner\"]\n    verbs: [\"use\"]\n---\nkind: ClusterRoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\nname: run-nfs-provisioner\nsubjects:\n- kind: ServiceAccount\n    name: nfs-provisioner\n    # replace with namespace where provisioner is deployed\n    namespace: default\nroleRef:\nkind: ClusterRole\nname: nfs-provisioner-runner\napiGroup: rbac.authorization.k8s.io\n---\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\nname: leader-locking-nfs-provisioner\nrules:\n- apiGroups: [\"\"]\n    resources: [\"endpoints\"]\n    verbs: [\"get\", \"list\", \"watch\", \"create\", \"update\", \"patch\"]\n---\nkind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\nname: leader-locking-nfs-provisioner\nsubjects:\n- kind: ServiceAccount\n    name: nfs-provisioner\n    # replace with namespace where provisioner is deployed\n    namespace: default\nroleRef:\nkind: Role\nname: leader-locking-nfs-provisioner\napiGroup: rbac.authorization.k8s.io\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\nname: nfs-provisioner\n---\nkind: Service\napiVersion: v1\nmetadata:\nname: nfs-provisioner\nlabels:\n    app: nfs-provisioner\nspec:\nports:\n    - name: nfs\n    port: 2049\n    - name: nfs-udp\n    port: 2049\n    protocol: UDP\n    - name: nlockmgr\n    port: 32803\n    - name: nlockmgr-udp\n    port: 32803\n    protocol: UDP\n    - name: mountd\n    port: 20048\n    - name: mountd-udp\n    port: 20048\n    protocol: UDP\n    - name: rquotad\n    port: 875\n    - name: rquotad-udp\n    port: 875\n    protocol: UDP\n    - name: rpcbind\n    port: 111\n    - name: rpcbind-udp\n    port: 111\n    protocol: UDP\n    - name: statd\n    port: 662\n    - name: statd-udp\n    port: 662\n    protocol: UDP\nselector:\n    app: nfs-provisioner\n---\nkind: Deployment\napiVersion: apps/v1\nmetadata:\nname: nfs-provisioner\nspec:\nselector:\n    matchLabels:\n    app: nfs-provisioner\nreplicas: 1\nstrategy:\n    type: Recreate\ntemplate:\n    metadata:\n    labels:\n        app: nfs-provisioner\n    spec:\n    serviceAccount: nfs-provisioner\n    containers:\n        - name: nfs-provisioner\n        resources:\n            limits:\n            cpu: \"1\"\n            memory: \"4294967296\"\n        image: release.daocloud.io/velero/nfs-provisioner:v3.0.0\n        ports:\n            - name: nfs\n            containerPort: 2049\n            - name: nfs-udp\n            containerPort: 2049\n            protocol: UDP\n            - name: nlockmgr\n            containerPort: 32803\n            - name: nlockmgr-udp\n            containerPort: 32803\n            protocol: UDP\n     
       - name: mountd\n            containerPort: 20048\n            - name: mountd-udp\n            containerPort: 20048\n            protocol: UDP\n            - name: rquotad\n            containerPort: 875\n            - name: rquotad-udp\n            containerPort: 875\n            protocol: UDP\n            - name: rpcbind\n            containerPort: 111\n            - name: rpcbind-udp\n            containerPort: 111\n            protocol: UDP\n            - name: statd\n            containerPort: 662\n            - name: statd-udp\n            containerPort: 662\n            protocol: UDP\n        securityContext:\n            capabilities:\n            add:\n                - DAC_READ_SEARCH\n                - SYS_RESOURCE\n        args:\n            - \"-provisioner=example.com/nfs\"\n        env:\n            - name: POD_IP\n            valueFrom:\n                fieldRef:\n                fieldPath: status.podIP\n            - name: SERVICE_NAME\n            value: nfs-provisioner\n            - name: POD_NAMESPACE\n            valueFrom:\n                fieldRef:\n                fieldPath: metadata.namespace\n        imagePullPolicy: \"IfNotPresent\"\n        volumeMounts:\n            - name: export-volume\n            mountPath: /export\n    volumes:\n        - name: export-volume\n        hostPath:\n            path: /data\n---\nkind: StorageClass\napiVersion: storage.k8s.io/v1\nmetadata:\nname: nfs\nprovisioner: example.com/nfs\nmountOptions:\n- vers=4.1\n```\n</details>\n
3. Run the nfs.yaml file on the control nodes of both clusters.

  kubectl apply -f nfs.yaml\n

  Expected output

  [root@g-master1 ~]# kubectl apply -f nfs.yaml\nclusterrole.rbac.authorization.k8s.io/nfs-provisioner-runner created\nclusterrolebinding.rbac.authorization.k8s.io/run-nfs-provisioner created\nrole.rbac.authorization.k8s.io/leader-locking-nfs-provisioner created\nrolebinding.rbac.authorization.k8s.io/leader-locking-nfs-provisioner created\nserviceaccount/nfs-provisioner created\nservice/nfs-provisioner created\ndeployment.apps/nfs-provisioner created\nstorageclass.storage.k8s.io/nfs created\n
4. Check the status of the NFS Pod and wait for its status to become running (approximately 2 minutes).

kubectl get pod -owide\n

                                                                                  Expected output

                                                                                  [root@g-master1 ~]# kubectl get pod -owide\nNAME                               READY   STATUS    RESTARTS   AGE     IP              NODE        NOMINATED NODE   READINESS GATES\nnfs-provisioner-7dfb9bcc45-74ws2   1/1     Running   0          4m45s   10.6.175.100   g-master1   <none>           <none>\n

                                                                                "},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html#deploy-mysql-application","title":"Deploy MySQL Application","text":"
                                                                                1. Prepare a PVC (Persistent Volume Claim) based on NFS storage for the MySQL application to store its data.

                                                                                  Use the command vi pvc.yaml to create a file named pvc.yaml on the node, and copy the following YAML content into the pvc.yaml file.

                                                                                  pvc.yaml

                                                                                  apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: mydata\n  namespace: default\nspec:\n  accessModes:\n  - ReadWriteOnce\n  resources:\n    requests:\n      storage: \"1Gi\"\n  storageClassName: nfs\n  volumeMode: Filesystem\n

                                                                                2. Run the pvc.yaml file using the kubectl tool on the node.

                                                                                  kubectl apply -f pvc.yaml\n

                                                                                  Expected output

                                                                                  [root@g-master1 ~]# kubectl apply -f pvc.yaml\npersistentvolumeclaim/mydata created\n
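  Optionally, before deploying MySQL you can confirm that the PVC was bound through the nfs StorageClass (an extra sanity check, not part of the original steps):

    kubectl get pvc mydata -n default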

                                                                                3. Deploy the MySQL application.

                                                                                  Use the command vi mysql.yaml to create a file named mysql.yaml on the node, and copy the following YAML content into the mysql.yaml file.

                                                                                  mysql.yaml

                                                                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: mysql-deploy\n  name: mysql-deploy\n  namespace: default\nspec:\n  progressDeadlineSeconds: 600\n  replicas: 1\n  revisionHistoryLimit: 10\n  selector:\n    matchLabels:\n      app: mysql-deploy\n  strategy:\n    rollingUpdate:\n      maxSurge: 25%\n      maxUnavailable: 25%\n    type: RollingUpdate\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mysql-deploy\n      name: mysql-deploy\n    spec:\n      containers:\n      - args:\n        - --ignore-db-dir=lost+found\n        env:\n        - name: MYSQL_ROOT_PASSWORD\n          value: dangerous\n        image: release.daocloud.io/velero/mysql:5\n        imagePullPolicy: IfNotPresent\n        name: mysql-deploy\n        ports:\n        - containerPort: 3306\n          protocol: TCP\n        resources:\n          limits:\n            cpu: \"1\"\n            memory: \"4294967296\"\n        terminationMessagePath: /dev/termination-log\n        terminationMessagePolicy: File\n        volumeMounts:\n        - mountPath: /var/lib/mysql\n          name: data\n      dnsPolicy: ClusterFirst\n      restartPolicy: Always\n      schedulerName: default-scheduler\n      securityContext:\n        fsGroup: 999\n      terminationGracePeriodSeconds: 30\n      volumes:\n      - name: data\n        persistentVolumeClaim:\n          claimName: mydata\n

                                                                                4. Run the mysql.yaml file using the kubectl tool on the node.

                                                                                  kubectl apply -f mysql.yaml\n

                                                                                  Expected output

                                                                                  [root@g-master1 ~]# kubectl apply -f mysql.yaml\ndeployment.apps/mysql-deploy created\n

                                                                                5. Check the status of the MySQL Pod.

                                                                                  Run kubectl get pod | grep mysql to view the status of the MySQL Pod and wait for its status to become running (approximately 2 minutes).

                                                                                  Expected output

                                                                                  [root@g-master1 ~]# kubectl get pod |grep mysql\nmysql-deploy-5d6f94cb5c-gkrks      1/1     Running   0          2m53s\n

                                                                                  Note

                                                                                  • If the MySQL Pod remains in a non-running state for a long time, it is usually because NFS dependencies are not installed on all nodes in the cluster.
                                                                                  • Run kubectl describe pod ${mysql pod name} to view detailed information about the Pod.
• If there is an error message like MountVolume.SetUp failed for volume \"pvc-4ad70cc6-df37-4253-b0c9-8cb86518ccf8\" : mount failed: exit status 32 , delete the previously created resources with kubectl delete -f (see the cleanup sketch below) and start again from deploying the NFS service.
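  A minimal cleanup sketch, assuming the nfs.yaml, pvc.yaml, and mysql.yaml files created earlier are still in the current directory:

    # Delete in reverse order of creation, then redeploy starting from the NFS service
    kubectl delete -f mysql.yaml
    kubectl delete -f pvc.yaml
    kubectl delete -f nfs.yaml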
                                                                                6. Write data to the MySQL application.

                                                                                  To verify the success of the data migration later, you can use a script to write test data to the MySQL application.

                                                                                  1. Use the command vi insert.sh to create a script named insert.sh on the node, and copy the following content into the script.

                                                                                    insert.sh

                                                                                    #!/bin/bash\n\nfunction rand(){\n    min=$1\n    max=$(($2-$min+1))\n    num=$(date +%s%N)\n    echo $(($num%$max+$min))\n}\n\nfunction insert(){\n    user=$(date +%s%N | md5sum | cut -c 1-9)\n    age=$(rand 1 100)\n\n    sql=\"INSERT INTO test.users(user_name, age)VALUES('${user}', ${age});\"\n    echo -e ${sql}\n\n    kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"${sql}\"\n\n}\n\nkubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"CREATE DATABASE IF NOT EXISTS test;\"\nkubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"CREATE TABLE IF NOT EXISTS test.users(user_name VARCHAR(10) NOT NULL,age INT UNSIGNED)ENGINE=InnoDB DEFAULT CHARSET=utf8;\"\n\nwhile true;do\n    insert\n    sleep 1\ndone\n

2. Make insert.sh executable and run the script.

                                                                                    [root@g-master1 ~]# chmod +x insert.sh\n[root@g-master1 ~]# ./insert.sh\n

                                                                                    Expected output

                                                                                    mysql: [Warning] Using a password on the command line interface can be insecure.\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('dc09195ba', 10);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('80ab6aa28', 70);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('f488e3d46', 23);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('e6098695c', 93);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('eda563e7d', 63);\nmysql: [Warning] Using a password on the command line interface can be insecure.\nINSERT INTO test.users(user_name, age)VALUES('a4d1b8d68', 17);\nmysql: [Warning] Using a password on the command line interface can be insecure.\n

3. Press Ctrl + C to stop the script.

4. Connect to the MySQL Pod and check the data written to MySQL.

                                                                                    kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"SELECT * FROM test.users;\"\n

                                                                                    Expected output

                                                                                    [root@g-master1 ~]# kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"SELECT * FROM test.users;\"\nmysql: [Warning] Using a password on the command line interface can be insecure.\nuser_name   age\ndc09195ba   10\n80ab6aa28   70\nf488e3d46   23\ne6098695c   93\neda563e7d   63\na4d1b8d68   17\nea47546d9   86\na34311f2e   47\n740cefe17   33\nede85ea28   65\nb6d0d6a0e   46\nf0eb38e50   44\nc9d2f28f5   72\n8ddaafc6f   31\n3ae078d0e   23\n6e041631e   96\n

                                                                                "},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html#install-velero-plugin-on-both-clusters","title":"Install Velero Plugin on Both Clusters","text":"

                                                                                Note

                                                                                The velero plugin needs to be installed on both the source and target clusters.

                                                                                Refer to the Install Velero Plugin documentation and the MinIO configuration below to install the velero plugin on the main-cluster and recovery-cluster .

| MinIO Server Address | Bucket | Username | Password |
| --- | --- | --- | --- |
| http://10.7.209.110:9000 | mysql-demo | root | dangerous |

                                                                                Note

                                                                                When installing the plugin, replace S3url with the MinIO server address prepared for this demonstration, and replace the bucket with an existing bucket in MinIO.
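For reference, below is a hedged sketch of what such an installation looks like with the upstream Velero CLI against the MinIO settings above; the plugin image version and the minio-credentials file name are illustrative assumptions, not the platform's own installer:

  # minio-credentials (illustrative file name) contains the MinIO account above:
  # [default]
  # aws_access_key_id = root
  # aws_secret_access_key = dangerous
  velero install \
    --provider aws \
    --plugins velero/velero-plugin-for-aws:v1.8.0 \
    --bucket mysql-demo \
    --secret-file ./minio-credentials \
    --use-volume-snapshots=false \
    --backup-location-config region=minio,s3ForcePathStyle="true",s3Url=http://10.7.209.110:9000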

                                                                                "},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html#backup-mysql-application-and-data","title":"Backup MySQL Application and Data","text":"
                                                                                1. Add a unique label, backup=mysql , to the MySQL application and PVC data. This will facilitate resource selection during backup.

                                                                                  kubectl label deploy mysql-deploy backup=mysql # Add label to mysql-deploy\nkubectl label pod mysql-deploy-5d6f94cb5c-gkrks backup=mysql # Add label to mysql pod\nkubectl label pvc mydata backup=mysql # Add label to mysql pvc\n
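  Optionally, verify that all three resources carry the label before creating the backup plan:

    kubectl get deploy,pod,pvc -l backup=mysql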
2. Refer to the steps described in Application Backup and the parameters below to create an application backup:

  • Name: backup-mysql (can be customized)
  • Source Cluster: main-cluster
  • Namespace: default
  • Resource Filter - Specify resource label: backup:mysql

3. After creating the backup plan, the page will automatically return to the backup plan list. Find the newly created backup plan backup-mysql , click the more options button ... in the plan, and select Run Now to execute the newly created backup plan.

4. Wait for the backup plan execution to complete before proceeding with the next steps.
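Since the plugin installed earlier is Velero-based, the backup created above corresponds roughly to a Velero Backup resource. A hedged sketch (the velero namespace and the exact field mapping are assumptions about the platform's internals):

  kubectl apply -f - <<EOF
  apiVersion: velero.io/v1
  kind: Backup
  metadata:
    name: backup-mysql
    namespace: velero
  spec:
    includedNamespaces:
      - default
    labelSelector:
      matchLabels:
        backup: mysql
  EOF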
                                                                                "},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html#cross-cluster-recovery-of-mysql-application-and-data","title":"Cross-Cluster Recovery of MySQL Application and Data","text":"
1. Log in to the AI platform and select Container Management -> Backup & Restore -> Application Backup from the left navigation menu.

2. Select Recovery in the left-side toolbar, then click Restore Backup on the right side.

3. Fill in the parameters based on the following instructions:

  • Name: restore-mysql (can be customized)
  • Backup Source Cluster: main-cluster
  • Backup Plan: backup-mysql
  • Backup Point: default
  • Recovery Target Cluster: recovery-cluster

4. Refresh the backup plan list and wait for the backup plan execution to complete.
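Correspondingly, a hedged sketch of the Velero Restore resource behind such a recovery (again assuming the upstream velero.io/v1 API and the velero namespace):

  kubectl apply -f - <<EOF
  apiVersion: velero.io/v1
  kind: Restore
  metadata:
    name: restore-mysql
    namespace: velero
  spec:
    backupName: backup-mysql
  EOF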
                                                                                "},{"location":"en/admin/kpanda/best-practice/backup-mysql-on-nfs.html#check-if-the-data-is-restored-successfully","title":"Check if the data is restored successfully","text":"
1. Log in to the control plane node of recovery-cluster and check whether mysql-deploy has been successfully restored to the current cluster.

                                                                                  kubectl get pod\n

Expected output is as follows:

                                                                                  NAME                               READY   STATUS    RESTARTS   AGE\nmysql-deploy-5798f5d4b8-62k6c      1/1     Running   0          24h\n
2. Check whether the data in the MySQL table has been restored.

                                                                                  kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"SELECT * FROM test.users;\"\n

Expected output is as follows:

                                                                                  [root@g-master1 ~]# kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e \"SELECT * FROM test.users;\"\nmysql: [Warning] Using a password on the command line interface can be insecure.\nuser_name   age\ndc09195ba   10\n80ab6aa28   70\nf488e3d46   23\ne6098695c   93\neda563e7d   63\na4d1b8d68   17\nea47546d9   86\na34311f2e   47\n740cefe17   33\nede85ea28   65\nb6d0d6a0e   46\nf0eb38e50   44\nc9d2f28f5   72\n8ddaafc6f   31\n3ae078d0e   23\n6e041631e   96\n

                                                                                  Success

As you can see, the data in the Pod is consistent with the data in the Pods of the main-cluster . This indicates that the MySQL application and its data from the main-cluster have been successfully restored to the recovery-cluster .
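  For a quicker consistency check, you can also compare row counts on both clusters (an optional extra step):

    kubectl exec deploy/mysql-deploy -- mysql -uroot -pdangerous -e "SELECT COUNT(*) FROM test.users;"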

                                                                                "},{"location":"en/admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html","title":"Create a RedHat 9.2 Worker Cluster on a CentOS Management Platform","text":"

                                                                                This article explains how to create a RedHat 9.2 worker cluster on an existing CentOS management platform.

                                                                                Note

This article only applies to the offline mode, using the AI platform to create a worker cluster. Both the management platform and the cluster to be created use the AMD architecture. Heterogeneous deployment (mixing AMD and ARM) is not supported when creating a cluster; after the cluster is created, you can achieve mixed deployment and management by adding heterogeneous nodes.

                                                                                "},{"location":"en/admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#prerequisites","title":"Prerequisites","text":"

AI platform has been deployed in full mode, and the spark node is still alive. For deployment details, see the document Offline Install AI platform Enterprise.

                                                                                "},{"location":"en/admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#download-and-import-redhat-offline-packages","title":"Download and Import RedHat Offline Packages","text":"

Make sure you are logged in to the spark node, and that the clusterConfig.yaml file used when deploying AI platform is still available.

                                                                                "},{"location":"en/admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#download-the-relevant-redhat-offline-packages","title":"Download the Relevant RedHat Offline Packages","text":"

                                                                                Download the required RedHat OS package and ISO offline packages:

| Resource Name | Description | Download Link |
| --- | --- | --- |
| os-pkgs-redhat9-v0.9.3.tar.gz | RedHat 9.2 OS package | Download |
| ISO Offline Package | ISO package | Go to RedHat Official Download Site |
| import-iso | ISO import script | Download |
"},{"location":"en/admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#import-the-os-package-to-the-minio-of-the-spark-node","title":"Import the OS Package to the MinIO of the Spark Node","text":"

                                                                                Extract the RedHat OS package

Run the following command to extract the downloaded OS package. Here we use the RedHat OS package as an example.

                                                                                tar -xvf os-pkgs-redhat9-v0.9.3.tar.gz\n

                                                                                The contents of the extracted OS package are as follows:

                                                                                    os-pkgs\n    \u251c\u2500\u2500 import_ospkgs.sh       # This script is used to import OS packages into the MinIO file service\n    \u251c\u2500\u2500 os-pkgs-amd64.tar.gz   # OS packages for the amd64 architecture\n    \u251c\u2500\u2500 os-pkgs-arm64.tar.gz   # OS packages for the arm64 architecture\n    \u2514\u2500\u2500 os-pkgs.sha256sum.txt  # sha256sum verification file of the OS packages\n

                                                                                Import the OS Package to the MinIO of the Spark Node

                                                                                Execute the following command to import the OS packages to the MinIO file service:

                                                                                MINIO_USER=rootuser MINIO_PASS=rootpass123 ./import_ospkgs.sh  http://127.0.0.1:9000 os-pkgs-redhat9-v0.9.3.tar.gz\n

                                                                                Note

                                                                                The above command is only applicable to the MinIO service built into the spark node. If an external MinIO is used, replace http://127.0.0.1:9000 with the access address of the external MinIO. \"rootuser\" and \"rootpass123\" are the default account and password of the MinIO service built into the spark node. \"os-pkgs-redhat9-v0.9.3.tar.gz\" is the name of the downloaded OS package offline package.

                                                                                "},{"location":"en/admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#import-the-iso-offline-package-to-the-minio-of-the-spark-node","title":"Import the ISO Offline Package to the MinIO of the Spark Node","text":"

                                                                                Execute the following command to import the ISO package to the MinIO file service:

                                                                                MINIO_USER=rootuser MINIO_PASS=rootpass123 ./import_iso.sh http://127.0.0.1:9000 rhel-9.2-x86_64-dvd.iso\n

                                                                                Note

                                                                                The above command is only applicable to the MinIO service built into the spark node. If an external MinIO is used, replace http://127.0.0.1:9000 with the access address of the external MinIO. \"rootuser\" and \"rootpass123\" are the default account and password of the MinIO service built into the spark node. \"rhel-9.2-x86_64-dvd.iso\" is the name of the downloaded ISO offline package.

                                                                                "},{"location":"en/admin/kpanda/best-practice/create-redhat9.2-on-centos-platform.html#create-the-cluster-in-the-ui","title":"Create the Cluster in the UI","text":"

                                                                                Refer to the document Creating a Worker Cluster to create a RedHat 9.2 cluster.

                                                                                "},{"location":"en/admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html","title":"Create an Ubuntu Worker Cluster on CentOS","text":"

This page explains how to create an Ubuntu worker cluster on an existing CentOS platform.

                                                                                Note

                                                                                This page is specifically for the offline mode, using the AI platform platform to create a worker cluster, where both the CentOS platform and the worker cluster to be created are based on AMD architecture. Heterogeneous (mixed AMD and ARM) deployments are not supported during cluster creation; however, after the cluster is created, you can manage a mixed deployment by adding heterogeneous nodes.

                                                                                "},{"location":"en/admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#prerequisite","title":"Prerequisite","text":"
                                                                                • A fully deployed AI platform system, with the bootstrap node still active. For deployment reference, see the documentation Offline Install AI platform Enterprise.
                                                                                "},{"location":"en/admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#download-and-import-ubuntu-offline-packages","title":"Download and Import Ubuntu Offline Packages","text":"

                                                                                Please ensure you are logged into the bootstrap node! Also, make sure that the clusterConfig.yaml file used during the AI platform deployment is still available.

                                                                                "},{"location":"en/admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#download-ubuntu-offline-packages","title":"Download Ubuntu Offline Packages","text":"

                                                                                Download the required Ubuntu OS packages and ISO offline packages:

| Resource Name | Description | Download Link |
| --- | --- | --- |
| os-pkgs-ubuntu2204-v0.18.2.tar.gz | Ubuntu 22.04 OS package | https://github.com/kubean-io/kubean/releases/download/v0.18.2/os-pkgs-ubuntu2204-v0.18.2.tar.gz |
| ISO Offline Package | ISO Package | http://mirrors.melbourne.co.uk/ubuntu-releases/ |
"},{"location":"en/admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#import-os-and-iso-packages-into-minio-on-the-bootstrap-node","title":"Import OS and ISO Packages into MinIO on the Bootstrap Node","text":"

                                                                                Refer to the documentation Importing Offline Resources to import offline resources into MinIO on the bootstrap node.

                                                                                "},{"location":"en/admin/kpanda/best-practice/create-ubuntu-on-centos-platform.html#create-cluster-on-ui","title":"Create Cluster on UI","text":"

                                                                                Refer to the documentation Creating a Worker Cluster to create the Ubuntu cluster.

                                                                                "},{"location":"en/admin/kpanda/best-practice/etcd-backup.html","title":"etcd Backup and Restore","text":"

With the ETCD backup feature, you can create a backup policy that backs up the etcd data of a specified cluster to S3 storage on a schedule. This page focuses on how to restore the backed-up data to the current cluster.

                                                                                Note

• AI platform ETCD backup and restore is limited to the same cluster (with no change in the number of nodes or in IP addresses). For example, after the etcd data of Cluster A is backed up, the backup data can only be restored to Cluster A, not to Cluster B.
• For cross-cluster backup and restore, the application backup and restore feature is recommended instead.
• Before restoring, first create a backup policy to back up the current state; refer to ETCD Backup.

                                                                                The following is a specific case to illustrate the whole process of backup and restore.

                                                                                "},{"location":"en/admin/kpanda/best-practice/etcd-backup.html#environmental-information","title":"Environmental Information","text":"

Begin with basic information about the target cluster and the S3 storage used for the restore. Here, MinIO is used as the S3 storage, and the whole cluster has 3 control planes (3 etcd replicas).

| IP | Host | Role | Remarks |
| --- | --- | --- | --- |
| 10.6.212.10 | host01 | k8s-master01 | k8s node 1 |
| 10.6.212.11 | host02 | k8s-master02 | k8s node 2 |
| 10.6.212.12 | host03 | k8s-master03 | k8s node 3 |
| 10.6.212.13 | host04 | minio | minio service |
"},{"location":"en/admin/kpanda/best-practice/etcd-backup.html#prerequisites","title":"Prerequisites","text":""},{"location":"en/admin/kpanda/best-practice/etcd-backup.html#install-the-etcdbrctl-tool","title":"Install the etcdbrctl tool","text":"

To implement ETCD data backup and restore, you need to install the etcdbrctl open source tool on any of the above k8s nodes. The tool does not provide prebuilt binaries for the time being and needs to be compiled from source; refer to its build instructions.
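A minimal build sketch, assuming a Go toolchain is installed and that etcdbrctl comes from the gardener/etcd-backup-restore project; the make target and output path may differ between versions:

  git clone https://github.com/gardener/etcd-backup-restore.git
  cd etcd-backup-restore
  make build                               # builds the etcdbrctl binary into bin/
  cp bin/etcdbrctl /usr/local/bin/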

                                                                                After installation, use the following command to check whether the tool is available:

                                                                                etcdbrctl -v\n

                                                                                The expected output is as follows:

                                                                                INFO[0000] etcd-backup-restore Version: v0.23.0-dev\nINFO[0000] Git SHA: b980beec\nINFO[0000] Go Version: go1.19.3\nINFO[0000] Go OS/Arch: linux/amd64\n
                                                                                "},{"location":"en/admin/kpanda/best-practice/etcd-backup.html#check-the-backup-data","title":"Check the backup data","text":"

                                                                                You need to check the following before restoring:

• Whether the data has been successfully backed up in AI platform
• Whether the backup data exists in the S3 storage

                                                                                Note

The AI platform backup is a full data backup, and the full data of the most recent backup will be restored during a restore.

                                                                                "},{"location":"en/admin/kpanda/best-practice/etcd-backup.html#shut-down-the-cluster","title":"Shut down the cluster","text":"

Before restoring, the cluster must be shut down. By default, etcd and kube-apiserver are started as static Pods. Shutting down the cluster here means moving the static Pod manifest files out of the /etc/kubernetes/manifests directory; kubelet will then remove the Pods and stop the services.

1. First, delete the backup data left over from any previous restore. Deleting here does not remove the existing etcd data; it refers to renaming the etcd data directory (see the sketch after the command). Wait until the backup has been successfully restored before deleting that directory, so that the current cluster can still be recovered if the etcd restore fails. This step needs to be performed on each node.

                                                                                  rm -rf /var/lib/etcd_bak\n
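  The rename itself is not shown above; a minimal sketch of that step, assuming the etcd data directory is /var/lib/etcd :

    # Set the current etcd data aside; keep /var/lib/etcd_bak until the restore has succeeded
    mv /var/lib/etcd /var/lib/etcd_bak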
2. Next, the kube-apiserver service needs to be shut down to ensure that there are no new changes to the etcd data. This step needs to be performed on each node.

                                                                                  mv /etc/kubernetes/manifests/kube-apiserver.yaml /tmp/kube-apiserver.yaml\n
3. You also need to shut down the etcd service. This step needs to be performed on each node.

                                                                                  mv /etc/kubernetes/manifests/etcd.yaml /tmp/etcd.yaml\n
4. Ensure that the kube-apiserver and etcd services on all control planes are shut down.

5. After shutting down the services on all nodes, use the following command to check the etcd cluster status. This command can be executed on any node.

Replace the endpoints values with the actual node names:

                                                                                  etcdctl endpoint status --endpoints=controller-node-1:2379,controller-node-2:2379,controller-node-3:2379 -w table \\\n  --cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n  --cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n  --key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\"\n

The expected output is as follows, indicating that all etcd services have been stopped:

                                                                                  {\"level\":\"warn\",\"ts\":\"2023-03-29T17:51:50.817+0800\",\"logger\":\"etcd-client\",\"caller\":\"v3@v3.5.6/retry_interceptor.go:62\",\"msg\":\"retrying of unary invoker failed\",\"target\":\"etcd-endpoints://0xc0001ba000/controller-node-1:2379\",\"attempt\":0,\"error\":\"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \\\"transport: Error while dialing dial tcp 10.5.14.31:2379: connect: connection refused\\\"\"}\nFailed to get the status of endpoint controller-node-1:2379 (context deadline exceeded)\n{\"level\":\"warn\",\"ts\":\"2023-03-29T17:51:55.818+0800\",\"logger\":\"etcd-client\",\"caller\":\"v3@v3.5.6/retry_interceptor.go:62\",\"msg\":\"retrying of unary invoker failed\",\"target\":\"etcd-endpoints://0xc0001ba000/controller-node-2:2379\",\"attempt\":0,\"error\":\"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \\\"transport: Error while dialing dial tcp 10.5.14.32:2379: connect: connection refused\\\"\"}\nFailed to get the status of endpoint controller-node-2:2379 (context deadline exceeded)\n{\"level\":\"warn\",\"ts\":\"2023-03-29T17:52:00.820+0800\",\"logger\":\"etcd-client\",\"caller\":\"v3@v3.5.6/retry_interceptor.go:62\",\"msg\":\"retrying of unary invoker failed\",\"target\":\"etcd-endpoints://0xc0001ba000/controller-node-1:2379\",\"attempt\":0,\"error\":\"rpc error: code = DeadlineExceeded desc = latest balancer error: last connection error: connection error: desc = \\\"transport: Error while dialing dial tcp 10.5.14.33:2379: connect: connection refused\\\"\"}\nFailed to get the status of endpoint controller-node-3:2379 (context deadline exceeded)\n+----------+----+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |\n+----------+----+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n+----------+----+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n
                                                                                "},{"location":"en/admin/kpanda/best-practice/etcd-backup.html#restore-the-backup","title":"Restore the backup","text":"

                                                                                You only need to restore the data of one node, and the etcd data of other nodes will be automatically synchronized.

                                                                                1. Set environment variables

Before restoring the data with etcdbrctl, run the following commands to set the S3 connection authentication information as environment variables:

                                                                                  export ECS_ENDPOINT=http://10.6.212.13:9000 # (1)\nexport ECS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE # (2)\nexport ECS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY # (3)\n
1. S3 storage access endpoint
2. S3 storage username
3. S3 storage password
                                                                                2. Perform the restore operation

                                                                                  Run the etcdbrctl command line tool to perform the restore, which is the most critical step.

                                                                                  etcdbrctl restore --data-dir /var/lib/etcd/ --store-container=\"etcd-backup\" \\ \n  --storage-provider=ECS \\\n  --initial-cluster=controller-node1=https://10.6.212.10:2380 \\\n  --initial-advertise-peer-urls=https://10.6.212.10:2380 \n

                                                                                  The parameters are described as follows:

• --data-dir: the etcd data directory. It must match the data directory configured for etcd so that etcd can load the data normally.
• --store-container: the S3 storage location, i.e. the bucket in MinIO. It must correspond to the bucket used for the data backup.
• --initial-cluster: the initial etcd cluster configuration. The etcd member names must be the same as in the original cluster.
• --initial-advertise-peer-urls: the inter-cluster access address of the etcd member. It must be consistent with the etcd configuration.

                                                                                  The expected output is as follows:

                                                                                  INFO[0000] Finding latest set of snapshot to recover from...\nINFO[0000] Restoring from base snapshot: Full-00000000-00111147-1679991074  actor=restorer\nINFO[0001] successfully fetched data of base snapshot in 1.241380207 seconds  actor=restorer\n{\"level\":\"info\",\"ts\":1680011221.2511616,\"caller\":\"mvcc/kvstore.go:380\",\"msg\":\"restored last compact revision\",\"meta-bucket-name\":\"meta\",\"meta-bucket-name-key\":\"finishedCompactRev\",\"restored-compact-revision\":110327}\n{\"level\":\"info\",\"ts\":1680011221.3045986,\"caller\":\"membership/cluster.go:392\",\"msg\":\"added member\",\"cluster-id\":\"66638454b9dd7b8a\",\"local-member-id\":\"0\",\"added-peer-id\":\"123c2503a378fc46\",\"added-peer-peer-urls\":[\"https://10.6.212.10:2380\"]}\nINFO[0001] Starting embedded etcd server...              actor=restorer\n\n....\n\n{\"level\":\"info\",\"ts\":\"2023-03-28T13:47:02.922Z\",\"caller\":\"embed/etcd.go:565\",\"msg\":\"stopped serving peer traffic\",\"address\":\"127.0.0.1:37161\"}\n{\"level\":\"info\",\"ts\":\"2023-03-28T13:47:02.922Z\",\"caller\":\"embed/etcd.go:367\",\"msg\":\"closed etcd server\",\"name\":\"default\",\"data-dir\":\"/var/lib/etcd\",\"advertise-peer-urls\":[\"http://localhost:0\"],\"advertise-client-urls\":[\"http://localhost:0\"]}\nINFO[0003] Successfully restored the etcd data directory.\n

!!! note "You can check the etcd YAML file for comparison to avoid configuration errors"

                                                                                  ```shell\ncat /tmp/etcd.yaml | grep initial-\n- --experimental-initial-corrupt-check=true\n- --initial-advertise-peer-urls=https://10.6.212.10:2380\n- --initial-cluster=controller-node-1=https://10.6.212.10:2380\n```\n
3. Run the following commands on node 01 to restore the etcd service of node 01.

First, move the etcd static Pod manifest file back to the /etc/kubernetes/manifests directory; kubelet will then restart etcd:

                                                                                  mv /tmp/etcd.yaml /etc/kubernetes/manifests/etcd.yaml\n

                                                                                  Then wait for the etcd service to finish starting, and check the status of etcd. The default directory of etcd-related certificates is: /etc/kubernetes/ssl . If the cluster certificate is stored in another location, specify the proper path.

                                                                                  • Check the etcd cluster list:

                                                                                    etcdctl member list -w table \\\n--cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n--cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n--key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\" \n

                                                                                    The expected output is as follows:

                                                                                    +------------------+---------+-------------------+--------------------------+--------------------------+------------+\n|        ID        | STATUS  |       NAME        |        PEER ADDRS        |       CLIENT ADDRS       | IS LEARNER |\n+------------------+---------+-------------------+--------------------------+--------------------------+------------+\n| 123c2503a378fc46 | started | controller-node-1 | https://10.6.212.10:2380 | https://10.6.212.10:2379 |      false |\n+------------------+---------+-------------------+--------------------------+--------------------------+------------+\n
                                                                                  • To view the status of controller-node-1:

                                                                                    etcdctl endpoint status --endpoints=controller-node-1:2379 -w table \\\n--cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n--cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n--key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\"\n

                                                                                    The expected output is as follows:

                                                                                    +------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n|        ENDPOINT        |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |\n+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n| controller-node-1:2379 | 123c2503a378fc46 |   3.5.6 |   15 MB |      true |      false |         3 |       1200 |               1199 |        |\n+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n
                                                                                4. Restore other node data

The above steps restored the data of node 01. To restore the data of the other nodes, you only need to start the etcd Pods and let etcd complete the data synchronization by itself.

                                                                                  • Do the same at both node 02 and node 03:

                                                                                    mv /tmp/etcd.yaml /etc/kubernetes/manifests/etcd.yaml\n
• Data synchronization between etcd members takes some time. You can check the etcd cluster status to ensure that all etcd members are normal:

                                                                                    Check whether the etcd cluster status is normal:

                                                                                    etcdctl member list -w table \\\n--cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n--cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n--key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\"\n

                                                                                    The expected output is as follows:

                                                                                    +------------------+---------+-------------------+-------------------------+-------------------------+------------+\n|        ID        | STATUS  |    NAME           |       PEER ADDRS        |      CLIENT ADDRS       | IS LEARNER |\n+------------------+---------+-------------------+-------------------------+-------------------------+------------+\n| 6ea47110c5a87c03 | started | controller-node-1 | https://10.5.14.31:2380 | https://10.5.14.31:2379 |      false |\n| e222e199f1e318c4 | started | controller-node-2 | https://10.5.14.32:2380 | https://10.5.14.32:2379 |      false |\n| f64eeda321aabe2d | started | controller-node-3 | https://10.5.14.33:2380 | https://10.5.14.33:2379 |      false |\n+------------------+---------+-------------------+-------------------------+-------------------------+------------+\n

                                                                                    Check whether the three member nodes are normal:

                                                                                    etcdctl endpoint status --endpoints=controller-node-1:2379,controller-node-2:2379,controller-node-3:2379 -w table \\\n--cacert=\"/etc/kubernetes/ssl/etcd/ca.crt\" \\\n--cert=\"/etc/kubernetes/ssl/apiserver-etcd-client.crt\" \\\n--key=\"/etc/kubernetes/ssl/apiserver-etcd-client.key\"\n

                                                                                    The expected output is as follows:

                                                                                    +------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n|     ENDPOINT           |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |\n+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n| controller-node-1:2379 | 6ea47110c5a87c03 |   3.5.6 |   88 MB |      true |      false |         6 |     199008 |             199008 |        |\n| controller-node-2:2379 | e222e199f1e318c4 |   3.5.6 |   88 MB |     false |      false |         6 |     199114 |             199114 |        |\n| controller-node-3:2379 | f64eeda321aabe2d |   3.5.6 |   88 MB |     false |      false |         6 |     199316 |             199316 |        |\n+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+\n
                                                                                "},{"location":"en/admin/kpanda/best-practice/etcd-backup.html#restore-the-cluster","title":"Restore the cluster","text":"

After the etcd data of all nodes is synchronized, the kube-apiserver can be restarted to restore the entire cluster to an accessible state:

                                                                                1. Restart the kube-apiserver service for node1

                                                                                  mv /tmp/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml\n
                                                                                2. Restart the kube-apiserver service for node2

                                                                                  mv /tmp/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml\n
                                                                                3. Restart the kube-apiserver service for node3

                                                                                  mv /tmp/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml\n
                                                                                4. After kubelet starts kube-apiserver, check whether the restored k8s data is normal:

                                                                                  kubectl get nodes\n

                                                                                  The expected output is as follows:

NAME                STATUS     ROLES           AGE     VERSION\ncontroller-node-1   Ready      <none>          3h30m   v1.25.4\ncontroller-node-2   Ready      control-plane   3h29m   v1.25.4\ncontroller-node-3   Ready      control-plane   3h28m   v1.25.4\n
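Optionally, also confirm that the control plane components in kube-system have come back up:

kubectl get pods -n kube-system\n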
                                                                                "},{"location":"en/admin/kpanda/best-practice/hardening-cluster.html","title":"How to Harden a Self-built Work Cluster","text":"

In AI platform, when a worker cluster created through the user interface is scanned with the CIS Benchmark, some scan items fail. This article provides hardening instructions based on different versions of the CIS Benchmark.

                                                                                "},{"location":"en/admin/kpanda/best-practice/hardening-cluster.html#cis-benchmark-127","title":"CIS Benchmark 1.27","text":"

                                                                                Scan Environment:

                                                                                • Kubernetes version: 1.25.4
                                                                                • Containerd: 1.7.0
                                                                                • Kubean version: 0.4.9
                                                                                • Kubespray version: v2.22
                                                                                "},{"location":"en/admin/kpanda/best-practice/hardening-cluster.html#failed-scan-items","title":"Failed Scan Items","text":"
                                                                                1. [FAIL] 1.2.5 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)
                                                                                2. [FAIL] 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)
                                                                                3. [FAIL] 1.4.1 Ensure that the --profiling argument is set to false (Automated)
                                                                                4. [FAIL] 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)
                                                                                "},{"location":"en/admin/kpanda/best-practice/hardening-cluster.html#analysis-of-scan-failures","title":"Analysis of Scan Failures","text":"
                                                                                1. [FAIL] 1.2.5 Ensure that the --kubelet-certificate-authority argument is set as appropriate (Automated)

Reason: CIS requires that kube-apiserver specify the CA certificate path used to verify the kubelet's serving certificate; see the flag summary after this list.

                                                                                2. [FAIL] 1.3.7 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)

                                                                                  Reason: CIS requires that kube-controller-manager's --bind-address=127.0.0.1

                                                                                3. [FAIL] 1.4.1 Ensure that the --profiling argument is set to false (Automated)

                                                                                  Reason: CIS requires that kube-scheduler sets --profiling=false

                                                                                4. [FAIL] 1.4.2 Ensure that the --bind-address argument is set to 127.0.0.1 (Automated)

                                                                                  Reason: CIS requires setting kube-scheduler's --bind-address=127.0.0.1
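For reference, a minimal summary of the component flags these four checks expect, assuming static Pod manifests and the certificate path used elsewhere in this document (adjust the CA path to your cluster's actual certificate location):

# kube-apiserver (1.2.5)\n--kubelet-certificate-authority=/etc/kubernetes/ssl/ca.crt\n\n# kube-controller-manager (1.3.7)\n--bind-address=127.0.0.1\n\n# kube-scheduler (1.4.1 and 1.4.2)\n--profiling=false\n--bind-address=127.0.0.1\n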

                                                                                "},{"location":"en/admin/kpanda/best-practice/hardening-cluster.html#hardening-configuration-to-pass-cis-scan","title":"Hardening Configuration to Pass CIS Scan","text":"

To address these scan failures, kubespray v2.22 added default values that resolve some of them. For more details, refer to the kubespray hardening documentation.

                                                                                • Add parameters by modifying the kubean var-config configuration file:

                                                                                  kubernetes_audit: true\nkube_controller_manager_bind_address: 127.0.0.1\nkube_scheduler_bind_address: 127.0.0.1\nkube_kubeadm_scheduler_extra_args:\n  profiling: false\nkubelet_rotate_server_certificates: true\n
• In AI platform, you can also configure these advanced parameters through the user interface by adding custom parameters in the last step of cluster creation.

• After the custom parameters are set, they are added to the var-config configmap in kubean.

• Perform a scan after installing the cluster.

After rescanning, all items pass (WARN and INFO are counted as PASS). Note that this document only applies to CIS Benchmark 1.27, as the CIS Benchmark is continuously updated.

                                                                                "},{"location":"en/admin/kpanda/best-practice/kubean-low-version.html","title":"Deploy and Upgrade Compatible Versions of Kubean in Offline Scenarios","text":"

To meet customer demand for building Kubernetes (K8s) clusters at lower versions, Kubean provides compatibility with lower versions and can create K8s clusters at those versions.

Currently, the supported versions for self-built worker clusters range from v1.26.0 to v1.28. Refer to the AI platform Cluster Version Support System for more information.

                                                                                This article will demonstrate how to deploy a K8s cluster with a lower version.

                                                                                Note

                                                                                Node environment used in the document:

                                                                                • X86 architecture
                                                                                • CentOS 7 Linux distribution
                                                                                "},{"location":"en/admin/kpanda/best-practice/kubean-low-version.html#prerequisites","title":"Prerequisites","text":"
• Prepare a management cluster where kubean resides, with the podman, skopeo, and minio client commands available in the current environment (a quick availability check is shown after this list). If they are not available, you can install these dependencies through a script; see Installing Prerequisite Dependencies.

• Go to kubean to view the released artifacts, and choose a specific artifact version based on your actual situation. The currently supported artifact versions and their corresponding cluster version ranges are as follows:

Artifact Version   Cluster Range       AI platform Support
release-2.21       v1.23.0 ~ v1.25.6   Supported since installer v0.14.0
release-2.22       v1.24.0 ~ v1.26.9   Supported since installer v0.15.0
release-2.23       v1.25.0 ~ v1.27.7   Expected to be supported from installer v0.16.0

                                                                                  This article demonstrates the offline deployment of a K8s cluster with version 1.23.0 and the offline upgrade of a K8s cluster from version 1.23.0 to 1.24.0, so we choose the artifact release-2.21.
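As noted in the prerequisites above, the three client tools must be available before you proceed. A quick availability check (this assumes the MinIO client binary is named mc; adjust if yours differs):

podman --version\nskopeo --version\nmc --version\n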

                                                                                "},{"location":"en/admin/kpanda/best-practice/kubean-low-version.html#procedure","title":"Procedure","text":""},{"location":"en/admin/kpanda/best-practice/kubean-low-version.html#prepare-the-relevant-artifacts-for-the-lower-version-of-kubespray-release","title":"Prepare the Relevant Artifacts for the Lower Version of Kubespray Release","text":"

                                                                                Import the spray-job image into the registry of the offline environment.

                                                                                # Assuming the registry address in the bootstrap cluster is 172.30.41.200\nREGISTRY_ADDR=\"172.30.41.200\"\n\n# The image spray-job can use the accelerator address here, and the image address is determined based on the selected artifact version\nSPRAY_IMG_ADDR=\"ghcr.m.daocloud.io/kubean-io/spray-job:2.21-d6f688f\"\n\n# skopeo parameters\nSKOPEO_PARAMS=\" --insecure-policy -a --dest-tls-verify=false --retry-times=3 \"\n\n# Online environment: Export the spray-job image of version release-2.21 and transfer it to the offline environment\nskopeo copy docker://${SPRAY_IMG_ADDR} docker-archive:spray-job-2.21.tar\n\n# Offline environment: Import the spray-job image of version release-2.21 into the bootstrap registry\nskopeo copy ${SKOPEO_PARAMS} docker-archive:spray-job-2.21.tar docker://${REGISTRY_ADDR}/${SPRAY_IMG_ADDR}\n
                                                                                "},{"location":"en/admin/kpanda/best-practice/kubean-low-version.html#create-offline-resources-for-the-earlier-versions-of-k8s","title":"Create Offline Resources for the Earlier Versions of K8s","text":"
                                                                                1. Prepare the manifest.yml file.

                                                                                  cat > \"manifest.yml\" <<EOF\nimage_arch:\n  - \"amd64\" ## \"arm64\"\nkube_version: ## Fill in the cluster version according to the actual scenario\n  - \"v1.23.0\"\n  - \"v1.24.0\"\nEOF\n
                                                                                2. Create the offline incremental package.

                                                                                  # Create the data directory\nmkdir data\n# Create the offline package\nAIRGAP_IMG_ADDR=\"ghcr.m.daocloud.io/kubean-io/airgap-patch:2.21-d6f688f\" # (1)\npodman run --rm -v $(pwd)/manifest.yml:/manifest.yml -v $(pwd)/data:/data -e ZONE=CN -e MODE=FULL ${AIRGAP_IMG_ADDR}\n
1. The airgap-patch image can use the accelerator address here; the image address is determined by the selected artifact version
                                                                                3. Import the offline images and binary packages for the proper K8s version.

                                                                                  # Import the binaries from the data directory to the minio in the bootstrap node\ncd ./data/amd64/files/\nMINIO_ADDR=\"http://172.30.41.200:9000\" # Replace IP with the actual repository url\nMINIO_USER=rootuser MINIO_PASS=rootpass123 ./import_files.sh ${MINIO_ADDR}\n\n# Import the images from the data directory to the image repository in the bootstrap node\ncd ./data/amd64/images/\nREGISTRY_ADDR=\"172.30.41.200\"  ./import_images.sh # Replace IP with the actual repository url\n
                                                                                4. Deploy the manifest and localartifactset.cr.yaml custom resources to the management cluster where kubean resides or the Global cluster. In this example, we use the Global cluster.

# Deploy the localArtifactSet resources in the data directory\ncd ./data\nkubectl apply -f localartifactset.cr.yaml\n\n# Download the manifest resources for release-2.21\nwget https://raw.githubusercontent.com/kubean-io/kubean-manifest/main/manifests/manifest-2.21-d6f688f.yml\n\n# Deploy the manifest resources for release-2.21\nkubectl apply -f manifest-2.21-d6f688f.yml\n
                                                                                "},{"location":"en/admin/kpanda/best-practice/kubean-low-version.html#deployment-and-upgrade-legacy-k8s-cluster","title":"Deployment and Upgrade Legacy K8s Cluster","text":""},{"location":"en/admin/kpanda/best-practice/kubean-low-version.html#deploy","title":"Deploy","text":"
                                                                                1. Go to Container Management and click the Create Cluster button on the Clusters page.

2. For the Managed parameter, choose the cluster where the manifest and localartifactset.cr.yaml custom resources were deployed. In this example, we use the Global cluster.

                                                                                3. Refer to Creating a Cluster for the remaining parameters.

                                                                                "},{"location":"en/admin/kpanda/best-practice/kubean-low-version.html#upgrade","title":"Upgrade","text":"
                                                                                1. Select the newly created cluster and go to the details page.

                                                                                2. Click Cluster Operations in the left navigation bar, then click Cluster Upgrade on the top right of the page.

3. Select the version available for the upgrade.

                                                                                "},{"location":"en/admin/kpanda/best-practice/multi-arch.html","title":"How to Add Heterogeneous Nodes to a Worker Cluster","text":"

This page explains how to add ARM architecture nodes running the Kylin v10 sp2 operating system to an AMD64 architecture worker cluster running the CentOS 7.9 operating system.

                                                                                Note

This page applies only to adding heterogeneous nodes to a worker cluster created using AI platform in offline mode; it does not apply to integrated clusters.

                                                                                "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#prerequisites","title":"Prerequisites","text":"
• An AI platform Full Mode deployment has been completed successfully, and the bootstrap node is still alive. Refer to the documentation Offline Installation of AI platform Enterprise for the deployment process.
• A worker cluster with AMD64 architecture and the CentOS 7.9 operating system has been created through AI platform. Refer to the documentation Creating a Worker Cluster for the creation process.
                                                                                "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#procedure","title":"Procedure","text":""},{"location":"en/admin/kpanda/best-practice/multi-arch.html#download-and-import-offline-packages","title":"Download and Import Offline Packages","text":"

                                                                                Take ARM architecture and Kylin v10 sp2 operating system as examples.

                                                                                Make sure you are logged into the bootstrap node! Also, make sure the clusterConfig.yaml file used during the AI platform deployment is available.

                                                                                "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#offline-image-package","title":"Offline Image Package","text":"

                                                                                Note

                                                                                The latest version can be downloaded from the Download Center.

CPU Architecture   Version   Download Link
AMD64              v0.18.0   https://qiniu-download-public.daocloud.io/DaoCloud_Enterprise/dce5/offline-v0.18.0-amd64.tar
ARM64              v0.18.0   https://qiniu-download-public.daocloud.io/DaoCloud_Enterprise/dce5/offline-v0.18.0-arm64.tar

                                                                                After downloading, extract the offline package:

                                                                                tar -xvf offline-v0.18.0-arm64.tar\n
                                                                                "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#iso-offline-package-kylin-v10-sp2","title":"ISO Offline Package (Kylin v10 sp2)","text":"CPU Architecture Operating System Version Download Link ARM64 Kylin Linux Advanced Server release V10 (Sword) SP2 https://www.kylinos.cn/support/trial.html

                                                                                Note

                                                                                Kylin operating system requires personal information to be provided for downloading and usage. Select V10 (Sword) SP2 when downloading.

                                                                                "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#ospackage-offline-package-kylin-v10-sp2","title":"osPackage Offline Package (Kylin v10 sp2)","text":"

                                                                                The Kubean project provides osPackage offline packages for different operating systems. Visit https://github.com/kubean-io/kubean/releases to view the available packages.

Operating System Version                              Download Link
Kylin Linux Advanced Server release V10 (Sword) SP2   https://github.com/kubean-io/kubean/releases/download/v0.16.3/os-pkgs-kylinv10-v0.16.3.tar.gz

                                                                                Note

                                                                                Check the specific version of the osPackage offline package in the offline/sample/clusterConfig.yaml file of the offline image package.

                                                                                "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#import-offline-packages-to-the-bootstrap-node","title":"Import Offline Packages to the Bootstrap Node","text":"

                                                                                Run the import-artifact command:

./offline/dce5-installer import-artifact -c clusterConfig.yaml \\\n    --offline-path=/root/offline \\\n    --iso-path=/root/Kylin-Server-10-SP2-aarch64-Release-Build09-20210524.iso \\\n    --os-pkgs-path=/root/os-pkgs-kylinv10-v0.16.3.tar.gz\n

                                                                                Note

                                                                                Parameter Explanation:

                                                                                • -c clusterConfig.yaml specifies the clusterConfig.yaml file used during the previous AI platform deployment.
                                                                                • --offline-path specifies the file path of the downloaded offline image package.
                                                                                • --iso-path specifies the file path of the downloaded ISO operating system image.
                                                                                • --os-pkgs-path specifies the file path of the downloaded osPackage offline package.

After the import command completes successfully, the offline package is uploaded to MinIO on the bootstrap node.
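To confirm the upload, you can list the bucket with the MinIO client. This sketch assumes the bootstrap node's built-in MinIO with its default credentials (rootuser / rootpass123) and the kubean bucket; adjust the address and credentials to your environment:

mc alias set bootstrap http://127.0.0.1:9000 rootuser rootpass123\nmc ls bootstrap/kubean\n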

                                                                                "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#add-heterogeneous-worker-nodes","title":"Add Heterogeneous Worker Nodes","text":"

                                                                                Make sure you are logged into the management node of the AI platform Global Service Cluster.

                                                                                "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#modify-the-host-manifest","title":"Modify the Host Manifest","text":"

Here is an example of the host manifest:

Before adding a node:
apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: ${cluster-name}-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      hosts:\n        centos-master:\n          ip: 10.5.14.122\n          access_ip: 10.5.14.122\n          ansible_host: 10.5.14.122\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_ssh_pass: ******\n      children:\n        etcd:\n          hosts:\n            centos-master:\n        k8s_cluster:\n          children:\n            kube_control_plane:\n            kube_node:\n        kube_control_plane:\n          hosts:\n            centos-master:\n        kube_node:\n          hosts:\n            centos-master:\n
After adding a node:
apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: ${cluster-name}-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      hosts:\n        centos-master:\n          ip: 10.5.14.122\n          access_ip: 10.5.14.122\n          ansible_host: 10.5.14.122\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_ssh_pass: ******\n        # Add the heterogeneous node\n        kylin-worker:\n          ip: 10.5.10.220\n          access_ip: 10.5.10.220\n          ansible_host: 10.5.10.220\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_ssh_pass: ******\n      children:\n        kube_control_plane:\n          hosts:\n            centos-master:\n        kube_node:\n          hosts:\n            centos-master:\n            kylin-worker:  # Add the name of the heterogeneous node\n        etcd:\n          hosts:\n            centos-master:\n        k8s_cluster:\n          children:\n            kube_control_plane:\n            kube_node:\n

Edit the host manifest and add the new worker node's information as indicated by the comments above:

                                                                                kubectl edit cm ${cluster-name}-hosts-conf -n kubean-system\n
                                                                                "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#add-expansion-tasks-through-clusteroperationyml","title":"Add Expansion Tasks through ClusterOperation.yml","text":"

                                                                                Example:

                                                                                ClusterOperation.yml
                                                                                apiVersion: kubean.io/v1alpha1\nkind: ClusterOperation\nmetadata:\n  name: add-worker-node\nspec:\n  cluster: ${cluster-name} # Specify cluster name\n  image: ghcr.m.daocloud.io/kubean-io/spray-job:v0.5.0\n  backoffLimit: 0\n  actionType: playbook\n  action: scale.yml\n  extraArgs: --limit=kylin-worker\n  preHook:\n    - actionType: playbook\n      action: ping.yml\n    - actionType: playbook\n      action: disable-firewalld.yml\n    - actionType: playbook\n      action: enable-repo.yml\n      extraArgs: |\n        -e \"{repo_list: [\"http://10.5.14.30:9000/kubean/kylin-iso/\\$releasever/os/\\$basearch\",\"http://10.5.14.30:9000/kubean/kylin/\\$releasever/os/\\$basearch\"]}\"\n  postHook:\n    - actionType: playbook\n      action: cluster-info.yml\n

                                                                                Note

                                                                                • Ensure the spec.image image address matches the image used in the previous deployment job.
                                                                                • Set spec.action to scale.yml .
• Set spec.extraArgs to --limit=kylin-worker , i.e., the name of the newly added node.
                                                                                • Fill in the correct repo_list parameter for the relevant OS in spec.preHook 's enable-repo.yml script.

                                                                                To create and deploy join-node-ops.yaml according to the above configuration:

                                                                                vi join-node-ops.yaml\nkubectl apply -f join-node-ops.yaml -n kubean-system\n
                                                                                "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#check-the-status-of-the-task-execution","title":"Check the status of the task execution","text":"
                                                                                kubectl -n kubean-system get pod | grep add-worker-node\n

To check the progress of the scaling task, you can view the logs of the corresponding Pod.
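For example, substituting the Pod name returned by the previous command (the placeholder below is illustrative):

kubectl -n kubean-system logs -f <add-worker-node-pod-name>\n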

                                                                                "},{"location":"en/admin/kpanda/best-practice/multi-arch.html#verify-in-ui","title":"Verify in UI","text":"
                                                                                1. Go to Container Management -> Clusters -> Nodes .

                                                                                2. Click the newly added node to view details.
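You can also confirm the new node's architecture from the command line via the standard kubernetes.io/arch node label:

kubectl get nodes -L kubernetes.io/arch\n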

                                                                                "},{"location":"en/admin/kpanda/best-practice/replace-first-master-node.html","title":"Replace the first master node of the worker cluster","text":"

This page uses a highly available worker cluster with three master nodes as an example to explain how to replace or reintroduce the first master node when it fails or malfunctions.

The three master nodes of the example cluster are:

                                                                                • node1 (172.30.41.161)
                                                                                • node2 (172.30.41.162)
                                                                                • node3 (172.30.41.163)

                                                                                Assuming node1 is down, the following steps will explain how to reintroduce the recovered node1 back into the worker cluster.

                                                                                "},{"location":"en/admin/kpanda/best-practice/replace-first-master-node.html#preparations","title":"Preparations","text":"

                                                                                Before performing the replacement operation, first obtain basic information about the cluster resources, which will be used when modifying related configurations.

                                                                                Note

The following commands for obtaining cluster resource information are executed on the management cluster.

                                                                                1. Get the cluster name

Run the following command to find the clusters.kubean.io resource corresponding to the cluster:

                                                                                  # For example, if the resource name of clusters.kubean.io is cluster-mini-1\n# Get the name of the cluster\nCLUSTER_NAME=$(kubectl get clusters.kubean.io cluster-mini-1 -o=jsonpath=\"{.metadata.name}{'\\n'}\")\n
                                                                                2. Get the host list configmap of the cluster

                                                                                  kubectl get clusters.kubean.io cluster-mini-1 -o=jsonpath=\"{.spec.hostsConfRef}{'\\n'}\"\n{\"name\":\"mini-1-hosts-conf\",\"namespace\":\"kubean-system\"}\n
                                                                                3. Get the configuration parameters configmap of the cluster

                                                                                  kubectl get clusters.kubean.io cluster-mini-1 -o=jsonpath=\"{.spec.varsConfRef}{'\\n'}\"\n{\"name\":\"mini-1-vars-conf\",\"namespace\":\"kubean-system\"}\n
                                                                                "},{"location":"en/admin/kpanda/best-practice/replace-first-master-node.html#steps","title":"Steps","text":"
                                                                                1. Adjust the order of control plane nodes

Reset node1 to its state before the cluster was installed (or use a new node), while maintaining its network connectivity.

Adjust the position of node1 in the kube_control_plane, kube_node, and etcd sections of the host list (node1/node2/node3 -> node2/node3/node1):

                                                                                  function change_control_plane_order() {\n  cat << EOF | kubectl apply -f -\n---\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: mini-1-hosts-conf\n  namespace: kubean-system\ndata:\n  hosts.yml: |\n    all:\n      hosts:\n        node1:\n          ip: \"172.30.41.161\"\n          access_ip: \"172.30.41.161\"\n          ansible_host: \"172.30.41.161\"\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: dangerous\n        node2:\n          ip: \"172.30.41.162\"\n          access_ip: \"172.30.41.162\"\n          ansible_host: \"172.30.41.162\"\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: dangerous\n        node3:\n          ip: \"172.30.41.163\"\n          access_ip: \"172.30.41.163\"\n          ansible_host: \"172.30.41.163\"\n          ansible_connection: ssh\n          ansible_user: root\n          ansible_password: dangerous\n      children:\n        kube_control_plane:\n          hosts:\n            node2:\n            node3:\n            node1:\n        kube_node:\n          hosts:\n            node2:\n            node3:\n            node1:\n        etcd:\n          hosts:\n            node2:\n            node3:\n            node1:\n        k8s_cluster:\n          children:\n            kube_control_plane:\n            kube_node:\n        calico_rr:\n          hosts: {}\nEOF\n}\n\nchange_control_plane_order\n
                                                                                2. Remove the first master node in an abnormal state

After adjusting the order of nodes in the host list, remove node1, which is in an abnormal state, from the K8s control plane.

                                                                                  Note

If node1 is offline or malfunctioning, the following parameters must be added to extraArgs; you do not need to add them when node1 is online.

                                                                                  reset_nodes=false # Skip resetting node operation\nallow_ungraceful_removal=true # Allow ungraceful removal operation\n
                                                                                  # Image spray-job can use an accelerator address here\n\nSPRAY_IMG_ADDR=\"ghcr.m.daocloud.io/kubean-io/spray-job\"\nSPRAY_RLS_2_22_TAG=\"2.22-336b323\"\nKUBE_VERSION=\"v1.24.14\"\nCLUSTER_NAME=\"cluster-mini-1\"\nREMOVE_NODE_NAME=\"node1\"\n\ncat << EOF | kubectl apply -f -\n---\napiVersion: kubean.io/v1alpha1\nkind: ClusterOperation\nmetadata:\n  name: cluster-mini-1-remove-node-ops\nspec:\n  cluster: ${CLUSTER_NAME}\n  image: ${SPRAY_IMG_ADDR}:${SPRAY_RLS_2_22_TAG}\n  actionType: playbook\n  action: remove-node.yml\n  extraArgs: -e node=${REMOVE_NODE_NAME} -e reset_nodes=false -e allow_ungraceful_removal=true -e kube_version=${KUBE_VERSION}\n  postHook:\n    - actionType: playbook\n      action: cluster-info.yml\nEOF\n
                                                                                3. Manually modify the cluster configuration, edit and update cluster-info

                                                                                  # Edit cluster-info\nkubectl -n kube-public edit cm cluster-info\n\n# 1. If the ca.crt certificate is updated, the content of the certificate-authority-data field needs to be updated\n# View the base64 encoding of the ca certificate:\ncat /etc/kubernetes/ssl/ca.crt | base64 | tr -d '\\n'\n\n# 2. Change the IP address in the server field to the new first master IP, this document will use the IP address of node2, 172.30.41.162\n
                                                                                4. Manually modify the cluster configuration, edit and update kubeadm-config

                                                                                  # Edit kubeadm-config\nkubectl -n kube-system edit cm kubeadm-config\n\n# Change controlPlaneEndpoint to the new first master IP,\n# this document will use the IP address of node2, 172.30.41.162\n
                                                                                5. Scale up the master node and update the cluster

                                                                                  Note

                                                                                  • Use --limit to limit the update operation to only affect the etcd and kube_control_plane node groups.
• In an offline environment, spec.preHook needs to include enable-repo.yml, and the extraArgs parameter must provide the correct repo_list for the relevant OS.
                                                                                  cat << EOF | kubectl apply -f -\n---\napiVersion: kubean.io/v1alpha1\nkind: ClusterOperation\nmetadata:\n  name: cluster-mini-1-update-cluster-ops\nspec:\n  cluster: ${CLUSTER_NAME}\n  image: ${SPRAY_IMG_ADDR}:${SPRAY_RLS_2_22_TAG}\n  actionType: playbook\n  action: cluster.yml\n  extraArgs: --limit=etcd,kube_control_plane -e kube_version=${KUBE_VERSION}\n  preHook:\n    - actionType: playbook\n      action: enable-repo.yml  # This yaml needs to be added in an offline environment,\n                               # and set the correct repo-list (install operating system packages),\n                               # the following parameter values are for reference only\n      extraArgs: |\n        -e \"{repo_list: ['http://172.30.41.0:9000/kubean/centos/\\$releasever/os/\\$basearch','http://172.30.41.0:9000/kubean/centos-iso/\\$releasever/os/\\$basearch']}\"\n  postHook:\n    - actionType: playbook\n      action: cluster-info.yml\nEOF\n

You have now completed the replacement of the first master node.
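As a quick sanity check, you can verify the node status and the new control plane endpoint (the grep target assumes the kubeadm-config edit performed in the earlier step):

kubectl get nodes\nkubectl -n kube-system get cm kubeadm-config -o yaml | grep controlPlaneEndpoint\n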

                                                                                "},{"location":"en/admin/kpanda/best-practice/update-offline-cluster.html","title":"Offline Deployment/Upgrade Guide for Worker Clusters","text":"

                                                                                Note

This document is specifically designed for deploying or upgrading the Kubernetes version of worker clusters created on AI platform in offline mode. It does not cover the deployment or upgrade of other Kubernetes components.

                                                                                This guide is applicable to the following offline scenarios:

• You can follow the operational guidelines to deploy the recommended Kubernetes version in a non-GUI environment created by AI platform.
• You can upgrade the Kubernetes version of worker clusters created using AI platform by generating incremental offline packages.

                                                                                The overall approach is as follows:

1. Build the offline package on an internet-connected node.
                                                                                2. Import the offline package to the bootstrap node.
                                                                                3. Update the Kubernetes version manifest for the global service cluster.
                                                                                4. Use the AI platform UI to create or upgrade the Kubernetes version of the worker cluster.

                                                                                Note

                                                                                For a list of currently supported offline Kubernetes versions, refer to the list of Kubernetes versions supported by Kubean.

                                                                                "},{"location":"en/admin/kpanda/best-practice/update-offline-cluster.html#building-the-offline-package-on-an-integrated-node","title":"Building the Offline Package on an Integrated Node","text":"

Since the offline environment cannot connect to the internet, you need to prepare an internet-connected node in advance to build the incremental offline package, with the Docker or Podman service running on that node. Refer to How to Install Docker?

1. Check the status of the Docker service on the internet-connected node.

                                                                                  ps aux | grep docker\n

                                                                                  You should see output similar to the following:

root     12341  0.5  0.2 654372 26736 ?        Ssl  23:45   0:00 /usr/bin/dockerd\nroot     12351  0.2  0.1 625080 13740 ?        Ssl  23:45   0:00 docker-containerd --config /var/run/docker/containerd/containerd.toml\nroot     13024  0.0  0.0 112824   980 pts/0    S+   23:45   0:00 grep --color=auto docker\n
2. Create a file named manifest.yaml in the /root directory of the internet-connected node with the following command:

                                                                                  vi manifest.yaml\n

                                                                                  The content of manifest.yaml should be as follows:

                                                                                  manifest.yaml
                                                                                  image_arch:\n- \"amd64\"\nkube_version: # Specify the version of the cluster to be upgraded\n- \"v1.28.0\"\n
                                                                                  • image_arch specifies the CPU architecture type, with options for amd64 and arm64.
                                                                                  • kube_version indicates the version of the Kubernetes offline package to be built. You can refer to the supported offline Kubernetes versions mentioned earlier.
3. Create a folder named data in the /root directory to store the incremental offline package.

                                                                                  mkdir data\n

                                                                                  Run the following command to generate the offline package using the kubean airgap-patch image. Make sure the tag of the airgap-patch image matches the Kubean version, and that the Kubean version covers the Kubernetes version you wish to upgrade.

                                                                                  # Assuming the Kubean version is v0.13.9\ndocker run --rm -v $(pwd)/manifest.yaml:/manifest.yaml -v $(pwd)/data:/data ghcr.m.daocloud.io/kubean-io/airgap-patch:v0.13.9\n

After the container finishes running, check the files in the data folder. The folder structure should look like this:

                                                                                  data\n\u251c\u2500\u2500 amd64\n\u2502   \u251c\u2500\u2500 files\n\u2502   \u2502   \u251c\u2500\u2500 import_files.sh\n\u2502   \u2502   \u2514\u2500\u2500 offline-files.tar.gz\n\u2502   \u251c\u2500\u2500 images\n\u2502   \u2502   \u251c\u2500\u2500 import_images.sh\n\u2502   \u2502   \u2514\u2500\u2500 offline-images.tar.gz\n\u2502   \u2514\u2500\u2500 os-pkgs\n\u2502       \u2514\u2500\u2500 import_ospkgs.sh\n\u2514\u2500\u2500 localartifactset.cr.yaml\n
                                                                                "},{"location":"en/admin/kpanda/best-practice/update-offline-cluster.html#importing-the-offline-package-to-the-bootstrap-node","title":"Importing the Offline Package to the Bootstrap Node","text":"
1. Copy the data directory from the internet-connected node to the /root directory of the bootstrap node. On the internet-connected node, run the following command:

                                                                                  scp -r data root@x.x.x.x:/root\n

                                                                                  Replace x.x.x.x with the IP address of the bootstrap node.

2. On the bootstrap node, copy the image files in the data folder to the built-in Docker registry of the bootstrap node. After logging into the bootstrap node, run the following commands:

                                                                                  1. Navigate to the directory where the image files are located.

                                                                                    cd data/amd64/images\n
                                                                                  2. Run the import_images.sh script to import the images into the built-in Docker Registry of the bootstrap node.

                                                                                    REGISTRY_ADDR=\"127.0.0.1\" ./import_images.sh\n

                                                                                  Note

                                                                                  The above command is only applicable to the built-in Docker Registry of the bootstrap node. If you are using an external registry, use the following command:

                                                                                  REGISTRY_SCHEME=https REGISTRY_ADDR=${registry_address} REGISTRY_USER=${username} REGISTRY_PASS=${password} ./import_images.sh\n
                                                                                  • REGISTRY_ADDR is the address of the image repository, such as 1.2.3.4:5000.
                                                                                  • If the image repository requires username and password authentication, set REGISTRY_USER and REGISTRY_PASS accordingly.
3. On the bootstrap node, copy the binary files in the data folder to the built-in Minio service of the bootstrap node.

                                                                                  1. Navigate to the directory where the binary files are located.

                                                                                    cd data/amd64/files/\n
                                                                                  2. Run the import_files.sh script to import the binary files into the built-in Minio service of the bootstrap node.

                                                                                    MINIO_USER=rootuser MINIO_PASS=rootpass123 ./import_files.sh http://127.0.0.1:9000\n

                                                                                Note

                                                                                The above command is only applicable to the built-in Minio service of the bootstrap node. If you are using an external Minio, replace http://127.0.0.1:9000 with the access address of the external Minio. \"rootuser\" and \"rootpass123\" are the default account and password for the built-in Minio service of the bootstrap node.

                                                                                "},{"location":"en/admin/kpanda/best-practice/update-offline-cluster.html#updating-the-kubernetes-version-manifest-for-the-global-service-cluster","title":"Updating the Kubernetes Version Manifest for the Global Service Cluster","text":"

                                                                                Run the following command on the bootstrap node to deploy the localartifactset resource to the global service cluster:

kubectl apply -f data/localartifactset.cr.yaml\n
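To confirm that the resource was created, you can list Kubean's LocalArtifactSet resources (this assumes the localartifactsets.kubean.io resource name used by recent Kubean releases; adjust if your version differs):

kubectl get localartifactsets.kubean.io\n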
                                                                                "},{"location":"en/admin/kpanda/best-practice/update-offline-cluster.html#next-steps","title":"Next Steps","text":"

                                                                                Log into the AI platform UI management interface to continue with the following actions:

                                                                                1. Refer to the Creating Cluster Documentation to create a worker cluster, where you can select the incremental version of Kubernetes.

                                                                                2. Refer to the Upgrading Cluster Documentation to upgrade your self-built worker cluster.

                                                                                "},{"location":"en/admin/kpanda/best-practice/use-otherlinux-create-custer.html","title":"Creating a Cluster on Non-Supported Operating Systems","text":"

                                                                                This document outlines how to create a worker cluster on an unsupported OS in offline mode. For the range of OS supported by AI platform, refer to AI platform Supported Operating Systems.

                                                                                The main process for creating a worker cluster on an unsupported OS in offline mode is illustrated in the diagram below:

                                                                                Next, we will use the openAnolis operating system as an example to demonstrate how to create a cluster on a non-mainstream operating system.

                                                                                "},{"location":"en/admin/kpanda/best-practice/use-otherlinux-create-custer.html#prerequisites","title":"Prerequisites","text":"
                                                                                • AI platform Full Mode has been deployed following the documentation: Offline Installation of AI platform Enterprise.
• At least one internet-connected node with the same architecture and OS version as the nodes of the target cluster.
                                                                                "},{"location":"en/admin/kpanda/best-practice/use-otherlinux-create-custer.html#procedure","title":"Procedure","text":""},{"location":"en/admin/kpanda/best-practice/use-otherlinux-create-custer.html#online-node-building-an-offline-package","title":"Online Node - Building an Offline Package","text":"

                                                                                Find an online environment with the same architecture and OS as the nodes in the target cluster. In this example, we will use AnolisOS 8.8 GA. Run the following command to generate an offline os-pkgs package:

                                                                                # Download relevant scripts and build os packages package\n$ curl -Lo ./pkgs.yml https://raw.githubusercontent.com/kubean-io/kubean/main/build/os-packages/others/pkgs.yml\n$ curl -Lo ./other_os_pkgs.sh https://raw.githubusercontent.com/kubean-io/kubean/main/build/os-packages/others/other_os_pkgs.sh && chmod +x other_os_pkgs.sh\n$ ./other_os_pkgs.sh build # Build the offline package\n

                                                                                After executing the above command, you should have a compressed package named os-pkgs-anolis-8.8.tar.gz in the current directory. The file structure in the current directory should look like this:

                                                                                    .\n    \u251c\u2500\u2500 other_os_pkgs.sh\n    \u251c\u2500\u2500 pkgs.yml\n    \u2514\u2500\u2500 os-pkgs-anolis-8.8.tar.gz\n
                                                                                "},{"location":"en/admin/kpanda/best-practice/use-otherlinux-create-custer.html#offline-node-installing-the-offline-package","title":"Offline Node - Installing the Offline Package","text":"

Copy the three files generated on the online node (other_os_pkgs.sh, pkgs.yml, and os-pkgs-anolis-8.8.tar.gz) to all nodes of the target cluster in the offline environment.

Log in to any node of the target cluster in the offline environment, and run the following command to install the os-pkgs package on the node:

                                                                                # Configure environment variables\n$ export PKGS_YML_PATH=/root/workspace/os-pkgs/pkgs.yml # Path to the pkgs.yml file on the current offline node\n$ export PKGS_TAR_PATH=/root/workspace/os-pkgs/os-pkgs-anolis-8.8.tar.gz # Path to the os-pkgs-anolis-8.8.tar.gz file on the current offline node\n$ export SSH_USER=root # Username for the current offline node\n$ export SSH_PASS=dangerous # Password for the current offline node\n$ export HOST_IPS='172.30.41.168' # IP address of the current offline node\n$ ./other_os_pkgs.sh install # Install the offline package\n

After executing the above command, wait for the prompt All packages for node (X.X.X.X) have been installed, which indicates that the installation is complete.

                                                                                "},{"location":"en/admin/kpanda/best-practice/use-otherlinux-create-custer.html#go-to-the-user-interface-to-create-cluster","title":"Go to the User Interface to Create Cluster","text":"

                                                                                Refer to the documentation on Creating a Worker Cluster to create an openAnolis cluster.

                                                                                "},{"location":"en/admin/kpanda/clusterops/cluster-oversold.html","title":"Dynamic Resource Overprovision in the Cluster","text":"

                                                                                Currently, many businesses experience peaks and valleys in demand. To ensure service performance and stability, resources are typically allocated based on peak demand when deploying services. However, peak periods may be very short, resulting in resource waste during off-peak times. Cluster resource overprovision utilizes these allocated but unused resources (i.e., the difference between allocation and usage) to enhance cluster resource utilization and reduce waste.

                                                                                This article mainly introduces how to use the cluster dynamic resource overprovision feature.

                                                                                "},{"location":"en/admin/kpanda/clusterops/cluster-oversold.html#prerequisites","title":"Prerequisites","text":"
• The container management module has been integrated with a Kubernetes cluster, or a Kubernetes cluster has been created, and the cluster's UI is accessible.
                                                                                • A namespace has been created, and the user has been granted Cluster Admin permissions. For details, refer to Cluster Authorization.
                                                                                "},{"location":"en/admin/kpanda/clusterops/cluster-oversold.html#enable-cluster-overprovision","title":"Enable Cluster Overprovision","text":"
                                                                                1. Click Clusters in the left navigation bar, then click the name of the target cluster to enter the Cluster Details page.

                                                                                2. On the cluster details page, click Cluster Operations -> Cluster Settings in the left navigation bar, then select the Advanced Configuration tab.

                                                                                3. Enable cluster overprovision and set the overprovision ratio.

                                                                                  • If the cro-operator plugin is not installed, click the Install Now button and follow the installation process as per Managing Helm Apps.
                                                                                  • If the cro-operator plugin is already installed, enable the cluster overprovision switch to start using the cluster overprovision feature.

                                                                                  Note

                                                                                  The corresponding namespace in the cluster must have the following label applied for the cluster overprovision policy to take effect.

                                                                                  clusterresourceoverrides.admission.autoscaling.openshift.io/enabled: \"true\"\n
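
                                                                                  For example, a minimal way to apply the label from the command line (the namespace name test-ns is hypothetical):

                                                                                  kubectl label namespace test-ns clusterresourceoverrides.admission.autoscaling.openshift.io/enabled=true\n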

                                                                                "},{"location":"en/admin/kpanda/clusterops/cluster-oversold.html#using-cluster-overprovision","title":"Using Cluster Overprovision","text":"

                                                                                Once the cluster dynamic resource overprovision ratio is set, it will take effect while workloads are running. The following example uses nginx to validate the use of resource overprovision capabilities.

                                                                                1. Create a workload (nginx) and set appropriate resource limits. For the creation process, refer to Creating Stateless Workloads (Deployment).

                                                                                2. Check whether the ratio of the Pod's resource requests to limits meets the overprovision ratio.
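
                                                                                A quick way to check is to read the requests and limits straight from the Pod spec; a minimal sketch, with a placeholder Pod name:

                                                                                  # Compare requests and limits; their ratio should reflect the configured overprovision ratio\nkubectl -n default get pod <pod-name> -o jsonpath='{.spec.containers[0].resources}'\n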

                                                                                "},{"location":"en/admin/kpanda/clusterops/cluster-settings.html","title":"Cluster Settings","text":"

                                                                                Cluster settings are used to customize advanced feature settings for your cluster, including whether to enable GPU support, the Helm repo refresh cycle, the number of Helm operation records to retain, etc.

                                                                                • Enable GPU: GPUs and the corresponding driver plug-ins need to be installed on the cluster in advance.

                                                                                  Click the name of the target cluster, and click Operations and Maintenance -> Cluster Settings -> Addons in the left navigation bar.

                                                                                • Other settings include the base image for Helm operations, the registry refresh cycle, the number of operation records retained, and whether to enable cluster deletion protection (when enabled, the cluster cannot be deleted directly).

                                                                                "},{"location":"en/admin/kpanda/clusterops/latest-operations.html","title":"Recent Operations","text":"

                                                                                On this page, you can view the recent cluster operation records and Helm operation records, as well as the YAML files and logs of each operation, and you can also delete a certain record.

                                                                                Set the number of reserved entries for Helm operations:

                                                                                By default, the system keeps the last 100 Helm operation records. Keeping too many records can cause data redundancy, while keeping too few may lose the key operation records you need. Set a reasonable retention count based on your actual situation. The specific steps are as follows:

                                                                                1. Click the name of the target cluster, and click Recent Operations -> Helm Operations -> Set Number of Retained Items in the left navigation bar.

                                                                                2. Set how many Helm operation records need to be kept, and click OK .

                                                                                "},{"location":"en/admin/kpanda/clusters/access-cluster.html","title":"Access Clusters","text":"

                                                                                Clusters integrated into or created in the AI platform Container Management module can be accessed not only through the UI but also in two other ways:

                                                                                • Access online via CloudShell
                                                                                • Access via kubectl after downloading the cluster certificate

                                                                                Note

                                                                                When accessing the cluster, the user should have Cluster Admin permission or higher.

                                                                                "},{"location":"en/admin/kpanda/clusters/access-cluster.html#access-via-cloudshell","title":"Access via CloudShell","text":"
                                                                                1. Enter Clusters page, select the cluster you want to access via CloudShell, click the ... icon on the right, and then click Console from the dropdown list.

                                                                                2. Run the kubectl get node command in the Console to verify connectivity between CloudShell and the cluster. If the console returns the cluster's node information, you can access and manage the cluster through CloudShell.
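
                                                                                For example (the node names and versions below are illustrative):

                                                                                  $ kubectl get node\nNAME       STATUS   ROLES           AGE   VERSION\nmaster01   Ready    control-plane   35d   v1.27.5\nworker01   Ready    <none>          35d   v1.27.5\n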

                                                                                "},{"location":"en/admin/kpanda/clusters/access-cluster.html#access-via-kubectl","title":"Access via kubectl","text":"

                                                                                If you want to access and manage remote clusters from a local node, make sure you have met these prerequisites:

                                                                                • Your local node and the cloud cluster are in a connected network.
                                                                                • The cluster certificate has been downloaded to the local node.
                                                                                • The kubectl tool has been installed on the local node. For detailed installation guides, see Installing tools.

                                                                                If everything is in place, follow these steps to access a cloud cluster from your local environment.

                                                                                1. Enter Clusters page, find your target cluster, click ... on the right, and select Download kubeconfig in the drop-down list.

                                                                                2. Set the Kubeconfig validity period and click Download .

                                                                                3. Open the downloaded certificate and copy its content to the config file of the local node.

                                                                                  By default, the kubectl tool will look for a file named config in the $HOME/.kube directory on the local node. This file stores access credentials of clusters, and kubectl can access the cluster with it (a minimal sketch of this step follows at the end of these steps).

                                                                                4. Run the following command on the local node to verify its connectivity with the cluster:

                                                                                  kubectl get pod -n default\n

                                                                                  An expected output is as follows:

                                                                                  NAME                            READY   STATUS      RESTARTS    AGE\ndao-2048-2048-58c7f7fc5-mq7h4   1/1     Running     0           30h\n

                                                                                Now you can access and manage the cluster locally with kubectl.
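
                                                                                A minimal sketch of steps 3-4, assuming the certificate was downloaded as kubeconfig.yaml (the file name is hypothetical; back up any existing config first):

                                                                                  mkdir -p $HOME/.kube\ncp ./kubeconfig.yaml $HOME/.kube/config\nkubectl get pod -n default   # verify connectivity\n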

                                                                                "},{"location":"en/admin/kpanda/clusters/cluster-role.html","title":"Cluster Roles","text":"

                                                                                Suanova AI platform categorizes clusters based on different functionalities to help users better manage IT infrastructure.

                                                                                "},{"location":"en/admin/kpanda/clusters/cluster-role.html#global-service-cluster","title":"Global Service Cluster","text":"

                                                                                This cluster is used to run AI platform components such as Container Management, Global Management, and Insight. It generally does not carry business workloads.

                                                                                Supported Features | Description\nK8s Version | 1.22+\nOperating System | RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD\nFull Lifecycle Management | Supported\nK8s Resource Management | Supported\nCloud Native Storage | Supported\nCloud Native Network | Calico, Cilium, Multus, and other CNIs\nPolicy Management | Supports network policies, quota policies, resource limits, disaster recovery policies, security policies"},{"location":"en/admin/kpanda/clusters/cluster-role.html#management-cluster","title":"Management Cluster","text":"

                                                                                This cluster is used to manage worker clusters and generally does not carry business workloads.

                                                                                • Classic Mode deploys the global service cluster and management cluster in different clusters, suitable for multi-data center, multi-architecture enterprise scenarios.
                                                                                • Simple Mode deploys the management cluster and global service cluster in the same cluster.
                                                                                Supported Features | Description\nK8s Version | 1.22+\nOperating System | RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD\nFull Lifecycle Management | Supported\nK8s Resource Management | Supported\nCloud Native Storage | Supported\nCloud Native Network | Calico, Cilium, Multus, and other CNIs\nPolicy Management | Supports network policies, quota policies, resource limits, disaster recovery policies, security policies"},{"location":"en/admin/kpanda/clusters/cluster-role.html#worker-cluster","title":"Worker Cluster","text":"

                                                                                This is a cluster created using Container Management and is mainly used to carry business workloads. This cluster is managed by the management cluster.

                                                                                Supported Features | Description\nK8s Version | Supports K8s 1.22 and above\nOperating System | RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD\nFull Lifecycle Management | Supported\nK8s Resource Management | Supported\nCloud Native Storage | Supported\nCloud Native Network | Calico, Cilium, Multus, and other CNIs\nPolicy Management | Supports network policies, quota policies, resource limits, disaster recovery policies, security policies"},{"location":"en/admin/kpanda/clusters/cluster-role.html#integrated-cluster","title":"Integrated Cluster","text":"

                                                                                This cluster is used to integrate existing standard K8s clusters, including but not limited to self-built clusters in local data centers, clusters provided by public cloud vendors, clusters provided by private cloud vendors, edge clusters, Xinchuang clusters, heterogeneous clusters, and different Suanova clusters. It is mainly used to carry business workloads.

                                                                                Supported Features | Description\nK8s Version | 1.18+\nSupported Vendors | VMware Tanzu, Amazon EKS, Redhat Openshift, SUSE Rancher, Alibaba ACK, Huawei CCE, Tencent TKE, Standard K8s Cluster, Suanova\nFull Lifecycle Management | Not Supported\nK8s Resource Management | Supported\nCloud Native Storage | Supported\nCloud Native Network | Depends on the network mode of the integrated cluster's kernel\nPolicy Management | Supports network policies, quota policies, resource limits, disaster recovery policies, security policies

                                                                                Note

                                                                                A cluster can have multiple cluster roles. For example, a cluster can be both a global service cluster and a management cluster or a worker cluster.

                                                                                "},{"location":"en/admin/kpanda/clusters/cluster-scheduler-plugin.html","title":"Deploy Second Scheduler scheduler-plugins in a Cluster","text":"

                                                                                This page describes how to deploy a second scheduler, scheduler-plugins, in a cluster.

                                                                                "},{"location":"en/admin/kpanda/clusters/cluster-scheduler-plugin.html#why-do-we-need-scheduler-plugins","title":"Why do we need scheduler-plugins?","text":"

                                                                                Clusters created through the platform install the native K8s scheduler, but the native scheduler has several limitations:

                                                                                • The native scheduler cannot meet all scheduling requirements; you can use CoScheduling, CapacityScheduling, or other scheduler-plugins instead.
                                                                                • In special scenarios, a new scheduler is needed to complete scheduling tasks without affecting the workflow of the native scheduler.
                                                                                • Schedulers with different functionalities can be distinguished, and different scheduling scenarios achieved, by switching scheduler names.

                                                                                This page takes the scenario of combining the vgpu scheduler with the coscheduling plugin capability of scheduler-plugins as an example to introduce how to install and use scheduler-plugins.

                                                                                "},{"location":"en/admin/kpanda/clusters/cluster-scheduler-plugin.html#installing-scheduler-plugins","title":"Installing scheduler-plugins","text":""},{"location":"en/admin/kpanda/clusters/cluster-scheduler-plugin.html#prerequisites","title":"Prerequisites","text":"
                                                                                • kubean support for this feature was introduced in v0.13.0; ensure that your version is v0.13.0 or higher.
                                                                                • The installed version of scheduler-plugins is v0.27.8; ensure that your cluster version is compatible with it. Refer to the Compatibility Matrix.
                                                                                "},{"location":"en/admin/kpanda/clusters/cluster-scheduler-plugin.html#installation-process","title":"Installation Process","text":"
                                                                                1. Add the scheduler-plugins parameter in Create Cluster -> Advanced Settings -> Custom Parameters.

                                                                                  scheduler_plugins_enabled: true\nscheduler_plugins_plugin_config:\n  - name: Coscheduling\n    args:\n      permitWaitingTimeSeconds: 10 # default is 60\n

                                                                                  Parameters:

                                                                                  • scheduler_plugins_enabled : set to true to enable the scheduler-plugins capability.
                                                                                  • You can enable or disable specific plugins by setting the scheduler_plugins_enabled_plugins or scheduler_plugins_disabled_plugins options. See K8s Official Plugin Names for reference.
                                                                                  • If you need to set parameters for custom plugins, configure scheduler_plugins_plugin_config, for example: set the permitWaitingTimeSeconds parameter for coscheduling. See K8s Official Plugin Configuration for reference.
                                                                                2. After successful cluster creation, the system will automatically install the scheduler-plugins scheduler and controller components. You can check the workload status in the corresponding cluster's deployments, as shown below.
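
                                                                                For example, the check might look like this (assuming the components are deployed in the scheduler-plugins namespace, as in the examples that follow):

                                                                                  kubectl get deploy -n scheduler-plugins\n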

                                                                                "},{"location":"en/admin/kpanda/clusters/cluster-scheduler-plugin.html#using-scheduler-plugins","title":"Using scheduler-plugins","text":"

                                                                                Here is an example of how to use scheduler-plugins by demonstrating a scenario where the vgpu scheduler is used in combination with the coscheduling plugin capability of scheduler-plugins.

                                                                                1. Install vgpu in the Helm Charts and set the values.yaml parameters.

                                                                                  • schedulerName: scheduler-plugins-scheduler: This is the scheduler name for scheduler-plugins installed by kubean, and currently cannot be modified.
                                                                                  • scheduler.kubeScheduler.enabled: false: Do not install kube-scheduler and use vgpu-scheduler as a separate extender.
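
                                                                                  Combining these two values, a hypothetical install command could look like the following (the chart reference and namespace are placeholders; the value keys follow the bullets above):

                                                                                  helm install vgpu <vgpu-chart> -n <namespace> --set schedulerName=scheduler-plugins-scheduler --set scheduler.kubeScheduler.enabled=false\n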
                                                                                2. Extend vgpu-scheduler on scheduler-plugins.

                                                                                  [root@master01 charts]# kubectl get cm -n scheduler-plugins scheduler-config -ojsonpath=\"{.data.scheduler-config\\.yaml}\"\n
                                                                                  apiVersion: kubescheduler.config.k8s.io/v1\nkind: KubeSchedulerConfiguration\nleaderElection:\n  leaderElect: false\nprofiles:\n  # Compose all plugins in one profile\n  - schedulerName: scheduler-plugins-scheduler\n    plugins:\n      multiPoint:\n        enabled:\n          - name: Coscheduling\n          - name: CapacityScheduling\n          - name: NodeResourceTopologyMatch\n          - name: NodeResourcesAllocatable\n        disabled:\n          - name: PrioritySort\npluginConfig:\n  - args:\n      permitWaitingTimeSeconds: 10\n    name: Coscheduling\n

                                                                                  Modify the scheduler-config ConfigMap of scheduler-plugins to add the vgpu-scheduler extender:

                                                                                  [root@master01 charts]# kubectl edit cm -n scheduler-plugins scheduler-config\n
                                                                                  apiVersion: kubescheduler.config.k8s.io/v1\nkind: KubeSchedulerConfiguration\nleaderElection:\n  leaderElect: false\nprofiles:\n  # Compose all plugins in one profile\n  - schedulerName: scheduler-plugins-scheduler\n    plugins:\n      multiPoint:\n        enabled:\n          - name: Coscheduling\n          - name: CapacityScheduling\n          - name: NodeResourceTopologyMatch\n          - name: NodeResourcesAllocatable\n        disabled:\n          - name: PrioritySort\npluginConfig:\n  - args:\n      permitWaitingTimeSeconds: 10\n    name: Coscheduling\nextenders:\n  - urlPrefix: \"${urlPrefix}\"\n    filterVerb: filter\n    bindVerb: bind\n    nodeCacheCapable: true\n    ignorable: true\n    httpTimeout: 30s\n    weight: 1\n    enableHTTPS: true\n    tlsConfig:\n      insecure: true\n    managedResources:\n      - name: nvidia.com/vgpu\n        ignoredByScheduler: true\n      - name: nvidia.com/gpumem\n        ignoredByScheduler: true\n      - name: nvidia.com/gpucores\n        ignoredByScheduler: true\n      - name: nvidia.com/gpumem-percentage\n        ignoredByScheduler: true\n      - name: nvidia.com/priority\n        ignoredByScheduler: true\n      - name: cambricon.com/mlunum\n        ignoredByScheduler: true\n
                                                                                3. After installing vgpu-scheduler, the system will automatically create a service (svc), and the urlPrefix specifies the URL of the svc.

                                                                                  Note

                                                                                  • The svc is the Service exposing the vgpu-scheduler Pods. You can use the following command in the namespace where the nvidia-vgpu plugin is installed to get the external access information for port 443.

                                                                                    kubectl get svc -n ${namespace}\n
                                                                                  • The urlPrefix format is https://${ip address}:${port}
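
                                                                                  For instance, composing the urlPrefix from the Service's cluster IP (the placeholders <svc-name> and <namespace> refer to the vgpu-scheduler Service found with the command above):

                                                                                    ip=$(kubectl get svc <svc-name> -n <namespace> -o jsonpath='{.spec.clusterIP}')\necho \"https://${ip}:443\"   # use this value as urlPrefix\n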

                                                                                4. Restart the scheduler pod of scheduler-plugins to load the new configuration file.

                                                                                  Note

                                                                                  When creating a vgpu application, you do not need to specify the name of a scheduler; the vgpu-scheduler webhook automatically sets the scheduler name to \"scheduler-plugins-scheduler\".
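
                                                                                For step 4, a typical restart command is sketched below (the deployment name is an assumption; confirm it with kubectl get deploy -n scheduler-plugins):

                                                                                  kubectl -n scheduler-plugins rollout restart deploy scheduler-plugins-scheduler\n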

                                                                                "},{"location":"en/admin/kpanda/clusters/cluster-status.html","title":"Cluster Status","text":"

                                                                                AI platform Container Management module can manage two types of clusters: integrated clusters and created clusters.

                                                                                • Integrated clusters: clusters created in other platforms and now integrated into AI platform.
                                                                                • Created clusters: clusters created in AI platform.

                                                                                For more information about cluster types, see Cluster Role.

                                                                                We have designed several statuses for these two clusters.

                                                                                "},{"location":"en/admin/kpanda/clusters/cluster-status.html#integrated-clusters","title":"Integrated Clusters","text":"Status | Description\nIntegrating | The cluster is being integrated into AI platform.\nRemoving | The cluster is being removed from AI platform.\nRunning | The cluster is running as expected.\nUnknown | The cluster is lost. Data displayed in the AI platform UI is the cached data before the disconnection, which does not represent real-time data. Any operation during this status will not take effect. You should check cluster network connectivity or host status."},{"location":"en/admin/kpanda/clusters/cluster-status.html#created-clusters","title":"Created Clusters","text":"Status | Description\nCreating | The cluster is being created.\nUpdating | The Kubernetes version of the cluster is being upgraded.\nDeleting | The cluster is being deleted.\nRunning | The cluster is running as expected.\nUnknown | The cluster is lost. Data displayed in the AI platform UI is the cached data before the disconnection, which does not represent real-time data. Any operation during this status will not take effect. You should check cluster network connectivity or host status.\nFailed | Cluster creation failed. You should check the logs for detailed reasons."},{"location":"en/admin/kpanda/clusters/cluster-version.html","title":"Supported Kubernetes Versions","text":"

                                                                                In AI platform, the integrated clusters and created clusters have different version support mechanisms.

                                                                                This page focuses on the version support mechanism for created clusters.

                                                                                The Kubernetes community supports three minor versions at a time, currently 1.26, 1.27, and 1.28. When a new version is released, the supported range is incremented. For example, when the community releases 1.29, the supported versions will be 1.27, 1.28, and 1.29.

                                                                                To ensure the security and stability of the clusters, when creating clusters in AI platform, the supported version range will always be one version lower than the community's version.

                                                                                For instance, if the Kubernetes community supports v1.25, v1.26, and v1.27, then the version range for creating worker clusters in AI platform will be v1.24, v1.25, and v1.26. Additionally, a stable version, such as 1.24.7, will be recommended to users.

                                                                                Furthermore, the version range for creating worker clusters in AI platform will remain highly synchronized with the community. When the community version increases incrementally, the version range for creating worker clusters in AI platform will also increase by one version.

                                                                                "},{"location":"en/admin/kpanda/clusters/cluster-version.html#supported-kubernetes-versions_1","title":"Supported Kubernetes Versions","text":"Kubernetes Community Versions | Created Worker Cluster Versions | Recommended Versions for Created Worker Cluster | AI platform Installer | Release Date\n1.26, 1.27, 1.28 | 1.26, 1.27, 1.28 | 1.27.5 | v0.13.0 | 2023.11.30"},{"location":"en/admin/kpanda/clusters/create-cluster.html","title":"Create Worker Clusters","text":"

                                                                                In AI platform Container Management, clusters can have four roles: global service cluster, management cluster, worker cluster, and integrated cluster. An integrated cluster can only be integrated from third-party vendors (see Integrate Cluster).

                                                                                This page explains how to create a Worker Cluster. By default, when creating a new Worker Cluster, the operating system type and CPU architecture of the worker nodes should be consistent with the Global Service Cluster. If you want to create a cluster with a different operating system or architecture than the Global Service Cluster, refer to Creating an Ubuntu Worker Cluster on a CentOS Management Platform for instructions.

                                                                                It is recommended to use the supported operating systems in AI platform to create the cluster. If your local nodes are not within the supported range, you can refer to Creating a Cluster on Non-Mainstream Operating Systems for instructions.

                                                                                "},{"location":"en/admin/kpanda/clusters/create-cluster.html#prerequisites","title":"Prerequisites","text":"

                                                                                Certain prerequisites must be met before creating a cluster:

                                                                                • Prepare enough nodes to be joined into the cluster.
                                                                                • It is recommended to use Kubernetes version 1.27.5. For the specific version range, refer to the AI platform Cluster Version Support System. Currently, the supported version range for created worker clusters is v1.26.0-v1.28. If you need to create a cluster with a lower version, refer to the Supported Cluster Versions.
                                                                                • The target host must allow IPv4 forwarding. If using IPv6 in Pods and Services, the target server needs to allow IPv6 forwarding.
                                                                                • AI platform does not provide firewall management. You need to pre-define the firewall rules of the target host by yourself. To avoid errors during cluster creation, it is recommended to disable the firewall of the target host.
                                                                                • See Node Availability Check.
                                                                                "},{"location":"en/admin/kpanda/clusters/create-cluster.html#steps","title":"Steps","text":"
                                                                                1. Enter the Container Management module, click Create Cluster on the upper right corner of the Clusters page.

                                                                                2. Fill in the basic information by referring to the following instructions.

                                                                                  • Cluster Name: can only contain lowercase letters, numbers, and hyphens (\"-\"); must start and end with a lowercase letter or number; up to 63 characters in total.
                                                                                  • Managed By: Choose a cluster to manage this new cluster through its lifecycle, such as creating, upgrading, node scaling, deleting the new cluster, etc.
                                                                                  • Runtime: Select the runtime environment of the cluster. Currently support containerd and docker (see How to Choose Container Runtime).
                                                                                  • Kubernetes Version: a span of three minor versions is allowed, such as 1.23-1.25, subject to the versions supported by the management cluster.

                                                                                3. Fill in the node configuration information and click Node Check .

                                                                                  • High Availability: When enabled, at least 3 controller nodes are required. When disabled, only 1 controller node is needed.

                                                                                    It is recommended to use High Availability mode in production environments.

                                                                                  • Credential Type: Choose whether to access nodes using username/password or public/private keys.

                                                                                    If using public/private key authentication, SSH keys for the nodes need to be configured in advance. Refer to Using SSH Key Authentication for Nodes.

                                                                                  • Same Password: When enabled, all nodes in the cluster will have the same access password. Enter the unified password for accessing all nodes in the field below. If disabled, you can set separate usernames and passwords for each node.

                                                                                  • Node Information: Set node names and IPs.
                                                                                  • NTP Time Synchronization: When enabled, time will be automatically synchronized across all nodes. Provide the NTP server address.

                                                                                4. If the node check passes, click Next . If the check fails, update the node information and check again.

                                                                                5. Fill in the network configuration and click Next .

                                                                                  • CNI: Provide network services for Pods in the cluster. CNI cannot be changed after the cluster is created. Supports cilium and calico. Selecting none means no CNI will be installed when creating the cluster; you may install a CNI later.

                                                                                    For CNI configuration details, see Cilium Installation Parameters or Calico Installation Parameters.

                                                                                  • Container IP Range: Set an IP range for allocating IPs for containers in the cluster. IP range determines the max number of containers allowed in the cluster. Cannot be modified after creation.

                                                                                  • Service IP Range: Set an IP range for allocating IPs for container Services in the cluster. This range determines the max number of container Services that can be created in the cluster. Cannot be modified after creation.
                                                                                6. Fill in the plug-in configuration and click Next .

                                                                                7. Fill in advanced settings and click OK .

                                                                                  • kubelet_max_pods : Set the maximum number of Pods per node. The default is 110.
                                                                                  • hostname_override : Reset the hostname (not recommended).
                                                                                  • kubernetes_audit : Kubernetes audit log, enabled by default.
                                                                                  • auto_renew_certificate : Automatically renew the certificate of the control plane on the first Monday of each month, enabled by default.
                                                                                  • disable_firewalld&ufw : Disable the firewall to prevent the node from being inaccessible during installation.
                                                                                  • insecure_registries : Set the address of your private container registry. If you use a private container registry, filling in its address allows the container engine to bypass certificate authentication and pull images.
                                                                                  • yum_repos : Fill in the Yum source registry address.

                                                                                Success

                                                                                • After correctly filling in the above information, the page will prompt that the cluster is being created.
                                                                                • Creating a cluster takes a long time, so you need to wait patiently. You can click the Back to Clusters button to let it run in the background.
                                                                                • To view the current status, click Real-time Log .

                                                                                Note

                                                                                • When the cluster is in an unknown state, it means that the current cluster has been disconnected.
                                                                                • The data displayed by the system is the cached data before the disconnection, which does not represent real data.
                                                                                • Any operations performed in the disconnected state will not take effect. Please check the cluster network connectivity or Host Status.
                                                                                "},{"location":"en/admin/kpanda/clusters/delete-cluster.html","title":"Delete/Remove Clusters","text":"

                                                                                Clusters created in AI platform Container Management can be either deleted or removed. Clusters integrated into AI platform can only be removed.

                                                                                Info

                                                                                If you want to delete an integrated cluster, you should delete it in the platform where it is created.

                                                                                In AI platform, the difference between Delete and Remove is:

                                                                                • Delete will destroy the cluster and reset the data of all nodes in the cluster. All data will be cleared and lost. Making a backup before deleting a cluster is a recommended best practice. You will no longer be able to use that cluster.
                                                                                • Remove just removes the cluster from AI platform. It will not destroy the cluster and no data will be lost. You can still use the cluster in other platforms or re-integrate it into AI platform later if needed.

                                                                                Note

                                                                                • You should have Admin or Kpanda Owner permissions to perform delete or remove operations.
                                                                                • Before deleting a cluster, you should turn off Cluster Deletion Protection in Cluster Settings -> Advanced Settings , otherwise the Delete Cluster option will not be displayed.
                                                                                • The global service cluster cannot be deleted or removed.
                                                                                1. Enter the Container Management module, find your target cluster, click ... on the right, and select Delete cluster / Remove in the drop-down list.

                                                                                2. Enter the cluster name to confirm and click Delete .

                                                                                3. You will be automatically redirected to the cluster list. The status of this cluster will change to Deleting . It may take a while to delete/remove a cluster.

                                                                                "},{"location":"en/admin/kpanda/clusters/integrate-cluster.html","title":"Integrate Clusters","text":"

                                                                                With the cluster integration feature, AI platform allows you to manage on-premises and cloud clusters of various providers in a unified manner. This is important for avoiding the risk of being locked in by a single provider, helping enterprises safely migrate their business to the cloud.

                                                                                In AI platform Container Management module, you can integrate a cluster of the following providers: standard Kubernetes clusters, Redhat Openshift, SUSE Rancher, VMware Tanzu, Amazon EKS, Aliyun ACK, Huawei CCE, Tencent TKE, etc.

                                                                                "},{"location":"en/admin/kpanda/clusters/integrate-cluster.html#prerequisites","title":"Prerequisites","text":"
                                                                                • Prepare a cluster of K8s v1.22+ and ensure its network connectivity.
                                                                                • The operator should have the NS Editor or higher permissions.
                                                                                "},{"location":"en/admin/kpanda/clusters/integrate-cluster.html#steps","title":"Steps","text":"
                                                                                1. Enter Container Management module, and click Integrate Cluster in the upper right corner.

                                                                                2. Fill in the basic information by referring to the following instructions.

                                                                                  • Cluster Name: It should be unique and cannot be changed after the integration. Maximum 63 characters, can only contain lowercase letters, numbers, and a separator (\"-\"), and must start and end with a lowercase letter or number.
                                                                                  • Cluster Alias: Enter any characters, no more than 60 characters.
                                                                                  • Release Distribution: the cluster provider; the mainstream vendors listed at the beginning of this page are supported.
                                                                                3. Fill in the KubeConfig of the target cluster and click Verify Config . The cluster can be successfully connected only after the verification is passed.

                                                                                  Click How do I get the KubeConfig? to see the specific steps for getting this file.

                                                                                4. Confirm that all parameters are filled in correctly and click OK in the lower right corner of the page.

                                                                                Note

                                                                                The status of the newly integrated cluster is Integrating , which will become Running after the integration succeeds.

                                                                                "},{"location":"en/admin/kpanda/clusters/integrate-rancher-cluster.html","title":"Integrate the Rancher Cluster","text":"

                                                                                This page explains how to integrate a Rancher cluster.

                                                                                "},{"location":"en/admin/kpanda/clusters/integrate-rancher-cluster.html#prerequisites","title":"Prerequisites","text":"
                                                                                • Prepare a Rancher cluster with administrator privileges and ensure network connectivity between the container management cluster and the target cluster.
                                                                                • Have permissions not lower than Kpanda Owner.
                                                                                "},{"location":"en/admin/kpanda/clusters/integrate-rancher-cluster.html#steps","title":"Steps","text":""},{"location":"en/admin/kpanda/clusters/integrate-rancher-cluster.html#step-1-create-a-serviceaccount-user-with-administrator-privileges-in-the-rancher-cluster","title":"Step 1: Create a ServiceAccount user with administrator privileges in the Rancher cluster","text":"
                                                                                1. Log in to the Rancher cluster with a role that has administrator privileges, and create a file named sa.yaml using the terminal.

                                                                                  vi sa.yaml\n

                                                                                  Press the i key to enter insert mode, then copy and paste the following content:

                                                                                  sa.yaml
                                                                                  apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: rancher-rke\nrules:\n- apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n- nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: rancher-rke\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: rancher-rke\nsubjects:\n- kind: ServiceAccount\n  name: rancher-rke\n  namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: rancher-rke\n  namespace: kube-system\n

                                                                                  Press the Esc key to exit insert mode, then type :wq to save and exit.

                                                                                2. Run the following command in the current directory to create a ServiceAccount named rancher-rke (referred to as SA for short):

                                                                                  kubectl apply -f sa.yaml\n

                                                                                  The expected output is as follows:

                                                                                  clusterrole.rbac.authorization.k8s.io/rancher-rke created\nclusterrolebinding.rbac.authorization.k8s.io/rancher-rke created\nserviceaccount/rancher-rke created\n
                                                                                3. Create a secret named rancher-rke-secret and bind the secret to the rancher-rke SA.

                                                                                  kubectl apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: rancher-rke-secret\n  namespace: kube-system\n  annotations:\n    kubernetes.io/service-account.name: rancher-rke\ntype: kubernetes.io/service-account-token\nEOF\n

                                                                                  The output is expected to be:

                                                                                  secret/rancher-rke-secret created\n

                                                                                  Note

                                                                                  If your cluster version is lower than 1.24, please ignore this step and proceed to the next one.

                                                                                4. Check secret for rancher-rke SA:

                                                                                  kubectl -n kube-system get secret | grep rancher-rke | awk '{print $1}'\n

                                                                                  The output is expected to be:

                                                                                  rancher-rke-secret\n

                                                                                  Check the rancher-rke-secret secret:

                                                                                  kubectl -n kube-system describe secret rancher-rke-secret\n

                                                                                  The output is expected to be:

                                                                                  Name:         rancher-rke-secret\nNamespace:    kube-system\nLabels:       <none>\nAnnotations:  kubernetes.io/service-account.name: rancher-rke\n            kubernetes.io/service-account.uid: d83df5d9-bd7d-488d-a046-b740618a0174\n\nType:  kubernetes.io/service-account-token\n\nData\n====\nca.crt:     570 bytes\nnamespace:  11 bytes\ntoken:      eyJhbGciOiJSUzI1NiIsImtpZCI6IjUtNE9nUWZLRzVpbEJORkZaNmtCQXhqVzRsZHU4MHhHcDBfb0VCaUo0V1kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJyYW5jaGVyLXJrZS1zZWNyZXQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoicmFuY2hlci1ya2UiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkODNkZjVkOS1iZDdkLTQ4OGQtYTA0Ni1iNzQwNjE4YTAxNzQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06cmFuY2hlci1ya2UifQ.VNsMtPEFOdDDeGt_8VHblcMRvjOwPXMM-79o9UooHx6q-VkHOcIOp3FOT2hnEdNnIsyODZVKCpEdCgyozX-3y5x2cZSZpocnkMcBbQm-qfTyUcUhAY7N5gcYUtHUhvRAsNWJcsDCn6d96gT_qo-ddo_cT8Ri39Lc123FDYOnYG-YGFKSgRQVy7Vyv34HIajZCCjZzy7i--eE_7o4DXeTjNqAFMFstUxxHBOXI3Rdn1zKQKqh5Jhg4ES7X-edSviSUfJUX-QV_LlAw5DuAyGPH7bDH4QaQ5k-p6cIctmpWZE-9wRDlKA4LYRblKE7MJcI6OmM4ldlMM0Jc8N-gCtl4w\n
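
                                                                                  To extract just the decoded token in one line (a standard kubectl and base64 combination, using the secret created above):

                                                                                  kubectl -n kube-system get secret rancher-rke-secret -o jsonpath='{.data.token}' | base64 -d\n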
                                                                                "},{"location":"en/admin/kpanda/clusters/integrate-rancher-cluster.html#step-2-update-kubeconfig-with-the-rancher-rke-sa-authentication-on-your-local-machine","title":"Step 2: Update kubeconfig with the rancher-rke SA authentication on your local machine","text":"

                                                                                Perform the following steps on any local node where kubectl is installed:

                                                                                1. Configure the kubectl token.

                                                                                  kubectl config set-credentials rancher-rke --token=<token from rancher-rke-secret>   # the token obtained in Step 1\n

                                                                                  For example,

                                                                                  kubectl config set-credentials eks-admin --token=eyJhbGciOiJSUzI1NiIsImtpZCI6IjUtNE9nUWZLRzVpbEJORkZaNmtCQXhqVzRsZHU4MHhHcDBfb0VCaUo0V1kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJyYW5jaGVyLXJrZS1zZWNyZXQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoicmFuY2hlci1ya2UiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkODNkZjVkOS1iZDdkLTQ4OGQtYTA0Ni1iNzQwNjE4YTAxNzQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06cmFuY2hlci1ya2UifQ.VNsMtPEFOdDDeGt_8VHblcMRvjOwPXMM-79o9UooHx6q-VkHOcIOp3FOT2hnEdNnIsyODZVKCpEdCgyozX-3y5x2cZSZpocnkMcBbQm-qfTyUcUhAY7N5gcYUtHUhvRAsNWJcsDCn6d96gT_qo-ddo_cT8Ri39Lc123FDYOnYG-YGFKSgRQVy7Vyv34HIajZCCjZzy7i--eE_7o4DXeTjNqAFMFstUxxHBOXI3Rdn1zKQKqh5Jhg4ES7X-edSviSUfJUX-QV_LlAw5DuAyGPH7bDH4QaQ5k-p6cIctmpWZE-9wRDlKA4LYRblKE7MJcI6OmM4ldlMM0Jc8N-gCtl4w\n
                                                                                2. Configure the kubectl APIServer information.

                                                                                  kubectl config set-cluster {cluster-name} --insecure-skip-tls-verify=true --server={APIServer}\n
                                                                                  • {cluster-name} : the name of your Rancher cluster.
                                                                                  • {APIServer} : the access address of the cluster, usually the IP address of the control plane node plus port 6443, such as https://10.X.X.X:6443 .

                                                                                  For example,

                                                                                  kubectl config set-cluster rancher-rke --insecure-skip-tls-verify=true --server=https://10.X.X.X:6443\n
                                                                                3. Configure the kubectl context.

                                                                                  kubectl config set-context {context-name} --cluster={cluster-name} --user={SA-usename}\n

                                                                                  For example,

                                                                                  kubectl config set-context rancher-rke-context --cluster=rancher-rke --user=rancher-rke\n
                                                                                4. Switch kubectl to the newly created context rancher-rke-context .

                                                                                  kubectl config use-context rancher-rke-context\n
                                                                                5. Fetch the kubeconfig information for the context rancher-rke-context .

                                                                                  kubectl config view --minify --flatten --raw\n

                                                                                  The output is expected to be:

                                                                                  apiVersion: v1\nclusters:\n- cluster:\n    insecure-skip-tls-verify: true\n    server: https://77C321BCF072682C70C8665ED4BFA10D.gr7.ap-southeast-1.eks.amazonaws.com\n  name: joincluster\ncontexts:\n- context:\n    cluster: joincluster\n    user: eks-admin\n  name: ekscontext\ncurrent-context: ekscontext\nkind: Config\npreferences: {}\nusers:\n- name: eks-admin\n  user:\n    token: eyJhbGciOiJSUzI1NiIsImtpZCI6ImcxTjJwNkktWm5IbmRJU1RFRExvdWY1TGFWVUtGQ3VIejFtNlFQcUNFalEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2V
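
                                                                                  To save this kubeconfig to a file for the integration step that follows (the file name is arbitrary):

                                                                                  kubectl config view --minify --flatten --raw > rancher-rke.kubeconfig\n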

                                                                                "},{"location":"en/admin/kpanda/clusters/integrate-rancher-cluster.html#step-3-connect-the-cluster-in-the-suanova-interface","title":"Step 3: Connect the cluster in the Suanova Interface","text":"

                                                                                Using the kubeconfig file fetched earlier, refer to the Integrate Cluster documentation to integrate the Rancher cluster to the global cluster.

                                                                                "},{"location":"en/admin/kpanda/clusters/runtime.html","title":"How to choose the container runtime","text":"

                                                                                 The container runtime is an important component of Kubernetes that manages the lifecycle of containers and container images. Kubernetes made containerd the default container runtime in version 1.19, and removed support for the Dockershim component in version 1.24.

                                                                                 Therefore, we recommend the lightweight containerd over the Docker runtime, as it has become the mainstream runtime choice.
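
                                                                                 To check which runtime the nodes of an existing cluster are using, you can inspect the CONTAINER-RUNTIME column of the node list:

                                                                                 kubectl get nodes -o wide\n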

                                                                                 In addition, some operating system distributions offer limited compatibility with the Docker runtime. Runtime support for each operating system is as follows:

                                                                                "},{"location":"en/admin/kpanda/clusters/runtime.html#operating-systems-and-supported-runtimes","title":"Operating systems and supported runtimes","text":"Operating System Supported containerd Versions Supported Docker Versions CentOS 1.5.5, 1.5.7, 1.5.8, 1.5.9, 1.5.10, 1.5.11, 1.5.12, 1.5.13, 1.6.0, 1.6.1, 1.6.2, 1.6.3, 1.6.4, 1.6.5, 1.6.6, 1.6.7, 1.6.8, 1.6.9, 1.6.10, 1.6.11, 1.6.12, 1.6.13, 1.6.14, 1.6.15 (default) 18.09, 19.03, 20.10 (default) RedHatOS 1.5.5, 1.5.7, 1.5.8, 1.5.9, 1.5.10, 1.5.11, 1.5.12, 1.5.13, 1.6.0, 1.6.1, 1.6.2, 1.6.3, 1.6.4, 1.6.5, 1.6.6, 1.6.7, 1.6.8, 1.6.9, 1.6.10, 1.6.11, 1.6.12, 1.6.13, 1.6.14, 1.6.15 (default) 18.09, 19.03, 20.10 (default) KylinOS 1.5.5, 1.5.7, 1.5.8, 1.5.9, 1.5.10, 1.5.11, 1.5.12, 1.5.13, 1.6.0, 1.6.1, 1.6.2, 1.6.3, 1.6.4, 1.6.5, 1.6.6, 1.6.7, 1.6.8, 1.6.9, 1.6.10, 1.6.11, 1.6.12, 1.6.13, 1.6.14, 1.6.15 (default) 19.03 (Only supported by ARM architecture, Docker is not supported as a runtime under x86 architecture)

                                                                                Note

                                                                                In the offline installation mode, you need to prepare the runtime offline package of the relevant operating system in advance.

                                                                                "},{"location":"en/admin/kpanda/clusters/upgrade-cluster.html","title":"Cluster Upgrade","text":"

                                                                                 The Kubernetes community releases a minor version roughly every quarter, and each version is maintained for only about 9 months. Major bugs and security vulnerabilities are no longer patched once a version reaches end of maintenance. Upgrading clusters manually is cumbersome and places a heavy workload on administrators.

                                                                                 In Suanova, you can upgrade a Kubernetes cluster with one click through the web UI.

                                                                                Danger

                                                                                 After the upgrade, it is not possible to roll back to the previous version. Please proceed with caution.

                                                                                Note

                                                                                • Kubernetes versions are denoted as x.y.z , where x is the major version, y is the minor version, and z is the patch version.
                                                                                • Cluster upgrades across minor versions are not allowed, e.g. a direct upgrade from 1.23 to 1.25 is not possible.
                                                                                 • **Integrated clusters do not support version upgrades. If there is no \"Cluster Upgrade\" item in the left navigation bar, check whether the cluster is an integrated cluster.**
                                                                                • The global service cluster can only be upgraded through the terminal.
                                                                                 • When upgrading a worker cluster, its management cluster must already be connected to the container management module and running normally.
                                                                                1. Click the name of the target cluster in the cluster list.

                                                                                2. Then click Cluster Operation and Maintenance -> Cluster Upgrade in the left navigation bar, and click Version Upgrade in the upper right corner of the page.

                                                                                3. Select the version that can be upgraded, and enter the cluster name to confirm.

                                                                                4. After clicking OK , you can see the upgrade progress of the cluster.

                                                                                5. The cluster upgrade is expected to take 30 minutes. You can click the Real-time Log button to view the detailed log of the cluster upgrade.

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/create-configmap.html","title":"Create ConfigMaps","text":"

                                                                                 ConfigMaps store non-confidential data as key-value pairs, decoupling configuration data from application code. ConfigMaps can be used as container environment variables, command-line parameters, or configuration files in storage volumes.

                                                                                Note

                                                                                • The data saved in ConfigMaps cannot exceed 1 MiB. If you need to store larger volumes of data, it is recommended to mount a storage volume or use an independent database or file service.

                                                                                 • ConfigMaps do not provide confidentiality or encryption. If you want to store encrypted data, use a Secret or another third-party tool to ensure data privacy.

                                                                                 You can create ConfigMaps in two ways:

                                                                                • Graphical form creation
                                                                                • YAML creation
                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/create-configmap.html#prerequisites","title":"Prerequisites","text":"
                                                                                 • You have integrated or created a Kubernetes cluster and can access its UI.

                                                                                • Created a namespace, user, and authorized the user as NS Editor. For details, refer to Namespace Authorization.

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/create-configmap.html#graphical-form-creation","title":"Graphical form creation","text":"
                                                                                1. Click the name of a cluster on the Clusters page to enter Cluster Details .

                                                                                2. In the left navigation bar, click ConfigMap and Secret -> ConfigMap , and click the Create ConfigMap button in the upper right corner.

                                                                                3. Fill in the configuration information on the Create ConfigMap page, and click OK .

                                                                                  !!! note

                                                                                    Click __Upload File__ to import an existing local file to quickly create a ConfigMap.\n
                                                                                 4. After the creation is complete, click More on the right side of the ConfigMap to perform operations such as editing the YAML, updating, exporting, and deleting.

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/create-configmap.html#yaml-creation","title":"YAML creation","text":"
                                                                                1. Click the name of a cluster on the Clusters page to enter Cluster Details .

                                                                                2. In the left navigation bar, click ConfigMap and Secret -> ConfigMap , and click the YAML Create button in the upper right corner.

                                                                                3. Fill in or paste the configuration file prepared in advance, and then click OK in the lower right corner of the pop-up box.

                                                                                  !!! note

                                                                                   - Click __Import__ to import an existing file locally to quickly create ConfigMaps.\n - After filling in the data, click __Download__ to save the configuration file locally.\n
                                                                                 4. After the creation is complete, click More on the right side of the ConfigMap to perform operations such as editing the YAML, updating, exporting, and deleting.

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/create-configmap.html#configmap-yaml-example","title":"ConfigMap YAML example","text":"
                                                                                 ```yaml\n kind: ConfigMap\n apiVersion: v1\n metadata:\n   name: kube-root-ca.crt\n   namespace: default\n   annotations:\n data:\n   version: '1.0'\n ```\n

                                                                                Next step: Use ConfigMaps

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/create-secret.html","title":"Create Secret","text":"

                                                                                 A Secret is a resource object used to store and manage sensitive information such as passwords, OAuth tokens, SSH keys, and TLS credentials. Using Secrets means you don't need to include sensitive data in your application code.

                                                                                 Secrets are typically used in the following scenarios:

                                                                                 • As container environment variables, to provide information required while the container is running.
                                                                                 • As Pod data volumes.
                                                                                 • As the authentication credential for the container registry when the kubelet pulls container images.

                                                                                 You can create Secrets in two ways:

                                                                                • Graphical form creation
                                                                                • YAML creation
                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/create-secret.html#prerequisites","title":"Prerequisites","text":"
                                                                                 • You have integrated or created a Kubernetes cluster and can access its UI

                                                                                • Created a namespace, user, and authorized the user as NS Editor. For details, refer to Namespace Authorization.

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/create-secret.html#create-secret-with-wizard","title":"Create secret with wizard","text":"
                                                                                1. Click the name of a cluster on the Clusters page to enter Cluster Details .

                                                                                2. In the left navigation bar, click ConfigMap and Secret -> Secret , and click the Create Secret button in the upper right corner.

                                                                                3. Fill in the configuration information on the Create Secret page, and click OK .

                                                                                   Note the following when filling in the configuration:

                                                                                   • The name of the Secret must be unique within the same namespace
                                                                                   • Secret type (see the kubectl sketch after this list for terminal equivalents):
                                                                                     • Default (Opaque): the Kubernetes default Secret type, which supports arbitrary user-defined data.
                                                                                     • TLS (kubernetes.io/tls): credentials for TLS client or server data access.
                                                                                     • Container registry information (kubernetes.io/dockerconfigjson): credentials for container registry access.
                                                                                     • Username and password (kubernetes.io/basic-auth): credentials for basic authentication.
                                                                                     • Custom: a type customized by the user according to business needs.
                                                                                   • Secret data: the data stored in the Secret; the required fields differ by type
                                                                                     • When the Secret type is default (Opaque) or custom: multiple key-value pairs can be filled in.
                                                                                     • When the Secret type is TLS (kubernetes.io/tls): you need to fill in the certificate and private key data. The certificate is a self-signed or CA-signed credential used for authentication; a certificate request is a request for a signature and needs to be signed with a private key.
                                                                                     • When the Secret type is container registry information (kubernetes.io/dockerconfigjson): you need to fill in the account and password of the private container registry.
                                                                                     • When the Secret type is username and password (kubernetes.io/basic-auth): you need to specify a username and password.
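
                                                                                   For reference, a minimal sketch of creating the common Secret types from the terminal (all names, files, and values below are illustrative):

                                                                                   kubectl create secret generic my-opaque-secret --from-literal=username=admin  # Opaque\nkubectl create secret tls my-tls-secret --cert=./tls.crt --key=./tls.key  # kubernetes.io/tls\nkubectl create secret docker-registry my-registry-secret --docker-server=registry.example.com --docker-username=user --docker-password=pass  # kubernetes.io/dockerconfigjson\n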
                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/create-secret.html#yaml-creation","title":"YAML creation","text":"
                                                                                1. Click the name of a cluster on the Clusters page to enter Cluster Details .

                                                                                2. In the left navigation bar, click ConfigMap and Secret -> Secret , and click the YAML Create button in the upper right corner.

                                                                                3. Fill in the YAML configuration on the Create with YAML page, and click OK .

                                                                                   You can import a local YAML file, or download and save the completed file locally.

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/create-secret.html#key-yaml-example","title":"key YAML example","text":"
                                                                                 ```yaml\n apiVersion: v1\n kind: Secret\n metadata:\n   name: secretdemo\n type: Opaque\n data:\n   username: ****\n   password: ****\n ```\n
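
                                                                                 Note that the values under data must be base64-encoded; the \"****\" placeholders above stand in for encoded values. A quick sketch of encoding plain-text values:

                                                                                 echo -n 'admin' | base64   # YWRtaW4=\necho -n 's3cret' | base64  # czNjcmV0\n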

                                                                                 Next step: Use Secret

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/use-configmap.html","title":"Use ConfigMaps","text":"

                                                                                 A ConfigMap is a Kubernetes API object used to store non-confidential data as key-value pairs; it can hold configuration that other objects need to use. Containers can consume a ConfigMap as environment variables, command-line arguments, or configuration files in a storage volume. By using ConfigMaps, configuration data and application code can be kept separate, providing a more flexible way to modify application configuration.

                                                                                Note

                                                                                 ConfigMaps do not provide confidentiality or encryption. If the data to be stored is confidential, use a Secret or another third-party tool to ensure data privacy instead of a ConfigMap. In addition, when using a ConfigMap in a container, the container and the ConfigMap must be in the same cluster namespace.

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/use-configmap.html#scenes-to-be-used","title":"scenes to be used","text":"

                                                                                You can use ConfigMaps in Pods. There are many use cases, mainly including:

                                                                                • Use ConfigMaps to set the environment variables of the container

                                                                                • Use ConfigMaps to set the command line parameters of the container

                                                                                • Use ConfigMaps as container data volumes

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/use-configmap.html#set-the-environment-variables-of-the-container","title":"Set the environment variables of the container","text":"

                                                                                You can use the ConfigMap as the environment variable of the container through the graphical interface or the terminal command line.

                                                                                Note

                                                                                 ConfigMap Import uses the whole ConfigMap as the source of environment variables, while ConfigMap Key-Value Import uses a single key in the ConfigMap as the value of one environment variable.

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/use-configmap.html#graphical-interface-operation","title":"Graphical interface operation","text":"

                                                                                When creating a workload through an image, you can set environment variables for the container by selecting Import ConfigMaps or Import ConfigMap Key Values on the Environment Variables interface.

                                                                                1. Go to the Image Creation Workload page, in the Container Configuration step, select the Environment Variables configuration, and click the Add Environment Variable button.

                                                                                2. Select ConfigMap Import or ConfigMap Key Value Import in the environment variable type.

                                                                                   • When the environment variable type is selected as ConfigMap import , enter the variable name, prefix, and ConfigMap name in sequence.

                                                                                   • When the environment variable type is selected as ConfigMap key-value import , enter the variable name, ConfigMap name, and key name in sequence.

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/use-configmap.html#command-line-operation","title":"Command line operation","text":"

                                                                                You can set ConfigMaps as environment variables when creating a workload, using the valueFrom parameter to refer to the Key/Value in the ConfigMap.

                                                                                apiVersion: v1\nkind: Pod\nmetadata:\n   name: configmap-pod-1\nspec:\n   containers:\n     - name: test-container\n       image: busybox\n       command: [ \"/bin/sh\", \"-c\", \"env\" ]\n       env:\n         - name: SPECIAL_LEVEL_KEY\n           valueFrom: # (1)\n             configMapKeyRef:\n               name: kpanda-configmap # (2)\n               key: SPECIAL_LEVEL # (3)\n   restartPolicy: Never\n
                                                                                1. Use valueFrom to specify the value of the env reference ConfigMap
                                                                                2. Referenced configuration file name
                                                                                3. Referenced ConfigMap key
                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/use-configmap.html#set-the-command-line-parameters-of-the-container","title":"Set the command line parameters of the container","text":"

                                                                                 You can use ConfigMaps to set a command or parameter values in the container, using the environment variable substitution syntax $(VAR_NAME), as shown below.

                                                                                apiVersion: v1\nkind: Pod\nmetadata:\n   name: configmap-pod-3\nspec:\n   containers:\n     - name: test-container\n       image: busybox\n       command: [ \"/bin/sh\", \"-c\", \"echo $(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY)\" ]\n       env:\n         - name: SPECIAL_LEVEL_KEY\n           valueFrom:\n             configMapKeyRef:\n               name: kpanda-configmap\n               key: SPECIAL_LEVEL\n         - name: SPECIAL_TYPE_KEY\n           valueFrom:\n             configMapKeyRef:\n               name: kpanda-configmap\n               key: SPECIAL_TYPE\n   restartPolicy: Never\n

                                                                                After the Pod runs, the output is as follows.

                                                                                Hello Kpanda\n
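
                                                                                 This assumes the referenced ConfigMap defines the two keys with these values; a minimal sketch of such a ConfigMap:

                                                                                 apiVersion: v1\nkind: ConfigMap\nmetadata:\n   name: kpanda-configmap\ndata:\n   SPECIAL_LEVEL: Hello\n   SPECIAL_TYPE: Kpanda\n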
                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/use-configmap.html#used-as-container-data-volume","title":"Used as container data volume","text":"

                                                                                 You can mount a ConfigMap as a container data volume through the graphical interface or the terminal command line.

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/use-configmap.html#graphical-operation","title":"Graphical operation","text":"

                                                                                When creating a workload through an image, you can use the ConfigMap as the data volume of the container by selecting the storage type as \"ConfigMap\" on the \"Data Storage\" interface.

                                                                                 1. Go to the Image Creation Workload page, select the Data Storage configuration in the Container Configuration step, and click the __Add__ button in the __Node Path Mapping__ list.

                                                                                2. Select ConfigMap in the storage type, and enter container path , subpath and other information in sequence.

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/use-configmap.html#command-line-operation_1","title":"Command line operation","text":"

                                                                                 You can also mount a ConfigMap in a Pod's storage volume from the command line.

                                                                                Here is an example Pod that mounts a ConfigMap as a volume:

                                                                                 apiVersion: v1\nkind: Pod\nmetadata:\n   name: mypod\nspec:\n   containers:\n   - name: mypod\n     image: redis\n     volumeMounts:\n     - name: foo\n       mountPath: \"/etc/foo\"\n       readOnly: true\n   volumes:\n   - name: foo\n     configMap:\n       name: myconfigmap\n

                                                                                If there are multiple containers in a Pod, each container needs its own volumeMounts block, but you only need to set one spec.volumes block per ConfigMap.
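
                                                                                 For example, here is a minimal sketch of a Pod in which two containers share one ConfigMap volume (the container names and images are illustrative): each container declares its own volumeMounts, while spec.volumes lists the ConfigMap only once.

                                                                                 apiVersion: v1\nkind: Pod\nmetadata:\n   name: two-container-pod\nspec:\n   containers:\n   - name: app\n     image: redis\n     volumeMounts:\n     - name: foo\n       mountPath: \"/etc/foo\"\n       readOnly: true\n   - name: sidecar\n     image: busybox\n     command: [ \"sh\", \"-c\", \"sleep 3600\" ]\n     volumeMounts:\n     - name: foo\n       mountPath: \"/etc/foo\"\n       readOnly: true\n   volumes:\n   - name: foo\n     configMap:\n       name: myconfigmap\n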

                                                                                Note

                                                                                When a ConfigMap is used as a data volume mounted on a container, the ConfigMap can only be read as a read-only file.

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html","title":"use key","text":"

                                                                                 A Secret is a resource object used to store and manage sensitive information such as passwords, OAuth tokens, SSH keys, and TLS credentials. Using Secrets means you don't need to include sensitive data in your application code.

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html#scenes-to-be-used","title":"scenes to be used","text":"

                                                                                 You can use Secrets in Pods in a variety of scenarios, mainly including:

                                                                                 • As container environment variables, to provide information required while the container is running.
                                                                                 • As Pod data volumes.
                                                                                 • As the authentication credential for the container registry when the kubelet pulls container images.
                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html#use-the-key-to-set-the-environment-variable-of-the-container","title":"Use the key to set the environment variable of the container","text":"

                                                                                 You can use a Secret as a container environment variable through the GUI or the terminal command line.

                                                                                Note

                                                                                 Secret Import uses the whole Secret as the source of environment variables, while Secret Key-Value Import uses a single key in the Secret as the value of one environment variable.

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html#graphical-interface-operation","title":"Graphical interface operation","text":"

                                                                                 When creating a workload from an image, you can set environment variables for the container by selecting Secret Import or Secret Key-Value Import on the Environment Variables interface.

                                                                                1. Go to the Image Creation Workload page.

                                                                                2. Select the Environment Variables configuration in Container Configuration , and click the Add Environment Variable button.

                                                                                 3. Select Secret Import or Secret Key-Value Import as the environment variable type.

                                                                                   • When the environment variable type is selected as Secret Import , enter the variable name, prefix, and Secret name in sequence.

                                                                                   • When the environment variable type is selected as Secret Key-Value Import , enter the variable name, Secret name, and key name in sequence.

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html#command-line-operation","title":"Command line operation","text":"

                                                                                As shown in the example below, you can set the secret as an environment variable when creating the workload, using the valueFrom parameter to refer to the Key/Value in the Secret.

                                                                                 apiVersion: v1\nkind: Pod\nmetadata:\n   name: secret-env-pod\nspec:\n   containers:\n   - name: mycontainer\n     image: redis\n     env:\n       - name: SECRET_USERNAME\n         valueFrom:\n           secretKeyRef:\n             name: mysecret\n             key: username\n             optional: false # (1)\n       - name: SECRET_PASSWORD\n         valueFrom:\n           secretKeyRef:\n             name: mysecret\n             key: password\n             optional: false # (2)\n
                                                                                 1. optional: false is the default; it means the Secret \"mysecret\" must exist and contain a key named \"username\"
                                                                                 2. optional: false is the default; it means the Secret \"mysecret\" must exist and contain a key named \"password\"
                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html#use-the-key-as-the-pods-data-volume","title":"Use the key as the pod's data volume","text":""},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html#graphical-interface-operation_1","title":"Graphical interface operation","text":"

                                                                                 When creating a workload through an image, you can mount a Secret as a container data volume by selecting \"Secret\" as the storage type on the \"Data Storage\" interface.

                                                                                1. Go to the Image Creation Workload page.

                                                                                2. In the Container Configuration , select the Data Storage configuration, and click the Add button in the Node Path Mapping list.

                                                                                3. Select Secret in the storage type, and enter container path , subpath and other information in sequence.

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html#command-line-operation_1","title":"Command line operation","text":"

                                                                                The following is an example of a Pod that mounts a Secret named mysecret via a data volume:

                                                                                 apiVersion: v1\nkind: Pod\nmetadata:\n   name: mypod\nspec:\n   containers:\n   - name: mypod\n     image: redis\n     volumeMounts:\n     - name: foo\n       mountPath: \"/etc/foo\"\n       readOnly: true\n   volumes:\n   - name: foo\n     secret:\n       secretName: mysecret\n       optional: false # (1)\n
                                                                                 1. optional: false is the default setting; it means the Secret \"mysecret\" must already exist

                                                                                If the Pod contains multiple containers, each container needs its own volumeMounts block, but only one .spec.volumes setting is required for each Secret.

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html#used-as-the-identity-authentication-credential-for-the-container-registry-when-the-kubelet-pulls-the-container-image","title":"Used as the identity authentication credential for the container registry when the kubelet pulls the container image","text":"

                                                                                 You can use a Secret as the authentication credential for a container registry through the GUI or the terminal command line.

                                                                                "},{"location":"en/admin/kpanda/configmaps-secrets/use-secret.html#graphical-operation","title":"Graphical operation","text":"

                                                                                 When creating a workload through an image, you can use a Secret as the credential for pulling images from a private container registry.

                                                                                1. Go to the Image Creation Workload page.

                                                                                2. In the second step of Container Configuration , select the Basic Information configuration, and click the Select Image button.

                                                                                 3. Select the name of the private container registry from the Container Registry drop-down list in the pop-up box. See Create Secret for details on creating a Secret for a private registry.

                                                                                 4. Enter the image name in the private registry, then click OK to complete the image selection.

                                                                                Note

                                                                                 When creating the Secret, you must enter the correct container registry address, username, and password, and select the correct image name; otherwise, you will not be able to pull images from the container registry.
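
                                                                                 For reference, a minimal sketch of how such a Secret is referenced from a Pod via imagePullSecrets (the registry, image, and Secret names are illustrative):

                                                                                 apiVersion: v1\nkind: Pod\nmetadata:\n   name: private-image-pod\nspec:\n   containers:\n   - name: app\n     image: registry.example.com/myapp:1.0\n   imagePullSecrets:\n   - name: my-registry-secret # a Secret of type kubernetes.io/dockerconfigjson\n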

                                                                                "},{"location":"en/admin/kpanda/custom-resources/create.html","title":"CustomResourceDefinition (CRD)","text":"

                                                                                 In Kubernetes, all objects are abstracted as resources; Pod, Deployment, Service, Volume, and so on are the default resources provided by Kubernetes. These provide important support for daily operations and management, but in some special cases the existing preset resources cannot meet business needs. To extend the capabilities of the Kubernetes API for such cases, CustomResourceDefinition (CRD) was created.

                                                                                The container management module supports interface-based management of custom resources, and its main features are as follows:

                                                                                • Obtain the list and detailed information of custom resources under the cluster
                                                                                • Create custom resources based on YAML
                                                                                 • Create a custom resource instance (CR, Custom Resource) based on YAML
                                                                                • Delete custom resources
                                                                                "},{"location":"en/admin/kpanda/custom-resources/create.html#prerequisites","title":"Prerequisites","text":"
                                                                                 • You have integrated or created a Kubernetes cluster and can access its UI.

                                                                                 • Created a namespace, user, and authorized the user as Cluster Admin. For details, refer to Namespace Authorization.

                                                                                "},{"location":"en/admin/kpanda/custom-resources/create.html#create-crd-via-yaml","title":"Create CRD via YAML","text":"
                                                                                1. Click a cluster name to enter Cluster Details .

                                                                                2. In the left navigation bar, click Custom Resource , and click the YAML Create button in the upper right corner.

                                                                                3. On the Create with YAML page, fill in the YAML statement and click OK .

                                                                                4. Return to the custom resource list page, and you can view the custom resource named crontabs.stable.example.com just created.

                                                                                Custom resource example:

                                                                                CRD example
                                                                                apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  name: crontabs.stable.example.com\nspec:\n  group: stable.example.com\n  versions:\n    - name: v1\n      served: true\n      storage: true\n      schema:\n        openAPIV3Schema:\n          type: object\n          properties:\n            spec:\n              type: object\n              properties:\n                cronSpec:\n                  type: string\n                image:\n                  type: string\n                replicas:\n                  type: integer\n  scope: Namespaced\n  names:\n    plural: crontabs\n    singular: crontab\n    kind: CronTab\n    shortNames:\n    - ct\n
                                                                                "},{"location":"en/admin/kpanda/custom-resources/create.html#create-a-custom-resource-example-via-yaml","title":"Create a custom resource example via YAML","text":"
                                                                                1. Click a cluster name to enter Cluster Details .

                                                                                 2. In the left navigation bar, click Custom Resource to enter the custom resource list page.

                                                                                3. Click the custom resource named crontabs.stable.example.com , enter the details, and click the YAML Create button in the upper right corner.

                                                                                4. On the Create with YAML page, fill in the YAML statement and click OK .

                                                                                5. Return to the details page of crontabs.stable.example.com , and you can view the custom resource named my-new-cron-object just created.

                                                                                CR Example:

                                                                                CR example
                                                                                apiVersion: \"stable.example.com/v1\"\nkind: CronTab\nmetadata:\n  name: my-new-cron-object\nspec:\n  cronSpec: \"* * * * */5\"\n  image: my-awesome-cron-image\n
                                                                                "},{"location":"en/admin/kpanda/gpu/index.html","title":"Overview of GPU Management","text":"

                                                                                 This article introduces the capabilities of the Suanova container management platform for unified operations and management of heterogeneous resources, with a focus on GPUs.

                                                                                "},{"location":"en/admin/kpanda/gpu/index.html#background","title":"Background","text":"

                                                                                 With the rapid development of emerging technologies such as AI applications, large models, and autonomous driving, enterprises face growing demand for compute-intensive tasks and data processing. Traditional compute architectures represented by CPUs can no longer meet these growing computational requirements. Heterogeneous computing represented by GPUs has therefore been widely adopted, thanks to its unique advantages in processing large-scale data, performing complex calculations, and real-time graphics rendering.

                                                                                 Meanwhile, due to a lack of experience and professional solutions for scheduling and managing heterogeneous resources, GPU utilization is often extremely low, resulting in high AI production costs. Reducing costs, increasing efficiency, and improving the utilization of GPUs and other heterogeneous resources has become a pressing issue for many enterprises.

                                                                                "},{"location":"en/admin/kpanda/gpu/index.html#introduction-to-gpu-capabilities","title":"Introduction to GPU Capabilities","text":"

                                                                                The Suanova container management platform supports unified scheduling and operations management of GPUs, NPUs, and other heterogeneous resources, fully unleashing the computational power of GPU resources, and accelerating the development of enterprise AI and other emerging applications. The GPU management capabilities of Suanova are as follows:

                                                                                • Support for unified management of heterogeneous computing resources from domestic and foreign manufacturers such as NVIDIA, Huawei Ascend, and Iluvatar.
                                                                                • Support for multi-card heterogeneous scheduling within the same cluster, with automatic recognition of GPUs in the cluster.
                                                                                • Support for native management solutions for NVIDIA GPUs, vGPUs, and MIG, with cloud native capabilities.
                                                                                 • Support for partitioning a single physical card for use by different tenants, and for allocating GPU resources to tenants and containers based on compute and memory quotas.
                                                                                • Support for multi-dimensional GPU resource monitoring at the cluster, node, and application levels, assisting operators in managing GPU resources.
                                                                                • Compatibility with various training frameworks such as TensorFlow and PyTorch.
                                                                                "},{"location":"en/admin/kpanda/gpu/index.html#introduction-to-gpu-operator","title":"Introduction to GPU Operator","text":"

                                                                                Similar to regular computer hardware, NVIDIA GPUs, as physical devices, need to have the NVIDIA GPU driver installed in order to be used. To reduce the cost of using GPUs on Kubernetes, NVIDIA provides the NVIDIA GPU Operator component to manage various components required for using NVIDIA GPUs. These components include the NVIDIA driver (for enabling CUDA), NVIDIA container runtime, GPU node labeling, DCGM-based monitoring, and more. In theory, users only need to plug the GPU into a compute device managed by Kubernetes, and they can use all the capabilities of NVIDIA GPUs through the GPU Operator. For more information about NVIDIA GPU Operator, refer to the NVIDIA official documentation. For deployment instructions, refer to Offline Installation of GPU Operator.

                                                                                Architecture diagram of NVIDIA GPU Operator:

                                                                                "},{"location":"en/admin/kpanda/gpu/FAQ.html","title":"GPU FAQs","text":""},{"location":"en/admin/kpanda/gpu/FAQ.html#gpu-processes-are-not-visible-while-running-nvidia-smi-inside-a-pod","title":"GPU processes are not visible while running nvidia-smi inside a pod","text":"

                                                                                 Q: When running the nvidia-smi command inside a GPU-utilizing pod, no GPU process information is visible, in both full-card mode and vGPU mode.

                                                                                A: Due to PID namespace isolation, GPU processes are not visible inside the Pod. To view GPU processes, you can use one of the following methods:

                                                                                 • Configure the workload using the GPU with hostPID: true to enable viewing PIDs on the host (see the sketch after this list).
                                                                                • Run the nvidia-smi command in the driver pod of the gpu-operator to view processes.
                                                                                • Run the chroot /run/nvidia/driver nvidia-smi command on the host to view processes.
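
                                                                                 For the first method, a minimal sketch of a GPU workload with hostPID enabled, assuming full-card mode with the standard nvidia.com/gpu resource (the Pod name and image are illustrative):

                                                                                 apiVersion: v1\nkind: Pod\nmetadata:\n   name: gpu-debug-pod\nspec:\n   hostPID: true # share the host PID namespace so nvidia-smi inside the pod can see GPU processes\n   containers:\n   - name: cuda\n     image: nvcr.io/nvidia/cuda:12.2.0-base-ubuntu22.04\n     command: [ \"sleep\", \"infinity\" ]\n     resources:\n       limits:\n         nvidia.com/gpu: 1\n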
                                                                                "},{"location":"en/admin/kpanda/gpu/Iluvatar_usage.html","title":"How to Use Iluvatar GPU in Applications","text":"

                                                                                This section describes how to use Iluvatar virtual GPU on AI platform.

                                                                                "},{"location":"en/admin/kpanda/gpu/Iluvatar_usage.html#prerequisites","title":"Prerequisites","text":"
                                                                                 • The AI platform container management platform is deployed and running smoothly.
                                                                                • The container management module has been integrated with a Kubernetes cluster or a Kubernetes cluster has been created, and the UI interface of the cluster can be accessed.
                                                                                • The Iluvatar GPU driver has been installed on the current cluster. Refer to the Iluvatar official documentation for driver installation instructions, or contact the Suanova ecosystem team for enterprise-level support at peg-pem@daocloud.io.
                                                                                 • The GPUs in the current cluster have not undergone any virtualization operations and are not occupied by other applications.
                                                                                "},{"location":"en/admin/kpanda/gpu/Iluvatar_usage.html#procedure","title":"Procedure","text":""},{"location":"en/admin/kpanda/gpu/Iluvatar_usage.html#configuration-via-user-interface","title":"Configuration via User Interface","text":"
                                                                                1. Check if the GPU in the cluster has been detected. Click Clusters -> Cluster Settings -> Addon Plugins , and check if the proper GPU type has been automatically enabled and detected. Currently, the cluster will automatically enable GPU and set the GPU type as Iluvatar .

                                                                                2. Deploy a workload. Click Clusters -> Workloads and deploy a workload using the image. After selecting the type as (Iluvatar) , configure the GPU resources used by the application:

                                                                                  • Physical Card Count (iluvatar.ai/vcuda-core): Indicates the number of physical cards that the current pod needs to mount. The input value must be an integer and less than or equal to the number of cards on the host machine.

                                                                                  • Memory Usage (iluvatar.ai/vcuda-memory): Indicates the amount of GPU memory occupied by each card. The value is in MB, with a minimum value of 1 and a maximum value equal to the entire memory of the card.

                                                                                  If there are any issues with the configuration values, scheduling failures or resource allocation failures may occur.

                                                                                "},{"location":"en/admin/kpanda/gpu/Iluvatar_usage.html#configuration-via-yaml","title":"Configuration via YAML","text":"

                                                                                 To request GPU resources for a workload, add iluvatar.ai/vcuda-core: 1 and iluvatar.ai/vcuda-memory: 200 to the resource limits. These parameters configure the application to use the physical card's resources.

                                                                                apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-iluvatar-gpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-iluvatar-gpu-demo\n  template:\n    metadata:\n      labels:\n        app: full-iluvatar-gpu-demo\n    spec:\n      containers:\n      - image: nginx:perl\n        name: container-0\n        resources:\n          limits:\n            cpu: 250m\n            iluvatar.ai/vcuda-core: '1'\n            iluvatar.ai/vcuda-memory: '200'\n            memory: 512Mi\n          requests:\n            cpu: 250m\n            memory: 512Mi\n      imagePullSecrets:\n      - name: default-secret\n
                                                                                "},{"location":"en/admin/kpanda/gpu/dynamic-regulation.html","title":"GPU Scheduling Configuration (Binpack and Spread)","text":"

                                                                                 This page introduces how to reduce GPU resource fragmentation and prevent single points of failure through Binpack and Spread when using NVIDIA vGPU, achieving advanced scheduling for vGPU. The AI platform provides Binpack and Spread scheduling policies across two dimensions, clusters and workloads, meeting different usage requirements in various scenarios.

                                                                                "},{"location":"en/admin/kpanda/gpu/dynamic-regulation.html#prerequisites","title":"Prerequisites","text":"
                                                                                • GPU devices are correctly installed on the cluster nodes.
                                                                                • The gpu-operator component and Nvidia-vgpu component are correctly installed in the cluster.
                                                                                 • The NVIDIA-vGPU type appears under GPU mode in the cluster's node list.
                                                                                "},{"location":"en/admin/kpanda/gpu/dynamic-regulation.html#use-cases","title":"Use Cases","text":"
                                                                                • Scheduling policy based on GPU dimension

                                                                                  • Binpack: Prioritizes using the same GPU on a node, suitable for increasing GPU utilization and reducing resource fragmentation.
                                                                                  • Spread: Multiple Pods are distributed across different GPUs on nodes, suitable for high availability scenarios to avoid single card failures.
                                                                                • Scheduling policy based on node dimension

                                                                                  • Binpack: Multiple Pods prioritize using the same node, suitable for increasing GPU utilization and reducing resource fragmentation.
                                                                                  • Spread: Multiple Pods are distributed across different nodes, suitable for high availability scenarios to avoid single node failures.
                                                                                "},{"location":"en/admin/kpanda/gpu/dynamic-regulation.html#use-binpack-and-spread-at-cluster-level","title":"Use Binpack and Spread at Cluster-Level","text":"

                                                                                Note

                                                                                By default, workloads will follow the cluster-level Binpack and Spread. If a workload sets its own Binpack and Spread scheduling policies that differ from the cluster, the workload will prioritize its own scheduling policy.

                                                                                1. On the Clusters page, select the cluster for which you want to adjust the Binpack and Spread scheduling policies. Click the \u2507 icon on the right and select GPU Scheduling Configuration from the dropdown list.

                                                                                2. Adjust the GPU scheduling configuration according to your business scenario, and click OK to save.

                                                                                "},{"location":"en/admin/kpanda/gpu/dynamic-regulation.html#use-binpack-and-spread-at-workload-level","title":"Use Binpack and Spread at Workload-Level","text":"

                                                                                Note

                                                                                When the Binpack and Spread scheduling policies at the workload level conflict with the cluster-level configuration, the workload-level configuration takes precedence.

                                                                                Follow the steps below to create a deployment using an image and configure Binpack and Spread scheduling policies within the workload.

                                                                                1. Click Clusters in the left navigation bar, then click the name of the target cluster to enter the Cluster Details page.

                                                                                2. On the Cluster Details page, click Workloads -> Deployments in the left navigation bar, then click the Create by Image button in the upper right corner of the page.

3. Fill in the Basic Information and Container Settings in order. In the container configuration, enable GPU support and select NVIDIA vGPU as the GPU type. Then click Advanced Settings, enable the Binpack / Spread scheduling policy, and adjust the GPU scheduling configuration for your business scenario (a YAML sketch of the equivalent configuration follows below). After configuration, click Next to proceed to Service Settings and Advanced Settings. Finally, click OK at the bottom right of the page to complete the creation.
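For reference, the same choice can be written directly into a workload's YAML. This is a minimal sketch assuming the workload-level policy is carried by the hami.io scheduler annotations shown on the GPU Scheduling Configuration page; the Pod name, image, and resource amounts are placeholders.

apiVersion: v1\nkind: Pod\nmetadata:\n  name: binpack-spread-demo  # placeholder name\n  annotations:\n    hami.io/node-scheduler-policy: \"spread\"   # node-level policy\n    hami.io/gpu-scheduler-policy: \"binpack\"   # GPU-level policy\nspec:\n  containers:\n    - name: demo\n      image: ubuntu:18.04  # placeholder image\n      command: [\"bash\", \"-c\", \"sleep 86400\"]\n      resources:\n        limits:\n          nvidia.com/gpu: 1\n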

                                                                                "},{"location":"en/admin/kpanda/gpu/gpu_matrix.html","title":"GPU Support Matrix","text":"

                                                                                This page explains the matrix of supported GPUs and operating systems for AI platform.

                                                                                "},{"location":"en/admin/kpanda/gpu/gpu_matrix.html#nvidia-gpu","title":"NVIDIA GPU","text":"GPU Manufacturer and Type Supported GPU Models Compatible Operating System (Online) Recommended Kernel Recommended Operating System and Kernel Installation Documentation NVIDIA GPU (Full Card/vGPU)
                                                                                • NVIDIA Fermi (2.1) Architecture:
                                                                                • NVIDIA GeForce 400 Series
                                                                                • NVIDIA Quadro 4000 Series
                                                                                • NVIDIA Tesla 20 Series
                                                                                • NVIDIA Ampere Architecture Series (A100; A800; H100)
                                                                                CentOS 7
                                                                                • Kernel 3.10.0-123 ~ 3.10.0-1160
                                                                                • Kernel Reference Document
                                                                                • Recommended Operating System with Proper Kernel Version
                                                                                Operating System: CentOS 7.9; Kernel Version: 3.10.0-1160 Offline Installation with GPU Operator CentOS 8 Kernel 4.18.0-80 ~ 4.18.0-348 Ubuntu 20.04 Kernel 5.4 Ubuntu 22.04 Kernel 5.19 RHEL 7 Kernel 3.10.0-123 ~ 3.10.0-1160 RHEL 8 Kernel 4.18.0-80 ~ 4.18.0-348 NVIDIA MIG
                                                                                • Ampere Architecture Series:
                                                                                • A100
                                                                                • A800
                                                                                • H100
                                                                                CentOS 7 Kernel 3.10.0-123 ~ 3.10.0-1160 Operating System: CentOS 7.9; Kernel Version: 3.10.0-1160 Offline Installation with GPU Operator CentOS 8 Kernel 4.18.0-80 ~ 4.18.0-348 Ubuntu 20.04 Kernel 5.4 Ubuntu 22.04 Kernel 5.19 RHEL 7 Kernel 3.10.0-123 ~ 3.10.0-1160 RHEL 8 Kernel 4.18.0-80 ~ 4.18.0-348"},{"location":"en/admin/kpanda/gpu/gpu_matrix.html#ascend-npu","title":"Ascend NPU","text":"GPU Manufacturer and Type Supported NPU Models Compatible Operating System (Online) Recommended Kernel Recommended Operating System and Kernel Installation Documentation Ascend (Ascend 310)
                                                                                • Ascend 310;
                                                                                • Ascend 310P;
                                                                                Ubuntu 20.04 Details refer to: Kernel Version Requirements Operating System: CentOS 7.9; Kernel Version: 3.10.0-1160 300 and 310P Driver Documentation CentOS 7.6 CentOS 8.2 KylinV10SP1 Operating System openEuler Operating System Ascend (Ascend 910P) Ascend 910 Ubuntu 20.04 Details refer to: Kernel Version Requirements Operating System: CentOS 7.9; Kernel Version: 3.10.0-1160 910 Driver Documentation CentOS 7.6 CentOS 8.2 KylinV10SP1 Operating System openEuler Operating System"},{"location":"en/admin/kpanda/gpu/gpu_matrix.html#iluvatar-gpu","title":"Iluvatar GPU","text":"GPU Manufacturer and Type Supported GPU Models Compatible Operating System (Online) Recommended Kernel Recommended Operating System and Kernel Installation Documentation Iluvatar (Iluvatar vGPU)
                                                                                • BI100;
                                                                                • MR100;
                                                                                CentOS 7
                                                                                • Kernel 3.10.0-957.el7.x86_64 ~ 3.10.0-1160.42.2.el7.x86_64
                                                                                Operating System: CentOS 7.9; Kernel Version: 3.10.0-1160 Coming Soon CentOS 8
                                                                                • Kernel 4.18.0-80.el8.x86_64 ~ 4.18.0-305.19.1.el8_4.x86_64
                                                                                Ubuntu 20.04
                                                                                • Kernel 4.15.0-20-generic ~ 4.15.0-160-generic
                                                                                • Kernel 5.4.0-26-generic ~ 5.4.0-89-generic
                                                                                • Kernel 5.8.0-23-generic ~ 5.8.0-63-generic
                                                                                Ubuntu 21.04
                                                                                • Kernel 4.15.0-20-generic ~ 4.15.0-160-generic
                                                                                • Kernel 5.4.0-26-generic ~ 5.4.0-89-generic
                                                                                • Kernel 5.8.0-23-generic ~ 5.8.0-63-generic
                                                                                openEuler 22.03 LTS
                                                                                • Kernel version >= 5.1 and <= 5.10
                                                                                "},{"location":"en/admin/kpanda/gpu/gpu_scheduler_config.html","title":"GPU Scheduling Configuration","text":"

This document introduces GPU scheduling configuration, which enables advanced scheduling policies. Currently, the vGPU scheduling policy is the primary implementation.

                                                                                "},{"location":"en/admin/kpanda/gpu/gpu_scheduler_config.html#vgpu-resource-scheduling-configuration","title":"vGPU Resource Scheduling Configuration","text":"

vGPU provides two resource-usage policies, binpack and spread, and each can be applied at the node level and at the GPU level. The choice comes down to whether you want to spread workloads across different nodes and GPUs, or concentrate them on the same node and GPU to raise resource utilization and reduce resource fragmentation.

                                                                                You can modify the scheduling policy in your cluster by following these steps:

                                                                                1. Go to the cluster management list in the container management interface.
                                                                                2. Click the settings button ... next to the cluster.
                                                                                3. Click GPU Scheduling Configuration.
                                                                                4. Toggle the scheduling policy between node-level and GPU-level. By default, the node-level policy is binpack, and the GPU-level policy is spread.

                                                                                The above steps modify the cluster-level scheduling policy. Users can also specify their own scheduling policy at the workload level to change the scheduling results. Below is an example of modifying the scheduling policy at the workload level:

                                                                                apiVersion: v1\nkind: Pod\nmetadata:\n  name: gpu-pod\n  annotations:\n    hami.io/node-scheduler-policy: \"binpack\"\n    hami.io/gpu-scheduler-policy: \"binpack\"\nspec:\n  containers:\n    - name: ubuntu-container\n      image: ubuntu:18.04\n      command: [\"bash\", \"-c\", \"sleep 86400\"]\n      resources:\n        limits:\n          nvidia.com/gpu: 1\n          nvidia.com/gpumem: 3000\n          nvidia.com/gpucores: 30\n

                                                                                In this example, both the node- and GPU-level scheduling policies are set to binpack. This ensures that the workload is scheduled to maximize resource utilization and reduce fragmentation.

                                                                                "},{"location":"en/admin/kpanda/gpu/vgpu_quota.html","title":"GPU Quota Management","text":"

This section describes how to use vGPU capabilities on the AI platform.

                                                                                "},{"location":"en/admin/kpanda/gpu/vgpu_quota.html#prerequisites","title":"Prerequisites","text":"

The corresponding GPU driver (NVIDIA GPU, NVIDIA MIG, Iluvatar, Ascend) has been deployed on the current cluster, either through an Operator or manually.

                                                                                "},{"location":"en/admin/kpanda/gpu/vgpu_quota.html#procedure","title":"Procedure","text":"

                                                                                Follow these steps to manage GPU quotas in AI platform:

                                                                                1. Go to Namespaces and click Quota Management to configure the GPU resources that can be used by a specific namespace.

                                                                                2. The currently supported card types for quota management in a namespace are: NVIDIA vGPU, NVIDIA MIG, Iluvatar, and Ascend.

3. NVIDIA vGPU Quota Management: Configure the specific quota that can be used. This creates a ResourceQuota CR.

  • Physical Card Count (nvidia.com/vgpu): The number of physical cards the current pod needs to mount. The input value must be an integer and less than or equal to the number of cards on the host machine.
  • GPU Core Count (nvidia.com/gpucores): The GPU compute power occupied by each card. The value ranges from 0 to 100; 0 means no isolation is enforced, and 100 means exclusive use of the entire card.
  • GPU Memory Usage (nvidia.com/gpumem): The amount of GPU memory occupied by each card, in MB. The minimum is 1 and the maximum is the entire memory of the card.
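For reference, such a namespace quota corresponds to a standard Kubernetes ResourceQuota object. The sketch below is illustrative only: it assumes the platform exposes the vGPU keys as requests.*-prefixed quota entries, and the name, namespace, and values are placeholders.

apiVersion: v1\nkind: ResourceQuota\nmetadata:\n  name: vgpu-quota      # placeholder name\n  namespace: demo-ns    # placeholder namespace\nspec:\n  hard:\n    requests.nvidia.com/vgpu: \"2\"       # total physical cards requested in this namespace\n    requests.nvidia.com/gpucores: \"60\"  # total GPU compute power requested\n    requests.nvidia.com/gpumem: \"4096\"  # total GPU memory requested, in MB\n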
                                                                                "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html","title":"Installation of Ascend NPU Components","text":"

                                                                                This chapter provides installation guidance for Ascend NPU drivers, Device Plugin, NPU-Exporter, and other components.

                                                                                "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html#prerequisites","title":"Prerequisites","text":"
                                                                                1. Before installation, confirm the supported NPU models. For details, refer to the Ascend NPU Matrix.
2. Ensure that the kernel version meets the requirements of the corresponding NPU model. For more details, refer to the Ascend NPU Matrix.
                                                                                3. Prepare the basic Kubernetes environment.
                                                                                "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html#installation-steps","title":"Installation Steps","text":"

                                                                                Before using NPU resources, you need to complete the firmware installation, NPU driver installation, Docker Runtime installation, user creation, log directory creation, and NPU Device Plugin installation. Refer to the following steps for details.

                                                                                "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html#install-firmware","title":"Install Firmware","text":"
1. Confirm that the kernel version falls within the range supported by the \"binary installation\" method; if so, you can install the NPU driver firmware directly.
                                                                                2. For firmware and driver downloads, refer to: Firmware Download Link
                                                                                3. For firmware installation, refer to: Install NPU Driver Firmware
                                                                                "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html#install-npu-driver","title":"Install NPU Driver","text":"
                                                                                1. If the driver is not installed, refer to the official Ascend documentation for installation. For example, for Ascend910, refer to: 910 Driver Installation Document.
                                                                                2. Run the command npu-smi info, and if the NPU information is returned normally, it indicates that the NPU driver and firmware are ready.
                                                                                "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html#install-docker-runtime","title":"Install Docker Runtime","text":"
                                                                                1. Download Ascend Docker Runtime

                                                                                  Community edition download link: https://www.hiascend.com/zh/software/mindx-dl/community

                                                                                  wget -c https://mindx.obs.cn-south-1.myhuaweicloud.com/OpenSource/MindX/MindX%205.0.RC2/MindX%20DL%205.0.RC2/Ascend-docker-runtime_5.0.RC2_linux-x86_64.run\n

                                                                                  Install to the specified path by executing the following two commands in order, with parameters specifying the installation path:

chmod u+x Ascend-docker-runtime_5.0.RC2_linux-x86_64.run\n./Ascend-docker-runtime_5.0.RC2_linux-x86_64.run --install --install-path=<path>\n
                                                                                2. Modify the containerd configuration file

                                                                                  If containerd has no default configuration file, execute the following three commands in order to create the configuration file:

                                                                                  mkdir /etc/containerd \ncontainerd config default > /etc/containerd/config.toml \nvim /etc/containerd/config.toml\n

                                                                                  If containerd has a configuration file:

                                                                                  vim /etc/containerd/config.toml\n

                                                                                  Modify the runtime installation path according to the actual situation, mainly modifying the runtime field:

                                                                                  ... \n[plugins.\"io.containerd.monitor.v1.cgroups\"]\n   no_prometheus = false  \n[plugins.\"io.containerd.runtime.v1.linux\"]\n   shim = \"containerd-shim\"\n   runtime = \"/usr/local/Ascend/Ascend-Docker-Runtime/ascend-docker-runtime\"\n   runtime_root = \"\"\n   no_shim = false\n   shim_debug = false\n [plugins.\"io.containerd.runtime.v2.task\"]\n   platforms = [\"linux/amd64\"]\n...\n

                                                                                  Execute the following command to restart containerd:

                                                                                  systemctl restart containerd\n
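To confirm that containerd picked up the new runtime path, you can dump the merged configuration and search for the Ascend runtime (the grep pattern below is just an example):

containerd config dump | grep ascend-docker-runtime\n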
                                                                                "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html#create-a-user","title":"Create a User","text":"

                                                                                Execute the following commands on the node where the components are installed to create a user.

                                                                                # Ubuntu operating system\nuseradd -d /home/hwMindX -u 9000 -m -s /usr/sbin/nologin hwMindX\nusermod -a -G HwHiAiUser hwMindX\n# CentOS operating system\nuseradd -d /home/hwMindX -u 9000 -m -s /sbin/nologin hwMindX\nusermod -a -G HwHiAiUser hwMindX\n
                                                                                "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html#create-log-directory","title":"Create Log Directory","text":"

                                                                                Create the parent directory for component logs and the log directories for each component on the proper node, and set the appropriate owner and permissions for the directories. Execute the following command to create the parent directory for component logs.

                                                                                mkdir -m 755 /var/log/mindx-dl\nchown root:root /var/log/mindx-dl\n

                                                                                Execute the following command to create the Device Plugin component log directory.

                                                                                mkdir -m 750 /var/log/mindx-dl/devicePlugin\nchown root:root /var/log/mindx-dl/devicePlugin\n

                                                                                Note

Please create the corresponding log directory for each required component. In this example, only the Device Plugin component is needed. For other components, refer to the official documentation.
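For instance, if you later enable NpuExporter, its log directory can be created following the same pattern. The directory name below is an assumption modeled on the Device Plugin example; confirm the exact name in the official documentation.

# Assumed directory name, following the devicePlugin pattern\nmkdir -m 750 /var/log/mindx-dl/npu-exporter\nchown root:root /var/log/mindx-dl/npu-exporter\n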

                                                                                "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html#create-node-labels","title":"Create Node Labels","text":"

                                                                                Refer to the following commands to create labels on the proper nodes:

# Create this label on computing nodes where the driver is installed\nkubectl label node {nodename} huawei.com.ascend/Driver=installed\nkubectl label node {nodename} node-role.kubernetes.io/worker=worker\nkubectl label node {nodename} workerselector=dls-worker-node\nkubectl label node {nodename} host-arch=huawei-arm  # or host-arch=huawei-x86, select according to the actual situation\nkubectl label node {nodename} accelerator=huawei-Ascend910  # select according to the actual situation\n# Create this label on control nodes\nkubectl label node {nodename} masterselector=dls-master-node\n
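You can verify that the labels were applied with a standard kubectl query, for example:

kubectl get node {nodename} --show-labels | grep huawei.com.ascend/Driver\n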
                                                                                "},{"location":"en/admin/kpanda/gpu/ascend/ascend_driver_install.html#install-device-plugin-and-npuexporter","title":"Install Device Plugin and NpuExporter","text":"

                                                                                Functional module path: Container Management -> Cluster, click the name of the target cluster, then click Helm Apps -> Helm Charts from the left navigation bar, and search for ascend-mindxdl.

                                                                                • DevicePlugin: Provides a general device plugin mechanism and standard device API interface for Kubernetes to use devices. It is recommended to use the default image and version.
                                                                                • NpuExporter: Based on the Prometheus/Telegraf ecosystem, this component provides interfaces to help users monitor the Ascend series AI processors and container-level allocation status. It is recommended to use the default image and version.
• ServiceMonitor: Disabled by default. When enabled, you can view NPU-related monitoring in the observability module. Before enabling it, ensure that insight-agent is installed and running; otherwise, the ascend-mindxdl installation will fail.
• isVirtualMachine: Disabled by default. If the NPU node runs in a virtual machine, enable the isVirtualMachine parameter.

After a successful installation, the two components appear in the corresponding namespace, and the corresponding NPU resources show up in the node information.

Once everything is ready, you can select the corresponding NPU device when creating a workload through the page.

                                                                                Note

For details on how to use the NPU, refer to Using Ascend NPU.

                                                                                "},{"location":"en/admin/kpanda/gpu/ascend/vnpu.html","title":"Enable Ascend Virtualization","text":"

                                                                                Ascend virtualization is divided into dynamic virtualization and static virtualization. This document describes how to enable and use Ascend static virtualization capabilities.

                                                                                "},{"location":"en/admin/kpanda/gpu/ascend/vnpu.html#prerequisites","title":"Prerequisites","text":"
• A Kubernetes cluster environment has been set up.
                                                                                • The current NPU node has the Ascend driver installed.
                                                                                • The current NPU node has the Ascend-Docker-Runtime component installed.
                                                                                • The NPU MindX DL suite is installed on the current cluster.
                                                                                • Supported NPU models:

                                                                                  • Ascend 310P, verified
                                                                                  • Ascend 910b (20 cores), verified
                                                                                  • Ascend 910 (32 cores), officially supported but not verified
                                                                                  • Ascend 910 (30 cores), officially supported but not verified

                                                                                  For more details, refer to the official virtualization hardware documentation.

                                                                                Refer to the Ascend NPU Component Installation Documentation for the basic environment setup.

                                                                                "},{"location":"en/admin/kpanda/gpu/ascend/vnpu.html#enable-virtualization-capabilities","title":"Enable Virtualization Capabilities","text":"

                                                                                To enable virtualization capabilities, you need to manually modify the startup parameters of the ascend-device-plugin-daemonset component. Refer to the following command:

                                                                                - device-plugin -useAscendDocker=true -volcanoType=false -presetVirtualDevice=true\n- logFile=/var/log/mindx-dl/devicePlugin/devicePlugin.log -logLevel=0\n
                                                                                "},{"location":"en/admin/kpanda/gpu/ascend/vnpu.html#split-vnpu-instances","title":"Split VNPU Instances","text":"

                                                                                Static virtualization requires manually splitting VNPU instances. Refer to the following command:

                                                                                npu-smi set -t create-vnpu -i 13 -c 0 -f vir02\n
• -i specifies the card id.
• -c specifies the chip id.
• vir02 is the split specification template.

                                                                                Card id and chip id can be queried using npu-smi info. The split specifications can be found in the Ascend official templates.

                                                                                After splitting the instance, you can query the split results using the following command:

                                                                                npu-smi info -t info-vnpu -i 13 -c 0\n

The query result lists the VNPU instances created by the split.

                                                                                "},{"location":"en/admin/kpanda/gpu/ascend/vnpu.html#restart-ascend-device-plugin-daemonset","title":"Restart ascend-device-plugin-daemonset","text":"

                                                                                After splitting the instance, manually restart the device-plugin pod, then use the kubectl describe command to check the resources of the registered node:

                                                                                kubectl describe node {{nodename}}\n
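In the describe output, the split devices appear as extended resources under Capacity and Allocatable. A quick way to check (the grep window size is arbitrary):

kubectl describe node {{nodename}} | grep -A 8 Capacity\n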

                                                                                "},{"location":"en/admin/kpanda/gpu/ascend/vnpu.html#how-to-use-the-device","title":"How to Use the Device","text":"

                                                                                When creating an application, specify the resource key as shown in the following YAML:

                                                                                ......\nresources:\n  requests:\n    huawei.com/Ascend310P-2c: 1\n  limits:\n    huawei.com/Ascend310P-2c: 1\n......\n
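Putting it together, a minimal Pod sketch might look like the following; the Pod name and image are placeholders, and huawei.com/Ascend310P-2c matches the vir02 (2-core) split created above.

apiVersion: v1\nkind: Pod\nmetadata:\n  name: vnpu-demo  # placeholder name\nspec:\n  containers:\n    - name: demo\n      image: ubuntu:18.04  # placeholder image\n      command: [\"bash\", \"-c\", \"sleep 86400\"]\n      resources:\n        requests:\n          huawei.com/Ascend310P-2c: 1\n        limits:\n          huawei.com/Ascend310P-2c: 1\n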
                                                                                "},{"location":"en/admin/kpanda/gpu/metax/usemetax.html","title":"MetaX GPU Component Installation and Usage","text":"

                                                                                This chapter provides installation guidance for MetaX's gpu-extensions, gpu-operator, and other components, as well as usage methods for both the full GPU and vGPU modes.

                                                                                "},{"location":"en/admin/kpanda/gpu/metax/usemetax.html#prerequisites","title":"Prerequisites","text":"
1. The required tar package has been downloaded from the MetaX Software Center. This article uses metax-gpu-k8s-package.0.7.10.tar.gz as an example.
                                                                                2. Prepare the basic Kubernetes environment.
                                                                                "},{"location":"en/admin/kpanda/gpu/metax/usemetax.html#component-introduction","title":"Component Introduction","text":"

                                                                                Metax provides two helm-chart packages: metax-extensions and gpu-operator. Depending on the usage scenario, different components can be selected for installation.

                                                                                1. Metax-extensions: Includes two components, gpu-device and gpu-label. When using the Metax-extensions solution, the user's application container image needs to be built based on the MXMACA\u00ae base image. Moreover, Metax-extensions is only suitable for scenarios using the full GPU.
                                                                                2. gpu-operator: Includes components such as gpu-device, gpu-label, driver-manager, container-runtime, and operator-controller. When using the gpu-operator solution, users can choose to create application container images that do not include the MXMACA\u00ae SDK. The gpu-operator is suitable for both full GPU and vGPU scenarios.
                                                                                "},{"location":"en/admin/kpanda/gpu/metax/usemetax.html#operation-steps","title":"Operation Steps","text":"
                                                                                1. Extract the following from the /home/metax/metax-docs/k8s/metax-gpu-k8s-package.0.7.10.tar.gz file:

                                                                                  • deploy-gpu-extensions.yaml # Deployment YAML
                                                                                  • metax-gpu-extensions-0.7.10.tgz, metax-operator-0.7.10.tgz # Helm chart files
                                                                                  • metax-k8s-images.0.7.10.run # Offline image
                                                                                2. Check if the system has the driver installed:

                                                                                  $ lsmod | grep metax \nmetax 1605632 0 \nttm 86016 3 drm_vram_helper,metax,drm_ttm_helper \ndrm 618496 7 drm_kms_helper,drm_vram_helper,ast,metax,drm_ttm_helper,ttm\n
  • If the command prints nothing, the driver package is not installed; if it prints the modules above, the driver package is already installed.
                                                                                  • When using metax-operator, it is not recommended to pre-install the MXMACA kernel driver on worker nodes; if it has already been installed, there is no need to uninstall it.
                                                                                3. Install the driver.

                                                                                "},{"location":"en/admin/kpanda/gpu/metax/usemetax.html#gpu-extensions","title":"gpu-extensions","text":"
                                                                                1. Push the image:

                                                                                  tar -xf metax-gpu-k8s-package.0.7.10.tar.gz\n./metax-k8s-images.0.7.10.run push {registry}/metax\n
                                                                                2. Push the Helm Chart:

                                                                                  helm plugin install https://github.com/chartmuseum/helm-push\nhelm repo add --username rootuser --password rootpass123  metax http://172.16.16.5:8081\nhelm cm-push metax-operator-0.7.10.tgz metax\nhelm cm-push metax-gpu-extensions-0.7.10.tgz metax\n
3. Install metax-gpu-extensions on the AI computing platform (a CLI sketch of this step follows after this list).

                                                                                  After successful deployment, resources can be viewed on the node.

4. After the installation succeeds, you can see the Metax GPU label on the node.
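For reference, step 3 could also be performed from the command line against the Helm repository added above. This is a sketch only: the release name and namespace are placeholders, and your environment may need additional chart values.

helm install metax-gpu-extensions metax/metax-gpu-extensions --version 0.7.10 -n metax-system --create-namespace\n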

                                                                                "},{"location":"en/admin/kpanda/gpu/metax/usemetax.html#gpu-operator","title":"gpu-operator","text":"

                                                                                Known issues when installing gpu-operator:

                                                                                1. The images for the components metax-operator, gpu-label, gpu-device, and container-runtime must have the amd64 suffix.

2. The image for the metax-maca component is not included in the metax-k8s-images.0.7.13.run package and must be downloaded separately (for example, maca-mxc500-2.23.0.23-ubuntu20.04-x86_64.tar.xz). After loading it, update the image address of the metax-maca component accordingly.

3. The image for the metax-driver component must be downloaded from https://pub-docstore.metax-tech.com:7001 as the k8s-driver-image.2.23.0.25.run file. Run k8s-driver-image.2.23.0.25.run push {registry}/metax to push the image to the image repository, then update the image address of the metax-driver component.

                                                                                "},{"location":"en/admin/kpanda/gpu/metax/usemetax.html#using-gpu","title":"Using GPU","text":"

                                                                                After installation, you can use MetaX GPU in workloads. Note that after enabling the GPU, you need to select the GPU type as Metax GPU.

                                                                                Enter the container and execute mx-smi to view the GPU usage.

                                                                                "},{"location":"en/admin/kpanda/gpu/mlu/use-mlu.html","title":"Using Cambricon GPU","text":"

                                                                                This article introduces how to use Cambricon GPU in the Suanova AI computing platform.

                                                                                "},{"location":"en/admin/kpanda/gpu/mlu/use-mlu.html#prerequisites","title":"Prerequisites","text":"
                                                                                • The Suanova AI computing platform's container management platform has been deployed and is running normally.
• The container management module has either integrated with a Kubernetes cluster or created a Kubernetes cluster, and the cluster UI is accessible.
                                                                                • The current cluster has installed the Cambricon firmware, drivers, and DevicePlugin components. For installation details, please refer to the official documentation:
                                                                                  • Driver Firmware Installation
                                                                                  • DevicePlugin Installation

                                                                                When installing DevicePlugin, please disable the --enable-device-type parameter; otherwise, the Suanova AI computing platform will not be able to correctly recognize the Cambricon GPU.

                                                                                "},{"location":"en/admin/kpanda/gpu/mlu/use-mlu.html#introduction-to-cambricon-gpu-modes","title":"Introduction to Cambricon GPU Modes","text":"

                                                                                Cambricon GPUs have the following modes:

                                                                                • Full Card Mode: Register the Cambricon GPU as a whole card for use in the cluster.
                                                                                • Share Mode: Allows one Cambricon GPU to be shared among multiple Pods, with the number of shareable containers set by the virtualization-num parameter.
                                                                                • Dynamic SMLU Mode: Further refines resource allocation, allowing control over the size of memory and computing power allocated to containers.
                                                                                • MIM Mode: Allows the Cambricon GPU to be divided into multiple GPUs of fixed specifications for use.
                                                                                "},{"location":"en/admin/kpanda/gpu/mlu/use-mlu.html#using-cambricon-in-suanova-ai-computing-platform","title":"Using Cambricon in Suanova AI Computing Platform","text":"

                                                                                Here, we take the Dynamic SMLU mode as an example:

1. After correctly installing the DevicePlugin and other components, click the corresponding Cluster -> Cluster Maintenance -> Cluster Settings -> Addon Plugins to check whether the corresponding GPU type has been automatically enabled and detected.

2. Open the node management page to check whether the nodes have correctly recognized the corresponding GPU type.

3. Deploy workloads. Click the corresponding Cluster -> Workloads, and deploy workloads using images. After selecting the type (MLU VGPU), configure the GPU resources used by the application:

                                                                                  • GPU Computing Power (cambricon.com/mlu.smlu.vcore): Indicates the percentage of cores the current Pod needs to use.
                                                                                  • GPU Memory (cambricon.com/mlu.smlu.vmemory): Indicates the size of memory the current Pod needs to use, in MB.
                                                                                "},{"location":"en/admin/kpanda/gpu/mlu/use-mlu.html#using-yaml-configuration","title":"Using YAML Configuration","text":"

                                                                                Refer to the following YAML file:

apiVersion: v1  \nkind: Pod  \nmetadata:  \n  name: pod1  \nspec:  \n  restartPolicy: OnFailure  \n  containers:  \n    - image: ubuntu:16.04  \n      name: pod1-ctr  \n      command: [\"sleep\"]  \n      args: [\"100000\"]  \n      resources:  \n        limits:  \n          cambricon.com/mlu: \"1\" # use this when device type is not enabled, else delete this line.  \n          #cambricon.com/mlu: \"1\" #uncomment to use when device type is enabled  \n          #cambricon.com/mlu.share: \"1\" #uncomment to use device with env-share mode  \n          #cambricon.com/mlu.mim-2m.8gb: \"1\" #uncomment to use device with mim mode  \n          #cambricon.com/mlu.smlu.vcore: \"100\" #uncomment to use device with dynamic smlu mode  \n          #cambricon.com/mlu.smlu.vmemory: \"1024\" #uncomment to use device with dynamic smlu mode\n
                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/index.html","title":"NVIDIA GPU Usage Modes","text":"

                                                                                NVIDIA, as a well-known graphics computing provider, offers various software and hardware solutions to enhance computational power. Among them, NVIDIA provides the following three solutions for GPU usage:

                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/index.html#full-gpu","title":"Full GPU","text":"

                                                                                Full GPU refers to allocating the entire NVIDIA GPU to a single user or application. In this configuration, the application can fully occupy all the resources of the GPU and achieve maximum computational performance. Full GPU is suitable for workloads that require a large amount of computational resources and memory, such as deep learning training, scientific computing, etc.

                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/index.html#vgpu-virtual-gpu","title":"vGPU (Virtual GPU)","text":"

                                                                                vGPU is a virtualization technology that allows one physical GPU to be partitioned into multiple virtual GPUs, with each virtual GPU assigned to different virtual machines or users. vGPU enables multiple users to share the same physical GPU and independently use GPU resources in their respective virtual environments. Each virtual GPU can access a certain amount of compute power and memory capacity. vGPU is suitable for virtualized environments and cloud computing scenarios, providing higher resource utilization and flexibility.

                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/index.html#mig-multi-instance-gpu","title":"MIG (Multi-Instance GPU)","text":"

                                                                                MIG is a feature introduced by the NVIDIA Ampere architecture that allows one physical GPU to be divided into multiple physical GPU instances, each of which can be independently allocated to different users or workloads. Each MIG instance has its own compute resources, memory, and PCIe bandwidth, just like an independent virtual GPU. MIG provides finer-grained GPU resource allocation and management and allows dynamic adjustment of the number and size of instances based on demand. MIG is suitable for multi-tenant environments, containerized applications, batch jobs, and other scenarios.

                                                                                Whether using vGPU in a virtualized environment or MIG on a physical GPU, NVIDIA provides users with more choices and optimized ways to utilize GPU resources. The Suanova container management platform fully supports the above NVIDIA capabilities. Users can easily access the full computational power of NVIDIA GPUs through simple UI operations, thereby improving resource utilization and reducing costs.

• Single Mode: The node exposes a single type of MIG device on all of its GPUs. All GPUs on the node must:
  • Be of the same model (e.g., A100-SXM-40GB); MIG profiles only match across GPUs of the same model.
  • Have MIG mode enabled, which requires a machine reboot to take effect.
  • Have identical GPU Instances (GI) and Compute Instances (CI) created, so that all GPUs expose an \"identical\" set of MIG devices.
• Mixed Mode: The node exposes a mixture of MIG device types across its GPUs. A request for a specific MIG device type is expressed by the number of compute slices and the total memory of that device type (see the sketch after this list).
  • All GPUs on the node must be in the same product line (e.g., A100-SXM-40GB).
  • Each GPU can enable or disable MIG individually and can be configured with any available mixture of MIG device types.
  • The k8s-device-plugin running on the node will:
    • Expose any GPUs not in MIG mode using the traditional nvidia.com/gpu resource type.
    • Expose individual MIG devices using resource types that follow the pattern nvidia.com/mig-<slice_count>g.<memory_size>gb.
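As an illustration of that resource pattern, a Pod requesting a MIG device might look like the sketch below. The profile nvidia.com/mig-2g.10gb and all names are placeholders; the profiles actually available depend on the GPU model and its MIG configuration.

apiVersion: v1\nkind: Pod\nmetadata:\n  name: mig-demo  # placeholder name\nspec:\n  containers:\n    - name: demo\n      image: nvidia/cuda:12.2.0-base-ubuntu22.04  # placeholder image\n      command: [\"nvidia-smi\", \"-L\"]\n      resources:\n        limits:\n          nvidia.com/mig-2g.10gb: 1  # one 2-slice / 10 GB MIG device\n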

                                                                                For detailed instructions on enabling these configurations, refer to Offline Installation of GPU Operator.

                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/index.html#how-to-use","title":"How to Use","text":"

                                                                                You can refer to the following links to quickly start using Suanova's management capabilities for NVIDIA GPUs.

                                                                                • Using Full NVIDIA GPU
                                                                                • Using NVIDIA vGPU
                                                                                • Using NVIDIA MIG
                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/full_gpu_userguide.html","title":"Using the Whole NVIDIA GPU for an Application","text":"

This section describes how to allocate an entire NVIDIA GPU to a single application on the AI platform.

                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/full_gpu_userguide.html#prerequisites","title":"Prerequisites","text":"
                                                                                • AI platform container management platform has been deployed and is running properly.
• The container management module has been connected to a Kubernetes cluster or a Kubernetes cluster has been created, and the cluster UI is accessible.
                                                                                • GPU Operator has been offline installed and NVIDIA DevicePlugin has been enabled on the current cluster. Refer to Offline Installation of GPU Operator for instructions.
                                                                                • The GPU in the current cluster has not undergone any virtualization operations or been occupied by other applications.
                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/full_gpu_userguide.html#procedure","title":"Procedure","text":""},{"location":"en/admin/kpanda/gpu/nvidia/full_gpu_userguide.html#configuring-via-the-user-interface","title":"Configuring via the User Interface","text":"
1. Check whether the cluster has detected the GPUs. Click Clusters -> Cluster Settings -> Addon Plugins to see whether the corresponding GPU type has been automatically enabled and detected. Currently, the cluster automatically enables GPU and sets the GPU Type as Nvidia GPU .

                                                                                2. Deploy a workload. Click Clusters -> Workloads , and deploy the workload using the image method. After selecting the type ( Nvidia GPU ), configure the number of physical cards used by the application:

                                                                                  Physical Card Count (nvidia.com/gpu): Indicates the number of physical cards that the current pod needs to mount. The input value must be an integer and less than or equal to the number of cards on the host machine.

                                                                                  If the above value is configured incorrectly, scheduling failures and resource allocation issues may occur.

                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/full_gpu_userguide.html#configuring-via-yaml","title":"Configuring via YAML","text":"

                                                                                To request GPU resources for a workload, add the nvidia.com/gpu: 1 parameter to the resource request and limit configuration in the YAML file. This parameter configures the number of physical cards used by the application.

                                                                                apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-gpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-gpu-demo\n  template:\n    metadata:\n      labels:\n        app: full-gpu-demo\n    spec:\n      containers:\n      - image: chrstnhntschl/gpu_burn\n        name: container-0\n        resources:\n          requests:\n            cpu: 250m\n            memory: 512Mi\n            nvidia.com/gpu: 1   # Number of GPUs requested\n          limits:\n            cpu: 250m\n            memory: 512Mi\n            nvidia.com/gpu: 1   # Upper limit of GPU usage\n      imagePullSecrets:\n      - name: default-secret\n

                                                                                Note

                                                                                When using the nvidia.com/gpu parameter to specify the number of GPUs, the values for requests and limits must be consistent.

                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html","title":"Offline Install gpu-operator","text":"

                                                                                AI platform comes with pre-installed driver images for the following three operating systems: Ubuntu 22.04, Ubuntu 20.04, and CentOS 7.9. The driver version is 535.104.12. Additionally, it includes the required Toolkit images for each operating system, so users no longer need to manually provide offline toolkit images.

This page demonstrates deployment on the AMD64 architecture with CentOS 7.9 (kernel 3.10.0-1160). If you need to deploy on Red Hat 8.4, refer to Uploading Red Hat gpu-operator Offline Image to the Bootstrap Node Repository and Building Offline Yum Source for Red Hat 8.4.

                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#prerequisites","title":"Prerequisites","text":"
                                                                                • The kernel version of the cluster nodes where the gpu-operator is to be deployed must be completely consistent. The distribution and GPU model of the nodes must fall within the scope specified in the GPU Support Matrix.
                                                                                • When installing the gpu-operator, select v23.9.0+2 or above.
                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#steps","title":"Steps","text":"

                                                                                To install the gpu-operator plugin for your cluster, follow these steps:

1. Log in to the platform and go to Container Management -> Clusters to check cluster details.

                                                                                2. On the Helm Charts page, select All Repositories and search for gpu-operator .

                                                                                3. Select gpu-operator and click Install .

                                                                                4. Configure the installation parameters for gpu-operator based on the instructions below to complete the installation.

                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#configure-parameters","title":"Configure parameters","text":"
• systemOS : Select the host operating system. The current options are Ubuntu 22.04, Ubuntu 20.04, CentOS 7.9, and Other. Please choose the correct operating system.
                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#basic-information","title":"Basic information","text":"
                                                                                • Name : Enter the plugin name
                                                                                • Namespace : Select the namespace for installing the plugin
                                                                                • Version: The version of the plugin. Here, we use version v23.9.0+2 as an example.
• Failure Deletion: When enabled, if the installation fails, the associated resources that were already installed will be deleted. Enabling this also enables Ready Wait by default.
                                                                                • Ready Wait: When enabled, the application will be marked as successfully installed only when all associated resources are in a ready state.
                                                                                • Detailed Logs: When enabled, detailed logs of the installation process will be recorded.
                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#advanced-settings","title":"Advanced settings","text":""},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#operator-parameters","title":"Operator parameters","text":"
                                                                                • InitContainer.image : Configure the CUDA image, recommended default image: nvidia/cuda
                                                                                • InitContainer.repository : Repository where the CUDA image is located, defaults to nvcr.m.daocloud.io repository
                                                                                • InitContainer.version : Version of the CUDA image, please use the default parameter
                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#driver-parameters","title":"Driver parameters","text":"
                                                                                • Driver.enable : Configure whether to deploy the NVIDIA driver on the node, default is enabled. If you have already deployed the NVIDIA driver on the node before using the gpu-operator, please disable this.
                                                                                • Driver.image : Configure the GPU driver image, recommended default image: nvidia/driver .
                                                                                • Driver.repository : Repository where the GPU driver image is located, default is nvidia's nvcr.io repository.
                                                                                • Driver.usePrecompiled : Enable the precompiled mode to install the driver.
                                                                                • Driver.version : Version of the GPU driver image, use default parameters for offline deployment. Configuration is only required for online installation. Different versions of the Driver image exist for different types of operating systems. For more details, refer to Nvidia GPU Driver Versions. Examples of Driver Version for different operating systems are as follows:

                                                                                  Note

When using the built-in operating system version, there is no need to modify the image version. For other operating system versions, please refer to Uploading Images to the Bootstrap Node Repository. Note that there is no need to include the operating system name, such as Ubuntu, CentOS, or Red Hat, in the version number; if the official image contains an operating system suffix, remove it manually.

                                                                                  • For Red Hat systems, for example, 525.105.17
                                                                                  • For Ubuntu systems, for example, 535-5.15.0-1043-nvidia
                                                                                  • For CentOS systems, for example, 525.147.05
                                                                                • Driver.RepoConfig.ConfigMapName : Used to record the name of the offline yum repository configuration file for the gpu-operator. When using the pre-packaged offline bundle, refer to the following documents for different types of operating systems.

                                                                                  • Building CentOS 7.9 Offline Yum Repository
                                                                                  • Building Red Hat 8.4 Offline Yum Repository
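For reference, the driver fields above map onto Helm chart values roughly as follows when installing gpu-operator from the command line instead of the platform UI. This is a minimal sketch assuming the upstream NVIDIA chart value names (driver.enabled, driver.repository, driver.version, driver.repoConfig.configMapName); the platform UI fills these in for you, and your chart version may differ:

  helm install gpu-operator nvidia/gpu-operator -n gpu-operator \\\n  --set driver.enabled=true \\\n  --set driver.repository=nvcr.m.daocloud.io/nvidia \\\n  --set-string driver.version=525.147.05 \\\n  --set driver.repoConfig.configMapName=local-repo-config\n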
                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#toolkit-parameters","title":"Toolkit parameters","text":"

                                                                                Toolkit.enable : Enabled by default. This component allows containerd/docker to support running containers that require GPUs.

                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#mig-parameters","title":"MIG parameters","text":"

                                                                                For detailed configuration methods, refer to Enabling MIG Functionality.

                                                                                MigManager.Config.name : The name of the MIG split configuration file, used to define the MIG (GI, CI) split policy. The default is default-mig-parted-config . For custom parameters, refer to Enabling MIG Functionality.
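For illustration, here is a minimal sketch of what a custom split entry in the mig-parted configuration might look like. The custom-config name matches the mixed-mode label used in the next section; the device index, profile names, and counts are hypothetical and depend on your GPU model:

  version: v1\nmig-configs:\n  custom-config:\n    - devices: [0]          # apply to GPU 0\n      mig-enabled: true\n      mig-devices:\n        \"1g.10gb\": 2        # two 1g.10gb instances\n        \"2g.20gb\": 1        # one 2g.20gb instance\n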

                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#next-steps","title":"Next Steps","text":"

                                                                                After completing the configuration and creation of the above parameters:

                                                                                • If using full-card mode , GPU resources can be used when creating applications.

                                                                                • If using vGPU mode , after completing the above configuration and creation, proceed to vGPU Addon Installation.

• If using MIG mode , and you need a specific split specification for individual GPU nodes, add a label to those nodes as shown below; otherwise, GPUs are split according to the default value in MigManager.Config .

• For single mode, add a label to the nodes as follows:

                                                                                    kubectl label nodes {node} nvidia.com/mig.config=\"all-1g.10gb\" --overwrite\n
• For mixed mode, add a label to the nodes as follows:

                                                                                    kubectl label nodes {node} nvidia.com/mig.config=\"custom-config\" --overwrite\n

After splitting, applications can use MIG GPU resources, for example as in the Pod sketch below.
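This is a minimal sketch of a Pod requesting a MIG slice in mixed mode; the resource name nvidia.com/mig-1g.10gb is an assumption that depends on your split profile (in single mode, MIG instances are typically requested as nvidia.com/gpu instead):

  apiVersion: v1\nkind: Pod\nmetadata:\n  name: mig-demo\nspec:\n  containers:\n  - name: cuda\n    image: nvidia/cuda:12.2.0-base-ubuntu22.04\n    command: [\"nvidia-smi\", \"-L\"]   # list the MIG device visible inside the container\n    resources:\n      limits:\n        nvidia.com/mig-1g.10gb: 1   # one 1g.10gb MIG instance (hypothetical profile)\n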

                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/push_image_to_repo.html","title":"Uploading Red Hat GPU Operator Offline Image to Bootstrap Repository","text":"

                                                                                This guide explains how to upload an offline image to the bootstrap repository using the nvcr.io/nvidia/driver:525.105.17-rhel8.4 offline driver image for Red Hat 8.4 as an example.

                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/push_image_to_repo.html#prerequisites","title":"Prerequisites","text":"
                                                                                1. The bootstrap node and its components are running properly.
                                                                                2. Prepare a node that has internet access and can access the bootstrap node. Docker should also be installed on this node. You can refer to Installing Docker for installation instructions.
                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/push_image_to_repo.html#procedure","title":"Procedure","text":""},{"location":"en/admin/kpanda/gpu/nvidia/push_image_to_repo.html#step-1-obtain-the-offline-image-on-an-internet-connected-node","title":"Step 1: Obtain the Offline Image on an Internet-Connected Node","text":"

                                                                                Perform the following steps on the internet-connected node:

                                                                                1. Pull the nvcr.io/nvidia/driver:525.105.17-rhel8.4 offline driver image:

                                                                                  docker pull nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
                                                                                2. Once the image is pulled, save it as a compressed archive named nvidia-driver.tar :

                                                                                  docker save nvcr.io/nvidia/driver:525.105.17-rhel8.4 > nvidia-driver.tar\n
                                                                                3. Copy the compressed image archive nvidia-driver.tar to the bootstrap node:

                                                                                  scp nvidia-driver.tar user@ip:/root\n

                                                                                  For example:

                                                                                  scp nvidia-driver.tar root@10.6.175.10:/root\n
                                                                                "},{"location":"en/admin/kpanda/gpu/nvidia/push_image_to_repo.html#step-2-push-the-image-to-the-bootstrap-repository","title":"Step 2: Push the Image to the Bootstrap Repository","text":"

                                                                                Perform the following steps on the bootstrap node:

                                                                                1. Log in to the bootstrap node and import the compressed image archive nvidia-driver.tar :

                                                                                  docker load -i nvidia-driver.tar\n
                                                                                2. View the imported image:

                                                                                  docker images -a | grep nvidia\n

                                                                                  Expected output:

                                                                                  nvcr.io/nvidia/driver                 e3ed7dee73e9   1 days ago   1.02GB\n
3. Retag the image to match the target repository in the remote registry:

                                                                                  docker tag <image-name> <registry-url>/<repository-name>:<tag>\n

Replace <image-name> with the name of the Nvidia image from the previous step, <registry-url> with the address of the Registry service on the bootstrap node, <repository-name> with the name of the repository you want to push the image to, and <tag> with the desired tag for the image.

                                                                                  For example:

docker tag nvcr.io/nvidia/driver:525.105.17-rhel8.4 10.6.10.5/nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
                                                                                4. Push the image to the bootstrap repository:

                                                                                  docker push {ip}/nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
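Optionally, you can verify the push by listing the image tags through the standard Docker Registry v2 API (assuming the bootstrap registry exposes it over HTTP; substitute your registry address for {ip}):

  curl http://{ip}/v2/nvcr.io/nvidia/driver/tags/list\n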
                                                                                5. "},{"location":"en/admin/kpanda/gpu/nvidia/push_image_to_repo.html#whats-next","title":"What's Next","text":"

                                                                                  Refer to Building Red Hat 8.4 Offline Yum Source and Offline Installation of GPU Operator to deploy the GPU Operator to your cluster.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html","title":"Offline Install gpu-operator Driver on Ubuntu 22.04","text":"

Prerequisite: gpu-operator v23.9.0+2 or a higher version is installed.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html#prepare-offline-image","title":"Prepare Offline Image","text":"
                                                                                  1. Check the kernel version

                                                                                    $ uname -r\n5.15.0-78-generic\n
2. At https://catalog.ngc.nvidia.com/orgs/nvidia/containers/driver/tags, find the GPU driver image version applicable to your kernel, then pull the image by kernel version and save it with ctr export:

                                                                                    ctr i pull nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04\nctr i export --all-platforms driver.tar.gz nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 \n
                                                                                  3. Import the image into the cluster's container registry

                                                                                    ctr i import driver.tar.gz\nctr i tag nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 {your_registry}/nvcr.m.daocloud.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04\nctr i push {your_registry}/nvcr.m.daocloud.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 --skip-verify=true\n
                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html#install-the-driver","title":"Install the Driver","text":"
                                                                                  1. Install the gpu-operator addon and set driver.usePrecompiled=true
2. Set driver.version=535 ; note that it should be 535 , not 535.104.12 . A command-line sketch of these settings follows below.
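A minimal sketch of the two settings above, assuming the standard gpu-operator chart value names (the platform UI sets these fields for you):

  helm upgrade --install gpu-operator nvidia/gpu-operator -n gpu-operator \\\n  --set driver.usePrecompiled=true \\\n  --set-string driver.version=535   # major version only, not 535.104.12\n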
                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html","title":"Build CentOS 7.9 Offline Yum Source","text":"

The AI platform comes with a pre-installed GPU Operator offline package for CentOS 7.9 with kernel version 3.10.0-1160. For other OS types or kernel versions, users need to manually build an offline yum source.

                                                                                  This guide explains how to build an offline yum source for CentOS 7.9 with a specific kernel version and use it when installing the GPU Operator by specifying the RepoConfig.ConfigMapName parameter.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#prerequisites","title":"Prerequisites","text":"
                                                                                  1. The user has already installed the v0.12.0 or later version of the addon offline package on the platform.
                                                                                  2. Prepare a file server that is accessible from the cluster network, such as Nginx or MinIO.
                                                                                  3. Prepare a node that has internet access, can access the cluster where the GPU Operator will be deployed, and can access the file server. Docker should also be installed on this node. You can refer to Installing Docker for installation instructions.
                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#procedure","title":"Procedure","text":"

                                                                                  This guide uses CentOS 7.9 with kernel version 3.10.0-1160.95.1.el7.x86_64 as an example to explain how to upgrade the pre-installed GPU Operator offline package's yum source.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#check-os-and-kernel-versions-of-cluster-nodes","title":"Check OS and Kernel Versions of Cluster Nodes","text":"

                                                                                  Run the following commands on both the control node of the Global cluster and the node where GPU Operator will be deployed. If the OS and kernel versions of the two nodes are consistent, there is no need to build a yum source. You can directly refer to the Offline Installation of GPU Operator document for installation. If the OS or kernel versions of the two nodes are not consistent, please proceed to the next step.

                                                                                  1. Run the following command to view the distribution name and version of the node where GPU Operator will be deployed in the cluster.

                                                                                    cat /etc/redhat-release\n

                                                                                    Expected output:

                                                                                    CentOS Linux release 7.9 (Core)\n

                                                                                    The output shows the current node's OS version as CentOS 7.9.

                                                                                  2. Run the following command to view the kernel version of the node where GPU Operator will be deployed in the cluster.

                                                                                    uname -a\n

                                                                                    Expected output:

                                                                                    Linux localhost.localdomain 3.10.0-1160.95.1.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux\n

The output shows the current node's kernel version as 3.10.0-1160.95.1.el7.x86_64.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#create-the-offline-yum-source","title":"Create the Offline Yum Source","text":"

                                                                                  Perform the following steps on a node that has internet access and can access the file server:

                                                                                  1. Create a script file named yum.sh by running the following command:

                                                                                    vi yum.sh\n

                                                                                    Then press the i key to enter insert mode and enter the following content:

export TARGET_KERNEL_VERSION=$1\n\ncat >> run.sh << \\EOF\n#! /bin/bash\necho \"start install kernel repo\"\necho ${KERNEL_VERSION}\nmkdir centos-base\n\nif [ \"$OS\" -eq 7 ]; then\n    yum install --downloadonly --downloaddir=./centos-base perl\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf.x86_64\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf-devel.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-headers-${KERNEL_VERSION}.el7.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-devel-${KERNEL_VERSION}.el7.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-${KERNEL_VERSION}.el7.x86_64\n    yum install  -y --downloadonly --downloaddir=./centos-base groff-base\nelif [ \"$OS\" -eq 8 ]; then\n    yum install --downloadonly --downloaddir=./centos-base perl\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf.x86_64\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf-devel.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-headers-${KERNEL_VERSION}.el8.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-devel-${KERNEL_VERSION}.el8.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-${KERNEL_VERSION}.el8.x86_64\n    yum install  -y --downloadonly --downloaddir=./centos-base groff-base\nelse\n    echo \"Error: unsupported OS version\"\nfi\n\ncreaterepo centos-base/\nls -lh centos-base/\ntar -zcf centos-base.tar.gz centos-base/\necho \"end install kernel repo\"\nEOF\n\ncat >> Dockerfile << EOF\nFROM centos:7\nENV KERNEL_VERSION=\"\"\nENV OS=7\nRUN yum install -y createrepo\nCOPY run.sh .\nENTRYPOINT [\"/bin/bash\",\"run.sh\"]\nEOF\n\ndocker build -t test:v1 -f Dockerfile .\ndocker run -e KERNEL_VERSION=$TARGET_KERNEL_VERSION --name centos7.9 test:v1\ndocker cp centos7.9:/centos-base.tar.gz .\ntar -xzf centos-base.tar.gz\n

                                                                                    Press the Esc key to exit insert mode, then enter :wq to save and exit.

                                                                                  2. Run the yum.sh file:

                                                                                    bash -x yum.sh TARGET_KERNEL_VERSION\n

                                                                                    The TARGET_KERNEL_VERSION parameter is used to specify the kernel version of the cluster nodes.

Note: You don't need to include the distribution identifier (e.g., .el7.x86_64 ). For example:

                                                                                    bash -x yum.sh 3.10.0-1160.95.1\n

                                                                                  Now you have generated an offline yum source, centos-base , for the kernel version 3.10.0-1160.95.1.el7.x86_64 .
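Before uploading, you can sanity-check the generated repository in the current directory, for example:

  ls centos-base/repodata/             # createrepo metadata should be present\ntar -tzf centos-base.tar.gz | head   # RPMs packaged by the build container\n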

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#upload-the-offline-yum-source-to-the-file-server","title":"Upload the Offline Yum Source to the File Server","text":"

                                                                                  Perform the following steps on a node that has internet access and can access the file server. This step is used to upload the generated yum source from the previous step to a file server that can be accessed by the cluster where the GPU Operator will be deployed. The file server can be Nginx, MinIO, or any other file server that supports the HTTP protocol.

                                                                                  In this example, we will use the built-in MinIO as the file server. The MinIO details are as follows:

                                                                                  • Access URL: http://10.5.14.200:9000 (usually {bootstrap-node IP} + {port-9000} )
                                                                                  • Login username: rootuser
                                                                                  • Login password: rootpass123

                                                                                  • Run the following command in the current directory of the node to establish a connection between the node's local mc command-line tool and the MinIO server:

                                                                                    mc config host add minio http://10.5.14.200:9000 rootuser rootpass123\n

                                                                                    The expected output should resemble the following:

                                                                                    Added __minio__ successfully.\n

                                                                                    mc is the command-line tool provided by MinIO for interacting with the MinIO server. For more details, refer to the MinIO Client documentation.

                                                                                  • In the current directory of the node, create a bucket named centos-base :

                                                                                    mc mb -p minio/centos-base\n

                                                                                    The expected output should resemble the following:

                                                                                    Bucket created successfully __minio/centos-base__ .\n
                                                                                  • Set the access policy of the bucket centos-base to allow public download. This will enable access during the installation of the GPU Operator:

                                                                                    mc anonymous set download minio/centos-base\n

                                                                                    The expected output should resemble the following:

                                                                                    Access permission for __minio/centos-base__ is set to __download__ \n
                                                                                  • In the current directory of the node, copy the generated centos-base offline yum source to the minio/centos-base bucket on the MinIO server:

                                                                                    mc cp centos-base minio/centos-base --recursive\n
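To confirm the upload worked and the bucket is publicly downloadable, you can request the repository metadata over HTTP; the URL pattern follows the MinIO address and bucket used above and should return HTTP 200:

  curl -I http://10.5.14.200:9000/centos-base/centos-base/repodata/repomd.xml\n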
                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#create-a-configmap-to-store-the-yum-source-info-in-the-cluster","title":"Create a ConfigMap to Store the Yum Source Info in the Cluster","text":"

                                                                                  Perform the following steps on the control node of the cluster where the GPU Operator will be deployed.

1. Run the following command to create a file named CentOS-Base.repo , which specifies the yum source configuration information to be stored in the configmap:

                                                                                    # The file name must be CentOS-Base.repo, otherwise it cannot be recognized during the installation of the GPU Operator\ncat > CentOS-Base.repo << EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # The file server address where the yum source is placed in step 3\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # The file server address where the yum source is placed in step 3\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
                                                                                  2. Based on the created CentOS-Base.repo file, create a configmap named local-repo-config in the gpu-operator namespace:

kubectl create configmap local-repo-config -n gpu-operator --from-file=CentOS-Base.repo=./CentOS-Base.repo\n

                                                                                    The expected output should resemble the following:

                                                                                    configmap/local-repo-config created\n

                                                                                    The local-repo-config configmap will be used to provide the value for the RepoConfig.ConfigMapName parameter during the installation of the GPU Operator. You can customize the configuration file name.

                                                                                  3. View the content of the local-repo-config configmap:

                                                                                    kubectl get configmap local-repo-config -n gpu-operator -oyaml\n

                                                                                    The expected output should resemble the following:

apiVersion: v1\ndata:\n  CentOS-Base.repo: \"[extension-0]\\nbaseurl = http://10.6.232.5:32618/centos-base # The file server path where the yum source is placed in step 2\\ngpgcheck = 0\\nname = kubean extension 0\\n  \\n[extension-1]\\nbaseurl = http://10.6.232.5:32618/centos-base # The file server path where the yum source is placed in step 2\\ngpgcheck = 0\\nname = kubean extension 1\\n\"\nkind: ConfigMap\nmetadata:\n  creationTimestamp: \"2023-10-18T01:59:02Z\"\n  name: local-repo-config\n  namespace: gpu-operator\n  resourceVersion: \"59445080\"\n  uid: c5f0ebab-046f-442c-b932-f9003e014387\n

                                                                                  You have successfully created an offline yum source configuration file for the cluster where the GPU Operator will be deployed. You can use it during the offline installation of the GPU Operator by specifying the RepoConfig.ConfigMapName parameter.
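For example, when installing the chart from the command line, the configmap is referenced roughly like this (the value name assumes the upstream NVIDIA chart; in the platform UI, fill it into the Driver.RepoConfig.ConfigMapName field instead):

  --set driver.repoConfig.configMapName=local-repo-config\n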

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html","title":"Building Red Hat 8.4 Offline Yum Source","text":"

The AI platform comes with a pre-installed GPU Operator offline package for CentOS 7.9 with kernel v3.10.0-1160. For other OS types or nodes with different kernels, users need to manually build the offline yum source.

                                                                                  This guide explains how to build an offline yum source package for Red Hat 8.4 based on any node in the Global cluster. It also demonstrates how to use it during the installation of the GPU Operator by specifying the RepoConfig.ConfigMapName parameter.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#prerequisites","title":"Prerequisites","text":"
                                                                                  1. The user has already installed the addon offline package v0.12.0 or higher on the platform.
                                                                                  2. The OS of the cluster nodes where the GPU Operator will be deployed must be Red Hat v8.4, and the kernel version must be identical.
                                                                                  3. Prepare a file server that can communicate with the cluster network where the GPU Operator will be deployed, such as Nginx or MinIO.
                                                                                  4. Prepare a node that can access the internet, the cluster where the GPU Operator will be deployed, and the file server. Ensure that Docker is already installed on this node.
5. The nodes in the Global cluster must be Red Hat 8.4 with kernel 4.18.0-305.el8.x86_64.
                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#procedure","title":"Procedure","text":"

                                                                                  This guide uses a node with Red Hat 8.4 4.18.0-305.el8.x86_64 as an example to demonstrate how to build an offline yum source package for Red Hat 8.4 based on any node in the Global cluster. It also explains how to use it during the installation of the GPU Operator by specifying the RepoConfig.ConfigMapName parameter.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-1-download-the-yum-source-from-the-bootstrap-node","title":"Step 1: Download the Yum Source from the Bootstrap Node","text":"

                                                                                  Perform the following steps on the master node of the Global cluster.

                                                                                  1. Use SSH or any other method to access any node in the Global cluster and run the following command:

                                                                                    cat /etc/yum.repos.d/extension.repo # View the contents of extension.repo.\n

                                                                                    The expected output should resemble the following:

                                                                                    [extension-0]\nbaseurl = http://10.5.14.200:9000/kubean/redhat/$releasever/os/$basearch\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/kubean/redhat-iso/$releasever/os/$basearch/AppStream\ngpgcheck = 0\nname = kubean extension 1\n\n[extension-2]\nbaseurl = http://10.5.14.200:9000/kubean/redhat-iso/$releasever/os/$basearch/BaseOS\ngpgcheck = 0\nname = kubean extension 2\n
                                                                                  2. Create a folder named redhat-base-repo under the root directory:

                                                                                    mkdir redhat-base-repo\n
                                                                                  3. Download the RPM packages from the yum source to your local machine:

                                                                                    Download the RPM packages from extension-1 :

                                                                                    reposync -p redhat-base-repo -n --repoid=extension-1\n

                                                                                    Download the RPM packages from extension-2 :

                                                                                    reposync -p redhat-base-repo -n --repoid=extension-2\n
                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-2-download-the-elfutils-libelf-devel-0187-4el8x86_64rpm-package","title":"Step 2: Download the elfutils-libelf-devel-0.187-4.el8.x86_64.rpm Package","text":"

                                                                                  Perform the following steps on a node with internet access. Before proceeding, ensure that there is network connectivity between the node with internet access and the master node of the Global cluster.

                                                                                  1. Run the following command on the node with internet access to download the elfutils-libelf-devel-0.187-4.el8.x86_64.rpm package:

                                                                                    wget https://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/elfutils-libelf-devel-0.187-4.el8.x86_64.rpm\n
                                                                                  2. Transfer the elfutils-libelf-devel-0.187-4.el8.x86_64.rpm package from the current directory to the node mentioned in step 1:

                                                                                    scp elfutils-libelf-devel-0.187-4.el8.x86_64.rpm user@ip:~/redhat-base-repo/extension-2/Packages/\n

                                                                                    For example:

                                                                                    scp elfutils-libelf-devel-0.187-4.el8.x86_64.rpm root@10.6.175.10:~/redhat-base-repo/extension-2/Packages/\n
                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-3-generate-the-local-yum-repository","title":"Step 3: Generate the Local Yum Repository","text":"

                                                                                  Perform the following steps on the master node of the Global cluster mentioned in Step 1.

1. Generate the repository index for each yum repository directory:

  createrepo_c ~/redhat-base-repo/extension-1/Packages\ncreaterepo_c ~/redhat-base-repo/extension-2/Packages\n

                                                                                  You have now generated the offline yum source named redhat-base-repo for kernel version 4.18.0-305.el8.x86_64 .

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-4-upload-the-local-yum-repository-to-the-file-server","title":"Step 4: Upload the Local Yum Repository to the File Server","text":"

In this example, we will use the MinIO file server built into the bootstrap node. However, you can choose any file server that suits your needs. The MinIO details are as follows:

                                                                                  • Access URL: http://10.5.14.200:9000 (usually the {bootstrap-node-IP} + {port-9000})
                                                                                  • Login username: rootuser
                                                                                  • Login password: rootpass123

                                                                                  • On the current node, establish a connection between the local mc command-line tool and the Minio server by running the following command:

                                                                                    mc config host add minio <file_server_access_url> <username> <password>\n

                                                                                    For example:

                                                                                    mc config host add minio http://10.5.14.200:9000 rootuser rootpass123\n

                                                                                    The expected output should be similar to:

                                                                                    Added __minio__ successfully.\n

mc is the command-line client provided by MinIO for interacting with the MinIO server. For more details, refer to the MinIO Client documentation.

                                                                                  • Create a bucket named redhat-base in the current location:

                                                                                    mc mb -p minio/redhat-base\n

                                                                                    The expected output should be similar to:

                                                                                    Bucket created successfully __minio/redhat-base__ .\n
                                                                                  • Set the access policy of the redhat-base bucket to allow public downloads so that it can be accessed during the installation of the GPU Operator:

                                                                                    mc anonymous set download minio/redhat-base\n

                                                                                    The expected output should be similar to:

                                                                                    Access permission for __minio/redhat-base__ is set to __download__ \n
                                                                                  • Copy the offline yum repository files ( redhat-base-repo ) from the current location to the Minio server's minio/redhat-base bucket:

                                                                                    mc cp redhat-base-repo minio/redhat-base --recursive\n
                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-5-create-a-configmap-to-store-yum-repository-information-in-the-cluster","title":"Step 5: Create a ConfigMap to Store Yum Repository Information in the Cluster","text":"

                                                                                  Perform the following steps on the control node of the cluster where you will deploy the GPU Operator.

                                                                                  1. Run the following command to create a file named redhat.repo , which specifies the configuration information for the yum repository storage:

                                                                                    # The file name must be redhat.repo, otherwise it won't be recognized when installing gpu-operator\ncat > redhat.repo << EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/redhat-base/redhat-base-repo/Packages # The file server address where the yum source is stored in Step 1\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/redhat-base/redhat-base-repo/Packages # The file server address where the yum source is stored in Step 1\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
                                                                                  2. Based on the created redhat.repo file, create a configmap named local-repo-config in the gpu-operator namespace:

                                                                                    kubectl create configmap local-repo-config -n gpu-operator --from-file=./redhat.repo\n

                                                                                    The expected output should be similar to:

                                                                                    configmap/local-repo-config created\n

                                                                                    The local-repo-config configuration file is used to provide the value for the RepoConfig.ConfigMapName parameter during the installation of the GPU Operator. You can choose a different name for the configuration file.

                                                                                  3. View the contents of the local-repo-config configuration file:

                                                                                    kubectl get configmap local-repo-config -n gpu-operator -oyaml\n

                                                                                  You have successfully created the offline yum source configuration file for the cluster where the GPU Operator will be deployed. You can use it by specifying the RepoConfig.ConfigMapName parameter during the offline installation of the GPU Operator.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html","title":"Build an Offline Yum Repository for Red Hat 7.9","text":""},{"location":"en/admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#introduction","title":"Introduction","text":"

The AI platform comes with a pre-installed GPU Operator offline package for CentOS 7.9 with kernel 3.10.0-1160. For other OS types or nodes with different kernels, you need to manually build an offline yum repository.

                                                                                  This page explains how to build an offline yum repository for Red Hat 7.9 based on any node in the Global cluster, and how to use the RepoConfig.ConfigMapName parameter when installing the GPU Operator.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#prerequisites","title":"Prerequisites","text":"
                                                                                  1. The cluster nodes where the GPU Operator is to be deployed must be Red Hat 7.9 with the exact same kernel version.
                                                                                  2. Prepare a file server that can be connected to the cluster network where the GPU Operator is to be deployed, such as nginx or minio.
                                                                                  3. Prepare a node that can access the internet, the cluster where the GPU Operator is to be deployed, and the file server. Docker installation must be completed on this node.
4. The nodes in the Global cluster must be Red Hat 7.9.
                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#steps","title":"Steps","text":""},{"location":"en/admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#1-build-offline-yum-repo-for-relevant-kernel","title":"1. Build Offline Yum Repo for Relevant Kernel","text":"
                                                                                  1. Download rhel7.9 ISO

                                                                                  2. Download the rhel7.9 ospackage that corresponds to your Kubean version.

                                                                                    Find the version number of Kubean in the Container Management section of the Global cluster under Helm Apps.

                                                                                    Download the rhel7.9 ospackage for that version from the Kubean repository.

                                                                                  3. Import offline resources using the installer.

                                                                                    Refer to the Import Offline Resources document.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#2-download-offline-driver-image-for-red-hat-79-os","title":"2. Download Offline Driver Image for Red Hat 7.9 OS","text":"

Click here to view the download URL.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#3-upload-red-hat-gpu-operator-offline-image-to-boostrap-node-repository","title":"3. Upload Red Hat GPU Operator Offline Image to Boostrap Node Repository","text":"

Refer to Upload Red Hat GPU Operator Offline Image to Bootstrap Node Repository.

                                                                                  Note

                                                                                  This reference is based on rhel8.4, so make sure to modify it for rhel7.9.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/yum_source_redhat7_9.html#4-create-configmaps-in-the-cluster-to-save-yum-repository-information","title":"4. Create ConfigMaps in the Cluster to Save Yum Repository Information","text":"

                                                                                  Run the following command on the control node of the cluster where the GPU Operator is to be deployed.

                                                                                  1. Run the following command to create a file named CentOS-Base.repo to specify the configuration information where the yum repository is stored.

# The file name must be CentOS-Base.repo, otherwise it will not be recognized when installing gpu-operator\ncat > CentOS-Base.repo << EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # The file server address on the bootstrap node, usually {bootstrap node IP} + {port 9000}\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # The file server address on the bootstrap node, usually {bootstrap node IP} + {port 9000}\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
2. Based on the created CentOS-Base.repo file, create a configmap named local-repo-config in the gpu-operator namespace:

kubectl create configmap local-repo-config -n gpu-operator --from-file=CentOS-Base.repo=./CentOS-Base.repo\n

                                                                                    The expected output is as follows:

                                                                                    configmap/local-repo-config created\n

The local-repo-config configmap is used to provide the value of the RepoConfig.ConfigMapName parameter when installing gpu-operator; the configmap name can be customized by the user.

3. View the contents of the local-repo-config configmap:

                                                                                    kubectl get configmap local-repo-config -n gpu-operator -oyaml\n

                                                                                    The expected output is as follows:

                                                                                    local-repo-config.yaml
                                                                                    apiVersion: v1\ndata:\n  CentOS-Base.repo: \"[extension-0]\\nbaseurl = http://10.6.232.5:32618/centos-base # The file path where yum repository is placed in Step 2 \\ngpgcheck = 0\\nname = kubean extension 0\\n  \\n[extension-1]\\nbaseurl\n  = http://10.6.232.5:32618/centos-base # The file path where yum repository is placed in Step 2 \\ngpgcheck = 0\\nname\n  = kubean extension 1\\n\"\nkind: ConfigMap\nmetadata:\n  creationTimestamp: \"2023-10-18T01:59:02Z\"\n  name: local-repo-config\n  namespace: gpu-operator\n  resourceVersion: \"59445080\"\n  uid: c5f0ebab-046f-442c-b932-f9003e014387\n

At this point, you have successfully created the offline yum repository configuration for the cluster where the GPU Operator is to be deployed. Use it by specifying the RepoConfig.ConfigMapName parameter during the Offline Installation of GPU Operator.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/mig/index.html","title":"Overview of NVIDIA Multi-Instance GPU (MIG)","text":""},{"location":"en/admin/kpanda/gpu/nvidia/mig/index.html#mig-scenarios","title":"MIG Scenarios","text":"
                                                                                  • Multi-Tenant Cloud Environments:

                                                                                  MIG allows cloud service providers to partition a physical GPU into multiple independent GPU instances, which can be allocated to different tenants. This enables resource isolation and independence, meeting the GPU computing needs of multiple tenants.

                                                                                  • Containerized Applications:

MIG enables finer-grained GPU resource management in containerized environments. By partitioning a physical GPU into multiple MIG instances, each container can be assigned dedicated GPU compute resources, providing better performance isolation and resource utilization.

                                                                                  • Batch Processing Jobs:

                                                                                  For batch processing jobs requiring large-scale parallel computing, MIG provides higher computational performance and larger memory capacity. Each MIG instance can utilize a portion of the physical GPU's compute resources, accelerating the processing of large-scale computational tasks.

                                                                                  • AI/Machine Learning Training:

                                                                                  MIG offers increased compute power and memory capacity for training large-scale deep learning models. By partitioning the physical GPU into multiple MIG instances, each instance can independently carry out model training, improving training efficiency and throughput.

                                                                                  In general, NVIDIA MIG is suitable for scenarios that require finer-grained allocation and management of GPU resources. It enables resource isolation, improved performance utilization, and meets the GPU computing needs of multiple users or applications.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/mig/index.html#overview-of-mig","title":"Overview of MIG","text":"

                                                                                  NVIDIA Multi-Instance GPU (MIG) is a new feature introduced by NVIDIA on H100, A100, and A30 series GPUs. Its purpose is to divide a physical GPU into multiple GPU instances to provide finer-grained resource sharing and isolation. MIG can split a GPU into up to seven GPU instances, allowing a single physical GPU to provide separate GPU resources to multiple users, maximizing GPU utilization.

                                                                                  This feature enables multiple applications or users to share GPU resources simultaneously, improving the utilization of computational resources and increasing system scalability.

                                                                                   With MIG, each GPU instance's processors have an independent and isolated path through the entire memory system: on-chip crossbar ports, L2 cache banks, memory controllers, and DRAM address buses are all uniquely allocated to a single instance.

                                                                                  This ensures that the workload of individual users can run with predictable throughput and latency, along with identical L2 cache allocation and DRAM bandwidth. MIG can partition available GPU compute resources (such as streaming multiprocessors or SMs and GPU engines like copy engines or decoders) to provide defined quality of service (QoS) and fault isolation for different clients such as virtual machines, containers, or processes. MIG enables multiple GPU instances to run in parallel on a single physical GPU.

                                                                                  MIG allows multiple vGPUs (and virtual machines) to run in parallel on a single GPU instance while retaining the isolation guarantees provided by vGPU. For more details on using vGPU and MIG for GPU partitioning, refer to NVIDIA Multi-Instance GPU and NVIDIA Virtual Compute Server.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/mig/index.html#mig-architecture","title":"MIG Architecture","text":"

                                                                                  The following diagram provides an overview of MIG, illustrating how it virtualizes one physical GPU into seven GPU instances that can be used by multiple users.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/mig/index.html#important-concepts","title":"Important Concepts","text":"
                                                                                  • SM (Streaming Multiprocessor): The core computational unit of a GPU responsible for executing graphics rendering and general-purpose computing tasks. Each SM contains a group of CUDA cores, as well as shared memory, register files, and other resources, capable of executing multiple threads concurrently. Each MIG instance has a certain number of SMs and other related resources, along with the allocated memory slices.
                                                                                   • GPU Memory Slice: The smallest fraction of GPU memory, including the corresponding memory controller and cache. A GPU memory slice is roughly one-eighth of the total GPU memory resources in both capacity and bandwidth.
                                                                                   • GPU SM Slice: The smallest fraction of the SMs on a GPU. When configured in MIG mode, a GPU SM slice is roughly one-seventh of the total available SMs.
                                                                                   • GPU Slice: The smallest portion of the GPU, combining a single GPU memory slice with a single GPU SM slice.
                                                                                   • GPU Instance (GI): The combination of a GPU slice and GPU engines (DMA, NVDEC, etc.). Everything within a GPU instance shares its GPU memory slices and GPU engines, but its SM slice can be further subdivided into Compute Instances (CIs). A GPU instance provides memory QoS: each GPU slice contains dedicated GPU memory resources that limit available capacity and bandwidth, with each GPU memory slice getting one-eighth of the total GPU memory resources and each GPU SM slice getting one-seventh of the total SM count.
                                                                                   • Compute Instance (CI): The smallest computational unit within a GPU instance. It consists of a subset of SMs, along with dedicated register files, shared memory, and other resources. Each CI has its own CUDA context and can run independent CUDA kernels. The number of CIs in a GPU instance depends on the number of available SMs and the configuration chosen during MIG setup.
                                                                                   • Instance Slice: A single CI within a GPU instance, combining a subset of SMs with a portion of the GPU memory slice. Each Instance Slice provides isolation and resource allocation for an individual application or user running on the GPU instance.
                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/mig/index.html#key-benefits-of-mig","title":"Key Benefits of MIG","text":"
                                                                                  • Resource Sharing: MIG allows a single physical GPU to be divided into multiple GPU instances, providing efficient sharing of GPU resources among different users or applications. This maximizes GPU utilization and enables improved performance isolation.

                                                                                  • Fine-Grained Resource Allocation: With MIG, GPU resources can be allocated at a finer granularity, allowing for more precise partitioning and allocation of compute power and memory capacity.

                                                                                  • Improved Performance Isolation: Each MIG instance operates independently with its dedicated resources, ensuring predictable throughput and latency for individual users or applications. This improves performance isolation and prevents interference between different workloads running on the same GPU.

                                                                                  • Enhanced Security and Fault Isolation: MIG provides better security and fault isolation by ensuring that each user or application has its dedicated GPU resources. This prevents unauthorized access to data and mitigates the impact of faults or errors in one instance on others.

                                                                                  • Increased Scalability: MIG enables the simultaneous usage of GPU resources by multiple users or applications, increasing system scalability and accommodating the needs of various workloads.

                                                                                  • Efficient Containerization: By using MIG in containerized environments, GPU resources can be effectively allocated to different containers, improving performance isolation and resource utilization.

                                                                                  Overall, MIG offers significant advantages in terms of resource sharing, fine-grained allocation, performance isolation, security, scalability, and containerization, making it a valuable feature for various GPU computing scenarios.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/mig/create_mig.html","title":"Enabling MIG Features","text":"

                                                                                  This section describes how to enable NVIDIA MIG features. NVIDIA currently provides two strategies for exposing MIG devices on Kubernetes nodes:

                                                                                   • Single mode: Nodes expose a single type of MIG device on all their GPUs.
                                                                                   • Mixed mode: Nodes expose a mixture of MIG device types on all their GPUs.

                                                                                  For more details, refer to the NVIDIA GPU Usage Modes.
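
                                                                                   For reference, the exposure strategy is normally selected through the GPU Operator's Helm values; here is a minimal sketch, assuming the upstream mig.strategy field (verify against your chart version):

                                                                                     mig:\n  strategy: single # single or mixed; controls how MIG devices are advertised to Kubernetes\n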

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/mig/create_mig.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • Check the system requirements for the GPU driver installation on the target node: GPU Support Matrix
                                                                                  • Ensure that the cluster nodes have GPUs of the proper models (NVIDIA H100, A100, and A30 Tensor Core GPUs). For more information, see the GPU Support Matrix.
                                                                                  • All GPUs on the nodes must belong to the same product line (e.g., A100-SXM-40GB).
                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/mig/create_mig.html#install-gpu-operator-addon","title":"Install GPU Operator Addon","text":""},{"location":"en/admin/kpanda/gpu/nvidia/mig/create_mig.html#parameter-configuration","title":"Parameter Configuration","text":"

                                                                                  When installing the Operator, you need to set the MigManager Config parameter accordingly. The default setting is default-mig-parted-config. You can also customize the sharding policy configuration file:

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/mig/create_mig.html#custom-sharding-policy","title":"Custom Sharding Policy","text":"
                                                                                     ## Custom GI Instance Configuration\n  all-disabled:\n    - devices: all\n      mig-enabled: false\n  all-enabled:\n    - devices: all\n      mig-enabled: true\n      mig-devices: {}\n  all-1g.10gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb: 7\n  all-1g.10gb.me:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb+me: 1\n  all-1g.20gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.20gb: 4\n  all-2g.20gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        2g.20gb: 3\n  all-3g.40gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        3g.40gb: 2\n  all-4g.40gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        4g.40gb: 1\n  all-7g.80gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        7g.80gb: 1\n  all-balanced:\n    - device-filter: [\"0x233110DE\", \"0x232210DE\", \"0x20B210DE\", \"0x20B510DE\", \"0x20F310DE\", \"0x20F510DE\"]\n      devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb: 2\n        2g.20gb: 1\n        3g.40gb: 1\n  # After setting, GI instances will be partitioned according to the specified configuration\n  custom-config:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        3g.40gb: 2\n

                                                                                   In the YAML above, the custom-config entry partitions GI instances according to the given specification. To further partition CI instances within a GI, include the CI count in the profile name, for example:

                                                                                  custom-config:\n  - devices: all\n    mig-enabled: true\n    mig-devices:\n      1c.3g.40gb: 6\n

                                                                                   After completing the configuration, you can request MIG GPU resources when deploying an application.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/mig/create_mig.html#switch-node-gpu-mode","title":"Switch Node GPU Mode","text":"

                                                                                   After the GPU Operator is successfully installed, nodes are in full-card mode by default, and an indicator is shown on the node management page:

                                                                                  Click the \u2507 at the right side of the node list, select a GPU mode to switch, and then choose the proper MIG mode and sharding policy. Here, we take MIXED mode as an example:

                                                                                  There are two configurations here:

                                                                                  1. MIG Policy: Mixed and Single.
                                                                                  2. Sharding Policy: The policy here needs to match the key in the default-mig-parted-config (or user-defined sharding policy) configuration file.

                                                                                   After clicking the OK button, wait about one minute and refresh the page. The MIG mode is switched accordingly:
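
                                                                                   For reference, if the platform relies on the GPU Operator's standard MIG manager, this switch ultimately corresponds to a node label that the MIG manager reconciles; below is a hedged sketch, assuming the upstream nvidia.com/mig.config label convention (the node name is hypothetical, and the label is usually applied via kubectl rather than by editing the Node object):

                                                                                     apiVersion: v1\nkind: Node\nmetadata:\n  name: worker-gpu-001 # hypothetical node name\n  labels:\n    nvidia.com/mig.config: all-3g.40gb # must match a key in the sharding policy configuration file\n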

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/mig/mig_command.html","title":"MIG Related Commands","text":"

                                                                                  GI Related Commands:

                                                                                   • nvidia-smi mig -lgi : View the list of created GI instances
                                                                                   • nvidia-smi mig -dgi -gi {gi instance id} : Delete the specified GI instance
                                                                                   • nvidia-smi mig -lgip : View the available GI profiles
                                                                                   • nvidia-smi mig -cgi {gi profile id} : Create a GI using the specified profile ID

                                                                                  CI Related Commands:

                                                                                   • nvidia-smi mig -lcip { -gi {gi instance id} } : View the CI profiles; specifying -gi shows the CIs that can be created on a particular GI instance
                                                                                   • nvidia-smi mig -lci : View the list of created CI instances
                                                                                   • nvidia-smi mig -cci {ci profile id} -gi {gi instance id} : Create a CI on the specified GI
                                                                                   • nvidia-smi mig -dci -ci {ci instance id} : Delete the specified CI instance

                                                                                  GI+CI Related Commands:

                                                                                   • nvidia-smi mig -i 0 -cgi {gi profile id} -C {ci profile id} : Create a GI + CI instance directly"},{"location":"en/admin/kpanda/gpu/nvidia/mig/mig_usage.html","title":"Using MIG GPU Resources","text":"

                                                                                  This section explains how applications can use MIG GPU resources.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/mig/mig_usage.html#prerequisites","title":"Prerequisites","text":"
                                                                                   • The AI platform container management module is deployed and running.
                                                                                   • The container management module has integrated an existing Kubernetes cluster or created one, and the cluster's UI can be accessed.
                                                                                  • NVIDIA DevicePlugin and MIG capabilities are enabled. Refer to Offline installation of GPU Operator for details.
                                                                                  • The nodes in the cluster have GPUs of the proper models.
                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/mig/mig_usage.html#using-mig-gpu-through-the-ui","title":"Using MIG GPU through the UI","text":"
                                                                                  1. Confirm if the cluster has recognized the GPU type.

                                                                                    Go to Cluster Details -> Nodes and check if it has been correctly recognized as MIG.

                                                                                  2. When deploying an application using an image, you can select and use NVIDIA MIG resources.

                                                                                  3. Example of MIG Single Mode (used in the same way as a full GPU):

                                                                                    Note

                                                                                    The MIG single policy allows users to request and use GPU resources in the same way as a full GPU (nvidia.com/gpu). The difference is that these resources can be a portion of the GPU (MIG device) rather than the entire GPU. Learn more from the GPU MIG Mode Design.

                                                                                  4. MIG Mixed Mode

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/mig/mig_usage.html#using-mig-through-yaml-configuration","title":"Using MIG through YAML Configuration","text":"

                                                                                  MIG Single mode:

                                                                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mig-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mig-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mig-demo\n    spec:\n      containers:\n        - name: mig-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/gpu: 2 # (1)!\n          imagePullPolicy: Always\n      restartPolicy: Always\n
                                                                                  1. Number of MIG GPUs to request

                                                                                  MIG Mixed mode:

                                                                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mig-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mig-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mig-demo\n    spec:\n      containers:\n        - name: mig-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/mig-4g.20gb: 1 # (1)!\n          imagePullPolicy: Always\n      restartPolicy: Always\n
                                                                                   1. Expose MIG devices through the nvidia.com/mig-{g}g.{gb}gb resource type, for example nvidia.com/mig-4g.20gb

                                                                                  After entering the container, you can check if only one MIG device is being used:

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/vgpu/hami.html","title":"Build a vGPU Memory Oversubscription Image","text":"

                                                                                   The vGPU memory oversubscription feature has been removed from the Hami Project. To use this feature, you need to rebuild the image with a libvgpu.so file that supports memory oversubscription.

                                                                                  Dockerfile
                                                                                  FROM docker.m.daocloud.io/projecthami/hami:v2.3.11\nCOPY libvgpu.so /k8s-vgpu/lib/nvidia/\n

                                                                                  Run the following command to build the image:

                                                                                  docker build -t release.daocloud.io/projecthami/hami:v2.3.11 -f Dockerfile .\n

                                                                                  Then, push the image to release.daocloud.io.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/vgpu/vgpu_addon.html","title":"Installing NVIDIA vGPU Addon","text":"

                                                                                   To virtualize a single NVIDIA GPU into multiple virtual GPUs and allocate them to different virtual machines or users, you can use NVIDIA's vGPU capability. This section explains how to install the vGPU plugin in the AI platform, which is a prerequisite for using the NVIDIA vGPU capability.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/vgpu/vgpu_addon.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • Refer to the GPU Support Matrix to confirm that the nodes in the cluster have GPUs of the proper models.
                                                                                  • The current cluster has deployed NVIDIA drivers through the Operator. For specific instructions, refer to Offline Installation of GPU Operator.
                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/vgpu/vgpu_addon.html#procedure","title":"Procedure","text":"
                                                                                   1. Path: Container Management -> Cluster Management -> Click the target cluster -> Helm Apps -> Helm Charts -> Search for nvidia-vgpu.

                                                                                  2. During the installation of vGPU, several basic modification parameters are provided. If you need to modify advanced parameters, click the YAML column to make changes:

                                                                                     • deviceMemoryScaling: NVIDIA device memory scaling factor; the input value must be an integer, with a default value of 1. It can be greater than 1 (enabling virtual memory, an experimental feature). For an NVIDIA GPU with memory size M, if the devicePlugin.deviceMemoryScaling parameter is configured as S, the vGPUs assigned from this GPU will have a total memory of S * M in a Kubernetes cluster where the device plugin is deployed. (A values sketch for these parameters follows this list.)

                                                                                     • deviceSplitCount: An integer with a default value of 10 that sets the number of GPU splits. Each GPU cannot be assigned more tasks than this count; if configured as N, each GPU can run up to N tasks simultaneously.

                                                                                     • Resources: The resource usage of the vgpu-device-plugin and vgpu-scheduler pods.
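
                                                                                   For reference, here is a minimal sketch of how these two parameters might appear in the addon's values YAML, assuming a HAMi-style devicePlugin block (field paths may differ between chart versions):

                                                                                     devicePlugin:\n  deviceMemoryScaling: 2 # vGPUs sliced from a GPU with memory M get a total of 2 * M (memory oversubscription)\n  deviceSplitCount: 10 # each physical GPU can be shared by up to 10 tasks\n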

                                                                                  3. After a successful installation, you will see two types of pods in the specified namespace, indicating that the NVIDIA vGPU plugin has been successfully installed:

                                                                                  After a successful installation, you can deploy applications using vGPU resources.

                                                                                  Note

                                                                                   The NVIDIA vGPU Addon does not support upgrading directly from the older v2.0.0 to the latest v2.0.0+1. To upgrade, uninstall the older version and then install the latest version.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html","title":"Using NVIDIA vGPU in Applications","text":"

                                                                                   This section explains how to use the vGPU capability in the AI platform.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • The nodes in the cluster have GPUs of the proper models.
                                                                                  • vGPU Addon has been successfully installed. Refer to Installing GPU Addon for details.
                                                                                  • GPU Operator is installed, and the Nvidia.DevicePlugin capability is disabled. Refer to Offline Installation of GPU Operator for details.
                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html#procedure","title":"Procedure","text":""},{"location":"en/admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html#using-vgpu-through-the-ui","title":"Using vGPU through the UI","text":"
                                                                                   1. Confirm whether the cluster has detected GPUs. Click Clusters -> Cluster Settings -> Addon Plugins and check whether the GPU plugin has been automatically enabled and the proper GPU type detected. Currently, the cluster automatically enables the GPU addon and sets the GPU Type to Nvidia vGPU.

                                                                                   2. Deploy a workload by clicking Clusters -> Workloads. When deploying a workload using an image, select the type Nvidia vGPU, and you will be prompted with the following parameters:

                                                                                     • Number of Physical Cards (nvidia.com/vgpu): Indicates how many physical cards the current pod needs to mount. The input value must be an integer and less than or equal to the number of cards on the host machine.
                                                                                    • GPU Cores (nvidia.com/gpucores): Indicates the GPU cores utilized by each card, with a value range from 0 to 100. Setting it to 0 means no enforced isolation, while setting it to 100 means exclusive use of the entire card.
                                                                                    • GPU Memory (nvidia.com/gpumem): Indicates the GPU memory occupied by each card, with a value in MB. The minimum value is 1, and the maximum value is the total memory of the card.

                                                                                    If there are issues with the configuration values above, it may result in scheduling failure or inability to allocate resources.

                                                                                  "},{"location":"en/admin/kpanda/gpu/nvidia/vgpu/vgpu_user.html#using-vgpu-through-yaml-configuration","title":"Using vGPU through YAML Configuration","text":"

                                                                                  Refer to the following workload configuration and add the parameter nvidia.com/vgpu: '1' in the resource requests and limits section to configure the number of physical cards used by the application.

                                                                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-vgpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-vgpu-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: full-vgpu-demo\n    spec:\n      containers:\n        - name: full-vgpu-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/gpucores: '20'   # Request 20% of GPU cores for each card\n              nvidia.com/gpumem: '200'   # Request 200MB of GPU memory for each card\n              nvidia.com/vgpu: '1'   # Request 1 GPU\n          imagePullPolicy: Always\n      restartPolicy: Always\n

                                                                                  This YAML configuration requests the application to use vGPU resources. It specifies that each card should utilize 20% of GPU cores, 200MB of GPU memory, and requests 1 GPU.

                                                                                  "},{"location":"en/admin/kpanda/gpu/volcano/volcano-gang-scheduler.html","title":"Using Volcano's Gang Scheduler","text":"

                                                                                  The Gang scheduling policy is one of the core scheduling algorithms of the volcano-scheduler. It satisfies the \"All or nothing\" scheduling requirement during the scheduling process, preventing arbitrary scheduling of Pods that could waste cluster resources. The specific algorithm observes whether the number of scheduled Pods under a Job meets the minimum running quantity. When the Job's minimum running quantity is satisfied, scheduling actions are performed for all Pods under the Job; otherwise, no actions are taken.

                                                                                  "},{"location":"en/admin/kpanda/gpu/volcano/volcano-gang-scheduler.html#use-cases","title":"Use Cases","text":"

                                                                                   The Gang scheduling algorithm, based on the concept of a Pod group, is particularly suitable for scenarios that require multi-process collaboration. AI scenarios often involve complex workflows, such as Data Ingestion, Data Analysis, Data Splitting, Training, Serving, and Logging, which require a group of containers to work together. This makes the pod-group-based Gang scheduling policy a natural fit.

                                                                                   Gang scheduling is also well suited to multi-threaded parallel computing communication scenarios under the MPI computation framework, because the master and slave processes must work together. The containers in a pod group are highly interdependent, so partial scheduling could cause resource contention; scheduling the whole group at once effectively avoids deadlocks.

                                                                                   In scenarios with insufficient cluster resources, the Gang scheduling policy significantly improves cluster resource utilization. For example, if the cluster can currently accommodate only 2 Pods but the minimum number of Pods required for scheduling is 3, all Pods of the Job will remain pending until the cluster can accommodate 3 Pods, at which point they are scheduled. This prevents partially scheduled Pods from occupying resources without being able to run, which would block other Jobs.

                                                                                  "},{"location":"en/admin/kpanda/gpu/volcano/volcano-gang-scheduler.html#concept-explanation","title":"Concept Explanation","text":"

                                                                                  The Gang Scheduler is the core scheduling plugin of Volcano, and it is enabled by default upon installing Volcano. When creating a workload, you only need to specify the scheduler name as Volcano.
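
                                                                                   For example, a minimal Deployment that hands its Pods over to Volcano only needs the schedulerName field; this is a sketch, and the workload name and image are illustrative:

                                                                                     apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: volcano-demo # illustrative name\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      app: volcano-demo\n  template:\n    metadata:\n      labels:\n        app: volcano-demo\n    spec:\n      schedulerName: volcano # hand Pod scheduling over to Volcano\n      containers:\n        - name: demo\n          image: docker.m.daocloud.io/library/nginx:1.25 # illustrative image\n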

                                                                                  Volcano schedules based on PodGroups. When creating a workload, there is no need to manually create PodGroup resources; Volcano will automatically create them based on the workload information. Below is an example of a PodGroup:

                                                                                   apiVersion: scheduling.volcano.sh/v1beta1\nkind: PodGroup\nmetadata:\n  name: test\n  namespace: default\nspec:\n  minMember: 1  # (1)!\n  minResources:  # (2)!\n    cpu: \"3\"\n    memory: \"2048Mi\"\n  priorityClassName: high-priority # (3)!\n  queue: default # (4)!\n
                                                                                   1. Represents the minimum number of Pods or jobs that need to run under this PodGroup. If the cluster resources do not meet the requirements to run the number of jobs specified by minMember, the scheduler will not schedule any jobs within this PodGroup.
                                                                                  2. Represents the minimum resources required to run this PodGroup. If the allocatable resources of the cluster do not meet the minResources, the scheduler will not schedule any jobs within this PodGroup.
                                                                                  3. Represents the priority of this PodGroup, used by the scheduler to sort all PodGroups within the queue during scheduling. system-node-critical and system-cluster-critical are two reserved values indicating the highest priority. If not specifically designated, the default priority or zero priority is used.
                                                                                  4. Represents the queue to which this PodGroup belongs. The queue must be pre-created and in the open state.
                                                                                  "},{"location":"en/admin/kpanda/gpu/volcano/volcano-gang-scheduler.html#use-case","title":"Use Case","text":"

                                                                                  In a multi-threaded parallel computing communication scenario under the MPI computation framework, we need to ensure that all Pods can be successfully scheduled to ensure the job is completed correctly. Setting minAvailable to 4 means that 1 mpimaster and 3 mpiworkers are required to run.

                                                                                  apiVersion: batch.volcano.sh/v1alpha1\nkind: Job\nmetadata:\n  name: lm-mpi-job\n  labels:\n    \"volcano.sh/job-type\": \"MPI\"\nspec:\n  minAvailable: 4\n  schedulerName: volcano\n  plugins:\n    ssh: []\n    svc: []\n  policies:\n    - event: PodEvicted\n      action: RestartJob\n  tasks:\n    - replicas: 1\n      name: mpimaster\n      policies:\n        - event: TaskCompleted\n          action: CompleteJob\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  MPI_HOST=`cat /etc/volcano/mpiworker.host | tr \"\\n\" \",\"`;\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd;\n                  mpiexec --allow-run-as-root --host ${MPI_HOST} -np 3 mpi_hello_world;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpimaster\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"500m\"\n                limits:\n                  cpu: \"500m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n    - replicas: 3\n      name: mpiworker\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd -D;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpiworker\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"1000m\"\n                limits:\n                  cpu: \"1000m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n

                                                                                   The PodGroup resource generated for this Job is as follows:

                                                                                  apiVersion: scheduling.volcano.sh/v1beta1\nkind: PodGroup\nmetadata:\n  annotations:\n  creationTimestamp: \"2024-05-28T09:18:50Z\"\n  generation: 5\n  labels:\n    volcano.sh/job-type: MPI\n  name: lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2\n  namespace: default\n  ownerReferences:\n  - apiVersion: batch.volcano.sh/v1alpha1\n    blockOwnerDeletion: true\n    controller: true\n    kind: Job\n    name: lm-mpi-job\n    uid: 9c571015-37c7-4a1a-9604-eaa2248613f2\n  resourceVersion: \"25173454\"\n  uid: 7b04632e-7cff-4884-8e9a-035b7649d33b\nspec:\n  minMember: 4\n  minResources:\n    count/pods: \"4\"\n    cpu: 3500m\n    limits.cpu: 3500m\n    pods: \"4\"\n    requests.cpu: 3500m\n  minTaskMember:\n    mpimaster: 1\n    mpiworker: 3\n  queue: default\nstatus:\n  conditions:\n  - lastTransitionTime: \"2024-05-28T09:19:01Z\"\n    message: '3/4 tasks in gang unschedulable: pod group is not ready, 1 Succeeded,\n      3 Releasing, 4 minAvailable'\n    reason: NotEnoughResources\n    status: \"True\"\n    transitionID: f875efa5-0358-4363-9300-06cebc0e7466\n    type: Unschedulable\n  - lastTransitionTime: \"2024-05-28T09:18:53Z\"\n    reason: tasks in gang are ready to be scheduled\n    status: \"True\"\n    transitionID: 5a7708c8-7d42-4c33-9d97-0581f7c06dab\n    type: Scheduled\n  phase: Pending\n  succeeded: 1\n

                                                                                   The PodGroup shows that it is associated with the workload through ownerReferences and that the minimum number of running Pods is set to 4.

                                                                                  "},{"location":"en/admin/kpanda/gpu/volcano/volcano_user_guide.html","title":"Use Volcano for AI Compute","text":""},{"location":"en/admin/kpanda/gpu/volcano/volcano_user_guide.html#usage-scenarios","title":"Usage Scenarios","text":"

                                                                                  Kubernetes has become the de facto standard for orchestrating and managing cloud-native applications, and an increasing number of applications are choosing to migrate to K8s. The fields of artificial intelligence and machine learning inherently involve a large number of compute-intensive tasks, and developers are very willing to build AI platforms based on Kubernetes to fully leverage its resource management, application orchestration, and operations monitoring capabilities. However, the default Kubernetes scheduler was initially designed primarily for long-running services and has many shortcomings in batch and elastic scheduling for AI and big data tasks. For example, resource contention issues:

                                                                                  Take TensorFlow job scenarios as an example. TensorFlow jobs include two different roles, PS and Worker, and the Pods for these two roles need to work together to complete the entire job. If only one type of role Pod is running, the entire job cannot be executed properly. The default scheduler schedules Pods one by one and is unaware of the PS and Worker roles in a Kubeflow TFJob. In a high-load cluster (insufficient resources), multiple jobs may each be allocated some resources to run a portion of their Pods, but the jobs cannot complete successfully, leading to resource waste. For instance, if a cluster has 4 GPUs and both TFJob1 and TFJob2 each have 4 Workers, TFJob1 and TFJob2 might each be allocated 2 GPUs. However, both TFJob1 and TFJob2 require 4 GPUs to run. This mutual waiting for resource release creates a deadlock situation, resulting in GPU resource waste.

                                                                                  "},{"location":"en/admin/kpanda/gpu/volcano/volcano_user_guide.html#volcano-batch-scheduling-system","title":"Volcano Batch Scheduling System","text":"

                                                                                  Volcano is the first Kubernetes-based container batch computing platform under CNCF, focusing on high-performance computing scenarios. It fills in the missing functionalities of Kubernetes in fields such as machine learning, big data, and scientific computing, providing essential support for these high-performance workloads. Additionally, Volcano seamlessly integrates with mainstream computing frameworks like Spark, TensorFlow, and PyTorch, and supports hybrid scheduling of heterogeneous devices, including CPUs and GPUs, effectively resolving the deadlock issues mentioned above.

                                                                                  The following sections will introduce how to install and use Volcano.

                                                                                  "},{"location":"en/admin/kpanda/gpu/volcano/volcano_user_guide.html#install-volcano","title":"Install Volcano","text":"
                                                                                  1. Find Volcano in Cluster Details -> Helm Apps -> Helm Charts and install it.

                                                                                  2. Check and confirm whether Volcano is installed successfully, that is, whether the components volcano-admission, volcano-controllers, and volcano-scheduler are running properly.

                                                                                   Typically, Volcano is used in conjunction with AI Lab to form an effective closed loop covering dataset development, Notebooks, and training jobs.

                                                                                  "},{"location":"en/admin/kpanda/gpu/volcano/volcano_user_guide.html#volcano-use-cases","title":"Volcano Use Cases","text":"
                                                                                  • Volcano is a standalone scheduler. To enable the Volcano scheduler when creating workloads, simply specify the scheduler's name (schedulerName: volcano).
                                                                                   • The VolcanoJob resource is Volcano's extension of the Job; it breaks the Job down into smaller working units called tasks, which can interact with each other.
                                                                                  "},{"location":"en/admin/kpanda/gpu/volcano/volcano_user_guide.html#volcano-supports-tensorflow","title":"Volcano Supports TensorFlow","text":"

                                                                                  Here is an example:

                                                                                  apiVersion: batch.volcano.sh/v1alpha1\nkind: Job\nmetadata:\n  name: tensorflow-benchmark\n  labels:\n    \"volcano.sh/job-type\": \"Tensorflow\"\nspec:\n  minAvailable: 3\n  schedulerName: volcano\n  plugins:\n    env: []\n    svc: []\n  policies:\n    - event: PodEvicted\n      action: RestartJob\n  tasks:\n    - replicas: 1\n      name: ps\n      template:\n        spec:\n          imagePullSecrets:\n            - name: default-secret\n          containers:\n            - command:\n                - sh\n                - -c\n                - |\n                  PS_HOST=`cat /etc/volcano/ps.host | sed 's/$/&:2222/g' | tr \"\\n\" \",\"`;\n                  WORKER_HOST=`cat /etc/volcano/worker.host | sed 's/$/&:2222/g' | tr \"\\n\" \",\"`;\n                  python tf_cnn_benchmarks.py --batch_size=32 --model=resnet50 --variable_update=parameter_server --flush_stdout=true --num_gpus=1 --local_parameter_device=cpu --device=cpu --data_format=NHWC --job_name=ps --task_index=${VK_TASK_INDEX} --ps_hosts=${PS_HOST} --worker_hosts=${WORKER_HOST}\n              image: docker.m.daocloud.io/volcanosh/example-tf:0.0.1\n              name: tensorflow\n              ports:\n                - containerPort: 2222\n                  name: tfjob-port\n              resources:\n                requests:\n                  cpu: \"1000m\"\n                  memory: \"2048Mi\"\n                limits:\n                  cpu: \"1000m\"\n                  memory: \"2048Mi\"\n              workingDir: /opt/tf-benchmarks/scripts/tf_cnn_benchmarks\n          restartPolicy: OnFailure\n    - replicas: 2\n      name: worker\n      policies:\n        - event: TaskCompleted\n          action: CompleteJob\n      template:\n        spec:\n          imagePullSecrets:\n            - name: default-secret\n          containers:\n            - command:\n                - sh\n                - -c\n                - |\n                  PS_HOST=`cat /etc/volcano/ps.host | sed 's/$/&:2222/g' | tr \"\\n\" \",\"`;\n                  WORKER_HOST=`cat /etc/volcano/worker.host | sed 's/$/&:2222/g' | tr \"\\n\" \",\"`;\n                  python tf_cnn_benchmarks.py --batch_size=32 --model=resnet50 --variable_update=parameter_server --flush_stdout=true --num_gpus=1 --local_parameter_device=cpu --device=cpu --data_format=NHWC --job_name=worker --task_index=${VK_TASK_INDEX} --ps_hosts=${PS_HOST} --worker_hosts=${WORKER_HOST}\n              image: docker.m.daocloud.io/volcanosh/example-tf:0.0.1\n              name: tensorflow\n              ports:\n                - containerPort: 2222\n                  name: tfjob-port\n              resources:\n                requests:\n                  cpu: \"2000m\"\n                  memory: \"2048Mi\"\n                limits:\n                  cpu: \"2000m\"\n                  memory: \"4096Mi\"\n              workingDir: /opt/tf-benchmarks/scripts/tf_cnn_benchmarks\n          restartPolicy: OnFailure\n
                                                                                  "},{"location":"en/admin/kpanda/gpu/volcano/volcano_user_guide.html#parallel-computing-with-mpi","title":"Parallel Computing with MPI","text":"

                                                                                   In multi-threaded parallel computing communication scenarios under the MPI computing framework, we need to ensure that all Pods are successfully scheduled to guarantee the job completes correctly. Setting minAvailable to 4 indicates that 1 mpimaster and 3 mpiworkers are required to run. Simply setting the schedulerName field to volcano enables the Volcano scheduler.

                                                                                  Here is an example:

                                                                                  apiVersion: batch.volcano.sh/v1alpha1\nkind: Job\nmetadata:\n  name: lm-mpi-job\n  labels:\n    \"volcano.sh/job-type\": \"MPI\"\nspec:\n  minAvailable: 4\n  schedulerName: volcano\n  plugins:\n    ssh: []\n    svc: []\n  policies:\n    - event: PodEvicted\n      action: RestartJob\n  tasks:\n    - replicas: 1\n      name: mpimaster\n      policies:\n        - event: TaskCompleted\n          action: CompleteJob\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  MPI_HOST=`cat /etc/volcano/mpiworker.host | tr \"\\n\" \",\"`;\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd;\n                  mpiexec --allow-run-as-root --host ${MPI_HOST} -np 3 mpi_hello_world;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpimaster\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"500m\"\n                limits:\n                  cpu: \"500m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n    - replicas: 3\n      name: mpiworker\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd -D;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpiworker\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"1000m\"\n                limits:\n                  cpu: \"1000m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n

                                                                                   The PodGroup resource generated for this Job is as follows:

                                                                                  apiVersion: scheduling.volcano.sh/v1beta1\nkind: PodGroup\nmetadata:\n  annotations:\n  creationTimestamp: \"2024-05-28T09:18:50Z\"\n  generation: 5\n  labels:\n    volcano.sh/job-type: MPI\n  name: lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2\n  namespace: default\n  ownerReferences:\n  - apiVersion: batch.volcano.sh/v1alpha1\n    blockOwnerDeletion: true\n    controller: true\n    kind: Job\n    name: lm-mpi-job\n    uid: 9c571015-37c7-4a1a-9604-eaa2248613f2\n  resourceVersion: \"25173454\"\n  uid: 7b04632e-7cff-4884-8e9a-035b7649d33b\nspec:\n  minMember: 4\n  minResources:\n    count/pods: \"4\"\n    cpu: 3500m\n    limits.cpu: 3500m\n    pods: \"4\"\n    requests.cpu: 3500m\n  minTaskMember:\n    mpimaster: 1\n    mpiworker: 3\n  queue: default\nstatus:\n  conditions:\n  - lastTransitionTime: \"2024-05-28T09:19:01Z\"\n    message: '3/4 tasks in gang unschedulable: pod group is not ready, 1 Succeeded,\n      3 Releasing, 4 minAvailable'\n    reason: NotEnoughResources\n    status: \"True\"\n    transitionID: f875efa5-0358-4363-9300-06cebc0e7466\n    type: Unschedulable\n  - lastTransitionTime: \"2024-05-28T09:18:53Z\"\n    reason: tasks in gang are ready to be scheduled\n    status: \"True\"\n    transitionID: 5a7708c8-7d42-4c33-9d97-0581f7c06dab\n    type: Scheduled\n  phase: Pending\n  succeeded: 1\n

                                                                                   The PodGroup shows that it is associated with the workload through ownerReferences and that the minimum number of running Pods is set to 4.

                                                                                  If you want to learn more about the features and usage scenarios of Volcano, refer to Volcano Introduction.

                                                                                  "},{"location":"en/admin/kpanda/helm/index.html","title":"Helm Charts","text":"

                                                                                   Helm is a package management tool for Kubernetes that makes it easy for users to quickly discover, share, and use applications built with Kubernetes. The Container Management module provides hundreds of Helm charts, covering storage, networking, monitoring, databases, and other major use cases. With these templates, you can quickly deploy and easily manage Helm apps through the UI. In addition, it supports adding more personalized templates through Add Helm repository to meet various needs.

                                                                                  Key Concepts:

                                                                                  There are a few key concepts to understand when using Helm:

                                                                                  • Chart: A Helm installation package, which contains the images, dependencies, and resource definitions required to run an application, and may also contain service definitions in the Kubernetes cluster, similar to the formula in Homebrew, dpkg in APT, or rpm files in Yum. Charts are called Helm Charts in AI platform.

                                                                                  • Release: A Chart instance running on the Kubernetes cluster. A Chart can be installed multiple times in the same cluster, and each installation will create a new Release. Release is called Helm Apps in AI platform.

                                                                                  • Repository: A repository for publishing and storing Charts. Repository is called Helm Repositories in AI platform.

                                                                                  For more details, refer to Helm official website.

                                                                                  Related operations:

                                                                                   • Manage Helm apps, including installing, updating, and uninstalling Helm apps, viewing Helm operation records, etc.
                                                                                   • Manage Helm repositories, including adding, updating, and deleting Helm repositories, etc.
                                                                                  "},{"location":"en/admin/kpanda/helm/Import-addon.html","title":"Import Custom Helm Apps into Built-in Addons","text":"

                                                                                   This article explains how to import Helm apps into the system's built-in addons in both offline and online environments.

                                                                                  "},{"location":"en/admin/kpanda/helm/Import-addon.html#offline-environment","title":"Offline Environment","text":"

                                                                                  An offline environment refers to an environment that cannot connect to the internet or is a closed private network environment.

                                                                                  "},{"location":"en/admin/kpanda/helm/Import-addon.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • charts-syncer is available and running. If not, you can click here to download.
                                                                                   • The Helm Chart has been adapted for charts-syncer, meaning a .relok8s-images.yaml file has been added to the Chart. This file should list all the images used by the Chart, including any images that are not directly referenced in the Chart but are used indirectly, for example by an Operator. An illustrative hints file follows this list.
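
                                                                                   For illustration only, a minimal hints file might look as follows; the template paths ({{ .image.registry }} and so on) are placeholders that must match the value paths defined in the chart's own values.yaml:

                                                                                     # .relok8s-images.yaml (illustrative; adapt the value paths to your chart)\n- \"{{ .image.registry }}/{{ .image.repository }}:{{ .image.tag }}\"\n- \"{{ .initContainer.registry }}/{{ .initContainer.repository }}:{{ .initContainer.tag }}\"\n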

                                                                                  Note

                                                                                   • Refer to image-hints-file for instructions on how to write the hints file. The image registry and repository must be written separately, because the registry/repository needs to be replaced or modified when the image is loaded.
                                                                                   • The installer's fire cluster has charts-syncer installed. If you are importing a custom Helm app into the installer's fire cluster, you can skip the download and proceed to the adaptation. If the charts-syncer binary is not installed, you can download it right away.
                                                                                  "},{"location":"en/admin/kpanda/helm/Import-addon.html#sync-helm-chart","title":"Sync Helm Chart","text":"
                                                                                  1. Go to Container Management -> Helm Apps -> Helm Repositories , search for the addon, and obtain the built-in repository address and username/password (the default username/password for the system's built-in repository is rootuser/rootpass123).

2. Sync the Helm Chart to the built-in repository addon of the container management system.

                                                                                    • Write the following configuration file, modify it according to your specific configuration, and save it as sync-dao-2048.yaml .

                                                                                      source:  # helm charts source information\n  repo:\n    kind: HARBOR # It can also be any other supported Helm Chart repository type, such as CHARTMUSEUM\n    url: https://release-ci.daocloud.io/chartrepo/community #  Change to the chart repo URL\n    #auth: # username/password, if no password is set, leave it blank\n      #username: \"admin\"\n      #password: \"Harbor12345\"\ncharts:  # charts to sync\n  - name: dao-2048 # helm charts information, if not specified, sync all charts in the source helm repo\n    versions:\n      - 1.4.1\ntarget:  # helm charts target information\n  containerRegistry: 10.5.14.40 # image repository URL\n  repo:\n    kind: CHARTMUSEUM # It can also be any other supported Helm Chart repository type, such as HARBOR\n    url: http://10.5.14.40:8081 #  Change to the correct chart repo URL, you can verify the address by using helm repo add $HELM-REPO\n    auth: # username/password, if no password is set, leave it blank\n      username: \"rootuser\"\n      password: \"rootpass123\"\n  containers:\n    # kind: HARBOR # If the image repository is HARBOR and you want charts-syncer to automatically create an image repository, fill in this field\n    # auth: # username/password, if no password is set, leave it blank\n      # username: \"admin\"\n      # password: \"Harbor12345\"\n\n# leverage .relok8s-images.yaml file inside the Charts to move the container images too\nrelocateContainerImages: true\n
                                                                                    • Run the charts-syncer command to sync the Chart and its included images

                                                                                      charts-syncer sync --config sync-dao-2048.yaml --insecure --auto-create-repository\n

                                                                                      The expected output is:

                                                                                      I1222 15:01:47.119777    8743 sync.go:45] Using config file: \"examples/sync-dao-2048.yaml\"\nW1222 15:01:47.234238    8743 syncer.go:263] Ignoring skipDependencies option as dependency sync is not supported if container image relocation is true or syncing from/to intermediate directory\nI1222 15:01:47.234685    8743 sync.go:58] There is 1 chart out of sync!\nI1222 15:01:47.234706    8743 sync.go:66] Syncing \"dao-2048_1.4.1\" chart...\n.relok8s-images.yaml hints file found\nComputing relocation...\n\nRelocating dao-2048@1.4.1...\nPushing 10.5.14.40/daocloud/dao-2048:v1.4.1...\nDone\nDone moving /var/folders/vm/08vw0t3j68z9z_4lcqyhg8nm0000gn/T/charts-syncer869598676/dao-2048-1.4.1.tgz\n
                                                                                  3. Once the previous step is completed, go to Container Management -> Helm Apps -> Helm Repositories , find the proper addon, click Sync Repository in the action column, and you will see the uploaded Helm apps in the Helm template.

                                                                                  4. You can then proceed with normal installation, upgrade, and uninstallation.

                                                                                  "},{"location":"en/admin/kpanda/helm/Import-addon.html#online-environment","title":"Online Environment","text":"

The Helm Repo address for the online environment is release.daocloud.io. If the user does not have permission to that repository, they will not be able to import custom Helm apps into the system's built-in addons. In that case, you can add your own Helm repository and then integrate it into the platform using the same steps as syncing a Helm Chart in the offline environment.

                                                                                  "},{"location":"en/admin/kpanda/helm/helm-app.html","title":"Manage Helm Apps","text":"

                                                                                  The container management module supports interface-based management of Helm, including creating Helm instances using Helm charts, customizing Helm instance arguments, and managing the full lifecycle of Helm instances.

                                                                                  This section will take cert-manager as an example to introduce how to create and manage Helm apps through the container management interface.

                                                                                  "},{"location":"en/admin/kpanda/helm/helm-app.html#prerequisites","title":"Prerequisites","text":"
• Integrated or created a Kubernetes cluster, and you can access its UI.

                                                                                  • Created a namespace, user, and granted NS Admin or higher permissions to the user. For details, refer to Namespace Authorization.

                                                                                  "},{"location":"en/admin/kpanda/helm/helm-app.html#install-the-helm-app","title":"Install the Helm app","text":"

                                                                                  Follow the steps below to install the Helm app.

                                                                                  1. Click a cluster name to enter Cluster Details .

                                                                                  2. In the left navigation bar, click Helm Apps -> Helm Chart to enter the Helm chart page.

                                                                                    On the Helm chart page, select the Helm repository named addon , and all the Helm chart templates under the addon repository will be displayed on the interface. Click the Chart named cert-manager .

3. On the installation page, you can see the detailed information of the Chart. Select the version to install in the upper right corner of the interface and click the Install button; version v1.9.1 is selected here.

4. Configure the Name, Namespace, and Version. You can also customize arguments by modifying the YAML in the argument configuration area below. Click OK.

5. The system will automatically return to the list of Helm apps. The status of the newly created Helm app is Installing and will change to Running after a period of time.
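
For reference, the UI flow above corresponds roughly to the following Helm CLI commands; the repository address placeholder and the target namespace are assumptions, and the credentials are the built-in repository defaults mentioned earlier:

# Roughly equivalent CLI steps (repository address and namespace are illustrative)\nhelm repo add addon https://<built-in-repo-address> --username rootuser --password rootpass123\nhelm install cert-manager addon/cert-manager --version v1.9.1 --namespace cert-manager --create-namespace\n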

                                                                                  "},{"location":"en/admin/kpanda/helm/helm-app.html#update-the-helm-app","title":"Update the Helm app","text":"

After a Helm app has been installed through the interface, we can perform an update operation on it. Note: update operations using the UI are only supported for Helm apps installed via the UI.

                                                                                  Follow the steps below to update the Helm app.

                                                                                  1. Click a cluster name to enter Cluster Details .

                                                                                  2. In the left navigation bar, click Helm Apps to enter the Helm app list page.

On the Helm app list page, select the Helm app that needs to be updated, click the ... operation button on the right side of the list, and select Update from the drop-down menu.

                                                                                  3. After clicking the Update button, the system will jump to the update interface, where you can update the Helm app as needed. Here we take updating the http port of the dao-2048 application as an example.

4. After modifying the proper arguments, you can click the Change button under the argument configuration to compare the files before and after the modification. After confirming that there are no errors, click the OK button at the bottom to complete the update of the Helm app.

                                                                                  5. The system will automatically return to the Helm app list, and a pop-up window in the upper right corner will prompt update successful .

                                                                                  "},{"location":"en/admin/kpanda/helm/helm-app.html#view-helm-operation-records","title":"View Helm operation records","text":"

                                                                                  Every installation, update, and deletion of Helm apps has detailed operation records and logs for viewing.

                                                                                  1. In the left navigation bar, click Cluster Operations -> Recent Operations , and then select the Helm Operations tab at the top of the page. Each record corresponds to an install/update/delete operation.

                                                                                  2. To view the detailed log of each operation: Click \u2507 on the right side of the list, and select Log from the pop-up menu.

3. At this point, the detailed operation log will be displayed in a console view at the bottom of the page.
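
If you also have Helm CLI access to the cluster, a release's revision history provides a similar view; a sketch (the release name and namespace are examples):

# Each revision corresponds to an install or upgrade operation\nhelm history dao-2048 -n default\n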

                                                                                  "},{"location":"en/admin/kpanda/helm/helm-app.html#delete-the-helm-app","title":"Delete the Helm app","text":"

                                                                                  Follow the steps below to delete the Helm app.

                                                                                  1. Find the cluster where the Helm app to be deleted resides, click the cluster name, and enter Cluster Details .

                                                                                  2. In the left navigation bar, click Helm Apps to enter the Helm app list page.

On the Helm app list page, select the Helm app you want to delete, click the ... operation button on the right side of the list, and select Delete from the drop-down menu.

                                                                                  3. Enter the name of the Helm app in the pop-up window to confirm, and then click the Delete button.

                                                                                  "},{"location":"en/admin/kpanda/helm/helm-repo.html","title":"Manage Helm Repository","text":"

The Helm repository is a repository for storing and publishing Charts. The Helm App module supports accessing Chart packages in the repository over HTTP(S). By default, the system has four built-in helm repos, listed below, to meet common needs in the production process of enterprises.

• partner: Various high-quality Charts provided by ecological partners. Example: tidb.
• system: Charts that core system components and some advanced features must rely on; for example, insight-agent must be installed to obtain cluster monitoring information. Example: Insight.
• addon: Common Charts in business cases. Example: cert-manager.
• community: Charts of the most popular open source components in the Kubernetes community. Example: Istio.

                                                                                  In addition to the above preset repositories, you can also add third-party Helm repositories yourself. This page will introduce how to add and update third-party Helm repositories.

                                                                                  "},{"location":"en/admin/kpanda/helm/helm-repo.html#prerequisites","title":"Prerequisites","text":"
• Integrated or created a Kubernetes cluster, and you can access its UI.

                                                                                  • Created a namespace, user, and granted NS Admin or higher permissions to the user. For details, refer to Namespace Authorization.

                                                                                  • If using a private repository, you should have read and write permissions to the repository.

                                                                                  "},{"location":"en/admin/kpanda/helm/helm-repo.html#introduce-third-party-helm-repository","title":"Introduce third-party Helm repository","text":"

The following uses Kubevela's public chart repository as an example to introduce how to add and manage a third-party helm repo.

                                                                                  1. Find the cluster that needs to be imported into the third-party helm repo, click the cluster name, and enter cluster details.

                                                                                  2. In the left navigation bar, click Helm Apps -> Helm Repositories to enter the helm repo page.

3. Click the Create Repository button on the helm repo page to enter the Create Repository page, and configure the relevant arguments according to the descriptions below.

• Repository Name: Set the repository name. It can be up to 63 characters long and may only include lowercase letters, numbers, and the separator ( - ), and must start and end with a lowercase letter or number, for example, kubevela.
                                                                                    • Repository URL: The HTTP(S) address pointing to the target Helm repository. For example, https://charts.kubevela.net/core.
                                                                                    • Skip TLS Verification: If the added Helm repository uses an HTTPS address and requires skipping TLS verification, you can check this option. The default is unchecked.
                                                                                    • Authentication Method: The method used for identity verification after connecting to the repository URL. For public repositories, you can select None. For private repositories, you need to enter a username/password for identity verification.
                                                                                    • Labels: Add labels to this Helm repository. For example, key: repo4; value: Kubevela.
                                                                                    • Annotations: Add annotations to this Helm repository. For example, key: repo4; value: Kubevela.
                                                                                    • Description: Add a description for this Helm repository. For example: This is a Kubevela public Helm repository.

                                                                                  4. Click OK to complete the creation of the Helm repository. The page will automatically jump to the list of Helm repositories.
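
Before or after creating the repository, you can sanity-check the repository URL and credentials with the Helm CLI; a sketch using the Kubevela repository from this example:

# Verify the repository URL is reachable and serves charts\nhelm repo add kubevela https://charts.kubevela.net/core\nhelm repo update\nhelm search repo kubevela\n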

                                                                                  "},{"location":"en/admin/kpanda/helm/helm-repo.html#update-the-helm-repository","title":"Update the Helm repository","text":"

When the information of a helm repo changes, you can update its address, authentication method, labels, annotations, and description.

                                                                                  1. Find the cluster where the repository to be updated is located, click the cluster name, and enter cluster details .

                                                                                  2. In the left navigation bar, click Helm Apps -> Helm Repositories to enter the helm repo list page.

                                                                                  3. Find the Helm repository that needs to be updated on the repository list page, click the \u2507 button on the right side of the list, and click Update in the pop-up menu.

4. Make the changes on the Update Helm Repository page, and click OK when finished.

                                                                                  5. Return to the helm repo list, and the screen prompts that the update is successful.

                                                                                  "},{"location":"en/admin/kpanda/helm/helm-repo.html#delete-the-helm-repository","title":"Delete the Helm repository","text":"

In addition to importing and updating repositories, you can also delete unnecessary repositories, including system preset repositories and third-party repositories.

                                                                                  1. Find the cluster where the repository to be deleted is located, click the cluster name, and enter cluster details .

                                                                                  2. In the left navigation bar, click Helm Apps -> Helm Repositories to enter the helm repo list page.

3. Find the Helm repository that needs to be deleted on the repository list page, click the \u2507 button on the right side of the list, and click Delete in the pop-up menu.

                                                                                  4. Enter the repository name to confirm, and click Delete .

                                                                                  5. Return to the list of Helm repositories, and the screen prompts that the deletion is successful.

                                                                                  "},{"location":"en/admin/kpanda/helm/multi-archi-helm.html","title":"Import and Upgrade Multi-Arch Helm Apps","text":"

In a multi-arch cluster, it is common to use Helm charts that support multiple architectures to address deployment issues caused by architectural differences. This guide explains how to import single-arch Helm apps and how to integrate (fuse) multi-arch Helm apps.

                                                                                  "},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#import","title":"Import","text":""},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#import-single-arch","title":"Import Single-arch","text":"

                                                                                  Prepare the offline package addon-offline-full-package-${version}-${arch}.tar.gz.

Specify the path in the clusterConfig.yaml configuration file, for example:

                                                                                  addonPackage:\n  path: \"/home/addon-offline-full-package-v0.9.0-amd64.tar.gz\"\n

                                                                                  Then run the import command:

                                                                                  ~/dce5-installer cluster-create -c /home/dce5/sample/clusterConfig.yaml -m /home/dce5/sample/manifest.yaml -d -j13\n
                                                                                  "},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#integrate-multi-arch","title":"Integrate Multi-arch","text":"

                                                                                  Prepare the offline package addon-offline-full-package-${version}-${arch}.tar.gz.

                                                                                  Take addon-offline-full-package-v0.9.0-arm64.tar.gz as an example and run the import command:

                                                                                  ~/dce5-installer import-addon -c /home/dce5/sample/clusterConfig.yaml --addon-path=/home/addon-offline-full-package-v0.9.0-arm64.tar.gz\n
                                                                                  "},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#upgrade","title":"Upgrade","text":""},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#upgrade-single-arch","title":"Upgrade Single-arch","text":"

                                                                                  Prepare the offline package addon-offline-full-package-${version}-${arch}.tar.gz.

Specify the path in the clusterConfig.yaml configuration file, for example:

                                                                                  addonPackage:\n  path: \"/home/addon-offline-full-package-v0.11.0-amd64.tar.gz\"\n

                                                                                  Then run the import command:

                                                                                  ~/dce5-installer cluster-create -c /home/dce5/sample/clusterConfig.yaml -m /home/dce5/sample/manifest.yaml -d -j13\n
                                                                                  "},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#multi-arch-integration","title":"Multi-arch Integration","text":"

                                                                                  Prepare the offline package addon-offline-full-package-${version}-${arch}.tar.gz.

                                                                                  Take addon-offline-full-package-v0.11.0-arm64.tar.gz as an example and run the import command:

                                                                                  ~/dce5-installer import-addon -c /home/dce5/sample/clusterConfig.yaml --addon-path=/home/addon-offline-full-package-v0.11.0-arm64.tar.gz\n
                                                                                  "},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#notes","title":"Notes","text":""},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#disk-space","title":"Disk Space","text":"

                                                                                  The offline package is quite large and requires sufficient space for decompression and loading of images. Otherwise, it may interrupt the process with a \"no space left\" error.
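
Before importing, it can help to confirm that there is enough free space in the directory where the package will be decompressed; for example:

# Check free space where the offline package will be decompressed (path is an example)\ndf -h /home\n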

                                                                                  "},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#retry-after-failure","title":"Retry after Failure","text":"

                                                                                  If the multi-arch fusion step fails, you need to clean up the residue before retrying:

                                                                                  rm -rf addon-offline-target-package\n
                                                                                  "},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#registry-space","title":"Registry Space","text":"

If the offline package used for fusion contains registry spaces that are inconsistent with those in the previously imported offline package, an error may occur during the fusion process because the registry spaces do not exist.

Solution: Simply create the missing registry space before the fusion. For example, if the error reports that the registry space "localhost" does not exist, creating it in advance prevents the error.

                                                                                  "},{"location":"en/admin/kpanda/helm/multi-archi-helm.html#architecture-conflict","title":"Architecture Conflict","text":"

When upgrading an addon to a version lower than 0.12.0, the charts-syncer in the target offline package does not check whether an image already exists before pushing, so the upgrade will recombine the multi-arch addon into a single architecture. For example, if the addon was implemented as multi-arch in v0.10, upgrading to v0.11 will overwrite the multi-arch addon with a single architecture, whereas upgrading to v0.12.0 or above preserves the multi-arch.
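
To check whether an image in the registry has remained multi-arch after an upgrade, you can inspect its manifest list; a sketch using the example image from the sync step earlier (add --insecure for registries without trusted TLS):

# A manifest list with multiple \"platform\" entries indicates a multi-arch image\ndocker manifest inspect 10.5.14.40/daocloud/dao-2048:v1.4.1\n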

                                                                                  "},{"location":"en/admin/kpanda/helm/upload-helm.html","title":"Upload Helm Charts","text":"

                                                                                  This article explains how to upload Helm charts. See the steps below.

                                                                                  1. Add a Helm repository, refer to Adding a Third-Party Helm Repository for the procedure.

                                                                                  2. Upload the Helm Chart to the Helm repository.

Upload with Client / Upload with Web Page

                                                                                    Note

                                                                                    This method is suitable for Harbor, ChartMuseum, JFrog type repositories.

                                                                                    1. Log in to a node that can access the Helm repository, upload the Helm binary to the node, and install the cm-push plugin (VPN is needed and Git should be installed in advance).

                                                                                      Refer to the plugin installation process.

2. Push the Helm Chart to the Helm repository by executing the following command (a concrete example follows the argument descriptions):

                                                                                      helm cm-push ${charts-dir} ${HELM_REPO_URL} --username ${username} --password ${password}\n

                                                                                      Argument descriptions:

                                                                                      • charts-dir: The directory of the Helm Chart, or the packaged Chart (i.e., .tgz file).
                                                                                      • HELM_REPO_URL: The URL of the Helm repository.
                                                                                      • username/password: The username and password for the Helm repository with push permissions.
                                                                                      • If you want to access via HTTPS and skip the certificate verification, you can add the argument --insecure.
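
For example, using the built-in repository address and credentials from the offline sync example earlier (adjust them to your own repository):

# Install the cm-push plugin (requires Git and network access)\nhelm plugin install https://github.com/chartmuseum/helm-push\n# Push a packaged chart to the built-in repository\nhelm cm-push dao-2048-1.4.1.tgz http://10.5.14.40:8081 --username rootuser --password rootpass123\n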

                                                                                    Note

                                                                                    This method is only applicable to Harbor repositories.

                                                                                    1. Log into the Harbor repository, ensuring the logged-in user has permissions to push;

                                                                                    2. Go to the relevant project, select the Helm Charts tab, click the Upload button on the page to upload the Helm Chart.

                                                                                  3. Sync Remote Repository Data

Manual Sync / Auto Sync

                                                                                    By default, the cluster does not enable Helm Repository Auto-Refresh, so you need to perform a manual sync operation. The general steps are:

                                                                                    Go to Helm Apps -> Helm Repositories, click the \u2507 button on the right side of the repository list, and select Sync Repository to complete the repository data synchronization.

                                                                                    If you need to enable the Helm repository auto-sync feature, you can go to Cluster Maintenance -> Cluster Settings -> Advanced Settings and turn on the Helm repository auto-refresh switch.

                                                                                  "},{"location":"en/admin/kpanda/inspect/index.html","title":"Cluster Inspection","text":"

                                                                                  Cluster inspection allows administrators to regularly or ad-hoc check the overall health of the cluster, giving them proactive control over ensuring cluster security. With a well-planned inspection schedule, this proactive cluster check allows administrators to monitor the cluster status at any time and address potential issues in advance. It eliminates the previous dilemma of passive troubleshooting during failures, enabling proactive monitoring and prevention.

                                                                                  The cluster inspection feature provided by AI platform's container management module supports custom inspection items at the cluster, node, and pod levels. After the inspection is completed, it automatically generates visual inspection reports.

                                                                                  • Cluster Level: Checks the running status of system components in the cluster, including cluster status, resource usage, and specific inspection items for control nodes, such as the status of kube-apiserver and etcd .
                                                                                  • Node Level: Includes common inspection items for both control nodes and worker nodes, such as node resource usage, handle counts, PID status, and network status.
• Pod Level: Checks the CPU and memory usage and running status of pods, and the status of PVs (Persistent Volumes) and PVCs (PersistentVolumeClaims).

                                                                                  For information on security inspections or executing security-related inspections, refer to the supported security scan types in AI platform.

                                                                                  "},{"location":"en/admin/kpanda/inspect/config.html","title":"Creating Inspection Configuration","text":"

                                                                                  AI platform Container Management module provides cluster inspection functionality, which supports inspection at the cluster, node, and pod levels.

                                                                                  • Cluster level: Check the running status of system components in the cluster, including cluster status, resource usage, and specific inspection items for control nodes such as kube-apiserver and etcd .
                                                                                  • Node level: Includes common inspection items for both control nodes and worker nodes, such as node resource usage, handle count, PID status, and network status.
                                                                                  • Pod level: Check the CPU and memory usage, running status, PV and PVC status of Pods.

                                                                                  Here's how to create an inspection configuration.

                                                                                  1. Click Cluster Inspection in the left navigation bar.

                                                                                  2. On the right side of the page, click Inspection Configuration .

                                                                                  3. Fill in the inspection configuration based on the following instructions, then click OK at the bottom of the page.

• Cluster: Select the clusters that you want to inspect from the dropdown list. If you select multiple clusters, multiple inspection configurations will be automatically generated (the configurations differ only in the cluster being inspected; all other settings are identical).
                                                                                    • Scheduled Inspection: When enabled, it allows for regular automatic execution of cluster inspections based on a pre-set inspection frequency.
• Inspection Frequency: Set the interval for automatic inspections, e.g., every Tuesday at 10 AM (Cron expression 0 10 * * 2). Custom Cron expressions are supported; refer to Cron Schedule Syntax for more information.
                                                                                    • Number of Inspection Records to Retain: Specifies the maximum number of inspection records to be retained, including all inspection records for each cluster.
                                                                                    • Parameter Configuration: The parameter configuration is divided into three parts: cluster level, node level, and pod level. You can enable or disable specific inspection items based on your requirements.

                                                                                  After creating the inspection configuration, it will be automatically displayed in the inspection configuration list. Click the more options button on the right of the configuration to immediately perform an inspection, modify the inspection configuration or delete the inspection configuration and reports.

                                                                                  • Click Inspection to perform an inspection once based on the configuration.
                                                                                  • Click Inspection Configuration to modify the inspection configuration.
                                                                                  • Click Delete to delete the inspection configuration and reports.

                                                                                  Note

                                                                                  • After creating the inspection configuration, if the Scheduled Inspection configuration is enabled, inspections will be automatically executed at the specified time.
                                                                                  • If Scheduled Inspection configuration is not enabled, you need to manually trigger the inspection.
                                                                                  "},{"location":"en/admin/kpanda/inspect/inspect.html","title":"Start Cluster Inspection","text":"

                                                                                  After creating an inspection configuration, if the Scheduled Inspection configuration is enabled, inspections will be automatically executed at the specified time. If the Scheduled Inspection configuration is not enabled, you need to manually trigger the inspection.

                                                                                  This page explains how to manually perform a cluster inspection.

                                                                                  "},{"location":"en/admin/kpanda/inspect/inspect.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • Integrate or create a cluster in the Container Management module.
                                                                                  • Create an inspection configuration.
                                                                                  • The selected cluster is in the Running state and the insight component has been installed in the cluster.
                                                                                  "},{"location":"en/admin/kpanda/inspect/inspect.html#steps","title":"Steps","text":"

                                                                                  When performing an inspection, you can choose to inspect multiple clusters in batches or perform a separate inspection for a specific cluster.

Batch Inspection / Individual Inspection
                                                                                  1. Click Cluster Inspection in the top-level navigation bar of the Container Management module, then click Inspection on the right side of the page.

                                                                                  2. Select the clusters you want to inspect, then click OK at the bottom of the page.

                                                                                    • If you choose to inspect multiple clusters at the same time, the system will perform inspections based on different inspection configurations for each cluster.
                                                                                    • If no inspection configuration is set for a cluster, the system will use the default configuration.

                                                                                  1. Go to the Cluster Inspection page.
                                                                                  2. Click the more options button ( \u2507 ) on the right of the proper inspection configuration, then select Inspection from the popup menu.

                                                                                  "},{"location":"en/admin/kpanda/inspect/report.html","title":"Check Inspection Reports","text":"

                                                                                  After the inspection execution is completed, you can view the inspection records and detailed inspection reports.

                                                                                  "},{"location":"en/admin/kpanda/inspect/report.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • Create an inspection configuration.
                                                                                  • Perform at least one inspection execution.
                                                                                  "},{"location":"en/admin/kpanda/inspect/report.html#steps","title":"Steps","text":"
                                                                                  1. Go to the Cluster Inspection page and click the name of the target inspection cluster.

                                                                                  2. Click the name of the inspection record you want to view.

                                                                                    • Each inspection execution generates an inspection record.
• When the number of inspection records exceeds the maximum retention specified in the inspection configuration, the earliest records (ordered by execution time) are deleted first.

                                                                                  3. View the detailed information of the inspection, which may include an overview of cluster resources and the running status of system components.

                                                                                    You can download the inspection report or delete the inspection report from the top right corner of the page.

                                                                                  "},{"location":"en/admin/kpanda/namespaces/createns.html","title":"Namespaces","text":"

                                                                                  Namespaces are an abstraction used in Kubernetes for resource isolation. A cluster can contain multiple namespaces with different names, and the resources in each namespace are isolated from each other. For a detailed introduction to namespaces, refer to Namespaces.

                                                                                  This page will introduce the related operations of the namespace.

                                                                                  "},{"location":"en/admin/kpanda/namespaces/createns.html#create-a-namespace","title":"Create a namespace","text":"

Namespaces can be created easily through forms, or quickly by writing or importing YAML files.

                                                                                  Note

                                                                                  • Before creating a namespace, you need to Integrate a Kubernetes cluster or Create a Kubernetes cluster in the container management module.
                                                                                  • The default namespace default is usually automatically generated after cluster initialization. But for production clusters, for ease of management, it is recommended to create other namespaces instead of using the default namespace directly.
                                                                                  "},{"location":"en/admin/kpanda/namespaces/createns.html#create-with-form","title":"Create with form","text":"
                                                                                  1. On the cluster list page, click the name of the target cluster.

                                                                                  2. Click Namespace in the left navigation bar, then click the Create button on the right side of the page.

                                                                                  3. Fill in the name of the namespace, configure the workspace and labels (optional), and then click OK.

                                                                                    Info

                                                                                    • After binding a namespace to a workspace, the resources of that namespace will be shared with the bound workspace. For a detailed explanation of workspaces, refer to Workspaces and Hierarchies.

                                                                                    • After the namespace is created, you can still bind/unbind the workspace.

                                                                                  4. Click OK to complete the creation of the namespace. On the right side of the namespace list, click \u2507 to select update, bind/unbind workspace, quota management, delete, and more from the pop-up menu.

                                                                                  "},{"location":"en/admin/kpanda/namespaces/createns.html#create-from-yaml","title":"Create from YAML","text":"
                                                                                  1. On the Clusters page, click the name of the target cluster.

                                                                                  2. Click Namespace in the left navigation bar, then click the YAML Create button on the right side of the page.

3. Enter or paste the prepared YAML content, or directly import an existing local YAML file (a minimal example is shown after these steps).

                                                                                    After entering the YAML content, click Download to save the YAML file locally.

                                                                                  4. Finally, click OK in the lower right corner of the pop-up box.
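
A minimal namespace manifest that could be pasted in step 3 looks like this (the name and label are examples):

apiVersion: v1\nkind: Namespace\nmetadata:\n  name: my-namespace    # example name\n  labels:\n    environment: dev    # optional label\n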

                                                                                  "},{"location":"en/admin/kpanda/namespaces/exclusive.html","title":"Namespace Exclusive Nodes","text":"

Namespace exclusive nodes in a Kubernetes cluster allow a specific namespace to have exclusive access to the CPU, memory, and other resources of one or more nodes through taints and tolerations. Once exclusive nodes are configured for a specific namespace, applications and services from other namespaces cannot run on those nodes. Using exclusive nodes allows important applications to have exclusive access to some computing resources, achieving physical isolation from other applications.

                                                                                  Note

                                                                                  Applications and services running on a node before it is set to be an exclusive node will not be affected and will continue to run normally on that node. Only when these Pods are deleted or rebuilt will they be scheduled to other non-exclusive nodes.

                                                                                  "},{"location":"en/admin/kpanda/namespaces/exclusive.html#preparation","title":"Preparation","text":"

                                                                                  Check whether the kube-apiserver of the current cluster has enabled the PodNodeSelector and PodTolerationRestriction admission controllers.

                                                                                  The use of namespace exclusive nodes requires users to enable the PodNodeSelector and PodTolerationRestriction admission controllers on the kube-apiserver. For more information about admission controllers, refer to Kubernetes Admission Controllers Reference.

                                                                                  You can go to any Master node in the current cluster to check whether these two features are enabled in the kube-apiserver.yaml file, or you can execute the following command on the Master node for a quick check:

                                                                                  [root@g-master1 ~]# cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep  enable-admission-plugins\n\n# The expected output is as follows:\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction\n
                                                                                  "},{"location":"en/admin/kpanda/namespaces/exclusive.html#enable-namespace-exclusive-nodes-on-global-cluster","title":"Enable Namespace Exclusive Nodes on Global Cluster","text":"

Since the Global cluster runs platform basic components such as kpanda, ghippo, and insight, enabling namespace exclusive nodes on Global may prevent system components from being scheduled onto the exclusive nodes when they restart, affecting the overall high availability of the system. Therefore, we generally do not recommend enabling the namespace exclusive node feature on the Global cluster.

                                                                                  If you do need to enable namespace exclusive nodes on the Global cluster, please follow the steps below:

                                                                                  1. Enable the PodNodeSelector and PodTolerationRestriction admission controllers for the kube-apiserver of the Global cluster

                                                                                    Note

                                                                                    If the cluster has already enabled the above two admission controllers, please skip this step and go directly to configure system component tolerations.

                                                                                    Go to any Master node in the current cluster to modify the kube-apiserver.yaml configuration file, or execute the following command on the Master node for configuration:

[root@g-master1 ~]# vi /etc/kubernetes/manifests/kube-apiserver.yaml\n\n# The relevant file content is as follows:\napiVersion: v1\nkind: Pod\nmetadata:\n    ......\nspec:\ncontainers:\n- command:\n    - kube-apiserver\n    ......\n    - --default-not-ready-toleration-seconds=300\n    - --default-unreachable-toleration-seconds=300\n    - --enable-admission-plugins=NodeRestriction   #List of enabled admission controllers\n    - --enable-aggregator-routing=False\n    - --enable-bootstrap-token-auth=true\n    - --endpoint-reconciler-type=lease\n    - --etcd-cafile=/etc/kubernetes/ssl/etcd/ca.crt\n    ......\n

                                                                                    Find the --enable-admission-plugins parameter and add the PodNodeSelector and PodTolerationRestriction admission controllers (separated by commas). Refer to the following:

# Add PodNodeSelector and PodTolerationRestriction\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction \n
                                                                                  2. Add toleration annotations to the namespace where the platform components are located

                                                                                    After enabling the admission controllers, you need to add toleration annotations to the namespace where the platform components are located to ensure the high availability of the platform components.

                                                                                    The system component namespaces for AI platform are as follows:

• kpanda-system: kpanda
• hwameiStor-system: hwameiStor
• istio-system: istio
• metallb-system: metallb
• cert-manager-system: cert-manager
• contour-system: contour
• kubean-system: kubean
• ghippo-system: ghippo
• kcoral-system: kcoral
• kcollie-system: kcollie
• insight-system: insight, insight-agent
• ipavo-system: ipavo
• kairship-system: kairship
• karmada-system: karmada
• amamba-system: amamba, jenkins
• skoala-system: skoala
• mspider-system: mspider
• mcamel-system: mcamel-rabbitmq, mcamel-elasticsearch, mcamel-mysql, mcamel-redis, mcamel-kafka, mcamel-minio, mcamel-postgresql
• spidernet-system: spidernet
• kangaroo-system: kangaroo
• gmagpie-system: gmagpie
• dowl-system: dowl

Check whether the above namespaces exist in the current cluster, and run the following command to add the annotation scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]' to each of them.

kubectl annotate ns <namespace-name> scheduler.alpha.kubernetes.io/defaultTolerations='[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]'\n
                                                                                    Please make sure to replace <namespace-name> with the name of the platform namespace you want to add the annotation to.
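
To confirm that the annotation has been applied, you can read it back; for example:

kubectl get ns <namespace-name> -o yaml | grep defaultTolerations\n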

                                                                                  3. Use the interface to set exclusive nodes for the namespace

                                                                                    After confirming that the PodNodeSelector and PodTolerationRestriction admission controllers on the cluster API server have been enabled, please follow the steps below to use the AI platform UI management interface to set exclusive nodes for the namespace.

                                                                                    1. Click the cluster name in the cluster list page, then click Namespace in the left navigation bar.

                                                                                    2. Click the namespace name, then click the Exclusive Node tab, and click Add Node on the bottom right.

                                                                                    3. Select which nodes you want to be exclusive to this namespace on the left side of the page. On the right side, you can clear or delete a selected node. Finally, click OK at the bottom.

                                                                                    4. You can view the current exclusive nodes for this namespace in the list. You can choose to Stop Exclusivity on the right side of the node.

                                                                                      After cancelling exclusivity, Pods from other namespaces can also be scheduled to this node.
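
Under the hood, this feature combines a node taint with the PodNodeSelector admission controller. A rough sketch of the equivalent manual commands follows; the taint key matches the toleration annotation above, but the exact taint value and node labels the platform applies are assumptions:

# Taint the node so pods without the matching toleration cannot be scheduled onto it\nkubectl taint nodes <node-name> ExclusiveNamespace=<namespace-name>:NoSchedule\n# Constrain the namespace's pods to the dedicated nodes via PodNodeSelector\nkubectl annotate ns <namespace-name> scheduler.alpha.kubernetes.io/node-selector=<label-key>=<label-value>\n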

                                                                                  "},{"location":"en/admin/kpanda/namespaces/exclusive.html#enable-namespace-exclusive-nodes-on-non-global-clusters","title":"Enable Namespace Exclusive Nodes on Non-Global Clusters","text":"

                                                                                  To enable namespace exclusive nodes on non-Global clusters, please follow the steps below:

                                                                                  1. Enable the PodNodeSelector and PodTolerationRestriction admission controllers for the kube-apiserver of the current cluster

                                                                                    Note

                                                                                    If the cluster has already enabled the above two admission controllers, please skip this step and go directly to using the interface to set exclusive nodes for the namespace.

                                                                                    Go to any Master node in the current cluster to modify the kube-apiserver.yaml configuration file, or execute the following command on the Master node for configuration:

[root@g-master1 ~]# vi /etc/kubernetes/manifests/kube-apiserver.yaml\n\n# The expected file content is as follows:\napiVersion: v1\nkind: Pod\nmetadata:\n    ......\nspec:\n  containers:\n  - command:\n    - kube-apiserver\n    ......\n    - --default-not-ready-toleration-seconds=300\n    - --default-unreachable-toleration-seconds=300\n    - --enable-admission-plugins=NodeRestriction   # List of enabled admission controllers\n    - --enable-aggregator-routing=False\n    - --enable-bootstrap-token-auth=true\n    - --endpoint-reconciler-type=lease\n    - --etcd-cafile=/etc/kubernetes/ssl/etcd/ca.crt\n    ......\n

                                                                                    Find the --enable-admission-plugins parameter and add the PodNodeSelector and PodTolerationRestriction admission controllers (separated by commas). Refer to the following:

# Append PodNodeSelector and PodTolerationRestriction\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction\n
                                                                                  2. Use the interface to set exclusive nodes for the namespace

                                                                                    After confirming that the PodNodeSelector and PodTolerationRestriction admission controllers on the cluster API server have been enabled, please follow the steps below to use the AI platform UI management interface to set exclusive nodes for the namespace.

                                                                                    1. Click the cluster name in the cluster list page, then click Namespace in the left navigation bar.

                                                                                    2. Click the namespace name, then click the Exclusive Node tab, and click Add Node on the bottom right.

                                                                                    3. Select which nodes you want to be exclusive to this namespace on the left side of the page. On the right side, you can clear or delete a selected node. Finally, click OK at the bottom.

                                                                                    4. You can view the current exclusive nodes for this namespace in the list. You can choose to Stop Exclusivity on the right side of the node.

After exclusivity is cancelled, Pods from other namespaces can also be scheduled onto this node.

3. Add a toleration annotation to the namespaces hosting components that require high availability (optional)

Execute the following command to add the annotation scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]' to the namespace hosting the components that require high availability.

kubectl annotate ns <namespace-name> scheduler.alpha.kubernetes.io/defaultTolerations='[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]'\n

                                                                                    Please make sure to replace <namespace-name> with the name of the platform namespace you want to add the annotation to.

                                                                                  "},{"location":"en/admin/kpanda/namespaces/podsecurity.html","title":"Pod Security Policy","text":"

Pod security policies in a Kubernetes cluster let you control the security-related behavior of Pods by configuring different levels and modes for specific namespaces; only Pods that meet the configured conditions are accepted by the system. Three levels and three modes are available, so you can choose the combination that best fits your restriction requirements.

                                                                                  Note

Only one security policy can be configured per security mode. Be careful when applying the enforce mode to a namespace, as violations will prevent Pods from being created.

                                                                                  This section will introduce how to configure Pod security policies for namespaces through the container management interface.

                                                                                  "},{"location":"en/admin/kpanda/namespaces/podsecurity.html#prerequisites","title":"Prerequisites","text":"
• The container management module has integrated or created a Kubernetes cluster of version v1.22 or above, and you can access the cluster's UI.

                                                                                  • A namespace has been created, a user has been created, and the user has been granted NS Admin or higher permissions. For details, refer to Namespace Authorization.

                                                                                  "},{"location":"en/admin/kpanda/namespaces/podsecurity.html#configure-pod-security-policies-for-namespace","title":"Configure Pod Security Policies for Namespace","text":"
                                                                                  1. Select the namespace for which you want to configure Pod security policies and go to the details page. Click Configure Policy on the Pod Security Policy page to go to the configuration page.

2. Click Add Policy on the configuration page, and a policy will appear, including a security level and a security mode. The following is a detailed introduction to the security levels and security modes.

Security levels: Privileged is an unrestricted policy that provides the widest possible range of permissions and allows known privilege escalations. Baseline is a minimally restrictive policy that prohibits known privilege escalations and allows the default (minimally specified) Pod configuration. Restricted is a highly restrictive policy that follows current best practices for hardening Pods. Security modes: Audit means violations of the specified policy add new events to the audit log, and the Pod can still be created. Warn means violations return user-visible warnings, and the Pod can still be created. Enforce means violations prevent the Pod from being created.

3. Different security levels correspond to different check items. If you are unsure how to configure your namespace, click Policy ConfigMap Explanation at the top right corner of the page to view detailed information.

                                                                                  4. Click Confirm. If the creation is successful, the security policy you configured will appear on the page.

                                                                                  5. Click \u2507 to edit or delete the security policy you configured.
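For reference, the interface manages these policies for you; in upstream Kubernetes, the same levels and modes are configured through Pod Security admission namespace labels. A minimal sketch, assuming a hypothetical namespace my-ns:

apiVersion: v1\nkind: Namespace\nmetadata:\n  name: my-ns\n  labels:\n    # enforce mode with the baseline level: violations block Pod creation\n    pod-security.kubernetes.io/enforce: baseline\n    # warn and audit modes with the restricted level: violations only warn or log\n    pod-security.kubernetes.io/warn: restricted\n    pod-security.kubernetes.io/audit: restricted\n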

                                                                                  "},{"location":"en/admin/kpanda/network/create-ingress.html","title":"Create an Ingress","text":"

In a Kubernetes cluster, an Ingress exposes HTTP and HTTPS routes from outside the cluster to Services within the cluster. Traffic routing is controlled by rules defined on the Ingress resource.
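For example, a minimal sketch of a simple Ingress that sends all traffic to the same Service (the names minimal-ingress and my-service and the port number are placeholders):

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: minimal-ingress\nspec:\n  rules:\n  - http:\n      paths:\n      - path: /           # match all paths\n        pathType: Prefix\n        backend:\n          service:\n            name: my-service   # the Service receiving all traffic\n            port:\n              number: 80\n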

                                                                                  Ingress is an API object that manages external access to services in the cluster, and the typical access method is HTTP. Ingress can provide load balancing, SSL termination, and name-based virtual hosting.

                                                                                  "},{"location":"en/admin/kpanda/network/create-ingress.html#prerequisites","title":"Prerequisites","text":"
• The container management module has integrated or created a Kubernetes cluster, and you can access the cluster's UI.
• A namespace and a user have been created, and the user has been granted the NS Editor role. For details, refer to Namespace Authorization.
• An ingress instance has been created, an application workload has been deployed, and the proper Service has been created.
• When a single instance contains multiple containers, make sure the ports used by the containers do not conflict; otherwise the deployment will fail.
                                                                                  "},{"location":"en/admin/kpanda/network/create-ingress.html#create-ingress","title":"Create ingress","text":"
                                                                                  1. After successfully logging in as the NS Editor user, click Clusters in the upper left corner to enter the Clusters page. In the list of clusters, click a cluster name.

                                                                                  2. In the left navigation bar, click Container Network -> Ingress to enter the service list, and click the Create Ingress button in the upper right corner.

                                                                                    Note

                                                                                    It is also possible to Create from YAML .

3. Open the Create Ingress page to configure it. There are two protocol types to choose from; refer to the following two parameter tables for configuration.

                                                                                  "},{"location":"en/admin/kpanda/network/create-ingress.html#create-http-protocol-ingress","title":"Create HTTP protocol ingress","text":"Parameter Description Example value Ingress name [Type] Required[Meaning] Enter the name of the new ingress. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and start with a lowercase English letter, lowercase English letters or numbers. Ing-01 Namespace [Type] Required[Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and start with a lowercase English letter and end with a lowercase English letter or number. default Protocol [Type] Required [Meaning] Refers to the protocol that authorizes inbound access to the cluster service, and supports HTTP (no identity authentication required) or HTTPS (identity authentication needs to be configured) protocol. Here select the ingress of HTTP protocol. HTTP Domain Name [Type] Required [Meaning] Use the domain name to provide external access services. The default is the domain name of the cluster testing.daocloud.io LB Type [Type] Required [Meaning] The usage range of the Ingress instance. Scope of use of Ingress Platform-level load balancer : In the same cluster, share the same Ingress instance, where all Pods can receive requests distributed by the load balancer. Tenant-level load balancer : Tenant load balancer, the Ingress instance belongs exclusively to the current namespace, or belongs to a certain workspace, and the set workspace includes the current namespace, and all Pods can receive it Requests distributed by this load balancer. Platform Level Load Balancer Ingress Class [Type] Optional[Meaning] Select the proper Ingress instance, and import traffic to the specified Ingress instance after selection. When it is None, the default DefaultClass is used. Please set the DefaultClass when creating an Ingress instance. For more information, refer to Ingress Class< br /> Ngnix Session persistence [Type] Optional[Meaning] Session persistence is divided into three types: L4 source address hash , Cookie Key , L7 Header Name . Keep L4 Source Address Hash : : When enabled, the following tag is added to the Annotation by default: nginx.ingress.kubernetes.io/upstream-hash-by: \"\\(binary_remote_addr\"<br /> __Cookie Key__ : When enabled, the connection from a specific client will be passed to the same Pod. After enabled, the following parameters are added to the Annotation by default:<br /> nginx.ingress.kubernetes.io/affinity: \"cookie\"<br /> nginx.ingress.kubernetes .io/affinity-mode: persistent<br /> __L7 Header Name__ : After enabled, the following tag is added to the Annotation by default: nginx.ingress.kubernetes.io/upstream-hash-by: \"\\)http_x_forwarded_for\" Close Path Rewriting [Type] Optional [Meaning] rewrite-target , in some cases, the URL exposed by the backend service is different from the path specified in the Ingress rule. If no URL rewriting configuration is performed, There will be an error when accessing. close Redirect [Type] Optional[Meaning] permanent-redirect , permanent redirection, after entering the rewriting path, the access path will be redirected to the set address. 
close Traffic Distribution [Type] Optional[Meaning] After enabled and set, traffic distribution will be performed according to the set conditions. Based on weight : After setting the weight, add the following Annotation to the created Ingress: nginx.ingress.kubernetes.io/canary-weight: \"10\" Based on Cookie : set After the cookie rules, the traffic will be distributed according to the set cookie conditions Based on Header : After setting the header rules, the traffic will be distributed according to the set header conditions Close Labels [Type] Optional [Meaning] Add a label for the ingress - Annotations [Type] Optional [Meaning] Add annotation for ingress -"},{"location":"en/admin/kpanda/network/create-ingress.html#create-https-protocol-ingress","title":"Create HTTPS protocol ingress","text":"Parameter Description Example value Ingress name [Type] Required[Meaning] Enter the name of the new ingress. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and start with a lowercase English letter, lowercase English letters or numbers. Ing-01 Namespace [Type] Required[Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and start with a lowercase English letter and end with a lowercase English letter or number. default Protocol [Type] Required [Meaning] Refers to the protocol that authorizes inbound access to the cluster service, and supports HTTP (no identity authentication required) or HTTPS (identity authentication needs to be configured) protocol. Here select the ingress of HTTPS protocol. HTTPS Domain Name [Type] Required [Meaning] Use the domain name to provide external access services. The default is the domain name of the cluster testing.daocloud.io Secret [Type] Required [Meaning] Https TLS certificate, Create Secret. Forwarding policy [Type] Optional[Meaning] Specify the access policy of Ingress. Path: Specifies the URL path for service access, the default is the root path/directoryTarget service: Service name for ingressTarget service port: Port exposed by the service LB Type [Type] Required [Meaning] The usage range of the Ingress instance. Platform-level load balancer : In the same cluster, the same Ingress instance is shared, and all Pods can receive requests distributed by the load balancer. Tenant-level load balancer : Tenant load balancer, the Ingress instance belongs exclusively to the current namespace or to a certain workspace. This workspace contains the current namespace, and all Pods can receive the workload from this Balanced distribution of requests. Platform Level Load Balancer Ingress Class [Type] Optional[Meaning] Select the proper Ingress instance, and import traffic to the specified Ingress instance after selection. When it is None, the default DefaultClass is used. Please set the DefaultClass when creating an Ingress instance. For more information, refer to Ingress Class< br /> None Session persistence [Type] Optional[Meaning] Session persistence is divided into three types: L4 source address hash , Cookie Key , L7 Header Name . 
Keep L4 Source Address Hash : : When enabled, the following tag is added to the Annotation by default: nginx.ingress.kubernetes.io/upstream-hash-by: \"\\(binary_remote_addr\"<br /> __Cookie Key__ : When enabled, the connection from a specific client will be passed to the same Pod. After enabled, the following parameters are added to the Annotation by default:<br /> nginx.ingress.kubernetes.io/affinity: \"cookie\"<br /> nginx.ingress.kubernetes .io/affinity-mode: persistent<br /> __L7 Header Name__ : After enabled, the following tag is added to the Annotation by default: nginx.ingress.kubernetes.io/upstream-hash-by: \"\\)http_x_forwarded_for\" Close Labels [Type] Optional [Meaning] Add a label for the ingress Annotations [Type] Optional[Meaning] Add annotation for ingress"},{"location":"en/admin/kpanda/network/create-ingress.html#create-ingress-successfully","title":"Create ingress successfully","text":"

                                                                                  After configuring all the parameters, click the OK button to return to the ingress list automatically. On the right side of the list, click \u2507 to modify or delete the selected ingress.

                                                                                  "},{"location":"en/admin/kpanda/network/create-services.html","title":"Create a Service","text":"

In a Kubernetes cluster, each Pod has its own internal IP address, but Pods in a workload may be created and deleted at any time, so exposing a Pod IP address directly cannot reliably provide external services.

This is why you create a Service: it provides a fixed IP address, decouples the front end of the workload from the back end, and allows external users to access the workload. A Service can also provide the Load Balancer feature, enabling users to access workloads from the public network.

                                                                                  "},{"location":"en/admin/kpanda/network/create-services.html#prerequisites","title":"Prerequisites","text":"
• The container management module has integrated or created a Kubernetes cluster, and you can access the cluster's UI.

• A namespace and a user have been created, and the user has been granted the NS Editor role. For details, refer to Namespace Authorization.

• When a single instance contains multiple containers, make sure the ports used by the containers do not conflict; otherwise the deployment will fail.

                                                                                  "},{"location":"en/admin/kpanda/network/create-services.html#create-service","title":"Create service","text":"
                                                                                  1. After successfully logging in as the NS Editor user, click Clusters in the upper left corner to enter the Clusters page. In the list of clusters, click a cluster name.

                                                                                  2. In the left navigation bar, click Container Network -> Service to enter the service list, and click the Create Service button in the upper right corner.

Tip

It is also possible to create a service via YAML .
                                                                                  3. Open the Create Service page, select an access type, and refer to the following three parameter tables for configuration.

                                                                                  "},{"location":"en/admin/kpanda/network/create-services.html#create-clusterip-service","title":"Create ClusterIP service","text":"

                                                                                  Click Intra-Cluster Access (ClusterIP) , which refers to exposing services through the internal IP of the cluster. The services selected for this option can only be accessed within the cluster. This is the default service type. Refer to the configuration parameters in the table below.
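For reference, a minimal sketch of the ClusterIP Service this form produces (the name svc-01, the label app:job01, and the ports are placeholder values taken from the example column):

apiVersion: v1\nkind: Service\nmetadata:\n  name: svc-01\n  namespace: default\nspec:\n  type: ClusterIP\n  selector:\n    app: job01        # label selector matching the target Pods\n  ports:\n  - name: http\n    protocol: TCP\n    port: 80          # service port\n    targetPort: 8080  # container port the workload listens on\n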

Parameter Description Example value Access type [Type] Required [Meaning] Specify the method of Pod service discovery; here, select intra-cluster access (ClusterIP). ClusterIP Service Name [Type] Required [Meaning] Enter the name of the new service. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and must start with a lowercase English letter and end with a lowercase English letter or number. Svc-01 Namespace [Type] Required [Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and must start with a lowercase English letter and end with a lowercase English letter or number. default Label selector [Type] Required [Meaning] Add a label; the Service selects Pods according to the label; fill it in and click Add. You can also reference the label of an existing workload: click Reference workload label , select the workload in the pop-up window, and the system will use the selected workload's label as the selector by default. app:job01 Port configuration [Type] Required [Meaning] To add a protocol port for the service, first select the port protocol type; currently TCP and UDP are supported. Port Name: enter the name of the custom port. Service port (port): the access port through which the Pod provides external services. Container port (targetport): the container port the workload actually listens on, used to expose the service within the cluster. Session Persistence [Type] Optional [Meaning] When enabled, requests from the same client are forwarded to the same Pod. Enabled Maximum session hold time [Type] Optional [Meaning] After session persistence is enabled, the maximum hold time; 30 seconds by default. 30 seconds Annotation [Type] Optional [Meaning] Add annotations for the service"},{"location":"en/admin/kpanda/network/create-services.html#create-nodeport-service","title":"Create NodePort service","text":"

Click NodePort , which means exposing the service via the IP and a static port ( NodePort ) on each node. A NodePort service is routed to the automatically created ClusterIP service. You can access a NodePort service from outside the cluster by requesting <NodeIP>:<NodePort> . Refer to the configuration parameters in the table below. Parameter Description Example value Access type [Type] Required [Meaning] Specify the method of Pod service discovery; here, select node access (NodePort). NodePort Service Name [Type] Required [Meaning] Enter the name of the new service. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and must start with a lowercase English letter and end with a lowercase English letter or number. Svc-01 Namespace [Type] Required [Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and must start with a lowercase English letter and end with a lowercase English letter or number. default Label selector [Type] Required [Meaning] Add a label; the Service selects Pods according to the label; fill it in and click Add. You can also reference the label of an existing workload: click Reference workload label , select the workload in the pop-up window, and the system will use the selected workload's label as the selector by default. Port configuration [Type] Required [Meaning] To add a protocol port for the service, first select the port protocol type; currently TCP and UDP are supported. Port Name: enter the name of the custom port. Service port (port): the access port through which the Pod provides external services; by default, the service port is set to the same value as the container port for convenience. Container port (targetport): the container port the workload actually listens on. Node port (nodeport): the port on the node, which receives traffic forwarded from the ClusterIP service; it serves as the entrance for external traffic. Session Persistence [Type] Optional [Meaning] When enabled, requests from the same client are forwarded to the same Pod. When enabled, .spec.sessionAffinity of the Service is ClientIP ; for details, refer to Session Affinity for Service. Enabled Maximum session hold time [Type] Optional [Meaning] After session persistence is enabled, the maximum hold time; the default timeout is 30 seconds ( .spec.sessionAffinityConfig.clientIP.timeoutSeconds defaults to 30). 30 seconds Annotation [Type] Optional [Meaning] Add annotations for the service"},{"location":"en/admin/kpanda/network/create-services.html#create-loadbalancer-service","title":"Create LoadBalancer service","text":"

                                                                                  Click Load Balancer , which refers to using the cloud provider's load balancer to expose services to the outside. External load balancers can route traffic to automatically created NodePort services and ClusterIP services. Refer to the configuration parameters in the table below.
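For reference, a minimal sketch of a LoadBalancer Service (the name, label, and ports are placeholders; with MetalLB, the external address is allocated from the configured IP pool unless explicitly specified):

apiVersion: v1\nkind: Service\nmetadata:\n  name: svc-01\n  namespace: default\nspec:\n  type: LoadBalancer\n  externalTrafficPolicy: Cluster  # or Local to keep traffic on the receiving node\n  selector:\n    app: job01\n  ports:\n  - protocol: TCP\n    port: 80\n    targetPort: 8080\n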

Parameter Description Example value Access type [Type] Required [Meaning] Specify the method of Pod service discovery; here, select load balancer access (LoadBalancer). LoadBalancer Service Name [Type] Required [Meaning] Enter the name of the new service. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and must start with a lowercase English letter and end with a lowercase English letter or number. Svc-01 Namespace [Type] Required [Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and must start with a lowercase English letter and end with a lowercase English letter or number. default External Traffic Policy [Type] Required [Meaning] Set the external traffic policy. Cluster: traffic can be forwarded to Pods on all nodes in the cluster. Local: traffic is only sent to Pods on the node that received it. Label selector [Type] Required [Meaning] Add a label; the Service selects Pods according to the label; fill it in and click Add. You can also reference the label of an existing workload: click Reference workload label , select the workload in the pop-up window, and the system will use the selected workload's label as the selector by default. Load balancing type [Type] Required [Meaning] The type of load balancing used; currently MetalLB and others are supported. MetalLB IP Pool [Type] Required [Meaning] When the selected load balancing type is MetalLB, the LoadBalancer Service allocates IP addresses from this pool by default and announces all IP addresses in this pool via ARP. For details, refer to: Install MetalLB Load balancing address [Type] Required [Meaning] 1. If you are using a public cloud CloudProvider, fill in the load balancing address provided by the cloud provider here. 2. If the load balancing type above is MetalLB, the IP is obtained from the IP pool above by default; if left empty, it is obtained automatically. Port configuration [Type] Required [Meaning] To add a protocol port for the service, first select the port protocol type; currently TCP and UDP are supported. Port Name: enter the name of the custom port. Service port (port): the access port through which the Pod provides external services; by default, the service port is set to the same value as the container port for convenience. Container port (targetport): the container port the workload actually listens on. Node port (nodeport): the port on the node, which receives traffic forwarded from the ClusterIP service; it serves as the entrance for external traffic. Annotation [Type] Optional [Meaning] Add annotations for the service"},{"location":"en/admin/kpanda/network/create-services.html#complete-service-creation","title":"Complete service creation","text":"

                                                                                  After configuring all parameters, click the OK button to return to the service list automatically. On the right side of the list, click \u2507 to modify or delete the selected service.

                                                                                  "},{"location":"en/admin/kpanda/network/network-policy.html","title":"Network Policies","text":"

                                                                                  Network policies in Kubernetes allow you to control network traffic at the IP address or port level (OSI layer 3 or layer 4). The container management module currently supports creating network policies based on Pods or namespaces, using label selectors to specify which traffic can enter or leave Pods with specific labels.

                                                                                  For more details on network policies, refer to the official Kubernetes documentation on Network Policies.

                                                                                  "},{"location":"en/admin/kpanda/network/network-policy.html#creating-network-policies","title":"Creating Network Policies","text":"

                                                                                  Currently, there are two methods available for creating network policies: YAML and form-based creation. Each method has its advantages and disadvantages, catering to different user needs.

                                                                                  YAML creation requires fewer steps and is more efficient, but it has a higher learning curve as it requires familiarity with configuring network policy YAML files.

                                                                                  Form-based creation is more intuitive and straightforward. Users can simply fill in the proper values based on the prompts. However, this method involves more steps.

                                                                                  "},{"location":"en/admin/kpanda/network/network-policy.html#yaml-creation","title":"YAML Creation","text":"
                                                                                  1. In the cluster list, click the name of the target cluster, then navigate to Container Network -> Network Policies -> Create with YAML in the left navigation bar.

                                                                                  2. In the pop-up dialog, enter or paste the pre-prepared YAML file, then click OK at the bottom of the dialog.
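For reference, a minimal sketch of a network policy you might paste here (the namespace, names, and labels are placeholders). It allows ingress to Pods labeled app: backend only from Pods labeled app: frontend on TCP port 8080:

apiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n  name: allow-frontend\n  namespace: default\nspec:\n  podSelector:\n    matchLabels:\n      app: backend        # the Pods this policy protects\n  policyTypes:\n  - Ingress\n  ingress:\n  - from:\n    - podSelector:\n        matchLabels:\n          app: frontend   # only these Pods may connect\n    ports:\n    - protocol: TCP\n      port: 8080\n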

                                                                                  "},{"location":"en/admin/kpanda/network/network-policy.html#form-based-creation","title":"Form-Based Creation","text":"
                                                                                  1. In the cluster list, click the name of the target cluster, then navigate to Container Network -> Network Policies -> Create Policy in the left navigation bar.

                                                                                  2. Fill in the basic information.

                                                                                    The name and namespace cannot be changed after creation.

                                                                                  3. Fill in the policy configuration.

                                                                                    The policy configuration includes ingress and egress policies. To establish a successful connection from a source Pod to a target Pod, both the egress policy of the source Pod and the ingress policy of the target Pod need to allow the connection. If either side does not allow the connection, the connection will fail.

                                                                                    • Ingress Policy: Click \u2795 to begin configuring the policy. Multiple policies can be configured. The effects of multiple network policies are cumulative. Only when all network policies are satisfied simultaneously can a connection be successfully established.

                                                                                    • Egress Policy

                                                                                  "},{"location":"en/admin/kpanda/network/network-policy.html#viewing-network-policies","title":"Viewing Network Policies","text":"
                                                                                  1. In the cluster list, click the name of the target cluster, then navigate to Container Network -> Network Policies . Click the name of the network policy.

                                                                                  2. View the basic configuration, associated instances, ingress policies, and egress policies of the policy.

                                                                                  Info

                                                                                  Under the \"Associated Instances\" tab, you can view instance monitoring, logs, container lists, YAML files, events, and more.

                                                                                  "},{"location":"en/admin/kpanda/network/network-policy.html#updating-network-policies","title":"Updating Network Policies","text":"

                                                                                  There are two ways to update network policies. You can either update them through the form or by using a YAML file.

                                                                                  • On the network policy list page, find the policy you want to update, and choose Update in the action column on the right to update it via the form. Choose Edit YAML to update it using a YAML file.

                                                                                  • Click the name of the network policy, then choose Update in the top right corner of the policy details page to update it via the form. Choose Edit YAML to update it using a YAML file.

                                                                                  "},{"location":"en/admin/kpanda/network/network-policy.html#deleting-network-policies","title":"Deleting Network Policies","text":"

                                                                                  There are two ways to delete network policies. You can delete network policies either through the form or by using a YAML file.

                                                                                  • On the network policy list page, find the policy you want to delete, and choose Delete in the action column on the right to delete it via the form. Choose Edit YAML to delete it using a YAML file.

                                                                                  • Click the name of the network policy, then choose Delete in the top right corner of the policy details page to delete it via the form. Choose Edit YAML to delete it using a YAML file.

                                                                                  "},{"location":"en/admin/kpanda/nodes/add-node.html","title":"Cluster Node Expansion","text":"

                                                                                  As the number of business applications continues to grow, the resources of the cluster become increasingly tight. At this point, you can expand the cluster nodes based on kubean. After the expansion, applications can run on the newly added nodes, alleviating resource pressure.

Only clusters created through the container management module support node scaling; clusters integrated from outside do not support this operation. This article mainly introduces adding worker nodes of the same architecture to a worker cluster. If you need to add control nodes or heterogeneous worker nodes to the cluster, refer to: Expanding the control node of the work cluster, Adding heterogeneous nodes to the work cluster, Expanding the worker node of the global service cluster.

                                                                                  1. On the Clusters page, click the name of the target cluster.

If the Cluster Type contains the label Integrated Cluster, it means that the cluster does not support node scaling.

                                                                                  2. Click Nodes in the left navigation bar, and then click Integrate Node in the upper right corner of the page.

                                                                                  3. Enter the host name and node IP and click OK.

                                                                                    Click \u2795 Add Worker Node to continue accessing more nodes.

                                                                                  Note

Integrating a node takes about 20 minutes; please be patient.

                                                                                  "},{"location":"en/admin/kpanda/nodes/delete-node.html","title":"Node Scales Down","text":"

When the business peak has passed, you can reduce the size of the cluster and remove redundant nodes (scale down) to save resource costs. After a node is removed, applications can no longer run on it.

                                                                                  "},{"location":"en/admin/kpanda/nodes/delete-node.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • The current operating user has the Cluster Admin role authorization.
• Only clusters created through the container management module support node scaling; clusters integrated from outside do not support this operation.
• Before removing a node, you need to pause scheduling on it and evict the applications running on it to other nodes.
• Eviction method: log in to a controller node and use the kubectl drain command to evict all Pods on the node. Safe eviction allows the containers in the Pods to terminate gracefully; see the sketch below.
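A minimal sketch of the eviction commands (the node name is a placeholder):

# Cordon the node first so that no new Pods are scheduled to it\nkubectl cordon <node-name>\n\n# Safely evict all Pods; DaemonSet-managed Pods are skipped and emptyDir data is removed\nkubectl drain <node-name> --ignore-daemonsets --delete-emptydir-data\n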
                                                                                  "},{"location":"en/admin/kpanda/nodes/delete-node.html#precautions","title":"Precautions","text":"
1. When scaling down a cluster, nodes can only be removed one by one, not in batches.

2. If you need to remove controller nodes, ensure that the final number of controller nodes is an odd number.

3. The first controller node cannot be taken offline during scale-down. If this operation is necessary, please contact the after-sales engineer.

                                                                                  "},{"location":"en/admin/kpanda/nodes/delete-node.html#steps","title":"Steps","text":"
                                                                                  1. On the Clusters page, click the name of the target cluster.

If the Cluster Type has the tag Integrated Cluster , it means that the cluster does not support node scaling.

                                                                                  2. Click Nodes on the left navigation bar, find the node to be uninstalled, click \u2507 and select Remove .

                                                                                  3. Enter the node name, and click Delete to confirm.

                                                                                  "},{"location":"en/admin/kpanda/nodes/labels-annotations.html","title":"Labels and Annotations","text":"

                                                                                  Labels are identifying key-value pairs added to Kubernetes objects such as Pods, nodes, and clusters, which can be combined with label selectors to find and filter Kubernetes objects that meet certain conditions. Each key must be unique for a given object.

Annotations, like labels, are key/value pairs, but they do not have identification or filtering features. Annotations can be used to attach arbitrary metadata to nodes. Annotation keys usually use the format prefix(optional)/name(required) , for example nfd.node.kubernetes.io/extended-resources . If the prefix is omitted, the annotation key is considered private to the user.

For more information about labels and annotations, refer to the official Kubernetes documentation on labels and selectors or Annotations.

The steps to add or delete labels and annotations are as follows:

                                                                                  1. On the Clusters page, click the name of the target cluster.

                                                                                  2. Click Nodes on the left navigation bar, click the \u2507 operation icon on the right side of the node, and click Edit Labels or Edit Annotations .

3. Click \u2795 Add to add labels or annotations, click X to delete labels or annotations, and finally click OK .
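The same can be done with kubectl; a minimal sketch, assuming a hypothetical label disktype=ssd and annotation example.com/notes:

# Add or update a label on a node\nkubectl label node <node-name> disktype=ssd --overwrite\n\n# Add an annotation to a node\nkubectl annotate node <node-name> example.com/notes='maintained by team-a'\n\n# Remove a label or an annotation by appending a dash to its key\nkubectl label node <node-name> disktype-\nkubectl annotate node <node-name> example.com/notes-\n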

                                                                                  "},{"location":"en/admin/kpanda/nodes/node-authentication.html","title":"Node Authentication","text":""},{"location":"en/admin/kpanda/nodes/node-authentication.html#authenticate-nodes-using-ssh-keys","title":"Authenticate Nodes Using SSH Keys","text":"

                                                                                  If you choose to authenticate the nodes of the cluster-to-be-created using SSH keys, you need to configure the public and private keys according to the following instructions.

                                                                                  1. Run the following command on any node within the management cluster of the cluster-to-be-created to generate the public and private keys.

                                                                                    cd /root/.ssh\nssh-keygen -t rsa\n
                                                                                  2. Run the ls command to check if the keys have been successfully created in the management cluster. The correct output should be as follows:

                                                                                    ls\nid_rsa  id_rsa.pub  known_hosts\n

                                                                                    The file named id_rsa is the private key, and the file named id_rsa.pub is the public key.

                                                                                  3. Run the following command to load the public key file id_rsa.pub onto all the nodes of the cluster-to-be-created.

                                                                                    ssh-copy-id -i /root/.ssh/id_rsa.pub root@10.0.0.0\n

                                                                                    Replace the user account and node IP in the above command with the username and IP of the nodes in the cluster-to-be-created. The same operation needs to be performed on every node in the cluster-to-be-created.

                                                                                  4. Run the following command to view the private key file id_rsa created in step 1.

                                                                                    cat /root/.ssh/id_rsa\n

                                                                                    The output should be as follows:

                                                                                    -----BEGIN RSA PRIVATE KEY-----\nMIIEpQIBAAKCAQEA3UvyKINzY5BFuemQ+uJ6q+GqgfvnWwNC8HzZhpcMSjJy26MM\nUtBEBJxy8fMi57XcjYxPibXW/wnd+32ICCycqCwByUmuXeCC1cjlCQDqjcAvXae7\nY54IXGF7wm2IsMNwf0kjFEXjuS48FLDA0mGRaN3BG+Up5geXcHckg3K5LD8kXFFx\ndEmSIjdyw55NaUitmEdHzN7cIdfi6Z56jcV8dcFBgWKUx+ebiyPmZBkXToz6GnMF\nrswzzZCl+G6Jb2xTGy7g7ozb4BoZd1IpSD5EhDanRrESVE0C5YuJ5zUAC0CvVd1l\nv67AK8Ko6MXToHp01/bcsvlM6cqgwUFXZKVeOwIDAQABAoIBAQCO36GQlo3BEjxy\nM2HvGJmqrx+unDxafliRe4nVY2AD515Qf4xNSzke4QM1QoyenMOwf446krQkJPK0\nk+9nl6Xszby5gGCbK4BNFk8I6RaGPjZWeRx6zGUJf8avWJiPxx6yjz2esSC9RiR0\nF0nmiiefVMyAfgv2/5++dK2WUFNNRKLgSRRpP5bRaD5wMzzxtSSXrUon6217HO8p\n3RoWsI51MbVzhdVgpHUNABcoa0rpr9svT6XLKZxY8mxpKFYjM0Wv2JIDABg3kBvh\nQbJ7kStCO3naZjKMU9UuSqVJs06cflGYw7Or8/tABR3LErNQKPjkhAQqt0DXw7Iw\n3tKdTAJBAoGBAP687U7JAOqQkcphek2E/A/sbO/d37ix7Z3vNOy065STrA+ZWMZn\npZ6Ui1B/oJpoZssnfvIoz9sn559X0j67TljFALFd2ZGS0Fqh9KVCqDvfk+Vst1dq\n+3r/yZdTOyswoccxkJiC/GDwZGK0amJWqvob39JCZhDAKIGLbGMmjdAHAoGBAN5k\nm1WGnni1nZ+3dryIwgB6z1hWcnLTamzSET6KhSuo946ET0IRG9xtlheCx6dqICbr\nVk1Y4NtRZjK/p/YGx59rDWf7E3I8ZMgR7mjieOcUZ4lUlA4l7ZIlW/2WZHW+nUXO\nTi20fqJ8qSp4BUvOvuth1pz2GLUHe2/Fxjf7HIstAoGBAPHpPr9r+TfIlPsJeRj2\n6lzA3G8qWFRQfGRYjv0fjv0pA+RIb1rzgP/I90g5+63G6Z+R4WdcxI/OJJNY1iuG\nuw9n/pFxm7U4JC990BPE6nj5iLz+clpNGYckNDBF9VG9vFSrSDLdaYkxoVNvG/xJ\na9Na90H4lm7f3VewrPy310KvAoGAZr+mwNoEh5Kpc6xo8Gxi7aPP/mlaUVD6X7Ki\ngvmu02AqmC7rC4QqEiqTaONkaSXwGusqIWxJ3yp5hELmUBYLzszAEeV/s4zRp1oZ\ng133LBRSTbHFAdBmNdqK6Nu+KGRb92980UMOKvZbliKDl+W6cbfvVu+gtKrzTc3b\naevb4TUCgYEAnJAxyVYDP1nJf7bjBSHXQu1E/DMwbtrqw7dylRJ8cAzI7IxfSCez\n7BYWq41PqVd9/zrb3Pbh2phiVzKe783igAIMqummcjo/kZyCwFsYBzK77max1jF5\naPQsLbRS2aDz8kIH6jHPZ/R+15EROmdtLmA7vIJZGerWWQR0dUU+XXA=\n

                                                                                  Copy the content of the private key and paste it into the interface's key input field.

                                                                                  "},{"location":"en/admin/kpanda/nodes/node-check.html","title":"Create a cluster node availability check","text":"

                                                                                  When creating a cluster or adding nodes to an existing cluster, refer to the table below to check the node configuration to avoid cluster creation or expansion failure due to wrong node configuration.

Check Item Description OS Refer to Supported Architectures and Operating Systems SELinux Off Firewall Off Architecture Consistency Consistent CPU architecture between nodes (such as ARM or x86) Host Time The time difference between all hosts is within 10 seconds. Network Connectivity The node and its SSH port can be accessed normally by the platform. CPU Available CPU resources are greater than 4 Cores Memory Available memory resources are greater than 8 GB"},{"location":"en/admin/kpanda/nodes/node-check.html#supported-architectures-and-operating-systems","title":"Supported architectures and operating systems","text":"Architecture Operating System Remarks ARM Kylin Linux Advanced Server release V10 (Sword) SP2 Recommended ARM UOS Linux ARM openEuler x86 CentOS 7.x Recommended x86 Redhat 7.x Recommended x86 Redhat 8.x Recommended x86 Flatcar Container Linux by Kinvolk x86 Debian Bullseye, Buster, Jessie, Stretch x86 Ubuntu 16.04, 18.04, 20.04, 22.04 x86 Fedora 35, 36 x86 Fedora CoreOS x86 openSUSE Leap 15.x/Tumbleweed x86 Oracle Linux 7, 8, 9 x86 Alma Linux 8, 9 x86 Rocky Linux 8, 9 x86 Amazon Linux 2 x86 Kylin Linux Advanced Server release V10 (Sword) - SP2 Haiguang x86 UOS Linux x86 openEuler"},{"location":"en/admin/kpanda/nodes/node-details.html","title":"Node Details","text":"

                                                                                  After accessing or creating a cluster, you can view the information of each node in the cluster, including node status, labels, resource usage, Pod, monitoring information, etc.

                                                                                  1. On the Clusters page, click the name of the target cluster.

                                                                                  2. Click Nodes on the left navigation bar to view the node status, role, label, CPU/memory usage, IP address, and creation time.

                                                                                  3. Click the node name to enter the node details page to view more information, including overview information, pod information, label annotation information, event list, status, etc.

                                                                                    In addition, you can also view the node's YAML file, monitoring information, labels and annotations, etc.

                                                                                  "},{"location":"en/admin/kpanda/nodes/schedule.html","title":"Node Scheduling","text":"

Nodes can be cordoned (scheduling suspended) or uncordoned (scheduling resumed). Suspending scheduling means that new Pods will no longer be scheduled to the node; resuming scheduling means that Pods can be scheduled to the node again.

                                                                                  1. On the Clusters page, click the name of the target cluster.

                                                                                  2. Click Nodes on the left navigation bar, click the \u2507 operation icon on the right side of the node, and click the Cordon button to suspend scheduling the node.

                                                                                  3. Click the \u2507 operation icon on the right side of the node, and click the Uncordon button to resume scheduling the node.

                                                                                  The node scheduling status may be delayed due to network conditions. Click the refresh icon on the right side of the search box to refresh the node scheduling status.
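The equivalent kubectl commands (the node name is a placeholder):

# Suspend scheduling (Cordon)\nkubectl cordon <node-name>\n\n# Resume scheduling (Uncordon)\nkubectl uncordon <node-name>\n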

                                                                                  "},{"location":"en/admin/kpanda/nodes/taints.html","title":"Node Taints","text":"

A taint allows a node to repel certain Pods, preventing them from being scheduled onto that node. One or more taints can be applied to each node, and Pods that do not tolerate these taints will not be scheduled onto it.

                                                                                  "},{"location":"en/admin/kpanda/nodes/taints.html#precautions","title":"Precautions","text":"
                                                                                  1. The current operating user should have NS Editor role authorization or other higher permissions.
                                                                                  2. After adding a taint to a node, only Pods that can tolerate the taint can be scheduled to the node.
                                                                                  "},{"location":"en/admin/kpanda/nodes/taints.html#steps","title":"Steps","text":"
                                                                                  1. Find the target cluster on the Clusters page, and click the cluster name to enter the Cluster page.

                                                                                  2. In the left navigation bar, click Nodes , find the node that needs to modify the taint, click the \u2507 operation icon on the right and click the Edit Taints button.

                                                                                  3. Enter the key value information of the taint in the pop-up box, select the taint effect, and click OK .

                                                                                    Click \u2795 Add to add multiple taints to the node, and click X on the right side of the taint effect to delete the taint.

                                                                                    Currently supports three taint effects:

                                                                                    • NoExecute: This affects pods that are already running on the node as follows:

                                                                                      • Pods that do not tolerate the taint are evicted immediately
                                                                                      • Pods that tolerate the taint without specifying tolerationSeconds in their toleration specification remain bound forever
                                                                                      • Pods that tolerate the taint with a specified tolerationSeconds remain bound for the specified amount of time. After that time elapses, the node lifecycle controller evicts the Pods from the node.
                                                                                    • NoSchedule: No new Pods will be scheduled on the tainted node unless they have a matching toleration. Pods currently running on the node are not evicted.

                                                                                    • PreferNoSchedule: This is a \"preference\" or \"soft\" version of NoSchedule. The control plane will try to avoid placing a Pod that does not tolerate the taint on the node, but this is not guaranteed, so this effect is not recommended for production environments.

                                                                                  For more details about taints, refer to the Kubernetes documentation on Taints and Tolerations.
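
                                                                                  For reference, the same taint can be applied from the command line, and a Pod opts in with a matching toleration; a minimal sketch (the node name and key-value pair are hypothetical):

                                                                                  # Apply a NoSchedule taint to the node\nkubectl taint nodes node-01 dedicated=gpu:NoSchedule\n\n# Pod spec excerpt tolerating that taint\napiVersion: v1\nkind: Pod\nmetadata:\n  name: demo\nspec:\n  tolerations:\n  - key: \"dedicated\"\n    operator: \"Equal\"\n    value: \"gpu\"\n    effect: \"NoSchedule\"\n  containers:\n  - name: app\n    image: nginx\n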

                                                                                  "},{"location":"en/admin/kpanda/olm/import-miniooperator.html","title":"Importing MinIo Operator Offline","text":"

                                                                                  This guide explains how to import the MinIO Operator in an environment without internet access.

                                                                                  "},{"location":"en/admin/kpanda/olm/import-miniooperator.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • The current cluster is connected to Container Management, and the Global cluster has the kolm component installed (search the Helm templates for kolm).
                                                                                  • The current cluster has the olm component installed, version 0.2.4 or higher (search the Helm templates for olm).
                                                                                  • Ability to execute Docker commands.
                                                                                  • Prepare a container registry.
                                                                                  "},{"location":"en/admin/kpanda/olm/import-miniooperator.html#steps","title":"Steps","text":"
                                                                                  1. Set the environment variables in the execution environment and use them in the subsequent steps by running the following command:

                                                                                    export OPM_IMG=10.5.14.200/quay.m.daocloud.io/operator-framework/opm:v1.29.0 \nexport BUNDLE_IMG=10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3 \n

                                                                                    How to get the above image addresses:

                                                                                    Go to Container Management -> Select the current cluster -> Helm Apps -> View the olm component -> Plugin Settings , and find the images needed for the opm, minio, minio bundle, and minio operator in the subsequent steps.

                                                                                    Using the screenshot as an example, the four image addresses are as follows:\n\n# opm image\n10.5.14.200/quay.m.daocloud.io/operator-framework/opm:v1.29.0\n\n# minio image\n10.5.14.200/quay.m.daocloud.io/minio/minio:RELEASE.2023-03-24T21-41-23Z\n\n# minio bundle image\n10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3\n\n# minio operator image\n10.5.14.200/quay.m.daocloud.io/minio/operator:v5.0.3\n
                                                                                  2. Run the opm command to get the operators included in the offline bundle image.

                                                                                    # Create the operator directory\n$ mkdir minio-operator && cd minio-operator \n\n# Get the operator yaml\n$ docker run --user root -v $PWD/minio-operator:/minio-operator ${OPM_IMG} alpha bundle unpack --skip-tls-verify -v -d ${BUNDLE_IMG} -o ./minio-operator\n\n# Expected result\n.\n\u2514\u2500\u2500 minio-operator\n    \u251c\u2500\u2500 manifests\n    \u2502   \u251c\u2500\u2500 console-env_v1_configmap.yaml\n    \u2502   \u251c\u2500\u2500 console-sa-secret_v1_secret.yaml\n    \u2502   \u251c\u2500\u2500 console_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 minio-operator.clusterserviceversion.yaml\n    \u2502   \u251c\u2500\u2500 minio.min.io_tenants.yaml\n    \u2502   \u251c\u2500\u2500 operator_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 sts.min.io_policybindings.yaml\n    \u2502   \u2514\u2500\u2500 sts_v1_service.yaml\n    \u2514\u2500\u2500 metadata\n        \u2514\u2500\u2500 annotations.yaml\n\n3 directories, 9 files\n
                                                                                  3. Replace all image addresses in the minio-operator/manifests/minio-operator.clusterserviceversion.yaml file with the image addresses from the offline container registry.

                                                                                    Before replacement:

                                                                                    After replacement:
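
                                                                                    One way to perform the replacement from the command line; a sketch that assumes the manifest images originally point at quay.io and should be rewritten to the offline registry used in the steps above:

                                                                                    sed -i 's#quay.io#10.5.14.200/quay.m.daocloud.io#g' minio-operator/manifests/minio-operator.clusterserviceversion.yaml\n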

                                                                                  4. Generate a Dockerfile for building the bundle image.

                                                                                    $ docker run --user root -v $PWD:/minio-operator -w /minio-operator ${OPM_IMG} alpha bundle generate --channels stable,beta -d /minio-operator/minio-operator/manifests -e stable -p minio-operator \u00a0\n\n# Expected result\n.\n\u251c\u2500\u2500 bundle.Dockerfile\n\u2514\u2500\u2500 minio-operator\n    \u251c\u2500\u2500 manifests\n    \u2502   \u251c\u2500\u2500 console-env_v1_configmap.yaml\n    \u2502   \u251c\u2500\u2500 console-sa-secret_v1_secret.yaml\n    \u2502   \u251c\u2500\u2500 console_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 minio-operator.clusterserviceversion.yaml\n    \u2502   \u251c\u2500\u2500 minio.min.io_tenants.yaml\n    \u2502   \u251c\u2500\u2500 operator_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 sts.min.io_policybindings.yaml\n    \u2502   \u2514\u2500\u2500 sts_v1_service.yaml\n    \u2514\u2500\u2500 metadata\n        \u2514\u2500\u2500 annotations.yaml\n\n3 directories, 10 files\n
                                                                                  5. Build the bundle image and push it to the offline registry.

                                                                                    # Set the new bundle image\nexport OFFLINE_BUNDLE_IMG=10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3-offline \n\n$ docker build . -f bundle.Dockerfile -t ${OFFLINE_BUNDLE_IMG} \u00a0\n\n$ docker push ${OFFLINE_BUNDLE_IMG}\n
                                                                                  6. Generate a Dockerfile for building the catalog image.

                                                                                    $ docker run --user root -v $PWD:/minio-operator -w /minio-operator ${OPM_IMG} index add  --bundles ${OFFLINE_BUNDLE_IMG} --generate --binary-image ${OPM_IMG} --skip-tls-verify\n\n# Expected result\n.\n\u251c\u2500\u2500 bundle.Dockerfile\n\u251c\u2500\u2500 database\n\u2502   \u2514\u2500\u2500 index.db\n\u251c\u2500\u2500 index.Dockerfile\n\u2514\u2500\u2500 minio-operator\n    \u251c\u2500\u2500 manifests\n    \u2502   \u251c\u2500\u2500 console-env_v1_configmap.yaml\n    \u2502   \u251c\u2500\u2500 console-sa-secret_v1_secret.yaml\n    \u2502   \u251c\u2500\u2500 console_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 minio.min.io_tenants.yaml\n    \u2502   \u251c\u2500\u2500 minio-operator.clusterserviceversion.yaml\n    \u2502   \u251c\u2500\u2500 operator_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 sts.min.io_policybindings.yaml\n    \u2502   \u2514\u2500\u2500 sts_v1_service.yaml\n    \u2514\u2500\u2500 metadata\n        \u2514\u2500\u2500 annotations.yaml\n\n4 directories, 12 files\n
                                                                                  7. Build the catalog image.

                                                                                    # Set the new catalog image  \nexport OFFLINE_CATALOG_IMG=10.5.14.200/release.daocloud.io/operator-framework/system-operator-index:v0.1.0-offline\n\n$ docker build . -f index.Dockerfile -t ${OFFLINE_CATALOG_IMG}  \n\n$ docker push ${OFFLINE_CATALOG_IMG}\n
                                                                                  8. Go to Container Management and update the built-in catsrc image for the olm Helm App (enter the catalog image ${catalog-image} specified when building the catalog image).

                                                                                  9. After the update is successful, the minio-operator component will appear in the Operator Hub.

                                                                                  "},{"location":"en/admin/kpanda/permissions/cluster-ns-auth.html","title":"Cluster and Namespace Authorization","text":"

                                                                                  Container management implements authorization based on global authority management and global user/group management. If you need to grant users the highest authority for container management (the ability to create, manage, and delete all clusters), refer to What is Access Control.

                                                                                  "},{"location":"en/admin/kpanda/permissions/cluster-ns-auth.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before authorizing users/groups, complete the following preparations:

                                                                                  • The user/group to be authorized has been created in Global Management; refer to user.

                                                                                  • Only Kpanda Owner and Cluster Admin of the current cluster have cluster authorization capability. For details, refer to Permission Description.

                                                                                  • Only Kpanda Owner , Cluster Admin of the current cluster, and NS Admin of the current namespace have namespace authorization capability.

                                                                                  "},{"location":"en/admin/kpanda/permissions/cluster-ns-auth.html#cluster-authorization","title":"Cluster Authorization","text":"
                                                                                  1. After logging in to the platform, click Privilege Management under Container Management on the left menu bar; the page defaults to the Cluster Permissions tab.

                                                                                  2. Click the Add Authorization button.

                                                                                  3. On the Add Cluster Permission page, select the target cluster, the user/group to be authorized, and click OK .

                                                                                    Currently, the only supported cluster role is Cluster Admin . For details about permissions, refer to Permission Description. To authorize multiple users/groups at the same time, click Add User Permissions to add additional entries.

                                                                                  4. Return to the cluster permission management page, and a message appears on the screen: Cluster permission added successfully .

                                                                                  "},{"location":"en/admin/kpanda/permissions/cluster-ns-auth.html#namespace-authorization","title":"Namespace Authorization","text":"
                                                                                  1. After the user logs in to the platform, click Permissions under Container Management on the left menu bar, and click the Namespace Permissions tab.

                                                                                  2. Click the Add Authorization button. On the Add Namespace Permission page, select the target cluster, target namespace, and user/group to be authorized, and click OK .

                                                                                    The currently supported namespace roles are NS Admin, NS Editor, and NS Viewer. For details about permissions, refer to Permission Description. To authorize multiple users/groups at the same time, click Add User Permission to add additional entries. Click OK to complete the permission authorization.

                                                                                  3. Return to the namespace permission management page, and a message appears on the screen: Namespace permission added successfully .

                                                                                    Tip

                                                                                    If you need to delete or edit permissions later, you can click \u2507 on the right side of the list and select Edit or Delete .

                                                                                  "},{"location":"en/admin/kpanda/permissions/custom-kpanda-role.html","title":"Adding RBAC Rules to System Roles","text":"

                                                                                  Previously, the RBAC rules for system roles in container management were pre-defined and could not be modified by users. To support more flexible permission settings and meet customized needs for system roles, you can now modify the RBAC rules of system roles such as cluster admin, ns admin, ns editor, and ns viewer.

                                                                                  The following example demonstrates how to add a new ns-view rule, granting the authority to delete workload deployments. Similar operations can be performed for other rules.

                                                                                  "},{"location":"en/admin/kpanda/permissions/custom-kpanda-role.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before adding RBAC rules to system roles, the following prerequisites must be met:

                                                                                  • Container management v0.27.0 and above.
                                                                                  • Integrated Kubernetes cluster or created Kubernetes cluster, and able to access the cluster's UI interface.
                                                                                  • Completed creation of a namespace and user account, and the granting of NS Viewer. For details, refer to namespace authorization.

                                                                                  Note

                                                                                  • RBAC rules only need to be added in the Global Cluster; the Kpanda controller will synchronize the added rules to all integrated subclusters. Synchronization may take some time to complete.
                                                                                  • RBAC rules can only be added in the Global Cluster. RBAC rules added in subclusters will be overridden by the system role permissions of the Global Cluster.
                                                                                  • Rules can only be added through ClusterRoles that carry a fixed label. Replacing or deleting rules is not supported, nor is adding rules through a Role. The correspondence between built-in roles and the ClusterRole labels users must apply is as follows.

                                                                                    cluster-admin: rbac.kpanda.io/role-template-cluster-admin: \"true\"\ncluster-edit: rbac.kpanda.io/role-template-cluster-edit: \"true\"\ncluster-view: rbac.kpanda.io/role-template-cluster-view: \"true\"\nns-admin: rbac.kpanda.io/role-template-ns-admin: \"true\"\nns-edit: rbac.kpanda.io/role-template-ns-edit: \"true\"\nns-view: rbac.kpanda.io/role-template-ns-view: \"true\"\n
                                                                                  "},{"location":"en/admin/kpanda/permissions/custom-kpanda-role.html#steps","title":"Steps","text":"
                                                                                  1. Create a deployment by a user with admin or cluster admin permissions.

                                                                                  2. Grant a user the ns-viewer role to provide them with the ns-view permission.

                                                                                  3. Switch the login user to ns-viewer, open the console to get the token for the ns-viewer user, and use curl to request and delete the nginx deployment mentioned above. However, a prompt appears as below, indicating the user doesn't have permission to delete it.

                                                                                    [root@master-01 ~]# curl -k -X DELETE  'https://${URL}/apis/kpanda.io/v1alpha1/clusters/cluster-member/namespaces/default/deployments/nginx' -H 'authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJOU044MG9BclBRMzUwZ2VVU2ZyNy1xMEREVWY4MmEtZmJqR05uRE1sd1lFIn0.eyJleHAiOjE3MTU3NjY1NzksImlhdCI6MTcxNTY4MDE3OSwiYXV0aF90aW1lIjoxNzE1NjgwMTc3LCJqdGkiOiIxZjI3MzJlNC1jYjFhLTQ4OTktYjBiZC1iN2IxZWY1MzAxNDEiLCJpc3MiOiJodHRwczovLzEwLjYuMjAxLjIwMTozMDE0Ny9hdXRoL3JlYWxtcy9naGlwcG8iLCJhdWQiOiJfX2ludGVybmFsLWdoaXBwbyIsInN1YiI6ImMxZmMxM2ViLTAwZGUtNDFiYS05ZTllLWE5OGU2OGM0MmVmMCIsInR5cCI6IklEIiwiYXpwIjoiX19pbnRlcm5hbC1naGlwcG8iLCJzZXNzaW9uX3N0YXRlIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiYXRfaGFzaCI6IlJhTHoyQjlKQ2FNc1RrbGVMR3V6blEiLCJhY3IiOiIwIiwic2lkIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJncm91cHMiOltdLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJucy12aWV3ZXIiLCJsb2NhbGUiOiIifQ.As2ipMjfvzvgONAGlc9RnqOd3zMwAj82VXlcqcR74ZK9tAq3Q4ruQ1a6WuIfqiq8Kq4F77ljwwzYUuunfBli2zhU2II8zyxVhLoCEBu4pBVBd_oJyUycXuNa6HfQGnl36E1M7-_QG8b-_T51wFxxVb5b7SEDE1AvIf54NAlAr-rhDmGRdOK1c9CohQcS00ab52MD3IPiFFZ8_Iljnii-RpXKZoTjdcULJVn_uZNk_SzSUK-7MVWmPBK15m6sNktOMSf0pCObKWRqHd15JSe-2aA2PKBo1jBH3tHbOgZyMPdsLI0QdmEnKB5FiiOeMpwn_oHnT6IjT-BZlB18VkW8rA'\n{\"code\":7,\"message\":\"[RBAC] delete resources(deployments: nginx) is forbidden for user(ns-viewer) in cluster(cluster-member)\",\"details\":[]}[root@master-01 ~]#\n[root@master-01 ~]#\n
                                                                                  4. Create a ClusterRole on the global cluster, as shown in the yaml below.

                                                                                    apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: append-ns-view # (1)!\n  labels:\n    rbac.kpanda.io/role-template-ns-view: \"true\" # (2)!\nrules:\n  - apiGroups: [ \"apps\" ]\n    resources: [ \"deployments\" ]\n    verbs: [ \"delete\" ]\n
                                                                                    1. This field value can be arbitrarily specified, as long as it is not duplicated and complies with the Kubernetes resource naming conventions.
                                                                                    2. When adding rules to different roles, make sure to apply different labels.
                                                                                  5. Wait for the Kpanda controller to merge the user-created rule into the built-in ns-viewer role, then check whether the rule added in the previous step is present for ns-viewer.

                                                                                    [root@master-01 ~]# kubectl get clusterrole role-template-ns-view -oyaml|grep deployments -C 10|tail -n 6\n
                                                                                    - apiGroups:\n  - apps\n  resources:\n  - deployments\n  verbs:\n  - delete\n

                                                                                  6. Use curl again to request deletion of the aforementioned nginx deployment. This time the deletion succeeds, which means ns-viewer has successfully gained the rule to delete deployments.

                                                                                    [root@master-01 ~]# curl -k -X DELETE  'https://${URL}/apis/kpanda.io/v1alpha1/clusters/cluster-member/namespaces/default/deployments/nginx' -H 'authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJOU044MG9BclBRMzUwZ2VVU2ZyNy1xMEREVWY4MmEtZmJqR05uRE1sd1lFIn0.eyJleHAiOjE3MTU3NjY1NzksImlhdCI6MTcxNTY4MDE3OSwiYXV0aF90aW1lIjoxNzE1NjgwMTc3LCJqdGkiOiIxZjI3MzJlNC1jYjFhLTQ4OTktYjBiZC1iN2IxZWY1MzAxNDEiLCJpc3MiOiJodHRwczovLzEwLjYuMjAxLjIwMTozMDE0Ny9hdXRoL3JlYWxtcy9naGlwcG8iLCJhdWQiOiJfX2ludGVybmFsLWdoaXBwbyIsInN1YiI6ImMxZmMxM2ViLTAwZGUtNDFiYS05ZTllLWE5OGU2OGM0MmVmMCIsInR5cCI6IklEIiwiYXpwIjoiX19pbnRlcm5hbC1naGlwcG8iLCJzZXNzaW9uX3N0YXRlIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiYXRfaGFzaCI6IlJhTHoyQjlKQ2FNc1RrbGVMR3V6blEiLCJhY3IiOiIwIiwic2lkIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJncm91cHMiOltdLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJucy12aWV3ZXIiLCJsb2NhbGUiOiIifQ.As2ipMjfvzvgONAGlc9RnqOd3zMwAj82VXlcqcR74ZK9tAq3Q4ruQ1a6WuIfqiq8Kq4F77ljwwzYUuunfBli2zhU2II8zyxVhLoCEBu4pBVBd_oJyUycXuNa6HfQGnl36E1M7-_QG8b-_T51wFxxVb5b7SEDE1AvIf54NAlAr-rhDmGRdOK1c9CohQcS00ab52MD3IPiFFZ8_Iljnii-RpXKZoTjdcULJVn_uZNk_SzSUK-7MVWmPBK15m6sNktOMSf0pCObKWRqHd15JSe-2aA2PKBo1jBH3tHbOgZyMPdsLI0QdmEnKB5FiiOeMpwn_oHnT6IjT-BZlB18VkW8rA'\n
                                                                                  "},{"location":"en/admin/kpanda/permissions/permission-brief.html","title":"Container Management Permissions","text":"

                                                                                  Container management permissions are based on a multi-dimensional permission management system created by global permission management and Kubernetes RBAC permission management. It supports cluster-level and namespace-level permission control, helping users to conveniently and flexibly set different operation permissions for IAM users and groups (collections of users) under a tenant.

                                                                                  "},{"location":"en/admin/kpanda/permissions/permission-brief.html#cluster-permissions","title":"Cluster Permissions","text":"

                                                                                  Cluster permissions are authorized based on Kubernetes RBAC's ClusterRoleBinding, allowing users/groups to have cluster-related permissions. The current default cluster role is Cluster Admin (does not have the permission to create or delete clusters).

                                                                                  "},{"location":"en/admin/kpanda/permissions/permission-brief.html#cluster-admin","title":"Cluster Admin","text":"

                                                                                  Cluster Admin has the following permissions:

                                                                                  • Can manage, edit, and view the corresponding cluster
                                                                                  • Manage, edit, and view all workloads and all resources within the namespace
                                                                                  • Can authorize users for roles within the cluster (Cluster Admin, NS Admin, NS Editor, NS Viewer)

                                                                                  The YAML example for this cluster role is as follows:

                                                                                  apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:49Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-cluster-admin\n  resourceVersion: \"15168\"\n  uid: f8f86d42-d5ef-47aa-b284-097615795076\nrules:\n- apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n- nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'\n
                                                                                  "},{"location":"en/admin/kpanda/permissions/permission-brief.html#namespace-permissions","title":"Namespace Permissions","text":"

                                                                                  Namespace permissions are authorized based on Kubernetes RBAC capabilities, allowing different users/groups to have different operation permissions on resources under a namespace (including Kubernetes API permissions). For details, refer to: Kubernetes RBAC. Currently, the default roles for container management are: NS Admin, NS Editor, NS Viewer.

                                                                                  "},{"location":"en/admin/kpanda/permissions/permission-brief.html#ns-admin","title":"NS Admin","text":"

                                                                                  NS Admin has the following permissions:

                                                                                  • Can view the corresponding namespace
                                                                                  • Manage, edit, and view all workloads and custom resources within the namespace
                                                                                  • Can authorize users for roles within the corresponding namespace (NS Editor, NS Viewer)

                                                                                  The YAML example for this cluster role is as follows:

                                                                                  apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:49Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-admin\n  resourceVersion: \"15173\"\n  uid: 69f64c7e-70e7-4c7c-a3e0-053f507f2bc3\nrules:\n- apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n- nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'    \n
                                                                                  "},{"location":"en/admin/kpanda/permissions/permission-brief.html#ns-editor","title":"NS Editor","text":"

                                                                                  NS Editor has the following permissions:

                                                                                  • Can view the namespaces where permissions are granted
                                                                                  • Manage, edit, and view all workloads within the namespace
                                                                                  Click to view the YAML example of the cluster role
                                                                                  apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:50Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-edit\n  resourceVersion: \"15175\"\n  uid: ca9e690e-96c0-4978-8915-6e4c00c748fe\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - configmaps\n  - endpoints\n  - persistentvolumeclaims\n  - persistentvolumeclaims/status\n  - pods\n  - replicationcontrollers\n  - replicationcontrollers/scale\n  - serviceaccounts\n  - services\n  - services/status\n  verbs:\n  - '*'\n- apiGroups:\n  - \"\"\n  resources:\n  - bindings\n  - events\n  - limitranges\n  - namespaces/status\n  - pods/log\n  - pods/status\n  - replicationcontrollers/status\n  - resourcequotas\n  - resourcequotas/status\n  verbs:\n  - '*'\n- apiGroups:\n  - \"\"\n  resources:\n  - namespaces\n  verbs:\n  - '*'\n- apiGroups:\n  - apps\n  resources:\n  - controllerrevisions\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - statefulsets\n  - statefulsets/scale\n  - statefulsets/status\n  verbs:\n  - '*'\n- apiGroups:\n  - autoscaling\n  resources:\n  - horizontalpodautoscalers\n  - horizontalpodautoscalers/status\n  verbs:\n  - '*'\n- apiGroups:\n  - batch\n  resources:\n  - cronjobs\n  - cronjobs/status\n  - jobs\n  - jobs/status\n  verbs:\n  - '*'\n- apiGroups:\n  - extensions\n  resources:\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - replicationcontrollers/scale\n  verbs:\n  - '*'\n- apiGroups:\n  - policy\n  resources:\n  - poddisruptionbudgets\n  - poddisruptionbudgets/status\n  verbs:\n  - '*'\n- apiGroups:\n  - networking.k8s.io\n  resources:\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  verbs:\n  - '*'      \n
                                                                                  "},{"location":"en/admin/kpanda/permissions/permission-brief.html#ns-viewer","title":"NS Viewer","text":"

                                                                                  NS Viewer has the following permissions:

                                                                                  • Can view the corresponding namespace
                                                                                  • Can view all workloads and custom resources within the corresponding namespace
                                                                                  Click to view the YAML example of the cluster role
                                                                                  apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:50Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-view\n  resourceVersion: \"15183\"\n  uid: 853888fd-6ee8-42ac-b91e-63923918baf8\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - configmaps\n  - endpoints\n  - persistentvolumeclaims\n  - persistentvolumeclaims/status\n  - pods\n  - replicationcontrollers\n  - replicationcontrollers/scale\n  - serviceaccounts\n  - services\n  - services/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - \"\"\n  resources:\n  - bindings\n  - events\n  - limitranges\n  - namespaces/status\n  - pods/log\n  - pods/status\n  - replicationcontrollers/status\n  - resourcequotas\n  - resourcequotas/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - \"\"\n  resources:\n  - namespaces\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - apps\n  resources:\n  - controllerrevisions\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - statefulsets\n  - statefulsets/scale\n  - statefulsets/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - autoscaling\n  resources:\n  - horizontalpodautoscalers\n  - horizontalpodautoscalers/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - batch\n  resources:\n  - cronjobs\n  - cronjobs/status\n  - jobs\n  - jobs/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - extensions\n  resources:\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - replicationcontrollers/scale\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - policy\n  resources:\n  - poddisruptionbudgets\n  - poddisruptionbudgets/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - networking.k8s.io\n  resources:\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  verbs:\n  - get\n  - list\n  - watch \n
                                                                                  "},{"location":"en/admin/kpanda/permissions/permission-brief.html#permissions-faq","title":"Permissions FAQ","text":"
                                                                                  1. What is the relationship between global permissions and container management permissions?

                                                                                    Answer: Global permissions grant only coarse-grained authority, namely managing the creation, editing, and deletion of all clusters. Fine-grained permissions, such as management permissions for a single cluster or the management, editing, and deletion permissions within a single namespace, need to be implemented through container management permissions based on Kubernetes RBAC. Generally, users only need to be authorized in container management.

                                                                                  2. Currently, only four default roles are supported. Can the RoleBinding and ClusterRoleBinding (Kubernetes fine-grained RBAC) for custom roles also take effect?

                                                                                    Answer: Currently, custom permissions cannot be managed through the graphical interface, but the permission rules created using kubectl can still take effect.
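
                                                                                    As a reference for the second answer, a custom fine-grained rule can be created directly with kubectl; a minimal sketch (the role, binding, and user names are hypothetical):

                                                                                    apiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: pod-reader\n  namespace: default\nrules:\n- apiGroups: [ \"\" ]\n  resources: [ \"pods\" ]\n  verbs: [ \"get\", \"list\", \"watch\" ]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: read-pods\n  namespace: default\nsubjects:\n- kind: User\n  name: dev-user # hypothetical user name\n  apiGroup: rbac.authorization.k8s.io\nroleRef:\n  kind: Role\n  name: pod-reader\n  apiGroup: rbac.authorization.k8s.io\n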

                                                                                  "},{"location":"en/admin/kpanda/scale/create-hpa.html","title":"Create HPA","text":"

                                                                                  Suanova AI platform supports elastic scaling of Pod resources based on metrics (Horizontal Pod Autoscaling, HPA). Users can dynamically adjust the number of Pod replicas by setting CPU utilization, memory usage, and custom metrics. For example, after setting an auto scaling policy based on CPU utilization for a workload, when the CPU utilization of its Pods exceeds or falls below the threshold you set, the workload controller will automatically increase or decrease the number of Pod replicas.

                                                                                  This page describes how to configure auto scaling based on built-in metrics and custom metrics for workloads.

                                                                                  Note

                                                                                  1. HPA is only applicable to Deployment and StatefulSet, and only one HPA can be created per workload.
                                                                                  2. If you create an HPA policy based on CPU utilization, you must set the configuration limit (Limit) for the workload in advance, otherwise the CPU utilization cannot be calculated.
                                                                                  3. If built-in metrics and multiple custom metrics are used at the same time, HPA calculates the required number of replicas for each metric separately and takes the largest value (without exceeding the maximum number of replicas configured in the HPA policy) for elastic scaling. For example, if the CPU metric suggests 4 replicas and a custom metric suggests 6, HPA scales to 6.
                                                                                  "},{"location":"en/admin/kpanda/scale/create-hpa.html#built-in-metric-elastic-scaling-policy","title":"Built-in metric elastic scaling policy","text":"

                                                                                  The system provides two built-in elastic scaling metrics, CPU and memory, to cover basic business cases.

                                                                                  "},{"location":"en/admin/kpanda/scale/create-hpa.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before configuring a built-in metric auto scaling policy for a workload, the following prerequisites must be met:

                                                                                  • Integrated the Kubernetes cluster or created the Kubernetes cluster, and you can access the UI interface of the cluster.

                                                                                  • Created a namespace, deployment or statefulset.

                                                                                  • You should have permissions not lower than NS Editor. For details, refer to Namespace Authorization.

                                                                                  • Installed the metrics-server plugin.

                                                                                  "},{"location":"en/admin/kpanda/scale/create-hpa.html#steps","title":"Steps","text":"

                                                                                  Refer to the following steps to configure a built-in metric auto scaling policy for a workload.

                                                                                  1. Click Clusters on the left navigation bar to enter the cluster list page. Click a cluster name to enter the Cluster Details page.

                                                                                  2. On the cluster details page, click Workload in the left navigation bar to enter the workload list, and then click a workload name to enter the Workload Details page.

                                                                                  3. Click the Auto Scaling tab to view the auto scaling configuration of the current cluster.

                                                                                  4. After confirming that the cluster has installed the metrics-server plug-in, and the plug-in is running normally, you can click the New Scaling button.

                                                                                  5. Configure the auto scaling policy parameters.

                                                                                    • Policy name: Enter the name of the auto scaling policy. Note that the name can contain up to 63 characters, may only include lowercase letters, numbers, and separators (\"-\"), and must start and end with a lowercase letter or number, e.g., hpa-my-dep.
                                                                                    • Namespace: The namespace where the workload resides.
                                                                                    • Workload: The workload object that performs auto scaling.
                                                                                    • Target CPU Utilization: The CPU usage of the Pods under the workload, calculated as actual usage relative to the total CPU requests of all Pods under the workload. When the actual CPU utilization is greater/lower than the target value, the system automatically increases/decreases the number of Pod replicas.
                                                                                    • Target Memory Usage: The memory usage of the Pods under the workload. When the actual memory usage is greater/lower than the target value, the system automatically increases/decreases the number of Pod replicas.
                                                                                    • Replica range: the elastic scaling range of the number of Pod replicas. The default interval is 1 - 10.
                                                                                  6. After completing the parameter configuration, click the OK button to automatically return to the elastic scaling details page. Click \u2507 on the right side of the list to edit, delete, and view related events.
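
                                                                                  For reference, the policy configured above corresponds to a standard Kubernetes HorizontalPodAutoscaler; a minimal sketch, assuming a hypothetical Deployment named my-dep and a 50% CPU utilization target:

                                                                                  apiVersion: autoscaling/v2\nkind: HorizontalPodAutoscaler\nmetadata:\n  name: hpa-my-dep\n  namespace: default\nspec:\n  scaleTargetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: my-dep\n  minReplicas: 1   # replica range lower bound\n  maxReplicas: 10  # replica range upper bound\n  metrics:\n  - type: Resource\n    resource:\n      name: cpu\n      target:\n        type: Utilization\n        averageUtilization: 50\n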

                                                                                  "},{"location":"en/admin/kpanda/scale/create-vpa.html","title":"Create VPAs","text":"

                                                                                  The Vertical Pod Autoscaler (VPA) calculates the most suitable CPU and memory request values for a Pod by monitoring the Pod's resource requests and usage over a period of time. Using VPA allocates resources to each Pod in the cluster more reasonably, improving overall cluster resource utilization and avoiding waste of cluster resources.

                                                                                  AI platform supports VPA for containers. Based on this feature, Pod request values can be dynamically adjusted according to actual container resource usage. AI platform supports both manual and automatic modification of resource request values; configure them according to actual needs.

                                                                                  This page describes how to configure VPA for deployment.

                                                                                  Warning

                                                                                  Using VPA to modify a Pod resource request will trigger a Pod restart. Due to the limitations of Kubernetes itself, Pods may be scheduled to other nodes after restarting.

                                                                                  "},{"location":"en/admin/kpanda/scale/create-vpa.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before configuring a vertical scaling policy for deployment, the following prerequisites must be met:

                                                                                  • In the Container Management module Integrate Kubernetes Cluster or Create Kubernetes Cluster, and can access the cluster UI interface.

                                                                                  • Created a namespace, a user, and a Deployment or StatefulSet.

                                                                                  • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                                                                  • The current cluster has installed metrics-server and VPA plugins.

                                                                                  "},{"location":"en/admin/kpanda/scale/create-vpa.html#steps","title":"Steps","text":"

                                                                                  Refer to the following steps to configure a vertical scaling (VPA) policy for the deployment.

                                                                                  1. Find the current cluster in Clusters , and click the name of the target cluster.

                                                                                  2. Click Deployments in the left navigation bar, find the deployment that needs to create a VPA, and click the name of the deployment.

                                                                                  3. Click the Auto Scaling tab to view the auto scaling configuration of the current cluster, and confirm that the relevant plug-ins have been installed and are running normally.

                                                                                  4. Click the Create Autoscaler button and configure the VPA vertical scaling policy parameters.

                                                                                    • Policy name: Enter the name of the vertical scaling policy. Note that the name can contain up to 63 characters, may only include lowercase letters, numbers, and separators (\"-\"), and must start and end with a lowercase letter or number, e.g., vpa-my-dep.
                                                                                    • Scaling mode: The method used to modify the CPU and memory request values. Vertical scaling currently supports manual and automatic scaling modes.
                                                                                      • Manual scaling: After the vertical scaling policy calculates the recommended resource configuration value, the user needs to manually modify the resource quota of the application.
                                                                                      • Auto-scaling: The vertical scaling policy automatically calculates and modifies the resource quota of the application.
                                                                                    • Target container: Select the container to be scaled vertically.
                                                                                  5. After completing the parameter configuration, click the OK button to automatically return to the elastic scaling details page. Click \u2507 on the right side of the list to perform edit and delete operations.
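
                                                                                  For reference, the policy above corresponds to a VerticalPodAutoscaler object from the Kubernetes autoscaler project; a minimal sketch, assuming a hypothetical Deployment named my-dep:

                                                                                  apiVersion: autoscaling.k8s.io/v1\nkind: VerticalPodAutoscaler\nmetadata:\n  name: vpa-my-dep\n  namespace: default\nspec:\n  targetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: my-dep\n  updatePolicy:\n    updateMode: \"Auto\" # use \"Off\" for manual mode (recommendations only, no restarts)\n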

                                                                                  "},{"location":"en/admin/kpanda/scale/custom-hpa.html","title":"Creating HPA Based on Custom Metrics","text":"

                                                                                  When the built-in CPU and memory metrics do not meet your business needs, you can add custom metrics by configuring a ServiceMonitor and achieve auto-scaling based on these custom metrics. This page introduces how to configure auto-scaling for workloads based on custom metrics.

                                                                                  Note

                                                                                  1. HPA is only applicable to Deployment and StatefulSet, and each workload can only create one HPA.
                                                                                  2. If both built-in metrics and multiple custom metrics are used, HPA will calculate the required number of scaled replicas based on multiple metrics respectively, and take the larger value (but not exceeding the maximum number of replicas configured when setting the HPA policy) for scaling.
                                                                                  "},{"location":"en/admin/kpanda/scale/custom-hpa.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before configuring the custom metrics auto-scaling policy for workloads, the following prerequisites must be met:

                                                                                  • Integrated Kubernetes cluster or created Kubernetes cluster, and able to access the cluster's UI interface.
                                                                                  • Completed creation of a namespace, deployment, or statefulSet.
                                                                                  • The current user should have permissions not lower than NS Editor. For details, refer to namespace authorization.
                                                                                  • metrics-server plugin has been installed.
                                                                                  • insight-agent plugin has been installed.
                                                                                  • Prometheus-adapter plugin has been installed.
                                                                                  "},{"location":"en/admin/kpanda/scale/custom-hpa.html#steps","title":"Steps","text":"

                                                                                  Refer to the following steps to configure the auto-scaling policy based on metrics for workloads.

                                                                                  1. Click Clusters in the left navigation bar to enter the clusters page. Click a cluster name to enter the Cluster Overview page.

                                                                                  2. On the Cluster Details page, click Workloads in the left navigation bar to enter the workload list, and click a workload name to enter the Workload Details page.

                                                                                  3. Click the Auto Scaling tab to view the current autoscaling configuration of the cluster.

                                                                                  4. Confirm that the cluster has installed metrics-server, Insight, and Prometheus-adapter plugins, and that the plugins are running normally, then click the Create AutoScaler button.

                                                                                    Note

                                                                                    If the related plugins are not installed or the plugins are in an abnormal state, you will not be able to see the entry for creating custom metrics auto-scaling on the page.

                                                                                  5. Create custom metrics auto-scaling policy parameters.

                                                                                    • Policy Name: Enter the name of the auto-scaling policy. Note that the name can be up to 63 characters long, can only contain lowercase letters, numbers, and separators (\"-\"), and must start and end with a lowercase letter or number, e.g., hpa-my-dep.
                                                                                    • Namespace: The namespace where the workload is located.
                                                                                    • Workload: The workload object that performs auto-scaling.
                                                                                    • Resource Type: The type of custom metric being monitored, including Pod and Service types.
                                                                                    • Metric: The name of the custom metric created using a ServiceMonitor, or the name of a system-built custom metric.
                                                                                    • Data Type: The method used to calculate the metric value, including target value and target average value. When the resource type is Pod, only the target average value can be used.
                                                                                  "},{"location":"en/admin/kpanda/scale/custom-hpa.html#operation-example","title":"Operation Example","text":"

                                                                                  This example uses a Golang business program that exposes the httpserver_requests_total metric to record HTTP requests. This metric can be used to calculate the QPS of the business program.

                                                                                  "},{"location":"en/admin/kpanda/scale/custom-hpa.html#deploy-business-program","title":"Deploy Business Program","text":"

                                                                                  Use Deployment to deploy the business program:

                                                                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: httpserver\n  namespace: httpserver\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: httpserver\n  template:\n    metadata:\n      labels:\n        app: httpserver\n    spec:\n      containers:\n      - name: httpserver\n        image: registry.imroc.cc/test/httpserver:custom-metrics\n        imagePullPolicy: Always\n---\n\napiVersion: v1\nkind: Service\nmetadata:\n  name: httpserver\n  namespace: httpserver\n  labels:\n    app: httpserver\n  annotations:\n    prometheus.io/scrape: \"true\"\n    prometheus.io/path: \"/metrics\"\n    prometheus.io/port: \"http\"\nspec:\n  type: ClusterIP\n  ports:\n  - port: 80\n    protocol: TCP\n    name: http\n  selector:\n    app: httpserver\n
                                                                                  "},{"location":"en/admin/kpanda/scale/custom-hpa.html#prometheus-collects-business-monitoring","title":"Prometheus Collects Business Monitoring","text":"

                                                                                  If the insight-agent is installed, Prometheus can be configured by creating a ServiceMonitor CRD object.

                                                                                  Operation steps: In Cluster Details -> Custom Resources, search for \"servicemonitors.monitoring.coreos.com\", click the name to enter the details page, and create the following example CRD in the httpserver namespace via YAML:

                                                                                  apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: httpserver\n  namespace: httpserver\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  endpoints:\n  - port: http\n    interval: 5s\n  namespaceSelector:\n    matchNames:\n    - httpserver\n  selector:\n    matchLabels:\n      app: httpserver\n

                                                                                  Note

                                                                                  If Prometheus is installed via insight, the serviceMonitor must be labeled with operator.insight.io/managed-by: insight. If installed by other means, this label is not required.

                                                                                  "},{"location":"en/admin/kpanda/scale/custom-hpa.html#configure-metric-rules-in-prometheus-adapter","title":"Configure Metric Rules in Prometheus-adapter","text":"

                                                                                  Steps: In Clusters -> Helm Apps, search for \"prometheus-adapter\", enter the update page through the action bar, and configure the custom metric in YAML as follows:

                                                                                  rules:\n  custom:\n    - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)\n      name:\n        as: httpserver_requests_qps\n        matches: httpserver_requests_total\n      resources:\n        template: <<.Resource>>\n      seriesQuery: httpserver_requests_total\n
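
                                                                                  Once prometheus-adapter reloads the rule, the new metric should be served through the custom metrics API; a quick way to verify (the httpserver namespace matches the example above):

                                                                                  kubectl get --raw \"/apis/custom.metrics.k8s.io/v1beta1/namespaces/httpserver/pods/*/httpserver_requests_qps\"\n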
                                                                                  "},{"location":"en/admin/kpanda/scale/custom-hpa.html#create-custom-metrics-auto-scaling-policy-parameters","title":"Create Custom Metrics Auto-scaling Policy Parameters","text":"

                                                                                  Following the steps above, find the httpserver application under Deployments and create an auto-scaling policy using the custom metric.
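
                                                                                  The resulting policy is equivalent to an HPA targeting the custom Pod metric; a minimal sketch, assuming a target of 50 requests per second per Pod:

                                                                                  apiVersion: autoscaling/v2\nkind: HorizontalPodAutoscaler\nmetadata:\n  name: httpserver\n  namespace: httpserver\nspec:\n  scaleTargetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: httpserver\n  minReplicas: 1\n  maxReplicas: 10\n  metrics:\n  - type: Pods\n    pods:\n      metric:\n        name: httpserver_requests_qps\n      target:\n        type: AverageValue\n        averageValue: \"50\"\n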

                                                                                  "},{"location":"en/admin/kpanda/scale/hpa-cronhpa-compatibility-rules.html","title":"Compatibility Rules for HPA and CronHPA","text":"

                                                                                  HPA stands for HorizontalPodAutoscaler, which refers to horizontal pod auto-scaling.

                                                                                  CronHPA stands for Cron HorizontalPodAutoscaler, which refers to scheduled horizontal pod auto-scaling.

                                                                                  "},{"location":"en/admin/kpanda/scale/hpa-cronhpa-compatibility-rules.html#conflict-between-cronhpa-and-hpa","title":"Conflict Between CronHPA and HPA","text":"

Scheduled scaling with CronHPA triggers horizontal pod scaling at specified times. To cope with sudden traffic surges, you may also have configured HPA to keep your application running normally. When both HPA and CronHPA act on the same workload, conflicts arise because CronHPA and HPA operate independently and are unaware of each other: whichever acts last overrides the result of the other.

                                                                                  By comparing the definition templates of CronHPA and HPA, the following points can be observed:

                                                                                  • Both CronHPA and HPA use the scaleTargetRef field to identify the scaling target.
                                                                                  • CronHPA schedules the number of replicas to scale based on crontab rules in jobs.
                                                                                  • HPA determines scaling based on resource utilization.

                                                                                  Note

                                                                                  If both CronHPA and HPA are set, there will be scenarios where CronHPA and HPA simultaneously operate on a single scaleTargetRef.

                                                                                  "},{"location":"en/admin/kpanda/scale/hpa-cronhpa-compatibility-rules.html#compatibility-solution-for-cronhpa-and-hpa","title":"Compatibility Solution for CronHPA and HPA","text":"

                                                                                  As noted above, the fundamental reason that simultaneous use of CronHPA and HPA results in the later action overriding the earlier one is that the two controllers cannot sense each other. Therefore, the conflict can be resolved by enabling CronHPA to be aware of HPA's current state.

                                                                                  The system will treat HPA as the scaling object for CronHPA, thus achieving scheduled scaling for the Deployment object defined by the HPA.

                                                                                  HPA's definition configures the Deployment in the scaleTargetRef field, and then the Deployment uses its definition to locate the ReplicaSet, which ultimately adjusts the actual number of replicas.

                                                                                  In AI platform, the scaleTargetRef in CronHPA is set to the HPA object, and it uses the HPA object to find the actual scaleTargetRef, allowing CronHPA to be aware of HPA's current state.

CronHPA takes effect by adjusting the HPA object itself. CronHPA decides whether scaling is needed by comparing its target replica count with the current replica count, and modifies the HPA upper limit to the larger of the two values. Similarly, it decides whether to modify the HPA lower limit by comparing its target replica count with the limit configured in the HPA, taking the smaller of the two values.
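
As a sketch of this pattern (assuming the CRD shipped with kubernetes-cronhpa-controller; the names, schedules, and sizes are illustrative), a CronHPA that targets the HPA object instead of the Deployment looks like this:

apiVersion: autoscaling.alibabacloud.com/v1beta1\nkind: CronHorizontalPodAutoscaler\nmetadata:\n  name: httpserver-cronhpa\n  namespace: httpserver\nspec:\n  scaleTargetRef:\n    apiVersion: autoscaling/v2      # points at the HPA, not the Deployment\n    kind: HorizontalPodAutoscaler\n    name: httpserver\n  jobs:\n  - name: scale-up-before-peak\n    schedule: \"0 0 8 * * *\"    # 6-field cron (with seconds): every day at 08:00\n    targetSize: 10             # illustrative replica count\n  - name: scale-down-after-peak\n    schedule: \"0 0 20 * * *\"\n    targetSize: 2\n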

                                                                                  "},{"location":"en/admin/kpanda/scale/install-cronhpa.html","title":"Install kubernetes-cronhpa-controller","text":"

CronHPA (scheduled horizontal pod autoscaling) provides a stable compute-resource guarantee for applications with periodic high concurrency, and kubernetes-cronhpa-controller is the key component that implements CronHPA.

                                                                                  This section describes how to install the kubernetes-cronhpa-controller plugin.

                                                                                  Note

To use CronHPA, you must install not only the kubernetes-cronhpa-controller plugin but also the metrics-server plugin.

                                                                                  "},{"location":"en/admin/kpanda/scale/install-cronhpa.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before installing the kubernetes-cronhpa-controller plugin, the following prerequisites need to be met:

• The cluster has been integrated or created in the Container Management module (Integrate Kubernetes Cluster or Create Kubernetes Cluster), and you can access the cluster's UI.

• A namespace has been created.

• The current user has NS Editor or higher permissions. For details, refer to Namespace Authorization.

                                                                                  "},{"location":"en/admin/kpanda/scale/install-cronhpa.html#steps","title":"Steps","text":"

                                                                                  Refer to the following steps to install the kubernetes-cronhpa-controller plugin for the cluster.

                                                                                  1. On the Clusters page, find the target cluster where the plugin needs to be installed, click the name of the cluster, then click Workloads -> Deployments on the left, and click the name of the target workload.

                                                                                  2. On the workload details page, click the Auto Scaling tab, and click Install on the right side of CronHPA .

3. Read the plugin's introduction, select a version, and click the Install button. Version 1.3.0 or later is recommended.

                                                                                  4. Refer to the following instructions to configure the parameters.

  • Name: Enter the plugin name. The name can be up to 63 characters, may only contain lowercase letters, numbers, and hyphens ("-"), and must start and end with a lowercase letter or number, for example kubernetes-cronhpa-controller.
  • Namespace: Select the namespace the plugin will be installed in; here we take default as an example.
  • Version: The version of the plugin; here we take version 1.3.0 as an example.
  • Ready Wait: When enabled, the installation is marked as successful only after all associated resources under the application reach the ready state.
  • Failed to delete: If the plugin installation fails, remove the associated resources that were already installed. When enabled, Ready Wait is enabled synchronously by default.
  • Detailed log: When enabled, a detailed log of the installation process is recorded.

                                                                                    Note

After Ready Wait and/or Failed to delete is enabled, it may take longer for the application to be marked as Running.

                                                                                  5. Click OK in the lower right corner of the page, and the system will automatically jump to the Helm Apps list page. Wait a few minutes and refresh the page to see the application you just installed.

                                                                                    Warning

If you need to delete the kubernetes-cronhpa-controller plugin, you must delete it on the Helm Apps list page to remove it completely.

    If you delete the plugin only under the Auto Scaling tab of the workload, this deletes the workload's record of the plugin; the plugin itself is not deleted, and an error will be reported when the plugin is reinstalled later.

6. Go back to the Auto Scaling tab on the workload details page; the interface now displays Plugin installed. You can start creating CronHPA policies.

                                                                                  "},{"location":"en/admin/kpanda/scale/install-metrics-server.html","title":"Install metrics-server","text":"

metrics-server is Kubernetes' built-in component for collecting resource usage metrics. By configuring HPA policies, you can automatically scale Pod replicas horizontally for workload resources.

                                                                                  This section describes how to install metrics-server .

                                                                                  "},{"location":"en/admin/kpanda/scale/install-metrics-server.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before installing the metrics-server plugin, the following prerequisites need to be met:

• The Kubernetes cluster has been integrated or created, and you can access its UI.

                                                                                  • Created a namespace.

                                                                                  • You should have permissions not lower than NS Editor. For details, refer to Namespace Authorization.

                                                                                  "},{"location":"en/admin/kpanda/scale/install-metrics-server.html#steps","title":"Steps","text":"

                                                                                  Please perform the following steps to install the metrics-server plugin for the cluster.

                                                                                  1. On the Auto Scaling page under workload details, click the Install button to enter the metrics-server plug-in installation interface.

2. Read the metrics-server plugin's introduction, select a version, and click the Install button. This page uses version 3.8.2 as an example; version 3.8.2 or later is recommended.

                                                                                  3. Configure basic parameters on the installation configuration interface.

  • Name: Enter the plugin name. The name can be up to 63 characters, may only contain lowercase letters, numbers, and hyphens ("-"), and must start and end with a lowercase letter or number, for example metrics-server-01.
  • Namespace: Select the namespace for plugin installation; here we take default as an example.
  • Version: The version of the plugin; here we take version 3.8.2 as an example.
  • Ready Wait: When enabled, the installation is marked as successful only after all associated resources under the application are ready.
  • Failed to delete: If the installation fails, the installation-related resources are removed. When enabled, Ready Wait is enabled synchronously by default.
  • Verbose log: When enabled, a verbose log of the installation process is recorded.

                                                                                    Note

After Ready Wait and/or Failed to delete is enabled, it may take longer for the app to be marked as Running.

                                                                                  4. Advanced parameter configuration

• If the cluster network cannot access the k8s.gcr.io registry, try changing the repository parameter to repository: k8s.m.daocloud.io/metrics-server/metrics-server .

• An SSL certificate is also required to install the metrics-server plugin. To bypass certificate verification, add the - --kubelet-insecure-tls parameter under defaultArgs: .

                                                                                    Click to view and use the YAML parameters to replace the default YAML
image:\n  repository: k8s.m.daocloud.io/metrics-server/metrics-server # Change the registry source address to k8s.m.daocloud.io\n  tag: ''\n  pullPolicy: IfNotPresent\nimagePullSecrets: []\nnameOverride: ''\nfullnameOverride: ''\nserviceAccount:\n  create: true\n  annotations: {}\n  name: ''\nrbac:\n  create: true\n  pspEnabled: false\napiService:\n  create: true\npodLabels: {}\npodAnnotations: {}\npodSecurityContext: {}\nsecurityContext:\n  allowPrivilegeEscalation: false\n  readOnlyRootFilesystem: true\n  runAsNonRoot: true\n  runAsUser: 1000\npriorityClassName: system-cluster-critical\ncontainerPort: 4443\nhostNetwork:\n  enabled: false\nreplicas: 1\nupdateStrategy: {}\npodDisruptionBudget:\n  enabled: false\n  minAvailable: null\n  maxUnavailable: null\ndefaultArgs:\n  - '--cert-dir=/tmp'\n  - '--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname'\n  - '--kubelet-use-node-status-port'\n  - '--metric-resolution=15s'\n  - --kubelet-insecure-tls # Bypass certificate verification\nargs: []\nlivenessProbe:\n  httpGet:\n    path: /livez\n    port: https\n    scheme: HTTPS\n  initialDelaySeconds: 0\n  periodSeconds: 10\n  failureThreshold: 3\nreadinessProbe:\n  httpGet:\n    path: /readyz\n    port: https\n    scheme: HTTPS\n  initialDelaySeconds: 20\n  periodSeconds: 10\n  failureThreshold: 3\nservice:\n  type: ClusterIP\n  port: 443\n  annotations: {}\n  labels: {}\nmetrics:\n  enabled: false\nserviceMonitor:\n  enabled: false\n  additionalLabels: {}\n  interval: 1m\n  scrapeTimeout: 10s\nresources: {}\nextraVolumeMounts: []\nextraVolumes: []\nnodeSelector: {}\ntolerations: []\naffinity: {}\n
5. Click the OK button to complete the installation of the metrics-server plugin; the system will then automatically jump to the Helm Apps list page. After a few minutes, refresh the page and you will see the newly installed application.
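
To confirm that metrics-server is serving data, you can check the metrics API registration and pull live usage numbers (standard kubectl commands; the output will vary by cluster):

# Verify the metrics API service is registered and available\nkubectl get apiservices v1beta1.metrics.k8s.io\n\n# Query live resource usage once metrics have been collected\nkubectl top nodes\nkubectl top pods -A\n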

                                                                                  Note

The metrics-server plugin can only be completely deleted on the Helm Apps list page. If you delete metrics-server only on the workload page, this deletes the workload's record of the application; the application itself is not deleted, and an error will be reported when you reinstall the plugin later.

                                                                                  "},{"location":"en/admin/kpanda/scale/install-vpa.html","title":"Install vpa","text":"

The Vertical Pod Autoscaler (VPA) makes the cluster's resource allocation more reasonable and avoids wasting cluster resources. vpa is the key component for implementing vertical autoscaling of containers.

                                                                                  This section describes how to install the vpa plugin.

Note

To use VPA policies, you must install not only the vpa plugin but also the metrics-server plugin.
                                                                                  "},{"location":"en/admin/kpanda/scale/install-vpa.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before installing the vpa plugin, the following prerequisites need to be met:

• The cluster has been integrated or created in the Container Management module (Integrate Kubernetes Cluster or Create Kubernetes Cluster), and you can access the cluster's UI.

• A namespace has been created.

• The current user has NS Editor or higher permissions. For details, refer to Namespace Authorization.

                                                                                  "},{"location":"en/admin/kpanda/scale/install-vpa.html#steps","title":"Steps","text":"

                                                                                  Refer to the following steps to install the vpa plugin for the cluster.

                                                                                  1. On the Clusters page, find the target cluster where the plugin needs to be installed, click the name of the cluster, then click Workloads -> Deployments on the left, and click the name of the target workload.

                                                                                  2. On the workload details page, click the Auto Scaling tab, and click Install on the right side of VPA .

3. Read the plugin's introduction, select a version, and click the Install button. Version 1.5.0 or later is recommended.

                                                                                  4. Review the configuration parameters described below.

  • Name: Enter the plugin name. The name can be up to 63 characters, may only contain lowercase letters, numbers, and hyphens ("-"), and must start and end with a lowercase letter or number, for example kubernetes-cronhpa-controller.
  • Namespace: Select the namespace the plugin will be installed in; here we take default as an example.
  • Version: The version of the plugin; here we take version 1.5.0 as an example.
  • Ready Wait: When enabled, the installation is marked as successful only after all associated resources under the application reach the ready state.
  • Failed to delete: If the plugin installation fails, remove the associated resources that were already installed. When enabled, Ready Wait is enabled synchronously by default.
  • Detailed log: When enabled, a detailed log of the installation process is recorded.

                                                                                    Note

After Ready Wait and/or Failed to delete is enabled, it may take longer for the application to be marked as Running.

                                                                                  5. Click OK in the lower right corner of the page, and the system will automatically jump to the Helm Apps list page. Wait a few minutes and refresh the page to see the application you just installed.

                                                                                    Warning

If you need to delete the vpa plugin, you must delete it on the Helm Apps list page to remove it completely.

    If you delete the plugin only under the Auto Scaling tab of the workload, this deletes the workload's record of the plugin; the plugin itself is not deleted, and an error will be reported when the plugin is reinstalled later.

6. Go back to the Auto Scaling tab on the workload details page; the interface now displays Plugin installed. You can start creating VPA policies.
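
For orientation, a minimal VPA policy might look like the sketch below (assuming the standard autoscaling.k8s.io/v1 API; the target Deployment name and resource bounds are illustrative):

apiVersion: autoscaling.k8s.io/v1\nkind: VerticalPodAutoscaler\nmetadata:\n  name: my-app-vpa\n  namespace: default\nspec:\n  targetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: my-app           # illustrative workload name\n  updatePolicy:\n    updateMode: \"Auto\"     # let VPA apply recommendations by evicting Pods\n  resourcePolicy:\n    containerPolicies:\n    - containerName: \"*\"\n      minAllowed:\n        cpu: 100m\n        memory: 128Mi\n      maxAllowed:\n        cpu: \"1\"\n        memory: 1Gi\n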

                                                                                  "},{"location":"en/admin/kpanda/scale/knative/install.html","title":"Installation","text":"

                                                                                  Knative is a platform-agnostic solution for running serverless deployments.

                                                                                  "},{"location":"en/admin/kpanda/scale/knative/install.html#steps","title":"Steps","text":"
                                                                                  1. Log in to the cluster, click the sidebar Helm Apps \u2192 Helm Charts , enter knative in the search box at the top right, and then press the enter key to search.

2. Click knative-operator to enter the installation configuration interface. On this page you can view the available versions and the optional Parameters (Helm values).

                                                                                  3. After clicking the install button, you will enter the installation configuration interface.

4. Enter the name and installation tenant; it is recommended to check Wait and Detailed Logs.

                                                                                  5. In the settings below, you can tick Serving and enter the installation tenant of the Knative Serving component, which will deploy the Knative Serving component after installation. This component is managed by the Knative Operator.

                                                                                  "},{"location":"en/admin/kpanda/scale/knative/knative.html","title":"Knative Introduction","text":"

                                                                                  Knative provides a higher level of abstraction, simplifying and speeding up the process of building, deploying, and managing applications on Kubernetes. It allows developers to focus more on implementing business logic, while leaving most of the infrastructure and operations work to Knative, significantly improving productivity.

                                                                                  "},{"location":"en/admin/kpanda/scale/knative/knative.html#components","title":"Components","text":"

                                                                                  The Knative operator runs the following components.

                                                                                  knative-operator   knative-operator-58f7d7db5c-7f6r5      1/1     Running     0     6m55s\nknative-operator   operator-webhook-667dc67bc-qvrv4       1/1     Running     0     6m55s\n

                                                                                  The Knative serving components are as follows.

                                                                                  knative-serving        3scale-kourier-gateway-d69fbfbd-bd8d8   1/1     Running     0                 7m13s\nknative-serving        activator-7c6fddd698-wdlng              1/1     Running     0                 7m3s\nknative-serving        autoscaler-8f4b876bb-kd25p              1/1     Running     0                 7m17s\nknative-serving        autoscaler-hpa-5f7f74679c-vkc7p         1/1     Running     0                 7m15s\nknative-serving        controller-789c896c46-tfvsv             1/1     Running     0                 7m17s\nknative-serving        net-kourier-controller-7db578c889-7gd5l 1/1     Running     0                 7m14s\nknative-serving        webhook-5c88b94c5-78x7m                 1/1     Running     0                 7m1s\nknative-serving        storage-version-migration-serving-serving-1.12.2-t7zvd   0/1  Completed   0   7m15s\n
• Activator: Queues requests (if a Knative Service has scaled to zero), calls the autoscaler to bring back services that have scaled down to zero, and forwards the queued requests. The Activator can also act as a request buffer to handle bursts of traffic.
• Autoscaler: Responsible for scaling Knative services based on configuration, metrics, and incoming requests.
• Controller: Manages the state of Knative CRs. It monitors multiple objects, manages the lifecycle of dependent resources, and updates resource status.
• Queue-Proxy: Sidecar container injected into each Knative Service. Responsible for collecting traffic data and reporting it to the Autoscaler, which then initiates scaling requests based on this data and preset rules.
• Webhooks: Knative Serving has several webhooks responsible for validating and mutating Knative resources.
"},{"location":"en/admin/kpanda/scale/knative/knative.html#ingress-traffic-entry-solutions","title":"Ingress Traffic Entry Solutions","text":"
• Istio: If Istio is already in use, it can be chosen as the traffic entry solution.
• Contour: If Contour has been enabled in the cluster, it can be chosen as the traffic entry solution.
• Kourier: If neither of the above two ingress components is present, Knative's Envoy-based Kourier can be used as the traffic entry solution.
"},{"location":"en/admin/kpanda/scale/knative/knative.html#autoscaler-solutions-comparison","title":"Autoscaler Solutions Comparison","text":"
• Knative Pod Autoscaler (KPA): a core part of Knative Serving, enabled by default, supports scale to zero, does not support CPU-based autoscaling.
• Horizontal Pod Autoscaler (HPA): not a core part of Knative Serving, needs to be enabled after installing Knative Serving, does not support scale to zero, supports CPU-based autoscaling.
"},{"location":"en/admin/kpanda/scale/knative/knative.html#crd","title":"CRD","text":"
• Services (service.serving.knative.dev): Automatically manages the entire lifecycle of workloads, controls the creation of other objects, and ensures every application has a Route, a Configuration, and a new Revision with each update.
• Routes (route.serving.knative.dev): Maps network endpoints to one or more Revisions; supports traffic distribution and version routing.
• Configurations (configuration.serving.knative.dev): Maintains the desired state of deployments, separates code from configuration (following the Twelve-Factor App methodology); modifying a Configuration creates a new Revision.
• Revisions (revision.serving.knative.dev): A snapshot of the workload at each modification point in time; an immutable object that automatically scales based on traffic.
"},{"location":"en/admin/kpanda/scale/knative/playground.html","title":"Knative Practices","text":"

                                                                                  In this section, we will delve into learning Knative through several practical exercises.

                                                                                  "},{"location":"en/admin/kpanda/scale/knative/playground.html#case-1-hello-world","title":"case 1 - Hello World","text":"
                                                                                  apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n

                                                                                  You can use kubectl to check the status of a deployed application that has been automatically configured with ingress and scalers by Knative.

                                                                                  ~ kubectl get service.serving.knative.dev/hello\nNAME    URL                                              LATESTCREATED   LATESTREADY   READY   REASON\nhello   http://hello.knative-serving.knative.loulan.me   hello-00001     hello-00001   True\n

The deployed Pod YAML is as follows; the Pod consists of two containers: user-container and queue-proxy.

                                                                                  apiVersion: v1\nkind: Pod\nmetadata:\n  name: hello-00003-deployment-5fcb8ccbf-7qjfk\nspec:\n  containers:\n  - name: user-container\n  - name: queue-proxy\n

                                                                                  Request Flow:

1. case1 When there is low or no traffic, traffic is routed to the activator.
2. case2 When traffic is high, traffic is routed directly to the Pods, but only once it exceeds the target-burst-capacity (see the annotation sketch after this list):
  1. When set to 0, the activator is only in the request path while scaling up from zero.
  2. When set to -1, the activator is always present in the request path.
  3. When set to a value >0, it is the number of additional concurrent requests the system can absorb before scaling is triggered.
3. case3 When traffic decreases again, traffic is routed back to the activator once current_demand + target-burst-capacity > (pods * concurrency-target).

  That is: the total number of pending requests + the allowed burst above the target concurrency > the target concurrency per Pod * the number of Pods.
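
target-burst-capacity can be set per revision via an annotation on the Service template (a sketch; the value 200 is an arbitrary illustration, not a recommended setting):

apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target-burst-capacity: \"200\"  # extra concurrency absorbed before the activator leaves the path\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n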

                                                                                  "},{"location":"en/admin/kpanda/scale/knative/playground.html#case-2-based-on-concurrent-elastic-scaling","title":"case 2 - Based on Concurrent Elastic Scaling","text":"

                                                                                  We first apply the following YAML definition under the cluster.

                                                                                  apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"1\"\n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n

                                                                                  Execute the following command for testing, and you can observe the scaling of the Pods by using kubectl get pods -A -w.

                                                                                  wrk -t2 -c4 -d6s http://hello.knative-serving.knative.daocloud.io/\n
                                                                                  "},{"location":"en/admin/kpanda/scale/knative/playground.html#case-3-based-on-concurrent-elastic-scaling-scale-out-in-advance-to-reach-a-specific-ratio","title":"case 3 - Based on concurrent elastic scaling, scale out in advance to reach a specific ratio.","text":"

We can easily achieve this, for example, by limiting each container to a concurrency of 10. This is implemented through autoscaling.knative.dev/target-utilization-percentage: 70, which starts scaling out the Pods once 70% utilization is reached.

apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"10\"\n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"\n        autoscaling.knative.dev/target-utilization-percentage: \"70\"\n        autoscaling.knative.dev/metric: \"concurrency\"\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n
                                                                                  "},{"location":"en/admin/kpanda/scale/knative/playground.html#case-4-canary-releasetraffic-percentage","title":"case 4 - Canary Release/Traffic Percentage","text":"

                                                                                  We can control the distribution of traffic to each version through spec.traffic.

                                                                                  apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"1\"  \n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"         \n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n  traffic:\n  - latestRevision: true\n    percent: 50\n  - latestRevision: false\n    percent: 50\n    revisionName: hello-00001\n
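
After applying this, you can check how traffic is actually split across revisions (standard kubectl queries; the label selector assumes the hello service above):

# Show the traffic split recorded in the Service status\nkubectl get service.serving.knative.dev/hello -o jsonpath='{.status.traffic}'\n\n# List the revisions behind the split\nkubectl get revisions.serving.knative.dev -l serving.knative.dev/service=hello\n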
                                                                                  "},{"location":"en/admin/kpanda/scale/knative/scene.html","title":"Use Cases","text":""},{"location":"en/admin/kpanda/scale/knative/scene.html#suitable-cases","title":"Suitable Cases","text":"
                                                                                  • High concurrency business with short connections
                                                                                  • Businesses that require elastic scaling
                                                                                  • A large number of applications need to scale down to 0 to improve resource utilization
                                                                                  • AI Serving services that scale based on specific metrics
                                                                                  "},{"location":"en/admin/kpanda/scale/knative/scene.html#unsuitable-cases","title":"Unsuitable Cases","text":"
                                                                                  • Long-lived connection business
                                                                                  • Latency-sensitive business
                                                                                  • Traffic splitting based on cookies
                                                                                  • Traffic splitting based on headers
                                                                                  "},{"location":"en/admin/kpanda/security/index.html","title":"Types of Security Scans","text":"

                                                                                  AI platform Container Management provides three types of security scans:

                                                                                  • Compliance Scan: Conducts security scans on cluster nodes based on CIS Benchmark.
                                                                                  • Authorization Scan: Checks for security and compliance issues in the Kubernetes cluster, records and verifies authorized access, object changes, events, and other activities related to the Kubernetes API.
                                                                                  • Vulnerability Scan: Scans the Kubernetes cluster for potential vulnerabilities and risks, such as unauthorized access, sensitive information leakage, weak authentication, container escape, etc.
                                                                                  "},{"location":"en/admin/kpanda/security/index.html#compliance-scan","title":"Compliance Scan","text":"

                                                                                  The object of compliance scanning is the cluster node. The scan result lists the scan items and results and provides repair suggestions for any failed scan items. For specific security rules used during scanning, refer to the CIS Kubernetes Benchmark.

                                                                                  The focus of the scan varies when checking different types of nodes.

                                                                                  • Scan the control plane node (Controller)

                                                                                    • Focus on the security of system components such as API Server , controller-manager , scheduler , kubelet , etc.
                                                                                    • Check the security configuration of the Etcd database.
                                                                                    • Verify whether the cluster's authentication mechanism, authorization policy, and network security configuration meet security standards.
                                                                                  • Scan worker nodes

                                                                                    • Check if the configuration of container runtimes such as kubelet and Docker meets security standards.
• Verify whether the container images are trusted and verified.
                                                                                    • Check if the network security configuration of the node meets security standards.

                                                                                  Tip

                                                                                  To use compliance scanning, you need to create a scan configuration first, and then create a scan policy based on that configuration. After executing the scan policy, you can view the scan report.

                                                                                  "},{"location":"en/admin/kpanda/security/index.html#authorization-scan","title":"Authorization Scan","text":"

                                                                                  Authorization scanning focuses on security vulnerabilities caused by authorization issues. Authorization scans can help users identify security threats in Kubernetes clusters, identify which resources need further review and protection measures. By performing these checks, users can gain a clearer and more comprehensive understanding of their Kubernetes environment and ensure that the cluster environment meets Kubernetes' best practices and security standards.

                                                                                  Specifically, authorization scanning supports the following operations:

                                                                                  • Scans the health status of all nodes in the cluster.

                                                                                  • Scans the running state of components in the cluster, such as kube-apiserver , kube-controller-manager , kube-scheduler , etc.

                                                                                  • Scans security configurations: Check Kubernetes' security configuration.

                                                                                    • API security: whether unsafe API versions are enabled, whether appropriate RBAC roles and permission restrictions are set, etc.
                                                                                    • Container security: whether insecure images are used, whether privileged mode is enabled, whether appropriate security context is set, etc.
                                                                                    • Network security: whether appropriate network policy is enabled to restrict traffic, whether TLS encryption is used, etc.
                                                                                    • Storage security: whether appropriate encryption and access controls are enabled.
                                                                                    • Application security: whether necessary security measures are in place, such as password management, cross-site scripting attack defense, etc.
                                                                                  • Provides warnings and suggestions: Security best practices that cluster administrators should perform, such as regularly rotating certificates, using strong passwords, restricting network access, etc.

                                                                                  Tip

                                                                                  To use authorization scanning, you need to create a scan policy first. After executing the scan policy, you can view the scan report. For details, refer to Security Scanning.

                                                                                  "},{"location":"en/admin/kpanda/security/index.html#vulnerability-scan","title":"Vulnerability Scan","text":"

                                                                                  Vulnerability scanning focuses on scanning potential malicious attacks and security vulnerabilities, such as remote code execution, SQL injection, XSS attacks, and some attacks specific to Kubernetes. The final scan report lists the security vulnerabilities in the cluster and provides repair suggestions.

                                                                                  Tip

                                                                                  To use vulnerability scanning, you need to create a scan policy first. After executing the scan policy, you can view the scan report. For details, refer to Vulnerability Scan.

                                                                                  "},{"location":"en/admin/kpanda/security/audit.html","title":"Permission Scan","text":"

                                                                                  To use the Permission Scan feature, you need to create a scan policy first. After executing the policy, a scan report will be automatically generated for viewing.

                                                                                  "},{"location":"en/admin/kpanda/security/audit.html#create-a-scan-policy","title":"Create a Scan Policy","text":"
                                                                                  1. On the left navigation bar of the homepage in the Container Management module, click Security Management .

                                                                                  2. Click Permission Scan on the left navigation bar, then click the Scan Policy tab and click Create Scan Policy on the right.

                                                                                  3. Fill in the configuration according to the following instructions, and then click OK .

                                                                                    • Cluster: Select the cluster to be scanned. The optional cluster list comes from the clusters accessed or created in the Container Management module. If the desired cluster is not available, you can access or create a cluster in the Container Management module.
                                                                                    • Scan Type:

                                                                                      • Immediate scan: Perform a scan immediately after the scan policy is created. It cannot be automatically/manually executed again later.
                                                                                      • Scheduled scan: Automatically repeat the scan at scheduled intervals.
• Number of Scan Reports to Keep: Set the maximum number of scan reports to keep. When the retention limit is exceeded, the earliest reports are deleted first.

                                                                                  "},{"location":"en/admin/kpanda/security/audit.html#updatedelete-scan-policies","title":"Update/Delete Scan Policies","text":"

                                                                                  After creating a scan policy, you can update or delete it as needed.

                                                                                  Under the Scan Policy tab, click the \u2507 action button to the right of a configuration:

                                                                                  • For periodic scan policies:

                                                                                    • Select Execute Immediately to perform an additional scan outside the regular schedule.
                                                                                    • Select Disable to interrupt the scanning plan until Enable is clicked to resume executing the scan policy according to the scheduling plan.
                                                                                    • Select Edit to update the configuration. You can update the scan configuration, type, scan cycle, and report retention quantity. The configuration name and the target cluster to be scanned cannot be changed.
                                                                                    • Select Delete to delete the configuration.
                                                                                  • For one-time scan policies: Only support the Delete operation.

                                                                                  "},{"location":"en/admin/kpanda/security/audit.html#view-scan-reports","title":"View Scan Reports","text":"
                                                                                  1. Under the Security Management -> Permission Scanning -> Scan Reports tab, click the report name.

                                                                                    Clicking Delete on the right of a report allows you to manually delete the report.

                                                                                  2. View the scan report content, including:

                                                                                    • The target cluster scanned.
                                                                                    • The scan policy used.
                                                                                    • The total number of scan items, warnings, and errors.
                                                                                    • In periodic scan reports generated by periodic scan policies, you can also view the scan frequency.
                                                                                    • The start time of the scan.
                                                                                    • Check details, such as the checked resources, resource types, scan results, error types, and error details.
                                                                                  "},{"location":"en/admin/kpanda/security/hunter.html","title":"Vulnerability Scan","text":"

                                                                                  To use the Vulnerability Scan feature, you need to create a scan policy first. After executing the policy, a scan report will be automatically generated for viewing.

                                                                                  "},{"location":"en/admin/kpanda/security/hunter.html#create-a-scan-policy","title":"Create a Scan Policy","text":"
                                                                                  1. On the left navigation bar of the homepage in the Container Management module, click Security Management .

                                                                                  2. Click Vulnerability Scan on the left navigation bar, then click the Scan Policy tab and click Create Scan Policy on the right.

                                                                                  3. Fill in the configuration according to the following instructions, and then click OK .

                                                                                    • Cluster: Select the cluster to be scanned. The optional cluster list comes from the clusters accessed or created in the Container Management module. If the desired cluster is not available, you can access or create a cluster in the Container Management module.
                                                                                    • Scan Type:

                                                                                      • Immediate scan: Perform a scan immediately after the scan policy is created. It cannot be automatically/manually executed again later.
                                                                                      • Scheduled scan: Automatically repeat the scan at scheduled intervals.
• Number of Scan Reports to Keep: Set the maximum number of scan reports to keep. When the retention limit is exceeded, the earliest reports are deleted first.

                                                                                  "},{"location":"en/admin/kpanda/security/hunter.html#updatedelete-scan-policies","title":"Update/Delete Scan Policies","text":"

                                                                                  After creating a scan policy, you can update or delete it as needed.

                                                                                  Under the Scan Policy tab, click the \u2507 action button to the right of a configuration:

                                                                                  • For periodic scan policies:

                                                                                    • Select Execute Immediately to perform an additional scan outside the regular schedule.
                                                                                    • Select Disable to interrupt the scanning plan until Enable is clicked to resume executing the scan policy according to the scheduling plan.
                                                                                    • Select Edit to update the configuration. You can update the scan configuration, type, scan cycle, and report retention quantity. The configuration name and the target cluster to be scanned cannot be changed.
                                                                                    • Select Delete to delete the configuration.
                                                                                  • For one-time scan policies: Only support the Delete operation.

                                                                                  "},{"location":"en/admin/kpanda/security/hunter.html#viewe-scan-reports","title":"Viewe Scan Reports","text":"
                                                                                  1. Under the Security Management -> Vulnerability Scanning -> Scan Reports tab, click the report name.

                                                                                    Clicking Delete on the right of a report allows you to manually delete the report.

                                                                                  2. View the scan report content, including:

                                                                                    • The target cluster scanned.
                                                                                    • The scan policy used.
                                                                                    • The scan frequency.
                                                                                    • The total number of risks, high risks, medium risks, and low risks.
                                                                                    • The time of the scan.
                                                                                    • Check details such as vulnerability ID, vulnerability type, vulnerability name, vulnerability description, etc.
                                                                                  "},{"location":"en/admin/kpanda/security/cis/config.html","title":"Scan Configuration","text":"

                                                                                  The first step in using CIS Scanning is to create a scan configuration. Based on the scan configuration, you can then create scan policies, execute scan policies, and finally view scan results.

                                                                                  "},{"location":"en/admin/kpanda/security/cis/config.html#create-a-scan-configuration","title":"Create a Scan Configuration","text":"

                                                                                  The steps for creating a scan configuration are as follows:

                                                                                  1. Click Security Management in the left navigation bar of the homepage of the container management module.

                                                                                  2. By default, enter the Compliance Scanning page, click the Scan Configuration tab, and then click Create Scan Configuration in the upper-right corner.

                                                                                  3. Fill in the configuration name, select the configuration template, and optionally check the scan items, then click OK .

Scan Template: Currently, two templates are provided. The kubeadm template suits general Kubernetes clusters. The daocloud template builds on the kubeadm template but ignores scan items that do not apply to the AI platform, per the AI platform's design.

                                                                                  "},{"location":"en/admin/kpanda/security/cis/config.html#view-scan-configuration","title":"View Scan Configuration","text":"

                                                                                  Under the scan configuration tab, clicking the name of a scan configuration displays the type of the configuration, the number of scan items, the creation time, the configuration template, and the specific scan items enabled for the configuration.

                                                                                  "},{"location":"en/admin/kpanda/security/cis/config.html#updatdelete-scan-configuration","title":"Updat/Delete Scan Configuration","text":"

                                                                                  After a scan configuration has been successfully created, it can be updated or deleted according to your needs.

                                                                                  Under the scan configuration tab, click the \u2507 action button to the right of a configuration:

                                                                                  • Select Edit to update the configuration. You can update the description, template, and scan items. The configuration name cannot be changed.
                                                                                  • Select Delete to delete the configuration.
                                                                                  "},{"location":"en/admin/kpanda/security/cis/policy.html","title":"Scan Policy","text":""},{"location":"en/admin/kpanda/security/cis/policy.html#create-a-scan-policy","title":"Create a Scan Policy","text":"

                                                                                  After creating a scan configuration, you can create a scan policy based on the configuration.

                                                                                  1. Under the Security Management -> Compliance Scanning page, click the Scan Policy tab on the right to create a scan policy.

                                                                                  2. Fill in the configuration according to the following instructions and click OK .

                                                                                    • Cluster: Select the cluster to be scanned. The optional cluster list comes from the clusters accessed or created in the Container Management module. If the desired cluster is not available, you can access or create a cluster in the Container Management module.
                                                                                    • Scan Configuration: Select a pre-created scan configuration. The scan configuration determines which specific scan items need to be performed.
                                                                                    • Scan Type:

                                                                                      • Immediate scan: Perform a scan immediately after the scan policy is created. It cannot be automatically/manually executed again later.
                                                                                      • Scheduled scan: Automatically repeat the scan at scheduled intervals.
• Number of Scan Reports to Keep: Set the maximum number of scan reports to keep. When the retention limit is exceeded, the oldest reports are deleted first.

                                                                                  "},{"location":"en/admin/kpanda/security/cis/policy.html#updatedelete-scan-policies","title":"Update/Delete Scan Policies","text":"

                                                                                  After creating a scan policy, you can update or delete it as needed.

                                                                                  Under the Scan Policy tab, click the \u2507 action button to the right of a configuration:

                                                                                  • For periodic scan policies:

                                                                                    • Select Execute Immediately to perform an additional scan outside the regular schedule.
• Select Disable to pause the scan schedule; click Enable to resume executing the scan policy on schedule.
                                                                                    • Select Edit to update the configuration. You can update the scan configuration, type, scan cycle, and report retention quantity. The configuration name and the target cluster to be scanned cannot be changed.
                                                                                    • Select Delete to delete the configuration.
• For one-time scan policies: Only the Delete operation is supported.

                                                                                  "},{"location":"en/admin/kpanda/security/cis/report.html","title":"Scan Report","text":"

                                                                                  After executing a scan policy, a scan report will be generated automatically. You can view the scan report online or download it to your local computer.

                                                                                  • Download and View

                                                                                    Under the Security Management -> Compliance Scanning page, click the Scan Report tab, then click the \u2507 action button to the right of a report and select Download .

                                                                                  • View Online

                                                                                    Clicking the name of a report allows you to view its content online, which includes:

                                                                                    • The target cluster scanned.
                                                                                    • The scan policy and scan configuration used.
                                                                                    • The start time of the scan.
                                                                                    • The total number of scan items, the number passed, and the number failed.
                                                                                    • For failed scan items, repair suggestions are provided.
                                                                                    • For passed scan items, more secure operational suggestions are provided.
                                                                                  "},{"location":"en/admin/kpanda/storage/pv.html","title":"data volume (PV)","text":"

A data volume (PersistentVolume, PV) is a piece of storage in the cluster that can be provisioned in advance by an administrator or provisioned dynamically through a StorageClass. A PV is a cluster resource with an independent lifecycle: it is not deleted when the Pod process ends. Mounting a PV into a workload persists the workload's data; the PV holds the data directory that the containers in the Pod can access.

                                                                                  "},{"location":"en/admin/kpanda/storage/pv.html#create-data-volume","title":"Create data volume","text":"

Currently, there are two ways to create data volumes: YAML and form. Each has its own advantages and suits different users.

• Creating via YAML takes fewer steps and is more efficient, but it has a higher barrier: you need to be familiar with the YAML configuration of data volumes.

• Creating via the form is more intuitive and easier: just fill in the values according to the prompts, although it involves more steps.

                                                                                  "},{"location":"en/admin/kpanda/storage/pv.html#yaml-creation","title":"YAML creation","text":"
                                                                                  1. Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume (PV) -> Create with YAML in the left navigation bar.

                                                                                  2. Enter or paste the prepared YAML file in the pop-up box, and click OK at the bottom of the pop-up box.

You can import a YAML file from your local machine, or download and save the completed file locally.
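
For reference, a minimal PV manifest of the HostPath type might look like the following (a sketch; the name, path, and capacity are illustrative):

apiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: pv-demo            # illustrative name\nspec:\n  capacity:\n    storage: 10Gi\n  accessModes:\n    - ReadWriteOnce\n  persistentVolumeReclaimPolicy: Retain\n  volumeMode: Filesystem\n  hostPath:\n    path: /data/pv-demo    # illustrative host directory\n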

                                                                                  "},{"location":"en/admin/kpanda/storage/pv.html#form-creation","title":"Form Creation","text":"
                                                                                  1. Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume (PV) -> Create Data Volume (PV) in the left navigation bar.

                                                                                  2. Fill in the basic information.

                                                                                    • The data volume name, data volume type, mount path, volume mode, and node affinity cannot be changed after creation.
                                                                                    • Data volume type: For a detailed introduction to volume types, refer to the official Kubernetes document Volumes.

• Local: The node's local storage is exposed through the PVC interface, so the container uses the PVC directly without caring about the underlying storage type. Local volumes do not support dynamic provisioning, but they do support node affinity, which restricts which nodes can access the data volume.

• HostPath: Uses files or directories on the node's file system as the data volume; node-affinity-based Pod scheduling is not supported.

• Mount path: mounts the data volume to a specific directory in the container.

• Access mode:

• ReadWriteOnce: The data volume can be mounted read-write by a single node.
                                                                                      • ReadWriteMany: The data volume can be mounted by multiple nodes in read-write mode.
                                                                                      • ReadOnlyMany: The data volume can be mounted read-only by multiple nodes.
                                                                                      • ReadWriteOncePod: The data volume can be mounted read-write by a single Pod.
• Reclaim policy:

• Retain: The PV is not deleted; its status only changes to released , and the user must reclaim it manually. For how to reclaim manually, refer to Persistent Volume.
• Recycle: Keeps the PV but empties its data by performing a basic wipe ( rm -rf /thevolume/* ).
• Delete: The PV and its data are deleted together.
                                                                                    • Volume mode:

• File system: The data volume is mounted into a directory by the Pod. If the volume is backed by a device that is currently empty, a file system is created on the device before the volume is mounted for the first time.
                                                                                      • Block: Use the data volume as a raw block device. This type of volume is given to the Pod as a block device without any file system on it, allowing the Pod to access the data volume faster.
• Node affinity: restricts which nodes can access the volume (see the sketch below).

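As a sketch of how node affinity constrains a Local volume (the hostname and disk path below are illustrative assumptions):

apiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: local-pv-demo\nspec:\n  capacity:\n    storage: 10Gi\n  accessModes:\n    - ReadWriteOnce\n  persistentVolumeReclaimPolicy: Retain\n  volumeMode: Filesystem\n  local:\n    path: /mnt/disks/disk1   # illustrative local disk path\n  nodeAffinity:              # required for Local volumes\n    required:\n      nodeSelectorTerms:\n        - matchExpressions:\n            - key: kubernetes.io/hostname\n              operator: In\n              values:\n                - node-1     # only this node may access the volume\n
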
                                                                                  "},{"location":"en/admin/kpanda/storage/pv.html#view-data-volume","title":"View data volume","text":"

                                                                                  Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume (PV) in the left navigation bar.

                                                                                  • On this page, you can view all data volumes in the current cluster, as well as information such as the status, capacity, and namespace of each data volume.

• Data volumes can be sorted in ascending or descending order by name, status, namespace, and creation time.

                                                                                  • Click the name of a data volume to view the basic configuration, StorageClass information, labels, comments, etc. of the data volume.

                                                                                  "},{"location":"en/admin/kpanda/storage/pv.html#clone-data-volume","title":"Clone data volume","text":"

Cloning a data volume creates a new data volume with the same configuration as the original.

                                                                                  1. Enter the clone page

                                                                                    • On the data volume list page, find the data volume to be cloned, and select Clone under the operation bar on the right.

                                                                                      You can also click the name of the data volume, click the operation button in the upper right corner of the details page and select Clone .

                                                                                  2. Use the original configuration directly, or modify it as needed, and click OK at the bottom of the page.

                                                                                  "},{"location":"en/admin/kpanda/storage/pv.html#update-data-volume","title":"Update data volume","text":"

Data volumes can be updated in two ways: via the form or via a YAML file.

                                                                                  Note

Only the alias, capacity, access mode, reclaim policy, labels, and comments of a data volume can be updated.

• On the data volume list page, find the data volume to be updated, then select Update under the operation bar on the right to update via the form, or select Edit YAML to update via YAML.

• Alternatively, click the name of the data volume to enter its details page, then select Update in the upper right corner to update via the form, or select Edit YAML to update via YAML.

                                                                                  "},{"location":"en/admin/kpanda/storage/pv.html#delete-data-volume","title":"Delete data volume","text":"

On the data volume list page, find the data volume to be deleted, and select Delete in the operation column on the right.

                                                                                  You can also click the name of the data volume, click the operation button in the upper right corner of the details page and select Delete .

                                                                                  "},{"location":"en/admin/kpanda/storage/pvc.html","title":"Data volume declaration (PVC)","text":"

A persistent volume claim (PersistentVolumeClaim, PVC) expresses a user's request for storage. A PVC consumes PV resources and claims a data volume with a specific size and access mode, for example requiring the PV to be mounted in ReadWriteOnce, ReadOnlyMany, or ReadWriteMany mode.

                                                                                  "},{"location":"en/admin/kpanda/storage/pvc.html#create-data-volume-statement","title":"Create data volume statement","text":"

Currently, there are two ways to create data volume claims: YAML and form. Each has its own advantages and suits different users.

• Creating via YAML takes fewer steps and is more efficient, but it has a higher barrier: you need to be familiar with the YAML configuration of data volume claims.

• Creating via the form is more intuitive and easier: just fill in the values according to the prompts, although it involves more steps.

                                                                                  "},{"location":"en/admin/kpanda/storage/pvc.html#yaml-creation","title":"YAML creation","text":"
                                                                                  1. Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume Declaration (PVC) -> Create with YAML in the left navigation bar.

                                                                                  2. Enter or paste the prepared YAML file in the pop-up box, and click OK at the bottom of the pop-up box.

You can import a YAML file from your local machine, or download and save the completed file locally.
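
For reference, a minimal PVC manifest might look like this (a sketch; the name, StorageClass, and size are illustrative):

apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: pvc-demo\n  namespace: default\nspec:\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 10Gi\n  storageClassName: sc-demo   # illustrative; must be an existing StorageClass\n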

                                                                                  "},{"location":"en/admin/kpanda/storage/pvc.html#form-creation","title":"Form Creation","text":"
                                                                                  1. Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume Declaration (PVC) -> Create Data Volume Declaration (PVC) in the left navigation bar.

                                                                                  2. Fill in the basic information.

• The name, namespace, creation method, data volume, capacity, and access mode of a data volume claim cannot be changed after creation.
• Creation method: dynamically create a new claim from an existing StorageClass or data volume, or create a new claim from a snapshot of an existing claim (a snapshot-based sketch follows this list).

  When creating from a snapshot, the claim's capacity cannot be modified; it can be changed after creation completes.

• After selecting the creation method, select the desired StorageClass/data volume/snapshot from the drop-down list.

• Access mode:

• ReadWriteOnce: the claim can be mounted read-write by a single node.

• ReadWriteMany: the claim can be mounted read-write by multiple nodes.
• ReadOnlyMany: the claim can be mounted read-only by multiple nodes.
• ReadWriteOncePod: the claim can be mounted read-write by a single Pod.
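
When creating from a snapshot, the claim references the snapshot through a dataSource field; a sketch of the relevant fragment (the snapshot name is an illustrative assumption):

spec:\n  dataSource:\n    apiGroup: snapshot.storage.k8s.io\n    kind: VolumeSnapshot\n    name: snap-demo   # the source snapshot; the CSI driver must support snapshots\n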
                                                                                  "},{"location":"en/admin/kpanda/storage/pvc.html#view-data-volume-statement","title":"View data volume statement","text":"

                                                                                  Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume Declaration (PVC) in the left navigation bar.

• On this page, you can view all data volume claims in the current cluster, along with information such as the status, capacity, and namespace of each claim.

• Claims can be sorted in ascending or descending order by name, status, namespace, and creation time.

• Click the name of a claim to view its basic configuration, StorageClass information, labels, comments, and other information.

                                                                                  "},{"location":"en/admin/kpanda/storage/pvc.html#expansion-data-volume-statement","title":"Expansion data volume statement","text":"
1. In the left navigation bar, click Container Storage -> Data Volume Claim (PVC) , and find the claim whose capacity you want to adjust.

2. Click the name of the claim, then click the operation button in the upper right corner of the page and select Expansion .

3. Enter the target capacity and click OK . (The equivalent YAML change is sketched below.)
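
Under the hood, expansion raises the requested storage on the claim; the equivalent YAML change is a sketch like:

spec:\n  resources:\n    requests:\n      storage: 20Gi   # raised from 10Gi; the StorageClass must have expansion enabled\n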

                                                                                  "},{"location":"en/admin/kpanda/storage/pvc.html#clone-data-volume-statement","title":"Clone data volume statement","text":"

Cloning a data volume claim creates a new claim with the same configuration as the original (a YAML sketch of the underlying mechanism follows the steps below).

                                                                                  1. Enter the clone page

• On the claim list page, find the claim to be cloned, and select Clone under the operation bar on the right.

  You can also click the name of the claim, click the operation button in the upper right corner of the details page, and select Clone .

                                                                                  2. Use the original configuration directly, or modify it as needed, and click OK at the bottom of the page.
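
In plain Kubernetes terms, a clone is a new claim whose dataSource points at the source claim (a sketch, assuming the CSI driver supports cloning; names are illustrative):

apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: pvc-demo-clone\nspec:\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 10Gi\n  storageClassName: sc-demo\n  dataSource:\n    kind: PersistentVolumeClaim\n    name: pvc-demo    # the claim being cloned\n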

                                                                                  "},{"location":"en/admin/kpanda/storage/pvc.html#update-data-volume-statement","title":"Update data volume statement","text":"

Data volume claims can be updated in two ways: via the form or via a YAML file.

                                                                                  Note

Only the alias, labels, and annotations of a data volume claim can be updated.

• On the claim list page, find the claim to be updated, select Update in the operation bar on the right to update it via the form, or select Edit YAML to update it via YAML.

• Alternatively, click the name of the claim to enter its details page, then select Update in the upper right corner to update via the form, or select Edit YAML to update via YAML.

                                                                                  "},{"location":"en/admin/kpanda/storage/pvc.html#delete-data-volume-statement","title":"Delete data volume statement","text":"

On the claim list page, find the claim to be deleted, and select Delete in the operation column on the right.

You can also click the name of the claim, click the operation button in the upper right corner of the details page, and select Delete .

                                                                                  "},{"location":"en/admin/kpanda/storage/pvc.html#common-problem","title":"common problem","text":"
1. If there is no StorageClass or data volume available in the list, you can Create a StorageClass or Create a data volume.

2. If there is no snapshot available in the list, you can enter the claim's details page and create a snapshot in the upper right corner.

3. If the StorageClass (SC) used by the claim does not have snapshots enabled, snapshots cannot be taken, and the page will not display a \"Make Snapshot\" option.

4. If the StorageClass (SC) used by the claim does not have expansion enabled, the data volume does not support capacity expansion, and the page will not display an expansion option.
                                                                                  "},{"location":"en/admin/kpanda/storage/sc-share.html","title":"shared StorageClass","text":"

                                                                                  The AI platform container management module supports sharing a StorageClass with multiple namespaces to improve resource utilization efficiency.

                                                                                  1. Find the StorageClass that needs to be shared in the StorageClass list, and click Authorize Namespace under the operation bar on the right.

2. Click Custom Namespace to select, one by one, the namespaces with which to share this StorageClass.

  • Click Authorize All Namespaces to share this StorageClass with all namespaces in the current cluster at once.
  • Click Remove Authorization under the operation bar on the right side of the list to revoke the authorization and stop sharing this StorageClass with that namespace.
                                                                                  "},{"location":"en/admin/kpanda/storage/sc.html","title":"StorageClass (SC)","text":"

A StorageClass refers to a large storage resource pool composed of many physical disks. The platform supports creating block, local, and custom StorageClasses after integrating various storage vendors, and then dynamically provisions data volumes for workloads.

                                                                                  "},{"location":"en/admin/kpanda/storage/sc.html#create-storageclass-sc","title":"Create StorageClass (SC)","text":"

Currently, a StorageClass can be created via YAML or via the form. Each method has its own advantages and suits different users.

• Creating via YAML takes fewer steps and is more efficient, but it has a higher barrier: you need to be familiar with the YAML configuration of the StorageClass.

• Creating via the form is more intuitive and easier: just fill in the values according to the prompts, although it involves more steps.

                                                                                  "},{"location":"en/admin/kpanda/storage/sc.html#yaml-creation","title":"YAML creation","text":"
                                                                                  1. Click the name of the target cluster in the cluster list, and then click Container Storage -> StorageClass (SC) -> Create with YAML in the left navigation bar.

                                                                                  2. Enter or paste the prepared YAML file in the pop-up box, and click OK at the bottom of the pop-up box.

You can import a YAML file from your local machine, or download and save the completed file locally.
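
For reference, a minimal StorageClass manifest might look like this (a sketch using the rancher.io/local-path driver mentioned below; the other field values are illustrative):

apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: sc-demo\nprovisioner: rancher.io/local-path\nreclaimPolicy: Delete\nvolumeBindingMode: WaitForFirstConsumer\nallowVolumeExpansion: true   # enables the Expansion feature for claims\n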

                                                                                  "},{"location":"en/admin/kpanda/storage/sc.html#form-creation","title":"Form Creation","text":"
                                                                                  1. Click the name of the target cluster in the cluster list, and then click Container Storage -> StorageClass (SC) -> Create StorageClass (SC) in the left navigation bar.

                                                                                  2. Fill in the basic information and click OK at the bottom.

Custom storage system

                                                                                    • The StorageClass name, driver, and reclamation policy cannot be modified after creation.
• CSI storage driver: A standard Kubernetes-based container storage interface plug-in, which must follow the format specified by the storage vendor, such as rancher.io/local-path .

  • For how to fill in the CSI drivers provided by different vendors, refer to the official Kubernetes document Storage Class.
  • Reclaim policy: when a data volume is deleted, either keep the data in it or delete it.
  • Snapshot/Expansion: once enabled, data volumes/claims based on this StorageClass support the expansion and snapshot features, provided the underlying storage driver supports them.

                                                                                    HwameiStor storage system

• The StorageClass name, driver, and reclaim policy cannot be modified after creation.
• Storage system: the HwameiStor storage system.
• Storage type: LVM and raw-disk types are supported.
  • LVM type : HwameiStor's recommended mode, which supports highly available data volumes; the corresponding CSI driver is lvm.hwameistor.io .
  • Raw disk data volume : suitable for scenarios that do not require high availability, as it provides no high-availability capability; the corresponding CSI driver is hdd.hwameistor.io .
• High Availability Mode: Before using the high-availability capability, make sure the DRBD component has been installed. With high-availability mode enabled, the number of data volume replicas can be set to 1 or 2, and a replica count can be converted from 1 to 2 if needed (the sketch below shows where these choices surface in the StorageClass).
• Reclaim policy: when a data volume is deleted, either keep the data in it or delete it.
• Snapshot/Expansion: once enabled, data volumes/claims based on this StorageClass support the expansion and snapshot features, provided the underlying storage driver supports them.
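
As a sketch, an HwameiStor LVM StorageClass built from these choices might look like the following; the parameters block is an assumption that may differ between HwameiStor versions, so treat it as illustrative only:

apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: hwameistor-lvm-ha\nprovisioner: lvm.hwameistor.io   # the LVM-type CSI driver named above\nreclaimPolicy: Retain\nallowVolumeExpansion: true\nparameters:\n  replicaNumber: '2'   # assumption: replica-count parameter name may vary by version\n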
                                                                                  "},{"location":"en/admin/kpanda/storage/sc.html#update-storageclass-sc","title":"Update StorageClass (SC)","text":"

                                                                                  On the StorageClass list page, find the StorageClass that needs to be updated, and select Edit under the operation bar on the right to update the StorageClass.

                                                                                  Info

Select View YAML to view the StorageClass's YAML file; editing it there is not supported.

                                                                                  "},{"location":"en/admin/kpanda/storage/sc.html#delete-storageclass-sc","title":"Delete StorageClass (SC)","text":"

                                                                                  On the StorageClass list page, find the StorageClass to be deleted, and select Delete in the operation column on the right.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-cronjob.html","title":"Create CronJob","text":"

This page introduces how to create a CronJob from an image or a YAML file.

CronJobs are suitable for periodic operations such as backups and report generation. These jobs can be configured to repeat periodically (for example, daily/weekly/monthly), and you can define the time at which each run starts.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-cronjob.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before creating a CronJob, the following prerequisites need to be met:

• In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and make sure you can access the cluster's UI.

                                                                                  • Create a namespace and a user.

                                                                                  • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

• When there are multiple containers in a single instance, make sure the ports used by the containers do not conflict; otherwise the deployment will fail.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-cronjob.html#create-by-image","title":"Create by image","text":"

                                                                                  Refer to the following steps to create a CronJob using the image.

                                                                                  1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                                                                  2. On the cluster details page, click Workloads -> CronJobs in the left navigation bar, and then click the Create by Image button in the upper right corner of the page.

                                                                                  3. Fill in Basic Information, Container Settings, CronJob Settings, Advanced Configuration, click OK in the lower right corner of the page to complete the creation.

                                                                                    The system will automatically return to the CronJobs list. Click \u2507 on the right side of the list to perform operations such as updating, deleting, and restarting the CronJob.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-cronjob.html#basic-information","title":"Basic information","text":"

                                                                                  On the Create CronJobs page, enter the information according to the table below, and click Next .

• Workload Name: Can contain up to 63 characters; only lowercase letters, numbers, and the separator (\"-\") are allowed, and it must start and end with a lowercase letter or number. Workloads of the same type in the same namespace cannot share a name, and the name cannot be changed after the workload is created.
• Namespace: Select the namespace in which to deploy the newly created CronJob; the default namespace is selected by default. If you can't find the desired namespace, you can create a new one following the prompt on the page.
• Description: Enter a description of the workload; the content is up to you. The number of characters should not exceed 512.
                                                                                  "},{"location":"en/admin/kpanda/workloads/create-cronjob.html#container-settings","title":"Container settings","text":"

Container settings are divided into six parts: basic information, lifecycle, health check, environment variables, data storage, and security settings. Click the tabs below to view the requirements of each part.

Container settings are configured per container. To add multiple containers to a pod, click + on the right.

Basic information (required) | Lifecycle (optional) | Health Check (optional) | Environment variables (optional) | Data storage (optional) | Security settings (optional)

                                                                                  When configuring container-related parameters, you must correctly fill in the container name and image parameters, otherwise you will not be able to proceed to the next step. After filling in the configuration with reference to the following requirements, click OK .

• Container Name: Up to 63 characters; lowercase letters, numbers, and separators (\"-\") are supported. It must start and end with a lowercase letter or number, e.g. nginx-01.
• Image: Enter the address or name of the image. When only an image name is entered, the image is pulled from the official DockerHub by default.
• Image Pull Policy: If Always pull the image is checked, the image is pulled from the registry every time the workload restarts/upgrades. If unchecked, the local image is used, and the image is re-pulled from the container registry only when it does not exist locally. For more details, refer to Image Pull Policy.
• Privileged container: By default, the container cannot access any device on the host. When the privileged container option is enabled, the container can access all devices on the host and has all the permissions of processes running on the host.
• CPU/Memory Quota: Request (minimum resources needed) and limit (maximum resources allowed) for CPU/memory. Configure resources for containers as needed to avoid resource waste and system failures caused by oversized container resources. The default values are shown in the figure.
• GPU Exclusive: Configure GPU usage for the container; only positive integers are supported. The GPU quota supports dedicating a whole GPU or a vGPU slice to the container. For example, for an 8-core GPU, enter 8 to give the container exclusive use of the whole card, or enter 1 to configure a 1-core vGPU for the container (see the YAML sketch below).

                                                                                    Before setting exclusive GPU, the administrator needs to install the GPU and driver plug-in on the cluster nodes in advance, and enable the GPU feature in Cluster Settings.

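In YAML terms, these basic settings correspond to a container spec fragment like the following (a sketch; the GPU resource name depends on the installed GPU plug-in and is an assumption here):

containers:\n  - name: nginx-01\n    image: nginx\n    imagePullPolicy: IfNotPresent   # 'Always pull the image' maps to Always\n    resources:\n      requests:\n        cpu: 250m\n        memory: 512Mi\n      limits:\n        cpu: 250m\n        memory: 512Mi\n        nvidia.com/gpu: 1           # assumption: vendor-specific resource name\n
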
                                                                                  Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle Configuration.

Health checks are used to assess the health of containers and applications, helping improve application availability. For details, refer to Container Health Check Configuration.

                                                                                  Configure container parameters within the Pod, add environment variables or pass configuration to the Pod, etc. For details, refer to Container environment variable configuration.

                                                                                  Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage Configuration.

Containers are securely isolated through Linux's built-in account-permission isolation mechanism. You can limit a container's permissions by running it under account UIDs with different privilege levels; for example, entering 0 uses the privileges of the root account (see the sketch below).
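
In YAML terms, this maps to the container's securityContext (a sketch; the UID is illustrative):

securityContext:\n  runAsUser: 1000      # a non-root UID; 0 would grant root privileges\n  privileged: false\n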

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-cronjob.html#cronjob-settings","title":"CronJob Settings","text":"
                                                                                  • Concurrency Policy: Whether to allow multiple Job jobs to run in parallel.

• Allow : A new job can be created before the previous job completes, and multiple jobs can run in parallel. Too many concurrent jobs may strain cluster resources.
• Forbid : A new job cannot be created before the previous job completes. If a new job is due to run while the previous job is still unfinished, the CronJob skips the new run.
• Replace : If a new job is due to run while the previous job is unfinished, the new job replaces the previous one.

The above rules apply only to jobs created by the same CronJob; jobs created by different CronJobs can always run concurrently.

                                                                                  • Policy Settings: Set the time period for job execution based on minutes, hours, days, weeks, and months. Support custom Cron expressions with numbers and * , after inputting the expression, the meaning of the current expression will be prompted. For detailed expression syntax rules, refer to Cron Schedule Syntax.

• Job Records: Set how many successful or failed job records to keep. 0 means keep none.
• Timeout: When this time is exceeded, the job is marked as failed and all Pods under it are deleted. Empty means no timeout; the default is 360 s.
• Retries: The number of times a job can be retried; the default is 6.
• Restart Policy: Set whether to restart the Pod when the job fails. (The YAML equivalents of these settings are sketched below.)
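
These settings map onto the CronJob spec roughly as follows (a sketch with illustrative values):

spec:\n  concurrencyPolicy: Forbid        # Allow / Forbid / Replace\n  schedule: '0 2 * * *'            # Policy Settings: daily at 02:00\n  successfulJobsHistoryLimit: 3    # Job Records (successful)\n  failedJobsHistoryLimit: 1        # Job Records (failed)\n  jobTemplate:\n    spec:\n      activeDeadlineSeconds: 360   # Timeout\n      backoffLimit: 6              # Retries\n      template:\n        spec:\n          restartPolicy: Never     # Restart Policy\n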
                                                                                  "},{"location":"en/admin/kpanda/workloads/create-cronjob.html#service-settings","title":"Service settings","text":"

Configure a Service for the CronJob so that it can be accessed externally.

                                                                                  1. Click the Create Service button.

                                                                                  2. Refer to Create Service to configure service parameters.

                                                                                  3. Click OK and click Next .

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-cronjob.html#advanced-configuration","title":"Advanced configuration","text":"

                                                                                  The advanced configuration of CronJobs mainly involves labels and annotations.

                                                                                  You can click the Add button to add labels and annotations to the workload instance Pod.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-cronjob.html#create-from-yaml","title":"Create from YAML","text":"

In addition to using an image, you can also create CronJobs more quickly from YAML files.

                                                                                  1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                                                                  2. On the cluster details page, click Workloads -> CronJobs in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

                                                                                  3. Enter or paste the YAML file prepared in advance, click OK to complete the creation.

                                                                                  click to view the complete YAML
                                                                                  apiVersion: batch/v1\nkind: CronJob\nmetadata:\n  creationTimestamp: '2022-12-26T09:45:47Z'\n  generation: 1\n  name: demo\n  namespace: default\n  resourceVersion: '92726617'\n  uid: d030d8d7-a405-4dcd-b09a-176942ef36c9\nspec:\n  concurrencyPolicy: Allow\n  failedJobsHistoryLimit: 1\n  jobTemplate:\n    metadata:\n      creationTimestamp: null\n    spec:\n      activeDeadlineSeconds: 360\n      backoffLimit: 6\n      template:\n        metadata:\n          creationTimestamp: null\n        spec:\n          containers:\n            - image: nginx\n              imagePullPolicy: IfNotPresent\n              lifecycle: {}\n              name: container-3\n              resources:\n                limits:\n                  cpu: 250m\n                  memory: 512Mi\n                requests:\n                  cpu: 250m\n                  memory: 512Mi\n              securityContext:\n                privileged: false\n              terminationMessagePath: /dev/termination-log\n              terminationMessagePolicy: File\n          dnsPolicy: ClusterFirst\n          restartPolicy: Never\n          schedulerName: default-scheduler\n          securityContext: {}\n          terminationGracePeriodSeconds: 30\n  schedule: 0 0 13 * 5\n  successfulJobsHistoryLimit: 3\n  suspend: false\nstatus: {}\n
                                                                                  "},{"location":"en/admin/kpanda/workloads/create-daemonset.html","title":"Create DaemonSet","text":"

This page introduces how to create a DaemonSet from an image or a YAML file.

A DaemonSet uses node affinity and tolerations for taints to ensure that a replica of a Pod runs on all (or some) nodes. When a node newly joins the cluster, the DaemonSet automatically deploys the proper Pod on it and tracks the Pod's running status. When a node is removed, the DaemonSet deletes all Pods it created there.

Common use cases for DaemonSets include:

                                                                                  • Run cluster daemons on each node.
                                                                                  • Run a log collection daemon on each node.
                                                                                  • Run a monitoring daemon on each node.

For simplicity, you can start one DaemonSet on each node for each type of daemon. For finer-grained, more advanced daemon management, you can also deploy multiple DaemonSets for the same kind of daemon, each with different flags and different memory and CPU requirements for different hardware types.
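
For reference, a minimal DaemonSet manifest for a per-node log-collection daemon might look like this (a sketch; the image and toleration are illustrative assumptions):

apiVersion: apps/v1\nkind: DaemonSet\nmetadata:\n  name: log-agent\n  namespace: kube-system\nspec:\n  selector:\n    matchLabels:\n      app: log-agent\n  template:\n    metadata:\n      labels:\n        app: log-agent\n    spec:\n      tolerations:\n        - key: node-role.kubernetes.io/control-plane\n          operator: Exists\n          effect: NoSchedule   # also run on tainted control-plane nodes\n      containers:\n        - name: agent\n          image: fluent/fluent-bit   # illustrative log-collector image\n          resources:\n            limits:\n              memory: 200Mi\n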

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-daemonset.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before creating a DaemonSet, the following prerequisites need to be met:

• In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and make sure you can access the cluster's UI.

                                                                                  • Create a namespace and a user.

                                                                                  • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

• When there are multiple containers in a single instance, make sure the ports used by the containers do not conflict; otherwise the deployment will fail.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-daemonset.html#create-by-image","title":"Create by image","text":"

                                                                                  Refer to the following steps to create a daemon using the image.

                                                                                  1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                                                                  2. On the cluster details page, click Workloads -> DaemonSets in the left navigation bar, and then click the Create by Image button in the upper right corner of the page.

                                                                                  3. Fill in Basic Information, Container Settings, Service Settings, Advanced Settings, click OK in the lower right corner of the page to complete the creation.

                                                                                    The system will automatically return the list of DaemonSets. Click \u2507 on the right side of the list to perform operations such as updating, deleting, and restarting the DaemonSet.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-daemonset.html#basic-information","title":"Basic information","text":"

                                                                                  On the Create DaemonSets page, after entering the information according to the table below, click Next .

                                                                                  • Workload Name: Can contain up to 63 characters, can only contain lowercase letters, numbers, and a separator (\"-\"), and must start and end with a lowercase letter or number. The name of the same type of workload in the same namespace cannot be repeated, and the name of the workload cannot be changed after the workload is created.
                                                                                  • Namespace: Select which namespace to deploy the newly created DaemonSet in, and the default namespace is used by default. If you can't find the desired namespace, you can go to Create a new namespace according to the prompt on the page.
                                                                                  • Description: Enter the description information of the workload and customize the content. The number of characters should not exceed 512.
                                                                                  "},{"location":"en/admin/kpanda/workloads/create-daemonset.html#container-settings","title":"Container settings","text":"

                                                                                  Container setting is divided into six parts: basic information, life cycle, health check, environment variables, data storage, and security settings. Click the tab below to view the requirements of each part.

                                                                                  The container settings apply to a single container. To add more containers to a pod, click + on the right.

                                                                                  Basic information (required)Lifecycle (optional)Health Check (optional)Environment variables (optional)Data storage (optional)Security settings (optional)

                                                                                  When configuring container-related parameters, you must correctly fill in the container name and image parameters, otherwise you will not be able to proceed to the next step. After filling in the settings with reference to the following requirements, click OK .

                                                                                  • Container Name: Up to 63 characters, lowercase letters, numbers and separators (\"-\") are supported. Must start and end with a lowercase letter or number, eg nginx-01.
                                                                                  • Image: Enter the address or name of the image. When entering the image name, the image will be pulled from the official DockerHub by default.
                                                                                  • Image Pull Policy: After checking Always pull image , the image will be pulled from the registry every time the workload restarts/upgrades. If it is not checked, only the local image will be pulled, and only when the image does not exist locally, it will be re-pulled from the container registry. For more details, refer to Image Pull Policy.
                                                                                  • Privileged container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and enjoy all the permissions of the running process on the host.
                                                                                  • CPU/Memory Quota: Requested value (minimum resource to be used) and limit value (maximum resource allowed to be used) of CPU/Memory resource. Please configure resources for containers as needed to avoid resource waste and system failures caused by excessive container resources. The default value is shown in the figure.
                                                                                  • GPU Exclusive: Configure the GPU usage for the container; only positive integers are supported. The GPU quota supports setting the container to exclusively use an entire GPU or part of a vGPU. For example, for an 8-core GPU, enter 8 to let the container exclusively use the entire card, or enter 1 to configure a 1-core vGPU for the container.

                                                                                    Before setting exclusive GPU, the administrator needs to install the GPU and driver plug-in on the cluster nodes in advance, and enable the GPU feature in Cluster Settings.
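
                                                                                  For reference, the basic-information fields above map roughly to the following container spec fragment (a sketch; the nvidia.com/gpu resource name assumes the NVIDIA device plugin is installed, and other GPU vendors expose different resource names):

                                                                                  containers:\n  - name: nginx-01\n    image: nginx                # pulled from DockerHub when only a name is given\n    imagePullPolicy: Always     # Always pull image checked; IfNotPresent when unchecked\n    securityContext:\n      privileged: false         # privileged container disabled\n    resources:\n      requests:\n        cpu: 250m\n        memory: 512Mi\n      limits:\n        cpu: 250m\n        memory: 512Mi\n        nvidia.com/gpu: 1       # GPU quota; resource name depends on the installed device plugin\n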

                                                                                  Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle Configuration.

                                                                                  It is used to judge the health status of containers and applications, which helps to improve the availability of applications. For details, refer to Container Health Check Configuration.

                                                                                  Configure container parameters within the Pod, add environment variables or pass settings to the Pod, etc. For details, refer to Container environment variable settings.

                                                                                  Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage Configuration.

                                                                                  Containers are securely isolated through Linux's built-in account authority isolation mechanism. You can limit container permissions by using account UIDs (digital identity tokens) with different permissions. For example, enter 0 to use the privileges of the root account.
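
                                                                                  For example, a non-root UID can be set through the container securityContext (a sketch; 1000 is an arbitrary example UID):

                                                                                  containers:\n  - name: nginx-01\n    image: nginx\n    securityContext:\n      runAsUser: 1000    # arbitrary non-root UID; 0 would run with root privileges\n      runAsGroup: 1000\n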

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-daemonset.html#service-settings","title":"Service settings","text":"

                                                                                  Create a Service for the DaemonSet, so that the DaemonSet can be accessed externally.

                                                                                  1. Click the Create Service button.

                                                                                  2. Configure service parameters, refer to Create Service for details.

                                                                                  3. Click OK and click Next .
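
                                                                                  A minimal NodePort Service for such a workload might look like the following sketch (the name, selector label, and ports are illustrative assumptions):

                                                                                  apiVersion: v1\nkind: Service\nmetadata:\n  name: daemon-svc        # hypothetical name\nspec:\n  type: NodePort          # exposes the Service on every node\n  selector:\n    app: log-collector    # must match the Pod labels of the workload\n  ports:\n    - port: 80            # Service port\n      targetPort: 80      # container port\n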

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-daemonset.html#advanced-settings","title":"Advanced settings","text":"

                                                                                  Advanced settings include four parts: network settings, upgrade policy, scheduling policy, and labels and annotations. You can click the tabs below to view the requirements of each part.

                                                                                  Network ConfigurationUpgrade PolicyScheduling PoliciesLabels and Annotations

                                                                                  In some cases, the application will have redundant DNS queries. Kubernetes provides DNS-related settings options for applications, which can effectively reduce redundant DNS queries and increase business concurrency in certain cases.

                                                                                  • DNS Policy

                                                                                    • Default: The container uses the domain name resolution file pointed to by the kubelet --resolv-conf parameter. This setting can only resolve external domain names registered on the Internet, not cluster-internal domain names, and produces no invalid DNS queries.
                                                                                    • ClusterFirstWithHostNet: The application uses the domain name resolution file of the host it runs on.
                                                                                    • ClusterFirst: The application connects to Kube-DNS/CoreDNS.
                                                                                    • None: A new option introduced in Kubernetes v1.9 (Beta in v1.10). After setting it to None, dnsConfig must be set; the container's domain name resolution file is then generated entirely from the dnsConfig settings.
                                                                                  • Nameservers: fill in the address of the domain name server, such as 10.6.175.20 .

                                                                                  • Search domains: DNS search domain list for domain name query. When specified, the provided search domain list will be merged into the search field of the domain name resolution file generated based on dnsPolicy, and duplicate domain names will be deleted. Kubernetes allows up to 6 search domains.
                                                                                  • Options: Configuration options for DNS, where each object can have a name attribute (required) and a value attribute (optional). The content in this field will be merged into the options field of the domain name resolution file generated based on dnsPolicy. If some options of dnsConfig options conflict with the options of the domain name resolution file generated based on dnsPolicy, they will be overwritten by dnsConfig.
                                                                                  • Host Alias: the alias set for the host.
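
                                                                                  Together, these options map to the Pod spec fields dnsPolicy, dnsConfig, and hostAliases, for example (all values are illustrative):

                                                                                  spec:\n  dnsPolicy: \"None\"\n  dnsConfig:\n    nameservers:\n      - 10.6.175.20\n    searches:\n      - ns1.svc.cluster.local\n      - my.dns.search.suffix\n    options:\n      - name: ndots\n        value: \"2\"\n  hostAliases:\n    - ip: 10.6.175.30           # hypothetical host IP\n      hostnames:\n        - my-host-alias\n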

                                                                                  • Upgrade Mode: Rolling upgrade refers to gradually replacing instances of the old version with instances of the new version. During the upgrade process, business traffic will be load-balanced to the old and new instances at the same time, so the business will not be interrupted. Rebuild and upgrade refers to deleting the workload instance of the old version first, and then installing the specified new version. During the upgrade process, the business will be interrupted.
                                                                                  • Max Unavailable Pods: Specify the maximum value or ratio of unavailable pods during the workload update process, the default is 25%. If it is equal to the number of instances, there is a risk of service interruption.
                                                                                  • Max Surge: The maximum or ratio of the total number of Pods exceeding the desired replica count of Pods during a Pod update. Default is 25%.
                                                                                  • Revision History Limit: Set the number of old versions retained when the version is rolled back. The default is 10.
                                                                                  • Minimum Ready: The minimum time for a Pod to be ready. Only after this time is the Pod considered available. The default is 0 seconds.
                                                                                  • Upgrade Max Duration: If the deployment is not successful after the set time, the workload will be marked as failed. Default is 600 seconds.
                                                                                  • Graceful Period: The execution period (0-9,999 seconds) of the command before the workload stops, the default is 30 seconds.
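
                                                                                  On the YAML side, these options roughly correspond to the following Deployment fields (a sketch; DaemonSets use spec.updateStrategy instead of spec.strategy and do not support progressDeadlineSeconds):

                                                                                  spec:\n  strategy:\n    type: RollingUpdate            # Recreate for rebuild and upgrade\n    rollingUpdate:\n      maxUnavailable: 25%\n      maxSurge: 25%\n  revisionHistoryLimit: 10         # Revision History Limit\n  minReadySeconds: 0               # Minimum Ready\n  progressDeadlineSeconds: 600     # Upgrade Max Duration\n  template:\n    spec:\n      terminationGracePeriodSeconds: 30   # Graceful Period\n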

                                                                                  • Toleration time: When the node where the workload instance is located is unavailable, the time for rescheduling the workload instance to other available nodes, the default is 300 seconds.
                                                                                  • Node affinity: According to the label on the node, constrain which nodes the Pod can be scheduled on.
                                                                                  • Workload Affinity: Constrains which nodes a Pod can be scheduled to based on the labels of the Pods already running on the node.
                                                                                  • Workload anti-affinity: Constrains which nodes a Pod cannot be scheduled to based on the labels of Pods already running on the node.
                                                                                  • Topology domain: that is, topologyKey, which specifies the scope of nodes that can be scheduled. For example, kubernetes.io/os means that as long as a node with that operating-system label meets the labelSelector conditions, Pods can be scheduled onto it.

                                                                                  For details, refer to Scheduling Policy.
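
                                                                                  As an illustration, toleration time and node affinity correspond to Pod spec fields like these (the disktype label is a hypothetical node label):

                                                                                  spec:\n  tolerations:\n    - key: node.kubernetes.io/unreachable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300   # reschedule 300s after the node becomes unavailable\n  affinity:\n    nodeAffinity:\n      requiredDuringSchedulingIgnoredDuringExecution:\n        nodeSelectorTerms:\n          - matchExpressions:\n              - key: disktype          # hypothetical node label\n                operator: In\n                values:\n                  - ssd\n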

                                                                                  You can click the Add button to add labels and annotations to workloads and pods.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-daemonset.html#create-from-yaml","title":"Create from YAML","text":"

                                                                                  In addition to creating from an image, you can also create DaemonSets more quickly from YAML files.

                                                                                  1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the Cluster Details page.

                                                                                  2. On the cluster details page, click Workloads -> DaemonSets in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

                                                                                  3. Enter or paste the YAML file prepared in advance, click OK to complete the creation.

                                                                                  Click to see an example YAML for creating a DaemonSet
                                                                                   kind: DaemonSet\n apiVersion: apps/v1\n metadata:\n   name: hwameistor-local-disk-manager\n   namespace: hwameistor\n   uid: ccbdc098-7de3-4a8a-96dd-d1cee159c92b\n   resourceVersion: '90999552'\n   generation: 1\n   creationTimestamp: '2022-12-15T09:03:44Z'\n   labels:\n     app.kubernetes.io/managed-by: Helm\n   annotations:\n     deprecated.DaemonSet.template.generation: '1'\n     meta.helm.sh/release-name: hwameistor\n     meta.helm.sh/release-namespace: hwameistor\n spec:\n   selector:\n     matchLabels:\n       app: hwameistor-local-disk-manager\n   template:\n     metadata:\n       creationTimestamp: null\n       labels:\n         app: hwameistor-local-disk-manager\n     spec:\n       volumes:\n         - name: udev\n           hostPath:\n             path: /run/udev\n             type: Directory\n         - name: procmount\n           hostPath:\n             path: /proc\n             type: Directory\n         - name: devmount\n           hostPath:\n             path: /dev\n             type: Directory\n         - name: socket-dir\n           hostPath:\n             path: /var/lib/kubelet/plugins/disk.hwameistor.io\n             type: DirectoryOrCreate\n         - name: registration-dir\n           hostPath:\n             path: /var/lib/kubelet/plugins_registry/\n             type: Directory\n         - name: plugin-dir\n           hostPath:\n             path: /var/lib/kubelet/plugins\n             type: DirectoryOrCreate\n         - name: pods-mount-dir\n           hostPath:\n             path: /var/lib/kubelet/pods\n             type: DirectoryOrCreate\n       containers:\n         - name: registrar\n           image: k8s-gcr.m.daocloud.io/sig-storage/csi-node-driver-registrar:v2.5.0\n           args:\n             - '--v=5'\n             - '--csi-address=/csi/csi.sock'\n             - >-\n               --kubelet-registration-path=/var/lib/kubelet/plugins/disk.hwameistor.io/csi.sock\n           env:\n             - name: KUBE_NODE_NAME\n               valueFrom:\n                 fieldRef:\n                   apiVersion: v1\n                   fieldPath: spec.nodeName\n           resources: {}\n           volumeMounts:\n             - name: socket-dir\n               mountPath: /csi\n             - name: registration-dir\n               mountPath: /registration\n           lifecycle:\n             preStop:\n               exec:\n                 command:\n                   - /bin/sh\n                   - '-c'\n                   - >-\n                     rm -rf /registration/disk.hwameistor.io\n                     /registration/disk.hwameistor.io-reg.sock\n           terminationMessagePath: /dev/termination-log\n           terminationMessagePolicy: File\n           imagePullPolicy: IfNotPresent\n         - name: manager\n           image: ghcr.m.daocloud.io/hwameistor/local-disk-manager:v0.6.1\n           command:\n             - /local-disk-manager\n           args:\n             - '--endpoint=$(CSI_ENDPOINT)'\n             - '--nodeid=$(NODENAME)'\n             - '--csi-enable=true'\n           env:\n             - name: CSI_ENDPOINT\n               value: unix://var/lib/kubelet/plugins/disk.hwameistor.io/csi.sock\n             - name: NAMESPACE\n               valueFrom:\n                 fieldRef:\n                   apiVersion: v1\n                   fieldPath: metadata.namespace\n             - name: WATCH_NAMESPACE\n               valueFrom:\n                 fieldRef:\n                   apiVersion: v1\n                   fieldPath: metadata.namespace\n             - name: POD_NAME\n               valueFrom:\n                 fieldRef:\n                   apiVersion: v1\n                   fieldPath: metadata.name\n             - name: NODENAME\n               valueFrom:\n                 fieldRef:\n                   apiVersion: v1\n                   fieldPath: spec.nodeName\n             - name: OPERATOR_NAME\n               value: local-disk-manager\n           resources: {}\n           volumeMounts:\n             - name: udev\n               mountPath: /run/udev\n             - name: procmount\n               readOnly: true\n               mountPath: /host/proc\n             - name: devmount\n               mountPath: /dev\n             - name: registration-dir\n               mountPath: /var/lib/kubelet/plugins_registry\n             - name: plugin-dir\n               mountPath: /var/lib/kubelet/plugins\n               mountPropagation: Bidirectional\n             - name: pods-mount-dir\n               mountPath: /var/lib/kubelet/pods\n               mountPropagation: Bidirectional\n           terminationMessagePath: /dev/termination-log\n           terminationMessagePolicy: File\n           imagePullPolicy: IfNotPresent\n           securityContext:\n             privileged: true\n       restartPolicy: Always\n       terminationGracePeriodSeconds: 30\n       dnsPolicy: ClusterFirst\n       serviceAccountName: hwameistor-admin\n       serviceAccount: hwameistor-admin\n       hostNetwork: true\n       hostPID: true\n       securityContext: {}\n       schedulerName: default-scheduler\n       tolerations:\n         - key: CriticalAddonsOnly\n           operator: Exists\n         - key: node.kubernetes.io/not-ready\n           operator: Exists\n           effect: NoSchedule\n         - key: node-role.kubernetes.io/master\n           operator: Exists\n           effect: NoSchedule\n         - key: node-role.kubernetes.io/control-plane\n           operator: Exists\n           effect: NoSchedule\n         - key: node.cloudprovider.kubernetes.io/uninitialized\n           operator: Exists\n           effect: NoSchedule\n   updateStrategy:\n     type: RollingUpdate\n     rollingUpdate:\n       maxUnavailable: 1\n       maxSurge: 0\n   revisionHistoryLimit: 10\n status:\n   currentNumberScheduled: 4\n   numberMisscheduled: 0\n   desiredNumberScheduled: 4\n   numberReady: 4\n   observedGeneration: 1\n   updatedNumberScheduled: 4\n   numberAvailable: 4\n
                                                                                  "},{"location":"en/admin/kpanda/workloads/create-deployment.html","title":"Create Deployment","text":"

                                                                                  This page describes how to create deployments through images and YAML files.

                                                                                  Deployment is a common resource in Kubernetes. It mainly provides declarative updates for Pods and ReplicaSets, and supports elastic scaling, rolling upgrades, and version rollback. Declare the desired Pod state in the Deployment, and the Deployment Controller will modify the current state through the ReplicaSet until it reaches the declared desired state. A Deployment is stateless and does not support data persistence, so it is suitable for deploying stateless applications that do not need to save data and can be restarted and rolled back at any time.

                                                                                  Through the container management module of AI platform, workloads on multiple clouds and clusters can be easily managed based on proper role permissions, covering the full life cycle of deployments: creation, update, deletion, elastic scaling, restart, and version rollback.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-deployment.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before using image to create deployments, the following prerequisites need to be met:

                                                                                  • In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and be able to access the cluster UI.

                                                                                  • Create a namespace and a user.

                                                                                  • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                                                                  • When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict, otherwise the deployment will fail.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-deployment.html#create-by-image","title":"Create by image","text":"

                                                                                  Follow the steps below to create a deployment by image.

                                                                                  1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the Cluster Details page.

                                                                                  2. On the cluster details page, click Workloads -> Deployments in the left navigation bar, and then click the Create by Image button in the upper right corner of the page.

                                                                                  3. Fill in Basic Information, Container Setting, Service Setting, Advanced Setting in turn, click OK in the lower right corner of the page to complete the creation.

                                                                                    The system will automatically return the list of Deployments. Click \u2507 on the right side of the list to perform operations such as update, delete, elastic scaling, restart, and version rollback on the workload. If the workload status is abnormal, check the specific error message by referring to Workload Status.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-deployment.html#basic-information","title":"Basic information","text":"
                                                                                  • Workload Name: can contain up to 63 characters, can only contain lowercase letters, numbers, and a separator (\"-\"), and must start and end with a lowercase letter or number, such as deployment-01. The name of the same type of workload in the same namespace cannot be repeated, and the name of the workload cannot be changed after the workload is created.
                                                                                  • Namespace: Select the namespace where the newly created workload will be deployed. The default namespace is used by default. If you can't find the desired namespace, you can go to Create a new namespace according to the prompt on the page.
                                                                                  • Pods: Enter the number of Pod instances for the workload; one Pod instance is created by default.
                                                                                  • Description: Enter the description information of the workload and customize the content. The number of characters cannot exceed 512.
                                                                                  "},{"location":"en/admin/kpanda/workloads/create-deployment.html#container-settings","title":"Container settings","text":"

                                                                                  Container setting is divided into six parts: basic information, life cycle, health check, environment variables, data storage, and security settings. Click the tab below to view the requirements of each part.

                                                                                  The container settings apply to a single container. To add more containers to a pod, click + on the right.

                                                                                  Basic Information (Required)Lifecycle (optional)Health Check (optional)Environment variables (optional)Data storage (optional)Security settings (optional)

                                                                                  When configuring container-related parameters, it is essential to correctly fill in the container name and image parameters; otherwise, you will not be able to proceed to the next step. After filling in the configuration according to the following requirements, click OK.

                                                                                  • Container Type: The default is Work Container. For information on init containers, see the [K8s Official Documentation](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/); a short sketch follows after this list.
                                                                                  • Container Name: No more than 63 characters, supporting lowercase letters, numbers, and separators (\"-\"). It must start and end with a lowercase letter or number, for example, nginx-01.
                                                                                  • Image:
                                                                                    • Image: Select an appropriate image from the list. When entering the image name, the default is to pull the image from the official DockerHub.
                                                                                    • Image Version: Select an appropriate version from the dropdown list.
                                                                                    • Image Pull Policy: By checking Always pull the image, the image will be pulled from the repository each time the workload restarts/upgrades. If unchecked, it will only pull the local image, and will pull from the repository only if the image does not exist locally. For more details, refer to Image Pull Policy.
                                                                                    • Registry Secret: Optional. If the target repository requires a Secret to access, you need to create a secret first.
                                                                                  • Privileged Container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and has all the privileges of running processes on the host.
                                                                                  • CPU/Memory Request: The request value (the minimum resource needed) and the limit value (the maximum resource allowed) for CPU/memory resources. Configure resources for the container as needed to avoid resource waste and system failures caused by container resource overages. Default values are shown in the figure.
                                                                                  • GPU Configuration: Configure GPU usage for the container, supporting only positive integers. The GPU quota setting supports configuring the container to exclusively use an entire GPU or part of a vGPU. For example, for a GPU with 8 cores, entering the number 8 means the container exclusively uses the entire card, and entering the number 1 means configuring 1 core of the vGPU for the container.

                                                                                  Before setting the GPU, the administrator needs to pre-install the GPU and driver plugin on the cluster node and enable the GPU feature in the Cluster Settings.
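
                                                                                  For the init container type mentioned in the list above, here is a minimal sketch of an init container that runs before the work container (the image, command, and mydb service name are illustrative assumptions):

                                                                                  spec:\n  initContainers:\n    - name: wait-for-db            # hypothetical init container\n      image: busybox:1.28\n      command: ['sh', '-c', 'until nslookup mydb; do sleep 2; done']\n  containers:\n    - name: nginx-01               # work container starts after all init containers succeed\n      image: nginx:1.14.2\n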

                                                                                  Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle Setting.

                                                                                  It is used to judge the health status of containers and applications, which helps to improve the availability of applications. For details, refer to Container Health Check Setting.

                                                                                  Configure container parameters within the Pod, add environment variables or pass setting to the Pod, etc. For details, refer to Container environment variable setting.

                                                                                  Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage Setting.

                                                                                  Containers are securely isolated through Linux's built-in account authority isolation mechanism. You can limit container permissions by using account UIDs (digital identity tokens) with different permissions. For example, enter 0 to use the privileges of the root account.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-deployment.html#service-settings","title":"Service settings","text":"

                                                                                  Configure a Service for the deployment, so that the deployment can be accessed externally.

                                                                                  1. Click the Create Service button.

                                                                                  2. Refer to Create Service to configure service parameters.

                                                                                  3. Click OK and click Next .

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-deployment.html#advanced-settings","title":"Advanced settings","text":"

                                                                                  Advanced setting includes four parts: Network Settings, Upgrade Policy, Scheduling Policies, Labels and Annotations. You can click the tabs below to view the setting requirements of each part.

                                                                                  Network SettingsUpgrade PolicyScheduling PoliciesLabels and Annotations
                                                                                  1. For container NIC settings, refer to Workload Usage IP Pool
                                                                                  2. DNS settings

                                                                                  In some cases, the application will have redundant DNS queries. Kubernetes provides DNS-related setting options for applications, which can effectively reduce redundant DNS queries and increase business concurrency in certain cases.

                                                                                  • DNS Policy

                                                                                    • Default: The container uses the domain name resolution file pointed to by the kubelet --resolv-conf parameter. This setting can only resolve external domain names registered on the Internet, not cluster-internal domain names, and produces no invalid DNS queries.
                                                                                    • ClusterFirstWithHostNet: The application uses the domain name resolution file of the host it runs on.
                                                                                    • ClusterFirst: The application connects to Kube-DNS/CoreDNS.
                                                                                    • None: A new option introduced in Kubernetes v1.9 (Beta in v1.10). After setting it to None, dnsConfig must be set; the container's domain name resolution file is then generated entirely from the dnsConfig settings.
                                                                                  • Nameservers: fill in the address of the domain name server, such as 10.6.175.20 .

                                                                                  • Search domains: DNS search domain list for domain name query. When specified, the provided search domain list will be merged into the search field of the domain name resolution file generated based on dnsPolicy, and duplicate domain names will be deleted. Kubernetes allows up to 6 search domains.
                                                                                  • Options: Setting options for DNS, where each object can have a name attribute (required) and a value attribute (optional). The content in this field will be merged into the options field of the domain name resolution file generated based on dnsPolicy. If some options of dnsConfig options conflict with the options of the domain name resolution file generated based on dnsPolicy, they will be overwritten by dnsConfig.
                                                                                  • Host Alias: the alias set for the host.

                                                                                  • Upgrade Mode: Rolling upgrade refers to gradually replacing instances of the old version with instances of the new version. During the upgrade process, business traffic will be load-balanced to the old and new instances at the same time, so the business will not be interrupted. Rebuild and upgrade refers to deleting the workload instance of the old version first, and then installing the specified new version. During the upgrade process, the business will be interrupted.
                                                                                  • Max Unavailable: Specify the maximum value or ratio of unavailable pods during the workload update process, the default is 25%. If it is equal to the number of instances, there is a risk of service interruption.
                                                                                  • Max Surge: The maximum or ratio of the total number of Pods exceeding the desired replica count of Pods during a Pod update. Default is 25%.
                                                                                  • Revision History Limit: Set the number of old versions retained when the version is rolled back. The default is 10.
                                                                                  • Minimum Ready: The minimum time for a Pod to be ready. Only after this time is the Pod considered available. The default is 0 seconds.
                                                                                  • Upgrade Max Duration: If the deployment is not successful after the set time, the workload will be marked as failed. Default is 600 seconds.
                                                                                  • Graceful Period: The execution period (0-9,999 seconds) of the command before the workload stops, the default is 30 seconds.

                                                                                  • Toleration time: When the node where the workload instance is located is unavailable, the time for rescheduling the workload instance to other available nodes, the default is 300 seconds.
                                                                                  • Node Affinity: According to the label on the node, constrain which nodes the Pod can be scheduled on.
                                                                                  • Workload Affinity: Constrains which nodes a Pod can be scheduled to based on the labels of the Pods already running on the node.
                                                                                  • Workload Anti-affinity: Constrains which nodes a Pod cannot be scheduled to based on the labels of Pods already running on the node.

                                                                                  For details, refer to Scheduling Policy.

                                                                                  You can click the Add button to add labels and annotations to workloads and pods.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-deployment.html#create-from-yaml","title":"Create from YAML","text":"

                                                                                  In addition to creating from an image, you can also create deployments more quickly from YAML files.

                                                                                  1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the Cluster Details page.

                                                                                  2. On the cluster details page, click Workloads -> Deployments in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

                                                                                  3. Enter or paste the YAML file prepared in advance, click OK to complete the creation.

                                                                                  Click to see an example YAML for creating a deployment
                                                                                   apiVersion: apps/v1\n kind: Deployment\n metadata:\n   name: nginx-deployment\n spec:\n   selector:\n     matchLabels:\n       app: nginx\n   replicas: 2 # (1)!\n   template:\n     metadata:\n       labels:\n         app: nginx\n     spec:\n       containers:\n       - name: nginx\n         image: nginx:1.14.2\n         ports:\n         - containerPort: 80\n
                                                                                  1. Tell the Deployment to run 2 Pods that match this template
                                                                                  "},{"location":"en/admin/kpanda/workloads/create-job.html","title":"Create Job","text":"

                                                                                  This page introduces how to create a job through image and YAML file.

                                                                                  Job is suitable for performing one-off tasks. A Job creates one or more Pods, and it keeps retrying to run Pods until the specified number of Pods terminates successfully; the Job then ends. When a Job is deleted, all Pods created by the Job are cleaned up. When a Job is suspended, all its active Pods are deleted until the Job is resumed. For more information about jobs, refer to Job.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-job.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and be able to access the cluster UI.

                                                                                  • Create a namespace and a user.

                                                                                  • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                                                                  • When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict, otherwise the deployment will fail.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-job.html#create-by-image","title":"Create by image","text":"

                                                                                  Refer to the following steps to create a job using an image.

                                                                                  1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                                                                  2. On the cluster details page, click Workloads -> Jobs in the left navigation bar, and then click the Create by Image button in the upper right corner of the page.

                                                                                  3. Fill in Basic Information, Container Settings and Advanced Settings, click OK in the lower right corner of the page to complete the creation.

                                                                                    The system will automatically return to the job list. Click \u2507 on the right side of the list to perform operations such as updating, deleting, and restarting the job.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-job.html#basic-information","title":"Basic information","text":"

                                                                                  On the Create Jobs page, enter the basic information according to the table below, and click Next .

                                                                                  • Workload Name: Can contain up to 63 characters, can only contain lowercase letters, numbers, and a separator (\"-\"), and must start and end with a lowercase letter or number. The name of the same type of workload in the same namespace cannot be repeated, and the name of the workload cannot be changed after the workload is created.
                                                                                  • Namespace: Select which namespace to deploy the newly created job in, and the default namespace is used by default. If you can't find the desired namespace, you can go to Create a new namespace according to the prompt on the page.
                                                                                  • Number of Instances: Enter the number of Pod instances for the workload. By default, 1 Pod instance is created.
                                                                                  • Description: Enter the description information of the workload and customize the content. The number of characters should not exceed 512.
                                                                                  "},{"location":"en/admin/kpanda/workloads/create-job.html#container-settings","title":"Container settings","text":"

                                                                                  Container setting is divided into six parts: basic information, life cycle, health check, environment variables, data storage, and security settings. Click the tab below to view the setting requirements of each part.

                                                                                  The container settings apply to a single container. To add more containers to a pod, click + on the right.

                                                                                  Basic information (required)Lifecycle (optional)Health Check (optional)Environment Variables (optional)Data Storage (optional)Security Settings (optional)

                                                                                  When configuring container-related parameters, you must correctly fill in the container name and image parameters, otherwise you will not be able to proceed to the next step. After filling in the settings with reference to the following requirements, click OK .

                                                                                  • Container Name: Up to 63 characters, lowercase letters, numbers and separators (\"-\") are supported. Must start and end with a lowercase letter or number, eg nginx-01.
                                                                                  • Image: Enter the address or name of the image. When entering the image name, the image will be pulled from the official DockerHub by default.
                                                                                  • Image Pull Policy: After checking Always pull image , the image will be pulled from the registry every time the workload restarts/upgrades. If it is not checked, only the local image will be pulled, and only when the image does not exist locally, it will be re-pulled from the container registry. For more details, refer to Image Pull Policy.
                                                                                  • Privileged container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and enjoy all the permissions of the running process on the host.
                                                                                  • CPU/Memory Quota: Requested value (minimum resource to be used) and limit value (maximum resource allowed to be used) of CPU/Memory resource. Please configure resources for containers as needed to avoid resource waste and system failures caused by excessive container resources. The default value is shown in the figure.
                                                                                  • GPU Exclusive: Configure the GPU usage for the container; only positive integers are supported. The GPU quota supports setting the container to exclusively use an entire GPU or part of a vGPU. For example, for an 8-core GPU, enter 8 to let the container exclusively use the entire card, or enter 1 to configure a 1-core vGPU for the container.

                                                                                  Before setting exclusive GPU, the administrator needs to install the GPU and driver plug-in on the cluster nodes in advance, and enable the GPU feature in Cluster Settings.

                                                                                  Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle settings.

                                                                                  It is used to judge the health status of containers and applications, which helps to improve the availability of applications. For details, refer to Container Health Check settings.

                                                                                  Configure container parameters within the Pod, add environment variables or pass settings to the Pod, etc. For details, refer to Container environment variable settings.

                                                                                  Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage settings.

                                                                                  Containers are securely isolated through Linux's built-in account authority isolation mechanism. You can limit container permissions by using account UIDs (digital identity tokens) with different permissions. For example, enter 0 to use the privileges of the root account.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-job.html#advanced-settings","title":"Advanced settings","text":"

                                                                                  Advanced setting includes job settings, labels and annotations.

                                                                                  Job SettingsLabels and Annotations

                                                                                  • Parallel Pods: the maximum number of Pods that can be created at the same time during job execution, and the parallel number should not be greater than the total number of Pods. Default is 1.
                                                                                  • Timeout: When this time is exceeded, the job will be marked as failed to execute, and all Pods under the job will be deleted. When it is empty, it means that no timeout is set.
                                                                                  • Restart Policy: Whether to restart the Pod when it fails; see the sketch after this list.
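
                                                                                  These options correspond to the Job spec fields parallelism and activeDeadlineSeconds and the Pod template's restartPolicy, for example:

                                                                                  spec:\n  parallelism: 1               # Parallel Pods\n  activeDeadlineSeconds: 360   # Timeout; omit the field to set no timeout\n  template:\n    spec:\n      restartPolicy: Never     # or OnFailure\n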

                                                                                  You can click the Add button to add labels and annotations to the workload instance Pod.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-job.html#create-from-yaml","title":"Create from YAML","text":"

                                                                                  In addition to creating from an image, jobs can also be created more quickly from YAML files.

                                                                                  1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                                                                  2. On the cluster details page, click Workloads -> Jobs in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

                                                                                  3. Enter or paste the YAML file prepared in advance, click OK to complete the creation.

                                                                                  Click to view the complete YAML
                                                                                  kind: Job\napiVersion: batch/v1\nmetadata:\n  name: demo\n  namespace: default\n  uid: a9708239-0358-4aa1-87d3-a092c080836e\n  resourceVersion: '92751876'\n  generation: 1\n  creationTimestamp: '2022-12-26T10:52:22Z'\n  labels:\n    app: demo\n    controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n    job-name: demo\n  annotations:\n    revisions: >-\n      {\"1\":{\"status\":\"running\",\"uid\":\"a9708239-0358-4aa1-87d3-a092c080836e\",\"start-time\":\"2022-12-26T10:52:22Z\",\"completion-time\":\"0001-01-01T00:00:00Z\"}}\nspec:\n  parallelism: 1\n  backoffLimit: 6\n  selector:\n    matchLabels:\n      controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: demo\n        controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n        job-name: demo\n    spec:\n      containers:\n        - name: container-4\n          image: nginx\n          resources:\n            limits:\n              cpu: 250m\n              memory: 512Mi\n            requests:\n              cpu: 250m\n              memory: 512Mi\n          lifecycle: {}\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            privileged: false\n      restartPolicy: Never\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      securityContext: {}\n      schedulerName: default-scheduler\n  completionMode: NonIndexed\n  suspend: false\nstatus:\n  startTime: '2022-12-26T10:52:22Z'\n  active: 1\n
                                                                                  "},{"location":"en/admin/kpanda/workloads/create-statefulset.html","title":"Create StatefulSet","text":"

                                                                                  This page describes how to create a StatefulSet through image and YAML files.

                                                                                  StatefulSet, like Deployment, is a common resource in Kubernetes, mainly used to manage the deployment and scaling of a collection of Pods. The main difference between the two is that a Deployment is stateless and does not save data, while a StatefulSet is stateful and is mainly used to manage stateful applications. In addition, Pods in a StatefulSet have persistent IDs, which makes it easy to match each Pod with its proper storage volumes.
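
                                                                                  For reference, the following is a minimal StatefulSet sketch with a per-Pod persistent volume (the headless Service name, image, and storage size are illustrative assumptions):

                                                                                  apiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: web\nspec:\n  serviceName: web-headless    # hypothetical headless Service\n  replicas: 2\n  selector:\n    matchLabels:\n      app: web\n  template:\n    metadata:\n      labels:\n        app: web\n    spec:\n      containers:\n        - name: nginx\n          image: nginx:1.14.2\n          volumeMounts:\n            - name: data\n              mountPath: /usr/share/nginx/html\n  volumeClaimTemplates:          # each Pod (web-0, web-1) gets its own PVC\n    - metadata:\n        name: data\n      spec:\n        accessModes: [ \"ReadWriteOnce\" ]\n        resources:\n          requests:\n            storage: 1Gi\n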

Through the container management module of AI platform, you can easily manage workloads across multiple clouds and clusters based on role permissions, covering the full lifecycle of a StatefulSet: creation, update, deletion, elastic scaling, restart, version rollback, and more.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-statefulset.html#prerequisites","title":"Prerequisites","text":"

Before creating a StatefulSet from an image, the following prerequisites need to be met:

• In the Container Management module, integrate a Kubernetes cluster or create one, and make sure the cluster UI is accessible.

                                                                                  • Create a namespace and a user.

• The current user should have NS Editor or higher permissions. For details, refer to Namespace Authorization.

• When a single instance contains multiple containers, make sure the ports used by the containers do not conflict; otherwise the deployment will fail.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-statefulset.html#create-by-image","title":"Create by image","text":"

Follow the steps below to create a StatefulSet from an image.

                                                                                  1. Click Clusters on the left navigation bar, then click the name of the target cluster to enter Cluster Details.

                                                                                  2. Click Workloads -> StatefulSets in the left navigation bar, and then click the Create by Image button in the upper right corner.

                                                                                  3. Fill in Basic Information, Container Settings, Service Settings, Advanced Settings, click OK in the lower right corner of the page to complete the creation.

The system will automatically return to the StatefulSet list and wait for the workload status to become running. If the workload status is abnormal, refer to Workload Status for the specific exception information.

Click \u2507 on the right side of the workload entry to perform operations such as update, delete, elastic scaling, restart, and version rollback on the workload.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-statefulset.html#basic-information","title":"Basic Information","text":"
• Workload Name: can contain up to 63 characters; only lowercase letters, numbers, and the separator (\"-\") are allowed, and it must start and end with a lowercase letter or number, such as deployment-01. Workloads of the same type in the same namespace cannot share a name, and the name cannot be changed after the workload is created.
• Namespace: select the namespace where the new workload will be deployed; the default namespace is used by default. If you cannot find the desired namespace, you can create a new one by following the prompt on the page.
• Pods: enter the number of Pod instances for the workload; one instance is created by default.
• Description: enter a description of the workload; the content is free-form and cannot exceed 512 characters.
                                                                                  "},{"location":"en/admin/kpanda/workloads/create-statefulset.html#container-settings","title":"Container settings","text":"

Container settings are divided into six parts: basic information, lifecycle, health check, environment variables, data storage, and security settings. Click the tabs below to view the requirements of each part.

Container settings apply to a single container. To add multiple containers to a Pod, click + on the right.

Basic information (required) | Lifecycle (optional) | Health Check (optional) | Environment Variables (optional) | Data Storage (optional) | Security Settings (optional)

When configuring container-related parameters, you must correctly fill in the container name and image; otherwise you will not be able to proceed to the next step. After completing the settings according to the following requirements, click OK.

• Container Name: up to 63 characters; lowercase letters, numbers, and separators (\"-\") are supported. Must start and end with a lowercase letter or number, e.g. nginx-01.
                                                                                  • Image: Enter the address or name of the image. When entering the image name, the image will be pulled from the official DockerHub by default.
• Image Pull Policy: if Always pull image is checked, the image is pulled from the registry every time the workload restarts or upgrades. If unchecked, the local image is used first, and the image is re-pulled from the container registry only when it does not exist locally. For more details, refer to Image Pull Policy.
• Privileged container: by default, a container cannot access any device on the host. When privileged mode is enabled, the container can access all devices on the host and has the permissions of processes running on the host.
• CPU/Memory Quota: the request (minimum resources needed) and limit (maximum resources allowed) of CPU and memory. Configure resources for containers as needed to avoid resource waste and system failures caused by oversized quotas. Default values are shown on the form.
• GPU Exclusive: configure GPU usage for the container; only positive integers are supported. The GPU quota supports either exclusive use of a whole GPU card or part of one as vGPU. For example, for an 8-core GPU card, entering 8 gives the container exclusive use of the whole card, and entering 1 configures 1 vGPU core for the container.

Before setting an exclusive GPU, the administrator must install the GPU card and driver plugin on the cluster nodes in advance and enable the GPU feature in Cluster Settings.
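For reference, the GPU quota configured on this form ultimately takes effect as a container resource limit. The following is a minimal sketch, assuming the cluster exposes the common nvidia.com/gpu resource name; the actual resource name, container name, and image depend on the GPU plugin installed and are hypothetical here:

spec:\n  containers:\n    - name: cuda-demo  # hypothetical container name\n      image: nvidia/cuda:12.2.0-base-ubuntu22.04  # hypothetical image\n      resources:\n        limits:\n          nvidia.com/gpu: 1  # exclusive use of one GPU card; resource name is plugin-dependent\n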

                                                                                  Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle Configuration.

Health checks are used to assess the health of containers and applications, which helps improve application availability. For details, refer to Container Health Check Configuration.

                                                                                  Configure container parameters within the Pod, add environment variables or pass settings to the Pod, etc. For details, refer to Container environment variable settings.

                                                                                  Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage Configuration.

Containers are securely isolated through Linux's built-in account permission isolation mechanism. You can limit container permissions by specifying account UIDs (numeric identity tokens) with different permissions. For example, entering 0 uses the privileges of the root account.
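For illustration, the UID entered here corresponds to the runAsUser field in the container's securityContext. A minimal sketch, where UID 1000 is an arbitrary non-root example:

spec:\n  containers:\n    - name: app\n      image: nginx\n      securityContext:\n        runAsUser: 1000  # run as a non-root account; 0 would run as root\n        allowPrivilegeEscalation: false  # prevent gaining more privileges than the parent process\n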

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-statefulset.html#service-settings","title":"Service settings","text":"

Configure a Service for the StatefulSet so that it can be accessed externally.

                                                                                  1. Click the Create Service button.

                                                                                  2. Refer to Create Service to configure service parameters.

3. Click OK and then click Next.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-statefulset.html#advanced-settings","title":"Advanced settings","text":"

Advanced settings include network settings, upgrade policy, container management policy, scheduling policy, and labels and annotations. Click the tabs below to view the requirements of each part.

Network Configuration | Upgrade Policy | Container Management Policies | Scheduling Policies | Labels and Annotations
                                                                                  1. For container NIC settings, refer to Workload Usage IP Pool
                                                                                  2. DNS settings

In some cases, an application generates redundant DNS queries. Kubernetes provides DNS-related configuration options that can effectively reduce redundant DNS queries and improve service concurrency in such cases; a sketch follows the list below.

                                                                                  • DNS Policy

• Default: the container uses the domain name resolution file pointed to by the kubelet --resolv-conf parameter. This configuration can only resolve external domain names registered on the Internet, not cluster-internal domain names, and produces no invalid DNS queries.
• ClusterFirstWithHostNet: for Pods running with the host network, use the cluster DNS first.
• ClusterFirst: the Pod uses the cluster DNS service (Kube-DNS/CoreDNS) first.
• None: a new option introduced in Kubernetes v1.9 (Beta in v1.10). When set to None, dnsConfig must be set; the container's domain name resolution file is then generated entirely from the dnsConfig settings.
• Nameservers: fill in the addresses of the DNS servers, such as 10.6.175.20.

• Search domains: the DNS search domain list for domain name queries. When specified, the provided list is merged into the search field of the resolution file generated from dnsPolicy, with duplicate domain names removed. Kubernetes allows up to 6 search domains.
• Options: configuration options for DNS; each object may have a name attribute (required) and a value attribute (optional). The content of this field is merged into the options field of the resolution file generated from dnsPolicy; entries in dnsConfig that conflict with options generated from dnsPolicy are overridden by dnsConfig.
• Host Alias: host aliases added to the Pod's hosts file.
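These options map to the Pod's dnsPolicy and dnsConfig fields. A minimal sketch using the None policy, where dnsConfig fully defines the resolution file; the server address reuses the example above, and the search domain is hypothetical:

spec:\n  dnsPolicy: None  # dnsConfig below fully generates the resolution file\n  dnsConfig:\n    nameservers:\n      - 10.6.175.20\n    searches:\n      - my.dns.search.suffix  # hypothetical search domain\n    options:\n      - name: ndots\n        value: '2'\n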

• Upgrade Mode: a rolling upgrade gradually replaces old-version instances with new-version ones; during the upgrade, traffic is load-balanced across old and new instances, so the service is not interrupted. A rebuild upgrade deletes the old-version instances first and then installs the specified new version; the service is interrupted during the upgrade.
                                                                                  • Revision History Limit: Set the number of old versions retained when the version is rolled back. The default is 10.
• Graceful Period: the time window (0-9,999 seconds) for commands to run before the workload stops; the default is 30 seconds.
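In YAML terms, these settings correspond to the StatefulSet's updateStrategy and revisionHistoryLimit fields and the Pod template's terminationGracePeriodSeconds. A minimal sketch with the defaults described above:

spec:\n  updateStrategy:\n    type: RollingUpdate  # replace instances gradually without interrupting the service\n    rollingUpdate:\n      partition: 0\n  revisionHistoryLimit: 10  # number of old revisions kept for rollback\n  template:\n    spec:\n      terminationGracePeriodSeconds: 30  # graceful period before the Pod is force-killed\n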

In Kubernetes v1.7 and later, the Pod management policy can be set via .spec.podManagementPolicy, which supports the following two modes (see the sketch after the list):

• OrderedReady: the default Pod management policy, meaning Pods are deployed in order; the StatefulSet deploys the next Pod only after the previous one has been successfully deployed. Pods are deleted in reverse order, the last created being deleted first.

• Parallel: create or delete Pods in parallel, just like Pods of a Deployment. The StatefulSet controller starts or terminates all Pods in parallel, without waiting for a Pod to become Running and Ready, or to stop completely, before starting or terminating others. This option only affects the behavior of scaling operations, not the order of updates.
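A minimal sketch of setting the policy on a StatefulSet spec:

spec:\n  serviceName: mysql  # headless Service governing the StatefulSet\n  replicas: 3\n  podManagementPolicy: Parallel  # or OrderedReady (the default)\n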

• Tolerance time: when the node hosting a workload instance becomes unavailable, the time after which the instance is rescheduled to other available nodes; the default is 300 seconds.
• Node affinity: constrains which nodes the Pod can be scheduled on, based on node labels.
                                                                                  • Workload Affinity: Constrains which nodes a Pod can be scheduled to based on the labels of the Pods already running on the node.
                                                                                  • Workload anti-affinity: Constrains which nodes a Pod cannot be scheduled to based on the labels of Pods already running on the node.
• Topology domain: i.e. topologyKey, used to specify the scope of nodes that can be scheduled, grouped by a node label. For example, kubernetes.io/os means that as long as a node with a given operating system meets the labelSelector conditions, Pods can be scheduled to that node.

                                                                                  For details, refer to Scheduling Policy.

You can click the Add button to add labels and annotations to the workload and its Pods.

                                                                                  "},{"location":"en/admin/kpanda/workloads/create-statefulset.html#create-from-yaml","title":"Create from YAML","text":"

In addition to creating from an image, you can also create StatefulSets more quickly from YAML files.

                                                                                  1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the Cluster Details page.

                                                                                  2. On the cluster details page, click Workloads -> StatefulSets in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

3. Enter or paste the YAML file prepared in advance, then click OK to complete the creation.

Click to view an example YAML for creating a StatefulSet
kind: StatefulSet\napiVersion: apps/v1\nmetadata:\n  name: test-mysql-123-mysql\n  namespace: default\n  uid: d3f45527-a0ab-4b22-9013-5842a06f4e0e\n  resourceVersion: '20504385'\n  generation: 1\n  creationTimestamp: '2022-09-22T09:34:10Z'\n  ownerReferences:\n    - apiVersion: mysql.presslabs.org/v1alpha1\n      kind: MysqlCluster\n      name: test-mysql-123\n      uid: 5e877cc3-5167-49da-904e-820940cf1a6d\n      controller: true\n      blockOwnerDeletion: true\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app.kubernetes.io/managed-by: mysql.presslabs.org\n      app.kubernetes.io/name: mysql\n      mysql.presslabs.org/cluster: test-mysql-123\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app.kubernetes.io/component: database\n        app.kubernetes.io/instance: test-mysql-123\n        app.kubernetes.io/managed-by: mysql.presslabs.org\n        app.kubernetes.io/name: mysql\n        app.kubernetes.io/version: 5.7.31\n        mysql.presslabs.org/cluster: test-mysql-123\n      annotations:\n        config_rev: '13941099'\n        prometheus.io/port: '9125'\n        prometheus.io/scrape: 'true'\n        secret_rev: '13941101'\n    spec:\n      volumes:\n        - name: conf\n          emptyDir: {}\n        - name: init-scripts\n          emptyDir: {}\n        - name: config-map\n          configMap:\n            name: test-mysql-123-mysql\n            defaultMode: 420\n        - name: data\n          persistentVolumeClaim:\n            claimName: data\n      initContainers:\n        - name: init\n          image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - clone-and-init\n          envFrom:\n            - secretRef:\n                name: test-mysql-123-mysql-operated\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: BACKUP_USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: BACKUP_USER\n                  optional: true\n            - name: BACKUP_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: BACKUP_PASSWORD\n                  optional: true\n          resources: {}\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: config-map\n              mountPath: /mnt/conf\n            - name: data\n              mountPath: /var/lib/mysql\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: 
IfNotPresent\n      containers:\n        - name: mysql\n          image: docker.m.daocloud.io/mysql:5.7.31\n          ports:\n            - name: mysql\n              containerPort: 3306\n              protocol: TCP\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: ORCH_CLUSTER_ALIAS\n              value: test-mysql-123.default\n            - name: ORCH_HTTP_API\n              value: http://mysql-operator.mcamel-system/api\n            - name: MYSQL_ROOT_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: ROOT_PASSWORD\n                  optional: false\n            - name: MYSQL_USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: USER\n                  optional: true\n            - name: MYSQL_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: PASSWORD\n                  optional: true\n            - name: MYSQL_DATABASE\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: DATABASE\n                  optional: true\n          resources:\n            limits:\n              cpu: '1'\n              memory: 1Gi\n            requests:\n              cpu: 100m\n              memory: 512Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: data\n              mountPath: /var/lib/mysql\n          livenessProbe:\n            exec:\n              command:\n                - mysqladmin\n                - '--defaults-file=/etc/mysql/client.conf'\n                - ping\n            initialDelaySeconds: 60\n            timeoutSeconds: 5\n            periodSeconds: 5\n            successThreshold: 1\n            failureThreshold: 3\n          readinessProbe:\n            exec:\n              command:\n                - /bin/sh\n                - '-c'\n                - >-\n                  test $(mysql --defaults-file=/etc/mysql/client.conf -NB -e\n                  'SELECT COUNT(*) FROM sys_operator.status WHERE\n                  name=\"configured\" AND value=\"1\"') -eq 1\n            initialDelaySeconds: 5\n            timeoutSeconds: 5\n            periodSeconds: 2\n            successThreshold: 1\n            failureThreshold: 3\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - bash\n                  - /etc/mysql/pre-shutdown-ha.sh\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: sidecar\n  
        image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - config-and-serve\n          ports:\n            - name: sidecar-http\n              containerPort: 8080\n              protocol: TCP\n          envFrom:\n            - secretRef:\n                name: test-mysql-123-mysql-operated\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: XTRABACKUP_TARGET_DIR\n              value: /tmp/xtrabackup_backupfiles/\n          resources:\n            limits:\n              cpu: '1'\n              memory: 1Gi\n            requests:\n              cpu: 10m\n              memory: 64Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: data\n              mountPath: /var/lib/mysql\n          readinessProbe:\n            httpGet:\n              path: /health\n              port: 8080\n              scheme: HTTP\n            initialDelaySeconds: 30\n            timeoutSeconds: 5\n            periodSeconds: 5\n            successThreshold: 1\n            failureThreshold: 3\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: metrics-exporter\n          image: prom/mysqld-exporter:v0.13.0\n          args:\n            - '--web.listen-address=0.0.0.0:9125'\n            - '--web.telemetry-path=/metrics'\n            - '--collect.heartbeat'\n            - '--collect.heartbeat.database=sys_operator'\n          ports:\n            - name: prometheus\n              containerPort: 9125\n              protocol: TCP\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: METRICS_EXPORTER_USER\n                  optional: false\n            - name: PASSWORD\n              valueFrom:\n                secretKeyRef:\n           
       name: test-mysql-123-mysql-operated\n                  key: METRICS_EXPORTER_PASSWORD\n                  optional: false\n            - name: DATA_SOURCE_NAME\n              value: $(USER):$(PASSWORD)@(127.0.0.1:3306)/\n          resources:\n            limits:\n              cpu: 100m\n              memory: 128Mi\n            requests:\n              cpu: 10m\n              memory: 32Mi\n          livenessProbe:\n            httpGet:\n              path: /metrics\n              port: 9125\n              scheme: HTTP\n            initialDelaySeconds: 30\n            timeoutSeconds: 30\n            periodSeconds: 30\n            successThreshold: 1\n            failureThreshold: 3\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: pt-heartbeat\n          image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - pt-heartbeat\n            - '--update'\n            - '--replace'\n            - '--check-read-only'\n            - '--create-table'\n            - '--database'\n            - sys_operator\n            - '--table'\n            - heartbeat\n            - '--utc'\n            - '--defaults-file'\n            - /etc/mysql/heartbeat.conf\n            - '--fail-successive-errors=20'\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n          resources:\n            limits:\n              cpu: 100m\n              memory: 64Mi\n            requests:\n              cpu: 10m\n              memory: 32Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n      restartPolicy: Always\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      securityContext:\n        runAsUser: 999\n        fsGroup: 999\n      affinity:\n        podAntiAffinity:\n          preferredDuringSchedulingIgnoredDuringExecution:\n            - weight: 100\n              podAffinityTerm:\n                labelSelector:\n                  matchLabels:\n                    app.kubernetes.io/component: database\n                    app.kubernetes.io/instance: test-mysql-123\n                    app.kubernetes.io/managed-by: mysql.presslabs.org\n                    app.kubernetes.io/name: mysql\n                    app.kubernetes.io/version: 5.7.31\n                    mysql.presslabs.org/cluster: test-mysql-123\n                topologyKey: kubernetes.io/hostname\n      schedulerName: default-scheduler\n  volumeClaimTemplates:\n    - kind: PersistentVolumeClaim\n      apiVersion: v1\n      metadata:\n        name: data\n     
   creationTimestamp: null\n        ownerReferences:\n          - apiVersion: mysql.presslabs.org/v1alpha1\n            kind: MysqlCluster\n            name: test-mysql-123\n            uid: 5e877cc3-5167-49da-904e-820940cf1a6d\n            controller: true\n      spec:\n        accessModes:\n          - ReadWriteOnce\n        resources:\n          limits:\n            storage: 1Gi\n          requests:\n            storage: 1Gi\n        storageClassName: local-path\n        volumeMode: Filesystem\n      status:\n        phase: Pending\n  serviceName: mysql\n  podManagementPolicy: OrderedReady\n  updateStrategy:\n    type: RollingUpdate\n    rollingUpdate:\n      partition: 0\n  revisionHistoryLimit: 10\nstatus:\n  observedGeneration: 1\n  replicas: 1\n  readyReplicas: 1\n  currentReplicas: 1\n  updatedReplicas: 1\n  currentRevision: test-mysql-123-mysql-6b8f5577c7\n  updateRevision: test-mysql-123-mysql-6b8f5577c7\n  collisionCount: 0\n  availableReplicas: 1\n
                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/env-variables.html","title":"Configure environment variables","text":"

An environment variable is a variable set in the container's runtime environment, used to attach environment flags to a Pod or pass configuration, etc. Environment variables are configured for Pods in the form of key-value pairs.

Suanova container management adds a graphical interface for configuring Pod environment variables on top of native Kubernetes, and supports the following configuration methods (see the example after this list):

                                                                                  • Key-value pair (Key/Value Pair): Use a custom key-value pair as the environment variable of the container

• Resource reference (Resource): use a field defined by the container as the value of an environment variable, such as the container's memory limit or CPU request.

                                                                                  • Variable/Variable Reference (Pod Field): Use the Pod field as the value of an environment variable, such as the name of the Pod

                                                                                  • ConfigMap key value import (ConfigMap key): Import the value of a key in the ConfigMap as the value of an environment variable

• Secret key import (Secret Key): import the value of a key in a Secret as the value of an environment variable

• Secret import (Secret): import all key-value pairs in a Secret as environment variables

• ConfigMap import (ConfigMap): import all key-value pairs in a ConfigMap as environment variables
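The methods above correspond to the standard Kubernetes env and envFrom fields. A minimal sketch covering each form, where the ConfigMap my-config and Secret my-secret are hypothetical names:

spec:\n  containers:\n    - name: app\n      image: nginx\n      env:\n        - name: MODE  # key-value pair\n          value: production\n        - name: MEM_LIMIT  # resource reference\n          valueFrom:\n            resourceFieldRef:\n              containerName: app\n              resource: limits.memory\n        - name: POD_NAME  # Pod field reference\n          valueFrom:\n            fieldRef:\n              fieldPath: metadata.name\n        - name: LOG_LEVEL  # ConfigMap key import\n          valueFrom:\n            configMapKeyRef:\n              name: my-config\n              key: log_level\n        - name: DB_PASSWORD  # Secret key import\n          valueFrom:\n            secretKeyRef:\n              name: my-secret\n              key: password\n      envFrom:\n        - configMapRef:\n            name: my-config  # import all keys in the ConfigMap\n        - secretRef:\n            name: my-secret  # import all keys in the Secret\n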

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/health-check.html","title":"Container health check","text":"

Container health checks assess the health of containers according to the configured rules. Once configured, if the application in a container becomes abnormal, the container is automatically restarted and recovers. Kubernetes provides liveness checks, readiness checks, and startup checks.

• LivenessProbe can detect application deadlock (the application is running but cannot make further progress). Restarting a container in this state helps improve application availability, even when the application contains bugs.

                                                                                  • ReadinessProbe can detect when a container is ready to accept request traffic. A Pod can only be considered ready when all containers in a Pod are ready. One use of this signal is to control which Pod is used as the backend of the Service. If the Pod is not ready, it will be removed from the Service's load balancer.

• Startup check (StartupProbe) indicates when the application inside the container has started. Once configured, liveness and readiness checks begin only after startup succeeds, ensuring they do not interfere with application startup. This is useful for performing liveness checks on slow-starting containers, preventing them from being killed before they are up and running.

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/health-check.html#liveness-and-readiness-checks","title":"Liveness and readiness checks","text":"

The configuration of LivenessProbe is similar to that of ReadinessProbe; the only difference is using the readinessProbe field instead of the livenessProbe field.

                                                                                  HTTP GET parameter description:

• Path (path): the request path to access, such as /healthz in the example.
• Port (port): the port the service listens on, such as 8080 in the example.
• Protocol: the access protocol, HTTP or HTTPS.
• Delay time (initialDelaySeconds): the delay before checking starts, in seconds. This setting relates to the normal startup time of the application; for example, 30 means the health check begins 30 seconds after the container starts, leaving that time for the application to start.
• Timeout (timeoutSeconds): the timeout in seconds. For example, 10 means the health check waits up to 10 seconds; if this time is exceeded, the check is regarded as failed. If set to 0 or unset, the default timeout is 1 second.
• Success threshold (successThreshold): the minimum number of consecutive successes after a failure for the probe to be considered successful. The default and minimum value is 1; this value must be 1 for liveness and startup probes.
• Failure threshold (failureThreshold): the number of retries when the probe fails. Giving up on a liveness probe means restarting the container; Pods given up on by a readiness probe are marked as not ready. The default value is 3, the minimum 1.
"},{"location":"en/admin/kpanda/workloads/pod-config/health-check.html#check-with-http-get-request","title":"Check with HTTP GET request","text":"

                                                                                  YAML example:

                                                                                  apiVersion: v1\nkind: Pod\nmetadata:\n  labels:\n    test: liveness\n  name: liveness-http\nspec:\n  containers:\n  - name: liveness  # Container name\n    image: k8s.gcr.io/liveness  # Container image\n    args:\n    - /server  # Arguments to pass to the container\n    livenessProbe:\n      httpGet:\n        path: /healthz  # Access request path\n        port: 8080  # Service listening port\n        httpHeaders:\n        - name: Custom-Header  # Custom header name\n          value: Awesome  # Custom header value\n      initialDelaySeconds: 3  # Wait 3 seconds before the first probe\n      periodSeconds: 3  # Perform liveness detection every 3 seconds\n

According to the configured rules, the kubelet sends an HTTP GET request to the service running in the container (listening on port 8080) to perform the probe. If the handler for the /healthz path on the server returns a success code, the kubelet considers the container alive and healthy; if it returns a failure code, the kubelet kills the container and restarts it. Any return code greater than or equal to 200 and less than 400 indicates success; any other code indicates failure. In this example, the /healthz handler returns a 200 status code for the first 10 seconds of the container's lifetime, and afterwards returns 500.

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/health-check.html#use-tcp-port-check","title":"Use TCP port check","text":"

                                                                                  TCP port parameter description:

• Port (port): the port the service listens on, such as 8080 in the example.
• Delay time (initialDelaySeconds): the delay before checking starts, in seconds. This setting relates to the normal startup time of the application; for example, 30 means the health check begins 30 seconds after the container starts, leaving that time for the application to start.
• Timeout (timeoutSeconds): the timeout in seconds. For example, 10 means the health check waits up to 10 seconds; if this time is exceeded, the check is regarded as failed. If set to 0 or unset, the default timeout is 1 second.

For a container that provides TCP communication services, the cluster establishes a TCP connection to the container according to the configured rules. If the connection succeeds, the probe succeeds; otherwise it fails. When choosing TCP port probing, you must specify the port the container listens on.

                                                                                  YAML example:

                                                                                  apiVersion: v1\nkind: Pod\nmetadata:\n  name: goproxy\n  labels:\n    app: goproxy\nspec:\n  containers:\n  - name: goproxy\n    image: k8s.gcr.io/goproxy:0.1\n    ports:\n    - containerPort: 8080\n    readinessProbe:\n      tcpSocket:\n        port: 8080\n      initialDelaySeconds: 5\n      periodSeconds: 10\n    livenessProbe:\n      tcpSocket:\n        port: 8080\n      initialDelaySeconds: 15\n      periodSeconds: 20\n

This example uses both readiness and liveness probes. The kubelet sends the first readiness probe 5 seconds after the container starts, attempting to connect to port 8080 of the goproxy container. If the probe succeeds, the Pod is marked ready, and the kubelet continues to run the check every 10 seconds.

In addition to the readiness probe, this configuration includes a liveness probe. The kubelet performs the first liveness probe 15 seconds after the container starts. Like the readiness probe, it attempts to connect to port 8080 of the goproxy container. If the liveness probe fails, the container is restarted.

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/health-check.html#run-command-check","title":"Run command check","text":"

                                                                                  YAML example:

                                                                                  apiVersion: v1\nkind: Pod\nmetadata:\n  labels:\n    test: liveness\n  name: liveness-exec\nspec:\n  containers:\n  - name: liveness  # Container name\n    image: k8s.gcr.io/busybox  # Container image\n    args:\n    - /bin/sh  # Command to run\n    - -c  # Pass the following string as a command\n    - touch /tmp/healthy; sleep 30; rm -f /tmp/healthy; sleep 600  # Command to execute\n    livenessProbe:\n      exec:\n        command:\n        - cat  # Command to check liveness\n        - /tmp/healthy  # File to check\n      initialDelaySeconds: 5  # Wait 5 seconds before the first probe\n      periodSeconds: 5  # Perform liveness detection every 5 seconds\n

The periodSeconds field specifies that the kubelet performs a liveness probe every 5 seconds, and initialDelaySeconds tells the kubelet to wait 5 seconds before the first probe. According to these rules, the kubelet periodically runs the command cat /tmp/healthy in the container. If the command succeeds with return value 0, the kubelet considers the container healthy and alive; if it returns a non-zero value, the kubelet kills the container and restarts it.

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/health-check.html#protect-slow-starting-containers-with-pre-start-checks","title":"Protect slow-starting containers with pre-start checks","text":"

Some applications require a long initialization time at startup. In such cases, configure a startup probe using the same check as the liveness probe, and set failureThreshold * periodSeconds long enough to cover the worst-case startup time.

                                                                                  YAML example:

                                                                                  ports:\n- name: liveness-port\n  containerPort: 8080\n  hostPort: 8080\n\nlivenessProbe:\n  httpGet:\n    path: /healthz\n    port: liveness-port\n  failureThreshold: 1\n  periodSeconds: 10\n\nstartupProbe:\n  httpGet:\n    path: /healthz\n    port: liveness-port\n  failureThreshold: 30\n  periodSeconds: 10\n

With the settings above, the application has up to 5 minutes (30 * 10 = 300s) to finish starting. Once the startup probe succeeds, the liveness probe takes over and reacts quickly to container deadlock. If the startup probe never succeeds, the container is killed after 300 seconds and handled according to the restartPolicy.

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/job-parameters.html","title":"Description of job parameters","text":"

Depending on the settings of .spec.completions and .spec.parallelism, jobs (Job) can be divided into the following types:

• Non-parallel Job: creates one Pod until its Job completes successfully.
• Parallel Job with a deterministic completion count: the Job is considered complete when the number of successful Pods reaches .spec.completions.
• Parallel Job: creates one or more Pods until one finishes successfully.

Parameter description:

• restartPolicy: the restart policy of the Job's Pods; only Never or OnFailure are allowed for Jobs.
• .spec.completions: the number of Pods that must finish successfully for the Job to end; defaults to 1.
• .spec.parallelism: the number of Pods running in parallel; defaults to 1.
• .spec.backoffLimit: the maximum number of retries for failed Pods, beyond which no further retries are attempted.
• .spec.activeDeadlineSeconds: the maximum running time of the Pods. Once this time is reached, the Job, i.e. all of its Pods, is stopped. activeDeadlineSeconds has a higher priority than backoffLimit: a Job that reaches activeDeadlineSeconds ignores the backoffLimit setting.

                                                                                  The following is an example Job configuration, saved in myjob.yaml, which calculates \u03c0 to 2000 digits and prints the output.

                                                                                  apiVersion: batch/v1\nkind: Job #The type of the current resource\nmetadata:\n  name: myjob\nspec:\n  completions: 50 # Job needs to run 50 Pods at the end, in this example it prints \u03c0 50 times\n  parallelism: 5 # 5 Pods in parallel\n  backoffLimit: 5 # retry up to 5 times\n  template:\n    spec:\n      containers:\n      - name: pi\n        image: perl\n        command: [\"perl\", \"-Mbignum=bpi\", \"-wle\", \"print bpi(2000)\"]\n      restartPolicy: Never #restart policy\n

                                                                                  Related commands

kubectl apply -f myjob.yaml  # Start the job\nkubectl get job  # View this job\nkubectl logs myjob-1122dswzs  # View Job Pod logs\n
                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/lifecycle.html","title":"Configure the container lifecycle","text":"

Pods follow a predefined lifecycle, starting in the Pending phase and entering Running if at least one container in the Pod starts normally. If any container in the Pod ends in failure, the phase becomes Failed. The following phase values indicate which lifecycle phase a Pod is in.

• Pending: the Pod has been accepted by the system, but one or more containers have not yet been created or run. This phase includes waiting for the Pod to be scheduled and downloading images over the network.
• Running: the Pod has been bound to a node and all containers have been created. At least one container is still running, or is in the process of starting or restarting.
• Succeeded: all containers in the Pod terminated successfully and will not be restarted.
• Failed: all containers in the Pod have terminated, and at least one container terminated due to failure, i.e. exited with a non-zero status or was terminated by the system.
• Unknown: the status of the Pod cannot be obtained for some reason, usually due to a communication failure with the host where the Pod resides.

When creating a workload in Suanova container management, an image is usually used to specify the runtime environment in the container. By default, the Entrypoint and CMD fields of the image define the command and arguments run by the container. To change the image's default command and arguments before startup, after startup, or before stopping, you can override them by configuring the container's lifecycle event commands and parameters.

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/lifecycle.html#lifecycle-configuration","title":"Lifecycle configuration","text":"

                                                                                  Configure the startup command, post-start command, and pre-stop command of the container according to business needs.
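These commands map to the container's command/args fields and to the lifecycle postStart/preStop hooks. A minimal sketch, where the commands and paths are illustrative only:

spec:\n  containers:\n    - name: app\n      image: nginx\n      command: ['nginx']  # start command\n      args: ['-g', 'daemon off;']  # start parameters\n      lifecycle:\n        postStart:  # post-start command\n          exec:\n            command: ['/bin/sh', '-c', 'echo started > /tmp/started']\n        preStop:  # pre-stop command\n          httpGet:\n            path: /drain\n            port: 8080\n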

• Start command: optional. The container is started according to the start command.
• Post-start command: optional. A command that runs after the container starts.
• Pre-stop command: optional. A command executed when the container receives a stop instruction, ensuring that services running in the instance can be drained before the instance is upgraded or deleted.
"},{"location":"en/admin/kpanda/workloads/pod-config/lifecycle.html#start-command","title":"start command","text":"

                                                                                  Configure the startup command according to the table below.

• Run command: required. Enter an executable command, separating multiple commands with spaces; if a command itself contains spaces, wrap it in quotes (\"\"). When there are multiple commands, it is recommended to run them via /bin/sh or another shell, passing all other commands as arguments. Example: /run/server
• Running parameters: optional. Enter the arguments of the run command. Example: port=8080
"},{"location":"en/admin/kpanda/workloads/pod-config/lifecycle.html#post-start-commands","title":"Post-start commands","text":"

                                                                                  Suanova provides two processing types, command line script and HTTP request, to configure post-start commands. You can choose the configuration method that suits you according to the table below.

                                                                                  Command line script configuration

• Run command: optional. Enter an executable command, separating multiple commands with spaces; if a command itself contains spaces, wrap it in quotes (\"\"). When there are multiple commands, it is recommended to run them via /bin/sh or another shell, passing all other commands as arguments. Example: /run/server
• Running parameters: optional. Enter the arguments of the run command. Example: port=8080
"},{"location":"en/admin/kpanda/workloads/pod-config/lifecycle.html#stop-pre-command","title":"stop pre-command","text":"

                                                                                  Suanova provides two processing types, command line script and HTTP request, to configure the pre-stop command. You can choose the configuration method that suits you according to the table below.

                                                                                  HTTP request configuration

• URL Path: optional. The requested URL path. Example: /run/server
• Port: required. The requested port. Example: 8080
• Node Address: optional. The requested IP address; defaults to the IP of the node where the container is located.
"},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html","title":"Scheduling Policy","text":"

                                                                                  In a Kubernetes cluster, like many other Kubernetes objects, nodes have labels. You can manually add labels. Kubernetes also adds some standard labels to all nodes in the cluster. See Common Labels, Annotations, and Taints for common node labels. By adding labels to nodes, you can have pods scheduled on specific nodes or groups of nodes. You can use this feature to ensure that specific Pods can only run on nodes with certain isolation, security or governance properties.

                                                                                  nodeSelector is the simplest recommended form of a node selection constraint. You can add a nodeSelector field to the Pod's spec to set the node label. Kubernetes will only schedule pods on nodes with each label specified. nodeSelector provides one of the easiest ways to constrain Pods to nodes with specific labels. Affinity and anti-affinity expand the types of constraints you can define. Some benefits of using affinity and anti-affinity are:

                                                                                  • The affinity and anti-affinity language is more expressive. nodeSelector can only select nodes that have all the specified labels, while affinity and anti-affinity give you greater control over the selection logic.

                                                                                  • You can mark a rule as a \"soft requirement\" or \"preference\", so that the scheduler will still schedule the Pod even when no matching node can be found.

                                                                                  • You can use the labels of other Pods running on the node (or in other topological domains) to enforce scheduling constraints, instead of only using the labels of the node itself. This capability allows you to define rules which allow Pods to be placed together.

                                                                                  You can control which nodes a Pod is deployed to by setting affinity and anti-affinity rules.

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#tolerance-time","title":"Tolerance time","text":"

                                                                                  When the node where a workload instance is located becomes unavailable, this is the period the system waits before rescheduling the instance to another available node. The default is 300 seconds.
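
                                                                                  This setting corresponds to the tolerationSeconds field in the Pod spec; a minimal sketch:

                                                                                  tolerations:\n- key: \"node.kubernetes.io/unreachable\"\n  operator: \"Exists\"\n  effect: \"NoExecute\"\n  tolerationSeconds: 300   # evict and reschedule after 300 seconds of node unavailability\n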

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#node-affinity-nodeaffinity","title":"Node affinity (nodeAffinity)","text":"

                                                                                  Node affinity is conceptually similar to nodeSelector, allowing you to constrain which nodes Pods can be scheduled on based on the labels on the nodes. There are two types of node affinity:

                                                                                  • Must be satisfied: (requiredDuringSchedulingIgnoredDuringExecution) The scheduler can only schedule the Pod when the rules are satisfied. This functionality is similar to nodeSelector, but with a more expressive syntax. You can define multiple hard constraint rules, of which only one needs to be satisfied.

                                                                                  • Satisfy as much as possible: (preferredDuringSchedulingIgnoredDuringExecution) The scheduler will try to find nodes that match the rules. If no matching node is found, the scheduler will still schedule the Pod. You can also set weights for soft constraint rules: if multiple nodes meet the conditions during scheduling, the node with the highest weight is preferred. You can likewise define multiple soft constraint rules, of which only one needs to be satisfied.
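
                                                                                  A minimal sketch combining both types (the label keys, values, and weight are illustrative):

                                                                                  affinity:\n  nodeAffinity:\n    requiredDuringSchedulingIgnoredDuringExecution:   # hard constraint\n      nodeSelectorTerms:\n      - matchExpressions:\n        - key: disktype\n          operator: In\n          values: [\"ssd\"]\n    preferredDuringSchedulingIgnoredDuringExecution:  # soft constraint with weight\n    - weight: 80\n      preference:\n        matchExpressions:\n        - key: zone\n          operator: In\n          values: [\"zone-a\"]\n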

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#tag-name","title":"Tag name","text":"

                                                                                  The node label to match; it can be a default label or a user-defined label.

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#operators","title":"Operators","text":"
                                                                                  • In: the label value must be in the list of values
                                                                                  • NotIn: the label value must not be in the list of values
                                                                                  • Exists: the label exists on the node; there is no need to set a label value
                                                                                  • DoesNotExist: the label does not exist on the node; there is no need to set a label value
                                                                                  • Gt: the label value is greater than the given value (integer comparison)
                                                                                  • Lt: the label value is less than the given value (integer comparison)
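
                                                                                  For example, a matchExpressions list (keys and values are illustrative) can combine several operators:

                                                                                  matchExpressions:\n- key: disktype\n  operator: In\n  values: [\"ssd\", \"nvme\"]\n- key: gpu-count\n  operator: Gt          # values compared as integers\n  values: [\"3\"]\n- key: dedicated\n  operator: Exists      # no values needed\n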
                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#weights","title":"Weights","text":"

                                                                                  Weights can only be set for \"satisfy as much as possible\" (soft constraint) rules and can be understood as scheduling priority: nodes with the highest weight are scheduled first. The value range is 1 to 100.

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#workload-affinity","title":"Workload Affinity","text":"

                                                                                  Similar to node affinity, there are two types of workload affinity:

                                                                                  • Must be satisfied: (requiredDuringSchedulingIgnoredDuringExecution) The scheduler can only schedule the Pod when the rules are satisfied. You can define multiple hard constraint rules, of which only one needs to be satisfied.
                                                                                  • Satisfy as much as possible: (preferredDuringSchedulingIgnoredDuringExecution) The scheduler will try to find nodes that match the rules. If no matching node is found, the scheduler will still schedule the Pod. You can also set weights for soft constraint rules: if multiple nodes meet the conditions during scheduling, the node with the highest weight is preferred. You can likewise define multiple soft constraint rules, of which only one needs to be satisfied.

                                                                                  Workload affinity mainly determines which Pods of a workload can be deployed in the same topology domain. For example, services that communicate with each other can be deployed in the same topology domain (such as the same availability zone) through affinity scheduling, reducing the network latency between them.
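
                                                                                  As an illustrative sketch, co-locating with Pods labeled app: backend in the same availability zone (the label and topology key are assumptions):

                                                                                  affinity:\n  podAffinity:\n    requiredDuringSchedulingIgnoredDuringExecution:\n    - labelSelector:\n        matchLabels:\n          app: backend                           # hypothetical label of the target Pods\n      topologyKey: topology.kubernetes.io/zone   # co-locate in the same availability zone\n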

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#tag-name_1","title":"Tag name","text":"

                                                                                  The label of the Pods to match; it can be a default label or a user-defined label.

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#namespaces","title":"Namespaces","text":"

                                                                                  Specifies the namespace in which the scheduling policy takes effect.

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#operators_1","title":"Operators","text":"
                                                                                  • In: the label value must be in the list of values
                                                                                  • NotIn: the label value must not be in the list of values
                                                                                  • Exists: the label exists; there is no need to set a label value
                                                                                  • DoesNotExist: the label does not exist; there is no need to set a label value
                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#topology-domain","title":"Topology domain","text":"

                                                                                  Specifies the scope of influence during scheduling. For example, if you specify kubernetes.io/hostname, each node is used as a distinguishing scope.

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#workload-anti-affinity","title":"Workload Anti-Affinity","text":"

                                                                                  Similar to node affinity, there are two types of anti-affinity for workloads:

                                                                                  • Must be satisfied: (requiredDuringSchedulingIgnoredDuringExecution) The scheduler can only schedule the Pod when the rules are satisfied. You can define multiple hard constraint rules, of which only one needs to be satisfied.
                                                                                  • Satisfy as much as possible: (preferredDuringSchedulingIgnoredDuringExecution) The scheduler will try to find nodes that match the rules. If no matching node is found, the scheduler will still schedule the Pod. You can also set weights for soft constraint rules: if multiple nodes meet the conditions during scheduling, the node with the highest weight is preferred. You can likewise define multiple soft constraint rules, of which only one needs to be satisfied.

                                                                                  Workload anti-affinity mainly determines which Pods of a workload cannot be deployed in the same topology domain. For example, replicas of the same workload can be spread across different topology domains (such as different hosts) to improve the stability of the workload itself.
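
                                                                                  An illustrative sketch spreading replicas of a workload labeled app: web across nodes (the label and topology key are assumptions):

                                                                                  affinity:\n  podAntiAffinity:\n    requiredDuringSchedulingIgnoredDuringExecution:\n    - labelSelector:\n        matchLabels:\n          app: web                          # hypothetical workload label\n      topologyKey: kubernetes.io/hostname   # spread replicas across different nodes\n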

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#tag-name_2","title":"Tag name","text":"

                                                                                  The label of the Pods to match; it can be a default label or a user-defined label.

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#namespaces_1","title":"Namespaces","text":"

                                                                                  Specifies the namespace in which the scheduling policy takes effect.

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#operators_2","title":"Operators","text":"
                                                                                  • In: the label value must be in the list of values
                                                                                  • NotIn: the label value must not be in the list of values
                                                                                  • Exists: the label exists; there is no need to set a label value
                                                                                  • DoesNotExist: the label does not exist; there is no need to set a label value
                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/scheduling-policy.html#topology-domain_1","title":"Topology domain","text":"

                                                                                  Specifies the scope of influence during scheduling. For example, if you specify kubernetes.io/hostname, each node is used as a distinguishing scope.

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/workload-status.html","title":"Workload Status","text":"

                                                                                  A workload is an application running on Kubernetes. Whether your application is composed of a single component or of many different components, you can run it in a set of Pods. Kubernetes provides five built-in workload resources to manage Pods:

                                                                                  • Deployment
                                                                                  • StatefulSet
                                                                                  • DaemonSet
                                                                                  • Job
                                                                                  • CronJob

                                                                                  You can also extend workload resources with Custom Resource Definitions (CRDs). The fifth-generation container management module supports the full lifecycle management of workloads, including creation, update, scaling, monitoring, logging, deletion, and version management.

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/workload-status.html#pod-status","title":"Pod Status","text":"

                                                                                  A Pod is the smallest compute unit created and managed in Kubernetes: a collection of containers that share storage, networking, and the management policies that control how the containers run. Pods are typically created through workload resources rather than directly by users. A Pod follows a predefined lifecycle: it starts in the Pending phase, enters Running once at least one of its primary containers starts normally, and then moves to Succeeded or Failed depending on whether any container in the Pod terminated in a failed status.

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/workload-status.html#workload-status_1","title":"Workload Status","text":"

                                                                                  The fifth-generation container management module provides a built-in set of workload lifecycle statuses based on factors such as Pod status and replica count, so that users can perceive the actual running status of workloads more accurately. Because different workload types (such as Deployments and Jobs) manage Pods differently, they show different lifecycle statuses during operation, as shown in the following table.

                                                                                  "},{"location":"en/admin/kpanda/workloads/pod-config/workload-status.html#deployment-statefulset-damemonset-status","title":"Deployment, StatefulSet, DamemonSet Status","text":"Status Description Waiting 1. A workload is in this status while its creation is in progress. 2. After an upgrade or rollback action is triggered, the workload is in this status. 3. Trigger operations such as pausing/scaling, and the workload is in this status. Running This status occurs when all instances under the workload are running and the number of replicas matches the user-defined number. Deleting When a delete operation is performed, the payload is in this status until the delete is complete. Exception Unable to get the status of the workload for some reason. This usually occurs because communication with the pod's host has failed. Not Ready When the container is in an abnormal, pending status, this status is displayed when the workload cannot be started due to an unknown error"},{"location":"en/admin/kpanda/workloads/pod-config/workload-status.html#job-status","title":"Job Status","text":"Status Description Waiting The workload is in this status while Job creation is in progress. Executing The Job is in progress and the workload is in this status. Execution Complete The Job execution is complete and the workload is in this status. Deleting A delete operation is triggered and the workload is in this status. Exception Pod status could not be obtained for some reason. This usually occurs because communication with the pod's host has failed."},{"location":"en/admin/kpanda/workloads/pod-config/workload-status.html#cronjob-status","title":"CronJob status","text":"Status Description Waiting The CronJob is in this status when it is being created. Started After the CronJob is successfully created, the CronJob is in this status when it is running normally or when the paused task is started. Stopped The CronJob is in this status when the stop task operation is performed. Deleting The deletion operation is triggered, and the CronJob is in this status.

                                                                                  When a workload is in an abnormal or not-ready status, you can hover the mouse over the workload's status value, and the system will display more detailed error information in a tooltip. You can also view the logs or events to obtain related running information about the workload.

                                                                                  "},{"location":"en/admin/register/index.html","title":"User Registration","text":"

                                                                                  New users need to register to use the AI platform for the first time.

                                                                                  "},{"location":"en/admin/register/index.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • The AI platform is installed
                                                                                  • Email registration feature is enabled
                                                                                  • An available email address
                                                                                  "},{"location":"en/admin/register/index.html#email-registration-steps","title":"Email Registration Steps","text":"
                                                                                  1. Open the AI platform homepage at https://ai.isuanova.com/ and click Register.

                                                                                  2. Enter your username, password, and email, then click Register.

                                                                                  3. The system will prompt that an email has been sent to your inbox.

                                                                                  4. Log in to your email account, find the email, and click the link it contains.

                                                                                  5. Congratulations, you have successfully accessed the AI platform, and you can now begin your AI journey.

                                                                                  Next step: Bind a Workspace for the User

                                                                                  "},{"location":"en/admin/register/bindws.html","title":"Binding a Workspace for the User","text":"

                                                                                  After a user successfully registers, a workspace needs to be bound to them.

                                                                                  "},{"location":"en/admin/register/bindws.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • AI platform installed
                                                                                  • User has successfully registered
                                                                                  • An available administrator account
                                                                                  "},{"location":"en/admin/register/bindws.html#steps-to-follow","title":"Steps to Follow","text":"
                                                                                  1. Log in to the AI platform as an administrator.
                                                                                  2. Navigate to Global Management -> Workspace and Folder, and click Create Workspace.

                                                                                  3. Enter the workspace name, select a folder, and click OK to create a workspace.

                                                                                  4. Bind resources to the workspace.

                                                                                    On this interface, you can click Create Namespace to create a namespace.

                                                                                  5. Add authorization: Assign the user to the workspace.

                                                                                  6. The user logs in to the AI platform to check if they have permissions for the workspace and namespace. The administrator can perform more actions through the \u2507 on the right side.

                                                                                  Next step: Allocate Resources for the Workspace

                                                                                  "},{"location":"en/admin/register/wsres.html","title":"Allocate Resources to the Workspace","text":"

                                                                                  After binding a user to a workspace, it is necessary to allocate appropriate resources to the workspace.

                                                                                  "},{"location":"en/admin/register/wsres.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • The AI platform is installed
                                                                                  • An available administrator account
                                                                                  • The workspace has been created and bound to a namespace
                                                                                  "},{"location":"en/admin/register/wsres.html#steps","title":"Steps","text":"
                                                                                  1. Log in to the AI platform as an administrator.
                                                                                  2. Navigate to Global Management -> Workspace and Folder, find the workspace to which you want to add resources, and click Add Shared Resources.

                                                                                  3. Select the cluster, set the appropriate resource quota, and then click OK.

                                                                                  4. Return to the shared resources page. Resources have been successfully allocated to the workspace, and the administrator can modify them at any time using the \u2507 on the right side.

                                                                                  Next step: Create a Cloud Host

                                                                                  "},{"location":"en/admin/security/index.html","title":"Cloud Native Security","text":"

                                                                                  The AI platform provides fully automated security capabilities for containers, Pods, images, runtimes, and microservices. The following table lists some of the security features that have been implemented or are in the process of being implemented.

                                                                                  Security Features Specific Items Description Image security Trusted image distribution Key pairs and signature information are required to achieve secure transport of images. A key can be selected for image signing during image transmission. Runtime security Event correlation analysis Support correlation and risk analysis of security events detected at runtime to enhance attack traceability. Support alert convergence to reduce invalid alerts and improve event response efficiency. - Container decoy repository The container decoy repository is equipped with common decoys, including but not limited to: unauthorized access vulnerabilities, code execution vulnerabilities, local file reading vulnerabilities, remote command execution (RCE) vulnerabilities, and other container decoys. - Container decoy deployment Support custom decoy containers, including service names, service locations, etc. - Container decoy alerting Support alerting on suspicious behavior in container decoys. - Offset detection While scanning the image, learn all the binary information in the image and form a \"whitelist\" that allows only the binaries in the \"whitelist\" to run after the container is online, ensuring that the container cannot run unauthorized (such as illegally downloaded) executable files. Micro-isolation Intelligent recommendation of isolation policies Support recording historical access traffic to resources and intelligently recommending policies based on that traffic when configuring isolation policies for resources. - Tenant isolation Support isolation control of tenants in Kubernetes clusters, with the ability to set different network security groups for different tenants, and support tenant-level security policies to achieve inter-tenant network access and isolation. Microservices security Service and API security scanning Support automatic, manual, and periodic scanning of services and APIs within a cluster. Support all traditional web scanning items, including XSS vulnerabilities, SQL injection, command/code injection, directory enumeration, path traversal, XML entity injection, poc, file upload, weak passwords, jsonp, ssrf, arbitrary redirects, CRLF injection, and other risks. For vulnerabilities found in the container environment, support display of the vulnerability type, URL, parameters, danger level, test method, etc."},{"location":"en/admin/security/falco-exporter.html","title":"What is Falco-exporter","text":"

                                                                                  Falco-exporter is a Prometheus Metrics exporter for Falco output events.

                                                                                  Falco-exporter is deployed as a DaemonSet on a Kubernetes cluster. If Prometheus is installed and running in the cluster, metrics provided by Falco-exporter will be automatically discovered.

                                                                                  "},{"location":"en/admin/security/falco-exporter.html#install-falco-exporter","title":"Install Falco-exporter","text":"

                                                                                  This section describes how to install Falco-exporter.

                                                                                  Note

                                                                                  Before installing and using Falco-exporter, you need to install and run Falco with gRPC output enabled (enabled via Unix sockets by default). For more information on enabling gRPC output in the Falco Helm Chart, see Enabling gRPC.

                                                                                  Please confirm that your cluster has successfully connected to the Container Management platform, and then perform the following steps to install Falco-exporter.

                                                                                  1. Click Container Management -> Clusters in the left navigation bar, then find the name of the cluster where you want to install Falco-exporter.

                                                                                  2. In the left navigation bar, select Helm Releases -> Helm Charts, and then find and click falco-exporter.

                                                                                  3. Select the version you want to install in Version and click Install.

                                                                                  4. On the installation screen, fill in the required installation parameters.

                                                                                    Fill in application name, namespace, version, etc.

                                                                                    Fill in the following parameters:

                                                                                    • Falco Prometheus Exporter -> Image Settings -> Registry: set the repository address of the falco-exporter image, which is already filled with the available online repositories by default. If it is a private environment, you can change it to a private repository address.

                                                                                    • Falco Prometheus Exporter -> Image Settings -> Repository: set the falco-exporter image name.

                                                                                    • Falco Prometheus Exporter -> Prometheus ServiceMonitor Settings -> Install ServiceMonitor: install Prometheus Operator service monitor. It is enabled by default.

                                                                                    • Falco Prometheus Exporter -> Prometheus ServiceMonitor Settings -> Scrape Interval: user-defined interval; if not specified, the Prometheus default interval is used.

                                                                                    • Falco Prometheus Exporter -> Prometheus ServiceMonitor Settings -> Scrape Timeout: user-defined scrape timeout; if not specified, the Prometheus default scrape timeout is used.

                                                                                    On the same screen, fill in the following parameters:

                                                                                    • Falco Prometheus Exporter -> Prometheus prometheusRules -> Install prometheusRules: create PrometheusRules to alert on priority events. It is enabled by default.

                                                                                    • Falco Prometheus Exporter -> Prometheus prometheusRules -> Alerts settings: set whether alerts are enabled for different levels of log events, the interval between alerts, and the threshold for alerts.

                                                                                  5. Click the OK button at the bottom right corner to complete the installation.
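
                                                                                  For reference, a hedged sketch of the corresponding Helm values (parameter names are assumptions based on common falco-exporter chart conventions; verify against the chart version you install):

                                                                                  image:\n  registry: docker.io                        # change to a private registry address if offline\n  repository: falcosecurity/falco-exporter\nserviceMonitor:\n  enabled: true                              # install the Prometheus Operator ServiceMonitor\n  interval: \"\"                               # empty uses the Prometheus default interval\n  scrapeTimeout: \"\"                          # empty uses the Prometheus default timeout\nprometheusRules:\n  enabled: true                              # create PrometheusRules to alert on priority events\n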

                                                                                  "},{"location":"en/admin/security/falco-install.html","title":"Install Falco","text":"

                                                                                  Please confirm that your cluster has successfully connected to the Container Management platform, and then perform the following steps to install Falco.

                                                                                  1. Click Container Management -> Clusters in the left navigation bar, then find the name of the cluster where you want to install Falco.

                                                                                  2. In the left navigation bar, select Helm Releases -> Helm Charts, and then find and click Falco.

                                                                                  3. Select the version you want to install in Version, and click Install.

                                                                                  4. On the installation page, fill in the required installation parameters.

                                                                                    Fill in the application name, namespace, version, etc.

                                                                                    Fill in the following parameters:

                                                                                    • Falco -> Image Settings -> Registry: set the repository address of the Falco image, which is already filled with the available online repositories by default. If it is a private environment, you can change it to a private repository address.

                                                                                    • Falco -> Image Settings -> Repository: set the Falco image name.

                                                                                    • Falco -> Falco Driver -> Image Settings -> Registry: set the repository address of the Falco Driver image, which is already filled with available online repositories by default. If it is a private environment, you can change it to a private repository address.

                                                                                    • Falco -> Falco Driver -> Image Settings -> Repository: set the Falco Driver image name.

                                                                                    • Falco -> Falco Driver -> Image Settings -> Driver Kind: set the Driver Kind, providing the following two options.

                                                                                      • ebpf: use ebpf to detect events, which requires the Linux kernel to support ebpf and enable CONFIG_BPF_JIT and sysctl net.core.bpf_jit_enable=1.

                                                                                      • module: use kernel module detection, which supports a limited set of OS versions. Refer to the OS versions supported by the kernel module.

                                                                                    • Falco -> Falco Driver -> Image Settings -> Log Level: the minimum log level to be included in the log.

                                                                                      Optional values include: emergency, alert, critical, error, warning, notice, info, debug.

                                                                                  5. Click the OK button in the bottom right corner to complete the installation.
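
                                                                                  For reference, a hedged sketch of the matching Helm values (names are assumptions based on the upstream Falco chart; verify against the chart version you install):

                                                                                  image:\n  registry: docker.io              # change to a private registry address if offline\n  repository: falcosecurity/falco\ndriver:\n  kind: ebpf                       # or \"module\" on supported kernels\nfalco:\n  log_level: info                  # emergency|alert|critical|error|warning|notice|info|debug\n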

                                                                                  "},{"location":"en/admin/security/falco.html","title":"What is Falco","text":"

                                                                                  Falco is a cloud-native runtime security tool designed to detect anomalous activity in applications. It can be used to monitor the runtime security of Kubernetes applications and internal components. With only a set of rules, Falco can continuously monitor and watch for anomalous activity in containers, applications, hosts, and networks.

                                                                                  "},{"location":"en/admin/security/falco.html#what-does-falco-detect","title":"What does Falco detect?","text":"

                                                                                  Falco can detect and alert on any behavior involving Linux system calls. Falco alerts can be triggered using specific system calls, parameters, and properties of the calling process. For example, Falco can easily detect events including but not limited to the following:

                                                                                  • A shell is running inside a container or pod in Kubernetes.
                                                                                  • A container is running in privileged mode or mounting a sensitive path, such as /proc, from the host.
                                                                                  • A server process is spawning a child process of an unexpected type.
                                                                                  • A sensitive file, such as /etc/shadow, is being read unexpectedly.
                                                                                  • A non-device file is being written to /dev.
                                                                                  • A standard system binary, such as ls, is making an outbound network connection.
                                                                                  • A privileged pod is started in a Kubernetes cluster.

                                                                                  For more information on the default rules that come with Falco, see the Rules documentation.

                                                                                  "},{"location":"en/admin/security/falco.html#what-are-falco-rules","title":"What are Falco rules?","text":"

                                                                                  Falco rules define the behavior and events that Falco should monitor. Rules can be written in the Falco rules file or in a generic configuration file. For more information on writing, managing and deploying rules, see Falco Rules.

                                                                                  "},{"location":"en/admin/security/falco.html#what-are-falco-alerts","title":"What are Falco Alerts?","text":"

                                                                                  Alerts are configurable downstream actions that can be as simple as logging to STDOUT or as complex as delivering a gRPC call to a client. For more information on configuring, understanding, and developing alerts, see Falco Alerts. Falco can send alerts to:

                                                                                  • Standard output
                                                                                  • A file
                                                                                  • A system log
                                                                                  • A spawned program
                                                                                  • An HTTP[s] endpoint
                                                                                  • A client via the gRPC API
                                                                                  "},{"location":"en/admin/security/falco.html#what-are-the-components-of-falco","title":"What are the components of Falco?","text":"

                                                                                  Falco consists of the following main components:

                                                                                  • Userspace program: a CLI tool that can be used to interact with Falco. The userspace program handles signals, parses messages from a Falco driver, and sends alerts.

                                                                                  • Configuration: define how Falco is run, what rules to assert, and how to perform alerts. For more information, see Configuration.

                                                                                  • Driver: software that adheres to the Falco driver specification and sends a stream of system call information. You cannot run Falco without installing a driver. Currently, Falco supports the following drivers:

                                                                                    • Kernel module built on libscap and libsinsp C++ libraries (default)
                                                                                    • BPF probe built from the same modules
                                                                                    • Userspace instrumentation

                                                                                      For more information, see Falco drivers.

                                                                                  • Plugins: allow users to extend the functionality of the Falco libraries/executable by adding new event sources and new fields that can extract information from events. For more information, see Plugins.

                                                                                  "},{"location":"en/admin/share/notebook.html","title":"Using Notebook","text":"

                                                                                  Notebook typically refers to Jupyter Notebook or a similar interactive computing environment, a very popular tool widely used in fields such as data science, machine learning, and deep learning. This page explains how to use Notebook on the AI platform.

                                                                                  "},{"location":"en/admin/share/notebook.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • The AI platform is installed
                                                                                  • The user has successfully registered
                                                                                  • The administrator has assigned a workspace to the user
                                                                                  • A dataset (code, data, etc.) is prepared
                                                                                  "},{"location":"en/admin/share/notebook.html#creating-and-using-notebook-instances","title":"Creating and Using Notebook Instances","text":"
                                                                                  1. Log in to the AI platform as an Administrator.
                                                                                  2. Navigate to AI Lab -> Queue Management, and click the Create button on the right.

                                                                                  3. After entering a name, selecting a cluster, workspace, and quota, click OK.

                                                                                  4. Log in to the AI platform as a User, navigate to AI Lab -> Notebook, and click the Create button on the right.

                                                                                  5. After configuring the parameters, click OK.

                                                                                    Basic Information / Resource Configuration / Advanced Configuration

                                                                                    Enter a name, select a cluster, namespace, choose the newly created queue, and click One-click Initialization.

                                                                                    Select Notebook type, configure memory and CPU, enable GPU, create and configure PVC:

                                                                                    Enable SSH external access:

                                                                                  6. You will be automatically redirected to the Notebook instance list; click the instance name.

                                                                                  7. Enter the Notebook instance details page and click the Open button in the upper right corner.

                                                                                  8. You will enter the Notebook development environment, where a persistent volume is mounted at the /home/jovyan directory. You can clone code with git, and upload data after connecting via SSH.

                                                                                  "},{"location":"en/admin/share/notebook.html#accessing-notebook-instances-via-ssh","title":"Accessing Notebook Instances via SSH","text":"
                                                                                  1. Generate an SSH key pair on your own computer.

                                                                                    Open the command line on your computer, for example, open git bash on Windows, and enter ssh-keygen.exe -t rsa, then press enter until completion.

                                                                                  2. Use commands like cat ~/.ssh/id_rsa.pub to view and copy the public key.

                                                                                  3. Log in to the AI platform as a user, click Personal Center in the upper right corner -> SSH Public Key -> Import SSH Public Key.

                                                                                  4. Go to the details page of the Notebook instance and copy the SSH link.

                                                                                  5. Use SSH to access the Notebook instance from the client.

                                                                                  Next step: Create Training Jobs

                                                                                  "},{"location":"en/admin/share/quota.html","title":"Quota Management","text":"

                                                                                  Once a user is bound to a workspace, resources can be allocated to the workspace, and resource quotas can be managed.

                                                                                  "},{"location":"en/admin/share/quota.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • The AI platform is installed
                                                                                  • There is an available administrator account
                                                                                  "},{"location":"en/admin/share/quota.html#creating-and-managing-quotas","title":"Creating and Managing Quotas","text":"
                                                                                  1. Log in to the AI platform as an Administrator.
                                                                                  2. Create a workspace and namespace, and bind users.
                                                                                  3. Allocate resource quotas to the workspace.

                                                                                  4. Manage the resource quotas for the namespace test-ns-1, ensuring that the values do not exceed the workspace's quota.

                                                                                  5. Log in to the AI platform as a User and check whether the test-ns-1 namespace has been assigned.

                                                                                  Next step: Create AI Workloads Using GPUs

                                                                                  "},{"location":"en/admin/share/workload.html","title":"Creating AI Workloads Using GPU Resources","text":"

                                                                                  After the administrator allocates resource quotas for the workspace, users can create AI workloads to utilize GPU computing resources.

                                                                                  "},{"location":"en/admin/share/workload.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • The AI platform is installed
                                                                                  • User has successfully registered
                                                                                  • Administrator has assigned a workspace to the user
                                                                                  • Resource quotas have been set for the workspace
                                                                                  • A cluster has been created
                                                                                  "},{"location":"en/admin/share/workload.html#steps-to-create-ai-workloads","title":"Steps to Create AI Workloads","text":"
                                                                                  1. Log in to the AI platform as a User.
                                                                                  2. Navigate to Container Management, select a namespace, then click Workloads -> Deployments, and then click the Create from Image button on the right.

                                                                                  3. After configuring the parameters, click OK.

                                                                                    Basic Information / Container Configuration / Others

                                                                                    Select your own namespace.

                                                                                    Set the image, configure resources such as CPU, memory, and GPU, and set the startup command.

                                                                                    Service configuration and advanced settings can use default configurations.

                                                                                  4. You will be automatically redirected to the stateless workload list; click the workload name.

                                                                                  5. Enter the details page to view the GPU quota.

                                                                                  6. You can also enter the console and run the mx-smi command to check the GPU resources.

                                                                                  Next step: Using Notebook

                                                                                  "},{"location":"en/admin/virtnest/best-practice/import-ubuntu.html","title":"Import a Linux Virtual Machine with Ubuntu from an External Platform","text":"

                                                                                  This page describes in detail how to import a Linux virtual machine from the external VMware platform into an AI platform virtual machine through the command line.

                                                                                  Info

                                                                                  The external virtualization platform in this document is VMware vSphere Client, abbreviated as vSphere. Technically, the import relies on KubeVirt CDI. Before proceeding, the virtual machine to be imported must be shut down on vSphere. This page takes a virtual machine running the Ubuntu operating system as an example.

                                                                                  "},{"location":"en/admin/virtnest/best-practice/import-ubuntu.html#fetch-basic-information-of-vsphere-virtual-machine","title":"Fetch Basic Information of vSphere Virtual Machine","text":"
                                                                                  • vSphere URL: fetch the URL of the target platform

                                                                                  • vSphere SSL Certificate Thumbprint: needs to be fetched using openssl

                                                                                    openssl s_client -connect 10.64.56.11:443 </dev/null | openssl x509 -in /dev/stdin -fingerprint -sha1 -noout\n
                                                                                    Output will be similar to:
                                                                                    Can't use SSL_get_servername\ndepth=0 CN = vcsa.daocloud.io\nverify error:num=20:unable to get local issuer certificate\nverify return:1\ndepth=0 CN = vcsa.daocloud.io\nverify error:num=21:unable to verify the first certificate\nverify return:1\ndepth=0 CN = vcsa.daocloud.io\nverify return:1\nDONE\nsha1 Fingerprint=C3:9D:D7:55:6A:43:11:2B:DE:BA:27:EA:3B:C2:13:AF:E4:12:62:4D  # Value needed\n

                                                                                  • vSphere Account: Fetch account information for vSphere, and pay attention to permissions

                                                                                  • vSphere Password: Fetch password information for vSphere

                                                                                  • UUID of the virtual machine to be imported: needs to be fetched from the vSphere web page

                                                                                    • Access the vSphere page, go to the details page of the virtual machine to be imported, and click Edit Settings. Open the browser's developer console at this point, click Network -> Headers, and find the request URL as shown below.

                                                                                    • Click Response, locate vmConfigContext -> config, and finally find the target value uuid.

                                                                                  • Path of the vmdk file of the virtual machine to be imported

                                                                                  "},{"location":"en/admin/virtnest/best-practice/import-ubuntu.html#network-configuration","title":"Network Configuration","text":"

                                                                                  Different information needs to be configured based on the chosen network mode. If a fixed IP address is required, you should select the Bridge network mode.

                                                                                  • Create a Multus CR of the ovs type. Refer to Creating a Multus CR.
                                                                                  • Create subnets and IP pools. Refer to Creating Subnets and IP Pools.

                                                                                    apiVersion: spiderpool.spidernet.io/v2beta1\nkind: SpiderIPPool\nmetadata:\n  name: test2\nspec:\n  ips:\n  - 10.20.3.90\n  subnet: 10.20.0.0/16\n  gateway: 10.20.0.1\n\n---\napiVersion: spiderpool.spidernet.io/v2beta1\nkind: SpiderIPPool\nmetadata:\n  name: test3\nspec:\n  ips:\n  - 10.20.240.1\n  subnet: 10.20.0.0/16\n  gateway: 10.20.0.1\n\n---\napiVersion: spiderpool.spidernet.io/v2beta1\nkind: SpiderMultusConfig\nmetadata:\n  name: test1\n  namespace: kube-system\nspec:\n  cniType: ovs\n  coordinator:\n    detectGateway: false\n    detectIPConflict: false\n    mode: auto\n    tunePodRoutes: true\n  disableIPAM: false\n  enableCoordinator: true\n  ovs:\n    bridge: br-1\n    ippools:\n      ipv4:\n      - test2\n      - test3\n
                                                                                  "},{"location":"en/admin/virtnest/best-practice/import-ubuntu.html#fetch-vsphere-account-password-secret","title":"Fetch vSphere Account Password Secret","text":"
                                                                                  apiVersion: v1\nkind: Secret\nmetadata:\n  name: vsphere   # Can be changed\n  labels:\n    app: containerized-data-importer  # Do not change\ntype: Opaque\ndata:\n  accessKeyId: \"username-base64\"\n  secretKey: \"password-base64\"\n
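The accessKeyId and secretKey values must be base64-encoded, for example (illustrative credentials, replace with your own):

echo -n 'administrator@vsphere.local' | base64   # result goes into accessKeyId\necho -n 'your-password' | base64                 # result goes into secretKey\n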
                                                                                  "},{"location":"en/admin/virtnest/best-practice/import-ubuntu.html#write-a-kubevirt-vm-yaml-to-create-vm","title":"Write a KubeVirt VM YAML to create VM","text":"

                                                                                  Tip

                                                                                  If a fixed IP address is required, the YAML configuration differs slightly from the one used for the default network. These differences have been highlighted.

apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n    virtnest.io/alias-name: \"\"\n    virtnest.io/image-secret: \"\"\n  creationTimestamp: \"2024-05-23T06:46:28Z\"\n  finalizers:\n  - kubevirt.io/virtualMachineControllerFinalize\n  generation: 1\n  labels:\n    virtnest.io/os-family: Ubuntu\n    virtnest.io/os-version: \"22.04\"\n  name: export-ubuntu\n  namespace: default\nspec:\n  dataVolumeTemplates:\n  - metadata:\n      creationTimestamp: null\n      name: export-ubuntu-rootdisk\n      namespace: default\n    spec:\n      pvc:\n        accessModes:\n        - ReadWriteOnce\n        resources:\n          requests:\n            storage: 10Gi\n        storageClassName: local-path\n      source:\n        vddk:\n          backingFile: \"[A05-09-ShangPu-Local-DataStore] virtnest-export-ubuntu/virtnest-export-ubuntu.vmdk\"\n          url: \"https://10.64.56.21\"\n          uuid: \"421d6135-4edb-df80-ee54-8c5b10cc4e78\"\n          thumbprint: \"D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA\"\n          secretRef: \"vsphere\"\n          initImageURL: \"release.daocloud.io/virtnest/vddk:v8\"\n  runStrategy: Manual\n  template:\n    metadata:\n      annotations:\n        ipam.spidernet.io/ippools: '[{\"cleangateway\":false,\"ipv4\":[\"test2\"]}]'  # Add the Spiderpool network here\n      creationTimestamp: null\n    spec:\n      architecture: amd64\n      domain:\n        devices:\n          disks:\n          - bootOrder: 1\n            disk:\n              bus: virtio\n            name: rootdisk\n          interfaces:                                                          # Modify the network configuration\n          - bridge: {}\n            name: ovs-bridge0\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 4Gi\n      networks:                                                                # Modify the network configuration\n      - multus:\n          default: true\n          networkName: kube-system/test1\n        name: ovs-bridge0\n      volumes:\n      - dataVolume:\n          name: export-ubuntu-rootdisk\n        name: rootdisk\n
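Because runStrategy is Manual, the virtual machine does not start automatically once the YAML is applied. A sketch, assuming the manifest is saved as export-ubuntu.yaml and virtctl is installed:

kubectl apply -f export-ubuntu.yaml\nkubectl get dv -n default   # wait until the rootdisk DataVolume import succeeds\nvirtctl start export-ubuntu -n default\n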
                                                                                  "},{"location":"en/admin/virtnest/best-practice/import-ubuntu.html#access-vnc-to-verify-successful-operation","title":"Access VNC to verify successful operation","text":"
                                                                                  1. Modify the network configuration of the virtual machine

                                                                                  2. Check the current network

When the import is completed, the configuration shown in the image below is in place. Note, however, that the enp1s0 interface does not contain the inet field, so it cannot connect to the external network.

                                                                                  3. Configure netplan

In the configuration shown in the image above, change the device under ethernets to enp1s0 and have it obtain an IP address via DHCP, as sketched below.
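A minimal sketch of the resulting netplan file, assuming it lives at /etc/netplan/01-netcfg.yaml (the actual file name in your image may differ):

cat <<'EOF' | sudo tee /etc/netplan/01-netcfg.yaml\nnetwork:\n  version: 2\n  ethernets:\n    enp1s0:\n      dhcp4: true\nEOF\n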

                                                                                  4. Apply the netplan configuration to the system network configuration

                                                                                    sudo netplan apply\n
                                                                                  5. Perform a ping test on the external network

                                                                                  6. Access the virtual machine on the node via SSH.

                                                                                  "},{"location":"en/admin/virtnest/best-practice/import-windows.html","title":"Import a Windows Virtual Machine from the External Platform","text":"

This page provides a detailed introduction on how to import a virtual machine from an external platform (VMware) into the AI platform using the command line.

                                                                                  Info

The external virtual platform on this page is VMware vSphere Client, abbreviated as vSphere. Technically, it relies on kubevirt cdi for implementation. Before proceeding, the virtual machine to be imported must be shut down on vSphere. This page takes a virtual machine running the Windows operating system as an example.

                                                                                  "},{"location":"en/admin/virtnest/best-practice/import-windows.html#environment-preparation","title":"Environment Preparation","text":"

                                                                                  Before importing, refer to the Network Configuration to prepare the environment.

                                                                                  "},{"location":"en/admin/virtnest/best-practice/import-windows.html#fetch-information-of-the-windows-virtual-machine","title":"Fetch Information of the Windows Virtual Machine","text":"

                                                                                  Similar to importing a virtual machine with a Linux operating system, refer to Importing a Linux Virtual Machine with Ubuntu from an External Platform to get the following information:

                                                                                  • vSphere account and password
                                                                                  • vSphere virtual machine information
                                                                                  "},{"location":"en/admin/virtnest/best-practice/import-windows.html#check-the-boot-type-of-windows","title":"Check the Boot Type of Windows","text":"

                                                                                  When importing a virtual machine from an external platform into the AI platform virtualization platform, you need to configure it according to the boot type (BIOS or UEFI) to ensure it can boot and run correctly.

You can check whether Windows uses BIOS or UEFI in System Information (msinfo32), under System Summary -> BIOS Mode. If it uses UEFI, you need to add the relevant information to the YAML file.

                                                                                  "},{"location":"en/admin/virtnest/best-practice/import-windows.html#import-process","title":"Import Process","text":"

Prepare the window.yaml file and pay attention to the following configuration items:

• PVC for booting Virtio drivers
                                                                                  • Disk bus type, set to SATA or Virtio depending on the boot type
                                                                                  • UEFI configuration (if UEFI is used)
                                                                                  Click to view the window.yaml example window.yaml
apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  labels:\n    virtnest.io/os-family: windows\n    virtnest.io/os-version: \"server2019\"\n  name: export-window-21\n  namespace: default\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        name: export-window-21-rootdisk\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 22Gi\n          storageClassName: local-path\n        source:\n          vddk:\n            backingFile: \"[A05-09-ShangPu-Local-DataStore] virtnest-export-window/virtnest-export-window.vmdk\"\n            url: \"https://10.64.56.21\"\n            uuid: \"421d40f2-21a2-cfeb-d5c9-e7f8abfc2faa\"\n            thumbprint: \"D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA\"\n            secretRef: \"vsphere21\"\n            initImageURL: \"release.daocloud.io/virtnest/vddk:v8\"\n    - metadata:\n        name: export-window-21-datadisk\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 1Gi\n          storageClassName: local-path\n        source:\n          vddk:\n            backingFile: \"[A05-09-ShangPu-Local-DataStore] virtnest-export-window/virtnest-export-window_1.vmdk\"\n            url: \"https://10.64.56.21\"\n            uuid: \"421d40f2-21a2-cfeb-d5c9-e7f8abfc2faa\"\n            thumbprint: \"D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA\"\n            secretRef: \"vsphere21\"\n            initImageURL: \"release.daocloud.io/virtnest/vddk:v8\"\n    # <1>. PVC for booting Virtio drivers\n    # \u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\n    - metadata:\n        name: virtio-disk\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 10Mi\n          storageClassName: local-path\n        source:\n          blank: {}\n          # \u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\n  running: true\n  template:\n    metadata:\n      annotations:\n        ipam.spidernet.io/ippools: '[{\"cleangateway\":false,\"ipv4\":[\"test86\"]}]'\n    spec:\n      dnsConfig:\n        nameservers:\n        - 223.5.5.5\n      domain:\n        cpu:\n          cores: 2\n        memory:\n          guest: 4Gi\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: sata   # <2> Disk bus type, set to SATA or Virtio depending on the boot type\n              name: rootdisk\n            - bootOrder: 2\n              disk:\n                bus: sata   # <2> Disk bus type, set to SATA or Virtio depending on the boot type\n              name: datadisk\n            # <1>. Disk for booting Virtio drivers\n            # \u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\n            - bootOrder: 3\n              disk:\n                bus: virtio\n              name: virtdisk\n            - bootOrder: 4\n              cdrom:\n                bus: sata\n              name: virtiocontainerdisk\n            # \u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\n          interfaces:\n            - bridge: {}\n              name: ovs-bridge0\n        # <3> In the above section \"Check the Boot Type of Windows\"\n        # If using UEFI, add the following information\n        # \u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\n        features:\n          smm:\n            enabled: true\n        firmware:\n          bootloader:\n            efi:\n              secureBoot: false\n        # \u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 4Gi\n      networks:\n        - multus:\n            default: true\n            networkName: kube-system/test1\n          name: ovs-bridge0\n      volumes:\n        - dataVolume:\n            name: export-window-21-rootdisk\n          name: rootdisk\n        - dataVolume:\n            name: export-window-21-datadisk\n          name: datadisk\n        # <1> Volumes for booting Virtio drivers\n        # \u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\n        - dataVolume:\n            name: virtio-disk\n          name: virtdisk\n        - containerDisk:\n            image: release-ci.daocloud.io/virtnest/kubevirt/virtio-win:v4.12.12-5\n          name: virtiocontainerdisk\n        # \u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\n
                                                                                  "},{"location":"en/admin/virtnest/best-practice/import-windows.html#install-virtio-drivers-via-vnc","title":"Install VirtIO Drivers via VNC","text":"
                                                                                  1. Access and connect to the virtual machine via VNC.
                                                                                  2. Download and install the appropriate VirtIO drivers based on the Windows version.
                                                                                  3. Enable Remote Desktop to facilitate future connections via RDP.
                                                                                  4. After installation, update the YAML file and reboot the virtual machine.
                                                                                  "},{"location":"en/admin/virtnest/best-practice/import-windows.html#update-yaml-after-reboot","title":"Update YAML After Reboot","text":"Click to view the modified `window.yaml` example window.yaml
                                                                                  # Delete fields marked with <1>, modify fields marked with <2>: change sata to virtio\napiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  labels:\n    virtnest.io/os-family: windows\n    virtnest.io/os-version: \"server2019\"\n  name: export-window-21\n  namespace: default\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        name: export-window-21-rootdisk\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 22Gi\n          storageClassName: local-path\n        source:\n          vddk:\n            backingFile: \"[A05-09-ShangPu-Local-DataStore] virtnest-export-window/virtnest-export-window.vmdk\"\n            url: \"https://10.64.56.21\"\n            uuid: \"421d40f2-21a2-cfeb-d5c9-e7f8abfc2faa\"\n            thumbprint: \"D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA\"\n            secretRef: \"vsphere21\"\n            initImageURL: \"release.daocloud.io/virtnest/vddk:v8\"\n    - metadata:\n        name: export-window-21-datadisk\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 1Gi\n          storageClassName: local-path\n        source:\n          vddk:\n            backingFile: \"[A05-09-ShangPu-Local-DataStore] virtnest-export-window/virtnest-export-window_1.vmdk\"\n            url: \"https://10.64.56.21\"\n            uuid: \"421d40f2-21a2-cfeb-d5c9-e7f8abfc2faa\"\n            thumbprint: \"D7:C4:22:E3:6F:69:DA:72:50:81:12:FA:42:18:3F:29:5C:7F:41:CA\"\n            secretRef: \"vsphere21\"\n            initImageURL: \"release.daocloud.io/virtnest/vddk:v8\"\n  running: true\n  template:\n    metadata:\n      annotations:\n        ipam.spidernet.io/ippools: '[{\"cleangateway\":false,\"ipv4\":[\"test86\"]}]'\n    spec:\n      dnsConfig:\n        nameservers:\n        - 223.5.5.5\n      domain:\n        cpu:\n          cores: 2\n        memory:\n          guest: 4Gi\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio  # <2>\n              name: rootdisk\n            - bootOrder: 2\n              disk:\n                bus: virtio  # <2>\n              name: datadisk\n          interfaces:\n            - bridge: {}\n              name: ovs-bridge0\n        # <3> In the above section \"Check the Boot Type of Windows\"\n        # If using UEFI, add the following information\n        # \u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\u2193\n        features:\n          smm:\n            enabled: true\n        firmware:\n          bootloader:\n            efi:\n              secureBoot: false\n        # \u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\u2191\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 4Gi\n      networks:\n        - multus:\n            default: true\n            networkName: kube-system/test1\n          name: ovs-bridge0\n      volumes:\n        - dataVolume:\n            name: export-window-21-rootdisk\n          name: rootdisk\n        - dataVolume:\n            name: export-window-21-datadisk\n          name: datadisk\n
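To roll out the updated configuration, reapply the YAML and restart the virtual machine. A sketch, assuming the manifest is saved as window.yaml and virtctl is installed:

kubectl apply -f window.yaml\nvirtctl restart export-window-21 -n default\n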
                                                                                  "},{"location":"en/admin/virtnest/best-practice/import-windows.html#access-and-verify-via-rdp","title":"Access and Verify via RDP","text":"
                                                                                  • Use an RDP client to connect to the virtual machine. Log in with the default account admin and password dangerous!123.

• Verify network access and the data on the data disk

                                                                                  "},{"location":"en/admin/virtnest/best-practice/import-windows.html#differences-between-importing-linux-and-windows-virtual-machines","title":"Differences Between Importing Linux and Windows Virtual Machines","text":"
                                                                                  • Windows may require UEFI configuration.
                                                                                  • Windows typically requires the installation of VirtIO drivers.
                                                                                  • Windows multi-disk imports usually do not require re-mounting of disks.
                                                                                  "},{"location":"en/admin/virtnest/best-practice/vm-windows.html","title":"Create a Windows Virtual Machine","text":"

                                                                                  This document will explain how to create a Windows virtual machine via the command line.

                                                                                  "},{"location":"en/admin/virtnest/best-practice/vm-windows.html#prerequisites","title":"Prerequisites","text":"
                                                                                  1. Before creating a Windows virtual machine, it is recommended to first refer to installing dependencies and prerequisites for the virtual machine module to ensure that your environment is ready.
2. During the creation process, it is recommended to refer to the official documentation: Installing Windows and Installing Windows-related drivers.
                                                                                  3. It is recommended to access the Windows virtual machine using the VNC method.
                                                                                  "},{"location":"en/admin/virtnest/best-practice/vm-windows.html#import-an-iso-image","title":"Import an ISO Image","text":"

                                                                                  Creating a Windows virtual machine requires importing an ISO image primarily to install the Windows operating system. Unlike Linux operating systems, the Windows installation process usually involves booting from an installation disc or ISO image file. Therefore, when creating a Windows virtual machine, it is necessary to first import the installation ISO image of the Windows operating system so that the virtual machine can be installed properly.

                                                                                  Here are two methods for importing ISO images:

1. (Recommended) Create a Docker image. Refer to building images.

2. (Not recommended) Use virtctl to import the image into a Persistent Volume Claim (PVC).

                                                                                    You can refer to the following command:

                                                                                    virtctl image-upload -n <namespace> pvc <PVC name> \\\n   --image-path=<ISO file path> \\\n   --access-mode=ReadWriteOnce \\\n   --size=6G \\\n   --uploadproxy-url=<https://cdi-uploadproxy ClusterIP and port> \\\n   --force-bind \\\n   --insecure \\\n   --wait-secs=240 \\\n   --storage-class=<SC>\n

For example, with illustrative values (namespace, PVC name, ISO path, upload proxy address, and storage class are placeholders for your environment):

virtctl image-upload -n default pvc iso-win10 \\\n   --image-path=/tmp/win10.iso \\\n   --access-mode=ReadWriteOnce \\\n   --size=6G \\\n   --uploadproxy-url=https://10.233.50.35:443 \\\n   --force-bind \\\n   --insecure \\\n   --wait-secs=240 \\\n   --storage-class=local-path\n
                                                                                  "},{"location":"en/admin/virtnest/best-practice/vm-windows.html#create-a-windows-virtual-machine-using-yaml","title":"Create a Windows Virtual Machine Using YAML","text":"

                                                                                  Creating a Windows virtual machine using YAML is more flexible and easier to write and maintain. Below are three reference YAML examples:

                                                                                  1. (Recommended) Using Virtio drivers + Docker image:

• If you need to use storage capabilities (mounting disks), install the viostor driver.
• If you need to use network capabilities, install the NetKVM driver.
                                                                                    apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n  labels:\n    virtnest.io/os-family: Windows\n    virtnest.io/os-version: '10'\n  name: windows10-virtio\n  namespace: default\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        name: win10-system-virtio\n        namespace: default\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 32Gi\n          storageClassName: local-path\n        source:\n          blank: {}\n  running: true\n  template:\n    metadata:\n      labels:\n        app: windows10-virtio\n        version: v1\n        kubevirt.io/domain: windows10-virtio\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 8\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio # Use virtio\n              name: win10-system-virtio \n            - bootOrder: 2\n              cdrom:\n                bus: sata # Use sata for ISO image\n              name: iso-win10\n            - bootOrder: 3\n              cdrom:\n                bus: sata # Use sata for containerdisk\n              name: virtiocontainerdisk\n          interfaces:\n            - name: default\n              masquerade: {}\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 8G\n      networks:\n        - name: default\n          pod: {}\n      volumes:\n        - name: iso-win10\n          persistentVolumeClaim:\n            claimName: iso-win10\n        - name: win10-system-virtio\n          persistentVolumeClaim:\n            claimName: win10-system-virtio\n        - containerDisk:\n            image: kubevirt/virtio-container-disk\n          name: virtiocontainerdisk\n
                                                                                  2. (Not recommended) Using a combination of Virtio drivers and virtctl tool to import the image into a Persistent Volume Claim (PVC).

apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n  labels:\n    virtnest.io/os-family: Windows\n    virtnest.io/os-version: '10'\n  name: windows10-virtio\n  namespace: default\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        name: win10-system-virtio\n        namespace: default\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 32Gi\n          storageClassName: local-path\n        source:\n          blank: {}\n  running: true\n  template:\n    metadata:\n      labels:\n        app: windows10-virtio\n        version: v1\n        kubevirt.io/domain: windows10-virtio\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 8\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              # Use virtio\n              disk:\n                bus: virtio\n              name: win10-system-virtio\n              # Use sata for ISO image\n            - bootOrder: 2\n              cdrom:\n                bus: sata\n              name: iso-win10\n              # Use sata for containerdisk\n            - bootOrder: 3\n              cdrom:\n                bus: sata\n              name: virtiocontainerdisk\n          interfaces:\n            - name: default\n              masquerade: {}\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 8G\n      networks:\n        - name: default\n          pod: {}\n      volumes:\n        - name: iso-win10\n          persistentVolumeClaim:\n            claimName: iso-win10\n        - name: win10-system-virtio\n          persistentVolumeClaim:\n            claimName: win10-system-virtio\n        - containerDisk:\n            image: kubevirt/virtio-container-disk\n          name: virtiocontainerdisk\n
3. (Not recommended) Without Virtio drivers, use the virtctl tool to import the image into a Persistent Volume Claim (PVC). The virtual machine then uses other or default drivers to operate its disk and network devices.

apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n  labels:\n    virtnest.io/os-family: Windows\n    virtnest.io/os-version: '10'\n  name: windows10\n  namespace: default\nspec:\n  dataVolumeTemplates:\n    # Create a PVC (disk) for the system disk\n    - metadata:\n        name: win10-system\n        namespace: default\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 32Gi\n          storageClassName: local-path\n        source:\n          blank: {}\n  running: true\n  template:\n    metadata:\n      labels:\n        app: windows10\n        version: v1\n        kubevirt.io/domain: windows10\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 8\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              # Use sata without virtio driver\n              cdrom:\n                bus: sata\n              name: win10-system\n              # Use sata for ISO\n            - bootOrder: 2\n              cdrom:\n                bus: sata\n              name: iso-win10\n          interfaces:\n            - name: default\n              masquerade: {}\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 8G\n      networks:\n        - name: default\n          pod: {}\n      volumes:\n        - name: iso-win10\n          persistentVolumeClaim:\n            claimName: iso-win10\n        - name: win10-system\n          persistentVolumeClaim:\n            claimName: win10-system\n
                                                                                  "},{"location":"en/admin/virtnest/best-practice/vm-windows.html#cloud-desktop","title":"Cloud Desktop","text":"

                                                                                  For Windows virtual machines, remote desktop control access is often required. It is recommended to use Microsoft Remote Desktop to control your virtual machine.

                                                                                  Note

                                                                                  • Your Windows version must support remote desktop control to use Microsoft Remote Desktop.
                                                                                  • You need to disable the Windows firewall.
                                                                                  "},{"location":"en/admin/virtnest/best-practice/vm-windows.html#add-data-disks","title":"Add Data Disks","text":"

                                                                                  Adding a data disk to a Windows virtual machine follows the same process as adding one to a Linux virtual machine. You can refer to the provided YAML example for guidance.

apiVersion: kubevirt.io/v1\n  kind: VirtualMachine\n  <...>\n  spec:\n    dataVolumeTemplates:\n      # Add a data disk\n      - metadata:\n          name: win10-disk\n          namespace: default\n        spec:\n          pvc:\n            accessModes:\n              - ReadWriteOnce\n            resources:\n              requests:\n                storage: 16Gi\n            storageClassName: hwameistor-storage-lvm-hdd\n          source:\n            blank: {}\n    template:\n      spec:\n        domain:\n          devices:\n            disks:\n              - bootOrder: 1\n                disk:\n                  bus: virtio\n                name: win10-system\n              # Add a data disk\n              - bootOrder: 2\n                disk:\n                  bus: virtio\n                name: win10-disk\n            <....>\n        volumes:\n          <....>\n          # Add a data disk\n          - name: win10-disk\n            persistentVolumeClaim:\n              claimName: win10-disk\n
                                                                                  "},{"location":"en/admin/virtnest/best-practice/vm-windows.html#snapshots-cloning-live-migration","title":"Snapshots, Cloning, Live Migration","text":"

                                                                                  These capabilities are consistent with Linux virtual machines and can be configured using the same methods.

                                                                                  "},{"location":"en/admin/virtnest/best-practice/vm-windows.html#access-your-windows-virtual-machine","title":"Access Your Windows Virtual Machine","text":"
                                                                                  1. After successful creation, access the virtual machine list page to confirm that the virtual machine is running properly.

2. Click Console Access (VNC) to access the virtual machine.

                                                                                  "},{"location":"en/admin/virtnest/gpu/vm-gpu.html","title":"Configure GPU Passthrough for Virtual Machines","text":"

                                                                                  This page will explain the prerequisites for configuring GPU when creating a virtual machine.

                                                                                  The key to configuring GPU for virtual machines is to configure the GPU Operator to deploy different software components on the worker nodes, depending on the GPU workload configuration. Here are three example nodes:

                                                                                  • The controller-node-1 node is configured to run containers.
                                                                                  • The work-node-1 node is configured to run virtual machines with GPU passthrough.
• The work-node-2 node is configured to run virtual machines with vGPU.
                                                                                  "},{"location":"en/admin/virtnest/gpu/vm-gpu.html#assumptions-limitations-and-dependencies","title":"Assumptions, Limitations, and Dependencies","text":"

                                                                                  The worker nodes can run GPU-accelerated containers, virtual machines with GPU passthrough, or virtual machines with vGPU. However, a combination of any of these is not supported.

                                                                                  1. The cluster administrator or developer needs to have prior knowledge of the cluster and correctly label the nodes to indicate the type of GPU workload they will run.
                                                                                  2. The worker node that runs a GPU-accelerated virtual machine with GPU passthrough or vGPU is assumed to be a bare metal machine. If the worker node is a virtual machine, the GPU passthrough feature needs to be enabled on the virtual machine platform. Please consult your virtual machine platform provider for guidance.
                                                                                  3. Nvidia MIG is not supported for vGPU.
                                                                                  4. The GPU Operator does not automatically install GPU drivers in the virtual machine.
                                                                                  "},{"location":"en/admin/virtnest/gpu/vm-gpu.html#enable-iommu","title":"Enable IOMMU","text":"

                                                                                  To enable GPU passthrough, the cluster nodes need to have IOMMU enabled. Refer to How to Enable IOMMU. If your cluster is running on a virtual machine, please consult your virtual machine platform provider.

                                                                                  "},{"location":"en/admin/virtnest/gpu/vm-gpu.html#label-the-cluster-nodes","title":"Label the Cluster Nodes","text":"

                                                                                  Go to Container Management, select your worker cluster, click Node Management, and then click Modify Labels in the action bar to add labels to the nodes. Each node can only have one label.

                                                                                  You can assign the following values to the labels: container, vm-passthrough, and vm-vgpu.
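For example, using the node names from this page (the label key below is the one the GPU Operator checks for sandbox workloads, as also used in later steps):

kubectl label node controller-node-1 nvidia.com/gpu.workload.config=container\nkubectl label node work-node-1 nvidia.com/gpu.workload.config=vm-passthrough\nkubectl label node work-node-2 nvidia.com/gpu.workload.config=vm-vgpu\n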

                                                                                  "},{"location":"en/admin/virtnest/gpu/vm-gpu.html#install-nvidia-operator","title":"Install Nvidia Operator","text":"
                                                                                  1. Go to Container Management, select your worker cluster, click Helm Apps -> Helm Chart , and choose and install gpu-operator. Modify the relevant fields in the yaml.

gpu-operator.sandboxWorkloads.enabled=true\ngpu-operator.vfioManager.enabled=true\ngpu-operator.sandboxDevicePlugin.enabled=true\ngpu-operator.sandboxDevicePlugin.version=v1.2.4   # version should be >= v1.2.4\ngpu-operator.toolkit.version=v1.14.3-ubuntu20.04\n
                                                                                  2. Wait for the installation to succeed, as shown in the following image:

                                                                                  "},{"location":"en/admin/virtnest/gpu/vm-gpu.html#install-virtnest-agent-and-configure-cr","title":"Install virtnest-agent and Configure CR","text":"
                                                                                  1. Install virtnest-agent, refer to Install virtnest-agent.

                                                                                  2. Add vGPU and GPU passthrough to the Virtnest Kubevirt CR. The following example shows the relevant yaml after adding vGPU and GPU passthrough:

spec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n      - GPU\n      - DisableMDEVConfiguration\n    permittedHostDevices: # (1)!\n      mediatedDevices:            # (2)!\n      - mdevNameSelector: GRID P4-1Q\n        resourceName: nvidia.com/GRID_P4-1Q\n      pciHostDevices:             # (3)!\n      - externalResourceProvider: true\n        pciVendorSelector: 10DE:1BB3\n        resourceName: nvidia.com/GP104GL_TESLA_P4\n
                                                                                    1. The following information needs to be filled in
                                                                                    2. vGPU
                                                                                    3. GPU passthrough
                                                                                  3. In the kubevirt CR yaml, permittedHostDevices is used to import VM devices. For vGPU, mediatedDevices needs to be added, with the following structure:

                                                                                    mediatedDevices:          \n- mdevNameSelector: GRID P4-1Q          # (1)!\n  resourceName: nvidia.com/GRID_P4-1Q   # (2)!\n
                                                                                    1. Device name
                                                                                    2. vGPU information registered by GPU Operator on the node
                                                                                  4. For GPU passthrough, pciHostDevices needs to be added under permittedHostDevices, with the following structure:

                                                                                    pciHostDevices:           \n- externalResourceProvider: true            # (1)!\n  pciVendorSelector: 10DE:1BB3              # (2)!\n  resourceName: nvidia.com/GP104GL_TESLA_P4 # (3)!\n
                                                                                    1. Do not change this by default
                                                                                    2. Vendor ID of the current PCI device
                                                                                    3. GPU information registered by GPU Operator on the node
5. Example of obtaining vGPU information (only applicable to vGPU): view the information of a node labeled nvidia.com/gpu.workload.config=vm-vgpu, such as work-node-2. In the Capacity section, nvidia.com/GRID_P4-1Q: 8 indicates that 8 vGPUs are available:

                                                                                    kubectl describe node work-node-2\n
Capacity:\n  cpu:                                 64\n  devices.kubevirt.io/kvm:             1k\n  devices.kubevirt.io/tun:             1k\n  devices.kubevirt.io/vhost-net:       1k\n  ephemeral-storage:                   102626232Ki\n  hugepages-1Gi:                       0\n  hugepages-2Mi:                       0\n  memory:                              264010840Ki\n  nvidia.com/GRID_P4-1Q:               8\n  pods:                                110\nAllocatable:\n  cpu:                                  64\n  devices.kubevirt.io/kvm:              1k\n  devices.kubevirt.io/tun:              1k\n  devices.kubevirt.io/vhost-net:        1k\n  ephemeral-storage:                    94580335255\n  hugepages-1Gi:                        0\n  hugepages-2Mi:                        0\n  memory:                               263908440Ki\n  nvidia.com/GRID_P4-1Q:                8\n  pods:                                 110\n

In this case, the mdevNameSelector should be \"GRID P4-1Q\" and the resourceName should be \"nvidia.com/GRID_P4-1Q\".
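To check just this resource without reading the full output, you can filter the describe output (node name from the example above):

kubectl describe node work-node-2 | grep GRID_P4-1Q\n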

6. Get GPU passthrough information: on the node labeled nvidia.com/gpu.workload.config=vm-passthrough (work-node-1 in this example), view the node information. In the Capacity section, nvidia.com/GP104GL_TESLA_P4: 2 indicates that 2 GPUs are available for passthrough:

                                                                                    kubectl describe node work-node-1\n
                                                                                    Capacity:\n  cpu:                            64\n  devices.kubevirt.io/kvm:        1k\n  devices.kubevirt.io/tun:        1k\n  devices.kubevirt.io/vhost-net:  1k\n  ephemeral-storage:              102626232Ki\n  hugepages-1Gi:                  0\n  hugepages-2Mi:                  0\n  memory:                         264010840Ki\n  nvidia.com/GP104GL_TESLA_P4:    2\n  pods:                           110\nAllocatable:\n  cpu:                            64\n  devices.kubevirt.io/kvm:        1k\n  devices.kubevirt.io/tun:        1k\n  devices.kubevirt.io/vhost-net:  1k\n  ephemeral-storage:              94580335255\n  hugepages-1Gi:                  0\n  hugepages-2Mi:                  0\n  memory:                         263908440Ki\n  nvidia.com/GP104GL_TESLA_P4:    2\n  pods:                           110\n

In this case, the resourceName should be \"nvidia.com/GP104GL_TESLA_P4\". To obtain the pciVendorSelector, use SSH to log in to the target node work-node-1 and run the lspci -nnk -d 10de: command to get the Nvidia GPU PCI information; the vendor:device pair in brackets in the output is the pciVendorSelector value.
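The output looks roughly like the following (the PCI bus address and revision here are illustrative):

lspci -nnk -d 10de:\n# 3d:00.0 3D controller [0302]: NVIDIA Corporation GP104GL [Tesla P4] [10de:1bb3] (rev a1)\n# The bracketed pair 10de:1bb3 is the pciVendorSelector value\n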

7. Edit the kubevirt CR. Note: if there are multiple GPUs of the same model, you only need to write one entry in the CR; there is no need to list every GPU.

                                                                                    kubectl -n virtnest-system edit kubevirt kubevirt\n
                                                                                    spec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n      - GPU\n      - DisableMDEVConfiguration\n    permittedHostDevices: # (1)!\n      mediatedDevices:                    # (2)!\n      - mdevNameSelector: GRID P4-1Q\n        resourceName: nvidia.com/GRID_P4-1Q\n      pciHostDevices:                     # (3)!\n      - externalResourceProvider: true\n        pciVendorSelector: 10DE:1BB3\n        resourceName: nvidia.com/GP104GL_TESLA_P4 \n

                                                                                    1. The following information needs to be filled in
                                                                                    2. vGPU
3. GPU passthrough; in the example above, there are two TESLA P4 GPUs, so only one needs to be registered here
                                                                                  "},{"location":"en/admin/virtnest/gpu/vm-gpu.html#create-vm-using-yaml-and-enable-gpu-acceleration","title":"Create VM Using YAML and Enable GPU Acceleration","text":"

                                                                                  The only difference from a regular virtual machine is adding GPU-related information in the devices section.

                                                                                  Click to view the complete YAML
apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  name: testvm-gpu1\n  namespace: default\nspec:\n  dataVolumeTemplates:\n  - metadata:\n      creationTimestamp: null\n      name: systemdisk-testvm-gpu1\n      namespace: default\n    spec:\n      pvc:\n        accessModes:\n        - ReadWriteOnce\n        resources:\n          requests:\n            storage: 10Gi\n        storageClassName: www\n      source:\n        registry:\n          url: docker://release-ci.daocloud.io/virtnest/system-images/debian-12-x86_64:v1\n  runStrategy: Manual\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n          - bootOrder: 1\n            disk:\n              bus: virtio\n            name: systemdisk-testvm-gpu1\n          - disk:\n              bus: virtio\n            name: cloudinitdisk\n          gpus:\n          - deviceName: nvidia.com/GP104GL_TESLA_P4\n            name: gpu-0-0\n          - deviceName: nvidia.com/GP104GL_TESLA_P4\n            name: gpu-0-1\n          interfaces:\n          - masquerade: {}\n            name: default\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n      - name: default\n        pod: {}\n      volumes:\n      - dataVolume:\n          name: systemdisk-testvm-gpu1\n        name: systemdisk-testvm-gpu1\n      - cloudInitNoCloud:\n          userDataBase64: I2Nsb3VkLWNvbmZpZwpzc2hfcHdhdXRoOiB0cnVlCmRpc2FibGVfcm9vdDogZmFsc2UKY2hwYXNzd2Q6IHsibGlzdCI6ICJyb290OmRhbmdlcm91cyIsIGV4cGlyZTogRmFsc2V9CgoKcnVuY21kOgogIC0gc2VkIC1pICIvI1w/UGVybWl0Um9vdExvZ2luL3MvXi4qJC9QZXJtaXRSb290TG9naW4geWVzL2ciIC9ldGMvc3NoL3NzaGRfY29uZmlnCiAgLSBzeXN0ZW1jdGwgcmVzdGFydCBzc2guc2VydmljZQ==\n        name: cloudinitdisk\n
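As in the earlier examples, runStrategy is Manual, so the virtual machine must be started explicitly once the DataVolume import finishes (a sketch assuming virtctl is installed); inside the guest, the passthrough GPUs should then show up on the PCI bus:

virtctl start testvm-gpu1 -n default\nvirtctl console testvm-gpu1 -n default   # log in, then run: lspci | grep -i nvidia\n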
                                                                                  "},{"location":"en/admin/virtnest/gpu/vm-vgpu.html","title":"Configure GPU (vGPU) for Virtual Machines","text":"

                                                                                  This page will explain the prerequisites for configuring GPU when creating a virtual machine.

                                                                                  The key to configuring GPU for virtual machines is to configure the GPU Operator to deploy different software components on the worker nodes, depending on the GPU workload configuration. Here are three example nodes:

                                                                                  • The controller-node-1 node is configured to run containers.
                                                                                  • The work-node-1 node is configured to run virtual machines with GPU passthrough.
• The work-node-2 node is configured to run virtual machines with vGPU.
                                                                                  "},{"location":"en/admin/virtnest/gpu/vm-vgpu.html#assumptions-limitations-and-dependencies","title":"Assumptions, Limitations, and Dependencies","text":"

                                                                                  The worker nodes can run GPU-accelerated containers, virtual machines with GPU passthrough, or virtual machines with vGPU. However, a combination of any of these is not supported.

1. The cluster administrator or developer needs to have prior knowledge of the cluster and correctly label the nodes to indicate the type of GPU workload they will run.
2. The worker node that runs a GPU-accelerated virtual machine with GPU passthrough or vGPU is assumed to be a bare metal machine. If the worker node is a virtual machine, the GPU passthrough feature needs to be enabled on the virtual machine platform. Please consult your virtual machine platform provider for guidance.
3. Nvidia MIG is not supported for vGPU.
4. The GPU Operator does not automatically install GPU drivers in the virtual machine.
                                                                                  "},{"location":"en/admin/virtnest/gpu/vm-vgpu.html#enable-iommu","title":"Enable IOMMU","text":"

                                                                                  To enable GPU passthrough, the cluster nodes need to have IOMMU enabled. Please refer to How to Enable IOMMU. If your cluster is running on a virtual machine, please consult your virtual machine platform provider.

                                                                                  "},{"location":"en/admin/virtnest/gpu/vm-vgpu.html#build-vgpu-manager-image","title":"Build vGPU Manager Image","text":"

                                                                                  Note: This step is only required when using NVIDIA vGPU. If you plan to use GPU passthrough only, skip this section.

                                                                                  Follow these steps to build the vGPU Manager image and push it to the container registry:

                                                                                  1. Download the vGPU software from the NVIDIA Licensing Portal.

                                                                                    • Log in to the NVIDIA Licensing Portal and go to the Software Downloads page.
                                                                                    • The NVIDIA vGPU software is located in the Driver downloads tab on the Software Downloads page.
• Select VGPU + Linux in the filter criteria and click Download to get the Linux KVM package. Unzip the downloaded file to obtain NVIDIA-Linux-x86_64-<version>-vgpu-kvm.run.
                                                                                  2. Open a terminal and clone the container-images/driver repository.

git clone https://gitlab.com/nvidia/container-images/driver\ncd driver\n
3. Switch to the vgpu-manager directory corresponding to your operating system.

                                                                                    cd vgpu-manager/<your-os>\n
                                                                                  4. Copy the .run file extracted in step 1 to the current directory.

                                                                                    cp <local-driver-download-directory>/*-vgpu-kvm.run ./\n
                                                                                  5. Set the environment variables.

                                                                                    • PRIVATE_REGISTRY: The name of the private registry to store the driver image.
                                                                                    • VERSION: The version of the NVIDIA vGPU Manager, downloaded from the NVIDIA Software Portal.
                                                                                    • OS_TAG: Must match the operating system version of the cluster nodes.
                                                                                    • CUDA_VERSION: The base CUDA image version used to build the driver image.
                                                                                    export PRIVATE_REGISTRY=my/private/registry VERSION=510.73.06 OS_TAG=ubuntu22.04 CUDA_VERSION=12.2.0\n
                                                                                  6. Build the NVIDIA vGPU Manager Image.

docker build \\\n  --build-arg DRIVER_VERSION=${VERSION} \\\n  --build-arg CUDA_VERSION=${CUDA_VERSION} \\\n  -t ${PRIVATE_REGISTRY}/vgpu-manager:${VERSION}-${OS_TAG} .\n
                                                                                  7. Push the NVIDIA vGPU Manager image to your container registry.

                                                                                    docker push ${PRIVATE_REGISTRY}/vgpu-manager:${VERSION}-${OS_TAG}\n
                                                                                  "},{"location":"en/admin/virtnest/gpu/vm-vgpu.html#label-the-cluster-nodes","title":"Label the Cluster Nodes","text":"

                                                                                  Go to Container Management, select your worker cluster, click Node Management, and then click Modify Labels in the action bar to add labels to the nodes. Each node can only have one label.

                                                                                  You can assign the following values to the labels: container, vm-passthrough, and vm-vgpu.
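If you prefer the CLI over the UI, a hedged equivalent using kubectl might look like this (node names are illustrative; the label key nvidia.com/gpu.workload.config is the one referenced in later steps):

kubectl label node work-node-1 nvidia.com/gpu.workload.config=vm-passthrough\nkubectl label node work-node-2 nvidia.com/gpu.workload.config=vm-vgpu\n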

                                                                                  "},{"location":"en/admin/virtnest/gpu/vm-vgpu.html#install-nvidia-operator","title":"Install Nvidia Operator","text":"
                                                                                  1. Go to Container Management, select your worker cluster, click Helm Apps -> Helm Chart, and choose and install gpu-operator. Modify the relevant fields in the yaml.

                                                                                    gpu-operator.sandboxWorkloads.enabled=true\ngpu-operator.vgpuManager.enabled=true\ngpu-operator.vgpuManager.repository=<your-register-url>      # (1)!\ngpu-operator.vgpuManager.image=vgpu-manager\ngpu-operator.vgpuManager.version=<your-vgpu-manager-version> # (2)!\ngpu-operator.vgpuDeviceManager.enabled=true\n
                                                                                    1. The container registry address from the \"Build vGPU Manager Image\" step.
                                                                                    2. The VERSION from the \"Build vGPU Manager Image\" step.
                                                                                  2. Wait for the installation to succeed, as shown in the following image:

                                                                                  "},{"location":"en/admin/virtnest/gpu/vm-vgpu.html#install-virtnest-agent-and-configure-cr","title":"Install virtnest-agent and Configure CR","text":"
                                                                                  1. Install virtnest-agent, refer to Install virtnest-agent.

                                                                                  2. Add vGPU and GPU passthrough to the Virtnest Kubevirt CR. The following example shows the relevant yaml after adding vGPU and GPU passthrough:

spec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n      - GPU\n      - DisableMDEVConfiguration\n    permittedHostDevices: # (1)!\n      mediatedDevices:            # (2)!\n      - mdevNameSelector: GRID P4-1Q\n        resourceName: nvidia.com/GRID_P4-1Q\n      pciHostDevices:             # (3)!\n      - externalResourceProvider: true\n        pciVendorSelector: 10DE:1BB3\n        resourceName: nvidia.com/GP104GL_TESLA_P4\n
                                                                                    1. The following information needs to be filled in
                                                                                    2. vGPU
                                                                                    3. GPU passthrough
                                                                                  3. In the kubevirt CR yaml, permittedHostDevices is used to import VM devices. For vGPU, mediatedDevices needs to be added, with the following structure:

                                                                                    mediatedDevices:          \n- mdevNameSelector: GRID P4-1Q          # (1)!\n  resourceName: nvidia.com/GRID_P4-1Q   # (2)!\n
                                                                                    1. Device name
                                                                                    2. vGPU information registered by the GPU Operator on the node
                                                                                  4. For GPU passthrough, pciHostDevices needs to be added under permittedHostDevices, with the following structure:

                                                                                    pciHostDevices:           \n- externalResourceProvider: true            # (1)!\n  pciVendorSelector: 10DE:1BB3              # (2)!\n  resourceName: nvidia.com/GP104GL_TESLA_P4 # (3)!\n
                                                                                    1. Do not change this by default
                                                                                    2. Vendor ID of the current PCI device
                                                                                    3. GPU information registered by the GPU Operator on the node
5. Example of obtaining vGPU information (only applicable to vGPU): view the information of the node labeled nvidia.com/gpu.workload.config=vm-vgpu, such as work-node-2. In its Capacity section, nvidia.com/GRID_P4-1Q: 8 indicates 8 available vGPUs:

                                                                                    kubectl describe node work-node-2\n
Capacity:\n  cpu:                                 64\n  devices.kubevirt.io/kvm:             1k\n  devices.kubevirt.io/tun:             1k\n  devices.kubevirt.io/vhost-net:       1k\n  ephemeral-storage:                   102626232Ki\n  hugepages-1Gi:                       0\n  hugepages-2Mi:                       0\n  memory:                              264010840Ki\n  nvidia.com/GRID_P4-1Q:               8\n  pods:                                110\nAllocatable:\n  cpu:                                  64\n  devices.kubevirt.io/kvm:              1k\n  devices.kubevirt.io/tun:              1k\n  devices.kubevirt.io/vhost-net:        1k\n  ephemeral-storage:                    94580335255\n  hugepages-1Gi:                        0\n  hugepages-2Mi:                        0\n  memory:                               263908440Ki\n  nvidia.com/GRID_P4-1Q:                8\n  pods:                                 110\n

                                                                                    In this case, the mdevNameSelector should be \"GRID P4-1Q\" and the resourceName should be \"GRID_P4-1Q\".

6. Get GPU passthrough information: on the node labeled nvidia.com/gpu.workload.config=vm-passthrough (work-node-1 in this example), view the node information. In the Capacity section, nvidia.com/GP104GL_TESLA_P4: 2 indicates 2 GPUs available for passthrough:

                                                                                    kubectl describe node work-node-1\n
                                                                                    Capacity:\n  cpu:                            64\n  devices.kubevirt.io/kvm:        1k\n  devices.kubevirt.io/tun:        1k\n  devices.kubevirt.io/vhost-net:  1k\n  ephemeral-storage:              102626232Ki\n  hugepages-1Gi:                  0\n  hugepages-2Mi:                  0\n  memory:                         264010840Ki\n  nvidia.com/GP104GL_TESLA_P4:    2\n  pods:                           110\nAllocatable:\n  cpu:                            64\n  devices.kubevirt.io/kvm:        1k\n  devices.kubevirt.io/tun:        1k\n  devices.kubevirt.io/vhost-net:  1k\n  ephemeral-storage:              94580335255\n  hugepages-1Gi:                  0\n  hugepages-2Mi:                  0\n  memory:                         263908440Ki\n  nvidia.com/GP104GL_TESLA_P4:    2\n  pods:                           110\n

In this case, the resourceName should be \"GP104GL_TESLA_P4\". To obtain the pciVendorSelector, SSH into the target node work-node-1 and run the lspci -nnk -d 10de: command to list the NVIDIA GPU PCI information; the vendor:device ID pair in the output is the pciVendorSelector value.
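A sketch of what that output might look like for a Tesla P4 (the PCI address and revision are illustrative; the bracketed vendor:device pair maps to pciVendorSelector 10DE:1BB3):

$ lspci -nnk -d 10de:\naf:00.0 3D controller [0302]: NVIDIA Corporation GP104GL [Tesla P4] [10de:1bb3] (rev a1)\n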

7. Edit the kubevirt CR. Note: if there are multiple GPUs of the same model, you only need to write one entry in the CR; there is no need to list every GPU.

                                                                                    kubectl -n virtnest-system edit kubevirt kubevirt\n
                                                                                    spec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n      - GPU\n      - DisableMDEVConfiguration\n    permittedHostDevices: # (1)!\n      mediatedDevices:                    # (2)!\n      - mdevNameSelector: GRID P4-1Q\n        resourceName: nvidia.com/GRID_P4-1Q\n      pciHostDevices:                       # (3)!\n      - externalResourceProvider: true\n        pciVendorSelector: 10DE:1BB3\n        resourceName: nvidia.com/GP104GL_TESLA_P4 \n

                                                                                    1. The following information needs to be filled in
                                                                                    2. vGPU
3. GPU passthrough; in the example above, there are two TESLA P4 GPUs, so only one needs to be registered here
                                                                                  "},{"location":"en/admin/virtnest/gpu/vm-vgpu.html#create-vm-using-yaml-and-enable-gpu-acceleration","title":"Create VM Using YAML and Enable GPU Acceleration","text":"

The only difference from a regular virtual machine is adding the GPU-related information in the devices section.

                                                                                  Click to view the complete YAML
apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  name: testvm-gpu1\n  namespace: default\nspec:\n  dataVolumeTemplates:\n  - metadata:\n      creationTimestamp: null\n      name: systemdisk-testvm-gpu1\n      namespace: default\n    spec:\n      pvc:\n        accessModes:\n        - ReadWriteOnce\n        resources:\n          requests:\n            storage: 10Gi\n        storageClassName: www\n      source:\n        registry:\n          url: docker://release-ci.daocloud.io/virtnest/system-images/debian-12-x86_64:v1\n  runStrategy: Manual\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n          - bootOrder: 1\n            disk:\n              bus: virtio\n            name: systemdisk-testvm-gpu1\n          - disk:\n              bus: virtio\n            name: cloudinitdisk\n          gpus:\n          - deviceName: nvidia.com/GP104GL_TESLA_P4\n            name: gpu-0-0\n          - deviceName: nvidia.com/GP104GL_TESLA_P4\n            name: gpu-0-1\n          interfaces:\n          - masquerade: {}\n            name: default\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n      - name: default\n        pod: {}\n      volumes:\n      - dataVolume:\n          name: systemdisk-testvm-gpu1\n        name: systemdisk-testvm-gpu1\n      - cloudInitNoCloud:\n          userDataBase64: I2Nsb3VkLWNvbmZpZwpzc2hfcHdhdXRoOiB0cnVlCmRpc2FibGVfcm9vdDogZmFsc2UKY2hwYXNzd2Q6IHsibGlzdCI6ICJyb290OmRhbmdlcm91cyIsIGV4cGlyZTogRmFsc2V9CgoKcnVuY21kOgogIC0gc2VkIC1pICIvI1w/UGVybWl0Um9vdExvZ2luL3MvXi4qJC9QZXJtaXRSb290TG9naW4geWVzL2ciIC9ldGMvc3NoL3NzaGRfY29uZmlnCiAgLSBzeXN0ZW1jdGwgcmVzdGFydCBzc2guc2VydmljZQ==\n        name: cloudinitdisk\n
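Assuming the YAML above is saved as testvm-gpu1.yaml, you can apply it and, because runStrategy is Manual, start the VM explicitly (this sketch assumes the KubeVirt virtctl CLI is installed):

kubectl apply -f testvm-gpu1.yaml\nvirtctl start testvm-gpu1 -n default\nkubectl get vmi testvm-gpu1 -n default  # verify the VM instance is running\n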
                                                                                  "},{"location":"en/admin/virtnest/install/index.html","title":"Install Virtual Machine Module","text":"

                                                                                  This page explains how to install the virtual machine module.

                                                                                  Info

                                                                                  The term virtnest appearing in the following commands or scripts is the internal development code name for the Virtual Machine module.

                                                                                  "},{"location":"en/admin/virtnest/install/index.html#configure-helm-repo","title":"Configure Helm Repo","text":"

                                                                                  Helm-charts repository address: https://release.daocloud.io/harbor/projects/10/helm-charts/virtnest/versions

                                                                                  helm repo add virtnest-release https://release.daocloud.io/chartrepo/virtnest\nhelm repo update virtnest-release\n

If you want to try the latest development version of virtnest, add the following repository address instead (development versions of virtnest are extremely unstable).

                                                                                  helm repo add virtnest-release-ci https://release-ci.daocloud.io/chartrepo/virtnest\nhelm repo update virtnest-release-ci\n
                                                                                  "},{"location":"en/admin/virtnest/install/index.html#choose-a-version-that-you-want-to-install","title":"Choose a Version that You Want to Install","text":"

                                                                                  It is recommended to install the latest version.

                                                                                  [root@master ~]# helm search repo virtnest-release/virtnest --versions\nNAME                   CHART VERSION  APP VERSION  DESCRIPTION\nvirtnest-release/virtnest  0.6.0          v0.6.0       A Helm chart for virtnest\n
                                                                                  "},{"location":"en/admin/virtnest/install/index.html#create-a-namespace","title":"Create a Namespace","text":"
                                                                                  kubectl create namespace virtnest-system\n
                                                                                  "},{"location":"en/admin/virtnest/install/index.html#perform-installation-steps","title":"Perform Installation Steps","text":"
                                                                                  helm install virtnest virtnest-release/virtnest -n virtnest-system --version 0.6.0\n
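Optionally, verify the installation from the command line (pod names vary by version):

kubectl -n virtnest-system get pods\nhelm status virtnest -n virtnest-system\n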
                                                                                  "},{"location":"en/admin/virtnest/install/index.html#upgrade","title":"Upgrade","text":""},{"location":"en/admin/virtnest/install/index.html#update-the-virtnest-helm-repository","title":"Update the virtnest Helm Repository","text":"
                                                                                  helm repo update virtnest-release\n
                                                                                  "},{"location":"en/admin/virtnest/install/index.html#back-up-the-set-parameters","title":"Back up the --set Parameters","text":"

Before upgrading the virtnest version, we recommend running the following command to back up the --set parameters of the previous version:

                                                                                  helm get values virtnest -n virtnest-system -o yaml > bak.yaml\n
                                                                                  "},{"location":"en/admin/virtnest/install/index.html#perform-helm-upgrade","title":"Perform Helm Upgrade","text":"
                                                                                  helm upgrade virtnest virtnest-release/virtnest \\\n    -n virtnest-system \\\n    -f ./bak.yaml \\\n    --version 0.6.0\n
                                                                                  "},{"location":"en/admin/virtnest/install/index.html#delete","title":"Delete","text":"
                                                                                  helm delete virtnest -n virtnest-system\n
                                                                                  "},{"location":"en/admin/virtnest/install/install-dependency.html","title":"Dependencies and Prerequisites","text":"

This page explains the dependencies and prerequisites for installing the virtual machine module.

                                                                                  Info

The term virtnest mentioned in the commands or scripts below is the internal development codename for the Virtual Machine module.

                                                                                  "},{"location":"en/admin/virtnest/install/install-dependency.html#prerequisites","title":"Prerequisites","text":""},{"location":"en/admin/virtnest/install/install-dependency.html#kernel-version-being-above-v411","title":"Kernel version being above v4.11","text":"

The kernel version of all nodes in the target cluster needs to be higher than v4.11. For detailed information, see the kubevirt issue. Run the following command to check the version:

                                                                                  uname -a\n

                                                                                  Example output:

                                                                                  Linux master 6.5.3-1.el7.elrepo.x86_64 #1 SMP PREEMPT_DYNAMIC Wed Sep 13 11:46:28 EDT 2023 x86_64 x86_64 x86_64 GNU/Linux\n
                                                                                  "},{"location":"en/admin/virtnest/install/install-dependency.html#cpu-supporting-x86-64-v2-instruction-set-or-higher","title":"CPU supporting x86-64-v2 instruction set or higher","text":"

                                                                                  You can use the following script to check if the current node's CPU is usable:

                                                                                  Note

                                                                                  If you encounter a message like the one shown below, you can safely ignore it as it does not impact the final result.

Example
                                                                                  $ sh detect-cpu.sh\ndetect-cpu.sh: line 3: fpu: command not found\n
cat <<'EOF' > detect-cpu.sh\n#!/bin/sh -eu\n\nflags=$(cat /proc/cpuinfo | grep flags | head -n 1 | cut -d: -f2)\n\nsupports_v2='awk \"/cx16/&&/lahf/&&/popcnt/&&/sse4_1/&&/sse4_2/&&/ssse3/ {found=1} END {exit !found}\"'\nsupports_v3='awk \"/avx/&&/avx2/&&/bmi1/&&/bmi2/&&/f16c/&&/fma/&&/abm/&&/movbe/&&/xsave/ {found=1} END {exit !found}\"'\nsupports_v4='awk \"/avx512f/&&/avx512bw/&&/avx512cd/&&/avx512dq/&&/avx512vl/ {found=1} END {exit !found}\"'\n\necho \"$flags\" | eval $supports_v2 || exit 2 && echo \"CPU supports x86-64-v2\"\necho \"$flags\" | eval $supports_v3 || exit 3 && echo \"CPU supports x86-64-v3\"\necho \"$flags\" | eval $supports_v4 || exit 4 && echo \"CPU supports x86-64-v4\"\nEOF\nchmod +x detect-cpu.sh\nsh detect-cpu.sh\n
                                                                                  "},{"location":"en/admin/virtnest/install/install-dependency.html#all-nodes-having-hardware-virtualization-nested-virtualization-enabled","title":"All Nodes having hardware virtualization (nested virtualization) enabled","text":"
• Run the following command to check whether hardware virtualization is enabled:

                                                                                    virt-host-validate qemu\n
                                                                                    # Successful case\nQEMU: Checking for hardware virtualization                                 : PASS\nQEMU: Checking if device /dev/kvm exists                                   : PASS\nQEMU: Checking if device /dev/kvm is accessible                            : PASS\nQEMU: Checking if device /dev/vhost-net exists                             : PASS\nQEMU: Checking if device /dev/net/tun exists                               : PASS\nQEMU: Checking for cgroup 'cpu' controller support                         : PASS\nQEMU: Checking for cgroup 'cpuacct' controller support                     : PASS\nQEMU: Checking for cgroup 'cpuset' controller support                      : PASS\nQEMU: Checking for cgroup 'memory' controller support                      : PASS\nQEMU: Checking for cgroup 'devices' controller support                     : PASS\nQEMU: Checking for cgroup 'blkio' controller support                       : PASS\nQEMU: Checking for device assignment IOMMU support                         : PASS\nQEMU: Checking if IOMMU is enabled by kernel                               : PASS\nQEMU: Checking for secure guest support                                    : WARN (Unknown if this platform has Secure Guest support)\n\n# Failure case\nQEMU: Checking for hardware virtualization                                 : FAIL (Only emulated CPUs are available, performance will be significantly limited)\nQEMU: Checking if device /dev/vhost-net exists                             : PASS\nQEMU: Checking if device /dev/net/tun exists                               : PASS\nQEMU: Checking for cgroup 'memory' controller support                      : PASS\nQEMU: Checking for cgroup 'memory' controller mount-point                  : PASS\nQEMU: Checking for cgroup 'cpu' controller support                         : PASS\nQEMU: Checking for cgroup 'cpu' controller mount-point                     : PASS\nQEMU: Checking for cgroup 'cpuacct' controller support                     : PASS\nQEMU: Checking for cgroup 'cpuacct' controller mount-point                 : PASS\nQEMU: Checking for cgroup 'cpuset' controller support                      : PASS\nQEMU: Checking for cgroup 'cpuset' controller mount-point                  : PASS\nQEMU: Checking for cgroup 'devices' controller support                     : PASS\nQEMU: Checking for cgroup 'devices' controller mount-point                 : PASS\nQEMU: Checking for cgroup 'blkio' controller support                       : PASS\nQEMU: Checking for cgroup 'blkio' controller mount-point                   : PASS\nWARN (Unknown if this platform has IOMMU support)\n
                                                                                  • Install virt-host-validate:

On CentOS:

yum install -y qemu-kvm libvirt virt-install bridge-utils\n

On Ubuntu:

apt install qemu-kvm libvirt-daemon-system libvirt-clients bridge-utils\n
                                                                                  • Methods to enable hardware virtualization

Methods vary across platforms; this page takes vSphere as an example. See the VMware website.

                                                                                  "},{"location":"en/admin/virtnest/install/install-dependency.html#if-using-docker-engine-as-the-container-runtime","title":"If using Docker Engine as the container runtime","text":"

If Docker Engine is used as the container runtime, its version must be higher than v20.10.10.
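A quick, hedged way to check the Docker Engine version on a node:

docker version --format '{{.Server.Version}}'  # should print a version higher than 20.10.10\n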

                                                                                  "},{"location":"en/admin/virtnest/install/install-dependency.html#enabling-iommu-is-recommended","title":"Enabling IOMMU is recommended","text":"

To prepare for future features, it is recommended to enable IOMMU.
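A hedged way to check whether IOMMU is currently active on a node (messages vary by platform and kernel; on Intel hosts the kernel parameter is typically intel_iommu=on, on AMD hosts amd_iommu=on):

dmesg | grep -i -e DMAR -e IOMMU\ncat /proc/cmdline  # look for intel_iommu=on or amd_iommu=on\n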

                                                                                  "},{"location":"en/admin/virtnest/install/offline-install.html","title":"Offline Upgrade of the Virtual Machine Module","text":"

                                                                                  This page explains how to install or upgrade the Virtual Machine module after downloading it from the Download Center.

                                                                                  Info

                                                                                  The term \"virtnest\" appearing in the following commands or scripts is the internal development code name for the Virtual Machine module.

                                                                                  "},{"location":"en/admin/virtnest/install/offline-install.html#load-images-from-the-installation-package","title":"Load Images from the Installation Package","text":"

You can load the images using one of the following two methods. When a container registry is available in your environment, the chart-syncer method is recommended for synchronizing the images to the registry, as it is more efficient and convenient.

                                                                                  "},{"location":"en/admin/virtnest/install/offline-install.html#synchronize-images-to-the-container-registry-using-chart-syncer","title":"Synchronize Images to the container registry using chart-syncer","text":"
                                                                                  1. Create load-image.yaml file.

                                                                                    Note

All parameters in this YAML file are mandatory. You need a private container registry; modify the relevant configuration accordingly.

Chart Repo Installed

                                                                                    If the chart repo is already installed in your environment, chart-syncer also supports exporting the chart as a tgz file.

                                                                                    load-image.yaml
                                                                                    source:\n  intermediateBundlesPath: virtnest-offline # (1)\ntarget:\n  containerRegistry: 10.16.10.111 # (2)\n  containerRepository: release.daocloud.io/virtnest # (3)\n  repo:\n    kind: HARBOR # (4)\n    url: http://10.16.10.111/chartrepo/release.daocloud.io # (5)\n    auth:\n      username: \"admin\" # (6)\n      password: \"Harbor12345\" # (7)\n  containers:\n    auth:\n      username: \"admin\" # (8)\n      password: \"Harbor12345\" # (9)\n
                                                                                    1. The relative path to run the charts-syncer command, not the relative path between this YAML file and the offline package.
                                                                                    2. Change to your container registry URL.
                                                                                    3. Change to your container registry.
                                                                                    4. It can also be any other supported Helm Chart repository type.
                                                                                    5. Change to the chart repo URL.
                                                                                    6. Your container registry username.
                                                                                    7. Your container registry password.
                                                                                    8. Your container registry username.
                                                                                    9. Your container registry password.

Chart Repo Not Installed

If the chart repo is not installed in your environment, chart-syncer also supports exporting the chart as a tgz file and storing it in the specified path.

                                                                                    load-image.yaml
                                                                                    source:\n  intermediateBundlesPath: virtnest-offline # (1)\ntarget:\n  containerRegistry: 10.16.10.111 # (2)\n  containerRepository: release.daocloud.io/virtnest # (3)\n  repo:\n    kind: LOCAL\n    path: ./local-repo # (4)\n  containers:\n    auth:\n      username: \"admin\" # (5)\n      password: \"Harbor12345\" # (6)\n
                                                                                    1. The relative path to run the charts-syncer command, not the relative path between this YAML file and the offline package.
                                                                                    2. Change to your container registry URL.
                                                                                    3. Change to your container registry.
                                                                                    4. Local path of the chart.
                                                                                    5. Your container registry username.
                                                                                    6. Your container registry password.
                                                                                  2. Run the command to synchronize the images.

                                                                                    charts-syncer sync --config load-image.yaml\n
                                                                                  "},{"location":"en/admin/virtnest/install/offline-install.html#load-images-directly-using-docker-or-containerd","title":"Load Images Directly using Docker or containerd","text":"

                                                                                  Unpack and load the image files.

                                                                                  1. Unpack the tar archive.

                                                                                    tar xvf virtnest.bundle.tar\n

                                                                                    After successful extraction, you will have three files:

                                                                                    • hints.yaml
                                                                                    • images.tar
                                                                                    • original-chart
                                                                                  2. Load the images from the local file to Docker or containerd.

Docker:

docker load -i images.tar\n

containerd:

ctr -n k8s.io image import images.tar\n

                                                                                  Note

                                                                                  Perform the Docker or containerd image loading operation on each node. After loading is complete, tag the images to match the Registry and Repository used during installation.
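For example, a hedged sketch of re-tagging and pushing one image to a private registry (the image name, tag, and registry address are illustrative):

docker tag release.daocloud.io/virtnest/virtnest:v0.6.0 10.16.10.111/virtnest/virtnest:v0.6.0\ndocker push 10.16.10.111/virtnest/virtnest:v0.6.0\n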

                                                                                  "},{"location":"en/admin/virtnest/install/offline-install.html#upgrade","title":"Upgrade","text":"

                                                                                  There are two upgrade methods available. You can choose the appropriate upgrade method based on the prerequisites:

Upgrade via helm repo
                                                                                  1. Check if the Virtual Machine Helm repository exists.

                                                                                    helm repo list | grep virtnest\n

                                                                                    If the result is empty or shows the following message, proceed to the next step. Otherwise, skip the next step.

                                                                                    Error: no repositories to show\n
                                                                                  2. Add the Virtual Machine Helm repository.

                                                                                    helm repo add virtnest http://{harbor url}/chartrepo/{project}\n
                                                                                  3. Update the Virtual Machine Helm repository.

                                                                                    helm repo update virtnest # (1)\n
1. If the helm version is too low, this command may fail. If it fails, try running helm repo update without the repository name.
                                                                                  4. Choose the version of the Virtual Machine you want to install (it is recommended to install the latest version).

                                                                                    helm search repo virtnest/virtnest --versions\n
                                                                                    [root@master ~]# helm search repo virtnest/virtnest --versions\nNAME                   CHART VERSION  APP VERSION  DESCRIPTION\nvirtnest/virtnest  0.2.0          v0.2.0       A Helm chart for virtnest\n...\n
                                                                                  5. Back up the --set parameters.

Before upgrading the Virtual Machine version, it is recommended to run the following command to back up the --set parameters of the previous version.

                                                                                    helm get values virtnest -n virtnest-system -o yaml > bak.yaml\n
                                                                                  6. Update the virtnest CRDs.

                                                                                    helm pull virtnest/virtnest --version 0.2.0 && tar -zxf virtnest-0.2.0.tgz\nkubectl apply -f virtnest/crds\n
                                                                                  7. Run helm upgrade.

                                                                                    Before upgrading, it is recommended to replace the global.imageRegistry field in bak.yaml with the current container registry address.

                                                                                    export imageRegistry={your container registry}\n
                                                                                    helm upgrade virtnest virtnest/virtnest \\\n  -n virtnest-system \\\n  -f ./bak.yaml \\\n  --set global.imageRegistry=$imageRegistry \\\n  --version 0.2.0\n
Upgrade via chart package

1. Back up the --set parameters.

Before upgrading the Virtual Machine version, it is recommended to run the following command to back up the --set parameters of the previous version.

                                                                                    helm get values virtnest -n virtnest-system -o yaml > bak.yaml\n
                                                                                  2. Update the virtnest CRDs.

                                                                                    kubectl apply -f ./crds\n
                                                                                  3. Run helm upgrade.

                                                                                    Before upgrading, it is recommended to replace the global.imageRegistry field in bak.yaml with the current container registry address.

                                                                                    export imageRegistry={your container registry}\n
                                                                                    helm upgrade virtnest . \\\n  -n virtnest-system \\\n  -f ./bak.yaml \\\n  --set global.imageRegistry=$imageRegistry\n
                                                                                  "},{"location":"en/admin/virtnest/install/virtnest-agent.html","title":"Install virtnest-agent in a Cluster","text":"

                                                                                  This guide explains how to install the virtnest-agent in a cluster.

                                                                                  "},{"location":"en/admin/virtnest/install/virtnest-agent.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before installing the virtnest-agent, the following prerequisite must be met:

                                                                                  • The kernel version needs to be v4.11 or above.
                                                                                  "},{"location":"en/admin/virtnest/install/virtnest-agent.html#steps","title":"Steps","text":"

                                                                                  To utilize the Virtual Machine (VM), the virtnest-agent component needs to be installed in the cluster using Helm.

1. Click Container Management in the left navigation menu, then click Virtual Machines . If the virtnest-agent component is not installed, you will not be able to use VMs, and the interface will prompt you to install it in the required cluster.

                                                                                  2. Select the desired cluster, click Helm Apps in the left navigation menu, then click Helm Charts to view the template list.

3. Search for the virtnest-agent component and click it to see the details. Select the appropriate version and click the Install button to install it.

                                                                                  4. On the installation page, fill in the required information, and click OK to finish the installation.

                                                                                  5. Go back to the Virtual Machines in the navigation menu. If the installation is successful, you will see the VM list, and you can now use the VM.
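Optionally, you can double-check from the command line that the agent pods are running; a hedged sketch (namespace and pod names vary by installation):

kubectl get pods -A | grep virtnest-agent\n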

                                                                                  "},{"location":"en/admin/virtnest/quickstart/index.html","title":"Create Virtual Machine","text":"

This article explains how to create a virtual machine using two methods: from an image and from a YAML file.

The Virtual Machine module, based on KubeVirt, manages virtual machines as cloud-native applications that integrate seamlessly with containers. This allows users to easily deploy virtual machine applications and enjoy a smooth experience similar to containerized applications.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/index.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before creating a virtual machine, make sure you meet the following prerequisites:

                                                                                  • Expose hardware-assisted virtualization to the user operating system.
• Install virtnest-agent on the specified cluster; the operating system kernel version must be v4.11 or higher.
                                                                                  • Create a namespace and user.
                                                                                  • Prepare the image in advance. The platform comes with three built-in images (as shown below). If you need to create your own image, refer to creating from an image with KubeVirt.
                                                                                  • When configuring the network, if you choose to use the Passt network mode, you need to upgrade to Version 0.4.0 or higher.
                                                                                  "},{"location":"en/admin/virtnest/quickstart/index.html#create-image","title":"Create image","text":"

                                                                                  Follow the steps below to create a virtual machine using an image.

                                                                                  1. Click Container Management on the left navigation bar, then click Virtual Machines to enter the VM page.

                                                                                  2. On the virtual machine list page, click Create VMs and select Create with Image.

3. Fill in the basic information, image settings, storage and network, and login settings, then click OK at the bottom right corner to complete the creation.

                                                                                    The system will automatically return to the virtual machine list. By clicking the \u2507 button on the right side of the list, you can perform operations such as power on/off, restart, clone, update, create snapshots, console access (VNC), and delete virtual machines. Cloning and snapshot capabilities depend on the selected StorageClass.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/index.html#basic-information","title":"Basic Information","text":"

                                                                                  In the Create VMs page, enter the information according to the table below and click Next.

                                                                                  • Name: Up to 63 characters, can only contain lowercase letters, numbers, and hyphens ( - ), and must start and end with a lowercase letter or number. The name must be unique within the namespace, and cannot be changed once the virtual machine is created.
                                                                                  • Alias: Allows any characters, up to 60 characters.
                                                                                  • Cluster: Select the cluster to deploy the newly created virtual machine.
                                                                                  • Namespace: Select the namespace to deploy the newly created virtual machine. If the desired namespace is not found, you can create a new namespace according to the prompts on the page.
                                                                                  • Label/Annotation: Select the desired labels/annotations to add to the virtual machine.
                                                                                  "},{"location":"en/admin/virtnest/quickstart/index.html#image-settings","title":"Image Settings","text":"

                                                                                  Fill in the image-related information according to the table below, then click Next.

                                                                                  1. Image Source: Supports three types of sources.

                                                                                    • Registry: Images stored in the container registry. You can select images from the registry as needed.
                                                                                    • HTTP: Images stored in a file server using the HTTP protocol, supporting both HTTPS:// and HTTP:// prefixes.
                                                                                    • Object Storage (S3): Virtual machine images obtained through the object storage protocol (S3). For non-authenticated object storage files, please use the HTTP source.
                                                                                  2. The following are the built-in images provided by the platform, including the operating system, version, and the image URL. Custom virtual machine images are also supported.

Operating System | Version | Image Address
CentOS | CentOS 7.9 | release-ci.daocloud.io/virtnest/system-images/centos-7.9-x86_64:v1
Ubuntu | Ubuntu 22.04 | release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1
Debian | Debian 12 | release-ci.daocloud.io/virtnest/system-images/debian-12-x86_64:v1
3. Image Secret: Only the default (Opaque) type of secret is supported; for specific operations, refer to Create Secret.

The built-in images are stored in the bootstrap cluster, and the container registry of the bootstrap cluster is not encrypted, so there is no need to select a secret when choosing a built-in image.

                                                                                  Note

                                                                                  The hot-plug configuration for CPU and memory requires virtnest v0.10.0 or higher, and virtnest-agent v0.7.0 or higher.

                                                                                  1. Resource Config: For CPU, it is recommended to use whole numbers. If a decimal is entered, it will be rounded up. The hot-plug configuration for CPU and memory is supported.

                                                                                  2. GPU Configuration: Enabling GPU functionality requires meeting certain prerequisites. For details, refer to Configuring GPU for Virtual Machines (Nvidia). Virtual machines support two types of Nvidia GPUs: Nvidia-GPU and Nvidia-vGPU. After selecting the desired type, you will need to choose the proper GPU model and the number of cards.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/index.html#storage-and-network","title":"Storage and Network","text":"
                                                                                  • Storage:

• Storage is closely related to the function of the virtual machine. By using Kubernetes persistent volumes and storage classes, it provides flexible and scalable storage capabilities: for example, the virtual machine image is stored in a PVC, which supports cloning, snapshotting, and so on, together with other data.

                                                                                    • System Disk: The system automatically creates a VirtIO type rootfs system disk for storing the operating system and data.

                                                                                    • Data Disk: The data disk is a storage device in the virtual machine used to store user data, application data, or other non-operating system related files. Compared with the system disk, the data disk is optional and can be dynamically added or removed as needed. The capacity of the data disk can also be flexibly configured according to demand.

• Block storage is used by default. If you need to use the clone and snapshot functions, make sure that your storage pool has created the proper VolumeSnapshotClass; see the following example. If you need to use the live migration function, make sure your storage supports and selects the ReadWriteMany access mode.

In most cases, the storage will not automatically create such a VolumeSnapshotClass during installation, so you need to create one manually. The following is an example of creating a VolumeSnapshotClass for HwameiStor:

                                                                                      kind: VolumeSnapshotClass\napiVersion: snapshot.storage.k8s.io/v1\nmetadata:\n  name: hwameistor-storage-lvm-snapshot\n  annotations:\n    snapshot.storage.kubernetes.io/is-default-class: \"true\"\nparameters:\n  snapsize: \"1073741824\"\ndriver: lvm.hwameistor.io\ndeletionPolicy: Delete\n
                                                                                    • Run the following command to check if the VolumeSnapshotClass was created successfully.

                                                                                      kubectl get VolumeSnapshotClass\n
• View the created VolumeSnapshotClass and confirm that its driver property is consistent with the provisioner property of the storage pool's StorageClass.
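A hedged way to compare the two fields from the command line (the VolumeSnapshotClass name comes from the example above; replace <your-storageclass> with the StorageClass used by your storage pool):

kubectl get volumesnapshotclass hwameistor-storage-lvm-snapshot -o jsonpath='{.driver}'\nkubectl get storageclass <your-storageclass> -o jsonpath='{.provisioner}'\n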

                                                                                  • Network:

• Network settings can be combined as needed according to the table below.

Network Mode | CNI | Install Spiderpool | Network Cards | Fixed IP | Live Migration
Masquerade (NAT) | Calico | \u274c | Single | \u274c | \u2705
Masquerade (NAT) | Cilium | \u274c | Single | \u274c | \u2705
Masquerade (NAT) | Flannel | \u274c | Single | \u274c | \u2705
Bridge | OVS | \u2705 | Multiple | \u2705 | \u2705

• Network modes are divided into Masquerade (NAT) and Bridge; the Bridge mode can be used only after the spiderpool component is installed.

                                                                                      • The network mode of Masquerade (NAT) is selected by default, using the default network card eth0.
                                                                                      • If the spiderpool component is installed in the cluster, you can choose the Bridge mode, and the Bridge mode supports multiple NICs.

                                                                                    • Add Network Card

• Passthrough / Bridge mode supports manual addition of network cards. Click Add NIC to configure the network card IP pool. Choose a Multus CR that matches the network mode; if none exists, you need to create it yourself.
• If you turn on the Use Default IP Pool switch, the default IP pool in the Multus CR configuration is used. If the switch is off, manually select an IP pool.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/index.html#login-settings","title":"Login Settings","text":"
                                                                                  • Username/Password: Allows login to the virtual machine using a username and password.
                                                                                  • SSH: When selecting the SSH login method, you can bind an SSH key to the virtual machine for future login.
                                                                                  "},{"location":"en/admin/virtnest/quickstart/index.html#create-with-yaml","title":"Create with YAML","text":"

                                                                                  In addition to creating virtual machines using images, you can also create them more quickly using YAML files.

                                                                                  Go to the Virtual Machine list page and click the Create with YAML button.

                                                                                  Click to view an example YAML for creating a virtual machine
                                                                                  apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  name: example\n  namespace: default\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        name: systemdisk-example\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 10Gi\n          storageClassName: rook-ceph-block\n        source:\n          registry:\n            url: >-\n              docker://release-ci.daocloud.io/virtnest/system-images/centos-7.9-x86_64:v1\n  runStrategy: Always\n  template:\n    spec:\n      domain:\n        cpu:\n          cores: 1\n        devices:\n          disks:\n            - disk:\n                bus: virtio\n              name: systemdisk-example\n            - disk:\n                bus: virtio\n              name: cloudinitdisk\n          interfaces:\n            - masquerade: {}\n              name: default\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 1Gi\n      networks:\n        - name: default\n          pod: {}\n      volumes:\n        - dataVolume:\n            name: systemdisk-example\n          name: systemdisk-example\n
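Assuming the example above is saved as example-vm.yaml, applying it creates the VM; with runStrategy: Always, it is started automatically:

kubectl apply -f example-vm.yaml\nkubectl get vm example -n default  # check the printed status until it shows Running\n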
                                                                                  "},{"location":"en/admin/virtnest/quickstart/access.html","title":"Connect to Virtual Machines","text":"

                                                                                  This article will introduce two methods for connecting to virtual machines: Console Access (VNC) and Terminal Access.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/access.html#terminal-access","title":"Terminal Access","text":"

                                                                                  Accessing virtual machines through the terminal provides more flexibility and lightweight access. However, it does not directly display the graphical interface, has limited interactivity, and does not support multiple concurrent terminal sessions.

                                                                                  Click Container Management in the left navigation bar, then click Virtual Machines to access the list page. Click the \u2507 button on the right side of the list to access the virtual machine via the terminal.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/access.html#console-access-vnc","title":"Console Access (VNC)","text":"

                                                                                  Accessing virtual machines through VNC allows you to access and control the full graphical interface of the remote computer. It provides a more interactive experience and allows intuitive operation of the remote device. However, it may have some performance impact, and it does not support multiple concurrent terminal sessions.

                                                                                  Choose VNC for Windows systems.

                                                                                  Click Container Management in the left navigation bar, then click Virtual Machines to access the list page. Click the \u2507 button on the right side of the list to access the virtual machine via Console Access (VNC).
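
If virtctl is installed, a VNC connection can likewise be opened from the command line (assuming a VM named testvm in the default namespace):

virtctl vnc testvm -n default\n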

                                                                                  "},{"location":"en/admin/virtnest/quickstart/detail.html","title":"Virtual Machine Details","text":"

                                                                                  After successfully creating a virtual machine, you can enter the VM Detail page to view Basic Information, Settings, GPU Settings, Overview, Storage, Network, Snapshot and Event List.

                                                                                  Click Container Management in the left navigation bar, then click Clusters to enter the page of the cluster where the virtual machine is located. Click the VM Name to view the virtual machine details.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/detail.html#basic-information","title":"Basic Information","text":"

The basic information of a VM includes Status, Alias, Cluster, Namespace, IP, Label, Annotation, Node, Username, Password, and Create Time.

                                                                                  • Status: The current running state of the virtual machine (Running / Processing / Power Off / Error).
• IP: The IP address of the virtual machine. For virtual machines with multiple network interfaces, multiple IP addresses will be assigned.
                                                                                  "},{"location":"en/admin/virtnest/quickstart/detail.html#settings-gpu-settings","title":"Settings & GPU Settings","text":"

                                                                                  Settings includes:

                                                                                  • Operating System: The operating system installed on the virtual machine to execute programs.
                                                                                  • Image Address: A link to a virtual hard disk file or operating system installation media, which is used to load and install the operating system in the virtual machine software.
• Network Mode: The network mode configured for the virtual machine, including Bridge or Masquerade (NAT).
                                                                                  • CPU & Memory: The resources allocated to the virtual machine.

GPU Settings includes GPU Type, GPU Model, and GPU Count.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/detail.html#other-information","title":"Other Information","text":"OverviewStorageNetworkSnapshotsEvent List

                                                                                  It allows you to view its insight content. Please note that if insight-agent is not installed, overview information cannot be obtained.

                                                                                  It displays the storage used by the virtual machine, including information about the system disk and data disk.

                                                                                  It displays the network settings of the virtual machine, including Multus CR, NIC Name, IP Address and so on.

                                                                                  If you have created snapshots, this part will display relative information. Restoring the virtual machine from snapshots is supported.

                                                                                  The event list includes various state changes, operation records, and system messages during the lifecycle of the virtual machine.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/nodeport.html","title":"Accessing Virtual Machine via NodePort","text":"

                                                                                  This page explains how to access a virtual machine using NodePort.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/nodeport.html#limitations-of-existing-access-methods","title":"Limitations of Existing Access Methods","text":"
                                                                                  1. Virtual machines support access via VNC or console, but both methods have a limitation: they do not allow multiple terminals to be simultaneously online.

2. Using a NodePort-type Service can solve this problem.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/nodeport.html#create-a-service","title":"Create a Service","text":"
                                                                                  1. Using the Container Management Page

                                                                                    • Select the cluster page where the target virtual machine is located and create a Service.
                                                                                    • Select the access type as NodePort.
                                                                                    • Choose the namespace (the namespace where the virtual machine resides).
                                                                                    • Fill in the label selector as vm.kubevirt.io/name: your-vm-name.
                                                                                    • Port Configuration: Choose TCP for the protocol, provide a custom port name, and set the service port and container port to 22.
                                                                                  2. After successful creation, you can access the virtual machine by using ssh username@nodeip -p port.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/nodeport.html#create-the-service-via-kubectl","title":"Create the Service via kubectl","text":"
                                                                                  1. Write the YAML file as follows:

                                                                                    apiVersion: v1\nkind: Service\nmetadata:\n  name: test-ssh\nspec:\n  ports:\n  - name: tcp-ssh\n    nodePort: 32090\n    protocol: TCP\n    port: 22\n    targetPort: 22\n  selector:\n    vm.kubevirt.io/name: test-image-s3\n  type: NodePort\n
                                                                                  2. Run the following command:

                                                                                    kubectl apply -f your-svc.yaml\n
                                                                                  3. After successful creation, you can access the virtual machine by using ssh username@nodeip -p 32090.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/update.html","title":"Update Virtual Machine","text":"

This page explains how to update a virtual machine, either via the form or via a YAML file.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/update.html#prerequisite","title":"Prerequisite","text":"

                                                                                  Before updating the CPU, memory, and data disks of the VM while it is powered on, the following prerequisite must be met:

                                                                                  • Live migration is supported.
                                                                                  "},{"location":"en/admin/virtnest/quickstart/update.html#update-virtual-machine-via-form","title":"Update Virtual Machine via Form","text":"

                                                                                  On the virtual machine list page, click Update to enter the Update VM page.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/update.html#basic-information","title":"Basic Information","text":"

                                                                                  On this page, Alias , Label and Annotation can be updated, while other information cannot. After completing the updates, click Next to proceed to the Image Settings page.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/update.html#image-settings","title":"Image Settings","text":"

On this page, parameters such as Image Address, Operating System, and Version cannot be changed once selected. Users are allowed to update the GPU Quota, including enabling or disabling GPU support, selecting the GPU type, specifying the required model, and configuring the number of GPUs. A restart is required for the changes to take effect. After completing the updates, click Next to proceed to the Storage and Network page.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/update.html#storage-and-network","title":"Storage and Network","text":"

On the Storage and Network page, the StorageClass and PVC Mode for the System Disk cannot be changed once selected. You can increase Disk Capacity, but reducing it is not supported, and you can freely add or remove Data Disks. Network updates are not supported. After completing the updates, click Next to proceed to the Login Settings page.

                                                                                  Note

                                                                                  It is recommended to restart the virtual machine after modifying storage capacity or adding data disks to ensure the configuration takes effect.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/update.html#login-settings","title":"Login Settings","text":"

                                                                                  On the Login Settings page, Username, Password, and SSH cannot be changed once set. After confirming your login information is correct, click OK to complete the update process.

                                                                                  "},{"location":"en/admin/virtnest/quickstart/update.html#edit-yaml","title":"Edit YAML","text":"

                                                                                  In addition to updating the virtual machine via forms, you can also quickly update it using a YAML file.

                                                                                  Go to the virtual machine list page and click the Edit YAML button.

                                                                                  "},{"location":"en/admin/virtnest/template/index.html","title":"Create Virtual Machines via Templates","text":"

                                                                                  This guide explains how to create virtual machines using templates.

                                                                                  With internal templates and custom templates, users can easily create new virtual machines. Additionally, we provide the ability to convert existing virtual machines into templates, allowing users to manage and utilize resources more flexibly.

                                                                                  "},{"location":"en/admin/virtnest/template/index.html#create-with-template","title":"Create with Template","text":"

                                                                                  Follow these steps to create a virtual machine using a template.

                                                                                  1. Click Container Management in the left navigation menu, then click Virtual Machines to access the Virtual Machine Management page. On the virtual machine list page, click Create Virtual Machine and select Create with Template .

                                                                                  2. On the template creation page, fill in the required information, including Basic Information, Template Config, Storage and Network, and Login Settings. Then, click OK in the bottom-right corner to complete the creation.

                                                                                    The system will automatically return to the virtual machine list. By clicking \u2507 on the right side of the list, you can perform operations such as power off/restart, clone, update, create snapshot, convert to template, console access (VNC), and delete. The ability to clone and create snapshots depends on the selected storage pool.

                                                                                  "},{"location":"en/admin/virtnest/template/index.html#basic-information","title":"Basic Information","text":"

                                                                                  On the Create VMs page, enter the information according to the table below and click Next .

                                                                                  • Name: Can contain up to 63 characters and can only include lowercase letters, numbers, and hyphens ( - ). The name must start and end with a lowercase letter or number. Names must be unique within the same namespace, and the name cannot be changed after the virtual machine is created.
                                                                                  • Alias: Can include any characters, up to 60 characters in length.
                                                                                  • Cluster: Select the cluster where the new virtual machine will be deployed.
                                                                                  • Namespace: Select the namespace where the new virtual machine will be deployed. If the desired namespace is not found, you can follow the instructions on the page to create a new namespace.
                                                                                  "},{"location":"en/admin/virtnest/template/index.html#template-config","title":"Template Config","text":"

                                                                                  The template list will appear, and you can choose either an internal template or a custom template based on your needs.

                                                                                  • Select an Internal Template: AI platform Virtual Machine provides several standard templates that cannot be edited or deleted. When selecting an internal template, the image source, operating system, image address, and other information will be based on the template and cannot be modified. GPU quota will also be based on the template but can be modified.

                                                                                  • Select a Custom Template: These templates are created from virtual machine configurations and can be edited or deleted. When using a custom template, you can modify the image source and other information based on your specific requirements.

                                                                                  "},{"location":"en/admin/virtnest/template/index.html#storage-and-network","title":"Storage and Network","text":"
• Storage: By default, the system creates a rootfs system disk of VirtIO type for storing the operating system and data; block storage is used by default. If you need clone and snapshot functionality, make sure the storage pool (StorageClass) in which you create the disk supports the VolumeSnapshots feature. Please note that the storage pool (SC) has additional prerequisites that need to be met.

                                                                                    • Prerequisites:

                                                                                      • KubeVirt utilizes the VolumeSnapshot feature of the Kubernetes CSI driver to capture the persistent state of virtual machines. Therefore, you need to ensure that your virtual machine uses a StorageClass that supports VolumeSnapshots and is configured with the correct VolumeSnapshotClass.
• Check the created SnapshotClass and confirm that its driver property matches the provisioner property of the storage pool (StorageClass); see the verification commands after this list.
                                                                                    • Supports adding one system disk and multiple data disks.

                                                                                  • Network: If no configuration is made, the system will create a VirtIO type network by default.
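
The driver/provisioner match mentioned above can be verified with kubectl; the class names below are placeholders:

# List snapshot classes and the CSI driver each one uses\nkubectl get volumesnapshotclass\n# These two values should match for snapshot and clone to work\nkubectl get volumesnapshotclass <snapshot-class> -o jsonpath='{.driver}'\nkubectl get storageclass <storage-class> -o jsonpath='{.provisioner}'\n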

                                                                                  "},{"location":"en/admin/virtnest/template/index.html#login-settings","title":"Login Settings","text":"
                                                                                  • Username/Password: You can log in to the virtual machine using a username and password.
                                                                                  • SSH: When selecting SSH login, you can bind an SSH key to the virtual machine for future login purposes.
                                                                                  "},{"location":"en/admin/virtnest/template/tep.html","title":"VM Template","text":"

                                                                                  This guide explains the usage of internal VM templates and custom VM templates.

                                                                                  Using both internal and custom templates, users can easily create new VMs. Additionally, we provide the ability to convert existing VMs into VM templates, allowing users to manage and utilize resources more flexibly.

                                                                                  "},{"location":"en/admin/virtnest/template/tep.html#vm-templates","title":"VM Templates","text":"
                                                                                  1. Click Container Management in the left navigation menu, then click VM Template to access the VM Template page. If the template is converted from a virtual machine configured with a GPU, the template will also include GPU information and will be displayed in the template list.

                                                                                  2. Click the \u2507 on the right side of a template in the list. For internal templates, you can create VM and view YAML. For custom templates, you can create VM, edit YAML and delete template.

                                                                                  "},{"location":"en/admin/virtnest/template/tep.html#internal-template","title":"Internal Template","text":"
                                                                                  • The platform provides CentOS and Ubuntu as templates.

                                                                                  "},{"location":"en/admin/virtnest/template/tep.html#custom-template","title":"Custom Template","text":"

                                                                                  Custom templates are created from VM configurations. The following steps explain how to convert a VM configuration into a template.

                                                                                  1. Click Container Management in the left navigation menu, then click Virtual Machines to access the list page. Click the \u2507 on the right side of a VM in the list to convert the configuration into a template. Only running or stopped VMs can be converted.

                                                                                  2. Provide a name for the new template. A notification will indicate that the original VM will be preserved and remain available. After a successful conversion, a new entry will be added to the template list.

                                                                                  "},{"location":"en/admin/virtnest/template/tep.html#template-details","title":"Template Details","text":"

                                                                                  After successfully creating a template, you can click the template name to view the details of the VM, including Basic Information, GPU Settings, Storage, Network, and more. If you need to quickly deploy a new VM based on that template, simply click the Create VM button in the upper right corner of the page for easy operation.

                                                                                  "},{"location":"en/admin/virtnest/vm/auto-migrate.html","title":"Automatic VM Drifting","text":"

                                                                                  This article will explain how to seamlessly migrate running virtual machines to other nodes when a node in the cluster becomes inaccessible due to power outages or network failures, ensuring business continuity and data security.

                                                                                  Compared to automatic drifting, live migration requires you to manually initiate the migration process through the interface, rather than having the system automatically trigger it.

                                                                                  "},{"location":"en/admin/virtnest/vm/auto-migrate.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before implementing automatic drifting, the following prerequisites must be met:

                                                                                  • The virtual machine has not performed disk commit operations, or is using Rook-ceph or HwameiStor HA as the storage system.
                                                                                  • The node has been unreachable for more than five minutes.
                                                                                  • Ensure there are at least two available nodes in the cluster, and the virtual machine has not specified a scheduling node.
                                                                                  • The virtual machine's launcher pod has been deleted.
                                                                                  "},{"location":"en/admin/virtnest/vm/auto-migrate.html#steps","title":"Steps","text":"
                                                                                  1. Check the status of the virtual machine launcher pod:

                                                                                    kubectl get pod\n

                                                                                    Check if the launcher pod is in a Terminating state.

                                                                                  2. Force delete the launcher pod:

                                                                                    If the launcher pod is in a Terminating state, you can force delete it with the following command:

kubectl delete pod <launcher pod> --force\n

                                                                                    Replace <launcher pod> with the name of your launcher pod.

                                                                                  3. Wait for recreation and check the status:

                                                                                    After deletion, the system will automatically recreate the launcher pod. Wait for its status to become running, then refresh the virtual machine list to see if the VM has successfully migrated to the new node.
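
You can also confirm the new node from the command line, since the VirtualMachineInstance object records the node it is running on:

kubectl get vmi -o wide\n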

                                                                                  "},{"location":"en/admin/virtnest/vm/auto-migrate.html#notes","title":"Notes","text":"

                                                                                  If using rook-ceph as storage, it needs to be configured in ReadWriteOnce mode:

1. After force deleting the pod, you need to wait approximately six minutes for the launcher pod to start, or you can make it start immediately by deleting the stale VolumeAttachment. First, locate it with the following commands:

                                                                                    kubectl get pv | grep <vm name>\nkubectl get VolumeAttachment | grep <pv name>\n

                                                                                    Replace <vm name> and <pv name> with your virtual machine name and persistent volume name.

2. Then delete the corresponding VolumeAttachment with the following command:

  kubectl delete VolumeAttachment <volumeattachment name>\n

  Replace <volumeattachment name> with the name of the VolumeAttachment found in the previous step.

                                                                                  "},{"location":"en/admin/virtnest/vm/clone.html","title":"Cloning a Cloud Host","text":"

                                                                                  This article will introduce how to clone a new cloud host.

                                                                                  Users can clone a new cloud host, which will have the same operating system and system configuration as the original cloud host. This enables quick deployment and scaling, allowing for the rapid creation of new cloud hosts with similar configurations without the need to install from scratch.

                                                                                  "},{"location":"en/admin/virtnest/vm/clone.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before using the cloning feature, the following prerequisites must be met (which are the same as those for the snapshot feature):

                                                                                  • Only cloud hosts that are not in an error state can use the cloning feature.
                                                                                  • Install Snapshot CRDs, Snapshot Controller, and CSI Driver. For specific installation steps, refer to CSI Snapshotter.
                                                                                  • Wait for the snapshot-controller component to be ready. This component will monitor events related to VolumeSnapshot and VolumeSnapshotContent and trigger related operations.
                                                                                  • Wait for the CSI Driver to be ready, ensuring that the csi-snapshotter sidecar is running in the CSI Driver. The csi-snapshotter sidecar will monitor events related to VolumeSnapshotContent and trigger related operations.
                                                                                    • If the storage is Rook-Ceph, refer to ceph-csi-snapshot
• If the storage is HwameiStor, refer to hwameistor-snapshot
                                                                                  "},{"location":"en/admin/virtnest/vm/clone.html#cloning-a-cloud-host_1","title":"Cloning a Cloud Host","text":"
1. Click Container Management in the left navigation bar, then click Cloud Hosts to enter the list page. Click the \u2507 on the right side of the list to perform the clone operation on a cloud host that is not in an error state.

                                                                                  2. A popup will appear, requiring you to fill in the name and description for the new cloud host being cloned. The cloning operation may take some time, depending on the size of the cloud host and storage performance.

                                                                                  3. After a successful clone, you can view the new cloud host in the cloud host list. The newly created cloud host will be in a powered-off state and will need to be manually powered on if required.

                                                                                  4. It is recommended to take a snapshot of the original cloud host before cloning. If you encounter issues during the cloning process, please check whether the prerequisites are met and try to execute the cloning operation again.

                                                                                  "},{"location":"en/admin/virtnest/vm/create-secret.html","title":"Create Secret","text":"

When creating a virtual machine using Object Storage (S3) as the image source, you sometimes need to provide a secret to pass S3's authentication. The following explains how to create a secret that meets the requirements of the virtual machine.

1. Click Container Management in the left navigation bar, then click Clusters , enter the details of the cluster where the virtual machine is located, click ConfigMaps & Secrets , select Secrets , and click Create Secret .

2. Enter the creation page, fill in the secret name, select the same namespace as the virtual machine, and note that you must select the default type Opaque . The secret data needs to follow the principles below (a complete sample manifest is shown after these steps).

                                                                                    • accessKeyId: Data represented in Base64 encoding
                                                                                    • secretKey: Data represented in Base64 encoding

                                                                                  3. After successful creation, you can use the required secret when creating a virtual machine.
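
For reference, a Secret that satisfies these requirements might look like the following sketch; the name and key values are placeholders:

apiVersion: v1\nkind: Secret\nmetadata:\n  name: s3-image-secret  # placeholder name\n  namespace: default     # must match the namespace of the virtual machine\ntype: Opaque\ndata:\n  accessKeyId: bXktYWNjZXNzLWtleQ==  # base64 of my-access-key\n  secretKey: bXktc2VjcmV0LWtleQ==    # base64 of my-secret-key\n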

                                                                                  "},{"location":"en/admin/virtnest/vm/cross-cluster-migrate.html","title":"Migrate VM across Clusters","text":"

This feature currently has no UI, so follow the steps in this documentation.

                                                                                  "},{"location":"en/admin/virtnest/vm/cross-cluster-migrate.html#use-cases","title":"Use Cases","text":"
                                                                                  • A VM needs to be migrated to another cluster when the original cluster experiences a failure or performance degradation that makes the VM inaccessible.
• A VM needs to be migrated to another cluster when performing planned maintenance or upgrades on the cluster.
                                                                                  • A VM needs to be migrated to another cluster to match more appropriate resource configurations when the performance requirements of specific applications change and resource allocation needs to be adjusted.
                                                                                  "},{"location":"en/admin/virtnest/vm/cross-cluster-migrate.html#prerequisites","title":"Prerequisites","text":"

Before migrating a VM across clusters, the following prerequisites must be met:

                                                                                  • Cluster network connectivity: Ensure that the network between the original cluster and the target migration cluster is accessible.
                                                                                  • Same storage type: The target migration cluster must support the same storage type as the original cluster. For example, if the exporting cluster uses rook-ceph-block type StorageClass, the importing cluster must also support this type.
                                                                                  • Enable VMExport Feature Gate in KubeVirt of the original cluster.
                                                                                  "},{"location":"en/admin/virtnest/vm/cross-cluster-migrate.html#enable-vmexport-feature-gate","title":"Enable VMExport Feature Gate","text":"

To activate the VMExport Feature Gate, run the following command in the original cluster. You can refer to How to activate a feature gate.

                                                                                  kubectl edit kubevirt kubevirt -n virtnest-system\n

                                                                                  This command modifies the featureGates to include VMExport.

                                                                                  apiVersion: kubevirt.io/v1\nkind: KubeVirt\nmetadata:\n  name: kubevirt\n  namespace: virtnest-system\nspec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n        - DataVolumes\n        - LiveMigration\n        - VMExport\n
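
Alternatively, the same change can be applied non-interactively with a JSON patch; this sketch assumes the featureGates array already exists in the KubeVirt CR, as shown above:

kubectl patch kubevirt kubevirt -n virtnest-system --type=json -p '[{\"op\": \"add\", \"path\": \"/spec/configuration/developerConfiguration/featureGates/-\", \"value\": \"VMExport\"}]'\n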
                                                                                  "},{"location":"en/admin/virtnest/vm/cross-cluster-migrate.html#configure-ingress-for-the-original-cluster","title":"Configure Ingress for the Original Cluster","text":"

                                                                                  Using Nginx Ingress as an example, configure Ingress to point to the virt-exportproxy Service:

                                                                                  apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: ingress-vm-export\n  namespace: virtnest-system\nspec:\n  tls:\n    - hosts:\n        - upgrade-test.com\n      secretName: nginx-tls\n  rules:\n    - host: upgrade-test.com\n      http:\n        paths:\n          - path: /\n            pathType: Prefix\n            backend:\n              service:\n                name: virt-exportproxy\n                port:\n                  number: 8443\n  ingressClassName: nginx\n
                                                                                  "},{"location":"en/admin/virtnest/vm/cross-cluster-migrate.html#migration-steps","title":"Migration Steps","text":"
                                                                                  1. Create a VirtualMachineExport CR.

                                                                                    • If cold migration is performed while the VM is powered off :

                                                                                      apiVersion: v1\nkind: Secret\nmetadata:\n  name: example-token # Export Token used by the VM\n  namespace: default # Namespace where the VM resides\nstringData:\n  token: 1234567890ab # Export the used Token (Modifiable)\n\n---\napiVersion: export.kubevirt.io/v1alpha1\nkind: VirtualMachineExport\nmetadata:\n  name: example-export # Export name (Modifiable)\n  namespace: default # Namespace where the VM resides\nspec:\n  tokenSecretRef: example-token # Must match the name of the token created above\n  source:\n    apiGroup: \"kubevirt.io\"\n    kind: VirtualMachine\n    name: testvm # VM name\n
                                                                                    • If hot migration is performed using a VM snapshot while the VM is powered on :

apiVersion: v1\nkind: Secret\nmetadata:\n  name: example-token # Export Token used by VM\n  namespace: default # Namespace where the VM resides\nstringData:\n  token: 1234567890ab # Export the used Token (Modifiable)\n\n---\napiVersion: export.kubevirt.io/v1alpha1\nkind: VirtualMachineExport\nmetadata:\n  name: export-snapshot # Export name (Modifiable)\n  namespace: default # Namespace where the VM resides\nspec:\n  tokenSecretRef: example-token # Must match the name of the token created above\n  source:\n    apiGroup: \"snapshot.kubevirt.io\"\n    kind: VirtualMachineSnapshot\n    name: export-snap-202407191524 # Name of the proper VM snapshot\n
                                                                                  2. Check if the VirtualMachineExport is ready:

                                                                                    # Replace example-export with the name of the created VirtualMachineExport\nkubectl get VirtualMachineExport example-export -n default\n\nNAME             SOURCEKIND       SOURCENAME   PHASE\nexample-export   VirtualMachine   testvm       Ready\n
                                                                                  3. Once the VirtualMachineExport is ready, export the VM YAML.

                                                                                    • If virtctl is installed, you can use the following command to export the VM YAML:

                                                                                      # Replace example-export with the name of the created VirtualMachineExport\n# Specify the namespace with -n\nvirtctl vmexport download example-export --manifest --include-secret --output=manifest.yaml\n
                                                                                    • If virtctl is not installed, you can use the following commands to export the VM YAML:

                                                                                      # Replace example-export with the name and  namespace of the created VirtualMachineExport\nmanifesturl=$(kubectl get VirtualMachineExport example-export -n default -o=jsonpath='{.status.links.internal.manifests[0].url}')\nsecreturl=$(kubectl get VirtualMachineExport example-export -n default -o=jsonpath='{.status.links.internal.manifests[1].url}')\n# Replace with the secret name and namespace\ntoken=$(kubectl get secret example-token -n default -o=jsonpath='{.data.token}' | base64 -d)\n\ncurl -H \"Accept: application/yaml\" -H \"x-kubevirt-export-token: $token\"  --insecure  $secreturl > manifest.yaml\ncurl -H \"Accept: application/yaml\" -H \"x-kubevirt-export-token: $token\"  --insecure  $manifesturl >> manifest.yaml\n
                                                                                  4. Import VM.

Copy the exported manifest.yaml to the target migration cluster and run the following command (if the namespace does not exist, it needs to be created in advance):

                                                                                    kubectl apply -f manifest.yaml\n
After the VM is successfully created, you need to restart it. Once the VM is running successfully, delete the original VM in the original cluster (do not delete the original VM if the new one has not started successfully).

                                                                                  "},{"location":"en/admin/virtnest/vm/health-check.html","title":"Health Check","text":"

When configuring liveness and readiness probes for a cloud host, the process is similar to standard Kubernetes probe configuration. This article introduces how to configure health check parameters for a cloud host using YAML.

However, it is important to note that the configuration can only be done after the cloud host has been successfully created and while it is in a powered-off state.

                                                                                  "},{"location":"en/admin/virtnest/vm/health-check.html#configuring-http-liveness-probe","title":"Configuring HTTP Liveness Probe","text":"
                                                                                  1. Configure livenessProbe.httpGet in spec.template.spec.
                                                                                  2. Modify cloudInitNoCloud to start an HTTP server.

                                                                                    Click to view YAML example
apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n    virtnest.io/alias-name: ''\n    virtnest.io/image-secret: ''\n    virtnest.io/image-source: docker\n    virtnest.io/os-image: release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  creationTimestamp: '2024-10-15T02:39:45Z'\n  finalizers:\n    - kubevirt.io/virtualMachineControllerFinalize\n  generation: 1\n  labels:\n    virtnest.io/os-family: Ubuntu\n    virtnest.io/os-version: '22.04'\n  name: test-probe\n  namespace: amamba-team\n  resourceVersion: '254032135'\n  uid: 6d92779d-7415-4721-8c7b-a2dde163d758\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        creationTimestamp: null\n        name: test-probe-rootdisk\n        namespace: amamba-team\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 10Gi\n          storageClassName: hwameistor-storage-lvm-hdd\n        source:\n          registry:\n            url: >-\n              docker://release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  runStrategy: Halted\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio\n              name: rootdisk\n            - disk:\n                bus: virtio\n              name: cloudinitdisk\n          interfaces:\n            - masquerade: {}\n              name: default\n        machine:\n          type: q35\n        memory:\n          guest: 2Gi\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n        - name: default\n          pod: {}\n      livenessProbe:\n        initialDelaySeconds: 120\n        periodSeconds: 20\n        httpGet:\n          port: 1500\n        timeoutSeconds: 10\n      volumes:\n        - dataVolume:\n            name: test-probe-rootdisk\n          name: rootdisk\n        - cloudInitNoCloud:\n            userData: |\n              #cloud-config\n              ssh_pwauth: true\n              disable_root: false\n              chpasswd: {\"list\": \"root:dangerous\", expire: False}\n              runcmd:\n                - sed -i \"/#\\?PermitRootLogin/s/^.*$/PermitRootLogin yes/g\" /etc/ssh/sshd_config\n                - systemctl restart ssh.service\n                - dhclient -r && dhclient\n                - apt-get update && apt-get install -y ncat\n                - [\"systemd-run\", \"--unit=httpserver\", \"ncat\", \"-klp\", \"1500\", \"-e\", '/usr/bin/echo -e HTTP/1.1 200 OK\\nContent-Length: 12\\n\\nHello World!']\n          name: cloudinitdisk\n
3. The configuration of userData may vary depending on the operating system (such as Ubuntu/Debian or CentOS). The main differences are listed below, followed by a CentOS-flavored sketch:

                                                                                    • Package manager:

                                                                                      Ubuntu/Debian uses apt-get as the package manager. CentOS uses yum as the package manager.

                                                                                    • SSH service restart command:

                                                                                      Ubuntu/Debian uses systemctl restart ssh.service. CentOS uses systemctl restart sshd.service (note that for CentOS 7 and earlier versions, it uses service sshd restart).

                                                                                    • Installed packages:

                                                                                      Ubuntu/Debian installs ncat. CentOS installs nmap-ncat (because ncat may not be available in the default repository for CentOS).
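
For example, a CentOS flavor of the runcmd section shown above might look like the following sketch, based on the differences just listed; adjust the commands for your CentOS version:

runcmd:\n  - sed -i \"/#\\?PermitRootLogin/s/^.*$/PermitRootLogin yes/g\" /etc/ssh/sshd_config\n  - systemctl restart sshd.service  # CentOS 7 and earlier: service sshd restart\n  - dhclient -r && dhclient\n  - yum install -y nmap-ncat\n  - [\"systemd-run\", \"--unit=httpserver\", \"ncat\", \"-klp\", \"1500\", \"-e\", '/usr/bin/echo -e HTTP/1.1 200 OK\\nContent-Length: 12\\n\\nHello World!']\n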

                                                                                  "},{"location":"en/admin/virtnest/vm/health-check.html#configuring-tcp-liveness-probe","title":"Configuring TCP Liveness Probe","text":"

                                                                                  Configure livenessProbe.tcpSocket in spec.template.spec.

                                                                                  Click to view YAML example configuration
apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n    virtnest.io/alias-name: ''\n    virtnest.io/image-secret: ''\n    virtnest.io/image-source: docker\n    virtnest.io/os-image: release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  creationTimestamp: '2024-10-15T02:39:45Z'\n  finalizers:\n    - kubevirt.io/virtualMachineControllerFinalize\n  generation: 1\n  labels:\n    virtnest.io/os-family: Ubuntu\n    virtnest.io/os-version: '22.04'\n  name: test-probe\n  namespace: amamba-team\n  resourceVersion: '254032135'\n  uid: 6d92779d-7415-4721-8c7b-a2dde163d758\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        creationTimestamp: null\n        name: test-probe-rootdisk\n        namespace: amamba-team\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 10Gi\n          storageClassName: hwameistor-storage-lvm-hdd\n        source:\n          registry:\n            url: >-\n              docker://release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  runStrategy: Halted\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio\n              name: rootdisk\n            - disk:\n                bus: virtio\n              name: cloudinitdisk\n          interfaces:\n            - masquerade: {}\n              name: default\n        machine:\n          type: q35\n        memory:\n          guest: 2Gi\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n        - name: default\n          pod: {}\n      livenessProbe:\n        initialDelaySeconds: 120\n        periodSeconds: 20\n        tcpSocket:\n          port: 1500\n        timeoutSeconds: 10\n      volumes:\n        - dataVolume:\n            name: test-probe-rootdisk\n          name: rootdisk\n        - cloudInitNoCloud:\n            userData: |\n              #cloud-config\n              ssh_pwauth: true\n              disable_root: false\n              chpasswd: {\"list\": \"root:dangerous\", expire: False}\n              runcmd:\n                - sed -i \"/#\\?PermitRootLogin/s/^.*$/PermitRootLogin yes/g\" /etc/ssh/sshd_config\n                - systemctl restart ssh.service\n                - dhclient -r && dhclient\n                - apt-get update && apt-get install -y ncat\n                - [\"systemd-run\", \"--unit=httpserver\", \"ncat\", \"-klp\", \"1500\", \"-e\", '/usr/bin/echo -e HTTP/1.1 200 OK\\nContent-Length: 12\\n\\nHello World!']\n          name: cloudinitdisk\n
                                                                                  "},{"location":"en/admin/virtnest/vm/health-check.html#configuring-readiness-probes","title":"Configuring Readiness Probes","text":"

Configure readinessProbe in spec.template.spec.

                                                                                  Click to view YAML example configuration
apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  annotations:\n    kubevirt.io/latest-observed-api-version: v1\n    kubevirt.io/storage-observed-api-version: v1\n    virtnest.io/alias-name: ''\n    virtnest.io/image-secret: ''\n    virtnest.io/image-source: docker\n    virtnest.io/os-image: release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  creationTimestamp: '2024-10-15T02:39:45Z'\n  finalizers:\n    - kubevirt.io/virtualMachineControllerFinalize\n  generation: 1\n  labels:\n    virtnest.io/os-family: Ubuntu\n    virtnest.io/os-version: '22.04'\n  name: test-probe\n  namespace: amamba-team\n  resourceVersion: '254032135'\n  uid: 6d92779d-7415-4721-8c7b-a2dde163d758\nspec:\n  dataVolumeTemplates:\n    - metadata:\n        creationTimestamp: null\n        name: test-probe-rootdisk\n        namespace: amamba-team\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 10Gi\n          storageClassName: hwameistor-storage-lvm-hdd\n        source:\n          registry:\n            url: >-\n              docker://release-ci.daocloud.io/virtnest/system-images/ubuntu-22.04-x86_64:v1\n  runStrategy: Halted\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      architecture: amd64\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n            - bootOrder: 1\n              disk:\n                bus: virtio\n              name: rootdisk\n            - disk:\n                bus: virtio\n              name: cloudinitdisk\n          interfaces:\n            - masquerade: {}\n              name: default\n        machine:\n          type: q35\n        memory:\n          guest: 2Gi\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n        - name: default\n          pod: {}\n      readinessProbe:\n        initialDelaySeconds: 120\n        periodSeconds: 20\n        httpGet:\n          port: 1500\n        timeoutSeconds: 10\n      volumes:\n        - dataVolume:\n            name: test-probe-rootdisk\n          name: rootdisk\n        - cloudInitNoCloud:\n            userData: |\n              #cloud-config\n              ssh_pwauth: true\n              disable_root: false\n              chpasswd: {\"list\": \"root:dangerous\", expire: False}\n              runcmd:\n                - sed -i \"/#\\?PermitRootLogin/s/^.*$/PermitRootLogin yes/g\" /etc/ssh/sshd_config\n                - systemctl restart ssh.service\n                - dhclient -r && dhclient\n                - apt-get update && apt-get install -y ncat\n                - [\"systemd-run\", \"--unit=httpserver\", \"ncat\", \"-klp\", \"1500\", \"-e\", '/usr/bin/echo -e HTTP/1.1 200 OK\\nContent-Length: 12\\n\\nHello World!']\n          name: cloudinitdisk\n
                                                                                  "},{"location":"en/admin/virtnest/vm/live-migration.html","title":"Live Migration","text":"

                                                                                  This article will explain how to migrate a virtual machine from one node to another.

                                                                                  When a node needs maintenance or upgrades, users can seamlessly migrate running virtual machines to other nodes while ensuring business continuity and data security.

                                                                                  "},{"location":"en/admin/virtnest/vm/live-migration.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before using live migration, the following prerequisites must be met:

                                                                                  • Only running virtual machines can use the live migration feature.
                                                                                  • If you need to use live migration, make sure that your PVC access mode is ReadWriteMany.
                                                                                  • The current cluster must have at least 2 usable nodes.
• When using live migration, either Masquerade or Bridge can be selected as the network mode.
                                                                                  "},{"location":"en/admin/virtnest/vm/live-migration.html#live-migration_1","title":"Live Migration","text":"
                                                                                  1. Click Container Management on the left navigation bar, then click Virtual Machines to enter the list page. Click \u2507 on the right side of the list to migrate running virtual machines. Currently, the virtual machine is on the node controller-node-1 .

                                                                                  2. A pop-up box will appear, indicating that during live migration, the running virtual machine instances will be migrated to another node, but the target node cannot be predetermined. Please ensure that other nodes have sufficient resources.

3. After a successful migration, you can view the node information in the virtual machine list. At this point, the virtual machine has been migrated to controller-node-2.

                                                                                  "},{"location":"en/admin/virtnest/vm/migratiom.html","title":"Cold Migration within the Cluster","text":"

                                                                                  This article will introduce how to move a cloud host from one node to another within the same cluster while it is powered off.

                                                                                  The main feature of cold migration is that the cloud host will be offline during the migration process, which may impact business continuity. Therefore, careful planning of the migration time window is necessary, taking into account business needs and system availability. Typically, cold migration is suitable for scenarios where downtime requirements are not very strict.

                                                                                  "},{"location":"en/admin/virtnest/vm/migratiom.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before using cold migration, the following prerequisites must be met:

                                                                                  • The cloud host must be powered off to perform cold migration.
                                                                                  "},{"location":"en/admin/virtnest/vm/migratiom.html#cold-migration","title":"Cold Migration","text":"
1. Click Container Management in the left navigation bar, then click Cloud Hosts to enter the list page. Click the \u2507 on the right side of the list to initiate the migration action for a cloud host that is in the powered-off state. The current node of a cloud host cannot be viewed while it is powered off, so plan ahead or check the node while the host is still powered on.

                                                                                    Note

                                                                                    If you have used local-path in the storage pool of the original node, there may be issues during cross-node migration. Please choose carefully.

                                                                                  2. After clicking migrate, a prompt will appear allowing you to choose to migrate to a specific node or randomly. If you need to change the storage pool, ensure that there is an available storage pool in the target node. Also, ensure that the target node has sufficient resources. The migration process may take a significant amount of time, so please be patient.

3. After the migration succeeds, power the cloud host back on to verify that it was migrated as expected. This example has already powered on the cloud host to check the migration result.

                                                                                  "},{"location":"en/admin/virtnest/vm/monitor.html","title":"Virtual Machine Monitoring","text":"

Virtual machine monitoring is based on the Grafana dashboard open-sourced by KubeVirt, which generates a monitoring dashboard for each virtual machine.

Monitoring information for a virtual machine provides insight into its resource consumption, such as CPU, memory, storage, and network usage. This information helps optimize and plan resources, improving overall resource utilization efficiency.

                                                                                  "},{"location":"en/admin/virtnest/vm/monitor.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before viewing the virtual machine monitoring information, the following prerequisites need to be met:

                                                                                  • The insight-agent component needs to be installed in the same cluster where the virtual machine is located.
                                                                                  "},{"location":"en/admin/virtnest/vm/monitor.html#virtual-machine-monitoring_1","title":"Virtual Machine Monitoring","text":"

Navigate to the VM Detail page and click Overview to view the monitoring content of the virtual machine. Note that monitoring information cannot be obtained unless the insight-agent component is installed. The metrics are detailed below:

• Total CPU, CPU Usage, Memory Total, Memory Usage.

• CPU Utilization: the percentage of CPU resources currently used by the virtual machine.

• Memory Utilization: the percentage of memory resources currently used by the virtual machine out of the total available memory.

• Network Traffic by Virtual Machines: the amount of network data sent and received by the virtual machine during a specific time period.

• Network Packet Loss Rate: the proportion of data packets lost during transmission out of the total packets sent.

• Network Packet Error Rate: the rate of errors that occur during network transmission.

• Storage Traffic: the speed and volume at which the virtual machine system reads from and writes to the disk within a certain time period.

• Storage IOPS: the number of input/output operations the virtual machine system performs per second.

• Storage Delay: the latency the virtual machine system experiences when performing disk read and write operations.

                                                                                  "},{"location":"en/admin/virtnest/vm/scheduled-snapshot.html","title":"Scheduled Snapshot","text":"

                                                                                  This article introduces how to create snapshots for VMs on a schedule.

                                                                                  You can create scheduled snapshots for VMs, providing continuous protection for data and ensuring effective data recovery in case of data loss, corruption, or deletion.

                                                                                  "},{"location":"en/admin/virtnest/vm/scheduled-snapshot.html#steps","title":"Steps","text":"
                                                                                  1. In the left navigation bar, click Container Management -> Clusters to select the proper cluster where the target VM is located. After entering the cluster, click Workloads -> CronJobs, and choose Create from YAML to create a scheduled task. Refer to the following YAML example to create snapshots for the specified VM on a schedule.

                                                                                    Click to view the YAML example for creating a scheduled task
                                                                                    apiVersion: batch/v1\nkind: CronJob\nmetadata:\n  name: xxxxx-xxxxx-cronjob # Scheduled task name (Customizable)\n  namespace: virtnest-system # Do not modify the namespace\nspec:\n  schedule: \"5 * * * *\" # Modify the scheduled task execution interval as needed\n  concurrencyPolicy: Allow\n  suspend: false\n  successfulJobsHistoryLimit: 10\n  failedJobsHistoryLimit: 3\n  startingDeadlineSeconds: 60\n  jobTemplate:\n    spec:\n      template:\n        metadata:\n          labels:\n            virtnest.io/vm: xxxx # Modify to the name of the VM that needs to be snapshotted\n            virtnest.io/namespace: xxxx # Modify to the namespace where the VM is located\n        spec:\n          serviceAccountName: kubevirt-operator\n          containers:\n            - name: snapshot-job\n              image: release.daocloud.io/virtnest/tools:v0.1.5 # For offline environments, modify the registry address to the proper registry address of the cluster\n              imagePullPolicy: IfNotPresent\n              env:\n                - name: NS\n                  valueFrom:\n                    fieldRef:\n                      fieldPath: metadata.labels['virtnest.io/namespace']\n                - name: VM\n                  valueFrom:\n                    fieldRef:\n                      fieldPath: metadata.labels['virtnest.io/vm']\n              command:\n                - /bin/sh\n                - -c\n                - |\n                  export SUFFIX=$(date +\"%Y%m%d-%H%M%S\")\n                  cat <<EOF | kubectl apply -f -\n                  apiVersion: snapshot.kubevirt.io/v1alpha1\n                  kind: VirtualMachineSnapshot\n                  metadata:\n                    name: $(VM)-snapshot-$SUFFIX\n                    namespace: $(NS)\n                  spec:\n                    source:\n                      apiGroup: kubevirt.io\n                      kind: VirtualMachine\n                      name: $(VM)\n                  EOF\n          restartPolicy: OnFailure\n
2. After the scheduled task is created and runs successfully, you can click Virtual Machines on the list page, select the target VM, and view its snapshot list on the details page.
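If you prefer the command line, you can also confirm that snapshots are being created on schedule (a sketch, assuming the KubeVirt snapshot CRDs are installed; replace xxxx with the namespace where the VM is located):

kubectl get virtualmachinesnapshot -n xxxx\n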

                                                                                  "},{"location":"en/admin/virtnest/vm/snapshot.html","title":"Snapshot Management","text":"

                                                                                  This guide explains how to create snapshots for virtual machines and restore them.

                                                                                  You can create snapshots for virtual machines to save the current state of the virtual machine. A snapshot can be restored multiple times, and each time the virtual machine will be reverted to the state when the snapshot was created. Snapshots are commonly used for backup, recovery and rollback.

                                                                                  "},{"location":"en/admin/virtnest/vm/snapshot.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before using the snapshots, the following prerequisites need to be met:

                                                                                  • Only virtual machines in a non-error state can use the snapshot function.
                                                                                  • Install Snapshot CRDs, Snapshot Controller, and CSI Driver. For detailed installation steps, refer to CSI Snapshotter.
                                                                                  • Wait for the snapshot-controller component to be ready. This component monitors events related to VolumeSnapshot and VolumeSnapshotContent and triggers specific actions.
                                                                                  • Wait for the CSI Driver to be ready. Ensure that the csi-snapshotter sidecar is running within the CSI Driver. The csi-snapshotter sidecar monitors events related to VolumeSnapshotContent and triggers specific actions.
                                                                                    • If the storage is rook-ceph, refer to ceph-csi-snapshot.
• If the storage is HwameiStor, refer to hwameistor-snapshot.
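Before creating a snapshot, you can quickly verify that these components are in place (a sketch; the namespace of the snapshot-controller varies by installation):

kubectl get crd volumesnapshotclasses.snapshot.storage.k8s.io volumesnapshots.snapshot.storage.k8s.io volumesnapshotcontents.snapshot.storage.k8s.io\nkubectl get pods -A | grep snapshot-controller\n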
                                                                                  "},{"location":"en/admin/virtnest/vm/snapshot.html#create-a-snapshot","title":"Create a Snapshot","text":"
                                                                                  1. Click Container Management in the left navigation menu, then click Virtual Machines to access the list page. Click the \u2507 on the right side of the list for a virtual machine to perform snapshot operations (only available for non-error state virtual machines).

                                                                                  2. A dialog box will pop up, prompting you to input a name and description for the snapshot. Please note that the creation process may take a few minutes, during which you won't be able to perform any operations on the virtual machine.

3. After successfully creating the snapshot, you can view its details in the virtual machine's information section. There you can edit the description, restore from the snapshot, delete it, and perform other operations.

                                                                                  "},{"location":"en/admin/virtnest/vm/snapshot.html#restore-from-a-snapshot","title":"Restore from a Snapshot","text":"
                                                                                  1. Click Restore from Snapshot and provide a name for the virtual machine recovery record. The recovery operation may take some time to complete, depending on the size of the snapshot and other factors. After a successful recovery, the virtual machine will be restored to the state when the snapshot was created.

2. After some time, you can scroll down to the snapshot information to view all recovery records for the current snapshot, and locate the restore point of each recovery.

                                                                                  "},{"location":"en/admin/virtnest/vm/vm-network.html","title":"Virtual Machine Networking","text":"

                                                                                  This article will introduce how to configure network information when creating virtual machines.

                                                                                  In virtual machines, network management is a crucial part that allows us to manage and configure network connections for virtual machines in a Kubernetes environment. It can be configured according to different needs and scenarios, achieving a more flexible and diverse network architecture.

1. Single NIC Scenario: For simple applications that only require basic network connectivity, or when resources are constrained, using a single NIC saves network resources and avoids waste.
                                                                                  2. Multiple NIC Scenario: When security isolation between different network environments needs to be achieved, multiple NICs can be used to divide different network areas. It also allows for control and management of traffic.
                                                                                  "},{"location":"en/admin/virtnest/vm/vm-network.html#prerequisites","title":"Prerequisites","text":"
                                                                                  1. When selecting the Bridge network mode, some information needs to be configured in advance:

                                                                                    • Install and run Open vSwitch on the host nodes. See Ovs-cni Quick Start.
                                                                                    • Configure Open vSwitch bridge on the host nodes. See vswitch for instructions.
                                                                                    • Install Spiderpool. See installing spiderpool for instructions. By default, Spiderpool will install both Multus CNI and Ovs CNI.
• Create a Multus CR of type ovs. You can create a custom Multus CR or use YAML for creation (a sketch is shown after this list).
                                                                                    • Create a subnet and IP pool. See creating subnets and IP pools .
2. Network configurations can be combined according to the following table:

Network Mode | CNI | Spiderpool Installed | NIC Mode | Fixed IP | Live Migration
Masquerade (NAT) | Calico | \u274c | Single NIC | \u274c | \u2705
Masquerade (NAT) | Cilium | \u274c | Single NIC | \u274c | \u2705
Masquerade (NAT) | Flannel | \u274c | Single NIC | \u274c | \u2705
Bridge | OVS | \u2705 | Multiple NIC | \u2705 | \u2705

                                                                                  3. Network Mode: There are two modes - Masquerade (NAT) and Bridge. Bridge mode requires the installation of the spiderpool component.

                                                                                    1. The default selection is Masquerade (NAT) network mode using the eth0 default NIC.

                                                                                    2. If the cluster has the spiderpool component installed, then Bridge mode can be selected. The Bridge mode supports multiple NICs.

                                                                                      • Ensure all prerequisites are met before selecting the Bridge mode.
                                                                                  4. Adding NICs

1. Bridge mode supports manually adding NICs. Click Add NIC to configure the NIC IP pool. Choose a Multus CR that matches the network mode; if none is available, create one manually.

                                                                                    2. If the Use Default IP Pool switch is turned on, it will use the default IP pool in the multus CR configuration. If turned off, manually select the IP pool.
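The following is a minimal sketch of a Multus CR of type ovs, as referenced in the prerequisites above (the CR name, namespace, and bridge name br1 are illustrative assumptions; adjust them to your Open vSwitch configuration):

apiVersion: k8s.cni.cncf.io/v1\nkind: NetworkAttachmentDefinition\nmetadata:\n  name: ovs-net  # hypothetical name\n  namespace: default\n  annotations:\n    k8s.v1.cni.cncf.io/resourceName: ovs-cni.network.kubevirt.io/br1  # bind to the br1 bridge\nspec:\n  config: |\n    {\n      \"cniVersion\": \"0.3.1\",\n      \"type\": \"ovs\",\n      \"bridge\": \"br1\"\n    }\n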

                                                                                  "},{"location":"en/admin/virtnest/vm/vm-network.html#network-configuration","title":"Network Configuration","text":""},{"location":"en/admin/virtnest/vm/vm-sc.html","title":"Storage for Virtual Machine","text":"

                                                                                  This article will introduce how to configure storage when creating a virtual machine.

Storage is closely related to virtual machine functionality, mainly providing flexible and scalable storage capabilities through Kubernetes persistent volumes and storage classes. For example, a virtual machine image stored in a PVC supports operations such as cloning and snapshotting together with other data.

                                                                                  "},{"location":"en/admin/virtnest/vm/vm-sc.html#deploying-different-storage","title":"Deploying Different Storage","text":"

                                                                                  Before using virtual machine storage functionality, different storage needs to be deployed according to requirements:

                                                                                  1. Refer to Deploying hwameistor, or install hwameistor-operator in the Helm template of the container management module.
2. Refer to Deploying rook-ceph.
3. Deploy local-path: use kubectl apply -f to create the following YAML:
                                                                                  Click to view complete YAML
                                                                                  ---\napiVersion: v1\nkind: Namespace\nmetadata:\n  name: local-path-storage\n\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: local-path-provisioner-service-account\n  namespace: local-path-storage\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: local-path-provisioner-role\nrules:\n- apiGroups: [\"\"]\n  resources: [\"nodes\", \"persistentvolumeclaims\", \"configmaps\"]\n  verbs: [\"get\", \"list\", \"watch\"]\n- apiGroups: [\"\"]\n  resources: [\"endpoints\", \"persistentvolumes\", \"pods\"]\n  verbs: [\"*\"]\n- apiGroups: [\"\"]\n  resources: [\"events\"]\n  verbs: [\"create\", \"patch\"]\n- apiGroups: [\"storage.k8s.io\"]\n  resources: [\"storageclasses\"]\n  verbs: [\"get\", \"list\", \"watch\"]\n\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: local-path-provisioner-bind\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: local-path-provisioner-role\nsubjects:\n- kind: ServiceAccount\n  name: local-path-provisioner-service-account\n  namespace: local-path-storage\n\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: local-path-provisioner\n  namespace: local-path-storage\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: local-path-provisioner\n  template:\n    metadata:\n      labels:\n        app: local-path-provisioner\n    spec:\n      serviceAccountName: local-path-provisioner-service-account\n      containers:\n      - name: local-path-provisioner\n        image: rancher/local-path-provisioner:v0.0.22\n        imagePullPolicy: IfNotPresent\n        command:\n        - local-path-provisioner\n        - --debug\n        - start\n        - --config\n        - /etc/config/config.json\n        volumeMounts:\n        - name: config-volume\n          mountPath: /etc/config/\n        env:\n        - name: POD_NAMESPACE\n          valueFrom:\n            fieldRef:\n              fieldPath: metadata.namespace\n      volumes:\n      - name: config-volume\n        configMap:\n          name: local-path-config\n\n---\napiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: local-path\nprovisioner: rancher.io/local-path\nvolumeBindingMode: WaitForFirstConsumer\nreclaimPolicy: Delete\n\n---\nkind: ConfigMap\napiVersion: v1\nmetadata:\n  name: local-path-config\n  namespace: local-path-storage\ndata:\n  config.json: |-\n    {\n      \"nodePathMap\": [\n        {\n          \"node\": \"DEFAULT_PATH_FOR_NON_LISTED_NODES\",\n          \"paths\": [\"/opt/local-path-provisioner\"]\n        }\n      ]\n    }\n  setup: |-\n    #!/bin/sh\n    set -eu\n    mkdir -m 0777 -p \"$VOL_DIR\"\n  teardown: |-\n    #!/bin/sh\n    set -eu\n    rm -rf \"$VOL_DIR\"\n  helperPod.yaml: |-\n    apiVersion: v1\n    kind: Pod\n    metadata:\n      name: helper-pod\n    spec:\n      containers:\n      - name: helper-pod\n        image: busybox\n        imagePullPolicy: IfNotPresent\n
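After saving the YAML above to a file, you can apply it and confirm that the provisioner is running and the StorageClass is registered (the file name is illustrative):

kubectl apply -f local-path-storage.yaml\nkubectl get pods -n local-path-storage\nkubectl get storageclass local-path\n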
                                                                                  "},{"location":"en/admin/virtnest/vm/vm-sc.html#virtual-machine-storage","title":"Virtual Machine Storage","text":"
                                                                                  1. System Disk: By default, a VirtIO type rootfs system disk is created for the system to store the operating system and data.

                                                                                  2. Data Disk: The data disk is a storage device in the virtual machine used to store user data, application data, or other files unrelated to the operating system. Compared to the system disk, the data disk is optional and can be dynamically added or removed as needed. The capacity of the data disk can also be flexibly configured according to requirements.

Block storage is used by default. If you need to use the cloning and snapshot functions, make sure your storage pool has created the proper VolumeSnapshotClass, as shown in the example below. If you need to use live migration, make sure your storage supports, and you have selected, the ReadWriteMany access mode.

In most cases, such a VolumeSnapshotClass is not created automatically during installation, so you need to create one manually. Here is an example of creating a VolumeSnapshotClass for HwameiStor:

                                                                                    kind: VolumeSnapshotClass\napiVersion: snapshot.storage.k8s.io/v1\nmetadata:\n  name: hwameistor-storage-lvm-snapshot\n  annotations:\n    snapshot.storage.kubernetes.io/is-default-class: \"true\"\nparameters:\n  snapsize: \"1073741824\"\ndriver: lvm.hwameistor.io\ndeletionPolicy: Delete\n
                                                                                    • Execute the following command to check if the VolumeSnapshotClass has been successfully created.

                                                                                      kubectl get VolumeSnapshotClass\n
• View the created VolumeSnapshotClass and confirm that its driver property is consistent with the provisioner property of the StorageClass used by the storage pool.
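For example, the two values can be compared directly with jsonpath queries (a sketch, using the HwameiStor names from the example above; in this example both should print lvm.hwameistor.io):

kubectl get volumesnapshotclass hwameistor-storage-lvm-snapshot -o jsonpath='{.driver}'\nkubectl get storageclass hwameistor-storage-lvm-hdd -o jsonpath='{.provisioner}'\n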

                                                                                  "},{"location":"en/admin/virtnest/vm-image/index.html","title":"Build Virtual Machine Images","text":"

                                                                                  This document will explain how to build the required virtual machine images.

A virtual machine image is essentially a copy file: a disk partition with an operating system installed. Common image file formats include raw, qcow2, and vmdk.

                                                                                  "},{"location":"en/admin/virtnest/vm-image/index.html#build-an-image","title":"Build an Image","text":"

                                                                                  Below are some detailed steps for building virtual machine images:

                                                                                  1. Download System Images

                                                                                    Before building virtual machine images, you need to download the required system images. We recommend using images in qcow2, raw, or vmdk formats. You can visit the following links to get CentOS and Fedora images:

                                                                                    • CentOS Cloud Images: Obtain CentOS images from the official CentOS project or other sources. Make sure to choose a version compatible with your virtualization platform.
                                                                                    • Fedora Cloud Images: Get images from the official Fedora project. Choose the appropriate version based on your requirements.
2. Build a Docker Image and Push It to a Container Registry

                                                                                    In this step, we will use Docker to build an image and push it to a container registry for easy deployment and usage when needed.

                                                                                    • Create a Dockerfile

                                                                                      FROM scratch\nADD --chown=107:107 CentOS-7-x86_64-GenericCloud.qcow2 /disk/\n

                                                                                      The Dockerfile above adds a file named CentOS-7-x86_64-GenericCloud.qcow2 to the image being built from a scratch base image and places it in the /disk/ directory within the image. This operation includes the file in the image, allowing it to provide a CentOS 7 x86_64 operating system environment when used to create a virtual machine.

                                                                                    • Build the Image

                                                                                      docker build -t release-ci.daocloud.io/ghippo/kubevirt-demo/centos7:v1 .\n

                                                                                      The above command builds an image named release-ci.daocloud.io/ghippo/kubevirt-demo/centos7:v1 using the instructions in the Dockerfile. You can modify the image name according to your project requirements.

                                                                                    • Push the Image to the Container Registry

                                                                                      Use the following command to push the built image to the release-ci.daocloud.io container registry. You can modify the repository name and address as needed.

                                                                                      docker push release-ci.daocloud.io/ghippo/kubevirt-demo/centos7:v1\n

                                                                                  These are the detailed steps and instructions for building virtual machine images. By following these steps, you will be able to successfully build and push images for virtual machines to meet your usage needs.
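Once the image is pushed, it can be referenced as the disk source of a virtual machine, for example in a dataVolumeTemplates entry similar to the VM YAML earlier in this document (a sketch; the disk name, storage size, and access mode are assumptions):

spec:\n  dataVolumeTemplates:\n    - metadata:\n        name: centos7-rootdisk  # hypothetical name\n      spec:\n        pvc:\n          accessModes:\n            - ReadWriteOnce\n          resources:\n            requests:\n              storage: 10Gi\n        source:\n          registry:\n            url: >-\n              docker://release-ci.daocloud.io/ghippo/kubevirt-demo/centos7:v1\n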

                                                                                  "},{"location":"en/admin/ghippo/best-practice/navigator.html","title":"Custom Navigation Bar","text":"

                                                                                  Currently, the custom navigation bar needs to be manually created as a YAML file and applied to the cluster.

                                                                                  "},{"location":"en/admin/ghippo/best-practice/navigator.html#navigation-bar-categories","title":"Navigation Bar Categories","text":"

                                                                                  To add or reorder navigation bar categories, you can achieve it by adding or modifying the category YAML.

                                                                                  Here is an example of a category YAML:

                                                                                  apiVersion: ghippo.io/v1alpha1\nkind: NavigatorCategory\nmetadata:\n  name: management-custom # (1)!\nspec:\n  name: Management # (2)!\n  isCustom: true # (3)!\n  localizedName: # (4)!\n    zh-CN: \u7ba1\u7406\n    en-US: Management\n  order: 100 # (5)!\n
                                                                                  1. Naming convention: composed of lowercase \"spec.name\" and \"-custom\"
2. When modifying an existing category, this must match the spec.name of the category being modified
                                                                                  3. This field must be true
                                                                                  4. Define the Chinese and English names of the category
                                                                                  5. The higher the number, the higher its position in the sorting order

                                                                                  After writing the YAML file, you can see the newly added or modified navigation bar categories by executing the following command and refreshing the page:

                                                                                  kubectl apply -f xxx.yaml\n
                                                                                  "},{"location":"en/admin/ghippo/best-practice/navigator.html#navigation-bar-menus","title":"Navigation Bar Menus","text":"

                                                                                  To add or reorder navigation bar menus, you can achieve it by adding a navigator YAML.

                                                                                  Note

If you need to edit an existing navigation bar menu (not a custom menu added by a user), set the "gproduct" field of the new custom menu to the same value as the "gproduct" field of the menu to be overridden. The new navigation bar menu will overwrite entries with the same "name" in the "menus" section and append entries with different "name" values.

                                                                                  "},{"location":"en/admin/ghippo/best-practice/navigator.html#first-level-menu","title":"First-level Menu","text":"

                                                                                  Insert as a product under a navigation bar category

                                                                                  apiVersion: ghippo.io/v1alpha1\nkind: GProductNavigator\nmetadata:\n  name: gmagpie-custom # (1)!\nspec:\n  name: Operations Management\n  iconUrl: ./ui/gmagpie/gmagpie.svg\n  localizedName: # (2)!\n    zh-CN: \u8fd0\u8425\u7ba1\u7406\n    en-US: Operations Management\n  url: ./gmagpie\n  category: management # (3)!\n  menus: # (4)!\n    - name: Access Control\n      iconUrl: ./ui/ghippo/menus/access-control.svg\n      localizedName:\n        zh-CN: \u7528\u6237\u4e0e\u8bbf\u95ee\u63a7\u5236\n        en-US: Access Control\n      url: ./ghippo/users\n      order: 50 # (5)!\n    - name: Workspace\n      iconUrl: ./ui/ghippo/menus/workspace-folder.svg\n      localizedName:\n        zh-CN: \u5de5\u4f5c\u7a7a\u95f4\u4e0e\u5c42\u7ea7\n        en-US: Workspace and Folder\n      url: ./ghippo/workspaces\n      order: 40\n    - name: Audit Log\n      iconUrl: ./ui/ghippo/menus/audit-logs.svg\n      localizedName:\n        zh-CN: \u5ba1\u8ba1\u65e5\u5fd7\n        en-US: Audit Log\n      url: ./ghippo/audit\n      order: 30\n    - name: Settings\n      iconUrl: ./ui/ghippo/menus/setting.svg\n      localizedName:\n        zh-CN: \u5e73\u53f0\u8bbe\u7f6e\n        en-US: Settings\n      url: ./ghippo/settings\n      order: 10\n  gproduct: gmagpie # (6)!\n  visible: true # (7)!\n  isCustom: true # (8)!\n  order: 20 # (9)!\n  target: blank # (10)!\n
                                                                                  1. Naming convention: composed of lowercase \"spec.gproduct\" and \"-custom\"
                                                                                  2. Define the Chinese and English names of the menu
                                                                                  3. Either \"category\" or \"parentGProduct\" can be used to distinguish between first-level and second-level menus, and it should match the \"spec.name\" field of NavigatorCategory to complete the matching
                                                                                  4. Second-level menus
                                                                                  5. The lower the number, the higher its position in the sorting order
                                                                                  6. Define the identifier of the menu, used for linkage with the parentGProduct field to establish the parent-child relationship.
                                                                                  7. Set whether the menu is visible, default is true
                                                                                  8. This field must be true
                                                                                  9. The higher the number, the higher its position in the sorting order
                                                                                  10. Open a new tab
                                                                                  "},{"location":"en/admin/ghippo/best-practice/navigator.html#second-level-menu","title":"Second-level Menu","text":"

                                                                                  Insert as a sub-product under the second-level menu of a first-level menu

                                                                                  apiVersion: ghippo.io/v1alpha1\nkind: GProductNavigator\nmetadata:\n  name: gmagpie-custom # (1)!\nspec:\n  name: Operations Management\n  iconUrl: ./ui/gmagpie/gmagpie.svg\n  localizedName: # (2)!\n    zh-CN: \u8fd0\u8425\u7ba1\u7406\n    en-US: Operations Management\n  url: ./gmagpie\n  parentGProduct: ghippo # (3)!\n  gproduct: gmagpie # (4)!\n  visible: true # (5)!\n  isCustom: true # (6)!\n  order: 20 # (7)!\n
                                                                                  1. Naming convention: composed of lowercase \"spec.gproduct\" and \"-custom\"
                                                                                  2. Define the Chinese and English names of the menu
                                                                                  3. Either \"category\" or \"parentGProduct\" can be used to distinguish between first-level and second-level menus. If this field is added, it will ignore the \"menus\" field and insert this menu as a second-level menu under the first-level menu with the \"gproduct\" of \"ghippo\"
                                                                                  4. Define the identifier of the menu, used for linkage with the parentGProduct field to establish the parent-child relationship.
                                                                                  5. Set whether the menu is visible, default is true
                                                                                  6. This field must be true
                                                                                  7. The higher the number, the higher its position in the sorting order
                                                                                  "},{"location":"en/admin/insight/quickstart/agent-status.html","title":"Insight-agent component status","text":"

Insight is the multicluster observability product of the AI platform. To collect observability data from multiple clusters in a unified way, users need to install the Helm App insight-agent (installed in the insight-system namespace by default). See How to install insight-agent.

                                                                                  "},{"location":"en/admin/insight/quickstart/agent-status.html#status-description","title":"Status description","text":"

In the Insight -> Data Collection section, you can view the status of the insight-agent installed in each cluster.

• not installed: insight-agent is not installed under the insight-system namespace in this cluster.
• Running: insight-agent is successfully installed in the cluster, and all deployed components are running.
• Exception: insight-agent in this state means that the Helm deployment failed or some of the deployed components are not running.

The status can be checked as follows:

1. Run the following command. If the status is deployed, go to the next step. If it is failed, it is recommended to uninstall and reinstall it via Container Management -> Helm Apps, since a failed release affects application upgrades:

                                                                                    helm list -n insight-system\n
2. Run the following command, or check the status of the components deployed in the cluster in Insight -> Data Collection. If any pod is not in the Running state, restart the abnormal pod.

                                                                                    kubectl get pods -n insight-system\n
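Deleting an abnormal pod lets its controller recreate it, which is usually the simplest way to restart it (replace xxxx with the name of the abnormal pod):

kubectl delete pod xxxx -n insight-system\n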
                                                                                  "},{"location":"en/admin/insight/quickstart/agent-status.html#supplementary-instructions","title":"Supplementary instructions","text":"
1. The resource consumption of the metric collection component Prometheus in insight-agent is directly proportional to the number of pods running in the cluster. Adjust the Prometheus resources according to the cluster size; refer to Prometheus Resource Planning.

2. The storage capacity of the metric storage component vmstorage in the global service cluster is directly proportional to the total number of pods across all clusters.

  • Contact the platform administrator to adjust the disk capacity of vmstorage according to the cluster size; see vmstorage disk capacity planning.
  • Adjust the vmstorage disk according to the multicluster scale; see vmstorage disk expansion.
                                                                                  "},{"location":"en/admin/insight/quickstart/jvm-monitor/jmx-exporter.html","title":"Use JMX Exporter to expose JVM monitoring metrics","text":"

                                                                                  JMX-Exporter provides two usages:

1. Start a standalone process. Specify parameters when the JVM starts to expose an RMI interface for JMX. JMX Exporter calls RMI to obtain the JVM runtime state data, converts it into Prometheus metrics format, and exposes a port for Prometheus to scrape.
2. Start inside the JVM process. Specify parameters when the JVM starts to run the JMX Exporter JAR as a javaagent. It reads the JVM runtime state data in-process, converts it into Prometheus metrics format, and exposes a port for Prometheus to scrape.

                                                                                  Note

The JMX Exporter project does not recommend the first method: the configuration is complicated, and it requires a separate process whose monitoring then becomes a new problem. This page therefore focuses on the second usage and explains how to use JMX Exporter to expose JVM monitoring metrics in a Kubernetes environment.

With the second usage, the JMX Exporter JAR file and configuration file must be specified when starting the JVM. The JAR is a binary file that is not easy to mount via a ConfigMap, and the configuration file rarely needs modification, so the suggestion is to package both the JMX Exporter JAR and its configuration file directly into the business container image.

For the second usage, you can either build the JMX Exporter JAR file into the business application image or mount it during deployment. The two methods are introduced below:

                                                                                  "},{"location":"en/admin/insight/quickstart/jvm-monitor/jmx-exporter.html#method-1-build-the-jmx-exporter-jar-file-into-the-business-image","title":"Method 1: Build the JMX Exporter JAR file into the business image","text":"

                                                                                  The content of prometheus-jmx-config.yaml is as follows:

                                                                                  prometheus-jmx-config.yaml
                                                                                  ...\nssl: false\nlowercaseOutputName: false\nlowercaseOutputLabelNames: false\nrules:\n- pattern: \".*\"\n

                                                                                  Note

For more configuration options, refer to the introduction at the bottom of this page or the official Prometheus documentation.

Then prepare the JAR file. You can find the download address of the latest JAR on the GitHub page of jmx_exporter, and refer to the following Dockerfile:

                                                                                  FROM openjdk:11.0.15-jre\nWORKDIR /app/\nCOPY target/my-app.jar ./\nCOPY prometheus-jmx-config.yaml ./\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\nENV JAVA_TOOL_OPTIONS=-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\nEXPOSE 8081 8999 8080 8888\nENTRYPOINT java $JAVA_OPTS -jar my-app.jar\n

                                                                                  Notice:

• Start parameter format: -javaagent:<jar-path>=<port>:<config-path>
• Port 8088 is used here to expose the JVM monitoring metrics; if it conflicts with the Java application, change it as needed.
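After the container starts, you can verify locally that the agent is serving metrics (a sketch, assuming port 8088 as configured in the Dockerfile above):

curl http://localhost:8088/metrics\n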
                                                                                  "},{"location":"en/admin/insight/quickstart/jvm-monitor/jmx-exporter.html#method-2-mount-via-init-container-container","title":"Method 2: mount via init container container","text":"

First, build JMX Exporter into a Docker image. The following Dockerfile is for reference only:

FROM alpine/curl:3.14\nWORKDIR /app/\n# Copy the previously created config file into the image\nCOPY prometheus-jmx-config.yaml ./\n# Download the jmx prometheus javaagent jar online\nRUN set -ex; \\\n     curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\n

                                                                                  Build the image according to the above Dockerfile: docker build -t my-jmx-exporter .

Add the following init container to the Java application's deployment YAML:

                                                                                  Click to view YAML file
apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-demo-app\n  labels:\n    app: my-demo-app\nspec:\n  selector:\n    matchLabels:\n      app: my-demo-app\n  template:\n    metadata:\n      labels:\n        app: my-demo-app\n    spec:\n      imagePullSecrets:\n      - name: registry-pull\n      initContainers:\n      - name: jmx-sidecar\n        image: my-jmx-exporter\n        # Copy both the agent JAR and its config file into the shared volume\n        command: [\"cp\", \"/app/jmx_prometheus_javaagent-0.17.2.jar\", \"/app/prometheus-jmx-config.yaml\", \"/target/\"]  \u278a\n        volumeMounts:\n        - name: sidecar\n          mountPath: /target\n      containers:\n      - image: my-demo-app-image\n        name: my-demo-app\n        resources:\n          requests:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n          limits:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n        ports:\n        - containerPort: 18083\n        env:\n        - name: JAVA_TOOL_OPTIONS\n          value: \"-javaagent:/sidecar/jmx_prometheus_javaagent-0.17.2.jar=8088:/sidecar/prometheus-jmx-config.yaml\" \u278b\n        volumeMounts:\n        - name: host-time\n          mountPath: /etc/localtime\n          readOnly: true\n        - name: sidecar\n          mountPath: /sidecar\n      volumes:\n      - name: host-time\n        hostPath:\n          path: /etc/localtime\n      - name: sidecar  # Share the agent folder\n        emptyDir: {}\n      restartPolicy: Always\n

After the above modifications, the sample application my-demo-app is able to expose JVM metrics. After the service runs, you can access the Prometheus-format metrics it exposes at http://localhost:8088.

                                                                                  Then, you can refer to Java Application Docking Observability with JVM Metrics.

                                                                                  "},{"location":"en/admin/insight/quickstart/jvm-monitor/jvm-catelogy.html","title":"Start monitoring Java applications","text":"

This document mainly describes how to monitor the JVM of the customer's Java application. It explains how Java applications that already expose JVM metrics, as well as those that do not, can interface with Insight.

                                                                                  If your Java application does not start exposing JVM metrics, you can refer to the following documents:

                                                                                  • Expose JVM monitoring metrics with JMX Exporter
                                                                                  • Expose JVM monitoring metrics using OpenTelemetry Java Agent

                                                                                  If your Java application has exposed JVM metrics, you can refer to the following documents:

                                                                                  • Java application docking observability with existing JVM metrics
                                                                                  "},{"location":"en/admin/insight/quickstart/jvm-monitor/legacy-jvm.html","title":"Java Application with JVM Metrics to Dock Insight","text":"

If your Java application exposes JVM monitoring metrics through other means (such as Spring Boot Actuator), that monitoring data needs to be made collectible. You can let Insight collect the existing JVM metrics by adding Kubernetes annotations to the workload:

annotations:\n  insight.opentelemetry.io/metric-scrape: \"true\" # whether to collect\n  insight.opentelemetry.io/metric-path: \"/\" # path to collect metrics\n  insight.opentelemetry.io/metric-port: \"9464\" # port for collecting metrics\n

YAML example of adding annotations to the my-deployment-app workload:

                                                                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-app\nspec:\n  selector:\n    matchLabels:\n      app: my-deployment-app\n      app.kubernetes.io/name: my-deployment-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-deployment-app\n        app.kubernetes.io/name: my-deployment-app\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\" # whether to collect\n        insight.opentelemetry.io/metric-path: \"/\" # path to collect metrics\n        insight.opentelemetry.io/metric-port: \"9464\" # port for collecting metrics\n

                                                                                  The following shows the complete YAML:

                                                                                  ---\napiVersion: v1\nkind: Service\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  type: NodePort\n  selector:\n    #app: my-deployment-with-aotu-instrumentation-app\n    app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  ports:\n    - name: http\n      port: 8080\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  selector:\n    matchLabels:\n      #app: my-deployment-with-aotu-instrumentation-app\n      app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\" # whether to collect\n        insight.opentelemetry.io/metric-path: \"/actuator/prometheus\"      # path to collect metrics\n        insight.opentelemetry.io/metric-port: \"8080\"   # port for collecting metrics\n    spec:\n      containers:\n        - name: myapp\n          image: docker.m.daocloud.io/wutang/spring-boot-actuator-prometheus-metrics-demo\n          ports:\n            - name: http\n              containerPort: 8080\n          resources:\n            limits:\n              cpu: 500m\n              memory: 800Mi\n            requests:\n              cpu: 200m\n              memory: 400Mi\n

In the above example, Insight will scrape the Prometheus metrics exposed through Spring Boot Actuator on port 8080 at the path /actuator/prometheus.
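To double-check from inside the cluster that the endpoint really serves metrics, you can query it directly (a sketch; replace xxxx with the pod IP or Service address):

curl http://xxxx:8080/actuator/prometheus\n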

                                                                                  "},{"location":"en/admin/insight/quickstart/jvm-monitor/otel-java-agent.html","title":"Use OpenTelemetry Java Agent to expose JVM monitoring metrics","text":"

In OpenTelemetry Agent v1.20.0 and above, the agent added the JMX Metric Insight module. If your application has already integrated the OpenTelemetry Agent to collect application traces, you no longer need to introduce another agent to expose JMX metrics. The OpenTelemetry Agent collects and exposes metrics by instrumenting the MBeans locally available in the application.

The OpenTelemetry Agent also ships with built-in monitoring samples for common Java servers and frameworks; refer to Predefined Metrics.

Using the OpenTelemetry Java Agent also requires deciding how to mount the JAR into the container. In addition to mounting the JAR file as described for the JMX Exporter above, you can use the Operator capabilities provided by OpenTelemetry to automatically enable JVM metric exposure for your applications.


However, in the current version you still need to manually add the proper annotations to the workload before the JVM data will be collected by Insight.

                                                                                  "},{"location":"en/admin/insight/quickstart/jvm-monitor/otel-java-agent.html#expose-metrics-for-java-middleware","title":"Expose metrics for Java middleware","text":"

The OpenTelemetry Agent also has some built-in middleware monitoring samples; please refer to Predefined Metrics.

By default, no target system is specified; specify it with the -Dotel.jmx.target.system JVM option, for example -Dotel.jmx.target.system=jetty,kafka-broker .
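
As a minimal sketch (assuming the OpenTelemetry Java Agent is already attached to the application; the workload name kafka-broker-demo and the image are hypothetical placeholders), the option can be passed through the JAVA_TOOL_OPTIONS environment variable:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka-broker-demo            # hypothetical workload name
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kafka-broker-demo
  template:
    metadata:
      labels:
        app: kafka-broker-demo
    spec:
      containers:
        - name: broker
          image: registry.example.com/kafka:latest   # placeholder image
          env:
            # Tell the JMX Metric Insight module which target system to instrument
            - name: JAVA_TOOL_OPTIONS
              value: "-Dotel.jmx.target.system=kafka-broker"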

                                                                                  "},{"location":"en/admin/insight/quickstart/jvm-monitor/otel-java-agent.html#reference","title":"Reference","text":"
                                                                                  • Gaining JMX Metric Insights with the OpenTelemetry Java Agent

                                                                                  • Otel jmx metrics

                                                                                  "},{"location":"en/admin/insight/quickstart/otel/golang-ebpf.html","title":"Enhance Go apps with OTel auto-instrumentation","text":"

If you don't want to manually change the application code, you can try the eBPF-based automatic enhancement method described on this page. This feature is currently under review for donation to the OpenTelemetry community and does not yet support Operator injection through annotations (support is planned), so you need to manually change the Deployment YAML or apply a patch.

                                                                                  "},{"location":"en/admin/insight/quickstart/otel/golang-ebpf.html#prerequisites","title":"Prerequisites","text":"

Make sure the Insight Agent is ready. If not, refer to Install insight-agent to collect data and confirm the following three items:

• The trace feature is enabled for insight-agent
• The address and port for reporting trace data are filled in correctly
• The Pods corresponding to deployment/opentelemetry-operator-controller-manager and deployment/insight-agent-opentelemetry-collector are ready
                                                                                  "},{"location":"en/admin/insight/quickstart/otel/golang-ebpf.html#install-instrumentation-cr","title":"Install Instrumentation CR","text":"

Install the Instrumentation CR in the insight-system namespace; skip this step if it has already been installed.

Note: This CR currently only supports injecting the environment variables (including the service name and trace reporting address) required to connect to Insight; injection of Golang probes will be supported in the future.

                                                                                  kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.17.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.31.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.34b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:0.3.1-beta.1\nEOF\n
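
You can verify that the CR is in place (an optional check; the instrumentations resource is provided by the opentelemetry-operator):

kubectl -n insight-system get instrumentations.opentelemetry.io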
                                                                                  "},{"location":"en/admin/insight/quickstart/otel/golang-ebpf.html#change-the-application-deployment-file","title":"Change the application deployment file","text":"
                                                                                  • Add environment variable annotations

There is only one such annotation; it is used to add OpenTelemetry-related environment variables, such as the trace reporting address, the ID of the cluster where the container is located, and the namespace:

                                                                                    instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

The value is divided by / into two parts: the first part, insight-system, is the namespace of the CR installed in the previous step, and the second part, insight-opentelemetry-autoinstrumentation, is the name of that CR. A kubectl patch sketch for applying this annotation follows this list.

• Add the Golang eBPF probe container

                                                                                    Here is sample code:

                                                                                    apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: voting\n  namespace: emojivoto\n  labels:\n    app.kubernetes.io/name: voting\n    app.kubernetes.io/part-of: emojivoto\n    app.kubernetes.io/version: v11\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: voting-svc\n      version: v11\n  template:\n    metadata:\n      labels:\n        app: voting-svc\n        version: v11\n      annotations:\n        instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\" # (1)\n    spec:\n      containers:\n        - env:\n            - name: GRPC_PORT\n              value: \"8080\"\n            - name: PROM_PORT\n              value: \"8801\"\n          image: docker.l5d.io/buoyantio/emojivoto-voting-svc:v11 # (2)\n          name: voting-svc\n          command:\n            - /usr/local/bin/emojivoto-voting-svc\n          ports:\n            - containerPort: 8080\n              name: grpc\n            - containerPort: 8801\n              name: prom\n          resources:\n            requests:\n              cpu: 100m\n        - name: emojivoto-voting-instrumentation\n          image: docker.m.daocloud.io/keyval/otel-go-agent:v0.6.0\n          env:\n            - name: OTEL_TARGET_EXE\n              value: /usr/local/bin/emojivoto-voting-svc # (3)\n          securityContext:\n            runAsUser: 0\n            capabilities:\n              add:\n                - SYS_PTRACE\n            privileged: true\n          volumeMounts:\n            - mountPath: /sys/kernel/debug\n              name: kernel-debug\n      volumes:\n        - name: kernel-debug\n          hostPath:\n            path: /sys/kernel/debug\n
                                                                                    1. Used to add environment variables related to OpenTelemetry.
                                                                                    2. Assuming this is your Golang application.
3. Note that it must be consistent with the command mentioned above: /usr/local/bin/emojivoto-voting-svc .
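
As referenced in the first item above, here is a hedged sketch of applying the inject-sdk annotation with a patch instead of editing the YAML (my-deployment-app is a placeholder workload name):

kubectl patch deployment my-deployment-app --type merge -p '{
  "spec": {
    "template": {
      "metadata": {
        "annotations": {
          "instrumentation.opentelemetry.io/inject-sdk": "insight-system/insight-opentelemetry-autoinstrumentation"
        }
      }
    }
  }
}'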

The final generated YAML is as follows:

                                                                                  apiVersion: v1\nkind: Pod\nmetadata:\n  name: voting-84b696c897-p9xbp\n  generateName: voting-84b696c897-\n  namespace: default\n  uid: 742639b0-db6e-4f06-ac90-68a80e2b8a11\n  resourceVersion: '65560793'\n  creationTimestamp: '2022-10-19T07:08:56Z'\n  labels:\n    app: voting-svc\n    pod-template-hash: 84b696c897\n    version: v11\n  annotations:\n    cni.projectcalico.org/containerID: 0a987cf0055ce0dfbe75c3f30d580719eb4fbbd7e1af367064b588d4d4e4c7c7\n    cni.projectcalico.org/podIP: 192.168.141.218/32\n    cni.projectcalico.org/podIPs: 192.168.141.218/32\n    instrumentation.opentelemetry.io/inject-sdk: insight-system/insight-opentelemetry-autoinstrumentation\nspec:\n  volumes:\n    - name: launcherdir\n      emptyDir: {}\n    - name: kernel-debug\n      hostPath:\n        path: /sys/kernel/debug\n        type: ''\n    - name: kube-api-access-gwj5v\n      projected:\n        sources:\n          - serviceAccountToken:\n              expirationSeconds: 3607\n              path: token\n          - configMap:\n              name: kube-root-ca.crt\n              items:\n                - key: ca.crt\n                  path: ca.crt\n          - downwardAPI:\n              items:\n                - path: namespace\n                  fieldRef:\n                    apiVersion: v1\n                    fieldPath: metadata.namespace\n        defaultMode: 420\n  containers:\n    - name: voting-svc\n      image: docker.l5d.io/buoyantio/emojivoto-voting-svc:v11\n      command:\n        - /odigos-launcher/launch\n        - /usr/local/bin/emojivoto-voting-svc\n      ports:\n        - name: grpc\n          containerPort: 8080\n          protocol: TCP\n        - name: prom\n          containerPort: 8801\n          protocol: TCP\n      env:\n        - name: GRPC_PORT\n          value: '8080'\n        - name: PROM_PORT\n          value: '8801'\n        - name: OTEL_TRACES_EXPORTER\n          value: otlp\n        - name: OTEL_EXPORTER_OTLP_ENDPOINT\n          value: >-\n            http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n        - name: OTEL_EXPORTER_OTLP_TIMEOUT\n          value: '200'\n        - name: SPLUNK_TRACE_RESPONSE_HEADER_ENABLED\n          value: 'true'\n        - name: OTEL_SERVICE_NAME\n          value: voting\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.name\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_UID\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.uid\n        - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: spec.nodeName\n        - name: OTEL_PROPAGATORS\n          value: jaeger,b3\n        - name: OTEL_TRACES_SAMPLER\n          value: always_on\n        - name: OTEL_RESOURCE_ATTRIBUTES\n          value: >-\n            k8s.container.name=voting-svc,k8s.deployment.name=voting,k8s.deployment.uid=79e015e2-4643-44c0-993c-e486aebaba10,k8s.namespace.name=default,k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME),k8s.pod.uid=$(OTEL_RESOURCE_ATTRIBUTES_POD_UID),k8s.replicaset.name=voting-84b696c897,k8s.replicaset.uid=63f56167-6632-415d-8b01-43a3db9891ff\n      resources:\n        requests:\n          cpu: 100m\n      volumeMounts:\n        - name: 
launcherdir\n          mountPath: /odigos-launcher\n        - name: kube-api-access-gwj5v\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: IfNotPresent\n    - name: emojivoto-voting-instrumentation\n      image: keyval/otel-go-agent:v0.6.0\n      env:\n        - name: OTEL_TARGET_EXE\n          value: /usr/local/bin/emojivoto-voting-svc\n        - name: OTEL_EXPORTER_OTLP_ENDPOINT\n          value: jaeger:4317\n        - name: OTEL_SERVICE_NAME\n          value: emojivoto-voting\n      resources: {}\n      volumeMounts:\n        - name: kernel-debug\n          mountPath: /sys/kernel/debug\n        - name: kube-api-access-gwj5v\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: IfNotPresent\n      securityContext:\n        capabilities:\n          add:\n            - SYS_PTRACE\n        privileged: true\n        runAsUser: 0\n\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n
                                                                                  "},{"location":"en/admin/insight/quickstart/otel/golang-ebpf.html#reference","title":"Reference","text":"
                                                                                  • Getting Started with Go OpenTelemetry Automatic Instrumentation
                                                                                  • Donating ebpf based instrumentation
                                                                                  "},{"location":"en/admin/insight/quickstart/other/install-agentindce.html","title":"Install insight-agent in Suanova 4.0","text":"

In the AI platform, a legacy Suanova 4.0 cluster can be accessed as a subcluster. This guide covers potential issues and their solutions when installing insight-agent in a Suanova 4.0 cluster.

                                                                                  "},{"location":"en/admin/insight/quickstart/other/install-agentindce.html#issue-one","title":"Issue One","text":"

Most Suanova 4.0 clusters already have dx-insight installed as the monitoring system. Installing insight-agent in this case conflicts with the existing prometheus operator in the cluster, preventing a smooth installation.

                                                                                  "},{"location":"en/admin/insight/quickstart/other/install-agentindce.html#solution","title":"Solution","text":"

Enable the proper parameters on the prometheus operators: keep the prometheus operator in dx-insight and make it compatible with the prometheus operator of insight-agent in 5.0.

                                                                                  "},{"location":"en/admin/insight/quickstart/other/install-agentindce.html#steps","title":"Steps","text":"
                                                                                  1. Log in to the console.
                                                                                  2. Enable the --deny-namespaces parameter in the two prometheus operators respectively.
3. Run the following command (for reference only; replace the prometheus operator name and namespace with your actual values).

                                                                                    kubectl edit deploy insight-agent-kube-prometh-operator -n insight-system\n

                                                                                  Note

• The dx-insight components are deployed under the dx-insight namespace, and insight-agent is deployed under the insight-system namespace. Add --deny-namespaces=insight-system to the prometheus operator in dx-insight, and add --deny-namespaces=dx-insight to the prometheus operator in insight-agent; see the args sketch after this list.
• Only the deny namespace is added; both prometheus operators can continue to scan other namespaces, so the related collection resources under kube-system or customer business namespaces are not affected.
• Watch out for node-exporter port conflicts (see the supplementary explanation below).
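
A minimal sketch of what the operator's container args might look like after the edit (the container name and the other args are assumptions; only the --deny-namespaces line is the actual change):

# kubectl edit deploy insight-agent-kube-prometh-operator -n insight-system
spec:
  template:
    spec:
      containers:
        - name: kube-prometheus-stack        # assumed container name; check your deployment
          args:
            # ...existing args unchanged...
            - --deny-namespaces=dx-insight   # skip the namespace watched by the other operator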
                                                                                  "},{"location":"en/admin/insight/quickstart/other/install-agentindce.html#supplementary-explanation","title":"Supplementary Explanation","text":"

The open-source node-exporter enables hostNetwork by default and listens on port 9100. If the cluster's monitoring system has already installed node-exporter , installing insight-agent will cause a node-exporter port conflict, and it will not run normally.

                                                                                  Note

Insight's node exporter enables some extra features to collect special metrics, so installing it is recommended.

Currently, the port cannot be changed in the installation command. After helm install insight-agent , you need to manually modify the related ports of the insight node-exporter daemonset and service.
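
A hedged sketch of that manual change (the resource names below are assumptions; list the actual names first):

# Find the actual node-exporter resources installed by insight-agent
kubectl -n insight-system get daemonset,svc | grep node-exporter

# Edit the port fields, e.g. change 9100 to an unused port such as 9101
kubectl -n insight-system edit daemonset insight-agent-node-exporter
kubectl -n insight-system edit svc insight-agent-node-exporter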

                                                                                  "},{"location":"en/admin/insight/quickstart/other/install-agentindce.html#issue-two","title":"Issue Two","text":"

After Insight Agent is deployed successfully, Fluent Bit does not collect Suanova 4.0 logs.

                                                                                  "},{"location":"en/admin/insight/quickstart/other/install-agentindce.html#solution_1","title":"Solution","text":"

The docker storage directory of Suanova 4.0 is /var/lib/containers , which differs from the path in the insight-agent configuration, so the logs are not collected.

                                                                                  "},{"location":"en/admin/insight/quickstart/other/install-agentindce.html#steps_1","title":"Steps","text":"
                                                                                  1. Log in to the console.
                                                                                  2. Modify the following parameters in the insight-agent Chart.

fluent-bit:\n  daemonSetVolumeMounts:\n    - name: varlog\n      mountPath: /var/log\n    - name: varlibdockercontainers\n-     mountPath: /var/lib/docker/containers\n+     mountPath: /var/lib/containers/docker/containers\n      readOnly: true\n    - name: etcmachineid\n      mountPath: /etc/machine-id\n      readOnly: true\n    - name: dmesg\n      mountPath: /var/log/dmesg\n      readOnly: true\n
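
Re-deploying the chart then applies the new mounts; a sketch assuming the release is named insight-agent and the override above is saved in values.yaml (replace the chart reference with your actual source):

helm upgrade insight-agent <insight-agent-chart> \
  -n insight-system \
  -f values.yaml   # carries the fluent-bit mountPath override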
                                                                                  "},{"location":"en/admin/insight/trace/topology-helper.html","title":"Service Topology Element Explanations","text":"

                                                                                  The service topology provided by Observability allows you to quickly identify the request relationships between services and determine the health status of services based on different colors. The health status is determined based on the request latency and error rate of the service's overall traffic. This article explains the elements in the service topology.

                                                                                  "},{"location":"en/admin/insight/trace/topology-helper.html#node-status-explanation","title":"Node Status Explanation","text":"

                                                                                  The node health status is determined based on the error rate and request latency of the service's overall traffic, following these rules:

Color | Status | Rules
Gray | Healthy | Error rate equals 0% and request latency is less than 100ms
Orange | Warning | Error rate in (0, 5%) or request latency in (100ms, 200ms)
Red | Abnormal | Error rate in (5%, 100%) or request latency in (200ms, +Infinity)

"},{"location":"en/admin/insight/trace/topology-helper.html#connection-status-explanation","title":"Connection Status Explanation","text":"

Color | Status | Rules
Green | Healthy | Error rate equals 0% and request latency is less than 100ms
Orange | Warning | Error rate in (0, 5%) or request latency in (100ms, 200ms)
Red | Abnormal | Error rate in (5%, 100%) or request latency in (200ms, +Infinity)

"},{"location":"en/admin/kpanda/gpu/gpu-metrics.html","title":"GPU Metrics","text":"

                                                                                  This page lists some commonly used GPU metrics.

                                                                                  "},{"location":"en/admin/kpanda/gpu/gpu-metrics.html#cluster-level","title":"Cluster Level","text":"Metric Name Description Number of GPUs Total number of GPUs in the cluster Average GPU Utilization Average compute utilization of all GPUs in the cluster Average GPU Memory Utilization Average memory utilization of all GPUs in the cluster GPU Power Power consumption of all GPUs in the cluster GPU Temperature Temperature of all GPUs in the cluster GPU Utilization Details 24-hour usage details of all GPUs in the cluster (includes max, avg, current) GPU Memory Usage Details 24-hour memory usage details of all GPUs in the cluster (includes min, max, avg, current) GPU Memory Bandwidth Utilization For example, an Nvidia V100 GPU has a maximum memory bandwidth of 900 GB/sec. If the current memory bandwidth is 450 GB/sec, the utilization is 50%"},{"location":"en/admin/kpanda/gpu/gpu-metrics.html#node-level","title":"Node Level","text":"Metric Name Description GPU Mode Usage mode of GPUs on the node, including full-card mode, MIG mode, vGPU mode Number of Physical GPUs Total number of physical GPUs on the node Number of Virtual GPUs Number of vGPU devices created on the node Number of MIG Instances Number of MIG instances created on the node GPU Memory Allocation Rate Memory allocation rate of all GPUs on the node Average GPU Utilization Average compute utilization of all GPUs on the node Average GPU Memory Utilization Average memory utilization of all GPUs on the node GPU Driver Version Driver version information of GPUs on the node GPU Utilization Details 24-hour usage details of each GPU on the node (includes max, avg, current) GPU Memory Usage Details 24-hour memory usage details of each GPU on the node (includes min, max, avg, current)"},{"location":"en/admin/kpanda/gpu/gpu-metrics.html#pod-level","title":"Pod Level","text":"Category Metric Name Description Application Overview GPU - Compute & Memory Pod GPU Utilization Compute utilization of the GPUs used by the current Pod Pod GPU Memory Utilization Memory utilization of the GPUs used by the current Pod Pod GPU Memory Usage Memory usage of the GPUs used by the current Pod Memory Allocation Memory allocation of the GPUs used by the current Pod Pod GPU Memory Copy Ratio Memory copy ratio of the GPUs used by the current Pod GPU - Engine Overview GPU Graphics Engine Activity Percentage Percentage of time the Graphics or Compute engine is active during a monitoring cycle GPU Memory Bandwidth Utilization Memory bandwidth utilization (Memory BW Utilization) indicates the fraction of cycles during which data is sent to or received from the device memory. This value represents the average over the interval, not an instantaneous value. A higher value indicates higher utilization of device memory.A value of 1 (100%) indicates that a DRAM instruction is executed every cycle during the interval (in practice, a peak of about 0.8 (80%) is the maximum achievable).A value of 0.2 (20%) indicates that 20% of the cycles during the interval are spent reading from or writing to device memory. 
Tensor Core Utilization Percentage of time the Tensor Core pipeline is active during a monitoring cycle FP16 Engine Utilization Percentage of time the FP16 pipeline is active during a monitoring cycle FP32 Engine Utilization Percentage of time the FP32 pipeline is active during a monitoring cycle FP64 Engine Utilization Percentage of time the FP64 pipeline is active during a monitoring cycle GPU Decode Utilization Decode engine utilization of the GPU GPU Encode Utilization Encode engine utilization of the GPU GPU - Temperature & Power GPU Temperature Temperature of all GPUs in the cluster GPU Power Power consumption of all GPUs in the cluster GPU Total Power Consumption Total power consumption of the GPUs GPU - Clock GPU Memory Clock Memory clock frequency GPU Application SM Clock Application SM clock frequency GPU Application Memory Clock Application memory clock frequency GPU Video Engine Clock Video engine clock frequency GPU Throttle Reasons Reasons for GPU throttling GPU - Other Details PCIe Transfer Rate Data transfer rate of the GPU through the PCIe bus PCIe Receive Rate Data receive rate of the GPU through the PCIe bus"},{"location":"en/admin/kpanda/gpu/ascend/Ascend_usage.html","title":"Use Ascend NPU","text":"

This section explains how to use Ascend NPUs on the AI platform.

                                                                                  "},{"location":"en/admin/kpanda/gpu/ascend/Ascend_usage.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • The current NPU node has the Ascend driver installed.
                                                                                  • The current NPU node has the Ascend-Docker-Runtime component installed.
                                                                                  • The NPU MindX DL suite is installed on the current cluster.
                                                                                  • No virtualization is performed on the NPU card in the current cluster, and it is not occupied by other applications.

                                                                                  Refer to the Ascend NPU Component Installation Document to install the basic environment.

                                                                                  "},{"location":"en/admin/kpanda/gpu/ascend/Ascend_usage.html#quick-start","title":"Quick Start","text":"

This document uses the AscendCL Image Classification Application example from the Ascend sample library.

                                                                                  1. Download the Ascend repository

                                                                                    Run the following command to download the Ascend demo repository, and remember the storage location of the code for subsequent use.

                                                                                    git clone https://gitee.com/ascend/samples.git\n
                                                                                  2. Prepare the base image

This example uses the ascend-pytorch base image, which can be obtained from the Ascend Container Registry.

                                                                                  3. Prepare the YAML file

                                                                                    ascend-demo.yaml
                                                                                    apiVersion: batch/v1\nkind: Job\nmetadata:\n  name: resnetinfer1-1-1usoc\nspec:\n  template:\n    spec:\n      containers:\n        - image: ascendhub.huawei.com/public-ascendhub/ascend-pytorch:23.0.RC2-ubuntu18.04 # Inference image name\n          imagePullPolicy: IfNotPresent\n          name: resnet50infer\n          securityContext:\n            runAsUser: 0\n          command:\n            - \"/bin/bash\"\n            - \"-c\"\n            - |\n              source /usr/local/Ascend/ascend-toolkit/set_env.sh &&\n              TEMP_DIR=/root/samples_copy_$(date '+%Y%m%d_%H%M%S_%N') &&\n              cp -r /root/samples \"$TEMP_DIR\" &&\n              cd \"$TEMP_DIR\"/inference/modelInference/sampleResnetQuickStart/python/model &&\n              wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/resnet50/resnet50.onnx &&\n              atc --model=resnet50.onnx --framework=5 --output=resnet50 --input_shape=\"actual_input_1:1,3,224,224\"  --soc_version=Ascend910 &&\n              cd ../data &&\n              wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg &&\n              cd ../scripts &&\n              bash sample_run.sh\n          resources:\n            requests:\n              huawei.com/Ascend910: 1 # Number of the Ascend 910 Processors\n            limits:\n              huawei.com/Ascend910: 1 # The value should be the same as that of requests\n          volumeMounts:\n            - name: hiai-driver\n              mountPath: /usr/local/Ascend/driver\n              readOnly: true\n            - name: slog\n              mountPath: /var/log/npu/conf/slog/slog.conf\n            - name: localtime # The container time must be the same as the host time\n              mountPath: /etc/localtime\n            - name: dmp\n              mountPath: /var/dmp_daemon\n            - name: slogd\n              mountPath: /var/slogd\n            - name: hbasic\n              mountPath: /etc/hdcBasic.cfg\n            - name: sys-version\n              mountPath: /etc/sys_version.conf\n            - name: aicpu\n              mountPath: /usr/lib64/aicpu_kernels\n            - name: tfso\n              mountPath: /usr/lib64/libtensorflow.so\n            - name: sample-path\n              mountPath: /root/samples\n      volumes:\n        - name: hiai-driver\n          hostPath:\n            path: /usr/local/Ascend/driver\n        - name: slog\n          hostPath:\n            path: /var/log/npu/conf/slog/slog.conf\n        - name: localtime\n          hostPath:\n            path: /etc/localtime\n        - name: dmp\n          hostPath:\n            path: /var/dmp_daemon\n        - name: slogd\n          hostPath:\n            path: /var/slogd\n        - name: hbasic\n          hostPath:\n            path: /etc/hdcBasic.cfg\n        - name: sys-version\n          hostPath:\n            path: /etc/sys_version.conf\n        - name: aicpu\n          hostPath:\n            path: /usr/lib64/aicpu_kernels\n        - name: tfso\n          hostPath:\n            path: /usr/lib64/libtensorflow.so\n        - name: sample-path\n          hostPath:\n            path: /root/samples\n      restartPolicy: OnFailure\n

                                                                                    Some fields in the above YAML need to be modified according to the actual situation:

1. atc ... --soc_version=Ascend910 uses Ascend910; adjust this field to your actual hardware. You can run the npu-smi info command to check the chip model and add the Ascend prefix.
                                                                                    2. samples-path should be adjusted according to the actual situation.
                                                                                    3. resources should be adjusted according to the actual situation.
                                                                                  4. Deploy a Job and check its results

                                                                                    Use the following command to create a Job:

                                                                                    kubectl apply -f ascend-demo.yaml\n

                                                                                    Check the Pod running status:
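
A sketch of the check (the Job name comes from the YAML above; Pods created by a Job carry the job-name label):

kubectl get pods -l job-name=resnetinfer1-1-1usoc
kubectl logs -f job/resnetinfer1-1-1usoc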

After the Pod runs successfully, check the log output. The key messages are as follows: Label indicates the category identifier, Conf indicates the maximum confidence of the classification, and Class indicates the category it belongs to. These values may vary depending on the version and environment, so refer to your actual output:

                                                                                    Result image display:

                                                                                  "},{"location":"en/admin/kpanda/gpu/ascend/Ascend_usage.html#ui-usage","title":"UI Usage","text":"
                                                                                  1. Confirm whether the cluster has detected the GPU. Click Clusters -> Cluster Settings -> Addon Plugins , and check whether the proper GPU type is automatically enabled and detected. Currently, the cluster will automatically enable GPU and set the GPU type to Ascend .

                                                                                  2. Deploy the workload. Click Clusters -> Workloads , deploy the workload through an image, select the type (Ascend), and then configure the number of physical cards used by the application:

                                                                                    Number of Physical Cards (huawei.com/Ascend910) : This indicates how many physical cards the current Pod needs to mount. The input value must be an integer and less than or equal to the number of cards on the host.

If the above configuration is incorrect, scheduling will fail and resources cannot be allocated.
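
The equivalent resource request in the workload YAML, mirroring the Job example earlier:

resources:
  requests:
    huawei.com/Ascend910: 1   # number of physical Ascend 910 cards to mount
  limits:
    huawei.com/Ascend910: 1   # must equal requests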

                                                                                  "},{"location":"en/admin/virtnest/vm/vm-gpu.html","title":"Virtual Machine Configuration GPU (Nvidia)","text":"

                                                                                  This article will introduce the prerequisites for configuring GPU when creating a virtual machine.

The key to configuring GPUs for virtual machines is the GPU Operator, which deploys different software components on worker nodes depending on the GPU workload those nodes are configured to run. Take the following three nodes as examples:

                                                                                  • The controller-node-1 node is configured to run containers.
                                                                                  • The work-node-1 node is configured to run virtual machines with direct GPUs.
• The work-node-2 node is configured to run virtual machines with vGPUs.
                                                                                  "},{"location":"en/admin/virtnest/vm/vm-gpu.html#assumptions-limitations-and-dependencies","title":"Assumptions, Limitations, and Dependencies","text":"


1. Worker nodes can run GPU-accelerated containers, virtual machines with direct GPUs, or virtual machines with vGPUs, but not any combination of them.
                                                                                  2. Cluster administrators or developers need to understand the cluster situation in advance and correctly label the nodes to indicate the type of GPU workload they will run.
                                                                                  3. The working nodes running virtual machines with direct GPUs or vGPUs are assumed to be bare metal. If the working nodes are virtual machines, the GPU direct pass-through feature needs to be enabled on the virtual machine platform. Please consult the virtual machine platform provider.
                                                                                  4. Nvidia MIG vGPU is not supported.
                                                                                  5. The GPU Operator will not automatically install GPU drivers in virtual machines.
                                                                                  "},{"location":"en/admin/virtnest/vm/vm-gpu.html#enable-iommu","title":"Enable IOMMU","text":"

                                                                                  To enable the GPU direct pass-through feature, the cluster nodes need to enable IOMMU. Please refer to How to Enable IOMMU. If your cluster is running on a virtual machine, consult your virtual machine platform provider.

                                                                                  "},{"location":"en/admin/virtnest/vm/vm-gpu.html#build-vgpu-manager-image","title":"Build vGPU Manager Image","text":"

                                                                                  Note: Building a vGPU Manager image is only required when using NVIDIA vGPUs. If you plan to use only GPU direct pass-through, skip this section.

                                                                                  The following are the steps to build the vGPU Manager image and push it to the container registry:

                                                                                  1. Download the vGPU software from the NVIDIA Licensing Portal.

                                                                                    • Log in to the NVIDIA Licensing Portal and go to the Software Downloads page.
                                                                                    • The NVIDIA vGPU software is located in the Driver downloads tab on the Software Downloads page.
                                                                                    • Select VGPU + Linux in the filter criteria and click Download to get the software package for Linux KVM. Unzip the downloaded file (NVIDIA-Linux-x86_64-<version>-vgpu-kvm.run).

                                                                                  2. Clone the container-images/driver repository in the terminal

git clone https://gitlab.com/nvidia/container-images/driver\ncd driver\n
                                                                                  3. Switch to the vgpu-manager directory for your operating system

                                                                                    cd vgpu-manager/<your-os>\n
                                                                                  4. Copy the .run file extracted in step 1 to the current directory

                                                                                    cp <local-driver-download-directory>/*-vgpu-kvm.run ./\n
                                                                                  5. Set environment variables

                                                                                    • PRIVATE_REGISTRY: Name of the private registry to store the driver image.
                                                                                    • VERSION: Version of NVIDIA vGPU Manager, downloaded from the NVIDIA Software Portal.
                                                                                    • OS_TAG: Must match the operating system version of the cluster node.
                                                                                    • CUDA_VERSION: CUDA base image version used to build the driver image.
                                                                                    export PRIVATE_REGISTRY=my/private/registry VERSION=510.73.06 OS_TAG=ubuntu22.04 CUDA_VERSION=12.2.0\n
                                                                                  6. Build the NVIDIA vGPU Manager Image

                                                                                    docker build \\\n  --build-arg DRIVER_VERSION=${VERSION} \\\n  --build-arg CUDA_VERSION=${CUDA_VERSION} \\\n  -t ${PRIVATE_REGISTRY}/vgpu-manager:${VERSION}-${OS_TAG} .\n
                                                                                  7. Push the NVIDIA vGPU Manager image to your container registry

                                                                                    docker push ${PRIVATE_REGISTRY}/vgpu-manager:${VERSION}-${OS_TAG}\n
                                                                                  "},{"location":"en/admin/virtnest/vm/vm-gpu.html#label-cluster-nodes","title":"Label Cluster Nodes","text":"

Go to Container Management , select your worker cluster and click Nodes . On the right of the list, click \u2507 and select Edit Labels to add a label to each node. Each node can only carry one of these labels.

You can assign one of the following values to the nvidia.com/gpu.workload.config label: container, vm-passthrough, and vm-vgpu.
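
Equivalently from the CLI (the node names follow the three examples above):

kubectl label node controller-node-1 nvidia.com/gpu.workload.config=container
kubectl label node work-node-1 nvidia.com/gpu.workload.config=vm-passthrough
kubectl label node work-node-2 nvidia.com/gpu.workload.config=vm-vgpu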

                                                                                  "},{"location":"en/admin/virtnest/vm/vm-gpu.html#install-nvidia-operator","title":"Install Nvidia Operator","text":"
                                                                                  1. Go to Container Management , select your worker cluster, click Helm Apps -> Helm Charts , choose and install gpu-operator. You need to modify some fields in the yaml.

                                                                                    gpu-operator.sandboxWorkloads.enabled=true\ngpu-operator.vgpuManager.enabled=true\ngpu-operator.vgpuManager.repository=<your-register-url>      # (1)!\ngpu-operator.vgpuManager.image=vgpu-manager\ngpu-operator.vgpuManager.version=<your-vgpu-manager-version> # (2)!\ngpu-operator.vgpuDeviceManager.enabled=true\n
1. Fill in the container registry address referred to in the "Build vGPU Manager Image" step.
2. Fill in the VERSION referred to in the "Build vGPU Manager Image" step.
2. Wait for the installation to succeed.

                                                                                  "},{"location":"en/admin/virtnest/vm/vm-gpu.html#install-virtnest-agent-and-configure-cr","title":"Install virtnest-agent and Configure CR","text":"
                                                                                  1. Install virtnest-agent, refer to Install virtnest-agent.

                                                                                  2. Add vGPU and GPU direct pass-through to the Virtnest Kubevirt CR. The following example shows the key yaml after adding vGPU and GPU direct pass-through:

spec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n      - GPU\n      - DisableMDEVConfiguration\n    # Fill in the information below\n    permittedHostDevices:\n      mediatedDevices:            # vGPU\n      - mdevNameSelector: GRID P4-1Q\n        resourceName: nvidia.com/GRID_P4-1Q\n      pciHostDevices:             # GPU direct pass-through\n      - externalResourceProvider: true\n        pciVendorSelector: 10DE:1BB3\n        resourceName: nvidia.com/GP104GL_TESLA_P4\n
                                                                                  3. In the kubevirt CR yaml, permittedHostDevices is used to import VM devices, and vGPU should be added in mediatedDevices with the following structure:

                                                                                    mediatedDevices:          \n- mdevNameSelector: GRID P4-1Q          # Device Name\n  resourceName: nvidia.com/GRID_P4-1Q   # vGPU information registered by GPU Operator to the node\n
                                                                                  4. GPU direct pass-through should be added in pciHostDevices under permittedHostDevices with the following structure:

pciHostDevices:\n- externalResourceProvider: true            # Do not change by default\n  pciVendorSelector: 10DE:1BB3              # Vendor and device ID of the PCI device\n  resourceName: nvidia.com/GP104GL_TESLA_P4 # GPU information registered by the GPU Operator on the node\n
                                                                                  5. Example of obtaining vGPU information (only applicable to vGPU): View node information on a node marked as nvidia.com/gpu.workload.config=vm-vgpu (e.g., work-node-2), and the nvidia.com/GRID_P4-1Q: 8 in Capacity indicates available vGPUs:

# kubectl describe node work-node-2\nCapacity:\n  cpu:                                 64\n  devices.kubevirt.io/kvm:             1k\n  devices.kubevirt.io/tun:             1k\n  devices.kubevirt.io/vhost-net:       1k\n  ephemeral-storage:                   102626232Ki\n  hugepages-1Gi:                       0\n  hugepages-2Mi:                       0\n  memory:                              264010840Ki\n  nvidia.com/GRID_P4-1Q:               8\n  pods:                                110\nAllocatable:\n  cpu:                                  64\n  devices.kubevirt.io/kvm:              1k\n  devices.kubevirt.io/tun:              1k\n  devices.kubevirt.io/vhost-net:        1k\n  ephemeral-storage:                    94580335255\n  hugepages-1Gi:                        0\n  hugepages-2Mi:                        0\n  memory:                               263908440Ki\n  nvidia.com/GRID_P4-1Q:                8\n  pods:                                 110\n

                                                                                    So the mdevNameSelector should be \"GRID P4-1Q\" and the resourceName should be \"GRID_P4-1Q\".

6. Obtain GPU direct pass-through information: On a node marked as nvidia.com/gpu.workload.config=vm-passthrough (e.g., work-node-1), view the node information, and nvidia.com/GP104GL_TESLA_P4: 2 in Capacity indicates the number of GPUs available for pass-through:

                                                                                    # kubectl describe node work-node-1\nCapacity:\n  cpu:                            64\n  devices.kubevirt.io/kvm:        1k\n  devices.kubevirt.io/tun:        1k\n  devices.kubevirt.io/vhost-net:  1k\n  ephemeral-storage:              102626232Ki\n  hugepages-1Gi:                  0\n  hugepages-2Mi:                  0\n  memory:                         264010840Ki\n  nvidia.com/GP104GL_TESLA_P4:    2\n  pods:                           110\nAllocatable:\n  cpu:                            64\n  devices.kubevirt.io/kvm:        1k\n  devices.kubevirt.io/tun:        1k\n  devices.kubevirt.io/vhost-net:  1k\n  ephemeral-storage:              94580335255\n  hugepages-1Gi:                  0\n  hugepages-2Mi:                  0\n  memory:                         263908440Ki\n  nvidia.com/GP104GL_TESLA_P4:    2\n  pods:                           110\n

So the resourceName should be "GP104GL_TESLA_P4". To obtain the pciVendorSelector, SSH into the target node work-node-1 and run lspci -nnk -d 10de: to list the Nvidia GPU PCI information; the vendor and device IDs in the output form the value. An illustrative lspci line follows this list.

7. A note on editing the kubevirt CR: if there are multiple GPUs of the same model, only one entry needs to be written in the CR; listing each GPU is not necessary.

# kubectl -n virtnest-system edit kubevirt kubevirt\nspec:\n  configuration:\n    developerConfiguration:\n      featureGates:\n      - GPU\n      - DisableMDEVConfiguration\n    # Fill in the information below\n    permittedHostDevices:\n      mediatedDevices:                    # vGPU\n      - mdevNameSelector: GRID P4-1Q\n        resourceName: nvidia.com/GRID_P4-1Q\n      pciHostDevices:                     # GPU direct pass-through; in the above example, the Tesla P4 has two GPUs, register only one here\n      - externalResourceProvider: true\n        pciVendorSelector: 10DE:1BB3\n        resourceName: nvidia.com/GP104GL_TESLA_P4\n
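
As referenced in step 6 above, an illustrative lspci invocation (the slot address and exact description vary by machine; the [10de:1bb3] pair maps to pciVendorSelector: 10DE:1BB3):

# lspci -nnk -d 10de:
3b:00.0 3D controller [0302]: NVIDIA Corporation GP104GL [Tesla P4] [10de:1bb3]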
                                                                                  "},{"location":"en/admin/virtnest/vm/vm-gpu.html#create-vm-yaml-and-use-gpu-acceleration","title":"Create VM YAML and Use GPU Acceleration","text":"

                                                                                  The only difference from a regular virtual machine is adding GPU-related information in the devices section.

                                                                                  Click to view complete YAML
apiVersion: kubevirt.io/v1\nkind: VirtualMachine\nmetadata:\n  name: testvm-gpu1\n  namespace: default\nspec:\n  dataVolumeTemplates:\n  - metadata:\n      creationTimestamp: null\n      name: systemdisk-testvm-gpu1\n      namespace: default\n    spec:\n      pvc:\n        accessModes:\n        - ReadWriteOnce\n        resources:\n          requests:\n            storage: 10Gi\n        storageClassName: www\n      source:\n        registry:\n          url: docker://release-ci.daocloud.io/virtnest/system-images/debian-12-x86_64:v1\n  runStrategy: Manual\n  template:\n    metadata:\n      creationTimestamp: null\n    spec:\n      domain:\n        cpu:\n          cores: 1\n          sockets: 1\n          threads: 1\n        devices:\n          disks:\n          - bootOrder: 1\n            disk:\n              bus: virtio\n            name: systemdisk-testvm-gpu1\n          - disk:\n              bus: virtio\n            name: cloudinitdisk\n          gpus:\n          - deviceName: nvidia.com/GP104GL_TESLA_P4\n            name: gpu-0-0\n          - deviceName: nvidia.com/GP104GL_TESLA_P4\n            name: gpu-0-1\n          interfaces:\n          - masquerade: {}\n            name: default\n        machine:\n          type: q35\n        resources:\n          requests:\n            memory: 2Gi\n      networks:\n      - name: default\n        pod: {}\n      volumes:\n      - dataVolume:\n          name: systemdisk-testvm-gpu1\n        name: systemdisk-testvm-gpu1\n      - cloudInitNoCloud:\n          userDataBase64: I2Nsb3VkLWNvbmZpZwpzc2hfcHdhdXRoOiB0cnVlCmRpc2FibGVfcm9vdDogZmFsc2UKY2hwYXNzd2Q6IHsibGlzdCI6ICJyb290OmRhbmdlcm91cyIsIGV4cGlyZTogRmFsc2V9CgoKcnVuY21kOgogIC0gc2VkIC1pICIvI1w/UGVybWl0Um9vdExvZ2luL3MvXi4qJC9QZXJtaXRSb290TG9naW4geWVzL2ciIC9ldGMvc3NoL3NzaGRfY29uZmlnCiAgLSBzeXN0ZW1jdGwgcmVzdGFydCBzc2guc2VydmljZQ==\n        name: cloudinitdisk\n
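
To create and start the VM (a sketch; virtctl is the standard KubeVirt client, and the runStrategy above is Manual, so the VM must be started explicitly):

kubectl apply -f testvm-gpu1.yaml
virtctl start testvm-gpu1 -n default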
                                                                                  "},{"location":"en/end-user/index.html","title":"Suanova AI Platform - End User","text":"

                                                                                  This is the user documentation for the Suanova AI Platform aimed at end users.

                                                                                  • User Registration

                                                                                    User registration is the first step to using the AI platform.

                                                                                    • User Registration
                                                                                  • Cloud Host

                                                                                    A cloud host is a virtual machine deployed in the cloud.

                                                                                    • Create Cloud Host
                                                                                    • Use Cloud Host
                                                                                  • Container Management

                                                                                    Container management is the core module of the AI computing center.

                                                                                    • K8s Clusters on Cloud
                                                                                    • Node Management
                                                                                    • Workloads
                                                                                    • Helm Apps and Templates
                                                                                  • AI Lab

                                                                                    Manage datasets and run AI training and inference jobs.

                                                                                    • Create AI Workloads
                                                                                    • Use Notebook
                                                                                    • Create Training Jobs
                                                                                    • Create Inference Services
                                                                                  • Insight

                                                                                    Monitor the status of clusters, nodes, and workloads through dashboards.

                                                                                    • Monitor Clusters/Nodes
                                                                                    • Metrics
                                                                                    • Logs
                                                                                    • Tracing
                                                                                  • Personal Center

                                                                                    Set password, keys, and language in the personal center.

                                                                                    • Security Settings
                                                                                    • Access Keys
                                                                                    • Language Settings
                                                                                  "},{"location":"en/end-user/baize/dataset/create-use-delete.html","title":"Create, Use and Delete Datasets","text":"

                                                                                  AI Lab provides comprehensive dataset management functions needed for model development, training, and inference processes. Currently, it supports unified access to various data sources.

                                                                                  With simple configurations, you can connect data sources to AI Lab, achieving unified data management, preloading, dataset management, and other functionalities.

                                                                                  "},{"location":"en/end-user/baize/dataset/create-use-delete.html#create-a-dataset","title":"Create a Dataset","text":"
                                                                                  1. In the left navigation bar, click Data Management -> Dataset List, and then click the Create button on the right.

                                                                                  2. Select the worker cluster and namespace to which the dataset belongs, then click Next.

                                                                                  3. Configure the data source type for the target data, then click OK.

                                                                                    Currently supported data sources include:

                                                                                    • GIT: Supports repositories such as GitHub, GitLab, and Gitee
• S3: Supports object storage services such as Amazon S3
                                                                                    • HTTP: Directly input a valid HTTP URL
                                                                                    • PVC: Supports pre-created Kubernetes PersistentVolumeClaim
                                                                                    • NFS: Supports NFS shared storage
4. Upon successful creation, you will be returned to the dataset list. You can perform more actions by clicking \u2507 on the right.

                                                                                  Info

                                                                                  The system will automatically perform a one-time data preloading after the dataset is successfully created; the dataset cannot be used until the preloading is complete.

                                                                                  "},{"location":"en/end-user/baize/dataset/create-use-delete.html#use-a-dataset","title":"Use a Dataset","text":"

                                                                                  Once the dataset is successfully created, it can be used in tasks such as model training and inference.

                                                                                  "},{"location":"en/end-user/baize/dataset/create-use-delete.html#use-in-notebook","title":"Use in Notebook","text":"

When creating a Notebook, you can use the dataset directly in the following ways:

                                                                                  • Use the dataset as training data mount
                                                                                  • Use the dataset as code mount

                                                                                  "},{"location":"en/end-user/baize/dataset/create-use-delete.html#use-in-training-obs","title":"Use in Training obs","text":"
                                                                                  • Use the dataset to specify job output
                                                                                  • Use the dataset to specify job input
                                                                                  • Use the dataset to specify TensorBoard output
                                                                                  "},{"location":"en/end-user/baize/dataset/create-use-delete.html#use-in-inference-services","title":"Use in Inference Services","text":"
                                                                                  • Use the dataset to mount a model
                                                                                  "},{"location":"en/end-user/baize/dataset/create-use-delete.html#delete-a-dataset","title":"Delete a Dataset","text":"

                                                                                  If you find a dataset to be redundant, expired, or no longer needed, you can delete it from the dataset list.

                                                                                  1. Click the \u2507 on the right side of the dataset list, then choose Delete from the dropdown menu.

                                                                                  2. In the pop-up window, confirm the dataset you want to delete, enter the dataset name, and then click Delete.

                                                                                  3. A confirmation message will appear indicating successful deletion, and the dataset will disappear from the list.

                                                                                  Caution

                                                                                  Once a dataset is deleted, it cannot be recovered, so please proceed with caution.

                                                                                  "},{"location":"en/end-user/baize/dataset/environments.html","title":"Manage Python Environment Dependencies","text":"

This document guides users through managing environment dependencies on the AI platform. Below are the specific steps and considerations.

                                                                                  1. Overview of Environment Management
                                                                                  2. Create New Environment
                                                                                  3. Configure Environment
                                                                                  4. Troubleshooting
                                                                                  "},{"location":"en/end-user/baize/dataset/environments.html#overview","title":"Overview","text":"

                                                                                  Traditionally, Python environment dependencies are built into an image, which includes the Python version and dependency packages. This approach has high maintenance costs and is inconvenient to update, often requiring a complete rebuild of the image.

                                                                                  In AI Lab, users can manage pure environment dependencies through the Environment Management module, decoupling this part from the image. The advantages include:

                                                                                  • One environment can be used in multiple places, such as in Notebooks, distributed training tasks, and even inference services.
                                                                                  • Updating dependency packages is more convenient; you only need to update the environment dependencies without rebuilding the image.

                                                                                  The main components of the environment management are:

• Cluster: Select the cluster to operate on.
• Namespace: Select the namespace to limit the scope of operations.
• Environment List: Displays all environments and their statuses under the current cluster and namespace.

                                                                                  "},{"location":"en/end-user/baize/dataset/environments.html#explanation-of-environment-list-fields","title":"Explanation of Environment List Fields","text":"
• Name: The name of the environment.
• Status: The current status of the environment (normal or failed). New environments undergo a warm-up process, after which they can be used in other tasks.
• Creation Time: The time the environment was created.
                                                                                  "},{"location":"en/end-user/baize/dataset/environments.html#creat-new-environment","title":"Creat New Environment","text":"

                                                                                  On the Environment Management interface, click the Create button at the top right to enter the environment creation process.

                                                                                  Fill in the following basic information:

• Name: Enter the environment name, 2-63 characters long, starting and ending with a lowercase letter or number.
• Deployment Location:
  • Cluster: Select the cluster to deploy to, such as gpu-cluster.
  • Namespace: Select the namespace, such as default.
• Remarks (optional): Enter remarks.
• Labels (optional): Add labels to the environment.
• Annotations (optional): Add annotations to the environment.

After completing the information, click Next to proceed to environment configuration.
                                                                                  "},{"location":"en/end-user/baize/dataset/environments.html#configure-environment","title":"Configure Environment","text":"

                                                                                  In the environment configuration step, users need to configure the Python version and dependency management tool.

                                                                                  "},{"location":"en/end-user/baize/dataset/environments.html#environment-settings","title":"Environment Settings","text":"
• Python Version: Select the required Python version, such as 3.12.3.
• Package Manager: Choose the package management tool, either PIP or CONDA.
• Environment Data (see the examples below):
  • If PIP is selected: Enter the dependency package list in requirements.txt format in the editor below.
  • If CONDA is selected: Enter the dependency package list in environment.yaml format in the editor below.
• Other Options (optional):
  • Additional pip Index URLs: Configure additional pip index URLs, suitable for internal enterprise private repositories or PIP acceleration sites.
  • GPU Configuration: Enable or disable GPU configuration; some GPU-related dependency packages require GPU resources during preloading.
  • Associated Storage: Select the associated storage configuration; environment dependency packages will be stored in the associated storage. Note: the storage must support ReadWriteMany.
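For reference, a minimal dependency list in each format might look like the following sketches; the package names and versions are purely illustrative and should be replaced with your actual dependencies.

requirements.txt (PIP):

torch==2.1.0\nnumpy>=1.24,<2.0\ntransformers\n

environment.yaml (CONDA):

dependencies:\n  - numpy\n  - pip\n  - pip:\n      - torch==2.1.0\n      - transformers\n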

                                                                                  After configuration, click the Create button, and the system will automatically create and configure the new Python environment.

                                                                                  "},{"location":"en/end-user/baize/dataset/environments.html#troubleshooting","title":"Troubleshooting","text":"
                                                                                  • If environment creation fails:

                                                                                    • Check if the network connection is normal.
                                                                                    • Verify that the Python version and package manager configuration are correct.
                                                                                    • Ensure the selected cluster and namespace are available.
                                                                                  • If dependency preloading fails:

                                                                                    • Check if the requirements.txt or environment.yaml file format is correct.
• Verify that the dependency package names and versions are correct.

If other issues arise, contact the platform administrator or refer to the platform help documentation for more support.

                                                                                  These are the basic steps and considerations for managing Python dependencies in AI Lab.

                                                                                  "},{"location":"en/end-user/baize/inference/models.html","title":"Model Support","text":"

With the rapid iteration of AI Lab, we now support a variety of model inference services. Here you can find information about the supported models.

• AI Lab v0.3.0 launched model inference services, allowing users to directly use AI Lab's inference services for traditional deep learning models without worrying about model deployment and maintenance.
• AI Lab v0.6.0 supports the complete vLLM inference capability, covering many large language models such as Llama, Qwen, ChatGLM, and more.

                                                                                  Note

                                                                                  The support for inference capabilities is related to the version of AI Lab.

                                                                                  You can use GPU types that have been verified by AI platform in AI Lab. For more details, refer to the GPU Support Matrix.

                                                                                  "},{"location":"en/end-user/baize/inference/models.html#triton-inference-server","title":"Triton Inference Server","text":"

Traditional deep learning models are well supported through the Triton Inference Server. AI Lab currently supports the following mainstream inference backends:

• pytorch: Supports TorchScript and PyTorch 2.0 model formats (triton-inference-server/pytorch_backend).
• tensorflow: Supports TensorFlow 2.x (triton-inference-server/tensorflow_backend).
• vLLM (Deprecated): Supports the models supported by vLLM (triton-inference-server/vllm_backend).

                                                                                  Danger

The Triton vLLM backend has been deprecated. It is recommended to use the latest vLLM support to deploy your large language models.

                                                                                  "},{"location":"en/end-user/baize/inference/models.html#vllm","title":"vLLM","text":"

With vLLM, we can quickly use large language models. Here you can see the list of models we support, which generally aligns with the vLLM Supported Models.

                                                                                  • HuggingFace Models: We support most of HuggingFace's models. You can see more models at the HuggingFace Model Hub.
                                                                                  • The vLLM Supported Models list includes supported large language models and vision-language models.
• Models fine-tuned using frameworks supported by vLLM.
                                                                                  "},{"location":"en/end-user/baize/inference/models.html#new-features-of-vllm","title":"New Features of vLLM","text":"

                                                                                  Currently, AI Lab also supports some new features when using vLLM as an inference tool:

• Enable the Lora Adapter to optimize model inference services during inference.
• Provide an OpenAI-compatible API, making it easy for users to switch to local inference services at low cost and transition quickly.
                                                                                  "},{"location":"en/end-user/baize/inference/triton-inference.html","title":"Create Inference Service Using Triton Framework","text":"

                                                                                  The AI Lab currently offers Triton and vLLM as inference frameworks. Users can quickly start a high-performance inference service with simple configurations.

                                                                                  Danger

The Triton vLLM backend has been deprecated. It is recommended to use the latest vLLM support to deploy your large language models.

                                                                                  "},{"location":"en/end-user/baize/inference/triton-inference.html#introduction-to-triton","title":"Introduction to Triton","text":"

                                                                                  Triton is an open-source inference server developed by NVIDIA, designed to simplify the deployment and inference of machine learning models. It supports a variety of deep learning frameworks, including TensorFlow and PyTorch, enabling users to easily manage and deploy different types of models.

                                                                                  "},{"location":"en/end-user/baize/inference/triton-inference.html#prerequisites","title":"Prerequisites","text":"

Prepare model data: Manage the model code in dataset management and ensure that the data is successfully preloaded. The following example uses a PyTorch model for MNIST handwritten digit recognition.

                                                                                  Note

                                                                                  The model to be inferred must adhere to the following directory structure within the dataset:

                                                                                    <model-repository-name>\n  \u2514\u2500\u2500 <model-name>\n     \u2514\u2500\u2500 <version>\n        \u2514\u2500\u2500 <model-definition-file>\n

                                                                                  The directory structure in this example is as follows:

                                                                                      model-repo\n    \u2514\u2500\u2500 mnist-cnn\n        \u2514\u2500\u2500 1\n            \u2514\u2500\u2500 model.pt\n
                                                                                  "},{"location":"en/end-user/baize/inference/triton-inference.html#create-inference-service","title":"Create Inference Service","text":"

                                                                                  Currently, form-based creation is supported, allowing you to create services with field prompts in the interface.

                                                                                  "},{"location":"en/end-user/baize/inference/triton-inference.html#configure-model-path","title":"Configure Model Path","text":"

                                                                                  The model path model-repo/mnist-cnn/1/model.pt must be consistent with the directory structure of the dataset.

                                                                                  "},{"location":"en/end-user/baize/inference/triton-inference.html#model-configuration","title":"Model Configuration","text":""},{"location":"en/end-user/baize/inference/triton-inference.html#configure-input-and-output-parameters","title":"Configure Input and Output Parameters","text":"

                                                                                  Note

The first dimension of the input and output parameters defaults to the batch size; setting it to -1 allows the batch size to be calculated automatically from the input inference data. The remaining dimensions and the data type must match the model's input.
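The form fields above correspond to Triton's model configuration file (config.pbtxt). For orientation, a hand-written equivalent for the MNIST example might look like the following sketch; the tensor names, data types, and dims are illustrative and must match your actual model:

name: \"mnist-cnn\"\nbackend: \"pytorch\"\nmax_batch_size: 0  # 0 means the dims below include the batch dimension\ninput [\n  {\n    name: \"model_input\"\n    data_type: TYPE_FP32\n    dims: [ -1, 1, 32, 32 ]  # -1 lets the batch size vary per request\n  }\n]\noutput [\n  {\n    name: \"model_output\"\n    data_type: TYPE_FP32\n    dims: [ -1, 10 ]\n  }\n]\n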

                                                                                  "},{"location":"en/end-user/baize/inference/triton-inference.html#configure-environment","title":"Configure Environment","text":"

                                                                                  You can import the environment created in Manage Python Environment Dependencies to serve as the runtime environment for inference.

                                                                                  "},{"location":"en/end-user/baize/inference/triton-inference.html#advanced-settings","title":"Advanced Settings","text":""},{"location":"en/end-user/baize/inference/triton-inference.html#configure-authentication-policy","title":"Configure Authentication Policy","text":"

                                                                                  Supports API key-based request authentication. Users can customize and add authentication parameters.

                                                                                  "},{"location":"en/end-user/baize/inference/triton-inference.html#affinity-scheduling","title":"Affinity Scheduling","text":"

                                                                                  Supports automated affinity scheduling based on GPU resources and other node configurations. It also allows users to customize scheduling policies.

                                                                                  "},{"location":"en/end-user/baize/inference/triton-inference.html#access","title":"Access","text":""},{"location":"en/end-user/baize/inference/triton-inference.html#api-access","title":"API Access","text":"
                                                                                  • Triton provides a REST-based API, allowing clients to perform model inference via HTTP POST requests.
                                                                                  • Clients can send requests with JSON-formatted bodies containing input data and related metadata.
                                                                                  "},{"location":"en/end-user/baize/inference/triton-inference.html#http-access","title":"HTTP Access","text":"
                                                                                  1. Send HTTP POST Request: Use tools like curl or HTTP client libraries (e.g., Python's requests library) to send POST requests to the Triton Server.

2. Set HTTP Headers: These are generated automatically from the user's configuration; include metadata about the model inputs and outputs in the HTTP headers.

                                                                                  3. Construct Request Body: The request body usually contains the input data for inference and model-specific metadata.

                                                                                  "},{"location":"en/end-user/baize/inference/triton-inference.html#example-curl-command","title":"Example curl Command","text":"
                                                                                    curl -X POST \"http://<ip>:<port>/v2/models/<inference-name>/infer\" \\\n  -H \"Content-Type: application/json\" \\\n  -d '{\n    \"inputs\": [\n      {\n        \"name\": \"model_input\",            \n        \"shape\": [1, 1, 32, 32],          \n        \"datatype\": \"FP32\",               \n        \"data\": [\n          [0.1234, 0.5678, 0.9101, ... ]  \n        ]\n      }\n    ]\n  }'\n
                                                                                  • <ip> is the host address where the Triton Inference Server is running.
                                                                                  • <port> is the port where the Triton Inference Server is running.
                                                                                  • <inference-name> is the name of the inference service that has been created.
                                                                                  • \"name\" must match the name of the input parameter in the model configuration.
                                                                                  • \"shape\" must match the dims of the input parameter in the model configuration.
                                                                                  • \"datatype\" must match the Data Type of the input parameter in the model configuration.
                                                                                  • \"data\" should be replaced with the actual inference data.

                                                                                  Please note that the above example code needs to be adjusted according to your specific model and environment. The format and content of the input data must also comply with the model's requirements.
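If you prefer Python over curl, the same request can be sent with the requests library. The following is a minimal sketch under the same assumptions as the curl example above; replace the placeholders and the dummy input data with real values:

import requests\n\n# Placeholders: <ip>, <port>, and <inference-name> as in the curl example;\n# the input name, shape, and datatype must match your model configuration.\nurl = 'http://<ip>:<port>/v2/models/<inference-name>/infer'\npayload = {\n    'inputs': [\n        {\n            'name': 'model_input',\n            'shape': [1, 1, 32, 32],\n            'datatype': 'FP32',\n            'data': [0.0] * (1 * 1 * 32 * 32),  # dummy values; use real inference data\n        }\n    ]\n}\nresponse = requests.post(url, json=payload, timeout=30)\nresponse.raise_for_status()\nprint(response.json())\n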

                                                                                  "},{"location":"en/end-user/baize/inference/vllm-inference.html","title":"Create Inference Service Using vLLM Framework","text":"

                                                                                  AI Lab supports using vLLM as an inference service, offering all the capabilities of vLLM while fully adapting to the OpenAI interface definition.

                                                                                  "},{"location":"en/end-user/baize/inference/vllm-inference.html#introduction-to-vllm","title":"Introduction to vLLM","text":"

vLLM is a fast and easy-to-use library for LLM inference and serving. It aims to significantly improve the throughput and memory efficiency of language model services in real-time scenarios. vLLM offers several features in terms of speed and flexibility:

                                                                                  • Continuous batching of incoming requests.
• Efficient management of attention key and value memory with PagedAttention.
                                                                                  • Seamless integration with popular HuggingFace models.
                                                                                  • Compatible with OpenAI's API server.
                                                                                  "},{"location":"en/end-user/baize/inference/vllm-inference.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Prepare model data: Manage the model code in dataset management and ensure that the data is successfully preloaded.

                                                                                  "},{"location":"en/end-user/baize/inference/vllm-inference.html#create-inference-service","title":"Create Inference Service","text":"
                                                                                  1. Select the vLLM inference framework. In the model module selection, choose the pre-created model dataset hdd-models and fill in the path information where the model is located within the dataset.

                                                                                    This guide uses the ChatGLM3 model for creating the inference service.

                                                                                  2. Configure the resources for the inference service and adjust the parameters for running the inference service.

• GPU Resources: Configure GPU resources for inference based on the model scale and cluster resources.
• Allow Remote Code: Controls whether vLLM trusts and executes code from remote sources.
• LoRA: LoRA is a parameter-efficient fine-tuning technique for deep learning models. It reduces the number of parameters and computational complexity by decomposing the original model parameter matrix into low-rank matrices. Related parameters:
  1. --lora-modules: Specifies specific modules or layers for low-rank approximation.
  2. max_lora_rank: Specifies the maximum rank for each adapter layer in the LoRA model. For simpler tasks, a smaller rank value can be chosen, while more complex tasks may require a larger rank value to ensure model performance.
  3. max_loras: Indicates the maximum number of LoRA layers that can be included in the model, customized based on model size and inference complexity.
  4. max_cpu_loras: Specifies the maximum number of LoRA layers that can be handled in a CPU environment.
• Associated Environment: Selects predefined environment dependencies required for inference.

                                                                                    Info

                                                                                    For models that support LoRA parameters, refer to vLLM Supported Models.

3. Advanced Configuration supports automated affinity scheduling based on GPU resources and other node configurations. Users can also customize scheduling policies.

                                                                                  "},{"location":"en/end-user/baize/inference/vllm-inference.html#verify-inference-service","title":"Verify Inference Service","text":"

                                                                                  Once the inference service is created, click the name of the inference service to enter the details and view the API call methods. Verify the execution results using Curl, Python, and Node.js.

Copy the curl command from the details page and execute it in the terminal to send a model inference request and view the returned output.
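Since the service is OpenAI-compatible, you can also verify it programmatically. The following is a minimal sketch using Python's requests library; the endpoint path follows the OpenAI convention, and <ip>, <port>, and the model name are placeholders to replace with the values shown in your service details:

import requests\n\n# Placeholders: <ip>, <port>, and the model name of your inference service.\nurl = 'http://<ip>:<port>/v1/chat/completions'\npayload = {\n    'model': 'chatglm3-6b',  # assumption: replace with your service's model name\n    'messages': [{'role': 'user', 'content': 'Hello!'}],\n    'max_tokens': 64,\n}\nresponse = requests.post(url, json=payload, timeout=60)\nprint(response.json()['choices'][0]['message']['content'])\n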

                                                                                  "},{"location":"en/end-user/baize/jobs/create.html","title":"Create Job","text":"

                                                                                  Job management refers to the functionality of creating and managing job lifecycles through job scheduling and control components.

The AI platform's Smart Computing capability adopts Kubernetes' Job mechanism to schedule various AI inference and training jobs.

                                                                                  1. Click Job Center -> Jobs in the left navigation bar to enter the job list. Click the Create button on the right.

                                                                                  2. The system will pre-fill basic configuration data, including the cluster, namespace, type, queue, and priority. Adjust these parameters and click Next.

                                                                                  3. Configure the URL, runtime parameters, and associated datasets, then click Next.

4. Optionally add labels, annotations, runtime environment variables, and other job parameters. Select a scheduling policy and click Confirm.

5. After the job is successfully created, it will move through several statuses:

                                                                                    • Running
                                                                                    • Queued
                                                                                    • Submission successful, Submission failed
                                                                                    • Successful, Failed
                                                                                  "},{"location":"en/end-user/baize/jobs/create.html#next-steps","title":"Next Steps","text":"
                                                                                  • View Job Load
                                                                                  • Delete Job
                                                                                  "},{"location":"en/end-user/baize/jobs/delete.html","title":"Delete Job","text":"

                                                                                  If you find a job to be redundant, expired, or no longer needed for any other reason, you can delete it from the job list.

                                                                                  1. Click the \u2507 on the right side of the job in the job list, then choose Delete from the dropdown menu.

                                                                                  2. In the pop-up window, confirm the job you want to delete, enter the job name, and then click Delete.

                                                                                  3. A confirmation message will appear indicating successful deletion, and the job will disappear from the list.

                                                                                  Caution

                                                                                  Once a job is deleted, it cannot be recovered, so please proceed with caution.

                                                                                  "},{"location":"en/end-user/baize/jobs/pytorch.html","title":"Pytorch Jobs","text":"

                                                                                  Pytorch is an open-source deep learning framework that provides a flexible environment for training and deployment. A Pytorch job is a job that uses the Pytorch framework.

                                                                                  In the AI Lab platform, we provide support and adaptation for Pytorch jobs. Through a graphical interface, you can quickly create Pytorch jobs and perform model training.

                                                                                  "},{"location":"en/end-user/baize/jobs/pytorch.html#job-configuration","title":"Job Configuration","text":"
                                                                                  • Job types support both Pytorch Single and Pytorch Distributed modes.
                                                                                  • The runtime image already supports the Pytorch framework by default, so no additional installation is required.
                                                                                  "},{"location":"en/end-user/baize/jobs/pytorch.html#job-runtime-environment","title":"Job Runtime Environment","text":"

                                                                                  Here we use the baize-notebook base image and the associated environment as the basic runtime environment for the job.

                                                                                  To learn how to create an environment, refer to Environments.

                                                                                  "},{"location":"en/end-user/baize/jobs/pytorch.html#create-jobs","title":"Create Jobs","text":""},{"location":"en/end-user/baize/jobs/pytorch.html#pytorch-single-jobs","title":"Pytorch Single Jobs","text":"
                                                                                  1. Log in to the AI Lab platform, click Job Center in the left navigation bar to enter the Jobs page.
                                                                                  2. Click the Create button in the upper right corner to enter the job creation page.
3. Select the job type as Pytorch Single and click Next.
4. Fill in the job name and description, then click OK.
                                                                                  "},{"location":"en/end-user/baize/jobs/pytorch.html#parameters","title":"Parameters","text":"
                                                                                  • Start command: bash
                                                                                  • Command parameters:
                                                                                  import torch\nimport torch.nn as nn\nimport torch.optim as optim\n\n# Define a simple neural network\nclass SimpleNet(nn.Module):\n    def __init__(self):\n        super(SimpleNet, self).__init__()\n        self.fc = nn.Linear(10, 1)\n\n    def forward(self, x):\n        return self.fc(x)\n\n# Create model, loss function, and optimizer\nmodel = SimpleNet()\ncriterion = nn.MSELoss()\noptimizer = optim.SGD(model.parameters(), lr=0.01)\n\n# Generate some random data\nx = torch.randn(100, 10)\ny = torch.randn(100, 1)\n\n# Train the model\nfor epoch in range(100):\n    # Forward pass\n    outputs = model(x)\n    loss = criterion(outputs, y)\n\n    # Backward pass and optimization\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n\n    if (epoch + 1) % 10 == 0:\n        print(f'Epoch [{epoch+1}/100], Loss: {loss.item():.4f}')\n\nprint('Training finished.')\n
                                                                                  "},{"location":"en/end-user/baize/jobs/pytorch.html#results","title":"Results","text":"

                                                                                  Once the job is successfully submitted, we can enter the job details to see the resource usage. From the upper right corner, go to Workload Details to view the log output during the training process.

                                                                                  [HAMI-core Warn(1:140244541377408:utils.c:183)]: get default cuda from (null)\n[HAMI-core Msg(1:140244541377408:libvgpu.c:855)]: Initialized\nEpoch [10/100], Loss: 1.1248\nEpoch [20/100], Loss: 1.0486\nEpoch [30/100], Loss: 0.9969\nEpoch [40/100], Loss: 0.9611\nEpoch [50/100], Loss: 0.9360\nEpoch [60/100], Loss: 0.9182\nEpoch [70/100], Loss: 0.9053\nEpoch [80/100], Loss: 0.8960\nEpoch [90/100], Loss: 0.8891\nEpoch [100/100], Loss: 0.8841\nTraining finished.\n[HAMI-core Msg(1:140244541377408:multiprocess_memory_limit.c:468)]: Calling exit handler 1\n
                                                                                  "},{"location":"en/end-user/baize/jobs/pytorch.html#pytorch-distributed-jobs","title":"Pytorch Distributed Jobs","text":"
                                                                                  1. Log in to the AI Lab platform, click Job Center in the left navigation bar to enter the Jobs page.
                                                                                  2. Click the Create button in the upper right corner to enter the job creation page.
                                                                                  3. Select the job type as Pytorch Distributed and click Next.
                                                                                  4. Fill in the job name and description, then click OK.
                                                                                  "},{"location":"en/end-user/baize/jobs/pytorch.html#parameters_1","title":"Parameters","text":"
                                                                                  • Start command: bash
                                                                                  • Command parameters:
                                                                                  import os\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nclass SimpleModel(nn.Module):\n    def __init__(self):\n        super(SimpleModel, self).__init__()\n        self.fc = nn.Linear(10, 1)\n\n    def forward(self, x):\n        return self.fc(x)\n\ndef train():\n    # Print environment information\n    print(f'PyTorch version: {torch.__version__}')\n    print(f'CUDA available: {torch.cuda.is_available()}')\n    if torch.cuda.is_available():\n        print(f'CUDA version: {torch.version.cuda}')\n        print(f'CUDA device count: {torch.cuda.device_count()}')\n\n    rank = int(os.environ.get('RANK', '0'))\n    world_size = int(os.environ.get('WORLD_SIZE', '1'))\n\n    print(f'Rank: {rank}, World Size: {world_size}')\n\n    # Initialize distributed environment\n    try:\n        if world_size > 1:\n            dist.init_process_group('nccl')\n            print('Distributed process group initialized successfully')\n        else:\n            print('Running in non-distributed mode')\n    except Exception as e:\n        print(f'Error initializing process group: {e}')\n        return\n\n    # Set device\n    try:\n        if torch.cuda.is_available():\n            device = torch.device(f'cuda:{rank % torch.cuda.device_count()}')\n            print(f'Using CUDA device: {device}')\n        else:\n            device = torch.device('cpu')\n            print('CUDA not available, using CPU')\n    except Exception as e:\n        print(f'Error setting device: {e}')\n        device = torch.device('cpu')\n        print('Falling back to CPU')\n\n    try:\n        model = SimpleModel().to(device)\n        print('Model moved to device successfully')\n    except Exception as e:\n        print(f'Error moving model to device: {e}')\n        return\n\n    try:\n        if world_size > 1:\n            ddp_model = DDP(model, device_ids=[rank % torch.cuda.device_count()] if torch.cuda.is_available() else None)\n            print('DDP model created successfully')\n        else:\n            ddp_model = model\n            print('Using non-distributed model')\n    except Exception as e:\n        print(f'Error creating DDP model: {e}')\n        return\n\n    loss_fn = nn.MSELoss()\n    optimizer = optim.SGD(ddp_model.parameters(), lr=0.001)\n\n    # Generate some random data\n    try:\n        data = torch.randn(100, 10, device=device)\n        labels = torch.randn(100, 1, device=device)\n        print('Data generated and moved to device successfully')\n    except Exception as e:\n        print(f'Error generating or moving data to device: {e}')\n        return\n\n    for epoch in range(10):\n        try:\n            ddp_model.train()\n            outputs = ddp_model(data)\n            loss = loss_fn(outputs, labels)\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n\n            if rank == 0:\n                print(f'Epoch {epoch}, Loss: {loss.item():.4f}')\n        except Exception as e:\n            print(f'Error during training epoch {epoch}: {e}')\n            break\n\n    if world_size > 1:\n        dist.destroy_process_group()\n\nif __name__ == '__main__':\n    train()\n
                                                                                  "},{"location":"en/end-user/baize/jobs/pytorch.html#number-of-job-replicas","title":"Number of Job Replicas","text":"

                                                                                  Note that Pytorch Distributed training jobs will create a group of Master and Worker training Pods, where the Master is responsible for coordinating the training job, and the Worker is responsible for the actual training work.

                                                                                  Note

In this demonstration, the Master replica count is 1 and the Worker replica count is 2. Therefore, we need to set the replica count to 3 in the Job Configuration, which is the sum of the Master and Worker replica counts. Pytorch will automatically assign the Master and Worker roles.

                                                                                  "},{"location":"en/end-user/baize/jobs/pytorch.html#results_1","title":"Results","text":"

                                                                                  Similarly, we can enter the job details to view the resource usage and the log output of each Pod.

                                                                                  "},{"location":"en/end-user/baize/jobs/tensorboard.html","title":"Job Analysis","text":"

AI Lab provides important visualization analysis tools for the model development process, used to display the training process and results of machine learning models. This document introduces the basic concepts of Job Analysis (Tensorboard), its usage in the AI Lab system, and how to configure the log content of datasets.

                                                                                  Note

                                                                                  Tensorboard is a visualization tool provided by TensorFlow, used to display the training process and results of machine learning models. It can help developers more intuitively understand the training dynamics of their models, analyze model performance, debug issues, and more.

                                                                                  The role and advantages of Tensorboard in the model development process:

• Visualize the Training Process: Display metrics such as training and validation loss and accuracy through charts, helping developers intuitively observe the training effects of the model.
• Debug and Optimize Models: Viewing the weight and gradient distributions of different layers helps developers discover and fix issues in the model.
• Compare Different Experiments: Display the results of multiple experiments simultaneously, making it convenient for developers to compare the effects of different models and hyperparameter configurations.
• Track Training Data: Record the datasets and parameters used during training to ensure the reproducibility of experiments.
                                                                                  "},{"location":"en/end-user/baize/jobs/tensorboard.html#how-to-create-tensorboard","title":"How to Create Tensorboard","text":"

                                                                                  In the AI Lab system, we provide a convenient way to create and manage Tensorboard. Here are the specific steps:

                                                                                  "},{"location":"en/end-user/baize/jobs/tensorboard.html#enable-tensorboard-when-creating-a-notebook","title":"Enable Tensorboard When Creating a Notebook","text":"
1. Create a Notebook: Create a new Notebook on the AI Lab platform.
2. Enable Tensorboard: On the Notebook creation page, enable the Tensorboard option and specify the dataset and log path.

                                                                                  "},{"location":"en/end-user/baize/jobs/tensorboard.html#enable-tensorboard-after-creating-and-completing-a-distributed-job","title":"Enable Tensorboard After Creating and Completing a Distributed Job","text":"
1. Create a Distributed Job: Create a new distributed training job on the AI Lab platform.
2. Configure Tensorboard: On the job configuration page, enable the Tensorboard option and specify the dataset and log path.
3. View Tensorboard After Job Completion: After the job is completed, you can view the Tensorboard link on the job details page. Click the link to see the visualized results of the training process.

                                                                                  "},{"location":"en/end-user/baize/jobs/tensorboard.html#directly-reference-tensorboard-in-a-notebook","title":"Directly Reference Tensorboard in a Notebook","text":"

                                                                                  In a Notebook, you can directly start Tensorboard through code. Here is a sample code snippet:

# Import necessary libraries\nimport tensorflow as tf\nimport datetime\n\n# Load and normalize the MNIST dataset so the snippet runs end to end\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\n# Define log directory\nlog_dir = \"logs/fit/\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n# Create Tensorboard callback\ntensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n\n# Build and compile model\nmodel = tf.keras.models.Sequential([\n    tf.keras.layers.Flatten(input_shape=(28, 28)),\n    tf.keras.layers.Dense(512, activation='relu'),\n    tf.keras.layers.Dropout(0.2),\n    tf.keras.layers.Dense(10, activation='softmax')\n])\n\nmodel.compile(optimizer='adam',\n              loss='sparse_categorical_crossentropy',\n              metrics=['accuracy'])\n\n# Train model and enable Tensorboard callback\nmodel.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test), callbacks=[tensorboard_callback])\n
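If the Notebook image ships the TensorBoard Jupyter extension (an assumption about your runtime environment), you can also display the TensorBoard UI inline against the same log directory using notebook magics:

%load_ext tensorboard\n%tensorboard --logdir logs/fit\n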
                                                                                  "},{"location":"en/end-user/baize/jobs/tensorboard.html#how-to-configure-dataset-log-content","title":"How to Configure Dataset Log Content","text":"

                                                                                  When using Tensorboard, you can record and configure different datasets and log content. Here are some common configuration methods:

                                                                                  "},{"location":"en/end-user/baize/jobs/tensorboard.html#configure-training-and-validation-dataset-logs","title":"Configure Training and Validation Dataset Logs","text":"

                                                                                  While training the model, you can use TensorFlow's tf.summary API to record logs for the training and validation datasets. Here is a sample code snippet:

                                                                                  # Import necessary libraries\nimport tensorflow as tf\n\n# Create log directories\ntrain_log_dir = 'logs/gradient_tape/train'\nval_log_dir = 'logs/gradient_tape/val'\ntrain_summary_writer = tf.summary.create_file_writer(train_log_dir)\nval_summary_writer = tf.summary.create_file_writer(val_log_dir)\n\n# Train model and record logs\nfor epoch in range(EPOCHS):\n    for (x_train, y_train) in train_dataset:\n        # Training step\n        train_step(x_train, y_train)\n        with train_summary_writer.as_default():\n            tf.summary.scalar('loss', train_loss.result(), step=epoch)\n            tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)\n\n    for (x_val, y_val) in val_dataset:\n        # Validation step\n        val_step(x_val, y_val)\n        with val_summary_writer.as_default():\n            tf.summary.scalar('loss', val_loss.result(), step=epoch)\n            tf.summary.scalar('accuracy', val_accuracy.result(), step=epoch)\n
                                                                                  "},{"location":"en/end-user/baize/jobs/tensorboard.html#configure-custom-logs","title":"Configure Custom Logs","text":"

                                                                                  In addition to logs for training and validation datasets, you can also record other custom log content such as learning rate and gradient distribution. Here is a sample code snippet:

# Record custom logs (assumes train_summary_writer and the epoch loop variable\n# from the previous snippet, plus computed learning_rate and gradients values)\nwith train_summary_writer.as_default():\n    tf.summary.scalar('learning_rate', learning_rate, step=epoch)\n    tf.summary.histogram('gradients', gradients, step=epoch)\n
                                                                                  "},{"location":"en/end-user/baize/jobs/tensorboard.html#tensorboard-management","title":"Tensorboard Management","text":"

                                                                                  In AI Lab, Tensorboards created through various methods are uniformly displayed on the job analysis page, making it convenient for users to view and manage.

Users can view each Tensorboard's link, status, and creation time on the job analysis page, and access the visualized results directly through the link.

                                                                                  "},{"location":"en/end-user/baize/jobs/tensorflow.html","title":"Tensorflow Jobs","text":"

Tensorflow, like Pytorch, is a highly active open-source deep learning framework that provides a flexible environment for training and deployment.

                                                                                  AI Lab provides support and adaptation for the Tensorflow framework. You can quickly create Tensorflow jobs and conduct model training through graphical operations.

                                                                                  "},{"location":"en/end-user/baize/jobs/tensorflow.html#job-configuration","title":"Job Configuration","text":"
• Supported job types include both Tensorflow Single and Tensorflow Distributed modes.
                                                                                  • The runtime image already supports the Tensorflow framework by default, so no additional installation is required.
                                                                                  "},{"location":"en/end-user/baize/jobs/tensorflow.html#job-runtime-environment","title":"Job Runtime Environment","text":"

                                                                                  Here, we use the baize-notebook base image and the associated environment as the basic runtime environment for jobs.

                                                                                  For information on how to create an environment, refer to Environment List.

                                                                                  "},{"location":"en/end-user/baize/jobs/tensorflow.html#creating-a-job","title":"Creating a Job","text":""},{"location":"en/end-user/baize/jobs/tensorflow.html#example-tfjob-single","title":"Example TFJob Single","text":"
                                                                                  1. Log in to the AI Lab platform and click Job Center in the left navigation bar to enter the Jobs page.
                                                                                  2. Click the Create button in the upper right corner to enter the job creation page.
3. Select the job type as Tensorflow Single and click Next.
4. Fill in the job name and description, then click OK.
                                                                                  "},{"location":"en/end-user/baize/jobs/tensorflow.html#pre-warming-the-code-repository","title":"Pre-warming the Code Repository","text":"

                                                                                  Use AI Lab -> Dataset List to create a dataset and pull the code from a remote GitHub repository into the dataset. This way, when creating a job, you can directly select the dataset and mount the code into the job.

                                                                                  Demo code repository address: https://github.com/d-run/training-sample-code/

                                                                                  "},{"location":"en/end-user/baize/jobs/tensorflow.html#parameters","title":"Parameters","text":"
                                                                                  • Launch command: Use bash
                                                                                  • Command parameters: Use python /code/tensorflow/tf-single.py
# Requires: pip install tensorflow numpy\n\nimport tensorflow as tf\nimport numpy as np\n\n# Create some random data\nx = np.random.rand(100, 1)\ny = 2 * x + 1 + np.random.rand(100, 1) * 0.1\n\n# Create a simple model\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Dense(1, input_shape=(1,))\n])\n\n# Compile the model\nmodel.compile(optimizer='adam', loss='mse')\n\n# Train the model, setting epochs to 10\nhistory = model.fit(x, y, epochs=10, verbose=1)\n\n# Print the final loss\nprint(f\"Final loss: {history.history['loss'][-1]}\")\n\n# Use the model to make predictions\ntest_x = np.array([[0.5]])\nprediction = model.predict(test_x)\nprint(f'Prediction for x=0.5: {prediction[0][0]}')\n
                                                                                  "},{"location":"en/end-user/baize/jobs/tensorflow.html#results","title":"Results","text":"

                                                                                  After the job is successfully submitted, you can enter the job details to see the resource usage. From the upper right corner, navigate to Workload Details to view log outputs during the training process.

                                                                                  "},{"location":"en/end-user/baize/jobs/tensorflow.html#tfjob-distributed-job","title":"TFJob Distributed Job","text":"
                                                                                  1. Log in to AI Lab and click Job Center in the left navigation bar to enter the Jobs page.
                                                                                  2. Click the Create button in the upper right corner to enter the job creation page.
                                                                                  3. Select the job type as Tensorflow Distributed and click Next.
                                                                                  4. Fill in the job name and description, then click OK.
                                                                                  "},{"location":"en/end-user/baize/jobs/tensorflow.html#example-job-introduction","title":"Example Job Introduction","text":"

                                                                                  This job includes three roles: Chief, Worker, and Parameter Server (PS).

                                                                                  • Chief: Responsible for coordinating the training process and saving model checkpoints.
                                                                                  • Worker: Executes the actual model training.
                                                                                  • PS: Used in asynchronous training to store and update model parameters.

                                                                                  Different resources are allocated to different roles. Chief and Worker use GPUs, while PS uses CPUs and larger memory.
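
Inside each Pod, the training operator exposes the cluster topology through the TF_CONFIG environment variable, and the example script below reads it to determine the Pod's role. The following is a minimal sketch of that structure; the host names and ports shown are hypothetical examples, not values generated by the platform:

import json\nimport os\n\n# Illustrative TF_CONFIG structure injected into each Pod; the addresses\n# below are hypothetical examples\nexample = {\n    \"cluster\": {\n        \"chief\": [\"tfjob-chief-0:2222\"],\n        \"worker\": [\"tfjob-worker-0:2222\", \"tfjob-worker-1:2222\"],\n        \"ps\": [\"tfjob-ps-0:2222\"]\n    },\n    \"task\": {\"type\": \"worker\", \"index\": 0}  # this Pod's own role and index\n}\n\ntf_config = json.loads(os.environ.get(\"TF_CONFIG\", json.dumps(example)))\nprint(tf_config[\"task\"][\"type\"], tf_config[\"task\"][\"index\"])\n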

                                                                                  "},{"location":"en/end-user/baize/jobs/tensorflow.html#parameters_1","title":"Parameters","text":"
                                                                                  • Launch command: Use bash
                                                                                  • Command parameters: Use python /code/tensorflow/tensorflow-distributed.py
import os\nimport json\nimport tensorflow as tf\n\nclass SimpleModel(tf.keras.Model):\n    def __init__(self):\n        super(SimpleModel, self).__init__()\n        self.fc = tf.keras.layers.Dense(1, input_shape=(10,))\n\n    def call(self, x):\n        return self.fc(x)\n\ndef train():\n    # Print environment information\n    print(f\"TensorFlow version: {tf.__version__}\")\n    gpus = tf.config.list_physical_devices('GPU')\n    print(f\"GPU available: {len(gpus) > 0}\")\n    if gpus:\n        print(f\"GPU device count: {len(gpus)}\")\n\n    # Retrieve distributed training information from TF_CONFIG\n    # (its task section describes this Pod's role and index)\n    tf_config = json.loads(os.environ.get('TF_CONFIG') or '{}')\n    job_type = tf_config.get('task', {}).get('type')\n    job_id = tf_config.get('task', {}).get('index')\n\n    print(f\"Job type: {job_type}, Job ID: {job_id}\")\n\n    # Set up distributed strategy\n    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()\n\n    with strategy.scope():\n        model = SimpleModel()\n        loss_fn = tf.keras.losses.MeanSquaredError()\n        optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)\n\n    # Generate some random data\n    data = tf.random.normal((100, 10))\n    labels = tf.random.normal((100, 1))\n\n    @tf.function\n    def train_step(inputs, labels):\n        with tf.GradientTape() as tape:\n            predictions = model(inputs)\n            loss = loss_fn(labels, predictions)\n        gradients = tape.gradient(loss, model.trainable_variables)\n        optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n        return loss\n\n    for epoch in range(10):\n        loss = train_step(data, labels)\n        if job_type == 'chief':\n            print(f'Epoch {epoch}, Loss: {loss.numpy():.4f}')\n\nif __name__ == '__main__':\n    train()\n
                                                                                  "},{"location":"en/end-user/baize/jobs/tensorflow.html#results_1","title":"Results","text":"

                                                                                  Similarly, you can enter the job details to view the resource usage and log outputs of each Pod.

                                                                                  "},{"location":"en/end-user/baize/jobs/view.html","title":"View Job Workloads","text":"

                                                                                  Once a job is created, it will be displayed in the job list.

1. In the job list, click the \u2507 on the right side of a job and select Job Workload Details.

2. A pop-up window will appear asking you to choose which Pod to view. Click Enter.

                                                                                  3. You will be redirected to the container management interface, where you can view the container\u2019s working status, labels and annotations, and any events that have occurred.

4. You can also view the current Pod's recent logs. By default, 100 lines of logs are displayed. To view more complete logs or to download them, click the blue Insight text at the top.

                                                                                  5. Additionally, you can use the ... in the upper right corner to view the current Pod's YAML, and to upload or download files. Below is an example of a Pod's YAML.

                                                                                  kind: Pod\napiVersion: v1\nmetadata:\n  name: neko-tensorboard-job-test-202404181843-skxivllb-worker-0\n  namespace: default\n  uid: ddedb6ff-c278-47eb-ae1e-0de9b7c62f8c\n  resourceVersion: '41092552'\n  creationTimestamp: '2024-04-18T10:43:36Z'\n  labels:\n    training.kubeflow.org/job-name: neko-tensorboard-job-test-202404181843-skxivllb\n    training.kubeflow.org/operator-name: pytorchjob-controller\n    training.kubeflow.org/replica-index: '0'\n    training.kubeflow.org/replica-type: worker\n  annotations:\n    cni.projectcalico.org/containerID: 0cfbb9af257d5e69027c603c6cb2d3890a17c4ae1a145748d5aef73a10d7fbe1\n    cni.projectcalico.org/podIP: ''\n    cni.projectcalico.org/podIPs: ''\n    hami.io/bind-phase: success\n    hami.io/bind-time: '1713437016'\n    hami.io/vgpu-devices-allocated: GPU-29d5fa0d-935b-2966-aff8-483a174d61d1,NVIDIA,1024,20:;\n    hami.io/vgpu-devices-to-allocate: ;\n    hami.io/vgpu-node: worker-a800-1\n    hami.io/vgpu-time: '1713437016'\n    k8s.v1.cni.cncf.io/network-status: |-\n      [{\n          \"name\": \"kube-system/calico\",\n          \"ips\": [\n              \"10.233.97.184\"\n          ],\n          \"default\": true,\n          \"dns\": {}\n      }]\n    k8s.v1.cni.cncf.io/networks-status: |-\n      [{\n          \"name\": \"kube-system/calico\",\n          \"ips\": [\n              \"10.233.97.184\"\n          ],\n          \"default\": true,\n          \"dns\": {}\n      }]\n  ownerReferences:\n    - apiVersion: kubeflow.org/v1\n      kind: PyTorchJob\n      name: neko-tensorboard-job-test-202404181843-skxivllb\n      uid: e5a8b05d-1f03-4717-8e1c-4ec928014b7b\n      controller: true\n      blockOwnerDeletion: true\nspec:\n  volumes:\n    - name: 0-dataset-pytorch-examples\n      persistentVolumeClaim:\n        claimName: pytorch-examples\n    - name: kube-api-access-wh9rh\n      projected:\n        sources:\n          - serviceAccountToken:\n              expirationSeconds: 3607\n              path: token\n          - configMap:\n              name: kube-root-ca.crt\n              items:\n                - key: ca.crt\n                  path: ca.crt\n          - downwardAPI:\n              items:\n                - path: namespace\n                  fieldRef:\n                    apiVersion: v1\n                    fieldPath: metadata.namespace\n        defaultMode: 420\n  containers:\n    - name: pytorch\n      image: m.daocloud.io/docker.io/pytorch/pytorch\n      command:\n        - bash\n      args:\n        - '-c'\n        - >-\n          ls -la /root && which pip && pip install pytorch_lightning tensorboard\n          && python /root/Git/pytorch/examples/mnist/main.py\n      ports:\n        - name: pytorchjob-port\n          containerPort: 23456\n          protocol: TCP\n      env:\n        - name: PYTHONUNBUFFERED\n          value: '1'\n        - name: PET_NNODES\n          value: '1'\n      resources:\n        limits:\n          cpu: '4'\n          memory: 8Gi\n          nvidia.com/gpucores: '20'\n          nvidia.com/gpumem: '1024'\n          nvidia.com/vgpu: '1'\n        requests:\n          cpu: '4'\n          memory: 8Gi\n          nvidia.com/gpucores: '20'\n          nvidia.com/gpumem: '1024'\n          nvidia.com/vgpu: '1'\n      volumeMounts:\n        - name: 0-dataset-pytorch-examples\n          mountPath: /root/Git/pytorch/examples\n        - name: kube-api-access-wh9rh\n          readOnly: true\n          mountPath: 
/var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  restartPolicy: Never\n  terminationGracePeriodSeconds: 30\n  dnsPolicy: ClusterFirst\n  serviceAccountName: default\n  serviceAccount: default\n  nodeName: worker-a800-1\n  securityContext: {}\n  affinity: {}\n  schedulerName: hami-scheduler\n  tolerations:\n    - key: node.kubernetes.io/not-ready\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n    - key: node.kubernetes.io/unreachable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n  priorityClassName: baize-high-priority\n  priority: 100000\n  enableServiceLinks: true\n  preemptionPolicy: PreemptLowerPriority\nstatus:\n  phase: Succeeded\n  conditions:\n    - type: Initialized\n      status: 'True'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:43:36Z'\n      reason: PodCompleted\n    - type: Ready\n      status: 'False'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:46:34Z'\n      reason: PodCompleted\n    - type: ContainersReady\n      status: 'False'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:46:34Z'\n      reason: PodCompleted\n    - type: PodScheduled\n      status: 'True'\n      lastProbeTime: null\n      lastTransitionTime: '2024-04-18T10:43:36Z'\n  hostIP: 10.20.100.211\n  podIP: 10.233.97.184\n  podIPs:\n    - ip: 10.233.97.184\n  startTime: '2024-04-18T10:43:36Z'\n  containerStatuses:\n    - name: pytorch\n      state:\n        terminated:\n          exitCode: 0\n          reason: Completed\n          startedAt: '2024-04-18T10:43:39Z'\n          finishedAt: '2024-04-18T10:46:34Z'\n          containerID: >-\n            containerd://09010214bcf3315e81d38fba50de3943c9d2b48f50a6cc2e83f8ef0e5c6eeec1\n      lastState: {}\n      ready: false\n      restartCount: 0\n      image: m.daocloud.io/docker.io/pytorch/pytorch:latest\n      imageID: >-\n        m.daocloud.io/docker.io/pytorch/pytorch@sha256:11691e035a3651d25a87116b4f6adc113a27a29d8f5a6a583f8569e0ee5ff897\n      containerID: >-\n        containerd://09010214bcf3315e81d38fba50de3943c9d2b48f50a6cc2e83f8ef0e5c6eeec1\n      started: false\n  qosClass: Guaranteed\n
                                                                                  "},{"location":"en/end-user/ghippo/personal-center/accesstoken.html","title":"Access key","text":"

The access key can be used to access the OpenAPI and for continuous delivery. You can obtain a key and access the API by following the steps below in the Personal Center.

                                                                                  "},{"location":"en/end-user/ghippo/personal-center/accesstoken.html#get-key","title":"Get key","text":"

Log in to the AI platform, find Personal Center in the drop-down menu in the upper right corner, and manage your account's access keys on the Access Keys page.

                                                                                  Info

The access key is displayed only once. If you forget it, you will need to create a new key.

                                                                                  "},{"location":"en/end-user/ghippo/personal-center/accesstoken.html#use-the-key-to-access-api","title":"Use the key to access API","text":"

When accessing the AI platform OpenAPI, add the header Authorization:Bearer ${token} to the request to identify the visitor, where ${token} is the key obtained in the previous step. For the specific API, see the OpenAPI Documentation.

                                                                                  Request Example

curl -X GET -H 'Authorization:Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IkRKVjlBTHRBLXZ4MmtQUC1TQnVGS0dCSWc1cnBfdkxiQVVqM2U3RVByWnMiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE2NjE0MTU5NjksImlhdCI6MTY2MDgxMTE2OSwiaXNzIjoiZ2hpcHBvLmlvIiwic3ViIjoiZjdjOGIxZjUtMTc2MS00NjYwLTg2MWQtOWI3MmI0MzJmNGViIiwicHJlZmVycmVkX3VzZXJuYW1lIjoiYWRtaW4iLCJncm91cHMiOltdfQ.RsUcrAYkQQ7C6BxMOrdD3qbBRUt0VVxynIGeq4wyIgye6R8Ma4cjxG5CbU1WyiHKpvIKJDJbeFQHro2euQyVde3ygA672ozkwLTnx3Tu-_mB1BubvWCBsDdUjIhCQfT39rk6EQozMjb-1X1sbLwzkfzKMls-oxkjagI_RFrYlTVPwT3Oaw-qOyulRSw7Dxd7jb0vINPq84vmlQIsI3UuTZSNO5BCgHpubcWwBss-Aon_DmYA-Et_-QtmPBA3k8E2hzDSzc7eqK0I68P25r9rwQ3DeKwD1dbRyndqWORRnz8TLEXSiCFXdZT2oiMrcJtO188Ph4eLGut1-4PzKhwgrQ' 'https://demo-dev.daocloud.io/apis/ghippo.io/v1alpha1/users?page=1&pageSize=10' -k\n

                                                                                  Request result

                                                                                  {\n    \"items\": [\n        {\n            \"id\": \"a7cfd010-ebbe-4601-987f-d098d9ef766e\",\n            \"name\": \"a\",\n            \"email\": \"\",\n            \"description\": \"\",\n            \"firstname\": \"\",\n            \"lastname\": \"\",\n            \"source\": \"locale\",\n            \"enabled\": true,\n            \"createdAt\": \"1660632794800\",\n            \"updatedAt\": \"0\",\n            \"lastLoginAt\": \"\"\n        }\n    ],\n    \"pagination\": {\n        \"page\": 1,\n        \"pageSize\": 10,\n        \"total\": 1\n    }\n}\n
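
The same request can also be issued from Python. Below is a minimal sketch using the third-party requests library, assuming the same endpoint and a valid access key stored in token:

import requests\n\ntoken = \"<your-access-key>\"  # the key obtained in the previous step\n\nresp = requests.get(\n    \"https://demo-dev.daocloud.io/apis/ghippo.io/v1alpha1/users\",\n    headers={\"Authorization\": f\"Bearer {token}\"},\n    params={\"page\": 1, \"pageSize\": 10},\n    verify=False,  # equivalent of curl -k; only for self-signed certificates\n)\nprint(resp.json())\n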
                                                                                  "},{"location":"en/end-user/ghippo/personal-center/language.html","title":"language settings","text":"

This section explains how to set the interface language. Two languages are currently supported: Chinese and English.

Language settings are the entry point for the platform's multilingual services. The platform is displayed in Chinese by default. Users can switch the platform language by selecting English or by letting the platform automatically detect the browser's language preference. Each user's language setting is independent, and switching it does not affect other users.

The platform provides three language options: Chinese, English, and automatic detection of your browser's language preference.

                                                                                  The operation steps are as follows.

                                                                                  1. Log in to the AI platform with your username/password. Click Global Management at the bottom of the left navigation bar.

2. Click the username in the upper right corner and select Personal Center.

                                                                                  3. Click the Language Settings tab.

                                                                                  4. Toggle the language option.

                                                                                  "},{"location":"en/end-user/ghippo/personal-center/security-setting.html","title":"Security Settings","text":"

Function description: this page is used to fill in an email address and change the login password.

• Email: After the administrator configures the email server address, users can click the Forgot Password button on the login page and enter their email address there to retrieve the password.
• Password: The password used to log in to the platform. It is recommended to change it regularly.

                                                                                  The specific operation steps are as follows:

1. Click the username in the upper right corner and select Personal Center.

                                                                                  2. Click the Security Settings tab. Fill in your email address or change the login password.

                                                                                  "},{"location":"en/end-user/ghippo/personal-center/ssh-key.html","title":"Configuring SSH Public Key","text":"

This article explains how to configure an SSH public key.

                                                                                  "},{"location":"en/end-user/ghippo/personal-center/ssh-key.html#step-1-view-existing-ssh-keys","title":"Step 1. View Existing SSH Keys","text":"

Before generating a new SSH key, check whether an existing SSH key stored in your local user's home directory can be reused. On Linux and Mac, use the following commands to view existing public keys. Windows users can run the same commands in WSL (requires Windows 10 or above) or Git Bash.

                                                                                  • ED25519 Algorithm:

                                                                                    cat ~/.ssh/id_ed25519.pub\n
                                                                                  • RSA Algorithm:

                                                                                    cat ~/.ssh/id_rsa.pub\n

                                                                                  If a long string starting with ssh-ed25519 or ssh-rsa is returned, it means that a local public key already exists. You can skip Step 2 Generate SSH Key and proceed directly to Step 3.

                                                                                  "},{"location":"en/end-user/ghippo/personal-center/ssh-key.html#step-2-generate-ssh-key","title":"Step 2. Generate SSH Key","text":"

If Step 1 does not return such a string, there is no SSH key available locally and a new one needs to be generated. Please follow these steps:

                                                                                  1. Access the terminal (Windows users please use WSL or Git Bash), and run ssh-keygen -t.

                                                                                  2. Enter the key algorithm type and an optional comment.

The comment appears in the .pub file; an email address is commonly used as the comment.

                                                                                    • To generate a key pair based on the ED25519 algorithm, use the following command:

                                                                                      ssh-keygen -t ed25519 -C \"<comment>\"\n
                                                                                    • To generate a key pair based on the RSA algorithm, use the following command:

                                                                                      ssh-keygen -t rsa -C \"<comment>\"\n
3. Press Enter to accept the default SSH key path, or enter a custom path.

                                                                                    Taking the ED25519 algorithm as an example, the default path is as follows:

                                                                                    Generating public/private ed25519 key pair.\nEnter file in which to save the key (/home/user/.ssh/id_ed25519):\n

The default key generation path is /home/user/.ssh/id_ed25519, and the corresponding public key is /home/user/.ssh/id_ed25519.pub.

                                                                                  4. Set a passphrase for the key.

                                                                                    Enter passphrase (empty for no passphrase):\nEnter same passphrase again:\n

                                                                                    The passphrase is empty by default, and you can choose to use a passphrase to protect the private key file. If you do not want to enter a passphrase every time you access the repository using the SSH protocol, you can enter an empty passphrase when creating the key.

                                                                                  5. Press Enter to complete the key pair creation.

                                                                                  "},{"location":"en/end-user/ghippo/personal-center/ssh-key.html#step-3-copy-the-public-key","title":"Step 3. Copy the Public Key","text":"

                                                                                  In addition to manually copying the generated public key information printed on the command line, you can use the following commands to copy the public key to the clipboard, depending on the operating system.

                                                                                  • Windows (in WSL or Git Bash):

                                                                                    cat ~/.ssh/id_ed25519.pub | clip\n
                                                                                  • Mac:

tr -d '\\n' < ~/.ssh/id_ed25519.pub | pbcopy\n
                                                                                  • GNU/Linux (requires xclip):

                                                                                    xclip -sel clip < ~/.ssh/id_ed25519.pub\n
                                                                                  "},{"location":"en/end-user/ghippo/personal-center/ssh-key.html#step-4-set-the-public-key-on-ai-platform-platform","title":"Step 4. Set the Public Key on AI platform Platform","text":"
                                                                                  1. Log in to the AI platform UI page and select Profile -> SSH Public Key in the upper right corner of the page.

                                                                                  2. Add the generated SSH public key information.

                                                                                    1. SSH public key content.

2. Public key title: supports a custom public key name to distinguish keys for management.

                                                                                    3. Expiration: Set the expiration period for the public key. After it expires, the public key will be automatically invalidated and cannot be used. If not set, it will be permanently valid.

                                                                                  "},{"location":"en/end-user/ghippo/workspace/folder-permission.html","title":"Description of folder permissions","text":"

Folders provide permission mapping, which can map the permissions of users/groups in a folder to the subfolders, workspaces, and resources under it.

If a user/group has the Folder Admin role in a folder, it still has the Folder Admin role when mapped to a subfolder, and is mapped to Workspace Admin for the workspaces under it. If a Namespace is bound in Workspace and Folder -> Resource Group, the user/group also becomes Namespace Admin after mapping.

                                                                                  Note

The permission mapping capability of folders is not applied to shared resources, because sharing grants cluster usage permissions to multiple workspaces rather than assigning management permissions to them; therefore, permission inheritance and role mapping are not implemented.

                                                                                  "},{"location":"en/end-user/ghippo/workspace/folder-permission.html#use-cases","title":"Use cases","text":"

Folders are hierarchical, so when folders are mapped to departments/suppliers/projects in the enterprise:

• If a user/group has administrative permission (Admin) in a first-level department, it also has administrative permission in the second-, third-, and fourth-level departments or projects under it;
• If a user/group has edit permission (Editor) in a first-level department, it also has edit permission in the second-, third-, and fourth-level departments or projects under it;
• If a user/group has read-only permission (Viewer) in a first-level department, it also has read-only permission in the second-, third-, and fourth-level departments or projects under it.
Objects | Actions | Folder Admin | Folder Editor | Folder Viewer\nThe folder itself | View | \u2713 | \u2713 | \u2713\nThe folder itself | Authorization | \u2713 | \u2717 | \u2717\nThe folder itself | Modify Alias | \u2713 | \u2717 | \u2717\nSubfolders | Create | \u2713 | \u2717 | \u2717\nSubfolders | View | \u2713 | \u2713 | \u2713\nSubfolders | Authorization | \u2713 | \u2717 | \u2717\nSubfolders | Modify Alias | \u2713 | \u2717 | \u2717\nWorkspaces under it | Create | \u2713 | \u2717 | \u2717\nWorkspaces under it | View | \u2713 | \u2713 | \u2713\nWorkspaces under it | Authorization | \u2713 | \u2717 | \u2717\nWorkspaces under it | Modify Alias | \u2713 | \u2717 | \u2717\nWorkspaces under it - Resource Group | View | \u2713 | \u2713 | \u2713\nWorkspaces under it - Resource Group | Resource binding | \u2713 | \u2717 | \u2717\nWorkspaces under it - Resource Group | Unbind | \u2713 | \u2717 | \u2717\nWorkspaces under it - Shared Resources | View | \u2713 | \u2713 | \u2713\nWorkspaces under it - Shared Resources | New share | \u2713 | \u2717 | \u2717\nWorkspaces under it - Shared Resources | Unshare | \u2713 | \u2717 | \u2717\nWorkspaces under it - Shared Resources | Resource Quota | \u2713 | \u2717 | \u2717"},{"location":"en/end-user/ghippo/workspace/folders.html","title":"Create/Delete Folders","text":"

                                                                                  Folders have the capability to map permissions, allowing users/user groups to have their permissions in the folder mapped to its sub-folders, workspaces, and resources.

                                                                                  Follow the steps below to create a folder:

                                                                                  1. Log in to AI platform with a user account having the admin/folder admin role. Click Global Management -> Workspace and Folder at the bottom of the left navigation bar.

                                                                                  2. Click the Create Folder button in the top right corner.

                                                                                  3. Fill in the folder name, parent folder, and other information, then click OK to complete creating the folder.

                                                                                  Tip

                                                                                  After successful creation, the folder name will be displayed in the left tree structure, represented by different icons for workspaces and folders.

                                                                                  Note

To edit or delete a specific folder, select it and click \u2507 on the right side.

                                                                                  • If there are resources bound to the resource group or shared resources within the folder, the folder cannot be deleted. All resources need to be unbound before deleting.

                                                                                  • If there are registry resources accessed by the microservice engine module within the folder, the folder cannot be deleted. All access to the registry needs to be removed before deleting the folder.

                                                                                  "},{"location":"en/end-user/ghippo/workspace/quota.html","title":"Resource Quota","text":"

Sharing a resource does not necessarily mean that the shared users can use it without restriction. Admin, Kpanda Owner, and Workspace Admin can limit a user's maximum usage quota through the Resource Quota feature of shared resources. If no limit is set, usage is unrestricted.

                                                                                  • CPU Request (Core)
                                                                                  • CPU Limit (Core)
                                                                                  • Memory Request (MB)
                                                                                  • Memory Limit (MB)
                                                                                  • Total Storage Request (GB)
                                                                                  • Persistent Volume Claims (PVC)
                                                                                  • GPU Type, Spec, Quantity (including but not limited to Nvidia, Ascend, ILLUVATAR, and other GPUs)

                                                                                  A resource (cluster) can be shared among multiple workspaces, and a workspace can use resources from multiple shared clusters simultaneously.

                                                                                  "},{"location":"en/end-user/ghippo/workspace/quota.html#resource-groups-and-shared-resources","title":"Resource Groups and Shared Resources","text":"

                                                                                  Cluster resources in both shared resources and resource groups are derived from Container Management. However, different effects will occur when binding a cluster to a workspace or sharing it with a workspace.

                                                                                  1. Binding Resources

                                                                                    Users/User groups in the workspace will have full management and usage permissions for the cluster. Workspace Admin will be mapped as Cluster Admin. Workspace Admin can access the Container Management module to manage the cluster.

                                                                                    Note

                                                                                    As of now, there are no Cluster Editor and Cluster Viewer roles in the Container Management module. Therefore, Workspace Editor and Workspace Viewer cannot be mapped.

                                                                                  2. Adding Shared Resources

                                                                                    Users/User groups in the workspace will have usage permissions for the cluster resources.

                                                                                    Unlike resource groups, when sharing a cluster with a workspace, the roles of the users in the workspace will not be mapped to the resources. Therefore, Workspace Admin will not be mapped as Cluster Admin.

                                                                                  This section demonstrates three scenarios related to resource quotas.

                                                                                  "},{"location":"en/end-user/ghippo/workspace/quota.html#create-namespaces","title":"Create Namespaces","text":"

                                                                                  Creating a namespace involves resource quotas.

1. Add a shared cluster to workspace ws01.

2. Select workspace ws01 and the shared cluster in Workbench, and create a namespace ns01.

                                                                                    • If no resource quotas are set in the shared cluster, there is no need to set resource quotas when creating the namespace.
• If resource quotas are set in the shared cluster (e.g., CPU Request = 100 cores), the CPU request for the namespace must be less than or equal to 100 cores (CPU Request \u2264 100 cores) for successful creation.
                                                                                  "},{"location":"en/end-user/ghippo/workspace/quota.html#bind-namespace-to-workspace","title":"Bind Namespace to Workspace","text":"

                                                                                  Prerequisite: Workspace ws01 has added a shared cluster, and the operator has the Workspace Admin + Kpanda Owner or Admin role.

                                                                                  The two methods of binding have the same effect.

                                                                                  • Bind the created namespace ns01 to ws01 in Container Management.

                                                                                    • If no resource quotas are set in the shared cluster, the namespace ns01 can be successfully bound regardless of whether resource quotas are set.
• If resource quotas are set in the shared cluster (e.g., CPU Request = 100 cores), the namespace ns01 must meet the requirement of CPU requests less than or equal to 100 cores (CPU Request \u2264 100 cores) for successful binding.
                                                                                  • Bind the namespace ns01 to ws01 in Global Management.

                                                                                    • If no resource quotas are set in the shared cluster, the namespace ns01 can be successfully bound regardless of whether resource quotas are set.
• If resource quotas are set in the shared cluster (e.g., CPU Request = 100 cores), the namespace ns01 must meet the requirement of CPU requests less than or equal to 100 cores (CPU Request \u2264 100 cores) for successful binding.
                                                                                  "},{"location":"en/end-user/ghippo/workspace/quota.html#unbind-namespace-from-workspace","title":"Unbind Namespace from Workspace","text":"

                                                                                  The two methods of unbinding have the same effect.

                                                                                  • Unbind the namespace ns01 from workspace ws01 in Container Management.

                                                                                    • If no resource quotas are set in the shared cluster, unbinding the namespace ns01 will not affect the resource quotas, regardless of whether resource quotas were set for the namespace.
• If resource quotas (CPU Request = 100 cores) are set in the shared cluster and the namespace ns01 has its own resource quotas, unbinding will release the corresponding resource quota.
                                                                                  • Unbind the namespace ns01 from workspace ws01 in Global Management.

                                                                                    • If no resource quotas are set in the shared cluster, unbinding the namespace ns01 will not affect the resource quotas, regardless of whether resource quotas were set for the namespace.
• If resource quotas (CPU Request = 100 cores) are set in the shared cluster and the namespace ns01 has its own resource quotas, unbinding will release the corresponding resource quota.
                                                                                  "},{"location":"en/end-user/ghippo/workspace/res-gp-and-shared-res.html","title":"Differences between Resource Groups and Shared Resources","text":"

                                                                                  Both resource groups and shared resources support cluster binding, but they have significant differences in usage.

                                                                                  "},{"location":"en/end-user/ghippo/workspace/res-gp-and-shared-res.html#differences-in-usage-scenarios","title":"Differences in Usage Scenarios","text":"
• Cluster Binding for Resource Groups: Resource groups are usually used for batch authorization. After a cluster is bound to a resource group, the workspace administrator is mapped as a cluster administrator and can manage and use cluster resources.
                                                                                  • Cluster Binding for Shared Resources: Shared resources are usually used for resource quotas. A typical scenario is that the platform administrator assigns a cluster to a first-level supplier, who then assigns the cluster to a second-level supplier and sets resource quotas for the second-level supplier.

Note: In this scenario, the platform administrator needs to impose resource restrictions on secondary suppliers directly; limiting the cluster quota of secondary suppliers through the primary supplier is currently not supported.

                                                                                  "},{"location":"en/end-user/ghippo/workspace/res-gp-and-shared-res.html#differences-in-cluster-quota-usage","title":"Differences in Cluster Quota Usage","text":"
• Cluster Binding for Resource Groups: The workspace administrator is mapped as an administrator of the cluster, equivalent to being granted the Cluster Admin role in Container Management - Permission Management. They have unrestricted access to cluster resources, can manage important content such as management nodes, and are not subject to resource quotas.
                                                                                  • Cluster Binding for Shared Resources: The workspace administrator can only use the quota in the cluster to create namespaces in the Workbench and does not have cluster management permissions. If the workspace is restricted by a quota, the workspace administrator can only create and use namespaces within the quota range.
                                                                                  "},{"location":"en/end-user/ghippo/workspace/res-gp-and-shared-res.html#differences-in-resource-types","title":"Differences in Resource Types","text":"
• Resource Groups: Can bind to clusters, cluster-namespaces, multiclouds, multicloud namespaces, meshes, and mesh-namespaces.
                                                                                  • Shared Resources: Can only bind to clusters.
                                                                                  "},{"location":"en/end-user/ghippo/workspace/res-gp-and-shared-res.html#similarities-between-resource-groups-and-shared-resources","title":"Similarities between Resource Groups and Shared Resources","text":"

                                                                                  After binding to a cluster, both resource groups and shared resources can go to the Workbench to create namespaces, which will be automatically bound to the workspace.

                                                                                  "},{"location":"en/end-user/ghippo/workspace/workspace.html","title":"Creating/Deleting Workspaces","text":"

A workspace is a resource category that represents a hierarchical relationship of resources. A workspace can contain resources such as clusters, namespaces, and registries. Typically, each workspace corresponds to a project; different resources can be allocated to it, and different users and user groups can be assigned to it.

                                                                                  Follow the steps below to create a workspace:

                                                                                  1. Log in to AI platform with a user account having the admin/folder admin role. Click Global Management -> Workspace and Folder at the bottom of the left navigation bar.

                                                                                  2. Click the Create Workspace button in the top right corner.

                                                                                  3. Fill in the workspace name, folder assignment, and other information, then click OK to complete creating the workspace.

                                                                                  Tip

                                                                                  After successful creation, the workspace name will be displayed in the left tree structure, represented by different icons for folders and workspaces.

                                                                                  Note

                                                                                  To edit or delete a specific workspace or folder, select it and click ... on the right side.

• If resource groups or shared resources have resources bound under the workspace, the workspace cannot be deleted. All resources need to be unbound before the workspace is deleted.

                                                                                  • If Microservices Engine has Integrated Registry under the workspace, the workspace cannot be deleted. Integrated Registry needs to be removed before deletion of the workspace.

                                                                                  • If Container Registry has Registry Space or Integrated Registry under the workspace, the workspace cannot be deleted. Registry Space needs to be removed, and Integrated Registry needs to be deleted before deletion of the workspace.

                                                                                  "},{"location":"en/end-user/ghippo/workspace/ws-folder.html","title":"Workspace and Folder","text":"

                                                                                  Workspace and Folder is a feature that provides resource isolation and grouping, addressing issues related to unified authorization, resource grouping, and resource quotas.

                                                                                  Workspace and Folder involves two concepts: workspaces and folders.

                                                                                  "},{"location":"en/end-user/ghippo/workspace/ws-folder.html#workspaces","title":"Workspaces","text":"

Workspaces allow the management of resources through Authorization, Resource Group, and Shared Resource, enabling users (and user groups) to share resources within the workspace.

                                                                                  • Resources

                                                                                    Resources are at the lowest level of the hierarchy in the resource management module. They include clusters, namespaces, pipelines, gateways, and more. All these resources can only have workspaces as their parent level. Workspaces act as containers for grouping resources.

                                                                                  • Workspace

                                                                                    A workspace usually refers to a project or environment, and the resources in each workspace are logically isolated from those in other workspaces. You can grant users (groups of users) different access rights to the same set of resources through authorization in the workspace.

                                                                                    Workspaces are at the first level, counting from the bottom of the hierarchy, and contain resources. All resources except shared resources have one and only one parent. All workspaces also have one and only one parent folder.

Resources are grouped by workspace, and there are two grouping modes in a workspace: Resource Group and Shared Resource.

                                                                                  • Resource group

A resource can only be added to one resource group, and resource groups correspond one-to-one with workspaces. After a resource is added to a resource group, the Workspace Admin obtains management authority over the resource, equivalent to being its owner.

• Shared resource

With shared resources, multiple workspaces can share one or more resources. Resource owners can choose to share their own resources with workspaces. Generally, when sharing, the resource owner limits the amount of resources the shared workspace is allowed to use. After resources are shared, the Workspace Admin has only usage rights within the resource limit, and cannot manage the resources or adjust the amount of resources the workspace can use.

At the same time, shared resources have certain requirements for the resources themselves: only Cluster resources can be shared. A Cluster Admin can share Cluster resources with different workspaces and limit each workspace's usage of the Cluster.

                                                                                    Workspace Admin can create multiple Namespaces within the resource quota, but the sum of the resource quotas of the Namespaces cannot exceed the resource quota of the Cluster in the workspace. For Kubernetes resources, the only resource type that can be shared currently is Cluster.

                                                                                  "},{"location":"en/end-user/ghippo/workspace/ws-folder.html#folder","title":"Folder","text":"

                                                                                  Folders can be used to build enterprise business hierarchy relationships.

                                                                                  • Folders are a further grouping mechanism based on workspaces and have a hierarchical structure. A folder can contain workspaces, other folders, or a combination of both, forming a tree-like organizational relationship.

                                                                                  • Folders allow you to map your business hierarchy and group workspaces by department. Folders are not directly linked to resources, but indirectly achieve resource grouping through workspaces.

                                                                                  • A folder has one and only one parent folder, and the root folder is the highest level of the hierarchy. The root folder has no parent, and folders and workspaces are attached to the root folder.

In addition, users (groups) in folders inherit permissions from their parents through the hierarchical structure: a user's permissions at any level are the union of the permissions granted at that level and those inherited from its parents. Permissions are additive, with no mutual exclusion.

                                                                                  "},{"location":"en/end-user/ghippo/workspace/ws-permission.html","title":"Description of workspace permissions","text":"

                                                                                  The workspace has permission mapping and resource isolation capabilities, and can map the permissions of users/groups in the workspace to the resources under it. If the user/group has the Workspace Admin role in the workspace and the resource Namespace is bound to the workspace-resource group, the user/group will become Namespace Admin after mapping.

                                                                                  Note

The permission mapping capability of the workspace does not apply to shared resources, because sharing grants cluster usage permissions to multiple workspaces rather than management permissions over them; therefore permission inheritance and role mapping are not implemented for shared resources.

                                                                                  "},{"location":"en/end-user/ghippo/workspace/ws-permission.html#use-cases","title":"Use cases","text":"

                                                                                  Resource isolation is achieved by binding resources to different workspaces. Therefore, resources can be flexibly allocated to each workspace (tenant) with the help of permission mapping, resource isolation, and resource sharing capabilities.

                                                                                  Generally applicable to the following two use cases:

                                                                                  • Cluster one-to-one

| Ordinary Cluster | Department/Tenant (Workspace) | Purpose |
| --- | --- | --- |
| Cluster 01 | A | Administration and Usage |
| Cluster 02 | B | Administration and Usage |
                                                                                  • Cluster one-to-many

| Cluster | Department/Tenant (Workspace) | Resource Quota |
| --- | --- | --- |
| Cluster 01 | A | 100-core CPU |
| Cluster 01 | B | 50-core CPU |
                                                                                  "},{"location":"en/end-user/ghippo/workspace/ws-permission.html#permission-description","title":"Permission description","text":"Action Objects Operations Workspace Admin Workspace Editor Workspace Viewer itself view \u2713 \u2713 \u2713 - Authorization \u2713 \u2717 \u2717 - Modify Alias \u2713 \u2713 \u2717 Resource Group View \u2713 \u2713 \u2713 - resource binding \u2713 \u2717 \u2717 - unbind \u2713 \u2717 \u2717 Shared Resources View \u2713 \u2713 \u2713 - Add Share \u2713 \u2717 \u2717 - Unshare \u2713 \u2717 \u2717 - Resource Quota \u2713 \u2717 \u2717 - Using Shared Resources 1 \u2713 \u2717 \u2717
                                                                                  1. Authorized users can go to modules such as workbench, microservice engine, middleware, multicloud orchestration, and service mesh to use resources in the workspace. For the operation scope of the roles of Workspace Admin, Workspace Editor, and Workspace Viewer in each module, please refer to the permission description:

                                                                                    • Container Management Permissions


                                                                                  "},{"location":"en/end-user/ghippo/workspace/wsbind-permission.html","title":"Resource Binding Permission Instructions","text":"

Suppose a user, John ("John" represents any user who needs to bind resources), has been assigned the Workspace Admin role or has been granted the workspace's "Resource Binding" permission through a custom role, and wants to bind a specific cluster or namespace to the workspace.

To bind cluster/namespace resources to a workspace, the workspace's "Resource Binding" permission alone is not enough; Cluster Admin permissions are also required.

                                                                                  "},{"location":"en/end-user/ghippo/workspace/wsbind-permission.html#granting-authorization-to-john","title":"Granting Authorization to John","text":"
                                                                                  1. Using the Platform Admin Role, grant John the role of Workspace Admin on the Workspace -> Authorization page.

2. Then, on the Container Management -> Permissions page, authorize John as a Cluster Admin through Add Permission.

                                                                                  "},{"location":"en/end-user/ghippo/workspace/wsbind-permission.html#binding-to-workspace","title":"Binding to Workspace","text":"

Log in to the AI platform with John's account. On the Container Management -> Clusters page, John can bind the specified cluster to his own workspace using the Bind Workspace button.

                                                                                  Note

                                                                                  John can only bind clusters or namespaces to a specific workspace in the Container Management module, and cannot perform this operation in the Global Management module.

                                                                                  To bind a namespace to a workspace, you must have at least Workspace Admin and Cluster Admin permissions.

                                                                                  "},{"location":"en/end-user/host/createhost.html","title":"Creating and Starting a Cloud Host","text":"

                                                                                  Once the user completes registration and is assigned a workspace, namespace, and resources, they can create and start a cloud host.

                                                                                  "},{"location":"en/end-user/host/createhost.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • The AI platform is installed
                                                                                  • User has successfully registered
                                                                                  • Administrator has bound the workspace to the user
                                                                                  • Administrator has allocated resources for the workspace
                                                                                  "},{"location":"en/end-user/host/createhost.html#steps-to-operate","title":"Steps to Operate","text":"
1. The user logs into the AI platform and goes to the CloudHost module from the left navigation bar.
                                                                                  2. Click Create VMs -> Create with Template.

                                                                                  3. After defining the configurations for the cloud host, click Next.

Basic Information / Template Configuration / Storage and Network

                                                                                  4. After configuring the root password or SSH key, click OK.

5. Return to the host list and wait for the status to change to Running. Then, you can start the host using the ┇ button on the right.

                                                                                  Next step: Using the Cloud Host

                                                                                  "},{"location":"en/end-user/host/usehost.html","title":"Using the Cloud Host","text":"

After the cloud host has been created and started, the user can begin using it.

                                                                                  "},{"location":"en/end-user/host/usehost.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • The AI platform is installed
                                                                                  • User has created and started the cloud host
                                                                                  "},{"location":"en/end-user/host/usehost.html#steps-to-operate","title":"Steps to Operate","text":"
                                                                                  1. Log into the AI platform as an administrator.
                                                                                  2. Navigate to Container Management -> Container Network -> Services, click the service name to enter the service details page, and click Update in the upper right corner.

3. Change the port range to 30900-30999, ensuring there are no conflicts (see the sketch after these steps).

                                                                                  4. Log into the AI platform as an end user, navigate to the proper service, and check the access ports.

                                                                                  5. Use an SSH client to log into the cloud host from the external network.

                                                                                  6. At this point, you can perform various operations on the cloud host.
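As a sketch of what the port-range step above implies: the SSH port of a cloud host is reached through a NodePort-style service whose node port falls inside the configured 30900-30999 range. Assuming a hypothetical service and namespace (this is illustrative Kubernetes YAML, not the exact object the platform creates):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-cloudhost-ssh   # illustrative name
  namespace: cloudhost-ns  # illustrative namespace
spec:
  type: NodePort
  selector:
    app: my-cloudhost      # illustrative label
  ports:
    - name: ssh
      port: 22
      targetPort: 22
      nodePort: 30922      # must fall inside the 30900-30999 range configured above
```

With such a service in place, the SSH login in the steps above would look like ssh root@<node-ip> -p 30922.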

                                                                                  Next step: Using Notebook

                                                                                  "},{"location":"en/end-user/insight/alert-center/index.html","title":"Alert Center","text":"

                                                                                  The Alert Center is an important feature provided by AI platform that allows users to easily view all active and historical alerts by cluster and namespace through a graphical interface, and search alerts based on severity level (critical, warning, info).

                                                                                  All alerts are triggered based on the threshold conditions set in the preset alert rules. In AI platform, some global alert policies are built-in, but users can also create or delete alert policies at any time, and set thresholds for the following metrics:

                                                                                  • CPU usage
                                                                                  • Memory usage
                                                                                  • Disk usage
                                                                                  • Disk reads per second
                                                                                  • Disk writes per second
                                                                                  • Cluster disk read throughput
                                                                                  • Cluster disk write throughput
                                                                                  • Network send rate
                                                                                  • Network receive rate

                                                                                  Users can also add labels and annotations to alert rules. Alert rules can be classified as active or expired, and certain rules can be enabled/disabled to achieve silent alerts.

                                                                                  When the threshold condition is met, users can configure how they want to be notified, including email, DingTalk, WeCom, webhook, and SMS notifications. All notification message templates can be customized and all messages are sent at specified intervals.

                                                                                  In addition, the Alert Center also supports sending alert messages to designated users through short message services provided by Alibaba Cloud, Tencent Cloud, and more platforms that will be added soon, enabling multiple ways of alert notification.

                                                                                  AI platform Alert Center is a powerful alert management platform that helps users quickly detect and resolve problems in the cluster, improve business stability and availability, and facilitate cluster inspection and troubleshooting.

                                                                                  "},{"location":"en/end-user/insight/alert-center/alert-policy.html","title":"Alert Policies","text":"

                                                                                  In addition to the built-in alert policies, AI platform allows users to create custom alert policies. Each alert policy is a collection of alert rules that can be set for clusters, nodes, and workloads. When an alert object reaches the threshold set by any of the rules in the policy, an alert is automatically triggered and a notification is sent.

Taking the built-in alerts as an example, click the first alert policy, alertmanager.rules.

                                                                                  You can see that some alert rules have been set under it. You can add more rules under this policy, or edit or delete them at any time. You can also view the historical and active alerts related to this alert policy and edit the notification configuration.

                                                                                  "},{"location":"en/end-user/insight/alert-center/alert-policy.html#create-alert-policies","title":"Create Alert Policies","text":"
1. Select Alert Center -> Alert Policies, and click the Create Alert Policy button.

2. Fill in the basic information, select one or more clusters, nodes, or workloads as the alert objects, and click Next.

3. The list must have at least one rule. If the list is empty, please Add Rule.

   Create an alert rule in the pop-up window, fill in the parameters, and click OK.

   • Template rules: Pre-defined basic metrics that can monitor CPU, memory, disk, and network.
   • PromQL rules: Enter a PromQL expression; refer to querying Prometheus expressions (see the sketch at the end of this section).
   • Duration: After the alert is triggered and its duration reaches the set value, the alert policy enters the triggered state.
   • Alert level: Includes emergency, warning, and information levels.
   • Advanced settings: Custom tags and annotations.

4. After clicking Next, configure notifications.

5. After the configuration is complete, click the OK button to return to the Alert Policy list.

                                                                                  Tip

                                                                                  The newly created alert policy is in the Not Triggered state. Once the threshold conditions and duration specified in the rules are met, it will change to the Triggered state.
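For the PromQL rule type mentioned in the steps above, a minimal sketch of a threshold expression, assuming the common node_exporter metric node_cpu_seconds_total is being collected (the metric names available depend on your Prometheus setup):

```yaml
# Illustrative PromQL-type rule: fire when a node's average CPU usage exceeds 80%
expr: |
  100 * (1 - avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[5m]))) > 80
```

Combined with a Duration of, say, 5 minutes, the policy enters the Triggered state only if the expression stays above the threshold for the whole period.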

                                                                                  "},{"location":"en/end-user/insight/alert-center/alert-policy.html#create-log-rules","title":"Create Log Rules","text":"

                                                                                  After filling in the basic information, click Add Rule and select Log Rule as the rule type.

                                                                                  Creating log rules is supported only when the resource object is selected as a node or workload.

                                                                                  Field Explanation:

• Filter Condition: The field used to query log content; supports four filtering conditions: AND, OR, regular expression matching, and fuzzy matching.
• Condition: Keywords or matching conditions, entered based on the filter condition.
• Time Range: The time range for log queries.
• Threshold Condition: The alert threshold; when the set threshold is reached, an alert is triggered. Supported comparison operators: >, ≥, =, ≤, <.
• Alert Level: The alert level, indicating the severity of the alert.
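Purely as an illustration of how these fields combine, a log rule might use values like the following (the field names are descriptive placeholders, not Insight's actual configuration format):

```yaml
# Hypothetical log rule: alert when more than 10 log lines fuzzy-match "OutOfMemory" within 5 minutes
filterCondition: fuzzy matching   # one of: AND, OR, regular expression matching, fuzzy matching
condition: "OutOfMemory"
timeRange: 5m
threshold: "> 10"
alertLevel: critical
```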
                                                                                  "},{"location":"en/end-user/insight/alert-center/alert-policy.html#create-event-rules","title":"Create Event Rules","text":"

                                                                                  After filling in the basic information, click Add Rule and select Event Rule as the rule type.

                                                                                  Creating event rules is supported only when the resource object is selected as a workload.

                                                                                  Field Explanation:

• Event Rule: Only the workload can be selected as the resource object.
• Event Reason: Each workload type has its own set of event reasons; multiple event reasons are combined with an AND relationship.
• Time Range: Data generated within this time range is checked; if the threshold condition is reached, an alert event is triggered.
• Threshold Condition: An alert event is triggered when the number of generated events reaches the set threshold.
• Trend Chart: By default, shows the trend of event changes over the last 10 minutes. The value at each point is the total number of occurrences within the time range, counted backward from that point.
                                                                                  "},{"location":"en/end-user/insight/alert-center/alert-policy.html#other-operations","title":"Other Operations","text":"

Click ┇ at the right side of the list, then choose Delete from the pop-up menu to delete an alert policy. By clicking the policy name, you can enter the policy details, where you can add, edit, or delete the alert rules under it.

                                                                                  Warning

Deleted alert policies are permanently removed, so please proceed with caution.

                                                                                  "},{"location":"en/end-user/insight/alert-center/alert-template.html","title":"Alert Template","text":"

The Alert template feature allows platform administrators to create alert templates and rules, which business units can use directly to create alert policies. This reduces the effort business personnel spend managing alert rules and allows alert thresholds to be adjusted to actual environment conditions.

                                                                                  "},{"location":"en/end-user/insight/alert-center/alert-template.html#create-alert-template","title":"Create Alert Template","text":"
                                                                                  1. In the navigation bar, select Alert -> Alert Policy, and click Alert Template at the top.

                                                                                  2. Click Create Alert Template, and set the name, description, and other information for the Alert template.

| Parameter | Description |
| --- | --- |
| Template Name | The name can only contain lowercase letters, numbers, and hyphens (-), must start and end with a lowercase letter or number, and can be up to 63 characters long. |
| Description | The description can contain any characters and can be up to 256 characters long. |
| Resource Type | Used to specify the matching type of the Alert template. |
| Alert Rule | Supports pre-defining multiple Alert rules, including template rules and PromQL rules. |
                                                                                  3. Click OK to complete the creation and return to the Alert template list. Click the template name to view the template details.

                                                                                  "},{"location":"en/end-user/insight/alert-center/alert-template.html#edit-alert-template","title":"Edit Alert Template","text":"

Click ┇ next to the target template, then click Edit to enter the editing page for the alert template.

![Edit](../images/template04.png){ width=1000px }
                                                                                  "},{"location":"en/end-user/insight/alert-center/alert-template.html#delete-alert-template","title":"Delete Alert Template","text":"

Click ┇ next to the target template, then click Delete. Enter the name of the Alert template in the input box to confirm deletion.

![Delete](../images/template05.png){ width=1000px }
                                                                                  "},{"location":"en/end-user/insight/alert-center/inhibition.html","title":"Alert Inhibition","text":"

Alert inhibition is a mechanism for temporarily hiding, or reducing the priority of, alerts that do not need immediate attention. Its purpose is to cut down on unnecessary alert noise that may disturb operations personnel, allowing them to focus on more critical issues.

Alert inhibition recognizes and ignores certain alerts by defining a set of rules for specific conditions. The main cases are:

• Parent-child inhibition: when a parent alert (for example, a crash on a node) is triggered, all child alerts caused by it (for example, a crash of a container running on that node) are inhibited.
• Similar alert inhibition: when alerts share the same characteristics (for example, the same problem on the same instance), the duplicate alerts are inhibited.
                                                                                  "},{"location":"en/end-user/insight/alert-center/inhibition.html#create-inhibition","title":"Create Inhibition","text":"
                                                                                  1. In the left navigation bar, select Alert -> Noise Reduction, and click Inhibition at the top.

                                                                                  2. Click Create Inhibition, and set the name and rules for the inhibition.

                                                                                    Note

Inhibition avoids the problem of multiple similar or related alerts being triggered by the same issue: a set of rules identifies and ignores certain alerts, as reflected in Rule Details and Alert Details.

| Parameter | Description |
| --- | --- |
| Name | The name can only contain lowercase letters, numbers, and hyphens (-), must start and end with a lowercase letter or number, and can be up to 63 characters long. |
| Description | The description can contain any characters and can be up to 256 characters long. |
| Cluster | The cluster where the inhibition rule applies. |
| Namespace | The namespace where the inhibition rule applies. |
| Source Alert | Matches alerts by label conditions. Alerts that meet all label conditions are compared with those that meet the inhibition conditions; alerts that do not meet the inhibition conditions are sent to the user as usual. Value range explanation: Alert Level: the level of metric or event alerts, can be set as Critical, Major, or Minor; Resource Type: the resource type of the alert object, can be set as Cluster, Node, StatefulSet, Deployment, DaemonSet, or Pod; Labels: alert identification attributes consisting of a label name and label value, supporting user-defined values. |
| Inhibition | Specifies the matching conditions for the target alert (the alert to be inhibited). Alerts that meet all the conditions are no longer sent to the user. |
| Equal | Specifies the list of labels to compare to determine whether the source alert and target alert match. Inhibition is triggered only when the values of the labels specified in equal are exactly the same in the source and target alerts. The equal field is optional; if omitted, all labels are used for matching. |
3. Click OK to complete the creation and return to the Inhibition list. Click the inhibition rule name to view the rule details.
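Insight's inhibition semantics closely mirror Alertmanager's inhibit_rules. Purely as a conceptual sketch (this is standard Alertmanager configuration syntax, not Insight's own format):

```yaml
inhibit_rules:
  - source_matchers:
      - severity = "critical"   # the source alert that, when firing, inhibits others
    target_matchers:
      - severity = "warning"    # the target alerts to be suppressed
    equal:
      - cluster                 # inhibit only when these labels match on both alerts
      - namespace
```

As with the Equal parameter above, the equal list restricts inhibition to alert pairs whose listed label values are identical.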

                                                                                  "},{"location":"en/end-user/insight/alert-center/inhibition.html#view-rule-details","title":"View Rule Details","text":"

                                                                                  In the left navigation bar, select Alert -> Alert Policy, and click the policy name to view the rule details.

Note

You can add custom tags when adding rules.
                                                                                  "},{"location":"en/end-user/insight/alert-center/inhibition.html#view-alert-details","title":"View Alert Details","text":"

                                                                                  In the left navigation bar, select Alert -> Alerts, and click the policy name to view details.

Note

Alert details show information and settings for creating inhibitions.
                                                                                  "},{"location":"en/end-user/insight/alert-center/inhibition.html#edit-inhibition-rule","title":"Edit Inhibition Rule","text":"

Click ┇ next to the target rule, then click Edit to enter the editing page for the inhibition rule.

                                                                                  "},{"location":"en/end-user/insight/alert-center/inhibition.html#delete-inhibition-rule","title":"Delete Inhibition Rule","text":"

Click ┇ next to the target rule, then click Delete. Enter the name of the inhibition rule in the input box to confirm deletion.

                                                                                  "},{"location":"en/end-user/insight/alert-center/message.html","title":"Notification Settings","text":"

                                                                                  On the Notification Settings page, you can configure how to send messages to users through email, WeCom, DingTalk, Webhook, and SMS.

                                                                                  "},{"location":"en/end-user/insight/alert-center/message.html#email-group","title":"Email Group","text":"
1. After entering Insight, click Alert Center -> Notification Settings in the left navigation bar. By default, the email notification object is selected. Click Add email group and add one or more email addresses.

                                                                                  2. Multiple email addresses can be added.

3. After the configuration is complete, you are returned to the notification list automatically. Click ┇ on the right side of the list to edit or delete the email group.

                                                                                  "},{"location":"en/end-user/insight/alert-center/message.html#wecom","title":"WeCom","text":"
1. In the left navigation bar, click Alert Center -> Notification Settings -> WeCom. Click Add Group Robot and add one or more group robots.

   For the URL of the WeCom group robot, please refer to the official WeCom document: How to use group robots.

2. After the configuration is complete, you are returned to the notification list automatically. Click ┇ on the right side of the list to select Send Test Information, or to edit or delete the group robot.

                                                                                  "},{"location":"en/end-user/insight/alert-center/message.html#dingtalk","title":"DingTalk","text":"
1. In the left navigation bar, click Alert Center -> Notification Settings -> DingTalk. Click Add Group Robot and add one or more group robots.

   For the URL of the DingTalk group robot, please refer to the official DingTalk document: Custom Robot Access.

2. After the configuration is complete, you are returned to the notification list automatically. Click ┇ on the right side of the list to select Send Test Information, or to edit or delete the group robot.

                                                                                  "},{"location":"en/end-user/insight/alert-center/message.html#lark","title":"Lark","text":"
1. In the left navigation bar, click Alert Center -> Notification Settings -> Lark. Click Add Group Bot and add one or more group bots.

   Note

   When signature verification is required by Lark's group bot, you need to fill in the specific signature key when enabling notifications. Refer to the Customizing Bot User Guide.

2. After configuration, you are returned to the list page automatically. Click ┇ on the right side of the list and select Send Test Message. You can also edit or delete group bots.

                                                                                  "},{"location":"en/end-user/insight/alert-center/message.html#webhook","title":"Webhook","text":"
1. In the left navigation bar, click Alert Center -> Notification Settings -> Webhook. Click New Webhook and add one or more Webhooks.

   For the Webhook URL and more configuration methods, please refer to the webhook document.

2. After the configuration is complete, you are returned to the notification list automatically. Click ┇ on the right side of the list to select Send Test Information, or to edit or delete the Webhook.

                                                                                  "},{"location":"en/end-user/insight/alert-center/message.html#message","title":"Message","text":"

                                                                                  Note

Alert messages are sent to the personal Message section; notifications can be viewed by clicking 🔔 at the top.

1. In the left navigation bar, click Alert Center -> Notification Settings -> Message, then click Create Message.

   You can add and notify multiple users for a message.

2. After configuration, you are returned to the list page automatically. Click ┇ on the right side of the list and select Send Test Message.

                                                                                  "},{"location":"en/end-user/insight/alert-center/message.html#sms-group","title":"SMS Group","text":"
1. In the left navigation bar, click Alert Center -> Notification Settings -> SMS. Click Add SMS Group and add one or more SMS groups.

2. Enter the name, the object receiving the message, the phone number, and the notification server in the pop-up window.

   The notification server needs to be created in advance under Notification Settings -> Notification Server. Currently, two cloud SMS providers are supported: Alibaba Cloud and Tencent Cloud. Please refer to your own cloud provider's information for the specific configuration parameters.

3. After the SMS group is successfully added, you are returned to the notification list automatically. Click ┇ on the right side of the list to edit or delete the SMS group.

                                                                                  "},{"location":"en/end-user/insight/alert-center/msg-template.html","title":"Message Templates","text":"

                                                                                  The message template feature supports customizing the content of message templates and can notify specified objects in the form of email, WeCom, DingTalk, Webhook, and SMS.

                                                                                  "},{"location":"en/end-user/insight/alert-center/msg-template.html#creating-a-message-template","title":"Creating a Message Template","text":"
1. In the left navigation bar, select Alert -> Message Template.

                                                                                    Insight comes with two default built-in templates in both Chinese and English for user convenience.

                                                                                  2. Fill in the template content.

                                                                                  Info

                                                                                  Observability comes with predefined message templates. If you need to define the content of the templates, refer to Configure Notification Templates.

                                                                                  "},{"location":"en/end-user/insight/alert-center/msg-template.html#message-template-details","title":"Message Template Details","text":"

Click the name of a message template to view its details in the right slide-out panel.
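For example, a notification template body assembled from the variables in the table below might look like this minimal sketch:

```
[{{ .Labels.severity }}] {{ .Labels.alertname }}
Cluster: {{ .Labels.cluster }} / Namespace: {{ .Labels.namespace }}
Target: {{ .Labels.target_type }}/{{ .Labels.target }}
Value: {{ .Annotations.value }}
Started at: {{ .StartsAt }}
Description: {{ .Annotations.description }}
```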

| Parameters | Variable | Description |
| --- | --- | --- |
| ruleName | {{ .Labels.alertname }} | The name of the rule that triggered the alert |
| groupName | {{ .Labels.alertgroup }} | The name of the alert policy to which the alert rule belongs |
| severity | {{ .Labels.severity }} | The level of the alert that was triggered |
| cluster | {{ .Labels.cluster }} | The cluster where the resource that triggered the alert is located |
| namespace | {{ .Labels.namespace }} | The namespace where the resource that triggered the alert is located |
| node | {{ .Labels.node }} | The node where the resource that triggered the alert is located |
| targetType | {{ .Labels.target_type }} | The resource type of the alert target |
| target | {{ .Labels.target }} | The name of the object that triggered the alert |
| value | {{ .Annotations.value }} | The metric value at the time the alert notification was triggered |
| startsAt | {{ .StartsAt }} | The time when the alert started to occur |
| endsAt | {{ .EndsAt }} | The time when the alert ended |
| description | {{ .Annotations.description }} | A detailed description of the alert |
| labels | {{ for .labels }} {{ end }} | All labels of the alert; use the for function to iterate through the labels list to get all label contents. |

"},{"location":"en/end-user/insight/alert-center/msg-template.html#editing-or-deleting-a-message-template","title":"Editing or Deleting a Message Template","text":"

Click ┇ on the right side of the list and select Edit or Delete from the pop-up menu to modify or delete the message template.

                                                                                  Warning

                                                                                  Once a template is deleted, it cannot be recovered, so please use caution when deleting templates.

                                                                                  "},{"location":"en/end-user/insight/alert-center/silent.html","title":"Alert Silence","text":"

                                                                                  Alert silence is a feature that allows alerts meeting certain criteria to be temporarily disabled from sending notifications within a specific time range. This feature helps operations personnel avoid receiving too many noisy alerts during certain operations or events, while also allowing for more precise handling of real issues that need to be addressed.

                                                                                  On the Alert Silence page, you can see two tabs: Active Rule and Expired Rule. The former presents the rules currently in effect, while the latter presents those that were defined in the past but have now expired (or have been deleted by the user).

                                                                                  "},{"location":"en/end-user/insight/alert-center/silent.html#creating-a-silent-rule","title":"Creating a Silent Rule","text":"
1. In the left navigation bar, select Alert -> Noise Reduction -> Alert Silence, and click the Create Silence Rule button.

2. Fill in the parameters for the silence rule, such as cluster, namespace, labels, and time, to define its scope and effective period, then click OK.

3. Return to the rule list, and on the right side of the list, click ┇ to edit or delete a silence rule.

Through the Alert Silence feature, you can flexibly control which alerts should be ignored and when the silence takes effect, improving operational efficiency and reducing the chance of false alerts.

                                                                                  "},{"location":"en/end-user/insight/alert-center/sms-provider.html","title":"Configure Notification Server","text":"

                                                                                  Insight supports SMS notifications and currently sends alert messages using integrated Alibaba Cloud and Tencent Cloud SMS services. This article explains how to configure the SMS notification server in Insight. The variables supported in the SMS signature are the default variables in the message template. As the number of SMS characters is limited, it is recommended to choose more explicit variables.

                                                                                  For information on how to configure SMS recipients, refer to the document: Configure SMS Notification Group.

                                                                                  "},{"location":"en/end-user/insight/alert-center/sms-provider.html#procedure","title":"Procedure","text":"
1. Go to Alert Center -> Notification Settings -> Notification Server.

2. Click Add Notification Server.

                                                                                    • Configure Alibaba Cloud server.

                                                                                      To apply for Alibaba Cloud SMS service, refer to Alibaba Cloud SMS Service.

                                                                                      Field descriptions:

• AccessKey ID: Parameter used by Alibaba Cloud to identify the user.
• AccessKey Secret: Key used by Alibaba Cloud to authenticate the user; it must be kept confidential.
• SMS Signature: The SMS service supports creating signatures that meet requirements according to user needs. When sending an SMS, the SMS platform adds the approved signature to the SMS content before sending it to the recipient.
• Template CODE: The SMS template, i.e., the specific content of the SMS to be sent.
• Parameter Template: The SMS body template can contain variables, which users can use to customize the SMS content.

                                                                                      Please refer to Alibaba Cloud Variable Specification.

                                                                                      Note

                                                                                      Example: The template content defined in Alibaba Cloud is: ${severity}: ${alertname} triggered at ${startat}. Refer to the configuration in the parameter template.

                                                                                    • Configure Tencent Cloud server.

                                                                                      To apply for Tencent Cloud SMS service, please refer to Tencent Cloud SMS.

                                                                                      Field descriptions:

• Secret ID: Parameter used by Tencent Cloud to identify the API caller.
• SecretKey: Parameter used by Tencent Cloud to authenticate the API caller.
• SMS Template ID: The SMS template ID automatically generated by the Tencent Cloud system.
• Signature Content: The SMS signature content, i.e., the full name or abbreviation of the actual website name defined in the Tencent Cloud SMS signature.
• SdkAppId: The SMS SdkAppId, i.e., the actual SdkAppId generated after adding the application in the Tencent Cloud SMS console.
• Parameter Template: The SMS body template can contain variables, which users can use to customize the SMS content. Please refer to: Tencent Cloud Variable Specification.

                                                                                      Note

                                                                                      Example: The template content defined in Tencent Cloud is: {1}: {2} triggered at {3}. Refer to the configuration in the parameter template.
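To make the mapping concrete: the parameter template fills each SMS placeholder from a message-template variable. A hypothetical sketch for the two examples above (the exact field layout in Insight's UI may differ):

```yaml
# Hypothetical mapping for the Alibaba Cloud template "${severity}: ${alertname} triggered at ${startat}"
severity: '{{ .Labels.severity }}'
alertname: '{{ .Labels.alertname }}'
startat: '{{ .StartsAt }}'
# For the Tencent Cloud template, the positional placeholders {1}, {2}, {3} are filled in the same order
```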

                                                                                  "},{"location":"en/end-user/insight/collection-manag/agent-status.html","title":"insight-agent Component Status Explanation","text":"

In AI platform, Insight acts as a multi-cluster observability product. To achieve unified data collection across multiple clusters, users need to install the Helm App insight-agent (installed by default in the insight-system namespace). Refer to How to Install insight-agent.

                                                                                  "},{"location":"en/end-user/insight/collection-manag/agent-status.html#status-explanation","title":"Status Explanation","text":"

                                                                                  In the \"Observability\" -> \"Collection Management\" section, you can view the installation status of insight-agent in each cluster.

• Not Installed: insight-agent is not installed in the insight-system namespace of the cluster.
• Running: insight-agent is successfully installed in the cluster, and all deployed components are running.
• Error: The Helm deployment failed, or some deployed components are not in a running state.

                                                                                  You can troubleshoot using the following steps:

1. Run the following command. If the status is deployed, proceed to the next step. If it is failed, it is recommended to uninstall and reinstall it from Container Management -> Helm Apps, as it may affect application upgrades:

```sh
helm list -n insight-system
```
                                                                                  2. Run the following command or check the status of the deployed components in Insight -> Data Collection . If there are Pods not in the Running state, restart the containers in an abnormal state.

```sh
kubectl get pods -n insight-system
```
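If a component is stuck in an abnormal state, one way to restart it, assuming its Pods are managed by a controller (such as a Deployment) that will recreate them, is to delete the abnormal Pod:

```sh
# The controller recreates the Pod automatically after deletion
kubectl delete pod <abnormal-pod-name> -n insight-system
```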
                                                                                  "},{"location":"en/end-user/insight/collection-manag/agent-status.html#additional-notes","title":"Additional Notes","text":"
                                                                                  1. The resource consumption of the Prometheus metric collection component in insight-agent is directly proportional to the number of Pods running in the cluster. Please adjust the resources for Prometheus according to the cluster size. Refer to Prometheus Resource Planning.

                                                                                  2. The storage capacity of the vmstorage metric storage component in the global service cluster is directly proportional to the total number of Pods in the clusters.

• Please contact the platform administrator to adjust the disk capacity of vmstorage based on the cluster size. Refer to vmstorage Disk Capacity Planning.
• Adjust the vmstorage disk based on the multi-cluster scale. Refer to vmstorage Disk Expansion.
                                                                                  "},{"location":"en/end-user/insight/collection-manag/collection-manag.html","title":"Data Collection","text":"

The Data Collection page centrally manages and displays the installation status of the insight-agent collection plug-in in each cluster. It helps users quickly check the health of the collection plug-in and provides a quick entry for configuring collection rules.

                                                                                  The specific operation steps are as follows:

1. Click in the upper left corner and select Insight -> Data Collection.

                                                                                  2. You can view the status of all cluster collection plug-ins.

3. When the cluster has insight-agent connected and running, click a cluster name to enter its details.

                                                                                  4. In the Service Monitor tab, click the shortcut link to jump to Container Management -> CRD to add service discovery rules.

                                                                                  "},{"location":"en/end-user/insight/collection-manag/metric-collect.html","title":"Metrics Retrieval Methods","text":"

                                                                                  Prometheus primarily uses the Pull approach to retrieve monitoring metrics from target services' exposed endpoints. Therefore, it requires configuring proper scraping jobs to request monitoring data and write it into the storage provided by Prometheus. Currently, Prometheus offers several configurations for these jobs:

                                                                                  • Native Job Configuration: This provides native Prometheus job configuration for scraping.
                                                                                  • Pod Monitor: In the Kubernetes ecosystem, it allows scraping of monitoring data from Pods using Prometheus Operator.
                                                                                  • Service Monitor: In the Kubernetes ecosystem, it allows scraping monitoring data from Endpoints of Services using Prometheus Operator.

                                                                                  Note

[ ] indicates an optional configuration item.

                                                                                  "},{"location":"en/end-user/insight/collection-manag/metric-collect.html#native-job-configuration","title":"Native Job Configuration","text":"

The relevant configuration items are explained as follows:

```yaml
# Name of the scraping job, also adds a label (job=job_name) to the scraped metrics
job_name: <job_name>

# Time interval between scrapes
[ scrape_interval: <duration> | default = <global_config.scrape_interval> ]

# Timeout for scrape requests
[ scrape_timeout: <duration> | default = <global_config.scrape_timeout> ]

# URI path for the scrape request
[ metrics_path: <path> | default = /metrics ]

# Handling of label conflicts between scraped labels and labels added by the backend Prometheus.
# true: Retains the scraped labels and ignores conflicting labels from the backend Prometheus.
# false: Adds an "exported_<original-label>" prefix to the scraped labels and includes the additional labels added by the backend Prometheus.
[ honor_labels: <boolean> | default = false ]

# Whether to use the timestamp generated by the target being scraped.
# true: Uses the timestamp from the target if available.
# false: Ignores the timestamp from the target.
[ honor_timestamps: <boolean> | default = true ]

# Protocol for the scrape request: http or https
[ scheme: <scheme> | default = http ]

# URL parameters for the scrape request
params:
  [ <string>: [<string>, ...] ]

# Set the value of the `Authorization` header in the scrape request through basic authentication. password/password_file are mutually exclusive, with password_file taking precedence.
basic_auth:
  [ username: <string> ]
  [ password: <secret> ]
  [ password_file: <string> ]

# Set the value of the `Authorization` header in the scrape request through bearer token authentication. bearer_token/bearer_token_file are mutually exclusive, with bearer_token taking precedence.
[ bearer_token: <secret> ]

# Set the value of the `Authorization` header in the scrape request through bearer token authentication. bearer_token/bearer_token_file are mutually exclusive, with bearer_token taking precedence.
[ bearer_token_file: <filename> ]

# Whether the scrape connection should use a TLS secure channel, configure the proper TLS parameters
tls_config:
  [ <tls_config> ]

# Use a proxy service to scrape the metrics from the target, specify the address of the proxy service.
[ proxy_url: <string> ]

# Specify the targets using static configuration, see explanation below.
static_configs:
  [ - <static_config> ... ]

# CVM service discovery configuration, see explanation below.
cvm_sd_configs:
  [ - <cvm_sd_config> ... ]

# After scraping the data, rewrite the labels of the proper target using the relabel mechanism. Executes multiple relabel rules in order.
# See explanation below for relabel_config.
relabel_configs:
  [ - <relabel_config> ... ]

# Before writing the scraped data, rewrite the values of the labels using the relabel mechanism. Executes multiple relabel rules in order.
# See explanation below for relabel_config.
metric_relabel_configs:
  [ - <relabel_config> ... ]

# Limit the number of data points per scrape, 0: no limit, default is 0
[ sample_limit: <int> | default = 0 ]

# Limit the number of targets per scrape, 0: no limit, default is 0
[ target_limit: <int> | default = 0 ]
```
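As a small concrete instance of the job configuration above, a static scrape job might look like this (the job name and target address are illustrative):

```yaml
job_name: my-app                   # illustrative job name
scrape_interval: 30s
metrics_path: /metrics
static_configs:
  - targets: ["10.0.0.1:8080"]     # illustrative endpoint exposing Prometheus metrics
    labels:
      env: test
```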
                                                                                  "},{"location":"en/end-user/insight/collection-manag/metric-collect.html#pod-monitor","title":"Pod Monitor","text":"

The corresponding configuration items are explained as follows:

                                                                                  # Prometheus Operator CRD version\napiVersion: monitoring.coreos.com/v1\n# proper Kubernetes resource type, here it is PodMonitor\nkind: PodMonitor\n# proper Kubernetes Metadata, only the name needs to be concerned. If jobLabel is not specified, the value of the job label in the scraped metrics will be <namespace>/<name>\nmetadata:\n  name: redis-exporter # Specify a unique name\n  namespace: cm-prometheus  # Fixed namespace, no need to modify\n# Describes the selection and configuration of the target Pods to be scraped\n  labels:\n    operator.insight.io/managed-by: insight # Label indicating managed by Insight\nspec:\n  # Specify the label of the proper Pod, pod monitor will use this value as the job label value.\n  # If viewing the Pod YAML, use the values in pod.metadata.labels.\n  # If viewing Deployment/Daemonset/Statefulset, use spec.template.metadata.labels.\n  [ jobLabel: string ]\n  # Adds the proper Pod's Labels to the Target's Labels\n  [ podTargetLabels: []string ]\n  # Limit the number of data points per scrape, 0: no limit, default is 0\n  [ sampleLimit: uint64 ]\n  # Limit the number of targets per scrape, 0: no limit, default is 0\n  [ targetLimit: uint64 ]\n  # Configure the Prometheus HTTP endpoints that need to be scraped and exposed. Multiple endpoints can be configured.\n  podMetricsEndpoints:\n  [ - <endpoint_config> ... ] # See explanation below for endpoint\n  # Select the namespaces where the monitored Pods are located. Leave it blank to select all namespaces.\n  [ namespaceSelector: ]\n    # Select all namespaces\n    [ any: bool ]\n    # Specify the list of namespaces to be selected\n    [ matchNames: []string ]\n  # Specify the Label values of the Pods to be monitored in order to locate the target Pods [K8S metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta)\n  selector:\n    [ matchExpressions: array ]\n      [ example: - {key: tier, operator: In, values: [cache]} ]\n    [ matchLabels: object ]\n      [ example: k8s-app: redis-exporter ]\n
                                                                                  "},{"location":"en/end-user/insight/collection-manag/metric-collect.html#example-1","title":"Example 1","text":"
                                                                                  apiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: redis-exporter # Specify a unique name\n  namespace: cm-prometheus # Fixed namespace, do not modify\n  labels:\n    operator.insight.io/managed-by: insight  # Label indicating managed by Insight, required.\nspec:\n  podMetricsEndpoints:\n    - interval: 30s\n      port: metric-port # Specify the Port Name proper to Prometheus Exporter in the pod YAML\n      path: /metrics # Specify the value of the Path proper to Prometheus Exporter, if not specified, default is /metrics\n      relabelings:\n        - action: replace\n          sourceLabels:\n            - instance\n          regex: (.*)\n          targetLabel: instance\n          replacement: \"crs-xxxxxx\" # Adjust to the proper Redis instance ID\n        - action: replace\n          sourceLabels:\n            - instance\n          regex: (.*)\n          targetLabel: ip\n          replacement: \"1.x.x.x\" # Adjust to the proper Redis instance IP\n  namespaceSelector: # Select the namespaces where the monitored Pods are located\n    matchNames:\n      - redis-test\n  selector: # Specify the Label values of the Pods to be monitored in order to locate the target pods\n    matchLabels:\n      k8s-app: redis-exporter\n
                                                                                  "},{"location":"en/end-user/insight/collection-manag/metric-collect.html#example-2","title":"Example 2","text":"
                                                                                  job_name: prometheus\nscrape_interval: 30s\nstatic_configs:\n- targets:\n  - 127.0.0.1:9090\n
                                                                                  "},{"location":"en/end-user/insight/collection-manag/metric-collect.html#service-monitor","title":"Service Monitor","text":"

The corresponding configuration items are explained as follows:

                                                                                  # Prometheus Operator CRD version\napiVersion: monitoring.coreos.com/v1\n# proper Kubernetes resource type, here it is ServiceMonitor\nkind: ServiceMonitor\n# proper Kubernetes Metadata, only the name needs to be concerned. If jobLabel is not specified, the value of the job label in the scraped metrics will be the name of the Service.\nmetadata:\n  name: redis-exporter # Specify a unique name\n  namespace: cm-prometheus  # Fixed namespace, no need to modify\n# Describes the selection and configuration of the target Pods to be scraped\n  labels:\n    operator.insight.io/managed-by: insight # Label indicating managed by Insight, required.\nspec:\n  # Specify the label(metadata/labels) of the proper Pod, service monitor will use this value as the job label value.\n  [ jobLabel: string ]\n  # Adds the Labels of the proper service to the Target's Labels\n  [ targetLabels: []string ]\n  # Adds the Labels of the proper Pod to the Target's Labels\n  [ podTargetLabels: []string ]\n  # Limit the number of data points per scrape, 0: no limit, default is 0\n  [ sampleLimit: uint64 ]\n  # Limit the number of targets per scrape, 0: no limit, default is 0\n  [ targetLimit: uint64 ]\n  # Configure the Prometheus HTTP endpoints that need to be scraped and exposed. Multiple endpoints can be configured.\n  endpoints:\n  [ - <endpoint_config> ... ] # See explanation below for endpoint\n  # Select the namespaces where the monitored Pods are located. Leave it blank to select all namespaces.\n  [ namespaceSelector: ]\n    # Select all namespaces\n    [ any: bool ]\n    # Specify the list of namespaces to be selected\n    [ matchNames: []string ]\n  # Specify the Label values of the Pods to be monitored in order to locate the target Pods [K8S metav1.LabelSelector](https://v1-17.docs.kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#labelselector-v1-meta)\n  selector:\n    [ matchExpressions: array ]\n      [ example: - {key: tier, operator: In, values: [cache]} ]\n    [ matchLabels: object ]\n      [ example: k8s-app: redis-exporter ]\n
                                                                                  "},{"location":"en/end-user/insight/collection-manag/metric-collect.html#example","title":"Example","text":"
                                                                                  apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: go-demo # Specify a unique name\n  namespace: cm-prometheus # Fixed namespace, do not modify\n  labels:\n    operator.insight.io/managed-by: insight  # Label indicating managed by Insight, required.\nspec:\n  endpoints:\n    - interval: 30s\n      # Specify the Port Name proper to Prometheus Exporter in the service YAML\n      port: 8080-8080-tcp\n      # Specify the value of the Path proper to Prometheus Exporter, if not specified, default is /metrics\n      path: /metrics\n      relabelings:\n        # ** There must be a label named 'application', assuming there is a label named 'app' in k8s,\n        # we replace it with 'application' using the relabel 'replace' action\n        - action: replace\n          sourceLabels: [__meta_kubernetes_pod_label_app]\n          targetLabel: application\n  # Select the namespace where the monitored service is located\n  namespaceSelector:\n    matchNames:\n      - golang-demo\n  # Specify the Label values of the service to be monitored in order to locate the target service\n  selector:\n    matchLabels:\n      app: golang-app-demo\n
                                                                                  "},{"location":"en/end-user/insight/collection-manag/metric-collect.html#endpoint_config","title":"endpoint_config","text":"

The corresponding configuration items are explained as follows:

                                                                                  # The name of the proper port. Please note that it's not the actual port number.\n# Default: 80. Possible values are as follows:\n# ServiceMonitor: corresponds to Service>spec/ports/name;\n# PodMonitor: explained as follows:\n#   If viewing the Pod YAML, take the value from pod.spec.containers.ports.name.\n#   If viewing Deployment/DaemonSet/StatefulSet, take the value from spec.template.spec.containers.ports.name.\n[ port: string | default = 80]\n# The URI path for the scrape request.\n[ path: string | default = /metrics ]\n# The protocol for the scrape: http or https.\n[ scheme: string | default = http]\n# URL parameters for the scrape request.\n[ params: map[string][]string]\n# The interval between scrape requests.\n[ interval: string | default = 30s ]\n# The timeout for the scrape request.\n[ scrapeTimeout: string | default = 30s]\n# Whether the scrape connection should be made over a secure TLS channel, and the TLS configuration.\n[ tlsConfig: TLSConfig ]\n# Read the bearer token value from the specified file and include it in the headers of the scrape request.\n[ bearerTokenFile: string ]\n# Read the bearer token from the specified K8S secret key. Note that the secret namespace must match the PodMonitor/ServiceMonitor.\n[ bearerTokenSecret: string ]\n# Handling conflicts when scraped labels conflict with labels added by the backend Prometheus.\n# true: Keep the scraped labels and ignore the conflicting labels from the backend Prometheus.\n# false: For conflicting labels, prefix the scraped label with 'exported_<original-label>' and add the labels added by the backend Prometheus.\n[ honorLabels: bool | default = false ]\n# Whether to use the timestamp generated on the target during the scrape.\n# true: Use the timestamp on the target if available.\n# false: Ignore the timestamp on the target.\n[ honorTimestamps: bool | default = true ]\n# Basic authentication credentials. Fill in the values of username/password from the proper K8S secret key. Note that the secret namespace must match the PodMonitor/ServiceMonitor.\n[ basicAuth: BasicAuth ]\n# Scrape the metrics from the target through a proxy server. Specify the address of the proxy server.\n[ proxyUrl: string ]\n# After scraping the data, rewrite the values of the labels on the target using the relabeling mechanism. Multiple relabel rules are executed in order.\n# See explanation below for relabel_config\nrelabelings:\n[ - <relabel_config> ...]\n# Before writing the scraped data, rewrite the values of the proper labels on the target using the relabeling mechanism. Multiple relabel rules are executed in order.\n# See explanation below for relabel_config\nmetricRelabelings:\n[ - <relabel_config> ...]\n
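As an illustration, here is a minimal sketch of an endpoint_config that scrapes over HTTPS with basic authentication; the port name https-metrics and the Secret name metrics-basic-auth are assumptions for the example, and the Secret must live in the same namespace as the PodMonitor/ServiceMonitor:

endpoints:\n  - port: https-metrics          # assumed port name in the Service\n    scheme: https\n    interval: 30s\n    tlsConfig:\n      insecureSkipVerify: true     # skip certificate verification; tighten for production\n    basicAuth:\n      username:\n        name: metrics-basic-auth   # assumed Secret name\n        key: username\n      password:\n        name: metrics-basic-auth\n        key: password\n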
                                                                                  "},{"location":"en/end-user/insight/collection-manag/metric-collect.html#relabel_config","title":"relabel_config","text":"

The corresponding configuration items are explained as follows:

                                                                                  # Specifies which labels to take from the original labels for relabeling. The values taken are concatenated using the separator defined in the configuration.\n# For PodMonitor/ServiceMonitor, the proper configmap is sourceLabels.\n[ source_labels: '[' <labelname> [, ...] ']' ]\n# Defines the character used to concatenate the values of the labels to be relabeled. Default is ';'.\n[ separator: <string> | default = ; ]\n\n# When the action is replace/hashmod, target_label is used to specify the proper label name.\n# For PodMonitor/ServiceMonitor, the proper configmap is targetLabel.\n[ target_label: <labelname> ]\n\n# Regular expression used to match the values of the source labels.\n[ regex: <regex> | default = (.*) ]\n\n# Used when action is hashmod, it takes the modulus value based on the MD5 hash of the source label's value.\n[ modulus: <int> ]\n\n# Used when action is replace, it defines the expression to replace when the regex matches. It can use regular expression replacement with regex.\n[ replacement: <string> | default = $1 ]\n\n# Actions performed based on the matched values of regex. The available actions are as follows, with replace being the default:\n# replace: If the regex matches, replace the proper value with the value defined in replacement. Set the value using target_label and add the proper label.\n# keep: If the regex doesn't match, discard the value.\n# drop: If the regex matches, discard the value.\n# hashmod: Take the modulus of the MD5 hash of the source label's value based on the value specified in modulus.\n# Add a new label with a label name specified by target_label.\n# labelmap: If the regex matches, replace the proper label name with the value specified in replacement.\n# labeldrop: If the regex matches, delete the proper label.\n# labelkeep: If the regex doesn't match, delete the proper label.\n[ action: <relabel_action> | default = replace ]\n
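For example, a minimal sketch (in PodMonitor/ServiceMonitor notation) of a rule that uses the drop action to skip scrape targets whose container port name matches an assumed debug-* pattern:

relabelings:\n  - action: drop\n    sourceLabels: [__meta_kubernetes_pod_container_port_name]\n    regex: debug-.*   # assumed port-name pattern; matching targets are not scraped\n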
                                                                                  "},{"location":"en/end-user/insight/collection-manag/probe-module.html","title":"Custom probers","text":"

                                                                                  Insight uses the Blackbox Exporter provided by Prometheus as a blackbox monitoring solution, allowing detection of target instances via HTTP, HTTPS, DNS, ICMP, TCP, and gRPC. It can be used in the following scenarios:

                                                                                  • HTTP/HTTPS: URL/API availability monitoring
                                                                                  • ICMP: Host availability monitoring
                                                                                  • TCP: Port availability monitoring
                                                                                  • DNS: Domain name resolution

This page explains how to configure custom probers in an existing Blackbox ConfigMap.

The ICMP prober is not enabled by default in Insight because it requires elevated permissions. Therefore, we will use the HTTP prober as an example to demonstrate how to modify the ConfigMap for custom HTTP probing.

                                                                                  "},{"location":"en/end-user/insight/collection-manag/probe-module.html#procedure","title":"Procedure","text":"
                                                                                  1. Go to Clusters in Container Management and enter the details of the target cluster.
                                                                                  2. Click the left navigation bar and select ConfigMaps & Secrets -> ConfigMaps .
                                                                                  3. Find the ConfigMap named insight-agent-prometheus-blackbox-exporter and click Edit YAML .

                                                                                    Add custom probers under modules :

HTTP Prober:

modules:\n  http_2xx:\n    prober: http\n    timeout: 5s\n    http:\n      valid_http_versions: [HTTP/1.1, HTTP/2]\n      valid_status_codes: []  # Defaults to 2xx\n      method: GET\n

ICMP Prober:

modules:\n  ICMP: # Example of ICMP prober configuration\n    prober: icmp\n    timeout: 5s\n    icmp:\n      preferred_ip_protocol: ip4\n  icmp_example: # Example 2 of ICMP prober configuration\n    prober: icmp\n    timeout: 5s\n    icmp:\n      preferred_ip_protocol: \"ip4\"\n      source_ip_address: \"127.0.0.1\"\n
                                                                                  Since ICMP requires higher permissions, we also need to elevate the pod permissions. Otherwise, an operation not permitted error will occur. There are two ways to elevate permissions:

                                                                                  • Directly edit the BlackBox Exporter deployment file to enable it

                                                                                    apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: insight-agent-prometheus-blackbox-exporter\n  namespace: insight-system\nspec:\n  template:\n    spec:\n      containers:\n        - name: blackbox-exporter\n          image: # ... (image, args, ports, etc. remain unchanged)\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            allowPrivilegeEscalation: false\n            capabilities:\n              add:\n              - NET_RAW\n              drop:\n              - ALL\n            readOnlyRootFilesystem: true\n            runAsGroup: 0\n            runAsNonRoot: false\n            runAsUser: 0\n
                                                                                  • Elevate permissions via helm upgrade

                                                                                    prometheus-blackbox-exporter:\n  enabled: true\n  securityContext:\n    runAsUser: 0\n    runAsGroup: 0\n    readOnlyRootFilesystem: true\n    runAsNonRoot: false\n    allowPrivilegeEscalation: false\n    capabilities:\n      add: [\"NET_RAW\"]\n
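For reference, a sketch of how these values could be applied with helm upgrade , assuming the release is named insight-agent in the insight-system namespace and the values above are saved as blackbox-values.yaml (the release, chart, and file names are assumptions):

helm upgrade insight-agent <insight-agent-chart> -n insight-system --reuse-values -f blackbox-values.yaml\n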

                                                                                  Info

                                                                                  For more probers, refer to blackbox_exporter Configuration.
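If you prefer the command line, the same ConfigMap can also be edited directly; a sketch assuming the default insight-system namespace used by Insight:

kubectl edit configmap insight-agent-prometheus-blackbox-exporter -n insight-system\n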

                                                                                  "},{"location":"en/end-user/insight/collection-manag/probe-module.html#other-references","title":"Other References","text":"

                                                                                  The following YAML file contains various probers such as HTTP, TCP, SMTP, ICMP, and DNS. You can modify the configuration file of insight-agent-prometheus-blackbox-exporter according to your needs.

                                                                                  Click to view the complete YAML file
                                                                                  kind: ConfigMap\napiVersion: v1\nmetadata:\n  name: insight-agent-prometheus-blackbox-exporter\n  namespace: insight-system\n  labels:\n    app.kubernetes.io/instance: insight-agent\n    app.kubernetes.io/managed-by: Helm\n    app.kubernetes.io/name: prometheus-blackbox-exporter\n    app.kubernetes.io/version: v0.24.0\n    helm.sh/chart: prometheus-blackbox-exporter-8.8.0\n  annotations:\n    meta.helm.sh/release-name: insight-agent\n    meta.helm.sh/release-namespace: insight-system\ndata:\n  blackbox.yaml: |\n    modules:\n      HTTP_GET:\n        prober: http\n        timeout: 5s\n        http:\n          method: GET\n          valid_http_versions: [\"HTTP/1.1\", \"HTTP/2.0\"]\n          follow_redirects: true\n          preferred_ip_protocol: \"ip4\"\n      HTTP_POST:\n        prober: http\n        timeout: 5s\n        http:\n          method: POST\n          body_size_limit: 1MB\n      TCP:\n        prober: tcp\n        timeout: 5s\n      # Not enabled by default:\n      # ICMP:\n      #   prober: icmp\n      #   timeout: 5s\n      #   icmp:\n      #     preferred_ip_protocol: ip4\n      SSH:\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n          - expect: \"^SSH-2.0-\"\n      POP3S:\n        prober: tcp\n        tcp:\n          query_response:\n          - expect: \"^+OK\"\n          tls: true\n          tls_config:\n            insecure_skip_verify: false\n      http_2xx_example:               # http prober example\n        prober: http\n        timeout: 5s                   # probe timeout\n        http:\n          valid_http_versions: [\"HTTP/1.1\", \"HTTP/2.0\"]                   # Version in the response, usually default\n          valid_status_codes: []  # Defaults to 2xx                       # Valid range of response codes, probe successful if within this range\n          method: GET                 # request method\n          headers:                    # request headers\n            Host: vhost.example.com\n            Accept-Language: en-US\n            Origin: example.com\n          no_follow_redirects: false  # allow redirects\n          fail_if_ssl: false   \n          fail_if_not_ssl: false\n          fail_if_body_matches_regexp:\n            - \"Could not connect to database\"\n          fail_if_body_not_matches_regexp:\n            - \"Download the latest version here\"\n          fail_if_header_matches: # Verifies that no cookies are set\n            - header: Set-Cookie\n              allow_missing: true\n              regexp: '.*'\n          fail_if_header_not_matches:\n            - header: Access-Control-Allow-Origin\n              regexp: '(\\*|example\\.com)'\n          tls_config:                  # tls configuration for https requests\n            insecure_skip_verify: false\n          preferred_ip_protocol: \"ip4\" # defaults to \"ip6\"                 # Preferred IP protocol version\n          ip_protocol_fallback: false  # no fallback to \"ip6\"            \n      http_post_2xx:                   # http prober example with body\n        prober: http\n        timeout: 5s\n        http:\n          method: POST                 # probe request method\n          headers:\n            Content-Type: application/json\n          body: '{\"username\":\"admin\",\"password\":\"123456\"}'                   # body carried during probe\n      http_basic_auth_example:         # prober example with username and password\n        prober: http\n        
timeout: 5s\n        http:\n          method: POST\n          headers:\n            Host: \"login.example.com\"\n          basic_auth:                  # username and password to be added during probe\n            username: \"username\"\n            password: \"mysecret\"\n      http_custom_ca_example:\n        prober: http\n        http:\n          method: GET\n          tls_config:                  # root certificate used during probe\n            ca_file: \"/certs/my_cert.crt\"\n      http_gzip:\n        prober: http\n        http:\n          method: GET\n          compression: gzip            # compression method used during probe\n      http_gzip_with_accept_encoding:\n        prober: http\n        http:\n          method: GET\n          compression: gzip\n          headers:\n            Accept-Encoding: gzip\n      tls_connect:                     # TCP prober example\n        prober: tcp\n        timeout: 5s\n        tcp:\n          tls: true                    # use TLS\n      tcp_connect_example:\n        prober: tcp\n        timeout: 5s\n      imap_starttls:                   # IMAP email server probe configuration example\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - expect: \"OK.*STARTTLS\"\n            - send: \". STARTTLS\"\n            - expect: \"OK\"\n            - starttls: true\n            - send: \". capability\"\n            - expect: \"CAPABILITY IMAP4rev1\"\n      smtp_starttls:                   # SMTP email server probe configuration example\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - expect: \"^220 ([^ ]+) ESMTP (.+)$\"\n            - send: \"EHLO prober\\r\"\n            - expect: \"^250-STARTTLS\"\n            - send: \"STARTTLS\\r\"\n            - expect: \"^220\"\n            - starttls: true\n            - send: \"EHLO prober\\r\"\n            - expect: \"^250-AUTH\"\n            - send: \"QUIT\\r\"\n      irc_banner_example:\n        prober: tcp\n        timeout: 5s\n        tcp:\n          query_response:\n            - send: \"NICK prober\"\n            - send: \"USER prober prober prober :prober\"\n            - expect: \"PING :([^ ]+)\"\n              send: \"PONG ${1}\"\n            - expect: \"^:[^ ]+ 001\"\n      # icmp_example:                    # ICMP prober configuration example\n      #  prober: icmp\n      #  timeout: 5s\n      #  icmp:\n      #    preferred_ip_protocol: \"ip4\"\n      #    source_ip_address: \"127.0.0.1\"\n      dns_udp_example:                 # DNS query example using UDP\n        prober: dns\n        timeout: 5s\n        dns:\n          query_name: \"www.prometheus.io\"                 # domain name to resolve\n          query_type: \"A\"              # type proper to this domain\n          valid_rcodes:\n          - NOERROR\n          validate_answer_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n            fail_if_all_match_regexp:\n            - \".*127.0.0.1\"\n            fail_if_not_matches_regexp:\n            - \"www.prometheus.io.\\t300\\tIN\\tA\\t127.0.0.1\"\n            fail_if_none_matches_regexp:\n            - \"127.0.0.1\"\n          validate_authority_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n          validate_additional_rrs:\n            fail_if_matches_regexp:\n            - \".*127.0.0.1\"\n      dns_soa:\n        prober: dns\n        dns:\n          query_name: \"prometheus.io\"\n          query_type: \"SOA\"\n      dns_tcp_example:         
      # DNS query example using TCP\n        prober: dns\n        dns:\n          transport_protocol: \"tcp\" # defaults to \"udp\"\n          preferred_ip_protocol: \"ip4\" # defaults to \"ip6\"\n          query_name: \"www.prometheus.io\"\n
                                                                                  "},{"location":"en/end-user/insight/collection-manag/service-monitor.html","title":"Configure service discovery rules","text":"

Insight supports creating the ServiceMonitor CRD through Container Management to meet custom service discovery and collection requirements. You can use a ServiceMonitor to define the namespace scope in which Pods are discovered and to select the monitored Services via matchLabel .

                                                                                  "},{"location":"en/end-user/insight/collection-manag/service-monitor.html#prerequisites","title":"Prerequisites","text":"

The cluster has the Helm App insight-agent installed and in the Running state.

                                                                                  "},{"location":"en/end-user/insight/collection-manag/service-monitor.html#steps","title":"Steps","text":"
                                                                                  1. Select Data Collection on the left navigation bar to view the status of all cluster collection plug-ins.

                                                                                  2. Click a cluster name to enter the collection configuration details.

                                                                                  3. Click the link to jump to Container Management to create a Service Monitor.

apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: micrometer-demo # (1)\n  namespace: insight-system # (2)\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  endpoints: # (3)\n    - honorLabels: true\n      interval: 15s\n      path: /actuator/prometheus\n      port: http\n  namespaceSelector: # (4)\n    matchNames:\n      - insight-system  # (5)\n  selector: # (6)\n    matchLabels:\n      micrometer-prometheus-discovery: \"true\"\n
                                                                                    1. Specify the name of the ServiceMonitor.
                                                                                    2. Specify the namespace of the ServiceMonitor.
                                                                                    3. This is the service endpoint, which represents the address where Prometheus collects Metrics. endpoints is an array, and multiple endpoints can be created at the same time. Each endpoint contains three fields, and the meaning of each field is as follows:

                                                                                      • interval : Specifies the collection cycle of Prometheus for the current endpoint . The unit is seconds, set to 15s in this example.
                                                                                      • path : Specifies the collection path of Prometheus. In this example, it is specified as /actuator/prometheus .
• port : Specifies the port used to collect data. Its value is the port name defined in the Service being collected, not the port number.
                                                                                    4. This is the scope of the Service that needs to be discovered. namespaceSelector contains two mutually exclusive fields, and the meaning of the fields is as follows:

• any : The only allowed value is true . When this field is set, it watches changes of all Services that meet the selector filtering conditions, regardless of namespace.
                                                                                      • matchNames : An array value that specifies the scope of namespace to be monitored. For example, if you only want to monitor the Services in two namespaces, default and insight-system, the matchNames are set as follows:

                                                                                        namespaceSelector:\n  matchNames:\n    - default\n    - insight-system\n
                                                                                    5. The namespace where the application that needs to expose metrics is located

                                                                                    6. Used to select the Service
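After the ServiceMonitor is created, you can verify that it exists in the target namespace; a sketch assuming kubectl access to the cluster:

kubectl get servicemonitor micrometer-demo -n insight-system\n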
                                                                                  "},{"location":"en/end-user/insight/dashboard/dashboard.html","title":"Dashboard","text":"

                                                                                  Grafana is a cross-platform open source visual analysis tool. Insight uses open source Grafana to provide monitoring services, and supports viewing resource consumption from multiple dimensions such as clusters, nodes, and namespaces.

                                                                                  For more information on open source Grafana, see Grafana Official Documentation.

                                                                                  "},{"location":"en/end-user/insight/dashboard/dashboard.html#steps","title":"Steps","text":"
                                                                                  1. Select Dashboard from the left navigation bar .

                                                                                    • In the Insight / Overview dashboard, you can view the resource usage of multiple clusters and analyze resource usage, network, storage, and more based on dimensions such as namespaces and Pods.

                                                                                    • Click the dropdown menu in the upper-left corner of the dashboard to switch between clusters.

                                                                                    • Click the lower-right corner of the dashboard to switch the time range for queries.

                                                                                  2. Insight provides several recommended dashboards that allow monitoring from different dimensions such as nodes, namespaces, and workloads. Switch between dashboards by clicking the insight-system / Insight / Overview section.

                                                                                  Note

                                                                                  1. For accessing Grafana UI, refer to Access Native Grafana.

                                                                                  2. For importing custom dashboards, refer to Importing Custom Dashboards.

                                                                                  "},{"location":"en/end-user/insight/dashboard/import-dashboard.html","title":"Import Custom Dashboards","text":"

                                                                                  By using Grafana CRD, you can incorporate the management and deployment of dashboards into the lifecycle management of Kubernetes. This enables version control, automated deployment, and cluster-level management of dashboards. This page describes how to import custom dashboards using CRD and the UI interface.

                                                                                  "},{"location":"en/end-user/insight/dashboard/import-dashboard.html#steps","title":"Steps","text":"
1. Log in to the AI platform and go to Container Management . Select the kpanda-global-cluster from the cluster list.

2. Choose Custom Resources from the left navigation bar. Look for grafanadashboards.integreatly.org in the list and click it to view the details.

3. Click YAML Create and use the following template. Replace the dashboard JSON in the json field.

                                                                                    • namespace : Specify the target namespace.
                                                                                    • name : Provide a name for the dashboard.
                                                                                    • label : Mandatory. Set the label as operator.insight.io/managed-by: insight .
                                                                                    apiVersion: integreatly.org/v1alpha1\nkind: GrafanaDashboard\nmetadata:\n  labels:\n    app: insight-grafana-operator\n    operator.insight.io/managed-by: insight\n  name: sample-dashboard\n  namespace: insight-system\nspec:\n  json: >\n    {\n      \"id\": null,\n      \"title\": \"Simple Dashboard\",\n      \"tags\": [],\n      \"style\": \"dark\",\n      \"timezone\": \"browser\",\n      \"editable\": true,\n      \"hideControls\": false,\n      \"graphTooltip\": 1,\n      \"panels\": [],\n      \"time\": {\n        \"from\": \"now-6h\",\n        \"to\": \"now\"\n      },\n      \"timepicker\": {\n        \"time_options\": [],\n        \"refresh_intervals\": []\n      },\n      \"templating\": {\n        \"list\": []\n      },\n      \"annotations\": {\n        \"list\": []\n      },\n      \"refresh\": \"5s\",\n      \"schemaVersion\": 17,\n      \"version\": 0,\n      \"links\": []\n    }\n
                                                                                  4. After clicking OK , wait for a while to view the newly imported dashboard in Dashboard .
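Alternatively, since the dashboard is a standard Kubernetes custom resource, the same manifest can be applied from the command line; a sketch assuming the YAML above is saved as sample-dashboard.yaml and you have kubectl access to the cluster:

kubectl apply -f sample-dashboard.yaml\n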

                                                                                  Info

                                                                                  If you need to customize the dashboard, refer to Add Dashboard Panel.

                                                                                  "},{"location":"en/end-user/insight/dashboard/login-grafana.html","title":"Access Native Grafana","text":"

Please make sure that the Helm App Insight in your global management cluster is in the Running state.

                                                                                  The specific operation steps are as follows:

                                                                                  1. Log in to the console to access native Grafana.

                                                                                    Access address: http://ip:port/ui/insight-grafana

                                                                                    For example: http://10.6.10.233:30209/ui/insight-grafana

                                                                                  2. Click Login in the lower right corner, and use the default username and password to log in.

                                                                                    • Default username: admin

                                                                                    • Default password: admin

                                                                                  3. Click Log in to complete the login.

                                                                                  "},{"location":"en/end-user/insight/dashboard/overview.html","title":"Overview","text":"

                                                                                  Insight only collects data from clusters that have insight-agent installed and running in a normal state. The overview provides an overview of resources across multiple clusters:

                                                                                  • Alert Statistics: Provides statistics on active alerts across all clusters.
                                                                                  • Resource Consumption: Displays the resource usage trends for the top 5 clusters and nodes in the past hour, based on CPU usage, memory usage, and disk usage.
                                                                                  • By default, the sorting is based on CPU usage. You can switch the metric to sort clusters and nodes.
                                                                                  • Resource Trends: Shows the trends in the number of nodes over the past 15 days and the running trend of pods in the last hour.
                                                                                  • Service Requests Ranking: Displays the top 5 services with the highest request latency and error rates, along with their respective clusters and namespaces in the multi-cluster environment.
                                                                                  "},{"location":"en/end-user/insight/dashboard/overview.html#operation-procedure","title":"Operation procedure","text":"

                                                                                  Select Overview in the left navigation bar to enter the details page.

                                                                                  "},{"location":"en/end-user/insight/data-query/log.html","title":"Log query","text":"

                                                                                  By default, Insight collects node logs, container logs, and Kubernetes audit logs. In the log query page, you can search for standard output (stdout) logs within the permissions of your login account. This includes node logs, product logs, and Kubernetes audit logs. You can quickly find the desired logs among a large volume of logs. Additionally, you can use the source information and contextual raw data of the logs to assist in troubleshooting and issue resolution.

                                                                                  "},{"location":"en/end-user/insight/data-query/log.html#prerequisites","title":"Prerequisites","text":"

The cluster has insight-agent installed and the application is in the Running state.

                                                                                  "},{"location":"en/end-user/insight/data-query/log.html#query-log","title":"Query log","text":"
                                                                                  1. In the left navigation bar, select Data Query -> Log Query .

2. After selecting the query criteria, click Search , and the log records will be displayed in chart form. The most recent logs are displayed on top.

                                                                                  3. In the Filter panel, switch Type and select Node to check the logs of all nodes in the cluster.

                                                                                  4. In the Filter panel, switch Type and select Event to view the logs generated by all Kubernetes events in the cluster.

                                                                                  Lucene Syntax Explanation:

                                                                                  1. Use logical operators (AND, OR, NOT, \"\") to query multiple keywords. For example: keyword1 AND (keyword2 OR keyword3) NOT keyword4.
                                                                                  2. Use a tilde (~) for fuzzy queries. You can optionally specify a parameter after the \"~\" to control the similarity of the fuzzy query. If not specified, it defaults to 0.5. For example: error~.
3. Use wildcards as placeholders: ? matches a single character, and * matches zero or more characters. For example: err?r or erro*.
                                                                                  4. Use square brackets [ ] or curly braces { } for range queries. Square brackets [ ] represent a closed interval and include the boundary values. Curly braces { } represent an open interval and exclude the boundary values. Range queries are applicable only to fields that can be sorted, such as numeric fields and date fields. For example timestamp:[2022-01-01 TO 2022-01-31].
                                                                                  "},{"location":"en/end-user/insight/data-query/log.html#view-log-context","title":"View log context","text":"

Clicking the button next to a log entry slides out a panel on the right side where, by default, you can view 100 lines of context for that log. You can switch the Display Rows option to view more contextual content.

                                                                                  "},{"location":"en/end-user/insight/data-query/log.html#export-log","title":"Export log","text":"

                                                                                  Click the download button located in the upper right corner of the list.

                                                                                  • You can configure the exported log fields. The available fields may vary depending on the log type, with the Log Content field being mandatory.
                                                                                  • You can export the log query results in .txt or .csv format.

                                                                                  "},{"location":"en/end-user/insight/data-query/metric.html","title":"Metric query","text":"

Metric query supports querying the metric data of container resources, allowing you to view trends in monitoring metrics. In addition, advanced query supports native PromQL statements for metric queries.

                                                                                  "},{"location":"en/end-user/insight/data-query/metric.html#prerequisites","title":"Prerequisites","text":"
• The cluster has insight-agent installed and the application is in the Running state.
                                                                                  "},{"location":"en/end-user/insight/data-query/metric.html#common-query","title":"Common query","text":"
1. In the left navigation bar, click Data Query -> Metric Query .

2. After selecting query conditions such as cluster, type, node, and metric name, click Search , and the corresponding metric chart and data details will be displayed on the right side of the screen.

                                                                                  Tip

Custom time ranges are supported. You can manually click the Refresh icon or select a preset time interval to refresh.

                                                                                  "},{"location":"en/end-user/insight/data-query/metric.html#advanced-search","title":"Advanced Search","text":"
1. In the left navigation bar, click Data Query -> Metric Query , then click the Advanced Query tab to switch to the advanced query page.

                                                                                  2. Enter a PromQL statement (see PromQL Syntax), click Query , and the query metric chart and data details will be displayed.
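For example, a minimal PromQL statement that returns the per-pod CPU usage rate over the last 5 minutes; the metric container_cpu_usage_seconds_total follows standard cAdvisor naming, and the namespace label value is an assumption to adjust for your environment:

sum(rate(container_cpu_usage_seconds_total{namespace=\"insight-system\"}[5m])) by (pod)\n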

                                                                                  "},{"location":"en/end-user/insight/infra/cluster.html","title":"Cluster Monitoring","text":"

Through cluster monitoring, you can view basic cluster information, its resource consumption, and resource consumption trends over a period of time.

                                                                                  "},{"location":"en/end-user/insight/infra/cluster.html#prerequisites","title":"Prerequisites","text":"

The cluster has insight-agent installed and the application is in the Running state.

                                                                                  "},{"location":"en/end-user/insight/infra/cluster.html#steps","title":"Steps","text":"
                                                                                  1. Go to the Insight product module.

                                                                                  2. Select Infrastructure > Clusters from the left navigation bar. On this page, you can view the following information:

                                                                                    • Resource Overview: Provides statistics on the number of normal/all nodes and workloads across multiple clusters.
                                                                                    • Fault: Displays the number of alerts generated in the current cluster.
                                                                                    • Resource Consumption: Shows the actual usage and total capacity of CPU, memory, and disk for the selected cluster.
                                                                                    • Metric Explanations: Describes the trends in CPU, memory, disk I/O, and network bandwidth.

3. Click Resource Level Monitor to view more metrics of the current cluster.

                                                                                  "},{"location":"en/end-user/insight/infra/cluster.html#metric-explanations","title":"Metric Explanations","text":"Metric Name Description CPU Usage The ratio of the actual CPU usage of all pod resources in the cluster to the total CPU capacity of all nodes. CPU Allocation The ratio of the sum of CPU requests of all pods in the cluster to the total CPU capacity of all nodes. Memory Usage The ratio of the actual memory usage of all pod resources in the cluster to the total memory capacity of all nodes. Memory Allocation The ratio of the sum of memory requests of all pods in the cluster to the total memory capacity of all nodes."},{"location":"en/end-user/insight/infra/container.html","title":"Container Insight","text":"

Container insight monitors the workloads in cluster management. In the list, you can view the basic information and status of workloads. On the workload details page, you can see the number of active alerts and the trends of resource consumption such as CPU and memory.

                                                                                  "},{"location":"en/end-user/insight/infra/container.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • The cluster has insight-agent installed, and all pods are in the Running state.

                                                                                  • To install insight-agent, please refer to: Installing insight-agent online or Offline upgrade of insight-agent.

                                                                                  "},{"location":"en/end-user/insight/infra/container.html#steps","title":"Steps","text":"

                                                                                  Follow these steps to view service monitoring metrics:

                                                                                  1. Go to the Insight product module.

                                                                                  2. Select Infrastructure > Workloads from the left navigation bar.

                                                                                  3. Switch between tabs at the top to view data for different types of workloads.

                                                                                  4. Click the target workload name to view the details.

                                                                                    1. Faults: Displays the total number of active alerts for the workload.
                                                                                    2. Resource Consumption: Shows the CPU, memory, and network usage of the workload.
                                                                                    3. Monitoring Metrics: Provides the trends of CPU, Memory, Network, and disk usage for the workload over the past hour.

                                                                                  5. Switch to the Pods tab to view the status of various pods for the workload, including their nodes, restart counts, and other information.

6. Switch to the JVM monitor tab to view the JVM metrics for each pod.

                                                                                    Note

                                                                                    1. The JVM monitoring feature only supports the Java language.
                                                                                    2. To enable the JVM monitoring feature, refer to Getting Started with Monitoring Java Applications.
                                                                                  "},{"location":"en/end-user/insight/infra/container.html#metric-explanations","title":"Metric Explanations","text":"Metric Name Description CPU Usage The sum of CPU usage for all pods under the workload. CPU Requests The sum of CPU requests for all pods under the workload. CPU Limits The sum of CPU limits for all pods under the workload. Memory Usage The sum of memory usage for all pods under the workload. Memory Requests The sum of memory requests for all pods under the workload. Memory Limits The sum of memory limits for all pods under the workload. Disk Read/Write Rate The total number of continuous disk reads and writes per second within the specified time range, representing a performance measure of the number of read and write operations per second on the disk. Network Send/Receive Rate The incoming and outgoing rates of network traffic, aggregated by workload, within the specified time range."},{"location":"en/end-user/insight/infra/event.html","title":"Event Query","text":"

                                                                                  AI platform Insight supports event querying by cluster and namespace.

                                                                                  "},{"location":"en/end-user/insight/infra/event.html#event-status-distribution","title":"Event Status Distribution","text":"

                                                                                  By default, the events that occurred within the last 12 hours are displayed. You can select a different time range in the upper right corner to view longer or shorter periods. You can also customize the sampling interval from 1 minute to 5 hours.

                                                                                  The event status distribution chart provides a visual representation of the intensity and dispersion of events. This helps in evaluating and preparing for subsequent cluster operations and maintenance tasks. If events are densely concentrated during specific time periods, you may need to allocate more resources or take proper measures to ensure cluster stability and high availability. On the other hand, if events are dispersed, you can effectively schedule other maintenance tasks such as system optimization, upgrades, or handling other tasks during this period.

                                                                                  By considering the event status distribution chart and the selected time range, you can better plan and manage your cluster operations and maintenance work, ensuring system stability and reliability.

                                                                                  "},{"location":"en/end-user/insight/infra/event.html#event-count-and-statistics","title":"Event Count and Statistics","text":"

                                                                                  Through important event statistics, you can easily understand the number of image pull failures, health check failures, Pod execution failures, Pod scheduling failures, container OOM (Out-of-Memory) occurrences, volume mounting failures, and the total count of all events. These events are typically categorized as \"Warning\" and \"Normal\".

                                                                                  "},{"location":"en/end-user/insight/infra/event.html#event-list","title":"Event List","text":"

The event list is presented chronologically. You can sort the events by Last Occurred At and Type .

                                                                                  By clicking on the \u2699\ufe0f icon on the right side, you can customize the displayed columns according to your preferences and needs.

                                                                                  Additionally, you can click the refresh icon to update the current event list when needed.

                                                                                  In the operation column on the right, clicking the icon allows you to view the history of a specific event.

                                                                                  "},{"location":"en/end-user/insight/infra/event.html#reference","title":"Reference","text":"

                                                                                  For detailed meanings of the built-in Events in the system, refer to the Kubernetes API Event List.

                                                                                  "},{"location":"en/end-user/insight/infra/namespace.html","title":"Namespace Monitoring","text":"

                                                                                  With namespaces as the dimension, you can quickly query resource consumption and trends within a namespace.

                                                                                  "},{"location":"en/end-user/insight/infra/namespace.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • Insight Agent is installed in the cluster and the applications are in the Running state.
                                                                                  "},{"location":"en/end-user/insight/infra/namespace.html#steps","title":"Steps","text":"
                                                                                  1. Go to the Insight product module.

                                                                                  2. Select Infrastructure -> Namespaces from the left navigation bar. On this page, you can view the following information:

                                                                                    1. Switch Namespace: Switch between clusters or namespaces at the top.
                                                                                    2. Resource Overview: Provides statistics on the number of normal and total workloads within the selected namespace.
                                                                                    3. Incidents: Displays the number of alerts generated within the selected namespace.
                                                                                    4. Events: Shows the number of Warning level events within the selected namespace in the past 24 hours.
                                                                                    5. Resource Consumption: Provides the sum of CPU and memory usage for Pods within the selected namespace, along with the CPU and memory quota information.
                                                                                  "},{"location":"en/end-user/insight/infra/namespace.html#metric-explanations","title":"Metric Explanations","text":"Metric Name Description CPU Usage The sum of CPU usage for Pods within the selected namespace. Memory Usage The sum of memory usage for Pods within the selected namespace. Pod CPU Usage The CPU usage for each Pod within the selected namespace. Pod Memory Usage The memory usage for each Pod within the selected namespace."},{"location":"en/end-user/insight/infra/node.html","title":"Node Monitoring","text":"

Through node monitoring, you can get an overview of the health status of the nodes in the selected cluster and the number of abnormal pods. On the node details page, you can view the number of alerts and the trends in resource consumption such as CPU, memory, and disk.

                                                                                  "},{"location":"en/end-user/insight/infra/node.html#prerequisites","title":"Prerequisites","text":"
• insight-agent is installed in the cluster and the applications are in the Running state.
                                                                                  "},{"location":"en/end-user/insight/infra/node.html#steps","title":"Steps","text":"
                                                                                  1. Go to the Insight product module.

                                                                                  2. Select Infrastructure -> Nodes from the left navigation bar. On this page, you can view the following information:

                                                                                    • Cluster: Uses the dropdown at the top to switch between clusters.
                                                                                    • Nodes: Shows a list of nodes within the selected cluster. Click a specific node to view detailed information.
                                                                                    • Alert: Displays the number of alerts generated in the current cluster.
                                                                                    • Resource Consumption: Shows the actual usage and total capacity of CPU, memory, and disk for the selected node.
                                                                                    • Metric Explanations: Describes the trends in CPU, memory, disk I/O, and network traffic for the selected node.

3. Click Resource Level Monitor to view more metrics of the current cluster.

                                                                                  "},{"location":"en/end-user/insight/infra/probe.html","title":"Probe","text":"

                                                                                  Probe refers to the use of black-box monitoring to regularly test the connectivity of targets through HTTP, TCP, and other methods, enabling quick detection of ongoing faults.

                                                                                  Insight uses the Prometheus Blackbox Exporter tool to probe the network using protocols such as HTTP, HTTPS, DNS, TCP, and ICMP, and returns the probe results to understand the network status.
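
For reference, Blackbox Exporter listens on port 9115 by default and exposes a /probe endpoint. A quick manual check could look like the following sketch, where the exporter address is a placeholder and http_2xx is one of the exporter's default modules:

curl -s 'http://<blackbox-exporter>:9115/probe?target=https://example.com&module=http_2xx' | grep -E 'probe_(success|duration_seconds|http_status_code|ssl_earliest_cert_expiry)'\n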

                                                                                  "},{"location":"en/end-user/insight/infra/probe.html#prerequisites","title":"Prerequisites","text":"

                                                                                  The insight-agent has been successfully deployed in the target cluster and is in the Running state.

                                                                                  "},{"location":"en/end-user/insight/infra/probe.html#view-probes","title":"View Probes","text":"
                                                                                  1. Go to the Insight product module.
                                                                                  2. Select Infrastructure -> Probes in the left navigation bar.

                                                                                    • Click the cluster or namespace dropdown in the table to switch between clusters and namespaces.
                                                                                    • The list displays the name, probe method, probe target, connectivity status, and creation time of the probes by default.
                                                                                    • The connectivity status can be:
                                                                                      • Normal: The probe successfully connects to the target, and the target returns the expected response.
                                                                                      • Abnormal: The probe fails to connect to the target, or the target does not return the expected response.
                                                                                      • Pending: The probe is attempting to connect to the target.
                                                                                    • Supports fuzzy search of probe names.
                                                                                  "},{"location":"en/end-user/insight/infra/probe.html#create-a-probe","title":"Create a Probe","text":"
                                                                                  1. Click Create Probe .
                                                                                  2. Fill in the basic information and click Next .

                                                                                    • Name: The name can only contain lowercase letters, numbers, and hyphens (-), and must start and end with a lowercase letter or number, with a maximum length of 63 characters.
                                                                                    • Cluster: Select the cluster for the probe task.
                                                                                    • Namespace: The namespace where the probe task is located.
                                                                                  3. Configure the probe parameters.

                                                                                    • Blackbox Instance: Select the blackbox instance responsible for the probe.
                                                                                    • Probe Method:
                                                                                      • HTTP: Sends HTTP or HTTPS requests to the target URL to check its connectivity and response time. This can be used to monitor the availability and performance of websites or web applications.
                                                                                      • TCP: Establishes a TCP connection to the target host and port to check its connectivity and response time. This can be used to monitor TCP-based services such as web servers and database servers.
                                                                                      • Other: Supports custom probe methods by configuring ConfigMap. For more information, refer to: Custom Probe Methods
                                                                                    • Probe Target: The target address of the probe, supports domain names or IP addresses.
                                                                                    • Labels: Custom labels that will be automatically added to Prometheus' labels.
                                                                                    • Probe Interval: The interval between probes.
                                                                                    • Probe Timeout: The maximum waiting time when probing the target.
                                                                                  4. After configuring, click OK to complete the creation.

                                                                                  Warning

                                                                                  After the probe task is created, it takes about 3 minutes to synchronize the configuration. During this period, no probes will be performed, and probe results cannot be viewed.
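
For reference, probe tasks of this kind are commonly backed by a Prometheus Operator Probe resource. The sketch below is only illustrative, not the exact resource Insight generates; the name, namespace, and Blackbox Exporter service address are assumptions:

apiVersion: monitoring.coreos.com/v1\nkind: Probe\nmetadata:\n  name: probe-example   # hypothetical name\n  namespace: default    # assumed namespace\nspec:\n  prober:\n    url: insight-agent-prometheus-blackbox-exporter:9115   # assumed Blackbox Exporter service\n  module: http_2xx      # probe method\n  interval: 60s         # probe interval\n  scrapeTimeout: 30s    # probe timeout\n  targets:\n    staticConfig:\n      static:\n        - https://example.com   # probe target\n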

                                                                                  "},{"location":"en/end-user/insight/infra/probe.html#view-monitoring-dashboards","title":"View Monitoring Dashboards","text":"

                                                                                  Click \u2507 in the operations column and click View Monitoring Dashboard .

                                                                                  Metric Name Description Current Status Response Represents the response status code of the HTTP probe request. Ping Status Indicates whether the probe request was successful. 1 indicates a successful probe request, and 0 indicates a failed probe request. IP Protocol Indicates the IP protocol version used in the probe request. SSL Expiry Represents the earliest expiration time of the SSL/TLS certificate. DNS Response (Latency) Represents the duration of the entire probe process in seconds. HTTP Duration Represents the duration of the entire process from sending the request to receiving the complete response."},{"location":"en/end-user/insight/infra/probe.html#edit-a-probe","title":"Edit a Probe","text":"

                                                                                  Click \u2507 in the operations column and click Edit .

                                                                                  "},{"location":"en/end-user/insight/infra/probe.html#delete-a-probe","title":"Delete a Probe","text":"

                                                                                  Click \u2507 in the operations column and click Delete .

                                                                                  "},{"location":"en/end-user/insight/quickstart/agent-status.html","title":"Insight-agent component status","text":"

Insight is a multicluster observability product in AI platform. To collect observability data from multiple clusters in a unified way, users need to install the Helm App insight-agent (installed in the insight-system namespace by default). See How to install insight-agent .

                                                                                  "},{"location":"en/end-user/insight/quickstart/agent-status.html#status-description","title":"Status description","text":"

                                                                                  In Insight -> Data Collection section, you can view the status of insight-agent installed in each cluster.

                                                                                  • not installed : insight-agent is not installed under the insight-system namespace in this cluster
                                                                                  • Running : insight-agent is successfully installed in the cluster, and all deployed components are running
                                                                                  • Exception : If insight-agent is in this state, it means that the helm deployment failed or the deployed components are not running

You can check the status as follows:

1. Run the following command. If the status is deployed , go to the next step. If it is failed , it is recommended to uninstall and reinstall it via Container Management -> Helm Apps , because a failed release will affect application upgrades:

                                                                                    helm list -n insight-system\n
2. Run the following command, or check the status of the components deployed in the cluster in Insight -> Data Collection . If any pod is not in the Running state, please restart the abnormal pod.

                                                                                    kubectl get pods -n insight-system\n
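
A simple way to restart an abnormal pod is to delete it so that its controller recreates it ( <pod-name> is a placeholder):

kubectl delete pod <pod-name> -n insight-system\n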
                                                                                  "},{"location":"en/end-user/insight/quickstart/agent-status.html#supplementary-instructions","title":"Supplementary instructions","text":"
                                                                                  1. The resource consumption of the metric collection component Prometheus in insight-agent is directly proportional to the number of pods running in the cluster. Adjust Prometheus resources according to the cluster size, please refer to Prometheus Resource Planning.

2. The storage capacity of the metric storage component vmstorage in the global service cluster is directly proportional to the total number of pods across all clusters.

                                                                                    • Please contact the platform administrator to adjust the disk capacity of vmstorage according to the cluster size, see vmstorage disk capacity planning.
• Adjust the vmstorage disk according to the multicluster size; see vmstorage disk expansion.
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/index.html","title":"Start Observing","text":"

AI platform enables the management and creation of multicloud and multiple clusters. Building upon this capability, Insight serves as a unified observability solution for multiple clusters. It collects observability data from multiple clusters by deploying the insight-agent plugin and allows querying of metrics, logs, and trace data through AI platform Insight.

                                                                                  insight-agent is a tool that facilitates the collection of observability data from multiple clusters. Once installed, it automatically collects metrics, logs, and trace data without any modifications.

                                                                                  Clusters created through Container Management come pre-installed with insight-agent. Hence, this guide specifically provides instructions on enabling observability for integrated clusters.

                                                                                  • Install insight-agent online

As a unified observability platform for multiple clusters, the resource consumption of certain Insight components is closely related to the size of the created clusters and the number of integrated clusters. When installing insight-agent, adjust the resources of the proper components based on the cluster size.

                                                                                  1. Adjust the CPU and memory resources of the Prometheus collection component in insight-agent according to the size of the cluster created or integrated. Please refer to Prometheus resource planning.

2. As the metric data from multiple clusters is stored centrally, AI platform administrators need to adjust the disk space of vmstorage based on the cluster size. Please refer to vmstorage disk capacity planning.

                                                                                  3. For instructions on adjusting the disk space of vmstorage, please refer to Expanding vmstorage disk.

                                                                                  Since AI platform supports the management of multicloud and multiple clusters, insight-agent has undergone partial verification. However, there are known conflicts with monitoring components when installing insight-agent in Suanova 4.0 clusters and Openshift 4.x clusters. If you encounter similar issues, please refer to the following documents:

                                                                                  • Install insight-agent in Openshift 4.x

                                                                                  Currently, the insight-agent collection component has undergone functional testing for popular versions of Kubernetes. Please refer to:

                                                                                  • Kubernetes cluster compatibility testing
                                                                                  • Openshift 4.x cluster compatibility testing
                                                                                  • Rancher cluster compatibility testing
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html","title":"Enable Big Log and Big Trace Modes","text":"

The Insight Module supports switching logs to Big Log mode and traces to Big Trace mode in order to enhance data writing capabilities in large-scale environments. This page introduces the following methods for enabling these modes:

                                                                                  • Enable or upgrade to Big Log and Big Trace modes through the installer (controlled by the same parameter value in manifest.yaml)
                                                                                  • Manually enable Big Log and Big Trace modes through Helm commands
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#logs","title":"Logs","text":"

                                                                                  This section explains the differences between the normal log mode and the Big Log mode.

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#log-mode","title":"Log Mode","text":"

                                                                                  Components: Fluentbit + Elasticsearch

                                                                                  This mode is referred to as the ES mode, and the data flow diagram is shown below:

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#big-log-mode","title":"Big Log Mode","text":"

                                                                                  Components: Fluentbit + Kafka + Vector + Elasticsearch

                                                                                  This mode is referred to as the Kafka mode, and the data flow diagram is shown below:

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#traces","title":"Traces","text":"

                                                                                  This section explains the differences between the normal trace mode and the Big Trace mode.

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#trace-mode","title":"Trace Mode","text":"

                                                                                  Components: Agent opentelemetry-collector + Global opentelemetry-collector + Jaeger-collector + Elasticsearch

This mode is referred to as the OTLP mode, and the data flow diagram is shown below:

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#big-trace-mode","title":"Big Trace Mode","text":"

                                                                                  Components: Agent opentelemetry-collector + Kafka + Global opentelemetry-collector + Jaeger-collector + Elasticsearch

                                                                                  This mode is referred to as the Kafka mode, and the data flow diagram is shown below:

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#enabling-via-installer","title":"Enabling via Installer","text":"

When deploying or upgrading AI platform using the installer, the manifest.yaml file includes the infrastructures.kafka field. To enable the Big Log and Big Trace modes, Kafka must be enabled:

                                                                                  manifest.yaml
                                                                                  apiVersion: manifest.daocloud.io/v1alpha1\nkind: SuanovaManifest\n...\ninfrastructures:\n  ...\n  kafka:\n    enable: true # Default is false\n    cpuLimit: 1\n    memLimit: 2Gi\n    pvcSize: 15Gi\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#enable","title":"Enable","text":"

                                                                                  When using a manifest.yaml that enables kafka during installation, Kafka middleware will be installed by default, and Big Log and Big Trace modes will be enabled automatically. The installation command is:

                                                                                  ./dce5-installer cluster-create -c clusterConfig.yaml -m manifest.yaml\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#upgrade","title":"Upgrade","text":"

The upgrade also involves modifying the kafka field. However, note that since the old environment was installed with kafka: false , Kafka is not present in the environment. Therefore, you need to include middleware in the upgrade scope so that the Kafka middleware is installed at the same time. The upgrade command is:

                                                                                  ./dce5-installer cluster-create -c clusterConfig.yaml -m manifest.yaml -u gproduct,middleware\n

                                                                                  Note

After the upgrade is complete, you need to manually restart the following components (see the example restart commands after this list):

                                                                                  • insight-agent-fluent-bit
                                                                                  • insight-agent-opentelemetry-collector
                                                                                  • insight-opentelemetry-collector
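
One way to restart them is kubectl rollout restart . The workload kinds below (a DaemonSet for fluent-bit, Deployments for the collectors) match a typical insight-agent layout but should be verified in your cluster first:

kubectl -n insight-system rollout restart daemonset/insight-agent-fluent-bit\nkubectl -n insight-system rollout restart deployment/insight-agent-opentelemetry-collector\nkubectl -n insight-system rollout restart deployment/insight-opentelemetry-collector\n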
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#enabling-via-helm-commands","title":"Enabling via Helm Commands","text":"

Prerequisites: Ensure that a usable Kafka instance is available and that its address is accessible.

                                                                                  Use the following commands to retrieve the values of the old versions of Insight and insight-agent (it's recommended to back them up):

                                                                                  helm get values insight -n insight-system -o yaml > insight.yaml\nhelm get values insight-agent -n insight-system -o yaml > insight-agent.yaml\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#enabling-big-log","title":"Enabling Big Log","text":"

                                                                                  There are several ways to enable or upgrade to Big Log mode:

• Use --set in the helm upgrade command
• Modify YAML and run helm upgrade
• Upgrade via Container Management UI

                                                                                  First, run the following Insight upgrade command, ensuring the Kafka brokers address is correct:

                                                                                  helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --set global.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.kafka.enabled=true \\\n  --set vector.enabled=true \\\n  --version 0.30.1\n

                                                                                  Then, run the following insight-agent upgrade command, ensuring the Kafka brokers address is correct:

                                                                                  helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --set global.exporters.logging.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.exporters.logging.output=kafka \\\n  --version 0.30.1\n

                                                                                  Follow these steps to modify the YAML and then run the helm upgrade command:

                                                                                  1. Modify insight.yaml

                                                                                    insight.yaml
                                                                                    global:\n  ...\n  kafka:\n    brokers: 10.6.216.111:30592\n    enabled: true\n...\nvector:\n  enabled: true\n
                                                                                  2. Upgrade the Insight component:

                                                                                    helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --version 0.30.1\n
                                                                                  3. Modify insight-agent.yaml

                                                                                    insight-agent.yaml
                                                                                    global:\n  ...\n  exporters:\n    ...\n    logging:\n      ...\n      kafka:\n        brokers: 10.6.216.111:30592\n      output: kafka\n
                                                                                  4. Upgrade the insight-agent:

                                                                                    helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --version 0.30.1\n

In the Container Management module, find the target cluster, select Helm Apps from the left navigation bar, then find and update insight-agent.

                                                                                  In Logging Settings, select kafka for output and fill in the correct brokers address.

                                                                                  Note that after the upgrade is complete, you need to manually restart the insight-agent-fluent-bit component.

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/big-log-and-trace.html#enabling-big-trace","title":"Enabling Big Trace","text":"

                                                                                  There are several ways to enable or upgrade to Big Trace mode:

• Using --set in the helm upgrade command
• Modify YAML and run helm upgrade
• Upgrade via Container Management UI

                                                                                  First, run the following Insight upgrade command, ensuring the Kafka brokers address is correct:

                                                                                  helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --set global.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.kafka.enabled=true \\\n  --set global.tracing.kafkaReceiver.enabled=true \\\n  --version 0.30.1\n

                                                                                  Then, run the following insight-agent upgrade command, ensuring the Kafka brokers address is correct:

                                                                                  helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --set global.exporters.trace.kafka.brokers=\"10.6.216.111:30592\" \\\n  --set global.exporters.trace.output=kafka \\\n  --version 0.30.1\n

                                                                                  Follow these steps to modify the YAML and then run the helm upgrade command:

                                                                                  1. Modify insight.yaml

                                                                                    insight.yaml
                                                                                    global:\n  ...\n  kafka:\n    brokers: 10.6.216.111:30592\n    enabled: true\n...\ntracing:\n  ...\n  kafkaReceiver:\n    enabled: true\n
                                                                                  2. Upgrade the Insight component:

                                                                                    helm upgrade insight insight-release/insight \\\n  -n insight-system \\\n  -f ./insight.yaml \\\n  --version 0.30.1\n
                                                                                  3. Modify insight-agent.yaml

                                                                                    insight-agent.yaml
                                                                                    global:\n  ...\n  exporters:\n    ...\n    trace:\n      ...\n      kafka:\n        brokers: 10.6.216.111:30592\n      output: kafka\n
                                                                                  4. Upgrade the insight-agent:

                                                                                    helm upgrade insight-agent insight-release/insight-agent \\\n  -n insight-system \\\n  -f ./insight-agent.yaml \\\n  --version 0.30.1\n

In the Container Management module, find the target cluster, select Helm Apps from the left navigation bar, then find and update insight-agent.

                                                                                  In Trace Settings, select kafka for output and fill in the correct brokers address.

                                                                                  Note that after the upgrade is complete, you need to manually restart the insight-agent-opentelemetry-collector and insight-opentelemetry-collector components.

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/component-scheduling.html","title":"Custom Insight Component Scheduling Policy","text":"

                                                                                  When deploying Insight to a Kubernetes environment, proper resource management and optimization are crucial. Insight includes several core components such as Prometheus, OpenTelemetry, FluentBit, Vector, and Elasticsearch. These components, during their operation, may negatively impact the performance of other pods within the cluster due to resource consumption issues. To effectively manage resources and optimize cluster operations, node affinity becomes an important option.

This page describes how to add taints and node affinity to ensure that each component runs on the appropriate nodes, avoiding resource competition or contention and thereby guaranteeing the stability and efficiency of the entire Kubernetes cluster.

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/component-scheduling.html#configure-dedicated-nodes-for-insight-using-taints","title":"Configure dedicated nodes for Insight using taints","text":"

                                                                                  Since the Insight Agent includes DaemonSet components, the configuration method described in this section is to have all components except the Insight DaemonSet run on dedicated nodes.

                                                                                  This is achieved by adding taints to the dedicated nodes and using tolerations to match them. More details can be found in the Kubernetes official documentation.

                                                                                  You can refer to the following commands to add and remove taints on nodes:

                                                                                  # Add taint\nkubectl taint nodes worker1 node.daocloud.io=insight-only:NoSchedule\n\n# Remove taint\nkubectl taint nodes worker1 node.daocloud.io:NoSchedule-\n

                                                                                  There are two ways to schedule Insight components to dedicated nodes:

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/component-scheduling.html#1-add-tolerations-for-each-component","title":"1. Add tolerations for each component","text":"

                                                                                  Configure the tolerations for the insight-server and insight-agent Charts respectively:

                                                                                  insight-server Chartinsight-agent Chart
                                                                                  server:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nui:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nrunbook:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\n# mysql:\nvictoria-metrics-k8s-stack:\n  victoria-metrics-operator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  vmcluster:\n    spec:\n      vmstorage:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n      vmselect:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n      vminsert:\n        tolerations:\n          - key: \"node.daocloud.io\"\n            operator: \"Equal\"\n            value: \"insight-only\"\n            effect: \"NoSchedule\"\n  vmalert:\n    spec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n  alertmanager:\n    spec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n\njaeger:\n  collector:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  query:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n\nopentelemetry-collector-aggregator:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nopentelemetry-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\ngrafana-operator:\n  operator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n  grafana:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\nkibana:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nelastic-alert:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n\nvector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\n
                                                                                  kube-prometheus-stack:\n  prometheus:\n    prometheusSpec:\n      tolerations:\n        - key: \"node.daocloud.io\"\n          operator: \"Equal\"\n          value: \"insight-only\"\n          effect: \"NoSchedule\"\n  prometheus-node-exporter:\n    tolerations:\n      - effect: NoSchedule\n        operator: Exists\n  prometheusOperator:\n    tolerations:\n      - key: \"node.daocloud.io\"\n        operator: \"Equal\"\n        value: \"insight-only\"\n        effect: \"NoSchedule\"\n\nkube-state-metrics:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-operator:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\ntailing-sidecar-operator:\n  operator:\n    tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nopentelemetry-kubernetes-collector:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\nprometheus-blackbox-exporter:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\"\netcd-exporter:\n  tolerations:\n    - key: \"node.daocloud.io\"\n      operator: \"Equal\"\n      value: \"insight-only\"\n      effect: \"NoSchedule\" \n
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/component-scheduling.html#2-configure-at-the-namespace-level","title":"2. Configure at the namespace level","text":"

                                                                                  Allow pods in the insight-system namespace to tolerate the node.daocloud.io=insight-only taint.

1. Adjust the apiserver configuration file /etc/kubernetes/manifests/kube-apiserver.yaml so that its admission plugins list includes PodTolerationRestriction,PodNodeSelector :
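
A minimal sketch of the relevant apiserver flag; keep any plugins already present in your list (NodeRestriction below is only illustrative):

# /etc/kubernetes/manifests/kube-apiserver.yaml\n- --enable-admission-plugins=NodeRestriction,PodTolerationRestriction,PodNodeSelector\n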

                                                                                  2. Add an annotation to the insight-system namespace:

                                                                                    apiVersion: v1\nkind: Namespace\nmetadata:\n  name: insight-system\n  annotations:\n    scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Equal\", \"effect\": \"NoSchedule\", \"key\": \"node.daocloud.io\", \"value\": \"insight-only\"}]'\n
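
Equivalently, you can annotate the existing namespace in place:

kubectl annotate namespace insight-system scheduler.alpha.kubernetes.io/defaultTolerations='[{\"operator\": \"Equal\", \"effect\": \"NoSchedule\", \"key\": \"node.daocloud.io\", \"value\": \"insight-only\"}]'\n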

Restart the components under the insight-system namespace to allow the pods in insight-system to be scheduled normally.

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/component-scheduling.html#use-node-labels-and-node-affinity-to-manage-component-scheduling","title":"Use node labels and node affinity to manage component scheduling","text":"

                                                                                  Info

                                                                                  Node affinity is conceptually similar to nodeSelector, allowing you to constrain which nodes a pod can be scheduled on based on labels on the nodes. There are two types of node affinity:

                                                                                  1. requiredDuringSchedulingIgnoredDuringExecution: The scheduler will only schedule the pod if the rules are met. This feature is similar to nodeSelector but has more expressive syntax.
                                                                                  2. preferredDuringSchedulingIgnoredDuringExecution: The scheduler will try to find nodes that meet the rules. If no matching nodes are found, the scheduler will still schedule the Pod.

                                                                                  For more details, please refer to the Kubernetes official documentation.

                                                                                  To meet different user needs for scheduling Insight components, Insight provides fine-grained labels for different components' scheduling policies. Below is a description of the labels and their associated components:

                                                                                  Label Key Label Value Description node.daocloud.io/insight-any Any value, recommended to use true Represents that all Insight components prefer nodes with this label node.daocloud.io/insight-prometheus Any value, recommended to use true Specifically for Prometheus components node.daocloud.io/insight-vmstorage Any value, recommended to use true Specifically for VictoriaMetrics vmstorage components node.daocloud.io/insight-vector Any value, recommended to use true Specifically for Vector components node.daocloud.io/insight-otel-col Any value, recommended to use true Specifically for OpenTelemetry components

                                                                                  You can refer to the following commands to add and remove labels on nodes:

                                                                                  # Add label to node8, prioritizing scheduling insight-prometheus to node8 \nkubectl label nodes node8 node.daocloud.io/insight-prometheus=true\n\n# Remove the node.daocloud.io/insight-prometheus label from node8\nkubectl label nodes node8 node.daocloud.io/insight-prometheus-\n

                                                                                  Below is the default affinity preference for the insight-prometheus component during deployment:

                                                                                  affinity:\n  nodeAffinity:\n    preferredDuringSchedulingIgnoredDuringExecution:\n    - preference:\n        matchExpressions:\n        - key: node-role.kubernetes.io/control-plane\n          operator: DoesNotExist\n      weight: 1\n    - preference:\n        matchExpressions:\n        - key: node.daocloud.io/insight-prometheus # (1)!\n          operator: Exists\n      weight: 2\n    - preference:\n        matchExpressions:\n        - key: node.daocloud.io/insight-any\n          operator: Exists\n      weight: 3\n    podAntiAffinity:\n      preferredDuringSchedulingIgnoredDuringExecution:\n        - weight: 1\n          podAffinityTerm:\n            topologyKey: kubernetes.io/hostname\n            labelSelector:\n              matchExpressions:\n                - key: app.kubernetes.io/instance\n                  operator: In\n                  values:\n                    - insight-agent-kube-prometh-prometheus\n
                                                                                  1. Prioritize scheduling insight-prometheus to nodes with the node.daocloud.io/insight-prometheus label
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/gethosturl.html","title":"Get Data Storage Address of Global Service Cluster","text":"

Insight is a product for unified observation of multiple clusters. To achieve unified storage and querying of observation data from multiple clusters, sub-clusters need to report the collected observation data to the global service cluster for unified storage. This page provides the storage component addresses required when installing the collection component insight-agent.

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/gethosturl.html#install-insight-agent-in-global-service-cluster","title":"Install insight-agent in Global Service Cluster","text":"

If installing insight-agent in the global service cluster, it is recommended to access the storage components via their in-cluster domain names:

                                                                                  export vminsert_host=\"vminsert-insight-victoria-metrics-k8s-stack.insight-system.svc.cluster.local\" # (1)!\nexport es_host=\"insight-es-master.insight-system.svc.cluster.local\" # (2)!\nexport otel_col_host=\"insight-opentelemetry-collector.insight-system.svc.cluster.local\" # (3)!\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/gethosturl.html#install-insight-agent-in-other-clusters","title":"Install insight-agent in Other Clusters","text":""},{"location":"en/end-user/insight/quickstart/install/gethosturl.html#get-address-via-interface-provided-by-insight-server","title":"Get Address via Interface Provided by Insight Server","text":"
                                                                                  1. The management cluster uses the default LoadBalancer mode for exposure.

                                                                                    Log in to the console of the global service cluster and run the following command:

                                                                                    export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam'\n

                                                                                    Note

                                                                                    Please replace the ${INSIGHT_SERVER_IP} parameter in the command.

                                                                                    You will get the following response:

                                                                                    {\n  \"values\": {\n    \"global\": {\n      \"exporters\": {\n        \"logging\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"metric\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"auditLog\": {\n          \"host\": \"10.6.182.32\"\n        },\n        \"trace\": {\n          \"host\": \"10.6.182.32\"\n        }\n      }\n    },\n    \"opentelemetry-operator\": {\n      \"enabled\": true\n    },\n    \"opentelemetry-collector\": {\n      \"enabled\": true\n    }\n  }\n}\n
                                                                                    • global.exporters.logging.host is the log service address, no need to set the proper service port, the default value will be used.
                                                                                    • global.exporters.metric.host is the metrics service address.
                                                                                    • global.exporters.trace.host is the trace service address.
                                                                                    • global.exporters.auditLog.host is the audit log service address (same service as trace but different port).
                                                                                  2. Management cluster disables LoadBalancer

                                                                                    When calling the interface, you need to additionally pass an externally accessible node IP from the cluster, which will be used to construct the complete access address of the proper service.

                                                                                    export INSIGHT_SERVER_IP=$(kubectl get service insight-server -n insight-system --output=jsonpath={.spec.clusterIP})\ncurl --location --request POST 'http://'\"${INSIGHT_SERVER_IP}\"'/apis/insight.io/v1alpha1/agentinstallparam' --data '{\"extra\": {\"EXPORTER_EXTERNAL_IP\": \"10.5.14.51\"}}'\n

                                                                                    You will get the following response:

                                                                                    {\n  \"values\": {\n    \"global\": {\n      \"exporters\": {\n        \"logging\": {\n          \"scheme\": \"https\",\n          \"host\": \"10.5.14.51\",\n          \"port\": 32007,\n          \"user\": \"elastic\",\n          \"password\": \"j8V1oVoM1184HvQ1F3C8Pom2\"\n        },\n        \"metric\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30683\n        },\n        \"auditLog\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30884\n        },\n        \"trace\": {\n          \"host\": \"10.5.14.51\",\n          \"port\": 30274\n        }\n      }\n    },\n    \"opentelemetry-operator\": {\n      \"enabled\": true\n    },\n    \"opentelemetry-collector\": {\n      \"enabled\": true\n    }\n  }\n}\n
                                                                                    • global.exporters.logging.host is the log service address.
                                                                                    • global.exporters.logging.port is the NodePort exposed by the log service.
                                                                                    • global.exporters.metric.host is the metrics service address.
                                                                                    • global.exporters.metric.port is the NodePort exposed by the metrics service.
                                                                                    • global.exporters.trace.host is the trace service address.
                                                                                    • global.exporters.trace.port is the NodePort exposed by the trace service.
                                                                                    • global.exporters.auditLog.host is the audit log service address (same service as trace but different port).
                                                                                    • global.exporters.auditLog.port is the NodePort exposed by the audit log service.
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/gethosturl.html#connect-via-loadbalancer","title":"Connect via LoadBalancer","text":"
                                                                                  1. If LoadBalancer is enabled in the cluster and a VIP is set for Insight, you can manually execute the following command to obtain the address information for vminsert and opentelemetry-collector:

                                                                                    $ kubectl get service -n insight-system | grep lb\nlb-insight-opentelemetry-collector               LoadBalancer   10.233.23.12    <pending>     4317:31286/TCP,8006:31351/TCP  24d\nlb-vminsert-insight-victoria-metrics-k8s-stack   LoadBalancer   10.233.63.67    <pending>     8480:31629/TCP                 24d\n
                                                                                    • lb-vminsert-insight-victoria-metrics-k8s-stack is the address for the metrics service.
                                                                                    • lb-insight-opentelemetry-collector is the address for the tracing service.
                                                                                  2. Execute the following command to obtain the address information for elasticsearch:

                                                                                    $ kubectl get service -n mcamel-system | grep es\nmcamel-common-es-cluster-masters-es-http               NodePort    10.233.16.120   <none>        9200:30465/TCP               47d\n

                                                                                    mcamel-common-es-cluster-masters-es-http is the address for the logging service.

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/gethosturl.html#connect-via-nodeport","title":"Connect via NodePort","text":"

                                                                                  The LoadBalancer feature is disabled in the global service cluster.

                                                                                  In this case, the LoadBalancer resources mentioned above will not be created by default. The relevant service names are:

                                                                                  • vminsert-insight-victoria-metrics-k8s-stack (metrics service)
                                                                                  • common-es (logging service)
                                                                                  • insight-opentelemetry-collector (tracing service)

                                                                                    After obtaining the corresponding port information for the services in the above two scenarios, make the following settings:

                                                                                  --set global.exporters.logging.host=  # (1)!\n--set global.exporters.logging.port=  # (2)!\n--set global.exporters.metric.host=   # (3)!\n--set global.exporters.metric.port=   # (4)!\n--set global.exporters.trace.host=    # (5)!\n--set global.exporters.trace.port=    # (6)!\n--set global.exporters.auditLog.host= # (7)!\n
                                                                                    1. Externally accessible node IP of the management cluster
                                                                                    2. NodePort corresponding to port 9200 of the logging service
                                                                                    3. Externally accessible node IP of the management cluster
                                                                                    4. NodePort corresponding to port 8480 of the metrics service
                                                                                    5. Externally accessible node IP of the management cluster
                                                                                    6. NodePort corresponding to port 4317 of the tracing service
                                                                                    7. Externally accessible node IP of the management cluster
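
                                                                                    For example, a minimal sketch that reads the NodePort of the logging service with kubectl and passes it through --set (the Helm release and chart names insight-agent and insight/insight-agent are assumptions; substitute your own):

                                                                                    NODE_IP=10.5.14.51   # an externally accessible node IP of the management cluster (example value)\nLOG_PORT=$(kubectl -n mcamel-system get service mcamel-common-es-cluster-masters-es-http -o jsonpath='{.spec.ports[0].nodePort}')\nhelm upgrade insight-agent insight/insight-agent -n insight-system --reuse-values \\\n  --set global.exporters.logging.host=${NODE_IP} \\\n  --set global.exporters.logging.port=${LOG_PORT}\n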
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/install-agent.html","title":"Install insight-agent","text":"

                                                                                    insight-agent is a plugin for collecting Insight data, supporting unified observation of metrics, traces, and log data. This article describes how to install insight-agent for an integrated cluster in an online environment.

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/install-agent.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Please confirm that your cluster has successfully connected to the container management platform. You can refer to Integrate Clusters for details.

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/install-agent.html#steps","title":"Steps","text":"
                                                                                  1. Enter Container Management from the left navigation bar, and enter Clusters . Find the cluster where you want to install insight-agent.

                                                                                    2. Click Install now to jump to the installation page, or click the cluster, then click Helm Apps -> Helm Templates in the left navigation bar, search for insight-agent in the search box, and click it for details.

                                                                                  3. Select the appropriate version and click Install .

                                                                                    4. Fill in the name, select the namespace and version, and fill in the reporting addresses for logging, metric, audit, and trace data in the YAML file. The system fills in the data reporting addresses of the components by default; please verify them before clicking OK to install.

                                                                                    If you need to modify the data reporting address, please refer to Get Data Reporting Address.

                                                                                  5. The system will automatically return to Helm Apps . When the application status changes from Unknown to Deployed , it means that insight-agent is installed successfully.

                                                                                    Note

                                                                                    • Click \u2507 on the far right, and you can perform more operations such as Update , View YAML and Delete in the pop-up menu.
                                                                                    • For a practical installation demo, watch Video demo of installing insight-agent
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/knownissues.html","title":"Known Issues","text":"

                                                                                  This page lists some issues related to the installation and uninstallation of Insight Agent and their workarounds.

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/knownissues.html#uninstallation-failure-of-insight-agent","title":"Uninstallation Failure of Insight Agent","text":"

                                                                                  When you run the following command to uninstall Insight Agent,

                                                                                    helm uninstall insight-agent\n

                                                                                    the TLS secret used by otel-operator fails to be uninstalled.

                                                                                    This is caused by the \"reuse TLS secret\" logic in the otel-operator code: it checks whether the MutationConfiguration exists and reuses the CA cert bound in the MutationConfiguration. However, since helm uninstall has already removed the MutationConfiguration, this results in a null value.

                                                                                    Therefore, please manually delete the corresponding secret using one of the following methods:

                                                                                  • Delete via command line: Log in to the console of the target cluster and run the following command:

                                                                                    kubectl -n insight-system delete secret insight-agent-opentelemetry-operator-controller-manager-service-cert\n
                                                                                  • Delete via UI: Log in to AI platform container management, select the target cluster, select Secret from the left menu, input insight-agent-opentelemetry-operator-controller-manager-service-cert, then select Delete.

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/knownissues.html#insight-agent_1","title":"Insight Agent","text":""},{"location":"en/end-user/insight/quickstart/install/knownissues.html#log-collection-endpoint-not-updated-when-upgrading-insight-agent","title":"Log Collection Endpoint Not Updated When Upgrading Insight Agent","text":"

                                                                                  When updating the log configuration of the insight-agent from Elasticsearch to Kafka or from Kafka to Elasticsearch, the changes do not take effect and the agent continues to use the previous configuration.

                                                                                    Solution:

                                                                                  Manually restart Fluent Bit in the cluster.
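
                                                                                    A minimal sketch of such a restart, assuming Fluent Bit runs as a DaemonSet named insight-agent-fluent-bit in the insight-system namespace (verify the actual name in your cluster):

                                                                                    kubectl -n insight-system rollout restart daemonset insight-agent-fluent-bit\n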

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/knownissues.html#podmonitor-collects-multiple-sets-of-jvm-metrics","title":"PodMonitor Collects Multiple Sets of JVM Metrics","text":"
                                                                                    1. In this version, there is a defect in PodMonitor/insight-kubernetes-pod: it incorrectly creates collection jobs for all containers in Pods marked with insight.opentelemetry.io/metric-scrape=true, instead of only for the containers corresponding to insight.opentelemetry.io/metric-port.

                                                                                    2. After a PodMonitor is declared, PrometheusOperator will pre-configure some service discovery configurations. For CRD compatibility reasons, configuring the collection tasks through annotations was abandoned.

                                                                                  3. Use the additional scrape config mechanism provided by Prometheus to configure the service discovery rules in a secret and introduce them into Prometheus.

                                                                                  Therefore:

                                                                                  1. Delete the current PodMonitor for insight-kubernetes-pod
                                                                                  2. Use a new rule

                                                                                    In the new rule, action: keepequal is used to check whether source_labels and target_label match, in order to determine whether to create collection tasks for a container's ports. Note that this feature is only available in Prometheus v2.41.0 (2022-12-20) and higher.

                                                                                  +    - source_labels: [__meta_kubernetes_pod_annotation_insight_opentelemetry_io_metric_port]\n+      separator: ;\n+      target_label: __meta_kubernetes_pod_container_port_number\n+      action: keepequal\n
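
                                                                                    A sketch of wiring this rule in through the additional scrape config mechanism, assuming the rules are saved in a file named prometheus-additional.yaml and the Prometheus CR is named insight-agent-kube-prometh-prometheus (both names are assumptions; check your cluster):

                                                                                    kubectl -n insight-system create secret generic additional-scrape-configs \\\n  --from-file=prometheus-additional.yaml\nkubectl -n insight-system patch prometheus insight-agent-kube-prometh-prometheus --type=merge \\\n  -p '{\"spec\":{\"additionalScrapeConfigs\":{\"name\":\"additional-scrape-configs\",\"key\":\"prometheus-additional.yaml\"}}}'\n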
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html","title":"Upgrade Notes","text":"

                                                                                  This page provides some considerations for upgrading insight-server and insight-agent.

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v028x-or-lower-to-v029x","title":"Upgrade from v0.28.x (or lower) to v0.29.x","text":"

                                                                                  Due to the upgrade of the Opentelemetry community operator chart version in v0.29.0, the supported values for featureGates in the values file have changed. Therefore, before upgrading, you need to set the value of featureGates to empty, as follows:

                                                                                  -  --set opentelemetry-operator.manager.featureGates=\"+operator.autoinstrumentation.go,+operator.autoinstrumentation.multi-instrumentation,+operator.autoinstrumentation.nginx\" \\\n+  --set opentelemetry-operator.manager.featureGates=\"\"\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v026x-or-lower-to-v027x-or-higher","title":"Upgrade from v0.26.x (or lower) to v0.27.x or higher","text":"

                                                                                    In v0.27.x, the switch for the vector component is now separate. If your existing environment has vector enabled, you need to specify --set vector.enabled=true when upgrading insight-server.
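
                                                                                    For example, a sketch (the release and chart names insight and insight/insight are assumptions; --reuse-values keeps your other settings):

                                                                                    helm upgrade insight insight/insight -n insight-system --reuse-values --set vector.enabled=true\n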

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v019x-or-lower-to-020x","title":"Upgrade from v0.19.x (or lower) to 0.20.x","text":"

                                                                                    Before upgrading Insight , you need to manually delete the jaeger-collector and jaeger-query deployments by running the following commands:

                                                                                  kubectl -n insight-system delete deployment insight-jaeger-collector\nkubectl -n insight-system delete deployment insight-jaeger-query\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v017x-or-lower-to-v018x","title":"Upgrade from v0.17.x (or lower) to v0.18.x","text":"

                                                                                  In v0.18.x, there have been updates to the Jaeger-related deployment files, so you need to manually run the following commands before upgrading insight-server:

                                                                                  kubectl -n insight-system delete deployment insight-jaeger-collector\nkubectl -n insight-system delete deployment insight-jaeger-query\n

                                                                                  There have been changes to metric names in v0.18.x, so after upgrading insight-server, insight-agent should also be upgraded.

                                                                                    In addition, the parameters for enabling the tracing module and adjusting the Elasticsearch connection have been modified. Refer to the following parameters:

                                                                                  +  --set global.tracing.enable=true \\\n-  --set jaeger.collector.enabled=true \\\n-  --set jaeger.query.enabled=true \\\n+  --set global.elasticsearch.scheme=${your-external-elasticsearch-scheme} \\\n+  --set global.elasticsearch.host=${your-external-elasticsearch-host} \\\n+  --set global.elasticsearch.port=${your-external-elasticsearch-port} \\\n+  --set global.elasticsearch.user=${your-external-elasticsearch-username} \\\n+  --set global.elasticsearch.password=${your-external-elasticsearch-password} \\\n-  --set jaeger.storage.elasticsearch.scheme=${your-external-elasticsearch-scheme} \\\n-  --set jaeger.storage.elasticsearch.host=${your-external-elasticsearch-host} \\\n-  --set jaeger.storage.elasticsearch.port=${your-external-elasticsearch-port} \\\n-  --set jaeger.storage.elasticsearch.user=${your-external-elasticsearch-username} \\\n-  --set jaeger.storage.elasticsearch.password=${your-external-elasticsearch-password} \\\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v015x-or-lower-to-v016x","title":"Upgrade from v0.15.x (or lower) to v0.16.x","text":"

                                                                                  In v0.16.x, a new feature parameter disableRouteContinueEnforce in the vmalertmanagers CRD is used. Therefore, you need to manually run the following command before upgrading insight-server:

                                                                                  kubectl apply --server-side -f https://raw.githubusercontent.com/VictoriaMetrics/operator/v0.33.0/config/crd/bases/operator.victoriametrics.com_vmalertmanagers.yaml --force-conflicts\n

                                                                                  Note

                                                                                  If you are performing an offline installation, after extracting the insight offline package, please run the following command to update CRDs.

                                                                                  kubectl apply --server-side -f insight/dependency-crds --force-conflicts \n
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v023x-or-lower-to-v024x","title":"Upgrade from v0.23.x (or lower) to v0.24.x","text":"

                                                                                  In v0.24.x, CRDs have been added to the OTEL operator chart. However, helm upgrade does not update CRDs, so you need to manually run the following command:

                                                                                  kubectl apply -f https://raw.githubusercontent.com/open-telemetry/opentelemetry-helm-charts/main/charts/opentelemetry-operator/crds/crd-opentelemetry.io_opampbridges.yaml\n

                                                                                  If you are performing an offline installation, you can find the above CRD yaml file after extracting the insight-agent offline package. After extracting the insight-agent Chart, manually run the following command:

                                                                                  kubectl apply -f charts/agent/crds/crd-opentelemetry.io_opampbridges.yaml\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v019x-or-lower-to-v020x","title":"Upgrade from v0.19.x (or lower) to v0.20.x","text":"

                                                                                  In v0.20.x, Kafka log export configuration has been added, and there have been some adjustments to the log export configuration. Before upgrading insight-agent , please note the parameter changes. The previous logging configuration has been moved to the logging.elasticsearch configuration:

                                                                                  -  --set global.exporters.logging.host \\\n-  --set global.exporters.logging.port \\\n+  --set global.exporters.logging.elasticsearch.host \\\n+  --set global.exporters.logging.elasticsearch.port \\\n
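
                                                                                    A sketch of an upgrade command reflecting the new parameter layout, using the example address and port from earlier on this page (the release and chart names are assumptions):

                                                                                    helm upgrade insight-agent insight/insight-agent -n insight-system --reuse-values \\\n  --set global.exporters.logging.elasticsearch.host=10.5.14.51 \\\n  --set global.exporters.logging.elasticsearch.port=32007\n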
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v017x-or-lower-to-v018x_1","title":"Upgrade from v0.17.x (or lower) to v0.18.x","text":"

                                                                                    Due to the updated deployment files for Jaeger in v0.18.x, it is important to note the parameter changes before upgrading insight-agent.

                                                                                  +  --set global.exporters.trace.enable=true \\\n-  --set opentelemetry-collector.enabled=true \\\n-  --set opentelemetry-operator.enabled=true \\\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v016x-or-lower-to-v017x","title":"Upgrade from v0.16.x (or lower) to v0.17.x","text":"

                                                                                    In v0.17.x, the kube-prometheus-stack chart version was upgraded from 41.9.1 to 45.28.1, and there were also some field upgrades in the CRDs used, such as the attachMetadata field of servicemonitor. Therefore, the following command needs to be run before upgrading the insight-agent:

                                                                                  kubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.65.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --force-conflicts\n

                                                                                  If you are performing an offline installation, you can find the yaml for the above CRD in insight-agent/dependency-crds after extracting the insight-agent offline package.

                                                                                  "},{"location":"en/end-user/insight/quickstart/install/upgrade-note.html#upgrade-from-v011x-or-earlier-to-v012x","title":"Upgrade from v0.11.x (or earlier) to v0.12.x","text":"

                                                                                    v0.12.x upgrades the kube-prometheus-stack chart from 39.6.0 to 41.9.1, including prometheus-operator to v0.60.1 and the prometheus-node-exporter chart to v4.3.0. prometheus-node-exporter uses the Kubernetes recommended labels after the upgrade, so you need to delete the node-exporter DaemonSet. prometheus-operator has updated the CRDs, so you need to run the following command before upgrading the insight-agent:

                                                                                    kubectl delete daemonset insight-agent-prometheus-node-exporter -n insight-system\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --force-conflicts\nkubectl apply --server-side -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.60.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml --force-conflicts\n

                                                                                  Note

                                                                                  If you are installing offline, you can run the following command to update the CRD after decompressing the insight-agent offline package.

                                                                                  kubectl apply --server-side -f insight-agent/dependency-crds --force-conflicts\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/jvm-monitor/jmx-exporter.html","title":"Use JMX Exporter to expose JVM monitoring metrics","text":"

                                                                                    JMX Exporter provides two usages:

                                                                                    1. Start a standalone process. Specify parameters when the JVM starts to expose an RMI interface for JMX; JMX Exporter then calls RMI to obtain the JVM runtime state data, converts it to the Prometheus metrics format, and exposes a port for Prometheus to collect.
                                                                                    2. Run in-process inside the JVM. Specify parameters when the JVM starts to run the JMX Exporter JAR package as a javaagent, which reads the JVM runtime state data in-process, converts it to the Prometheus metrics format, and exposes a port for Prometheus to collect.

                                                                                  Note

                                                                                    The official documentation does not recommend the first method: the configuration is complicated, it requires a separate process, and monitoring that process itself becomes a new problem. This page therefore focuses on the second usage, showing how to use JMX Exporter to expose JVM monitoring metrics in a Kubernetes environment.

                                                                                    The second usage is used here: the JMX Exporter JAR file and configuration file need to be specified when starting the JVM. The JAR is a binary file that is not easy to mount through a ConfigMap, and the configuration file rarely needs modification, so the suggestion is to package both the JMX Exporter JAR and its configuration file directly into the business container image.

                                                                                    With the second usage, you can either put the JMX Exporter JAR file into the business application image or mount it during deployment. The two methods are described below:

                                                                                  "},{"location":"en/end-user/insight/quickstart/jvm-monitor/jmx-exporter.html#method-1-build-the-jmx-exporter-jar-file-into-the-business-image","title":"Method 1: Build the JMX Exporter JAR file into the business image","text":"

                                                                                  The content of prometheus-jmx-config.yaml is as follows:

                                                                                  prometheus-jmx-config.yaml
                                                                                  ...\nssl: false\nlowercaseOutputName: false\nlowercaseOutputLabelNames: false\nrules:\n- pattern: \".*\"\n

                                                                                  Note

                                                                                    For more configuration items, please refer to the introduction at the bottom or the official Prometheus documentation.

                                                                                    Then prepare the JAR package file. You can find the latest JAR download address on the GitHub page of jmx_exporter, and refer to the following Dockerfile:

                                                                                  FROM openjdk:11.0.15-jre\nWORKDIR /app/\nCOPY target/my-app.jar ./\nCOPY prometheus-jmx-config.yaml ./\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\nENV JAVA_TOOL_OPTIONS=-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\nEXPOSE 8081 8999 8080 8888\nENTRYPOINT java $JAVA_OPTS -jar my-app.jar\n

                                                                                  Notice:

                                                                                    • Start parameter format: -javaagent:<jar-path>=<port>:<config-file>
                                                                                    • Port 8088 is used here to expose the JVM monitoring metrics; if it conflicts with the Java application, change it as needed
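
                                                                                    To sanity-check the image locally before pushing it, a quick sketch (the image tag my-app is illustrative):

                                                                                    docker build -t my-app .\ndocker run --rm -p 8088:8088 my-app\n# In another terminal, verify that metrics are exposed:\ncurl http://localhost:8088/metrics\n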
                                                                                  "},{"location":"en/end-user/insight/quickstart/jvm-monitor/jmx-exporter.html#method-2-mount-via-init-container-container","title":"Method 2: mount via init container container","text":"

                                                                                    First, build the JMX Exporter into a Docker image; the following Dockerfile is for reference only:

                                                                                    FROM alpine/curl:3.14\nWORKDIR /app/\n# Copy the previously created config file to the image\nCOPY prometheus-jmx-config.yaml ./\n# Download jmx prometheus javaagent jar online\nRUN set -ex; \\\n     curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\n

                                                                                  Build the image according to the above Dockerfile: docker build -t my-jmx-exporter .

                                                                                    Add the following init container to the Java application's deployment YAML:

                                                                                  Click to view YAML file
                                                                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-demo-app\n  labels:\n    app: my-demo-app\nspec:\n  selector:\n    matchLabels:\n      app: my-demo-app\n  template:\n    metadata:\n      labels:\n        app: my-demo-app\n    spec:\n      imagePullSecrets:\n      - name: registry-pull\n      initContainers:\n      - name: jmx-sidecar\n        image: my-jmx-exporter\n        command: [\"cp\", \"-r\", \"/app/jmx_prometheus_javaagent-0.17.2.jar\", \"/target/jmx_prometheus_javaagent-0.17.2.jar\"]  \u278a\n        volumeMounts:\n        - name: sidecar\n          mountPath: /target\n      containers:\n      - image: my-demo-app-image\n        name: my-demo-app\n        resources:\n          requests:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n          limits:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n        ports:\n        - containerPort: 18083\n        env:\n        - name: JAVA_TOOL_OPTIONS\n          value: \"-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\" \u278b\n        volumeMounts:\n        - name: host-time\n          mountPath: /etc/localtime\n          readOnly: true\n        - name: sidecar\n          mountPath: /sidecar\n      volumes:\n      - name: host-time\n        hostPath:\n          path: /etc/localtime\n      - name: sidecar  # Share the agent folder\n        emptyDir: {}\n      restartPolicy: Always\n

                                                                                    After the above modification, the sample application my-demo-app is able to expose JVM metrics. After running the service, you can access the Prometheus-format metrics exposed by the service at http://localhost:8088.
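
                                                                                    Inside a Kubernetes cluster, you can verify this without exposing a Service by port-forwarding the Deployment, for example (a sketch):

                                                                                    kubectl port-forward deploy/my-demo-app 8088:8088 &\ncurl http://localhost:8088/metrics\n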

                                                                                  Then, you can refer to Java Application Docking Observability with JVM Metrics.

                                                                                  "},{"location":"en/end-user/insight/quickstart/jvm-monitor/jvm-catelogy.html","title":"Start monitoring Java applications","text":"

                                                                                    This document mainly describes how to monitor the JVM of a customer's Java application, covering how Java applications that already expose JVM metrics, as well as those that do not, interface with Insight.

                                                                                    If your Java application does not yet expose JVM metrics, you can refer to the following documents:

                                                                                  • Expose JVM monitoring metrics with JMX Exporter
                                                                                  • Expose JVM monitoring metrics using OpenTelemetry Java Agent

                                                                                    If your Java application already exposes JVM metrics, you can refer to the following document:

                                                                                  • Java application docking observability with existing JVM metrics
                                                                                  "},{"location":"en/end-user/insight/quickstart/jvm-monitor/legacy-jvm.html","title":"Java Application with JVM Metrics to Dock Insight","text":"

                                                                                    If your Java application exposes JVM monitoring metrics through other means (such as Spring Boot Actuator), the monitoring data still needs to be collected. You can let Insight collect existing JVM metrics by adding Kubernetes annotations to the workload:

                                                                                    annotations:\n   insight.opentelemetry.io/metric-scrape: \"true\" # whether to collect\n   insight.opentelemetry.io/metric-path: \"/\" # path to collect metrics\n   insight.opentelemetry.io/metric-port: \"9464\" # port for collecting metrics\n

                                                                                    YAML example of adding annotations to the my-deployment-app workload:

                                                                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-app\nspec:\n  selector:\n    matchLabels:\n      app: my-deployment-app\n      app.kubernetes.io/name: my-deployment-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-deployment-app\n        app.kubernetes.io/name: my-deployment-app\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\" # whether to collect\n        insight.opentelemetry.io/metric-path: \"/\" # path to collect metrics\n        insight.opentelemetry.io/metric-port: \"9464\" # port for collecting metrics\n

                                                                                  The following shows the complete YAML:

                                                                                  ---\napiVersion: v1\nkind: Service\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  type: NodePort\n  selector:\n    #app: my-deployment-with-aotu-instrumentation-app\n    app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  ports:\n    - name: http\n      port: 8080\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  selector:\n    matchLabels:\n      #app: my-deployment-with-aotu-instrumentation-app\n      app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\" # whether to collect\n        insight.opentelemetry.io/metric-path: \"/actuator/prometheus\"      # path to collect metrics\n        insight.opentelemetry.io/metric-port: \"8080\"   # port for collecting metrics\n    spec:\n      containers:\n        - name: myapp\n          image: docker.m.daocloud.io/wutang/spring-boot-actuator-prometheus-metrics-demo\n          ports:\n            - name: http\n              containerPort: 8080\n          resources:\n            limits:\n              cpu: 500m\n              memory: 800Mi\n            requests:\n              cpu: 200m\n              memory: 400Mi\n

                                                                                    In the above example, Insight will use <Pod IP>:8080/actuator/prometheus to get the Prometheus metrics exposed through Spring Boot Actuator .
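
                                                                                    To confirm that the annotations actually landed on the running Pods, a quick check (a sketch):

                                                                                    kubectl get pods -l app.kubernetes.io/name=spring-boot-actuator-prometheus-metrics-demo \\\n  -o jsonpath='{.items[0].metadata.annotations}'\n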

                                                                                  "},{"location":"en/end-user/insight/quickstart/jvm-monitor/otel-java-agent.html","title":"Use OpenTelemetry Java Agent to expose JVM monitoring metrics","text":"

                                                                                    OpenTelemetry Agent v1.20.0 and above includes the JMX Metric Insight module. If your application has already integrated the OpenTelemetry Agent to collect application traces, you no longer need to introduce another agent to expose JMX metrics. The OpenTelemetry Agent collects and exposes metrics by instrumenting the metrics exposed by MBeans locally available in the application.

                                                                                    The OpenTelemetry Agent also has some built-in monitoring samples for common Java servers or frameworks; please refer to predefined metrics.

                                                                                    Using the OpenTelemetry Java Agent also requires considering how to mount the JAR into the container. In addition to mounting the JAR file as described for the JMX Exporter above, you can use the Operator capabilities provided by OpenTelemetry to automatically enable JVM metric exposure for your applications:

                                                                                    However, in the current version, you still need to manually add the corresponding annotations to the workload before the JVM data will be collected by Insight.

                                                                                  "},{"location":"en/end-user/insight/quickstart/jvm-monitor/otel-java-agent.html#expose-metrics-for-java-middleware","title":"Expose metrics for Java middleware","text":"

                                                                                    The OpenTelemetry Agent also has some built-in middleware monitoring samples; please refer to Predefined Metrics.

                                                                                    By default, no target system is specified; it needs to be set through the -Dotel.jmx.target.system JVM option, for example -Dotel.jmx.target.system=jetty,kafka-broker .
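
                                                                                    In a Kubernetes workload, this option is typically passed alongside the agent flag via JAVA_TOOL_OPTIONS; a minimal sketch (the Deployment name my-demo-app and the agent JAR path are assumptions):

                                                                                    kubectl set env deployment/my-demo-app \\\n  JAVA_TOOL_OPTIONS='-javaagent:/app/opentelemetry-javaagent.jar -Dotel.jmx.target.system=jetty,kafka-broker'\n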

                                                                                  "},{"location":"en/end-user/insight/quickstart/jvm-monitor/otel-java-agent.html#reference","title":"Reference","text":"
                                                                                  • Gaining JMX Metric Insights with the OpenTelemetry Java Agent

                                                                                  • Otel jmx metrics

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang-ebpf.html","title":"Enhance Go apps with OTel auto-instrumentation","text":"

                                                                                    If you don't want to manually change the application code, you can try the eBPF-based automatic enhancement method described on this page. This feature is currently under review for donation to the OpenTelemetry community and does not yet support Operator injection through annotations (this will be supported in the future), so you need to manually change the Deployment YAML or apply a patch.

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang-ebpf.html#prerequisites","title":"Prerequisites","text":"

                                                                                    Make sure Insight Agent is ready. If not, see Install insight-agent for data collection and make sure the following three items are in place:

                                                                                    • The trace feature is enabled for insight-agent
                                                                                    • The address and port for trace data are filled in correctly
                                                                                    • The Pods corresponding to deployment/opentelemetry-operator-controller-manager and deployment/insight-agent-opentelemetry-collector are ready
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang-ebpf.html#install-instrumentation-cr","title":"Install Instrumentation CR","text":"

                                                                                    Install it under the insight-system namespace; skip this step if it has already been installed.

                                                                                  Note: This CR currently only supports the injection of environment variables (including service name and trace address) required to connect to Insight, and will support the injection of Golang probes in the future.

                                                                                  kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.17.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.31.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.34b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:0.3.1-beta.1\nEOF\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang-ebpf.html#change-the-application-deployment-file","title":"Change the application deployment file","text":"
                                                                                  • Add environment variable annotations

                                                                                      There is only one such annotation, which is used to add OpenTelemetry-related environment variables, such as the trace reporting address, the ID of the cluster where the container is located, and the namespace (a kubectl patch sketch for applying this annotation follows this list):

                                                                                    instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

                                                                                      The value is divided into two parts by / : the first part, insight-system, is the namespace of the CR installed in the previous step, and the second part, insight-opentelemetry-autoinstrumentation, is the name of the CR.

                                                                                    • Add a Golang eBPF probe container

                                                                                    Here is sample code:

                                                                                    apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: voting\n  namespace: emojivoto\n  labels:\n    app.kubernetes.io/name: voting\n    app.kubernetes.io/part-of: emojivoto\n    app.kubernetes.io/version: v11\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: voting-svc\n      version: v11\n  template:\n    metadata:\n      labels:\n        app: voting-svc\n        version: v11\n      annotations:\n        instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\" # (1)\n    spec:\n      containers:\n        - env:\n            - name: GRPC_PORT\n              value: \"8080\"\n            - name: PROM_PORT\n              value: \"8801\"\n          image: docker.l5d.io/buoyantio/emojivoto-voting-svc:v11 # (2)\n          name: voting-svc\n          command:\n            - /usr/local/bin/emojivoto-voting-svc\n          ports:\n            - containerPort: 8080\n              name: grpc\n            - containerPort: 8801\n              name: prom\n          resources:\n            requests:\n              cpu: 100m\n        - name: emojivoto-voting-instrumentation\n          image: docker.m.daocloud.io/keyval/otel-go-agent:v0.6.0\n          env:\n            - name: OTEL_TARGET_EXE\n              value: /usr/local/bin/emojivoto-voting-svc # (3)\n          securityContext:\n            runAsUser: 0\n            capabilities:\n              add:\n                - SYS_PTRACE\n            privileged: true\n          volumeMounts:\n            - mountPath: /sys/kernel/debug\n              name: kernel-debug\n      volumes:\n        - name: kernel-debug\n          hostPath:\n            path: /sys/kernel/debug\n
                                                                                    1. Used to add environment variables related to OpenTelemetry.
                                                                                    2. Assuming this is your Golang application.
                                                                                    3. Note that it should be consistent with the content of the command mentioned above: /usr/local/bin/emojivoto-voting-svc .
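
                                                                                    As referenced above, the annotation from the first step can also be applied without editing the YAML by hand; a sketch using kubectl patch against the sample Deployment:

                                                                                    kubectl -n emojivoto patch deployment voting --type=merge \\\n  -p '{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"instrumentation.opentelemetry.io/inject-sdk\":\"insight-system/insight-opentelemetry-autoinstrumentation\"}}}}}'\n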

                                                                                    The final generated YAML content is as follows:

                                                                                  apiVersion: v1\nkind: Pod\nmetadata:\n  name: voting-84b696c897-p9xbp\n  generateName: voting-84b696c897-\n  namespace: default\n  uid: 742639b0-db6e-4f06-ac90-68a80e2b8a11\n  resourceVersion: '65560793'\n  creationTimestamp: '2022-10-19T07:08:56Z'\n  labels:\n    app: voting-svc\n    pod-template-hash: 84b696c897\n    version: v11\n  annotations:\n    cni.projectcalico.org/containerID: 0a987cf0055ce0dfbe75c3f30d580719eb4fbbd7e1af367064b588d4d4e4c7c7\n    cni.projectcalico.org/podIP: 192.168.141.218/32\n    cni.projectcalico.org/podIPs: 192.168.141.218/32\n    instrumentation.opentelemetry.io/inject-sdk: insight-system/insight-opentelemetry-autoinstrumentation\nspec:\n  volumes:\n    - name: launcherdir\n      emptyDir: {}\n    - name: kernel-debug\n      hostPath:\n        path: /sys/kernel/debug\n        type: ''\n    - name: kube-api-access-gwj5v\n      projected:\n        sources:\n          - serviceAccountToken:\n              expirationSeconds: 3607\n              path: token\n          - configMap:\n              name: kube-root-ca.crt\n              items:\n                - key: ca.crt\n                  path: ca.crt\n          - downwardAPI:\n              items:\n                - path: namespace\n                  fieldRef:\n                    apiVersion: v1\n                    fieldPath: metadata.namespace\n        defaultMode: 420\n  containers:\n    - name: voting-svc\n      image: docker.l5d.io/buoyantio/emojivoto-voting-svc:v11\n      command:\n        - /odigos-launcher/launch\n        - /usr/local/bin/emojivoto-voting-svc\n      ports:\n        - name: grpc\n          containerPort: 8080\n          protocol: TCP\n        - name: prom\n          containerPort: 8801\n          protocol: TCP\n      env:\n        - name: GRPC_PORT\n          value: '8080'\n        - name: PROM_PORT\n          value: '8801'\n        - name: OTEL_TRACES_EXPORTER\n          value: otlp\n        - name: OTEL_EXPORTER_OTLP_ENDPOINT\n          value: >-\n            http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n        - name: OTEL_EXPORTER_OTLP_TIMEOUT\n          value: '200'\n        - name: SPLUNK_TRACE_RESPONSE_HEADER_ENABLED\n          value: 'true'\n        - name: OTEL_SERVICE_NAME\n          value: voting\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.name\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_UID\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.uid\n        - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: spec.nodeName\n        - name: OTEL_PROPAGATORS\n          value: jaeger,b3\n        - name: OTEL_TRACES_SAMPLER\n          value: always_on\n        - name: OTEL_RESOURCE_ATTRIBUTES\n          value: >-\n            k8s.container.name=voting-svc,k8s.deployment.name=voting,k8s.deployment.uid=79e015e2-4643-44c0-993c-e486aebaba10,k8s.namespace.name=default,k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME),k8s.pod.uid=$(OTEL_RESOURCE_ATTRIBUTES_POD_UID),k8s.replicaset.name=voting-84b696c897,k8s.replicaset.uid=63f56167-6632-415d-8b01-43a3db9891ff\n      resources:\n        requests:\n          cpu: 100m\n      volumeMounts:\n        - name: 
launcherdir\n          mountPath: /odigos-launcher\n        - name: kube-api-access-gwj5v\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: IfNotPresent\n    - name: emojivoto-voting-instrumentation\n      image: keyval/otel-go-agent:v0.6.0\n      env:\n        - name: OTEL_TARGET_EXE\n          value: /usr/local/bin/emojivoto-voting-svc\n        - name: OTEL_EXPORTER_OTLP_ENDPOINT\n          value: jaeger:4317\n        - name: OTEL_SERVICE_NAME\n          value: emojivoto-voting\n      resources: {}\n      volumeMounts:\n        - name: kernel-debug\n          mountPath: /sys/kernel/debug\n        - name: kube-api-access-gwj5v\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: IfNotPresent\n      securityContext:\n        capabilities:\n          add:\n            - SYS_PTRACE\n        privileged: true\n        runAsUser: 0\n\u00b7\u00b7\u00b7\u00b7\u00b7\u00b7\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang-ebpf.html#reference","title":"Reference","text":"
                                                                                  • Getting Started with Go OpenTelemetry Automatic Instrumentation
                                                                                  • Donating ebpf based instrumentation
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/operator.html","title":"Enhance Applications Non-Intrusively with Operators","text":"

                                                                                  Currently, only Java, Node.js, Python, .NET, and Golang support non-intrusive integration through the Operator approach.

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/operator.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Please ensure that the insight-agent is ready. If not, please refer to Install insight-agent for data collection and make sure the following three items are ready:

                                                                                  • Enable trace functionality for insight-agent
                                                                                  • Check if the address and port for trace data are correctly filled
                                                                                    • Ensure that the Pods corresponding to deployment/insight-agent-opentelemetry-operator and deployment/insight-agent-opentelemetry-collector are ready
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/operator.html#install-instrumentation-cr","title":"Install Instrumentation CR","text":"

                                                                                  Tip

                                                                                  Starting from Insight v0.22.0, there is no longer a need to manually install the Instrumentation CR.

                                                                                  Install it in the insight-system namespace. There are some minor differences between different versions.

                                                                                    Insight v0.21.x / Insight v0.20.x / Insight v0.18.x / Insight v0.17.x / Insight v0.16.x
                                                                                  K8S_CLUSTER_UID=$(kubectl get namespace kube-system -o jsonpath='{.metadata.uid}')\nkubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/openinsight-proj/autoinstrumentation-java:1.31.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n      - name: OTEL_K8S_CLUSTER_UID\n        value: $K8S_CLUSTER_UID\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
                                                                                  kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.29.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0-rc.2\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
                                                                                  kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.25.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.37.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.38b0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.1-alpha\nEOF\n
                                                                                  kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.23.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.34.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.33b0\nEOF\n
                                                                                  kubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:1.23.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.34.0\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.33b0\nEOF\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/operator.html#works-with-the-service-mesh-product-mspider","title":"Works with the Service Mesh Product (Mspider)","text":"

If you enable the tracing capability of Mspider (Service Mesh), you need to add an additional environment variable injection configuration:

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/operator.html#the-operation-steps-are-as-follows","title":"The operation steps are as follows","text":"
                                                                                  1. Log in to AI platform, then enter Container Management and select the target cluster.
                                                                                  2. Click CRDs in the left navigation bar, find instrumentations.opentelemetry.io, and enter the details page.
3. Select the insight-system namespace, then edit insight-opentelemetry-autoinstrumentation, and add the following content under spec.env:

                                                                                        - name: OTEL_SERVICE_NAME\n      valueFrom:\n        fieldRef:\n          fieldPath: metadata.labels['app'] \n

                                                                                    The complete example (for Insight v0.21.x) is as follows:

                                                                                    K8S_CLUSTER_UID=$(kubectl get namespace kube-system -o jsonpath='{.metadata.uid}')\nkubectl apply -f - <<EOF\napiVersion: opentelemetry.io/v1alpha1\nkind: Instrumentation\nmetadata:\n  name: insight-opentelemetry-autoinstrumentation\n  namespace: insight-system\nspec:\n  # https://github.com/open-telemetry/opentelemetry-operator/blob/main/docs/api.md#instrumentationspecresource\n  resource:\n    addK8sUIDAttributes: true\n  env:\n    - name: OTEL_EXPORTER_OTLP_ENDPOINT\n      value: http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\n    - name: OTEL_SERVICE_NAME\n      valueFrom:\n        fieldRef:\n          fieldPath: metadata.labels['app'] \n  sampler:\n    # Enum: always_on, always_off, traceidratio, parentbased_always_on, parentbased_always_off, parentbased_traceidratio, jaeger_remote, xray\n    type: always_on\n  java:\n    image: ghcr.m.daocloud.io/openinsight-proj/autoinstrumentation-java:1.31.0\n    env:\n      - name: OTEL_JAVAAGENT_DEBUG\n        value: \"false\"\n      - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n        value: \"true\"\n      - name: SPLUNK_PROFILER_ENABLED\n        value: \"false\"\n      - name: OTEL_METRICS_EXPORTER\n        value: \"prometheus\"\n      - name: OTEL_METRICS_EXPORTER_PORT\n        value: \"9464\"\n      - name: OTEL_K8S_CLUSTER_UID\n        value: $K8S_CLUSTER_UID\n  nodejs:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-nodejs:0.41.1\n  python:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-python:0.40b0\n  dotnet:\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-dotnet:1.0.0\n  go:\n    # Must set the default value manually for now.\n    # See https://github.com/open-telemetry/opentelemetry-operator/issues/1756 for details.\n    image: ghcr.m.daocloud.io/open-telemetry/opentelemetry-go-instrumentation/autoinstrumentation-go:v0.2.2-alpha\nEOF\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/operator.html#add-annotations-to-automatically-access-traces","title":"Add annotations to automatically access traces","text":"

After the above is ready, you can enable tracing for an application through Pod annotations. OTel currently supports trace injection via annotations. Depending on the service language, different Pod annotations need to be added. Each service can use one of two types of annotations:

• Annotations that only inject environment variables

There is only one such annotation; it is used to add OTel-related environment variables, such as the trace reporting endpoint, the ID of the cluster where the container is located, and the namespace. This annotation is very useful when there is no automatic instrumentation probe for the application's language.

                                                                                    instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

The value is divided into two parts by /: the first part (insight-system) is the namespace of the CR installed in the previous step, and the second part (insight-opentelemetry-autoinstrumentation) is the name of the CR. The annotation can also be applied with kubectl patch, as sketched below.
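For example, a hedged sketch of patching an existing workload (the deployment name my-app is hypothetical):

kubectl patch deployment my-app -p '{\"spec\":{\"template\":{\"metadata\":{\"annotations\":{\"instrumentation.opentelemetry.io/inject-sdk\":\"insight-system/insight-opentelemetry-autoinstrumentation\"}}}}}'\n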

                                                                                  • Automatic probe injection and environment variable injection annotations

There are currently 4 such annotations, corresponding to 4 different programming languages: java, nodejs, python, dotnet. When one of them is applied, an automatic probe and the default OTel environment variables are injected into the first container of the Pod spec:

The following annotations correspond to Java, NodeJS, Python, Dotnet, and Golang applications, respectively:
                                                                                    instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                                                                    instrumentation.opentelemetry.io/inject-nodejs: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                                                                    instrumentation.opentelemetry.io/inject-python: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                                                                    instrumentation.opentelemetry.io/inject-dotnet: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

Since Go auto-instrumentation requires OTEL_GO_AUTO_TARGET_EXE to be set, you must provide a valid executable path through the annotation or the Instrumentation resource. If this value is not set, Go auto-instrumentation injection is aborted and the trace connection fails.

                                                                                    instrumentation.opentelemetry.io/inject-go: \"insight-system/insight-opentelemetry-autoinstrumentation\"\ninstrumentation.opentelemetry.io/otel-go-auto-target-exe: \"/path/to/container/executable\"\n

Go auto-instrumentation also requires elevated privileges. The following permissions are set automatically and are necessary.

                                                                                    securityContext:\n  privileged: true\n  runAsUser: 0\n

                                                                                  Tip

                                                                                  The OpenTelemetry Operator automatically adds some OTel-related environment variables when injecting probes and also supports overriding these variables. The priority order for overriding these environment variables is as follows:

                                                                                  original container env vars -> language specific env vars -> common env vars -> instrument spec configs' vars\n

However, it is important to avoid manually overriding OTEL_RESOURCE_ATTRIBUTES_NODE_NAME. This variable serves as an identifier within the operator to determine whether a Pod has already been injected with a probe; manually adding it may prevent the probe from being injected successfully. Other variables can be overridden safely, as sketched below.
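For example, because the original container env vars take the highest priority, an operator-injected variable can be overridden directly in the container spec. A minimal sketch (the sampler settings here are illustrative):

containers:\n  - name: myapp\n    env:\n      # Overrides the sampler injected from the Instrumentation CR.\n      - name: OTEL_TRACES_SAMPLER\n        value: \"parentbased_traceidratio\"\n      - name: OTEL_TRACES_SAMPLER_ARG\n        value: \"0.5\"\n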

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/operator.html#automatic-injection-demo","title":"Automatic injection Demo","text":"

Note that the annotation is added to the Pod template, i.e. under spec.template.metadata.annotations.

                                                                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-app\n  labels:\n    app: my-app\nspec:\n  selector:\n    matchLabels:\n      app: my-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-app\n      annotations:\n        instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n    spec:\n      containers:\n      - name: myapp\n        image: jaegertracing/vertx-create-span:operator-e2e-tests\n        ports:\n          - containerPort: 8080\n            protocol: TCP\n

                                                                                  The final generated YAML is as follows:

                                                                                  apiVersion: v1\nkind: Pod\nmetadata:\n  name: my-deployment-with-sidecar-565bd877dd-nqkk6\n  generateName: my-deployment-with-sidecar-565bd877dd-\n  namespace: default\n  uid: aa89ca0d-620c-4d20-8bc1-37d67bad4ea4\n  resourceVersion: '2668986'\n  creationTimestamp: '2022-04-08T05:58:48Z'\n  labels:\n    app: my-pod-with-sidecar\n    pod-template-hash: 565bd877dd\n  annotations:\n    cni.projectcalico.org/containerID: 234eae5e55ea53db2a4bc2c0384b9a1021ed3908f82a675e4a92a49a7e80dd61\n    cni.projectcalico.org/podIP: 192.168.134.133/32\n    cni.projectcalico.org/podIPs: 192.168.134.133/32\n    instrumentation.opentelemetry.io/inject-java: \"insight-system/insight-opentelemetry-autoinstrumentation\"\nspec:\n  volumes:\n    - name: kube-api-access-sp2mz\n      projected:\n        sources:\n          - serviceAccountToken:\n              expirationSeconds: 3607\n              path: token\n          - configMap:\n              name: kube-root-ca.crt\n              items:\n                - key: ca.crt\n                  path: ca.crt\n          - downwardAPI:\n              items:\n                - path: namespace\n                  fieldRef:\n                    apiVersion: v1\n                    fieldPath: metadata.namespace\n        defaultMode: 420\n    - name: opentelemetry-auto-instrumentation\n      emptyDir: {}\n  initContainers:\n    - name: opentelemetry-auto-instrumentation\n      image: >-\n        ghcr.m.daocloud.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java\n      command:\n        - cp\n        - /javaagent.jar\n        - /otel-auto-instrumentation/javaagent.jar\n      resources: {}\n      volumeMounts:\n        - name: opentelemetry-auto-instrumentation\n          mountPath: /otel-auto-instrumentation\n        - name: kube-api-access-sp2mz\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  containers:\n    - name: myapp\n      image: ghcr.io/pavolloffay/spring-petclinic:latest\n      env:\n        - name: OTEL_JAVAAGENT_DEBUG\n          value: 'true'\n        - name: OTEL_INSTRUMENTATION_JDBC_ENABLED\n          value: 'true'\n        - name: SPLUNK_PROFILER_ENABLED\n          value: 'false'\n        - name: JAVA_TOOL_OPTIONS\n          value: ' -javaagent:/otel-auto-instrumentation/javaagent.jar'\n        - name: OTEL_TRACES_EXPORTER\n          value: otlp\n        - name: OTEL_EXPORTER_OTLP_ENDPOINT\n          value: http://insight-agent-opentelemetry-collector.svc.cluster.local:4317\n        - name: OTEL_EXPORTER_OTLP_TIMEOUT\n          value: '20'\n        - name: OTEL_TRACES_SAMPLER\n          value: parentbased_traceidratio\n        - name: OTEL_TRACES_SAMPLER_ARG\n          value: '0.85'\n        - name: SPLUNK_TRACE_RESPONSE_HEADER_ENABLED\n          value: 'true'\n        - name: OTEL_SERVICE_NAME\n          value: my-deployment-with-sidecar\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.name\n        - name: OTEL_RESOURCE_ATTRIBUTES_POD_UID\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              fieldPath: metadata.uid\n        - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n          valueFrom:\n            fieldRef:\n              apiVersion: v1\n              
fieldPath: spec.nodeName\n        - name: OTEL_RESOURCE_ATTRIBUTES\n          value: >-\n            k8s.container.name=myapp,k8s.deployment.name=my-deployment-with-sidecar,k8s.deployment.uid=8de6929d-dda0-436c-bca1-604e9ca7ea4e,k8s.namespace.name=default,k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME),k8s.pod.uid=$(OTEL_RESOURCE_ATTRIBUTES_POD_UID),k8s.replicaset.name=my-deployment-with-sidecar-565bd877dd,k8s.replicaset.uid=190d5f6e-ba7f-4794-b2e6-390b5879a6c4\n        - name: OTEL_PROPAGATORS\n          value: jaeger,b3\n      resources: {}\n      volumeMounts:\n        - name: kube-api-access-sp2mz\n          readOnly: true\n          mountPath: /var/run/secrets/kubernetes.io/serviceaccount\n        - name: opentelemetry-auto-instrumentation\n          mountPath: /otel-auto-instrumentation\n      terminationMessagePath: /dev/termination-log\n      terminationMessagePolicy: File\n      imagePullPolicy: Always\n  restartPolicy: Always\n  terminationGracePeriodSeconds: 30\n  dnsPolicy: ClusterFirst\n  serviceAccountName: default\n  serviceAccount: default\n  nodeName: k8s-master3\n  securityContext:\n    runAsUser: 1000\n    runAsGroup: 3000\n    fsGroup: 2000\n  schedulerName: default-scheduler\n  tolerations:\n    - key: node.kubernetes.io/not-ready\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n    - key: node.kubernetes.io/unreachable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n  priority: 0\n  enableServiceLinks: true\n  preemptionPolicy: PreemptLowerPriority\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/operator.html#trace-query","title":"Trace query","text":"

To query traces for the connected services, refer to Trace Query.

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/otel.html","title":"Use OTel to provide the application observability","text":"

Enhancement is the process of enabling application code to generate telemetry data, i.e. data that helps you monitor or measure the performance and status of your application.

OpenTelemetry is a leading open source project providing instrumentation libraries for major programming languages and popular frameworks. It is a project under the Cloud Native Computing Foundation and is supported by the vast resources of the community. It provides a standardized data format for collected data without tying you to a specific vendor.

                                                                                  Insight supports OpenTelemetry for application instrumentation to enhance your applications.

                                                                                  This guide introduces the basic concepts of telemetry enhancement using OpenTelemetry. OpenTelemetry also has an ecosystem of libraries, plugins, integrations, and other useful tools to extend it. You can find these resources at the OTel Registry.

                                                                                  You can use any open standard library for telemetry enhancement and use Insight as an observability backend to ingest, analyze, and visualize data.

To enhance your code, you can use the language-specific instrumentation provided by OpenTelemetry:

Insight currently provides an easy way to enhance .NET, NodeJS, Java, Python, and Golang applications with OpenTelemetry. Please follow the guidelines below.

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/otel.html#trace-enhancement","title":"Trace Enhancement","text":"
• Best practices for integrating traces: Application Non-Intrusive Enhancement via Operator
• Manual instrumentation, with the Go language as an example: Enhance Go applications with the OpenTelemetry SDK
• Using eBPF to implement non-intrusive auto-instrumentation in Go (experimental feature)
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/send_tracing_to_insight.html","title":"Sending Trace Data to Insight","text":"

                                                                                  This document describes how customers can send trace data to Insight on their own. It mainly includes the following two scenarios:

                                                                                  1. Customer apps report traces to Insight through OTEL Agent/SDK
                                                                                  2. Forwarding traces to Insight through Opentelemetry Collector (OTEL COL)

In each cluster where Insight Agent is installed, there is an insight-agent-otel-col component that receives trace data from that cluster. This component serves as the entry point for user access, so you need to obtain its address first. You can get the address of the OpenTelemetry Collector in the cluster through the AI platform interface, for example insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317, or query it directly as sketched below.
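A minimal lookup sketch (assuming the default installation namespace):

kubectl -n insight-system get svc insight-agent-opentelemetry-collector\n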

                                                                                  In addition, there are some slight differences for different reporting methods:

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/send_tracing_to_insight.html#customer-apps-report-traces-to-insight-through-otel-agentsdk","title":"Customer apps report traces to Insight through OTEL Agent/SDK","text":"

                                                                                  To successfully report trace data to Insight and display it properly, it is recommended to provide the required metadata (Resource Attributes) for OTLP through the following environment variables. There are two ways to achieve this:

                                                                                  • Manually add them to the deployment YAML file, for example:

                                                                                    ...\n- name: OTEL_EXPORTER_OTLP_ENDPOINT\n  value: \"http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\"\n- name: \"OTEL_SERVICE_NAME\"\n  value: my-java-app-name\n- name: \"OTEL_K8S_NAMESPACE\"\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: metadata.namespace\n- name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: spec.nodeName\n- name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n  valueFrom:\n    fieldRef:\n      apiVersion: v1\n      fieldPath: metadata.name\n- name: OTEL_RESOURCE_ATTRIBUTES\n  value: \"k8s.namespace.name=$(OTEL_K8S_NAMESPACE),k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME)\"\n
                                                                                  • Use the automatic injection capability of Insight Agent to inject the metadata (Resource Attributes)

Ensure that Insight Agent is working properly and that the Instrumentation CR has been installed; then you only need to add the following annotation to the Pod:

                                                                                    instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

                                                                                    For example:

apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-with-auto-instrumentation\nspec:\n  selector:\n    matchLabels:\n      app.kubernetes.io/name: my-deployment-with-auto-instrumentation-kubernetes\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: my-deployment-with-auto-instrumentation-kubernetes\n      annotations:\n        sidecar.opentelemetry.io/inject: \"false\"\n        instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/send_tracing_to_insight.html#forwarding-traces-to-insight-through-opentelemetry-collector","title":"Forwarding traces to Insight through Opentelemetry Collector","text":"

After ensuring that the application has added the metadata mentioned above, you only need to add an OTLP Exporter in your OpenTelemetry Collector to forward the trace data to the Insight Agent OpenTelemetry Collector. Below is an example OpenTelemetry Collector configuration file:

...\nexporters:\n  otlp/insight:\n    endpoint: insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317\nservice:\n  pipelines:\n    traces:\n      exporters:\n        - otlp/insight\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/send_tracing_to_insight.html#references","title":"References","text":"
                                                                                  • Enhancing Applications Non-intrusively with the Operator
                                                                                  • Achieving Observability with OTel
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html","title":"Enhance Go applications with OTel SDK","text":"

                                                                                  This page contains instructions on how to set up OpenTelemetry enhancements in a Go application.

OpenTelemetry, also known simply as OTel, is an open-source observability framework that helps generate and collect telemetry data (traces, metrics, and logs) in Go apps.

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#enhance-go-apps-with-the-opentelemetry-sdk","title":"Enhance Go apps with the OpenTelemetry SDK","text":""},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#install-related-dependencies","title":"Install related dependencies","text":"

Dependencies related to the OpenTelemetry exporter and SDK must be installed first. If you are using another request router, please refer to request routing. After changing into the application's source folder, run the following command:

                                                                                  go get go.opentelemetry.io/otel@v1.8.0 \\\n  go.opentelemetry.io/otel/trace@v1.8.0 \\\n  go.opentelemetry.io/otel/sdk@v1.8.0 \\\n  go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin@v0.33.0 \\\n  go.opentelemetry.io/otel/exporters/otlp/otlptrace@v1.7.0 \\\n  go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc@v1.4.1\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#create-an-initialization-feature-using-the-opentelemetry-sdk","title":"Create an initialization feature using the OpenTelemetry SDK","text":"

In order for an application to be able to send data, a function is required to initialize OpenTelemetry. Add the following code snippet to the main.go file:

                                                                                  import (\n    \"context\"\n    \"os\"\n    \"time\"\n\n    \"go.opentelemetry.io/otel\"\n    \"go.opentelemetry.io/otel/exporters/otlp/otlptrace\"\n    \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc\"\n    \"go.opentelemetry.io/otel/propagation\"\n    \"go.opentelemetry.io/otel/sdk/resource\"\n    sdktrace \"go.opentelemetry.io/otel/sdk/trace\"\n    semconv \"go.opentelemetry.io/otel/semconv/v1.7.0\"\n    \"go.uber.org/zap\"\n    \"google.golang.org/grpc\"\n)\n\nvar tracerExp *otlptrace.Exporter\n\nfunc retryInitTracer() func() {\n    var shutdown func()\n    go func() {\n        for {\n            // otel will reconnected and re-send spans when otel col recover. so, we don't need to re-init tracer exporter.\n            if tracerExp == nil {\n                shutdown = initTracer()\n            } else {\n                break\n            }\n            time.Sleep(time.Minute * 5)\n        }\n    }()\n    return shutdown\n}\n\nfunc initTracer() func() {\n    // temporarily set timeout to 10s\n    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n    defer cancel()\n\n    serviceName, ok := os.LookupEnv(\"OTEL_SERVICE_NAME\")\n    if !ok {\n        serviceName = \"server_name\"\n        os.Setenv(\"OTEL_SERVICE_NAME\", serviceName)\n    }\n    otelAgentAddr, ok := os.LookupEnv(\"OTEL_EXPORTER_OTLP_ENDPOINT\")\n    if !ok {\n        otelAgentAddr = \"http://localhost:4317\"\n        os.Setenv(\"OTEL_EXPORTER_OTLP_ENDPOINT\", otelAgentAddr)\n    }\n    zap.S().Infof(\"OTLP Trace connect to: %s with service name: %s\", otelAgentAddr, serviceName)\n\n    traceExporter, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure(), otlptracegrpc.WithDialOption(grpc.WithBlock()))\n    if err != nil {\n        handleErr(err, \"OTLP Trace gRPC Creation\")\n        return nil\n    }\n\n    tracerProvider := sdktrace.NewTracerProvider(\n        sdktrace.WithBatcher(traceExporter),\n        sdktrace.WithSampler(sdktrace.AlwaysSample()),\n    sdktrace.WithResource(resource.NewWithAttributes(semconv.SchemaURL)))\n\n    otel.SetTracerProvider(tracerProvider)\n    otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}))\n\n    tracerExp = traceExporter\n    return func() {\n        // Shutdown will flush any remaining spans and shut down the exporter.\n        handleErr(tracerProvider.Shutdown(ctx), \"failed to shutdown TracerProvider\")\n    }\n}\n\nfunc handleErr(err error, message string) {\n    if err != nil {\n        zap.S().Errorf(\"%s: %v\", message, err)\n    }\n}\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#initialize-tracker-in-maingo","title":"Initialize tracker in main.go","text":"

Modify the main function to initialize the tracer in main.go. Also, when your service shuts down, you should call TracerProvider.Shutdown() to ensure all spans are exported. The service makes this call as a deferred function in main:

                                                                                  func main() {\n    // start otel tracing\n    if shutdown := retryInitTracer(); shutdown != nil {\n            defer shutdown()\n        }\n    ......\n}\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#add-opentelemetry-gin-middleware-to-the-application","title":"Add OpenTelemetry Gin middleware to the application","text":"

Configure Gin to use the middleware by adding the following line to main.go:

                                                                                  import (\n    ....\n  \"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin\"\n)\n\nfunc main() {\n    ......\n    r := gin.Default()\n    r.Use(otelgin.Middleware(\"my-app\"))\n    ......\n}\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#run-the-application","title":"Run the application","text":"
                                                                                  • Local debugging and running

Note: This step is only used for local development and debugging. In a production environment, the Operator will automatically inject the following environment variables.

The above steps have completed the initialization of the SDK. If you now need to develop and debug locally, you first need to obtain the address of insight-agent-opentelemetry-collector in the insight-system namespace, for example: insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317.

                                                                                    Therefore, you can add the following environment variables when you start the application locally:

                                                                                    OTEL_SERVICE_NAME=my-golang-app OTEL_EXPORTER_OTLP_ENDPOINT=http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317 go run main.go...\n
                                                                                  • Running in a production environment

Refer to the section Only injecting environment variable annotations in Achieving non-intrusive enhancement of applications through the Operator to add annotations to the deployment YAML:

                                                                                    instrumentation.opentelemetry.io/inject-sdk: \"insight-system/insight-opentelemetry-autoinstrumentation\"\n

                                                                                    If you cannot use annotations, you can manually add the following environment variables to the deployment yaml:

...\nenv:\n  - name: OTEL_EXPORTER_OTLP_ENDPOINT\n    value: 'http://insight-agent-opentelemetry-collector.insight-system.svc.cluster.local:4317'\n  - name: OTEL_SERVICE_NAME\n    value: \"your deployment name\" # modify it\n  - name: OTEL_K8S_NAMESPACE\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: metadata.namespace\n  - name: OTEL_RESOURCE_ATTRIBUTES_NODE_NAME\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: spec.nodeName\n  - name: OTEL_RESOURCE_ATTRIBUTES_POD_NAME\n    valueFrom:\n      fieldRef:\n        apiVersion: v1\n        fieldPath: metadata.name\n  - name: OTEL_RESOURCE_ATTRIBUTES\n    value: 'k8s.namespace.name=$(OTEL_K8S_NAMESPACE),k8s.node.name=$(OTEL_RESOURCE_ATTRIBUTES_NODE_NAME),k8s.pod.name=$(OTEL_RESOURCE_ATTRIBUTES_POD_NAME)'\n...\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#request-routing","title":"Request Routing","text":""},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#opentelemetry-gingonic-enhancements","title":"OpenTelemetry gin/gonic enhancements","text":"
                                                                                  # Add one line to your import() stanza depending upon your request router:\nmiddleware \"go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin\"\n

                                                                                  Then inject the OpenTelemetry middleware:

router.Use(middleware.Middleware(\"my-app\"))\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#opentelemetry-gorillamux-enhancements","title":"OpenTelemetry gorillamux enhancements","text":"
                                                                                  # Add one line to your import() stanza depending upon your request router:\nmiddleware \"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux\"\n

                                                                                  Then inject the OpenTelemetry middleware:

router.Use(middleware.Middleware(\"my-app\"))\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#grpc-enhancements","title":"gRPC enhancements","text":"

Likewise, OpenTelemetry can help you automatically instrument gRPC requests. To instrument any gRPC server you have, add the interceptor to the server's instantiation.

                                                                                  import (\n  grpcotel \"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc\"\n)\nfunc main() {\n  [...]\n\n    s := grpc.NewServer(\n        grpc.UnaryInterceptor(grpcotel.UnaryServerInterceptor()),\n        grpc.StreamInterceptor(grpcotel.StreamServerInterceptor()),\n    )\n}\n

It should be noted that if your program uses a gRPC client to call third-party services, you also need to add the interceptor to the gRPC client:

                                                                                      [...]\n\n    conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()),\n        grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor()),\n        grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()),\n    )\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#if-not-using-request-routing","title":"If not using request routing","text":"
                                                                                  import (\n  \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\"\n)\n

Everywhere you pass an http.Handler to a ServeMux, wrap the handler function. For example, the following replacements would be made:

                                                                                  - mux.Handle(\"/path\", h)\n+ mux.Handle(\"/path\", otelhttp.NewHandler(h, \"description of path\"))\n---\n- mux.Handle(\"/path\", http.HandlerFunc(f))\n+ mux.Handle(\"/path\", otelhttp.NewHandler(http.HandlerFunc(f), \"description of path\"))\n

In this way, you can ensure that each handler wrapped with otelhttp will automatically collect its metadata and start the proper trace.

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#database-enhancements","title":"database enhancements","text":""},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#golang-gorm","title":"Golang Gorm","text":"

                                                                                  The OpenTelemetry community has also developed middleware for database access libraries, such as Gorm:

                                                                                  import (\n    \"github.com/uptrace/opentelemetry-go-extra/otelgorm\"\n    \"gorm.io/driver/sqlite\"\n    \"gorm.io/gorm\"\n)\n\ndb, err := gorm.Open(sqlite.Open(\"file::memory:?cache=shared\"), &gorm.Config{})\nif err != nil {\n    panic(err)\n}\n\notelPlugin := otelgorm.NewPlugin(otelgorm.WithDBName(\"mydb\"), # Missing this can lead to incomplete display of database related topology\n    otelgorm.WithAttributes(semconv.ServerAddress(\"memory\"))) # Missing this can lead to incomplete display of database related topology\nif err := db.Use(otelPlugin); err != nil {\n    panic(err)\n}\n

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#custom-span","title":"Custom Span","text":"

In many cases, the middleware provided by OpenTelemetry cannot record internally called functions for us, so we need to create a custom Span to record them:

...\n    _, span := otel.Tracer(\"GetServiceDetail\").Start(ctx,\n        \"spanMetricDao.GetServiceDetail\",\n        trace.WithSpanKind(trace.SpanKindInternal))\n    defer span.End()\n...\n
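A more complete sketch of the same pattern (the function, tracer, and span names here are illustrative, not part of any Insight API):

import (\n    \"context\"\n\n    \"go.opentelemetry.io/otel\"\n    \"go.opentelemetry.io/otel/trace\"\n)\n\n// GetServiceDetail wraps an internal call in a custom span so that it\n// shows up in the trace alongside the middleware-generated spans.\nfunc GetServiceDetail(ctx context.Context, serviceName string) error {\n    ctx, span := otel.Tracer(\"GetServiceDetail\").Start(ctx,\n        \"spanMetricDao.GetServiceDetail\",\n        trace.WithSpanKind(trace.SpanKindInternal))\n    defer span.End()\n\n    // ... do the actual work here, passing ctx on so child spans nest correctly ...\n    return nil\n}\n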
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#add-custom-properties-and-custom-events-to-span","title":"Add custom properties and custom events to span","text":"

It is also possible to set custom attributes or tags on a span. To add custom attributes and events, follow these steps:

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#import-tracking-and-property-libraries","title":"Import Tracking and Property Libraries","text":"
                                                                                  import (\n    ...\n    \"go.opentelemetry.io/otel/attribute\"\n    \"go.opentelemetry.io/otel/trace\"\n)\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#get-the-current-span-from-the-context","title":"Get the current Span from the context","text":"
                                                                                  span := trace.SpanFromContext(c.Request.Context())\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#set-properties-in-the-current-span","title":"Set properties in the current Span","text":"
span.SetAttributes(attribute.String(\"controller\", \"books\"))\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#add-an-event-to-the-current-span","title":"Add an Event to the current Span","text":"

                                                                                  Adding span events is done using AddEvent on the span object.

                                                                                  span.AddEvent(msg)\n
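Events can also carry their own attributes. A small sketch (the event name, attribute key, and the key variable are illustrative; it assumes the attribute and trace packages imported above):

span.AddEvent(\"cache miss\",\n    trace.WithAttributes(attribute.String(\"cache.key\", key)))\n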
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#log-errors-and-exceptions","title":"Log errors and exceptions","text":"
                                                                                  import \"go.opentelemetry.io/otel/codes\"\n\n// Get the current span\nspan := trace.SpanFromContext(ctx)\n\n// RecordError will automatically convert an error into a span even\nspan.RecordError(err)\n\n// Flag this span as an error\nspan.SetStatus(codes.Error, \"internal error\")\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/golang.html#references","title":"References","text":"

For demo applications, please refer to:

                                                                                  • otel-grpc-examples
                                                                                  • opentelemetry-demo/productcatalogservice
                                                                                  • opentelemetry-collector-contrib/demo
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/meter.html","title":"Exposing Metrics for Applications Using OpenTelemetry SDK","text":"

This article is intended for users who wish to evaluate or explore the still-evolving OTLP metrics support.

                                                                                  The OpenTelemetry project requires that APIs and SDKs must emit data in the OpenTelemetry Protocol (OTLP) for supported languages.

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/meter.html#for-golang-applications","title":"For Golang Applications","text":"

Go applications can expose runtime metrics through the SDK by adding the following code to enable a metrics exporter within the application:

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/meter.html#install-required-dependencies","title":"Install Required Dependencies","text":"

                                                                                  Navigate to your application\u2019s source folder and run the following command:

                                                                                  go get go.opentelemetry.io/otel \\\n  go.opentelemetry.io/otel/attribute \\\n  go.opentelemetry.io/otel/exporters/prometheus \\\n  go.opentelemetry.io/otel/metric/global \\\n  go.opentelemetry.io/otel/metric/instrument \\\n  go.opentelemetry.io/otel/sdk/metric\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/meter.html#create-an-initialization-function-using-otel-sdk","title":"Create an Initialization Function Using OTel SDK","text":"
import (\n    .....\n\n    \"go.opentelemetry.io/otel/attribute\"\n    otelPrometheus \"go.opentelemetry.io/otel/exporters/prometheus\"\n    \"go.opentelemetry.io/otel/metric/global\"\n    \"go.opentelemetry.io/otel/metric/instrument\"\n    \"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram\"\n    controller \"go.opentelemetry.io/otel/sdk/metric/controller/basic\"\n    \"go.opentelemetry.io/otel/sdk/metric/export/aggregation\"\n    processor \"go.opentelemetry.io/otel/sdk/metric/processor/basic\"\n    selector \"go.opentelemetry.io/otel/sdk/metric/selector/simple\"\n)\n\nfunc (s *insightServer) initMeter() *otelPrometheus.Exporter {\n    const port = 8888 // port where /metrics is served\n\n    s.meter = global.Meter(\"xxx\")\n\n    config := otelPrometheus.Config{\n        DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50},\n        Gatherer:                   prometheus.DefaultGatherer,\n        Registry:                   prometheus.NewRegistry(),\n        Registerer:                 prometheus.DefaultRegisterer,\n    }\n\n    c := controller.New(\n        processor.NewFactory(\n            selector.NewWithHistogramDistribution(\n                histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),\n            ),\n            aggregation.CumulativeTemporalitySelector(),\n            processor.WithMemory(true),\n        ),\n    )\n\n    exporter, err := otelPrometheus.New(config, c)\n    if err != nil {\n        zap.S().Panicf(\"failed to initialize prometheus exporter %v\", err)\n    }\n\n    global.SetMeterProvider(exporter.MeterProvider())\n\n    http.HandleFunc(\"/metrics\", exporter.ServeHTTP)\n\n    go func() {\n        _ = http.ListenAndServe(fmt.Sprintf(\":%d\", port), nil)\n    }()\n\n    zap.S().Info(\"Prometheus server running on \", fmt.Sprintf(\":%d\", port))\n    return exporter\n}\n

                                                                                  The above method will expose a metrics endpoint for your application at: http://localhost:8888/metrics.

                                                                                  Next, initialize it in main.go:

                                                                                  func main() {\n    // ...\n    tp := initMeter()\n    // ...\n}\n

                                                                                  If you want to add custom metrics, you can refer to the following:

// exposeLoggingMetric exposes a metric like \"insight_log_total{} 1\"\nfunc (s *insightServer) exposeLoggingMetric(lserver *log.LogService) {\n    s.meter = global.Meter(\"insight.io/basic\")\n\n    var lock sync.Mutex\n    logCounter, err := s.meter.AsyncFloat64().Counter(\"insight_log_total\")\n    if err != nil {\n        zap.S().Panicf(\"failed to initialize instrument: %v\", err)\n    }\n\n    _ = s.meter.RegisterCallback([]instrument.Asynchronous{logCounter}, func(ctx context.Context) {\n        lock.Lock()\n        defer lock.Unlock()\n        count, err := lserver.Count(ctx)\n        if err == nil && count != -1 {\n            logCounter.Observe(ctx, float64(count))\n        }\n    })\n}\n

                                                                                  Then, call this method in main.go:

                                                                                  // ...\ns.exposeLoggingMetric(lservice)\n// ...\n

                                                                                  You can check if your metrics are working correctly by visiting http://localhost:8888/metrics.

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/meter.html#for-java-applications","title":"For Java Applications","text":"

                                                                                  For Java applications, you can directly expose JVM-related metrics by using the OpenTelemetry agent with the following environment variable:

                                                                                  OTEL_METRICS_EXPORTER=prometheus\n
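For example, when launching the application with the OpenTelemetry Java agent, the exporter can be enabled on the command line. A sketch (the agent and application jar paths are hypothetical; the port is set explicitly to match the URL below):

OTEL_METRICS_EXPORTER=prometheus OTEL_EXPORTER_PROMETHEUS_PORT=8888 java -javaagent:/path/to/opentelemetry-javaagent.jar -jar app.jar\n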

                                                                                  You can then check your metrics at http://localhost:8888/metrics.

                                                                                  Next, combine it with a Prometheus ServiceMonitor to complete the metrics integration. If you want to expose custom metrics, please refer to opentelemetry-java-docs/prometheus.

                                                                                  The process is mainly divided into two steps:

                                                                                  • Create a meter provider and specify Prometheus as the exporter.
                                                                                  /*\n * Copyright The OpenTelemetry Authors\n * SPDX-License-Identifier: Apache-2.0\n */\n\npackage io.opentelemetry.example.prometheus;\n\nimport io.opentelemetry.api.metrics.MeterProvider;\nimport io.opentelemetry.exporter.prometheus.PrometheusHttpServer;\nimport io.opentelemetry.sdk.metrics.SdkMeterProvider;\nimport io.opentelemetry.sdk.metrics.export.MetricReader;\n\npublic final class ExampleConfiguration {\n\n  /**\n   * Initializes the Meter SDK and configures the Prometheus collector with all default settings.\n   *\n   * @param prometheusPort the port to open up for scraping.\n   * @return A MeterProvider for use in instrumentation.\n   */\n  static MeterProvider initializeOpenTelemetry(int prometheusPort) {\n    MetricReader prometheusReader = PrometheusHttpServer.builder().setPort(prometheusPort).build();\n\n    return SdkMeterProvider.builder().registerMetricReader(prometheusReader).build();\n  }\n}\n
                                                                                  • Create a custom meter and start the HTTP server.
                                                                                  package io.opentelemetry.example.prometheus;\n\nimport io.opentelemetry.api.common.Attributes;\nimport io.opentelemetry.api.metrics.Meter;\nimport io.opentelemetry.api.metrics.MeterProvider;\nimport java.util.concurrent.ThreadLocalRandom;\n\n/**\n * Example of using the PrometheusHttpServer to convert OTel metrics to Prometheus format and expose\n * these to a Prometheus instance via a HttpServer exporter.\n *\n * <p>A Gauge is used to periodically measure how many incoming messages are awaiting processing.\n * The Gauge callback gets executed every collection interval.\n */\npublic final class PrometheusExample {\n  private long incomingMessageCount;\n\n  public PrometheusExample(MeterProvider meterProvider) {\n    Meter meter = meterProvider.get(\"PrometheusExample\");\n    meter\n        .gaugeBuilder(\"incoming.messages\")\n        .setDescription(\"No of incoming messages awaiting processing\")\n        .setUnit(\"message\")\n        .buildWithCallback(result -> result.record(incomingMessageCount, Attributes.empty()));\n  }\n\n  void simulate() {\n    for (int i = 500; i > 0; i--) {\n      try {\n        System.out.println(\n            i + \" Iterations to go, current incomingMessageCount is:  \" + incomingMessageCount);\n        incomingMessageCount = ThreadLocalRandom.current().nextLong(100);\n        Thread.sleep(1000);\n      } catch (InterruptedException e) {\n        // ignored here\n      }\n    }\n  }\n\n  public static void main(String[] args) {\n    int prometheusPort = 8888;\n\n    // It is important to initialize the OpenTelemetry SDK as early as possible in your process.\n    MeterProvider meterProvider = ExampleConfiguration.initializeOpenTelemetry(prometheusPort);\n\n    PrometheusExample prometheusExample = new PrometheusExample(meterProvider);\n\n    prometheusExample.simulate();\n\n    System.out.println(\"Exiting\");\n  }\n}\n

                                                                                  After running the Java application, you can check if your metrics are working correctly by visiting http://localhost:8888/metrics.

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/meter.html#insight-collecting-metrics","title":"Insight Collecting Metrics","text":"

Lastly, note that your application now exposes metrics; the remaining step is to have Insight collect them.

                                                                                  The recommended way to expose metrics is via ServiceMonitor or PodMonitor.

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/golang/meter.html#creating-servicemonitorpodmonitor","title":"Creating ServiceMonitor/PodMonitor","text":"

                                                                                  The added ServiceMonitor/PodMonitor needs to have the label operator.insight.io/managed-by: insight for the Operator to recognize it:

                                                                                  apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: example-app\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  selector:\n    matchLabels:\n      app: example-app\n  endpoints:\n  - port: web\n  namespaceSelector:\n    any: true\n
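If the workload has no Service, a PodMonitor can be used instead. Below is a minimal sketch under the same conventions; the pod label app: example-app and the port name web are illustrative and should match your workload:

# Apply a PodMonitor for workloads without a Service (names are illustrative)\nkubectl apply -f - <<EOF\napiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: example-app\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  selector:\n    matchLabels:\n      app: example-app\n  podMetricsEndpoints:\n  - port: web\n  namespaceSelector:\n    any: true\nEOF\n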
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/java/index.html","title":"Start Monitoring Java Applications","text":"
1. For integrating and monitoring Java application traces, please refer to the document Implementing Non-Intrusive Enhancements for Applications via Operator, which explains how to automatically integrate traces through annotations.

2. Monitoring the JVM of Java applications: how Java applications that already expose JVM metrics, and those that do not yet, can connect with Insight for observability.

                                                                                  3. If your Java application has not yet started exposing JVM metrics, you can refer to the following documents:

                                                                                    • Exposing JVM Monitoring Metrics Using JMX Exporter
                                                                                    • Exposing JVM Monitoring Metrics Using OpenTelemetry Java Agent
                                                                                  4. If your Java application has already exposed JVM metrics, you can refer to the following document:

                                                                                    • Connecting Existing JVM Metrics of Java Applications to Observability
5. Writing TraceId and SpanId into Java Application Logs to correlate trace data with log data.

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/java/mdc.html","title":"Writing TraceId and SpanId into Java Application Logs","text":"

                                                                                  This article explains how to automatically write TraceId and SpanId into Java application logs using OpenTelemetry. By including TraceId and SpanId in your logs, you can correlate distributed tracing data with log data, enabling more efficient fault diagnosis and performance analysis.

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/java/mdc.html#supported-logging-libraries","title":"Supported Logging Libraries","text":"

                                                                                  For more information, please refer to the Logger MDC auto-instrumentation.

• Log4j 1: automatic instrumentation supports 1.2+; manual instrumentation requires no additional dependency
• Log4j 2: automatic instrumentation supports 2.7+; manual instrumentation requires opentelemetry-log4j-context-data-2.17-autoconfigure
• Logback: automatic instrumentation supports 1.0+; manual instrumentation requires opentelemetry-logback-mdc-1.0
"},{"location":"en/end-user/insight/quickstart/otel/java/mdc.html#using-logback-spring-boot-project","title":"Using Logback (Spring Boot Project)","text":"

                                                                                  Spring Boot projects come with a built-in logging framework and use Logback as the default logging implementation. If your Java project is a Spring Boot project, you can write TraceId into logs with minimal configuration.

                                                                                  Set logging.pattern.level in application.properties, adding %mdc{trace_id} and %mdc{span_id} to the logs.

logging.pattern.level=trace_id=%mdc{trace_id} span_id=%mdc{span_id} %5p ....omitted...\n

                                                                                  Here is an example of the logs:

                                                                                  2024-06-26 10:56:31.200 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.a.c.c.C.[Tomcat].[localhost].[/]       : Initializing Spring DispatcherServlet 'dispatcherServlet'\n2024-06-26 10:56:31.201 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.s.web.servlet.DispatcherServlet        : Initializing Servlet 'dispatcherServlet'\n2024-06-26 10:56:31.209 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=1b08f18b8858bb9a  INFO 53724 --- [nio-8081-exec-1] o.s.web.servlet.DispatcherServlet        : Completed initialization in 8 ms\n2024-06-26 10:56:31.296 trace_id=8f7ebd8a73f9a8f50e6a00a87a20952a span_id=5743699405074f4e  INFO 53724 --- [nio-8081-exec-1] com.example.httpserver.ot.OTServer       : hello world\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/java/mdc.html#using-log4j2","title":"Using Log4j2","text":"
                                                                                  1. Add OpenTelemetry Log4j2 dependency in pom.xml:

                                                                                    Tip

                                                                                    Please replace OPENTELEMETRY_VERSION with the latest version.

                                                                                    <dependencies>\n  <dependency>\n    <groupId>io.opentelemetry.instrumentation</groupId>\n    <artifactId>opentelemetry-log4j-context-data-2.17-autoconfigure</artifactId>\n    <version>OPENTELEMETRY_VERSION</version>\n    <scope>runtime</scope>\n  </dependency>\n</dependencies>\n
                                                                                  2. Modify the log4j2.xml configuration, adding %X{trace_id} and %X{span_id} in the pattern to automatically write TraceId and SpanId into the logs:

                                                                                    <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Configuration>\n  <Appenders>\n    <Console name=\"Console\" target=\"SYSTEM_OUT\">\n      <PatternLayout\n          pattern=\"%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} trace_id=%X{trace_id} span_id=%X{span_id} trace_flags=%X{trace_flags} - %msg%n\"/>\n    </Console>\n  </Appenders>\n  <Loggers>\n    <Root>\n      <AppenderRef ref=\"Console\" level=\"All\"/>\n    </Root>\n  </Loggers>\n</Configuration>\n
                                                                                  3. If using Logback, add OpenTelemetry Logback dependency in pom.xml.

                                                                                    Tip

                                                                                    Please replace OPENTELEMETRY_VERSION with the latest version.

                                                                                    <dependencies>\n  <dependency>\n    <groupId>io.opentelemetry.instrumentation</groupId>\n    <artifactId>opentelemetry-logback-mdc-1.0</artifactId>\n    <version>OPENTELEMETRY_VERSION</version>\n  </dependency>\n</dependencies>\n
4. Modify the logback.xml configuration, adding %X{trace_id} and %X{span_id} in the pattern to automatically write TraceId and SpanId into the logs:

                                                                                    <?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<configuration>\n  <appender name=\"CONSOLE\" class=\"ch.qos.logback.core.ConsoleAppender\">\n    <encoder>\n      <pattern>%d{HH:mm:ss.SSS} trace_id=%X{trace_id} span_id=%X{span_id} trace_flags=%X{trace_flags} %msg%n</pattern>\n    </encoder>\n  </appender>\n\n  <!-- Just wrap your logging appender, for example ConsoleAppender, with OpenTelemetryAppender -->\n  <appender name=\"OTEL\" class=\"io.opentelemetry.instrumentation.logback.mdc.v1_0.OpenTelemetryAppender\">\n    <appender-ref ref=\"CONSOLE\"/>\n  </appender>\n\n  <!-- Use the wrapped \"OTEL\" appender instead of the original \"CONSOLE\" one -->\n  <root level=\"INFO\">\n    <appender-ref ref=\"OTEL\"/>\n  </root>\n\n</configuration>\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html","title":"Exposing JVM Monitoring Metrics Using JMX Exporter","text":"

                                                                                  JMX Exporter provides two usage methods:

                                                                                  1. Standalone Process: Specify parameters when starting the JVM to expose a JMX RMI interface. The JMX Exporter calls RMI to obtain the JVM runtime state data, converts it into Prometheus metrics format, and exposes a port for Prometheus to scrape.
                                                                                  2. In-Process (JVM process): Specify parameters when starting the JVM to run the JMX Exporter jar file as a javaagent. This method reads the JVM runtime state data in-process, converts it into Prometheus metrics format, and exposes a port for Prometheus to scrape.

                                                                                  Note

                                                                                  The official recommendation is not to use the first method due to its complex configuration and the requirement for a separate process, which introduces additional monitoring challenges. Therefore, this article focuses on the second method, detailing how to use JMX Exporter to expose JVM monitoring metrics in a Kubernetes environment.

                                                                                  In this method, you need to specify the JMX Exporter jar file and configuration file when starting the JVM. Since the jar file is a binary file that is not ideal for mounting via a configmap, and the configuration file typically does not require modifications, it is recommended to package both the JMX Exporter jar file and the configuration file directly into the business container image.

                                                                                  For the second method, you can choose to include the JMX Exporter jar file in the application image or mount it during deployment. Below are explanations for both approaches:

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html#method-1-building-jmx-exporter-jar-file-into-the-business-image","title":"Method 1: Building JMX Exporter JAR File into the Business Image","text":"

                                                                                  The content of prometheus-jmx-config.yaml is as follows:

                                                                                  prometheus-jmx-config.yaml
                                                                                  ...\nssl: false\nlowercaseOutputName: false\nlowercaseOutputLabelNames: false\nrules:\n- pattern: \".*\"\n

                                                                                  Note

                                                                                  For more configuration options, please refer to the introduction at the bottom or Prometheus official documentation.

                                                                                  Next, prepare the jar file. You can find the latest jar download link on the jmx_exporter GitHub page and refer to the following Dockerfile:

                                                                                  FROM openjdk:11.0.15-jre\nWORKDIR /app/\nCOPY target/my-app.jar ./\nCOPY prometheus-jmx-config.yaml ./\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\nENV JAVA_TOOL_OPTIONS=-javaagent:/app/jmx_prometheus_javaagent-0.17.2.jar=8088:/app/prometheus-jmx-config.yaml\nEXPOSE 8081 8999 8080 8888\nENTRYPOINT java $JAVA_OPTS -jar my-app.jar\n

                                                                                  Note:

• The format for the startup parameter is: -javaagent:<jar file path>=<port>:<config file path>
                                                                                  • Here, port 8088 is used to expose JVM monitoring metrics; you may change it if it conflicts with the Java application.
                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/java/jvm-monitor/jmx-exporter.html#method-2-mounting-via-init-container","title":"Method 2: Mounting via Init Container","text":"

                                                                                  First, we need to create a Docker image for the JMX Exporter. The following Dockerfile is for reference:

                                                                                  FROM alpine/curl:3.14\nWORKDIR /app/\n# Copy the previously created config file into the image\nCOPY prometheus-jmx-config.yaml ./\n# Download the jmx prometheus javaagent jar online\nRUN set -ex; \\\n    curl -L -O https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.17.2/jmx_prometheus_javaagent-0.17.2.jar;\n

                                                                                  Build the image using the above Dockerfile: docker build -t my-jmx-exporter .

                                                                                  Add the following init container to the Java application deployment YAML:

                                                                                  Click to expand YAML file
apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-demo-app\n  labels:\n    app: my-demo-app\nspec:\n  selector:\n    matchLabels:\n      app: my-demo-app\n  template:\n    metadata:\n      labels:\n        app: my-demo-app\n    spec:\n      imagePullSecrets:\n      - name: registry-pull\n      initContainers:\n      - name: jmx-sidecar\n        image: my-jmx-exporter\n        # \u278a Copy the agent jar and its config file into the shared volume\n        command: [\"cp\", \"/app/jmx_prometheus_javaagent-0.17.2.jar\", \"/app/prometheus-jmx-config.yaml\", \"/target/\"]\n        volumeMounts:\n        - name: sidecar\n          mountPath: /target\n      containers:\n      - image: my-demo-app-image\n        name: my-demo-app\n        resources:\n          requests:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n          limits:\n            memory: \"1000Mi\"\n            cpu: \"500m\"\n        ports:\n        - containerPort: 18083\n        env:\n        - name: JAVA_TOOL_OPTIONS\n          # \u278b Load the agent and config from the shared volume mounted at /sidecar\n          value: \"-javaagent:/sidecar/jmx_prometheus_javaagent-0.17.2.jar=8088:/sidecar/prometheus-jmx-config.yaml\"\n        volumeMounts:\n        - name: host-time\n          mountPath: /etc/localtime\n          readOnly: true\n        - name: sidecar\n          mountPath: /sidecar\n      volumes:\n      - name: host-time\n        hostPath:\n          path: /etc/localtime\n      - name: sidecar  # Shared agent folder\n        emptyDir: {}\n      restartPolicy: Always\n

With the above modifications, the example application my-demo-app can now expose JVM metrics. After the service is running, Prometheus-formatted metrics are available on port 8088.
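To verify from outside the pod, you can port-forward and fetch the endpoint; a minimal sketch assuming the Deployment name from the YAML above:

# Forward the JMX Exporter port of the example Deployment to localhost\nkubectl port-forward deploy/my-demo-app 8088:8088\n# In another terminal, fetch the JVM metrics in Prometheus format\ncurl -s http://localhost:8088/metrics | head\n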

                                                                                  Next, you can refer to Connecting Existing JVM Metrics of Java Applications to Observability.

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/java/jvm-monitor/legacy-jvm.html","title":"Integrating Existing JVM Metrics of Java Applications with Observability","text":"

                                                                                  If your Java application exposes JVM monitoring metrics through other means (such as Spring Boot Actuator), you will need to ensure that the monitoring data is collected. You can achieve this by adding annotations (Kubernetes Annotations) to your workload to allow Insight to scrape the existing JVM metrics:

                                                                                  annotations: \n  insight.opentelemetry.io/metric-scrape: \"true\"  # Whether to scrape\n  insight.opentelemetry.io/metric-path: \"/\"         # Path to scrape metrics\n  insight.opentelemetry.io/metric-port: \"9464\"      # Port to scrape metrics\n

                                                                                  For example, to add annotations to the my-deployment-app:

                                                                                  apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: my-deployment-app\nspec:\n  selector:\n    matchLabels:\n      app: my-deployment-app\n      app.kubernetes.io/name: my-deployment-app\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app: my-deployment-app\n        app.kubernetes.io/name: my-deployment-app\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\"  # Whether to scrape\n        insight.opentelemetry.io/metric-path: \"/\"         # Path to scrape metrics\n        insight.opentelemetry.io/metric-port: \"9464\"      # Port to scrape metrics\n

                                                                                  Here is a complete example:

                                                                                  ---\napiVersion: v1\nkind: Service\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  type: NodePort\n  selector:\n    app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  ports:\n    - name: http\n      port: 8080\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: spring-boot-actuator-prometheus-metrics-demo\nspec:\n  selector:\n    matchLabels:\n      app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n  replicas: 1\n  template:\n    metadata:\n      labels:\n        app.kubernetes.io/name: spring-boot-actuator-prometheus-metrics-demo\n      annotations:\n        insight.opentelemetry.io/metric-scrape: \"true\"  # Whether to scrape\n        insight.opentelemetry.io/metric-path: \"/actuator/prometheus\"  # Path to scrape metrics\n        insight.opentelemetry.io/metric-port: \"8080\"      # Port to scrape metrics\n    spec:\n      containers:\n        - name: myapp\n          image: docker.m.daocloud.io/wutang/spring-boot-actuator-prometheus-metrics-demo\n          ports:\n            - name: http\n              containerPort: 8080\n          resources:\n            limits:\n              cpu: 500m\n              memory: 800Mi\n            requests:\n              cpu: 200m\n              memory: 400Mi\n

                                                                                  In the above example, Insight will scrape the Prometheus metrics exposed through Spring Boot Actuator via http://<service-ip>:8080/actuator/prometheus.
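As a quick sanity check before relying on Insight, you can query the endpoint directly through the Service created above; the throwaway curl pod below is illustrative:

# Query the Actuator Prometheus endpoint via the Service from inside the cluster\nkubectl run curl-test --rm -i --restart=Never --image=curlimages/curl -- \\\n  curl -s http://spring-boot-actuator-prometheus-metrics-demo:8080/actuator/prometheus | head\n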

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html","title":"Exposing JVM Metrics Using OpenTelemetry Java Agent","text":"

                                                                                  Starting from OpenTelemetry Agent v1.20.0 and later, the OpenTelemetry Agent has introduced the JMX Metric Insight module. If your application is already integrated with the OpenTelemetry Agent for tracing, you no longer need to introduce another agent to expose JMX metrics for your application. The OpenTelemetry Agent collects and exposes metrics by detecting the locally available MBeans in the application.

                                                                                  The OpenTelemetry Agent also provides built-in monitoring examples for common Java servers or frameworks. Please refer to the Predefined Metrics.

                                                                                  When using the OpenTelemetry Java Agent, you also need to consider how to mount the JAR into the container. In addition to the methods for mounting the JAR file as described with the JMX Exporter, you can leverage the capabilities provided by the OpenTelemetry Operator to automatically enable JVM metrics exposure for your application.

                                                                                  However, as of the current version, you still need to manually add the appropriate annotations to your application for the JVM data to be collected by Insight. For specific annotation content, please refer to Integrating Existing JVM Metrics of Java Applications with Observability.

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html#exposing-metrics-for-java-middleware","title":"Exposing Metrics for Java Middleware","text":"

                                                                                  The OpenTelemetry Agent also includes built-in examples for monitoring middleware. Please refer to the Predefined Metrics.

By default, no specific types are designated; you need to specify them using the -Dotel.jmx.target.system JVM option, for example, -Dotel.jmx.target.system=jetty,kafka-broker.
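Putting this together, a launch command might look like the following sketch; the agent jar path and application jar are illustrative, and the target systems depend on your middleware:

# Run the application with the OpenTelemetry Java Agent and expose JMX metrics for Jetty and Kafka broker\njava -javaagent:/app/opentelemetry-javaagent.jar \\\n  -Dotel.metrics.exporter=prometheus \\\n  -Dotel.jmx.target.system=jetty,kafka-broker \\\n  -jar my-app.jar\n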

                                                                                  "},{"location":"en/end-user/insight/quickstart/otel/java/jvm-monitor/otel-java-agent.html#references","title":"References","text":"
                                                                                  • Gaining JMX Metric Insights with the OpenTelemetry Java Agent

                                                                                  • Otel JMX Metrics

                                                                                  "},{"location":"en/end-user/insight/quickstart/other/install-agent-on-ocp.html","title":"OpenShift Install Insight Agent","text":"

Although OpenShift ships with its own monitoring system, Insight Agent still needs to be installed because of certain requirements in how data is collected.

In addition to the basic installation configuration, the following parameters need to be added during helm install:

## Parameters related to fluent-bit\n--set fluent-bit.ocp.enabled=true \\\n--set fluent-bit.serviceAccount.create=false \\\n--set fluent-bit.securityContext.runAsUser=0 \\\n--set fluent-bit.securityContext.seLinuxOptions.type=spc_t \\\n--set fluent-bit.securityContext.readOnlyRootFilesystem=false \\\n--set fluent-bit.securityContext.allowPrivilegeEscalation=false \\\n\n## Enable Prometheus (CR) for OpenShift 4.x\n--set compatibility.openshift.prometheus.enabled=true \\\n\n## Disable the higher-version Prometheus instance\n--set kube-prometheus-stack.prometheus.enabled=false \\\n--set kube-prometheus-stack.kubeApiServer.enabled=false \\\n--set kube-prometheus-stack.kubelet.enabled=false \\\n--set kube-prometheus-stack.kubeControllerManager.enabled=false \\\n--set kube-prometheus-stack.coreDns.enabled=false \\\n--set kube-prometheus-stack.kubeDns.enabled=false \\\n--set kube-prometheus-stack.kubeEtcd.enabled=false \\\n--set kube-prometheus-stack.kubeScheduler.enabled=false \\\n--set kube-prometheus-stack.kubeStateMetrics.enabled=false \\\n--set kube-prometheus-stack.nodeExporter.enabled=false \\\n\n## Limit the namespaces handled by PrometheusOperator to avoid competing with OpenShift's own PrometheusOperator\n--set kube-prometheus-stack.prometheusOperator.kubeletService.namespace=\"insight-system\" \\\n--set kube-prometheus-stack.prometheusOperator.prometheusInstanceNamespaces=\"insight-system\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[0]=\"openshift-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[1]=\"openshift-user-workload-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[2]=\"openshift-customer-monitoring\" \\\n--set kube-prometheus-stack.prometheusOperator.denyNamespaces[3]=\"openshift-route-monitor-operator\" \\\n
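For reference, a complete command might look like the following sketch; the release name, chart reference, and namespace are illustrative, and the remaining --set flags from the list above should be appended:

# Install Insight Agent on OpenShift (chart reference and release name are illustrative)\nhelm upgrade --install insight-agent insight/insight-agent \\\n  --namespace insight-system --create-namespace \\\n  --set fluent-bit.ocp.enabled=true \\\n  --set compatibility.openshift.prometheus.enabled=true \\\n  --set kube-prometheus-stack.prometheus.enabled=false\n  # ...append the remaining --set parameters listed above\n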
                                                                                  "},{"location":"en/end-user/insight/quickstart/other/install-agent-on-ocp.html#write-system-monitoring-data-into-prometheus-through-openshifts-own-mechanism","title":"Write system monitoring data into Prometheus through OpenShift's own mechanism","text":"
                                                                                  apiVersion: v1\nkind: ConfigMap\nmetadata:\n   name: cluster-monitoring-config\n   namespace: openshift-monitoring\ndata:\n   config.yaml: |\n     prometheusK8s:\n       remoteWrite:\n         - queueConfig:\n             batchSendDeadline: 60s\n             maxBackoff: 5s\n             minBackoff: 30ms\n             minShards: 1\n             capacity: 5000\n             maxSamplesPerSend: 1000\n             maxShards: 100\n           remoteTimeout: 30s\n           url: http://insight-agent-prometheus.insight-system.svc.cluster.local:9090/api/v1/write\n           writeRelabelConfigs:\n             - action: keep\n               regex: etcd|kubelet|node-exporter|apiserver|kube-state-metrics\n               sourceLabels:\n                 - job\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/other/install-agentindce.html","title":"Install insight-agent in Suanova 4.0","text":"

In the AI platform, a legacy Suanova 4.0 cluster can be attached as a subcluster. This guide covers potential issues and their solutions when installing insight-agent in a Suanova 4.0 cluster.

                                                                                  "},{"location":"en/end-user/insight/quickstart/other/install-agentindce.html#issue-one","title":"Issue One","text":"

Most Suanova 4.0 clusters already run dx-insight as the monitoring system, so installing insight-agent conflicts with the prometheus operator that exists in the cluster, and the installation cannot proceed smoothly.

                                                                                  "},{"location":"en/end-user/insight/quickstart/other/install-agentindce.html#solution","title":"Solution","text":"

Enable the deny-namespaces parameter on both prometheus operators: keep the prometheus operator in dx-insight, and make it coexist with the prometheus operator installed by insight-agent in 5.0.

                                                                                  "},{"location":"en/end-user/insight/quickstart/other/install-agentindce.html#steps","title":"Steps","text":"
                                                                                  1. Log in to the console.
                                                                                  2. Enable the --deny-namespaces parameter in the two prometheus operators respectively.
3. Run the following command (for reference only; replace the prometheus operator name and namespace with the actual values).

                                                                                    kubectl edit deploy insight-agent-kube-prometh-operator -n insight-system\n

                                                                                  Note

• The dx-insight component is deployed in the dx-insight namespace, and insight-agent is deployed in the insight-system namespace. Add --deny-namespaces=insight-system to the prometheus operator in dx-insight, and add --deny-namespaces=dx-insight to the prometheus operator in insight-agent; a command-line sketch of this edit follows these notes.
• Only the deny namespaces are added, so both prometheus operators can still scan all other namespaces; the collection resources under kube-system and customer business namespaces are unaffected.
• Watch out for node-exporter port conflicts.
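A command-line sketch of the same edit, assuming the insight-agent operator deployment named above; the args index and flag placement may differ across chart versions:

# Append the deny-namespaces flag to the insight-agent prometheus operator\nkubectl -n insight-system patch deploy insight-agent-kube-prometh-operator --type=json \\\n  -p='[{\"op\": \"add\", \"path\": \"/spec/template/spec/containers/0/args/-\", \"value\": \"--deny-namespaces=dx-insight\"}]'\n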
                                                                                  "},{"location":"en/end-user/insight/quickstart/other/install-agentindce.html#supplementary-explanation","title":"Supplementary Explanation","text":"

The open-source node-exporter enables hostNetwork by default and listens on port 9100. If the cluster's monitoring system has already installed node-exporter, installing insight-agent will cause a node-exporter port conflict, and it will not run normally.

                                                                                  Note

Insight's node exporter enables extra features to collect special metrics, so installing it is still recommended.

Currently, modifying the port in the installation command is not supported. After helm install insight-agent, you need to manually modify the related ports of the insight node-exporter daemonset and svc.
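For example, the resources can be edited directly; the resource names below are illustrative and depend on the release name:

# Change the conflicting node-exporter port in the DaemonSet and Service (names are illustrative)\nkubectl -n insight-system edit daemonset insight-agent-node-exporter\nkubectl -n insight-system edit svc insight-agent-node-exporter\n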

                                                                                  "},{"location":"en/end-user/insight/quickstart/other/install-agentindce.html#issue-two","title":"Issue Two","text":"

After Insight Agent is successfully deployed, fluent-bit does not collect the logs of Suanova 4.0.

                                                                                  "},{"location":"en/end-user/insight/quickstart/other/install-agentindce.html#solution_1","title":"Solution","text":"

The docker storage directory of Suanova 4.0 is /var/lib/containers , which differs from the path configured in insight-agent, so the logs are not collected.

                                                                                  "},{"location":"en/end-user/insight/quickstart/other/install-agentindce.html#steps_1","title":"Steps","text":"
                                                                                  1. Log in to the console.
                                                                                  2. Modify the following parameters in the insight-agent Chart.

fluent-bit:\n  daemonSetVolumeMounts:\n    - name: varlog\n      mountPath: /var/log\n    - name: varlibdockercontainers\n-     mountPath: /var/lib/docker/containers\n+     mountPath: /var/lib/containers/docker/containers\n      readOnly: true\n    - name: etcmachineid\n      mountPath: /etc/machine-id\n      readOnly: true\n    - name: dmesg\n      mountPath: /var/log/dmesg\n      readOnly: true\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/res-plan/modify-vms-disk.html","title":"vmstorage Disk Expansion","text":"

                                                                                  This article describes the method for expanding the vmstorage disk. Please refer to the vmstorage disk capacity planning for the specifications of the vmstorage disk.

                                                                                  "},{"location":"en/end-user/insight/quickstart/res-plan/modify-vms-disk.html#procedure","title":"Procedure","text":""},{"location":"en/end-user/insight/quickstart/res-plan/modify-vms-disk.html#enable-storageclass-expansion","title":"Enable StorageClass expansion","text":"
1. Log in to the AI platform as a global service cluster administrator. Click Container Management -> Clusters and go to the details of the kpanda-global-cluster cluster.

                                                                                  2. Select the left navigation menu Container Storage -> PVCs and find the PVC bound to the vmstorage.

                                                                                  3. Click a vmstorage PVC to enter the details of the volume claim for vmstorage and confirm the StorageClass that the PVC is bound to.

                                                                                  4. Select the left navigation menu Container Storage -> Storage Class and find local-path . Click the \u2507 on the right side of the target and select Edit in the popup menu.

                                                                                  5. Enable Scale Up and click OK .
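Equivalently, expansion can be enabled from the command line by setting allowVolumeExpansion on the StorageClass:

# Allow PVCs of the local-path StorageClass to be expanded\nkubectl patch storageclass local-path -p '{\"allowVolumeExpansion\": true}'\n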

                                                                                  "},{"location":"en/end-user/insight/quickstart/res-plan/modify-vms-disk.html#modify-the-disk-capacity-of-vmstorage","title":"Modify the disk capacity of vmstorage","text":"
1. Log in to the AI platform as a global service cluster administrator and go to the details of the kpanda-global-cluster cluster.

                                                                                  2. Select the left navigation menu CRDs and find the custom resource for vmcluster .

                                                                                  3. Click the custom resource for vmcluster to enter the details page, switch to the insight-system namespace, and select Edit YAML from the right menu of insight-victoria-metrics-k8s-stack .

4. Modify the disk capacity accordingly and click OK .

                                                                                  5. Select the left navigation menu Container Storage -> PVCs again and find the volume claim bound to vmstorage. Confirm that the modification has taken effect. In the details page of a PVC, click the associated storage source (PV).

                                                                                  6. Open the volume details page and click the Update button in the upper right corner.

                                                                                  7. After modifying the Capacity , click OK and wait for a moment until the expansion is successful.

                                                                                  "},{"location":"en/end-user/insight/quickstart/res-plan/modify-vms-disk.html#clone-the-storage-volume","title":"Clone the storage volume","text":"

                                                                                  If the storage volume expansion fails, you can refer to the following method to clone the storage volume.

1. Log in to the AI platform as a global service cluster administrator and go to the details of the kpanda-global-cluster cluster.

                                                                                  2. Select the left navigation menu Workloads -> StatefulSets and find the statefulset for vmstorage . Click the \u2507 on the right side of the target and select Status -> Stop -> OK in the popup menu.

3. Log in to a master node of the kpanda-global-cluster cluster via the command line, then run the following command to copy the vm-data directory from the vmstorage container, storing the metric data locally:

                                                                                    kubectl cp -n insight-system vmstorage-insight-victoria-metrics-k8s-stack-1:vm-data ./vm-data\n
4. Log in to the AI platform and go to the details of the kpanda-global-cluster cluster. Select the left navigation menu Container Storage -> PVs , click Clone in the upper right corner, and modify the capacity of the volume.

                                                                                  5. Delete the previous data volume of vmstorage.

6. Wait for a moment until the volume claim is bound to the cloned data volume, then run the following command to import the data exported in step 3 into the corresponding container, and then start the previously paused vmstorage .

                                                                                    kubectl cp -n insight-system ./vm-data vmstorage-insight-victoria-metrics-k8s-stack-1:vm-data\n
                                                                                  "},{"location":"en/end-user/insight/quickstart/res-plan/prometheus-res.html","title":"Prometheus Resource Planning","text":"

In actual use, Prometheus's CPU, memory, and other resource usage is affected by the number of containers in the cluster and by whether Istio is enabled, and may exceed the configured resources.

To ensure that Prometheus runs normally in clusters of different sizes, its resources need to be adjusted according to the actual size of the cluster.

                                                                                  "},{"location":"en/end-user/insight/quickstart/res-plan/prometheus-res.html#reference-resource-planning","title":"Reference resource planning","text":"

When the service mesh is not enabled, test statistics show the relationship between the system Job metrics and pods is: Series count = 800 * pod count

When the service mesh is enabled, the additional Istio-related metrics generated by pods follow: Series count = 768 * pod count

                                                                                  "},{"location":"en/end-user/insight/quickstart/res-plan/prometheus-res.html#when-the-service-mesh-is-not-enabled","title":"When the service mesh is not enabled","text":"

The following resource planning is recommended for Prometheus when the service mesh is not enabled:

Cluster size (pod count) | Metrics (service mesh not enabled) | CPU (core) | Memory
--- | --- | --- | ---
100 | 8w | Request: 0.5, Limit: 1 | Request: 2GB, Limit: 4GB
200 | 16w | Request: 1, Limit: 1.5 | Request: 3GB, Limit: 6GB
300 | 24w | Request: 1, Limit: 2 | Request: 3GB, Limit: 6GB
400 | 32w | Request: 1, Limit: 2 | Request: 4GB, Limit: 8GB
500 | 40w | Request: 1.5, Limit: 3 | Request: 5GB, Limit: 10GB
800 | 64w | Request: 2, Limit: 4 | Request: 8GB, Limit: 16GB
1000 | 80w | Request: 2.5, Limit: 5 | Request: 9GB, Limit: 18GB
2000 | 160w | Request: 3.5, Limit: 7 | Request: 20GB, Limit: 40GB
3000 | 240w | Request: 4, Limit: 8 | Request: 33GB, Limit: 66GB

(1w = 10,000 series.)
"},{"location":"en/end-user/insight/quickstart/res-plan/prometheus-res.html#when-the-service-mesh-feature-is-enabled","title":"When the service mesh feature is enabled","text":"

The following resource planning is recommended for Prometheus when the service mesh is enabled:

Cluster size (pod count) | Metrics (service mesh enabled) | CPU (core) | Memory
--- | --- | --- | ---
100 | 15w | Request: 1, Limit: 2 | Request: 3GB, Limit: 6GB
200 | 31w | Request: 2, Limit: 3 | Request: 5GB, Limit: 10GB
300 | 46w | Request: 2, Limit: 4 | Request: 6GB, Limit: 12GB
400 | 62w | Request: 2, Limit: 4 | Request: 8GB, Limit: 16GB
500 | 78w | Request: 3, Limit: 6 | Request: 10GB, Limit: 20GB
800 | 125w | Request: 4, Limit: 8 | Request: 15GB, Limit: 30GB
1000 | 156w | Request: 5, Limit: 10 | Request: 18GB, Limit: 36GB
2000 | 312w | Request: 7, Limit: 14 | Request: 40GB, Limit: 80GB
3000 | 468w | Request: 8, Limit: 16 | Request: 65GB, Limit: 130GB

                                                                                  Note

1. Pod count in the table refers to pods running stably in the cluster. If a large number of pods restart, the metric count will spike in a short period of time, and resources need to be adjusted accordingly.
2. Prometheus keeps two hours of data in memory by default, and when the Remote Write feature is enabled in the cluster it occupies additional memory; a resource surge ratio of 2 is recommended.
3. The values in the table are recommendations for general situations. If your environment has precise resource requirements, check the resource usage of the corresponding Prometheus after the cluster has run for a period of time and configure accordingly.
                                                                                  "},{"location":"en/end-user/insight/quickstart/res-plan/vms-res-plan.html","title":"vmstorage disk capacity planning","text":"

                                                                                  vmstorage is responsible for storing multicluster metrics for observability. In order to ensure the stability of vmstorage, it is necessary to adjust the disk capacity of vmstorage according to the number of clusters and the size of the cluster. For more information, please refer to vmstorage retention period and disk space.

                                                                                  "},{"location":"en/end-user/insight/quickstart/res-plan/vms-res-plan.html#test-results","title":"Test Results","text":"

After observing the disks of vmstorage in clusters of different sizes for 14 days, we found that vmstorage disk usage is positively correlated with the amount of metrics it stores and with the disk usage of a single data point.

1. Amount of metrics stored instantaneously: use increase(vm_rows{type != \"indexdb\"}[30s]) to obtain the amount of metrics added within 30s
                                                                                  2. Disk usage of a single data point: sum(vm_data_size_bytes{type!=\"indexdb\"}) / sum(vm_rows{type != \"indexdb\"})
                                                                                  "},{"location":"en/end-user/insight/quickstart/res-plan/vms-res-plan.html#calculation-method","title":"calculation method","text":"

Disk usage = instantaneous metric count x 2 x disk usage of a single data point x 60 x 24 x retention period (days)

                                                                                  Parameter Description:

                                                                                  1. The unit of disk usage is Byte .
2. Retention period (days) x 60 x 24 converts days into minutes for the disk usage calculation.
3. The default scrape interval of Prometheus in Insight Agent is 30s, so each metric generates two data points per minute.
4. The default storage duration in vmstorage is 1 month; refer to Modify System Configuration to change this configuration.
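Written as a single expression, with M the instantaneous metric count, d the disk usage of a single data point in bytes, and T the retention period in days (notation introduced here for clarity):

$$ \text{Disk usage (bytes)} = M \times 2 \times d \times 60 \times 24 \times T $$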

                                                                                  Warning

This formula is a general approximation; it is recommended to reserve extra disk capacity beyond the calculated result to ensure the normal operation of vmstorage.

                                                                                  "},{"location":"en/end-user/insight/quickstart/res-plan/vms-res-plan.html#reference-capacity","title":"reference capacity","text":"

The data in the table is calculated based on the default storage duration of one month (30 days), assuming the disk usage of a single data point (datapoint) is 0.9 bytes. In a multicluster scenario, the number of Pods is the total number of Pods across all clusters.

                                                                                  "},{"location":"en/end-user/insight/quickstart/res-plan/vms-res-plan.html#when-the-service-mesh-is-not-enabled","title":"When the service mesh is not enabled","text":"Cluster size (number of Pods) Metrics Disk capacity 100 8W 6 GiB 200 16W 12 GiB 300 24w 18 GiB 400 32w 24 GiB 500 40w 30 GiB 800 64w 48 GiB 1000 80W 60 GiB 2000 160w 120 GiB 3000 240w 180 GiB"},{"location":"en/end-user/insight/quickstart/res-plan/vms-res-plan.html#when-the-service-mesh-is-enabled","title":"When the service mesh is enabled","text":"Cluster size (number of Pods) Metrics Disk capacity 100 15W 12 GiB 200 31w 24 GiB 300 46w 36 GiB 400 62w 48 GiB 500 78w 60 GiB 800 125w 94 GiB 1000 156w 120 GiB 2000 312w 235 GiB 3000 468w 350 GiB"},{"location":"en/end-user/insight/quickstart/res-plan/vms-res-plan.html#example","title":"Example","text":"

There are two clusters in the AI platform: the global management cluster (service mesh enabled) runs 500 Pods, the worker cluster (service mesh not enabled) runs 1000 Pods, and the metrics are expected to be stored for 30 days.

                                                                                  • The number of metrics in the global management cluster is 800x500 + 768x500 = 784000
                                                                                  • Worker cluster metrics are 800x1000 = 800000

Then the vmstorage disk capacity should be set to (784000+800000)x2x0.9x60x24x30 = 123171840000 bytes ≈ 115 GiB

                                                                                  Note

                                                                                  For the relationship between the number of metrics and the number of Pods in the cluster, please refer to Prometheus Resource Planning.

                                                                                  "},{"location":"en/end-user/insight/system-config/modify-config.html","title":"Modify system configuration","text":"

Observability persists metrics, logs, and trace data by default. Users can modify the system configuration as described on this page.

                                                                                  "},{"location":"en/end-user/insight/system-config/modify-config.html#how-to-modify-the-metric-data-retention-period","title":"How to modify the metric data retention period","text":"

                                                                                  Refer to the following steps to modify the metric data retention period.

1. Run the following command:

                                                                                    kubectl edit vmcluster insight-victoria-metrics-k8s-stack -n insight-system\n
2. In the YAML file, the default value of retentionPeriod is 14 , and the unit is days . Modify the parameter according to your needs.

                                                                                    apiVersion: operator.victoriametrics.com/v1beta1\nkind: VMCluster\nmetadata:\n  annotations:\n    meta.helm.sh/release-name: insight\n    meta.helm.sh/release-namespace: insight-system\n  creationTimestamp: \"2022-08-25T04:31:02Z\"\n  finalizers:\n  - apps.victoriametrics.com/finalizer\n  generation: 2\n  labels:\n    app.kubernetes.io/instance: insight\n    app.kubernetes.io/managed-by: Helm\n    app.kubernetes.io/name: victoria-metrics-k8s-stack\n    app.kubernetes.io/version: 1.77.2\n    helm.sh/chart: victoria-metrics-k8s-stack-0.9.3\n  name: insight-victoria-metrics-k8s-stack\n  namespace: insight-system\n  resourceVersion: \"123007381\"\n  uid: 55cee8d6-c651-404b-b2c9-50603b405b54\nspec:\n  replicationFactor: 1\n  retentionPeriod: \"14\"\n  vminsert:\n    extraArgs:\n      maxLabelsPerTimeseries: \"45\"\n    image:\n      repository: docker.m.daocloud.io/victoriametrics/vminsert\n      tag: v1.80.0-cluster\n      replicaCount: 1\n
3. After saving the modification, the pods of the component responsible for storing the metrics will restart automatically; just wait a moment.
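You can watch the restart from the command line:

# Watch the vmstorage pods restart in the insight-system namespace\nkubectl -n insight-system get pods -w | grep vmstorage\n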

                                                                                  "},{"location":"en/end-user/insight/system-config/modify-config.html#how-to-modify-the-log-data-storage-duration","title":"How to modify the log data storage duration","text":"

                                                                                  Refer to the following steps to modify the log data retention period:

                                                                                  "},{"location":"en/end-user/insight/system-config/modify-config.html#method-1-modify-the-json-file","title":"Method 1: Modify the Json file","text":"
1. Modify the max_age parameter in the rollover field in the following request and set the retention period. The default storage period is 7d . Change http://localhost:9200 to the access address of Elasticsearch.

                                                                                    curl -X PUT \"http://localhost:9200/_ilm/policy/insight-es-k8s-logs-policy?pretty\" -H 'Content-Type: application/json' -d'\n{\n\"policy\": {\n    \"phases\": {\n        \"hot\": {\n            \"min_age\": \"0ms\",\n            \"actions\": {\n            \"set_priority\": {\n                \"priority\": 100\n            },\n            \"rollover\": {\n                \"max_age\": \"7d\",\n                \"max_size\": \"10gb\"\n            }\n            }\n        },\n    \"warm\": {\n        \"min_age\": \"10d\",\n        \"actions\": {\n        \"forcemerge\": {\n            \"max_num_segments\": 1\n        }\n        }\n    },\n    \"delete\": {\n        \"min_age\": \"30d\",\n        \"actions\": {\n        \"delete\": {}\n        }\n    }\n    }\n}\n}\n
2. After making the modification, run the above command. If it prints the following output, the modification was successful.

                                                                                    {\n\"acknowledged\": true\n}\n
                                                                                  "},{"location":"en/end-user/insight/system-config/modify-config.html#method-2-modify-from-the-ui","title":"Method 2: Modify from the UI","text":"
1. Log in to Kibana and select Stack Management in the left navigation bar.

2. Select Index Lifecycle Policies in the left navigation, find the index policy insight-es-k8s-logs-policy , and click it to enter the details.

3. Expand the Hot phase configuration panel and modify the Maximum age parameter to set the retention period. The default storage period is 7d.

4. After making the change, click Save policy at the bottom of the page to complete the modification.

                                                                                  "},{"location":"en/end-user/insight/system-config/modify-config.html#how-to-modify-the-trace-data-storage-duration","title":"How to modify the trace data storage duration","text":"

                                                                                  Refer to the following steps to modify the trace data retention period:

                                                                                  "},{"location":"en/end-user/insight/system-config/modify-config.html#method-1-modify-the-json-file_1","title":"Method 1: Modify the Json file","text":"
1. Modify the max_age parameter in the rollover field of the following request and set the retention period. The default storage period is 7d. Also change http://localhost:9200 to the access address of Elasticsearch.

                                                                                    curl -X PUT \"http://localhost:9200/_ilm/policy/jaeger-ilm-policy?pretty\" -H 'Content-Type: application/json' -d'\n{\n\"policy\": {\n    \"phases\": {\n        \"hot\": {\n            \"min_age\": \"0ms\",\n            \"actions\": {\n            \"set_priority\": {\n                \"priority\": 100\n            },\n            \"rollover\": {\n                \"max_age\": \"7d\",\n                \"max_size\": \"10gb\"\n            }\n            }\n        },\n    \"warm\": {\n        \"min_age\": \"10d\",\n        \"actions\": {\n        \"forcemerge\": {\n            \"max_num_segments\": 1\n        }\n        }\n    },\n    \"delete\": {\n        \"min_age\": \"30d\",\n        \"actions\": {\n        \"delete\": {}\n        }\n    }\n    }\n}\n}\n
2. After making the modification, run the above command on the console. If it prints the following output, the modification was successful.

                                                                                    {\n\"acknowledged\": true\n}\n
                                                                                  "},{"location":"en/end-user/insight/system-config/modify-config.html#method-2-modify-from-the-ui_1","title":"Method 2: Modify from the UI","text":"
1. Log in to Kibana and select Stack Management in the left navigation bar.

2. Select Index Lifecycle Policies in the left navigation, find the index policy jaeger-ilm-policy , and click it to enter the details.

3. Expand the Hot phase configuration panel and modify the Maximum age parameter to set the retention period. The default storage period is 7d.

4. After making the change, click Save policy at the bottom of the page to complete the modification.

                                                                                  "},{"location":"en/end-user/insight/system-config/system-component.html","title":"System Components","text":"

                                                                                  On the system component page, you can quickly view the running status of the system components in Insight. When a system component fails, some features in Insight will be unavailable.

1. Go to the Insight product module.
2. In the left navigation bar, select System Management -> System Components .
                                                                                  "},{"location":"en/end-user/insight/system-config/system-component.html#component-description","title":"Component description","text":"Module Component Name Description Metrics vminsert-insight-victoria-metrics-k8s-stack Responsible for writing the metric data collected by Prometheus in each cluster to the storage component. If this component is abnormal, the metric data of the worker cluster cannot be written. Metrics vmalert-insight-victoria-metrics-k8s-stack Responsible for taking effect of the recording and alert rules configured in the VM Rule, and sending the triggered alert rules to alertmanager. Metrics vmalertmanager-insight-victoria-metrics-k8s-stack is responsible for sending messages when alerts are triggered. If this component is abnormal, the alert information cannot be sent. Metrics vmselect-insight-victoria-metrics-k8s-stack Responsible for querying metrics data. If this component is abnormal, the metric cannot be queried. Metrics vmstorage-insight-victoria-metrics-k8s-stack Responsible for storing multicluster metrics data. Dashboard grafana-deployment Provide monitoring panel capability. The exception of this component will make it impossible to view the built-in dashboard. Link insight-jaeger-collector Responsible for receiving trace data in opentelemetry-collector and storing it. Link insight-jaeger-query Responsible for querying the trace data collected in each cluster. Link insight-opentelemetry-collector Responsible for receiving trace data forwarded by each sub-cluster Log elasticsearch Responsible for storing the log data of each cluster."},{"location":"en/end-user/insight/system-config/system-config.html","title":"System Configuration","text":"

                                                                                  System Configuration displays the default storage time of metrics, logs, traces and the default Apdex threshold.

1. In the left navigation bar, select System Management -> System Configuration .

2. Currently, only the storage duration of historical alerts can be modified. Click Edit and enter the target duration.

When the storage duration is set to \"0\", historical alerts are never cleared.

                                                                                  Note

To modify other configurations, see How to modify the system configuration?

                                                                                  "},{"location":"en/end-user/insight/trace/service.html","title":"Service Insight","text":"

In Insight , a service refers to a group of workloads that provide the same behavior for incoming requests. Service insight helps you observe the performance and status of applications at runtime by using the OpenTelemetry SDK.

                                                                                  For how to use OpenTelemetry, please refer to: Using OTel to give your application insight.

                                                                                  "},{"location":"en/end-user/insight/trace/service.html#glossary","title":"Glossary","text":"
• Service: A service represents a group of workloads that provide the same behavior for incoming requests. You can define the service name when using the OpenTelemetry SDK (see the example after this list) or use the name defined in Istio.
                                                                                  • Operation: An operation refers to a specific request or action handled by a service. Each span has an operation name.
                                                                                  • Outbound Traffic: Outbound traffic refers to all the traffic generated by the current service when making requests.
                                                                                  • Inbound Traffic: Inbound traffic refers to all the traffic initiated by the upstream service targeting the current service.
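As an example of defining the service name mentioned in the glossary above, most OpenTelemetry SDKs honor the standard environment variables; the values below are placeholders:

  # Set the service name and namespace reported by the OpenTelemetry SDK
  export OTEL_SERVICE_NAME=my-service
  export OTEL_RESOURCE_ATTRIBUTES="k8s.namespace.name=my-namespace"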
                                                                                  "},{"location":"en/end-user/insight/trace/service.html#steps","title":"Steps","text":"

                                                                                  The Services List page displays key metrics such as throughput rate, error rate, and request latency for all services that have been instrumented with distributed tracing. You can filter services based on clusters or namespaces and sort the list by throughput rate, error rate, or request latency. By default, the data displayed in the list is for the last hour, but you can customize the time range.

                                                                                  Follow these steps to view service insight metrics:

                                                                                  1. Go to the Insight product module.

2. Select Tracing -> Services from the left navigation bar.

                                                                                    Attention

                                                                                    1. If the namespace of a service in the list is unknown , it means that the service has not been properly instrumented. We recommend reconfiguring the instrumentation.
                                                                                    2. If multiple services have the same name and none of them have the correct Namespace environment variable configured, the metrics displayed in the list and service details page will be aggregated for all those services.
                                                                                  3. Click a service name (taking insight-system as an example) to view the detailed metrics and operation metrics for that service.

                                                                                    1. In the Service Topology section, you can view the service topology one layer above or below the current service. When you hover over a node, you can see its information.
                                                                                    2. In the Traffic Metrics section, you can view the monitoring metrics for all requests to the service within the past hour (including inbound and outbound traffic).
                                                                                    3. You can use the time selector in the upper right corner to quickly select a time range or specify a custom time range.
                                                                                    4. Sorting is available for throughput, error rate, and request latency in the operation metrics.
                                                                                    5. Clicking on the icon next to an individual operation will take you to the Traces page to quickly search for related traces.

                                                                                  "},{"location":"en/end-user/insight/trace/service.html#service-metric-explanations","title":"Service Metric Explanations","text":"Metric Description Throughput Rate The number of requests processed within a unit of time. Error Rate The ratio of erroneous requests to the total number of requests within the specified time range. P50 Request Latency The response time within which 50% of requests complete. P95 Request Latency The response time within which 95% of requests complete. P99 Request Latency The response time within which 99% of requests complete."},{"location":"en/end-user/insight/trace/topology-helper.html","title":"Service Topology Element Explanations","text":"

                                                                                  The service topology provided by Observability allows you to quickly identify the request relationships between services and determine the health status of services based on different colors. The health status is determined based on the request latency and error rate of the service's overall traffic. This article explains the elements in the service topology.

                                                                                  "},{"location":"en/end-user/insight/trace/topology-helper.html#node-status-explanation","title":"Node Status Explanation","text":"

                                                                                  The node health status is determined based on the error rate and request latency of the service's overall traffic, following these rules:

| Color | Status | Rules |
| --- | --- | --- |
| Gray | Healthy | Error rate is 0% and request latency is less than 100ms |
| Orange | Warning | Error rate in (0, 5%) or request latency in (100ms, 200ms) |
| Red | Abnormal | Error rate in (5%, 100%) or request latency in (200ms, +Infinity) |

"},{"location":"en/end-user/insight/trace/topology-helper.html#connection-status-explanation","title":"Connection Status Explanation","text":"

| Color | Status | Rules |
| --- | --- | --- |
| Green | Healthy | Error rate is 0% and request latency is less than 100ms |
| Orange | Warning | Error rate in (0, 5%) or request latency in (100ms, 200ms) |
| Red | Abnormal | Error rate in (5%, 100%) or request latency in (200ms, +Infinity) |

"},{"location":"en/end-user/insight/trace/topology.html","title":"Service Map","text":"

                                                                                  Service map is a visual representation of the connections, communication, and dependencies between services. It provides insights into the service-to-service interactions, allowing you to view the calls and performance of services within a specified time range. The connections between nodes in the topology map represent the existence of service-to-service calls during the queried time period.

                                                                                  "},{"location":"en/end-user/insight/trace/topology.html#prerequisites","title":"Prerequisites","text":"
                                                                                  1. Insight Agent is installed in the cluster and the applications are in the Running state.
                                                                                  2. Services have been instrumented for distributed tracing using Operator or OpenTelemetry SDK.
                                                                                  "},{"location":"en/end-user/insight/trace/topology.html#steps","title":"Steps","text":"
                                                                                  1. Go to the Insight product module.

                                                                                  2. Select Tracing -> Service Map from the left navigation bar.

                                                                                  3. In the Service Map, you can perform the following actions:

                                                                                    • Click a node to slide out the details of the service on the right side. Here, you can view metrics such as request latency, throughput, and error rate for the service. Clicking on the service name takes you to the service details page.
                                                                                    • Hover over the connections to view the traffic metrics between the two services.
• Click Display Settings to configure which elements are displayed in the service map.

                                                                                  "},{"location":"en/end-user/insight/trace/topology.html#other-nodes","title":"Other Nodes","text":"

                                                                                  In the Service Map, there can be nodes that are not part of the cluster. These external nodes can be categorized into three types:

                                                                                  • Database
                                                                                  • Message Queue
                                                                                  • Virtual Node

                                                                                  • If a service makes a request to a Database or Message Queue, these two types of nodes will be displayed by default in the topology map. However, Virtual Nodes represent nodes outside the cluster or services not integrated into the trace, and they will not be displayed by default in the map.

                                                                                  • When a service makes a request to MySQL, PostgreSQL, or Oracle Database, the detailed database type can be seen in the map.

                                                                                  "},{"location":"en/end-user/insight/trace/topology.html#enabling-virtual-nodes","title":"Enabling Virtual Nodes","text":"
1. Update the insight-server chart values, locate the parameter that controls virtual nodes (false by default), and change it to true.
2. In the display settings of the service map, check the Virtual Services option to enable it.
                                                                                  "},{"location":"en/end-user/insight/trace/trace.html","title":"Trace Query","text":"

                                                                                  On the trace query page, you can query detailed information about a call trace by TraceID or filter call traces based on various conditions.

                                                                                  "},{"location":"en/end-user/insight/trace/trace.html#glossary","title":"Glossary","text":"
                                                                                  • TraceID: Used to identify a complete request call trace.
                                                                                  • Operation: Describes the specific operation or event represented by a Span.
                                                                                  • Entry Span: The entry Span represents the first request of the entire call.
                                                                                  • Latency: The duration from receiving the request to completing the response for the entire call trace.
                                                                                  • Span: The number of Spans included in the entire trace.
                                                                                  • Start Time: The time when the current trace starts.
                                                                                  • Tag: A collection of key-value pairs that constitute Span tags. Tags are used to annotate and supplement Spans, and each Span can have multiple key-value tag pairs.
                                                                                  "},{"location":"en/end-user/insight/trace/trace.html#steps","title":"Steps","text":"

                                                                                  Please follow these steps to search for a trace:

                                                                                  1. Go to the Insight product module.
                                                                                  2. Select Tracing -> Traces from the left navigation bar.

                                                                                    Note

                                                                                    Sorting by Span, Latency, and Start At is supported in the list.

                                                                                  3. Click the TraceID Query in the filter bar to switch to TraceID search.

                                                                                  4. To search using TraceID, please enter the complete TraceID.

                                                                                  "},{"location":"en/end-user/insight/trace/trace.html#other-operations","title":"Other Operations","text":""},{"location":"en/end-user/insight/trace/trace.html#view-trace-details","title":"View Trace Details","text":"
                                                                                  1. Click the TraceID of a trace in the trace list to view its detailed call information.

                                                                                  "},{"location":"en/end-user/insight/trace/trace.html#associated-logs","title":"Associated Logs","text":"
                                                                                  1. Click the icon on the right side of the trace data to search for associated logs.

                                                                                    • By default, it queries the log data within the duration of the trace and one minute after its completion.
                                                                                    • The queried logs include those with the trace's TraceID in their log text and container logs related to the trace invocation process.
                                                                                  2. Click View More to jump to the Associated Log page with conditions.

                                                                                  3. By default, all logs are searched, but you can filter by the TraceID or the relevant container logs from the trace call process using the dropdown.

                                                                                    Note

Since a trace may span clusters or namespaces, users without sufficient permissions will be unable to query the associated logs for that trace.

                                                                                  "},{"location":"en/end-user/k8s/add-node.html","title":"Adding Worker Nodes","text":"

                                                                                  If there are not enough nodes, you can add more nodes to the cluster.

                                                                                  "},{"location":"en/end-user/k8s/add-node.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • The AI platform is installed
                                                                                  • An administrator account is available
                                                                                  • A cluster with GPU nodes has been created
                                                                                  • A cloud host has been prepared
                                                                                  "},{"location":"en/end-user/k8s/add-node.html#steps-to-add-nodes","title":"Steps to Add Nodes","text":"
                                                                                  1. Log into the AI platform as an administrator.
                                                                                  2. Navigate to Container Management -> Clusters, and click the name of the target cluster.

                                                                                  3. On the cluster overview page, click Nodes, then click the Add Node button on the right.

                                                                                  4. Follow the wizard to fill in the parameters and click OK.

                                                                                  5. In the pop-up window, click OK.

                                                                                  6. Return to the node list; the status of the newly added node will be Connecting. After a few minutes, when the status changes to Running, it indicates that the connection was successful.

                                                                                  Tip

                                                                                  For newly connected nodes, it may take an additional 2-3 minutes to recognize the GPU.
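To check from the command line whether the GPU has been recognized, you can inspect the node's allocatable resources; the node name is a placeholder, and the exact GPU resource name depends on the vendor plugin (for example nvidia.com/gpu):

  kubectl get node <node-name> -o jsonpath='{.status.allocatable}'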

                                                                                  "},{"location":"en/end-user/k8s/create-k8s.html","title":"Creating a Kubernetes Cluster in the Cloud","text":"

                                                                                  Deploying a Kubernetes cluster is aimed at supporting efficient AI computing resource scheduling and management, achieving elastic scaling, providing high availability, and optimizing the model training and inference processes.

                                                                                  "},{"location":"en/end-user/k8s/create-k8s.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • The AI platform is installed
                                                                                  • An administrator account is available
                                                                                  • A physical machine with a GPU is prepared
• Two IP segments are allocated (Pod CIDR /18 and SVC CIDR /18, each providing 16,384 addresses; they must not conflict with existing networks)
                                                                                  "},{"location":"en/end-user/k8s/create-k8s.html#steps-to-create","title":"Steps to Create","text":"
                                                                                  1. Log into the AI platform as an administrator.
                                                                                  2. Create and launch 3 cloud hosts without GPU to serve as Master nodes for the cluster.

                                                                                    • Configure resources: 16 CPU cores, 32 GB RAM, 200 GB system disk (ReadWriteOnce)
                                                                                    • Select Bridge network mode
                                                                                    • Set the root password or add an SSH public key for SSH connection
                                                                                    • Record the IPs of the 3 hosts
                                                                                  3. Navigate to Container Management -> Clusters, and click the Create Cluster button on the right.

                                                                                  4. Follow the wizard to configure various parameters of the cluster.

                                                                                  5. Wait for the cluster creation to complete.

                                                                                  6. In the cluster list, find the newly created cluster, click the cluster name, navigate to Helm Apps -> Helm Charts, and search for metax-gpu-extensions in the search box, then click the card.

                                                                                  7. Click the Install button on the right to start installing the GPU plugin.

                                                                                  8. Automatically return to the Helm App list and wait for the status of metax-gpu-extensions to change to Deployed.

                                                                                  9. At this point, the cluster has been successfully created. You can check the nodes included in the cluster. You can now create AI workloads and use GPUs.
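If you also have command-line access to the new cluster, a quick sanity check might look like the following; this sketch assumes kubectl and helm are configured against the cluster:

  # Confirm all nodes are Ready
  kubectl get nodes -o wide
  # Confirm the GPU plugin release is deployed
  helm list -A | grep metax-gpu-extensions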

                                                                                  Next step: Create AI Workloads

                                                                                  "},{"location":"en/end-user/k8s/remove-node.html","title":"Removing GPU Worker Nodes","text":"

                                                                                  The cost of GPU resources is relatively high. If GPUs are not needed temporarily, you can remove the worker nodes with GPUs. The following steps also apply to removing regular worker nodes.

                                                                                  "},{"location":"en/end-user/k8s/remove-node.html#prerequisites","title":"Prerequisites","text":"
                                                                                  • The AI platform is installed
                                                                                  • An administrator account is available
                                                                                  • A cluster with GPU nodes has been created
                                                                                  "},{"location":"en/end-user/k8s/remove-node.html#steps-to-remove","title":"Steps to Remove","text":"
                                                                                  1. Log into the AI platform as an administrator.
                                                                                  2. Navigate to Container Management -> Clusters, and click the name of the target cluster.

3. On the cluster overview page, click Nodes , find the node to be removed, click the ┇ on the right side of the list, and select Remove Node from the pop-up menu.

                                                                                  4. In the pop-up window, enter the node name, and after confirming it is correct, click Delete.

                                                                                  5. You will automatically return to the node list, where the status will be Removing. After a few minutes, refresh the page, and if the node is no longer present, it indicates that the node has been successfully removed.

                                                                                  6. After removing the node from the UI list, SSH into the removed node's host and execute the shutdown command.
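For example, on a typical Linux host:

  sudo shutdown -h now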

                                                                                  Tip

                                                                                  After removing the node in the UI and shutting it down, the data on the node is not immediately deleted; the node's data will be retained for a period of time.

                                                                                  "},{"location":"en/end-user/kpanda/backup/index.html","title":"Backup and Restore","text":"

                                                                                  Backup and restore are essential aspects of system management. In practice, it is important to first back up the data of the system at a specific point in time and securely store the backup. In case of incidents such as data corruption, loss, or accidental deletion, the system can be quickly restored based on the previous backup data, reducing downtime and minimizing losses.

                                                                                  • In real production environments, services may be deployed across different clouds, regions, or availability zones. If one infrastructure faces a failure, organizations need to quickly restore applications in other available environments. In such cases, cross-cloud or cross-cluster backup and restore become crucial.
                                                                                  • Large-scale systems often involve multiple roles and users with complex permission management systems. With many operators involved, accidents caused by human error can lead to system failures. In such scenarios, the ability to roll back the system quickly using previously backed-up data is necessary. Relying solely on manual troubleshooting, fault repair, and system recovery can be time-consuming, resulting in prolonged system unavailability and increased losses for organizations.
                                                                                  • Additionally, factors like network attacks, natural disasters, and equipment malfunctions can also cause data accidents.

                                                                                  Therefore, backup and restore are vital as the last line of defense for maintaining system stability and ensuring data security.

                                                                                  Backups are typically classified into three types: full backups, incremental backups, and differential backups. Currently, AI platform supports full backups and incremental backups.

                                                                                  The backup and restore provided by AI platform can be divided into two categories: Application Backup and ETCD Backup. It supports both manual backups and scheduled automatic backups using CronJobs.

                                                                                  • Application Backup

                                                                                    Application backup refers to backing up data of a specific workload in the cluster and then restoring that data either within the same cluster or in another cluster. It supports backing up all resources under a namespace or filtering resources by specific labels.

                                                                                    Application backup also supports cross-cluster backup of stateful applications. For detailed steps, refer to the Backup and Restore MySQL Applications and Data Across Clusters guide.

                                                                                  • etcd Backup

                                                                                    etcd is the data storage component of Kubernetes. Kubernetes stores its own component's data and application data in etcd. Therefore, backing up etcd is equivalent to backing up the entire cluster's data, allowing quick restoration of the cluster to a previous state in case of failures.

                                                                                    It's worth noting that currently, restoring etcd backup data is only supported within the same cluster (the original cluster). To learn more about related best practices, refer to the ETCD Backup and Restore guide.

                                                                                  "},{"location":"en/end-user/kpanda/backup/deployment.html","title":"Application Backup","text":"

This article explains how to back up applications in AI platform. The demo application used in this tutorial is called dao-2048 , which is a deployment.

                                                                                  "},{"location":"en/end-user/kpanda/backup/deployment.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before backing up a deployment, the following prerequisites must be met:

                                                                                  • Integrate a Kubernetes cluster or create a Kubernetes cluster in the Container Management module, and be able to access the UI interface of the cluster.

                                                                                  • Create a Namespace and a user.

• The current operating user should have NS Editor or higher permissions; for details, refer to Namespace Authorization.

                                                                                  • Install the velero component, and ensure the velero component is running properly.

                                                                                  • Create a deployment (the workload in this tutorial is named dao-2048 ), and label the deployment with app: dao-2048 .
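If the label is missing, it can be added from the command line; the namespace below is a placeholder:

  kubectl label deployment dao-2048 app=dao-2048 -n <namespace>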

                                                                                  "},{"location":"en/end-user/kpanda/backup/deployment.html#backup-workload","title":"Backup workload","text":"

                                                                                  Follow the steps below to backup the deployment dao-2048 .

                                                                                  1. Enter the Container Management module, click Backup Recovery -> Application Backup on the left navigation bar, and enter the Application Backup list page.

2. On the Application Backup list page, select the cluster where the velero and dao-2048 applications have been installed. Click Backup Plan in the upper right corner to create a new backup plan.

                                                                                  3. Refer to the instructions below to fill in the backup configuration.

                                                                                    • Name: The name of the new backup plan.
                                                                                    • Source Cluster: The cluster where the application backup plan is to be executed.
                                                                                    • Object Storage Location: The access path of the object storage configured when installing velero on the source cluster.
                                                                                    • Namespace: The namespaces that need to be backed up, multiple selections are supported.
• Advanced Configuration: Use resource labels to back up only specific resources in the namespace (such as a particular application), or to exclude specific resources from the backup.

                                                                                  4. Refer to the instructions below to set the backup execution frequency, and then click Next .

• Backup Frequency: Set the time period for task execution in terms of minutes, hours, days, weeks, and months. Custom Cron expressions with numbers and * are supported; after you enter an expression, its meaning is displayed. For detailed expression syntax rules, refer to Cron Schedule Syntax; see also the examples after this list.
                                                                                    • Retention Time (days): Set the storage time of backup resources, the default is 30 days, and will be deleted after expiration.
• Backup Data Volume (PV): Whether to back up the data in data volumes (PV); direct copy and CSI snapshots are supported.

                                                                                      • Direct Replication: directly copy the data in the data volume (PV) for backup;
                                                                                      • Use CSI snapshots: Use CSI snapshots to back up data volumes (PVs). Requires a CSI snapshot type available for backup in the cluster.
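For reference, standard five-field Cron expressions look like the following; these examples are illustrative:

  # minute hour day-of-month month day-of-week
  0 2 * * *     # every day at 02:00
  0 */6 * * *   # every 6 hours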

5. Click OK . The page automatically returns to the application backup plan list; find the newly created dao-2048 backup plan and perform the Immediate Execution operation.

                                                                                  6. At this point, the Last Execution State of the cluster will change to in progress . After the backup is complete, you can click the name of the backup plan to view the details of the backup plan.

                                                                                  "},{"location":"en/end-user/kpanda/backup/etcd-backup.html","title":"etcd backup","text":"

etcd backup takes cluster data as the core of the backup. In cases such as hardware damage or misconfiguration in development and testing, cluster data can be restored through an etcd backup.

This section introduces how to create etcd backups for clusters. Also see etcd Backup and Restore Best Practices.

                                                                                  "},{"location":"en/end-user/kpanda/backup/etcd-backup.html#prerequisites","title":"Prerequisites","text":"
• Integrated or created a Kubernetes cluster, with access to the cluster's UI interface.

                                                                                  • Created a namespace, user, and granted NS Admin or higher permissions to the user. For details, refer to Namespace Authorization.

                                                                                  • Prepared a MinIO instance. It is recommended to create it through AI platform's MinIO middleware.

                                                                                  "},{"location":"en/end-user/kpanda/backup/etcd-backup.html#create-etcd-backup","title":"Create etcd backup","text":"

                                                                                  Follow the steps below to create an etcd backup.

                                                                                  1. Enter Container Management -> Backup Recovery -> etcd Backup page, you can see all the current backup policies. Click Create Backup Policy on the right.

                                                                                  2. Fill in the Basic Information. Then, click Next to automatically verify the connectivity of etcd. If the verification passes, proceed to the next step.

• First select the backup cluster and log in to its terminal.
• Enter the etcd access address in the format https://${NodeIP}:${Port}.

                                                                                      • In a standard Kubernetes cluster, the default port for etcd is 2379.
                                                                                      • In a Suanova 4.0 cluster, the default port for etcd is 12379.
                                                                                      • In a public cloud managed cluster, you need to contact the relevant developers to obtain the etcd port number. This is because the control plane components of public cloud clusters are maintained and managed by the cloud service provider. Users cannot directly access or view these components, nor can they obtain control plane port information through regular commands (such as kubectl).
                                                                                      Ways to obtain port number
                                                                                      1. Find the etcd Pod in the kube-system namespace

                                                                                        kubectl get po -n kube-system | grep etcd\n
                                                                                      2. Get the port number from the listen-client-urls of the etcd Pod

                                                                                        kubectl get po -n kube-system ${etcd_pod_name} -oyaml | grep listen-client-urls # (1)!\n
                                                                                        1. Replace etcd_pod_name with the actual Pod name

                                                                                        The expected output is as follows, where the number after the node IP is the port number:

                                                                                        - --listen-client-urls=https://127.0.0.1:2379,https://10.6.229.191:2379\n
• Fill in the CA certificate. You can use the following command to view the certificate content, then copy and paste it into the proper location:

Standard Kubernetes Cluster / Suanova 4.0 Cluster
                                                                                      cat /etc/kubernetes/ssl/etcd/ca.crt\n
                                                                                      cat /etc/daocloud/dce/certs/ca.crt\n
• Fill in the Cert certificate. You can use the following command to view the certificate content, then copy and paste it into the proper location:

Standard Kubernetes Cluster / Suanova 4.0 Cluster
                                                                                      cat /etc/kubernetes/ssl/apiserver-etcd-client.crt\n
                                                                                      cat /etc/daocloud/dce/certs/etcd/server.crt\n
• Fill in the Key. You can use the following command to view the key content, then copy and paste it into the proper location:

Standard Kubernetes Cluster / Suanova 4.0 Cluster
                                                                                      cat /etc/kubernetes/ssl/apiserver-etcd-client.key\n
                                                                                      cat /etc/daocloud/dce/certs/etcd/server.key\n

                                                                                    Note

                                                                                    Click How to get below the input box to see how to obtain the proper information on the UI page.
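For reference only (the backup policy above performs this for you), a manual snapshot with etcdctl on a standard Kubernetes cluster would combine the same endpoint and certificates; the output path is a placeholder:

  ETCDCTL_API=3 etcdctl snapshot save /tmp/etcd-backup.db \
    --endpoints=https://127.0.0.1:2379 \
    --cacert=/etc/kubernetes/ssl/etcd/ca.crt \
    --cert=/etc/kubernetes/ssl/apiserver-etcd-client.crt \
    --key=/etc/kubernetes/ssl/apiserver-etcd-client.key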

                                                                                  3. Refer to the following information to fill in the Backup Policy.

                                                                                    • Backup Method: Choose either manual backup or scheduled backup

                                                                                      • Manual Backup: Immediately perform a full backup of etcd data based on the backup configuration.
                                                                                      • Scheduled Backup: Periodically perform full backups of etcd data according to the set backup frequency.
• Backup Chain Length: The maximum number of backups to retain. The default is 30.

                                                                                    • Backup Frequency: it can be per hour, per day, per week or per month, and can also be customized.
                                                                                  4. Refer to the following information to fill in the Storage Path.

                                                                                    • Storage Provider: Default is S3 storage
                                                                                    • Object Storage Access Address: The access address of MinIO
                                                                                    • Bucket: Create a Bucket in MinIO and fill in the Bucket name
                                                                                    • Username: The login username for MinIO
                                                                                    • Password: The login password for MinIO
                                                                                  5. After clicking OK , the page will automatically redirect to the backup policy list, where you can view all the currently created ones.

• Click the ┇ action button on the right side of the policy to view logs, view the YAML, update the policy, stop the policy, or execute the policy immediately.
                                                                                    • When the backup method is manual, you can click Execute Now to perform the backup.
                                                                                    • When the backup method is scheduled, the backup will be performed according to the configured time.
                                                                                  "},{"location":"en/end-user/kpanda/backup/etcd-backup.html#view-backup-policy-logs","title":"View Backup Policy Logs","text":"

                                                                                  Click Logs to view the log content. By default, 100 lines are displayed. If you want to see more log information or download the logs, you can follow the prompts above the logs to go to the observability module.

                                                                                  "},{"location":"en/end-user/kpanda/backup/etcd-backup.html#view-backup-policy-details","title":"View Backup POlicy Details","text":"

                                                                                  Go to Container Management -> Backup Recovery -> etcd Backup , click the Backup Policy tab, and then click the policy to view the details.

                                                                                  "},{"location":"en/end-user/kpanda/backup/etcd-backup.html#view-recovery-point","title":"View Recovery Point","text":"
                                                                                  1. Go to Container Management -> Backup Recovery -> etcd Backup, and click the Recovery Point tab.
                                                                                  2. After selecting the target cluster, you can view all the backup information under that cluster.

                                                                                    Each time a backup is executed, a proper recovery point is generated, which can be used to quickly restore the application from a successful recovery point.

                                                                                  "},{"location":"en/end-user/kpanda/backup/install-velero.html","title":"Install the Velero Plugin","text":"

                                                                                  velero is an open source tool for backing up and restoring Kubernetes cluster resources. It can back up resources in a Kubernetes cluster to cloud storage services, local storage, or other locations, and restore those resources to the same or a different cluster when needed.

                                                                                  This section introduces how to deploy the Velero plugin in AI platform using the Helm Apps.

                                                                                  "},{"location":"en/end-user/kpanda/backup/install-velero.html#prerequisites","title":"Prerequisites","text":"

                                                                                  Before installing the velero plugin, the following prerequisites need to be met:

                                                                                  • Integrated the Kubernetes cluster or created the Kubernetes cluster, and you can access the UI interface of the cluster.
• Created a velero namespace (see the command after this list).
                                                                                  • You should have permissions not lower than NS Editor. For details, refer to Namespace Authorization.
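The velero namespace from the prerequisites can be created with a single command:

  kubectl create namespace velero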
                                                                                  "},{"location":"en/end-user/kpanda/backup/install-velero.html#steps","title":"Steps","text":"

                                                                                  Please perform the following steps to install the velero plugin for your cluster.

1. On the cluster list page, find the target cluster on which to install the velero plugin, click the cluster name, click Helm Apps -> Helm Charts in the left navigation bar, and enter velero in the search bar to search.

2. Read the introduction of the velero plugin, select a version, and click the Install button. This page takes version 5.2.0 as an example; installing version 5.2.0 or later is recommended.

                                                                                  3. Configure basic info .

• Name: Enter the plugin name. Note that the name can be up to 63 characters, may only contain lowercase letters, numbers, and hyphens (\"-\"), and must start and end with a lowercase letter or number, for example metrics-server-01.
                                                                                    • Namespace: Select the namespace for plugin installation, it must be velero namespace.
                                                                                    • Version: The version of the plugin, here we take 5.2.0 version as an example.
                                                                                    • Wait: When enabled, it will wait for all associated resources under the application to be ready before marking the application installation as successful.
                                                                                    • Deletion Failed: After it is enabled, the synchronization will be enabled by default and ready to wait. If the installation fails, the installation-related resources will be removed.
                                                                                    • Detailed Logs: Turn on the verbose output of the installation process log.

                                                                                    !!! note

After enabling __Ready Wait__ and/or __Failed Delete__, it takes a long time for the app to be marked as __Running__.\n
4. Configure the Velero chart parameter settings according to the following instructions.

• S3 Credentials: Configure the authentication information for the object storage (minio).

  • Use secret: Keep the default configuration true.
  • Secret name: Keep the default configuration velero-s3-credential.
  • SecretContents.aws_access_key_id = : Configure the username for accessing the object storage; replace it with the actual value.
  • SecretContents.aws_secret_access_key = : Configure the password for accessing the object storage; replace it with the actual value.

An example of the existing secret parameter is as follows:

                                                                                        [default]\naws_access_key_id = minio\naws_secret_access_key = minio123\n
• BackupStorageLocation: The location where Velero backs up data.

  • S3 bucket: The name of the bucket used to save backup data (must be a real bucket that already exists in minio).
  • Is default BackupStorage: Keep the default configuration true.
  • S3 access mode: Velero's access mode to the data, one of:
    • ReadWrite: Velero can read and write backup data;
    • ReadOnly: Velero can read backup data but cannot modify it;
    • WriteOnly: Velero can only write backup data and cannot read it.
• S3 Configs: Detailed configuration of the S3 storage (minio).

  • S3 region: The geographical region of the cloud storage. The default is us-east-1, provided by the system administrator.
  • S3 force path style: Keep the default configuration true.
  • S3 server URL: The console access address of the object storage (minio). Minio generally provides two services, UI access and console access; use the console access address here.

• Click the OK button to complete the installation of the Velero plugin. The system will automatically redirect to the Helm Apps list page. Wait a few minutes, refresh the page, and you will see the newly installed application.
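If you prefer to create the credentials secret manually before installing the chart, the following is a minimal sketch. It assumes the credentials shown above are saved in a local file named credentials-velero and that the chart reads the standard Velero secret key cloud; both the file name and the key name are assumptions to verify against your chart values:

  # hypothetical manual creation of the velero-s3-credential secret\nkubectl create namespace velero\nkubectl create secret generic velero-s3-credential -n velero --from-file=cloud=credentials-velero\n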

                                                                                      • "},{"location":"en/end-user/kpanda/clusterops/cluster-settings.html","title":"Cluster Settings","text":"

Cluster settings are used to customize advanced feature settings for your cluster, including whether to enable GPU support, the Helm repo refresh cycle, Helm operation record retention, and more.

• Enable GPU: GPUs and the appropriate driver plug-ins need to be installed on the cluster in advance.

  Click the name of the target cluster, and click Operations and Maintenance -> Cluster Settings -> Addons in the left navigation bar.

• Other settings: the base image for Helm operations, the registry refresh cycle, the number of operation records retained, and whether to enable cluster deletion protection (when enabled, the cluster cannot be deleted directly).

                                                                                        "},{"location":"en/end-user/kpanda/clusterops/latest-operations.html","title":"recent operations","text":"

                                                                                        On this page, you can view the recent cluster operation records and Helm operation records, as well as the YAML files and logs of each operation, and you can also delete a certain record.

Set the number of retained entries for Helm operations:

By default, the system keeps the last 100 Helm operation records. Keeping too many entries may cause data redundancy, while keeping too few may result in losing key operation records you need. Set a reasonable retention number based on your actual situation. Specific steps are as follows:

1. Click the name of the target cluster, and click Recent Operations -> Helm Operations -> Set Number of Retained Items in the left navigation bar.

2. Set how many Helm operation records need to be kept, and click OK.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/access-cluster.html","title":"Access Clusters","text":"

Clusters integrated into or created with the AI platform Container Management module can be accessed not only through the UI but also in two other ways:

                                                                                        • Access online via CloudShell
                                                                                        • Access via kubectl after downloading the cluster certificate

                                                                                        Note

                                                                                        When accessing the cluster, the user should have Cluster Admin permission or higher.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/access-cluster.html#access-via-cloudshell","title":"Access via CloudShell","text":"
1. Enter the Clusters page, select the cluster you want to access via CloudShell, click the ... icon on the right, and then click Console in the dropdown list.

2. Run the kubectl get node command in the Console to verify the connectivity between CloudShell and the cluster. If the console returns node information of the cluster, you can access and manage the cluster through CloudShell.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/access-cluster.html#access-via-kubectl","title":"Access via kubectl","text":"

                                                                                        If you want to access and manage remote clusters from a local node, make sure you have met these prerequisites:

                                                                                        • Your local node and the cloud cluster are in a connected network.
                                                                                        • The cluster certificate has been downloaded to the local node.
                                                                                        • The kubectl tool has been installed on the local node. For detailed installation guides, see Installing tools.

                                                                                        If everything is in place, follow these steps to access a cloud cluster from your local environment.

1. Enter the Clusters page, find your target cluster, click ... on the right, and select Download kubeconfig in the drop-down list.

2. Set the Kubeconfig validity period and click Download.

                                                                                        3. Open the downloaded certificate and copy its content to the config file of the local node.

                                                                                          By default, the kubectl tool will look for a file named config in the $HOME/.kube directory on the local node. This file stores access credentials of clusters. Kubectl can access the cluster with that configuration file.
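For example, a minimal sketch that assumes the downloaded file is saved as cluster.kubeconfig in the current directory (the file name is illustrative):

  mkdir -p $HOME/.kube   # create the directory if it does not exist\ncp ./cluster.kubeconfig $HOME/.kube/config   # place the downloaded certificate as the default config\n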

                                                                                        4. Run the following command on the local node to verify its connectivity with the cluster:

                                                                                          kubectl get pod -n default\n

                                                                                          An expected output is as follows:

                                                                                          NAME                            READY   STATUS      RESTARTS    AGE\ndao-2048-2048-58c7f7fc5-mq7h4   1/1     Running     0           30h\n

                                                                                        Now you can access and manage the cluster locally with kubectl.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/cluster-role.html","title":"Cluster Roles","text":"

                                                                                        Suanova AI platform categorizes clusters based on different functionalities to help users better manage IT infrastructure.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/cluster-role.html#global-service-cluster","title":"Global Service Cluster","text":"

                                                                                        This cluster is used to run AI platform components. It generally does not carry business workloads.

Supported Features | Description
K8s Version | 1.22+
Operating System | RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD
Full Lifecycle Management | Supported
K8s Resource Management | Supported
Cloud Native Storage | Supported
Cloud Native Network | Calico, Cilium, Multus, and other CNIs
Policy Management | Supports network policies, quota policies, resource limits, disaster recovery policies, security policies
"},{"location":"en/end-user/kpanda/clusters/cluster-role.html#management-cluster","title":"Management Cluster","text":"

                                                                                        This cluster is used to manage worker clusters and generally does not carry business workloads.

Supported Features | Description
K8s Version | 1.22+
Operating System | RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD
Full Lifecycle Management | Supported
K8s Resource Management | Supported
Cloud Native Storage | Supported
Cloud Native Network | Calico, Cilium, Multus, and other CNIs
Policy Management | Supports network policies, quota policies, resource limits, disaster recovery policies, security policies
"},{"location":"en/end-user/kpanda/clusters/cluster-role.html#worker-cluster","title":"Worker Cluster","text":"

                                                                                        This is a cluster created using Container Management and is mainly used to carry business workloads. This cluster is managed by the management cluster.

Supported Features | Description
K8s Version | Supports K8s 1.22 and above
Operating System | RedHat 7.6 x86/ARM, RedHat 7.9 x86, RedHat 8.4 x86/ARM, RedHat 8.6 x86; Ubuntu 18.04 x86, Ubuntu 20.04 x86; CentOS 7.6 x86/AMD, CentOS 7.9 x86/AMD
Full Lifecycle Management | Supported
K8s Resource Management | Supported
Cloud Native Storage | Supported
Cloud Native Network | Calico, Cilium, Multus, and other CNIs
Policy Management | Supports network policies, quota policies, resource limits, disaster recovery policies, security policies
"},{"location":"en/end-user/kpanda/clusters/cluster-role.html#integrated-cluster","title":"Integrated Cluster","text":"

                                                                                        This cluster is used to integrate existing standard K8s clusters, including but not limited to self-built clusters in local data centers, clusters provided by public cloud vendors, clusters provided by private cloud vendors, edge clusters, Xinchuang clusters, heterogeneous clusters, and different Suanova clusters. It is mainly used to carry business workloads.

Supported Features | Description
K8s Version | 1.18+
Supported Vendors | VMware Tanzu, Amazon EKS, Redhat Openshift, SUSE Rancher, Alibaba ACK, Huawei CCE, Tencent TKE, Standard K8s Cluster, Suanova
Full Lifecycle Management | Not Supported
K8s Resource Management | Supported
Cloud Native Storage | Supported
Cloud Native Network | Depends on the network mode of the integrated cluster's kernel
Policy Management | Supports network policies, quota policies, resource limits, disaster recovery policies, security policies

                                                                                        Note

                                                                                        A cluster can have multiple cluster roles. For example, a cluster can be both a global service cluster and a management cluster or a worker cluster.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/cluster-scheduler-plugin.html","title":"Deploy Second Scheduler scheduler-plugins in a Cluster","text":"

This page describes how to deploy a second scheduler, scheduler-plugins, in a cluster.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/cluster-scheduler-plugin.html#why-do-we-need-scheduler-plugins","title":"Why do we need scheduler-plugins?","text":"

Clusters created through the platform install the native K8s scheduler, but the native scheduler has many limitations:

• The native scheduler cannot meet certain scheduling requirements; in such cases you can use CoScheduling, CapacityScheduling, or other scheduler plugins.
• In special scenarios, a new scheduler is needed to complete scheduling tasks without affecting the process of the native scheduler.
• Scheduler plugins with different functionalities can be distinguished, and different scheduling scenarios achieved, by switching scheduler names.

                                                                                        This page takes the scenario of using the vgpu scheduler-plugin while combining the coscheduling plugin capability of scheduler-plugins as an example to introduce how to install and use scheduler-plugins.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/cluster-scheduler-plugin.html#installing-scheduler-plugins","title":"Installing scheduler-plugins","text":""},{"location":"en/end-user/kpanda/clusters/cluster-scheduler-plugin.html#prerequisites","title":"Prerequisites","text":"
                                                                                        • kubean is a new feature introduced in v0.13.0, please ensure that your version is v0.13.0 or higher.
                                                                                        • The installation version of scheduler-plugins is v0.27.8, please ensure that the cluster version is compatible with it. Refer to the document Compatibility Matrix.
                                                                                        "},{"location":"en/end-user/kpanda/clusters/cluster-scheduler-plugin.html#installation-process","title":"Installation Process","text":"
                                                                                        1. Add the scheduler-plugins parameter in Create Cluster -> Advanced Settings -> Custom Parameters.

scheduler_plugins_enabled: true\nscheduler_plugins_plugin_config:\n  - name: Coscheduling\n    args:\n      permitWaitingTimeSeconds: 10 # default is 60\n

                                                                                          Parameters:

• scheduler_plugins_enabled: Set to true to enable the scheduler-plugins capability.
• You can enable or disable specific plugins by setting the scheduler_plugins_enabled_plugins or scheduler_plugins_disabled_plugins options (see the sketch after this list). See K8s Official Plugin Names for reference.
• If you need to set parameters for custom plugins, configure scheduler_plugins_plugin_config, for example, the permitWaitingTimeSeconds parameter for coscheduling. See K8s Official Plugin Configuration for reference.
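A hedged sketch of these options as custom parameters, following the same key: value convention as the example above (the plugin names come from the upstream scheduler-plugins project and should be verified against the referenced K8s documentation):

  scheduler_plugins_enabled: true\n# enable only the listed plugins (illustrative selection)\nscheduler_plugins_enabled_plugins:\n  - Coscheduling\n  - CapacityScheduling\n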
2. After the cluster is successfully created, the system will automatically install the scheduler-plugins and controller components. You can check the workload status in the deployments of the corresponding cluster.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/cluster-scheduler-plugin.html#using-scheduler-plugins","title":"Using scheduler-plugins","text":"

                                                                                        Here is an example of how to use scheduler-plugins by demonstrating a scenario where the vgpu scheduler is used in combination with the coscheduling plugin capability of scheduler-plugins.

1. Install vgpu via Helm Charts and set the values.yaml parameters (see the sketch after this list).

  • schedulerName: scheduler-plugins-scheduler: This is the scheduler name of the scheduler-plugins installed by kubean and currently cannot be modified.
  • scheduler.kubeScheduler.enabled: false: Do not install kube-scheduler; use vgpu-scheduler as a separate extender.
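A minimal values.yaml sketch of the two settings above; the key paths mirror the bullet names and should be verified against the chart's actual default values:

  # scheduler name used by workloads scheduled through scheduler-plugins\nschedulerName: scheduler-plugins-scheduler\nscheduler:\n  kubeScheduler:\n    enabled: false   # do not install kube-scheduler; vgpu-scheduler acts as a separate extender\n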
                                                                                        2. Extend vgpu-scheduler on scheduler-plugins.

                                                                                          [root@master01 charts]# kubectl get cm -n scheduler-plugins scheduler-config -ojsonpath=\"{.data.scheduler-config\\.yaml}\"\n
                                                                                          apiVersion: kubescheduler.config.k8s.io/v1\nkind: KubeSchedulerConfiguration\nleaderElection:\n  leaderElect: false\nprofiles:\n  # Compose all plugins in one profile\n  - schedulerName: scheduler-plugins-scheduler\n    plugins:\n      multiPoint:\n        enabled:\n          - name: Coscheduling\n          - name: CapacityScheduling\n          - name: NodeResourceTopologyMatch\n          - name: NodeResourcesAllocatable\n        disabled:\n          - name: PrioritySort\npluginConfig:\n  - args:\n      permitWaitingTimeSeconds: 10\n    name: Coscheduling\n

Modify the scheduler-config configmap of scheduler-plugins to register the vgpu extender:

  kubectl edit cm -n scheduler-plugins scheduler-config\n
                                                                                          apiVersion: kubescheduler.config.k8s.io/v1\nkind: KubeSchedulerConfiguration\nleaderElection:\n  leaderElect: false\nprofiles:\n  # Compose all plugins in one profile\n  - schedulerName: scheduler-plugins-scheduler\n    plugins:\n      multiPoint:\n        enabled:\n          - name: Coscheduling\n          - name: CapacityScheduling\n          - name: NodeResourceTopologyMatch\n          - name: NodeResourcesAllocatable\n        disabled:\n          - name: PrioritySort\npluginConfig:\n  - args:\n      permitWaitingTimeSeconds: 10\n    name: Coscheduling\nextenders:\n  - urlPrefix: \"${urlPrefix}\"\n    filterVerb: filter\n    bindVerb: bind\n    nodeCacheCapable: true\n    ignorable: true\n    httpTimeout: 30s\n    weight: 1\n    enableHTTPS: true\n    tlsConfig:\n      insecure: true\n    managedResources:\n      - name: nvidia.com/vgpu\n        ignoredByScheduler: true\n      - name: nvidia.com/gpumem\n        ignoredByScheduler: true\n      - name: nvidia.com/gpucores\n        ignoredByScheduler: true\n      - name: nvidia.com/gpumem-percentage\n        ignoredByScheduler: true\n      - name: nvidia.com/priority\n        ignoredByScheduler: true\n      - name: cambricon.com/mlunum\n        ignoredByScheduler: true\n
3. After vgpu-scheduler is installed, the system automatically creates a service (svc); the urlPrefix in the configuration above specifies the URL of that svc.

                                                                                          Note

• The svc refers to the service exposing the vgpu-scheduler pods. You can run the following command in the namespace where the nvidia-vgpu plugin is installed to get the external access information for port 443.

                                                                                            kubectl get svc -n ${namespace}\n
                                                                                          • The urlPrefix format is https://${ip address}:${port}

                                                                                        4. Restart the scheduler pod of scheduler-plugins to load the new configuration file.
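A hedged example of the restart, assuming scheduler-plugins runs as a Deployment named scheduler-plugins-scheduler in the scheduler-plugins namespace (both names are assumptions; check your actual resources with kubectl get deploy -n scheduler-plugins):

  kubectl -n scheduler-plugins rollout restart deployment scheduler-plugins-scheduler\n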

                                                                                          Note

                                                                                          When creating a vgpu application, you do not need to specify the name of a scheduler-plugin. The vgpu-scheduler webhook will automatically change the scheduler's name to \"scheduler-plugins-scheduler\" without manual specification.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/cluster-status.html","title":"Cluster Status","text":"

                                                                                        AI platform Container Management module can manage two types of clusters: integrated clusters and created clusters.

                                                                                        • Integrated clusters: clusters created in other platforms and now integrated into AI platform.
                                                                                        • Created clusters: clusters created in AI platform.

                                                                                        For more information about cluster types, see Cluster Role.

We have designed several statuses for these two types of clusters.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/cluster-status.html#integrated-clusters","title":"Integrated Clusters","text":"Status Description Integrating The cluster is being integrated into AI platform. Removing The cluster is being removed from AI platform. Running The cluster is running as expected. Unknown The cluster is lost. Data displayed in the AI platform UI is the cached data before the disconnection, which does not represent real-time data. Any operation during this status will not take effect. You should check cluster network connectivity or host status."},{"location":"en/end-user/kpanda/clusters/cluster-status.html#created-clusters","title":"Created Clusters","text":"Status Description Creating The cluster is being created. Updating The Kubernetes version of the cluster is being operating. Deleting The cluster is being deleted. Running The cluster is running as expected. Unknown The cluster is lost. Data displayed in the AI platform UI is the cached data before the disconnection, which does not represent real-time data. Any operation during this status will not take effect. You should check cluster network connectivity or host status. Failed The cluster creation is failed. You should check the logs for detailed reasons."},{"location":"en/end-user/kpanda/clusters/cluster-version.html","title":"Supported Kubernetes Versions","text":"

                                                                                        In AI platform, the integrated clusters and created clusters have different version support mechanisms.

                                                                                        This page focuses on the version support mechanism for created clusters.

The Kubernetes community maintains three minor versions at a time, such as 1.26, 1.27, and 1.28. When the community releases a new version, the supported range moves up by one version. For example, when the community releases 1.29, the supported versions become 1.27, 1.28, and 1.29.

                                                                                        To ensure the security and stability of the clusters, when creating clusters in AI platform, the supported version range will always be one version lower than the community's version.

                                                                                        For instance, if the Kubernetes community supports v1.25, v1.26, and v1.27, then the version range for creating worker clusters in AI platform will be v1.24, v1.25, and v1.26. Additionally, a stable version, such as 1.24.7, will be recommended to users.

                                                                                        Furthermore, the version range for creating worker clusters in AI platform will remain highly synchronized with the community. When the community version increases incrementally, the version range for creating worker clusters in AI platform will also increase by one version.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/cluster-version.html#supported-kubernetes-versions_1","title":"Supported Kubernetes Versions","text":"Kubernetes Community Versions Created Worker Cluster Versions Recommended Versions for Created Worker Cluster AI platform Installer Release Date
                                                                                        • 1.26
                                                                                        • 1.27
                                                                                        • 1.28
                                                                                        • 1.26
                                                                                        • 1.27
                                                                                        • 1.28
                                                                                        1.27.5 v0.13.0 2023.11.30"},{"location":"en/end-user/kpanda/clusters/create-cluster.html","title":"Create Worker Clusters","text":"

                                                                                        In AI platform Container Management, clusters can have four roles: global service cluster, management cluster, worker cluster, and integrated cluster. An integrated cluster can only be integrated from third-party vendors (see Integrate Cluster).

This page explains how to create a Worker Cluster. By default, when creating a new Worker Cluster, the operating system type and CPU architecture of the worker nodes should be consistent with those of the Global Service Cluster. If you want to create a cluster with a different operating system or architecture, refer to Creating an Ubuntu Worker Cluster on a CentOS Management Platform for instructions.

                                                                                        It is recommended to use the supported operating systems in AI platform to create the cluster. If your local nodes are not within the supported range, you can refer to Creating a Cluster on Non-Mainstream Operating Systems for instructions.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/create-cluster.html#prerequisites","title":"Prerequisites","text":"

                                                                                        Certain prerequisites must be met before creating a cluster:

                                                                                        • Prepare enough nodes to be joined into the cluster.
• It is recommended to use Kubernetes version 1.27.5. For the specific version range, refer to the AI platform Cluster Version Support System. Currently, the supported version range for created worker clusters is v1.26.0-v1.28. If you need to create a cluster with a lower version, refer to the Supported Cluster Versions.
                                                                                        • The target host must allow IPv4 forwarding. If using IPv6 in Pods and Services, the target server needs to allow IPv6 forwarding.
                                                                                        • AI platform does not provide firewall management. You need to pre-define the firewall rules of the target host by yourself. To avoid errors during cluster creation, it is recommended to disable the firewall of the target host.
                                                                                        • See Node Availability Check.
                                                                                        "},{"location":"en/end-user/kpanda/clusters/create-cluster.html#steps","title":"Steps","text":"
                                                                                        1. Enter the Container Management module, click Create Cluster on the upper right corner of the Clusters page.

                                                                                        2. Fill in the basic information by referring to the following instructions.

• Cluster Name: Can only contain lowercase letters, numbers, and hyphens (\"-\"), must start and end with a lowercase letter or number, and can be up to 63 characters in total.
• Managed By: Choose a cluster to manage this new cluster through its lifecycle, such as creating, upgrading, scaling nodes, and deleting the new cluster.
• Runtime: Select the runtime environment of the cluster. Currently containerd and docker are supported (see How to Choose Container Runtime).
• Kubernetes Version: A span of three minor versions is allowed, such as 1.23-1.25, subject to the versions supported by the management cluster.

                                                                                        3. Fill in the node configuration information and click Node Check .

                                                                                          • High Availability: When enabled, at least 3 controller nodes are required. When disabled, only 1 controller node is needed.

                                                                                            It is recommended to use High Availability mode in production environments.

                                                                                          • Credential Type: Choose whether to access nodes using username/password or public/private keys.

                                                                                            If using public/private key authentication, SSH keys for the nodes need to be configured in advance. Refer to Using SSH Key Authentication for Nodes.

                                                                                          • Same Password: When enabled, all nodes in the cluster will have the same access password. Enter the unified password for accessing all nodes in the field below. If disabled, you can set separate usernames and passwords for each node.

• Node Information: Set node names and IPs.
                                                                                          • NTP Time Synchronization: When enabled, time will be automatically synchronized across all nodes. Provide the NTP server address.

4. If the node check passes, click Next. If the check fails, update the Node Information and check again.

                                                                                        5. Fill in the network configuration and click Next .

• CNI: Provides network services for Pods in the cluster. The CNI cannot be changed after the cluster is created. Supports cilium and calico. Selecting none means no CNI will be installed when creating the cluster; you may install a CNI later.

                                                                                            For CNI configuration details, see Cilium Installation Parameters or Calico Installation Parameters.

                                                                                          • Container IP Range: Set an IP range for allocating IPs for containers in the cluster. IP range determines the max number of containers allowed in the cluster. Cannot be modified after creation.

                                                                                          • Service IP Range: Set an IP range for allocating IPs for container Services in the cluster. This range determines the max number of container Services that can be created in the cluster. Cannot be modified after creation.
                                                                                        6. Fill in the plug-in configuration and click Next .

                                                                                        7. Fill in advanced settings and click OK .

• kubelet_max_pods: Set the maximum number of Pods per node. The default is 110.
• hostname_override: Reset the hostname (not recommended).
• kubernetes_audit: Kubernetes audit log, enabled by default.
• auto_renew_certificate: Automatically renew the certificate of the control plane on the first Monday of each month, enabled by default.
• disable_firewalld&ufw: Disable the firewall to prevent the node from being inaccessible during installation.
• Insecure_registries: Set the address of your private container registry. If you use a private container registry, filling in its address bypasses certificate authentication of the container engine when pulling images.
• yum_repos: Fill in the Yum repository address.
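A hedged sketch of how some of these fields might look when written as key: value custom parameters; the values are illustrative and the exact field format should be verified against the creation form:

  kubelet_max_pods: 110\nkubernetes_audit: true\nauto_renew_certificate: true\ninsecure_registries:\n  - registry.example.com   # hypothetical private registry address\n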

                                                                                        Success

                                                                                        • After correctly filling in the above information, the page will prompt that the cluster is being created.
• Creating a cluster takes a long time, so please wait patiently. You can click the Back to Clusters button to let it run in the background.
                                                                                        • To view the current status, click Real-time Log .

                                                                                        Note

• When the cluster is in an unknown state, it means that the current cluster has been disconnected.
                                                                                        • The data displayed by the system is the cached data before the disconnection, which does not represent real data.
                                                                                        • Any operations performed in the disconnected state will not take effect. Please check the cluster network connectivity or Host Status.
                                                                                        "},{"location":"en/end-user/kpanda/clusters/delete-cluster.html","title":"Delete/Remove Clusters","text":"

                                                                                        Clusters created in AI platform Container Management can be either deleted or removed. Clusters integrated into AI platform can only be removed.

                                                                                        Info

                                                                                        If you want to delete an integrated cluster, you should delete it in the platform where it is created.

                                                                                        In AI platform, the difference between Delete and Remove is:

• Delete will destroy the cluster and reset the data of all nodes under the cluster. All data will be completely cleared and lost. Making a backup before deleting a cluster is a recommended best practice. You will no longer be able to use that cluster.
                                                                                        • Remove just removes the cluster from AI platform. It will not destroy the cluster and no data will be lost. You can still use the cluster in other platforms or re-integrate it into AI platform later if needed.

                                                                                        Note

                                                                                        • You should have Admin or Kpanda Owner permissions to perform delete or remove operations.
                                                                                        • Before deleting a cluster, you should turn off Cluster Deletion Protection in Cluster Settings -> Advanced Settings , otherwise the Delete Cluster option will not be displayed.
                                                                                        • The global service cluster cannot be deleted or removed.
1. Enter the Container Management module, find your target cluster, click ... on the right, and select Delete cluster / Remove in the drop-down list.

2. Enter the cluster name to confirm and click Delete.

3. You will be automatically redirected to the cluster list. The status of this cluster will change to Deleting. It may take a while to delete/remove a cluster.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/integrate-cluster.html","title":"Integrate Clusters","text":"

With the cluster integration feature, AI platform allows you to manage on-premise and cloud clusters of various providers in a unified manner. This is quite important for avoiding the risk of being locked in by a certain provider and helps enterprises safely migrate their business to the cloud.

                                                                                        In AI platform Container Management module, you can integrate a cluster of the following providers: standard Kubernetes clusters, Redhat Openshift, SUSE Rancher, VMware Tanzu, Amazon EKS, Aliyun ACK, Huawei CCE, Tencent TKE, etc.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/integrate-cluster.html#prerequisites","title":"Prerequisites","text":"
                                                                                        • Prepare a cluster of K8s v1.22+ and ensure its network connectivity.
                                                                                        • The operator should have the NS Editor or higher permissions.
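A quick, hedged way to confirm the version and connectivity from a machine that can already reach the target cluster:

  kubectl version    # the server version should be v1.22 or higher\nkubectl get nodes  # all nodes should be Ready\n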
                                                                                        "},{"location":"en/end-user/kpanda/clusters/integrate-cluster.html#steps","title":"Steps","text":"
                                                                                        1. Enter Container Management module, and click Integrate Cluster in the upper right corner.

                                                                                        2. Fill in the basic information by referring to the following instructions.

• Cluster Name: It should be unique and cannot be changed after the integration. Up to 63 characters, may only contain lowercase letters, numbers, and a separator (\"-\"), and must start and end with a lowercase letter or number.
• Cluster Alias: Enter any characters, no more than 60.
• Release Distribution: The cluster provider; the mainstream vendors listed above are supported.
                                                                                        3. Fill in the KubeConfig of the target cluster and click Verify Config . The cluster can be successfully connected only after the verification is passed.

                                                                                          Click How do I get the KubeConfig? to see the specific steps for getting this file.

                                                                                        4. Confirm that all parameters are filled in correctly and click OK in the lower right corner of the page.

                                                                                        Note

                                                                                        The status of the newly integrated cluster is Integrating , which will become Running after the integration succeeds.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/integrate-rancher-cluster.html","title":"Integrate the Rancher Cluster","text":"

                                                                                        This page explains how to integrate a Rancher cluster.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/integrate-rancher-cluster.html#prerequisites","title":"Prerequisites","text":"
                                                                                        • Prepare a Rancher cluster with administrator privileges and ensure network connectivity between the container management cluster and the target cluster.
                                                                                        • Be equipped with permissions not lower than kpanda owner.
                                                                                        "},{"location":"en/end-user/kpanda/clusters/integrate-rancher-cluster.html#steps","title":"Steps","text":""},{"location":"en/end-user/kpanda/clusters/integrate-rancher-cluster.html#step-1-create-a-serviceaccount-user-with-administrator-privileges-in-the-rancher-cluster","title":"Step 1: Create a ServiceAccount user with administrator privileges in the Rancher cluster","text":"
                                                                                        1. Log in to the Rancher cluster with a role that has administrator privileges, and create a file named sa.yaml using the terminal.

                                                                                          vi sa.yaml\n

                                                                                          Press the i key to enter insert mode, then copy and paste the following content:

                                                                                          sa.yaml
apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: rancher-rke\nrules:\n- apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n- nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: rancher-rke\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: rancher-rke\nsubjects:\n- kind: ServiceAccount\n  name: rancher-rke\n  namespace: kube-system\n---\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: rancher-rke\n  namespace: kube-system\n

                                                                                          Press the Esc key to exit insert mode, then type :wq to save and exit.

                                                                                        2. Run the following command in the current directory to create a ServiceAccount named rancher-rke (referred to as SA for short):

                                                                                          kubectl apply -f sa.yaml\n

                                                                                          The expected output is as follows:

                                                                                          clusterrole.rbac.authorization.k8s.io/rancher-rke created\nclusterrolebinding.rbac.authorization.k8s.io/rancher-rke created\nserviceaccount/rancher-rke created\n
                                                                                        3. Create a secret named rancher-rke-secret and bind the secret to the rancher-rke SA.

kubectl apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: rancher-rke-secret\n  namespace: kube-system\n  annotations:\n    kubernetes.io/service-account.name: rancher-rke\ntype: kubernetes.io/service-account-token\nEOF\n

                                                                                          The output is expected to be:

                                                                                          secret/rancher-rke-secret created\n

                                                                                          Note

                                                                                          If your cluster version is lower than 1.24, please ignore this step and proceed to the next one.

                                                                                        4. Check secret for rancher-rke SA:

                                                                                          kubectl -n kube-system get secret | grep rancher-rke | awk '{print $1}'\n

                                                                                          The output is expected to be:

                                                                                          rancher-rke-secret\n

                                                                                          Check the rancher-rke-secret secret:

                                                                                          kubectl -n kube-system describe secret rancher-rke-secret\n

                                                                                          The output is expected to be:

                                                                                          Name:         rancher-rke-secret\nNamespace:    kube-system\nLabels:       <none>\nAnnotations:  kubernetes.io/service-account.name: rancher-rke\n            kubernetes.io/service-account.uid: d83df5d9-bd7d-488d-a046-b740618a0174\n\nType:  kubernetes.io/service-account-token\n\nData\n====\nca.crt:     570 bytes\nnamespace:  11 bytes\ntoken:      eyJhbGciOiJSUzI1NiIsImtpZCI6IjUtNE9nUWZLRzVpbEJORkZaNmtCQXhqVzRsZHU4MHhHcDBfb0VCaUo0V1kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJyYW5jaGVyLXJrZS1zZWNyZXQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoicmFuY2hlci1ya2UiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkODNkZjVkOS1iZDdkLTQ4OGQtYTA0Ni1iNzQwNjE4YTAxNzQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06cmFuY2hlci1ya2UifQ.VNsMtPEFOdDDeGt_8VHblcMRvjOwPXMM-79o9UooHx6q-VkHOcIOp3FOT2hnEdNnIsyODZVKCpEdCgyozX-3y5x2cZSZpocnkMcBbQm-qfTyUcUhAY7N5gcYUtHUhvRAsNWJcsDCn6d96gT_qo-ddo_cT8Ri39Lc123FDYOnYG-YGFKSgRQVy7Vyv34HIajZCCjZzy7i--eE_7o4DXeTjNqAFMFstUxxHBOXI3Rdn1zKQKqh5Jhg4ES7X-edSviSUfJUX-QV_LlAw5DuAyGPH7bDH4QaQ5k-p6cIctmpWZE-9wRDlKA4LYRblKE7MJcI6OmM4ldlMM0Jc8N-gCtl4w\n
                                                                                        "},{"location":"en/end-user/kpanda/clusters/integrate-rancher-cluster.html#step-2-update-kubeconfig-with-the-rancher-rke-sa-authentication-on-your-local-machine","title":"Step 2: Update kubeconfig with the rancher-rke SA authentication on your local machine","text":"

Perform the following steps on any local node where kubectl is installed:

1. Configure the kubectl token.

  kubectl config set-credentials rancher-rke --token=__rancher-rke-secret__ # token from the rancher-rke-secret\n

                                                                                          For example,

                                                                                          kubectl config set-credentials eks-admin --token=eyJhbGciOiJSUzI1NiIsImtpZCI6IjUtNE9nUWZLRzVpbEJORkZaNmtCQXhqVzRsZHU4MHhHcDBfb0VCaUo0V1kifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJyYW5jaGVyLXJrZS1zZWNyZXQiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoicmFuY2hlci1ya2UiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJkODNkZjVkOS1iZDdkLTQ4OGQtYTA0Ni1iNzQwNjE4YTAxNzQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06cmFuY2hlci1ya2UifQ.VNsMtPEFOdDDeGt_8VHblcMRvjOwPXMM-79o9UooHx6q-VkHOcIOp3FOT2hnEdNnIsyODZVKCpEdCgyozX-3y5x2cZSZpocnkMcBbQm-qfTyUcUhAY7N5gcYUtHUhvRAsNWJcsDCn6d96gT_qo-ddo_cT8Ri39Lc123FDYOnYG-YGFKSgRQVy7Vyv34HIajZCCjZzy7i--eE_7o4DXeTjNqAFMFstUxxHBOXI3Rdn1zKQKqh5Jhg4ES7X-edSviSUfJUX-QV_LlAw5DuAyGPH7bDH4QaQ5k-p6cIctmpWZE-9wRDlKA4LYRblKE7MJcI6OmM4ldlMM0Jc8N-gCtl4w\n
2. Configure the kubectl APIServer information.

                                                                                          kubectl config set-cluster {cluster-name} --insecure-skip-tls-verify=true --server={APIServer}\n
                                                                                          • {cluster-name} : the name of your Rancher cluster.
• {APIServer} : the access address of the cluster, usually the IP address of a control plane node plus port 6443, such as https://10.X.X.X:6443.

                                                                                          For example,

                                                                                          kubectl config set-cluster rancher-rke --insecure-skip-tls-verify=true --server=https://10.X.X.X:6443\n
3. Configure the kubectl context.

                                                                                          kubectl config set-context {context-name} --cluster={cluster-name} --user={SA-usename}\n

                                                                                          For example,

                                                                                          kubectl config set-context rancher-rke-context --cluster=rancher-rke --user=rancher-rke\n
4. Switch kubectl to the newly created context rancher-rke-context.

                                                                                          kubectl config use-context rancher-rke-context\n
                                                                                        5. Fetch the kubeconfig information for the context rancher-rke-context .

                                                                                          kubectl config view --minify --flatten --raw\n

                                                                                          The output is expected to be:

apiVersion: v1\nclusters:\n- cluster:\n    insecure-skip-tls-verify: true\n    server: https://77C321BCF072682C70C8665ED4BFA10D.gr7.ap-southeast-1.eks.amazonaws.com\n  name: joincluster\ncontexts:\n- context:\n    cluster: joincluster\n    user: eks-admin\n  name: ekscontext\ncurrent-context: ekscontext\nkind: Config\npreferences: {}\nusers:\n- name: eks-admin\n  user:\n    token: eyJhbGciOiJSUzI1NiIsImtpZCI6ImcxTjJwNkktWm5IbmRJU1RFRExvdWY1TGFWVUtGQ3VIejFtNlFQcUNFalEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2V\n

                                                                                        "},{"location":"en/end-user/kpanda/clusters/integrate-rancher-cluster.html#step-3-connect-the-cluster-in-the-suanova-interface","title":"Step 3: Connect the cluster in the Suanova Interface","text":"

Using the kubeconfig file fetched earlier, refer to the Integrate Cluster documentation to integrate the Rancher cluster into the global cluster.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/runtime.html","title":"How to choose the container runtime","text":"

The container runtime is an important Kubernetes component that manages the life cycle of containers and container images. Kubernetes made containerd the default container runtime in version 1.19 and removed support for the Dockershim component in version 1.24.

Therefore, we recommend the lightweight containerd over the Docker runtime, as it has become the mainstream runtime choice.

In addition, some operating system distributions offer limited compatibility with the Docker runtime. Runtime support for each operating system is as follows:

                                                                                        "},{"location":"en/end-user/kpanda/clusters/runtime.html#operating-systems-and-supported-runtimes","title":"Operating systems and supported runtimes","text":"Operating System Supported containerd Versions Supported Docker Versions CentOS 1.5.5, 1.5.7, 1.5.8, 1.5.9, 1.5.10, 1.5.11, 1.5.12, 1.5.13, 1.6.0, 1.6.1, 1.6.2, 1.6.3, 1.6.4, 1.6.5, 1.6.6, 1.6.7, 1.6.8, 1.6.9, 1.6.10, 1.6.11, 1.6.12, 1.6.13, 1.6.14, 1.6.15 (default) 18.09, 19.03, 20.10 (default) RedHatOS 1.5.5, 1.5.7, 1.5.8, 1.5.9, 1.5.10, 1.5.11, 1.5.12, 1.5.13, 1.6.0, 1.6.1, 1.6.2, 1.6.3, 1.6.4, 1.6.5, 1.6.6, 1.6.7, 1.6.8, 1.6.9, 1.6.10, 1.6.11, 1.6.12, 1.6.13, 1.6.14, 1.6.15 (default) 18.09, 19.03, 20.10 (default) KylinOS 1.5.5, 1.5.7, 1.5.8, 1.5.9, 1.5.10, 1.5.11, 1.5.12, 1.5.13, 1.6.0, 1.6.1, 1.6.2, 1.6.3, 1.6.4, 1.6.5, 1.6.6, 1.6.7, 1.6.8, 1.6.9, 1.6.10, 1.6.11, 1.6.12, 1.6.13, 1.6.14, 1.6.15 (default) 19.03 (Only supported by ARM architecture, Docker is not supported as a runtime under x86 architecture)

                                                                                        Note

In the offline installation mode, you need to prepare the offline runtime package for the relevant operating system in advance.

                                                                                        "},{"location":"en/end-user/kpanda/clusters/upgrade-cluster.html","title":"Cluster Upgrade","text":"

The Kubernetes community releases a minor version roughly every quarter, and each version is maintained for only about 9 months. Major bugs and security vulnerabilities are no longer patched once a version leaves maintenance. Upgrading clusters manually is cumbersome and places a heavy workload on administrators.

In Suanova, you can upgrade the Kubernetes cluster with one click through the web UI.

                                                                                        Danger

After the upgrade, you cannot roll back to the previous version. Please proceed with caution.

                                                                                        Note

                                                                                        • Kubernetes versions are denoted as x.y.z , where x is the major version, y is the minor version, and z is the patch version.
• Upgrades cannot skip minor versions, e.g. a direct upgrade from 1.23 to 1.25 is not possible.
• Access clusters do not support version upgrades. If Cluster Upgrade does not appear in the left navigation bar, check whether the cluster is an access cluster.
                                                                                        • The global service cluster can only be upgraded through the terminal.
                                                                                        • When upgrading a worker cluster, the Management Cluster of the worker cluster should have been connected to the container management module and be running normally.
                                                                                        1. Click the name of the target cluster in the cluster list.

                                                                                        2. Then click Cluster Operation and Maintenance -> Cluster Upgrade in the left navigation bar, and click Version Upgrade in the upper right corner of the page.

                                                                                        3. Select the version that can be upgraded, and enter the cluster name to confirm.

                                                                                        4. After clicking OK , you can see the upgrade progress of the cluster.

                                                                                        5. The cluster upgrade is expected to take 30 minutes. You can click the Real-time Log button to view the detailed log of the cluster upgrade.
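Optionally, once the upgrade finishes, you can confirm the result from a terminal with standard kubectl commands:

```bash
# Each node should report the new Kubernetes version
kubectl get nodes -o wide

# Check the server version reported by the API server
kubectl version
```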

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/create-configmap.html","title":"Create ConfigMaps","text":"

ConfigMaps store non-confidential data as key-value pairs, decoupling configuration data from application code. ConfigMaps can be used as container environment variables, command-line parameters, or configuration files in storage volumes.

                                                                                        Note

                                                                                        • The data saved in ConfigMaps cannot exceed 1 MiB. If you need to store larger volumes of data, it is recommended to mount a storage volume or use an independent database or file service.

• ConfigMaps do not provide confidentiality or encryption. If you want to store encrypted data, it is recommended to use a Secret or other third-party tools to ensure data privacy.

                                                                                        You can create ConfigMaps with two methods:

                                                                                        • Graphical form creation
                                                                                        • YAML creation
                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/create-configmap.html#prerequisites","title":"Prerequisites","text":"
• A Kubernetes cluster has been integrated or created, and you can access the cluster UI.

• A namespace and a user have been created, and the user has been granted NS Editor. For details, refer to Namespace Authorization.

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/create-configmap.html#graphical-form-creation","title":"Graphical form creation","text":"
                                                                                        1. Click the name of a cluster on the Clusters page to enter Cluster Details .

                                                                                        2. In the left navigation bar, click ConfigMap and Secret -> ConfigMap , and click the Create ConfigMap button in the upper right corner.

                                                                                        3. Fill in the configuration information on the Create ConfigMap page, and click OK .

                                                                                          !!! note

Click __Upload File__ to import an existing local file and quickly create a ConfigMap.\n
4. After the creation is complete, click More on the right side of the ConfigMap to perform operations such as editing YAML, updating, exporting, and deleting.

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/create-configmap.html#yaml-creation","title":"YAML creation","text":"
                                                                                        1. Click the name of a cluster on the Clusters page to enter Cluster Details .

                                                                                        2. In the left navigation bar, click ConfigMap and Secret -> ConfigMap , and click the YAML Create button in the upper right corner.

                                                                                        3. Fill in or paste the configuration file prepared in advance, and then click OK in the lower right corner of the pop-up box.

                                                                                          !!! note

- Click __Import__ to import an existing local file to quickly create ConfigMaps.\n - After filling in the data, click __Download__ to save the configuration file locally.\n
4. After the creation is complete, click More on the right side of the ConfigMap to perform operations such as editing YAML, updating, exporting, and deleting.

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/create-configmap.html#configmap-yaml-example","title":"ConfigMap YAML example","text":"
                                                                                         ```yaml\n kind: ConfigMap\n apiVersion: v1\n metadata:\n   name: kube-root-ca.crt\n   namespace: default\n   annotations:\n data:\n   version: '1.0'\n ```\n

                                                                                        Next step: Use ConfigMaps

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/create-secret.html","title":"Create Secret","text":"

A Secret is a resource object used to store and manage sensitive information such as passwords, OAuth tokens, and SSH or TLS credentials. Using Secrets means you don't need to include confidential data in your application code.

Secrets can be used in the following scenarios:

• As container environment variables, providing information the container needs at runtime.
• As Pod data volumes.
• As the registry authentication credential when the kubelet pulls container images.

You can create Secrets with two methods:

                                                                                        • Graphical form creation
                                                                                        • YAML creation
                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/create-secret.html#prerequisites","title":"Prerequisites","text":"
• A Kubernetes cluster has been integrated or created, and you can access the cluster UI.

• A namespace and a user have been created, and the user has been granted NS Editor. For details, refer to Namespace Authorization.

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/create-secret.html#create-secret-with-wizard","title":"Create secret with wizard","text":"
                                                                                        1. Click the name of a cluster on the Clusters page to enter Cluster Details .

                                                                                        2. In the left navigation bar, click ConfigMap and Secret -> Secret , and click the Create Secret button in the upper right corner.

                                                                                        3. Fill in the configuration information on the Create Secret page, and click OK .

Note the following when filling in the configuration:

• The name of the Secret must be unique within the same namespace.
• Secret type:
  • Default (Opaque): the Kubernetes default Secret type, which supports arbitrary user-defined data.
  • TLS (kubernetes.io/tls): credentials for TLS client or server data access.
  • Container registry information (kubernetes.io/dockerconfigjson): credentials for container registry access.
  • Username and password (kubernetes.io/basic-auth): credentials for basic authentication.
  • Custom: a type customized by the user according to business needs.
• Secret data: the data stored in the Secret; the required fields differ by Secret type.
  • When the Secret type is default (Opaque)/custom: multiple key-value pairs can be filled in.
  • When the Secret type is TLS (kubernetes.io/tls): you need to fill in the certificate and the private key data. The certificate is a self-signed or CA-signed credential used for authentication; a certificate signing request must be signed with the private key.
  • When the Secret type is container registry information (kubernetes.io/dockerconfigjson): you need to fill in the account and password for the private container registry.
  • When the Secret type is username and password (kubernetes.io/basic-auth): a username and password must be specified.
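For reference, a Secret created through the wizard corresponds to an ordinary Kubernetes manifest. Below is a minimal sketch of a username-and-password (kubernetes.io/basic-auth) Secret; the name and values are hypothetical:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: basic-auth-demo        # hypothetical name
type: kubernetes.io/basic-auth
stringData:                    # stringData takes plain text; Kubernetes base64-encodes it on write
  username: admin              # hypothetical credentials
  password: change-me
```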
                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/create-secret.html#yaml-creation","title":"YAML creation","text":"
                                                                                        1. Click the name of a cluster on the Clusters page to enter Cluster Details .

                                                                                        2. In the left navigation bar, click ConfigMap and Secret -> Secret , and click the YAML Create button in the upper right corner.

                                                                                        3. Fill in the YAML configuration on the Create with YAML page, and click OK .

You can import a YAML file from local, or download and save the completed file locally.

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/create-secret.html#key-yaml-example","title":"key YAML example","text":"
                                                                                         ```yaml\n apiVersion: v1\n kind: Secret\n metadata:\n   name: secretdemo\n type: Opaque\n data:\n   username: ****\n   password: ****\n ```\n
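Note that values under data must be base64-encoded (the **** above are placeholders). As a sketch, the same Secret can also be created imperatively, letting kubectl handle the encoding; the values are hypothetical:

```bash
kubectl create secret generic secretdemo \
  --from-literal=username=admin \
  --from-literal=password=change-me
```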

Next step: Use Secret

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/use-configmap.html","title":"Use ConfigMaps","text":"

A ConfigMap is a Kubernetes API object used to store non-confidential data as key-value pairs that other objects can consume. Containers can use a ConfigMap as an environment variable, a command-line argument, or a configuration file in a storage volume. ConfigMaps separate configuration data from application code, providing a more flexible way to modify application configuration.

                                                                                        Note

ConfigMaps do not provide confidentiality or encryption. If the data to be stored is confidential, please use a Secret or other third-party tools instead of a ConfigMap. In addition, a container can only use ConfigMaps in the same namespace.

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/use-configmap.html#scenes-to-be-used","title":"scenes to be used","text":"

You can use ConfigMaps in Pods in many ways, mainly including:

                                                                                        • Use ConfigMaps to set the environment variables of the container

                                                                                        • Use ConfigMaps to set the command line parameters of the container

                                                                                        • Use ConfigMaps as container data volumes

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/use-configmap.html#set-the-environment-variables-of-the-container","title":"Set the environment variables of the container","text":"

You can use a ConfigMap as a container environment variable through the graphical interface or the command line.

                                                                                        Note

ConfigMap import uses the entire ConfigMap as environment variables; ConfigMap key-value import uses a single key in the ConfigMap as the value of one environment variable.

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/use-configmap.html#graphical-interface-operation","title":"Graphical interface operation","text":"

                                                                                        When creating a workload through an image, you can set environment variables for the container by selecting Import ConfigMaps or Import ConfigMap Key Values on the Environment Variables interface.

                                                                                        1. Go to the Image Creation Workload page, in the Container Configuration step, select the Environment Variables configuration, and click the Add Environment Variable button.

2. Select ConfigMap Import or ConfigMap Key-Value Import as the environment variable type.

• When the environment variable type is ConfigMap import , enter the variable name, prefix, and ConfigMap name in sequence.

• When the environment variable type is ConfigMap key-value import , enter the variable name, ConfigMap name, and key in sequence.

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/use-configmap.html#command-line-operation","title":"Command line operation","text":"

                                                                                        You can set ConfigMaps as environment variables when creating a workload, using the valueFrom parameter to refer to the Key/Value in the ConfigMap.

                                                                                        apiVersion: v1\nkind: Pod\nmetadata:\n   name: configmap-pod-1\nspec:\n   containers:\n     - name: test-container\n       image: busybox\n       command: [ \"/bin/sh\", \"-c\", \"env\" ]\n       env:\n         - name: SPECIAL_LEVEL_KEY\n           valueFrom: # (1)\n             configMapKeyRef:\n               name: kpanda-configmap # (2)\n               key: SPECIAL_LEVEL # (3)\n   restartPolicy: Never\n
1. Use valueFrom to have the env value reference a ConfigMap
2. Name of the referenced ConfigMap
3. Key referenced in the ConfigMap
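The examples on this page assume a ConfigMap named kpanda-configmap already exists in the same namespace. A minimal sketch is shown below; the values are assumptions chosen to match the sample output later on this page:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: kpanda-configmap
data:
  SPECIAL_LEVEL: Hello    # assumed value, consistent with the "Hello Kpanda" output below
  SPECIAL_TYPE: Kpanda
```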
                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/use-configmap.html#set-the-command-line-parameters-of-the-container","title":"Set the command line parameters of the container","text":"

You can use ConfigMaps to set the command or parameter values in the container, using the environment variable substitution syntax $(VAR_NAME), as shown below.

                                                                                        apiVersion: v1\nkind: Pod\nmetadata:\n   name: configmap-pod-3\nspec:\n   containers:\n     - name: test-container\n       image: busybox\n       command: [ \"/bin/sh\", \"-c\", \"echo $(SPECIAL_LEVEL_KEY) $(SPECIAL_TYPE_KEY)\" ]\n       env:\n         - name: SPECIAL_LEVEL_KEY\n           valueFrom:\n             configMapKeyRef:\n               name: kpanda-configmap\n               key: SPECIAL_LEVEL\n         - name: SPECIAL_TYPE_KEY\n           valueFrom:\n             configMapKeyRef:\n               name: kpanda-configmap\n               key: SPECIAL_TYPE\n   restartPolicy: Never\n

                                                                                        After the Pod runs, the output is as follows.

                                                                                        Hello Kpanda\n
                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/use-configmap.html#used-as-container-data-volume","title":"Used as container data volume","text":"

You can use a ConfigMap as a container data volume through the graphical interface or the command line.

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/use-configmap.html#graphical-operation","title":"Graphical operation","text":"

                                                                                        When creating a workload through an image, you can use the ConfigMap as the data volume of the container by selecting the storage type as \"ConfigMap\" on the \"Data Storage\" interface.

1. Go to the Image Creation Workload page, in the Container Configuration step, select the Data Storage configuration, and click the __Add__ button in the __Node Path Mapping__ list.

2. Select ConfigMap as the storage type, and enter the container path, subpath, and other information in sequence.

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/use-configmap.html#command-line-operation_1","title":"Command line operation","text":"

You can also mount a ConfigMap in a Pod's storage volume.

                                                                                        Here is an example Pod that mounts a ConfigMap as a volume:

apiVersion: v1\nkind: Pod\nmetadata:\n   name: mypod\nspec:\n   containers:\n   - name: mypod\n     image: redis\n     volumeMounts:\n     - name: foo\n       mountPath: \"/etc/foo\"\n       readOnly: true\n   volumes:\n   - name: foo\n     configMap:\n       name: myconfigmap\n

                                                                                        If there are multiple containers in a Pod, each container needs its own volumeMounts block, but you only need to set one spec.volumes block per ConfigMap.
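For illustration, here is a sketch of a two-container Pod sharing one ConfigMap volume; the container names and mount paths are hypothetical:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: mypod-multi
spec:
  containers:
  - name: app                  # each container declares its own volumeMounts
    image: redis
    volumeMounts:
    - name: foo
      mountPath: "/etc/foo"
      readOnly: true
  - name: sidecar
    image: busybox
    command: ["sh", "-c", "sleep 3600"]
    volumeMounts:
    - name: foo
      mountPath: "/config"
      readOnly: true
  volumes:                     # the ConfigMap volume itself is declared only once
  - name: foo
    configMap:
      name: myconfigmap
```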

                                                                                        Note

When a ConfigMap is mounted on a container as a data volume, it can only be read as read-only files.

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html","title":"use key","text":"

A Secret is a resource object used to store and manage sensitive information such as passwords, OAuth tokens, and SSH or TLS credentials. Using Secrets means you don't need to include confidential data in your application code.

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html#scenes-to-be-used","title":"scenes to be used","text":"

You can use Secrets in Pods in a variety of ways, mainly including:

• As container environment variables, providing information the container needs at runtime.
• As Pod data volumes.
• As the registry authentication credential when the kubelet pulls container images.
                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html#use-the-key-to-set-the-environment-variable-of-the-container","title":"Use the key to set the environment variable of the container","text":"

You can use a Secret as a container environment variable through the GUI or the command line.

                                                                                        Note

Secret import uses the entire Secret as environment variables; Secret key-value import uses a single key in the Secret as the value of one environment variable.

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html#graphical-interface-operation","title":"Graphical interface operation","text":"

When creating a workload from an image, you can set environment variables for the container by selecting Secret Import or Secret Key-Value Import on the Environment Variables interface.

                                                                                        1. Go to the Image Creation Workload page.

                                                                                        2. Select the Environment Variables configuration in Container Configuration , and click the Add Environment Variable button.

3. Select Secret Import or Secret Key-Value Import as the environment variable type.

• When the environment variable type is Secret Import , enter the variable name, prefix, and Secret name in sequence.

• When the environment variable type is Secret key-value import , enter the variable name, Secret name, and key in sequence.

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html#command-line-operation","title":"Command line operation","text":"

                                                                                        As shown in the example below, you can set the secret as an environment variable when creating the workload, using the valueFrom parameter to refer to the Key/Value in the Secret.

apiVersion: v1\nkind: Pod\nmetadata:\n   name: secret-env-pod\nspec:\n   containers:\n   - name: mycontainer\n     image: redis\n     env:\n       - name: SECRET_USERNAME\n         valueFrom:\n           secretKeyRef:\n             name: mysecret\n             key: username\n             optional: false # (1)\n       - name: SECRET_PASSWORD\n         valueFrom:\n           secretKeyRef:\n             name: mysecret\n             key: password\n             optional: false # (2)\n
1. This is the default; the Secret "mysecret" must exist and contain a key named "username"
2. This is the default; the Secret "mysecret" must exist and contain a key named "password"
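The example assumes a Secret named mysecret containing username and password keys. A minimal way to create it (hypothetical values):

```bash
kubectl create secret generic mysecret \
  --from-literal=username=admin \
  --from-literal=password=change-me
```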
                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html#use-the-key-as-the-pods-data-volume","title":"Use the key as the pod's data volume","text":""},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html#graphical-interface-operation_1","title":"Graphical interface operation","text":"

When creating a workload through an image, you can use a Secret as the container data volume by selecting Secret as the storage type on the Data Storage interface.

                                                                                        1. Go to the Image Creation Workload page.

                                                                                        2. In the Container Configuration , select the Data Storage configuration, and click the Add button in the Node Path Mapping list.

3. Select Secret as the storage type, and enter the container path, subpath, and other information in sequence.

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html#command-line-operation_1","title":"Command line operation","text":"

                                                                                        The following is an example of a Pod that mounts a Secret named mysecret via a data volume:

apiVersion: v1\nkind: Pod\nmetadata:\n   name: mypod\nspec:\n   containers:\n   - name: mypod\n     image: redis\n     volumeMounts:\n     - name: foo\n       mountPath: \"/etc/foo\"\n       readOnly: true\n   volumes:\n   - name: foo\n     secret:\n       secretName: mysecret\n       optional: false # (1)\n
1. The default setting; the Secret "mysecret" must already exist

                                                                                        If the Pod contains multiple containers, each container needs its own volumeMounts block, but only one .spec.volumes setting is required for each Secret.

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html#used-as-the-identity-authentication-credential-for-the-container-registry-when-the-kubelet-pulls-the-container-image","title":"Used as the identity authentication credential for the container registry when the kubelet pulls the container image","text":"

You can use a Secret as the registry authentication credential through the GUI or the command line.

                                                                                        "},{"location":"en/end-user/kpanda/configmaps-secrets/use-secret.html#graphical-operation","title":"Graphical operation","text":"

When creating a workload through an image, you can select a private image and use a Secret as the authentication credential for the private container registry.

                                                                                        1. Go to the Image Creation Workload page.

                                                                                        2. In the second step of Container Configuration , select the Basic Information configuration, and click the Select Image button.

3. Select the name of the private container registry from the container registry drop-down list in the pop-up box. See Create Secret for details on creating a Secret for a private registry.

4. Enter the image name in the private registry and click OK to complete the image selection.

                                                                                        Note

When creating the Secret, make sure to enter the correct container registry address, username, and password, and select the correct image name; otherwise you will not be able to pull the image from the container registry.
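For reference, the same setup can be expressed on the command line and in the workload manifest. The registry address, account, and image below are hypothetical:

```bash
# Create a kubernetes.io/dockerconfigjson Secret for the private registry
kubectl create secret docker-registry my-registry-secret \
  --docker-server=registry.example.com \
  --docker-username=admin \
  --docker-password=change-me
```

```yaml
# Reference the Secret from a Pod so the kubelet can pull the private image
apiVersion: v1
kind: Pod
metadata:
  name: private-image-pod
spec:
  containers:
  - name: app
    image: registry.example.com/demo/app:latest
  imagePullSecrets:
  - name: my-registry-secret
```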

                                                                                        "},{"location":"en/end-user/kpanda/custom-resources/create.html","title":"CustomResourceDefinition (CRD)","text":"

In Kubernetes, all objects are abstracted as resources; Pod, Deployment, Service, Volume, and so on are the default resources provided by Kubernetes. These cover most daily operations and management needs, but in some special cases the existing preset resources cannot meet business requirements. CustomResourceDefinition (CRD) was created to extend the capabilities of the Kubernetes API for such cases.

                                                                                        The container management module supports interface-based management of custom resources, and its main features are as follows:

                                                                                        • Obtain the list and detailed information of custom resources under the cluster
                                                                                        • Create custom resources based on YAML
• Create a custom resource instance (CR, Custom Resource) based on YAML
                                                                                        • Delete custom resources
                                                                                        "},{"location":"en/end-user/kpanda/custom-resources/create.html#prerequisites","title":"Prerequisites","text":"
• A Kubernetes cluster has been integrated or created, and you can access the cluster UI.

• A namespace and a user have been created, and the user has been granted Cluster Admin. For details, refer to Namespace Authorization.

                                                                                        "},{"location":"en/end-user/kpanda/custom-resources/create.html#create-crd-via-yaml","title":"Create CRD via YAML","text":"
                                                                                        1. Click a cluster name to enter Cluster Details .

                                                                                        2. In the left navigation bar, click Custom Resource , and click the YAML Create button in the upper right corner.

                                                                                        3. On the Create with YAML page, fill in the YAML statement and click OK .

                                                                                        4. Return to the custom resource list page, and you can view the custom resource named crontabs.stable.example.com just created.

                                                                                        Custom resource example:

                                                                                        CRD example
                                                                                        apiVersion: apiextensions.k8s.io/v1\nkind: CustomResourceDefinition\nmetadata:\n  name: crontabs.stable.example.com\nspec:\n  group: stable.example.com\n  versions:\n    - name: v1\n      served: true\n      storage: true\n      schema:\n        openAPIV3Schema:\n          type: object\n          properties:\n            spec:\n              type: object\n              properties:\n                cronSpec:\n                  type: string\n                image:\n                  type: string\n                replicas:\n                  type: integer\n  scope: Namespaced\n  names:\n    plural: crontabs\n    singular: crontab\n    kind: CronTab\n    shortNames:\n    - ct\n
                                                                                        "},{"location":"en/end-user/kpanda/custom-resources/create.html#create-a-custom-resource-example-via-yaml","title":"Create a custom resource example via YAML","text":"
                                                                                        1. Click a cluster name to enter Cluster Details .

                                                                                        2. In the left navigation bar, click Custom Resource , and click the YAML Create button in the upper right corner.

                                                                                        3. Click the custom resource named crontabs.stable.example.com , enter the details, and click the YAML Create button in the upper right corner.

                                                                                        4. On the Create with YAML page, fill in the YAML statement and click OK .

                                                                                        5. Return to the details page of crontabs.stable.example.com , and you can view the custom resource named my-new-cron-object just created.

                                                                                        CR Example:

                                                                                        CR example
                                                                                        apiVersion: \"stable.example.com/v1\"\nkind: CronTab\nmetadata:\n  name: my-new-cron-object\nspec:\n  cronSpec: \"* * * * */5\"\n  image: my-awesome-cron-image\n
                                                                                        "},{"location":"en/end-user/kpanda/gpu/index.html","title":"Overview of GPU Management","text":"

This article introduces the Suanova container management platform's capability to uniformly operate and manage heterogeneous resources, with a focus on GPUs.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/index.html#background","title":"Background","text":"

With the rapid development of emerging technologies such as AI applications, large models, and autonomous driving, enterprises face a growing demand for compute-intensive tasks and data processing. Traditional compute architectures represented by CPUs can no longer meet these growing computational requirements. Heterogeneous computing represented by GPUs has been widely adopted for its unique advantages in processing large-scale data, performing complex calculations, and rendering graphics in real time.

Meanwhile, a lack of experience and professional solutions for scheduling and managing heterogeneous resources keeps the utilization of GPU devices extremely low, resulting in high AI production costs. Reducing costs, increasing efficiency, and improving the utilization of GPUs and other heterogeneous resources has become a pressing issue for many enterprises.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/index.html#introduction-to-gpu-capabilities","title":"Introduction to GPU Capabilities","text":"

                                                                                        The Suanova container management platform supports unified scheduling and operations management of GPUs, NPUs, and other heterogeneous resources, fully unleashing the computational power of GPU resources, and accelerating the development of enterprise AI and other emerging applications. The GPU management capabilities of Suanova are as follows:

                                                                                        • Support for unified management of heterogeneous computing resources from domestic and foreign manufacturers such as NVIDIA, Huawei Ascend, and Iluvatar.
                                                                                        • Support for multi-card heterogeneous scheduling within the same cluster, with automatic recognition of GPUs in the cluster.
                                                                                        • Support for native management solutions for NVIDIA GPUs, vGPUs, and MIG, with cloud native capabilities.
• Support for partitioning a single physical card among different tenants, allocating GPU resources to tenants and containers based on compute power and memory quotas.
                                                                                        • Support for multi-dimensional GPU resource monitoring at the cluster, node, and application levels, assisting operators in managing GPU resources.
                                                                                        • Compatibility with various training frameworks such as TensorFlow and PyTorch.
                                                                                        "},{"location":"en/end-user/kpanda/gpu/index.html#introduction-to-gpu-operator","title":"Introduction to GPU Operator","text":"

                                                                                        Similar to regular computer hardware, NVIDIA GPUs, as physical devices, need to have the NVIDIA GPU driver installed in order to be used. To reduce the cost of using GPUs on Kubernetes, NVIDIA provides the NVIDIA GPU Operator component to manage various components required for using NVIDIA GPUs. These components include the NVIDIA driver (for enabling CUDA), NVIDIA container runtime, GPU node labeling, DCGM-based monitoring, and more. In theory, users only need to plug the GPU into a compute device managed by Kubernetes, and they can use all the capabilities of NVIDIA GPUs through the GPU Operator. For more information about NVIDIA GPU Operator, refer to the NVIDIA official documentation. For deployment instructions, refer to Offline Installation of GPU Operator.

                                                                                        Architecture diagram of NVIDIA GPU Operator:

                                                                                        "},{"location":"en/end-user/kpanda/gpu/FAQ.html","title":"GPU FAQs","text":""},{"location":"en/end-user/kpanda/gpu/FAQ.html#gpu-processes-are-not-visible-while-running-nvidia-smi-inside-a-pod","title":"GPU processes are not visible while running nvidia-smi inside a pod","text":"

Q: When running the nvidia-smi command inside a GPU-utilizing pod, no GPU process information is visible, in either full-card mode or vGPU mode.

                                                                                        A: Due to PID namespace isolation, GPU processes are not visible inside the Pod. To view GPU processes, you can use one of the following methods:

• Configure the workload that uses the GPU with hostPID: true to enable viewing PIDs on the host (see the sketch after this list).
                                                                                        • Run the nvidia-smi command in the driver pod of the gpu-operator to view processes.
                                                                                        • Run the chroot /run/nvidia/driver nvidia-smi command on the host to view processes.
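A minimal sketch of the first option, assuming full-card mode with a single GPU requested; the image is an assumption:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: gpu-debug-pod
spec:
  hostPID: true              # share the host PID namespace so nvidia-smi can see GPU processes
  containers:
  - name: cuda
    image: nvcr.io/nvidia/cuda:12.2.0-base-ubuntu22.04   # assumed CUDA base image
    command: ["sleep", "infinity"]
    resources:
      limits:
        nvidia.com/gpu: 1    # one full GPU (full-card mode)
```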
                                                                                        "},{"location":"en/end-user/kpanda/gpu/Iluvatar_usage.html","title":"How to Use Iluvatar GPU in Applications","text":"

This section describes how to use Iluvatar virtual GPUs on the AI platform.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/Iluvatar_usage.html#prerequisites","title":"Prerequisites","text":"
• The AI platform container management platform has been deployed and is running smoothly.
• The container management module has been integrated with a Kubernetes cluster or a Kubernetes cluster has been created, and the cluster UI can be accessed.
• The Iluvatar GPU driver has been installed on the current cluster. Refer to the Iluvatar official documentation for driver installation instructions, or contact the Suanova ecosystem team for enterprise-level support at peg-pem@daocloud.io.
• The GPUs in the current cluster have not undergone any virtualization operations and are not occupied by other applications.
                                                                                        "},{"location":"en/end-user/kpanda/gpu/Iluvatar_usage.html#procedure","title":"Procedure","text":""},{"location":"en/end-user/kpanda/gpu/Iluvatar_usage.html#configuration-via-user-interface","title":"Configuration via User Interface","text":"
                                                                                        1. Check if the GPU in the cluster has been detected. Click Clusters -> Cluster Settings -> Addon Plugins , and check if the proper GPU type has been automatically enabled and detected. Currently, the cluster will automatically enable GPU and set the GPU type as Iluvatar .

                                                                                        2. Deploy a workload. Click Clusters -> Workloads and deploy a workload using the image. After selecting the type as (Iluvatar) , configure the GPU resources used by the application:

                                                                                          • Physical Card Count (iluvatar.ai/vcuda-core): Indicates the number of physical cards that the current pod needs to mount. The input value must be an integer and less than or equal to the number of cards on the host machine.

                                                                                          • Memory Usage (iluvatar.ai/vcuda-memory): Indicates the amount of GPU memory occupied by each card. The value is in MB, with a minimum value of 1 and a maximum value equal to the entire memory of the card.

                                                                                          If these values are misconfigured, scheduling failures or resource allocation failures may occur.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/Iluvatar_usage.html#configuration-via-yaml","title":"Configuration via YAML","text":"

                                                                                        To request GPU resources for a workload, add iluvatar.ai/vcuda-core: 1 and iluvatar.ai/vcuda-memory: 200 to the resource limits, as shown below. These parameters configure the application's use of the physical card resources.

                                                                                        apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-iluvatar-gpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-iluvatar-gpu-demo\n  template:\n    metadata:\n      labels:\n        app: full-iluvatar-gpu-demo\n    spec:\n      containers:\n      - image: nginx:perl\n        name: container-0\n        resources:\n          limits:\n            cpu: 250m\n            iluvatar.ai/vcuda-core: '1'\n            iluvatar.ai/vcuda-memory: '200'\n            memory: 512Mi\n          requests:\n            cpu: 250m\n            memory: 512Mi\n      imagePullSecrets:\n      - name: default-secret\n
                                                                                        "},{"location":"en/end-user/kpanda/gpu/dynamic-regulation.html","title":"GPU Scheduling Configuration (Binpack and Spread)","text":"

                                                                                        This page explains how to use Binpack and Spread scheduling with NVIDIA vGPU to reduce GPU resource fragmentation and avoid single points of failure, enabling advanced vGPU scheduling. The AI platform provides Binpack and Spread scheduling policies at two levels, cluster and workload, to meet the requirements of different usage scenarios.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/dynamic-regulation.html#prerequisites","title":"Prerequisites","text":"
                                                                                        • GPU devices are correctly installed on the cluster nodes.
                                                                                        • The gpu-operator component and Nvidia-vgpu component are correctly installed in the cluster.
                                                                                        • The NVIDIA vGPU mode is displayed in the GPU mode column of the cluster's node list.
                                                                                        "},{"location":"en/end-user/kpanda/gpu/dynamic-regulation.html#use-cases","title":"Use Cases","text":"
                                                                                        • Scheduling policy based on GPU dimension

                                                                                          • Binpack: Prioritizes using the same GPU on a node, suitable for increasing GPU utilization and reducing resource fragmentation.
                                                                                          • Spread: Multiple Pods are distributed across different GPUs on nodes, suitable for high availability scenarios to avoid single card failures.
                                                                                        • Scheduling policy based on node dimension

                                                                                          • Binpack: Multiple Pods prioritize using the same node, suitable for increasing GPU utilization and reducing resource fragmentation.
                                                                                          • Spread: Multiple Pods are distributed across different nodes, suitable for high availability scenarios to avoid single node failures.
                                                                                        "},{"location":"en/end-user/kpanda/gpu/dynamic-regulation.html#use-binpack-and-spread-at-cluster-level","title":"Use Binpack and Spread at Cluster-Level","text":"

                                                                                        Note

                                                                                        By default, workloads will follow the cluster-level Binpack and Spread. If a workload sets its own Binpack and Spread scheduling policies that differ from the cluster, the workload will prioritize its own scheduling policy.

                                                                                        1. On the Clusters page, select the cluster for which you want to adjust the Binpack and Spread scheduling policies. Click the ┇ icon on the right and select GPU Scheduling Configuration from the dropdown list.

                                                                                        2. Adjust the GPU scheduling configuration according to your business scenario, and click OK to save.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/dynamic-regulation.html#use-binpack-and-spread-at-workload-level","title":"Use Binpack and Spread at Workload-Level","text":"

                                                                                        Note

                                                                                        When the Binpack and Spread scheduling policies at the workload level conflict with the cluster-level configuration, the workload-level configuration takes precedence.

                                                                                        Follow the steps below to create a deployment using an image and configure Binpack and Spread scheduling policies within the workload.

                                                                                        1. Click Clusters in the left navigation bar, then click the name of the target cluster to enter the Cluster Details page.

                                                                                        2. On the Cluster Details page, click Workloads -> Deployments in the left navigation bar, then click the Create by Image button in the upper right corner of the page.

                                                                                        3. Fill in the Basic Information and Container Settings in order. In the container configuration, enable GPU and select NVIDIA vGPU as the GPU type. Click Advanced Settings, enable the Binpack / Spread scheduling policy, and adjust the GPU scheduling configuration to fit your business scenario. Click Next to proceed to Service Settings and Advanced Settings, then click OK at the bottom right of the page to complete the creation. An equivalent YAML sketch is shown below.
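                                                                                        For reference, the following is a minimal sketch of the equivalent workload-level policy expressed in YAML, using the hami.io scheduling-policy annotations described on the GPU Scheduling Configuration page; the name, image, and resource values are illustrative:

                                                                                        apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: vgpu-schedule-demo  # hypothetical name\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      app: vgpu-schedule-demo\n  template:\n    metadata:\n      labels:\n        app: vgpu-schedule-demo\n      annotations:\n        hami.io/node-scheduler-policy: \"spread\"  # node-level policy\n        hami.io/gpu-scheduler-policy: \"binpack\"  # GPU-level policy\n    spec:\n      containers:\n        - name: main\n          image: nginx:perl\n          resources:\n            limits:\n              nvidia.com/gpu: 1\n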

                                                                                        "},{"location":"en/end-user/kpanda/gpu/gpu-metrics.html","title":"GPU Metrics","text":"

                                                                                        This page lists some commonly used GPU metrics.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/gpu-metrics.html#cluster-level","title":"Cluster Level","text":"Metric Name Description Number of GPUs Total number of GPUs in the cluster Average GPU Utilization Average compute utilization of all GPUs in the cluster Average GPU Memory Utilization Average memory utilization of all GPUs in the cluster GPU Power Power consumption of all GPUs in the cluster GPU Temperature Temperature of all GPUs in the cluster GPU Utilization Details 24-hour usage details of all GPUs in the cluster (includes max, avg, current) GPU Memory Usage Details 24-hour memory usage details of all GPUs in the cluster (includes min, max, avg, current) GPU Memory Bandwidth Utilization For example, an Nvidia V100 GPU has a maximum memory bandwidth of 900 GB/sec. If the current memory bandwidth is 450 GB/sec, the utilization is 50%"},{"location":"en/end-user/kpanda/gpu/gpu-metrics.html#node-level","title":"Node Level","text":"Metric Name Description GPU Mode Usage mode of GPUs on the node, including full-card mode, MIG mode, vGPU mode Number of Physical GPUs Total number of physical GPUs on the node Number of Virtual GPUs Number of vGPU devices created on the node Number of MIG Instances Number of MIG instances created on the node GPU Memory Allocation Rate Memory allocation rate of all GPUs on the node Average GPU Utilization Average compute utilization of all GPUs on the node Average GPU Memory Utilization Average memory utilization of all GPUs on the node GPU Driver Version Driver version information of GPUs on the node GPU Utilization Details 24-hour usage details of each GPU on the node (includes max, avg, current) GPU Memory Usage Details 24-hour memory usage details of each GPU on the node (includes min, max, avg, current)"},{"location":"en/end-user/kpanda/gpu/gpu-metrics.html#pod-level","title":"Pod Level","text":"Category Metric Name Description Application Overview GPU - Compute & Memory Pod GPU Utilization Compute utilization of the GPUs used by the current Pod Pod GPU Memory Utilization Memory utilization of the GPUs used by the current Pod Pod GPU Memory Usage Memory usage of the GPUs used by the current Pod Memory Allocation Memory allocation of the GPUs used by the current Pod Pod GPU Memory Copy Ratio Memory copy ratio of the GPUs used by the current Pod GPU - Engine Overview GPU Graphics Engine Activity Percentage Percentage of time the Graphics or Compute engine is active during a monitoring cycle GPU Memory Bandwidth Utilization Memory bandwidth utilization (Memory BW Utilization) indicates the fraction of cycles during which data is sent to or received from the device memory. This value represents the average over the interval, not an instantaneous value. A higher value indicates higher utilization of device memory.A value of 1 (100%) indicates that a DRAM instruction is executed every cycle during the interval (in practice, a peak of about 0.8 (80%) is the maximum achievable).A value of 0.2 (20%) indicates that 20% of the cycles during the interval are spent reading from or writing to device memory. 
Tensor Core Utilization Percentage of time the Tensor Core pipeline is active during a monitoring cycle FP16 Engine Utilization Percentage of time the FP16 pipeline is active during a monitoring cycle FP32 Engine Utilization Percentage of time the FP32 pipeline is active during a monitoring cycle FP64 Engine Utilization Percentage of time the FP64 pipeline is active during a monitoring cycle GPU Decode Utilization Decode engine utilization of the GPU GPU Encode Utilization Encode engine utilization of the GPU GPU - Temperature & Power GPU Temperature Temperature of all GPUs in the cluster GPU Power Power consumption of all GPUs in the cluster GPU Total Power Consumption Total power consumption of the GPUs GPU - Clock GPU Memory Clock Memory clock frequency GPU Application SM Clock Application SM clock frequency GPU Application Memory Clock Application memory clock frequency GPU Video Engine Clock Video engine clock frequency GPU Throttle Reasons Reasons for GPU throttling GPU - Other Details PCIe Transfer Rate Data transfer rate of the GPU through the PCIe bus PCIe Receive Rate Data receive rate of the GPU through the PCIe bus"},{"location":"en/end-user/kpanda/gpu/gpu_matrix.html","title":"GPU Support Matrix","text":"

                                                                                        This page explains the matrix of supported GPUs and operating systems for the AI platform.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/gpu_matrix.html#nvidia-gpu","title":"NVIDIA GPU","text":"GPU Manufacturer and Type Supported GPU Models Compatible Operating System (Online) Recommended Kernel Recommended Operating System and Kernel Installation Documentation NVIDIA GPU (Full Card/vGPU)
                                                                                        • NVIDIA Fermi (2.1) Architecture:
                                                                                        • NVIDIA GeForce 400 Series
                                                                                        • NVIDIA Quadro 4000 Series
                                                                                        • NVIDIA Tesla 20 Series
                                                                                        • NVIDIA Ampere Architecture Series (A100; A800; H100)
                                                                                        CentOS 7
                                                                                        • Kernel 3.10.0-123 ~ 3.10.0-1160
                                                                                        • Kernel Reference Document
                                                                                        • Recommended Operating System with Proper Kernel Version
                                                                                        Operating System: CentOS 7.9; Kernel Version: 3.10.0-1160 Offline Installation with GPU Operator CentOS 8 Kernel 4.18.0-80 ~ 4.18.0-348 Ubuntu 20.04 Kernel 5.4 Ubuntu 22.04 Kernel 5.19 RHEL 7 Kernel 3.10.0-123 ~ 3.10.0-1160 RHEL 8 Kernel 4.18.0-80 ~ 4.18.0-348 NVIDIA MIG
                                                                                        • Ampere Architecture Series:
                                                                                        • A100
                                                                                        • A800
                                                                                        • H100
                                                                                        CentOS 7 Kernel 3.10.0-123 ~ 3.10.0-1160 Operating System: CentOS 7.9; Kernel Version: 3.10.0-1160 Offline Installation with GPU Operator CentOS 8 Kernel 4.18.0-80 ~ 4.18.0-348 Ubuntu 20.04 Kernel 5.4 Ubuntu 22.04 Kernel 5.19 RHEL 7 Kernel 3.10.0-123 ~ 3.10.0-1160 RHEL 8 Kernel 4.18.0-80 ~ 4.18.0-348"},{"location":"en/end-user/kpanda/gpu/gpu_matrix.html#ascend-npu","title":"Ascend NPU","text":"GPU Manufacturer and Type Supported NPU Models Compatible Operating System (Online) Recommended Kernel Recommended Operating System and Kernel Installation Documentation Ascend (Ascend 310)
                                                                                        • Ascend 310;
                                                                                        • Ascend 310P;
                                                                                        Ubuntu 20.04 Details refer to: Kernel Version Requirements Operating System: CentOS 7.9; Kernel Version: 3.10.0-1160 300 and 310P Driver Documentation CentOS 7.6 CentOS 8.2 KylinV10SP1 Operating System openEuler Operating System Ascend (Ascend 910P) Ascend 910 Ubuntu 20.04 Details refer to: Kernel Version Requirements Operating System: CentOS 7.9; Kernel Version: 3.10.0-1160 910 Driver Documentation CentOS 7.6 CentOS 8.2 KylinV10SP1 Operating System openEuler Operating System"},{"location":"en/end-user/kpanda/gpu/gpu_matrix.html#iluvatar-gpu","title":"Iluvatar GPU","text":"GPU Manufacturer and Type Supported GPU Models Compatible Operating System (Online) Recommended Kernel Recommended Operating System and Kernel Installation Documentation Iluvatar (Iluvatar vGPU)
                                                                                        • BI100;
                                                                                        • MR100;
                                                                                        CentOS 7
                                                                                        • Kernel 3.10.0-957.el7.x86_64 ~ 3.10.0-1160.42.2.el7.x86_64
                                                                                        Operating System: CentOS 7.9; Kernel Version: 3.10.0-1160 Coming Soon CentOS 8
                                                                                        • Kernel 4.18.0-80.el8.x86_64 ~ 4.18.0-305.19.1.el8_4.x86_64
                                                                                        Ubuntu 20.04
                                                                                        • Kernel 4.15.0-20-generic ~ 4.15.0-160-generic
                                                                                        • Kernel 5.4.0-26-generic ~ 5.4.0-89-generic
                                                                                        • Kernel 5.8.0-23-generic ~ 5.8.0-63-generic
                                                                                        Ubuntu 21.04
                                                                                        • Kernel 4.15.0-20-generic ~ 4.15.0-160-generic
                                                                                        • Kernel 5.4.0-26-generic ~ 5.4.0-89-generic
                                                                                        • Kernel 5.8.0-23-generic ~ 5.8.0-63-generic
                                                                                        openEuler 22.03 LTS
                                                                                        • Kernel version >= 5.1 and <= 5.10
                                                                                        "},{"location":"en/end-user/kpanda/gpu/gpu_scheduler_config.html","title":"GPU Scheduling Configuration","text":"

                                                                                        This document introduces the GPU scheduling configuration, which enables advanced scheduling policies. Currently, the primary implementation is the vGPU scheduling policy.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/gpu_scheduler_config.html#vgpu-resource-scheduling-configuration","title":"vGPU Resource Scheduling Configuration","text":"

                                                                                        vGPU provides two resource-usage policies, binpack and spread, each of which can be applied at the node level and at the GPU level. They let you decide whether to spread workloads across different nodes and GPUs or to concentrate them on the same node and GPU, thereby improving resource utilization and reducing resource fragmentation.

                                                                                        You can modify the scheduling policy in your cluster by following these steps:

                                                                                        1. Go to the cluster management list in the container management interface.
                                                                                        2. Click the settings button ... next to the cluster.
                                                                                        3. Click GPU Scheduling Configuration.
                                                                                        4. Switch the scheduling policy for the node-level and GPU-level dimensions. By default, the node-level policy is binpack and the GPU-level policy is spread.

                                                                                        The above steps modify the cluster-level scheduling policy. Users can also specify their own scheduling policy at the workload level to change the scheduling results. Below is an example of modifying the scheduling policy at the workload level:

                                                                                        apiVersion: v1\nkind: Pod\nmetadata:\n  name: gpu-pod\n  annotations:\n    hami.io/node-scheduler-policy: \"binpack\"\n    hami.io/gpu-scheduler-policy: \"binpack\"\nspec:\n  containers:\n    - name: ubuntu-container\n      image: ubuntu:18.04\n      command: [\"bash\", \"-c\", \"sleep 86400\"]\n      resources:\n        limits:\n          nvidia.com/gpu: 1\n          nvidia.com/gpumem: 3000\n          nvidia.com/gpucores: 30\n

                                                                                        In this example, both the node- and GPU-level scheduling policies are set to binpack. This ensures that the workload is scheduled to maximize resource utilization and reduce fragmentation.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/vgpu_quota.html","title":"GPU Quota Management","text":"

                                                                                        This section describes how to use vGPU capabilities on the AI platform.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/vgpu_quota.html#prerequisites","title":"Prerequisites","text":"

                                                                                        The proper GPU driver (NVIDIA GPU, NVIDIA MIG, Iluvatar, Ascend) has been deployed on the current cluster either through an Operator or manually.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/vgpu_quota.html#procedure","title":"Procedure","text":"

                                                                                        Follow these steps to manage GPU quotas in AI platform:

                                                                                        1. Go to Namespaces and click Quota Management to configure the GPU resources that can be used by a specific namespace.

                                                                                        2. The currently supported card types for quota management in a namespace are: NVIDIA vGPU, NVIDIA MIG, Iluvatar, and Ascend.

                                                                                        3. NVIDIA vGPU Quota Management: Configure the specific quota that can be used. This creates a ResourceQuota CR; see the sketch after the parameter list below.

                                                                                          • Physical Card Count (nvidia.com/vgpu): Indicates the number of physical cards that the current pod needs to mount. The input value must be an integer and less than or equal to the number of cards on the host machine.
                                                                                          • GPU Core Count (nvidia.com/gpucores): Indicates the GPU compute power occupied by each card. The value ranges from 0 to 100. If configured as 0, it is considered not to enforce isolation. If configured as 100, it is considered to exclusively occupy the entire card.
                                                                                          • GPU Memory Usage (nvidia.com/gpumem): Indicates the amount of GPU memory occupied by each card. The value is in MB, with a minimum value of 1 and a maximum value equal to the entire memory of the card.
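                                                                                        For reference, the quota configured through the UI materializes as a standard Kubernetes ResourceQuota object in the namespace. The following is a minimal sketch, assuming a namespace named test-ns and illustrative quota values; the resource keys follow the list above:

                                                                                        apiVersion: v1\nkind: ResourceQuota\nmetadata:\n  name: vgpu-quota  # hypothetical name\n  namespace: test-ns  # hypothetical namespace\nspec:\n  hard:\n    requests.nvidia.com/vgpu: \"2\"  # total physical cards the namespace may request\n    requests.nvidia.com/gpucores: \"60\"  # total GPU compute power (0-100 units per card)\n    requests.nvidia.com/gpumem: \"4000\"  # total GPU memory in MB\n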
                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/Ascend_usage.html","title":"Use Ascend NPU","text":"

                                                                                        This section explains how to use Ascend NPU on the AI platform.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/Ascend_usage.html#prerequisites","title":"Prerequisites","text":"
                                                                                        • The current NPU node has the Ascend driver installed.
                                                                                        • The current NPU node has the Ascend-Docker-Runtime component installed.
                                                                                        • The NPU MindX DL suite is installed on the current cluster.
                                                                                        • No virtualization is performed on the NPU card in the current cluster, and it is not occupied by other applications.

                                                                                        Refer to the Ascend NPU Component Installation Document to install the basic environment.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/Ascend_usage.html#quick-start","title":"Quick Start","text":"

                                                                                        This document uses the AscendCL Image Classification Application example from the Ascend sample library.

                                                                                        1. Download the Ascend repository

                                                                                          Run the following command to download the Ascend demo repository, and remember the storage location of the code for subsequent use.

                                                                                          git clone https://gitee.com/ascend/samples.git\n
                                                                                        2. Prepare the base image

                                                                                          This example uses the ascend-pytorch base image, which can be obtained from the Ascend Container Registry.

                                                                                        3. Prepare the YAML file

                                                                                          ascend-demo.yaml
                                                                                          apiVersion: batch/v1\nkind: Job\nmetadata:\n  name: resnetinfer1-1-1usoc\nspec:\n  template:\n    spec:\n      containers:\n        - image: ascendhub.huawei.com/public-ascendhub/ascend-pytorch:23.0.RC2-ubuntu18.04 # Inference image name\n          imagePullPolicy: IfNotPresent\n          name: resnet50infer\n          securityContext:\n            runAsUser: 0\n          command:\n            - \"/bin/bash\"\n            - \"-c\"\n            - |\n              source /usr/local/Ascend/ascend-toolkit/set_env.sh &&\n              TEMP_DIR=/root/samples_copy_$(date '+%Y%m%d_%H%M%S_%N') &&\n              cp -r /root/samples \"$TEMP_DIR\" &&\n              cd \"$TEMP_DIR\"/inference/modelInference/sampleResnetQuickStart/python/model &&\n              wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/003_Atc_Models/resnet50/resnet50.onnx &&\n              atc --model=resnet50.onnx --framework=5 --output=resnet50 --input_shape=\"actual_input_1:1,3,224,224\"  --soc_version=Ascend910 &&\n              cd ../data &&\n              wget https://obs-9be7.obs.cn-east-2.myhuaweicloud.com/models/aclsample/dog1_1024_683.jpg &&\n              cd ../scripts &&\n              bash sample_run.sh\n          resources:\n            requests:\n              huawei.com/Ascend910: 1 # Number of the Ascend 910 Processors\n            limits:\n              huawei.com/Ascend910: 1 # The value should be the same as that of requests\n          volumeMounts:\n            - name: hiai-driver\n              mountPath: /usr/local/Ascend/driver\n              readOnly: true\n            - name: slog\n              mountPath: /var/log/npu/conf/slog/slog.conf\n            - name: localtime # The container time must be the same as the host time\n              mountPath: /etc/localtime\n            - name: dmp\n              mountPath: /var/dmp_daemon\n            - name: slogd\n              mountPath: /var/slogd\n            - name: hbasic\n              mountPath: /etc/hdcBasic.cfg\n            - name: sys-version\n              mountPath: /etc/sys_version.conf\n            - name: aicpu\n              mountPath: /usr/lib64/aicpu_kernels\n            - name: tfso\n              mountPath: /usr/lib64/libtensorflow.so\n            - name: sample-path\n              mountPath: /root/samples\n      volumes:\n        - name: hiai-driver\n          hostPath:\n            path: /usr/local/Ascend/driver\n        - name: slog\n          hostPath:\n            path: /var/log/npu/conf/slog/slog.conf\n        - name: localtime\n          hostPath:\n            path: /etc/localtime\n        - name: dmp\n          hostPath:\n            path: /var/dmp_daemon\n        - name: slogd\n          hostPath:\n            path: /var/slogd\n        - name: hbasic\n          hostPath:\n            path: /etc/hdcBasic.cfg\n        - name: sys-version\n          hostPath:\n            path: /etc/sys_version.conf\n        - name: aicpu\n          hostPath:\n            path: /usr/lib64/aicpu_kernels\n        - name: tfso\n          hostPath:\n            path: /usr/lib64/libtensorflow.so\n        - name: sample-path\n          hostPath:\n            path: /root/samples\n      restartPolicy: OnFailure\n

                                                                                          Some fields in the above YAML need to be modified according to the actual situation:

                                                                                          1. atc ... --soc_version=Ascend910 uses Ascend910 ; adjust this value to your actual situation. You can run the npu-smi info command to check the chip model and add the Ascend prefix.
                                                                                          2. samples-path should be adjusted according to the actual situation.
                                                                                          3. resources should be adjusted according to the actual situation.
                                                                                        4. Deploy a Job and check its results

                                                                                          Use the following command to create a Job:

                                                                                          kubectl apply -f ascend-demo.yaml\n

                                                                                          Check the Pod running status:

                                                                                          After the Pod runs successfully, check the log output. The key messages on the screen are shown in the figure below: Label indicates the category identifier, Conf indicates the maximum confidence of the classification, and Class indicates the category name. These values may vary depending on the version and environment, so refer to your actual output:

                                                                                          Result image display:

                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/Ascend_usage.html#ui-usage","title":"UI Usage","text":"
                                                                                        1. Confirm whether the cluster has detected the GPU. Click Clusters -> Cluster Settings -> Addon Plugins , and check whether the proper GPU type is automatically enabled and detected. Currently, the cluster automatically enables GPU support and sets the GPU type to Ascend .

                                                                                        2. Deploy the workload. Click Clusters -> Workloads , deploy the workload from an image, select the type Ascend , and then configure the number of physical cards used by the application:

                                                                                          Number of Physical Cards (huawei.com/Ascend910) : This indicates how many physical cards the current Pod needs to mount. The input value must be an integer and less than or equal to the number of cards on the host.

                                                                                          If the above configuration is incorrect, scheduling failures or resource allocation issues may occur. A minimal YAML equivalent is shown below.
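                                                                                          A minimal sketch of the equivalent YAML declaration; the Pod name is hypothetical, the image follows the quick-start example above, and requests must equal limits:

                                                                                          apiVersion: v1\nkind: Pod\nmetadata:\n  name: ascend-demo  # hypothetical name\nspec:\n  containers:\n    - name: main\n      image: ascendhub.huawei.com/public-ascendhub/ascend-pytorch:23.0.RC2-ubuntu18.04\n      resources:\n        requests:\n          huawei.com/Ascend910: 1  # number of physical cards to mount\n        limits:\n          huawei.com/Ascend910: 1  # must match requests\n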

                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html","title":"Installation of Ascend NPU Components","text":"

                                                                                        This chapter provides installation guidance for Ascend NPU drivers, Device Plugin, NPU-Exporter, and other components.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html#prerequisites","title":"Prerequisites","text":"
                                                                                        1. Before installation, confirm the supported NPU models. For details, refer to the Ascend NPU Matrix.
                                                                                        2. Ensure that the kernel version required for the proper NPU model is compatible. For more details, refer to the Ascend NPU Matrix.
                                                                                        3. Prepare the basic Kubernetes environment.
                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html#installation-steps","title":"Installation Steps","text":"

                                                                                        Before using NPU resources, you need to complete the firmware installation, NPU driver installation, Docker Runtime installation, user creation, log directory creation, and NPU Device Plugin installation. Refer to the following steps for details.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html#install-firmware","title":"Install Firmware","text":"
                                                                                        1. Confirm that the kernel version falls within the range supported by the \"binary installation\" method; if so, you can install the NPU driver firmware directly.
                                                                                        2. For firmware and driver downloads, refer to: Firmware Download Link
                                                                                        3. For firmware installation, refer to: Install NPU Driver Firmware
                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html#install-npu-driver","title":"Install NPU Driver","text":"
                                                                                        1. If the driver is not installed, refer to the official Ascend documentation for installation. For example, for Ascend910, refer to: 910 Driver Installation Document.
                                                                                        2. Run the command npu-smi info, and if the NPU information is returned normally, it indicates that the NPU driver and firmware are ready.
                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html#install-docker-runtime","title":"Install Docker Runtime","text":"
                                                                                        1. Download Ascend Docker Runtime

                                                                                          Community edition download link: https://www.hiascend.com/zh/software/mindx-dl/community

                                                                                          wget -c https://mindx.obs.cn-south-1.myhuaweicloud.com/OpenSource/MindX/MindX%205.0.RC2/MindX%20DL%205.0.RC2/Ascend-docker-runtime_5.0.RC2_linux-x86_64.run\n

                                                                                          Install to the specified path by executing the following two commands in order, with parameters specifying the installation path:

                                                                                          chmod u+x Ascend-docker-runtime_5.0.RC2_linux-x86_64.run\n./Ascend-docker-runtime_5.0.RC2_linux-x86_64.run --install --install-path=<path>  # adjust the version/arch to match the downloaded file\n
                                                                                        2. Modify the containerd configuration file

                                                                                          If containerd has no default configuration file, execute the following three commands in order to create the configuration file:

                                                                                          mkdir /etc/containerd \ncontainerd config default > /etc/containerd/config.toml \nvim /etc/containerd/config.toml\n

                                                                                          If containerd has a configuration file:

                                                                                          vim /etc/containerd/config.toml\n

                                                                                          Modify the runtime installation path according to the actual situation, mainly modifying the runtime field:

                                                                                          ... \n[plugins.\"io.containerd.monitor.v1.cgroups\"]\n   no_prometheus = false  \n[plugins.\"io.containerd.runtime.v1.linux\"]\n   shim = \"containerd-shim\"\n   runtime = \"/usr/local/Ascend/Ascend-Docker-Runtime/ascend-docker-runtime\"\n   runtime_root = \"\"\n   no_shim = false\n   shim_debug = false\n [plugins.\"io.containerd.runtime.v2.task\"]\n   platforms = [\"linux/amd64\"]\n...\n

                                                                                          Execute the following command to restart containerd:

                                                                                          systemctl restart containerd\n
                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html#create-a-user","title":"Create a User","text":"

                                                                                        Execute the following commands on the node where the components are installed to create a user.

                                                                                        # Ubuntu operating system\nuseradd -d /home/hwMindX -u 9000 -m -s /usr/sbin/nologin hwMindX\nusermod -a -G HwHiAiUser hwMindX\n# CentOS operating system\nuseradd -d /home/hwMindX -u 9000 -m -s /sbin/nologin hwMindX\nusermod -a -G HwHiAiUser hwMindX\n
                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html#create-log-directory","title":"Create Log Directory","text":"

                                                                                        Create the parent directory for component logs and the log directories for each component on the proper node, and set the appropriate owner and permissions for the directories. Execute the following command to create the parent directory for component logs.

                                                                                        mkdir -m 755 /var/log/mindx-dl\nchown root:root /var/log/mindx-dl\n

                                                                                        Execute the following command to create the Device Plugin component log directory.

                                                                                        mkdir -m 750 /var/log/mindx-dl/devicePlugin\nchown root:root /var/log/mindx-dl/devicePlugin\n

                                                                                        Note

                                                                                        Please create the proper log directory for each required component. In this example, only the Device Plugin component is needed. For other component requirements, refer to the official documentation.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html#create-node-labels","title":"Create Node Labels","text":"

                                                                                        Refer to the following commands to create labels on the proper nodes:

                                                                                        # Create these labels on compute nodes where the driver is installed\nkubectl label node {nodename} huawei.com.ascend/Driver=installed\nkubectl label node {nodename} node-role.kubernetes.io/worker=worker\nkubectl label node {nodename} workerselector=dls-worker-node\nkubectl label node {nodename} host-arch=huawei-arm  # or host-arch=huawei-x86, select according to the actual situation\nkubectl label node {nodename} accelerator=huawei-Ascend910  # select according to the actual situation\n# Create this label on control plane nodes\nkubectl label node {nodename} masterselector=dls-master-node\n
                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/ascend_driver_install.html#install-device-plugin-and-npuexporter","title":"Install Device Plugin and NpuExporter","text":"

                                                                                        Functional module path: Container Management -> Cluster, click the name of the target cluster, then click Helm Apps -> Helm Charts from the left navigation bar, and search for ascend-mindxdl.

                                                                                        • DevicePlugin: Provides a general device plugin mechanism and standard device API interface for Kubernetes to use devices. It is recommended to use the default image and version.
                                                                                        • NpuExporter: Based on the Prometheus/Telegraf ecosystem, this component provides interfaces to help users monitor the Ascend series AI processors and container-level allocation status. It is recommended to use the default image and version.
                                                                                        • ServiceMonitor: Disabled by default. If enabled, you can view NPU-related monitoring in the observability module. Before enabling it, ensure that the insight-agent is installed and running; otherwise, the ascend-mindxdl installation will fail.
                                                                                        • isVirtualMachine: Disabled by default. If the NPU node is a virtual machine scenario, enable the isVirtualMachine parameter.

                                                                                        After a successful installation, two components will appear under the proper namespace, as shown below:

                                                                                        At the same time, the proper NPU information will also appear on the node information:

                                                                                        Once everything is ready, you can select the proper NPU device when creating a workload through the page, as shown below:

                                                                                        Note

                                                                                        For detailed usage information, refer to Use Ascend NPU.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/vnpu.html","title":"Enable Ascend Virtualization","text":"

                                                                                        Ascend virtualization is divided into dynamic virtualization and static virtualization. This document describes how to enable and use Ascend static virtualization capabilities.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/vnpu.html#prerequisites","title":"Prerequisites","text":"
                                                                                        • A Kubernetes cluster environment has been set up.
                                                                                        • The current NPU node has the Ascend driver installed.
                                                                                        • The current NPU node has the Ascend-Docker-Runtime component installed.
                                                                                        • The NPU MindX DL suite is installed on the current cluster.
                                                                                        • Supported NPU models:

                                                                                          • Ascend 310P, verified
                                                                                          • Ascend 910b (20 cores), verified
                                                                                          • Ascend 910 (32 cores), officially supported but not verified
                                                                                          • Ascend 910 (30 cores), officially supported but not verified

                                                                                          For more details, refer to the official virtualization hardware documentation.

                                                                                        Refer to the Ascend NPU Component Installation Documentation for the basic environment setup.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/vnpu.html#enable-virtualization-capabilities","title":"Enable Virtualization Capabilities","text":"

                                                                                        To enable virtualization capabilities, you need to manually modify the startup parameters of the ascend-device-plugin-daemonset component. Refer to the following command:

                                                                                        - device-plugin -useAscendDocker=true -volcanoType=false -presetVirtualDevice=true\n- logFile=/var/log/mindx-dl/devicePlugin/devicePlugin.log -logLevel=0\n
                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/vnpu.html#split-vnpu-instances","title":"Split VNPU Instances","text":"

                                                                                        Static virtualization requires manually splitting VNPU instances. Refer to the following command:

                                                                                        npu-smi set -t create-vnpu -i 13 -c 0 -f vir02\n
                                                                                        • i refers to the card id.
                                                                                        • c refers to the chip id.
                                                                                        • vir02 refers to the split specification template.

                                                                                        Card id and chip id can be queried using npu-smi info. The split specifications can be found in the Ascend official templates.

                                                                                        After splitting the instance, you can query the split results using the following command:

                                                                                        npu-smi info -t info-vnpu -i 13 -c 0\n

                                                                                        The query result is as follows:

                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/vnpu.html#restart-ascend-device-plugin-daemonset","title":"Restart ascend-device-plugin-daemonset","text":"

                                                                                        After splitting the instance, manually restart the device-plugin pod, then use the kubectl describe command to check the resources of the registered node:

                                                                                        kubectl describe node {{nodename}}\n

                                                                                        "},{"location":"en/end-user/kpanda/gpu/ascend/vnpu.html#how-to-use-the-device","title":"How to Use the Device","text":"

                                                                                        When creating an application, specify the resource key as shown in the following YAML:

                                                                                        ......\nresources:\n  requests:\n    huawei.com/Ascend310P-2c: 1\n  limits:\n    huawei.com/Ascend310P-2c: 1\n......\n
                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/index.html","title":"NVIDIA GPU Usage Modes","text":"

                                                                                        NVIDIA, a well-known provider of graphics computing, offers various software and hardware solutions to enhance computational power. Among them, NVIDIA provides the following three solutions for GPU usage:

                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/index.html#full-gpu","title":"Full GPU","text":"

                                                                                        Full GPU refers to allocating the entire NVIDIA GPU to a single user or application. In this configuration, the application can fully occupy all the resources of the GPU and achieve maximum computational performance. Full GPU is suitable for workloads that require a large amount of computational resources and memory, such as deep learning training, scientific computing, etc.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/index.html#vgpu-virtual-gpu","title":"vGPU (Virtual GPU)","text":"

                                                                                        vGPU is a virtualization technology that allows one physical GPU to be partitioned into multiple virtual GPUs, with each virtual GPU assigned to different virtual machines or users. vGPU enables multiple users to share the same physical GPU and independently use GPU resources in their respective virtual environments. Each virtual GPU can access a certain amount of compute power and memory capacity. vGPU is suitable for virtualized environments and cloud computing scenarios, providing higher resource utilization and flexibility.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/index.html#mig-multi-instance-gpu","title":"MIG (Multi-Instance GPU)","text":"

                                                                                        MIG is a feature introduced by the NVIDIA Ampere architecture that allows one physical GPU to be divided into multiple physical GPU instances, each of which can be independently allocated to different users or workloads. Each MIG instance has its own compute resources, memory, and PCIe bandwidth, just like an independent virtual GPU. MIG provides finer-grained GPU resource allocation and management and allows dynamic adjustment of the number and size of instances based on demand. MIG is suitable for multi-tenant environments, containerized applications, batch jobs, and other scenarios.

                                                                                        Whether using vGPU in a virtualized environment or MIG on a physical GPU, NVIDIA provides users with more choices and optimized ways to utilize GPU resources. The Suanova container management platform fully supports the above NVIDIA capabilities. Users can easily access the full computational power of NVIDIA GPUs through simple UI operations, thereby improving resource utilization and reducing costs.

                                                                                        • Single Mode: The node exposes a single type of MIG device on all of its GPUs. All GPUs on the node must:
                                                                                          • Be of the same model (e.g., A100-SXM-40GB); matching MIG profiles are only possible for GPUs of the same model.
                                                                                          • Have MIG enabled, which requires a machine reboot to take effect.
                                                                                          • Create identical GPU Instances (GI) and Compute Instances (CI), so that identical MIG devices are exposed across all GPUs.
                                                                                        • Mixed Mode: The node exposes a mixture of MIG device types across its GPUs. Requesting a specific MIG device type requires specifying the number of compute slices and the total memory provided by that device type.
                                                                                          • All GPUs on the node must be in the same product line (e.g., A100-SXM-40GB).
                                                                                          • Each GPU can individually enable or disable MIG and can be freely configured with any available mixture of MIG device types.
                                                                                          • The k8s-device-plugin running on the node will:
                                                                                            • Expose any GPUs not in MIG mode using the traditional nvidia.com/gpu resource type.
                                                                                            • Expose individual MIG devices using resource types that follow the pattern nvidia.com/mig-<slice_count>g.<memory_size>gb .

                                                                                        For detailed instructions on enabling these configurations, refer to Offline Installation of GPU Operator.
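                                                                                        For example, in mixed mode a workload requests a specific MIG device through the resource pattern above. The following is a minimal sketch, assuming a 2g.10gb MIG profile has already been created on the node; the Pod name and image are illustrative:

                                                                                        apiVersion: v1\nkind: Pod\nmetadata:\n  name: mig-demo  # hypothetical name\nspec:\n  containers:\n    - name: main\n      image: nvidia/cuda:11.8.0-base-ubuntu20.04  # illustrative image\n      command: [\"nvidia-smi\", \"-L\"]  # list the MIG device visible to the container\n      resources:\n        limits:\n          nvidia.com/mig-2g.10gb: 1  # one 2-slice, 10 GB MIG device\n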

                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/index.html#how-to-use","title":"How to Use","text":"

                                                                                        You can refer to the following links to quickly start using Suanova's management capabilities for NVIDIA GPUs.

                                                                                        • Using Full NVIDIA GPU
                                                                                        • Using NVIDIA vGPU
                                                                                        • Using NVIDIA MIG
                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/full_gpu_userguide.html","title":"Using the Whole NVIDIA GPU for an Application","text":"

                                                                                        This section describes how to allocate the entire NVIDIA GPU to a single application on the AI platform.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/full_gpu_userguide.html#prerequisites","title":"Prerequisites","text":"
                                                                                        • AI platform container management platform has been deployed and is running properly.
                                                                                        • The container management module has been connected to a Kubernetes cluster or a Kubernetes cluster has been created, and you can access the UI interface of the cluster.
                                                                                        • GPU Operator has been offline installed and NVIDIA DevicePlugin has been enabled on the current cluster. Refer to Offline Installation of GPU Operator for instructions.
                                                                                        • The GPU in the current cluster has not undergone any virtualization operations or been occupied by other applications.
                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/full_gpu_userguide.html#procedure","title":"Procedure","text":""},{"location":"en/end-user/kpanda/gpu/nvidia/full_gpu_userguide.html#configuring-via-the-user-interface","title":"Configuring via the User Interface","text":"
                                                                                        1. Check if the cluster has detected the GPUs. Click Clusters -> Cluster Settings -> Addon Plugins to see if it has automatically enabled and detected the proper GPU types. Currently, the cluster will automatically enable GPU and set the GPU Type as Nvidia GPU .

                                                                                        2. Deploy a workload. Click Clusters -> Workloads , and deploy the workload using the image method. After selecting the type ( Nvidia GPU ), configure the number of physical cards used by the application:

                                                                                          Physical Card Count (nvidia.com/gpu): Indicates the number of physical cards that the current pod needs to mount. The input value must be an integer and less than or equal to the number of cards on the host machine.

                                                                                          If the above value is configured incorrectly, scheduling failures and resource allocation issues may occur.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/full_gpu_userguide.html#configuring-via-yaml","title":"Configuring via YAML","text":"

                                                                                        To request GPU resources for a workload, add the nvidia.com/gpu: 1 parameter to the resource request and limit configuration in the YAML file. This parameter configures the number of physical cards used by the application.

                                                                                        apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-gpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-gpu-demo\n  template:\n    metadata:\n      labels:\n        app: full-gpu-demo\n    spec:\n      containers:\n      - image: chrstnhntschl/gpu_burn\n        name: container-0\n        resources:\n          requests:\n            cpu: 250m\n            memory: 512Mi\n            nvidia.com/gpu: 1   # Number of GPUs requested\n          limits:\n            cpu: 250m\n            memory: 512Mi\n            nvidia.com/gpu: 1   # Upper limit of GPU usage\n      imagePullSecrets:\n      - name: default-secret\n

                                                                                        Note

                                                                                        When using the nvidia.com/gpu parameter to specify the number of GPUs, the values for requests and limits must be consistent.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html","title":"Offline Install gpu-operator","text":"

                                                                                        AI platform comes with pre-installed driver images for the following three operating systems: Ubuntu 22.04, Ubuntu 20.04, and CentOS 7.9. The driver version is 535.104.12. Additionally, it includes the required Toolkit images for each operating system, so users no longer need to manually provide offline toolkit images.

                                                                                        This page demonstrates deployment on the AMD64 (x86_64) architecture with CentOS 7.9 (kernel 3.10.0-1160). If you need to deploy on Red Hat 8.4, refer to Uploading Red Hat gpu-operator Offline Image to the Bootstrap Node Repository and Building Offline Yum Source for Red Hat 8.4.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#prerequisites","title":"Prerequisites","text":"
                                                                                        • The kernel version of the cluster nodes where the gpu-operator is to be deployed must be completely consistent. The distribution and GPU model of the nodes must fall within the scope specified in the GPU Support Matrix.
                                                                                        • When installing the gpu-operator, select v23.9.0+2 or above.
                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#steps","title":"Steps","text":"

                                                                                        To install the gpu-operator plugin for your cluster, follow these steps:

                                                                                        1. Log in to the platform, go to Container Management -> Clusters , and click a cluster name to view the cluster details.

                                                                                        2. On the Helm Charts page, select All Repositories and search for gpu-operator .

                                                                                        3. Select gpu-operator and click Install .

                                                                                        4. Configure the installation parameters for gpu-operator based on the instructions below to complete the installation.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#configure-parameters","title":"Configure parameters","text":"
                                                                                        • systemOS : Select the operating system of the host. The current options are Ubuntu 22.04, Ubuntu 20.04, CentOS 7.9, and other. Please choose the correct operating system.
                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#basic-information","title":"Basic information","text":"
                                                                                        • Name : Enter the plugin name
                                                                                        • Namespace : Select the namespace for installing the plugin
                                                                                        • Version: The version of the plugin. Here, we use version v23.9.0+2 as an example.
                                                                                        • Failure Deletion: If the installation fails, it will delete the already installed associated resources. When enabled, Ready Wait will also be enabled by default.
                                                                                        • Ready Wait: When enabled, the application will be marked as successfully installed only when all associated resources are in a ready state.
                                                                                        • Detailed Logs: When enabled, detailed logs of the installation process will be recorded.
                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#advanced-settings","title":"Advanced settings","text":""},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#operator-parameters","title":"Operator parameters","text":"
                                                                                        • InitContainer.image : Configure the CUDA image, recommended default image: nvidia/cuda
                                                                                        • InitContainer.repository : Repository where the CUDA image is located, defaults to nvcr.m.daocloud.io repository
                                                                                        • InitContainer.version : Version of the CUDA image, please use the default parameter
                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#driver-parameters","title":"Driver parameters","text":"
                                                                                        • Driver.enable : Configure whether to deploy the NVIDIA driver on the node, default is enabled. If you have already deployed the NVIDIA driver on the node before using the gpu-operator, please disable this.
                                                                                        • Driver.image : Configure the GPU driver image, recommended default image: nvidia/driver .
                                                                                        • Driver.repository : Repository where the GPU driver image is located, default is nvidia's nvcr.io repository.
                                                                                        • Driver.usePrecompiled : Enable the precompiled mode to install the driver.
                                                                                        • Driver.version : Version of the GPU driver image, use default parameters for offline deployment. Configuration is only required for online installation. Different versions of the Driver image exist for different types of operating systems. For more details, refer to Nvidia GPU Driver Versions. Examples of Driver Version for different operating systems are as follows:

                                                                                          Note

                                                                                          When using the built-in operating system version, there is no need to modify the image version. For other operating system versions, refer to Uploading Images to the Bootstrap Node Repository. Note that the version number does not include the operating system name, such as Ubuntu, CentOS, or Red Hat. If the official image contains an operating system suffix, manually remove it.

                                                                                          • For Red Hat systems, for example, 525.105.17
                                                                                          • For Ubuntu systems, for example, 535-5.15.0-1043-nvidia
                                                                                          • For CentOS systems, for example, 525.147.05
                                                                                        • Driver.RepoConfig.ConfigMapName : Records the name of the offline yum repository configuration file for the gpu-operator. When using the pre-packaged offline bundle, refer to the following documents for different operating systems; a values sketch follows this list.

                                                                                          • Building CentOS 7.9 Offline Yum Repository
                                                                                          • Building Red Hat 8.4 Offline Yum Repository
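                                                                                        For reference, a minimal sketch of these driver settings expressed as Helm values, assuming the chart follows the upstream gpu-operator values layout (key names may differ across chart versions):

                                                                                          driver:\n  enabled: true                        # set to false if the node already has a driver\n  repository: nvcr.io/nvidia           # default repository mentioned above\n  version: \"525.147.05\"                # example CentOS driver version from above\n  repoConfig:\n    configMapName: local-repo-config   # offline yum repository ConfigMap\n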
                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#toolkit-parameters","title":"Toolkit parameters","text":"

                                                                                        Toolkit.enable : Enabled by default. This component allows containerd/docker to support running containers that require GPUs.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#mig-parameters","title":"MIG parameters","text":"

                                                                                        For detailed configuration methods, refer to Enabling MIG Functionality.

                                                                                        MigManager.Config.name : The name of the MIG split configuration file, used to define the MIG (GI, CI) split policy. The default is default-mig-parted-config . For custom parameters, refer to Enabling MIG Functionality.
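                                                                                        For reference, a minimal sketch of a custom entry in that configuration file, following the upstream mig-parted config format (profile names and instance counts depend on your GPU model):

                                                                                          version: v1\nmig-configs:\n  custom-config:\n    - devices: all          # apply to every GPU on the node\n      mig-enabled: true\n      mig-devices:\n        \"1g.10gb\": 7        # profile and count depend on the GPU model\n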

                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/install_nvidia_driver_of_operator.html#next-steps","title":"Next Steps","text":"

                                                                                        After completing the configuration and creation of the above parameters:

                                                                                        • If using full-card mode , GPU resources can be used when creating applications.

                                                                                        • If using vGPU mode , after completing the above configuration and creation, proceed to vGPU Addon Installation.

                                                                                        • If using MIG mode , label the individual GPU nodes that need a specific partition profile; unlabeled nodes are partitioned according to the default value in MigManager.Config .

                                                                                          • For single mode, add label to nodes as follows:

                                                                                            kubectl label nodes {node} nvidia.com/mig.config=\"all-1g.10gb\" --overwrite\n
                                                                                          • For mixed mode, add label to nodes as follows:

                                                                                            kubectl label nodes {node} nvidia.com/mig.config=\"custom-config\" --overwrite\n

                                                                                          After partitioning is complete, applications can use MIG GPU resources.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/push_image_to_repo.html","title":"Uploading Red Hat GPU Operator Offline Image to Bootstrap Repository","text":"

                                                                                        This guide explains how to upload an offline image to the bootstrap repository using the nvcr.io/nvidia/driver:525.105.17-rhel8.4 offline driver image for Red Hat 8.4 as an example.

                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/push_image_to_repo.html#prerequisites","title":"Prerequisites","text":"
                                                                                        1. The bootstrap node and its components are running properly.
                                                                                        2. Prepare a node that has internet access and can access the bootstrap node. Docker should also be installed on this node. You can refer to Installing Docker for installation instructions.
                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/push_image_to_repo.html#procedure","title":"Procedure","text":""},{"location":"en/end-user/kpanda/gpu/nvidia/push_image_to_repo.html#step-1-obtain-the-offline-image-on-an-internet-connected-node","title":"Step 1: Obtain the Offline Image on an Internet-Connected Node","text":"

                                                                                        Perform the following steps on the internet-connected node:

                                                                                        1. Pull the nvcr.io/nvidia/driver:525.105.17-rhel8.4 offline driver image:

                                                                                          docker pull nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
                                                                                        2. Once the image is pulled, save it as a compressed archive named nvidia-driver.tar :

                                                                                          docker save nvcr.io/nvidia/driver:525.105.17-rhel8.4 > nvidia-driver.tar\n
                                                                                        3. Copy the compressed image archive nvidia-driver.tar to the bootstrap node:

                                                                                          scp nvidia-driver.tar user@ip:/root\n

                                                                                          For example:

                                                                                          scp nvidia-driver.tar root@10.6.175.10:/root\n
                                                                                        "},{"location":"en/end-user/kpanda/gpu/nvidia/push_image_to_repo.html#step-2-push-the-image-to-the-bootstrap-repository","title":"Step 2: Push the Image to the Bootstrap Repository","text":"

                                                                                        Perform the following steps on the bootstrap node:

                                                                                        1. Log in to the bootstrap node and import the compressed image archive nvidia-driver.tar :

                                                                                          docker load -i nvidia-driver.tar\n
                                                                                        2. View the imported image:

                                                                                          docker images -a | grep nvidia\n

                                                                                          Expected output:

                                                                                            nvcr.io/nvidia/driver   525.105.17-rhel8.4   e3ed7dee73e9   1 day ago   1.02GB\n
                                                                                        3. Retag the image to correspond to the target repository in the remote Registry repository:

                                                                                          docker tag <image-name> <registry-url>/<repository-name>:<tag>\n

                                                                                          Replace <image-name> with the name of the Nvidia image from the previous step, <registry-url> with the address of the Registry service on the bootstrap node, <repository-name> with the name of the repository you want to push the image to, and <tag> with the desired tag for the image.

                                                                                          For example:

                                                                                            docker tag nvcr.io/nvidia/driver:525.105.17-rhel8.4 10.6.10.5/nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
                                                                                        4. Push the image to the bootstrap repository:

                                                                                          docker push {ip}/nvcr.io/nvidia/driver:525.105.17-rhel8.4\n
                                                                                        5. "},{"location":"en/end-user/kpanda/gpu/nvidia/push_image_to_repo.html#whats-next","title":"What's Next","text":"

                                                                                          Refer to Building Red Hat 8.4 Offline Yum Source and Offline Installation of GPU Operator to deploy the GPU Operator to your cluster.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html","title":"Offline Install gpu-operator Driver on Ubuntu 22.04","text":"

                                                                                          Prerequisite: gpu-operator v23.9.0+2 or a higher version is installed

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html#prepare-offline-image","title":"Prepare Offline Image","text":"
                                                                                          1. Check the kernel version

                                                                                            $ uname -r\n5.15.0-78-generic\n
                                                                                          2. Check the GPU driver image version applicable to your kernel at https://catalog.ngc.nvidia.com/orgs/nvidia/containers/driver/tags. Query the image tag by kernel version, then pull the image and save it using ctr export.

                                                                                            ctr i pull nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04\nctr i export --all-platforms driver.tar.gz nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 \n
                                                                                          3. Import the image into the cluster's container registry

                                                                                            ctr i import driver.tar.gz\nctr i tag nvcr.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 {your_registry}/nvcr.m.daocloud.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04\nctr i push {your_registry}/nvcr.m.daocloud.io/nvidia/driver:535-5.15.0-78-generic-ubuntu22.04 --skip-verify=true\n
                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/ubuntu22.04_offline_install_driver.html#install-the-driver","title":"Install the Driver","text":"
                                                                                          1. Install the gpu-operator addon and set driver.usePrecompiled=true
                                                                                          2. Set driver.version=535, note that it should be 535, not 535.104.12
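                                                                                          For reference, a minimal sketch of these two settings as Helm values, assuming the chart follows the upstream gpu-operator values layout:

                                                                                            driver:\n  usePrecompiled: true   # install the precompiled driver\n  version: \"535\"         # major version only, not 535.104.12\n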
                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html","title":"Build CentOS 7.9 Offline Yum Source","text":"

                                                                                          The AI platform comes with a pre-installed GPU Operator offline package for CentOS 7.9 with kernel version 3.10.0-1160. For other OS types or kernel versions, users need to manually build an offline yum source.

                                                                                          This guide explains how to build an offline yum source for CentOS 7.9 with a specific kernel version and use it when installing the GPU Operator by specifying the RepoConfig.ConfigMapName parameter.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#prerequisites","title":"Prerequisites","text":"
                                                                                          1. The user has already installed the v0.12.0 or later version of the addon offline package on the platform.
                                                                                          2. Prepare a file server that is accessible from the cluster network, such as Nginx or MinIO.
                                                                                          3. Prepare a node that has internet access, can access the cluster where the GPU Operator will be deployed, and can access the file server. Docker should also be installed on this node. You can refer to Installing Docker for installation instructions.
                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#procedure","title":"Procedure","text":"

                                                                                          This guide uses CentOS 7.9 with kernel version 3.10.0-1160.95.1.el7.x86_64 as an example to explain how to upgrade the pre-installed GPU Operator offline package's yum source.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#check-os-and-kernel-versions-of-cluster-nodes","title":"Check OS and Kernel Versions of Cluster Nodes","text":"

                                                                                          Run the following commands on both the control node of the Global cluster and the node where GPU Operator will be deployed. If the OS and kernel versions of the two nodes are consistent, there is no need to build a yum source. You can directly refer to the Offline Installation of GPU Operator document for installation. If the OS or kernel versions of the two nodes are not consistent, please proceed to the next step.

                                                                                          1. Run the following command to view the distribution name and version of the node where GPU Operator will be deployed in the cluster.

                                                                                            cat /etc/redhat-release\n

                                                                                            Expected output:

                                                                                            CentOS Linux release 7.9 (Core)\n

                                                                                            The output shows the current node's OS version as CentOS 7.9.

                                                                                          2. Run the following command to view the kernel version of the node where GPU Operator will be deployed in the cluster.

                                                                                            uname -a\n

                                                                                            Expected output:

                                                                                            Linux localhost.localdomain 3.10.0-1160.95.1.el7.x86_64 #1 SMP Mon Oct 19 16:18:59 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux\n

                                                                                            The output shows the current node's kernel version as 3.10.0-1160.95.1.el7.x86_64.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#create-the-offline-yum-source","title":"Create the Offline Yum Source","text":"

                                                                                          Perform the following steps on a node that has internet access and can access the file server:

                                                                                          1. Create a script file named yum.sh by running the following command:

                                                                                            vi yum.sh\n

                                                                                            Then press the i key to enter insert mode and enter the following content:

                                                                                            export TARGET_KERNEL_VERSION=$1\n\ncat >> run.sh << \\EOF\n#! /bin/bash\necho \"start install kernel repo\"\necho ${KERNEL_VERSION}\nmkdir centos-base\n\nif [ \"$OS\" -eq 7 ]; then\n    yum install --downloadonly --downloaddir=./centos-base perl\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf.x86_64\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf-devel.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-headers-${KERNEL_VERSION}.el7.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-devel-${KERNEL_VERSION}.el7.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-${KERNEL_VERSION}.el7.x86_64\n    yum install  -y --downloadonly --downloaddir=./centos-base groff-base\nelif [ \"$OS\" -eq 8 ]; then\n    yum install --downloadonly --downloaddir=./centos-base perl\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf.x86_64\n    yum install --downloadonly --downloaddir=./centos-base elfutils-libelf-devel.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-headers-${KERNEL_VERSION}.el8.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-devel-${KERNEL_VERSION}.el8.x86_64\n    yum install --downloadonly --downloaddir=./centos-base kernel-${KERNEL_VERSION}.el8.x86_64\n    yum install  -y --downloadonly --downloaddir=./centos-base groff-base\nelse\n    echo \"Error os version\"\nfi\n\ncreaterepo centos-base/\nls -lh centos-base/\ntar -zcf centos-base.tar.gz centos-base/\necho \"end install kernel repo\"\nEOF\n\ncat >> Dockerfile << EOF\nFROM centos:7\nENV KERNEL_VERSION=\"\"\nENV OS=7\nRUN yum install -y createrepo\nCOPY run.sh .\nENTRYPOINT [\"/bin/bash\",\"run.sh\"]\nEOF\n\ndocker build -t test:v1 -f Dockerfile .\ndocker run -e KERNEL_VERSION=$TARGET_KERNEL_VERSION --name centos7.9 test:v1\ndocker cp centos7.9:/centos-base.tar.gz .\ntar -xzf centos-base.tar.gz\n

                                                                                            Press the Esc key to exit insert mode, then enter :wq to save and exit.

                                                                                          2. Run the yum.sh file:

                                                                                            bash -x yum.sh TARGET_KERNEL_VERSION\n

                                                                                            The TARGET_KERNEL_VERSION parameter is used to specify the kernel version of the cluster nodes.

                                                                                            Note: You don't need to include the distribution identifier (e.g., __ .el7.x86_64__ ). For example:

                                                                                            bash -x yum.sh 3.10.0-1160.95.1\n

                                                                                          Now you have generated an offline yum source, centos-base , for the kernel version 3.10.0-1160.95.1.el7.x86_64 .

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#upload-the-offline-yum-source-to-the-file-server","title":"Upload the Offline Yum Source to the File Server","text":"

                                                                                          Perform the following steps on a node that has internet access and can access the file server. This step is used to upload the generated yum source from the previous step to a file server that can be accessed by the cluster where the GPU Operator will be deployed. The file server can be Nginx, MinIO, or any other file server that supports the HTTP protocol.

                                                                                          In this example, we will use the built-in MinIO as the file server. The MinIO details are as follows:

                                                                                          • Access URL: http://10.5.14.200:9000 (usually {bootstrap-node IP} + {port-9000} )
                                                                                          • Login username: rootuser
                                                                                          • Login password: rootpass123

                                                                                          • Run the following command in the current directory of the node to establish a connection between the node's local mc command-line tool and the MinIO server:

                                                                                            mc config host add minio http://10.5.14.200:9000 rootuser rootpass123\n

                                                                                            The expected output should resemble the following:

                                                                                            Added __minio__ successfully.\n

                                                                                            mc is the command-line tool provided by MinIO for interacting with the MinIO server. For more details, refer to the MinIO Client documentation.

                                                                                          • In the current directory of the node, create a bucket named centos-base :

                                                                                            mc mb -p minio/centos-base\n

                                                                                            The expected output should resemble the following:

                                                                                            Bucket created successfully __minio/centos-base__ .\n
                                                                                          • Set the access policy of the bucket centos-base to allow public download. This will enable access during the installation of the GPU Operator:

                                                                                            mc anonymous set download minio/centos-base\n

                                                                                            The expected output should resemble the following:

                                                                                            Access permission for __minio/centos-base__ is set to __download__ \n
                                                                                          • In the current directory of the node, copy the generated centos-base offline yum source to the minio/centos-base bucket on the MinIO server (you can then verify public access, as shown after this list):

                                                                                            mc cp centos-base minio/centos-base --recursive\n
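                                                                                          As an optional check (the paths assume the example above), verify that the repository metadata is publicly downloadable before installing the GPU Operator:

                                                                                            curl -I http://10.5.14.200:9000/centos-base/centos-base/repodata/repomd.xml   # expect HTTP/1.1 200 OK\n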
                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_centos7_9.html#create-a-configmap-to-store-the-yum-source-info-in-the-cluster","title":"Create a ConfigMap to Store the Yum Source Info in the Cluster","text":"

                                                                                          Perform the following steps on the control node of the cluster where the GPU Operator will be deployed.

                                                                                          1. Run the following command to create a file named CentOS-Base.repo that specifies the configmap for the yum source storage:

                                                                                            # The file name must be CentOS-Base.repo, otherwise it cannot be recognized during the installation of the GPU Operator\n# The baseurl is the file server address where the yum source was placed in the previous step\ncat > CentOS-Base.repo << EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
                                                                                          2. Based on the created CentOS-Base.repo file, create a configmap named local-repo-config in the gpu-operator namespace:

                                                                                            kubectl create configmap local-repo-config -n gpu-operator --from-file=./CentOS-Base.repo\n

                                                                                            The expected output should resemble the following:

                                                                                            configmap/local-repo-config created\n

                                                                                            The local-repo-config configmap will be used to provide the value for the RepoConfig.ConfigMapName parameter during the installation of the GPU Operator. You can customize the configuration file name.

                                                                                          3. View the content of the local-repo-config configmap:

                                                                                            kubectl get configmap local-repo-config -n gpu-operator -oyaml\n

                                                                                            The expected output should resemble the following:

                                                                                            apiVersion: v1\ndata:\n  CentOS-Base.repo: \"[extension-0]\\nbaseurl = http://10.6.232.5:32618/centos-base\\ngpgcheck = 0\\nname = kubean extension 0\\n\\n[extension-1]\\nbaseurl = http://10.6.232.5:32618/centos-base\\ngpgcheck = 0\\nname = kubean extension 1\\n\"\nkind: ConfigMap\nmetadata:\n  creationTimestamp: \"2023-10-18T01:59:02Z\"\n  name: local-repo-config\n  namespace: gpu-operator\n  resourceVersion: \"59445080\"\n  uid: c5f0ebab-046f-442c-b932-f9003e014387\n

                                                                                          You have successfully created an offline yum source configuration file for the cluster where the GPU Operator will be deployed. You can use it during the offline installation of the GPU Operator by specifying the RepoConfig.ConfigMapName parameter.
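                                                                                          For reference, a minimal installation sketch, assuming the chart exposes the upstream driver.repoConfig.configMapName value (surfaced as RepoConfig.ConfigMapName in the platform UI); <chart-ref> is a placeholder for your gpu-operator chart reference:

                                                                                            helm install gpu-operator <chart-ref> -n gpu-operator \\\n  --set driver.repoConfig.configMapName=local-repo-config\n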

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html","title":"Building Red Hat 8.4 Offline Yum Source","text":"

                                                                                          The AI platform comes with a pre-installed GPU Operator offline package for CentOS 7.9 with kernel v3.10.0-1160. For other OS types or nodes with different kernels, users need to manually build the offline yum source.

                                                                                          This guide explains how to build an offline yum source package for Red Hat 8.4 based on any node in the Global cluster. It also demonstrates how to use it during the installation of the GPU Operator by specifying the RepoConfig.ConfigMapName parameter.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#prerequisites","title":"Prerequisites","text":"
                                                                                          1. The user has already installed the addon offline package v0.12.0 or higher on the platform.
                                                                                          2. The OS of the cluster nodes where the GPU Operator will be deployed must be Red Hat v8.4, and the kernel version must be identical.
                                                                                          3. Prepare a file server that can communicate with the cluster network where the GPU Operator will be deployed, such as Nginx or MinIO.
                                                                                          4. Prepare a node that can access the internet, the cluster where the GPU Operator will be deployed, and the file server. Ensure that Docker is already installed on this node.
                                                                                          5. The nodes in the Global cluster must be Red Hat 8.4 4.18.0-305.el8.x86_64.
                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#procedure","title":"Procedure","text":"

                                                                                          This guide uses a node with Red Hat 8.4 4.18.0-305.el8.x86_64 as an example to demonstrate how to build an offline yum source package for Red Hat 8.4 based on any node in the Global cluster. It also explains how to use it during the installation of the GPU Operator by specifying the RepoConfig.ConfigMapName parameter.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-1-download-the-yum-source-from-the-bootstrap-node","title":"Step 1: Download the Yum Source from the Bootstrap Node","text":"

                                                                                          Perform the following steps on the master node of the Global cluster.

                                                                                          1. Use SSH or any other method to access any node in the Global cluster and run the following command:

                                                                                            cat /etc/yum.repos.d/extension.repo # View the contents of extension.repo.\n

                                                                                            The expected output should resemble the following:

                                                                                            [extension-0]\nbaseurl = http://10.5.14.200:9000/kubean/redhat/$releasever/os/$basearch\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/kubean/redhat-iso/$releasever/os/$basearch/AppStream\ngpgcheck = 0\nname = kubean extension 1\n\n[extension-2]\nbaseurl = http://10.5.14.200:9000/kubean/redhat-iso/$releasever/os/$basearch/BaseOS\ngpgcheck = 0\nname = kubean extension 2\n
                                                                                          2. Create a folder named redhat-base-repo under the root directory:

                                                                                            mkdir redhat-base-repo\n
                                                                                          3. Download the RPM packages from the yum source to your local machine:

                                                                                            Download the RPM packages from extension-1 :

                                                                                            reposync -p redhat-base-repo -n --repoid=extension-1\n

                                                                                            Download the RPM packages from extension-2 :

                                                                                            reposync -p redhat-base-repo -n --repoid=extension-2\n
                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-2-download-the-elfutils-libelf-devel-0187-4el8x86_64rpm-package","title":"Step 2: Download the elfutils-libelf-devel-0.187-4.el8.x86_64.rpm Package","text":"

                                                                                          Perform the following steps on a node with internet access. Before proceeding, ensure that there is network connectivity between the node with internet access and the master node of the Global cluster.

                                                                                          1. Run the following command on the node with internet access to download the elfutils-libelf-devel-0.187-4.el8.x86_64.rpm package:

                                                                                            wget https://rpmfind.net/linux/centos/8-stream/BaseOS/x86_64/os/Packages/elfutils-libelf-devel-0.187-4.el8.x86_64.rpm\n
                                                                                          2. Transfer the elfutils-libelf-devel-0.187-4.el8.x86_64.rpm package from the current directory to the node mentioned in step 1:

                                                                                            scp elfutils-libelf-devel-0.187-4.el8.x86_64.rpm user@ip:~/redhat-base-repo/extension-2/Packages/\n

                                                                                            For example:

                                                                                            scp elfutils-libelf-devel-0.187-4.el8.x86_64.rpm root@10.6.175.10:~/redhat-base-repo/extension-2/Packages/\n
                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-3-generate-the-local-yum-repository","title":"Step 3: Generate the Local Yum Repository","text":"

                                                                                          Perform the following steps on the master node of the Global cluster mentioned in Step 1.

                                                                                          1. Enter each of the yum repository directories in turn:

                                                                                            cd ~/redhat-base-repo/extension-1/Packages\ncd ~/redhat-base-repo/extension-2/Packages\n
                                                                                          2. In each directory, generate the repository index (see the sketch after this list):

                                                                                            createrepo_c ./\n
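                                                                                          Equivalently, a small shell sketch that builds the index in both directories (paths as above):

                                                                                            for d in ~/redhat-base-repo/extension-1/Packages ~/redhat-base-repo/extension-2/Packages; do\n  (cd \"$d\" && createrepo_c ./)   # generate repodata in each directory\ndone\n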

                                                                                          You have now generated the offline yum source named redhat-base-repo for kernel version 4.18.0-305.el8.x86_64 .

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-4-upload-the-local-yum-repository-to-the-file-server","title":"Step 4: Upload the Local Yum Repository to the File Server","text":"

                                                                                          In this example, we will use MinIO, the file server built into the bootstrap node. However, you can choose any file server that suits your needs. The MinIO details are as follows:

                                                                                          • Access URL: http://10.5.14.200:9000 (usually the {bootstrap-node-IP} + {port-9000})
                                                                                          • Login username: rootuser
                                                                                          • Login password: rootpass123

                                                                                          • On the current node, establish a connection between the local mc command-line tool and the Minio server by running the following command:

                                                                                            mc config host add minio <file_server_access_url> <username> <password>\n

                                                                                            For example:

                                                                                            mc config host add minio http://10.5.14.200:9000 rootuser rootpass123\n

                                                                                            The expected output should be similar to:

                                                                                            Added __minio__ successfully.\n

                                                                                            mc is the command-line client provided by MinIO for interacting with the MinIO server. For more details, refer to the MinIO Client documentation.

                                                                                          • Create a bucket named redhat-base in the current location:

                                                                                            mc mb -p minio/redhat-base\n

                                                                                            The expected output should be similar to:

                                                                                            Bucket created successfully __minio/redhat-base__ .\n
                                                                                          • Set the access policy of the redhat-base bucket to allow public downloads so that it can be accessed during the installation of the GPU Operator:

                                                                                            mc anonymous set download minio/redhat-base\n

                                                                                            The expected output should be similar to:

                                                                                            Access permission for __minio/redhat-base__ is set to __download__ \n
                                                                                          • Copy the offline yum repository files ( redhat-base-repo ) from the current location to the Minio server's minio/redhat-base bucket:

                                                                                            mc cp redhat-base-repo minio/redhat-base --recursive\n
                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/upgrade_yum_source_redhat8_4.html#step-5-create-a-configmap-to-store-yum-repository-information-in-the-cluster","title":"Step 5: Create a ConfigMap to Store Yum Repository Information in the Cluster","text":"

                                                                                          Perform the following steps on the control node of the cluster where you will deploy the GPU Operator.

                                                                                          1. Run the following command to create a file named redhat.repo , which specifies the configuration information for the yum repository storage:

                                                                                            # The file name must be redhat.repo, otherwise it won't be recognized when installing gpu-operator\n# The baseurl is the file server address where the yum source was uploaded in Step 4\ncat > redhat.repo << EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/redhat-base/redhat-base-repo/Packages\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/redhat-base/redhat-base-repo/Packages\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
                                                                                          2. Based on the created redhat.repo file, create a configmap named local-repo-config in the gpu-operator namespace:

                                                                                            kubectl create configmap local-repo-config -n gpu-operator --from-file=./redhat.repo\n

                                                                                            The expected output should be similar to:

                                                                                            configmap/local-repo-config created\n

                                                                                            The local-repo-config configuration file is used to provide the value for the RepoConfig.ConfigMapName parameter during the installation of the GPU Operator. You can choose a different name for the configuration file.

                                                                                          3. View the contents of the local-repo-config configuration file:

                                                                                            kubectl get configmap local-repo-config -n gpu-operator -oyaml\n

                                                                                          You have successfully created the offline yum source configuration file for the cluster where the GPU Operator will be deployed. You can use it by specifying the RepoConfig.ConfigMapName parameter during the offline installation of the GPU Operator.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html","title":"Build an Offline Yum Repository for Red Hat 7.9","text":""},{"location":"en/end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#introduction","title":"Introduction","text":"

                                                                                          AI platform comes with a pre-installed GPU Operator offline package for CentOS 7.9 with kernel 3.10.0-1160. You need to manually build an offline yum repository for other OS types or nodes with different kernels.

                                                                                          This page explains how to build an offline yum repository for Red Hat 7.9 based on any node in the Global cluster, and how to use the RepoConfig.ConfigMapName parameter when installing the GPU Operator.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#prerequisites","title":"Prerequisites","text":"
1. The cluster nodes where the GPU Operator is to be deployed must run Red Hat 7.9 with the exact same kernel version.
2. Prepare a file server reachable from the network of the cluster where the GPU Operator is to be deployed, such as nginx or MinIO.
3. Prepare a node that can access the internet, the cluster where the GPU Operator is to be deployed, and the file server. Docker must already be installed on this node.
                                                                                          4. The nodes in the global service cluster must be Red Hat 7.9.
                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#steps","title":"Steps","text":""},{"location":"en/end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#1-build-offline-yum-repo-for-relevant-kernel","title":"1. Build Offline Yum Repo for Relevant Kernel","text":"
                                                                                          1. Download rhel7.9 ISO

                                                                                          2. Download the rhel7.9 ospackage that corresponds to your Kubean version.

                                                                                            Find the version number of Kubean in the Container Management section of the Global cluster under Helm Apps.

                                                                                            Download the rhel7.9 ospackage for that version from the Kubean repository.

                                                                                          3. Import offline resources using the installer.

                                                                                            Refer to the Import Offline Resources document.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#2-download-offline-driver-image-for-red-hat-79-os","title":"2. Download Offline Driver Image for Red Hat 7.9 OS","text":"

                                                                                          Click here to view the download url.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#3-upload-red-hat-gpu-operator-offline-image-to-boostrap-node-repository","title":"3. Upload Red Hat GPU Operator Offline Image to Boostrap Node Repository","text":"

Refer to Upload Red Hat GPU Operator Offline Image to Bootstrap Node Repository.

                                                                                          Note

                                                                                          This reference is based on rhel8.4, so make sure to modify it for rhel7.9.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/yum_source_redhat7_9.html#4-create-configmaps-in-the-cluster-to-save-yum-repository-information","title":"4. Create ConfigMaps in the Cluster to Save Yum Repository Information","text":"

                                                                                          Run the following command on the control node of the cluster where the GPU Operator is to be deployed.

1. Run the following command to create a file named CentOS-Base.repo, which specifies the yum repository configuration:

# The file name must be CentOS-Base.repo, otherwise it will not be recognized when installing gpu-operator\ncat > CentOS-Base.repo << EOF\n[extension-0]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # The server file address of the bootstrap node, usually {bootstrap node IP} + {port 9000}\ngpgcheck = 0\nname = kubean extension 0\n\n[extension-1]\nbaseurl = http://10.5.14.200:9000/centos-base/centos-base # The server file address of the bootstrap node, usually {bootstrap node IP} + {port 9000}\ngpgcheck = 0\nname = kubean extension 1\nEOF\n
2. Based on the created CentOS-Base.repo file, create a ConfigMap named local-repo-config in the gpu-operator namespace:

kubectl create configmap local-repo-config -n gpu-operator --from-file=./CentOS-Base.repo\n

                                                                                            The expected output is as follows:

                                                                                            configmap/local-repo-config created\n

The local-repo-config ConfigMap provides the value for the RepoConfig.ConfigMapName parameter when installing gpu-operator; the ConfigMap name can be customized.

3. View the contents of the local-repo-config ConfigMap:

                                                                                            kubectl get configmap local-repo-config -n gpu-operator -oyaml\n

                                                                                            The expected output is as follows:

                                                                                            local-repo-config.yaml
                                                                                            apiVersion: v1\ndata:\n  CentOS-Base.repo: \"[extension-0]\\nbaseurl = http://10.6.232.5:32618/centos-base # The file path where yum repository is placed in Step 2 \\ngpgcheck = 0\\nname = kubean extension 0\\n  \\n[extension-1]\\nbaseurl\n  = http://10.6.232.5:32618/centos-base # The file path where yum repository is placed in Step 2 \\ngpgcheck = 0\\nname\n  = kubean extension 1\\n\"\nkind: ConfigMap\nmetadata:\n  creationTimestamp: \"2023-10-18T01:59:02Z\"\n  name: local-repo-config\n  namespace: gpu-operator\n  resourceVersion: \"59445080\"\n  uid: c5f0ebab-046f-442c-b932-f9003e014387\n

At this point, you have successfully created the offline yum repository ConfigMap for the cluster where the GPU Operator is to be deployed. Use it by specifying the RepoConfig.ConfigMapName parameter during the Offline Installation of GPU Operator.
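Optionally, before installing the GPU Operator, you can confirm from a cluster node that the file server behind baseurl is reachable. A quick check, using the example address from the repo file above:

# Verify that the yum file server is reachable from a cluster node (example address from above)\ncurl -I http://10.5.14.200:9000/centos-base/centos-base\n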

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/index.html","title":"Overview of NVIDIA Multi-Instance GPU (MIG)","text":""},{"location":"en/end-user/kpanda/gpu/nvidia/mig/index.html#mig-scenarios","title":"MIG Scenarios","text":"
                                                                                          • Multi-Tenant Cloud Environments:

                                                                                          MIG allows cloud service providers to partition a physical GPU into multiple independent GPU instances, which can be allocated to different tenants. This enables resource isolation and independence, meeting the GPU computing needs of multiple tenants.

                                                                                          • Containerized Applications:

MIG enables finer-grained GPU resource management in containerized environments. By partitioning a physical GPU into multiple MIG instances, each container can be assigned dedicated GPU compute resources, providing better performance isolation and resource utilization.

                                                                                          • Batch Processing Jobs:

                                                                                          For batch processing jobs requiring large-scale parallel computing, MIG provides higher computational performance and larger memory capacity. Each MIG instance can utilize a portion of the physical GPU's compute resources, accelerating the processing of large-scale computational tasks.

                                                                                          • AI/Machine Learning Training:

                                                                                          MIG offers increased compute power and memory capacity for training large-scale deep learning models. By partitioning the physical GPU into multiple MIG instances, each instance can independently carry out model training, improving training efficiency and throughput.

                                                                                          In general, NVIDIA MIG is suitable for scenarios that require finer-grained allocation and management of GPU resources. It enables resource isolation, improved performance utilization, and meets the GPU computing needs of multiple users or applications.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/index.html#overview-of-mig","title":"Overview of MIG","text":"

                                                                                          NVIDIA Multi-Instance GPU (MIG) is a new feature introduced by NVIDIA on H100, A100, and A30 series GPUs. Its purpose is to divide a physical GPU into multiple GPU instances to provide finer-grained resource sharing and isolation. MIG can split a GPU into up to seven GPU instances, allowing a single physical GPU to provide separate GPU resources to multiple users, maximizing GPU utilization.

                                                                                          This feature enables multiple applications or users to share GPU resources simultaneously, improving the utilization of computational resources and increasing system scalability.

With MIG, each GPU instance's processors have an independent and isolated path through the entire memory system, including on-chip crossbar ports, L2 cache banks, memory controllers, and DRAM address buses, all uniquely allocated to a single instance.

                                                                                          This ensures that the workload of individual users can run with predictable throughput and latency, along with identical L2 cache allocation and DRAM bandwidth. MIG can partition available GPU compute resources (such as streaming multiprocessors or SMs and GPU engines like copy engines or decoders) to provide defined quality of service (QoS) and fault isolation for different clients such as virtual machines, containers, or processes. MIG enables multiple GPU instances to run in parallel on a single physical GPU.

                                                                                          MIG allows multiple vGPUs (and virtual machines) to run in parallel on a single GPU instance while retaining the isolation guarantees provided by vGPU. For more details on using vGPU and MIG for GPU partitioning, refer to NVIDIA Multi-Instance GPU and NVIDIA Virtual Compute Server.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/index.html#mig-architecture","title":"MIG Architecture","text":"

                                                                                          The following diagram provides an overview of MIG, illustrating how it virtualizes one physical GPU into seven GPU instances that can be used by multiple users.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/index.html#important-concepts","title":"Important Concepts","text":"
                                                                                          • SM (Streaming Multiprocessor): The core computational unit of a GPU responsible for executing graphics rendering and general-purpose computing tasks. Each SM contains a group of CUDA cores, as well as shared memory, register files, and other resources, capable of executing multiple threads concurrently. Each MIG instance has a certain number of SMs and other related resources, along with the allocated memory slices.
• GPU Memory Slice: The smallest portion of GPU memory, including the corresponding memory controller and cache. A GPU memory slice is approximately one-eighth of the total GPU memory resources in terms of capacity and bandwidth.
• GPU SM Slice: The smallest computational unit of SMs on a GPU. When configured in MIG mode, the GPU SM slice is approximately one-seventh of the total available SMs in the GPU.
• GPU Slice: The smallest portion of the GPU, consisting of a single GPU memory slice and a single GPU SM slice combined.
                                                                                          • GPU Instance (GI): A GPU instance is the combination of a GPU slice and GPU engines (DMA, NVDEC, etc.). Anything within a GPU instance always shares all GPU memory slices and other GPU engines, but its SM slice can be further subdivided into Compute Instances (CIs). A GPU instance provides memory QoS. Each GPU slice contains dedicated GPU memory resources, limiting available capacity and bandwidth while providing memory QoS. Each GPU memory slice gets one-eighth of the total GPU memory resources, and each GPU SM slice gets one-seventh of the total SM count.
                                                                                          • Compute Instance (CI): A Compute Instance represents the smallest computational unit within a GPU instance. It consists of a subset of SMs, along with dedicated register files, shared memory, and other resources. Each CI has its own CUDA context and can run independent CUDA kernels. The number of CIs in a GPU instance depends on the number of available SMs and the configuration chosen during MIG setup.
                                                                                          • Instance Slice : An Instance Slice represents a single CI within a GPU instance. It is the combination of a subset of SMs and a portion of the GPU memory slice. Each Instance Slice provides isolation and resource allocation for individual applications or users running on the GPU instance.
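To see these concepts on real hardware, you can list the GI and CI profiles supported by a MIG-capable GPU with nvidia-smi (output varies by GPU model):

# List the available GPU Instance profiles (e.g., 1g.10gb, 3g.40gb)\nnvidia-smi mig -lgip\n# List the Compute Instance profiles that can be created inside existing GPU Instances\nnvidia-smi mig -lcip\n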
                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/index.html#key-benefits-of-mig","title":"Key Benefits of MIG","text":"
                                                                                          • Resource Sharing: MIG allows a single physical GPU to be divided into multiple GPU instances, providing efficient sharing of GPU resources among different users or applications. This maximizes GPU utilization and enables improved performance isolation.

                                                                                          • Fine-Grained Resource Allocation: With MIG, GPU resources can be allocated at a finer granularity, allowing for more precise partitioning and allocation of compute power and memory capacity.

                                                                                          • Improved Performance Isolation: Each MIG instance operates independently with its dedicated resources, ensuring predictable throughput and latency for individual users or applications. This improves performance isolation and prevents interference between different workloads running on the same GPU.

                                                                                          • Enhanced Security and Fault Isolation: MIG provides better security and fault isolation by ensuring that each user or application has its dedicated GPU resources. This prevents unauthorized access to data and mitigates the impact of faults or errors in one instance on others.

                                                                                          • Increased Scalability: MIG enables the simultaneous usage of GPU resources by multiple users or applications, increasing system scalability and accommodating the needs of various workloads.

                                                                                          • Efficient Containerization: By using MIG in containerized environments, GPU resources can be effectively allocated to different containers, improving performance isolation and resource utilization.

                                                                                          Overall, MIG offers significant advantages in terms of resource sharing, fine-grained allocation, performance isolation, security, scalability, and containerization, making it a valuable feature for various GPU computing scenarios.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/create_mig.html","title":"Enabling MIG Features","text":"

                                                                                          This section describes how to enable NVIDIA MIG features. NVIDIA currently provides two strategies for exposing MIG devices on Kubernetes nodes:

• Single mode: Nodes expose a single type of MIG device on all their GPUs.
• Mixed mode: Nodes expose a mixture of MIG device types on all their GPUs.

                                                                                          For more details, refer to the NVIDIA GPU Usage Modes.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/create_mig.html#prerequisites","title":"Prerequisites","text":"
                                                                                          • Check the system requirements for the GPU driver installation on the target node: GPU Support Matrix
                                                                                          • Ensure that the cluster nodes have GPUs of the proper models (NVIDIA H100, A100, and A30 Tensor Core GPUs). For more information, see the GPU Support Matrix.
                                                                                          • All GPUs on the nodes must belong to the same product line (e.g., A100-SXM-40GB).
                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/create_mig.html#install-gpu-operator-addon","title":"Install GPU Operator Addon","text":""},{"location":"en/end-user/kpanda/gpu/nvidia/mig/create_mig.html#parameter-configuration","title":"Parameter Configuration","text":"

                                                                                          When installing the Operator, you need to set the MigManager Config parameter accordingly. The default setting is default-mig-parted-config. You can also customize the sharding policy configuration file:

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/create_mig.html#custom-sharding-policy","title":"Custom Sharding Policy","text":"
## Custom GI Instance Configuration\n  all-disabled:\n    - devices: all\n      mig-enabled: false\n  all-enabled:\n    - devices: all\n      mig-enabled: true\n      mig-devices: {}\n  all-1g.10gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb: 7\n  all-1g.10gb.me:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb+me: 1\n  all-1g.20gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.20gb: 4\n  all-2g.20gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        2g.20gb: 3\n  all-3g.40gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        3g.40gb: 2\n  all-4g.40gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        4g.40gb: 1\n  all-7g.80gb:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        7g.80gb: 1\n  all-balanced:\n    - device-filter: [\"0x233110DE\", \"0x232210DE\", \"0x20B210DE\", \"0x20B510DE\", \"0x20F310DE\", \"0x20F510DE\"]\n      devices: all\n      mig-enabled: true\n      mig-devices:\n        1g.10gb: 2\n        2g.20gb: 1\n        3g.40gb: 1\n  # After setting, GI instances will be partitioned according to the specified configuration\n  custom-config:\n    - devices: all\n      mig-enabled: true\n      mig-devices:\n        3g.40gb: 2\n

In the YAML above, custom-config partitions the GPU into GI instances. To further partition CI instances, configure custom-config as follows:

                                                                                          custom-config:\n  - devices: all\n    mig-enabled: true\n    mig-devices:\n      1c.3g.40gb: 6\n

                                                                                          After completing the settings, you can use GPU MIG resources when confirming the deployment of the application.
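For context, the GPU Operator's mig-manager applies a named configuration when the node carries the matching nvidia.com/mig.config label; the platform UI sets this label for you when you switch GPU modes (see the next section). If you manage nodes directly with kubectl, a sketch would be:

# Sketch: ask mig-manager to apply the custom-config profile defined above\n# (the platform UI performs this labeling when you switch GPU modes)\nkubectl label node <node-name> nvidia.com/mig.config=custom-config --overwrite\n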

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/create_mig.html#switch-node-gpu-mode","title":"Switch Node GPU Mode","text":"

                                                                                          After successfully installing the GPU operator, the node is in full card mode by default. There will be an indicator on the node management page, as shown below:

                                                                                          Click the \u2507 at the right side of the node list, select a GPU mode to switch, and then choose the proper MIG mode and sharding policy. Here, we take MIXED mode as an example:

                                                                                          There are two configurations here:

                                                                                          1. MIG Policy: Mixed and Single.
                                                                                          2. Sharding Policy: The policy here needs to match the key in the default-mig-parted-config (or user-defined sharding policy) configuration file.

After clicking the OK button, wait about a minute and refresh the page. The node switches to MIG mode:
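You can also verify the switch from the command line, assuming kubectl access to the cluster:

# Inspect the MIG-related labels on the node; mig.config.state should eventually report success\nkubectl describe node <node-name> | grep mig.config\n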

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/mig_command.html","title":"MIG Related Commands","text":"

                                                                                          GI Related Commands:

• nvidia-smi mig -lgi : View the list of created GI instances
• nvidia-smi mig -dgi -gi {gi instance id} : Delete a specific GI instance
• nvidia-smi mig -lgip : View the available GI profiles
• nvidia-smi mig -cgi {profile id} : Create a GI using the specified profile ID

                                                                                          CI Related Commands:

• nvidia-smi mig -lcip {-gi {gi instance id}} : View the available CI profiles; specifying -gi shows the CIs that can be created within a particular GI instance
• nvidia-smi mig -lci : View the list of created CI instances
• nvidia-smi mig -cci {profile id} -gi {gi instance id} : Create a CI instance within the specified GI
• nvidia-smi mig -dci -ci {ci instance id} : Delete a specific CI instance
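Taken together, a typical workflow is to pick a GI profile, create the GI, then create a CI inside it; the IDs below are placeholders to be read from the -lgip / -lcip output:

# Create a GPU Instance from a profile ID reported by 'nvidia-smi mig -lgip'\nnvidia-smi mig -cgi {gi profile id}\n# Create a Compute Instance inside it, using a profile from 'nvidia-smi mig -lcip -gi {gi instance id}'\nnvidia-smi mig -cci {ci profile id} -gi {gi instance id}\n# Confirm the created instances\nnvidia-smi mig -lgi\nnvidia-smi mig -lci\n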

                                                                                          GI+CI Related Commands:

• nvidia-smi mig -i 0 -cgi {gi profile id} -C {ci profile id} : Create a GI and its CI instance directly on GPU 0"},{"location":"en/end-user/kpanda/gpu/nvidia/mig/mig_usage.html","title":"Using MIG GPU Resources","text":"

                                                                                          This section explains how applications can use MIG GPU resources.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/mig_usage.html#prerequisites","title":"Prerequisites","text":"
• The AI platform container management module is deployed and running successfully.
• The container management module is integrated with a Kubernetes cluster, or a Kubernetes cluster has been created, and the cluster UI can be accessed.
                                                                                          • NVIDIA DevicePlugin and MIG capabilities are enabled. Refer to Offline installation of GPU Operator for details.
                                                                                          • The nodes in the cluster have GPUs of the proper models.
                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/mig_usage.html#using-mig-gpu-through-the-ui","title":"Using MIG GPU through the UI","text":"
                                                                                          1. Confirm if the cluster has recognized the GPU type.

                                                                                            Go to Cluster Details -> Nodes and check if it has been correctly recognized as MIG.

                                                                                          2. When deploying an application using an image, you can select and use NVIDIA MIG resources.

                                                                                          3. Example of MIG Single Mode (used in the same way as a full GPU):

                                                                                            Note

                                                                                            The MIG single policy allows users to request and use GPU resources in the same way as a full GPU (nvidia.com/gpu). The difference is that these resources can be a portion of the GPU (MIG device) rather than the entire GPU. Learn more from the GPU MIG Mode Design.

                                                                                          4. MIG Mixed Mode

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/mig/mig_usage.html#using-mig-through-yaml-configuration","title":"Using MIG through YAML Configuration","text":"

                                                                                          MIG Single mode:

                                                                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mig-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mig-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mig-demo\n    spec:\n      containers:\n        - name: mig-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/gpu: 2 # (1)!\n          imagePullPolicy: Always\n      restartPolicy: Always\n
                                                                                          1. Number of MIG GPUs to request

                                                                                          MIG Mixed mode:

                                                                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mig-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mig-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: mig-demo\n    spec:\n      containers:\n        - name: mig-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/mig-4g.20gb: 1 # (1)!\n          imagePullPolicy: Always\n      restartPolicy: Always\n
1. Expose the MIG device through the nvidia.com/mig-<g>g.<gb>gb resource type (for example, nvidia.com/mig-4g.20gb as above)

                                                                                          After entering the container, you can check if only one MIG device is being used:
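For example, assuming the mig-demo Deployment above is running in the default namespace:

# List the GPU devices visible inside the container; a MIG device appears as e.g. 'MIG 4g.20gb Device 0'\nkubectl exec -it deploy/mig-demo -n default -- nvidia-smi -L\n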

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/vgpu/hami.html","title":"Build a vGPU Memory Oversubscription Image","text":"

The vGPU memory oversubscription feature has been removed from the HAMi project. To use it, you need to rebuild the image with a libvgpu.so file that supports memory oversubscription.

                                                                                          Dockerfile
                                                                                          FROM docker.m.daocloud.io/projecthami/hami:v2.3.11\nCOPY libvgpu.so /k8s-vgpu/lib/nvidia/\n

                                                                                          Run the following command to build the image:

                                                                                          docker build -t release.daocloud.io/projecthami/hami:v2.3.11 -f Dockerfile .\n

                                                                                          Then, push the image to release.daocloud.io.
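For example:

docker push release.daocloud.io/projecthami/hami:v2.3.11\n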

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_addon.html","title":"Installing NVIDIA vGPU Addon","text":"

To virtualize a single NVIDIA GPU into multiple virtual GPUs and allocate them to different virtual machines or users, you can use NVIDIA's vGPU capability. This section explains how to install the vGPU plugin in the AI platform, a prerequisite for using the NVIDIA vGPU capability.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_addon.html#prerequisites","title":"Prerequisites","text":"
                                                                                          • Refer to the GPU Support Matrix to confirm that the nodes in the cluster have GPUs of the proper models.
                                                                                          • The current cluster has deployed NVIDIA drivers through the Operator. For specific instructions, refer to Offline Installation of GPU Operator.
                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_addon.html#procedure","title":"Procedure","text":"
                                                                                          1. Path: Container Management -> Cluster Management -> Click the target cluster -> Helm Apps -> Helm Charts -> Search for nvidia-vgpu .

                                                                                          2. During the installation of vGPU, several basic modification parameters are provided. If you need to modify advanced parameters, click the YAML column to make changes:

• deviceMemoryScaling : NVIDIA device memory scaling factor; the input value must be an integer, with a default value of 1. It can be greater than 1 (enabling virtual memory, an experimental feature). For an NVIDIA GPU with memory size M, if devicePlugin.deviceMemoryScaling is configured as S, the vGPUs assigned from this GPU will have a total memory of S * M.

• deviceSplitCount : An integer, with a default value of 10, that sets the number of splits per GPU. Each GPU cannot be assigned more tasks than this count; if configured as N, each GPU can run up to N tasks simultaneously.

• Resources : The resource usage of the vgpu-device-plugin and vgpu-schedule pods.

                                                                                          3. After a successful installation, you will see two types of pods in the specified namespace, indicating that the NVIDIA vGPU plugin has been successfully installed:

                                                                                          After a successful installation, you can deploy applications using vGPU resources.
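If you prefer the command line over the YAML view, the same parameters can be set with Helm. A minimal sketch; the release and chart names are assumptions, and the keys mirror the parameters described above:

# Sketch: set vGPU plugin parameters at install/upgrade time (release/chart names are illustrative)\nhelm upgrade --install nvidia-vgpu <chart-repo>/nvidia-vgpu -n gpu-operator --set devicePlugin.deviceMemoryScaling=1 --set devicePlugin.deviceSplitCount=10\n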

                                                                                          Note

The NVIDIA vGPU Addon does not support upgrading directly from the older v2.0.0 to the latest v2.0.0+1. To upgrade, uninstall the older version and then install the latest version.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html","title":"Using NVIDIA vGPU in Applications","text":"

This section explains how to use the vGPU capability in the AI platform.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html#prerequisites","title":"Prerequisites","text":"
                                                                                          • The nodes in the cluster have GPUs of the proper models.
                                                                                          • vGPU Addon has been successfully installed. Refer to Installing GPU Addon for details.
                                                                                          • GPU Operator is installed, and the Nvidia.DevicePlugin capability is disabled. Refer to Offline Installation of GPU Operator for details.
                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html#procedure","title":"Procedure","text":""},{"location":"en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html#using-vgpu-through-the-ui","title":"Using vGPU through the UI","text":"
                                                                                          1. Confirm if the cluster has detected GPUs. Click the Clusters -> Cluster Settings -> Addon Plugins and check if the GPU plugin has been automatically enabled and the proper GPU type has been detected. Currently, the cluster will automatically enable the GPU addon and set the GPU Type as Nvidia vGPU .

                                                                                          2. Deploy a workload by clicking Clusters -> Workloads . When deploying a workload using an image, select the type Nvidia vGPU , and you will be prompted with the following parameters:

                                                                                            • Number of Physical Cards (nvidia.com/vgpu) : Indicates how many physical cards need to be mounted by the current pod. The input value must be an integer and less than or equal to the number of cards on the host machine.
                                                                                            • GPU Cores (nvidia.com/gpucores): Indicates the GPU cores utilized by each card, with a value range from 0 to 100. Setting it to 0 means no enforced isolation, while setting it to 100 means exclusive use of the entire card.
                                                                                            • GPU Memory (nvidia.com/gpumem): Indicates the GPU memory occupied by each card, with a value in MB. The minimum value is 1, and the maximum value is the total memory of the card.

                                                                                            If there are issues with the configuration values above, it may result in scheduling failure or inability to allocate resources.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/nvidia/vgpu/vgpu_user.html#using-vgpu-through-yaml-configuration","title":"Using vGPU through YAML Configuration","text":"

                                                                                          Refer to the following workload configuration and add the parameter nvidia.com/vgpu: '1' in the resource requests and limits section to configure the number of physical cards used by the application.

                                                                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: full-vgpu-demo\n  namespace: default\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: full-vgpu-demo\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: full-vgpu-demo\n    spec:\n      containers:\n        - name: full-vgpu-demo1\n          image: chrstnhntschl/gpu_burn\n          resources:\n            limits:\n              nvidia.com/gpucores: '20'   # Request 20% of GPU cores for each card\n              nvidia.com/gpumem: '200'   # Request 200MB of GPU memory for each card\n              nvidia.com/vgpu: '1'   # Request 1 GPU\n          imagePullPolicy: Always\n      restartPolicy: Always\n

This YAML configuration requests vGPU resources for the application: each card utilizes 20% of GPU cores and 200MB of GPU memory, and 1 physical GPU is requested.
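After the pod is running, you can check from inside the container that the limits took effect; the memory reported by nvidia-smi should reflect the 200MB gpumem limit:

# Quick check inside the container created by the Deployment above\nkubectl exec -it deploy/full-vgpu-demo -n default -- nvidia-smi\n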

                                                                                          "},{"location":"en/end-user/kpanda/gpu/volcano/volcano-gang-scheduler.html","title":"Using Volcano's Gang Scheduler","text":"

                                                                                          The Gang scheduling policy is one of the core scheduling algorithms of the volcano-scheduler. It satisfies the \"All or nothing\" scheduling requirement during the scheduling process, preventing arbitrary scheduling of Pods that could waste cluster resources. The specific algorithm observes whether the number of scheduled Pods under a Job meets the minimum running quantity. When the Job's minimum running quantity is satisfied, scheduling actions are performed for all Pods under the Job; otherwise, no actions are taken.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/volcano/volcano-gang-scheduler.html#use-cases","title":"Use Cases","text":"

The Gang scheduling algorithm, built around the concept of a pod group, is particularly suitable for scenarios that require multi-process collaboration. AI scenarios often involve complex workflows, such as Data Ingestion, Data Analysis, Data Splitting, Training, Serving, and Logging, which require a group of containers to work together; this makes the pod-group-based Gang scheduling policy a natural fit.

Gang scheduling is also well suited to multi-threaded parallel computing scenarios under the MPI computation framework, where master and slave processes must work together. Because the containers in such a group are highly interdependent, scheduling only some of them can cause resource contention; scheduling and allocating the group as a whole effectively avoids deadlocks.

In scenarios with insufficient cluster resources, the Gang scheduling policy significantly improves the utilization of cluster resources. For example, if the cluster can currently accommodate only 2 Pods but the minimum number of Pods required for scheduling is 3, then all Pods of this Job will remain pending until the cluster can accommodate 3 Pods, at which point they will be scheduled. This prevents partially scheduled Pods, which could not satisfy the Job anyway, from occupying resources and blocking other Jobs from running.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/volcano/volcano-gang-scheduler.html#concept-explanation","title":"Concept Explanation","text":"

                                                                                          The Gang Scheduler is the core scheduling plugin of Volcano, and it is enabled by default upon installing Volcano. When creating a workload, you only need to specify the scheduler name as Volcano.

                                                                                          Volcano schedules based on PodGroups. When creating a workload, there is no need to manually create PodGroup resources; Volcano will automatically create them based on the workload information. Below is an example of a PodGroup:

apiVersion: scheduling.volcano.sh/v1beta1\nkind: PodGroup\nmetadata:\n  name: test\n  namespace: default\nspec:\n  minMember: 1  # (1)!\n  minResources:  # (2)!\n    cpu: \"3\"\n    memory: \"2048Mi\"\n  priorityClassName: high-priority # (3)!\n  queue: default # (4)!\n
1. Represents the minimum number of Pods or jobs that need to run under this PodGroup. If the cluster resources do not allow running the number of jobs specified by minMember, the scheduler will not schedule any jobs within this PodGroup.
                                                                                          2. Represents the minimum resources required to run this PodGroup. If the allocatable resources of the cluster do not meet the minResources, the scheduler will not schedule any jobs within this PodGroup.
                                                                                          3. Represents the priority of this PodGroup, used by the scheduler to sort all PodGroups within the queue during scheduling. system-node-critical and system-cluster-critical are two reserved values indicating the highest priority. If not specifically designated, the default priority or zero priority is used.
                                                                                          4. Represents the queue to which this PodGroup belongs. The queue must be pre-created and in the open state.
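For an ordinary workload, enabling Volcano is then just a matter of setting the scheduler name. A minimal sketch (the Deployment name and image are illustrative):

# Sketch: any workload opts into Volcano via spec.template.spec.schedulerName\nkubectl apply -f - <<EOF\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: volcano-demo\n  namespace: default\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      app: volcano-demo\n  template:\n    metadata:\n      labels:\n        app: volcano-demo\n    spec:\n      schedulerName: volcano\n      containers:\n        - name: nginx\n          image: nginx:latest\nEOF\n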
                                                                                          "},{"location":"en/end-user/kpanda/gpu/volcano/volcano-gang-scheduler.html#use-case","title":"Use Case","text":"

                                                                                          In a multi-threaded parallel computing communication scenario under the MPI computation framework, we need to ensure that all Pods can be successfully scheduled to ensure the job is completed correctly. Setting minAvailable to 4 means that 1 mpimaster and 3 mpiworkers are required to run.

                                                                                          apiVersion: batch.volcano.sh/v1alpha1\nkind: Job\nmetadata:\n  name: lm-mpi-job\n  labels:\n    \"volcano.sh/job-type\": \"MPI\"\nspec:\n  minAvailable: 4\n  schedulerName: volcano\n  plugins:\n    ssh: []\n    svc: []\n  policies:\n    - event: PodEvicted\n      action: RestartJob\n  tasks:\n    - replicas: 1\n      name: mpimaster\n      policies:\n        - event: TaskCompleted\n          action: CompleteJob\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  MPI_HOST=`cat /etc/volcano/mpiworker.host | tr \"\\n\" \",\"`;\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd;\n                  mpiexec --allow-run-as-root --host ${MPI_HOST} -np 3 mpi_hello_world;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpimaster\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"500m\"\n                limits:\n                  cpu: \"500m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n    - replicas: 3\n      name: mpiworker\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd -D;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpiworker\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"1000m\"\n                limits:\n                  cpu: \"1000m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n

                                                                                          Generate the resources for PodGroup:

                                                                                          apiVersion: scheduling.volcano.sh/v1beta1\nkind: PodGroup\nmetadata:\n  annotations:\n  creationTimestamp: \"2024-05-28T09:18:50Z\"\n  generation: 5\n  labels:\n    volcano.sh/job-type: MPI\n  name: lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2\n  namespace: default\n  ownerReferences:\n  - apiVersion: batch.volcano.sh/v1alpha1\n    blockOwnerDeletion: true\n    controller: true\n    kind: Job\n    name: lm-mpi-job\n    uid: 9c571015-37c7-4a1a-9604-eaa2248613f2\n  resourceVersion: \"25173454\"\n  uid: 7b04632e-7cff-4884-8e9a-035b7649d33b\nspec:\n  minMember: 4\n  minResources:\n    count/pods: \"4\"\n    cpu: 3500m\n    limits.cpu: 3500m\n    pods: \"4\"\n    requests.cpu: 3500m\n  minTaskMember:\n    mpimaster: 1\n    mpiworker: 3\n  queue: default\nstatus:\n  conditions:\n  - lastTransitionTime: \"2024-05-28T09:19:01Z\"\n    message: '3/4 tasks in gang unschedulable: pod group is not ready, 1 Succeeded,\n      3 Releasing, 4 minAvailable'\n    reason: NotEnoughResources\n    status: \"True\"\n    transitionID: f875efa5-0358-4363-9300-06cebc0e7466\n    type: Unschedulable\n  - lastTransitionTime: \"2024-05-28T09:18:53Z\"\n    reason: tasks in gang are ready to be scheduled\n    status: \"True\"\n    transitionID: 5a7708c8-7d42-4c33-9d97-0581f7c06dab\n    type: Scheduled\n  phase: Pending\n  succeeded: 1\n

The PodGroup shows that it is associated with the workload through ownerReferences and that the minimum number of running Pods is set to 4.
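You can inspect the automatically created PodGroup with kubectl:

# List the PodGroups Volcano created for the job, then view one in full\nkubectl get podgroups.scheduling.volcano.sh -n default\nkubectl get podgroups.scheduling.volcano.sh lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2 -n default -o yaml\n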

                                                                                          "},{"location":"en/end-user/kpanda/gpu/volcano/volcano_user_guide.html","title":"Use Volcano for AI Compute","text":""},{"location":"en/end-user/kpanda/gpu/volcano/volcano_user_guide.html#usage-scenarios","title":"Usage Scenarios","text":"

                                                                                          Kubernetes has become the de facto standard for orchestrating and managing cloud-native applications, and an increasing number of applications are choosing to migrate to K8s. The fields of artificial intelligence and machine learning inherently involve a large number of compute-intensive tasks, and developers are very willing to build AI platforms based on Kubernetes to fully leverage its resource management, application orchestration, and operations monitoring capabilities. However, the default Kubernetes scheduler was initially designed primarily for long-running services and has many shortcomings in batch and elastic scheduling for AI and big data tasks. For example, resource contention issues:

                                                                                          Take TensorFlow job scenarios as an example. TensorFlow jobs include two different roles, PS and Worker, and the Pods for these two roles need to work together to complete the entire job. If only one type of role Pod is running, the entire job cannot be executed properly. The default scheduler schedules Pods one by one and is unaware of the PS and Worker roles in a Kubeflow TFJob. In a high-load cluster (insufficient resources), multiple jobs may each be allocated some resources to run a portion of their Pods, but the jobs cannot complete successfully, leading to resource waste. For instance, if a cluster has 4 GPUs and both TFJob1 and TFJob2 each have 4 Workers, TFJob1 and TFJob2 might each be allocated 2 GPUs. However, both TFJob1 and TFJob2 require 4 GPUs to run. This mutual waiting for resource release creates a deadlock situation, resulting in GPU resource waste.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/volcano/volcano_user_guide.html#volcano-batch-scheduling-system","title":"Volcano Batch Scheduling System","text":"

                                                                                          Volcano is the first Kubernetes-based container batch computing platform under CNCF, focusing on high-performance computing scenarios. It fills in the missing functionalities of Kubernetes in fields such as machine learning, big data, and scientific computing, providing essential support for these high-performance workloads. Additionally, Volcano seamlessly integrates with mainstream computing frameworks like Spark, TensorFlow, and PyTorch, and supports hybrid scheduling of heterogeneous devices, including CPUs and GPUs, effectively resolving the deadlock issues mentioned above.

                                                                                          The following sections will introduce how to install and use Volcano.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/volcano/volcano_user_guide.html#install-volcano","title":"Install Volcano","text":"
                                                                                          1. Find Volcano in Cluster Details -> Helm Apps -> Helm Charts and install it.

                                                                                          2. Check and confirm whether Volcano is installed successfully, that is, whether the components volcano-admission, volcano-controllers, and volcano-scheduler are running properly.
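A quick check from the command line, assuming Volcano was installed into the default volcano-system namespace:

# All three components should be in the Running state\nkubectl get pods -n volcano-system\n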

Typically, Volcano is used in conjunction with AI Lab to form an effective closed loop across dataset development, Notebook usage, and training tasks.

                                                                                          "},{"location":"en/end-user/kpanda/gpu/volcano/volcano_user_guide.html#volcano-use-cases","title":"Volcano Use Cases","text":"
                                                                                          • Volcano is a standalone scheduler. To enable the Volcano scheduler when creating workloads, simply specify the scheduler's name (schedulerName: volcano).
                                                                                          • The volcanoJob resource is an extension of the Job in Volcano, breaking the Job down into smaller working units called tasks, which can interact with each other.
                                                                                          "},{"location":"en/end-user/kpanda/gpu/volcano/volcano_user_guide.html#volcano-supports-tensorflow","title":"Volcano Supports TensorFlow","text":"

                                                                                          Here is an example:

                                                                                          apiVersion: batch.volcano.sh/v1alpha1\nkind: Job\nmetadata:\n  name: tensorflow-benchmark\n  labels:\n    \"volcano.sh/job-type\": \"Tensorflow\"\nspec:\n  minAvailable: 3\n  schedulerName: volcano\n  plugins:\n    env: []\n    svc: []\n  policies:\n    - event: PodEvicted\n      action: RestartJob\n  tasks:\n    - replicas: 1\n      name: ps\n      template:\n        spec:\n          imagePullSecrets:\n            - name: default-secret\n          containers:\n            - command:\n                - sh\n                - -c\n                - |\n                  PS_HOST=`cat /etc/volcano/ps.host | sed 's/$/&:2222/g' | tr \"\\n\" \",\"`;\n                  WORKER_HOST=`cat /etc/volcano/worker.host | sed 's/$/&:2222/g' | tr \"\\n\" \",\"`;\n                  python tf_cnn_benchmarks.py --batch_size=32 --model=resnet50 --variable_update=parameter_server --flush_stdout=true --num_gpus=1 --local_parameter_device=cpu --device=cpu --data_format=NHWC --job_name=ps --task_index=${VK_TASK_INDEX} --ps_hosts=${PS_HOST} --worker_hosts=${WORKER_HOST}\n              image: docker.m.daocloud.io/volcanosh/example-tf:0.0.1\n              name: tensorflow\n              ports:\n                - containerPort: 2222\n                  name: tfjob-port\n              resources:\n                requests:\n                  cpu: \"1000m\"\n                  memory: \"2048Mi\"\n                limits:\n                  cpu: \"1000m\"\n                  memory: \"2048Mi\"\n              workingDir: /opt/tf-benchmarks/scripts/tf_cnn_benchmarks\n          restartPolicy: OnFailure\n    - replicas: 2\n      name: worker\n      policies:\n        - event: TaskCompleted\n          action: CompleteJob\n      template:\n        spec:\n          imagePullSecrets:\n            - name: default-secret\n          containers:\n            - command:\n                - sh\n                - -c\n                - |\n                  PS_HOST=`cat /etc/volcano/ps.host | sed 's/$/&:2222/g' | tr \"\\n\" \",\"`;\n                  WORKER_HOST=`cat /etc/volcano/worker.host | sed 's/$/&:2222/g' | tr \"\\n\" \",\"`;\n                  python tf_cnn_benchmarks.py --batch_size=32 --model=resnet50 --variable_update=parameter_server --flush_stdout=true --num_gpus=1 --local_parameter_device=cpu --device=cpu --data_format=NHWC --job_name=worker --task_index=${VK_TASK_INDEX} --ps_hosts=${PS_HOST} --worker_hosts=${WORKER_HOST}\n              image: docker.m.daocloud.io/volcanosh/example-tf:0.0.1\n              name: tensorflow\n              ports:\n                - containerPort: 2222\n                  name: tfjob-port\n              resources:\n                requests:\n                  cpu: \"2000m\"\n                  memory: \"2048Mi\"\n                limits:\n                  cpu: \"2000m\"\n                  memory: \"4096Mi\"\n              workingDir: /opt/tf-benchmarks/scripts/tf_cnn_benchmarks\n          restartPolicy: OnFailure\n
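Save the manifest and submit it; the file name below is illustrative, and vcjob is the short name Volcano registers for its Job resource:

kubectl apply -f tensorflow-benchmark.yaml\n# Watch the Volcano job and its pods\nkubectl get vcjob tensorflow-benchmark -n default\nkubectl get pods -l volcano.sh/job-name=tensorflow-benchmark -n default\n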
                                                                                          "},{"location":"en/end-user/kpanda/gpu/volcano/volcano_user_guide.html#parallel-computing-with-mpi","title":"Parallel Computing with MPI","text":"

In multi-process parallel computing under the MPI framework, the processes communicate with each other, so all Pods must be scheduled successfully before the job can run correctly. Setting minAvailable to 4 indicates that 1 mpimaster and 3 mpiworkers are required to run. Simply setting the schedulerName field to \"volcano\" enables the Volcano scheduler.

                                                                                          Here is an example:

                                                                                          apiVersion: batch.volcano.sh/v1alpha1\nkind: Job\nmetadata:\n  name: lm-mpi-job\n  labels:\n    \"volcano.sh/job-type\": \"MPI\"\nspec:\n  minAvailable: 4\n  schedulerName: volcano\n  plugins:\n    ssh: []\n    svc: []\n  policies:\n    - event: PodEvicted\n      action: RestartJob\n  tasks:\n    - replicas: 1\n      name: mpimaster\n      policies:\n        - event: TaskCompleted\n          action: CompleteJob\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  MPI_HOST=`cat /etc/volcano/mpiworker.host | tr \"\\n\" \",\"`;\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd;\n                  mpiexec --allow-run-as-root --host ${MPI_HOST} -np 3 mpi_hello_world;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpimaster\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"500m\"\n                limits:\n                  cpu: \"500m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n    - replicas: 3\n      name: mpiworker\n      template:\n        spec:\n          containers:\n            - command:\n                - /bin/sh\n                - -c\n                - |\n                  mkdir -p /var/run/sshd; /usr/sbin/sshd -D;\n              image: docker.m.daocloud.io/volcanosh/example-mpi:0.0.1\n              name: mpiworker\n              ports:\n                - containerPort: 22\n                  name: mpijob-port\n              workingDir: /home\n              resources:\n                requests:\n                  cpu: \"1000m\"\n                limits:\n                  cpu: \"1000m\"\n          restartPolicy: OnFailure\n          imagePullSecrets:\n            - name: default-secret\n

The PodGroup resource that Volcano generates for this Job:

                                                                                          apiVersion: scheduling.volcano.sh/v1beta1\nkind: PodGroup\nmetadata:\n  annotations:\n  creationTimestamp: \"2024-05-28T09:18:50Z\"\n  generation: 5\n  labels:\n    volcano.sh/job-type: MPI\n  name: lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2\n  namespace: default\n  ownerReferences:\n  - apiVersion: batch.volcano.sh/v1alpha1\n    blockOwnerDeletion: true\n    controller: true\n    kind: Job\n    name: lm-mpi-job\n    uid: 9c571015-37c7-4a1a-9604-eaa2248613f2\n  resourceVersion: \"25173454\"\n  uid: 7b04632e-7cff-4884-8e9a-035b7649d33b\nspec:\n  minMember: 4\n  minResources:\n    count/pods: \"4\"\n    cpu: 3500m\n    limits.cpu: 3500m\n    pods: \"4\"\n    requests.cpu: 3500m\n  minTaskMember:\n    mpimaster: 1\n    mpiworker: 3\n  queue: default\nstatus:\n  conditions:\n  - lastTransitionTime: \"2024-05-28T09:19:01Z\"\n    message: '3/4 tasks in gang unschedulable: pod group is not ready, 1 Succeeded,\n      3 Releasing, 4 minAvailable'\n    reason: NotEnoughResources\n    status: \"True\"\n    transitionID: f875efa5-0358-4363-9300-06cebc0e7466\n    type: Unschedulable\n  - lastTransitionTime: \"2024-05-28T09:18:53Z\"\n    reason: tasks in gang are ready to be scheduled\n    status: \"True\"\n    transitionID: 5a7708c8-7d42-4c33-9d97-0581f7c06dab\n    type: Scheduled\n  phase: Pending\n  succeeded: 1\n

The PodGroup shows that it is associated with the workload through ownerReferences and that the minimum number of Pods required to run is 4.
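
To inspect the generated PodGroup from a terminal, you could query it directly (a sketch; the PodGroup name is the generated one shown above and will differ in your cluster):

kubectl get podgroups.scheduling.volcano.sh -n default\nkubectl describe podgroup lm-mpi-job-9c571015-37c7-4a1a-9604-eaa2248613f2 -n default\n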

                                                                                          If you want to learn more about the features and usage scenarios of Volcano, refer to Volcano Introduction.

                                                                                          "},{"location":"en/end-user/kpanda/helm/index.html","title":"Helm Charts","text":"

Helm is a package management tool for Kubernetes that makes it easy for users to quickly discover, share, and use applications built with Kubernetes. Container Management provides hundreds of Helm charts covering storage, networking, monitoring, databases, and other major use cases. With these charts, you can quickly deploy and easily manage Helm apps through the UI. In addition, you can add more custom charts through Add Helm repository to meet various needs.

                                                                                          Key Concepts:

                                                                                          There are a few key concepts to understand when using Helm:

• Chart: A Helm installation package containing the images, dependencies, and resource definitions required to run an application; it may also contain service definitions for the Kubernetes cluster, similar to a formula in Homebrew, a dpkg in APT, or an RPM file in Yum. Charts are called Helm Charts in AI platform.

• Release: An instance of a Chart running on a Kubernetes cluster. A Chart can be installed multiple times in the same cluster, and each installation creates a new Release. Releases are called Helm Apps in AI platform.

• Repository: A repository for publishing and storing Charts. Repositories are called Helm Repositories in AI platform.

                                                                                          For more details, refer to Helm official website.
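
To make these concepts concrete on the command line, here is a minimal sketch using the Kubevela repository that appears later in this document (the chart name vela-core is an assumption):

helm repo add kubevela https://charts.kubevela.net/core   # Repository: where Charts are published\nhelm search repo kubevela                                 # browse Charts in the repository\nhelm install velacore kubevela/vela-core                  # installing a Chart creates a Release\nhelm list                                                 # list Releases in the current namespace\n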

                                                                                          Related operations:

                                                                                          • Manage Helm apps, including installing, updating, uninstalling Helm apps, viewing Helm operation records, etc.
                                                                                          • Manage Helm repository, including installing, updating, deleting Helm repository, etc.
                                                                                          "},{"location":"en/end-user/kpanda/helm/Import-addon.html","title":"Import Custom Helm Apps into Built-in Addons","text":"

This article explains how to import Helm apps into the system's built-in addons in both offline and online environments.

                                                                                          "},{"location":"en/end-user/kpanda/helm/Import-addon.html#offline-environment","title":"Offline Environment","text":"

An offline environment is one that cannot connect to the internet, such as a closed private network.

                                                                                          "},{"location":"en/end-user/kpanda/helm/Import-addon.html#prerequisites","title":"Prerequisites","text":"
                                                                                          • charts-syncer is available and running. If not, you can click here to download.
• The Helm Chart has been adapted for charts-syncer, which means adding a .relok8s-images.yaml file to the Helm Chart. This file should list all the images used by the Chart, including any images that are not referenced directly in the Chart but are pulled at runtime, such as images used by an Operator.

                                                                                          Note

• Refer to image-hints-file for instructions on how to write this file (a minimal sketch follows this note). The registry and repository parts of each image must be separated, because the registry/repository needs to be replaced or modified when the image is loaded.
• The installer's fire cluster has charts-syncer installed. If you are importing a custom Helm app into the installer's fire cluster, you can skip the download and proceed directly to the adaptation. If the charts-syncer binary is not installed, you can download it immediately.
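
Below is a minimal sketch of a .relok8s-images.yaml image hints file; the value paths are illustrative and must match the image fields actually defined in your Chart's values.yaml:

# .relok8s-images.yaml - image hints consumed by charts-syncer\n- \"{{ .image.registry }}/{{ .image.repository }}:{{ .image.tag }}\"\n- \"{{ .operator.image.registry }}/{{ .operator.image.repository }}:{{ .operator.image.tag }}\"\n
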
                                                                                          "},{"location":"en/end-user/kpanda/helm/Import-addon.html#sync-helm-chart","title":"Sync Helm Chart","text":"
                                                                                          1. Go to Container Management -> Helm Apps -> Helm Repositories , search for the addon, and obtain the built-in repository address and username/password (the default username/password for the system's built-in repository is rootuser/rootpass123).

2. Sync the Helm Chart to the built-in repository addon of the container management system.

                                                                                            • Write the following configuration file, modify it according to your specific configuration, and save it as sync-dao-2048.yaml .

                                                                                              source:  # helm charts source information\n  repo:\n    kind: HARBOR # It can also be any other supported Helm Chart repository type, such as CHARTMUSEUM\n    url: https://release-ci.daocloud.io/chartrepo/community #  Change to the chart repo URL\n    #auth: # username/password, if no password is set, leave it blank\n      #username: \"admin\"\n      #password: \"Harbor12345\"\ncharts:  # charts to sync\n  - name: dao-2048 # helm charts information, if not specified, sync all charts in the source helm repo\n    versions:\n      - 1.4.1\ntarget:  # helm charts target information\n  containerRegistry: 10.5.14.40 # image repository URL\n  repo:\n    kind: CHARTMUSEUM # It can also be any other supported Helm Chart repository type, such as HARBOR\n    url: http://10.5.14.40:8081 #  Change to the correct chart repo URL, you can verify the address by using helm repo add $HELM-REPO\n    auth: # username/password, if no password is set, leave it blank\n      username: \"rootuser\"\n      password: \"rootpass123\"\n  containers:\n    # kind: HARBOR # If the image repository is HARBOR and you want charts-syncer to automatically create an image repository, fill in this field\n    # auth: # username/password, if no password is set, leave it blank\n      # username: \"admin\"\n      # password: \"Harbor12345\"\n\n# leverage .relok8s-images.yaml file inside the Charts to move the container images too\nrelocateContainerImages: true\n
                                                                                            • Run the charts-syncer command to sync the Chart and its included images

                                                                                              charts-syncer sync --config sync-dao-2048.yaml --insecure --auto-create-repository\n

                                                                                              The expected output is:

                                                                                              I1222 15:01:47.119777    8743 sync.go:45] Using config file: \"examples/sync-dao-2048.yaml\"\nW1222 15:01:47.234238    8743 syncer.go:263] Ignoring skipDependencies option as dependency sync is not supported if container image relocation is true or syncing from/to intermediate directory\nI1222 15:01:47.234685    8743 sync.go:58] There is 1 chart out of sync!\nI1222 15:01:47.234706    8743 sync.go:66] Syncing \"dao-2048_1.4.1\" chart...\n.relok8s-images.yaml hints file found\nComputing relocation...\n\nRelocating dao-2048@1.4.1...\nPushing 10.5.14.40/daocloud/dao-2048:v1.4.1...\nDone\nDone moving /var/folders/vm/08vw0t3j68z9z_4lcqyhg8nm0000gn/T/charts-syncer869598676/dao-2048-1.4.1.tgz\n
3. Once the previous step is completed, go to Container Management -> Helm Apps -> Helm Repositories , find the proper addon, click Sync Repository in the action column, and you will see the uploaded Helm app in the Helm chart list.

                                                                                          4. You can then proceed with normal installation, upgrade, and uninstallation.
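
As an optional sanity check after the sync in step 2, you can point a local helm client at the built-in repository and search for the chart (a sketch reusing the example URL and credentials above):

helm repo add addon http://10.5.14.40:8081 --username rootuser --password rootpass123\nhelm repo update\nhelm search repo addon/dao-2048 --versions\n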

                                                                                          "},{"location":"en/end-user/kpanda/helm/Import-addon.html#online-environment","title":"Online Environment","text":"

The Helm repo address for the online environment is release.daocloud.io . Users without permission to add a Helm repo will not be able to import custom Helm apps into the system's built-in addons. You can add your own Helm repository and then integrate it into the platform following the same steps as syncing a Helm Chart in the offline environment.

                                                                                          "},{"location":"en/end-user/kpanda/helm/helm-app.html","title":"Manage Helm Apps","text":"

                                                                                          The container management module supports interface-based management of Helm, including creating Helm instances using Helm charts, customizing Helm instance arguments, and managing the full lifecycle of Helm instances.

                                                                                          This section will take cert-manager as an example to introduce how to create and manage Helm apps through the container management interface.

                                                                                          "},{"location":"en/end-user/kpanda/helm/helm-app.html#prerequisites","title":"Prerequisites","text":"
• You have integrated or created a Kubernetes cluster and can access its UI.

• You have created a namespace and a user, and granted the user NS Admin or higher permissions. For details, refer to Namespace Authorization.

                                                                                          "},{"location":"en/end-user/kpanda/helm/helm-app.html#install-the-helm-app","title":"Install the Helm app","text":"

                                                                                          Follow the steps below to install the Helm app.

                                                                                          1. Click a cluster name to enter Cluster Details .

                                                                                          2. In the left navigation bar, click Helm Apps -> Helm Chart to enter the Helm chart page.

                                                                                            On the Helm chart page, select the Helm repository named addon , and all the Helm chart templates under the addon repository will be displayed on the interface. Click the Chart named cert-manager .

3. On the installation page, you can see the relevant details of the Chart. Select the version to install in the upper right corner of the interface and click the Install button. Version v1.9.1 is selected here.

4. Configure Name , Namespace , and Version Information . You can also customize arguments by modifying the YAML in the argument configuration area below. Click OK .

                                                                                          5. The system will automatically return to the list of Helm apps, and the status of the newly created Helm app is Installing , and the status will change to Running after a period of time.
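
For reference, the UI flow above corresponds roughly to this helm CLI invocation (a hedged sketch; the repository alias addon, the release name, and the target namespace are assumptions based on the example):

helm repo update\nhelm install cert-manager addon/cert-manager --version v1.9.1 --namespace cert-manager --create-namespace\n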

                                                                                          "},{"location":"en/end-user/kpanda/helm/helm-app.html#update-the-helm-app","title":"Update the Helm app","text":"

After installing a Helm app through the interface, you can update it. Note: update operations in the UI are only supported for Helm apps that were installed via the UI.

                                                                                          Follow the steps below to update the Helm app.

                                                                                          1. Click a cluster name to enter Cluster Details .

                                                                                          2. In the left navigation bar, click Helm Apps to enter the Helm app list page.

On the Helm app list page, select the Helm app that needs to be updated, click the ... operation button on the right side of the list, and select the Update operation in the drop-down menu.

                                                                                          3. After clicking the Update button, the system will jump to the update interface, where you can update the Helm app as needed. Here we take updating the http port of the dao-2048 application as an example.

4. After modifying the proper arguments, you can click the Change button under the argument configuration to compare the files before and after the modification. After confirming that there are no errors, click the OK button at the bottom to complete the update of the Helm app.

                                                                                          5. The system will automatically return to the Helm app list, and a pop-up window in the upper right corner will prompt update successful .

                                                                                          "},{"location":"en/end-user/kpanda/helm/helm-app.html#view-helm-operation-records","title":"View Helm operation records","text":"

                                                                                          Every installation, update, and deletion of Helm apps has detailed operation records and logs for viewing.

                                                                                          1. In the left navigation bar, click Cluster Operations -> Recent Operations , and then select the Helm Operations tab at the top of the page. Each record corresponds to an install/update/delete operation.

                                                                                          2. To view the detailed log of each operation: Click \u2507 on the right side of the list, and select Log from the pop-up menu.

3. At this point, the detailed operation log will be displayed in the form of a console at the bottom of the page.
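
These operation records correspond conceptually to Helm release revisions; from a terminal, the same history could be inspected with the following (a sketch; the release name dao-2048 and namespace are taken from the earlier update example and are illustrative):

helm history dao-2048 -n default\n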

                                                                                          "},{"location":"en/end-user/kpanda/helm/helm-app.html#delete-the-helm-app","title":"Delete the Helm app","text":"

                                                                                          Follow the steps below to delete the Helm app.

                                                                                          1. Find the cluster where the Helm app to be deleted resides, click the cluster name, and enter Cluster Details .

                                                                                          2. In the left navigation bar, click Helm Apps to enter the Helm app list page.

On the Helm app list page, select the Helm app you want to delete, click the ... operation button on the right side of the list, and select Delete from the drop-down menu.

                                                                                          3. Enter the name of the Helm app in the pop-up window to confirm, and then click the Delete button.
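
For reference, deleting a Helm app corresponds roughly to a helm uninstall (a sketch; the release name and namespace are illustrative):

helm uninstall dao-2048 -n default\n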

                                                                                          "},{"location":"en/end-user/kpanda/helm/helm-repo.html","title":"Manage Helm Repository","text":"

The Helm repository is a repository for storing and publishing Charts. The Helm App module supports accessing Chart packages in repositories over HTTP(S). By default, the system has four built-in helm repos, shown in the table below, to meet common needs in enterprise production.

Repository | Description | Example
partner | Various high-quality Charts provided by ecosystem partners | tidb
system | Charts that core system components and some advanced features must rely on; for example, insight-agent must be installed to obtain cluster monitoring information | Insight
addon | Common Charts for business cases | cert-manager
community | The most popular open-source Charts in the Kubernetes community | Istio

                                                                                          In addition to the above preset repositories, you can also add third-party Helm repositories yourself. This page will introduce how to add and update third-party Helm repositories.

                                                                                          "},{"location":"en/end-user/kpanda/helm/helm-repo.html#prerequisites","title":"Prerequisites","text":"
• You have integrated or created a Kubernetes cluster and can access its UI.

• You have created a namespace and a user, and granted the user NS Admin or higher permissions. For details, refer to Namespace Authorization.

• If using a private repository, you have read and write permissions to the repository.

                                                                                          "},{"location":"en/end-user/kpanda/helm/helm-repo.html#introduce-third-party-helm-repository","title":"Introduce third-party Helm repository","text":"

The following uses Kubevela's public chart repository as an example of how to add and manage a helm repo.

1. Find the cluster into which the third-party helm repo is to be imported, click the cluster name, and enter cluster details.

                                                                                          2. In the left navigation bar, click Helm Apps -> Helm Repositories to enter the helm repo page.

                                                                                          3. Click the Create Repository button on the helm repo page to enter the Create repository page, and configure relevant arguments according to the table below.

                                                                                            • Repository Name: Set the repository name. It can be up to 63 characters long and may only include lowercase letters, numbers, and separators -. It must start and end with a lowercase letter or number, for example, kubevela.
                                                                                            • Repository URL: The HTTP(S) address pointing to the target Helm repository. For example, https://charts.kubevela.net/core.
                                                                                            • Skip TLS Verification: If the added Helm repository uses an HTTPS address and requires skipping TLS verification, you can check this option. The default is unchecked.
                                                                                            • Authentication Method: The method used for identity verification after connecting to the repository URL. For public repositories, you can select None. For private repositories, you need to enter a username/password for identity verification.
                                                                                            • Labels: Add labels to this Helm repository. For example, key: repo4; value: Kubevela.
                                                                                            • Annotations: Add annotations to this Helm repository. For example, key: repo4; value: Kubevela.
                                                                                            • Description: Add a description for this Helm repository. For example: This is a Kubevela public Helm repository.

                                                                                          4. Click OK to complete the creation of the Helm repository. The page will automatically jump to the list of Helm repositories.

                                                                                          "},{"location":"en/end-user/kpanda/helm/helm-repo.html#update-the-helm-repository","title":"Update the Helm repository","text":"

When the information of a helm repo changes, you can update its address, authentication method, labels, annotations, and description.

                                                                                          1. Find the cluster where the repository to be updated is located, click the cluster name, and enter cluster details .

                                                                                          2. In the left navigation bar, click Helm Apps -> Helm Repositories to enter the helm repo list page.

                                                                                          3. Find the Helm repository that needs to be updated on the repository list page, click the \u2507 button on the right side of the list, and click Update in the pop-up menu.

                                                                                          4. Update on the Update Helm Repository page, and click OK when finished.

                                                                                          5. Return to the helm repo list, and the screen prompts that the update is successful.

                                                                                          "},{"location":"en/end-user/kpanda/helm/helm-repo.html#delete-the-helm-repository","title":"Delete the Helm repository","text":"

In addition to adding and updating repositories, you can also delete unnecessary ones, including system preset repositories and third-party repositories.

                                                                                          1. Find the cluster where the repository to be deleted is located, click the cluster name, and enter cluster details .

                                                                                          2. In the left navigation bar, click Helm Apps -> Helm Repositories to enter the helm repo list page.

3. Find the Helm repository that needs to be deleted on the repository list page, click the \u2507 button on the right side of the list, and click Delete in the pop-up menu.

                                                                                          4. Enter the repository name to confirm, and click Delete .

                                                                                          5. Return to the list of Helm repositories, and the screen prompts that the deletion is successful.

                                                                                          "},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html","title":"Import and Upgrade Multi-Arch Helm Apps","text":"

                                                                                          In a multi-arch cluster, it is common to use Helm charts that support multiple architectures to address deployment issues caused by architectural differences. This guide will explain how to integrate single-arch Helm apps into multi-arch deployments and how to integrate multi-arch Helm apps.

                                                                                          "},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#import","title":"Import","text":""},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#import-single-arch","title":"Import Single-arch","text":"

                                                                                          Prepare the offline package addon-offline-full-package-${version}-${arch}.tar.gz.

                                                                                          Specify the path in the clusterConfig.yml configuration file, for example:

                                                                                          addonPackage:\n  path: \"/home/addon-offline-full-package-v0.9.0-amd64.tar.gz\"\n

                                                                                          Then run the import command:

                                                                                          ~/dce5-installer cluster-create -c /home/dce5/sample/clusterConfig.yaml -m /home/dce5/sample/manifest.yaml -d -j13\n
                                                                                          "},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#integrate-multi-arch","title":"Integrate Multi-arch","text":"

                                                                                          Prepare the offline package addon-offline-full-package-${version}-${arch}.tar.gz.

                                                                                          Take addon-offline-full-package-v0.9.0-arm64.tar.gz as an example and run the import command:

                                                                                          ~/dce5-installer import-addon -c /home/dce5/sample/clusterConfig.yaml --addon-path=/home/addon-offline-full-package-v0.9.0-arm64.tar.gz\n
                                                                                          "},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#upgrade","title":"Upgrade","text":""},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#upgrade-single-arch","title":"Upgrade Single-arch","text":"

                                                                                          Prepare the offline package addon-offline-full-package-${version}-${arch}.tar.gz.

                                                                                          Specify the path in the clusterConfig.yml configuration file, for example:

                                                                                          addonPackage:\n  path: \"/home/addon-offline-full-package-v0.11.0-amd64.tar.gz\"\n

                                                                                          Then run the import command:

                                                                                          ~/dce5-installer cluster-create -c /home/dce5/sample/clusterConfig.yaml -m /home/dce5/sample/manifest.yaml -d -j13\n
                                                                                          "},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#multi-arch-integration","title":"Multi-arch Integration","text":"

                                                                                          Prepare the offline package addon-offline-full-package-${version}-${arch}.tar.gz.

                                                                                          Take addon-offline-full-package-v0.11.0-arm64.tar.gz as an example and run the import command:

                                                                                          ~/dce5-installer import-addon -c /home/dce5/sample/clusterConfig.yaml --addon-path=/home/addon-offline-full-package-v0.11.0-arm64.tar.gz\n
                                                                                          "},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#notes","title":"Notes","text":""},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#disk-space","title":"Disk Space","text":"

                                                                                          The offline package is quite large and requires sufficient space for decompression and loading of images. Otherwise, it may interrupt the process with a \"no space left\" error.
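
Before importing, it may be worth confirming that the target path has enough free space (a sketch; the path and package name are illustrative):

df -h /home\ntar -tzf /home/addon-offline-full-package-v0.9.0-amd64.tar.gz | head   # inspect contents without extracting\n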

                                                                                          "},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#retry-after-failure","title":"Retry after Failure","text":"

                                                                                          If the multi-arch fusion step fails, you need to clean up the residue before retrying:

                                                                                          rm -rf addon-offline-target-package\n
                                                                                          "},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#registry-space","title":"Registry Space","text":"

If the offline package to be fused contains registry spaces that are inconsistent with those in the imported offline package, the fusion may fail because those registry spaces do not exist:

Solution: Simply create the missing registry space before the fusion. For example, creating the registry space \"localhost\" in advance prevents the error above.

                                                                                          "},{"location":"en/end-user/kpanda/helm/multi-archi-helm.html#architecture-conflict","title":"Architecture Conflict","text":"

When upgrading the addon to a version earlier than 0.12.0, the charts-syncer in the target offline package does not check whether an image already exists before pushing it, so the upgrade recombines the multi-arch images into a single architecture. For example, if a multi-arch addon was imported in v0.10, upgrading it to v0.11 overwrites the multi-arch addon with a single architecture; upgrading to v0.12.0 or later preserves the multi-arch images.

                                                                                          "},{"location":"en/end-user/kpanda/helm/upload-helm.html","title":"Upload Helm Charts","text":"

                                                                                          This article explains how to upload Helm charts. See the steps below.

                                                                                          1. Add a Helm repository, refer to Adding a Third-Party Helm Repository for the procedure.

                                                                                          2. Upload the Helm Chart to the Helm repository.

Upload with Client / Upload with Web Page

                                                                                            Note

                                                                                            This method is suitable for Harbor, ChartMuseum, JFrog type repositories.

                                                                                            1. Log in to a node that can access the Helm repository, upload the Helm binary to the node, and install the cm-push plugin (VPN is needed and Git should be installed in advance).

                                                                                              Refer to the plugin installation process.

                                                                                            2. Push the Helm Chart to the Helm repository by executing the following command:

                                                                                              helm cm-push ${charts-dir} ${HELM_REPO_URL} --username ${username} --password ${password}\n

                                                                                              Argument descriptions:

                                                                                              • charts-dir: The directory of the Helm Chart, or the packaged Chart (i.e., .tgz file).
                                                                                              • HELM_REPO_URL: The URL of the Helm repository.
                                                                                              • username/password: The username and password for the Helm repository with push permissions.
                                                                                              • If you want to access via HTTPS and skip the certificate verification, you can add the argument --insecure.
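
For example, a concrete push to the built-in repository might look like this (a sketch; the URL and credentials reuse the example values from the offline sync section):

helm cm-push ./dao-2048-1.4.1.tgz http://10.5.14.40:8081 --username rootuser --password rootpass123\n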

                                                                                            Note

                                                                                            This method is only applicable to Harbor repositories.

                                                                                            1. Log into the Harbor repository, ensuring the logged-in user has permissions to push;

                                                                                            2. Go to the relevant project, select the Helm Charts tab, click the Upload button on the page to upload the Helm Chart.

                                                                                          3. Sync Remote Repository Data

Manual Sync / Auto Sync

                                                                                            By default, the cluster does not enable Helm Repository Auto-Refresh, so you need to perform a manual sync operation. The general steps are:

                                                                                            Go to Helm Apps -> Helm Repositories, click the \u2507 button on the right side of the repository list, and select Sync Repository to complete the repository data synchronization.

                                                                                            If you need to enable the Helm repository auto-sync feature, you can go to Cluster Maintenance -> Cluster Settings -> Advanced Settings and turn on the Helm repository auto-refresh switch.

                                                                                          "},{"location":"en/end-user/kpanda/inspect/index.html","title":"Cluster Inspection","text":"

Cluster inspection allows administrators to check the overall health of a cluster on a regular or ad-hoc basis, giving them proactive control over cluster security. With a well-planned inspection schedule, administrators can monitor the cluster status at any time and address potential issues in advance, replacing passive troubleshooting after failures with proactive monitoring and prevention.

                                                                                          The cluster inspection feature provided by AI platform's container management module supports custom inspection items at the cluster, node, and pod levels. After the inspection is completed, it automatically generates visual inspection reports.

                                                                                          • Cluster Level: Checks the running status of system components in the cluster, including cluster status, resource usage, and specific inspection items for control nodes, such as the status of kube-apiserver and etcd .
                                                                                          • Node Level: Includes common inspection items for both control nodes and worker nodes, such as node resource usage, handle counts, PID status, and network status.
• Pod Level: Checks the CPU and memory usage and running status of pods, and the status of PVs (Persistent Volumes) and PVCs (PersistentVolumeClaims).

                                                                                          For information on security inspections or executing security-related inspections, refer to the supported security scan types in AI platform.

                                                                                          "},{"location":"en/end-user/kpanda/inspect/config.html","title":"Creating Inspection Configuration","text":"

                                                                                          AI platform Container Management module provides cluster inspection functionality, which supports inspection at the cluster, node, and pod levels.

                                                                                          • Cluster level: Check the running status of system components in the cluster, including cluster status, resource usage, and specific inspection items for control nodes such as kube-apiserver and etcd .
                                                                                          • Node level: Includes common inspection items for both control nodes and worker nodes, such as node resource usage, handle count, PID status, and network status.
                                                                                          • Pod level: Check the CPU and memory usage, running status, PV and PVC status of Pods.

                                                                                          Here's how to create an inspection configuration.

                                                                                          1. Click Cluster Inspection in the left navigation bar.

                                                                                          2. On the right side of the page, click Inspection Configuration .

                                                                                          3. Fill in the inspection configuration based on the following instructions, then click OK at the bottom of the page.

• Cluster: Select the clusters that you want to inspect from the dropdown list. If you select multiple clusters, multiple inspection configurations are generated automatically (they are identical except for the target cluster).
                                                                                            • Scheduled Inspection: When enabled, it allows for regular automatic execution of cluster inspections based on a pre-set inspection frequency.
• Inspection Frequency: Set the interval for automatic inspections, e.g., every Tuesday at 10 AM. Custom Cron expressions are supported (see the example after this list); refer to Cron Schedule Syntax for more information.
                                                                                            • Number of Inspection Records to Retain: Specifies the maximum number of inspection records to be retained, including all inspection records for each cluster.
                                                                                            • Parameter Configuration: The parameter configuration is divided into three parts: cluster level, node level, and pod level. You can enable or disable specific inspection items based on your requirements.
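
For instance, using the standard five-field Cron syntax (minute, hour, day of month, month, day of week), every Tuesday at 10 AM is written as:

0 10 * * 2\n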

After creating the inspection configuration, it will be automatically displayed in the inspection configuration list. Click the more options button on the right of the configuration to immediately perform an inspection, modify the inspection configuration, or delete the inspection configuration and reports.

                                                                                          • Click Inspection to perform an inspection once based on the configuration.
                                                                                          • Click Inspection Configuration to modify the inspection configuration.
                                                                                          • Click Delete to delete the inspection configuration and reports.

                                                                                          Note

                                                                                          • After creating the inspection configuration, if the Scheduled Inspection configuration is enabled, inspections will be automatically executed at the specified time.
                                                                                          • If Scheduled Inspection configuration is not enabled, you need to manually trigger the inspection.
                                                                                          "},{"location":"en/end-user/kpanda/inspect/inspect.html","title":"Start Cluster Inspection","text":"

                                                                                          After creating an inspection configuration, if the Scheduled Inspection configuration is enabled, inspections will be automatically executed at the specified time. If the Scheduled Inspection configuration is not enabled, you need to manually trigger the inspection.

                                                                                          This page explains how to manually perform a cluster inspection.

                                                                                          "},{"location":"en/end-user/kpanda/inspect/inspect.html#prerequisites","title":"Prerequisites","text":"
                                                                                          • Integrate or create a cluster in the Container Management module.
                                                                                          • Create an inspection configuration.
                                                                                          • The selected cluster is in the Running state and the insight component has been installed in the cluster.
                                                                                          "},{"location":"en/end-user/kpanda/inspect/inspect.html#steps","title":"Steps","text":"

                                                                                          When performing an inspection, you can choose to inspect multiple clusters in batches or perform a separate inspection for a specific cluster.

Batch Inspection / Individual Inspection
                                                                                          1. Click Cluster Inspection in the top-level navigation bar of the Container Management module, then click Inspection on the right side of the page.

                                                                                          2. Select the clusters you want to inspect, then click OK at the bottom of the page.

                                                                                            • If you choose to inspect multiple clusters at the same time, the system will perform inspections based on different inspection configurations for each cluster.
                                                                                            • If no inspection configuration is set for a cluster, the system will use the default configuration.

                                                                                          1. Go to the Cluster Inspection page.
                                                                                          2. Click the more options button ( \u2507 ) on the right of the proper inspection configuration, then select Inspection from the popup menu.

                                                                                          "},{"location":"en/end-user/kpanda/inspect/report.html","title":"Check Inspection Reports","text":"

                                                                                          After the inspection execution is completed, you can view the inspection records and detailed inspection reports.

                                                                                          "},{"location":"en/end-user/kpanda/inspect/report.html#prerequisites","title":"Prerequisites","text":"
                                                                                          • Create an inspection configuration.
                                                                                          • Perform at least one inspection execution.
                                                                                          "},{"location":"en/end-user/kpanda/inspect/report.html#steps","title":"Steps","text":"
                                                                                          1. Go to the Cluster Inspection page and click the name of the target inspection cluster.
2. Click the name of the inspection record you want to view.

                                                                                            • Each inspection execution generates an inspection record.
• When the number of inspection records exceeds the maximum retention specified in the inspection configuration, the earliest records (by execution time) are deleted first.

3. View the detailed information of the inspection, which may include an overview of cluster resources and the running status of system components.

                                                                                            You can download the inspection report or delete the inspection report from the top right corner of the page.

                                                                                          "},{"location":"en/end-user/kpanda/namespaces/createns.html","title":"Namespaces","text":"

                                                                                          Namespaces are an abstraction used in Kubernetes for resource isolation. A cluster can contain multiple namespaces with different names, and the resources in each namespace are isolated from each other. For a detailed introduction to namespaces, refer to Namespaces.

                                                                                          This page will introduce the related operations of the namespace.

                                                                                          "},{"location":"en/end-user/kpanda/namespaces/createns.html#create-a-namespace","title":"Create a namespace","text":"

Namespaces can be created easily through a form, or created quickly by writing or importing a YAML file.

                                                                                          Note

                                                                                          • Before creating a namespace, you need to Integrate a Kubernetes cluster or Create a Kubernetes cluster in the container management module.
• The default namespace default is usually generated automatically after cluster initialization. For production clusters, however, it is recommended to create dedicated namespaces for ease of management rather than using the default namespace directly.
                                                                                          "},{"location":"en/end-user/kpanda/namespaces/createns.html#create-with-form","title":"Create with form","text":"
                                                                                          1. On the cluster list page, click the name of the target cluster.

                                                                                          2. Click Namespace in the left navigation bar, then click the Create button on the right side of the page.

                                                                                          3. Fill in the name of the namespace, configure the workspace and labels (optional), and then click OK.

                                                                                            Info

                                                                                            • After binding a namespace to a workspace, the resources of that namespace will be shared with the bound workspace. For a detailed explanation of workspaces, refer to Workspaces and Hierarchies.

                                                                                            • After the namespace is created, you can still bind/unbind the workspace.

                                                                                          4. Click OK to complete the creation of the namespace. On the right side of the namespace list, click \u2507 to select update, bind/unbind workspace, quota management, delete, and more from the pop-up menu.

                                                                                          "},{"location":"en/end-user/kpanda/namespaces/createns.html#create-from-yaml","title":"Create from YAML","text":"
                                                                                          1. On the Clusters page, click the name of the target cluster.

                                                                                          2. Click Namespace in the left navigation bar, then click the YAML Create button on the right side of the page.

3. Enter or paste the prepared YAML content, or import an existing local YAML file.

                                                                                            After entering the YAML content, click Download to save the YAML file locally.

                                                                                          4. Finally, click OK in the lower right corner of the pop-up box.
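For reference, a minimal namespace manifest accepted by the YAML creation dialog might look like the following sketch (the name demo-ns and the label are placeholder examples):

apiVersion: v1\nkind: Namespace\nmetadata:\n  name: demo-ns\n  labels:\n    app: demo\n

The same file can also be applied outside the UI with kubectl apply -f <file>.yaml.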

                                                                                          "},{"location":"en/end-user/kpanda/namespaces/exclusive.html","title":"Namespace Exclusive Nodes","text":"

Namespace exclusive nodes in a Kubernetes cluster allow a specific namespace to have exclusive access to one or more nodes' CPU, memory, and other resources through taints and tolerations. Once exclusive nodes are configured for a specific namespace, applications and services from other namespaces cannot run on the exclusive nodes. Using exclusive nodes allows important applications to have exclusive access to some computing resources, achieving physical isolation from other applications.

                                                                                          Note

                                                                                          Applications and services running on a node before it is set to be an exclusive node will not be affected and will continue to run normally on that node. Only when these Pods are deleted or rebuilt will they be scheduled to other non-exclusive nodes.

                                                                                          "},{"location":"en/end-user/kpanda/namespaces/exclusive.html#preparation","title":"Preparation","text":"

                                                                                          Check whether the kube-apiserver of the current cluster has enabled the PodNodeSelector and PodTolerationRestriction admission controllers.

                                                                                          The use of namespace exclusive nodes requires users to enable the PodNodeSelector and PodTolerationRestriction admission controllers on the kube-apiserver. For more information about admission controllers, refer to Kubernetes Admission Controllers Reference.

                                                                                          You can go to any Master node in the current cluster to check whether these two features are enabled in the kube-apiserver.yaml file, or you can execute the following command on the Master node for a quick check:

[root@g-master1 ~]# cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep enable-admission-plugins\n\n# The expected output is as follows:\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction\n
                                                                                          "},{"location":"en/end-user/kpanda/namespaces/exclusive.html#enable-namespace-exclusive-nodes-on-global-cluster","title":"Enable Namespace Exclusive Nodes on Global Cluster","text":"

Since the Global cluster runs platform basic components such as kpanda, ghippo, and insight, enabling namespace exclusive nodes on the Global cluster may prevent system components from being scheduled onto the exclusive nodes when they restart, affecting the overall high availability of the system. Therefore, we generally do not recommend enabling the namespace exclusive node feature on the Global cluster.

                                                                                          If you do need to enable namespace exclusive nodes on the Global cluster, please follow the steps below:

                                                                                          1. Enable the PodNodeSelector and PodTolerationRestriction admission controllers for the kube-apiserver of the Global cluster

                                                                                            Note

                                                                                            If the cluster has already enabled the above two admission controllers, please skip this step and go directly to configure system component tolerations.

                                                                                            Go to any Master node in the current cluster to modify the kube-apiserver.yaml configuration file, or execute the following command on the Master node for configuration:

[root@g-master1 ~]# vi /etc/kubernetes/manifests/kube-apiserver.yaml\n\n# The file content is similar to the following:\napiVersion: v1\nkind: Pod\nmetadata:\n    ......\nspec:\n  containers:\n  - command:\n    - kube-apiserver\n    ......\n    - --default-not-ready-toleration-seconds=300\n    - --default-unreachable-toleration-seconds=300\n    - --enable-admission-plugins=NodeRestriction   # List of enabled admission controllers\n    - --enable-aggregator-routing=False\n    - --enable-bootstrap-token-auth=true\n    - --endpoint-reconciler-type=lease\n    - --etcd-cafile=/etc/kubernetes/ssl/etcd/ca.crt\n    ......\n

                                                                                            Find the --enable-admission-plugins parameter and add the PodNodeSelector and PodTolerationRestriction admission controllers (separated by commas). Refer to the following:

# Add PodNodeSelector and PodTolerationRestriction to the list\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction\n
                                                                                          2. Add toleration annotations to the namespace where the platform components are located

                                                                                            After enabling the admission controllers, you need to add toleration annotations to the namespace where the platform components are located to ensure the high availability of the platform components.

                                                                                            The system component namespaces for AI platform are as follows:

| Namespace | System Components Included |
| --- | --- |
| kpanda-system | kpanda |
| hwameiStor-system | hwameiStor |
| istio-system | istio |
| metallb-system | metallb |
| cert-manager-system | cert-manager |
| contour-system | contour |
| kubean-system | kubean |
| ghippo-system | ghippo |
| kcoral-system | kcoral |
| kcollie-system | kcollie |
| insight-system | insight, insight-agent |
| ipavo-system | ipavo |
| kairship-system | kairship |
| karmada-system | karmada |
| amamba-system | amamba, jenkins |
| skoala-system | skoala |
| mspider-system | mspider |
| mcamel-system | mcamel-rabbitmq, mcamel-elasticsearch, mcamel-mysql, mcamel-redis, mcamel-kafka, mcamel-minio, mcamel-postgresql |
| spidernet-system | spidernet |
| kangaroo-system | kangaroo |
| gmagpie-system | gmagpie |
| dowl-system | dowl |

Check whether the above namespaces exist in the current cluster, then execute the following command to add the annotation scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]' to each of them.

kubectl annotate ns <namespace-name> scheduler.alpha.kubernetes.io/defaultTolerations='[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]'\n
                                                                                            Please make sure to replace <namespace-name> with the name of the platform namespace you want to add the annotation to.

                                                                                          3. Use the interface to set exclusive nodes for the namespace

                                                                                            After confirming that the PodNodeSelector and PodTolerationRestriction admission controllers on the cluster API server have been enabled, please follow the steps below to use the AI platform UI management interface to set exclusive nodes for the namespace.

                                                                                            1. Click the cluster name in the cluster list page, then click Namespace in the left navigation bar.

                                                                                            2. Click the namespace name, then click the Exclusive Node tab, and click Add Node on the bottom right.

                                                                                            3. Select which nodes you want to be exclusive to this namespace on the left side of the page. On the right side, you can clear or delete a selected node. Finally, click OK at the bottom.

                                                                                            4. You can view the current exclusive nodes for this namespace in the list. You can choose to Stop Exclusivity on the right side of the node.

                                                                                              After cancelling exclusivity, Pods from other namespaces can also be scheduled to this node.
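To double-check from the command line, you can inspect the node's taints; this is a quick sketch that assumes the platform implements exclusivity with a taint whose key is ExclusiveNamespace, as the toleration annotation above suggests:

kubectl describe node <node-name> | grep -A 3 Taints\n

Replace <node-name> with the node you set as exclusive.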

                                                                                          "},{"location":"en/end-user/kpanda/namespaces/exclusive.html#enable-namespace-exclusive-nodes-on-non-global-clusters","title":"Enable Namespace Exclusive Nodes on Non-Global Clusters","text":"

                                                                                          To enable namespace exclusive nodes on non-Global clusters, please follow the steps below:

                                                                                          1. Enable the PodNodeSelector and PodTolerationRestriction admission controllers for the kube-apiserver of the current cluster

                                                                                            Note

                                                                                            If the cluster has already enabled the above two admission controllers, please skip this step and go directly to using the interface to set exclusive nodes for the namespace.

                                                                                            Go to any Master node in the current cluster to modify the kube-apiserver.yaml configuration file, or execute the following command on the Master node for configuration:

[root@g-master1 ~]# vi /etc/kubernetes/manifests/kube-apiserver.yaml\n\n# The file content is similar to the following:\napiVersion: v1\nkind: Pod\nmetadata:\n    ......\nspec:\n  containers:\n  - command:\n    - kube-apiserver\n    ......\n    - --default-not-ready-toleration-seconds=300\n    - --default-unreachable-toleration-seconds=300\n    - --enable-admission-plugins=NodeRestriction   # List of enabled admission controllers\n    - --enable-aggregator-routing=False\n    - --enable-bootstrap-token-auth=true\n    - --endpoint-reconciler-type=lease\n    - --etcd-cafile=/etc/kubernetes/ssl/etcd/ca.crt\n    ......\n

                                                                                            Find the --enable-admission-plugins parameter and add the PodNodeSelector and PodTolerationRestriction admission controllers (separated by commas). Refer to the following:

# Add PodNodeSelector and PodTolerationRestriction to the list\n- --enable-admission-plugins=NodeRestriction,PodNodeSelector,PodTolerationRestriction\n
                                                                                          2. Use the interface to set exclusive nodes for the namespace

                                                                                            After confirming that the PodNodeSelector and PodTolerationRestriction admission controllers on the cluster API server have been enabled, please follow the steps below to use the AI platform UI management interface to set exclusive nodes for the namespace.

                                                                                            1. Click the cluster name in the cluster list page, then click Namespace in the left navigation bar.

                                                                                            2. Click the namespace name, then click the Exclusive Node tab, and click Add Node on the bottom right.

                                                                                            3. Select which nodes you want to be exclusive to this namespace on the left side of the page. On the right side, you can clear or delete a selected node. Finally, click OK at the bottom.

                                                                                            4. You can view the current exclusive nodes for this namespace in the list. You can choose to Stop Exclusivity on the right side of the node.

                                                                                              After cancelling exclusivity, Pods from other namespaces can also be scheduled to this node.

                                                                                          3. Add toleration annotations to the namespace where the components that need high availability are located (optional)

                                                                                            Execute the following command to add the annotation: scheduler.alpha.kubernetes.io/defaultTolerations: '[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]' to the namespace where the components that need high availability are located.

kubectl annotate ns <namespace-name> scheduler.alpha.kubernetes.io/defaultTolerations='[{\"operator\": \"Exists\", \"effect\": \"NoSchedule\", \"key\": \"ExclusiveNamespace\"}]'\n

Please make sure to replace <namespace-name> with the name of the namespace you want to annotate.
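To verify that the annotation was applied, a quick check with kubectl:

kubectl get ns <namespace-name> -o jsonpath='{.metadata.annotations}'\n

The output should include the scheduler.alpha.kubernetes.io/defaultTolerations key.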

                                                                                          "},{"location":"en/end-user/kpanda/namespaces/podsecurity.html","title":"Pod Security Policy","text":"

Pod security policies in a Kubernetes cluster allow you to control the security-related behavior of Pods by configuring different levels and modes for specific namespaces. Only Pods that meet certain conditions are accepted by the system. Pod security defines three levels and three modes, so you can choose the scheme that best fits your needs for setting restriction policies.

                                                                                          Note

                                                                                          Only one security policy can be configured for one security mode. Please be careful when configuring the enforce security mode for a namespace, as violations will prevent Pods from being created.

                                                                                          This section will introduce how to configure Pod security policies for namespaces through the container management interface.

                                                                                          "},{"location":"en/end-user/kpanda/namespaces/podsecurity.html#prerequisites","title":"Prerequisites","text":"
                                                                                          • The container management module has integrated a Kubernetes cluster or created a Kubernetes cluster. The cluster version needs to be v1.22 or above, and you should be able to access the cluster's UI interface.

                                                                                          • A namespace has been created, a user has been created, and the user has been granted NS Admin or higher permissions. For details, refer to Namespace Authorization.

                                                                                          "},{"location":"en/end-user/kpanda/namespaces/podsecurity.html#configure-pod-security-policies-for-namespace","title":"Configure Pod Security Policies for Namespace","text":"
                                                                                          1. Select the namespace for which you want to configure Pod security policies and go to the details page. Click Configure Policy on the Pod Security Policy page to go to the configuration page.

                                                                                          2. Click Add Policy on the configuration page, and a policy will appear, including security level and security mode. The following is a detailed introduction to the security level and security policy.

| Security Level | Description |
| --- | --- |
| Privileged | An unrestricted policy that provides the maximum possible range of permissions. This policy allows known privilege escalations. |
| Baseline | A minimally restrictive policy that prohibits known privilege escalations. Allows the use of default (minimally specified) Pod configurations. |
| Restricted | A highly restrictive policy that follows current best practices for protecting Pods. |

| Security Mode | Description |
| --- | --- |
| Audit | Violations of the specified policy add new audit events to the audit log, and the Pod can still be created. |
| Warn | Violations of the specified policy return user-visible warnings, and the Pod can still be created. |
| Enforce | Violations of the specified policy prevent the Pod from being created. |

A command-line sketch of how these levels and modes map to namespace labels appears after this list.

3. Different security levels correspond to different check items. If you are unsure how to configure your namespace, you can click Policy ConfigMap Explanation at the top right corner of the page to view detailed information.

                                                                                          4. Click Confirm. If the creation is successful, the security policy you configured will appear on the page.

                                                                                          5. Click \u2507 to edit or delete the security policy you configured.
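For reference, the levels and modes above correspond to the namespace labels used by Kubernetes Pod Security Admission; the v1.22+ requirement above suggests this is the underlying mechanism, though the UI applies the configuration for you. A sketch with my-ns as a placeholder namespace:

kubectl label ns my-ns pod-security.kubernetes.io/enforce=baseline pod-security.kubernetes.io/warn=restricted pod-security.kubernetes.io/audit=restricted\n

Each label key names a mode (enforce, warn, audit) and each value names a level (privileged, baseline, restricted).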

                                                                                          "},{"location":"en/end-user/kpanda/network/create-ingress.html","title":"Create an Ingress","text":"

In a Kubernetes cluster, Ingress exposes HTTP and HTTPS routes from outside the cluster to Services within the cluster. Traffic routing is controlled by rules defined on the Ingress resource. Below is an example of a simple Ingress that sends all traffic to the same Service:
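A minimal sketch, adapted from the upstream Kubernetes documentation (the names test-ingress and test and the class nginx are placeholders):

apiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: test-ingress\nspec:\n  ingressClassName: nginx\n  rules:\n  - http:\n      paths:\n      - path: /\n        pathType: Prefix\n        backend:\n          service:\n            name: test\n            port:\n              number: 80\n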

                                                                                          Ingress is an API object that manages external access to services in the cluster, and the typical access method is HTTP. Ingress can provide load balancing, SSL termination, and name-based virtual hosting.

                                                                                          "},{"location":"en/end-user/kpanda/network/create-ingress.html#prerequisites","title":"Prerequisites","text":"
• The container management module has integrated or created a Kubernetes cluster, and you can access the cluster's UI interface.
• A namespace and a user have been created, and the user has been granted the NS Editor role. For details, refer to Namespace Authorization.
• An ingress instance has been created, an application workload has been deployed, and the proper Service has been created.
• When a single instance has multiple containers, make sure the ports used by the containers do not conflict; otherwise the deployment will fail.
                                                                                          "},{"location":"en/end-user/kpanda/network/create-ingress.html#create-ingress","title":"Create ingress","text":"
                                                                                          1. After successfully logging in as the NS Editor user, click Clusters in the upper left corner to enter the Clusters page. In the list of clusters, click a cluster name.

                                                                                          2. In the left navigation bar, click Container Network -> Ingress to enter the service list, and click the Create Ingress button in the upper right corner.

                                                                                            Note

                                                                                            It is also possible to Create from YAML .

3. Open the Create Ingress page to configure it. There are two protocol types to choose from; refer to the following two parameter tables for configuration.

                                                                                          "},{"location":"en/end-user/kpanda/network/create-ingress.html#create-http-protocol-ingress","title":"Create HTTP protocol ingress","text":"Parameter Description Example value Ingress name [Type] Required[Meaning] Enter the name of the new ingress. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and start with a lowercase English letter, lowercase English letters or numbers. Ing-01 Namespace [Type] Required[Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and start with a lowercase English letter and end with a lowercase English letter or number. default Protocol [Type] Required [Meaning] Refers to the protocol that authorizes inbound access to the cluster service, and supports HTTP (no identity authentication required) or HTTPS (identity authentication needs to be configured) protocol. Here select the ingress of HTTP protocol. HTTP Domain Name [Type] Required [Meaning] Use the domain name to provide external access services. The default is the domain name of the cluster testing.daocloud.io LB Type [Type] Required [Meaning] The usage range of the Ingress instance. Scope of use of Ingress Platform-level load balancer : In the same cluster, share the same Ingress instance, where all Pods can receive requests distributed by the load balancer. Tenant-level load balancer : Tenant load balancer, the Ingress instance belongs exclusively to the current namespace, or belongs to a certain workspace, and the set workspace includes the current namespace, and all Pods can receive it Requests distributed by this load balancer. Platform Level Load Balancer Ingress Class [Type] Optional[Meaning] Select the proper Ingress instance, and import traffic to the specified Ingress instance after selection. When it is None, the default DefaultClass is used. Please set the DefaultClass when creating an Ingress instance. For more information, refer to Ingress Class< br /> Ngnix Session persistence [Type] Optional[Meaning] Session persistence is divided into three types: L4 source address hash , Cookie Key , L7 Header Name . Keep L4 Source Address Hash : : When enabled, the following tag is added to the Annotation by default: nginx.ingress.kubernetes.io/upstream-hash-by: \"\\(binary_remote_addr\"<br /> __Cookie Key__ : When enabled, the connection from a specific client will be passed to the same Pod. After enabled, the following parameters are added to the Annotation by default:<br /> nginx.ingress.kubernetes.io/affinity: \"cookie\"<br /> nginx.ingress.kubernetes .io/affinity-mode: persistent<br /> __L7 Header Name__ : After enabled, the following tag is added to the Annotation by default: nginx.ingress.kubernetes.io/upstream-hash-by: \"\\)http_x_forwarded_for\" Close Path Rewriting [Type] Optional [Meaning] rewrite-target , in some cases, the URL exposed by the backend service is different from the path specified in the Ingress rule. If no URL rewriting configuration is performed, There will be an error when accessing. close Redirect [Type] Optional[Meaning] permanent-redirect , permanent redirection, after entering the rewriting path, the access path will be redirected to the set address. 
close Traffic Distribution [Type] Optional[Meaning] After enabled and set, traffic distribution will be performed according to the set conditions. Based on weight : After setting the weight, add the following Annotation to the created Ingress: nginx.ingress.kubernetes.io/canary-weight: \"10\" Based on Cookie : set After the cookie rules, the traffic will be distributed according to the set cookie conditions Based on Header : After setting the header rules, the traffic will be distributed according to the set header conditions Close Labels [Type] Optional [Meaning] Add a label for the ingress - Annotations [Type] Optional [Meaning] Add annotation for ingress -"},{"location":"en/end-user/kpanda/network/create-ingress.html#create-https-protocol-ingress","title":"Create HTTPS protocol ingress","text":"Parameter Description Example value Ingress name [Type] Required[Meaning] Enter the name of the new ingress. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and start with a lowercase English letter, lowercase English letters or numbers. Ing-01 Namespace [Type] Required[Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. [Note] Please enter a string of 4 to 63 characters, which can contain lowercase English letters, numbers and dashes (-), and start with a lowercase English letter and end with a lowercase English letter or number. default Protocol [Type] Required [Meaning] Refers to the protocol that authorizes inbound access to the cluster service, and supports HTTP (no identity authentication required) or HTTPS (identity authentication needs to be configured) protocol. Here select the ingress of HTTPS protocol. HTTPS Domain Name [Type] Required [Meaning] Use the domain name to provide external access services. The default is the domain name of the cluster testing.daocloud.io Secret [Type] Required [Meaning] Https TLS certificate, Create Secret. Forwarding policy [Type] Optional[Meaning] Specify the access policy of Ingress. Path: Specifies the URL path for service access, the default is the root path/directoryTarget service: Service name for ingressTarget service port: Port exposed by the service LB Type [Type] Required [Meaning] The usage range of the Ingress instance. Platform-level load balancer : In the same cluster, the same Ingress instance is shared, and all Pods can receive requests distributed by the load balancer. Tenant-level load balancer : Tenant load balancer, the Ingress instance belongs exclusively to the current namespace or to a certain workspace. This workspace contains the current namespace, and all Pods can receive the workload from this Balanced distribution of requests. Platform Level Load Balancer Ingress Class [Type] Optional[Meaning] Select the proper Ingress instance, and import traffic to the specified Ingress instance after selection. When it is None, the default DefaultClass is used. Please set the DefaultClass when creating an Ingress instance. For more information, refer to Ingress Class< br /> None Session persistence [Type] Optional[Meaning] Session persistence is divided into three types: L4 source address hash , Cookie Key , L7 Header Name . 
Keep L4 Source Address Hash : : When enabled, the following tag is added to the Annotation by default: nginx.ingress.kubernetes.io/upstream-hash-by: \"\\(binary_remote_addr\"<br /> __Cookie Key__ : When enabled, the connection from a specific client will be passed to the same Pod. After enabled, the following parameters are added to the Annotation by default:<br /> nginx.ingress.kubernetes.io/affinity: \"cookie\"<br /> nginx.ingress.kubernetes .io/affinity-mode: persistent<br /> __L7 Header Name__ : After enabled, the following tag is added to the Annotation by default: nginx.ingress.kubernetes.io/upstream-hash-by: \"\\)http_x_forwarded_for\" Close Labels [Type] Optional [Meaning] Add a label for the ingress Annotations [Type] Optional[Meaning] Add annotation for ingress"},{"location":"en/end-user/kpanda/network/create-ingress.html#create-ingress-successfully","title":"Create ingress successfully","text":"

                                                                                          After configuring all the parameters, click the OK button to return to the ingress list automatically. On the right side of the list, click \u2507 to modify or delete the selected ingress.

                                                                                          "},{"location":"en/end-user/kpanda/network/create-services.html","title":"Create a Service","text":"

In a Kubernetes cluster, each Pod has its own internal IP address, but Pods in a workload may be created and deleted at any time, so Pod IP addresses cannot be used directly to provide services externally.

This is solved by creating a Service, which provides a fixed IP address, decouples the front end of a workload from its back end, and lets external users access the workload. A Service can also provide load balancing, enabling access to workloads from the public network.

                                                                                          "},{"location":"en/end-user/kpanda/network/create-services.html#prerequisites","title":"Prerequisites","text":"
• The container management module has integrated or created a Kubernetes cluster, and you can access the cluster's UI interface.

• A namespace and a user have been created, and the user has been granted the NS Editor role. For details, refer to Namespace Authorization.

• When a single instance has multiple containers, make sure the ports used by the containers do not conflict; otherwise the deployment will fail.

                                                                                          "},{"location":"en/end-user/kpanda/network/create-services.html#create-service","title":"Create service","text":"
                                                                                          1. After successfully logging in as the NS Editor user, click Clusters in the upper left corner to enter the Clusters page. In the list of clusters, click a cluster name.

                                                                                          2. In the left navigation bar, click Container Network -> Service to enter the service list, and click the Create Service button in the upper right corner.

Tip

It is also possible to create a service via YAML.
                                                                                          3. Open the Create Service page, select an access type, and refer to the following three parameter tables for configuration.

                                                                                          "},{"location":"en/end-user/kpanda/network/create-services.html#create-clusterip-service","title":"Create ClusterIP service","text":"

                                                                                          Click Intra-Cluster Access (ClusterIP) , which refers to exposing services through the internal IP of the cluster. The services selected for this option can only be accessed within the cluster. This is the default service type. Refer to the configuration parameters in the table below.

| Parameter | Description | Example value |
| --- | --- | --- |
| Access type | [Type] Required. [Meaning] Specify the method of Pod service discovery; here, select intra-cluster access (ClusterIP). | ClusterIP |
| Service Name | [Type] Required. [Meaning] Enter the name of the new service. [Note] Enter a string of 4 to 63 characters that may contain lowercase letters, numbers, and dashes (-), and must start with a lowercase letter and end with a lowercase letter or number. | svc-01 |
| Namespace | [Type] Required. [Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. | default |
| Label selector | [Type] Required. [Meaning] Add a label; the Service selects Pods according to the label. Click \"Add\" after filling it in. You can also reference the label of an existing workload: click Reference workload label, select the workload in the pop-up window, and the system uses the selected workload's label as the selector by default. | app:job01 |
| Port configuration | [Type] Required. [Meaning] To add a protocol port for the service, select the port protocol type first; TCP and UDP are currently supported. Port Name: enter the name of the custom port. Service port (port): the access port through which the Pod provides services externally. Container port (targetport): the container port the workload actually listens on, used to expose the service within the cluster. | - |
| Session Persistence | [Type] Optional. [Meaning] When enabled, requests from the same client are forwarded to the same Pod. | Enabled |
| Maximum session hold time | [Type] Optional. [Meaning] After session persistence is enabled, the maximum hold time; 30 seconds by default. | 30 seconds |
| Annotation | [Type] Optional. [Meaning] Add annotations for the service. | - |

"},{"location":"en/end-user/kpanda/network/create-services.html#create-nodeport-service","title":"Create NodePort service","text":"

Click NodePort, which means exposing the service via the IP and a static port (NodePort) on each node. A NodePort service is routed to the automatically created ClusterIP service. You can access a NodePort service from outside the cluster by requesting <NodeIP>:<NodePort>. Refer to the configuration parameters in the table below.

| Parameter | Description | Example value |
| --- | --- | --- |
| Access type | [Type] Required. [Meaning] Specify the method of Pod service discovery; here, select node access (NodePort). | NodePort |
| Service Name | [Type] Required. [Meaning] Enter the name of the new service. [Note] Enter a string of 4 to 63 characters that may contain lowercase letters, numbers, and dashes (-), and must start with a lowercase letter and end with a lowercase letter or number. | svc-01 |
| Namespace | [Type] Required. [Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. | default |
| Label selector | [Type] Required. [Meaning] Add a label; the Service selects Pods according to the label. Click \"Add\" after filling it in. You can also reference the label of an existing workload: click Reference workload label, select the workload in the pop-up window, and the system uses the selected workload's label as the selector by default. | - |
| Port configuration | [Type] Required. [Meaning] To add a protocol port for the service, select the port protocol type first; TCP and UDP are currently supported. Port Name: enter the name of the custom port. Service port (port): the access port through which the Pod provides services externally; by default, the service port is set to the same value as the container port field for convenience. Container port (targetport): the container port the workload actually listens on. Node port (nodeport): the port on the node that receives traffic forwarded from the ClusterIP; it serves as the entrance for external traffic. | - |
| Session Persistence | [Type] Optional. [Meaning] When enabled, requests from the same client are forwarded to the same Pod. When enabled, .spec.sessionAffinity of the Service is ClientIP; for details, refer to Session Affinity for Service. | Enabled |
| Maximum session hold time | [Type] Optional. [Meaning] After session persistence is enabled, the maximum hold time; the default timeout is 30 seconds (.spec.sessionAffinityConfig.clientIP.timeoutSeconds defaults to 30). | 30 seconds |
| Annotation | [Type] Optional. [Meaning] Add annotations for the service. | - |

"},{"location":"en/end-user/kpanda/network/create-services.html#create-loadbalancer-service","title":"Create LoadBalancer service","text":"

                                                                                          Click Load Balancer , which refers to using the cloud provider's load balancer to expose services to the outside. External load balancers can route traffic to automatically created NodePort services and ClusterIP services. Refer to the configuration parameters in the table below.

| Parameter | Description | Example value |
| --- | --- | --- |
| Access type | [Type] Required. [Meaning] Specify the method of Pod service discovery; here, select load balancer access (LoadBalancer). | LoadBalancer |
| Service Name | [Type] Required. [Meaning] Enter the name of the new service. [Note] Enter a string of 4 to 63 characters that may contain lowercase letters, numbers, and dashes (-), and must start with a lowercase letter and end with a lowercase letter or number. | svc-01 |
| Namespace | [Type] Required. [Meaning] Select the namespace where the new service is located. For more information about namespaces, refer to Namespace Overview. | default |
| External Traffic Policy | [Type] Required. [Meaning] Set the external traffic policy. Cluster: traffic can be forwarded to Pods on all nodes in the cluster. Local: traffic is only sent to Pods on the node that received it. | - |
| Label selector | [Type] Required. [Meaning] Add a label; the Service selects Pods according to the label. Click \"Add\" after filling it in. You can also reference the label of an existing workload: click Reference workload label, select the workload in the pop-up window, and the system uses the selected workload's label as the selector by default. | - |
| Load balancing type | [Type] Required. [Meaning] The type of load balancing used; MetalLB and others are currently supported. | MetalLB |
| IP Pool | [Type] Required. [Meaning] When the selected load balancing type is MetalLB, the LoadBalancer Service allocates IP addresses from this pool by default and announces all IP addresses in this pool via ARP. For details, refer to Install MetalLB. | - |
| Load balancing address | [Type] Required. [Meaning] 1. If you are using a public cloud CloudProvider, fill in the load balancing address provided by the cloud provider. 2. If MetalLB is selected as the load balancing type, the IP is obtained from the IP pool above by default; if left empty, it is obtained automatically. | - |
| Port configuration | [Type] Required. [Meaning] To add a protocol port for the service, select the port protocol type first; TCP and UDP are currently supported. Port Name: enter the name of the custom port. Service port (port): the access port through which the Pod provides services externally; by default, the service port is set to the same value as the container port field for convenience. Container port (targetport): the container port the workload actually listens on. Node port (nodeport): the port on the node that receives traffic forwarded from the ClusterIP; it serves as the entrance for external traffic. | - |
| Annotation | [Type] Optional. [Meaning] Add annotations for the service. | - |

"},{"location":"en/end-user/kpanda/network/create-services.html#complete-service-creation","title":"Complete service creation","text":"

                                                                                          After configuring all parameters, click the OK button to return to the service list automatically. On the right side of the list, click \u2507 to modify or delete the selected service.
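For reference, the form fields above map roughly onto a Service manifest. A minimal ClusterIP sketch (all names and ports are placeholder examples):

apiVersion: v1\nkind: Service\nmetadata:\n  name: svc-01\n  namespace: default\nspec:\n  type: ClusterIP\n  selector:\n    app: job01\n  ports:\n  - name: http\n    protocol: TCP\n    port: 80\n    targetPort: 8080\n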

                                                                                          "},{"location":"en/end-user/kpanda/network/network-policy.html","title":"Network Policies","text":"

                                                                                          Network policies in Kubernetes allow you to control network traffic at the IP address or port level (OSI layer 3 or layer 4). The container management module currently supports creating network policies based on Pods or namespaces, using label selectors to specify which traffic can enter or leave Pods with specific labels.

                                                                                          For more details on network policies, refer to the official Kubernetes documentation on Network Policies.

                                                                                          "},{"location":"en/end-user/kpanda/network/network-policy.html#creating-network-policies","title":"Creating Network Policies","text":"

                                                                                          Currently, there are two methods available for creating network policies: YAML and form-based creation. Each method has its advantages and disadvantages, catering to different user needs.

                                                                                          YAML creation requires fewer steps and is more efficient, but it has a higher learning curve as it requires familiarity with configuring network policy YAML files.

                                                                                          Form-based creation is more intuitive and straightforward. Users can simply fill in the proper values based on the prompts. However, this method involves more steps.

                                                                                          "},{"location":"en/end-user/kpanda/network/network-policy.html#yaml-creation","title":"YAML Creation","text":"
                                                                                          1. In the cluster list, click the name of the target cluster, then navigate to Container Network -> Network Policies -> Create with YAML in the left navigation bar.

                                                                                          2. In the pop-up dialog, enter or paste the pre-prepared YAML file, then click OK at the bottom of the dialog.
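For reference, a minimal sketch of such a YAML file (the policy name and namespace are placeholders); it allows ingress to all Pods in the namespace only from Pods in the same namespace:

apiVersion: networking.k8s.io/v1\nkind: NetworkPolicy\nmetadata:\n  name: allow-same-namespace\n  namespace: default\nspec:\n  podSelector: {}\n  policyTypes:\n  - Ingress\n  ingress:\n  - from:\n    - podSelector: {}\n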

                                                                                          "},{"location":"en/end-user/kpanda/network/network-policy.html#form-based-creation","title":"Form-Based Creation","text":"
                                                                                          1. In the cluster list, click the name of the target cluster, then navigate to Container Network -> Network Policies -> Create Policy in the left navigation bar.

                                                                                          2. Fill in the basic information.

                                                                                            The name and namespace cannot be changed after creation.

                                                                                          3. Fill in the policy configuration.

                                                                                            The policy configuration includes ingress and egress policies. To establish a successful connection from a source Pod to a target Pod, both the egress policy of the source Pod and the ingress policy of the target Pod need to allow the connection. If either side does not allow the connection, the connection will fail.

• Ingress Policy: Click \u2795 to begin configuring the policy. Multiple policies can be configured. The effects of multiple network policies are cumulative: the union of all policies that select a Pod applies to it, and a connection is allowed as long as at least one of those policies allows it.

                                                                                            • Egress Policy

                                                                                          "},{"location":"en/end-user/kpanda/network/network-policy.html#viewing-network-policies","title":"Viewing Network Policies","text":"
                                                                                          1. In the cluster list, click the name of the target cluster, then navigate to Container Network -> Network Policies . Click the name of the network policy.

                                                                                          2. View the basic configuration, associated instances, ingress policies, and egress policies of the policy.

                                                                                          Info

                                                                                          Under the \"Associated Instances\" tab, you can view instance monitoring, logs, container lists, YAML files, events, and more.

                                                                                          "},{"location":"en/end-user/kpanda/network/network-policy.html#updating-network-policies","title":"Updating Network Policies","text":"

                                                                                          There are two ways to update network policies. You can either update them through the form or by using a YAML file.

                                                                                          • On the network policy list page, find the policy you want to update, and choose Update in the action column on the right to update it via the form. Choose Edit YAML to update it using a YAML file.

                                                                                          • Click the name of the network policy, then choose Update in the top right corner of the policy details page to update it via the form. Choose Edit YAML to update it using a YAML file.

                                                                                          "},{"location":"en/end-user/kpanda/network/network-policy.html#deleting-network-policies","title":"Deleting Network Policies","text":"

                                                                                          There are two ways to delete network policies. You can delete network policies either through the form or by using a YAML file.

                                                                                          • On the network policy list page, find the policy you want to delete, and choose Delete in the action column on the right to delete it via the form. Choose Edit YAML to delete it using a YAML file.

                                                                                          • Click the name of the network policy, then choose Delete in the top right corner of the policy details page to delete it via the form. Choose Edit YAML to delete it using a YAML file.

                                                                                          "},{"location":"en/end-user/kpanda/nodes/add-node.html","title":"Cluster Node Expansion","text":"

                                                                                          As the number of business applications continues to grow, the resources of the cluster become increasingly tight. At this point, you can expand the cluster nodes based on kubean. After the expansion, applications can run on the newly added nodes, alleviating resource pressure.

Only clusters created through the container management module support node scaling; clusters integrated from the outside do not support this operation. This article mainly introduces expanding worker nodes of the same architecture in a worker cluster. If you need to add control nodes or heterogeneous worker nodes to the cluster, refer to: Expanding the control nodes of a worker cluster, Adding heterogeneous nodes to a worker cluster, and Expanding the worker nodes of the global service cluster.

                                                                                          1. On the Clusters page, click the name of the target cluster.

If the Cluster Type contains the label Integrated Cluster, the cluster does not support node scaling.

                                                                                          2. Click Nodes in the left navigation bar, and then click Integrate Node in the upper right corner of the page.

                                                                                          3. Enter the host name and node IP and click OK.

                                                                                            Click \u2795 Add Worker Node to continue accessing more nodes.

                                                                                          Note

Integrating a node takes about 20 minutes; please be patient.

                                                                                          "},{"location":"en/end-user/kpanda/nodes/delete-node.html","title":"Node Scales Down","text":"

When the peak business period is over, you can scale down the cluster by removing redundant nodes to save resource costs. After a node is removed, applications can no longer run on it.

                                                                                          "},{"location":"en/end-user/kpanda/nodes/delete-node.html#prerequisites","title":"Prerequisites","text":"
                                                                                          • The current operating user has the Cluster Admin role authorization.
• Only clusters created through the container management module support node scaling; clusters integrated from the outside do not support this operation.
• Before removing a node, you need to cordon it (pause scheduling) and evict the applications on it to other nodes.
• Eviction method: log in to a controller node and use the kubectl drain command to evict all Pods from the node (see the sketch after this list). Safe eviction allows the containers in the Pods to terminate gracefully.
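A typical cordon-and-drain sequence looks like the following sketch (the node name is a placeholder; the flags are standard kubectl options):

kubectl cordon <node-name>\nkubectl drain <node-name> --ignore-daemonsets --delete-emptydir-data --grace-period=60\n

--ignore-daemonsets skips DaemonSet-managed Pods, which would be recreated on the node anyway, and --delete-emptydir-data acknowledges that emptyDir data is lost when Pods move.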
                                                                                          "},{"location":"en/end-user/kpanda/nodes/delete-node.html#precautions","title":"Precautions","text":"
1. When scaling down a cluster, nodes can only be removed one at a time, not in batches.

2. If you need to remove controller nodes, ensure that the final number of controller nodes is odd.

3. The first controller node cannot be taken offline during scale-down. If this operation is necessary, contact the after-sales engineer.

                                                                                          "},{"location":"en/end-user/kpanda/nodes/delete-node.html#steps","title":"Steps","text":"
                                                                                          1. On the Clusters page, click the name of the target cluster.

If the Cluster Type contains the label Integrated Cluster, the cluster does not support node scaling.

                                                                                          2. Click Nodes on the left navigation bar, find the node to be uninstalled, click \u2507 and select Remove .

                                                                                          3. Enter the node name, and click Delete to confirm.
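
                                                                                          For reference, on a plain Kubernetes cluster the analogous final step is the command below; the platform's Remove action additionally uninstalls platform components, so treat this only as a hypothetical equivalent (node name is illustrative):

                                                                                            # deregister the node from the cluster\nkubectl delete node worker-node-1\n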

                                                                                          "},{"location":"en/end-user/kpanda/nodes/labels-annotations.html","title":"Labels and Annotations","text":"

                                                                                          Labels are identifying key-value pairs added to Kubernetes objects such as Pods, nodes, and clusters, which can be combined with label selectors to find and filter Kubernetes objects that meet certain conditions. Each key must be unique for a given object.

                                                                                          Annotations, like labels, are key/value pairs, but they do not have identification or filtering features. Annotations can be used to attach arbitrary metadata to nodes. Annotation keys usually use the format prefix(optional)/name(required) , for example nfd.node.kubernetes.io/extended-resources . If the prefix is omitted, the annotation key is considered private to the user.

                                                                                          For more information about labels and annotations, refer to the official Kubernetes documentation on Labels and Selectors or Annotations.

                                                                                          The steps to add or delete labels and annotations are as follows:

                                                                                          1. On the Clusters page, click the name of the target cluster.

                                                                                          2. Click Nodes on the left navigation bar, click the \u2507 operation icon on the right side of the node, and click Edit Labels or Edit Annotations .

                                                                                          3. Click \u2795 Add to add labels or annotations, click X to delete them, and finally click OK . The equivalent kubectl commands are sketched below.
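
                                                                                          The equivalent kubectl operations, as a minimal sketch (node name, keys, and values are illustrative):

                                                                                            # add and then remove a label\nkubectl label nodes worker-node-1 disktype=ssd\nkubectl label nodes worker-node-1 disktype-\n\n# add and then remove an annotation\nkubectl annotate nodes worker-node-1 example.com/owner=team-a\nkubectl annotate nodes worker-node-1 example.com/owner-\n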

                                                                                          "},{"location":"en/end-user/kpanda/nodes/node-authentication.html","title":"Node Authentication","text":""},{"location":"en/end-user/kpanda/nodes/node-authentication.html#authenticate-nodes-using-ssh-keys","title":"Authenticate Nodes Using SSH Keys","text":"

                                                                                          If you choose to authenticate the nodes of the cluster-to-be-created using SSH keys, you need to configure the public and private keys according to the following instructions.

                                                                                          1. Run the following command on any node within the management cluster of the cluster-to-be-created to generate the public and private keys.

                                                                                            cd /root/.ssh\nssh-keygen -t rsa\n
                                                                                          2. Run the ls command to check if the keys have been successfully created in the management cluster. The correct output should be as follows:

                                                                                            ls\nid_rsa  id_rsa.pub  known_hosts\n

                                                                                            The file named id_rsa is the private key, and the file named id_rsa.pub is the public key.

                                                                                          3. Run the following command to load the public key file id_rsa.pub onto all the nodes of the cluster-to-be-created.

                                                                                            ssh-copy-id -i /root/.ssh/id_rsa.pub root@10.0.0.0\n

                                                                                            Replace the user account and node IP in the above command with the username and IP of the nodes in the cluster-to-be-created. The same operation needs to be performed on every node in the cluster-to-be-created.

                                                                                          4. Run the following command to view the private key file id_rsa created in step 1.

                                                                                            cat /root/.ssh/id_rsa\n

                                                                                            The output should be as follows:

                                                                                            -----BEGIN RSA PRIVATE KEY-----\nMIIEpQIBAAKCAQEA3UvyKINzY5BFuemQ+uJ6q+GqgfvnWwNC8HzZhpcMSjJy26MM\nUtBEBJxy8fMi57XcjYxPibXW/wnd+32ICCycqCwByUmuXeCC1cjlCQDqjcAvXae7\nY54IXGF7wm2IsMNwf0kjFEXjuS48FLDA0mGRaN3BG+Up5geXcHckg3K5LD8kXFFx\ndEmSIjdyw55NaUitmEdHzN7cIdfi6Z56jcV8dcFBgWKUx+ebiyPmZBkXToz6GnMF\nrswzzZCl+G6Jb2xTGy7g7ozb4BoZd1IpSD5EhDanRrESVE0C5YuJ5zUAC0CvVd1l\nv67AK8Ko6MXToHp01/bcsvlM6cqgwUFXZKVeOwIDAQABAoIBAQCO36GQlo3BEjxy\nM2HvGJmqrx+unDxafliRe4nVY2AD515Qf4xNSzke4QM1QoyenMOwf446krQkJPK0\nk+9nl6Xszby5gGCbK4BNFk8I6RaGPjZWeRx6zGUJf8avWJiPxx6yjz2esSC9RiR0\nF0nmiiefVMyAfgv2/5++dK2WUFNNRKLgSRRpP5bRaD5wMzzxtSSXrUon6217HO8p\n3RoWsI51MbVzhdVgpHUNABcoa0rpr9svT6XLKZxY8mxpKFYjM0Wv2JIDABg3kBvh\nQbJ7kStCO3naZjKMU9UuSqVJs06cflGYw7Or8/tABR3LErNQKPjkhAQqt0DXw7Iw\n3tKdTAJBAoGBAP687U7JAOqQkcphek2E/A/sbO/d37ix7Z3vNOy065STrA+ZWMZn\npZ6Ui1B/oJpoZssnfvIoz9sn559X0j67TljFALFd2ZGS0Fqh9KVCqDvfk+Vst1dq\n+3r/yZdTOyswoccxkJiC/GDwZGK0amJWqvob39JCZhDAKIGLbGMmjdAHAoGBAN5k\nm1WGnni1nZ+3dryIwgB6z1hWcnLTamzSET6KhSuo946ET0IRG9xtlheCx6dqICbr\nVk1Y4NtRZjK/p/YGx59rDWf7E3I8ZMgR7mjieOcUZ4lUlA4l7ZIlW/2WZHW+nUXO\nTi20fqJ8qSp4BUvOvuth1pz2GLUHe2/Fxjf7HIstAoGBAPHpPr9r+TfIlPsJeRj2\n6lzA3G8qWFRQfGRYjv0fjv0pA+RIb1rzgP/I90g5+63G6Z+R4WdcxI/OJJNY1iuG\nuw9n/pFxm7U4JC990BPE6nj5iLz+clpNGYckNDBF9VG9vFSrSDLdaYkxoVNvG/xJ\na9Na90H4lm7f3VewrPy310KvAoGAZr+mwNoEh5Kpc6xo8Gxi7aPP/mlaUVD6X7Ki\ngvmu02AqmC7rC4QqEiqTaONkaSXwGusqIWxJ3yp5hELmUBYLzszAEeV/s4zRp1oZ\ng133LBRSTbHFAdBmNdqK6Nu+KGRb92980UMOKvZbliKDl+W6cbfvVu+gtKrzTc3b\naevb4TUCgYEAnJAxyVYDP1nJf7bjBSHXQu1E/DMwbtrqw7dylRJ8cAzI7IxfSCez\n7BYWq41PqVd9/zrb3Pbh2phiVzKe783igAIMqummcjo/kZyCwFsYBzK77max1jF5\naPQsLbRS2aDz8kIH6jHPZ/R+15EROmdtLmA7vIJZGerWWQR0dUU+XXA=\n

                                                                                          Copy the content of the private key and paste it into the interface's key input field.
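
                                                                                          Before creating the cluster, key-based login can be verified with a quick check (the IP matches the illustrative address used above):

                                                                                            # should print the remote hostname without prompting for a password\nssh -i /root/.ssh/id_rsa root@10.0.0.0 hostname\n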

                                                                                          "},{"location":"en/end-user/kpanda/nodes/node-check.html","title":"Create a cluster node availability check","text":"

                                                                                          When creating a cluster or adding nodes to an existing cluster, refer to the table below to check the node configuration to avoid cluster creation or expansion failure due to wrong node configuration.

                                                                                          Check Item Description OS Refer to Supported Architectures and Operating Systems SELinux Off Firewall Off Architecture Consistency Consistent CPU architecture between nodes (such as ARM or x86) Host Time Time is synchronized across all hosts, with a maximum deviation of 10 seconds. Network Connectivity The node and its SSH port can be accessed normally by the platform. CPU Available CPU resources are greater than 4 Cores Memory Available memory resources are greater than 8 GB"},{"location":"en/end-user/kpanda/nodes/node-check.html#supported-architectures-and-operating-systems","title":"Supported architectures and operating systems","text":"Architecture Operating System Remarks ARM Kylin Linux Advanced Server release V10 (Sword) SP2 Recommended ARM UOS Linux ARM openEuler x86 CentOS 7.x Recommended x86 Redhat 7.x Recommended x86 Redhat 8.x Recommended x86 Flatcar Container Linux by Kinvolk x86 Debian Bullseye, Buster, Jessie, Stretch x86 Ubuntu 16.04, 18.04, 20.04, 22.04 x86 Fedora 35, 36 x86 Fedora CoreOS x86 openSUSE Leap 15.x/Tumbleweed x86 Oracle Linux 7, 8, 9 x86 Alma Linux 8, 9 x86 Rocky Linux 8, 9 x86 Amazon Linux 2 x86 Kylin Linux Advanced Server release V10 (Sword) - SP2 Haiguang x86 UOS Linux x86 openEuler"},{"location":"en/end-user/kpanda/nodes/node-details.html","title":"Node Details","text":"

                                                                                          After accessing or creating a cluster, you can view the information of each node in the cluster, including node status, labels, resource usage, Pod, monitoring information, etc.

                                                                                          1. On the Clusters page, click the name of the target cluster.

                                                                                          2. Click Nodes on the left navigation bar to view the node status, role, label, CPU/memory usage, IP address, and creation time.

                                                                                          3. Click the node name to enter the node details page and view more information, including overview information, Pod information, labels and annotations, event list, status, etc.

                                                                                            In addition, you can also view the node's YAML file, monitoring information, labels and annotations, etc.
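
                                                                                          The same information can also be retrieved from the CLI, for example (node name is illustrative):

                                                                                            # list nodes with status, roles, and IPs, then inspect one node\nkubectl get nodes -o wide\nkubectl describe node worker-node-1\n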

                                                                                          "},{"location":"en/end-user/kpanda/nodes/schedule.html","title":"Node Scheduling","text":"

                                                                                          The platform supports suspending or resuming scheduling on a node. Pausing scheduling means new Pods will no longer be scheduled to the node; resuming scheduling means Pods can be scheduled to it again.

                                                                                          1. On the Clusters page, click the name of the target cluster.

                                                                                          2. Click Nodes on the left navigation bar, click the \u2507 operation icon on the right side of the node, and click the Cordon button to suspend scheduling the node.

                                                                                          3. Click the \u2507 operation icon on the right side of the node, and click the Uncordon button to resume scheduling the node.

                                                                                          The node scheduling status may be delayed due to network conditions. Click the refresh icon on the right side of the search box to refresh the node scheduling status.
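
                                                                                          Equivalently, scheduling can be paused and resumed from the CLI, as a minimal sketch (node name is illustrative):

                                                                                            kubectl cordon worker-node-1    # pause scheduling\nkubectl uncordon worker-node-1  # resume scheduling\n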

                                                                                          "},{"location":"en/end-user/kpanda/nodes/taints.html","title":"Node Taints","text":"

                                                                                          A taint allows a node to repel a certain class of Pods and prevents them from being scheduled on that node. One or more taints can be applied to each node, and Pods that do not tolerate these taints will not be scheduled on it.

                                                                                          "},{"location":"en/end-user/kpanda/nodes/taints.html#precautions","title":"Precautions","text":"
                                                                                          1. The current operating user should have NS Editor role authorization or other higher permissions.
                                                                                          2. After adding a taint to a node, only Pods that can tolerate the taint can be scheduled to the node.
                                                                                          "},{"location":"en/end-user/kpanda/nodes/taints.html#steps","title":"Steps","text":"
                                                                                          1. Find the target cluster on the Clusters page, and click the cluster name to enter the Cluster page.

                                                                                          2. In the left navigation bar, click Nodes , find the node whose taints need to be modified, click the \u2507 operation icon on the right, and click the Edit Taints button.

                                                                                          3. Enter the key value information of the taint in the pop-up box, select the taint effect, and click OK .

                                                                                            Click \u2795 Add to add multiple taints to the node, and click X on the right side of the taint effect to delete the taint.

                                                                                            Three taint effects are currently supported:

                                                                                            • NoExecute: This affects pods that are already running on the node as follows:

                                                                                              • Pods that do not tolerate the taint are evicted immediately
                                                                                              • Pods that tolerate the taint without specifying tolerationSeconds in their toleration specification remain bound forever
                                                                                              • Pods that tolerate the taint with a specified tolerationSeconds remain bound for the specified amount of time. After that time elapses, the node lifecycle controller evicts the Pods from the node.
                                                                                            • NoSchedule: No new Pods will be scheduled on the tainted node unless they have a matching toleration. Pods currently running on the node are not evicted.

                                                                                            • PreferNoSchedule: This is a \"preference\" or \"soft\" version of NoSchedule. The control plane will try to avoid placing a Pod that does not tolerate the taint on the node, but this is not guaranteed, so relying on this effect in a production environment is not recommended.

                                                                                          For more details about taints, refer to the Kubernetes documentation on Taints and Tolerations.
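
                                                                                          For reference, the same taint can be set and removed with kubectl (key, value, and node name are illustrative):

                                                                                            kubectl taint nodes worker-node-1 key1=value1:NoSchedule\nkubectl taint nodes worker-node-1 key1=value1:NoSchedule-   # the trailing '-' removes the taint\n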

                                                                                          "},{"location":"en/end-user/kpanda/olm/import-miniooperator.html","title":"Importing MinIo Operator Offline","text":"

                                                                                          This guide explains how to import the MinIO Operator in an offline environment without internet access.

                                                                                          "},{"location":"en/end-user/kpanda/olm/import-miniooperator.html#prerequisites","title":"Prerequisites","text":"
                                                                                          • The current cluster is integrated into container management, and the Global cluster has the kolm component installed (search the Helm templates for kolm).
                                                                                          • The current cluster has the olm component installed, version 0.2.4 or higher (search the Helm templates for olm).
                                                                                          • Ability to execute Docker commands.
                                                                                          • Prepare a container registry.
                                                                                          "},{"location":"en/end-user/kpanda/olm/import-miniooperator.html#steps","title":"Steps","text":"
                                                                                          1. Set the environment variables in the execution environment and use them in the subsequent steps by running the following command:

                                                                                            export OPM_IMG=10.5.14.200/quay.m.daocloud.io/operator-framework/opm:v1.29.0 \nexport BUNDLE_IMG=10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3 \n

                                                                                            How to get the above image addresses:

                                                                                            Go to Container Management -> Select the current cluster -> Helm Apps -> View the olm component -> Plugin Settings , and find the images needed for the opm, minio, minio bundle, and minio operator in the subsequent steps.

                                                                                            Using the screenshot as an example, the four image addresses are as follows:\n\n# opm image\n10.5.14.200/quay.m.daocloud.io/operator-framework/opm:v1.29.0\n\n# minio image\n10.5.14.200/quay.m.daocloud.io/minio/minio:RELEASE.2023-03-24T21-41-23Z\n\n# minio bundle image\n10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3\n\n# minio operator image\n10.5.14.200/quay.m.daocloud.io/minio/operator:v5.0.3\n
                                                                                          2. Run the opm command to get the operators included in the offline bundle image.

                                                                                            # Create the operator directory\n$ mkdir minio-operator && cd minio-operator \n\n# Get the operator yaml\n$ docker run --user root -v $PWD/minio-operator:/minio-operator ${OPM_IMG} alpha bundle unpack --skip-tls-verify -v -d ${BUNDLE_IMG} -o ./minio-operator\n\n# Expected result\n.\n\u2514\u2500\u2500 minio-operator\n    \u251c\u2500\u2500 manifests\n    \u2502   \u251c\u2500\u2500 console-env_v1_configmap.yaml\n    \u2502   \u251c\u2500\u2500 console-sa-secret_v1_secret.yaml\n    \u2502   \u251c\u2500\u2500 console_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 minio-operator.clusterserviceversion.yaml\n    \u2502   \u251c\u2500\u2500 minio.min.io_tenants.yaml\n    \u2502   \u251c\u2500\u2500 operator_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 sts.min.io_policybindings.yaml\n    \u2502   \u2514\u2500\u2500 sts_v1_service.yaml\n    \u2514\u2500\u2500 metadata\n        \u2514\u2500\u2500 annotations.yaml\n\n3 directories, 9 files\n
                                                                                          3. Replace all image addresses in the minio-operator/manifests/minio-operator.clusterserviceversion.yaml file with the image addresses from the offline container registry.

                                                                                            Compare the manifest before and after the replacement to confirm that every image field points to the offline registry.

                                                                                          4. Generate a Dockerfile for building the bundle image.

                                                                                            $ docker run --user root -v $PWD:/minio-operator -w /minio-operator ${OPM_IMG} alpha bundle generate --channels stable,beta -d /minio-operator/minio-operator/manifests -e stable -p minio-operator\n\n# Expected result\n.\n\u251c\u2500\u2500 bundle.Dockerfile\n\u2514\u2500\u2500 minio-operator\n    \u251c\u2500\u2500 manifests\n    \u2502   \u251c\u2500\u2500 console-env_v1_configmap.yaml\n    \u2502   \u251c\u2500\u2500 console-sa-secret_v1_secret.yaml\n    \u2502   \u251c\u2500\u2500 console_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 minio-operator.clusterserviceversion.yaml\n    \u2502   \u251c\u2500\u2500 minio.min.io_tenants.yaml\n    \u2502   \u251c\u2500\u2500 operator_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 sts.min.io_policybindings.yaml\n    \u2502   \u2514\u2500\u2500 sts_v1_service.yaml\n    \u2514\u2500\u2500 metadata\n        \u2514\u2500\u2500 annotations.yaml\n\n3 directories, 10 files\n
                                                                                          5. Build the bundle image and push it to the offline registry.

                                                                                            # Set the new bundle image\nexport OFFLINE_BUNDLE_IMG=10.5.14.200/quay.m.daocloud.io/operatorhubio/minio-operator:v5.0.3-offline\n\n$ docker build . -f bundle.Dockerfile -t ${OFFLINE_BUNDLE_IMG}\n\n$ docker push ${OFFLINE_BUNDLE_IMG}\n
                                                                                          6. Generate a Dockerfile for building the catalog image.

                                                                                            $ docker run --user root -v $PWD:/minio-operator -w /minio-operator ${OPM_IMG} index add  --bundles ${OFFLINE_BUNDLE_IMG} --generate --binary-image ${OPM_IMG} --skip-tls-verify\n\n# Expected result\n.\n\u251c\u2500\u2500 bundle.Dockerfile\n\u251c\u2500\u2500 database\n\u2502   \u2514\u2500\u2500 index.db\n\u251c\u2500\u2500 index.Dockerfile\n\u2514\u2500\u2500 minio-operator\n    \u251c\u2500\u2500 manifests\n    \u2502   \u251c\u2500\u2500 console-env_v1_configmap.yaml\n    \u2502   \u251c\u2500\u2500 console-sa-secret_v1_secret.yaml\n    \u2502   \u251c\u2500\u2500 console_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 minio.min.io_tenants.yaml\n    \u2502   \u251c\u2500\u2500 minio-operator.clusterserviceversion.yaml\n    \u2502   \u251c\u2500\u2500 operator_v1_service.yaml\n    \u2502   \u251c\u2500\u2500 sts.min.io_policybindings.yaml\n    \u2502   \u2514\u2500\u2500 sts_v1_service.yaml\n    \u2514\u2500\u2500 metadata\n        \u2514\u2500\u2500 annotations.yaml\n\n4 directories, 12 files\n
                                                                                          7. Build the catalog image.

                                                                                            # Set the new catalog image  \nexport OFFLINE_CATALOG_IMG=10.5.14.200/release.daocloud.io/operator-framework/system-operator-index:v0.1.0-offline\n\n$ docker build . -f index.Dockerfile -t ${OFFLINE_CATALOG_IMG}  \n\n$ docker push ${OFFLINE_CATALOG_IMG}\n
                                                                                          8. Go to Container Management and update the built-in catsrc image for the Helm App olm to the catalog image built in the previous step ( ${catalog-image} ).

                                                                                          9. After the update is successful, the minio-operator component will appear in the Operator Hub.
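
                                                                                          One way to verify the import from the CLI, assuming a standard OLM installation (resource names and namespaces may differ in your environment):

                                                                                            # list catalog sources, then look for the imported operator package\nkubectl get catalogsource -A\nkubectl get packagemanifests | grep -i minio\n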

                                                                                          "},{"location":"en/end-user/kpanda/permissions/cluster-ns-auth.html","title":"Cluster and Namespace Authorization","text":"

                                                                                          Container management implements authorization based on global permission management and global user/group management. If you need to grant a user the highest container management authority (able to create, manage, and delete all clusters), refer to What is Access Control.

                                                                                          "},{"location":"en/end-user/kpanda/permissions/cluster-ns-auth.html#prerequisites","title":"Prerequisites","text":"

                                                                                          Before authorizing users/groups, complete the following preparations:

                                                                                          • The user/group to be authorized has been created in the global management, refer to user.

                                                                                          • Only Kpanda Owner and Cluster Admin of the current cluster have Cluster authorization capability. For details, refer to Permission Description.

                                                                                          • Only Kpanda Owner , Cluster Admin of the current cluster, and NS Admin of the current namespace have namespace authorization capability.

                                                                                          "},{"location":"en/end-user/kpanda/permissions/cluster-ns-auth.html#cluster-authorization","title":"Cluster Authorization","text":"
                                                                                          1. After the user logs in to the platform, click Privilege Management under Container Management on the left menu bar; the page opens on the Cluster Permissions tab by default.

                                                                                          2. Click the Add Authorization button.

                                                                                          3. On the Add Cluster Permission page, select the target cluster, the user/group to be authorized, and click OK .

                                                                                            Currently, the only supported cluster role is Cluster Admin . For details about permissions, refer to Permission Description. To authorize multiple users/groups at the same time, click Add User Permission and add them one by one.

                                                                                          4. Return to the cluster permission management page, and a message appears on the screen: Cluster permission added successfully .

                                                                                          "},{"location":"en/end-user/kpanda/permissions/cluster-ns-auth.html#namespace-authorization","title":"Namespace Authorization","text":"
                                                                                          1. After the user logs in to the platform, click Permissions under Container Management on the left menu bar, and click the Namespace Permissions tab.

                                                                                          2. Click the Add Authorization button. On the Add Namespace Permission page, select the target cluster, target namespace, and user/group to be authorized, and click OK .

                                                                                            The currently supported namespace roles are NS Admin, NS Editor, and NS Viewer. For details about permissions, refer to Permission Description. To authorize multiple users/groups at the same time, click Add User Permission and add them one by one. Click OK to complete the authorization.

                                                                                          3. Return to the namespace permission management page, and a message appears on the screen: Namespace permission added successfully .

                                                                                            Tip

                                                                                            If you need to delete or edit permissions later, you can click \u2507 on the right side of the list and select Edit or Delete .

                                                                                          "},{"location":"en/end-user/kpanda/permissions/custom-kpanda-role.html","title":"Adding RBAC Rules to System Roles","text":"

                                                                                          Previously, the RBAC rules for system roles in container management were pre-defined and could not be modified by users. To support more flexible permission settings and meet customized needs, you can now modify the RBAC rules for system roles such as cluster admin, ns admin, ns editor, and ns viewer.

                                                                                          The following example demonstrates how to add a new ns-view rule, granting the authority to delete workload deployments. Similar operations can be performed for other rules.

                                                                                          "},{"location":"en/end-user/kpanda/permissions/custom-kpanda-role.html#prerequisites","title":"Prerequisites","text":"

                                                                                          Before adding RBAC rules to system roles, the following prerequisites must be met:

                                                                                          • Container management v0.27.0 and above.
                                                                                          • Integrated Kubernetes cluster or created Kubernetes cluster, and able to access the cluster's UI interface.
                                                                                          • Created a namespace and a user account, and granted that user the NS Viewer role. For details, refer to namespace authorization.

                                                                                          Note

                                                                                          • RBAC rules only need to be added in the Global Cluster, and the Kpanda controller will synchronize those added rules to all integrated subclusters. Synchronization may take some time to complete.
                                                                                          • RBAC rules can only be added in the Global Cluster. RBAC rules added in subclusters will be overridden by the system role permissions of the Global Cluster.
                                                                                          • Rules can only be added to ClusterRoles that carry the fixed labels below. Replacing or deleting rules is not supported, nor is adding rules via a Role. The correspondence between the built-in roles and the label a user-created ClusterRole must carry is as follows (a query sketch follows the list).

                                                                                            cluster-admin: rbac.kpanda.io/role-template-cluster-admin: \"true\"\ncluster-edit: rbac.kpanda.io/role-template-cluster-edit: \"true\"\ncluster-view: rbac.kpanda.io/role-template-cluster-view: \"true\"\nns-admin: rbac.kpanda.io/role-template-ns-admin: \"true\"\nns-edit: rbac.kpanda.io/role-template-ns-edit: \"true\"\nns-view: rbac.kpanda.io/role-template-ns-view: \"true\"\n
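
                                                                                          User-created ClusterRoles carrying these labels can be listed with a label selector, for example (a sketch using the ns-view label above):

                                                                                            kubectl get clusterrole -l rbac.kpanda.io/role-template-ns-view=true\n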
                                                                                          "},{"location":"en/end-user/kpanda/permissions/custom-kpanda-role.html#steps","title":"Steps","text":"
                                                                                          1. Create a deployment by a user with admin or cluster admin permissions.

                                                                                          2. Grant a user the ns-viewer role to provide them with the ns-view permission.

                                                                                          3. Switch the login user to ns-viewer, open the console to get the token for the ns-viewer user, and use curl to request and delete the nginx deployment mentioned above. However, a prompt appears as below, indicating the user doesn't have permission to delete it.

                                                                                            [root@master-01 ~]# curl -k -X DELETE  'https://${URL}/apis/kpanda.io/v1alpha1/clusters/cluster-member/namespaces/default/deployments/nginx' -H 'authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJOU044MG9BclBRMzUwZ2VVU2ZyNy1xMEREVWY4MmEtZmJqR05uRE1sd1lFIn0.eyJleHAiOjE3MTU3NjY1NzksImlhdCI6MTcxNTY4MDE3OSwiYXV0aF90aW1lIjoxNzE1NjgwMTc3LCJqdGkiOiIxZjI3MzJlNC1jYjFhLTQ4OTktYjBiZC1iN2IxZWY1MzAxNDEiLCJpc3MiOiJodHRwczovLzEwLjYuMjAxLjIwMTozMDE0Ny9hdXRoL3JlYWxtcy9naGlwcG8iLCJhdWQiOiJfX2ludGVybmFsLWdoaXBwbyIsInN1YiI6ImMxZmMxM2ViLTAwZGUtNDFiYS05ZTllLWE5OGU2OGM0MmVmMCIsInR5cCI6IklEIiwiYXpwIjoiX19pbnRlcm5hbC1naGlwcG8iLCJzZXNzaW9uX3N0YXRlIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiYXRfaGFzaCI6IlJhTHoyQjlKQ2FNc1RrbGVMR3V6blEiLCJhY3IiOiIwIiwic2lkIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJncm91cHMiOltdLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJucy12aWV3ZXIiLCJsb2NhbGUiOiIifQ.As2ipMjfvzvgONAGlc9RnqOd3zMwAj82VXlcqcR74ZK9tAq3Q4ruQ1a6WuIfqiq8Kq4F77ljwwzYUuunfBli2zhU2II8zyxVhLoCEBu4pBVBd_oJyUycXuNa6HfQGnl36E1M7-_QG8b-_T51wFxxVb5b7SEDE1AvIf54NAlAr-rhDmGRdOK1c9CohQcS00ab52MD3IPiFFZ8_Iljnii-RpXKZoTjdcULJVn_uZNk_SzSUK-7MVWmPBK15m6sNktOMSf0pCObKWRqHd15JSe-2aA2PKBo1jBH3tHbOgZyMPdsLI0QdmEnKB5FiiOeMpwn_oHnT6IjT-BZlB18VkW8rA'\n{\"code\":7,\"message\":\"[RBAC] delete resources(deployments: nginx) is forbidden for user(ns-viewer) in cluster(cluster-member)\",\"details\":[]}[root@master-01 ~]#\n[root@master-01 ~]#\n
                                                                                          4. Create a ClusterRole on the global cluster, as shown in the yaml below.

                                                                                            apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: append-ns-view # (1)!\n  labels:\n    rbac.kpanda.io/role-template-ns-view: \"true\" # (2)!\nrules:\n  - apiGroups: [ \"apps\" ]\n    resources: [ \"deployments\" ]\n    verbs: [ \"delete\" ]\n
                                                                                            1. This field value can be arbitrarily specified, as long as it is not duplicated and complies with the Kubernetes resource naming conventions.
                                                                                            2. When adding rules to different roles, make sure to apply different labels.
                                                                                          5. Wait for the Kpanda controller to merge the user-created rule into the built-in role ns-viewer, then check whether the rule added in the previous step is present for ns-viewer.

                                                                                            [root@master-01 ~]# kubectl get clusterrole role-template-ns-view -oyaml|grep deployments -C 10|tail -n 6\n
                                                                                            - apiGroups:\n  - apps\n  resources:\n  - deployments\n  verbs:\n  - delete\n

                                                                                          6. When you use curl again to request deletion of the aforementioned nginx deployment, the deletion succeeds. This means ns-viewer has successfully gained the rule to delete deployments.

                                                                                            [root@master-01 ~]# curl -k -X DELETE  'https://${URL}/apis/kpanda.io/v1alpha1/clusters/cluster-member/namespaces/default/deployments/nginx' -H 'authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJOU044MG9BclBRMzUwZ2VVU2ZyNy1xMEREVWY4MmEtZmJqR05uRE1sd1lFIn0.eyJleHAiOjE3MTU3NjY1NzksImlhdCI6MTcxNTY4MDE3OSwiYXV0aF90aW1lIjoxNzE1NjgwMTc3LCJqdGkiOiIxZjI3MzJlNC1jYjFhLTQ4OTktYjBiZC1iN2IxZWY1MzAxNDEiLCJpc3MiOiJodHRwczovLzEwLjYuMjAxLjIwMTozMDE0Ny9hdXRoL3JlYWxtcy9naGlwcG8iLCJhdWQiOiJfX2ludGVybmFsLWdoaXBwbyIsInN1YiI6ImMxZmMxM2ViLTAwZGUtNDFiYS05ZTllLWE5OGU2OGM0MmVmMCIsInR5cCI6IklEIiwiYXpwIjoiX19pbnRlcm5hbC1naGlwcG8iLCJzZXNzaW9uX3N0YXRlIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiYXRfaGFzaCI6IlJhTHoyQjlKQ2FNc1RrbGVMR3V6blEiLCJhY3IiOiIwIiwic2lkIjoiMGJjZWRjZTctMTliYS00NmU1LTkwYmUtOTliMWY2MWEyNzI0IiwiZW1haWxfdmVyaWZpZWQiOmZhbHNlLCJncm91cHMiOltdLCJwcmVmZXJyZWRfdXNlcm5hbWUiOiJucy12aWV3ZXIiLCJsb2NhbGUiOiIifQ.As2ipMjfvzvgONAGlc9RnqOd3zMwAj82VXlcqcR74ZK9tAq3Q4ruQ1a6WuIfqiq8Kq4F77ljwwzYUuunfBli2zhU2II8zyxVhLoCEBu4pBVBd_oJyUycXuNa6HfQGnl36E1M7-_QG8b-_T51wFxxVb5b7SEDE1AvIf54NAlAr-rhDmGRdOK1c9CohQcS00ab52MD3IPiFFZ8_Iljnii-RpXKZoTjdcULJVn_uZNk_SzSUK-7MVWmPBK15m6sNktOMSf0pCObKWRqHd15JSe-2aA2PKBo1jBH3tHbOgZyMPdsLI0QdmEnKB5FiiOeMpwn_oHnT6IjT-BZlB18VkW8rA'\n
                                                                                          "},{"location":"en/end-user/kpanda/permissions/permission-brief.html","title":"Container Management Permissions","text":"

                                                                                          Container management permissions are based on a multi-dimensional permission management system created by global permission management and Kubernetes RBAC permission management. It supports cluster-level and namespace-level permission control, helping users to conveniently and flexibly set different operation permissions for IAM users and user groups (collections of users) under a tenant.

                                                                                          "},{"location":"en/end-user/kpanda/permissions/permission-brief.html#cluster-permissions","title":"Cluster Permissions","text":"

                                                                                          Cluster permissions are granted based on Kubernetes RBAC's ClusterRoleBinding, allowing users/user groups to hold cluster-wide permissions. The current default cluster role is Cluster Admin (it does not include permission to create or delete clusters).

                                                                                          "},{"location":"en/end-user/kpanda/permissions/permission-brief.html#cluster-admin","title":"Cluster Admin","text":"

                                                                                          Cluster Admin has the following permissions:

                                                                                          • Can manage, edit, and view the corresponding cluster
                                                                                          • Manage, edit, and view all workloads and all resources within the namespace
                                                                                          • Can authorize users for roles within the cluster (Cluster Admin, NS Admin, NS Editor, NS Viewer)

                                                                                          The YAML example for this cluster role is as follows:

                                                                                          apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:49Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-cluster-admin\n  resourceVersion: \"15168\"\n  uid: f8f86d42-d5ef-47aa-b284-097615795076\nrules:\n- apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n- nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'\n
                                                                                          "},{"location":"en/end-user/kpanda/permissions/permission-brief.html#namespace-permissions","title":"Namespace Permissions","text":"

                                                                                          Namespace permissions are authorized based on Kubernetes RBAC capabilities, allowing different users/user groups to have different operation permissions on resources under a namespace (including Kubernetes API permissions). For details, refer to: Kubernetes RBAC. Currently, the default roles for container management are: NS Admin, NS Editor, NS Viewer.

                                                                                          "},{"location":"en/end-user/kpanda/permissions/permission-brief.html#ns-admin","title":"NS Admin","text":"

                                                                                          NS Admin has the following permissions:

                                                                                          • Can view the corresponding namespace
                                                                                          • Manage, edit, and view all workloads and custom resources within the namespace
                                                                                          • Can authorize users for roles within the corresponding namespace (NS Editor, NS Viewer)

                                                                                          The YAML example for this cluster role is as follows:

                                                                                          apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:49Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-admin\n  resourceVersion: \"15173\"\n  uid: 69f64c7e-70e7-4c7c-a3e0-053f507f2bc3\nrules:\n- apiGroups:\n  - '*'\n  resources:\n  - '*'\n  verbs:\n  - '*'\n- nonResourceURLs:\n  - '*'\n  verbs:\n  - '*'    \n
                                                                                          "},{"location":"en/end-user/kpanda/permissions/permission-brief.html#ns-editor","title":"NS Editor","text":"

                                                                                          NS Editor has the following permissions:

                                                                                          • Can view the corresponding namespaces where permissions are granted
                                                                                          • Manage, edit, and view all workloads within the namespace
                                                                                          Click to view the YAML example of the cluster role
                                                                                          apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:50Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-edit\n  resourceVersion: \"15175\"\n  uid: ca9e690e-96c0-4978-8915-6e4c00c748fe\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - configmaps\n  - endpoints\n  - persistentvolumeclaims\n  - persistentvolumeclaims/status\n  - pods\n  - replicationcontrollers\n  - replicationcontrollers/scale\n  - serviceaccounts\n  - services\n  - services/status\n  verbs:\n  - '*'\n- apiGroups:\n  - \"\"\n  resources:\n  - bindings\n  - events\n  - limitranges\n  - namespaces/status\n  - pods/log\n  - pods/status\n  - replicationcontrollers/status\n  - resourcequotas\n  - resourcequotas/status\n  verbs:\n  - '*'\n- apiGroups:\n  - \"\"\n  resources:\n  - namespaces\n  verbs:\n  - '*'\n- apiGroups:\n  - apps\n  resources:\n  - controllerrevisions\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - statefulsets\n  - statefulsets/scale\n  - statefulsets/status\n  verbs:\n  - '*'\n- apiGroups:\n  - autoscaling\n  resources:\n  - horizontalpodautoscalers\n  - horizontalpodautoscalers/status\n  verbs:\n  - '*'\n- apiGroups:\n  - batch\n  resources:\n  - cronjobs\n  - cronjobs/status\n  - jobs\n  - jobs/status\n  verbs:\n  - '*'\n- apiGroups:\n  - extensions\n  resources:\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - replicationcontrollers/scale\n  verbs:\n  - '*'\n- apiGroups:\n  - policy\n  resources:\n  - poddisruptionbudgets\n  - poddisruptionbudgets/status\n  verbs:\n  - '*'\n- apiGroups:\n  - networking.k8s.io\n  resources:\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  verbs:\n  - '*'      \n
                                                                                          "},{"location":"en/end-user/kpanda/permissions/permission-brief.html#ns-viewer","title":"NS Viewer","text":"

                                                                                          NS Viewer has the following permissions:

                                                                                          • Can view the corresponding namespace
                                                                                          • Can view all workloads and custom resources within the corresponding namespace
                                                                                          Click to view the YAML example of the cluster role
                                                                                          apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  annotations:\n    kpanda.io/creator: system\n  creationTimestamp: \"2022-06-16T09:42:50Z\"\n  labels:\n    iam.kpanda.io/role-template: \"true\"\n  name: role-template-ns-view\n  resourceVersion: \"15183\"\n  uid: 853888fd-6ee8-42ac-b91e-63923918baf8\nrules:\n- apiGroups:\n  - \"\"\n  resources:\n  - configmaps\n  - endpoints\n  - persistentvolumeclaims\n  - persistentvolumeclaims/status\n  - pods\n  - replicationcontrollers\n  - replicationcontrollers/scale\n  - serviceaccounts\n  - services\n  - services/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - \"\"\n  resources:\n  - bindings\n  - events\n  - limitranges\n  - namespaces/status\n  - pods/log\n  - pods/status\n  - replicationcontrollers/status\n  - resourcequotas\n  - resourcequotas/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - \"\"\n  resources:\n  - namespaces\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - apps\n  resources:\n  - controllerrevisions\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - statefulsets\n  - statefulsets/scale\n  - statefulsets/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - autoscaling\n  resources:\n  - horizontalpodautoscalers\n  - horizontalpodautoscalers/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - batch\n  resources:\n  - cronjobs\n  - cronjobs/status\n  - jobs\n  - jobs/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - extensions\n  resources:\n  - daemonsets\n  - daemonsets/status\n  - deployments\n  - deployments/scale\n  - deployments/status\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  - replicasets\n  - replicasets/scale\n  - replicasets/status\n  - replicationcontrollers/scale\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - policy\n  resources:\n  - poddisruptionbudgets\n  - poddisruptionbudgets/status\n  verbs:\n  - get\n  - list\n  - watch\n- apiGroups:\n  - networking.k8s.io\n  resources:\n  - ingresses\n  - ingresses/status\n  - networkpolicies\n  verbs:\n  - get\n  - list\n  - watch \n
                                                                                          "},{"location":"en/end-user/kpanda/permissions/permission-brief.html#permissions-faq","title":"Permissions FAQ","text":"
                                                                                          1. What is the relationship between global permissions and container management permissions?

                                                                                            Answer: Global permissions grant only coarse-grained authority, covering the creation, editing, and deletion of all clusters. Fine-grained permissions, such as management of a single cluster or the management, editing, and deletion permissions within a single namespace, need to be implemented through Kubernetes-RBAC-based container management permissions. Generally, users only need to be authorized in container management.

                                                                                          2. Currently, only four default roles are supported. Can the RoleBinding and ClusterRoleBinding (Kubernetes fine-grained RBAC) for custom roles also take effect?

                                                                                            Answer: Currently, custom permissions cannot be managed through the graphical interface, but the permission rules created using kubectl can still take effect.
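
                                                                                          For example, a custom fine-grained rule could be created with kubectl as sketched below (role, user, and namespace names are illustrative):

                                                                                            # a custom role limited to reading Pods, bound to a single user\nkubectl create role pod-reader --verb=get,list,watch --resource=pods -n default\nkubectl create rolebinding pod-reader-binding --role=pod-reader --user=dev-user -n default\n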

                                                                                          "},{"location":"en/end-user/kpanda/scale/create-hpa.html","title":"Create HPA","text":"

                                                                                          Suanova AI platform supports elastic scaling of Pod resources based on metrics (Horizontal Pod Autoscaling, HPA). Users can dynamically adjust the number of Pod replicas by setting CPU utilization, memory usage, and custom metrics. For example, after setting an auto scaling policy based on CPU utilization for a workload, when the CPU utilization of its Pods rises above or falls below the threshold you set, the workload controller automatically increases or decreases the number of Pod replicas.

                                                                                          This page describes how to configure auto scaling based on built-in metrics and custom metrics for workloads.

                                                                                          Note

                                                                                          1. HPA applies only to Deployments and StatefulSets, and each workload can have only one HPA.
                                                                                          2. If you create an HPA policy based on CPU utilization, you must set the resource limit (Limit) for the workload in advance; otherwise the CPU utilization cannot be calculated.
                                                                                          3. If built-in metrics and multiple custom metrics are used at the same time, HPA calculates the required number of replicas for each metric and scales to the largest value (without exceeding the maximum replica count configured in the HPA policy).
                                                                                          "},{"location":"en/end-user/kpanda/scale/create-hpa.html#built-in-metric-elastic-scaling-policy","title":"Built-in metric elastic scaling policy","text":"

                                                                                          The system has two built-in elastic scaling metrics, CPU and memory, which cover most basic business cases.

                                                                                          "},{"location":"en/end-user/kpanda/scale/create-hpa.html#prerequisites","title":"Prerequisites","text":"

                                                                                          Before configuring a built-in metric auto scaling policy for the workload, the following prerequisites need to be met:

                                                                                          • Integrated the Kubernetes cluster or created the Kubernetes cluster, and you can access the UI interface of the cluster.

                                                                                          • Created a namespace, deployment or statefulset.

                                                                                          • You should have permissions not lower than NS Editor. For details, refer to Namespace Authorization.

                                                                                          • Installed the metrics-server plugin.

                                                                                          "},{"location":"en/end-user/kpanda/scale/create-hpa.html#steps","title":"Steps","text":"

                                                                                          Refer to the following steps to configure a built-in metric auto scaling policy for the workload.

                                                                                          1. Click Clusters on the left navigation bar to enter the cluster list page. Click a cluster name to enter the Cluster Details page.

                                                                                          2. On the cluster details page, click Workload in the left navigation bar to enter the workload list, and then click a workload name to enter the Workload Details page.

                                                                                          3. Click the Auto Scaling tab to view the auto scaling configuration of the current cluster.

                                                                                          4. After confirming that the cluster has installed the metrics-server plug-in, and the plug-in is running normally, you can click the New Scaling button.

                                                                                          5. Configure the auto scaling policy parameters.

                                                                                            • Policy name: Enter the name of the auto scaling policy. The name can contain up to 63 characters, may only contain lowercase letters, numbers, and hyphens (\"-\"), and must start and end with a lowercase letter or number, for example hpa-my-dep.
                                                                                            • Namespace: The namespace where the workload resides.
                                                                                            • Workload: The workload object that performs auto scaling.
                                                                                            • Target CPU Utilization: The CPU usage of the Pods under the workload, calculated as the actual CPU usage of all Pods under the workload divided by the workload's CPU request value. When the actual CPU utilization is above/below the target value, the system automatically increases/decreases the number of Pod replicas.
                                                                                            • Target Memory Usage: The memory usage of the Pods under the workload. When the actual memory usage is above/below the target value, the system automatically increases/decreases the number of Pod replicas.
                                                                                            • Replica range: The elastic scaling range for the number of Pod replicas. The default interval is 1 - 10.
                                                                                          6. After completing the parameter configuration, click the OK button to automatically return to the elastic scaling details page. Click \u2507 on the right side of the list to edit, delete, and view related events.
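
                                                                                          For reference, an equivalent built-in CPU policy can be sketched with kubectl (workload name, namespace, and thresholds are illustrative):

                                                                                            # target 80% CPU utilization, scaling between 1 and 10 replicas\nkubectl -n default autoscale deployment my-dep --cpu-percent=80 --min=1 --max=10\nkubectl -n default get hpa\n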

                                                                                          "},{"location":"en/end-user/kpanda/scale/create-vpa.html","title":"Create VPAs","text":"

                                                                                          The container Vertical Pod Autoscaler (VPA) calculates the most suitable CPU and memory request values for a Pod by monitoring the Pod's resource requests and usage over a period of time. Using VPA allocates resources to each Pod in the cluster more reasonably, improving overall cluster resource utilization and avoiding waste of cluster resources.

AI platform supports container vertical scaling (VPA). Based on this feature, Pod request values can be dynamically adjusted according to container resource usage. The platform supports both manual and automatic modification of resource request values, which you can configure according to actual needs.

This page describes how to configure VPA for a deployment.

                                                                                          Warning

                                                                                          Using VPA to modify a Pod resource request will trigger a Pod restart. Due to the limitations of Kubernetes itself, Pods may be scheduled to other nodes after restarting.

                                                                                          "},{"location":"en/end-user/kpanda/scale/create-vpa.html#prerequisites","title":"Prerequisites","text":"

Before configuring a vertical scaling policy for a deployment, the following prerequisites must be met:

• In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and make sure the cluster UI is accessible.

• Create a namespace and a user, and create Deployments or StatefulSets.

                                                                                          • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

• The metrics-server and VPA plugins have been installed in the current cluster.

                                                                                          "},{"location":"en/end-user/kpanda/scale/create-vpa.html#steps","title":"Steps","text":"

Refer to the following steps to configure a VPA policy for the deployment.

1. Find the target cluster in Clusters , and click the cluster name.

                                                                                          2. Click Deployments in the left navigation bar, find the deployment that needs to create a VPA, and click the name of the deployment.

3. Click the Auto Scaling tab to view the auto scaling configuration of the current workload, and confirm that the relevant plugins have been installed and are running normally.

                                                                                          4. Click the Create Autoscaler button and configure the VPA vertical scaling policy parameters.

  • Policy name: Enter the name of the vertical scaling policy. Note that the name can contain up to 63 characters, may only contain lowercase letters, numbers, and separators (\"-\"), and must start and end with a lowercase letter or number, e.g., vpa-my-dep.
  • Scaling mode: The method used to modify the CPU and memory request values. Vertical scaling currently supports manual and automatic scaling modes.
                                                                                              • Manual scaling: After the vertical scaling policy calculates the recommended resource configuration value, the user needs to manually modify the resource quota of the application.
                                                                                              • Auto-scaling: The vertical scaling policy automatically calculates and modifies the resource quota of the application.
                                                                                            • Target container: Select the container to be scaled vertically.
                                                                                          5. After completing the parameter configuration, click the OK button to automatically return to the elastic scaling details page. Click \u2507 on the right side of the list to perform edit and delete operations.
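
For reference, such a policy corresponds roughly to a VerticalPodAutoscaler object from the Kubernetes autoscaler project. Below is a minimal sketch; the names vpa-my-dep and my-dep are illustrative, and updateMode \"Off\" roughly matches the manual scaling mode while \"Auto\" matches the auto-scaling mode:

apiVersion: autoscaling.k8s.io/v1\nkind: VerticalPodAutoscaler\nmetadata:\n  name: vpa-my-dep          # illustrative policy name\nspec:\n  targetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: my-dep            # illustrative workload name\n  updatePolicy:\n    updateMode: \"Off\"       # \"Off\": only recommend values (manual mode); \"Auto\": apply them automatically\n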

                                                                                          "},{"location":"en/end-user/kpanda/scale/custom-hpa.html","title":"Creating HPA Based on Custom Metrics","text":"

When the built-in CPU and memory metrics in the system do not meet your business needs, you can add custom metrics by configuring a ServiceMonitor and achieve auto-scaling based on them. This article introduces how to configure auto-scaling for workloads based on custom metrics.

                                                                                          Note

                                                                                          1. HPA is only applicable to Deployment and StatefulSet, and each workload can only create one HPA.
                                                                                          2. If both built-in metrics and multiple custom metrics are used, HPA will calculate the required number of scaled replicas based on multiple metrics respectively, and take the larger value (but not exceeding the maximum number of replicas configured when setting the HPA policy) for scaling.
                                                                                          "},{"location":"en/end-user/kpanda/scale/custom-hpa.html#prerequisites","title":"Prerequisites","text":"

                                                                                          Before configuring the custom metrics auto-scaling policy for workloads, the following prerequisites must be met:

                                                                                          • Integrated Kubernetes cluster or created Kubernetes cluster, and able to access the cluster's UI interface.
• Completed creation of a namespace, Deployment, or StatefulSet.
• The current user should have NS Editor or higher permissions. For details, refer to namespace authorization.
                                                                                          • metrics-server plugin has been installed.
                                                                                          • insight-agent plugin has been installed.
                                                                                          • Prometheus-adapter plugin has been installed.
                                                                                          "},{"location":"en/end-user/kpanda/scale/custom-hpa.html#steps","title":"Steps","text":"

                                                                                          Refer to the following steps to configure the auto-scaling policy based on metrics for workloads.

                                                                                          1. Click Clusters in the left navigation bar to enter the clusters page. Click a cluster name to enter the Cluster Overview page.

                                                                                          2. On the Cluster Details page, click Workloads in the left navigation bar to enter the workload list, and click a workload name to enter the Workload Details page.

3. Click the Auto Scaling tab to view the current autoscaling configuration of the workload.

                                                                                          4. Confirm that the cluster has installed metrics-server, Insight, and Prometheus-adapter plugins, and that the plugins are running normally, then click the Create AutoScaler button.

                                                                                            Note

                                                                                            If the related plugins are not installed or the plugins are in an abnormal state, you will not be able to see the entry for creating custom metrics auto-scaling on the page.

                                                                                          5. Create custom metrics auto-scaling policy parameters.

                                                                                            • Policy Name: Enter the name of the auto-scaling policy. Note that the name can be up to 63 characters long, can only contain lowercase letters, numbers, and separators (\"-\"), and must start and end with a lowercase letter or number, e.g., hpa-my-dep.
                                                                                            • Namespace: The namespace where the workload is located.
                                                                                            • Workload: The workload object that performs auto-scaling.
                                                                                            • Resource Type: The type of custom metric being monitored, including Pod and Service types.
  • Metric: The name of the custom metric created using a ServiceMonitor, or the name of a system-built custom metric.
                                                                                            • Data Type: The method used to calculate the metric value, including target value and target average value. When the resource type is Pod, only the target average value can be used.
                                                                                          "},{"location":"en/end-user/kpanda/scale/custom-hpa.html#operation-example","title":"Operation Example","text":"

This case takes a Golang business program as an example. The example program exposes the httpserver_requests_total metric, which records HTTP requests and can be used to calculate the QPS of the business program.

                                                                                          "},{"location":"en/end-user/kpanda/scale/custom-hpa.html#deploy-business-program","title":"Deploy Business Program","text":"

                                                                                          Use Deployment to deploy the business program:

                                                                                          apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: httpserver\n  namespace: httpserver\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: httpserver\n  template:\n    metadata:\n      labels:\n        app: httpserver\n    spec:\n      containers:\n      - name: httpserver\n        image: registry.imroc.cc/test/httpserver:custom-metrics\n        imagePullPolicy: Always\n---\n\napiVersion: v1\nkind: Service\nmetadata:\n  name: httpserver\n  namespace: httpserver\n  labels:\n    app: httpserver\n  annotations:\n    prometheus.io/scrape: \"true\"\n    prometheus.io/path: \"/metrics\"\n    prometheus.io/port: \"http\"\nspec:\n  type: ClusterIP\n  ports:\n  - port: 80\n    protocol: TCP\n    name: http\n  selector:\n    app: httpserver\n
                                                                                          "},{"location":"en/end-user/kpanda/scale/custom-hpa.html#prometheus-collects-business-monitoring","title":"Prometheus Collects Business Monitoring","text":"

                                                                                          If the insight-agent is installed, Prometheus can be configured by creating a ServiceMonitor CRD object.

Operation steps: In Cluster Details -> Custom Resources, search for \"servicemonitors.monitoring.coreos.com\", and click the name to enter the details. Create the following example CRD in the httpserver namespace via YAML:

                                                                                          apiVersion: monitoring.coreos.com/v1\nkind: ServiceMonitor\nmetadata:\n  name: httpserver\n  namespace: httpserver\n  labels:\n    operator.insight.io/managed-by: insight\nspec:\n  endpoints:\n  - port: http\n    interval: 5s\n  namespaceSelector:\n    matchNames:\n    - httpserver\n  selector:\n    matchLabels:\n      app: httpserver\n

                                                                                          Note

                                                                                          If Prometheus is installed via insight, the serviceMonitor must be labeled with operator.insight.io/managed-by: insight. If installed by other means, this label is not required.

                                                                                          "},{"location":"en/end-user/kpanda/scale/custom-hpa.html#configure-metric-rules-in-prometheus-adapter","title":"Configure Metric Rules in Prometheus-adapter","text":"

Operation steps: In Clusters -> Helm Apps, search for \"prometheus-adapter\", enter the update page through the action bar, and configure custom metrics in YAML as follows:

                                                                                          rules:\n  custom:\n    - metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)\n      name:\n        as: httpserver_requests_qps\n        matches: httpserver_requests_total\n      resources:\n        template: <<.Resource>>\n      seriesQuery: httpserver_requests_total\n
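
After prometheus-adapter reloads this configuration, you can optionally verify from the command line that the new metric is served through the custom metrics API. The following query is a sketch; the httpserver namespace matches the example above, and the exact output depends on your environment:

kubectl get --raw \"/apis/custom.metrics.k8s.io/v1beta1/namespaces/httpserver/pods/*/httpserver_requests_qps\"\n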
                                                                                          "},{"location":"en/end-user/kpanda/scale/custom-hpa.html#create-custom-metrics-auto-scaling-policy-parameters","title":"Create Custom Metrics Auto-scaling Policy Parameters","text":"

Following the steps above, find the httpserver application in Deployments and create an auto-scaling policy using the custom metric.
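
The resulting policy corresponds roughly to an HPA that references the custom metric, sketched below; the replica range and the target of 50 requests per second per Pod are illustrative values:

apiVersion: autoscaling/v2\nkind: HorizontalPodAutoscaler\nmetadata:\n  name: httpserver\n  namespace: httpserver\nspec:\n  scaleTargetRef:\n    apiVersion: apps/v1\n    kind: Deployment\n    name: httpserver\n  minReplicas: 1\n  maxReplicas: 10\n  metrics:\n  - type: Pods\n    pods:\n      metric:\n        name: httpserver_requests_qps   # the custom metric exposed via prometheus-adapter\n      target:\n        type: AverageValue\n        averageValue: \"50\"              # illustrative target QPS per Pod\n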

                                                                                          "},{"location":"en/end-user/kpanda/scale/hpa-cronhpa-compatibility-rules.html","title":"Compatibility Rules for HPA and CronHPA","text":"

                                                                                          HPA stands for HorizontalPodAutoscaler, which refers to horizontal pod auto-scaling.

                                                                                          CronHPA stands for Cron HorizontalPodAutoscaler, which refers to scheduled horizontal pod auto-scaling.

                                                                                          "},{"location":"en/end-user/kpanda/scale/hpa-cronhpa-compatibility-rules.html#conflict-between-cronhpa-and-hpa","title":"Conflict Between CronHPA and HPA","text":"

Scheduled scaling with CronHPA triggers horizontal pod scaling at specified times. To cope with sudden traffic surges, you may also have configured HPA to ensure the normal operation of your application. If both HPA and CronHPA are configured, conflicts arise because they operate independently and are unaware of each other: the action performed last overrides the one executed first.

                                                                                          By comparing the definition templates of CronHPA and HPA, the following points can be observed:

                                                                                          • Both CronHPA and HPA use the scaleTargetRef field to identify the scaling target.
                                                                                          • CronHPA schedules the number of replicas to scale based on crontab rules in jobs.
                                                                                          • HPA determines scaling based on resource utilization.
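
To make the conflict concrete, the sketch below shows how both objects can end up referencing the same Deployment through scaleTargetRef (the name my-dep is illustrative):

# Fragment of an HPA spec:\nscaleTargetRef:\n  apiVersion: apps/v1\n  kind: Deployment\n  name: my-dep   # illustrative\n---\n# Fragment of a CronHPA spec targeting the same workload:\nscaleTargetRef:\n  apiVersion: apps/v1\n  kind: Deployment\n  name: my-dep   # same target, so the two controllers overwrite each other\n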

                                                                                          Note

                                                                                          If both CronHPA and HPA are set, there will be scenarios where CronHPA and HPA simultaneously operate on a single scaleTargetRef.

                                                                                          "},{"location":"en/end-user/kpanda/scale/hpa-cronhpa-compatibility-rules.html#compatibility-solution-for-cronhpa-and-hpa","title":"Compatibility Solution for CronHPA and HPA","text":"

                                                                                          As noted above, the fundamental reason that simultaneous use of CronHPA and HPA results in the later action overriding the earlier one is that the two controllers cannot sense each other. Therefore, the conflict can be resolved by enabling CronHPA to be aware of HPA's current state.

                                                                                          The system will treat HPA as the scaling object for CronHPA, thus achieving scheduled scaling for the Deployment object defined by the HPA.

                                                                                          HPA's definition configures the Deployment in the scaleTargetRef field, and then the Deployment uses its definition to locate the ReplicaSet, which ultimately adjusts the actual number of replicas.

                                                                                          In AI platform, the scaleTargetRef in CronHPA is set to the HPA object, and it uses the HPA object to find the actual scaleTargetRef, allowing CronHPA to be aware of HPA's current state.

CronHPA senses HPA's state by adjusting the HPA itself. To decide whether scaling is needed, CronHPA compares its target replica count with the current replica count and sets the HPA upper limit to the larger value. Similarly, CronHPA compares its target replica count with the lower limit configured in the HPA and sets the HPA lower limit to the smaller value.
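
The following sketch shows this pattern using the open-source kubernetes-cronhpa-controller CRD; field names may vary slightly between versions, and the job names, schedules, and sizes are illustrative:

apiVersion: autoscaling.alibabacloud.com/v1beta1\nkind: CronHorizontalPodAutoscaler\nmetadata:\n  name: cronhpa-sample\nspec:\n  scaleTargetRef:\n    apiVersion: autoscaling/v2\n    kind: HorizontalPodAutoscaler   # points at the HPA rather than the Deployment\n    name: hpa-my-dep                # illustrative HPA name\n  jobs:\n  - name: \"scale-up-morning\"\n    schedule: \"0 0 8 * * *\"         # six-field cron: at 08:00 every day\n    targetSize: 5                   # raises the HPA upper limit if larger\n  - name: \"scale-down-night\"\n    schedule: \"0 0 20 * * *\"        # at 20:00 every day\n    targetSize: 1                   # lowers the HPA lower limit if smaller\n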

                                                                                          "},{"location":"en/end-user/kpanda/scale/install-cronhpa.html","title":"Install kubernetes-cronhpa-controller","text":"

CronHPA, the scheduled horizontal pod auto-scaling policy, provides a stable compute resource guarantee for applications with periodic high concurrency, and kubernetes-cronhpa-controller is the key component that implements CronHPA.

                                                                                          This section describes how to install the kubernetes-cronhpa-controller plugin.

                                                                                          Note

To use CronHPA, you must install not only the kubernetes-cronhpa-controller plugin but also the metrics-server plugin.

                                                                                          "},{"location":"en/end-user/kpanda/scale/install-cronhpa.html#prerequisites","title":"Prerequisites","text":"

                                                                                          Before installing the kubernetes-cronhpa-controller plugin, the following prerequisites need to be met:

• In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and make sure the cluster UI is accessible.

                                                                                          • Create a namespace.

                                                                                          • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                                                                          "},{"location":"en/end-user/kpanda/scale/install-cronhpa.html#steps","title":"Steps","text":"

                                                                                          Refer to the following steps to install the kubernetes-cronhpa-controller plugin for the cluster.

                                                                                          1. On the Clusters page, find the target cluster where the plugin needs to be installed, click the name of the cluster, then click Workloads -> Deployments on the left, and click the name of the target workload.

                                                                                          2. On the workload details page, click the Auto Scaling tab, and click Install on the right side of CronHPA .

3. Read the plugin introduction, select a version, and click the Install button. Version 1.3.0 or later is recommended.

                                                                                          4. Refer to the following instructions to configure the parameters.

                                                                                            • Name: Enter the plugin name, please note that the name can be up to 63 characters, can only contain lowercase letters, numbers, and separators (\"-\"), and must start and end with lowercase letters or numbers, such as kubernetes-cronhpa-controller.
                                                                                            • Namespace: Select which namespace the plugin will be installed in, here we take default as an example.
                                                                                            • Version: The version of the plugin, here we take the 1.3.0 version as an example.
                                                                                            • Ready Wait: When enabled, it will wait for all associated resources under the application to be in the ready state before marking the application installation as successful.
                                                                                            • Failed to delete: If the plugin installation fails, delete the associated resources that have already been installed. When enabled, Wait will be enabled synchronously by default.
                                                                                            • Detailed log: When enabled, a detailed log of the installation process will be recorded.

                                                                                            Note

After enabling Ready Wait and/or Failed to delete , it takes a relatively long time for the application to be marked as \"running\".

                                                                                          5. Click OK in the lower right corner of the page, and the system will automatically jump to the Helm Apps list page. Wait a few minutes and refresh the page to see the application you just installed.

                                                                                            Warning

                                                                                            If you need to delete the kubernetes-cronhpa-controller plugin, you should go to the Helm Apps list page to delete it completely.

If you delete the plugin under the Auto Scaling tab of the workload, this only deletes the plugin's workload replica; the plugin itself is not deleted, and an error will be prompted when the plugin is reinstalled later.

6. Go back to the Auto Scaling tab under the workload details page; the interface now displays Plugin installed . You can now start creating CronHPA policies.

                                                                                          "},{"location":"en/end-user/kpanda/scale/install-metrics-server.html","title":"Install metrics-server","text":"

metrics-server is Kubernetes' built-in resource usage metrics collection component. By configuring HPA policies, you can automatically scale Pod replicas horizontally for workload resources.

                                                                                          This section describes how to install metrics-server .

                                                                                          "},{"location":"en/end-user/kpanda/scale/install-metrics-server.html#prerequisites","title":"Prerequisites","text":"

                                                                                          Before installing the metrics-server plugin, the following prerequisites need to be met:

                                                                                          • Integrated the Kubernetes cluster or created the Kubernetes cluster, and you can access the UI interface of the cluster.

                                                                                          • Created a namespace.

                                                                                          • You should have permissions not lower than NS Editor. For details, refer to Namespace Authorization.

                                                                                          "},{"location":"en/end-user/kpanda/scale/install-metrics-server.html#steps","title":"Steps","text":"

                                                                                          Please perform the following steps to install the metrics-server plugin for the cluster.

                                                                                          1. On the Auto Scaling page under workload details, click the Install button to enter the metrics-server plug-in installation interface.

2. Read the introduction of the metrics-server plugin, select a version, and click the Install button. This page uses version 3.8.2 as an example; version 3.8.2 or later is recommended.

                                                                                          3. Configure basic parameters on the installation configuration interface.

                                                                                            • Name: Enter the plugin name, please note that the name can be up to 63 characters, can only contain lowercase letters, numbers and separators (\"-\"), and must start and end with lowercase letters or numbers, such as metrics-server-01.
                                                                                            • Namespace: Select the namespace for plugin installation, here we take default as an example.
                                                                                            • Version: The version of the plugin, here we take 3.8.2 version as an example.
                                                                                            • Ready Wait: When enabled, it will wait for all associated resources under the application to be ready before marking the application installation as successful.
  • Failed to delete: If the plugin installation fails, the associated resources that have already been installed will be deleted. When enabled, Ready Wait will be enabled synchronously by default.
                                                                                            • Verbose log: Turn on the verbose output of the installation process log.

                                                                                            Note

After enabling Ready Wait and/or Failed to delete , it takes a relatively long time for the app to be marked as Running .

                                                                                          4. Advanced parameter configuration

  • If the cluster network cannot access the k8s.gcr.io repository, try modifying the repository parameter to repository: k8s.m.daocloud.io/metrics-server/metrics-server .

  • An SSL certificate is also required to install the metrics-server plugin. To bypass certificate verification, add the - --kubelet-insecure-tls parameter under defaultArgs: .

                                                                                            Click to view and use the YAML parameters to replace the default YAML
image:\n  repository: k8s.m.daocloud.io/metrics-server/metrics-server # Change the registry source address to k8s.m.daocloud.io\n  tag: ''\n  pullPolicy: IfNotPresent\nimagePullSecrets: []\nnameOverride: ''\nfullnameOverride: ''\nserviceAccount:\n  create: true\n  annotations: {}\n  name: ''\nrbac:\n  create: true\n  pspEnabled: false\napiService:\n  create: true\npodLabels: {}\npodAnnotations: {}\npodSecurityContext: {}\nsecurityContext:\n  allowPrivilegeEscalation: false\n  readOnlyRootFilesystem: true\n  runAsNonRoot: true\n  runAsUser: 1000\npriorityClassName: system-cluster-critical\ncontainerPort: 4443\nhostNetwork:\n  enabled: false\nreplicas: 1\nupdateStrategy: {}\npodDisruptionBudget:\n  enabled: false\n  minAvailable: null\n  maxUnavailable: null\ndefaultArgs:\n  - '--cert-dir=/tmp'\n  - '--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname'\n  - '--kubelet-use-node-status-port'\n  - '--metric-resolution=15s'\n  - --kubelet-insecure-tls # Bypass certificate verification\nargs: []\nlivenessProbe:\n  httpGet:\n    path: /livez\n    port: https\n    scheme: HTTPS\n  initialDelaySeconds: 0\n  periodSeconds: 10\n  failureThreshold: 3\nreadinessProbe:\n  httpGet:\n    path: /readyz\n    port: https\n    scheme: HTTPS\n  initialDelaySeconds: 20\n  periodSeconds: 10\n  failureThreshold: 3\nservice:\n  type: ClusterIP\n  port: 443\n  annotations: {}\n  labels: {}\nmetrics:\n  enabled: false\nserviceMonitor:\n  enabled: false\n  additionalLabels: {}\n  interval: 1m\n  scrapeTimeout: 10s\nresources: {}\nextraVolumeMounts: []\nextraVolumes: []\nnodeSelector: {}\ntolerations: []\naffinity: {}\n
                                                                                          5. Click the OK button to complete the installation of the metrics-server plug-in, and then the system will automatically jump to the Helm Apps list page. After a few minutes, refresh the page and you will see the newly installed Applications.

                                                                                          Note

When deleting the metrics-server plugin, the plugin can only be completely deleted on the Helm Apps list page. If you only delete metrics-server on the workload page, this only deletes the plugin's workload replica; the application itself is not deleted, and an error will be prompted when you reinstall the plugin later.
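
Once the plugin is running, a quick command-line sanity check can confirm that metrics are being collected, for example:

kubectl top nodes       # CPU/memory usage per node\nkubectl top pods -A     # CPU/memory usage per Pod across all namespaces\n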

                                                                                          "},{"location":"en/end-user/kpanda/scale/install-vpa.html","title":"Install vpa","text":"

The Vertical Pod Autoscaler (VPA) makes cluster resource allocation more reasonable and avoids wasting cluster resources. vpa is the key component for implementing vertical container autoscaling.

                                                                                          This section describes how to install the vpa plugin.

Note

To use VPA policies, you must install not only the vpa plugin but also the metrics-server plugin.
                                                                                          "},{"location":"en/end-user/kpanda/scale/install-vpa.html#prerequisites","title":"Prerequisites","text":"

                                                                                          Before installing the vpa plugin, the following prerequisites need to be met:

• In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and make sure the cluster UI is accessible.

                                                                                          • Create a namespace.

                                                                                          • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                                                                          "},{"location":"en/end-user/kpanda/scale/install-vpa.html#steps","title":"Steps","text":"

                                                                                          Refer to the following steps to install the vpa plugin for the cluster.

                                                                                          1. On the Clusters page, find the target cluster where the plugin needs to be installed, click the name of the cluster, then click Workloads -> Deployments on the left, and click the name of the target workload.

                                                                                          2. On the workload details page, click the Auto Scaling tab, and click Install on the right side of VPA .

3. Read the plugin introduction, select a version, and click the Install button. Version 1.5.0 or later is recommended.

                                                                                          4. Review the configuration parameters described below.

  • Name: Enter the plugin name. Note that the name can be up to 63 characters, may only contain lowercase letters, numbers, and separators (\"-\"), and must start and end with a lowercase letter or number, such as vpa.
                                                                                            • Namespace: Select which namespace the plugin will be installed in, here we take default as an example.
                                                                                            • Version: The version of the plugin, here we take the 1.5.0 version as an example.
                                                                                            • Ready Wait: When enabled, it will wait for all associated resources under the application to be in the ready state before marking the application installation as successful.
                                                                                            • Failed to delete: If the plugin installation fails, delete the associated resources that have already been installed. When enabled, Wait will be enabled synchronously by default.
                                                                                            • Detailed log: When enabled, a detailed log of the installation process will be recorded.

                                                                                            Note

After enabling Ready Wait and/or Failed to delete , it takes a relatively long time for the application to be marked as running .

                                                                                          5. Click OK in the lower right corner of the page, and the system will automatically jump to the Helm Apps list page. Wait a few minutes and refresh the page to see the application you just installed.

                                                                                            Warning

                                                                                            If you need to delete the vpa plugin, you should go to the Helm Apps list page to delete it completely.

If you delete the plugin under the Auto Scaling tab of the workload, this only deletes the plugin's workload replica; the plugin itself is not deleted, and an error will be prompted when the plugin is reinstalled later.

6. Go back to the Auto Scaling tab under the workload details page; the interface now displays Plugin installed . You can now start creating VPA policies.

                                                                                          "},{"location":"en/end-user/kpanda/scale/knative/install.html","title":"Installation","text":"

                                                                                          Knative is a platform-agnostic solution for running serverless deployments.

                                                                                          "},{"location":"en/end-user/kpanda/scale/knative/install.html#steps","title":"Steps","text":"
                                                                                          1. Log in to the cluster, click the sidebar Helm Apps \u2192 Helm Charts , enter knative in the search box at the top right, and then press the enter key to search.

2. Click the knative-operator to enter the installation configuration interface. On this interface you can view the available versions and the optional Parameters of the Helm values.

                                                                                          3. After clicking the install button, you will enter the installation configuration interface.

4. Enter the name and installation tenant; it is recommended to check Wait and Detailed Logs .

                                                                                          5. In the settings below, you can tick Serving and enter the installation tenant of the Knative Serving component, which will deploy the Knative Serving component after installation. This component is managed by the Knative Operator.

                                                                                          "},{"location":"en/end-user/kpanda/scale/knative/knative.html","title":"Knative Introduction","text":"

                                                                                          Knative provides a higher level of abstraction, simplifying and speeding up the process of building, deploying, and managing applications on Kubernetes. It allows developers to focus more on implementing business logic, while leaving most of the infrastructure and operations work to Knative, significantly improving productivity.

                                                                                          "},{"location":"en/end-user/kpanda/scale/knative/knative.html#components","title":"Components","text":"

                                                                                          The Knative operator runs the following components.

                                                                                          knative-operator   knative-operator-58f7d7db5c-7f6r5      1/1     Running     0     6m55s\nknative-operator   operator-webhook-667dc67bc-qvrv4       1/1     Running     0     6m55s\n

                                                                                          The Knative serving components are as follows.

                                                                                          knative-serving        3scale-kourier-gateway-d69fbfbd-bd8d8   1/1     Running     0                 7m13s\nknative-serving        activator-7c6fddd698-wdlng              1/1     Running     0                 7m3s\nknative-serving        autoscaler-8f4b876bb-kd25p              1/1     Running     0                 7m17s\nknative-serving        autoscaler-hpa-5f7f74679c-vkc7p         1/1     Running     0                 7m15s\nknative-serving        controller-789c896c46-tfvsv             1/1     Running     0                 7m17s\nknative-serving        net-kourier-controller-7db578c889-7gd5l 1/1     Running     0                 7m14s\nknative-serving        webhook-5c88b94c5-78x7m                 1/1     Running     0                 7m1s\nknative-serving        storage-version-migration-serving-serving-1.12.2-t7zvd   0/1  Completed   0   7m15s\n
| Component | Features |
| --- | --- |
| Activator | Queues requests (if a Knative Service has scaled to zero). Calls the autoscaler to bring back services that have scaled down to zero and forwards queued requests. The Activator can also act as a request buffer, handling bursts of traffic. |
| Autoscaler | Responsible for scaling Knative services based on configuration, metrics, and incoming requests. |
| Controller | Manages the state of Knative CRs. It monitors multiple objects, manages the lifecycle of dependent resources, and updates resource status. |
| Queue-Proxy | Sidecar container injected into each Knative Service. Responsible for collecting traffic data and reporting it to the Autoscaler, which then initiates scaling requests based on this data and preset rules. |
| Webhooks | Knative Serving has several webhooks responsible for validating and mutating Knative resources. |

"},{"location":"en/end-user/kpanda/scale/knative/knative.html#ingress-traffic-entry-solutions","title":"Ingress Traffic Entry Solutions","text":"

| Solution | Use Case |
| --- | --- |
| Istio | If Istio is already in use, it can be chosen as the traffic entry solution. |
| Contour | If Contour has been enabled in the cluster, it can be chosen as the traffic entry solution. |
| Kourier | If neither of the above two Ingress components is present, Knative's Envoy-based Kourier Ingress can be used as the traffic entry solution. |

"},{"location":"en/end-user/kpanda/scale/knative/knative.html#autoscaler-solutions-comparison","title":"Autoscaler Solutions Comparison","text":"

| Autoscaler Type | Core Part of Knative Serving | Enabled by Default | Scale to Zero Support | CPU-based Autoscaling Support |
| --- | --- | --- | --- | --- |
| Knative Pod Autoscaler (KPA) | Yes | Yes | Yes | No |
| Horizontal Pod Autoscaler (HPA) | No | Needs to be enabled after installing Knative Serving | No | Yes |

"},{"location":"en/end-user/kpanda/scale/knative/knative.html#crd","title":"CRD","text":"

| Resource Type | API Name | Description |
| --- | --- | --- |
| Services | service.serving.knative.dev | Automatically manages the entire lifecycle of workloads, controls the creation of other objects, and ensures that applications have Routes, Configurations, and a new revision with each update. |
| Routes | route.serving.knative.dev | Maps network endpoints to one or more revisions; supports traffic distribution and version routing. |
| Configurations | configuration.serving.knative.dev | Maintains the desired state of deployments, provides separation between code and configuration, and follows the Twelve-Factor App methodology; modifying a configuration creates a new revision. |
| Revisions | revision.serving.knative.dev | A snapshot of the workload at each modification point in time; an immutable object that automatically scales based on traffic. |

"},{"location":"en/end-user/kpanda/scale/knative/playground.html","title":"Knative Practices","text":"

                                                                                          In this section, we will delve into learning Knative through several practical exercises.

                                                                                          "},{"location":"en/end-user/kpanda/scale/knative/playground.html#case-1-hello-world","title":"case 1 - Hello World","text":"
                                                                                          apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n

                                                                                          You can use kubectl to check the status of a deployed application that has been automatically configured with ingress and scalers by Knative.

                                                                                          ~ kubectl get service.serving.knative.dev/hello\nNAME    URL                                              LATESTCREATED   LATESTREADY   READY   REASON\nhello   http://hello.knative-serving.knative.loulan.me   hello-00001     hello-00001   True\n

The deployed Pod YAML is as follows; the Pod consists of two containers: user-container and queue-proxy.

                                                                                          apiVersion: v1\nkind: Pod\nmetadata:\n  name: hello-00003-deployment-5fcb8ccbf-7qjfk\nspec:\n  containers:\n  - name: user-container\n  - name: queue-proxy\n

                                                                                          Request Flow:

1. case1 When there is low or no traffic, requests are routed to the activator.
2. case2 When traffic is high, requests are routed directly to the Pods only if the available capacity exceeds the target-burst-capacity.
  1. Configured as 0, the activator is in the request path only when scaling from zero.
  2. Configured as -1, the activator is always present in the request path.
  3. Configured as >0, the value is the number of additional concurrent requests the system can absorb before triggering scaling.
3. case3 When traffic decreases again, requests are routed back through the activator once current_demand + target-burst-capacity > (pods * concurrency-target).

                                                                                            The total number of pending requests + the number of requests that can exceed the target concurrency > the target concurrency per Pod * number of Pods.
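
The target-burst-capacity described above is configured per revision through an annotation, as in the sketch below (the value 200 is illustrative):

apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target-burst-capacity: \"200\"   # 0, -1, or > 0 as described above\n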

                                                                                          "},{"location":"en/end-user/kpanda/scale/knative/playground.html#case-2-based-on-concurrent-elastic-scaling","title":"case 2 - Based on Concurrent Elastic Scaling","text":"

                                                                                          We first apply the following YAML definition under the cluster.

                                                                                          apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"1\"\n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n

                                                                                          Execute the following command for testing, and you can observe the scaling of the Pods by using kubectl get pods -A -w.

                                                                                          wrk -t2 -c4 -d6s http://hello.knative-serving.knative.daocloud.io/\n
                                                                                          "},{"location":"en/end-user/kpanda/scale/knative/playground.html#case-3-based-on-concurrent-elastic-scaling-scale-out-in-advance-to-reach-a-specific-ratio","title":"case 3 - Based on concurrent elastic scaling, scale out in advance to reach a specific ratio.","text":"

We can easily achieve this, for example, by limiting each container to 10 concurrent requests and setting autoscaling.knative.dev/target-utilization-percentage: 70, which starts scaling out the Pods when 70% utilization is reached.

apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"10\"\n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"\n        autoscaling.knative.dev/target-utilization-percentage: \"70\"\n        autoscaling.knative.dev/metric: \"concurrency\"\n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n
                                                                                          "},{"location":"en/end-user/kpanda/scale/knative/playground.html#case-4-canary-releasetraffic-percentage","title":"case 4 - Canary Release/Traffic Percentage","text":"

                                                                                          We can control the distribution of traffic to each version through spec.traffic.

                                                                                          apiVersion: serving.knative.dev/v1\nkind: Service\nmetadata:\n  name: hello\nspec:\n  template:\n    metadata:\n      annotations:\n        autoscaling.knative.dev/target: \"1\"  \n        autoscaling.knative.dev/class: \"kpa.autoscaling.knative.dev\"         \n    spec:\n      containers:\n        - image: m.daocloud.io/ghcr.io/knative/helloworld-go:latest\n          ports:\n            - containerPort: 8080\n          env:\n            - name: TARGET\n              value: \"World\"\n  traffic:\n  - latestRevision: true\n    percent: 50\n  - latestRevision: false\n    percent: 50\n    revisionName: hello-00001\n
                                                                                          "},{"location":"en/end-user/kpanda/scale/knative/scene.html","title":"Use Cases","text":""},{"location":"en/end-user/kpanda/scale/knative/scene.html#suitable-cases","title":"Suitable Cases","text":"
                                                                                          • High concurrency business with short connections
                                                                                          • Businesses that require elastic scaling
                                                                                          • A large number of applications need to scale down to 0 to improve resource utilization
                                                                                          • AI Serving services that scale based on specific metrics
                                                                                          "},{"location":"en/end-user/kpanda/scale/knative/scene.html#unsuitable-cases","title":"Unsuitable Cases","text":"
                                                                                          • Long-lived connection business
                                                                                          • Latency-sensitive business
                                                                                          • Traffic splitting based on cookies
                                                                                          • Traffic splitting based on headers
                                                                                          "},{"location":"en/end-user/kpanda/security/index.html","title":"Types of Security Scans","text":"

                                                                                          AI platform Container Management provides three types of security scans:

                                                                                          • Compliance Scan: Conducts security scans on cluster nodes based on CIS Benchmark.
                                                                                          • Authorization Scan: Checks for security and compliance issues in the Kubernetes cluster, records and verifies authorized access, object changes, events, and other activities related to the Kubernetes API.
                                                                                          • Vulnerability Scan: Scans the Kubernetes cluster for potential vulnerabilities and risks, such as unauthorized access, sensitive information leakage, weak authentication, container escape, etc.
                                                                                          "},{"location":"en/end-user/kpanda/security/index.html#compliance-scan","title":"Compliance Scan","text":"

                                                                                          The object of compliance scanning is the cluster node. The scan result lists the scan items and results and provides repair suggestions for any failed scan items. For specific security rules used during scanning, refer to the CIS Kubernetes Benchmark.

                                                                                          The focus of the scan varies when checking different types of nodes.

                                                                                          • Scan the control plane node (Controller)

                                                                                            • Focus on the security of system components such as API Server , controller-manager , scheduler , kubelet , etc.
                                                                                            • Check the security configuration of the Etcd database.
                                                                                            • Verify whether the cluster's authentication mechanism, authorization policy, and network security configuration meet security standards.
                                                                                          • Scan worker nodes

                                                                                            • Check if the configuration of container runtimes such as kubelet and Docker meets security standards.
• Verify that container images come from trusted sources and have been verified.
                                                                                            • Check if the network security configuration of the node meets security standards.

                                                                                          Tip

                                                                                          To use compliance scanning, you need to create a scan configuration first, and then create a scan policy based on that configuration. After executing the scan policy, you can view the scan report.

                                                                                          "},{"location":"en/end-user/kpanda/security/index.html#authorization-scan","title":"Authorization Scan","text":"

Authorization scanning focuses on security vulnerabilities caused by authorization issues. Authorization scans help users identify security threats in Kubernetes clusters and flag which resources need further review and protection. By performing these checks, users gain a clearer and more comprehensive understanding of their Kubernetes environment and can ensure that the cluster meets Kubernetes best practices and security standards.

                                                                                          Specifically, authorization scanning supports the following operations:

                                                                                          • Scans the health status of all nodes in the cluster.

                                                                                          • Scans the running state of components in the cluster, such as kube-apiserver , kube-controller-manager , kube-scheduler , etc.

                                                                                          • Scans security configurations: Check Kubernetes' security configuration.

                                                                                            • API security: whether unsafe API versions are enabled, whether appropriate RBAC roles and permission restrictions are set, etc.
                                                                                            • Container security: whether insecure images are used, whether privileged mode is enabled, whether appropriate security context is set, etc.
                                                                                            • Network security: whether appropriate network policy is enabled to restrict traffic, whether TLS encryption is used, etc.
                                                                                            • Storage security: whether appropriate encryption and access controls are enabled.
                                                                                            • Application security: whether necessary security measures are in place, such as password management, cross-site scripting attack defense, etc.
                                                                                          • Provides warnings and suggestions: Security best practices that cluster administrators should perform, such as regularly rotating certificates, using strong passwords, restricting network access, etc.

                                                                                          Tip

                                                                                          To use authorization scanning, you need to create a scan policy first. After executing the scan policy, you can view the scan report. For details, refer to Security Scanning.

                                                                                          "},{"location":"en/end-user/kpanda/security/index.html#vulnerability-scan","title":"Vulnerability Scan","text":"

                                                                                          Vulnerability scanning focuses on scanning potential malicious attacks and security vulnerabilities, such as remote code execution, SQL injection, XSS attacks, and some attacks specific to Kubernetes. The final scan report lists the security vulnerabilities in the cluster and provides repair suggestions.

                                                                                          Tip

                                                                                          To use vulnerability scanning, you need to create a scan policy first. After executing the scan policy, you can view the scan report. For details, refer to Vulnerability Scan.

                                                                                          "},{"location":"en/end-user/kpanda/security/audit.html","title":"Permission Scan","text":"

                                                                                          To use the Permission Scan feature, you need to create a scan policy first. After executing the policy, a scan report will be automatically generated for viewing.

                                                                                          "},{"location":"en/end-user/kpanda/security/audit.html#create-a-scan-policy","title":"Create a Scan Policy","text":"
                                                                                          1. On the left navigation bar of the homepage in the Container Management module, click Security Management .

                                                                                          2. Click Permission Scan on the left navigation bar, then click the Scan Policy tab and click Create Scan Policy on the right.

                                                                                          3. Fill in the configuration according to the following instructions, and then click OK .

• Cluster: Select the cluster to be scanned. The optional cluster list comes from the clusters accessed or created in the Container Management module; if the desired cluster is not available, access or create one there first.
                                                                                            • Scan Type:

                                                                                              • Immediate scan: Perform a scan immediately after the scan policy is created. It cannot be automatically/manually executed again later.
                                                                                              • Scheduled scan: Automatically repeat the scan at scheduled intervals.
• Number of Scan Reports to Keep: Set the maximum number of scan reports to keep. When the limit is exceeded, the earliest reports are deleted first.

                                                                                          "},{"location":"en/end-user/kpanda/security/audit.html#updatedelete-scan-policies","title":"Update/Delete Scan Policies","text":"

                                                                                          After creating a scan policy, you can update or delete it as needed.

                                                                                          Under the Scan Policy tab, click the \u2507 action button to the right of a configuration:

                                                                                          • For periodic scan policies:

                                                                                            • Select Execute Immediately to perform an additional scan outside the regular schedule.
                                                                                            • Select Disable to interrupt the scanning plan until Enable is clicked to resume executing the scan policy according to the scheduling plan.
                                                                                            • Select Edit to update the configuration. You can update the scan configuration, type, scan cycle, and report retention quantity. The configuration name and the target cluster to be scanned cannot be changed.
                                                                                            • Select Delete to delete the configuration.
• For one-time scan policies: Only the Delete operation is supported.

                                                                                          "},{"location":"en/end-user/kpanda/security/audit.html#view-scan-reports","title":"View Scan Reports","text":"
                                                                                          1. Under the Security Management -> Permission Scanning -> Scan Reports tab, click the report name.

                                                                                            Clicking Delete on the right of a report allows you to manually delete the report.

                                                                                          2. View the scan report content, including:

                                                                                            • The target cluster scanned.
                                                                                            • The scan policy used.
                                                                                            • The total number of scan items, warnings, and errors.
• For reports generated by periodic scan policies, you can also view the scan frequency.
                                                                                            • The start time of the scan.
                                                                                            • Check details, such as the checked resources, resource types, scan results, error types, and error details.
                                                                                          "},{"location":"en/end-user/kpanda/security/hunter.html","title":"Vulnerability Scan","text":"

                                                                                          To use the Vulnerability Scan feature, you need to create a scan policy first. After executing the policy, a scan report will be automatically generated for viewing.

                                                                                          "},{"location":"en/end-user/kpanda/security/hunter.html#create-a-scan-policy","title":"Create a Scan Policy","text":"
                                                                                          1. On the left navigation bar of the homepage in the Container Management module, click Security Management .

                                                                                          2. Click Vulnerability Scan on the left navigation bar, then click the Scan Policy tab and click Create Scan Policy on the right.

                                                                                          3. Fill in the configuration according to the following instructions, and then click OK .

• Cluster: Select the cluster to be scanned. The optional cluster list comes from the clusters accessed or created in the Container Management module; if the desired cluster is not available, access or create one there first.
                                                                                            • Scan Type:

                                                                                              • Immediate scan: Perform a scan immediately after the scan policy is created. It cannot be automatically/manually executed again later.
                                                                                              • Scheduled scan: Automatically repeat the scan at scheduled intervals.
• Number of Scan Reports to Keep: Set the maximum number of scan reports to keep. When the limit is exceeded, the earliest reports are deleted first.

                                                                                          "},{"location":"en/end-user/kpanda/security/hunter.html#updatedelete-scan-policies","title":"Update/Delete Scan Policies","text":"

                                                                                          After creating a scan policy, you can update or delete it as needed.

                                                                                          Under the Scan Policy tab, click the \u2507 action button to the right of a configuration:

                                                                                          • For periodic scan policies:

                                                                                            • Select Execute Immediately to perform an additional scan outside the regular schedule.
                                                                                            • Select Disable to interrupt the scanning plan until Enable is clicked to resume executing the scan policy according to the scheduling plan.
                                                                                            • Select Edit to update the configuration. You can update the scan configuration, type, scan cycle, and report retention quantity. The configuration name and the target cluster to be scanned cannot be changed.
                                                                                            • Select Delete to delete the configuration.
• For one-time scan policies: Only the Delete operation is supported.

                                                                                          "},{"location":"en/end-user/kpanda/security/hunter.html#viewe-scan-reports","title":"Viewe Scan Reports","text":"
                                                                                          1. Under the Security Management -> Vulnerability Scanning -> Scan Reports tab, click the report name.

                                                                                            Clicking Delete on the right of a report allows you to manually delete the report.

                                                                                          2. View the scan report content, including:

                                                                                            • The target cluster scanned.
                                                                                            • The scan policy used.
                                                                                            • The scan frequency.
                                                                                            • The total number of risks, high risks, medium risks, and low risks.
                                                                                            • The time of the scan.
                                                                                            • Check details such as vulnerability ID, vulnerability type, vulnerability name, vulnerability description, etc.
                                                                                          "},{"location":"en/end-user/kpanda/security/cis/config.html","title":"Scan Configuration","text":"

                                                                                          The first step in using CIS Scanning is to create a scan configuration. Based on the scan configuration, you can then create scan policies, execute scan policies, and finally view scan results.

                                                                                          "},{"location":"en/end-user/kpanda/security/cis/config.html#create-a-scan-configuration","title":"Create a Scan Configuration","text":"

                                                                                          The steps for creating a scan configuration are as follows:

                                                                                          1. Click Security Management in the left navigation bar of the homepage of the container management module.

                                                                                          2. By default, enter the Compliance Scanning page, click the Scan Configuration tab, and then click Create Scan Configuration in the upper-right corner.

                                                                                          3. Fill in the configuration name, select the configuration template, and optionally check the scan items, then click OK .

Scan Template: Currently, two templates are provided. The kubeadm template is suitable for general Kubernetes clusters. The daocloud template builds on the kubeadm template and, based on the platform design of AI platform, ignores scan items that do not apply to AI platform.

                                                                                          "},{"location":"en/end-user/kpanda/security/cis/config.html#view-scan-configuration","title":"View Scan Configuration","text":"

                                                                                          Under the scan configuration tab, clicking the name of a scan configuration displays the type of the configuration, the number of scan items, the creation time, the configuration template, and the specific scan items enabled for the configuration.

                                                                                          "},{"location":"en/end-user/kpanda/security/cis/config.html#updatdelete-scan-configuration","title":"Updat/Delete Scan Configuration","text":"

                                                                                          After a scan configuration has been successfully created, it can be updated or deleted according to your needs.

                                                                                          Under the scan configuration tab, click the \u2507 action button to the right of a configuration:

                                                                                          • Select Edit to update the configuration. You can update the description, template, and scan items. The configuration name cannot be changed.
                                                                                          • Select Delete to delete the configuration.
                                                                                          "},{"location":"en/end-user/kpanda/security/cis/policy.html","title":"Scan Policy","text":""},{"location":"en/end-user/kpanda/security/cis/policy.html#create-a-scan-policy","title":"Create a Scan Policy","text":"

                                                                                          After creating a scan configuration, you can create a scan policy based on the configuration.

                                                                                          1. Under the Security Management -> Compliance Scanning page, click the Scan Policy tab on the right to create a scan policy.

                                                                                          2. Fill in the configuration according to the following instructions and click OK .

• Cluster: Select the cluster to be scanned. The optional cluster list comes from the clusters accessed or created in the Container Management module; if the desired cluster is not available, access or create one there first.
                                                                                            • Scan Configuration: Select a pre-created scan configuration. The scan configuration determines which specific scan items need to be performed.
                                                                                            • Scan Type:

                                                                                              • Immediate scan: Perform a scan immediately after the scan policy is created. It cannot be automatically/manually executed again later.
                                                                                              • Scheduled scan: Automatically repeat the scan at scheduled intervals.
• Number of Scan Reports to Keep: Set the maximum number of scan reports to keep. When the limit is exceeded, the earliest reports are deleted first.

                                                                                          "},{"location":"en/end-user/kpanda/security/cis/policy.html#updatedelete-scan-policies","title":"Update/Delete Scan Policies","text":"

                                                                                          After creating a scan policy, you can update or delete it as needed.

                                                                                          Under the Scan Policy tab, click the \u2507 action button to the right of a configuration:

                                                                                          • For periodic scan policies:

                                                                                            • Select Execute Immediately to perform an additional scan outside the regular schedule.
                                                                                            • Select Disable to interrupt the scanning plan until Enable is clicked to resume executing the scan policy according to the scheduling plan.
                                                                                            • Select Edit to update the configuration. You can update the scan configuration, type, scan cycle, and report retention quantity. The configuration name and the target cluster to be scanned cannot be changed.
                                                                                            • Select Delete to delete the configuration.
• For one-time scan policies: Only the Delete operation is supported.

                                                                                          "},{"location":"en/end-user/kpanda/security/cis/report.html","title":"Scan Report","text":"

                                                                                          After executing a scan policy, a scan report will be generated automatically. You can view the scan report online or download it to your local computer.

                                                                                          • Download and View

                                                                                            Under the Security Management -> Compliance Scanning page, click the Scan Report tab, then click the \u2507 action button to the right of a report and select Download .

                                                                                          • View Online

                                                                                            Clicking the name of a report allows you to view its content online, which includes:

                                                                                            • The target cluster scanned.
                                                                                            • The scan policy and scan configuration used.
                                                                                            • The start time of the scan.
                                                                                            • The total number of scan items, the number passed, and the number failed.
                                                                                            • For failed scan items, repair suggestions are provided.
                                                                                            • For passed scan items, more secure operational suggestions are provided.
                                                                                          "},{"location":"en/end-user/kpanda/storage/pv.html","title":"data volume (PV)","text":"

A data volume (PersistentVolume, PV) is a piece of storage in the cluster that can be provisioned in advance by an administrator or provisioned dynamically through a StorageClass. A PV is a cluster resource with an independent life cycle: it is not deleted when the Pod that uses it ends. Mounting a PV into a workload persists the workload's data; the PV holds the data directory accessed by the containers in the Pod.

                                                                                          "},{"location":"en/end-user/kpanda/storage/pv.html#create-data-volume","title":"Create data volume","text":"

Currently, data volumes can be created in two ways: YAML and form. Each has its own advantages and suits different users.

• Creating through YAML takes fewer steps and is more efficient, but the barrier is higher: you need to be familiar with the YAML configuration of the data volume.

• Creating through the form is more intuitive and easier: just fill in the values as prompted, though it involves more steps.

                                                                                          "},{"location":"en/end-user/kpanda/storage/pv.html#yaml-creation","title":"YAML creation","text":"
                                                                                          1. Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume (PV) -> Create with YAML in the left navigation bar.

                                                                                          2. Enter or paste the prepared YAML file in the pop-up box, and click OK at the bottom of the pop-up box.

                                                                                            Supports importing YAML files from local or downloading and saving filled files to local.
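For reference, a minimal PV manifest of the kind you might paste here. This is a sketch assuming a hostPath-backed volume; the name, capacity, and path are illustrative placeholders, not values from this platform:

apiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: pv-example   # illustrative name\nspec:\n  capacity:\n    storage: 10Gi\n  accessModes:\n    - ReadWriteOnce\n  persistentVolumeReclaimPolicy: Retain\n  volumeMode: Filesystem\n  hostPath:\n    path: /data/pv-example   # assumed host directory\n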

                                                                                          "},{"location":"en/end-user/kpanda/storage/pv.html#form-creation","title":"Form Creation","text":"
                                                                                          1. Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume (PV) -> Create Data Volume (PV) in the left navigation bar.

                                                                                          2. Fill in the basic information.

                                                                                            • The data volume name, data volume type, mount path, volume mode, and node affinity cannot be changed after creation.
                                                                                            • Data volume type: For a detailed introduction to volume types, refer to the official Kubernetes document Volumes.

• Local: The node's local storage is wrapped behind the PVC interface, so the container uses the PVC directly without caring about the underlying storage type. Local volumes do not support dynamic provisioning, but they do support node affinity, which limits which nodes can access the data volume.

• HostPath: Uses files or directories on the node's file system as data volumes; Pod scheduling based on node affinity is not supported.

• Mount path: Mounts the data volume to a specific directory in the container.

• Access mode:

• ReadWriteOnce: The data volume can be mounted by a single node in read-write mode.
• ReadWriteMany: The data volume can be mounted by multiple nodes in read-write mode.
• ReadOnlyMany: The data volume can be mounted read-only by multiple nodes.
• ReadWriteOncePod: The data volume can be mounted by a single Pod in read-write mode.
                                                                                            • Recycling policy:

• Retain: The PV is not deleted; its status only changes to released , and it must be reclaimed manually by the user. For how to reclaim it manually, refer to Persistent Volume.
• Recycle: Keeps the PV but empties its data with a basic wipe ( rm -rf /thevolume/* ).
• Delete: The PV and its underlying data are deleted together.
                                                                                            • Volume mode:

• File system: The data volume is mounted into a directory in the Pod. If the volume is backed by a device that is currently empty, a file system is created on the device before the first mount.
• Block: Uses the data volume as a raw block device. The volume is presented to the Pod as a block device without any file system on it, allowing faster access to the data.
• Node affinity: Limits which nodes can access the data volume; required for Local volumes. A sketch follows below.
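A sketch of a Local volume with node affinity, illustrating the constraint described above; the StorageClass name, path, and node name are assumptions:

apiVersion: v1\nkind: PersistentVolume\nmetadata:\n  name: local-pv-example   # illustrative name\nspec:\n  capacity:\n    storage: 5Gi\n  accessModes:\n    - ReadWriteOnce\n  volumeMode: Filesystem\n  storageClassName: local-storage   # assumed StorageClass\n  local:\n    path: /mnt/disks/ssd1   # assumed local path on the node\n  nodeAffinity:\n    required:\n      nodeSelectorTerms:\n        - matchExpressions:\n            - key: kubernetes.io/hostname\n              operator: In\n              values:\n                - kube-node-1   # assumed node name\n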

                                                                                          "},{"location":"en/end-user/kpanda/storage/pv.html#view-data-volume","title":"View data volume","text":"

                                                                                          Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume (PV) in the left navigation bar.

• On this page, you can view all data volumes in the current cluster, along with each volume's status, capacity, namespace, and other information.

• Supports ascending or descending sorting by data volume name, status, namespace, and creation time.

• Click the name of a data volume to view its basic configuration, StorageClass information, labels, annotations, and other details.

                                                                                          "},{"location":"en/end-user/kpanda/storage/pv.html#clone-data-volume","title":"Clone data volume","text":"

Cloning a data volume creates a new data volume based on the configuration of the cloned one.

                                                                                          1. Enter the clone page

                                                                                            • On the data volume list page, find the data volume to be cloned, and select Clone under the operation bar on the right.

                                                                                              You can also click the name of the data volume, click the operation button in the upper right corner of the details page and select Clone .

                                                                                          2. Use the original configuration directly, or modify it as needed, and click OK at the bottom of the page.

                                                                                          "},{"location":"en/end-user/kpanda/storage/pv.html#update-data-volume","title":"Update data volume","text":"

Data volumes can be updated in two ways: through the form or through the YAML file.

                                                                                          Note

                                                                                          Only updating the alias, capacity, access mode, reclamation policy, label, and comment of the data volume is supported.

                                                                                          • On the data volume list page, find the data volume that needs to be updated, select Update under the operation bar on the right to update through the form, select Edit YAML to update through YAML.

                                                                                          • Click the name of the data volume to enter the details page of the data volume, select Update in the upper right corner of the page to update through the form, select Edit YAML to update through YAML.

                                                                                          "},{"location":"en/end-user/kpanda/storage/pv.html#delete-data-volume","title":"Delete data volume","text":"

On the data volume list page, find the data volume to be deleted, and select Delete in the operation column on the right.

                                                                                          You can also click the name of the data volume, click the operation button in the upper right corner of the details page and select Delete .

                                                                                          "},{"location":"en/end-user/kpanda/storage/pvc.html","title":"Data volume declaration (PVC)","text":"

A persistent volume claim (PersistentVolumeClaim, PVC) expresses a user's request for storage. A PVC consumes PV resources and claims a data volume with a specific size and access mode, for example, a PV mounted in ReadWriteOnce, ReadOnlyMany, or ReadWriteMany mode.

                                                                                          "},{"location":"en/end-user/kpanda/storage/pvc.html#create-data-volume-statement","title":"Create data volume statement","text":"

Currently, data volume declarations can be created in two ways: YAML and form. Each has its own advantages and suits different users.

• Creating through YAML takes fewer steps and is more efficient, but the barrier is higher: you need to be familiar with the YAML configuration of the data volume declaration.

• Creating through the form is more intuitive and easier: just fill in the values as prompted, though it involves more steps.

                                                                                          "},{"location":"en/end-user/kpanda/storage/pvc.html#yaml-creation","title":"YAML creation","text":"
                                                                                          1. Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume Declaration (PVC) -> Create with YAML in the left navigation bar.

                                                                                          2. Enter or paste the prepared YAML file in the pop-up box, and click OK at the bottom of the pop-up box.

                                                                                            Supports importing YAML files from local or downloading and saving filled files to local.
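For reference, a minimal PVC manifest of the kind you might paste here; the name, capacity, and StorageClass name are illustrative placeholders:

apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: pvc-example   # illustrative name\n  namespace: default\nspec:\n  accessModes:\n    - ReadWriteOnce\n  volumeMode: Filesystem\n  resources:\n    requests:\n      storage: 10Gi\n  storageClassName: sc-example   # assumed StorageClass name\n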

                                                                                          "},{"location":"en/end-user/kpanda/storage/pvc.html#form-creation","title":"Form Creation","text":"
                                                                                          1. Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume Declaration (PVC) -> Create Data Volume Declaration (PVC) in the left navigation bar.

                                                                                          2. Fill in the basic information.

                                                                                            • The name, namespace, creation method, data volume, capacity, and access mode of the data volume declaration cannot be changed after creation.
• Creation method: Dynamically create a new data volume declaration from an existing StorageClass or data volume, or create one based on a snapshot of an existing declaration (see the sketch after this list).

  When creating from a snapshot, the declared capacity cannot be modified at creation time; it can be changed after creation completes.

                                                                                            • After selecting the creation method, select the desired StorageClass/data volume/snapshot from the drop-down list.

• Access mode:

  • ReadWriteOnce: The data volume declaration can be mounted by a single node in read-write mode.
  • ReadWriteMany: The data volume declaration can be mounted by multiple nodes in read-write mode.
  • ReadOnlyMany: The data volume declaration can be mounted read-only by multiple nodes.
  • ReadWriteOncePod: The data volume declaration can be mounted by a single Pod in read-write mode.
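Where the creation method is a snapshot, the resulting declaration references that snapshot as its data source. A sketch, assuming a VolumeSnapshot named snap-example already exists and the CSI snapshot feature is enabled:

apiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: pvc-from-snapshot   # illustrative name\nspec:\n  dataSource:\n    name: snap-example   # assumed VolumeSnapshot name\n    kind: VolumeSnapshot\n    apiGroup: snapshot.storage.k8s.io\n  accessModes:\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 10Gi\n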
                                                                                          "},{"location":"en/end-user/kpanda/storage/pvc.html#view-data-volume-statement","title":"View data volume statement","text":"

                                                                                          Click the name of the target cluster in the cluster list, and then click Container Storage -> Data Volume Declaration (PVC) in the left navigation bar.

• On this page, you can view all data volume declarations in the current cluster, along with each declaration's status, capacity, namespace, and other information.

• Supports ascending or descending sorting by declaration name, status, namespace, and creation time.

• Click the name of a data volume declaration to view its basic configuration, StorageClass information, labels, annotations, and other details.

                                                                                          "},{"location":"en/end-user/kpanda/storage/pvc.html#expansion-data-volume-statement","title":"Expansion data volume statement","text":"
                                                                                          1. In the left navigation bar, click Container Storage -> Data Volume Declaration (PVC) , and find the data volume declaration whose capacity you want to adjust.

                                                                                          2. Click the name of the data volume declaration, and then click the operation button in the upper right corner of the page and select Expansion .

                                                                                          3. Enter the target capacity and click OK .
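Behind the form, expansion amounts to raising spec.resources.requests.storage on the declaration, which only succeeds if the StorageClass has expansion enabled. An illustrative fragment, assuming an original size of 10Gi:

spec:\n  resources:\n    requests:\n      storage: 20Gi   # raised from an assumed original 10Gi; shrinking is not supported\n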

                                                                                          "},{"location":"en/end-user/kpanda/storage/pvc.html#clone-data-volume-statement","title":"Clone data volume statement","text":"

Cloning a data volume declaration creates a new declaration based on the configuration of the cloned one.

                                                                                          1. Enter the clone page

                                                                                            • On the data volume declaration list page, find the data volume declaration that needs to be cloned, and select Clone under the operation bar on the right.

                                                                                              You can also click the name of the data volume declaration, click the operation button in the upper right corner of the details page and select Clone .

                                                                                          2. Use the original configuration directly, or modify it as needed, and click OK at the bottom of the page.

                                                                                          "},{"location":"en/end-user/kpanda/storage/pvc.html#update-data-volume-statement","title":"Update data volume statement","text":"

Data volume declarations can be updated in two ways: through the form or through the YAML file.

                                                                                          Note

Only the alias, labels, and annotations of a data volume declaration can be updated.

                                                                                          • On the data volume list page, find the data volume declaration that needs to be updated, select Update in the operation bar on the right to update it through the form, and select Edit YAML to update it through YAML.

                                                                                          • Click the name of the data volume declaration, enter the details page of the data volume declaration, select Update in the upper right corner of the page to update through the form, select Edit YAML to update through YAML.

                                                                                          "},{"location":"en/end-user/kpanda/storage/pvc.html#delete-data-volume-statement","title":"Delete data volume statement","text":"

On the data volume declaration list page, find the declaration to be deleted, and select Delete in the operation column on the right.

You can also click the name of the declaration, click the operation button in the upper right corner of the details page, and select Delete .

                                                                                          "},{"location":"en/end-user/kpanda/storage/pvc.html#common-problem","title":"common problem","text":"
                                                                                          1. If there is no optional StorageClass or data volume in the list, you can Create a StorageClass or Create a data volume.

                                                                                          2. If there is no optional snapshot in the list, you can enter the details page of the data volume declaration and create a snapshot in the upper right corner.

                                                                                          3. If the StorageClass (SC) used by the data volume declaration is not enabled for snapshots, snapshots cannot be made, and the page will not display the \"Make Snapshot\" option.

                                                                                          4. If the StorageClass (SC) used by the data volume declaration does not have the capacity expansion feature enabled, the data volume does not support capacity expansion, and the page will not display the capacity expansion option.
                                                                                          "},{"location":"en/end-user/kpanda/storage/sc-share.html","title":"shared StorageClass","text":"

                                                                                          The AI platform container management module supports sharing a StorageClass with multiple namespaces to improve resource utilization efficiency.

                                                                                          1. Find the StorageClass that needs to be shared in the StorageClass list, and click Authorize Namespace under the operation bar on the right.

2. Click Custom Namespace to select, one by one, the namespaces with which this StorageClass is to be shared.

  • Click Authorize All Namespaces to share this StorageClass with all namespaces in the current cluster at once.
  • Click Remove Authorization under the operation bar on the right side of the list to revoke authorization and stop sharing this StorageClass with that namespace.
                                                                                          "},{"location":"en/end-user/kpanda/storage/sc.html","title":"StorageClass (SC)","text":"

A StorageClass represents a large pool of storage resources composed of many physical disks. After integrating with various storage vendors, the platform supports creating block, local, and custom StorageClasses, which then dynamically provision data volumes for workloads.

                                                                                          "},{"location":"en/end-user/kpanda/storage/sc.html#create-storageclass-sc","title":"Create StorageClass (SC)","text":"

Currently, StorageClasses can be created through YAML or through a form. Each method has its own advantages and suits different users.

• Creating through YAML takes fewer steps and is more efficient, but the barrier is higher: you need to be familiar with the YAML configuration of the StorageClass.

• Creating through the form is more intuitive and easier: just fill in the values as prompted, though it involves more steps.

                                                                                          "},{"location":"en/end-user/kpanda/storage/sc.html#yaml-creation","title":"YAML creation","text":"
                                                                                          1. Click the name of the target cluster in the cluster list, and then click Container Storage -> StorageClass (SC) -> Create with YAML in the left navigation bar.

                                                                                          2. Enter or paste the prepared YAML file in the pop-up box, and click OK at the bottom of the pop-up box.

                                                                                            Supports importing YAML files from local or downloading and saving filled files to local.
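For reference, a minimal StorageClass manifest of the kind you might paste here; the name is a placeholder, and the provisioner shown is just one example of the vendor-specific driver format:

apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: sc-example   # illustrative name\nprovisioner: rancher.io/local-path   # driver format depends on your storage vendor\nreclaimPolicy: Retain\nvolumeBindingMode: WaitForFirstConsumer\nallowVolumeExpansion: true\n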

                                                                                          "},{"location":"en/end-user/kpanda/storage/sc.html#form-creation","title":"Form Creation","text":"
                                                                                          1. Click the name of the target cluster in the cluster list, and then click Container Storage -> StorageClass (SC) -> Create StorageClass (SC) in the left navigation bar.

                                                                                          2. Fill in the basic information and click OK at the bottom.

Custom storage system

                                                                                            • The StorageClass name, driver, and reclamation policy cannot be modified after creation.
• CSI storage driver: A standard Kubernetes container storage interface plug-in; the value must follow the format specified by the storage vendor, such as rancher.io/local-path .

  • For how to fill in the CSI drivers provided by different vendors, refer to the official Kubernetes document Storage Class.

• Recycling policy: When a data volume is deleted, either keep the data in it or delete it.
• Snapshot/Expansion: Once enabled, data volumes and declarations based on this StorageClass support expansion and snapshots, provided the underlying storage driver supports those features.

                                                                                            HwameiStor storage system

                                                                                            • The StorageClass name, driver, and reclamation policy cannot be modified after creation.
                                                                                            • Storage system: HwameiStor storage system.
• Storage type: Supports LVM and raw disk types.
  • LVM type : The usage method recommended by HwameiStor; it supports highly available data volumes, and the corresponding CSI driver is lvm.hwameistor.io .
  • Raw disk data volume : Suitable for scenarios that do not require high availability, as it provides no high-availability capability; the corresponding CSI driver is hdd.hwameistor.io .
• High Availability Mode: Before using the high availability capability, make sure the DRBD component has been installed. With high availability mode turned on, the number of data volume replicas can be set to 1 or 2, and a volume with 1 replica can be converted to 2 replicas if needed (see the sketch after this list).
                                                                                            • Recycling policy: When deleting a data volume, keep the data in the data volume or delete the data in it.
                                                                                            • Snapshot/Expansion: After it is enabled, the data volume/data volume declaration based on the StorageClass can support the expansion and snapshot features, but the premise is that the underlying storage driver supports the snapshot and expansion features.
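For reference, a hedged sketch of an LVM-type HwameiStor StorageClass with high availability enabled; the parameter names follow HwameiStor's documented conventions but are assumptions here and should be verified against the installed version:

apiVersion: storage.k8s.io/v1\nkind: StorageClass\nmetadata:\n  name: hwameistor-lvm-ha   # illustrative name\nprovisioner: lvm.hwameistor.io\nreclaimPolicy: Delete\nvolumeBindingMode: WaitForFirstConsumer\nallowVolumeExpansion: true\nparameters:\n  replicaNumber: \"2\"   # 2 replicas enables high availability; assumes DRBD is installed\n  csi.storage.k8s.io/fstype: \"xfs\"   # assumed file system type\n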
                                                                                          "},{"location":"en/end-user/kpanda/storage/sc.html#update-storageclass-sc","title":"Update StorageClass (SC)","text":"

                                                                                          On the StorageClass list page, find the StorageClass that needs to be updated, and select Edit under the operation bar on the right to update the StorageClass.

                                                                                          Info

                                                                                          Select View YAML to view the YAML file of the StorageClass, but editing is not supported.

                                                                                          "},{"location":"en/end-user/kpanda/storage/sc.html#delete-storageclass-sc","title":"Delete StorageClass (SC)","text":"

                                                                                          On the StorageClass list page, find the StorageClass to be deleted, and select Delete in the operation column on the right.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-cronjob.html","title":"Create CronJob","text":"

                                                                                          This page introduces how to create a CronJob through images and YAML files.

                                                                                          CronJobs are suitable for performing periodic operations, such as backup and report generation. These jobs can be configured to repeat periodically (for example: daily/weekly/monthly), and the time interval at which the job starts to run can be defined.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-cronjob.html#prerequisites","title":"Prerequisites","text":"

                                                                                          Before creating a CronJob, the following prerequisites need to be met:

• In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and make sure the cluster UI can be accessed.

                                                                                          • Create a namespace and a user.

                                                                                          • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                                                                          • When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict, otherwise the deployment will fail.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-cronjob.html#create-by-image","title":"Create by image","text":"

                                                                                          Refer to the following steps to create a CronJob using the image.

                                                                                          1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                                                                          2. On the cluster details page, click Workloads -> CronJobs in the left navigation bar, and then click the Create by Image button in the upper right corner of the page.

                                                                                          3. Fill in Basic Information, Container Settings, CronJob Settings, Advanced Configuration, click OK in the lower right corner of the page to complete the creation.

                                                                                            The system will automatically return to the CronJobs list. Click \u2507 on the right side of the list to perform operations such as updating, deleting, and restarting the CronJob.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-cronjob.html#basic-information","title":"Basic information","text":"

                                                                                          On the Create CronJobs page, enter the information according to the table below, and click Next .

                                                                                          • Workload Name: Can contain up to 63 characters, can only contain lowercase letters, numbers, and a separator (\"-\"), and must start and end with a lowercase letter or number. The name of the same type of workload in the same namespace cannot be repeated, and the name of the workload cannot be changed after the workload is created.
                                                                                          • Namespace: Select which namespace to deploy the newly created CronJob in, and the default namespace is used by default. If you can't find the desired namespace, you can go to Create a new namespace according to the prompt on the page.
                                                                                          • Description: Enter the description information of the workload and customize the content. The number of characters should not exceed 512.
                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-cronjob.html#container-settings","title":"Container settings","text":"

                                                                                          Container setting is divided into six parts: basic information, life cycle, health check, environment variables, data storage, and security settings. Click the tab below to view the requirements of each part.

                                                                                          Container setting is only configured for a single container. To add multiple containers to a pod, click + on the right to add multiple containers.

Basic information (required) | Lifecycle (optional) | Health Check (optional) | Environment variables (optional) | Data storage (optional) | Security settings (optional)

                                                                                          When configuring container-related parameters, you must correctly fill in the container name and image parameters, otherwise you will not be able to proceed to the next step. After filling in the configuration with reference to the following requirements, click OK .

• Container Name: Up to 63 characters; lowercase letters, numbers, and separators ("-") are supported. Must start and end with a lowercase letter or number, e.g., nginx-01.
                                                                                          • Image: Enter the address or name of the image. When entering the image name, the image will be pulled from the official DockerHub by default.
• Image Pull Policy: After checking Always pull the image , the image will be pulled from the registry every time the workload restarts/upgrades. If it is not checked, the local image will be used, and the image will be pulled from the container registry only when it does not exist locally. For more details, refer to Image Pull Policy.
• Privileged container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and has all the permissions of processes running on the host.
                                                                                          • CPU/Memory Quota: Requested value (minimum resource to be used) and limit value (maximum resource allowed to be used) of CPU/Memory resource. Please configure resources for containers as needed to avoid resource waste and system failures caused by excessive container resources. The default value is shown in the figure.
• GPU Exclusive: Configure the GPU usage for the container; only positive integers are supported. The GPU quota setting supports assigning the container exclusive use of an entire GPU or part of a vGPU. For example, for an 8-core GPU, enter 8 to let the container use the entire card exclusively, or enter 1 to configure a 1-core vGPU for the container (see the sketch below).

                                                                                            Before setting exclusive GPU, the administrator needs to install the GPU and driver plug-in on the cluster nodes in advance, and enable the GPU feature in Cluster Settings.
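
As a sketch, the basic container settings above map onto the following Pod fields. The image and resource values are illustrative, and the GPU resource name depends on the device plugin installed on the cluster; the standard nvidia.com/gpu name is assumed here:

apiVersion: v1
kind: Pod
metadata:
  name: gpu-demo                        # illustrative name
spec:
  containers:
    - name: cuda-demo                   # Container Name
      image: nvidia/cuda:12.2.0-base-ubuntu22.04
      imagePullPolicy: IfNotPresent     # "Always pull the image" unchecked
      resources:
        requests:                       # minimum resources to be used
          cpu: 250m
          memory: 512Mi
        limits:                         # maximum resources allowed
          cpu: 500m
          memory: 1Gi
          nvidia.com/gpu: 1             # GPU Exclusive: assumed device-plugin resource name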

                                                                                          Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle Configuration.

                                                                                          It is used to judge the health status of containers and applications, which helps to improve the availability of applications. For details, refer to Container Health Check Configuration.

                                                                                          Configure container parameters within the Pod, add environment variables or pass configuration to the Pod, etc. For details, refer to Container environment variable configuration.

                                                                                          Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage Configuration.

Containers are securely isolated through Linux's built-in account permission isolation mechanism. You can limit container permissions by using account UIDs (numeric user identifiers) with different permissions. For example, enter 0 to run with the privileges of the root account.
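
For example, a minimal sketch of running a container as a non-root UID (names are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: nonroot-demo                    # illustrative name
spec:
  containers:
    - name: app
      image: nginx
      securityContext:
        runAsUser: 1000                 # non-root UID; 0 would grant root privileges
        privileged: false               # privileged container disabled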

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-cronjob.html#cronjob-settings","title":"CronJob Settings","text":"
                                                                                          • Concurrency Policy: Whether to allow multiple Job jobs to run in parallel.

  • Allow : A new job can be created before the previous job completes, and multiple jobs can run in parallel. Too many parallel jobs may occupy cluster resources.
  • Forbid : A new job cannot be created before the previous job completes. If it is time for the new job to run but the previous job has not finished, the CronJob skips this run of the new job.
  • Replace : If it is time for the new job to run but the previous job has not finished, the new job replaces the previous one.

                                                                                            The above rules only apply to multiple jobs created by the same CronJob. Multiple jobs created by multiple CronJobs are always allowed to run concurrently.

• Policy Settings: Set the time period for job execution in minutes, hours, days, weeks, and months. Custom Cron expressions with numbers and * are supported; after entering an expression, its meaning is shown as a prompt. For detailed expression syntax rules, refer to Cron Schedule Syntax. See the example after this list.

                                                                                          • Job Records: Set how many records of successful or failed jobs to keep. 0 means do not keep.
• Timeout: When this time is exceeded, the job is marked as failed and all Pods under the job are deleted. If left empty, no timeout is set. The default is 360 seconds.
• Retries: The number of times the job can be retried; the default is 6.
                                                                                          • Restart Policy: Set whether to restart the Pod when the job fails.
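
As a sketch, the settings above correspond roughly to the following CronJob fields; the schedule, name, and command are illustrative:

apiVersion: batch/v1
kind: CronJob
metadata:
  name: nightly-report                  # illustrative name
spec:
  schedule: "0 2 * * *"                 # Policy Settings: 02:00 every day
  concurrencyPolicy: Forbid             # Concurrency Policy: Allow / Forbid / Replace
  successfulJobsHistoryLimit: 3         # Job Records: successful jobs to keep
  failedJobsHistoryLimit: 1             # Job Records: failed jobs to keep
  jobTemplate:
    spec:
      activeDeadlineSeconds: 360        # Timeout
      backoffLimit: 6                   # Retries
      template:
        spec:
          restartPolicy: Never          # Restart Policy
          containers:
            - name: report
              image: busybox
              command: ["sh", "-c", "echo generating report"]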
                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-cronjob.html#service-settings","title":"Service settings","text":"

Configure a Service for the CronJob, so that it can be accessed externally.

                                                                                          1. Click the Create Service button.

                                                                                          2. Refer to Create Service to configure service parameters.

                                                                                          3. Click OK and click Next .

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-cronjob.html#advanced-configuration","title":"Advanced configuration","text":"

                                                                                          The advanced configuration of CronJobs mainly involves labels and annotations.

                                                                                          You can click the Add button to add labels and annotations to the workload instance Pod.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-cronjob.html#create-from-yaml","title":"Create from YAML","text":"

In addition to creating from an image, you can also create CronJobs more quickly from a YAML file.

                                                                                          1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                                                                          2. On the cluster details page, click Workloads -> CronJobs in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

                                                                                          3. Enter or paste the YAML file prepared in advance, click OK to complete the creation.

Click to view the complete YAML
                                                                                          apiVersion: batch/v1\nkind: CronJob\nmetadata:\n  creationTimestamp: '2022-12-26T09:45:47Z'\n  generation: 1\n  name: demo\n  namespace: default\n  resourceVersion: '92726617'\n  uid: d030d8d7-a405-4dcd-b09a-176942ef36c9\nspec:\n  concurrencyPolicy: Allow\n  failedJobsHistoryLimit: 1\n  jobTemplate:\n    metadata:\n      creationTimestamp: null\n    spec:\n      activeDeadlineSeconds: 360\n      backoffLimit: 6\n      template:\n        metadata:\n          creationTimestamp: null\n        spec:\n          containers:\n            - image: nginx\n              imagePullPolicy: IfNotPresent\n              lifecycle: {}\n              name: container-3\n              resources:\n                limits:\n                  cpu: 250m\n                  memory: 512Mi\n                requests:\n                  cpu: 250m\n                  memory: 512Mi\n              securityContext:\n                privileged: false\n              terminationMessagePath: /dev/termination-log\n              terminationMessagePolicy: File\n          dnsPolicy: ClusterFirst\n          restartPolicy: Never\n          schedulerName: default-scheduler\n          securityContext: {}\n          terminationGracePeriodSeconds: 30\n  schedule: 0 0 13 * 5\n  successfulJobsHistoryLimit: 3\n  suspend: false\nstatus: {}\n
                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-daemonset.html","title":"Create DaemonSet","text":"

This page introduces how to create a DaemonSet through images and YAML files.

A DaemonSet uses node affinity and tolerations to ensure that a replica of a Pod runs on all (or some) nodes. For nodes that newly join the cluster, the DaemonSet automatically deploys the proper Pod on the new node and tracks its running status. When a node is removed, the DaemonSet deletes all Pods it created.

                                                                                          Common cases for daemons include:

                                                                                          • Run cluster daemons on each node.
                                                                                          • Run a log collection daemon on each node.
                                                                                          • Run a monitoring daemon on each node.

For simplicity, you can start one DaemonSet on each node for each type of daemon. For finer-grained and more advanced daemon management, you can also deploy multiple DaemonSets for the same kind of daemon, each with different flags and different memory and CPU requests for different hardware types.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-daemonset.html#prerequisites","title":"Prerequisites","text":"

                                                                                          Before creating a DaemonSet, the following prerequisites need to be met:

• In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and make sure the cluster UI can be accessed.

                                                                                          • Create a namespace and a user.

                                                                                          • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                                                                          • When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict, otherwise the deployment will fail.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-daemonset.html#create-by-image","title":"Create by image","text":"

Refer to the following steps to create a DaemonSet using an image.

                                                                                          1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                                                                          2. On the cluster details page, click Workloads -> DaemonSets in the left navigation bar, and then click the Create by Image button in the upper right corner of the page.

                                                                                          3. Fill in Basic Information, Container Settings, Service Settings, Advanced Settings, click OK in the lower right corner of the page to complete the creation.

                                                                                            The system will automatically return the list of DaemonSets . Click \u2507 on the right side of the list to perform operations such as updating, deleting, and restarting the DaemonSet.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-daemonset.html#basic-information","title":"Basic information","text":"

                                                                                          On the Create DaemonSets page, after entering the information according to the table below, click Next .

                                                                                          • Workload Name: Can contain up to 63 characters, can only contain lowercase letters, numbers, and a separator (\"-\"), and must start and end with a lowercase letter or number. The name of the same type of workload in the same namespace cannot be repeated, and the name of the workload cannot be changed after the workload is created.
                                                                                          • Namespace: Select which namespace to deploy the newly created DaemonSet in, and the default namespace is used by default. If you can't find the desired namespace, you can go to Create a new namespace according to the prompt on the page.
                                                                                          • Description: Enter the description information of the workload and customize the content. The number of characters should not exceed 512.
                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-daemonset.html#container-settings","title":"Container settings","text":"

                                                                                          Container setting is divided into six parts: basic information, life cycle, health check, environment variables, data storage, and security settings. Click the tab below to view the requirements of each part.

                                                                                          Container setting is only configured for a single container. To add multiple containers to a pod, click + on the right to add multiple containers.

Basic information (required) | Lifecycle (optional) | Health Check (optional) | Environment variables (optional) | Data storage (optional) | Security settings (optional)

                                                                                          When configuring container-related parameters, you must correctly fill in the container name and image parameters, otherwise you will not be able to proceed to the next step. After filling in the settings with reference to the following requirements, click OK .

• Container Name: Up to 63 characters; lowercase letters, numbers, and separators ("-") are supported. Must start and end with a lowercase letter or number, e.g., nginx-01.
                                                                                          • Image: Enter the address or name of the image. When entering the image name, the image will be pulled from the official DockerHub by default.
• Image Pull Policy: After checking Always pull image , the image will be pulled from the registry every time the workload restarts/upgrades. If it is not checked, the local image will be used, and the image will be pulled from the container registry only when it does not exist locally. For more details, refer to Image Pull Policy.
• Privileged container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and has all the permissions of processes running on the host.
                                                                                          • CPU/Memory Quota: Requested value (minimum resource to be used) and limit value (maximum resource allowed to be used) of CPU/Memory resource. Please configure resources for containers as needed to avoid resource waste and system failures caused by excessive container resources. The default value is shown in the figure.
• GPU Exclusive: Configure the GPU usage for the container; only positive integers are supported. The GPU quota setting supports assigning the container exclusive use of an entire GPU or part of a vGPU. For example, for an 8-core GPU, enter 8 to let the container use the entire card exclusively, or enter 1 to configure a 1-core vGPU for the container.

                                                                                            Before setting exclusive GPU, the administrator needs to install the GPU and driver plug-in on the cluster nodes in advance, and enable the GPU feature in Cluster Settings.

                                                                                          Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle Configuration.

                                                                                          It is used to judge the health status of containers and applications, which helps to improve the availability of applications. For details, refer to Container Health Check Configuration.

                                                                                          Configure container parameters within the Pod, add environment variables or pass settings to the Pod, etc. For details, refer to Container environment variable settings.

                                                                                          Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage Configuration.

Containers are securely isolated through Linux's built-in account permission isolation mechanism. You can limit container permissions by using account UIDs (numeric user identifiers) with different permissions. For example, enter 0 to run with the privileges of the root account.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-daemonset.html#service-settings","title":"Service settings","text":"

Create a Service for the DaemonSet, so that it can be accessed externally.

                                                                                          1. Click the Create Service button.

                                                                                          2. Configure service parameters, refer to Create Service for details.

                                                                                          3. Click OK and click Next .

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-daemonset.html#advanced-settings","title":"Advanced settings","text":"

Advanced settings include four parts: network settings, upgrade policy, scheduling policy, and labels and annotations. You can click the tabs below to view the requirements of each part.

Network Configuration | Upgrade Policy | Scheduling Policies | Labels and Annotations

                                                                                          In some cases, the application will have redundant DNS queries. Kubernetes provides DNS-related settings options for applications, which can effectively reduce redundant DNS queries and increase business concurrency in certain cases.

                                                                                          • DNS Policy

  • Default: The container uses the domain name resolution file pointed to by the kubelet --resolv-conf parameter. This configuration can only resolve external domain names registered on the Internet, not cluster-internal domain names, and does not generate redundant DNS queries.
  • ClusterFirstWithHostNet: Used by applications running with host networking, so that their Pods can still use the cluster's DNS service.
  • ClusterFirst: The application uses the cluster DNS service (Kube-DNS/CoreDNS).
  • None: A new option value introduced in Kubernetes v1.9 (Beta in v1.10). After setting it to None, dnsConfig must be set; the container's domain name resolution file is then generated entirely from the dnsConfig settings.
                                                                                          • Nameservers: fill in the address of the domain name server, such as 10.6.175.20 .

                                                                                          • Search domains: DNS search domain list for domain name query. When specified, the provided search domain list will be merged into the search field of the domain name resolution file generated based on dnsPolicy, and duplicate domain names will be deleted. Kubernetes allows up to 6 search domains.
• Options: Configuration options for DNS, where each object can have a name attribute (required) and a value attribute (optional). The content of this field is merged into the options field of the domain name resolution file generated based on dnsPolicy; options from dnsConfig override conflicting options generated based on dnsPolicy.
• Host Alias: Hostname aliases added to the Pod's /etc/hosts file. See the sketch after this list.
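
A minimal sketch combining the DNS settings above; the addresses and hostnames are illustrative:

apiVersion: v1
kind: Pod
metadata:
  name: dns-demo                        # illustrative name
spec:
  hostAliases:                          # Host Alias: entries added to /etc/hosts
    - ip: "10.6.175.20"
      hostnames:
        - "registry.internal"           # assumed alias
  dnsPolicy: "None"                     # DNS Policy: None requires dnsConfig
  dnsConfig:
    nameservers:
      - 10.6.175.20                     # Nameservers
    searches:
      - svc.cluster.local               # Search domains (up to 6)
    options:
      - name: ndots                     # Options: name (required) / value (optional)
        value: "2"
  containers:
    - name: app
      image: busybox
      command: ["sleep", "3600"]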

                                                                                          • Upgrade Mode: Rolling upgrade refers to gradually replacing instances of the old version with instances of the new version. During the upgrade process, business traffic will be load-balanced to the old and new instances at the same time, so the business will not be interrupted. Rebuild and upgrade refers to deleting the workload instance of the old version first, and then installing the specified new version. During the upgrade process, the business will be interrupted.
                                                                                          • Max Unavailable Pods: Specify the maximum value or ratio of unavailable pods during the workload update process, the default is 25%. If it is equal to the number of instances, there is a risk of service interruption.
                                                                                          • Max Surge: The maximum or ratio of the total number of Pods exceeding the desired replica count of Pods during a Pod update. Default is 25%.
                                                                                          • Revision History Limit: Set the number of old versions retained when the version is rolled back. The default is 10.
                                                                                          • Minimum Ready: The minimum time for a Pod to be ready. Only after this time is the Pod considered available. The default is 0 seconds.
                                                                                          • Upgrade Max Duration: If the deployment is not successful after the set time, the workload will be marked as failed. Default is 600 seconds.
• Graceful Period: The execution period (0-9,999 seconds) of the stop commands before the workload stops; the default is 30 seconds. See the sketch below.
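
As a sketch, the upgrade settings above map onto a DaemonSet roughly as follows; the workload name and image are illustrative, and maxSurge is omitted since DaemonSets only support it on recent Kubernetes versions:

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: log-agent                       # illustrative name
spec:
  selector:
    matchLabels:
      app: log-agent
  minReadySeconds: 0                    # Minimum Ready
  revisionHistoryLimit: 10              # Revision History Limit
  updateStrategy:
    type: RollingUpdate                 # Upgrade Mode: rolling upgrade
    rollingUpdate:
      maxUnavailable: 25%               # Max Unavailable Pods
  template:
    metadata:
      labels:
        app: log-agent
    spec:
      terminationGracePeriodSeconds: 30 # Graceful Period
      containers:
        - name: agent
          image: busybox
          command: ["sleep", "3600"]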

                                                                                          • Toleration time: When the node where the workload instance is located is unavailable, the time for rescheduling the workload instance to other available nodes, the default is 300 seconds.
                                                                                          • Node affinity: According to the label on the node, constrain which nodes the Pod can be scheduled on.
                                                                                          • Workload Affinity: Constrains which nodes a Pod can be scheduled to based on the labels of the Pods already running on the node.
                                                                                          • Workload anti-affinity: Constrains which nodes a Pod cannot be scheduled to based on the labels of Pods already running on the node.
• Topology domain: i.e. topologyKey, used to define the scope of nodes for scheduling. For example, with kubernetes.io/os, as long as a node running a given operating system meets the labelSelector conditions, Pods can be scheduled onto that node.

                                                                                          For details, refer to Scheduling Policy.
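
A minimal sketch of these scheduling settings on a Pod spec; labels and values are illustrative:

apiVersion: v1
kind: Pod
metadata:
  name: sched-demo                      # illustrative name
  labels:
    app: sched-demo
spec:
  tolerations:
    - key: node.kubernetes.io/unreachable
      operator: Exists
      effect: NoExecute
      tolerationSeconds: 300            # Toleration time before rescheduling
  affinity:
    nodeAffinity:                       # Node affinity: constrain by node labels
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
          - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values: ["linux"]
    podAntiAffinity:                    # Workload anti-affinity: spread replicas apart
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app: sched-demo
          topologyKey: kubernetes.io/hostname   # Topology domain
  containers:
    - name: app
      image: nginx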

You can click the Add button to add labels and annotations to workloads and Pods.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-daemonset.html#create-from-yaml","title":"Create from YAML","text":"

In addition to creating from an image, you can also create DaemonSets more quickly from a YAML file.

                                                                                          1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the Cluster Details page.

2. On the cluster details page, click Workloads -> DaemonSets in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

                                                                                          3. Enter or paste the YAML file prepared in advance, click OK to complete the creation.

Click to view an example YAML for creating a DaemonSet
kind: DaemonSet\napiVersion: apps/v1\nmetadata:\n  name: hwameistor-local-disk-manager\n  namespace: hwameistor\n  uid: ccbdc098-7de3-4a8a-96dd-d1cee159c92b\n  resourceVersion: '90999552'\n  generation: 1\n  creationTimestamp: '2022-12-15T09:03:44Z'\n  labels:\n    app.kubernetes.io/managed-by: Helm\n  annotations:\n    deprecated.DaemonSet.template.generation: '1'\n    meta.helm.sh/release-name: hwameistor\n    meta.helm.sh/release-namespace: hwameistor\nspec:\n  selector:\n    matchLabels:\n      app: hwameistor-local-disk-manager\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: hwameistor-local-disk-manager\n    spec:\n      volumes:\n        - name: udev\n          hostPath:\n            path: /run/udev\n            type: Directory\n        - name: procmount\n          hostPath:\n            path: /proc\n            type: Directory\n        - name: devmount\n          hostPath:\n            path: /dev\n            type: Directory\n        - name: socket-dir\n          hostPath:\n            path: /var/lib/kubelet/plugins/disk.hwameistor.io\n            type: DirectoryOrCreate\n        - name: registration-dir\n          hostPath:\n            path: /var/lib/kubelet/plugins_registry/\n            type: Directory\n        - name: plugin-dir\n          hostPath:\n            path: /var/lib/kubelet/plugins\n            type: DirectoryOrCreate\n        - name: pods-mount-dir\n          hostPath:\n            path: /var/lib/kubelet/pods\n            type: DirectoryOrCreate\n      containers:\n        - name: registrar\n          image: k8s-gcr.m.daocloud.io/sig-storage/csi-node-driver-registrar:v2.5.0\n          args:\n            - '--v=5'\n            - '--csi-address=/csi/csi.sock'\n            - >-\n              --kubelet-registration-path=/var/lib/kubelet/plugins/disk.hwameistor.io/csi.sock\n          env:\n            - name: KUBE_NODE_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: spec.nodeName\n          resources: {}\n          volumeMounts:\n            - name: socket-dir\n              mountPath: /csi\n            - name: registration-dir\n              mountPath: /registration\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - /bin/sh\n                  - '-c'\n                  - >-\n                    rm -rf /registration/disk.hwameistor.io\n                    /registration/disk.hwameistor.io-reg.sock\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: manager\n          image: ghcr.m.daocloud.io/hwameistor/local-disk-manager:v0.6.1\n          command:\n            - /local-disk-manager\n          args:\n            - '--endpoint=$(CSI_ENDPOINT)'\n            - '--nodeid=$(NODENAME)'\n            - '--csi-enable=true'\n          env:\n            - name: CSI_ENDPOINT\n              value: unix://var/lib/kubelet/plugins/disk.hwameistor.io/csi.sock\n            - name: NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: WATCH_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: NODENAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: spec.nodeName\n            - name: OPERATOR_NAME\n              value: local-disk-manager\n          resources: {}\n          volumeMounts:\n            - name: udev\n              mountPath: /run/udev\n            - name: procmount\n              readOnly: true\n              mountPath: /host/proc\n            - name: devmount\n              mountPath: /dev\n            - name: registration-dir\n              mountPath: /var/lib/kubelet/plugins_registry\n            - name: plugin-dir\n              mountPath: /var/lib/kubelet/plugins\n              mountPropagation: Bidirectional\n            - name: pods-mount-dir\n              mountPath: /var/lib/kubelet/pods\n              mountPropagation: Bidirectional\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            privileged: true\n      restartPolicy: Always\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      serviceAccountName: hwameistor-admin\n      serviceAccount: hwameistor-admin\n      hostNetwork: true\n      hostPID: true\n      securityContext: {}\n      schedulerName: default-scheduler\n      tolerations:\n        - key: CriticalAddonsOnly\n          operator: Exists\n        - key: node.kubernetes.io/not-ready\n          operator: Exists\n          effect: NoSchedule\n        - key: node-role.kubernetes.io/master\n          operator: Exists\n          effect: NoSchedule\n        - key: node-role.kubernetes.io/control-plane\n          operator: Exists\n          effect: NoSchedule\n        - key: node.cloudprovider.kubernetes.io/uninitialized\n          operator: Exists\n          effect: NoSchedule\n  updateStrategy:\n    type: RollingUpdate\n    rollingUpdate:\n      maxUnavailable: 1\n      maxSurge: 0\n  revisionHistoryLimit: 10\nstatus:\n  currentNumberScheduled: 4\n  numberMisscheduled: 0\n  desiredNumberScheduled: 4\n  numberReady: 4\n  observedGeneration: 1\n  updatedNumberScheduled: 4\n  numberAvailable: 4\n
                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-deployment.html","title":"Create Deployment","text":"

                                                                                          This page describes how to create deployments through images and YAML files.

Deployment is a common resource in Kubernetes. It mainly provides declarative updates for Pods and ReplicaSets and supports elastic scaling, rolling upgrades, and version rollbacks. Declare the desired Pod state in the Deployment, and the Deployment controller will modify the current state through the ReplicaSet until it reaches the declared desired state. A Deployment is stateless and does not support data persistence; it is suitable for deploying stateless applications that do not need to save data and can be restarted and rolled back at any time.

Through the container management module of AI platform, workloads across multiple clouds and clusters can be easily managed based on proper role permissions, covering full lifecycle management of deployments: creation, update, deletion, elastic scaling, restart, and version rollback. A minimal example follows.
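
For orientation, a minimal Deployment manifest looks like this; the name, image, and replica count are illustrative:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-demo                      # illustrative name
spec:
  replicas: 2                           # desired number of Pod instances
  selector:
    matchLabels:
      app: nginx-demo
  template:
    metadata:
      labels:
        app: nginx-demo
    spec:
      containers:
        - name: nginx
          image: nginx:1.25
          ports:
            - containerPort: 80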

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-deployment.html#prerequisites","title":"Prerequisites","text":"

                                                                                          Before using image to create deployments, the following prerequisites need to be met:

• In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and make sure the cluster UI can be accessed.

                                                                                          • Create a namespace and a user.

                                                                                          • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                                                                          • When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict, otherwise the deployment will fail.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-deployment.html#create-by-image","title":"Create by image","text":"

                                                                                          Follow the steps below to create a deployment by image.

                                                                                          1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the Cluster Details page.

                                                                                          2. On the cluster details page, click Workloads -> Deployments in the left navigation bar, and then click the Create by Image button in the upper right corner of the page.

                                                                                          3. Fill in Basic Information, Container Setting, Service Setting, Advanced Setting in turn, click OK in the lower right corner of the page to complete the creation.

                                                                                            The system will automatically return the list of Deployments . Click \u2507 on the right side of the list to perform operations such as update, delete, elastic scaling, restart, and version rollback on the load. If the workload status is abnormal, please check the specific abnormal information, refer to Workload Status.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-deployment.html#basic-information","title":"Basic information","text":"
                                                                                          • Workload Name: can contain up to 63 characters, can only contain lowercase letters, numbers, and a separator (\"-\"), and must start and end with a lowercase letter or number, such as deployment-01. The name of the same type of workload in the same namespace cannot be repeated, and the name of the workload cannot be changed after the workload is created.
• Namespace: Select the namespace where the newly created workload will be deployed. The default namespace is used by default. If you can't find the desired namespace, you can create a new one according to the prompt on the page.
                                                                                          • Pods: Enter the number of Pod instances for the load, and one Pod instance is created by default.
• Description: Enter the description of the workload and customize the content. The number of characters cannot exceed 512.
                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-deployment.html#container-settings","title":"Container settings","text":"

                                                                                          Container setting is divided into six parts: basic information, life cycle, health check, environment variables, data storage, and security settings. Click the tab below to view the requirements of each part.

                                                                                          Container setting is only configured for a single container. To add multiple containers to a pod, click + on the right to add multiple containers.

Basic Information (required) | Lifecycle (optional) | Health Check (optional) | Environment variables (optional) | Data storage (optional) | Security settings (optional)

                                                                                          When configuring container-related parameters, it is essential to correctly fill in the container name and image parameters; otherwise, you will not be able to proceed to the next step. After filling in the configuration according to the following requirements, click OK.

• Container Type: The default is Work Container. For information on init containers, see the K8s official documentation on init containers (https://kubernetes.io/docs/concepts/workloads/pods/init-containers/).
                                                                                          • Container Name: No more than 63 characters, supporting lowercase letters, numbers, and separators (\"-\"). It must start and end with a lowercase letter or number, for example, nginx-01.
                                                                                          • Image:
                                                                                            • Image: Select an appropriate image from the list. When entering the image name, the default is to pull the image from the official DockerHub.
                                                                                            • Image Version: Select an appropriate version from the dropdown list.
  • Image Pull Policy: By checking Always pull the image, the image will be pulled from the repository each time the workload restarts/upgrades. If unchecked, the local image is used, and the image is pulled from the repository only if it does not exist locally. For more details, refer to Image Pull Policy.
  • Registry Secret: Optional. If the target repository requires a Secret for access, you need to create a secret first (illustrated in the sketch below).
                                                                                          • Privileged Container: By default, the container cannot access any device on the host. After enabling the privileged container, the container can access all devices on the host and has all the privileges of running processes on the host.
                                                                                          • CPU/Memory Request: The request value (the minimum resource needed) and the limit value (the maximum resource allowed) for CPU/memory resources. Configure resources for the container as needed to avoid resource waste and system failures caused by container resource overages. Default values are shown in the figure.
• GPU Configuration: Configure GPU usage for the container; only positive integers are supported. The GPU quota setting supports configuring the container to use an entire GPU exclusively or part of a vGPU. For example, for a GPU with 8 cores, entering 8 means the container uses the entire card exclusively, and entering 1 configures a 1-core vGPU for the container.

                                                                                          Before setting the GPU, the administrator needs to pre-install the GPU and driver plugin on the cluster node and enable the GPU feature in the Cluster Settings.
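
As a sketch, the registry secret and init container options above correspond to the following Pod fields; the secret name and image address are assumptions:

apiVersion: v1
kind: Pod
metadata:
  name: private-image-demo              # illustrative name
spec:
  imagePullSecrets:
    - name: my-registry-secret          # Registry Secret: must be created beforehand (assumed name)
  initContainers:                       # Container Type: init container runs before the work container
    - name: init-check
      image: busybox
      command: ["sh", "-c", "echo init done"]
  containers:
    - name: app
      image: registry.example.com/team/app:1.0   # assumed private registry address
      imagePullPolicy: Always           # "Always pull the image" checked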

                                                                                          Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle Setting.

                                                                                          It is used to judge the health status of containers and applications, which helps to improve the availability of applications. For details, refer to Container Health Check Setting.

                                                                                          Configure container parameters within the Pod, add environment variables or pass setting to the Pod, etc. For details, refer to Container environment variable setting.

                                                                                          Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage Setting.

Containers are securely isolated through Linux's built-in account permission isolation mechanism. You can limit container permissions by using account UIDs (numeric user identifiers) with different permissions. For example, enter 0 to run with the privileges of the root account.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-deployment.html#service-settings","title":"Service settings","text":"

                                                                                          Configure Service for the deployment, so that the deployment can be accessed externally.

                                                                                          1. Click the Create Service button.

                                                                                          2. Refer to Create Service to configure service parameters.

                                                                                          3. Click OK and click Next .

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-deployment.html#advanced-settings","title":"Advanced settings","text":"

Advanced settings include four parts: Network Settings, Upgrade Policy, Scheduling Policies, and Labels and Annotations. You can click the tabs below to view the setting requirements of each part.

Network Settings | Upgrade Policy | Scheduling Policies | Labels and Annotations
                                                                                          1. For container NIC setting, refer to Workload Usage IP Pool
                                                                                          2. DNS setting

                                                                                          In some cases, the application will have redundant DNS queries. Kubernetes provides DNS-related setting options for applications, which can effectively reduce redundant DNS queries and increase business concurrency in certain cases.

                                                                                          • DNS Policy

  • Default: The container uses the domain name resolution file pointed to by the kubelet --resolv-conf parameter. This configuration can only resolve external domain names registered on the Internet, not cluster-internal domain names, and does not generate redundant DNS queries.
  • ClusterFirstWithHostNet: Used by applications running with host networking, so that their Pods can still use the cluster's DNS service.
  • ClusterFirst: The application uses the cluster DNS service (Kube-DNS/CoreDNS).
  • None: A new option value introduced in Kubernetes v1.9 (Beta in v1.10). After setting it to None, dnsConfig must be set; the container's domain name resolution file is then generated entirely from the dnsConfig settings.
                                                                                          • Nameservers: fill in the address of the domain name server, such as 10.6.175.20 .

                                                                                          • Search domains: DNS search domain list for domain name query. When specified, the provided search domain list will be merged into the search field of the domain name resolution file generated based on dnsPolicy, and duplicate domain names will be deleted. Kubernetes allows up to 6 search domains.
                                                                                          • Options: Setting options for DNS, where each object can have a name attribute (required) and a value attribute (optional). The content in this field will be merged into the options field of the domain name resolution file generated based on dnsPolicy. If some options of dnsConfig options conflict with the options of the domain name resolution file generated based on dnsPolicy, they will be overwritten by dnsConfig.
                                                                                          • Host Alias: the alias set for the host.
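
A minimal sketch of how these DNS options map onto a Pod spec (the addresses, search domains, and alias below are illustrative):

apiVersion: v1\nkind: Pod\nmetadata:\n  name: dns-demo\nspec:\n  dnsPolicy: None  # dnsConfig below fully defines name resolution\n  dnsConfig:\n    nameservers:\n      - 10.6.175.20\n    searches:\n      - my.dns.search.suffix\n    options:\n      - name: ndots  # name is required, value is optional\n        value: '2'\n  hostAliases:  # aliases written into the Pod's hosts file\n    - ip: 10.6.175.30\n      hostnames:\n        - demo.local\n  containers:\n    - name: app\n      image: nginx:1.14.2\n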

• Upgrade Mode: Rolling upgrade gradually replaces old-version instances with new-version instances; during the upgrade, business traffic is load-balanced across old and new instances, so the business is not interrupted. Rebuild and upgrade deletes the old-version workload instances first and then installs the specified new version; the business is interrupted during the upgrade.
• Max Unavailable: The maximum number or ratio of unavailable Pods during the workload update process; the default is 25%. If it equals the number of instances, there is a risk of service interruption.
• Max Surge: The maximum number or ratio of Pods that may exceed the desired replica count during an update. The default is 25%.
• Revision History Limit: The number of old versions retained for version rollback. The default is 10.
• Minimum Ready: The minimum time a Pod must be ready before it is considered available. The default is 0 seconds.
• Upgrade Max Duration: If the deployment does not succeed within the set time, the workload is marked as failed. The default is 600 seconds.
• Graceful Period: The execution window (0-9,999 seconds) for commands run before the workload stops; the default is 30 seconds.
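
A Deployment fragment showing where each of these options lives in the API (a sketch using the default values described above):

apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: upgrade-demo\nspec:\n  replicas: 4\n  revisionHistoryLimit: 10       # revision history limit\n  minReadySeconds: 0             # minimum ready time\n  progressDeadlineSeconds: 600   # upgrade max duration\n  strategy:\n    type: RollingUpdate          # or Recreate for rebuild and upgrade\n    rollingUpdate:\n      maxUnavailable: 25%\n      maxSurge: 25%\n  selector:\n    matchLabels:\n      app: demo\n  template:\n    metadata:\n      labels:\n        app: demo\n    spec:\n      terminationGracePeriodSeconds: 30  # graceful period\n      containers:\n        - name: app\n          image: nginx:1.14.2\n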

• Toleration Time: When the node hosting the workload instance becomes unavailable, the time to wait before rescheduling the instance to other available nodes; the default is 300 seconds.
                                                                                          • Node Affinity: According to the label on the node, constrain which nodes the Pod can be scheduled on.
                                                                                          • Workload Affinity: Constrains which nodes a Pod can be scheduled to based on the labels of the Pods already running on the node.
                                                                                          • Workload Anti-affinity: Constrains which nodes a Pod cannot be scheduled to based on the labels of Pods already running on the node.

                                                                                          For details, refer to Scheduling Policy.
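
A sketch of the corresponding Pod-spec fields (the labels, the ssd node label, and the 300-second toleration are illustrative):

apiVersion: v1\nkind: Pod\nmetadata:\n  name: scheduling-demo\n  labels:\n    app: demo\nspec:\n  tolerations:\n    - key: node.kubernetes.io/unreachable  # reschedule delay when the node is unavailable\n      operator: Exists\n      effect: NoExecute\n      tolerationSeconds: 300\n  affinity:\n    nodeAffinity:  # constrain scheduling by node labels\n      requiredDuringSchedulingIgnoredDuringExecution:\n        nodeSelectorTerms:\n          - matchExpressions:\n              - key: disktype\n                operator: In\n                values:\n                  - ssd\n    podAntiAffinity:  # avoid nodes already running matching Pods\n      requiredDuringSchedulingIgnoredDuringExecution:\n        - labelSelector:\n            matchLabels:\n              app: demo\n          topologyKey: kubernetes.io/hostname\n  containers:\n    - name: app\n      image: nginx:1.14.2\n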

You can click the Add button to add labels and annotations to workloads and pods.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-deployment.html#create-from-yaml","title":"Create from YAML","text":"

In addition to creating from an image, you can also create deployments more quickly from YAML files.

                                                                                          1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the Cluster Details page.

                                                                                          2. On the cluster details page, click Workloads -> Deployments in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

                                                                                          3. Enter or paste the YAML file prepared in advance, click OK to complete the creation.

                                                                                          Click to see an example YAML for creating a deployment
 apiVersion: apps/v1\n kind: Deployment\n metadata:\n   name: nginx-deployment\n spec:\n   selector:\n     matchLabels:\n       app: nginx\n   replicas: 2 # (1)!\n   template:\n     metadata:\n       labels:\n         app: nginx\n     spec:\n       containers:\n       - name: nginx\n         image: nginx:1.14.2\n         ports:\n         - containerPort: 80\n
                                                                                          1. Tell the Deployment to run 2 Pods that match this template
                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-job.html","title":"Create Job","text":"

This page introduces how to create a job through an image or a YAML file.

Jobs are suitable for performing one-off tasks. A Job creates one or more Pods and keeps retrying to run them until a specified number of Pods terminate successfully, at which point the Job ends. Deleting a Job cleans up all Pods it created. Pausing a Job deletes all of its active Pods until the Job is resumed. For more information about jobs, refer to Job.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-job.html#prerequisites","title":"Prerequisites","text":"
• In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and ensure that you can access the cluster's UI.

                                                                                          • Create a namespace and a user.

                                                                                          • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                                                                          • When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict, otherwise the deployment will fail.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-job.html#create-by-image","title":"Create by image","text":"

                                                                                          Refer to the following steps to create a job using an image.

                                                                                          1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                                                                          2. On the cluster details page, click Workloads -> Jobs in the left navigation bar, and then click the Create by Image button in the upper right corner of the page.

                                                                                          3. Fill in Basic Information, Container Settings and Advanced Settings, click OK in the lower right corner of the page to complete the creation.

                                                                                            The system will automatically return to the job list. Click \u2507 on the right side of the list to perform operations such as updating, deleting, and restarting the job.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-job.html#basic-information","title":"Basic information","text":"

                                                                                          On the Create Jobs page, enter the basic information according to the table below, and click Next .

• Workload Name: Can contain up to 63 characters; only lowercase letters, numbers, and the separator (\"-\") are allowed, and the name must start and end with a lowercase letter or number. Workloads of the same type in the same namespace cannot share a name, and the name cannot be changed after the workload is created.
• Namespace: Select the namespace in which the new job will be deployed; the default namespace is used by default. If you can't find the desired namespace, you can create a new one according to the prompt on the page.
• Number of Instances: Enter the number of Pod instances for the workload. By default, 1 Pod instance is created.
• Description: Enter a description of the workload; the content is customizable and must not exceed 512 characters.
                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-job.html#container-settings","title":"Container settings","text":"

Container settings are divided into six parts: basic information, lifecycle, health check, environment variables, data storage, and security settings. Click the tabs below to view the setting requirements of each part.

Container settings apply to a single container. To add multiple containers to a Pod, click + on the right.

Basic information (required) | Lifecycle (optional) | Health Check (optional) | Environment Variables (optional) | Data Storage (optional) | Security Settings (optional)

                                                                                          When configuring container-related parameters, you must correctly fill in the container name and image parameters, otherwise you will not be able to proceed to the next step. After filling in the settings with reference to the following requirements, click OK .

• Container Name: Up to 63 characters; lowercase letters, numbers, and separators (\"-\") are supported. Must start and end with a lowercase letter or number, e.g. nginx-01.
                                                                                          • Image: Enter the address or name of the image. When entering the image name, the image will be pulled from the official DockerHub by default.
                                                                                          • Image Pull Policy: After checking Always pull image , the image will be pulled from the registry every time the workload restarts/upgrades. If it is not checked, only the local image will be pulled, and only when the image does not exist locally, it will be re-pulled from the container registry. For more details, refer to Image Pull Policy.
• Privileged container: By default, the container cannot access any device on the host. After enabling privileged mode, the container can access all devices on the host and has all the privileges of processes running on the host.
                                                                                          • CPU/Memory Quota: Requested value (minimum resource to be used) and limit value (maximum resource allowed to be used) of CPU/Memory resource. Please configure resources for containers as needed to avoid resource waste and system failures caused by excessive container resources. The default value is shown in the figure.
• GPU Exclusive: Configure the GPU usage for the container; only positive integers are supported. The GPU quota supports assigning the container either an entire GPU or part of a vGPU. For example, for an 8-core GPU, enter 8 to let the container use the entire card exclusively, or enter 1 to configure a 1-core vGPU for the container.

                                                                                          Before setting exclusive GPU, the administrator needs to install the GPU and driver plug-in on the cluster nodes in advance, and enable the GPU feature in Cluster Settings.
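
A sketch of the resulting container resources block. The GPU resource name depends on the device plugin installed by the administrator; nvidia.com/gpu below is an assumption for the NVIDIA plugin:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: quota-demo\nspec:\n  containers:\n    - name: nginx-01\n      image: nginx:1.14.2\n      resources:\n        requests:\n          cpu: 250m        # minimum resources to be used\n          memory: 512Mi\n        limits:\n          cpu: '1'         # maximum resources allowed\n          memory: 1Gi\n          nvidia.com/gpu: 1  # assumed resource name exposed by the GPU device plugin\n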

                                                                                          Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle settings.

                                                                                          It is used to judge the health status of containers and applications, which helps to improve the availability of applications. For details, refer to Container Health Check settings.

                                                                                          Configure container parameters within the Pod, add environment variables or pass settings to the Pod, etc. For details, refer to Container environment variable settings.

                                                                                          Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage settings.

Containers are securely isolated through Linux's built-in account permission isolation mechanism. You can limit container permissions by specifying account UIDs (digital identity tokens) with different privileges. For example, enter 0 to use the privileges of the root account.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-job.html#advanced-settings","title":"Advanced settings","text":"

                                                                                          Advanced setting includes job settings, labels and annotations.

Job Settings | Labels and Annotations

• Parallel Pods: The maximum number of Pods that can run at the same time during job execution; the parallelism should not be greater than the total number of Pods. The default is 1.
• Timeout: When this time is exceeded, the job is marked as failed and all Pods under the job are deleted. Leave it empty to set no timeout.
• Restart Policy: Whether to restart the Pod when it fails.
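
These fields map onto the Job spec as follows (a minimal sketch; the 100-second timeout is illustrative):

apiVersion: batch/v1\nkind: Job\nmetadata:\n  name: job-demo\nspec:\n  parallelism: 1              # Parallel Pods\n  activeDeadlineSeconds: 100  # Timeout; omit to set no timeout\n  template:\n    spec:\n      restartPolicy: Never    # or OnFailure to restart Pods that fail\n      containers:\n        - name: worker\n          image: busybox\n          command: ['sh', '-c', 'echo done']\n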

                                                                                          You can click the Add button to add labels and annotations to the workload instance Pod.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-job.html#create-from-yaml","title":"Create from YAML","text":"

In addition to creating from an image, jobs can also be created more quickly from YAML files.

                                                                                          1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the cluster details page.

                                                                                          2. On the cluster details page, click Workloads -> Jobs in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

                                                                                          3. Enter or paste the YAML file prepared in advance, click OK to complete the creation.

                                                                                          Click to view the complete YAML
                                                                                          kind: Job\napiVersion: batch/v1\nmetadata:\n  name: demo\n  namespace: default\n  uid: a9708239-0358-4aa1-87d3-a092c080836e\n  resourceVersion: '92751876'\n  generation: 1\n  creationTimestamp: '2022-12-26T10:52:22Z'\n  labels:\n    app: demo\n    controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n    job-name: demo\n  annotations:\n    revisions: >-\n      {\"1\":{\"status\":\"running\",\"uid\":\"a9708239-0358-4aa1-87d3-a092c080836e\",\"start-time\":\"2022-12-26T10:52:22Z\",\"completion-time\":\"0001-01-01T00:00:00Z\"}}\nspec:\n  parallelism: 1\n  backoffLimit: 6\n  selector:\n    matchLabels:\n      controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app: demo\n        controller-uid: a9708239-0358-4aa1-87d3-a092c080836e\n        job-name: demo\n    spec:\n      containers:\n        - name: container-4\n          image: nginx\n          resources:\n            limits:\n              cpu: 250m\n              memory: 512Mi\n            requests:\n              cpu: 250m\n              memory: 512Mi\n          lifecycle: {}\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n          securityContext:\n            privileged: false\n      restartPolicy: Never\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      securityContext: {}\n      schedulerName: default-scheduler\n  completionMode: NonIndexed\n  suspend: false\nstatus:\n  startTime: '2022-12-26T10:52:22Z'\n  active: 1\n
                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-statefulset.html","title":"Create StatefulSet","text":"

This page describes how to create a StatefulSet through an image or a YAML file.

StatefulSet, like Deployment, is a common resource in Kubernetes, mainly used to manage the deployment and scaling of Pod collections. The main difference between the two is that a Deployment is stateless and does not persist data, while a StatefulSet is stateful and is mainly used to manage stateful applications. In addition, Pods in a StatefulSet have a persistent, stable ID, which makes it easy to match each Pod with its storage volumes.

Through the container management module of AI platform, workloads across multiple clouds and clusters can be easily managed with proper role permissions, covering the full lifecycle of StatefulSets: creation, update, deletion, elastic scaling, restart, version rollback, and more.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-statefulset.html#prerequisites","title":"Prerequisites","text":"

Before creating a StatefulSet from an image, the following prerequisites must be met:

• In the Container Management module, Integrate Kubernetes Cluster or Create Kubernetes Cluster, and ensure that you can access the cluster's UI.

                                                                                          • Create a namespace and a user.

                                                                                          • The current operating user should have NS Editor or higher permissions, for details, refer to Namespace Authorization.

                                                                                          • When there are multiple containers in a single instance, please make sure that the ports used by the containers do not conflict, otherwise the deployment will fail.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-statefulset.html#create-by-image","title":"Create by image","text":"

Follow the steps below to create a StatefulSet using an image.

                                                                                          1. Click Clusters on the left navigation bar, then click the name of the target cluster to enter Cluster Details.

                                                                                          2. Click Workloads -> StatefulSets in the left navigation bar, and then click the Create by Image button in the upper right corner.

                                                                                          3. Fill in Basic Information, Container Settings, Service Settings, Advanced Settings, click OK in the lower right corner of the page to complete the creation.

                                                                                            The system will automatically return to the list of StatefulSets , and wait for the status of the workload to become running . If the workload status is abnormal, refer to Workload Status for specific exception information.

                                                                                            Click \u2507 on the right side of the New Workload column to perform operations such as update, delete, elastic scaling, restart, and version rollback on the workload.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-statefulset.html#basic-information","title":"Basic Information","text":"
• Workload Name: Can contain up to 63 characters; only lowercase letters, numbers, and the separator (\"-\") are allowed, and the name must start and end with a lowercase letter or number, such as deployment-01. Workloads of the same type in the same namespace cannot share a name, and the name cannot be changed after the workload is created.
• Namespace: Select the namespace in which the new workload will be deployed; the default namespace is used by default. If you can't find the desired namespace, you can create a new one according to the prompt on the page.
• Pods: Enter the number of Pod instances for the workload. By default, 1 Pod instance is created.
• Description: Enter a description of the workload; the content is customizable and must not exceed 512 characters.
                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-statefulset.html#container-settings","title":"Container settings","text":"

Container settings are divided into six parts: basic information, lifecycle, health check, environment variables, data storage, and security settings. Click the tabs below to view the requirements of each part.

Container settings apply to a single container. To add multiple containers to a Pod, click + on the right.

Basic information (required) | Lifecycle (optional) | Health Check (optional) | Environment Variables (optional) | Data Storage (optional) | Security Settings (optional)

                                                                                          When configuring container-related parameters, you must correctly fill in the container name and image parameters, otherwise you will not be able to proceed to the next step. After filling in the settings with reference to the following requirements, click OK .

• Container Name: Up to 63 characters; lowercase letters, numbers, and separators (\"-\") are supported. Must start and end with a lowercase letter or number, e.g. nginx-01.
                                                                                          • Image: Enter the address or name of the image. When entering the image name, the image will be pulled from the official DockerHub by default.
                                                                                          • Image Pull Policy: After checking Always pull image , the image will be pulled from the registry every time the workload restarts/upgrades. If it is not checked, only the local image will be pulled, and only when the image does not exist locally, it will be re-pulled from the container registry. For more details, refer to Image Pull Policy.
• Privileged container: By default, the container cannot access any device on the host. After enabling privileged mode, the container can access all devices on the host and has all the privileges of processes running on the host.
                                                                                          • CPU/Memory Quota: Requested value (minimum resource to be used) and limit value (maximum resource allowed to be used) of CPU/Memory resource. Please configure resources for containers as needed to avoid resource waste and system failures caused by excessive container resources. The default value is shown in the figure.
• GPU Exclusive: Configure the GPU usage for the container; only positive integers are supported. The GPU quota supports assigning the container either an entire GPU or part of a vGPU. For example, for an 8-core GPU, enter 8 to let the container use the entire card exclusively, or enter 1 to configure a 1-core vGPU for the container.

                                                                                          Before setting exclusive GPU, the administrator needs to install the GPU and driver plug-in on the cluster nodes in advance, and enable the GPU feature in Cluster Settings.

                                                                                          Set the commands that need to be executed when the container starts, after starting, and before stopping. For details, refer to Container Lifecycle Configuration.

Health checks determine the health status of containers and applications, helping improve application availability. For details, refer to Container Health Check Configuration.

                                                                                          Configure container parameters within the Pod, add environment variables or pass settings to the Pod, etc. For details, refer to Container environment variable settings.

                                                                                          Configure the settings for container mounting data volumes and data persistence. For details, refer to Container Data Storage Configuration.

Containers are securely isolated through Linux's built-in account permission isolation mechanism. You can limit container permissions by specifying account UIDs (digital identity tokens) with different privileges. For example, enter 0 to use the privileges of the root account.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-statefulset.html#service-settings","title":"Service settings","text":"

Configure a Service for the StatefulSet so that it can be accessed externally.

                                                                                          1. Click the Create Service button.

                                                                                          2. Refer to Create Service to configure service parameters.

                                                                                          3. Click OK and click Next .

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-statefulset.html#advanced-settings","title":"Advanced settings","text":"

Advanced settings include five parts: network settings, upgrade policy, container management policies, scheduling policies, and labels and annotations. You can click the tabs below to view the requirements of each part.

Network Configuration | Upgrade Policy | Container Management Policies | Scheduling Policies | Labels and Annotations
                                                                                          1. For container NIC settings, refer to Workload Usage IP Pool
                                                                                          2. DNS settings

                                                                                          In some cases, the application will have redundant DNS queries. Kubernetes provides DNS-related settings options for applications, which can effectively reduce redundant DNS queries and increase business concurrency in certain cases.

                                                                                          • DNS Policy

• Default: The container uses the domain name resolution file pointed to by the kubelet --resolv-conf parameter. This setting can only resolve external domain names registered on the Internet, not cluster-internal domain names; there are no invalid DNS queries.
• ClusterFirstWithHostNet: For applications running with hostNetwork, DNS resolution follows the cluster's configuration.
• ClusterFirst: The application uses the cluster's Kube-DNS/CoreDNS for resolution.
• None: New option value introduced in Kubernetes v1.9 (Beta in v1.10). After setting it to None, dnsConfig must be set; the container's domain name resolution file is then generated entirely from the dnsConfig settings.
                                                                                          • Nameservers: fill in the address of the domain name server, such as 10.6.175.20 .

                                                                                          • Search domains: DNS search domain list for domain name query. When specified, the provided search domain list will be merged into the search field of the domain name resolution file generated based on dnsPolicy, and duplicate domain names will be deleted. Kubernetes allows up to 6 search domains.
                                                                                          • Options: Configuration options for DNS, where each object can have a name attribute (required) and a value attribute (optional). The content in this field will be merged into the options field of the domain name resolution file generated based on dnsPolicy. If some options of dnsConfig options conflict with the options of the domain name resolution file generated based on dnsPolicy, they will be overwritten by dnsConfig.
                                                                                          • Host Alias: the alias set for the host.

• Upgrade Mode: Rolling upgrade gradually replaces old-version instances with new-version instances; during the upgrade, business traffic is load-balanced across old and new instances, so the business is not interrupted. Rebuild and upgrade deletes the old-version workload instances first and then installs the specified new version; the business is interrupted during the upgrade.
• Revision History Limit: The number of old versions retained for version rollback. The default is 10.
• Graceful Period: The execution window (0-9,999 seconds) for commands run before the workload stops; the default is 30 seconds.

                                                                                          Kubernetes v1.7 and later versions can set Pod management policies through .spec.podManagementPolicy , which supports the following two methods:

• OrderedReady : The default Pod management policy. Pods are deployed in order: the StatefulSet deploys the next Pod only after the previous one has been successfully deployed. Pods are deleted in reverse order, with the last created deleted first.

• Parallel : Create or delete Pods in parallel, just like Pods of the Deployment type. The StatefulSet controller starts or terminates all Pods in parallel, without waiting for a Pod to enter the Running and Ready state or to stop completely before starting or terminating other Pods. This option only affects the behavior of scaling operations, not the order of updates.
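
A minimal StatefulSet sketch showing where the policy is set (names are illustrative):

apiVersion: apps/v1\nkind: StatefulSet\nmetadata:\n  name: pmp-demo\nspec:\n  podManagementPolicy: Parallel  # default is OrderedReady\n  serviceName: pmp-demo\n  replicas: 3\n  selector:\n    matchLabels:\n      app: pmp-demo\n  template:\n    metadata:\n      labels:\n        app: pmp-demo\n    spec:\n      containers:\n        - name: app\n          image: nginx:1.14.2\n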

• Toleration Time: When the node hosting the workload instance becomes unavailable, the time to wait before rescheduling the instance to other available nodes; the default is 300 seconds.
                                                                                          • Node affinity: According to the label on the node, constrain which nodes the Pod can be scheduled on.
                                                                                          • Workload Affinity: Constrains which nodes a Pod can be scheduled to based on the labels of the Pods already running on the node.
                                                                                          • Workload anti-affinity: Constrains which nodes a Pod cannot be scheduled to based on the labels of Pods already running on the node.
• Topology Domain: That is, topologyKey, used to specify a group of nodes that can be scheduled. For example, kubernetes.io/os means that as long as a node running the corresponding operating system meets the labelSelector conditions, Pods can be scheduled to that node.

                                                                                          For details, refer to Scheduling Policy.

You can click the Add button to add labels and annotations to workloads and pods.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/create-statefulset.html#create-from-yaml","title":"Create from YAML","text":"

In addition to creating from an image, you can also create StatefulSets more quickly from YAML files.

                                                                                          1. Click Clusters on the left navigation bar, and then click the name of the target cluster to enter the Cluster Details page.

                                                                                          2. On the cluster details page, click Workloads -> StatefulSets in the left navigation bar, and then click the Create from YAML button in the upper right corner of the page.

                                                                                          3. Enter or paste the YAML file prepared in advance, click OK to complete the creation.

Click to see an example YAML for creating a StatefulSet
kind: StatefulSet\napiVersion: apps/v1\nmetadata:\n  name: test-mysql-123-mysql\n  namespace: default\n  uid: d3f45527-a0ab-4b22-9013-5842a06f4e0e\n  resourceVersion: '20504385'\n  generation: 1\n  creationTimestamp: '2022-09-22T09:34:10Z'\n  ownerReferences:\n    - apiVersion: mysql.presslabs.org/v1alpha1\n      kind: MysqlCluster\n      name: test-mysql-123\n      uid: 5e877cc3-5167-49da-904e-820940cf1a6d\n      controller: true\n      blockOwnerDeletion: true\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app.kubernetes.io/managed-by: mysql.presslabs.org\n      app.kubernetes.io/name: mysql\n      mysql.presslabs.org/cluster: test-mysql-123\n  template:\n    metadata:\n      creationTimestamp: null\n      labels:\n        app.kubernetes.io/component: database\n        app.kubernetes.io/instance: test-mysql-123\n        app.kubernetes.io/managed-by: mysql.presslabs.org\n        app.kubernetes.io/name: mysql\n        app.kubernetes.io/version: 5.7.31\n        mysql.presslabs.org/cluster: test-mysql-123\n      annotations:\n        config_rev: '13941099'\n        prometheus.io/port: '9125'\n        prometheus.io/scrape: 'true'\n        secret_rev: '13941101'\n    spec:\n      volumes:\n        - name: conf\n          emptyDir: {}\n        - name: init-scripts\n          emptyDir: {}\n        - name: config-map\n          configMap:\n            name: test-mysql-123-mysql\n            defaultMode: 420\n        - name: data\n          persistentVolumeClaim:\n            claimName: data\n      initContainers:\n        - name: init\n          image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - clone-and-init\n          envFrom:\n            - secretRef:\n                name: test-mysql-123-mysql-operated\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: BACKUP_USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: BACKUP_USER\n                  optional: true\n            - name: BACKUP_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: BACKUP_PASSWORD\n                  optional: true\n          resources: {}\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: config-map\n              mountPath: /mnt/conf\n            - name: data\n              mountPath: /var/lib/mysql\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          
imagePullPolicy: IfNotPresent\n      containers:\n        - name: mysql\n          image: docker.m.daocloud.io/mysql:5.7.31\n          ports:\n            - name: mysql\n              containerPort: 3306\n              protocol: TCP\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: ORCH_CLUSTER_ALIAS\n              value: test-mysql-123.default\n            - name: ORCH_HTTP_API\n              value: http://mysql-operator.mcamel-system/api\n            - name: MYSQL_ROOT_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: ROOT_PASSWORD\n                  optional: false\n            - name: MYSQL_USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: USER\n                  optional: true\n            - name: MYSQL_PASSWORD\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: PASSWORD\n                  optional: true\n            - name: MYSQL_DATABASE\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-secret\n                  key: DATABASE\n                  optional: true\n          resources:\n            limits:\n              cpu: '1'\n              memory: 1Gi\n            requests:\n              cpu: 100m\n              memory: 512Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: data\n              mountPath: /var/lib/mysql\n          livenessProbe:\n            exec:\n              command:\n                - mysqladmin\n                - '--defaults-file=/etc/mysql/client.conf'\n                - ping\n            initialDelaySeconds: 60\n            timeoutSeconds: 5\n            periodSeconds: 5\n            successThreshold: 1\n            failureThreshold: 3\n          readinessProbe:\n            exec:\n              command:\n                - /bin/sh\n                - '-c'\n                - >-\n                  test $(mysql --defaults-file=/etc/mysql/client.conf -NB -e\n                  'SELECT COUNT(*) FROM sys_operator.status WHERE\n                  name=\"configured\" AND value=\"1\"') -eq 1\n            initialDelaySeconds: 5\n            timeoutSeconds: 5\n            periodSeconds: 2\n            successThreshold: 1\n            failureThreshold: 3\n          lifecycle:\n            preStop:\n              exec:\n                command:\n                  - bash\n                  - /etc/mysql/pre-shutdown-ha.sh\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - 
name: sidecar\n          image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - config-and-serve\n          ports:\n            - name: sidecar-http\n              containerPort: 8080\n              protocol: TCP\n          envFrom:\n            - secretRef:\n                name: test-mysql-123-mysql-operated\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: XTRABACKUP_TARGET_DIR\n              value: /tmp/xtrabackup_backupfiles/\n          resources:\n            limits:\n              cpu: '1'\n              memory: 1Gi\n            requests:\n              cpu: 10m\n              memory: 64Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n            - name: data\n              mountPath: /var/lib/mysql\n          readinessProbe:\n            httpGet:\n              path: /health\n              port: 8080\n              scheme: HTTP\n            initialDelaySeconds: 30\n            timeoutSeconds: 5\n            periodSeconds: 5\n            successThreshold: 1\n            failureThreshold: 3\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: metrics-exporter\n          image: prom/mysqld-exporter:v0.13.0\n          args:\n            - '--web.listen-address=0.0.0.0:9125'\n            - '--web.telemetry-path=/metrics'\n            - '--collect.heartbeat'\n            - '--collect.heartbeat.database=sys_operator'\n          ports:\n            - name: prometheus\n              containerPort: 9125\n              protocol: TCP\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n            - name: USER\n              valueFrom:\n                secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: METRICS_EXPORTER_USER\n                  optional: false\n            - name: PASSWORD\n              valueFrom:\n                
secretKeyRef:\n                  name: test-mysql-123-mysql-operated\n                  key: METRICS_EXPORTER_PASSWORD\n                  optional: false\n            - name: DATA_SOURCE_NAME\n              value: $(USER):$(PASSWORD)@(127.0.0.1:3306)/\n          resources:\n            limits:\n              cpu: 100m\n              memory: 128Mi\n            requests:\n              cpu: 10m\n              memory: 32Mi\n          livenessProbe:\n            httpGet:\n              path: /metrics\n              port: 9125\n              scheme: HTTP\n            initialDelaySeconds: 30\n            timeoutSeconds: 30\n            periodSeconds: 30\n            successThreshold: 1\n            failureThreshold: 3\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n        - name: pt-heartbeat\n          image: docker.m.daocloud.io/bitpoke/mysql-operator-sidecar-5.7:v0.6.1\n          args:\n            - pt-heartbeat\n            - '--update'\n            - '--replace'\n            - '--check-read-only'\n            - '--create-table'\n            - '--database'\n            - sys_operator\n            - '--table'\n            - heartbeat\n            - '--utc'\n            - '--defaults-file'\n            - /etc/mysql/heartbeat.conf\n            - '--fail-successive-errors=20'\n          env:\n            - name: MY_NAMESPACE\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.namespace\n            - name: MY_POD_NAME\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: metadata.name\n            - name: MY_POD_IP\n              valueFrom:\n                fieldRef:\n                  apiVersion: v1\n                  fieldPath: status.podIP\n            - name: MY_SERVICE_NAME\n              value: mysql\n            - name: MY_CLUSTER_NAME\n              value: test-mysql-123\n            - name: MY_FQDN\n              value: $(MY_POD_NAME).$(MY_SERVICE_NAME).$(MY_NAMESPACE)\n            - name: MY_MYSQL_VERSION\n              value: 5.7.31\n          resources:\n            limits:\n              cpu: 100m\n              memory: 64Mi\n            requests:\n              cpu: 10m\n              memory: 32Mi\n          volumeMounts:\n            - name: conf\n              mountPath: /etc/mysql\n          terminationMessagePath: /dev/termination-log\n          terminationMessagePolicy: File\n          imagePullPolicy: IfNotPresent\n      restartPolicy: Always\n      terminationGracePeriodSeconds: 30\n      dnsPolicy: ClusterFirst\n      securityContext:\n        runAsUser: 999\n        fsGroup: 999\n      affinity:\n        podAntiAffinity:\n          preferredDuringSchedulingIgnoredDuringExecution:\n            - weight: 100\n              podAffinityTerm:\n                labelSelector:\n                  matchLabels:\n                    app.kubernetes.io/component: database\n                    app.kubernetes.io/instance: test-mysql-123\n                    app.kubernetes.io/managed-by: mysql.presslabs.org\n                    app.kubernetes.io/name: mysql\n                    app.kubernetes.io/version: 5.7.31\n                    mysql.presslabs.org/cluster: test-mysql-123\n                topologyKey: kubernetes.io/hostname\n      schedulerName: default-scheduler\n  volumeClaimTemplates:\n    - kind: PersistentVolumeClaim\n      apiVersion: v1\n      
metadata:\n        name: data\n        creationTimestamp: null\n        ownerReferences:\n          - apiVersion: mysql.presslabs.org/v1alpha1\n            kind: MysqlCluster\n            name: test-mysql-123\n            uid: 5e877cc3-5167-49da-904e-820940cf1a6d\n            controller: true\n      spec:\n        accessModes:\n          - ReadWriteOnce\n        resources:\n          limits:\n            storage: 1Gi\n          requests:\n            storage: 1Gi\n        storageClassName: local-path\n        volumeMode: Filesystem\n      status:\n        phase: Pending\n  serviceName: mysql\n  podManagementPolicy: OrderedReady\n  updateStrategy:\n    type: RollingUpdate\n    rollingUpdate:\n      partition: 0\n  revisionHistoryLimit: 10\nstatus:\n  observedGeneration: 1\n  replicas: 1\n  readyReplicas: 1\n  currentReplicas: 1\n  updatedReplicas: 1\n  currentRevision: test-mysql-123-mysql-6b8f5577c7\n  updateRevision: test-mysql-123-mysql-6b8f5577c7\n  collisionCount: 0\n  availableReplicas: 1\n
                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/env-variables.html","title":"Configure environment variables","text":"

An environment variable is a variable set in the container's running environment, used to add environment flags to a Pod or pass configuration to it. Environment variables can be configured for Pods in the form of key-value pairs.

                                                                                          Suanova container management adds a graphical interface to configure environment variables for Pods on the basis of native Kubernetes, and supports the following configuration methods:

• Key-value pair (Key/Value Pair): use a custom key-value pair as an environment variable of the container

• Resource reference (Resource): use a field defined by the container, such as its memory limit or number of replicas, as the value of an environment variable

• Variable reference (Pod Field): use a field of the Pod, such as the Pod's name, as the value of an environment variable

• ConfigMap key import (ConfigMap Key): import the value of a single key in a ConfigMap as the value of an environment variable

• Secret key import (Secret Key): import the value of a single key in a Secret as the value of an environment variable

• Secret import (Secret): import all key-value pairs in a Secret as environment variables

• ConfigMap import (ConfigMap): import all key-value pairs in a ConfigMap as environment variables; the sketch below shows how these methods map to Pod YAML
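A minimal sketch of the corresponding Pod spec; the image, variable names, and the demo-config / demo-secret objects are illustrative assumptions, not values required by the platform:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: env-demo\nspec:\n  containers:\n  - name: app\n    image: nginx:1.25  # illustrative image\n    env:\n    - name: GREETING  # key-value pair\n      value: hello\n    - name: CPU_LIMIT  # resource reference\n      valueFrom:\n        resourceFieldRef:\n          containerName: app\n          resource: limits.cpu\n    - name: POD_NAME  # Pod field reference\n      valueFrom:\n        fieldRef:\n          fieldPath: metadata.name\n    - name: LOG_LEVEL  # ConfigMap key import\n      valueFrom:\n        configMapKeyRef:\n          name: demo-config\n          key: log_level\n    - name: DB_PASSWORD  # Secret key import\n      valueFrom:\n        secretKeyRef:\n          name: demo-secret\n          key: password\n    envFrom:\n    - configMapRef:\n        name: demo-config  # ConfigMap import: all keys become variables\n    - secretRef:\n        name: demo-secret  # Secret import: all keys become variables\n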

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/health-check.html","title":"Container health check","text":"

Container health checks probe the health of containers according to your configuration. When a configured check fails, the container can be restarted automatically to recover. Kubernetes provides liveness checks, readiness checks, and startup checks:

• Liveness check (LivenessProbe) detects application deadlock (the application is running but cannot make further progress). Restarting a container in this state helps improve application availability even when the application has bugs.

• Readiness check (ReadinessProbe) detects when a container is ready to accept traffic. A Pod is considered ready only when all of its containers are ready. One use of this signal is to decide which Pods serve as backends for a Service: Pods that are not ready are removed from the Service's load balancer.

• Startup check (StartupProbe) reports when an application container has started. When configured, liveness and readiness checks begin only after startup succeeds, so they do not interfere with application startup. This is useful for liveness-checking slow-starting containers and prevents them from being killed before they are up and running.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/health-check.html#liveness-and-readiness-checks","title":"Liveness and readiness checks","text":"

LivenessProbe and ReadinessProbe are configured in the same way; the only difference is using the readinessProbe field instead of the livenessProbe field.

                                                                                          HTTP GET parameter description:

• Path (path): the request path for the probe, e.g. /healthz in the example below.
• Port (port): the port the service listens on, e.g. 8080 in the example below.
• Protocol (scheme): the access protocol, HTTP or HTTPS.
• Delay time (initialDelaySeconds): delay before the first check, in seconds; set it according to the normal startup time of your application. For example, a value of 30 starts the health check 30 seconds after the container starts, leaving time for the application to come up.
• Timeout (timeoutSeconds): timeout, in seconds. For example, a value of 10 means the probe waits up to 10 seconds for a response; beyond that the check counts as a failure. If set to 0 or unset, the default timeout is 1 second.
• Success threshold (successThreshold): the minimum number of consecutive successes after a failure for the probe to be considered successful. The default and minimum value is 1; this value must be 1 for liveness and startup probes.
• Maximum number of failures (failureThreshold): the number of retries when the probe fails. Giving up on a liveness probe restarts the container; Pods given up on by a readiness probe are marked not ready. The default is 3 and the minimum is 1."},{"location":"en/end-user/kpanda/workloads/pod-config/health-check.html#check-with-http-get-request","title":"Check with HTTP GET request","text":"

                                                                                          YAML example:

                                                                                          apiVersion: v1\nkind: Pod\nmetadata:\n  labels:\n    test: liveness\n  name: liveness-http\nspec:\n  containers:\n  - name: liveness  # Container name\n    image: k8s.gcr.io/liveness  # Container image\n    args:\n    - /server  # Arguments to pass to the container\n    livenessProbe:\n      httpGet:\n        path: /healthz  # Access request path\n        port: 8080  # Service listening port\n        httpHeaders:\n        - name: Custom-Header  # Custom header name\n          value: Awesome  # Custom header value\n      initialDelaySeconds: 3  # Wait 3 seconds before the first probe\n      periodSeconds: 3  # Perform liveness detection every 3 seconds\n

Per the configured rules, the kubelet sends an HTTP GET request to the service running in the container (listening on port 8080) to perform the check. If the handler for the /healthz path on the server returns a success code, the kubelet considers the container alive; if it returns a failure code, the kubelet kills the container and restarts it. Any return code greater than or equal to 200 and less than 400 indicates success, and any other return code indicates failure. The /healthz handler in this example returns a 200 status code for the first 10 seconds of the container's lifetime and a 500 status code thereafter.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/health-check.html#use-tcp-port-check","title":"Use TCP port check","text":"

                                                                                          TCP port parameter description:

• Port (port): the port the service listens on, e.g. 8080 in the example below.
• Delay time (initialDelaySeconds): delay before the first check, in seconds; set it according to the normal startup time of your application. For example, a value of 30 starts the health check 30 seconds after the container starts, leaving time for the application to come up.
• Timeout (timeoutSeconds): timeout, in seconds. For example, a value of 10 means the probe waits up to 10 seconds for a response; beyond that the check counts as a failure. If set to 0 or unset, the default timeout is 1 second.

For a container that provides a TCP service, the cluster establishes a TCP connection to the container according to the configured rules. If the connection succeeds, the check passes; otherwise it fails. When you choose TCP port detection, you must specify the port the container listens on.

                                                                                          YAML example:

                                                                                          apiVersion: v1\nkind: Pod\nmetadata:\n  name: goproxy\n  labels:\n    app: goproxy\nspec:\n  containers:\n  - name: goproxy\n    image: k8s.gcr.io/goproxy:0.1\n    ports:\n    - containerPort: 8080\n    readinessProbe:\n      tcpSocket:\n        port: 8080\n      initialDelaySeconds: 5\n      periodSeconds: 10\n    livenessProbe:\n      tcpSocket:\n        port: 8080\n      initialDelaySeconds: 15\n      periodSeconds: 20\n

This example uses both readiness and liveness probes. The kubelet sends the first readiness probe 5 seconds after the container starts, attempting to connect to port 8080 of the goproxy container. If the probe succeeds, the Pod is marked ready, and the kubelet continues to run the check every 10 seconds.

In addition to the readiness probe, this configuration includes a liveness probe. The kubelet performs the first liveness probe 15 seconds after the container starts. Like the readiness probe, it attempts to connect to the goproxy container on port 8080. If the liveness probe fails, the container is restarted.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/health-check.html#run-command-check","title":"Run command check","text":"

                                                                                          YAML example:

                                                                                          apiVersion: v1\nkind: Pod\nmetadata:\n  labels:\n    test: liveness\n  name: liveness-exec\nspec:\n  containers:\n  - name: liveness  # Container name\n    image: k8s.gcr.io/busybox  # Container image\n    args:\n    - /bin/sh  # Command to run\n    - -c  # Pass the following string as a command\n    - touch /tmp/healthy; sleep 30; rm -f /tmp/healthy; sleep 600  # Command to execute\n    livenessProbe:\n      exec:\n        command:\n        - cat  # Command to check liveness\n        - /tmp/healthy  # File to check\n      initialDelaySeconds: 5  # Wait 5 seconds before the first probe\n      periodSeconds: 5  # Perform liveness detection every 5 seconds\n

The periodSeconds field specifies that the kubelet performs a liveness probe every 5 seconds, and initialDelaySeconds tells the kubelet to wait 5 seconds before the first probe. Per the configured rules, the kubelet periodically executes the command cat /tmp/healthy in the container. If the command succeeds and returns 0, the kubelet considers the container healthy and alive. If it returns a non-zero value, the kubelet kills the container and restarts it.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/health-check.html#protect-slow-starting-containers-with-pre-start-checks","title":"Protect slow-starting containers with pre-start checks","text":"

Some applications need a long initialization time at startup. In that case, configure a startup probe using the same command as the liveness check, and for HTTP or TCP checks set failureThreshold * periodSeconds long enough to cover the worst-case startup time.

                                                                                          YAML example:

                                                                                          ports:\n- name: liveness-port\n  containerPort: 8080\n  hostPort: 8080\n\nlivenessProbe:\n  httpGet:\n    path: /healthz\n    port: liveness-port\n  failureThreshold: 1\n  periodSeconds: 10\n\nstartupProbe:\n  httpGet:\n    path: /healthz\n    port: liveness-port\n  failureThreshold: 30\n  periodSeconds: 10\n

With the settings above, the application has up to 5 minutes (30 * 10 = 300s) to finish starting. Once the startup probe succeeds, the liveness probe takes over and reacts quickly to container deadlock. If the startup probe never succeeds, the container is killed after 300 seconds and handled according to its restartPolicy.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/job-parameters.html","title":"Description of job parameters","text":"

Based on the settings of .spec.completions and .spec.parallelism, jobs (Job) fall into the following types:

• Non-parallel Job: creates one Pod until the Job completes successfully.
• Parallel Job with a fixed completion count: the Job is complete when the number of successfully finished Pods reaches .spec.completions.
• Parallel Job: creates one or more Pods until one finishes successfully.

Parameter descriptions:

• restartPolicy: the restart policy for the Job's Pods; only Never or OnFailure are allowed.
• .spec.completions: the number of Pods that must finish successfully for the Job to complete; defaults to 1.
• .spec.parallelism: the number of Pods running in parallel; defaults to 1.
• .spec.backoffLimit: the maximum number of retries for failed Pods, beyond which no more retries are attempted.
• .spec.activeDeadlineSeconds: the maximum running time of the Job's Pods. Once this deadline is reached, the Job, and with it all of its Pods, is stopped. activeDeadlineSeconds takes priority over backoffLimit: a Job that reaches activeDeadlineSeconds ignores the backoffLimit setting.

The following example Job configuration, saved as myjob.yaml, computes \u03c0 to 2000 digits and prints the output.

apiVersion: batch/v1\nkind: Job  # resource type\nmetadata:\n  name: myjob\nspec:\n  completions: 50  # the Job completes after 50 Pods finish successfully; here \u03c0 is printed 50 times\n  parallelism: 5  # 5 Pods run in parallel\n  backoffLimit: 5  # retry up to 5 times\n  template:\n    spec:\n      containers:\n      - name: pi\n        image: perl\n        command: [\"perl\", \"-Mbignum=bpi\", \"-wle\", \"print bpi(2000)\"]\n      restartPolicy: Never  # restart policy for the Job's Pods\n

                                                                                          Related commands

kubectl apply -f myjob.yaml  # start the Job\nkubectl get job  # view this Job\nkubectl logs myjob-1122dswzs  # view the logs of a Job Pod\n
                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/lifecycle.html","title":"Configure the container lifecycle","text":"

Pods follow a predefined lifecycle: they start in the Pending phase and enter Running if at least one container in the Pod starts normally. If any container in the Pod ends in failure, the phase becomes Failed. The following phase values indicate which stage of the lifecycle a Pod is in.

• Pending: the Pod has been accepted by the system, but one or more containers have not yet been created or started. This phase includes time spent waiting to be scheduled and downloading images over the network.
• Running: the Pod has been bound to a node and all containers have been created. At least one container is still running, or is starting or restarting.
• Succeeded: all containers in the Pod terminated successfully and will not be restarted.
• Failed: all containers in the Pod have terminated, and at least one container terminated in failure, i.e. exited with a non-zero status or was terminated by the system.
• Unknown: the status of the Pod cannot be obtained for some reason, usually due to a communication failure with the host where the Pod runs.

When you create a workload in Suanova container management, an image is usually used to specify the container's runtime environment. By default, the image's Entrypoint and CMD fields define the command and arguments executed when the container runs. To run different commands and arguments before start, after start, or before stop, you can override the image defaults by configuring the container's lifecycle event commands and arguments.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/lifecycle.html#lifecycle-configuration","title":"Lifecycle configuration","text":"

                                                                                          Configure the startup command, post-start command, and pre-stop command of the container according to business needs.

• Start command: optional. The container is started according to this command.
• Post-start command: optional. A command run after the container starts.
• Pre-stop command: optional. A command executed when the container receives a stop signal, ensuring that the services running in the instance can be drained before the instance is upgraded or deleted."},{"location":"en/end-user/kpanda/workloads/pod-config/lifecycle.html#start-command","title":"start command","text":"

                                                                                          Configure the startup command according to the table below.

• Run command: required. An executable command; separate multiple commands with spaces, and wrap a command that itself contains spaces in quotes (\"\"). With multiple commands, it is recommended to run them via /bin/sh or another shell and pass the remaining commands in as arguments. Example: /run/server
• Running parameters: optional. Arguments passed to the run command. Example: port=8080"},{"location":"en/end-user/kpanda/workloads/pod-config/lifecycle.html#post-start-commands","title":"Post-start commands","text":"

                                                                                          Suanova provides two processing types, command line script and HTTP request, to configure post-start commands. You can choose the configuration method that suits you according to the table below.

                                                                                          Command line script configuration

• Run command: optional. An executable command; separate multiple commands with spaces, and wrap a command that itself contains spaces in quotes (\"\"). With multiple commands, it is recommended to run them via /bin/sh or another shell and pass the remaining commands in as arguments. Example: /run/server
• Running parameters: optional. Arguments passed to the run command. Example: port=8080"},{"location":"en/end-user/kpanda/workloads/pod-config/lifecycle.html#stop-pre-command","title":"stop pre-command","text":"

                                                                                          Suanova provides two processing types, command line script and HTTP request, to configure the pre-stop command. You can choose the configuration method that suits you according to the table below.

                                                                                          HTTP request configuration

• URL path: optional. The URL path of the request. Example: /run/server
• Port: required. The port of the request. Example: port=8080
• Node address: optional. The IP address to request; defaults to the IP of the node where the container runs.
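A minimal sketch of how these settings map onto Kubernetes lifecycle hooks in a Pod spec; the image is illustrative, /run/server is taken from the example values above, and the echo command and /drain path are placeholders:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: lifecycle-demo\nspec:\n  containers:\n  - name: app\n    image: nginx:1.25  # illustrative image\n    command: [\"/run/server\"]  # start command overriding the image default\n    lifecycle:\n      postStart:\n        exec:  # post-start command as a command line script\n          command: [\"/bin/sh\", \"-c\", \"echo started > /tmp/started\"]\n      preStop:\n        httpGet:  # pre-stop command as an HTTP request\n          path: /drain\n          port: 8080\n

"},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html","title":"Scheduling Policy","text":"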

In a Kubernetes cluster, nodes have labels, just like many other Kubernetes objects. You can add labels manually, and Kubernetes also applies a set of standard labels to every node in the cluster; see Common Labels, Annotations, and Taints for common node labels. With node labels you can have Pods scheduled onto specific nodes or groups of nodes, for example to ensure that particular Pods only run on nodes with certain isolation, security, or governance properties.

nodeSelector is the simplest recommended form of node selection constraint. You can add a nodeSelector field to the Pod's spec and list the node labels it requires; Kubernetes then schedules the Pod only onto nodes carrying every one of those labels. Affinity and anti-affinity expand the kinds of constraints you can define. Some benefits of affinity and anti-affinity are:

• The affinity and anti-affinity language is more expressive: nodeSelector only selects nodes that carry all the specified labels, while affinity and anti-affinity give you finer control over the selection logic.

• You can mark a rule as a \"soft requirement\" or \"preference\", so the scheduler still schedules the Pod even when no matching node is found.

• You can constrain scheduling using the labels of other Pods running on the node (or in another topology domain) rather than only the node's own labels, which lets you define rules for which Pods may be co-located.

By setting affinity and anti-affinity you choose which nodes the Pod can be deployed to.
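A minimal sketch of the nodeSelector form, assuming an illustrative disktype: ssd node label and container image:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: selector-demo\nspec:\n  nodeSelector:\n    disktype: ssd  # the Pod is only scheduled onto nodes carrying this label\n  containers:\n  - name: app\n    image: nginx:1.25  # illustrative image\n

The affinity and anti-affinity rules described below can replace or complement this field when more control is needed.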

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#tolerance-time","title":"Tolerance time","text":"

When the node hosting a workload instance becomes unavailable, this is the period after which the system reschedules the instance to another available node. The default is 300 seconds.
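In the Pod spec this tolerance time corresponds to tolerationSeconds on the built-in not-ready and unreachable tolerations; a minimal sketch, assuming the 300-second default:

spec:\n  tolerations:\n  - key: node.kubernetes.io/unreachable\n    operator: Exists\n    effect: NoExecute\n    tolerationSeconds: 300  # evict and reschedule 300 seconds after the node becomes unreachable\n  - key: node.kubernetes.io/not-ready\n    operator: Exists\n    effect: NoExecute\n    tolerationSeconds: 300  # same grace period for a not-ready node\n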

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#node-affinity-nodeaffinity","title":"Node affinity (nodeAffinity)","text":"

Node affinity is conceptually similar to nodeSelector: it lets you constrain which nodes a Pod can be scheduled onto based on node labels. There are two types of node affinity:

• Must be satisfied (requiredDuringSchedulingIgnoredDuringExecution): the scheduler only places the Pod when the rule is satisfied. This works like nodeSelector but with a more expressive syntax. You can define multiple hard constraint rules, of which only one needs to be satisfied.

• Satisfied as much as possible (preferredDuringSchedulingIgnoredDuringExecution): the scheduler tries to find nodes that satisfy the rule; if none match, the Pod is still scheduled. You can assign weights to soft constraint rules: when several nodes qualify, the node with the highest weight is preferred. You can also define multiple soft constraint rules, of which only one needs to be satisfied.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#tag-name","title":"Tag name","text":"

The label key of the node to match; you can use a default label or a custom label.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#operators","title":"Operators","text":"
• In: the label's value must be in the given list of values
• NotIn: the label's value must not be in the given list of values
• Exists: the label must exist; no value needs to be set
• DoesNotExist: the label must not exist; no value needs to be set
• Gt: the label's value must be greater than the given value (interpreted as an integer)
• Lt: the label's value must be less than the given value (interpreted as an integer)
                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#weights","title":"Weights","text":"

Weights can only be set on \"satisfied as much as possible\" rules and act as a scheduling priority: nodes matching higher-weight rules are preferred. The value range is 1 to 100.
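A minimal nodeAffinity sketch combining a hard rule, an operator, and a weighted soft rule; the kubernetes.io/os requirement and the disktype preference are illustrative:

spec:\n  affinity:\n    nodeAffinity:\n      requiredDuringSchedulingIgnoredDuringExecution:  # must be satisfied\n        nodeSelectorTerms:\n        - matchExpressions:\n          - key: kubernetes.io/os\n            operator: In\n            values:\n            - linux\n      preferredDuringSchedulingIgnoredDuringExecution:  # satisfied as much as possible\n      - weight: 80  # 1-100; nodes matching higher-weight rules are preferred\n        preference:\n          matchExpressions:\n          - key: disktype\n            operator: In\n            values:\n            - ssd\n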

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#workload-affinity","title":"Workload Affinity","text":"

                                                                                          Similar to node affinity, there are two types of workload affinity:

• Must be satisfied (requiredDuringSchedulingIgnoredDuringExecution): the scheduler only places the Pod when the rule is satisfied. You can define multiple hard constraint rules, all of which must be satisfied.
• Satisfied as much as possible (preferredDuringSchedulingIgnoredDuringExecution): the scheduler tries to find nodes that satisfy the rule; if none match, the Pod is still scheduled. You can assign weights to soft constraint rules: when several nodes qualify, the node with the highest weight is preferred. You can also define multiple soft constraint rules.

Workload affinity mainly determines which Pods of a workload can be deployed in the same topology domain. For example, services that communicate with each other can be scheduled into the same topology domain (such as the same availability zone) through affinity, reducing network latency between them.
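A minimal podAffinity sketch; the app=backend label and the zone-level topology key are illustrative:

spec:\n  affinity:\n    podAffinity:\n      requiredDuringSchedulingIgnoredDuringExecution:\n      - labelSelector:\n          matchExpressions:\n          - key: app\n            operator: In\n            values:\n            - backend\n        topologyKey: topology.kubernetes.io/zone  # co-locate with matching Pods in the same zone\n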

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#tag-name_1","title":"Tag name","text":"

The label key of the Pods to match; you can use a default label or a custom label.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#namespaces","title":"Namespaces","text":"

                                                                                          Specifies the namespace in which the scheduling policy takes effect.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#operators_1","title":"Operators","text":"
• In: the label's value must be in the given list of values
• NotIn: the label's value must not be in the given list of values
• Exists: the label must exist; no value needs to be set
• DoesNotExist: the label must not exist; no value needs to be set
                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#topology-domain","title":"Topology domain","text":"

Specifies the scope used to distinguish placement during scheduling. For example, specifying kubernetes.io/hostname uses individual nodes as the distinguishing scope.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#workload-anti-affinity","title":"Workload Anti-Affinity","text":"

                                                                                          Similar to node affinity, there are two types of anti-affinity for workloads:

• Must be satisfied (requiredDuringSchedulingIgnoredDuringExecution): the scheduler only places the Pod when the rule is satisfied. You can define multiple hard constraint rules, all of which must be satisfied.
• Satisfied as much as possible (preferredDuringSchedulingIgnoredDuringExecution): the scheduler tries to find nodes that satisfy the rule; if none match, the Pod is still scheduled. You can assign weights to soft constraint rules: when several nodes qualify, the node with the highest weight is preferred. You can also define multiple soft constraint rules.

Workload anti-affinity mainly determines which Pods of a workload cannot be deployed in the same topology domain. For example, spreading the Pods of a workload across topology domains (such as different hosts) improves the stability of the workload itself.
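A minimal podAntiAffinity sketch, mirroring the pattern used by the MySQL StatefulSet manifest earlier in this document; the app: web label is illustrative:

spec:\n  affinity:\n    podAntiAffinity:\n      preferredDuringSchedulingIgnoredDuringExecution:\n      - weight: 100\n        podAffinityTerm:\n          labelSelector:\n            matchLabels:\n              app: web  # illustrative label\n          topologyKey: kubernetes.io/hostname  # prefer spreading matching Pods across nodes\n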

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#tag-name_2","title":"Tag name","text":"

The label key of the Pods to match; you can use a default label or a custom label.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#namespaces_1","title":"Namespaces","text":"

                                                                                          Specifies the namespace in which the scheduling policy takes effect.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#operators_2","title":"Operators","text":"
• In: the label's value must be in the given list of values
• NotIn: the label's value must not be in the given list of values
• Exists: the label must exist; no value needs to be set
• DoesNotExist: the label must not exist; no value needs to be set
                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/scheduling-policy.html#topology-domain_1","title":"Topology domain","text":"

Specifies the scope used to distinguish placement during scheduling. For example, specifying kubernetes.io/hostname uses individual nodes as the distinguishing scope.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/workload-status.html","title":"Workload Status","text":"

A workload is an application running on Kubernetes. Whether your application is built from a single component or from many different ones, you can run it in a set of Pods. Kubernetes provides five built-in workload resources to manage Pods:

                                                                                          • Deployment
                                                                                          • StatefulSet
• DaemonSet
                                                                                          • Job
                                                                                          • CronJob

You can also extend workload resources with custom resources (CRDs). The fifth-generation container management supports the full lifecycle of workloads, including creation, update, scaling, monitoring, logging, deletion, and version management.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/workload-status.html#pod-status","title":"Pod Status","text":"

A Pod is the smallest unit of computing created and managed in Kubernetes: a collection of containers that share storage, networking, and the management policies that govern how they run. Pods are typically created through workload resources rather than directly by users. Pods follow a predefined lifecycle, starting in the Pending phase, entering Running once at least one primary container starts normally, and then moving to Succeeded or Failed depending on whether any container in the Pod ended in failure.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/workload-status.html#workload-status_1","title":"Workload Status","text":"

The fifth-generation container management module defines a built-in set of workload lifecycle statuses based on factors such as Pod status and replica count, so that users get a more faithful picture of how workloads are running. Because different workload types (such as Deployments and Jobs) manage Pods differently, they expose different lifecycle statuses at runtime, as shown in the following tables.

                                                                                          "},{"location":"en/end-user/kpanda/workloads/pod-config/workload-status.html#deployment-statefulset-damemonset-status","title":"Deployment, StatefulSet, DamemonSet Status","text":"Status Description Waiting 1. A workload is in this status while its creation is in progress. 2. After an upgrade or rollback action is triggered, the workload is in this status. 3. Trigger operations such as pausing/scaling, and the workload is in this status. Running This status occurs when all instances under the workload are running and the number of replicas matches the user-defined number. Deleting When a delete operation is performed, the payload is in this status until the delete is complete. Exception Unable to get the status of the workload for some reason. This usually occurs because communication with the pod's host has failed. Not Ready When the container is in an abnormal, pending status, this status is displayed when the workload cannot be started due to an unknown error"},{"location":"en/end-user/kpanda/workloads/pod-config/workload-status.html#job-status","title":"Job Status","text":"Status Description Waiting The workload is in this status while Job creation is in progress. Executing The Job is in progress and the workload is in this status. Execution Complete The Job execution is complete and the workload is in this status. Deleting A delete operation is triggered and the workload is in this status. Exception Pod status could not be obtained for some reason. This usually occurs because communication with the pod's host has failed."},{"location":"en/end-user/kpanda/workloads/pod-config/workload-status.html#cronjob-status","title":"CronJob status","text":"Status Description Waiting The CronJob is in this status when it is being created. Started After the CronJob is successfully created, the CronJob is in this status when it is running normally or when the paused task is started. Stopped The CronJob is in this status when the stop task operation is performed. Deleting The deletion operation is triggered, and the CronJob is in this status.

When a workload is in an abnormal or not-ready status, you can hover over its status value and the system shows more detailed error information in a tooltip. You can also view the logs or events to obtain related runtime information about the workload.

                                                                                          "},{"location":"en/end-user/register/index.html","title":"User Registration","text":"

                                                                                          New users need to register when using the AI platform for the first time.

                                                                                          "},{"location":"en/end-user/register/index.html#prerequisites","title":"Prerequisites","text":"
                                                                                          • The AI platform is installed
                                                                                          • Email registration functionality is enabled
                                                                                          • An available email address
                                                                                          "},{"location":"en/end-user/register/index.html#email-registration-steps","title":"Email Registration Steps","text":"
                                                                                          1. Open the AI platform homepage at https://ai.isuanova.com/ and click Register.

                                                                                          2. Enter your username, password, and email, then click Register.

                                                                                          3. The system will prompt that an email has been sent to your inbox.

4. Open your mailbox, find the email, and click the link it contains.

                                                                                          5. Congratulations, you have successfully accessed the AI platform and can now start your AI journey.

                                                                                          "},{"location":"en/end-user/share/notebook.html","title":"Using Notebook","text":"

                                                                                          Notebook usually refers to Jupyter Notebook or similar interactive computing environments. It is a very popular tool widely used in fields such as data science, machine learning, and deep learning. This page explains how to use Notebook in the AI platform.

                                                                                          "},{"location":"en/end-user/share/notebook.html#prerequisites","title":"Prerequisites","text":"
                                                                                          • The AI platform is installed
                                                                                          • User has successfully registered
                                                                                          • The administrator has assigned a workspace to the user
                                                                                          • Datasets (code, data, etc.) are prepared
                                                                                          "},{"location":"en/end-user/share/notebook.html#creating-and-using-notebook-instances","title":"Creating and Using Notebook Instances","text":"
                                                                                          1. Log into the AI platform as an Administrator.
                                                                                          2. Navigate to AI Lab -> Operator -> Queue Management, and click the Create button on the right.

                                                                                          3. Enter a name, select the cluster, workspace, and quota, then click OK.

                                                                                          4. Log into the AI platform as a User, navigate to AI Lab -> Notebook, and click the Create button on the right.

                                                                                          5. After configuring the various parameters, click OK.

Basic Information / Resource Configuration / Advanced Configuration

                                                                                            Enter a name, select the cluster, namespace, choose the queue just created, and click One-Click Initialization.

                                                                                            Select the Notebook type, configure memory, CPU, enable GPU, create and configure PVC:

                                                                                            Enable SSH external network access:

6. You will be automatically redirected to the Notebook instance list; click the instance name.

                                                                                          7. Enter the Notebook instance detail page and click the Open button in the upper right corner.

8. You are now in the Notebook development environment, with a persistent volume mounted at /home/jovyan. You can, for example, clone code with git or upload data over an SSH connection.

                                                                                          "},{"location":"en/end-user/share/notebook.html#accessing-notebook-instances-via-ssh","title":"Accessing Notebook Instances via SSH","text":"
                                                                                          1. Generate an SSH key pair on your own computer.

Open a command line on your computer (for example, Git Bash on Windows), run ssh-keygen.exe -t rsa, and press Enter at each prompt.

                                                                                          2. Use commands like cat ~/.ssh/id_rsa.pub to view and copy the public key.

                                                                                          3. Log into the AI platform as a user, click Personal Center -> SSH Public Key -> Import SSH Public Key in the upper right corner.

                                                                                          4. Enter the detail page of the Notebook instance and copy the SSH link.

                                                                                          5. Use SSH to access the Notebook instance from the client.

                                                                                          Next step: Create Training Job

                                                                                          "},{"location":"en/end-user/share/workload.html","title":"Creating AI Workloads Using GPU Resources","text":"

                                                                                          After the administrator allocates resource quotas for the workspace, users can create AI workloads to utilize GPU computing resources.

                                                                                          "},{"location":"en/end-user/share/workload.html#prerequisites","title":"Prerequisites","text":"
                                                                                          • The AI platform is installed
                                                                                          • User has successfully registered
                                                                                          • The administrator has assigned a workspace to the user
                                                                                          • The administrator has set resource quotas for the workspace
                                                                                          • The administrator has assigned a cluster to the user
                                                                                          "},{"location":"en/end-user/share/workload.html#steps-to-create-ai-workloads","title":"Steps to Create AI Workloads","text":"
                                                                                          1. Log into the AI platform as a user.
2. Navigate to Container Management, select a namespace, click Workloads -> Deployments, and then click the Create Image button on the right.

                                                                                          3. After configuring various parameters, click OK.

Basic Information / Container Configuration / Other

                                                                                            Select your namespace.

                                                                                            Set the image, configure CPU, memory, GPU, and other resources, and set the startup command.

                                                                                            Service configuration and advanced configuration can use the default settings.

                                                                                          4. You will be automatically redirected to the stateless workload list; click the workload name.

                                                                                          5. Enter the detail page where you can see the GPU quota.

                                                                                          6. You can also access the console and run the nvidia-smi command to view GPU resources.

                                                                                          Next step: Using Notebook

                                                                                          "},{"location":"en/openapi/index.html","title":"OpenAPI Documentation","text":"

This section provides OpenAPI documentation for developers.

                                                                                          • CloudHost OpenAPI Documentation
                                                                                          • AI Lab OpenAPI Documentation
                                                                                          • Container Management OpenAPI Documentation
                                                                                          • Insight OpenAPI Documentation
                                                                                          • Global Management OpenAPI Documentation
                                                                                          "},{"location":"en/openapi/index.html#obtaining-openapi-access-keys","title":"Obtaining OpenAPI Access Keys","text":"

Access keys can be used to access the OpenAPI and for continuous publishing. Follow the steps below to obtain a key and use it to access the API from your personal center.

                                                                                          Log in to the AI platform, find Personal Center in the dropdown menu at the top right corner, and manage your account's access keys on the Access Keys page.

                                                                                          Info

                                                                                          Access key information is displayed only once. If you forget the access key information, you will need to create a new access key.

                                                                                          "},{"location":"en/openapi/index.html#using-the-key-to-access-the-api","title":"Using the Key to Access the API","text":"

                                                                                          When accessing the AI platform's OpenAPI, include the request header Authorization:Bearer ${token} in the request to identify the visitor's identity, where ${token} is the key obtained in the previous step.

                                                                                          Request Example

                                                                                          curl -X GET -H 'Authorization:Bearer eyJhbGciOiJSUzI1NiIsImtpZCI6IkRKVjlBTHRBLXZ4MmtQUC1TQnVGS0dCSWc1cnBfdkxiQVVqM2U3RVByWnMiLCJ0eXAiOiJKV1QifQ.eyJleHAiOjE2NjE0MTU5NjksImlhdCI6MTY2MDgxMTE2OSwiaXNzIjoiZ2hpcHBvLmlvIiwic3ViIjoiZjdjOGIxZjUtMTc2MS00NjYwLTg2MWQtOWI3MmI0MzJmNGViIiwicHJlZmVycmVkX3VzZXJuYW1lIjoiYWRtaW4iLCJncm91cHMiOltdfQ.RsUcrAYkQQ7C6BxMOrdD3qbBRUt0VVxynIGeq4wyIgye6R8Ma4cjxG5CbU1WyiHKpvIKJDJbeFQHro2euQyVde3ygA672ozkwLTnx3Tu-_mB1BubvWCBsDdUjIhCQfT39rk6EQozMjb-1X1sbLwzkfzKMls-oxkjagI_RFrYlTVPwT3Oaw-qOyulRSw7Dxd7jb0vINPq84vmlQIsI3UuTZSNO5BCgHpubcWwBss-Aon_DmYA-Et_-QtmPBA3k8E2hzDSzc7eqK0I68P25r9rwQ3DeKwD1dbRyndqWORRnz8TLEXSiCFXdZT2oiMrcJtO188Ph4eLGut1-4PzKhwgrQ' https://demo-dev.daocloud.io/apis/ghippo.io/v1alpha1/users?page=1&pageSize=10 -k\n

                                                                                          Request Result

                                                                                          {\n    \"items\": [\n        {\n            \"id\": \"a7cfd010-ebbe-4601-987f-d098d9ef766e\",\n            \"name\": \"a\",\n            \"email\": \"\",\n            \"description\": \"\",\n            \"firstname\": \"\",\n            \"lastname\": \"\",\n            \"source\": \"locale\",\n            \"enabled\": true,\n            \"createdAt\": \"1660632794800\",\n            \"updatedAt\": \"0\",\n            \"lastLoginAt\": \"\"\n        }\n    ],\n    \"pagination\": {\n        \"page\": 1,\n        \"pageSize\": 10,\n        \"total\": 1\n    }\n}\n
                                                                                          "},{"location":"en/openapi/baize/index.html","title":"AI Lab OpenAPI Docs","text":""},{"location":"en/openapi/ghippo/index.html","title":"Global Management OpenAPI Docs","text":""},{"location":"en/openapi/insight/index.html","title":"Insight OpenAPI Docs","text":""},{"location":"en/openapi/kpanda/index.html","title":"Container Management OpenAPI Docs","text":""},{"location":"en/openapi/virtnest/index.html","title":"Cloud Host OpenAPI Docs","text":""}]} \ No newline at end of file